From ce94bef9354e15b99a329e7c5a451ea0ffd59fb1 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 19 Jun 2019 12:23:37 +0100 Subject: drm/i915: Signal fence completion from i915_request_wait With the upcoming change to automanaged i915_active, the intent is that whenever we wait on the set of active fences, they are signaled and collected. The requirement is that all successful returns from i915_request_wait() signal the fence, so fixup the one remaining path where we may return before the interrupt has been run. Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190619112341.9082-3-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_request.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index a195a92d0105..31a631f0b9c3 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -1447,8 +1447,10 @@ long i915_request_wait(struct i915_request *rq, for (;;) { set_current_state(state); - if (i915_request_completed(rq)) + if (i915_request_completed(rq)) { + dma_fence_signal(&rq->fence); break; + } if (signal_pending_state(state, current)) { timeout = -ERESTARTSYS; -- cgit v1.2.3 From b87b6c0dfce7d6c9df7f2c4c0ebacfc71bf58b89 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 18 Jun 2019 08:41:31 +0100 Subject: drm/i915: Flush the execution-callbacks on retiring In the unlikely case the request completes while we regard it as not even executing on the GPU (see the next patch!), we have to flush any pending execution callbacks at retirement and ensure that we do not add any more. Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190618074153.16055-4-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_request.c | 93 +++++++++++++++++++------------------ 1 file changed, 49 insertions(+), 44 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 31a631f0b9c3..7083e6ab92c5 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -119,6 +119,50 @@ const struct dma_fence_ops i915_fence_ops = { .release = i915_fence_release, }; +static void irq_execute_cb(struct irq_work *wrk) +{ + struct execute_cb *cb = container_of(wrk, typeof(*cb), work); + + i915_sw_fence_complete(cb->fence); + kmem_cache_free(global.slab_execute_cbs, cb); +} + +static void irq_execute_cb_hook(struct irq_work *wrk) +{ + struct execute_cb *cb = container_of(wrk, typeof(*cb), work); + + cb->hook(container_of(cb->fence, struct i915_request, submit), + &cb->signal->fence); + i915_request_put(cb->signal); + + irq_execute_cb(wrk); +} + +static void __notify_execute_cb(struct i915_request *rq) +{ + struct execute_cb *cb; + + lockdep_assert_held(&rq->lock); + + if (list_empty(&rq->execute_cb)) + return; + + list_for_each_entry(cb, &rq->execute_cb, link) + irq_work_queue(&cb->work); + + /* + * XXX Rollback on __i915_request_unsubmit() + * + * In the future, perhaps when we have an active time-slicing scheduler, + * it will be interesting to unsubmit parallel execution and remove + * busywaits from the GPU until their master is restarted. This is + * quite hairy, we have to carefully rollback the fence and do a + * preempt-to-idle cycle on the target engine, all the while the + * master execute_cb may refire. 
+ */ + INIT_LIST_HEAD(&rq->execute_cb); +} + static inline void i915_request_remove_from_client(struct i915_request *request) { @@ -246,6 +290,11 @@ static bool i915_request_retire(struct i915_request *rq) GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters)); atomic_dec(&rq->i915->gt_pm.rps.num_waiters); } + if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) { + set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags); + __notify_execute_cb(rq); + } + GEM_BUG_ON(!list_empty(&rq->execute_cb)); spin_unlock(&rq->lock); local_irq_enable(); @@ -285,50 +334,6 @@ void i915_request_retire_upto(struct i915_request *rq) } while (i915_request_retire(tmp) && tmp != rq); } -static void irq_execute_cb(struct irq_work *wrk) -{ - struct execute_cb *cb = container_of(wrk, typeof(*cb), work); - - i915_sw_fence_complete(cb->fence); - kmem_cache_free(global.slab_execute_cbs, cb); -} - -static void irq_execute_cb_hook(struct irq_work *wrk) -{ - struct execute_cb *cb = container_of(wrk, typeof(*cb), work); - - cb->hook(container_of(cb->fence, struct i915_request, submit), - &cb->signal->fence); - i915_request_put(cb->signal); - - irq_execute_cb(wrk); -} - -static void __notify_execute_cb(struct i915_request *rq) -{ - struct execute_cb *cb; - - lockdep_assert_held(&rq->lock); - - if (list_empty(&rq->execute_cb)) - return; - - list_for_each_entry(cb, &rq->execute_cb, link) - irq_work_queue(&cb->work); - - /* - * XXX Rollback on __i915_request_unsubmit() - * - * In the future, perhaps when we have an active time-slicing scheduler, - * it will be interesting to unsubmit parallel execution and remove - * busywaits from the GPU until their master is restarted. This is - * quite hairy, we have to carefully rollback the fence and do a - * preempt-to-idle cycle on the target engine, all the while the - * master execute_cb may refire. - */ - INIT_LIST_HEAD(&rq->execute_cb); -} - static int __i915_request_await_execution(struct i915_request *rq, struct i915_request *signal, -- cgit v1.2.3 From bdeb18dbcf8227e8f4b46db3f53a4155409e68cd Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Tue, 18 Jun 2019 10:51:31 -0700 Subject: drm/i915/ehl: Allow combo PHY A to drive a third external display MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit EHL has a mux on combo PHY A that allows it to be driven either by an internal display (DDI-A or DSI DPHY) or by an external display (DDI-D). This is a motherboard design decision that can not be changed on the fly. Unfortunately there are no strap registers that allow us to detect the board configuration directly, so let's use the VBT to try to figure it out and program the mux accordingly. For now if we run across a broken VBT that tries to claim that PHY A is attached to both internal and external displays at the same time, we'll resolve the conflict in favor of the internal display. To help debug these kind of bad VBT's, let's also add a quick DRM_DEBUG message during child device parsing so that it's easier to understand these cases if they show up in bug reports. v2: - Confirmed that VBT's dvo port refers to the DDI and not the PHY. Thus we can check more explicitly for (ddi_d && !(ddi_a || dsi)). If a bad VBT contradicts itself, let internal display win. (Ville) v3: - Switch condition from !IS_ICELAKE to IS_ELKHARTLAKE. Although the convention is usually to assume that future platforms will inherit all current platform behavior, this feels more like a one-platform quirk. 
(Ville) - Update commit message to describe what we do if/when we encounter broken VBT's, and note that the new debug print during child device parsing is intentional. Cc: Clint Taylor Cc: Ville Syrjälä Signed-off-by: Matt Roper Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190618175131.9139-1-matthew.d.roper@intel.com --- drivers/gpu/drm/i915/display/intel_bios.c | 3 +++ drivers/gpu/drm/i915/display/intel_combo_phy.c | 36 ++++++++++++++++++++++++++ drivers/gpu/drm/i915/i915_reg.h | 1 + 3 files changed, 40 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index c4710889cb32..0c9808132d67 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -1668,6 +1668,9 @@ parse_general_definitions(struct drm_i915_private *dev_priv, if (!child->device_type) continue; + DRM_DEBUG_KMS("Found VBT child device with type 0x%x\n", + child->device_type); + /* * Copy as much as we know (sizeof) and is available * (child_dev_size) of the child device. Accessing the data must diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c index 841708da5a56..075bab2500eb 100644 --- a/drivers/gpu/drm/i915/display/intel_combo_phy.c +++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c @@ -260,6 +260,32 @@ void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv, I915_WRITE(ICL_PORT_CL_DW10(port), val); } +static u32 ehl_combo_phy_a_mux(struct drm_i915_private *i915, u32 val) +{ + bool ddi_a_present = i915->vbt.ddi_port_info[PORT_A].child != NULL; + bool ddi_d_present = i915->vbt.ddi_port_info[PORT_D].child != NULL; + bool dsi_present = intel_bios_is_dsi_present(i915, NULL); + + /* + * VBT's 'dvo port' field for child devices references the DDI, not + * the PHY. So if combo PHY A is wired up to drive an external + * display, we should see a child device present on PORT_D and + * nothing on PORT_A and no DSI. + */ + if (ddi_d_present && !ddi_a_present && !dsi_present) + return val | ICL_PHY_MISC_MUX_DDID; + + /* + * If we encounter a VBT that claims to have an external display on + * DDI-D _and_ an internal display on DDI-A/DSI leave an error message + * in the log and let the internal display win. + */ + if (ddi_d_present) + DRM_ERROR("VBT claims to have both internal and external displays on PHY A. Configuring for internal.\n"); + + return val & ~ICL_PHY_MISC_MUX_DDID; +} + static void icl_combo_phys_init(struct drm_i915_private *dev_priv) { enum port port; @@ -273,7 +299,17 @@ static void icl_combo_phys_init(struct drm_i915_private *dev_priv) continue; } + /* + * EHL's combo PHY A can be hooked up to either an external + * display (via DDI-D) or an internal display (via DDI-A or + * the DSI DPHY). This is a motherboard design decision that + * can't be changed on the fly, so initialize the PHY's mux + * based on whether our VBT indicates the presence of any + * "internal" child devices. 
+ */ val = I915_READ(ICL_PHY_MISC(port)); + if (IS_ELKHARTLAKE(dev_priv) && port == PORT_A) + val = ehl_combo_phy_a_mux(dev_priv, val); val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN; I915_WRITE(ICL_PHY_MISC(port), val); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index a408886adb47..02c7f8c6c20b 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -11145,6 +11145,7 @@ enum skl_power_gate { #define _ICL_PHY_MISC_B 0x64C04 #define ICL_PHY_MISC(port) _MMIO_PORT(port, _ICL_PHY_MISC_A, \ _ICL_PHY_MISC_B) +#define ICL_PHY_MISC_MUX_DDID (1 << 28) #define ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN (1 << 23) /* Icelake Display Stream Compression Registers */ -- cgit v1.2.3 From 09c5ab384f6fb30f834a5777888b4486dd7f015d Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 19 Jun 2019 18:01:35 +0100 Subject: drm/i915: Keep rings pinned while the context is active Remember to keep the rings pinned as well as the context image until the GPU is no longer active. v2: Introduce a ring->pin_count primarily to hide the mock_ring that doesn't fit into the normal GGTT vma picture. v3: Order is important in teardown, ringbuffer submission needs to drop the pin count on the engine->kernel_context before it can gleefully free its ring. Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=110946 Fixes: ce476c80b8bf ("drm/i915: Keep contexts pinned until after the next kernel context switch") Signed-off-by: Chris Wilson Cc: Mika Kuoppala Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190619170135.15281-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_context.c | 27 ++++++++++++++++-------- drivers/gpu/drm/i915/gt/intel_engine_types.h | 12 +++++++++++ drivers/gpu/drm/i915/gt/intel_lrc.c | 10 ++------- drivers/gpu/drm/i915/gt/intel_ringbuffer.c | 31 ++++++++++++++++++---------- drivers/gpu/drm/i915/gt/mock_engine.c | 1 + 5 files changed, 53 insertions(+), 28 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 2c454f227c2e..23120901c55f 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -126,6 +126,7 @@ static void intel_context_retire(struct i915_active *active) if (ce->state) __context_unpin_state(ce->state); + intel_ring_unpin(ce->ring); intel_context_put(ce); } @@ -160,27 +161,35 @@ int intel_context_active_acquire(struct intel_context *ce, unsigned long flags) intel_context_get(ce); + err = intel_ring_pin(ce->ring); + if (err) + goto err_put; + if (!ce->state) return 0; err = __context_pin_state(ce->state, flags); - if (err) { - i915_active_cancel(&ce->active); - intel_context_put(ce); - return err; - } + if (err) + goto err_ring; /* Preallocate tracking nodes */ if (!i915_gem_context_is_kernel(ce->gem_context)) { err = i915_active_acquire_preallocate_barrier(&ce->active, ce->engine); - if (err) { - i915_active_release(&ce->active); - return err; - } + if (err) + goto err_state; } return 0; + +err_state: + __context_unpin_state(ce->state); +err_ring: + intel_ring_unpin(ce->ring); +err_put: + intel_context_put(ce); + i915_active_cancel(&ce->active); + return err; } void intel_context_active_release(struct intel_context *ce) diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index 868b220214f8..43e975a26016 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ 
b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -70,6 +70,18 @@ struct intel_ring { struct list_head request_list; struct list_head active_link; + /* + * As we have two types of rings, one global to the engine used + * by ringbuffer submission and those that are exclusive to a + * context used by execlists, we have to play safe and allow + * atomic updates to the pin_count. However, the actual pinning + * of the context is either done during initialisation for + * ringbuffer submission or serialised as part of the context + * pinning for execlists, and so we do not need a mutex ourselves + * to serialise intel_ring_pin/intel_ring_unpin. + */ + atomic_t pin_count; + u32 head; u32 tail; u32 emit; diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index b42b5f158295..82b7ace62d97 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -1414,6 +1414,7 @@ static void execlists_context_destroy(struct kref *kref) { struct intel_context *ce = container_of(kref, typeof(*ce), ref); + GEM_BUG_ON(!i915_active_is_idle(&ce->active)); GEM_BUG_ON(intel_context_is_pinned(ce)); if (ce->state) @@ -1426,7 +1427,6 @@ static void execlists_context_unpin(struct intel_context *ce) { i915_gem_context_unpin_hw_id(ce->gem_context); i915_gem_object_unpin_map(ce->state->obj); - intel_ring_unpin(ce->ring); } static void @@ -1478,13 +1478,9 @@ __execlists_context_pin(struct intel_context *ce, goto unpin_active; } - ret = intel_ring_pin(ce->ring); - if (ret) - goto unpin_map; - ret = i915_gem_context_pin_hw_id(ce->gem_context); if (ret) - goto unpin_ring; + goto unpin_map; ce->lrc_desc = lrc_descriptor(ce, engine); ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; @@ -1492,8 +1488,6 @@ __execlists_context_pin(struct intel_context *ce, return 0; -unpin_ring: - intel_ring_unpin(ce->ring); unpin_map: i915_gem_object_unpin_map(ce->state->obj); unpin_active: diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c index c6023bc9452d..12010e798868 100644 --- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c @@ -1149,16 +1149,16 @@ i915_emit_bb_start(struct i915_request *rq, int intel_ring_pin(struct intel_ring *ring) { struct i915_vma *vma = ring->vma; - enum i915_map_type map = i915_coherent_map_type(vma->vm->i915); unsigned int flags; void *addr; int ret; - GEM_BUG_ON(ring->vaddr); + if (atomic_fetch_inc(&ring->pin_count)) + return 0; ret = i915_timeline_pin(ring->timeline); if (ret) - return ret; + goto err_unpin; flags = PIN_GLOBAL; @@ -1172,26 +1172,31 @@ int intel_ring_pin(struct intel_ring *ring) ret = i915_vma_pin(vma, 0, 0, flags); if (unlikely(ret)) - goto unpin_timeline; + goto err_timeline; if (i915_vma_is_map_and_fenceable(vma)) addr = (void __force *)i915_vma_pin_iomap(vma); else - addr = i915_gem_object_pin_map(vma->obj, map); + addr = i915_gem_object_pin_map(vma->obj, + i915_coherent_map_type(vma->vm->i915)); if (IS_ERR(addr)) { ret = PTR_ERR(addr); - goto unpin_ring; + goto err_ring; } vma->obj->pin_global++; + GEM_BUG_ON(ring->vaddr); ring->vaddr = addr; + return 0; -unpin_ring: +err_ring: i915_vma_unpin(vma); -unpin_timeline: +err_timeline: i915_timeline_unpin(ring->timeline); +err_unpin: + atomic_dec(&ring->pin_count); return ret; } @@ -1207,16 +1212,19 @@ void intel_ring_reset(struct intel_ring *ring, u32 tail) void intel_ring_unpin(struct intel_ring *ring) { - GEM_BUG_ON(!ring->vma); - GEM_BUG_ON(!ring->vaddr); + if 
(!atomic_dec_and_test(&ring->pin_count)) + return; /* Discard any unused bytes beyond that submitted to hw. */ intel_ring_reset(ring, ring->tail); + GEM_BUG_ON(!ring->vma); if (i915_vma_is_map_and_fenceable(ring->vma)) i915_vma_unpin_iomap(ring->vma); else i915_gem_object_unpin_map(ring->vma->obj); + + GEM_BUG_ON(!ring->vaddr); ring->vaddr = NULL; ring->vma->obj->pin_global--; @@ -2081,10 +2089,11 @@ static void ring_destroy(struct intel_engine_cs *engine) WARN_ON(INTEL_GEN(dev_priv) > 2 && (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0); + intel_engine_cleanup_common(engine); + intel_ring_unpin(engine->buffer); intel_ring_put(engine->buffer); - intel_engine_cleanup_common(engine); kfree(engine); } diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c index 086801b51441..486c6953dcb1 100644 --- a/drivers/gpu/drm/i915/gt/mock_engine.c +++ b/drivers/gpu/drm/i915/gt/mock_engine.c @@ -66,6 +66,7 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine) ring->base.effective_size = sz; ring->base.vaddr = (void *)(ring + 1); ring->base.timeline = &ring->timeline; + atomic_set(&ring->base.pin_count, 1); INIT_LIST_HEAD(&ring->base.request_list); intel_ring_update_space(&ring->base); -- cgit v1.2.3 From ccb2aceaaa5f9267ef7b485b41ae9be3f04b50d3 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Wed, 19 Jun 2019 18:00:16 -0700 Subject: drm/i915: use vfuncs for reg_read/write_fw_domains Instead of going through the if-else chain every time, let's save the function in the uncore structure. Note that the new functions are purposely not used from the reg read/write functions to keep the inlining there. While at it, use the new macro to call the old ones to clean the code a bit. v2: Rename macros for no-forcewake function assignment (Tvrtko) Signed-off-by: Daniele Ceraolo Spurio Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Signed-off-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190620010021.20637-2-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/intel_uncore.c | 172 +++++++++++---------------- drivers/gpu/drm/i915/intel_uncore.h | 5 + drivers/gpu/drm/i915/selftests/mock_uncore.c | 4 +- 3 files changed, 75 insertions(+), 106 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index da33aa672c3d..8e5716bc53e2 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -901,6 +901,12 @@ static bool is_gen##x##_shadowed(u32 offset) \ __is_genX_shadowed(8) __is_genX_shadowed(11) +static enum forcewake_domains +gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) +{ + return FORCEWAKE_RENDER; +} + #define __gen8_reg_write_fw_domains(uncore, offset) \ ({ \ enum forcewake_domains __fwd; \ @@ -1145,26 +1151,23 @@ func##_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \ val = __raw_uncore_read##x(uncore, reg); \ GEN6_READ_FOOTER; \ } -#define __gen6_read(x) __gen_read(gen6, x) -#define __fwtable_read(x) __gen_read(fwtable, x) -#define __gen11_fwtable_read(x) __gen_read(gen11_fwtable, x) - -__gen11_fwtable_read(8) -__gen11_fwtable_read(16) -__gen11_fwtable_read(32) -__gen11_fwtable_read(64) -__fwtable_read(8) -__fwtable_read(16) -__fwtable_read(32) -__fwtable_read(64) -__gen6_read(8) -__gen6_read(16) -__gen6_read(32) -__gen6_read(64) - -#undef __gen11_fwtable_read -#undef __fwtable_read -#undef __gen6_read + +#define __gen_reg_read_funcs(func) \ +static enum 
forcewake_domains \ +func##_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \ + return __##func##_reg_read_fw_domains(uncore, i915_mmio_reg_offset(reg)); \ +} \ +\ +__gen_read(func, 8) \ +__gen_read(func, 16) \ +__gen_read(func, 32) \ +__gen_read(func, 64) + +__gen_reg_read_funcs(gen11_fwtable); +__gen_reg_read_funcs(fwtable); +__gen_reg_read_funcs(gen6); + +#undef __gen_reg_read_funcs #undef GEN6_READ_FOOTER #undef GEN6_READ_HEADER @@ -1225,6 +1228,9 @@ gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) __raw_uncore_write##x(uncore, reg, val); \ GEN6_WRITE_FOOTER; \ } +__gen6_write(8) +__gen6_write(16) +__gen6_write(32) #define __gen_write(func, x) \ static void \ @@ -1237,38 +1243,33 @@ func##_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trac __raw_uncore_write##x(uncore, reg, val); \ GEN6_WRITE_FOOTER; \ } -#define __gen8_write(x) __gen_write(gen8, x) -#define __fwtable_write(x) __gen_write(fwtable, x) -#define __gen11_fwtable_write(x) __gen_write(gen11_fwtable, x) - -__gen11_fwtable_write(8) -__gen11_fwtable_write(16) -__gen11_fwtable_write(32) -__fwtable_write(8) -__fwtable_write(16) -__fwtable_write(32) -__gen8_write(8) -__gen8_write(16) -__gen8_write(32) -__gen6_write(8) -__gen6_write(16) -__gen6_write(32) -#undef __gen11_fwtable_write -#undef __fwtable_write -#undef __gen8_write -#undef __gen6_write +#define __gen_reg_write_funcs(func) \ +static enum forcewake_domains \ +func##_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \ + return __##func##_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg)); \ +} \ +\ +__gen_write(func, 8) \ +__gen_write(func, 16) \ +__gen_write(func, 32) + +__gen_reg_write_funcs(gen11_fwtable); +__gen_reg_write_funcs(fwtable); +__gen_reg_write_funcs(gen8); + +#undef __gen_reg_write_funcs #undef GEN6_WRITE_FOOTER #undef GEN6_WRITE_HEADER -#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \ +#define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x) \ do { \ (uncore)->funcs.mmio_writeb = x##_write8; \ (uncore)->funcs.mmio_writew = x##_write16; \ (uncore)->funcs.mmio_writel = x##_write32; \ } while (0) -#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \ +#define ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x) \ do { \ (uncore)->funcs.mmio_readb = x##_read8; \ (uncore)->funcs.mmio_readw = x##_read16; \ @@ -1276,6 +1277,17 @@ do { \ (uncore)->funcs.mmio_readq = x##_read64; \ } while (0) +#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \ +do { \ + ASSIGN_RAW_WRITE_MMIO_VFUNCS((uncore), x); \ + (uncore)->funcs.write_fw_domains = x##_reg_write_fw_domains; \ +} while (0) + +#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \ +do { \ + ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x); \ + (uncore)->funcs.read_fw_domains = x##_reg_read_fw_domains; \ +} while (0) static void fw_domain_init(struct intel_uncore *uncore, enum forcewake_domain_id domain_id, @@ -1559,11 +1571,11 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore) if (!intel_uncore_has_forcewake(uncore)) { if (IS_GEN(i915, 5)) { - ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen5); - ASSIGN_READ_MMIO_VFUNCS(uncore, gen5); + ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5); + ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5); } else { - ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen2); - ASSIGN_READ_MMIO_VFUNCS(uncore, gen2); + ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2); + ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen2); } } else if (IS_GEN_RANGE(i915, 6, 7)) { ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6); @@ -1594,6 +1606,12 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore) 
ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable); } + /* make sure fw funcs are set if and only if we have fw*/ + GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_get); + GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_put); + GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains); + GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains); + if (HAS_FPGA_DBG_UNCLAIMED(i915)) uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED; @@ -1871,62 +1889,6 @@ out: return ret; } -static enum forcewake_domains -intel_uncore_forcewake_for_read(struct intel_uncore *uncore, - i915_reg_t reg) -{ - struct drm_i915_private *i915 = uncore_to_i915(uncore); - u32 offset = i915_mmio_reg_offset(reg); - enum forcewake_domains fw_domains; - - if (INTEL_GEN(i915) >= 11) { - fw_domains = __gen11_fwtable_reg_read_fw_domains(uncore, offset); - } else if (HAS_FWTABLE(i915)) { - fw_domains = __fwtable_reg_read_fw_domains(uncore, offset); - } else if (INTEL_GEN(i915) >= 6) { - fw_domains = __gen6_reg_read_fw_domains(uncore, offset); - } else { - /* on devices with FW we expect to hit one of the above cases */ - if (intel_uncore_has_forcewake(uncore)) - MISSING_CASE(INTEL_GEN(i915)); - - fw_domains = 0; - } - - WARN_ON(fw_domains & ~uncore->fw_domains); - - return fw_domains; -} - -static enum forcewake_domains -intel_uncore_forcewake_for_write(struct intel_uncore *uncore, - i915_reg_t reg) -{ - struct drm_i915_private *i915 = uncore_to_i915(uncore); - u32 offset = i915_mmio_reg_offset(reg); - enum forcewake_domains fw_domains; - - if (INTEL_GEN(i915) >= 11) { - fw_domains = __gen11_fwtable_reg_write_fw_domains(uncore, offset); - } else if (HAS_FWTABLE(i915) && !IS_VALLEYVIEW(i915)) { - fw_domains = __fwtable_reg_write_fw_domains(uncore, offset); - } else if (IS_GEN(i915, 8)) { - fw_domains = __gen8_reg_write_fw_domains(uncore, offset); - } else if (IS_GEN_RANGE(i915, 6, 7)) { - fw_domains = FORCEWAKE_RENDER; - } else { - /* on devices with FW we expect to hit one of the above cases */ - if (intel_uncore_has_forcewake(uncore)) - MISSING_CASE(INTEL_GEN(i915)); - - fw_domains = 0; - } - - WARN_ON(fw_domains & ~uncore->fw_domains); - - return fw_domains; -} - /** * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access * a register @@ -1953,10 +1915,12 @@ intel_uncore_forcewake_for_reg(struct intel_uncore *uncore, return 0; if (op & FW_REG_READ) - fw_domains = intel_uncore_forcewake_for_read(uncore, reg); + fw_domains = uncore->funcs.read_fw_domains(uncore, reg); if (op & FW_REG_WRITE) - fw_domains |= intel_uncore_forcewake_for_write(uncore, reg); + fw_domains |= uncore->funcs.write_fw_domains(uncore, reg); + + WARN_ON(fw_domains & ~uncore->fw_domains); return fw_domains; } diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h index 804a0faacc91..4afde0c44ffe 100644 --- a/drivers/gpu/drm/i915/intel_uncore.h +++ b/drivers/gpu/drm/i915/intel_uncore.h @@ -70,6 +70,11 @@ struct intel_uncore_funcs { void (*force_wake_put)(struct intel_uncore *uncore, enum forcewake_domains domains); + enum forcewake_domains (*read_fw_domains)(struct intel_uncore *uncore, + i915_reg_t r); + enum forcewake_domains (*write_fw_domains)(struct intel_uncore *uncore, + i915_reg_t r); + u8 (*mmio_readb)(struct intel_uncore *uncore, i915_reg_t r, bool trace); u16 (*mmio_readw)(struct intel_uncore *uncore, diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.c 
b/drivers/gpu/drm/i915/selftests/mock_uncore.c index ff8999c63a12..49585f16d4a2 100644 --- a/drivers/gpu/drm/i915/selftests/mock_uncore.c +++ b/drivers/gpu/drm/i915/selftests/mock_uncore.c @@ -41,6 +41,6 @@ __nop_read(64) void mock_uncore_init(struct intel_uncore *uncore) { - ASSIGN_WRITE_MMIO_VFUNCS(uncore, nop); - ASSIGN_READ_MMIO_VFUNCS(uncore, nop); + ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, nop); + ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, nop); } -- cgit v1.2.3 From 19e0a8d454c3e581a877071a87b349a8bf6b9a7c Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Wed, 19 Jun 2019 18:00:17 -0700 Subject: drm/i915: kill uncore_sanitize uncore_sanitize performs no action on the uncore structure and just calls intel_sanitize_gt_powersave, so we can just call the latter directly. Signed-off-by: Daniele Ceraolo Spurio Reviewed-by: Jani Nikula Signed-off-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190620010021.20637-3-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/i915_drv.c | 12 ++++++++++-- drivers/gpu/drm/i915/intel_uncore.c | 9 --------- drivers/gpu/drm/i915/intel_uncore.h | 1 - 3 files changed, 10 insertions(+), 12 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index f62e3397d936..458784fd556e 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1629,7 +1629,8 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); - intel_uncore_sanitize(dev_priv); + /* BIOS often leaves RC6 enabled, but disable it for hw init */ + intel_sanitize_gt_powersave(dev_priv); intel_gt_init_workarounds(dev_priv); @@ -1921,6 +1922,9 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) out_cleanup_hw: i915_driver_cleanup_hw(dev_priv); i915_ggtt_cleanup_hw(dev_priv); + + /* Paranoia: make sure we have disabled everything before we exit. */ + intel_sanitize_gt_powersave(dev_priv); out_cleanup_mmio: i915_driver_cleanup_mmio(dev_priv); out_runtime_pm_put: @@ -1991,6 +1995,10 @@ static void i915_driver_release(struct drm_device *dev) i915_gem_fini(dev_priv); i915_ggtt_cleanup_hw(dev_priv); + + /* Paranoia: make sure we have disabled everything before we exit. */ + intel_sanitize_gt_powersave(dev_priv); + i915_driver_cleanup_mmio(dev_priv); enable_rpm_wakeref_asserts(rpm); @@ -2357,7 +2365,7 @@ static int i915_drm_resume_early(struct drm_device *dev) hsw_disable_pc8(dev_priv); } - intel_uncore_sanitize(dev_priv); + intel_sanitize_gt_powersave(dev_priv); intel_power_domains_resume(dev_priv); diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 8e5716bc53e2..63bdadacadcc 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -537,12 +537,6 @@ void intel_uncore_runtime_resume(struct intel_uncore *uncore) iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb); } -void intel_uncore_sanitize(struct drm_i915_private *dev_priv) -{ - /* BIOS often leaves RC6 enabled, but disable it for hw init */ - intel_sanitize_gt_powersave(dev_priv); -} - static void __intel_uncore_forcewake_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains) { @@ -1664,9 +1658,6 @@ void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore) void intel_uncore_fini_mmio(struct intel_uncore *uncore) { - /* Paranoia: make sure we have disabled everything before we exit. 
*/ - intel_uncore_sanitize(uncore_to_i915(uncore)); - iosf_mbi_punit_acquire(); iosf_mbi_unregister_pmic_bus_access_notifier_unlocked( &uncore->pmic_bus_access_nb); diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h index 4afde0c44ffe..94c00d3778b1 100644 --- a/drivers/gpu/drm/i915/intel_uncore.h +++ b/drivers/gpu/drm/i915/intel_uncore.h @@ -182,7 +182,6 @@ intel_uncore_has_fifo(const struct intel_uncore *uncore) return uncore->flags & UNCORE_HAS_FIFO; } -void intel_uncore_sanitize(struct drm_i915_private *dev_priv); void intel_uncore_init_early(struct intel_uncore *uncore); int intel_uncore_init_mmio(struct intel_uncore *uncore); void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore); -- cgit v1.2.3 From 013857580bbece05a89d6a323799960ffaefdb8c Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Wed, 19 Jun 2019 18:00:18 -0700 Subject: drm/i915: kill uncore_to_i915 Let's get rid of it before it proliferates, since with split GT/Display uncores the container_of won't work anymore. I've kept the rpm pointer as well to minimize the pointer chasing in the MMIO accessors. v2: swap parameter order for intel_uncore_init_early (Tvrtko) Signed-off-by: Daniele Ceraolo Spurio Reviewed-by: Tvrtko Ursulin Signed-off-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190620010021.20637-4-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/i915_drv.c | 2 +- drivers/gpu/drm/i915/i915_drv.h | 5 ----- drivers/gpu/drm/i915/intel_uncore.c | 24 ++++++++++++------------ drivers/gpu/drm/i915/intel_uncore.h | 4 +++- 4 files changed, 16 insertions(+), 19 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 458784fd556e..00b6512cdee6 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -900,7 +900,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv) intel_device_info_subplatform_init(dev_priv); - intel_uncore_init_early(&dev_priv->uncore); + intel_uncore_init_early(&dev_priv->uncore, dev_priv); spin_lock_init(&dev_priv->irq_lock); spin_lock_init(&dev_priv->gpu_error.lock); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index bc909ec5d9c3..2734f62ab19a 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1950,11 +1950,6 @@ static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc) return container_of(huc, struct drm_i915_private, huc); } -static inline struct drm_i915_private *uncore_to_i915(struct intel_uncore *uncore) -{ - return container_of(uncore, struct drm_i915_private, uncore); -} - /* Simple iterator over all initialised engines */ #define for_each_engine(engine__, dev_priv__, id__) \ for ((id__) = 0; \ diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 63bdadacadcc..59cb258312a2 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -322,7 +322,7 @@ static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore) /* On VLV, FIFO will be shared by both SW and HW. 
* So, we need to read the FREE_ENTRIES everytime */ - if (IS_VALLEYVIEW(uncore_to_i915(uncore))) + if (IS_VALLEYVIEW(uncore->i915)) n = fifo_free_entries(uncore); else n = uncore->fifo_count; @@ -493,7 +493,7 @@ static void __intel_uncore_early_sanitize(struct intel_uncore *uncore, DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n"); /* WaDisableShadowRegForCpd:chv */ - if (IS_CHERRYVIEW(uncore_to_i915(uncore))) { + if (IS_CHERRYVIEW(uncore->i915)) { __raw_uncore_write32(uncore, GTFIFOCTL, __raw_uncore_read32(uncore, GTFIFOCTL) | GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL | @@ -622,7 +622,7 @@ void intel_uncore_forcewake_user_put(struct intel_uncore *uncore) spin_lock_irq(&uncore->lock); if (!--uncore->user_forcewake.count) { if (intel_uncore_unclaimed_mmio(uncore)) - dev_info(uncore_to_i915(uncore)->drm.dev, + dev_info(uncore->i915->drm.dev, "Invalid mmio detected during user access\n"); uncore->unclaimed_mmio_check = @@ -1346,7 +1346,7 @@ static void fw_domain_fini(struct intel_uncore *uncore, static void intel_uncore_fw_domains_init(struct intel_uncore *uncore) { - struct drm_i915_private *i915 = uncore_to_i915(uncore); + struct drm_i915_private *i915 = uncore->i915; if (!intel_uncore_has_forcewake(uncore)) return; @@ -1499,7 +1499,7 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb, static int uncore_mmio_setup(struct intel_uncore *uncore) { - struct drm_i915_private *i915 = uncore_to_i915(uncore); + struct drm_i915_private *i915 = uncore->i915; struct pci_dev *pdev = i915->drm.pdev; int mmio_bar; int mmio_size; @@ -1529,20 +1529,22 @@ static int uncore_mmio_setup(struct intel_uncore *uncore) static void uncore_mmio_cleanup(struct intel_uncore *uncore) { - struct drm_i915_private *i915 = uncore_to_i915(uncore); - struct pci_dev *pdev = i915->drm.pdev; + struct pci_dev *pdev = uncore->i915->drm.pdev; pci_iounmap(pdev, uncore->regs); } -void intel_uncore_init_early(struct intel_uncore *uncore) +void intel_uncore_init_early(struct intel_uncore *uncore, + struct drm_i915_private *i915) { spin_lock_init(&uncore->lock); + uncore->i915 = i915; + uncore->rpm = &i915->runtime_pm; } int intel_uncore_init_mmio(struct intel_uncore *uncore) { - struct drm_i915_private *i915 = uncore_to_i915(uncore); + struct drm_i915_private *i915 = uncore->i915; int ret; ret = uncore_mmio_setup(uncore); @@ -1561,8 +1563,6 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore) uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier; - uncore->rpm = &i915->runtime_pm; - if (!intel_uncore_has_forcewake(uncore)) { if (IS_GEN(i915, 5)) { ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5); @@ -1627,7 +1627,7 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore) */ void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore) { - struct drm_i915_private *i915 = uncore_to_i915(uncore); + struct drm_i915_private *i915 = uncore->i915; if (INTEL_GEN(i915) >= 11) { enum forcewake_domains fw_domains = uncore->fw_domains; diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h index 94c00d3778b1..59505a2f9097 100644 --- a/drivers/gpu/drm/i915/intel_uncore.h +++ b/drivers/gpu/drm/i915/intel_uncore.h @@ -102,6 +102,7 @@ struct intel_forcewake_range { struct intel_uncore { void __iomem *regs; + struct drm_i915_private *i915; struct intel_runtime_pm *rpm; spinlock_t lock; /** lock is also taken in irq contexts. 
*/ @@ -182,7 +183,8 @@ intel_uncore_has_fifo(const struct intel_uncore *uncore) return uncore->flags & UNCORE_HAS_FIFO; } -void intel_uncore_init_early(struct intel_uncore *uncore); +void intel_uncore_init_early(struct intel_uncore *uncore, + struct drm_i915_private *i915); int intel_uncore_init_mmio(struct intel_uncore *uncore); void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore); bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore); -- cgit v1.2.3 From 2e81bc61d4e3ee4592b2a1d341696fe4fe3fc5eb Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Wed, 19 Jun 2019 18:00:19 -0700 Subject: drm/i915: skip forcewake actions on forcewake-less uncore We always call some of the setup/cleanup functions for forcewake, even if the feature is not actually available. Skipping these operations if forcewake is not available saves us some operations on older gens and prepares us for having a forcewake-less display uncore. v2: do not make suspend/resume functions forcewake-specific (Chris, Tvrtko), use GEM_BUG_ON in internal forcewake-only functions (Tvrtko) Signed-off-by: Daniele Ceraolo Spurio Cc: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Chris Wilson Signed-off-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190620010021.20637-5-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/intel_uncore.c | 143 ++++++++++++++++++++++-------------- 1 file changed, 87 insertions(+), 56 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 59cb258312a2..00bf5e085a2c 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -485,12 +485,10 @@ check_for_unclaimed_mmio(struct intel_uncore *uncore) return ret; } -static void __intel_uncore_early_sanitize(struct intel_uncore *uncore, - unsigned int restore_forcewake) +static void forcewake_early_sanitize(struct intel_uncore *uncore, + unsigned int restore_forcewake) { - /* clear out unclaimed reg detection bit */ - if (check_for_unclaimed_mmio(uncore)) - DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n"); + GEM_BUG_ON(!intel_uncore_has_forcewake(uncore)); /* WaDisableShadowRegForCpd:chv */ if (IS_CHERRYVIEW(uncore->i915)) { @@ -515,6 +513,9 @@ static void __intel_uncore_early_sanitize(struct intel_uncore *uncore, void intel_uncore_suspend(struct intel_uncore *uncore) { + if (!intel_uncore_has_forcewake(uncore)) + return; + iosf_mbi_punit_acquire(); iosf_mbi_unregister_pmic_bus_access_notifier_unlocked( &uncore->pmic_bus_access_nb); @@ -526,14 +527,23 @@ void intel_uncore_resume_early(struct intel_uncore *uncore) { unsigned int restore_forcewake; + if (intel_uncore_unclaimed_mmio(uncore)) + DRM_DEBUG("unclaimed mmio detected on resume, clearing\n"); + + if (!intel_uncore_has_forcewake(uncore)) + return; + restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved); - __intel_uncore_early_sanitize(uncore, restore_forcewake); + forcewake_early_sanitize(uncore, restore_forcewake); iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb); } void intel_uncore_runtime_resume(struct intel_uncore *uncore) { + if (!intel_uncore_has_forcewake(uncore)) + return; + iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb); } @@ -1348,8 +1358,7 @@ static void intel_uncore_fw_domains_init(struct intel_uncore *uncore) { struct drm_i915_private *i915 = uncore->i915; - if (!intel_uncore_has_forcewake(uncore)) - return; + 
GEM_BUG_ON(!intel_uncore_has_forcewake(uncore)); if (INTEL_GEN(i915) >= 11) { int i; @@ -1542,36 +1551,29 @@ void intel_uncore_init_early(struct intel_uncore *uncore, uncore->rpm = &i915->runtime_pm; } -int intel_uncore_init_mmio(struct intel_uncore *uncore) +static void uncore_raw_init(struct intel_uncore *uncore) { - struct drm_i915_private *i915 = uncore->i915; - int ret; + GEM_BUG_ON(intel_uncore_has_forcewake(uncore)); - ret = uncore_mmio_setup(uncore); - if (ret) - return ret; + if (IS_GEN(uncore->i915, 5)) { + ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5); + ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5); + } else { + ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2); + ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen2); + } +} - i915_check_vgpu(i915); +static void uncore_forcewake_init(struct intel_uncore *uncore) +{ + struct drm_i915_private *i915 = uncore->i915; - if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915)) - uncore->flags |= UNCORE_HAS_FORCEWAKE; + GEM_BUG_ON(!intel_uncore_has_forcewake(uncore)); intel_uncore_fw_domains_init(uncore); - __intel_uncore_early_sanitize(uncore, 0); - - uncore->unclaimed_mmio_check = 1; - uncore->pmic_bus_access_nb.notifier_call = - i915_pmic_bus_access_notifier; + forcewake_early_sanitize(uncore, 0); - if (!intel_uncore_has_forcewake(uncore)) { - if (IS_GEN(i915, 5)) { - ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5); - ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5); - } else { - ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2); - ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen2); - } - } else if (IS_GEN_RANGE(i915, 6, 7)) { + if (IS_GEN_RANGE(i915, 6, 7)) { ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6); if (IS_VALLEYVIEW(i915)) { @@ -1585,7 +1587,6 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore) ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges); ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable); ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable); - } else { ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen8); ASSIGN_READ_MMIO_VFUNCS(uncore, gen6); @@ -1600,6 +1601,31 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore) ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable); } + uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier; + iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb); +} + +int intel_uncore_init_mmio(struct intel_uncore *uncore) +{ + struct drm_i915_private *i915 = uncore->i915; + int ret; + + ret = uncore_mmio_setup(uncore); + if (ret) + return ret; + + i915_check_vgpu(i915); + + if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915)) + uncore->flags |= UNCORE_HAS_FORCEWAKE; + + uncore->unclaimed_mmio_check = 1; + + if (!intel_uncore_has_forcewake(uncore)) + uncore_raw_init(uncore); + else + uncore_forcewake_init(uncore); + /* make sure fw funcs are set if and only if we have fw*/ GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_get); GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_put); @@ -1615,7 +1641,9 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore) if (IS_GEN_RANGE(i915, 6, 7)) uncore->flags |= UNCORE_HAS_FIFO; - iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb); + /* clear out unclaimed reg detection bit */ + if (check_for_unclaimed_mmio(uncore)) + DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n"); return 0; } @@ -1628,41 +1656,44 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore) void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore) { struct drm_i915_private *i915 = uncore->i915; + enum forcewake_domains fw_domains = 
uncore->fw_domains; + enum forcewake_domain_id domain_id; + int i; - if (INTEL_GEN(i915) >= 11) { - enum forcewake_domains fw_domains = uncore->fw_domains; - enum forcewake_domain_id domain_id; - int i; + if (!intel_uncore_has_forcewake(uncore) || INTEL_GEN(i915) < 11) + return; - for (i = 0; i < I915_MAX_VCS; i++) { - domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i; + for (i = 0; i < I915_MAX_VCS; i++) { + domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i; - if (HAS_ENGINE(i915, _VCS(i))) - continue; + if (HAS_ENGINE(i915, _VCS(i))) + continue; - if (fw_domains & BIT(domain_id)) - fw_domain_fini(uncore, domain_id); - } + if (fw_domains & BIT(domain_id)) + fw_domain_fini(uncore, domain_id); + } - for (i = 0; i < I915_MAX_VECS; i++) { - domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i; + for (i = 0; i < I915_MAX_VECS; i++) { + domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i; - if (HAS_ENGINE(i915, _VECS(i))) - continue; + if (HAS_ENGINE(i915, _VECS(i))) + continue; - if (fw_domains & BIT(domain_id)) - fw_domain_fini(uncore, domain_id); - } + if (fw_domains & BIT(domain_id)) + fw_domain_fini(uncore, domain_id); } } void intel_uncore_fini_mmio(struct intel_uncore *uncore) { - iosf_mbi_punit_acquire(); - iosf_mbi_unregister_pmic_bus_access_notifier_unlocked( - &uncore->pmic_bus_access_nb); - intel_uncore_forcewake_reset(uncore); - iosf_mbi_punit_release(); + if (intel_uncore_has_forcewake(uncore)) { + iosf_mbi_punit_acquire(); + iosf_mbi_unregister_pmic_bus_access_notifier_unlocked( + &uncore->pmic_bus_access_nb); + intel_uncore_forcewake_reset(uncore); + iosf_mbi_punit_release(); + } + uncore_mmio_cleanup(uncore); } -- cgit v1.2.3 From f833cdb06bf7cea6827d67b73cb24da335b70266 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Wed, 19 Jun 2019 18:00:20 -0700 Subject: drm/i915: dynamically allocate forcewake domains We'd like to introduce a display uncore with no forcewake domains, so let's avoid wasting memory and be ready to allocate only what we need. Even without multiple uncore, we still don't need all the domains on all gens. 
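As a rough, self-contained sketch of the pattern (illustrative names and a trimmed-down domain struct, not the driver's real definitions): the per-ID storage becomes an array of pointers, each domain is kzalloc'ed only when a platform actually registers it, and a back-pointer to the owning uncore replaces the old container_of() lookup in forcewake_domain_to_uncore().

  #include <linux/bits.h>
  #include <linux/errno.h>
  #include <linux/slab.h>

  struct uncore_sketch;

  struct fw_domain_sketch {
          struct uncore_sketch *uncore;   /* back-pointer instead of container_of() */
          unsigned int id;
  };

  #define FW_DOMAIN_ID_COUNT_SKETCH 8     /* illustrative */

  struct uncore_sketch {
          unsigned int fw_domains;        /* bitmask of initialised domains */
          /* pointers allocated on demand, no longer embedded structs */
          struct fw_domain_sketch *fw_domain[FW_DOMAIN_ID_COUNT_SKETCH];
  };

  static int fw_domain_init_sketch(struct uncore_sketch *uncore, unsigned int id)
  {
          struct fw_domain_sketch *d;

          d = kzalloc(sizeof(*d), GFP_KERNEL);    /* allocate only what this platform needs */
          if (!d)
                  return -ENOMEM;

          d->uncore = uncore;
          d->id = id;
          uncore->fw_domain[id] = d;
          uncore->fw_domains |= BIT(id);
          return 0;
  }

  static void fw_domain_fini_sketch(struct uncore_sketch *uncore, unsigned int id)
  {
          struct fw_domain_sketch *d = uncore->fw_domain[id];

          if (!d)
                  return;

          uncore->fw_domain[id] = NULL;
          uncore->fw_domains &= ~BIT(id);
          kfree(d);
  }

In the real __fw_domain_init() a failure additionally unwinds any domains already allocated, which is what the new intel_uncore_fw_domains_fini() below is for.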
v2: avoid hidden control flow, improve checks (Tvrtko), fix IVB special case, add failure injection point Signed-off-by: Daniele Ceraolo Spurio Cc: Tvrtko Ursulin Reviewed-by: Chris Wilson Signed-off-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190620010021.20637-6-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/intel_uncore.c | 101 ++++++++++++++++++++++++++---------- drivers/gpu/drm/i915/intel_uncore.h | 13 ++--- 2 files changed, 77 insertions(+), 37 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 00bf5e085a2c..2bd602a41bb7 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -344,7 +344,7 @@ intel_uncore_fw_release_timer(struct hrtimer *timer) { struct intel_uncore_forcewake_domain *domain = container_of(timer, struct intel_uncore_forcewake_domain, timer); - struct intel_uncore *uncore = forcewake_domain_to_uncore(domain); + struct intel_uncore *uncore = domain->uncore; unsigned long irqflags; assert_rpm_device_not_suspended(uncore->rpm); @@ -1293,23 +1293,27 @@ do { \ (uncore)->funcs.read_fw_domains = x##_reg_read_fw_domains; \ } while (0) -static void fw_domain_init(struct intel_uncore *uncore, - enum forcewake_domain_id domain_id, - i915_reg_t reg_set, - i915_reg_t reg_ack) +static int __fw_domain_init(struct intel_uncore *uncore, + enum forcewake_domain_id domain_id, + i915_reg_t reg_set, + i915_reg_t reg_ack) { struct intel_uncore_forcewake_domain *d; - if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT)) - return; + GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT); + GEM_BUG_ON(uncore->fw_domain[domain_id]); - d = &uncore->fw_domain[domain_id]; + if (i915_inject_load_failure()) + return -ENOMEM; - WARN_ON(d->wake_count); + d = kzalloc(sizeof(*d), GFP_KERNEL); + if (!d) + return -ENOMEM; WARN_ON(!i915_mmio_reg_valid(reg_set)); WARN_ON(!i915_mmio_reg_valid(reg_ack)); + d->uncore = uncore; d->wake_count = 0; d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set); d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack); @@ -1326,7 +1330,6 @@ static void fw_domain_init(struct intel_uncore *uncore, BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0)); BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1)); - d->mask = BIT(domain_id); hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); @@ -1335,6 +1338,10 @@ static void fw_domain_init(struct intel_uncore *uncore, uncore->fw_domains |= BIT(domain_id); fw_domain_reset(d); + + uncore->fw_domain[domain_id] = d; + + return 0; } static void fw_domain_fini(struct intel_uncore *uncore, @@ -1342,29 +1349,41 @@ static void fw_domain_fini(struct intel_uncore *uncore, { struct intel_uncore_forcewake_domain *d; - if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT)) - return; + GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT); - d = &uncore->fw_domain[domain_id]; + d = fetch_and_zero(&uncore->fw_domain[domain_id]); + if (!d) + return; + uncore->fw_domains &= ~BIT(domain_id); WARN_ON(d->wake_count); WARN_ON(hrtimer_cancel(&d->timer)); - memset(d, 0, sizeof(*d)); + kfree(d); +} - uncore->fw_domains &= ~BIT(domain_id); +static void intel_uncore_fw_domains_fini(struct intel_uncore *uncore) +{ + struct intel_uncore_forcewake_domain *d; + int tmp; + + for_each_fw_domain(d, uncore, tmp) + fw_domain_fini(uncore, d->id); } -static void intel_uncore_fw_domains_init(struct intel_uncore *uncore) +static int intel_uncore_fw_domains_init(struct intel_uncore *uncore) { struct 
drm_i915_private *i915 = uncore->i915; + int ret = 0; GEM_BUG_ON(!intel_uncore_has_forcewake(uncore)); +#define fw_domain_init(uncore__, id__, set__, ack__) \ + (ret ?: (ret = __fw_domain_init((uncore__), (id__), (set__), (ack__)))) + if (INTEL_GEN(i915) >= 11) { int i; - uncore->funcs.force_wake_get = - fw_domains_get_with_fallback; + uncore->funcs.force_wake_get = fw_domains_get_with_fallback; uncore->funcs.force_wake_put = fw_domains_put; fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, FORCEWAKE_RENDER_GEN9, @@ -1372,6 +1391,7 @@ static void intel_uncore_fw_domains_init(struct intel_uncore *uncore) fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER, FORCEWAKE_BLITTER_GEN9, FORCEWAKE_ACK_BLITTER_GEN9); + for (i = 0; i < I915_MAX_VCS; i++) { if (!HAS_ENGINE(i915, _VCS(i))) continue; @@ -1389,8 +1409,7 @@ static void intel_uncore_fw_domains_init(struct intel_uncore *uncore) FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i)); } } else if (IS_GEN_RANGE(i915, 9, 10)) { - uncore->funcs.force_wake_get = - fw_domains_get_with_fallback; + uncore->funcs.force_wake_get = fw_domains_get_with_fallback; uncore->funcs.force_wake_put = fw_domains_put; fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, FORCEWAKE_RENDER_GEN9, @@ -1439,8 +1458,10 @@ static void intel_uncore_fw_domains_init(struct intel_uncore *uncore) __raw_uncore_write32(uncore, FORCEWAKE, 0); __raw_posting_read(uncore, ECOBUS); - fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, - FORCEWAKE_MT, FORCEWAKE_MT_ACK); + ret = __fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, + FORCEWAKE_MT, FORCEWAKE_MT_ACK); + if (ret) + goto out; spin_lock_irq(&uncore->lock); fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER); @@ -1451,6 +1472,7 @@ static void intel_uncore_fw_domains_init(struct intel_uncore *uncore) if (!(ecobus & FORCEWAKE_MT_ENABLE)) { DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n"); DRM_INFO("when using vblank-synced partial screen updates.\n"); + fw_domain_fini(uncore, FW_DOMAIN_ID_RENDER); fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, FORCEWAKE, FORCEWAKE_ACK); } @@ -1462,8 +1484,16 @@ static void intel_uncore_fw_domains_init(struct intel_uncore *uncore) FORCEWAKE, FORCEWAKE_ACK); } +#undef fw_domain_init + /* All future platforms are expected to require complex power gating */ - WARN_ON(uncore->fw_domains == 0); + WARN_ON(!ret && uncore->fw_domains == 0); + +out: + if (ret) + intel_uncore_fw_domains_fini(uncore); + + return ret; } #define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \ @@ -1564,13 +1594,17 @@ static void uncore_raw_init(struct intel_uncore *uncore) } } -static void uncore_forcewake_init(struct intel_uncore *uncore) +static int uncore_forcewake_init(struct intel_uncore *uncore) { struct drm_i915_private *i915 = uncore->i915; + int ret; GEM_BUG_ON(!intel_uncore_has_forcewake(uncore)); - intel_uncore_fw_domains_init(uncore); + ret = intel_uncore_fw_domains_init(uncore); + if (ret) + return ret; + forcewake_early_sanitize(uncore, 0); if (IS_GEN_RANGE(i915, 6, 7)) { @@ -1603,6 +1637,8 @@ static void uncore_forcewake_init(struct intel_uncore *uncore) uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier; iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb); + + return 0; } int intel_uncore_init_mmio(struct intel_uncore *uncore) @@ -1621,10 +1657,13 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore) uncore->unclaimed_mmio_check = 1; - if (!intel_uncore_has_forcewake(uncore)) + if (!intel_uncore_has_forcewake(uncore)) { uncore_raw_init(uncore); - else - uncore_forcewake_init(uncore); + } else { 
+ ret = uncore_forcewake_init(uncore); + if (ret) + goto out_mmio_cleanup; + } /* make sure fw funcs are set if and only if we have fw*/ GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_get); @@ -1646,6 +1685,11 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore) DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n"); return 0; + +out_mmio_cleanup: + uncore_mmio_cleanup(uncore); + + return ret; } /* @@ -1691,6 +1735,7 @@ void intel_uncore_fini_mmio(struct intel_uncore *uncore) iosf_mbi_unregister_pmic_bus_access_notifier_unlocked( &uncore->pmic_bus_access_nb); intel_uncore_forcewake_reset(uncore); + intel_uncore_fw_domains_fini(uncore); iosf_mbi_punit_release(); } diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h index 59505a2f9097..7108475d9b24 100644 --- a/drivers/gpu/drm/i915/intel_uncore.h +++ b/drivers/gpu/drm/i915/intel_uncore.h @@ -126,6 +126,7 @@ struct intel_uncore { enum forcewake_domains fw_domains_saved; /* user domains saved for S3 */ struct intel_uncore_forcewake_domain { + struct intel_uncore *uncore; enum forcewake_domain_id id; enum forcewake_domains mask; unsigned int wake_count; @@ -133,7 +134,7 @@ struct intel_uncore { struct hrtimer timer; u32 __iomem *reg_set; u32 __iomem *reg_ack; - } fw_domain[FW_DOMAIN_ID_COUNT]; + } *fw_domain[FW_DOMAIN_ID_COUNT]; struct { unsigned int count; @@ -147,18 +148,12 @@ struct intel_uncore { /* Iterate over initialised fw domains */ #define for_each_fw_domain_masked(domain__, mask__, uncore__, tmp__) \ - for (tmp__ = (mask__); \ - tmp__ ? (domain__ = &(uncore__)->fw_domain[__mask_next_bit(tmp__)]), 1 : 0;) + for (tmp__ = (mask__); tmp__ ;) \ + for_each_if(domain__ = (uncore__)->fw_domain[__mask_next_bit(tmp__)]) #define for_each_fw_domain(domain__, uncore__, tmp__) \ for_each_fw_domain_masked(domain__, (uncore__)->fw_domains, uncore__, tmp__) -static inline struct intel_uncore * -forcewake_domain_to_uncore(const struct intel_uncore_forcewake_domain *d) -{ - return container_of(d, struct intel_uncore, fw_domain[d->id]); -} - static inline bool intel_uncore_has_forcewake(const struct intel_uncore *uncore) { -- cgit v1.2.3 From 9e138ea1bdb1d1c1f1c0b74f022f886196abf625 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Wed, 19 Jun 2019 18:00:21 -0700 Subject: drm/i915/gvt: decouple check_vgpu() from uncore_init() With multiple uncore to initialize (GT vs Display), it makes little sense to have the vgpu_check inside uncore_init(). We also have a catch-22 scenario where the uncore is required to read the vgpu capabilities while the vgpu capabilities are required to decide if we need to initialize forcewake support. To remove this circular dependency, we can perform the required MMIO access by mmapping just the vgtif shared page in mmio space and use raw accessors. 
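Stripped of the gen check and the logging, the detection flow added below boils down to the following sketch (the helper name and boolean return are illustrative; the real function is i915_detect_vgpu() and it fills in dev_priv->vgpu.caps/active directly):

  #include <linux/io.h>
  #include <linux/pci.h>

  #include "i915_pvinfo.h"

  static bool detect_vgpu_sketch(struct pci_dev *pdev)
  {
          void __iomem *shared_area;
          bool active = false;

          /* Map just the single PVINFO page of BAR 0, before full MMIO setup. */
          shared_area = pci_iomap_range(pdev, 0, VGT_PVINFO_PAGE, VGT_PVINFO_SIZE);
          if (!shared_area)
                  return false;

          /* Raw accessors on the shared page: check the magic, then the version. */
          if (readq(shared_area + vgtif_offset(magic)) == VGT_MAGIC &&
              readw(shared_area + vgtif_offset(version_major)) >= VGT_VERSION_MAJOR)
                  active = true;

          pci_iounmap(pdev, shared_area);
          return active;
  }
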
v2: rename check_vgpu to detect_vgpu (Chris) Signed-off-by: Daniele Ceraolo Spurio Cc: Zhenyu Wang Cc: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190620010021.20637-7-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/i915_drv.c | 2 ++ drivers/gpu/drm/i915/i915_pvinfo.h | 5 +++-- drivers/gpu/drm/i915/i915_vgpu.c | 35 +++++++++++++++++++++++++++-------- drivers/gpu/drm/i915/i915_vgpu.h | 2 +- drivers/gpu/drm/i915/intel_uncore.c | 2 -- 5 files changed, 33 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 00b6512cdee6..8f84ed26e972 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1899,6 +1899,8 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); + i915_detect_vgpu(dev_priv); + ret = i915_driver_init_mmio(dev_priv); if (ret < 0) goto out_runtime_pm_put; diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h index 969e514916ab..ca4661e98f79 100644 --- a/drivers/gpu/drm/i915/i915_pvinfo.h +++ b/drivers/gpu/drm/i915/i915_pvinfo.h @@ -110,8 +110,9 @@ struct vgt_if { u32 rsv7[0x200 - 24]; /* pad to one page */ } __packed; -#define vgtif_reg(x) \ - _MMIO((VGT_PVINFO_PAGE + offsetof(struct vgt_if, x))) +#define vgtif_offset(x) (offsetof(struct vgt_if, x)) + +#define vgtif_reg(x) _MMIO(VGT_PVINFO_PAGE + vgtif_offset(x)) /* vGPU display status to be used by the host side */ #define VGT_DRV_DISPLAY_NOT_READY 0 diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c index 94d3992b599d..1a8f7c731126 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.c +++ b/drivers/gpu/drm/i915/i915_vgpu.c @@ -52,34 +52,53 @@ */ /** - * i915_check_vgpu - detect virtual GPU + * i915_detect_vgpu - detect virtual GPU * @dev_priv: i915 device private * * This function is called at the initialization stage, to detect whether * running on a vGPU. */ -void i915_check_vgpu(struct drm_i915_private *dev_priv) +void i915_detect_vgpu(struct drm_i915_private *dev_priv) { - struct intel_uncore *uncore = &dev_priv->uncore; + struct pci_dev *pdev = dev_priv->drm.pdev; u64 magic; u16 version_major; + void __iomem *shared_area; BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE); - magic = __raw_uncore_read64(uncore, vgtif_reg(magic)); - if (magic != VGT_MAGIC) + /* + * This is called before we setup the main MMIO BAR mappings used via + * the uncore structure, so we need to access the BAR directly. 
Since + * we do not support VGT on older gens, return early so we don't have + * to consider differently numbered or sized MMIO bars + */ + if (INTEL_GEN(dev_priv) < 6) + return; + + shared_area = pci_iomap_range(pdev, 0, VGT_PVINFO_PAGE, VGT_PVINFO_SIZE); + if (!shared_area) { + DRM_ERROR("failed to map MMIO bar to check for VGT\n"); return; + } + + magic = readq(shared_area + vgtif_offset(magic)); + if (magic != VGT_MAGIC) + goto out; - version_major = __raw_uncore_read16(uncore, vgtif_reg(version_major)); + version_major = readw(shared_area + vgtif_offset(version_major)); if (version_major < VGT_VERSION_MAJOR) { DRM_INFO("VGT interface version mismatch!\n"); - return; + goto out; } - dev_priv->vgpu.caps = __raw_uncore_read32(uncore, vgtif_reg(vgt_caps)); + dev_priv->vgpu.caps = readl(shared_area + vgtif_offset(vgt_caps)); dev_priv->vgpu.active = true; DRM_INFO("Virtual GPU for Intel GVT-g detected.\n"); + +out: + pci_iounmap(pdev, shared_area); } bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h index ebe1b7bced98..a919735fb6ce 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.h +++ b/drivers/gpu/drm/i915/i915_vgpu.h @@ -26,7 +26,7 @@ #include "i915_pvinfo.h" -void i915_check_vgpu(struct drm_i915_private *dev_priv); +void i915_detect_vgpu(struct drm_i915_private *dev_priv); bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 2bd602a41bb7..68d54e126d79 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -1650,8 +1650,6 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore) if (ret) return ret; - i915_check_vgpu(i915); - if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915)) uncore->flags |= UNCORE_HAS_FORCEWAKE; -- cgit v1.2.3 From 22b7a426bbe1ebe1520f92da4cd1617d1e1b5fc4 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 20 Jun 2019 15:20:51 +0100 Subject: drm/i915/execlists: Preempt-to-busy When using a global seqno, we required a precise stop-the-workd event to handle preemption and unwind the global seqno counter. To accomplish this, we would preempt to a special out-of-band context and wait for the machine to report that it was idle. Given an idle machine, we could very precisely see which requests had completed and which we needed to feed back into the run queue. However, now that we have scrapped the global seqno, we no longer need to precisely unwind the global counter and only track requests by their per-context seqno. This allows us to loosely unwind inflight requests while scheduling a preemption, with the enormous caveat that the requests we put back on the run queue are still _inflight_ (until the preemption request is complete). This makes request tracking much more messy, as at any point then we can see a completed request that we believe is not currently scheduled for execution. We also have to be careful not to rewind RING_TAIL past RING_HEAD on preempting to the running context, and for this we use a semaphore to prevent completion of the request before continuing. To accomplish this feat, we change how we track requests scheduled to the HW. Instead of appending our requests onto a single list as we submit, we track each submission to ELSP as its own block. Then upon receiving the CS preemption event, we promote the pending block to the inflight block (discarding what was previously being tracked). 
As normal CS completion events arrive, we then remove stale entries from the inflight tracker. v2: Be a tinge paranoid and ensure we flush the write into the HWS page for the GPU semaphore to pick in a timely fashion. Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190620142052.19311-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_context.c | 2 +- drivers/gpu/drm/i915/gt/intel_context_types.h | 5 + drivers/gpu/drm/i915/gt/intel_engine.h | 61 +-- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 63 ++- drivers/gpu/drm/i915/gt/intel_engine_types.h | 60 +-- drivers/gpu/drm/i915/gt/intel_lrc.c | 703 ++++++++++++-------------- drivers/gpu/drm/i915/i915_gpu_error.c | 19 +- drivers/gpu/drm/i915/i915_request.c | 6 + drivers/gpu/drm/i915/i915_request.h | 1 + drivers/gpu/drm/i915/i915_scheduler.c | 3 +- drivers/gpu/drm/i915/i915_utils.h | 12 + drivers/gpu/drm/i915/intel_guc_submission.c | 175 +++---- drivers/gpu/drm/i915/selftests/i915_request.c | 8 +- 13 files changed, 508 insertions(+), 610 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index 0f2c22a3bcb6..35871c8a42a6 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -646,7 +646,7 @@ static void init_contexts(struct drm_i915_private *i915) static bool needs_preempt_context(struct drm_i915_private *i915) { - return HAS_EXECLISTS(i915); + return USES_GUC_SUBMISSION(i915); } int i915_gem_contexts_init(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index 08049ee91cee..4c0e211c715d 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -13,6 +13,7 @@ #include #include "i915_active_types.h" +#include "i915_utils.h" #include "intel_engine_types.h" #include "intel_sseu.h" @@ -38,6 +39,10 @@ struct intel_context { struct i915_gem_context *gem_context; struct intel_engine_cs *engine; struct intel_engine_cs *inflight; +#define intel_context_inflight(ce) ptr_mask_bits((ce)->inflight, 2) +#define intel_context_inflight_count(ce) ptr_unmask_bits((ce)->inflight, 2) +#define intel_context_inflight_inc(ce) ptr_count_inc(&(ce)->inflight) +#define intel_context_inflight_dec(ce) ptr_count_dec(&(ce)->inflight) struct list_head signal_link; struct list_head signals; diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h index 2f1c6871ee95..9bb6ff76680e 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine.h +++ b/drivers/gpu/drm/i915/gt/intel_engine.h @@ -125,71 +125,26 @@ hangcheck_action_to_str(const enum intel_engine_hangcheck_action a) void intel_engines_set_scheduler_caps(struct drm_i915_private *i915); -static inline void -execlists_set_active(struct intel_engine_execlists *execlists, - unsigned int bit) -{ - __set_bit(bit, (unsigned long *)&execlists->active); -} - -static inline bool -execlists_set_active_once(struct intel_engine_execlists *execlists, - unsigned int bit) -{ - return !__test_and_set_bit(bit, (unsigned long *)&execlists->active); -} - -static inline void -execlists_clear_active(struct intel_engine_execlists *execlists, - unsigned int bit) -{ - __clear_bit(bit, (unsigned long *)&execlists->active); -} - -static inline void -execlists_clear_all_active(struct intel_engine_execlists *execlists) +static inline unsigned int 
+execlists_num_ports(const struct intel_engine_execlists * const execlists) { - execlists->active = 0; + return execlists->port_mask + 1; } -static inline bool -execlists_is_active(const struct intel_engine_execlists *execlists, - unsigned int bit) +static inline struct i915_request * +execlists_active(const struct intel_engine_execlists *execlists) { - return test_bit(bit, (unsigned long *)&execlists->active); + GEM_BUG_ON(execlists->active - execlists->inflight > + execlists_num_ports(execlists)); + return READ_ONCE(*execlists->active); } -void execlists_user_begin(struct intel_engine_execlists *execlists, - const struct execlist_port *port); -void execlists_user_end(struct intel_engine_execlists *execlists); - void execlists_cancel_port_requests(struct intel_engine_execlists * const execlists); struct i915_request * execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists); -static inline unsigned int -execlists_num_ports(const struct intel_engine_execlists * const execlists) -{ - return execlists->port_mask + 1; -} - -static inline struct execlist_port * -execlists_port_complete(struct intel_engine_execlists * const execlists, - struct execlist_port * const port) -{ - const unsigned int m = execlists->port_mask; - - GEM_BUG_ON(port_index(port, execlists) != 0); - GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_USER)); - - memmove(port, port + 1, m * sizeof(struct execlist_port)); - memset(port + m, 0, sizeof(struct execlist_port)); - - return port; -} - static inline u32 intel_read_status_page(const struct intel_engine_cs *engine, int reg) { diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 7fd33e81c2d9..d45328e254dc 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -508,6 +508,10 @@ void intel_engine_init_execlists(struct intel_engine_cs *engine) GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists))); GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS); + memset(execlists->pending, 0, sizeof(execlists->pending)); + execlists->active = + memset(execlists->inflight, 0, sizeof(execlists->inflight)); + execlists->queue_priority_hint = INT_MIN; execlists->queue = RB_ROOT_CACHED; } @@ -1152,7 +1156,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine) return true; /* Waiting to drain ELSP? 
*/ - if (READ_ONCE(engine->execlists.active)) { + if (execlists_active(&engine->execlists)) { struct tasklet_struct *t = &engine->execlists.tasklet; synchronize_hardirq(engine->i915->drm.irq); @@ -1169,7 +1173,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine) /* Otherwise flush the tasklet if it was on another cpu */ tasklet_unlock_wait(t); - if (READ_ONCE(engine->execlists.active)) + if (execlists_active(&engine->execlists)) return false; } @@ -1367,6 +1371,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine, } if (HAS_EXECLISTS(dev_priv)) { + struct i915_request * const *port, *rq; const u32 *hws = &engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX]; const u8 num_entries = execlists->csb_size; @@ -1399,27 +1404,33 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine, } spin_lock_irqsave(&engine->active.lock, flags); - for (idx = 0; idx < execlists_num_ports(execlists); idx++) { - struct i915_request *rq; - unsigned int count; + for (port = execlists->active; (rq = *port); port++) { + char hdr[80]; + int len; + + len = snprintf(hdr, sizeof(hdr), + "\t\tActive[%d: ", + (int)(port - execlists->active)); + if (!i915_request_signaled(rq)) + len += snprintf(hdr + len, sizeof(hdr) - len, + "ring:{start:%08x, hwsp:%08x, seqno:%08x}, ", + i915_ggtt_offset(rq->ring->vma), + rq->timeline->hwsp_offset, + hwsp_seqno(rq)); + snprintf(hdr + len, sizeof(hdr) - len, "rq: "); + print_request(m, rq, hdr); + } + for (port = execlists->pending; (rq = *port); port++) { char hdr[80]; - rq = port_unpack(&execlists->port[idx], &count); - if (!rq) { - drm_printf(m, "\t\tELSP[%d] idle\n", idx); - } else if (!i915_request_signaled(rq)) { - snprintf(hdr, sizeof(hdr), - "\t\tELSP[%d] count=%d, ring:{start:%08x, hwsp:%08x, seqno:%08x}, rq: ", - idx, count, - i915_ggtt_offset(rq->ring->vma), - rq->timeline->hwsp_offset, - hwsp_seqno(rq)); - print_request(m, rq, hdr); - } else { - print_request(m, rq, "\t\tELSP[%d] rq: "); - } + snprintf(hdr, sizeof(hdr), + "\t\tPending[%d] ring:{start:%08x, hwsp:%08x, seqno:%08x}, rq: ", + (int)(port - execlists->pending), + i915_ggtt_offset(rq->ring->vma), + rq->timeline->hwsp_offset, + hwsp_seqno(rq)); + print_request(m, rq, hdr); } - drm_printf(m, "\t\tHW active? 0x%x\n", execlists->active); spin_unlock_irqrestore(&engine->active.lock, flags); } else if (INTEL_GEN(dev_priv) > 6) { drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n", @@ -1583,15 +1594,19 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine) } if (engine->stats.enabled++ == 0) { - const struct execlist_port *port = execlists->port; - unsigned int num_ports = execlists_num_ports(execlists); + struct i915_request * const *port; + struct i915_request *rq; engine->stats.enabled_at = ktime_get(); /* XXX submission method oblivious? 
*/ - while (num_ports-- && port_isset(port)) { + for (port = execlists->active; (rq = *port); port++) engine->stats.active++; - port++; + + for (port = execlists->pending; (rq = *port); port++) { + /* Exclude any contexts already counted in active */ + if (intel_context_inflight_count(rq->hw_context) == 1) + engine->stats.active++; } if (engine->stats.active) diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index 43e975a26016..b4f7b81a3c3e 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -172,51 +172,28 @@ struct intel_engine_execlists { */ u32 __iomem *ctrl_reg; +#define EXECLIST_MAX_PORTS 2 + /** + * @active: the currently known context executing on HW + */ + struct i915_request * const *active; /** - * @port: execlist port states + * @inflight: the set of contexts submitted and acknowleged by HW * - * For each hardware ELSP (ExecList Submission Port) we keep - * track of the last request and the number of times we submitted - * that port to hw. We then count the number of times the hw reports - * a context completion or preemption. As only one context can - * be active on hw, we limit resubmission of context to port[0]. This - * is called Lite Restore, of the context. + * The set of inflight contexts is managed by reading CS events + * from the HW. On a context-switch event (not preemption), we + * know the HW has transitioned from port0 to port1, and we + * advance our inflight/active tracking accordingly. */ - struct execlist_port { - /** - * @request_count: combined request and submission count - */ - struct i915_request *request_count; -#define EXECLIST_COUNT_BITS 2 -#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS) -#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS) -#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS) -#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS) -#define port_set(p, packed) ((p)->request_count = (packed)) -#define port_isset(p) ((p)->request_count) -#define port_index(p, execlists) ((p) - (execlists)->port) - - /** - * @context_id: context ID for port - */ - GEM_DEBUG_DECL(u32 context_id); - -#define EXECLIST_MAX_PORTS 2 - } port[EXECLIST_MAX_PORTS]; - + struct i915_request *inflight[EXECLIST_MAX_PORTS + 1 /* sentinel */]; /** - * @active: is the HW active? We consider the HW as active after - * submitting any context for execution and until we have seen the - * last context completion event. After that, we do not expect any - * more events until we submit, and so can park the HW. + * @pending: the next set of contexts submitted to ELSP * - * As we have a small number of different sources from which we feed - * the HW, we track the state of each inside a single bitfield. + * We store the array of contexts that we submit to HW (via ELSP) and + * promote them to the inflight array once HW has signaled the + * preemption or idle-to-active event. 
*/ - unsigned int active; -#define EXECLISTS_ACTIVE_USER 0 -#define EXECLISTS_ACTIVE_PREEMPT 1 -#define EXECLISTS_ACTIVE_HWACK 2 + struct i915_request *pending[EXECLIST_MAX_PORTS + 1]; /** * @port_mask: number of execlist ports - 1 @@ -257,11 +234,6 @@ struct intel_engine_execlists { */ u32 *csb_status; - /** - * @preempt_complete_status: expected CSB upon completing preemption - */ - u32 preempt_complete_status; - /** * @csb_size: context status buffer FIFO size */ diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 82b7ace62d97..cb9d285bd00a 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -161,6 +161,8 @@ #define GEN8_CTX_STATUS_COMPLETED_MASK \ (GEN8_CTX_STATUS_COMPLETE | GEN8_CTX_STATUS_PREEMPTED) +#define CTX_DESC_FORCE_RESTORE BIT_ULL(2) + /* Typical size of the average request (2 pipecontrols and a MI_BB) */ #define EXECLISTS_REQUEST_SIZE 64 /* bytes */ #define WA_TAIL_DWORDS 2 @@ -221,6 +223,25 @@ static void execlists_init_reg_state(u32 *reg_state, struct intel_engine_cs *engine, struct intel_ring *ring); +static inline u32 intel_hws_preempt_address(struct intel_engine_cs *engine) +{ + return (i915_ggtt_offset(engine->status_page.vma) + + I915_GEM_HWS_PREEMPT_ADDR); +} + +static inline void +ring_set_paused(const struct intel_engine_cs *engine, int state) +{ + /* + * We inspect HWS_PREEMPT with a semaphore inside + * engine->emit_fini_breadcrumb. If the dword is true, + * the ring is paused as the semaphore will busywait + * until the dword is false. + */ + engine->status_page.addr[I915_GEM_HWS_PREEMPT] = state; + wmb(); +} + static inline struct i915_priolist *to_priolist(struct rb_node *rb) { return rb_entry(rb, struct i915_priolist, node); @@ -271,12 +292,6 @@ static inline bool need_preempt(const struct intel_engine_cs *engine, { int last_prio; - if (!engine->preempt_context) - return false; - - if (i915_request_completed(rq)) - return false; - /* * Check if the current priority hint merits a preemption attempt. * @@ -338,9 +353,6 @@ __maybe_unused static inline bool assert_priority_queue(const struct i915_request *prev, const struct i915_request *next) { - const struct intel_engine_execlists *execlists = - &prev->engine->execlists; - /* * Without preemption, the prev may refer to the still active element * which we refuse to let go. @@ -348,7 +360,7 @@ assert_priority_queue(const struct i915_request *prev, * Even with preemption, there are times when we think it is better not * to preempt and leave an ostensibly lower priority request in flight. */ - if (port_request(execlists->port) == prev) + if (i915_request_is_active(prev)) return true; return rq_prio(prev) >= rq_prio(next); @@ -442,13 +454,11 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine) struct intel_engine_cs *owner; if (i915_request_completed(rq)) - break; + continue; /* XXX */ __i915_request_unsubmit(rq); unwind_wa_tail(rq); - GEM_BUG_ON(rq->hw_context->inflight); - /* * Push the request back into the queue for later resubmission. * If this request is not native to this physical engine (i.e. 
@@ -500,32 +510,32 @@ execlists_context_status_change(struct i915_request *rq, unsigned long status) status, rq); } -inline void -execlists_user_begin(struct intel_engine_execlists *execlists, - const struct execlist_port *port) +static inline struct i915_request * +execlists_schedule_in(struct i915_request *rq, int idx) { - execlists_set_active_once(execlists, EXECLISTS_ACTIVE_USER); -} + struct intel_context *ce = rq->hw_context; + int count; -inline void -execlists_user_end(struct intel_engine_execlists *execlists) -{ - execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER); -} + trace_i915_request_in(rq, idx); -static inline void -execlists_context_schedule_in(struct i915_request *rq) -{ - GEM_BUG_ON(rq->hw_context->inflight); + count = intel_context_inflight_count(ce); + if (!count) { + intel_context_get(ce); + ce->inflight = rq->engine; + + execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN); + intel_engine_context_in(ce->inflight); + } + + intel_context_inflight_inc(ce); + GEM_BUG_ON(intel_context_inflight(ce) != rq->engine); - execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN); - intel_engine_context_in(rq->engine); - rq->hw_context->inflight = rq->engine; + return i915_request_get(rq); } -static void kick_siblings(struct i915_request *rq) +static void kick_siblings(struct i915_request *rq, struct intel_context *ce) { - struct virtual_engine *ve = to_virtual_engine(rq->hw_context->engine); + struct virtual_engine *ve = container_of(ce, typeof(*ve), context); struct i915_request *next = READ_ONCE(ve->request); if (next && next->execution_mask & ~rq->execution_mask) @@ -533,29 +543,42 @@ static void kick_siblings(struct i915_request *rq) } static inline void -execlists_context_schedule_out(struct i915_request *rq, unsigned long status) +execlists_schedule_out(struct i915_request *rq) { - rq->hw_context->inflight = NULL; - intel_engine_context_out(rq->engine); - execlists_context_status_change(rq, status); + struct intel_context *ce = rq->hw_context; + + GEM_BUG_ON(!intel_context_inflight_count(ce)); + trace_i915_request_out(rq); - /* - * If this is part of a virtual engine, its next request may have - * been blocked waiting for access to the active context. We have - * to kick all the siblings again in case we need to switch (e.g. - * the next request is not runnable on this engine). Hopefully, - * we will already have submitted the next request before the - * tasklet runs and do not need to rebuild each virtual tree - * and kick everyone again. - */ - if (rq->engine != rq->hw_context->engine) - kick_siblings(rq); + intel_context_inflight_dec(ce); + if (!intel_context_inflight_count(ce)) { + intel_engine_context_out(ce->inflight); + execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT); + + ce->inflight = NULL; + intel_context_put(ce); + + /* + * If this is part of a virtual engine, its next request may + * have been blocked waiting for access to the active context. + * We have to kick all the siblings again in case we need to + * switch (e.g. the next request is not runnable on this + * engine). Hopefully, we will already have submitted the next + * request before the tasklet runs and do not need to rebuild + * each virtual tree and kick everyone again. 
+ */ + if (rq->engine != ce->engine) + kick_siblings(rq, ce); + } + + i915_request_put(rq); } -static u64 execlists_update_context(struct i915_request *rq) +static u64 execlists_update_context(const struct i915_request *rq) { struct intel_context *ce = rq->hw_context; + u64 desc; ce->lrc_reg_state[CTX_RING_TAIL + 1] = intel_ring_set_tail(rq->ring, rq->tail); @@ -576,7 +599,11 @@ static u64 execlists_update_context(struct i915_request *rq) * wmb). */ mb(); - return ce->lrc_desc; + + desc = ce->lrc_desc; + ce->lrc_desc &= ~CTX_DESC_FORCE_RESTORE; + + return desc; } static inline void write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port) @@ -590,12 +617,62 @@ static inline void write_desc(struct intel_engine_execlists *execlists, u64 desc } } +static __maybe_unused void +trace_ports(const struct intel_engine_execlists *execlists, + const char *msg, + struct i915_request * const *ports) +{ + const struct intel_engine_cs *engine = + container_of(execlists, typeof(*engine), execlists); + + GEM_TRACE("%s: %s { %llx:%lld%s, %llx:%lld }\n", + engine->name, msg, + ports[0]->fence.context, + ports[0]->fence.seqno, + i915_request_completed(ports[0]) ? "!" : + i915_request_started(ports[0]) ? "*" : + "", + ports[1] ? ports[1]->fence.context : 0, + ports[1] ? ports[1]->fence.seqno : 0); +} + +static __maybe_unused bool +assert_pending_valid(const struct intel_engine_execlists *execlists, + const char *msg) +{ + struct i915_request * const *port, *rq; + struct intel_context *ce = NULL; + + trace_ports(execlists, msg, execlists->pending); + + if (execlists->pending[execlists_num_ports(execlists)]) + return false; + + for (port = execlists->pending; (rq = *port); port++) { + if (ce == rq->hw_context) + return false; + + ce = rq->hw_context; + if (i915_request_completed(rq)) + continue; + + if (i915_active_is_idle(&ce->active)) + return false; + + if (!i915_vma_is_pinned(ce->state)) + return false; + } + + return ce; +} + static void execlists_submit_ports(struct intel_engine_cs *engine) { struct intel_engine_execlists *execlists = &engine->execlists; - struct execlist_port *port = execlists->port; unsigned int n; + GEM_BUG_ON(!assert_pending_valid(execlists, "submit")); + /* * We can skip acquiring intel_runtime_pm_get() here as it was taken * on our behalf by the request (see i915_gem_mark_busy()) and it will @@ -613,38 +690,16 @@ static void execlists_submit_ports(struct intel_engine_cs *engine) * of elsq entries, keep this in mind before changing the loop below. */ for (n = execlists_num_ports(execlists); n--; ) { - struct i915_request *rq; - unsigned int count; - u64 desc; + struct i915_request *rq = execlists->pending[n]; - rq = port_unpack(&port[n], &count); - if (rq) { - GEM_BUG_ON(count > !n); - if (!count++) - execlists_context_schedule_in(rq); - port_set(&port[n], port_pack(rq, count)); - desc = execlists_update_context(rq); - GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc)); - - GEM_TRACE("%s in[%d]: ctx=%d.%d, fence %llx:%lld (current %d), prio=%d\n", - engine->name, n, - port[n].context_id, count, - rq->fence.context, rq->fence.seqno, - hwsp_seqno(rq), - rq_prio(rq)); - } else { - GEM_BUG_ON(!n); - desc = 0; - } - - write_desc(execlists, desc, n); + write_desc(execlists, + rq ? 
execlists_update_context(rq) : 0, + n); } /* we need to manually load the submit queue */ if (execlists->ctrl_reg) writel(EL_CTRL_LOAD, execlists->ctrl_reg); - - execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK); } static bool ctx_single_port_submission(const struct intel_context *ce) @@ -668,6 +723,7 @@ static bool can_merge_ctx(const struct intel_context *prev, static bool can_merge_rq(const struct i915_request *prev, const struct i915_request *next) { + GEM_BUG_ON(prev == next); GEM_BUG_ON(!assert_priority_queue(prev, next)); if (!can_merge_ctx(prev->hw_context, next->hw_context)) @@ -676,58 +732,6 @@ static bool can_merge_rq(const struct i915_request *prev, return true; } -static void port_assign(struct execlist_port *port, struct i915_request *rq) -{ - GEM_BUG_ON(rq == port_request(port)); - - if (port_isset(port)) - i915_request_put(port_request(port)); - - port_set(port, port_pack(i915_request_get(rq), port_count(port))); -} - -static void inject_preempt_context(struct intel_engine_cs *engine) -{ - struct intel_engine_execlists *execlists = &engine->execlists; - struct intel_context *ce = engine->preempt_context; - unsigned int n; - - GEM_BUG_ON(execlists->preempt_complete_status != - upper_32_bits(ce->lrc_desc)); - - /* - * Switch to our empty preempt context so - * the state of the GPU is known (idle). - */ - GEM_TRACE("%s\n", engine->name); - for (n = execlists_num_ports(execlists); --n; ) - write_desc(execlists, 0, n); - - write_desc(execlists, ce->lrc_desc, n); - - /* we need to manually load the submit queue */ - if (execlists->ctrl_reg) - writel(EL_CTRL_LOAD, execlists->ctrl_reg); - - execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK); - execlists_set_active(execlists, EXECLISTS_ACTIVE_PREEMPT); - - (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++); -} - -static void complete_preempt_context(struct intel_engine_execlists *execlists) -{ - GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT)); - - if (inject_preempt_hang(execlists)) - return; - - execlists_cancel_port_requests(execlists); - __unwind_incomplete_requests(container_of(execlists, - struct intel_engine_cs, - execlists)); -} - static void virtual_update_register_offsets(u32 *regs, struct intel_engine_cs *engine) { @@ -792,7 +796,7 @@ static bool virtual_matches(const struct virtual_engine *ve, * we reuse the register offsets). This is a very small * hystersis on the greedy seelction algorithm. 
*/ - inflight = READ_ONCE(ve->context.inflight); + inflight = intel_context_inflight(&ve->context); if (inflight && inflight != engine) return false; @@ -815,13 +819,23 @@ static void virtual_xfer_breadcrumbs(struct virtual_engine *ve, spin_unlock(&old->breadcrumbs.irq_lock); } +static struct i915_request * +last_active(const struct intel_engine_execlists *execlists) +{ + struct i915_request * const *last = execlists->active; + + while (*last && i915_request_completed(*last)) + last++; + + return *last; +} + static void execlists_dequeue(struct intel_engine_cs *engine) { struct intel_engine_execlists * const execlists = &engine->execlists; - struct execlist_port *port = execlists->port; - const struct execlist_port * const last_port = - &execlists->port[execlists->port_mask]; - struct i915_request *last = port_request(port); + struct i915_request **port = execlists->pending; + struct i915_request ** const last_port = port + execlists->port_mask; + struct i915_request *last; struct rb_node *rb; bool submit = false; @@ -867,65 +881,72 @@ static void execlists_dequeue(struct intel_engine_cs *engine) break; } + /* + * If the queue is higher priority than the last + * request in the currently active context, submit afresh. + * We will resubmit again afterwards in case we need to split + * the active context to interject the preemption request, + * i.e. we will retrigger preemption following the ack in case + * of trouble. + */ + last = last_active(execlists); if (last) { - /* - * Don't resubmit or switch until all outstanding - * preemptions (lite-restore) are seen. Then we - * know the next preemption status we see corresponds - * to this ELSP update. - */ - GEM_BUG_ON(!execlists_is_active(execlists, - EXECLISTS_ACTIVE_USER)); - GEM_BUG_ON(!port_count(&port[0])); - - /* - * If we write to ELSP a second time before the HW has had - * a chance to respond to the previous write, we can confuse - * the HW and hit "undefined behaviour". After writing to ELSP, - * we must then wait until we see a context-switch event from - * the HW to indicate that it has had a chance to respond. - */ - if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK)) - return; - if (need_preempt(engine, last, rb)) { - inject_preempt_context(engine); - return; - } + GEM_TRACE("%s: preempting last=%llx:%lld, prio=%d, hint=%d\n", + engine->name, + last->fence.context, + last->fence.seqno, + last->sched.attr.priority, + execlists->queue_priority_hint); + /* + * Don't let the RING_HEAD advance past the breadcrumb + * as we unwind (and until we resubmit) so that we do + * not accidentally tell it to go backwards. + */ + ring_set_paused(engine, 1); - /* - * In theory, we could coalesce more requests onto - * the second port (the first port is active, with - * no preemptions pending). However, that means we - * then have to deal with the possible lite-restore - * of the second port (as we submit the ELSP, there - * may be a context-switch) but also we may complete - * the resubmission before the context-switch. Ergo, - * coalescing onto the second port will cause a - * preemption event, but we cannot predict whether - * that will affect port[0] or port[1]. - * - * If the second port is already active, we can wait - * until the next context-switch before contemplating - * new requests. The GPU will be busy and we should be - * able to resubmit the new ELSP before it idles, - * avoiding pipeline bubbles (momentary pauses where - * the driver is unable to keep up the supply of new - * work). 
However, we have to double check that the - * priorities of the ports haven't been switch. - */ - if (port_count(&port[1])) - return; + /* + * Note that we have not stopped the GPU at this point, + * so we are unwinding the incomplete requests as they + * remain inflight and so by the time we do complete + * the preemption, some of the unwound requests may + * complete! + */ + __unwind_incomplete_requests(engine); - /* - * WaIdleLiteRestore:bdw,skl - * Apply the wa NOOPs to prevent - * ring:HEAD == rq:TAIL as we resubmit the - * request. See gen8_emit_fini_breadcrumb() for - * where we prepare the padding after the - * end of the request. - */ - last->tail = last->wa_tail; + /* + * If we need to return to the preempted context, we + * need to skip the lite-restore and force it to + * reload the RING_TAIL. Otherwise, the HW has a + * tendency to ignore us rewinding the TAIL to the + * end of an earlier request. + */ + last->hw_context->lrc_desc |= CTX_DESC_FORCE_RESTORE; + last = NULL; + } else { + /* + * Otherwise if we already have a request pending + * for execution after the current one, we can + * just wait until the next CS event before + * queuing more. In either case we will force a + * lite-restore preemption event, but if we wait + * we hopefully coalesce several updates into a single + * submission. + */ + if (!list_is_last(&last->sched.link, + &engine->active.requests)) + return; + + /* + * WaIdleLiteRestore:bdw,skl + * Apply the wa NOOPs to prevent + * ring:HEAD == rq:TAIL as we resubmit the + * request. See gen8_emit_fini_breadcrumb() for + * where we prepare the padding after the + * end of the request. + */ + last->tail = last->wa_tail; + } } while (rb) { /* XXX virtual is always taking precedence */ @@ -955,9 +976,24 @@ static void execlists_dequeue(struct intel_engine_cs *engine) continue; } + if (i915_request_completed(rq)) { + ve->request = NULL; + ve->base.execlists.queue_priority_hint = INT_MIN; + rb_erase_cached(rb, &execlists->virtual); + RB_CLEAR_NODE(rb); + + rq->engine = engine; + __i915_request_submit(rq); + + spin_unlock(&ve->base.active.lock); + + rb = rb_first_cached(&execlists->virtual); + continue; + } + if (last && !can_merge_rq(last, rq)) { spin_unlock(&ve->base.active.lock); - return; /* leave this rq for another engine */ + return; /* leave this for another */ } GEM_TRACE("%s: virtual rq=%llx:%lld%s, new engine? %s\n", @@ -1006,9 +1042,10 @@ static void execlists_dequeue(struct intel_engine_cs *engine) } __i915_request_submit(rq); - trace_i915_request_in(rq, port_index(port, execlists)); - submit = true; - last = rq; + if (!i915_request_completed(rq)) { + submit = true; + last = rq; + } } spin_unlock(&ve->base.active.lock); @@ -1021,6 +1058,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine) int i; priolist_for_each_request_consume(rq, rn, p, i) { + if (i915_request_completed(rq)) + goto skip; + /* * Can we combine this request with the current port? 
* It has to be the same context/ringbuffer and not @@ -1060,19 +1100,14 @@ static void execlists_dequeue(struct intel_engine_cs *engine) ctx_single_port_submission(rq->hw_context)) goto done; - - if (submit) - port_assign(port, last); + *port = execlists_schedule_in(last, port - execlists->pending); port++; - - GEM_BUG_ON(port_isset(port)); } - __i915_request_submit(rq); - trace_i915_request_in(rq, port_index(port, execlists)); - last = rq; submit = true; +skip: + __i915_request_submit(rq); } rb_erase_cached(&p->node, &execlists->queue); @@ -1097,54 +1132,30 @@ done: * interrupt for secondary ports). */ execlists->queue_priority_hint = queue_prio(execlists); + GEM_TRACE("%s: queue_priority_hint:%d, submit:%s\n", + engine->name, execlists->queue_priority_hint, + yesno(submit)); if (submit) { - port_assign(port, last); + *port = execlists_schedule_in(last, port - execlists->pending); + memset(port + 1, 0, (last_port - port) * sizeof(*port)); execlists_submit_ports(engine); } - - /* We must always keep the beast fed if we have work piled up */ - GEM_BUG_ON(rb_first_cached(&execlists->queue) && - !port_isset(execlists->port)); - - /* Re-evaluate the executing context setup after each preemptive kick */ - if (last) - execlists_user_begin(execlists, execlists->port); - - /* If the engine is now idle, so should be the flag; and vice versa. */ - GEM_BUG_ON(execlists_is_active(&engine->execlists, - EXECLISTS_ACTIVE_USER) == - !port_isset(engine->execlists.port)); } void execlists_cancel_port_requests(struct intel_engine_execlists * const execlists) { - struct execlist_port *port = execlists->port; - unsigned int num_ports = execlists_num_ports(execlists); - - while (num_ports-- && port_isset(port)) { - struct i915_request *rq = port_request(port); + struct i915_request * const *port, *rq; - GEM_TRACE("%s:port%u fence %llx:%lld, (current %d)\n", - rq->engine->name, - (unsigned int)(port - execlists->port), - rq->fence.context, rq->fence.seqno, - hwsp_seqno(rq)); + for (port = execlists->pending; (rq = *port); port++) + execlists_schedule_out(rq); + memset(execlists->pending, 0, sizeof(execlists->pending)); - GEM_BUG_ON(!execlists->active); - execlists_context_schedule_out(rq, - i915_request_completed(rq) ? - INTEL_CONTEXT_SCHEDULE_OUT : - INTEL_CONTEXT_SCHEDULE_PREEMPTED); - - i915_request_put(rq); - - memset(port, 0, sizeof(*port)); - port++; - } - - execlists_clear_all_active(execlists); + for (port = execlists->active; (rq = *port); port++) + execlists_schedule_out(rq); + execlists->active = + memset(execlists->inflight, 0, sizeof(execlists->inflight)); } static inline void @@ -1163,7 +1174,6 @@ reset_in_progress(const struct intel_engine_execlists *execlists) static void process_csb(struct intel_engine_cs *engine) { struct intel_engine_execlists * const execlists = &engine->execlists; - struct execlist_port *port = execlists->port; const u32 * const buf = execlists->csb_status; const u8 num_entries = execlists->csb_size; u8 head, tail; @@ -1198,9 +1208,7 @@ static void process_csb(struct intel_engine_cs *engine) rmb(); do { - struct i915_request *rq; unsigned int status; - unsigned int count; if (++head == num_entries) head = 0; @@ -1223,68 +1231,38 @@ static void process_csb(struct intel_engine_cs *engine) * status notifier. 
*/ - GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x, active=0x%x\n", + GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x\n", engine->name, head, - buf[2 * head + 0], buf[2 * head + 1], - execlists->active); + buf[2 * head + 0], buf[2 * head + 1]); status = buf[2 * head]; - if (status & (GEN8_CTX_STATUS_IDLE_ACTIVE | - GEN8_CTX_STATUS_PREEMPTED)) - execlists_set_active(execlists, - EXECLISTS_ACTIVE_HWACK); - if (status & GEN8_CTX_STATUS_ACTIVE_IDLE) - execlists_clear_active(execlists, - EXECLISTS_ACTIVE_HWACK); - - if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK)) - continue; - - /* We should never get a COMPLETED | IDLE_ACTIVE! */ - GEM_BUG_ON(status & GEN8_CTX_STATUS_IDLE_ACTIVE); - - if (status & GEN8_CTX_STATUS_COMPLETE && - buf[2*head + 1] == execlists->preempt_complete_status) { - GEM_TRACE("%s preempt-idle\n", engine->name); - complete_preempt_context(execlists); - continue; - } - - if (status & GEN8_CTX_STATUS_PREEMPTED && - execlists_is_active(execlists, - EXECLISTS_ACTIVE_PREEMPT)) - continue; - - GEM_BUG_ON(!execlists_is_active(execlists, - EXECLISTS_ACTIVE_USER)); - - rq = port_unpack(port, &count); - GEM_TRACE("%s out[0]: ctx=%d.%d, fence %llx:%lld (current %d), prio=%d\n", - engine->name, - port->context_id, count, - rq ? rq->fence.context : 0, - rq ? rq->fence.seqno : 0, - rq ? hwsp_seqno(rq) : 0, - rq ? rq_prio(rq) : 0); - - /* Check the context/desc id for this event matches */ - GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id); - - GEM_BUG_ON(count == 0); - if (--count == 0) { - /* - * On the final event corresponding to the - * submission of this context, we expect either - * an element-switch event or a completion - * event (and on completion, the active-idle - * marker). No more preemptions, lite-restore - * or otherwise. - */ - GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED); - GEM_BUG_ON(port_isset(&port[1]) && - !(status & GEN8_CTX_STATUS_ELEMENT_SWITCH)); - GEM_BUG_ON(!port_isset(&port[1]) && - !(status & GEN8_CTX_STATUS_ACTIVE_IDLE)); + if (status & GEN8_CTX_STATUS_IDLE_ACTIVE) { + GEM_BUG_ON(*execlists->active); +promote: + GEM_BUG_ON(!assert_pending_valid(execlists, "promote")); + execlists->active = + memcpy(execlists->inflight, + execlists->pending, + execlists_num_ports(execlists) * + sizeof(*execlists->pending)); + execlists->pending[0] = NULL; + + if (!inject_preempt_hang(execlists)) + ring_set_paused(engine, 0); + } else if (status & GEN8_CTX_STATUS_PREEMPTED) { + struct i915_request * const *port = execlists->active; + + trace_ports(execlists, "preempted", execlists->active); + + while (*port) + execlists_schedule_out(*port++); + + goto promote; + } else if (*execlists->active) { + struct i915_request *rq = *execlists->active++; + + trace_ports(execlists, "completed", + execlists->active - 1); /* * We rely on the hardware being strongly @@ -1293,21 +1271,10 @@ static void process_csb(struct intel_engine_cs *engine) * user interrupt and CSB is processed. 
*/ GEM_BUG_ON(!i915_request_completed(rq)); + execlists_schedule_out(rq); - execlists_context_schedule_out(rq, - INTEL_CONTEXT_SCHEDULE_OUT); - i915_request_put(rq); - - GEM_TRACE("%s completed ctx=%d\n", - engine->name, port->context_id); - - port = execlists_port_complete(execlists, port); - if (port_isset(port)) - execlists_user_begin(execlists, port); - else - execlists_user_end(execlists); - } else { - port_set(port, port_pack(rq, count)); + GEM_BUG_ON(execlists->active - execlists->inflight > + execlists_num_ports(execlists)); } } while (head != tail); @@ -1332,7 +1299,7 @@ static void __execlists_submission_tasklet(struct intel_engine_cs *const engine) lockdep_assert_held(&engine->active.lock); process_csb(engine); - if (!execlists_is_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT)) + if (!engine->execlists.pending[0]) execlists_dequeue(engine); } @@ -1345,11 +1312,6 @@ static void execlists_submission_tasklet(unsigned long data) struct intel_engine_cs * const engine = (struct intel_engine_cs *)data; unsigned long flags; - GEM_TRACE("%s awake?=%d, active=%x\n", - engine->name, - !!intel_wakeref_active(&engine->wakeref), - engine->execlists.active); - spin_lock_irqsave(&engine->active.lock, flags); __execlists_submission_tasklet(engine); spin_unlock_irqrestore(&engine->active.lock, flags); @@ -1376,12 +1338,16 @@ static void __submit_queue_imm(struct intel_engine_cs *engine) tasklet_hi_schedule(&execlists->tasklet); } -static void submit_queue(struct intel_engine_cs *engine, int prio) +static void submit_queue(struct intel_engine_cs *engine, + const struct i915_request *rq) { - if (prio > engine->execlists.queue_priority_hint) { - engine->execlists.queue_priority_hint = prio; - __submit_queue_imm(engine); - } + struct intel_engine_execlists *execlists = &engine->execlists; + + if (rq_prio(rq) <= execlists->queue_priority_hint) + return; + + execlists->queue_priority_hint = rq_prio(rq); + __submit_queue_imm(engine); } static void execlists_submit_request(struct i915_request *request) @@ -1397,7 +1363,7 @@ static void execlists_submit_request(struct i915_request *request) GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root)); GEM_BUG_ON(list_empty(&request->sched.link)); - submit_queue(engine, rq_prio(request)); + submit_queue(engine, request); spin_unlock_irqrestore(&engine->active.lock, flags); } @@ -2048,27 +2014,13 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine) spin_unlock_irqrestore(&engine->active.lock, flags); } -static bool lrc_regs_ok(const struct i915_request *rq) -{ - const struct intel_ring *ring = rq->ring; - const u32 *regs = rq->hw_context->lrc_reg_state; - - /* Quick spot check for the common signs of context corruption */ - - if (regs[CTX_RING_BUFFER_CONTROL + 1] != - (RING_CTL_SIZE(ring->size) | RING_VALID)) - return false; - - if (regs[CTX_RING_BUFFER_START + 1] != i915_ggtt_offset(ring->vma)) - return false; - - return true; -} - -static void reset_csb_pointers(struct intel_engine_execlists *execlists) +static void reset_csb_pointers(struct intel_engine_cs *engine) { + struct intel_engine_execlists * const execlists = &engine->execlists; const unsigned int reset_value = execlists->csb_size - 1; + ring_set_paused(engine, 0); + /* * After a reset, the HW starts writing into CSB entry [0]. 
We * therefore have to set our HEAD pointer back one entry so that @@ -2115,18 +2067,21 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled) process_csb(engine); /* drain preemption events */ /* Following the reset, we need to reload the CSB read/write pointers */ - reset_csb_pointers(&engine->execlists); + reset_csb_pointers(engine); /* * Save the currently executing context, even if we completed * its request, it was still running at the time of the * reset and will have been clobbered. */ - if (!port_isset(execlists->port)) - goto out_clear; + rq = execlists_active(execlists); + if (!rq) + return; - rq = port_request(execlists->port); ce = rq->hw_context; + GEM_BUG_ON(i915_active_is_idle(&ce->active)); + GEM_BUG_ON(!i915_vma_is_pinned(ce->state)); + rq = active_request(rq); /* * Catch up with any missed context-switch interrupts. @@ -2139,9 +2094,12 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled) */ execlists_cancel_port_requests(execlists); - rq = active_request(rq); - if (!rq) + if (!rq) { + ce->ring->head = ce->ring->tail; goto out_replay; + } + + ce->ring->head = intel_ring_wrap(ce->ring, rq->head); /* * If this request hasn't started yet, e.g. it is waiting on a @@ -2155,7 +2113,7 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled) * Otherwise, if we have not started yet, the request should replay * perfectly and we do not need to flag the result as being erroneous. */ - if (!i915_request_started(rq) && lrc_regs_ok(rq)) + if (!i915_request_started(rq)) goto out_replay; /* @@ -2170,7 +2128,7 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled) * image back to the expected values to skip over the guilty request. */ i915_reset_request(rq, stalled); - if (!stalled && lrc_regs_ok(rq)) + if (!stalled) goto out_replay; /* @@ -2190,17 +2148,13 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled) execlists_init_reg_state(regs, ce, engine, ce->ring); out_replay: - /* Rerun the request; its payload has been neutered (if guilty). */ - ce->ring->head = - rq ? intel_ring_wrap(ce->ring, rq->head) : ce->ring->tail; + GEM_TRACE("%s replay {head:%04x, tail:%04x\n", + engine->name, ce->ring->head, ce->ring->tail); intel_ring_update_space(ce->ring); __execlists_update_reg_state(ce, engine); /* Push back any incomplete requests for replay after the reset. 
*/ __unwind_incomplete_requests(engine); - -out_clear: - execlists_clear_all_active(execlists); } static void execlists_reset(struct intel_engine_cs *engine, bool stalled) @@ -2296,7 +2250,6 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) execlists->queue_priority_hint = INT_MIN; execlists->queue = RB_ROOT_CACHED; - GEM_BUG_ON(port_isset(execlists->port)); GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet)); execlists->tasklet.func = nop_submission_tasklet; @@ -2514,15 +2467,29 @@ static u32 *gen8_emit_wa_tail(struct i915_request *request, u32 *cs) return cs; } +static u32 *emit_preempt_busywait(struct i915_request *request, u32 *cs) +{ + *cs++ = MI_SEMAPHORE_WAIT | + MI_SEMAPHORE_GLOBAL_GTT | + MI_SEMAPHORE_POLL | + MI_SEMAPHORE_SAD_EQ_SDD; + *cs++ = 0; + *cs++ = intel_hws_preempt_address(request->engine); + *cs++ = 0; + + return cs; +} + static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs) { cs = gen8_emit_ggtt_write(cs, request->fence.seqno, request->timeline->hwsp_offset, 0); - *cs++ = MI_USER_INTERRUPT; + *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; + cs = emit_preempt_busywait(request, cs); request->tail = intel_ring_offset(request, cs); assert_ring_tail_valid(request->ring, request->tail); @@ -2543,9 +2510,10 @@ static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs) PIPE_CONTROL_FLUSH_ENABLE | PIPE_CONTROL_CS_STALL, 0); - *cs++ = MI_USER_INTERRUPT; + *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; + cs = emit_preempt_busywait(request, cs); request->tail = intel_ring_offset(request, cs); assert_ring_tail_valid(request->ring, request->tail); @@ -2594,8 +2562,7 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine) engine->flags |= I915_ENGINE_SUPPORTS_STATS; if (!intel_vgpu_active(engine->i915)) engine->flags |= I915_ENGINE_HAS_SEMAPHORES; - if (engine->preempt_context && - HAS_LOGICAL_RING_PREEMPTION(engine->i915)) + if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) engine->flags |= I915_ENGINE_HAS_PREEMPTION; } @@ -2718,11 +2685,6 @@ int intel_execlists_submission_init(struct intel_engine_cs *engine) i915_mmio_reg_offset(RING_ELSP(base)); } - execlists->preempt_complete_status = ~0u; - if (engine->preempt_context) - execlists->preempt_complete_status = - upper_32_bits(engine->preempt_context->lrc_desc); - execlists->csb_status = &engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX]; @@ -2734,7 +2696,7 @@ int intel_execlists_submission_init(struct intel_engine_cs *engine) else execlists->csb_size = GEN11_CSB_ENTRIES; - reset_csb_pointers(execlists); + reset_csb_pointers(engine); return 0; } @@ -2917,11 +2879,6 @@ populate_lr_context(struct intel_context *ce, if (!engine->default_state) regs[CTX_CONTEXT_CONTROL + 1] |= _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT); - if (ce->gem_context == engine->i915->preempt_context && - INTEL_GEN(engine->i915) < 11) - regs[CTX_CONTEXT_CONTROL + 1] |= - _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | - CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT); ret = 0; err_unpin_ctx: diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index b7e9fddef270..a497cf7acb6a 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -1248,10 +1248,10 @@ static void error_record_engine_registers(struct i915_gpu_state *error, } } -static void record_request(struct i915_request *request, +static void record_request(const struct i915_request *request, struct drm_i915_error_request *erq) { - struct 
i915_gem_context *ctx = request->gem_context; + const struct i915_gem_context *ctx = request->gem_context; erq->flags = request->fence.flags; erq->context = request->fence.context; @@ -1315,20 +1315,15 @@ static void engine_record_requests(struct intel_engine_cs *engine, ee->num_requests = count; } -static void error_record_engine_execlists(struct intel_engine_cs *engine, +static void error_record_engine_execlists(const struct intel_engine_cs *engine, struct drm_i915_error_engine *ee) { const struct intel_engine_execlists * const execlists = &engine->execlists; - unsigned int n; + struct i915_request * const *port = execlists->active; + unsigned int n = 0; - for (n = 0; n < execlists_num_ports(execlists); n++) { - struct i915_request *rq = port_request(&execlists->port[n]); - - if (!rq) - break; - - record_request(rq, &ee->execlist[n]); - } + while (*port) + record_request(*port++, &ee->execlist[n++]); ee->num_ports = n; } diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 7083e6ab92c5..0c99694faab7 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -276,6 +276,12 @@ static bool i915_request_retire(struct i915_request *rq) local_irq_disable(); + /* + * We only loosely track inflight requests across preemption, + * and so we may find ourselves attempting to retire a _completed_ + * request that we have removed from the HW and put back on a run + * queue. + */ spin_lock(&rq->engine->active.lock); list_del(&rq->sched.link); spin_unlock(&rq->engine->active.lock); diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index edbbdfec24ab..bebc1e9b4a5e 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -28,6 +28,7 @@ #include #include +#include "gt/intel_context_types.h" #include "gt/intel_engine_types.h" #include "i915_gem.h" diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c index 2e9b38bdc33c..b1ba3e65cd52 100644 --- a/drivers/gpu/drm/i915/i915_scheduler.c +++ b/drivers/gpu/drm/i915/i915_scheduler.c @@ -179,8 +179,7 @@ static inline int rq_prio(const struct i915_request *rq) static void kick_submission(struct intel_engine_cs *engine, int prio) { - const struct i915_request *inflight = - port_request(engine->execlists.port); + const struct i915_request *inflight = *engine->execlists.active; /* * If we are already the currently executing context, don't diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h index 2987219a6300..4920ff9aba62 100644 --- a/drivers/gpu/drm/i915/i915_utils.h +++ b/drivers/gpu/drm/i915/i915_utils.h @@ -131,6 +131,18 @@ __check_struct_size(size_t base, size_t arr, size_t count, size_t *size) ((typeof(ptr))((unsigned long)(ptr) | __bits)); \ }) +#define ptr_count_dec(p_ptr) do { \ + typeof(p_ptr) __p = (p_ptr); \ + unsigned long __v = (unsigned long)(*__p); \ + *__p = (typeof(*p_ptr))(--__v); \ +} while (0) + +#define ptr_count_inc(p_ptr) do { \ + typeof(p_ptr) __p = (p_ptr); \ + unsigned long __v = (unsigned long)(*__p); \ + *__p = (typeof(*p_ptr))(++__v); \ +} while (0) + #define page_mask_bits(ptr) ptr_mask_bits(ptr, PAGE_SHIFT) #define page_unmask_bits(ptr) ptr_unmask_bits(ptr, PAGE_SHIFT) #define page_pack_bits(ptr, bits) ptr_pack_bits(ptr, bits, PAGE_SHIFT) diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c index db531ebc7704..12c22359fdac 100644 --- 
a/drivers/gpu/drm/i915/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/intel_guc_submission.c @@ -32,7 +32,11 @@ #include "intel_guc_submission.h" #include "i915_drv.h" -#define GUC_PREEMPT_FINISHED 0x1 +enum { + GUC_PREEMPT_NONE = 0, + GUC_PREEMPT_INPROGRESS, + GUC_PREEMPT_FINISHED, +}; #define GUC_PREEMPT_BREADCRUMB_DWORDS 0x8 #define GUC_PREEMPT_BREADCRUMB_BYTES \ (sizeof(u32) * GUC_PREEMPT_BREADCRUMB_DWORDS) @@ -537,15 +541,11 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq) u32 ctx_desc = lower_32_bits(rq->hw_context->lrc_desc); u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64); - spin_lock(&client->wq_lock); - guc_wq_item_append(client, engine->guc_id, ctx_desc, ring_tail, rq->fence.seqno); guc_ring_doorbell(client); client->submissions[engine->id] += 1; - - spin_unlock(&client->wq_lock); } /* @@ -631,8 +631,9 @@ static void inject_preempt_context(struct work_struct *work) data[6] = intel_guc_ggtt_offset(guc, guc->shared_data); if (WARN_ON(intel_guc_send(guc, data, ARRAY_SIZE(data)))) { - execlists_clear_active(&engine->execlists, - EXECLISTS_ACTIVE_PREEMPT); + intel_write_status_page(engine, + I915_GEM_HWS_PREEMPT, + GUC_PREEMPT_NONE); tasklet_schedule(&engine->execlists.tasklet); } @@ -672,8 +673,6 @@ static void complete_preempt_context(struct intel_engine_cs *engine) { struct intel_engine_execlists *execlists = &engine->execlists; - GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT)); - if (inject_preempt_hang(execlists)) return; @@ -681,89 +680,90 @@ static void complete_preempt_context(struct intel_engine_cs *engine) execlists_unwind_incomplete_requests(execlists); wait_for_guc_preempt_report(engine); - intel_write_status_page(engine, I915_GEM_HWS_PREEMPT, 0); + intel_write_status_page(engine, I915_GEM_HWS_PREEMPT, GUC_PREEMPT_NONE); } -/** - * guc_submit() - Submit commands through GuC - * @engine: engine associated with the commands - * - * The only error here arises if the doorbell hardware isn't functioning - * as expected, which really shouln't happen. 
- */ -static void guc_submit(struct intel_engine_cs *engine) +static void guc_submit(struct intel_engine_cs *engine, + struct i915_request **out, + struct i915_request **end) { struct intel_guc *guc = &engine->i915->guc; - struct intel_engine_execlists * const execlists = &engine->execlists; - struct execlist_port *port = execlists->port; - unsigned int n; + struct intel_guc_client *client = guc->execbuf_client; - for (n = 0; n < execlists_num_ports(execlists); n++) { - struct i915_request *rq; - unsigned int count; + spin_lock(&client->wq_lock); - rq = port_unpack(&port[n], &count); - if (rq && count == 0) { - port_set(&port[n], port_pack(rq, ++count)); + do { + struct i915_request *rq = *out++; - flush_ggtt_writes(rq->ring->vma); + flush_ggtt_writes(rq->ring->vma); + guc_add_request(guc, rq); + } while (out != end); - guc_add_request(guc, rq); - } - } + spin_unlock(&client->wq_lock); } -static void port_assign(struct execlist_port *port, struct i915_request *rq) +static inline int rq_prio(const struct i915_request *rq) { - GEM_BUG_ON(port_isset(port)); - - port_set(port, i915_request_get(rq)); + return rq->sched.attr.priority | __NO_PREEMPTION; } -static inline int rq_prio(const struct i915_request *rq) +static struct i915_request *schedule_in(struct i915_request *rq, int idx) { - return rq->sched.attr.priority; + trace_i915_request_in(rq, idx); + + if (!rq->hw_context->inflight) + rq->hw_context->inflight = rq->engine; + intel_context_inflight_inc(rq->hw_context); + + return i915_request_get(rq); } -static inline int port_prio(const struct execlist_port *port) +static void schedule_out(struct i915_request *rq) { - return rq_prio(port_request(port)) | __NO_PREEMPTION; + trace_i915_request_out(rq); + + intel_context_inflight_dec(rq->hw_context); + if (!intel_context_inflight_count(rq->hw_context)) + rq->hw_context->inflight = NULL; + + i915_request_put(rq); } -static bool __guc_dequeue(struct intel_engine_cs *engine) +static void __guc_dequeue(struct intel_engine_cs *engine) { struct intel_engine_execlists * const execlists = &engine->execlists; - struct execlist_port *port = execlists->port; - struct i915_request *last = NULL; - const struct execlist_port * const last_port = - &execlists->port[execlists->port_mask]; + struct i915_request **first = execlists->inflight; + struct i915_request ** const last_port = first + execlists->port_mask; + struct i915_request *last = first[0]; + struct i915_request **port; bool submit = false; struct rb_node *rb; lockdep_assert_held(&engine->active.lock); - if (port_isset(port)) { + if (last) { if (intel_engine_has_preemption(engine)) { struct guc_preempt_work *preempt_work = &engine->i915->guc.preempt_work[engine->id]; int prio = execlists->queue_priority_hint; - if (i915_scheduler_need_preempt(prio, - port_prio(port))) { - execlists_set_active(execlists, - EXECLISTS_ACTIVE_PREEMPT); + if (i915_scheduler_need_preempt(prio, rq_prio(last))) { + intel_write_status_page(engine, + I915_GEM_HWS_PREEMPT, + GUC_PREEMPT_INPROGRESS); queue_work(engine->i915->guc.preempt_wq, &preempt_work->work); - return false; + return; } } - port++; - if (port_isset(port)) - return false; + if (*++first) + return; + + last = NULL; } - GEM_BUG_ON(port_isset(port)); + port = first; while ((rb = rb_first_cached(&execlists->queue))) { struct i915_priolist *p = to_priolist(rb); struct i915_request *rq, *rn; @@ -774,18 +774,15 @@ static bool __guc_dequeue(struct intel_engine_cs *engine) if (port == last_port) goto done; - if (submit) - port_assign(port, last); + *port = 
schedule_in(last, + port - execlists->inflight); port++; } list_del_init(&rq->sched.link); - __i915_request_submit(rq); - trace_i915_request_in(rq, port_index(port, execlists)); - - last = rq; submit = true; + last = rq; } rb_erase_cached(&p->node, &execlists->queue); @@ -794,58 +791,41 @@ static bool __guc_dequeue(struct intel_engine_cs *engine) done: execlists->queue_priority_hint = rb ? to_priolist(rb)->priority : INT_MIN; - if (submit) - port_assign(port, last); - if (last) - execlists_user_begin(execlists, execlists->port); - - /* We must always keep the beast fed if we have work piled up */ - GEM_BUG_ON(port_isset(execlists->port) && - !execlists_is_active(execlists, EXECLISTS_ACTIVE_USER)); - GEM_BUG_ON(rb_first_cached(&execlists->queue) && - !port_isset(execlists->port)); - - return submit; -} - -static void guc_dequeue(struct intel_engine_cs *engine) -{ - if (__guc_dequeue(engine)) - guc_submit(engine); + if (submit) { + *port = schedule_in(last, port - execlists->inflight); + *++port = NULL; + guc_submit(engine, first, port); + } + execlists->active = execlists->inflight; } static void guc_submission_tasklet(unsigned long data) { struct intel_engine_cs * const engine = (struct intel_engine_cs *)data; struct intel_engine_execlists * const execlists = &engine->execlists; - struct execlist_port *port = execlists->port; - struct i915_request *rq; + struct i915_request **port, *rq; unsigned long flags; spin_lock_irqsave(&engine->active.lock, flags); - rq = port_request(port); - while (rq && i915_request_completed(rq)) { - trace_i915_request_out(rq); - i915_request_put(rq); + for (port = execlists->inflight; (rq = *port); port++) { + if (!i915_request_completed(rq)) + break; - port = execlists_port_complete(execlists, port); - if (port_isset(port)) { - execlists_user_begin(execlists, port); - rq = port_request(port); - } else { - execlists_user_end(execlists); - rq = NULL; - } + schedule_out(rq); + } + if (port != execlists->inflight) { + int idx = port - execlists->inflight; + int rem = ARRAY_SIZE(execlists->inflight) - idx; + memmove(execlists->inflight, port, rem * sizeof(*port)); } - if (execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT) && - intel_read_status_page(engine, I915_GEM_HWS_PREEMPT) == + if (intel_read_status_page(engine, I915_GEM_HWS_PREEMPT) == GUC_PREEMPT_FINISHED) complete_preempt_context(engine); - if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT)) - guc_dequeue(engine); + if (!intel_read_status_page(engine, I915_GEM_HWS_PREEMPT)) + __guc_dequeue(engine); spin_unlock_irqrestore(&engine->active.lock, flags); } @@ -959,7 +939,6 @@ static void guc_cancel_requests(struct intel_engine_cs *engine) execlists->queue_priority_hint = INT_MIN; execlists->queue = RB_ROOT_CACHED; - GEM_BUG_ON(port_isset(execlists->port)); spin_unlock_irqrestore(&engine->active.lock, flags); } @@ -1422,7 +1401,7 @@ int intel_guc_submission_enable(struct intel_guc *guc) * and it is guaranteed that it will remove the work item from the * queue before our request is completed. 
*/ - BUILD_BUG_ON(ARRAY_SIZE(engine->execlists.port) * + BUILD_BUG_ON(ARRAY_SIZE(engine->execlists.inflight) * sizeof(struct guc_wq_item) * I915_NUM_ENGINES > GUC_WQ_SIZE); diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 298bb7116c51..1a5b9e284ca9 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -366,13 +366,15 @@ static int __igt_breadcrumbs_smoketest(void *arg) if (!wait_event_timeout(wait->wait, i915_sw_fence_done(wait), - HZ / 2)) { + 5 * HZ)) { struct i915_request *rq = requests[count - 1]; - pr_err("waiting for %d fences (last %llx:%lld) on %s timed out!\n", - count, + pr_err("waiting for %d/%d fences (last %llx:%lld) on %s timed out!\n", + atomic_read(&wait->pending), count, rq->fence.context, rq->fence.seqno, t->engine->name); + GEM_TRACE_DUMP(); + i915_gem_set_wedged(t->engine->i915); GEM_BUG_ON(!i915_request_completed(rq)); i915_sw_fence_wait(wait); -- cgit v1.2.3 From 8ee36e048c98d4015804a23f884be2576f778a93 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 20 Jun 2019 15:20:52 +0100 Subject: drm/i915/execlists: Minimalistic timeslicing If we have multiple contexts of equal priority pending execution, activate a timer to demote the currently executing context in favour of the next in the queue when that timeslice expires. This enforces fairness between contexts (so long as they allow preemption -- forced preemption, in the future, will kick those who do not obey) and allows us to avoid userspace blocking forward progress with e.g. unbounded MI_SEMAPHORE_WAIT. For the starting point here, we use the jiffie as our timeslice so that we should be reasonably efficient wrt frequent CPU wakeups. Testcase: igt/gem_exec_scheduler/semaphore-resolve Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190620142052.19311-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_engine_types.h | 6 + drivers/gpu/drm/i915/gt/intel_lrc.c | 116 ++++++++++++++ drivers/gpu/drm/i915/gt/selftest_lrc.c | 223 +++++++++++++++++++++++++++ drivers/gpu/drm/i915/i915_scheduler.c | 1 + drivers/gpu/drm/i915/i915_scheduler_types.h | 1 + 5 files changed, 347 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index b4f7b81a3c3e..9940bad37812 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -12,6 +12,7 @@ #include #include #include +#include #include #include "i915_gem.h" @@ -149,6 +150,11 @@ struct intel_engine_execlists { */ struct tasklet_struct tasklet; + /** + * @timer: kick the current context if its timeslice expires + */ + struct timer_list timer; + /** * @default_priolist: priority list for I915_PRIORITY_NORMAL */ diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index cb9d285bd00a..f8ad49006917 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -266,6 +266,7 @@ static int effective_prio(const struct i915_request *rq) prio |= I915_PRIORITY_NOSEMAPHORE; /* Restrict mere WAIT boosts from triggering preemption */ + BUILD_BUG_ON(__NO_PREEMPTION & ~I915_PRIORITY_MASK); /* only internal */ return prio | __NO_PREEMPTION; } @@ -830,6 +831,81 @@ last_active(const struct intel_engine_execlists *execlists) return *last; } +static void +defer_request(struct i915_request * const rq, 
struct list_head * const pl) +{ + struct i915_dependency *p; + + /* + * We want to move the interrupted request to the back of + * the round-robin list (i.e. its priority level), but + * in doing so, we must then move all requests that were in + * flight and were waiting for the interrupted request to + * be run after it again. + */ + list_move_tail(&rq->sched.link, pl); + + list_for_each_entry(p, &rq->sched.waiters_list, wait_link) { + struct i915_request *w = + container_of(p->waiter, typeof(*w), sched); + + /* Leave semaphores spinning on the other engines */ + if (w->engine != rq->engine) + continue; + + /* No waiter should start before the active request completed */ + GEM_BUG_ON(i915_request_started(w)); + + GEM_BUG_ON(rq_prio(w) > rq_prio(rq)); + if (rq_prio(w) < rq_prio(rq)) + continue; + + if (list_empty(&w->sched.link)) + continue; /* Not yet submitted; unready */ + + /* + * This should be very shallow as it is limited by the + * number of requests that can fit in a ring (<64) and + * the number of contexts that can be in flight on this + * engine. + */ + defer_request(w, pl); + } +} + +static void defer_active(struct intel_engine_cs *engine) +{ + struct i915_request *rq; + + rq = __unwind_incomplete_requests(engine); + if (!rq) + return; + + defer_request(rq, i915_sched_lookup_priolist(engine, rq_prio(rq))); +} + +static bool +need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq) +{ + int hint; + + if (list_is_last(&rq->sched.link, &engine->active.requests)) + return false; + + hint = max(rq_prio(list_next_entry(rq, sched.link)), + engine->execlists.queue_priority_hint); + + return hint >= rq_prio(rq); +} + +static bool +enable_timeslice(struct intel_engine_cs *engine) +{ + struct i915_request *last = last_active(&engine->execlists); + + return last && need_timeslice(engine, last); +} + static void execlists_dequeue(struct intel_engine_cs *engine) { struct intel_engine_execlists * const execlists = &engine->execlists; @@ -923,6 +999,32 @@ static void execlists_dequeue(struct intel_engine_cs *engine) */ last->hw_context->lrc_desc |= CTX_DESC_FORCE_RESTORE; last = NULL; + } else if (need_timeslice(engine, last) && + !timer_pending(&engine->execlists.timer)) { + GEM_TRACE("%s: expired last=%llx:%lld, prio=%d, hint=%d\n", + engine->name, + last->fence.context, + last->fence.seqno, + last->sched.attr.priority, + execlists->queue_priority_hint); + + ring_set_paused(engine, 1); + defer_active(engine); + + /* + * Unlike for preemption, if we rewind and continue + * executing the same context as previously active, + * the order of execution will remain the same and + * the tail will only advance. We do not need to + * force a full context restore, as a lite-restore + * is sufficient to resample the monotonic TAIL. + * + * If we switch to any other context, similarly we + * will not rewind TAIL of current context, and + * normal save/restore will preserve state and allow + * us to later continue executing the same request. 
+ */ + last = NULL; } else { /* * Otherwise if we already have a request pending @@ -1247,6 +1349,9 @@ promote: sizeof(*execlists->pending)); execlists->pending[0] = NULL; + if (enable_timeslice(engine)) + mod_timer(&execlists->timer, jiffies + 1); + if (!inject_preempt_hang(execlists)) ring_set_paused(engine, 0); } else if (status & GEN8_CTX_STATUS_PREEMPTED) { @@ -1317,6 +1422,15 @@ static void execlists_submission_tasklet(unsigned long data) spin_unlock_irqrestore(&engine->active.lock, flags); } +static void execlists_submission_timer(struct timer_list *timer) +{ + struct intel_engine_cs *engine = + from_timer(engine, timer, execlists.timer); + + /* Kick the tasklet for some interrupt coalescing and reset handling */ + tasklet_hi_schedule(&engine->execlists.tasklet); +} + static void queue_request(struct intel_engine_cs *engine, struct i915_sched_node *node, int prio) @@ -2542,6 +2656,7 @@ static int gen8_init_rcs_context(struct i915_request *rq) static void execlists_park(struct intel_engine_cs *engine) { + del_timer_sync(&engine->execlists.timer); intel_engine_park(engine); } @@ -2639,6 +2754,7 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine) tasklet_init(&engine->execlists.tasklet, execlists_submission_tasklet, (unsigned long)engine); + timer_setup(&engine->execlists.timer, execlists_submission_timer, 0); logical_ring_default_vfuncs(engine); logical_ring_default_irqs(engine); diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index 401e8b539297..0c97f953e908 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -79,6 +79,225 @@ err_unlock: return err; } +static int +emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx) +{ + u32 *cs; + + cs = intel_ring_begin(rq, 10); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; + + *cs++ = MI_SEMAPHORE_WAIT | + MI_SEMAPHORE_GLOBAL_GTT | + MI_SEMAPHORE_POLL | + MI_SEMAPHORE_SAD_NEQ_SDD; + *cs++ = 0; + *cs++ = i915_ggtt_offset(vma) + 4 * idx; + *cs++ = 0; + + if (idx > 0) { + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1); + *cs++ = 0; + *cs++ = 1; + } else { + *cs++ = MI_NOOP; + *cs++ = MI_NOOP; + *cs++ = MI_NOOP; + *cs++ = MI_NOOP; + } + + *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; + + intel_ring_advance(rq, cs); + return 0; +} + +static struct i915_request * +semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx) +{ + struct i915_gem_context *ctx; + struct i915_request *rq; + int err; + + ctx = kernel_context(engine->i915); + if (!ctx) + return ERR_PTR(-ENOMEM); + + rq = igt_request_alloc(ctx, engine); + if (IS_ERR(rq)) + goto out_ctx; + + err = emit_semaphore_chain(rq, vma, idx); + i915_request_add(rq); + if (err) + rq = ERR_PTR(err); + +out_ctx: + kernel_context_close(ctx); + return rq; +} + +static int +release_queue(struct intel_engine_cs *engine, + struct i915_vma *vma, + int idx) +{ + struct i915_sched_attr attr = { + .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX), + }; + struct i915_request *rq; + u32 *cs; + + rq = i915_request_create(engine->kernel_context); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + cs = intel_ring_begin(rq, 4); + if (IS_ERR(cs)) { + i915_request_add(rq); + return PTR_ERR(cs); + } + + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1); + *cs++ = 0; + *cs++ = 1; + + intel_ring_advance(rq, cs); + i915_request_add(rq); + + engine->schedule(rq, 
&attr); + + return 0; +} + +static int +slice_semaphore_queue(struct intel_engine_cs *outer, + struct i915_vma *vma, + int count) +{ + struct intel_engine_cs *engine; + struct i915_request *head; + enum intel_engine_id id; + int err, i, n = 0; + + head = semaphore_queue(outer, vma, n++); + if (IS_ERR(head)) + return PTR_ERR(head); + + i915_request_get(head); + for_each_engine(engine, outer->i915, id) { + for (i = 0; i < count; i++) { + struct i915_request *rq; + + rq = semaphore_queue(engine, vma, n++); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out; + } + } + } + + err = release_queue(outer, vma, n); + if (err) + goto out; + + if (i915_request_wait(head, + I915_WAIT_LOCKED, + 2 * RUNTIME_INFO(outer->i915)->num_engines * (count + 2) * (count + 3)) < 0) { + pr_err("Failed to slice along semaphore chain of length (%d, %d)!\n", + count, n); + GEM_TRACE_DUMP(); + i915_gem_set_wedged(outer->i915); + err = -EIO; + } + +out: + i915_request_put(head); + return err; +} + +static int live_timeslice_preempt(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct drm_i915_gem_object *obj; + intel_wakeref_t wakeref; + struct i915_vma *vma; + void *vaddr; + int err = 0; + int count; + + /* + * If a request takes too long, we would like to give other users + * a fair go on the GPU. In particular, users may create batches + * that wait upon external input, where that input may even be + * supplied by another GPU job. To avoid blocking forever, we + * need to preempt the current task and replace it with another + * ready task. + */ + + mutex_lock(&i915->drm.struct_mutex); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); + + obj = i915_gem_object_create_internal(i915, PAGE_SIZE); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + goto err_unlock; + } + + vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err_obj; + } + + vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC); + if (IS_ERR(vaddr)) { + err = PTR_ERR(vaddr); + goto err_obj; + } + + err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL); + if (err) + goto err_map; + + for_each_prime_number_from(count, 1, 16) { + struct intel_engine_cs *engine; + enum intel_engine_id id; + + for_each_engine(engine, i915, id) { + memset(vaddr, 0, PAGE_SIZE); + + err = slice_semaphore_queue(engine, vma, count); + if (err) + goto err_pin; + + if (igt_flush_test(i915, I915_WAIT_LOCKED)) { + err = -EIO; + goto err_pin; + } + } + } + +err_pin: + i915_vma_unpin(vma); +err_map: + i915_gem_object_unpin_map(obj); +err_obj: + i915_gem_object_put(obj); +err_unlock: + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + err = -EIO; + intel_runtime_pm_put(&i915->runtime_pm, wakeref); + mutex_unlock(&i915->drm.struct_mutex); + + return err; +} + static int live_busywait_preempt(void *arg) { struct drm_i915_private *i915 = arg; @@ -398,6 +617,9 @@ static int live_late_preempt(void *arg) if (!ctx_lo) goto err_ctx_hi; + /* Make sure ctx_lo stays before ctx_hi until we trigger preemption. 
*/ + ctx_lo->sched.priority = I915_USER_PRIORITY(1); + for_each_engine(engine, i915, id) { struct igt_live_test t; struct i915_request *rq; @@ -1812,6 +2034,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(live_sanitycheck), + SUBTEST(live_timeslice_preempt), SUBTEST(live_busywait_preempt), SUBTEST(live_preempt), SUBTEST(live_late_preempt), diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c index b1ba3e65cd52..0bd452e851d8 100644 --- a/drivers/gpu/drm/i915/i915_scheduler.c +++ b/drivers/gpu/drm/i915/i915_scheduler.c @@ -394,6 +394,7 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node, list_add(&dep->wait_link, &signal->waiters_list); list_add(&dep->signal_link, &node->signalers_list); dep->signaler = signal; + dep->waiter = node; dep->flags = flags; /* Keep track of whether anyone on this chain has a semaphore */ diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h index 3e309631bd0b..aad81acba9dc 100644 --- a/drivers/gpu/drm/i915/i915_scheduler_types.h +++ b/drivers/gpu/drm/i915/i915_scheduler_types.h @@ -62,6 +62,7 @@ struct i915_sched_node { struct i915_dependency { struct i915_sched_node *signaler; + struct i915_sched_node *waiter; struct list_head signal_link; struct list_head wait_link; struct list_head dfs_link; -- cgit v1.2.3 From cffa1eb8a22392a02dbc6a372094262f888fd3ed Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 19 Jun 2019 21:35:04 +0100 Subject: drm/i915: Rings are always flushed Our intel_rings are always flushed as they are continually used to submit commands to the GPU, and so do not need to be flushed on unpinning. This avoids pulling in the flush_ggtt_writes locking into our context unpin, which we want to allow from atomic context (for simplicity). Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190619203504.4220-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_ringbuffer.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c index 12010e798868..8b1da57c3764 100644 --- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c @@ -1219,6 +1219,7 @@ void intel_ring_unpin(struct intel_ring *ring) intel_ring_reset(ring, ring->tail); GEM_BUG_ON(!ring->vma); + i915_vma_unset_ggtt_write(ring->vma); if (i915_vma_is_map_and_fenceable(ring->vma)) i915_vma_unpin_iomap(ring->vma); else -- cgit v1.2.3 From 60a0933bff57af52f99a17cbe9f0bc5d27771236 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 20 Jun 2019 11:24:32 +0100 Subject: drm/i915/selftests: Use request managed wakerefs Since commit 79ffac8599c4 ("drm/i915: Invert the GEM wakeref hierarchy"), the request creation itself took responsibility for managing the engine/GT wakerefs and so we can remove the redundant grabs in our selftests. 
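The per-test change is mechanical. A condensed before/after sketch, reusing the obj/ctx/engine/dw locals and the gpu_fill() helper that already exist in i915_gem_context.c (illustration only, not an extra hunk):

        intel_wakeref_t wakeref;
        int err;

        /* Before: each selftest bracketed its submission with runtime pm. */
        with_intel_runtime_pm(&i915->runtime_pm, wakeref)
                err = gpu_fill(obj, ctx, engine, dw);

        /* After: request creation takes the engine/GT wakeref itself, so
         * the explicit get/put (and the local wakeref) simply disappear.
         */
        err = gpu_fill(obj, ctx, engine, dw);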
References: 79ffac8599c4 ("drm/i915: Invert the GEM wakeref hierarchy") Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190620102432.31580-1-chris@chris-wilson.co.uk --- .../gpu/drm/i915/gem/selftests/i915_gem_context.c | 35 +++++----------------- 1 file changed, 7 insertions(+), 28 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index eaa2b16574c7..f2d4e773404a 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -31,7 +31,6 @@ static int live_nop_switch(void *arg) struct intel_engine_cs *engine; struct i915_gem_context **ctx; enum intel_engine_id id; - intel_wakeref_t wakeref; struct igt_live_test t; struct drm_file *file; unsigned long n; @@ -53,7 +52,6 @@ static int live_nop_switch(void *arg) return PTR_ERR(file); mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(&i915->runtime_pm); ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL); if (!ctx) { @@ -152,7 +150,6 @@ static int live_nop_switch(void *arg) } out_unlock: - intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); mock_file_free(i915, file); return err; @@ -507,7 +504,6 @@ static int igt_ctx_exec(void *arg) dw = 0; while (!time_after(jiffies, end_time)) { struct i915_gem_context *ctx; - intel_wakeref_t wakeref; ctx = live_context(i915, file); if (IS_ERR(ctx)) { @@ -523,8 +519,7 @@ static int igt_ctx_exec(void *arg) } } - with_intel_runtime_pm(&i915->runtime_pm, wakeref) - err = gpu_fill(obj, ctx, engine, dw); + err = gpu_fill(obj, ctx, engine, dw); if (err) { pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n", ndwords, dw, max_dwords(obj), @@ -623,7 +618,6 @@ static int igt_shared_ctx_exec(void *arg) ncontexts = 0; while (!time_after(jiffies, end_time)) { struct i915_gem_context *ctx; - intel_wakeref_t wakeref; ctx = kernel_context(i915); if (IS_ERR(ctx)) { @@ -642,9 +636,7 @@ static int igt_shared_ctx_exec(void *arg) } } - err = 0; - with_intel_runtime_pm(&i915->runtime_pm, wakeref) - err = gpu_fill(obj, ctx, engine, dw); + err = gpu_fill(obj, ctx, engine, dw); if (err) { pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? 
%s], err=%d\n", ndwords, dw, max_dwords(obj), @@ -1030,7 +1022,6 @@ __igt_ctx_sseu(struct drm_i915_private *i915, struct i915_gem_context *ctx; struct intel_context *ce; struct intel_sseu pg_sseu; - intel_wakeref_t wakeref; struct drm_file *file; int ret; @@ -1078,12 +1069,10 @@ __igt_ctx_sseu(struct drm_i915_private *i915, goto out_unlock; } - wakeref = intel_runtime_pm_get(&i915->runtime_pm); - ce = i915_gem_context_get_engine(ctx, RCS0); if (IS_ERR(ce)) { ret = PTR_ERR(ce); - goto out_rpm; + goto out_put; } ret = intel_context_pin(ce); @@ -1117,8 +1106,7 @@ out_fail: intel_context_unpin(ce); out_context: intel_context_put(ce); -out_rpm: - intel_runtime_pm_put(&i915->runtime_pm, wakeref); +out_put: i915_gem_object_put(obj); out_unlock: @@ -1207,8 +1195,6 @@ static int igt_ctx_readonly(void *arg) unsigned int id; for_each_engine(engine, i915, id) { - intel_wakeref_t wakeref; - if (!intel_engine_can_store_dword(engine)) continue; @@ -1223,9 +1209,7 @@ static int igt_ctx_readonly(void *arg) i915_gem_object_set_readonly(obj); } - err = 0; - with_intel_runtime_pm(&i915->runtime_pm, wakeref) - err = gpu_fill(obj, ctx, engine, dw); + err = gpu_fill(obj, ctx, engine, dw); if (err) { pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n", ndwords, dw, max_dwords(obj), @@ -1488,7 +1472,6 @@ static int igt_vm_isolation(void *arg) struct drm_i915_private *i915 = arg; struct i915_gem_context *ctx_a, *ctx_b; struct intel_engine_cs *engine; - intel_wakeref_t wakeref; struct igt_live_test t; struct drm_file *file; I915_RND_STATE(prng); @@ -1535,8 +1518,6 @@ static int igt_vm_isolation(void *arg) GEM_BUG_ON(ctx_b->vm->total != vm_total); vm_total -= I915_GTT_PAGE_SIZE; - wakeref = intel_runtime_pm_get(&i915->runtime_pm); - count = 0; for_each_engine(engine, i915, id) { IGT_TIMEOUT(end_time); @@ -1560,7 +1541,7 @@ static int igt_vm_isolation(void *arg) err = read_from_scratch(ctx_b, engine, offset, &value); if (err) - goto out_rpm; + goto out_unlock; if (value) { pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n", @@ -1569,7 +1550,7 @@ static int igt_vm_isolation(void *arg) lower_32_bits(offset), this); err = -EINVAL; - goto out_rpm; + goto out_unlock; } this++; @@ -1579,8 +1560,6 @@ static int igt_vm_isolation(void *arg) pr_info("Checked %lu scratch offsets across %d engines\n", count, RUNTIME_INFO(i915)->num_engines); -out_rpm: - intel_runtime_pm_put(&i915->runtime_pm, wakeref); out_unlock: if (igt_live_test_end(&t)) err = -EIO; -- cgit v1.2.3 From 6a7bafe8fdb6019101ec90a5f5ddeea6f58d1158 Mon Sep 17 00:00:00 2001 From: Vandita Kulkarni Date: Wed, 19 Jun 2019 16:31:33 -0700 Subject: drm/i915/ehl/dsi: Set lane latency optimization for DW1 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit EHL has 2 additional steps in the DSI sequence, this is one of then the lane latency optimization for DW1. 
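In outline, the new step is a read-modify-write of PCS_DW1, gated on EHL, mirroring the FRC_LATENCY_OPTIM programming already done in gen11_dsi_config_phy_lanes_sequence(). A condensed sketch of the hunk below (register names and values as added by this patch):

        if (IS_ELKHARTLAKE(dev_priv)) {
                /* AUX lane: program latency optimization value 0 */
                tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(port));
                tmp &= ~LATENCY_OPTIM_MASK;
                tmp |= LATENCY_OPTIM_VAL(0);
                I915_WRITE(ICL_PORT_PCS_DW1_AUX(port), tmp);

                /* Data lanes: sample lane 0, write value 0x1 to the group */
                tmp = I915_READ(ICL_PORT_PCS_DW1_LN0(port));
                tmp &= ~LATENCY_OPTIM_MASK;
                tmp |= LATENCY_OPTIM_VAL(0x1);
                I915_WRITE(ICL_PORT_PCS_DW1_GRP(port), tmp);
        }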
BSpec: 20597 Cc: Uma Shankar Cc: Rodrigo Vivi Cc: Jani Nikula Reviewed-by: Matt Roper Signed-off-by: Vandita Kulkarni Signed-off-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190619233134.20009-1-jose.souza@intel.com --- drivers/gpu/drm/i915/display/icl_dsi.c | 13 +++++++++++++ drivers/gpu/drm/i915/i915_reg.h | 2 ++ 2 files changed, 15 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index 74448e6bf749..8b4d589be4b4 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -403,6 +403,19 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder) tmp &= ~FRC_LATENCY_OPTIM_MASK; tmp |= FRC_LATENCY_OPTIM_VAL(0x5); I915_WRITE(ICL_PORT_TX_DW2_GRP(port), tmp); + + /* For EHL set latency optimization for PCS_DW1 lanes */ + if (IS_ELKHARTLAKE(dev_priv)) { + tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(port)); + tmp &= ~LATENCY_OPTIM_MASK; + tmp |= LATENCY_OPTIM_VAL(0); + I915_WRITE(ICL_PORT_PCS_DW1_AUX(port), tmp); + + tmp = I915_READ(ICL_PORT_PCS_DW1_LN0(port)); + tmp &= ~LATENCY_OPTIM_MASK; + tmp |= LATENCY_OPTIM_VAL(0x1); + I915_WRITE(ICL_PORT_PCS_DW1_GRP(port), tmp); + } } } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 02c7f8c6c20b..969c3b23d519 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1896,6 +1896,8 @@ enum i915_power_well_id { #define ICL_PORT_PCS_DW1_GRP(port) _MMIO(_ICL_PORT_PCS_DW_GRP(1, port)) #define ICL_PORT_PCS_DW1_LN0(port) _MMIO(_ICL_PORT_PCS_DW_LN(1, 0, port)) #define COMMON_KEEPER_EN (1 << 26) +#define LATENCY_OPTIM_MASK (0x3 << 2) +#define LATENCY_OPTIM_VAL(x) ((x) << 2) /* CNL/ICL Port TX registers */ #define _CNL_PORT_TX_AE_GRP_OFFSET 0x162340 -- cgit v1.2.3 From 683d672c425aa29c0e74583ed28a0e011cc0bb43 Mon Sep 17 00:00:00 2001 From: José Roberto de Souza Date: Wed, 19 Jun 2019 16:31:34 -0700 Subject: drm/i915/ehl/dsi: Enable AFE over PPI strap MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The other additional step in the DSI sequence for EHL. 
v2: - Using REG_BIT()(Matt) - Fixed commit message typo(Vandita) BSpec: 20597 Cc: Uma Shankar Cc: Jani Nikula Reviewed-by: Vandita Kulkarni Reviewed-by: Matt Roper Signed-off-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190619233134.20009-2-jose.souza@intel.com --- drivers/gpu/drm/i915/display/icl_dsi.c | 8 ++++++++ drivers/gpu/drm/i915/i915_reg.h | 4 ++++ 2 files changed, 12 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index 8b4d589be4b4..b8673debf932 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -544,6 +544,14 @@ static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder) I915_WRITE(DSI_TA_TIMING_PARAM(port), tmp); } } + + if (IS_ELKHARTLAKE(dev_priv)) { + for_each_dsi_port(port, intel_dsi->ports) { + tmp = I915_READ(ICL_DPHY_CHKN(port)); + tmp |= ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP; + I915_WRITE(ICL_DPHY_CHKN(port), tmp); + } + } } static void gen11_dsi_gate_clocks(struct intel_encoder *encoder) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 969c3b23d519..6ccc713d85b3 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1993,6 +1993,10 @@ enum i915_power_well_id { #define N_SCALAR(x) ((x) << 24) #define N_SCALAR_MASK (0x7F << 24) +#define _ICL_DPHY_CHKN_REG 0x194 +#define ICL_DPHY_CHKN(port) _MMIO(_ICL_COMBOPHY(port) + _ICL_DPHY_CHKN_REG) +#define ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP REG_BIT(7) + #define MG_PHY_PORT_LN(ln, port, ln0p1, ln0p2, ln1p1) \ _MMIO(_PORT((port) - PORT_C, ln0p1, ln0p2) + (ln) * ((ln1p1) - (ln0p1))) -- cgit v1.2.3 From b32fa811156328aea5a3c2ff05cc096490382456 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 20 Jun 2019 19:37:05 +0100 Subject: drm/i915/gtt: Defer address space cleanup to an RCU worker Enable RCU protection of i915_address_space and its ppgtt superclasses, and defer its cleanup into a worker executed after an RCU grace period. In the future we will be able to use the RCU protection to reduce the locking around VM lookups, but the immediate benefit is being able to defer the release into a kworker (process context). This is required as we may need to sleep to reap the WC pages stashed away inside the ppgtt. 
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=110934 Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190620183705.31006-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/Makefile.header-test | 1 + drivers/gpu/drm/i915/i915_gem_gtt.c | 174 ++++++++++---------------- drivers/gpu/drm/i915/i915_gem_gtt.h | 7 +- drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 2 - 4 files changed, 71 insertions(+), 113 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/Makefile.header-test b/drivers/gpu/drm/i915/Makefile.header-test index e6ba66f787f9..cb74242f9c3b 100644 --- a/drivers/gpu/drm/i915/Makefile.header-test +++ b/drivers/gpu/drm/i915/Makefile.header-test @@ -6,6 +6,7 @@ header_test := \ i915_active_types.h \ i915_debugfs.h \ i915_drv.h \ + i915_gem_gtt.h \ i915_irq.h \ i915_params.h \ i915_priolist_types.h \ diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 8ab820145ea6..5b858f675f29 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -482,9 +482,69 @@ static void vm_free_page(struct i915_address_space *vm, struct page *page) spin_unlock(&vm->free_pages.lock); } +static void i915_address_space_fini(struct i915_address_space *vm) +{ + spin_lock(&vm->free_pages.lock); + if (pagevec_count(&vm->free_pages.pvec)) + vm_free_pages_release(vm, true); + GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec)); + spin_unlock(&vm->free_pages.lock); + + drm_mm_takedown(&vm->mm); + + mutex_destroy(&vm->mutex); +} + +static void ppgtt_destroy_vma(struct i915_address_space *vm) +{ + struct list_head *phases[] = { + &vm->bound_list, + &vm->unbound_list, + NULL, + }, **phase; + + mutex_lock(&vm->i915->drm.struct_mutex); + for (phase = phases; *phase; phase++) { + struct i915_vma *vma, *vn; + + list_for_each_entry_safe(vma, vn, *phase, vm_link) + i915_vma_destroy(vma); + } + mutex_unlock(&vm->i915->drm.struct_mutex); +} + +static void __i915_vm_release(struct work_struct *work) +{ + struct i915_address_space *vm = + container_of(work, struct i915_address_space, rcu.work); + + ppgtt_destroy_vma(vm); + + GEM_BUG_ON(!list_empty(&vm->bound_list)); + GEM_BUG_ON(!list_empty(&vm->unbound_list)); + + vm->cleanup(vm); + i915_address_space_fini(vm); + + kfree(vm); +} + +void i915_vm_release(struct kref *kref) +{ + struct i915_address_space *vm = + container_of(kref, struct i915_address_space, ref); + + GEM_BUG_ON(i915_is_ggtt(vm)); + trace_i915_ppgtt_release(vm); + + vm->closed = true; + queue_rcu_work(vm->i915->wq, &vm->rcu); +} + static void i915_address_space_init(struct i915_address_space *vm, int subclass) { kref_init(&vm->ref); + INIT_RCU_WORK(&vm->rcu, __i915_vm_release); /* * The vm->mutex must be reclaim safe (for use in the shrinker). 
@@ -505,19 +565,6 @@ static void i915_address_space_init(struct i915_address_space *vm, int subclass) INIT_LIST_HEAD(&vm->bound_list); } -static void i915_address_space_fini(struct i915_address_space *vm) -{ - spin_lock(&vm->free_pages.lock); - if (pagevec_count(&vm->free_pages.pvec)) - vm_free_pages_release(vm, true); - GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec)); - spin_unlock(&vm->free_pages.lock); - - drm_mm_takedown(&vm->mm); - - mutex_destroy(&vm->mutex); -} - static int __setup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p, gfp_t gfp) @@ -1909,62 +1956,15 @@ static void gen6_ppgtt_free_pd(struct gen6_ppgtt *ppgtt) free_pt(&ppgtt->base.vm, pt); } -struct gen6_ppgtt_cleanup_work { - struct work_struct base; - struct i915_vma *vma; -}; - -static void gen6_ppgtt_cleanup_work(struct work_struct *wrk) -{ - struct gen6_ppgtt_cleanup_work *work = - container_of(wrk, typeof(*work), base); - /* Side note, vma->vm is the GGTT not the ppgtt we just destroyed! */ - struct drm_i915_private *i915 = work->vma->vm->i915; - - mutex_lock(&i915->drm.struct_mutex); - i915_vma_destroy(work->vma); - mutex_unlock(&i915->drm.struct_mutex); - - kfree(work); -} - -static int nop_set_pages(struct i915_vma *vma) -{ - return -ENODEV; -} - -static void nop_clear_pages(struct i915_vma *vma) -{ -} - -static int nop_bind(struct i915_vma *vma, - enum i915_cache_level cache_level, - u32 unused) -{ - return -ENODEV; -} - -static void nop_unbind(struct i915_vma *vma) -{ -} - -static const struct i915_vma_ops nop_vma_ops = { - .set_pages = nop_set_pages, - .clear_pages = nop_clear_pages, - .bind_vma = nop_bind, - .unbind_vma = nop_unbind, -}; - static void gen6_ppgtt_cleanup(struct i915_address_space *vm) { struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); - struct gen6_ppgtt_cleanup_work *work = ppgtt->work; + struct drm_i915_private *i915 = vm->i915; /* FIXME remove the struct_mutex to bring the locking under control */ - INIT_WORK(&work->base, gen6_ppgtt_cleanup_work); - work->vma = ppgtt->vma; - work->vma->ops = &nop_vma_ops; - schedule_work(&work->base); + mutex_lock(&i915->drm.struct_mutex); + i915_vma_destroy(ppgtt->vma); + mutex_unlock(&i915->drm.struct_mutex); gen6_ppgtt_free_pd(ppgtt); gen6_ppgtt_free_scratch(vm); @@ -2146,16 +2146,10 @@ static struct i915_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915) ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode; - ppgtt->work = kmalloc(sizeof(*ppgtt->work), GFP_KERNEL); - if (!ppgtt->work) { - err = -ENOMEM; - goto err_free; - } - ppgtt->base.pd = __alloc_pd(); if (!ppgtt->base.pd) { err = -ENOMEM; - goto err_work; + goto err_free; } err = gen6_ppgtt_init_scratch(ppgtt); @@ -2174,8 +2168,6 @@ err_scratch: gen6_ppgtt_free_scratch(&ppgtt->base.vm); err_pd: kfree(ppgtt->base.pd); -err_work: - kfree(ppgtt->work); err_free: kfree(ppgtt); return ERR_PTR(err); @@ -2250,42 +2242,6 @@ i915_ppgtt_create(struct drm_i915_private *i915) return ppgtt; } -static void ppgtt_destroy_vma(struct i915_address_space *vm) -{ - struct list_head *phases[] = { - &vm->bound_list, - &vm->unbound_list, - NULL, - }, **phase; - - vm->closed = true; - for (phase = phases; *phase; phase++) { - struct i915_vma *vma, *vn; - - list_for_each_entry_safe(vma, vn, *phase, vm_link) - i915_vma_destroy(vma); - } -} - -void i915_vm_release(struct kref *kref) -{ - struct i915_address_space *vm = - container_of(kref, struct i915_address_space, ref); - - GEM_BUG_ON(i915_is_ggtt(vm)); - trace_i915_ppgtt_release(vm); - - ppgtt_destroy_vma(vm); - - 
GEM_BUG_ON(!list_empty(&vm->bound_list)); - GEM_BUG_ON(!list_empty(&vm->unbound_list)); - - vm->cleanup(vm); - i915_address_space_fini(vm); - - kfree(vm); -} - /* Certain Gen5 chipsets require require idling the GPU before * unmapping anything from the GTT when VT-d is enabled. */ diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 812717ccc69b..4d6f96763a98 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -35,8 +35,12 @@ #define __I915_GEM_GTT_H__ #include +#include #include #include +#include + +#include #include "gt/intel_reset.h" #include "i915_gem_fence_reg.h" @@ -280,6 +284,7 @@ struct pagestash { struct i915_address_space { struct kref ref; + struct rcu_work rcu; struct drm_mm mm; struct drm_i915_private *i915; @@ -425,8 +430,6 @@ struct gen6_ppgtt { unsigned int pin_count; bool scan_for_unused_pt; - - struct gen6_ppgtt_cleanup_work *work; }; #define __to_gen6_ppgtt(base) container_of(base, struct gen6_ppgtt, base) diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index 1a60b9fe8221..0c47276ed5df 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -208,9 +208,7 @@ static int igt_ppgtt_alloc(void *arg) } err_ppgtt_cleanup: - mutex_lock(&dev_priv->drm.struct_mutex); i915_vm_put(&ppgtt->vm); - mutex_unlock(&dev_priv->drm.struct_mutex); return err; } -- cgit v1.2.3 From 12fdaf19e05b69992502eb9c97fc3c180f797564 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 21 Jun 2019 09:07:29 +0100 Subject: drm/i915/execlists: Keep virtual context alive until after we kick The call to kick_siblings() dereferences the rq->context, so we should not drop our local reference until afterwards! v2: Stick to setting ce.inflight=NULL before kicking as this is what the other threads will check to see if the context is ready for takeover. Fixes: 22b7a426bbe1 ("drm/i915/execlists: Preempt-to-busy") Signed-off-by: Chris Wilson Cc: Mika Kuoppala Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190621080729.2652-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_lrc.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index f8ad49006917..faa9a2e4e40b 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -557,9 +557,6 @@ execlists_schedule_out(struct i915_request *rq) intel_engine_context_out(ce->inflight); execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT); - ce->inflight = NULL; - intel_context_put(ce); - /* * If this is part of a virtual engine, its next request may * have been blocked waiting for access to the active context. @@ -569,8 +566,11 @@ execlists_schedule_out(struct i915_request *rq) * request before the tasklet runs and do not need to rebuild * each virtual tree and kick everyone again. */ + ce->inflight = NULL; if (rq->engine != ce->engine) kick_siblings(rq, ce); + + intel_context_put(ce); } i915_request_put(rq); -- cgit v1.2.3 From 3cb4ce0024f1e24f7da96ed1d02430c407de13fa Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 21 Jun 2019 08:07:39 +0100 Subject: drm/i915: Convert intel_vgt_(de)balloon to uncore More removal of implicit dev_priv from using old mmio accessors. 
Furthermore these calls really operate on ggtt so it logically makes sense if they take it as parameter. Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190621070811.7006-1-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_gem_gtt.c | 4 ++-- drivers/gpu/drm/i915/i915_vgpu.c | 24 ++++++++++++++---------- drivers/gpu/drm/i915/i915_vgpu.h | 4 ++-- 3 files changed, 18 insertions(+), 14 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 5b858f675f29..685505f45991 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2811,7 +2811,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv) ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE, intel_wopcm_guc_size(&dev_priv->wopcm)); - ret = intel_vgt_balloon(dev_priv); + ret = intel_vgt_balloon(ggtt); if (ret) return ret; @@ -2882,7 +2882,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv) ggtt_release_guc_top(ggtt); if (drm_mm_initialized(&ggtt->vm.mm)) { - intel_vgt_deballoon(dev_priv); + intel_vgt_deballoon(ggtt); i915_address_space_fini(&ggtt->vm); } diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c index 1a8f7c731126..9916bc6159b6 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.c +++ b/drivers/gpu/drm/i915/i915_vgpu.c @@ -136,17 +136,17 @@ static void vgt_deballoon_space(struct i915_ggtt *ggtt, * This function is called to deallocate the ballooned-out graphic memory, when * driver is unloaded or when ballooning fails. */ -void intel_vgt_deballoon(struct drm_i915_private *dev_priv) +void intel_vgt_deballoon(struct i915_ggtt *ggtt) { int i; - if (!intel_vgpu_active(dev_priv)) + if (!intel_vgpu_active(ggtt->vm.i915)) return; DRM_DEBUG("VGT deballoon.\n"); for (i = 0; i < 4; i++) - vgt_deballoon_space(&dev_priv->ggtt, &bl_info.space[i]); + vgt_deballoon_space(ggtt, &bl_info.space[i]); } static int vgt_balloon_space(struct i915_ggtt *ggtt, @@ -214,22 +214,26 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt, * Returns: * zero on success, non-zero if configuration invalid or ballooning failed */ -int intel_vgt_balloon(struct drm_i915_private *dev_priv) +int intel_vgt_balloon(struct i915_ggtt *ggtt) { - struct i915_ggtt *ggtt = &dev_priv->ggtt; + struct intel_uncore *uncore = &ggtt->vm.i915->uncore; unsigned long ggtt_end = ggtt->vm.total; unsigned long mappable_base, mappable_size, mappable_end; unsigned long unmappable_base, unmappable_size, unmappable_end; int ret; - if (!intel_vgpu_active(dev_priv)) + if (!intel_vgpu_active(ggtt->vm.i915)) return 0; - mappable_base = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.base)); - mappable_size = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.size)); - unmappable_base = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.base)); - unmappable_size = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.size)); + mappable_base = + intel_uncore_read(uncore, vgtif_reg(avail_rs.mappable_gmadr.base)); + mappable_size = + intel_uncore_read(uncore, vgtif_reg(avail_rs.mappable_gmadr.size)); + unmappable_base = + intel_uncore_read(uncore, vgtif_reg(avail_rs.nonmappable_gmadr.base)); + unmappable_size = + intel_uncore_read(uncore, vgtif_reg(avail_rs.nonmappable_gmadr.size)); mappable_end = mappable_base + mappable_size; unmappable_end = unmappable_base + unmappable_size; diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h index a919735fb6ce..8dd7497eda15 100644 
--- a/drivers/gpu/drm/i915/i915_vgpu.h +++ b/drivers/gpu/drm/i915/i915_vgpu.h @@ -42,7 +42,7 @@ intel_vgpu_has_huge_gtt(struct drm_i915_private *dev_priv) return dev_priv->vgpu.caps & VGT_CAPS_HUGE_GTT; } -int intel_vgt_balloon(struct drm_i915_private *dev_priv); -void intel_vgt_deballoon(struct drm_i915_private *dev_priv); +int intel_vgt_balloon(struct i915_ggtt *ggtt); +void intel_vgt_deballoon(struct i915_ggtt *ggtt); #endif /* _I915_VGPU_H_ */ -- cgit v1.2.3 From e5be5c7a8ffa8471fb31554cf56f3efd11f7e454 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 21 Jun 2019 08:07:40 +0100 Subject: drm/i915: Introduce struct intel_gt as replacement for anonymous i915->gt We have long been slighlty annoyed by the anonymous i915->gt. Promote it to a separate structure and give it its own header. This is a first step towards cleaning up the separation between i915 and gt. v2: * Adjust SPDX header. Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190621070811.7006-2-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/gt/intel_gt_types.h | 52 ++++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/i915_drv.h | 34 ++------------------- 2 files changed, 54 insertions(+), 32 deletions(-) create mode 100644 drivers/gpu/drm/i915/gt/intel_gt_types.h (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h new file mode 100644 index 000000000000..72e94aba8b2a --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef __INTEL_GT_TYPES__ +#define __INTEL_GT_TYPES__ + +#include +#include +#include +#include +#include +#include + +#include "i915_vma.h" +#include "intel_wakeref.h" + +struct intel_gt { + struct i915_gt_timelines { + struct mutex mutex; /* protects list, tainted by GPU */ + struct list_head active_list; + + /* Pack multiple timelines' seqnos into the same page */ + spinlock_t hwsp_lock; + struct list_head hwsp_free_list; + } timelines; + + struct list_head active_rings; + + struct intel_wakeref wakeref; + + struct list_head closed_vma; + spinlock_t closed_lock; /* guards the list of closed_vma */ + + /** + * Is the GPU currently considered idle, or busy executing + * userspace requests? Whilst idle, we allow runtime power + * management to power down the hardware and display clocks. + * In order to reduce the effect on performance, there + * is a slight delay before we do so. 
+ */ + intel_wakeref_t awake; + + struct blocking_notifier_head pm_notifications; + + ktime_t last_init_time; + + struct i915_vma *scratch; +}; + +#endif /* __INTEL_GT_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 2734f62ab19a..3509eb88b452 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -72,6 +72,7 @@ #include "gt/intel_lrc.h" #include "gt/intel_engine.h" +#include "gt/intel_gt_types.h" #include "gt/intel_workarounds.h" #include "intel_device_info.h" @@ -1824,38 +1825,7 @@ struct drm_i915_private { } perf; /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */ - struct { - struct i915_gt_timelines { - struct mutex mutex; /* protects list, tainted by GPU */ - struct list_head active_list; - - /* Pack multiple timelines' seqnos into the same page */ - spinlock_t hwsp_lock; - struct list_head hwsp_free_list; - } timelines; - - struct list_head active_rings; - - struct intel_wakeref wakeref; - - struct list_head closed_vma; - spinlock_t closed_lock; /* guards the list of closed_vma */ - - /** - * Is the GPU currently considered idle, or busy executing - * userspace requests? Whilst idle, we allow runtime power - * management to power down the hardware and display clocks. - * In order to reduce the effect on performance, there - * is a slight delay before we do so. - */ - intel_wakeref_t awake; - - struct blocking_notifier_head pm_notifications; - - ktime_t last_init_time; - - struct i915_vma *scratch; - } gt; + struct intel_gt gt; struct { struct notifier_block pm_notifier; -- cgit v1.2.3 From 24635c5152af5a6b79eec17d721685277f8d4cfc Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 21 Jun 2019 08:07:41 +0100 Subject: drm/i915: Move intel_gt initialization to a separate file As it will grow in a following patch make a new home for it. v2: * Convert mock_gem_device as well. (Chris) v3: * Rename to intel_gt_init_early and move call site to i915_drv.c. (Chris) v4: * Adjust SPDX tags. * No need to gt/ path when including intel_gt_types.h. 
(Chris) Signed-off-by: Tvrtko Ursulin Reviewed-by: Rodrigo Vivi Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190621070811.7006-3-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/Makefile | 1 + drivers/gpu/drm/i915/gt/intel_gt.c | 14 ++++++++++++++ drivers/gpu/drm/i915/gt/intel_gt.h | 13 +++++++++++++ drivers/gpu/drm/i915/i915_drv.c | 3 +++ drivers/gpu/drm/i915/i915_gem.c | 4 ---- drivers/gpu/drm/i915/selftests/mock_gem_device.c | 6 ++---- 6 files changed, 33 insertions(+), 8 deletions(-) create mode 100644 drivers/gpu/drm/i915/gt/intel_gt.c create mode 100644 drivers/gpu/drm/i915/gt/intel_gt.h (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 91355c2ea8a5..19f8b6745772 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -74,6 +74,7 @@ gt-y += \ gt/intel_context.o \ gt/intel_engine_cs.o \ gt/intel_engine_pm.o \ + gt/intel_gt.o \ gt/intel_gt_pm.o \ gt/intel_hangcheck.o \ gt/intel_lrc.o \ diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c new file mode 100644 index 000000000000..10adee1120e6 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2019 Intel Corporation + */ + +#include "intel_gt.h" + +void intel_gt_init_early(struct intel_gt *gt) +{ + INIT_LIST_HEAD(>->active_rings); + INIT_LIST_HEAD(>->closed_vma); + + spin_lock_init(>->closed_lock); +} diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h new file mode 100644 index 000000000000..63284ca81d69 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_gt.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef __INTEL_GT__ +#define __INTEL_GT__ + +#include "intel_gt_types.h" + +void intel_gt_init_early(struct intel_gt *gt); + +#endif /* __INTEL_GT_H__ */ diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 8f84ed26e972..961f76d3efa4 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -61,6 +61,7 @@ #include "gem/i915_gem_context.h" #include "gem/i915_gem_ioctls.h" +#include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" #include "gt/intel_reset.h" #include "gt/intel_workarounds.h" @@ -922,6 +923,8 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv) if (ret < 0) goto err_engines; + intel_gt_init_early(&dev_priv->gt); + ret = i915_gem_init_early(dev_priv); if (ret < 0) goto err_workqueues; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 190ad54fb072..fc8fd41dd32e 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1748,10 +1748,6 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv) intel_gt_pm_init(dev_priv); - INIT_LIST_HEAD(&dev_priv->gt.active_rings); - INIT_LIST_HEAD(&dev_priv->gt.closed_vma); - spin_lock_init(&dev_priv->gt.closed_lock); - i915_gem_init__mm(dev_priv); i915_gem_init__pm(dev_priv); diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 64bc51400ae7..8dc9296892e6 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -25,6 +25,7 @@ #include #include +#include "gt/intel_gt.h" #include "gt/mock_engine.h" #include "mock_request.h" @@ -179,6 +180,7 @@ struct drm_i915_private *mock_gem_device(void) 
mock_uncore_init(&i915->uncore); i915_gem_init__mm(i915); + intel_gt_init_early(&i915->gt); intel_gt_pm_init(i915); atomic_inc(&i915->gt.wakeref.count); /* disable; no hw support */ @@ -200,10 +202,6 @@ struct drm_i915_private *mock_gem_device(void) i915_timelines_init(i915); - INIT_LIST_HEAD(&i915->gt.active_rings); - INIT_LIST_HEAD(&i915->gt.closed_vma); - spin_lock_init(&i915->gt.closed_lock); - mutex_lock(&i915->drm.struct_mutex); mock_init_ggtt(i915, &i915->ggtt); -- cgit v1.2.3 From 724e9564c5acd40c422db2ccb57b1f66433021aa Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 21 Jun 2019 08:07:42 +0100 Subject: drm/i915: Store some backpointers in struct intel_gt We need an easy way to get back to i915 and uncore. Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190621070811.7006-4-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/gt/intel_gt.c | 7 ++++++- drivers/gpu/drm/i915/gt/intel_gt.h | 4 +++- drivers/gpu/drm/i915/gt/intel_gt_types.h | 6 ++++++ drivers/gpu/drm/i915/i915_drv.c | 2 +- drivers/gpu/drm/i915/selftests/mock_gem_device.c | 2 +- 5 files changed, 17 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index 10adee1120e6..eccb52cf37b6 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -3,10 +3,15 @@ * Copyright © 2019 Intel Corporation */ +#include "i915_drv.h" + #include "intel_gt.h" -void intel_gt_init_early(struct intel_gt *gt) +void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915) { + gt->i915 = i915; + gt->uncore = &i915->uncore; + INIT_LIST_HEAD(>->active_rings); INIT_LIST_HEAD(>->closed_vma); diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h index 63284ca81d69..0dd218e53368 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.h +++ b/drivers/gpu/drm/i915/gt/intel_gt.h @@ -8,6 +8,8 @@ #include "intel_gt_types.h" -void intel_gt_init_early(struct intel_gt *gt); +struct drm_i915_private; + +void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915); #endif /* __INTEL_GT_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h index 72e94aba8b2a..86a4985f8e84 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h @@ -16,7 +16,13 @@ #include "i915_vma.h" #include "intel_wakeref.h" +struct drm_i915_private; +struct intel_uncore; + struct intel_gt { + struct drm_i915_private *i915; + struct intel_uncore *uncore; + struct i915_gt_timelines { struct mutex mutex; /* protects list, tainted by GPU */ struct list_head active_list; diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 961f76d3efa4..df11a0d6d52d 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -923,7 +923,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv) if (ret < 0) goto err_engines; - intel_gt_init_early(&dev_priv->gt); + intel_gt_init_early(&dev_priv->gt, dev_priv); ret = i915_gem_init_early(dev_priv); if (ret < 0) diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 8dc9296892e6..2b87200477f6 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -180,7 +180,7 @@ struct drm_i915_private *mock_gem_device(void) 
mock_uncore_init(&i915->uncore); i915_gem_init__mm(i915); - intel_gt_init_early(&i915->gt); + intel_gt_init_early(&i915->gt, i915); intel_gt_pm_init(i915); atomic_inc(&i915->gt.wakeref.count); /* disable; no hw support */ -- cgit v1.2.3 From 99f2eb966749eb3e7523323a2f09b2f677d3a2d0 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 21 Jun 2019 08:07:43 +0100 Subject: drm/i915: Move intel_gt_pm_init under intel_gt_init_early And also rename to intel_gt_pm_init_early and make it operate on gt. Signed-off-by: Tvrtko Ursulin Suggested-by: Chris Wilson Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190621070811.7006-5-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/gt/intel_gt.c | 3 +++ drivers/gpu/drm/i915/gt/intel_gt_pm.c | 6 +++--- drivers/gpu/drm/i915/gt/intel_gt_pm.h | 3 ++- drivers/gpu/drm/i915/i915_gem.c | 2 -- drivers/gpu/drm/i915/selftests/mock_gem_device.c | 1 - 5 files changed, 8 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index eccb52cf37b6..f760c2e0253e 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -6,6 +6,7 @@ #include "i915_drv.h" #include "intel_gt.h" +#include "intel_gt_pm.h" void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915) { @@ -16,4 +17,6 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915) INIT_LIST_HEAD(>->closed_vma); spin_lock_init(>->closed_lock); + + intel_gt_pm_init_early(gt); } diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index 7b5967751762..6062840b5b46 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -80,10 +80,10 @@ void intel_gt_pm_put(struct drm_i915_private *i915) intel_wakeref_put(&i915->runtime_pm, &i915->gt.wakeref, intel_gt_park); } -void intel_gt_pm_init(struct drm_i915_private *i915) +void intel_gt_pm_init_early(struct intel_gt *gt) { - intel_wakeref_init(&i915->gt.wakeref); - BLOCKING_INIT_NOTIFIER_HEAD(&i915->gt.pm_notifications); + intel_wakeref_init(>->wakeref); + BLOCKING_INIT_NOTIFIER_HEAD(>->pm_notifications); } static bool reset_engines(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h index 7dd1130a19a4..b6049a907890 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h @@ -10,6 +10,7 @@ #include struct drm_i915_private; +struct intel_gt; enum { INTEL_GT_UNPARK, @@ -19,7 +20,7 @@ enum { void intel_gt_pm_get(struct drm_i915_private *i915); void intel_gt_pm_put(struct drm_i915_private *i915); -void intel_gt_pm_init(struct drm_i915_private *i915); +void intel_gt_pm_init_early(struct intel_gt *gt); void intel_gt_sanitize(struct drm_i915_private *i915, bool force); void intel_gt_resume(struct drm_i915_private *i915); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index fc8fd41dd32e..9de1b1e88949 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1746,8 +1746,6 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv) { int err; - intel_gt_pm_init(dev_priv); - i915_gem_init__mm(dev_priv); i915_gem_init__pm(dev_priv); diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 2b87200477f6..021ba42a3a00 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ 
b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -181,7 +181,6 @@ struct drm_i915_private *mock_gem_device(void) mock_uncore_init(&i915->uncore); i915_gem_init__mm(i915); intel_gt_init_early(&i915->gt, i915); - intel_gt_pm_init(i915); atomic_inc(&i915->gt.wakeref.count); /* disable; no hw support */ init_waitqueue_head(&i915->gpu_error.wait_queue); -- cgit v1.2.3 From eaf522f62b7454c4c040c4672c36ad0f4cb21fcd Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 21 Jun 2019 08:07:44 +0100 Subject: drm/i915: Make i915_check_and_clear_faults take intel_gt Continuing the conversion and elimination of implicit dev_priv. Signed-off-by: Tvrtko Ursulin Suggested-by: Rodrigo Vivi Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190621070811.7006-6-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 4 +- drivers/gpu/drm/i915/gt/intel_gt.c | 130 ++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/gt/intel_gt.h | 5 ++ drivers/gpu/drm/i915/gt/intel_reset.c | 126 +---------------------------- drivers/gpu/drm/i915/gt/intel_reset.h | 2 - drivers/gpu/drm/i915/i915_drv.c | 2 +- drivers/gpu/drm/i915/i915_gem_gtt.c | 5 +- 7 files changed, 144 insertions(+), 130 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index d45328e254dc..5e4bf738ee59 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -28,6 +28,8 @@ #include "i915_drv.h" +#include "gt/intel_gt.h" + #include "intel_engine.h" #include "intel_engine_pm.h" #include "intel_context.h" @@ -453,7 +455,7 @@ int intel_engines_init_mmio(struct drm_i915_private *i915) RUNTIME_INFO(i915)->num_engines = hweight32(mask); - i915_check_and_clear_faults(i915); + intel_gt_check_and_clear_faults(&i915->gt); intel_setup_engine_capabilities(i915); diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index f760c2e0253e..5c9be38713db 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -7,6 +7,7 @@ #include "intel_gt.h" #include "intel_gt_pm.h" +#include "intel_uncore.h" void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915) { @@ -20,3 +21,132 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915) intel_gt_pm_init_early(gt); } + +static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set) +{ + intel_uncore_rmw(uncore, reg, 0, set); +} + +static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr) +{ + intel_uncore_rmw(uncore, reg, clr, 0); +} + +static void clear_register(struct intel_uncore *uncore, i915_reg_t reg) +{ + intel_uncore_rmw(uncore, reg, 0, 0); +} + +static void gen8_clear_engine_error_register(struct intel_engine_cs *engine) +{ + GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0); + GEN6_RING_FAULT_REG_POSTING_READ(engine); +} + +void +intel_gt_clear_error_registers(struct intel_gt *gt, + intel_engine_mask_t engine_mask) +{ + struct drm_i915_private *i915 = gt->i915; + struct intel_uncore *uncore = gt->uncore; + u32 eir; + + if (!IS_GEN(i915, 2)) + clear_register(uncore, PGTBL_ER); + + if (INTEL_GEN(i915) < 4) + clear_register(uncore, IPEIR(RENDER_RING_BASE)); + else + clear_register(uncore, IPEIR_I965); + + clear_register(uncore, EIR); + eir = intel_uncore_read(uncore, EIR); + if (eir) { + /* + * some errors might have become stuck, + * mask them. 
+ */ + DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir); + rmw_set(uncore, EMR, eir); + intel_uncore_write(uncore, GEN2_IIR, + I915_MASTER_ERROR_INTERRUPT); + } + + if (INTEL_GEN(i915) >= 8) { + rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID); + intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG); + } else if (INTEL_GEN(i915) >= 6) { + struct intel_engine_cs *engine; + enum intel_engine_id id; + + for_each_engine_masked(engine, i915, engine_mask, id) + gen8_clear_engine_error_register(engine); + } +} + +static void gen6_check_faults(struct intel_gt *gt) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + u32 fault; + + for_each_engine(engine, gt->i915, id) { + fault = GEN6_RING_FAULT_REG_READ(engine); + if (fault & RING_FAULT_VALID) { + DRM_DEBUG_DRIVER("Unexpected fault\n" + "\tAddr: 0x%08lx\n" + "\tAddress space: %s\n" + "\tSource ID: %d\n" + "\tType: %d\n", + fault & PAGE_MASK, + fault & RING_FAULT_GTTSEL_MASK ? + "GGTT" : "PPGTT", + RING_FAULT_SRCID(fault), + RING_FAULT_FAULT_TYPE(fault)); + } + } +} + +static void gen8_check_faults(struct intel_gt *gt) +{ + struct intel_uncore *uncore = gt->uncore; + u32 fault = intel_uncore_read(uncore, GEN8_RING_FAULT_REG); + + if (fault & RING_FAULT_VALID) { + u32 fault_data0, fault_data1; + u64 fault_addr; + + fault_data0 = intel_uncore_read(uncore, GEN8_FAULT_TLB_DATA0); + fault_data1 = intel_uncore_read(uncore, GEN8_FAULT_TLB_DATA1); + fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) | + ((u64)fault_data0 << 12); + + DRM_DEBUG_DRIVER("Unexpected fault\n" + "\tAddr: 0x%08x_%08x\n" + "\tAddress space: %s\n" + "\tEngine ID: %d\n" + "\tSource ID: %d\n" + "\tType: %d\n", + upper_32_bits(fault_addr), + lower_32_bits(fault_addr), + fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT", + GEN8_RING_FAULT_ENGINE_ID(fault), + RING_FAULT_SRCID(fault), + RING_FAULT_FAULT_TYPE(fault)); + } +} + +void intel_gt_check_and_clear_faults(struct intel_gt *gt) +{ + struct drm_i915_private *i915 = gt->i915; + + /* From GEN8 onwards we only have one 'All Engine Fault Register' */ + if (INTEL_GEN(i915) >= 8) + gen8_check_faults(gt); + else if (INTEL_GEN(i915) >= 6) + gen6_check_faults(gt); + else + return; + + intel_gt_clear_error_registers(gt, ALL_ENGINES); +} diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h index 0dd218e53368..033713b684bf 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.h +++ b/drivers/gpu/drm/i915/gt/intel_gt.h @@ -6,10 +6,15 @@ #ifndef __INTEL_GT__ #define __INTEL_GT__ +#include "intel_engine_types.h" #include "intel_gt_types.h" struct drm_i915_private; void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915); +void intel_gt_check_and_clear_faults(struct intel_gt *gt); +void intel_gt_clear_error_registers(struct intel_gt *gt, + intel_engine_mask_t engine_mask); + #endif /* __INTEL_GT_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 4c478b38e420..5297b3acb56d 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -15,6 +15,7 @@ #include "i915_gpu_error.h" #include "i915_irq.h" #include "intel_engine_pm.h" +#include "intel_gt.h" #include "intel_gt_pm.h" #include "intel_reset.h" @@ -25,16 +26,6 @@ /* XXX How to handle concurrent GGTT updates using tiling registers? 
*/ #define RESET_UNDER_STOP_MACHINE 0 -static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set) -{ - intel_uncore_rmw(uncore, reg, 0, set); -} - -static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr) -{ - intel_uncore_rmw(uncore, reg, clr, 0); -} - static void rmw_set_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 set) { intel_uncore_rmw_fw(uncore, reg, 0, set); @@ -1157,119 +1148,6 @@ static void i915_reset_device(struct drm_i915_private *i915, kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event); } -static void clear_register(struct intel_uncore *uncore, i915_reg_t reg) -{ - intel_uncore_rmw(uncore, reg, 0, 0); -} - -static void gen8_clear_engine_error_register(struct intel_engine_cs *engine) -{ - GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0); - GEN6_RING_FAULT_REG_POSTING_READ(engine); -} - -static void clear_error_registers(struct drm_i915_private *i915, - intel_engine_mask_t engine_mask) -{ - struct intel_uncore *uncore = &i915->uncore; - u32 eir; - - if (!IS_GEN(i915, 2)) - clear_register(uncore, PGTBL_ER); - - if (INTEL_GEN(i915) < 4) - clear_register(uncore, IPEIR(RENDER_RING_BASE)); - else - clear_register(uncore, IPEIR_I965); - - clear_register(uncore, EIR); - eir = intel_uncore_read(uncore, EIR); - if (eir) { - /* - * some errors might have become stuck, - * mask them. - */ - DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir); - rmw_set(uncore, EMR, eir); - intel_uncore_write(uncore, GEN2_IIR, - I915_MASTER_ERROR_INTERRUPT); - } - - if (INTEL_GEN(i915) >= 8) { - rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID); - intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG); - } else if (INTEL_GEN(i915) >= 6) { - struct intel_engine_cs *engine; - enum intel_engine_id id; - - for_each_engine_masked(engine, i915, engine_mask, id) - gen8_clear_engine_error_register(engine); - } -} - -static void gen6_check_faults(struct drm_i915_private *dev_priv) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - u32 fault; - - for_each_engine(engine, dev_priv, id) { - fault = GEN6_RING_FAULT_REG_READ(engine); - if (fault & RING_FAULT_VALID) { - DRM_DEBUG_DRIVER("Unexpected fault\n" - "\tAddr: 0x%08lx\n" - "\tAddress space: %s\n" - "\tSource ID: %d\n" - "\tType: %d\n", - fault & PAGE_MASK, - fault & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT", - RING_FAULT_SRCID(fault), - RING_FAULT_FAULT_TYPE(fault)); - } - } -} - -static void gen8_check_faults(struct drm_i915_private *dev_priv) -{ - u32 fault = I915_READ(GEN8_RING_FAULT_REG); - - if (fault & RING_FAULT_VALID) { - u32 fault_data0, fault_data1; - u64 fault_addr; - - fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0); - fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1); - fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) | - ((u64)fault_data0 << 12); - - DRM_DEBUG_DRIVER("Unexpected fault\n" - "\tAddr: 0x%08x_%08x\n" - "\tAddress space: %s\n" - "\tEngine ID: %d\n" - "\tSource ID: %d\n" - "\tType: %d\n", - upper_32_bits(fault_addr), - lower_32_bits(fault_addr), - fault_data1 & FAULT_GTT_SEL ? 
"GGTT" : "PPGTT", - GEN8_RING_FAULT_ENGINE_ID(fault), - RING_FAULT_SRCID(fault), - RING_FAULT_FAULT_TYPE(fault)); - } -} - -void i915_check_and_clear_faults(struct drm_i915_private *i915) -{ - /* From GEN8 onwards we only have one 'All Engine Fault Register' */ - if (INTEL_GEN(i915) >= 8) - gen8_check_faults(i915); - else if (INTEL_GEN(i915) >= 6) - gen6_check_faults(i915); - else - return; - - clear_error_registers(i915, ALL_ENGINES); -} - /** * i915_handle_error - handle a gpu error * @i915: i915 device private @@ -1318,7 +1196,7 @@ void i915_handle_error(struct drm_i915_private *i915, if (flags & I915_ERROR_CAPTURE) { i915_capture_error_state(i915, engine_mask, msg); - clear_error_registers(i915, engine_mask); + intel_gt_clear_error_registers(&i915->gt, engine_mask); } /* diff --git a/drivers/gpu/drm/i915/gt/intel_reset.h b/drivers/gpu/drm/i915/gt/intel_reset.h index 580ebdb59eca..03fba0ab3868 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.h +++ b/drivers/gpu/drm/i915/gt/intel_reset.h @@ -25,8 +25,6 @@ void i915_handle_error(struct drm_i915_private *i915, const char *fmt, ...); #define I915_ERROR_CAPTURE BIT(0) -void i915_check_and_clear_faults(struct drm_i915_private *i915); - void i915_reset(struct drm_i915_private *i915, intel_engine_mask_t stalled_mask, const char *reason); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index df11a0d6d52d..ef4100ed49fd 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -2361,7 +2361,7 @@ static int i915_drm_resume_early(struct drm_device *dev) intel_uncore_resume_early(&dev_priv->uncore); - i915_check_and_clear_faults(dev_priv); + intel_gt_check_and_clear_faults(&dev_priv->gt); if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv)) { gen9_sanitize_dc_state(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 685505f45991..697e8511ab24 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -36,6 +36,7 @@ #include #include "display/intel_frontbuffer.h" +#include "gt/intel_gt.h" #include "i915_drv.h" #include "i915_scatterlist.h" @@ -2263,7 +2264,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv) if (INTEL_GEN(dev_priv) < 6) return; - i915_check_and_clear_faults(dev_priv); + intel_gt_check_and_clear_faults(&dev_priv->gt); ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); @@ -3572,7 +3573,7 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv) struct i915_ggtt *ggtt = &dev_priv->ggtt; struct i915_vma *vma, *vn; - i915_check_and_clear_faults(dev_priv); + intel_gt_check_and_clear_faults(&dev_priv->gt); mutex_lock(&ggtt->vm.mutex); -- cgit v1.2.3 From 500bfa380e112f744f8ae316c88c0e849833d804 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 21 Jun 2019 08:07:45 +0100 Subject: drm/i915: Convert i915_gem_init_swizzling to intel_gt Start using the newly introduced struct intel_gt to fuse together correct logical init flow with uncore for more removal of implicit dev_priv in mmio access. v2: * Move code to i915_gem_fence_reg. 
(Chris) Signed-off-by: Tvrtko Ursulin Reviewed-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20190621070811.7006-7-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_drv.c | 4 ++-- drivers/gpu/drm/i915/i915_drv.h | 1 - drivers/gpu/drm/i915/i915_gem.c | 25 +-------------------- drivers/gpu/drm/i915/i915_gem_fence_reg.c | 37 +++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/i915_gem_fence_reg.h | 3 +++ 5 files changed, 43 insertions(+), 27 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index ef4100ed49fd..c02fdefcb356 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -2957,7 +2957,7 @@ static int intel_runtime_suspend(struct device *kdev) intel_uc_resume(dev_priv); - i915_gem_init_swizzling(dev_priv); + intel_gt_init_swizzling(&dev_priv->gt); i915_gem_restore_fences(dev_priv); enable_rpm_wakeref_asserts(rpm); @@ -3059,7 +3059,7 @@ static int intel_runtime_resume(struct device *kdev) * No point of rolling back things in case of an error, as the best * we can do is to hope that things will still work (and disable RPM). */ - i915_gem_init_swizzling(dev_priv); + intel_gt_init_swizzling(&dev_priv->gt); i915_gem_restore_fences(dev_priv); /* diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 3509eb88b452..44196bed4d81 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2537,7 +2537,6 @@ bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv); void i915_gem_init_mmio(struct drm_i915_private *i915); int __must_check i915_gem_init(struct drm_i915_private *dev_priv); int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv); -void i915_gem_init_swizzling(struct drm_i915_private *dev_priv); void i915_gem_fini_hw(struct drm_i915_private *dev_priv); void i915_gem_fini(struct drm_i915_private *dev_priv); int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 9de1b1e88949..30da603ee7fd 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1202,29 +1202,6 @@ void i915_gem_sanitize(struct drm_i915_private *i915) intel_runtime_pm_put(&i915->runtime_pm, wakeref); } -void i915_gem_init_swizzling(struct drm_i915_private *dev_priv) -{ - if (INTEL_GEN(dev_priv) < 5 || - dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE) - return; - - I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | - DISP_TILE_SURFACE_SWIZZLING); - - if (IS_GEN(dev_priv, 5)) - return; - - I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL); - if (IS_GEN(dev_priv, 6)) - I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB)); - else if (IS_GEN(dev_priv, 7)) - I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB)); - else if (IS_GEN(dev_priv, 8)) - I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW)); - else - BUG(); -} - static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base) { I915_WRITE(RING_CTL(base), 0); @@ -1271,7 +1248,7 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv) /* ...and determine whether they are sticking. 
*/ intel_gt_verify_workarounds(dev_priv, "init"); - i915_gem_init_swizzling(dev_priv); + intel_gt_init_swizzling(&dev_priv->gt); /* * At least 830 can leave some of the unused rings diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c index 0bf53ac1c835..dd09790c420d 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c +++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c @@ -834,3 +834,40 @@ void i915_ggtt_init_fences(struct i915_ggtt *ggtt) i915_gem_restore_fences(i915); } + +void intel_gt_init_swizzling(struct intel_gt *gt) +{ + struct drm_i915_private *i915 = gt->i915; + struct intel_uncore *uncore = gt->uncore; + + if (INTEL_GEN(i915) < 5 || + i915->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE) + return; + + intel_uncore_write(uncore, + DISP_ARB_CTL, + intel_uncore_read(uncore, DISP_ARB_CTL) | + DISP_TILE_SURFACE_SWIZZLING); + + if (IS_GEN(i915, 5)) + return; + + intel_uncore_write(uncore, + TILECTL, + intel_uncore_read(uncore, TILECTL) | TILECTL_SWZCTL); + + if (IS_GEN(i915, 6)) + intel_uncore_write(uncore, + ARB_MODE, + _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB)); + else if (IS_GEN(i915, 7)) + intel_uncore_write(uncore, + ARB_MODE, + _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB)); + else if (IS_GEN(i915, 8)) + intel_uncore_write(uncore, + GAMTARBMODE, + _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW)); + else + MISSING_CASE(INTEL_GEN(i915)); +} diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.h b/drivers/gpu/drm/i915/i915_gem_fence_reg.h index d2da98828179..37e4f104f7c0 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence_reg.h +++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.h @@ -32,6 +32,7 @@ struct drm_i915_gem_object; struct drm_i915_private; struct i915_ggtt; struct i915_vma; +struct intel_gt; struct sg_table; #define I965_FENCE_PAGE 4096UL @@ -66,4 +67,6 @@ void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj, void i915_ggtt_init_fences(struct i915_ggtt *ggtt); +void intel_gt_init_swizzling(struct intel_gt *gt); + #endif -- cgit v1.2.3 From f88709bd1c5e2bc9fb62b1b3151fae1ef4f7a2c5 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 21 Jun 2019 08:07:46 +0100 Subject: drm/i915: Use intel_uncore_rmw in intel_gt_init_swizzling Two easy opportunities to compact the code by using the existing helper. 
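For readers not familiar with the helper, the read-modify-write shape it encapsulates can be sketched in plain C as below. This is an illustrative user-space analogue under stated assumptions: fake_reg_t and rmw() are stand-ins rather than driver API; the real intel_uncore_rmw(uncore, reg, clear, set) goes through the uncore MMIO accessors but takes the same (clear, set) pair.

#include <stdint.h>

/* Stand-in for a memory-mapped register; the driver instead goes through
 * intel_uncore_read()/intel_uncore_write() on an i915_reg_t. */
typedef volatile uint32_t fake_reg_t;

/* Clear the bits in 'clear', then OR in 'set'.  An open-coded pair such as
 * "val = read(reg); write(reg, val | FLAG);" collapses into a single
 * rmw(reg, 0, FLAG) call of this shape. */
static inline void rmw(fake_reg_t *reg, uint32_t clear, uint32_t set)
{
        uint32_t val = *reg;

        val &= ~clear;
        val |= set;
        *reg = val;
}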
Signed-off-by: Tvrtko Ursulin Suggested-by: Rodrigo Vivi Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190621070811.7006-8-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_gem_fence_reg.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c index dd09790c420d..bcac359ec661 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c +++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c @@ -844,17 +844,12 @@ void intel_gt_init_swizzling(struct intel_gt *gt) i915->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE) return; - intel_uncore_write(uncore, - DISP_ARB_CTL, - intel_uncore_read(uncore, DISP_ARB_CTL) | - DISP_TILE_SURFACE_SWIZZLING); + intel_uncore_rmw(uncore, DISP_ARB_CTL, 0, DISP_TILE_SURFACE_SWIZZLING); if (IS_GEN(i915, 5)) return; - intel_uncore_write(uncore, - TILECTL, - intel_uncore_read(uncore, TILECTL) | TILECTL_SWZCTL); + intel_uncore_rmw(uncore, TILECTL, 0, TILECTL_SWZCTL); if (IS_GEN(i915, 6)) intel_uncore_write(uncore, -- cgit v1.2.3 From cf6844b234e5832c3ab4d8ad76fee433905a1de6 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 21 Jun 2019 08:07:47 +0100 Subject: drm/i915: Convert init_unused_rings to intel_gt More removal of implicit dev_priv from using old mmio accessors. Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190621070811.7006-9-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_gem.c | 42 ++++++++++++++++++++++------------------- 1 file changed, 23 insertions(+), 19 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 30da603ee7fd..6f96d360e8d5 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1202,28 +1202,32 @@ void i915_gem_sanitize(struct drm_i915_private *i915) intel_runtime_pm_put(&i915->runtime_pm, wakeref); } -static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base) +static void init_unused_ring(struct intel_gt *gt, u32 base) { - I915_WRITE(RING_CTL(base), 0); - I915_WRITE(RING_HEAD(base), 0); - I915_WRITE(RING_TAIL(base), 0); - I915_WRITE(RING_START(base), 0); + struct intel_uncore *uncore = gt->uncore; + + intel_uncore_write(uncore, RING_CTL(base), 0); + intel_uncore_write(uncore, RING_HEAD(base), 0); + intel_uncore_write(uncore, RING_TAIL(base), 0); + intel_uncore_write(uncore, RING_START(base), 0); } -static void init_unused_rings(struct drm_i915_private *dev_priv) +static void init_unused_rings(struct intel_gt *gt) { - if (IS_I830(dev_priv)) { - init_unused_ring(dev_priv, PRB1_BASE); - init_unused_ring(dev_priv, SRB0_BASE); - init_unused_ring(dev_priv, SRB1_BASE); - init_unused_ring(dev_priv, SRB2_BASE); - init_unused_ring(dev_priv, SRB3_BASE); - } else if (IS_GEN(dev_priv, 2)) { - init_unused_ring(dev_priv, SRB0_BASE); - init_unused_ring(dev_priv, SRB1_BASE); - } else if (IS_GEN(dev_priv, 3)) { - init_unused_ring(dev_priv, PRB1_BASE); - init_unused_ring(dev_priv, PRB2_BASE); + struct drm_i915_private *i915 = gt->i915; + + if (IS_I830(i915)) { + init_unused_ring(gt, PRB1_BASE); + init_unused_ring(gt, SRB0_BASE); + init_unused_ring(gt, SRB1_BASE); + init_unused_ring(gt, SRB2_BASE); + init_unused_ring(gt, SRB3_BASE); + } else if (IS_GEN(i915, 2)) { + init_unused_ring(gt, SRB0_BASE); + init_unused_ring(gt, SRB1_BASE); + } else if (IS_GEN(i915, 3)) { + init_unused_ring(gt, 
PRB1_BASE); + init_unused_ring(gt, PRB2_BASE); } } @@ -1256,7 +1260,7 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv) * will prevent c3 entry. Makes sure all unused rings * are totally idle. */ - init_unused_rings(dev_priv); + init_unused_rings(&dev_priv->gt); BUG_ON(!dev_priv->kernel_context); ret = i915_terminally_wedged(dev_priv); -- cgit v1.2.3 From d10cfee4d81ba3483a09ea70ddd78889bbca0384 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 21 Jun 2019 08:07:48 +0100 Subject: drm/i915: Convert gt workarounds to intel_gt More conversion of i915_gem_init_hw to uncore. Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190621070811.7006-10-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/gt/intel_workarounds.c | 10 +++++----- drivers/gpu/drm/i915/gt/intel_workarounds.h | 6 +++--- drivers/gpu/drm/i915/i915_gem.c | 4 ++-- 3 files changed, 10 insertions(+), 10 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 686df92b4866..91f20f87fd03 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -6,6 +6,7 @@ #include "i915_drv.h" #include "intel_context.h" +#include "intel_gt.h" #include "intel_workarounds.h" /** @@ -984,9 +985,9 @@ wa_list_apply(struct intel_uncore *uncore, const struct i915_wa_list *wal) spin_unlock_irqrestore(&uncore->lock, flags); } -void intel_gt_apply_workarounds(struct drm_i915_private *i915) +void intel_gt_apply_workarounds(struct intel_gt *gt) { - wa_list_apply(&i915->uncore, &i915->gt_wa_list); + wa_list_apply(gt->uncore, >->i915->gt_wa_list); } static bool wa_list_verify(struct intel_uncore *uncore, @@ -1005,10 +1006,9 @@ static bool wa_list_verify(struct intel_uncore *uncore, return ok; } -bool intel_gt_verify_workarounds(struct drm_i915_private *i915, - const char *from) +bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from) { - return wa_list_verify(&i915->uncore, &i915->gt_wa_list, from); + return wa_list_verify(gt->uncore, >->i915->gt_wa_list, from); } static void diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.h b/drivers/gpu/drm/i915/gt/intel_workarounds.h index 3761a6ee58bb..8c9c769c2204 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.h +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.h @@ -14,6 +14,7 @@ struct drm_i915_private; struct i915_request; struct intel_engine_cs; +struct intel_gt; static inline void intel_wa_list_free(struct i915_wa_list *wal) { @@ -25,9 +26,8 @@ void intel_engine_init_ctx_wa(struct intel_engine_cs *engine); int intel_engine_emit_ctx_wa(struct i915_request *rq); void intel_gt_init_workarounds(struct drm_i915_private *i915); -void intel_gt_apply_workarounds(struct drm_i915_private *i915); -bool intel_gt_verify_workarounds(struct drm_i915_private *i915, - const char *from); +void intel_gt_apply_workarounds(struct intel_gt *gt); +bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from); void intel_engine_init_whitelist(struct intel_engine_cs *engine); void intel_engine_apply_whitelist(struct intel_engine_cs *engine); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 6f96d360e8d5..0a313289f984 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1248,9 +1248,9 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv) LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED); /* Apply the GT 
workarounds... */ - intel_gt_apply_workarounds(dev_priv); + intel_gt_apply_workarounds(&dev_priv->gt); /* ...and determine whether they are sticking. */ - intel_gt_verify_workarounds(dev_priv, "init"); + intel_gt_verify_workarounds(&dev_priv->gt, "init"); intel_gt_init_swizzling(&dev_priv->gt); -- cgit v1.2.3 From f937f5613b6f108b2f93470f357c0880c1b57ffb Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 21 Jun 2019 08:07:49 +0100 Subject: drm/i915: Store backpointer to intel_gt in the engine It will come useful in the next patch. v2: * Do mock_engine as well. v3: * And the virtual engine... Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190621070811.7006-11-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 1 + drivers/gpu/drm/i915/gt/intel_engine_types.h | 2 ++ drivers/gpu/drm/i915/gt/intel_lrc.c | 1 + drivers/gpu/drm/i915/gt/mock_engine.c | 1 + 4 files changed, 5 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 5e4bf738ee59..dd23fcaced52 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -316,6 +316,7 @@ intel_engine_setup(struct drm_i915_private *dev_priv, engine->id = id; engine->mask = BIT(id); engine->i915 = dev_priv; + engine->gt = &dev_priv->gt; engine->uncore = &dev_priv->uncore; __sprint_engine_name(engine->name, info); engine->hw_id = engine->guc_id = info->hw_id; diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index 9940bad37812..fb65e96fa36b 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -36,6 +36,7 @@ struct drm_i915_reg_table; struct i915_gem_context; struct i915_request; struct i915_sched_attr; +struct intel_gt; struct intel_uncore; typedef u8 intel_engine_mask_t; @@ -257,6 +258,7 @@ struct intel_engine_execlists { struct intel_engine_cs { struct drm_i915_private *i915; + struct intel_gt *gt; struct intel_uncore *uncore; char name[INTEL_ENGINE_CS_MAX_NAME]; diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index faa9a2e4e40b..e4b44b3f2d0b 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -3363,6 +3363,7 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx, return ERR_PTR(-ENOMEM); ve->base.i915 = ctx->i915; + ve->base.gt = siblings[0]->gt; ve->base.id = -1; ve->base.class = OTHER_CLASS; ve->base.uabi_class = I915_ENGINE_CLASS_INVALID; diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c index 486c6953dcb1..b022af3385f3 100644 --- a/drivers/gpu/drm/i915/gt/mock_engine.c +++ b/drivers/gpu/drm/i915/gt/mock_engine.c @@ -257,6 +257,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915, /* minimal engine setup for requests */ engine->base.i915 = i915; + engine->base.gt = &i915->gt; snprintf(engine->base.name, sizeof(engine->base.name), "%s", name); engine->base.id = id; engine->base.mask = BIT(id); -- cgit v1.2.3 From 20a7f2fc4d7a4eb1811595d18ffa32b0811ede0b Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 21 Jun 2019 08:07:50 +0100 Subject: drm/i915: Convert intel_mocs_init_l3cc_table to intel_gt More removal of implicit dev_priv from using old mmio accessors. 
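The loops in the diff below program two 16-bit L3CC entries per 32-bit register, pad an odd final entry with the "unused" value, and then fill the remaining registers entirely with unused entries. A rough, self-contained sketch of that pairing logic follows; the low/high 16-bit packing and the printf() stand-in for the register write are assumptions of the sketch, not the exact hardware encoding.

#include <stdint.h>
#include <stdio.h>

/* Assumed packing: one 32-bit register holds two 16-bit entries. */
static uint32_t combine(uint16_t low, uint16_t high)
{
        return (uint32_t)low | ((uint32_t)high << 16);
}

static void write_l3cc_sketch(const uint16_t *table, unsigned int size,
                              unsigned int n_entries, uint16_t unused)
{
        unsigned int i;

        /* Full pairs of entries. */
        for (i = 0; i < size / 2; i++)
                printf("reg[%u] = %#x\n", i,
                       combine(table[2 * i], table[2 * i + 1]));

        /* Odd table size - one entry left over, padded with 'unused'. */
        if (size & 1) {
                printf("reg[%u] = %#x\n", i, combine(table[2 * i], unused));
                i++;
        }

        /* All remaining registers carry only unused entries. */
        for (; i < n_entries / 2; i++)
                printf("reg[%u] = %#x\n", i, combine(unused, unused));
}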
Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190621070811.7006-12-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/gt/intel_mocs.c | 52 ++++++++++++++++++++++-------------- drivers/gpu/drm/i915/gt/intel_mocs.h | 3 ++- drivers/gpu/drm/i915/i915_gem.c | 2 +- 3 files changed, 35 insertions(+), 22 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c index 1f9db50b1869..d08b8f47269b 100644 --- a/drivers/gpu/drm/i915/gt/intel_mocs.c +++ b/drivers/gpu/drm/i915/gt/intel_mocs.c @@ -23,6 +23,7 @@ #include "i915_drv.h" #include "intel_engine.h" +#include "intel_gt.h" #include "intel_mocs.h" #include "intel_lrc.h" @@ -247,7 +248,7 @@ static const struct drm_i915_mocs_entry icelake_mocs_table[] = { /** * get_mocs_settings() - * @dev_priv: i915 device. + * @gt: gt device * @table: Output table that will be made to point at appropriate * MOCS values for the device. * @@ -257,33 +258,34 @@ static const struct drm_i915_mocs_entry icelake_mocs_table[] = { * * Return: true if there are applicable MOCS settings for the device. */ -static bool get_mocs_settings(struct drm_i915_private *dev_priv, +static bool get_mocs_settings(struct intel_gt *gt, struct drm_i915_mocs_table *table) { + struct drm_i915_private *i915 = gt->i915; bool result = false; - if (INTEL_GEN(dev_priv) >= 11) { + if (INTEL_GEN(i915) >= 11) { table->size = ARRAY_SIZE(icelake_mocs_table); table->table = icelake_mocs_table; table->n_entries = GEN11_NUM_MOCS_ENTRIES; result = true; - } else if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) { + } else if (IS_GEN9_BC(i915) || IS_CANNONLAKE(i915)) { table->size = ARRAY_SIZE(skylake_mocs_table); table->n_entries = GEN9_NUM_MOCS_ENTRIES; table->table = skylake_mocs_table; result = true; - } else if (IS_GEN9_LP(dev_priv)) { + } else if (IS_GEN9_LP(i915)) { table->size = ARRAY_SIZE(broxton_mocs_table); table->n_entries = GEN9_NUM_MOCS_ENTRIES; table->table = broxton_mocs_table; result = true; } else { - WARN_ONCE(INTEL_GEN(dev_priv) >= 9, + WARN_ONCE(INTEL_GEN(i915) >= 9, "Platform that should have a MOCS table does not.\n"); } /* WaDisableSkipCaching:skl,bxt,kbl,glk */ - if (IS_GEN(dev_priv, 9)) { + if (IS_GEN(i915, 9)) { int i; for (i = 0; i < table->size; i++) @@ -338,12 +340,13 @@ static u32 get_entry_control(const struct drm_i915_mocs_table *table, */ void intel_mocs_init_engine(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->i915; + struct intel_gt *gt = engine->gt; + struct intel_uncore *uncore = gt->uncore; struct drm_i915_mocs_table table; unsigned int index; u32 unused_value; - if (!get_mocs_settings(dev_priv, &table)) + if (!get_mocs_settings(gt, &table)) return; /* Set unused values to PTE */ @@ -352,12 +355,16 @@ void intel_mocs_init_engine(struct intel_engine_cs *engine) for (index = 0; index < table.size; index++) { u32 value = get_entry_control(&table, index); - I915_WRITE(mocs_register(engine->id, index), value); + intel_uncore_write(uncore, + mocs_register(engine->id, index), + value); } /* All remaining entries are also unused */ for (; index < table.n_entries; index++) - I915_WRITE(mocs_register(engine->id, index), unused_value); + intel_uncore_write(uncore, + mocs_register(engine->id, index), + unused_value); } /** @@ -502,13 +509,14 @@ static int emit_mocs_l3cc_table(struct i915_request *rq, * * Return: Nothing. 
*/ -void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv) +void intel_mocs_init_l3cc_table(struct intel_gt *gt) { + struct intel_uncore *uncore = gt->uncore; struct drm_i915_mocs_table table; unsigned int i; u16 unused_value; - if (!get_mocs_settings(dev_priv, &table)) + if (!get_mocs_settings(gt, &table)) return; /* Set unused values to PTE */ @@ -518,23 +526,27 @@ void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv) u16 low = get_entry_l3cc(&table, 2 * i); u16 high = get_entry_l3cc(&table, 2 * i + 1); - I915_WRITE(GEN9_LNCFCMOCS(i), - l3cc_combine(&table, low, high)); + intel_uncore_write(uncore, + GEN9_LNCFCMOCS(i), + l3cc_combine(&table, low, high)); } /* Odd table size - 1 left over */ if (table.size & 0x01) { u16 low = get_entry_l3cc(&table, 2 * i); - I915_WRITE(GEN9_LNCFCMOCS(i), - l3cc_combine(&table, low, unused_value)); + intel_uncore_write(uncore, + GEN9_LNCFCMOCS(i), + l3cc_combine(&table, low, unused_value)); i++; } /* All remaining entries are also unused */ for (; i < table.n_entries / 2; i++) - I915_WRITE(GEN9_LNCFCMOCS(i), - l3cc_combine(&table, unused_value, unused_value)); + intel_uncore_write(uncore, + GEN9_LNCFCMOCS(i), + l3cc_combine(&table, unused_value, + unused_value)); } /** @@ -558,7 +570,7 @@ int intel_rcs_context_init_mocs(struct i915_request *rq) struct drm_i915_mocs_table t; int ret; - if (get_mocs_settings(rq->i915, &t)) { + if (get_mocs_settings(rq->engine->gt, &t)) { /* Program the RCS control registers */ ret = emit_mocs_control_table(rq, &t); if (ret) diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.h b/drivers/gpu/drm/i915/gt/intel_mocs.h index 0913704a1af2..8b9813e6f9ac 100644 --- a/drivers/gpu/drm/i915/gt/intel_mocs.h +++ b/drivers/gpu/drm/i915/gt/intel_mocs.h @@ -52,9 +52,10 @@ struct drm_i915_private; struct i915_request; struct intel_engine_cs; +struct intel_gt; int intel_rcs_context_init_mocs(struct i915_request *rq); -void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv); +void intel_mocs_init_l3cc_table(struct intel_gt *gt); void intel_mocs_init_engine(struct intel_engine_cs *engine); #endif diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 0a313289f984..26caa0b5b5d7 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1286,7 +1286,7 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv) goto out; } - intel_mocs_init_l3cc_table(dev_priv); + intel_mocs_init_l3cc_table(&dev_priv->gt); /* Only when the HW is re-initialised, can we replay the requests */ ret = intel_engines_resume(dev_priv); -- cgit v1.2.3 From acb56d97d907de6b853f888128755116ee5ff3a9 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 21 Jun 2019 08:07:51 +0100 Subject: drm/i915: Convert i915_ppgtt_init_hw to intel_gt More removal of implicit dev_priv from using old mmio accessors. v2: * Rebase for uncore_to_i915 removal. 
Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190621070811.7006-13-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_gem.c | 2 +- drivers/gpu/drm/i915/i915_gem_gtt.c | 101 +++++++++++++++++++++++------------- drivers/gpu/drm/i915/i915_gem_gtt.h | 3 +- 3 files changed, 67 insertions(+), 39 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 26caa0b5b5d7..769cfb15e6ca 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1267,7 +1267,7 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv) if (ret) goto out; - ret = i915_ppgtt_init_hw(dev_priv); + ret = i915_ppgtt_init_hw(&dev_priv->gt); if (ret) { DRM_ERROR("Enabling PPGTT failed (%d)\n", ret); goto out; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 697e8511ab24..9eba5460ad14 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1720,25 +1720,29 @@ static inline void gen6_write_pde(const struct gen6_ppgtt *ppgtt, ppgtt->pd_addr + pde); } -static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv) +static void gen7_ppgtt_enable(struct intel_gt *gt) { + struct drm_i915_private *i915 = gt->i915; + struct intel_uncore *uncore = gt->uncore; struct intel_engine_cs *engine; u32 ecochk, ecobits; enum intel_engine_id id; - ecobits = I915_READ(GAC_ECO_BITS); - I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B); + ecobits = intel_uncore_read(uncore, GAC_ECO_BITS); + intel_uncore_write(uncore, + GAC_ECO_BITS, + ecobits | ECOBITS_PPGTT_CACHE64B); - ecochk = I915_READ(GAM_ECOCHK); - if (IS_HASWELL(dev_priv)) { + ecochk = intel_uncore_read(uncore, GAM_ECOCHK); + if (IS_HASWELL(i915)) { ecochk |= ECOCHK_PPGTT_WB_HSW; } else { ecochk |= ECOCHK_PPGTT_LLC_IVB; ecochk &= ~ECOCHK_PPGTT_GFDT_IVB; } - I915_WRITE(GAM_ECOCHK, ecochk); + intel_uncore_write(uncore, GAM_ECOCHK, ecochk); - for_each_engine(engine, dev_priv, id) { + for_each_engine(engine, i915, id) { /* GFX_MODE is per-ring on gen7+ */ ENGINE_WRITE(engine, RING_MODE_GEN7, @@ -1746,22 +1750,30 @@ static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv) } } -static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv) +static void gen6_ppgtt_enable(struct intel_gt *gt) { + struct intel_uncore *uncore = gt->uncore; u32 ecochk, gab_ctl, ecobits; - ecobits = I915_READ(GAC_ECO_BITS); - I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT | - ECOBITS_PPGTT_CACHE64B); + ecobits = intel_uncore_read(uncore, GAC_ECO_BITS); + intel_uncore_write(uncore, + GAC_ECO_BITS, + ecobits | ECOBITS_SNB_BIT | ECOBITS_PPGTT_CACHE64B); - gab_ctl = I915_READ(GAB_CTL); - I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT); + gab_ctl = intel_uncore_read(uncore, GAB_CTL); + intel_uncore_write(uncore, + GAB_CTL, + gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT); - ecochk = I915_READ(GAM_ECOCHK); - I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B); + ecochk = intel_uncore_read(uncore, GAM_ECOCHK); + intel_uncore_write(uncore, + GAM_ECOCHK, + ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B); - if (HAS_PPGTT(dev_priv)) /* may be disabled for VT-d */ - I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); + if (HAS_PPGTT(uncore->i915)) /* may be disabled for VT-d */ + intel_uncore_write(uncore, + GFX_MODE, + _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); } /* PPGTT support for Sandybdrige/Gen6 and later */ @@ -2174,21 
+2186,32 @@ err_free: return ERR_PTR(err); } -static void gtt_write_workarounds(struct drm_i915_private *dev_priv) +static void gtt_write_workarounds(struct intel_gt *gt) { + struct drm_i915_private *i915 = gt->i915; + struct intel_uncore *uncore = gt->uncore; + /* This function is for gtt related workarounds. This function is * called on driver load and after a GPU reset, so you can place * workarounds here even if they get overwritten by GPU reset. */ /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */ - if (IS_BROADWELL(dev_priv)) - I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW); - else if (IS_CHERRYVIEW(dev_priv)) - I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV); - else if (IS_GEN9_LP(dev_priv)) - I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT); - else if (INTEL_GEN(dev_priv) >= 9) - I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL); + if (IS_BROADWELL(i915)) + intel_uncore_write(uncore, + GEN8_L3_LRA_1_GPGPU, + GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW); + else if (IS_CHERRYVIEW(i915)) + intel_uncore_write(uncore, + GEN8_L3_LRA_1_GPGPU, + GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV); + else if (IS_GEN9_LP(i915)) + intel_uncore_write(uncore, + GEN8_L3_LRA_1_GPGPU, + GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT); + else if (INTEL_GEN(i915) >= 9) + intel_uncore_write(uncore, + GEN8_L3_LRA_1_GPGPU, + GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL); /* * To support 64K PTEs we need to first enable the use of the @@ -2201,21 +2224,25 @@ static void gtt_write_workarounds(struct drm_i915_private *dev_priv) * 32K pages, but we don't currently have any support for it in our * driver. */ - if (HAS_PAGE_SIZES(dev_priv, I915_GTT_PAGE_SIZE_64K) && - INTEL_GEN(dev_priv) <= 10) - I915_WRITE(GEN8_GAMW_ECO_DEV_RW_IA, - I915_READ(GEN8_GAMW_ECO_DEV_RW_IA) | - GAMW_ECO_ENABLE_64K_IPS_FIELD); + if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) && + INTEL_GEN(i915) <= 10) + intel_uncore_write(uncore, + GEN8_GAMW_ECO_DEV_RW_IA, + intel_uncore_read(uncore, + GEN8_GAMW_ECO_DEV_RW_IA) | + GAMW_ECO_ENABLE_64K_IPS_FIELD); } -int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv) +int i915_ppgtt_init_hw(struct intel_gt *gt) { - gtt_write_workarounds(dev_priv); + struct drm_i915_private *i915 = gt->i915; + + gtt_write_workarounds(gt); - if (IS_GEN(dev_priv, 6)) - gen6_ppgtt_enable(dev_priv); - else if (IS_GEN(dev_priv, 7)) - gen7_ppgtt_enable(dev_priv); + if (IS_GEN(i915, 6)) + gen6_ppgtt_enable(gt); + else if (IS_GEN(i915, 7)) + gen7_ppgtt_enable(gt); return 0; } diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 4d6f96763a98..0f591fa186f6 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -68,6 +68,7 @@ struct drm_i915_file_private; struct drm_i915_gem_object; struct i915_vma; +struct intel_gt; typedef u32 gen6_pte_t; typedef u64 gen8_pte_t; @@ -656,7 +657,7 @@ void i915_ggtt_disable_guc(struct drm_i915_private *i915); int i915_gem_init_ggtt(struct drm_i915_private *dev_priv); void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv); -int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv); +int i915_ppgtt_init_hw(struct intel_gt *gt); struct i915_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv); -- cgit v1.2.3 From 28a1f7894f6b518bbc4f5d23f7bc7b2edc1d6c3f Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 21 Jun 2019 08:07:52 +0100 Subject: drm/i915: Consolidate some open coded mmio rmw Replace some gen6/7 open coded rmw 
with intel_uncore_rmw. Signed-off-by: Tvrtko Ursulin Reviewed-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20190621070811.7006-14-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_gem_gtt.c | 41 ++++++++++++++++--------------------- 1 file changed, 18 insertions(+), 23 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 9eba5460ad14..7c543e067623 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1725,13 +1725,10 @@ static void gen7_ppgtt_enable(struct intel_gt *gt) struct drm_i915_private *i915 = gt->i915; struct intel_uncore *uncore = gt->uncore; struct intel_engine_cs *engine; - u32 ecochk, ecobits; enum intel_engine_id id; + u32 ecochk; - ecobits = intel_uncore_read(uncore, GAC_ECO_BITS); - intel_uncore_write(uncore, - GAC_ECO_BITS, - ecobits | ECOBITS_PPGTT_CACHE64B); + intel_uncore_rmw(uncore, GAC_ECO_BITS, 0, ECOBITS_PPGTT_CACHE64B); ecochk = intel_uncore_read(uncore, GAM_ECOCHK); if (IS_HASWELL(i915)) { @@ -1753,22 +1750,21 @@ static void gen7_ppgtt_enable(struct intel_gt *gt) static void gen6_ppgtt_enable(struct intel_gt *gt) { struct intel_uncore *uncore = gt->uncore; - u32 ecochk, gab_ctl, ecobits; - ecobits = intel_uncore_read(uncore, GAC_ECO_BITS); - intel_uncore_write(uncore, - GAC_ECO_BITS, - ecobits | ECOBITS_SNB_BIT | ECOBITS_PPGTT_CACHE64B); + intel_uncore_rmw(uncore, + GAC_ECO_BITS, + 0, + ECOBITS_SNB_BIT | ECOBITS_PPGTT_CACHE64B); - gab_ctl = intel_uncore_read(uncore, GAB_CTL); - intel_uncore_write(uncore, - GAB_CTL, - gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT); + intel_uncore_rmw(uncore, + GAB_CTL, + 0, + GAB_CTL_CONT_AFTER_PAGEFAULT); - ecochk = intel_uncore_read(uncore, GAM_ECOCHK); - intel_uncore_write(uncore, - GAM_ECOCHK, - ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B); + intel_uncore_rmw(uncore, + GAM_ECOCHK, + 0, + ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B); if (HAS_PPGTT(uncore->i915)) /* may be disabled for VT-d */ intel_uncore_write(uncore, @@ -2226,11 +2222,10 @@ static void gtt_write_workarounds(struct intel_gt *gt) */ if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) && INTEL_GEN(i915) <= 10) - intel_uncore_write(uncore, - GEN8_GAMW_ECO_DEV_RW_IA, - intel_uncore_read(uncore, - GEN8_GAMW_ECO_DEV_RW_IA) | - GAMW_ECO_ENABLE_64K_IPS_FIELD); + intel_uncore_rmw(uncore, + GEN8_GAMW_ECO_DEV_RW_IA, + 0, + GAMW_ECO_ENABLE_64K_IPS_FIELD); } int i915_ppgtt_init_hw(struct intel_gt *gt) -- cgit v1.2.3 From abc584f9aa042ff75b7ff1749c221957b46a8c99 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 21 Jun 2019 08:07:53 +0100 Subject: drm/i915: Convert i915_gem_init_hw to intel_gt More removal of implicit dev_priv from using old mmio accessors. Actually the top level function remains but is split into a part which writes to i915 and part which operates on intel_gt in order to initialize the hardware. GuC and engines are the only odd ones out remaining. 
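The split described above can be pictured as a thin device-level wrapper delegating to a GT-scoped helper. The sketch below is illustrative only: the *_sketch types are stand-ins for the driver's structures and the fields shown are placeholders.

/* Stand-in types; not the driver's real definitions. */
struct intel_gt_sketch {
        long last_init_time;            /* gt-local state */
};

struct i915_sketch {
        struct intel_gt_sketch gt;      /* embedded GT, as in &i915->gt */
        void *kernel_context;           /* device-wide state */
};

/* GT-scoped part: touches only state reachable from the gt. */
static int init_hw(struct intel_gt_sketch *gt)
{
        gt->last_init_time = 1;
        return 0;
}

/* Thin top-level wrapper: keeps the i915-wide checks, then delegates. */
static int gem_init_hw_sketch(struct i915_sketch *i915)
{
        if (!i915->kernel_context)
                return -1;

        return init_hw(&i915->gt);
}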
Signed-off-by: Tvrtko Ursulin Reviewed-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20190621070811.7006-15-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_gem.c | 66 +++++++++++++++++++++++++---------------- 1 file changed, 40 insertions(+), 26 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 769cfb15e6ca..989d987c42d4 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1231,28 +1231,32 @@ static void init_unused_rings(struct intel_gt *gt) } } -int i915_gem_init_hw(struct drm_i915_private *dev_priv) +static int init_hw(struct intel_gt *gt) { + struct drm_i915_private *i915 = gt->i915; + struct intel_uncore *uncore = gt->uncore; int ret; - dev_priv->gt.last_init_time = ktime_get(); + gt->last_init_time = ktime_get(); /* Double layer security blanket, see i915_gem_init() */ - intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); + intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); - if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9) - I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); + if (HAS_EDRAM(i915) && INTEL_GEN(i915) < 9) + intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf)); - if (IS_HASWELL(dev_priv)) - I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ? - LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED); + if (IS_HASWELL(i915)) + intel_uncore_write(uncore, + MI_PREDICATE_RESULT_2, + IS_HSW_GT3(i915) ? + LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED); /* Apply the GT workarounds... */ - intel_gt_apply_workarounds(&dev_priv->gt); + intel_gt_apply_workarounds(gt); /* ...and determine whether they are sticking. */ - intel_gt_verify_workarounds(&dev_priv->gt, "init"); + intel_gt_verify_workarounds(gt, "init"); - intel_gt_init_swizzling(&dev_priv->gt); + intel_gt_init_swizzling(gt); /* * At least 830 can leave some of the unused rings @@ -1260,48 +1264,58 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv) * will prevent c3 entry. Makes sure all unused rings * are totally idle. 
*/ - init_unused_rings(&dev_priv->gt); - - BUG_ON(!dev_priv->kernel_context); - ret = i915_terminally_wedged(dev_priv); - if (ret) - goto out; + init_unused_rings(gt); - ret = i915_ppgtt_init_hw(&dev_priv->gt); + ret = i915_ppgtt_init_hw(gt); if (ret) { DRM_ERROR("Enabling PPGTT failed (%d)\n", ret); goto out; } - ret = intel_wopcm_init_hw(&dev_priv->wopcm); + ret = intel_wopcm_init_hw(&i915->wopcm); if (ret) { DRM_ERROR("Enabling WOPCM failed (%d)\n", ret); goto out; } /* We can't enable contexts until all firmware is loaded */ - ret = intel_uc_init_hw(dev_priv); + ret = intel_uc_init_hw(i915); if (ret) { DRM_ERROR("Enabling uc failed (%d)\n", ret); goto out; } - intel_mocs_init_l3cc_table(&dev_priv->gt); + intel_mocs_init_l3cc_table(gt); /* Only when the HW is re-initialised, can we replay the requests */ - ret = intel_engines_resume(dev_priv); + ret = intel_engines_resume(i915); if (ret) goto cleanup_uc; - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); + intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); - intel_engines_set_scheduler_caps(dev_priv); return 0; cleanup_uc: - intel_uc_fini_hw(dev_priv); + intel_uc_fini_hw(i915); out: - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); + intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); + + return ret; +} + +int i915_gem_init_hw(struct drm_i915_private *i915) +{ + int ret; + + BUG_ON(!i915->kernel_context); + ret = i915_terminally_wedged(i915); + if (ret) + return ret; + + ret = init_hw(&i915->gt); + + intel_engines_set_scheduler_caps(i915); return ret; } -- cgit v1.2.3 From 8649187a9520f61b742c85f5d313ad89617d86fe Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 21 Jun 2019 08:07:54 +0100 Subject: drm/i915: Move intel_engines_resume into common init Since this part still operates on i915 and not intel_gt, move it to the common (top-level) function. 
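The resulting top-level flow, visible in the diff below, brackets both the GT-level init and the engine resume with a forcewake get/put and unwinds through goto labels on failure. A simplified sketch of that bracketing follows; every function in it is a placeholder for the corresponding driver call (fw_get/fw_put for the forcewake pair, gt_init for init_hw(gt), engines_resume and uc_fini for the i915-wide calls).

static void fw_get(void) { }
static void fw_put(void) { }
static int  gt_init(void) { return 0; }
static int  engines_resume(void) { return 0; }
static void uc_fini(void) { }

static int top_level_init_sketch(void)
{
        int ret;

        fw_get();                       /* "double layer security blanket" */

        ret = gt_init();
        if (ret)
                goto err_init;

        ret = engines_resume();         /* still i915-wide, so kept at top level */
        if (ret)
                goto err_engines;

        fw_put();
        return 0;

err_engines:
        uc_fini();
err_init:
        fw_put();                       /* forcewake stays balanced on every path */
        return ret;
}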
Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190621070811.7006-16-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_gem.c | 29 ++++++++++++++++++++++------- 1 file changed, 22 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 989d987c42d4..1760a0761130 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1287,17 +1287,10 @@ static int init_hw(struct intel_gt *gt) intel_mocs_init_l3cc_table(gt); - /* Only when the HW is re-initialised, can we replay the requests */ - ret = intel_engines_resume(i915); - if (ret) - goto cleanup_uc; - intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); return 0; -cleanup_uc: - intel_uc_fini_hw(i915); out: intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); @@ -1306,6 +1299,7 @@ out: int i915_gem_init_hw(struct drm_i915_private *i915) { + struct intel_uncore *uncore = &i915->uncore; int ret; BUG_ON(!i915->kernel_context); @@ -1313,7 +1307,28 @@ int i915_gem_init_hw(struct drm_i915_private *i915) if (ret) return ret; + /* Double layer security blanket, see i915_gem_init() */ + intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); + ret = init_hw(&i915->gt); + if (ret) + goto err_init; + + /* Only when the HW is re-initialised, can we replay the requests */ + ret = intel_engines_resume(i915); + if (ret) + goto err_engines; + + intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); + + intel_engines_set_scheduler_caps(i915); + + return 0; + +err_engines: + intel_uc_fini_hw(i915); +err_init: + intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); intel_engines_set_scheduler_caps(i915); -- cgit v1.2.3 From 6b0a8dfdf27e6d6a180598b6ff6b205d9055d975 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 21 Jun 2019 08:07:55 +0100 Subject: drm/i915: Stop using I915_READ/WRITE in intel_wopcm_init_hw More legacy mmio accessor removal. We pass in intel_gt explicitly allowing code to use new intel_uncore_read/write helpers. 
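The write_and_verify() helper in the diff below captures a common pattern for self-locking configuration registers: write the value, read it back, and require that, within the mask, the register now reads back as the value with the lock bit latched. A minimal stand-alone sketch of that check follows; the memory-backed fake register and the function name are stand-ins, and on this toy model the verification only passes if the caller already includes the lock bit in the value, whereas real hardware is expected to latch it.

#include <stdint.h>
#include <errno.h>

/* Toy MMIO model: a single register backed by memory (illustrative only). */
static uint32_t fake_reg;
static void reg_write(uint32_t val) { fake_reg = val; }
static uint32_t reg_read(void) { return fake_reg; }

static int write_and_verify_sketch(uint32_t val, uint32_t mask,
                                   uint32_t locked_bit)
{
        uint32_t reg_val;

        /* The value must not set bits outside the field being programmed. */
        if (val & ~mask)
                return -EINVAL;

        reg_write(val);
        reg_val = reg_read();

        /* Success only if the field took the value and the lock bit is set. */
        return (reg_val & mask) != (val | locked_bit) ? -EIO : 0;
}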
Signed-off-by: Tvrtko Ursulin Reviewed-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20190621070811.7006-17-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_gem.c | 2 +- drivers/gpu/drm/i915/intel_wopcm.c | 31 +++++++++++++++++-------------- drivers/gpu/drm/i915/intel_wopcm.h | 4 +++- 3 files changed, 21 insertions(+), 16 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 1760a0761130..8966f32fe2a2 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1272,7 +1272,7 @@ static int init_hw(struct intel_gt *gt) goto out; } - ret = intel_wopcm_init_hw(&i915->wopcm); + ret = intel_wopcm_init_hw(&i915->wopcm, gt); if (ret) { DRM_ERROR("Enabling WOPCM failed (%d)\n", ret); goto out; diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c index 7b4ba84b9fb8..931987e37241 100644 --- a/drivers/gpu/drm/i915/intel_wopcm.c +++ b/drivers/gpu/drm/i915/intel_wopcm.c @@ -225,17 +225,18 @@ int intel_wopcm_init(struct intel_wopcm *wopcm) return 0; } -static inline int write_and_verify(struct drm_i915_private *dev_priv, - i915_reg_t reg, u32 val, u32 mask, - u32 locked_bit) +static int +write_and_verify(struct intel_gt *gt, + i915_reg_t reg, u32 val, u32 mask, u32 locked_bit) { + struct intel_uncore *uncore = gt->uncore; u32 reg_val; GEM_BUG_ON(val & ~mask); - I915_WRITE(reg, val); + intel_uncore_write(uncore, reg, val); - reg_val = I915_READ(reg); + reg_val = intel_uncore_read(uncore, reg); return (reg_val & mask) != (val | locked_bit) ? -EIO : 0; } @@ -250,29 +251,30 @@ static inline int write_and_verify(struct drm_i915_private *dev_priv, * * Return: 0 on success. -EIO if registers were locked with incorrect values. */ -int intel_wopcm_init_hw(struct intel_wopcm *wopcm) +int intel_wopcm_init_hw(struct intel_wopcm *wopcm, struct intel_gt *gt) { - struct drm_i915_private *dev_priv = wopcm_to_i915(wopcm); + struct drm_i915_private *i915 = wopcm_to_i915(wopcm); + struct intel_uncore *uncore = gt->uncore; u32 huc_agent; u32 mask; int err; - if (!USES_GUC(dev_priv)) + if (!USES_GUC(i915)) return 0; - GEM_BUG_ON(!HAS_GUC(dev_priv)); + GEM_BUG_ON(!HAS_GUC(i915)); GEM_BUG_ON(!wopcm->guc.size); GEM_BUG_ON(!wopcm->guc.base); - err = write_and_verify(dev_priv, GUC_WOPCM_SIZE, wopcm->guc.size, + err = write_and_verify(gt, GUC_WOPCM_SIZE, wopcm->guc.size, GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED, GUC_WOPCM_SIZE_LOCKED); if (err) goto err_out; - huc_agent = USES_HUC(dev_priv) ? HUC_LOADING_AGENT_GUC : 0; + huc_agent = USES_HUC(i915) ? 
HUC_LOADING_AGENT_GUC : 0; mask = GUC_WOPCM_OFFSET_MASK | GUC_WOPCM_OFFSET_VALID | huc_agent; - err = write_and_verify(dev_priv, DMA_GUC_WOPCM_OFFSET, + err = write_and_verify(gt, DMA_GUC_WOPCM_OFFSET, wopcm->guc.base | huc_agent, mask, GUC_WOPCM_OFFSET_VALID); if (err) @@ -283,8 +285,9 @@ int intel_wopcm_init_hw(struct intel_wopcm *wopcm) err_out: DRM_ERROR("Failed to init WOPCM registers:\n"); DRM_ERROR("DMA_GUC_WOPCM_OFFSET=%#x\n", - I915_READ(DMA_GUC_WOPCM_OFFSET)); - DRM_ERROR("GUC_WOPCM_SIZE=%#x\n", I915_READ(GUC_WOPCM_SIZE)); + intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET)); + DRM_ERROR("GUC_WOPCM_SIZE=%#x\n", + intel_uncore_read(uncore, GUC_WOPCM_SIZE)); return err; } diff --git a/drivers/gpu/drm/i915/intel_wopcm.h b/drivers/gpu/drm/i915/intel_wopcm.h index 114401971520..56aaed4d64ff 100644 --- a/drivers/gpu/drm/i915/intel_wopcm.h +++ b/drivers/gpu/drm/i915/intel_wopcm.h @@ -9,6 +9,8 @@ #include +struct intel_gt; + /** * struct intel_wopcm - Overall WOPCM info and WOPCM regions. * @size: Size of overall WOPCM. @@ -41,6 +43,6 @@ static inline u32 intel_wopcm_guc_size(struct intel_wopcm *wopcm) void intel_wopcm_init_early(struct intel_wopcm *wopcm); int intel_wopcm_init(struct intel_wopcm *wopcm); -int intel_wopcm_init_hw(struct intel_wopcm *wopcm); +int intel_wopcm_init_hw(struct intel_wopcm *wopcm, struct intel_gt *gt); #endif -- cgit v1.2.3 From ee1de7dd7d33bb753725178dae3bc652eb7611a4 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 21 Jun 2019 08:07:56 +0100 Subject: drm/i915: Compartmentalize i915_ggtt_probe_hw Having made start to better code compartmentalization by introducing struct intel_gt, continue the theme elsewhere in code by making functions take parameters take what logically makes most sense for them instead of the global struct drm_i915_private. 
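The compartmentalization recipe used in the diff below (and repeated through the rest of the series) is: keep the existing i915-wide entry point, but move the real work into a static helper that only takes the object it actually configures. A minimal sketch of that shape, with purely illustrative struct and function names rather than the driver's own:

#include <stdio.h>

/* Hypothetical narrow and wide device structures. */
struct ggtt {
	unsigned long total;
};

struct i915 {
	struct ggtt ggtt;
	/* ... plenty of unrelated driver state ... */
};

/* The worker only sees the object it probes. */
static int ggtt_probe(struct ggtt *ggtt)
{
	ggtt->total = 1UL << 30;	/* pretend we probed a 1 GiB GGTT */
	return 0;
}

/* The public entry point keeps its old, device-wide signature. */
int i915_ggtt_probe(struct i915 *i915)
{
	return ggtt_probe(&i915->ggtt);
}

int main(void)
{
	struct i915 dev = { .ggtt.total = 0 };

	if (!i915_ggtt_probe(&dev))
		printf("ggtt total = %lu\n", dev.ggtt.total);
	return 0;
}

Existing callers keep compiling unchanged while new GT-scoped code can call the narrow helper directly.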
Signed-off-by: Tvrtko Ursulin Reviewed-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20190621070811.7006-18-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_gem_gtt.c | 31 +++++++++++++++++++++---------- 1 file changed, 21 insertions(+), 10 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 7c543e067623..629683c52e50 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -3457,21 +3457,16 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt) return 0; } -/** - * i915_ggtt_probe_hw - Probe GGTT hardware location - * @dev_priv: i915 device - */ -int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv) +static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct drm_i915_private *i915) { - struct i915_ggtt *ggtt = &dev_priv->ggtt; int ret; - ggtt->vm.i915 = dev_priv; - ggtt->vm.dma = &dev_priv->drm.pdev->dev; + ggtt->vm.i915 = i915; + ggtt->vm.dma = &i915->drm.pdev->dev; - if (INTEL_GEN(dev_priv) <= 5) + if (INTEL_GEN(i915) <= 5) ret = i915_gmch_probe(ggtt); - else if (INTEL_GEN(dev_priv) < 8) + else if (INTEL_GEN(i915) < 8) ret = gen6_gmch_probe(ggtt); else ret = gen8_gmch_probe(ggtt); @@ -3499,6 +3494,22 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv) DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20); DRM_DEBUG_DRIVER("DSM size = %lluM\n", (u64)resource_size(&intel_graphics_stolen_res) >> 20); + + return 0; +} + +/** + * i915_ggtt_probe_hw - Probe GGTT hardware location + * @dev_priv: i915 device + */ +int i915_ggtt_probe_hw(struct drm_i915_private *i915) +{ + int ret; + + ret = ggtt_probe_hw(&i915->ggtt, i915); + if (ret) + return ret; + if (intel_vtd_active()) DRM_INFO("VT-d active for gfx access\n"); -- cgit v1.2.3 From 8b5342f58d3cea762280f34eb68eb0b640393997 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 21 Jun 2019 08:07:57 +0100 Subject: drm/i915: Compartmentalize i915_ggtt_init_hw Having made start to better code compartmentalization by introducing struct intel_gt, continue the theme elsewhere in code by making functions take parameters take what logically makes most sense for them instead of the global struct drm_i915_private. Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190621070811.7006-19-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_gem_gtt.c | 64 ++++++++++++++++++++++++------------- 1 file changed, 42 insertions(+), 22 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 629683c52e50..6f86c8e051fe 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -3516,45 +3516,65 @@ int i915_ggtt_probe_hw(struct drm_i915_private *i915) return 0; } -/** - * i915_ggtt_init_hw - Initialize GGTT hardware - * @dev_priv: i915 device - */ -int i915_ggtt_init_hw(struct drm_i915_private *dev_priv) +static void ggtt_cleanup_hw(struct i915_ggtt *ggtt) { - struct i915_ggtt *ggtt = &dev_priv->ggtt; - int ret; + ggtt->vm.cleanup(&ggtt->vm); +} - stash_init(&dev_priv->mm.wc_stash); +static int ggtt_init_hw(struct i915_ggtt *ggtt) +{ + struct drm_i915_private *i915 = ggtt->vm.i915; + int ret = 0; + + mutex_lock(&i915->drm.struct_mutex); - /* Note that we use page colouring to enforce a guard page at the - * end of the address space. 
This is required as the CS may prefetch - * beyond the end of the batch buffer, across the page boundary, - * and beyond the end of the GTT if we do not provide a guard. - */ - mutex_lock(&dev_priv->drm.struct_mutex); i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT); ggtt->vm.is_ggtt = true; /* Only VLV supports read-only GGTT mappings */ - ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv); + ggtt->vm.has_read_only = IS_VALLEYVIEW(i915); - if (!HAS_LLC(dev_priv) && !HAS_PPGTT(dev_priv)) + if (!HAS_LLC(i915) && !HAS_PPGTT(i915)) ggtt->vm.mm.color_adjust = i915_gtt_color_adjust; - mutex_unlock(&dev_priv->drm.struct_mutex); - if (!io_mapping_init_wc(&dev_priv->ggtt.iomap, - dev_priv->ggtt.gmadr.start, - dev_priv->ggtt.mappable_end)) { + if (!io_mapping_init_wc(&ggtt->iomap, + ggtt->gmadr.start, + ggtt->mappable_end)) { + ggtt_cleanup_hw(ggtt); ret = -EIO; - goto out_gtt_cleanup; + goto out; } ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end); i915_ggtt_init_fences(ggtt); +out: + mutex_unlock(&i915->drm.struct_mutex); + + return ret; +} + +/** + * i915_ggtt_init_hw - Initialize GGTT hardware + * @dev_priv: i915 device + */ +int i915_ggtt_init_hw(struct drm_i915_private *dev_priv) +{ + int ret; + + stash_init(&dev_priv->mm.wc_stash); + + /* Note that we use page colouring to enforce a guard page at the + * end of the address space. This is required as the CS may prefetch + * beyond the end of the batch buffer, across the page boundary, + * and beyond the end of the GTT if we do not provide a guard. + */ + ret = ggtt_init_hw(&dev_priv->ggtt); + if (ret) + return ret; + /* * Initialise stolen early so that we may reserve preallocated * objects for the BIOS to KMS transition. @@ -3566,7 +3586,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv) return 0; out_gtt_cleanup: - ggtt->vm.cleanup(&ggtt->vm); + ggtt_cleanup_hw(&dev_priv->ggtt); return ret; } -- cgit v1.2.3 From 759e4a74f354c51509d2aae93d32381879f11344 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 21 Jun 2019 08:07:58 +0100 Subject: drm/i915: Make ggtt invalidation work on ggtt It is more logical for ggtt invalidation to take ggtt as input parameter. 
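Because the invalidate hook is now stored on, and takes, the GGTT itself, callers in the diff below can simply swap the function pointer and invoke it without reaching for drm_i915_private. A standalone sketch of that callback shape follows; the names are hypothetical and the real hooks of course write actual flush registers rather than printing.

#include <stdio.h>

/* Hypothetical GGTT carrying its own invalidate hook. */
struct ggtt {
	void (*invalidate)(struct ggtt *ggtt);
};

static void gen6_invalidate(struct ggtt *ggtt)
{
	printf("flush TLBs via GFX_FLSH_CNTL\n");
}

static void guc_invalidate(struct ggtt *ggtt)
{
	gen6_invalidate(ggtt);		/* still perform the base flush */
	printf("also poke GEN8_GTCR for the GuC\n");
}

/* Enabling the GuC path needs only the GGTT, not the whole device. */
static void ggtt_enable_guc(struct ggtt *ggtt)
{
	ggtt->invalidate = guc_invalidate;
	ggtt->invalidate(ggtt);
}

int main(void)
{
	struct ggtt ggtt = { .invalidate = gen6_invalidate };

	ggtt.invalidate(&ggtt);		/* plain gen6-style flush */
	ggtt_enable_guc(&ggtt);		/* now routed through the GuC variant */
	return 0;
}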
Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190621070811.7006-20-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_gem_gtt.c | 51 ++++++++++++++++++------------------- drivers/gpu/drm/i915/i915_gem_gtt.h | 2 +- 2 files changed, 26 insertions(+), 27 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 6f86c8e051fe..17f690f9d95d 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -110,9 +110,9 @@ static int i915_get_ggtt_vma_pages(struct i915_vma *vma); -static void gen6_ggtt_invalidate(struct drm_i915_private *i915) +static void gen6_ggtt_invalidate(struct i915_ggtt *ggtt) { - struct intel_uncore *uncore = &i915->uncore; + struct intel_uncore *uncore = &ggtt->vm.i915->uncore; /* * Note that as an uncached mmio write, this will flush the @@ -121,24 +121,19 @@ static void gen6_ggtt_invalidate(struct drm_i915_private *i915) intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); } -static void guc_ggtt_invalidate(struct drm_i915_private *i915) +static void guc_ggtt_invalidate(struct i915_ggtt *ggtt) { - struct intel_uncore *uncore = &i915->uncore; + struct intel_uncore *uncore = &ggtt->vm.i915->uncore; - gen6_ggtt_invalidate(i915); + gen6_ggtt_invalidate(ggtt); intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE); } -static void gmch_ggtt_invalidate(struct drm_i915_private *i915) +static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt) { intel_gtt_chipset_flush(); } -static inline void i915_ggtt_invalidate(struct drm_i915_private *i915) -{ - i915->ggtt.invalidate(i915); -} - static int ppgtt_bind_vma(struct i915_vma *vma, enum i915_cache_level cache_level, u32 unused) @@ -1904,7 +1899,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, if (flush) { mark_tlbs_dirty(&ppgtt->base); - gen6_ggtt_invalidate(vm->i915); + gen6_ggtt_invalidate(&vm->i915->ggtt); } goto out; @@ -2010,7 +2005,7 @@ static int pd_vma_bind(struct i915_vma *vma, gen6_write_pde(ppgtt, pde, pt); mark_tlbs_dirty(&ppgtt->base); - gen6_ggtt_invalidate(ppgtt->base.vm.i915); + gen6_ggtt_invalidate(ggtt); return 0; } @@ -2290,7 +2285,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv) ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); - i915_ggtt_invalidate(dev_priv); + ggtt->invalidate(ggtt); } int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj, @@ -2336,7 +2331,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm, gen8_set_pte(pte, gen8_pte_encode(addr, level, 0)); - ggtt->invalidate(vm->i915); + ggtt->invalidate(ggtt); } static void gen8_ggtt_insert_entries(struct i915_address_space *vm, @@ -2364,7 +2359,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm, * We want to flush the TLBs only after we're certain all the PTE * updates have finished. */ - ggtt->invalidate(vm->i915); + ggtt->invalidate(ggtt); } static void gen6_ggtt_insert_page(struct i915_address_space *vm, @@ -2379,7 +2374,7 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm, iowrite32(vm->pte_encode(addr, level, flags), pte); - ggtt->invalidate(vm->i915); + ggtt->invalidate(ggtt); } /* @@ -2405,7 +2400,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, * We want to flush the TLBs only after we're certain all the PTE * updates have finished. 
*/ - ggtt->invalidate(vm->i915); + ggtt->invalidate(ggtt); } static void nop_clear_range(struct i915_address_space *vm, @@ -3600,25 +3595,29 @@ int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv) void i915_ggtt_enable_guc(struct drm_i915_private *i915) { - GEM_BUG_ON(i915->ggtt.invalidate != gen6_ggtt_invalidate); + struct i915_ggtt *ggtt = &i915->ggtt; - i915->ggtt.invalidate = guc_ggtt_invalidate; + GEM_BUG_ON(ggtt->invalidate != gen6_ggtt_invalidate); - i915_ggtt_invalidate(i915); + ggtt->invalidate = guc_ggtt_invalidate; + + ggtt->invalidate(ggtt); } void i915_ggtt_disable_guc(struct drm_i915_private *i915) { + struct i915_ggtt *ggtt = &i915->ggtt; + /* XXX Temporary pardon for error unload */ - if (i915->ggtt.invalidate == gen6_ggtt_invalidate) + if (ggtt->invalidate == gen6_ggtt_invalidate) return; /* We should only be called after i915_ggtt_enable_guc() */ - GEM_BUG_ON(i915->ggtt.invalidate != guc_ggtt_invalidate); + GEM_BUG_ON(ggtt->invalidate != guc_ggtt_invalidate); - i915->ggtt.invalidate = gen6_ggtt_invalidate; + ggtt->invalidate = gen6_ggtt_invalidate; - i915_ggtt_invalidate(i915); + ggtt->invalidate(ggtt); } void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv) @@ -3660,7 +3659,7 @@ lock: } ggtt->vm.closed = false; - i915_ggtt_invalidate(dev_priv); + ggtt->invalidate(ggtt); mutex_unlock(&ggtt->vm.mutex); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 0f591fa186f6..ca8ba458a310 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -392,7 +392,7 @@ struct i915_ggtt { /** "Graphics Stolen Memory" holds the global PTEs */ void __iomem *gsm; - void (*invalidate)(struct drm_i915_private *dev_priv); + void (*invalidate)(struct i915_ggtt *ggtt); bool do_idle_maps; -- cgit v1.2.3 From 763c1e63125ef18561b36aafcf983230aeda4212 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 21 Jun 2019 08:07:59 +0100 Subject: drm/i915: Store intel_gt backpointer in vm This will come useful in the following patch. v2: * Handle mock ggtt. Signed-off-by: Tvrtko Ursulin Reviewed-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20190621070811.7006-21-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_gem_gtt.c | 16 ++++++++++------ drivers/gpu/drm/i915/i915_gem_gtt.h | 1 + drivers/gpu/drm/i915/selftests/mock_gtt.c | 1 + 3 files changed, 12 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 17f690f9d95d..1467b65c9a10 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1603,9 +1603,11 @@ unwind: return -ENOMEM; } -static void ppgtt_init(struct drm_i915_private *i915, - struct i915_ppgtt *ppgtt) +static void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt) { + struct drm_i915_private *i915 = gt->i915; + + ppgtt->vm.gt = gt; ppgtt->vm.i915 = i915; ppgtt->vm.dma = &i915->drm.pdev->dev; ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size); @@ -1634,7 +1636,7 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915) if (!ppgtt) return ERR_PTR(-ENOMEM); - ppgtt_init(i915, ppgtt); + ppgtt_init(ppgtt, &i915->gt); /* * From bdw, there is hw support for read-only pages in the PPGTT. 
@@ -2141,7 +2143,7 @@ static struct i915_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915) if (!ppgtt) return ERR_PTR(-ENOMEM); - ppgtt_init(i915, &ppgtt->base); + ppgtt_init(&ppgtt->base, &i915->gt); ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range; ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range; @@ -3452,10 +3454,12 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt) return 0; } -static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct drm_i915_private *i915) +static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt) { + struct drm_i915_private *i915 = gt->i915; int ret; + ggtt->vm.gt = gt; ggtt->vm.i915 = i915; ggtt->vm.dma = &i915->drm.pdev->dev; @@ -3501,7 +3505,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *i915) { int ret; - ret = ggtt_probe_hw(&i915->ggtt, i915); + ret = ggtt_probe_hw(&i915->ggtt, &i915->gt); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index ca8ba458a310..6d1aad555ffa 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -288,6 +288,7 @@ struct i915_address_space { struct rcu_work rcu; struct drm_mm mm; + struct intel_gt *gt; struct drm_i915_private *i915; struct device *dma; /* Every address space belongs to a struct file - except for the global diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c index f625c307a406..215372f75eec 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gtt.c +++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c @@ -98,6 +98,7 @@ void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt) { memset(ggtt, 0, sizeof(*ggtt)); + ggtt->vm.gt = &i915->gt; ggtt->vm.i915 = i915; ggtt->vm.is_ggtt = true; -- cgit v1.2.3 From 68c754b850840eb8258211306371e9fa3d706667 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 21 Jun 2019 08:08:00 +0100 Subject: drm/i915: Compartmentalize i915_gem_suspend/restore_gtt_mappings Having made start to better code compartmentalization by introducing struct intel_gt, continue the theme elsewhere in code by making functions take parameters take what logically makes most sense for them instead of the global struct drm_i915_private. Signed-off-by: Tvrtko Ursulin Reviewed-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20190621070811.7006-22-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_gem_gtt.c | 29 +++++++++++++++++++---------- 1 file changed, 19 insertions(+), 10 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 1467b65c9a10..b38af1df1fe9 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2273,23 +2273,28 @@ static bool needs_idle_maps(struct drm_i915_private *dev_priv) return IS_GEN(dev_priv, 5) && IS_MOBILE(dev_priv) && intel_vtd_active(); } -void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv) +static void ggtt_suspend_mappings(struct i915_ggtt *ggtt) { - struct i915_ggtt *ggtt = &dev_priv->ggtt; + struct drm_i915_private *i915 = ggtt->vm.i915; /* Don't bother messing with faults pre GEN6 as we have little * documentation supporting that it's a good idea. 
*/ - if (INTEL_GEN(dev_priv) < 6) + if (INTEL_GEN(i915) < 6) return; - intel_gt_check_and_clear_faults(&dev_priv->gt); + intel_gt_check_and_clear_faults(ggtt->vm.gt); ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); ggtt->invalidate(ggtt); } +void i915_gem_suspend_gtt_mappings(struct drm_i915_private *i915) +{ + ggtt_suspend_mappings(&i915->ggtt); +} + int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj, struct sg_table *pages) { @@ -3624,12 +3629,11 @@ void i915_ggtt_disable_guc(struct drm_i915_private *i915) ggtt->invalidate(ggtt); } -void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv) +static void ggtt_restore_mappings(struct i915_ggtt *ggtt) { - struct i915_ggtt *ggtt = &dev_priv->ggtt; struct i915_vma *vma, *vn; - intel_gt_check_and_clear_faults(&dev_priv->gt); + intel_gt_check_and_clear_faults(ggtt->vm.gt); mutex_lock(&ggtt->vm.mutex); @@ -3666,12 +3670,17 @@ lock: ggtt->invalidate(ggtt); mutex_unlock(&ggtt->vm.mutex); +} + +void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915) +{ + ggtt_restore_mappings(&i915->ggtt); - if (INTEL_GEN(dev_priv) >= 8) { - struct intel_ppat *ppat = &dev_priv->ppat; + if (INTEL_GEN(i915) >= 8) { + struct intel_ppat *ppat = &i915->ppat; bitmap_set(ppat->dirty, 0, ppat->max_entries); - dev_priv->ppat.update_hw(dev_priv); + i915->ppat.update_hw(i915); return; } } -- cgit v1.2.3 From a1c8a09e0c7c39ee079fc7755cd665efefd9ef73 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 21 Jun 2019 08:08:01 +0100 Subject: drm/i915: Convert i915_gem_flush_ggtt_writes to intel_gt Having introduced struct intel_gt (named the anonymous structure in i915) we can start using it to compartmentalize our code better. It makes more sense logically to have the code internally like this and it will also help with future split between gt and display in i915. v2: * Keep ggtt flush before fb obj flush. (Chris) v3: * Fix refactoring fail. * Always flush ggtt writes. 
(Chris) Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190621070811.7006-23-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/gem/i915_gem_object.c | 7 ++-- drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 3 +- drivers/gpu/drm/i915/gt/intel_gt.c | 41 ++++++++++++++++++++++ drivers/gpu/drm/i915/gt/intel_gt.h | 2 ++ drivers/gpu/drm/i915/i915_drv.h | 2 -- drivers/gpu/drm/i915/i915_gem.c | 40 --------------------- drivers/gpu/drm/i915/i915_vma.c | 3 +- drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 2 +- 8 files changed, 52 insertions(+), 48 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index be6caccce0c5..87275f9883ac 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -23,7 +23,7 @@ */ #include "display/intel_frontbuffer.h" - +#include "gt/intel_gt.h" #include "i915_drv.h" #include "i915_gem_clflush.h" #include "i915_gem_context.h" @@ -319,7 +319,6 @@ void i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains) { - struct drm_i915_private *dev_priv = to_i915(obj->base.dev); struct i915_vma *vma; assert_object_held(obj); @@ -329,7 +328,8 @@ i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj, switch (obj->write_domain) { case I915_GEM_DOMAIN_GTT: - i915_gem_flush_ggtt_writes(dev_priv); + for_each_ggtt_vma(vma, obj) + intel_gt_flush_ggtt_writes(vma->vm->gt); intel_fb_obj_flush(obj, fb_write_origin(obj, I915_GEM_DOMAIN_GTT)); @@ -340,6 +340,7 @@ i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj, i915_vma_unset_ggtt_write(vma); } + break; case I915_GEM_DOMAIN_WC: diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index 5c81f4b4813a..2812f7fa27fe 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -6,6 +6,7 @@ #include +#include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" #include "huge_gem_object.h" #include "i915_selftest.h" @@ -143,7 +144,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj, if (offset >= obj->base.size) continue; - i915_gem_flush_ggtt_writes(to_i915(obj->base.dev)); + intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt); p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT); cpu = kmap(p) + offset_in_page(offset); diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index 5c9be38713db..4fd9977fe284 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -150,3 +150,44 @@ void intel_gt_check_and_clear_faults(struct intel_gt *gt) intel_gt_clear_error_registers(gt, ALL_ENGINES); } + +void intel_gt_flush_ggtt_writes(struct intel_gt *gt) +{ + struct drm_i915_private *i915 = gt->i915; + intel_wakeref_t wakeref; + + /* + * No actual flushing is required for the GTT write domain for reads + * from the GTT domain. Writes to it "immediately" go to main memory + * as far as we know, so there's no chipset flush. It also doesn't + * land in the GPU render cache. + * + * However, we do have to enforce the order so that all writes through + * the GTT land before any writes to the device, such as updates to + * the GATT itself. + * + * We also have to wait a bit for the writes to land from the GTT. + * An uncached read (i.e. 
mmio) seems to be ideal for the round-trip + * timing. This issue has only been observed when switching quickly + * between GTT writes and CPU reads from inside the kernel on recent hw, + * and it appears to only affect discrete GTT blocks (i.e. on LLC + * system agents we cannot reproduce this behaviour, until Cannonlake + * that was!). + */ + + wmb(); + + if (INTEL_INFO(i915)->has_coherent_ggtt) + return; + + i915_gem_chipset_flush(i915); + + with_intel_runtime_pm(&i915->runtime_pm, wakeref) { + struct intel_uncore *uncore = gt->uncore; + + spin_lock_irq(&uncore->lock); + intel_uncore_posting_read_fw(uncore, + RING_HEAD(RENDER_RING_BASE)); + spin_unlock_irq(&uncore->lock); + } +} diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h index 033713b684bf..6073f3617caa 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.h +++ b/drivers/gpu/drm/i915/gt/intel_gt.h @@ -17,4 +17,6 @@ void intel_gt_check_and_clear_faults(struct intel_gt *gt); void intel_gt_clear_error_registers(struct intel_gt *gt, intel_engine_mask_t engine_mask); +void intel_gt_flush_ggtt_writes(struct intel_gt *gt); + #endif /* __INTEL_GT_H__ */ diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 44196bed4d81..1fd0a73b4b9a 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2599,8 +2599,6 @@ int __must_check i915_gem_evict_for_node(struct i915_address_space *vm, unsigned int flags); int i915_gem_evict_vm(struct i915_address_space *vm); -void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv); - /* belongs in i915_gem_gtt.h */ static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv) { diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 8966f32fe2a2..a9189807cd71 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -233,46 +233,6 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, &args->size, &args->handle); } -void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv) -{ - intel_wakeref_t wakeref; - - /* - * No actual flushing is required for the GTT write domain for reads - * from the GTT domain. Writes to it "immediately" go to main memory - * as far as we know, so there's no chipset flush. It also doesn't - * land in the GPU render cache. - * - * However, we do have to enforce the order so that all writes through - * the GTT land before any writes to the device, such as updates to - * the GATT itself. - * - * We also have to wait a bit for the writes to land from the GTT. - * An uncached read (i.e. mmio) seems to be ideal for the round-trip - * timing. This issue has only been observed when switching quickly - * between GTT writes and CPU reads from inside the kernel on recent hw, - * and it appears to only affect discrete GTT blocks (i.e. on LLC - * system agents we cannot reproduce this behaviour, until Cannonlake - * that was!). 
- */ - - wmb(); - - if (INTEL_INFO(dev_priv)->has_coherent_ggtt) - return; - - i915_gem_chipset_flush(dev_priv); - - with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) { - struct intel_uncore *uncore = &dev_priv->uncore; - - spin_lock_irq(&uncore->lock); - intel_uncore_posting_read_fw(uncore, - RING_HEAD(RENDER_RING_BASE)); - spin_unlock_irq(&uncore->lock); - } -} - static int shmem_pread(struct page *page, int offset, int len, char __user *user_data, bool needs_clflush) diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index a57729be8312..503f1180af12 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -27,6 +27,7 @@ #include "display/intel_frontbuffer.h" #include "gt/intel_engine.h" +#include "gt/intel_gt.h" #include "i915_drv.h" #include "i915_globals.h" @@ -408,7 +409,7 @@ void i915_vma_flush_writes(struct i915_vma *vma) if (!i915_vma_has_ggtt_write(vma)) return; - i915_gem_flush_ggtt_writes(vma->vm->i915); + intel_gt_flush_ggtt_writes(vma->vm->gt); i915_vma_unset_ggtt_write(vma); } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index 0c47276ed5df..31a51ca1ddcb 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -1193,7 +1193,7 @@ static int igt_ggtt_page(void *arg) iowrite32(n, vaddr + n); io_mapping_unmap_atomic(vaddr); } - i915_gem_flush_ggtt_writes(i915); + intel_gt_flush_ggtt_writes(ggtt->vm.gt); i915_random_reorder(order, count, &prng); for (n = 0; n < count; n++) { -- cgit v1.2.3 From baea429dc5ed90cf9afbdf326fa527581745c8f1 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 21 Jun 2019 08:08:02 +0100 Subject: drm/i915: Move i915_gem_chipset_flush to intel_gt This aligns better with the rest of restructuring. v2: * Move call out of line. 
(Chris) Signed-off-by: Tvrtko Ursulin Suggested-by: Chris Wilson Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190621070811.7006-24-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 5 +++-- drivers/gpu/drm/i915/gem/i915_gem_phys.c | 3 ++- drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 4 +++- drivers/gpu/drm/i915/gt/intel_gt.c | 9 ++++++++- drivers/gpu/drm/i915/gt/intel_gt.h | 1 + drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 14 ++++++++++---- drivers/gpu/drm/i915/gt/selftest_workarounds.c | 5 +++-- drivers/gpu/drm/i915/i915_drv.h | 8 -------- drivers/gpu/drm/i915/i915_gem.c | 3 ++- drivers/gpu/drm/i915/selftests/i915_request.c | 10 ++++++---- drivers/gpu/drm/i915/selftests/igt_spinner.c | 7 +++++-- drivers/gpu/drm/i915/selftests/igt_spinner.h | 3 +++ 12 files changed, 46 insertions(+), 26 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 5fae0e50aad0..cf8edb6822ee 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -16,6 +16,7 @@ #include "gem/i915_gem_ioctls.h" #include "gt/intel_context.h" +#include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" #include "i915_gem_ioctls.h" @@ -994,7 +995,7 @@ static void reloc_gpu_flush(struct reloc_cache *cache) __i915_gem_object_flush_map(cache->rq->batch->obj, 0, cache->rq_size); i915_gem_object_unpin_map(cache->rq->batch->obj); - i915_gem_chipset_flush(cache->rq->i915); + intel_gt_chipset_flush(cache->rq->engine->gt); i915_request_add(cache->rq); cache->rq = NULL; @@ -1954,7 +1955,7 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb) eb->exec = NULL; /* Unconditionally flush any chipset caches (for streaming writes). */ - i915_gem_chipset_flush(eb->i915); + intel_gt_chipset_flush(eb->engine->gt); return 0; err_skip: diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c index 2deac933cf59..7b900ee4ed8d 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c @@ -13,6 +13,7 @@ #include /* for drm_pci.h! 
*/ #include +#include "gt/intel_gt.h" #include "i915_drv.h" #include "i915_gem_object.h" #include "i915_scatterlist.h" @@ -60,7 +61,7 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) vaddr += PAGE_SIZE; } - i915_gem_chipset_flush(to_i915(obj->base.dev)); + intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt); st = kmalloc(sizeof(*st), GFP_KERNEL); if (!st) { diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c index b74729b6f353..dcf60a8c229e 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c @@ -10,6 +10,8 @@ #include "gem/i915_gem_pm.h" +#include "gt/intel_gt.h" + #include "igt_gem_utils.h" #include "mock_context.h" @@ -926,7 +928,7 @@ gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val) } *cmd = MI_BATCH_BUFFER_END; - i915_gem_chipset_flush(i915); + intel_gt_chipset_flush(vma->vm->gt); i915_gem_object_unpin_map(obj); diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index 4fd9977fe284..f632b7b5b490 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -180,7 +180,7 @@ void intel_gt_flush_ggtt_writes(struct intel_gt *gt) if (INTEL_INFO(i915)->has_coherent_ggtt) return; - i915_gem_chipset_flush(i915); + intel_gt_chipset_flush(gt); with_intel_runtime_pm(&i915->runtime_pm, wakeref) { struct intel_uncore *uncore = gt->uncore; @@ -191,3 +191,10 @@ void intel_gt_flush_ggtt_writes(struct intel_gt *gt) spin_unlock_irq(&uncore->lock); } } + +void intel_gt_chipset_flush(struct intel_gt *gt) +{ + wmb(); + if (INTEL_GEN(gt->i915) < 6) + intel_gtt_chipset_flush(); +} diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h index 6073f3617caa..fb064758b591 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.h +++ b/drivers/gpu/drm/i915/gt/intel_gt.h @@ -18,5 +18,6 @@ void intel_gt_clear_error_registers(struct intel_gt *gt, intel_engine_mask_t engine_mask); void intel_gt_flush_ggtt_writes(struct intel_gt *gt); +void intel_gt_chipset_flush(struct intel_gt *gt); #endif /* __INTEL_GT_H__ */ diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c index 1ee4c923044f..2d773f11e203 100644 --- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c @@ -25,6 +25,7 @@ #include #include "gem/i915_gem_context.h" +#include "gt/intel_gt.h" #include "intel_engine_pm.h" #include "i915_selftest.h" @@ -43,6 +44,7 @@ struct hang { struct drm_i915_private *i915; + struct intel_gt *gt; struct drm_i915_gem_object *hws; struct drm_i915_gem_object *obj; struct i915_gem_context *ctx; @@ -135,6 +137,8 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine) u32 *batch; int err; + h->gt = engine->gt; + if (i915_gem_object_is_active(h->obj)) { struct drm_i915_gem_object *obj; void *vaddr; @@ -242,7 +246,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine) *batch++ = lower_32_bits(vma->node.start); } *batch++ = MI_BATCH_BUFFER_END; /* not reached */ - i915_gem_chipset_flush(h->i915); + intel_gt_chipset_flush(engine->gt); if (rq->engine->emit_init_breadcrumb) { err = rq->engine->emit_init_breadcrumb(rq); @@ -276,7 +280,9 @@ static u32 hws_seqno(const struct hang *h, const struct i915_request *rq) static void hang_fini(struct hang *h) { *h->batch = MI_BATCH_BUFFER_END; - i915_gem_chipset_flush(h->i915); + + if (h->gt) + intel_gt_chipset_flush(h->gt); 
i915_gem_object_unpin_map(h->obj); i915_gem_object_put(h->obj); @@ -333,7 +339,7 @@ static int igt_hang_sanitycheck(void *arg) i915_request_get(rq); *h.batch = MI_BATCH_BUFFER_END; - i915_gem_chipset_flush(i915); + intel_gt_chipset_flush(engine->gt); i915_request_add(rq); @@ -1509,7 +1515,7 @@ static int igt_reset_queue(void *arg) pr_info("%s: Completed %d resets\n", engine->name, count); *h.batch = MI_BATCH_BUFFER_END; - i915_gem_chipset_flush(i915); + intel_gt_chipset_flush(engine->gt); i915_request_put(prev); diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c index 9eaf030affd0..931bc33fc46d 100644 --- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c +++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c @@ -5,6 +5,7 @@ */ #include "gem/i915_gem_pm.h" +#include "gt/intel_gt.h" #include "i915_selftest.h" #include "intel_reset.h" @@ -542,7 +543,7 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx, i915_gem_object_flush_map(batch->obj); i915_gem_object_unpin_map(batch->obj); - i915_gem_chipset_flush(ctx->i915); + intel_gt_chipset_flush(engine->gt); rq = igt_request_alloc(ctx, engine); if (IS_ERR(rq)) { @@ -806,7 +807,7 @@ static int scrub_whitelisted_registers(struct i915_gem_context *ctx, *cs++ = MI_BATCH_BUFFER_END; i915_gem_object_flush_map(batch->obj); - i915_gem_chipset_flush(ctx->i915); + intel_gt_chipset_flush(engine->gt); rq = igt_request_alloc(ctx, engine); if (IS_ERR(rq)) { diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 1fd0a73b4b9a..b574aea23581 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2599,14 +2599,6 @@ int __must_check i915_gem_evict_for_node(struct i915_address_space *vm, unsigned int flags); int i915_gem_evict_vm(struct i915_address_space *vm); -/* belongs in i915_gem_gtt.h */ -static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv) -{ - wmb(); - if (INTEL_GEN(dev_priv) < 6) - intel_gtt_chipset_flush(); -} - /* i915_gem_stolen.c */ int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv, struct drm_mm_node *node, u64 size, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index a9189807cd71..ae36955d819a 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -47,6 +47,7 @@ #include "gem/i915_gem_pm.h" #include "gem/i915_gemfs.h" #include "gt/intel_engine_pm.h" +#include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" #include "gt/intel_mocs.h" #include "gt/intel_reset.h" @@ -142,7 +143,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, return -EFAULT; drm_clflush_virt_range(vaddr, args->size); - i915_gem_chipset_flush(to_i915(obj->base.dev)); + intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt); intel_fb_obj_flush(obj, ORIGIN_CPU); return 0; diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 1a5b9e284ca9..0fdf948a93a0 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -27,6 +27,8 @@ #include "gem/i915_gem_pm.h" #include "gem/selftests/mock_context.h" +#include "gt/intel_gt.h" + #include "i915_random.h" #include "i915_selftest.h" #include "igt_live_test.h" @@ -624,7 +626,7 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915) __i915_gem_object_flush_map(obj, 0, 64); i915_gem_object_unpin_map(obj); - i915_gem_chipset_flush(i915); + intel_gt_chipset_flush(&i915->gt); vma = 
i915_vma_instance(obj, &i915->ggtt.vm, NULL); if (IS_ERR(vma)) { @@ -793,7 +795,7 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915) __i915_gem_object_flush_map(obj, 0, 64); i915_gem_object_unpin_map(obj); - i915_gem_chipset_flush(i915); + intel_gt_chipset_flush(&i915->gt); return vma; @@ -811,7 +813,7 @@ static int recursive_batch_resolve(struct i915_vma *batch) return PTR_ERR(cmd); *cmd = MI_BATCH_BUFFER_END; - i915_gem_chipset_flush(batch->vm->i915); + intel_gt_chipset_flush(batch->vm->gt); i915_gem_object_unpin_map(batch->obj); @@ -1033,7 +1035,7 @@ out_request: I915_MAP_WC); if (!IS_ERR(cmd)) { *cmd = MI_BATCH_BUFFER_END; - i915_gem_chipset_flush(i915); + intel_gt_chipset_flush(engine->gt); i915_gem_object_unpin_map(request[id]->batch->obj); } diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c index 1e59b543cf27..0c1f65262a63 100644 --- a/drivers/gpu/drm/i915/selftests/igt_spinner.c +++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c @@ -3,6 +3,7 @@ * * Copyright © 2018 Intel Corporation */ +#include "gt/intel_gt.h" #include "gem/selftests/igt_gem_utils.h" @@ -94,6 +95,8 @@ igt_spinner_create_request(struct igt_spinner *spin, u32 *batch; int err; + spin->gt = engine->gt; + vma = i915_vma_instance(spin->obj, ctx->vm, NULL); if (IS_ERR(vma)) return ERR_CAST(vma); @@ -138,7 +141,7 @@ igt_spinner_create_request(struct igt_spinner *spin, *batch++ = upper_32_bits(vma->node.start); *batch++ = MI_BATCH_BUFFER_END; /* not reached */ - i915_gem_chipset_flush(spin->i915); + intel_gt_chipset_flush(engine->gt); if (engine->emit_init_breadcrumb && rq->timeline->has_initial_breadcrumb) { @@ -172,7 +175,7 @@ hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq) void igt_spinner_end(struct igt_spinner *spin) { *spin->batch = MI_BATCH_BUFFER_END; - i915_gem_chipset_flush(spin->i915); + intel_gt_chipset_flush(spin->gt); } void igt_spinner_fini(struct igt_spinner *spin) diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.h b/drivers/gpu/drm/i915/selftests/igt_spinner.h index 34a88ac9b47a..1bfc39efa773 100644 --- a/drivers/gpu/drm/i915/selftests/igt_spinner.h +++ b/drivers/gpu/drm/i915/selftests/igt_spinner.h @@ -14,8 +14,11 @@ #include "i915_request.h" #include "i915_selftest.h" +struct intel_gt; + struct igt_spinner { struct drm_i915_private *i915; + struct intel_gt *gt; struct drm_i915_gem_object *hws; struct drm_i915_gem_object *obj; u32 *batch; -- cgit v1.2.3 From 390c82055b74e14d36a1a18ec772af1e032bde71 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 21 Jun 2019 08:08:03 +0100 Subject: drm/i915: Compartmentalize timeline_init/park/fini Continuing on the theme of better logical organization of our code, make the first step towards making the timeline code better isolated from wider struct drm_i915_private. 
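For reference, the park path in the diff below boils down to walking the active timeline list under its mutex and discarding each timeline's cached sync state, since an idle GPU means every tracked fence has signaled. A rough userspace model of that logic, assuming plain pthreads and a hand-rolled list instead of the kernel's list_head and mutex primitives:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical, simplified model of the per-GT timeline bookkeeping. */
struct timeline {
	struct timeline *next;
	void *sync;			/* cached sync map, dropped on park */
};

struct gt_timelines {
	pthread_mutex_t mutex;
	struct timeline *active;	/* singly linked active list */
};

/* Walk every active timeline and throw away its stale sync tracking. */
static void timelines_park(struct gt_timelines *tl)
{
	struct timeline *t;

	pthread_mutex_lock(&tl->mutex);
	for (t = tl->active; t; t = t->next) {
		free(t->sync);
		t->sync = NULL;
	}
	pthread_mutex_unlock(&tl->mutex);
}

int main(void)
{
	struct gt_timelines tl = { .active = NULL };
	struct timeline a = { .next = NULL, .sync = malloc(64) };

	pthread_mutex_init(&tl.mutex, NULL);
	tl.active = &a;
	timelines_park(&tl);
	printf("sync cache dropped: %s\n", a.sync ? "no" : "yes");
	pthread_mutex_destroy(&tl.mutex);
	return 0;
}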
Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190621070811.7006-25-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_timeline.c | 67 ++++++++++++++++++++++-------------- 1 file changed, 41 insertions(+), 26 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_timeline.c b/drivers/gpu/drm/i915/i915_timeline.c index c311ce9c6f9d..dc885a13b16d 100644 --- a/drivers/gpu/drm/i915/i915_timeline.c +++ b/drivers/gpu/drm/i915/i915_timeline.c @@ -261,18 +261,23 @@ int i915_timeline_init(struct drm_i915_private *i915, return 0; } -void i915_timelines_init(struct drm_i915_private *i915) +static void timelines_init(struct intel_gt *gt) { - struct i915_gt_timelines *gt = &i915->gt.timelines; + struct i915_gt_timelines *timelines = >->timelines; - mutex_init(>->mutex); - INIT_LIST_HEAD(>->active_list); + mutex_init(&timelines->mutex); + INIT_LIST_HEAD(&timelines->active_list); - spin_lock_init(>->hwsp_lock); - INIT_LIST_HEAD(>->hwsp_free_list); + spin_lock_init(&timelines->hwsp_lock); + INIT_LIST_HEAD(&timelines->hwsp_free_list); /* via i915_gem_wait_for_idle() */ - i915_gem_shrinker_taints_mutex(i915, >->mutex); + i915_gem_shrinker_taints_mutex(gt->i915, &timelines->mutex); +} + +void i915_timelines_init(struct drm_i915_private *i915) +{ + timelines_init(&i915->gt); } static void timeline_add_to_active(struct i915_timeline *tl) @@ -293,6 +298,24 @@ static void timeline_remove_from_active(struct i915_timeline *tl) mutex_unlock(>->mutex); } +static void timelines_park(struct intel_gt *gt) +{ + struct i915_gt_timelines *timelines = >->timelines; + struct i915_timeline *timeline; + + mutex_lock(&timelines->mutex); + list_for_each_entry(timeline, &timelines->active_list, link) { + /* + * All known fences are completed so we can scrap + * the current sync point tracking and start afresh, + * any attempt to wait upon a previous sync point + * will be skipped as the fence was signaled. + */ + i915_syncmap_free(&timeline->sync); + } + mutex_unlock(&timelines->mutex); +} + /** * i915_timelines_park - called when the driver idles * @i915: the drm_i915_private device @@ -305,20 +328,7 @@ static void timeline_remove_from_active(struct i915_timeline *tl) */ void i915_timelines_park(struct drm_i915_private *i915) { - struct i915_gt_timelines *gt = &i915->gt.timelines; - struct i915_timeline *timeline; - - mutex_lock(>->mutex); - list_for_each_entry(timeline, >->active_list, link) { - /* - * All known fences are completed so we can scrap - * the current sync point tracking and start afresh, - * any attempt to wait upon a previous sync point - * will be skipped as the fence was signaled. 
- */ - i915_syncmap_free(&timeline->sync); - } - mutex_unlock(>->mutex); + timelines_park(&i915->gt); } void i915_timeline_fini(struct i915_timeline *timeline) @@ -563,14 +573,19 @@ void __i915_timeline_free(struct kref *kref) kfree(timeline); } -void i915_timelines_fini(struct drm_i915_private *i915) +static void timelines_fini(struct intel_gt *gt) { - struct i915_gt_timelines *gt = &i915->gt.timelines; + struct i915_gt_timelines *timelines = >->timelines; - GEM_BUG_ON(!list_empty(>->active_list)); - GEM_BUG_ON(!list_empty(>->hwsp_free_list)); + GEM_BUG_ON(!list_empty(&timelines->active_list)); + GEM_BUG_ON(!list_empty(&timelines->hwsp_free_list)); - mutex_destroy(>->mutex); + mutex_destroy(&timelines->mutex); +} + +void i915_timelines_fini(struct drm_i915_private *i915) +{ + timelines_fini(&i915->gt); } #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) -- cgit v1.2.3 From 3b8966287fa91403337742fa6b954880131bb1c0 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 21 Jun 2019 08:08:04 +0100 Subject: drm/i915: Compartmentalize i915_ggtt_cleanup_hw Continuing on the theme of better logical organization of our code, make the first step towards making the ggtt code better isolated from wider struct drm_i915_private. v2: * Cleanup of mm.wc_stash does not need struct_mutex. (Chris) Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190621070811.7006-26-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_gem_gtt.c | 55 +++++++++++++++++++++---------------- 1 file changed, 31 insertions(+), 24 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index b38af1df1fe9..d10590f868b7 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2775,14 +2775,19 @@ static void fini_aliasing_ppgtt(struct drm_i915_private *i915) struct i915_ggtt *ggtt = &i915->ggtt; struct i915_ppgtt *ppgtt; + mutex_lock(&i915->drm.struct_mutex); + ppgtt = fetch_and_zero(&i915->mm.aliasing_ppgtt); if (!ppgtt) - return; + goto out; i915_vm_put(&ppgtt->vm); ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; + +out: + mutex_unlock(&i915->drm.struct_mutex); } static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt) @@ -2883,20 +2888,14 @@ err_reserve: return ret; } -/** - * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization - * @dev_priv: i915 device - */ -void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv) +static void ggtt_cleanup_hw(struct i915_ggtt *ggtt) { - struct i915_ggtt *ggtt = &dev_priv->ggtt; + struct drm_i915_private *i915 = ggtt->vm.i915; struct i915_vma *vma, *vn; - struct pagevec *pvec; ggtt->vm.closed = true; - mutex_lock(&dev_priv->drm.struct_mutex); - fini_aliasing_ppgtt(dev_priv); + mutex_lock(&i915->drm.struct_mutex); list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) WARN_ON(i915_vma_unbind(vma)); @@ -2913,18 +2912,31 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv) ggtt->vm.cleanup(&ggtt->vm); - pvec = &dev_priv->mm.wc_stash.pvec; + mutex_unlock(&i915->drm.struct_mutex); + + arch_phys_wc_del(ggtt->mtrr); + io_mapping_fini(&ggtt->iomap); +} + +/** + * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization + * @dev_priv: i915 device + */ +void i915_ggtt_cleanup_hw(struct drm_i915_private *i915) +{ + struct pagevec *pvec; + + fini_aliasing_ppgtt(i915); + + ggtt_cleanup_hw(&i915->ggtt); + + pvec = &i915->mm.wc_stash.pvec; if (pvec->nr) { 
set_pages_array_wb(pvec->pages, pvec->nr); __pagevec_release(pvec); } - mutex_unlock(&dev_priv->drm.struct_mutex); - - arch_phys_wc_del(ggtt->mtrr); - io_mapping_fini(&ggtt->iomap); - - i915_gem_cleanup_stolen(dev_priv); + i915_gem_cleanup_stolen(i915); } static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) @@ -3520,11 +3532,6 @@ int i915_ggtt_probe_hw(struct drm_i915_private *i915) return 0; } -static void ggtt_cleanup_hw(struct i915_ggtt *ggtt) -{ - ggtt->vm.cleanup(&ggtt->vm); -} - static int ggtt_init_hw(struct i915_ggtt *ggtt) { struct drm_i915_private *i915 = ggtt->vm.i915; @@ -3545,7 +3552,7 @@ static int ggtt_init_hw(struct i915_ggtt *ggtt) if (!io_mapping_init_wc(&ggtt->iomap, ggtt->gmadr.start, ggtt->mappable_end)) { - ggtt_cleanup_hw(ggtt); + ggtt->vm.cleanup(&ggtt->vm); ret = -EIO; goto out; } @@ -3590,7 +3597,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv) return 0; out_gtt_cleanup: - ggtt_cleanup_hw(&dev_priv->ggtt); + dev_priv->ggtt.vm.cleanup(&dev_priv->ggtt.vm); return ret; } -- cgit v1.2.3 From 1d66377a76bd146bb5c16431549ed1d586fdfeb0 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 21 Jun 2019 08:08:05 +0100 Subject: drm/i915: Compartmentalize i915_gem_init_ggtt Continuing on the theme of better logical organization of our code, make the first step towards making the ggtt code better isolated from wider struct drm_i915_private. v2: * Bring the ickle onion unwind back. (Chris) * Rename to i915_init_ggtt. (Chris) Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190621070811.7006-27-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_gem.c | 2 +- drivers/gpu/drm/i915/i915_gem_gtt.c | 40 +++++++++++++++++++++++++------------ drivers/gpu/drm/i915/i915_gem_gtt.h | 2 +- 3 files changed, 29 insertions(+), 15 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index ae36955d819a..50d7e1e8d8ad 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1510,7 +1510,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv) mutex_lock(&dev_priv->drm.struct_mutex); intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); - ret = i915_gem_init_ggtt(dev_priv); + ret = i915_init_ggtt(dev_priv); if (ret) { GEM_BUG_ON(ret == -EIO); goto err_unlock; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index d10590f868b7..44a32ccbd922 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2816,7 +2816,13 @@ static void ggtt_release_guc_top(struct i915_ggtt *ggtt) drm_mm_remove_node(&ggtt->uc_fw); } -int i915_gem_init_ggtt(struct drm_i915_private *dev_priv) +static void cleanup_init_ggtt(struct i915_ggtt *ggtt) +{ + ggtt_release_guc_top(ggtt); + drm_mm_remove_node(&ggtt->error_capture); +} + +static int init_ggtt(struct i915_ggtt *ggtt) { /* Let GEM Manage all of the aperture. * @@ -2827,7 +2833,6 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv) * aperture. One page should be enough to keep any prefetching inside * of the aperture. */ - struct i915_ggtt *ggtt = &dev_priv->ggtt; unsigned long hole_start, hole_end; struct drm_mm_node *entry; int ret; @@ -2839,7 +2844,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv) * why. 
*/ ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE, - intel_wopcm_guc_size(&dev_priv->wopcm)); + intel_wopcm_guc_size(&ggtt->vm.i915->wopcm)); ret = intel_vgt_balloon(ggtt); if (ret) @@ -2860,7 +2865,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv) */ ret = ggtt_reserve_guc_top(ggtt); if (ret) - goto err_reserve; + goto err; /* Clear any non-preallocated blocks */ drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) { @@ -2873,19 +2878,28 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv) /* And finally clear the reserved guard page */ ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE); - if (INTEL_PPGTT(dev_priv) == INTEL_PPGTT_ALIASING) { - ret = init_aliasing_ppgtt(dev_priv); + return 0; + +err: + cleanup_init_ggtt(ggtt); + return ret; +} + +int i915_init_ggtt(struct drm_i915_private *i915) +{ + int ret; + + ret = init_ggtt(&i915->ggtt); + if (ret) + return ret; + + if (INTEL_PPGTT(i915) == INTEL_PPGTT_ALIASING) { + ret = init_aliasing_ppgtt(i915); if (ret) - goto err_appgtt; + cleanup_init_ggtt(&i915->ggtt); } return 0; - -err_appgtt: - ggtt_release_guc_top(ggtt); -err_reserve: - drm_mm_remove_node(&ggtt->error_capture); - return ret; } static void ggtt_cleanup_hw(struct i915_ggtt *ggtt) diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 6d1aad555ffa..e8b9ebe50c4e 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -655,7 +655,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv); int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv); void i915_ggtt_enable_guc(struct drm_i915_private *i915); void i915_ggtt_disable_guc(struct drm_i915_private *i915); -int i915_gem_init_ggtt(struct drm_i915_private *dev_priv); +int i915_init_ggtt(struct drm_i915_private *dev_priv); void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv); int i915_ppgtt_init_hw(struct intel_gt *gt); -- cgit v1.2.3 From d8a4424839eead6da07835e7d2f8b8f54ad6c6b0 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 21 Jun 2019 08:08:06 +0100 Subject: drm/i915: Store ggtt pointer in intel_gt This will become useful in the following patch. v2: * Assign the pointer through a helper on the top level to work around the layering violation. (Chris) v3: * Handle selftests. v4: * Move call to intel_gt_init_hw into mock_init_ggtt. 
(Chris) Signed-off-by: Tvrtko Ursulin Reviewed-by: Rodrigo Vivi Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190621070811.7006-28-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/gt/intel_gt.c | 5 +++++ drivers/gpu/drm/i915/gt/intel_gt.h | 1 + drivers/gpu/drm/i915/gt/intel_gt_types.h | 2 ++ drivers/gpu/drm/i915/i915_drv.c | 2 ++ drivers/gpu/drm/i915/selftests/mock_gtt.c | 2 ++ 5 files changed, 12 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index f632b7b5b490..e22ee3e823fa 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -22,6 +22,11 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915) intel_gt_pm_init_early(gt); } +void intel_gt_init_hw(struct drm_i915_private *i915) +{ + i915->gt.ggtt = &i915->ggtt; +} + static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set) { intel_uncore_rmw(uncore, reg, 0, set); diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h index fb064758b591..29cd15be6a01 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.h +++ b/drivers/gpu/drm/i915/gt/intel_gt.h @@ -12,6 +12,7 @@ struct drm_i915_private; void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915); +void intel_gt_init_hw(struct drm_i915_private *i915); void intel_gt_check_and_clear_faults(struct intel_gt *gt); void intel_gt_clear_error_registers(struct intel_gt *gt, diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h index 86a4985f8e84..be891492505d 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h @@ -17,11 +17,13 @@ #include "intel_wakeref.h" struct drm_i915_private; +struct i915_ggtt; struct intel_uncore; struct intel_gt { struct drm_i915_private *i915; struct intel_uncore *uncore; + struct i915_ggtt *ggtt; struct i915_gt_timelines { struct mutex mutex; /* protects list, tainted by GPU */ diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index c02fdefcb356..4a211d5e2701 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1593,6 +1593,8 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) if (ret) goto err_ggtt; + intel_gt_init_hw(dev_priv); + ret = i915_ggtt_enable_hw(dev_priv); if (ret) { DRM_ERROR("failed to enable GGTT\n"); diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c index 215372f75eec..e62a67e0f79c 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gtt.c +++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c @@ -117,6 +117,8 @@ void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt) ggtt->vm.vma_ops.clear_pages = clear_pages; i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT); + + intel_gt_init_hw(i915); } void mock_fini_ggtt(struct i915_ggtt *ggtt) -- cgit v1.2.3 From db45fb5bc1a09af37d2b0e6c4c3c258d1d732b89 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 21 Jun 2019 08:08:07 +0100 Subject: drm/i915: Compartmentalize ring buffer creation Continuing on the theme of compartmentalizing the code better to make future split between gt and display in global i915 clearer. v2: * Pass in ggtt instead of gt. 
(Chris) Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190621070811.7006-29-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/gt/intel_ringbuffer.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c index 8b1da57c3764..c946c42d4834 100644 --- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c @@ -1234,16 +1234,16 @@ void intel_ring_unpin(struct intel_ring *ring) i915_timeline_unpin(ring->timeline); } -static struct i915_vma * -intel_ring_create_vma(struct drm_i915_private *dev_priv, int size) +static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size) { - struct i915_address_space *vm = &dev_priv->ggtt.vm; + struct i915_address_space *vm = &ggtt->vm; + struct drm_i915_private *i915 = vm->i915; struct drm_i915_gem_object *obj; struct i915_vma *vma; - obj = i915_gem_object_create_stolen(dev_priv, size); + obj = i915_gem_object_create_stolen(i915, size); if (!obj) - obj = i915_gem_object_create_internal(dev_priv, size); + obj = i915_gem_object_create_internal(i915, size); if (IS_ERR(obj)) return ERR_CAST(obj); @@ -1270,6 +1270,7 @@ intel_engine_create_ring(struct intel_engine_cs *engine, struct i915_timeline *timeline, int size) { + struct drm_i915_private *i915 = engine->i915; struct intel_ring *ring; struct i915_vma *vma; @@ -1290,12 +1291,12 @@ intel_engine_create_ring(struct intel_engine_cs *engine, * of the buffer. */ ring->effective_size = size; - if (IS_I830(engine->i915) || IS_I845G(engine->i915)) + if (IS_I830(i915) || IS_I845G(i915)) ring->effective_size -= 2 * CACHELINE_BYTES; intel_ring_update_space(ring); - vma = intel_ring_create_vma(engine->i915, size); + vma = create_ring_vma(engine->gt->ggtt, size); if (IS_ERR(vma)) { kfree(ring); return ERR_CAST(vma); -- cgit v1.2.3 From ba4134a41931e0e25d29bb81f8b30a522e29f6f0 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 21 Jun 2019 08:08:08 +0100 Subject: drm/i915: Save trip via top-level i915 in a few more places For gt related operations it makes more logical sense to stay in the realm of gt instead of dereferencing via driver i915. This patch handles a few of the easy ones with work requiring more refactoring still outstanding. 
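The shape of these conversions is visible in the hunks below: an access such as engine->i915->ggtt.vm becomes engine->gt->ggtt->vm, so GT code stays within GT-owned state instead of detouring through the top-level device. A tiny illustrative model of the two access paths, with the struct layout heavily simplified and the names hypothetical:

#include <stdio.h>

/* Hypothetical slice of the object graph touched by these call sites. */
struct address_space { int id; };
struct ggtt { struct address_space vm; };
struct i915 { struct ggtt ggtt; /* display, pm, ... */ };
struct gt { struct i915 *i915; struct ggtt *ggtt; };
struct engine { struct i915 *i915; struct gt *gt; };

static struct address_space *engine_ggtt_vm(struct engine *engine)
{
	/*
	 * Before: engine->i915->ggtt.vm  (detours through the whole device)
	 * After:  engine->gt->ggtt->vm   (stays within the GT's own state)
	 */
	return &engine->gt->ggtt->vm;
}

int main(void)
{
	struct i915 dev = { .ggtt.vm.id = 42 };
	struct gt gt = { .i915 = &dev, .ggtt = &dev.ggtt };
	struct engine engine = { .i915 = &dev, .gt = &gt };

	printf("vm id = %d\n", engine_ggtt_vm(&engine)->id);
	return 0;
}

Both spellings resolve to the same GGTT today; the point of the conversion is that the GT-relative one keeps working once display and GT state are split further apart.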
Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190621070811.7006-30-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 3 +-- drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c | 3 +-- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 2 +- drivers/gpu/drm/i915/gt/intel_lrc.c | 4 ++-- drivers/gpu/drm/i915/gt/intel_ringbuffer.c | 2 +- drivers/gpu/drm/i915/gt/intel_workarounds.c | 2 +- drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 8 ++++---- drivers/gpu/drm/i915/gt/selftest_workarounds.c | 2 +- drivers/gpu/drm/i915/i915_gem_gtt.c | 4 ++-- drivers/gpu/drm/i915/i915_gem_render_state.c | 2 +- drivers/gpu/drm/i915/i915_gpu_error.c | 2 +- 11 files changed, 16 insertions(+), 18 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c index dcf60a8c229e..2154cdee4ab3 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c @@ -1039,8 +1039,7 @@ static int __igt_write_huge(struct i915_gem_context *ctx, u64 size, u64 offset, u32 dword, u32 val) { - struct drm_i915_private *i915 = to_i915(obj->base.dev); - struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm; + struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm; unsigned int flags = PIN_USER | PIN_OFFSET_FIXED; struct i915_vma *vma; int err; diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index f2d4e773404a..167c4a57c4cd 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -234,8 +234,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj, struct intel_engine_cs *engine, unsigned int dw) { - struct drm_i915_private *i915 = to_i915(obj->base.dev); - struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm; + struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm; struct i915_request *rq; struct i915_vma *vma; struct i915_vma *batch; diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index dd23fcaced52..5fee1e184b6b 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -584,7 +584,7 @@ static int init_status_page(struct intel_engine_cs *engine) i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC); - vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL); + vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto err; diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index e4b44b3f2d0b..f9f6d2bd2921 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -1954,7 +1954,7 @@ static int lrc_setup_wa_ctx(struct intel_engine_cs *engine) if (IS_ERR(obj)) return PTR_ERR(obj); - vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL); + vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto err; @@ -3038,7 +3038,7 @@ static int execlists_context_deferred_alloc(struct intel_context *ce, if (IS_ERR(ctx_obj)) return PTR_ERR(ctx_obj); - vma = i915_vma_instance(ctx_obj, &engine->i915->ggtt.vm, NULL); + vma = i915_vma_instance(ctx_obj, &engine->gt->ggtt->vm, NULL); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto error_deref_obj; diff --git 
a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c index c946c42d4834..e8e00df056e1 100644 --- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c @@ -1414,7 +1414,7 @@ alloc_context_vma(struct intel_engine_cs *engine) i915_gem_object_unpin_map(obj); } - vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); + vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto err_obj; diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 91f20f87fd03..2835ab70199b 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -1420,7 +1420,7 @@ static int engine_wa_list_verify(struct intel_context *ce, if (!wal->count) return 0; - vma = create_scratch(&ce->engine->i915->ggtt.vm, wal->count); + vma = create_scratch(&ce->engine->gt->ggtt->vm, wal->count); if (IS_ERR(vma)) return PTR_ERR(vma); diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c index 2d773f11e203..0dc3896e49f5 100644 --- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c @@ -130,7 +130,7 @@ static struct i915_request * hang_create_request(struct hang *h, struct intel_engine_cs *engine) { struct drm_i915_private *i915 = h->i915; - struct i915_address_space *vm = h->ctx->vm ?: &i915->ggtt.vm; + struct i915_address_space *vm = h->ctx->vm ?: &engine->gt->ggtt->vm; struct i915_request *rq = NULL; struct i915_vma *hws, *vma; unsigned int flags; @@ -143,12 +143,12 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine) struct drm_i915_gem_object *obj; void *vaddr; - obj = i915_gem_object_create_internal(h->i915, PAGE_SIZE); + obj = i915_gem_object_create_internal(i915, PAGE_SIZE); if (IS_ERR(obj)) return ERR_CAST(obj); vaddr = i915_gem_object_pin_map(obj, - i915_coherent_map_type(h->i915)); + i915_coherent_map_type(i915)); if (IS_ERR(vaddr)) { i915_gem_object_put(obj); return ERR_CAST(vaddr); @@ -255,7 +255,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine) } flags = 0; - if (INTEL_GEN(vm->i915) <= 5) + if (INTEL_GEN(i915) <= 5) flags |= I915_DISPATCH_SECURE; err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags); diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c index 931bc33fc46d..f12cb20fe785 100644 --- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c +++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c @@ -103,7 +103,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine) i915_gem_object_flush_map(result); i915_gem_object_unpin_map(result); - vma = i915_vma_instance(result, &engine->i915->ggtt.vm, NULL); + vma = i915_vma_instance(result, &engine->gt->ggtt->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto err_obj; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 44a32ccbd922..4dbfbccfa618 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1901,7 +1901,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, if (flush) { mark_tlbs_dirty(&ppgtt->base); - gen6_ggtt_invalidate(&vm->i915->ggtt); + gen6_ggtt_invalidate(vm->gt->ggtt); } goto out; @@ -2045,7 +2045,7 @@ static const struct i915_vma_ops pd_vma_ops = { static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size) { 
struct drm_i915_private *i915 = ppgtt->base.vm.i915; - struct i915_ggtt *ggtt = &i915->ggtt; + struct i915_ggtt *ggtt = ppgtt->base.vm.gt->ggtt; struct i915_vma *vma; GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)); diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c index 4ee032072d4f..6bda08c1e8d7 100644 --- a/drivers/gpu/drm/i915/i915_gem_render_state.c +++ b/drivers/gpu/drm/i915/i915_gem_render_state.c @@ -194,7 +194,7 @@ int i915_gem_render_state_emit(struct i915_request *rq) if (IS_ERR(so.obj)) return PTR_ERR(so.obj); - so.vma = i915_vma_instance(so.obj, &engine->i915->ggtt.vm, NULL); + so.vma = i915_vma_instance(so.obj, &engine->gt->ggtt->vm, NULL); if (IS_ERR(so.vma)) { err = PTR_ERR(so.vma); goto err_obj; diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index a497cf7acb6a..330a25c5db6a 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -1405,12 +1405,12 @@ capture_object(struct drm_i915_private *dev_priv, static void gem_record_rings(struct i915_gpu_state *error) { struct drm_i915_private *i915 = error->i915; - struct i915_ggtt *ggtt = &i915->ggtt; int i; for (i = 0; i < I915_NUM_ENGINES; i++) { struct intel_engine_cs *engine = i915->engine[i]; struct drm_i915_error_engine *ee = &error->engine[i]; + struct i915_ggtt *ggtt = engine->gt->ggtt; struct i915_request *request; ee->engine_id = -1; -- cgit v1.2.3 From 4c6d51ea2a68699d0d47c2361f691cf9265371b6 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 21 Jun 2019 08:08:09 +0100 Subject: drm/i915: Make timelines gt centric Our timelines are stored inside intel_gt so we can convert the interface to take exactly that and not i915. At the same time re-order the params to our more typical layout and replace the backpointer to the new containing structure. 
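At the interface level, the conversion below amounts to the following signature and call-site changes (condensed from the diff; the i915 backpointer inside struct i915_timeline becomes an intel_gt pointer to match):

	/* Before */
	int i915_timeline_init(struct drm_i915_private *i915,
			       struct i915_timeline *tl,
			       struct i915_vma *hwsp);
	struct i915_timeline *
	i915_timeline_create(struct drm_i915_private *i915,
			     struct i915_vma *global_hwsp);

	/* After: gt-centric, with the object being initialised placed first. */
	int i915_timeline_init(struct i915_timeline *tl,
			       struct intel_gt *gt,
			       struct i915_vma *hwsp);
	struct i915_timeline *
	i915_timeline_create(struct intel_gt *gt, struct i915_vma *global_hwsp);

	/* Typical caller, e.g. creating a single-timeline context: */
	timeline = i915_timeline_create(&dev_priv->gt, NULL);
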
Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190621070811.7006-31-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/gem/i915_gem_context.c | 2 +- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 4 +-- drivers/gpu/drm/i915/gt/intel_lrc.c | 7 +++-- drivers/gpu/drm/i915/gt/intel_ringbuffer.c | 2 +- drivers/gpu/drm/i915/gt/mock_engine.c | 2 +- drivers/gpu/drm/i915/i915_timeline.c | 43 ++++++++++++-------------- drivers/gpu/drm/i915/i915_timeline.h | 7 ++--- drivers/gpu/drm/i915/i915_timeline_types.h | 2 +- drivers/gpu/drm/i915/selftests/i915_timeline.c | 8 ++--- drivers/gpu/drm/i915/selftests/mock_timeline.c | 2 +- 10 files changed, 38 insertions(+), 41 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index 35871c8a42a6..fb691535fbf2 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -530,7 +530,7 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags) if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) { struct i915_timeline *timeline; - timeline = i915_timeline_create(dev_priv, NULL); + timeline = i915_timeline_create(&dev_priv->gt, NULL); if (IS_ERR(timeline)) { context_close(ctx); return ERR_CAST(timeline); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 5fee1e184b6b..89edf97d8ad7 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -740,8 +740,8 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine) if (!frame) return -ENOMEM; - if (i915_timeline_init(engine->i915, - &frame->timeline, + if (i915_timeline_init(&frame->timeline, + engine->gt, engine->status_page.vma)) goto out_frame; diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index f9f6d2bd2921..415fdf2eb997 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -3005,12 +3005,13 @@ err_unpin_ctx: return ret; } -static struct i915_timeline *get_timeline(struct i915_gem_context *ctx) +static struct i915_timeline * +get_timeline(struct i915_gem_context *ctx, struct intel_gt *gt) { if (ctx->timeline) return i915_timeline_get(ctx->timeline); else - return i915_timeline_create(ctx->i915, NULL); + return i915_timeline_create(gt, NULL); } static int execlists_context_deferred_alloc(struct intel_context *ce, @@ -3044,7 +3045,7 @@ static int execlists_context_deferred_alloc(struct intel_context *ce, goto error_deref_obj; } - timeline = get_timeline(ce->gem_context); + timeline = get_timeline(ce->gem_context, engine->gt); if (IS_ERR(timeline)) { ret = PTR_ERR(timeline); goto error_deref_obj; diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c index e8e00df056e1..9a748be0ce0c 100644 --- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c @@ -2273,7 +2273,7 @@ int intel_ring_submission_init(struct intel_engine_cs *engine) struct intel_ring *ring; int err; - timeline = i915_timeline_create(engine->i915, engine->status_page.vma); + timeline = i915_timeline_create(engine->gt, engine->status_page.vma); if (IS_ERR(timeline)) { err = PTR_ERR(timeline); goto err; diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c index b022af3385f3..423027aa71cd 100644 --- 
a/drivers/gpu/drm/i915/gt/mock_engine.c +++ b/drivers/gpu/drm/i915/gt/mock_engine.c @@ -56,7 +56,7 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine) if (!ring) return NULL; - if (i915_timeline_init(engine->i915, &ring->timeline, NULL)) { + if (i915_timeline_init(&ring->timeline, engine->gt, NULL)) { kfree(ring); return NULL; } diff --git a/drivers/gpu/drm/i915/i915_timeline.c b/drivers/gpu/drm/i915/i915_timeline.c index dc885a13b16d..3e2c3169dc69 100644 --- a/drivers/gpu/drm/i915/i915_timeline.c +++ b/drivers/gpu/drm/i915/i915_timeline.c @@ -4,6 +4,8 @@ * Copyright © 2016-2018 Intel Corporation */ +#include "gt/intel_gt_types.h" + #include "i915_drv.h" #include "i915_active.h" @@ -14,7 +16,8 @@ #define ptr_test_bit(ptr, bit) ((unsigned long)(ptr) & BIT(bit)) struct i915_timeline_hwsp { - struct i915_gt_timelines *gt; + struct intel_gt *gt; + struct i915_gt_timelines *gt_timelines; struct list_head free_link; struct i915_vma *vma; u64 free_bitmap; @@ -28,14 +31,9 @@ struct i915_timeline_cacheline { #define CACHELINE_FREE CACHELINE_BITS }; -static inline struct drm_i915_private * -hwsp_to_i915(struct i915_timeline_hwsp *hwsp) -{ - return container_of(hwsp->gt, struct drm_i915_private, gt.timelines); -} - -static struct i915_vma *__hwsp_alloc(struct drm_i915_private *i915) +static struct i915_vma *__hwsp_alloc(struct intel_gt *gt) { + struct drm_i915_private *i915 = gt->i915; struct drm_i915_gem_object *obj; struct i915_vma *vma; @@ -45,7 +43,7 @@ static struct i915_vma *__hwsp_alloc(struct drm_i915_private *i915) i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC); - vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); + vma = i915_vma_instance(obj, >->ggtt->vm, NULL); if (IS_ERR(vma)) i915_gem_object_put(obj); @@ -55,8 +53,7 @@ static struct i915_vma *__hwsp_alloc(struct drm_i915_private *i915) static struct i915_vma * hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline) { - struct drm_i915_private *i915 = timeline->i915; - struct i915_gt_timelines *gt = &i915->gt.timelines; + struct i915_gt_timelines *gt = &timeline->gt->timelines; struct i915_timeline_hwsp *hwsp; BUILD_BUG_ON(BITS_PER_TYPE(u64) * CACHELINE_BYTES > PAGE_SIZE); @@ -75,16 +72,17 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline) if (!hwsp) return ERR_PTR(-ENOMEM); - vma = __hwsp_alloc(i915); + vma = __hwsp_alloc(timeline->gt); if (IS_ERR(vma)) { kfree(hwsp); return vma; } vma->private = hwsp; + hwsp->gt = timeline->gt; hwsp->vma = vma; hwsp->free_bitmap = ~0ull; - hwsp->gt = gt; + hwsp->gt_timelines = gt; spin_lock_irq(>->hwsp_lock); list_add(&hwsp->free_link, >->hwsp_free_list); @@ -104,7 +102,7 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline) static void __idle_hwsp_free(struct i915_timeline_hwsp *hwsp, int cacheline) { - struct i915_gt_timelines *gt = hwsp->gt; + struct i915_gt_timelines *gt = hwsp->gt_timelines; unsigned long flags; spin_lock_irqsave(>->hwsp_lock, flags); @@ -170,7 +168,7 @@ cacheline_alloc(struct i915_timeline_hwsp *hwsp, unsigned int cacheline) cl->hwsp = hwsp; cl->vaddr = page_pack_bits(vaddr, cacheline); - i915_active_init(hwsp_to_i915(hwsp), &cl->active, __cacheline_retire); + i915_active_init(hwsp->gt->i915, &cl->active, __cacheline_retire); return cl; } @@ -196,8 +194,8 @@ static void cacheline_free(struct i915_timeline_cacheline *cl) __idle_cacheline_free(cl); } -int i915_timeline_init(struct drm_i915_private *i915, - struct i915_timeline *timeline, +int i915_timeline_init(struct i915_timeline *timeline, + struct 
intel_gt *gt, struct i915_vma *hwsp) { void *vaddr; @@ -212,7 +210,7 @@ int i915_timeline_init(struct drm_i915_private *i915, */ BUILD_BUG_ON(KSYNCMAP < I915_NUM_ENGINES); - timeline->i915 = i915; + timeline->gt = gt; timeline->pin_count = 0; timeline->has_initial_breadcrumb = !hwsp; timeline->hwsp_cacheline = NULL; @@ -282,7 +280,7 @@ void i915_timelines_init(struct drm_i915_private *i915) static void timeline_add_to_active(struct i915_timeline *tl) { - struct i915_gt_timelines *gt = &tl->i915->gt.timelines; + struct i915_gt_timelines *gt = &tl->gt->timelines; mutex_lock(>->mutex); list_add(&tl->link, >->active_list); @@ -291,7 +289,7 @@ static void timeline_add_to_active(struct i915_timeline *tl) static void timeline_remove_from_active(struct i915_timeline *tl) { - struct i915_gt_timelines *gt = &tl->i915->gt.timelines; + struct i915_gt_timelines *gt = &tl->gt->timelines; mutex_lock(>->mutex); list_del(&tl->link); @@ -347,8 +345,7 @@ void i915_timeline_fini(struct i915_timeline *timeline) } struct i915_timeline * -i915_timeline_create(struct drm_i915_private *i915, - struct i915_vma *global_hwsp) +i915_timeline_create(struct intel_gt *gt, struct i915_vma *global_hwsp) { struct i915_timeline *timeline; int err; @@ -357,7 +354,7 @@ i915_timeline_create(struct drm_i915_private *i915, if (!timeline) return ERR_PTR(-ENOMEM); - err = i915_timeline_init(i915, timeline, global_hwsp); + err = i915_timeline_init(timeline, gt, global_hwsp); if (err) { kfree(timeline); return ERR_PTR(err); diff --git a/drivers/gpu/drm/i915/i915_timeline.h b/drivers/gpu/drm/i915/i915_timeline.h index 36e5e5a65155..a454d49f229f 100644 --- a/drivers/gpu/drm/i915/i915_timeline.h +++ b/drivers/gpu/drm/i915/i915_timeline.h @@ -31,14 +31,13 @@ #include "i915_syncmap.h" #include "i915_timeline_types.h" -int i915_timeline_init(struct drm_i915_private *i915, - struct i915_timeline *tl, +int i915_timeline_init(struct i915_timeline *tl, + struct intel_gt *gt, struct i915_vma *hwsp); void i915_timeline_fini(struct i915_timeline *tl); struct i915_timeline * -i915_timeline_create(struct drm_i915_private *i915, - struct i915_vma *global_hwsp); +i915_timeline_create(struct intel_gt *gt, struct i915_vma *global_hwsp); static inline struct i915_timeline * i915_timeline_get(struct i915_timeline *timeline) diff --git a/drivers/gpu/drm/i915/i915_timeline_types.h b/drivers/gpu/drm/i915/i915_timeline_types.h index fce5cb4f1090..931585e12d41 100644 --- a/drivers/gpu/drm/i915/i915_timeline_types.h +++ b/drivers/gpu/drm/i915/i915_timeline_types.h @@ -59,7 +59,7 @@ struct i915_timeline { struct i915_syncmap *sync; struct list_head link; - struct drm_i915_private *i915; + struct intel_gt *gt; struct kref kref; }; diff --git a/drivers/gpu/drm/i915/selftests/i915_timeline.c b/drivers/gpu/drm/i915/selftests/i915_timeline.c index 76d3977f1d4b..44d031446f08 100644 --- a/drivers/gpu/drm/i915/selftests/i915_timeline.c +++ b/drivers/gpu/drm/i915/selftests/i915_timeline.c @@ -66,7 +66,7 @@ static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state, unsigned long cacheline; int err; - tl = i915_timeline_create(state->i915, NULL); + tl = i915_timeline_create(&state->i915->gt, NULL); if (IS_ERR(tl)) return PTR_ERR(tl); @@ -448,7 +448,7 @@ tl_write(struct i915_timeline *tl, struct intel_engine_cs *engine, u32 value) struct i915_request *rq; int err; - lockdep_assert_held(&tl->i915->drm.struct_mutex); /* lazy rq refs */ + lockdep_assert_held(&tl->gt->i915->drm.struct_mutex); /* lazy rq refs */ err = i915_timeline_pin(tl); if (err) { @@ -478,7 +478,7 @@ 
checked_i915_timeline_create(struct drm_i915_private *i915) { struct i915_timeline *tl; - tl = i915_timeline_create(i915, NULL); + tl = i915_timeline_create(&i915->gt, NULL); if (IS_ERR(tl)) return tl; @@ -660,7 +660,7 @@ static int live_hwsp_wrap(void *arg) mutex_lock(&i915->drm.struct_mutex); wakeref = intel_runtime_pm_get(&i915->runtime_pm); - tl = i915_timeline_create(i915, NULL); + tl = i915_timeline_create(&i915->gt, NULL); if (IS_ERR(tl)) { err = PTR_ERR(tl); goto out_rpm; diff --git a/drivers/gpu/drm/i915/selftests/mock_timeline.c b/drivers/gpu/drm/i915/selftests/mock_timeline.c index 65b52be23d42..c80ac0fbdd3b 100644 --- a/drivers/gpu/drm/i915/selftests/mock_timeline.c +++ b/drivers/gpu/drm/i915/selftests/mock_timeline.c @@ -10,7 +10,7 @@ void mock_timeline_init(struct i915_timeline *timeline, u64 context) { - timeline->i915 = NULL; + timeline->gt = NULL; timeline->fence_context = context; mutex_init(&timeline->mutex); -- cgit v1.2.3 From f0c02c1b91888ccac539388eacb0659bf263a557 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 21 Jun 2019 08:08:10 +0100 Subject: drm/i915: Rename i915_timeline to intel_timeline and move under gt Move all timeline code under gt and rename to intel_gt prefix. Signed-off-by: Tvrtko Ursulin Suggested-by: Chris Wilson Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190621070811.7006-32-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/Makefile | 2 +- drivers/gpu/drm/i915/Makefile.header-test | 1 - drivers/gpu/drm/i915/gem/i915_gem_context.c | 10 +- drivers/gpu/drm/i915/gem/i915_gem_context_types.h | 4 +- drivers/gpu/drm/i915/gem/i915_gem_pm.c | 2 +- drivers/gpu/drm/i915/gt/intel_engine.h | 4 +- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 14 +- drivers/gpu/drm/i915/gt/intel_engine_types.h | 4 +- drivers/gpu/drm/i915/gt/intel_lrc.c | 10 +- drivers/gpu/drm/i915/gt/intel_reset.c | 2 +- drivers/gpu/drm/i915/gt/intel_ringbuffer.c | 18 +- drivers/gpu/drm/i915/gt/intel_timeline.c | 591 ++++++++++++++ drivers/gpu/drm/i915/gt/intel_timeline.h | 93 +++ drivers/gpu/drm/i915/gt/intel_timeline_types.h | 67 ++ drivers/gpu/drm/i915/gt/mock_engine.c | 10 +- drivers/gpu/drm/i915/gt/selftest_timeline.c | 845 +++++++++++++++++++++ drivers/gpu/drm/i915/gt/selftests/mock_timeline.c | 29 + drivers/gpu/drm/i915/gt/selftests/mock_timeline.h | 15 + drivers/gpu/drm/i915/i915_drv.h | 2 +- drivers/gpu/drm/i915/i915_gem.c | 8 +- drivers/gpu/drm/i915/i915_gem_gtt.h | 2 +- drivers/gpu/drm/i915/i915_request.c | 14 +- drivers/gpu/drm/i915/i915_request.h | 8 +- drivers/gpu/drm/i915/i915_timeline.c | 591 -------------- drivers/gpu/drm/i915/i915_timeline.h | 93 --- drivers/gpu/drm/i915/i915_timeline_types.h | 67 -- .../gpu/drm/i915/selftests/i915_live_selftests.h | 2 +- .../gpu/drm/i915/selftests/i915_mock_selftests.h | 2 +- drivers/gpu/drm/i915/selftests/i915_timeline.c | 845 --------------------- drivers/gpu/drm/i915/selftests/mock_gem_device.c | 6 +- drivers/gpu/drm/i915/selftests/mock_timeline.c | 29 - drivers/gpu/drm/i915/selftests/mock_timeline.h | 15 - 32 files changed, 1702 insertions(+), 1703 deletions(-) create mode 100644 drivers/gpu/drm/i915/gt/intel_timeline.c create mode 100644 drivers/gpu/drm/i915/gt/intel_timeline.h create mode 100644 drivers/gpu/drm/i915/gt/intel_timeline_types.h create mode 100644 drivers/gpu/drm/i915/gt/selftest_timeline.c create mode 100644 drivers/gpu/drm/i915/gt/selftests/mock_timeline.c create mode 100644 drivers/gpu/drm/i915/gt/selftests/mock_timeline.h delete mode 100644 
drivers/gpu/drm/i915/i915_timeline.c delete mode 100644 drivers/gpu/drm/i915/i915_timeline.h delete mode 100644 drivers/gpu/drm/i915/i915_timeline_types.h delete mode 100644 drivers/gpu/drm/i915/selftests/i915_timeline.c delete mode 100644 drivers/gpu/drm/i915/selftests/mock_timeline.c delete mode 100644 drivers/gpu/drm/i915/selftests/mock_timeline.h (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 19f8b6745772..84ac0fd1b8d0 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -82,6 +82,7 @@ gt-y += \ gt/intel_ringbuffer.o \ gt/intel_mocs.o \ gt/intel_sseu.o \ + gt/intel_timeline.o \ gt/intel_workarounds.o gt-$(CONFIG_DRM_I915_SELFTEST) += \ gt/mock_engine.o @@ -127,7 +128,6 @@ i915-y += \ i915_query.o \ i915_request.o \ i915_scheduler.o \ - i915_timeline.o \ i915_trace_points.o \ i915_vma.o \ intel_wopcm.o diff --git a/drivers/gpu/drm/i915/Makefile.header-test b/drivers/gpu/drm/i915/Makefile.header-test index cb74242f9c3b..b1c3e642f621 100644 --- a/drivers/gpu/drm/i915/Makefile.header-test +++ b/drivers/gpu/drm/i915/Makefile.header-test @@ -12,7 +12,6 @@ header_test := \ i915_priolist_types.h \ i915_reg.h \ i915_scheduler_types.h \ - i915_timeline_types.h \ i915_utils.h \ intel_csr.h \ intel_drv.h \ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index fb691535fbf2..628673d1d7f8 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -316,7 +316,7 @@ static void i915_gem_context_free(struct i915_gem_context *ctx) mutex_destroy(&ctx->engines_mutex); if (ctx->timeline) - i915_timeline_put(ctx->timeline); + intel_timeline_put(ctx->timeline); kfree(ctx->name); put_pid(ctx->pid); @@ -528,9 +528,9 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags) } if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) { - struct i915_timeline *timeline; + struct intel_timeline *timeline; - timeline = i915_timeline_create(&dev_priv->gt, NULL); + timeline = intel_timeline_create(&dev_priv->gt, NULL); if (IS_ERR(timeline)) { context_close(ctx); return ERR_CAST(timeline); @@ -2015,8 +2015,8 @@ static int clone_timeline(struct i915_gem_context *dst, GEM_BUG_ON(src->timeline == dst->timeline); if (dst->timeline) - i915_timeline_put(dst->timeline); - dst->timeline = i915_timeline_get(src->timeline); + intel_timeline_put(dst->timeline); + dst->timeline = intel_timeline_get(src->timeline); } return 0; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h index cc513410eeef..0ee61482ef94 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h @@ -26,7 +26,7 @@ struct pid; struct drm_i915_private; struct drm_i915_file_private; struct i915_address_space; -struct i915_timeline; +struct intel_timeline; struct intel_ring; struct i915_gem_engines { @@ -77,7 +77,7 @@ struct i915_gem_context { struct i915_gem_engines __rcu *engines; struct mutex engines_mutex; /* guards writes to engines */ - struct i915_timeline *timeline; + struct intel_timeline *timeline; /** * @vm: unique address space (GTT) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c index 05011d4a3b88..8f721cf0ab99 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c @@ -38,7 +38,7 @@ static void i915_gem_park(struct 
drm_i915_private *i915) i915_gem_batch_pool_fini(&engine->batch_pool); } - i915_timelines_park(i915); + intel_timelines_park(i915); i915_vma_parked(i915); i915_globals_park(); diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h index 9bb6ff76680e..557b08b13feb 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine.h +++ b/drivers/gpu/drm/i915/gt/intel_engine.h @@ -14,7 +14,7 @@ #include "i915_reg.h" #include "i915_request.h" #include "i915_selftest.h" -#include "i915_timeline.h" +#include "gt/intel_timeline.h" #include "intel_engine_types.h" #include "intel_gpu_commands.h" #include "intel_workarounds.h" @@ -200,7 +200,7 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value) struct intel_ring * intel_engine_create_ring(struct intel_engine_cs *engine, - struct i915_timeline *timeline, + struct intel_timeline *timeline, int size); int intel_ring_pin(struct intel_ring *ring); void intel_ring_reset(struct intel_ring *ring, u32 tail); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 89edf97d8ad7..e30212e219ec 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -724,7 +724,7 @@ void intel_engines_set_scheduler_caps(struct drm_i915_private *i915) struct measure_breadcrumb { struct i915_request rq; - struct i915_timeline timeline; + struct intel_timeline timeline; struct intel_ring ring; u32 cs[1024]; }; @@ -740,9 +740,9 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine) if (!frame) return -ENOMEM; - if (i915_timeline_init(&frame->timeline, - engine->gt, - engine->status_page.vma)) + if (intel_timeline_init(&frame->timeline, + engine->gt, + engine->status_page.vma)) goto out_frame; INIT_LIST_HEAD(&frame->ring.request_list); @@ -757,17 +757,17 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine) frame->rq.ring = &frame->ring; frame->rq.timeline = &frame->timeline; - dw = i915_timeline_pin(&frame->timeline); + dw = intel_timeline_pin(&frame->timeline); if (dw < 0) goto out_timeline; dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs; GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */ - i915_timeline_unpin(&frame->timeline); + intel_timeline_unpin(&frame->timeline); out_timeline: - i915_timeline_fini(&frame->timeline); + intel_timeline_fini(&frame->timeline); out_frame: kfree(frame); return dw; diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index fb65e96fa36b..7e056114344e 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -20,7 +20,7 @@ #include "i915_pmu.h" #include "i915_priolist_types.h" #include "i915_selftest.h" -#include "i915_timeline_types.h" +#include "gt/intel_timeline_types.h" #include "intel_sseu.h" #include "intel_wakeref.h" #include "intel_workarounds_types.h" @@ -68,7 +68,7 @@ struct intel_ring { struct i915_vma *vma; void *vaddr; - struct i915_timeline *timeline; + struct intel_timeline *timeline; struct list_head request_list; struct list_head active_link; diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 415fdf2eb997..3abcec3e4e0e 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -3005,13 +3005,13 @@ err_unpin_ctx: return ret; } -static struct i915_timeline * +static struct intel_timeline * get_timeline(struct i915_gem_context *ctx, struct intel_gt *gt) { if 
(ctx->timeline) - return i915_timeline_get(ctx->timeline); + return intel_timeline_get(ctx->timeline); else - return i915_timeline_create(gt, NULL); + return intel_timeline_create(gt, NULL); } static int execlists_context_deferred_alloc(struct intel_context *ce, @@ -3021,7 +3021,7 @@ static int execlists_context_deferred_alloc(struct intel_context *ce, struct i915_vma *vma; u32 context_size; struct intel_ring *ring; - struct i915_timeline *timeline; + struct intel_timeline *timeline; int ret; if (ce->state) @@ -3054,7 +3054,7 @@ static int execlists_context_deferred_alloc(struct intel_context *ce, ring = intel_engine_create_ring(engine, timeline, ce->gem_context->ring_size); - i915_timeline_put(timeline); + intel_timeline_put(timeline); if (IS_ERR(ring)) { ret = PTR_ERR(ring); goto error_deref_obj; diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 5297b3acb56d..3c925af64793 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -851,7 +851,7 @@ void i915_gem_set_wedged(struct drm_i915_private *i915) static bool __i915_gem_unset_wedged(struct drm_i915_private *i915) { struct i915_gpu_error *error = &i915->gpu_error; - struct i915_timeline *tl; + struct intel_timeline *tl; if (!test_bit(I915_WEDGED, &error->flags)) return true; diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c index 9a748be0ce0c..aa483bba04bf 100644 --- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c @@ -1156,7 +1156,7 @@ int intel_ring_pin(struct intel_ring *ring) if (atomic_fetch_inc(&ring->pin_count)) return 0; - ret = i915_timeline_pin(ring->timeline); + ret = intel_timeline_pin(ring->timeline); if (ret) goto err_unpin; @@ -1194,7 +1194,7 @@ int intel_ring_pin(struct intel_ring *ring) err_ring: i915_vma_unpin(vma); err_timeline: - i915_timeline_unpin(ring->timeline); + intel_timeline_unpin(ring->timeline); err_unpin: atomic_dec(&ring->pin_count); return ret; @@ -1231,7 +1231,7 @@ void intel_ring_unpin(struct intel_ring *ring) ring->vma->obj->pin_global--; i915_vma_unpin(ring->vma); - i915_timeline_unpin(ring->timeline); + intel_timeline_unpin(ring->timeline); } static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size) @@ -1267,7 +1267,7 @@ err: struct intel_ring * intel_engine_create_ring(struct intel_engine_cs *engine, - struct i915_timeline *timeline, + struct intel_timeline *timeline, int size) { struct drm_i915_private *i915 = engine->i915; @@ -1283,7 +1283,7 @@ intel_engine_create_ring(struct intel_engine_cs *engine, kref_init(&ring->ref); INIT_LIST_HEAD(&ring->request_list); - ring->timeline = i915_timeline_get(timeline); + ring->timeline = intel_timeline_get(timeline); ring->size = size; /* Workaround an erratum on the i830 which causes a hang if @@ -1313,7 +1313,7 @@ void intel_ring_free(struct kref *ref) i915_vma_close(ring->vma); i915_vma_put(ring->vma); - i915_timeline_put(ring->timeline); + intel_timeline_put(ring->timeline); kfree(ring); } @@ -2269,11 +2269,11 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine) int intel_ring_submission_init(struct intel_engine_cs *engine) { - struct i915_timeline *timeline; + struct intel_timeline *timeline; struct intel_ring *ring; int err; - timeline = i915_timeline_create(engine->gt, engine->status_page.vma); + timeline = intel_timeline_create(engine->gt, engine->status_page.vma); if (IS_ERR(timeline)) { err = PTR_ERR(timeline); goto err; @@ -2281,7 +2281,7 
@@ int intel_ring_submission_init(struct intel_engine_cs *engine) GEM_BUG_ON(timeline->has_initial_breadcrumb); ring = intel_engine_create_ring(engine, timeline, 32 * PAGE_SIZE); - i915_timeline_put(timeline); + intel_timeline_put(timeline); if (IS_ERR(ring)) { err = PTR_ERR(ring); goto err; diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c new file mode 100644 index 000000000000..1a3f04458730 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_timeline.c @@ -0,0 +1,591 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2016-2018 Intel Corporation + */ + +#include "gt/intel_gt_types.h" + +#include "i915_drv.h" + +#include "i915_active.h" +#include "i915_syncmap.h" +#include "gt/intel_timeline.h" + +#define ptr_set_bit(ptr, bit) ((typeof(ptr))((unsigned long)(ptr) | BIT(bit))) +#define ptr_test_bit(ptr, bit) ((unsigned long)(ptr) & BIT(bit)) + +struct intel_timeline_hwsp { + struct intel_gt *gt; + struct i915_gt_timelines *gt_timelines; + struct list_head free_link; + struct i915_vma *vma; + u64 free_bitmap; +}; + +struct intel_timeline_cacheline { + struct i915_active active; + struct intel_timeline_hwsp *hwsp; + void *vaddr; +#define CACHELINE_BITS 6 +#define CACHELINE_FREE CACHELINE_BITS +}; + +static struct i915_vma *__hwsp_alloc(struct intel_gt *gt) +{ + struct drm_i915_private *i915 = gt->i915; + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + + obj = i915_gem_object_create_internal(i915, PAGE_SIZE); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC); + + vma = i915_vma_instance(obj, >->ggtt->vm, NULL); + if (IS_ERR(vma)) + i915_gem_object_put(obj); + + return vma; +} + +static struct i915_vma * +hwsp_alloc(struct intel_timeline *timeline, unsigned int *cacheline) +{ + struct i915_gt_timelines *gt = &timeline->gt->timelines; + struct intel_timeline_hwsp *hwsp; + + BUILD_BUG_ON(BITS_PER_TYPE(u64) * CACHELINE_BYTES > PAGE_SIZE); + + spin_lock_irq(>->hwsp_lock); + + /* hwsp_free_list only contains HWSP that have available cachelines */ + hwsp = list_first_entry_or_null(>->hwsp_free_list, + typeof(*hwsp), free_link); + if (!hwsp) { + struct i915_vma *vma; + + spin_unlock_irq(>->hwsp_lock); + + hwsp = kmalloc(sizeof(*hwsp), GFP_KERNEL); + if (!hwsp) + return ERR_PTR(-ENOMEM); + + vma = __hwsp_alloc(timeline->gt); + if (IS_ERR(vma)) { + kfree(hwsp); + return vma; + } + + vma->private = hwsp; + hwsp->gt = timeline->gt; + hwsp->vma = vma; + hwsp->free_bitmap = ~0ull; + hwsp->gt_timelines = gt; + + spin_lock_irq(>->hwsp_lock); + list_add(&hwsp->free_link, >->hwsp_free_list); + } + + GEM_BUG_ON(!hwsp->free_bitmap); + *cacheline = __ffs64(hwsp->free_bitmap); + hwsp->free_bitmap &= ~BIT_ULL(*cacheline); + if (!hwsp->free_bitmap) + list_del(&hwsp->free_link); + + spin_unlock_irq(>->hwsp_lock); + + GEM_BUG_ON(hwsp->vma->private != hwsp); + return hwsp->vma; +} + +static void __idle_hwsp_free(struct intel_timeline_hwsp *hwsp, int cacheline) +{ + struct i915_gt_timelines *gt = hwsp->gt_timelines; + unsigned long flags; + + spin_lock_irqsave(>->hwsp_lock, flags); + + /* As a cacheline becomes available, publish the HWSP on the freelist */ + if (!hwsp->free_bitmap) + list_add_tail(&hwsp->free_link, >->hwsp_free_list); + + GEM_BUG_ON(cacheline >= BITS_PER_TYPE(hwsp->free_bitmap)); + hwsp->free_bitmap |= BIT_ULL(cacheline); + + /* And if no one is left using it, give the page back to the system */ + if (hwsp->free_bitmap == ~0ull) { + i915_vma_put(hwsp->vma); + list_del(&hwsp->free_link); 
+ kfree(hwsp); + } + + spin_unlock_irqrestore(>->hwsp_lock, flags); +} + +static void __idle_cacheline_free(struct intel_timeline_cacheline *cl) +{ + GEM_BUG_ON(!i915_active_is_idle(&cl->active)); + + i915_gem_object_unpin_map(cl->hwsp->vma->obj); + i915_vma_put(cl->hwsp->vma); + __idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS)); + + i915_active_fini(&cl->active); + kfree(cl); +} + +static void __cacheline_retire(struct i915_active *active) +{ + struct intel_timeline_cacheline *cl = + container_of(active, typeof(*cl), active); + + i915_vma_unpin(cl->hwsp->vma); + if (ptr_test_bit(cl->vaddr, CACHELINE_FREE)) + __idle_cacheline_free(cl); +} + +static struct intel_timeline_cacheline * +cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline) +{ + struct intel_timeline_cacheline *cl; + void *vaddr; + + GEM_BUG_ON(cacheline >= BIT(CACHELINE_BITS)); + + cl = kmalloc(sizeof(*cl), GFP_KERNEL); + if (!cl) + return ERR_PTR(-ENOMEM); + + vaddr = i915_gem_object_pin_map(hwsp->vma->obj, I915_MAP_WB); + if (IS_ERR(vaddr)) { + kfree(cl); + return ERR_CAST(vaddr); + } + + i915_vma_get(hwsp->vma); + cl->hwsp = hwsp; + cl->vaddr = page_pack_bits(vaddr, cacheline); + + i915_active_init(hwsp->gt->i915, &cl->active, __cacheline_retire); + + return cl; +} + +static void cacheline_acquire(struct intel_timeline_cacheline *cl) +{ + if (cl && i915_active_acquire(&cl->active)) + __i915_vma_pin(cl->hwsp->vma); +} + +static void cacheline_release(struct intel_timeline_cacheline *cl) +{ + if (cl) + i915_active_release(&cl->active); +} + +static void cacheline_free(struct intel_timeline_cacheline *cl) +{ + GEM_BUG_ON(ptr_test_bit(cl->vaddr, CACHELINE_FREE)); + cl->vaddr = ptr_set_bit(cl->vaddr, CACHELINE_FREE); + + if (i915_active_is_idle(&cl->active)) + __idle_cacheline_free(cl); +} + +int intel_timeline_init(struct intel_timeline *timeline, + struct intel_gt *gt, + struct i915_vma *hwsp) +{ + void *vaddr; + + /* + * Ideally we want a set of engines on a single leaf as we expect + * to mostly be tracking synchronisation between engines. It is not + * a huge issue if this is not the case, but we may want to mitigate + * any page crossing penalties if they become an issue. + * + * Called during early_init before we know how many engines there are. 
+ */ + BUILD_BUG_ON(KSYNCMAP < I915_NUM_ENGINES); + + timeline->gt = gt; + timeline->pin_count = 0; + timeline->has_initial_breadcrumb = !hwsp; + timeline->hwsp_cacheline = NULL; + + if (!hwsp) { + struct intel_timeline_cacheline *cl; + unsigned int cacheline; + + hwsp = hwsp_alloc(timeline, &cacheline); + if (IS_ERR(hwsp)) + return PTR_ERR(hwsp); + + cl = cacheline_alloc(hwsp->private, cacheline); + if (IS_ERR(cl)) { + __idle_hwsp_free(hwsp->private, cacheline); + return PTR_ERR(cl); + } + + timeline->hwsp_cacheline = cl; + timeline->hwsp_offset = cacheline * CACHELINE_BYTES; + + vaddr = page_mask_bits(cl->vaddr); + } else { + timeline->hwsp_offset = I915_GEM_HWS_SEQNO_ADDR; + + vaddr = i915_gem_object_pin_map(hwsp->obj, I915_MAP_WB); + if (IS_ERR(vaddr)) + return PTR_ERR(vaddr); + } + + timeline->hwsp_seqno = + memset(vaddr + timeline->hwsp_offset, 0, CACHELINE_BYTES); + + timeline->hwsp_ggtt = i915_vma_get(hwsp); + GEM_BUG_ON(timeline->hwsp_offset >= hwsp->size); + + timeline->fence_context = dma_fence_context_alloc(1); + + mutex_init(&timeline->mutex); + + INIT_ACTIVE_REQUEST(&timeline->last_request); + INIT_LIST_HEAD(&timeline->requests); + + i915_syncmap_init(&timeline->sync); + + return 0; +} + +static void timelines_init(struct intel_gt *gt) +{ + struct i915_gt_timelines *timelines = >->timelines; + + mutex_init(&timelines->mutex); + INIT_LIST_HEAD(&timelines->active_list); + + spin_lock_init(&timelines->hwsp_lock); + INIT_LIST_HEAD(&timelines->hwsp_free_list); + + /* via i915_gem_wait_for_idle() */ + i915_gem_shrinker_taints_mutex(gt->i915, &timelines->mutex); +} + +void intel_timelines_init(struct drm_i915_private *i915) +{ + timelines_init(&i915->gt); +} + +static void timeline_add_to_active(struct intel_timeline *tl) +{ + struct i915_gt_timelines *gt = &tl->gt->timelines; + + mutex_lock(>->mutex); + list_add(&tl->link, >->active_list); + mutex_unlock(>->mutex); +} + +static void timeline_remove_from_active(struct intel_timeline *tl) +{ + struct i915_gt_timelines *gt = &tl->gt->timelines; + + mutex_lock(>->mutex); + list_del(&tl->link); + mutex_unlock(>->mutex); +} + +static void timelines_park(struct intel_gt *gt) +{ + struct i915_gt_timelines *timelines = >->timelines; + struct intel_timeline *timeline; + + mutex_lock(&timelines->mutex); + list_for_each_entry(timeline, &timelines->active_list, link) { + /* + * All known fences are completed so we can scrap + * the current sync point tracking and start afresh, + * any attempt to wait upon a previous sync point + * will be skipped as the fence was signaled. + */ + i915_syncmap_free(&timeline->sync); + } + mutex_unlock(&timelines->mutex); +} + +/** + * intel_timelines_park - called when the driver idles + * @i915: the drm_i915_private device + * + * When the driver is completely idle, we know that all of our sync points + * have been signaled and our tracking is then entirely redundant. Any request + * to wait upon an older sync point will be completed instantly as we know + * the fence is signaled and therefore we will not even look them up in the + * sync point map. 
+ */ +void intel_timelines_park(struct drm_i915_private *i915) +{ + timelines_park(&i915->gt); +} + +void intel_timeline_fini(struct intel_timeline *timeline) +{ + GEM_BUG_ON(timeline->pin_count); + GEM_BUG_ON(!list_empty(&timeline->requests)); + + i915_syncmap_free(&timeline->sync); + + if (timeline->hwsp_cacheline) + cacheline_free(timeline->hwsp_cacheline); + else + i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj); + + i915_vma_put(timeline->hwsp_ggtt); +} + +struct intel_timeline * +intel_timeline_create(struct intel_gt *gt, struct i915_vma *global_hwsp) +{ + struct intel_timeline *timeline; + int err; + + timeline = kzalloc(sizeof(*timeline), GFP_KERNEL); + if (!timeline) + return ERR_PTR(-ENOMEM); + + err = intel_timeline_init(timeline, gt, global_hwsp); + if (err) { + kfree(timeline); + return ERR_PTR(err); + } + + kref_init(&timeline->kref); + + return timeline; +} + +int intel_timeline_pin(struct intel_timeline *tl) +{ + int err; + + if (tl->pin_count++) + return 0; + GEM_BUG_ON(!tl->pin_count); + + err = i915_vma_pin(tl->hwsp_ggtt, 0, 0, PIN_GLOBAL | PIN_HIGH); + if (err) + goto unpin; + + tl->hwsp_offset = + i915_ggtt_offset(tl->hwsp_ggtt) + + offset_in_page(tl->hwsp_offset); + + cacheline_acquire(tl->hwsp_cacheline); + timeline_add_to_active(tl); + + return 0; + +unpin: + tl->pin_count = 0; + return err; +} + +static u32 timeline_advance(struct intel_timeline *tl) +{ + GEM_BUG_ON(!tl->pin_count); + GEM_BUG_ON(tl->seqno & tl->has_initial_breadcrumb); + + return tl->seqno += 1 + tl->has_initial_breadcrumb; +} + +static void timeline_rollback(struct intel_timeline *tl) +{ + tl->seqno -= 1 + tl->has_initial_breadcrumb; +} + +static noinline int +__intel_timeline_get_seqno(struct intel_timeline *tl, + struct i915_request *rq, + u32 *seqno) +{ + struct intel_timeline_cacheline *cl; + unsigned int cacheline; + struct i915_vma *vma; + void *vaddr; + int err; + + /* + * If there is an outstanding GPU reference to this cacheline, + * such as it being sampled by a HW semaphore on another timeline, + * we cannot wraparound our seqno value (the HW semaphore does + * a strict greater-than-or-equals compare, not i915_seqno_passed). + * So if the cacheline is still busy, we must detach ourselves + * from it and leave it inflight alongside its users. + * + * However, if nobody is watching and we can guarantee that nobody + * will, we could simply reuse the same cacheline. + * + * if (i915_active_request_is_signaled(&tl->last_request) && + * i915_active_is_signaled(&tl->hwsp_cacheline->active)) + * return 0; + * + * That seems unlikely for a busy timeline that needed to wrap in + * the first place, so just replace the cacheline. + */ + + vma = hwsp_alloc(tl, &cacheline); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err_rollback; + } + + err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); + if (err) { + __idle_hwsp_free(vma->private, cacheline); + goto err_rollback; + } + + cl = cacheline_alloc(vma->private, cacheline); + if (IS_ERR(cl)) { + err = PTR_ERR(cl); + __idle_hwsp_free(vma->private, cacheline); + goto err_unpin; + } + GEM_BUG_ON(cl->hwsp->vma != vma); + + /* + * Attach the old cacheline to the current request, so that we only + * free it after the current request is retired, which ensures that + * all writes into the cacheline from previous requests are complete. 
+ */ + err = i915_active_ref(&tl->hwsp_cacheline->active, + tl->fence_context, rq); + if (err) + goto err_cacheline; + + cacheline_release(tl->hwsp_cacheline); /* ownership now xfered to rq */ + cacheline_free(tl->hwsp_cacheline); + + i915_vma_unpin(tl->hwsp_ggtt); /* binding kept alive by old cacheline */ + i915_vma_put(tl->hwsp_ggtt); + + tl->hwsp_ggtt = i915_vma_get(vma); + + vaddr = page_mask_bits(cl->vaddr); + tl->hwsp_offset = cacheline * CACHELINE_BYTES; + tl->hwsp_seqno = + memset(vaddr + tl->hwsp_offset, 0, CACHELINE_BYTES); + + tl->hwsp_offset += i915_ggtt_offset(vma); + + cacheline_acquire(cl); + tl->hwsp_cacheline = cl; + + *seqno = timeline_advance(tl); + GEM_BUG_ON(i915_seqno_passed(*tl->hwsp_seqno, *seqno)); + return 0; + +err_cacheline: + cacheline_free(cl); +err_unpin: + i915_vma_unpin(vma); +err_rollback: + timeline_rollback(tl); + return err; +} + +int intel_timeline_get_seqno(struct intel_timeline *tl, + struct i915_request *rq, + u32 *seqno) +{ + *seqno = timeline_advance(tl); + + /* Replace the HWSP on wraparound for HW semaphores */ + if (unlikely(!*seqno && tl->hwsp_cacheline)) + return __intel_timeline_get_seqno(tl, rq, seqno); + + return 0; +} + +static int cacheline_ref(struct intel_timeline_cacheline *cl, + struct i915_request *rq) +{ + return i915_active_ref(&cl->active, rq->fence.context, rq); +} + +int intel_timeline_read_hwsp(struct i915_request *from, + struct i915_request *to, + u32 *hwsp) +{ + struct intel_timeline_cacheline *cl = from->hwsp_cacheline; + struct intel_timeline *tl = from->timeline; + int err; + + GEM_BUG_ON(to->timeline == tl); + + mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING); + err = i915_request_completed(from); + if (!err) + err = cacheline_ref(cl, to); + if (!err) { + if (likely(cl == tl->hwsp_cacheline)) { + *hwsp = tl->hwsp_offset; + } else { /* across a seqno wrap, recover the original offset */ + *hwsp = i915_ggtt_offset(cl->hwsp->vma) + + ptr_unmask_bits(cl->vaddr, CACHELINE_BITS) * + CACHELINE_BYTES; + } + } + mutex_unlock(&tl->mutex); + + return err; +} + +void intel_timeline_unpin(struct intel_timeline *tl) +{ + GEM_BUG_ON(!tl->pin_count); + if (--tl->pin_count) + return; + + timeline_remove_from_active(tl); + cacheline_release(tl->hwsp_cacheline); + + /* + * Since this timeline is idle, all bariers upon which we were waiting + * must also be complete and so we can discard the last used barriers + * without loss of information. 
+ */ + i915_syncmap_free(&tl->sync); + + __i915_vma_unpin(tl->hwsp_ggtt); +} + +void __intel_timeline_free(struct kref *kref) +{ + struct intel_timeline *timeline = + container_of(kref, typeof(*timeline), kref); + + intel_timeline_fini(timeline); + kfree(timeline); +} + +static void timelines_fini(struct intel_gt *gt) +{ + struct i915_gt_timelines *timelines = >->timelines; + + GEM_BUG_ON(!list_empty(&timelines->active_list)); + GEM_BUG_ON(!list_empty(&timelines->hwsp_free_list)); + + mutex_destroy(&timelines->mutex); +} + +void intel_timelines_fini(struct drm_i915_private *i915) +{ + timelines_fini(&i915->gt); +} + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "gt/selftests/mock_timeline.c" +#include "gt/selftest_timeline.c" +#endif diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.h b/drivers/gpu/drm/i915/gt/intel_timeline.h new file mode 100644 index 000000000000..e08cebf64833 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_timeline.h @@ -0,0 +1,93 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + */ + +#ifndef I915_TIMELINE_H +#define I915_TIMELINE_H + +#include + +#include "i915_active.h" +#include "i915_syncmap.h" +#include "gt/intel_timeline_types.h" + +int intel_timeline_init(struct intel_timeline *tl, + struct intel_gt *gt, + struct i915_vma *hwsp); +void intel_timeline_fini(struct intel_timeline *tl); + +struct intel_timeline * +intel_timeline_create(struct intel_gt *gt, struct i915_vma *global_hwsp); + +static inline struct intel_timeline * +intel_timeline_get(struct intel_timeline *timeline) +{ + kref_get(&timeline->kref); + return timeline; +} + +void __intel_timeline_free(struct kref *kref); +static inline void intel_timeline_put(struct intel_timeline *timeline) +{ + kref_put(&timeline->kref, __intel_timeline_free); +} + +static inline int __intel_timeline_sync_set(struct intel_timeline *tl, + u64 context, u32 seqno) +{ + return i915_syncmap_set(&tl->sync, context, seqno); +} + +static inline int intel_timeline_sync_set(struct intel_timeline *tl, + const struct dma_fence *fence) +{ + return __intel_timeline_sync_set(tl, fence->context, fence->seqno); +} + +static inline bool __intel_timeline_sync_is_later(struct intel_timeline *tl, + u64 context, u32 seqno) +{ + return i915_syncmap_is_later(&tl->sync, context, seqno); +} + +static inline bool intel_timeline_sync_is_later(struct intel_timeline *tl, + const struct dma_fence *fence) +{ + return __intel_timeline_sync_is_later(tl, fence->context, fence->seqno); +} + +int intel_timeline_pin(struct intel_timeline *tl); +int intel_timeline_get_seqno(struct intel_timeline *tl, + struct i915_request *rq, + u32 *seqno); +void intel_timeline_unpin(struct intel_timeline *tl); + +int intel_timeline_read_hwsp(struct i915_request *from, + struct i915_request *until, + u32 *hwsp_offset); + +void intel_timelines_init(struct drm_i915_private *i915); +void intel_timelines_park(struct drm_i915_private *i915); +void intel_timelines_fini(struct drm_i915_private *i915); + +#endif diff --git a/drivers/gpu/drm/i915/gt/intel_timeline_types.h b/drivers/gpu/drm/i915/gt/intel_timeline_types.h new file mode 100644 index 000000000000..9a71aea7a338 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_timeline_types.h @@ -0,0 +1,67 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2016 Intel Corporation + */ + +#ifndef __I915_TIMELINE_TYPES_H__ +#define __I915_TIMELINE_TYPES_H__ + +#include +#include +#include +#include + +#include "i915_active_types.h" + +struct drm_i915_private; +struct i915_vma; +struct intel_timeline_cacheline; +struct i915_syncmap; + +struct intel_timeline { + u64 fence_context; + u32 seqno; + + struct mutex mutex; /* protects the flow of requests */ + + unsigned int pin_count; + const u32 *hwsp_seqno; + struct i915_vma *hwsp_ggtt; + u32 hwsp_offset; + + struct intel_timeline_cacheline *hwsp_cacheline; + + bool has_initial_breadcrumb; + + /** + * List of breadcrumbs associated with GPU requests currently + * outstanding. + */ + struct list_head requests; + + /* Contains an RCU guarded pointer to the last request. No reference is + * held to the request, users must carefully acquire a reference to + * the request using i915_active_request_get_request_rcu(), or hold the + * struct_mutex. + */ + struct i915_active_request last_request; + + /** + * We track the most recent seqno that we wait on in every context so + * that we only have to emit a new await and dependency on a more + * recent sync point. 
As the contexts may be executed out-of-order, we + * have to track each individually and can not rely on an absolute + * global_seqno. When we know that all tracked fences are completed + * (i.e. when the driver is idle), we know that the syncmap is + * redundant and we can discard it without loss of generality. + */ + struct i915_syncmap *sync; + + struct list_head link; + struct intel_gt *gt; + + struct kref kref; +}; + +#endif /* __I915_TIMELINE_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c index 423027aa71cd..bf0974b12f3d 100644 --- a/drivers/gpu/drm/i915/gt/mock_engine.c +++ b/drivers/gpu/drm/i915/gt/mock_engine.c @@ -33,15 +33,15 @@ struct mock_ring { struct intel_ring base; - struct i915_timeline timeline; + struct intel_timeline timeline; }; -static void mock_timeline_pin(struct i915_timeline *tl) +static void mock_timeline_pin(struct intel_timeline *tl) { tl->pin_count++; } -static void mock_timeline_unpin(struct i915_timeline *tl) +static void mock_timeline_unpin(struct intel_timeline *tl) { GEM_BUG_ON(!tl->pin_count); tl->pin_count--; @@ -56,7 +56,7 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine) if (!ring) return NULL; - if (i915_timeline_init(&ring->timeline, engine->gt, NULL)) { + if (intel_timeline_init(&ring->timeline, engine->gt, NULL)) { kfree(ring); return NULL; } @@ -78,7 +78,7 @@ static void mock_ring_free(struct intel_ring *base) { struct mock_ring *ring = container_of(base, typeof(*ring), base); - i915_timeline_fini(&ring->timeline); + intel_timeline_fini(&ring->timeline); kfree(ring); } diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c new file mode 100644 index 000000000000..193cc564ade2 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c @@ -0,0 +1,845 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2017-2018 Intel Corporation + */ + +#include + +#include "gem/i915_gem_pm.h" + +#include "../selftests/i915_random.h" +#include "../i915_selftest.h" + +#include "../selftests/igt_flush_test.h" +#include "../selftests/mock_gem_device.h" +#include "selftests/mock_timeline.h" + +static struct page *hwsp_page(struct intel_timeline *tl) +{ + struct drm_i915_gem_object *obj = tl->hwsp_ggtt->obj; + + GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); + return sg_page(obj->mm.pages->sgl); +} + +static unsigned long hwsp_cacheline(struct intel_timeline *tl) +{ + unsigned long address = (unsigned long)page_address(hwsp_page(tl)); + + return (address + tl->hwsp_offset) / CACHELINE_BYTES; +} + +#define CACHELINES_PER_PAGE (PAGE_SIZE / CACHELINE_BYTES) + +struct mock_hwsp_freelist { + struct drm_i915_private *i915; + struct radix_tree_root cachelines; + struct intel_timeline **history; + unsigned long count, max; + struct rnd_state prng; +}; + +enum { + SHUFFLE = BIT(0), +}; + +static void __mock_hwsp_record(struct mock_hwsp_freelist *state, + unsigned int idx, + struct intel_timeline *tl) +{ + tl = xchg(&state->history[idx], tl); + if (tl) { + radix_tree_delete(&state->cachelines, hwsp_cacheline(tl)); + intel_timeline_put(tl); + } +} + +static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state, + unsigned int count, + unsigned int flags) +{ + struct intel_timeline *tl; + unsigned int idx; + + while (count--) { + unsigned long cacheline; + int err; + + tl = intel_timeline_create(&state->i915->gt, NULL); + if (IS_ERR(tl)) + return PTR_ERR(tl); + + cacheline = hwsp_cacheline(tl); + err = 
radix_tree_insert(&state->cachelines, cacheline, tl); + if (err) { + if (err == -EEXIST) { + pr_err("HWSP cacheline %lu already used; duplicate allocation!\n", + cacheline); + } + intel_timeline_put(tl); + return err; + } + + idx = state->count++ % state->max; + __mock_hwsp_record(state, idx, tl); + } + + if (flags & SHUFFLE) + i915_prandom_shuffle(state->history, + sizeof(*state->history), + min(state->count, state->max), + &state->prng); + + count = i915_prandom_u32_max_state(min(state->count, state->max), + &state->prng); + while (count--) { + idx = --state->count % state->max; + __mock_hwsp_record(state, idx, NULL); + } + + return 0; +} + +static int mock_hwsp_freelist(void *arg) +{ + struct mock_hwsp_freelist state; + const struct { + const char *name; + unsigned int flags; + } phases[] = { + { "linear", 0 }, + { "shuffled", SHUFFLE }, + { }, + }, *p; + unsigned int na; + int err = 0; + + INIT_RADIX_TREE(&state.cachelines, GFP_KERNEL); + state.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed); + + state.i915 = mock_gem_device(); + if (!state.i915) + return -ENOMEM; + + /* + * Create a bunch of timelines and check that their HWSP do not overlap. + * Free some, and try again. + */ + + state.max = PAGE_SIZE / sizeof(*state.history); + state.count = 0; + state.history = kcalloc(state.max, sizeof(*state.history), GFP_KERNEL); + if (!state.history) { + err = -ENOMEM; + goto err_put; + } + + mutex_lock(&state.i915->drm.struct_mutex); + for (p = phases; p->name; p++) { + pr_debug("%s(%s)\n", __func__, p->name); + for_each_prime_number_from(na, 1, 2 * CACHELINES_PER_PAGE) { + err = __mock_hwsp_timeline(&state, na, p->flags); + if (err) + goto out; + } + } + +out: + for (na = 0; na < state.max; na++) + __mock_hwsp_record(&state, na, NULL); + mutex_unlock(&state.i915->drm.struct_mutex); + kfree(state.history); +err_put: + drm_dev_put(&state.i915->drm); + return err; +} + +struct __igt_sync { + const char *name; + u32 seqno; + bool expected; + bool set; +}; + +static int __igt_sync(struct intel_timeline *tl, + u64 ctx, + const struct __igt_sync *p, + const char *name) +{ + int ret; + + if (__intel_timeline_sync_is_later(tl, ctx, p->seqno) != p->expected) { + pr_err("%s: %s(ctx=%llu, seqno=%u) expected passed %s but failed\n", + name, p->name, ctx, p->seqno, yesno(p->expected)); + return -EINVAL; + } + + if (p->set) { + ret = __intel_timeline_sync_set(tl, ctx, p->seqno); + if (ret) + return ret; + } + + return 0; +} + +static int igt_sync(void *arg) +{ + const struct __igt_sync pass[] = { + { "unset", 0, false, false }, + { "new", 0, false, true }, + { "0a", 0, true, true }, + { "1a", 1, false, true }, + { "1b", 1, true, true }, + { "0b", 0, true, false }, + { "2a", 2, false, true }, + { "4", 4, false, true }, + { "INT_MAX", INT_MAX, false, true }, + { "INT_MAX-1", INT_MAX-1, true, false }, + { "INT_MAX+1", (u32)INT_MAX+1, false, true }, + { "INT_MAX", INT_MAX, true, false }, + { "UINT_MAX", UINT_MAX, false, true }, + { "wrap", 0, false, true }, + { "unwrap", UINT_MAX, true, false }, + {}, + }, *p; + struct intel_timeline tl; + int order, offset; + int ret = -ENODEV; + + mock_timeline_init(&tl, 0); + for (p = pass; p->name; p++) { + for (order = 1; order < 64; order++) { + for (offset = -1; offset <= (order > 1); offset++) { + u64 ctx = BIT_ULL(order) + offset; + + ret = __igt_sync(&tl, ctx, p, "1"); + if (ret) + goto out; + } + } + } + mock_timeline_fini(&tl); + + mock_timeline_init(&tl, 0); + for (order = 1; order < 64; order++) { + for (offset = -1; offset <= (order > 1); offset++) { 
+ u64 ctx = BIT_ULL(order) + offset; + + for (p = pass; p->name; p++) { + ret = __igt_sync(&tl, ctx, p, "2"); + if (ret) + goto out; + } + } + } + +out: + mock_timeline_fini(&tl); + return ret; +} + +static unsigned int random_engine(struct rnd_state *rnd) +{ + return i915_prandom_u32_max_state(I915_NUM_ENGINES, rnd); +} + +static int bench_sync(void *arg) +{ + struct rnd_state prng; + struct intel_timeline tl; + unsigned long end_time, count; + u64 prng32_1M; + ktime_t kt; + int order, last_order; + + mock_timeline_init(&tl, 0); + + /* Lookups from cache are very fast and so the random number generation + * and the loop itself becomes a significant factor in the per-iteration + * timings. We try to compensate the results by measuring the overhead + * of the prng and subtract it from the reported results. + */ + prandom_seed_state(&prng, i915_selftest.random_seed); + count = 0; + kt = ktime_get(); + end_time = jiffies + HZ/10; + do { + u32 x; + + /* Make sure the compiler doesn't optimise away the prng call */ + WRITE_ONCE(x, prandom_u32_state(&prng)); + + count++; + } while (!time_after(jiffies, end_time)); + kt = ktime_sub(ktime_get(), kt); + pr_debug("%s: %lu random evaluations, %lluns/prng\n", + __func__, count, (long long)div64_ul(ktime_to_ns(kt), count)); + prng32_1M = div64_ul(ktime_to_ns(kt) << 20, count); + + /* Benchmark (only) setting random context ids */ + prandom_seed_state(&prng, i915_selftest.random_seed); + count = 0; + kt = ktime_get(); + end_time = jiffies + HZ/10; + do { + u64 id = i915_prandom_u64_state(&prng); + + __intel_timeline_sync_set(&tl, id, 0); + count++; + } while (!time_after(jiffies, end_time)); + kt = ktime_sub(ktime_get(), kt); + kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20); + pr_info("%s: %lu random insertions, %lluns/insert\n", + __func__, count, (long long)div64_ul(ktime_to_ns(kt), count)); + + /* Benchmark looking up the exact same context ids as we just set */ + prandom_seed_state(&prng, i915_selftest.random_seed); + end_time = count; + kt = ktime_get(); + while (end_time--) { + u64 id = i915_prandom_u64_state(&prng); + + if (!__intel_timeline_sync_is_later(&tl, id, 0)) { + mock_timeline_fini(&tl); + pr_err("Lookup of %llu failed\n", id); + return -EINVAL; + } + } + kt = ktime_sub(ktime_get(), kt); + kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20); + pr_info("%s: %lu random lookups, %lluns/lookup\n", + __func__, count, (long long)div64_ul(ktime_to_ns(kt), count)); + + mock_timeline_fini(&tl); + cond_resched(); + + mock_timeline_init(&tl, 0); + + /* Benchmark setting the first N (in order) contexts */ + count = 0; + kt = ktime_get(); + end_time = jiffies + HZ/10; + do { + __intel_timeline_sync_set(&tl, count++, 0); + } while (!time_after(jiffies, end_time)); + kt = ktime_sub(ktime_get(), kt); + pr_info("%s: %lu in-order insertions, %lluns/insert\n", + __func__, count, (long long)div64_ul(ktime_to_ns(kt), count)); + + /* Benchmark looking up the exact same context ids as we just set */ + end_time = count; + kt = ktime_get(); + while (end_time--) { + if (!__intel_timeline_sync_is_later(&tl, end_time, 0)) { + pr_err("Lookup of %lu failed\n", end_time); + mock_timeline_fini(&tl); + return -EINVAL; + } + } + kt = ktime_sub(ktime_get(), kt); + pr_info("%s: %lu in-order lookups, %lluns/lookup\n", + __func__, count, (long long)div64_ul(ktime_to_ns(kt), count)); + + mock_timeline_fini(&tl); + cond_resched(); + + mock_timeline_init(&tl, 0); + + /* Benchmark searching for a random context id and maybe changing it */ + prandom_seed_state(&prng, 
i915_selftest.random_seed); + count = 0; + kt = ktime_get(); + end_time = jiffies + HZ/10; + do { + u32 id = random_engine(&prng); + u32 seqno = prandom_u32_state(&prng); + + if (!__intel_timeline_sync_is_later(&tl, id, seqno)) + __intel_timeline_sync_set(&tl, id, seqno); + + count++; + } while (!time_after(jiffies, end_time)); + kt = ktime_sub(ktime_get(), kt); + kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20); + pr_info("%s: %lu repeated insert/lookups, %lluns/op\n", + __func__, count, (long long)div64_ul(ktime_to_ns(kt), count)); + mock_timeline_fini(&tl); + cond_resched(); + + /* Benchmark searching for a known context id and changing the seqno */ + for (last_order = 1, order = 1; order < 32; + ({ int tmp = last_order; last_order = order; order += tmp; })) { + unsigned int mask = BIT(order) - 1; + + mock_timeline_init(&tl, 0); + + count = 0; + kt = ktime_get(); + end_time = jiffies + HZ/10; + do { + /* Without assuming too many details of the underlying + * implementation, try to identify its phase-changes + * (if any)! + */ + u64 id = (u64)(count & mask) << order; + + __intel_timeline_sync_is_later(&tl, id, 0); + __intel_timeline_sync_set(&tl, id, 0); + + count++; + } while (!time_after(jiffies, end_time)); + kt = ktime_sub(ktime_get(), kt); + pr_info("%s: %lu cyclic/%d insert/lookups, %lluns/op\n", + __func__, count, order, + (long long)div64_ul(ktime_to_ns(kt), count)); + mock_timeline_fini(&tl); + cond_resched(); + } + + return 0; +} + +int intel_timeline_mock_selftests(void) +{ + static const struct i915_subtest tests[] = { + SUBTEST(mock_hwsp_freelist), + SUBTEST(igt_sync), + SUBTEST(bench_sync), + }; + + return i915_subtests(tests, NULL); +} + +static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value) +{ + u32 *cs; + + cs = intel_ring_begin(rq, 4); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + if (INTEL_GEN(rq->i915) >= 8) { + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *cs++ = addr; + *cs++ = 0; + *cs++ = value; + } else if (INTEL_GEN(rq->i915) >= 4) { + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *cs++ = 0; + *cs++ = addr; + *cs++ = value; + } else { + *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; + *cs++ = addr; + *cs++ = value; + *cs++ = MI_NOOP; + } + + intel_ring_advance(rq, cs); + + return 0; +} + +static struct i915_request * +tl_write(struct intel_timeline *tl, struct intel_engine_cs *engine, u32 value) +{ + struct i915_request *rq; + int err; + + lockdep_assert_held(&tl->gt->i915->drm.struct_mutex); /* lazy rq refs */ + + err = intel_timeline_pin(tl); + if (err) { + rq = ERR_PTR(err); + goto out; + } + + rq = i915_request_create(engine->kernel_context); + if (IS_ERR(rq)) + goto out_unpin; + + err = emit_ggtt_store_dw(rq, tl->hwsp_offset, value); + i915_request_add(rq); + if (err) + rq = ERR_PTR(err); + +out_unpin: + intel_timeline_unpin(tl); +out: + if (IS_ERR(rq)) + pr_err("Failed to write to timeline!\n"); + return rq; +} + +static struct intel_timeline * +checked_intel_timeline_create(struct drm_i915_private *i915) +{ + struct intel_timeline *tl; + + tl = intel_timeline_create(&i915->gt, NULL); + if (IS_ERR(tl)) + return tl; + + if (*tl->hwsp_seqno != tl->seqno) { + pr_err("Timeline created with incorrect breadcrumb, found %x, expected %x\n", + *tl->hwsp_seqno, tl->seqno); + intel_timeline_put(tl); + return ERR_PTR(-EINVAL); + } + + return tl; +} + +static int live_hwsp_engine(void *arg) +{ +#define NUM_TIMELINES 4096 + struct drm_i915_private *i915 = arg; + struct intel_timeline **timelines; + struct intel_engine_cs *engine; + enum 
intel_engine_id id; + intel_wakeref_t wakeref; + unsigned long count, n; + int err = 0; + + /* + * Create a bunch of timelines and check we can write + * independently to each of their breadcrumb slots. + */ + + timelines = kvmalloc_array(NUM_TIMELINES * I915_NUM_ENGINES, + sizeof(*timelines), + GFP_KERNEL); + if (!timelines) + return -ENOMEM; + + mutex_lock(&i915->drm.struct_mutex); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); + + count = 0; + for_each_engine(engine, i915, id) { + if (!intel_engine_can_store_dword(engine)) + continue; + + for (n = 0; n < NUM_TIMELINES; n++) { + struct intel_timeline *tl; + struct i915_request *rq; + + tl = checked_intel_timeline_create(i915); + if (IS_ERR(tl)) { + err = PTR_ERR(tl); + goto out; + } + + rq = tl_write(tl, engine, count); + if (IS_ERR(rq)) { + intel_timeline_put(tl); + err = PTR_ERR(rq); + goto out; + } + + timelines[count++] = tl; + } + } + +out: + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + err = -EIO; + + for (n = 0; n < count; n++) { + struct intel_timeline *tl = timelines[n]; + + if (!err && *tl->hwsp_seqno != n) { + pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n", + n, *tl->hwsp_seqno); + err = -EINVAL; + } + intel_timeline_put(tl); + } + + intel_runtime_pm_put(&i915->runtime_pm, wakeref); + mutex_unlock(&i915->drm.struct_mutex); + + kvfree(timelines); + + return err; +#undef NUM_TIMELINES +} + +static int live_hwsp_alternate(void *arg) +{ +#define NUM_TIMELINES 4096 + struct drm_i915_private *i915 = arg; + struct intel_timeline **timelines; + struct intel_engine_cs *engine; + enum intel_engine_id id; + intel_wakeref_t wakeref; + unsigned long count, n; + int err = 0; + + /* + * Create a bunch of timelines and check we can write + * independently to each of their breadcrumb slots with adjacent + * engines. + */ + + timelines = kvmalloc_array(NUM_TIMELINES * I915_NUM_ENGINES, + sizeof(*timelines), + GFP_KERNEL); + if (!timelines) + return -ENOMEM; + + mutex_lock(&i915->drm.struct_mutex); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); + + count = 0; + for (n = 0; n < NUM_TIMELINES; n++) { + for_each_engine(engine, i915, id) { + struct intel_timeline *tl; + struct i915_request *rq; + + if (!intel_engine_can_store_dword(engine)) + continue; + + tl = checked_intel_timeline_create(i915); + if (IS_ERR(tl)) { + err = PTR_ERR(tl); + goto out; + } + + rq = tl_write(tl, engine, count); + if (IS_ERR(rq)) { + intel_timeline_put(tl); + err = PTR_ERR(rq); + goto out; + } + + timelines[count++] = tl; + } + } + +out: + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + err = -EIO; + + for (n = 0; n < count; n++) { + struct intel_timeline *tl = timelines[n]; + + if (!err && *tl->hwsp_seqno != n) { + pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n", + n, *tl->hwsp_seqno); + err = -EINVAL; + } + intel_timeline_put(tl); + } + + intel_runtime_pm_put(&i915->runtime_pm, wakeref); + mutex_unlock(&i915->drm.struct_mutex); + + kvfree(timelines); + + return err; +#undef NUM_TIMELINES +} + +static int live_hwsp_wrap(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct intel_engine_cs *engine; + struct intel_timeline *tl; + enum intel_engine_id id; + intel_wakeref_t wakeref; + int err = 0; + + /* + * Across a seqno wrap, we need to keep the old cacheline alive for + * foreign GPU references. 
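+	 *
+	 * To exercise the wrap, the loop below presets tl->seqno to -4u and
+	 * then calls intel_timeline_get_seqno() twice from a single request:
+	 * the second call wraps past zero and must move the timeline onto a
+	 * different HWSP cacheline (hwsp_seqno[0] != hwsp_seqno[1]). Both
+	 * seqno values are written with emit_ggtt_store_dw() and checked
+	 * after the request completes.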
+ */ + + mutex_lock(&i915->drm.struct_mutex); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); + + tl = intel_timeline_create(&i915->gt, NULL); + if (IS_ERR(tl)) { + err = PTR_ERR(tl); + goto out_rpm; + } + if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline) + goto out_free; + + err = intel_timeline_pin(tl); + if (err) + goto out_free; + + for_each_engine(engine, i915, id) { + const u32 *hwsp_seqno[2]; + struct i915_request *rq; + u32 seqno[2]; + + if (!intel_engine_can_store_dword(engine)) + continue; + + rq = i915_request_create(engine->kernel_context); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out; + } + + tl->seqno = -4u; + + err = intel_timeline_get_seqno(tl, rq, &seqno[0]); + if (err) { + i915_request_add(rq); + goto out; + } + pr_debug("seqno[0]:%08x, hwsp_offset:%08x\n", + seqno[0], tl->hwsp_offset); + + err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[0]); + if (err) { + i915_request_add(rq); + goto out; + } + hwsp_seqno[0] = tl->hwsp_seqno; + + err = intel_timeline_get_seqno(tl, rq, &seqno[1]); + if (err) { + i915_request_add(rq); + goto out; + } + pr_debug("seqno[1]:%08x, hwsp_offset:%08x\n", + seqno[1], tl->hwsp_offset); + + err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[1]); + if (err) { + i915_request_add(rq); + goto out; + } + hwsp_seqno[1] = tl->hwsp_seqno; + + /* With wrap should come a new hwsp */ + GEM_BUG_ON(seqno[1] >= seqno[0]); + GEM_BUG_ON(hwsp_seqno[0] == hwsp_seqno[1]); + + i915_request_add(rq); + + if (i915_request_wait(rq, 0, HZ / 5) < 0) { + pr_err("Wait for timeline writes timed out!\n"); + err = -EIO; + goto out; + } + + if (*hwsp_seqno[0] != seqno[0] || *hwsp_seqno[1] != seqno[1]) { + pr_err("Bad timeline values: found (%x, %x), expected (%x, %x)\n", + *hwsp_seqno[0], *hwsp_seqno[1], + seqno[0], seqno[1]); + err = -EINVAL; + goto out; + } + + i915_retire_requests(i915); /* recycle HWSP */ + } + +out: + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + err = -EIO; + + intel_timeline_unpin(tl); +out_free: + intel_timeline_put(tl); +out_rpm: + intel_runtime_pm_put(&i915->runtime_pm, wakeref); + mutex_unlock(&i915->drm.struct_mutex); + + return err; +} + +static int live_hwsp_recycle(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + intel_wakeref_t wakeref; + unsigned long count; + int err = 0; + + /* + * Check seqno writes into one timeline at a time. We expect to + * recycle the breadcrumb slot between iterations and neither + * want to confuse ourselves or the GPU. + */ + + mutex_lock(&i915->drm.struct_mutex); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); + + count = 0; + for_each_engine(engine, i915, id) { + IGT_TIMEOUT(end_time); + + if (!intel_engine_can_store_dword(engine)) + continue; + + do { + struct intel_timeline *tl; + struct i915_request *rq; + + tl = checked_intel_timeline_create(i915); + if (IS_ERR(tl)) { + err = PTR_ERR(tl); + goto out; + } + + rq = tl_write(tl, engine, count); + if (IS_ERR(rq)) { + intel_timeline_put(tl); + err = PTR_ERR(rq); + goto out; + } + + if (i915_request_wait(rq, 0, HZ / 5) < 0) { + pr_err("Wait for timeline writes timed out!\n"); + intel_timeline_put(tl); + err = -EIO; + goto out; + } + + if (*tl->hwsp_seqno != count) { + pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n", + count, *tl->hwsp_seqno); + err = -EINVAL; + } + + intel_timeline_put(tl); + count++; + + if (err) + goto out; + + intel_timelines_park(i915); /* Encourage recycling! 
*/ + } while (!__igt_timeout(end_time, NULL)); + } + +out: + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + err = -EIO; + intel_runtime_pm_put(&i915->runtime_pm, wakeref); + mutex_unlock(&i915->drm.struct_mutex); + + return err; +} + +int intel_timeline_live_selftests(struct drm_i915_private *i915) +{ + static const struct i915_subtest tests[] = { + SUBTEST(live_hwsp_recycle), + SUBTEST(live_hwsp_engine), + SUBTEST(live_hwsp_alternate), + SUBTEST(live_hwsp_wrap), + }; + + if (i915_terminally_wedged(i915)) + return 0; + + return i915_subtests(tests, i915); +} diff --git a/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c new file mode 100644 index 000000000000..5c549205828a --- /dev/null +++ b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c @@ -0,0 +1,29 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2017-2018 Intel Corporation + */ + +#include "../intel_timeline.h" + +#include "mock_timeline.h" + +void mock_timeline_init(struct intel_timeline *timeline, u64 context) +{ + timeline->gt = NULL; + timeline->fence_context = context; + + mutex_init(&timeline->mutex); + + INIT_ACTIVE_REQUEST(&timeline->last_request); + INIT_LIST_HEAD(&timeline->requests); + + i915_syncmap_init(&timeline->sync); + + INIT_LIST_HEAD(&timeline->link); +} + +void mock_timeline_fini(struct intel_timeline *timeline) +{ + i915_syncmap_free(&timeline->sync); +} diff --git a/drivers/gpu/drm/i915/gt/selftests/mock_timeline.h b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.h new file mode 100644 index 000000000000..689efc66c908 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.h @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2017-2018 Intel Corporation + */ + +#ifndef __MOCK_TIMELINE__ +#define __MOCK_TIMELINE__ + +struct intel_timeline; + +void mock_timeline_init(struct intel_timeline *timeline, u64 context); +void mock_timeline_fini(struct intel_timeline *timeline); + +#endif /* !__MOCK_TIMELINE__ */ diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index b574aea23581..89a21fa4eac2 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -89,7 +89,7 @@ #include "i915_gpu_error.h" #include "i915_request.h" #include "i915_scheduler.h" -#include "i915_timeline.h" +#include "gt/intel_timeline.h" #include "i915_vma.h" #include "intel_gvt.h" diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 50d7e1e8d8ad..6e07127242d9 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -909,7 +909,7 @@ wait_for_timelines(struct drm_i915_private *i915, unsigned int flags, long timeout) { struct i915_gt_timelines *gt = &i915->gt.timelines; - struct i915_timeline *tl; + struct intel_timeline *tl; mutex_lock(>->mutex); list_for_each_entry(tl, >->active_list, link) { @@ -1487,7 +1487,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv) dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1); - i915_timelines_init(dev_priv); + intel_timelines_init(dev_priv); ret = i915_gem_init_userptr(dev_priv); if (ret) @@ -1624,7 +1624,7 @@ err_uc_misc: if (ret != -EIO) { i915_gem_cleanup_userptr(dev_priv); - i915_timelines_fini(dev_priv); + intel_timelines_fini(dev_priv); } if (ret == -EIO) { @@ -1688,7 +1688,7 @@ void i915_gem_fini(struct drm_i915_private *dev_priv) intel_uc_fini_misc(dev_priv); i915_gem_cleanup_userptr(dev_priv); - i915_timelines_fini(dev_priv); + intel_timelines_fini(dev_priv); 
i915_gem_drain_freed_objects(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index e8b9ebe50c4e..028be3b44d07 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -47,7 +47,7 @@ #include "i915_request.h" #include "i915_scatterlist.h" #include "i915_selftest.h" -#include "i915_timeline.h" +#include "gt/intel_timeline.h" #define I915_GTT_PAGE_SIZE_4K BIT_ULL(12) #define I915_GTT_PAGE_SIZE_64K BIT_ULL(16) diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 0c99694faab7..5ff87c4a0cd5 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -607,7 +607,7 @@ out: struct i915_request * __i915_request_create(struct intel_context *ce, gfp_t gfp) { - struct i915_timeline *tl = ce->ring->timeline; + struct intel_timeline *tl = ce->ring->timeline; struct i915_request *rq; u32 seqno; int ret; @@ -656,7 +656,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp) } } - ret = i915_timeline_get_seqno(tl, rq, &seqno); + ret = intel_timeline_get_seqno(tl, rq, &seqno); if (ret) goto err_free; @@ -775,7 +775,7 @@ i915_request_await_start(struct i915_request *rq, struct i915_request *signal) return 0; signal = list_prev_entry(signal, ring_link); - if (i915_timeline_sync_is_later(rq->timeline, &signal->fence)) + if (intel_timeline_sync_is_later(rq->timeline, &signal->fence)) return 0; return i915_sw_fence_await_dma_fence(&rq->submit, @@ -829,7 +829,7 @@ emit_semaphore_wait(struct i915_request *to, return err; /* We need to pin the signaler's HWSP until we are finished reading. */ - err = i915_timeline_read_hwsp(from, to, &hwsp_offset); + err = intel_timeline_read_hwsp(from, to, &hwsp_offset); if (err) return err; @@ -940,7 +940,7 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence) /* Squash repeated waits to the same timelines */ if (fence->context != rq->i915->mm.unordered_timeline && - i915_timeline_sync_is_later(rq->timeline, fence)) + intel_timeline_sync_is_later(rq->timeline, fence)) continue; if (dma_fence_is_i915(fence)) @@ -954,7 +954,7 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence) /* Record the latest fence used against each timeline */ if (fence->context != rq->i915->mm.unordered_timeline) - i915_timeline_sync_set(rq->timeline, fence); + intel_timeline_sync_set(rq->timeline, fence); } while (--nchild); return 0; @@ -1092,7 +1092,7 @@ void i915_request_skip(struct i915_request *rq, int error) static struct i915_request * __i915_request_add_to_timeline(struct i915_request *rq) { - struct i915_timeline *timeline = rq->timeline; + struct intel_timeline *timeline = rq->timeline; struct i915_request *prev; /* diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index bebc1e9b4a5e..b58ceef92e20 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -41,8 +41,8 @@ struct drm_file; struct drm_i915_gem_object; struct i915_request; -struct i915_timeline; -struct i915_timeline_cacheline; +struct intel_timeline; +struct intel_timeline_cacheline; struct i915_capture_list { struct i915_capture_list *next; @@ -113,7 +113,7 @@ struct i915_request { struct intel_engine_cs *engine; struct intel_context *hw_context; struct intel_ring *ring; - struct i915_timeline *timeline; + struct intel_timeline *timeline; struct list_head signal_link; /* @@ -176,7 +176,7 @@ struct i915_request { * inside the timeline's HWSP 
vma, but it is only valid while this * request has not completed and guarded by the timeline mutex. */ - struct i915_timeline_cacheline *hwsp_cacheline; + struct intel_timeline_cacheline *hwsp_cacheline; /** Position in the ring of the start of the request */ u32 head; diff --git a/drivers/gpu/drm/i915/i915_timeline.c b/drivers/gpu/drm/i915/i915_timeline.c deleted file mode 100644 index 3e2c3169dc69..000000000000 --- a/drivers/gpu/drm/i915/i915_timeline.c +++ /dev/null @@ -1,591 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2016-2018 Intel Corporation - */ - -#include "gt/intel_gt_types.h" - -#include "i915_drv.h" - -#include "i915_active.h" -#include "i915_syncmap.h" -#include "i915_timeline.h" - -#define ptr_set_bit(ptr, bit) ((typeof(ptr))((unsigned long)(ptr) | BIT(bit))) -#define ptr_test_bit(ptr, bit) ((unsigned long)(ptr) & BIT(bit)) - -struct i915_timeline_hwsp { - struct intel_gt *gt; - struct i915_gt_timelines *gt_timelines; - struct list_head free_link; - struct i915_vma *vma; - u64 free_bitmap; -}; - -struct i915_timeline_cacheline { - struct i915_active active; - struct i915_timeline_hwsp *hwsp; - void *vaddr; -#define CACHELINE_BITS 6 -#define CACHELINE_FREE CACHELINE_BITS -}; - -static struct i915_vma *__hwsp_alloc(struct intel_gt *gt) -{ - struct drm_i915_private *i915 = gt->i915; - struct drm_i915_gem_object *obj; - struct i915_vma *vma; - - obj = i915_gem_object_create_internal(i915, PAGE_SIZE); - if (IS_ERR(obj)) - return ERR_CAST(obj); - - i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC); - - vma = i915_vma_instance(obj, >->ggtt->vm, NULL); - if (IS_ERR(vma)) - i915_gem_object_put(obj); - - return vma; -} - -static struct i915_vma * -hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline) -{ - struct i915_gt_timelines *gt = &timeline->gt->timelines; - struct i915_timeline_hwsp *hwsp; - - BUILD_BUG_ON(BITS_PER_TYPE(u64) * CACHELINE_BYTES > PAGE_SIZE); - - spin_lock_irq(>->hwsp_lock); - - /* hwsp_free_list only contains HWSP that have available cachelines */ - hwsp = list_first_entry_or_null(>->hwsp_free_list, - typeof(*hwsp), free_link); - if (!hwsp) { - struct i915_vma *vma; - - spin_unlock_irq(>->hwsp_lock); - - hwsp = kmalloc(sizeof(*hwsp), GFP_KERNEL); - if (!hwsp) - return ERR_PTR(-ENOMEM); - - vma = __hwsp_alloc(timeline->gt); - if (IS_ERR(vma)) { - kfree(hwsp); - return vma; - } - - vma->private = hwsp; - hwsp->gt = timeline->gt; - hwsp->vma = vma; - hwsp->free_bitmap = ~0ull; - hwsp->gt_timelines = gt; - - spin_lock_irq(>->hwsp_lock); - list_add(&hwsp->free_link, >->hwsp_free_list); - } - - GEM_BUG_ON(!hwsp->free_bitmap); - *cacheline = __ffs64(hwsp->free_bitmap); - hwsp->free_bitmap &= ~BIT_ULL(*cacheline); - if (!hwsp->free_bitmap) - list_del(&hwsp->free_link); - - spin_unlock_irq(>->hwsp_lock); - - GEM_BUG_ON(hwsp->vma->private != hwsp); - return hwsp->vma; -} - -static void __idle_hwsp_free(struct i915_timeline_hwsp *hwsp, int cacheline) -{ - struct i915_gt_timelines *gt = hwsp->gt_timelines; - unsigned long flags; - - spin_lock_irqsave(>->hwsp_lock, flags); - - /* As a cacheline becomes available, publish the HWSP on the freelist */ - if (!hwsp->free_bitmap) - list_add_tail(&hwsp->free_link, >->hwsp_free_list); - - GEM_BUG_ON(cacheline >= BITS_PER_TYPE(hwsp->free_bitmap)); - hwsp->free_bitmap |= BIT_ULL(cacheline); - - /* And if no one is left using it, give the page back to the system */ - if (hwsp->free_bitmap == ~0ull) { - i915_vma_put(hwsp->vma); - list_del(&hwsp->free_link); - kfree(hwsp); - } - - 
spin_unlock_irqrestore(>->hwsp_lock, flags); -} - -static void __idle_cacheline_free(struct i915_timeline_cacheline *cl) -{ - GEM_BUG_ON(!i915_active_is_idle(&cl->active)); - - i915_gem_object_unpin_map(cl->hwsp->vma->obj); - i915_vma_put(cl->hwsp->vma); - __idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS)); - - i915_active_fini(&cl->active); - kfree(cl); -} - -static void __cacheline_retire(struct i915_active *active) -{ - struct i915_timeline_cacheline *cl = - container_of(active, typeof(*cl), active); - - i915_vma_unpin(cl->hwsp->vma); - if (ptr_test_bit(cl->vaddr, CACHELINE_FREE)) - __idle_cacheline_free(cl); -} - -static struct i915_timeline_cacheline * -cacheline_alloc(struct i915_timeline_hwsp *hwsp, unsigned int cacheline) -{ - struct i915_timeline_cacheline *cl; - void *vaddr; - - GEM_BUG_ON(cacheline >= BIT(CACHELINE_BITS)); - - cl = kmalloc(sizeof(*cl), GFP_KERNEL); - if (!cl) - return ERR_PTR(-ENOMEM); - - vaddr = i915_gem_object_pin_map(hwsp->vma->obj, I915_MAP_WB); - if (IS_ERR(vaddr)) { - kfree(cl); - return ERR_CAST(vaddr); - } - - i915_vma_get(hwsp->vma); - cl->hwsp = hwsp; - cl->vaddr = page_pack_bits(vaddr, cacheline); - - i915_active_init(hwsp->gt->i915, &cl->active, __cacheline_retire); - - return cl; -} - -static void cacheline_acquire(struct i915_timeline_cacheline *cl) -{ - if (cl && i915_active_acquire(&cl->active)) - __i915_vma_pin(cl->hwsp->vma); -} - -static void cacheline_release(struct i915_timeline_cacheline *cl) -{ - if (cl) - i915_active_release(&cl->active); -} - -static void cacheline_free(struct i915_timeline_cacheline *cl) -{ - GEM_BUG_ON(ptr_test_bit(cl->vaddr, CACHELINE_FREE)); - cl->vaddr = ptr_set_bit(cl->vaddr, CACHELINE_FREE); - - if (i915_active_is_idle(&cl->active)) - __idle_cacheline_free(cl); -} - -int i915_timeline_init(struct i915_timeline *timeline, - struct intel_gt *gt, - struct i915_vma *hwsp) -{ - void *vaddr; - - /* - * Ideally we want a set of engines on a single leaf as we expect - * to mostly be tracking synchronisation between engines. It is not - * a huge issue if this is not the case, but we may want to mitigate - * any page crossing penalties if they become an issue. - * - * Called during early_init before we know how many engines there are. 
- */ - BUILD_BUG_ON(KSYNCMAP < I915_NUM_ENGINES); - - timeline->gt = gt; - timeline->pin_count = 0; - timeline->has_initial_breadcrumb = !hwsp; - timeline->hwsp_cacheline = NULL; - - if (!hwsp) { - struct i915_timeline_cacheline *cl; - unsigned int cacheline; - - hwsp = hwsp_alloc(timeline, &cacheline); - if (IS_ERR(hwsp)) - return PTR_ERR(hwsp); - - cl = cacheline_alloc(hwsp->private, cacheline); - if (IS_ERR(cl)) { - __idle_hwsp_free(hwsp->private, cacheline); - return PTR_ERR(cl); - } - - timeline->hwsp_cacheline = cl; - timeline->hwsp_offset = cacheline * CACHELINE_BYTES; - - vaddr = page_mask_bits(cl->vaddr); - } else { - timeline->hwsp_offset = I915_GEM_HWS_SEQNO_ADDR; - - vaddr = i915_gem_object_pin_map(hwsp->obj, I915_MAP_WB); - if (IS_ERR(vaddr)) - return PTR_ERR(vaddr); - } - - timeline->hwsp_seqno = - memset(vaddr + timeline->hwsp_offset, 0, CACHELINE_BYTES); - - timeline->hwsp_ggtt = i915_vma_get(hwsp); - GEM_BUG_ON(timeline->hwsp_offset >= hwsp->size); - - timeline->fence_context = dma_fence_context_alloc(1); - - mutex_init(&timeline->mutex); - - INIT_ACTIVE_REQUEST(&timeline->last_request); - INIT_LIST_HEAD(&timeline->requests); - - i915_syncmap_init(&timeline->sync); - - return 0; -} - -static void timelines_init(struct intel_gt *gt) -{ - struct i915_gt_timelines *timelines = >->timelines; - - mutex_init(&timelines->mutex); - INIT_LIST_HEAD(&timelines->active_list); - - spin_lock_init(&timelines->hwsp_lock); - INIT_LIST_HEAD(&timelines->hwsp_free_list); - - /* via i915_gem_wait_for_idle() */ - i915_gem_shrinker_taints_mutex(gt->i915, &timelines->mutex); -} - -void i915_timelines_init(struct drm_i915_private *i915) -{ - timelines_init(&i915->gt); -} - -static void timeline_add_to_active(struct i915_timeline *tl) -{ - struct i915_gt_timelines *gt = &tl->gt->timelines; - - mutex_lock(>->mutex); - list_add(&tl->link, >->active_list); - mutex_unlock(>->mutex); -} - -static void timeline_remove_from_active(struct i915_timeline *tl) -{ - struct i915_gt_timelines *gt = &tl->gt->timelines; - - mutex_lock(>->mutex); - list_del(&tl->link); - mutex_unlock(>->mutex); -} - -static void timelines_park(struct intel_gt *gt) -{ - struct i915_gt_timelines *timelines = >->timelines; - struct i915_timeline *timeline; - - mutex_lock(&timelines->mutex); - list_for_each_entry(timeline, &timelines->active_list, link) { - /* - * All known fences are completed so we can scrap - * the current sync point tracking and start afresh, - * any attempt to wait upon a previous sync point - * will be skipped as the fence was signaled. - */ - i915_syncmap_free(&timeline->sync); - } - mutex_unlock(&timelines->mutex); -} - -/** - * i915_timelines_park - called when the driver idles - * @i915: the drm_i915_private device - * - * When the driver is completely idle, we know that all of our sync points - * have been signaled and our tracking is then entirely redundant. Any request - * to wait upon an older sync point will be completed instantly as we know - * the fence is signaled and therefore we will not even look them up in the - * sync point map. 
- */ -void i915_timelines_park(struct drm_i915_private *i915) -{ - timelines_park(&i915->gt); -} - -void i915_timeline_fini(struct i915_timeline *timeline) -{ - GEM_BUG_ON(timeline->pin_count); - GEM_BUG_ON(!list_empty(&timeline->requests)); - - i915_syncmap_free(&timeline->sync); - - if (timeline->hwsp_cacheline) - cacheline_free(timeline->hwsp_cacheline); - else - i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj); - - i915_vma_put(timeline->hwsp_ggtt); -} - -struct i915_timeline * -i915_timeline_create(struct intel_gt *gt, struct i915_vma *global_hwsp) -{ - struct i915_timeline *timeline; - int err; - - timeline = kzalloc(sizeof(*timeline), GFP_KERNEL); - if (!timeline) - return ERR_PTR(-ENOMEM); - - err = i915_timeline_init(timeline, gt, global_hwsp); - if (err) { - kfree(timeline); - return ERR_PTR(err); - } - - kref_init(&timeline->kref); - - return timeline; -} - -int i915_timeline_pin(struct i915_timeline *tl) -{ - int err; - - if (tl->pin_count++) - return 0; - GEM_BUG_ON(!tl->pin_count); - - err = i915_vma_pin(tl->hwsp_ggtt, 0, 0, PIN_GLOBAL | PIN_HIGH); - if (err) - goto unpin; - - tl->hwsp_offset = - i915_ggtt_offset(tl->hwsp_ggtt) + - offset_in_page(tl->hwsp_offset); - - cacheline_acquire(tl->hwsp_cacheline); - timeline_add_to_active(tl); - - return 0; - -unpin: - tl->pin_count = 0; - return err; -} - -static u32 timeline_advance(struct i915_timeline *tl) -{ - GEM_BUG_ON(!tl->pin_count); - GEM_BUG_ON(tl->seqno & tl->has_initial_breadcrumb); - - return tl->seqno += 1 + tl->has_initial_breadcrumb; -} - -static void timeline_rollback(struct i915_timeline *tl) -{ - tl->seqno -= 1 + tl->has_initial_breadcrumb; -} - -static noinline int -__i915_timeline_get_seqno(struct i915_timeline *tl, - struct i915_request *rq, - u32 *seqno) -{ - struct i915_timeline_cacheline *cl; - unsigned int cacheline; - struct i915_vma *vma; - void *vaddr; - int err; - - /* - * If there is an outstanding GPU reference to this cacheline, - * such as it being sampled by a HW semaphore on another timeline, - * we cannot wraparound our seqno value (the HW semaphore does - * a strict greater-than-or-equals compare, not i915_seqno_passed). - * So if the cacheline is still busy, we must detach ourselves - * from it and leave it inflight alongside its users. - * - * However, if nobody is watching and we can guarantee that nobody - * will, we could simply reuse the same cacheline. - * - * if (i915_active_request_is_signaled(&tl->last_request) && - * i915_active_is_signaled(&tl->hwsp_cacheline->active)) - * return 0; - * - * That seems unlikely for a busy timeline that needed to wrap in - * the first place, so just replace the cacheline. - */ - - vma = hwsp_alloc(tl, &cacheline); - if (IS_ERR(vma)) { - err = PTR_ERR(vma); - goto err_rollback; - } - - err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); - if (err) { - __idle_hwsp_free(vma->private, cacheline); - goto err_rollback; - } - - cl = cacheline_alloc(vma->private, cacheline); - if (IS_ERR(cl)) { - err = PTR_ERR(cl); - __idle_hwsp_free(vma->private, cacheline); - goto err_unpin; - } - GEM_BUG_ON(cl->hwsp->vma != vma); - - /* - * Attach the old cacheline to the current request, so that we only - * free it after the current request is retired, which ensures that - * all writes into the cacheline from previous requests are complete. 
- */ - err = i915_active_ref(&tl->hwsp_cacheline->active, - tl->fence_context, rq); - if (err) - goto err_cacheline; - - cacheline_release(tl->hwsp_cacheline); /* ownership now xfered to rq */ - cacheline_free(tl->hwsp_cacheline); - - i915_vma_unpin(tl->hwsp_ggtt); /* binding kept alive by old cacheline */ - i915_vma_put(tl->hwsp_ggtt); - - tl->hwsp_ggtt = i915_vma_get(vma); - - vaddr = page_mask_bits(cl->vaddr); - tl->hwsp_offset = cacheline * CACHELINE_BYTES; - tl->hwsp_seqno = - memset(vaddr + tl->hwsp_offset, 0, CACHELINE_BYTES); - - tl->hwsp_offset += i915_ggtt_offset(vma); - - cacheline_acquire(cl); - tl->hwsp_cacheline = cl; - - *seqno = timeline_advance(tl); - GEM_BUG_ON(i915_seqno_passed(*tl->hwsp_seqno, *seqno)); - return 0; - -err_cacheline: - cacheline_free(cl); -err_unpin: - i915_vma_unpin(vma); -err_rollback: - timeline_rollback(tl); - return err; -} - -int i915_timeline_get_seqno(struct i915_timeline *tl, - struct i915_request *rq, - u32 *seqno) -{ - *seqno = timeline_advance(tl); - - /* Replace the HWSP on wraparound for HW semaphores */ - if (unlikely(!*seqno && tl->hwsp_cacheline)) - return __i915_timeline_get_seqno(tl, rq, seqno); - - return 0; -} - -static int cacheline_ref(struct i915_timeline_cacheline *cl, - struct i915_request *rq) -{ - return i915_active_ref(&cl->active, rq->fence.context, rq); -} - -int i915_timeline_read_hwsp(struct i915_request *from, - struct i915_request *to, - u32 *hwsp) -{ - struct i915_timeline_cacheline *cl = from->hwsp_cacheline; - struct i915_timeline *tl = from->timeline; - int err; - - GEM_BUG_ON(to->timeline == tl); - - mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING); - err = i915_request_completed(from); - if (!err) - err = cacheline_ref(cl, to); - if (!err) { - if (likely(cl == tl->hwsp_cacheline)) { - *hwsp = tl->hwsp_offset; - } else { /* across a seqno wrap, recover the original offset */ - *hwsp = i915_ggtt_offset(cl->hwsp->vma) + - ptr_unmask_bits(cl->vaddr, CACHELINE_BITS) * - CACHELINE_BYTES; - } - } - mutex_unlock(&tl->mutex); - - return err; -} - -void i915_timeline_unpin(struct i915_timeline *tl) -{ - GEM_BUG_ON(!tl->pin_count); - if (--tl->pin_count) - return; - - timeline_remove_from_active(tl); - cacheline_release(tl->hwsp_cacheline); - - /* - * Since this timeline is idle, all bariers upon which we were waiting - * must also be complete and so we can discard the last used barriers - * without loss of information. 
- */ - i915_syncmap_free(&tl->sync); - - __i915_vma_unpin(tl->hwsp_ggtt); -} - -void __i915_timeline_free(struct kref *kref) -{ - struct i915_timeline *timeline = - container_of(kref, typeof(*timeline), kref); - - i915_timeline_fini(timeline); - kfree(timeline); -} - -static void timelines_fini(struct intel_gt *gt) -{ - struct i915_gt_timelines *timelines = >->timelines; - - GEM_BUG_ON(!list_empty(&timelines->active_list)); - GEM_BUG_ON(!list_empty(&timelines->hwsp_free_list)); - - mutex_destroy(&timelines->mutex); -} - -void i915_timelines_fini(struct drm_i915_private *i915) -{ - timelines_fini(&i915->gt); -} - -#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) -#include "selftests/mock_timeline.c" -#include "selftests/i915_timeline.c" -#endif diff --git a/drivers/gpu/drm/i915/i915_timeline.h b/drivers/gpu/drm/i915/i915_timeline.h deleted file mode 100644 index a454d49f229f..000000000000 --- a/drivers/gpu/drm/i915/i915_timeline.h +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Copyright © 2016 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. 
- * - */ - -#ifndef I915_TIMELINE_H -#define I915_TIMELINE_H - -#include - -#include "i915_active.h" -#include "i915_syncmap.h" -#include "i915_timeline_types.h" - -int i915_timeline_init(struct i915_timeline *tl, - struct intel_gt *gt, - struct i915_vma *hwsp); -void i915_timeline_fini(struct i915_timeline *tl); - -struct i915_timeline * -i915_timeline_create(struct intel_gt *gt, struct i915_vma *global_hwsp); - -static inline struct i915_timeline * -i915_timeline_get(struct i915_timeline *timeline) -{ - kref_get(&timeline->kref); - return timeline; -} - -void __i915_timeline_free(struct kref *kref); -static inline void i915_timeline_put(struct i915_timeline *timeline) -{ - kref_put(&timeline->kref, __i915_timeline_free); -} - -static inline int __i915_timeline_sync_set(struct i915_timeline *tl, - u64 context, u32 seqno) -{ - return i915_syncmap_set(&tl->sync, context, seqno); -} - -static inline int i915_timeline_sync_set(struct i915_timeline *tl, - const struct dma_fence *fence) -{ - return __i915_timeline_sync_set(tl, fence->context, fence->seqno); -} - -static inline bool __i915_timeline_sync_is_later(struct i915_timeline *tl, - u64 context, u32 seqno) -{ - return i915_syncmap_is_later(&tl->sync, context, seqno); -} - -static inline bool i915_timeline_sync_is_later(struct i915_timeline *tl, - const struct dma_fence *fence) -{ - return __i915_timeline_sync_is_later(tl, fence->context, fence->seqno); -} - -int i915_timeline_pin(struct i915_timeline *tl); -int i915_timeline_get_seqno(struct i915_timeline *tl, - struct i915_request *rq, - u32 *seqno); -void i915_timeline_unpin(struct i915_timeline *tl); - -int i915_timeline_read_hwsp(struct i915_request *from, - struct i915_request *until, - u32 *hwsp_offset); - -void i915_timelines_init(struct drm_i915_private *i915); -void i915_timelines_park(struct drm_i915_private *i915); -void i915_timelines_fini(struct drm_i915_private *i915); - -#endif diff --git a/drivers/gpu/drm/i915/i915_timeline_types.h b/drivers/gpu/drm/i915/i915_timeline_types.h deleted file mode 100644 index 931585e12d41..000000000000 --- a/drivers/gpu/drm/i915/i915_timeline_types.h +++ /dev/null @@ -1,67 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2016 Intel Corporation - */ - -#ifndef __I915_TIMELINE_TYPES_H__ -#define __I915_TIMELINE_TYPES_H__ - -#include -#include -#include -#include - -#include "i915_active_types.h" - -struct drm_i915_private; -struct i915_vma; -struct i915_timeline_cacheline; -struct i915_syncmap; - -struct i915_timeline { - u64 fence_context; - u32 seqno; - - struct mutex mutex; /* protects the flow of requests */ - - unsigned int pin_count; - const u32 *hwsp_seqno; - struct i915_vma *hwsp_ggtt; - u32 hwsp_offset; - - struct i915_timeline_cacheline *hwsp_cacheline; - - bool has_initial_breadcrumb; - - /** - * List of breadcrumbs associated with GPU requests currently - * outstanding. - */ - struct list_head requests; - - /* Contains an RCU guarded pointer to the last request. No reference is - * held to the request, users must carefully acquire a reference to - * the request using i915_active_request_get_request_rcu(), or hold the - * struct_mutex. - */ - struct i915_active_request last_request; - - /** - * We track the most recent seqno that we wait on in every context so - * that we only have to emit a new await and dependency on a more - * recent sync point. As the contexts may be executed out-of-order, we - * have to track each individually and can not rely on an absolute - * global_seqno. 
When we know that all tracked fences are completed - * (i.e. when the driver is idle), we know that the syncmap is - * redundant and we can discard it without loss of generality. - */ - struct i915_syncmap *sync; - - struct list_head link; - struct intel_gt *gt; - - struct kref kref; -}; - -#endif /* __I915_TIMELINE_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h index d5dc4427d664..2b31a4ee0b4c 100644 --- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h +++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h @@ -12,7 +12,7 @@ selftest(sanitycheck, i915_live_sanitycheck) /* keep first (igt selfcheck) */ selftest(uncore, intel_uncore_live_selftests) selftest(workarounds, intel_workarounds_live_selftests) -selftest(timelines, i915_timeline_live_selftests) +selftest(timelines, intel_timeline_live_selftests) selftest(requests, i915_request_live_selftests) selftest(active, i915_active_live_selftests) selftest(objects, i915_gem_object_live_selftests) diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h index 510eb176bb2c..b55da4d9ccba 100644 --- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h +++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h @@ -15,7 +15,7 @@ selftest(scatterlist, scatterlist_mock_selftests) selftest(syncmap, i915_syncmap_mock_selftests) selftest(uncore, intel_uncore_mock_selftests) selftest(engine, intel_engine_cs_mock_selftests) -selftest(timelines, i915_timeline_mock_selftests) +selftest(timelines, intel_timeline_mock_selftests) selftest(requests, i915_request_mock_selftests) selftest(objects, i915_gem_object_mock_selftests) selftest(phys, i915_gem_phys_mock_selftests) diff --git a/drivers/gpu/drm/i915/selftests/i915_timeline.c b/drivers/gpu/drm/i915/selftests/i915_timeline.c deleted file mode 100644 index 44d031446f08..000000000000 --- a/drivers/gpu/drm/i915/selftests/i915_timeline.c +++ /dev/null @@ -1,845 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2017-2018 Intel Corporation - */ - -#include - -#include "gem/i915_gem_pm.h" - -#include "i915_random.h" -#include "i915_selftest.h" - -#include "igt_flush_test.h" -#include "mock_gem_device.h" -#include "mock_timeline.h" - -static struct page *hwsp_page(struct i915_timeline *tl) -{ - struct drm_i915_gem_object *obj = tl->hwsp_ggtt->obj; - - GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); - return sg_page(obj->mm.pages->sgl); -} - -static unsigned long hwsp_cacheline(struct i915_timeline *tl) -{ - unsigned long address = (unsigned long)page_address(hwsp_page(tl)); - - return (address + tl->hwsp_offset) / CACHELINE_BYTES; -} - -#define CACHELINES_PER_PAGE (PAGE_SIZE / CACHELINE_BYTES) - -struct mock_hwsp_freelist { - struct drm_i915_private *i915; - struct radix_tree_root cachelines; - struct i915_timeline **history; - unsigned long count, max; - struct rnd_state prng; -}; - -enum { - SHUFFLE = BIT(0), -}; - -static void __mock_hwsp_record(struct mock_hwsp_freelist *state, - unsigned int idx, - struct i915_timeline *tl) -{ - tl = xchg(&state->history[idx], tl); - if (tl) { - radix_tree_delete(&state->cachelines, hwsp_cacheline(tl)); - i915_timeline_put(tl); - } -} - -static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state, - unsigned int count, - unsigned int flags) -{ - struct i915_timeline *tl; - unsigned int idx; - - while (count--) { - unsigned long cacheline; - int err; - - tl = 
i915_timeline_create(&state->i915->gt, NULL); - if (IS_ERR(tl)) - return PTR_ERR(tl); - - cacheline = hwsp_cacheline(tl); - err = radix_tree_insert(&state->cachelines, cacheline, tl); - if (err) { - if (err == -EEXIST) { - pr_err("HWSP cacheline %lu already used; duplicate allocation!\n", - cacheline); - } - i915_timeline_put(tl); - return err; - } - - idx = state->count++ % state->max; - __mock_hwsp_record(state, idx, tl); - } - - if (flags & SHUFFLE) - i915_prandom_shuffle(state->history, - sizeof(*state->history), - min(state->count, state->max), - &state->prng); - - count = i915_prandom_u32_max_state(min(state->count, state->max), - &state->prng); - while (count--) { - idx = --state->count % state->max; - __mock_hwsp_record(state, idx, NULL); - } - - return 0; -} - -static int mock_hwsp_freelist(void *arg) -{ - struct mock_hwsp_freelist state; - const struct { - const char *name; - unsigned int flags; - } phases[] = { - { "linear", 0 }, - { "shuffled", SHUFFLE }, - { }, - }, *p; - unsigned int na; - int err = 0; - - INIT_RADIX_TREE(&state.cachelines, GFP_KERNEL); - state.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed); - - state.i915 = mock_gem_device(); - if (!state.i915) - return -ENOMEM; - - /* - * Create a bunch of timelines and check that their HWSP do not overlap. - * Free some, and try again. - */ - - state.max = PAGE_SIZE / sizeof(*state.history); - state.count = 0; - state.history = kcalloc(state.max, sizeof(*state.history), GFP_KERNEL); - if (!state.history) { - err = -ENOMEM; - goto err_put; - } - - mutex_lock(&state.i915->drm.struct_mutex); - for (p = phases; p->name; p++) { - pr_debug("%s(%s)\n", __func__, p->name); - for_each_prime_number_from(na, 1, 2 * CACHELINES_PER_PAGE) { - err = __mock_hwsp_timeline(&state, na, p->flags); - if (err) - goto out; - } - } - -out: - for (na = 0; na < state.max; na++) - __mock_hwsp_record(&state, na, NULL); - mutex_unlock(&state.i915->drm.struct_mutex); - kfree(state.history); -err_put: - drm_dev_put(&state.i915->drm); - return err; -} - -struct __igt_sync { - const char *name; - u32 seqno; - bool expected; - bool set; -}; - -static int __igt_sync(struct i915_timeline *tl, - u64 ctx, - const struct __igt_sync *p, - const char *name) -{ - int ret; - - if (__i915_timeline_sync_is_later(tl, ctx, p->seqno) != p->expected) { - pr_err("%s: %s(ctx=%llu, seqno=%u) expected passed %s but failed\n", - name, p->name, ctx, p->seqno, yesno(p->expected)); - return -EINVAL; - } - - if (p->set) { - ret = __i915_timeline_sync_set(tl, ctx, p->seqno); - if (ret) - return ret; - } - - return 0; -} - -static int igt_sync(void *arg) -{ - const struct __igt_sync pass[] = { - { "unset", 0, false, false }, - { "new", 0, false, true }, - { "0a", 0, true, true }, - { "1a", 1, false, true }, - { "1b", 1, true, true }, - { "0b", 0, true, false }, - { "2a", 2, false, true }, - { "4", 4, false, true }, - { "INT_MAX", INT_MAX, false, true }, - { "INT_MAX-1", INT_MAX-1, true, false }, - { "INT_MAX+1", (u32)INT_MAX+1, false, true }, - { "INT_MAX", INT_MAX, true, false }, - { "UINT_MAX", UINT_MAX, false, true }, - { "wrap", 0, false, true }, - { "unwrap", UINT_MAX, true, false }, - {}, - }, *p; - struct i915_timeline tl; - int order, offset; - int ret = -ENODEV; - - mock_timeline_init(&tl, 0); - for (p = pass; p->name; p++) { - for (order = 1; order < 64; order++) { - for (offset = -1; offset <= (order > 1); offset++) { - u64 ctx = BIT_ULL(order) + offset; - - ret = __igt_sync(&tl, ctx, p, "1"); - if (ret) - goto out; - } - } - } - mock_timeline_fini(&tl); - 
- mock_timeline_init(&tl, 0); - for (order = 1; order < 64; order++) { - for (offset = -1; offset <= (order > 1); offset++) { - u64 ctx = BIT_ULL(order) + offset; - - for (p = pass; p->name; p++) { - ret = __igt_sync(&tl, ctx, p, "2"); - if (ret) - goto out; - } - } - } - -out: - mock_timeline_fini(&tl); - return ret; -} - -static unsigned int random_engine(struct rnd_state *rnd) -{ - return i915_prandom_u32_max_state(I915_NUM_ENGINES, rnd); -} - -static int bench_sync(void *arg) -{ - struct rnd_state prng; - struct i915_timeline tl; - unsigned long end_time, count; - u64 prng32_1M; - ktime_t kt; - int order, last_order; - - mock_timeline_init(&tl, 0); - - /* Lookups from cache are very fast and so the random number generation - * and the loop itself becomes a significant factor in the per-iteration - * timings. We try to compensate the results by measuring the overhead - * of the prng and subtract it from the reported results. - */ - prandom_seed_state(&prng, i915_selftest.random_seed); - count = 0; - kt = ktime_get(); - end_time = jiffies + HZ/10; - do { - u32 x; - - /* Make sure the compiler doesn't optimise away the prng call */ - WRITE_ONCE(x, prandom_u32_state(&prng)); - - count++; - } while (!time_after(jiffies, end_time)); - kt = ktime_sub(ktime_get(), kt); - pr_debug("%s: %lu random evaluations, %lluns/prng\n", - __func__, count, (long long)div64_ul(ktime_to_ns(kt), count)); - prng32_1M = div64_ul(ktime_to_ns(kt) << 20, count); - - /* Benchmark (only) setting random context ids */ - prandom_seed_state(&prng, i915_selftest.random_seed); - count = 0; - kt = ktime_get(); - end_time = jiffies + HZ/10; - do { - u64 id = i915_prandom_u64_state(&prng); - - __i915_timeline_sync_set(&tl, id, 0); - count++; - } while (!time_after(jiffies, end_time)); - kt = ktime_sub(ktime_get(), kt); - kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20); - pr_info("%s: %lu random insertions, %lluns/insert\n", - __func__, count, (long long)div64_ul(ktime_to_ns(kt), count)); - - /* Benchmark looking up the exact same context ids as we just set */ - prandom_seed_state(&prng, i915_selftest.random_seed); - end_time = count; - kt = ktime_get(); - while (end_time--) { - u64 id = i915_prandom_u64_state(&prng); - - if (!__i915_timeline_sync_is_later(&tl, id, 0)) { - mock_timeline_fini(&tl); - pr_err("Lookup of %llu failed\n", id); - return -EINVAL; - } - } - kt = ktime_sub(ktime_get(), kt); - kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20); - pr_info("%s: %lu random lookups, %lluns/lookup\n", - __func__, count, (long long)div64_ul(ktime_to_ns(kt), count)); - - mock_timeline_fini(&tl); - cond_resched(); - - mock_timeline_init(&tl, 0); - - /* Benchmark setting the first N (in order) contexts */ - count = 0; - kt = ktime_get(); - end_time = jiffies + HZ/10; - do { - __i915_timeline_sync_set(&tl, count++, 0); - } while (!time_after(jiffies, end_time)); - kt = ktime_sub(ktime_get(), kt); - pr_info("%s: %lu in-order insertions, %lluns/insert\n", - __func__, count, (long long)div64_ul(ktime_to_ns(kt), count)); - - /* Benchmark looking up the exact same context ids as we just set */ - end_time = count; - kt = ktime_get(); - while (end_time--) { - if (!__i915_timeline_sync_is_later(&tl, end_time, 0)) { - pr_err("Lookup of %lu failed\n", end_time); - mock_timeline_fini(&tl); - return -EINVAL; - } - } - kt = ktime_sub(ktime_get(), kt); - pr_info("%s: %lu in-order lookups, %lluns/lookup\n", - __func__, count, (long long)div64_ul(ktime_to_ns(kt), count)); - - mock_timeline_fini(&tl); - cond_resched(); - - 
mock_timeline_init(&tl, 0); - - /* Benchmark searching for a random context id and maybe changing it */ - prandom_seed_state(&prng, i915_selftest.random_seed); - count = 0; - kt = ktime_get(); - end_time = jiffies + HZ/10; - do { - u32 id = random_engine(&prng); - u32 seqno = prandom_u32_state(&prng); - - if (!__i915_timeline_sync_is_later(&tl, id, seqno)) - __i915_timeline_sync_set(&tl, id, seqno); - - count++; - } while (!time_after(jiffies, end_time)); - kt = ktime_sub(ktime_get(), kt); - kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20); - pr_info("%s: %lu repeated insert/lookups, %lluns/op\n", - __func__, count, (long long)div64_ul(ktime_to_ns(kt), count)); - mock_timeline_fini(&tl); - cond_resched(); - - /* Benchmark searching for a known context id and changing the seqno */ - for (last_order = 1, order = 1; order < 32; - ({ int tmp = last_order; last_order = order; order += tmp; })) { - unsigned int mask = BIT(order) - 1; - - mock_timeline_init(&tl, 0); - - count = 0; - kt = ktime_get(); - end_time = jiffies + HZ/10; - do { - /* Without assuming too many details of the underlying - * implementation, try to identify its phase-changes - * (if any)! - */ - u64 id = (u64)(count & mask) << order; - - __i915_timeline_sync_is_later(&tl, id, 0); - __i915_timeline_sync_set(&tl, id, 0); - - count++; - } while (!time_after(jiffies, end_time)); - kt = ktime_sub(ktime_get(), kt); - pr_info("%s: %lu cyclic/%d insert/lookups, %lluns/op\n", - __func__, count, order, - (long long)div64_ul(ktime_to_ns(kt), count)); - mock_timeline_fini(&tl); - cond_resched(); - } - - return 0; -} - -int i915_timeline_mock_selftests(void) -{ - static const struct i915_subtest tests[] = { - SUBTEST(mock_hwsp_freelist), - SUBTEST(igt_sync), - SUBTEST(bench_sync), - }; - - return i915_subtests(tests, NULL); -} - -static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value) -{ - u32 *cs; - - cs = intel_ring_begin(rq, 4); - if (IS_ERR(cs)) - return PTR_ERR(cs); - - if (INTEL_GEN(rq->i915) >= 8) { - *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; - *cs++ = addr; - *cs++ = 0; - *cs++ = value; - } else if (INTEL_GEN(rq->i915) >= 4) { - *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; - *cs++ = 0; - *cs++ = addr; - *cs++ = value; - } else { - *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; - *cs++ = addr; - *cs++ = value; - *cs++ = MI_NOOP; - } - - intel_ring_advance(rq, cs); - - return 0; -} - -static struct i915_request * -tl_write(struct i915_timeline *tl, struct intel_engine_cs *engine, u32 value) -{ - struct i915_request *rq; - int err; - - lockdep_assert_held(&tl->gt->i915->drm.struct_mutex); /* lazy rq refs */ - - err = i915_timeline_pin(tl); - if (err) { - rq = ERR_PTR(err); - goto out; - } - - rq = i915_request_create(engine->kernel_context); - if (IS_ERR(rq)) - goto out_unpin; - - err = emit_ggtt_store_dw(rq, tl->hwsp_offset, value); - i915_request_add(rq); - if (err) - rq = ERR_PTR(err); - -out_unpin: - i915_timeline_unpin(tl); -out: - if (IS_ERR(rq)) - pr_err("Failed to write to timeline!\n"); - return rq; -} - -static struct i915_timeline * -checked_i915_timeline_create(struct drm_i915_private *i915) -{ - struct i915_timeline *tl; - - tl = i915_timeline_create(&i915->gt, NULL); - if (IS_ERR(tl)) - return tl; - - if (*tl->hwsp_seqno != tl->seqno) { - pr_err("Timeline created with incorrect breadcrumb, found %x, expected %x\n", - *tl->hwsp_seqno, tl->seqno); - i915_timeline_put(tl); - return ERR_PTR(-EINVAL); - } - - return tl; -} - -static int live_hwsp_engine(void *arg) -{ -#define NUM_TIMELINES 4096 - 
struct drm_i915_private *i915 = arg; - struct i915_timeline **timelines; - struct intel_engine_cs *engine; - enum intel_engine_id id; - intel_wakeref_t wakeref; - unsigned long count, n; - int err = 0; - - /* - * Create a bunch of timelines and check we can write - * independently to each of their breadcrumb slots. - */ - - timelines = kvmalloc_array(NUM_TIMELINES * I915_NUM_ENGINES, - sizeof(*timelines), - GFP_KERNEL); - if (!timelines) - return -ENOMEM; - - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(&i915->runtime_pm); - - count = 0; - for_each_engine(engine, i915, id) { - if (!intel_engine_can_store_dword(engine)) - continue; - - for (n = 0; n < NUM_TIMELINES; n++) { - struct i915_timeline *tl; - struct i915_request *rq; - - tl = checked_i915_timeline_create(i915); - if (IS_ERR(tl)) { - err = PTR_ERR(tl); - goto out; - } - - rq = tl_write(tl, engine, count); - if (IS_ERR(rq)) { - i915_timeline_put(tl); - err = PTR_ERR(rq); - goto out; - } - - timelines[count++] = tl; - } - } - -out: - if (igt_flush_test(i915, I915_WAIT_LOCKED)) - err = -EIO; - - for (n = 0; n < count; n++) { - struct i915_timeline *tl = timelines[n]; - - if (!err && *tl->hwsp_seqno != n) { - pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n", - n, *tl->hwsp_seqno); - err = -EINVAL; - } - i915_timeline_put(tl); - } - - intel_runtime_pm_put(&i915->runtime_pm, wakeref); - mutex_unlock(&i915->drm.struct_mutex); - - kvfree(timelines); - - return err; -#undef NUM_TIMELINES -} - -static int live_hwsp_alternate(void *arg) -{ -#define NUM_TIMELINES 4096 - struct drm_i915_private *i915 = arg; - struct i915_timeline **timelines; - struct intel_engine_cs *engine; - enum intel_engine_id id; - intel_wakeref_t wakeref; - unsigned long count, n; - int err = 0; - - /* - * Create a bunch of timelines and check we can write - * independently to each of their breadcrumb slots with adjacent - * engines. - */ - - timelines = kvmalloc_array(NUM_TIMELINES * I915_NUM_ENGINES, - sizeof(*timelines), - GFP_KERNEL); - if (!timelines) - return -ENOMEM; - - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(&i915->runtime_pm); - - count = 0; - for (n = 0; n < NUM_TIMELINES; n++) { - for_each_engine(engine, i915, id) { - struct i915_timeline *tl; - struct i915_request *rq; - - if (!intel_engine_can_store_dword(engine)) - continue; - - tl = checked_i915_timeline_create(i915); - if (IS_ERR(tl)) { - err = PTR_ERR(tl); - goto out; - } - - rq = tl_write(tl, engine, count); - if (IS_ERR(rq)) { - i915_timeline_put(tl); - err = PTR_ERR(rq); - goto out; - } - - timelines[count++] = tl; - } - } - -out: - if (igt_flush_test(i915, I915_WAIT_LOCKED)) - err = -EIO; - - for (n = 0; n < count; n++) { - struct i915_timeline *tl = timelines[n]; - - if (!err && *tl->hwsp_seqno != n) { - pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n", - n, *tl->hwsp_seqno); - err = -EINVAL; - } - i915_timeline_put(tl); - } - - intel_runtime_pm_put(&i915->runtime_pm, wakeref); - mutex_unlock(&i915->drm.struct_mutex); - - kvfree(timelines); - - return err; -#undef NUM_TIMELINES -} - -static int live_hwsp_wrap(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct intel_engine_cs *engine; - struct i915_timeline *tl; - enum intel_engine_id id; - intel_wakeref_t wakeref; - int err = 0; - - /* - * Across a seqno wrap, we need to keep the old cacheline alive for - * foreign GPU references. 
- */ - - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(&i915->runtime_pm); - - tl = i915_timeline_create(&i915->gt, NULL); - if (IS_ERR(tl)) { - err = PTR_ERR(tl); - goto out_rpm; - } - if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline) - goto out_free; - - err = i915_timeline_pin(tl); - if (err) - goto out_free; - - for_each_engine(engine, i915, id) { - const u32 *hwsp_seqno[2]; - struct i915_request *rq; - u32 seqno[2]; - - if (!intel_engine_can_store_dword(engine)) - continue; - - rq = i915_request_create(engine->kernel_context); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto out; - } - - tl->seqno = -4u; - - err = i915_timeline_get_seqno(tl, rq, &seqno[0]); - if (err) { - i915_request_add(rq); - goto out; - } - pr_debug("seqno[0]:%08x, hwsp_offset:%08x\n", - seqno[0], tl->hwsp_offset); - - err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[0]); - if (err) { - i915_request_add(rq); - goto out; - } - hwsp_seqno[0] = tl->hwsp_seqno; - - err = i915_timeline_get_seqno(tl, rq, &seqno[1]); - if (err) { - i915_request_add(rq); - goto out; - } - pr_debug("seqno[1]:%08x, hwsp_offset:%08x\n", - seqno[1], tl->hwsp_offset); - - err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[1]); - if (err) { - i915_request_add(rq); - goto out; - } - hwsp_seqno[1] = tl->hwsp_seqno; - - /* With wrap should come a new hwsp */ - GEM_BUG_ON(seqno[1] >= seqno[0]); - GEM_BUG_ON(hwsp_seqno[0] == hwsp_seqno[1]); - - i915_request_add(rq); - - if (i915_request_wait(rq, 0, HZ / 5) < 0) { - pr_err("Wait for timeline writes timed out!\n"); - err = -EIO; - goto out; - } - - if (*hwsp_seqno[0] != seqno[0] || *hwsp_seqno[1] != seqno[1]) { - pr_err("Bad timeline values: found (%x, %x), expected (%x, %x)\n", - *hwsp_seqno[0], *hwsp_seqno[1], - seqno[0], seqno[1]); - err = -EINVAL; - goto out; - } - - i915_retire_requests(i915); /* recycle HWSP */ - } - -out: - if (igt_flush_test(i915, I915_WAIT_LOCKED)) - err = -EIO; - - i915_timeline_unpin(tl); -out_free: - i915_timeline_put(tl); -out_rpm: - intel_runtime_pm_put(&i915->runtime_pm, wakeref); - mutex_unlock(&i915->drm.struct_mutex); - - return err; -} - -static int live_hwsp_recycle(void *arg) -{ - struct drm_i915_private *i915 = arg; - struct intel_engine_cs *engine; - enum intel_engine_id id; - intel_wakeref_t wakeref; - unsigned long count; - int err = 0; - - /* - * Check seqno writes into one timeline at a time. We expect to - * recycle the breadcrumb slot between iterations and neither - * want to confuse ourselves or the GPU. - */ - - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(&i915->runtime_pm); - - count = 0; - for_each_engine(engine, i915, id) { - IGT_TIMEOUT(end_time); - - if (!intel_engine_can_store_dword(engine)) - continue; - - do { - struct i915_timeline *tl; - struct i915_request *rq; - - tl = checked_i915_timeline_create(i915); - if (IS_ERR(tl)) { - err = PTR_ERR(tl); - goto out; - } - - rq = tl_write(tl, engine, count); - if (IS_ERR(rq)) { - i915_timeline_put(tl); - err = PTR_ERR(rq); - goto out; - } - - if (i915_request_wait(rq, 0, HZ / 5) < 0) { - pr_err("Wait for timeline writes timed out!\n"); - i915_timeline_put(tl); - err = -EIO; - goto out; - } - - if (*tl->hwsp_seqno != count) { - pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n", - count, *tl->hwsp_seqno); - err = -EINVAL; - } - - i915_timeline_put(tl); - count++; - - if (err) - goto out; - - i915_timelines_park(i915); /* Encourage recycling! 
*/ - } while (!__igt_timeout(end_time, NULL)); - } - -out: - if (igt_flush_test(i915, I915_WAIT_LOCKED)) - err = -EIO; - intel_runtime_pm_put(&i915->runtime_pm, wakeref); - mutex_unlock(&i915->drm.struct_mutex); - - return err; -} - -int i915_timeline_live_selftests(struct drm_i915_private *i915) -{ - static const struct i915_subtest tests[] = { - SUBTEST(live_hwsp_recycle), - SUBTEST(live_hwsp_engine), - SUBTEST(live_hwsp_alternate), - SUBTEST(live_hwsp_wrap), - }; - - if (i915_terminally_wedged(i915)) - return 0; - - return i915_subtests(tests, i915); -} diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 021ba42a3a00..2741805b56c2 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -68,7 +68,7 @@ static void mock_device_release(struct drm_device *dev) i915_gem_contexts_fini(i915); mutex_unlock(&i915->drm.struct_mutex); - i915_timelines_fini(i915); + intel_timelines_fini(i915); drain_workqueue(i915->wq); i915_gem_drain_freed_objects(i915); @@ -199,7 +199,7 @@ struct drm_i915_private *mock_gem_device(void) i915->gt.awake = true; - i915_timelines_init(i915); + intel_timelines_init(i915); mutex_lock(&i915->drm.struct_mutex); @@ -230,7 +230,7 @@ err_engine: mock_engine_free(i915->engine[RCS0]); err_unlock: mutex_unlock(&i915->drm.struct_mutex); - i915_timelines_fini(i915); + intel_timelines_fini(i915); destroy_workqueue(i915->wq); err_drv: drm_mode_config_cleanup(&i915->drm); diff --git a/drivers/gpu/drm/i915/selftests/mock_timeline.c b/drivers/gpu/drm/i915/selftests/mock_timeline.c deleted file mode 100644 index c80ac0fbdd3b..000000000000 --- a/drivers/gpu/drm/i915/selftests/mock_timeline.c +++ /dev/null @@ -1,29 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2017-2018 Intel Corporation - */ - -#include "../i915_timeline.h" - -#include "mock_timeline.h" - -void mock_timeline_init(struct i915_timeline *timeline, u64 context) -{ - timeline->gt = NULL; - timeline->fence_context = context; - - mutex_init(&timeline->mutex); - - INIT_ACTIVE_REQUEST(&timeline->last_request); - INIT_LIST_HEAD(&timeline->requests); - - i915_syncmap_init(&timeline->sync); - - INIT_LIST_HEAD(&timeline->link); -} - -void mock_timeline_fini(struct i915_timeline *timeline) -{ - i915_syncmap_free(&timeline->sync); -} diff --git a/drivers/gpu/drm/i915/selftests/mock_timeline.h b/drivers/gpu/drm/i915/selftests/mock_timeline.h deleted file mode 100644 index b6deaa61110d..000000000000 --- a/drivers/gpu/drm/i915/selftests/mock_timeline.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2017-2018 Intel Corporation - */ - -#ifndef __MOCK_TIMELINE__ -#define __MOCK_TIMELINE__ - -struct i915_timeline; - -void mock_timeline_init(struct i915_timeline *timeline, u64 context); -void mock_timeline_fini(struct i915_timeline *timeline); - -#endif /* !__MOCK_TIMELINE__ */ -- cgit v1.2.3 From db56f974941b75b2c96e577100e7abe64b03d9b9 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 21 Jun 2019 08:08:11 +0100 Subject: drm/i915: Eliminate dual personality of i915_scratch_offset Scratch vma lives under gt but the API used to work on i915. Make this consistent by renaming the function to intel_gt_scratch_offset and make it take struct intel_gt. v2: * Move to intel_gt. 
(Chris) Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190621070811.7006-33-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 2 +- drivers/gpu/drm/i915/gt/intel_gt.c | 38 ++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/gt/intel_gt.h | 8 +++++++ drivers/gpu/drm/i915/gt/intel_lrc.c | 9 +++---- drivers/gpu/drm/i915/gt/intel_ringbuffer.c | 30 ++++++++++++++--------- drivers/gpu/drm/i915/i915_drv.h | 5 ---- drivers/gpu/drm/i915/i915_gem.c | 31 ++---------------------- drivers/gpu/drm/i915/i915_gpu_error.c | 4 ++-- 8 files changed, 75 insertions(+), 52 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index e30212e219ec..4961f74fd902 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -734,7 +734,7 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine) struct measure_breadcrumb *frame; int dw = -ENOMEM; - GEM_BUG_ON(!engine->i915->gt.scratch); + GEM_BUG_ON(!engine->gt->scratch); frame = kzalloc(sizeof(*frame), GFP_KERNEL); if (!frame) diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index e22ee3e823fa..8cca6b22b386 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -203,3 +203,41 @@ void intel_gt_chipset_flush(struct intel_gt *gt) if (INTEL_GEN(gt->i915) < 6) intel_gtt_chipset_flush(); } + +int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size) +{ + struct drm_i915_private *i915 = gt->i915; + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + int ret; + + obj = i915_gem_object_create_stolen(i915, size); + if (!obj) + obj = i915_gem_object_create_internal(i915, size); + if (IS_ERR(obj)) { + DRM_ERROR("Failed to allocate scratch page\n"); + return PTR_ERR(obj); + } + + vma = i915_vma_instance(obj, >->ggtt->vm, NULL); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + goto err_unref; + } + + ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); + if (ret) + goto err_unref; + + gt->scratch = vma; + return 0; + +err_unref: + i915_gem_object_put(obj); + return ret; +} + +void intel_gt_fini_scratch(struct intel_gt *gt) +{ + i915_vma_unpin_and_release(>->scratch, 0); +} diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h index 29cd15be6a01..cf3c6cecc8ee 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.h +++ b/drivers/gpu/drm/i915/gt/intel_gt.h @@ -21,4 +21,12 @@ void intel_gt_clear_error_registers(struct intel_gt *gt, void intel_gt_flush_ggtt_writes(struct intel_gt *gt); void intel_gt_chipset_flush(struct intel_gt *gt); +int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size); +void intel_gt_fini_scratch(struct intel_gt *gt); + +static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt) +{ + return i915_ggtt_offset(gt->scratch); +} + #endif /* __INTEL_GT_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 3abcec3e4e0e..b3e0e25c5d80 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -135,6 +135,7 @@ #include "gem/i915_gem_context.h" +#include "gt/intel_gt.h" #include "i915_drv.h" #include "i915_gem_render_state.h" #include "i915_vgpu.h" @@ -1756,7 +1757,7 @@ gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch) /* NB no one else is allowed to scribble over scratch + 256! 
*/ *batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT; *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4); - *batch++ = i915_scratch_offset(engine->i915) + 256; + *batch++ = intel_gt_scratch_offset(engine->gt) + 256; *batch++ = 0; *batch++ = MI_LOAD_REGISTER_IMM(1); @@ -1770,7 +1771,7 @@ gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch) *batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT; *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4); - *batch++ = i915_scratch_offset(engine->i915) + 256; + *batch++ = intel_gt_scratch_offset(engine->gt) + 256; *batch++ = 0; return batch; @@ -1807,7 +1808,7 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL | PIPE_CONTROL_QW_WRITE, - i915_scratch_offset(engine->i915) + + intel_gt_scratch_offset(engine->gt) + 2 * CACHELINE_BYTES); *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; @@ -2501,7 +2502,7 @@ static int gen8_emit_flush_render(struct i915_request *request, { struct intel_engine_cs *engine = request->engine; u32 scratch_addr = - i915_scratch_offset(engine->i915) + 2 * CACHELINE_BYTES; + intel_gt_scratch_offset(engine->gt) + 2 * CACHELINE_BYTES; bool vf_flush_wa = false, dc_flush_wa = false; u32 *cs, flags = 0; int len; diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c index aa483bba04bf..d65b8cba1a8f 100644 --- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c @@ -33,6 +33,8 @@ #include "gem/i915_gem_context.h" +#include "gt/intel_gt.h" + #include "i915_drv.h" #include "i915_gem_render_state.h" #include "i915_trace.h" @@ -75,7 +77,7 @@ gen2_render_ring_flush(struct i915_request *rq, u32 mode) *cs++ = cmd; while (num_store_dw--) { *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; - *cs++ = i915_scratch_offset(rq->i915); + *cs++ = intel_gt_scratch_offset(rq->engine->gt); *cs++ = 0; } *cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH; @@ -148,7 +150,8 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode) */ if (mode & EMIT_INVALIDATE) { *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE; - *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT; + *cs++ = intel_gt_scratch_offset(rq->engine->gt) | + PIPE_CONTROL_GLOBAL_GTT; *cs++ = 0; *cs++ = 0; @@ -156,7 +159,8 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode) *cs++ = MI_FLUSH; *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE; - *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT; + *cs++ = intel_gt_scratch_offset(rq->engine->gt) | + PIPE_CONTROL_GLOBAL_GTT; *cs++ = 0; *cs++ = 0; } @@ -208,7 +212,8 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode) static int gen6_emit_post_sync_nonzero_flush(struct i915_request *rq) { - u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES; + u32 scratch_addr = + intel_gt_scratch_offset(rq->engine->gt) + 2 * CACHELINE_BYTES; u32 *cs; cs = intel_ring_begin(rq, 6); @@ -241,7 +246,8 @@ gen6_emit_post_sync_nonzero_flush(struct i915_request *rq) static int gen6_render_ring_flush(struct i915_request *rq, u32 mode) { - u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES; + u32 scratch_addr = + intel_gt_scratch_offset(rq->engine->gt) + 2 * CACHELINE_BYTES; u32 *cs, flags = 0; int ret; @@ -299,7 +305,8 @@ static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) *cs++ = GFX_OP_PIPE_CONTROL(4); *cs++ = PIPE_CONTROL_QW_WRITE; - *cs++ = i915_scratch_offset(rq->i915) | 
PIPE_CONTROL_GLOBAL_GTT; + *cs++ = intel_gt_scratch_offset(rq->engine->gt) | + PIPE_CONTROL_GLOBAL_GTT; *cs++ = 0; /* Finally we can flush and with it emit the breadcrumb */ @@ -342,7 +349,8 @@ gen7_render_ring_cs_stall_wa(struct i915_request *rq) static int gen7_render_ring_flush(struct i915_request *rq, u32 mode) { - u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES; + u32 scratch_addr = + intel_gt_scratch_offset(rq->engine->gt) + 2 * CACHELINE_BYTES; u32 *cs, flags = 0; /* @@ -1071,9 +1079,9 @@ i830_emit_bb_start(struct i915_request *rq, u64 offset, u32 len, unsigned int dispatch_flags) { - u32 *cs, cs_offset = i915_scratch_offset(rq->i915); + u32 *cs, cs_offset = intel_gt_scratch_offset(rq->engine->gt); - GEM_BUG_ON(rq->i915->gt.scratch->size < I830_WA_SIZE); + GEM_BUG_ON(rq->engine->gt->scratch->size < I830_WA_SIZE); cs = intel_ring_begin(rq, 6); if (IS_ERR(cs)) @@ -1513,7 +1521,7 @@ static int flush_pd_dir(struct i915_request *rq) /* Stall until the page table load is complete */ *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base)); - *cs++ = i915_scratch_offset(rq->i915); + *cs++ = intel_gt_scratch_offset(rq->engine->gt); *cs++ = MI_NOOP; intel_ring_advance(rq, cs); @@ -1629,7 +1637,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags) /* Insert a delay before the next switch! */ *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; *cs++ = i915_mmio_reg_offset(last_reg); - *cs++ = i915_scratch_offset(rq->i915); + *cs++ = intel_gt_scratch_offset(rq->engine->gt); *cs++ = MI_NOOP; } *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 89a21fa4eac2..4077d15fe429 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2783,11 +2783,6 @@ static inline int intel_hws_csb_write_index(struct drm_i915_private *i915) return I915_HWS_CSB_WRITE_INDEX; } -static inline u32 i915_scratch_offset(const struct drm_i915_private *i915) -{ - return i915_ggtt_offset(i915->gt.scratch); -} - static inline enum i915_map_type i915_coherent_map_type(struct drm_i915_private *i915) { diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 6e07127242d9..8dff3b8f12c3 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1424,39 +1424,12 @@ err_active: static int i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size) { - struct drm_i915_gem_object *obj; - struct i915_vma *vma; - int ret; - - obj = i915_gem_object_create_stolen(i915, size); - if (!obj) - obj = i915_gem_object_create_internal(i915, size); - if (IS_ERR(obj)) { - DRM_ERROR("Failed to allocate scratch page\n"); - return PTR_ERR(obj); - } - - vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); - if (IS_ERR(vma)) { - ret = PTR_ERR(vma); - goto err_unref; - } - - ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); - if (ret) - goto err_unref; - - i915->gt.scratch = vma; - return 0; - -err_unref: - i915_gem_object_put(obj); - return ret; + return intel_gt_init_scratch(&i915->gt, size); } static void i915_gem_fini_scratch(struct drm_i915_private *i915) { - i915_vma_unpin_and_release(&i915->gt.scratch, 0); + intel_gt_fini_scratch(&i915->gt); } static int intel_engines_verify_workarounds(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 330a25c5db6a..59f5b0265ee3 100644 --- 
a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -1441,8 +1441,8 @@ static void gem_record_rings(struct i915_gpu_state *error) if (HAS_BROKEN_CS_TLB(i915)) ee->wa_batchbuffer = - i915_error_object_create(i915, - i915->gt.scratch); + i915_error_object_create(i915, + engine->gt->scratch); request_record_user_bo(request, ee); ee->ctx = -- cgit v1.2.3 From 5f6730a4689269240e911853403456e243513ae0 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 21 Jun 2019 14:52:46 +0100 Subject: drm/i915: Prevent dereference of engine before NULL check in error capture smatch caught, drivers/gpu/drm/i915/i915_gpu_error.c:1418 gem_record_rings() warn: variable dereferenced before check 'engine' (see line 1413) Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190621135246.20683-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gpu_error.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 59f5b0265ee3..5489cd879315 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -1410,7 +1410,6 @@ static void gem_record_rings(struct i915_gpu_state *error) for (i = 0; i < I915_NUM_ENGINES; i++) { struct intel_engine_cs *engine = i915->engine[i]; struct drm_i915_error_engine *ee = &error->engine[i]; - struct i915_ggtt *ggtt = engine->gt->ggtt; struct i915_request *request; ee->engine_id = -1; @@ -1428,7 +1427,7 @@ static void gem_record_rings(struct i915_gpu_state *error) struct i915_gem_context *ctx = request->gem_context; struct intel_ring *ring; - ee->vm = ctx->vm ?: &ggtt->vm; + ee->vm = ctx->vm ?: &engine->gt->ggtt->vm; record_context(&ee->context, ctx); -- cgit v1.2.3 From c6fe28b0c27dfc8103af453ed9723907cd96e3f0 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 21 Jun 2019 14:16:39 +0100 Subject: drm/i915/gt: Rename i915_gt_timelines Since the anonymous i915_gt became struct intel_gt and encloses struct i915_gt_timelines, rename i915_gt_timelines to intel_gt_timelines to match its parentage. 
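For orientation, the structure being renamed looks roughly like the sketch below, trimmed to the fields this series actually touches (the full definition in gt/intel_gt_types.h carries more state); the nested type simply inherits the intel_ prefix of its parent:

        struct intel_gt {
                /* ... uncore, ggtt and other gt-wide state ... */

                struct intel_gt_timelines {
                        struct mutex mutex;             /* protects active_list */
                        struct list_head active_list;

                        /* pack multiple timelines' seqnos into the same page */
                        spinlock_t hwsp_lock;
                        struct list_head hwsp_free_list;
                } timelines;
        };
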
Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190621131640.28864-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_gt_types.h | 2 +- drivers/gpu/drm/i915/gt/intel_timeline.c | 16 ++++++++-------- drivers/gpu/drm/i915/i915_gem.c | 2 +- 3 files changed, 10 insertions(+), 10 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h index be891492505d..722506b7cec0 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h @@ -25,7 +25,7 @@ struct intel_gt { struct intel_uncore *uncore; struct i915_ggtt *ggtt; - struct i915_gt_timelines { + struct intel_gt_timelines { struct mutex mutex; /* protects list, tainted by GPU */ struct list_head active_list; diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c index 1a3f04458730..d6accef50f8e 100644 --- a/drivers/gpu/drm/i915/gt/intel_timeline.c +++ b/drivers/gpu/drm/i915/gt/intel_timeline.c @@ -17,7 +17,7 @@ struct intel_timeline_hwsp { struct intel_gt *gt; - struct i915_gt_timelines *gt_timelines; + struct intel_gt_timelines *gt_timelines; struct list_head free_link; struct i915_vma *vma; u64 free_bitmap; @@ -53,7 +53,7 @@ static struct i915_vma *__hwsp_alloc(struct intel_gt *gt) static struct i915_vma * hwsp_alloc(struct intel_timeline *timeline, unsigned int *cacheline) { - struct i915_gt_timelines *gt = &timeline->gt->timelines; + struct intel_gt_timelines *gt = &timeline->gt->timelines; struct intel_timeline_hwsp *hwsp; BUILD_BUG_ON(BITS_PER_TYPE(u64) * CACHELINE_BYTES > PAGE_SIZE); @@ -102,7 +102,7 @@ hwsp_alloc(struct intel_timeline *timeline, unsigned int *cacheline) static void __idle_hwsp_free(struct intel_timeline_hwsp *hwsp, int cacheline) { - struct i915_gt_timelines *gt = hwsp->gt_timelines; + struct intel_gt_timelines *gt = hwsp->gt_timelines; unsigned long flags; spin_lock_irqsave(>->hwsp_lock, flags); @@ -261,7 +261,7 @@ int intel_timeline_init(struct intel_timeline *timeline, static void timelines_init(struct intel_gt *gt) { - struct i915_gt_timelines *timelines = >->timelines; + struct intel_gt_timelines *timelines = >->timelines; mutex_init(&timelines->mutex); INIT_LIST_HEAD(&timelines->active_list); @@ -280,7 +280,7 @@ void intel_timelines_init(struct drm_i915_private *i915) static void timeline_add_to_active(struct intel_timeline *tl) { - struct i915_gt_timelines *gt = &tl->gt->timelines; + struct intel_gt_timelines *gt = &tl->gt->timelines; mutex_lock(>->mutex); list_add(&tl->link, >->active_list); @@ -289,7 +289,7 @@ static void timeline_add_to_active(struct intel_timeline *tl) static void timeline_remove_from_active(struct intel_timeline *tl) { - struct i915_gt_timelines *gt = &tl->gt->timelines; + struct intel_gt_timelines *gt = &tl->gt->timelines; mutex_lock(>->mutex); list_del(&tl->link); @@ -298,7 +298,7 @@ static void timeline_remove_from_active(struct intel_timeline *tl) static void timelines_park(struct intel_gt *gt) { - struct i915_gt_timelines *timelines = >->timelines; + struct intel_gt_timelines *timelines = >->timelines; struct intel_timeline *timeline; mutex_lock(&timelines->mutex); @@ -572,7 +572,7 @@ void __intel_timeline_free(struct kref *kref) static void timelines_fini(struct intel_gt *gt) { - struct i915_gt_timelines *timelines = >->timelines; + struct intel_gt_timelines *timelines = >->timelines; GEM_BUG_ON(!list_empty(&timelines->active_list)); 
GEM_BUG_ON(!list_empty(&timelines->hwsp_free_list)); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 8dff3b8f12c3..e59be5c05e1b 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -908,7 +908,7 @@ static long wait_for_timelines(struct drm_i915_private *i915, unsigned int flags, long timeout) { - struct i915_gt_timelines *gt = &i915->gt.timelines; + struct intel_gt_timelines *gt = &i915->gt.timelines; struct intel_timeline *tl; mutex_lock(>->mutex); -- cgit v1.2.3 From 80fc1c1991cdaefb9d5c9733c6c5b85b92498eee Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 21 Jun 2019 14:16:40 +0100 Subject: drm/i915/gt: Fixup kerneldoc parameters drivers/gpu/drm/i915/gt/intel_mocs.c:513: warning: Function parameter or member 'gt' not described in 'intel_mocs_init_l3cc_table' drivers/gpu/drm/i915/gt/intel_mocs.c:513: warning: Excess function parameter 'dev_priv' description in 'intel_mocs_init_l3cc_table' intel_vgt_balloon/deballoon, i915_ggtt_probe_hw intel_wopcm_init_hw need similar treatment Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190621131640.28864-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_mocs.c | 2 +- drivers/gpu/drm/i915/i915_gem_gtt.c | 4 ++-- drivers/gpu/drm/i915/i915_vgpu.c | 4 ++-- drivers/gpu/drm/i915/intel_wopcm.c | 1 + 4 files changed, 6 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c index d08b8f47269b..ae6cbf0d517c 100644 --- a/drivers/gpu/drm/i915/gt/intel_mocs.c +++ b/drivers/gpu/drm/i915/gt/intel_mocs.c @@ -497,7 +497,7 @@ static int emit_mocs_l3cc_table(struct i915_request *rq, /** * intel_mocs_init_l3cc_table() - program the mocs control table - * @dev_priv: i915 device private + * @gt: the intel_gt container * * This function simply programs the mocs registers for the given table * starting at the given address. This register set is programmed in pairs. diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 4dbfbccfa618..90f367397656 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2934,7 +2934,7 @@ static void ggtt_cleanup_hw(struct i915_ggtt *ggtt) /** * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization - * @dev_priv: i915 device + * @i915: i915 device */ void i915_ggtt_cleanup_hw(struct drm_i915_private *i915) { @@ -3530,7 +3530,7 @@ static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt) /** * i915_ggtt_probe_hw - Probe GGTT hardware location - * @dev_priv: i915 device + * @i915: i915 device */ int i915_ggtt_probe_hw(struct drm_i915_private *i915) { diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c index 9916bc6159b6..dbd1fa3c7d90 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.c +++ b/drivers/gpu/drm/i915/i915_vgpu.c @@ -131,7 +131,7 @@ static void vgt_deballoon_space(struct i915_ggtt *ggtt, /** * intel_vgt_deballoon - deballoon reserved graphics address trunks - * @dev_priv: i915 device private data + * @ggtt: the global GGTT from which we reserved earlier * * This function is called to deallocate the ballooned-out graphic memory, when * driver is unloaded or when ballooning fails. 
@@ -172,7 +172,7 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt, /** * intel_vgt_balloon - balloon out reserved graphics address trunks - * @dev_priv: i915 device private data + * @ggtt: the global GGTT from which to reserve * * This function is called at the initialization stage, to balloon out the * graphic address space allocated to other vGPUs, by marking these spaces as diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c index 931987e37241..8c850785e4b4 100644 --- a/drivers/gpu/drm/i915/intel_wopcm.c +++ b/drivers/gpu/drm/i915/intel_wopcm.c @@ -244,6 +244,7 @@ write_and_verify(struct intel_gt *gt, /** * intel_wopcm_init_hw() - Setup GuC WOPCM registers. * @wopcm: pointer to intel_wopcm. + * @gt: pointer to the containing GT * * Setup the GuC WOPCM size and offset registers with the calculated values. It * will verify the register values to make sure the registers are locked with -- cgit v1.2.3 From e29cc1d7e820d662d2d73b35281b70f01a24610b Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Fri, 21 Jun 2019 11:21:22 -0700 Subject: drm/i915/guc: reorder enable/disable communication steps Make sure we always have CT buffers enabled when the interrupts are enabled, so we can always handle interrupts from GuC. Also move the setting of the guc->send and guc->handler functions to the GuC communication control functions for consistency. The reorder also fixes the onion unwinding of intel_uc_init_hw, because guc_enable_communication would've left interrupts enabled when failing to enable CTB. v2: always retunr the result of ctch_enable() in intel_guc_ct_enable() (Michal) Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=110943 Signed-off-by: Daniele Ceraolo Spurio Cc: Chris Wilson Cc: Michal Wajdeczko Reviewed-by: Matthew Brost Reviewed-by: Michal Wajdeczko Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190621182123.31368-1-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/intel_guc_ct.c | 22 ++++------------------ drivers/gpu/drm/i915/intel_guc_ct.h | 4 ++++ drivers/gpu/drm/i915/intel_uc.c | 19 ++++++++++++++++--- 3 files changed, 24 insertions(+), 21 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_guc_ct.c b/drivers/gpu/drm/i915/intel_guc_ct.c index 3921809f812b..9e383a47609f 100644 --- a/drivers/gpu/drm/i915/intel_guc_ct.c +++ b/drivers/gpu/drm/i915/intel_guc_ct.c @@ -529,8 +529,8 @@ unlink: /* * Command Transport (CT) buffer based GuC send function. */ -static int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len, - u32 *response_buf, u32 response_buf_size) +int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len, + u32 *response_buf, u32 response_buf_size) { struct intel_guc_ct *ct = &guc->ct; struct intel_guc_ct_channel *ctch = &ct->host_channel; @@ -834,7 +834,7 @@ static void ct_process_host_channel(struct intel_guc_ct *ct) * When we're communicating with the GuC over CT, GuC uses events * to notify us about new messages being posted on the RECV buffer. 
*/ -static void intel_guc_to_host_event_handler_ct(struct intel_guc *guc) +void intel_guc_to_host_event_handler_ct(struct intel_guc *guc) { struct intel_guc_ct *ct = &guc->ct; @@ -892,20 +892,11 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct) { struct intel_guc *guc = ct_to_guc(ct); struct intel_guc_ct_channel *ctch = &ct->host_channel; - int err; if (ctch->enabled) return 0; - err = ctch_enable(guc, ctch); - if (unlikely(err)) - return err; - - /* Switch into cmd transport buffer based send() */ - guc->send = intel_guc_send_ct; - guc->handler = intel_guc_to_host_event_handler_ct; - DRM_INFO("CT: %s\n", enableddisabled(true)); - return 0; + return ctch_enable(guc, ctch); } /** @@ -921,9 +912,4 @@ void intel_guc_ct_disable(struct intel_guc_ct *ct) return; ctch_disable(guc, ctch); - - /* Disable send */ - guc->send = intel_guc_send_nop; - guc->handler = intel_guc_to_host_event_handler_nop; - DRM_INFO("CT: %s\n", enableddisabled(false)); } diff --git a/drivers/gpu/drm/i915/intel_guc_ct.h b/drivers/gpu/drm/i915/intel_guc_ct.h index 41ba593a4df7..0ec17493d83b 100644 --- a/drivers/gpu/drm/i915/intel_guc_ct.h +++ b/drivers/gpu/drm/i915/intel_guc_ct.h @@ -101,4 +101,8 @@ static inline void intel_guc_ct_stop(struct intel_guc_ct *ct) ct->host_channel.enabled = false; } +int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len, + u32 *response_buf, u32 response_buf_size); +void intel_guc_to_host_event_handler_ct(struct intel_guc *guc); + #endif /* _INTEL_GUC_CT_H_ */ diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c index ae45651ac73c..c7f82c944dd6 100644 --- a/drivers/gpu/drm/i915/intel_uc.c +++ b/drivers/gpu/drm/i915/intel_uc.c @@ -235,9 +235,20 @@ static void guc_disable_interrupts(struct intel_guc *guc) static int guc_enable_communication(struct intel_guc *guc) { + int ret; + + ret = intel_guc_ct_enable(&guc->ct); + if (ret) + return ret; + + guc->send = intel_guc_send_ct; + guc->handler = intel_guc_to_host_event_handler_ct; + guc_enable_interrupts(guc); - return intel_guc_ct_enable(&guc->ct); + DRM_INFO("GuC communication enabled\n"); + + return 0; } static void guc_stop_communication(struct intel_guc *guc) @@ -250,12 +261,14 @@ static void guc_stop_communication(struct intel_guc *guc) static void guc_disable_communication(struct intel_guc *guc) { - intel_guc_ct_disable(&guc->ct); - guc_disable_interrupts(guc); guc->send = intel_guc_send_nop; guc->handler = intel_guc_to_host_event_handler_nop; + + intel_guc_ct_disable(&guc->ct); + + DRM_INFO("GuC communication disabled\n"); } int intel_uc_init_misc(struct drm_i915_private *i915) -- cgit v1.2.3 From 2ae70d28f27f6a5358541845c623760754d63ece Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Fri, 21 Jun 2019 11:21:23 -0700 Subject: drm/i915/guc: handle GuC messages received with CTB disabled There is a very small chance of triggering a log flush event when enabling or disabling CT buffers. Events triggered while CT buffers are disabled are logged in the SCRATCH_15 register using the same bits used in the CT message payload. Since our communication channel with GuC is turned off, we can save the message and handle it after we turn it back on. GuC should be idle and not generate more events in the meantime because we're not talking to it. 
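Taken together with the previous reorder patch, the enable path after the hunks below ends up roughly as sketched here (all names are taken from the diffs in this series; error unwinding and the i915->irq_lock held around the final drain are elided for brevity):

        static int guc_enable_communication(struct intel_guc *guc)      /* condensed sketch */
        {
                int ret;

                ret = intel_guc_ct_enable(&guc->ct);    /* CT buffers come up first */
                if (ret)
                        return ret;

                guc->send = intel_guc_send_ct;
                guc->handler = intel_guc_to_host_event_handler_ct;

                /* replay any message GuC parked in SOFT_SCRATCH(15) while CT was down */
                guc_get_mmio_msg(guc);
                guc_handle_mmio_msg(guc);

                /* only now is it safe to unmask GuC interrupts */
                guc_enable_interrupts(guc);

                /* and drain any CT message that raced in before the unmask */
                intel_guc_to_host_event_handler_ct(guc);

                return 0;
        }

The disable path mirrors this in reverse: clear the scratch register, mask interrupts, switch the vfuncs back to the nops, tear down CT, then pick up anything GuC logged during the teardown.
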
v2: clear the mmio register on stop_communication as well (Chris) Signed-off-by: Daniele Ceraolo Spurio Cc: Michal Wajdeczko Cc: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190621182123.31368-2-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/intel_guc.h | 5 +++ drivers/gpu/drm/i915/intel_uc.c | 74 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 79 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h index 08c906abdfa2..d6a75bc3d7f4 100644 --- a/drivers/gpu/drm/i915/intel_guc.h +++ b/drivers/gpu/drm/i915/intel_guc.h @@ -88,6 +88,9 @@ struct intel_guc { enum forcewake_domains fw_domains; } send_regs; + /* Store msg (e.g. log flush) that we see while CTBs are disabled */ + u32 mmio_msg; + /* To serialize the intel_guc_send actions */ struct mutex send_mutex; @@ -181,6 +184,8 @@ static inline bool intel_guc_is_loaded(struct intel_guc *guc) static inline int intel_guc_sanitize(struct intel_guc *guc) { intel_uc_fw_sanitize(&guc->fw); + guc->mmio_msg = 0; + return 0; } diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c index c7f82c944dd6..fdf00f1ebb57 100644 --- a/drivers/gpu/drm/i915/intel_uc.c +++ b/drivers/gpu/drm/i915/intel_uc.c @@ -218,6 +218,53 @@ static void guc_free_load_err_log(struct intel_guc *guc) i915_gem_object_put(guc->load_err_log); } +/* + * Events triggered while CT buffers are disabled are logged in the SCRATCH_15 + * register using the same bits used in the CT message payload. Since our + * communication channel with guc is turned off at this point, we can save the + * message and handle it after we turn it back on. + */ +static void guc_clear_mmio_msg(struct intel_guc *guc) +{ + intel_uncore_write(&guc_to_i915(guc)->uncore, SOFT_SCRATCH(15), 0); +} + +static void guc_get_mmio_msg(struct intel_guc *guc) +{ + u32 val; + + spin_lock_irq(&guc->irq_lock); + + val = intel_uncore_read(&guc_to_i915(guc)->uncore, SOFT_SCRATCH(15)); + guc->mmio_msg |= val & guc->msg_enabled_mask; + + /* + * clear all events, including the ones we're not currently servicing, + * to make sure we don't try to process a stale message if we enable + * handling of more events later. 
+ */ + guc_clear_mmio_msg(guc); + + spin_unlock_irq(&guc->irq_lock); +} + +static void guc_handle_mmio_msg(struct intel_guc *guc) +{ + struct drm_i915_private *i915 = guc_to_i915(guc); + + /* we need communication to be enabled to reply to GuC */ + GEM_BUG_ON(guc->handler == intel_guc_to_host_event_handler_nop); + + if (!guc->mmio_msg) + return; + + spin_lock_irq(&i915->irq_lock); + intel_guc_to_host_process_recv_msg(guc, &guc->mmio_msg, 1); + spin_unlock_irq(&i915->irq_lock); + + guc->mmio_msg = 0; +} + static void guc_reset_interrupts(struct intel_guc *guc) { guc->interrupts.reset(guc_to_i915(guc)); @@ -235,6 +282,7 @@ static void guc_disable_interrupts(struct intel_guc *guc) static int guc_enable_communication(struct intel_guc *guc) { + struct drm_i915_private *i915 = guc_to_i915(guc); int ret; ret = intel_guc_ct_enable(&guc->ct); @@ -244,8 +292,17 @@ static int guc_enable_communication(struct intel_guc *guc) guc->send = intel_guc_send_ct; guc->handler = intel_guc_to_host_event_handler_ct; + /* check for mmio messages received before/during the CT enable */ + guc_get_mmio_msg(guc); + guc_handle_mmio_msg(guc); + guc_enable_interrupts(guc); + /* check for CT messages received before we enabled interrupts */ + spin_lock_irq(&i915->irq_lock); + intel_guc_to_host_event_handler_ct(guc); + spin_unlock_irq(&i915->irq_lock); + DRM_INFO("GuC communication enabled\n"); return 0; @@ -257,10 +314,19 @@ static void guc_stop_communication(struct intel_guc *guc) guc->send = intel_guc_send_nop; guc->handler = intel_guc_to_host_event_handler_nop; + + guc_clear_mmio_msg(guc); } static void guc_disable_communication(struct intel_guc *guc) { + /* + * Events generated during or after CT disable are logged by guc in + * via mmio. Make sure the register is clear before disabling CT since + * all events we cared about have already been processed via CT. + */ + guc_clear_mmio_msg(guc); + guc_disable_interrupts(guc); guc->send = intel_guc_send_nop; @@ -268,6 +334,14 @@ static void guc_disable_communication(struct intel_guc *guc) intel_guc_ct_disable(&guc->ct); + /* + * Check for messages received during/after the CT disable. We do not + * expect any messages to have arrived via CT between the interrupt + * disable and the CT disable because GuC should've been idle until we + * triggered the CT disable protocol. + */ + guc_get_mmio_msg(guc); + DRM_INFO("GuC communication disabled\n"); } -- cgit v1.2.3 From 9e9539800dd44b1190128d48a116f4660f5d206f Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 21 Jun 2019 19:37:57 +0100 Subject: drm/i915: Remove waiting & retiring from shrinker paths i915_gem_wait_for_idle() and i915_retire_requests() introduce a dependency on the timeline->mutex. This is problematic as we want to later perform allocations underneath i915_active.mutex, forming a link between the shrinker, the timeline and active mutexes. Nip this cycle in the bud by removing the acquisition of the timeline mutex (i.e. retiring) from inside the shrinker. 
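To make the cycle concrete, here is a minimal sketch of the allocation pattern the series is heading towards; i915_active does not yet have such a mutex and alloc_node_locked() is purely hypothetical, but it shows why the shrinker must not reacquire timeline->mutex from inside reclaim:

        /* hypothetical future i915_active path */
        static struct active_node *alloc_node_locked(struct i915_active *ref)
        {
                struct active_node *node;

                mutex_lock(&ref->mutex);        /* assumed future i915_active.mutex */

                /*
                 * GFP_KERNEL may enter direct reclaim and call back into the
                 * i915 shrinker. If the shrinker still retired requests it
                 * would need timeline->mutex, which elsewhere is held while
                 * manipulating this same i915_active: a lock cycle.
                 */
                node = kmalloc(sizeof(*node), GFP_KERNEL);

                mutex_unlock(&ref->mutex);
                return node;
        }
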
Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190621183801.23252-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 10 ---------- drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c | 3 +++ drivers/gpu/drm/i915/gt/intel_gt_types.h | 2 +- drivers/gpu/drm/i915/gt/intel_timeline.c | 3 --- 4 files changed, 4 insertions(+), 14 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c index 3a926a8755c6..1bbc690494c7 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c @@ -169,7 +169,6 @@ i915_gem_shrink(struct drm_i915_private *i915, */ trace_i915_gem_shrink(i915, target, shrink); - i915_retire_requests(i915); /* * Unbinding of objects will require HW access; Let us not wake the @@ -269,8 +268,6 @@ i915_gem_shrink(struct drm_i915_private *i915, if (shrink & I915_SHRINK_BOUND) intel_runtime_pm_put(&i915->runtime_pm, wakeref); - i915_retire_requests(i915); - shrinker_unlock(i915, unlock); if (nr_scanned) @@ -427,12 +424,6 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr if (!shrinker_lock(i915, 0, &unlock)) return NOTIFY_DONE; - /* Force everything onto the inactive lists */ - if (i915_gem_wait_for_idle(i915, - I915_WAIT_LOCKED, - MAX_SCHEDULE_TIMEOUT)) - goto out; - with_intel_runtime_pm(&i915->runtime_pm, wakeref) freed_pages += i915_gem_shrink(i915, -1UL, NULL, I915_SHRINK_BOUND | @@ -455,7 +446,6 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr } mutex_unlock(&i915->ggtt.vm.mutex); -out: shrinker_unlock(i915, unlock); *(unsigned long *)ptr += freed_pages; diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index 167c4a57c4cd..53c81b5dfd69 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -427,6 +427,9 @@ create_test_object(struct i915_gem_context *ctx, u64 size; int err; + /* Keep in GEM's good graces */ + i915_retire_requests(ctx->i915); + size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE); size = round_down(size, DW_PER_PAGE * PAGE_SIZE); diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h index 722506b7cec0..c03e56628ee2 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h @@ -26,7 +26,7 @@ struct intel_gt { struct i915_ggtt *ggtt; struct intel_gt_timelines { - struct mutex mutex; /* protects list, tainted by GPU */ + struct mutex mutex; /* protects list */ struct list_head active_list; /* Pack multiple timelines' seqnos into the same page */ diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c index d6accef50f8e..44273b7c96f8 100644 --- a/drivers/gpu/drm/i915/gt/intel_timeline.c +++ b/drivers/gpu/drm/i915/gt/intel_timeline.c @@ -268,9 +268,6 @@ static void timelines_init(struct intel_gt *gt) spin_lock_init(&timelines->hwsp_lock); INIT_LIST_HEAD(&timelines->hwsp_free_list); - - /* via i915_gem_wait_for_idle() */ - i915_gem_shrinker_taints_mutex(gt->i915, &timelines->mutex); } void intel_timelines_init(struct drm_i915_private *i915) -- cgit v1.2.3 From 5361db1a33c7e2d58af7df045d4d3ddd4c87ab56 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 21 Jun 2019 19:37:58 +0100 Subject: drm/i915: 
Track i915_active using debugobjects Provide runtime asserts and tracking of i915_active via debugobjects. For example, this should allow us to check that the i915_active is only active when we expect it to be and is never freed too early. One consequence is that, for simplicity, we no longer allow i915_active to be on-stack which only affected the selftests. Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190621183801.23252-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_active.c | 66 ++++++++++++++++++++++- drivers/gpu/drm/i915/selftests/i915_active.c | 78 +++++++++++++++++++++------- 2 files changed, 123 insertions(+), 21 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c index 293e5bcc4b6c..eb91a625c71f 100644 --- a/drivers/gpu/drm/i915/i915_active.c +++ b/drivers/gpu/drm/i915/i915_active.c @@ -4,6 +4,8 @@ * Copyright © 2019 Intel Corporation */ +#include + #include "gt/intel_engine_pm.h" #include "i915_drv.h" @@ -31,6 +33,55 @@ struct active_node { u64 timeline; }; +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS) + +static void *active_debug_hint(void *addr) +{ + struct i915_active *ref = addr; + + return (void *)ref->retire ?: (void *)ref; +} + +static struct debug_obj_descr active_debug_desc = { + .name = "i915_active", + .debug_hint = active_debug_hint, +}; + +static void debug_active_init(struct i915_active *ref) +{ + debug_object_init(ref, &active_debug_desc); +} + +static void debug_active_activate(struct i915_active *ref) +{ + debug_object_activate(ref, &active_debug_desc); +} + +static void debug_active_deactivate(struct i915_active *ref) +{ + debug_object_deactivate(ref, &active_debug_desc); +} + +static void debug_active_fini(struct i915_active *ref) +{ + debug_object_free(ref, &active_debug_desc); +} + +static void debug_active_assert(struct i915_active *ref) +{ + debug_object_assert_init(ref, &active_debug_desc); +} + +#else + +static inline void debug_active_init(struct i915_active *ref) { } +static inline void debug_active_activate(struct i915_active *ref) { } +static inline void debug_active_deactivate(struct i915_active *ref) { } +static inline void debug_active_fini(struct i915_active *ref) { } +static inline void debug_active_assert(struct i915_active *ref) { } + +#endif + static void __active_park(struct i915_active *ref) { @@ -50,6 +101,8 @@ __active_retire(struct i915_active *ref) if (--ref->count) return; + debug_active_deactivate(ref); + /* return the unused nodes to our slabcache */ __active_park(ref); @@ -155,6 +208,8 @@ void i915_active_init(struct drm_i915_private *i915, struct i915_active *ref, void (*retire)(struct i915_active *ref)) { + debug_active_init(ref); + ref->i915 = i915; ref->retire = retire; ref->tree = RB_ROOT; @@ -191,13 +246,21 @@ out: bool i915_active_acquire(struct i915_active *ref) { + debug_active_assert(ref); lockdep_assert_held(BKL(ref)); - return !ref->count++; + + if (ref->count++) + return false; + + debug_active_activate(ref); + return true; } void i915_active_release(struct i915_active *ref) { + debug_active_assert(ref); lockdep_assert_held(BKL(ref)); + __active_retire(ref); } @@ -260,6 +323,7 @@ out: #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) void i915_active_fini(struct i915_active *ref) { + debug_active_fini(ref); GEM_BUG_ON(i915_active_request_isset(&ref->last)); GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree)); GEM_BUG_ON(ref->count); diff --git 
a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c index c0b3537a5fa6..98493bcc91f2 100644 --- a/drivers/gpu/drm/i915/selftests/i915_active.c +++ b/drivers/gpu/drm/i915/selftests/i915_active.c @@ -16,28 +16,51 @@ struct live_active { bool retired; }; -static void __live_active_retire(struct i915_active *base) +static void __live_free(struct live_active *active) +{ + i915_active_fini(&active->base); + kfree(active); +} + +static void __live_retire(struct i915_active *base) { struct live_active *active = container_of(base, typeof(*active), base); active->retired = true; } -static int __live_active_setup(struct drm_i915_private *i915, - struct live_active *active) +static struct live_active *__live_alloc(struct drm_i915_private *i915) +{ + struct live_active *active; + + active = kzalloc(sizeof(*active), GFP_KERNEL); + if (!active) + return NULL; + + i915_active_init(i915, &active->base, __live_retire); + + return active; +} + +static struct live_active * +__live_active_setup(struct drm_i915_private *i915) { struct intel_engine_cs *engine; struct i915_sw_fence *submit; + struct live_active *active; enum intel_engine_id id; unsigned int count = 0; int err = 0; - submit = heap_fence_create(GFP_KERNEL); - if (!submit) - return -ENOMEM; + active = __live_alloc(i915); + if (!active) + return ERR_PTR(-ENOMEM); - i915_active_init(i915, &active->base, __live_active_retire); - active->retired = false; + submit = heap_fence_create(GFP_KERNEL); + if (!submit) { + kfree(active); + return ERR_PTR(-ENOMEM); + } if (!i915_active_acquire(&active->base)) { pr_err("First i915_active_acquire should report being idle\n"); @@ -84,64 +107,79 @@ out: i915_sw_fence_commit(submit); heap_fence_put(submit); - return err; + /* XXX leaks live_active on error */ + return err ? 
ERR_PTR(err) : active; } static int live_active_wait(void *arg) { struct drm_i915_private *i915 = arg; - struct live_active active; + struct live_active *active; intel_wakeref_t wakeref; - int err; + int err = 0; /* Check that we get a callback when requests retire upon waiting */ mutex_lock(&i915->drm.struct_mutex); wakeref = intel_runtime_pm_get(&i915->runtime_pm); - err = __live_active_setup(i915, &active); + active = __live_active_setup(i915); + if (IS_ERR(active)) { + err = PTR_ERR(active); + goto err; + } - i915_active_wait(&active.base); - if (!active.retired) { + i915_active_wait(&active->base); + if (!active->retired) { pr_err("i915_active not retired after waiting!\n"); err = -EINVAL; } - i915_active_fini(&active.base); + __live_free(active); + if (igt_flush_test(i915, I915_WAIT_LOCKED)) err = -EIO; +err: intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); + return err; } static int live_active_retire(void *arg) { struct drm_i915_private *i915 = arg; - struct live_active active; + struct live_active *active; intel_wakeref_t wakeref; - int err; + int err = 0; /* Check that we get a callback when requests are indirectly retired */ mutex_lock(&i915->drm.struct_mutex); wakeref = intel_runtime_pm_get(&i915->runtime_pm); - err = __live_active_setup(i915, &active); + active = __live_active_setup(i915); + if (IS_ERR(active)) { + err = PTR_ERR(active); + goto err; + } /* waits for & retires all requests */ if (igt_flush_test(i915, I915_WAIT_LOCKED)) err = -EIO; - if (!active.retired) { + if (!active->retired) { pr_err("i915_active not retired after flushing!\n"); err = -EINVAL; } - i915_active_fini(&active.base); + __live_free(active); + +err: intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); + return err; } -- cgit v1.2.3 From a93615f900bd19b59e74e04f7d8d4663ee5ea68f Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 21 Jun 2019 19:37:59 +0100 Subject: drm/i915: Throw away the active object retirement complexity Remove the accumulated optimisations that we have for i915_vma_retire and reduce it to the bare essential of tracking the active object reference. This allows us to only use atomic operations, and so will be able to avoid the struct_mutex requirement. The principal loss here is the shrinker MRU bumping, so now if we have to shrink, we will do so in much more random order and more likely to try and shrink recently used objects. That is a nuisance, but shrinking active objects is a second step we try to avoid and will always be a system-wide performance issue. The other loss is here is in the automatic pruning of the reservation_object when idling. This is not as large an issue as upon reservation_object introduction as now adding new fences into the object replaces already signaled fences, keeping the array compact. But we do lose the auto-expiration of stale fences and unused arrays. That may be a noticeable problem for which we need to re-implement autopruning. 
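With those optimisations gone, the retire callback collapses to little more than dropping the reference that kept the object alive while it was active. A hypothetical sketch of that reduced shape (the actual body lands in the i915_vma.c hunk below, which is cut short in this excerpt):

        static void vma_retire_sketch(struct i915_active *ref)
        {
                struct i915_vma *vma = container_of(ref, typeof(*vma), active);

                /* no MRU bumping, no fence pruning: just release the object */
                i915_gem_object_put(vma->obj);
        }

Losing the MRU bump and the autopruning is the price for making this path rely on atomic operations only, without struct_mutex.
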
Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190621183801.23252-3-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_object.c | 1 - drivers/gpu/drm/i915/gem/i915_gem_object.h | 6 --- drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 1 - drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 5 +- drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 9 ---- drivers/gpu/drm/i915/gt/intel_lrc.c | 4 +- drivers/gpu/drm/i915/gt/intel_ringbuffer.c | 1 - drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 34 ++++++-------- drivers/gpu/drm/i915/i915_debugfs.c | 8 +--- drivers/gpu/drm/i915/i915_gem_batch_pool.c | 42 +++++++---------- drivers/gpu/drm/i915/i915_vma.c | 54 +++++----------------- 11 files changed, 47 insertions(+), 118 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index 87275f9883ac..43194fbcbc2e 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -160,7 +160,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, mutex_lock(&i915->drm.struct_mutex); - GEM_BUG_ON(i915_gem_object_is_active(obj)); list_for_each_entry_safe(vma, vn, &obj->vma.list, obj_link) { GEM_BUG_ON(i915_vma_is_active(vma)); vma->flags &= ~I915_VMA_PIN_MASK; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index dfebd5706f16..20754c15412a 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -158,12 +158,6 @@ i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj) return obj->ops->flags & I915_GEM_OBJECT_ASYNC_CANCEL; } -static inline bool -i915_gem_object_is_active(const struct drm_i915_gem_object *obj) -{ - return READ_ONCE(obj->active_count); -} - static inline bool i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj) { diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h index 18bf4f8d6d80..34b51fad02de 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -154,7 +154,6 @@ struct drm_i915_gem_object { /** Count of VMA actually bound by this object */ atomic_t bind_count; - unsigned int active_count; /** Count of how many global VMA are currently pinned for use by HW */ unsigned int pin_global; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c index 1bbc690494c7..d99f1a600b96 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c @@ -229,8 +229,9 @@ i915_gem_shrink(struct drm_i915_private *i915, continue; if (!(shrink & I915_SHRINK_ACTIVE) && - (i915_gem_object_is_active(obj) || - i915_gem_object_is_framebuffer(obj))) + (i915_gem_object_is_framebuffer(obj) || + !reservation_object_test_signaled_rcu(obj->base.resv, + true))) continue; if (!(shrink & I915_SHRINK_BOUND) && diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index 2812f7fa27fe..24a3c677ccd5 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -475,15 +475,6 @@ static int igt_mmap_offset_exhaustion(void *arg) pr_err("[loop %d] Failed to busy the object\n", loop); goto err_obj; } - - /* NB we rely on the _active_ reference to 
access obj now */ - GEM_BUG_ON(!i915_gem_object_is_active(obj)); - err = create_mmap_offset(obj); - if (err) { - pr_err("[loop %d] create_mmap_offset failed with err=%d\n", - loop, err); - goto out; - } } out: diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index b3e0e25c5d80..ad7638da785d 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -1486,9 +1486,7 @@ static void execlists_submit_request(struct i915_request *request) static void __execlists_context_fini(struct intel_context *ce) { intel_ring_put(ce->ring); - - GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj)); - i915_gem_object_put(ce->state->obj); + i915_vma_put(ce->state); } static void execlists_context_destroy(struct kref *kref) diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c index d65b8cba1a8f..c9337e4b5ee0 100644 --- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c @@ -1327,7 +1327,6 @@ void intel_ring_free(struct kref *ref) static void __ring_context_fini(struct intel_context *ce) { - GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj)); i915_gem_object_put(ce->state->obj); } diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c index 0dc3896e49f5..3ceb397c8645 100644 --- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c @@ -131,35 +131,29 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine) { struct drm_i915_private *i915 = h->i915; struct i915_address_space *vm = h->ctx->vm ?: &engine->gt->ggtt->vm; + struct drm_i915_gem_object *obj; struct i915_request *rq = NULL; struct i915_vma *hws, *vma; unsigned int flags; + void *vaddr; u32 *batch; int err; - h->gt = engine->gt; - - if (i915_gem_object_is_active(h->obj)) { - struct drm_i915_gem_object *obj; - void *vaddr; + obj = i915_gem_object_create_internal(i915, PAGE_SIZE); + if (IS_ERR(obj)) + return ERR_CAST(obj); - obj = i915_gem_object_create_internal(i915, PAGE_SIZE); - if (IS_ERR(obj)) - return ERR_CAST(obj); - - vaddr = i915_gem_object_pin_map(obj, - i915_coherent_map_type(i915)); - if (IS_ERR(vaddr)) { - i915_gem_object_put(obj); - return ERR_CAST(vaddr); - } + vaddr = i915_gem_object_pin_map(obj, i915_coherent_map_type(i915)); + if (IS_ERR(vaddr)) { + i915_gem_object_put(obj); + return ERR_CAST(vaddr); + } - i915_gem_object_unpin_map(h->obj); - i915_gem_object_put(h->obj); + i915_gem_object_unpin_map(h->obj); + i915_gem_object_put(h->obj); - h->obj = obj; - h->batch = vaddr; - } + h->obj = obj; + h->batch = vaddr; vma = i915_vma_instance(h->obj, vm, NULL); if (IS_ERR(vma)) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 62cf34db9280..eeecdad0e3ca 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -75,11 +75,6 @@ static int i915_capabilities(struct seq_file *m, void *data) return 0; } -static char get_active_flag(struct drm_i915_gem_object *obj) -{ - return i915_gem_object_is_active(obj) ? '*' : ' '; -} - static char get_pin_flag(struct drm_i915_gem_object *obj) { return obj->pin_global ? 
'p' : ' '; @@ -144,9 +139,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) unsigned int frontbuffer_bits; int pin_count = 0; - seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s", + seq_printf(m, "%pK: %c%c%c%c %8zdKiB %02x %02x %s%s%s", &obj->base, - get_active_flag(obj), get_pin_flag(obj), get_tiling_flag(obj), get_global_flag(obj), diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c index 25a3e4d09a2f..b17f23991253 100644 --- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c +++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c @@ -94,34 +94,26 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, list = &pool->cache_list[n]; list_for_each_entry(obj, list, batch_pool_link) { + struct reservation_object *resv = obj->base.resv; + /* The batches are strictly LRU ordered */ - if (i915_gem_object_is_active(obj)) { - struct reservation_object *resv = obj->base.resv; - - if (!reservation_object_test_signaled_rcu(resv, true)) - break; - - i915_retire_requests(pool->engine->i915); - GEM_BUG_ON(i915_gem_object_is_active(obj)); - - /* - * The object is now idle, clear the array of shared - * fences before we add a new request. Although, we - * remain on the same engine, we may be on a different - * timeline and so may continually grow the array, - * trapping a reference to all the old fences, rather - * than replace the existing fence. - */ - if (rcu_access_pointer(resv->fence)) { - reservation_object_lock(resv, NULL); - reservation_object_add_excl_fence(resv, NULL); - reservation_object_unlock(resv); - } + if (!reservation_object_test_signaled_rcu(resv, true)) + break; + + /* + * The object is now idle, clear the array of shared + * fences before we add a new request. Although, we + * remain on the same engine, we may be on a different + * timeline and so may continually grow the array, + * trapping a reference to all the old fences, rather + * than replace the existing fence. + */ + if (rcu_access_pointer(resv->fence)) { + reservation_object_lock(resv, NULL); + reservation_object_add_excl_fence(resv, NULL); + reservation_object_unlock(resv); } - GEM_BUG_ON(!reservation_object_test_signaled_rcu(obj->base.resv, - true)); - if (obj->base.size >= size) goto found; } diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 503f1180af12..c13b86e6ef1f 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -78,43 +78,11 @@ static void vma_print_allocator(struct i915_vma *vma, const char *reason) #endif -static void obj_bump_mru(struct drm_i915_gem_object *obj) -{ - struct drm_i915_private *i915 = to_i915(obj->base.dev); - unsigned long flags; - - spin_lock_irqsave(&i915->mm.obj_lock, flags); - list_move_tail(&obj->mm.link, &i915->mm.shrink_list); - spin_unlock_irqrestore(&i915->mm.obj_lock, flags); - - obj->mm.dirty = true; /* be paranoid */ -} - static void __i915_vma_retire(struct i915_active *ref) { struct i915_vma *vma = container_of(ref, typeof(*vma), active); - struct drm_i915_gem_object *obj = vma->obj; - - GEM_BUG_ON(!i915_gem_object_is_active(obj)); - if (--obj->active_count) - return; - - /* Prune the shared fence arrays iff completely idle (inc. 
external) */ - if (reservation_object_trylock(obj->base.resv)) { - if (reservation_object_test_signaled_rcu(obj->base.resv, true)) - reservation_object_add_excl_fence(obj->base.resv, NULL); - reservation_object_unlock(obj->base.resv); - } - /* - * Bump our place on the bound list to keep it roughly in LRU order - * so that we don't steal from recently used but inactive objects - * (unless we are forced to ofc!) - */ - if (i915_gem_object_is_shrinkable(obj)) - obj_bump_mru(obj); - - i915_gem_object_put(obj); /* and drop the active reference */ + i915_vma_put(vma); } static struct i915_vma * @@ -922,6 +890,7 @@ int i915_vma_move_to_active(struct i915_vma *vma, unsigned int flags) { struct drm_i915_gem_object *obj = vma->obj; + int err; assert_vma_held(vma); assert_object_held(obj); @@ -935,17 +904,13 @@ int i915_vma_move_to_active(struct i915_vma *vma, * add the active reference first and queue for it to be dropped * *last*. */ - if (!vma->active.count && !obj->active_count++) - i915_gem_object_get(obj); /* once more for the active ref */ - - if (unlikely(i915_active_ref(&vma->active, rq->fence.context, rq))) { - if (!vma->active.count && !--obj->active_count) - i915_gem_object_put(obj); - return -ENOMEM; - } + if (i915_active_acquire(&vma->active)) + i915_vma_get(vma); - GEM_BUG_ON(!i915_vma_is_active(vma)); - GEM_BUG_ON(!obj->active_count); + err = i915_active_ref(&vma->active, rq->fence.context, rq); + i915_active_release(&vma->active); + if (unlikely(err)) + return err; obj->write_domain = 0; if (flags & EXEC_OBJECT_WRITE) { @@ -957,11 +922,14 @@ int i915_vma_move_to_active(struct i915_vma *vma, obj->read_domains = 0; } obj->read_domains |= I915_GEM_GPU_DOMAINS; + obj->mm.dirty = true; if (flags & EXEC_OBJECT_NEEDS_FENCE) __i915_active_request_set(&vma->last_fence, rq); export_fence(vma, rq, flags); + + GEM_BUG_ON(!i915_vma_is_active(vma)); return 0; } -- cgit v1.2.3 From 12c255b5dad115e87f81ea45708b5f82b9a55253 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 21 Jun 2019 19:38:00 +0100 Subject: drm/i915: Provide an i915_active.acquire callback If we introduce a callback for i915_active that is only called the first time we use the i915_active and is symmetrically paired with the i915_active.retire callback, we can replace the open-coded and non-atomic implementations -- which will be very fragile (i.e. broken) upon removing the struct_mutex serialisation. 
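
In rough userspace terms, the pairing described above looks like the sketch below: the "active" hook fires only on the 0 -> 1 transition and the "retire" hook only on the final put, with an atomic fast path and a mutex guarding just the first-use and last-use slow paths. This is an illustrative standalone analogue with invented names (sketch_active, sketch_acquire, sketch_release), not the i915 API, and it simplifies details such as where the retire hook runs relative to the lock.

	#include <pthread.h>
	#include <stdatomic.h>

	struct sketch_active {
		atomic_int count;
		pthread_mutex_t mutex;
		int (*active)(struct sketch_active *ref);  /* first-use hook */
		void (*retire)(struct sketch_active *ref); /* last-use hook */
	};

	static int sketch_acquire(struct sketch_active *ref)
	{
		int old = atomic_load(&ref->count);
		int err = 0;

		/* Fast path: already active, just bump the count. */
		while (old) {
			if (atomic_compare_exchange_weak(&ref->count, &old, old + 1))
				return 0;
		}

		/* Slow path: first user runs the active() hook under the lock. */
		pthread_mutex_lock(&ref->mutex);
		if (!atomic_load(&ref->count) && ref->active)
			err = ref->active(ref);
		if (!err)
			atomic_fetch_add(&ref->count, 1);
		pthread_mutex_unlock(&ref->mutex);

		return err;
	}

	static void sketch_release(struct sketch_active *ref)
	{
		int old = atomic_load(&ref->count);

		/* Fast path: not the last reference. */
		while (old > 1) {
			if (atomic_compare_exchange_weak(&ref->count, &old, old - 1))
				return;
		}

		/* Slow path: the final put runs the retire() hook. */
		pthread_mutex_lock(&ref->mutex);
		if (atomic_fetch_sub(&ref->count, 1) == 1)
			ref->retire(ref);
		pthread_mutex_unlock(&ref->mutex);
	}
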
Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190621183801.23252-4-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_context.c | 8 +- drivers/gpu/drm/i915/gt/intel_context.c | 65 ++++---- drivers/gpu/drm/i915/gt/intel_context.h | 14 +- drivers/gpu/drm/i915/gt/intel_lrc.c | 6 +- drivers/gpu/drm/i915/gt/intel_ringbuffer.c | 2 +- drivers/gpu/drm/i915/gt/intel_timeline.c | 16 +- drivers/gpu/drm/i915/gt/mock_engine.c | 2 +- drivers/gpu/drm/i915/i915_active.c | 225 ++++++++++++++------------- drivers/gpu/drm/i915/i915_active.h | 25 +-- drivers/gpu/drm/i915/i915_active_types.h | 10 +- drivers/gpu/drm/i915/i915_gem_gtt.c | 2 +- drivers/gpu/drm/i915/i915_vma.c | 22 ++- drivers/gpu/drm/i915/selftests/i915_active.c | 53 +++++-- 13 files changed, 258 insertions(+), 192 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index 628673d1d7f8..8a9787cf0cd0 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -923,8 +923,12 @@ static int context_barrier_task(struct i915_gem_context *ctx, if (!cb) return -ENOMEM; - i915_active_init(i915, &cb->base, cb_retire); - i915_active_acquire(&cb->base); + i915_active_init(i915, &cb->base, NULL, cb_retire); + err = i915_active_acquire(&cb->base); + if (err) { + kfree(cb); + return err; + } for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { struct i915_request *rq; diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 23120901c55f..938dd032b820 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -95,11 +95,15 @@ void intel_context_unpin(struct intel_context *ce) intel_context_put(ce); } -static int __context_pin_state(struct i915_vma *vma, unsigned long flags) +static int __context_pin_state(struct i915_vma *vma) { + u64 flags; int err; - err = i915_vma_pin(vma, 0, 0, flags | PIN_GLOBAL); + flags = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS; + flags |= PIN_HIGH | PIN_GLOBAL; + + err = i915_vma_pin(vma, 0, 0, flags); if (err) return err; @@ -119,7 +123,7 @@ static void __context_unpin_state(struct i915_vma *vma) __i915_vma_unpin(vma); } -static void intel_context_retire(struct i915_active *active) +static void __intel_context_retire(struct i915_active *active) { struct intel_context *ce = container_of(active, typeof(*ce), active); @@ -130,35 +134,11 @@ static void intel_context_retire(struct i915_active *active) intel_context_put(ce); } -void -intel_context_init(struct intel_context *ce, - struct i915_gem_context *ctx, - struct intel_engine_cs *engine) -{ - GEM_BUG_ON(!engine->cops); - - kref_init(&ce->ref); - - ce->gem_context = ctx; - ce->engine = engine; - ce->ops = engine->cops; - ce->sseu = engine->sseu; - - INIT_LIST_HEAD(&ce->signal_link); - INIT_LIST_HEAD(&ce->signals); - - mutex_init(&ce->pin_mutex); - - i915_active_init(ctx->i915, &ce->active, intel_context_retire); -} - -int intel_context_active_acquire(struct intel_context *ce, unsigned long flags) +static int __intel_context_active(struct i915_active *active) { + struct intel_context *ce = container_of(active, typeof(*ce), active); int err; - if (!i915_active_acquire(&ce->active)) - return 0; - intel_context_get(ce); err = intel_ring_pin(ce->ring); @@ -168,7 +148,7 @@ int intel_context_active_acquire(struct intel_context *ce, unsigned long flags) if (!ce->state) return 0; - 
err = __context_pin_state(ce->state, flags); + err = __context_pin_state(ce->state); if (err) goto err_ring; @@ -188,15 +168,30 @@ err_ring: intel_ring_unpin(ce->ring); err_put: intel_context_put(ce); - i915_active_cancel(&ce->active); return err; } -void intel_context_active_release(struct intel_context *ce) +void +intel_context_init(struct intel_context *ce, + struct i915_gem_context *ctx, + struct intel_engine_cs *engine) { - /* Nodes preallocated in intel_context_active() */ - i915_active_acquire_barrier(&ce->active); - i915_active_release(&ce->active); + GEM_BUG_ON(!engine->cops); + + kref_init(&ce->ref); + + ce->gem_context = ctx; + ce->engine = engine; + ce->ops = engine->cops; + ce->sseu = engine->sseu; + + INIT_LIST_HEAD(&ce->signal_link); + INIT_LIST_HEAD(&ce->signals); + + mutex_init(&ce->pin_mutex); + + i915_active_init(ctx->i915, &ce->active, + __intel_context_active, __intel_context_retire); } static void i915_global_context_shrink(void) diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h index a47275bc4f01..40cd8320fcc3 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.h +++ b/drivers/gpu/drm/i915/gt/intel_context.h @@ -9,6 +9,7 @@ #include +#include "i915_active.h" #include "intel_context_types.h" #include "intel_engine_types.h" @@ -102,8 +103,17 @@ static inline void intel_context_exit(struct intel_context *ce) ce->ops->exit(ce); } -int intel_context_active_acquire(struct intel_context *ce, unsigned long flags); -void intel_context_active_release(struct intel_context *ce); +static inline int intel_context_active_acquire(struct intel_context *ce) +{ + return i915_active_acquire(&ce->active); +} + +static inline void intel_context_active_release(struct intel_context *ce) +{ + /* Nodes preallocated in intel_context_active() */ + i915_active_acquire_barrier(&ce->active); + i915_active_release(&ce->active); +} static inline struct intel_context *intel_context_get(struct intel_context *ce) { diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index ad7638da785d..c8a0c9b32764 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -1542,12 +1542,10 @@ __execlists_context_pin(struct intel_context *ce, goto err; GEM_BUG_ON(!ce->state); - ret = intel_context_active_acquire(ce, - engine->i915->ggtt.pin_bias | - PIN_OFFSET_BIAS | - PIN_HIGH); + ret = intel_context_active_acquire(ce); if (ret) goto err; + GEM_BUG_ON(!i915_vma_is_pinned(ce->state)); vaddr = i915_gem_object_pin_map(ce->state->obj, i915_coherent_map_type(engine->i915) | diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c index c9337e4b5ee0..f094406dcc56 100644 --- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c @@ -1455,7 +1455,7 @@ static int ring_context_pin(struct intel_context *ce) ce->state = vma; } - err = intel_context_active_acquire(ce, PIN_HIGH); + err = intel_context_active_acquire(ce); if (err) return err; diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c index 44273b7c96f8..478258274986 100644 --- a/drivers/gpu/drm/i915/gt/intel_timeline.c +++ b/drivers/gpu/drm/i915/gt/intel_timeline.c @@ -146,6 +146,15 @@ static void __cacheline_retire(struct i915_active *active) __idle_cacheline_free(cl); } +static int __cacheline_active(struct i915_active *active) +{ + struct intel_timeline_cacheline *cl = + container_of(active, typeof(*cl), active); + + 
__i915_vma_pin(cl->hwsp->vma); + return 0; +} + static struct intel_timeline_cacheline * cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline) { @@ -168,15 +177,16 @@ cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline) cl->hwsp = hwsp; cl->vaddr = page_pack_bits(vaddr, cacheline); - i915_active_init(hwsp->gt->i915, &cl->active, __cacheline_retire); + i915_active_init(hwsp->gt->i915, &cl->active, + __cacheline_active, __cacheline_retire); return cl; } static void cacheline_acquire(struct intel_timeline_cacheline *cl) { - if (cl && i915_active_acquire(&cl->active)) - __i915_vma_pin(cl->hwsp->vma); + if (cl) + i915_active_acquire(&cl->active); } static void cacheline_release(struct intel_timeline_cacheline *cl) diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c index bf0974b12f3d..490ebd121f4c 100644 --- a/drivers/gpu/drm/i915/gt/mock_engine.c +++ b/drivers/gpu/drm/i915/gt/mock_engine.c @@ -155,7 +155,7 @@ static int mock_context_pin(struct intel_context *ce) return -ENOMEM; } - ret = intel_context_active_acquire(ce, PIN_HIGH); + ret = intel_context_active_acquire(ce); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c index eb91a625c71f..cb6a1eadf7df 100644 --- a/drivers/gpu/drm/i915/i915_active.c +++ b/drivers/gpu/drm/i915/i915_active.c @@ -39,7 +39,7 @@ static void *active_debug_hint(void *addr) { struct i915_active *ref = addr; - return (void *)ref->retire ?: (void *)ref; + return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref; } static struct debug_obj_descr active_debug_desc = { @@ -83,50 +83,58 @@ static inline void debug_active_assert(struct i915_active *ref) { } #endif static void -__active_park(struct i915_active *ref) +__active_retire(struct i915_active *ref) { struct active_node *it, *n; + struct rb_root root; + bool retire = false; + + lockdep_assert_held(&ref->mutex); + + /* return the unused nodes to our slabcache -- flushing the allocator */ + if (atomic_dec_and_test(&ref->count)) { + debug_active_deactivate(ref); + root = ref->tree; + ref->tree = RB_ROOT; + ref->cache = NULL; + retire = true; + } - rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) { + mutex_unlock(&ref->mutex); + if (!retire) + return; + + ref->retire(ref); + + rbtree_postorder_for_each_entry_safe(it, n, &root, node) { GEM_BUG_ON(i915_active_request_isset(&it->base)); kmem_cache_free(global.slab_cache, it); } - ref->tree = RB_ROOT; } static void -__active_retire(struct i915_active *ref) +active_retire(struct i915_active *ref) { - GEM_BUG_ON(!ref->count); - if (--ref->count) + GEM_BUG_ON(!atomic_read(&ref->count)); + if (atomic_add_unless(&ref->count, -1, 1)) return; - debug_active_deactivate(ref); - - /* return the unused nodes to our slabcache */ - __active_park(ref); - - ref->retire(ref); + /* One active may be flushed from inside the acquire of another */ + mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING); + __active_retire(ref); } static void node_retire(struct i915_active_request *base, struct i915_request *rq) { - __active_retire(container_of(base, struct active_node, base)->ref); -} - -static void -last_retire(struct i915_active_request *base, struct i915_request *rq) -{ - __active_retire(container_of(base, struct i915_active, last)); + active_retire(container_of(base, struct active_node, base)->ref); } static struct i915_active_request * active_instance(struct i915_active *ref, u64 idx) { - struct active_node *node; + struct active_node 
*node, *prealloc; struct rb_node **p, *parent; - struct i915_request *old; /* * We track the most recently used timeline to skip a rbtree search @@ -134,20 +142,18 @@ active_instance(struct i915_active *ref, u64 idx) * at all. We can reuse the last slot if it is empty, that is * after the previous activity has been retired, or if it matches the * current timeline. - * - * Note that we allow the timeline to be active simultaneously in - * the rbtree and the last cache. We do this to avoid having - * to search and replace the rbtree element for a new timeline, with - * the cost being that we must be aware that the ref may be retired - * twice for the same timeline (as the older rbtree element will be - * retired before the new request added to last). */ - old = i915_active_request_raw(&ref->last, BKL(ref)); - if (!old || old->fence.context == idx) - goto out; + node = READ_ONCE(ref->cache); + if (node && node->timeline == idx) + return &node->base; - /* Move the currently active fence into the rbtree */ - idx = old->fence.context; + /* Preallocate a replacement, just in case */ + prealloc = kmem_cache_alloc(global.slab_cache, GFP_KERNEL); + if (!prealloc) + return NULL; + + mutex_lock(&ref->mutex); + GEM_BUG_ON(i915_active_is_idle(ref)); parent = NULL; p = &ref->tree.rb_node; @@ -155,8 +161,10 @@ active_instance(struct i915_active *ref, u64 idx) parent = *p; node = rb_entry(parent, struct active_node, node); - if (node->timeline == idx) - goto replace; + if (node->timeline == idx) { + kmem_cache_free(global.slab_cache, prealloc); + goto out; + } if (node->timeline < idx) p = &parent->rb_right; @@ -164,17 +172,7 @@ active_instance(struct i915_active *ref, u64 idx) p = &parent->rb_left; } - node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL); - - /* kmalloc may retire the ref->last (thanks shrinker)! */ - if (unlikely(!i915_active_request_raw(&ref->last, BKL(ref)))) { - kmem_cache_free(global.slab_cache, node); - goto out; - } - - if (unlikely(!node)) - return ERR_PTR(-ENOMEM); - + node = prealloc; i915_active_request_init(&node->base, NULL, node_retire); node->ref = ref; node->timeline = idx; @@ -182,40 +180,29 @@ active_instance(struct i915_active *ref, u64 idx) rb_link_node(&node->node, parent, p); rb_insert_color(&node->node, &ref->tree); -replace: - /* - * Overwrite the previous active slot in the rbtree with last, - * leaving last zeroed. If the previous slot is still active, - * we must be careful as we now only expect to receive one retire - * callback not two, and so much undo the active counting for the - * overwritten slot. 
- */ - if (i915_active_request_isset(&node->base)) { - /* Retire ourselves from the old rq->active_list */ - __list_del_entry(&node->base.link); - ref->count--; - GEM_BUG_ON(!ref->count); - } - GEM_BUG_ON(list_empty(&ref->last.link)); - list_replace_init(&ref->last.link, &node->base.link); - node->base.request = fetch_and_zero(&ref->last.request); - out: - return &ref->last; + ref->cache = node; + mutex_unlock(&ref->mutex); + + return &node->base; } -void i915_active_init(struct drm_i915_private *i915, - struct i915_active *ref, - void (*retire)(struct i915_active *ref)) +void __i915_active_init(struct drm_i915_private *i915, + struct i915_active *ref, + int (*active)(struct i915_active *ref), + void (*retire)(struct i915_active *ref), + struct lock_class_key *key) { debug_active_init(ref); ref->i915 = i915; + ref->active = active; ref->retire = retire; ref->tree = RB_ROOT; - i915_active_request_init(&ref->last, NULL, last_retire); + ref->cache = NULL; init_llist_head(&ref->barriers); - ref->count = 0; + atomic_set(&ref->count, 0); + __mutex_init(&ref->mutex, "i915_active", key); } int i915_active_ref(struct i915_active *ref, @@ -223,68 +210,84 @@ int i915_active_ref(struct i915_active *ref, struct i915_request *rq) { struct i915_active_request *active; - int err = 0; + int err; /* Prevent reaping in case we malloc/wait while building the tree */ - i915_active_acquire(ref); + err = i915_active_acquire(ref); + if (err) + return err; active = active_instance(ref, timeline); - if (IS_ERR(active)) { - err = PTR_ERR(active); + if (!active) { + err = -ENOMEM; goto out; } if (!i915_active_request_isset(active)) - ref->count++; + atomic_inc(&ref->count); __i915_active_request_set(active, rq); - GEM_BUG_ON(!ref->count); out: i915_active_release(ref); return err; } -bool i915_active_acquire(struct i915_active *ref) +int i915_active_acquire(struct i915_active *ref) { + int err; + debug_active_assert(ref); - lockdep_assert_held(BKL(ref)); + if (atomic_add_unless(&ref->count, 1, 0)) + return 0; + + err = mutex_lock_interruptible(&ref->mutex); + if (err) + return err; - if (ref->count++) - return false; + if (!atomic_read(&ref->count) && ref->active) + err = ref->active(ref); + if (!err) { + debug_active_activate(ref); + atomic_inc(&ref->count); + } + + mutex_unlock(&ref->mutex); - debug_active_activate(ref); - return true; + return err; } void i915_active_release(struct i915_active *ref) { debug_active_assert(ref); - lockdep_assert_held(BKL(ref)); - - __active_retire(ref); + active_retire(ref); } int i915_active_wait(struct i915_active *ref) { struct active_node *it, *n; - int ret = 0; + int err; - if (i915_active_acquire(ref)) - goto out_release; + might_sleep(); + if (RB_EMPTY_ROOT(&ref->tree)) + return 0; - ret = i915_active_request_retire(&ref->last, BKL(ref)); - if (ret) - goto out_release; + err = mutex_lock_interruptible(&ref->mutex); + if (err) + return err; + + if (!atomic_add_unless(&ref->count, 1, 0)) { + mutex_unlock(&ref->mutex); + return 0; + } rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) { - ret = i915_active_request_retire(&it->base, BKL(ref)); - if (ret) + err = i915_active_request_retire(&it->base, BKL(ref)); + if (err) break; } -out_release: - i915_active_release(ref); - return ret; + __active_retire(ref); + return err; } int i915_request_await_active_request(struct i915_request *rq, @@ -299,23 +302,24 @@ int i915_request_await_active_request(struct i915_request *rq, int i915_request_await_active(struct i915_request *rq, struct i915_active *ref) { struct active_node 
*it, *n; - int err = 0; + int err; - /* await allocates and so we need to avoid hitting the shrinker */ - if (i915_active_acquire(ref)) - goto out; /* was idle */ + if (RB_EMPTY_ROOT(&ref->tree)) + return 0; - err = i915_request_await_active_request(rq, &ref->last); + /* await allocates and so we need to avoid hitting the shrinker */ + err = i915_active_acquire(ref); if (err) - goto out; + return err; + mutex_lock(&ref->mutex); rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) { err = i915_request_await_active_request(rq, &it->base); if (err) - goto out; + break; } + mutex_unlock(&ref->mutex); -out: i915_active_release(ref); return err; } @@ -324,9 +328,9 @@ out: void i915_active_fini(struct i915_active *ref) { debug_active_fini(ref); - GEM_BUG_ON(i915_active_request_isset(&ref->last)); GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree)); - GEM_BUG_ON(ref->count); + GEM_BUG_ON(atomic_read(&ref->count)); + mutex_destroy(&ref->mutex); } #endif @@ -353,7 +357,7 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref, (void *)engine, node_retire); node->timeline = kctx->ring->timeline->fence_context; node->ref = ref; - ref->count++; + atomic_inc(&ref->count); intel_engine_pm_get(engine); llist_add((struct llist_node *)&node->base.link, @@ -380,8 +384,9 @@ void i915_active_acquire_barrier(struct i915_active *ref) { struct llist_node *pos, *next; - i915_active_acquire(ref); + GEM_BUG_ON(i915_active_is_idle(ref)); + mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING); llist_for_each_safe(pos, next, llist_del_all(&ref->barriers)) { struct intel_engine_cs *engine; struct active_node *node; @@ -411,7 +416,7 @@ void i915_active_acquire_barrier(struct i915_active *ref) &engine->barrier_tasks); intel_engine_pm_put(engine); } - i915_active_release(ref); + mutex_unlock(&ref->mutex); } void i915_request_add_barriers(struct i915_request *rq) diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h index c14eebf6d074..134166d31251 100644 --- a/drivers/gpu/drm/i915/i915_active.h +++ b/drivers/gpu/drm/i915/i915_active.h @@ -369,9 +369,16 @@ i915_active_request_retire(struct i915_active_request *active, * synchronisation. 
*/ -void i915_active_init(struct drm_i915_private *i915, - struct i915_active *ref, - void (*retire)(struct i915_active *ref)); +void __i915_active_init(struct drm_i915_private *i915, + struct i915_active *ref, + int (*active)(struct i915_active *ref), + void (*retire)(struct i915_active *ref), + struct lock_class_key *key); +#define i915_active_init(i915, ref, active, retire) do { \ + static struct lock_class_key __key; \ + \ + __i915_active_init(i915, ref, active, retire, &__key); \ +} while (0) int i915_active_ref(struct i915_active *ref, u64 timeline, @@ -384,20 +391,14 @@ int i915_request_await_active(struct i915_request *rq, int i915_request_await_active_request(struct i915_request *rq, struct i915_active_request *active); -bool i915_active_acquire(struct i915_active *ref); - -static inline void i915_active_cancel(struct i915_active *ref) -{ - GEM_BUG_ON(ref->count != 1); - ref->count = 0; -} - +int i915_active_acquire(struct i915_active *ref); void i915_active_release(struct i915_active *ref); +void __i915_active_release_nested(struct i915_active *ref, int subclass); static inline bool i915_active_is_idle(const struct i915_active *ref) { - return !ref->count; + return !atomic_read(&ref->count); } #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) diff --git a/drivers/gpu/drm/i915/i915_active_types.h b/drivers/gpu/drm/i915/i915_active_types.h index c025991b9233..5b0a3024ce24 100644 --- a/drivers/gpu/drm/i915/i915_active_types.h +++ b/drivers/gpu/drm/i915/i915_active_types.h @@ -7,7 +7,9 @@ #ifndef _I915_ACTIVE_TYPES_H_ #define _I915_ACTIVE_TYPES_H_ +#include #include +#include #include #include @@ -24,13 +26,17 @@ struct i915_active_request { i915_active_retire_fn retire; }; +struct active_node; + struct i915_active { struct drm_i915_private *i915; + struct active_node *cache; struct rb_root tree; - struct i915_active_request last; - unsigned int count; + struct mutex mutex; + atomic_t count; + int (*active)(struct i915_active *ref); void (*retire)(struct i915_active *ref); struct llist_head barriers; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 90f367397656..ff1d5008a256 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2055,7 +2055,7 @@ static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size) if (!vma) return ERR_PTR(-ENOMEM); - i915_active_init(i915, &vma->active, NULL); + i915_active_init(i915, &vma->active, NULL, NULL); INIT_ACTIVE_REQUEST(&vma->last_fence); vma->vm = &ggtt->vm; diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index c13b86e6ef1f..c20a3022cd80 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -78,11 +78,20 @@ static void vma_print_allocator(struct i915_vma *vma, const char *reason) #endif -static void __i915_vma_retire(struct i915_active *ref) +static inline struct i915_vma *active_to_vma(struct i915_active *ref) { - struct i915_vma *vma = container_of(ref, typeof(*vma), active); + return container_of(ref, typeof(struct i915_vma), active); +} - i915_vma_put(vma); +static int __i915_vma_active(struct i915_active *ref) +{ + i915_vma_get(active_to_vma(ref)); + return 0; +} + +static void __i915_vma_retire(struct i915_active *ref) +{ + i915_vma_put(active_to_vma(ref)); } static struct i915_vma * @@ -107,7 +116,8 @@ vma_create(struct drm_i915_gem_object *obj, vma->size = obj->base.size; vma->display_alignment = I915_GTT_MIN_ALIGNMENT; - i915_active_init(vm->i915, &vma->active, __i915_vma_retire); + 
i915_active_init(vm->i915, &vma->active, + __i915_vma_active, __i915_vma_retire); INIT_ACTIVE_REQUEST(&vma->last_fence); INIT_LIST_HEAD(&vma->closed_link); @@ -904,11 +914,7 @@ int i915_vma_move_to_active(struct i915_vma *vma, * add the active reference first and queue for it to be dropped * *last*. */ - if (i915_active_acquire(&vma->active)) - i915_vma_get(vma); - err = i915_active_ref(&vma->active, rq->fence.context, rq); - i915_active_release(&vma->active); if (unlikely(err)) return err; diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c index 98493bcc91f2..84fce379c0de 100644 --- a/drivers/gpu/drm/i915/selftests/i915_active.c +++ b/drivers/gpu/drm/i915/selftests/i915_active.c @@ -4,6 +4,8 @@ * Copyright © 2018 Intel Corporation */ +#include + #include "gem/i915_gem_pm.h" #include "i915_selftest.h" @@ -13,20 +15,47 @@ struct live_active { struct i915_active base; + struct kref ref; bool retired; }; +static void __live_get(struct live_active *active) +{ + kref_get(&active->ref); +} + static void __live_free(struct live_active *active) { i915_active_fini(&active->base); kfree(active); } +static void __live_release(struct kref *ref) +{ + struct live_active *active = container_of(ref, typeof(*active), ref); + + __live_free(active); +} + +static void __live_put(struct live_active *active) +{ + kref_put(&active->ref, __live_release); +} + +static int __live_active(struct i915_active *base) +{ + struct live_active *active = container_of(base, typeof(*active), base); + + __live_get(active); + return 0; +} + static void __live_retire(struct i915_active *base) { struct live_active *active = container_of(base, typeof(*active), base); active->retired = true; + __live_put(active); } static struct live_active *__live_alloc(struct drm_i915_private *i915) @@ -37,7 +66,8 @@ static struct live_active *__live_alloc(struct drm_i915_private *i915) if (!active) return NULL; - i915_active_init(i915, &active->base, __live_retire); + kref_init(&active->ref); + i915_active_init(i915, &active->base, __live_active, __live_retire); return active; } @@ -62,11 +92,9 @@ __live_active_setup(struct drm_i915_private *i915) return ERR_PTR(-ENOMEM); } - if (!i915_active_acquire(&active->base)) { - pr_err("First i915_active_acquire should report being idle\n"); - err = -EINVAL; + err = i915_active_acquire(&active->base); + if (err) goto out; - } for_each_engine(engine, i915, id) { struct i915_request *rq; @@ -97,18 +125,21 @@ __live_active_setup(struct drm_i915_private *i915) pr_err("i915_active retired before submission!\n"); err = -EINVAL; } - if (active->base.count != count) { + if (atomic_read(&active->base.count) != count) { pr_err("i915_active not tracking all requests, found %d, expected %d\n", - active->base.count, count); + atomic_read(&active->base.count), count); err = -EINVAL; } out: i915_sw_fence_commit(submit); heap_fence_put(submit); + if (err) { + __live_put(active); + active = ERR_PTR(err); + } - /* XXX leaks live_active on error */ - return err ? 
ERR_PTR(err) : active; + return active; } static int live_active_wait(void *arg) @@ -135,7 +166,7 @@ static int live_active_wait(void *arg) err = -EINVAL; } - __live_free(active); + __live_put(active); if (igt_flush_test(i915, I915_WAIT_LOCKED)) err = -EIO; @@ -174,7 +205,7 @@ static int live_active_retire(void *arg) err = -EINVAL; } - __live_free(active); + __live_put(active); err: intel_runtime_pm_put(&i915->runtime_pm, wakeref); -- cgit v1.2.3 From fb993aa7a4049263fc1a139daf89fadc79a0ecc7 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 21 Jun 2019 19:38:01 +0100 Subject: drm/i915: Local debug BUG_ON for intel_wakeref Avoid pulling in i915_gem.h just so that we can use a conditional BUG_ON for debugging. Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190621183801.23252-5-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_wakeref.c | 9 +++++---- drivers/gpu/drm/i915/intel_wakeref.h | 7 +++++++ 2 files changed, 12 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c index 3db6fa682823..06bd8b215cc2 100644 --- a/drivers/gpu/drm/i915/intel_wakeref.c +++ b/drivers/gpu/drm/i915/intel_wakeref.c @@ -5,7 +5,7 @@ */ #include "intel_runtime_pm.h" -#include "i915_gem.h" +#include "intel_wakeref.h" static void rpm_get(struct intel_runtime_pm *rpm, struct intel_wakeref *wf) { @@ -17,7 +17,7 @@ static void rpm_put(struct intel_runtime_pm *rpm, struct intel_wakeref *wf) intel_wakeref_t wakeref = fetch_and_zero(&wf->wakeref); intel_runtime_pm_put(rpm, wakeref); - GEM_BUG_ON(!wakeref); + INTEL_WAKEREF_BUG_ON(!wakeref); } int __intel_wakeref_get_first(struct intel_runtime_pm *rpm, @@ -48,6 +48,7 @@ int __intel_wakeref_get_first(struct intel_runtime_pm *rpm, atomic_inc(&wf->count); mutex_unlock(&wf->mutex); + INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0); return 0; } @@ -115,7 +116,7 @@ void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout) if (!refcount_inc_not_zero(&wf->count)) { spin_lock_irqsave(&wf->lock, flags); if (!refcount_inc_not_zero(&wf->count)) { - GEM_BUG_ON(wf->wakeref); + INTEL_WAKEREF_BUG_ON(wf->wakeref); wf->wakeref = intel_runtime_pm_get_if_in_use(wf->rpm); refcount_set(&wf->count, 1); } @@ -134,5 +135,5 @@ void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout) void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf) { intel_wakeref_auto(wf, 0); - GEM_BUG_ON(wf->wakeref); + INTEL_WAKEREF_BUG_ON(wf->wakeref); } diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h index 9cbb2ebf575b..d45e78639dc4 100644 --- a/drivers/gpu/drm/i915/intel_wakeref.h +++ b/drivers/gpu/drm/i915/intel_wakeref.h @@ -13,6 +13,12 @@ #include #include +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG) +#define INTEL_WAKEREF_BUG_ON(expr) BUG_ON(expr) +#else +#define INTEL_WAKEREF_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr) +#endif + struct intel_runtime_pm; typedef depot_stack_handle_t intel_wakeref_t; @@ -86,6 +92,7 @@ intel_wakeref_put(struct intel_runtime_pm *rpm, struct intel_wakeref *wf, int (*fn)(struct intel_wakeref *wf)) { + INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0); if (atomic_dec_and_mutex_lock(&wf->count, &wf->mutex)) return __intel_wakeref_put_last(rpm, wf, fn); -- cgit v1.2.3 From b2dbf8d982a4f02a00261a5f8f75d2f0bf765de4 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 21 Jun 2019 22:57:33 +0100 Subject: drm/i915/blt: Remove recursive vma->lock As we 
have already plugged the w->dma into the reservation_object, and have set ourselves up to automatically signal the request and w->dma on completion, we do not need to export the rq->fence directly and just use the w->dma fence. This avoids having to take the reservation_lock inside the worker which cross-release lockdep would complain about. :) Signed-off-by: Chris Wilson Cc: Matthew Auld Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190621215733.12070-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_client_blt.c | 14 +++++++------- drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c | 11 ----------- 2 files changed, 7 insertions(+), 18 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c index 1fdab0767a47..9b01c3b5b31d 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c @@ -72,7 +72,6 @@ static struct i915_sleeve *create_sleeve(struct i915_address_space *vm, vma->ops = &proxy_vma_ops; sleeve->vma = vma; - sleeve->obj = i915_gem_object_get(obj); sleeve->pages = pages; sleeve->page_sizes = *page_sizes; @@ -85,7 +84,6 @@ err_free: static void destroy_sleeve(struct i915_sleeve *sleeve) { - i915_gem_object_put(sleeve->obj); kfree(sleeve); } @@ -155,7 +153,7 @@ static void clear_pages_worker(struct work_struct *work) { struct clear_pages_work *w = container_of(work, typeof(*w), work); struct drm_i915_private *i915 = w->ce->gem_context->i915; - struct drm_i915_gem_object *obj = w->sleeve->obj; + struct drm_i915_gem_object *obj = w->sleeve->vma->obj; struct i915_vma *vma = w->sleeve->vma; struct i915_request *rq; int err = w->dma.error; @@ -193,10 +191,12 @@ static void clear_pages_worker(struct work_struct *work) goto out_request; } - /* XXX: more feverish nightmares await */ - i915_vma_lock(vma); - err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); - i915_vma_unlock(vma); + /* + * w->dma is already exported via (vma|obj)->resv we need only + * keep track of the GPU activity within this vma/request, and + * propagate the signal from the request to w->dma. + */ + err = i915_active_ref(&vma->active, rq->fence.context, rq); if (err) goto out_request; diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c index f3a5eb807c1c..855481252bda 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c @@ -63,17 +63,6 @@ static int igt_client_fill(void *arg) if (err) goto err_unpin; - /* - * XXX: For now do the wait without the object resv lock to - * ensure we don't deadlock. - */ - err = i915_gem_object_wait(obj, - I915_WAIT_INTERRUPTIBLE | - I915_WAIT_ALL, - MAX_SCHEDULE_TIMEOUT); - if (err) - goto err_unpin; - i915_gem_object_lock(obj); err = i915_gem_object_set_to_cpu_domain(obj, false); i915_gem_object_unlock(obj); -- cgit v1.2.3 From 8db7933ee3d442627c716ee49a70fa3aef18640d Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 24 Jun 2019 10:20:09 +0100 Subject: drm/i915/execlists: Always clear ring_pause if we do not submit In the unlikely case (thank you CI!), we may find ourselves wanting to issue a preemption but having no runnable requests left. 
In this case, we set the semaphore before computing the preemption and so must unset it before forgetting (or else we leave the machine busywaiting until the next request comes along and so likely hang). v2: Replace readback with only a wmb after asserting the semaphore Signed-off-by: Chris Wilson Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190624092009.30189-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_lrc.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index c8a0c9b32764..28685ba91a2c 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -240,7 +240,8 @@ ring_set_paused(const struct intel_engine_cs *engine, int state) * until the dword is false. */ engine->status_page.addr[I915_GEM_HWS_PREEMPT] = state; - wmb(); + if (state) + wmb(); } static inline struct i915_priolist *to_priolist(struct rb_node *rb) @@ -1243,6 +1244,8 @@ done: *port = execlists_schedule_in(last, port - execlists->pending); memset(port + 1, 0, (last_port - port) * sizeof(*port)); execlists_submit_ports(engine); + } else { + ring_set_paused(engine, 0); } } -- cgit v1.2.3 From 871918dffefc594e765cc7e885a36a7fd3f38da7 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 24 Jun 2019 15:16:30 +0100 Subject: drm/i915/gem: Clear read/write domains for GPU clear Update the domains for the write via the GPU so that we do not shortcircuit any set-domain clflush afterwards. Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=110978 Fixes: b2dbf8d982a4 ("drm/i915/blt: Remove recursive vma->lock") Signed-off-by: Chris Wilson Cc: Matthew Auld Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190624141630.11015-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_client_blt.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c index 9b01c3b5b31d..6f537e8e4dea 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c @@ -162,11 +162,12 @@ static void clear_pages_worker(struct work_struct *work) goto out_signal; if (obj->cache_dirty) { - obj->write_domain = 0; if (i915_gem_object_has_struct_page(obj)) drm_clflush_sg(w->sleeve->pages); obj->cache_dirty = false; } + obj->read_domains = I915_GEM_GPU_DOMAINS; + obj->write_domain = 0; /* XXX: we need to kill this */ mutex_lock(&i915->drm.struct_mutex); -- cgit v1.2.3 From f9a393875d3af13cc3267477746608dadb7f17c1 Mon Sep 17 00:00:00 2001 From: Kenneth Graunke Date: Tue, 25 Jun 2019 10:06:55 +0100 Subject: drm/i915: Disable SAMPLER_STATE prefetching on all Gen11 steppings. The Demand Prefetch workaround (binding table prefetching) only applies to Icelake A0/B0. But the Sampler Prefetch workaround needs to be applied to all Gen11 steppings, according to a programming note in the SARCHKMD documentation. Using the Intel Gallium driver, I have seen intermittent failures in the dEQP-GLES31.functional.copy_image.non_compressed.* tests. After applying this workaround, the tests reliably pass. 
v2: Remove the overlap with a pre-production w/a BSpec: 9663 Signed-off-by: Kenneth Graunke Signed-off-by: Chris Wilson Cc: stable@vger.kernel.org Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190625090655.19220-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_workarounds.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 2835ab70199b..d776e111e5d0 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -1252,8 +1252,12 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0)) wa_write_or(wal, GEN7_SARCHKMD, - GEN7_DISABLE_DEMAND_PREFETCH | - GEN7_DISABLE_SAMPLER_PREFETCH); + GEN7_DISABLE_DEMAND_PREFETCH); + + /* Wa_1606682166:icl */ + wa_write_or(wal, + GEN7_SARCHKMD, + GEN7_DISABLE_SAMPLER_PREFETCH); } if (IS_GEN_RANGE(i915, 9, 11)) { -- cgit v1.2.3 From 8dcfdfb4501012a8d36d2157dc73925715f2befb Mon Sep 17 00:00:00 2001 From: Lionel Landwerlin Date: Mon, 10 Jun 2019 11:19:14 +0300 Subject: drm/i915/perf: fix ICL perf register offsets We got the wrong offsets (could they have changed?). New values were computed off an error state by looking up the register offset in the context image as written by the HW. Signed-off-by: Lionel Landwerlin Fixes: 1de401c08fa805 ("drm/i915/perf: enable perf support on ICL") Acked-by: Kenneth Graunke Link: https://patchwork.freedesktop.org/patch/msgid/20190610081914.25428-1-lionel.g.landwerlin@intel.com --- drivers/gpu/drm/i915/i915_perf.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 3d8162d28730..d28a5bf80bd7 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -3479,9 +3479,13 @@ void i915_perf_init(struct drm_i915_private *dev_priv) dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set; dev_priv->perf.oa.ops.disable_metric_set = gen10_disable_metric_set; - dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128; - dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de; - + if (IS_GEN(dev_priv, 10)) { + dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128; + dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de; + } else { + dev_priv->perf.oa.ctx_oactxctrl_offset = 0x124; + dev_priv->perf.oa.ctx_flexeu0_offset = 0x78e; + } dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16); } } -- cgit v1.2.3 From fc25441c7b9d81dc18cce0a0fa5527370e386652 Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Fri, 21 Jun 2019 08:18:47 -0700 Subject: drm/i915/ehl: Add one additional PCH ID to MCC MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There's one additional ID that we should treat as Mule Creek Canyon. 
Cc: José Roberto de Souza Signed-off-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20190621151847.31302-1-matthew.d.roper@intel.com Reviewed-by: José Roberto de Souza --- drivers/gpu/drm/i915/i915_drv.c | 1 + drivers/gpu/drm/i915/i915_drv.h | 1 + 2 files changed, 2 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 4a211d5e2701..6241a4615501 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -220,6 +220,7 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id) WARN_ON(!IS_ICELAKE(dev_priv)); return PCH_ICP; case INTEL_PCH_MCC_DEVICE_ID_TYPE: + case INTEL_PCH_MCC2_DEVICE_ID_TYPE: DRM_DEBUG_KMS("Found Mule Creek Canyon PCH\n"); WARN_ON(!IS_ELKHARTLAKE(dev_priv)); return PCH_MCC; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 4077d15fe429..118c5d2d834b 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2323,6 +2323,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define INTEL_PCH_CMP_DEVICE_ID_TYPE 0x0280 #define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480 #define INTEL_PCH_MCC_DEVICE_ID_TYPE 0x4B00 +#define INTEL_PCH_MCC2_DEVICE_ID_TYPE 0x3880 #define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100 #define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */ -- cgit v1.2.3 From 9a6a644096a1066c2ef5c47db0db79ac72af7967 Mon Sep 17 00:00:00 2001 From: José Roberto de Souza Date: Fri, 14 Jun 2019 14:37:49 -0700 Subject: drm/i915/ehl: Add missing VECS engine MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit EHL can have up to one VECS(video enhancement) engine, so add it to the device_info. BSpec: 29152 Cc: Rodrigo Vivi Cc: Bob Paauwe Cc: Matt Roper Cc: Clint Taylor Signed-off-by: José Roberto de Souza Reviewed-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20190614213749.15870-1-jose.souza@intel.com --- drivers/gpu/drm/i915/i915_pci.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 6c9f46fc3e12..94b588e0a1dd 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -761,7 +761,7 @@ static const struct intel_device_info intel_elkhartlake_info = { GEN11_FEATURES, PLATFORM(INTEL_ELKHARTLAKE), .require_force_probe = 1, - .engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0), + .engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0), .ppgtt_size = 36, }; -- cgit v1.2.3 From 07bfe6bf1052f074093cdea95d6041f48b994c4b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 25 Jun 2019 14:01:09 +0100 Subject: drm/i915/execlists: Convert recursive defer_request() into iterative As this engine owns the lock around rq->sched.link (for those waiters submitted to this engine), we can use that link as an element in a local list. We can thus replace the recursive algorithm with an iterative walk over the ordered list of waiters. 
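
The shape of that conversion, as a standalone sketch with invented names (the real code threads requests through their existing sched.link list_head rather than using a flag plus a side pointer as done here): each visited request appends its not-yet-queued waiters to the tail of a local worklist and the loop pops from the head, so the traversal is bounded by the list walk instead of by recursion depth, and waiters are always requeued behind their signalers.

	#include <stddef.h>

	#define MAX_WAITERS 4

	struct req {
		struct req *waiters[MAX_WAITERS]; /* requests that depend on us */
		int nr_waiters;
		struct req *wl_next;              /* local worklist link */
		int deferred;
	};

	/* Iteratively defer @rq and every waiter reachable from it. */
	static void defer_request_sketch(struct req *rq)
	{
		struct req *head = rq, *tail = rq;

		rq->wl_next = NULL;
		rq->deferred = 1;

		while (head) {
			int i;

			/* ...move @head to the back of its priority list here... */

			for (i = 0; i < head->nr_waiters; i++) {
				struct req *w = head->waiters[i];

				if (w->deferred)
					continue; /* already on the worklist */

				w->deferred = 1;
				w->wl_next = NULL;
				tail->wl_next = w; /* append, preserving order */
				tail = w;
			}

			head = head->wl_next; /* pop the next request to process */
		}
	}
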
Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190625130128.11009-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_lrc.c | 52 +++++++++++++++++++------------------ 1 file changed, 27 insertions(+), 25 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 28685ba91a2c..22afd2616d7f 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -833,10 +833,9 @@ last_active(const struct intel_engine_execlists *execlists) return *last; } -static void -defer_request(struct i915_request * const rq, struct list_head * const pl) +static void defer_request(struct i915_request *rq, struct list_head * const pl) { - struct i915_dependency *p; + LIST_HEAD(list); /* * We want to move the interrupted request to the back of @@ -845,34 +844,37 @@ defer_request(struct i915_request * const rq, struct list_head * const pl) * flight and were waiting for the interrupted request to * be run after it again. */ - list_move_tail(&rq->sched.link, pl); + do { + struct i915_dependency *p; - list_for_each_entry(p, &rq->sched.waiters_list, wait_link) { - struct i915_request *w = - container_of(p->waiter, typeof(*w), sched); + GEM_BUG_ON(i915_request_is_active(rq)); + list_move_tail(&rq->sched.link, pl); - /* Leave semaphores spinning on the other engines */ - if (w->engine != rq->engine) - continue; + list_for_each_entry(p, &rq->sched.waiters_list, wait_link) { + struct i915_request *w = + container_of(p->waiter, typeof(*w), sched); - /* No waiter should start before the active request completed */ - GEM_BUG_ON(i915_request_started(w)); + /* Leave semaphores spinning on the other engines */ + if (w->engine != rq->engine) + continue; - GEM_BUG_ON(rq_prio(w) > rq_prio(rq)); - if (rq_prio(w) < rq_prio(rq)) - continue; + /* No waiter should start before its signaler */ + GEM_BUG_ON(i915_request_started(w) && + !i915_request_completed(rq)); - if (list_empty(&w->sched.link)) - continue; /* Not yet submitted; unready */ + GEM_BUG_ON(i915_request_is_active(w)); + if (list_empty(&w->sched.link)) + continue; /* Not yet submitted; unready */ - /* - * This should be very shallow as it is limited by the - * number of requests that can fit in a ring (<64) and - * the number of contexts that can be in flight on this - * engine. - */ - defer_request(w, pl); - } + if (rq_prio(w) < rq_prio(rq)) + continue; + + GEM_BUG_ON(rq_prio(w) > rq_prio(rq)); + list_move_tail(&w->sched.link, &list); + } + + rq = list_first_entry_or_null(&list, typeof(*rq), sched.link); + } while (rq); } static void defer_active(struct intel_engine_cs *engine) -- cgit v1.2.3 From 8e9c2f621893ae2cc57c34fa96ece011eebd94b7 Mon Sep 17 00:00:00 2001 From: "Robert M. Fosha" Date: Tue, 25 Jun 2019 09:41:07 -0700 Subject: drm/i915/guc: Add debug capture of GuC exception Detect GuC firmware load failure due to an exception during execution in GuC firmware. Output the GuC EIP where exception occurred to dmesg for GuC debug information. v2: correct typos, change debug message and error code returned for GuC exception (Michal) Signed-off-by: Robert M. 
Fosha Cc: Daniele Ceraolo Spurio Cc: Michal Wajdeczko Reviewed-by: Michal Wajdeczko Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190625164107.21512-1-robert.m.fosha@intel.com --- drivers/gpu/drm/i915/intel_guc_fw.c | 7 +++++++ drivers/gpu/drm/i915/intel_guc_reg.h | 1 + 2 files changed, 8 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c index 72cdafd9636a..970f39ef248b 100644 --- a/drivers/gpu/drm/i915/intel_guc_fw.c +++ b/drivers/gpu/drm/i915/intel_guc_fw.c @@ -197,6 +197,7 @@ static inline bool guc_ready(struct intel_guc *guc, u32 *status) static int guc_wait_ucode(struct intel_guc *guc) { + struct drm_i915_private *i915 = guc_to_i915(guc); u32 status; int ret; @@ -216,6 +217,12 @@ static int guc_wait_ucode(struct intel_guc *guc) ret = -ENOEXEC; } + if ((status & GS_UKERNEL_MASK) == GS_UKERNEL_EXCEPTION) { + DRM_ERROR("GuC firmware exception. EIP: %#x\n", + intel_uncore_read(&i915->uncore, SOFT_SCRATCH(13))); + ret = -ENXIO; + } + if (ret == 0 && !guc_xfer_completed(guc, &status)) { DRM_ERROR("GuC is ready, but the xfer %08x is incomplete\n", status); diff --git a/drivers/gpu/drm/i915/intel_guc_reg.h b/drivers/gpu/drm/i915/intel_guc_reg.h index a214f8b71929..d90b88fadb5e 100644 --- a/drivers/gpu/drm/i915/intel_guc_reg.h +++ b/drivers/gpu/drm/i915/intel_guc_reg.h @@ -37,6 +37,7 @@ #define GS_UKERNEL_MASK (0xFF << GS_UKERNEL_SHIFT) #define GS_UKERNEL_LAPIC_DONE (0x30 << GS_UKERNEL_SHIFT) #define GS_UKERNEL_DPC_ERROR (0x60 << GS_UKERNEL_SHIFT) +#define GS_UKERNEL_EXCEPTION (0x70 << GS_UKERNEL_SHIFT) #define GS_UKERNEL_READY (0xF0 << GS_UKERNEL_SHIFT) #define GS_MIA_SHIFT 16 #define GS_MIA_MASK (0x07 << GS_MIA_SHIFT) -- cgit v1.2.3 From 0c91621cad492e362c37330e1a0985bcdda00fda Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 25 Jun 2019 14:01:10 +0100 Subject: drm/i915/gt: Pass intel_gt to pm routines Switch from passing the i915 container to newly named struct intel_gt. Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190625130128.11009-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 4 ++-- drivers/gpu/drm/i915/gem/i915_gem_pm.c | 2 +- drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 4 ++-- drivers/gpu/drm/i915/gt/intel_engine_pm.c | 8 ++++---- drivers/gpu/drm/i915/gt/intel_gt_pm.c | 24 +++++++++++++--------- drivers/gpu/drm/i915/gt/intel_gt_pm.h | 9 ++++---- drivers/gpu/drm/i915/gt/intel_reset.c | 6 +++--- drivers/gpu/drm/i915/i915_drv.c | 2 +- drivers/gpu/drm/i915/i915_gem.c | 2 +- drivers/gpu/drm/i915/selftests/i915_gem.c | 2 +- 10 files changed, 33 insertions(+), 30 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index cf8edb6822ee..1c5dfbfad71b 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -2437,7 +2437,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, * wakeref that we hold until the GPU has been idle for at least * 100ms. 
*/ - intel_gt_pm_get(eb.i915); + intel_gt_pm_get(&eb.i915->gt); err = i915_mutex_lock_interruptible(dev); if (err) @@ -2607,7 +2607,7 @@ err_engine: err_unlock: mutex_unlock(&dev->struct_mutex); err_rpm: - intel_gt_pm_put(eb.i915); + intel_gt_pm_put(&eb.i915->gt); i915_gem_context_put(eb.gem_context); err_destroy: eb_destroy(&eb); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c index 8f721cf0ab99..ee1f66594a35 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c @@ -258,7 +258,7 @@ void i915_gem_resume(struct drm_i915_private *i915) * guarantee that the context image is complete. So let's just reset * it and start again. */ - intel_gt_resume(i915); + intel_gt_resume(&i915->gt); if (i915_gem_init_hw(i915)) goto err_wedged; diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index 24a3c677ccd5..a1f0b235f56b 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -379,7 +379,7 @@ static void disable_retire_worker(struct drm_i915_private *i915) { i915_gem_shrinker_unregister(i915); - intel_gt_pm_get(i915); + intel_gt_pm_get(&i915->gt); cancel_delayed_work_sync(&i915->gem.retire_work); flush_work(&i915->gem.idle_work); @@ -387,7 +387,7 @@ static void disable_retire_worker(struct drm_i915_private *i915) static void restore_retire_worker(struct drm_i915_private *i915) { - intel_gt_pm_put(i915); + intel_gt_pm_put(&i915->gt); mutex_lock(&i915->drm.struct_mutex); igt_flush_test(i915, I915_WAIT_LOCKED); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c index 2ce00d3dc42a..5253c382034d 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c @@ -18,7 +18,7 @@ static int __engine_unpark(struct intel_wakeref *wf) GEM_TRACE("%s\n", engine->name); - intel_gt_pm_get(engine->i915); + intel_gt_pm_get(engine->gt); /* Pin the default state for fast resets from atomic context. 
*/ map = NULL; @@ -129,7 +129,7 @@ static int __engine_park(struct intel_wakeref *wf) engine->execlists.no_priolist = false; - intel_gt_pm_put(engine->i915); + intel_gt_pm_put(engine->gt); return 0; } @@ -149,7 +149,7 @@ int intel_engines_resume(struct drm_i915_private *i915) enum intel_engine_id id; int err = 0; - intel_gt_pm_get(i915); + intel_gt_pm_get(&i915->gt); for_each_engine(engine, i915, id) { intel_engine_pm_get(engine); engine->serial++; /* kernel context lost */ @@ -162,7 +162,7 @@ int intel_engines_resume(struct drm_i915_private *i915) break; } } - intel_gt_pm_put(i915); + intel_gt_pm_put(&i915->gt); return err; } diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index 6062840b5b46..ec6b69d014b6 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -50,9 +50,11 @@ static int intel_gt_unpark(struct intel_wakeref *wf) return 0; } -void intel_gt_pm_get(struct drm_i915_private *i915) +void intel_gt_pm_get(struct intel_gt *gt) { - intel_wakeref_get(&i915->runtime_pm, &i915->gt.wakeref, intel_gt_unpark); + struct intel_runtime_pm *rpm = >->i915->runtime_pm; + + intel_wakeref_get(rpm, >->wakeref, intel_gt_unpark); } static int intel_gt_park(struct intel_wakeref *wf) @@ -75,9 +77,11 @@ static int intel_gt_park(struct intel_wakeref *wf) return 0; } -void intel_gt_pm_put(struct drm_i915_private *i915) +void intel_gt_pm_put(struct intel_gt *gt) { - intel_wakeref_put(&i915->runtime_pm, &i915->gt.wakeref, intel_gt_park); + struct intel_runtime_pm *rpm = >->i915->runtime_pm; + + intel_wakeref_put(rpm, >->wakeref, intel_gt_park); } void intel_gt_pm_init_early(struct intel_gt *gt) @@ -96,7 +100,7 @@ static bool reset_engines(struct drm_i915_private *i915) /** * intel_gt_sanitize: called after the GPU has lost power - * @i915: the i915 device + * @gt: the i915 GT container * @force: ignore a failed reset and sanitize engine state anyway * * Anytime we reset the GPU, either with an explicit GPU reset or through a @@ -104,21 +108,21 @@ static bool reset_engines(struct drm_i915_private *i915) * to match. Note that calling intel_gt_sanitize() if the GPU has not * been reset results in much confusion! */ -void intel_gt_sanitize(struct drm_i915_private *i915, bool force) +void intel_gt_sanitize(struct intel_gt *gt, bool force) { struct intel_engine_cs *engine; enum intel_engine_id id; GEM_TRACE("\n"); - if (!reset_engines(i915) && !force) + if (!reset_engines(gt->i915) && !force) return; - for_each_engine(engine, i915, id) + for_each_engine(engine, gt->i915, id) intel_engine_reset(engine, false); } -void intel_gt_resume(struct drm_i915_private *i915) +void intel_gt_resume(struct intel_gt *gt) { struct intel_engine_cs *engine; enum intel_engine_id id; @@ -129,7 +133,7 @@ void intel_gt_resume(struct drm_i915_private *i915) * Only the kernel contexts should remain pinned over suspend, * allowing us to fixup the user contexts on their first pin. 
*/ - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt->i915, id) { struct intel_context *ce; ce = engine->kernel_context; diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h index b6049a907890..4dbb92cf58d7 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h @@ -9,7 +9,6 @@ #include -struct drm_i915_private; struct intel_gt; enum { @@ -17,12 +16,12 @@ enum { INTEL_GT_PARK, }; -void intel_gt_pm_get(struct drm_i915_private *i915); -void intel_gt_pm_put(struct drm_i915_private *i915); +void intel_gt_pm_get(struct intel_gt *gt); +void intel_gt_pm_put(struct intel_gt *gt); void intel_gt_pm_init_early(struct intel_gt *gt); -void intel_gt_sanitize(struct drm_i915_private *i915, bool force); -void intel_gt_resume(struct drm_i915_private *i915); +void intel_gt_sanitize(struct intel_gt *gt, bool force); +void intel_gt_resume(struct intel_gt *gt); #endif /* INTEL_GT_PM_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 3c925af64793..e92054e118cc 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -714,7 +714,7 @@ static void reset_prepare(struct drm_i915_private *i915) struct intel_engine_cs *engine; enum intel_engine_id id; - intel_gt_pm_get(i915); + intel_gt_pm_get(&i915->gt); for_each_engine(engine, i915, id) reset_prepare_engine(engine); @@ -765,7 +765,7 @@ static void reset_finish(struct drm_i915_private *i915) reset_finish_engine(engine); intel_engine_signal_breadcrumbs(engine); } - intel_gt_pm_put(i915); + intel_gt_pm_put(&i915->gt); } static void nop_submit_request(struct i915_request *request) @@ -891,7 +891,7 @@ static bool __i915_gem_unset_wedged(struct drm_i915_private *i915) } mutex_unlock(&i915->gt.timelines.mutex); - intel_gt_sanitize(i915, false); + intel_gt_sanitize(&i915->gt, false); /* * Undo nop_submit_request. We prevent all new i915 requests from diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 6241a4615501..f5b7c37c165f 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -2377,7 +2377,7 @@ static int i915_drm_resume_early(struct drm_device *dev) intel_power_domains_resume(dev_priv); - intel_gt_sanitize(dev_priv, true); + intel_gt_sanitize(&dev_priv->gt, true); enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index e59be5c05e1b..deecbe128e5b 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1157,7 +1157,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915) * it may impact the display and we are uncertain about the stability * of the reset, so this could be applied to even earlier gen. */ - intel_gt_sanitize(i915, false); + intel_gt_sanitize(&i915->gt, false); intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL); intel_runtime_pm_put(&i915->runtime_pm, wakeref); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c index c6a01a6e87f1..ed0c17bf6613 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem.c @@ -115,7 +115,7 @@ static void pm_resume(struct drm_i915_private *i915) * that runtime-pm just works. 
*/ with_intel_runtime_pm(&i915->runtime_pm, wakeref) { - intel_gt_sanitize(i915, false); + intel_gt_sanitize(&i915->gt, false); i915_gem_sanitize(i915); i915_gem_resume(i915); } -- cgit v1.2.3 From 5f22e5b3116ce42f0fdd38d645b001cddf5336d7 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 25 Jun 2019 14:01:14 +0100 Subject: drm/i915: Rename intel_wakeref_[is]_active Our general rule is to use is/has as the verb for boolean functions, rename intel_wakeref_active to intel_wakeref_is_active so the question being asked is clear. Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190625130128.11009-6-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_pm.c | 3 ++- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 2 +- drivers/gpu/drm/i915/gt/intel_engine_pm.h | 9 +++++++++ drivers/gpu/drm/i915/gt/intel_lrc.c | 2 +- drivers/gpu/drm/i915/gt/intel_reset.c | 2 +- drivers/gpu/drm/i915/intel_wakeref.h | 4 ++-- 6 files changed, 16 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c index ee1f66594a35..6b730bd4d72f 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c @@ -54,7 +54,8 @@ static void idle_work_handler(struct work_struct *work) mutex_lock(&i915->drm.struct_mutex); intel_wakeref_lock(&i915->gt.wakeref); - park = !intel_wakeref_active(&i915->gt.wakeref) && !work_pending(work); + park = (!intel_wakeref_is_active(&i915->gt.wakeref) && + !work_pending(work)); intel_wakeref_unlock(&i915->gt.wakeref); if (park) i915_gem_park(i915); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 4961f74fd902..d1508f0b4c84 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -1155,7 +1155,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine) if (i915_reset_failed(engine->i915)) return true; - if (!intel_wakeref_active(&engine->wakeref)) + if (!intel_engine_pm_is_awake(engine)) return true; /* Waiting to drain ELSP? */ diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h index b326cd993d60..f3f5b031b4a1 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h @@ -7,12 +7,21 @@ #ifndef INTEL_ENGINE_PM_H #define INTEL_ENGINE_PM_H +#include "intel_engine_types.h" +#include "intel_wakeref.h" + struct drm_i915_private; struct intel_engine_cs; void intel_engine_pm_get(struct intel_engine_cs *engine); void intel_engine_pm_put(struct intel_engine_cs *engine); +static inline bool +intel_engine_pm_is_awake(const struct intel_engine_cs *engine) +{ + return intel_wakeref_is_active(&engine->wakeref); +} + void intel_engine_park(struct intel_engine_cs *engine); void intel_engine_init__pm(struct intel_engine_cs *engine); diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 22afd2616d7f..471e134de186 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -684,7 +684,7 @@ static void execlists_submit_ports(struct intel_engine_cs *engine) * that all ELSP are drained i.e. we have processed the CSB, * before allowing ourselves to idle and calling intel_runtime_pm_put(). 
*/ - GEM_BUG_ON(!intel_wakeref_active(&engine->wakeref)); + GEM_BUG_ON(!intel_engine_pm_is_awake(engine)); /* * ELSQ note: the submit queue is not cleared after being submitted diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index e92054e118cc..8ce92c51564e 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -1072,7 +1072,7 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg) GEM_TRACE("%s flags=%lx\n", engine->name, error->flags); GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags)); - if (!intel_wakeref_active(&engine->wakeref)) + if (!intel_engine_pm_is_awake(engine)) return 0; reset_prepare_engine(engine); diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h index d45e78639dc4..f74272770a5c 100644 --- a/drivers/gpu/drm/i915/intel_wakeref.h +++ b/drivers/gpu/drm/i915/intel_wakeref.h @@ -128,13 +128,13 @@ intel_wakeref_unlock(struct intel_wakeref *wf) } /** - * intel_wakeref_active: Query whether the wakeref is currently held + * intel_wakeref_is_active: Query whether the wakeref is currently held * @wf: the wakeref * * Returns: true if the wakeref is currently held. */ static inline bool -intel_wakeref_active(struct intel_wakeref *wf) +intel_wakeref_is_active(const struct intel_wakeref *wf) { return READ_ONCE(wf->wakeref); } -- cgit v1.2.3 From c8d84778e52733cbbc05c1f3ea77635feb099642 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 25 Jun 2019 14:01:22 +0100 Subject: drm/i915/selftests: Hold ref on request across waits As we wait upon the request, we should be sure to hold our own reference for our checks. Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190625130128.11009-14-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/selftests/i915_request.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 0fdf948a93a0..1bbfc43d4a9e 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -75,55 +75,58 @@ static int igt_wait_request(void *arg) err = -ENOMEM; goto out_unlock; } + i915_request_get(request); if (i915_request_wait(request, 0, 0) != -ETIME) { pr_err("request wait (busy query) succeeded (expected timeout before submit!)\n"); - goto out_unlock; + goto out_request; } if (i915_request_wait(request, 0, T) != -ETIME) { pr_err("request wait succeeded (expected timeout before submit!)\n"); - goto out_unlock; + goto out_request; } if (i915_request_completed(request)) { pr_err("request completed before submit!!\n"); - goto out_unlock; + goto out_request; } i915_request_add(request); if (i915_request_wait(request, 0, 0) != -ETIME) { pr_err("request wait (busy query) succeeded (expected timeout after submit!)\n"); - goto out_unlock; + goto out_request; } if (i915_request_completed(request)) { pr_err("request completed immediately!\n"); - goto out_unlock; + goto out_request; } if (i915_request_wait(request, 0, T / 2) != -ETIME) { pr_err("request wait succeeded (expected timeout!)\n"); - goto out_unlock; + goto out_request; } if (i915_request_wait(request, 0, T) == -ETIME) { pr_err("request wait timed out!\n"); - goto out_unlock; + goto out_request; } if (!i915_request_completed(request)) { pr_err("request not complete after waiting!\n"); - goto 
out_unlock; + goto out_request; } if (i915_request_wait(request, 0, T) == -ETIME) { pr_err("request wait timed out when already complete!\n"); - goto out_unlock; + goto out_request; } err = 0; +out_request: + i915_request_put(request); out_unlock: mock_device_flush(i915); mutex_unlock(&i915->drm.struct_mutex); -- cgit v1.2.3 From b38565faded7da1f84b2b9a9a6d41ea1ebc48936 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 26 Jun 2019 00:33:48 +0100 Subject: drm/i915/gt: Drop stale commentary for timeline density We no longer allocate a contiguous set of timeline ids for all engines upon creation, so we no longer should assume that the timelines are densely allocated within a context. Hopefully, the set of fences used within a workload are still dense enough for us to take advantage of the compressed radix tree used for the syncmap. Signed-off-by: Chris Wilson Reviewed-by: Daniele Ceraolo Spurio Link: https://patchwork.freedesktop.org/patch/msgid/20190625233349.32371-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_timeline.c | 10 ---------- 1 file changed, 10 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c index 478258274986..3bbb632cb535 100644 --- a/drivers/gpu/drm/i915/gt/intel_timeline.c +++ b/drivers/gpu/drm/i915/gt/intel_timeline.c @@ -210,16 +210,6 @@ int intel_timeline_init(struct intel_timeline *timeline, { void *vaddr; - /* - * Ideally we want a set of engines on a single leaf as we expect - * to mostly be tracking synchronisation between engines. It is not - * a huge issue if this is not the case, but we may want to mitigate - * any page crossing penalties if they become an issue. - * - * Called during early_init before we know how many engines there are. - */ - BUILD_BUG_ON(KSYNCMAP < I915_NUM_ENGINES); - timeline->gt = gt; timeline->pin_count = 0; timeline->has_initial_breadcrumb = !hwsp; -- cgit v1.2.3 From f0ca820cc0067ce4debd9893650e5c188c83b941 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 26 Jun 2019 00:33:49 +0100 Subject: drm/i915/gt: Always call kref_init for the timeline Always initialise the refcount, even for the embedded timelines inside mock devices. Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190625233349.32371-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_timeline.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c index 3bbb632cb535..6daa9eb59e19 100644 --- a/drivers/gpu/drm/i915/gt/intel_timeline.c +++ b/drivers/gpu/drm/i915/gt/intel_timeline.c @@ -210,8 +210,11 @@ int intel_timeline_init(struct intel_timeline *timeline, { void *vaddr; + kref_init(&timeline->kref); + timeline->gt = gt; timeline->pin_count = 0; + timeline->has_initial_breadcrumb = !hwsp; timeline->hwsp_cacheline = NULL; @@ -357,8 +360,6 @@ intel_timeline_create(struct intel_gt *gt, struct i915_vma *global_hwsp) return ERR_PTR(err); } - kref_init(&timeline->kref); - return timeline; } -- cgit v1.2.3 From cba17e5d647b4dabb3c9d8bc1bf2636f1a3a9687 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 25 Jun 2019 20:48:59 +0100 Subject: drm/i915/gt: Add some debug tracing for context pinning Add the context pin/unpin events to the trace for post-mortem debugging. 
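(Illustrative sketch, not part of the patch: the effect of the added GEM_TRACE calls is to record only the meaningful pin/unpin transitions of a refcounted object. The toy_* names below are hypothetical stand-ins for intel_context, its atomic pin_count and GEM_TRACE; this is a standalone userspace model, not the driver code.)

    #include <stdatomic.h>
    #include <stdio.h>

    struct toy_context {
        const char *name;
        atomic_int pin_count;
    };

    static void toy_context_pin(struct toy_context *ce)
    {
        /* trace only the 0 -> 1 transition, i.e. the real pin */
        if (atomic_fetch_add(&ce->pin_count, 1) == 0)
            fprintf(stderr, "%s: context pinned\n", ce->name);
    }

    static void toy_context_unpin(struct toy_context *ce)
    {
        /* trace only the 1 -> 0 transition, i.e. the last unpin */
        if (atomic_fetch_sub(&ce->pin_count, 1) == 1)
            fprintf(stderr, "%s: context unpinned\n", ce->name);
    }

    int main(void)
    {
        struct toy_context ce = { .name = "rcs0", .pin_count = 0 };

        toy_context_pin(&ce);    /* traces: first user */
        toy_context_pin(&ce);    /* silent: already pinned */
        toy_context_unpin(&ce);  /* silent: still in use */
        toy_context_unpin(&ce);  /* traces: last user gone */
        return 0;
    }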
Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190625194859.28005-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_context.c | 10 ++++++++++ drivers/gpu/drm/i915/gt/intel_ringbuffer.c | 3 +++ 2 files changed, 13 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 938dd032b820..1110fc8f657a 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -59,6 +59,10 @@ int __intel_context_do_pin(struct intel_context *ce) if (err) goto err; + GEM_TRACE("%s context:%llx pin ring:{head:%04x, tail:%04x}\n", + ce->engine->name, ce->ring->timeline->fence_context, + ce->ring->head, ce->ring->tail); + i915_gem_context_get(ce->gem_context); /* for ctx->ppgtt */ smp_mb__before_atomic(); /* flush pin before it is visible */ @@ -85,6 +89,9 @@ void intel_context_unpin(struct intel_context *ce) mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING); if (likely(atomic_dec_and_test(&ce->pin_count))) { + GEM_TRACE("%s context:%llx retire\n", + ce->engine->name, ce->ring->timeline->fence_context); + ce->ops->unpin(ce); i915_gem_context_put(ce->gem_context); @@ -127,6 +134,9 @@ static void __intel_context_retire(struct i915_active *active) { struct intel_context *ce = container_of(active, typeof(*ce), active); + GEM_TRACE("%s context:%llx retire\n", + ce->engine->name, ce->ring->timeline->fence_context); + if (ce->state) __context_unpin_state(ce->state); diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c index f094406dcc56..81f9b0422e6a 100644 --- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c @@ -1197,6 +1197,7 @@ int intel_ring_pin(struct intel_ring *ring) GEM_BUG_ON(ring->vaddr); ring->vaddr = addr; + GEM_TRACE("ring:%llx pin\n", ring->timeline->fence_context); return 0; err_ring: @@ -1223,6 +1224,8 @@ void intel_ring_unpin(struct intel_ring *ring) if (!atomic_dec_and_test(&ring->pin_count)) return; + GEM_TRACE("ring:%llx unpin\n", ring->timeline->fence_context); + /* Discard any unused bytes beyond that submitted to hw. */ intel_ring_reset(ring, ring->tail); -- cgit v1.2.3 From 5ccf2027bb9362f8c4909e980e3856451a55dcb2 Mon Sep 17 00:00:00 2001 From: Lee Shawn C Date: Thu, 20 Jun 2019 08:44:16 -0700 Subject: drm/i915: Check backlight type while doing eDP backlight initializaiton MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If LFP backlight type setting from VBT was "VESA eDP AUX Interface". Driver should check panel capability and try to initialize aux backlight. No matter i915_modparams.enable_dpcd_backlight was enabled or not. v2: access dev_priv->vbt.backlight.type directly and remove unused function. v3: 1. Modify i915.enable_dpcd_backlight type from bool to int and give default value as 0 (disable). 2. Add a judgement to check LFP backlight type was aux interface or not. 
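(Illustrative sketch, not part of the patch: the decision described in v2/v3 above amounts to a tri-state module parameter combined with the VBT backlight type. The toy_* names are simplified stand-ins for i915_modparams.enable_dpcd_backlight and the VBT backlight enum; this is a standalone model of the condition, not the driver code.)

    #include <stdbool.h>
    #include <stdio.h>

    enum toy_backlight_type {
        TOY_BACKLIGHT_PMIC,
        TOY_BACKLIGHT_DISPLAY_DDI,
        TOY_BACKLIGHT_VESA_EDP_AUX,
    };

    /* -1 = follow the VBT, 0 = never use AUX backlight, 1 = always try AUX */
    static bool toy_use_aux_backlight(int modparam, enum toy_backlight_type vbt)
    {
        if (modparam == 0)
            return false;
        if (modparam == -1 && vbt != TOY_BACKLIGHT_VESA_EDP_AUX)
            return false;
        return true;
    }

    int main(void)
    {
        printf("%d\n", toy_use_aux_backlight(-1, TOY_BACKLIGHT_VESA_EDP_AUX)); /* 1 */
        printf("%d\n", toy_use_aux_backlight(-1, TOY_BACKLIGHT_PMIC));         /* 0 */
        printf("%d\n", toy_use_aux_backlight(0, TOY_BACKLIGHT_VESA_EDP_AUX));  /* 0 */
        printf("%d\n", toy_use_aux_backlight(1, TOY_BACKLIGHT_PMIC));          /* 1 */
        return 0;
    }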
Cc: Ville Syrjälä Cc: Jani Nikula Cc: Jose Roberto de Souza Cc: Cooper Chiou Signed-off-by: Lee Shawn C Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/1561045456-12171-1-git-send-email-shawn.c.lee@intel.com --- drivers/gpu/drm/i915/display/intel_bios.h | 1 + drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c | 5 ++++- drivers/gpu/drm/i915/i915_params.c | 5 +++-- drivers/gpu/drm/i915/i915_params.h | 2 +- 4 files changed, 9 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h index 4e42cfaf61a7..0b7be6389a07 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.h +++ b/drivers/gpu/drm/i915/display/intel_bios.h @@ -42,6 +42,7 @@ enum intel_backlight_type { INTEL_BACKLIGHT_DISPLAY_DDI, INTEL_BACKLIGHT_DSI_DCS, INTEL_BACKLIGHT_PANEL_DRIVER_INTERFACE, + INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE, }; struct edp_power_seq { diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c index 7ded95a334db..6b0b73479fb8 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c @@ -264,8 +264,11 @@ intel_dp_aux_display_control_capable(struct intel_connector *connector) int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector) { struct intel_panel *panel = &intel_connector->panel; + struct drm_i915_private *dev_priv = to_i915(intel_connector->base.dev); - if (!i915_modparams.enable_dpcd_backlight) + if (i915_modparams.enable_dpcd_backlight == 0 || + (i915_modparams.enable_dpcd_backlight == -1 && + dev_priv->vbt.backlight.type != INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE)) return -ENODEV; if (!intel_dp_aux_display_control_capable(intel_connector)) diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index 5b07766a1c26..296452f9efe4 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -169,8 +169,9 @@ i915_param_named_unsafe(inject_load_failure, uint, 0400, "Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)"); #endif -i915_param_named(enable_dpcd_backlight, bool, 0600, - "Enable support for DPCD backlight control (default:false)"); +i915_param_named(enable_dpcd_backlight, int, 0600, + "Enable support for DPCD backlight control" + "(-1=use per-VBT LFP backlight type setting, 0=disabled [default], 1=enabled)"); #if IS_ENABLED(CONFIG_DRM_I915_GVT) i915_param_named(enable_gvt, bool, 0400, diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h index a4770ce46bd2..d29ade3b7de6 100644 --- a/drivers/gpu/drm/i915/i915_params.h +++ b/drivers/gpu/drm/i915/i915_params.h @@ -64,6 +64,7 @@ struct drm_printer; param(int, reset, 2) \ param(unsigned int, inject_load_failure, 0) \ param(int, fastboot, -1) \ + param(int, enable_dpcd_backlight, 0) \ param(char *, force_probe, CONFIG_DRM_I915_FORCE_PROBE) \ /* leave bools at the end to not create holes */ \ param(bool, alpha_support, IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT)) \ @@ -76,7 +77,6 @@ struct drm_printer; param(bool, verbose_state_checks, true) \ param(bool, nuclear_pageflip, false) \ param(bool, enable_dp_mst, true) \ - param(bool, enable_dpcd_backlight, false) \ param(bool, enable_gvt, false) #define MEMBER(T, member, ...) 
T member; -- cgit v1.2.3 From d8474795665462dc7888711ae4b8888a4496eda8 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 26 Jun 2019 14:44:31 +0100 Subject: drm/i915/selftests: Serialise nop reset with retirement In order for the reset count to be accurate across our selftest, we need to prevent the background retire worker from modifying our expected state. To preserve the intent of symmetry, we apply this to both i915_reset and i915_reset_engine, even though it strictly only affects i915_reset_engine currently. Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190626134433.6318-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c index 3ceb397c8645..0e0b6c572ae9 100644 --- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c @@ -398,6 +398,7 @@ static int igt_reset_nop(void *arg) count = 0; do { mutex_lock(&i915->drm.struct_mutex); + for_each_engine(engine, i915, id) { int i; @@ -413,11 +414,12 @@ static int igt_reset_nop(void *arg) i915_request_add(rq); } } - mutex_unlock(&i915->drm.struct_mutex); igt_global_reset_lock(i915); i915_reset(i915, ALL_ENGINES, NULL); igt_global_reset_unlock(i915); + + mutex_unlock(&i915->drm.struct_mutex); if (i915_reset_failed(i915)) { err = -EIO; break; @@ -511,9 +513,8 @@ static int igt_reset_nop_engine(void *arg) i915_request_add(rq); } - mutex_unlock(&i915->drm.struct_mutex); - err = i915_reset_engine(engine, NULL); + mutex_unlock(&i915->drm.struct_mutex); if (err) { pr_err("i915_reset_engine failed\n"); break; -- cgit v1.2.3 From 1e5deb2632654c16cd4d2b69cbc24626d9463361 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 26 Jun 2019 14:44:32 +0100 Subject: drm/i915/selftests: Drop manual request wakerefs around hangcheck We no longer need to manually acquire a wakeref for request emission, so drop the redundant wakerefs, letting us test our wakeref handling more precisely. 
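(Illustrative sketch, not part of the patch: the point being made is that once request emission takes its own wakeref, an outer get/put pair around the whole test keeps nothing alive that was not already kept alive. The toy_* names are hypothetical stand-ins for the runtime-pm wakeref; this is a standalone model, not the driver code.)

    #include <assert.h>
    #include <stdio.h>

    struct toy_wakeref {
        int count;  /* device kept awake while count > 0 */
    };

    static void toy_get(struct toy_wakeref *wf) { wf->count++; }
    static void toy_put(struct toy_wakeref *wf) { assert(wf->count > 0); wf->count--; }

    /* Request emission acquires its own wakeref for the request's lifetime. */
    static void toy_emit_and_retire_request(struct toy_wakeref *wf)
    {
        toy_get(wf);
        /* ... request runs and is retired ... */
        toy_put(wf);
    }

    int main(void)
    {
        struct toy_wakeref wf = { 0 };

        /* No outer toy_get()/toy_put() pair is needed around the test:
         * the request itself keeps the device awake while it exists. */
        toy_emit_and_retire_request(&wf);
        printf("wakeref count after test: %d\n", wf.count); /* 0 */
        return 0;
    }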
References: 79ffac8599c4 ("drm/i915: Invert the GEM wakeref hierarchy") Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190626134433.6318-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 7 ------- drivers/gpu/drm/i915/gt/selftest_reset.c | 4 ++-- 2 files changed, 2 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c index 0e0b6c572ae9..cf592a049a71 100644 --- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c @@ -373,7 +373,6 @@ static int igt_reset_nop(void *arg) struct i915_gem_context *ctx; unsigned int reset_count, count; enum intel_engine_id id; - intel_wakeref_t wakeref; struct drm_file *file; IGT_TIMEOUT(end_time); int err = 0; @@ -393,7 +392,6 @@ static int igt_reset_nop(void *arg) } i915_gem_context_clear_bannable(ctx); - wakeref = intel_runtime_pm_get(&i915->runtime_pm); reset_count = i915_reset_count(&i915->gpu_error); count = 0; do { @@ -442,8 +440,6 @@ static int igt_reset_nop(void *arg) err = igt_flush_test(i915, I915_WAIT_LOCKED); mutex_unlock(&i915->drm.struct_mutex); - intel_runtime_pm_put(&i915->runtime_pm, wakeref); - out: mock_file_free(i915, file); if (i915_reset_failed(i915)) @@ -457,7 +453,6 @@ static int igt_reset_nop_engine(void *arg) struct intel_engine_cs *engine; struct i915_gem_context *ctx; enum intel_engine_id id; - intel_wakeref_t wakeref; struct drm_file *file; int err = 0; @@ -479,7 +474,6 @@ static int igt_reset_nop_engine(void *arg) } i915_gem_context_clear_bannable(ctx); - wakeref = intel_runtime_pm_get(&i915->runtime_pm); for_each_engine(engine, i915, id) { unsigned int reset_count, reset_engine_count; unsigned int count; @@ -549,7 +543,6 @@ static int igt_reset_nop_engine(void *arg) err = igt_flush_test(i915, I915_WAIT_LOCKED); mutex_unlock(&i915->drm.struct_mutex); - intel_runtime_pm_put(&i915->runtime_pm, wakeref); out: mock_file_free(i915, file); if (i915_reset_failed(i915)) diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c index 89da9e7cc1ba..64c2c8ab64ec 100644 --- a/drivers/gpu/drm/i915/gt/selftest_reset.c +++ b/drivers/gpu/drm/i915/gt/selftest_reset.c @@ -63,8 +63,8 @@ static int igt_atomic_reset(void *arg) /* Check that the resets are usable from atomic context */ + intel_gt_pm_get(&i915->gt); igt_global_reset_lock(i915); - mutex_lock(&i915->drm.struct_mutex); /* Flush any requests before we get started and check basics */ if (!igt_force_reset(i915)) @@ -89,8 +89,8 @@ static int igt_atomic_reset(void *arg) igt_force_reset(i915); unlock: - mutex_unlock(&i915->drm.struct_mutex); igt_global_reset_unlock(i915); + intel_gt_pm_put(&i915->gt); return err; } -- cgit v1.2.3 From faaa2902b5a965610e7b9c5e702dd5f6b8970c60 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 26 Jun 2019 14:44:33 +0100 Subject: drm/i915/selftests: Fixup atomic reset checking We require that the intel_gpu_reset() was atomic, not the whole of i915_reset() which is guarded by a mutex. However, we do require that i915_reset_engine() is atomic for use from within the submission tasklet. 
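(Illustrative sketch, not part of the patch: the selftest change described here keeps only the reset call itself inside the simulated atomic phase, with the sleeping prepare/finish steps moved outside. The toy_* names and phase table are hypothetical; this is a standalone model of the loop shape, not the driver code.)

    #include <stdio.h>

    struct toy_phase {
        const char *name;
        void (*begin)(void);  /* the real test disables preemption or irqs here */
        void (*end)(void);
    };

    static void nop(void) { }
    static const struct toy_phase phases[] = {
        { "softirq", nop, nop },
        { "hardirq", nop, nop },
        { NULL, NULL, NULL },
    };

    static int toy_gpu_reset(void) { return 0; }  /* must not sleep */
    static void toy_reset_prepare(void) { }       /* may sleep: kept outside */
    static void toy_reset_finish(void) { }        /* may sleep: kept outside */

    int main(void)
    {
        const struct toy_phase *p;
        int err = 0;

        for (p = phases; p->name; p++) {
            toy_reset_prepare();
            p->begin();
            err = toy_gpu_reset();  /* the only call made in "atomic" context */
            p->end();
            toy_reset_finish();
            if (err) {
                fprintf(stderr, "reset failed under %s\n", p->name);
                break;
            }
        }
        return err;
    }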
Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190626134433.6318-3-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/selftest_reset.c | 65 +++++++++++++++++++++++++++++++- 1 file changed, 63 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c index 64c2c8ab64ec..641cf3aee8d5 100644 --- a/drivers/gpu/drm/i915/gt/selftest_reset.c +++ b/drivers/gpu/drm/i915/gt/selftest_reset.c @@ -73,11 +73,13 @@ static int igt_atomic_reset(void *arg) for (p = igt_atomic_phases; p->name; p++) { GEM_TRACE("intel_gpu_reset under %s\n", p->name); - p->critical_section_begin(); reset_prepare(i915); + p->critical_section_begin(); + err = intel_gpu_reset(i915, ALL_ENGINES); - reset_finish(i915); + p->critical_section_end(); + reset_finish(i915); if (err) { pr_err("intel_gpu_reset failed under %s\n", p->name); @@ -95,12 +97,71 @@ unlock: return err; } +static int igt_atomic_engine_reset(void *arg) +{ + struct drm_i915_private *i915 = arg; + const typeof(*igt_atomic_phases) *p; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = 0; + + /* Check that the resets are usable from atomic context */ + + if (!intel_has_reset_engine(i915)) + return 0; + + if (USES_GUC_SUBMISSION(i915)) + return 0; + + intel_gt_pm_get(&i915->gt); + igt_global_reset_lock(i915); + + /* Flush any requests before we get started and check basics */ + if (!igt_force_reset(i915)) + goto out_unlock; + + for_each_engine(engine, i915, id) { + tasklet_disable_nosync(&engine->execlists.tasklet); + intel_engine_pm_get(engine); + + for (p = igt_atomic_phases; p->name; p++) { + GEM_TRACE("i915_reset_engine(%s) under %s\n", + engine->name, p->name); + + p->critical_section_begin(); + err = i915_reset_engine(engine, NULL); + p->critical_section_end(); + + if (err) { + pr_err("i915_reset_engine(%s) failed under %s\n", + engine->name, p->name); + break; + } + } + + intel_engine_pm_put(engine); + tasklet_enable(&engine->execlists.tasklet); + if (err) + break; + } + + /* As we poke around the guts, do a full reset before continuing. */ + igt_force_reset(i915); + +out_unlock: + igt_global_reset_unlock(i915); + intel_gt_pm_put(&i915->gt); + + return err; +} + int intel_reset_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(igt_global_reset), /* attempt to recover GPU first */ SUBTEST(igt_wedged_reset), SUBTEST(igt_atomic_reset), + SUBTEST(igt_atomic_engine_reset), }; intel_wakeref_t wakeref; int err = 0; -- cgit v1.2.3 From 4c888e7bd26f58deb27c2e6ddc90000b89ee9393 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 19 Jun 2019 20:08:39 +0300 Subject: drm/i915: Fix various tracepoints for gen2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Gen2 doesn't have a frame counter and apparently we no longer provide a fake .get_vblank_counter() hook for it. That means all tracepoints calling that hook will oops. Update the tracepoints to use intel_crtc_get_vblank_counter() which will gracefully fall back to using the software counter. This is actually a better approach since we now get (hopefully accurate) frame numbers in the traces. This also gets rid of the raw driver->get_vblank_counter() calls, which we need to do in order to switch to the per-crtc vblank vfuncs. 
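(Illustrative sketch, not part of the patch: the helper the tracepoints switch to falls back to the software-maintained vblank count when no hardware frame counter exists, which is the gen2 case. The toy_* names are hypothetical stand-ins for intel_crtc_get_vblank_counter() and the drm_vblank state; this is a standalone model, not the driver code.)

    #include <stdint.h>
    #include <stdio.h>

    struct toy_crtc {
        uint32_t max_vblank_count;  /* 0 => no usable hardware counter */
        uint32_t hw_frame_counter;  /* what a register read would return */
        uint32_t sw_vblank_count;   /* maintained from the vblank interrupt */
    };

    static uint32_t toy_crtc_get_vblank_counter(const struct toy_crtc *crtc)
    {
        if (!crtc->max_vblank_count)
            return crtc->sw_vblank_count;  /* gen2-style fallback */
        return crtc->hw_frame_counter;
    }

    int main(void)
    {
        struct toy_crtc gen2 = { .max_vblank_count = 0, .sw_vblank_count = 1234 };
        struct toy_crtc gen4 = { .max_vblank_count = 0xffffff, .hw_frame_counter = 42 };

        printf("gen2 frame=%u\n", toy_crtc_get_vblank_counter(&gen2)); /* 1234 */
        printf("gen4 frame=%u\n", toy_crtc_get_vblank_counter(&gen4)); /* 42 */
        return 0;
    }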
v2: Deal with new tracepoints v3: Use a distinct variable name for the internal crtc iterator (Chris) Cc: Shawn Guo Cc: Daniel Vetter Fixes: 967dd4841787 ("drm: remove drm_vblank_no_hw_counter assignment from driver code") Signed-off-by: Ville Syrjälä Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190619170842.20579-2-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 4 +- drivers/gpu/drm/i915/i915_trace.h | 76 ++++++++++++---------------- 2 files changed, 35 insertions(+), 45 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 8592a7d422de..5f30f348b0c0 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -1839,7 +1839,7 @@ static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state) /* FIXME: assert CPU port conditions for SNB+ */ } - trace_intel_pipe_enable(dev_priv, pipe); + trace_intel_pipe_enable(crtc); reg = PIPECONF(cpu_transcoder); val = I915_READ(reg); @@ -1880,7 +1880,7 @@ static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state) */ assert_planes_disabled(crtc); - trace_intel_pipe_disable(dev_priv, pipe); + trace_intel_pipe_disable(crtc); reg = PIPECONF(cpu_transcoder); val = I915_READ(reg); diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index f4ce643b3bc3..cce426b23a24 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -21,24 +21,22 @@ /* watermark/fifo updates */ TRACE_EVENT(intel_pipe_enable, - TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe), - TP_ARGS(dev_priv, pipe), + TP_PROTO(struct intel_crtc *crtc), + TP_ARGS(crtc), TP_STRUCT__entry( __array(u32, frame, 3) __array(u32, scanline, 3) __field(enum pipe, pipe) ), - TP_fast_assign( - enum pipe _pipe; - for_each_pipe(dev_priv, _pipe) { - __entry->frame[_pipe] = - dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, _pipe); - __entry->scanline[_pipe] = - intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, _pipe)); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_crtc *it__; + for_each_intel_crtc(&dev_priv->drm, it__) { + __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__); + __entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__); } - __entry->pipe = pipe; + __entry->pipe = crtc->pipe; ), TP_printk("pipe %c enable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u", @@ -49,8 +47,8 @@ TRACE_EVENT(intel_pipe_enable, ); TRACE_EVENT(intel_pipe_disable, - TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe), - TP_ARGS(dev_priv, pipe), + TP_PROTO(struct intel_crtc *crtc), + TP_ARGS(crtc), TP_STRUCT__entry( __array(u32, frame, 3) @@ -59,14 +57,13 @@ TRACE_EVENT(intel_pipe_disable, ), TP_fast_assign( - enum pipe _pipe; - for_each_pipe(dev_priv, _pipe) { - __entry->frame[_pipe] = - dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, _pipe); - __entry->scanline[_pipe] = - intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, _pipe)); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_crtc *it__; + for_each_intel_crtc(&dev_priv->drm, it__) { + __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__); + __entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__); } - __entry->pipe = pipe; + __entry->pipe = crtc->pipe; ), TP_printk("pipe %c 
disable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u", @@ -89,8 +86,7 @@ TRACE_EVENT(intel_pipe_crc, TP_fast_assign( __entry->pipe = crtc->pipe; - __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, - crtc->pipe); + __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); memcpy(__entry->crcs, crcs, sizeof(__entry->crcs)); ), @@ -112,9 +108,10 @@ TRACE_EVENT(intel_cpu_fifo_underrun, ), TP_fast_assign( + struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); __entry->pipe = pipe; - __entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe); - __entry->scanline = intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe)); + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); ), TP_printk("pipe %c, frame=%u, scanline=%u", @@ -134,9 +131,10 @@ TRACE_EVENT(intel_pch_fifo_underrun, TP_fast_assign( enum pipe pipe = pch_transcoder; + struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); __entry->pipe = pipe; - __entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe); - __entry->scanline = intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe)); + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); ), TP_printk("pch transcoder %c, frame=%u, scanline=%u", @@ -156,12 +154,10 @@ TRACE_EVENT(intel_memory_cxsr, ), TP_fast_assign( - enum pipe pipe; - for_each_pipe(dev_priv, pipe) { - __entry->frame[pipe] = - dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe); - __entry->scanline[pipe] = - intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe)); + struct intel_crtc *crtc; + for_each_intel_crtc(&dev_priv->drm, crtc) { + __entry->frame[crtc->pipe] = intel_crtc_get_vblank_counter(crtc); + __entry->scanline[crtc->pipe] = intel_get_crtc_scanline(crtc); } __entry->old = old; __entry->new = new; @@ -198,8 +194,7 @@ TRACE_EVENT(g4x_wm, TP_fast_assign( __entry->pipe = crtc->pipe; - __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, - crtc->pipe); + __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); __entry->primary = wm->pipe[crtc->pipe].plane[PLANE_PRIMARY]; __entry->sprite = wm->pipe[crtc->pipe].plane[PLANE_SPRITE0]; @@ -243,8 +238,7 @@ TRACE_EVENT(vlv_wm, TP_fast_assign( __entry->pipe = crtc->pipe; - __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, - crtc->pipe); + __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); __entry->level = wm->level; __entry->cxsr = wm->cxsr; @@ -278,8 +272,7 @@ TRACE_EVENT(vlv_fifo_size, TP_fast_assign( __entry->pipe = crtc->pipe; - __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, - crtc->pipe); + __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); __entry->sprite0_start = sprite0_start; __entry->sprite1_start = sprite1_start; @@ -310,8 +303,7 @@ TRACE_EVENT(intel_update_plane, TP_fast_assign( __entry->pipe = crtc->pipe; __entry->name = plane->name; - __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, - crtc->pipe); + __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); memcpy(__entry->src, &plane->state->src, sizeof(__entry->src)); 
memcpy(__entry->dst, &plane->state->dst, sizeof(__entry->dst)); @@ -338,8 +330,7 @@ TRACE_EVENT(intel_disable_plane, TP_fast_assign( __entry->pipe = crtc->pipe; __entry->name = plane->name; - __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, - crtc->pipe); + __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); ), @@ -364,8 +355,7 @@ TRACE_EVENT(i915_pipe_update_start, TP_fast_assign( __entry->pipe = crtc->pipe; - __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, - crtc->pipe); + __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); __entry->min = crtc->debug.min_vbl; __entry->max = crtc->debug.max_vbl; -- cgit v1.2.3 From 08fa8fd0faa5716ed180e109a022e2c0f42c2e4a Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 19 Jun 2019 20:08:40 +0300 Subject: drm/i915: Switch to per-crtc vblank vfuncs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Switch from the driver-wide vblank vfuncs to the per-crtc ones so that we don't have so many platform specific vfuncs in the driver struct. We still need to do something about the rest fo the irq vfuncs... v2: s/INTEL_GEN>=3/IS_GEN3/ Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190619170842.20579-3-ville.syrjala@linux.intel.com Reviewed-by: Chris Wilson --- drivers/gpu/drm/i915/display/intel_display.c | 104 +++++++++++++++++++++++---- drivers/gpu/drm/i915/i915_irq.c | 93 +++++++++++------------- drivers/gpu/drm/i915/i915_irq.h | 14 ++++ 3 files changed, 143 insertions(+), 68 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 5f30f348b0c0..e55bd75528c1 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -13567,7 +13567,7 @@ u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc) if (!vblank->max_vblank_count) return (u32)drm_crtc_accurate_vblank_count(&crtc->base); - return dev->driver->get_vblank_counter(dev, crtc->pipe); + return crtc->base.funcs->get_vblank_counter(&crtc->base); } static void intel_update_crtc(struct drm_crtc *crtc, @@ -14105,18 +14105,6 @@ static int intel_atomic_commit(struct drm_device *dev, return 0; } -static const struct drm_crtc_funcs intel_crtc_funcs = { - .gamma_set = drm_atomic_helper_legacy_gamma_set, - .set_config = drm_atomic_helper_set_config, - .destroy = intel_crtc_destroy, - .page_flip = drm_atomic_helper_page_flip, - .atomic_duplicate_state = intel_crtc_duplicate_state, - .atomic_destroy_state = intel_crtc_destroy_state, - .set_crc_source = intel_crtc_set_crc_source, - .verify_crc_source = intel_crtc_verify_crc_source, - .get_crc_sources = intel_crtc_get_crc_sources, -}; - struct wait_rps_boost { struct wait_queue_entry wait; @@ -14910,8 +14898,76 @@ static void intel_crtc_init_scalers(struct intel_crtc *crtc, scaler_state->scaler_id = -1; } +#define INTEL_CRTC_FUNCS \ + .gamma_set = drm_atomic_helper_legacy_gamma_set, \ + .set_config = drm_atomic_helper_set_config, \ + .destroy = intel_crtc_destroy, \ + .page_flip = drm_atomic_helper_page_flip, \ + .atomic_duplicate_state = intel_crtc_duplicate_state, \ + .atomic_destroy_state = intel_crtc_destroy_state, \ + .set_crc_source = intel_crtc_set_crc_source, \ + .verify_crc_source = intel_crtc_verify_crc_source, \ + .get_crc_sources = intel_crtc_get_crc_sources + +static const 
struct drm_crtc_funcs bdw_crtc_funcs = { + INTEL_CRTC_FUNCS, + + .get_vblank_counter = g4x_get_vblank_counter, + .enable_vblank = bdw_enable_vblank, + .disable_vblank = bdw_disable_vblank, +}; + +static const struct drm_crtc_funcs ilk_crtc_funcs = { + INTEL_CRTC_FUNCS, + + .get_vblank_counter = g4x_get_vblank_counter, + .enable_vblank = ilk_enable_vblank, + .disable_vblank = ilk_disable_vblank, +}; + +static const struct drm_crtc_funcs g4x_crtc_funcs = { + INTEL_CRTC_FUNCS, + + .get_vblank_counter = g4x_get_vblank_counter, + .enable_vblank = i965_enable_vblank, + .disable_vblank = i965_disable_vblank, +}; + +static const struct drm_crtc_funcs i965_crtc_funcs = { + INTEL_CRTC_FUNCS, + + .get_vblank_counter = i915_get_vblank_counter, + .enable_vblank = i965_enable_vblank, + .disable_vblank = i965_disable_vblank, +}; + +static const struct drm_crtc_funcs i945gm_crtc_funcs = { + INTEL_CRTC_FUNCS, + + .get_vblank_counter = i915_get_vblank_counter, + .enable_vblank = i945gm_enable_vblank, + .disable_vblank = i945gm_disable_vblank, +}; + +static const struct drm_crtc_funcs i915_crtc_funcs = { + INTEL_CRTC_FUNCS, + + .get_vblank_counter = i915_get_vblank_counter, + .enable_vblank = i8xx_enable_vblank, + .disable_vblank = i8xx_disable_vblank, +}; + +static const struct drm_crtc_funcs i8xx_crtc_funcs = { + INTEL_CRTC_FUNCS, + + /* no hw vblank counter */ + .enable_vblank = i8xx_enable_vblank, + .disable_vblank = i8xx_disable_vblank, +}; + static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) { + const struct drm_crtc_funcs *funcs; struct intel_crtc *intel_crtc; struct intel_crtc_state *crtc_state = NULL; struct intel_plane *primary = NULL; @@ -14955,10 +15011,28 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) } intel_crtc->plane_ids_mask |= BIT(cursor->id); + if (HAS_GMCH(dev_priv)) { + if (IS_CHERRYVIEW(dev_priv) || + IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv)) + funcs = &g4x_crtc_funcs; + else if (IS_GEN(dev_priv, 4)) + funcs = &i965_crtc_funcs; + else if (IS_I945GM(dev_priv)) + funcs = &i945gm_crtc_funcs; + else if (IS_GEN(dev_priv, 3)) + funcs = &i915_crtc_funcs; + else + funcs = &i8xx_crtc_funcs; + } else { + if (INTEL_GEN(dev_priv) >= 8) + funcs = &bdw_crtc_funcs; + else + funcs = &ilk_crtc_funcs; + } + ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base, &primary->base, &cursor->base, - &intel_crtc_funcs, - "pipe %c", pipe_name(pipe)); + funcs, "pipe %c", pipe_name(pipe)); if (ret) goto fail; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index b2e27b5b0df9..4fbe8d90950a 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -924,11 +924,12 @@ static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv) /* Called from drm generic code, passed a 'crtc', which * we use as a pipe index */ -static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe) +u32 i915_get_vblank_counter(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; + struct drm_i915_private *dev_priv = to_i915(crtc->dev); + struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)]; const struct drm_display_mode *mode = &vblank->hwmode; + enum pipe pipe = to_intel_crtc(crtc)->pipe; i915_reg_t high_frame, low_frame; u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal; unsigned long irqflags; @@ -989,9 +990,10 @@ static u32 i915_get_vblank_counter(struct drm_device 
*dev, unsigned int pipe) return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff; } -static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe) +u32 g4x_get_vblank_counter(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->dev); + enum pipe pipe = to_intel_crtc(crtc)->pipe; return I915_READ(PIPE_FRMCOUNT_G4X(pipe)); } @@ -3243,9 +3245,10 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg) /* Called from drm generic code, passed 'crtc' which * we use as a pipe index */ -static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe) +int i8xx_enable_vblank(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->dev); + enum pipe pipe = to_intel_crtc(crtc)->pipe; unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); @@ -3255,19 +3258,20 @@ static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe) return 0; } -static int i945gm_enable_vblank(struct drm_device *dev, unsigned int pipe) +int i945gm_enable_vblank(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->dev); if (dev_priv->i945gm_vblank.enabled++ == 0) schedule_work(&dev_priv->i945gm_vblank.work); - return i8xx_enable_vblank(dev, pipe); + return i8xx_enable_vblank(crtc); } -static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe) +int i965_enable_vblank(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->dev); + enum pipe pipe = to_intel_crtc(crtc)->pipe; unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); @@ -3278,9 +3282,10 @@ static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe) return 0; } -static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe) +int ilk_enable_vblank(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->dev); + enum pipe pipe = to_intel_crtc(crtc)->pipe; unsigned long irqflags; u32 bit = INTEL_GEN(dev_priv) >= 7 ? DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); @@ -3293,14 +3298,15 @@ static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe) * PSR is active as no frames are generated. */ if (HAS_PSR(dev_priv)) - drm_vblank_restore(dev, pipe); + drm_crtc_vblank_restore(crtc); return 0; } -static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe) +int bdw_enable_vblank(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->dev); + enum pipe pipe = to_intel_crtc(crtc)->pipe; unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); @@ -3311,7 +3317,7 @@ static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe) * PSR is active as no frames are generated, so check only for PSR. 
*/ if (HAS_PSR(dev_priv)) - drm_vblank_restore(dev, pipe); + drm_crtc_vblank_restore(crtc); return 0; } @@ -3319,9 +3325,10 @@ static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe) /* Called from drm generic code, passed 'crtc' which * we use as a pipe index */ -static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe) +void i8xx_disable_vblank(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->dev); + enum pipe pipe = to_intel_crtc(crtc)->pipe; unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); @@ -3329,19 +3336,20 @@ static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe) spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } -static void i945gm_disable_vblank(struct drm_device *dev, unsigned int pipe) +void i945gm_disable_vblank(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->dev); - i8xx_disable_vblank(dev, pipe); + i8xx_disable_vblank(crtc); if (--dev_priv->i945gm_vblank.enabled == 0) schedule_work(&dev_priv->i945gm_vblank.work); } -static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe) +void i965_disable_vblank(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->dev); + enum pipe pipe = to_intel_crtc(crtc)->pipe; unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); @@ -3350,9 +3358,10 @@ static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe) spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } -static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe) +void ilk_disable_vblank(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->dev); + enum pipe pipe = to_intel_crtc(crtc)->pipe; unsigned long irqflags; u32 bit = INTEL_GEN(dev_priv) >= 7 ? 
DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); @@ -3362,9 +3371,10 @@ static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe) spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } -static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) +void bdw_disable_vblank(struct drm_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->dev); + enum pipe pipe = to_intel_crtc(crtc)->pipe; unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); @@ -3372,7 +3382,7 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } -static void i945gm_vblank_work_func(struct work_struct *work) +void i945gm_vblank_work_func(struct work_struct *work) { struct drm_i915_private *dev_priv = container_of(work, struct drm_i915_private, i945gm_vblank.work); @@ -4805,11 +4815,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv) if (INTEL_GEN(dev_priv) >= 8) rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC; - if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) - dev->driver->get_vblank_counter = g4x_get_vblank_counter; - else if (INTEL_GEN(dev_priv) >= 3) - dev->driver->get_vblank_counter = i915_get_vblank_counter; - dev->vblank_disable_immediate = true; /* Most platforms treat the display irq block as an always-on @@ -4839,32 +4844,24 @@ void intel_irq_init(struct drm_i915_private *dev_priv) dev->driver->irq_preinstall = cherryview_irq_reset; dev->driver->irq_postinstall = cherryview_irq_postinstall; dev->driver->irq_uninstall = cherryview_irq_reset; - dev->driver->enable_vblank = i965_enable_vblank; - dev->driver->disable_vblank = i965_disable_vblank; dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; } else if (IS_VALLEYVIEW(dev_priv)) { dev->driver->irq_handler = valleyview_irq_handler; dev->driver->irq_preinstall = valleyview_irq_reset; dev->driver->irq_postinstall = valleyview_irq_postinstall; dev->driver->irq_uninstall = valleyview_irq_reset; - dev->driver->enable_vblank = i965_enable_vblank; - dev->driver->disable_vblank = i965_disable_vblank; dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; } else if (INTEL_GEN(dev_priv) >= 11) { dev->driver->irq_handler = gen11_irq_handler; dev->driver->irq_preinstall = gen11_irq_reset; dev->driver->irq_postinstall = gen11_irq_postinstall; dev->driver->irq_uninstall = gen11_irq_reset; - dev->driver->enable_vblank = gen8_enable_vblank; - dev->driver->disable_vblank = gen8_disable_vblank; dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup; } else if (INTEL_GEN(dev_priv) >= 8) { dev->driver->irq_handler = gen8_irq_handler; dev->driver->irq_preinstall = gen8_irq_reset; dev->driver->irq_postinstall = gen8_irq_postinstall; dev->driver->irq_uninstall = gen8_irq_reset; - dev->driver->enable_vblank = gen8_enable_vblank; - dev->driver->disable_vblank = gen8_disable_vblank; if (IS_GEN9_LP(dev_priv)) dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT) @@ -4876,8 +4873,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv) dev->driver->irq_preinstall = ironlake_irq_reset; dev->driver->irq_postinstall = ironlake_irq_postinstall; dev->driver->irq_uninstall = ironlake_irq_reset; - dev->driver->enable_vblank = ironlake_enable_vblank; - dev->driver->disable_vblank = ironlake_disable_vblank; dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; } else { if (IS_GEN(dev_priv, 2)) { @@ -4885,29 +4880,21 @@ void 
intel_irq_init(struct drm_i915_private *dev_priv) dev->driver->irq_postinstall = i8xx_irq_postinstall; dev->driver->irq_handler = i8xx_irq_handler; dev->driver->irq_uninstall = i8xx_irq_reset; - dev->driver->enable_vblank = i8xx_enable_vblank; - dev->driver->disable_vblank = i8xx_disable_vblank; } else if (IS_I945GM(dev_priv)) { dev->driver->irq_preinstall = i915_irq_reset; dev->driver->irq_postinstall = i915_irq_postinstall; dev->driver->irq_uninstall = i915_irq_reset; dev->driver->irq_handler = i915_irq_handler; - dev->driver->enable_vblank = i945gm_enable_vblank; - dev->driver->disable_vblank = i945gm_disable_vblank; } else if (IS_GEN(dev_priv, 3)) { dev->driver->irq_preinstall = i915_irq_reset; dev->driver->irq_postinstall = i915_irq_postinstall; dev->driver->irq_uninstall = i915_irq_reset; dev->driver->irq_handler = i915_irq_handler; - dev->driver->enable_vblank = i8xx_enable_vblank; - dev->driver->disable_vblank = i8xx_disable_vblank; } else { dev->driver->irq_preinstall = i965_irq_reset; dev->driver->irq_postinstall = i965_irq_postinstall; dev->driver->irq_uninstall = i965_irq_reset; dev->driver->irq_handler = i965_irq_handler; - dev->driver->enable_vblank = i965_enable_vblank; - dev->driver->disable_vblank = i965_disable_vblank; } if (I915_HAS_HOTPLUG(dev_priv)) dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h index cb25dd213308..ef782e5ab240 100644 --- a/drivers/gpu/drm/i915/i915_irq.h +++ b/drivers/gpu/drm/i915/i915_irq.h @@ -114,4 +114,18 @@ void gen11_reset_guc_interrupts(struct drm_i915_private *i915); void gen11_enable_guc_interrupts(struct drm_i915_private *i915); void gen11_disable_guc_interrupts(struct drm_i915_private *i915); +u32 i915_get_vblank_counter(struct drm_crtc *crtc); +u32 g4x_get_vblank_counter(struct drm_crtc *crtc); + +int i8xx_enable_vblank(struct drm_crtc *crtc); +int i945gm_enable_vblank(struct drm_crtc *crtc); +int i965_enable_vblank(struct drm_crtc *crtc); +int ilk_enable_vblank(struct drm_crtc *crtc); +int bdw_enable_vblank(struct drm_crtc *crtc); +void i8xx_disable_vblank(struct drm_crtc *crtc); +void i945gm_disable_vblank(struct drm_crtc *crtc); +void i965_disable_vblank(struct drm_crtc *crtc); +void ilk_disable_vblank(struct drm_crtc *crtc); +void bdw_disable_vblank(struct drm_crtc *crtc); + #endif /* __I915_IRQ_H__ */ -- cgit v1.2.3 From b318b82455bd9b2899a61108a6d84d4a2d4b6df8 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Thu, 20 Jun 2019 13:33:34 +0300 Subject: drm/i915: Nuke drm_driver irq vfuncs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Stop using the irq vfuncs under drm_driver. That's not going to fly in a mixed gen environment since the structure is shared between all the devices. v2: Allow intel_irq_uninstall() to be called twice due to intel_modeset_cleanup() calling it as well. Toss in a FIXME to remind us that this is not great. 
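The key idea of the patch above and the one that follows is to stop writing per-platform function pointers into the drm_driver structure, which is shared by every bound device, and instead pick the handlers per device, as the new intel_irq_handler()/intel_irq_reset()/intel_irq_postinstall() helpers do. As a rough illustration of that dispatch pattern, here is a minimal stand-alone C sketch; it is user-space only, and the struct fields and handler names are invented for the example rather than taken from i915:

#include <stdio.h>
#include <stdbool.h>

struct fake_device {
    const char *name;
    int gen;            /* stand-in for INTEL_GEN() */
    bool has_gmch;      /* stand-in for HAS_GMCH() */
};

typedef void (*irq_handler_t)(struct fake_device *dev);

static void gmch_irq_handler(struct fake_device *dev)
{
    printf("%s: GMCH-style handler\n", dev->name);
}

static void gen8_style_irq_handler(struct fake_device *dev)
{
    printf("%s: gen8+ handler\n", dev->name);
}

static void ilk_style_irq_handler(struct fake_device *dev)
{
    printf("%s: ironlake handler\n", dev->name);
}

/* The choice lives with the device, not in a driver-wide vfunc table. */
static irq_handler_t pick_irq_handler(const struct fake_device *dev)
{
    if (dev->has_gmch)
        return gmch_irq_handler;
    if (dev->gen >= 8)
        return gen8_style_irq_handler;
    return ilk_style_irq_handler;
}

int main(void)
{
    struct fake_device devs[] = {
        { "vlv",  7, true  },
        { "icl", 11, false },
        { "ilk",  5, false },
    };

    for (unsigned int i = 0; i < sizeof(devs) / sizeof(devs[0]); i++)
        pick_irq_handler(&devs[i])(&devs[i]);

    return 0;
}

Because nothing device-specific is stored in a structure the devices share, two GPUs of different generations can be driven by the same module without stepping on each other's hooks.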
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190620103334.15651-1-ville.syrjala@linux.intel.com Reviewed-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.c | 2 +- drivers/gpu/drm/i915/i915_irq.c | 289 ++++++++++++++++++++-------------------- 2 files changed, 149 insertions(+), 142 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index f5b7c37c165f..441a68338b9b 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -756,7 +756,7 @@ cleanup_gem: cleanup_modeset: intel_modeset_cleanup(dev); cleanup_irq: - drm_irq_uninstall(dev); + intel_irq_uninstall(dev_priv); intel_gmbus_teardown(dev_priv); cleanup_csr: intel_csr_ucode_fini(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 4fbe8d90950a..c76c498769d8 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -2187,8 +2187,7 @@ static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, static irqreturn_t valleyview_irq_handler(int irq, void *arg) { - struct drm_device *dev = arg; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = arg; irqreturn_t ret = IRQ_NONE; if (!intel_irqs_enabled(dev_priv)) @@ -2273,8 +2272,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) static irqreturn_t cherryview_irq_handler(int irq, void *arg) { - struct drm_device *dev = arg; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = arg; irqreturn_t ret = IRQ_NONE; if (!intel_irqs_enabled(dev_priv)) @@ -2693,8 +2691,7 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv, */ static irqreturn_t ironlake_irq_handler(int irq, void *arg) { - struct drm_device *dev = arg; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = arg; u32 de_iir, gt_iir, de_ier, sde_ier = 0; irqreturn_t ret = IRQ_NONE; @@ -3004,7 +3001,7 @@ static inline void gen8_master_intr_enable(void __iomem * const regs) static irqreturn_t gen8_irq_handler(int irq, void *arg) { - struct drm_i915_private *dev_priv = to_i915(arg); + struct drm_i915_private *dev_priv = arg; void __iomem * const regs = dev_priv->uncore.regs; u32 master_ctl; u32 gt_iir[4]; @@ -3203,7 +3200,7 @@ static inline void gen11_master_intr_enable(void __iomem * const regs) static irqreturn_t gen11_irq_handler(int irq, void *arg) { - struct drm_i915_private * const i915 = to_i915(arg); + struct drm_i915_private * const i915 = arg; void __iomem * const regs = i915->uncore.regs; u32 master_ctl; u32 gu_misc_iir; @@ -3457,10 +3454,8 @@ static void ibx_irq_reset(struct drm_i915_private *dev_priv) * * This function needs to be called before interrupts are enabled. 
*/ -static void ibx_irq_pre_postinstall(struct drm_device *dev) +static void ibx_irq_pre_postinstall(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - if (HAS_PCH_NOP(dev_priv)) return; @@ -3529,9 +3524,8 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) /* drm_dma.h hooks */ -static void ironlake_irq_reset(struct drm_device *dev) +static void ironlake_irq_reset(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_uncore *uncore = &dev_priv->uncore; GEN3_IRQ_RESET(uncore, DE); @@ -3548,10 +3542,8 @@ static void ironlake_irq_reset(struct drm_device *dev) ibx_irq_reset(dev_priv); } -static void valleyview_irq_reset(struct drm_device *dev) +static void valleyview_irq_reset(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - I915_WRITE(VLV_MASTER_IER, 0); POSTING_READ(VLV_MASTER_IER); @@ -3573,9 +3565,8 @@ static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) GEN8_IRQ_RESET_NDX(uncore, GT, 3); } -static void gen8_irq_reset(struct drm_device *dev) +static void gen8_irq_reset(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_uncore *uncore = &dev_priv->uncore; int pipe; @@ -3618,9 +3609,8 @@ static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv) I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~0); } -static void gen11_irq_reset(struct drm_device *dev) +static void gen11_irq_reset(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_uncore *uncore = &dev_priv->uncore; int pipe; @@ -3693,9 +3683,8 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, synchronize_irq(dev_priv->drm.irq); } -static void cherryview_irq_reset(struct drm_device *dev) +static void cherryview_irq_reset(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_uncore *uncore = &dev_priv->uncore; I915_WRITE(GEN8_MASTER_IRQ, 0); @@ -3960,9 +3949,8 @@ static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) __bxt_hpd_detection_setup(dev_priv, enabled_irqs); } -static void ibx_irq_postinstall(struct drm_device *dev) +static void ibx_irq_postinstall(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); u32 mask; if (HAS_PCH_NOP(dev_priv)) @@ -3985,9 +3973,8 @@ static void ibx_irq_postinstall(struct drm_device *dev) spt_hpd_detection_setup(dev_priv); } -static void gen5_gt_irq_postinstall(struct drm_device *dev) +static void gen5_gt_irq_postinstall(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_uncore *uncore = &dev_priv->uncore; u32 pm_irqs, gt_irqs; @@ -4024,9 +4011,8 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev) } } -static int ironlake_irq_postinstall(struct drm_device *dev) +static void ironlake_irq_postinstall(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_uncore *uncore = &dev_priv->uncore; u32 display_mask, extra_mask; @@ -4053,16 +4039,16 @@ static int ironlake_irq_postinstall(struct drm_device *dev) dev_priv->irq_mask = ~display_mask; - ibx_irq_pre_postinstall(dev); + ibx_irq_pre_postinstall(dev_priv); GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask, display_mask | extra_mask); - gen5_gt_irq_postinstall(dev); + gen5_gt_irq_postinstall(dev_priv); ilk_hpd_detection_setup(dev_priv); - ibx_irq_postinstall(dev); + 
ibx_irq_postinstall(dev_priv); if (IS_IRONLAKE_M(dev_priv)) { /* Enable PCU event interrupts @@ -4074,8 +4060,6 @@ static int ironlake_irq_postinstall(struct drm_device *dev) ilk_enable_display_irq(dev_priv, DE_PCU_EVENT); spin_unlock_irq(&dev_priv->irq_lock); } - - return 0; } void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) @@ -4107,11 +4091,9 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) } -static int valleyview_irq_postinstall(struct drm_device *dev) +static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - - gen5_gt_irq_postinstall(dev); + gen5_gt_irq_postinstall(dev_priv); spin_lock_irq(&dev_priv->irq_lock); if (dev_priv->display_irqs_enabled) @@ -4120,8 +4102,6 @@ static int valleyview_irq_postinstall(struct drm_device *dev) I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); POSTING_READ(VLV_MASTER_IER); - - return 0; } static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) @@ -4228,22 +4208,18 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) } } -static int gen8_irq_postinstall(struct drm_device *dev) +static void gen8_irq_postinstall(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - if (HAS_PCH_SPLIT(dev_priv)) - ibx_irq_pre_postinstall(dev); + ibx_irq_pre_postinstall(dev_priv); gen8_gt_irq_postinstall(dev_priv); gen8_de_irq_postinstall(dev_priv); if (HAS_PCH_SPLIT(dev_priv)) - ibx_irq_postinstall(dev); + ibx_irq_postinstall(dev_priv); gen8_master_intr_enable(dev_priv->uncore.regs); - - return 0; } static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv) @@ -4277,9 +4253,8 @@ static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv) I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~0); } -static void icp_irq_postinstall(struct drm_device *dev) +static void icp_irq_postinstall(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); u32 mask = SDE_GMBUS_ICP; WARN_ON(I915_READ(SDEIER) != 0); @@ -4292,14 +4267,13 @@ static void icp_irq_postinstall(struct drm_device *dev) icp_hpd_detection_setup(dev_priv); } -static int gen11_irq_postinstall(struct drm_device *dev) +static void gen11_irq_postinstall(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_uncore *uncore = &dev_priv->uncore; u32 gu_misc_masked = GEN11_GU_MISC_GSE; if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) - icp_irq_postinstall(dev); + icp_irq_postinstall(dev_priv); gen11_gt_irq_postinstall(dev_priv); gen8_de_irq_postinstall(dev_priv); @@ -4310,14 +4284,10 @@ static int gen11_irq_postinstall(struct drm_device *dev) gen11_master_intr_enable(dev_priv->uncore.regs); POSTING_READ(GEN11_GFX_MSTR_IRQ); - - return 0; } -static int cherryview_irq_postinstall(struct drm_device *dev) +static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - gen8_gt_irq_postinstall(dev_priv); spin_lock_irq(&dev_priv->irq_lock); @@ -4327,13 +4297,10 @@ static int cherryview_irq_postinstall(struct drm_device *dev) I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); POSTING_READ(GEN8_MASTER_IRQ); - - return 0; } -static void i8xx_irq_reset(struct drm_device *dev) +static void i8xx_irq_reset(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_uncore *uncore = &dev_priv->uncore; i9xx_pipestat_irq_reset(dev_priv); @@ -4341,9 +4308,8 
@@ static void i8xx_irq_reset(struct drm_device *dev) GEN2_IRQ_RESET(uncore); } -static int i8xx_irq_postinstall(struct drm_device *dev) +static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_uncore *uncore = &dev_priv->uncore; u16 enable_mask; @@ -4372,8 +4338,6 @@ static int i8xx_irq_postinstall(struct drm_device *dev) i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); spin_unlock_irq(&dev_priv->irq_lock); - - return 0; } static void i8xx_error_irq_ack(struct drm_i915_private *i915, @@ -4454,8 +4418,7 @@ static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv, static irqreturn_t i8xx_irq_handler(int irq, void *arg) { - struct drm_device *dev = arg; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = arg; irqreturn_t ret = IRQ_NONE; if (!intel_irqs_enabled(dev_priv)) @@ -4498,9 +4461,8 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) return ret; } -static void i915_irq_reset(struct drm_device *dev) +static void i915_irq_reset(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_uncore *uncore = &dev_priv->uncore; if (I915_HAS_HOTPLUG(dev_priv)) { @@ -4513,9 +4475,8 @@ static void i915_irq_reset(struct drm_device *dev) GEN3_IRQ_RESET(uncore, GEN2_); } -static int i915_irq_postinstall(struct drm_device *dev) +static void i915_irq_postinstall(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_uncore *uncore = &dev_priv->uncore; u32 enable_mask; @@ -4553,14 +4514,11 @@ static int i915_irq_postinstall(struct drm_device *dev) spin_unlock_irq(&dev_priv->irq_lock); i915_enable_asle_pipestat(dev_priv); - - return 0; } static irqreturn_t i915_irq_handler(int irq, void *arg) { - struct drm_device *dev = arg; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = arg; irqreturn_t ret = IRQ_NONE; if (!intel_irqs_enabled(dev_priv)) @@ -4611,9 +4569,8 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) return ret; } -static void i965_irq_reset(struct drm_device *dev) +static void i965_irq_reset(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_uncore *uncore = &dev_priv->uncore; i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); @@ -4624,9 +4581,8 @@ static void i965_irq_reset(struct drm_device *dev) GEN3_IRQ_RESET(uncore, GEN2_); } -static int i965_irq_postinstall(struct drm_device *dev) +static void i965_irq_postinstall(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_uncore *uncore = &dev_priv->uncore; u32 enable_mask; u32 error_mask; @@ -4676,8 +4632,6 @@ static int i965_irq_postinstall(struct drm_device *dev) spin_unlock_irq(&dev_priv->irq_lock); i915_enable_asle_pipestat(dev_priv); - - return 0; } static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv) @@ -4707,8 +4661,7 @@ static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv) static irqreturn_t i965_irq_handler(int irq, void *arg) { - struct drm_device *dev = arg; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = arg; irqreturn_t ret = IRQ_NONE; if (!intel_irqs_enabled(dev_priv)) @@ -4839,65 +4792,18 @@ void intel_irq_init(struct drm_i915_private *dev_priv) dev->driver->get_vblank_timestamp = 
drm_calc_vbltimestamp_from_scanoutpos; dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; - if (IS_CHERRYVIEW(dev_priv)) { - dev->driver->irq_handler = cherryview_irq_handler; - dev->driver->irq_preinstall = cherryview_irq_reset; - dev->driver->irq_postinstall = cherryview_irq_postinstall; - dev->driver->irq_uninstall = cherryview_irq_reset; - dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; - } else if (IS_VALLEYVIEW(dev_priv)) { - dev->driver->irq_handler = valleyview_irq_handler; - dev->driver->irq_preinstall = valleyview_irq_reset; - dev->driver->irq_postinstall = valleyview_irq_postinstall; - dev->driver->irq_uninstall = valleyview_irq_reset; - dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; - } else if (INTEL_GEN(dev_priv) >= 11) { - dev->driver->irq_handler = gen11_irq_handler; - dev->driver->irq_preinstall = gen11_irq_reset; - dev->driver->irq_postinstall = gen11_irq_postinstall; - dev->driver->irq_uninstall = gen11_irq_reset; - dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup; - } else if (INTEL_GEN(dev_priv) >= 8) { - dev->driver->irq_handler = gen8_irq_handler; - dev->driver->irq_preinstall = gen8_irq_reset; - dev->driver->irq_postinstall = gen8_irq_postinstall; - dev->driver->irq_uninstall = gen8_irq_reset; - if (IS_GEN9_LP(dev_priv)) + if (HAS_GMCH(dev_priv)) { + if (I915_HAS_HOTPLUG(dev_priv)) + dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; + } else { + if (INTEL_GEN(dev_priv) >= 11) + dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup; + else if (IS_GEN9_LP(dev_priv)) dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT) dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup; else dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; - } else if (HAS_PCH_SPLIT(dev_priv)) { - dev->driver->irq_handler = ironlake_irq_handler; - dev->driver->irq_preinstall = ironlake_irq_reset; - dev->driver->irq_postinstall = ironlake_irq_postinstall; - dev->driver->irq_uninstall = ironlake_irq_reset; - dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; - } else { - if (IS_GEN(dev_priv, 2)) { - dev->driver->irq_preinstall = i8xx_irq_reset; - dev->driver->irq_postinstall = i8xx_irq_postinstall; - dev->driver->irq_handler = i8xx_irq_handler; - dev->driver->irq_uninstall = i8xx_irq_reset; - } else if (IS_I945GM(dev_priv)) { - dev->driver->irq_preinstall = i915_irq_reset; - dev->driver->irq_postinstall = i915_irq_postinstall; - dev->driver->irq_uninstall = i915_irq_reset; - dev->driver->irq_handler = i915_irq_handler; - } else if (IS_GEN(dev_priv, 3)) { - dev->driver->irq_preinstall = i915_irq_reset; - dev->driver->irq_postinstall = i915_irq_postinstall; - dev->driver->irq_uninstall = i915_irq_reset; - dev->driver->irq_handler = i915_irq_handler; - } else { - dev->driver->irq_preinstall = i965_irq_reset; - dev->driver->irq_postinstall = i965_irq_postinstall; - dev->driver->irq_uninstall = i965_irq_reset; - dev->driver->irq_handler = i965_irq_handler; - } - if (I915_HAS_HOTPLUG(dev_priv)) - dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; } } @@ -4918,6 +4824,75 @@ void intel_irq_fini(struct drm_i915_private *i915) kfree(i915->l3_parity.remap_info[i]); } +static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv) +{ + if (HAS_GMCH(dev_priv)) { + if (IS_CHERRYVIEW(dev_priv)) + return cherryview_irq_handler; + else if (IS_VALLEYVIEW(dev_priv)) + return valleyview_irq_handler; + else if (IS_GEN(dev_priv, 4)) + return i965_irq_handler; + else if (IS_GEN(dev_priv, 3)) + return i915_irq_handler; + else + 
return i8xx_irq_handler; + } else { + if (INTEL_GEN(dev_priv) >= 11) + return gen11_irq_handler; + else if (INTEL_GEN(dev_priv) >= 8) + return gen8_irq_handler; + else + return ironlake_irq_handler; + } +} + +static void intel_irq_reset(struct drm_i915_private *dev_priv) +{ + if (HAS_GMCH(dev_priv)) { + if (IS_CHERRYVIEW(dev_priv)) + cherryview_irq_reset(dev_priv); + else if (IS_VALLEYVIEW(dev_priv)) + valleyview_irq_reset(dev_priv); + else if (IS_GEN(dev_priv, 4)) + i965_irq_reset(dev_priv); + else if (IS_GEN(dev_priv, 3)) + i915_irq_reset(dev_priv); + else + i8xx_irq_reset(dev_priv); + } else { + if (INTEL_GEN(dev_priv) >= 11) + gen11_irq_reset(dev_priv); + else if (INTEL_GEN(dev_priv) >= 8) + gen8_irq_reset(dev_priv); + else + ironlake_irq_reset(dev_priv); + } +} + +static void intel_irq_postinstall(struct drm_i915_private *dev_priv) +{ + if (HAS_GMCH(dev_priv)) { + if (IS_CHERRYVIEW(dev_priv)) + cherryview_irq_postinstall(dev_priv); + else if (IS_VALLEYVIEW(dev_priv)) + valleyview_irq_postinstall(dev_priv); + else if (IS_GEN(dev_priv, 4)) + i965_irq_postinstall(dev_priv); + else if (IS_GEN(dev_priv, 3)) + i915_irq_postinstall(dev_priv); + else + i8xx_irq_postinstall(dev_priv); + } else { + if (INTEL_GEN(dev_priv) >= 11) + gen11_irq_postinstall(dev_priv); + else if (INTEL_GEN(dev_priv) >= 8) + gen8_irq_postinstall(dev_priv); + else + ironlake_irq_postinstall(dev_priv); + } +} + /** * intel_irq_install - enables the hardware interrupt * @dev_priv: i915 device instance @@ -4931,6 +4906,9 @@ void intel_irq_fini(struct drm_i915_private *i915) */ int intel_irq_install(struct drm_i915_private *dev_priv) { + int irq = dev_priv->drm.pdev->irq; + int ret; + /* * We enable some interrupt sources in our postinstall hooks, so mark * interrupts as enabled _before_ actually enabling them to avoid @@ -4938,7 +4916,20 @@ int intel_irq_install(struct drm_i915_private *dev_priv) */ dev_priv->runtime_pm.irqs_enabled = true; - return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq); + dev_priv->drm.irq_enabled = true; + + intel_irq_reset(dev_priv); + + ret = request_irq(irq, intel_irq_handler(dev_priv), + IRQF_SHARED, DRIVER_NAME, dev_priv); + if (ret < 0) { + dev_priv->drm.irq_enabled = false; + return ret; + } + + intel_irq_postinstall(dev_priv); + + return ret; } /** @@ -4950,7 +4941,23 @@ int intel_irq_install(struct drm_i915_private *dev_priv) */ void intel_irq_uninstall(struct drm_i915_private *dev_priv) { - drm_irq_uninstall(&dev_priv->drm); + int irq = dev_priv->drm.pdev->irq; + + /* + * FIXME we can get called twice during driver load + * error handling due to intel_modeset_cleanup() + * calling us out of sequence. Would be nice if + * it didn't do that... 
+ */ + if (!dev_priv->drm.irq_enabled) + return; + + dev_priv->drm.irq_enabled = false; + + intel_irq_reset(dev_priv); + + free_irq(irq, dev_priv); + intel_hpd_cancel_work(dev_priv); dev_priv->runtime_pm.irqs_enabled = false; } @@ -4964,7 +4971,7 @@ void intel_irq_uninstall(struct drm_i915_private *dev_priv) */ void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv) { - dev_priv->drm.driver->irq_uninstall(&dev_priv->drm); + intel_irq_reset(dev_priv); dev_priv->runtime_pm.irqs_enabled = false; synchronize_irq(dev_priv->drm.irq); } @@ -4979,6 +4986,6 @@ void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv) void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv) { dev_priv->runtime_pm.irqs_enabled = true; - dev_priv->drm.driver->irq_preinstall(&dev_priv->drm); - dev_priv->drm.driver->irq_postinstall(&dev_priv->drm); + intel_irq_reset(dev_priv); + intel_irq_postinstall(dev_priv); } -- cgit v1.2.3 From 7d23e59376039d08728444cf05b2db6f7b86ff22 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 19 Jun 2019 20:08:42 +0300 Subject: drm/i915: Initialize drm_driver vblank funcs at compile time MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move the .get_vblank_timestamp() and .get_scanout_position() initialization to happen at compile time. No point in delaying it since we always assign the same functions. Signed-off-by: Ville Syrjälä Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190619170842.20579-5-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/i915_drv.c | 3 +++ drivers/gpu/drm/i915/i915_irq.c | 11 ++++------- drivers/gpu/drm/i915/i915_irq.h | 5 +++++ 3 files changed, 12 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 441a68338b9b..12182d2fc03c 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -3232,6 +3232,9 @@ static struct drm_driver driver = { .gem_prime_export = i915_gem_prime_export, .gem_prime_import = i915_gem_prime_import, + .get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos, + .get_scanout_position = i915_get_crtc_scanoutpos, + .dumb_create = i915_gem_dumb_create, .dumb_map_offset = i915_gem_mmap_gtt, .ioctls = i915_ioctls, diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index c76c498769d8..1b83d6e2ae69 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1109,10 +1109,10 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc) return (position + crtc->scanline_offset) % vtotal; } -static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, - bool in_vblank_irq, int *vpos, int *hpos, - ktime_t *stime, ktime_t *etime, - const struct drm_display_mode *mode) +bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, + bool in_vblank_irq, int *vpos, int *hpos, + ktime_t *stime, ktime_t *etime, + const struct drm_display_mode *mode) { struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv, @@ -4789,9 +4789,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv) */ dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv); - dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos; - dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; - if (HAS_GMCH(dev_priv)) { if (I915_HAS_HOTPLUG(dev_priv)) 
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h index ef782e5ab240..5af5654f801d 100644 --- a/drivers/gpu/drm/i915/i915_irq.h +++ b/drivers/gpu/drm/i915/i915_irq.h @@ -114,6 +114,11 @@ void gen11_reset_guc_interrupts(struct drm_i915_private *i915); void gen11_enable_guc_interrupts(struct drm_i915_private *i915); void gen11_disable_guc_interrupts(struct drm_i915_private *i915); +bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, + bool in_vblank_irq, int *vpos, int *hpos, + ktime_t *stime, ktime_t *etime, + const struct drm_display_mode *mode); + u32 i915_get_vblank_counter(struct drm_crtc *crtc); u32 g4x_get_vblank_counter(struct drm_crtc *crtc); -- cgit v1.2.3 From de5147b8ce6d51f634661d7c531385371485cec6 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 26 Jun 2019 16:45:47 +0100 Subject: drm/i915: Add a wakeref getter for iff the wakeref is already active For use in the next patch, we want to acquire a wakeref without having to wake the device up -- i.e. only acquire the engine wakeref if the engine is already active. Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190626154549.10066-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_engine_pm.h | 7 ++++++- drivers/gpu/drm/i915/intel_wakeref.h | 15 +++++++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h index f3f5b031b4a1..7d057cdcd919 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h @@ -11,7 +11,6 @@ #include "intel_wakeref.h" struct drm_i915_private; -struct intel_engine_cs; void intel_engine_pm_get(struct intel_engine_cs *engine); void intel_engine_pm_put(struct intel_engine_cs *engine); @@ -22,6 +21,12 @@ intel_engine_pm_is_awake(const struct intel_engine_cs *engine) return intel_wakeref_is_active(&engine->wakeref); } +static inline bool +intel_engine_pm_get_if_awake(struct intel_engine_cs *engine) +{ + return intel_wakeref_get_if_active(&engine->wakeref); +} + void intel_engine_park(struct intel_engine_cs *engine); void intel_engine_init__pm(struct intel_engine_cs *engine); diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h index f74272770a5c..1d6f5986e4e5 100644 --- a/drivers/gpu/drm/i915/intel_wakeref.h +++ b/drivers/gpu/drm/i915/intel_wakeref.h @@ -71,6 +71,21 @@ intel_wakeref_get(struct intel_runtime_pm *rpm, return 0; } +/** + * intel_wakeref_get_if_in_use: Acquire the wakeref + * @wf: the wakeref + * + * Acquire a hold on the wakeref, but only if the wakeref is already + * active. + * + * Returns: true if the wakeref was acquired, false otherwise. + */ +static inline bool +intel_wakeref_get_if_active(struct intel_wakeref *wf) +{ + return atomic_inc_not_zero(&wf->count); +} + /** * intel_wakeref_put: Release the wakeref * @i915: the drm_i915_private device -- cgit v1.2.3 From 18398904ca9e3ddd180e2ecd45886e146b1d9d5b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 26 Jun 2019 16:45:48 +0100 Subject: drm/i915: Only recover active engines If we issue a reset to a currently idle engine, leave it idle afterwards. This is useful to excise a linkage between reset and the shrinker. 
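The engine-local getter introduced above reduces to atomic_inc_not_zero() on the wakeref count: take a reference only if somebody else already holds one, and never wake the device from this path. A minimal stand-alone C11 model of that rule, written as plain user-space code with invented names rather than the i915 implementation, looks like this:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct wakeref {
    atomic_int count;   /* 0 == asleep, >0 == active */
};

/* Equivalent of atomic_inc_not_zero(): acquire a reference only if at
 * least one is already held, i.e. never wake the device from here. */
static bool wakeref_get_if_active(struct wakeref *wf)
{
    int old = atomic_load(&wf->count);

    do {
        if (old == 0)
            return false;
    } while (!atomic_compare_exchange_weak(&wf->count, &old, old + 1));

    return true;
}

static void wakeref_put(struct wakeref *wf)
{
    atomic_fetch_sub(&wf->count, 1);
}

int main(void)
{
    struct wakeref asleep = { 0 };
    struct wakeref awake  = { 1 };

    printf("asleep acquired: %d\n", wakeref_get_if_active(&asleep)); /* 0 */
    if (wakeref_get_if_active(&awake)) {                             /* 1 */
        printf("awake acquired: 1\n");
        wakeref_put(&awake);
    }
    return 0;
}

The next patch in this series relies on exactly this semantic so that a reset only has to touch engines that were already awake.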
When waking the engine, we need to pin the default context image which we use for overwriting a guilty context -- if the engine is idle we do not need this pinned image! However, this pinning means that waking the engine acquires the FS_RECLAIM, and so may trigger the shrinker. The shrinker itself may need to wait upon the GPU to unbind an object and so may require services of reset; ergo we should avoid the engine wake up path. The danger in skipping the recovery for idle engines is that we leave the engine with no context defined, which may interfere with the operation of the power context on some older platforms. In practice, we should only be resetting an active GPU but it is something to look out for on Ironlake (if memory serves). Fixes: 79ffac8599c4 ("drm/i915: Invert the GEM wakeref hierarchy") Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Cc: Tvrtko Ursulin Cc: Imre Deak Link: https://patchwork.freedesktop.org/patch/msgid/20190626154549.10066-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_reset.c | 37 +++++++++++++++++++------------- drivers/gpu/drm/i915/gt/selftest_reset.c | 6 ++++-- 2 files changed, 26 insertions(+), 17 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 8ce92c51564e..e7cbd9cf85c1 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -678,7 +678,6 @@ static void reset_prepare_engine(struct intel_engine_cs *engine) * written to the powercontext is undefined and so we may lose * GPU state upon resume, i.e. fail to restart after a reset. */ - intel_engine_pm_get(engine); intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL); engine->reset.prepare(engine); } @@ -709,16 +708,21 @@ static void revoke_mmaps(struct drm_i915_private *i915) } } -static void reset_prepare(struct drm_i915_private *i915) +static intel_engine_mask_t reset_prepare(struct drm_i915_private *i915) { struct intel_engine_cs *engine; + intel_engine_mask_t awake = 0; enum intel_engine_id id; - intel_gt_pm_get(&i915->gt); - for_each_engine(engine, i915, id) + for_each_engine(engine, i915, id) { + if (intel_engine_pm_get_if_awake(engine)) + awake |= engine->mask; reset_prepare_engine(engine); + } intel_uc_reset_prepare(i915); + + return awake; } static void gt_revoke(struct drm_i915_private *i915) @@ -752,20 +756,22 @@ static int gt_reset(struct drm_i915_private *i915, static void reset_finish_engine(struct intel_engine_cs *engine) { engine->reset.finish(engine); - intel_engine_pm_put(engine); intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL); + + intel_engine_signal_breadcrumbs(engine); } -static void reset_finish(struct drm_i915_private *i915) +static void reset_finish(struct drm_i915_private *i915, + intel_engine_mask_t awake) { struct intel_engine_cs *engine; enum intel_engine_id id; for_each_engine(engine, i915, id) { reset_finish_engine(engine); - intel_engine_signal_breadcrumbs(engine); + if (awake & engine->mask) + intel_engine_pm_put(engine); } - intel_gt_pm_put(&i915->gt); } static void nop_submit_request(struct i915_request *request) @@ -789,6 +795,7 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915) { struct i915_gpu_error *error = &i915->gpu_error; struct intel_engine_cs *engine; + intel_engine_mask_t awake; enum intel_engine_id id; if (test_bit(I915_WEDGED, &error->flags)) @@ -808,7 +815,7 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915) * rolling the global seqno forward (since this would
complete requests * for which we haven't set the fence error to EIO yet). */ - reset_prepare(i915); + awake = reset_prepare(i915); /* Even if the GPU reset fails, it should still stop the engines */ if (!INTEL_INFO(i915)->gpu_reset_clobbers_display) @@ -832,7 +839,7 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915) for_each_engine(engine, i915, id) engine->cancel_requests(engine); - reset_finish(i915); + reset_finish(i915, awake); GEM_TRACE("end\n"); } @@ -964,6 +971,7 @@ void i915_reset(struct drm_i915_private *i915, const char *reason) { struct i915_gpu_error *error = &i915->gpu_error; + intel_engine_mask_t awake; int ret; GEM_TRACE("flags=%lx\n", error->flags); @@ -980,7 +988,7 @@ void i915_reset(struct drm_i915_private *i915, dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason); error->reset_count++; - reset_prepare(i915); + awake = reset_prepare(i915); if (!intel_has_gpu_reset(i915)) { if (i915_modparams.reset) @@ -1021,7 +1029,7 @@ void i915_reset(struct drm_i915_private *i915, i915_queue_hangcheck(i915); finish: - reset_finish(i915); + reset_finish(i915, awake); unlock: mutex_unlock(&error->wedge_mutex); return; @@ -1072,7 +1080,7 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg) GEM_TRACE("%s flags=%lx\n", engine->name, error->flags); GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags)); - if (!intel_engine_pm_is_awake(engine)) + if (!intel_engine_pm_get_if_awake(engine)) return 0; reset_prepare_engine(engine); @@ -1107,12 +1115,11 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg) * process to program RING_MODE, HWSP and re-enable submission. */ ret = engine->resume(engine); - if (ret) - goto out; out: intel_engine_cancel_stop_cs(engine); reset_finish_engine(engine); + intel_engine_pm_put(engine); return ret; } diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c index 641cf3aee8d5..672e32e1ef95 100644 --- a/drivers/gpu/drm/i915/gt/selftest_reset.c +++ b/drivers/gpu/drm/i915/gt/selftest_reset.c @@ -71,15 +71,17 @@ static int igt_atomic_reset(void *arg) goto unlock; for (p = igt_atomic_phases; p->name; p++) { + intel_engine_mask_t awake; + GEM_TRACE("intel_gpu_reset under %s\n", p->name); - reset_prepare(i915); + awake = reset_prepare(i915); p->critical_section_begin(); err = intel_gpu_reset(i915, ALL_ENGINES); p->critical_section_end(); - reset_finish(i915); + reset_finish(i915, awake); if (err) { pr_err("intel_gpu_reset failed under %s\n", p->name); -- cgit v1.2.3 From 092be382a2602067766f190a113514d469162456 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 26 Jun 2019 16:45:49 +0100 Subject: drm/i915: Lift intel_engines_resume() to callers Since the reset path wants to recover the engines itself, it only wants to reinitialise the hardware using i915_gem_init_hw(). Pull the call to intel_engines_resume() to the module init/resume path so we can avoid it during reset. 
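Both callers end up with the same per-engine restart shape: resume each engine in turn and stop at the first failure so the caller can decide whether to wedge the GPU. A stand-alone sketch of that loop follows; the engine names and error value are illustrative, not the driver code:

#include <stdio.h>

struct engine {
    const char *name;
    int (*resume)(struct engine *engine);
};

static int resume_ok(struct engine *e)   { (void)e; return 0; }
static int resume_fail(struct engine *e) { (void)e; return -5; /* like -EIO */ }

/* Restart each engine, reporting and propagating the first failure. */
static int resume_engines(struct engine *engines, int count)
{
    for (int i = 0; i < count; i++) {
        int ret = engines[i].resume(&engines[i]);

        if (ret) {
            fprintf(stderr, "Failed to restart %s (%d)\n",
                    engines[i].name, ret);
            return ret;
        }
    }

    return 0;
}

int main(void)
{
    struct engine engines[] = {
        { "rcs0", resume_ok },
        { "bcs0", resume_fail },
        { "vcs0", resume_ok },  /* never reached: bcs0 fails first */
    };

    return resume_engines(engines, 3) ? 1 : 0;
}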
Fixes: 79ffac8599c4 ("drm/i915: Invert the GEM wakeref hierarchy") Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Cc: Tvrtko Ursulin Cc: Imre Deak Link: https://patchwork.freedesktop.org/patch/msgid/20190626154549.10066-3-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_pm.c | 7 +-- drivers/gpu/drm/i915/gt/intel_engine_pm.c | 24 ----------- drivers/gpu/drm/i915/gt/intel_engine_pm.h | 2 - drivers/gpu/drm/i915/gt/intel_gt_pm.c | 21 ++++++++- drivers/gpu/drm/i915/gt/intel_gt_pm.h | 2 +- drivers/gpu/drm/i915/gt/intel_reset.c | 21 ++++++++- drivers/gpu/drm/i915/i915_gem.c | 71 +++++++++---------------------- 7 files changed, 65 insertions(+), 83 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c index 6b730bd4d72f..4d774376f5b8 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c @@ -254,14 +254,15 @@ void i915_gem_resume(struct drm_i915_private *i915) i915_gem_restore_gtt_mappings(i915); i915_gem_restore_fences(i915); + if (i915_gem_init_hw(i915)) + goto err_wedged; + /* * As we didn't flush the kernel context before suspend, we cannot * guarantee that the context image is complete. So let's just reset * it and start again. */ - intel_gt_resume(&i915->gt); - - if (i915_gem_init_hw(i915)) + if (intel_gt_resume(&i915->gt)) goto err_wedged; intel_uc_resume(i915); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c index 5253c382034d..84e432abe8e0 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c @@ -142,27 +142,3 @@ void intel_engine_init__pm(struct intel_engine_cs *engine) { intel_wakeref_init(&engine->wakeref); } - -int intel_engines_resume(struct drm_i915_private *i915) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - int err = 0; - - intel_gt_pm_get(&i915->gt); - for_each_engine(engine, i915, id) { - intel_engine_pm_get(engine); - engine->serial++; /* kernel context lost */ - err = engine->resume(engine); - intel_engine_pm_put(engine); - if (err) { - dev_err(i915->drm.dev, - "Failed to restart %s (%d)\n", - engine->name, err); - break; - } - } - intel_gt_pm_put(&i915->gt); - - return err; -} diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h index 7d057cdcd919..015ac72d7ad0 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h @@ -31,6 +31,4 @@ void intel_engine_park(struct intel_engine_cs *engine); void intel_engine_init__pm(struct intel_engine_cs *engine); -int intel_engines_resume(struct drm_i915_private *i915); - #endif /* INTEL_ENGINE_PM_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index ec6b69d014b6..36ba80e6a0b7 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -5,6 +5,7 @@ */ #include "i915_drv.h" +#include "intel_engine_pm.h" #include "intel_gt_pm.h" #include "intel_pm.h" #include "intel_wakeref.h" @@ -122,10 +123,11 @@ void intel_gt_sanitize(struct intel_gt *gt, bool force) intel_engine_reset(engine, false); } -void intel_gt_resume(struct intel_gt *gt) +int intel_gt_resume(struct intel_gt *gt) { struct intel_engine_cs *engine; enum intel_engine_id id; + int err = 0; /* * After resume, we may need to poke into the pinned kernel @@ -133,9 +135,12 @@ void intel_gt_resume(struct intel_gt *gt) * Only the kernel contexts should 
remain pinned over suspend, * allowing us to fixup the user contexts on their first pin. */ + intel_gt_pm_get(gt); for_each_engine(engine, gt->i915, id) { struct intel_context *ce; + intel_engine_pm_get(engine); + ce = engine->kernel_context; if (ce) ce->ops->reset(ce); @@ -143,5 +148,19 @@ void intel_gt_resume(struct intel_gt *gt) ce = engine->preempt_context; if (ce) ce->ops->reset(ce); + + engine->serial++; /* kernel context lost */ + err = engine->resume(engine); + + intel_engine_pm_put(engine); + if (err) { + dev_err(gt->i915->drm.dev, + "Failed to restart %s (%d)\n", + engine->name, err); + break; + } } + intel_gt_pm_put(gt); + + return err; } diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h index 4dbb92cf58d7..ba960e1fc209 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h @@ -22,6 +22,6 @@ void intel_gt_pm_put(struct intel_gt *gt); void intel_gt_pm_init_early(struct intel_gt *gt); void intel_gt_sanitize(struct intel_gt *gt, bool force); -void intel_gt_resume(struct intel_gt *gt); +int intel_gt_resume(struct intel_gt *gt); #endif /* INTEL_GT_PM_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index e7cbd9cf85c1..adfdb908587f 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -949,6 +949,21 @@ static int do_reset(struct drm_i915_private *i915, return gt_reset(i915, stalled_mask); } +static int resume(struct drm_i915_private *i915) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + int ret; + + for_each_engine(engine, i915, id) { + ret = engine->resume(engine); + if (ret) + return ret; + } + + return 0; +} + /** * i915_reset - reset chip after a hang * @i915: #drm_i915_private to reset @@ -1023,9 +1038,13 @@ void i915_reset(struct drm_i915_private *i915, if (ret) { DRM_ERROR("Failed to initialise HW following reset (%d)\n", ret); - goto error; + goto taint; } + ret = resume(i915); + if (ret) + goto taint; + i915_queue_hangcheck(i915); finish: diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index deecbe128e5b..b7f290b77f8f 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -46,7 +46,6 @@ #include "gem/i915_gem_ioctls.h" #include "gem/i915_gem_pm.h" #include "gem/i915_gemfs.h" -#include "gt/intel_engine_pm.h" #include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" #include "gt/intel_mocs.h" @@ -1192,12 +1191,17 @@ static void init_unused_rings(struct intel_gt *gt) } } -static int init_hw(struct intel_gt *gt) +int i915_gem_init_hw(struct drm_i915_private *i915) { - struct drm_i915_private *i915 = gt->i915; - struct intel_uncore *uncore = gt->uncore; + struct intel_uncore *uncore = &i915->uncore; + struct intel_gt *gt = &i915->gt; int ret; + BUG_ON(!i915->kernel_context); + ret = i915_terminally_wedged(i915); + if (ret) + return ret; + gt->last_init_time = ktime_get(); /* Double layer security blanket, see i915_gem_init() */ @@ -1248,51 +1252,10 @@ static int init_hw(struct intel_gt *gt) intel_mocs_init_l3cc_table(gt); - intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); - - return 0; - -out: - intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); - - return ret; -} - -int i915_gem_init_hw(struct drm_i915_private *i915) -{ - struct intel_uncore *uncore = &i915->uncore; - int ret; - - BUG_ON(!i915->kernel_context); - ret = i915_terminally_wedged(i915); - if (ret) - return ret; - - /* Double layer security blanket, see i915_gem_init() */ - 
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); - - ret = init_hw(&i915->gt); - if (ret) - goto err_init; - - /* Only when the HW is re-initialised, can we replay the requests */ - ret = intel_engines_resume(i915); - if (ret) - goto err_engines; - - intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); - intel_engines_set_scheduler_caps(i915); - return 0; - -err_engines: - intel_uc_fini_hw(i915); -err_init: +out: intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); - - intel_engines_set_scheduler_caps(i915); - return ret; } @@ -1524,6 +1487,11 @@ int i915_gem_init(struct drm_i915_private *dev_priv) if (ret) goto err_uc_init; + /* Only when the HW is re-initialised, can we replay the requests */ + ret = intel_gt_resume(&dev_priv->gt); + if (ret) + goto err_init_hw; + /* * Despite its name intel_init_clock_gating applies both display * clock gating workarounds; GT mmio workarounds and the occasional @@ -1537,20 +1505,20 @@ int i915_gem_init(struct drm_i915_private *dev_priv) ret = intel_engines_verify_workarounds(dev_priv); if (ret) - goto err_init_hw; + goto err_gt; ret = __intel_engines_record_defaults(dev_priv); if (ret) - goto err_init_hw; + goto err_gt; if (i915_inject_load_failure()) { ret = -ENODEV; - goto err_init_hw; + goto err_gt; } if (i915_inject_load_failure()) { ret = -EIO; - goto err_init_hw; + goto err_gt; } intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); @@ -1564,7 +1532,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv) * HW as irrevisibly wedged, but keep enough state around that the * driver doesn't explode during runtime. */ -err_init_hw: +err_gt: mutex_unlock(&dev_priv->drm.struct_mutex); i915_gem_set_wedged(dev_priv); @@ -1574,6 +1542,7 @@ err_init_hw: i915_gem_drain_workqueue(dev_priv); mutex_lock(&dev_priv->drm.struct_mutex); +err_init_hw: intel_uc_fini_hw(dev_priv); err_uc_init: intel_uc_fini(dev_priv); -- cgit v1.2.3 From 9c811fce8a443921beaefef812c2caae8c2df427 Mon Sep 17 00:00:00 2001 From: José Roberto de Souza Date: Tue, 25 Jun 2019 18:40:51 -0700 Subject: drm/i915/icl: Add new supported CD clocks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now 180, 172.8 and 192 MHz are supported. The 180 and 172.8 MHz CD clocks will only be used when audio is not enabled, as stated by BSpec and implemented in intel_crtc_compute_min_cdclk(): the CD clock must be at least twice the Azalia BCLK, and BCLK by default is 96 MHz; it could be set to 48 MHz but we are not reading it.
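The selection rule the new table-driven code implements is simply: pick the smallest supported CD clock that still satisfies min_cdclk. A stand-alone sketch using the 24 MHz reference values from this patch follows; calc_cdclk() and the scaffolding around it are illustrative rather than the driver code. Note how the audio case of twice the default 96 MHz BCLK lands exactly on the new 192 MHz step:

#include <stdio.h>

/* 24 MHz reference table from the patch above; values in kHz. */
static const int ranges_24[] = { 180000, 192000, 312000, 552000, 648000 };
#define NUM_RANGES (sizeof(ranges_24) / sizeof(ranges_24[0]))

/* Pick the smallest supported CD clock that still satisfies min_cdclk. */
static int calc_cdclk(int min_cdclk)
{
    for (unsigned int i = 0; i < NUM_RANGES; i++) {
        if (min_cdclk <= ranges_24[i])
            return ranges_24[i];
    }
    return ranges_24[NUM_RANGES - 1];
}

int main(void)
{
    int bclk = 96000;   /* default Azalia BCLK, kHz */

    printf("no audio, min 150 MHz -> %d kHz\n", calc_cdclk(150000));  /* 180000 */
    printf("audio, min 2*BCLK     -> %d kHz\n", calc_cdclk(2 * bclk)); /* 192000 */
    printf("min 600 MHz           -> %d kHz\n", calc_cdclk(600000));  /* 648000 */
    return 0;
}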
v3: - making icl clock arrays static (Ville) BSpec: 20598 BSpec: 15729 Cc: Clint Taylor Cc: Ville Syrjälä Reviewed-by: Matt Roper Signed-off-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190626014053.30541-1-jose.souza@intel.com --- drivers/gpu/drm/i915/display/intel_cdclk.c | 30 +++++++++++++++++++++--------- 1 file changed, 21 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 8993ab283562..c8ebd31f7c24 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -1756,9 +1756,10 @@ sanitize: static int icl_calc_cdclk(int min_cdclk, unsigned int ref) { - int ranges_24[] = { 312000, 552000, 648000 }; - int ranges_19_38[] = { 307200, 556800, 652800 }; - int *ranges; + static const int ranges_24[] = { 180000, 192000, 312000, 552000, 648000 }; + static const int ranges_19_38[] = { 172800, 192000, 307200, 556800, 652800 }; + const int *ranges; + int len, i; switch (ref) { default: @@ -1766,19 +1767,22 @@ static int icl_calc_cdclk(int min_cdclk, unsigned int ref) /* fall through */ case 24000: ranges = ranges_24; + len = ARRAY_SIZE(ranges_24); break; case 19200: case 38400: ranges = ranges_19_38; + len = ARRAY_SIZE(ranges_19_38); break; } - if (min_cdclk > ranges[1]) - return ranges[2]; - else if (min_cdclk > ranges[0]) - return ranges[1]; - else - return ranges[0]; + for (i = 0; i < len; i++) { + if (min_cdclk <= ranges[i]) + return ranges[i]; + } + + WARN_ON(min_cdclk > ranges[len - 1]); + return ranges[len - 1]; } static int icl_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk) @@ -1792,16 +1796,24 @@ static int icl_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk) default: MISSING_CASE(cdclk); /* fall through */ + case 172800: case 307200: case 556800: case 652800: WARN_ON(dev_priv->cdclk.hw.ref != 19200 && dev_priv->cdclk.hw.ref != 38400); break; + case 180000: case 312000: case 552000: case 648000: WARN_ON(dev_priv->cdclk.hw.ref != 24000); + break; + case 192000: + WARN_ON(dev_priv->cdclk.hw.ref != 19200 && + dev_priv->cdclk.hw.ref != 38400 && + dev_priv->cdclk.hw.ref != 24000); + break; } ratio = cdclk / (dev_priv->cdclk.hw.ref / 2); -- cgit v1.2.3 From 6e63790efdf75529fda4b50e9561d0832da1cdb5 Mon Sep 17 00:00:00 2001 From: José Roberto de Souza Date: Tue, 25 Jun 2019 18:40:52 -0700 Subject: drm/i915/ehl: Remove unsupported cd clocks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit EHL does not support 648 and 652.8 MHz.
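A rough model of the v2 approach described next: keep the shared frequency table but bound the result by a platform maximum that depends on the reference clock. The 552000/556800 values come from the patch; the helper functions are illustrative, and in the driver the limit is enforced through dev_priv->max_cdclk_freq checks rather than literal clamping:

#include <stdio.h>

/* EHL tops out at 552 MHz with a 24 MHz reference, else 556.8 MHz; the
 * higher entries of the shared table simply become unreachable. */
static int ehl_max_cdclk(int ref)
{
    return ref == 24000 ? 552000 : 556800;
}

static int clamp_cdclk(int cdclk, int ref)
{
    int max = ehl_max_cdclk(ref);

    return cdclk > max ? max : cdclk;
}

int main(void)
{
    printf("%d\n", clamp_cdclk(648000, 24000));  /* 552000 */
    printf("%d\n", clamp_cdclk(652800, 38400));  /* 556800 */
    printf("%d\n", clamp_cdclk(312000, 24000));  /* 312000 */
    return 0;
}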
v2: - Limiting the maximum CD clock by max_cdclk_freq instead of removing it from icl_calc_cdclk() (Ville and Jani) BSpec: 20598 Cc: Clint Taylor Cc: Matt Roper Cc: Ville Syrjälä Cc: Jani Nikula Reviewed-by: Matt Roper Signed-off-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190626014053.30541-2-jose.souza@intel.com --- drivers/gpu/drm/i915/display/intel_cdclk.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index c8ebd31f7c24..0dda64482443 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -2606,7 +2606,12 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv) */ void intel_update_max_cdclk(struct drm_i915_private *dev_priv) { - if (INTEL_GEN(dev_priv) >= 11) { + if (IS_ELKHARTLAKE(dev_priv)) { + if (dev_priv->cdclk.hw.ref == 24000) + dev_priv->max_cdclk_freq = 552000; + else + dev_priv->max_cdclk_freq = 556800; + } else if (INTEL_GEN(dev_priv) >= 11) { if (dev_priv->cdclk.hw.ref == 24000) dev_priv->max_cdclk_freq = 648000; else -- cgit v1.2.3 From 63c9dae71dc53928a82deb32e4651a4a44c7a4fe Mon Sep 17 00:00:00 2001 From: José Roberto de Souza Date: Tue, 25 Jun 2019 18:40:53 -0700 Subject: drm/i915/ehl: Add voltage level requirement table MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit EHL has its own voltage level requirement depending on the CD clock. BSpec: 21809 Cc: Clint Taylor Cc: Matt Roper Cc: Ville Syrjälä Reviewed-by: Matt Roper Signed-off-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190626014053.30541-3-jose.souza@intel.com --- drivers/gpu/drm/i915/display/intel_cdclk.c | 35 ++++++++++++++++++++---------- 1 file changed, 23 insertions(+), 12 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 0dda64482443..0b8b8ae3b7fc 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -1866,14 +1866,23 @@ static void icl_set_cdclk(struct drm_i915_private *dev_priv, dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level; } -static u8 icl_calc_voltage_level(int cdclk) +static u8 icl_calc_voltage_level(struct drm_i915_private *dev_priv, int cdclk) { - if (cdclk > 556800) - return 2; - else if (cdclk > 312000) - return 1; - else - return 0; + if (IS_ELKHARTLAKE(dev_priv)) { + if (cdclk > 312000) + return 2; + else if (cdclk > 180000) + return 1; + else + return 0; + } else { + if (cdclk > 556800) + return 2; + else if (cdclk > 312000) + return 1; + else + return 0; + } } static void icl_get_cdclk(struct drm_i915_private *dev_priv, @@ -1924,7 +1933,7 @@ out: * at least what the CDCLK frequency requires.
*/ cdclk_state->voltage_level = - icl_calc_voltage_level(cdclk_state->cdclk); + icl_calc_voltage_level(dev_priv, cdclk_state->cdclk); } static void icl_init_cdclk(struct drm_i915_private *dev_priv) @@ -1959,7 +1968,8 @@ sanitize: sanitized_state.vco = icl_calc_cdclk_pll_vco(dev_priv, sanitized_state.cdclk); sanitized_state.voltage_level = - icl_calc_voltage_level(sanitized_state.cdclk); + icl_calc_voltage_level(dev_priv, + sanitized_state.cdclk); icl_set_cdclk(dev_priv, &sanitized_state, INVALID_PIPE); } @@ -1970,7 +1980,8 @@ static void icl_uninit_cdclk(struct drm_i915_private *dev_priv) cdclk_state.cdclk = cdclk_state.bypass; cdclk_state.vco = 0; - cdclk_state.voltage_level = icl_calc_voltage_level(cdclk_state.cdclk); + cdclk_state.voltage_level = icl_calc_voltage_level(dev_priv, + cdclk_state.cdclk); icl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); } @@ -2561,7 +2572,7 @@ static int icl_modeset_calc_cdclk(struct intel_atomic_state *state) state->cdclk.logical.vco = vco; state->cdclk.logical.cdclk = cdclk; state->cdclk.logical.voltage_level = - max(icl_calc_voltage_level(cdclk), + max(icl_calc_voltage_level(dev_priv, cdclk), cnl_compute_min_voltage_level(state)); if (!state->active_crtcs) { @@ -2571,7 +2582,7 @@ static int icl_modeset_calc_cdclk(struct intel_atomic_state *state) state->cdclk.actual.vco = vco; state->cdclk.actual.cdclk = cdclk; state->cdclk.actual.voltage_level = - icl_calc_voltage_level(cdclk); + icl_calc_voltage_level(dev_priv, cdclk); } else { state->cdclk.actual = state->cdclk.logical; } -- cgit v1.2.3 From 5ed7a0cf339463d29163bdb828913d9a3f8c8c9e Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Wed, 26 Jun 2019 12:38:26 +0000 Subject: drm/i915: Move OA files to separate folder OA files look to be auto-generated so we can keep them all in dedicated subdirectory. 
Signed-off-by: Michal Wajdeczko Cc: Lionel Landwerlin Cc: Chris Wilson Cc: Jani Nikula Acked-by: Chris Wilson Acked-by: Lionel Landwerlin Acked-by: Jani Nikula Reviewed-by: Umesh Nerlige Ramappa Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190626123826.39760-1-michal.wajdeczko@intel.com --- drivers/gpu/drm/i915/Makefile | 36 +++++----- drivers/gpu/drm/i915/i915_oa_bdw.c | 91 ----------------------- drivers/gpu/drm/i915/i915_oa_bdw.h | 15 ---- drivers/gpu/drm/i915/i915_oa_bxt.c | 89 ----------------------- drivers/gpu/drm/i915/i915_oa_bxt.h | 15 ---- drivers/gpu/drm/i915/i915_oa_cflgt2.c | 90 ----------------------- drivers/gpu/drm/i915/i915_oa_cflgt2.h | 15 ---- drivers/gpu/drm/i915/i915_oa_cflgt3.c | 90 ----------------------- drivers/gpu/drm/i915/i915_oa_cflgt3.h | 15 ---- drivers/gpu/drm/i915/i915_oa_chv.c | 90 ----------------------- drivers/gpu/drm/i915/i915_oa_chv.h | 15 ---- drivers/gpu/drm/i915/i915_oa_cnl.c | 102 -------------------------- drivers/gpu/drm/i915/i915_oa_cnl.h | 15 ---- drivers/gpu/drm/i915/i915_oa_glk.c | 89 ----------------------- drivers/gpu/drm/i915/i915_oa_glk.h | 15 ---- drivers/gpu/drm/i915/i915_oa_hsw.c | 119 ------------------------------- drivers/gpu/drm/i915/i915_oa_hsw.h | 15 ---- drivers/gpu/drm/i915/i915_oa_icl.c | 99 ------------------------- drivers/gpu/drm/i915/i915_oa_icl.h | 15 ---- drivers/gpu/drm/i915/i915_oa_kblgt2.c | 90 ----------------------- drivers/gpu/drm/i915/i915_oa_kblgt2.h | 15 ---- drivers/gpu/drm/i915/i915_oa_kblgt3.c | 90 ----------------------- drivers/gpu/drm/i915/i915_oa_kblgt3.h | 15 ---- drivers/gpu/drm/i915/i915_oa_sklgt2.c | 89 ----------------------- drivers/gpu/drm/i915/i915_oa_sklgt2.h | 15 ---- drivers/gpu/drm/i915/i915_oa_sklgt3.c | 90 ----------------------- drivers/gpu/drm/i915/i915_oa_sklgt3.h | 15 ---- drivers/gpu/drm/i915/i915_oa_sklgt4.c | 90 ----------------------- drivers/gpu/drm/i915/i915_oa_sklgt4.h | 15 ---- drivers/gpu/drm/i915/i915_perf.c | 28 ++++---- drivers/gpu/drm/i915/oa/Makefile | 0 drivers/gpu/drm/i915/oa/i915_oa_bdw.c | 91 +++++++++++++++++++++++ drivers/gpu/drm/i915/oa/i915_oa_bdw.h | 15 ++++ drivers/gpu/drm/i915/oa/i915_oa_bxt.c | 89 +++++++++++++++++++++++ drivers/gpu/drm/i915/oa/i915_oa_bxt.h | 15 ++++ drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c | 90 +++++++++++++++++++++++ drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h | 15 ++++ drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c | 90 +++++++++++++++++++++++ drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h | 15 ++++ drivers/gpu/drm/i915/oa/i915_oa_chv.c | 90 +++++++++++++++++++++++ drivers/gpu/drm/i915/oa/i915_oa_chv.h | 15 ++++ drivers/gpu/drm/i915/oa/i915_oa_cnl.c | 102 ++++++++++++++++++++++++++ drivers/gpu/drm/i915/oa/i915_oa_cnl.h | 15 ++++ drivers/gpu/drm/i915/oa/i915_oa_glk.c | 89 +++++++++++++++++++++++ drivers/gpu/drm/i915/oa/i915_oa_glk.h | 15 ++++ drivers/gpu/drm/i915/oa/i915_oa_hsw.c | 119 +++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/oa/i915_oa_hsw.h | 15 ++++ drivers/gpu/drm/i915/oa/i915_oa_icl.c | 99 +++++++++++++++++++++++++ drivers/gpu/drm/i915/oa/i915_oa_icl.h | 15 ++++ drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c | 90 +++++++++++++++++++++++ drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h | 15 ++++ drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c | 90 +++++++++++++++++++++++ drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h | 15 ++++ drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c | 89 +++++++++++++++++++++++ drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h | 15 ++++ drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c | 90 +++++++++++++++++++++++ 
drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h | 15 ++++ drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c | 90 +++++++++++++++++++++++ drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h | 15 ++++ 59 files changed, 1551 insertions(+), 1549 deletions(-) delete mode 100644 drivers/gpu/drm/i915/i915_oa_bdw.c delete mode 100644 drivers/gpu/drm/i915/i915_oa_bdw.h delete mode 100644 drivers/gpu/drm/i915/i915_oa_bxt.c delete mode 100644 drivers/gpu/drm/i915/i915_oa_bxt.h delete mode 100644 drivers/gpu/drm/i915/i915_oa_cflgt2.c delete mode 100644 drivers/gpu/drm/i915/i915_oa_cflgt2.h delete mode 100644 drivers/gpu/drm/i915/i915_oa_cflgt3.c delete mode 100644 drivers/gpu/drm/i915/i915_oa_cflgt3.h delete mode 100644 drivers/gpu/drm/i915/i915_oa_chv.c delete mode 100644 drivers/gpu/drm/i915/i915_oa_chv.h delete mode 100644 drivers/gpu/drm/i915/i915_oa_cnl.c delete mode 100644 drivers/gpu/drm/i915/i915_oa_cnl.h delete mode 100644 drivers/gpu/drm/i915/i915_oa_glk.c delete mode 100644 drivers/gpu/drm/i915/i915_oa_glk.h delete mode 100644 drivers/gpu/drm/i915/i915_oa_hsw.c delete mode 100644 drivers/gpu/drm/i915/i915_oa_hsw.h delete mode 100644 drivers/gpu/drm/i915/i915_oa_icl.c delete mode 100644 drivers/gpu/drm/i915/i915_oa_icl.h delete mode 100644 drivers/gpu/drm/i915/i915_oa_kblgt2.c delete mode 100644 drivers/gpu/drm/i915/i915_oa_kblgt2.h delete mode 100644 drivers/gpu/drm/i915/i915_oa_kblgt3.c delete mode 100644 drivers/gpu/drm/i915/i915_oa_kblgt3.h delete mode 100644 drivers/gpu/drm/i915/i915_oa_sklgt2.c delete mode 100644 drivers/gpu/drm/i915/i915_oa_sklgt2.h delete mode 100644 drivers/gpu/drm/i915/i915_oa_sklgt3.c delete mode 100644 drivers/gpu/drm/i915/i915_oa_sklgt3.h delete mode 100644 drivers/gpu/drm/i915/i915_oa_sklgt4.c delete mode 100644 drivers/gpu/drm/i915/i915_oa_sklgt4.h create mode 100644 drivers/gpu/drm/i915/oa/Makefile create mode 100644 drivers/gpu/drm/i915/oa/i915_oa_bdw.c create mode 100644 drivers/gpu/drm/i915/oa/i915_oa_bdw.h create mode 100644 drivers/gpu/drm/i915/oa/i915_oa_bxt.c create mode 100644 drivers/gpu/drm/i915/oa/i915_oa_bxt.h create mode 100644 drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c create mode 100644 drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h create mode 100644 drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c create mode 100644 drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h create mode 100644 drivers/gpu/drm/i915/oa/i915_oa_chv.c create mode 100644 drivers/gpu/drm/i915/oa/i915_oa_chv.h create mode 100644 drivers/gpu/drm/i915/oa/i915_oa_cnl.c create mode 100644 drivers/gpu/drm/i915/oa/i915_oa_cnl.h create mode 100644 drivers/gpu/drm/i915/oa/i915_oa_glk.c create mode 100644 drivers/gpu/drm/i915/oa/i915_oa_glk.h create mode 100644 drivers/gpu/drm/i915/oa/i915_oa_hsw.c create mode 100644 drivers/gpu/drm/i915/oa/i915_oa_hsw.h create mode 100644 drivers/gpu/drm/i915/oa/i915_oa_icl.c create mode 100644 drivers/gpu/drm/i915/oa/i915_oa_icl.h create mode 100644 drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c create mode 100644 drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h create mode 100644 drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c create mode 100644 drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h create mode 100644 drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c create mode 100644 drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h create mode 100644 drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c create mode 100644 drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h create mode 100644 drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c create mode 100644 drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/Makefile 
b/drivers/gpu/drm/i915/Makefile index 84ac0fd1b8d0..0fbc3506b5fc 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -212,6 +212,25 @@ i915-y += \ display/vlv_dsi.o \ display/vlv_dsi_pll.o +# perf code +obj-y += oa/ +i915-y += \ + oa/i915_oa_hsw.o \ + oa/i915_oa_bdw.o \ + oa/i915_oa_chv.o \ + oa/i915_oa_sklgt2.o \ + oa/i915_oa_sklgt3.o \ + oa/i915_oa_sklgt4.o \ + oa/i915_oa_bxt.o \ + oa/i915_oa_kblgt2.o \ + oa/i915_oa_kblgt3.o \ + oa/i915_oa_glk.o \ + oa/i915_oa_cflgt2.o \ + oa/i915_oa_cflgt3.o \ + oa/i915_oa_cnl.o \ + oa/i915_oa_icl.o +i915-y += i915_perf.o + # Post-mortem debug and GPU hang state capture i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o i915-$(CONFIG_DRM_I915_SELFTEST) += \ @@ -226,23 +245,6 @@ i915-$(CONFIG_DRM_I915_SELFTEST) += \ # virtual gpu code i915-y += i915_vgpu.o -# perf code -i915-y += i915_perf.o \ - i915_oa_hsw.o \ - i915_oa_bdw.o \ - i915_oa_chv.o \ - i915_oa_sklgt2.o \ - i915_oa_sklgt3.o \ - i915_oa_sklgt4.o \ - i915_oa_bxt.o \ - i915_oa_kblgt2.o \ - i915_oa_kblgt3.o \ - i915_oa_glk.o \ - i915_oa_cflgt2.o \ - i915_oa_cflgt3.o \ - i915_oa_cnl.o \ - i915_oa_icl.o - ifeq ($(CONFIG_DRM_I915_GVT),y) i915-y += intel_gvt.o include $(src)/gvt/Makefile diff --git a/drivers/gpu/drm/i915/i915_oa_bdw.c b/drivers/gpu/drm/i915/i915_oa_bdw.c deleted file mode 100644 index 4acdb94555b7..000000000000 --- a/drivers/gpu/drm/i915/i915_oa_bdw.c +++ /dev/null @@ -1,91 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#include - -#include "i915_drv.h" -#include "i915_oa_bdw.h" - -static const struct i915_oa_reg b_counter_config_test_oa[] = { - { _MMIO(0x2740), 0x00000000 }, - { _MMIO(0x2744), 0x00800000 }, - { _MMIO(0x2714), 0xf0800000 }, - { _MMIO(0x2710), 0x00000000 }, - { _MMIO(0x2724), 0xf0800000 }, - { _MMIO(0x2720), 0x00000000 }, - { _MMIO(0x2770), 0x00000004 }, - { _MMIO(0x2774), 0x00000000 }, - { _MMIO(0x2778), 0x00000003 }, - { _MMIO(0x277c), 0x00000000 }, - { _MMIO(0x2780), 0x00000007 }, - { _MMIO(0x2784), 0x00000000 }, - { _MMIO(0x2788), 0x00100002 }, - { _MMIO(0x278c), 0x0000fff7 }, - { _MMIO(0x2790), 0x00100002 }, - { _MMIO(0x2794), 0x0000ffcf }, - { _MMIO(0x2798), 0x00100082 }, - { _MMIO(0x279c), 0x0000ffef }, - { _MMIO(0x27a0), 0x001000c2 }, - { _MMIO(0x27a4), 0x0000ffe7 }, - { _MMIO(0x27a8), 0x00100001 }, - { _MMIO(0x27ac), 0x0000ffe7 }, -}; - -static const struct i915_oa_reg flex_eu_config_test_oa[] = { -}; - -static const struct i915_oa_reg mux_config_test_oa[] = { - { _MMIO(0x9840), 0x000000a0 }, - { _MMIO(0x9888), 0x198b0000 }, - { _MMIO(0x9888), 0x078b0066 }, - { _MMIO(0x9888), 0x118b0000 }, - { _MMIO(0x9888), 0x258b0000 }, - { _MMIO(0x9888), 0x21850008 }, - { _MMIO(0x9888), 0x0d834000 }, - { _MMIO(0x9888), 0x07844000 }, - { _MMIO(0x9888), 0x17804000 }, - { _MMIO(0x9888), 0x21800000 }, - { _MMIO(0x9888), 0x4f800000 }, - { _MMIO(0x9888), 0x41800000 }, - { _MMIO(0x9888), 0x31800000 }, - { _MMIO(0x9840), 0x00000080 }, -}; - -static ssize_t -show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "1\n"); -} - -void -i915_perf_load_test_config_bdw(struct drm_i915_private *dev_priv) -{ - strlcpy(dev_priv->perf.oa.test_config.uuid, - "d6de6f55-e526-4f79-a6a6-d7315c09044e", - sizeof(dev_priv->perf.oa.test_config.uuid)); - dev_priv->perf.oa.test_config.id = 1; - - dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa; - 
dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa); - - dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa; - dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa); - - dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa; - dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa); - - dev_priv->perf.oa.test_config.sysfs_metric.name = "d6de6f55-e526-4f79-a6a6-d7315c09044e"; - dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs; - - dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr; - - dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id"; - dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444; - dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id; -} diff --git a/drivers/gpu/drm/i915/i915_oa_bdw.h b/drivers/gpu/drm/i915/i915_oa_bdw.h deleted file mode 100644 index 0e667f1a8aa1..000000000000 --- a/drivers/gpu/drm/i915/i915_oa_bdw.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#ifndef __I915_OA_BDW_H__ -#define __I915_OA_BDW_H__ - -extern void i915_perf_load_test_config_bdw(struct drm_i915_private *dev_priv); - -#endif diff --git a/drivers/gpu/drm/i915/i915_oa_bxt.c b/drivers/gpu/drm/i915/i915_oa_bxt.c deleted file mode 100644 index a44195c39923..000000000000 --- a/drivers/gpu/drm/i915/i915_oa_bxt.c +++ /dev/null @@ -1,89 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! 
- */ - -#include - -#include "i915_drv.h" -#include "i915_oa_bxt.h" - -static const struct i915_oa_reg b_counter_config_test_oa[] = { - { _MMIO(0x2740), 0x00000000 }, - { _MMIO(0x2744), 0x00800000 }, - { _MMIO(0x2714), 0xf0800000 }, - { _MMIO(0x2710), 0x00000000 }, - { _MMIO(0x2724), 0xf0800000 }, - { _MMIO(0x2720), 0x00000000 }, - { _MMIO(0x2770), 0x00000004 }, - { _MMIO(0x2774), 0x00000000 }, - { _MMIO(0x2778), 0x00000003 }, - { _MMIO(0x277c), 0x00000000 }, - { _MMIO(0x2780), 0x00000007 }, - { _MMIO(0x2784), 0x00000000 }, - { _MMIO(0x2788), 0x00100002 }, - { _MMIO(0x278c), 0x0000fff7 }, - { _MMIO(0x2790), 0x00100002 }, - { _MMIO(0x2794), 0x0000ffcf }, - { _MMIO(0x2798), 0x00100082 }, - { _MMIO(0x279c), 0x0000ffef }, - { _MMIO(0x27a0), 0x001000c2 }, - { _MMIO(0x27a4), 0x0000ffe7 }, - { _MMIO(0x27a8), 0x00100001 }, - { _MMIO(0x27ac), 0x0000ffe7 }, -}; - -static const struct i915_oa_reg flex_eu_config_test_oa[] = { -}; - -static const struct i915_oa_reg mux_config_test_oa[] = { - { _MMIO(0x9840), 0x00000080 }, - { _MMIO(0x9888), 0x19800000 }, - { _MMIO(0x9888), 0x07800063 }, - { _MMIO(0x9888), 0x11800000 }, - { _MMIO(0x9888), 0x23810008 }, - { _MMIO(0x9888), 0x1d950400 }, - { _MMIO(0x9888), 0x0f922000 }, - { _MMIO(0x9888), 0x1f908000 }, - { _MMIO(0x9888), 0x37900000 }, - { _MMIO(0x9888), 0x55900000 }, - { _MMIO(0x9888), 0x47900000 }, - { _MMIO(0x9888), 0x33900000 }, -}; - -static ssize_t -show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "1\n"); -} - -void -i915_perf_load_test_config_bxt(struct drm_i915_private *dev_priv) -{ - strlcpy(dev_priv->perf.oa.test_config.uuid, - "5ee72f5c-092f-421e-8b70-225f7c3e9612", - sizeof(dev_priv->perf.oa.test_config.uuid)); - dev_priv->perf.oa.test_config.id = 1; - - dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa; - dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa); - - dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa; - dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa); - - dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa; - dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa); - - dev_priv->perf.oa.test_config.sysfs_metric.name = "5ee72f5c-092f-421e-8b70-225f7c3e9612"; - dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs; - - dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr; - - dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id"; - dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444; - dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id; -} diff --git a/drivers/gpu/drm/i915/i915_oa_bxt.h b/drivers/gpu/drm/i915/i915_oa_bxt.h deleted file mode 100644 index 679e92cf4f1d..000000000000 --- a/drivers/gpu/drm/i915/i915_oa_bxt.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! 
- */ - -#ifndef __I915_OA_BXT_H__ -#define __I915_OA_BXT_H__ - -extern void i915_perf_load_test_config_bxt(struct drm_i915_private *dev_priv); - -#endif diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt2.c b/drivers/gpu/drm/i915/i915_oa_cflgt2.c deleted file mode 100644 index 7f60d51b8761..000000000000 --- a/drivers/gpu/drm/i915/i915_oa_cflgt2.c +++ /dev/null @@ -1,90 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#include - -#include "i915_drv.h" -#include "i915_oa_cflgt2.h" - -static const struct i915_oa_reg b_counter_config_test_oa[] = { - { _MMIO(0x2740), 0x00000000 }, - { _MMIO(0x2744), 0x00800000 }, - { _MMIO(0x2714), 0xf0800000 }, - { _MMIO(0x2710), 0x00000000 }, - { _MMIO(0x2724), 0xf0800000 }, - { _MMIO(0x2720), 0x00000000 }, - { _MMIO(0x2770), 0x00000004 }, - { _MMIO(0x2774), 0x00000000 }, - { _MMIO(0x2778), 0x00000003 }, - { _MMIO(0x277c), 0x00000000 }, - { _MMIO(0x2780), 0x00000007 }, - { _MMIO(0x2784), 0x00000000 }, - { _MMIO(0x2788), 0x00100002 }, - { _MMIO(0x278c), 0x0000fff7 }, - { _MMIO(0x2790), 0x00100002 }, - { _MMIO(0x2794), 0x0000ffcf }, - { _MMIO(0x2798), 0x00100082 }, - { _MMIO(0x279c), 0x0000ffef }, - { _MMIO(0x27a0), 0x001000c2 }, - { _MMIO(0x27a4), 0x0000ffe7 }, - { _MMIO(0x27a8), 0x00100001 }, - { _MMIO(0x27ac), 0x0000ffe7 }, -}; - -static const struct i915_oa_reg flex_eu_config_test_oa[] = { -}; - -static const struct i915_oa_reg mux_config_test_oa[] = { - { _MMIO(0x9840), 0x00000080 }, - { _MMIO(0x9888), 0x11810000 }, - { _MMIO(0x9888), 0x07810013 }, - { _MMIO(0x9888), 0x1f810000 }, - { _MMIO(0x9888), 0x1d810000 }, - { _MMIO(0x9888), 0x1b930040 }, - { _MMIO(0x9888), 0x07e54000 }, - { _MMIO(0x9888), 0x1f908000 }, - { _MMIO(0x9888), 0x11900000 }, - { _MMIO(0x9888), 0x37900000 }, - { _MMIO(0x9888), 0x53900000 }, - { _MMIO(0x9888), 0x45900000 }, - { _MMIO(0x9888), 0x33900000 }, -}; - -static ssize_t -show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "1\n"); -} - -void -i915_perf_load_test_config_cflgt2(struct drm_i915_private *dev_priv) -{ - strlcpy(dev_priv->perf.oa.test_config.uuid, - "74fb4902-d3d3-4237-9e90-cbdc68d0a446", - sizeof(dev_priv->perf.oa.test_config.uuid)); - dev_priv->perf.oa.test_config.id = 1; - - dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa; - dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa); - - dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa; - dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa); - - dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa; - dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa); - - dev_priv->perf.oa.test_config.sysfs_metric.name = "74fb4902-d3d3-4237-9e90-cbdc68d0a446"; - dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs; - - dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr; - - dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id"; - dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444; - dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id; -} diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt2.h b/drivers/gpu/drm/i915/i915_oa_cflgt2.h deleted file mode 100644 index 4d6025559bbe..000000000000 --- a/drivers/gpu/drm/i915/i915_oa_cflgt2.h +++ /dev/null @@ -1,15 +0,0 @@ 
-/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#ifndef __I915_OA_CFLGT2_H__ -#define __I915_OA_CFLGT2_H__ - -extern void i915_perf_load_test_config_cflgt2(struct drm_i915_private *dev_priv); - -#endif diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt3.c b/drivers/gpu/drm/i915/i915_oa_cflgt3.c deleted file mode 100644 index a92c38e3a0ce..000000000000 --- a/drivers/gpu/drm/i915/i915_oa_cflgt3.c +++ /dev/null @@ -1,90 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#include - -#include "i915_drv.h" -#include "i915_oa_cflgt3.h" - -static const struct i915_oa_reg b_counter_config_test_oa[] = { - { _MMIO(0x2740), 0x00000000 }, - { _MMIO(0x2744), 0x00800000 }, - { _MMIO(0x2714), 0xf0800000 }, - { _MMIO(0x2710), 0x00000000 }, - { _MMIO(0x2724), 0xf0800000 }, - { _MMIO(0x2720), 0x00000000 }, - { _MMIO(0x2770), 0x00000004 }, - { _MMIO(0x2774), 0x00000000 }, - { _MMIO(0x2778), 0x00000003 }, - { _MMIO(0x277c), 0x00000000 }, - { _MMIO(0x2780), 0x00000007 }, - { _MMIO(0x2784), 0x00000000 }, - { _MMIO(0x2788), 0x00100002 }, - { _MMIO(0x278c), 0x0000fff7 }, - { _MMIO(0x2790), 0x00100002 }, - { _MMIO(0x2794), 0x0000ffcf }, - { _MMIO(0x2798), 0x00100082 }, - { _MMIO(0x279c), 0x0000ffef }, - { _MMIO(0x27a0), 0x001000c2 }, - { _MMIO(0x27a4), 0x0000ffe7 }, - { _MMIO(0x27a8), 0x00100001 }, - { _MMIO(0x27ac), 0x0000ffe7 }, -}; - -static const struct i915_oa_reg flex_eu_config_test_oa[] = { -}; - -static const struct i915_oa_reg mux_config_test_oa[] = { - { _MMIO(0x9840), 0x00000080 }, - { _MMIO(0x9888), 0x11810000 }, - { _MMIO(0x9888), 0x07810013 }, - { _MMIO(0x9888), 0x1f810000 }, - { _MMIO(0x9888), 0x1d810000 }, - { _MMIO(0x9888), 0x1b930040 }, - { _MMIO(0x9888), 0x07e54000 }, - { _MMIO(0x9888), 0x1f908000 }, - { _MMIO(0x9888), 0x11900000 }, - { _MMIO(0x9888), 0x37900000 }, - { _MMIO(0x9888), 0x53900000 }, - { _MMIO(0x9888), 0x45900000 }, - { _MMIO(0x9888), 0x33900000 }, -}; - -static ssize_t -show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "1\n"); -} - -void -i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv) -{ - strlcpy(dev_priv->perf.oa.test_config.uuid, - "577e8e2c-3fa0-4875-8743-3538d585e3b0", - sizeof(dev_priv->perf.oa.test_config.uuid)); - dev_priv->perf.oa.test_config.id = 1; - - dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa; - dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa); - - dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa; - dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa); - - dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa; - dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa); - - dev_priv->perf.oa.test_config.sysfs_metric.name = "577e8e2c-3fa0-4875-8743-3538d585e3b0"; - dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs; - - dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr; - - dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id"; - dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444; - dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id; -} diff --git 
a/drivers/gpu/drm/i915/i915_oa_cflgt3.h b/drivers/gpu/drm/i915/i915_oa_cflgt3.h deleted file mode 100644 index 0697f4077402..000000000000 --- a/drivers/gpu/drm/i915/i915_oa_cflgt3.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#ifndef __I915_OA_CFLGT3_H__ -#define __I915_OA_CFLGT3_H__ - -extern void i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv); - -#endif diff --git a/drivers/gpu/drm/i915/i915_oa_chv.c b/drivers/gpu/drm/i915/i915_oa_chv.c deleted file mode 100644 index 71ec889a0114..000000000000 --- a/drivers/gpu/drm/i915/i915_oa_chv.c +++ /dev/null @@ -1,90 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#include - -#include "i915_drv.h" -#include "i915_oa_chv.h" - -static const struct i915_oa_reg b_counter_config_test_oa[] = { - { _MMIO(0x2740), 0x00000000 }, - { _MMIO(0x2744), 0x00800000 }, - { _MMIO(0x2714), 0xf0800000 }, - { _MMIO(0x2710), 0x00000000 }, - { _MMIO(0x2724), 0xf0800000 }, - { _MMIO(0x2720), 0x00000000 }, - { _MMIO(0x2770), 0x00000004 }, - { _MMIO(0x2774), 0x00000000 }, - { _MMIO(0x2778), 0x00000003 }, - { _MMIO(0x277c), 0x00000000 }, - { _MMIO(0x2780), 0x00000007 }, - { _MMIO(0x2784), 0x00000000 }, - { _MMIO(0x2788), 0x00100002 }, - { _MMIO(0x278c), 0x0000fff7 }, - { _MMIO(0x2790), 0x00100002 }, - { _MMIO(0x2794), 0x0000ffcf }, - { _MMIO(0x2798), 0x00100082 }, - { _MMIO(0x279c), 0x0000ffef }, - { _MMIO(0x27a0), 0x001000c2 }, - { _MMIO(0x27a4), 0x0000ffe7 }, - { _MMIO(0x27a8), 0x00100001 }, - { _MMIO(0x27ac), 0x0000ffe7 }, -}; - -static const struct i915_oa_reg flex_eu_config_test_oa[] = { -}; - -static const struct i915_oa_reg mux_config_test_oa[] = { - { _MMIO(0x9840), 0x000000a0 }, - { _MMIO(0x9888), 0x59800000 }, - { _MMIO(0x9888), 0x59800001 }, - { _MMIO(0x9888), 0x338b0000 }, - { _MMIO(0x9888), 0x258b0066 }, - { _MMIO(0x9888), 0x058b0000 }, - { _MMIO(0x9888), 0x038b0000 }, - { _MMIO(0x9888), 0x03844000 }, - { _MMIO(0x9888), 0x47800080 }, - { _MMIO(0x9888), 0x57800000 }, - { _MMIO(0x1823a4), 0x00000000 }, - { _MMIO(0x9888), 0x59800000 }, - { _MMIO(0x9840), 0x00000080 }, -}; - -static ssize_t -show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "1\n"); -} - -void -i915_perf_load_test_config_chv(struct drm_i915_private *dev_priv) -{ - strlcpy(dev_priv->perf.oa.test_config.uuid, - "4a534b07-cba3-414d-8d60-874830e883aa", - sizeof(dev_priv->perf.oa.test_config.uuid)); - dev_priv->perf.oa.test_config.id = 1; - - dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa; - dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa); - - dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa; - dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa); - - dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa; - dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa); - - dev_priv->perf.oa.test_config.sysfs_metric.name = "4a534b07-cba3-414d-8d60-874830e883aa"; - dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs; - - dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr; - - 
dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id"; - dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444; - dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id; -} diff --git a/drivers/gpu/drm/i915/i915_oa_chv.h b/drivers/gpu/drm/i915/i915_oa_chv.h deleted file mode 100644 index 0986eae3135f..000000000000 --- a/drivers/gpu/drm/i915/i915_oa_chv.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#ifndef __I915_OA_CHV_H__ -#define __I915_OA_CHV_H__ - -extern void i915_perf_load_test_config_chv(struct drm_i915_private *dev_priv); - -#endif diff --git a/drivers/gpu/drm/i915/i915_oa_cnl.c b/drivers/gpu/drm/i915/i915_oa_cnl.c deleted file mode 100644 index 5c23d883d6c9..000000000000 --- a/drivers/gpu/drm/i915/i915_oa_cnl.c +++ /dev/null @@ -1,102 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#include - -#include "i915_drv.h" -#include "i915_oa_cnl.h" - -static const struct i915_oa_reg b_counter_config_test_oa[] = { - { _MMIO(0x2740), 0x00000000 }, - { _MMIO(0x2710), 0x00000000 }, - { _MMIO(0x2714), 0xf0800000 }, - { _MMIO(0x2720), 0x00000000 }, - { _MMIO(0x2724), 0xf0800000 }, - { _MMIO(0x2770), 0x00000004 }, - { _MMIO(0x2774), 0x0000ffff }, - { _MMIO(0x2778), 0x00000003 }, - { _MMIO(0x277c), 0x0000ffff }, - { _MMIO(0x2780), 0x00000007 }, - { _MMIO(0x2784), 0x0000ffff }, - { _MMIO(0x2788), 0x00100002 }, - { _MMIO(0x278c), 0x0000fff7 }, - { _MMIO(0x2790), 0x00100002 }, - { _MMIO(0x2794), 0x0000ffcf }, - { _MMIO(0x2798), 0x00100082 }, - { _MMIO(0x279c), 0x0000ffef }, - { _MMIO(0x27a0), 0x001000c2 }, - { _MMIO(0x27a4), 0x0000ffe7 }, - { _MMIO(0x27a8), 0x00100001 }, - { _MMIO(0x27ac), 0x0000ffe7 }, -}; - -static const struct i915_oa_reg flex_eu_config_test_oa[] = { -}; - -static const struct i915_oa_reg mux_config_test_oa[] = { - { _MMIO(0xd04), 0x00000200 }, - { _MMIO(0x9884), 0x00000007 }, - { _MMIO(0x9888), 0x17060000 }, - { _MMIO(0x9840), 0x00000000 }, - { _MMIO(0x9884), 0x00000007 }, - { _MMIO(0x9888), 0x13034000 }, - { _MMIO(0x9884), 0x00000007 }, - { _MMIO(0x9888), 0x07060066 }, - { _MMIO(0x9884), 0x00000007 }, - { _MMIO(0x9888), 0x05060000 }, - { _MMIO(0x9884), 0x00000007 }, - { _MMIO(0x9888), 0x0f080040 }, - { _MMIO(0x9884), 0x00000007 }, - { _MMIO(0x9888), 0x07091000 }, - { _MMIO(0x9884), 0x00000007 }, - { _MMIO(0x9888), 0x0f041000 }, - { _MMIO(0x9884), 0x00000007 }, - { _MMIO(0x9888), 0x1d004000 }, - { _MMIO(0x9884), 0x00000007 }, - { _MMIO(0x9888), 0x35000000 }, - { _MMIO(0x9884), 0x00000007 }, - { _MMIO(0x9888), 0x49000000 }, - { _MMIO(0x9884), 0x00000007 }, - { _MMIO(0x9888), 0x3d000000 }, - { _MMIO(0x9884), 0x00000007 }, - { _MMIO(0x9888), 0x31000000 }, -}; - -static ssize_t -show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "1\n"); -} - -void -i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv) -{ - strlcpy(dev_priv->perf.oa.test_config.uuid, - "db41edd4-d8e7-4730-ad11-b9a2d6833503", - sizeof(dev_priv->perf.oa.test_config.uuid)); - dev_priv->perf.oa.test_config.id = 1; - - dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa; - dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa); - - dev_priv->perf.oa.test_config.b_counter_regs = 
b_counter_config_test_oa; - dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa); - - dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa; - dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa); - - dev_priv->perf.oa.test_config.sysfs_metric.name = "db41edd4-d8e7-4730-ad11-b9a2d6833503"; - dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs; - - dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr; - - dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id"; - dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444; - dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id; -} diff --git a/drivers/gpu/drm/i915/i915_oa_cnl.h b/drivers/gpu/drm/i915/i915_oa_cnl.h deleted file mode 100644 index e830a406aff2..000000000000 --- a/drivers/gpu/drm/i915/i915_oa_cnl.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#ifndef __I915_OA_CNL_H__ -#define __I915_OA_CNL_H__ - -extern void i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv); - -#endif diff --git a/drivers/gpu/drm/i915/i915_oa_glk.c b/drivers/gpu/drm/i915/i915_oa_glk.c deleted file mode 100644 index 4bdda66df7d2..000000000000 --- a/drivers/gpu/drm/i915/i915_oa_glk.c +++ /dev/null @@ -1,89 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#include - -#include "i915_drv.h" -#include "i915_oa_glk.h" - -static const struct i915_oa_reg b_counter_config_test_oa[] = { - { _MMIO(0x2740), 0x00000000 }, - { _MMIO(0x2744), 0x00800000 }, - { _MMIO(0x2714), 0xf0800000 }, - { _MMIO(0x2710), 0x00000000 }, - { _MMIO(0x2724), 0xf0800000 }, - { _MMIO(0x2720), 0x00000000 }, - { _MMIO(0x2770), 0x00000004 }, - { _MMIO(0x2774), 0x00000000 }, - { _MMIO(0x2778), 0x00000003 }, - { _MMIO(0x277c), 0x00000000 }, - { _MMIO(0x2780), 0x00000007 }, - { _MMIO(0x2784), 0x00000000 }, - { _MMIO(0x2788), 0x00100002 }, - { _MMIO(0x278c), 0x0000fff7 }, - { _MMIO(0x2790), 0x00100002 }, - { _MMIO(0x2794), 0x0000ffcf }, - { _MMIO(0x2798), 0x00100082 }, - { _MMIO(0x279c), 0x0000ffef }, - { _MMIO(0x27a0), 0x001000c2 }, - { _MMIO(0x27a4), 0x0000ffe7 }, - { _MMIO(0x27a8), 0x00100001 }, - { _MMIO(0x27ac), 0x0000ffe7 }, -}; - -static const struct i915_oa_reg flex_eu_config_test_oa[] = { -}; - -static const struct i915_oa_reg mux_config_test_oa[] = { - { _MMIO(0x9840), 0x00000080 }, - { _MMIO(0x9888), 0x19800000 }, - { _MMIO(0x9888), 0x07800063 }, - { _MMIO(0x9888), 0x11800000 }, - { _MMIO(0x9888), 0x23810008 }, - { _MMIO(0x9888), 0x1d950400 }, - { _MMIO(0x9888), 0x0f922000 }, - { _MMIO(0x9888), 0x1f908000 }, - { _MMIO(0x9888), 0x37900000 }, - { _MMIO(0x9888), 0x55900000 }, - { _MMIO(0x9888), 0x47900000 }, - { _MMIO(0x9888), 0x33900000 }, -}; - -static ssize_t -show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "1\n"); -} - -void -i915_perf_load_test_config_glk(struct drm_i915_private *dev_priv) -{ - strlcpy(dev_priv->perf.oa.test_config.uuid, - "dd3fd789-e783-4204-8cd0-b671bbccb0cf", - sizeof(dev_priv->perf.oa.test_config.uuid)); - dev_priv->perf.oa.test_config.id = 1; - - dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa; - 
dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa); - - dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa; - dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa); - - dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa; - dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa); - - dev_priv->perf.oa.test_config.sysfs_metric.name = "dd3fd789-e783-4204-8cd0-b671bbccb0cf"; - dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs; - - dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr; - - dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id"; - dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444; - dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id; -} diff --git a/drivers/gpu/drm/i915/i915_oa_glk.h b/drivers/gpu/drm/i915/i915_oa_glk.h deleted file mode 100644 index 06dedf991edb..000000000000 --- a/drivers/gpu/drm/i915/i915_oa_glk.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#ifndef __I915_OA_GLK_H__ -#define __I915_OA_GLK_H__ - -extern void i915_perf_load_test_config_glk(struct drm_i915_private *dev_priv); - -#endif diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.c b/drivers/gpu/drm/i915/i915_oa_hsw.c deleted file mode 100644 index cc6526fdd2bd..000000000000 --- a/drivers/gpu/drm/i915/i915_oa_hsw.c +++ /dev/null @@ -1,119 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! 
- */ - -#include - -#include "i915_drv.h" -#include "i915_oa_hsw.h" - -static const struct i915_oa_reg b_counter_config_render_basic[] = { - { _MMIO(0x2724), 0x00800000 }, - { _MMIO(0x2720), 0x00000000 }, - { _MMIO(0x2714), 0x00800000 }, - { _MMIO(0x2710), 0x00000000 }, -}; - -static const struct i915_oa_reg flex_eu_config_render_basic[] = { -}; - -static const struct i915_oa_reg mux_config_render_basic[] = { - { _MMIO(0x9840), 0x00000080 }, - { _MMIO(0x253a4), 0x01600000 }, - { _MMIO(0x25440), 0x00100000 }, - { _MMIO(0x25128), 0x00000000 }, - { _MMIO(0x2691c), 0x00000800 }, - { _MMIO(0x26aa0), 0x01500000 }, - { _MMIO(0x26b9c), 0x00006000 }, - { _MMIO(0x2791c), 0x00000800 }, - { _MMIO(0x27aa0), 0x01500000 }, - { _MMIO(0x27b9c), 0x00006000 }, - { _MMIO(0x2641c), 0x00000400 }, - { _MMIO(0x25380), 0x00000010 }, - { _MMIO(0x2538c), 0x00000000 }, - { _MMIO(0x25384), 0x0800aaaa }, - { _MMIO(0x25400), 0x00000004 }, - { _MMIO(0x2540c), 0x06029000 }, - { _MMIO(0x25410), 0x00000002 }, - { _MMIO(0x25404), 0x5c30ffff }, - { _MMIO(0x25100), 0x00000016 }, - { _MMIO(0x25110), 0x00000400 }, - { _MMIO(0x25104), 0x00000000 }, - { _MMIO(0x26804), 0x00001211 }, - { _MMIO(0x26884), 0x00000100 }, - { _MMIO(0x26900), 0x00000002 }, - { _MMIO(0x26908), 0x00700000 }, - { _MMIO(0x26904), 0x00000000 }, - { _MMIO(0x26984), 0x00001022 }, - { _MMIO(0x26a04), 0x00000011 }, - { _MMIO(0x26a80), 0x00000006 }, - { _MMIO(0x26a88), 0x00000c02 }, - { _MMIO(0x26a84), 0x00000000 }, - { _MMIO(0x26b04), 0x00001000 }, - { _MMIO(0x26b80), 0x00000002 }, - { _MMIO(0x26b8c), 0x00000007 }, - { _MMIO(0x26b84), 0x00000000 }, - { _MMIO(0x27804), 0x00004844 }, - { _MMIO(0x27884), 0x00000400 }, - { _MMIO(0x27900), 0x00000002 }, - { _MMIO(0x27908), 0x0e000000 }, - { _MMIO(0x27904), 0x00000000 }, - { _MMIO(0x27984), 0x00004088 }, - { _MMIO(0x27a04), 0x00000044 }, - { _MMIO(0x27a80), 0x00000006 }, - { _MMIO(0x27a88), 0x00018040 }, - { _MMIO(0x27a84), 0x00000000 }, - { _MMIO(0x27b04), 0x00004000 }, - { _MMIO(0x27b80), 0x00000002 }, - { _MMIO(0x27b8c), 0x000000e0 }, - { _MMIO(0x27b84), 0x00000000 }, - { _MMIO(0x26104), 0x00002222 }, - { _MMIO(0x26184), 0x0c006666 }, - { _MMIO(0x26284), 0x04000000 }, - { _MMIO(0x26304), 0x04000000 }, - { _MMIO(0x26400), 0x00000002 }, - { _MMIO(0x26410), 0x000000a0 }, - { _MMIO(0x26404), 0x00000000 }, - { _MMIO(0x25420), 0x04108020 }, - { _MMIO(0x25424), 0x1284a420 }, - { _MMIO(0x2541c), 0x00000000 }, - { _MMIO(0x25428), 0x00042049 }, -}; - -static ssize_t -show_render_basic_id(struct device *kdev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "1\n"); -} - -void -i915_perf_load_test_config_hsw(struct drm_i915_private *dev_priv) -{ - strlcpy(dev_priv->perf.oa.test_config.uuid, - "403d8832-1a27-4aa6-a64e-f5389ce7b212", - sizeof(dev_priv->perf.oa.test_config.uuid)); - dev_priv->perf.oa.test_config.id = 1; - - dev_priv->perf.oa.test_config.mux_regs = mux_config_render_basic; - dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_render_basic); - - dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_render_basic; - dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_render_basic); - - dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_render_basic; - dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_render_basic); - - dev_priv->perf.oa.test_config.sysfs_metric.name = "403d8832-1a27-4aa6-a64e-f5389ce7b212"; - dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs; - - 
dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr; - - dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id"; - dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444; - dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_render_basic_id; -} diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.h b/drivers/gpu/drm/i915/i915_oa_hsw.h deleted file mode 100644 index 3d0c870cd0bd..000000000000 --- a/drivers/gpu/drm/i915/i915_oa_hsw.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#ifndef __I915_OA_HSW_H__ -#define __I915_OA_HSW_H__ - -extern void i915_perf_load_test_config_hsw(struct drm_i915_private *dev_priv); - -#endif diff --git a/drivers/gpu/drm/i915/i915_oa_icl.c b/drivers/gpu/drm/i915/i915_oa_icl.c deleted file mode 100644 index baa51427a543..000000000000 --- a/drivers/gpu/drm/i915/i915_oa_icl.c +++ /dev/null @@ -1,99 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#include - -#include "i915_drv.h" -#include "i915_oa_icl.h" - -static const struct i915_oa_reg b_counter_config_test_oa[] = { - { _MMIO(0x2740), 0x00000000 }, - { _MMIO(0x2710), 0x00000000 }, - { _MMIO(0x2714), 0xf0800000 }, - { _MMIO(0x2720), 0x00000000 }, - { _MMIO(0x2724), 0xf0800000 }, - { _MMIO(0x2770), 0x00000004 }, - { _MMIO(0x2774), 0x0000ffff }, - { _MMIO(0x2778), 0x00000003 }, - { _MMIO(0x277c), 0x0000ffff }, - { _MMIO(0x2780), 0x00000007 }, - { _MMIO(0x2784), 0x0000ffff }, - { _MMIO(0x2788), 0x00100002 }, - { _MMIO(0x278c), 0x0000fff7 }, - { _MMIO(0x2790), 0x00100002 }, - { _MMIO(0x2794), 0x0000ffcf }, - { _MMIO(0x2798), 0x00100082 }, - { _MMIO(0x279c), 0x0000ffef }, - { _MMIO(0x27a0), 0x001000c2 }, - { _MMIO(0x27a4), 0x0000ffe7 }, - { _MMIO(0x27a8), 0x00100001 }, - { _MMIO(0x27ac), 0x0000ffe7 }, -}; - -static const struct i915_oa_reg flex_eu_config_test_oa[] = { -}; - -static const struct i915_oa_reg mux_config_test_oa[] = { - { _MMIO(0xd04), 0x00000200 }, - { _MMIO(0x9840), 0x00000000 }, - { _MMIO(0x9884), 0x00000000 }, - { _MMIO(0x9888), 0x10060000 }, - { _MMIO(0x9888), 0x22060000 }, - { _MMIO(0x9888), 0x16060000 }, - { _MMIO(0x9888), 0x24060000 }, - { _MMIO(0x9888), 0x18060000 }, - { _MMIO(0x9888), 0x1a060000 }, - { _MMIO(0x9888), 0x12060000 }, - { _MMIO(0x9888), 0x14060000 }, - { _MMIO(0x9888), 0x10060000 }, - { _MMIO(0x9888), 0x22060000 }, - { _MMIO(0x9884), 0x00000003 }, - { _MMIO(0x9888), 0x16130000 }, - { _MMIO(0x9888), 0x24000001 }, - { _MMIO(0x9888), 0x0e130056 }, - { _MMIO(0x9888), 0x10130000 }, - { _MMIO(0x9888), 0x1a130000 }, - { _MMIO(0x9888), 0x541f0001 }, - { _MMIO(0x9888), 0x181f0000 }, - { _MMIO(0x9888), 0x4c1f0000 }, - { _MMIO(0x9888), 0x301f0000 }, -}; - -static ssize_t -show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "1\n"); -} - -void -i915_perf_load_test_config_icl(struct drm_i915_private *dev_priv) -{ - strlcpy(dev_priv->perf.oa.test_config.uuid, - "a291665e-244b-4b76-9b9a-01de9d3c8068", - sizeof(dev_priv->perf.oa.test_config.uuid)); - dev_priv->perf.oa.test_config.id = 1; - - dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa; - dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa); - - dev_priv->perf.oa.test_config.b_counter_regs = 
b_counter_config_test_oa; - dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa); - - dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa; - dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa); - - dev_priv->perf.oa.test_config.sysfs_metric.name = "a291665e-244b-4b76-9b9a-01de9d3c8068"; - dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs; - - dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr; - - dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id"; - dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444; - dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id; -} diff --git a/drivers/gpu/drm/i915/i915_oa_icl.h b/drivers/gpu/drm/i915/i915_oa_icl.h deleted file mode 100644 index 24eaa97d61ba..000000000000 --- a/drivers/gpu/drm/i915/i915_oa_icl.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#ifndef __I915_OA_ICL_H__ -#define __I915_OA_ICL_H__ - -extern void i915_perf_load_test_config_icl(struct drm_i915_private *dev_priv); - -#endif diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt2.c b/drivers/gpu/drm/i915/i915_oa_kblgt2.c deleted file mode 100644 index 168e49ab0d4d..000000000000 --- a/drivers/gpu/drm/i915/i915_oa_kblgt2.c +++ /dev/null @@ -1,90 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#include - -#include "i915_drv.h" -#include "i915_oa_kblgt2.h" - -static const struct i915_oa_reg b_counter_config_test_oa[] = { - { _MMIO(0x2740), 0x00000000 }, - { _MMIO(0x2744), 0x00800000 }, - { _MMIO(0x2714), 0xf0800000 }, - { _MMIO(0x2710), 0x00000000 }, - { _MMIO(0x2724), 0xf0800000 }, - { _MMIO(0x2720), 0x00000000 }, - { _MMIO(0x2770), 0x00000004 }, - { _MMIO(0x2774), 0x00000000 }, - { _MMIO(0x2778), 0x00000003 }, - { _MMIO(0x277c), 0x00000000 }, - { _MMIO(0x2780), 0x00000007 }, - { _MMIO(0x2784), 0x00000000 }, - { _MMIO(0x2788), 0x00100002 }, - { _MMIO(0x278c), 0x0000fff7 }, - { _MMIO(0x2790), 0x00100002 }, - { _MMIO(0x2794), 0x0000ffcf }, - { _MMIO(0x2798), 0x00100082 }, - { _MMIO(0x279c), 0x0000ffef }, - { _MMIO(0x27a0), 0x001000c2 }, - { _MMIO(0x27a4), 0x0000ffe7 }, - { _MMIO(0x27a8), 0x00100001 }, - { _MMIO(0x27ac), 0x0000ffe7 }, -}; - -static const struct i915_oa_reg flex_eu_config_test_oa[] = { -}; - -static const struct i915_oa_reg mux_config_test_oa[] = { - { _MMIO(0x9840), 0x00000080 }, - { _MMIO(0x9888), 0x11810000 }, - { _MMIO(0x9888), 0x07810013 }, - { _MMIO(0x9888), 0x1f810000 }, - { _MMIO(0x9888), 0x1d810000 }, - { _MMIO(0x9888), 0x1b930040 }, - { _MMIO(0x9888), 0x07e54000 }, - { _MMIO(0x9888), 0x1f908000 }, - { _MMIO(0x9888), 0x11900000 }, - { _MMIO(0x9888), 0x37900000 }, - { _MMIO(0x9888), 0x53900000 }, - { _MMIO(0x9888), 0x45900000 }, - { _MMIO(0x9888), 0x33900000 }, -}; - -static ssize_t -show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "1\n"); -} - -void -i915_perf_load_test_config_kblgt2(struct drm_i915_private *dev_priv) -{ - strlcpy(dev_priv->perf.oa.test_config.uuid, - "baa3c7e4-52b6-4b85-801e-465a94b746dd", - sizeof(dev_priv->perf.oa.test_config.uuid)); - dev_priv->perf.oa.test_config.id = 1; - - 
dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa; - dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa); - - dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa; - dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa); - - dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa; - dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa); - - dev_priv->perf.oa.test_config.sysfs_metric.name = "baa3c7e4-52b6-4b85-801e-465a94b746dd"; - dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs; - - dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr; - - dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id"; - dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444; - dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id; -} diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt2.h b/drivers/gpu/drm/i915/i915_oa_kblgt2.h deleted file mode 100644 index a55398a904de..000000000000 --- a/drivers/gpu/drm/i915/i915_oa_kblgt2.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#ifndef __I915_OA_KBLGT2_H__ -#define __I915_OA_KBLGT2_H__ - -extern void i915_perf_load_test_config_kblgt2(struct drm_i915_private *dev_priv); - -#endif diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt3.c b/drivers/gpu/drm/i915/i915_oa_kblgt3.c deleted file mode 100644 index 6ffa553c388e..000000000000 --- a/drivers/gpu/drm/i915/i915_oa_kblgt3.c +++ /dev/null @@ -1,90 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! 
- */ - -#include - -#include "i915_drv.h" -#include "i915_oa_kblgt3.h" - -static const struct i915_oa_reg b_counter_config_test_oa[] = { - { _MMIO(0x2740), 0x00000000 }, - { _MMIO(0x2744), 0x00800000 }, - { _MMIO(0x2714), 0xf0800000 }, - { _MMIO(0x2710), 0x00000000 }, - { _MMIO(0x2724), 0xf0800000 }, - { _MMIO(0x2720), 0x00000000 }, - { _MMIO(0x2770), 0x00000004 }, - { _MMIO(0x2774), 0x00000000 }, - { _MMIO(0x2778), 0x00000003 }, - { _MMIO(0x277c), 0x00000000 }, - { _MMIO(0x2780), 0x00000007 }, - { _MMIO(0x2784), 0x00000000 }, - { _MMIO(0x2788), 0x00100002 }, - { _MMIO(0x278c), 0x0000fff7 }, - { _MMIO(0x2790), 0x00100002 }, - { _MMIO(0x2794), 0x0000ffcf }, - { _MMIO(0x2798), 0x00100082 }, - { _MMIO(0x279c), 0x0000ffef }, - { _MMIO(0x27a0), 0x001000c2 }, - { _MMIO(0x27a4), 0x0000ffe7 }, - { _MMIO(0x27a8), 0x00100001 }, - { _MMIO(0x27ac), 0x0000ffe7 }, -}; - -static const struct i915_oa_reg flex_eu_config_test_oa[] = { -}; - -static const struct i915_oa_reg mux_config_test_oa[] = { - { _MMIO(0x9840), 0x00000080 }, - { _MMIO(0x9888), 0x11810000 }, - { _MMIO(0x9888), 0x07810013 }, - { _MMIO(0x9888), 0x1f810000 }, - { _MMIO(0x9888), 0x1d810000 }, - { _MMIO(0x9888), 0x1b930040 }, - { _MMIO(0x9888), 0x07e54000 }, - { _MMIO(0x9888), 0x1f908000 }, - { _MMIO(0x9888), 0x11900000 }, - { _MMIO(0x9888), 0x37900000 }, - { _MMIO(0x9888), 0x53900000 }, - { _MMIO(0x9888), 0x45900000 }, - { _MMIO(0x9888), 0x33900000 }, -}; - -static ssize_t -show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "1\n"); -} - -void -i915_perf_load_test_config_kblgt3(struct drm_i915_private *dev_priv) -{ - strlcpy(dev_priv->perf.oa.test_config.uuid, - "f1792f32-6db2-4b50-b4b2-557128f1688d", - sizeof(dev_priv->perf.oa.test_config.uuid)); - dev_priv->perf.oa.test_config.id = 1; - - dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa; - dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa); - - dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa; - dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa); - - dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa; - dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa); - - dev_priv->perf.oa.test_config.sysfs_metric.name = "f1792f32-6db2-4b50-b4b2-557128f1688d"; - dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs; - - dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr; - - dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id"; - dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444; - dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id; -} diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt3.h b/drivers/gpu/drm/i915/i915_oa_kblgt3.h deleted file mode 100644 index 3ddd3483b7cc..000000000000 --- a/drivers/gpu/drm/i915/i915_oa_kblgt3.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! 
- */ - -#ifndef __I915_OA_KBLGT3_H__ -#define __I915_OA_KBLGT3_H__ - -extern void i915_perf_load_test_config_kblgt3(struct drm_i915_private *dev_priv); - -#endif diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt2.c b/drivers/gpu/drm/i915/i915_oa_sklgt2.c deleted file mode 100644 index 7ce6ee851d43..000000000000 --- a/drivers/gpu/drm/i915/i915_oa_sklgt2.c +++ /dev/null @@ -1,89 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#include - -#include "i915_drv.h" -#include "i915_oa_sklgt2.h" - -static const struct i915_oa_reg b_counter_config_test_oa[] = { - { _MMIO(0x2740), 0x00000000 }, - { _MMIO(0x2714), 0xf0800000 }, - { _MMIO(0x2710), 0x00000000 }, - { _MMIO(0x2724), 0xf0800000 }, - { _MMIO(0x2720), 0x00000000 }, - { _MMIO(0x2770), 0x00000004 }, - { _MMIO(0x2774), 0x00000000 }, - { _MMIO(0x2778), 0x00000003 }, - { _MMIO(0x277c), 0x00000000 }, - { _MMIO(0x2780), 0x00000007 }, - { _MMIO(0x2784), 0x00000000 }, - { _MMIO(0x2788), 0x00100002 }, - { _MMIO(0x278c), 0x0000fff7 }, - { _MMIO(0x2790), 0x00100002 }, - { _MMIO(0x2794), 0x0000ffcf }, - { _MMIO(0x2798), 0x00100082 }, - { _MMIO(0x279c), 0x0000ffef }, - { _MMIO(0x27a0), 0x001000c2 }, - { _MMIO(0x27a4), 0x0000ffe7 }, - { _MMIO(0x27a8), 0x00100001 }, - { _MMIO(0x27ac), 0x0000ffe7 }, -}; - -static const struct i915_oa_reg flex_eu_config_test_oa[] = { -}; - -static const struct i915_oa_reg mux_config_test_oa[] = { - { _MMIO(0x9840), 0x00000080 }, - { _MMIO(0x9888), 0x11810000 }, - { _MMIO(0x9888), 0x07810016 }, - { _MMIO(0x9888), 0x1f810000 }, - { _MMIO(0x9888), 0x1d810000 }, - { _MMIO(0x9888), 0x1b930040 }, - { _MMIO(0x9888), 0x07e54000 }, - { _MMIO(0x9888), 0x1f908000 }, - { _MMIO(0x9888), 0x11900000 }, - { _MMIO(0x9888), 0x37900000 }, - { _MMIO(0x9888), 0x53900000 }, - { _MMIO(0x9888), 0x45900000 }, - { _MMIO(0x9888), 0x33900000 }, -}; - -static ssize_t -show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "1\n"); -} - -void -i915_perf_load_test_config_sklgt2(struct drm_i915_private *dev_priv) -{ - strlcpy(dev_priv->perf.oa.test_config.uuid, - "1651949f-0ac0-4cb1-a06f-dafd74a407d1", - sizeof(dev_priv->perf.oa.test_config.uuid)); - dev_priv->perf.oa.test_config.id = 1; - - dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa; - dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa); - - dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa; - dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa); - - dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa; - dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa); - - dev_priv->perf.oa.test_config.sysfs_metric.name = "1651949f-0ac0-4cb1-a06f-dafd74a407d1"; - dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs; - - dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr; - - dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id"; - dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444; - dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id; -} diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt2.h b/drivers/gpu/drm/i915/i915_oa_sklgt2.h deleted file mode 100644 index be6256037239..000000000000 --- a/drivers/gpu/drm/i915/i915_oa_sklgt2.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * 
SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#ifndef __I915_OA_SKLGT2_H__ -#define __I915_OA_SKLGT2_H__ - -extern void i915_perf_load_test_config_sklgt2(struct drm_i915_private *dev_priv); - -#endif diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt3.c b/drivers/gpu/drm/i915/i915_oa_sklgt3.c deleted file mode 100644 index 086ca2631e1c..000000000000 --- a/drivers/gpu/drm/i915/i915_oa_sklgt3.c +++ /dev/null @@ -1,90 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#include - -#include "i915_drv.h" -#include "i915_oa_sklgt3.h" - -static const struct i915_oa_reg b_counter_config_test_oa[] = { - { _MMIO(0x2740), 0x00000000 }, - { _MMIO(0x2744), 0x00800000 }, - { _MMIO(0x2714), 0xf0800000 }, - { _MMIO(0x2710), 0x00000000 }, - { _MMIO(0x2724), 0xf0800000 }, - { _MMIO(0x2720), 0x00000000 }, - { _MMIO(0x2770), 0x00000004 }, - { _MMIO(0x2774), 0x00000000 }, - { _MMIO(0x2778), 0x00000003 }, - { _MMIO(0x277c), 0x00000000 }, - { _MMIO(0x2780), 0x00000007 }, - { _MMIO(0x2784), 0x00000000 }, - { _MMIO(0x2788), 0x00100002 }, - { _MMIO(0x278c), 0x0000fff7 }, - { _MMIO(0x2790), 0x00100002 }, - { _MMIO(0x2794), 0x0000ffcf }, - { _MMIO(0x2798), 0x00100082 }, - { _MMIO(0x279c), 0x0000ffef }, - { _MMIO(0x27a0), 0x001000c2 }, - { _MMIO(0x27a4), 0x0000ffe7 }, - { _MMIO(0x27a8), 0x00100001 }, - { _MMIO(0x27ac), 0x0000ffe7 }, -}; - -static const struct i915_oa_reg flex_eu_config_test_oa[] = { -}; - -static const struct i915_oa_reg mux_config_test_oa[] = { - { _MMIO(0x9840), 0x00000080 }, - { _MMIO(0x9888), 0x11810000 }, - { _MMIO(0x9888), 0x07810013 }, - { _MMIO(0x9888), 0x1f810000 }, - { _MMIO(0x9888), 0x1d810000 }, - { _MMIO(0x9888), 0x1b930040 }, - { _MMIO(0x9888), 0x07e54000 }, - { _MMIO(0x9888), 0x1f908000 }, - { _MMIO(0x9888), 0x11900000 }, - { _MMIO(0x9888), 0x37900000 }, - { _MMIO(0x9888), 0x53900000 }, - { _MMIO(0x9888), 0x45900000 }, - { _MMIO(0x9888), 0x33900000 }, -}; - -static ssize_t -show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "1\n"); -} - -void -i915_perf_load_test_config_sklgt3(struct drm_i915_private *dev_priv) -{ - strlcpy(dev_priv->perf.oa.test_config.uuid, - "2b985803-d3c9-4629-8a4f-634bfecba0e8", - sizeof(dev_priv->perf.oa.test_config.uuid)); - dev_priv->perf.oa.test_config.id = 1; - - dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa; - dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa); - - dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa; - dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa); - - dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa; - dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa); - - dev_priv->perf.oa.test_config.sysfs_metric.name = "2b985803-d3c9-4629-8a4f-634bfecba0e8"; - dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs; - - dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr; - - dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id"; - dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444; - dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id; -} diff --git 
a/drivers/gpu/drm/i915/i915_oa_sklgt3.h b/drivers/gpu/drm/i915/i915_oa_sklgt3.h deleted file mode 100644 index 650beb068e56..000000000000 --- a/drivers/gpu/drm/i915/i915_oa_sklgt3.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#ifndef __I915_OA_SKLGT3_H__ -#define __I915_OA_SKLGT3_H__ - -extern void i915_perf_load_test_config_sklgt3(struct drm_i915_private *dev_priv); - -#endif diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt4.c b/drivers/gpu/drm/i915/i915_oa_sklgt4.c deleted file mode 100644 index b291a6eb8a87..000000000000 --- a/drivers/gpu/drm/i915/i915_oa_sklgt4.c +++ /dev/null @@ -1,90 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#include - -#include "i915_drv.h" -#include "i915_oa_sklgt4.h" - -static const struct i915_oa_reg b_counter_config_test_oa[] = { - { _MMIO(0x2740), 0x00000000 }, - { _MMIO(0x2744), 0x00800000 }, - { _MMIO(0x2714), 0xf0800000 }, - { _MMIO(0x2710), 0x00000000 }, - { _MMIO(0x2724), 0xf0800000 }, - { _MMIO(0x2720), 0x00000000 }, - { _MMIO(0x2770), 0x00000004 }, - { _MMIO(0x2774), 0x00000000 }, - { _MMIO(0x2778), 0x00000003 }, - { _MMIO(0x277c), 0x00000000 }, - { _MMIO(0x2780), 0x00000007 }, - { _MMIO(0x2784), 0x00000000 }, - { _MMIO(0x2788), 0x00100002 }, - { _MMIO(0x278c), 0x0000fff7 }, - { _MMIO(0x2790), 0x00100002 }, - { _MMIO(0x2794), 0x0000ffcf }, - { _MMIO(0x2798), 0x00100082 }, - { _MMIO(0x279c), 0x0000ffef }, - { _MMIO(0x27a0), 0x001000c2 }, - { _MMIO(0x27a4), 0x0000ffe7 }, - { _MMIO(0x27a8), 0x00100001 }, - { _MMIO(0x27ac), 0x0000ffe7 }, -}; - -static const struct i915_oa_reg flex_eu_config_test_oa[] = { -}; - -static const struct i915_oa_reg mux_config_test_oa[] = { - { _MMIO(0x9840), 0x00000080 }, - { _MMIO(0x9888), 0x11810000 }, - { _MMIO(0x9888), 0x07810013 }, - { _MMIO(0x9888), 0x1f810000 }, - { _MMIO(0x9888), 0x1d810000 }, - { _MMIO(0x9888), 0x1b930040 }, - { _MMIO(0x9888), 0x07e54000 }, - { _MMIO(0x9888), 0x1f908000 }, - { _MMIO(0x9888), 0x11900000 }, - { _MMIO(0x9888), 0x37900000 }, - { _MMIO(0x9888), 0x53900000 }, - { _MMIO(0x9888), 0x45900000 }, - { _MMIO(0x9888), 0x33900000 }, -}; - -static ssize_t -show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "1\n"); -} - -void -i915_perf_load_test_config_sklgt4(struct drm_i915_private *dev_priv) -{ - strlcpy(dev_priv->perf.oa.test_config.uuid, - "882fa433-1f4a-4a67-a962-c741888fe5f5", - sizeof(dev_priv->perf.oa.test_config.uuid)); - dev_priv->perf.oa.test_config.id = 1; - - dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa; - dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa); - - dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa; - dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa); - - dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa; - dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa); - - dev_priv->perf.oa.test_config.sysfs_metric.name = "882fa433-1f4a-4a67-a962-c741888fe5f5"; - dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs; - - dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr; - - 
dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id"; - dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444; - dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id; -} diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt4.h b/drivers/gpu/drm/i915/i915_oa_sklgt4.h deleted file mode 100644 index 8dcf849d131e..000000000000 --- a/drivers/gpu/drm/i915/i915_oa_sklgt4.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation - * - * Autogenerated file by GPU Top : https://github.com/rib/gputop - * DO NOT EDIT manually! - */ - -#ifndef __I915_OA_SKLGT4_H__ -#define __I915_OA_SKLGT4_H__ - -extern void i915_perf_load_test_config_sklgt4(struct drm_i915_private *dev_priv); - -#endif diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index d28a5bf80bd7..357e63beb373 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -200,20 +200,20 @@ #include "gt/intel_lrc_reg.h" #include "i915_drv.h" -#include "i915_oa_hsw.h" -#include "i915_oa_bdw.h" -#include "i915_oa_chv.h" -#include "i915_oa_sklgt2.h" -#include "i915_oa_sklgt3.h" -#include "i915_oa_sklgt4.h" -#include "i915_oa_bxt.h" -#include "i915_oa_kblgt2.h" -#include "i915_oa_kblgt3.h" -#include "i915_oa_glk.h" -#include "i915_oa_cflgt2.h" -#include "i915_oa_cflgt3.h" -#include "i915_oa_cnl.h" -#include "i915_oa_icl.h" +#include "oa/i915_oa_hsw.h" +#include "oa/i915_oa_bdw.h" +#include "oa/i915_oa_chv.h" +#include "oa/i915_oa_sklgt2.h" +#include "oa/i915_oa_sklgt3.h" +#include "oa/i915_oa_sklgt4.h" +#include "oa/i915_oa_bxt.h" +#include "oa/i915_oa_kblgt2.h" +#include "oa/i915_oa_kblgt3.h" +#include "oa/i915_oa_glk.h" +#include "oa/i915_oa_cflgt2.h" +#include "oa/i915_oa_cflgt3.h" +#include "oa/i915_oa_cnl.h" +#include "oa/i915_oa_icl.h" /* HW requires this to be a power of two, between 128k and 16M, though driver * is currently generally designed assuming the largest 16M size is used such diff --git a/drivers/gpu/drm/i915/oa/Makefile b/drivers/gpu/drm/i915/oa/Makefile new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/drivers/gpu/drm/i915/oa/i915_oa_bdw.c b/drivers/gpu/drm/i915/oa/i915_oa_bdw.c new file mode 100644 index 000000000000..4acdb94555b7 --- /dev/null +++ b/drivers/gpu/drm/i915/oa/i915_oa_bdw.c @@ -0,0 +1,91 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! 
+ */ + +#include + +#include "i915_drv.h" +#include "i915_oa_bdw.h" + +static const struct i915_oa_reg b_counter_config_test_oa[] = { + { _MMIO(0x2740), 0x00000000 }, + { _MMIO(0x2744), 0x00800000 }, + { _MMIO(0x2714), 0xf0800000 }, + { _MMIO(0x2710), 0x00000000 }, + { _MMIO(0x2724), 0xf0800000 }, + { _MMIO(0x2720), 0x00000000 }, + { _MMIO(0x2770), 0x00000004 }, + { _MMIO(0x2774), 0x00000000 }, + { _MMIO(0x2778), 0x00000003 }, + { _MMIO(0x277c), 0x00000000 }, + { _MMIO(0x2780), 0x00000007 }, + { _MMIO(0x2784), 0x00000000 }, + { _MMIO(0x2788), 0x00100002 }, + { _MMIO(0x278c), 0x0000fff7 }, + { _MMIO(0x2790), 0x00100002 }, + { _MMIO(0x2794), 0x0000ffcf }, + { _MMIO(0x2798), 0x00100082 }, + { _MMIO(0x279c), 0x0000ffef }, + { _MMIO(0x27a0), 0x001000c2 }, + { _MMIO(0x27a4), 0x0000ffe7 }, + { _MMIO(0x27a8), 0x00100001 }, + { _MMIO(0x27ac), 0x0000ffe7 }, +}; + +static const struct i915_oa_reg flex_eu_config_test_oa[] = { +}; + +static const struct i915_oa_reg mux_config_test_oa[] = { + { _MMIO(0x9840), 0x000000a0 }, + { _MMIO(0x9888), 0x198b0000 }, + { _MMIO(0x9888), 0x078b0066 }, + { _MMIO(0x9888), 0x118b0000 }, + { _MMIO(0x9888), 0x258b0000 }, + { _MMIO(0x9888), 0x21850008 }, + { _MMIO(0x9888), 0x0d834000 }, + { _MMIO(0x9888), 0x07844000 }, + { _MMIO(0x9888), 0x17804000 }, + { _MMIO(0x9888), 0x21800000 }, + { _MMIO(0x9888), 0x4f800000 }, + { _MMIO(0x9888), 0x41800000 }, + { _MMIO(0x9888), 0x31800000 }, + { _MMIO(0x9840), 0x00000080 }, +}; + +static ssize_t +show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "1\n"); +} + +void +i915_perf_load_test_config_bdw(struct drm_i915_private *dev_priv) +{ + strlcpy(dev_priv->perf.oa.test_config.uuid, + "d6de6f55-e526-4f79-a6a6-d7315c09044e", + sizeof(dev_priv->perf.oa.test_config.uuid)); + dev_priv->perf.oa.test_config.id = 1; + + dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa; + dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa); + + dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa; + dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa); + + dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa; + dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa); + + dev_priv->perf.oa.test_config.sysfs_metric.name = "d6de6f55-e526-4f79-a6a6-d7315c09044e"; + dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs; + + dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr; + + dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id"; + dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444; + dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id; +} diff --git a/drivers/gpu/drm/i915/oa/i915_oa_bdw.h b/drivers/gpu/drm/i915/oa/i915_oa_bdw.h new file mode 100644 index 000000000000..0e667f1a8aa1 --- /dev/null +++ b/drivers/gpu/drm/i915/oa/i915_oa_bdw.h @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! 
+ */ + +#ifndef __I915_OA_BDW_H__ +#define __I915_OA_BDW_H__ + +extern void i915_perf_load_test_config_bdw(struct drm_i915_private *dev_priv); + +#endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_bxt.c b/drivers/gpu/drm/i915/oa/i915_oa_bxt.c new file mode 100644 index 000000000000..a44195c39923 --- /dev/null +++ b/drivers/gpu/drm/i915/oa/i915_oa_bxt.c @@ -0,0 +1,89 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! + */ + +#include + +#include "i915_drv.h" +#include "i915_oa_bxt.h" + +static const struct i915_oa_reg b_counter_config_test_oa[] = { + { _MMIO(0x2740), 0x00000000 }, + { _MMIO(0x2744), 0x00800000 }, + { _MMIO(0x2714), 0xf0800000 }, + { _MMIO(0x2710), 0x00000000 }, + { _MMIO(0x2724), 0xf0800000 }, + { _MMIO(0x2720), 0x00000000 }, + { _MMIO(0x2770), 0x00000004 }, + { _MMIO(0x2774), 0x00000000 }, + { _MMIO(0x2778), 0x00000003 }, + { _MMIO(0x277c), 0x00000000 }, + { _MMIO(0x2780), 0x00000007 }, + { _MMIO(0x2784), 0x00000000 }, + { _MMIO(0x2788), 0x00100002 }, + { _MMIO(0x278c), 0x0000fff7 }, + { _MMIO(0x2790), 0x00100002 }, + { _MMIO(0x2794), 0x0000ffcf }, + { _MMIO(0x2798), 0x00100082 }, + { _MMIO(0x279c), 0x0000ffef }, + { _MMIO(0x27a0), 0x001000c2 }, + { _MMIO(0x27a4), 0x0000ffe7 }, + { _MMIO(0x27a8), 0x00100001 }, + { _MMIO(0x27ac), 0x0000ffe7 }, +}; + +static const struct i915_oa_reg flex_eu_config_test_oa[] = { +}; + +static const struct i915_oa_reg mux_config_test_oa[] = { + { _MMIO(0x9840), 0x00000080 }, + { _MMIO(0x9888), 0x19800000 }, + { _MMIO(0x9888), 0x07800063 }, + { _MMIO(0x9888), 0x11800000 }, + { _MMIO(0x9888), 0x23810008 }, + { _MMIO(0x9888), 0x1d950400 }, + { _MMIO(0x9888), 0x0f922000 }, + { _MMIO(0x9888), 0x1f908000 }, + { _MMIO(0x9888), 0x37900000 }, + { _MMIO(0x9888), 0x55900000 }, + { _MMIO(0x9888), 0x47900000 }, + { _MMIO(0x9888), 0x33900000 }, +}; + +static ssize_t +show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "1\n"); +} + +void +i915_perf_load_test_config_bxt(struct drm_i915_private *dev_priv) +{ + strlcpy(dev_priv->perf.oa.test_config.uuid, + "5ee72f5c-092f-421e-8b70-225f7c3e9612", + sizeof(dev_priv->perf.oa.test_config.uuid)); + dev_priv->perf.oa.test_config.id = 1; + + dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa; + dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa); + + dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa; + dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa); + + dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa; + dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa); + + dev_priv->perf.oa.test_config.sysfs_metric.name = "5ee72f5c-092f-421e-8b70-225f7c3e9612"; + dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs; + + dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr; + + dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id"; + dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444; + dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id; +} diff --git a/drivers/gpu/drm/i915/oa/i915_oa_bxt.h b/drivers/gpu/drm/i915/oa/i915_oa_bxt.h new file mode 100644 index 000000000000..679e92cf4f1d --- /dev/null +++ b/drivers/gpu/drm/i915/oa/i915_oa_bxt.h @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: MIT + * + * 
Copyright © 2018 Intel Corporation + * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! + */ + +#ifndef __I915_OA_BXT_H__ +#define __I915_OA_BXT_H__ + +extern void i915_perf_load_test_config_bxt(struct drm_i915_private *dev_priv); + +#endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c b/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c new file mode 100644 index 000000000000..7f60d51b8761 --- /dev/null +++ b/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c @@ -0,0 +1,90 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! + */ + +#include + +#include "i915_drv.h" +#include "i915_oa_cflgt2.h" + +static const struct i915_oa_reg b_counter_config_test_oa[] = { + { _MMIO(0x2740), 0x00000000 }, + { _MMIO(0x2744), 0x00800000 }, + { _MMIO(0x2714), 0xf0800000 }, + { _MMIO(0x2710), 0x00000000 }, + { _MMIO(0x2724), 0xf0800000 }, + { _MMIO(0x2720), 0x00000000 }, + { _MMIO(0x2770), 0x00000004 }, + { _MMIO(0x2774), 0x00000000 }, + { _MMIO(0x2778), 0x00000003 }, + { _MMIO(0x277c), 0x00000000 }, + { _MMIO(0x2780), 0x00000007 }, + { _MMIO(0x2784), 0x00000000 }, + { _MMIO(0x2788), 0x00100002 }, + { _MMIO(0x278c), 0x0000fff7 }, + { _MMIO(0x2790), 0x00100002 }, + { _MMIO(0x2794), 0x0000ffcf }, + { _MMIO(0x2798), 0x00100082 }, + { _MMIO(0x279c), 0x0000ffef }, + { _MMIO(0x27a0), 0x001000c2 }, + { _MMIO(0x27a4), 0x0000ffe7 }, + { _MMIO(0x27a8), 0x00100001 }, + { _MMIO(0x27ac), 0x0000ffe7 }, +}; + +static const struct i915_oa_reg flex_eu_config_test_oa[] = { +}; + +static const struct i915_oa_reg mux_config_test_oa[] = { + { _MMIO(0x9840), 0x00000080 }, + { _MMIO(0x9888), 0x11810000 }, + { _MMIO(0x9888), 0x07810013 }, + { _MMIO(0x9888), 0x1f810000 }, + { _MMIO(0x9888), 0x1d810000 }, + { _MMIO(0x9888), 0x1b930040 }, + { _MMIO(0x9888), 0x07e54000 }, + { _MMIO(0x9888), 0x1f908000 }, + { _MMIO(0x9888), 0x11900000 }, + { _MMIO(0x9888), 0x37900000 }, + { _MMIO(0x9888), 0x53900000 }, + { _MMIO(0x9888), 0x45900000 }, + { _MMIO(0x9888), 0x33900000 }, +}; + +static ssize_t +show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "1\n"); +} + +void +i915_perf_load_test_config_cflgt2(struct drm_i915_private *dev_priv) +{ + strlcpy(dev_priv->perf.oa.test_config.uuid, + "74fb4902-d3d3-4237-9e90-cbdc68d0a446", + sizeof(dev_priv->perf.oa.test_config.uuid)); + dev_priv->perf.oa.test_config.id = 1; + + dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa; + dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa); + + dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa; + dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa); + + dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa; + dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa); + + dev_priv->perf.oa.test_config.sysfs_metric.name = "74fb4902-d3d3-4237-9e90-cbdc68d0a446"; + dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs; + + dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr; + + dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id"; + dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444; + dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id; +} diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h 
b/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h new file mode 100644 index 000000000000..4d6025559bbe --- /dev/null +++ b/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! + */ + +#ifndef __I915_OA_CFLGT2_H__ +#define __I915_OA_CFLGT2_H__ + +extern void i915_perf_load_test_config_cflgt2(struct drm_i915_private *dev_priv); + +#endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c b/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c new file mode 100644 index 000000000000..a92c38e3a0ce --- /dev/null +++ b/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c @@ -0,0 +1,90 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! + */ + +#include + +#include "i915_drv.h" +#include "i915_oa_cflgt3.h" + +static const struct i915_oa_reg b_counter_config_test_oa[] = { + { _MMIO(0x2740), 0x00000000 }, + { _MMIO(0x2744), 0x00800000 }, + { _MMIO(0x2714), 0xf0800000 }, + { _MMIO(0x2710), 0x00000000 }, + { _MMIO(0x2724), 0xf0800000 }, + { _MMIO(0x2720), 0x00000000 }, + { _MMIO(0x2770), 0x00000004 }, + { _MMIO(0x2774), 0x00000000 }, + { _MMIO(0x2778), 0x00000003 }, + { _MMIO(0x277c), 0x00000000 }, + { _MMIO(0x2780), 0x00000007 }, + { _MMIO(0x2784), 0x00000000 }, + { _MMIO(0x2788), 0x00100002 }, + { _MMIO(0x278c), 0x0000fff7 }, + { _MMIO(0x2790), 0x00100002 }, + { _MMIO(0x2794), 0x0000ffcf }, + { _MMIO(0x2798), 0x00100082 }, + { _MMIO(0x279c), 0x0000ffef }, + { _MMIO(0x27a0), 0x001000c2 }, + { _MMIO(0x27a4), 0x0000ffe7 }, + { _MMIO(0x27a8), 0x00100001 }, + { _MMIO(0x27ac), 0x0000ffe7 }, +}; + +static const struct i915_oa_reg flex_eu_config_test_oa[] = { +}; + +static const struct i915_oa_reg mux_config_test_oa[] = { + { _MMIO(0x9840), 0x00000080 }, + { _MMIO(0x9888), 0x11810000 }, + { _MMIO(0x9888), 0x07810013 }, + { _MMIO(0x9888), 0x1f810000 }, + { _MMIO(0x9888), 0x1d810000 }, + { _MMIO(0x9888), 0x1b930040 }, + { _MMIO(0x9888), 0x07e54000 }, + { _MMIO(0x9888), 0x1f908000 }, + { _MMIO(0x9888), 0x11900000 }, + { _MMIO(0x9888), 0x37900000 }, + { _MMIO(0x9888), 0x53900000 }, + { _MMIO(0x9888), 0x45900000 }, + { _MMIO(0x9888), 0x33900000 }, +}; + +static ssize_t +show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "1\n"); +} + +void +i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv) +{ + strlcpy(dev_priv->perf.oa.test_config.uuid, + "577e8e2c-3fa0-4875-8743-3538d585e3b0", + sizeof(dev_priv->perf.oa.test_config.uuid)); + dev_priv->perf.oa.test_config.id = 1; + + dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa; + dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa); + + dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa; + dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa); + + dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa; + dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa); + + dev_priv->perf.oa.test_config.sysfs_metric.name = "577e8e2c-3fa0-4875-8743-3538d585e3b0"; + dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs; + + dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr; + + dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id"; + 
dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444; + dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id; +} diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h b/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h new file mode 100644 index 000000000000..0697f4077402 --- /dev/null +++ b/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! + */ + +#ifndef __I915_OA_CFLGT3_H__ +#define __I915_OA_CFLGT3_H__ + +extern void i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv); + +#endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_chv.c b/drivers/gpu/drm/i915/oa/i915_oa_chv.c new file mode 100644 index 000000000000..71ec889a0114 --- /dev/null +++ b/drivers/gpu/drm/i915/oa/i915_oa_chv.c @@ -0,0 +1,90 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! + */ + +#include + +#include "i915_drv.h" +#include "i915_oa_chv.h" + +static const struct i915_oa_reg b_counter_config_test_oa[] = { + { _MMIO(0x2740), 0x00000000 }, + { _MMIO(0x2744), 0x00800000 }, + { _MMIO(0x2714), 0xf0800000 }, + { _MMIO(0x2710), 0x00000000 }, + { _MMIO(0x2724), 0xf0800000 }, + { _MMIO(0x2720), 0x00000000 }, + { _MMIO(0x2770), 0x00000004 }, + { _MMIO(0x2774), 0x00000000 }, + { _MMIO(0x2778), 0x00000003 }, + { _MMIO(0x277c), 0x00000000 }, + { _MMIO(0x2780), 0x00000007 }, + { _MMIO(0x2784), 0x00000000 }, + { _MMIO(0x2788), 0x00100002 }, + { _MMIO(0x278c), 0x0000fff7 }, + { _MMIO(0x2790), 0x00100002 }, + { _MMIO(0x2794), 0x0000ffcf }, + { _MMIO(0x2798), 0x00100082 }, + { _MMIO(0x279c), 0x0000ffef }, + { _MMIO(0x27a0), 0x001000c2 }, + { _MMIO(0x27a4), 0x0000ffe7 }, + { _MMIO(0x27a8), 0x00100001 }, + { _MMIO(0x27ac), 0x0000ffe7 }, +}; + +static const struct i915_oa_reg flex_eu_config_test_oa[] = { +}; + +static const struct i915_oa_reg mux_config_test_oa[] = { + { _MMIO(0x9840), 0x000000a0 }, + { _MMIO(0x9888), 0x59800000 }, + { _MMIO(0x9888), 0x59800001 }, + { _MMIO(0x9888), 0x338b0000 }, + { _MMIO(0x9888), 0x258b0066 }, + { _MMIO(0x9888), 0x058b0000 }, + { _MMIO(0x9888), 0x038b0000 }, + { _MMIO(0x9888), 0x03844000 }, + { _MMIO(0x9888), 0x47800080 }, + { _MMIO(0x9888), 0x57800000 }, + { _MMIO(0x1823a4), 0x00000000 }, + { _MMIO(0x9888), 0x59800000 }, + { _MMIO(0x9840), 0x00000080 }, +}; + +static ssize_t +show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "1\n"); +} + +void +i915_perf_load_test_config_chv(struct drm_i915_private *dev_priv) +{ + strlcpy(dev_priv->perf.oa.test_config.uuid, + "4a534b07-cba3-414d-8d60-874830e883aa", + sizeof(dev_priv->perf.oa.test_config.uuid)); + dev_priv->perf.oa.test_config.id = 1; + + dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa; + dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa); + + dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa; + dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa); + + dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa; + dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa); + + dev_priv->perf.oa.test_config.sysfs_metric.name = "4a534b07-cba3-414d-8d60-874830e883aa"; + dev_priv->perf.oa.test_config.sysfs_metric.attrs = 
dev_priv->perf.oa.test_config.attrs; + + dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr; + + dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id"; + dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444; + dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id; +} diff --git a/drivers/gpu/drm/i915/oa/i915_oa_chv.h b/drivers/gpu/drm/i915/oa/i915_oa_chv.h new file mode 100644 index 000000000000..0986eae3135f --- /dev/null +++ b/drivers/gpu/drm/i915/oa/i915_oa_chv.h @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! + */ + +#ifndef __I915_OA_CHV_H__ +#define __I915_OA_CHV_H__ + +extern void i915_perf_load_test_config_chv(struct drm_i915_private *dev_priv); + +#endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cnl.c b/drivers/gpu/drm/i915/oa/i915_oa_cnl.c new file mode 100644 index 000000000000..5c23d883d6c9 --- /dev/null +++ b/drivers/gpu/drm/i915/oa/i915_oa_cnl.c @@ -0,0 +1,102 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! + */ + +#include + +#include "i915_drv.h" +#include "i915_oa_cnl.h" + +static const struct i915_oa_reg b_counter_config_test_oa[] = { + { _MMIO(0x2740), 0x00000000 }, + { _MMIO(0x2710), 0x00000000 }, + { _MMIO(0x2714), 0xf0800000 }, + { _MMIO(0x2720), 0x00000000 }, + { _MMIO(0x2724), 0xf0800000 }, + { _MMIO(0x2770), 0x00000004 }, + { _MMIO(0x2774), 0x0000ffff }, + { _MMIO(0x2778), 0x00000003 }, + { _MMIO(0x277c), 0x0000ffff }, + { _MMIO(0x2780), 0x00000007 }, + { _MMIO(0x2784), 0x0000ffff }, + { _MMIO(0x2788), 0x00100002 }, + { _MMIO(0x278c), 0x0000fff7 }, + { _MMIO(0x2790), 0x00100002 }, + { _MMIO(0x2794), 0x0000ffcf }, + { _MMIO(0x2798), 0x00100082 }, + { _MMIO(0x279c), 0x0000ffef }, + { _MMIO(0x27a0), 0x001000c2 }, + { _MMIO(0x27a4), 0x0000ffe7 }, + { _MMIO(0x27a8), 0x00100001 }, + { _MMIO(0x27ac), 0x0000ffe7 }, +}; + +static const struct i915_oa_reg flex_eu_config_test_oa[] = { +}; + +static const struct i915_oa_reg mux_config_test_oa[] = { + { _MMIO(0xd04), 0x00000200 }, + { _MMIO(0x9884), 0x00000007 }, + { _MMIO(0x9888), 0x17060000 }, + { _MMIO(0x9840), 0x00000000 }, + { _MMIO(0x9884), 0x00000007 }, + { _MMIO(0x9888), 0x13034000 }, + { _MMIO(0x9884), 0x00000007 }, + { _MMIO(0x9888), 0x07060066 }, + { _MMIO(0x9884), 0x00000007 }, + { _MMIO(0x9888), 0x05060000 }, + { _MMIO(0x9884), 0x00000007 }, + { _MMIO(0x9888), 0x0f080040 }, + { _MMIO(0x9884), 0x00000007 }, + { _MMIO(0x9888), 0x07091000 }, + { _MMIO(0x9884), 0x00000007 }, + { _MMIO(0x9888), 0x0f041000 }, + { _MMIO(0x9884), 0x00000007 }, + { _MMIO(0x9888), 0x1d004000 }, + { _MMIO(0x9884), 0x00000007 }, + { _MMIO(0x9888), 0x35000000 }, + { _MMIO(0x9884), 0x00000007 }, + { _MMIO(0x9888), 0x49000000 }, + { _MMIO(0x9884), 0x00000007 }, + { _MMIO(0x9888), 0x3d000000 }, + { _MMIO(0x9884), 0x00000007 }, + { _MMIO(0x9888), 0x31000000 }, +}; + +static ssize_t +show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "1\n"); +} + +void +i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv) +{ + strlcpy(dev_priv->perf.oa.test_config.uuid, + "db41edd4-d8e7-4730-ad11-b9a2d6833503", + sizeof(dev_priv->perf.oa.test_config.uuid)); + dev_priv->perf.oa.test_config.id = 1; + + dev_priv->perf.oa.test_config.mux_regs = 
mux_config_test_oa; + dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa); + + dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa; + dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa); + + dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa; + dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa); + + dev_priv->perf.oa.test_config.sysfs_metric.name = "db41edd4-d8e7-4730-ad11-b9a2d6833503"; + dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs; + + dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr; + + dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id"; + dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444; + dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id; +} diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cnl.h b/drivers/gpu/drm/i915/oa/i915_oa_cnl.h new file mode 100644 index 000000000000..e830a406aff2 --- /dev/null +++ b/drivers/gpu/drm/i915/oa/i915_oa_cnl.h @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! + */ + +#ifndef __I915_OA_CNL_H__ +#define __I915_OA_CNL_H__ + +extern void i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv); + +#endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_glk.c b/drivers/gpu/drm/i915/oa/i915_oa_glk.c new file mode 100644 index 000000000000..4bdda66df7d2 --- /dev/null +++ b/drivers/gpu/drm/i915/oa/i915_oa_glk.c @@ -0,0 +1,89 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! 
+ */ + +#include + +#include "i915_drv.h" +#include "i915_oa_glk.h" + +static const struct i915_oa_reg b_counter_config_test_oa[] = { + { _MMIO(0x2740), 0x00000000 }, + { _MMIO(0x2744), 0x00800000 }, + { _MMIO(0x2714), 0xf0800000 }, + { _MMIO(0x2710), 0x00000000 }, + { _MMIO(0x2724), 0xf0800000 }, + { _MMIO(0x2720), 0x00000000 }, + { _MMIO(0x2770), 0x00000004 }, + { _MMIO(0x2774), 0x00000000 }, + { _MMIO(0x2778), 0x00000003 }, + { _MMIO(0x277c), 0x00000000 }, + { _MMIO(0x2780), 0x00000007 }, + { _MMIO(0x2784), 0x00000000 }, + { _MMIO(0x2788), 0x00100002 }, + { _MMIO(0x278c), 0x0000fff7 }, + { _MMIO(0x2790), 0x00100002 }, + { _MMIO(0x2794), 0x0000ffcf }, + { _MMIO(0x2798), 0x00100082 }, + { _MMIO(0x279c), 0x0000ffef }, + { _MMIO(0x27a0), 0x001000c2 }, + { _MMIO(0x27a4), 0x0000ffe7 }, + { _MMIO(0x27a8), 0x00100001 }, + { _MMIO(0x27ac), 0x0000ffe7 }, +}; + +static const struct i915_oa_reg flex_eu_config_test_oa[] = { +}; + +static const struct i915_oa_reg mux_config_test_oa[] = { + { _MMIO(0x9840), 0x00000080 }, + { _MMIO(0x9888), 0x19800000 }, + { _MMIO(0x9888), 0x07800063 }, + { _MMIO(0x9888), 0x11800000 }, + { _MMIO(0x9888), 0x23810008 }, + { _MMIO(0x9888), 0x1d950400 }, + { _MMIO(0x9888), 0x0f922000 }, + { _MMIO(0x9888), 0x1f908000 }, + { _MMIO(0x9888), 0x37900000 }, + { _MMIO(0x9888), 0x55900000 }, + { _MMIO(0x9888), 0x47900000 }, + { _MMIO(0x9888), 0x33900000 }, +}; + +static ssize_t +show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "1\n"); +} + +void +i915_perf_load_test_config_glk(struct drm_i915_private *dev_priv) +{ + strlcpy(dev_priv->perf.oa.test_config.uuid, + "dd3fd789-e783-4204-8cd0-b671bbccb0cf", + sizeof(dev_priv->perf.oa.test_config.uuid)); + dev_priv->perf.oa.test_config.id = 1; + + dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa; + dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa); + + dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa; + dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa); + + dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa; + dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa); + + dev_priv->perf.oa.test_config.sysfs_metric.name = "dd3fd789-e783-4204-8cd0-b671bbccb0cf"; + dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs; + + dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr; + + dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id"; + dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444; + dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id; +} diff --git a/drivers/gpu/drm/i915/oa/i915_oa_glk.h b/drivers/gpu/drm/i915/oa/i915_oa_glk.h new file mode 100644 index 000000000000..06dedf991edb --- /dev/null +++ b/drivers/gpu/drm/i915/oa/i915_oa_glk.h @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! 
+ */ + +#ifndef __I915_OA_GLK_H__ +#define __I915_OA_GLK_H__ + +extern void i915_perf_load_test_config_glk(struct drm_i915_private *dev_priv); + +#endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_hsw.c b/drivers/gpu/drm/i915/oa/i915_oa_hsw.c new file mode 100644 index 000000000000..cc6526fdd2bd --- /dev/null +++ b/drivers/gpu/drm/i915/oa/i915_oa_hsw.c @@ -0,0 +1,119 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! + */ + +#include + +#include "i915_drv.h" +#include "i915_oa_hsw.h" + +static const struct i915_oa_reg b_counter_config_render_basic[] = { + { _MMIO(0x2724), 0x00800000 }, + { _MMIO(0x2720), 0x00000000 }, + { _MMIO(0x2714), 0x00800000 }, + { _MMIO(0x2710), 0x00000000 }, +}; + +static const struct i915_oa_reg flex_eu_config_render_basic[] = { +}; + +static const struct i915_oa_reg mux_config_render_basic[] = { + { _MMIO(0x9840), 0x00000080 }, + { _MMIO(0x253a4), 0x01600000 }, + { _MMIO(0x25440), 0x00100000 }, + { _MMIO(0x25128), 0x00000000 }, + { _MMIO(0x2691c), 0x00000800 }, + { _MMIO(0x26aa0), 0x01500000 }, + { _MMIO(0x26b9c), 0x00006000 }, + { _MMIO(0x2791c), 0x00000800 }, + { _MMIO(0x27aa0), 0x01500000 }, + { _MMIO(0x27b9c), 0x00006000 }, + { _MMIO(0x2641c), 0x00000400 }, + { _MMIO(0x25380), 0x00000010 }, + { _MMIO(0x2538c), 0x00000000 }, + { _MMIO(0x25384), 0x0800aaaa }, + { _MMIO(0x25400), 0x00000004 }, + { _MMIO(0x2540c), 0x06029000 }, + { _MMIO(0x25410), 0x00000002 }, + { _MMIO(0x25404), 0x5c30ffff }, + { _MMIO(0x25100), 0x00000016 }, + { _MMIO(0x25110), 0x00000400 }, + { _MMIO(0x25104), 0x00000000 }, + { _MMIO(0x26804), 0x00001211 }, + { _MMIO(0x26884), 0x00000100 }, + { _MMIO(0x26900), 0x00000002 }, + { _MMIO(0x26908), 0x00700000 }, + { _MMIO(0x26904), 0x00000000 }, + { _MMIO(0x26984), 0x00001022 }, + { _MMIO(0x26a04), 0x00000011 }, + { _MMIO(0x26a80), 0x00000006 }, + { _MMIO(0x26a88), 0x00000c02 }, + { _MMIO(0x26a84), 0x00000000 }, + { _MMIO(0x26b04), 0x00001000 }, + { _MMIO(0x26b80), 0x00000002 }, + { _MMIO(0x26b8c), 0x00000007 }, + { _MMIO(0x26b84), 0x00000000 }, + { _MMIO(0x27804), 0x00004844 }, + { _MMIO(0x27884), 0x00000400 }, + { _MMIO(0x27900), 0x00000002 }, + { _MMIO(0x27908), 0x0e000000 }, + { _MMIO(0x27904), 0x00000000 }, + { _MMIO(0x27984), 0x00004088 }, + { _MMIO(0x27a04), 0x00000044 }, + { _MMIO(0x27a80), 0x00000006 }, + { _MMIO(0x27a88), 0x00018040 }, + { _MMIO(0x27a84), 0x00000000 }, + { _MMIO(0x27b04), 0x00004000 }, + { _MMIO(0x27b80), 0x00000002 }, + { _MMIO(0x27b8c), 0x000000e0 }, + { _MMIO(0x27b84), 0x00000000 }, + { _MMIO(0x26104), 0x00002222 }, + { _MMIO(0x26184), 0x0c006666 }, + { _MMIO(0x26284), 0x04000000 }, + { _MMIO(0x26304), 0x04000000 }, + { _MMIO(0x26400), 0x00000002 }, + { _MMIO(0x26410), 0x000000a0 }, + { _MMIO(0x26404), 0x00000000 }, + { _MMIO(0x25420), 0x04108020 }, + { _MMIO(0x25424), 0x1284a420 }, + { _MMIO(0x2541c), 0x00000000 }, + { _MMIO(0x25428), 0x00042049 }, +}; + +static ssize_t +show_render_basic_id(struct device *kdev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "1\n"); +} + +void +i915_perf_load_test_config_hsw(struct drm_i915_private *dev_priv) +{ + strlcpy(dev_priv->perf.oa.test_config.uuid, + "403d8832-1a27-4aa6-a64e-f5389ce7b212", + sizeof(dev_priv->perf.oa.test_config.uuid)); + dev_priv->perf.oa.test_config.id = 1; + + dev_priv->perf.oa.test_config.mux_regs = mux_config_render_basic; + dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_render_basic); + 
+ dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_render_basic; + dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_render_basic); + + dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_render_basic; + dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_render_basic); + + dev_priv->perf.oa.test_config.sysfs_metric.name = "403d8832-1a27-4aa6-a64e-f5389ce7b212"; + dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs; + + dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr; + + dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id"; + dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444; + dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_render_basic_id; +} diff --git a/drivers/gpu/drm/i915/oa/i915_oa_hsw.h b/drivers/gpu/drm/i915/oa/i915_oa_hsw.h new file mode 100644 index 000000000000..3d0c870cd0bd --- /dev/null +++ b/drivers/gpu/drm/i915/oa/i915_oa_hsw.h @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! + */ + +#ifndef __I915_OA_HSW_H__ +#define __I915_OA_HSW_H__ + +extern void i915_perf_load_test_config_hsw(struct drm_i915_private *dev_priv); + +#endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_icl.c b/drivers/gpu/drm/i915/oa/i915_oa_icl.c new file mode 100644 index 000000000000..baa51427a543 --- /dev/null +++ b/drivers/gpu/drm/i915/oa/i915_oa_icl.c @@ -0,0 +1,99 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! 
+ */ + +#include + +#include "i915_drv.h" +#include "i915_oa_icl.h" + +static const struct i915_oa_reg b_counter_config_test_oa[] = { + { _MMIO(0x2740), 0x00000000 }, + { _MMIO(0x2710), 0x00000000 }, + { _MMIO(0x2714), 0xf0800000 }, + { _MMIO(0x2720), 0x00000000 }, + { _MMIO(0x2724), 0xf0800000 }, + { _MMIO(0x2770), 0x00000004 }, + { _MMIO(0x2774), 0x0000ffff }, + { _MMIO(0x2778), 0x00000003 }, + { _MMIO(0x277c), 0x0000ffff }, + { _MMIO(0x2780), 0x00000007 }, + { _MMIO(0x2784), 0x0000ffff }, + { _MMIO(0x2788), 0x00100002 }, + { _MMIO(0x278c), 0x0000fff7 }, + { _MMIO(0x2790), 0x00100002 }, + { _MMIO(0x2794), 0x0000ffcf }, + { _MMIO(0x2798), 0x00100082 }, + { _MMIO(0x279c), 0x0000ffef }, + { _MMIO(0x27a0), 0x001000c2 }, + { _MMIO(0x27a4), 0x0000ffe7 }, + { _MMIO(0x27a8), 0x00100001 }, + { _MMIO(0x27ac), 0x0000ffe7 }, +}; + +static const struct i915_oa_reg flex_eu_config_test_oa[] = { +}; + +static const struct i915_oa_reg mux_config_test_oa[] = { + { _MMIO(0xd04), 0x00000200 }, + { _MMIO(0x9840), 0x00000000 }, + { _MMIO(0x9884), 0x00000000 }, + { _MMIO(0x9888), 0x10060000 }, + { _MMIO(0x9888), 0x22060000 }, + { _MMIO(0x9888), 0x16060000 }, + { _MMIO(0x9888), 0x24060000 }, + { _MMIO(0x9888), 0x18060000 }, + { _MMIO(0x9888), 0x1a060000 }, + { _MMIO(0x9888), 0x12060000 }, + { _MMIO(0x9888), 0x14060000 }, + { _MMIO(0x9888), 0x10060000 }, + { _MMIO(0x9888), 0x22060000 }, + { _MMIO(0x9884), 0x00000003 }, + { _MMIO(0x9888), 0x16130000 }, + { _MMIO(0x9888), 0x24000001 }, + { _MMIO(0x9888), 0x0e130056 }, + { _MMIO(0x9888), 0x10130000 }, + { _MMIO(0x9888), 0x1a130000 }, + { _MMIO(0x9888), 0x541f0001 }, + { _MMIO(0x9888), 0x181f0000 }, + { _MMIO(0x9888), 0x4c1f0000 }, + { _MMIO(0x9888), 0x301f0000 }, +}; + +static ssize_t +show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "1\n"); +} + +void +i915_perf_load_test_config_icl(struct drm_i915_private *dev_priv) +{ + strlcpy(dev_priv->perf.oa.test_config.uuid, + "a291665e-244b-4b76-9b9a-01de9d3c8068", + sizeof(dev_priv->perf.oa.test_config.uuid)); + dev_priv->perf.oa.test_config.id = 1; + + dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa; + dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa); + + dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa; + dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa); + + dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa; + dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa); + + dev_priv->perf.oa.test_config.sysfs_metric.name = "a291665e-244b-4b76-9b9a-01de9d3c8068"; + dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs; + + dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr; + + dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id"; + dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444; + dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id; +} diff --git a/drivers/gpu/drm/i915/oa/i915_oa_icl.h b/drivers/gpu/drm/i915/oa/i915_oa_icl.h new file mode 100644 index 000000000000..24eaa97d61ba --- /dev/null +++ b/drivers/gpu/drm/i915/oa/i915_oa_icl.h @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! 
+ */ + +#ifndef __I915_OA_ICL_H__ +#define __I915_OA_ICL_H__ + +extern void i915_perf_load_test_config_icl(struct drm_i915_private *dev_priv); + +#endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c b/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c new file mode 100644 index 000000000000..168e49ab0d4d --- /dev/null +++ b/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c @@ -0,0 +1,90 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! + */ + +#include + +#include "i915_drv.h" +#include "i915_oa_kblgt2.h" + +static const struct i915_oa_reg b_counter_config_test_oa[] = { + { _MMIO(0x2740), 0x00000000 }, + { _MMIO(0x2744), 0x00800000 }, + { _MMIO(0x2714), 0xf0800000 }, + { _MMIO(0x2710), 0x00000000 }, + { _MMIO(0x2724), 0xf0800000 }, + { _MMIO(0x2720), 0x00000000 }, + { _MMIO(0x2770), 0x00000004 }, + { _MMIO(0x2774), 0x00000000 }, + { _MMIO(0x2778), 0x00000003 }, + { _MMIO(0x277c), 0x00000000 }, + { _MMIO(0x2780), 0x00000007 }, + { _MMIO(0x2784), 0x00000000 }, + { _MMIO(0x2788), 0x00100002 }, + { _MMIO(0x278c), 0x0000fff7 }, + { _MMIO(0x2790), 0x00100002 }, + { _MMIO(0x2794), 0x0000ffcf }, + { _MMIO(0x2798), 0x00100082 }, + { _MMIO(0x279c), 0x0000ffef }, + { _MMIO(0x27a0), 0x001000c2 }, + { _MMIO(0x27a4), 0x0000ffe7 }, + { _MMIO(0x27a8), 0x00100001 }, + { _MMIO(0x27ac), 0x0000ffe7 }, +}; + +static const struct i915_oa_reg flex_eu_config_test_oa[] = { +}; + +static const struct i915_oa_reg mux_config_test_oa[] = { + { _MMIO(0x9840), 0x00000080 }, + { _MMIO(0x9888), 0x11810000 }, + { _MMIO(0x9888), 0x07810013 }, + { _MMIO(0x9888), 0x1f810000 }, + { _MMIO(0x9888), 0x1d810000 }, + { _MMIO(0x9888), 0x1b930040 }, + { _MMIO(0x9888), 0x07e54000 }, + { _MMIO(0x9888), 0x1f908000 }, + { _MMIO(0x9888), 0x11900000 }, + { _MMIO(0x9888), 0x37900000 }, + { _MMIO(0x9888), 0x53900000 }, + { _MMIO(0x9888), 0x45900000 }, + { _MMIO(0x9888), 0x33900000 }, +}; + +static ssize_t +show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "1\n"); +} + +void +i915_perf_load_test_config_kblgt2(struct drm_i915_private *dev_priv) +{ + strlcpy(dev_priv->perf.oa.test_config.uuid, + "baa3c7e4-52b6-4b85-801e-465a94b746dd", + sizeof(dev_priv->perf.oa.test_config.uuid)); + dev_priv->perf.oa.test_config.id = 1; + + dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa; + dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa); + + dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa; + dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa); + + dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa; + dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa); + + dev_priv->perf.oa.test_config.sysfs_metric.name = "baa3c7e4-52b6-4b85-801e-465a94b746dd"; + dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs; + + dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr; + + dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id"; + dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444; + dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id; +} diff --git a/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h b/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h new file mode 100644 index 000000000000..a55398a904de --- /dev/null +++ b/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h @@ -0,0 
+1,15 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! + */ + +#ifndef __I915_OA_KBLGT2_H__ +#define __I915_OA_KBLGT2_H__ + +extern void i915_perf_load_test_config_kblgt2(struct drm_i915_private *dev_priv); + +#endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c b/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c new file mode 100644 index 000000000000..6ffa553c388e --- /dev/null +++ b/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c @@ -0,0 +1,90 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! + */ + +#include + +#include "i915_drv.h" +#include "i915_oa_kblgt3.h" + +static const struct i915_oa_reg b_counter_config_test_oa[] = { + { _MMIO(0x2740), 0x00000000 }, + { _MMIO(0x2744), 0x00800000 }, + { _MMIO(0x2714), 0xf0800000 }, + { _MMIO(0x2710), 0x00000000 }, + { _MMIO(0x2724), 0xf0800000 }, + { _MMIO(0x2720), 0x00000000 }, + { _MMIO(0x2770), 0x00000004 }, + { _MMIO(0x2774), 0x00000000 }, + { _MMIO(0x2778), 0x00000003 }, + { _MMIO(0x277c), 0x00000000 }, + { _MMIO(0x2780), 0x00000007 }, + { _MMIO(0x2784), 0x00000000 }, + { _MMIO(0x2788), 0x00100002 }, + { _MMIO(0x278c), 0x0000fff7 }, + { _MMIO(0x2790), 0x00100002 }, + { _MMIO(0x2794), 0x0000ffcf }, + { _MMIO(0x2798), 0x00100082 }, + { _MMIO(0x279c), 0x0000ffef }, + { _MMIO(0x27a0), 0x001000c2 }, + { _MMIO(0x27a4), 0x0000ffe7 }, + { _MMIO(0x27a8), 0x00100001 }, + { _MMIO(0x27ac), 0x0000ffe7 }, +}; + +static const struct i915_oa_reg flex_eu_config_test_oa[] = { +}; + +static const struct i915_oa_reg mux_config_test_oa[] = { + { _MMIO(0x9840), 0x00000080 }, + { _MMIO(0x9888), 0x11810000 }, + { _MMIO(0x9888), 0x07810013 }, + { _MMIO(0x9888), 0x1f810000 }, + { _MMIO(0x9888), 0x1d810000 }, + { _MMIO(0x9888), 0x1b930040 }, + { _MMIO(0x9888), 0x07e54000 }, + { _MMIO(0x9888), 0x1f908000 }, + { _MMIO(0x9888), 0x11900000 }, + { _MMIO(0x9888), 0x37900000 }, + { _MMIO(0x9888), 0x53900000 }, + { _MMIO(0x9888), 0x45900000 }, + { _MMIO(0x9888), 0x33900000 }, +}; + +static ssize_t +show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "1\n"); +} + +void +i915_perf_load_test_config_kblgt3(struct drm_i915_private *dev_priv) +{ + strlcpy(dev_priv->perf.oa.test_config.uuid, + "f1792f32-6db2-4b50-b4b2-557128f1688d", + sizeof(dev_priv->perf.oa.test_config.uuid)); + dev_priv->perf.oa.test_config.id = 1; + + dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa; + dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa); + + dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa; + dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa); + + dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa; + dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa); + + dev_priv->perf.oa.test_config.sysfs_metric.name = "f1792f32-6db2-4b50-b4b2-557128f1688d"; + dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs; + + dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr; + + dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id"; + dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444; + dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id; +} diff --git 
a/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h b/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h new file mode 100644 index 000000000000..3ddd3483b7cc --- /dev/null +++ b/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! + */ + +#ifndef __I915_OA_KBLGT3_H__ +#define __I915_OA_KBLGT3_H__ + +extern void i915_perf_load_test_config_kblgt3(struct drm_i915_private *dev_priv); + +#endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c b/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c new file mode 100644 index 000000000000..7ce6ee851d43 --- /dev/null +++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c @@ -0,0 +1,89 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! + */ + +#include + +#include "i915_drv.h" +#include "i915_oa_sklgt2.h" + +static const struct i915_oa_reg b_counter_config_test_oa[] = { + { _MMIO(0x2740), 0x00000000 }, + { _MMIO(0x2714), 0xf0800000 }, + { _MMIO(0x2710), 0x00000000 }, + { _MMIO(0x2724), 0xf0800000 }, + { _MMIO(0x2720), 0x00000000 }, + { _MMIO(0x2770), 0x00000004 }, + { _MMIO(0x2774), 0x00000000 }, + { _MMIO(0x2778), 0x00000003 }, + { _MMIO(0x277c), 0x00000000 }, + { _MMIO(0x2780), 0x00000007 }, + { _MMIO(0x2784), 0x00000000 }, + { _MMIO(0x2788), 0x00100002 }, + { _MMIO(0x278c), 0x0000fff7 }, + { _MMIO(0x2790), 0x00100002 }, + { _MMIO(0x2794), 0x0000ffcf }, + { _MMIO(0x2798), 0x00100082 }, + { _MMIO(0x279c), 0x0000ffef }, + { _MMIO(0x27a0), 0x001000c2 }, + { _MMIO(0x27a4), 0x0000ffe7 }, + { _MMIO(0x27a8), 0x00100001 }, + { _MMIO(0x27ac), 0x0000ffe7 }, +}; + +static const struct i915_oa_reg flex_eu_config_test_oa[] = { +}; + +static const struct i915_oa_reg mux_config_test_oa[] = { + { _MMIO(0x9840), 0x00000080 }, + { _MMIO(0x9888), 0x11810000 }, + { _MMIO(0x9888), 0x07810016 }, + { _MMIO(0x9888), 0x1f810000 }, + { _MMIO(0x9888), 0x1d810000 }, + { _MMIO(0x9888), 0x1b930040 }, + { _MMIO(0x9888), 0x07e54000 }, + { _MMIO(0x9888), 0x1f908000 }, + { _MMIO(0x9888), 0x11900000 }, + { _MMIO(0x9888), 0x37900000 }, + { _MMIO(0x9888), 0x53900000 }, + { _MMIO(0x9888), 0x45900000 }, + { _MMIO(0x9888), 0x33900000 }, +}; + +static ssize_t +show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "1\n"); +} + +void +i915_perf_load_test_config_sklgt2(struct drm_i915_private *dev_priv) +{ + strlcpy(dev_priv->perf.oa.test_config.uuid, + "1651949f-0ac0-4cb1-a06f-dafd74a407d1", + sizeof(dev_priv->perf.oa.test_config.uuid)); + dev_priv->perf.oa.test_config.id = 1; + + dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa; + dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa); + + dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa; + dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa); + + dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa; + dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa); + + dev_priv->perf.oa.test_config.sysfs_metric.name = "1651949f-0ac0-4cb1-a06f-dafd74a407d1"; + dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs; + + dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr; + + dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name 
= "id"; + dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444; + dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id; +} diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h b/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h new file mode 100644 index 000000000000..be6256037239 --- /dev/null +++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! + */ + +#ifndef __I915_OA_SKLGT2_H__ +#define __I915_OA_SKLGT2_H__ + +extern void i915_perf_load_test_config_sklgt2(struct drm_i915_private *dev_priv); + +#endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c b/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c new file mode 100644 index 000000000000..086ca2631e1c --- /dev/null +++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c @@ -0,0 +1,90 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! + */ + +#include + +#include "i915_drv.h" +#include "i915_oa_sklgt3.h" + +static const struct i915_oa_reg b_counter_config_test_oa[] = { + { _MMIO(0x2740), 0x00000000 }, + { _MMIO(0x2744), 0x00800000 }, + { _MMIO(0x2714), 0xf0800000 }, + { _MMIO(0x2710), 0x00000000 }, + { _MMIO(0x2724), 0xf0800000 }, + { _MMIO(0x2720), 0x00000000 }, + { _MMIO(0x2770), 0x00000004 }, + { _MMIO(0x2774), 0x00000000 }, + { _MMIO(0x2778), 0x00000003 }, + { _MMIO(0x277c), 0x00000000 }, + { _MMIO(0x2780), 0x00000007 }, + { _MMIO(0x2784), 0x00000000 }, + { _MMIO(0x2788), 0x00100002 }, + { _MMIO(0x278c), 0x0000fff7 }, + { _MMIO(0x2790), 0x00100002 }, + { _MMIO(0x2794), 0x0000ffcf }, + { _MMIO(0x2798), 0x00100082 }, + { _MMIO(0x279c), 0x0000ffef }, + { _MMIO(0x27a0), 0x001000c2 }, + { _MMIO(0x27a4), 0x0000ffe7 }, + { _MMIO(0x27a8), 0x00100001 }, + { _MMIO(0x27ac), 0x0000ffe7 }, +}; + +static const struct i915_oa_reg flex_eu_config_test_oa[] = { +}; + +static const struct i915_oa_reg mux_config_test_oa[] = { + { _MMIO(0x9840), 0x00000080 }, + { _MMIO(0x9888), 0x11810000 }, + { _MMIO(0x9888), 0x07810013 }, + { _MMIO(0x9888), 0x1f810000 }, + { _MMIO(0x9888), 0x1d810000 }, + { _MMIO(0x9888), 0x1b930040 }, + { _MMIO(0x9888), 0x07e54000 }, + { _MMIO(0x9888), 0x1f908000 }, + { _MMIO(0x9888), 0x11900000 }, + { _MMIO(0x9888), 0x37900000 }, + { _MMIO(0x9888), 0x53900000 }, + { _MMIO(0x9888), 0x45900000 }, + { _MMIO(0x9888), 0x33900000 }, +}; + +static ssize_t +show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "1\n"); +} + +void +i915_perf_load_test_config_sklgt3(struct drm_i915_private *dev_priv) +{ + strlcpy(dev_priv->perf.oa.test_config.uuid, + "2b985803-d3c9-4629-8a4f-634bfecba0e8", + sizeof(dev_priv->perf.oa.test_config.uuid)); + dev_priv->perf.oa.test_config.id = 1; + + dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa; + dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa); + + dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa; + dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa); + + dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa; + dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa); + + dev_priv->perf.oa.test_config.sysfs_metric.name = "2b985803-d3c9-4629-8a4f-634bfecba0e8"; + dev_priv->perf.oa.test_config.sysfs_metric.attrs = 
dev_priv->perf.oa.test_config.attrs; + + dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr; + + dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id"; + dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444; + dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id; +} diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h b/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h new file mode 100644 index 000000000000..650beb068e56 --- /dev/null +++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! + */ + +#ifndef __I915_OA_SKLGT3_H__ +#define __I915_OA_SKLGT3_H__ + +extern void i915_perf_load_test_config_sklgt3(struct drm_i915_private *dev_priv); + +#endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c b/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c new file mode 100644 index 000000000000..b291a6eb8a87 --- /dev/null +++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c @@ -0,0 +1,90 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! + */ + +#include + +#include "i915_drv.h" +#include "i915_oa_sklgt4.h" + +static const struct i915_oa_reg b_counter_config_test_oa[] = { + { _MMIO(0x2740), 0x00000000 }, + { _MMIO(0x2744), 0x00800000 }, + { _MMIO(0x2714), 0xf0800000 }, + { _MMIO(0x2710), 0x00000000 }, + { _MMIO(0x2724), 0xf0800000 }, + { _MMIO(0x2720), 0x00000000 }, + { _MMIO(0x2770), 0x00000004 }, + { _MMIO(0x2774), 0x00000000 }, + { _MMIO(0x2778), 0x00000003 }, + { _MMIO(0x277c), 0x00000000 }, + { _MMIO(0x2780), 0x00000007 }, + { _MMIO(0x2784), 0x00000000 }, + { _MMIO(0x2788), 0x00100002 }, + { _MMIO(0x278c), 0x0000fff7 }, + { _MMIO(0x2790), 0x00100002 }, + { _MMIO(0x2794), 0x0000ffcf }, + { _MMIO(0x2798), 0x00100082 }, + { _MMIO(0x279c), 0x0000ffef }, + { _MMIO(0x27a0), 0x001000c2 }, + { _MMIO(0x27a4), 0x0000ffe7 }, + { _MMIO(0x27a8), 0x00100001 }, + { _MMIO(0x27ac), 0x0000ffe7 }, +}; + +static const struct i915_oa_reg flex_eu_config_test_oa[] = { +}; + +static const struct i915_oa_reg mux_config_test_oa[] = { + { _MMIO(0x9840), 0x00000080 }, + { _MMIO(0x9888), 0x11810000 }, + { _MMIO(0x9888), 0x07810013 }, + { _MMIO(0x9888), 0x1f810000 }, + { _MMIO(0x9888), 0x1d810000 }, + { _MMIO(0x9888), 0x1b930040 }, + { _MMIO(0x9888), 0x07e54000 }, + { _MMIO(0x9888), 0x1f908000 }, + { _MMIO(0x9888), 0x11900000 }, + { _MMIO(0x9888), 0x37900000 }, + { _MMIO(0x9888), 0x53900000 }, + { _MMIO(0x9888), 0x45900000 }, + { _MMIO(0x9888), 0x33900000 }, +}; + +static ssize_t +show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "1\n"); +} + +void +i915_perf_load_test_config_sklgt4(struct drm_i915_private *dev_priv) +{ + strlcpy(dev_priv->perf.oa.test_config.uuid, + "882fa433-1f4a-4a67-a962-c741888fe5f5", + sizeof(dev_priv->perf.oa.test_config.uuid)); + dev_priv->perf.oa.test_config.id = 1; + + dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa; + dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa); + + dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa; + dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa); + + dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa; + 
dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa); + + dev_priv->perf.oa.test_config.sysfs_metric.name = "882fa433-1f4a-4a67-a962-c741888fe5f5"; + dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs; + + dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr; + + dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id"; + dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444; + dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id; +} diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h b/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h new file mode 100644 index 000000000000..8dcf849d131e --- /dev/null +++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! + */ + +#ifndef __I915_OA_SKLGT4_H__ +#define __I915_OA_SKLGT4_H__ + +extern void i915_perf_load_test_config_sklgt4(struct drm_i915_private *dev_priv); + +#endif -- cgit v1.2.3 From 9ef424e58e21b301bc46bbe965deacfac9f72de9 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 26 Jun 2019 17:36:17 +0300 Subject: drm/i915: prefix header search path with $(srctree)/ Per commit 43068cb7ba1f ("drm: prefix header search paths with $(srctree)/") this is what we should be doing. Follow suit. Cc: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190626143618.21800-1-jani.nikula@intel.com --- drivers/gpu/drm/i915/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 0fbc3506b5fc..3bd8f0349a8a 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -35,7 +35,7 @@ subdir-ccflags-y += \ # Extra header tests include $(src)/Makefile.header-test -subdir-ccflags-y += -I$(src) +subdir-ccflags-y += -I$(srctree)/$(src) # Please keep these build lists sorted! -- cgit v1.2.3 From 7fcc7ca549d4889025d38c983681bfada3ff1f3a Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 26 Jun 2019 17:36:18 +0300 Subject: drm/i915: add header search path to subdir Makefiles MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With the subdirectories we lost the ability to build individual files on the command line, for example: $ make drivers/gpu/drm/i915/display/intel_display.o This was due to the top level directory missing from header search path. Add the header search paths to subdir Makefiles. Note that none of the other options in the top level i915 Makefile are taken into account when building individual files. Usually this is not a concern. 
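(Illustration only, not part of the patch: Kbuild expands $(src) to the directory of the Makefile being processed, so the line added in the hunks below,

    # $(src) is e.g. drivers/gpu/drm/i915/display, so $(src)/.. is the i915 root
    subdir-ccflags-y += -I$(srctree)/$(src)/..

puts the top-level i915 directory back on the include path, which is what lets

    $ make drivers/gpu/drm/i915/display/intel_display.o

resolve headers such as i915_drv.h again.)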
Reported-by: Imre Deak Reported-by: Ville Syrjälä Cc: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190626143618.21800-2-jani.nikula@intel.com --- drivers/gpu/drm/i915/display/Makefile | 3 +++ drivers/gpu/drm/i915/gem/Makefile | 6 +++++- drivers/gpu/drm/i915/gt/Makefile | 3 +++ 3 files changed, 11 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/Makefile b/drivers/gpu/drm/i915/display/Makefile index 1c75b5c9790c..eec6961015a1 100644 --- a/drivers/gpu/drm/i915/display/Makefile +++ b/drivers/gpu/drm/i915/display/Makefile @@ -1,2 +1,5 @@ +# For building individual subdir files on the command line +subdir-ccflags-y += -I$(srctree)/$(src)/.. + # Extra header tests include $(src)/Makefile.header-test diff --git a/drivers/gpu/drm/i915/gem/Makefile b/drivers/gpu/drm/i915/gem/Makefile index 07e7b8b840ea..eec6961015a1 100644 --- a/drivers/gpu/drm/i915/gem/Makefile +++ b/drivers/gpu/drm/i915/gem/Makefile @@ -1 +1,5 @@ -include $(src)/Makefile.header-test # Extra header tests +# For building individual subdir files on the command line +subdir-ccflags-y += -I$(srctree)/$(src)/.. + +# Extra header tests +include $(src)/Makefile.header-test diff --git a/drivers/gpu/drm/i915/gt/Makefile b/drivers/gpu/drm/i915/gt/Makefile index 1c75b5c9790c..eec6961015a1 100644 --- a/drivers/gpu/drm/i915/gt/Makefile +++ b/drivers/gpu/drm/i915/gt/Makefile @@ -1,2 +1,5 @@ +# For building individual subdir files on the command line +subdir-ccflags-y += -I$(srctree)/$(src)/.. + # Extra header tests include $(src)/Makefile.header-test -- cgit v1.2.3 From c75299aea22a92abf5e097830915162655fb46b5 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 26 Jun 2019 17:40:10 +0300 Subject: drm/i915: make i915_fixed.h self-contained Add the minimal includes/declarations to make the header self-contained, and ensure it stays that way. Reviewed-by: Chris Wilson Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190626144020.2155-4-jani.nikula@intel.com --- drivers/gpu/drm/i915/Makefile.header-test | 1 + drivers/gpu/drm/i915/i915_fixed.h | 5 +++++ 2 files changed, 6 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/Makefile.header-test b/drivers/gpu/drm/i915/Makefile.header-test index b1c3e642f621..8f617d5f5d59 100644 --- a/drivers/gpu/drm/i915/Makefile.header-test +++ b/drivers/gpu/drm/i915/Makefile.header-test @@ -6,6 +6,7 @@ header_test := \ i915_active_types.h \ i915_debugfs.h \ i915_drv.h \ + i915_fixed.h \ i915_gem_gtt.h \ i915_irq.h \ i915_params.h \ diff --git a/drivers/gpu/drm/i915/i915_fixed.h b/drivers/gpu/drm/i915/i915_fixed.h index 6621595fe74c..a327094de2bd 100644 --- a/drivers/gpu/drm/i915/i915_fixed.h +++ b/drivers/gpu/drm/i915/i915_fixed.h @@ -6,6 +6,11 @@ #ifndef _I915_FIXED_H_ #define _I915_FIXED_H_ +#include +#include +#include +#include + typedef struct { u32 val; } uint_fixed_16_16_t; -- cgit v1.2.3 From f807d31a2174409c787e53fb6f0609d1b9783e3e Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 26 Jun 2019 17:40:12 +0300 Subject: drm/i915: make i915_globals.h self-contained Add the minimal includes/declarations to make the header self-contained, and ensure it stays that way. 
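(Context for the Makefile.header-test hunks in this and the surrounding patches, sketched rather than quoted from the tree: for every header named in header_test a one-line wrapper source is generated that includes just that header, and the wrapper is compiled as an ordinary object, so a header that stops being self-contained breaks the build immediately. A minimal version of the idea:

    # generate header_test_foo.c containing only '#include "foo.h"'
    header_test_%.c: %.h
            echo '#include "$(notdir $<)"' > $@

    # compile one wrapper object per listed header
    extra-y += $(patsubst %.h,header_test_%.o,$(header_test))

That is what "ensure it stays that way" in these commit messages refers to.)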
Reviewed-by: Chris Wilson Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190626144020.2155-6-jani.nikula@intel.com --- drivers/gpu/drm/i915/Makefile.header-test | 1 + drivers/gpu/drm/i915/i915_globals.h | 2 ++ 2 files changed, 3 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/Makefile.header-test b/drivers/gpu/drm/i915/Makefile.header-test index 8f617d5f5d59..2f802d5a3777 100644 --- a/drivers/gpu/drm/i915/Makefile.header-test +++ b/drivers/gpu/drm/i915/Makefile.header-test @@ -8,6 +8,7 @@ header_test := \ i915_drv.h \ i915_fixed.h \ i915_gem_gtt.h \ + i915_globals.h \ i915_irq.h \ i915_params.h \ i915_priolist_types.h \ diff --git a/drivers/gpu/drm/i915/i915_globals.h b/drivers/gpu/drm/i915/i915_globals.h index 04c1ce107fc0..2d199f411a4a 100644 --- a/drivers/gpu/drm/i915/i915_globals.h +++ b/drivers/gpu/drm/i915/i915_globals.h @@ -7,6 +7,8 @@ #ifndef _I915_GLOBALS_H_ #define _I915_GLOBALS_H_ +#include + typedef void (*i915_global_func_t)(void); struct i915_global { -- cgit v1.2.3 From f8daf6418e3041ac967b45fc066539ba1eec5132 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 26 Jun 2019 17:40:13 +0300 Subject: drm/i915: make i915_pvinfo.h self-contained Add the minimal includes/declarations to make the header self-contained, and ensure it stays that way. Reviewed-by: Chris Wilson Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190626144020.2155-7-jani.nikula@intel.com --- drivers/gpu/drm/i915/Makefile.header-test | 1 + drivers/gpu/drm/i915/i915_pvinfo.h | 2 ++ 2 files changed, 3 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/Makefile.header-test b/drivers/gpu/drm/i915/Makefile.header-test index 2f802d5a3777..c5f3e4703f72 100644 --- a/drivers/gpu/drm/i915/Makefile.header-test +++ b/drivers/gpu/drm/i915/Makefile.header-test @@ -12,6 +12,7 @@ header_test := \ i915_irq.h \ i915_params.h \ i915_priolist_types.h \ + i915_pvinfo.h \ i915_reg.h \ i915_scheduler_types.h \ i915_utils.h \ diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h index ca4661e98f79..683e97ac2430 100644 --- a/drivers/gpu/drm/i915/i915_pvinfo.h +++ b/drivers/gpu/drm/i915/i915_pvinfo.h @@ -24,6 +24,8 @@ #ifndef _I915_PVINFO_H_ #define _I915_PVINFO_H_ +#include + /* The MMIO offset of the shared info between guest and host emulator */ #define VGT_PVINFO_PAGE 0x78000 #define VGT_PVINFO_SIZE 0x1000 -- cgit v1.2.3 From 961ebc9dd1f56e84dc4ef0aec2d05f63aefc898f Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 26 Jun 2019 17:40:14 +0300 Subject: drm/i915: make i915_vgpu.h self-contained Add the minimal includes/declarations to make the header self-contained, and ensure it stays that way. 
Reviewed-by: Chris Wilson Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190626144020.2155-8-jani.nikula@intel.com --- drivers/gpu/drm/i915/Makefile.header-test | 1 + drivers/gpu/drm/i915/i915_vgpu.h | 1 + 2 files changed, 2 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/Makefile.header-test b/drivers/gpu/drm/i915/Makefile.header-test index c5f3e4703f72..1c2d426ea913 100644 --- a/drivers/gpu/drm/i915/Makefile.header-test +++ b/drivers/gpu/drm/i915/Makefile.header-test @@ -16,6 +16,7 @@ header_test := \ i915_reg.h \ i915_scheduler_types.h \ i915_utils.h \ + i915_vgpu.h \ intel_csr.h \ intel_drv.h \ intel_pm.h \ diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h index 8dd7497eda15..8b3663dad193 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.h +++ b/drivers/gpu/drm/i915/i915_vgpu.h @@ -24,6 +24,7 @@ #ifndef _I915_VGPU_H_ #define _I915_VGPU_H_ +#include "i915_drv.h" #include "i915_pvinfo.h" void i915_detect_vgpu(struct drm_i915_private *dev_priv); -- cgit v1.2.3 From 016c1c8938b0402373b92606fecdaca623ab5006 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 26 Jun 2019 17:40:15 +0300 Subject: drm/i915: make intel_guc_ct.h self-contained Add the minimal includes/declarations to make the header self-contained, and ensure it stays that way. Reviewed-by: Chris Wilson Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190626144020.2155-9-jani.nikula@intel.com --- drivers/gpu/drm/i915/Makefile.header-test | 1 + drivers/gpu/drm/i915/intel_guc_ct.h | 7 +++++-- 2 files changed, 6 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/Makefile.header-test b/drivers/gpu/drm/i915/Makefile.header-test index 1c2d426ea913..b296be256913 100644 --- a/drivers/gpu/drm/i915/Makefile.header-test +++ b/drivers/gpu/drm/i915/Makefile.header-test @@ -19,6 +19,7 @@ header_test := \ i915_vgpu.h \ intel_csr.h \ intel_drv.h \ + intel_guc_ct.h \ intel_pm.h \ intel_runtime_pm.h \ intel_sideband.h \ diff --git a/drivers/gpu/drm/i915/intel_guc_ct.h b/drivers/gpu/drm/i915/intel_guc_ct.h index 0ec17493d83b..8c1f6d133168 100644 --- a/drivers/gpu/drm/i915/intel_guc_ct.h +++ b/drivers/gpu/drm/i915/intel_guc_ct.h @@ -24,11 +24,14 @@ #ifndef _INTEL_GUC_CT_H_ #define _INTEL_GUC_CT_H_ -struct intel_guc; -struct i915_vma; +#include +#include #include "intel_guc_fwif.h" +struct i915_vma; +struct intel_guc; + /** * DOC: Command Transport (CT). * -- cgit v1.2.3 From 72629d11d56c44ddaae273dd513319a1fb5c03bd Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 26 Jun 2019 17:40:16 +0300 Subject: drm/i915: make intel_guc_fwif.h self-contained Add the minimal includes/declarations to make the header self-contained, and ensure it stays that way. 
Reviewed-by: Chris Wilson Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190626144020.2155-10-jani.nikula@intel.com --- drivers/gpu/drm/i915/Makefile.header-test | 1 + drivers/gpu/drm/i915/intel_guc_fwif.h | 4 ++++ 2 files changed, 5 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/Makefile.header-test b/drivers/gpu/drm/i915/Makefile.header-test index b296be256913..583fe76f2cdb 100644 --- a/drivers/gpu/drm/i915/Makefile.header-test +++ b/drivers/gpu/drm/i915/Makefile.header-test @@ -20,6 +20,7 @@ header_test := \ intel_csr.h \ intel_drv.h \ intel_guc_ct.h \ + intel_guc_fwif.h \ intel_pm.h \ intel_runtime_pm.h \ intel_sideband.h \ diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h index f55f3bc8524d..92bd7ffb5b10 100644 --- a/drivers/gpu/drm/i915/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/intel_guc_fwif.h @@ -23,6 +23,10 @@ #ifndef _INTEL_GUC_FWIF_H #define _INTEL_GUC_FWIF_H +#include +#include +#include + #define GUC_CLIENT_PRIORITY_KMD_HIGH 0 #define GUC_CLIENT_PRIORITY_HIGH 1 #define GUC_CLIENT_PRIORITY_KMD_NORMAL 2 -- cgit v1.2.3 From 5e0cca98b4d154e235abbb29f7edda5e6b4f4b60 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 26 Jun 2019 17:40:17 +0300 Subject: drm/i915: make intel_guc_reg.h self-contained Add the minimal includes/declarations to make the header self-contained, and ensure it stays that way. v2: also include i915_reg.h (Michal) Reviewed-by: Chris Wilson Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190626144020.2155-11-jani.nikula@intel.com --- drivers/gpu/drm/i915/Makefile.header-test | 1 + drivers/gpu/drm/i915/intel_guc_reg.h | 5 +++++ 2 files changed, 6 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/Makefile.header-test b/drivers/gpu/drm/i915/Makefile.header-test index 583fe76f2cdb..bb6a9234b749 100644 --- a/drivers/gpu/drm/i915/Makefile.header-test +++ b/drivers/gpu/drm/i915/Makefile.header-test @@ -21,6 +21,7 @@ header_test := \ intel_drv.h \ intel_guc_ct.h \ intel_guc_fwif.h \ + intel_guc_reg.h \ intel_pm.h \ intel_runtime_pm.h \ intel_sideband.h \ diff --git a/drivers/gpu/drm/i915/intel_guc_reg.h b/drivers/gpu/drm/i915/intel_guc_reg.h index d90b88fadb5e..a5ab7bc5504c 100644 --- a/drivers/gpu/drm/i915/intel_guc_reg.h +++ b/drivers/gpu/drm/i915/intel_guc_reg.h @@ -24,6 +24,11 @@ #ifndef _INTEL_GUC_REG_H_ #define _INTEL_GUC_REG_H_ +#include +#include + +#include "i915_reg.h" + /* Definitions of GuC H/W registers, bits, etc */ #define GUC_STATUS _MMIO(0xc000) -- cgit v1.2.3 From ba740cfc2f262ff02853c0a76a5b4dd7bc628302 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 26 Jun 2019 17:40:18 +0300 Subject: drm/i915: make intel_gvt.h self-contained Add the minimal includes/declarations to make the header self-contained, and ensure it stays that way. 
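A common way these headers become self-contained without dragging in heavyweight includes is a forward declaration for any type that is only used through pointers, which is what the intel_gvt.h hunk below does for struct drm_i915_private. The sketch here illustrates the general pattern only; the header and function names are made up for the example:

	/* example_header.h -- illustrative only */
	#ifndef _EXAMPLE_HEADER_H_
	#define _EXAMPLE_HEADER_H_

	struct drm_i915_private;	/* forward declaration suffices for pointer parameters */

	/* callers see a complete prototype without this header including i915_drv.h */
	int example_init(struct drm_i915_private *dev_priv);

	#endif /* _EXAMPLE_HEADER_H_ */
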
Reviewed-by: Chris Wilson Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190626144020.2155-12-jani.nikula@intel.com --- drivers/gpu/drm/i915/Makefile.header-test | 1 + drivers/gpu/drm/i915/intel_gvt.h | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/Makefile.header-test b/drivers/gpu/drm/i915/Makefile.header-test index bb6a9234b749..4aa588d314a0 100644 --- a/drivers/gpu/drm/i915/Makefile.header-test +++ b/drivers/gpu/drm/i915/Makefile.header-test @@ -22,6 +22,7 @@ header_test := \ intel_guc_ct.h \ intel_guc_fwif.h \ intel_guc_reg.h \ + intel_gvt.h \ intel_pm.h \ intel_runtime_pm.h \ intel_sideband.h \ diff --git a/drivers/gpu/drm/i915/intel_gvt.h b/drivers/gpu/drm/i915/intel_gvt.h index 61b246470282..85ce37eb7cd6 100644 --- a/drivers/gpu/drm/i915/intel_gvt.h +++ b/drivers/gpu/drm/i915/intel_gvt.h @@ -24,7 +24,7 @@ #ifndef _INTEL_GVT_H_ #define _INTEL_GVT_H_ -struct intel_gvt; +struct drm_i915_private; #ifdef CONFIG_DRM_I915_GVT int intel_gvt_init(struct drm_i915_private *dev_priv); -- cgit v1.2.3 From b40cf94c76732fbb826207a6456b5c1ac9e14d23 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 26 Jun 2019 17:40:19 +0300 Subject: drm/i915: make intel_uc_fw.h self-contained Add the minimal includes/declarations to make the header self-contained, and ensure it stays that way. Reviewed-by: Chris Wilson Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190626144020.2155-13-jani.nikula@intel.com --- drivers/gpu/drm/i915/Makefile.header-test | 1 + drivers/gpu/drm/i915/intel_uc_fw.h | 2 ++ 2 files changed, 3 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/Makefile.header-test b/drivers/gpu/drm/i915/Makefile.header-test index 4aa588d314a0..2fd61869bdaa 100644 --- a/drivers/gpu/drm/i915/Makefile.header-test +++ b/drivers/gpu/drm/i915/Makefile.header-test @@ -26,6 +26,7 @@ header_test := \ intel_pm.h \ intel_runtime_pm.h \ intel_sideband.h \ + intel_uc_fw.h \ intel_uncore.h \ intel_wakeref.h diff --git a/drivers/gpu/drm/i915/intel_uc_fw.h b/drivers/gpu/drm/i915/intel_uc_fw.h index ff98f8661d72..24e66469153c 100644 --- a/drivers/gpu/drm/i915/intel_uc_fw.h +++ b/drivers/gpu/drm/i915/intel_uc_fw.h @@ -25,6 +25,8 @@ #ifndef _INTEL_UC_FW_H_ #define _INTEL_UC_FW_H_ +#include + struct drm_printer; struct drm_i915_private; -- cgit v1.2.3 From 7218524d3ea00dda75d2f0989ab4dcb631f00b61 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 26 Jun 2019 23:42:12 +0100 Subject: drm/i915: Make i945gm_vblank_work_func static MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit drivers/gpu/drm/i915/i915_irq.c:3382:6: warning: symbol 'i945gm_vblank_work_func' was not declared. Should it be static? CC [M] drivers/gpu/drm/i915/i915_irq.o drivers/gpu/drm/i915/i915_irq.c:3382:6: error: no previous prototype for ‘i945gm_vblank_work_func’ [-Werror=missing-prototypes] void i945gm_vblank_work_func(struct work_struct *work) Jani wrote the idential patch, so for posterity: The static keyword was apparently accidentally removed in commit 08fa8fd0faa5 ("drm/i915: Switch to per-crtc vblank vfuncs"), leading to sparse warning: drivers/gpu/drm/i915/i915_irq.c:3382:6: warning: symbol 'i945gm_vblank_work_func' was not declared. Should it be static? Make the function static again. Meanwhile, the 0-day kbuilder also spotted the mistake. 
Fixes: 08fa8fd0faa5 ("drm/i915: Switch to per-crtc vblank vfuncs") Reported-by: kbuild test robot Signed-off-by: Chris Wilson Signed-off-by: Jani Nikula Cc: Ville Syrjälä Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190626224212.10141-1-chris@chris-wilson.co.uk Link: https://patchwork.freedesktop.org/patch/msgid/20190627091914.30795-1-jani.nikula@intel.com --- drivers/gpu/drm/i915/i915_irq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 1b83d6e2ae69..73f0338faf9f 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -3379,7 +3379,7 @@ void bdw_disable_vblank(struct drm_crtc *crtc) spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } -void i945gm_vblank_work_func(struct work_struct *work) +static void i945gm_vblank_work_func(struct work_struct *work) { struct drm_i915_private *dev_priv = container_of(work, struct drm_i915_private, i945gm_vblank.work); -- cgit v1.2.3 From 501ec325f1ec4338c0b6d32001a6f4cdfc61a6b2 Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Tue, 25 Jun 2019 10:54:11 -0700 Subject: drm/i915: rework reading pipe disable fuses MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This prepares to have possibly more than 3 pipes. I didn't want to continue the previous approach since the check for "are the disabled pipes the last ones" poses a combinatory explosion. We need that check because in several places of the code we have that assumption. If that ever becomes false in a new HW, other parts of the code would have to change. Now we start by considering we have info->num_pipes enabled and disable each pipe that is marked as disabled. Then it's a simple matter of checking if we have at least one pipe and that all the enabled ones are the first pipes, i.e. there are no holes in the bitmask. 
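To make the "no holes in the mask" check below concrete: a mask that enables only the first N pipes (0b001, 0b011, 0b111) becomes a power of two when incremented, while a mask with a gap (for example 0b101 or 0b110) does not. The standalone sketch below mirrors the is_power_of_2(enabled_mask + 1) test from the patch in plain C; it is illustrative only and not driver code:

	#include <stdbool.h>
	#include <stdint.h>

	/* bit 0 = pipe A, bit 1 = pipe B, bit 2 = pipe C */
	static bool pipe_fuse_mask_valid(uint8_t enabled_mask)
	{
		/*
		 * 0b001 + 1 = 0b010, 0b011 + 1 = 0b100, 0b111 + 1 = 0b1000:
		 * contiguous masks starting at bit 0 turn into powers of two,
		 * so (mask + 1) & mask is zero. A holed mask such as
		 * 0b101 + 1 = 0b110 keeps overlapping bits and is rejected,
		 * as is an all-zero mask (no pipes at all).
		 */
		return enabled_mask != 0 &&
		       ((enabled_mask + 1) & enabled_mask) == 0;
	}

With a valid mask, the patch then simply takes hweight8(enabled_mask) as the number of usable pipes.
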
Cc: Jose Souza Cc: Ville Syrjälä Signed-off-by: Lucas De Marchi Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190625175437.14840-3-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/intel_device_info.c | 36 ++++++++++++-------------------- 1 file changed, 13 insertions(+), 23 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 7135d8dc32a7..e64536e1fd1b 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -929,35 +929,25 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) } } else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) { u32 dfsm = I915_READ(SKL_DFSM); - u8 disabled_mask = 0; - bool invalid; - int num_bits; + u8 enabled_mask = BIT(info->num_pipes) - 1; if (dfsm & SKL_DFSM_PIPE_A_DISABLE) - disabled_mask |= BIT(PIPE_A); + enabled_mask &= ~BIT(PIPE_A); if (dfsm & SKL_DFSM_PIPE_B_DISABLE) - disabled_mask |= BIT(PIPE_B); + enabled_mask &= ~BIT(PIPE_B); if (dfsm & SKL_DFSM_PIPE_C_DISABLE) - disabled_mask |= BIT(PIPE_C); - - num_bits = hweight8(disabled_mask); - - switch (disabled_mask) { - case BIT(PIPE_A): - case BIT(PIPE_B): - case BIT(PIPE_A) | BIT(PIPE_B): - case BIT(PIPE_A) | BIT(PIPE_C): - invalid = true; - break; - default: - invalid = false; - } + enabled_mask &= ~BIT(PIPE_C); - if (num_bits > info->num_pipes || invalid) - DRM_ERROR("invalid pipe fuse configuration: 0x%x\n", - disabled_mask); + /* + * At least one pipe should be enabled and if there are + * disabled pipes, they should be the last ones, with no holes + * in the mask. + */ + if (enabled_mask == 0 || !is_power_of_2(enabled_mask + 1)) + DRM_ERROR("invalid pipe fuse configuration: enabled_mask=0x%x\n", + enabled_mask); else - info->num_pipes -= num_bits; + info->num_pipes = hweight8(enabled_mask); } /* Initialize slice/subslice/EU info */ -- cgit v1.2.3 From 69f786aea946230b98e2756769169eb6ddb329cb Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Fri, 28 Jun 2019 10:55:12 +0200 Subject: drm/i915: Pass intel_crtc_state to needs_modeset() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In i915 we should use intel_crtc_state as much as possible, pass intel_crtc_state to needs_modeset, before we clean up all other uses of drm_crtc_state. 
Signed-off-by: Maarten Lankhorst Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190628085517.31886-2-maarten.lankhorst@linux.intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 68 ++++++++++++++-------------- 1 file changed, 34 insertions(+), 34 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index e55bd75528c1..90701d86fbeb 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -515,9 +515,9 @@ icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe, } static bool -needs_modeset(const struct drm_crtc_state *state) +needs_modeset(const struct intel_crtc_state *state) { - return drm_atomic_crtc_needs_modeset(state); + return drm_atomic_crtc_needs_modeset(&state->base); } /* @@ -5796,7 +5796,7 @@ static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_s if (!old_crtc_state->ips_enabled) return false; - if (needs_modeset(&new_crtc_state->base)) + if (needs_modeset(new_crtc_state)) return true; /* @@ -5823,7 +5823,7 @@ static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_s if (!new_crtc_state->ips_enabled) return false; - if (needs_modeset(&new_crtc_state->base)) + if (needs_modeset(new_crtc_state)) return true; /* @@ -5900,7 +5900,7 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state) intel_fbc_post_update(crtc); if (new_primary_state->visible && - (needs_modeset(&pipe_config->base) || + (needs_modeset(pipe_config) || !old_primary_state->visible)) intel_post_enable_primary(&crtc->base, pipe_config); } @@ -5924,7 +5924,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state, struct drm_plane *primary = crtc->base.primary; struct drm_plane_state *old_primary_state = drm_atomic_get_old_plane_state(old_state, primary); - bool modeset = needs_modeset(&pipe_config->base); + bool modeset = needs_modeset(pipe_config); struct intel_atomic_state *old_intel_state = to_intel_atomic_state(old_state); @@ -5984,7 +5984,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state, * If we're doing a modeset, we're done. No need to do any pre-vblank * watermark programming here. 
*/ - if (needs_modeset(&pipe_config->base)) + if (needs_modeset(pipe_config)) return; /* @@ -11339,7 +11339,7 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat struct intel_plane *plane = to_intel_plane(plane_state->plane); struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = to_i915(dev); - bool mode_changed = needs_modeset(crtc_state); + bool mode_changed = needs_modeset(pipe_config); bool was_crtc_enabled = old_crtc_state->base.active; bool is_crtc_enabled = crtc_state->active; bool turn_off, turn_on, visible, was_visible; @@ -11608,7 +11608,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc, struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state); int ret; - bool mode_changed = needs_modeset(crtc_state); + bool mode_changed = needs_modeset(pipe_config); if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) && mode_changed && !crtc_state->active) @@ -13088,7 +13088,7 @@ intel_modeset_verify_crtc(struct drm_crtc *crtc, struct drm_crtc_state *old_state, struct drm_crtc_state *new_state) { - if (!needs_modeset(new_state) && + if (!needs_modeset(to_intel_crtc_state(new_state)) && !to_intel_crtc_state(new_state)->update_pipe) return; @@ -13180,7 +13180,7 @@ static void intel_modeset_clear_plls(struct intel_atomic_state *state) struct intel_shared_dpll *old_dpll = old_crtc_state->shared_dpll; - if (!needs_modeset(&new_crtc_state->base)) + if (!needs_modeset(new_crtc_state)) continue; new_crtc_state->shared_dpll = NULL; @@ -13210,7 +13210,7 @@ static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state) /* look at all crtc's that are going to be enabled in during modeset */ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { if (!crtc_state->base.active || - !needs_modeset(&crtc_state->base)) + !needs_modeset(crtc_state)) continue; if (first_crtc_state) { @@ -13235,7 +13235,7 @@ static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state) crtc_state->hsw_workaround_pipe = INVALID_PIPE; if (!crtc_state->base.active || - needs_modeset(&crtc_state->base)) + needs_modeset(crtc_state)) continue; /* 2 or more enabled crtcs means no need for w/a */ @@ -13285,7 +13285,7 @@ static int intel_modeset_all_pipes(struct drm_atomic_state *state) if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); - if (!crtc_state->active || needs_modeset(crtc_state)) + if (!crtc_state->active || needs_modeset(to_intel_crtc_state(crtc_state))) continue; crtc_state->mode_changed = true; @@ -13362,12 +13362,12 @@ static int intel_modeset_checks(struct intel_atomic_state *state) } if (is_power_of_2(state->active_crtcs)) { - struct drm_crtc *crtc; - struct drm_crtc_state *crtc_state; + struct intel_crtc *crtc; + struct intel_crtc_state *crtc_state; pipe = ilog2(state->active_crtcs); - crtc = &intel_get_crtc_for_pipe(dev_priv, pipe)->base; - crtc_state = drm_atomic_get_new_crtc_state(&state->base, crtc); + crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + crtc_state = intel_atomic_get_new_crtc_state(state, crtc); if (crtc_state && needs_modeset(crtc_state)) pipe = INVALID_PIPE; } else { @@ -13478,7 +13478,7 @@ static int intel_atomic_check(struct drm_device *dev, for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - if (!needs_modeset(&new_crtc_state->base)) + if (!needs_modeset(new_crtc_state)) continue; if (!new_crtc_state->base.enable) { @@ -13492,7 +13492,7 @@ static int intel_atomic_check(struct drm_device *dev, intel_crtc_check_fastset(old_crtc_state, new_crtc_state); - 
if (needs_modeset(&new_crtc_state->base)) + if (needs_modeset(new_crtc_state)) any_ms = true; } @@ -13527,12 +13527,12 @@ static int intel_atomic_check(struct drm_device *dev, for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - if (!needs_modeset(&new_crtc_state->base) && + if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe) continue; intel_dump_pipe_config(new_crtc_state, state, - needs_modeset(&new_crtc_state->base) ? + needs_modeset(new_crtc_state) ? "[modeset]" : "[fastset]"); } @@ -13579,7 +13579,7 @@ static void intel_update_crtc(struct drm_crtc *crtc, struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state); - bool modeset = needs_modeset(new_crtc_state); + bool modeset = needs_modeset(pipe_config); struct intel_plane_state *new_plane_state = intel_atomic_get_new_plane_state(to_intel_atomic_state(state), to_intel_plane(crtc->primary)); @@ -13788,15 +13788,15 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) new_intel_crtc_state = to_intel_crtc_state(new_crtc_state); intel_crtc = to_intel_crtc(crtc); - if (needs_modeset(new_crtc_state) || - to_intel_crtc_state(new_crtc_state)->update_pipe) { + if (needs_modeset(new_intel_crtc_state) || + new_intel_crtc_state->update_pipe) { put_domains[intel_crtc->pipe] = modeset_get_crtc_power_domains(crtc, new_intel_crtc_state); } - if (!needs_modeset(new_crtc_state)) + if (!needs_modeset(new_intel_crtc_state)) continue; intel_pre_plane_update(old_intel_crtc_state, new_intel_crtc_state); @@ -13855,7 +13855,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) /* Complete the events for pipes that have now been disabled */ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { - bool modeset = needs_modeset(new_crtc_state); + bool modeset = needs_modeset(to_intel_crtc_state(new_crtc_state)); /* Complete events for now disable pipes here. 
*/ if (modeset && !new_crtc_state->active && new_crtc_state->event) { @@ -13891,7 +13891,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) new_intel_crtc_state = to_intel_crtc_state(new_crtc_state); if (new_crtc_state->active && - !needs_modeset(new_crtc_state) && + !needs_modeset(to_intel_crtc_state(new_crtc_state)) && (new_intel_crtc_state->base.color_mgmt_changed || new_intel_crtc_state->update_pipe)) intel_color_load_luts(new_intel_crtc_state); @@ -14238,9 +14238,9 @@ intel_prepare_plane_fb(struct drm_plane *plane, int ret; if (old_obj) { - struct drm_crtc_state *crtc_state = - drm_atomic_get_new_crtc_state(new_state->state, - plane->state->crtc); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(intel_state, + to_intel_crtc(plane->state->crtc)); /* Big Hammer, we also need to ensure that any pending * MI_WAIT_FOR_EVENT inside a user batch buffer on the @@ -14401,7 +14401,7 @@ static void intel_begin_crtc_commit(struct intel_atomic_state *state, intel_atomic_get_old_crtc_state(state, crtc); struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - bool modeset = needs_modeset(&new_crtc_state->base); + bool modeset = needs_modeset(new_crtc_state); /* Perform vblank evasion around commit operation */ intel_pipe_update_start(new_crtc_state); @@ -14454,7 +14454,7 @@ static void intel_finish_crtc_commit(struct intel_atomic_state *state, intel_pipe_update_end(new_crtc_state); if (new_crtc_state->update_pipe && - !needs_modeset(&new_crtc_state->base) && + !needs_modeset(new_crtc_state) && old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED) intel_crtc_arm_fifo_underrun(crtc, new_crtc_state); } @@ -14568,7 +14568,7 @@ intel_legacy_cursor_update(struct drm_plane *plane, * When crtc is inactive or there is a modeset pending, * wait for it to complete in the slowpath */ - if (!crtc_state->base.active || needs_modeset(&crtc_state->base) || + if (!crtc_state->base.active || needs_modeset(crtc_state) || crtc_state->update_pipe) goto slow; -- cgit v1.2.3 From 855e0d684a3ef8cd0c434edfa355279957020626 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Fri, 28 Jun 2019 10:55:13 +0200 Subject: drm/i915: Convert most of atomic commit to take more intel state MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of passing along drm_crtc_state and drm_atomic_state, pass along more intel_atomic_state and intel_crtc_state. This will make the code more readable by not casting between drm state and intel state all the time. While at it, rename old_state to state, with the get_new/old helpers there is no point in distinguishing between state before and after swapping state any more. 
(Ville) Signed-off-by: Maarten Lankhorst Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190628085517.31886-3-maarten.lankhorst@linux.intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 429 +++++++++++++-------------- drivers/gpu/drm/i915/i915_drv.h | 6 +- drivers/gpu/drm/i915/intel_pm.c | 11 +- drivers/gpu/drm/i915/intel_pm.h | 4 +- 4 files changed, 209 insertions(+), 241 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 90701d86fbeb..f411f57bcacb 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -5877,13 +5877,13 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state) struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_atomic_state *old_state = old_crtc_state->base.state; + struct drm_atomic_state *state = old_crtc_state->base.state; struct intel_crtc_state *pipe_config = - intel_atomic_get_new_crtc_state(to_intel_atomic_state(old_state), + intel_atomic_get_new_crtc_state(to_intel_atomic_state(state), crtc); struct drm_plane *primary = crtc->base.primary; struct drm_plane_state *old_primary_state = - drm_atomic_get_old_plane_state(old_state, primary); + drm_atomic_get_old_plane_state(state, primary); intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits); @@ -5895,7 +5895,7 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state) if (old_primary_state) { struct drm_plane_state *new_primary_state = - drm_atomic_get_new_plane_state(old_state, primary); + drm_atomic_get_new_plane_state(state, primary); intel_fbc_post_update(crtc); @@ -5920,20 +5920,20 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state, struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_atomic_state *old_state = old_crtc_state->base.state; + struct drm_atomic_state *state = old_crtc_state->base.state; struct drm_plane *primary = crtc->base.primary; struct drm_plane_state *old_primary_state = - drm_atomic_get_old_plane_state(old_state, primary); + drm_atomic_get_old_plane_state(state, primary); bool modeset = needs_modeset(pipe_config); - struct intel_atomic_state *old_intel_state = - to_intel_atomic_state(old_state); + struct intel_atomic_state *intel_state = + to_intel_atomic_state(state); if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config)) hsw_disable_ips(old_crtc_state); if (old_primary_state) { struct intel_plane_state *new_primary_state = - intel_atomic_get_new_plane_state(old_intel_state, + intel_atomic_get_new_plane_state(intel_state, to_intel_plane(primary)); intel_fbc_pre_update(crtc, pipe_config, new_primary_state); @@ -6002,7 +6002,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state, * us to. 
*/ if (dev_priv->display.initial_watermarks != NULL) - dev_priv->display.initial_watermarks(old_intel_state, + dev_priv->display.initial_watermarks(intel_state, pipe_config); else if (pipe_config->update_wm_pre) intel_update_watermarks(crtc); @@ -6036,19 +6036,19 @@ static void intel_crtc_disable_planes(struct intel_atomic_state *state, intel_frontbuffer_flip(dev_priv, fb_bits); } -static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc, +static void intel_encoders_pre_pll_enable(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, - struct drm_atomic_state *old_state) + struct intel_atomic_state *state) { struct drm_connector_state *conn_state; struct drm_connector *conn; int i; - for_each_new_connector_in_state(old_state, conn, conn_state, i) { + for_each_new_connector_in_state(&state->base, conn, conn_state, i) { struct intel_encoder *encoder = to_intel_encoder(conn_state->best_encoder); - if (conn_state->crtc != crtc) + if (conn_state->crtc != &crtc->base) continue; if (encoder->pre_pll_enable) @@ -6056,19 +6056,19 @@ static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc, } } -static void intel_encoders_pre_enable(struct drm_crtc *crtc, +static void intel_encoders_pre_enable(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, - struct drm_atomic_state *old_state) + struct intel_atomic_state *state) { struct drm_connector_state *conn_state; struct drm_connector *conn; int i; - for_each_new_connector_in_state(old_state, conn, conn_state, i) { + for_each_new_connector_in_state(&state->base, conn, conn_state, i) { struct intel_encoder *encoder = to_intel_encoder(conn_state->best_encoder); - if (conn_state->crtc != crtc) + if (conn_state->crtc != &crtc->base) continue; if (encoder->pre_enable) @@ -6076,19 +6076,19 @@ static void intel_encoders_pre_enable(struct drm_crtc *crtc, } } -static void intel_encoders_enable(struct drm_crtc *crtc, +static void intel_encoders_enable(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, - struct drm_atomic_state *old_state) + struct intel_atomic_state *state) { struct drm_connector_state *conn_state; struct drm_connector *conn; int i; - for_each_new_connector_in_state(old_state, conn, conn_state, i) { + for_each_new_connector_in_state(&state->base, conn, conn_state, i) { struct intel_encoder *encoder = to_intel_encoder(conn_state->best_encoder); - if (conn_state->crtc != crtc) + if (conn_state->crtc != &crtc->base) continue; if (encoder->enable) @@ -6097,19 +6097,19 @@ static void intel_encoders_enable(struct drm_crtc *crtc, } } -static void intel_encoders_disable(struct drm_crtc *crtc, +static void intel_encoders_disable(struct intel_crtc *crtc, struct intel_crtc_state *old_crtc_state, - struct drm_atomic_state *old_state) + struct intel_atomic_state *state) { struct drm_connector_state *old_conn_state; struct drm_connector *conn; int i; - for_each_old_connector_in_state(old_state, conn, old_conn_state, i) { + for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { struct intel_encoder *encoder = to_intel_encoder(old_conn_state->best_encoder); - if (old_conn_state->crtc != crtc) + if (old_conn_state->crtc != &crtc->base) continue; intel_opregion_notify_encoder(encoder, false); @@ -6118,19 +6118,19 @@ static void intel_encoders_disable(struct drm_crtc *crtc, } } -static void intel_encoders_post_disable(struct drm_crtc *crtc, +static void intel_encoders_post_disable(struct intel_crtc *crtc, struct intel_crtc_state *old_crtc_state, - struct drm_atomic_state *old_state) + struct 
intel_atomic_state *state) { struct drm_connector_state *old_conn_state; struct drm_connector *conn; int i; - for_each_old_connector_in_state(old_state, conn, old_conn_state, i) { + for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { struct intel_encoder *encoder = to_intel_encoder(old_conn_state->best_encoder); - if (old_conn_state->crtc != crtc) + if (old_conn_state->crtc != &crtc->base) continue; if (encoder->post_disable) @@ -6138,19 +6138,19 @@ static void intel_encoders_post_disable(struct drm_crtc *crtc, } } -static void intel_encoders_post_pll_disable(struct drm_crtc *crtc, +static void intel_encoders_post_pll_disable(struct intel_crtc *crtc, struct intel_crtc_state *old_crtc_state, - struct drm_atomic_state *old_state) + struct intel_atomic_state *state) { struct drm_connector_state *old_conn_state; struct drm_connector *conn; int i; - for_each_old_connector_in_state(old_state, conn, old_conn_state, i) { + for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { struct intel_encoder *encoder = to_intel_encoder(old_conn_state->best_encoder); - if (old_conn_state->crtc != crtc) + if (old_conn_state->crtc != &crtc->base) continue; if (encoder->post_pll_disable) @@ -6158,19 +6158,19 @@ static void intel_encoders_post_pll_disable(struct drm_crtc *crtc, } } -static void intel_encoders_update_pipe(struct drm_crtc *crtc, +static void intel_encoders_update_pipe(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, - struct drm_atomic_state *old_state) + struct intel_atomic_state *state) { struct drm_connector_state *conn_state; struct drm_connector *conn; int i; - for_each_new_connector_in_state(old_state, conn, conn_state, i) { + for_each_new_connector_in_state(&state->base, conn, conn_state, i) { struct intel_encoder *encoder = to_intel_encoder(conn_state->best_encoder); - if (conn_state->crtc != crtc) + if (conn_state->crtc != &crtc->base) continue; if (encoder->update_pipe) @@ -6187,15 +6187,13 @@ static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_stat } static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config, - struct drm_atomic_state *old_state) + struct intel_atomic_state *state) { struct drm_crtc *crtc = pipe_config->base.crtc; struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; - struct intel_atomic_state *old_intel_state = - to_intel_atomic_state(old_state); if (WARN_ON(intel_crtc->active)) return; @@ -6231,7 +6229,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config, intel_crtc->active = true; - intel_encoders_pre_enable(crtc, pipe_config, old_state); + intel_encoders_pre_enable(intel_crtc, pipe_config, state); if (pipe_config->has_pch_encoder) { /* Note: FDI PLL enabling _must_ be done before we enable the @@ -6255,16 +6253,16 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config, intel_disable_primary_plane(pipe_config); if (dev_priv->display.initial_watermarks != NULL) - dev_priv->display.initial_watermarks(old_intel_state, pipe_config); + dev_priv->display.initial_watermarks(state, pipe_config); intel_enable_pipe(pipe_config); if (pipe_config->has_pch_encoder) - ironlake_pch_enable(old_intel_state, pipe_config); + ironlake_pch_enable(state, pipe_config); assert_vblank_disabled(crtc); intel_crtc_vblank_on(pipe_config); - intel_encoders_enable(crtc, pipe_config, old_state); + intel_encoders_enable(intel_crtc, pipe_config, state); if 
(HAS_PCH_CPT(dev_priv)) cpt_verify_modeset(dev, intel_crtc->pipe); @@ -6317,26 +6315,24 @@ static void icl_pipe_mbus_enable(struct intel_crtc *crtc) } static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, - struct drm_atomic_state *old_state) + struct intel_atomic_state *state) { struct drm_crtc *crtc = pipe_config->base.crtc; struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe, hsw_workaround_pipe; enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; - struct intel_atomic_state *old_intel_state = - to_intel_atomic_state(old_state); bool psl_clkgate_wa; if (WARN_ON(intel_crtc->active)) return; - intel_encoders_pre_pll_enable(crtc, pipe_config, old_state); + intel_encoders_pre_pll_enable(intel_crtc, pipe_config, state); if (pipe_config->shared_dpll) intel_enable_shared_dpll(pipe_config); - intel_encoders_pre_enable(crtc, pipe_config, old_state); + intel_encoders_pre_enable(intel_crtc, pipe_config, state); if (intel_crtc_has_dp_encoder(pipe_config)) intel_dp_set_m_n(pipe_config, M1_N1); @@ -6394,7 +6390,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, intel_ddi_enable_transcoder_func(pipe_config); if (dev_priv->display.initial_watermarks != NULL) - dev_priv->display.initial_watermarks(old_intel_state, pipe_config); + dev_priv->display.initial_watermarks(state, pipe_config); if (INTEL_GEN(dev_priv) >= 11) icl_pipe_mbus_enable(intel_crtc); @@ -6404,7 +6400,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, intel_enable_pipe(pipe_config); if (pipe_config->has_pch_encoder) - lpt_pch_enable(old_intel_state, pipe_config); + lpt_pch_enable(state, pipe_config); if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) intel_ddi_set_vc_payload_alloc(pipe_config, true); @@ -6412,7 +6408,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, assert_vblank_disabled(crtc); intel_crtc_vblank_on(pipe_config); - intel_encoders_enable(crtc, pipe_config, old_state); + intel_encoders_enable(intel_crtc, pipe_config, state); if (psl_clkgate_wa) { intel_wait_for_vblank(dev_priv, pipe); @@ -6444,7 +6440,7 @@ static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state) } static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state, - struct drm_atomic_state *old_state) + struct intel_atomic_state *state) { struct drm_crtc *crtc = old_crtc_state->base.crtc; struct drm_device *dev = crtc->dev; @@ -6460,7 +6456,7 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state, intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); - intel_encoders_disable(crtc, old_crtc_state, old_state); + intel_encoders_disable(intel_crtc, old_crtc_state, state); drm_crtc_vblank_off(crtc); assert_vblank_disabled(crtc); @@ -6472,7 +6468,7 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state, if (old_crtc_state->has_pch_encoder) ironlake_fdi_disable(crtc); - intel_encoders_post_disable(crtc, old_crtc_state, old_state); + intel_encoders_post_disable(intel_crtc, old_crtc_state, state); if (old_crtc_state->has_pch_encoder) { ironlake_disable_pch_transcoder(dev_priv, pipe); @@ -6503,14 +6499,14 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state, } static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state, - struct drm_atomic_state *old_state) + struct intel_atomic_state *state) { 
struct drm_crtc *crtc = old_crtc_state->base.crtc; struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; - intel_encoders_disable(crtc, old_crtc_state, old_state); + intel_encoders_disable(intel_crtc, old_crtc_state, state); drm_crtc_vblank_off(crtc); assert_vblank_disabled(crtc); @@ -6532,9 +6528,9 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state, else ironlake_pfit_disable(old_crtc_state); - intel_encoders_post_disable(crtc, old_crtc_state, old_state); + intel_encoders_post_disable(intel_crtc, old_crtc_state, state); - intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state); + intel_encoders_post_pll_disable(intel_crtc, old_crtc_state, state); } static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state) @@ -6633,14 +6629,13 @@ intel_aux_power_domain(struct intel_digital_port *dig_port) } } -static u64 get_crtc_power_domains(struct drm_crtc *crtc, +static u64 get_crtc_power_domains(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state) { - struct drm_device *dev = crtc->dev; + struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct drm_encoder *encoder; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - enum pipe pipe = intel_crtc->pipe; + enum pipe pipe = crtc->pipe; u64 mask; enum transcoder transcoder = crtc_state->cpu_transcoder; @@ -6669,16 +6664,15 @@ static u64 get_crtc_power_domains(struct drm_crtc *crtc, } static u64 -modeset_get_crtc_power_domains(struct drm_crtc *crtc, +modeset_get_crtc_power_domains(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(crtc->dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum intel_display_power_domain domain; u64 domains, new_domains, old_domains; - old_domains = intel_crtc->enabled_power_domains; - intel_crtc->enabled_power_domains = new_domains = + old_domains = crtc->enabled_power_domains; + crtc->enabled_power_domains = new_domains = get_crtc_power_domains(crtc, crtc_state); domains = new_domains & ~old_domains; @@ -6699,10 +6693,8 @@ static void modeset_put_power_domains(struct drm_i915_private *dev_priv, } static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config, - struct drm_atomic_state *old_state) + struct intel_atomic_state *state) { - struct intel_atomic_state *old_intel_state = - to_intel_atomic_state(old_state); struct drm_crtc *crtc = pipe_config->base.crtc; struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = to_i915(dev); @@ -6729,7 +6721,7 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config, intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); - intel_encoders_pre_pll_enable(crtc, pipe_config, old_state); + intel_encoders_pre_pll_enable(intel_crtc, pipe_config, state); if (IS_CHERRYVIEW(dev_priv)) { chv_prepare_pll(intel_crtc, pipe_config); @@ -6739,7 +6731,7 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config, vlv_enable_pll(intel_crtc, pipe_config); } - intel_encoders_pre_enable(crtc, pipe_config, old_state); + intel_encoders_pre_enable(intel_crtc, pipe_config, state); i9xx_pfit_enable(pipe_config); @@ -6748,14 +6740,13 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config, /* update DSPCNTR to configure gamma for pipe bottom color */ 
intel_disable_primary_plane(pipe_config); - dev_priv->display.initial_watermarks(old_intel_state, - pipe_config); + dev_priv->display.initial_watermarks(state, pipe_config); intel_enable_pipe(pipe_config); assert_vblank_disabled(crtc); intel_crtc_vblank_on(pipe_config); - intel_encoders_enable(crtc, pipe_config, old_state); + intel_encoders_enable(intel_crtc, pipe_config, state); } static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state) @@ -6768,10 +6759,8 @@ static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state) } static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config, - struct drm_atomic_state *old_state) + struct intel_atomic_state *state) { - struct intel_atomic_state *old_intel_state = - to_intel_atomic_state(old_state); struct drm_crtc *crtc = pipe_config->base.crtc; struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = to_i915(dev); @@ -6796,7 +6785,7 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config, if (!IS_GEN(dev_priv, 2)) intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); - intel_encoders_pre_enable(crtc, pipe_config, old_state); + intel_encoders_pre_enable(intel_crtc, pipe_config, state); i9xx_enable_pll(intel_crtc, pipe_config); @@ -6808,7 +6797,7 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config, intel_disable_primary_plane(pipe_config); if (dev_priv->display.initial_watermarks != NULL) - dev_priv->display.initial_watermarks(old_intel_state, + dev_priv->display.initial_watermarks(state, pipe_config); else intel_update_watermarks(intel_crtc); @@ -6817,7 +6806,7 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config, assert_vblank_disabled(crtc); intel_crtc_vblank_on(pipe_config); - intel_encoders_enable(crtc, pipe_config, old_state); + intel_encoders_enable(intel_crtc, pipe_config, state); } static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state) @@ -6836,7 +6825,7 @@ static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state) } static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state, - struct drm_atomic_state *old_state) + struct intel_atomic_state *state) { struct drm_crtc *crtc = old_crtc_state->base.crtc; struct drm_device *dev = crtc->dev; @@ -6851,7 +6840,7 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state, if (IS_GEN(dev_priv, 2)) intel_wait_for_vblank(dev_priv, pipe); - intel_encoders_disable(crtc, old_crtc_state, old_state); + intel_encoders_disable(intel_crtc, old_crtc_state, state); drm_crtc_vblank_off(crtc); assert_vblank_disabled(crtc); @@ -6860,7 +6849,7 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state, i9xx_pfit_disable(old_crtc_state); - intel_encoders_post_disable(crtc, old_crtc_state, old_state); + intel_encoders_post_disable(intel_crtc, old_crtc_state, state); if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) { if (IS_CHERRYVIEW(dev_priv)) @@ -6871,7 +6860,7 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state, i9xx_disable_pll(old_crtc_state); } - intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state); + intel_encoders_post_pll_disable(intel_crtc, old_crtc_state, state); if (!IS_GEN(dev_priv, 2)) intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); @@ -6925,7 +6914,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc, WARN_ON(IS_ERR(crtc_state) || ret); - dev_priv->display.crtc_disable(crtc_state, state); + dev_priv->display.crtc_disable(crtc_state, 
to_intel_atomic_state(state)); drm_atomic_state_put(state); @@ -12931,15 +12920,15 @@ verify_crtc_state(struct drm_crtc *crtc, struct intel_encoder *encoder; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc_state *pipe_config, *sw_config; - struct drm_atomic_state *old_state; + struct drm_atomic_state *state; bool active; - old_state = old_crtc_state->state; + state = old_crtc_state->state; __drm_atomic_helper_crtc_destroy_state(old_crtc_state); pipe_config = to_intel_crtc_state(old_crtc_state); memset(pipe_config, 0, sizeof(*pipe_config)); pipe_config->base.crtc = crtc; - pipe_config->base.state = old_state; + pipe_config->base.state = state; DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name); @@ -13083,19 +13072,18 @@ verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc, } static void -intel_modeset_verify_crtc(struct drm_crtc *crtc, - struct drm_atomic_state *state, - struct drm_crtc_state *old_state, - struct drm_crtc_state *new_state) +intel_modeset_verify_crtc(struct intel_crtc *crtc, + struct intel_atomic_state *state, + struct intel_crtc_state *old_state, + struct intel_crtc_state *new_state) { - if (!needs_modeset(to_intel_crtc_state(new_state)) && - !to_intel_crtc_state(new_state)->update_pipe) + if (!needs_modeset(new_state) && !new_state->update_pipe) return; - verify_wm_state(crtc, new_state); - verify_connector_state(crtc->dev, state, crtc); - verify_crtc_state(crtc, old_state, new_state); - verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state); + verify_wm_state(&crtc->base, &new_state->base); + verify_connector_state(crtc->base.dev, &state->base, &crtc->base); + verify_crtc_state(&crtc->base, &old_state->base, &new_state->base); + verify_shared_dpll_state(crtc->base.dev, &crtc->base, &old_state->base, &new_state->base); } static void @@ -13110,10 +13098,10 @@ verify_disabled_dpll_state(struct drm_device *dev) static void intel_modeset_verify_disabled(struct drm_device *dev, - struct drm_atomic_state *state) + struct intel_atomic_state *state) { - verify_encoder_state(dev, state); - verify_connector_state(dev, state, NULL); + verify_encoder_state(dev, &state->base); + verify_connector_state(dev, &state->base, NULL); verify_disabled_dpll_state(dev); } @@ -13570,57 +13558,54 @@ u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc) return crtc->base.funcs->get_vblank_counter(&crtc->base); } -static void intel_update_crtc(struct drm_crtc *crtc, - struct drm_atomic_state *state, - struct drm_crtc_state *old_crtc_state, - struct drm_crtc_state *new_crtc_state) +static void intel_update_crtc(struct intel_crtc *crtc, + struct intel_atomic_state *state, + struct intel_crtc_state *old_crtc_state, + struct intel_crtc_state *new_crtc_state) { - struct drm_device *dev = crtc->dev; + struct drm_device *dev = state->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state); - bool modeset = needs_modeset(pipe_config); + bool modeset = needs_modeset(new_crtc_state); struct intel_plane_state *new_plane_state = - intel_atomic_get_new_plane_state(to_intel_atomic_state(state), - to_intel_plane(crtc->primary)); + intel_atomic_get_new_plane_state(state, + to_intel_plane(crtc->base.primary)); if (modeset) { - update_scanline_offset(pipe_config); - dev_priv->display.crtc_enable(pipe_config, state); + update_scanline_offset(new_crtc_state); + dev_priv->display.crtc_enable(new_crtc_state, state); /* vblanks 
work again, re-enable pipe CRC. */ - intel_crtc_enable_pipe_crc(intel_crtc); + intel_crtc_enable_pipe_crc(crtc); } else { - intel_pre_plane_update(to_intel_crtc_state(old_crtc_state), - pipe_config); + intel_pre_plane_update(old_crtc_state, new_crtc_state); - if (pipe_config->update_pipe) - intel_encoders_update_pipe(crtc, pipe_config, state); + if (new_crtc_state->update_pipe) + intel_encoders_update_pipe(crtc, new_crtc_state, state); } - if (pipe_config->update_pipe && !pipe_config->enable_fbc) - intel_fbc_disable(intel_crtc); + if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc) + intel_fbc_disable(crtc); else if (new_plane_state) - intel_fbc_enable(intel_crtc, pipe_config, new_plane_state); + intel_fbc_enable(crtc, new_crtc_state, new_plane_state); - intel_begin_crtc_commit(to_intel_atomic_state(state), intel_crtc); + intel_begin_crtc_commit(state, crtc); if (INTEL_GEN(dev_priv) >= 9) - skl_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc); + skl_update_planes_on_crtc(state, crtc); else - i9xx_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc); + i9xx_update_planes_on_crtc(state, crtc); - intel_finish_crtc_commit(to_intel_atomic_state(state), intel_crtc); + intel_finish_crtc_commit(state, crtc); } -static void intel_update_crtcs(struct drm_atomic_state *state) +static void intel_update_crtcs(struct intel_atomic_state *state) { - struct drm_crtc *crtc; - struct drm_crtc_state *old_crtc_state, *new_crtc_state; + struct intel_crtc *crtc; + struct intel_crtc_state *old_crtc_state, *new_crtc_state; int i; - for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - if (!new_crtc_state->active) + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + if (!new_crtc_state->base.active) continue; intel_update_crtc(crtc, state, old_crtc_state, @@ -13628,26 +13613,23 @@ static void intel_update_crtcs(struct drm_atomic_state *state) } } -static void skl_update_crtcs(struct drm_atomic_state *state) +static void skl_update_crtcs(struct intel_atomic_state *state) { - struct drm_i915_private *dev_priv = to_i915(state->dev); - struct intel_atomic_state *intel_state = to_intel_atomic_state(state); - struct drm_crtc *crtc; - struct intel_crtc *intel_crtc; - struct drm_crtc_state *old_crtc_state, *new_crtc_state; - struct intel_crtc_state *cstate; + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_crtc *crtc; + struct intel_crtc_state *old_crtc_state, *new_crtc_state; unsigned int updated = 0; bool progress; enum pipe pipe; int i; u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices; - u8 required_slices = intel_state->wm_results.ddb.enabled_slices; + u8 required_slices = state->wm_results.ddb.enabled_slices; struct skl_ddb_entry entries[I915_MAX_PIPES] = {}; - for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) /* ignore allocations for crtc's that have been turned off. 
*/ - if (new_crtc_state->active) - entries[i] = to_intel_crtc_state(old_crtc_state)->wm.skl.ddb; + if (new_crtc_state->base.active) + entries[i] = old_crtc_state->wm.skl.ddb; /* If 2nd DBuf slice required, enable it here */ if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices) @@ -13662,24 +13644,22 @@ static void skl_update_crtcs(struct drm_atomic_state *state) do { progress = false; - for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { bool vbl_wait = false; - unsigned int cmask = drm_crtc_mask(crtc); + unsigned int cmask = drm_crtc_mask(&crtc->base); - intel_crtc = to_intel_crtc(crtc); - cstate = to_intel_crtc_state(new_crtc_state); - pipe = intel_crtc->pipe; + pipe = crtc->pipe; - if (updated & cmask || !cstate->base.active) + if (updated & cmask || !new_crtc_state->base.active) continue; - if (skl_ddb_allocation_overlaps(&cstate->wm.skl.ddb, + if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, entries, INTEL_INFO(dev_priv)->num_pipes, i)) continue; updated |= cmask; - entries[i] = cstate->wm.skl.ddb; + entries[i] = new_crtc_state->wm.skl.ddb; /* * If this is an already active pipe, it's DDB changed, @@ -13687,10 +13667,10 @@ static void skl_update_crtcs(struct drm_atomic_state *state) * then we need to wait for a vblank to pass for the * new ddb allocation to take effect. */ - if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb, - &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) && - !new_crtc_state->active_changed && - intel_state->wm_results.dirty_pipes != updated) + if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb, + &old_crtc_state->wm.skl.ddb) && + !new_crtc_state->base.active_changed && + state->wm_results.dirty_pipes != updated) vbl_wait = true; intel_update_crtc(crtc, state, old_crtc_state, @@ -13763,57 +13743,50 @@ static void intel_atomic_cleanup_work(struct work_struct *work) intel_atomic_helper_free_state(i915); } -static void intel_atomic_commit_tail(struct drm_atomic_state *state) +static void intel_atomic_commit_tail(struct intel_atomic_state *state) { - struct drm_device *dev = state->dev; - struct intel_atomic_state *intel_state = to_intel_atomic_state(state); + struct drm_device *dev = state->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_crtc_state *old_crtc_state, *new_crtc_state; - struct intel_crtc_state *new_intel_crtc_state, *old_intel_crtc_state; - struct drm_crtc *crtc; - struct intel_crtc *intel_crtc; + struct intel_crtc_state *new_crtc_state, *old_crtc_state; + struct intel_crtc *crtc; u64 put_domains[I915_MAX_PIPES] = {}; intel_wakeref_t wakeref = 0; int i; - intel_atomic_commit_fence_wait(intel_state); + intel_atomic_commit_fence_wait(state); - drm_atomic_helper_wait_for_dependencies(state); + drm_atomic_helper_wait_for_dependencies(&state->base); - if (intel_state->modeset) + if (state->modeset) wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET); - for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - old_intel_crtc_state = to_intel_crtc_state(old_crtc_state); - new_intel_crtc_state = to_intel_crtc_state(new_crtc_state); - intel_crtc = to_intel_crtc(crtc); + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + if (needs_modeset(new_crtc_state) || + new_crtc_state->update_pipe) { - if (needs_modeset(new_intel_crtc_state) || - new_intel_crtc_state->update_pipe) { - - put_domains[intel_crtc->pipe] = + 
put_domains[crtc->pipe] = modeset_get_crtc_power_domains(crtc, - new_intel_crtc_state); + new_crtc_state); } - if (!needs_modeset(new_intel_crtc_state)) + if (!needs_modeset(new_crtc_state)) continue; - intel_pre_plane_update(old_intel_crtc_state, new_intel_crtc_state); + intel_pre_plane_update(old_crtc_state, new_crtc_state); - if (old_crtc_state->active) { - intel_crtc_disable_planes(intel_state, intel_crtc); + if (old_crtc_state->base.active) { + intel_crtc_disable_planes(state, crtc); /* * We need to disable pipe CRC before disabling the pipe, * or we race against vblank off. */ - intel_crtc_disable_pipe_crc(intel_crtc); + intel_crtc_disable_pipe_crc(crtc); - dev_priv->display.crtc_disable(old_intel_crtc_state, state); - intel_crtc->active = false; - intel_fbc_disable(intel_crtc); - intel_disable_shared_dpll(old_intel_crtc_state); + dev_priv->display.crtc_disable(old_crtc_state, state); + crtc->active = false; + intel_fbc_disable(crtc); + intel_disable_shared_dpll(old_crtc_state); /* * Underruns don't always raise @@ -13823,25 +13796,25 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) intel_check_pch_fifo_underruns(dev_priv); /* FIXME unify this for all platforms */ - if (!new_crtc_state->active && + if (!new_crtc_state->base.active && !HAS_GMCH(dev_priv) && dev_priv->display.initial_watermarks) - dev_priv->display.initial_watermarks(intel_state, - new_intel_crtc_state); + dev_priv->display.initial_watermarks(state, + new_crtc_state); } } - /* FIXME: Eventually get rid of our intel_crtc->config pointer */ - for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) - to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state); + /* FIXME: Eventually get rid of our crtc->config pointer */ + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) + crtc->config = new_crtc_state; - if (intel_state->modeset) { - drm_atomic_helper_update_legacy_modeset_state(state->dev, state); + if (state->modeset) { + drm_atomic_helper_update_legacy_modeset_state(dev, &state->base); intel_set_cdclk_pre_plane_update(dev_priv, - &intel_state->cdclk.actual, + &state->cdclk.actual, &dev_priv->cdclk.actual, - intel_state->cdclk.pipe); + state->cdclk.pipe); /* * SKL workaround: bspec recommends we disable the SAGV when we @@ -13854,27 +13827,27 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) } /* Complete the events for pipes that have now been disabled */ - for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { - bool modeset = needs_modeset(to_intel_crtc_state(new_crtc_state)); + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { + bool modeset = needs_modeset(new_crtc_state); /* Complete events for now disable pipes here. */ - if (modeset && !new_crtc_state->active && new_crtc_state->event) { + if (modeset && !new_crtc_state->base.active && new_crtc_state->base.event) { spin_lock_irq(&dev->event_lock); - drm_crtc_send_vblank_event(crtc, new_crtc_state->event); + drm_crtc_send_vblank_event(&crtc->base, new_crtc_state->base.event); spin_unlock_irq(&dev->event_lock); - new_crtc_state->event = NULL; + new_crtc_state->base.event = NULL; } } /* Now enable the clocks, plane, pipe, and connectors that we set up. 
*/ dev_priv->display.update_crtcs(state); - if (intel_state->modeset) + if (state->modeset) intel_set_cdclk_post_plane_update(dev_priv, - &intel_state->cdclk.actual, + &state->cdclk.actual, &dev_priv->cdclk.actual, - intel_state->cdclk.pipe); + state->cdclk.pipe); /* FIXME: We should call drm_atomic_helper_commit_hw_done() here * already, but still need the state for the delayed optimization. To @@ -13885,16 +13858,14 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) * - switch over to the vblank wait helper in the core after that since * we don't need out special handling any more. */ - drm_atomic_helper_wait_for_flip_done(dev, state); + drm_atomic_helper_wait_for_flip_done(dev, &state->base); - for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { - new_intel_crtc_state = to_intel_crtc_state(new_crtc_state); - - if (new_crtc_state->active && - !needs_modeset(to_intel_crtc_state(new_crtc_state)) && - (new_intel_crtc_state->base.color_mgmt_changed || - new_intel_crtc_state->update_pipe)) - intel_color_load_luts(new_intel_crtc_state); + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { + if (new_crtc_state->base.active && + !needs_modeset(new_crtc_state) && + (new_crtc_state->base.color_mgmt_changed || + new_crtc_state->update_pipe)) + intel_color_load_luts(new_crtc_state); } /* @@ -13904,16 +13875,14 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) * * TODO: Move this (and other cleanup) to an async worker eventually. */ - for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { - new_intel_crtc_state = to_intel_crtc_state(new_crtc_state); - + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { if (dev_priv->display.optimize_watermarks) - dev_priv->display.optimize_watermarks(intel_state, - new_intel_crtc_state); + dev_priv->display.optimize_watermarks(state, + new_crtc_state); } - for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - intel_post_plane_update(to_intel_crtc_state(old_crtc_state)); + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + intel_post_plane_update(old_crtc_state); if (put_domains[i]) modeset_put_power_domains(dev_priv, put_domains[i]); @@ -13921,15 +13890,15 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state); } - if (intel_state->modeset) - intel_verify_planes(intel_state); + if (state->modeset) + intel_verify_planes(state); - if (intel_state->modeset && intel_can_enable_sagv(state)) + if (state->modeset && intel_can_enable_sagv(state)) intel_enable_sagv(dev_priv); - drm_atomic_helper_commit_hw_done(state); + drm_atomic_helper_commit_hw_done(&state->base); - if (intel_state->modeset) { + if (state->modeset) { /* As one of the primary mmio accessors, KMS has a high * likelihood of triggering bugs in unclaimed access. 
After we * finish modesetting, see if an error has been flagged, and if @@ -13939,7 +13908,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore); intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref); } - intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); /* * Defer the cleanup of the old state to a separate worker to not @@ -13949,14 +13918,14 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) * schedule point (cond_resched()) here anyway to keep latencies * down. */ - INIT_WORK(&state->commit_work, intel_atomic_cleanup_work); - queue_work(system_highpri_wq, &state->commit_work); + INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work); + queue_work(system_highpri_wq, &state->base.commit_work); } static void intel_atomic_commit_work(struct work_struct *work) { - struct drm_atomic_state *state = - container_of(work, struct drm_atomic_state, commit_work); + struct intel_atomic_state *state = + container_of(work, struct intel_atomic_state, base.commit_work); intel_atomic_commit_tail(state); } @@ -14099,7 +14068,7 @@ static int intel_atomic_commit(struct drm_device *dev, } else { if (intel_state->modeset) flush_workqueue(dev_priv->modeset_wq); - intel_atomic_commit_tail(state); + intel_atomic_commit_tail(intel_state); } return 0; @@ -16878,7 +16847,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev, u64 put_domains; crtc_state = to_intel_crtc_state(crtc->base.state); - put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc_state); + put_domains = modeset_get_crtc_power_domains(crtc, crtc_state); if (WARN_ON(put_domains)) modeset_put_power_domains(dev_priv, put_domains); } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 118c5d2d834b..049a821cbefb 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -307,10 +307,10 @@ struct drm_i915_display_funcs { int (*crtc_compute_clock)(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state); void (*crtc_enable)(struct intel_crtc_state *pipe_config, - struct drm_atomic_state *old_state); + struct intel_atomic_state *old_state); void (*crtc_disable)(struct intel_crtc_state *old_crtc_state, - struct drm_atomic_state *old_state); - void (*update_crtcs)(struct drm_atomic_state *state); + struct intel_atomic_state *old_state); + void (*update_crtcs)(struct intel_atomic_state *state); void (*audio_codec_enable)(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index d9a7a13ce32a..4116de2a77fd 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3736,11 +3736,10 @@ intel_disable_sagv(struct drm_i915_private *dev_priv) return 0; } -bool intel_can_enable_sagv(struct drm_atomic_state *state) +bool intel_can_enable_sagv(struct intel_atomic_state *state) { - struct drm_device *dev = state->dev; + struct drm_device *dev = state->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_atomic_state *intel_state = to_intel_atomic_state(state); struct intel_crtc *crtc; struct intel_plane *plane; struct intel_crtc_state *cstate; @@ -3761,18 +3760,18 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state) /* * If there are no active CRTCs, no additional checks need be performed */ - if 
(hweight32(intel_state->active_crtcs) == 0) + if (hweight32(state->active_crtcs) == 0) return true; /* * SKL+ workaround: bspec recommends we disable SAGV when we have * more then one pipe enabled */ - if (hweight32(intel_state->active_crtcs) > 1) + if (hweight32(state->active_crtcs) > 1) return false; /* Since we're now guaranteed to only have one active CRTC... */ - pipe = ffs(intel_state->active_crtcs) - 1; + pipe = ffs(state->active_crtcs) - 1; crtc = intel_get_crtc_for_pipe(dev_priv, pipe); cstate = to_intel_crtc_state(crtc->base.state); diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h index 1b489fa399e1..e3573e1e16e3 100644 --- a/drivers/gpu/drm/i915/intel_pm.h +++ b/drivers/gpu/drm/i915/intel_pm.h @@ -10,10 +10,10 @@ #include "i915_reg.h" -struct drm_atomic_state; struct drm_device; struct drm_i915_private; struct i915_request; +struct intel_atomic_state; struct intel_crtc; struct intel_crtc_state; struct intel_plane; @@ -52,7 +52,7 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc, struct skl_pipe_wm *out); void g4x_wm_sanitize(struct drm_i915_private *dev_priv); void vlv_wm_sanitize(struct drm_i915_private *dev_priv); -bool intel_can_enable_sagv(struct drm_atomic_state *state); +bool intel_can_enable_sagv(struct intel_atomic_state *state); int intel_enable_sagv(struct drm_i915_private *dev_priv); int intel_disable_sagv(struct drm_i915_private *dev_priv); bool skl_wm_level_equals(const struct skl_wm_level *l1, -- cgit v1.2.3 From 3b4bf24d27e031078f167afdeb1a31933129c781 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Fri, 28 Jun 2019 10:55:14 +0200 Subject: drm/i915: Convert hw state verifier to take more intel state, v2. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Like the rest of the intel atomic functions we should pass along intel_crtc_state, and dereference drm_crtc_state only through intel_crtc_state->base While at it, rename old/new_state to old/new_crtc_state. (Ville) Signed-off-by: Maarten Lankhorst Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190628085517.31886-4-maarten.lankhorst@linux.intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 160 +++++++++++++-------------- 1 file changed, 77 insertions(+), 83 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index f411f57bcacb..6be2fd714e37 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -6977,7 +6977,7 @@ void intel_encoder_destroy(struct drm_encoder *encoder) /* Cross check the actual hw state with our own modeset state tracking (and it's * internal consistency). 
*/ -static void intel_connector_verify_state(struct drm_crtc_state *crtc_state, +static void intel_connector_verify_state(struct intel_crtc_state *crtc_state, struct drm_connector_state *conn_state) { struct intel_connector *connector = to_intel_connector(conn_state->connector); @@ -6995,7 +6995,7 @@ static void intel_connector_verify_state(struct drm_crtc_state *crtc_state, if (!crtc_state) return; - I915_STATE_WARN(!crtc_state->active, + I915_STATE_WARN(!crtc_state->base.active, "connector is active, but attached crtc isn't\n"); if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST) @@ -7007,7 +7007,7 @@ static void intel_connector_verify_state(struct drm_crtc_state *crtc_state, I915_STATE_WARN(conn_state->crtc != encoder->base.crtc, "attached encoder crtc differs from connector crtc\n"); } else { - I915_STATE_WARN(crtc_state && crtc_state->active, + I915_STATE_WARN(crtc_state && crtc_state->base.active, "attached crtc is active, but connector isn't\n"); I915_STATE_WARN(!crtc_state && conn_state->best_encoder, "best encoder set without crtc!\n"); @@ -12695,10 +12695,10 @@ static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv, } } -static void verify_wm_state(struct drm_crtc *crtc, - struct drm_crtc_state *new_state) +static void verify_wm_state(struct intel_crtc *crtc, + struct intel_crtc_state *new_crtc_state) { - struct drm_i915_private *dev_priv = to_i915(crtc->dev); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct skl_hw_state { struct skl_ddb_entry ddb_y[I915_MAX_PLANES]; struct skl_ddb_entry ddb_uv[I915_MAX_PLANES]; @@ -12708,21 +12708,20 @@ static void verify_wm_state(struct drm_crtc *crtc, struct skl_ddb_allocation *sw_ddb; struct skl_pipe_wm *sw_wm; struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - const enum pipe pipe = intel_crtc->pipe; + const enum pipe pipe = crtc->pipe; int plane, level, max_level = ilk_wm_max_level(dev_priv); - if (INTEL_GEN(dev_priv) < 9 || !new_state->active) + if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->base.active) return; hw = kzalloc(sizeof(*hw), GFP_KERNEL); if (!hw) return; - skl_pipe_wm_get_hw_state(intel_crtc, &hw->wm); - sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal; + skl_pipe_wm_get_hw_state(crtc, &hw->wm); + sw_wm = &new_crtc_state->wm.skl.optimal; - skl_pipe_ddb_get_hw_state(intel_crtc, hw->ddb_y, hw->ddb_uv); + skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv); skl_ddb_get_hw_state(dev_priv, &hw->ddb); sw_ddb = &dev_priv->wm.skl_hw.ddb; @@ -12770,7 +12769,7 @@ static void verify_wm_state(struct drm_crtc *crtc, /* DDB */ hw_ddb_entry = &hw->ddb_y[plane]; - sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[plane]; + sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane]; if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n", @@ -12822,7 +12821,7 @@ static void verify_wm_state(struct drm_crtc *crtc, /* DDB */ hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR]; - sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[PLANE_CURSOR]; + sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR]; if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n", @@ -12836,23 +12835,22 @@ static void verify_wm_state(struct drm_crtc *crtc, } static void -verify_connector_state(struct drm_device *dev, - struct drm_atomic_state *state, - struct 
drm_crtc *crtc) +verify_connector_state(struct intel_atomic_state *state, + struct intel_crtc *crtc) { struct drm_connector *connector; struct drm_connector_state *new_conn_state; int i; - for_each_new_connector_in_state(state, connector, new_conn_state, i) { + for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) { struct drm_encoder *encoder = connector->encoder; - struct drm_crtc_state *crtc_state = NULL; + struct intel_crtc_state *crtc_state = NULL; - if (new_conn_state->crtc != crtc) + if (new_conn_state->crtc != &crtc->base) continue; if (crtc) - crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc); + crtc_state = intel_atomic_get_new_crtc_state(state, crtc); intel_connector_verify_state(crtc_state, new_conn_state); @@ -12862,14 +12860,14 @@ verify_connector_state(struct drm_device *dev, } static void -verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state) +verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state) { struct intel_encoder *encoder; struct drm_connector *connector; struct drm_connector_state *old_conn_state, *new_conn_state; int i; - for_each_intel_encoder(dev, encoder) { + for_each_intel_encoder(&dev_priv->drm, encoder) { bool enabled = false, found = false; enum pipe pipe; @@ -12877,7 +12875,7 @@ verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state) encoder->base.base.id, encoder->base.name); - for_each_oldnew_connector_in_state(state, connector, old_conn_state, + for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state, new_conn_state, i) { if (old_conn_state->best_encoder == &encoder->base) found = true; @@ -12911,50 +12909,49 @@ verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state) } static void -verify_crtc_state(struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state, - struct drm_crtc_state *new_crtc_state) +verify_crtc_state(struct intel_crtc *crtc, + struct intel_crtc_state *old_crtc_state, + struct intel_crtc_state *new_crtc_state) { - struct drm_device *dev = crtc->dev; + struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_encoder *encoder; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_crtc_state *pipe_config, *sw_config; + struct intel_crtc_state *pipe_config; struct drm_atomic_state *state; bool active; - state = old_crtc_state->state; - __drm_atomic_helper_crtc_destroy_state(old_crtc_state); - pipe_config = to_intel_crtc_state(old_crtc_state); + state = old_crtc_state->base.state; + __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->base); + pipe_config = old_crtc_state; memset(pipe_config, 0, sizeof(*pipe_config)); - pipe_config->base.crtc = crtc; + pipe_config->base.crtc = &crtc->base; pipe_config->base.state = state; - DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name); + DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.base.id, crtc->base.name); - active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config); + active = dev_priv->display.get_pipe_config(crtc, pipe_config); /* we keep both pipes enabled on 830 */ if (IS_I830(dev_priv)) - active = new_crtc_state->active; + active = new_crtc_state->base.active; - I915_STATE_WARN(new_crtc_state->active != active, + I915_STATE_WARN(new_crtc_state->base.active != active, "crtc active state doesn't match with hw state " - "(expected %i, found %i)\n", new_crtc_state->active, active); + "(expected %i, found %i)\n", new_crtc_state->base.active, active); - 
I915_STATE_WARN(intel_crtc->active != new_crtc_state->active, + I915_STATE_WARN(crtc->active != new_crtc_state->base.active, "transitional active state does not match atomic hw state " - "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active); + "(expected %i, found %i)\n", new_crtc_state->base.active, crtc->active); - for_each_encoder_on_crtc(dev, crtc, encoder) { + for_each_encoder_on_crtc(dev, &crtc->base, encoder) { enum pipe pipe; active = encoder->get_hw_state(encoder, &pipe); - I915_STATE_WARN(active != new_crtc_state->active, + I915_STATE_WARN(active != new_crtc_state->base.active, "[ENCODER:%i] active %i with crtc active %i\n", - encoder->base.base.id, active, new_crtc_state->active); + encoder->base.base.id, active, new_crtc_state->base.active); - I915_STATE_WARN(active && intel_crtc->pipe != pipe, + I915_STATE_WARN(active && crtc->pipe != pipe, "Encoder connected to wrong pipe %c\n", pipe_name(pipe)); @@ -12964,16 +12961,16 @@ verify_crtc_state(struct drm_crtc *crtc, intel_crtc_compute_pixel_rate(pipe_config); - if (!new_crtc_state->active) + if (!new_crtc_state->base.active) return; intel_pipe_config_sanity_check(dev_priv, pipe_config); - sw_config = to_intel_crtc_state(new_crtc_state); - if (!intel_pipe_config_compare(sw_config, pipe_config, false)) { + if (!intel_pipe_config_compare(new_crtc_state, + pipe_config, false)) { I915_STATE_WARN(1, "pipe state doesn't match!\n"); intel_dump_pipe_config(pipe_config, NULL, "[hw state]"); - intel_dump_pipe_config(sw_config, NULL, "[sw state]"); + intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]"); } } @@ -12993,8 +12990,8 @@ intel_verify_planes(struct intel_atomic_state *state) static void verify_single_dpll_state(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, - struct drm_crtc *crtc, - struct drm_crtc_state *new_state) + struct intel_crtc *crtc, + struct intel_crtc_state *new_crtc_state) { struct intel_dpll_hw_state dpll_hw_state; unsigned int crtc_mask; @@ -13024,16 +13021,16 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv, return; } - crtc_mask = drm_crtc_mask(crtc); + crtc_mask = drm_crtc_mask(&crtc->base); - if (new_state->active) + if (new_crtc_state->base.active) I915_STATE_WARN(!(pll->active_mask & crtc_mask), "pll active mismatch (expected pipe %c in active mask 0x%02x)\n", - pipe_name(drm_crtc_index(crtc)), pll->active_mask); + pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask); else I915_STATE_WARN(pll->active_mask & crtc_mask, "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n", - pipe_name(drm_crtc_index(crtc)), pll->active_mask); + pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask); I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask), "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n", @@ -13046,50 +13043,47 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv, } static void -verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc, - struct drm_crtc_state *old_crtc_state, - struct drm_crtc_state *new_crtc_state) +verify_shared_dpll_state(struct intel_crtc *crtc, + struct intel_crtc_state *old_crtc_state, + struct intel_crtc_state *new_crtc_state) { - struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state); - struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - if (new_state->shared_dpll) - verify_single_dpll_state(dev_priv, new_state->shared_dpll, 
crtc, new_crtc_state); + if (new_crtc_state->shared_dpll) + verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state); - if (old_state->shared_dpll && - old_state->shared_dpll != new_state->shared_dpll) { - unsigned int crtc_mask = drm_crtc_mask(crtc); - struct intel_shared_dpll *pll = old_state->shared_dpll; + if (old_crtc_state->shared_dpll && + old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) { + unsigned int crtc_mask = drm_crtc_mask(&crtc->base); + struct intel_shared_dpll *pll = old_crtc_state->shared_dpll; I915_STATE_WARN(pll->active_mask & crtc_mask, "pll active mismatch (didn't expect pipe %c in active mask)\n", - pipe_name(drm_crtc_index(crtc))); + pipe_name(drm_crtc_index(&crtc->base))); I915_STATE_WARN(pll->state.crtc_mask & crtc_mask, "pll enabled crtcs mismatch (found %x in enabled mask)\n", - pipe_name(drm_crtc_index(crtc))); + pipe_name(drm_crtc_index(&crtc->base))); } } static void intel_modeset_verify_crtc(struct intel_crtc *crtc, struct intel_atomic_state *state, - struct intel_crtc_state *old_state, - struct intel_crtc_state *new_state) + struct intel_crtc_state *old_crtc_state, + struct intel_crtc_state *new_crtc_state) { - if (!needs_modeset(new_state) && !new_state->update_pipe) + if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe) return; - verify_wm_state(&crtc->base, &new_state->base); - verify_connector_state(crtc->base.dev, &state->base, &crtc->base); - verify_crtc_state(&crtc->base, &old_state->base, &new_state->base); - verify_shared_dpll_state(crtc->base.dev, &crtc->base, &old_state->base, &new_state->base); + verify_wm_state(crtc, new_crtc_state); + verify_connector_state(state, crtc); + verify_crtc_state(crtc, old_crtc_state, new_crtc_state); + verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state); } static void -verify_disabled_dpll_state(struct drm_device *dev) +verify_disabled_dpll_state(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); int i; for (i = 0; i < dev_priv->num_shared_dpll; i++) @@ -13097,12 +13091,12 @@ verify_disabled_dpll_state(struct drm_device *dev) } static void -intel_modeset_verify_disabled(struct drm_device *dev, +intel_modeset_verify_disabled(struct drm_i915_private *dev_priv, struct intel_atomic_state *state) { - verify_encoder_state(dev, &state->base); - verify_connector_state(dev, &state->base, NULL); - verify_disabled_dpll_state(dev); + verify_encoder_state(dev_priv, state); + verify_connector_state(state, NULL); + verify_disabled_dpll_state(dev_priv); } static void update_scanline_offset(const struct intel_crtc_state *crtc_state) @@ -13823,7 +13817,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) if (!intel_can_enable_sagv(state)) intel_disable_sagv(dev_priv); - intel_modeset_verify_disabled(dev, state); + intel_modeset_verify_disabled(dev_priv, state); } /* Complete the events for pipes that have now been disabled */ -- cgit v1.2.3 From 49743e1dfb2a1f345f21ad84104663d92b4ba440 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Fri, 28 Jun 2019 10:55:15 +0200 Subject: drm/i915: Use intel_crtc_state in sanitize_watermarks() too MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Get rid of all instances of drm_crtc_state, and rename cstate to crtc_state for more clarity. 
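The whole series leans on the fact that struct intel_crtc_state embeds the DRM core state as its first member, so the intel type can be passed around everywhere and the core fields reached through ->base (in the driver, to_intel_crtc_state() is a container_of() over that member). A minimal, compilable sketch of the relationship; the struct layouts and the example_* names below are cut-down stand-ins, not the real i915 definitions:

#include <stdbool.h>
#include <stddef.h>

/* cut-down stand-ins for the real drm/i915 state structs */
struct drm_crtc_state {
        bool active;
};

struct intel_crtc_state {
        struct drm_crtc_state base;     /* DRM core state, embedded first */
        bool need_postvbl_update;       /* i915-private watermark bookkeeping */
};

/* the downcast is just pointer arithmetic over the embedded ->base */
static inline struct intel_crtc_state *
example_to_intel_crtc_state(struct drm_crtc_state *s)
{
        return (struct intel_crtc_state *)((char *)s -
                offsetof(struct intel_crtc_state, base));
}

/* after the conversion, helpers take the intel type and only reach DRM
 * core data through ->base */
static inline bool example_crtc_is_active(const struct intel_crtc_state *crtc_state)
{
        return crtc_state->base.active;
}

With that in place, the sanitize_watermarks() loop in the diff below can iterate with for_each_new_intel_crtc_in_state() and drop the to_intel_crtc_state() casts from the loop body.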
Signed-off-by: Maarten Lankhorst Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190628085517.31886-5-maarten.lankhorst@linux.intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 6be2fd714e37..4ea63026d9c4 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -15812,8 +15812,8 @@ static void sanitize_watermarks(struct drm_device *dev) struct drm_i915_private *dev_priv = to_i915(dev); struct drm_atomic_state *state; struct intel_atomic_state *intel_state; - struct drm_crtc *crtc; - struct drm_crtc_state *cstate; + struct intel_crtc *crtc; + struct intel_crtc_state *crtc_state; struct drm_modeset_acquire_ctx ctx; int ret; int i; @@ -15868,13 +15868,11 @@ retry: } /* Write calculated watermark values back */ - for_each_new_crtc_in_state(state, crtc, cstate, i) { - struct intel_crtc_state *cs = to_intel_crtc_state(cstate); - - cs->wm.need_postvbl_update = true; - dev_priv->display.optimize_watermarks(intel_state, cs); + for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) { + crtc_state->wm.need_postvbl_update = true; + dev_priv->display.optimize_watermarks(intel_state, crtc_state); - to_intel_crtc_state(crtc->state)->wm = cs->wm; + to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm; } put_state: -- cgit v1.2.3 From 4f25720b2c04a386e7698937f8b8468e223a5ce4 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Fri, 28 Jun 2019 10:55:16 +0200 Subject: drm/i915: Pass intel state to plane functions as well MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Pass along the correct state as much as possible, instead of relying on the drm state internally. This is required to rely on hw state internally soon. While at it, clean up intel_plane_atomic_check slightly, by using a helper function to get the intel_crtc. 
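That helper simply prefers the CRTC attached to the new plane state and falls back to the old one. Roughly, with trimmed stand-in types and example_* names rather than the real i915 structs:

#include <stddef.h>

/* trimmed stand-ins for the real drm/i915 types */
struct drm_crtc { int index; };
struct intel_crtc { struct drm_crtc base; int pipe; };
struct drm_plane_state { struct drm_crtc *crtc; };
struct intel_plane_state { struct drm_plane_state base; };

static struct intel_crtc *example_to_intel_crtc(struct drm_crtc *crtc)
{
        /* valid because base is the first member of struct intel_crtc */
        return (struct intel_crtc *)crtc;
}

/*
 * A plane may be moving between CRTCs or being disabled, so take the CRTC
 * from the new state when set, otherwise from the old state; NULL means
 * the plane is unused on both sides.
 */
static struct intel_crtc *
example_get_crtc_from_states(const struct intel_plane_state *old_plane_state,
                             const struct intel_plane_state *new_plane_state)
{
        if (new_plane_state->base.crtc)
                return example_to_intel_crtc(new_plane_state->base.crtc);
        if (old_plane_state->base.crtc)
                return example_to_intel_crtc(old_plane_state->base.crtc);
        return NULL;
}

As the diff below shows, a NULL result lets intel_plane_atomic_check() bail out before looking up any CRTC states at all.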
(Ville) Signed-off-by: Maarten Lankhorst Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190628085517.31886-6-maarten.lankhorst@linux.intel.com --- drivers/gpu/drm/i915/display/intel_atomic_plane.c | 56 ++++++++++++++-------- drivers/gpu/drm/i915/display/intel_atomic_plane.h | 5 +- drivers/gpu/drm/i915/display/intel_display.c | 58 ++++++++++------------- 3 files changed, 64 insertions(+), 55 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c index 30bd4e76fff9..ab411d5e093c 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c @@ -176,33 +176,49 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_ new_crtc_state->data_rate[plane->id] = intel_plane_data_rate(new_crtc_state, new_plane_state); - return intel_plane_atomic_calc_changes(old_crtc_state, - &new_crtc_state->base, - old_plane_state, - &new_plane_state->base); + return intel_plane_atomic_calc_changes(old_crtc_state, new_crtc_state, + old_plane_state, new_plane_state); } -static int intel_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *new_plane_state) +static struct intel_crtc * +get_crtc_from_states(const struct intel_plane_state *old_plane_state, + const struct intel_plane_state *new_plane_state) { - struct drm_atomic_state *state = new_plane_state->state; - const struct drm_plane_state *old_plane_state = - drm_atomic_get_old_plane_state(state, plane); - struct drm_crtc *crtc = new_plane_state->crtc ?: old_plane_state->crtc; - const struct drm_crtc_state *old_crtc_state; - struct drm_crtc_state *new_crtc_state; - - new_plane_state->visible = false; + if (new_plane_state->base.crtc) + return to_intel_crtc(new_plane_state->base.crtc); + + if (old_plane_state->base.crtc) + return to_intel_crtc(old_plane_state->base.crtc); + + return NULL; +} + +static int intel_plane_atomic_check(struct drm_plane *_plane, + struct drm_plane_state *_new_plane_state) +{ + struct intel_plane *plane = to_intel_plane(_plane); + struct intel_atomic_state *state = + to_intel_atomic_state(_new_plane_state->state); + struct intel_plane_state *new_plane_state = + to_intel_plane_state(_new_plane_state); + const struct intel_plane_state *old_plane_state = + intel_atomic_get_old_plane_state(state, plane); + struct intel_crtc *crtc = + get_crtc_from_states(old_plane_state, new_plane_state); + const struct intel_crtc_state *old_crtc_state; + struct intel_crtc_state *new_crtc_state; + + new_plane_state->base.visible = false; if (!crtc) return 0; - old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc); - new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc); + old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); + new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - return intel_plane_atomic_check_with_state(to_intel_crtc_state(old_crtc_state), - to_intel_crtc_state(new_crtc_state), - to_intel_plane_state(old_plane_state), - to_intel_plane_state(new_plane_state)); + return intel_plane_atomic_check_with_state(old_crtc_state, + new_crtc_state, + old_plane_state, + new_plane_state); } static struct intel_plane * diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h index 1437a8797e10..cb7ef4f9eafd 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h @@ -8,7 
+8,6 @@ #include -struct drm_crtc_state; struct drm_plane; struct drm_property; struct intel_atomic_state; @@ -43,8 +42,8 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_ const struct intel_plane_state *old_plane_state, struct intel_plane_state *intel_state); int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state, - struct drm_crtc_state *crtc_state, + struct intel_crtc_state *crtc_state, const struct intel_plane_state *old_plane_state, - struct drm_plane_state *plane_state); + struct intel_plane_state *plane_state); #endif /* __INTEL_ATOMIC_PLANE_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 4ea63026d9c4..c97c0a9f6208 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -11286,7 +11286,7 @@ static void intel_crtc_destroy(struct drm_crtc *crtc) * * Returns true or false. */ -static bool intel_wm_need_update(struct intel_plane_state *cur, +static bool intel_wm_need_update(const struct intel_plane_state *cur, struct intel_plane_state *new) { /* Update watermarks on tiling or size changes. */ @@ -11318,33 +11318,28 @@ static bool needs_scaling(const struct intel_plane_state *state) } int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state, - struct drm_crtc_state *crtc_state, + struct intel_crtc_state *crtc_state, const struct intel_plane_state *old_plane_state, - struct drm_plane_state *plane_state) + struct intel_plane_state *plane_state) { - struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state); - struct drm_crtc *crtc = crtc_state->crtc; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_plane *plane = to_intel_plane(plane_state->plane); - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = to_i915(dev); - bool mode_changed = needs_modeset(pipe_config); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + bool mode_changed = needs_modeset(crtc_state); bool was_crtc_enabled = old_crtc_state->base.active; - bool is_crtc_enabled = crtc_state->active; + bool is_crtc_enabled = crtc_state->base.active; bool turn_off, turn_on, visible, was_visible; - struct drm_framebuffer *fb = plane_state->fb; + struct drm_framebuffer *fb = plane_state->base.fb; int ret; if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) { - ret = skl_update_scaler_plane( - to_intel_crtc_state(crtc_state), - to_intel_plane_state(plane_state)); + ret = skl_update_scaler_plane(crtc_state, plane_state); if (ret) return ret; } was_visible = old_plane_state->base.visible; - visible = plane_state->visible; + visible = plane_state->base.visible; if (!was_crtc_enabled && WARN_ON(was_visible)) was_visible = false; @@ -11360,22 +11355,22 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat * only combine the results from all planes in the current place? 
*/ if (!is_crtc_enabled) { - plane_state->visible = visible = false; - to_intel_crtc_state(crtc_state)->active_planes &= ~BIT(plane->id); - to_intel_crtc_state(crtc_state)->data_rate[plane->id] = 0; + plane_state->base.visible = visible = false; + crtc_state->active_planes &= ~BIT(plane->id); + crtc_state->data_rate[plane->id] = 0; } if (!was_visible && !visible) return 0; if (fb != old_plane_state->base.fb) - pipe_config->fb_changed = true; + crtc_state->fb_changed = true; turn_off = was_visible && (!visible || mode_changed); turn_on = visible && (!was_visible || mode_changed); DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n", - intel_crtc->base.base.id, intel_crtc->base.name, + crtc->base.base.id, crtc->base.name, plane->base.base.id, plane->base.name, fb ? fb->base.id : -1); @@ -11386,29 +11381,28 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat if (turn_on) { if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) - pipe_config->update_wm_pre = true; + crtc_state->update_wm_pre = true; /* must disable cxsr around plane enable/disable */ if (plane->id != PLANE_CURSOR) - pipe_config->disable_cxsr = true; + crtc_state->disable_cxsr = true; } else if (turn_off) { if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) - pipe_config->update_wm_post = true; + crtc_state->update_wm_post = true; /* must disable cxsr around plane enable/disable */ if (plane->id != PLANE_CURSOR) - pipe_config->disable_cxsr = true; - } else if (intel_wm_need_update(to_intel_plane_state(plane->base.state), - to_intel_plane_state(plane_state))) { + crtc_state->disable_cxsr = true; + } else if (intel_wm_need_update(old_plane_state, plane_state)) { if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) { /* FIXME bollocks */ - pipe_config->update_wm_pre = true; - pipe_config->update_wm_post = true; + crtc_state->update_wm_pre = true; + crtc_state->update_wm_post = true; } } if (visible || was_visible) - pipe_config->fb_bits |= plane->frontbuffer_bit; + crtc_state->fb_bits |= plane->frontbuffer_bit; /* * ILK/SNB DVSACNTR/Sprite Enable @@ -11447,8 +11441,8 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat (IS_GEN_RANGE(dev_priv, 5, 6) || IS_IVYBRIDGE(dev_priv)) && (turn_on || (!needs_scaling(old_plane_state) && - needs_scaling(to_intel_plane_state(plane_state))))) - pipe_config->disable_lp_wm = true; + needs_scaling(plane_state)))) + crtc_state->disable_lp_wm = true; return 0; } -- cgit v1.2.3 From ec193640819e014385c0ccc87e66909e44b4705e Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Fri, 28 Jun 2019 10:55:17 +0200 Subject: drm/i915: Use intel state as much as possible in wm code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of directly referencing drm_crtc_state, convert to intel_ctc_state and use the base struct. This is useful when we're making the split between uapi and hw state, and also makes the code slightly more readable. A lot of places also use cstate, instead of the more common crtc_state. Clean those up to use crtc_state. Same for pstate vs plane_state. 
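The resulting shape of the watermark helpers is roughly the sketch below, which mirrors intel_get_linetime_us() from the diff that follows; the struct layouts are heavily trimmed stand-ins, the example_* name is invented, and the fixed-point helpers are replaced by plain integer math:

#include <stdbool.h>

/* heavily trimmed stand-ins for the real state structs */
struct drm_display_mode { unsigned int crtc_htotal; };

struct drm_crtc_state {
        bool active;
        struct drm_display_mode adjusted_mode;
};

struct intel_crtc_state {
        struct drm_crtc_state base;     /* DRM core data only via ->base */
        unsigned int pixel_rate;        /* i915-derived pixel rate, in kHz */
};

/* helpers now take crtc_state (not cstate) and never see a bare drm_crtc_state */
static unsigned int example_linetime_us(const struct intel_crtc_state *crtc_state)
{
        unsigned int pixel_rate = crtc_state->pixel_rate;
        unsigned int htotal = crtc_state->base.adjusted_mode.crtc_htotal;

        if (!crtc_state->base.active || pixel_rate == 0)
                return 0;

        /* one scanline takes htotal pixels at pixel_rate kHz */
        return htotal * 1000 / pixel_rate;
}

The real code in intel_pm.c performs the same division with uint_fixed_16_16_t so that sub-microsecond precision is preserved.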
(Ville) Signed-off-by: Maarten Lankhorst Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190628085517.31886-7-maarten.lankhorst@linux.intel.com --- drivers/gpu/drm/i915/i915_drv.h | 12 +- drivers/gpu/drm/i915/intel_pm.c | 402 +++++++++++++++++++--------------------- 2 files changed, 200 insertions(+), 214 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 049a821cbefb..04723a2e0713 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -287,14 +287,14 @@ struct drm_i915_display_funcs { enum pipe pipe); int (*get_fifo_size)(struct drm_i915_private *dev_priv, enum i9xx_plane_id i9xx_plane); - int (*compute_pipe_wm)(struct intel_crtc_state *cstate); - int (*compute_intermediate_wm)(struct intel_crtc_state *newstate); + int (*compute_pipe_wm)(struct intel_crtc_state *crtc_state); + int (*compute_intermediate_wm)(struct intel_crtc_state *crtc_state); void (*initial_watermarks)(struct intel_atomic_state *state, - struct intel_crtc_state *cstate); + struct intel_crtc_state *crtc_state); void (*atomic_update_watermarks)(struct intel_atomic_state *state, - struct intel_crtc_state *cstate); + struct intel_crtc_state *crtc_state); void (*optimize_watermarks)(struct intel_atomic_state *state, - struct intel_crtc_state *cstate); + struct intel_crtc_state *crtc_state); int (*compute_global_watermarks)(struct intel_atomic_state *state); void (*update_wm)(struct intel_crtc *crtc); int (*modeset_calc_cdclk)(struct intel_atomic_state *state); @@ -1646,7 +1646,7 @@ struct drm_i915_private { /* * Should be held around atomic WM register writing; also * protects * intel_crtc->wm.active and - * cstate->wm.need_postvbl_update. + * crtc_state->wm.need_postvbl_update. */ struct mutex wm_mutex; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 4116de2a77fd..d10c62d3f10c 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -1198,8 +1198,8 @@ static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state, return dirty; } -static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *cstate, - const struct intel_plane_state *pstate, +static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, u32 pri_val); static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state, @@ -2493,8 +2493,8 @@ struct ilk_wm_maximums { * For both WM_PIPE and WM_LP. * mem_value must be in 0.1us units. 
*/ -static u32 ilk_compute_pri_wm(const struct intel_crtc_state *cstate, - const struct intel_plane_state *pstate, +static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, u32 mem_value, bool is_lp) { u32 method1, method2; @@ -2503,19 +2503,19 @@ static u32 ilk_compute_pri_wm(const struct intel_crtc_state *cstate, if (mem_value == 0) return U32_MAX; - if (!intel_wm_plane_visible(cstate, pstate)) + if (!intel_wm_plane_visible(crtc_state, plane_state)) return 0; - cpp = pstate->base.fb->format->cpp[0]; + cpp = plane_state->base.fb->format->cpp[0]; - method1 = ilk_wm_method1(cstate->pixel_rate, cpp, mem_value); + method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value); if (!is_lp) return method1; - method2 = ilk_wm_method2(cstate->pixel_rate, - cstate->base.adjusted_mode.crtc_htotal, - drm_rect_width(&pstate->base.dst), + method2 = ilk_wm_method2(crtc_state->pixel_rate, + crtc_state->base.adjusted_mode.crtc_htotal, + drm_rect_width(&plane_state->base.dst), cpp, mem_value); return min(method1, method2); @@ -2525,8 +2525,8 @@ static u32 ilk_compute_pri_wm(const struct intel_crtc_state *cstate, * For both WM_PIPE and WM_LP. * mem_value must be in 0.1us units. */ -static u32 ilk_compute_spr_wm(const struct intel_crtc_state *cstate, - const struct intel_plane_state *pstate, +static u32 ilk_compute_spr_wm(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, u32 mem_value) { u32 method1, method2; @@ -2535,15 +2535,15 @@ static u32 ilk_compute_spr_wm(const struct intel_crtc_state *cstate, if (mem_value == 0) return U32_MAX; - if (!intel_wm_plane_visible(cstate, pstate)) + if (!intel_wm_plane_visible(crtc_state, plane_state)) return 0; - cpp = pstate->base.fb->format->cpp[0]; + cpp = plane_state->base.fb->format->cpp[0]; - method1 = ilk_wm_method1(cstate->pixel_rate, cpp, mem_value); - method2 = ilk_wm_method2(cstate->pixel_rate, - cstate->base.adjusted_mode.crtc_htotal, - drm_rect_width(&pstate->base.dst), + method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value); + method2 = ilk_wm_method2(crtc_state->pixel_rate, + crtc_state->base.adjusted_mode.crtc_htotal, + drm_rect_width(&plane_state->base.dst), cpp, mem_value); return min(method1, method2); } @@ -2552,8 +2552,8 @@ static u32 ilk_compute_spr_wm(const struct intel_crtc_state *cstate, * For both WM_PIPE and WM_LP. * mem_value must be in 0.1us units. */ -static u32 ilk_compute_cur_wm(const struct intel_crtc_state *cstate, - const struct intel_plane_state *pstate, +static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, u32 mem_value) { int cpp; @@ -2561,29 +2561,29 @@ static u32 ilk_compute_cur_wm(const struct intel_crtc_state *cstate, if (mem_value == 0) return U32_MAX; - if (!intel_wm_plane_visible(cstate, pstate)) + if (!intel_wm_plane_visible(crtc_state, plane_state)) return 0; - cpp = pstate->base.fb->format->cpp[0]; + cpp = plane_state->base.fb->format->cpp[0]; - return ilk_wm_method2(cstate->pixel_rate, - cstate->base.adjusted_mode.crtc_htotal, - pstate->base.crtc_w, cpp, mem_value); + return ilk_wm_method2(crtc_state->pixel_rate, + crtc_state->base.adjusted_mode.crtc_htotal, + plane_state->base.crtc_w, cpp, mem_value); } /* Only for WM_LP. 
*/ -static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *cstate, - const struct intel_plane_state *pstate, +static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, u32 pri_val) { int cpp; - if (!intel_wm_plane_visible(cstate, pstate)) + if (!intel_wm_plane_visible(crtc_state, plane_state)) return 0; - cpp = pstate->base.fb->format->cpp[0]; + cpp = plane_state->base.fb->format->cpp[0]; - return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->base.dst), cpp); + return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->base.dst), cpp); } static unsigned int @@ -2752,7 +2752,7 @@ static bool ilk_validate_wm_level(int level, static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv, const struct intel_crtc *intel_crtc, int level, - struct intel_crtc_state *cstate, + struct intel_crtc_state *crtc_state, const struct intel_plane_state *pristate, const struct intel_plane_state *sprstate, const struct intel_plane_state *curstate, @@ -2770,30 +2770,30 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv, } if (pristate) { - result->pri_val = ilk_compute_pri_wm(cstate, pristate, + result->pri_val = ilk_compute_pri_wm(crtc_state, pristate, pri_latency, level); - result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val); + result->fbc_val = ilk_compute_fbc_wm(crtc_state, pristate, result->pri_val); } if (sprstate) - result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency); + result->spr_val = ilk_compute_spr_wm(crtc_state, sprstate, spr_latency); if (curstate) - result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency); + result->cur_val = ilk_compute_cur_wm(crtc_state, curstate, cur_latency); result->enable = true; } static u32 -hsw_compute_linetime_wm(const struct intel_crtc_state *cstate) +hsw_compute_linetime_wm(const struct intel_crtc_state *crtc_state) { const struct intel_atomic_state *intel_state = - to_intel_atomic_state(cstate->base.state); + to_intel_atomic_state(crtc_state->base.state); const struct drm_display_mode *adjusted_mode = - &cstate->base.adjusted_mode; + &crtc_state->base.adjusted_mode; u32 linetime, ips_linetime; - if (!cstate->base.active) + if (!crtc_state->base.active) return 0; if (WARN_ON(adjusted_mode->crtc_clock == 0)) return 0; @@ -3101,10 +3101,10 @@ static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv, } /* Compute new watermarks for the pipe */ -static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate) +static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state) { - struct drm_atomic_state *state = cstate->base.state; - struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); + struct drm_atomic_state *state = crtc_state->base.state; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); struct intel_pipe_wm *pipe_wm; struct drm_device *dev = state->dev; const struct drm_i915_private *dev_priv = to_i915(dev); @@ -3116,9 +3116,9 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate) int level, max_level = ilk_wm_max_level(dev_priv), usable_level; struct ilk_wm_maximums max; - pipe_wm = &cstate->wm.ilk.optimal; + pipe_wm = &crtc_state->wm.ilk.optimal; - drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, &cstate->base) { + drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, &crtc_state->base) { const struct intel_plane_state *ps = to_intel_plane_state(plane_state); if (plane->type == DRM_PLANE_TYPE_PRIMARY) @@ -3129,7 +3129,7 @@ static int 
ilk_compute_pipe_wm(struct intel_crtc_state *cstate) curstate = ps; } - pipe_wm->pipe_enabled = cstate->base.active; + pipe_wm->pipe_enabled = crtc_state->base.active; if (sprstate) { pipe_wm->sprites_enabled = sprstate->base.visible; pipe_wm->sprites_scaled = sprstate->base.visible && @@ -3148,11 +3148,11 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate) usable_level = 0; memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm)); - ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate, + ilk_compute_wm_level(dev_priv, intel_crtc, 0, crtc_state, pristate, sprstate, curstate, &pipe_wm->wm[0]); if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) - pipe_wm->linetime = hsw_compute_linetime_wm(cstate); + pipe_wm->linetime = hsw_compute_linetime_wm(crtc_state); if (!ilk_validate_pipe_wm(dev_priv, pipe_wm)) return -EINVAL; @@ -3162,7 +3162,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate) for (level = 1; level <= usable_level; level++) { struct intel_wm_level *wm = &pipe_wm->wm[level]; - ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate, + ilk_compute_wm_level(dev_priv, intel_crtc, level, crtc_state, pristate, sprstate, curstate, wm); /* @@ -3742,7 +3742,7 @@ bool intel_can_enable_sagv(struct intel_atomic_state *state) struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc; struct intel_plane *plane; - struct intel_crtc_state *cstate; + struct intel_crtc_state *crtc_state; enum pipe pipe; int level, latency; int sagv_block_time_us; @@ -3773,14 +3773,14 @@ bool intel_can_enable_sagv(struct intel_atomic_state *state) /* Since we're now guaranteed to only have one active CRTC... */ pipe = ffs(state->active_crtcs) - 1; crtc = intel_get_crtc_for_pipe(dev_priv, pipe); - cstate = to_intel_crtc_state(crtc->base.state); + crtc_state = to_intel_crtc_state(crtc->base.state); if (crtc->base.state->adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) return false; for_each_intel_plane_on_crtc(dev, crtc, plane) { struct skl_plane_wm *wm = - &cstate->wm.skl.optimal.planes[plane->id]; + &crtc_state->wm.skl.optimal.planes[plane->id]; /* Skip this plane if it's not enabled */ if (!wm->wm[0].plane_en) @@ -3811,7 +3811,7 @@ bool intel_can_enable_sagv(struct intel_atomic_state *state) } static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv, - const struct intel_crtc_state *cstate, + const struct intel_crtc_state *crtc_state, const u64 total_data_rate, const int num_active, struct skl_ddb_allocation *ddb) @@ -3825,7 +3825,7 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv, if (INTEL_GEN(dev_priv) < 11) return ddb_size - 4; /* 4 blocks for bypass path allocation */ - adjusted_mode = &cstate->base.adjusted_mode; + adjusted_mode = &crtc_state->base.adjusted_mode; total_data_bw = total_data_rate * drm_mode_vrefresh(adjusted_mode); /* @@ -3848,23 +3848,22 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv, static void skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv, - const struct intel_crtc_state *cstate, + const struct intel_crtc_state *crtc_state, const u64 total_data_rate, struct skl_ddb_allocation *ddb, struct skl_ddb_entry *alloc, /* out */ int *num_active /* out */) { - struct drm_atomic_state *state = cstate->base.state; + struct drm_atomic_state *state = crtc_state->base.state; struct intel_atomic_state *intel_state = to_intel_atomic_state(state); - struct drm_crtc *for_crtc = cstate->base.crtc; - const struct drm_crtc_state *crtc_state; - const struct drm_crtc *crtc; + struct drm_crtc *for_crtc = 
crtc_state->base.crtc; + const struct intel_crtc *crtc; u32 pipe_width = 0, total_width = 0, width_before_pipe = 0; enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe; u16 ddb_size; u32 i; - if (WARN_ON(!state) || !cstate->base.active) { + if (WARN_ON(!state) || !crtc_state->base.active) { alloc->start = 0; alloc->end = 0; *num_active = hweight32(dev_priv->active_crtcs); @@ -3876,7 +3875,7 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv, else *num_active = hweight32(dev_priv->active_crtcs); - ddb_size = intel_get_ddb_size(dev_priv, cstate, total_data_rate, + ddb_size = intel_get_ddb_size(dev_priv, crtc_state, total_data_rate, *num_active, ddb); /* @@ -3901,16 +3900,15 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv, * framebuffer, So instead of allocating DDB equally among pipes * distribute DDB based on resolution/width of the display. */ - for_each_new_crtc_in_state(state, crtc, crtc_state, i) { - const struct drm_display_mode *adjusted_mode; + for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) { + const struct drm_display_mode *adjusted_mode = + &crtc_state->base.adjusted_mode; + enum pipe pipe = crtc->pipe; int hdisplay, vdisplay; - enum pipe pipe; - if (!crtc_state->enable) + if (!crtc_state->base.enable) continue; - pipe = to_intel_crtc(crtc)->pipe; - adjusted_mode = &crtc_state->adjusted_mode; drm_mode_get_hv_timing(adjusted_mode, &hdisplay, &vdisplay); total_width += hdisplay; @@ -3929,7 +3927,7 @@ static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state, u64 modifier, unsigned int rotation, u32 plane_pixel_rate, struct skl_wm_params *wp, int color_plane); -static void skl_compute_plane_wm(const struct intel_crtc_state *cstate, +static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, int level, const struct skl_wm_params *wp, const struct skl_wm_level *result_prev, @@ -4061,15 +4059,15 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, * Caller should take care of dividing & rounding off the value. */ static uint_fixed_16_16_t -skl_plane_downscale_amount(const struct intel_crtc_state *cstate, - const struct intel_plane_state *pstate) +skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { - struct intel_plane *plane = to_intel_plane(pstate->base.plane); + struct intel_plane *plane = to_intel_plane(plane_state->base.plane); u32 src_w, src_h, dst_w, dst_h; uint_fixed_16_16_t fp_w_ratio, fp_h_ratio; uint_fixed_16_16_t downscale_h, downscale_w; - if (WARN_ON(!intel_wm_plane_visible(cstate, pstate))) + if (WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state))) return u32_to_fixed16(0); /* n.b., src is 16.16 fixed point, dst is whole integer */ @@ -4078,20 +4076,20 @@ skl_plane_downscale_amount(const struct intel_crtc_state *cstate, * Cursors only support 0/180 degree rotation, * hence no need to account for rotation here. */ - src_w = pstate->base.src_w >> 16; - src_h = pstate->base.src_h >> 16; - dst_w = pstate->base.crtc_w; - dst_h = pstate->base.crtc_h; + src_w = plane_state->base.src_w >> 16; + src_h = plane_state->base.src_h >> 16; + dst_w = plane_state->base.crtc_w; + dst_h = plane_state->base.crtc_h; } else { /* * Src coordinates are already rotated by 270 degrees for * the 90/270 degree plane rotation cases (to match the * GTT mapping), hence no need to account for rotation here. 
*/ - src_w = drm_rect_width(&pstate->base.src) >> 16; - src_h = drm_rect_height(&pstate->base.src) >> 16; - dst_w = drm_rect_width(&pstate->base.dst); - dst_h = drm_rect_height(&pstate->base.dst); + src_w = drm_rect_width(&plane_state->base.src) >> 16; + src_h = drm_rect_height(&plane_state->base.src) >> 16; + dst_w = drm_rect_width(&plane_state->base.dst); + dst_h = drm_rect_height(&plane_state->base.dst); } fp_w_ratio = div_fixed16(src_w, dst_w); @@ -4136,49 +4134,46 @@ skl_pipe_downscale_amount(const struct intel_crtc_state *crtc_state) } int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc, - struct intel_crtc_state *cstate) + struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); - struct drm_crtc_state *crtc_state = &cstate->base; - struct drm_atomic_state *state = crtc_state->state; + struct drm_atomic_state *state = crtc_state->base.state; struct drm_plane *plane; - const struct drm_plane_state *pstate; - struct intel_plane_state *intel_pstate; + const struct drm_plane_state *drm_plane_state; int crtc_clock, dotclk; u32 pipe_max_pixel_rate; uint_fixed_16_16_t pipe_downscale; uint_fixed_16_16_t max_downscale = u32_to_fixed16(1); - if (!cstate->base.enable) + if (!crtc_state->base.enable) return 0; - drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) { + drm_atomic_crtc_state_for_each_plane_state(plane, drm_plane_state, &crtc_state->base) { uint_fixed_16_16_t plane_downscale; uint_fixed_16_16_t fp_9_div_8 = div_fixed16(9, 8); int bpp; + const struct intel_plane_state *plane_state = + to_intel_plane_state(drm_plane_state); - if (!intel_wm_plane_visible(cstate, - to_intel_plane_state(pstate))) + if (!intel_wm_plane_visible(crtc_state, plane_state)) continue; - if (WARN_ON(!pstate->fb)) + if (WARN_ON(!plane_state->base.fb)) return -EINVAL; - intel_pstate = to_intel_plane_state(pstate); - plane_downscale = skl_plane_downscale_amount(cstate, - intel_pstate); - bpp = pstate->fb->format->cpp[0] * 8; + plane_downscale = skl_plane_downscale_amount(crtc_state, plane_state); + bpp = plane_state->base.fb->format->cpp[0] * 8; if (bpp == 64) plane_downscale = mul_fixed16(plane_downscale, fp_9_div_8); max_downscale = max_fixed16(plane_downscale, max_downscale); } - pipe_downscale = skl_pipe_downscale_amount(cstate); + pipe_downscale = skl_pipe_downscale_amount(crtc_state); pipe_downscale = mul_fixed16(pipe_downscale, max_downscale); - crtc_clock = crtc_state->adjusted_mode.crtc_clock; + crtc_clock = crtc_state->base.adjusted_mode.crtc_clock; dotclk = to_intel_atomic_state(state)->cdclk.logical.cdclk; if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10) @@ -4195,12 +4190,11 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc, } static u64 -skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, - const struct intel_plane_state *intel_pstate, +skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, const int plane) { - struct intel_plane *intel_plane = - to_intel_plane(intel_pstate->base.plane); + struct intel_plane *intel_plane = to_intel_plane(plane_state->base.plane); u32 data_rate; u32 width = 0, height = 0; struct drm_framebuffer *fb; @@ -4208,10 +4202,10 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, uint_fixed_16_16_t down_scale_amount; u64 rate; - if (!intel_pstate->base.visible) + if (!plane_state->base.visible) return 0; - fb = intel_pstate->base.fb; + fb = plane_state->base.fb; format = 
fb->format->format; if (intel_plane->id == PLANE_CURSOR) @@ -4224,8 +4218,8 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, * the 90/270 degree plane rotation cases (to match the * GTT mapping), hence no need to account for rotation here. */ - width = drm_rect_width(&intel_pstate->base.src) >> 16; - height = drm_rect_height(&intel_pstate->base.src) >> 16; + width = drm_rect_width(&plane_state->base.src) >> 16; + height = drm_rect_height(&plane_state->base.src) >> 16; /* UV plane does 1/2 pixel sub-sampling */ if (plane == 1 && is_planar_yuv_format(format)) { @@ -4235,7 +4229,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, data_rate = width * height; - down_scale_amount = skl_plane_downscale_amount(cstate, intel_pstate); + down_scale_amount = skl_plane_downscale_amount(crtc_state, plane_state); rate = mul_round_up_u32_fixed16(data_rate, down_scale_amount); @@ -4244,35 +4238,32 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, } static u64 -skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate, +skl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state, u64 *plane_data_rate, u64 *uv_plane_data_rate) { - struct drm_crtc_state *cstate = &intel_cstate->base; - struct drm_atomic_state *state = cstate->state; + struct drm_atomic_state *state = crtc_state->base.state; struct drm_plane *plane; - const struct drm_plane_state *pstate; + const struct drm_plane_state *drm_plane_state; u64 total_data_rate = 0; if (WARN_ON(!state)) return 0; /* Calculate and cache data rate for each plane */ - drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) { + drm_atomic_crtc_state_for_each_plane_state(plane, drm_plane_state, &crtc_state->base) { enum plane_id plane_id = to_intel_plane(plane)->id; + const struct intel_plane_state *plane_state = + to_intel_plane_state(drm_plane_state); u64 rate; - const struct intel_plane_state *intel_pstate = - to_intel_plane_state(pstate); /* packed/y */ - rate = skl_plane_relative_data_rate(intel_cstate, - intel_pstate, 0); + rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0); plane_data_rate[plane_id] = rate; total_data_rate += rate; /* uv-plane */ - rate = skl_plane_relative_data_rate(intel_cstate, - intel_pstate, 1); + rate = skl_plane_relative_data_rate(crtc_state, plane_state, 1); uv_plane_data_rate[plane_id] = rate; total_data_rate += rate; } @@ -4281,28 +4272,25 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate, } static u64 -icl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate, +icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state, u64 *plane_data_rate) { - struct drm_crtc_state *cstate = &intel_cstate->base; - struct drm_atomic_state *state = cstate->state; struct drm_plane *plane; - const struct drm_plane_state *pstate; + const struct drm_plane_state *drm_plane_state; u64 total_data_rate = 0; - if (WARN_ON(!state)) + if (WARN_ON(!crtc_state->base.state)) return 0; /* Calculate and cache data rate for each plane */ - drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) { - const struct intel_plane_state *intel_pstate = - to_intel_plane_state(pstate); + drm_atomic_crtc_state_for_each_plane_state(plane, drm_plane_state, &crtc_state->base) { + const struct intel_plane_state *plane_state = + to_intel_plane_state(drm_plane_state); enum plane_id plane_id = to_intel_plane(plane)->id; u64 rate; - if (!intel_pstate->linked_plane) { - rate = skl_plane_relative_data_rate(intel_cstate, - 
intel_pstate, 0); + if (!plane_state->linked_plane) { + rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0); plane_data_rate[plane_id] = rate; total_data_rate += rate; } else { @@ -4315,18 +4303,16 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate, * NULL if we try get_new_plane_state(), so we * always calculate from the master. */ - if (intel_pstate->slave) + if (plane_state->slave) continue; /* Y plane rate is calculated on the slave */ - rate = skl_plane_relative_data_rate(intel_cstate, - intel_pstate, 0); - y_plane_id = intel_pstate->linked_plane->id; + rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0); + y_plane_id = plane_state->linked_plane->id; plane_data_rate[y_plane_id] = rate; total_data_rate += rate; - rate = skl_plane_relative_data_rate(intel_cstate, - intel_pstate, 1); + rate = skl_plane_relative_data_rate(crtc_state, plane_state, 1); plane_data_rate[plane_id] = rate; total_data_rate += rate; } @@ -4336,14 +4322,14 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate, } static int -skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, +skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state, struct skl_ddb_allocation *ddb /* out */) { - struct drm_atomic_state *state = cstate->base.state; - struct drm_crtc *crtc = cstate->base.crtc; + struct drm_atomic_state *state = crtc_state->base.state; + struct drm_crtc *crtc = crtc_state->base.crtc; struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb; + struct skl_ddb_entry *alloc = &crtc_state->wm.skl.ddb; u16 alloc_size, start = 0; u16 total[I915_MAX_PLANES] = {}; u16 uv_total[I915_MAX_PLANES] = {}; @@ -4356,40 +4342,40 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, int level; /* Clear the partitioning for disabled planes. */ - memset(cstate->wm.skl.plane_ddb_y, 0, sizeof(cstate->wm.skl.plane_ddb_y)); - memset(cstate->wm.skl.plane_ddb_uv, 0, sizeof(cstate->wm.skl.plane_ddb_uv)); + memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y)); + memset(crtc_state->wm.skl.plane_ddb_uv, 0, sizeof(crtc_state->wm.skl.plane_ddb_uv)); if (WARN_ON(!state)) return 0; - if (!cstate->base.active) { + if (!crtc_state->base.active) { alloc->start = alloc->end = 0; return 0; } if (INTEL_GEN(dev_priv) >= 11) total_data_rate = - icl_get_total_relative_data_rate(cstate, + icl_get_total_relative_data_rate(crtc_state, plane_data_rate); else total_data_rate = - skl_get_total_relative_data_rate(cstate, + skl_get_total_relative_data_rate(crtc_state, plane_data_rate, uv_plane_data_rate); - skl_ddb_get_pipe_allocation_limits(dev_priv, cstate, total_data_rate, + skl_ddb_get_pipe_allocation_limits(dev_priv, crtc_state, total_data_rate, ddb, alloc, &num_active); alloc_size = skl_ddb_entry_size(alloc); if (alloc_size == 0) return 0; /* Allocate fixed number of blocks for cursor. 
*/ - total[PLANE_CURSOR] = skl_cursor_allocation(cstate, num_active); + total[PLANE_CURSOR] = skl_cursor_allocation(crtc_state, num_active); alloc_size -= total[PLANE_CURSOR]; - cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].start = + crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].start = alloc->end - total[PLANE_CURSOR]; - cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end; + crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end; if (total_data_rate == 0) return 0; @@ -4402,7 +4388,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, blocks = 0; for_each_plane_id_on_crtc(intel_crtc, plane_id) { const struct skl_plane_wm *wm = - &cstate->wm.skl.optimal.planes[plane_id]; + &crtc_state->wm.skl.optimal.planes[plane_id]; if (plane_id == PLANE_CURSOR) { if (WARN_ON(wm->wm[level].min_ddb_alloc > @@ -4437,7 +4423,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, */ for_each_plane_id_on_crtc(intel_crtc, plane_id) { const struct skl_plane_wm *wm = - &cstate->wm.skl.optimal.planes[plane_id]; + &crtc_state->wm.skl.optimal.planes[plane_id]; u64 rate; u16 extra; @@ -4476,9 +4462,9 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, start = alloc->start; for_each_plane_id_on_crtc(intel_crtc, plane_id) { struct skl_ddb_entry *plane_alloc = - &cstate->wm.skl.plane_ddb_y[plane_id]; + &crtc_state->wm.skl.plane_ddb_y[plane_id]; struct skl_ddb_entry *uv_plane_alloc = - &cstate->wm.skl.plane_ddb_uv[plane_id]; + &crtc_state->wm.skl.plane_ddb_uv[plane_id]; if (plane_id == PLANE_CURSOR) continue; @@ -4509,7 +4495,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, for (level++; level <= ilk_wm_max_level(dev_priv); level++) { for_each_plane_id_on_crtc(intel_crtc, plane_id) { struct skl_plane_wm *wm = - &cstate->wm.skl.optimal.planes[plane_id]; + &crtc_state->wm.skl.optimal.planes[plane_id]; /* * We only disable the watermarks for each plane if @@ -4546,7 +4532,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, */ for_each_plane_id_on_crtc(intel_crtc, plane_id) { struct skl_plane_wm *wm = - &cstate->wm.skl.optimal.planes[plane_id]; + &crtc_state->wm.skl.optimal.planes[plane_id]; if (wm->trans_wm.plane_res_b >= total[plane_id]) memset(&wm->trans_wm, 0, sizeof(wm->trans_wm)); @@ -4598,43 +4584,43 @@ skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency, } static uint_fixed_16_16_t -intel_get_linetime_us(const struct intel_crtc_state *cstate) +intel_get_linetime_us(const struct intel_crtc_state *crtc_state) { u32 pixel_rate; u32 crtc_htotal; uint_fixed_16_16_t linetime_us; - if (!cstate->base.active) + if (!crtc_state->base.active) return u32_to_fixed16(0); - pixel_rate = cstate->pixel_rate; + pixel_rate = crtc_state->pixel_rate; if (WARN_ON(pixel_rate == 0)) return u32_to_fixed16(0); - crtc_htotal = cstate->base.adjusted_mode.crtc_htotal; + crtc_htotal = crtc_state->base.adjusted_mode.crtc_htotal; linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate); return linetime_us; } static u32 -skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate, - const struct intel_plane_state *pstate) +skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { u64 adjusted_pixel_rate; uint_fixed_16_16_t downscale_amount; /* Shouldn't reach here on disabled planes... 
*/ - if (WARN_ON(!intel_wm_plane_visible(cstate, pstate))) + if (WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state))) return 0; /* * Adjusted plane pixel rate is just the pipe's adjusted pixel rate * with additional adjustments for plane-specific scaling. */ - adjusted_pixel_rate = cstate->pixel_rate; - downscale_amount = skl_plane_downscale_amount(cstate, pstate); + adjusted_pixel_rate = crtc_state->pixel_rate; + downscale_amount = skl_plane_downscale_amount(crtc_state, plane_state); return mul_round_up_u32_fixed16(adjusted_pixel_rate, downscale_amount); @@ -4767,13 +4753,13 @@ static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level) return level > 0; } -static void skl_compute_plane_wm(const struct intel_crtc_state *cstate, +static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, int level, const struct skl_wm_params *wp, const struct skl_wm_level *result_prev, struct skl_wm_level *result /* out */) { - struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); u32 latency = dev_priv->wm.skl_latency[level]; uint_fixed_16_16_t method1, method2; uint_fixed_16_16_t selected_result; @@ -4799,14 +4785,14 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *cstate, method1 = skl_wm_method1(dev_priv, wp->plane_pixel_rate, wp->cpp, latency, wp->dbuf_block_size); method2 = skl_wm_method2(wp->plane_pixel_rate, - cstate->base.adjusted_mode.crtc_htotal, + crtc_state->base.adjusted_mode.crtc_htotal, latency, wp->plane_blocks_per_line); if (wp->y_tiled) { selected_result = max_fixed16(method2, wp->y_tile_minimum); } else { - if ((wp->cpp * cstate->base.adjusted_mode.crtc_htotal / + if ((wp->cpp * crtc_state->base.adjusted_mode.crtc_htotal / wp->dbuf_block_size < 1) && (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) { selected_result = method2; @@ -4893,18 +4879,18 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *cstate, } static void -skl_compute_wm_levels(const struct intel_crtc_state *cstate, +skl_compute_wm_levels(const struct intel_crtc_state *crtc_state, const struct skl_wm_params *wm_params, struct skl_wm_level *levels) { - struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); int level, max_level = ilk_wm_max_level(dev_priv); struct skl_wm_level *result_prev = &levels[0]; for (level = 0; level <= max_level; level++) { struct skl_wm_level *result = &levels[level]; - skl_compute_plane_wm(cstate, level, wm_params, + skl_compute_plane_wm(crtc_state, level, wm_params, result_prev, result); result_prev = result; @@ -4912,14 +4898,14 @@ skl_compute_wm_levels(const struct intel_crtc_state *cstate, } static u32 -skl_compute_linetime_wm(const struct intel_crtc_state *cstate) +skl_compute_linetime_wm(const struct intel_crtc_state *crtc_state) { - struct drm_atomic_state *state = cstate->base.state; + struct drm_atomic_state *state = crtc_state->base.state; struct drm_i915_private *dev_priv = to_i915(state->dev); uint_fixed_16_16_t linetime_us; u32 linetime_wm; - linetime_us = intel_get_linetime_us(cstate); + linetime_us = intel_get_linetime_us(crtc_state); linetime_wm = fixed16_to_u32_round_up(mul_u32_fixed16(8, linetime_us)); /* Display WA #1135: BXT:ALL GLK:ALL */ @@ -4929,11 +4915,11 @@ skl_compute_linetime_wm(const struct intel_crtc_state *cstate) return linetime_wm; } -static void skl_compute_transition_wm(const struct intel_crtc_state *cstate, 
+static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state, const struct skl_wm_params *wp, struct skl_plane_wm *wm) { - struct drm_device *dev = cstate->base.crtc->dev; + struct drm_device *dev = crtc_state->base.crtc->dev; const struct drm_i915_private *dev_priv = to_i915(dev); u16 trans_min, trans_y_tile_min; const u16 trans_amount = 10; /* This is configurable amount */ @@ -5091,13 +5077,12 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state, return 0; } -static int skl_build_pipe_wm(struct intel_crtc_state *cstate) +static int skl_build_pipe_wm(struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev); - struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal; - struct drm_crtc_state *crtc_state = &cstate->base; + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal; struct drm_plane *plane; - const struct drm_plane_state *pstate; + const struct drm_plane_state *drm_plane_state; int ret; /* @@ -5106,19 +5091,20 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate) */ memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes)); - drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) { - const struct intel_plane_state *intel_pstate = - to_intel_plane_state(pstate); + drm_atomic_crtc_state_for_each_plane_state(plane, drm_plane_state, + &crtc_state->base) { + const struct intel_plane_state *plane_state = + to_intel_plane_state(drm_plane_state); if (INTEL_GEN(dev_priv) >= 11) - ret = icl_build_plane_wm(cstate, intel_pstate); + ret = icl_build_plane_wm(crtc_state, plane_state); else - ret = skl_build_plane_wm(cstate, intel_pstate); + ret = skl_build_plane_wm(crtc_state, plane_state); if (ret) return ret; } - pipe_wm->linetime = skl_compute_linetime_wm(cstate); + pipe_wm->linetime = skl_compute_linetime_wm(crtc_state); return 0; } @@ -5272,10 +5258,10 @@ static u32 pipes_modified(struct intel_atomic_state *state) { struct intel_crtc *crtc; - struct intel_crtc_state *cstate; + struct intel_crtc_state *crtc_state; u32 i, ret = 0; - for_each_new_intel_crtc_in_state(state, crtc, cstate, i) + for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) ret |= drm_crtc_mask(&crtc->base); return ret; @@ -5651,11 +5637,11 @@ skl_compute_wm(struct intel_atomic_state *state) } static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state, - struct intel_crtc_state *cstate) + struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(state->base.dev); - struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal; + struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal; enum pipe pipe = crtc->pipe; if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base))) @@ -5665,9 +5651,9 @@ static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state, } static void skl_initial_wm(struct intel_atomic_state *state, - struct intel_crtc_state *cstate) + struct intel_crtc_state *crtc_state) { - struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); + struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_device *dev = intel_crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct skl_ddb_values *results = &state->wm_results; @@ -5677,8 +5663,8 @@ static void skl_initial_wm(struct intel_atomic_state *state, 
mutex_lock(&dev_priv->wm.wm_mutex); - if (cstate->base.active_changed) - skl_atomic_update_crtc_wm(state, cstate); + if (crtc_state->base.active_changed) + skl_atomic_update_crtc_wm(state, crtc_state); mutex_unlock(&dev_priv->wm.wm_mutex); } @@ -5734,26 +5720,26 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv) } static void ilk_initial_watermarks(struct intel_atomic_state *state, - struct intel_crtc_state *cstate) + struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev); - struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); mutex_lock(&dev_priv->wm.wm_mutex); - intel_crtc->wm.active.ilk = cstate->wm.ilk.intermediate; + intel_crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate; ilk_program_watermarks(dev_priv); mutex_unlock(&dev_priv->wm.wm_mutex); } static void ilk_optimize_watermarks(struct intel_atomic_state *state, - struct intel_crtc_state *cstate) + struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev); - struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); mutex_lock(&dev_priv->wm.wm_mutex); - if (cstate->wm.need_postvbl_update) { - intel_crtc->wm.active.ilk = cstate->wm.ilk.optimal; + if (crtc_state->wm.need_postvbl_update) { + intel_crtc->wm.active.ilk = crtc_state->wm.ilk.optimal; ilk_program_watermarks(dev_priv); } mutex_unlock(&dev_priv->wm.wm_mutex); @@ -5811,13 +5797,13 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv) struct skl_ddb_values *hw = &dev_priv->wm.skl_hw; struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb; struct intel_crtc *crtc; - struct intel_crtc_state *cstate; + struct intel_crtc_state *crtc_state; skl_ddb_get_hw_state(dev_priv, ddb); for_each_intel_crtc(&dev_priv->drm, crtc) { - cstate = to_intel_crtc_state(crtc->base.state); + crtc_state = to_intel_crtc_state(crtc->base.state); - skl_pipe_wm_get_hw_state(crtc, &cstate->wm.skl.optimal); + skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal); if (crtc->active) hw->dirty_pipes |= drm_crtc_mask(&crtc->base); @@ -5834,8 +5820,8 @@ static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc) struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct ilk_wm_values *hw = &dev_priv->wm.hw; - struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->base.state); - struct intel_pipe_wm *active = &cstate->wm.ilk.optimal; + struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); + struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal; enum pipe pipe = crtc->pipe; static const i915_reg_t wm0_pipe_reg[] = { [PIPE_A] = WM0_PIPEA_ILK, @@ -7191,7 +7177,7 @@ static void gen11_enable_rc6(struct drm_i915_private *dev_priv) * next request to execute. If the idle hysteresis is less than that * interrupt service latency, the hardware will automatically gate * the power well and we will then incur the wake up cost on top of - * the service latency. A similar guide from intel_pstate is that we + * the service latency. A similar guide from intel_pstate is that we * do not want the enable hysteresis to less than the wakeup latency.
* * igt/gem_exec_nop/sequential provides a rough estimate for the @@ -7270,7 +7256,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv) * next request to execute. If the idle hysteresis is less than that * interrupt service latency, the hardware will automatically gate * the power well and we will then incur the wake up cost on top of - * the service latency. A similar guide from intel_pstate is that we + * the service latency. A similar guide from intel_pstate is that we * do not want the enable hysteresis to less than the wakeup latency. * * igt/gem_exec_nop/sequential provides a rough estimate for the -- cgit v1.2.3 From 39a5883f8670360401876505eaaa64f030df7852 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 28 Jun 2019 17:36:13 +0300 Subject: drm/i915/icl: Add support to read out the TBT PLL HW state MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add support to read out the TBT PLL HW state. Cc: Vandita Kulkarni Cc: Paulo Zanoni Cc: Lucas De Marchi Signed-off-by: Imre Deak Reviewed-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190628143635.22066-2-imre.deak@intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index c97c0a9f6208..d65a580fe527 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -9928,13 +9928,20 @@ static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv, enum intel_dpll_id id; u32 temp; - /* TODO: TBT pll not implemented. */ if (intel_port_is_combophy(dev_priv, port)) { temp = I915_READ(DPCLKA_CFGCR0_ICL) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port); id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port); } else if (intel_port_is_tc(dev_priv, port)) { + u32 clk_sel = I915_READ(DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK; + + if (clk_sel == DDI_CLK_SEL_MG) { + id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv, + port)); + } else { + WARN_ON(clk_sel < DDI_CLK_SEL_TBT_162); + id = DPLL_ID_ICL_TBTPLL; + } - id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv, port)); } else { WARN(1, "Invalid port %x\n", port); return; -- cgit v1.2.3 From 28212321622d79cbab640d80d0ce0670fd9eaf72 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 28 Jun 2019 17:36:14 +0300 Subject: drm/i915: Tune down WARNs about TBT AUX power well enabling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The HW completion flag for the TBT AUX power well enabling/disabling gets stuck if the firmware tears down the TBT DP tunnel before the completion. We shouldn't complain about the timeout, since it's expected to happen and doesn't cause further issues. We suppress the disabling timeout already, do the same for enabling. v2: - Make the debug message more precise.
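The policy this patch adopts (log the enable timeout as a debug message, and keep warning only where a timeout is genuinely unexpected) can be sketched in a few self-contained lines. This is a rough userspace model only; wait_for_bit(), the register values and the timeout budget are illustrative assumptions, not the driver's actual interface:

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-in for polling a status register bit with a timeout;
     * it simply reports whether the bit is already set, which models the
     * "completion flag never latches" case seen when the TBT tunnel is down. */
    static bool wait_for_bit(unsigned int reg, unsigned int mask, int timeout_us)
    {
        (void)timeout_us;
        return (reg & mask) != 0;
    }

    /* Enable-side policy: a timeout is always logged as debug, and is only
     * escalated to a warning when the well is not a TBT AUX well, i.e. when
     * the timeout is genuinely unexpected. */
    static void wait_for_power_well_enable(unsigned int status_reg,
                                           unsigned int state_mask,
                                           bool is_tc_tbt, const char *name)
    {
        if (!wait_for_bit(status_reg, state_mask, 1)) {
            printf("debug: %s power well enable timeout\n", name);
            if (!is_tc_tbt)
                printf("WARN: unexpected enable timeout on %s\n", name);
        }
    }

    int main(void)
    {
        wait_for_power_well_enable(0x0, 0x1, true, "AUX TBT1"); /* expected, debug only */
        wait_for_power_well_enable(0x0, 0x1, false, "PW_2");    /* unexpected, warns */
        return 0;
    }

The change below applies the same idea to hsw_wait_for_power_well_enable(), mirroring the suppression already done on the disable path.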
(José) Cc: José Roberto de Souza Cc: Rodrigo Vivi Cc: Ville Syrjälä Signed-off-by: Imre Deak Reviewed-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190628143635.22066-3-imre.deak@intel.com --- drivers/gpu/drm/i915/display/intel_display_power.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index c93ad512014c..217787d10718 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -269,11 +269,17 @@ static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv, int pw_idx = power_well->desc->hsw.idx; /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */ - WARN_ON(intel_wait_for_register(&dev_priv->uncore, - regs->driver, - HSW_PWR_WELL_CTL_STATE(pw_idx), - HSW_PWR_WELL_CTL_STATE(pw_idx), - 1)); + if (intel_wait_for_register(&dev_priv->uncore, + regs->driver, + HSW_PWR_WELL_CTL_STATE(pw_idx), + HSW_PWR_WELL_CTL_STATE(pw_idx), + 1)) { + DRM_DEBUG_KMS("%s power well enable timeout\n", + power_well->desc->name); + + /* An AUX timeout is expected if the TBT DP tunnel is down. */ + WARN_ON(!power_well->desc->hsw.is_tc_tbt); + } } static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv, -- cgit v1.2.3 From bc85328ff431e41bea3fe88e74e738f2d372e6d1 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 28 Jun 2019 17:36:15 +0300 Subject: drm/i915: Move the TypeC port handling code to a separate file MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move the TypeC port handling functions to a new file for clarity. While at it: - s/icl_tc_port_connected()/intel_tc_port_connected()/ icl_tc_phy_disconnect(), will be unexported later. - s/intel_dp_get_fia_supported_lane_count()/ intel_tc_port_fia_max_lane_count()/ It's used for HDMI legacy mode too. - Simplify function interfaces by passing only dig_port to them. No functional changes. v2: - Fix checkpatch issues: +1/-1 empty lines in intel_tc.c and add missing SPDX to intel_tc.h. 
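One self-contained piece of the code moved into intel_tc.c below is the FIA lane-assignment decode behind intel_tc_port_fia_max_lane_count(): the four assignment bits read from PORT_TX_DFLEXDPSP map onto 1, 2 or 4 usable lanes. Here is a minimal userspace sketch of just that mapping, assuming lane_info is the already-shifted 4-bit field (the helper name and the sample values are illustrative):

    #include <stdio.h>

    /* Map the 4-bit DP lane-assignment field read from the FIA to the number
     * of lanes usable in DP-alt mode. Unknown patterns fall back to one lane,
     * mirroring the conservative MISSING_CASE() fall-through in the driver. */
    static int fia_max_lane_count(unsigned int lane_info)
    {
        switch (lane_info) {
        case 1:
        case 2:
        case 4:
        case 8:
            return 1;  /* exactly one lane assigned */
        case 3:
        case 12:
            return 2;  /* two adjacent lanes assigned */
        case 15:
            return 4;  /* all four lanes assigned */
        default:
            return 1;  /* unexpected pattern: be conservative */
        }
    }

    int main(void)
    {
        static const unsigned int patterns[] = { 1, 3, 12, 15, 5 };
        unsigned int i;

        for (i = 0; i < sizeof(patterns) / sizeof(patterns[0]); i++)
            printf("lane_info %2u -> %d lane(s)\n",
                   patterns[i], fia_max_lane_count(patterns[i]));
        return 0;
    }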
(Jani) Cc: Animesh Manna Cc: Paulo Zanoni Cc: José Roberto de Souza Cc: Jani Nikula Signed-off-by: Imre Deak Acked-by: Jani Nikula Reviewed-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190628143635.22066-4-imre.deak@intel.com --- drivers/gpu/drm/i915/Makefile | 3 +- drivers/gpu/drm/i915/display/intel_ddi.c | 6 +- drivers/gpu/drm/i915/display/intel_dp.c | 227 +----------------------------- drivers/gpu/drm/i915/display/intel_dp.h | 2 - drivers/gpu/drm/i915/display/intel_tc.c | 230 +++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/display/intel_tc.h | 18 +++ 6 files changed, 256 insertions(+), 230 deletions(-) create mode 100644 drivers/gpu/drm/i915/display/intel_tc.c create mode 100644 drivers/gpu/drm/i915/display/intel_tc.h (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 3bd8f0349a8a..82c49ad16361 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -175,7 +175,8 @@ i915-y += \ display/intel_overlay.o \ display/intel_psr.o \ display/intel_quirks.o \ - display/intel_sprite.o + display/intel_sprite.o \ + display/intel_tc.o i915-$(CONFIG_ACPI) += \ display/intel_acpi.o \ display/intel_opregion.o diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 7925a176f900..d06f121281ef 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -45,6 +45,7 @@ #include "intel_lspcon.h" #include "intel_panel.h" #include "intel_psr.h" +#include "intel_tc.h" #include "intel_vdsc.h" struct ddi_buf_trans { @@ -3917,7 +3918,6 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder, static void intel_ddi_encoder_suspend(struct intel_encoder *encoder) { struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); - struct drm_i915_private *i915 = to_i915(encoder->base.dev); intel_dp_encoder_suspend(encoder); @@ -3927,7 +3927,7 @@ static void intel_ddi_encoder_suspend(struct intel_encoder *encoder) * even if the sink has disappeared while being suspended. 
*/ if (dig_port->tc_legacy_port) - icl_tc_phy_disconnect(i915, dig_port); + icl_tc_phy_disconnect(dig_port); } static void intel_ddi_encoder_reset(struct drm_encoder *drm_encoder) @@ -3949,7 +3949,7 @@ static void intel_ddi_encoder_destroy(struct drm_encoder *encoder) intel_dp_encoder_flush_work(encoder); if (intel_port_is_tc(i915, dig_port->base.port)) - icl_tc_phy_disconnect(i915, dig_port); + icl_tc_phy_disconnect(dig_port); drm_encoder_cleanup(encoder); kfree(dig_port); diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 4336df46fe78..348c70b75403 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -62,6 +62,7 @@ #include "intel_panel.h" #include "intel_psr.h" #include "intel_sideband.h" +#include "intel_tc.h" #include "intel_vdsc.h" #define DP_DPRX_ESI_LEN 14 @@ -211,46 +212,13 @@ static int intel_dp_max_common_rate(struct intel_dp *intel_dp) return intel_dp->common_rates[intel_dp->num_common_rates - 1]; } -static int intel_dp_get_fia_supported_lane_count(struct intel_dp *intel_dp) -{ - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); - enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); - intel_wakeref_t wakeref; - u32 lane_info; - - if (tc_port == PORT_TC_NONE || dig_port->tc_type != TC_PORT_TYPEC) - return 4; - - lane_info = 0; - with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref) - lane_info = (I915_READ(PORT_TX_DFLEXDPSP) & - DP_LANE_ASSIGNMENT_MASK(tc_port)) >> - DP_LANE_ASSIGNMENT_SHIFT(tc_port); - - switch (lane_info) { - default: - MISSING_CASE(lane_info); - case 1: - case 2: - case 4: - case 8: - return 1; - case 3: - case 12: - return 2; - case 15: - return 4; - } -} - /* Theoretical max between source and sink */ static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); int source_max = intel_dig_port->max_lanes; int sink_max = drm_dp_max_lane_count(intel_dp->dpcd); - int fia_max = intel_dp_get_fia_supported_lane_count(intel_dp); + int fia_max = intel_tc_port_fia_max_lane_count(intel_dig_port); return min3(source_max, sink_max, fia_max); } @@ -5232,195 +5200,6 @@ static bool icl_combo_port_connected(struct drm_i915_private *dev_priv, return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(port); } -static const char *tc_type_name(enum tc_port_type type) -{ - static const char * const names[] = { - [TC_PORT_UNKNOWN] = "unknown", - [TC_PORT_LEGACY] = "legacy", - [TC_PORT_TYPEC] = "typec", - [TC_PORT_TBT] = "tbt", - }; - - if (WARN_ON(type >= ARRAY_SIZE(names))) - type = TC_PORT_UNKNOWN; - - return names[type]; -} - -static void icl_update_tc_port_type(struct drm_i915_private *dev_priv, - struct intel_digital_port *intel_dig_port, - bool is_legacy, bool is_typec, bool is_tbt) -{ - enum port port = intel_dig_port->base.port; - enum tc_port_type old_type = intel_dig_port->tc_type; - - WARN_ON(is_legacy + is_typec + is_tbt != 1); - - if (is_legacy) - intel_dig_port->tc_type = TC_PORT_LEGACY; - else if (is_typec) - intel_dig_port->tc_type = TC_PORT_TYPEC; - else if (is_tbt) - intel_dig_port->tc_type = TC_PORT_TBT; - else - return; - - /* Types are not supposed to be changed at runtime. 
*/ - WARN_ON(old_type != TC_PORT_UNKNOWN && - old_type != intel_dig_port->tc_type); - - if (old_type != intel_dig_port->tc_type) - DRM_DEBUG_KMS("Port %c has TC type %s\n", port_name(port), - tc_type_name(intel_dig_port->tc_type)); -} - -/* - * This function implements the first part of the Connect Flow described by our - * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading - * lanes, EDID, etc) is done as needed in the typical places. - * - * Unlike the other ports, type-C ports are not available to use as soon as we - * get a hotplug. The type-C PHYs can be shared between multiple controllers: - * display, USB, etc. As a result, handshaking through FIA is required around - * connect and disconnect to cleanly transfer ownership with the controller and - * set the type-C power state. - * - * We could opt to only do the connect flow when we actually try to use the AUX - * channels or do a modeset, then immediately run the disconnect flow after - * usage, but there are some implications on this for a dynamic environment: - * things may go away or change behind our backs. So for now our driver is - * always trying to acquire ownership of the controller as soon as it gets an - * interrupt (or polls state and sees a port is connected) and only gives it - * back when it sees a disconnect. Implementation of a more fine-grained model - * will require a lot of coordination with user space and thorough testing for - * the extra possible cases. - */ -static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv, - struct intel_digital_port *dig_port) -{ - enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); - u32 val; - - if (dig_port->tc_type != TC_PORT_LEGACY && - dig_port->tc_type != TC_PORT_TYPEC) - return true; - - val = I915_READ(PORT_TX_DFLEXDPPMS); - if (!(val & DP_PHY_MODE_STATUS_COMPLETED(tc_port))) { - DRM_DEBUG_KMS("DP PHY for TC port %d not ready\n", tc_port); - WARN_ON(dig_port->tc_legacy_port); - return false; - } - - /* - * This function may be called many times in a row without an HPD event - * in between, so try to avoid the write when we can. - */ - val = I915_READ(PORT_TX_DFLEXDPCSSS); - if (!(val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port))) { - val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc_port); - I915_WRITE(PORT_TX_DFLEXDPCSSS, val); - } - - /* - * Now we have to re-check the live state, in case the port recently - * became disconnected. Not necessary for legacy mode. - */ - if (dig_port->tc_type == TC_PORT_TYPEC && - !(I915_READ(PORT_TX_DFLEXDPSP) & TC_LIVE_STATE_TC(tc_port))) { - DRM_DEBUG_KMS("TC PHY %d sudden disconnect.\n", tc_port); - icl_tc_phy_disconnect(dev_priv, dig_port); - return false; - } - - return true; -} - -/* - * See the comment at the connect function. This implements the Disconnect - * Flow. - */ -void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv, - struct intel_digital_port *dig_port) -{ - enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); - - if (dig_port->tc_type == TC_PORT_UNKNOWN) - return; - - /* - * TBT disconnection flow is read the live status, what was done in - * caller. 
- */ - if (dig_port->tc_type == TC_PORT_TYPEC || - dig_port->tc_type == TC_PORT_LEGACY) { - u32 val; - - val = I915_READ(PORT_TX_DFLEXDPCSSS); - val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port); - I915_WRITE(PORT_TX_DFLEXDPCSSS, val); - } - - DRM_DEBUG_KMS("Port %c TC type %s disconnected\n", - port_name(dig_port->base.port), - tc_type_name(dig_port->tc_type)); - - dig_port->tc_type = TC_PORT_UNKNOWN; -} - -/* - * The type-C ports are different because even when they are connected, they may - * not be available/usable by the graphics driver: see the comment on - * icl_tc_phy_connect(). So in our driver instead of adding the additional - * concept of "usable" and make everything check for "connected and usable" we - * define a port as "connected" when it is not only connected, but also when it - * is usable by the rest of the driver. That maintains the old assumption that - * connected ports are usable, and avoids exposing to the users objects they - * can't really use. - */ -static bool icl_tc_port_connected(struct drm_i915_private *dev_priv, - struct intel_digital_port *intel_dig_port) -{ - enum port port = intel_dig_port->base.port; - enum tc_port tc_port = intel_port_to_tc(dev_priv, port); - bool is_legacy, is_typec, is_tbt; - u32 dpsp; - - /* - * Complain if we got a legacy port HPD, but VBT didn't mark the port as - * legacy. Treat the port as legacy from now on. - */ - if (!intel_dig_port->tc_legacy_port && - I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port)) { - DRM_ERROR("VBT incorrectly claims port %c is not TypeC legacy\n", - port_name(port)); - intel_dig_port->tc_legacy_port = true; - } - is_legacy = intel_dig_port->tc_legacy_port; - - /* - * The spec says we shouldn't be using the ISR bits for detecting - * between TC and TBT. We should use DFLEXDPSP. 
- */ - dpsp = I915_READ(PORT_TX_DFLEXDPSP); - is_typec = dpsp & TC_LIVE_STATE_TC(tc_port); - is_tbt = dpsp & TC_LIVE_STATE_TBT(tc_port); - - if (!is_legacy && !is_typec && !is_tbt) { - icl_tc_phy_disconnect(dev_priv, intel_dig_port); - - return false; - } - - icl_update_tc_port_type(dev_priv, intel_dig_port, is_legacy, is_typec, - is_tbt); - - if (!icl_tc_phy_connect(dev_priv, intel_dig_port)) - return false; - - return true; -} - static bool icl_digital_port_connected(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); @@ -5429,7 +5208,7 @@ static bool icl_digital_port_connected(struct intel_encoder *encoder) if (intel_port_is_combophy(dev_priv, encoder->port)) return icl_combo_port_connected(dev_priv, dig_port); else if (intel_port_is_tc(dev_priv, encoder->port)) - return icl_tc_port_connected(dev_priv, dig_port); + return intel_tc_port_connected(dig_port); else MISSING_CASE(encoder->hpd_pin); diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h index da70b1a41c83..657bbb1f5ed0 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.h +++ b/drivers/gpu/drm/i915/display/intel_dp.h @@ -112,8 +112,6 @@ bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp); int intel_dp_link_required(int pixel_clock, int bpp); int intel_dp_max_data_rate(int max_link_clock, int max_lanes); bool intel_digital_port_connected(struct intel_encoder *encoder); -void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv, - struct intel_digital_port *dig_port); static inline unsigned int intel_dp_unused_lane_mask(int lane_count) { diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c new file mode 100644 index 000000000000..4fa9ea695d51 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -0,0 +1,230 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2019 Intel Corporation + */ + +#include "intel_display.h" +#include "i915_drv.h" +#include "intel_tc.h" + +static const char *tc_type_name(enum tc_port_type type) +{ + static const char * const names[] = { + [TC_PORT_UNKNOWN] = "unknown", + [TC_PORT_LEGACY] = "legacy", + [TC_PORT_TYPEC] = "typec", + [TC_PORT_TBT] = "tbt", + }; + + if (WARN_ON(type >= ARRAY_SIZE(names))) + type = TC_PORT_UNKNOWN; + + return names[type]; +} + +int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port) +{ + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); + intel_wakeref_t wakeref; + u32 lane_info; + + if (tc_port == PORT_TC_NONE || dig_port->tc_type != TC_PORT_TYPEC) + return 4; + + lane_info = 0; + with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref) + lane_info = (I915_READ(PORT_TX_DFLEXDPSP) & + DP_LANE_ASSIGNMENT_MASK(tc_port)) >> + DP_LANE_ASSIGNMENT_SHIFT(tc_port); + + switch (lane_info) { + default: + MISSING_CASE(lane_info); + case 1: + case 2: + case 4: + case 8: + return 1; + case 3: + case 12: + return 2; + case 15: + return 4; + } +} + +/* + * This function implements the first part of the Connect Flow described by our + * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading + * lanes, EDID, etc) is done as needed in the typical places. + * + * Unlike the other ports, type-C ports are not available to use as soon as we + * get a hotplug. The type-C PHYs can be shared between multiple controllers: + * display, USB, etc. 
As a result, handshaking through FIA is required around + * connect and disconnect to cleanly transfer ownership with the controller and + * set the type-C power state. + * + * We could opt to only do the connect flow when we actually try to use the AUX + * channels or do a modeset, then immediately run the disconnect flow after + * usage, but there are some implications on this for a dynamic environment: + * things may go away or change behind our backs. So for now our driver is + * always trying to acquire ownership of the controller as soon as it gets an + * interrupt (or polls state and sees a port is connected) and only gives it + * back when it sees a disconnect. Implementation of a more fine-grained model + * will require a lot of coordination with user space and thorough testing for + * the extra possible cases. + */ +static bool icl_tc_phy_connect(struct intel_digital_port *dig_port) +{ + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); + u32 val; + + if (dig_port->tc_type != TC_PORT_LEGACY && + dig_port->tc_type != TC_PORT_TYPEC) + return true; + + val = I915_READ(PORT_TX_DFLEXDPPMS); + if (!(val & DP_PHY_MODE_STATUS_COMPLETED(tc_port))) { + DRM_DEBUG_KMS("DP PHY for TC port %d not ready\n", tc_port); + WARN_ON(dig_port->tc_legacy_port); + return false; + } + + /* + * This function may be called many times in a row without an HPD event + * in between, so try to avoid the write when we can. + */ + val = I915_READ(PORT_TX_DFLEXDPCSSS); + if (!(val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port))) { + val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc_port); + I915_WRITE(PORT_TX_DFLEXDPCSSS, val); + } + + /* + * Now we have to re-check the live state, in case the port recently + * became disconnected. Not necessary for legacy mode. + */ + if (dig_port->tc_type == TC_PORT_TYPEC && + !(I915_READ(PORT_TX_DFLEXDPSP) & TC_LIVE_STATE_TC(tc_port))) { + DRM_DEBUG_KMS("TC PHY %d sudden disconnect.\n", tc_port); + icl_tc_phy_disconnect(dig_port); + return false; + } + + return true; +} + +/* + * See the comment at the connect function. This implements the Disconnect + * Flow. + */ +void icl_tc_phy_disconnect(struct intel_digital_port *dig_port) +{ + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); + + if (dig_port->tc_type == TC_PORT_UNKNOWN) + return; + + /* + * TBT disconnection flow is read the live status, what was done in + * caller. + */ + if (dig_port->tc_type == TC_PORT_TYPEC || + dig_port->tc_type == TC_PORT_LEGACY) { + u32 val; + + val = I915_READ(PORT_TX_DFLEXDPCSSS); + val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port); + I915_WRITE(PORT_TX_DFLEXDPCSSS, val); + } + + DRM_DEBUG_KMS("Port %c TC type %s disconnected\n", + port_name(dig_port->base.port), + tc_type_name(dig_port->tc_type)); + + dig_port->tc_type = TC_PORT_UNKNOWN; +} + +static void icl_update_tc_port_type(struct drm_i915_private *dev_priv, + struct intel_digital_port *intel_dig_port, + bool is_legacy, bool is_typec, bool is_tbt) +{ + enum port port = intel_dig_port->base.port; + enum tc_port_type old_type = intel_dig_port->tc_type; + + WARN_ON(is_legacy + is_typec + is_tbt != 1); + + if (is_legacy) + intel_dig_port->tc_type = TC_PORT_LEGACY; + else if (is_typec) + intel_dig_port->tc_type = TC_PORT_TYPEC; + else if (is_tbt) + intel_dig_port->tc_type = TC_PORT_TBT; + else + return; + + /* Types are not supposed to be changed at runtime. 
*/ + WARN_ON(old_type != TC_PORT_UNKNOWN && + old_type != intel_dig_port->tc_type); + + if (old_type != intel_dig_port->tc_type) + DRM_DEBUG_KMS("Port %c has TC type %s\n", port_name(port), + tc_type_name(intel_dig_port->tc_type)); +} + +/* + * The type-C ports are different because even when they are connected, they may + * not be available/usable by the graphics driver: see the comment on + * icl_tc_phy_connect(). So in our driver instead of adding the additional + * concept of "usable" and make everything check for "connected and usable" we + * define a port as "connected" when it is not only connected, but also when it + * is usable by the rest of the driver. That maintains the old assumption that + * connected ports are usable, and avoids exposing to the users objects they + * can't really use. + */ +bool intel_tc_port_connected(struct intel_digital_port *dig_port) +{ + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + enum port port = dig_port->base.port; + enum tc_port tc_port = intel_port_to_tc(dev_priv, port); + bool is_legacy, is_typec, is_tbt; + u32 dpsp; + + /* + * Complain if we got a legacy port HPD, but VBT didn't mark the port as + * legacy. Treat the port as legacy from now on. + */ + if (!dig_port->tc_legacy_port && + I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port)) { + DRM_ERROR("VBT incorrectly claims port %c is not TypeC legacy\n", + port_name(port)); + dig_port->tc_legacy_port = true; + } + is_legacy = dig_port->tc_legacy_port; + + /* + * The spec says we shouldn't be using the ISR bits for detecting + * between TC and TBT. We should use DFLEXDPSP. + */ + dpsp = I915_READ(PORT_TX_DFLEXDPSP); + is_typec = dpsp & TC_LIVE_STATE_TC(tc_port); + is_tbt = dpsp & TC_LIVE_STATE_TBT(tc_port); + + if (!is_legacy && !is_typec && !is_tbt) { + icl_tc_phy_disconnect(dig_port); + + return false; + } + + icl_update_tc_port_type(dev_priv, dig_port, is_legacy, is_typec, + is_tbt); + + if (!icl_tc_phy_connect(dig_port)) + return false; + + return true; +} + diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h new file mode 100644 index 000000000000..0c65675394e5 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_tc.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef __INTEL_TC_H__ +#define __INTEL_TC_H__ + +#include + +struct intel_digital_port; + +void icl_tc_phy_disconnect(struct intel_digital_port *dig_port); + +bool intel_tc_port_connected(struct intel_digital_port *dig_port); +int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port); + +#endif /* __INTEL_TC_H__ */ -- cgit v1.2.3 From e9b7e1422d409bb7d3423b8d1e59865227083669 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 28 Jun 2019 17:36:16 +0300 Subject: drm/i915: Sanitize the terminology used for TypeC port modes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The TypeC port mode can switch dynamically, to reflect that better call the port's mode as 'mode' rather than 'type'. While at it: - s/TC_PORT_TBT/TC_PORT_TBT_ALT/ and s/TC_PORT_TYPEC/TC_PORT_DP_ALT/. 'TYPEC' is ambiguous, TBT_ALT and DP_ALT better match the reality. - Remove the 'unknown' TypeC port mode. The mode is always known, it's the TBT-alt/safe mode after HW reset and after disconnecting the PHY. Simplify the tc_port/tc_type checks accordingly. - Don't WARN if the port mode changes, that can happen normally. No functional changes. 
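To make the new naming concrete, a minimal self-contained sketch of how one of the three modes is chosen from the per-port status flags follows. The enum values and the selection order are taken from the patch; detect_mode() and the sample scenarios are illustrative only:

    #include <stdbool.h>
    #include <stdio.h>

    enum tc_port_mode {
        TC_PORT_TBT_ALT,  /* Thunderbolt alternate mode, also the safe default */
        TC_PORT_DP_ALT,   /* DisplayPort alternate mode */
        TC_PORT_LEGACY,   /* fixed/legacy connection */
    };

    static const char *tc_port_mode_name(enum tc_port_mode mode)
    {
        static const char * const names[] = {
            [TC_PORT_TBT_ALT] = "tbt-alt",
            [TC_PORT_DP_ALT] = "dp-alt",
            [TC_PORT_LEGACY] = "legacy",
        };

        return names[mode];
    }

    /* Pick a mode from the per-port flags, with the same priority the patch
     * keeps in icl_update_tc_port_type(): legacy, then DP-alt, then TBT-alt. */
    static enum tc_port_mode detect_mode(bool is_legacy, bool is_dp_alt, bool is_tbt)
    {
        if (is_legacy)
            return TC_PORT_LEGACY;
        if (is_dp_alt)
            return TC_PORT_DP_ALT;
        if (is_tbt)
            return TC_PORT_TBT_ALT;
        /* No live status at all: TBT-alt doubles as the safe fallback. */
        return TC_PORT_TBT_ALT;
    }

    int main(void)
    {
        printf("legacy port        -> %s\n", tc_port_mode_name(detect_mode(true, false, false)));
        printf("DP-alt live status -> %s\n", tc_port_mode_name(detect_mode(false, true, false)));
        printf("TBT live status    -> %s\n", tc_port_mode_name(detect_mode(false, false, true)));
        return 0;
    }

Having TBT-alt double as the fallback is what allows the 'unknown' mode to be dropped.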
Cc: Animesh Manna Cc: Paulo Zanoni Cc: Anusha Srivatsa Cc: José Roberto de Souza Cc: Rodrigo Vivi Signed-off-by: Imre Deak Reviewed-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20190628143635.22066-5-imre.deak@intel.com --- drivers/gpu/drm/i915/display/intel_ddi.c | 11 +++--- drivers/gpu/drm/i915/display/intel_display.h | 7 ++-- drivers/gpu/drm/i915/display/intel_dp.c | 2 +- drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 2 +- drivers/gpu/drm/i915/display/intel_tc.c | 48 +++++++++++---------------- drivers/gpu/drm/i915/intel_drv.h | 2 +- 6 files changed, 31 insertions(+), 41 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index d06f121281ef..442cd3997109 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -2999,14 +2999,14 @@ static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port) enum tc_port tc_port = intel_port_to_tc(dev_priv, port); u32 ln0, ln1, lane_info; - if (tc_port == PORT_TC_NONE || intel_dig_port->tc_type == TC_PORT_TBT) + if (intel_dig_port->tc_mode == TC_PORT_TBT_ALT) return; ln0 = I915_READ(MG_DP_MODE(0, port)); ln1 = I915_READ(MG_DP_MODE(1, port)); - switch (intel_dig_port->tc_type) { - case TC_PORT_TYPEC: + switch (intel_dig_port->tc_mode) { + case TC_PORT_DP_ALT: ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE); ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE); @@ -3049,7 +3049,7 @@ static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port) break; default: - MISSING_CASE(intel_dig_port->tc_type); + MISSING_CASE(intel_dig_port->tc_mode); return; } @@ -3643,8 +3643,7 @@ intel_ddi_pre_pll_enable(struct intel_encoder *encoder, * Program the lane count for static/dynamic connections on Type-C ports. * Skip this step for TBT. 
*/ - if (dig_port->tc_type == TC_PORT_UNKNOWN || - dig_port->tc_type == TC_PORT_TBT) + if (dig_port->tc_mode == TC_PORT_TBT_ALT) return; intel_ddi_set_fia_lane_count(encoder, crtc_state, port); diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index ee6b8194a459..d296556ed82e 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -189,10 +189,9 @@ enum tc_port { I915_MAX_TC_PORTS }; -enum tc_port_type { - TC_PORT_UNKNOWN = 0, - TC_PORT_TYPEC, - TC_PORT_TBT, +enum tc_port_mode { + TC_PORT_TBT_ALT, + TC_PORT_DP_ALT, TC_PORT_LEGACY, }; diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 348c70b75403..0c6afec78f93 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -1176,7 +1176,7 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp, DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) | DP_AUX_CH_CTL_SYNC_PULSE_SKL(32); - if (intel_dig_port->tc_type == TC_PORT_TBT) + if (intel_dig_port->tc_mode == TC_PORT_TBT_ALT) ret |= DP_AUX_CH_CTL_TBT_IO; return ret; diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index 2d4e7b9a7b9d..bf66261c8bf0 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -2817,7 +2817,7 @@ icl_get_dpll(struct intel_crtc_state *crtc_state, intel_dig_port = enc_to_dig_port(&encoder->base); } - if (intel_dig_port->tc_type == TC_PORT_TBT) { + if (intel_dig_port->tc_mode == TC_PORT_TBT_ALT) { min = DPLL_ID_ICL_TBTPLL; max = min; ret = icl_calc_dpll_state(crtc_state, encoder); diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index 4fa9ea695d51..59aad3e49f93 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -7,19 +7,18 @@ #include "i915_drv.h" #include "intel_tc.h" -static const char *tc_type_name(enum tc_port_type type) +static const char *tc_port_mode_name(enum tc_port_mode mode) { static const char * const names[] = { - [TC_PORT_UNKNOWN] = "unknown", + [TC_PORT_TBT_ALT] = "tbt-alt", + [TC_PORT_DP_ALT] = "dp-alt", [TC_PORT_LEGACY] = "legacy", - [TC_PORT_TYPEC] = "typec", - [TC_PORT_TBT] = "tbt", }; - if (WARN_ON(type >= ARRAY_SIZE(names))) - type = TC_PORT_UNKNOWN; + if (WARN_ON(mode >= ARRAY_SIZE(names))) + mode = TC_PORT_TBT_ALT; - return names[type]; + return names[mode]; } int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port) @@ -29,7 +28,7 @@ int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port) intel_wakeref_t wakeref; u32 lane_info; - if (tc_port == PORT_TC_NONE || dig_port->tc_type != TC_PORT_TYPEC) + if (dig_port->tc_mode != TC_PORT_DP_ALT) return 4; lane_info = 0; @@ -81,8 +80,8 @@ static bool icl_tc_phy_connect(struct intel_digital_port *dig_port) enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); u32 val; - if (dig_port->tc_type != TC_PORT_LEGACY && - dig_port->tc_type != TC_PORT_TYPEC) + if (dig_port->tc_mode != TC_PORT_LEGACY && + dig_port->tc_mode != TC_PORT_DP_ALT) return true; val = I915_READ(PORT_TX_DFLEXDPPMS); @@ -106,7 +105,7 @@ static bool icl_tc_phy_connect(struct intel_digital_port *dig_port) * Now we have to re-check the live state, in case the port recently * became disconnected. Not necessary for legacy mode. 
*/ - if (dig_port->tc_type == TC_PORT_TYPEC && + if (dig_port->tc_mode == TC_PORT_DP_ALT && !(I915_READ(PORT_TX_DFLEXDPSP) & TC_LIVE_STATE_TC(tc_port))) { DRM_DEBUG_KMS("TC PHY %d sudden disconnect.\n", tc_port); icl_tc_phy_disconnect(dig_port); @@ -125,15 +124,12 @@ void icl_tc_phy_disconnect(struct intel_digital_port *dig_port) struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); - if (dig_port->tc_type == TC_PORT_UNKNOWN) - return; - /* * TBT disconnection flow is read the live status, what was done in * caller. */ - if (dig_port->tc_type == TC_PORT_TYPEC || - dig_port->tc_type == TC_PORT_LEGACY) { + if (dig_port->tc_mode == TC_PORT_DP_ALT || + dig_port->tc_mode == TC_PORT_LEGACY) { u32 val; val = I915_READ(PORT_TX_DFLEXDPCSSS); @@ -143,9 +139,9 @@ void icl_tc_phy_disconnect(struct intel_digital_port *dig_port) DRM_DEBUG_KMS("Port %c TC type %s disconnected\n", port_name(dig_port->base.port), - tc_type_name(dig_port->tc_type)); + tc_port_mode_name(dig_port->tc_mode)); - dig_port->tc_type = TC_PORT_UNKNOWN; + dig_port->tc_mode = TC_PORT_TBT_ALT; } static void icl_update_tc_port_type(struct drm_i915_private *dev_priv, @@ -153,26 +149,22 @@ static void icl_update_tc_port_type(struct drm_i915_private *dev_priv, bool is_legacy, bool is_typec, bool is_tbt) { enum port port = intel_dig_port->base.port; - enum tc_port_type old_type = intel_dig_port->tc_type; + enum tc_port_mode old_mode = intel_dig_port->tc_mode; WARN_ON(is_legacy + is_typec + is_tbt != 1); if (is_legacy) - intel_dig_port->tc_type = TC_PORT_LEGACY; + intel_dig_port->tc_mode = TC_PORT_LEGACY; else if (is_typec) - intel_dig_port->tc_type = TC_PORT_TYPEC; + intel_dig_port->tc_mode = TC_PORT_DP_ALT; else if (is_tbt) - intel_dig_port->tc_type = TC_PORT_TBT; + intel_dig_port->tc_mode = TC_PORT_TBT_ALT; else return; - /* Types are not supposed to be changed at runtime. */ - WARN_ON(old_type != TC_PORT_UNKNOWN && - old_type != intel_dig_port->tc_type); - - if (old_type != intel_dig_port->tc_type) + if (old_mode != intel_dig_port->tc_mode) DRM_DEBUG_KMS("Port %c has TC type %s\n", port_name(port), - tc_type_name(intel_dig_port->tc_type)); + tc_port_mode_name(intel_dig_port->tc_mode)); } /* diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 1d58f7ec5d84..7159f709a7f2 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1225,7 +1225,7 @@ struct intel_digital_port { enum aux_ch aux_ch; enum intel_display_power_domain ddi_io_power_domain; bool tc_legacy_port:1; - enum tc_port_type tc_type; + enum tc_port_mode tc_mode; void (*write_infoframe)(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, -- cgit v1.2.3 From 3b2ed431342948646ed1ecf2ad73e4b54e88572b Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 28 Jun 2019 17:36:17 +0300 Subject: drm/i915: Don't enable the DDI-IO power in the TypeC TBT-alt mode MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit According to the spec we should not enable the DDI-IO power domain if the TypeC port is in the TBT-alt mode, so do that only in the other TypeC modes or for non-TypeC ports. See the internal BSpec Index/22243. v2: - Add the internal BSpec reference to the log message. 
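The rule this patch encodes, take the DDI-IO power domain only when the port is not a TypeC port running in TBT-alt mode, is small enough to state on its own. A hedged sketch follows; needs_ddi_io_power(), power_domain_get() and power_domain_put() are illustrative stand-ins, not the driver's power-domain API:

    #include <stdbool.h>
    #include <stdio.h>

    enum tc_port_mode { TC_PORT_TBT_ALT, TC_PORT_DP_ALT, TC_PORT_LEGACY };

    /* Illustrative stand-ins for the driver's power-domain reference counting. */
    static void power_domain_get(const char *domain) { printf("get %s\n", domain); }
    static void power_domain_put(const char *domain) { printf("put %s\n", domain); }

    /* DDI-IO power is needed unless this is a TypeC port in TBT-alt mode,
     * which is the case the patch above excludes per the spec. */
    static bool needs_ddi_io_power(bool is_tc_port, enum tc_port_mode mode)
    {
        return !is_tc_port || mode != TC_PORT_TBT_ALT;
    }

    static void pre_enable(bool is_tc_port, enum tc_port_mode mode)
    {
        if (needs_ddi_io_power(is_tc_port, mode))
            power_domain_get("DDI-IO");
    }

    static void post_disable(bool is_tc_port, enum tc_port_mode mode)
    {
        if (needs_ddi_io_power(is_tc_port, mode))
            power_domain_put("DDI-IO");
    }

    int main(void)
    {
        pre_enable(true, TC_PORT_TBT_ALT);   /* TBT-alt: no DDI-IO reference taken */
        pre_enable(true, TC_PORT_DP_ALT);    /* DP-alt: DDI-IO taken... */
        post_disable(true, TC_PORT_DP_ALT);  /* ...and released on disable */
        return 0;
    }

The diff below applies the same condition symmetrically in the pre-enable and post-disable paths.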
(José) Cc: Manasi Navare Cc: Anusha Srivatsa Cc: José Roberto de Souza Signed-off-by: Imre Deak Reviewed-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190628143635.22066-6-imre.deak@intel.com --- drivers/gpu/drm/i915/display/intel_ddi.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 442cd3997109..c16fa789a931 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -3124,7 +3124,10 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, intel_ddi_clk_select(encoder, crtc_state); - intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); + if (!intel_port_is_tc(dev_priv, port) || + dig_port->tc_mode != TC_PORT_TBT_ALT) + intel_display_power_get(dev_priv, + dig_port->ddi_io_power_domain); icl_program_mg_dp_mode(dig_port); icl_disable_phy_clock_gating(dig_port); @@ -3306,8 +3309,10 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder, intel_edp_panel_vdd_on(intel_dp); intel_edp_panel_off(intel_dp); - intel_display_power_put_unchecked(dev_priv, - dig_port->ddi_io_power_domain); + if (!intel_port_is_tc(dev_priv, encoder->port) || + dig_port->tc_mode != TC_PORT_TBT_ALT) + intel_display_power_put_unchecked(dev_priv, + dig_port->ddi_io_power_domain); intel_ddi_clk_disable(encoder); } -- cgit v1.2.3 From 29ae36abf08f943b76a2959f5000c44efa335be7 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 28 Jun 2019 17:36:18 +0300 Subject: drm/i915: Fix the TBT AUX power well enabling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix the mapping from a TBT AUX power well index to the DP_AUX_CH_CTL register. Fixes: c7375d9542f1 ("drm/i915: Configure AUX_CH_CTL when enabling the AUX power domain") Cc: José Roberto de Souza Cc: Rodrigo Vivi Signed-off-by: Imre Deak Reviewed-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190628143635.22066-7-imre.deak@intel.com --- drivers/gpu/drm/i915/display/intel_display_power.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 217787d10718..fd13cd68deae 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -444,16 +444,23 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, #define ICL_AUX_PW_TO_CH(pw_idx) \ ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A) +#define ICL_TBT_AUX_PW_TO_CH(pw_idx) \ + ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C) + static void icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - enum aux_ch aux_ch = ICL_AUX_PW_TO_CH(power_well->desc->hsw.idx); + int pw_idx = power_well->desc->hsw.idx; + bool is_tbt = power_well->desc->hsw.is_tc_tbt; + enum aux_ch aux_ch; u32 val; + aux_ch = is_tbt ? 
ICL_TBT_AUX_PW_TO_CH(pw_idx) : + ICL_AUX_PW_TO_CH(pw_idx); val = I915_READ(DP_AUX_CH_CTL(aux_ch)); val &= ~DP_AUX_CH_CTL_TBT_IO; - if (power_well->desc->hsw.is_tc_tbt) + if (is_tbt) val |= DP_AUX_CH_CTL_TBT_IO; I915_WRITE(DP_AUX_CH_CTL(aux_ch), val); -- cgit v1.2.3 From dd7239c545b1715dbe785a93b6722f66becd9248 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 28 Jun 2019 17:36:19 +0300 Subject: drm/i915: Use the correct AUX power domain in TypeC TBT-alt mode MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In the TypeC TBT-alt port mode we must use the TBT AUX power domain, fix that. Cc: Manasi Navare Cc: Anusha Srivatsa Cc: José Roberto de Souza Signed-off-by: Imre Deak Reviewed-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190628143635.22066-8-imre.deak@intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index d65a580fe527..6f93bedb331a 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -6610,6 +6610,25 @@ enum intel_display_power_domain intel_port_to_power_domain(enum port port) enum intel_display_power_domain intel_aux_power_domain(struct intel_digital_port *dig_port) { + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + + if (intel_port_is_tc(dev_priv, dig_port->base.port) && + dig_port->tc_mode == TC_PORT_TBT_ALT) { + switch (dig_port->aux_ch) { + case AUX_CH_C: + return POWER_DOMAIN_AUX_TBT1; + case AUX_CH_D: + return POWER_DOMAIN_AUX_TBT2; + case AUX_CH_E: + return POWER_DOMAIN_AUX_TBT3; + case AUX_CH_F: + return POWER_DOMAIN_AUX_TBT4; + default: + MISSING_CASE(dig_port->aux_ch); + return POWER_DOMAIN_AUX_TBT1; + } + } + switch (dig_port->aux_ch) { case AUX_CH_A: return POWER_DOMAIN_AUX_A; -- cgit v1.2.3 From ab7bc4e1a5508e5cb974985fba325d80086b3f10 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 28 Jun 2019 17:36:20 +0300 Subject: drm/i915: Unify the TypeC port notation in debug/error messages MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Unify the TypeC port notation in log messages, so that it matches the spec. For instance the first ICL TypeC port will read as 'Port C/TC#1'. v2: - Format print the name only once. 
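As a quick illustration of the notation this patch settles on, a minimal sketch that builds the 'Port C/TC#1' style name from the DDI letter and the zero-based TypeC port index. The '%c/TC#%d' format, the buffer size and the helper name are assumptions made for this example, chosen only to reproduce the sample string quoted above:

    #include <stdio.h>

    /* Build the unified name, e.g. DDI port 'C' that is TypeC port 1 gives
     * "C/TC#1", which then shows up in logs as "Port C/TC#1". */
    static void tc_port_name(char *buf, size_t len, char ddi_port, int tc_port)
    {
        snprintf(buf, len, "%c/TC#%d", ddi_port, tc_port + 1);
    }

    int main(void)
    {
        char name[8];

        tc_port_name(name, sizeof(name), 'C', 0); /* first ICL TypeC port */
        printf("Port %s: PHY not ready\n", name);
        return 0;
    }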
(José) Cc: José Roberto de Souza Cc: Rodrigo Vivi Cc: Paulo Zanoni Signed-off-by: Imre Deak Reviewed-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190628143635.22066-9-imre.deak@intel.com --- drivers/gpu/drm/i915/display/intel_ddi.c | 9 ++++++--- drivers/gpu/drm/i915/display/intel_tc.c | 32 ++++++++++++++++++++++++-------- drivers/gpu/drm/i915/display/intel_tc.h | 2 ++ drivers/gpu/drm/i915/intel_drv.h | 1 + 4 files changed, 33 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index c16fa789a931..c9143e2a6994 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -4265,9 +4265,12 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port); intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port); - intel_dig_port->tc_legacy_port = intel_port_is_tc(dev_priv, port) && - !port_info->supports_typec_usb && - !port_info->supports_tbt; + if (intel_port_is_tc(dev_priv, port)) { + bool is_legacy = !port_info->supports_typec_usb && + !port_info->supports_tbt; + + intel_tc_port_init(intel_dig_port, is_legacy); + } switch (port) { case PORT_A: diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index 59aad3e49f93..ca3b11e26474 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -86,7 +86,8 @@ static bool icl_tc_phy_connect(struct intel_digital_port *dig_port) val = I915_READ(PORT_TX_DFLEXDPPMS); if (!(val & DP_PHY_MODE_STATUS_COMPLETED(tc_port))) { - DRM_DEBUG_KMS("DP PHY for TC port %d not ready\n", tc_port); + DRM_DEBUG_KMS("Port %s: PHY not ready\n", + dig_port->tc_port_name); WARN_ON(dig_port->tc_legacy_port); return false; } @@ -107,7 +108,8 @@ static bool icl_tc_phy_connect(struct intel_digital_port *dig_port) */ if (dig_port->tc_mode == TC_PORT_DP_ALT && !(I915_READ(PORT_TX_DFLEXDPSP) & TC_LIVE_STATE_TC(tc_port))) { - DRM_DEBUG_KMS("TC PHY %d sudden disconnect.\n", tc_port); + DRM_DEBUG_KMS("Port %s: PHY sudden disconnect\n", + dig_port->tc_port_name); icl_tc_phy_disconnect(dig_port); return false; } @@ -137,8 +139,8 @@ void icl_tc_phy_disconnect(struct intel_digital_port *dig_port) I915_WRITE(PORT_TX_DFLEXDPCSSS, val); } - DRM_DEBUG_KMS("Port %c TC type %s disconnected\n", - port_name(dig_port->base.port), + DRM_DEBUG_KMS("Port %s: mode %s disconnected\n", + dig_port->tc_port_name, tc_port_mode_name(dig_port->tc_mode)); dig_port->tc_mode = TC_PORT_TBT_ALT; @@ -148,7 +150,6 @@ static void icl_update_tc_port_type(struct drm_i915_private *dev_priv, struct intel_digital_port *intel_dig_port, bool is_legacy, bool is_typec, bool is_tbt) { - enum port port = intel_dig_port->base.port; enum tc_port_mode old_mode = intel_dig_port->tc_mode; WARN_ON(is_legacy + is_typec + is_tbt != 1); @@ -163,7 +164,8 @@ static void icl_update_tc_port_type(struct drm_i915_private *dev_priv, return; if (old_mode != intel_dig_port->tc_mode) - DRM_DEBUG_KMS("Port %c has TC type %s\n", port_name(port), + DRM_DEBUG_KMS("Port %s: port has mode %s\n", + intel_dig_port->tc_port_name, tc_port_mode_name(intel_dig_port->tc_mode)); } @@ -191,8 +193,8 @@ bool intel_tc_port_connected(struct intel_digital_port *dig_port) */ if (!dig_port->tc_legacy_port && I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port)) { - DRM_ERROR("VBT incorrectly claims port %c is not TypeC legacy\n", 
- port_name(port)); + DRM_ERROR("Port %s: VBT incorrectly claims port is not TypeC legacy\n", + dig_port->tc_port_name); dig_port->tc_legacy_port = true; } is_legacy = dig_port->tc_legacy_port; @@ -220,3 +222,17 @@ bool intel_tc_port_connected(struct intel_digital_port *dig_port) return true; } +void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + enum port port = dig_port->base.port; + enum tc_port tc_port = intel_port_to_tc(i915, port); + + if (WARN_ON(tc_port == PORT_TC_NONE)) + return; + + snprintf(dig_port->tc_port_name, sizeof(dig_port->tc_port_name), + "%c/TC#%d", port_name(port), tc_port + 1); + + dig_port->tc_legacy_port = is_legacy; +} diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h index 0c65675394e5..ca1735303252 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.h +++ b/drivers/gpu/drm/i915/display/intel_tc.h @@ -15,4 +15,6 @@ void icl_tc_phy_disconnect(struct intel_digital_port *dig_port); bool intel_tc_port_connected(struct intel_digital_port *dig_port); int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port); +void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy); + #endif /* __INTEL_TC_H__ */ diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 7159f709a7f2..19f6a360acde 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1225,6 +1225,7 @@ struct intel_digital_port { enum aux_ch aux_ch; enum intel_display_power_domain ddi_io_power_domain; bool tc_legacy_port:1; + char tc_port_name[8]; enum tc_port_mode tc_mode; void (*write_infoframe)(struct intel_encoder *encoder, -- cgit v1.2.3 From c905eb28bd3f43e96ef9180971f7809738caaa27 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 28 Jun 2019 17:36:21 +0300 Subject: drm/i915: Factor out common parts from TypeC port handling functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Factor out helpers reading/parsing the TypeC specific registers, making current users of them clearer and letting us use them later. While at it also: - Simplify icl_tc_phy_connect() with an early return in legacy mode. - Simplify the live status check using one bitmask for all HPD bits. - Remove a micro-optimisation of the repeated safe-mode clearing. - Make sure we fix the legacy port flag in all cases. Except for the last two, no functional changes. v2: - Don't do reg reads at variable declarations. (Jani) - Prevent constant truncated compiler warning when assigning the valid_hpd_mask. (Nick) - s/intel_tc_port_get_lane_info/intel_tc_port_get_lane_mask/ (Ville) v3: - Make valid_hpd_mask init clear. 
(Ville) Cc: José Roberto de Souza Cc: Rodrigo Vivi Cc: Paulo Zanoni Cc: Jani Nikula Cc: Nick Desaulniers Signed-off-by: Imre Deak Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190628143635.22066-10-imre.deak@intel.com --- drivers/gpu/drm/i915/display/intel_ddi.c | 11 +- drivers/gpu/drm/i915/display/intel_tc.c | 188 +++++++++++++++++++------------ drivers/gpu/drm/i915/display/intel_tc.h | 1 + 3 files changed, 119 insertions(+), 81 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index c9143e2a6994..2be7cdc319ba 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -2996,8 +2996,7 @@ static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port) { struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); enum port port = intel_dig_port->base.port; - enum tc_port tc_port = intel_port_to_tc(dev_priv, port); - u32 ln0, ln1, lane_info; + u32 ln0, ln1, lane_mask; if (intel_dig_port->tc_mode == TC_PORT_TBT_ALT) return; @@ -3010,11 +3009,9 @@ static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port) ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE); ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE); - lane_info = (I915_READ(PORT_TX_DFLEXDPSP) & - DP_LANE_ASSIGNMENT_MASK(tc_port)) >> - DP_LANE_ASSIGNMENT_SHIFT(tc_port); + lane_mask = intel_tc_port_get_lane_mask(intel_dig_port); - switch (lane_info) { + switch (lane_mask) { case 0x1: case 0x4: break; @@ -3039,7 +3036,7 @@ static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port) MG_DP_MODE_CFG_DP_X2_MODE; break; default: - MISSING_CASE(lane_info); + MISSING_CASE(lane_mask); } break; diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index ca3b11e26474..f0688c7450c7 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -21,25 +21,34 @@ static const char *tc_port_mode_name(enum tc_port_mode mode) return names[mode]; } -int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port) +u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port) { struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); + u32 lane_mask; + + lane_mask = I915_READ(PORT_TX_DFLEXDPSP); + + return (lane_mask & DP_LANE_ASSIGNMENT_MASK(tc_port)) >> + DP_LANE_ASSIGNMENT_SHIFT(tc_port); +} + +int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port) +{ + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); intel_wakeref_t wakeref; - u32 lane_info; + u32 lane_mask; if (dig_port->tc_mode != TC_PORT_DP_ALT) return 4; - lane_info = 0; + lane_mask = 0; with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref) - lane_info = (I915_READ(PORT_TX_DFLEXDPSP) & - DP_LANE_ASSIGNMENT_MASK(tc_port)) >> - DP_LANE_ASSIGNMENT_SHIFT(tc_port); + lane_mask = intel_tc_port_get_lane_mask(dig_port); - switch (lane_info) { + switch (lane_mask) { default: - MISSING_CASE(lane_info); + MISSING_CASE(lane_mask); case 1: case 2: case 4: @@ -53,6 +62,76 @@ int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port) } } +static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port, + u32 live_status_mask) +{ + u32 valid_hpd_mask; + + if (dig_port->tc_legacy_port) + 
valid_hpd_mask = BIT(TC_PORT_LEGACY); + else + valid_hpd_mask = BIT(TC_PORT_DP_ALT) | + BIT(TC_PORT_TBT_ALT); + + if (!(live_status_mask & ~valid_hpd_mask)) + return; + + /* If live status mismatches the VBT flag, trust the live status. */ + DRM_ERROR("Port %s: live status %08x mismatches the legacy port flag, fix flag\n", + dig_port->tc_port_name, live_status_mask); + + dig_port->tc_legacy_port = !dig_port->tc_legacy_port; +} + +static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port) +{ + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); + u32 mask = 0; + u32 val; + + val = I915_READ(PORT_TX_DFLEXDPSP); + + if (val & TC_LIVE_STATE_TBT(tc_port)) + mask |= BIT(TC_PORT_TBT_ALT); + if (val & TC_LIVE_STATE_TC(tc_port)) + mask |= BIT(TC_PORT_DP_ALT); + + if (I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port)) + mask |= BIT(TC_PORT_LEGACY); + + /* The sink can be connected only in a single mode. */ + if (!WARN_ON(hweight32(mask) > 1)) + tc_port_fixup_legacy_flag(dig_port, mask); + + return mask; +} + +static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port) +{ + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); + + return I915_READ(PORT_TX_DFLEXDPPMS) & + DP_PHY_MODE_STATUS_COMPLETED(tc_port); +} + +static void icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port, + bool enable) +{ + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); + u32 val; + + val = I915_READ(PORT_TX_DFLEXDPCSSS); + + val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port); + if (!enable) + val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc_port); + + I915_WRITE(PORT_TX_DFLEXDPCSSS, val); +} + /* * This function implements the first part of the Connect Flow described by our * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading @@ -76,38 +155,31 @@ int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port) */ static bool icl_tc_phy_connect(struct intel_digital_port *dig_port) { - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); - enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); - u32 val; + u32 live_status_mask; if (dig_port->tc_mode != TC_PORT_LEGACY && dig_port->tc_mode != TC_PORT_DP_ALT) return true; - val = I915_READ(PORT_TX_DFLEXDPPMS); - if (!(val & DP_PHY_MODE_STATUS_COMPLETED(tc_port))) { + if (!icl_tc_phy_status_complete(dig_port)) { DRM_DEBUG_KMS("Port %s: PHY not ready\n", dig_port->tc_port_name); WARN_ON(dig_port->tc_legacy_port); return false; } - /* - * This function may be called many times in a row without an HPD event - * in between, so try to avoid the write when we can. - */ - val = I915_READ(PORT_TX_DFLEXDPCSSS); - if (!(val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port))) { - val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc_port); - I915_WRITE(PORT_TX_DFLEXDPCSSS, val); - } + icl_tc_phy_set_safe_mode(dig_port, false); + + if (dig_port->tc_mode == TC_PORT_LEGACY) + return true; + + live_status_mask = tc_port_live_status_mask(dig_port); /* * Now we have to re-check the live state, in case the port recently * became disconnected. Not necessary for legacy mode. 
*/ - if (dig_port->tc_mode == TC_PORT_DP_ALT && - !(I915_READ(PORT_TX_DFLEXDPSP) & TC_LIVE_STATE_TC(tc_port))) { + if (!(live_status_mask & BIT(TC_PORT_DP_ALT))) { DRM_DEBUG_KMS("Port %s: PHY sudden disconnect\n", dig_port->tc_port_name); icl_tc_phy_disconnect(dig_port); @@ -123,46 +195,35 @@ static bool icl_tc_phy_connect(struct intel_digital_port *dig_port) */ void icl_tc_phy_disconnect(struct intel_digital_port *dig_port) { - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); - enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); - - /* - * TBT disconnection flow is read the live status, what was done in - * caller. - */ - if (dig_port->tc_mode == TC_PORT_DP_ALT || - dig_port->tc_mode == TC_PORT_LEGACY) { - u32 val; - - val = I915_READ(PORT_TX_DFLEXDPCSSS); - val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port); - I915_WRITE(PORT_TX_DFLEXDPCSSS, val); + switch (dig_port->tc_mode) { + case TC_PORT_LEGACY: + case TC_PORT_DP_ALT: + icl_tc_phy_set_safe_mode(dig_port, true); + dig_port->tc_mode = TC_PORT_TBT_ALT; + break; + case TC_PORT_TBT_ALT: + /* Nothing to do, we stay in TBT-alt mode */ + break; + default: + MISSING_CASE(dig_port->tc_mode); } DRM_DEBUG_KMS("Port %s: mode %s disconnected\n", dig_port->tc_port_name, tc_port_mode_name(dig_port->tc_mode)); - - dig_port->tc_mode = TC_PORT_TBT_ALT; } static void icl_update_tc_port_type(struct drm_i915_private *dev_priv, struct intel_digital_port *intel_dig_port, - bool is_legacy, bool is_typec, bool is_tbt) + u32 live_status_mask) { enum tc_port_mode old_mode = intel_dig_port->tc_mode; - WARN_ON(is_legacy + is_typec + is_tbt != 1); - - if (is_legacy) - intel_dig_port->tc_mode = TC_PORT_LEGACY; - else if (is_typec) - intel_dig_port->tc_mode = TC_PORT_DP_ALT; - else if (is_tbt) - intel_dig_port->tc_mode = TC_PORT_TBT_ALT; - else + if (!live_status_mask) return; + intel_dig_port->tc_mode = fls(live_status_mask) - 1; + if (old_mode != intel_dig_port->tc_mode) DRM_DEBUG_KMS("Port %s: port has mode %s\n", intel_dig_port->tc_port_name, @@ -182,40 +243,19 @@ static void icl_update_tc_port_type(struct drm_i915_private *dev_priv, bool intel_tc_port_connected(struct intel_digital_port *dig_port) { struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); - enum port port = dig_port->base.port; - enum tc_port tc_port = intel_port_to_tc(dev_priv, port); - bool is_legacy, is_typec, is_tbt; - u32 dpsp; - - /* - * Complain if we got a legacy port HPD, but VBT didn't mark the port as - * legacy. Treat the port as legacy from now on. - */ - if (!dig_port->tc_legacy_port && - I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port)) { - DRM_ERROR("Port %s: VBT incorrectly claims port is not TypeC legacy\n", - dig_port->tc_port_name); - dig_port->tc_legacy_port = true; - } - is_legacy = dig_port->tc_legacy_port; + u32 live_status_mask = tc_port_live_status_mask(dig_port); /* * The spec says we shouldn't be using the ISR bits for detecting * between TC and TBT. We should use DFLEXDPSP. 
*/ - dpsp = I915_READ(PORT_TX_DFLEXDPSP); - is_typec = dpsp & TC_LIVE_STATE_TC(tc_port); - is_tbt = dpsp & TC_LIVE_STATE_TBT(tc_port); - - if (!is_legacy && !is_typec && !is_tbt) { + if (!live_status_mask && !dig_port->tc_legacy_port) { icl_tc_phy_disconnect(dig_port); return false; } - icl_update_tc_port_type(dev_priv, dig_port, is_legacy, is_typec, - is_tbt); - + icl_update_tc_port_type(dev_priv, dig_port, live_status_mask); if (!icl_tc_phy_connect(dig_port)) return false; diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h index ca1735303252..8c338c45796d 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.h +++ b/drivers/gpu/drm/i915/display/intel_tc.h @@ -13,6 +13,7 @@ struct intel_digital_port; void icl_tc_phy_disconnect(struct intel_digital_port *dig_port); bool intel_tc_port_connected(struct intel_digital_port *dig_port); +u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port); int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port); void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy); -- cgit v1.2.3 From ddec362724f98a6725aeafda7701a40635c15909 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 28 Jun 2019 17:36:22 +0300 Subject: drm/i915: Wait for TypeC PHY complete flag to clear in safe mode MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The PHY status complete flag normally clears when disconnecting the PHY in DP-alt mode (achieved by switching to safe mode), so wait for the flag to clear. v2: - Use DRM_DEBUG_KMS instead of DRM_DEBUG_DRIVER. (José) Cc: José Roberto de Souza Cc: Rodrigo Vivi Signed-off-by: Imre Deak Reviewed-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190628143635.22066-11-imre.deak@intel.com --- drivers/gpu/drm/i915/display/intel_tc.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index f0688c7450c7..4243db6d25a7 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -130,6 +130,10 @@ static void icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port, val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc_port); I915_WRITE(PORT_TX_DFLEXDPCSSS, val); + + if (enable && wait_for(!icl_tc_phy_status_complete(dig_port), 10)) + DRM_DEBUG_KMS("Port %s: PHY complete clear timed out\n", + dig_port->tc_port_name); } /* -- cgit v1.2.3 From 424f109f5c5d9c82a16d0c8dc033ff3d513c6e0f Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 28 Jun 2019 17:36:23 +0300 Subject: drm/i915: Handle the TCCOLD power-down event MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Based on a recent BSpec update (Index/21750) we must handle the TCCOLD event associated with the DP-alt mode. We can detect this event by reading an invalid all-1s value from FIA registers. After detecting TCCOLD we will: - fall back to TBT-alt mode when attempting to switch to DP-alt mode - conclude that nothing is connected during live status detection - WARN when already in unsafe mode, since then TCCOLD is unexpected v2: - Use DRM_DEBUG_KMS instead of DRM_DEBUG_DRIVER. (José) v3: - Use 0xffffffff instead of -1 as invalid FIA reg value. (José, Ville) - Check for TCCOLD in icl_tc_phy_status_complete() too. 
(Ville) Cc: José Roberto de Souza Cc: Rodrigo Vivi Cc: Ville Syrjälä Signed-off-by: Imre Deak Reviewed-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190628143635.22066-12-imre.deak@intel.com --- drivers/gpu/drm/i915/display/intel_tc.c | 33 +++++++++++++++++++++++++++++---- 1 file changed, 29 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index 4243db6d25a7..96855250a5be 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -29,6 +29,8 @@ u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port) lane_mask = I915_READ(PORT_TX_DFLEXDPSP); + WARN_ON(lane_mask == 0xffffffff); + return (lane_mask & DP_LANE_ASSIGNMENT_MASK(tc_port)) >> DP_LANE_ASSIGNMENT_SHIFT(tc_port); } @@ -92,6 +94,12 @@ static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port) val = I915_READ(PORT_TX_DFLEXDPSP); + if (val == 0xffffffff) { + DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, nothing connected\n", + dig_port->tc_port_name); + return mask; + } + if (val & TC_LIVE_STATE_TBT(tc_port)) mask |= BIT(TC_PORT_TBT_ALT); if (val & TC_LIVE_STATE_TC(tc_port)) @@ -111,12 +119,19 @@ static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port) { struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); + u32 val; - return I915_READ(PORT_TX_DFLEXDPPMS) & - DP_PHY_MODE_STATUS_COMPLETED(tc_port); + val = I915_READ(PORT_TX_DFLEXDPPMS); + if (val == 0xffffffff) { + DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, assuming not complete\n", + dig_port->tc_port_name); + return false; + } + + return val & DP_PHY_MODE_STATUS_COMPLETED(tc_port); } -static void icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port, +static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port, bool enable) { struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); @@ -124,6 +139,13 @@ static void icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port, u32 val; val = I915_READ(PORT_TX_DFLEXDPCSSS); + if (val == 0xffffffff) { + DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, can't set safe-mode to %s\n", + dig_port->tc_port_name, + enableddisabled(enable)); + + return false; + } val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port); if (!enable) @@ -134,6 +156,8 @@ static void icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port, if (enable && wait_for(!icl_tc_phy_status_complete(dig_port), 10)) DRM_DEBUG_KMS("Port %s: PHY complete clear timed out\n", dig_port->tc_port_name); + + return true; } /* @@ -172,7 +196,8 @@ static bool icl_tc_phy_connect(struct intel_digital_port *dig_port) return false; } - icl_tc_phy_set_safe_mode(dig_port, false); + if (!icl_tc_phy_set_safe_mode(dig_port, false)) + return false; if (dig_port->tc_mode == TC_PORT_LEGACY) return true; -- cgit v1.2.3 From 1cd5ef6ee23cdde8afd53dd72b9f2f789c22f58c Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 28 Jun 2019 17:36:24 +0300 Subject: drm/i915: Sanitize the TypeC connect/detect sequences MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make the order during detection more consistent: first reset the TypeC port mode if needed (adding new helpers for this), then detect any connected sink. 
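The resulting detection flow is easy to see as a small standalone model (not part of the patch; the types and helpers below are stand-ins for the driver's, kept only detailed enough to build on their own, and the tc_port_mode ordering of TBT-alt lowest / legacy highest is assumed from the fls() usage in the diffs): reset the port mode when the target mode differs from the current one, then report a connection only if the live status still lists the mode the port ended up in.

#include <stdbool.h>
#include <stdio.h>

enum tc_port_mode { TC_PORT_TBT_ALT, TC_PORT_DP_ALT, TC_PORT_LEGACY };

struct tc_port {
	enum tc_port_mode mode;		/* current PHY/port mode */
	enum tc_port_mode target_mode;	/* mode implied by live status/PHY */
	unsigned int live_status_mask;	/* one bit per detected sink mode */
};

static void reset_mode(struct tc_port *port)
{
	/* The driver disconnects and reconnects the PHY here; this
	 * sketch only records the mode the port ends up in. */
	port->mode = port->target_mode;
}

static bool port_connected(struct tc_port *port)
{
	if (port->target_mode != port->mode)	/* needs a reset? */
		reset_mode(port);

	return port->live_status_mask & (1u << port->mode);
}

int main(void)
{
	struct tc_port port = {
		.mode = TC_PORT_TBT_ALT,
		.target_mode = TC_PORT_DP_ALT,
		.live_status_mask = 1u << TC_PORT_DP_ALT,
	};

	printf("connected: %d, mode: %d\n", port_connected(&port), port.mode);
	return 0;
}
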
To check if a port mode reset is needed determine first the target port mode based on the live status if a sink is already connected or the PHY status complete flag otherwise. Add a WARN in legacy mode if unexpectedly we can't set the unsafe mode or if the FIA doesn't provide the 4 lanes required. Cc: José Roberto de Souza Cc: Rodrigo Vivi Cc: Paulo Zanoni Cc: Ville Syrjälä Signed-off-by: Imre Deak Reviewed-by: Ville Syrjälä Reviewed-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190628143635.22066-13-imre.deak@intel.com --- drivers/gpu/drm/i915/display/intel_tc.c | 96 ++++++++++++++++----------------- 1 file changed, 47 insertions(+), 49 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index 96855250a5be..f63ddf39b369 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -181,41 +181,43 @@ static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port, * will require a lot of coordination with user space and thorough testing for * the extra possible cases. */ -static bool icl_tc_phy_connect(struct intel_digital_port *dig_port) +static void icl_tc_phy_connect(struct intel_digital_port *dig_port) { - u32 live_status_mask; - - if (dig_port->tc_mode != TC_PORT_LEGACY && - dig_port->tc_mode != TC_PORT_DP_ALT) - return true; - if (!icl_tc_phy_status_complete(dig_port)) { DRM_DEBUG_KMS("Port %s: PHY not ready\n", dig_port->tc_port_name); - WARN_ON(dig_port->tc_legacy_port); - return false; + goto out_set_tbt_alt_mode; } - if (!icl_tc_phy_set_safe_mode(dig_port, false)) - return false; + if (!icl_tc_phy_set_safe_mode(dig_port, false) && + !WARN_ON(dig_port->tc_legacy_port)) + goto out_set_tbt_alt_mode; - if (dig_port->tc_mode == TC_PORT_LEGACY) - return true; + if (dig_port->tc_legacy_port) { + WARN_ON(intel_tc_port_fia_max_lane_count(dig_port) != 4); + dig_port->tc_mode = TC_PORT_LEGACY; - live_status_mask = tc_port_live_status_mask(dig_port); + return; + } /* * Now we have to re-check the live state, in case the port recently * became disconnected. Not necessary for legacy mode. */ - if (!(live_status_mask & BIT(TC_PORT_DP_ALT))) { + if (!(tc_port_live_status_mask(dig_port) & BIT(TC_PORT_DP_ALT))) { DRM_DEBUG_KMS("Port %s: PHY sudden disconnect\n", dig_port->tc_port_name); - icl_tc_phy_disconnect(dig_port); - return false; + goto out_set_safe_mode; } - return true; + dig_port->tc_mode = TC_PORT_DP_ALT; + + return; + +out_set_safe_mode: + icl_tc_phy_set_safe_mode(dig_port, true); +out_set_tbt_alt_mode: + dig_port->tc_mode = TC_PORT_TBT_ALT; } /* @@ -236,27 +238,37 @@ void icl_tc_phy_disconnect(struct intel_digital_port *dig_port) default: MISSING_CASE(dig_port->tc_mode); } +} - DRM_DEBUG_KMS("Port %s: mode %s disconnected\n", - dig_port->tc_port_name, - tc_port_mode_name(dig_port->tc_mode)); +static enum tc_port_mode +intel_tc_port_get_target_mode(struct intel_digital_port *dig_port) +{ + u32 live_status_mask = tc_port_live_status_mask(dig_port); + + if (live_status_mask) + return fls(live_status_mask) - 1; + + return icl_tc_phy_status_complete(dig_port) && + dig_port->tc_legacy_port ? 
TC_PORT_LEGACY : + TC_PORT_TBT_ALT; } -static void icl_update_tc_port_type(struct drm_i915_private *dev_priv, - struct intel_digital_port *intel_dig_port, - u32 live_status_mask) +static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port) { - enum tc_port_mode old_mode = intel_dig_port->tc_mode; + enum tc_port_mode old_tc_mode = dig_port->tc_mode; - if (!live_status_mask) - return; + icl_tc_phy_disconnect(dig_port); + icl_tc_phy_connect(dig_port); - intel_dig_port->tc_mode = fls(live_status_mask) - 1; + DRM_DEBUG_KMS("Port %s: TC port mode reset (%s -> %s)\n", + dig_port->tc_port_name, + tc_port_mode_name(old_tc_mode), + tc_port_mode_name(dig_port->tc_mode)); +} - if (old_mode != intel_dig_port->tc_mode) - DRM_DEBUG_KMS("Port %s: port has mode %s\n", - intel_dig_port->tc_port_name, - tc_port_mode_name(intel_dig_port->tc_mode)); +static bool intel_tc_port_needs_reset(struct intel_digital_port *dig_port) +{ + return intel_tc_port_get_target_mode(dig_port) != dig_port->tc_mode; } /* @@ -271,24 +283,10 @@ static void icl_update_tc_port_type(struct drm_i915_private *dev_priv, */ bool intel_tc_port_connected(struct intel_digital_port *dig_port) { - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); - u32 live_status_mask = tc_port_live_status_mask(dig_port); - - /* - * The spec says we shouldn't be using the ISR bits for detecting - * between TC and TBT. We should use DFLEXDPSP. - */ - if (!live_status_mask && !dig_port->tc_legacy_port) { - icl_tc_phy_disconnect(dig_port); - - return false; - } - - icl_update_tc_port_type(dev_priv, dig_port, live_status_mask); - if (!icl_tc_phy_connect(dig_port)) - return false; + if (intel_tc_port_needs_reset(dig_port)) + intel_tc_port_reset_mode(dig_port); - return true; + return tc_port_live_status_mask(dig_port) & BIT(dig_port->tc_mode); } void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy) -- cgit v1.2.3 From 32691b58d157584b18faabf25cce755c1e31c370 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 28 Jun 2019 17:36:25 +0300 Subject: drm/i915: Fix the TypeC port mode sanitization during loading/resume MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For using the correct AUX power domains we have to sanitize the TypeC port mode early, so move that before encoder sanitization. To do this properly read out the actual port mode instead of just relying on the VBT legacy port flag (which can be incorrect). We also verify that the PHY is connected as expected if the port is active. In case the port is inactive we connect the PHY in case of a legacy port - as we did so far. The PHY will be connected during detection for DP-alt mode - as it was done so far. For TBT-alt mode nothing needs to be done to connect the PHY. v2: - Use DRM_DEBUG_KMS instead of DRM_DEBUG_DRIVER. (José) v3: - Detect TCCOLD any time PORT_TX_DFLEXDPCSSS is read. 
(Ville) Cc: José Roberto de Souza Cc: Rodrigo Vivi Cc: Paulo Zanoni Cc: Ville Syrjälä Signed-off-by: Imre Deak Reviewed-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190628143635.22066-14-imre.deak@intel.com --- drivers/gpu/drm/i915/display/intel_ddi.c | 16 +----- drivers/gpu/drm/i915/display/intel_display.c | 10 ++++ drivers/gpu/drm/i915/display/intel_dp_mst.h | 8 ++- drivers/gpu/drm/i915/display/intel_tc.c | 84 ++++++++++++++++++++++++++++ drivers/gpu/drm/i915/display/intel_tc.h | 2 + 5 files changed, 104 insertions(+), 16 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 2be7cdc319ba..0c5bfbd66b19 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -3931,17 +3931,6 @@ static void intel_ddi_encoder_suspend(struct intel_encoder *encoder) icl_tc_phy_disconnect(dig_port); } -static void intel_ddi_encoder_reset(struct drm_encoder *drm_encoder) -{ - struct intel_digital_port *dig_port = enc_to_dig_port(drm_encoder); - struct drm_i915_private *i915 = to_i915(drm_encoder->dev); - - if (intel_port_is_tc(i915, dig_port->base.port)) - intel_digital_port_connected(&dig_port->base); - - intel_dp_encoder_reset(drm_encoder); -} - static void intel_ddi_encoder_destroy(struct drm_encoder *encoder) { struct intel_digital_port *dig_port = enc_to_dig_port(encoder); @@ -3957,7 +3946,7 @@ static void intel_ddi_encoder_destroy(struct drm_encoder *encoder) } static const struct drm_encoder_funcs intel_ddi_funcs = { - .reset = intel_ddi_encoder_reset, + .reset = intel_dp_encoder_reset, .destroy = intel_ddi_encoder_destroy, }; @@ -4328,9 +4317,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) intel_infoframe_init(intel_dig_port); - if (intel_port_is_tc(dev_priv, port)) - intel_digital_port_connected(intel_encoder); - return; err: diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 6f93bedb331a..2b6732e0d02c 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -78,6 +78,7 @@ #include "intel_quirks.h" #include "intel_sideband.h" #include "intel_sprite.h" +#include "intel_tc.h" /* Primary plane formats for gen <= 3 */ static const u32 i8xx_primary_formats[] = { @@ -16799,6 +16800,15 @@ intel_modeset_setup_hw_state(struct drm_device *dev, intel_modeset_readout_hw_state(dev); /* HW state is read out, now we need to sanitize this mess. */ + + /* Sanitize the TypeC port mode upfront, encoders depend on this */ + for_each_intel_encoder(dev, encoder) { + /* We need to sanitize only the MST primary port. 
*/ + if (encoder->type != INTEL_OUTPUT_DP_MST && + intel_port_is_tc(dev_priv, encoder->port)) + intel_tc_port_sanitize(enc_to_dig_port(&encoder->base)); + } + get_encoder_power_domains(dev_priv); if (HAS_PCH_IBX(dev_priv)) diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h index 1470c6e0514b..6754c211205a 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.h +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h @@ -6,9 +6,15 @@ #ifndef __INTEL_DP_MST_H__ #define __INTEL_DP_MST_H__ -struct intel_digital_port; +#include "intel_drv.h" int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id); void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port); +static inline int +intel_dp_mst_encoder_active_links(struct intel_digital_port *intel_dig_port) +{ + return intel_dig_port->dp.active_mst_links; +} + #endif /* __INTEL_DP_MST_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index f63ddf39b369..78340115b994 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -4,6 +4,7 @@ */ #include "intel_display.h" +#include "intel_dp_mst.h" #include "i915_drv.h" #include "intel_tc.h" @@ -160,6 +161,22 @@ static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port, return true; } +static bool icl_tc_phy_is_in_safe_mode(struct intel_digital_port *dig_port) +{ + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); + u32 val; + + val = I915_READ(PORT_TX_DFLEXDPCSSS); + if (val == 0xffffffff) { + DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, assume safe mode\n", + dig_port->tc_port_name); + return true; + } + + return !(val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port)); +} + /* * This function implements the first part of the Connect Flow described by our * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading @@ -240,6 +257,46 @@ void icl_tc_phy_disconnect(struct intel_digital_port *dig_port) } } +static bool icl_tc_phy_is_connected(struct intel_digital_port *dig_port) +{ + if (!icl_tc_phy_status_complete(dig_port)) { + DRM_DEBUG_KMS("Port %s: PHY status not complete\n", + dig_port->tc_port_name); + return dig_port->tc_mode == TC_PORT_TBT_ALT; + } + + if (icl_tc_phy_is_in_safe_mode(dig_port)) { + DRM_DEBUG_KMS("Port %s: PHY still in safe mode\n", + dig_port->tc_port_name); + + return false; + } + + return dig_port->tc_mode == TC_PORT_DP_ALT || + dig_port->tc_mode == TC_PORT_LEGACY; +} + +static enum tc_port_mode +intel_tc_port_get_current_mode(struct intel_digital_port *dig_port) +{ + u32 live_status_mask = tc_port_live_status_mask(dig_port); + bool in_safe_mode = icl_tc_phy_is_in_safe_mode(dig_port); + enum tc_port_mode mode; + + if (in_safe_mode || WARN_ON(!icl_tc_phy_status_complete(dig_port))) + return TC_PORT_TBT_ALT; + + mode = dig_port->tc_legacy_port ? 
TC_PORT_LEGACY : TC_PORT_DP_ALT; + if (live_status_mask) { + enum tc_port_mode live_mode = fls(live_status_mask) - 1; + + if (!WARN_ON(live_mode == TC_PORT_TBT_ALT)) + mode = live_mode; + } + + return mode; +} + static enum tc_port_mode intel_tc_port_get_target_mode(struct intel_digital_port *dig_port) { @@ -266,6 +323,33 @@ static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port) tc_port_mode_name(dig_port->tc_mode)); } +void intel_tc_port_sanitize(struct intel_digital_port *dig_port) +{ + struct intel_encoder *encoder = &dig_port->base; + int active_links = 0; + + dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port); + if (dig_port->dp.is_mst) + active_links = intel_dp_mst_encoder_active_links(dig_port); + else if (encoder->base.crtc) + active_links = to_intel_crtc(encoder->base.crtc)->active; + + if (active_links) { + if (!icl_tc_phy_is_connected(dig_port)) + DRM_DEBUG_KMS("Port %s: PHY disconnected with %d active link(s)\n", + dig_port->tc_port_name, active_links); + goto out; + } + + if (dig_port->tc_legacy_port) + icl_tc_phy_connect(dig_port); + +out: + DRM_DEBUG_KMS("Port %s: sanitize mode (%s)\n", + dig_port->tc_port_name, + tc_port_mode_name(dig_port->tc_mode)); +} + static bool intel_tc_port_needs_reset(struct intel_digital_port *dig_port) { return intel_tc_port_get_target_mode(dig_port) != dig_port->tc_mode; diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h index 8c338c45796d..5a7876a74522 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.h +++ b/drivers/gpu/drm/i915/display/intel_tc.h @@ -16,6 +16,8 @@ bool intel_tc_port_connected(struct intel_digital_port *dig_port); u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port); int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port); +void intel_tc_port_sanitize(struct intel_digital_port *dig_port); + void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy); #endif /* __INTEL_TC_H__ */ -- cgit v1.2.3 From 8c10e2262663951ab4d43d4f74e282566c04e00c Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 28 Jun 2019 17:36:26 +0300 Subject: drm/i915: Keep the TypeC port mode fixed for detect/AUX transfers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We must keep the TypeC port mode fixed for the duration of the connector detection and each AUX transfers. Add a new TypeC lock holding it around these two sequences. For consistency also hold the lock during the port mode sanitization. Whenever resetting the port mode (only during the detection for now) the port's AUX power domain must be disabled already. Flush the async power domain disabling work to ensure this. A follow-up patch will make the port mode changing more robust by postponing the change for active ports. v2: - Fix checkpatch issue: missing annotation for tc_lock. 
Cc: José Roberto de Souza Cc: Rodrigo Vivi Cc: Paulo Zanoni Cc: Ville Syrjälä Signed-off-by: Imre Deak Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190628143635.22066-15-imre.deak@intel.com --- drivers/gpu/drm/i915/display/intel_dp.c | 7 +++++++ drivers/gpu/drm/i915/display/intel_tc.c | 30 +++++++++++++++++++++++++++++- drivers/gpu/drm/i915/display/intel_tc.h | 2 ++ drivers/gpu/drm/i915/intel_drv.h | 1 + 4 files changed, 39 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 0c6afec78f93..8f7188d71d08 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -1192,6 +1192,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); struct intel_uncore *uncore = &i915->uncore; + bool is_tc_port = intel_port_is_tc(i915, intel_dig_port->base.port); i915_reg_t ch_ctl, ch_data[5]; u32 aux_clock_divider; enum intel_display_power_domain aux_domain = @@ -1207,6 +1208,9 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, for (i = 0; i < ARRAY_SIZE(ch_data); i++) ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i); + if (is_tc_port) + intel_tc_port_lock(intel_dig_port); + aux_wakeref = intel_display_power_get(i915, aux_domain); pps_wakeref = pps_lock(intel_dp); @@ -1359,6 +1363,9 @@ out: pps_unlock(intel_dp, pps_wakeref); intel_display_power_put_async(i915, aux_domain, aux_wakeref); + if (is_tc_port) + intel_tc_port_unlock(intel_dig_port); + return ret; } diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index 78340115b994..18a599a71bd3 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -312,8 +312,11 @@ intel_tc_port_get_target_mode(struct intel_digital_port *dig_port) static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port) { + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); enum tc_port_mode old_tc_mode = dig_port->tc_mode; + intel_display_power_flush_work(dev_priv); + icl_tc_phy_disconnect(dig_port); icl_tc_phy_connect(dig_port); @@ -328,6 +331,8 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port) struct intel_encoder *encoder = &dig_port->base; int active_links = 0; + mutex_lock(&dig_port->tc_lock); + dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port); if (dig_port->dp.is_mst) active_links = intel_dp_mst_encoder_active_links(dig_port); @@ -348,6 +353,8 @@ out: DRM_DEBUG_KMS("Port %s: sanitize mode (%s)\n", dig_port->tc_port_name, tc_port_mode_name(dig_port->tc_mode)); + + mutex_unlock(&dig_port->tc_lock); } static bool intel_tc_port_needs_reset(struct intel_digital_port *dig_port) @@ -367,10 +374,30 @@ static bool intel_tc_port_needs_reset(struct intel_digital_port *dig_port) */ bool intel_tc_port_connected(struct intel_digital_port *dig_port) { + bool is_connected; + + mutex_lock(&dig_port->tc_lock); + if (intel_tc_port_needs_reset(dig_port)) intel_tc_port_reset_mode(dig_port); - return tc_port_live_status_mask(dig_port) & BIT(dig_port->tc_mode); + is_connected = tc_port_live_status_mask(dig_port) & + BIT(dig_port->tc_mode); + + mutex_unlock(&dig_port->tc_lock); + + return is_connected; +} + +void intel_tc_port_lock(struct intel_digital_port *dig_port) +{ + mutex_lock(&dig_port->tc_lock); + /* TODO: reset the TypeC port mode if needed */ +} + +void intel_tc_port_unlock(struct intel_digital_port 
*dig_port) +{ + mutex_unlock(&dig_port->tc_lock); } void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy) @@ -385,5 +412,6 @@ void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy) snprintf(dig_port->tc_port_name, sizeof(dig_port->tc_port_name), "%c/TC#%d", port_name(port), tc_port + 1); + mutex_init(&dig_port->tc_lock); dig_port->tc_legacy_port = is_legacy; } diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h index 5a7876a74522..b5af2fe60b22 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.h +++ b/drivers/gpu/drm/i915/display/intel_tc.h @@ -17,6 +17,8 @@ u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port); int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port); void intel_tc_port_sanitize(struct intel_digital_port *dig_port); +void intel_tc_port_lock(struct intel_digital_port *dig_port); +void intel_tc_port_unlock(struct intel_digital_port *dig_port); void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 19f6a360acde..d9e7d011ed4a 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1224,6 +1224,7 @@ struct intel_digital_port { /* Used for DP and ICL+ TypeC/DP and TypeC/HDMI ports. */ enum aux_ch aux_ch; enum intel_display_power_domain ddi_io_power_domain; + struct mutex tc_lock; /* protects the TypeC port mode */ bool tc_legacy_port:1; char tc_port_name[8]; enum tc_port_mode tc_mode; -- cgit v1.2.3 From 4f36afb26cbeab7658b80350127b54667d31d618 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 28 Jun 2019 17:36:27 +0300 Subject: drm/i915: Sanitize the TypeC FIA lane configuration decoding MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use hex numbers, since that makes more sense when decoding a bit pattern. No functional change. Suggested-by: Ville Syrjälä Cc: Animesh Manna Cc: Ville Syrjälä Signed-off-by: Imre Deak Reviewed-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190628143635.22066-16-imre.deak@intel.com --- drivers/gpu/drm/i915/display/intel_tc.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index 18a599a71bd3..58a228ba7696 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -52,15 +52,16 @@ int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port) switch (lane_mask) { default: MISSING_CASE(lane_mask); - case 1: - case 2: - case 4: - case 8: + /* fall-through */ + case 0x1: + case 0x2: + case 0x4: + case 0x8: return 1; - case 3: - case 12: + case 0x3: + case 0xc: return 2; - case 15: + case 0xf: return 4; } } -- cgit v1.2.3 From 866955fa452e1f3d2164bd731d71348d4978e8bc Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 28 Jun 2019 17:36:28 +0300 Subject: drm/i915: Sanitize the shared DPLL reserve/release interface MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For consistency s/intel_get_shared_dpll()/intel_reserve_shared_dplls()/ to better match intel_release_shared_dplls(). Also, pass to the reserve/release and get_dplls/put_dplls hooks the intel_atomic_state and CRTC object, that way these functions can look up the old or new state as needed. 
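Schematically, the reshaped per-platform hook table looks like the standalone sketch below (empty stand-in types rather than the real i915 definitions): the hooks receive the atomic commit state plus the CRTC and report success as a plain bool, instead of handing a single PLL pointer back to the caller.

#include <stdbool.h>
#include <stdio.h>

struct intel_atomic_state { int dummy; };	/* stand-in */
struct intel_crtc { int pipe; };		/* stand-in */
struct intel_encoder { int port; };		/* stand-in */

struct intel_dpll_mgr {
	bool (*get_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc,
			  struct intel_encoder *encoder);
	void (*put_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc);
};

static bool sketch_get_dplls(struct intel_atomic_state *state,
			     struct intel_crtc *crtc,
			     struct intel_encoder *encoder)
{
	/* A real hook looks up the new crtc_state from @state, computes
	 * the PLL HW state and references the PLL(s) it picked. */
	(void)state; (void)crtc; (void)encoder;
	return true;
}

static void sketch_put_dplls(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	/* A real hook drops the references taken by get_dplls(). */
	(void)state; (void)crtc;
}

static const struct intel_dpll_mgr sketch_mgr = {
	.get_dplls = sketch_get_dplls,
	.put_dplls = sketch_put_dplls,
};

int main(void)
{
	struct intel_atomic_state state = { 0 };
	struct intel_crtc crtc = { .pipe = 0 };
	struct intel_encoder encoder = { .port = 0 };

	if (!sketch_mgr.get_dplls(&state, &crtc, &encoder))
		printf("failed to reserve DPLLs for pipe %d\n", crtc.pipe);
	else
		sketch_mgr.put_dplls(&state, &crtc);
	return 0;
}

Returning only a success flag keeps the callers uniform and lets a later hook reserve more than one PLL without another signature change, which is the follow-up this patch prepares for.
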
Also release the PLLs from the atomic state via a new put_dplls->intel_unreference_shared_dpll() call chain for better symmetry with the reservation via the get_dplls->intel_reference_shared_dpll() call chain. Since nothing uses the PLL returned by intel_reserve_shared_dplls(), make it return only a bool. While at it also clarify the reserve/release function docbook headers making it clear that multiple DPLLs will be reserved/released and whether the new or old atomic CRTC state is affected. This refactoring is also a preparation for a follow-up change that needs to reserve multiple DPLLs. Kudos to Ville for the idea to pass intel_atomic_state around, to make things clearer locally where an object's old/new atomic state is required. No functional changes. v2: - Fix checkpatch issue: typo in code comment. v3: - Rebase on drm-tip. Cc: Ville Syrjälä Cc: Daniel Vetter Cc: Maarten Lankhorst Signed-off-by: Imre Deak Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190628143635.22066-17-imre.deak@intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 19 +-- drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 221 ++++++++++++++++---------- drivers/gpu/drm/i915/display/intel_dpll_mgr.h | 13 +- 3 files changed, 153 insertions(+), 100 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 2b6732e0d02c..b27cdaad611f 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -9493,6 +9493,8 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_atomic_state *state = + to_intel_atomic_state(crtc_state->base.state); const struct intel_limit *limit; int refclk = 120000; @@ -9534,7 +9536,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc, ironlake_compute_dpll(crtc, crtc_state, NULL); - if (!intel_get_shared_dpll(crtc_state, NULL)) { + if (!intel_reserve_shared_dplls(state, crtc, NULL)) { DRM_DEBUG_KMS("failed to find PLL for pipe %c\n", pipe_name(crtc->pipe)); return -EINVAL; @@ -9915,7 +9917,7 @@ static int haswell_crtc_compute_clock(struct intel_crtc *crtc, struct intel_encoder *encoder = intel_get_crtc_new_encoder(state, crtc_state); - if (!intel_get_shared_dpll(crtc_state, encoder)) { + if (!intel_reserve_shared_dplls(state, crtc, encoder)) { DRM_DEBUG_KMS("failed to find PLL for pipe %c\n", pipe_name(crtc->pipe)); return -EINVAL; @@ -13171,27 +13173,20 @@ static void update_scanline_offset(const struct intel_crtc_state *crtc_state) static void intel_modeset_clear_plls(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); - struct intel_crtc_state *old_crtc_state, *new_crtc_state; + struct intel_crtc_state *new_crtc_state; struct intel_crtc *crtc; int i; if (!dev_priv->display.crtc_compute_clock) return; - for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, - new_crtc_state, i) { - struct intel_shared_dpll *old_dpll = - old_crtc_state->shared_dpll; - + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { if (!needs_modeset(new_crtc_state)) continue; new_crtc_state->shared_dpll = NULL; - if (!old_dpll) - continue; - - intel_release_shared_dpll(old_dpll, crtc, &state->base); + intel_release_shared_dplls(state, crtc); } } diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c 
b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index bf66261c8bf0..3fbc975851fa 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -36,9 +36,10 @@ * This file provides an abstraction over display PLLs. The function * intel_shared_dpll_init() initializes the PLLs for the given platform. The * users of a PLL are tracked and that tracking is integrated with the atomic - * modest interface. During an atomic operation, a PLL can be requested for a - * given CRTC and encoder configuration by calling intel_get_shared_dpll() and - * a previously used PLL can be released with intel_release_shared_dpll(). + * modset interface. During an atomic operation, required PLLs can be reserved + * for a given CRTC and encoder configuration by calling + * intel_reserve_shared_dplls() and previously reserved PLLs can be released + * with intel_release_shared_dplls(). * Changes to the users are first staged in the atomic state, and then made * effective by calling intel_shared_dpll_swap_state() during the atomic * commit phase. @@ -309,6 +310,28 @@ intel_reference_shared_dpll(struct intel_shared_dpll *pll, shared_dpll[id].crtc_mask |= 1 << crtc->pipe; } +static void intel_unreference_shared_dpll(struct intel_atomic_state *state, + const struct intel_crtc *crtc, + const struct intel_shared_dpll *pll) +{ + struct intel_shared_dpll_state *shared_dpll; + + shared_dpll = intel_atomic_get_shared_dpll_state(&state->base); + shared_dpll[pll->info->id].crtc_mask &= ~(1 << crtc->pipe); +} + +static void intel_put_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct intel_crtc_state *crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + + if (!crtc_state->shared_dpll) + return; + + intel_unreference_shared_dpll(state, crtc, crtc_state->shared_dpll); +} + /** * intel_shared_dpll_swap_state - make atomic DPLL configuration effective * @state: atomic state @@ -421,11 +444,12 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv, udelay(200); } -static struct intel_shared_dpll * -ibx_get_dpll(struct intel_crtc_state *crtc_state, - struct intel_encoder *encoder) +static bool ibx_get_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_shared_dpll *pll; enum intel_dpll_id i; @@ -445,12 +469,12 @@ ibx_get_dpll(struct intel_crtc_state *crtc_state, } if (!pll) - return NULL; + return false; /* reference the pll */ intel_reference_shared_dpll(pll, crtc_state); - return pll; + return true; } static void ibx_dump_hw_state(struct drm_i915_private *dev_priv, @@ -821,10 +845,12 @@ hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state) return pll; } -static struct intel_shared_dpll * -hsw_get_dpll(struct intel_crtc_state *crtc_state, - struct intel_encoder *encoder) +static bool hsw_get_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder) { + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); struct intel_shared_dpll *pll; memset(&crtc_state->dpll_hw_state, 0, @@ -836,7 +862,7 @@ hsw_get_dpll(struct intel_crtc_state *crtc_state, pll = hsw_ddi_dp_get_dpll(crtc_state); } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) { if (WARN_ON(crtc_state->port_clock / 2 != 
135000)) - return NULL; + return false; crtc_state->dpll_hw_state.spll = SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC; @@ -844,15 +870,15 @@ hsw_get_dpll(struct intel_crtc_state *crtc_state, pll = intel_find_shared_dpll(crtc_state, DPLL_ID_SPLL, DPLL_ID_SPLL); } else { - return NULL; + return false; } if (!pll) - return NULL; + return false; intel_reference_shared_dpll(pll, crtc_state); - return pll; + return true; } static void hsw_dump_hw_state(struct drm_i915_private *dev_priv, @@ -1385,10 +1411,12 @@ skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state) return true; } -static struct intel_shared_dpll * -skl_get_dpll(struct intel_crtc_state *crtc_state, - struct intel_encoder *encoder) +static bool skl_get_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder) { + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); struct intel_shared_dpll *pll; bool bret; @@ -1396,16 +1424,16 @@ skl_get_dpll(struct intel_crtc_state *crtc_state, bret = skl_ddi_hdmi_pll_dividers(crtc_state); if (!bret) { DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n"); - return NULL; + return false; } } else if (intel_crtc_has_dp_encoder(crtc_state)) { bret = skl_ddi_dp_set_dpll_hw_state(crtc_state); if (!bret) { DRM_DEBUG_KMS("Could not set DP dpll HW state.\n"); - return NULL; + return false; } } else { - return NULL; + return false; } if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) @@ -1417,11 +1445,11 @@ skl_get_dpll(struct intel_crtc_state *crtc_state, DPLL_ID_SKL_DPLL1, DPLL_ID_SKL_DPLL3); if (!pll) - return NULL; + return false; intel_reference_shared_dpll(pll, crtc_state); - return pll; + return true; } static void skl_dump_hw_state(struct drm_i915_private *dev_priv, @@ -1827,22 +1855,23 @@ bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state) return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div); } -static struct intel_shared_dpll * -bxt_get_dpll(struct intel_crtc_state *crtc_state, - struct intel_encoder *encoder) +static bool bxt_get_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_shared_dpll *pll; enum intel_dpll_id id; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) && !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state)) - return NULL; + return false; if (intel_crtc_has_dp_encoder(crtc_state) && !bxt_ddi_dp_set_dpll_hw_state(crtc_state)) - return NULL; + return false; /* 1:1 mapping between ports and PLLs */ id = (enum intel_dpll_id) encoder->port; @@ -1853,7 +1882,7 @@ bxt_get_dpll(struct intel_crtc_state *crtc_state, intel_reference_shared_dpll(pll, crtc_state); - return pll; + return true; } static void bxt_dump_hw_state(struct drm_i915_private *dev_priv, @@ -1884,8 +1913,11 @@ static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = { struct intel_dpll_mgr { const struct dpll_info *dpll_info; - struct intel_shared_dpll *(*get_dpll)(struct intel_crtc_state *crtc_state, - struct intel_encoder *encoder); + bool (*get_dplls)(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder); + void (*put_dplls)(struct intel_atomic_state *state, + struct intel_crtc *crtc); void (*dump_hw_state)(struct drm_i915_private *dev_priv, const struct intel_dpll_hw_state *hw_state); @@ 
-1899,7 +1931,8 @@ static const struct dpll_info pch_plls[] = { static const struct intel_dpll_mgr pch_pll_mgr = { .dpll_info = pch_plls, - .get_dpll = ibx_get_dpll, + .get_dplls = ibx_get_dpll, + .put_dplls = intel_put_dpll, .dump_hw_state = ibx_dump_hw_state, }; @@ -1915,7 +1948,8 @@ static const struct dpll_info hsw_plls[] = { static const struct intel_dpll_mgr hsw_pll_mgr = { .dpll_info = hsw_plls, - .get_dpll = hsw_get_dpll, + .get_dplls = hsw_get_dpll, + .put_dplls = intel_put_dpll, .dump_hw_state = hsw_dump_hw_state, }; @@ -1929,7 +1963,8 @@ static const struct dpll_info skl_plls[] = { static const struct intel_dpll_mgr skl_pll_mgr = { .dpll_info = skl_plls, - .get_dpll = skl_get_dpll, + .get_dplls = skl_get_dpll, + .put_dplls = intel_put_dpll, .dump_hw_state = skl_dump_hw_state, }; @@ -1942,7 +1977,8 @@ static const struct dpll_info bxt_plls[] = { static const struct intel_dpll_mgr bxt_pll_mgr = { .dpll_info = bxt_plls, - .get_dpll = bxt_get_dpll, + .get_dplls = bxt_get_dpll, + .put_dplls = intel_put_dpll, .dump_hw_state = bxt_dump_hw_state, }; @@ -2332,10 +2368,12 @@ cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state) return true; } -static struct intel_shared_dpll * -cnl_get_dpll(struct intel_crtc_state *crtc_state, - struct intel_encoder *encoder) +static bool cnl_get_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder) { + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); struct intel_shared_dpll *pll; bool bret; @@ -2343,18 +2381,18 @@ cnl_get_dpll(struct intel_crtc_state *crtc_state, bret = cnl_ddi_hdmi_pll_dividers(crtc_state); if (!bret) { DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n"); - return NULL; + return false; } } else if (intel_crtc_has_dp_encoder(crtc_state)) { bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state); if (!bret) { DRM_DEBUG_KMS("Could not set DP dpll HW state.\n"); - return NULL; + return false; } } else { DRM_DEBUG_KMS("Skip DPLL setup for output_types 0x%x\n", crtc_state->output_types); - return NULL; + return false; } pll = intel_find_shared_dpll(crtc_state, @@ -2362,12 +2400,12 @@ cnl_get_dpll(struct intel_crtc_state *crtc_state, DPLL_ID_SKL_DPLL2); if (!pll) { DRM_DEBUG_KMS("No PLL selected\n"); - return NULL; + return false; } intel_reference_shared_dpll(pll, crtc_state); - return pll; + return true; } static void cnl_dump_hw_state(struct drm_i915_private *dev_priv, @@ -2394,7 +2432,8 @@ static const struct dpll_info cnl_plls[] = { static const struct intel_dpll_mgr cnl_pll_mgr = { .dpll_info = cnl_plls, - .get_dpll = cnl_get_dpll, + .get_dplls = cnl_get_dpll, + .put_dplls = intel_put_dpll, .dump_hw_state = cnl_dump_hw_state, }; @@ -2792,11 +2831,13 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state) return true; } -static struct intel_shared_dpll * -icl_get_dpll(struct intel_crtc_state *crtc_state, - struct intel_encoder *encoder) +static bool icl_get_dplls(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); struct intel_digital_port *intel_dig_port; struct intel_shared_dpll *pll; enum port port = encoder->port; @@ -2831,24 +2872,24 @@ icl_get_dpll(struct intel_crtc_state *crtc_state, } } else { MISSING_CASE(port); - return NULL; + return false; } if (!ret) { 
DRM_DEBUG_KMS("Could not calculate PLL state.\n"); - return NULL; + return false; } pll = intel_find_shared_dpll(crtc_state, min, max); if (!pll) { DRM_DEBUG_KMS("No PLL selected\n"); - return NULL; + return false; } intel_reference_shared_dpll(pll, crtc_state); - return pll; + return true; } static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv, @@ -3223,7 +3264,8 @@ static const struct dpll_info icl_plls[] = { static const struct intel_dpll_mgr icl_pll_mgr = { .dpll_info = icl_plls, - .get_dpll = icl_get_dpll, + .get_dplls = icl_get_dplls, + .put_dplls = intel_put_dpll, .dump_hw_state = icl_dump_hw_state, }; @@ -3235,7 +3277,8 @@ static const struct dpll_info ehl_plls[] = { static const struct intel_dpll_mgr ehl_pll_mgr = { .dpll_info = ehl_plls, - .get_dpll = icl_get_dpll, + .get_dplls = icl_get_dplls, + .put_dplls = intel_put_dpll, .dump_hw_state = icl_dump_hw_state, }; @@ -3287,50 +3330,64 @@ void intel_shared_dpll_init(struct drm_device *dev) } /** - * intel_get_shared_dpll - get a shared DPLL for CRTC and encoder combination - * @crtc_state: atomic state for the crtc + * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination + * @state: atomic state + * @crtc: CRTC to reserve DPLLs for * @encoder: encoder * - * Find an appropriate DPLL for the given CRTC and encoder combination. A - * reference from the @crtc_state to the returned pll is registered in the - * atomic state. That configuration is made effective by calling - * intel_shared_dpll_swap_state(). The reference should be released by calling - * intel_release_shared_dpll(). + * This function reserves all required DPLLs for the given CRTC and encoder + * combination in the current atomic commit @state and the new @crtc atomic + * state. + * + * The new configuration in the atomic commit @state is made effective by + * calling intel_shared_dpll_swap_state(). + * + * The reserved DPLLs should be released by calling + * intel_release_shared_dplls(). * * Returns: - * A shared DPLL to be used by @crtc_state and @encoder. + * True if all required DPLLs were successfully reserved. */ -struct intel_shared_dpll * -intel_get_shared_dpll(struct intel_crtc_state *crtc_state, - struct intel_encoder *encoder) +bool intel_reserve_shared_dplls(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct drm_i915_private *dev_priv = to_i915(state->base.dev); const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr; if (WARN_ON(!dpll_mgr)) - return NULL; + return false; - return dpll_mgr->get_dpll(crtc_state, encoder); + return dpll_mgr->get_dplls(state, crtc, encoder); } /** - * intel_release_shared_dpll - end use of DPLL by CRTC in atomic state - * @dpll: dpll in use by @crtc - * @crtc: crtc + * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state * @state: atomic state + * @crtc: crtc from which the DPLLs are to be released * - * This function releases the reference from @crtc to @dpll from the - * atomic @state. The new configuration is made effective by calling - * intel_shared_dpll_swap_state(). + * This function releases all DPLLs reserved by intel_reserve_shared_dplls() + * from the current atomic commit @state and the old @crtc atomic state. + * + * The new configuration in the atomic commit @state is made effective by + * calling intel_shared_dpll_swap_state(). 
*/ -void intel_release_shared_dpll(struct intel_shared_dpll *dpll, - struct intel_crtc *crtc, - struct drm_atomic_state *state) +void intel_release_shared_dplls(struct intel_atomic_state *state, + struct intel_crtc *crtc) { - struct intel_shared_dpll_state *shared_dpll_state; + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr; + + /* + * FIXME: this function is called for every platform having a + * compute_clock hook, even though the platform doesn't yet support + * the shared DPLL framework and intel_reserve_shared_dplls() is not + * called on those. + */ + if (!dpll_mgr) + return; - shared_dpll_state = intel_atomic_get_shared_dpll_state(state); - shared_dpll_state[dpll->info->id].crtc_mask &= ~(1 << crtc->pipe); + dpll_mgr->put_dplls(state, crtc); } /** diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h index d0570414f3d1..16ddab138574 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h @@ -39,6 +39,7 @@ struct drm_atomic_state; struct drm_device; struct drm_i915_private; +struct intel_atomic_state; struct intel_crtc; struct intel_crtc_state; struct intel_encoder; @@ -195,7 +196,7 @@ struct intel_dpll_hw_state { * future state which would be applied by an atomic mode set (stored in * a struct &intel_atomic_state). * - * See also intel_get_shared_dpll() and intel_release_shared_dpll(). + * See also intel_reserve_shared_dplls() and intel_release_shared_dplls(). */ struct intel_shared_dpll_state { /** @@ -331,11 +332,11 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv, bool state); #define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true) #define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false) -struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc_state *state, - struct intel_encoder *encoder); -void intel_release_shared_dpll(struct intel_shared_dpll *dpll, - struct intel_crtc *crtc, - struct drm_atomic_state *state); +bool intel_reserve_shared_dplls(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder); +void intel_release_shared_dplls(struct intel_atomic_state *state, + struct intel_crtc *crtc); void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state); void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state); void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state); -- cgit v1.2.3 From 01b24f50b67f49e40172c58d5e8034630f776110 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 28 Jun 2019 17:36:29 +0300 Subject: drm/i915: Sanitize the shared DPLL find/reference interface MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Pass the PLL HW state to the PLL find/reference functions making it clearer what is their input. Also pass to these the atomic state and the CRTC object instead of the CRTC state, since they don't require the latter. Move setting the PLL in the crtc_state to the get_dpll() hook, which is the more logical place for this, where the related PLL HW state was also set. This refactoring is also a preparation for a follow-up patch that will have to find/reference multiple PLLs. No functional changes. 
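(For illustration only, not part of this patch: with the above, a platform get_dpll() hook ends up following roughly the pattern below. foo_get_dpll() and the WRPLL ID range are placeholders for a platform specific hook and its PLL IDs; the helpers are the ones reworked in this diff.)

  static bool foo_get_dpll(struct intel_atomic_state *state,
                           struct intel_crtc *crtc,
                           struct intel_encoder *encoder)
  {
          struct intel_crtc_state *crtc_state =
                  intel_atomic_get_new_crtc_state(state, crtc);
          struct intel_shared_dpll *pll;

          /* Platform code computes crtc_state->dpll_hw_state first. */

          pll = intel_find_shared_dpll(state, crtc,
                                       &crtc_state->dpll_hw_state,
                                       DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
          if (!pll)
                  return false;

          intel_reference_shared_dpll(state, crtc,
                                      pll, &crtc_state->dpll_hw_state);

          /* Setting the PLL now happens in the hook, not in the helpers. */
          crtc_state->shared_dpll = pll;

          return true;
  }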
Cc: Ville Syrjälä Cc: Daniel Vetter Cc: Maarten Lankhorst Signed-off-by: Imre Deak Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190628143635.22066-18-imre.deak@intel.com --- drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 107 +++++++++++++++++--------- 1 file changed, 70 insertions(+), 37 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index 3fbc975851fa..14bbab45836d 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -244,17 +244,18 @@ out: } static struct intel_shared_dpll * -intel_find_shared_dpll(struct intel_crtc_state *crtc_state, +intel_find_shared_dpll(struct intel_atomic_state *state, + const struct intel_crtc *crtc, + const struct intel_dpll_hw_state *pll_state, enum intel_dpll_id range_min, enum intel_dpll_id range_max) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_shared_dpll *pll, *unused_pll = NULL; struct intel_shared_dpll_state *shared_dpll; enum intel_dpll_id i; - shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state); + shared_dpll = intel_atomic_get_shared_dpll_state(&state->base); for (i = range_min; i <= range_max; i++) { pll = &dev_priv->shared_dplls[i]; @@ -266,9 +267,9 @@ intel_find_shared_dpll(struct intel_crtc_state *crtc_state, continue; } - if (memcmp(&crtc_state->dpll_hw_state, + if (memcmp(pll_state, &shared_dpll[i].hw_state, - sizeof(crtc_state->dpll_hw_state)) == 0) { + sizeof(*pll_state)) == 0) { DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n", crtc->base.base.id, crtc->base.name, pll->info->name, @@ -290,20 +291,19 @@ intel_find_shared_dpll(struct intel_crtc_state *crtc_state, } static void -intel_reference_shared_dpll(struct intel_shared_dpll *pll, - struct intel_crtc_state *crtc_state) +intel_reference_shared_dpll(struct intel_atomic_state *state, + const struct intel_crtc *crtc, + const struct intel_shared_dpll *pll, + const struct intel_dpll_hw_state *pll_state) { struct intel_shared_dpll_state *shared_dpll; - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); const enum intel_dpll_id id = pll->info->id; - shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state); + shared_dpll = intel_atomic_get_shared_dpll_state(&state->base); if (shared_dpll[id].crtc_mask == 0) - shared_dpll[id].hw_state = - crtc_state->dpll_hw_state; + shared_dpll[id].hw_state = *pll_state; - crtc_state->shared_dpll = pll; DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->info->name, pipe_name(crtc->pipe)); @@ -463,7 +463,8 @@ static bool ibx_get_dpll(struct intel_atomic_state *state, crtc->base.base.id, crtc->base.name, pll->info->name); } else { - pll = intel_find_shared_dpll(crtc_state, + pll = intel_find_shared_dpll(state, crtc, + &crtc_state->dpll_hw_state, DPLL_ID_PCH_PLL_A, DPLL_ID_PCH_PLL_B); } @@ -472,7 +473,10 @@ static bool ibx_get_dpll(struct intel_atomic_state *state, return false; /* reference the pll */ - intel_reference_shared_dpll(pll, crtc_state); + intel_reference_shared_dpll(state, crtc, + pll, &crtc_state->dpll_hw_state); + + crtc_state->shared_dpll = pll; return true; } @@ -791,8 +795,12 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */, *r2_out = best.r2; } -static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(struct intel_crtc_state *crtc_state) +static struct intel_shared_dpll * 
+hsw_ddi_hdmi_get_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc) { + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); struct intel_shared_dpll *pll; u32 val; unsigned int p, n2, r2; @@ -805,7 +813,8 @@ static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(struct intel_crtc_state * crtc_state->dpll_hw_state.wrpll = val; - pll = intel_find_shared_dpll(crtc_state, + pll = intel_find_shared_dpll(state, crtc, + &crtc_state->dpll_hw_state, DPLL_ID_WRPLL1, DPLL_ID_WRPLL2); if (!pll) @@ -857,7 +866,7 @@ static bool hsw_get_dpll(struct intel_atomic_state *state, sizeof(crtc_state->dpll_hw_state)); if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { - pll = hsw_ddi_hdmi_get_dpll(crtc_state); + pll = hsw_ddi_hdmi_get_dpll(state, crtc); } else if (intel_crtc_has_dp_encoder(crtc_state)) { pll = hsw_ddi_dp_get_dpll(crtc_state); } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) { @@ -867,7 +876,8 @@ static bool hsw_get_dpll(struct intel_atomic_state *state, crtc_state->dpll_hw_state.spll = SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC; - pll = intel_find_shared_dpll(crtc_state, + pll = intel_find_shared_dpll(state, crtc, + &crtc_state->dpll_hw_state, DPLL_ID_SPLL, DPLL_ID_SPLL); } else { return false; @@ -876,7 +886,10 @@ static bool hsw_get_dpll(struct intel_atomic_state *state, if (!pll) return false; - intel_reference_shared_dpll(pll, crtc_state); + intel_reference_shared_dpll(state, crtc, + pll, &crtc_state->dpll_hw_state); + + crtc_state->shared_dpll = pll; return true; } @@ -1437,17 +1450,22 @@ static bool skl_get_dpll(struct intel_atomic_state *state, } if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) - pll = intel_find_shared_dpll(crtc_state, + pll = intel_find_shared_dpll(state, crtc, + &crtc_state->dpll_hw_state, DPLL_ID_SKL_DPLL0, DPLL_ID_SKL_DPLL0); else - pll = intel_find_shared_dpll(crtc_state, + pll = intel_find_shared_dpll(state, crtc, + &crtc_state->dpll_hw_state, DPLL_ID_SKL_DPLL1, DPLL_ID_SKL_DPLL3); if (!pll) return false; - intel_reference_shared_dpll(pll, crtc_state); + intel_reference_shared_dpll(state, crtc, + pll, &crtc_state->dpll_hw_state); + + crtc_state->shared_dpll = pll; return true; } @@ -1880,7 +1898,10 @@ static bool bxt_get_dpll(struct intel_atomic_state *state, DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n", crtc->base.base.id, crtc->base.name, pll->info->name); - intel_reference_shared_dpll(pll, crtc_state); + intel_reference_shared_dpll(state, crtc, + pll, &crtc_state->dpll_hw_state); + + crtc_state->shared_dpll = pll; return true; } @@ -2395,7 +2416,8 @@ static bool cnl_get_dpll(struct intel_atomic_state *state, return false; } - pll = intel_find_shared_dpll(crtc_state, + pll = intel_find_shared_dpll(state, crtc, + &crtc_state->dpll_hw_state, DPLL_ID_SKL_DPLL0, DPLL_ID_SKL_DPLL2); if (!pll) { @@ -2403,7 +2425,10 @@ static bool cnl_get_dpll(struct intel_atomic_state *state, return false; } - intel_reference_shared_dpll(pll, crtc_state); + intel_reference_shared_dpll(state, crtc, + pll, &crtc_state->dpll_hw_state); + + crtc_state->shared_dpll = pll; return true; } @@ -2545,7 +2570,8 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state, } static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state, - struct intel_encoder *encoder) + struct intel_encoder *encoder, + struct intel_dpll_hw_state *pll_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); u32 cfgcr0, cfgcr1; @@ -2572,11 +2598,10 @@ static bool 
icl_calc_dpll_state(struct intel_crtc_state *crtc_state, DPLL_CFGCR1_PDIV(pll_params.pdiv) | DPLL_CFGCR1_CENTRAL_FREQ_8400; - memset(&crtc_state->dpll_hw_state, 0, - sizeof(crtc_state->dpll_hw_state)); + memset(pll_state, 0, sizeof(*pll_state)); - crtc_state->dpll_hw_state.cfgcr0 = cfgcr0; - crtc_state->dpll_hw_state.cfgcr1 = cfgcr1; + pll_state->cfgcr0 = cfgcr0; + pll_state->cfgcr1 = cfgcr1; return true; } @@ -2666,10 +2691,10 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc, * The specification for this function uses real numbers, so the math had to be * adapted to integer-only calculation, that's why it looks so different. */ -static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state) +static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, + struct intel_dpll_hw_state *pll_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); - struct intel_dpll_hw_state *pll_state = &crtc_state->dpll_hw_state; int refclk_khz = dev_priv->cdclk.hw.ref; int clock = crtc_state->port_clock; u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac; @@ -2847,7 +2872,8 @@ static bool icl_get_dplls(struct intel_atomic_state *state, if (intel_port_is_combophy(dev_priv, port)) { min = DPLL_ID_ICL_DPLL0; max = DPLL_ID_ICL_DPLL1; - ret = icl_calc_dpll_state(crtc_state, encoder); + ret = icl_calc_dpll_state(crtc_state, encoder, + &crtc_state->dpll_hw_state); } else if (intel_port_is_tc(dev_priv, port)) { if (encoder->type == INTEL_OUTPUT_DP_MST) { struct intel_dp_mst_encoder *mst_encoder; @@ -2861,14 +2887,16 @@ static bool icl_get_dplls(struct intel_atomic_state *state, if (intel_dig_port->tc_mode == TC_PORT_TBT_ALT) { min = DPLL_ID_ICL_TBTPLL; max = min; - ret = icl_calc_dpll_state(crtc_state, encoder); + ret = icl_calc_dpll_state(crtc_state, encoder, + &crtc_state->dpll_hw_state); } else { enum tc_port tc_port; tc_port = intel_port_to_tc(dev_priv, port); min = icl_tc_port_to_pll_id(tc_port); max = min; - ret = icl_calc_mg_pll_state(crtc_state); + ret = icl_calc_mg_pll_state(crtc_state, + &crtc_state->dpll_hw_state); } } else { MISSING_CASE(port); @@ -2881,13 +2909,18 @@ static bool icl_get_dplls(struct intel_atomic_state *state, } - pll = intel_find_shared_dpll(crtc_state, min, max); + pll = intel_find_shared_dpll(state, crtc, + &crtc_state->dpll_hw_state, + min, max); if (!pll) { DRM_DEBUG_KMS("No PLL selected\n"); return false; } - intel_reference_shared_dpll(pll, crtc_state); + intel_reference_shared_dpll(state, crtc, + pll, &crtc_state->dpll_hw_state); + + crtc_state->shared_dpll = pll; return true; } -- cgit v1.2.3 From 726ca99666dba067ac8fad32bed9d3efcfc57f9d Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 28 Jun 2019 17:36:30 +0300 Subject: drm/i915/icl: Split getting the DPLLs to port type specific functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For clarity factor out the combo PHY and TypeC PHY specific code from icl_get_dplls() into their own functions. No functional changes. 
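(For reference, not additional code: the resulting top-level dispatcher, shown in full in the diff below, reduces to a port type check, with the actual PLL selection moved into the two new helpers.)

  static bool icl_get_dplls(struct intel_atomic_state *state,
                            struct intel_crtc *crtc,
                            struct intel_encoder *encoder)
  {
          struct drm_i915_private *dev_priv = to_i915(state->base.dev);
          enum port port = encoder->port;

          if (intel_port_is_combophy(dev_priv, port))
                  return icl_get_combo_phy_dpll(state, crtc, encoder);
          else if (intel_port_is_tc(dev_priv, port))
                  return icl_get_tc_phy_dplls(state, crtc, encoder);

          MISSING_CASE(port);

          return false;
  }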
Cc: Ville Syrjälä Cc: Daniel Vetter Cc: Maarten Lankhorst Signed-off-by: Imre Deak Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190628143635.22066-19-imre.deak@intel.com --- drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 100 +++++++++++++++++--------- 1 file changed, 66 insertions(+), 34 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index 14bbab45836d..85c38eed93a8 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -2856,51 +2856,66 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, return true; } -static bool icl_get_dplls(struct intel_atomic_state *state, - struct intel_crtc *crtc, - struct intel_encoder *encoder) +static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder) +{ + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + struct intel_shared_dpll *pll; + + if (!icl_calc_dpll_state(crtc_state, encoder, + &crtc_state->dpll_hw_state)) { + DRM_DEBUG_KMS("Could not calculate combo PHY PLL state.\n"); + + return false; + } + + pll = intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state, + DPLL_ID_ICL_DPLL0, + DPLL_ID_ICL_DPLL1); + if (!pll) { + DRM_DEBUG_KMS("No combo PHY PLL found for port %c\n", + port_name(encoder->port)); + return false; + } + + intel_reference_shared_dpll(state, crtc, + pll, &crtc_state->dpll_hw_state); + + crtc_state->shared_dpll = pll; + + return true; +} + +static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - struct intel_digital_port *intel_dig_port; + enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port); + struct intel_digital_port *dig_port; struct intel_shared_dpll *pll; - enum port port = encoder->port; enum intel_dpll_id min, max; bool ret; - if (intel_port_is_combophy(dev_priv, port)) { - min = DPLL_ID_ICL_DPLL0; - max = DPLL_ID_ICL_DPLL1; + if (encoder->type == INTEL_OUTPUT_DP_MST) + dig_port = enc_to_mst(&encoder->base)->primary; + else + dig_port = enc_to_dig_port(&encoder->base); + + if (dig_port->tc_mode == TC_PORT_TBT_ALT) { + min = DPLL_ID_ICL_TBTPLL; + max = min; ret = icl_calc_dpll_state(crtc_state, encoder, &crtc_state->dpll_hw_state); - } else if (intel_port_is_tc(dev_priv, port)) { - if (encoder->type == INTEL_OUTPUT_DP_MST) { - struct intel_dp_mst_encoder *mst_encoder; - - mst_encoder = enc_to_mst(&encoder->base); - intel_dig_port = mst_encoder->primary; - } else { - intel_dig_port = enc_to_dig_port(&encoder->base); - } - - if (intel_dig_port->tc_mode == TC_PORT_TBT_ALT) { - min = DPLL_ID_ICL_TBTPLL; - max = min; - ret = icl_calc_dpll_state(crtc_state, encoder, - &crtc_state->dpll_hw_state); - } else { - enum tc_port tc_port; - - tc_port = intel_port_to_tc(dev_priv, port); - min = icl_tc_port_to_pll_id(tc_port); - max = min; - ret = icl_calc_mg_pll_state(crtc_state, - &crtc_state->dpll_hw_state); - } } else { - MISSING_CASE(port); - return false; + min = icl_tc_port_to_pll_id(tc_port); + max = min; + ret = icl_calc_mg_pll_state(crtc_state, + &crtc_state->dpll_hw_state); } if (!ret) { @@ -2925,6 +2940,23 @@ static bool icl_get_dplls(struct intel_atomic_state *state, return 
true; } +static bool icl_get_dplls(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + enum port port = encoder->port; + + if (intel_port_is_combophy(dev_priv, port)) + return icl_get_combo_phy_dpll(state, crtc, encoder); + else if (intel_port_is_tc(dev_priv, port)) + return icl_get_tc_phy_dplls(state, crtc, encoder); + + MISSING_CASE(port); + + return false; +} + static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state) -- cgit v1.2.3 From eea72c4c2161269a9046184607b71c5cfaabe477 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 28 Jun 2019 17:36:31 +0300 Subject: drm/i915/icl: Reserve all required PLLs for TypeC ports MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When enabling a TypeC port we need to reserve all the required PLLs for it, the TBT PLL for TBT-alt and the MG PHY PLL for DP-alt/legacy sinks. We can select the proper PLL for the current port mode from the reserved PLLs only once we selected and locked down the port mode for the whole duration of the port's active state. Resetting and locking down the port mode can in turn happen only during the modeset commit phase once we disabled the given port and the PLL it used. To support the above reserve-and-select PLL semantic we store the reserved PLLs along with their HW state in the CRTC state and provide a way to select the active PLL from these. The selected PLL along with its HW state will be pointed at by crtc_state->shared_dpll/dpll_hw_state as in the case of other port types. Besides reserving all required PLLs no functional changes. v2: - Fix releasing the ICL PLLs, not clearing the PLLs from the old crtc_state. - Init port_dpll to ICL_PORT_DPLL_DEFAULT closer to where port_dpll is used for symmetry with the corresponding ICL_PORT_DPLL_MG_PHY init. (Ville) v3: - Add FIXME: for clearing the ICL port PLLs from the new crtc state. 
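(Condensed illustration of the reserve-and-select semantics described above; the complete code is in the diff below. icl_get_tc_phy_dplls() reserves both the TBT PLL, in the ICL_PORT_DPLL_DEFAULT slot, and the MG PHY PLL, in the ICL_PORT_DPLL_MG_PHY slot, of the CRTC state, and the active one is copied into the legacy shared_dpll/dpll_hw_state fields only once the port mode is known:)

  void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
                                enum icl_port_dpll_id port_dpll_id)
  {
          struct icl_port_dpll *port_dpll =
                  &crtc_state->icl_port_dplls[port_dpll_id];

          crtc_state->shared_dpll = port_dpll->pll;
          crtc_state->dpll_hw_state = port_dpll->hw_state;
  }

  /* icl_update_active_dpll() picks ICL_PORT_DPLL_DEFAULT for TBT-alt mode
   * and ICL_PORT_DPLL_MG_PHY for DP-alt/legacy mode.
   */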
Cc: Ville Syrjälä Cc: Daniel Vetter Cc: Maarten Lankhorst Signed-off-by: Imre Deak Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190628143635.22066-20-imre.deak@intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 11 +- drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 153 +++++++++++++++++++------- drivers/gpu/drm/i915/display/intel_dpll_mgr.h | 9 ++ drivers/gpu/drm/i915/intel_drv.h | 9 ++ 4 files changed, 140 insertions(+), 42 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index b27cdaad611f..3417d86284f1 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -9947,6 +9947,7 @@ static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, struct intel_crtc_state *pipe_config) { + enum icl_port_dpll_id port_dpll_id; enum intel_dpll_id id; u32 temp; @@ -9954,22 +9955,28 @@ static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv, temp = I915_READ(DPCLKA_CFGCR0_ICL) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port); id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port); + port_dpll_id = ICL_PORT_DPLL_DEFAULT; } else if (intel_port_is_tc(dev_priv, port)) { u32 clk_sel = I915_READ(DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK; if (clk_sel == DDI_CLK_SEL_MG) { id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv, port)); + port_dpll_id = ICL_PORT_DPLL_MG_PHY; } else { WARN_ON(clk_sel < DDI_CLK_SEL_TBT_162); id = DPLL_ID_ICL_TBTPLL; + port_dpll_id = ICL_PORT_DPLL_DEFAULT; } } else { WARN(1, "Invalid port %x\n", port); return; } - pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); + pipe_config->icl_port_dplls[port_dpll_id].pll = + intel_get_shared_dpll_by_id(dev_priv, id); + + icl_set_active_port_dpll(pipe_config, port_dpll_id); } static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv, @@ -12102,6 +12109,8 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state) saved_state->scaler_state = crtc_state->scaler_state; saved_state->shared_dpll = crtc_state->shared_dpll; saved_state->dpll_hw_state = crtc_state->dpll_hw_state; + memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls, + sizeof(saved_state->icl_port_dplls)); saved_state->crc_enabled = crtc_state->crc_enabled; if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index 85c38eed93a8..bdc7150dbfac 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -2856,34 +2856,79 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, return true; } +/** + * icl_set_active_port_dpll - select the active port DPLL for a given CRTC + * @crtc_state: state for the CRTC to select the DPLL for + * @port_dpll_id: the active @port_dpll_id to select + * + * Select the given @port_dpll_id instance from the DPLLs reserved for the + * CRTC. 
+ */ +void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state, + enum icl_port_dpll_id port_dpll_id) +{ + struct icl_port_dpll *port_dpll = + &crtc_state->icl_port_dplls[port_dpll_id]; + + crtc_state->shared_dpll = port_dpll->pll; + crtc_state->dpll_hw_state = port_dpll->hw_state; +} + +static void icl_update_active_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder) +{ + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + struct intel_digital_port *primary_port; + enum icl_port_dpll_id port_dpll_id; + + primary_port = encoder->type == INTEL_OUTPUT_DP_MST ? + enc_to_mst(&encoder->base)->primary : + enc_to_dig_port(&encoder->base); + + switch (primary_port->tc_mode) { + case TC_PORT_TBT_ALT: + port_dpll_id = ICL_PORT_DPLL_DEFAULT; + break; + case TC_PORT_DP_ALT: + case TC_PORT_LEGACY: + port_dpll_id = ICL_PORT_DPLL_MG_PHY; + break; + } + + icl_set_active_port_dpll(crtc_state, port_dpll_id); +} + static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state, struct intel_crtc *crtc, struct intel_encoder *encoder) { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - struct intel_shared_dpll *pll; + struct icl_port_dpll *port_dpll = + &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT]; - if (!icl_calc_dpll_state(crtc_state, encoder, - &crtc_state->dpll_hw_state)) { + if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) { DRM_DEBUG_KMS("Could not calculate combo PHY PLL state.\n"); return false; } - pll = intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state, - DPLL_ID_ICL_DPLL0, - DPLL_ID_ICL_DPLL1); - if (!pll) { + port_dpll->pll = intel_find_shared_dpll(state, crtc, + &port_dpll->hw_state, + DPLL_ID_ICL_DPLL0, + DPLL_ID_ICL_DPLL1); + if (!port_dpll->pll) { DRM_DEBUG_KMS("No combo PHY PLL found for port %c\n", port_name(encoder->port)); return false; } intel_reference_shared_dpll(state, crtc, - pll, &crtc_state->dpll_hw_state); + port_dpll->pll, &port_dpll->hw_state); - crtc_state->shared_dpll = pll; + icl_update_active_dpll(state, crtc, encoder); return true; } @@ -2895,49 +2940,55 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state, struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port); - struct intel_digital_port *dig_port; - struct intel_shared_dpll *pll; - enum intel_dpll_id min, max; - bool ret; - - if (encoder->type == INTEL_OUTPUT_DP_MST) - dig_port = enc_to_mst(&encoder->base)->primary; - else - dig_port = enc_to_dig_port(&encoder->base); + struct icl_port_dpll *port_dpll; + enum intel_dpll_id dpll_id; - if (dig_port->tc_mode == TC_PORT_TBT_ALT) { - min = DPLL_ID_ICL_TBTPLL; - max = min; - ret = icl_calc_dpll_state(crtc_state, encoder, - &crtc_state->dpll_hw_state); - } else { - min = icl_tc_port_to_pll_id(tc_port); - max = min; - ret = icl_calc_mg_pll_state(crtc_state, - &crtc_state->dpll_hw_state); + port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT]; + if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) { + DRM_DEBUG_KMS("Could not calculate TBT PLL state.\n"); + return false; } - if (!ret) { - DRM_DEBUG_KMS("Could not calculate PLL state.\n"); + port_dpll->pll = intel_find_shared_dpll(state, crtc, + &port_dpll->hw_state, + DPLL_ID_ICL_TBTPLL, + DPLL_ID_ICL_TBTPLL); + if (!port_dpll->pll) { + DRM_DEBUG_KMS("No TBT-ALT PLL 
found\n"); return false; } + intel_reference_shared_dpll(state, crtc, + port_dpll->pll, &port_dpll->hw_state); - pll = intel_find_shared_dpll(state, crtc, - &crtc_state->dpll_hw_state, - min, max); - if (!pll) { - DRM_DEBUG_KMS("No PLL selected\n"); - return false; + port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY]; + if (!icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state)) { + DRM_DEBUG_KMS("Could not calculate MG PHY PLL state.\n"); + goto err_unreference_tbt_pll; } + dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv, + encoder->port)); + port_dpll->pll = intel_find_shared_dpll(state, crtc, + &port_dpll->hw_state, + dpll_id, + dpll_id); + if (!port_dpll->pll) { + DRM_DEBUG_KMS("No MG PHY PLL found\n"); + goto err_unreference_tbt_pll; + } intel_reference_shared_dpll(state, crtc, - pll, &crtc_state->dpll_hw_state); + port_dpll->pll, &port_dpll->hw_state); - crtc_state->shared_dpll = pll; + icl_update_active_dpll(state, crtc, encoder); return true; + +err_unreference_tbt_pll: + port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT]; + intel_unreference_shared_dpll(state, crtc, port_dpll->pll); + + return false; } static bool icl_get_dplls(struct intel_atomic_state *state, @@ -2957,6 +3008,26 @@ static bool icl_get_dplls(struct intel_atomic_state *state, return false; } +static void icl_put_dplls(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct intel_crtc_state *crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + enum icl_port_dpll_id id; + + for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) { + struct icl_port_dpll *port_dpll = + &crtc_state->icl_port_dplls[id]; + + if (!port_dpll->pll) + continue; + + intel_unreference_shared_dpll(state, crtc, port_dpll->pll); + + /* FIXME: Clear the icl_port_dplls from the new crtc state */ + } +} + static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state) @@ -3330,7 +3401,7 @@ static const struct dpll_info icl_plls[] = { static const struct intel_dpll_mgr icl_pll_mgr = { .dpll_info = icl_plls, .get_dplls = icl_get_dplls, - .put_dplls = intel_put_dpll, + .put_dplls = icl_put_dplls, .dump_hw_state = icl_dump_hw_state, }; @@ -3343,7 +3414,7 @@ static const struct dpll_info ehl_plls[] = { static const struct intel_dpll_mgr ehl_pll_mgr = { .dpll_info = ehl_plls, .get_dplls = icl_get_dplls, - .put_dplls = intel_put_dpll, + .put_dplls = icl_put_dplls, .dump_hw_state = icl_dump_hw_state, }; diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h index 16ddab138574..579f2ceafba3 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h @@ -141,6 +141,13 @@ enum intel_dpll_id { }; #define I915_NUM_PLLS 7 +enum icl_port_dpll_id { + ICL_PORT_DPLL_DEFAULT, + ICL_PORT_DPLL_MG_PHY, + + ICL_PORT_DPLL_COUNT, +}; + struct intel_dpll_hw_state { /* i9xx, pch plls */ u32 dpll; @@ -337,6 +344,8 @@ bool intel_reserve_shared_dplls(struct intel_atomic_state *state, struct intel_encoder *encoder); void intel_release_shared_dplls(struct intel_atomic_state *state, struct intel_crtc *crtc); +void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state, + enum icl_port_dpll_id port_dpll_id); void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state); void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state); void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state); diff 
--git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index d9e7d011ed4a..12a102e239c5 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -812,6 +812,15 @@ struct intel_crtc_state { /* Actual register state of the dpll, for shared dpll cross-checking. */ struct intel_dpll_hw_state dpll_hw_state; + /* + * ICL reserved DPLLs for the CRTC/port. The active PLL is selected by + * setting shared_dpll and dpll_hw_state to one of these reserved ones. + */ + struct icl_port_dpll { + struct intel_shared_dpll *pll; + struct intel_dpll_hw_state hw_state; + } icl_port_dplls[ICL_PORT_DPLL_COUNT]; + /* DSI PLL registers */ struct { u32 ctrl, div; -- cgit v1.2.3 From 24a7bfe0c2d7aec06956d48808cdfe2756f618ad Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 28 Jun 2019 17:36:32 +0300 Subject: drm/i915: Keep the TypeC port mode fixed when the port is active MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The TypeC port mode needs to stay fixed whenever the port is active. Do that by introducing a tc_link_refcount to account for active ports, avoiding changing the port mode if a reference is held. During the modeset commit phase we also have to reset the port mode and update the active PLL reflecting the new port mode. We can do this only once the port and its old PLL has been already disabled. Add the new encoder update_prepare/complete hooks that are called around the whole enabling sequence. The TypeC specific hooks of these will reset the port mode, update the active PLL if the port will be active and ensure that the port mode will stay fixed for the duration of the whole enabling sequence by holding a tc_link_refcount. During the port enabling, the pre_pll_enable/post_pll_disable hooks will take/release a tc_link_refcount to ensure the port mode stays fixed while the port is active. Changing the port mode should also be avoided during connector detection and AUX transfers if the port is active, we'll do that by checking the port's tc_link_refcount. When resetting the port mode we also have to take into account the maximum lanes provided by the FIA. It's guaranteed to be 4 in TBT-alt and legacy modes, but there may be less lanes available in DP-alt mode, in which case we have to fall back to TBT-alt mode. While at it also update icl_tc_phy_connect()'s code comment, reflecting the current way of switching the port mode. v2: - Add the update_prepare/complete hooks to the encoder instead of the connector. (Ville) - Simplify intel_connector_needs_modeset() by removing redundant if. (Ville) v3: - Fix sparse warning, marking static functions as such. v4: - Rebase on drm-tip. 
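(Condensed sketch of the new locking rule; the display power domain and wakeref handling are omitted here, see intel_tc.c in the diff below. The port mode may only be reset while no link reference pins it, and active ports hold such a reference for the whole enable/disable sequence.)

  static void __intel_tc_port_lock(struct intel_digital_port *dig_port,
                                   int required_lanes)
  {
          mutex_lock(&dig_port->tc_lock);

          /* Reset the port mode only if nothing pins it. */
          if (!dig_port->tc_link_refcount &&
              intel_tc_port_needs_reset(dig_port))
                  intel_tc_port_reset_mode(dig_port, required_lanes);
  }

  /*
   * intel_tc_port_get_link() takes the reference from update_prepare /
   * pre_pll_enable, intel_tc_port_put_link() drops it from update_complete /
   * post_pll_disable.
   */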
Cc: Ville Syrjälä Cc: Daniel Vetter Cc: Maarten Lankhorst Signed-off-by: Imre Deak Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190628143635.22066-21-imre.deak@intel.com --- drivers/gpu/drm/i915/display/intel_ddi.c | 41 +++++++++-- drivers/gpu/drm/i915/display/intel_display.c | 100 +++++++++++++++++++++++++- drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 28 +++++++- drivers/gpu/drm/i915/display/intel_dpll_mgr.h | 3 + drivers/gpu/drm/i915/display/intel_tc.c | 94 +++++++++++++++++------- drivers/gpu/drm/i915/display/intel_tc.h | 3 + drivers/gpu/drm/i915/intel_drv.h | 8 +++ 7 files changed, 247 insertions(+), 30 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 0c5bfbd66b19..404f555126a1 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -3623,6 +3623,30 @@ static void intel_ddi_set_fia_lane_count(struct intel_encoder *encoder, I915_WRITE(PORT_TX_DFLEXDPMLE1, val); } +static void +intel_ddi_update_prepare(struct intel_atomic_state *state, + struct intel_encoder *encoder, + struct intel_crtc *crtc) +{ + struct intel_crtc_state *crtc_state = + crtc ? intel_atomic_get_new_crtc_state(state, crtc) : NULL; + int required_lanes = crtc_state ? crtc_state->lane_count : 1; + + WARN_ON(crtc && crtc->active); + + intel_tc_port_get_link(enc_to_dig_port(&encoder->base), required_lanes); + if (crtc_state && crtc_state->base.active) + intel_update_active_dpll(state, crtc, encoder); +} + +static void +intel_ddi_update_complete(struct intel_atomic_state *state, + struct intel_encoder *encoder, + struct intel_crtc *crtc) +{ + intel_tc_port_put_link(enc_to_dig_port(&encoder->base)); +} + static void intel_ddi_pre_pll_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, @@ -3630,10 +3654,13 @@ intel_ddi_pre_pll_enable(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + bool is_tc_port = intel_port_is_tc(dev_priv, encoder->port); enum port port = encoder->port; - if (intel_crtc_has_dp_encoder(crtc_state) || - intel_port_is_tc(dev_priv, encoder->port)) + if (is_tc_port) + intel_tc_port_get_link(dig_port, crtc_state->lane_count); + + if (intel_crtc_has_dp_encoder(crtc_state) || is_tc_port) intel_display_power_get(dev_priv, intel_ddi_main_link_aux_domain(dig_port)); @@ -3658,11 +3685,14 @@ intel_ddi_post_pll_disable(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + bool is_tc_port = intel_port_is_tc(dev_priv, encoder->port); - if (intel_crtc_has_dp_encoder(crtc_state) || - intel_port_is_tc(dev_priv, encoder->port)) + if (intel_crtc_has_dp_encoder(crtc_state) || is_tc_port) intel_display_power_put_unchecked(dev_priv, intel_ddi_main_link_aux_domain(dig_port)); + + if (is_tc_port) + intel_tc_port_put_link(dig_port); } static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) @@ -4256,6 +4286,9 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) !port_info->supports_tbt; intel_tc_port_init(intel_dig_port, is_legacy); + + intel_encoder->update_prepare = intel_ddi_update_prepare; + intel_encoder->update_complete = intel_ddi_update_complete; } switch (port) { diff --git a/drivers/gpu/drm/i915/display/intel_display.c 
b/drivers/gpu/drm/i915/display/intel_display.c index 3417d86284f1..919f5ac844c8 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -6037,6 +6037,98 @@ static void intel_crtc_disable_planes(struct intel_atomic_state *state, intel_frontbuffer_flip(dev_priv, fb_bits); } +/* + * intel_connector_primary_encoder - get the primary encoder for a connector + * @connector: connector for which to return the encoder + * + * Returns the primary encoder for a connector. There is a 1:1 mapping from + * all connectors to their encoder, except for DP-MST connectors which have + * both a virtual and a primary encoder. These DP-MST primary encoders can be + * pointed to by as many DP-MST connectors as there are pipes. + */ +static struct intel_encoder * +intel_connector_primary_encoder(struct intel_connector *connector) +{ + struct intel_encoder *encoder; + + if (connector->mst_port) + return &dp_to_dig_port(connector->mst_port)->base; + + encoder = intel_attached_encoder(&connector->base); + WARN_ON(!encoder); + + return encoder; +} + +static bool +intel_connector_needs_modeset(struct intel_atomic_state *state, + const struct drm_connector_state *old_conn_state, + const struct drm_connector_state *new_conn_state) +{ + struct intel_crtc *old_crtc = old_conn_state->crtc ? + to_intel_crtc(old_conn_state->crtc) : NULL; + struct intel_crtc *new_crtc = new_conn_state->crtc ? + to_intel_crtc(new_conn_state->crtc) : NULL; + + return new_crtc != old_crtc || + (new_crtc && + needs_modeset(intel_atomic_get_new_crtc_state(state, new_crtc))); +} + +static void intel_encoders_update_prepare(struct intel_atomic_state *state) +{ + struct drm_connector_state *old_conn_state; + struct drm_connector_state *new_conn_state; + struct drm_connector *conn; + int i; + + for_each_oldnew_connector_in_state(&state->base, conn, + old_conn_state, new_conn_state, i) { + struct intel_encoder *encoder; + struct intel_crtc *crtc; + + if (!intel_connector_needs_modeset(state, + old_conn_state, + new_conn_state)) + continue; + + encoder = intel_connector_primary_encoder(to_intel_connector(conn)); + if (!encoder->update_prepare) + continue; + + crtc = new_conn_state->crtc ? + to_intel_crtc(new_conn_state->crtc) : NULL; + encoder->update_prepare(state, encoder, crtc); + } +} + +static void intel_encoders_update_complete(struct intel_atomic_state *state) +{ + struct drm_connector_state *old_conn_state; + struct drm_connector_state *new_conn_state; + struct drm_connector *conn; + int i; + + for_each_oldnew_connector_in_state(&state->base, conn, + old_conn_state, new_conn_state, i) { + struct intel_encoder *encoder; + struct intel_crtc *crtc; + + if (!intel_connector_needs_modeset(state, + old_conn_state, + new_conn_state)) + continue; + + encoder = intel_connector_primary_encoder(to_intel_connector(conn)); + if (!encoder->update_complete) + continue; + + crtc = new_conn_state->crtc ? + to_intel_crtc(new_conn_state->crtc) : NULL; + encoder->update_complete(state, encoder, crtc); + } +} + static void intel_encoders_pre_pll_enable(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, struct intel_atomic_state *state) @@ -13859,14 +13951,20 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) } } + if (state->modeset) + intel_encoders_update_prepare(state); + /* Now enable the clocks, plane, pipe, and connectors that we set up. 
*/ dev_priv->display.update_crtcs(state); - if (state->modeset) + if (state->modeset) { + intel_encoders_update_complete(state); + intel_set_cdclk_post_plane_update(dev_priv, &state->cdclk.actual, &dev_priv->cdclk.actual, state->cdclk.pipe); + } /* FIXME: We should call drm_atomic_helper_commit_hw_done() here * already, but still need the state for the delayed optimization. To diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index bdc7150dbfac..76a2c879efc2 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -1939,7 +1939,9 @@ struct intel_dpll_mgr { struct intel_encoder *encoder); void (*put_dplls)(struct intel_atomic_state *state, struct intel_crtc *crtc); - + void (*update_active_dpll)(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder); void (*dump_hw_state)(struct drm_i915_private *dev_priv, const struct intel_dpll_hw_state *hw_state); }; @@ -3402,6 +3404,7 @@ static const struct intel_dpll_mgr icl_pll_mgr = { .dpll_info = icl_plls, .get_dplls = icl_get_dplls, .put_dplls = icl_put_dplls, + .update_active_dpll = icl_update_active_dpll, .dump_hw_state = icl_dump_hw_state, }; @@ -3526,6 +3529,29 @@ void intel_release_shared_dplls(struct intel_atomic_state *state, dpll_mgr->put_dplls(state, crtc); } +/** + * intel_update_active_dpll - update the active DPLL for a CRTC/encoder + * @state: atomic state + * @crtc: the CRTC for which to update the active DPLL + * @encoder: encoder determining the type of port DPLL + * + * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state, + * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The + * DPLL selected will be based on the current mode of the encoder's port. + */ +void intel_update_active_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr; + + if (WARN_ON(!dpll_mgr)) + return; + + dpll_mgr->update_active_dpll(state, crtc, encoder); +} + /** * intel_shared_dpll_dump_hw_state - write hw_state to dmesg * @dev_priv: i915 drm device diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h index 579f2ceafba3..1668f8116908 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h @@ -346,6 +346,9 @@ void intel_release_shared_dplls(struct intel_atomic_state *state, struct intel_crtc *crtc); void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state, enum icl_port_dpll_id port_dpll_id); +void intel_update_active_dpll(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_encoder *encoder); void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state); void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state); void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index 58a228ba7696..ba6492bc0ee0 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -188,19 +188,12 @@ static bool icl_tc_phy_is_in_safe_mode(struct intel_digital_port *dig_port) * display, USB, etc. 
As a result, handshaking through FIA is required around * connect and disconnect to cleanly transfer ownership with the controller and * set the type-C power state. - * - * We could opt to only do the connect flow when we actually try to use the AUX - * channels or do a modeset, then immediately run the disconnect flow after - * usage, but there are some implications on this for a dynamic environment: - * things may go away or change behind our backs. So for now our driver is - * always trying to acquire ownership of the controller as soon as it gets an - * interrupt (or polls state and sees a port is connected) and only gives it - * back when it sees a disconnect. Implementation of a more fine-grained model - * will require a lot of coordination with user space and thorough testing for - * the extra possible cases. */ -static void icl_tc_phy_connect(struct intel_digital_port *dig_port) +static void icl_tc_phy_connect(struct intel_digital_port *dig_port, + int required_lanes) { + int max_lanes; + if (!icl_tc_phy_status_complete(dig_port)) { DRM_DEBUG_KMS("Port %s: PHY not ready\n", dig_port->tc_port_name); @@ -211,8 +204,9 @@ static void icl_tc_phy_connect(struct intel_digital_port *dig_port) !WARN_ON(dig_port->tc_legacy_port)) goto out_set_tbt_alt_mode; + max_lanes = intel_tc_port_fia_max_lane_count(dig_port); if (dig_port->tc_legacy_port) { - WARN_ON(intel_tc_port_fia_max_lane_count(dig_port) != 4); + WARN_ON(max_lanes != 4); dig_port->tc_mode = TC_PORT_LEGACY; return; @@ -228,6 +222,13 @@ static void icl_tc_phy_connect(struct intel_digital_port *dig_port) goto out_set_safe_mode; } + if (max_lanes < required_lanes) { + DRM_DEBUG_KMS("Port %s: PHY max lanes %d < required lanes %d\n", + dig_port->tc_port_name, + max_lanes, required_lanes); + goto out_set_safe_mode; + } + dig_port->tc_mode = TC_PORT_DP_ALT; return; @@ -311,7 +312,8 @@ intel_tc_port_get_target_mode(struct intel_digital_port *dig_port) TC_PORT_TBT_ALT; } -static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port) +static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port, + int required_lanes) { struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); enum tc_port_mode old_tc_mode = dig_port->tc_mode; @@ -319,7 +321,7 @@ static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port) intel_display_power_flush_work(dev_priv); icl_tc_phy_disconnect(dig_port); - icl_tc_phy_connect(dig_port); + icl_tc_phy_connect(dig_port, required_lanes); DRM_DEBUG_KMS("Port %s: TC port mode reset (%s -> %s)\n", dig_port->tc_port_name, @@ -327,6 +329,14 @@ static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port) tc_port_mode_name(dig_port->tc_mode)); } +static void +intel_tc_port_link_init_refcount(struct intel_digital_port *dig_port, + int refcount) +{ + WARN_ON(dig_port->tc_link_refcount); + dig_port->tc_link_refcount = refcount; +} + void intel_tc_port_sanitize(struct intel_digital_port *dig_port) { struct intel_encoder *encoder = &dig_port->base; @@ -344,11 +354,13 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port) if (!icl_tc_phy_is_connected(dig_port)) DRM_DEBUG_KMS("Port %s: PHY disconnected with %d active link(s)\n", dig_port->tc_port_name, active_links); + intel_tc_port_link_init_refcount(dig_port, active_links); + goto out; } if (dig_port->tc_legacy_port) - icl_tc_phy_connect(dig_port); + icl_tc_phy_connect(dig_port, 1); out: DRM_DEBUG_KMS("Port %s: sanitize mode (%s)\n", @@ -377,27 +389,60 @@ bool intel_tc_port_connected(struct intel_digital_port 
*dig_port) { bool is_connected; - mutex_lock(&dig_port->tc_lock); - - if (intel_tc_port_needs_reset(dig_port)) - intel_tc_port_reset_mode(dig_port); - + intel_tc_port_lock(dig_port); is_connected = tc_port_live_status_mask(dig_port) & BIT(dig_port->tc_mode); - - mutex_unlock(&dig_port->tc_lock); + intel_tc_port_unlock(dig_port); return is_connected; } -void intel_tc_port_lock(struct intel_digital_port *dig_port) +static void __intel_tc_port_lock(struct intel_digital_port *dig_port, + int required_lanes) { + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + intel_wakeref_t wakeref; + + wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE); + mutex_lock(&dig_port->tc_lock); - /* TODO: reset the TypeC port mode if needed */ + + if (!dig_port->tc_link_refcount && + intel_tc_port_needs_reset(dig_port)) + intel_tc_port_reset_mode(dig_port, required_lanes); + + WARN_ON(dig_port->tc_lock_wakeref); + dig_port->tc_lock_wakeref = wakeref; +} + +void intel_tc_port_lock(struct intel_digital_port *dig_port) +{ + __intel_tc_port_lock(dig_port, 1); } void intel_tc_port_unlock(struct intel_digital_port *dig_port) { + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + intel_wakeref_t wakeref = fetch_and_zero(&dig_port->tc_lock_wakeref); + + mutex_unlock(&dig_port->tc_lock); + + intel_display_power_put_async(dev_priv, POWER_DOMAIN_DISPLAY_CORE, + wakeref); +} + +void intel_tc_port_get_link(struct intel_digital_port *dig_port, + int required_lanes) +{ + __intel_tc_port_lock(dig_port, required_lanes); + dig_port->tc_link_refcount++; + intel_tc_port_unlock(dig_port); +} + +void intel_tc_port_put_link(struct intel_digital_port *dig_port) +{ + mutex_lock(&dig_port->tc_lock); + dig_port->tc_link_refcount--; mutex_unlock(&dig_port->tc_lock); } @@ -415,4 +460,5 @@ void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy) mutex_init(&dig_port->tc_lock); dig_port->tc_legacy_port = is_legacy; + dig_port->tc_link_refcount = 0; } diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h index b5af2fe60b22..31af7be96070 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.h +++ b/drivers/gpu/drm/i915/display/intel_tc.h @@ -19,6 +19,9 @@ int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port); void intel_tc_port_sanitize(struct intel_digital_port *dig_port); void intel_tc_port_lock(struct intel_digital_port *dig_port); void intel_tc_port_unlock(struct intel_digital_port *dig_port); +void intel_tc_port_get_link(struct intel_digital_port *dig_port, + int required_lanes); +void intel_tc_port_put_link(struct intel_digital_port *dig_port); void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 12a102e239c5..24c63ed45c6f 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -115,6 +115,9 @@ struct intel_encoder { int (*compute_config)(struct intel_encoder *, struct intel_crtc_state *, struct drm_connector_state *); + void (*update_prepare)(struct intel_atomic_state *, + struct intel_encoder *, + struct intel_crtc *); void (*pre_pll_enable)(struct intel_encoder *, const struct intel_crtc_state *, const struct drm_connector_state *); @@ -124,6 +127,9 @@ struct intel_encoder { void (*enable)(struct intel_encoder *, const struct intel_crtc_state *, const struct drm_connector_state *); + void (*update_complete)(struct intel_atomic_state *, + struct intel_encoder 
*, + struct intel_crtc *); void (*disable)(struct intel_encoder *, const struct intel_crtc_state *, const struct drm_connector_state *); @@ -1234,6 +1240,8 @@ struct intel_digital_port { enum aux_ch aux_ch; enum intel_display_power_domain ddi_io_power_domain; struct mutex tc_lock; /* protects the TypeC port mode */ + intel_wakeref_t tc_lock_wakeref; + int tc_link_refcount; bool tc_legacy_port:1; char tc_port_name[8]; enum tc_port_mode tc_mode; -- cgit v1.2.3 From d5ce34da31456a28fb2c35db0d0f57b7e4af477e Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 28 Jun 2019 17:36:33 +0300 Subject: drm/i915: Add state verification for the TypeC port mode MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add state verification for the TypeC port mode wrt. the port's AUX power well enabling/disabling. Also check the correctness of changing the port mode: - When enabling/disabling the AUX power well for a TypeC port we must hold the TypeC port lock - the case for AUX transfers - or hold a Type C port link reference - the case for modeset enabling/disabling. - When changing the TypeC port mode the port's AUX power domain must be disabled. v2: (Ville) - Simplify power_well_async_ref_count(). - Fix the commit log, clarifying what are the valid conditions to enable/disable the AUX power wells. Cc: José Roberto de Souza Cc: Rodrigo Vivi Signed-off-by: Imre Deak Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190628143635.22066-22-imre.deak@intel.com --- drivers/gpu/drm/i915/display/intel_display_power.c | 95 ++++++++++++++++++++-- drivers/gpu/drm/i915/display/intel_tc.c | 2 + drivers/gpu/drm/i915/display/intel_tc.h | 10 ++- 3 files changed, 98 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index fd13cd68deae..86a38116dc3a 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -17,6 +17,7 @@ #include "intel_drv.h" #include "intel_hotplug.h" #include "intel_sideband.h" +#include "intel_tc.h" bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, enum i915_power_well_id power_well_id); @@ -447,26 +448,106 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, #define ICL_TBT_AUX_PW_TO_CH(pw_idx) \ ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C) +static enum aux_ch icl_tc_phy_aux_ch(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + int pw_idx = power_well->desc->hsw.idx; + + return power_well->desc->hsw.is_tc_tbt ? 
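(Condensed view of the condition being asserted; the full helpers, including the bypass for asynchronously released power references, are in the diff below.)

  static inline int intel_tc_port_ref_held(struct intel_digital_port *dig_port)
  {
          return mutex_is_locked(&dig_port->tc_lock) ||
                 dig_port->tc_link_refcount;
  }

  /* The TypeC AUX power well enable/disable hooks now do:
   *
   *         WARN_ON(!intel_tc_port_ref_held(dig_port));
   *
   * after resolving dig_port from the power well's AUX channel.
   */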
ICL_TBT_AUX_PW_TO_CH(pw_idx) : + ICL_AUX_PW_TO_CH(pw_idx); +} + +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) + +static u64 async_put_domains_mask(struct i915_power_domains *power_domains); + +static int power_well_async_ref_count(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + int refs = hweight64(power_well->desc->domains & + async_put_domains_mask(&dev_priv->power_domains)); + + WARN_ON(refs > power_well->count); + + return refs; +} + +static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well); + struct intel_digital_port *dig_port = NULL; + struct intel_encoder *encoder; + + /* Bypass the check if all references are released asynchronously */ + if (power_well_async_ref_count(dev_priv, power_well) == + power_well->count) + return; + + aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well); + + for_each_intel_encoder(&dev_priv->drm, encoder) { + if (!intel_port_is_tc(dev_priv, encoder->port)) + continue; + + /* We'll check the MST primary port */ + if (encoder->type == INTEL_OUTPUT_DP_MST) + continue; + + dig_port = enc_to_dig_port(&encoder->base); + if (WARN_ON(!dig_port)) + continue; + + if (dig_port->aux_ch != aux_ch) { + dig_port = NULL; + continue; + } + + break; + } + + if (WARN_ON(!dig_port)) + return; + + WARN_ON(!intel_tc_port_ref_held(dig_port)); +} + +#else + +static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ +} + +#endif + static void icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - int pw_idx = power_well->desc->hsw.idx; - bool is_tbt = power_well->desc->hsw.is_tc_tbt; - enum aux_ch aux_ch; + enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well); u32 val; - aux_ch = is_tbt ? 
ICL_TBT_AUX_PW_TO_CH(pw_idx) : - ICL_AUX_PW_TO_CH(pw_idx); + icl_tc_port_assert_ref_held(dev_priv, power_well); + val = I915_READ(DP_AUX_CH_CTL(aux_ch)); val &= ~DP_AUX_CH_CTL_TBT_IO; - if (is_tbt) + if (power_well->desc->hsw.is_tc_tbt) val |= DP_AUX_CH_CTL_TBT_IO; I915_WRITE(DP_AUX_CH_CTL(aux_ch), val); hsw_power_well_enable(dev_priv, power_well); } +static void +icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + icl_tc_port_assert_ref_held(dev_priv, power_well); + + hsw_power_well_disable(dev_priv, power_well); +} + /* * We should only use the power well if we explicitly asked the hardware to * enable it, so check if it's enabled and also check if we've requested it to @@ -3119,7 +3200,7 @@ static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = { static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = { .sync_hw = hsw_power_well_sync_hw, .enable = icl_tc_phy_aux_power_well_enable, - .disable = hsw_power_well_disable, + .disable = icl_tc_phy_aux_power_well_disable, .is_enabled = hsw_power_well_enabled, }; diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index ba6492bc0ee0..467dd3ec541b 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -319,6 +319,8 @@ static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port, enum tc_port_mode old_tc_mode = dig_port->tc_mode; intel_display_power_flush_work(dev_priv); + WARN_ON(intel_display_power_is_enabled(dev_priv, + intel_aux_power_domain(dig_port))); icl_tc_phy_disconnect(dig_port); icl_tc_phy_connect(dig_port, required_lanes); diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h index 31af7be96070..8adc107cdbcb 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.h +++ b/drivers/gpu/drm/i915/display/intel_tc.h @@ -7,8 +7,8 @@ #define __INTEL_TC_H__ #include - -struct intel_digital_port; +#include +#include "intel_drv.h" void icl_tc_phy_disconnect(struct intel_digital_port *dig_port); @@ -23,6 +23,12 @@ void intel_tc_port_get_link(struct intel_digital_port *dig_port, int required_lanes); void intel_tc_port_put_link(struct intel_digital_port *dig_port); +static inline int intel_tc_port_ref_held(struct intel_digital_port *dig_port) +{ + return mutex_is_locked(&dig_port->tc_lock) || + dig_port->tc_link_refcount; +} + void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy); #endif /* __INTEL_TC_H__ */ -- cgit v1.2.3 From a171f8e7f466345a2a0c7bacdc7992777e1c1f37 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 28 Jun 2019 17:36:34 +0300 Subject: drm/i915: Remove unneeded disconnect in TypeC legacy port mode MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Disconnecting the TypeC PHY when the port is in legacy mode is not necessary: - BSpec doesn't specify a disconnect sequence for legacy mode. - The use of the PHY is dedicated for the display in legacy mode. - We keep the PHY always connected during runtime as well in legacy mode. We disconnect the PHY when needed during a disabling modeset for the port, so we can also remove the disconnect call from the destroy hook. 
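In short, a legacy port keeps its PHY for the whole lifetime of the driver, so its teardown becomes a deliberate no-op while DP-alt still has to fall back to the safe (TBT) mode. A minimal standalone sketch of that per-mode policy, with made-up names and none of the real PHY programming:

#include <stdio.h>

/* Simplified stand-in for the driver's TypeC port modes. */
enum tc_port_mode { TC_PORT_TBT_ALT, TC_PORT_DP_ALT, TC_PORT_LEGACY };

/*
 * Legacy ports keep the PHY for the lifetime of the driver, so their
 * teardown is intentionally a no-op; only DP-alt has to fall back to
 * the safe (TBT) mode.
 */
static void phy_disconnect(enum tc_port_mode *mode)
{
        switch (*mode) {
        case TC_PORT_LEGACY:
                /* Nothing to do: a legacy port is never disconnected. */
                break;
        case TC_PORT_DP_ALT:
                *mode = TC_PORT_TBT_ALT;        /* back to the safe mode */
                break;
        case TC_PORT_TBT_ALT:
                /* Already in the safe mode. */
                break;
        }
}

int main(void)
{
        enum tc_port_mode mode = TC_PORT_LEGACY;

        phy_disconnect(&mode);
        printf("legacy port still connected: %d\n", mode == TC_PORT_LEGACY);
        return 0;
}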
Cc: José Roberto de Souza Cc: Rodrigo Vivi Signed-off-by: Imre Deak Reviewed-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190628143635.22066-23-imre.deak@intel.com --- drivers/gpu/drm/i915/display/intel_ddi.c | 21 +-------------------- drivers/gpu/drm/i915/display/intel_tc.c | 4 +++- drivers/gpu/drm/i915/display/intel_tc.h | 2 -- 3 files changed, 4 insertions(+), 23 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 404f555126a1..8b5350931d16 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -3946,31 +3946,12 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder, return 0; } -static void intel_ddi_encoder_suspend(struct intel_encoder *encoder) -{ - struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); - - intel_dp_encoder_suspend(encoder); - - /* - * TODO: disconnect also from USB DP alternate mode once we have a - * way to handle the modeset restore in that mode during resume - * even if the sink has disappeared while being suspended. - */ - if (dig_port->tc_legacy_port) - icl_tc_phy_disconnect(dig_port); -} - static void intel_ddi_encoder_destroy(struct drm_encoder *encoder) { struct intel_digital_port *dig_port = enc_to_dig_port(encoder); - struct drm_i915_private *i915 = to_i915(encoder->dev); intel_dp_encoder_flush_work(encoder); - if (intel_port_is_tc(i915, dig_port->base.port)) - icl_tc_phy_disconnect(dig_port); - drm_encoder_cleanup(encoder); kfree(dig_port); } @@ -4262,7 +4243,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) intel_encoder->update_pipe = intel_ddi_update_pipe; intel_encoder->get_hw_state = intel_ddi_get_hw_state; intel_encoder->get_config = intel_ddi_get_config; - intel_encoder->suspend = intel_ddi_encoder_suspend; + intel_encoder->suspend = intel_dp_encoder_suspend; intel_encoder->get_power_domains = intel_ddi_get_power_domains; intel_encoder->type = INTEL_OUTPUT_DDI; intel_encoder->power_domain = intel_port_to_power_domain(port); diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index 467dd3ec541b..53103a9aa8a7 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -243,10 +243,12 @@ out_set_tbt_alt_mode: * See the comment at the connect function. This implements the Disconnect * Flow. 
*/ -void icl_tc_phy_disconnect(struct intel_digital_port *dig_port) +static void icl_tc_phy_disconnect(struct intel_digital_port *dig_port) { switch (dig_port->tc_mode) { case TC_PORT_LEGACY: + /* Nothing to do, we never disconnect from legacy mode */ + break; case TC_PORT_DP_ALT: icl_tc_phy_set_safe_mode(dig_port, true); dig_port->tc_mode = TC_PORT_TBT_ALT; diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h index 8adc107cdbcb..0d8411d4a91d 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.h +++ b/drivers/gpu/drm/i915/display/intel_tc.h @@ -10,8 +10,6 @@ #include #include "intel_drv.h" -void icl_tc_phy_disconnect(struct intel_digital_port *dig_port); - bool intel_tc_port_connected(struct intel_digital_port *dig_port); u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port); int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port); -- cgit v1.2.3 From 3ee2771e47b038624d68aa051d78ff8558c260b9 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 28 Jun 2019 17:36:35 +0300 Subject: drm/i915: WARN about invalid lane reversal in TBT-alt/DP-alt modes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Lane reversal happens only in the FIA module for TBT-alt/DP-alt mode, so WARN if lane reversal is attempted at a different level. See the BSpec DDI_BUF_CTL register description. Cc: Manasi Navare Cc: José Roberto de Souza Cc: Rodrigo Vivi Signed-off-by: Imre Deak Reviewed-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190628143635.22066-24-imre.deak@intel.com --- drivers/gpu/drm/i915/display/intel_ddi.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 8b5350931d16..747b68037fea 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -3604,6 +3604,8 @@ static void intel_ddi_set_fia_lane_count(struct intel_encoder *encoder, u32 val = I915_READ(PORT_TX_DFLEXDPMLE1); bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL; + WARN_ON(lane_reversal && dig_port->tc_mode != TC_PORT_LEGACY); + val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc_port); switch (pipe_config->lane_count) { case 1: -- cgit v1.2.3 From f18819a3c8da62a85cd6e0c546fc8b6dd4e4c865 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 1 Jul 2019 11:04:51 +0100 Subject: drm/i915/guc: Avoid reclaim locks during reset During reset, we must be very selective in which locks we take as most are tainted by being held across a wait or reclaim (kmalloc) which implicitly waits. Inside the guc reset path, we reset the ADS to sane defaults, but must keep it pinned from initialisation to avoid having to pin it during reset. 
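The shape of the fix is the usual "map once at init, reuse the cached pointer in the reset path, release at teardown" pattern. A standalone sketch of that pattern, with calloc standing in for pinning and mapping the GEM object and all names invented for the example:

#include <stdlib.h>
#include <string.h>

struct ads_blob { unsigned char data[4096]; };

struct guc {
        struct ads_blob *ads_blob;      /* mapped once, cached for the object's lifetime */
};

/* Init path: allowed to allocate (may sleep, may enter reclaim). */
static int ads_create(struct guc *guc)
{
        guc->ads_blob = calloc(1, sizeof(*guc->ads_blob));
        return guc->ads_blob ? 0 : -1;
}

/*
 * Reset path: must not allocate or take reclaim-tainted locks, so it only
 * rewrites the blob through the pointer cached at init time.
 */
static void ads_reset(struct guc *guc)
{
        memset(guc->ads_blob, 0, sizeof(*guc->ads_blob));
        /* ... refill sane defaults ... */
}

static void ads_destroy(struct guc *guc)
{
        free(guc->ads_blob);
        guc->ads_blob = NULL;
}

int main(void)
{
        struct guc guc;

        if (ads_create(&guc))
                return 1;
        ads_reset(&guc);        /* safe: no allocation on this path */
        ads_destroy(&guc);
        return 0;
}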
Signed-off-by: Chris Wilson Cc: Daniele Ceraolo Spurio Reviewed-by: Michal Wajdeczko Link: https://patchwork.freedesktop.org/patch/msgid/20190701100502.15639-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_guc.h | 4 ++++ drivers/gpu/drm/i915/intel_guc_ads.c | 26 +++++++++++++------------- 2 files changed, 17 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h index d6a75bc3d7f4..d91c96679dbb 100644 --- a/drivers/gpu/drm/i915/intel_guc.h +++ b/drivers/gpu/drm/i915/intel_guc.h @@ -35,6 +35,8 @@ #include "i915_utils.h" #include "i915_vma.h" +struct __guc_ads_blob; + struct guc_preempt_work { struct work_struct work; struct intel_engine_cs *engine; @@ -65,6 +67,8 @@ struct intel_guc { } interrupts; struct i915_vma *ads_vma; + struct __guc_ads_blob *ads_blob; + struct i915_vma *stage_desc_pool; void *stage_desc_pool_vaddr; struct ida stage_ids; diff --git a/drivers/gpu/drm/i915/intel_guc_ads.c b/drivers/gpu/drm/i915/intel_guc_ads.c index ecb69fc94218..69859d1e047f 100644 --- a/drivers/gpu/drm/i915/intel_guc_ads.c +++ b/drivers/gpu/drm/i915/intel_guc_ads.c @@ -83,18 +83,14 @@ struct __guc_ads_blob { u8 reg_state_buffer[GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE]; } __packed; -static int __guc_ads_init(struct intel_guc *guc) +static void __guc_ads_init(struct intel_guc *guc) { struct drm_i915_private *dev_priv = guc_to_i915(guc); - struct __guc_ads_blob *blob; + struct __guc_ads_blob *blob = guc->ads_blob; const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE; u32 base; u8 engine_class; - blob = i915_gem_object_pin_map(guc->ads_vma->obj, I915_MAP_WB); - if (IS_ERR(blob)) - return PTR_ERR(blob); - /* GuC scheduling policies */ guc_policies_init(&blob->policies); @@ -144,9 +140,7 @@ static int __guc_ads_init(struct intel_guc *guc) blob->ads.gt_system_info = base + ptr_offset(blob, system_info); blob->ads.clients_info = base + ptr_offset(blob, clients_info); - i915_gem_object_unpin_map(guc->ads_vma->obj); - - return 0; + i915_gem_object_flush_map(guc->ads_vma->obj); } /** @@ -160,6 +154,7 @@ int intel_guc_ads_create(struct intel_guc *guc) { const u32 size = PAGE_ALIGN(sizeof(struct __guc_ads_blob)); struct i915_vma *vma; + void *blob; int ret; GEM_BUG_ON(guc->ads_vma); @@ -168,11 +163,16 @@ int intel_guc_ads_create(struct intel_guc *guc) if (IS_ERR(vma)) return PTR_ERR(vma); + blob = i915_gem_object_pin_map(vma->obj, I915_MAP_WB); + if (IS_ERR(blob)) { + ret = PTR_ERR(blob); + goto err_vma; + } + guc->ads_vma = vma; + guc->ads_blob = blob; - ret = __guc_ads_init(guc); - if (ret) - goto err_vma; + __guc_ads_init(guc); return 0; @@ -183,7 +183,7 @@ err_vma: void intel_guc_ads_destroy(struct intel_guc *guc) { - i915_vma_unpin_and_release(&guc->ads_vma, 0); + i915_vma_unpin_and_release(&guc->ads_vma, I915_VMA_RELEASE_MAP); } /** -- cgit v1.2.3 From 4a8134d57c6ca7b67acb0812f1f32ef60a2f19c0 Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Tue, 25 Jun 2019 17:03:48 -0700 Subject: drm/i915/icl: Drop port parameter to icl_get_combo_buf_trans() The port parameter hasn't been used since the last bspec phy programming update. Drop it to make some upcoming changes simpler. 
References: 9659c1af451a ("drm/i915/icl: combo port vswing programming changes per BSPEC") Cc: Clint Taylor Signed-off-by: Matt Roper Reviewed-by: Clint Taylor Link: https://patchwork.freedesktop.org/patch/msgid/20190626000352.31926-2-matthew.d.roper@intel.com --- drivers/gpu/drm/i915/display/intel_ddi.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 747b68037fea..a4172595c8d8 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -847,8 +847,8 @@ cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries) } static const struct cnl_ddi_buf_trans * -icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, enum port port, - int type, int rate, int *n_entries) +icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate, + int *n_entries) { if (type == INTEL_OUTPUT_HDMI) { *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi); @@ -873,7 +873,7 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por if (INTEL_GEN(dev_priv) >= 11) { if (intel_port_is_combophy(dev_priv, port)) - icl_get_combo_buf_trans(dev_priv, port, INTEL_OUTPUT_HDMI, + icl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI, 0, &n_entries); else n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations); @@ -2232,7 +2232,7 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder) if (INTEL_GEN(dev_priv) >= 11) { if (intel_port_is_combophy(dev_priv, port)) - icl_get_combo_buf_trans(dev_priv, port, encoder->type, + icl_get_combo_buf_trans(dev_priv, encoder->type, intel_dp->link_rate, &n_entries); else n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations); @@ -2421,8 +2421,8 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv, u32 n_entries, val; int ln; - ddi_translations = icl_get_combo_buf_trans(dev_priv, port, type, - rate, &n_entries); + ddi_translations = icl_get_combo_buf_trans(dev_priv, type, rate, + &n_entries); if (!ddi_translations) return; -- cgit v1.2.3 From 0e933162b0420b5258c34da719ac65269e4dbc49 Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Tue, 25 Jun 2019 17:03:49 -0700 Subject: drm/i915/ehl: Add third combo PHY offset MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit v2: Rename register to _EHL_COMBOPHY_C. 
(Jose) Cc: José Roberto de Souza Signed-off-by: Matt Roper Reviewed-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190626000352.31926-3-matthew.d.roper@intel.com --- drivers/gpu/drm/i915/i915_reg.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 6ccc713d85b3..0c487146a5bd 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1793,8 +1793,10 @@ enum i915_power_well_id { */ #define _ICL_COMBOPHY_A 0x162000 #define _ICL_COMBOPHY_B 0x6C000 +#define _EHL_COMBOPHY_C 0x160000 #define _ICL_COMBOPHY(port) _PICK(port, _ICL_COMBOPHY_A, \ - _ICL_COMBOPHY_B) + _ICL_COMBOPHY_B, \ + _EHL_COMBOPHY_C) /* CNL/ICL Port CL_DW registers */ #define _ICL_PORT_CL_DW(dw, port) (_ICL_COMBOPHY(port) + \ -- cgit v1.2.3 From f80c9a9a04d3fa8231d2477f6a2c8b66a84ab41b Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Tue, 25 Jun 2019 17:03:50 -0700 Subject: drm/i915/ehl: Don't program PHY_MISC on EHL PHY C MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Although EHL added a third combo PHY, no PHY_MISC register was added for PHY C. The bspec indicates that there's no need to program the "DE to IO Comp Pwr Down" setting for this PHY that we usually need to set in PHY_MISC. v2: - Add IS_ELKHARTLAKE() guards since future platforms that have a PHY C are likely to reinstate the PHY_MISC register. (Jose) - Use goto's to skip PHY_MISC programming & minimize code deltas. (Jose) Bspec: 33148 Cc: José Roberto de Souza Signed-off-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20190626000352.31926-4-matthew.d.roper@intel.com Reviewed-by: José Roberto de Souza --- drivers/gpu/drm/i915/display/intel_combo_phy.c | 28 +++++++++++++++++++++++--- 1 file changed, 25 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c index 075bab2500eb..d3d5244765e6 100644 --- a/drivers/gpu/drm/i915/display/intel_combo_phy.c +++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c @@ -183,9 +183,13 @@ static void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv) static bool icl_combo_phy_enabled(struct drm_i915_private *dev_priv, enum port port) { - return !(I915_READ(ICL_PHY_MISC(port)) & - ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN) && - (I915_READ(ICL_PORT_COMP_DW0(port)) & COMP_INIT); + /* The PHY C added by EHL has no PHY_MISC register */ + if (IS_ELKHARTLAKE(dev_priv) && port == PORT_C) + return I915_READ(ICL_PORT_COMP_DW0(port)) & COMP_INIT; + else + return !(I915_READ(ICL_PHY_MISC(port)) & + ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN) && + (I915_READ(ICL_PORT_COMP_DW0(port)) & COMP_INIT); } static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv, @@ -299,6 +303,14 @@ static void icl_combo_phys_init(struct drm_i915_private *dev_priv) continue; } + /* + * Although EHL adds a combo PHY C, there's no PHY_MISC + * register for it and no need to program the + * DE_IO_COMP_PWR_DOWN setting on PHY C. 
+ */ + if (IS_ELKHARTLAKE(dev_priv) && port == PORT_C) + goto skip_phy_misc; + /* * EHL's combo PHY A can be hooked up to either an external * display (via DDI-D) or an internal display (via DDI-A or @@ -313,6 +325,7 @@ static void icl_combo_phys_init(struct drm_i915_private *dev_priv) val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN; I915_WRITE(ICL_PHY_MISC(port), val); +skip_phy_misc: cnl_set_procmon_ref_values(dev_priv, port); if (port == PORT_A) { @@ -343,10 +356,19 @@ static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv) DRM_WARN("Port %c combo PHY HW state changed unexpectedly\n", port_name(port)); + /* + * Although EHL adds a combo PHY C, there's no PHY_MISC + * register for it and no need to program the + * DE_IO_COMP_PWR_DOWN setting on PHY C. + */ + if (IS_ELKHARTLAKE(dev_priv) && port == PORT_C) + goto skip_phy_misc; + val = I915_READ(ICL_PHY_MISC(port)); val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN; I915_WRITE(ICL_PHY_MISC(port), val); +skip_phy_misc: val = I915_READ(ICL_PORT_COMP_DW0(port)); val &= ~COMP_INIT; I915_WRITE(ICL_PORT_COMP_DW0(port), val); -- cgit v1.2.3 From 8759aa4cc1d36749845b7c99b12ab180e12f2699 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 1 Jul 2019 11:04:54 +0100 Subject: drm/i915/execlists: Refactor CSB state machine Daniele pointed out that the CSB status information will change with Tigerlake and suggested that we could rearrange our state machine to hide the differences in generation. gcc also prefers the explicit state machine, so make it so: process_csb 1980 1967 -13 Suggested-by: Daniele Ceraolo Spurio Signed-off-by: Chris Wilson Cc: Daniele Ceraolo Spurio Cc: Tvrtko Ursulin Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190701100502.15639-4-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_lrc.c | 64 +++++++++++++++++++++++++------------ 1 file changed, 44 insertions(+), 20 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 471e134de186..953b3938a85f 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -1279,6 +1279,30 @@ reset_in_progress(const struct intel_engine_execlists *execlists) return unlikely(!__tasklet_is_enabled(&execlists->tasklet)); } +enum csb_step { + CSB_NOP, + CSB_PROMOTE, + CSB_PREEMPT, + CSB_COMPLETE, +}; + +static inline enum csb_step +csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb) +{ + unsigned int status = *csb; + + if (status & GEN8_CTX_STATUS_IDLE_ACTIVE) + return CSB_PROMOTE; + + if (status & GEN8_CTX_STATUS_PREEMPTED) + return CSB_PREEMPT; + + if (*execlists->active) + return CSB_COMPLETE; + + return CSB_NOP; +} + static void process_csb(struct intel_engine_cs *engine) { struct intel_engine_execlists * const execlists = &engine->execlists; @@ -1316,8 +1340,6 @@ static void process_csb(struct intel_engine_cs *engine) rmb(); do { - unsigned int status; - if (++head == num_entries) head = 0; @@ -1343,10 +1365,16 @@ static void process_csb(struct intel_engine_cs *engine) engine->name, head, buf[2 * head + 0], buf[2 * head + 1]); - status = buf[2 * head]; - if (status & GEN8_CTX_STATUS_IDLE_ACTIVE) { + switch (csb_parse(execlists, buf + 2 * head)) { + case CSB_PREEMPT: /* cancel old inflight, prepare for switch */ + trace_ports(execlists, "preempted", execlists->active); + + while (*execlists->active) + execlists_schedule_out(*execlists->active++); + + /* fallthrough */ + case CSB_PROMOTE: /* switch pending 
to inflight */ GEM_BUG_ON(*execlists->active); -promote: GEM_BUG_ON(!assert_pending_valid(execlists, "promote")); execlists->active = memcpy(execlists->inflight, @@ -1355,25 +1383,17 @@ promote: sizeof(*execlists->pending)); execlists->pending[0] = NULL; + trace_ports(execlists, "promoted", execlists->active); + if (enable_timeslice(engine)) mod_timer(&execlists->timer, jiffies + 1); if (!inject_preempt_hang(execlists)) ring_set_paused(engine, 0); - } else if (status & GEN8_CTX_STATUS_PREEMPTED) { - struct i915_request * const *port = execlists->active; - - trace_ports(execlists, "preempted", execlists->active); - - while (*port) - execlists_schedule_out(*port++); - - goto promote; - } else if (*execlists->active) { - struct i915_request *rq = *execlists->active++; + break; - trace_ports(execlists, "completed", - execlists->active - 1); + case CSB_COMPLETE: /* port0 completed, advanced to port1 */ + trace_ports(execlists, "completed", execlists->active); /* * We rely on the hardware being strongly @@ -1381,11 +1401,15 @@ promote: * coherent (visible from the CPU) before the * user interrupt and CSB is processed. */ - GEM_BUG_ON(!i915_request_completed(rq)); - execlists_schedule_out(rq); + GEM_BUG_ON(!i915_request_completed(*execlists->active)); + execlists_schedule_out(*execlists->active++); GEM_BUG_ON(execlists->active - execlists->inflight > execlists_num_ports(execlists)); + break; + + case CSB_NOP: + break; } } while (head != tail); -- cgit v1.2.3 From afd1bcd4ad776a9892ce6e814111af22dae44956 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 2 Jul 2019 10:21:17 +0100 Subject: drm/i915: Report if i915_active is still busy upon waiting If we try to wait on an i915_active from within a critical section, it will remain busy (such as if we are shrinking from within i915_active_ref). Report the failure so that we do not proceed thinking it is idle. Extracted from a future patch "drm/i915: Coordinate i915_active with its own mutex". Fixes: 12c255b5dad1 ("drm/i915: Provide an i915_active.acquire callback") Signed-off-by: Chris Wilson Cc: Matthew Auld Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190702092117.1707-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_active.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c index cb6a1eadf7df..584b247df9bc 100644 --- a/drivers/gpu/drm/i915/i915_active.c +++ b/drivers/gpu/drm/i915/i915_active.c @@ -287,7 +287,13 @@ int i915_active_wait(struct i915_active *ref) } __active_retire(ref); - return err; + if (err) + return err; + + if (!i915_active_is_idle(ref)) + return -EBUSY; + + return 0; } int i915_request_await_active_request(struct i915_request *rq, -- cgit v1.2.3 From 361b69051326ed0e07553315227678d00d651a9e Mon Sep 17 00:00:00 2001 From: Lionel Landwerlin Date: Sat, 29 Jun 2019 14:13:50 +0100 Subject: drm/i915: fix whitelist selftests with readonly registers When a register is readonly there is not much we can tell about its value (apart from its default value?). This can be covered by tests exercising the value of the register from userspace. For PS_INVOCATION_COUNT we've got the following piglit tests : KHR-GL45.pipeline_statistics_query_tests_ARB.functional_fragment_shader_invocations Vulkan CTS tests : dEQP-VK.query_pool.statistics_query.fragment_shader_invocations.* v2: Use a local to shrink under 80cols. 
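In other words, the selftest now skips value comparison for any whitelist slot flagged read-only, since a userspace write cannot influence what such a register reads back. A rough standalone sketch of that filtering, with an illustrative flag bit rather than the real RING_FORCE_TO_NONPRIV_RD encoding and arbitrary example offsets:

#include <stdio.h>
#include <stdint.h>

#define WA_FLAG_RD      (1u << 28)      /* illustrative read-only marker, not the real bit */

struct wa { uint32_t reg; };            /* access flags folded into the offset, as in the driver */

/* Compare the captured values a[] and b[] only for writable entries. */
static int check_whitelist(const struct wa *list, int count,
                           const uint32_t *a, const uint32_t *b)
{
        int i, err = 0;

        for (i = 0; i < count; i++) {
                if (list[i].reg & WA_FLAG_RD)
                        continue;       /* value cannot be influenced from userspace */
                if (a[i] != b[i])
                        err = -1;
        }
        return err;
}

int main(void)
{
        /* Arbitrary example offsets; the second entry is marked read-only. */
        const struct wa list[] = { { 0x2580 }, { 0x2348 | WA_FLAG_RD } };
        const uint32_t a[] = { 1, 5 }, b[] = { 1, 7 };

        /* Prints 0: the mismatch on the read-only entry is ignored. */
        printf("%d\n", check_whitelist(list, 2, a, b));
        return 0;
}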
Signed-off-by: Lionel Landwerlin Fixes: 86554f48e511 ("drm/i915/selftests: Verify whitelist of context registers") Tested-by: Anuj Phogat Signed-off-by: Chris Wilson Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190629131350.31185-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/selftest_workarounds.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c index f12cb20fe785..b933d831eeb1 100644 --- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c +++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c @@ -926,7 +926,12 @@ check_whitelisted_registers(struct intel_engine_cs *engine, err = 0; for (i = 0; i < engine->whitelist.count; i++) { - if (!fn(engine, a[i], b[i], engine->whitelist.list[i].reg)) + const struct i915_wa *wa = &engine->whitelist.list[i]; + + if (i915_mmio_reg_offset(wa->reg) & RING_FORCE_TO_NONPRIV_RD) + continue; + + if (!fn(engine, a[i], b[i], wa->reg)) err = -EINVAL; } -- cgit v1.2.3 From 2c903da50f5a9522b134e488bd0f92646c46f3c0 Mon Sep 17 00:00:00 2001 From: Lionel Landwerlin Date: Fri, 28 Jun 2019 15:07:19 +0300 Subject: drm/i915: whitelist PS_(DEPTH|INVOCATION)_COUNT CFL:C0+ changed the status of those registers which are now blacklisted by default. This is breaking a number of CTS tests on GL & Vulkan : KHR-GL45.pipeline_statistics_query_tests_ARB.functional_fragment_shader_invocations (GL) dEQP-VK.query_pool.statistics_query.fragment_shader_invocations.* (Vulkan) v2: Only use one whitelist entry (Lionel) Bspec: 14091 Signed-off-by: Lionel Landwerlin Cc: stable@vger.kernel.org Acked-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190628120720.21682-3-lionel.g.landwerlin@intel.com --- drivers/gpu/drm/i915/gt/intel_workarounds.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index d776e111e5d0..480bff4b7735 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -1092,10 +1092,25 @@ static void glk_whitelist_build(struct intel_engine_cs *engine) static void cfl_whitelist_build(struct intel_engine_cs *engine) { + struct i915_wa_list *w = &engine->whitelist; + if (engine->class != RENDER_CLASS) return; - gen9_whitelist_build(&engine->whitelist); + gen9_whitelist_build(w); + + /* + * WaAllowPMDepthAndInvocationCountAccessFromUMD:cfl,whl,cml,aml + * + * This covers 4 register which are next to one another : + * - PS_INVOCATION_COUNT + * - PS_INVOCATION_COUNT_UDW + * - PS_DEPTH_COUNT + * - PS_DEPTH_COUNT_UDW + */ + whitelist_reg_ext(w, PS_INVOCATION_COUNT, + RING_FORCE_TO_NONPRIV_RD | + RING_FORCE_TO_NONPRIV_RANGE_4); } static void cnl_whitelist_build(struct intel_engine_cs *engine) -- cgit v1.2.3 From 3fe0107e45ab396342497e06b8924cdd485cde3b Mon Sep 17 00:00:00 2001 From: Lionel Landwerlin Date: Fri, 28 Jun 2019 15:07:20 +0300 Subject: drm/i915/icl: whitelist PS_(DEPTH|INVOCATION)_COUNT The same tests failing on CFL+ platforms are also failing on ICL. Documentation doesn't list the WaAllowPMDepthAndInvocationCountAccessFromUMD workaround for ICL but applying it fixes the same tests as CFL. 
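Both the CFL and the ICL entries lean on a single whitelist slot covering a block of adjacent registers through a range flag instead of burning four slots. A standalone sketch of how one (base, flags) entry expands into the covered offsets; the offsets and flag encodings here are illustrative assumptions, not the hardware values:

#include <stdio.h>
#include <stdint.h>

/* Illustrative encodings only; the real RING_FORCE_TO_NONPRIV_* bits differ. */
#define NONPRIV_RD              (1u << 28)      /* UMD may only read the register(s) */
#define NONPRIV_RANGE_4         (1u << 29)      /* entry covers 4 consecutive dwords */

struct wa_entry {
        uint32_t base;          /* offset of the first covered register */
        uint32_t flags;
};

static void print_covered(const struct wa_entry *e)
{
        unsigned int count = (e->flags & NONPRIV_RANGE_4) ? 4 : 1;
        unsigned int i;

        for (i = 0; i < count; i++)
                printf("whitelisted 0x%05x (%s)\n",
                       (unsigned int)(e->base + 4 * i),
                       (e->flags & NONPRIV_RD) ? "read-only" : "read-write");
}

int main(void)
{
        /* One entry standing in for PS_INVOCATION_COUNT .. PS_DEPTH_COUNT_UDW. */
        struct wa_entry e = { .base = 0x22348, .flags = NONPRIV_RD | NONPRIV_RANGE_4 };

        print_covered(&e);
        return 0;
}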
v2: Use only one whitelist entry (Lionel) Signed-off-by: Lionel Landwerlin Tested-by: Anuj Phogat Cc: stable@vger.kernel.org Acked-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190628120720.21682-4-lionel.g.landwerlin@intel.com --- drivers/gpu/drm/i915/gt/intel_workarounds.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 480bff4b7735..8dd9105efad9 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -1138,6 +1138,19 @@ static void icl_whitelist_build(struct intel_engine_cs *engine) /* WaEnableStateCacheRedirectToCS:icl */ whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1); + + /* + * WaAllowPMDepthAndInvocationCountAccessFromUMD:icl + * + * This covers 4 register which are next to one another : + * - PS_INVOCATION_COUNT + * - PS_INVOCATION_COUNT_UDW + * - PS_DEPTH_COUNT + * - PS_DEPTH_COUNT_UDW + */ + whitelist_reg_ext(w, PS_INVOCATION_COUNT, + RING_FORCE_TO_NONPRIV_RD | + RING_FORCE_TO_NONPRIV_RANGE_4); break; case VIDEO_DECODE_CLASS: -- cgit v1.2.3 From c5f846eed2a1856b78e988eeef08215c70598ecd Mon Sep 17 00:00:00 2001 From: Mika Kuoppala Date: Mon, 1 Jul 2019 13:44:42 +0300 Subject: drm/i915: Fix memleak in runtime wakeref tracking If we untrack wakerefs, the actual count may reach zero. However the krealloced owners array is still there and needs to be taken care of. Free the owners unconditionally to fix the leak. Fixes: bd780f37a361 ("drm/i915: Track all held rpm wakerefs") Reported-by: Juha-Pekka Heikkila Cc: Juha-Pekka Heikkila Cc: Chris Wilson Signed-off-by: Mika Kuoppala Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190701104442.9319-1-mika.kuoppala@linux.intel.com --- drivers/gpu/drm/i915/intel_runtime_pm.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 502c54428570..8d1aebc3e857 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -221,13 +221,11 @@ __untrack_all_wakerefs(struct intel_runtime_pm_debug *debug, static void dump_and_free_wakeref_tracking(struct intel_runtime_pm_debug *debug) { - struct drm_printer p; + if (debug->count) { + struct drm_printer p = drm_debug_printer("i915"); - if (!debug->count) - return; - - p = drm_debug_printer("i915"); - __print_intel_runtime_pm_wakeref(&p, debug); + __print_intel_runtime_pm_wakeref(&p, debug); + } kfree(debug->owners); } -- cgit v1.2.3 From a3389c14ba09898877122cfae8d2c4c9d9e7291e Mon Sep 17 00:00:00 2001 From: Michał Winiarski Date: Tue, 2 Jul 2019 13:31:48 +0200 Subject: Revert "drm/i915: Introduce private PAT management" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 4395890a48551982549d222d1923e2833dac47cf. It's been over a year since this was merged, and the actual users of intel_ppat_get / intel_ppat_put never materialized. Time to remove it! 
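With the dynamic management gone, the PAT is once again built as a single packed 64-bit value and written as a lo/hi register pair using the GEN8_PPAT(i, x) byte-shift macro visible in the hunks below. A standalone sketch of just that packing, with the register writes replaced by printf and dummy per-index attribute values:

#include <stdio.h>
#include <stdint.h>

/* Same shape as the kernel macro: entry i occupies byte i of a 64-bit value. */
#define GEN8_PPAT(i, x) ((uint64_t)(x) << ((i) * 8))

static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
        uint64_t pat = 0;
        int i;

        /* Eight one-byte PAT entries packed into a single 64-bit value. */
        for (i = 0; i < 8; i++)
                pat |= GEN8_PPAT(i, 0x40 | i);  /* dummy per-index attribute bytes */

        /* The hardware exposes the PAT as a lo/hi pair of 32-bit registers. */
        printf("PRIVATE_PAT_LO = 0x%08x\n", (unsigned int)lower_32_bits(pat));
        printf("PRIVATE_PAT_HI = 0x%08x\n", (unsigned int)upper_32_bits(pat));
        return 0;
}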
v2: Unbreak suspend (Chris) v3: Rebase, drop fixes tag to avoid confusion Signed-off-by: Michał Winiarski Cc: Chris Wilson Cc: Joonas Lahtinen Cc: Rodrigo Vivi Cc: Zhi Wang Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190702113149.21200-1-michal.winiarski@intel.com --- drivers/gpu/drm/i915/i915_drv.h | 2 - drivers/gpu/drm/i915/i915_gem_gtt.c | 279 ++++++------------------------------ drivers/gpu/drm/i915/i915_gem_gtt.h | 36 ----- 3 files changed, 42 insertions(+), 275 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 04723a2e0713..99e4635fac6c 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1489,8 +1489,6 @@ struct drm_i915_private { DECLARE_HASHTABLE(mm_structs, 7); struct mutex mm_lock; - struct intel_ppat ppat; - /* Kernel Modesetting */ struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES]; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index ff1d5008a256..30e14eac47ac 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -3028,203 +3028,26 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) return 0; } -static struct intel_ppat_entry * -__alloc_ppat_entry(struct intel_ppat *ppat, unsigned int index, u8 value) +static void cnl_setup_private_ppat(struct drm_i915_private *dev_priv) { - struct intel_ppat_entry *entry = &ppat->entries[index]; - - GEM_BUG_ON(index >= ppat->max_entries); - GEM_BUG_ON(test_bit(index, ppat->used)); - - entry->ppat = ppat; - entry->value = value; - kref_init(&entry->ref); - set_bit(index, ppat->used); - set_bit(index, ppat->dirty); - - return entry; -} - -static void __free_ppat_entry(struct intel_ppat_entry *entry) -{ - struct intel_ppat *ppat = entry->ppat; - unsigned int index = entry - ppat->entries; - - GEM_BUG_ON(index >= ppat->max_entries); - GEM_BUG_ON(!test_bit(index, ppat->used)); - - entry->value = ppat->clear_value; - clear_bit(index, ppat->used); - set_bit(index, ppat->dirty); -} - -/** - * intel_ppat_get - get a usable PPAT entry - * @i915: i915 device instance - * @value: the PPAT value required by the caller - * - * The function tries to search if there is an existing PPAT entry which - * matches with the required value. If perfectly matched, the existing PPAT - * entry will be used. If only partially matched, it will try to check if - * there is any available PPAT index. If yes, it will allocate a new PPAT - * index for the required entry and update the HW. If not, the partially - * matched entry will be used. 
- */ -const struct intel_ppat_entry * -intel_ppat_get(struct drm_i915_private *i915, u8 value) -{ - struct intel_ppat *ppat = &i915->ppat; - struct intel_ppat_entry *entry = NULL; - unsigned int scanned, best_score; - int i; - - GEM_BUG_ON(!ppat->max_entries); - - scanned = best_score = 0; - for_each_set_bit(i, ppat->used, ppat->max_entries) { - unsigned int score; - - score = ppat->match(ppat->entries[i].value, value); - if (score > best_score) { - entry = &ppat->entries[i]; - if (score == INTEL_PPAT_PERFECT_MATCH) { - kref_get(&entry->ref); - return entry; - } - best_score = score; - } - scanned++; - } - - if (scanned == ppat->max_entries) { - if (!entry) - return ERR_PTR(-ENOSPC); - - kref_get(&entry->ref); - return entry; - } - - i = find_first_zero_bit(ppat->used, ppat->max_entries); - entry = __alloc_ppat_entry(ppat, i, value); - ppat->update_hw(i915); - return entry; -} - -static void release_ppat(struct kref *kref) -{ - struct intel_ppat_entry *entry = - container_of(kref, struct intel_ppat_entry, ref); - struct drm_i915_private *i915 = entry->ppat->i915; - - __free_ppat_entry(entry); - entry->ppat->update_hw(i915); -} - -/** - * intel_ppat_put - put back the PPAT entry got from intel_ppat_get() - * @entry: an intel PPAT entry - * - * Put back the PPAT entry got from intel_ppat_get(). If the PPAT index of the - * entry is dynamically allocated, its reference count will be decreased. Once - * the reference count becomes into zero, the PPAT index becomes free again. - */ -void intel_ppat_put(const struct intel_ppat_entry *entry) -{ - struct intel_ppat *ppat = entry->ppat; - unsigned int index = entry - ppat->entries; - - GEM_BUG_ON(!ppat->max_entries); - - kref_put(&ppat->entries[index].ref, release_ppat); -} - -static void cnl_private_pat_update_hw(struct drm_i915_private *dev_priv) -{ - struct intel_ppat *ppat = &dev_priv->ppat; - int i; - - for_each_set_bit(i, ppat->dirty, ppat->max_entries) { - I915_WRITE(GEN10_PAT_INDEX(i), ppat->entries[i].value); - clear_bit(i, ppat->dirty); - } -} - -static void bdw_private_pat_update_hw(struct drm_i915_private *dev_priv) -{ - struct intel_ppat *ppat = &dev_priv->ppat; - u64 pat = 0; - int i; - - for (i = 0; i < ppat->max_entries; i++) - pat |= GEN8_PPAT(i, ppat->entries[i].value); - - bitmap_clear(ppat->dirty, 0, ppat->max_entries); - - I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat)); - I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat)); -} - -static unsigned int bdw_private_pat_match(u8 src, u8 dst) -{ - unsigned int score = 0; - enum { - AGE_MATCH = BIT(0), - TC_MATCH = BIT(1), - CA_MATCH = BIT(2), - }; - - /* Cache attribute has to be matched. */ - if (GEN8_PPAT_GET_CA(src) != GEN8_PPAT_GET_CA(dst)) - return 0; - - score |= CA_MATCH; - - if (GEN8_PPAT_GET_TC(src) == GEN8_PPAT_GET_TC(dst)) - score |= TC_MATCH; - - if (GEN8_PPAT_GET_AGE(src) == GEN8_PPAT_GET_AGE(dst)) - score |= AGE_MATCH; - - if (score == (AGE_MATCH | TC_MATCH | CA_MATCH)) - return INTEL_PPAT_PERFECT_MATCH; - - return score; -} - -static unsigned int chv_private_pat_match(u8 src, u8 dst) -{ - return (CHV_PPAT_GET_SNOOP(src) == CHV_PPAT_GET_SNOOP(dst)) ? 
- INTEL_PPAT_PERFECT_MATCH : 0; -} - -static void cnl_setup_private_ppat(struct intel_ppat *ppat) -{ - ppat->max_entries = 8; - ppat->update_hw = cnl_private_pat_update_hw; - ppat->match = bdw_private_pat_match; - ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3); - - __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC); - __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC); - __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC); - __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC); - __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)); - __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)); - __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)); - __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); + I915_WRITE(GEN10_PAT_INDEX(0), GEN8_PPAT_WB | GEN8_PPAT_LLC); + I915_WRITE(GEN10_PAT_INDEX(1), GEN8_PPAT_WC | GEN8_PPAT_LLCELLC); + I915_WRITE(GEN10_PAT_INDEX(2), GEN8_PPAT_WT | GEN8_PPAT_LLCELLC); + I915_WRITE(GEN10_PAT_INDEX(3), GEN8_PPAT_UC); + I915_WRITE(GEN10_PAT_INDEX(4), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)); + I915_WRITE(GEN10_PAT_INDEX(5), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)); + I915_WRITE(GEN10_PAT_INDEX(6), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)); + I915_WRITE(GEN10_PAT_INDEX(7), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); } /* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability * bits. When using advanced contexts each context stores its own PAT, but * writing this data shouldn't be harmful even in those cases. */ -static void bdw_setup_private_ppat(struct intel_ppat *ppat) +static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv) { - ppat->max_entries = 8; - ppat->update_hw = bdw_private_pat_update_hw; - ppat->match = bdw_private_pat_match; - ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3); + u64 pat; - if (!HAS_PPGTT(ppat->i915)) { + if (!HAS_PPGTT(dev_priv)) { /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry, * so RTL will always use the value corresponding to * pat_sel = 000". @@ -3238,26 +3061,25 @@ static void bdw_setup_private_ppat(struct intel_ppat *ppat) * So we can still hold onto all our assumptions wrt cpu * clflushing on LLC machines. */ - __alloc_ppat_entry(ppat, 0, GEN8_PPAT_UC); - return; + pat = GEN8_PPAT(0, GEN8_PPAT_UC); + } else { + pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */ + GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */ + GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */ + GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */ + GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) | + GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) | + GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) | + GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); } - __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC); /* for normal objects, no eLLC */ - __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC); /* for something pointing to ptes? 
*/ - __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC); /* for scanout with eLLC */ - __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC); /* Uncached objects, mostly for scanout */ - __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)); - __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)); - __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)); - __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); + I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat)); + I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat)); } -static void chv_setup_private_ppat(struct intel_ppat *ppat) +static void chv_setup_private_ppat(struct drm_i915_private *dev_priv) { - ppat->max_entries = 8; - ppat->update_hw = bdw_private_pat_update_hw; - ppat->match = chv_private_pat_match; - ppat->clear_value = CHV_PPAT_SNOOP; + u64 pat; /* * Map WB on BDW to snooped on CHV. @@ -3278,14 +3100,17 @@ static void chv_setup_private_ppat(struct intel_ppat *ppat) * in order to keep the global status page working. */ - __alloc_ppat_entry(ppat, 0, CHV_PPAT_SNOOP); - __alloc_ppat_entry(ppat, 1, 0); - __alloc_ppat_entry(ppat, 2, 0); - __alloc_ppat_entry(ppat, 3, 0); - __alloc_ppat_entry(ppat, 4, CHV_PPAT_SNOOP); - __alloc_ppat_entry(ppat, 5, CHV_PPAT_SNOOP); - __alloc_ppat_entry(ppat, 6, CHV_PPAT_SNOOP); - __alloc_ppat_entry(ppat, 7, CHV_PPAT_SNOOP); + pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) | + GEN8_PPAT(1, 0) | + GEN8_PPAT(2, 0) | + GEN8_PPAT(3, 0) | + GEN8_PPAT(4, CHV_PPAT_SNOOP) | + GEN8_PPAT(5, CHV_PPAT_SNOOP) | + GEN8_PPAT(6, CHV_PPAT_SNOOP) | + GEN8_PPAT(7, CHV_PPAT_SNOOP); + + I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat)); + I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat)); } static void gen6_gmch_remove(struct i915_address_space *vm) @@ -3298,27 +3123,12 @@ static void gen6_gmch_remove(struct i915_address_space *vm) static void setup_private_pat(struct drm_i915_private *dev_priv) { - struct intel_ppat *ppat = &dev_priv->ppat; - int i; - - ppat->i915 = dev_priv; - if (INTEL_GEN(dev_priv) >= 10) - cnl_setup_private_ppat(ppat); + cnl_setup_private_ppat(dev_priv); else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv)) - chv_setup_private_ppat(ppat); + chv_setup_private_ppat(dev_priv); else - bdw_setup_private_ppat(ppat); - - GEM_BUG_ON(ppat->max_entries > INTEL_MAX_PPAT_ENTRIES); - - for_each_clear_bit(i, ppat->used, ppat->max_entries) { - ppat->entries[i].value = ppat->clear_value; - ppat->entries[i].ppat = ppat; - set_bit(i, ppat->dirty); - } - - ppat->update_hw(dev_priv); + bdw_setup_private_ppat(dev_priv); } static int gen8_gmch_probe(struct i915_ggtt *ggtt) @@ -3697,13 +3507,8 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915) { ggtt_restore_mappings(&i915->ggtt); - if (INTEL_GEN(i915) >= 8) { - struct intel_ppat *ppat = &i915->ppat; - - bitmap_set(ppat->dirty, 0, ppat->max_entries); - i915->ppat.update_hw(i915); - return; - } + if (INTEL_GEN(i915) >= 8) + setup_private_pat(i915); } static struct scatterlist * diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 028be3b44d07..d0e0905acbbb 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -160,11 +160,6 @@ typedef u64 gen8_ppgtt_pml4e_t; #define GEN8_PPAT_ELLC_OVERRIDE (0<<2) #define GEN8_PPAT(i, x) ((u64)(x) << ((i) * 8)) -#define GEN8_PPAT_GET_CA(x) ((x) & 3) -#define GEN8_PPAT_GET_TC(x) ((x) & (3 << 2)) -#define GEN8_PPAT_GET_AGE(x) ((x) & (3 << 4)) 
-#define CHV_PPAT_GET_SNOOP(x) ((x) & (1 << 6)) - #define GEN8_PDE_IPS_64K BIT(11) #define GEN8_PDE_PS_2M BIT(7) @@ -619,37 +614,6 @@ i915_vm_to_ppgtt(struct i915_address_space *vm) return container_of(vm, struct i915_ppgtt, vm); } -#define INTEL_MAX_PPAT_ENTRIES 8 -#define INTEL_PPAT_PERFECT_MATCH (~0U) - -struct intel_ppat; - -struct intel_ppat_entry { - struct intel_ppat *ppat; - struct kref ref; - u8 value; -}; - -struct intel_ppat { - struct intel_ppat_entry entries[INTEL_MAX_PPAT_ENTRIES]; - DECLARE_BITMAP(used, INTEL_MAX_PPAT_ENTRIES); - DECLARE_BITMAP(dirty, INTEL_MAX_PPAT_ENTRIES); - unsigned int max_entries; - u8 clear_value; - /* - * Return a score to show how two PPAT values match, - * a INTEL_PPAT_PERFECT_MATCH indicates a perfect match - */ - unsigned int (*match)(u8 src, u8 dst); - void (*update_hw)(struct drm_i915_private *i915); - - struct drm_i915_private *i915; -}; - -const struct intel_ppat_entry * -intel_ppat_get(struct drm_i915_private *i915, u8 value); -void intel_ppat_put(const struct intel_ppat_entry *entry); - int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv); int i915_ggtt_init_hw(struct drm_i915_private *dev_priv); int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv); -- cgit v1.2.3 From 096a93949b8658b058e8e3d3aee7b6ff36bf4f2e Mon Sep 17 00:00:00 2001 From: Michał Winiarski Date: Tue, 2 Jul 2019 13:31:49 +0200 Subject: drm/i915/gtt: Don't check PPGTT presence on PPGTT-only platforms MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We missed one place where we check PPGTT-only platform for PPGTT presence. Let's remove it. While I'm here let's assert that this particular code is never called on pre-gen8 platforms. References: 4bdafb9ddfa4 ("drm/i915: Remove i915.enable_ppgtt override") Signed-off-by: Michał Winiarski Cc: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190702113149.21200-2-michal.winiarski@intel.com --- drivers/gpu/drm/i915/i915_gem_gtt.c | 35 ++++++++++------------------------- 1 file changed, 10 insertions(+), 25 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 30e14eac47ac..9e76347e039e 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -3047,31 +3047,14 @@ static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv) { u64 pat; - if (!HAS_PPGTT(dev_priv)) { - /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry, - * so RTL will always use the value corresponding to - * pat_sel = 000". - * So let's disable cache for GGTT to avoid screen corruptions. - * MOCS still can be used though. - * - System agent ggtt writes (i.e. cpu gtt mmaps) already work - * before this patch, i.e. the same uncached + snooping access - * like on gen6/7 seems to be in effect. - * - So this just fixes blitter/render access. Again it looks - * like it's not just uncached access, but uncached + snooping. - * So we can still hold onto all our assumptions wrt cpu - * clflushing on LLC machines. - */ - pat = GEN8_PPAT(0, GEN8_PPAT_UC); - } else { - pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */ - GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? 
*/ - GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */ - GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */ - GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) | - GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) | - GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) | - GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); - } + pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */ + GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */ + GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */ + GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */ + GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) | + GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) | + GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) | + GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat)); I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat)); @@ -3123,6 +3106,8 @@ static void gen6_gmch_remove(struct i915_address_space *vm) static void setup_private_pat(struct drm_i915_private *dev_priv) { + GEM_BUG_ON(INTEL_GEN(dev_priv) < 8); + if (INTEL_GEN(dev_priv) >= 10) cnl_setup_private_ppat(dev_priv); else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv)) -- cgit v1.2.3 From 315ca4c408ed3bcfe3a204fec9f5c397a98e97a0 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Tue, 2 Jul 2019 18:17:23 +0300 Subject: drm/i915: synchronize_irq() against the actual irq MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When eliminating our use of drm_irq_install() I failed to convert all our synchronize_irq() calls to consult pdev->irq instead of dev_priv->drm.irq. As we no longer populate dev_priv->drm.irq we're no longer synchronizing against anything. 
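The fix funnels every call site through one static inline that reads the live interrupt line from the PCI device, so a stale cached copy can no longer be used by accident. A tiny standalone model of that single-source-of-truth wrapper, with simplified structs and a printf standing in for synchronize_irq():

#include <stdio.h>

struct pci_dev { int irq; };

struct drm_device {
        int irq;                /* legacy field, no longer populated */
        struct pci_dev *pdev;
};

struct i915 { struct drm_device drm; };

/* Stand-in for synchronize_irq(): just report which line we would wait on. */
static void synchronize_irq(int irq)
{
        printf("synchronizing against irq %d\n", irq);
}

/* Every caller goes through this helper, so the live line is always used. */
static void intel_synchronize_irq(struct i915 *i915)
{
        synchronize_irq(i915->drm.pdev->irq);
}

int main(void)
{
        struct pci_dev pdev = { .irq = 128 };
        struct i915 dev = { .drm = { .irq = 0, .pdev = &pdev } };

        synchronize_irq(dev.drm.irq);   /* old path: waits on the wrong, unused line */
        intel_synchronize_irq(&dev);    /* fixed path: uses pdev->irq */
        return 0;
}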
v2: Add intel_syncrhonize_irq() (Chris) Cc: Chris Wilson Reported-by: Imre Deak Fixes: b318b82455bd ("drm/i915: Nuke drm_driver irq vfuncs") Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=111012 Reviewed-by: Chris Wilson Signed-off-by: Ville Syrjälä Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190702151723.29739-1-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/display/intel_display_power.c | 2 +- drivers/gpu/drm/i915/display/intel_pipe_crc.c | 2 +- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 2 +- drivers/gpu/drm/i915/i915_debugfs.c | 2 +- drivers/gpu/drm/i915/i915_irq.c | 10 +++++----- drivers/gpu/drm/i915/i915_irq.h | 5 +++++ drivers/gpu/drm/i915/intel_guc_log.c | 2 +- 7 files changed, 15 insertions(+), 10 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 86a38116dc3a..c19b958461ca 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -1158,7 +1158,7 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv) spin_unlock_irq(&dev_priv->irq_lock); /* make sure we're done processing display irqs */ - synchronize_irq(dev_priv->drm.irq); + intel_synchronize_irq(dev_priv); intel_power_sequencer_reset(dev_priv); diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c index 1e2c4307d05a..9a48f7a01e7e 100644 --- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c +++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c @@ -667,5 +667,5 @@ void intel_crtc_disable_pipe_crc(struct intel_crtc *intel_crtc) I915_WRITE(PIPE_CRC_CTL(crtc->index), 0); POSTING_READ(PIPE_CRC_CTL(crtc->index)); - synchronize_irq(dev_priv->drm.irq); + intel_synchronize_irq(dev_priv); } diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index d1508f0b4c84..c1fb5fa3952e 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -1162,7 +1162,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine) if (execlists_active(&engine->execlists)) { struct tasklet_struct *t = &engine->execlists.tasklet; - synchronize_hardirq(engine->i915->drm.irq); + synchronize_hardirq(engine->i915->drm.pdev->irq); local_bh_disable(); if (tasklet_trylock(t)) { diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index eeecdad0e3ca..6340cec733d2 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -4081,7 +4081,7 @@ static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data) /* Synchronize with everything first in case there's been an HPD * storm, but we haven't finished handling it in the kernel yet */ - synchronize_irq(dev_priv->drm.irq); + intel_synchronize_irq(dev_priv); flush_work(&dev_priv->hotplug.dig_port_work); flush_work(&dev_priv->hotplug.hotplug_work); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 73f0338faf9f..b5724ad38bf5 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -580,7 +580,7 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv) gen6_disable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS); spin_unlock_irq(&dev_priv->irq_lock); - synchronize_irq(dev_priv->drm.irq); + intel_synchronize_irq(dev_priv); /* Now that we will not be generating any more work, flush any * outstanding 
tasks. As we are called on the RPS idle path, @@ -627,7 +627,7 @@ void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv) gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events); spin_unlock_irq(&dev_priv->irq_lock); - synchronize_irq(dev_priv->drm.irq); + intel_synchronize_irq(dev_priv); gen9_reset_guc_interrupts(dev_priv); } @@ -663,7 +663,7 @@ void gen11_disable_guc_interrupts(struct drm_i915_private *dev_priv) I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, 0); spin_unlock_irq(&dev_priv->irq_lock); - synchronize_irq(dev_priv->drm.irq); + intel_synchronize_irq(dev_priv); gen11_reset_guc_interrupts(dev_priv); } @@ -3680,7 +3680,7 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, spin_unlock_irq(&dev_priv->irq_lock); /* make sure we're done processing display irqs */ - synchronize_irq(dev_priv->drm.irq); + intel_synchronize_irq(dev_priv); } static void cherryview_irq_reset(struct drm_i915_private *dev_priv) @@ -4970,7 +4970,7 @@ void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv) { intel_irq_reset(dev_priv); dev_priv->runtime_pm.irqs_enabled = false; - synchronize_irq(dev_priv->drm.irq); + intel_synchronize_irq(dev_priv); } /** diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h index 5af5654f801d..44d480dab030 100644 --- a/drivers/gpu/drm/i915/i915_irq.h +++ b/drivers/gpu/drm/i915/i915_irq.h @@ -102,6 +102,11 @@ static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv) return dev_priv->runtime_pm.irqs_enabled; } +static inline void intel_synchronize_irq(struct drm_i915_private *i915) +{ + synchronize_irq(i915->drm.pdev->irq); +} + int intel_get_crtc_scanline(struct intel_crtc *crtc); void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, u8 pipe_mask); diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c index e3b83ecb90b5..06c09ac52c74 100644 --- a/drivers/gpu/drm/i915/intel_guc_log.c +++ b/drivers/gpu/drm/i915/intel_guc_log.c @@ -615,7 +615,7 @@ void intel_guc_log_relay_close(struct intel_guc_log *log) struct drm_i915_private *i915 = guc_to_i915(guc); guc_log_disable_flush_events(log); - synchronize_irq(i915->drm.irq); + intel_synchronize_irq(i915); flush_work(&log->relay.flush_work); -- cgit v1.2.3 From 8e9ecb3e1e3366559cd4ed5ab698c391f8236a37 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 2 Jul 2019 15:09:50 +0100 Subject: drm/i915/display: Handle lost primary_port across suspend icl-dsi is dying on suspend/resume at RIP: 0010:icl_update_active_dpll+0x2c/0xa0 [i915] which appears due to the loss of the time primary_port across suspend. Protect against the potential NULL dereference by assuming ICL_PORT_DPLL_DEFAULT unless the port is actively specified otherwise. 
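The pattern is simply "start from the safe default and only override once the pointer is known to be valid", which avoids dereferencing a port that vanished across suspend. A minimal sketch with simplified types and names:

#include <stdio.h>

enum tc_mode { TC_TBT_ALT, TC_DP_ALT, TC_LEGACY };
enum dpll_id { DPLL_DEFAULT, DPLL_MG_PHY };

struct port { enum tc_mode mode; };

static enum dpll_id pick_dpll(const struct port *primary)
{
        /* Safe fallback, also taken when the port went away across suspend. */
        enum dpll_id id = DPLL_DEFAULT;

        if (primary &&
            (primary->mode == TC_DP_ALT || primary->mode == TC_LEGACY))
                id = DPLL_MG_PHY;

        return id;
}

int main(void)
{
        struct port legacy = { .mode = TC_LEGACY };

        printf("no port -> %d\n", pick_dpll(NULL));     /* 0: DPLL_DEFAULT */
        printf("legacy  -> %d\n", pick_dpll(&legacy));  /* 1: DPLL_MG_PHY */
        return 0;
}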
Fixes: 24a7bfe0c2d7 ("drm/i915: Keep the TypeC port mode fixed when the port is active") Signed-off-by: Chris Wilson Reviewed-by: Imre Deak Link: https://patchwork.freedesktop.org/patch/msgid/20190702140950.7069-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index 76a2c879efc2..f953971e7c3b 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -2883,21 +2883,16 @@ static void icl_update_active_dpll(struct intel_atomic_state *state, struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct intel_digital_port *primary_port; - enum icl_port_dpll_id port_dpll_id; + enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT; primary_port = encoder->type == INTEL_OUTPUT_DP_MST ? enc_to_mst(&encoder->base)->primary : enc_to_dig_port(&encoder->base); - switch (primary_port->tc_mode) { - case TC_PORT_TBT_ALT: - port_dpll_id = ICL_PORT_DPLL_DEFAULT; - break; - case TC_PORT_DP_ALT: - case TC_PORT_LEGACY: + if (primary_port && + (primary_port->tc_mode == TC_PORT_DP_ALT || + primary_port->tc_mode == TC_PORT_LEGACY)) port_dpll_id = ICL_PORT_DPLL_MG_PHY; - break; - } icl_set_active_port_dpll(crtc_state, port_dpll_id); } -- cgit v1.2.3 From 63251685c141ef8db611cd36609a8369d97b47da Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 3 Jul 2019 10:17:12 +0100 Subject: drm/i915/selftests: Common live setup/teardown We frequently, but not frequently enough!, remember to flush residual operations and objects at the end of a live subtest. The purpose is to cleanup after every subtest, leaving a clean slate for the next subtest, and perform early detection of leaky state. As this should ideally be common for all live subtests, pull the task into a common teardown routine. 
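In spirit, the common wrapper runs the ordinary subtest loop but performs a shared flush/teardown after every subtest, so leaked state is caught immediately rather than bleeding into the next test. A standalone sketch of that wrapper shape, with stand-in subtests and a trivial teardown:

#include <stdio.h>

struct subtest {
        const char *name;
        int (*func)(void *data);
};

/* Stand-in for the common teardown: flush outstanding work, check for leaks. */
static int live_teardown(void *data)
{
        (void)data;
        return 0;       /* return an error here to flag residual state */
}

static int run_live_subtests(const struct subtest *tests, int count, void *data)
{
        int i, err;

        for (i = 0; i < count; i++) {
                printf("running %s\n", tests[i].name);
                err = tests[i].func(data);
                if (!err)
                        err = live_teardown(data);      /* always leave a clean slate */
                if (err)
                        return err;
        }
        return 0;
}

static int dummy_subtest(void *data)
{
        (void)data;
        return 0;
}

int main(void)
{
        const struct subtest tests[] = { { "dummy", dummy_subtest } };

        return run_live_subtests(tests, 1, NULL);
}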
Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190703091726.11690-1-chris@chris-wilson.co.uk --- .../drm/i915/gem/selftests/i915_gem_client_blt.c | 11 ++---- .../gpu/drm/i915/gem/selftests/i915_gem_context.c | 2 +- .../drm/i915/gem/selftests/i915_gem_object_blt.c | 11 ++---- drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 2 +- drivers/gpu/drm/i915/gt/selftest_lrc.c | 16 +------- drivers/gpu/drm/i915/gt/selftest_timeline.c | 4 +- drivers/gpu/drm/i915/i915_selftest.h | 18 ++++++++- drivers/gpu/drm/i915/selftests/i915_gem.c | 6 +-- drivers/gpu/drm/i915/selftests/i915_selftest.c | 44 +++++++++++++++++++++- 9 files changed, 71 insertions(+), 43 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c index 855481252bda..fa79233093eb 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c @@ -11,8 +11,8 @@ static int igt_client_fill(void *arg) { - struct intel_context *ce = arg; - struct drm_i915_private *i915 = ce->gem_context->i915; + struct drm_i915_private *i915 = arg; + struct intel_context *ce = i915->engine[BCS0]->kernel_context; struct drm_i915_gem_object *obj; struct rnd_state prng; IGT_TIMEOUT(end); @@ -89,11 +89,6 @@ err_unpin: err_put: i915_gem_object_put(obj); err_flush: - mutex_lock(&i915->drm.struct_mutex); - if (igt_flush_test(i915, I915_WAIT_LOCKED)) - err = -EIO; - mutex_unlock(&i915->drm.struct_mutex); - if (err == -ENOMEM) err = 0; @@ -112,5 +107,5 @@ int i915_gem_client_blt_live_selftests(struct drm_i915_private *i915) if (!HAS_ENGINE(i915, BCS0)) return 0; - return i915_subtests(tests, i915->engine[BCS0]->kernel_context); + return i915_live_subtests(tests, i915); } diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index 53c81b5dfd69..a23c6df9b9f4 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -1731,5 +1731,5 @@ int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv) if (i915_terminally_wedged(dev_priv)) return 0; - return i915_subtests(tests, dev_priv); + return i915_live_subtests(tests, dev_priv); } diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c index e23d8c9e9298..11d37238c62c 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c @@ -11,8 +11,8 @@ static int igt_fill_blt(void *arg) { - struct intel_context *ce = arg; - struct drm_i915_private *i915 = ce->gem_context->i915; + struct drm_i915_private *i915 = arg; + struct intel_context *ce = i915->engine[BCS0]->kernel_context; struct drm_i915_gem_object *obj; struct rnd_state prng; IGT_TIMEOUT(end); @@ -83,11 +83,6 @@ err_unpin: err_put: i915_gem_object_put(obj); err_flush: - mutex_lock(&i915->drm.struct_mutex); - if (igt_flush_test(i915, I915_WAIT_LOCKED)) - err = -EIO; - mutex_unlock(&i915->drm.struct_mutex); - if (err == -ENOMEM) err = 0; @@ -106,5 +101,5 @@ int i915_gem_object_blt_live_selftests(struct drm_i915_private *i915) if (!HAS_ENGINE(i915, BCS0)) return 0; - return i915_subtests(tests, i915->engine[BCS0]->kernel_context); + return i915_live_subtests(tests, i915); } diff --git 
a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c index cf592a049a71..2d9cc3cd1f27 100644 --- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c @@ -1744,7 +1744,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915) saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck); drain_delayed_work(&i915->gpu_error.hangcheck_work); /* flush param */ - err = i915_subtests(tests, i915); + err = i915_live_subtests(tests, i915); mutex_lock(&i915->drm.struct_mutex); igt_flush_test(i915, I915_WAIT_LOCKED); diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index 0c97f953e908..11f490502ca6 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -73,7 +73,6 @@ err_ctx: err_spin: igt_spinner_fini(&spin); err_unlock: - igt_flush_test(i915, I915_WAIT_LOCKED); intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; @@ -290,8 +289,6 @@ err_map: err_obj: i915_gem_object_put(obj); err_unlock: - if (igt_flush_test(i915, I915_WAIT_LOCKED)) - err = -EIO; intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); @@ -472,8 +469,6 @@ err_ctx_lo: err_ctx_hi: kernel_context_close(ctx_hi); err_unlock: - if (igt_flush_test(i915, I915_WAIT_LOCKED)) - err = -EIO; intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; @@ -580,7 +575,6 @@ err_spin_lo: err_spin_hi: igt_spinner_fini(&spin_hi); err_unlock: - igt_flush_test(i915, I915_WAIT_LOCKED); intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; @@ -687,7 +681,6 @@ err_spin_lo: err_spin_hi: igt_spinner_fini(&spin_hi); err_unlock: - igt_flush_test(i915, I915_WAIT_LOCKED); intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; @@ -826,8 +819,6 @@ err_client_b: err_client_a: preempt_client_fini(&a); err_unlock: - if (igt_flush_test(i915, I915_WAIT_LOCKED)) - err = -EIO; intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; @@ -995,8 +986,6 @@ err_client_1: err_client_0: preempt_client_fini(&client[0]); err_unlock: - if (igt_flush_test(i915, I915_WAIT_LOCKED)) - err = -EIO; intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; @@ -1143,8 +1132,6 @@ err_client_lo: err_client_hi: preempt_client_fini(&hi); err_unlock: - if (igt_flush_test(i915, I915_WAIT_LOCKED)) - err = -EIO; intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; @@ -1270,7 +1257,6 @@ err_spin_lo: err_spin_hi: igt_spinner_fini(&spin_hi); err_unlock: - igt_flush_test(i915, I915_WAIT_LOCKED); intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; @@ -2054,5 +2040,5 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915) if (i915_terminally_wedged(i915)) return 0; - return i915_subtests(tests, i915); + return i915_live_subtests(tests, i915); } diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c index 193cc564ade2..eae3b1963bf7 100644 --- a/drivers/gpu/drm/i915/gt/selftest_timeline.c +++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c @@ -821,8 +821,6 @@ static int live_hwsp_recycle(void *arg) } out: - if (igt_flush_test(i915, I915_WAIT_LOCKED)) - err = -EIO; 
intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); @@ -841,5 +839,5 @@ int intel_timeline_live_selftests(struct drm_i915_private *i915) if (i915_terminally_wedged(i915)) return 0; - return i915_subtests(tests, i915); + return i915_live_subtests(tests, i915); } diff --git a/drivers/gpu/drm/i915/i915_selftest.h b/drivers/gpu/drm/i915/i915_selftest.h index 207e21b478f2..d9b17b9e6993 100644 --- a/drivers/gpu/drm/i915/i915_selftest.h +++ b/drivers/gpu/drm/i915/i915_selftest.h @@ -66,12 +66,28 @@ struct i915_subtest { const char *name; }; +int __i915_nop_setup(void *data); +int __i915_nop_teardown(int err, void *data); + +int __i915_live_setup(void *data); +int __i915_live_teardown(int err, void *data); + int __i915_subtests(const char *caller, + int (*setup)(void *data), + int (*teardown)(int err, void *data), const struct i915_subtest *st, unsigned int count, void *data); #define i915_subtests(T, data) \ - __i915_subtests(__func__, T, ARRAY_SIZE(T), data) + __i915_subtests(__func__, \ + __i915_nop_setup, __i915_nop_teardown, \ + T, ARRAY_SIZE(T), data) +#define i915_live_subtests(T, data) ({ \ + typecheck(struct drm_i915_private *, data); \ + __i915_subtests(__func__, \ + __i915_live_setup, __i915_live_teardown, \ + T, ARRAY_SIZE(T), data); \ +}) #define SUBTEST(x) { x, #x } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c index ed0c17bf6613..b8ffae481730 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem.c @@ -154,8 +154,6 @@ static int igt_gem_suspend(void *arg) mutex_lock(&i915->drm.struct_mutex); err = switch_to_context(i915, ctx); - if (igt_flush_test(i915, I915_WAIT_LOCKED)) - err = -EIO; mutex_unlock(&i915->drm.struct_mutex); out: mock_file_free(i915, file); @@ -195,8 +193,6 @@ static int igt_gem_hibernate(void *arg) mutex_lock(&i915->drm.struct_mutex); err = switch_to_context(i915, ctx); - if (igt_flush_test(i915, I915_WAIT_LOCKED)) - err = -EIO; mutex_unlock(&i915->drm.struct_mutex); out: mock_file_free(i915, file); @@ -213,5 +209,5 @@ int i915_gem_live_selftests(struct drm_i915_private *i915) if (i915_terminally_wedged(i915)) return 0; - return i915_subtests(tests, i915); + return i915_live_subtests(tests, i915); } diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c index b18eaefef798..f46ccf817ad5 100644 --- a/drivers/gpu/drm/i915/selftests/i915_selftest.c +++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c @@ -26,6 +26,8 @@ #include "../i915_drv.h" #include "../i915_selftest.h" +#include "igt_flush_test.h" + struct i915_selftest i915_selftest __read_mostly = { .timeout_ms = 1000, }; @@ -240,7 +242,40 @@ static bool apply_subtest_filter(const char *caller, const char *name) return result; } +int __i915_nop_setup(void *data) +{ + return 0; +} + +int __i915_nop_teardown(int err, void *data) +{ + return err; +} + +int __i915_live_setup(void *data) +{ + struct drm_i915_private *i915 = data; + + return i915_terminally_wedged(i915); +} + +int __i915_live_teardown(int err, void *data) +{ + struct drm_i915_private *i915 = data; + + mutex_lock(&i915->drm.struct_mutex); + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + err = -EIO; + mutex_unlock(&i915->drm.struct_mutex); + + i915_gem_drain_freed_objects(i915); + + return err; +} + int __i915_subtests(const char *caller, + int (*setup)(void *data), + int (*teardown)(int err, void *data), const struct i915_subtest *st, unsigned int count, void *data) @@ 
-255,10 +290,17 @@ int __i915_subtests(const char *caller, if (!apply_subtest_filter(caller, st->name)) continue; + err = setup(data); + if (err) { + pr_err(DRIVER_NAME "/%s: setup failed for %s\n", + caller, st->name); + return err; + } + pr_info(DRIVER_NAME ": Running %s/%s\n", caller, st->name); GEM_TRACE("Running %s/%s\n", caller, st->name); - err = st->func(data); + err = teardown(st->func(data), data); if (err && err != -EINTR) { pr_err(DRIVER_NAME "/%s: %s failed with error %d\n", caller, st->name, err); -- cgit v1.2.3 From 8221d21b0664d79a4319bad526f4f727d3b15981 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 3 Jul 2019 10:17:13 +0100 Subject: drm/i915/selftests: Lock the drm_mm while modifying Remember to lock the drm_mm as we modify it, lest it be modified in the background by retire/free workers! Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190703091726.11690-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index a1f0b235f56b..9b05bef15023 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -414,7 +414,9 @@ static int igt_mmap_offset_exhaustion(void *arg) drm_mm_for_each_hole(hole, mm, hole_start, hole_end) { resv.start = hole_start; resv.size = hole_end - hole_start - 1; /* PAGE_SIZE units */ + mutex_lock(&i915->drm.struct_mutex); err = drm_mm_reserve_node(mm, &resv); + mutex_unlock(&i915->drm.struct_mutex); if (err) { pr_err("Failed to trim VMA manager, err=%d\n", err); goto out_park; @@ -478,7 +480,9 @@ static int igt_mmap_offset_exhaustion(void *arg) } out: + mutex_lock(&i915->drm.struct_mutex); drm_mm_remove_node(&resv); + mutex_unlock(&i915->drm.struct_mutex); out_park: restore_retire_worker(i915); return err; -- cgit v1.2.3 From ad9e3792b0c0a63d4416d6d7da8320d11110b61e Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 3 Jul 2019 10:17:20 +0100 Subject: drm/i915/execlists: Hesitate before slicing Be a little more hesitant before injecting a timeslice, and try to take into account any change in priority that is due for the running task before switching to another task. This will allow us to arbitrarily prevent switching away from a request if we deem it necessary to disable preemption, for instance.
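The one-line change that follows compares the queue's priority hint against the running request's effective priority rather than its base priority. A small self-contained sketch of that decision, where effective_prio() is simplified to a base-plus-boost sum rather than the driver's real helper:

  /* Fold any pending boost into the running request's priority. */
  static int effective_prio(int base_prio, int boost)
  {
          return base_prio + boost;
  }

  static int need_timeslice(int queue_hint, int base_prio, int boost)
  {
          /* Only inject a slice if the waiter is at least as urgent. */
          return queue_hint >= effective_prio(base_prio, boost);
  }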
Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Cc: Mika Kuoppala Cc: Lionel Landwerlin Acked-by: Lionel Landwerlin Link: https://patchwork.freedesktop.org/patch/msgid/20190703091726.11690-9-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_lrc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 953b3938a85f..1e85e04c58c4 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -899,7 +899,7 @@ need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq) hint = max(rq_prio(list_next_entry(rq, sched.link)), engine->execlists.queue_priority_hint); - return hint >= rq_prio(rq); + return hint >= effective_prio(rq); } static bool -- cgit v1.2.3 From c03467ba40f783ebe756114bb68e13a6b404c03a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 3 Jul 2019 10:17:17 +0100 Subject: drm/i915/gem: Free pages before rcu-freeing the object As we have dropped the final reference to the object, we do not need to wait until after the rcu grace period to drop its pages. We still require struct_mutex to completely unbind the object to release the pages, so we still need a free-worker to manage that from process context. By scheduling the release of pages before waiting for the rcu should mean that we are not trapping those pages from beyond the reach of the shrinker. v2: Pass along the request to skip if the vma is busy to the underlying unbind routine, to avoid checking the reservation underneath the i915->mm.obj_lock which may be used from inside irq context. v3: Flip the bit for unbinding while active, for later convenience. Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=111035 Fixes: a93615f900bd ("drm/i915: Throw away the active object retirement complexity") Signed-off-by: Chris Wilson Cc: Matthew Auld Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190703091726.11690-6-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_object.c | 82 +++++++++++++--------------- drivers/gpu/drm/i915/gem/i915_gem_phys.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 18 ++++-- drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 3 +- drivers/gpu/drm/i915/i915_drv.h | 15 ++--- drivers/gpu/drm/i915/i915_gem.c | 8 ++- 6 files changed, 67 insertions(+), 61 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index 43194fbcbc2e..d3e96f09c6b7 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -146,6 +146,18 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file) } } +static void __i915_gem_free_object_rcu(struct rcu_head *head) +{ + struct drm_i915_gem_object *obj = + container_of(head, typeof(*obj), rcu); + struct drm_i915_private *i915 = to_i915(obj->base.dev); + + i915_gem_object_free(obj); + + GEM_BUG_ON(!atomic_read(&i915->mm.free_count)); + atomic_dec(&i915->mm.free_count); +} + static void __i915_gem_free_objects(struct drm_i915_private *i915, struct llist_node *freed) { @@ -168,22 +180,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, GEM_BUG_ON(!list_empty(&obj->vma.list)); GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma.tree)); - /* - * This serializes freeing with the shrinker. 
Since the free - * is delayed, first by RCU then by the workqueue, we want the - * shrinker to be able to free pages of unreferenced objects, - * or else we may oom whilst there are plenty of deferred - * freed objects. - */ - if (i915_gem_object_has_pages(obj) && - i915_gem_object_is_shrinkable(obj)) { - unsigned long flags; - - spin_lock_irqsave(&i915->mm.obj_lock, flags); - list_del_init(&obj->mm.link); - spin_unlock_irqrestore(&i915->mm.obj_lock, flags); - } - mutex_unlock(&i915->drm.struct_mutex); GEM_BUG_ON(atomic_read(&obj->bind_count)); @@ -197,19 +193,15 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, atomic_set(&obj->mm.pages_pin_count, 0); __i915_gem_object_put_pages(obj, I915_MM_NORMAL); GEM_BUG_ON(i915_gem_object_has_pages(obj)); + bitmap_free(obj->bit_17); if (obj->base.import_attach) drm_prime_gem_destroy(&obj->base, NULL); drm_gem_object_release(&obj->base); - bitmap_free(obj->bit_17); - i915_gem_object_free(obj); - - GEM_BUG_ON(!atomic_read(&i915->mm.free_count)); - atomic_dec(&i915->mm.free_count); - - cond_resched(); + /* But keep the pointer alive for RCU-protected lookups */ + call_rcu(&obj->rcu, __i915_gem_free_object_rcu); } intel_runtime_pm_put(&i915->runtime_pm, wakeref); } @@ -260,18 +252,34 @@ static void __i915_gem_free_work(struct work_struct *work) spin_unlock(&i915->mm.free_lock); } -static void __i915_gem_free_object_rcu(struct rcu_head *head) +void i915_gem_free_object(struct drm_gem_object *gem_obj) { - struct drm_i915_gem_object *obj = - container_of(head, typeof(*obj), rcu); + struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); struct drm_i915_private *i915 = to_i915(obj->base.dev); /* - * We reuse obj->rcu for the freed list, so we had better not treat - * it like a rcu_head from this point forwards. And we expect all - * objects to be freed via this path. + * Before we free the object, make sure any pure RCU-only + * read-side critical sections are complete, e.g. + * i915_gem_busy_ioctl(). For the corresponding synchronized + * lookup see i915_gem_object_lookup_rcu(). */ - destroy_rcu_head(&obj->rcu); + atomic_inc(&i915->mm.free_count); + + /* + * This serializes freeing with the shrinker. Since the free + * is delayed, first by RCU then by the workqueue, we want the + * shrinker to be able to free pages of unreferenced objects, + * or else we may oom whilst there are plenty of deferred + * freed objects. + */ + if (i915_gem_object_has_pages(obj) && + i915_gem_object_is_shrinkable(obj)) { + unsigned long flags; + + spin_lock_irqsave(&i915->mm.obj_lock, flags); + list_del_init(&obj->mm.link); + spin_unlock_irqrestore(&i915->mm.obj_lock, flags); + } /* * Since we require blocking on struct_mutex to unbind the freed @@ -287,20 +295,6 @@ static void __i915_gem_free_object_rcu(struct rcu_head *head) queue_work(i915->wq, &i915->mm.free_work); } -void i915_gem_free_object(struct drm_gem_object *gem_obj) -{ - struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); - - /* - * Before we free the object, make sure any pure RCU-only - * read-side critical sections are complete, e.g. - * i915_gem_busy_ioctl(). For the corresponding synchronized - * lookup see i915_gem_object_lookup_rcu(). 
- */ - atomic_inc(&to_i915(obj->base.dev)->mm.free_count); - call_rcu(&obj->rcu, __i915_gem_free_object_rcu); -} - static inline enum fb_op_origin fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain) { diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c index 7b900ee4ed8d..b9fab22ada6f 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c @@ -159,7 +159,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align) if (obj->ops != &i915_gem_shmem_ops) return -EINVAL; - err = i915_gem_object_unbind(obj); + err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE); if (err) return err; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c index d99f1a600b96..3f4c6bdcc3c3 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c @@ -88,10 +88,18 @@ static bool can_release_pages(struct drm_i915_gem_object *obj) return swap_available() || obj->mm.madv == I915_MADV_DONTNEED; } -static bool unsafe_drop_pages(struct drm_i915_gem_object *obj) +static bool unsafe_drop_pages(struct drm_i915_gem_object *obj, + unsigned long shrink) { - if (i915_gem_object_unbind(obj) == 0) + unsigned long flags; + + flags = 0; + if (shrink & I915_SHRINK_ACTIVE) + flags = I915_GEM_OBJECT_UNBIND_ACTIVE; + + if (i915_gem_object_unbind(obj, flags) == 0) __i915_gem_object_put_pages(obj, I915_MM_SHRINKER); + return !i915_gem_object_has_pages(obj); } @@ -229,9 +237,7 @@ i915_gem_shrink(struct drm_i915_private *i915, continue; if (!(shrink & I915_SHRINK_ACTIVE) && - (i915_gem_object_is_framebuffer(obj) || - !reservation_object_test_signaled_rcu(obj->base.resv, - true))) + i915_gem_object_is_framebuffer(obj)) continue; if (!(shrink & I915_SHRINK_BOUND) && @@ -246,7 +252,7 @@ i915_gem_shrink(struct drm_i915_private *i915, spin_unlock_irqrestore(&i915->mm.obj_lock, flags); - if (unsafe_drop_pages(obj)) { + if (unsafe_drop_pages(obj, shrink)) { /* May arrive from get_pages on another bo */ mutex_lock_nested(&obj->mm.lock, I915_MM_SHRINKER); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index 528b61678334..16ccec7fb7da 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -150,7 +150,8 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn, } } - ret = i915_gem_object_unbind(obj); + ret = i915_gem_object_unbind(obj, + I915_GEM_OBJECT_UNBIND_ACTIVE); if (ret == 0) ret = __i915_gem_object_put_pages(obj, I915_MM_SHRINKER); i915_gem_object_put(obj); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 99e4635fac6c..a8587e4be987 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2444,18 +2444,17 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv); static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915) { - if (!atomic_read(&i915->mm.free_count)) - return; - - /* A single pass should suffice to release all the freed objects (along + /* + * A single pass should suffice to release all the freed objects (along * most call paths) , but be a little more paranoid in that freeing * the objects does take a little amount of time, during which the rcu * callbacks could have added new objects into the freed list, and * armed the work again. 
*/ - do { + while (atomic_read(&i915->mm.free_count)) { + flush_work(&i915->mm.free_work); rcu_barrier(); - } while (flush_work(&i915->mm.free_work)); + } } static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915) @@ -2486,7 +2485,9 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, u64 alignment, u64 flags); -int i915_gem_object_unbind(struct drm_i915_gem_object *obj); +int i915_gem_object_unbind(struct drm_i915_gem_object *obj, + unsigned long flags); +#define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0) void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index b7f290b77f8f..7ade42b8ec99 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -101,7 +101,8 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, return 0; } -int i915_gem_object_unbind(struct drm_i915_gem_object *obj) +int i915_gem_object_unbind(struct drm_i915_gem_object *obj, + unsigned long flags) { struct i915_vma *vma; LIST_HEAD(still_in_list); @@ -116,7 +117,10 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj) list_move_tail(&vma->obj_link, &still_in_list); spin_unlock(&obj->vma.lock); - ret = i915_vma_unbind(vma); + ret = -EBUSY; + if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE || + !i915_vma_is_active(vma)) + ret = i915_vma_unbind(vma); spin_lock(&obj->vma.lock); } -- cgit v1.2.3 From d650d1f5cfd7ca607dbbb2023b76c2afa873246a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 3 Jul 2019 10:17:18 +0100 Subject: drm/i915: Markup potential lock for i915_active Make the lockchains more deterministic via i915_active by flagging the potential lock. Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190703091726.11690-7-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_active.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c index 584b247df9bc..13f304a29fc8 100644 --- a/drivers/gpu/drm/i915/i915_active.c +++ b/drivers/gpu/drm/i915/i915_active.c @@ -268,6 +268,8 @@ int i915_active_wait(struct i915_active *ref) int err; might_sleep(); + might_lock(&ref->mutex); + if (RB_EMPTY_ROOT(&ref->tree)) return 0; -- cgit v1.2.3 From 09480072e3ee471fe2735d46f454483f4c26853b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 3 Jul 2019 10:17:19 +0100 Subject: drm/i915: Mark up vma->active as safe for use inside shrinkers Since a shrinker may be forced to wait on GPU activity, i915_active_wait(&vma->active) must be safe for use inside a shrinker, and so let's mark up the lock as being acquired by the shrinker to avoid any nasty surprises creeping in. 
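The idiom used in the hunk below is to prime lockdep at construction time: briefly enter a fake reclaim context and declare that the mutex may be taken there, so any later allocation made while holding that mutex is reported at once instead of only when a shrinker happens to run. In kernel-style pseudocode (my_obj is a hypothetical placeholder and this fragment is not standalone):

  if (IS_ENABLED(CONFIG_LOCKDEP)) {
          /* Pretend we are inside direct reclaim... */
          fs_reclaim_acquire(GFP_KERNEL);
          /* ...and record that this lock may be taken from there. */
          might_lock(&my_obj->lock);
          fs_reclaim_release(GFP_KERNEL);
  }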
Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190703091726.11690-8-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_vma.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index c20a3022cd80..ee73baf29415 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -22,6 +22,7 @@ * */ +#include #include #include "display/intel_frontbuffer.h" @@ -120,6 +121,13 @@ vma_create(struct drm_i915_gem_object *obj, __i915_vma_active, __i915_vma_retire); INIT_ACTIVE_REQUEST(&vma->last_fence); + /* Declare ourselves safe for use inside shrinkers */ + if (IS_ENABLED(CONFIG_LOCKDEP)) { + fs_reclaim_acquire(GFP_KERNEL); + might_lock(&vma->active.mutex); + fs_reclaim_release(GFP_KERNEL); + } + INIT_LIST_HEAD(&vma->closed_link); if (view && view->type != I915_GGTT_VIEW_NORMAL) { -- cgit v1.2.3 From 56e9371bc3f3e7d6c1a197a45d550b2ce6af25f6 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Thu, 6 Jun 2019 15:42:10 +0300 Subject: drm/i915: Deal with machines that expose less than three QGV points MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When SAGV is forced to disabled/min/med/max in the BIOS pcode will only hand us a single QGV point instead of the normal three. Fix the code to deal with that instead declaring the bandwidth limit to be 0 MB/s (and thus preventing any planes from being enabled). Also shrink the max_bw sturct a bit while at it, and change the deratedbw type to unsigned since the code returns the bw as an unsigned int. Since we now keep track of how many qgv points we got from pcode we can drop the earlier check added for the "pcode doesn't support the memory subsystem query" case. Cc: felix.j.degrood@intel.com Cc: Mark Janes Cc: Matt Roper Cc: Clint Taylor Fixes: c457d9cf256e ("drm/i915: Make sure we have enough memory bandwidth on ICL") Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=110838 Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190606124210.3482-1-ville.syrjala@linux.intel.com Reviewed-by: Matt Roper --- drivers/gpu/drm/i915/display/intel_bw.c | 15 ++++++++++----- drivers/gpu/drm/i915/i915_drv.h | 5 +++-- 2 files changed, 13 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index 753ac3165061..7b908e10d32e 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -178,6 +178,8 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv) clpchgroup = (sa->deburst * deinterleave / num_channels) << i; bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1; + bi->num_qgv_points = qi.num_points; + for (j = 0; j < qi.num_points; j++) { const struct intel_qgv_point *sp = &qi.points[j]; int ct, bw; @@ -195,7 +197,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv) bi->deratedbw[j] = min(maxdebw, bw * 9 / 10); /* 90% */ - DRM_DEBUG_KMS("BW%d / QGV %d: num_planes=%d deratedbw=%d\n", + DRM_DEBUG_KMS("BW%d / QGV %d: num_planes=%d deratedbw=%u\n", i, j, bi->num_planes, bi->deratedbw[j]); } @@ -211,14 +213,17 @@ static unsigned int icl_max_bw(struct drm_i915_private *dev_priv, { int i; - /* Did we initialize the bw limits successfully? 
*/ - if (dev_priv->max_bw[0].num_planes == 0) - return UINT_MAX; - for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) { const struct intel_bw_info *bi = &dev_priv->max_bw[i]; + /* + * Pcode will not expose all QGV points when + * SAGV is forced to off/min/med/max. + */ + if (qgv_point >= bi->num_qgv_points) + return UINT_MAX; + if (num_planes >= bi->num_planes) return bi->deratedbw[qgv_point]; } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index a8587e4be987..ca3afe6e95b5 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1673,8 +1673,9 @@ struct drm_i915_private { } dram_info; struct intel_bw_info { - int num_planes; - int deratedbw[3]; + unsigned int deratedbw[3]; /* for each QGV point */ + u8 num_qgv_points; + u8 num_planes; } max_bw[6]; struct drm_private_obj bw_obj; -- cgit v1.2.3 From 4a49c2bf9ee45e7475c4c2dec74a3c034345aa5b Mon Sep 17 00:00:00 2001 From: Aditya Swarup Date: Thu, 27 Jun 2019 15:07:07 -0700 Subject: drm/i915: Use port clock to set correct N value MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use port_clock to check the clock values in n/cts lookup table instead of crtc_clock. As port_clock is already adjusted based on color mode set (8 bit or deep color), this will help in checking clock values for deep color modes from n/cts lookup table. Signed-off-by: Aditya Swarup Cc: Clint Taylor Cc: Jani Nikula Cc: Ville Syrjälä Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190627220708.31700-1-aditya.swarup@intel.com --- drivers/gpu/drm/i915/display/intel_audio.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c index 840daff12246..156d524c29d9 100644 --- a/drivers/gpu/drm/i915/display/intel_audio.c +++ b/drivers/gpu/drm/i915/display/intel_audio.c @@ -212,13 +212,11 @@ static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_sta static int audio_config_hdmi_get_n(const struct intel_crtc_state *crtc_state, int rate) { - const struct drm_display_mode *adjusted_mode = - &crtc_state->base.adjusted_mode; int i; for (i = 0; i < ARRAY_SIZE(hdmi_aud_ncts); i++) { if (rate == hdmi_aud_ncts[i].sample_rate && - adjusted_mode->crtc_clock == hdmi_aud_ncts[i].clock) { + crtc_state->port_clock == hdmi_aud_ncts[i].clock) { return hdmi_aud_ncts[i].n; } } -- cgit v1.2.3 From 2c291417c1646a8651dd9c8db9ad1ce4ff2150f7 Mon Sep 17 00:00:00 2001 From: Aditya Swarup Date: Thu, 27 Jun 2019 15:07:08 -0700 Subject: drm/i915: Add N & CTS values for 10/12 bit deep color MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adding N & CTS values for 10/12 bit deep color from Appendix C table in HDMI 2.0 spec. The correct values for N is not chosen automatically by hardware for deep color modes. 
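With the extra tables in place, the lookup reduces to picking a table by the pipe's bpp (36 for 12bpc, 30 for 10bpc, otherwise the 8bpc table) and then searching it by sample rate and port clock. A self-contained sketch of the search step, with illustrative names rather than the i915 symbols:

  struct ncts_entry {
          int sample_rate;
          int clock;
          int n;
          int cts;
  };

  static int lookup_n(const struct ncts_entry *tbl, int size,
                      int rate, int clock)
  {
          int i;

          for (i = 0; i < size; i++) {
                  if (tbl[i].sample_rate == rate && tbl[i].clock == clock)
                          return tbl[i].n;
          }

          /* 0 means no override, i.e. fall back to the automatic N value. */
          return 0;
  }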
v2: Remove unnecessary initialization of size Signed-off-by: Aditya Swarup Cc: Clint Taylor Cc: Jani Nikula Cc: Ville Syrjälä Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190627220708.31700-2-aditya.swarup@intel.com --- drivers/gpu/drm/i915/display/intel_audio.c | 79 +++++++++++++++++++++++++----- 1 file changed, 68 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c index 156d524c29d9..c8fd35a7ca42 100644 --- a/drivers/gpu/drm/i915/display/intel_audio.c +++ b/drivers/gpu/drm/i915/display/intel_audio.c @@ -72,6 +72,13 @@ struct dp_aud_n_m { u16 n; }; +struct hdmi_aud_ncts { + int sample_rate; + int clock; + int n; + int cts; +}; + /* Values according to DP 1.4 Table 2-104 */ static const struct dp_aud_n_m dp_aud_n_m[] = { { 32000, LC_162M, 1024, 10125 }, @@ -148,12 +155,7 @@ static const struct { #define TMDS_594M 594000 #define TMDS_593M 593407 -static const struct { - int sample_rate; - int clock; - int n; - int cts; -} hdmi_aud_ncts[] = { +static const struct hdmi_aud_ncts hdmi_aud_ncts_24bpp[] = { { 32000, TMDS_296M, 5824, 421875 }, { 32000, TMDS_297M, 3072, 222750 }, { 32000, TMDS_593M, 5824, 843750 }, @@ -184,6 +186,49 @@ static const struct { { 192000, TMDS_594M, 24576, 594000 }, }; +/* Appendix C - N & CTS values for deep color from HDMI 2.0 spec*/ +/* HDMI N/CTS table for 10 bit deep color(30 bpp)*/ +#define TMDS_371M 371250 +#define TMDS_370M 370878 + +static const struct hdmi_aud_ncts hdmi_aud_ncts_30bpp[] = { + { 32000, TMDS_370M, 5824, 527344 }, + { 32000, TMDS_371M, 6144, 556875 }, + { 44100, TMDS_370M, 8918, 585938 }, + { 44100, TMDS_371M, 4704, 309375 }, + { 88200, TMDS_370M, 17836, 585938 }, + { 88200, TMDS_371M, 9408, 309375 }, + { 176400, TMDS_370M, 35672, 585938 }, + { 176400, TMDS_371M, 18816, 309375 }, + { 48000, TMDS_370M, 11648, 703125 }, + { 48000, TMDS_371M, 5120, 309375 }, + { 96000, TMDS_370M, 23296, 703125 }, + { 96000, TMDS_371M, 10240, 309375 }, + { 192000, TMDS_370M, 46592, 703125 }, + { 192000, TMDS_371M, 20480, 309375 }, +}; + +/* HDMI N/CTS table for 12 bit deep color(36 bpp)*/ +#define TMDS_445_5M 445500 +#define TMDS_445M 445054 + +static const struct hdmi_aud_ncts hdmi_aud_ncts_36bpp[] = { + { 32000, TMDS_445M, 5824, 632813 }, + { 32000, TMDS_445_5M, 4096, 445500 }, + { 44100, TMDS_445M, 8918, 703125 }, + { 44100, TMDS_445_5M, 4704, 371250 }, + { 88200, TMDS_445M, 17836, 703125 }, + { 88200, TMDS_445_5M, 9408, 371250 }, + { 176400, TMDS_445M, 35672, 703125 }, + { 176400, TMDS_445_5M, 18816, 371250 }, + { 48000, TMDS_445M, 5824, 421875 }, + { 48000, TMDS_445_5M, 5120, 371250 }, + { 96000, TMDS_445M, 11648, 421875 }, + { 96000, TMDS_445_5M, 10240, 371250 }, + { 192000, TMDS_445M, 23296, 421875 }, + { 192000, TMDS_445_5M, 20480, 371250 }, +}; + /* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */ static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_state) { @@ -212,12 +257,24 @@ static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_sta static int audio_config_hdmi_get_n(const struct intel_crtc_state *crtc_state, int rate) { - int i; + const struct hdmi_aud_ncts *hdmi_ncts_table; + int i, size; + + if (crtc_state->pipe_bpp == 36) { + hdmi_ncts_table = hdmi_aud_ncts_36bpp; + size = ARRAY_SIZE(hdmi_aud_ncts_36bpp); + } else if (crtc_state->pipe_bpp == 30) { + hdmi_ncts_table = hdmi_aud_ncts_30bpp; + size = ARRAY_SIZE(hdmi_aud_ncts_30bpp); + } else { + 
hdmi_ncts_table = hdmi_aud_ncts_24bpp; + size = ARRAY_SIZE(hdmi_aud_ncts_24bpp); + } - for (i = 0; i < ARRAY_SIZE(hdmi_aud_ncts); i++) { - if (rate == hdmi_aud_ncts[i].sample_rate && - crtc_state->port_clock == hdmi_aud_ncts[i].clock) { - return hdmi_aud_ncts[i].n; + for (i = 0; i < size; i++) { + if (rate == hdmi_ncts_table[i].sample_rate && + crtc_state->port_clock == hdmi_ncts_table[i].clock) { + return hdmi_ncts_table[i].n; } } return 0; -- cgit v1.2.3 From 068610895ebd4bd86f496f01eb7b97e56d7269b2 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 3 Jul 2019 18:19:12 +0100 Subject: drm/i915/gtt: Defer the free for alloc error paths If we hit an error while allocating the page tables, we have to unwind the incomplete updates, and wish to free the unused pd. However, we are not allowed to be hoding the spinlock at that point, and so must use the later free to defer it until after we drop the lock. <3> [414.363795] BUG: sleeping function called from invalid context at drivers/gpu/drm/i915/i915_gem_gtt.c:472 <3> [414.364167] in_atomic(): 1, irqs_disabled(): 0, pid: 3905, name: i915_selftest <4> [414.364406] 3 locks held by i915_selftest/3905: <4> [414.364408] #0: 0000000034fe8aa8 (&dev->mutex){....}, at: device_driver_attach+0x18/0x50 <4> [414.364415] #1: 000000006bd8a560 (&dev->struct_mutex){+.+.}, at: igt_ctx_exec+0xb7/0x410 [i915] <4> [414.364476] #2: 000000003dfdc766 (&(&pd->lock)->rlock){+.+.}, at: gen8_ppgtt_alloc_pdp+0x448/0x540 [i915] <3> [414.364529] Preemption disabled at: <4> [414.364530] [<0000000000000000>] 0x0 <4> [414.364696] CPU: 0 PID: 3905 Comm: i915_selftest Tainted: G U 5.2.0-rc7-CI-CI_DRM_6403+ #1 <4> [414.364698] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.10.1-0-g8891697-prebuilt.qemu-project.org 04/01/2014 <4> [414.364699] Call Trace: <4> [414.364704] dump_stack+0x67/0x9b <4> [414.364708] ___might_sleep+0x167/0x250 <4> [414.364777] vm_free_page+0x24/0xc0 [i915] <4> [414.364852] free_pd+0xf/0x20 [i915] <4> [414.364897] gen8_ppgtt_alloc_pdp+0x489/0x540 [i915] <4> [414.364946] gen8_ppgtt_alloc_4lvl+0x8e/0x2e0 [i915] <4> [414.364992] ppgtt_bind_vma+0x2e/0x60 [i915] <4> [414.365039] i915_vma_bind+0xe8/0x2c0 [i915] <4> [414.365088] __i915_vma_do_pin+0xa1/0xd20 [i915] Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=111050 Fixes: 1d1b5490b91c ("drm/i915/gtt: Replace struct_mutex serialisation for allocation") Signed-off-by: Chris Wilson Cc: Matthew Auld Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190703171913.16585-3-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_gtt.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 9e76347e039e..1065753e86fb 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1489,7 +1489,8 @@ unwind_pd: gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe); GEM_BUG_ON(!atomic_read(&pdp->used)); atomic_dec(&pdp->used); - free_pd(vm, pd); + GEM_BUG_ON(alloc); + alloc = pd; /* defer the free to after the lock */ } spin_unlock(&pdp->lock); unwind: @@ -1558,7 +1559,8 @@ unwind_pdp: spin_lock(&pml4->lock); if (atomic_dec_and_test(&pdp->used)) { gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e); - free_pd(vm, pdp); + GEM_BUG_ON(alloc); + alloc = pdp; /* defer the free until after the lock */ } spin_unlock(&pml4->lock); unwind: -- cgit v1.2.3 From 2006058e9988421a113e8edc004a8e0eae1a6d3f Mon Sep 
17 00:00:00 2001 From: Chris Wilson Date: Thu, 4 Jul 2019 10:19:25 +0100 Subject: drm/i915: Move the renderstate setup under gt/ The render state is used to initialise the default RCS context, and only used during early setup from within the gt code. As such, it makes a good candidate for placing within gt/, even if it is not yet entirely clean of our GEM heritage. Signed-off-by: Chris Wilson Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190704091925.7391-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/Makefile | 14 +- drivers/gpu/drm/i915/gt/gen6_renderstate.c | 315 ++++++++ drivers/gpu/drm/i915/gt/gen7_renderstate.c | 279 +++++++ drivers/gpu/drm/i915/gt/gen8_renderstate.c | 983 +++++++++++++++++++++++++ drivers/gpu/drm/i915/gt/gen9_renderstate.c | 999 ++++++++++++++++++++++++++ drivers/gpu/drm/i915/gt/intel_lrc.c | 6 +- drivers/gpu/drm/i915/gt/intel_renderstate.c | 234 ++++++ drivers/gpu/drm/i915/gt/intel_renderstate.h | 51 ++ drivers/gpu/drm/i915/gt/intel_ringbuffer.c | 7 +- drivers/gpu/drm/i915/i915_gem_render_state.c | 235 ------ drivers/gpu/drm/i915/i915_gem_render_state.h | 31 - drivers/gpu/drm/i915/intel_renderstate.h | 47 -- drivers/gpu/drm/i915/intel_renderstate_gen6.c | 315 -------- drivers/gpu/drm/i915/intel_renderstate_gen7.c | 279 ------- drivers/gpu/drm/i915/intel_renderstate_gen8.c | 983 ------------------------- drivers/gpu/drm/i915/intel_renderstate_gen9.c | 999 -------------------------- 16 files changed, 2874 insertions(+), 2903 deletions(-) create mode 100644 drivers/gpu/drm/i915/gt/gen6_renderstate.c create mode 100644 drivers/gpu/drm/i915/gt/gen7_renderstate.c create mode 100644 drivers/gpu/drm/i915/gt/gen8_renderstate.c create mode 100644 drivers/gpu/drm/i915/gt/gen9_renderstate.c create mode 100644 drivers/gpu/drm/i915/gt/intel_renderstate.c create mode 100644 drivers/gpu/drm/i915/gt/intel_renderstate.h delete mode 100644 drivers/gpu/drm/i915/i915_gem_render_state.c delete mode 100644 drivers/gpu/drm/i915/i915_gem_render_state.h delete mode 100644 drivers/gpu/drm/i915/intel_renderstate.h delete mode 100644 drivers/gpu/drm/i915/intel_renderstate_gen6.c delete mode 100644 drivers/gpu/drm/i915/intel_renderstate_gen7.c delete mode 100644 drivers/gpu/drm/i915/intel_renderstate_gen8.c delete mode 100644 drivers/gpu/drm/i915/intel_renderstate_gen9.c (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 82c49ad16361..5266dbeab01f 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -78,12 +78,19 @@ gt-y += \ gt/intel_gt_pm.o \ gt/intel_hangcheck.o \ gt/intel_lrc.o \ + gt/intel_renderstate.o \ gt/intel_reset.o \ gt/intel_ringbuffer.o \ gt/intel_mocs.o \ gt/intel_sseu.o \ gt/intel_timeline.o \ gt/intel_workarounds.o +# autogenerated null render state +gt-y += \ + gt/gen6_renderstate.o \ + gt/gen7_renderstate.o \ + gt/gen8_renderstate.o \ + gt/gen9_renderstate.o gt-$(CONFIG_DRM_I915_SELFTEST) += \ gt/mock_engine.o i915-y += $(gt-y) @@ -123,7 +130,6 @@ i915-y += \ i915_gem_fence_reg.o \ i915_gem_gtt.o \ i915_gem.o \ - i915_gem_render_state.o \ i915_globals.o \ i915_query.o \ i915_request.o \ @@ -144,12 +150,6 @@ i915-y += intel_uc.o \ intel_huc.o \ intel_huc_fw.o -# autogenerated null render state -i915-y += intel_renderstate_gen6.o \ - intel_renderstate_gen7.o \ - intel_renderstate_gen8.o \ - intel_renderstate_gen9.o - # modesetting core code obj-y += display/ i915-y += \ diff --git a/drivers/gpu/drm/i915/gt/gen6_renderstate.c 
b/drivers/gpu/drm/i915/gt/gen6_renderstate.c new file mode 100644 index 000000000000..11c8e7b3dd7c --- /dev/null +++ b/drivers/gpu/drm/i915/gt/gen6_renderstate.c @@ -0,0 +1,315 @@ +/* + * Copyright © 2014 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Generated by: intel-gpu-tools-1.8-220-g01153e7 + */ + +#include "intel_renderstate.h" + +static const u32 gen6_null_state_relocs[] = { + 0x00000020, + 0x00000024, + 0x0000002c, + 0x000001e0, + 0x000001e4, + -1, +}; + +static const u32 gen6_null_state_batch[] = { + 0x69040000, + 0x790d0001, + 0x00000000, + 0x00000000, + 0x78180000, + 0x00000001, + 0x61010008, + 0x00000000, + 0x00000001, /* reloc */ + 0x00000001, /* reloc */ + 0x00000000, + 0x00000001, /* reloc */ + 0x00000000, + 0x00000001, + 0x00000000, + 0x00000001, + 0x61020000, + 0x00000000, + 0x78050001, + 0x00000018, + 0x00000000, + 0x780d1002, + 0x00000000, + 0x00000000, + 0x00000420, + 0x78150003, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78100004, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78160003, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78110005, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78120002, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78170003, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x79050005, + 0xe0040000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x79100000, + 0x00000000, + 0x79000002, + 0xffffffff, + 0x00000000, + 0x00000000, + 0x780e0002, + 0x00000441, + 0x00000401, + 0x00000401, + 0x78021002, + 0x00000000, + 0x00000000, + 0x00000400, + 0x78130012, + 0x00400810, + 0x00000000, + 0x20000000, + 0x04000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78140007, + 0x00000280, + 0x08080000, + 0x00000000, + 0x00060000, + 0x4e080002, + 0x00100400, + 0x00000000, + 0x00000000, + 0x78090005, + 0x02000000, + 0x22220000, + 0x02f60000, + 0x11330000, + 0x02850004, + 0x11220000, + 0x78011002, + 0x00000000, + 0x00000000, + 0x00000200, + 0x78080003, + 0x00002000, + 0x00000448, /* reloc */ + 0x00000448, /* reloc */ + 0x00000000, + 0x05000000, /* cmds end */ + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000220, /* state start */ + 0x00000240, + 0x00000000, + 
0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x0060005a, + 0x204077be, + 0x000000c0, + 0x008d0040, + 0x0060005a, + 0x206077be, + 0x000000c0, + 0x008d0080, + 0x0060005a, + 0x208077be, + 0x000000d0, + 0x008d0040, + 0x0060005a, + 0x20a077be, + 0x000000d0, + 0x008d0080, + 0x00000201, + 0x20080061, + 0x00000000, + 0x00000000, + 0x00600001, + 0x20200022, + 0x008d0000, + 0x00000000, + 0x02800031, + 0x21c01cc9, + 0x00000020, + 0x0a8a0001, + 0x00600001, + 0x204003be, + 0x008d01c0, + 0x00000000, + 0x00600001, + 0x206003be, + 0x008d01e0, + 0x00000000, + 0x00600001, + 0x208003be, + 0x008d0200, + 0x00000000, + 0x00600001, + 0x20a003be, + 0x008d0220, + 0x00000000, + 0x00600001, + 0x20c003be, + 0x008d0240, + 0x00000000, + 0x00600001, + 0x20e003be, + 0x008d0260, + 0x00000000, + 0x00600001, + 0x210003be, + 0x008d0280, + 0x00000000, + 0x00600001, + 0x212003be, + 0x008d02a0, + 0x00000000, + 0x05800031, + 0x24001cc8, + 0x00000040, + 0x90019000, + 0x0000007e, + 0x00000000, + 0x00000000, + 0x00000000, + 0x0000007e, + 0x00000000, + 0x00000000, + 0x00000000, + 0x0000007e, + 0x00000000, + 0x00000000, + 0x00000000, + 0x0000007e, + 0x00000000, + 0x00000000, + 0x00000000, + 0x0000007e, + 0x00000000, + 0x00000000, + 0x00000000, + 0x0000007e, + 0x00000000, + 0x00000000, + 0x00000000, + 0x0000007e, + 0x00000000, + 0x00000000, + 0x00000000, + 0x0000007e, + 0x00000000, + 0x00000000, + 0x00000000, + 0x30000000, + 0x00000124, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0xf99a130c, + 0x799a130c, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x80000031, + 0x00000003, + 0x00000000, /* state end */ +}; + +RO_RENDERSTATE(6); diff --git a/drivers/gpu/drm/i915/gt/gen7_renderstate.c b/drivers/gpu/drm/i915/gt/gen7_renderstate.c new file mode 100644 index 000000000000..655180646152 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/gen7_renderstate.c @@ -0,0 +1,279 @@ +/* + * Copyright © 2014 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + * Generated by: intel-gpu-tools-1.8-220-g01153e7 + */ + +#include "intel_renderstate.h" + +static const u32 gen7_null_state_relocs[] = { + 0x0000000c, + 0x00000010, + 0x00000018, + 0x000001ec, + -1, +}; + +static const u32 gen7_null_state_batch[] = { + 0x69040000, + 0x61010008, + 0x00000000, + 0x00000001, /* reloc */ + 0x00000001, /* reloc */ + 0x00000000, + 0x00000001, /* reloc */ + 0x00000000, + 0x00000001, + 0x00000000, + 0x00000001, + 0x790d0002, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78180000, + 0x00000001, + 0x79160000, + 0x00000008, + 0x78300000, + 0x02010040, + 0x78310000, + 0x04000000, + 0x78320000, + 0x04000000, + 0x78330000, + 0x02000000, + 0x78100004, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x781b0005, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x781c0002, + 0x00000000, + 0x00000000, + 0x00000000, + 0x781d0004, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78110005, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78120002, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78210000, + 0x00000000, + 0x78130005, + 0x00000000, + 0x20000000, + 0x04000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78140001, + 0x20000800, + 0x00000000, + 0x781e0001, + 0x00000000, + 0x00000000, + 0x78050005, + 0xe0040000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78040001, + 0x00000000, + 0x00000000, + 0x78240000, + 0x00000240, + 0x78230000, + 0x00000260, + 0x782f0000, + 0x00000280, + 0x781f000c, + 0x00400810, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78200006, + 0x000002c0, + 0x08080000, + 0x00000000, + 0x28000402, + 0x00060000, + 0x00000000, + 0x00000000, + 0x78090005, + 0x02000000, + 0x22220000, + 0x02f60000, + 0x11230000, + 0x02f60004, + 0x11230000, + 0x78080003, + 0x00006008, + 0x00000340, /* reloc */ + 0xffffffff, + 0x00000000, + 0x782a0000, + 0x00000360, + 0x79000002, + 0xffffffff, + 0x00000000, + 0x00000000, + 0x7b000005, + 0x0000000f, + 0x00000003, + 0x00000000, + 0x00000001, + 0x00000000, + 0x00000000, + 0x05000000, /* cmds end */ + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000031, /* state start */ + 0x00000003, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0xf99a130c, + 0x799a130c, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000492, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x0080005a, + 0x2e2077bd, + 0x000000c0, + 0x008d0040, + 0x0080005a, + 0x2e6077bd, + 0x000000d0, + 0x008d0040, + 0x02800031, + 0x21801fa9, + 0x008d0e20, + 0x08840001, + 0x00800001, + 0x2e2003bd, + 0x008d0180, + 0x00000000, + 0x00800001, + 0x2e6003bd, + 0x008d01c0, + 0x00000000, + 0x00800001, + 0x2ea003bd, + 0x008d0200, + 0x00000000, + 0x00800001, + 0x2ee003bd, + 0x008d0240, + 0x00000000, + 0x05800031, + 0x20001fa8, + 0x008d0e20, + 0x90031000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000380, + 0x000003a0, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 
0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, /* state end */ +}; + +RO_RENDERSTATE(7); diff --git a/drivers/gpu/drm/i915/gt/gen8_renderstate.c b/drivers/gpu/drm/i915/gt/gen8_renderstate.c new file mode 100644 index 000000000000..95288a34c15d --- /dev/null +++ b/drivers/gpu/drm/i915/gt/gen8_renderstate.c @@ -0,0 +1,983 @@ +/* + * Copyright © 2014 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Generated by: intel-gpu-tools-1.8-220-g01153e7 + */ + +#include "intel_renderstate.h" + +static const u32 gen8_null_state_relocs[] = { + 0x00000798, + 0x000007a4, + 0x000007ac, + 0x000007bc, + -1, +}; + +static const u32 gen8_null_state_batch[] = { + 0x7a000004, + 0x01000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x69040000, + 0x78140000, + 0x04000000, + 0x7820000a, + 0x00000000, + 0x00000000, + 0x80000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78130002, + 0x00000000, + 0x00000000, + 0x02001808, + 0x781f0002, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78510009, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78100007, + 0x00000000, + 0x00000000, + 0x00010000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x781b0007, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000800, + 0x00000000, + 0x78110008, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x781e0003, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x781d0007, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78120002, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78500003, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x781c0002, + 0x00000000, + 0x00000000, + 0x00000000, + 0x780c0000, + 0x00000000, + 0x78520003, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78300000, + 0x08010040, + 0x78310000, + 0x1e000000, + 0x78320000, + 0x1e000000, + 0x78330000, + 0x1e000000, + 0x79190002, + 0x00000000, + 0x00000000, + 0x00000000, + 0x791a0002, + 0x00000000, + 0x00000000, + 0x00000000, + 0x791b0002, + 0x00000000, + 0x00000000, + 
0x00000000, + 0x79120000, + 0x00000000, + 0x79130000, + 0x00000000, + 0x79140000, + 0x00000000, + 0x79150000, + 0x00000000, + 0x79160000, + 0x00000000, + 0x78150009, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78190009, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x781a0009, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78160009, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78170009, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78490001, + 0x00000000, + 0x00000000, + 0x784a0000, + 0x00000000, + 0x784b0000, + 0x00000004, + 0x79170101, + 0x00000000, + 0x00000080, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, 
+ 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x79180006, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x79180006, + 0x20000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x79180006, + 0x40000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x79180006, + 0x60000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x6101000e, + 0x00000001, /* reloc */ + 0x00000000, + 0x00000000, + 0x00000001, /* reloc */ + 0x00000000, + 0x00000001, /* reloc */ + 0x00000000, + 0x00000001, + 0x00000000, + 0x00000001, /* reloc */ + 0x00000000, + 0x00001001, + 0x00001001, + 0x00000001, + 0x00001001, + 0x61020001, + 0x00000000, + 0x00000000, + 0x79000002, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78050006, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x79040002, + 0x00000000, + 0x00000000, + 0x00000000, + 0x79040002, + 0x40000000, + 0x00000000, + 0x00000000, + 0x79040002, + 0x80000000, + 0x00000000, + 0x00000000, + 0x79040002, + 0xc0000000, + 0x00000000, + 0x00000000, + 0x79080001, + 0x00000000, + 0x00000000, + 0x790a0001, + 0x00000000, + 0x00000000, + 0x78060003, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78070003, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78040001, + 0x00000000, + 0x00000000, + 0x79110000, + 0x00000000, + 0x780d0000, + 0x00000000, + 0x79060000, + 0x00000000, + 0x7907001f, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x7902000f, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x790c000f, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x780a0003, + 
0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78080083, + 0x00004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x04004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x08004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x0c004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x10004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x14004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x18004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x1c004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x20004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x24004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x28004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x2c004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x30004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x34004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x38004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x3c004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x40004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x44004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x48004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x4c004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x50004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x54004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x58004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x5c004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x60004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x64004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x68004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x6c004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x70004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x74004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x7c004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x80004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78090043, + 0x02000000, + 0x22220000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x680b0001, + 0x78260000, + 0x00000000, + 0x78270000, + 0x00000000, + 0x78280000, + 0x00000000, + 0x78290000, + 0x00000000, + 0x782a0000, + 0x00000000, + 0x780e0000, + 0x00000dc1, + 0x78240000, + 0x00000e01, + 0x784f0000, + 0x80000100, + 0x784d0000, + 0x40000000, + 0x782b0000, + 0x00000000, + 0x782c0000, + 0x00000000, + 0x782d0000, + 0x00000000, + 0x782e0000, + 0x00000000, + 0x782f0000, + 0x00000000, + 0x780f0000, + 0x00000000, + 0x78230000, + 0x00000e60, + 0x78210000, + 0x00000e80, + 0x7b000005, + 0x00000004, + 0x00000001, + 0x00000000, + 0x00000001, + 0x00000000, + 0x00000000, + 0x05000000, /* cmds end */ + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, 
+ 0x00000000, + 0x00000000, + 0x00000000, /* state start */ + 0x00000000, + 0x3f800000, + 0x3f800000, + 0x3f800000, + 0x3f800000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, /* state end */ +}; + +RO_RENDERSTATE(8); diff --git a/drivers/gpu/drm/i915/gt/gen9_renderstate.c b/drivers/gpu/drm/i915/gt/gen9_renderstate.c new file mode 100644 index 000000000000..7d3ac02f0177 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/gen9_renderstate.c @@ -0,0 +1,999 @@ +/* + * Copyright © 2014 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + * Generated by: intel-gpu-tools-1.19-177-g68e2eab2 + */ + +#include "intel_renderstate.h" + +static const u32 gen9_null_state_relocs[] = { + 0x000007a8, + 0x000007b4, + 0x000007bc, + 0x000007cc, + -1, +}; + +static const u32 gen9_null_state_batch[] = { + 0x7a000004, + 0x01000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x69040300, + 0x78140000, + 0x04000000, + 0x7820000a, + 0x00000000, + 0x00000000, + 0x80000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78130002, + 0x00000000, + 0x00000000, + 0x02001808, + 0x781f0004, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78510009, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78100007, + 0x00000000, + 0x00000000, + 0x00010000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x781b0007, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000800, + 0x00000000, + 0x78110008, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x781e0003, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x781d0009, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78120002, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78500003, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x781c0002, + 0x00000000, + 0x00000000, + 0x00000000, + 0x780c0000, + 0x00000000, + 0x78520003, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78300000, + 0x08010040, + 0x78310000, + 0x1e000000, + 0x78320000, + 0x1e000000, + 0x78330000, + 0x1e000000, + 0x79190002, + 0x00000000, + 0x00000000, + 0x00000000, + 0x791a0002, + 0x00000000, + 0x00000000, + 0x00000000, + 0x791b0002, + 0x00000000, + 0x00000000, + 0x00000000, + 0x79120000, + 0x00000000, + 0x79130000, + 0x00000000, + 0x79140000, + 0x00000000, + 0x79150000, + 0x00000000, + 0x79160000, + 0x00000000, + 0x78150009, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78190009, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x781a0009, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78160009, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78170009, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78490001, + 0x00000000, + 0x00000000, + 0x784a0000, + 0x00000000, + 0x784b0000, + 0x00000004, + 0x79170101, + 0x00000000, + 0x00000080, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 
0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x79180006, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x79180006, + 0x20000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x79180006, + 0x40000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x79180006, + 0x60000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, 
+ 0x61010011, + 0x00000001, /* reloc */ + 0x00000000, + 0x00000000, + 0x00000001, /* reloc */ + 0x00000000, + 0x00000001, /* reloc */ + 0x00000000, + 0x00000001, + 0x00000000, + 0x00000001, /* reloc */ + 0x00000000, + 0x00001001, + 0x00001001, + 0x00000001, + 0x00001001, + 0x00000000, + 0x00000000, + 0x00000000, + 0x61020001, + 0x00000000, + 0x00000000, + 0x79000002, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78050006, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x79040002, + 0x00000000, + 0x00000000, + 0x00000000, + 0x79040002, + 0x40000000, + 0x00000000, + 0x00000000, + 0x79040002, + 0x80000000, + 0x00000000, + 0x00000000, + 0x79040002, + 0xc0000000, + 0x00000000, + 0x00000000, + 0x79080001, + 0x00000000, + 0x00000000, + 0x790a0001, + 0x00000000, + 0x00000000, + 0x78060003, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78070003, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78040001, + 0x00000000, + 0x00000000, + 0x79110000, + 0x00000000, + 0x780d0000, + 0x00000000, + 0x79060000, + 0x00000000, + 0x7907001f, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x7902000f, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x790c000f, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x780a0003, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78080083, + 0x00004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x04004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x08004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x0c004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x10004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x14004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x18004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x1c004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x20004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x24004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x28004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x2c004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x30004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x34004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x38004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x3c004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x40004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x44004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x48004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x4c004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x50004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x54004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x58004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x5c004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x60004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x64004000, + 0x00000000, + 
0x00000000, + 0x00000000, + 0x68004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x6c004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x70004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x74004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x7c004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x80004000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78090043, + 0x02000000, + 0x22220000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x78550003, + 0x0000000f, + 0x00000000, + 0x00000000, + 0x00000000, + 0x680b0001, + 0x780e0000, + 0x00000e01, + 0x78240000, + 0x00000e41, + 0x784f0000, + 0x80000100, + 0x784d0000, + 0x40000000, + 0x782b0000, + 0x00000000, + 0x782c0000, + 0x00000000, + 0x782d0000, + 0x00000000, + 0x782e0000, + 0x00000000, + 0x782f0000, + 0x00000000, + 0x780f0000, + 0x00000000, + 0x78230000, + 0x00000ea0, + 0x78210000, + 0x00000ec0, + 0x78260000, + 0x00000000, + 0x78270000, + 0x00000000, + 0x78280000, + 0x00000000, + 0x78290000, + 0x00000000, + 0x782a0000, + 0x00000000, + 0x7b000005, + 0x00000004, + 0x00000001, + 0x00000000, + 0x00000001, + 0x00000000, + 0x00000000, + 0x05000000, /* cmds end */ + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, /* state start */ + 0x00000000, + 0x3f800000, + 0x3f800000, + 0x3f800000, + 0x3f800000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, + 0x00000000, /* state end */ +}; + +RO_RENDERSTATE(9); diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 1e85e04c58c4..f5b09b29f50e 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -135,13 +135,13 @@ #include "gem/i915_gem_context.h" -#include "gt/intel_gt.h" #include "i915_drv.h" -#include "i915_gem_render_state.h" #include "i915_vgpu.h" #include 
"intel_engine_pm.h" +#include "intel_gt.h" #include "intel_lrc_reg.h" #include "intel_mocs.h" +#include "intel_renderstate.h" #include "intel_reset.h" #include "intel_workarounds.h" @@ -2677,7 +2677,7 @@ static int gen8_init_rcs_context(struct i915_request *rq) if (ret) DRM_ERROR("MOCS failed to program: expect performance issues.\n"); - return i915_gem_render_state_emit(rq); + return intel_renderstate_emit(rq); } static void execlists_park(struct intel_engine_cs *engine) diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c new file mode 100644 index 000000000000..06a8dc40b19f --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c @@ -0,0 +1,234 @@ +/* + * Copyright © 2014 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Mika Kuoppala + * + */ + +#include "i915_drv.h" +#include "intel_renderstate.h" + +struct intel_renderstate { + const struct intel_renderstate_rodata *rodata; + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + u32 batch_offset; + u32 batch_size; + u32 aux_offset; + u32 aux_size; +}; + +static const struct intel_renderstate_rodata * +render_state_get_rodata(const struct intel_engine_cs *engine) +{ + if (engine->id != RCS0) + return NULL; + + switch (INTEL_GEN(engine->i915)) { + case 6: + return &gen6_null_state; + case 7: + return &gen7_null_state; + case 8: + return &gen8_null_state; + case 9: + return &gen9_null_state; + } + + return NULL; +} + +/* + * Macro to add commands to auxiliary batch. + * This macro only checks for page overflow before inserting the commands, + * this is sufficient as the null state generator makes the final batch + * with two passes to build command and state separately. At this point + * the size of both are known and it compacts them by relocating the state + * right after the commands taking care of alignment so we should sufficient + * space below them for adding new commands. 
+ */ +#define OUT_BATCH(batch, i, val) \ + do { \ + if ((i) >= PAGE_SIZE / sizeof(u32)) \ + goto err; \ + (batch)[(i)++] = (val); \ + } while(0) + +static int render_state_setup(struct intel_renderstate *so, + struct drm_i915_private *i915) +{ + const struct intel_renderstate_rodata *rodata = so->rodata; + unsigned int i = 0, reloc_index = 0; + unsigned int needs_clflush; + u32 *d; + int ret; + + ret = i915_gem_object_prepare_write(so->obj, &needs_clflush); + if (ret) + return ret; + + d = kmap_atomic(i915_gem_object_get_dirty_page(so->obj, 0)); + + while (i < rodata->batch_items) { + u32 s = rodata->batch[i]; + + if (i * 4 == rodata->reloc[reloc_index]) { + u64 r = s + so->vma->node.start; + s = lower_32_bits(r); + if (HAS_64BIT_RELOC(i915)) { + if (i + 1 >= rodata->batch_items || + rodata->batch[i + 1] != 0) + goto err; + + d[i++] = s; + s = upper_32_bits(r); + } + + reloc_index++; + } + + d[i++] = s; + } + + if (rodata->reloc[reloc_index] != -1) { + DRM_ERROR("only %d relocs resolved\n", reloc_index); + goto err; + } + + so->batch_offset = i915_ggtt_offset(so->vma); + so->batch_size = rodata->batch_items * sizeof(u32); + + while (i % CACHELINE_DWORDS) + OUT_BATCH(d, i, MI_NOOP); + + so->aux_offset = i * sizeof(u32); + + if (HAS_POOLED_EU(i915)) { + /* + * We always program 3x6 pool config but depending upon which + * subslice is disabled HW drops down to appropriate config + * shown below. + * + * In the below table 2x6 config always refers to + * fused-down version, native 2x6 is not available and can + * be ignored + * + * SNo subslices config eu pool configuration + * ----------------------------------------------------------- + * 1 3 subslices enabled (3x6) - 0x00777000 (9+9) + * 2 ss0 disabled (2x6) - 0x00777000 (3+9) + * 3 ss1 disabled (2x6) - 0x00770000 (6+6) + * 4 ss2 disabled (2x6) - 0x00007000 (9+3) + */ + u32 eu_pool_config = 0x00777000; + + OUT_BATCH(d, i, GEN9_MEDIA_POOL_STATE); + OUT_BATCH(d, i, GEN9_MEDIA_POOL_ENABLE); + OUT_BATCH(d, i, eu_pool_config); + OUT_BATCH(d, i, 0); + OUT_BATCH(d, i, 0); + OUT_BATCH(d, i, 0); + } + + OUT_BATCH(d, i, MI_BATCH_BUFFER_END); + so->aux_size = i * sizeof(u32) - so->aux_offset; + so->aux_offset += so->batch_offset; + /* + * Since we are sending length, we need to strictly conform to + * all requirements. For Gen2 this must be a multiple of 8. 
+ */ + so->aux_size = ALIGN(so->aux_size, 8); + + if (needs_clflush) + drm_clflush_virt_range(d, i * sizeof(u32)); + kunmap_atomic(d); + + ret = 0; +out: + i915_gem_object_finish_access(so->obj); + return ret; + +err: + kunmap_atomic(d); + ret = -EINVAL; + goto out; +} + +#undef OUT_BATCH + +int intel_renderstate_emit(struct i915_request *rq) +{ + struct intel_engine_cs *engine = rq->engine; + struct intel_renderstate so = {}; /* keep the compiler happy */ + int err; + + so.rodata = render_state_get_rodata(engine); + if (!so.rodata) + return 0; + + if (so.rodata->batch_items * 4 > PAGE_SIZE) + return -EINVAL; + + so.obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE); + if (IS_ERR(so.obj)) + return PTR_ERR(so.obj); + + so.vma = i915_vma_instance(so.obj, &engine->gt->ggtt->vm, NULL); + if (IS_ERR(so.vma)) { + err = PTR_ERR(so.vma); + goto err_obj; + } + + err = i915_vma_pin(so.vma, 0, 0, PIN_GLOBAL | PIN_HIGH); + if (err) + goto err_vma; + + err = render_state_setup(&so, rq->i915); + if (err) + goto err_unpin; + + err = engine->emit_bb_start(rq, + so.batch_offset, so.batch_size, + I915_DISPATCH_SECURE); + if (err) + goto err_unpin; + + if (so.aux_size > 8) { + err = engine->emit_bb_start(rq, + so.aux_offset, so.aux_size, + I915_DISPATCH_SECURE); + if (err) + goto err_unpin; + } + + i915_vma_lock(so.vma); + err = i915_vma_move_to_active(so.vma, rq, 0); + i915_vma_unlock(so.vma); +err_unpin: + i915_vma_unpin(so.vma); +err_vma: + i915_vma_close(so.vma); +err_obj: + i915_gem_object_put(so.obj); + return err; +} diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.h b/drivers/gpu/drm/i915/gt/intel_renderstate.h new file mode 100644 index 000000000000..8d5079145054 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_renderstate.h @@ -0,0 +1,51 @@ +/* + * Copyright © 2014 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _INTEL_RENDERSTATE_H_ +#define _INTEL_RENDERSTATE_H_ + +#include <linux/types.h> + +struct i915_request; + +struct intel_renderstate_rodata { + const u32 *reloc; + const u32 *batch; + const u32 batch_items; +}; + +#define RO_RENDERSTATE(_g) \ + const struct intel_renderstate_rodata gen ## _g ## _null_state = { \ + .reloc = gen ## _g ## _null_state_relocs, \ + .batch = gen ## _g ## _null_state_batch, \ + .batch_items = sizeof(gen ## _g ## _null_state_batch)/4, \ + } + +extern const struct intel_renderstate_rodata gen6_null_state; +extern const struct intel_renderstate_rodata gen7_null_state; +extern const struct intel_renderstate_rodata gen8_null_state; +extern const struct intel_renderstate_rodata gen9_null_state; + +int intel_renderstate_emit(struct i915_request *rq); + +#endif /* _INTEL_RENDERSTATE_H_ */ diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c index 81f9b0422e6a..f804ec35037d 100644 --- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c @@ -33,12 +33,11 @@ #include "gem/i915_gem_context.h" -#include "gt/intel_gt.h" - #include "i915_drv.h" -#include "i915_gem_render_state.h" #include "i915_trace.h" #include "intel_context.h" +#include "intel_gt.h" +#include "intel_renderstate.h" #include "intel_reset.h" #include "intel_workarounds.h" @@ -813,7 +812,7 @@ static int intel_rcs_ctx_init(struct i915_request *rq) if (ret != 0) return ret; - ret = i915_gem_render_state_emit(rq); + ret = intel_renderstate_emit(rq); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c deleted file mode 100644 index 6bda08c1e8d7..000000000000 --- a/drivers/gpu/drm/i915/i915_gem_render_state.c +++ /dev/null @@ -1,235 +0,0 @@ -/* - * Copyright © 2014 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE.
- * - * Authors: - * Mika Kuoppala - * - */ - -#include "i915_drv.h" -#include "i915_gem_render_state.h" -#include "intel_renderstate.h" - -struct intel_render_state { - const struct intel_renderstate_rodata *rodata; - struct drm_i915_gem_object *obj; - struct i915_vma *vma; - u32 batch_offset; - u32 batch_size; - u32 aux_offset; - u32 aux_size; -}; - -static const struct intel_renderstate_rodata * -render_state_get_rodata(const struct intel_engine_cs *engine) -{ - if (engine->id != RCS0) - return NULL; - - switch (INTEL_GEN(engine->i915)) { - case 6: - return &gen6_null_state; - case 7: - return &gen7_null_state; - case 8: - return &gen8_null_state; - case 9: - return &gen9_null_state; - } - - return NULL; -} - -/* - * Macro to add commands to auxiliary batch. - * This macro only checks for page overflow before inserting the commands, - * this is sufficient as the null state generator makes the final batch - * with two passes to build command and state separately. At this point - * the size of both are known and it compacts them by relocating the state - * right after the commands taking care of alignment so we should sufficient - * space below them for adding new commands. - */ -#define OUT_BATCH(batch, i, val) \ - do { \ - if ((i) >= PAGE_SIZE / sizeof(u32)) \ - goto err; \ - (batch)[(i)++] = (val); \ - } while(0) - -static int render_state_setup(struct intel_render_state *so, - struct drm_i915_private *i915) -{ - const struct intel_renderstate_rodata *rodata = so->rodata; - unsigned int i = 0, reloc_index = 0; - unsigned int needs_clflush; - u32 *d; - int ret; - - ret = i915_gem_object_prepare_write(so->obj, &needs_clflush); - if (ret) - return ret; - - d = kmap_atomic(i915_gem_object_get_dirty_page(so->obj, 0)); - - while (i < rodata->batch_items) { - u32 s = rodata->batch[i]; - - if (i * 4 == rodata->reloc[reloc_index]) { - u64 r = s + so->vma->node.start; - s = lower_32_bits(r); - if (HAS_64BIT_RELOC(i915)) { - if (i + 1 >= rodata->batch_items || - rodata->batch[i + 1] != 0) - goto err; - - d[i++] = s; - s = upper_32_bits(r); - } - - reloc_index++; - } - - d[i++] = s; - } - - if (rodata->reloc[reloc_index] != -1) { - DRM_ERROR("only %d relocs resolved\n", reloc_index); - goto err; - } - - so->batch_offset = i915_ggtt_offset(so->vma); - so->batch_size = rodata->batch_items * sizeof(u32); - - while (i % CACHELINE_DWORDS) - OUT_BATCH(d, i, MI_NOOP); - - so->aux_offset = i * sizeof(u32); - - if (HAS_POOLED_EU(i915)) { - /* - * We always program 3x6 pool config but depending upon which - * subslice is disabled HW drops down to appropriate config - * shown below. - * - * In the below table 2x6 config always refers to - * fused-down version, native 2x6 is not available and can - * be ignored - * - * SNo subslices config eu pool configuration - * ----------------------------------------------------------- - * 1 3 subslices enabled (3x6) - 0x00777000 (9+9) - * 2 ss0 disabled (2x6) - 0x00777000 (3+9) - * 3 ss1 disabled (2x6) - 0x00770000 (6+6) - * 4 ss2 disabled (2x6) - 0x00007000 (9+3) - */ - u32 eu_pool_config = 0x00777000; - - OUT_BATCH(d, i, GEN9_MEDIA_POOL_STATE); - OUT_BATCH(d, i, GEN9_MEDIA_POOL_ENABLE); - OUT_BATCH(d, i, eu_pool_config); - OUT_BATCH(d, i, 0); - OUT_BATCH(d, i, 0); - OUT_BATCH(d, i, 0); - } - - OUT_BATCH(d, i, MI_BATCH_BUFFER_END); - so->aux_size = i * sizeof(u32) - so->aux_offset; - so->aux_offset += so->batch_offset; - /* - * Since we are sending length, we need to strictly conform to - * all requirements. For Gen2 this must be a multiple of 8. 
- */ - so->aux_size = ALIGN(so->aux_size, 8); - - if (needs_clflush) - drm_clflush_virt_range(d, i * sizeof(u32)); - kunmap_atomic(d); - - ret = 0; -out: - i915_gem_object_finish_access(so->obj); - return ret; - -err: - kunmap_atomic(d); - ret = -EINVAL; - goto out; -} - -#undef OUT_BATCH - -int i915_gem_render_state_emit(struct i915_request *rq) -{ - struct intel_engine_cs *engine = rq->engine; - struct intel_render_state so = {}; /* keep the compiler happy */ - int err; - - so.rodata = render_state_get_rodata(engine); - if (!so.rodata) - return 0; - - if (so.rodata->batch_items * 4 > PAGE_SIZE) - return -EINVAL; - - so.obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE); - if (IS_ERR(so.obj)) - return PTR_ERR(so.obj); - - so.vma = i915_vma_instance(so.obj, &engine->gt->ggtt->vm, NULL); - if (IS_ERR(so.vma)) { - err = PTR_ERR(so.vma); - goto err_obj; - } - - err = i915_vma_pin(so.vma, 0, 0, PIN_GLOBAL | PIN_HIGH); - if (err) - goto err_vma; - - err = render_state_setup(&so, rq->i915); - if (err) - goto err_unpin; - - err = engine->emit_bb_start(rq, - so.batch_offset, so.batch_size, - I915_DISPATCH_SECURE); - if (err) - goto err_unpin; - - if (so.aux_size > 8) { - err = engine->emit_bb_start(rq, - so.aux_offset, so.aux_size, - I915_DISPATCH_SECURE); - if (err) - goto err_unpin; - } - - i915_vma_lock(so.vma); - err = i915_vma_move_to_active(so.vma, rq, 0); - i915_vma_unlock(so.vma); -err_unpin: - i915_vma_unpin(so.vma); -err_vma: - i915_vma_close(so.vma); -err_obj: - i915_gem_object_put(so.obj); - return err; -} diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.h b/drivers/gpu/drm/i915/i915_gem_render_state.h deleted file mode 100644 index 112cda8fa1a8..000000000000 --- a/drivers/gpu/drm/i915/i915_gem_render_state.h +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright © 2014 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- */ - -#ifndef _I915_GEM_RENDER_STATE_H_ -#define _I915_GEM_RENDER_STATE_H_ - -struct i915_request; - -int i915_gem_render_state_emit(struct i915_request *rq); - -#endif /* _I915_GEM_RENDER_STATE_H_ */ diff --git a/drivers/gpu/drm/i915/intel_renderstate.h b/drivers/gpu/drm/i915/intel_renderstate.h deleted file mode 100644 index 08f6fea05a2c..000000000000 --- a/drivers/gpu/drm/i915/intel_renderstate.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright © 2014 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - -#ifndef _INTEL_RENDERSTATE_H -#define _INTEL_RENDERSTATE_H - -#include <linux/types.h> - -struct intel_renderstate_rodata { - const u32 *reloc; - const u32 *batch; - const u32 batch_items; -}; - -#define RO_RENDERSTATE(_g) \ - const struct intel_renderstate_rodata gen ## _g ## _null_state = { \ - .reloc = gen ## _g ## _null_state_relocs, \ - .batch = gen ## _g ## _null_state_batch, \ - .batch_items = sizeof(gen ## _g ## _null_state_batch)/4, \ - } - -extern const struct intel_renderstate_rodata gen6_null_state; -extern const struct intel_renderstate_rodata gen7_null_state; -extern const struct intel_renderstate_rodata gen8_null_state; -extern const struct intel_renderstate_rodata gen9_null_state; - -#endif /* INTEL_RENDERSTATE_H */ diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen6.c b/drivers/gpu/drm/i915/intel_renderstate_gen6.c deleted file mode 100644 index 11c8e7b3dd7c..000000000000 --- a/drivers/gpu/drm/i915/intel_renderstate_gen6.c +++ /dev/null @@ -1,315 +0,0 @@ -/* - * Copyright © 2014 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - * - * Generated by: intel-gpu-tools-1.8-220-g01153e7 - */ - -#include "intel_renderstate.h" - -static const u32 gen6_null_state_relocs[] = { - 0x00000020, - 0x00000024, - 0x0000002c, - 0x000001e0, - 0x000001e4, - -1, -}; - -static const u32 gen6_null_state_batch[] = { - 0x69040000, - 0x790d0001, - 0x00000000, - 0x00000000, - 0x78180000, - 0x00000001, - 0x61010008, - 0x00000000, - 0x00000001, /* reloc */ - 0x00000001, /* reloc */ - 0x00000000, - 0x00000001, /* reloc */ - 0x00000000, - 0x00000001, - 0x00000000, - 0x00000001, - 0x61020000, - 0x00000000, - 0x78050001, - 0x00000018, - 0x00000000, - 0x780d1002, - 0x00000000, - 0x00000000, - 0x00000420, - 0x78150003, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78100004, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78160003, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78110005, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78120002, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78170003, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x79050005, - 0xe0040000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x79100000, - 0x00000000, - 0x79000002, - 0xffffffff, - 0x00000000, - 0x00000000, - 0x780e0002, - 0x00000441, - 0x00000401, - 0x00000401, - 0x78021002, - 0x00000000, - 0x00000000, - 0x00000400, - 0x78130012, - 0x00400810, - 0x00000000, - 0x20000000, - 0x04000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78140007, - 0x00000280, - 0x08080000, - 0x00000000, - 0x00060000, - 0x4e080002, - 0x00100400, - 0x00000000, - 0x00000000, - 0x78090005, - 0x02000000, - 0x22220000, - 0x02f60000, - 0x11330000, - 0x02850004, - 0x11220000, - 0x78011002, - 0x00000000, - 0x00000000, - 0x00000200, - 0x78080003, - 0x00002000, - 0x00000448, /* reloc */ - 0x00000448, /* reloc */ - 0x00000000, - 0x05000000, /* cmds end */ - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000220, /* state start */ - 0x00000240, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x0060005a, - 0x204077be, - 0x000000c0, - 0x008d0040, - 0x0060005a, - 0x206077be, - 0x000000c0, - 0x008d0080, - 0x0060005a, - 0x208077be, - 0x000000d0, - 0x008d0040, - 0x0060005a, - 0x20a077be, - 0x000000d0, - 0x008d0080, - 0x00000201, - 0x20080061, - 0x00000000, - 0x00000000, - 0x00600001, - 0x20200022, - 0x008d0000, - 0x00000000, - 0x02800031, - 0x21c01cc9, - 0x00000020, - 0x0a8a0001, - 0x00600001, - 0x204003be, - 0x008d01c0, - 0x00000000, - 0x00600001, - 0x206003be, - 0x008d01e0, - 0x00000000, - 0x00600001, - 0x208003be, - 0x008d0200, - 0x00000000, - 0x00600001, - 0x20a003be, - 0x008d0220, - 0x00000000, - 0x00600001, - 0x20c003be, - 0x008d0240, - 0x00000000, - 
0x00600001, - 0x20e003be, - 0x008d0260, - 0x00000000, - 0x00600001, - 0x210003be, - 0x008d0280, - 0x00000000, - 0x00600001, - 0x212003be, - 0x008d02a0, - 0x00000000, - 0x05800031, - 0x24001cc8, - 0x00000040, - 0x90019000, - 0x0000007e, - 0x00000000, - 0x00000000, - 0x00000000, - 0x0000007e, - 0x00000000, - 0x00000000, - 0x00000000, - 0x0000007e, - 0x00000000, - 0x00000000, - 0x00000000, - 0x0000007e, - 0x00000000, - 0x00000000, - 0x00000000, - 0x0000007e, - 0x00000000, - 0x00000000, - 0x00000000, - 0x0000007e, - 0x00000000, - 0x00000000, - 0x00000000, - 0x0000007e, - 0x00000000, - 0x00000000, - 0x00000000, - 0x0000007e, - 0x00000000, - 0x00000000, - 0x00000000, - 0x30000000, - 0x00000124, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0xf99a130c, - 0x799a130c, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x80000031, - 0x00000003, - 0x00000000, /* state end */ -}; - -RO_RENDERSTATE(6); diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen7.c b/drivers/gpu/drm/i915/intel_renderstate_gen7.c deleted file mode 100644 index 655180646152..000000000000 --- a/drivers/gpu/drm/i915/intel_renderstate_gen7.c +++ /dev/null @@ -1,279 +0,0 @@ -/* - * Copyright © 2014 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- * - * Generated by: intel-gpu-tools-1.8-220-g01153e7 - */ - -#include "intel_renderstate.h" - -static const u32 gen7_null_state_relocs[] = { - 0x0000000c, - 0x00000010, - 0x00000018, - 0x000001ec, - -1, -}; - -static const u32 gen7_null_state_batch[] = { - 0x69040000, - 0x61010008, - 0x00000000, - 0x00000001, /* reloc */ - 0x00000001, /* reloc */ - 0x00000000, - 0x00000001, /* reloc */ - 0x00000000, - 0x00000001, - 0x00000000, - 0x00000001, - 0x790d0002, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78180000, - 0x00000001, - 0x79160000, - 0x00000008, - 0x78300000, - 0x02010040, - 0x78310000, - 0x04000000, - 0x78320000, - 0x04000000, - 0x78330000, - 0x02000000, - 0x78100004, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x781b0005, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x781c0002, - 0x00000000, - 0x00000000, - 0x00000000, - 0x781d0004, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78110005, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78120002, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78210000, - 0x00000000, - 0x78130005, - 0x00000000, - 0x20000000, - 0x04000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78140001, - 0x20000800, - 0x00000000, - 0x781e0001, - 0x00000000, - 0x00000000, - 0x78050005, - 0xe0040000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78040001, - 0x00000000, - 0x00000000, - 0x78240000, - 0x00000240, - 0x78230000, - 0x00000260, - 0x782f0000, - 0x00000280, - 0x781f000c, - 0x00400810, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78200006, - 0x000002c0, - 0x08080000, - 0x00000000, - 0x28000402, - 0x00060000, - 0x00000000, - 0x00000000, - 0x78090005, - 0x02000000, - 0x22220000, - 0x02f60000, - 0x11230000, - 0x02f60004, - 0x11230000, - 0x78080003, - 0x00006008, - 0x00000340, /* reloc */ - 0xffffffff, - 0x00000000, - 0x782a0000, - 0x00000360, - 0x79000002, - 0xffffffff, - 0x00000000, - 0x00000000, - 0x7b000005, - 0x0000000f, - 0x00000003, - 0x00000000, - 0x00000001, - 0x00000000, - 0x00000000, - 0x05000000, /* cmds end */ - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000031, /* state start */ - 0x00000003, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0xf99a130c, - 0x799a130c, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000492, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x0080005a, - 0x2e2077bd, - 0x000000c0, - 0x008d0040, - 0x0080005a, - 0x2e6077bd, - 0x000000d0, - 0x008d0040, - 0x02800031, - 0x21801fa9, - 0x008d0e20, - 0x08840001, - 0x00800001, - 0x2e2003bd, - 0x008d0180, - 0x00000000, - 0x00800001, - 0x2e6003bd, - 0x008d01c0, - 0x00000000, - 0x00800001, - 0x2ea003bd, - 0x008d0200, - 0x00000000, - 0x00800001, - 0x2ee003bd, - 0x008d0240, - 0x00000000, - 0x05800031, - 0x20001fa8, - 0x008d0e20, - 0x90031000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000380, - 0x000003a0, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 
0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, /* state end */ -}; - -RO_RENDERSTATE(7); diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen8.c b/drivers/gpu/drm/i915/intel_renderstate_gen8.c deleted file mode 100644 index 95288a34c15d..000000000000 --- a/drivers/gpu/drm/i915/intel_renderstate_gen8.c +++ /dev/null @@ -1,983 +0,0 @@ -/* - * Copyright © 2014 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - * - * Generated by: intel-gpu-tools-1.8-220-g01153e7 - */ - -#include "intel_renderstate.h" - -static const u32 gen8_null_state_relocs[] = { - 0x00000798, - 0x000007a4, - 0x000007ac, - 0x000007bc, - -1, -}; - -static const u32 gen8_null_state_batch[] = { - 0x7a000004, - 0x01000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x69040000, - 0x78140000, - 0x04000000, - 0x7820000a, - 0x00000000, - 0x00000000, - 0x80000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78130002, - 0x00000000, - 0x00000000, - 0x02001808, - 0x781f0002, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78510009, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78100007, - 0x00000000, - 0x00000000, - 0x00010000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x781b0007, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000800, - 0x00000000, - 0x78110008, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x781e0003, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x781d0007, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78120002, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78500003, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x781c0002, - 0x00000000, - 0x00000000, - 0x00000000, - 0x780c0000, - 0x00000000, - 0x78520003, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78300000, - 0x08010040, - 0x78310000, - 0x1e000000, - 0x78320000, - 0x1e000000, - 0x78330000, - 0x1e000000, - 0x79190002, - 0x00000000, - 0x00000000, - 0x00000000, - 0x791a0002, - 0x00000000, - 0x00000000, - 0x00000000, - 0x791b0002, - 0x00000000, - 
0x00000000, - 0x00000000, - 0x79120000, - 0x00000000, - 0x79130000, - 0x00000000, - 0x79140000, - 0x00000000, - 0x79150000, - 0x00000000, - 0x79160000, - 0x00000000, - 0x78150009, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78190009, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x781a0009, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78160009, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78170009, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78490001, - 0x00000000, - 0x00000000, - 0x784a0000, - 0x00000000, - 0x784b0000, - 0x00000004, - 0x79170101, - 0x00000000, - 0x00000080, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, 
- 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x79180006, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x79180006, - 0x20000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x79180006, - 0x40000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x79180006, - 0x60000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x6101000e, - 0x00000001, /* reloc */ - 0x00000000, - 0x00000000, - 0x00000001, /* reloc */ - 0x00000000, - 0x00000001, /* reloc */ - 0x00000000, - 0x00000001, - 0x00000000, - 0x00000001, /* reloc */ - 0x00000000, - 0x00001001, - 0x00001001, - 0x00000001, - 0x00001001, - 0x61020001, - 0x00000000, - 0x00000000, - 0x79000002, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78050006, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x79040002, - 0x00000000, - 0x00000000, - 0x00000000, - 0x79040002, - 0x40000000, - 0x00000000, - 0x00000000, - 0x79040002, - 0x80000000, - 0x00000000, - 0x00000000, - 0x79040002, - 0xc0000000, - 0x00000000, - 0x00000000, - 0x79080001, - 0x00000000, - 0x00000000, - 0x790a0001, - 0x00000000, - 0x00000000, - 0x78060003, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78070003, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78040001, - 0x00000000, - 0x00000000, - 0x79110000, - 0x00000000, - 0x780d0000, - 0x00000000, - 0x79060000, - 0x00000000, - 0x7907001f, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x7902000f, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x790c000f, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 
0x780a0003, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78080083, - 0x00004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x04004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x08004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x0c004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x10004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x14004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x18004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x1c004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x20004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x24004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x28004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x2c004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x30004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x34004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x38004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x3c004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x40004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x44004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x48004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x4c004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x50004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x54004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x58004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x5c004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x60004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x64004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x68004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x6c004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x70004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x74004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x7c004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x80004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78090043, - 0x02000000, - 0x22220000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x680b0001, - 0x78260000, - 0x00000000, - 0x78270000, - 0x00000000, - 0x78280000, - 0x00000000, - 0x78290000, - 0x00000000, - 0x782a0000, - 0x00000000, - 0x780e0000, - 0x00000dc1, - 0x78240000, - 0x00000e01, - 0x784f0000, - 0x80000100, - 0x784d0000, - 0x40000000, - 0x782b0000, - 0x00000000, - 0x782c0000, - 0x00000000, - 0x782d0000, - 0x00000000, - 0x782e0000, - 0x00000000, - 0x782f0000, - 0x00000000, - 0x780f0000, - 0x00000000, - 0x78230000, - 0x00000e60, - 0x78210000, - 0x00000e80, - 0x7b000005, - 0x00000004, - 0x00000001, - 0x00000000, - 0x00000001, - 0x00000000, - 0x00000000, - 0x05000000, /* cmds end */ - 0x00000000, - 0x00000000, - 0x00000000, 
- 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, /* state start */ - 0x00000000, - 0x3f800000, - 0x3f800000, - 0x3f800000, - 0x3f800000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, /* state end */ -}; - -RO_RENDERSTATE(8); diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen9.c b/drivers/gpu/drm/i915/intel_renderstate_gen9.c deleted file mode 100644 index 7d3ac02f0177..000000000000 --- a/drivers/gpu/drm/i915/intel_renderstate_gen9.c +++ /dev/null @@ -1,999 +0,0 @@ -/* - * Copyright © 2014 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. 
- * - * Generated by: intel-gpu-tools-1.19-177-g68e2eab2 - */ - -#include "intel_renderstate.h" - -static const u32 gen9_null_state_relocs[] = { - 0x000007a8, - 0x000007b4, - 0x000007bc, - 0x000007cc, - -1, -}; - -static const u32 gen9_null_state_batch[] = { - 0x7a000004, - 0x01000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x69040300, - 0x78140000, - 0x04000000, - 0x7820000a, - 0x00000000, - 0x00000000, - 0x80000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78130002, - 0x00000000, - 0x00000000, - 0x02001808, - 0x781f0004, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78510009, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78100007, - 0x00000000, - 0x00000000, - 0x00010000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x781b0007, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000800, - 0x00000000, - 0x78110008, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x781e0003, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x781d0009, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78120002, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78500003, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x781c0002, - 0x00000000, - 0x00000000, - 0x00000000, - 0x780c0000, - 0x00000000, - 0x78520003, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78300000, - 0x08010040, - 0x78310000, - 0x1e000000, - 0x78320000, - 0x1e000000, - 0x78330000, - 0x1e000000, - 0x79190002, - 0x00000000, - 0x00000000, - 0x00000000, - 0x791a0002, - 0x00000000, - 0x00000000, - 0x00000000, - 0x791b0002, - 0x00000000, - 0x00000000, - 0x00000000, - 0x79120000, - 0x00000000, - 0x79130000, - 0x00000000, - 0x79140000, - 0x00000000, - 0x79150000, - 0x00000000, - 0x79160000, - 0x00000000, - 0x78150009, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78190009, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x781a0009, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78160009, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78170009, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78490001, - 0x00000000, - 0x00000000, - 0x784a0000, - 0x00000000, - 0x784b0000, - 0x00000004, - 0x79170101, - 0x00000000, - 0x00000080, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 
0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x79180006, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x79180006, - 0x20000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x79180006, - 0x40000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x79180006, - 0x60000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, 
- 0x61010011, - 0x00000001, /* reloc */ - 0x00000000, - 0x00000000, - 0x00000001, /* reloc */ - 0x00000000, - 0x00000001, /* reloc */ - 0x00000000, - 0x00000001, - 0x00000000, - 0x00000001, /* reloc */ - 0x00000000, - 0x00001001, - 0x00001001, - 0x00000001, - 0x00001001, - 0x00000000, - 0x00000000, - 0x00000000, - 0x61020001, - 0x00000000, - 0x00000000, - 0x79000002, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78050006, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x79040002, - 0x00000000, - 0x00000000, - 0x00000000, - 0x79040002, - 0x40000000, - 0x00000000, - 0x00000000, - 0x79040002, - 0x80000000, - 0x00000000, - 0x00000000, - 0x79040002, - 0xc0000000, - 0x00000000, - 0x00000000, - 0x79080001, - 0x00000000, - 0x00000000, - 0x790a0001, - 0x00000000, - 0x00000000, - 0x78060003, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78070003, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78040001, - 0x00000000, - 0x00000000, - 0x79110000, - 0x00000000, - 0x780d0000, - 0x00000000, - 0x79060000, - 0x00000000, - 0x7907001f, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x7902000f, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x790c000f, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x780a0003, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78080083, - 0x00004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x04004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x08004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x0c004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x10004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x14004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x18004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x1c004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x20004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x24004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x28004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x2c004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x30004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x34004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x38004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x3c004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x40004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x44004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x48004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x4c004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x50004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x54004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x58004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x5c004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x60004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x64004000, - 0x00000000, - 
0x00000000, - 0x00000000, - 0x68004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x6c004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x70004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x74004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x7c004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x80004000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78090043, - 0x02000000, - 0x22220000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x78550003, - 0x0000000f, - 0x00000000, - 0x00000000, - 0x00000000, - 0x680b0001, - 0x780e0000, - 0x00000e01, - 0x78240000, - 0x00000e41, - 0x784f0000, - 0x80000100, - 0x784d0000, - 0x40000000, - 0x782b0000, - 0x00000000, - 0x782c0000, - 0x00000000, - 0x782d0000, - 0x00000000, - 0x782e0000, - 0x00000000, - 0x782f0000, - 0x00000000, - 0x780f0000, - 0x00000000, - 0x78230000, - 0x00000ea0, - 0x78210000, - 0x00000ec0, - 0x78260000, - 0x00000000, - 0x78270000, - 0x00000000, - 0x78280000, - 0x00000000, - 0x78290000, - 0x00000000, - 0x782a0000, - 0x00000000, - 0x7b000005, - 0x00000004, - 0x00000001, - 0x00000000, - 0x00000001, - 0x00000000, - 0x00000000, - 0x05000000, /* cmds end */ - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, /* state start */ - 0x00000000, - 0x3f800000, - 0x3f800000, - 0x3f800000, - 0x3f800000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, - 0x00000000, /* state end */ -}; - -RO_RENDERSTATE(9); -- cgit v1.2.3 From 4fda44bf16b79a0b78fe36c6b9859e9ce2d09f43 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 3 Jul 2019 18:19:13 +0100 Subject: drm/i915: Flush the workqueue before draining Trying to drain a workqueue while we may still be adding to it from background tasks is, according to kernel/workqueue.c, verboten. 
So, add a flush_workqueue() at the start of our cleanup procedure. References: https://bugs.freedesktop.org/show_bug.cgi?id=110550 Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190703171913.16585-4-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_drv.h | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index ca3afe6e95b5..a4ae9ea298dd 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2473,6 +2473,7 @@ static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915) */ int pass = 3; do { + flush_workqueue(i915->wq); rcu_barrier(); i915_gem_drain_freed_objects(i915); } while (--pass); -- cgit v1.2.3 From b7dc9395c5db2748aac24704d3f2d96f34f90f07 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 4 Jul 2019 11:20:48 +0100 Subject: drm/i915: Check caller held wakerefs in assert_forcewakes_active The intent of the assert is to document that the caller took the appropriate wakerefs for the function. However, as Tvrtko pointed out, we simply check whether the fw_domains are active and may be confused by the auto wakeref which may be dropped between the check and use. Let's be more careful in the assert and check that each fw_domain has an explicit caller wakeref above and beyond the automatic wakeref. v2: Fix spelling for config DRM_I915_DEBUG_RUNTIME_PM v3: Timer may still be active after we drop the autowakeref, we need to check domain->active instead. v4: The timer checks domain->active, but we still need to check the timer. (This is starting to look weird...) Reported-by: Tvrtko Ursulin Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Cc: Mika Kuoppala Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190704102048.6436-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_uncore.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 68d54e126d79..2042c94c9cc9 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -738,6 +738,12 @@ void assert_forcewakes_inactive(struct intel_uncore *uncore) void assert_forcewakes_active(struct intel_uncore *uncore, enum forcewake_domains fw_domains) { + struct intel_uncore_forcewake_domain *domain; + unsigned int tmp; + + if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) + return; + if (!uncore->funcs.force_wake_get) return; @@ -747,6 +753,24 @@ void assert_forcewakes_active(struct intel_uncore *uncore, WARN(fw_domains & ~uncore->fw_domains_active, "Expected %08x fw_domains to be active, but %08x are off\n", fw_domains, fw_domains & ~uncore->fw_domains_active); + + /* + * Check that the caller has an explicit wakeref and we don't mistake + * it for the auto wakeref. 
+ */ + local_irq_disable(); + for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) { + unsigned int expect = 1; + + if (hrtimer_active(&domain->timer) && READ_ONCE(domain->active)) + expect++; /* pending automatic release */ + + if (WARN(domain->wake_count < expect, + "Expected domain %d to be held awake by caller, count=%d\n", + domain->id, domain->wake_count)) + break; + } + local_irq_enable(); } /* We give fast paths for the really cool registers */ -- cgit v1.2.3 From 56e0f78e47e47795873acbabab443029aa30bcb4 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 3 Jul 2019 16:52:23 +0100 Subject: drm/i915/gt: Use caller provided forcewake for intel_mocs_init_engine During post-reset resume, we call intel_mocs_init_engine to reinitialise the MOCS registers. Suprisingly, especially when enhanced by lockdep, the acquisition of the forcewake lock around each register write takes a substantial portion of the reset time. We don't need to use the individual forcewake here as we can assume that the caller is holding a blanket forcewake for the reset&resume and the resume is serialised. Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190703155225.9501-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_mocs.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c index ae6cbf0d517c..290a5e9b90b9 100644 --- a/drivers/gpu/drm/i915/gt/intel_mocs.c +++ b/drivers/gpu/drm/i915/gt/intel_mocs.c @@ -346,6 +346,9 @@ void intel_mocs_init_engine(struct intel_engine_cs *engine) unsigned int index; u32 unused_value; + /* Called under a blanket forcewake */ + assert_forcewakes_active(uncore, FORCEWAKE_ALL); + if (!get_mocs_settings(gt, &table)) return; @@ -355,16 +358,16 @@ void intel_mocs_init_engine(struct intel_engine_cs *engine) for (index = 0; index < table.size; index++) { u32 value = get_entry_control(&table, index); - intel_uncore_write(uncore, - mocs_register(engine->id, index), - value); + intel_uncore_write_fw(uncore, + mocs_register(engine->id, index), + value); } /* All remaining entries are also unused */ for (; index < table.n_entries; index++) - intel_uncore_write(uncore, - mocs_register(engine->id, index), - unused_value); + intel_uncore_write_fw(uncore, + mocs_register(engine->id, index), + unused_value); } /** -- cgit v1.2.3 From 313443b16ac26dd0250462d8513c5b5c0a188e38 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 3 Jul 2019 16:52:24 +0100 Subject: drm/i915/gt: Assume we hold forcewake for execlists resume We can assume the caller is holding a blanket forcewake for the register writes during resume, and so we can skip taking individual locks around each write inside execlists resume. 
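For illustration only, a minimal sketch of the convention this patch and the MOCS patch above both rely on: the caller holds one blanket forcewake for the whole resume sequence, and the callees then use the raw _fw accessors without taking per-register forcewake or the uncore lock. The helper names are the ones used in the diffs; the wrapper function itself is hypothetical.

static void sketch_resume_under_blanket_forcewake(struct intel_engine_cs *engine)
{
        struct intel_uncore *uncore = engine->uncore;

        /* One caller-level wakeref covering every forcewake domain */
        intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

        /* Callees may assert the blanket wakeref and use raw writes */
        assert_forcewakes_active(uncore, FORCEWAKE_ALL);
        ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));

        intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
}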
Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190703155225.9501-3-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_lrc.c | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index f5b09b29f50e..15906a1bee73 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -2076,22 +2076,23 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine) static void enable_execlists(struct intel_engine_cs *engine) { + u32 mode; + + assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL); + intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */ if (INTEL_GEN(engine->i915) >= 11) - ENGINE_WRITE(engine, - RING_MODE_GEN7, - _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE)); + mode = _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE); else - ENGINE_WRITE(engine, - RING_MODE_GEN7, - _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)); + mode = _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE); + ENGINE_WRITE_FW(engine, RING_MODE_GEN7, mode); - ENGINE_WRITE(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING)); + ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING)); - ENGINE_WRITE(engine, - RING_HWS_PGA, - i915_ggtt_offset(engine->status_page.vma)); + ENGINE_WRITE_FW(engine, + RING_HWS_PGA, + i915_ggtt_offset(engine->status_page.vma)); ENGINE_POSTING_READ(engine, RING_HWS_PGA); } @@ -2099,7 +2100,7 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine) { bool unexpected = false; - if (ENGINE_READ(engine, RING_MI_MODE) & STOP_RING) { + if (ENGINE_READ_FW(engine, RING_MI_MODE) & STOP_RING) { DRM_DEBUG_DRIVER("STOP_RING still set in RING_MI_MODE\n"); unexpected = true; } -- cgit v1.2.3 From 21de5a9e34fb0f3efa31df5712cedc0ae97f7b4d Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 3 Jul 2019 16:52:25 +0100 Subject: drm/i915/gt: Ignore forcewake acquisition for posting_reads We don't care about the result of the read, so it may be garbage, we only care that the mmio is flushed. As such, we can forgo using an individual forcewake and lock around any posting-read for an engine. Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190703155225.9501-4-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_engine.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h index 557b08b13feb..0331e9ac2485 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine.h +++ b/drivers/gpu/drm/i915/gt/intel_engine.h @@ -51,7 +51,7 @@ struct drm_printer; #define ENGINE_READ16(...) __ENGINE_READ_OP(read16, __VA_ARGS__) #define ENGINE_READ(...) __ENGINE_READ_OP(read, __VA_ARGS__) #define ENGINE_READ_FW(...) __ENGINE_READ_OP(read_fw, __VA_ARGS__) -#define ENGINE_POSTING_READ(...) __ENGINE_READ_OP(posting_read, __VA_ARGS__) +#define ENGINE_POSTING_READ(...) __ENGINE_READ_OP(posting_read_fw, __VA_ARGS__) #define ENGINE_POSTING_READ16(...) 
__ENGINE_READ_OP(posting_read16, __VA_ARGS__) #define ENGINE_READ64(engine__, lower_reg__, upper_reg__) \ -- cgit v1.2.3 From 0c159ffef628fa94d0f4f9128e7f2b6f2b5e86ef Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 3 Jul 2019 19:06:01 +0100 Subject: drm/i915/gem: Defer obj->base.resv fini until RCU callback Since reservation_object_fini() does an immediate free, rather than kfree_rcu as normal, we have to delay the release until after the RCU grace period has elapsed (i.e. from the rcu cleanup callback) so that we can rely on the RCU protected access to the fences while the object is a zombie. i915_gem_busy_ioctl relies on having an RCU barrier to protect the reservation in order to avoid having to take a reference and strong memory barriers. v2: Order is important; only release after putting the pages! Fixes: c03467ba40f7 ("drm/i915/gem: Free pages before rcu-freeing the object") Testcase: igt/gem_busy/close-race Signed-off-by: Chris Wilson Cc: Matthew Auld Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190703180601.10950-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_object.c | 9 +++++---- drivers/gpu/drm/i915/gem/i915_gem_phys.c | 7 ------- drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 7 +++++++ drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 2 -- 4 files changed, 12 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index d3e96f09c6b7..d5197a2a106f 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -152,6 +152,7 @@ static void __i915_gem_free_object_rcu(struct rcu_head *head) container_of(head, typeof(*obj), rcu); struct drm_i915_private *i915 = to_i915(obj->base.dev); + reservation_object_fini(&obj->base._resv); i915_gem_object_free(obj); GEM_BUG_ON(!atomic_read(&i915->mm.free_count)); @@ -187,9 +188,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits)); GEM_BUG_ON(!list_empty(&obj->lut_list)); - if (obj->ops->release) - obj->ops->release(obj); - atomic_set(&obj->mm.pages_pin_count, 0); __i915_gem_object_put_pages(obj, I915_MM_NORMAL); GEM_BUG_ON(i915_gem_object_has_pages(obj)); @@ -198,7 +196,10 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, if (obj->base.import_attach) drm_prime_gem_destroy(&obj->base, NULL); - drm_gem_object_release(&obj->base); + drm_gem_free_mmap_offset(&obj->base); + + if (obj->ops->release) + obj->ops->release(obj); /* But keep the pointer alive for RCU-protected lookups */ call_rcu(&obj->rcu, __i915_gem_free_object_rcu); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c index b9fab22ada6f..102fd7a23d3d 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c @@ -133,16 +133,9 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj, drm_pci_free(obj->base.dev, obj->phys_handle); } -static void -i915_gem_object_release_phys(struct drm_i915_gem_object *obj) -{ - i915_gem_object_unpin_pages(obj); -} - static const struct drm_i915_gem_object_ops i915_gem_phys_ops = { .get_pages = i915_gem_object_get_pages_phys, .put_pages = i915_gem_object_put_pages_phys, - .release = i915_gem_object_release_phys, }; int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c 
b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c index 19d9ecdb2894..d2a1158868e7 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c @@ -414,6 +414,11 @@ shmem_pwrite(struct drm_i915_gem_object *obj, return 0; } +static void shmem_release(struct drm_i915_gem_object *obj) +{ + fput(obj->base.filp); +} + const struct drm_i915_gem_object_ops i915_gem_shmem_ops = { .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | I915_GEM_OBJECT_IS_SHRINKABLE, @@ -424,6 +429,8 @@ const struct drm_i915_gem_object_ops i915_gem_shmem_ops = { .writeback = shmem_writeback, .pwrite = shmem_pwrite, + + .release = shmem_release, }; static int create_shmem(struct drm_i915_private *i915, diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index de1fab2058ec..639c852bad12 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -529,8 +529,6 @@ i915_gem_object_release_stolen(struct drm_i915_gem_object *obj) GEM_BUG_ON(!stolen); - __i915_gem_object_unpin_pages(obj); - i915_gem_stolen_remove_node(dev_priv, stolen); kfree(stolen); } -- cgit v1.2.3 From bf73fc0fa9cf78e37d6ee99e8d12bfa2083594d6 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 3 Jul 2019 15:37:02 +0100 Subject: drm/i915: Show support for accurate sw PMU busyness tracking Expose whether or not we support the PMU software tracking in our scheduler capabilities, so userspace can query at runtime. v2: Use I915_SCHEDULER_CAP_ENGINE_BUSY_STATS for a less ambiguous capability name. Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190703143702.11339-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 7 ++++--- drivers/gpu/drm/i915/i915_pmu.c | 4 +--- include/uapi/drm/i915_drm.h | 1 + 3 files changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index c1fb5fa3952e..7d6d6e62e9cc 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -688,9 +688,10 @@ void intel_engines_set_scheduler_caps(struct drm_i915_private *i915) u8 engine; u8 sched; } map[] = { -#define MAP(x, y) { ilog2(I915_ENGINE_HAS_##x), ilog2(I915_SCHEDULER_CAP_##y) } - MAP(PREEMPTION, PREEMPTION), - MAP(SEMAPHORES, SEMAPHORES), +#define MAP(x, y) { ilog2(I915_ENGINE_##x), ilog2(I915_SCHEDULER_CAP_##y) } + MAP(HAS_PREEMPTION, PREEMPTION), + MAP(HAS_SEMAPHORES, SEMAPHORES), + MAP(SUPPORTS_STATS, ENGINE_BUSY_STATS), #undef MAP }; struct intel_engine_cs *engine; diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index 8fe46ee920a0..eff86483bec0 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -102,10 +102,8 @@ static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active) /* * Also there is software busyness tracking available we do not * need the timer for I915_SAMPLE_BUSY counter. - * - * Use RCS as proxy for all engines. 
*/ - else if (intel_engine_supports_stats(i915->engine[RCS0])) + else if (i915->caps.scheduler & I915_SCHEDULER_CAP_ENGINE_BUSY_STATS) enable &= ~BIT(I915_SAMPLE_BUSY); /* diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 328d05e77d9f..469dc512cca3 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -521,6 +521,7 @@ typedef struct drm_i915_irq_wait { #define I915_SCHEDULER_CAP_PRIORITY (1ul << 1) #define I915_SCHEDULER_CAP_PREEMPTION (1ul << 2) #define I915_SCHEDULER_CAP_SEMAPHORES (1ul << 3) +#define I915_SCHEDULER_CAP_ENGINE_BUSY_STATS (1ul << 4) #define I915_PARAM_HUC_STATUS 42 -- cgit v1.2.3 From ae1c5fd72dfcb66bda5c9002aa341196c7c3e3e1 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 4 Jul 2019 11:43:45 +0100 Subject: drm/i915/gtt: Handle double alloc failures Matthew pointed out that we could face a double failure with concurrent allocations/frees, and so the assumption that the local var alloc was NULL was fraught with danger. Rather than complicate the error paths too much to add a second local for a second free, just do the second free earlier on the unwind path. Reported-by: Matthew Auld Signed-off-by: Chris Wilson Cc: Matthew Auld Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190704104345.6603-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_gtt.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 1065753e86fb..9756f1b670e9 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1484,6 +1484,10 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm, goto out; unwind_pd: + if (alloc) { + free_pd(vm, alloc); + alloc = NULL; + } spin_lock(&pdp->lock); if (atomic_dec_and_test(&pd->used)) { gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe); @@ -1556,6 +1560,10 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm, goto out; unwind_pdp: + if (alloc) { + free_pd(vm, alloc); + alloc = NULL; + } spin_lock(&pml4->lock); if (atomic_dec_and_test(&pdp->used)) { gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e); -- cgit v1.2.3 From 2a46fbb25ae8ef98f44003bbcc0b4480b3b9281e Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Wed, 3 Jul 2019 11:36:39 +0000 Subject: drm/i915/guc: Upgrade to GuC 33.0.0 New GuC firmware is available. Let's use it. 
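For reference, the version bump changes which firmware blob the driver requests at module load. Below is a stand-alone sketch of how the stringified version macros compose into the new path; the macro names mirror intel_guc_fw.c, but the "i915/" prefix and the exact macro layout are assumed here rather than copied from the header.

#include <stdio.h>

#define __stringify_1(x)        #x
#define __stringify(x)          __stringify_1(x)

#define SKL_GUC_FW_PREFIX       skl
#define SKL_GUC_FW_MAJOR        33
#define SKL_GUC_FW_MINOR        0
#define SKL_GUC_FW_PATCH        0

#define MAKE_GUC_FW_PATH(KEY) \
        "i915/" __stringify(KEY##_GUC_FW_PREFIX) "_guc_" \
        __stringify(KEY##_GUC_FW_MAJOR) "." \
        __stringify(KEY##_GUC_FW_MINOR) "." \
        __stringify(KEY##_GUC_FW_PATCH) ".bin"

int main(void)
{
        /* With the values above this prints: i915/skl_guc_33.0.0.bin */
        puts(MAKE_GUC_FW_PATH(SKL));
        return 0;
}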
Signed-off-by: Michal Wajdeczko Cc: Daniele Ceraolo Spurio Reviewed-by: Daniele Ceraolo Spurio Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190703113640.31100-1-michal.wajdeczko@intel.com --- drivers/gpu/drm/i915/intel_guc_fw.c | 20 ++++++++++---------- drivers/gpu/drm/i915/intel_guc_fwif.h | 7 +------ 2 files changed, 11 insertions(+), 16 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c index 970f39ef248b..db1e0daca7db 100644 --- a/drivers/gpu/drm/i915/intel_guc_fw.c +++ b/drivers/gpu/drm/i915/intel_guc_fw.c @@ -38,37 +38,37 @@ __stringify(KEY##_GUC_FW_PATCH) ".bin" #define SKL_GUC_FW_PREFIX skl -#define SKL_GUC_FW_MAJOR 32 +#define SKL_GUC_FW_MAJOR 33 #define SKL_GUC_FW_MINOR 0 -#define SKL_GUC_FW_PATCH 3 +#define SKL_GUC_FW_PATCH 0 #define SKL_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(SKL) MODULE_FIRMWARE(SKL_GUC_FIRMWARE_PATH); #define BXT_GUC_FW_PREFIX bxt -#define BXT_GUC_FW_MAJOR 32 +#define BXT_GUC_FW_MAJOR 33 #define BXT_GUC_FW_MINOR 0 -#define BXT_GUC_FW_PATCH 3 +#define BXT_GUC_FW_PATCH 0 #define BXT_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(BXT) MODULE_FIRMWARE(BXT_GUC_FIRMWARE_PATH); #define KBL_GUC_FW_PREFIX kbl -#define KBL_GUC_FW_MAJOR 32 +#define KBL_GUC_FW_MAJOR 33 #define KBL_GUC_FW_MINOR 0 -#define KBL_GUC_FW_PATCH 3 +#define KBL_GUC_FW_PATCH 0 #define KBL_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(KBL) MODULE_FIRMWARE(KBL_GUC_FIRMWARE_PATH); #define GLK_GUC_FW_PREFIX glk -#define GLK_GUC_FW_MAJOR 32 +#define GLK_GUC_FW_MAJOR 33 #define GLK_GUC_FW_MINOR 0 -#define GLK_GUC_FW_PATCH 3 +#define GLK_GUC_FW_PATCH 0 #define GLK_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(GLK) MODULE_FIRMWARE(GLK_GUC_FIRMWARE_PATH); #define ICL_GUC_FW_PREFIX icl -#define ICL_GUC_FW_MAJOR 32 +#define ICL_GUC_FW_MAJOR 33 #define ICL_GUC_FW_MINOR 0 -#define ICL_GUC_FW_PATCH 3 +#define ICL_GUC_FW_PATCH 0 #define ICL_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(ICL) MODULE_FIRMWARE(ICL_GUC_FIRMWARE_PATH); diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h index 92bd7ffb5b10..30cca3a29323 100644 --- a/drivers/gpu/drm/i915/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/intel_guc_fwif.h @@ -43,13 +43,8 @@ #define GUC_VIDEO_ENGINE2 4 #define GUC_MAX_ENGINES_NUM (GUC_VIDEO_ENGINE2 + 1) -/* - * XXX: Beware that Gen9 firmware 32.x uses wrong definition for - * GUC_MAX_INSTANCES_PER_CLASS (1) but this is harmless for us now - * as we are not enabling GuC submission mode where this will be used - */ #define GUC_MAX_ENGINE_CLASSES 5 -#define GUC_MAX_INSTANCES_PER_CLASS 4 +#define GUC_MAX_INSTANCES_PER_CLASS 16 #define GUC_DOORBELL_INVALID 256 -- cgit v1.2.3 From 4a54da35102925401b1d670de468127914c67034 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 3 Jul 2019 14:58:04 +0100 Subject: drm/i915: Dump w/a lists on all engines We store separate wa_list on every engine, so be sure to include all when dumping the current set via debugfs. 
Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190703135805.7310-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_debugfs.c | 28 +++++++++++++++++++++------- 1 file changed, 21 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 6340cec733d2..fa8ff2704b6e 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -2960,14 +2960,28 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused) static int i915_wa_registers(struct seq_file *m, void *unused) { struct drm_i915_private *i915 = node_to_i915(m->private); - const struct i915_wa_list *wal = &i915->engine[RCS0]->ctx_wa_list; - struct i915_wa *wa; - unsigned int i; + struct intel_engine_cs *engine; + enum intel_engine_id id; - seq_printf(m, "Workarounds applied: %u\n", wal->count); - for (i = 0, wa = wal->list; i < wal->count; i++, wa++) - seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n", - i915_mmio_reg_offset(wa->reg), wa->val, wa->mask); + for_each_engine(engine, i915, id) { + const struct i915_wa_list *wal = &engine->ctx_wa_list; + const struct i915_wa *wa; + unsigned int count; + + count = wal->count; + if (!count) + continue; + + seq_printf(m, "%s: Workarounds applied: %u\n", + engine->name, count); + + for (wa = wal->list; count--; wa++) + seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n", + i915_mmio_reg_offset(wa->reg), + wa->val, wa->mask); + + seq_printf(m, "\n"); + } return 0; } -- cgit v1.2.3 From ab9e2f77768211a8ef5263e0c3a939c9b2de01a7 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 3 Jul 2019 14:58:05 +0100 Subject: drm/i915/gt: Pull engine w/a initialisation into common We need to setup the workarounds on all engines, with the knowledge about which platforms each workaround applies to kept together in the workaround list. As such, we can pull the w/a initialisation into the common setup and try to avoid duplicating knowledge about when to setup the workarounds. 
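As a sketch of the end result, every engine now builds its workaround lists during common setup, in the order used by the intel_engine_cs.c hunk below; the surrounding function body is elided and the wrapper here is illustrative only.

static int sketch_engine_setup_common(struct intel_engine_cs *engine)
{
        /* ... existing common setup ... */

        intel_engine_init_workarounds(engine);  /* engine->wa_list (GT registers) */
        intel_engine_init_whitelist(engine);    /* user-visible register whitelist */
        intel_engine_init_ctx_wa(engine);       /* per-context workarounds */

        return 0;
}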
Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190703135805.7310-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_context.c | 1 - drivers/gpu/drm/i915/gt/intel_engine_cs.c | 4 ++++ drivers/gpu/drm/i915/gt/intel_lrc.c | 3 --- drivers/gpu/drm/i915/gt/intel_workarounds.c | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index 8a9787cf0cd0..e367dce2a696 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -657,7 +657,6 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv) GEM_BUG_ON(dev_priv->kernel_context); GEM_BUG_ON(dev_priv->preempt_context); - intel_engine_init_ctx_wa(dev_priv->engine[RCS0]); init_contexts(dev_priv); /* lowest priority; idle task */ diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 7d6d6e62e9cc..df5932f5f578 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -636,6 +636,10 @@ static int intel_engine_setup_common(struct intel_engine_cs *engine) engine->sseu = intel_sseu_from_device_info(&RUNTIME_INFO(engine->i915)->sseu); + intel_engine_init_workarounds(engine); + intel_engine_init_whitelist(engine); + intel_engine_init_ctx_wa(engine); + return 0; } diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 15906a1bee73..e1ae1399c72b 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -2807,9 +2807,6 @@ int intel_execlists_submission_init(struct intel_engine_cs *engine) if (ret) return ret; - intel_engine_init_workarounds(engine); - intel_engine_init_whitelist(engine); - if (intel_init_workaround_bb(engine)) /* * We continue even if we fail to initialize WA batch diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 8dd9105efad9..53fe1eb7c7bd 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -1364,7 +1364,7 @@ void intel_engine_init_workarounds(struct intel_engine_cs *engine) { struct i915_wa_list *wal = &engine->wa_list; - if (GEM_WARN_ON(INTEL_GEN(engine->i915) < 8)) + if (INTEL_GEN(engine->i915) < 8) return; wa_init_start(wal, engine->name); -- cgit v1.2.3 From e7539b79f703a6b533385088fc15cb5c9ab3f56f Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 4 Jul 2019 21:16:56 +0100 Subject: drm/i915/gtt: Mark the freed page table entries with scratch On unwinding the allocation error path and having freed the page table entry, it is imperative that we mark it as scratch. 
<4> [416.075569] general protection fault: 0000 [#1] PREEMPT SMP PTI <4> [416.075801] CPU: 0 PID: 2385 Comm: kworker/u2:11 Tainted: G U 5.2.0-rc7-CI-Patchwork_13534+ #1 <4> [416.076162] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.10.1-0-g8891697-prebuilt.qemu-project.org 04/01/2014 <4> [416.076522] Workqueue: i915 __i915_vm_release [i915] <4> [416.076754] RIP: 0010:gen8_ppgtt_cleanup_3lvl+0x58/0xb0 [i915] <4> [416.077023] Code: 81 e2 04 fe ff ff 81 c2 ff 01 00 00 4c 8d 74 d6 58 4d 8b 65 00 4d 3b a7 28 02 00 00 74 40 49 8d 5c 24 50 49 81 c4 50 10 00 00 <48> 8b 2b 49 3b af 20 02 00 00 74 13 4c 89 ff 48 89 ee e8 01 fb ff <4> [416.077445] RSP: 0018:ffffc9000046bd98 EFLAGS: 00010206 <4> [416.077625] RAX: 0001000000000000 RBX: 6b6b6b6b6b6b6bbb RCX: 8b4b56d500000000 <4> [416.077838] RDX: 00000000000001ff RSI: ffff88805a578008 RDI: ffff88805bd0efc8 <4> [416.078167] RBP: ffff88805bd0efc8 R08: 0000000004e42b93 R09: 0000000000000001 <4> [416.078381] R10: 0000000000000000 R11: ffff888077a1b0b8 R12: 6b6b6b6b6b6b7bbb <4> [416.078594] R13: ffff88805a578058 R14: ffff88805a579058 R15: ffff88805bd0efc8 <4> [416.078815] FS: 0000000000000000(0000) GS:ffff88807da00000(0000) knlGS:0000000000000000 <4> [416.079395] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 <4> [416.079851] CR2: 000056160fec2b14 CR3: 0000000071bbc003 CR4: 00000000003606f0 <4> [416.080388] Call Trace: <4> [416.080828] gen8_ppgtt_cleanup+0x64/0x100 [i915] <4> [416.081399] __i915_vm_release+0xfc/0x1d0 [i915] Fixes: 1d1b5490b91c ("drm/i915/gtt: Replace struct_mutex serialisation for allocation") Signed-off-by: Chris Wilson Cc: Matthew Auld Cc: Mika Kuoppala Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190704201656.15775-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_gtt.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 9756f1b670e9..57db2d7270c5 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1491,6 +1491,7 @@ unwind_pd: spin_lock(&pdp->lock); if (atomic_dec_and_test(&pd->used)) { gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe); + pdp->entry[pdpe] = vm->scratch_pd; GEM_BUG_ON(!atomic_read(&pdp->used)); atomic_dec(&pdp->used); GEM_BUG_ON(alloc); @@ -1567,6 +1568,7 @@ unwind_pdp: spin_lock(&pml4->lock); if (atomic_dec_and_test(&pdp->used)) { gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e); + pml4->entry[pml4e] = vm->scratch_pdp; GEM_BUG_ON(alloc); alloc = pdp; /* defer the free until after the lock */ } -- cgit v1.2.3 From 6582f4f613e35225c663fc1194b1a7d1f0af2a2f Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 4 Jul 2019 17:53:17 +0100 Subject: drm/i915/selftests: Drain the freedlists between exec passes During the context execution tests, we issue a lot of work and discard a lot of objects without releasing the lock and allowing the background reaper to free those objects. Insert a small break between each pass to flush the worker. 
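In sketch form, the break between passes is simply: drop struct_mutex so the background reaper can run, reap the deferred frees, then retake the lock. The helpers are the ones used in the diff below; the wrapper function is illustrative only.

static void sketch_flush_between_passes(struct drm_i915_private *i915)
{
        mutex_unlock(&i915->drm.struct_mutex);  /* let the free worker in */
        i915_gem_drain_freed_objects(i915);     /* reap objects discarded so far */
        mutex_lock(&i915->drm.struct_mutex);
}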
Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190704165317.21060-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index a23c6df9b9f4..91d13f019265 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -562,6 +562,8 @@ out_unlock: mock_file_free(i915, file); if (err) return err; + + i915_gem_drain_freed_objects(i915); } return 0; @@ -672,6 +674,10 @@ static int igt_shared_ctx_exec(void *arg) dw += rem; } + + mutex_unlock(&i915->drm.struct_mutex); + i915_gem_drain_freed_objects(i915); + mutex_lock(&i915->drm.struct_mutex); } out_test: if (igt_live_test_end(&t)) -- cgit v1.2.3 From 1ee2ae896bb48c4b896c85c56214efb44320cd8b Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Wed, 3 Jul 2019 15:41:16 +0100 Subject: drm/i915/hangcheck: Look at instdone for all engines It seems intel_engine_get_instdone is able to get instdone for all engines but intel_hangcheck.c/subunits_stuck decides to ignore it for non render. We can just drop the check in subunits_stuck since the checks on unavailable fields will always return stuck, which when bitwise and with the potential unstuck instdone is harmless. Signed-off-by: Tvrtko Ursulin Cc: Chris Wilson Cc: Mika Kuoppala Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190703144116.15593-1-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/gt/intel_hangcheck.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_hangcheck.c b/drivers/gpu/drm/i915/gt/intel_hangcheck.c index 6bcfa6456c45..797d8ef0969c 100644 --- a/drivers/gpu/drm/i915/gt/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/intel_hangcheck.c @@ -57,9 +57,6 @@ static bool subunits_stuck(struct intel_engine_cs *engine) int slice; int subslice; - if (engine->id != RCS0) - return true; - intel_engine_get_instdone(engine, &instdone); /* There might be unstable subunit states even when -- cgit v1.2.3 From ec22f256a60ccf0dedd4ee305e616b6f17ed2bb7 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 4 Jul 2019 21:04:53 +0100 Subject: drm/i915/overlay: Stash the kernel context on initialisation Simplify runtime request creation by storing the context we need to use during initialisation. This allows us to remove one more hardcoded engine lookup. 
Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190704200455.14870-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/display/intel_overlay.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c index 21339b7f6a3e..07929726b780 100644 --- a/drivers/gpu/drm/i915/display/intel_overlay.c +++ b/drivers/gpu/drm/i915/display/intel_overlay.c @@ -175,6 +175,7 @@ struct overlay_registers { struct intel_overlay { struct drm_i915_private *i915; + struct intel_context *context; struct intel_crtc *crtc; struct i915_vma *vma; struct i915_vma *old_vma; @@ -239,9 +240,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay, static struct i915_request *alloc_request(struct intel_overlay *overlay) { - struct intel_engine_cs *engine = overlay->i915->engine[RCS0]; - - return i915_request_create(engine->kernel_context); + return i915_request_create(overlay->context); } /* overlay needs to be disable in OCMD reg */ @@ -1359,11 +1358,16 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv) if (!HAS_OVERLAY(dev_priv)) return; + if (!HAS_ENGINE(dev_priv, RCS0)) + return; + overlay = kzalloc(sizeof(*overlay), GFP_KERNEL); if (!overlay) return; overlay->i915 = dev_priv; + overlay->context = dev_priv->engine[RCS0]->kernel_context; + GEM_BUG_ON(!overlay->context); overlay->color_key = 0x0101fe; overlay->color_key_enabled = true; -- cgit v1.2.3 From 8f856c743c770b57308de450b21f70c0d16ab4cf Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 4 Jul 2019 22:23:43 +0100 Subject: drm/i915/selftests: Be engine agnostic When using MI operations, we do not care which engine we use, so use them all where possible, and where inconvenient double check we have the engine we selected at random. v2: Drop the local copy of engine->sseu to avoid an unchecked deref Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190704212343.6820-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 42 +++++++++++++++++----- .../drm/i915/gem/selftests/i915_gem_coherency.c | 3 ++ .../gpu/drm/i915/gem/selftests/i915_gem_context.c | 15 ++++---- drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 25 +++++++------ 4 files changed, 59 insertions(+), 26 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c index 2154cdee4ab3..86eed4c3ae2b 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c @@ -1422,6 +1422,9 @@ static int igt_ppgtt_pin_update(void *arg) struct drm_i915_gem_object *obj; struct i915_vma *vma; unsigned int flags = PIN_USER | PIN_OFFSET_FIXED; + struct intel_engine_cs *engine; + enum intel_engine_id id; + unsigned int n; int first, last; int err; @@ -1519,11 +1522,20 @@ static int igt_ppgtt_pin_update(void *arg) * land in the now stale 2M page. 
*/ - err = gpu_write(vma, ctx, dev_priv->engine[RCS0], 0, 0xdeadbeaf); - if (err) - goto out_unpin; + n = 0; + for_each_engine(engine, dev_priv, id) { + if (!intel_engine_can_store_dword(engine)) + continue; - err = cpu_check(obj, 0, 0xdeadbeaf); + err = gpu_write(vma, ctx, engine, n++, 0xdeadbeaf); + if (err) + goto out_unpin; + } + while (n--) { + err = cpu_check(obj, n, 0xdeadbeaf); + if (err) + goto out_unpin; + } out_unpin: i915_vma_unpin(vma); @@ -1599,8 +1611,11 @@ static int igt_shrink_thp(void *arg) struct drm_i915_private *i915 = ctx->i915; struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm; struct drm_i915_gem_object *obj; + struct intel_engine_cs *engine; + enum intel_engine_id id; struct i915_vma *vma; unsigned int flags = PIN_USER; + unsigned int n; int err; /* @@ -1636,9 +1651,15 @@ static int igt_shrink_thp(void *arg) if (err) goto out_unpin; - err = gpu_write(vma, ctx, i915->engine[RCS0], 0, 0xdeadbeaf); - if (err) - goto out_unpin; + n = 0; + for_each_engine(engine, i915, id) { + if (!intel_engine_can_store_dword(engine)) + continue; + + err = gpu_write(vma, ctx, engine, n++, 0xdeadbeaf); + if (err) + goto out_unpin; + } i915_vma_unpin(vma); @@ -1663,7 +1684,12 @@ static int igt_shrink_thp(void *arg) if (err) goto out_close; - err = cpu_check(obj, 0, 0xdeadbeaf); + while (n--) { + err = cpu_check(obj, n, 0xdeadbeaf); + if (err) + goto out_unpin; + } + out_unpin: i915_vma_unpin(vma); diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c index 8f22d3f18422..861f32be7d46 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c @@ -250,6 +250,9 @@ static bool needs_mi_store_dword(struct drm_i915_private *i915) if (i915_terminally_wedged(i915)) return false; + if (!HAS_ENGINE(i915, RCS0)) + return false; + return intel_engine_can_store_dword(i915->engine[RCS0]); } diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index 91d13f019265..3abe15a08b6d 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -1025,7 +1025,6 @@ __igt_ctx_sseu(struct drm_i915_private *i915, unsigned int flags) { struct intel_engine_cs *engine = i915->engine[RCS0]; - struct intel_sseu default_sseu = engine->sseu; struct drm_i915_gem_object *obj; struct i915_gem_context *ctx; struct intel_context *ce; @@ -1033,26 +1032,26 @@ __igt_ctx_sseu(struct drm_i915_private *i915, struct drm_file *file; int ret; - if (INTEL_GEN(i915) < 9) + if (INTEL_GEN(i915) < 9 || !engine) return 0; if (!RUNTIME_INFO(i915)->sseu.has_slice_pg) return 0; - if (hweight32(default_sseu.slice_mask) < 2) + if (hweight32(engine->sseu.slice_mask) < 2) return 0; /* * Gen11 VME friendly power-gated configuration with half enabled * sub-slices. */ - pg_sseu = default_sseu; + pg_sseu = engine->sseu; pg_sseu.slice_mask = 1; pg_sseu.subslice_mask = - ~(~0 << (hweight32(default_sseu.subslice_mask) / 2)); + ~(~0 << (hweight32(engine->sseu.subslice_mask) / 2)); pr_info("SSEU subtest '%s', flags=%x, def_slices=%u, pg_slices=%u\n", - name, flags, hweight32(default_sseu.slice_mask), + name, flags, hweight32(engine->sseu.slice_mask), hweight32(pg_sseu.slice_mask)); file = mock_file(i915); @@ -1088,7 +1087,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915, goto out_context; /* First set the default mask. 
*/ - ret = __sseu_test(i915, name, flags, ce, obj, default_sseu); + ret = __sseu_test(i915, name, flags, ce, obj, engine->sseu); if (ret) goto out_fail; @@ -1098,7 +1097,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915, goto out_fail; /* Back to defaults. */ - ret = __sseu_test(i915, name, flags, ce, obj, default_sseu); + ret = __sseu_test(i915, name, flags, ce, obj, engine->sseu); if (ret) goto out_fail; diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index 9b05bef15023..b95fdc2b6bfc 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -328,7 +328,8 @@ out: static int make_obj_busy(struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = to_i915(obj->base.dev); - struct i915_request *rq; + struct intel_engine_cs *engine; + enum intel_engine_id id; struct i915_vma *vma; int err; @@ -340,17 +341,21 @@ static int make_obj_busy(struct drm_i915_gem_object *obj) if (err) return err; - rq = i915_request_create(i915->engine[RCS0]->kernel_context); - if (IS_ERR(rq)) { - i915_vma_unpin(vma); - return PTR_ERR(rq); - } + for_each_engine(engine, i915, id) { + struct i915_request *rq; - i915_vma_lock(vma); - err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); - i915_vma_unlock(vma); + rq = i915_request_create(engine->kernel_context); + if (IS_ERR(rq)) { + i915_vma_unpin(vma); + return PTR_ERR(rq); + } - i915_request_add(rq); + i915_vma_lock(vma); + err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); + i915_vma_unlock(vma); + + i915_request_add(rq); + } i915_vma_unpin(vma); i915_gem_object_put(obj); /* leave it only alive via its active ref */ -- cgit v1.2.3 From b8cade5959acd712b03b0cecca8299e9292c47a6 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 4 Jul 2019 21:04:55 +0100 Subject: drm/i915: Show instdone for each engine in debugfs Although polling each engine quickly is preferable as it should give us a sample of each engine at roughly the same time, keep it simple and just sample the engine as we print out the debug state.
Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190704200455.14870-3-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_debugfs.c | 33 +++++++++++++-------------------- 1 file changed, 13 insertions(+), 20 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index fa8ff2704b6e..3e4f58f19362 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1076,8 +1076,6 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); struct intel_engine_cs *engine; - u64 acthd[I915_NUM_ENGINES]; - struct intel_instdone instdone; intel_wakeref_t wakeref; enum intel_engine_id id; @@ -1092,13 +1090,6 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) return 0; } - with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) { - for_each_engine(engine, dev_priv, id) - acthd[id] = intel_engine_get_active_head(engine); - - intel_engine_get_instdone(dev_priv->engine[RCS0], &instdone); - } - if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer)) seq_printf(m, "Hangcheck active, timer fires in %dms\n", jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires - @@ -1110,23 +1101,25 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake)); - for_each_engine(engine, dev_priv, id) { - seq_printf(m, "%s: %d ms ago\n", - engine->name, - jiffies_to_msecs(jiffies - - engine->hangcheck.action_timestamp)); + with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) { + for_each_engine(engine, dev_priv, id) { + struct intel_instdone instdone; - seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n", - (long long)engine->hangcheck.acthd, - (long long)acthd[id]); + seq_printf(m, "%s: %d ms ago\n", + engine->name, + jiffies_to_msecs(jiffies - + engine->hangcheck.action_timestamp)); - if (engine->id == RCS0) { - seq_puts(m, "\tinstdone read =\n"); + seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n", + (long long)engine->hangcheck.acthd, + intel_engine_get_active_head(engine)); + + intel_engine_get_instdone(engine, &instdone); + seq_puts(m, "\tinstdone read =\n"); i915_instdone_info(dev_priv, m, &instdone); seq_puts(m, "\tinstdone accu =\n"); - i915_instdone_info(dev_priv, m, &engine->hangcheck.instdone); } -- cgit v1.2.3 From 9b77011e4122d420abbc486bad8ec53aa2a2873d Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Thu, 4 Jul 2019 13:17:54 +0100 Subject: drm/i915: Rework some interrupt handling functions to take intel_gt Some interrupt handling functions already have gt in their names suggesting them as obvious candidates to make them take struct intel_gt instead of i915. 
Signed-off-by: Paulo Zanoni Co-developed-by: Paulo Zanoni Signed-off-by: Tvrtko Ursulin Cc: Daniele Ceraolo Spurio Reviewed-by: Chris Wilson Acked-by: Daniele Ceraolo Spurio Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190704121756.27824-1-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_irq.c | 88 +++++++++++++++++++++-------------------- 1 file changed, 46 insertions(+), 42 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index b5724ad38bf5..cb9cc9ceac2e 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -305,17 +305,17 @@ void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, } static u32 -gen11_gt_engine_identity(struct drm_i915_private * const i915, +gen11_gt_engine_identity(struct intel_gt *gt, const unsigned int bank, const unsigned int bit); -static bool gen11_reset_one_iir(struct drm_i915_private * const i915, +static bool gen11_reset_one_iir(struct intel_gt *gt, const unsigned int bank, const unsigned int bit) { - void __iomem * const regs = i915->uncore.regs; + void __iomem * const regs = gt->uncore->regs; u32 dw; - lockdep_assert_held(&i915->irq_lock); + lockdep_assert_held(>->i915->irq_lock); dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank)); if (dw & BIT(bit)) { @@ -323,7 +323,7 @@ static bool gen11_reset_one_iir(struct drm_i915_private * const i915, * According to the BSpec, DW_IIR bits cannot be cleared without * first servicing the Selector & Shared IIR registers. */ - gen11_gt_engine_identity(i915, bank, bit); + gen11_gt_engine_identity(gt, bank, bit); /* * We locked GT INT DW by reading it. If we want to (try @@ -528,7 +528,7 @@ void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv) { spin_lock_irq(&dev_priv->irq_lock); - while (gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM)) + while (gen11_reset_one_iir(&dev_priv->gt, 0, GEN11_GTPM)) ; dev_priv->gt_pm.rps.pm_iir = 0; @@ -555,7 +555,7 @@ void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv) WARN_ON_ONCE(rps->pm_iir); if (INTEL_GEN(dev_priv) >= 11) - WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM)); + WARN_ON_ONCE(gen11_reset_one_iir(&dev_priv->gt, 0, GEN11_GTPM)); else WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events); @@ -635,7 +635,7 @@ void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv) void gen11_reset_guc_interrupts(struct drm_i915_private *i915) { spin_lock_irq(&i915->irq_lock); - gen11_reset_one_iir(i915, 0, GEN11_GUC); + gen11_reset_one_iir(&i915->gt, 0, GEN11_GUC); spin_unlock_irq(&i915->irq_lock); } @@ -646,7 +646,7 @@ void gen11_enable_guc_interrupts(struct drm_i915_private *dev_priv) u32 events = REG_FIELD_PREP(ENGINE1_MASK, GEN11_GUC_INTR_GUC2HOST); - WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GUC)); + WARN_ON_ONCE(gen11_reset_one_iir(&dev_priv->gt, 0, GEN11_GUC)); I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, events); I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~events); dev_priv->guc.interrupts.enabled = true; @@ -3033,14 +3033,14 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) } static u32 -gen11_gt_engine_identity(struct drm_i915_private * const i915, +gen11_gt_engine_identity(struct intel_gt *gt, const unsigned int bank, const unsigned int bit) { - void __iomem * const regs = i915->uncore.regs; + void __iomem * const regs = gt->uncore->regs; u32 timeout_ts; u32 ident; - lockdep_assert_held(&i915->irq_lock); + lockdep_assert_held(>->i915->irq_lock); 
raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit)); @@ -3067,9 +3067,11 @@ gen11_gt_engine_identity(struct drm_i915_private * const i915, } static void -gen11_other_irq_handler(struct drm_i915_private * const i915, - const u8 instance, const u16 iir) +gen11_other_irq_handler(struct intel_gt *gt, const u8 instance, + const u16 iir) { + struct drm_i915_private *i915 = gt->i915; + if (instance == OTHER_GUC_INSTANCE) return gen11_guc_irq_handler(i915, iir); @@ -3081,13 +3083,13 @@ gen11_other_irq_handler(struct drm_i915_private * const i915, } static void -gen11_engine_irq_handler(struct drm_i915_private * const i915, - const u8 class, const u8 instance, const u16 iir) +gen11_engine_irq_handler(struct intel_gt *gt, const u8 class, + const u8 instance, const u16 iir) { struct intel_engine_cs *engine; if (instance <= MAX_ENGINE_INSTANCE) - engine = i915->engine_class[class][instance]; + engine = gt->i915->engine_class[class][instance]; else engine = NULL; @@ -3099,8 +3101,7 @@ gen11_engine_irq_handler(struct drm_i915_private * const i915, } static void -gen11_gt_identity_handler(struct drm_i915_private * const i915, - const u32 identity) +gen11_gt_identity_handler(struct intel_gt *gt, const u32 identity) { const u8 class = GEN11_INTR_ENGINE_CLASS(identity); const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity); @@ -3110,31 +3111,30 @@ gen11_gt_identity_handler(struct drm_i915_private * const i915, return; if (class <= COPY_ENGINE_CLASS) - return gen11_engine_irq_handler(i915, class, instance, intr); + return gen11_engine_irq_handler(gt, class, instance, intr); if (class == OTHER_CLASS) - return gen11_other_irq_handler(i915, instance, intr); + return gen11_other_irq_handler(gt, instance, intr); WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n", class, instance, intr); } static void -gen11_gt_bank_handler(struct drm_i915_private * const i915, - const unsigned int bank) +gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank) { - void __iomem * const regs = i915->uncore.regs; + void __iomem * const regs = gt->uncore->regs; unsigned long intr_dw; unsigned int bit; - lockdep_assert_held(&i915->irq_lock); + lockdep_assert_held(>->i915->irq_lock); intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank)); for_each_set_bit(bit, &intr_dw, 32) { - const u32 ident = gen11_gt_engine_identity(i915, bank, bit); + const u32 ident = gen11_gt_engine_identity(gt, bank, bit); - gen11_gt_identity_handler(i915, ident); + gen11_gt_identity_handler(gt, ident); } /* Clear must be after shared has been served for engine */ @@ -3142,25 +3142,25 @@ gen11_gt_bank_handler(struct drm_i915_private * const i915, } static void -gen11_gt_irq_handler(struct drm_i915_private * const i915, - const u32 master_ctl) +gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl) { + struct drm_i915_private *i915 = gt->i915; unsigned int bank; spin_lock(&i915->irq_lock); for (bank = 0; bank < 2; bank++) { if (master_ctl & GEN11_GT_DW_IRQ(bank)) - gen11_gt_bank_handler(i915, bank); + gen11_gt_bank_handler(gt, bank); } spin_unlock(&i915->irq_lock); } static u32 -gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl) +gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl) { - void __iomem * const regs = dev_priv->uncore.regs; + void __iomem * const regs = gt->uncore->regs; u32 iir; if (!(master_ctl & GEN11_GU_MISC_IRQ)) @@ -3174,10 +3174,10 @@ gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl) } static void 
-gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir) +gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir) { if (iir & GEN11_GU_MISC_GSE) - intel_opregion_asle_intr(dev_priv); + intel_opregion_asle_intr(gt->i915); } static inline u32 gen11_master_intr_disable(void __iomem * const regs) @@ -3202,6 +3202,7 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg) { struct drm_i915_private * const i915 = arg; void __iomem * const regs = i915->uncore.regs; + struct intel_gt *gt = &i915->gt; u32 master_ctl; u32 gu_misc_iir; @@ -3215,7 +3216,7 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg) } /* Find, clear, then process each source of interrupt. */ - gen11_gt_irq_handler(i915, master_ctl); + gen11_gt_irq_handler(gt, master_ctl); /* IRQs are synced during runtime_suspend, we don't require a wakeref */ if (master_ctl & GEN11_DISPLAY_IRQ) { @@ -3230,11 +3231,11 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg) enable_rpm_wakeref_asserts(&i915->runtime_pm); } - gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl); + gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl); gen11_master_intr_enable(regs); - gen11_gu_misc_irq_handler(i915, gu_misc_iir); + gen11_gu_misc_irq_handler(gt, gu_misc_iir); return IRQ_HANDLED; } @@ -3590,8 +3591,10 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv) ibx_irq_reset(dev_priv); } -static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv) +static void gen11_gt_irq_reset(struct intel_gt *gt) { + struct drm_i915_private *dev_priv = gt->i915; + /* Disable RCS, BCS, VCS and VECS class engines. */ I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, 0); I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, 0); @@ -3616,7 +3619,7 @@ static void gen11_irq_reset(struct drm_i915_private *dev_priv) gen11_master_intr_disable(dev_priv->uncore.regs); - gen11_gt_irq_reset(dev_priv); + gen11_gt_irq_reset(&dev_priv->gt); I915_WRITE(GEN11_DISPLAY_INT_CTL, 0); @@ -4222,8 +4225,9 @@ static void gen8_irq_postinstall(struct drm_i915_private *dev_priv) gen8_master_intr_enable(dev_priv->uncore.regs); } -static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv) +static void gen11_gt_irq_postinstall(struct intel_gt *gt) { + struct drm_i915_private *dev_priv = gt->i915; const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT; BUILD_BUG_ON(irqs & 0xffff0000); @@ -4275,14 +4279,14 @@ static void gen11_irq_postinstall(struct drm_i915_private *dev_priv) if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) icp_irq_postinstall(dev_priv); - gen11_gt_irq_postinstall(dev_priv); + gen11_gt_irq_postinstall(&dev_priv->gt); gen8_de_irq_postinstall(dev_priv); GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked); I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE); - gen11_master_intr_enable(dev_priv->uncore.regs); + gen11_master_intr_enable(uncore->regs); POSTING_READ(GEN11_GFX_MSTR_IRQ); } -- cgit v1.2.3 From f0818984fa5d791583bb078086a8580231341b86 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Thu, 4 Jul 2019 13:17:55 +0100 Subject: drm/i915: Remove some legacy mmio accessors from interrupt handling Mostly in gen11 interrupt handling and a couple neighbouring functions which were easy since uncore local was already available. 
Signed-off-by: Paulo Zanoni Co-developed-by: Paulo Zanoni Signed-off-by: Tvrtko Ursulin Cc: Daniele Ceraolo Spurio Reviewed-by: Chris Wilson Acked-by: Daniele Ceraolo Spurio Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190704121756.27824-2-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_irq.c | 75 +++++++++++++++++++++-------------------- 1 file changed, 39 insertions(+), 36 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index cb9cc9ceac2e..09df7e61815e 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -3479,12 +3479,12 @@ static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) struct intel_uncore *uncore = &dev_priv->uncore; if (IS_CHERRYVIEW(dev_priv)) - I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); + intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV); else - I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); + intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK); i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); - I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); + intel_uncore_write(uncore, PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); i9xx_pipestat_irq_reset(dev_priv); @@ -3531,11 +3531,11 @@ static void ironlake_irq_reset(struct drm_i915_private *dev_priv) GEN3_IRQ_RESET(uncore, DE); if (IS_GEN(dev_priv, 7)) - I915_WRITE(GEN7_ERR_INT, 0xffffffff); + intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff); if (IS_HASWELL(dev_priv)) { - I915_WRITE(EDP_PSR_IMR, 0xffffffff); - I915_WRITE(EDP_PSR_IIR, 0xffffffff); + intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); + intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); } gen5_gt_irq_reset(dev_priv); @@ -3575,8 +3575,8 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv) gen8_gt_irq_reset(dev_priv); - I915_WRITE(EDP_PSR_IMR, 0xffffffff); - I915_WRITE(EDP_PSR_IIR, 0xffffffff); + intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); + intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); for_each_pipe(dev_priv, pipe) if (intel_display_power_is_enabled(dev_priv, @@ -3593,23 +3593,23 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv) static void gen11_gt_irq_reset(struct intel_gt *gt) { - struct drm_i915_private *dev_priv = gt->i915; + struct intel_uncore *uncore = gt->uncore; /* Disable RCS, BCS, VCS and VECS class engines. */ - I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, 0); - I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, 0); + intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0); + intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0); /* Restore masks irqs on RCS, BCS, VCS and VECS engines. 
*/ - I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~0); - I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~0); - I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~0); - I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~0); - I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~0); - - I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0); - I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0); - I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, 0); - I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~0); + intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~0); + intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~0); + intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~0); + intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~0); + intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~0); + + intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0); + intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0); + intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0); + intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0); } static void gen11_irq_reset(struct drm_i915_private *dev_priv) @@ -3621,10 +3621,10 @@ static void gen11_irq_reset(struct drm_i915_private *dev_priv) gen11_gt_irq_reset(&dev_priv->gt); - I915_WRITE(GEN11_DISPLAY_INT_CTL, 0); + intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0); - I915_WRITE(EDP_PSR_IMR, 0xffffffff); - I915_WRITE(EDP_PSR_IIR, 0xffffffff); + intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); + intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); for_each_pipe(dev_priv, pipe) if (intel_display_power_is_enabled(dev_priv, @@ -4227,21 +4227,24 @@ static void gen8_irq_postinstall(struct drm_i915_private *dev_priv) static void gen11_gt_irq_postinstall(struct intel_gt *gt) { - struct drm_i915_private *dev_priv = gt->i915; const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT; + struct drm_i915_private *dev_priv = gt->i915; + struct intel_uncore *uncore = gt->uncore; + const u32 dmask = irqs << 16 | irqs; + const u32 smask = irqs << 16; BUILD_BUG_ON(irqs & 0xffff0000); /* Enable RCS, BCS, VCS and VECS class interrupts. */ - I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, irqs << 16 | irqs); - I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, irqs << 16 | irqs); + intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask); + intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask); /* Unmask irqs on RCS, BCS, VCS and VECS engines. 
*/ - I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~(irqs << 16)); - I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~(irqs << 16)); - I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~(irqs | irqs << 16)); - I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~(irqs | irqs << 16)); - I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~(irqs | irqs << 16)); + intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~smask); + intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~smask); + intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~dmask); + intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~dmask); + intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~dmask); /* * RPS interrupts will get enabled/disabled on demand when RPS itself @@ -4249,12 +4252,12 @@ static void gen11_gt_irq_postinstall(struct intel_gt *gt) */ dev_priv->pm_ier = 0x0; dev_priv->pm_imr = ~dev_priv->pm_ier; - I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0); - I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0); + intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0); + intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0); /* Same thing for GuC interrupts */ - I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, 0); - I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~0); + intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0); + intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0); } static void icp_irq_postinstall(struct drm_i915_private *dev_priv) -- cgit v1.2.3 From 58820574f1e937a1cf3eea629f1496e02560a132 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Thu, 4 Jul 2019 13:17:56 +0100 Subject: drm/i915: Move dev_priv->pm_i{m, e}r into intel_gt PM interrupts belong to the GT so move the variables to be inside struct intel_gt. Signed-off-by: Paulo Zanoni Co-developed-by: Paulo Zanoni Signed-off-by: Tvrtko Ursulin Cc: Daniele Ceraolo Spurio Reviewed-by: Chris Wilson Acked-by: Daniele Ceraolo Spurio Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190704121756.27824-3-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/gt/intel_gt_types.h | 3 + drivers/gpu/drm/i915/gt/intel_ringbuffer.c | 4 +- drivers/gpu/drm/i915/i915_drv.h | 2 - drivers/gpu/drm/i915/i915_irq.c | 121 +++++++++++++++-------------- drivers/gpu/drm/i915/i915_irq.h | 4 +- 5 files changed, 71 insertions(+), 63 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h index c03e56628ee2..37da428bef62 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h @@ -55,6 +55,9 @@ struct intel_gt { ktime_t last_init_time; struct i915_vma *scratch; + + u32 pm_imr; + u32 pm_ier; }; #endif /* __INTEL_GT_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c index f804ec35037d..b33cfc56f623 100644 --- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c @@ -1040,14 +1040,14 @@ hsw_vebox_irq_enable(struct intel_engine_cs *engine) /* Flush/delay to ensure the RING_IMR is active before the GT IMR */ ENGINE_POSTING_READ(engine, RING_IMR); - gen6_unmask_pm_irq(engine->i915, engine->irq_enable_mask); + gen6_unmask_pm_irq(engine->gt, engine->irq_enable_mask); } static void hsw_vebox_irq_disable(struct intel_engine_cs *engine) { ENGINE_WRITE(engine, RING_IMR, ~0); - gen6_mask_pm_irq(engine->i915, engine->irq_enable_mask); + gen6_mask_pm_irq(engine->gt, engine->irq_enable_mask); } static int diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h 
index a4ae9ea298dd..c35de1380da9 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1403,8 +1403,6 @@ struct drm_i915_private { u32 de_irq_mask[I915_MAX_PIPES]; }; u32 gt_irq_mask; - u32 pm_imr; - u32 pm_ier; u32 pm_rps_events; u32 pm_guc_events; u32 pipestat_irq_mask[I915_MAX_PIPES]; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 09df7e61815e..7c5ba5cbea34 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -409,50 +409,54 @@ static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv) return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR; } -static void write_pm_imr(struct drm_i915_private *dev_priv) +static void write_pm_imr(struct intel_gt *gt) { + struct drm_i915_private *i915 = gt->i915; + struct intel_uncore *uncore = gt->uncore; + u32 mask = gt->pm_imr; i915_reg_t reg; - u32 mask = dev_priv->pm_imr; - if (INTEL_GEN(dev_priv) >= 11) { + if (INTEL_GEN(i915) >= 11) { reg = GEN11_GPM_WGBOXPERF_INTR_MASK; /* pm is in upper half */ mask = mask << 16; - } else if (INTEL_GEN(dev_priv) >= 8) { + } else if (INTEL_GEN(i915) >= 8) { reg = GEN8_GT_IMR(2); } else { reg = GEN6_PMIMR; } - I915_WRITE(reg, mask); - POSTING_READ(reg); + intel_uncore_write(uncore, reg, mask); + intel_uncore_posting_read(uncore, reg); } -static void write_pm_ier(struct drm_i915_private *dev_priv) +static void write_pm_ier(struct intel_gt *gt) { + struct drm_i915_private *i915 = gt->i915; + struct intel_uncore *uncore = gt->uncore; + u32 mask = gt->pm_ier; i915_reg_t reg; - u32 mask = dev_priv->pm_ier; - if (INTEL_GEN(dev_priv) >= 11) { + if (INTEL_GEN(i915) >= 11) { reg = GEN11_GPM_WGBOXPERF_INTR_ENABLE; /* pm is in upper half */ mask = mask << 16; - } else if (INTEL_GEN(dev_priv) >= 8) { + } else if (INTEL_GEN(i915) >= 8) { reg = GEN8_GT_IER(2); } else { reg = GEN6_PMIER; } - I915_WRITE(reg, mask); + intel_uncore_write(uncore, reg, mask); } /** * snb_update_pm_irq - update GEN6_PMIMR - * @dev_priv: driver private + * @gt: gt for the interrupts * @interrupt_mask: mask of interrupt bits to update * @enabled_irq_mask: mask of interrupt bits to enable */ -static void snb_update_pm_irq(struct drm_i915_private *dev_priv, +static void snb_update_pm_irq(struct intel_gt *gt, u32 interrupt_mask, u32 enabled_irq_mask) { @@ -460,37 +464,37 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv, WARN_ON(enabled_irq_mask & ~interrupt_mask); - lockdep_assert_held(&dev_priv->irq_lock); + lockdep_assert_held(>->i915->irq_lock); - new_val = dev_priv->pm_imr; + new_val = gt->pm_imr; new_val &= ~interrupt_mask; new_val |= (~enabled_irq_mask & interrupt_mask); - if (new_val != dev_priv->pm_imr) { - dev_priv->pm_imr = new_val; - write_pm_imr(dev_priv); + if (new_val != gt->pm_imr) { + gt->pm_imr = new_val; + write_pm_imr(gt); } } -void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask) +void gen6_unmask_pm_irq(struct intel_gt *gt, u32 mask) { - if (WARN_ON(!intel_irqs_enabled(dev_priv))) + if (WARN_ON(!intel_irqs_enabled(gt->i915))) return; - snb_update_pm_irq(dev_priv, mask, mask); + snb_update_pm_irq(gt, mask, mask); } -static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask) +static void __gen6_mask_pm_irq(struct intel_gt *gt, u32 mask) { - snb_update_pm_irq(dev_priv, mask, 0); + snb_update_pm_irq(gt, mask, 0); } -void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask) +void gen6_mask_pm_irq(struct intel_gt *gt, u32 mask) { - if 
(WARN_ON(!intel_irqs_enabled(dev_priv))) + if (WARN_ON(!intel_irqs_enabled(gt->i915))) return; - __gen6_mask_pm_irq(dev_priv, mask); + __gen6_mask_pm_irq(gt, mask); } static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask) @@ -504,23 +508,23 @@ static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask) POSTING_READ(reg); } -static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask) +static void gen6_enable_pm_irq(struct intel_gt *gt, u32 enable_mask) { - lockdep_assert_held(&dev_priv->irq_lock); + lockdep_assert_held(>->i915->irq_lock); - dev_priv->pm_ier |= enable_mask; - write_pm_ier(dev_priv); - gen6_unmask_pm_irq(dev_priv, enable_mask); + gt->pm_ier |= enable_mask; + write_pm_ier(gt); + gen6_unmask_pm_irq(gt, enable_mask); /* unmask_pm_irq provides an implicit barrier (POSTING_READ) */ } -static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask) +static void gen6_disable_pm_irq(struct intel_gt *gt, u32 disable_mask) { - lockdep_assert_held(&dev_priv->irq_lock); + lockdep_assert_held(>->i915->irq_lock); - dev_priv->pm_ier &= ~disable_mask; - __gen6_mask_pm_irq(dev_priv, disable_mask); - write_pm_ier(dev_priv); + gt->pm_ier &= ~disable_mask; + __gen6_mask_pm_irq(gt, disable_mask); + write_pm_ier(gt); /* though a barrier is missing here, but don't really need a one */ } @@ -546,6 +550,7 @@ void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv) void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv) { + struct intel_gt *gt = &dev_priv->gt; struct intel_rps *rps = &dev_priv->gt_pm.rps; if (READ_ONCE(rps->interrupts_enabled)) @@ -555,12 +560,12 @@ void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv) WARN_ON_ONCE(rps->pm_iir); if (INTEL_GEN(dev_priv) >= 11) - WARN_ON_ONCE(gen11_reset_one_iir(&dev_priv->gt, 0, GEN11_GTPM)); + WARN_ON_ONCE(gen11_reset_one_iir(gt, 0, GEN11_GTPM)); else WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events); rps->interrupts_enabled = true; - gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); + gen6_enable_pm_irq(gt, dev_priv->pm_rps_events); spin_unlock_irq(&dev_priv->irq_lock); } @@ -577,7 +582,7 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv) I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u)); - gen6_disable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS); + gen6_disable_pm_irq(&dev_priv->gt, GEN6_PM_RPS_EVENTS); spin_unlock_irq(&dev_priv->irq_lock); intel_synchronize_irq(dev_priv); @@ -612,7 +617,7 @@ void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv) WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_guc_events); dev_priv->guc.interrupts.enabled = true; - gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events); + gen6_enable_pm_irq(&dev_priv->gt, dev_priv->pm_guc_events); } spin_unlock_irq(&dev_priv->irq_lock); } @@ -624,7 +629,7 @@ void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv) spin_lock_irq(&dev_priv->irq_lock); dev_priv->guc.interrupts.enabled = false; - gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events); + gen6_disable_pm_irq(&dev_priv->gt, dev_priv->pm_guc_events); spin_unlock_irq(&dev_priv->irq_lock); intel_synchronize_irq(dev_priv); @@ -1426,7 +1431,7 @@ out: /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */ spin_lock_irq(&dev_priv->irq_lock); if (rps->interrupts_enabled) - gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events); + gen6_unmask_pm_irq(&dev_priv->gt, dev_priv->pm_rps_events); 
spin_unlock_irq(&dev_priv->irq_lock); } @@ -1893,8 +1898,9 @@ static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, /* The RPS events need forcewake, so we add them to a work queue and mask their * IMR bits until the work is done. Other interrupts can be processed without * the work queue. */ -static void gen11_rps_irq_handler(struct drm_i915_private *i915, u32 pm_iir) +static void gen11_rps_irq_handler(struct intel_gt *gt, u32 pm_iir) { + struct drm_i915_private *i915 = gt->i915; struct intel_rps *rps = &i915->gt_pm.rps; const u32 events = i915->pm_rps_events & pm_iir; @@ -1903,7 +1909,7 @@ static void gen11_rps_irq_handler(struct drm_i915_private *i915, u32 pm_iir) if (unlikely(!events)) return; - gen6_mask_pm_irq(i915, events); + gen6_mask_pm_irq(gt, events); if (!rps->interrupts_enabled) return; @@ -1918,7 +1924,8 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) if (pm_iir & dev_priv->pm_rps_events) { spin_lock(&dev_priv->irq_lock); - gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); + gen6_mask_pm_irq(&dev_priv->gt, + pm_iir & dev_priv->pm_rps_events); if (rps->interrupts_enabled) { rps->pm_iir |= pm_iir & dev_priv->pm_rps_events; schedule_work(&rps->work); @@ -3076,7 +3083,7 @@ gen11_other_irq_handler(struct intel_gt *gt, const u8 instance, return gen11_guc_irq_handler(i915, iir); if (instance == OTHER_GTPM_INSTANCE) - return gen11_rps_irq_handler(i915, iir); + return gen11_rps_irq_handler(gt, iir); WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n", instance, iir); @@ -4006,11 +4013,11 @@ static void gen5_gt_irq_postinstall(struct drm_i915_private *dev_priv) */ if (HAS_ENGINE(dev_priv, VECS0)) { pm_irqs |= PM_VEBOX_USER_INTERRUPT; - dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT; + dev_priv->gt.pm_ier |= PM_VEBOX_USER_INTERRUPT; } - dev_priv->pm_imr = 0xffffffff; - GEN3_IRQ_INIT(uncore, GEN6_PM, dev_priv->pm_imr, pm_irqs); + dev_priv->gt.pm_imr = 0xffffffff; + GEN3_IRQ_INIT(uncore, GEN6_PM, dev_priv->gt.pm_imr, pm_irqs); } } @@ -4107,9 +4114,10 @@ static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv) POSTING_READ(VLV_MASTER_IER); } -static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) +static void gen8_gt_irq_postinstall(struct drm_i915_private *i915) { - struct intel_uncore *uncore = &dev_priv->uncore; + struct intel_gt *gt = &i915->gt; + struct intel_uncore *uncore = gt->uncore; /* These are interrupts we'll toggle with the ring mask register */ u32 gt_interrupts[] = { @@ -4129,15 +4137,15 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT) }; - dev_priv->pm_ier = 0x0; - dev_priv->pm_imr = ~dev_priv->pm_ier; + gt->pm_ier = 0x0; + gt->pm_imr = ~gt->pm_ier; GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]); GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]); /* * RPS interrupts will get enabled/disabled on demand when RPS itself * is enabled/disabled. Same wil be the case for GuC interrupts. 
*/ - GEN8_IRQ_INIT_NDX(uncore, GT, 2, dev_priv->pm_imr, dev_priv->pm_ier); + GEN8_IRQ_INIT_NDX(uncore, GT, 2, gt->pm_imr, gt->pm_ier); GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]); } @@ -4228,7 +4236,6 @@ static void gen8_irq_postinstall(struct drm_i915_private *dev_priv) static void gen11_gt_irq_postinstall(struct intel_gt *gt) { const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT; - struct drm_i915_private *dev_priv = gt->i915; struct intel_uncore *uncore = gt->uncore; const u32 dmask = irqs << 16 | irqs; const u32 smask = irqs << 16; @@ -4250,8 +4257,8 @@ static void gen11_gt_irq_postinstall(struct intel_gt *gt) * RPS interrupts will get enabled/disabled on demand when RPS itself * is enabled/disabled. */ - dev_priv->pm_ier = 0x0; - dev_priv->pm_imr = ~dev_priv->pm_ier; + gt->pm_ier = 0x0; + gt->pm_imr = ~gt->pm_ier; intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0); intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0); diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h index 44d480dab030..d93fa4e75442 100644 --- a/drivers/gpu/drm/i915/i915_irq.h +++ b/drivers/gpu/drm/i915/i915_irq.h @@ -77,8 +77,8 @@ ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, u32 bits) void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask); void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask); -void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask); -void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask); +void gen6_mask_pm_irq(struct intel_gt *gt, u32 mask); +void gen6_unmask_pm_irq(struct intel_gt *gt, u32 mask); void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv); void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv); void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv); -- cgit v1.2.3 From 26443a4bc44850ff28329ae97a3cc9349d7dd87b Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 3 Jul 2019 23:08:19 +0300 Subject: drm/i915: Add windowing for primary planes on gen2/3 and chv MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Plane B and C (note that we don't actually expose plane C currently) on gen2/3 have a window generator, as does the primary plane on CHV pipe B. So let's allow positioning of these planes freely within the pipe source area. Plane A on gen2/3 seems to have some kind of partial window generator which would allow you to cut the plane off midway through the scanout, but it would still have to start at the top-left corner of the pipe, and it would have to be full width. That doesn't sound all that useful, so for simplicity let's just keep to the idea that plane A has to be fullscreen. Gen4 removed the plane A/B windowing support entirely, and it wasn't reintroduced until SKL (apart from the CHV pipe B special case). v2: s/plane/i9xx_plane/ etc.
(James) v3: Make it less confusing v4: Deal with IS_GEN() Reviewed-by: Maarten Lankhorst Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190703200824.5971-2-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 40 +++++++++++++++++++++------- 1 file changed, 31 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 919f5ac844c8..1add3a0dfc06 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -3716,10 +3716,27 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state) return 0; } +static bool i9xx_plane_has_windowing(struct intel_plane *plane) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; + + if (IS_CHERRYVIEW(dev_priv)) + return i9xx_plane == PLANE_B; + else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) + return false; + else if (IS_GEN(dev_priv, 4)) + return i9xx_plane == PLANE_C; + else + return i9xx_plane == PLANE_B || + i9xx_plane == PLANE_C; +} + static int i9xx_plane_check(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) { + struct intel_plane *plane = to_intel_plane(plane_state->base.plane); int ret; ret = chv_plane_check_rotation(plane_state); @@ -3730,7 +3747,8 @@ i9xx_plane_check(struct intel_crtc_state *crtc_state, &crtc_state->base, DRM_PLANE_HELPER_NO_SCALING, DRM_PLANE_HELPER_NO_SCALING, - false, true); + i9xx_plane_has_windowing(plane), + true); if (ret) return ret; @@ -3759,6 +3777,10 @@ static void i9xx_update_plane(struct intel_plane *plane, u32 linear_offset; int x = plane_state->color_plane[0].x; int y = plane_state->color_plane[0].y; + int crtc_x = plane_state->base.dst.x1; + int crtc_y = plane_state->base.dst.y1; + int crtc_w = drm_rect_width(&plane_state->base.dst); + int crtc_h = drm_rect_height(&plane_state->base.dst); unsigned long irqflags; u32 dspaddr_offset; u32 dspcntr; @@ -3777,18 +3799,18 @@ static void i9xx_update_plane(struct intel_plane *plane, I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride); if (INTEL_GEN(dev_priv) < 4) { - /* pipesrc and dspsize control the size that is scaled from, - * which should always be the user's requested size. + /* + * PLANE_A doesn't actually have a full window + * generator but let's assume we still need to + * program whatever is there. */ - I915_WRITE_FW(DSPPOS(i9xx_plane), 0); + I915_WRITE_FW(DSPPOS(i9xx_plane), (crtc_y << 16) | crtc_x); I915_WRITE_FW(DSPSIZE(i9xx_plane), - ((crtc_state->pipe_src_h - 1) << 16) | - (crtc_state->pipe_src_w - 1)); + ((crtc_h - 1) << 16) | (crtc_w - 1)); } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) { - I915_WRITE_FW(PRIMPOS(i9xx_plane), 0); + I915_WRITE_FW(PRIMPOS(i9xx_plane), (crtc_y << 16) | crtc_x); I915_WRITE_FW(PRIMSIZE(i9xx_plane), - ((crtc_state->pipe_src_h - 1) << 16) | - (crtc_state->pipe_src_w - 1)); + ((crtc_h - 1) << 16) | (crtc_w - 1)); I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0); } -- cgit v1.2.3 From 423ee8e99aa5ee7ee5133616366099b8379870dc Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 3 Jul 2019 23:08:20 +0300 Subject: drm/i915: Disable sprite gamma on ivb-bdw MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We don't currently have any use for the sprite gamma on ivb-bdw. Let's disable it. We already do that on skl+. 
On pre-ivb there is no way to disable the sprite gamma, and it only affects YCbCr pixel formats, whereas on ivb+ it also affects RGB formats. Reviewed-by: Maarten Lankhorst Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190703200824.5971-3-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/display/intel_sprite.c | 2 ++ drivers/gpu/drm/i915/i915_reg.h | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index 004b52027ae8..c7314617424a 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -1013,6 +1013,8 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state, return 0; } + sprctl |= SPRITE_INT_GAMMA_DISABLE; + if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709) sprctl |= SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 0c487146a5bd..6b3affb5a279 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -6465,7 +6465,7 @@ enum { #define SPRITE_YUV_ORDER_VYUY (3 << 16) #define SPRITE_ROTATE_180 (1 << 15) #define SPRITE_TRICKLE_FEED_DISABLE (1 << 14) -#define SPRITE_INT_GAMMA_ENABLE (1 << 13) +#define SPRITE_INT_GAMMA_DISABLE (1 << 13) #define SPRITE_TILED (1 << 10) #define SPRITE_DEST_KEY (1 << 2) #define _SPRA_LINOFF 0x70284 -- cgit v1.2.3 From 94e15723df815497347408631203b2dabcb0bca3 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 3 Jul 2019 23:08:21 +0300 Subject: drm/i915: Program plane gamma ramps MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit All sprite planes have a programmable gamma ramp. Set it up with a linear ramp on all platforms. This actually matches the reset value but soon we'll want to reprogram this ramp on some machines, so let's just set it up across the board. Note that on pre-IVB the hardware bypasses the gamma unit unless a YCbCr pixel format is used. v2: Add parens around << in ilk_linear_gamma() Skip gamma programming for RGB on pre-IVB s/DVSGAMC/DVSGAMC_G4X/ Reviewed-by: Maarten Lankhorst Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190703200824.5971-4-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/display/intel_sprite.c | 142 ++++++++++++++++++++++++++++ drivers/gpu/drm/i915/i915_reg.h | 31 ++-- 2 files changed, 166 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index c7314617424a..dc26d84b1b61 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -683,6 +683,16 @@ skl_plane_get_hw_state(struct intel_plane *plane, return ret; } +static void i9xx_plane_linear_gamma(u16 gamma[8]) +{ + /* The points are not evenly spaced.
*/ + static const u8 in[8] = { 0, 1, 2, 4, 8, 16, 24, 32 }; + int i; + + for (i = 0; i < 8; i++) + gamma[i] = (in[i] << 8) / 32; +} + static void chv_update_csc(const struct intel_plane_state *plane_state) { @@ -858,6 +868,31 @@ static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state, return sprctl; } +static void vlv_update_gamma(const struct intel_plane_state *plane_state) +{ + struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + const struct drm_framebuffer *fb = plane_state->base.fb; + enum pipe pipe = plane->pipe; + enum plane_id plane_id = plane->id; + u16 gamma[8]; + int i; + + /* Seems RGB data bypasses the gamma always */ + if (!fb->format->is_yuv) + return; + + i9xx_plane_linear_gamma(gamma); + + /* FIXME these register are single buffered :( */ + /* The two end points are implicit (0.0 and 1.0) */ + for (i = 1; i < 8 - 1; i++) + I915_WRITE_FW(SPGAMC(pipe, plane_id, i - 1), + gamma[i] << 16 | + gamma[i] << 8 | + gamma[i]); +} + static void vlv_update_plane(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, @@ -916,6 +951,7 @@ vlv_update_plane(struct intel_plane *plane, intel_plane_ggtt_offset(plane_state) + sprsurf_offset); vlv_update_clrc(plane_state); + vlv_update_gamma(plane_state); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } @@ -1035,6 +1071,45 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state, return sprctl; } +static void ivb_sprite_linear_gamma(u16 gamma[18]) +{ + int i; + + for (i = 0; i < 17; i++) + gamma[i] = (i << 10) / 16; + + gamma[i] = 3 << 10; + i++; +} + +static void ivb_update_gamma(const struct intel_plane_state *plane_state) +{ + struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum pipe pipe = plane->pipe; + u16 gamma[18]; + int i; + + ivb_sprite_linear_gamma(gamma); + + /* FIXME these register are single buffered :( */ + for (i = 0; i < 16; i++) + I915_WRITE_FW(SPRGAMC(pipe, i), + gamma[i] << 20 | + gamma[i] << 10 | + gamma[i]); + + I915_WRITE_FW(SPRGAMC16(pipe, 0), gamma[i]); + I915_WRITE_FW(SPRGAMC16(pipe, 1), gamma[i]); + I915_WRITE_FW(SPRGAMC16(pipe, 2), gamma[i]); + i++; + + I915_WRITE_FW(SPRGAMC17(pipe, 0), gamma[i]); + I915_WRITE_FW(SPRGAMC17(pipe, 1), gamma[i]); + I915_WRITE_FW(SPRGAMC17(pipe, 2), gamma[i]); + i++; +} + static void ivb_update_plane(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, @@ -1101,6 +1176,8 @@ ivb_update_plane(struct intel_plane *plane, I915_WRITE_FW(SPRSURF(pipe), intel_plane_ggtt_offset(plane_state) + sprsurf_offset); + ivb_update_gamma(plane_state); + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } @@ -1226,6 +1303,66 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state, return dvscntr; } +static void g4x_update_gamma(const struct intel_plane_state *plane_state) +{ + struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + const struct drm_framebuffer *fb = plane_state->base.fb; + enum pipe pipe = plane->pipe; + u16 gamma[8]; + int i; + + /* Seems RGB data bypasses the gamma always */ + if (!fb->format->is_yuv) + return; + + i9xx_plane_linear_gamma(gamma); + + /* FIXME these register are single buffered :( */ + /* The two end points are implicit (0.0 and 1.0) */ + for (i = 1; i < 8 - 1; i++) + I915_WRITE_FW(DVSGAMC_G4X(pipe, i - 1), + gamma[i] << 16 | + gamma[i] << 
8 | + gamma[i]); +} + +static void ilk_sprite_linear_gamma(u16 gamma[17]) +{ + int i; + + for (i = 0; i < 17; i++) + gamma[i] = (i << 10) / 16; +} + +static void ilk_update_gamma(const struct intel_plane_state *plane_state) +{ + struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + const struct drm_framebuffer *fb = plane_state->base.fb; + enum pipe pipe = plane->pipe; + u16 gamma[17]; + int i; + + /* Seems RGB data bypasses the gamma always */ + if (!fb->format->is_yuv) + return; + + ilk_sprite_linear_gamma(gamma); + + /* FIXME these register are single buffered :( */ + for (i = 0; i < 16; i++) + I915_WRITE_FW(DVSGAMC_ILK(pipe, i), + gamma[i] << 20 | + gamma[i] << 10 | + gamma[i]); + + I915_WRITE_FW(DVSGAMCMAX_ILK(pipe, 0), gamma[i]); + I915_WRITE_FW(DVSGAMCMAX_ILK(pipe, 1), gamma[i]); + I915_WRITE_FW(DVSGAMCMAX_ILK(pipe, 2), gamma[i]); + i++; +} + static void g4x_update_plane(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, @@ -1285,6 +1422,11 @@ g4x_update_plane(struct intel_plane *plane, I915_WRITE_FW(DVSSURF(pipe), intel_plane_ggtt_offset(plane_state) + dvssurf_offset); + if (IS_G4X(dev_priv)) + g4x_update_gamma(plane_state); + else + ilk_update_gamma(plane_state); + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 6b3affb5a279..089b18609727 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -6292,6 +6292,7 @@ enum { #define _DSPATILEOFF 0x701A4 /* 965+ only */ #define _DSPAOFFSET 0x701A4 /* HSW */ #define _DSPASURFLIVE 0x701AC +#define _DSPAGAMC 0x701E0 #define DSPCNTR(plane) _MMIO_PIPE2(plane, _DSPACNTR) #define DSPADDR(plane) _MMIO_PIPE2(plane, _DSPAADDR) @@ -6303,6 +6304,7 @@ enum { #define DSPLINOFF(plane) DSPADDR(plane) #define DSPOFFSET(plane) _MMIO_PIPE2(plane, _DSPAOFFSET) #define DSPSURFLIVE(plane) _MMIO_PIPE2(plane, _DSPASURFLIVE) +#define DSPGAMC(plane, i) _MMIO(_PIPE2(plane, _DSPAGAMC) + (5 - (i)) * 4) /* plane C only, 6 x u0.8 */ /* CHV pipe B blender and primary plane */ #define _CHV_BLEND_A 0x60a00 @@ -6405,6 +6407,7 @@ enum { #define _DVSAKEYMAXVAL 0x721a0 #define _DVSATILEOFF 0x721a4 #define _DVSASURFLIVE 0x721ac +#define _DVSAGAMC_G4X 0x721e0 /* g4x */ #define _DVSASCALE 0x72204 #define DVS_SCALE_ENABLE (1 << 31) #define DVS_FILTER_MASK (3 << 29) @@ -6413,7 +6416,8 @@ enum { #define DVS_FILTER_SOFTENING (2 << 29) #define DVS_VERTICAL_OFFSET_HALF (1 << 28) /* must be enabled below */ #define DVS_VERTICAL_OFFSET_ENABLE (1 << 27) -#define _DVSAGAMC 0x72300 +#define _DVSAGAMC_ILK 0x72300 /* ilk/snb */ +#define _DVSAGAMCMAX_ILK 0x72340 /* ilk/snb */ #define _DVSBCNTR 0x73180 #define _DVSBLINOFF 0x73184 @@ -6426,8 +6430,10 @@ enum { #define _DVSBKEYMAXVAL 0x731a0 #define _DVSBTILEOFF 0x731a4 #define _DVSBSURFLIVE 0x731ac +#define _DVSBGAMC_G4X 0x731e0 /* g4x */ #define _DVSBSCALE 0x73204 -#define _DVSBGAMC 0x73300 +#define _DVSBGAMC_ILK 0x73300 /* ilk/snb */ +#define _DVSBGAMCMAX_ILK 0x73340 /* ilk/snb */ #define DVSCNTR(pipe) _MMIO_PIPE(pipe, _DVSACNTR, _DVSBCNTR) #define DVSLINOFF(pipe) _MMIO_PIPE(pipe, _DVSALINOFF, _DVSBLINOFF) @@ -6441,6 +6447,9 @@ enum { #define DVSKEYVAL(pipe) _MMIO_PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL) #define DVSKEYMSK(pipe) _MMIO_PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK) #define DVSSURFLIVE(pipe) _MMIO_PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE) +#define DVSGAMC_G4X(pipe, i) _MMIO(_PIPE(pipe, _DVSAGAMC_G4X, _DVSBGAMC_G4X) + (5 
- (i)) * 4) /* 6 x u0.8 */ +#define DVSGAMC_ILK(pipe, i) _MMIO(_PIPE(pipe, _DVSAGAMC_ILK, _DVSBGAMC_ILK) + (i) * 4) /* 16 x u0.10 */ +#define DVSGAMCMAX_ILK(pipe, i) _MMIO(_PIPE(pipe, _DVSAGAMCMAX_ILK, _DVSBGAMCMAX_ILK) + (i) * 4) /* 3 x u1.10 */ #define _SPRA_CTL 0x70280 #define SPRITE_ENABLE (1 << 31) @@ -6488,6 +6497,8 @@ enum { #define SPRITE_VERTICAL_OFFSET_HALF (1 << 28) /* must be enabled below */ #define SPRITE_VERTICAL_OFFSET_ENABLE (1 << 27) #define _SPRA_GAMC 0x70400 +#define _SPRA_GAMC16 0x70440 +#define _SPRA_GAMC17 0x7044c #define _SPRB_CTL 0x71280 #define _SPRB_LINOFF 0x71284 @@ -6503,6 +6514,8 @@ enum { #define _SPRB_SURFLIVE 0x712ac #define _SPRB_SCALE 0x71304 #define _SPRB_GAMC 0x71400 +#define _SPRB_GAMC16 0x71440 +#define _SPRB_GAMC17 0x7144c #define SPRCTL(pipe) _MMIO_PIPE(pipe, _SPRA_CTL, _SPRB_CTL) #define SPRLINOFF(pipe) _MMIO_PIPE(pipe, _SPRA_LINOFF, _SPRB_LINOFF) @@ -6516,7 +6529,9 @@ enum { #define SPRTILEOFF(pipe) _MMIO_PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF) #define SPROFFSET(pipe) _MMIO_PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET) #define SPRSCALE(pipe) _MMIO_PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE) -#define SPRGAMC(pipe) _MMIO_PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC) +#define SPRGAMC(pipe, i) _MMIO(_PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC) + (i) * 4) /* 16 x u0.10 */ +#define SPRGAMC16(pipe, i) _MMIO(_PIPE(pipe, _SPRA_GAMC16, _SPRB_GAMC16) + (i) * 4) /* 3 x u1.10 */ +#define SPRGAMC17(pipe, i) _MMIO(_PIPE(pipe, _SPRA_GAMC17, _SPRB_GAMC17) + (i) * 4) /* 3 x u2.10 */ #define SPRSURFLIVE(pipe) _MMIO_PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE) #define _SPACNTR (VLV_DISPLAY_BASE + 0x72180) @@ -6559,7 +6574,7 @@ enum { #define _SPACLRC1 (VLV_DISPLAY_BASE + 0x721d4) #define SP_SH_SIN(x) (((x) & 0x7ff) << 16) /* s4.7 */ #define SP_SH_COS(x) (x) /* u3.7 */ -#define _SPAGAMC (VLV_DISPLAY_BASE + 0x721f4) +#define _SPAGAMC (VLV_DISPLAY_BASE + 0x721e0) #define _SPBCNTR (VLV_DISPLAY_BASE + 0x72280) #define _SPBLINOFF (VLV_DISPLAY_BASE + 0x72284) @@ -6574,10 +6589,12 @@ enum { #define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8) #define _SPBCLRC0 (VLV_DISPLAY_BASE + 0x722d0) #define _SPBCLRC1 (VLV_DISPLAY_BASE + 0x722d4) -#define _SPBGAMC (VLV_DISPLAY_BASE + 0x722f4) +#define _SPBGAMC (VLV_DISPLAY_BASE + 0x722e0) +#define _VLV_SPR(pipe, plane_id, reg_a, reg_b) \ + _PIPE((pipe) * 2 + (plane_id) - PLANE_SPRITE0, (reg_a), (reg_b)) #define _MMIO_VLV_SPR(pipe, plane_id, reg_a, reg_b) \ - _MMIO_PIPE((pipe) * 2 + (plane_id) - PLANE_SPRITE0, (reg_a), (reg_b)) + _MMIO(_VLV_SPR((pipe), (plane_id), (reg_a), (reg_b))) #define SPCNTR(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACNTR, _SPBCNTR) #define SPLINOFF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPALINOFF, _SPBLINOFF) @@ -6592,7 +6609,7 @@ enum { #define SPCONSTALPHA(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACONSTALPHA, _SPBCONSTALPHA) #define SPCLRC0(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC0, _SPBCLRC0) #define SPCLRC1(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC1, _SPBCLRC1) -#define SPGAMC(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAGAMC, _SPBGAMC) +#define SPGAMC(pipe, plane_id, i) _MMIO(_VLV_SPR((pipe), (plane_id), _SPAGAMC, _SPBGAMC) + (5 - (i)) * 4) /* 6 x u0.10 */ /* * CHV pipe B sprite CSC -- cgit v1.2.3 From d56e823ac32a793b24c857e8635def13d42b4008 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 3 Jul 2019 23:08:22 +0300 Subject: drm/i915: Deal with cpp==8 for g4x watermarks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 
Docs tell us that on g4x we have to compute the SR watermarks using 4 bytes per pixel. I'm going to assume that only applies to the 1 and 2 byte per pixel formats, and not the 8 byte per pixel formats; using 4 bytes per pixel there seems like a recipe for an insufficient watermark which could lead to underruns. Use the maximum of the two numbers instead. Reviewed-by: Maarten Lankhorst Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190703200824.5971-5-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/intel_pm.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index d10c62d3f10c..87244d8215a7 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -1116,6 +1116,8 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state, if (!intel_wm_plane_visible(crtc_state, plane_state)) return 0; + cpp = plane_state->base.fb->format->cpp[0]; + /* * Not 100% sure which way ELK should go here as the * spec only says CL/CTG should assume 32bpp and BW @@ -1129,9 +1131,7 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state, */ if (IS_GM45(dev_priv) && plane->id == PLANE_PRIMARY && level != G4X_WM_LEVEL_NORMAL) - cpp = 4; - else - cpp = plane_state->base.fb->format->cpp[0]; + cpp = max(cpp, 4u); clock = adjusted_mode->crtc_clock; htotal = adjusted_mode->crtc_htotal; -- cgit v1.2.3 From 94e35ce221732497f939713640816ae0a42a71c1 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 3 Jul 2019 23:08:23 +0300 Subject: drm/i915: Cosmetic fix for skl+ plane switch statement MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit One of the switch cases has the byte order vs. format bits reversed relative to all the other cases. Appease the ocd and reorder them. Reviewed-by: Maarten Lankhorst Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190703200824.5971-6-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 1add3a0dfc06..f689796d0703 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -3973,10 +3973,10 @@ static u32 skl_plane_ctl_format(u32 pixel_format) case DRM_FORMAT_XRGB8888: case DRM_FORMAT_ARGB8888: return PLANE_CTL_FORMAT_XRGB_8888; + case DRM_FORMAT_XBGR2101010: + return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX; case DRM_FORMAT_XRGB2101010: return PLANE_CTL_FORMAT_XRGB_2101010; - case DRM_FORMAT_XBGR2101010: - return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010; case DRM_FORMAT_XBGR16161616F: case DRM_FORMAT_ABGR16161616F: return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX; -- cgit v1.2.3 From 3e69db291bda338d92f554b55122c7319b612b10 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 3 Jul 2019 23:08:24 +0300 Subject: drm/i915: Clean up skl vs. icl plane formats MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Split the format lists for different planes on skl/icl more cleanly. On skl+ we have just two types of planes: those that can do planar and those that can't. On icl we have three types of planes: hdr planes, sdr planes that can do planar, and sdr planes that can't do planar. 
Those latter two are the same set of planes we must choose from when picking the UV vs. Y plane for planar scanout. So we shall just designate them sdr uv planes and sdr y planes. Reviewed-by: Maarten Lankhorst Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190703200824.5971-7-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/display/intel_sprite.c | 133 ++++++++++++++-------------- 1 file changed, 66 insertions(+), 67 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index dc26d84b1b61..34586f29be60 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -1996,28 +1996,7 @@ static const u32 skl_plane_formats[] = { DRM_FORMAT_VYUY, }; -static const u32 icl_plane_formats[] = { - DRM_FORMAT_C8, - DRM_FORMAT_RGB565, - DRM_FORMAT_XRGB8888, - DRM_FORMAT_XBGR8888, - DRM_FORMAT_ARGB8888, - DRM_FORMAT_ABGR8888, - DRM_FORMAT_XRGB2101010, - DRM_FORMAT_XBGR2101010, - DRM_FORMAT_YUYV, - DRM_FORMAT_YVYU, - DRM_FORMAT_UYVY, - DRM_FORMAT_VYUY, - DRM_FORMAT_Y210, - DRM_FORMAT_Y212, - DRM_FORMAT_Y216, - DRM_FORMAT_XVYU2101010, - DRM_FORMAT_XVYU12_16161616, - DRM_FORMAT_XVYU16161616, -}; - -static const u32 icl_hdr_plane_formats[] = { +static const u32 skl_planar_formats[] = { DRM_FORMAT_C8, DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888, @@ -2026,23 +2005,14 @@ static const u32 icl_hdr_plane_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_XRGB2101010, DRM_FORMAT_XBGR2101010, - DRM_FORMAT_XRGB16161616F, - DRM_FORMAT_XBGR16161616F, - DRM_FORMAT_ARGB16161616F, - DRM_FORMAT_ABGR16161616F, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU, DRM_FORMAT_UYVY, DRM_FORMAT_VYUY, - DRM_FORMAT_Y210, - DRM_FORMAT_Y212, - DRM_FORMAT_Y216, - DRM_FORMAT_XVYU2101010, - DRM_FORMAT_XVYU12_16161616, - DRM_FORMAT_XVYU16161616, + DRM_FORMAT_NV12, }; -static const u32 skl_planar_formats[] = { +static const u32 glk_planar_formats[] = { DRM_FORMAT_C8, DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888, @@ -2056,9 +2026,12 @@ static const u32 skl_planar_formats[] = { DRM_FORMAT_UYVY, DRM_FORMAT_VYUY, DRM_FORMAT_NV12, + DRM_FORMAT_P010, + DRM_FORMAT_P012, + DRM_FORMAT_P016, }; -static const u32 glk_planar_formats[] = { +static const u32 icl_sdr_y_plane_formats[] = { DRM_FORMAT_C8, DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888, @@ -2071,13 +2044,15 @@ static const u32 glk_planar_formats[] = { DRM_FORMAT_YVYU, DRM_FORMAT_UYVY, DRM_FORMAT_VYUY, - DRM_FORMAT_NV12, - DRM_FORMAT_P010, - DRM_FORMAT_P012, - DRM_FORMAT_P016, + DRM_FORMAT_Y210, + DRM_FORMAT_Y212, + DRM_FORMAT_Y216, + DRM_FORMAT_XVYU2101010, + DRM_FORMAT_XVYU12_16161616, + DRM_FORMAT_XVYU16161616, }; -static const u32 icl_planar_formats[] = { +static const u32 icl_sdr_uv_plane_formats[] = { DRM_FORMAT_C8, DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888, @@ -2102,7 +2077,7 @@ static const u32 icl_planar_formats[] = { DRM_FORMAT_XVYU16161616, }; -static const u32 icl_hdr_planar_formats[] = { +static const u32 icl_hdr_plane_formats[] = { DRM_FORMAT_C8, DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888, @@ -2345,9 +2320,6 @@ static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv, static bool skl_plane_has_planar(struct drm_i915_private *dev_priv, enum pipe pipe, enum plane_id plane_id) { - if (INTEL_GEN(dev_priv) >= 11) - return plane_id <= PLANE_SPRITE3; - /* Display WA #0870: skl, bxt */ if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv)) return false; @@ -2361,6 +2333,48 @@ static bool skl_plane_has_planar(struct drm_i915_private *dev_priv, 
return true; } +static const u32 *skl_get_plane_formats(struct drm_i915_private *dev_priv, + enum pipe pipe, enum plane_id plane_id, + int *num_formats) +{ + if (skl_plane_has_planar(dev_priv, pipe, plane_id)) { + *num_formats = ARRAY_SIZE(skl_planar_formats); + return skl_planar_formats; + } else { + *num_formats = ARRAY_SIZE(skl_plane_formats); + return skl_plane_formats; + } +} + +static const u32 *glk_get_plane_formats(struct drm_i915_private *dev_priv, + enum pipe pipe, enum plane_id plane_id, + int *num_formats) +{ + if (skl_plane_has_planar(dev_priv, pipe, plane_id)) { + *num_formats = ARRAY_SIZE(glk_planar_formats); + return glk_planar_formats; + } else { + *num_formats = ARRAY_SIZE(skl_plane_formats); + return skl_plane_formats; + } +} + +static const u32 *icl_get_plane_formats(struct drm_i915_private *dev_priv, + enum pipe pipe, enum plane_id plane_id, + int *num_formats) +{ + if (icl_is_hdr_plane(dev_priv, plane_id)) { + *num_formats = ARRAY_SIZE(icl_hdr_plane_formats); + return icl_hdr_plane_formats; + } else if (icl_is_nv12_y_plane(plane_id)) { + *num_formats = ARRAY_SIZE(icl_sdr_y_plane_formats); + return icl_sdr_y_plane_formats; + } else { + *num_formats = ARRAY_SIZE(icl_sdr_uv_plane_formats); + return icl_sdr_uv_plane_formats; + } +} + static bool skl_plane_has_ccs(struct drm_i915_private *dev_priv, enum pipe pipe, enum plane_id plane_id) { @@ -2414,30 +2428,15 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv, if (icl_is_nv12_y_plane(plane_id)) plane->update_slave = icl_update_slave; - if (skl_plane_has_planar(dev_priv, pipe, plane_id)) { - if (icl_is_hdr_plane(dev_priv, plane_id)) { - formats = icl_hdr_planar_formats; - num_formats = ARRAY_SIZE(icl_hdr_planar_formats); - } else if (INTEL_GEN(dev_priv) >= 11) { - formats = icl_planar_formats; - num_formats = ARRAY_SIZE(icl_planar_formats); - } else if (INTEL_GEN(dev_priv) == 10 || IS_GEMINILAKE(dev_priv)) { - formats = glk_planar_formats; - num_formats = ARRAY_SIZE(glk_planar_formats); - } else { - formats = skl_planar_formats; - num_formats = ARRAY_SIZE(skl_planar_formats); - } - } else if (icl_is_hdr_plane(dev_priv, plane_id)) { - formats = icl_hdr_plane_formats; - num_formats = ARRAY_SIZE(icl_hdr_plane_formats); - } else if (INTEL_GEN(dev_priv) >= 11) { - formats = icl_plane_formats; - num_formats = ARRAY_SIZE(icl_plane_formats); - } else { - formats = skl_plane_formats; - num_formats = ARRAY_SIZE(skl_plane_formats); - } + if (INTEL_GEN(dev_priv) >= 11) + formats = icl_get_plane_formats(dev_priv, pipe, + plane_id, &num_formats); + else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) + formats = glk_get_plane_formats(dev_priv, pipe, + plane_id, &num_formats); + else + formats = skl_get_plane_formats(dev_priv, pipe, + plane_id, &num_formats); plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id); if (plane->has_ccs) -- cgit v1.2.3 From eef037ea02809afd4d21e204acf27d3bd1525829 Mon Sep 17 00:00:00 2001 From: Vivek Kasireddy Date: Wed, 3 Jul 2019 16:03:53 -0700 Subject: drm/i915/ehl: Add support for DPLL4 (v10) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch adds support for DPLL4 on EHL that include the following restrictions: - DPLL4 cannot be used with DDIA (combo port A internal eDP usage). DPLL4 can be used with other DDIs, including DDID (combo port A external usage). - DPLL4 cannot be enabled when DC5 or DC6 are enabled. - The DPLL4 enable, lock, power enabled, and power state are connected to the MGPLL1_ENABLE register. 
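As a rough standalone illustration (not part of the patch, names and values are placeholders): the board-level rule above reduces to widening the combo PLL search range for EHL ports other than A, which is what the icl_get_combo_phy_dpll() change below does.

#include <stdbool.h>
#include <stdio.h>

enum port_sketch { PORT_A, PORT_B, PORT_C, PORT_D };
enum dpll_sketch { DPLL0 = 0, DPLL1 = 1, DPLL4 = 2 };

static enum dpll_sketch max_combo_pll(bool is_ehl, enum port_sketch port)
{
	/* On EHL, combo ports other than A may also consider DPLL4. */
	bool has_dpll4 = is_ehl && port != PORT_A;

	return has_dpll4 ? DPLL4 : DPLL1;
}

int main(void)
{
	printf("EHL port D searches up to id %d\n", max_combo_pll(true, PORT_D));
	printf("EHL port A searches up to id %d\n", max_combo_pll(true, PORT_A));
	return 0;
}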
v2: (suggestions from Bob Paauwe) - Rework ehl_get_dpll() function to call intel_find_shared_dpll() and iterate twice: once for Combo plls and once for MG plls. - Use MG pll funcs for DPLL4 instead of creating new ones and modify mg_pll_enable to include the restrictions for EHL. v3: Fix compilation error v4: (suggestions from Lucas and Ville) - Treat DPLL4 as a combo phy PLL and not as MG PLL - Disable DC states when this DPLL is being enabled - Reuse icl_get_dpll instead of creating a separate one for EHL v5: (suggestion from Ville) - Refcount the DC OFF power domains during the enabling and disabling of this DPLL. v6: rebase v7: (suggestion from Imre) - Add a new power domain instead of iterating over the domains assoicated with DC OFF power well. v8: (Ville and Imre) - Rename POWER_DOMAIN_DPLL4 TO POWER_DOMAIN_DPLL_DC_OFF - Grab a reference in intel_modeset_setup_hw_state() if this DPLL was already enabled perhaps by BIOS. - Check for the port type instead of the encoder v9: (Ville) - Move the block of code that grabs a reference to the power domain POWER_DOMAIN_DPLL_DC_OFF to intel_modeset_readout_hw_state() to ensure that there is a reference present before this DPLL might get disabled. v10: rebase Cc: José Roberto de Souza Cc: Ville Syrjälä Cc: Matt Roper Cc: Imre Deak Signed-off-by: Vivek Kasireddy Reviewed-by: José Roberto de Souza Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190703230353.24059-1-vivek.kasireddy@intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 7 ++++ drivers/gpu/drm/i915/display/intel_display_power.c | 3 ++ drivers/gpu/drm/i915/display/intel_display_power.h | 1 + drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 47 ++++++++++++++++++++-- drivers/gpu/drm/i915/display/intel_dpll_mgr.h | 6 +++ 5 files changed, 60 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index f689796d0703..f09eda75711a 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -16675,6 +16675,13 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) pll->on = pll->info->funcs->get_hw_state(dev_priv, pll, &pll->state.hw_state); + + if (IS_ELKHARTLAKE(dev_priv) && pll->on && + pll->info->id == DPLL_ID_EHL_DPLL4) { + pll->wakeref = intel_display_power_get(dev_priv, + POWER_DOMAIN_DPLL_DC_OFF); + } + pll->state.crtc_mask = 0; for_each_intel_crtc(dev, crtc) { struct intel_crtc_state *crtc_state = diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index c19b958461ca..7437fc71d289 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -118,6 +118,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain) return "MODESET"; case POWER_DOMAIN_GT_IRQ: return "GT_IRQ"; + case POWER_DOMAIN_DPLL_DC_OFF: + return "DPLL_DC_OFF"; default: MISSING_CASE(domain); return "?"; @@ -2455,6 +2457,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, ICL_PW_2_POWER_DOMAINS | \ BIT_ULL(POWER_DOMAIN_MODESET) | \ BIT_ULL(POWER_DOMAIN_AUX_A) | \ + BIT_ULL(POWER_DOMAIN_DPLL_DC_OFF) | \ BIT_ULL(POWER_DOMAIN_INIT)) #define ICL_DDI_IO_A_POWER_DOMAINS ( \ diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h index ff57b0a7fe59..8f43f7051a16 100644 --- 
a/drivers/gpu/drm/i915/display/intel_display_power.h +++ b/drivers/gpu/drm/i915/display/intel_display_power.h @@ -59,6 +59,7 @@ enum intel_display_power_domain { POWER_DOMAIN_GMBUS, POWER_DOMAIN_MODESET, POWER_DOMAIN_GT_IRQ, + POWER_DOMAIN_DPLL_DC_OFF, POWER_DOMAIN_INIT, POWER_DOMAIN_NUM, diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index f953971e7c3b..67cfe836286e 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -2905,6 +2905,9 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state, intel_atomic_get_new_crtc_state(state, crtc); struct icl_port_dpll *port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT]; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum port port = encoder->port; + bool has_dpll4 = false; if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) { DRM_DEBUG_KMS("Could not calculate combo PHY PLL state.\n"); @@ -2912,10 +2915,14 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state, return false; } + if (IS_ELKHARTLAKE(dev_priv) && port != PORT_A) + has_dpll4 = true; + port_dpll->pll = intel_find_shared_dpll(state, crtc, &port_dpll->hw_state, DPLL_ID_ICL_DPLL0, - DPLL_ID_ICL_DPLL1); + has_dpll4 ? DPLL_ID_EHL_DPLL4 + : DPLL_ID_ICL_DPLL1); if (!port_dpll->pll) { DRM_DEBUG_KMS("No combo PHY PLL found for port %c\n", port_name(encoder->port)); @@ -3119,8 +3126,14 @@ static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state) { - return icl_pll_get_hw_state(dev_priv, pll, hw_state, - CNL_DPLL_ENABLE(pll->info->id)); + i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id); + + if (IS_ELKHARTLAKE(dev_priv) && + pll->info->id == DPLL_ID_EHL_DPLL4) { + enable_reg = MG_PLL_ENABLE(0); + } + + return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg); } static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv, @@ -3231,6 +3244,19 @@ static void combo_pll_enable(struct drm_i915_private *dev_priv, { i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id); + if (IS_ELKHARTLAKE(dev_priv) && + pll->info->id == DPLL_ID_EHL_DPLL4) { + enable_reg = MG_PLL_ENABLE(0); + + /* + * We need to disable DC states when this DPLL is enabled. + * This can be done by taking a reference on DPLL4 power + * domain. 
+ */ + pll->wakeref = intel_display_power_get(dev_priv, + POWER_DOMAIN_DPLL_DC_OFF); + } + icl_pll_power_enable(dev_priv, pll, enable_reg); icl_dpll_write(dev_priv, pll); @@ -3326,7 +3352,19 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv, static void combo_pll_disable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { - icl_pll_disable(dev_priv, pll, CNL_DPLL_ENABLE(pll->info->id)); + i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id); + + if (IS_ELKHARTLAKE(dev_priv) && + pll->info->id == DPLL_ID_EHL_DPLL4) { + enable_reg = MG_PLL_ENABLE(0); + icl_pll_disable(dev_priv, pll, enable_reg); + + intel_display_power_put(dev_priv, POWER_DOMAIN_DPLL_DC_OFF, + pll->wakeref); + return; + } + + icl_pll_disable(dev_priv, pll, enable_reg); } static void tbt_pll_disable(struct drm_i915_private *dev_priv, @@ -3406,6 +3444,7 @@ static const struct intel_dpll_mgr icl_pll_mgr = { static const struct dpll_info ehl_plls[] = { { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 }, { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 }, + { "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 }, { }, }; diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h index 1668f8116908..4c2c5e93aff3 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h @@ -28,6 +28,7 @@ #include #include "intel_display.h" +#include "intel_wakeref.h" /*FIXME: Move this to a more appropriate place. */ #define abs_diff(a, b) ({ \ @@ -118,6 +119,10 @@ enum intel_dpll_id { * @DPLL_ID_ICL_DPLL1: ICL combo PHY DPLL1 */ DPLL_ID_ICL_DPLL1 = 1, + /** + * @DPLL_ID_EHL_DPLL4: EHL combo PHY DPLL4 + */ + DPLL_ID_EHL_DPLL4 = 2, /** * @DPLL_ID_ICL_TBTPLL: ICL TBT PLL */ @@ -320,6 +325,7 @@ struct intel_shared_dpll { * @info: platform specific info */ const struct dpll_info *info; + intel_wakeref_t wakeref; }; #define SKL_DPLL0 0 -- cgit v1.2.3 From badf1f2724d430c8a310be549b49f8333817dbeb Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 5 Jul 2019 08:45:57 +0100 Subject: drm/i915: Order assert forcewake test Read the current value before computing the expected to ensure that if the timer does complete early (against our will), it should not cause a false positive. v2: The local irq disable did not prevent the timer from running. Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=111074 Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190705074604.16496-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_uncore.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 2042c94c9cc9..bb9e0da30e94 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -758,19 +758,18 @@ void assert_forcewakes_active(struct intel_uncore *uncore, * Check that the caller has an explicit wakeref and we don't mistake * it for the auto wakeref. 
*/ - local_irq_disable(); for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) { + unsigned int actual = READ_ONCE(domain->wake_count); unsigned int expect = 1; if (hrtimer_active(&domain->timer) && READ_ONCE(domain->active)) expect++; /* pending automatic release */ - if (WARN(domain->wake_count < expect, + if (WARN(actual < expect, "Expected domain %d to be held awake by caller, count=%d\n", - domain->id, domain->wake_count)) + domain->id, actual)) break; } - local_irq_enable(); } /* We give fast paths for the really cool registers */ -- cgit v1.2.3 From 9d1bc13e7fe1b31d93c58b6811f426b9b7dea59f Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Fri, 5 Jul 2019 11:31:12 +0000 Subject: drm/i915: Remove set but not used variable 'encoder' MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes gcc '-Wunused-but-set-variable' warning: drivers/gpu/drm/i915/display/intel_dp.c: In function 'intel_dp_set_drrs_state': drivers/gpu/drm/i915/display/intel_dp.c:6623:24: warning: variable 'encoder' set but not used [-Wunused-but-set-variable] It's never used, so can be removed. Also remove the related variable 'dig_port' Signed-off-by: YueHaibing Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190705113112.64715-1-yuehaibing@huawei.com --- drivers/gpu/drm/i915/display/intel_dp.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 8f7188d71d08..0bdb7ecc5a81 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -6620,8 +6620,6 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv, const struct intel_crtc_state *crtc_state, int refresh_rate) { - struct intel_encoder *encoder; - struct intel_digital_port *dig_port = NULL; struct intel_dp *intel_dp = dev_priv->drrs.dp; struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); enum drrs_refresh_rate_type index = DRRS_HIGH_RR; @@ -6636,9 +6634,6 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv, return; } - dig_port = dp_to_dig_port(intel_dp); - encoder = &dig_port->base; - if (!intel_crtc) { DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n"); return; -- cgit v1.2.3 From 3e27d70bcc46c68aceb5dafc813db4c4ff675221 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Fri, 5 Jul 2019 11:31:38 +0000 Subject: drm/i915: Remove set but not used variable 'intel_dig_port' MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes gcc '-Wunused-but-set-variable' warning: drivers/gpu/drm/i915/display/intel_ddi.c: In function 'intel_ddi_get_config': drivers/gpu/drm/i915/display/intel_ddi.c:3774:29: warning: variable 'intel_dig_port' set but not used [-Wunused-but-set-variable] struct intel_digital_port *intel_dig_port; It is never used, so can be removed. 
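For reference, a minimal standalone example of the code shape that trips gcc's -Wunused-but-set-variable (the warning these two removals silence); the function and variable names here are made up:

/* Build with: gcc -c -Wunused-but-set-variable example.c */
int example(int x)
{
	int unused_but_set;	/* assigned below but never read */

	unused_but_set = x * 2;	/* dead store: gcc reports the variable */

	return x + 1;		/* the result does not use the local */
}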
Signed-off-by: YueHaibing Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190705113138.65880-1-yuehaibing@huawei.com --- drivers/gpu/drm/i915/display/intel_ddi.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index a4172595c8d8..30e48609db1d 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -3771,7 +3771,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc); enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; - struct intel_digital_port *intel_dig_port; u32 temp, flags = 0; /* XXX: DSI transcoder paranoia */ @@ -3810,7 +3809,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder, switch (temp & TRANS_DDI_MODE_SELECT_MASK) { case TRANS_DDI_MODE_SELECT_HDMI: pipe_config->has_hdmi_sink = true; - intel_dig_port = enc_to_dig_port(&encoder->base); pipe_config->infoframes.enable |= intel_hdmi_infoframes_enabled(encoder, pipe_config); -- cgit v1.2.3 From f20f272f104f224010b955c2c49b0dcb4ca02e4f Mon Sep 17 00:00:00 2001 From: Mika Kuoppala Date: Fri, 5 Jul 2019 22:52:01 +0100 Subject: drm/i915/gtt: pde entry encoding is identical For all page directory entries, the pde encoding is identical. Don't complicate call sites with different versions of doing the same thing, so we always check the existence of physical page before writing the entry into it. This further generalizes the pd so that manipulation in callsites will be identical, removing the need to handle pdps differently for gen8. v2: squash v3: inc/dec with set/clear (Chris) v4: inlines, warn, stray set_pd (Chris) Cc: Chris Wilson Cc: Matthew Auld Signed-off-by: Mika Kuoppala Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190705215204.4559-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_gtt.c | 152 +++++++++++++++--------------------- drivers/gpu/drm/i915/i915_gem_gtt.h | 3 - 2 files changed, 63 insertions(+), 92 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 57db2d7270c5..adf6eadd5009 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -211,10 +211,10 @@ static u64 gen8_pte_encode(dma_addr_t addr, return pte; } -static gen8_pde_t gen8_pde_encode(const dma_addr_t addr, - const enum i915_cache_level level) +static u64 gen8_pde_encode(const dma_addr_t addr, + const enum i915_cache_level level) { - gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW; + u64 pde = _PAGE_PRESENT | _PAGE_RW; pde |= addr; if (level != I915_CACHE_NONE) pde |= PPAT_CACHED_PDE; @@ -223,9 +223,6 @@ static gen8_pde_t gen8_pde_encode(const dma_addr_t addr, return pde; } -#define gen8_pdpe_encode gen8_pde_encode -#define gen8_pml4e_encode gen8_pde_encode - static u64 snb_pte_encode(dma_addr_t addr, enum i915_cache_level level, u32 flags) @@ -777,24 +774,55 @@ static void free_pd(struct i915_address_space *vm, kfree(pd); } -static void init_pd_with_page(struct i915_address_space *vm, - struct i915_page_directory * const pd, - struct i915_page_table *pt) +#define init_pd(vm, pd, to) { \ + GEM_DEBUG_BUG_ON(!pd_has_phys_page(pd)); \ + fill_px((vm), (pd), gen8_pde_encode(px_dma(to), I915_CACHE_LLC)); \ + memset_p((pd)->entry, 
(to), 512); \ +} + +static inline void +write_dma_entry(struct i915_page_dma * const pdma, + const unsigned short pde, + const u64 encoded_entry) +{ + u64 * const vaddr = kmap_atomic(pdma->page); + + vaddr[pde] = encoded_entry; + kunmap_atomic(vaddr); +} + +static inline void +__set_pd_entry(struct i915_page_directory * const pd, + const unsigned short pde, + struct i915_page_dma * const to, + u64 (*encode)(const dma_addr_t, const enum i915_cache_level)) { - fill_px(vm, pd, gen8_pde_encode(px_dma(pt), I915_CACHE_LLC)); - memset_p(pd->entry, pt, 512); + GEM_BUG_ON(atomic_read(&pd->used) > 512); + + atomic_inc(&pd->used); + pd->entry[pde] = to; + write_dma_entry(px_base(pd), pde, encode(to->daddr, I915_CACHE_LLC)); } -static void init_pd(struct i915_address_space *vm, - struct i915_page_directory * const pd, - struct i915_page_directory * const to) +static inline void +__clear_pd_entry(struct i915_page_directory * const pd, + const unsigned short pde, + struct i915_page_dma * const to, + u64 (*encode)(const dma_addr_t, const enum i915_cache_level)) { - GEM_DEBUG_BUG_ON(!pd_has_phys_page(pd)); + GEM_BUG_ON(atomic_read(&pd->used) == 0); - fill_px(vm, pd, gen8_pdpe_encode(px_dma(to), I915_CACHE_LLC)); - memset_p(pd->entry, to, 512); + write_dma_entry(px_base(pd), pde, encode(to->daddr, I915_CACHE_LLC)); + pd->entry[pde] = to; + atomic_dec(&pd->used); } +#define set_pd_entry(pd, pde, to) \ + __set_pd_entry((pd), (pde), px_base(to), gen8_pde_encode) + +#define clear_pd_entry(pd, pde, to) \ + __clear_pd_entry((pd), (pde), px_base(to), gen8_pde_encode) + /* * PDE TLBs are a pain to invalidate on GEN8+. When we modify * the page table structures, we mark them dirty so that @@ -824,18 +852,6 @@ static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm, return !atomic_sub_return(num_entries, &pt->used); } -static void gen8_ppgtt_set_pde(struct i915_address_space *vm, - struct i915_page_directory *pd, - struct i915_page_table *pt, - unsigned int pde) -{ - gen8_pde_t *vaddr; - - vaddr = kmap_atomic_px(pd); - vaddr[pde] = gen8_pde_encode(px_dma(pt), I915_CACHE_LLC); - kunmap_atomic(vaddr); -} - static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm, struct i915_page_directory *pd, u64 start, u64 length) @@ -853,11 +869,7 @@ static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm, spin_lock(&pd->lock); if (!atomic_read(&pt->used)) { - gen8_ppgtt_set_pde(vm, pd, vm->scratch_pt, pde); - pd->entry[pde] = vm->scratch_pt; - - GEM_BUG_ON(!atomic_read(&pd->used)); - atomic_dec(&pd->used); + clear_pd_entry(pd, pde, vm->scratch_pt); free = true; } spin_unlock(&pd->lock); @@ -868,20 +880,6 @@ static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm, return !atomic_read(&pd->used); } -static void gen8_ppgtt_set_pdpe(struct i915_page_directory *pdp, - struct i915_page_directory *pd, - unsigned int pdpe) -{ - gen8_ppgtt_pdpe_t *vaddr; - - if (!pd_has_phys_page(pdp)) - return; - - vaddr = kmap_atomic_px(pdp); - vaddr[pdpe] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC); - kunmap_atomic(vaddr); -} - /* Removes entries from a single page dir pointer, releasing it if it's empty. 
* Caller can use the return value to update higher-level entries */ @@ -902,11 +900,7 @@ static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm, spin_lock(&pdp->lock); if (!atomic_read(&pd->used)) { - gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe); - pdp->entry[pdpe] = vm->scratch_pd; - - GEM_BUG_ON(!atomic_read(&pdp->used)); - atomic_dec(&pdp->used); + clear_pd_entry(pdp, pdpe, vm->scratch_pd); free = true; } spin_unlock(&pdp->lock); @@ -923,17 +917,6 @@ static void gen8_ppgtt_clear_3lvl(struct i915_address_space *vm, gen8_ppgtt_clear_pdp(vm, i915_vm_to_ppgtt(vm)->pd, start, length); } -static void gen8_ppgtt_set_pml4e(struct i915_page_directory *pml4, - struct i915_page_directory *pdp, - unsigned int pml4e) -{ - gen8_ppgtt_pml4e_t *vaddr; - - vaddr = kmap_atomic_px(pml4); - vaddr[pml4e] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC); - kunmap_atomic(vaddr); -} - /* Removes entries from a single pml4. * This is the top-level structure in 4-level page tables used on gen8+. * Empty entries are always scratch pml4e. @@ -957,8 +940,7 @@ static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm, spin_lock(&pml4->lock); if (!atomic_read(&pdp->used)) { - gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e); - pml4->entry[pml4e] = vm->scratch_pdp; + clear_pd_entry(pml4, pml4e, vm->scratch_pdp); free = true; } spin_unlock(&pml4->lock); @@ -1275,7 +1257,7 @@ static int gen8_init_scratch(struct i915_address_space *vm) } gen8_initialize_pt(vm, vm->scratch_pt); - init_pd_with_page(vm, vm->scratch_pd, vm->scratch_pt); + init_pd(vm, vm->scratch_pd, vm->scratch_pt); if (i915_vm_is_4lvl(vm)) init_pd(vm, vm->scratch_pdp, vm->scratch_pd); @@ -1298,6 +1280,11 @@ static int gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create) enum vgt_g2v_type msg; int i; + if (create) + atomic_inc(&ppgtt->pd->used); /* never remove */ + else + atomic_dec(&ppgtt->pd->used); + if (i915_vm_is_4lvl(vm)) { const u64 daddr = px_dma(ppgtt->pd); @@ -1414,9 +1401,7 @@ static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm, spin_lock(&pd->lock); if (pd->entry[pde] == vm->scratch_pt) { - gen8_ppgtt_set_pde(vm, pd, pt, pde); - pd->entry[pde] = pt; - atomic_inc(&pd->used); + set_pd_entry(pd, pde, pt); } else { alloc = pt; pt = pd->entry[pde]; @@ -1458,13 +1443,11 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm, goto unwind; } - init_pd_with_page(vm, pd, vm->scratch_pt); + init_pd(vm, pd, vm->scratch_pt); spin_lock(&pdp->lock); if (pdp->entry[pdpe] == vm->scratch_pd) { - gen8_ppgtt_set_pdpe(pdp, pd, pdpe); - pdp->entry[pdpe] = pd; - atomic_inc(&pdp->used); + set_pd_entry(pdp, pdpe, pd); } else { alloc = pd; pd = pdp->entry[pdpe]; @@ -1490,12 +1473,9 @@ unwind_pd: } spin_lock(&pdp->lock); if (atomic_dec_and_test(&pd->used)) { - gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe); - pdp->entry[pdpe] = vm->scratch_pd; - GEM_BUG_ON(!atomic_read(&pdp->used)); - atomic_dec(&pdp->used); GEM_BUG_ON(alloc); alloc = pd; /* defer the free to after the lock */ + clear_pd_entry(pdp, pdpe, vm->scratch_pd); } spin_unlock(&pdp->lock); unwind: @@ -1540,8 +1520,7 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm, spin_lock(&pml4->lock); if (pml4->entry[pml4e] == vm->scratch_pdp) { - gen8_ppgtt_set_pml4e(pml4, pdp, pml4e); - pml4->entry[pml4e] = pdp; + set_pd_entry(pml4, pml4e, pdp); } else { alloc = pdp; pdp = pml4->entry[pml4e]; @@ -1567,10 +1546,9 @@ unwind_pdp: } spin_lock(&pml4->lock); if (atomic_dec_and_test(&pdp->used)) { - gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e); - pml4->entry[pml4e] 
= vm->scratch_pdp; GEM_BUG_ON(alloc); alloc = pdp; /* defer the free until after the lock */ + clear_pd_entry(pml4, pml4e, vm->scratch_pdp); } spin_unlock(&pml4->lock); unwind: @@ -1595,20 +1573,16 @@ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt) if (IS_ERR(pd)) goto unwind; - init_pd_with_page(vm, pd, vm->scratch_pt); - gen8_ppgtt_set_pdpe(pdp, pd, pdpe); - - atomic_inc(&pdp->used); + init_pd(vm, pd, vm->scratch_pt); + set_pd_entry(pdp, pdpe, pd); } - atomic_inc(&pdp->used); /* never remove */ - return 0; unwind: start -= from; gen8_for_each_pdpe(pd, pdp, from, start, pdpe) { - gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe); + clear_pd_entry(pdp, pdpe, vm->scratch_pd); free_pd(vm, pd); } atomic_set(&pdp->used, 0); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index d0e0905acbbb..57a68ef4eda7 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -72,9 +72,6 @@ struct intel_gt; typedef u32 gen6_pte_t; typedef u64 gen8_pte_t; -typedef u64 gen8_pde_t; -typedef u64 gen8_ppgtt_pdpe_t; -typedef u64 gen8_ppgtt_pml4e_t; #define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT) -- cgit v1.2.3 From 72230b874a2071593e9f493bee072e41cc734646 Mon Sep 17 00:00:00 2001 From: Mika Kuoppala Date: Fri, 5 Jul 2019 22:52:02 +0100 Subject: drm/i915/gtt: Tear down setup and cleanup macros for page dma We don't use common codepaths to setup and cleanup page directories vs page tables. So their setup and cleanup macros are of no use and can be removed. Signed-off-by: Mika Kuoppala Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190705215204.4559-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_gtt.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index adf6eadd5009..f011ce1ae03a 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -594,8 +594,6 @@ static void cleanup_page_dma(struct i915_address_space *vm, #define kmap_atomic_px(px) kmap_atomic(px_base(px)->page) -#define setup_px(vm, px) setup_page_dma((vm), px_base(px)) -#define cleanup_px(vm, px) cleanup_page_dma((vm), px_base(px)) #define fill_px(vm, px, v) fill_page_dma((vm), px_base(px), (v)) #define fill32_px(vm, px, v) fill_page_dma_32((vm), px_base(px), (v)) @@ -697,7 +695,7 @@ static struct i915_page_table *alloc_pt(struct i915_address_space *vm) if (unlikely(!pt)) return ERR_PTR(-ENOMEM); - if (unlikely(setup_px(vm, pt))) { + if (unlikely(setup_page_dma(vm, &pt->base))) { kfree(pt); return ERR_PTR(-ENOMEM); } @@ -709,7 +707,7 @@ static struct i915_page_table *alloc_pt(struct i915_address_space *vm) static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt) { - cleanup_px(vm, pt); + cleanup_page_dma(vm, &pt->base); kfree(pt); } @@ -752,7 +750,7 @@ static struct i915_page_directory *alloc_pd(struct i915_address_space *vm) if (unlikely(!pd)) return ERR_PTR(-ENOMEM); - if (unlikely(setup_px(vm, pd))) { + if (unlikely(setup_page_dma(vm, &pd->base))) { kfree(pd); return ERR_PTR(-ENOMEM); } @@ -769,7 +767,7 @@ static void free_pd(struct i915_address_space *vm, struct i915_page_directory *pd) { if (likely(pd_has_phys_page(pd))) - cleanup_px(vm, pd); + cleanup_page_dma(vm, &pd->base); kfree(pd); } @@ -1649,7 +1647,7 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private 
*i915) } if (i915_vm_is_4lvl(&ppgtt->vm)) { - err = setup_px(&ppgtt->vm, ppgtt->pd); + err = setup_page_dma(&ppgtt->vm, &ppgtt->pd->base); if (err) goto err_free_pdp; -- cgit v1.2.3 From 73a8fdef5d6f525932a361104c0d46e5e2585c28 Mon Sep 17 00:00:00 2001 From: Mika Kuoppala Date: Fri, 5 Jul 2019 22:52:03 +0100 Subject: drm/i915/gtt: Setup phys pages for 3lvl pdps If we setup backing phys page for 3lvl pdps, as they are not used, we will lose 5 pages per ppgtt. Trading this memory on bsw, we gain more common code paths for all gen8+ directory manipulation. And those paths are now void of checks for page directory type, making the hot paths faster. v2: don't shortcut vm (Chris) Signed-off-by: Mika Kuoppala Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190705215204.4559-3-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_gtt.c | 77 ++++++++++++++++++++++++------------- 1 file changed, 50 insertions(+), 27 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index f011ce1ae03a..0a55b0932c86 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -758,22 +758,14 @@ static struct i915_page_directory *alloc_pd(struct i915_address_space *vm) return pd; } -static inline bool pd_has_phys_page(const struct i915_page_directory * const pd) -{ - return pd->base.page; -} - static void free_pd(struct i915_address_space *vm, struct i915_page_directory *pd) { - if (likely(pd_has_phys_page(pd))) - cleanup_page_dma(vm, &pd->base); - + cleanup_page_dma(vm, &pd->base); kfree(pd); } #define init_pd(vm, pd, to) { \ - GEM_DEBUG_BUG_ON(!pd_has_phys_page(pd)); \ fill_px((vm), (pd), gen8_pde_encode(px_dma(to), I915_CACHE_LLC)); \ memset_p((pd)->entry, (to), 512); \ } @@ -1604,6 +1596,50 @@ static void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt) ppgtt->vm.vma_ops.clear_pages = clear_pages; } +static void init_pd_n(struct i915_address_space *vm, + struct i915_page_directory *pd, + struct i915_page_directory *to, + const unsigned int entries) +{ + const u64 daddr = gen8_pde_encode(px_dma(to), I915_CACHE_LLC); + u64 * const vaddr = kmap_atomic(pd->base.page); + + memset64(vaddr, daddr, entries); + kunmap_atomic(vaddr); + + memset_p(pd->entry, to, entries); +} + +static struct i915_page_directory * +gen8_alloc_top_pd(struct i915_address_space *vm) +{ + struct i915_page_directory *pd; + + if (i915_vm_is_4lvl(vm)) { + pd = alloc_pd(vm); + if (!IS_ERR(pd)) + init_pd(vm, pd, vm->scratch_pdp); + + return pd; + } + + /* 3lvl */ + pd = __alloc_pd(); + if (!pd) + return ERR_PTR(-ENOMEM); + + pd->entry[GEN8_3LVL_PDPES] = NULL; + + if (unlikely(setup_page_dma(vm, &pd->base))) { + kfree(pd); + return ERR_PTR(-ENOMEM); + } + + init_pd_n(vm, pd, vm->scratch_pd, GEN8_3LVL_PDPES); + + return pd; +} + /* * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers * with a net effect resembling a 2-level page table in normal x86 terms. 
Each @@ -1640,34 +1676,21 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915) if (err) goto err_free; - ppgtt->pd = __alloc_pd(); - if (!ppgtt->pd) { - err = -ENOMEM; + ppgtt->pd = gen8_alloc_top_pd(&ppgtt->vm); + if (IS_ERR(ppgtt->pd)) { + err = PTR_ERR(ppgtt->pd); goto err_free_scratch; } if (i915_vm_is_4lvl(&ppgtt->vm)) { - err = setup_page_dma(&ppgtt->vm, &ppgtt->pd->base); - if (err) - goto err_free_pdp; - - init_pd(&ppgtt->vm, ppgtt->pd, ppgtt->vm.scratch_pdp); - ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_4lvl; ppgtt->vm.insert_entries = gen8_ppgtt_insert_4lvl; ppgtt->vm.clear_range = gen8_ppgtt_clear_4lvl; } else { - /* - * We don't need to setup dma for top level pdp, only - * for entries. So point entries to scratch. - */ - memset_p(ppgtt->pd->entry, ppgtt->vm.scratch_pd, - GEN8_3LVL_PDPES); - if (intel_vgpu_active(i915)) { err = gen8_preallocate_top_level_pdp(ppgtt); if (err) - goto err_free_pdp; + goto err_free_pd; } ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_3lvl; @@ -1682,7 +1705,7 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915) return ppgtt; -err_free_pdp: +err_free_pd: free_pd(&ppgtt->vm, ppgtt->pd); err_free_scratch: gen8_free_scratch(&ppgtt->vm); -- cgit v1.2.3 From 50b38bc4d39aa6fa5639aabcccfb828c19b89278 Mon Sep 17 00:00:00 2001 From: Mika Kuoppala Date: Fri, 5 Jul 2019 22:52:04 +0100 Subject: drm/i915/gtt: Introduce release_pd_entry By encapsulating the locking upper level and used check for entry into a helper function, we can use it in all callsites. v2: get rid of atomic_reads on lower level clears (Chris) Cc: Chris Wilson Signed-off-by: Mika Kuoppala Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190705215204.4559-4-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_gtt.c | 114 +++++++++++++----------------------- 1 file changed, 42 insertions(+), 72 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 0a55b0932c86..236c964dd761 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -811,7 +811,25 @@ __clear_pd_entry(struct i915_page_directory * const pd, __set_pd_entry((pd), (pde), px_base(to), gen8_pde_encode) #define clear_pd_entry(pd, pde, to) \ - __clear_pd_entry((pd), (pde), px_base(to), gen8_pde_encode) + __clear_pd_entry((pd), (pde), (to), gen8_pde_encode) + +static bool +release_pd_entry(struct i915_page_directory * const pd, + const unsigned short pde, + atomic_t *counter, + struct i915_page_dma * const scratch) +{ + bool free = false; + + spin_lock(&pd->lock); + if (atomic_dec_and_test(counter)) { + clear_pd_entry(pd, pde, scratch); + free = true; + } + spin_unlock(&pd->lock); + + return free; +} /* * PDE TLBs are a pain to invalidate on GEN8+. When we modify @@ -827,11 +845,11 @@ static void mark_tlbs_dirty(struct i915_ppgtt *ppgtt) /* Removes entries from a single page table, releasing it if it's empty. * Caller can use the return value to update higher-level entries. 
*/ -static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm, +static void gen8_ppgtt_clear_pt(const struct i915_address_space *vm, struct i915_page_table *pt, u64 start, u64 length) { - unsigned int num_entries = gen8_pte_count(start, length); + const unsigned int num_entries = gen8_pte_count(start, length); gen8_pte_t *vaddr; vaddr = kmap_atomic_px(pt); @@ -839,10 +857,11 @@ static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm, kunmap_atomic(vaddr); GEM_BUG_ON(num_entries > atomic_read(&pt->used)); - return !atomic_sub_return(num_entries, &pt->used); + + atomic_sub(num_entries, &pt->used); } -static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm, +static void gen8_ppgtt_clear_pd(struct i915_address_space *vm, struct i915_page_directory *pd, u64 start, u64 length) { @@ -850,30 +869,20 @@ static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm, u32 pde; gen8_for_each_pde(pt, pd, start, length, pde) { - bool free = false; - GEM_BUG_ON(pt == vm->scratch_pt); - if (!gen8_ppgtt_clear_pt(vm, pt, start, length)) - continue; - - spin_lock(&pd->lock); - if (!atomic_read(&pt->used)) { - clear_pd_entry(pd, pde, vm->scratch_pt); - free = true; - } - spin_unlock(&pd->lock); - if (free) + atomic_inc(&pt->used); + gen8_ppgtt_clear_pt(vm, pt, start, length); + if (release_pd_entry(pd, pde, &pt->used, + px_base(vm->scratch_pt))) free_pt(vm, pt); } - - return !atomic_read(&pd->used); } /* Removes entries from a single page dir pointer, releasing it if it's empty. * Caller can use the return value to update higher-level entries */ -static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm, +static void gen8_ppgtt_clear_pdp(struct i915_address_space *vm, struct i915_page_directory * const pdp, u64 start, u64 length) { @@ -881,24 +890,14 @@ static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm, unsigned int pdpe; gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { - bool free = false; - GEM_BUG_ON(pd == vm->scratch_pd); - if (!gen8_ppgtt_clear_pd(vm, pd, start, length)) - continue; - - spin_lock(&pdp->lock); - if (!atomic_read(&pd->used)) { - clear_pd_entry(pdp, pdpe, vm->scratch_pd); - free = true; - } - spin_unlock(&pdp->lock); - if (free) + atomic_inc(&pd->used); + gen8_ppgtt_clear_pd(vm, pd, start, length); + if (release_pd_entry(pdp, pdpe, &pd->used, + px_base(vm->scratch_pd))) free_pd(vm, pd); } - - return !atomic_read(&pdp->used); } static void gen8_ppgtt_clear_3lvl(struct i915_address_space *vm, @@ -922,19 +921,12 @@ static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm, GEM_BUG_ON(!i915_vm_is_4lvl(vm)); gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) { - bool free = false; GEM_BUG_ON(pdp == vm->scratch_pdp); - if (!gen8_ppgtt_clear_pdp(vm, pdp, start, length)) - continue; - - spin_lock(&pml4->lock); - if (!atomic_read(&pdp->used)) { - clear_pd_entry(pml4, pml4e, vm->scratch_pdp); - free = true; - } - spin_unlock(&pml4->lock); - if (free) + atomic_inc(&pdp->used); + gen8_ppgtt_clear_pdp(vm, pdp, start, length); + if (release_pd_entry(pml4, pml4e, &pdp->used, + px_base(vm->scratch_pdp))) free_pd(vm, pdp); } } @@ -1457,17 +1449,8 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm, goto out; unwind_pd: - if (alloc) { - free_pd(vm, alloc); - alloc = NULL; - } - spin_lock(&pdp->lock); - if (atomic_dec_and_test(&pd->used)) { - GEM_BUG_ON(alloc); - alloc = pd; /* defer the free to after the lock */ - clear_pd_entry(pdp, pdpe, vm->scratch_pd); - } - spin_unlock(&pdp->lock); + if (release_pd_entry(pdp, pdpe, &pd->used, 
px_base(vm->scratch_pd))) + free_pd(vm, pd); unwind: gen8_ppgtt_clear_pdp(vm, pdp, from, start - from); out: @@ -1530,17 +1513,8 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm, goto out; unwind_pdp: - if (alloc) { - free_pd(vm, alloc); - alloc = NULL; - } - spin_lock(&pml4->lock); - if (atomic_dec_and_test(&pdp->used)) { - GEM_BUG_ON(alloc); - alloc = pdp; /* defer the free until after the lock */ - clear_pd_entry(pml4, pml4e, vm->scratch_pdp); - } - spin_unlock(&pml4->lock); + if (release_pd_entry(pml4, pml4e, &pdp->used, px_base(vm->scratch_pdp))) + free_pd(vm, pdp); unwind: gen8_ppgtt_clear_4lvl(vm, from, start - from); out: @@ -1570,11 +1544,7 @@ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt) return 0; unwind: - start -= from; - gen8_for_each_pdpe(pd, pdp, from, start, pdpe) { - clear_pd_entry(pdp, pdpe, vm->scratch_pd); - free_pd(vm, pd); - } + gen8_ppgtt_clear_pdp(vm, pdp, from, start - from); atomic_set(&pdp->used, 0); return -ENOMEM; } -- cgit v1.2.3 From 15e7facb7b428f5f0dac3ee85ecb519a46caaa34 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 7 Jul 2019 16:11:35 +0100 Subject: drm/i915: Pull assert_forcewake_active() underneath the lock Make no assumption that something in the background is not acquiring the fw_domain -- but we still do not track owner so assume that any active domain is intended by the caller. Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190707151135.11700-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_uncore.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index bb9e0da30e94..5f0367fd3200 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -747,6 +747,8 @@ void assert_forcewakes_active(struct intel_uncore *uncore, if (!uncore->funcs.force_wake_get) return; + spin_lock_irq(&uncore->lock); + assert_rpm_wakelock_held(uncore->rpm); fw_domains &= uncore->fw_domains; @@ -770,6 +772,8 @@ void assert_forcewakes_active(struct intel_uncore *uncore, domain->id, actual)) break; } + + spin_unlock_irq(&uncore->lock); } /* We give fast paths for the really cool registers */ -- cgit v1.2.3 From 77adbd8fbf3a10e149a76abee6384a4968ed318e Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 8 Jul 2019 16:49:14 +0100 Subject: drm/i915: Explicitly track active fw_domain timers Stop guessing over whether we have an extra wakeref held by the delayed fw put, and track it explicitly for the sake of debug. 
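A standalone sketch of the bookkeeping pattern (simplified types and placeholder names; the real mask is a per-domain enum, not a plain unsigned int): the mask records which domains have an auto-release timer armed, and the assert derives its expected count from that mask instead of peeking at the hrtimer.

#include <assert.h>

struct uncore_sketch {
	unsigned int fw_domains_timer;	/* one bit per domain with a timer armed */
};

void arm_timer(struct uncore_sketch *u, unsigned int domain_mask)
{
	assert(!(u->fw_domains_timer & domain_mask));	/* never armed twice */
	u->fw_domains_timer |= domain_mask;
}

void timer_fired(struct uncore_sketch *u, unsigned int domain_mask)
{
	u->fw_domains_timer &= ~domain_mask;
}

unsigned int expected_wake_count(const struct uncore_sketch *u, unsigned int domain_mask)
{
	unsigned int expect = 1;	/* the caller's explicit wakeref */

	if (u->fw_domains_timer & domain_mask)
		expect++;		/* pending automatic release */

	return expect;
}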
Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190708154914.26850-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_uncore.c | 15 ++++++++------- drivers/gpu/drm/i915/intel_uncore.h | 1 + 2 files changed, 9 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 5f0367fd3200..4015e964c6fc 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -78,6 +78,8 @@ fw_domain_reset(const struct intel_uncore_forcewake_domain *d) static inline void fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d) { + GEM_BUG_ON(d->uncore->fw_domains_timer & d->mask); + d->uncore->fw_domains_timer |= d->mask; d->wake_count++; hrtimer_start_range_ns(&d->timer, NSEC_PER_MSEC, @@ -353,9 +355,10 @@ intel_uncore_fw_release_timer(struct hrtimer *timer) return HRTIMER_RESTART; spin_lock_irqsave(&uncore->lock, irqflags); - if (WARN_ON(domain->wake_count == 0)) - domain->wake_count++; + uncore->fw_domains_timer &= ~domain->mask; + + GEM_BUG_ON(!domain->wake_count); if (--domain->wake_count == 0) uncore->funcs.force_wake_put(uncore, domain->mask); @@ -673,8 +676,7 @@ static void __intel_uncore_forcewake_put(struct intel_uncore *uncore, fw_domains &= uncore->fw_domains; for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) { - if (WARN_ON(domain->wake_count == 0)) - continue; + GEM_BUG_ON(!domain->wake_count); if (--domain->wake_count) { domain->active = true; @@ -764,7 +766,7 @@ void assert_forcewakes_active(struct intel_uncore *uncore, unsigned int actual = READ_ONCE(domain->wake_count); unsigned int expect = 1; - if (hrtimer_active(&domain->timer) && READ_ONCE(domain->active)) + if (uncore->fw_domains_timer & domain->mask) expect++; /* pending automatic release */ if (WARN(actual < expect, @@ -1160,8 +1162,7 @@ static noinline void ___force_wake_auto(struct intel_uncore *uncore, static inline void __force_wake_auto(struct intel_uncore *uncore, enum forcewake_domains fw_domains) { - if (WARN_ON(!fw_domains)) - return; + GEM_BUG_ON(!fw_domains); /* Turn on all requested but inactive supported forcewake domains. */ fw_domains &= uncore->fw_domains; diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h index 7108475d9b24..2f6ffa309669 100644 --- a/drivers/gpu/drm/i915/intel_uncore.h +++ b/drivers/gpu/drm/i915/intel_uncore.h @@ -123,6 +123,7 @@ struct intel_uncore { enum forcewake_domains fw_domains; enum forcewake_domains fw_domains_active; + enum forcewake_domains fw_domains_timer; enum forcewake_domains fw_domains_saved; /* user domains saved for S3 */ struct intel_uncore_forcewake_domain { -- cgit v1.2.3 From cf3bd1a0f53eae790a83af445c7795b4ce2e1a9b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 8 Jul 2019 16:23:21 +0100 Subject: drm/i915/selftests: Reorder error cleanup for whitelist checking Reorder the error paths so that we unwind all the locals from any error path and so avoid setting off divers alarum in case we find an error. 
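The reordering follows the usual cascading-label unwind idiom; a standalone sketch under the assumption that the selftest details don't matter here, with malloc/free standing in for the context and spinner setup/teardown:

#include <stdlib.h>

int use_resources(void *ctx, void *spin)
{
	(void)ctx;
	(void)spin;
	return 0;	/* pretend the checks passed */
}

int check_sketch(void)
{
	void *ctx, *spin;
	int err;

	ctx = malloc(16);	/* stands in for kernel_context() */
	if (!ctx)
		return -1;

	spin = malloc(16);	/* stands in for igt_spinner_init() */
	if (!spin) {
		err = -1;
		goto out_ctx;
	}

	err = use_resources(ctx, spin);
	if (err)
		goto out_spin;

	/* further checks would also jump to out_spin on error */

out_spin:
	free(spin);	/* always released, in reverse order of acquisition */
out_ctx:
	free(ctx);
	return err;
}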
References: https://bugs.freedesktop.org/show_bug.cgi?id=111048 Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190708152321.22187-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/selftest_workarounds.c | 35 ++++++++++++++------------ 1 file changed, 19 insertions(+), 16 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c index b933d831eeb1..fa01ea7855de 100644 --- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c +++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c @@ -287,7 +287,7 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine, const char *name) { struct drm_i915_private *i915 = engine->i915; - struct i915_gem_context *ctx; + struct i915_gem_context *ctx, *tmp; struct igt_spinner spin; intel_wakeref_t wakeref; int err; @@ -295,56 +295,59 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine, pr_info("Checking %d whitelisted registers (RING_NONPRIV) [%s]\n", engine->whitelist.count, name); - err = igt_spinner_init(&spin, i915); - if (err) - return err; - ctx = kernel_context(i915); if (IS_ERR(ctx)) return PTR_ERR(ctx); + err = igt_spinner_init(&spin, i915); + if (err) + goto out_ctx; + err = check_whitelist(ctx, engine); if (err) { pr_err("Invalid whitelist *before* %s reset!\n", name); - goto out; + goto out_spin; } err = switch_to_scratch_context(engine, &spin); if (err) - goto out; + goto out_spin; with_intel_runtime_pm(&i915->runtime_pm, wakeref) err = reset(engine); igt_spinner_end(&spin); - igt_spinner_fini(&spin); if (err) { pr_err("%s reset failed\n", name); - goto out; + goto out_spin; } err = check_whitelist(ctx, engine); if (err) { pr_err("Whitelist not preserved in context across %s reset!\n", name); - goto out; + goto out_spin; } + tmp = kernel_context(i915); + if (IS_ERR(tmp)) { + err = PTR_ERR(tmp); + goto out_spin; + } kernel_context_close(ctx); - - ctx = kernel_context(i915); - if (IS_ERR(ctx)) - return PTR_ERR(ctx); + ctx = tmp; err = check_whitelist(ctx, engine); if (err) { pr_err("Invalid whitelist *after* %s reset in fresh context!\n", name); - goto out; + goto out_spin; } -out: +out_spin: + igt_spinner_fini(&spin); +out_ctx: kernel_context_close(ctx); return err; } -- cgit v1.2.3 From a17ce803dffa8e34fda39f8af721a4005c451fef Mon Sep 17 00:00:00 2001 From: Rodrigo Vivi Date: Mon, 8 Jul 2019 13:09:06 -0700 Subject: drm/i915: Update DRIVER_DATE to 20190708 Signed-off-by: Rodrigo Vivi --- drivers/gpu/drm/i915/i915_drv.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index c35de1380da9..a9381e404fd5 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -99,8 +99,8 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20190619" -#define DRIVER_TIMESTAMP 1560947544 +#define DRIVER_DATE "20190708" +#define DRIVER_TIMESTAMP 1562616546 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and * WARN_ON()) for hw state sanity checks to check for unexpected conditions -- cgit v1.2.3 From baf08ed50a614f21398f30e11d4b6ba2ffecc36e Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 8 Jul 2019 22:55:24 +0100 Subject: drm/i915/selftests: Set igt_spinner.gt for early exit Set up a default gt pointer for an early cleanup of igt_spinnter, before a request 
is created and igt_spinner.gt set to the active engine's. Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190708215524.31639-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/selftests/igt_spinner.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c index 0c1f65262a63..89b6552a6497 100644 --- a/drivers/gpu/drm/i915/selftests/igt_spinner.c +++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c @@ -19,6 +19,7 @@ int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915) memset(spin, 0, sizeof(*spin)); spin->i915 = i915; + spin->gt = &i915->gt; spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE); if (IS_ERR(spin->hws)) { -- cgit v1.2.3 From cb6d7c7dc7ff8cace666ddec66334117a6068ce2 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 8 Jul 2019 15:03:27 +0100 Subject: drm/i915/userptr: Acquire the page lock around set_page_dirty() set_page_dirty says: For pages with a mapping this should be done under the page lock for the benefit of asynchronous memory errors who prefer a consistent dirty state. This rule can be broken in some special cases, but should be better not to. Under those rules, it is only safe for us to use the plain set_page_dirty calls for shmemfs/anonymous memory. Userptr may be used with real mappings and so needs to use the locked version (set_page_dirty_lock). Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=203317 Fixes: 5cc9ed4b9a7a ("drm/i915: Introduce mapping of user pages into video memory (userptr) ioctl") References: 6dcc693bc57f ("ext4: warn when page is dirtied without buffers") Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Cc: stable@vger.kernel.org Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190708140327.26825-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index 16ccec7fb7da..32d208ede343 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -665,7 +665,15 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj, for_each_sgt_page(page, sgt_iter, pages) { if (obj->mm.dirty) - set_page_dirty(page); + /* + * As this may not be anonymous memory (e.g. shmem) + * but exist on a real mapping, we have to lock + * the page in order to dirty it -- holding + * the page reference is not sufficient to + * prevent the inode from being truncated. + * Play safe and take the lock. + */ + set_page_dirty_lock(page); mark_page_accessed(page); put_page(page); -- cgit v1.2.3 From cbcec57e9dd75c5bb857ead091e471ecb25afd1d Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 8 Jul 2019 12:30:38 +0100 Subject: drm/i915/selftests: Fill in a little more of the dummy fence Initialise the dma_fence innards in preparation for making dma_fence_signal() always check the callback list. 
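For illustration only (not part of the patch): the fields being filled in are exactly the ones dma_fence_signal() relies on once it always inspects the callback list, namely the fence lock and cb_list. A minimal sketch of what the dummy request therefore has to set up, mirroring the hunk below:

	spin_lock_init(&rq->lock);
	rq->fence.lock = &rq->lock;         /* taken by dma_fence_signal() */
	INIT_LIST_HEAD(&rq->fence.cb_list); /* walked even if no callback was ever added */

Without these, signalling the selftest's hand-rolled fence would touch an uninitialised lock and list head.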
Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190708113038.19251-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/selftest_lrc.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index 11f490502ca6..672bdaa66540 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -859,6 +859,10 @@ static struct i915_request *dummy_request(struct intel_engine_cs *engine) i915_sw_fence_init(&rq->submit, dummy_notify); set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags); + spin_lock_init(&rq->lock); + rq->fence.lock = &rq->lock; + INIT_LIST_HEAD(&rq->fence.cb_list); + return rq; } -- cgit v1.2.3 From 7c6d6867e9723e9548ed5ae0c6b070652f4d43e3 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 5 Jul 2019 13:43:24 +0100 Subject: drm/i915/gt: Apply RCS workarounds to the render class Treat all render engines to the RCS workarounds, simply to avoid using engine->id when we are trying to think in terms of classes. Signed-off-by: Chris Wilson Reviewed-by: Stuart Summers Link: https://patchwork.freedesktop.org/patch/msgid/20190705124325.14270-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_workarounds.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 53fe1eb7c7bd..0342b7d761b8 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -1354,7 +1354,7 @@ engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 8)) return; - if (engine->id == RCS0) + if (engine->class == RENDER_CLASS) rcs_engine_wa_init(engine, wal); else xcs_engine_wa_init(engine, wal); -- cgit v1.2.3 From 4a5fdc962fc4ce143791496def78c2acc3e51810 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 5 Jul 2019 13:43:25 +0100 Subject: drm/i915/gt: Remove presumption of RCS0 We now track features correctly instead of probing i915->engine[RCS0] which is much more flexible and avoids any nasty surprises. Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190705124325.14270-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 6 ------ 1 file changed, 6 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index df5932f5f578..bdf279fa3b2e 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -448,12 +448,6 @@ int intel_engines_init_mmio(struct drm_i915_private *i915) if (WARN_ON(mask != engine_mask)) device_info->engine_mask = mask; - /* We always presume we have at least RCS available for later probing */ - if (WARN_ON(!HAS_ENGINE(i915, RCS0))) { - err = -ENODEV; - goto cleanup; - } - RUNTIME_INFO(i915)->num_engines = hweight32(mask); intel_gt_check_and_clear_faults(&i915->gt); -- cgit v1.2.3 From 4fb76782ad012d657df51f437a0dfcebce6aea53 Mon Sep 17 00:00:00 2001 From: Ramalingam C Date: Mon, 8 Jul 2019 17:03:19 +0530 Subject: drm/i915/hdcp: debug logs for sink related failures Adding few more debug logs to identify the sink specific HDCP failures along with a out of mem failure. 
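For illustration, the pattern being added is simply to report why we bail before returning the error, so a misbehaving sink can be identified from a debug log rather than a bare -EINVAL; roughly, mirroring the hunks below:

	if (num_downstream == 0) {
		DRM_DEBUG_KMS("Repeater with zero downstream devices\n");
		return -EINVAL;
	}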
v2: Capturing the Bug and a-b Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=110991 Signed-off-by: Ramalingam C Signed-off-by: Uma Shankar cc: Daniel Vetter Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20190708113319.2517-1-ramalingam.c@intel.com --- drivers/gpu/drm/i915/display/intel_hdcp.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index bc3a94d491c4..a78139f9e847 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -523,12 +523,16 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector) * authentication. */ num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]); - if (num_downstream == 0) + if (num_downstream == 0) { + DRM_DEBUG_KMS("Repeater with zero downstream devices\n"); return -EINVAL; + } ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL); - if (!ksv_fifo) + if (!ksv_fifo) { + DRM_DEBUG_KMS("Out of mem: ksv_fifo\n"); return -ENOMEM; + } ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo); if (ret) @@ -1206,8 +1210,10 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector) if (ret < 0) return ret; - if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) + if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) { + DRM_DEBUG_KMS("cert.rx_caps dont claim HDCP2.2\n"); return -EINVAL; + } hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]); -- cgit v1.2.3 From f384e48d7641ef284ae760d150c80920ffc3adbb Mon Sep 17 00:00:00 2001 From: Vandita Kulkarni Date: Thu, 27 Jun 2019 20:54:57 +0530 Subject: drm/i915: Add icl mipi dsi properties MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add scaling and panel orientation properties for icl mipi dsi. 
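Sketch only, mirroring the hunk below: the new per-platform hook attaches the scaling-mode property with the set of allowed scalers and derives the panel-orientation property from the connector's fixed mode, using the standard drm connector helpers:

	u32 allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) |
			      BIT(DRM_MODE_SCALE_FULLSCREEN) |
			      BIT(DRM_MODE_SCALE_CENTER);

	drm_connector_attach_scaling_mode_property(&connector->base, allowed_scalers);
	drm_connector_init_panel_orientation_property(&connector->base,
						      connector->panel.fixed_mode->hdisplay,
						      connector->panel.fixed_mode->vdisplay);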
v2: Add platform specific function (Ville) v3: Remove redundant check and update scaler call (Jani, Ville) Signed-off-by: Vandita Kulkarni Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190627152457.26146-1-vandita.kulkarni@intel.com --- drivers/gpu/drm/i915/display/icl_dsi.c | 22 ++++++++++++++++++++++ drivers/gpu/drm/i915/display/vlv_dsi.c | 4 ++-- 2 files changed, 24 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index b8673debf932..3cf95c34143c 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -1508,6 +1508,26 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi) intel_dsi_log_params(intel_dsi); } +static void icl_dsi_add_properties(struct intel_connector *connector) +{ + u32 allowed_scalers; + + allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | + BIT(DRM_MODE_SCALE_FULLSCREEN) | + BIT(DRM_MODE_SCALE_CENTER); + + drm_connector_attach_scaling_mode_property(&connector->base, + allowed_scalers); + + connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT; + + connector->base.display_info.panel_orientation = + intel_dsi_get_panel_orientation(connector); + drm_connector_init_panel_orientation_property(&connector->base, + connector->panel.fixed_mode->hdisplay, + connector->panel.fixed_mode->vdisplay); +} + void icl_dsi_init(struct drm_i915_private *dev_priv) { struct drm_device *dev = &dev_priv->drm; @@ -1601,6 +1621,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv) } icl_dphy_param_init(intel_dsi); + + icl_dsi_add_properties(intel_connector); return; err: diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index e272d826210a..c8002ffd29e7 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -1644,7 +1644,7 @@ vlv_dsi_get_panel_orientation(struct intel_connector *connector) return intel_dsi_get_panel_orientation(connector); } -static void intel_dsi_add_properties(struct intel_connector *connector) +static void vlv_dsi_add_properties(struct intel_connector *connector) { struct drm_i915_private *dev_priv = to_i915(connector->base.dev); @@ -1983,7 +1983,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) intel_panel_init(&intel_connector->panel, fixed_mode, NULL); intel_panel_setup_backlight(connector, INVALID_PIPE); - intel_dsi_add_properties(intel_connector); + vlv_dsi_add_properties(intel_connector); return; -- cgit v1.2.3 From c598a66491cff68237cb3221fa444e4ca9f0d7f7 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 19 Jun 2019 21:03:08 +0300 Subject: drm/i915/sdvo: Use named initializers for the SDVO command names MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use named initializers to make it easier to associate the SDVO debug prints with the SDVO command defines. Also switch to using ARRAY_SIZE() instead of assuming that SDVO_CMD_STATUS_SCALING_NOT_SUPP is the last command type. 
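The idiom in play, sketched here on a made-up status enum rather than the real SDVO codes: a designated-initializer lookup table that is bounds-checked with ARRAY_SIZE(), so the strings line up with the defines by construction and the table no longer has to end at any particular value:

	static const char * const foo_status_names[] = {
		[FOO_STATUS_OK]   = "Ok",	/* FOO_STATUS_* values are hypothetical */
		[FOO_STATUS_BUSY] = "Busy",
	};

	const char *name = NULL;
	if (status < ARRAY_SIZE(foo_status_names))
		name = foo_status_names[status];
	/* gaps in the enum simply yield NULL entries, so also check name != NULL */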
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190619180312.31817-2-ville.syrjala@linux.intel.com Reviewed-by: Chris Wilson --- drivers/gpu/drm/i915/display/intel_sdvo.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index 0860ae36bb87..e144c1203828 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -429,13 +429,13 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd, } static const char * const cmd_status_names[] = { - "Power on", - "Success", - "Not supported", - "Invalid arg", - "Pending", - "Target not specified", - "Scaling not supported" + [SDVO_CMD_STATUS_POWER_ON] = "Power on", + [SDVO_CMD_STATUS_SUCCESS] = "Success", + [SDVO_CMD_STATUS_NOTSUPP] = "Not supported", + [SDVO_CMD_STATUS_INVALID_ARG] = "Invalid arg", + [SDVO_CMD_STATUS_PENDING] = "Pending", + [SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED] = "Target not specified", + [SDVO_CMD_STATUS_SCALING_NOT_SUPP] = "Scaling not supported", }; static bool __intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd, @@ -562,7 +562,7 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, #define BUF_PRINT(args...) \ pos += snprintf(buffer + pos, max_t(int, BUF_LEN - pos, 0), args) - if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP) + if (status < ARRAY_SIZE(cmd_status_names)) BUF_PRINT("(%s)", cmd_status_names[status]); else BUF_PRINT("(??? %d)", status); -- cgit v1.2.3 From 8a9c8028399e47ced073e13890ce8912c655ef7e Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 19 Jun 2019 21:03:09 +0300 Subject: drm/i915/sdvo: Remove duplicate SET_INPUT_TIMINGS_PART1 cmd name string MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sdvo_cmd_names[] contains two entries for SET_INPUT_TIMINGS_PART1. Remove one of them. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190619180312.31817-3-ville.syrjala@linux.intel.com Reviewed-by: Chris Wilson --- drivers/gpu/drm/i915/display/intel_sdvo.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index e144c1203828..b3d51d7c076f 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -299,7 +299,6 @@ static const struct _sdvo_cmd_name { SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1), SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2), SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1), -- cgit v1.2.3 From 7b8062ea60d9e207e42c58ff714c911b01f4c99a Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 19 Jun 2019 21:03:10 +0300 Subject: drm/i915/sdvo: Shrink sdvo_cmd_names[] strings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Drop the redundant "SDVO_CMD_" prefix from the command name strings in sdvo_cmd_names[]. While at it throw away the unused struct name, and undef SDVO_CMD_NAME_ENTRY() when we're done. 
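For reference, the macro trick used below, shown with a hypothetical FOO_CMD_ namespace: token pasting supplies the prefixed define while stringification stores only the short name, and the helper macro is #undef'ed once the table has been built:

	#define FOO_CMD_NAME_ENTRY(cmd_) { .cmd = FOO_CMD_ ## cmd_, .name = #cmd_ }

	static const struct {
		u8 cmd;
		const char *name;
	} foo_cmd_names[] = {
		FOO_CMD_NAME_ENTRY(RESET),	/* expands to { .cmd = FOO_CMD_RESET, .name = "RESET" } */
	};

	#undef FOO_CMD_NAME_ENTRY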
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190619180312.31817-4-ville.syrjala@linux.intel.com Reviewed-by: Chris Wilson --- drivers/gpu/drm/i915/display/intel_sdvo.c | 219 +++++++++++++++--------------- 1 file changed, 111 insertions(+), 108 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index b3d51d7c076f..bde11c03cd5a 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -274,124 +274,127 @@ static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch) return false; } -#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd} +#define SDVO_CMD_NAME_ENTRY(cmd_) { .cmd = SDVO_CMD_ ## cmd_, .name = #cmd_ } + /** Mapping of command numbers to names, for debug output */ -static const struct _sdvo_cmd_name { +static const struct { u8 cmd; const char *name; } __attribute__ ((packed)) sdvo_cmd_names[] = { - SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS), + SDVO_CMD_NAME_ENTRY(RESET), + SDVO_CMD_NAME_ENTRY(GET_DEVICE_CAPS), + SDVO_CMD_NAME_ENTRY(GET_FIRMWARE_REV), + SDVO_CMD_NAME_ENTRY(GET_TRAINED_INPUTS), + SDVO_CMD_NAME_ENTRY(GET_ACTIVE_OUTPUTS), + 
SDVO_CMD_NAME_ENTRY(SET_ACTIVE_OUTPUTS), + SDVO_CMD_NAME_ENTRY(GET_IN_OUT_MAP), + SDVO_CMD_NAME_ENTRY(SET_IN_OUT_MAP), + SDVO_CMD_NAME_ENTRY(GET_ATTACHED_DISPLAYS), + SDVO_CMD_NAME_ENTRY(GET_HOT_PLUG_SUPPORT), + SDVO_CMD_NAME_ENTRY(SET_ACTIVE_HOT_PLUG), + SDVO_CMD_NAME_ENTRY(GET_ACTIVE_HOT_PLUG), + SDVO_CMD_NAME_ENTRY(GET_INTERRUPT_EVENT_SOURCE), + SDVO_CMD_NAME_ENTRY(SET_TARGET_INPUT), + SDVO_CMD_NAME_ENTRY(SET_TARGET_OUTPUT), + SDVO_CMD_NAME_ENTRY(GET_INPUT_TIMINGS_PART1), + SDVO_CMD_NAME_ENTRY(GET_INPUT_TIMINGS_PART2), + SDVO_CMD_NAME_ENTRY(SET_INPUT_TIMINGS_PART1), + SDVO_CMD_NAME_ENTRY(SET_INPUT_TIMINGS_PART2), + SDVO_CMD_NAME_ENTRY(SET_OUTPUT_TIMINGS_PART1), + SDVO_CMD_NAME_ENTRY(SET_OUTPUT_TIMINGS_PART2), + SDVO_CMD_NAME_ENTRY(GET_OUTPUT_TIMINGS_PART1), + SDVO_CMD_NAME_ENTRY(GET_OUTPUT_TIMINGS_PART2), + SDVO_CMD_NAME_ENTRY(CREATE_PREFERRED_INPUT_TIMING), + SDVO_CMD_NAME_ENTRY(GET_PREFERRED_INPUT_TIMING_PART1), + SDVO_CMD_NAME_ENTRY(GET_PREFERRED_INPUT_TIMING_PART2), + SDVO_CMD_NAME_ENTRY(GET_INPUT_PIXEL_CLOCK_RANGE), + SDVO_CMD_NAME_ENTRY(GET_OUTPUT_PIXEL_CLOCK_RANGE), + SDVO_CMD_NAME_ENTRY(GET_SUPPORTED_CLOCK_RATE_MULTS), + SDVO_CMD_NAME_ENTRY(GET_CLOCK_RATE_MULT), + SDVO_CMD_NAME_ENTRY(SET_CLOCK_RATE_MULT), + SDVO_CMD_NAME_ENTRY(GET_SUPPORTED_TV_FORMATS), + SDVO_CMD_NAME_ENTRY(GET_TV_FORMAT), + SDVO_CMD_NAME_ENTRY(SET_TV_FORMAT), + SDVO_CMD_NAME_ENTRY(GET_SUPPORTED_POWER_STATES), + SDVO_CMD_NAME_ENTRY(GET_POWER_STATE), + SDVO_CMD_NAME_ENTRY(SET_ENCODER_POWER_STATE), + SDVO_CMD_NAME_ENTRY(SET_DISPLAY_POWER_STATE), + SDVO_CMD_NAME_ENTRY(SET_CONTROL_BUS_SWITCH), + SDVO_CMD_NAME_ENTRY(GET_SDTV_RESOLUTION_SUPPORT), + SDVO_CMD_NAME_ENTRY(GET_SCALED_HDTV_RESOLUTION_SUPPORT), + SDVO_CMD_NAME_ENTRY(GET_SUPPORTED_ENHANCEMENTS), /* Add the op code for SDVO enhancements */ - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS), - 
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER), + SDVO_CMD_NAME_ENTRY(GET_MAX_HPOS), + SDVO_CMD_NAME_ENTRY(GET_HPOS), + SDVO_CMD_NAME_ENTRY(SET_HPOS), + SDVO_CMD_NAME_ENTRY(GET_MAX_VPOS), + SDVO_CMD_NAME_ENTRY(GET_VPOS), + SDVO_CMD_NAME_ENTRY(SET_VPOS), + SDVO_CMD_NAME_ENTRY(GET_MAX_SATURATION), + SDVO_CMD_NAME_ENTRY(GET_SATURATION), + SDVO_CMD_NAME_ENTRY(SET_SATURATION), + SDVO_CMD_NAME_ENTRY(GET_MAX_HUE), + SDVO_CMD_NAME_ENTRY(GET_HUE), + SDVO_CMD_NAME_ENTRY(SET_HUE), + SDVO_CMD_NAME_ENTRY(GET_MAX_CONTRAST), + SDVO_CMD_NAME_ENTRY(GET_CONTRAST), + SDVO_CMD_NAME_ENTRY(SET_CONTRAST), + SDVO_CMD_NAME_ENTRY(GET_MAX_BRIGHTNESS), + SDVO_CMD_NAME_ENTRY(GET_BRIGHTNESS), + SDVO_CMD_NAME_ENTRY(SET_BRIGHTNESS), + SDVO_CMD_NAME_ENTRY(GET_MAX_OVERSCAN_H), + SDVO_CMD_NAME_ENTRY(GET_OVERSCAN_H), + SDVO_CMD_NAME_ENTRY(SET_OVERSCAN_H), + SDVO_CMD_NAME_ENTRY(GET_MAX_OVERSCAN_V), + SDVO_CMD_NAME_ENTRY(GET_OVERSCAN_V), + SDVO_CMD_NAME_ENTRY(SET_OVERSCAN_V), + SDVO_CMD_NAME_ENTRY(GET_MAX_FLICKER_FILTER), + SDVO_CMD_NAME_ENTRY(GET_FLICKER_FILTER), + SDVO_CMD_NAME_ENTRY(SET_FLICKER_FILTER), + SDVO_CMD_NAME_ENTRY(GET_MAX_FLICKER_FILTER_ADAPTIVE), + SDVO_CMD_NAME_ENTRY(GET_FLICKER_FILTER_ADAPTIVE), + SDVO_CMD_NAME_ENTRY(SET_FLICKER_FILTER_ADAPTIVE), + SDVO_CMD_NAME_ENTRY(GET_MAX_FLICKER_FILTER_2D), + SDVO_CMD_NAME_ENTRY(GET_FLICKER_FILTER_2D), + SDVO_CMD_NAME_ENTRY(SET_FLICKER_FILTER_2D), + SDVO_CMD_NAME_ENTRY(GET_MAX_SHARPNESS), + SDVO_CMD_NAME_ENTRY(GET_SHARPNESS), + SDVO_CMD_NAME_ENTRY(SET_SHARPNESS), + SDVO_CMD_NAME_ENTRY(GET_DOT_CRAWL), + SDVO_CMD_NAME_ENTRY(SET_DOT_CRAWL), + SDVO_CMD_NAME_ENTRY(GET_MAX_TV_CHROMA_FILTER), + SDVO_CMD_NAME_ENTRY(GET_TV_CHROMA_FILTER), + SDVO_CMD_NAME_ENTRY(SET_TV_CHROMA_FILTER), + SDVO_CMD_NAME_ENTRY(GET_MAX_TV_LUMA_FILTER), + SDVO_CMD_NAME_ENTRY(GET_TV_LUMA_FILTER), + SDVO_CMD_NAME_ENTRY(SET_TV_LUMA_FILTER), /* HDMI op code */ - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA), - SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA), + SDVO_CMD_NAME_ENTRY(GET_SUPP_ENCODE), + SDVO_CMD_NAME_ENTRY(GET_ENCODE), + SDVO_CMD_NAME_ENTRY(SET_ENCODE), + SDVO_CMD_NAME_ENTRY(SET_PIXEL_REPLI), + SDVO_CMD_NAME_ENTRY(GET_PIXEL_REPLI), + SDVO_CMD_NAME_ENTRY(GET_COLORIMETRY_CAP), + SDVO_CMD_NAME_ENTRY(SET_COLORIMETRY), + 
SDVO_CMD_NAME_ENTRY(GET_COLORIMETRY), + SDVO_CMD_NAME_ENTRY(GET_AUDIO_ENCRYPT_PREFER), + SDVO_CMD_NAME_ENTRY(SET_AUDIO_STAT), + SDVO_CMD_NAME_ENTRY(GET_AUDIO_STAT), + SDVO_CMD_NAME_ENTRY(GET_HBUF_INDEX), + SDVO_CMD_NAME_ENTRY(SET_HBUF_INDEX), + SDVO_CMD_NAME_ENTRY(GET_HBUF_INFO), + SDVO_CMD_NAME_ENTRY(GET_HBUF_AV_SPLIT), + SDVO_CMD_NAME_ENTRY(SET_HBUF_AV_SPLIT), + SDVO_CMD_NAME_ENTRY(GET_HBUF_TXRATE), + SDVO_CMD_NAME_ENTRY(SET_HBUF_TXRATE), + SDVO_CMD_NAME_ENTRY(SET_HBUF_DATA), + SDVO_CMD_NAME_ENTRY(GET_HBUF_DATA), }; +#undef SDVO_CMD_NAME_ENTRY + #define SDVO_NAME(svdo) ((svdo)->port == PORT_B ? "SDVOB" : "SDVOC") static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd, -- cgit v1.2.3 From a46f4e9e5ae99cc6da81d84b6eb4ec7c19a81a2f Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 19 Jun 2019 21:03:12 +0300 Subject: drm/i915/sdvo: Add helpers to get the cmd/status string MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add sdvo_cmd_name() and sdvo_cmd_status() helpers to avoid bothering the callers with the implementation details of the storage for these strings. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190619180312.31817-6-ville.syrjala@linux.intel.com Reviewed-by: Chris Wilson --- drivers/gpu/drm/i915/display/intel_sdvo.c | 40 +++++++++++++++++++++++-------- 1 file changed, 30 insertions(+), 10 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index bde11c03cd5a..651bf26bd1a3 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -395,11 +395,24 @@ static const struct { #undef SDVO_CMD_NAME_ENTRY +static const char *sdvo_cmd_name(u8 cmd) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) { + if (cmd == sdvo_cmd_names[i].cmd) + return sdvo_cmd_names[i].name; + } + + return NULL; +} + #define SDVO_NAME(svdo) ((svdo)->port == PORT_B ? 
"SDVOB" : "SDVOC") static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd, const void *args, int args_len) { + const char *cmd_name; int i, pos = 0; #define BUF_LEN 256 char buffer[BUF_LEN]; @@ -414,15 +427,12 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd, for (; i < 8; i++) { BUF_PRINT(" "); } - for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) { - if (cmd == sdvo_cmd_names[i].cmd) { - BUF_PRINT("(%s)", sdvo_cmd_names[i].name); - break; - } - } - if (i == ARRAY_SIZE(sdvo_cmd_names)) { + + cmd_name = sdvo_cmd_name(cmd); + if (cmd_name) + BUF_PRINT("(%s)", cmd_name); + else BUF_PRINT("(%02X)", cmd); - } BUG_ON(pos >= BUF_LEN - 1); #undef BUF_PRINT #undef BUF_LEN @@ -440,6 +450,14 @@ static const char * const cmd_status_names[] = { [SDVO_CMD_STATUS_SCALING_NOT_SUPP] = "Scaling not supported", }; +static const char *sdvo_cmd_status(u8 status) +{ + if (status < ARRAY_SIZE(cmd_status_names)) + return cmd_status_names[status]; + else + return NULL; +} + static bool __intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd, const void *args, int args_len, bool unlocked) @@ -518,6 +536,7 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd, static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, void *response, int response_len) { + const char *cmd_status; u8 retry = 15; /* 5 quick checks, followed by 10 long checks */ u8 status; int i, pos = 0; @@ -564,8 +583,9 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, #define BUF_PRINT(args...) \ pos += snprintf(buffer + pos, max_t(int, BUF_LEN - pos, 0), args) - if (status < ARRAY_SIZE(cmd_status_names)) - BUF_PRINT("(%s)", cmd_status_names[status]); + cmd_status = sdvo_cmd_status(status); + if (cmd_status) + BUF_PRINT("(%s)", cmd_status); else BUF_PRINT("(??? %d)", status); -- cgit v1.2.3 From 5c28e3a567fed9c22cb79f01294b1aaefee344fd Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Mon, 8 Jul 2019 17:07:34 +0300 Subject: drm/i915: Clear the shared PLL from the put_dplls() hook MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For symmetry with the get_dplls() hook which sets the shared_dpll pointer clear the same pointer from the put_dplls() hook. While at it also constify the old crtc state. v2: - Constify the old crtc state. 
(Ville) Signed-off-by: Imre Deak Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190708140735.20198-1-imre.deak@intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 2 -- drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 20 ++++++++++++++------ 2 files changed, 14 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index f09eda75711a..f07081815b80 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -13307,8 +13307,6 @@ static void intel_modeset_clear_plls(struct intel_atomic_state *state) if (!needs_modeset(new_crtc_state)) continue; - new_crtc_state->shared_dpll = NULL; - intel_release_shared_dplls(state, crtc); } } diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index 67cfe836286e..5c18f9012062 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -323,13 +323,17 @@ static void intel_unreference_shared_dpll(struct intel_atomic_state *state, static void intel_put_dpll(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct intel_crtc_state *crtc_state = + const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); + struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + + new_crtc_state->shared_dpll = NULL; - if (!crtc_state->shared_dpll) + if (!old_crtc_state->shared_dpll) return; - intel_unreference_shared_dpll(state, crtc, crtc_state->shared_dpll); + intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll); } /** @@ -3015,13 +3019,17 @@ static bool icl_get_dplls(struct intel_atomic_state *state, static void icl_put_dplls(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct intel_crtc_state *crtc_state = + const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); + struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); enum icl_port_dpll_id id; + new_crtc_state->shared_dpll = NULL; + for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) { - struct icl_port_dpll *port_dpll = - &crtc_state->icl_port_dplls[id]; + const struct icl_port_dpll *port_dpll = + &old_crtc_state->icl_port_dplls[id]; if (!port_dpll->pll) continue; -- cgit v1.2.3 From f7ddc80ecb0e19e2c706747d1540b7699b4198d3 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Mon, 8 Jul 2019 17:07:35 +0300 Subject: drm/i915/icl: Clear the shared port PLLs from the new crtc state MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For consistency clear the icl_port_dplls from the new crtc state, when releasing the DPLLs from the old crtc state. Leaving them set could result in releasing the same PLLs multiple times from the same CRTC state incorrectly (if the same CRTC was first used for a TypeC port then for a combo PHY port). Leaving the stale pointers behind happens not to cause a problem atm (since the incorrect releasing will be a NOP), but we need to fix that for consistency. 
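Sketch of the shape of the fix, mirroring the hunk below: releasing is driven off the old crtc state, while the pointers in the new crtc state are cleared unconditionally so a later put of the same state cannot drop the reference a second time:

	new_port_dpll->pll = NULL;		/* always clear the new state */
	if (!old_port_dpll->pll)
		continue;
	intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);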
Signed-off-by: Imre Deak Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190708140735.20198-2-imre.deak@intel.com --- drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index 5c18f9012062..30d7500eb66c 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -3028,15 +3028,17 @@ static void icl_put_dplls(struct intel_atomic_state *state, new_crtc_state->shared_dpll = NULL; for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) { - const struct icl_port_dpll *port_dpll = + const struct icl_port_dpll *old_port_dpll = &old_crtc_state->icl_port_dplls[id]; + struct icl_port_dpll *new_port_dpll = + &new_crtc_state->icl_port_dplls[id]; - if (!port_dpll->pll) - continue; + new_port_dpll->pll = NULL; - intel_unreference_shared_dpll(state, crtc, port_dpll->pll); + if (!old_port_dpll->pll) + continue; - /* FIXME: Clear the icl_port_dplls from the new crtc state */ + intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll); } } -- cgit v1.2.3 From 681c774d349018c0f47a770133b952f50328d957 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 9 Jul 2019 09:17:18 +0100 Subject: drm/i915/userptr: Don't mark readonly objects as dirty If we map an object as readonly into the GTT, we know that the GPU cannot have written to it and so the object is not dirty and we don't need to flush the writes back to the system. Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190709081718.27843-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index 32d208ede343..b9d2bb15e4a6 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -663,6 +663,14 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj, __i915_gem_object_release_shmem(obj, pages, true); i915_gem_gtt_finish_pages(obj, pages); + /* + * We always mark objects as dirty when they are used by the GPU, + * just in case. However, if we set the vma as being read-only we know + * that the object will never have been written to. + */ + if (i915_gem_object_is_readonly(obj)) + obj->mm.dirty = false; + for_each_sgt_page(page, sgt_iter, pages) { if (obj->mm.dirty) /* -- cgit v1.2.3 From a5af1df716c123a09341351008fc497bea137b77 Mon Sep 17 00:00:00 2001 From: Lionel Landwerlin Date: Tue, 9 Jul 2019 15:33:39 +0300 Subject: drm/i915/perf: ensure we keep a reference on the driver The i915 perf stream has its own file descriptor and is tied to reference of the driver. We haven't taken care of keep the driver alive. 
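The shape of the fix is the usual one for any feature that hands userspace its own long-lived fd: take a reference on the drm_device when the stream fd is created and drop it in the fd's release hook, so the device cannot be torn down underneath an open stream. Roughly, mirroring the hunks below:

	/* i915_perf_open_ioctl_locked(), once stream_fd is set up */
	drm_dev_get(&dev_priv->drm);

	/* i915_perf_release() */
	drm_dev_put(&dev_priv->drm);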
Signed-off-by: Lionel Landwerlin Suggested-by: Chris Wilson Fixes: eec688e1420da5 ("drm/i915: Add i915 perf infrastructure") Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190709123351.5645-2-lionel.g.landwerlin@intel.com --- drivers/gpu/drm/i915/i915_perf.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 357e63beb373..27842e7bcfed 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -2517,6 +2517,9 @@ static int i915_perf_release(struct inode *inode, struct file *file) i915_perf_destroy_locked(stream); mutex_unlock(&dev_priv->perf.lock); + /* Release the reference the perf stream kept on the driver. */ + drm_dev_put(&dev_priv->drm); + return 0; } @@ -2652,6 +2655,11 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv, if (!(param->flags & I915_PERF_FLAG_DISABLED)) i915_perf_enable_locked(stream); + /* Take a reference on the driver that will be kept with stream_fd + * until its release. + */ + drm_dev_get(&dev_priv->drm); + return stream_fd; err_open: -- cgit v1.2.3 From 46c5847e3d97f5afced32f7474e7daea819da7c9 Mon Sep 17 00:00:00 2001 From: Lionel Landwerlin Date: Tue, 9 Jul 2019 15:33:43 +0300 Subject: drm/i915: enumerate scratch fields We have a bunch of offsets in the scratch buffer. As we're about to add some more, let's group all of the offsets in a common location. Signed-off-by: Lionel Landwerlin Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190709123351.5645-6-lionel.g.landwerlin@intel.com --- drivers/gpu/drm/i915/gt/intel_gt.h | 5 +++-- drivers/gpu/drm/i915/gt/intel_gt_types.h | 15 +++++++++++++++ drivers/gpu/drm/i915/gt/intel_lrc.c | 18 ++++++++++++----- drivers/gpu/drm/i915/gt/intel_ringbuffer.c | 31 ++++++++++++++++++++---------- 4 files changed, 52 insertions(+), 17 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h index cf3c6cecc8ee..1093dcf36f63 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.h +++ b/drivers/gpu/drm/i915/gt/intel_gt.h @@ -24,9 +24,10 @@ void intel_gt_chipset_flush(struct intel_gt *gt); int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size); void intel_gt_fini_scratch(struct intel_gt *gt); -static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt) +static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt, + enum intel_gt_scratch_field field) { - return i915_ggtt_offset(gt->scratch); + return i915_ggtt_offset(gt->scratch) + field; } #endif /* __INTEL_GT_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h index 37da428bef62..3563ce970102 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h @@ -60,4 +60,19 @@ struct intel_gt { u32 pm_ier; }; +enum intel_gt_scratch_field { + /* 8 bytes */ + INTEL_GT_SCRATCH_FIELD_DEFAULT = 0, + + /* 8 bytes */ + INTEL_GT_SCRATCH_FIELD_CLEAR_SLM_WA = 128, + + /* 8 bytes */ + INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH = 128, + + /* 8 bytes */ + INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA = 256, + +}; + #endif /* __INTEL_GT_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index e1ae1399c72b..dec735405195 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -1782,7 
+1782,8 @@ gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch) /* NB no one else is allowed to scribble over scratch + 256! */ *batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT; *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4); - *batch++ = intel_gt_scratch_offset(engine->gt) + 256; + *batch++ = intel_gt_scratch_offset(engine->gt, + INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA); *batch++ = 0; *batch++ = MI_LOAD_REGISTER_IMM(1); @@ -1796,12 +1797,19 @@ gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch) *batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT; *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4); - *batch++ = intel_gt_scratch_offset(engine->gt) + 256; + *batch++ = intel_gt_scratch_offset(engine->gt, + INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA); *batch++ = 0; return batch; } +static u32 slm_offset(struct intel_engine_cs *engine) +{ + return intel_gt_scratch_offset(engine->gt, + INTEL_GT_SCRATCH_FIELD_CLEAR_SLM_WA); +} + /* * Typically we only have one indirect_ctx and per_ctx batch buffer which are * initialized at the beginning and shared across all contexts but this field @@ -1833,8 +1841,7 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL | PIPE_CONTROL_QW_WRITE, - intel_gt_scratch_offset(engine->gt) + - 2 * CACHELINE_BYTES); + slm_offset(engine)); *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; @@ -2528,7 +2535,8 @@ static int gen8_emit_flush_render(struct i915_request *request, { struct intel_engine_cs *engine = request->engine; u32 scratch_addr = - intel_gt_scratch_offset(engine->gt) + 2 * CACHELINE_BYTES; + intel_gt_scratch_offset(engine->gt, + INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH); bool vf_flush_wa = false, dc_flush_wa = false; u32 *cs, flags = 0; int len; diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c index b33cfc56f623..a98652e4055c 100644 --- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c @@ -76,7 +76,8 @@ gen2_render_ring_flush(struct i915_request *rq, u32 mode) *cs++ = cmd; while (num_store_dw--) { *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; - *cs++ = intel_gt_scratch_offset(rq->engine->gt); + *cs++ = intel_gt_scratch_offset(rq->engine->gt, + INTEL_GT_SCRATCH_FIELD_DEFAULT); *cs++ = 0; } *cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH; @@ -149,7 +150,8 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode) */ if (mode & EMIT_INVALIDATE) { *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE; - *cs++ = intel_gt_scratch_offset(rq->engine->gt) | + *cs++ = intel_gt_scratch_offset(rq->engine->gt, + INTEL_GT_SCRATCH_FIELD_DEFAULT) | PIPE_CONTROL_GLOBAL_GTT; *cs++ = 0; *cs++ = 0; @@ -158,7 +160,8 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode) *cs++ = MI_FLUSH; *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE; - *cs++ = intel_gt_scratch_offset(rq->engine->gt) | + *cs++ = intel_gt_scratch_offset(rq->engine->gt, + INTEL_GT_SCRATCH_FIELD_DEFAULT) | PIPE_CONTROL_GLOBAL_GTT; *cs++ = 0; *cs++ = 0; @@ -212,7 +215,8 @@ static int gen6_emit_post_sync_nonzero_flush(struct i915_request *rq) { u32 scratch_addr = - intel_gt_scratch_offset(rq->engine->gt) + 2 * CACHELINE_BYTES; + intel_gt_scratch_offset(rq->engine->gt, + INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH); u32 *cs; cs = intel_ring_begin(rq, 6); @@ -246,7 +250,8 @@ static int gen6_render_ring_flush(struct i915_request *rq, u32 mode) { u32 scratch_addr = - intel_gt_scratch_offset(rq->engine->gt) + 2 
* CACHELINE_BYTES; + intel_gt_scratch_offset(rq->engine->gt, + INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH); u32 *cs, flags = 0; int ret; @@ -304,7 +309,8 @@ static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) *cs++ = GFX_OP_PIPE_CONTROL(4); *cs++ = PIPE_CONTROL_QW_WRITE; - *cs++ = intel_gt_scratch_offset(rq->engine->gt) | + *cs++ = intel_gt_scratch_offset(rq->engine->gt, + INTEL_GT_SCRATCH_FIELD_DEFAULT) | PIPE_CONTROL_GLOBAL_GTT; *cs++ = 0; @@ -349,7 +355,8 @@ static int gen7_render_ring_flush(struct i915_request *rq, u32 mode) { u32 scratch_addr = - intel_gt_scratch_offset(rq->engine->gt) + 2 * CACHELINE_BYTES; + intel_gt_scratch_offset(rq->engine->gt, + INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH); u32 *cs, flags = 0; /* @@ -1078,7 +1085,9 @@ i830_emit_bb_start(struct i915_request *rq, u64 offset, u32 len, unsigned int dispatch_flags) { - u32 *cs, cs_offset = intel_gt_scratch_offset(rq->engine->gt); + u32 *cs, cs_offset = + intel_gt_scratch_offset(rq->engine->gt, + INTEL_GT_SCRATCH_FIELD_DEFAULT); GEM_BUG_ON(rq->engine->gt->scratch->size < I830_WA_SIZE); @@ -1522,7 +1531,8 @@ static int flush_pd_dir(struct i915_request *rq) /* Stall until the page table load is complete */ *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base)); - *cs++ = intel_gt_scratch_offset(rq->engine->gt); + *cs++ = intel_gt_scratch_offset(rq->engine->gt, + INTEL_GT_SCRATCH_FIELD_DEFAULT); *cs++ = MI_NOOP; intel_ring_advance(rq, cs); @@ -1638,7 +1648,8 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags) /* Insert a delay before the next switch! */ *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; *cs++ = i915_mmio_reg_offset(last_reg); - *cs++ = intel_gt_scratch_offset(rq->engine->gt); + *cs++ = intel_gt_scratch_offset(rq->engine->gt, + INTEL_GT_SCRATCH_FIELD_DEFAULT); *cs++ = MI_NOOP; } *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; -- cgit v1.2.3 From 2a98f4e65bba2db83489aaadd3f9db26547cbd35 Mon Sep 17 00:00:00 2001 From: Lionel Landwerlin Date: Tue, 9 Jul 2019 17:42:27 +0100 Subject: drm/i915: add infrastructure to hold off preemption on a request We want to set this flag in the next commit on requests containing perf queries so that the result of the perf query can just be a delta of global counters, rather than doing post processing of the OA buffer. Signed-off-by: Lionel Landwerlin Reviewed-by: Chris Wilson [ickle: add basic selftest for nopreempt] Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190709164227.25859-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_lrc.c | 11 +++ drivers/gpu/drm/i915/gt/selftest_lrc.c | 109 ++++++++++++++++++++++++++++ drivers/gpu/drm/i915/i915_priolist_types.h | 10 +++ drivers/gpu/drm/i915/i915_request.c | 4 +- drivers/gpu/drm/i915/i915_request.h | 15 +++- drivers/gpu/drm/i915/intel_guc_submission.c | 13 +++- drivers/gpu/drm/i915/intel_pm.c | 5 +- 7 files changed, 161 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index dec735405195..19ce8eb5e5c9 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -258,6 +258,17 @@ static int effective_prio(const struct i915_request *rq) { int prio = rq_prio(rq); + /* + * If this request is special and must not be interrupted at any + * cost, so be it. 
Note we are only checking the most recent request + * in the context and so may be masking an earlier vip request. It + * is hoped that under the conditions where nopreempt is used, this + * will not matter (i.e. all requests to that context will be + * nopreempt for as long as desired). + */ + if (i915_request_has_nopreempt(rq)) + prio = I915_PRIORITY_UNPREEMPTABLE; + /* * On unwinding the active request, we give it a priority bump * if it has completed waiting on any semaphore. If we know that diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index 672bdaa66540..b9b881ab8e7c 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -721,6 +721,114 @@ static void preempt_client_fini(struct preempt_client *c) kernel_context_close(c->ctx); } +static int live_nopreempt(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct intel_engine_cs *engine; + struct preempt_client a, b; + enum intel_engine_id id; + intel_wakeref_t wakeref; + int err = -ENOMEM; + + /* + * Verify that we can disable preemption for an individual request + * that may be being observed and not want to be interrupted. + */ + + if (!HAS_LOGICAL_RING_PREEMPTION(i915)) + return 0; + + mutex_lock(&i915->drm.struct_mutex); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); + + if (preempt_client_init(i915, &a)) + goto err_unlock; + if (preempt_client_init(i915, &b)) + goto err_client_a; + b.ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX); + + for_each_engine(engine, i915, id) { + struct i915_request *rq_a, *rq_b; + + if (!intel_engine_has_preemption(engine)) + continue; + + engine->execlists.preempt_hang.count = 0; + + rq_a = igt_spinner_create_request(&a.spin, + a.ctx, engine, + MI_ARB_CHECK); + if (IS_ERR(rq_a)) { + err = PTR_ERR(rq_a); + goto err_client_b; + } + + /* Low priority client, but unpreemptable! */ + rq_a->flags |= I915_REQUEST_NOPREEMPT; + + i915_request_add(rq_a); + if (!igt_wait_for_spinner(&a.spin, rq_a)) { + pr_err("First client failed to start\n"); + goto err_wedged; + } + + rq_b = igt_spinner_create_request(&b.spin, + b.ctx, engine, + MI_ARB_CHECK); + if (IS_ERR(rq_b)) { + err = PTR_ERR(rq_b); + goto err_client_b; + } + + i915_request_add(rq_b); + + /* B is much more important than A! (But A is unpreemptable.) 
*/ + GEM_BUG_ON(rq_prio(rq_b) <= rq_prio(rq_a)); + + /* Wait long enough for preemption and timeslicing */ + if (igt_wait_for_spinner(&b.spin, rq_b)) { + pr_err("Second client started too early!\n"); + goto err_wedged; + } + + igt_spinner_end(&a.spin); + + if (!igt_wait_for_spinner(&b.spin, rq_b)) { + pr_err("Second client failed to start\n"); + goto err_wedged; + } + + igt_spinner_end(&b.spin); + + if (engine->execlists.preempt_hang.count) { + pr_err("Preemption recorded x%d; should have been suppressed!\n", + engine->execlists.preempt_hang.count); + err = -EINVAL; + goto err_wedged; + } + + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + goto err_wedged; + } + + err = 0; +err_client_b: + preempt_client_fini(&b); +err_client_a: + preempt_client_fini(&a); +err_unlock: + intel_runtime_pm_put(&i915->runtime_pm, wakeref); + mutex_unlock(&i915->drm.struct_mutex); + return err; + +err_wedged: + igt_spinner_end(&b.spin); + igt_spinner_end(&a.spin); + i915_gem_set_wedged(i915); + err = -EIO; + goto err_client_b; +} + static int live_suppress_self_preempt(void *arg) { struct drm_i915_private *i915 = arg; @@ -2028,6 +2136,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915) SUBTEST(live_busywait_preempt), SUBTEST(live_preempt), SUBTEST(live_late_preempt), + SUBTEST(live_nopreempt), SUBTEST(live_suppress_self_preempt), SUBTEST(live_suppress_wait_preempt), SUBTEST(live_chain_preempt), diff --git a/drivers/gpu/drm/i915/i915_priolist_types.h b/drivers/gpu/drm/i915/i915_priolist_types.h index 49709de69875..b02dea17dcab 100644 --- a/drivers/gpu/drm/i915/i915_priolist_types.h +++ b/drivers/gpu/drm/i915/i915_priolist_types.h @@ -17,6 +17,16 @@ enum { I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY, I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1, + /* + * Requests containing performance queries must not be preempted by + * another context. They get scheduled with their default priority and + * once they reach the execlist ports we ensure that they stick on the + * HW until finished by pretending that they have maximum priority, + * i.e. nothing can have higher priority and force us to usurp the + * active request. + */ + I915_PRIORITY_UNPREEMPTABLE = INT_MAX, + I915_PRIORITY_INVALID = INT_MIN }; diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 5ff87c4a0cd5..222c9c56e9de 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -292,7 +292,7 @@ static bool i915_request_retire(struct i915_request *rq) dma_fence_signal_locked(&rq->fence); if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags)) i915_request_cancel_breadcrumb(rq); - if (rq->waitboost) { + if (i915_request_has_waitboost(rq)) { GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters)); atomic_dec(&rq->i915->gt_pm.rps.num_waiters); } @@ -684,7 +684,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp) rq->file_priv = NULL; rq->batch = NULL; rq->capture_list = NULL; - rq->waitboost = false; + rq->flags = 0; rq->execution_mask = ALL_ENGINES; INIT_LIST_HEAD(&rq->active_list); diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index b58ceef92e20..313df3c37158 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -216,7 +216,9 @@ struct i915_request { /** Time at which this request was emitted, in jiffies. 
*/ unsigned long emitted_jiffies; - bool waitboost; + unsigned long flags; +#define I915_REQUEST_WAITBOOST BIT(0) +#define I915_REQUEST_NOPREEMPT BIT(1) /** timeline->request entry for this request */ struct list_head link; @@ -430,6 +432,17 @@ static inline void i915_request_mark_complete(struct i915_request *rq) rq->hwsp_seqno = (u32 *)&rq->fence.seqno; /* decouple from HWSP */ } +static inline bool i915_request_has_waitboost(const struct i915_request *rq) +{ + return rq->flags & I915_REQUEST_WAITBOOST; +} + +static inline bool i915_request_has_nopreempt(const struct i915_request *rq) +{ + /* Preemption should only be disabled very rarely */ + return unlikely(rq->flags & I915_REQUEST_NOPREEMPT); +} + bool i915_retire_requests(struct drm_i915_private *i915); #endif /* I915_REQUEST_H */ diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c index 12c22359fdac..f104b94c14ef 100644 --- a/drivers/gpu/drm/i915/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/intel_guc_submission.c @@ -707,6 +707,16 @@ static inline int rq_prio(const struct i915_request *rq) return rq->sched.attr.priority | __NO_PREEMPTION; } +static inline int effective_prio(const struct i915_request *rq) +{ + int prio = rq_prio(rq); + + if (i915_request_has_nopreempt(rq)) + prio = I915_PRIORITY_UNPREEMPTABLE; + + return prio; +} + static struct i915_request *schedule_in(struct i915_request *rq, int idx) { trace_i915_request_in(rq, idx); @@ -747,7 +757,8 @@ static void __guc_dequeue(struct intel_engine_cs *engine) &engine->i915->guc.preempt_work[engine->id]; int prio = execlists->queue_priority_hint; - if (i915_scheduler_need_preempt(prio, rq_prio(last))) { + if (i915_scheduler_need_preempt(prio, + effective_prio(last))) { intel_write_status_page(engine, I915_GEM_HWS_PREEMPT, GUC_PREEMPT_INPROGRESS); diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 87244d8215a7..0cecea228546 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -6876,9 +6876,10 @@ void gen6_rps_boost(struct i915_request *rq) /* Serializes with i915_request_retire() */ boost = false; spin_lock_irqsave(&rq->lock, flags); - if (!rq->waitboost && !dma_fence_is_signaled_locked(&rq->fence)) { + if (!i915_request_has_waitboost(rq) && + !dma_fence_is_signaled_locked(&rq->fence)) { boost = !atomic_fetch_inc(&rps->num_waiters); - rq->waitboost = true; + rq->flags |= I915_REQUEST_WAITBOOST; } spin_unlock_irqrestore(&rq->lock, flags); if (!boost) -- cgit v1.2.3 From b33c61db7291d376e956f2657c453d966e558974 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Tue, 9 Jul 2019 10:31:05 +0100 Subject: drm/i915: Remove unused i915_gem_context_lookup_engine There are no known plans to start using it either. 
Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190709093105.24699-1-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/gem/i915_gem_context.h | 6 ------ 1 file changed, 6 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h index 9691dd062f72..106e2ccf7a4c 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h @@ -197,12 +197,6 @@ i915_gem_context_unlock_engines(struct i915_gem_context *ctx) mutex_unlock(&ctx->engines_mutex); } -static inline struct intel_context * -i915_gem_context_lookup_engine(struct i915_gem_context *ctx, unsigned int idx) -{ - return i915_gem_context_engines(ctx)->engines[idx]; -} - static inline struct intel_context * i915_gem_context_get_engine(struct i915_gem_context *ctx, unsigned int idx) { -- cgit v1.2.3 From 58d1b427148a3bd16e62f16558d2f45527727eb1 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 10 Jul 2019 07:44:41 +0100 Subject: drm/i915/execlists: Record preemption for selftests Put back the preemption counters lost in commit 22b7a426bbe1 ("drm/i915/execlists: Preempt-to-busy") so that our selftests that assert no preemption took place continue to function. v2: But a timeslice is only a "soft" preemption! Fixes: 22b7a426bbe1 ("drm/i915/execlists: Preempt-to-busy") Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190710064454.682-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_lrc.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 19ce8eb5e5c9..270ef417dd1a 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -921,6 +921,11 @@ enable_timeslice(struct intel_engine_cs *engine) return last && need_timeslice(engine, last); } +static void record_preemption(struct intel_engine_execlists *execlists) +{ + (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++); +} + static void execlists_dequeue(struct intel_engine_cs *engine) { struct intel_engine_execlists * const execlists = &engine->execlists; @@ -989,6 +994,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine) last->fence.seqno, last->sched.attr.priority, execlists->queue_priority_hint); + record_preemption(execlists); + /* * Don't let the RING_HEAD advance past the breadcrumb * as we unwind (and until we resubmit) so that we do -- cgit v1.2.3 From 14bfcd3e0daeb0f757a02aac85fd03e0933ab37e Mon Sep 17 00:00:00 2001 From: Lionel Landwerlin Date: Wed, 10 Jul 2019 11:55:24 +0100 Subject: drm/i915/perf: add missing delay for OA muxes configuration This was dropped from the original patch series, we weren't sure whether it was needed at the time. More recent tests show it's definitely needed to have acurate performance data. 
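The fix itself is small, mirroring the hunks below: after writing the OA MUX configuration registers, wait for the new routing to settle before programming the boolean counters, reusing the empirically derived bounds already present in the Haswell path:

	config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
	usleep_range(15000, 20000);	/* give the new MUX configuration time to land */
	config_oa_regs(dev_priv, oa_config->b_counter_regs,
		       oa_config->b_counter_regs_len);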
Signed-off-by: Lionel Landwerlin Fixes: 19f81df2859eb1 ("drm/i915/perf: Add OA unit support for Gen 8+") Acked-by: Chris Wilson [ickle: combine duplicate code and comments] Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190710105524.23017-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_perf.c | 49 +++++++++++++++++++++++----------------- 1 file changed, 28 insertions(+), 21 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 27842e7bcfed..007826ded9b3 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -1569,28 +1569,10 @@ static void config_oa_regs(struct drm_i915_private *dev_priv, } } -static int hsw_enable_metric_set(struct i915_perf_stream *stream) +static void delay_after_mux(void) { - struct drm_i915_private *dev_priv = stream->dev_priv; - const struct i915_oa_config *oa_config = stream->oa_config; - - /* PRM: - * - * OA unit is using “crclk” for its functionality. When trunk - * level clock gating takes place, OA clock would be gated, - * unable to count the events from non-render clock domain. - * Render clock gating must be disabled when OA is enabled to - * count the events from non-render domain. Unit level clock - * gating for RCS should also be disabled. - */ - I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) & - ~GEN7_DOP_CLOCK_GATE_ENABLE)); - I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) | - GEN6_CSUNIT_CLOCK_GATE_DISABLE)); - - config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len); - - /* It apparently takes a fairly long time for a new MUX + /* + * It apparently takes a fairly long time for a new MUX * configuration to be be applied after these register writes. * This delay duration was derived empirically based on the * render_basic config but hopefully it covers the maximum @@ -1612,6 +1594,30 @@ static int hsw_enable_metric_set(struct i915_perf_stream *stream) * a delay at this location would mitigate any invalid reports. */ usleep_range(15000, 20000); +} + +static int hsw_enable_metric_set(struct i915_perf_stream *stream) +{ + struct drm_i915_private *dev_priv = stream->dev_priv; + const struct i915_oa_config *oa_config = stream->oa_config; + + /* + * PRM: + * + * OA unit is using “crclk” for its functionality. When trunk + * level clock gating takes place, OA clock would be gated, + * unable to count the events from non-render clock domain. + * Render clock gating must be disabled when OA is enabled to + * count the events from non-render domain. Unit level clock + * gating for RCS should also be disabled. 
+ */ + I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) & + ~GEN7_DOP_CLOCK_GATE_ENABLE)); + I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) | + GEN6_CSUNIT_CLOCK_GATE_DISABLE)); + + config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len); + delay_after_mux(); config_oa_regs(dev_priv, oa_config->b_counter_regs, oa_config->b_counter_regs_len); @@ -1837,6 +1843,7 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream) return ret; config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len); + delay_after_mux(); config_oa_regs(dev_priv, oa_config->b_counter_regs, oa_config->b_counter_regs_len); -- cgit v1.2.3 From 27ee72477c9b77ebf0e2827b8679b547ab556673 Mon Sep 17 00:00:00 2001 From: Uma Shankar Date: Fri, 28 Jun 2019 13:32:28 +0530 Subject: drm/i915/icl: Handle YCbCr to RGB conversion for BT2020 case MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently input csc for YCbCR to RGB conversion handles only BT601 and Bt709. Extending it to support BT2020 as well. v2: Fixed the co-efficients for LR to FR conversion, as suggested by Ville. v3: Fixed Y Pre-offset in case of Full Range YCbCr as suggested by Ville. v4: Split the v2 and v3 changes. v5: Rebase v6: Fix a rebase fumble. Reviewed-by: Ville Syrjälä Signed-off-by: Uma Shankar Signed-off-by: Shashank Sharma Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190628080230.27492-2-uma.shankar@intel.com --- drivers/gpu/drm/i915/display/intel_sprite.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index 34586f29be60..aeeeffbe4683 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -444,6 +444,18 @@ icl_program_input_csc(struct intel_plane *plane, 0x9EF8, 0x7800, 0xABF8, 0x0, 0x7800, 0x7ED8, }, + /* + * BT.2020 full range YCbCr -> full range RGB + * The matrix required is : + * [1.000, 0.000, 1.474, + * 1.000, -0.1645, -0.5713, + * 1.000, 1.8814, 0.0000] + */ + [DRM_COLOR_YCBCR_BT2020] = { + 0x7BC8, 0x7800, 0x0, + 0x8928, 0x7800, 0xAA88, + 0x0, 0x7800, 0x7F10, + }, }; /* Matrix for Limited Range to Full Range Conversion */ @@ -472,6 +484,18 @@ icl_program_input_csc(struct intel_plane *plane, 0x8888, 0x7918, 0xADA8, 0x0, 0x7918, 0x6870, }, + /* + * BT.2020 Limited range YCbCr -> full range RGB + * The matrix required is : + * [1.164, 0.000, 1.678, + * 1.164, -0.1873, -0.6504, + * 1.164, 2.1417, 0.0000] + */ + [DRM_COLOR_YCBCR_BT2020] = { + 0x7D70, 0x7950, 0x0, + 0x8A68, 0x7950, 0xAC00, + 0x0, 0x7950, 0x6890, + }, }; const u16 *csc; -- cgit v1.2.3 From 9856bf7f779a6613bad0a9d628b2e82e00fdec66 Mon Sep 17 00:00:00 2001 From: Uma Shankar Date: Fri, 28 Jun 2019 13:32:29 +0530 Subject: drm/i915/icl: Fix Y pre-offset for Full Range YCbCr MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixed Y Pre-offset in case of Full Range YCbCr. 
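For readers wondering where coefficient values such as 1.474/1.8814 (full range) or 1.164/1.678/2.1417 (limited range) come from, the snippet below derives the YCbCr-to-RGB matrix from the luma weights Kr/Kb of each standard. It is a back-of-envelope standalone check only, matching the quoted coefficients up to rounding, and it does not model the fixed-point register encoding (the 0x7xxx constants) used by the hardware.

/*
 * Given Kr/Kb, the matrix follows from R = Y + 2(1-Kr)Cr,
 * B = Y + 2(1-Kb)Cb, G = Y - 2Kr(1-Kr)/Kg*Cr - 2Kb(1-Kb)/Kg*Cb with
 * Kg = 1-Kr-Kb, scaled by 255/219 (luma) and 255/224 (chroma) for
 * limited-range input. Columns are printed in [Y, Cb, Cr] order to
 * match the comments in the patches.
 */
#include <stdio.h>

static void print_matrix(const char *name, double kr, double kb, int limited)
{
	double kg = 1.0 - kr - kb;
	double ys = limited ? 255.0 / 219.0 : 1.0;	/* luma expansion */
	double cs = limited ? 255.0 / 224.0 : 1.0;	/* chroma expansion */

	printf("%s (%s range input):\n", name, limited ? "limited" : "full");
	printf("  [ %9.6f  %9.6f  %9.6f ]\n", ys, 0.0, 2.0 * (1.0 - kr) * cs);
	printf("  [ %9.6f  %9.6f  %9.6f ]\n", ys,
	       -2.0 * kb * (1.0 - kb) / kg * cs,
	       -2.0 * kr * (1.0 - kr) / kg * cs);
	printf("  [ %9.6f  %9.6f  %9.6f ]\n", ys, 2.0 * (1.0 - kb) * cs, 0.0);
}

int main(void)
{
	print_matrix("BT.709",  0.2126, 0.0722, 1);
	print_matrix("BT.2020", 0.2627, 0.0593, 1);
	print_matrix("BT.2020", 0.2627, 0.0593, 0);
	return 0;
}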
v2: Rebase Reviewed-by: Ville Syrjälä Suggested-by: Ville Syrjälä Signed-off-by: Uma Shankar Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190628080230.27492-3-uma.shankar@intel.com --- drivers/gpu/drm/i915/display/intel_sprite.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index aeeeffbe4683..57b0d70f4d7b 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -516,8 +516,11 @@ icl_program_input_csc(struct intel_plane *plane, I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0), PREOFF_YUV_TO_RGB_HI); - I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1), - PREOFF_YUV_TO_RGB_ME); + if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE) + I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1), 0); + else + I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1), + PREOFF_YUV_TO_RGB_ME); I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2), PREOFF_YUV_TO_RGB_LO); I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0); -- cgit v1.2.3 From caceff96d5b7b487da19c0916e0048ff8f1709a9 Mon Sep 17 00:00:00 2001 From: Uma Shankar Date: Fri, 28 Jun 2019 13:32:30 +0530 Subject: drm/i915/icl: Fixed Input CSC Co-efficients for BT601/709 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Input CSC Co-efficients for BT601 and BT709 YCbCR to RGB conversion were slightly off. Fixed the same. v2: Fixed the co-eficients as there was issue with reference matrix, spotted by Ville. v3: Rebase Signed-off-by: Uma Shankar Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190628080230.27492-4-uma.shankar@intel.com --- drivers/gpu/drm/i915/display/intel_sprite.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index 57b0d70f4d7b..5e5ea867aae9 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -441,7 +441,7 @@ icl_program_input_csc(struct intel_plane *plane, */ [DRM_COLOR_YCBCR_BT709] = { 0x7C98, 0x7800, 0x0, - 0x9EF8, 0x7800, 0xABF8, + 0x9EF8, 0x7800, 0xAC00, 0x0, 0x7800, 0x7ED8, }, /* @@ -463,26 +463,26 @@ icl_program_input_csc(struct intel_plane *plane, /* * BT.601 Limted range YCbCr -> full range RGB * The matrix required is : - * [1.164384, 0.000, 1.596370, - * 1.138393, -0.382500, -0.794598, - * 1.138393, 1.971696, 0.0000] + * [1.164384, 0.000, 1.596027, + * 1.164384, -0.39175, -0.812813, + * 1.164384, 2.017232, 0.0000] */ [DRM_COLOR_YCBCR_BT601] = { 0x7CC8, 0x7950, 0x0, - 0x8CB8, 0x7918, 0x9C40, - 0x0, 0x7918, 0x7FC8, + 0x8D00, 0x7950, 0x9C88, + 0x0, 0x7950, 0x6810, }, /* * BT.709 Limited range YCbCr -> full range RGB * The matrix required is : - * [1.164, 0.000, 1.833671, - * 1.138393, -0.213249, -0.532909, - * 1.138393, 2.112402, 0.0000] + * [1.164384, 0.000, 1.792741, + * 1.164384, -0.213249, -0.532909, + * 1.164384, 2.112402, 0.0000] */ [DRM_COLOR_YCBCR_BT709] = { - 0x7EA8, 0x7950, 0x0, - 0x8888, 0x7918, 0xADA8, - 0x0, 0x7918, 0x6870, + 0x7E58, 0x7950, 0x0, + 0x8888, 0x7950, 0xADA8, + 0x0, 0x7950, 0x6870, }, /* * BT.2020 Limited range YCbCr -> full range RGB -- cgit v1.2.3 From b41e434fdc423995b99cb79f86ec7ec428f5f379 Mon Sep 17 00:00:00 2001 From: Lucas 
De Marchi Date: Mon, 8 Jul 2019 10:28:12 -0700 Subject: drm/i915: make new intel_tc.c use uncore accessors Let's make the just created intel_tc.c already follow the trend of using i915 instead of dev_priv and calling the intel_uncore_*() functions. Signed-off-by: Lucas De Marchi Reviewed-by: Imre Deak Link: https://patchwork.freedesktop.org/patch/msgid/20190708172815.6814-2-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/display/intel_tc.c | 57 ++++++++++++++++++--------------- 1 file changed, 31 insertions(+), 26 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index 53103a9aa8a7..1a9dd32fb0a5 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -24,11 +24,12 @@ static const char *tc_port_mode_name(enum tc_port_mode mode) u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port) { - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); - enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port); + struct intel_uncore *uncore = &i915->uncore; u32 lane_mask; - lane_mask = I915_READ(PORT_TX_DFLEXDPSP); + lane_mask = intel_uncore_read(uncore, PORT_TX_DFLEXDPSP); WARN_ON(lane_mask == 0xffffffff); @@ -38,7 +39,7 @@ u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port) int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port) { - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); intel_wakeref_t wakeref; u32 lane_mask; @@ -46,7 +47,7 @@ int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port) return 4; lane_mask = 0; - with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref) + with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref) lane_mask = intel_tc_port_get_lane_mask(dig_port); switch (lane_mask) { @@ -89,12 +90,13 @@ static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port, static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port) { - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); - enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port); + struct intel_uncore *uncore = &i915->uncore; u32 mask = 0; u32 val; - val = I915_READ(PORT_TX_DFLEXDPSP); + val = intel_uncore_read(uncore, PORT_TX_DFLEXDPSP); if (val == 0xffffffff) { DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, nothing connected\n", @@ -107,7 +109,7 @@ static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port) if (val & TC_LIVE_STATE_TC(tc_port)) mask |= BIT(TC_PORT_DP_ALT); - if (I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port)) + if (intel_uncore_read(uncore, SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port)) mask |= BIT(TC_PORT_LEGACY); /* The sink can be connected only in a single mode. 
*/ @@ -119,11 +121,12 @@ static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port) static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port) { - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); - enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port); + struct intel_uncore *uncore = &i915->uncore; u32 val; - val = I915_READ(PORT_TX_DFLEXDPPMS); + val = intel_uncore_read(uncore, PORT_TX_DFLEXDPPMS); if (val == 0xffffffff) { DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, assuming not complete\n", dig_port->tc_port_name); @@ -136,11 +139,12 @@ static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port) static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port, bool enable) { - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); - enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port); + struct intel_uncore *uncore = &i915->uncore; u32 val; - val = I915_READ(PORT_TX_DFLEXDPCSSS); + val = intel_uncore_read(uncore, PORT_TX_DFLEXDPCSSS); if (val == 0xffffffff) { DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, can't set safe-mode to %s\n", dig_port->tc_port_name, @@ -153,7 +157,7 @@ static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port, if (!enable) val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc_port); - I915_WRITE(PORT_TX_DFLEXDPCSSS, val); + intel_uncore_write(uncore, PORT_TX_DFLEXDPCSSS, val); if (enable && wait_for(!icl_tc_phy_status_complete(dig_port), 10)) DRM_DEBUG_KMS("Port %s: PHY complete clear timed out\n", @@ -164,11 +168,12 @@ static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port, static bool icl_tc_phy_is_in_safe_mode(struct intel_digital_port *dig_port) { - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); - enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port); + struct intel_uncore *uncore = &i915->uncore; u32 val; - val = I915_READ(PORT_TX_DFLEXDPCSSS); + val = intel_uncore_read(uncore, PORT_TX_DFLEXDPCSSS); if (val == 0xffffffff) { DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, assume safe mode\n", dig_port->tc_port_name); @@ -317,11 +322,11 @@ intel_tc_port_get_target_mode(struct intel_digital_port *dig_port) static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port, int required_lanes) { - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); enum tc_port_mode old_tc_mode = dig_port->tc_mode; - intel_display_power_flush_work(dev_priv); - WARN_ON(intel_display_power_is_enabled(dev_priv, + intel_display_power_flush_work(i915); + WARN_ON(intel_display_power_is_enabled(i915, intel_aux_power_domain(dig_port))); icl_tc_phy_disconnect(dig_port); @@ -404,10 +409,10 @@ bool intel_tc_port_connected(struct intel_digital_port *dig_port) static void __intel_tc_port_lock(struct intel_digital_port *dig_port, int required_lanes) { - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); intel_wakeref_t wakeref; - wakeref = 
intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE); + wakeref = intel_display_power_get(i915, POWER_DOMAIN_DISPLAY_CORE); mutex_lock(&dig_port->tc_lock); @@ -426,12 +431,12 @@ void intel_tc_port_lock(struct intel_digital_port *dig_port) void intel_tc_port_unlock(struct intel_digital_port *dig_port) { - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); intel_wakeref_t wakeref = fetch_and_zero(&dig_port->tc_lock_wakeref); mutex_unlock(&dig_port->tc_lock); - intel_display_power_put_async(dev_priv, POWER_DOMAIN_DISPLAY_CORE, + intel_display_power_put_async(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref); } -- cgit v1.2.3 From c99fd7b3908e287ccd20be4f9eefe597c7d354fd Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Tue, 9 Jul 2019 08:54:03 -0700 Subject: drm/i915: fix include order in intel_tc.* Separate local includes with a blank line and sort the groups alphabetically. v2: don't make intel_tc.h be the first include v3: don't make local includes be included first Signed-off-by: Lucas De Marchi Reviewed-by: Imre Deak Link: https://patchwork.freedesktop.org/patch/msgid/20190709155403.29370-1-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/display/intel_tc.c | 2 +- drivers/gpu/drm/i915/display/intel_tc.h | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index 1a9dd32fb0a5..0c969f6fd714 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -3,9 +3,9 @@ * Copyright © 2019 Intel Corporation */ +#include "i915_drv.h" #include "intel_display.h" #include "intel_dp_mst.h" -#include "i915_drv.h" #include "intel_tc.h" static const char *tc_port_mode_name(enum tc_port_mode mode) diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h index 0d8411d4a91d..706c5bc050a5 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.h +++ b/drivers/gpu/drm/i915/display/intel_tc.h @@ -6,8 +6,9 @@ #ifndef __INTEL_TC_H__ #define __INTEL_TC_H__ -#include #include +#include + #include "intel_drv.h" bool intel_tc_port_connected(struct intel_digital_port *dig_port); -- cgit v1.2.3 From 9d44dcb9088aadd11718a7ad5a8567d6b8a1174a Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Mon, 8 Jul 2019 10:28:14 -0700 Subject: drm/i915: move intel_ddi_set_fia_lane_count to intel_tc.c PORT_TX_DFLEXDPMLE1 is a FIA register so move it to intel_tc.c where we access other FIA registers. In Tiger Lake we have multiple/modular FIAs so it makes sense to start moving all access to their registers to a common place. While at it, make it clear that we will only ever call this function for ports with TC phy. Previously we were relying on tc_mode being TC_PORT_TBT_ALT for combo phy ports. However it's confusing since in this same function we have checks for is_tc_port. Also, if we manage to make each phy access only their own field, we may in future add them as a union inside intel_digital_port. 
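The intel_tc.c conversion above swaps the I915_READ()/I915_WRITE() macros, which assume an implicit device pointer in scope, for intel_uncore_read()/intel_uncore_write() on an explicit uncore handle. A toy, compilable illustration of that accessor style is sketched here; the types, register numbers and set_safe_mode() helper are invented for the example and are not the driver's.

/*
 * Explicit-accessor pattern: every register access names the handle it
 * goes through, which makes the dependency visible and easy to mock.
 */
#include <stdint.h>
#include <stdio.h>

struct toy_uncore {
	uint32_t regs[16];	/* pretend MMIO space */
};

static uint32_t toy_uncore_read(struct toy_uncore *uncore, unsigned int reg)
{
	return uncore->regs[reg];
}

static void toy_uncore_write(struct toy_uncore *uncore, unsigned int reg,
			     uint32_t val)
{
	uncore->regs[reg] = val;
}

/* Read-modify-write through the handle, mirroring the converted helpers. */
static void set_safe_mode(struct toy_uncore *uncore, unsigned int reg,
			  int enable)
{
	uint32_t val = toy_uncore_read(uncore, reg);

	if (enable)
		val &= ~(1u << 0);
	else
		val |= 1u << 0;

	toy_uncore_write(uncore, reg, val);
}

int main(void)
{
	struct toy_uncore uncore = { { 0 } };

	set_safe_mode(&uncore, 3, 0);
	printf("reg3 = 0x%08x\n", toy_uncore_read(&uncore, 3));
	return 0;
}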
v2: Fix coding style while moving the code Signed-off-by: Lucas De Marchi Reviewed-by: Imre Deak Link: https://patchwork.freedesktop.org/patch/msgid/20190708172815.6814-4-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/display/intel_ddi.c | 49 +++++--------------------------- drivers/gpu/drm/i915/display/intel_tc.c | 33 +++++++++++++++++++++ drivers/gpu/drm/i915/display/intel_tc.h | 2 ++ 3 files changed, 42 insertions(+), 42 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 30e48609db1d..ad638e7f27bb 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -3594,37 +3594,6 @@ static void intel_ddi_update_pipe(struct intel_encoder *encoder, intel_hdcp_disable(to_intel_connector(conn_state->connector)); } -static void intel_ddi_set_fia_lane_count(struct intel_encoder *encoder, - const struct intel_crtc_state *pipe_config, - enum port port) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); - enum tc_port tc_port = intel_port_to_tc(dev_priv, port); - u32 val = I915_READ(PORT_TX_DFLEXDPMLE1); - bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL; - - WARN_ON(lane_reversal && dig_port->tc_mode != TC_PORT_LEGACY); - - val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc_port); - switch (pipe_config->lane_count) { - case 1: - val |= (lane_reversal) ? DFLEXDPMLE1_DPMLETC_ML3(tc_port) : - DFLEXDPMLE1_DPMLETC_ML0(tc_port); - break; - case 2: - val |= (lane_reversal) ? DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) : - DFLEXDPMLE1_DPMLETC_ML1_0(tc_port); - break; - case 4: - val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc_port); - break; - default: - MISSING_CASE(pipe_config->lane_count); - } - I915_WRITE(PORT_TX_DFLEXDPMLE1, val); -} - static void intel_ddi_update_prepare(struct intel_atomic_state *state, struct intel_encoder *encoder, @@ -3657,7 +3626,6 @@ intel_ddi_pre_pll_enable(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); bool is_tc_port = intel_port_is_tc(dev_priv, encoder->port); - enum port port = encoder->port; if (is_tc_port) intel_tc_port_get_link(dig_port, crtc_state->lane_count); @@ -3666,18 +3634,15 @@ intel_ddi_pre_pll_enable(struct intel_encoder *encoder, intel_display_power_get(dev_priv, intel_ddi_main_link_aux_domain(dig_port)); - if (IS_GEN9_LP(dev_priv)) + if (is_tc_port && dig_port->tc_mode != TC_PORT_TBT_ALT) + /* + * Program the lane count for static/dynamic connections on + * Type-C ports. Skip this step for TBT. + */ + intel_tc_port_set_fia_lane_count(dig_port, crtc_state->lane_count); + else if (IS_GEN9_LP(dev_priv)) bxt_ddi_phy_set_lane_optim_mask(encoder, crtc_state->lane_lat_optim_mask); - - /* - * Program the lane count for static/dynamic connections on Type-C ports. - * Skip this step for TBT. 
- */ - if (dig_port->tc_mode == TC_PORT_TBT_ALT) - return; - - intel_ddi_set_fia_lane_count(encoder, crtc_state, port); } static void diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index 0c969f6fd714..f44ee4bfe7c8 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -67,6 +67,39 @@ int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port) } } +void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port, + int required_lanes) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port); + bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL; + struct intel_uncore *uncore = &i915->uncore; + u32 val; + + WARN_ON(lane_reversal && dig_port->tc_mode != TC_PORT_LEGACY); + + val = intel_uncore_read(uncore, PORT_TX_DFLEXDPMLE1); + val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc_port); + + switch (required_lanes) { + case 1: + val |= lane_reversal ? DFLEXDPMLE1_DPMLETC_ML3(tc_port) : + DFLEXDPMLE1_DPMLETC_ML0(tc_port); + break; + case 2: + val |= lane_reversal ? DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) : + DFLEXDPMLE1_DPMLETC_ML1_0(tc_port); + break; + case 4: + val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc_port); + break; + default: + MISSING_CASE(required_lanes); + } + + intel_uncore_write(uncore, PORT_TX_DFLEXDPMLE1, val); +} + static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port, u32 live_status_mask) { diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h index 706c5bc050a5..22fe922ac9cf 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.h +++ b/drivers/gpu/drm/i915/display/intel_tc.h @@ -14,6 +14,8 @@ bool intel_tc_port_connected(struct intel_digital_port *dig_port); u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port); int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port); +void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port, + int required_lanes); void intel_tc_port_sanitize(struct intel_digital_port *dig_port); void intel_tc_port_lock(struct intel_digital_port *dig_port); -- cgit v1.2.3 From 358633e71c07475c75476058a289e2ef8aa74be4 Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Tue, 9 Jul 2019 11:39:30 -0700 Subject: drm/i915/gen11: Start distinguishing 'phy' from 'port' MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Our past DDI-based Intel platforms have had a fixed DDI<->PHY mapping. Because of this, both the bspec documentation and our i915 code has used the term "port" when talking about either DDI's or PHY's; it was always easy to tell what terms like "Port A" were referring to from the context. Unfortunately this is starting to break down now that EHL allows PHY-A to be driven by either DDI-A or DDI-D. Is a setup with DDI-D driving PHY-A considered "Port A" or "Port D?" The answer depends on which register we're working with, and even the bspec doesn't do a great job of clarifying this. Let's try to be more explicit about whether we're talking about the DDI or the PHY on gen11+ by using 'port' to refer to the DDI and creating a new 'enum phy' namespace to refer to the PHY in use. This patch just adds the new PHY namespace, new phy-based versions of intel_port_is_*(), and a helper to convert a port to a PHY. 
Transitioning various areas of the code over to using the PHY namespace will be done in subsequent patches to make review easier. We'll remove the intel_port_is_*() functions at the end of the series when we transition all callers over to using the PHY-based versions. v2: - Convert a few more 'port' uses to 'phy.' (Sparse) v3: - Switch DDI_CLK_SEL() back to 'port.' (Jose) - Add a code comment clarifying why DPCLKA_CFGCR0_ICL needs to use PHY for its bit definitions, even though the register description is given in terms of DDI. - To avoid confusion, switch CNL's DPCLKA_CFGCR0 defines back to using port and create separate ICL+ definitions that work in terms of PHY. v4: - Rebase and resolve conflicts with Imre's TC series. - This patch now just adds the namespace and a few convenience functions; the important changes are now split out into separate patches to make review easier. Suggested-by: Ville Syrjala Cc: José Roberto de Souza Cc: Lucas De Marchi Cc: Ville Syrjälä Cc: Imre Deak Cc: Jani Nikula Signed-off-by: Matt Roper Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190709183934.445-2-matthew.d.roper@intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 32 +++++++++++++++++++++++++++- drivers/gpu/drm/i915/display/intel_display.h | 16 ++++++++++++++ drivers/gpu/drm/i915/intel_drv.h | 2 ++ 3 files changed, 49 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index f07081815b80..43caee6d3c2f 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -6685,6 +6685,20 @@ bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port) return false; } +bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy) +{ + if (phy == PHY_NONE) + return false; + + if (IS_ELKHARTLAKE(dev_priv)) + return phy <= PHY_C; + + if (INTEL_GEN(dev_priv) >= 11) + return phy <= PHY_B; + + return false; +} + bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port) { if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv)) @@ -6693,9 +6707,25 @@ bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port) return false; } +bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy) +{ + if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv)) + return phy >= PHY_C && phy <= PHY_F; + + return false; +} + +enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port) +{ + if (IS_ELKHARTLAKE(i915) && port == PORT_D) + return PHY_A; + + return (enum phy)port; +} + enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port) { - if (!intel_port_is_tc(dev_priv, port)) + if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port))) return PORT_TC_NONE; return port - PORT_C; diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index d296556ed82e..d53285fb883f 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -228,6 +228,21 @@ struct intel_link_m_n { u32 link_n; }; +enum phy { + PHY_NONE = -1, + + PHY_A = 0, + PHY_B, + PHY_C, + PHY_D, + PHY_E, + PHY_F, + + I915_MAX_PHYS +}; + +#define phy_name(a) ((a) + 'A') + #define for_each_pipe(__dev_priv, __p) \ for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) @@ -356,5 +371,6 @@ void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv); 
u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv, u32 pixel_format, u64 modifier); bool intel_plane_can_remap(const struct intel_plane_state *plane_state); +enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port); #endif diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 24c63ed45c6f..815c26c0b98c 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1493,7 +1493,9 @@ void intel_encoder_destroy(struct drm_encoder *encoder); struct drm_display_mode * intel_encoder_current_mode(struct intel_encoder *encoder); bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port); +bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy); bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port); +bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy); enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port); int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, -- cgit v1.2.3 From befa372b990a3c02fde475be2a457d2c8fceac69 Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Tue, 9 Jul 2019 11:39:31 -0700 Subject: drm/i915/gen11: Program ICL_DPCLKA_CFGCR0 according to PHY MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Although the register name implies that it operates on DDI's, DPCLKA_CFGCR0_ICL actually needs to be programmed according to the PHY that's in use. I.e., when using EHL's DDI-D on combo PHY A, the bits described as "port A" in the bspec are what we need to set. The bspec clarifies: "[For EHL] DDID clock tied to DDIA clock, so DPCLKA_CFGCR0 DDIA Clock Select chooses the PLL for both DDIA and DDID and drives port A in all cases." Also, since the CNL DPCLKA_CFGCR0 bit defines are still port-based, we create separate ICL-specific defines that accept the PHY rather than trying to share the same bit definitions between CNL and ICL. v5: Make icl_dpclka_cfgcr0_clk_off() take phy rather than port. When splitting the original patch the hunk to handle this wound up too late in the series. (Sparse) v6: Since we're already changing this code, s/DPCLKA_CFGCR0_ICL/ICL_DPCLKA_CFGCR0/ for consistency. 
(Jose) Bspec: 33148 Cc: José Roberto de Souza Signed-off-by: Matt Roper Reviewed-by: Ville Syrjälä Reviewed-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190709183934.445-3-matthew.d.roper@intel.com --- drivers/gpu/drm/i915/display/icl_dsi.c | 33 +++++++++------ drivers/gpu/drm/i915/display/intel_ddi.c | 63 +++++++++++++++++----------- drivers/gpu/drm/i915/display/intel_display.c | 2 +- drivers/gpu/drm/i915/i915_reg.h | 12 ++++-- 4 files changed, 67 insertions(+), 43 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index 3cf95c34143c..8f1324c2f539 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -560,14 +560,16 @@ static void gen11_dsi_gate_clocks(struct intel_encoder *encoder) struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); u32 tmp; enum port port; + enum phy phy; mutex_lock(&dev_priv->dpll_lock); - tmp = I915_READ(DPCLKA_CFGCR0_ICL); + tmp = I915_READ(ICL_DPCLKA_CFGCR0); for_each_dsi_port(port, intel_dsi->ports) { - tmp |= DPCLKA_CFGCR0_DDI_CLK_OFF(port); + phy = intel_port_to_phy(dev_priv, port); + tmp |= ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); } - I915_WRITE(DPCLKA_CFGCR0_ICL, tmp); + I915_WRITE(ICL_DPCLKA_CFGCR0, tmp); mutex_unlock(&dev_priv->dpll_lock); } @@ -577,14 +579,16 @@ static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder) struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); u32 tmp; enum port port; + enum phy phy; mutex_lock(&dev_priv->dpll_lock); - tmp = I915_READ(DPCLKA_CFGCR0_ICL); + tmp = I915_READ(ICL_DPCLKA_CFGCR0); for_each_dsi_port(port, intel_dsi->ports) { - tmp &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port); + phy = intel_port_to_phy(dev_priv, port); + tmp &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); } - I915_WRITE(DPCLKA_CFGCR0_ICL, tmp); + I915_WRITE(ICL_DPCLKA_CFGCR0, tmp); mutex_unlock(&dev_priv->dpll_lock); } @@ -595,23 +599,26 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder, struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); struct intel_shared_dpll *pll = crtc_state->shared_dpll; enum port port; + enum phy phy; u32 val; mutex_lock(&dev_priv->dpll_lock); - val = I915_READ(DPCLKA_CFGCR0_ICL); + val = I915_READ(ICL_DPCLKA_CFGCR0); for_each_dsi_port(port, intel_dsi->ports) { - val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port); - val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port); + phy = intel_port_to_phy(dev_priv, port); + val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy); + val |= ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy); } - I915_WRITE(DPCLKA_CFGCR0_ICL, val); + I915_WRITE(ICL_DPCLKA_CFGCR0, val); for_each_dsi_port(port, intel_dsi->ports) { - val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port); + phy = intel_port_to_phy(dev_priv, port); + val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); } - I915_WRITE(DPCLKA_CFGCR0_ICL, val); + I915_WRITE(ICL_DPCLKA_CFGCR0, val); - POSTING_READ(DPCLKA_CFGCR0_ICL); + POSTING_READ(ICL_DPCLKA_CFGCR0); mutex_unlock(&dev_priv->dpll_lock); } diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index ad638e7f27bb..e5b55b4928bb 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -2729,12 +2729,13 @@ u32 ddi_signal_levels(struct intel_dp *intel_dp) static inline u32 icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv, - enum port port) + enum phy phy) { - if (intel_port_is_combophy(dev_priv, port)) { - return 
ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(port); - } else if (intel_port_is_tc(dev_priv, port)) { - enum tc_port tc_port = intel_port_to_tc(dev_priv, port); + if (intel_phy_is_combo(dev_priv, phy)) { + return ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); + } else if (intel_phy_is_tc(dev_priv, phy)) { + enum tc_port tc_port = intel_port_to_tc(dev_priv, + (enum port)phy); return ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port); } @@ -2747,23 +2748,33 @@ static void icl_map_plls_to_ports(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_shared_dpll *pll = crtc_state->shared_dpll; - enum port port = encoder->port; + enum phy phy = intel_port_to_phy(dev_priv, encoder->port); u32 val; mutex_lock(&dev_priv->dpll_lock); - val = I915_READ(DPCLKA_CFGCR0_ICL); - WARN_ON((val & icl_dpclka_cfgcr0_clk_off(dev_priv, port)) == 0); + val = I915_READ(ICL_DPCLKA_CFGCR0); + WARN_ON((val & icl_dpclka_cfgcr0_clk_off(dev_priv, phy)) == 0); - if (intel_port_is_combophy(dev_priv, port)) { - val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port); - val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port); - I915_WRITE(DPCLKA_CFGCR0_ICL, val); - POSTING_READ(DPCLKA_CFGCR0_ICL); + if (intel_phy_is_combo(dev_priv, phy)) { + /* + * Even though this register references DDIs, note that we + * want to pass the PHY rather than the port (DDI). For + * ICL, port=phy in all cases so it doesn't matter, but for + * EHL the bspec notes the following: + * + * "DDID clock tied to DDIA clock, so DPCLKA_CFGCR0 DDIA + * Clock Select chooses the PLL for both DDIA and DDID and + * drives port A in all cases." + */ + val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy); + val |= ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy); + I915_WRITE(ICL_DPCLKA_CFGCR0, val); + POSTING_READ(ICL_DPCLKA_CFGCR0); } - val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, port); - I915_WRITE(DPCLKA_CFGCR0_ICL, val); + val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, phy); + I915_WRITE(ICL_DPCLKA_CFGCR0, val); mutex_unlock(&dev_priv->dpll_lock); } @@ -2771,14 +2782,14 @@ static void icl_map_plls_to_ports(struct intel_encoder *encoder, static void icl_unmap_plls_to_ports(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum port port = encoder->port; + enum phy phy = intel_port_to_phy(dev_priv, encoder->port); u32 val; mutex_lock(&dev_priv->dpll_lock); - val = I915_READ(DPCLKA_CFGCR0_ICL); - val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port); - I915_WRITE(DPCLKA_CFGCR0_ICL, val); + val = I915_READ(ICL_DPCLKA_CFGCR0); + val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy); + I915_WRITE(ICL_DPCLKA_CFGCR0, val); mutex_unlock(&dev_priv->dpll_lock); } @@ -2836,11 +2847,13 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder) ddi_clk_needed = false; } - val = I915_READ(DPCLKA_CFGCR0_ICL); + val = I915_READ(ICL_DPCLKA_CFGCR0); for_each_port_masked(port, port_mask) { + enum phy phy = intel_port_to_phy(dev_priv, port); + bool ddi_clk_ungated = !(val & icl_dpclka_cfgcr0_clk_off(dev_priv, - port)); + phy)); if (ddi_clk_needed == ddi_clk_ungated) continue; @@ -2852,10 +2865,10 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder) if (WARN_ON(ddi_clk_needed)) continue; - DRM_NOTE("Port %c is disabled/in DSI mode with an ungated DDI clock, gate it\n", - port_name(port)); - val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port); - I915_WRITE(DPCLKA_CFGCR0_ICL, val); + DRM_NOTE("PHY %c is disabled/in DSI mode with an ungated DDI clock, gate it\n", + phy_name(port)); + val |= 
icl_dpclka_cfgcr0_clk_off(dev_priv, phy); + I915_WRITE(ICL_DPCLKA_CFGCR0, val); } } diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 43caee6d3c2f..44c79f8bd028 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -10096,7 +10096,7 @@ static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv, u32 temp; if (intel_port_is_combophy(dev_priv, port)) { - temp = I915_READ(DPCLKA_CFGCR0_ICL) & + temp = I915_READ(ICL_DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port); id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port); port_dpll_id = ICL_PORT_DPLL_DEFAULT; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 5898f59e3dd7..d3fc575a94db 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -9697,17 +9697,21 @@ enum skl_power_gate { * CNL Clocks */ #define DPCLKA_CFGCR0 _MMIO(0x6C200) -#define DPCLKA_CFGCR0_ICL _MMIO(0x164280) #define DPCLKA_CFGCR0_DDI_CLK_OFF(port) (1 << ((port) == PORT_F ? 23 : \ (port) + 10)) -#define ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(port) (1 << ((port) + 10)) -#define ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port) (1 << ((tc_port) == PORT_TC4 ? \ - 21 : (tc_port) + 12)) #define DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port) ((port) == PORT_F ? 21 : \ (port) * 2) #define DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port) (3 << DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port)) #define DPCLKA_CFGCR0_DDI_CLK_SEL(pll, port) ((pll) << DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port)) +#define ICL_DPCLKA_CFGCR0 _MMIO(0x164280) +#define ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) (1 << _PICK(phy, 10, 11, 24)) +#define ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port) (1 << ((tc_port) == PORT_TC4 ? \ + 21 : (tc_port) + 12)) +#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy) ((phy) * 2) +#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy) (3 << ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy)) +#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll, phy) ((pll) << ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy)) + /* CNL PLL */ #define DPLL0_ENABLE 0x46010 #define DPLL1_ENABLE 0x46014 -- cgit v1.2.3 From dc867bc7d88769910b58748040240906540ea8c1 Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Tue, 9 Jul 2019 11:39:32 -0700 Subject: drm/i915/gen11: Convert combo PHY logic to use new 'enum phy' namespace MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Convert the code that operates directly on gen11 combo PHY's to use the new namespace. Combo PHY registers are those named "ICL_PORT_*" plus ICL_DPHY_CHKN. Note that a lot of the PHY programming happens in the MIPI DSI code. For clarity I've added a for_each_dsi_phy() to loop over the phys used by DSI. Since DSI always uses A & B on gen11, port=phy in all cases so it doesn't actually matter which form we use in the DSI code. I've used the phy iterator in code that's explicitly working with the combo PHY, but left the rest of the DSI code using the port iterator and namespace to minimize patch deltas. We can switch the rest of the DSI code over to use phy terminology later if this winds up being too confusing. v6: Drop an include of drm/i915_drm.h; that was previously included just for the definition of 'enum port' which this patch removes the need for. 
(Jose) Cc: José Roberto de Souza Signed-off-by: Matt Roper Reviewed-by: Ville Syrjälä Reviewed-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190709183934.445-4-matthew.d.roper@intel.com --- drivers/gpu/drm/i915/display/icl_dsi.c | 127 +++++++++--------- drivers/gpu/drm/i915/display/intel_combo_phy.c | 143 ++++++++++----------- drivers/gpu/drm/i915/display/intel_combo_phy.h | 4 +- drivers/gpu/drm/i915/display/intel_ddi.c | 45 +++---- drivers/gpu/drm/i915/display/intel_display.h | 4 + drivers/gpu/drm/i915/display/intel_display_power.c | 16 +-- drivers/gpu/drm/i915/display/intel_dsi.h | 12 +- drivers/gpu/drm/i915/i915_reg.h | 74 +++++------ 8 files changed, 213 insertions(+), 212 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index 8f1324c2f539..4d952accfaaa 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -202,63 +202,62 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); - enum port port; + enum phy phy; u32 tmp; int lane; - for_each_dsi_port(port, intel_dsi->ports) { - + for_each_dsi_phy(phy, intel_dsi->phys) { /* * Program voltage swing and pre-emphasis level values as per * table in BSPEC under DDI buffer programing */ - tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port)); + tmp = I915_READ(ICL_PORT_TX_DW5_LN0(phy)); tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK); tmp |= SCALING_MODE_SEL(0x2); tmp |= TAP2_DISABLE | TAP3_DISABLE; tmp |= RTERM_SELECT(0x6); - I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp); + I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), tmp); - tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port)); + tmp = I915_READ(ICL_PORT_TX_DW5_AUX(phy)); tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK); tmp |= SCALING_MODE_SEL(0x2); tmp |= TAP2_DISABLE | TAP3_DISABLE; tmp |= RTERM_SELECT(0x6); - I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp); + I915_WRITE(ICL_PORT_TX_DW5_AUX(phy), tmp); - tmp = I915_READ(ICL_PORT_TX_DW2_LN0(port)); + tmp = I915_READ(ICL_PORT_TX_DW2_LN0(phy)); tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | RCOMP_SCALAR_MASK); tmp |= SWING_SEL_UPPER(0x2); tmp |= SWING_SEL_LOWER(0x2); tmp |= RCOMP_SCALAR(0x98); - I915_WRITE(ICL_PORT_TX_DW2_GRP(port), tmp); + I915_WRITE(ICL_PORT_TX_DW2_GRP(phy), tmp); - tmp = I915_READ(ICL_PORT_TX_DW2_AUX(port)); + tmp = I915_READ(ICL_PORT_TX_DW2_AUX(phy)); tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | RCOMP_SCALAR_MASK); tmp |= SWING_SEL_UPPER(0x2); tmp |= SWING_SEL_LOWER(0x2); tmp |= RCOMP_SCALAR(0x98); - I915_WRITE(ICL_PORT_TX_DW2_AUX(port), tmp); + I915_WRITE(ICL_PORT_TX_DW2_AUX(phy), tmp); - tmp = I915_READ(ICL_PORT_TX_DW4_AUX(port)); + tmp = I915_READ(ICL_PORT_TX_DW4_AUX(phy)); tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | CURSOR_COEFF_MASK); tmp |= POST_CURSOR_1(0x0); tmp |= POST_CURSOR_2(0x0); tmp |= CURSOR_COEFF(0x3f); - I915_WRITE(ICL_PORT_TX_DW4_AUX(port), tmp); + I915_WRITE(ICL_PORT_TX_DW4_AUX(phy), tmp); for (lane = 0; lane <= 3; lane++) { /* Bspec: must not use GRP register for write */ - tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, port)); + tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, phy)); tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | CURSOR_COEFF_MASK); tmp |= POST_CURSOR_1(0x0); tmp |= POST_CURSOR_2(0x0); tmp |= CURSOR_COEFF(0x3f); - I915_WRITE(ICL_PORT_TX_DW4_LN(lane, port), tmp); + 
I915_WRITE(ICL_PORT_TX_DW4_LN(lane, phy), tmp); } } } @@ -364,10 +363,10 @@ static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); - enum port port; + enum phy phy; - for_each_dsi_port(port, intel_dsi->ports) - intel_combo_phy_power_up_lanes(dev_priv, port, true, + for_each_dsi_phy(phy, intel_dsi->phys) + intel_combo_phy_power_up_lanes(dev_priv, phy, true, intel_dsi->lane_count, false); } @@ -375,46 +374,46 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); - enum port port; + enum phy phy; u32 tmp; int lane; /* Step 4b(i) set loadgen select for transmit and aux lanes */ - for_each_dsi_port(port, intel_dsi->ports) { - tmp = I915_READ(ICL_PORT_TX_DW4_AUX(port)); + for_each_dsi_phy(phy, intel_dsi->phys) { + tmp = I915_READ(ICL_PORT_TX_DW4_AUX(phy)); tmp &= ~LOADGEN_SELECT; - I915_WRITE(ICL_PORT_TX_DW4_AUX(port), tmp); + I915_WRITE(ICL_PORT_TX_DW4_AUX(phy), tmp); for (lane = 0; lane <= 3; lane++) { - tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, port)); + tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, phy)); tmp &= ~LOADGEN_SELECT; if (lane != 2) tmp |= LOADGEN_SELECT; - I915_WRITE(ICL_PORT_TX_DW4_LN(lane, port), tmp); + I915_WRITE(ICL_PORT_TX_DW4_LN(lane, phy), tmp); } } /* Step 4b(ii) set latency optimization for transmit and aux lanes */ - for_each_dsi_port(port, intel_dsi->ports) { - tmp = I915_READ(ICL_PORT_TX_DW2_AUX(port)); + for_each_dsi_phy(phy, intel_dsi->phys) { + tmp = I915_READ(ICL_PORT_TX_DW2_AUX(phy)); tmp &= ~FRC_LATENCY_OPTIM_MASK; tmp |= FRC_LATENCY_OPTIM_VAL(0x5); - I915_WRITE(ICL_PORT_TX_DW2_AUX(port), tmp); - tmp = I915_READ(ICL_PORT_TX_DW2_LN0(port)); + I915_WRITE(ICL_PORT_TX_DW2_AUX(phy), tmp); + tmp = I915_READ(ICL_PORT_TX_DW2_LN0(phy)); tmp &= ~FRC_LATENCY_OPTIM_MASK; tmp |= FRC_LATENCY_OPTIM_VAL(0x5); - I915_WRITE(ICL_PORT_TX_DW2_GRP(port), tmp); + I915_WRITE(ICL_PORT_TX_DW2_GRP(phy), tmp); /* For EHL set latency optimization for PCS_DW1 lanes */ if (IS_ELKHARTLAKE(dev_priv)) { - tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(port)); + tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(phy)); tmp &= ~LATENCY_OPTIM_MASK; tmp |= LATENCY_OPTIM_VAL(0); - I915_WRITE(ICL_PORT_PCS_DW1_AUX(port), tmp); + I915_WRITE(ICL_PORT_PCS_DW1_AUX(phy), tmp); - tmp = I915_READ(ICL_PORT_PCS_DW1_LN0(port)); + tmp = I915_READ(ICL_PORT_PCS_DW1_LN0(phy)); tmp &= ~LATENCY_OPTIM_MASK; tmp |= LATENCY_OPTIM_VAL(0x1); - I915_WRITE(ICL_PORT_PCS_DW1_GRP(port), tmp); + I915_WRITE(ICL_PORT_PCS_DW1_GRP(phy), tmp); } } @@ -425,16 +424,16 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder) struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); u32 tmp; - enum port port; + enum phy phy; /* clear common keeper enable bit */ - for_each_dsi_port(port, intel_dsi->ports) { - tmp = I915_READ(ICL_PORT_PCS_DW1_LN0(port)); + for_each_dsi_phy(phy, intel_dsi->phys) { + tmp = I915_READ(ICL_PORT_PCS_DW1_LN0(phy)); tmp &= ~COMMON_KEEPER_EN; - I915_WRITE(ICL_PORT_PCS_DW1_GRP(port), tmp); - tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(port)); + I915_WRITE(ICL_PORT_PCS_DW1_GRP(phy), tmp); + tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(phy)); tmp &= ~COMMON_KEEPER_EN; - I915_WRITE(ICL_PORT_PCS_DW1_AUX(port), tmp); + I915_WRITE(ICL_PORT_PCS_DW1_AUX(phy), tmp); } /* @@ 
-442,33 +441,33 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder) * Note: loadgen select program is done * as part of lane phy sequence configuration */ - for_each_dsi_port(port, intel_dsi->ports) { - tmp = I915_READ(ICL_PORT_CL_DW5(port)); + for_each_dsi_phy(phy, intel_dsi->phys) { + tmp = I915_READ(ICL_PORT_CL_DW5(phy)); tmp |= SUS_CLOCK_CONFIG; - I915_WRITE(ICL_PORT_CL_DW5(port), tmp); + I915_WRITE(ICL_PORT_CL_DW5(phy), tmp); } /* Clear training enable to change swing values */ - for_each_dsi_port(port, intel_dsi->ports) { - tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port)); + for_each_dsi_phy(phy, intel_dsi->phys) { + tmp = I915_READ(ICL_PORT_TX_DW5_LN0(phy)); tmp &= ~TX_TRAINING_EN; - I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp); - tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port)); + I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), tmp); + tmp = I915_READ(ICL_PORT_TX_DW5_AUX(phy)); tmp &= ~TX_TRAINING_EN; - I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp); + I915_WRITE(ICL_PORT_TX_DW5_AUX(phy), tmp); } /* Program swing and de-emphasis */ dsi_program_swing_and_deemphasis(encoder); /* Set training enable to trigger update */ - for_each_dsi_port(port, intel_dsi->ports) { - tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port)); + for_each_dsi_phy(phy, intel_dsi->phys) { + tmp = I915_READ(ICL_PORT_TX_DW5_LN0(phy)); tmp |= TX_TRAINING_EN; - I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp); - tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port)); + I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), tmp); + tmp = I915_READ(ICL_PORT_TX_DW5_AUX(phy)); tmp |= TX_TRAINING_EN; - I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp); + I915_WRITE(ICL_PORT_TX_DW5_AUX(phy), tmp); } } @@ -497,6 +496,7 @@ static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder) struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); u32 tmp; enum port port; + enum phy phy; /* Program T-INIT master registers */ for_each_dsi_port(port, intel_dsi->ports) { @@ -546,10 +546,10 @@ static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder) } if (IS_ELKHARTLAKE(dev_priv)) { - for_each_dsi_port(port, intel_dsi->ports) { - tmp = I915_READ(ICL_DPHY_CHKN(port)); + for_each_dsi_phy(phy, intel_dsi->phys) { + tmp = I915_READ(ICL_DPHY_CHKN(phy)); tmp |= ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP; - I915_WRITE(ICL_DPHY_CHKN(port), tmp); + I915_WRITE(ICL_DPHY_CHKN(phy), tmp); } } } @@ -559,15 +559,12 @@ static void gen11_dsi_gate_clocks(struct intel_encoder *encoder) struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); u32 tmp; - enum port port; enum phy phy; mutex_lock(&dev_priv->dpll_lock); tmp = I915_READ(ICL_DPCLKA_CFGCR0); - for_each_dsi_port(port, intel_dsi->ports) { - phy = intel_port_to_phy(dev_priv, port); + for_each_dsi_phy(phy, intel_dsi->phys) tmp |= ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); - } I915_WRITE(ICL_DPCLKA_CFGCR0, tmp); mutex_unlock(&dev_priv->dpll_lock); @@ -578,15 +575,12 @@ static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder) struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); u32 tmp; - enum port port; enum phy phy; mutex_lock(&dev_priv->dpll_lock); tmp = I915_READ(ICL_DPCLKA_CFGCR0); - for_each_dsi_port(port, intel_dsi->ports) { - phy = intel_port_to_phy(dev_priv, port); + for_each_dsi_phy(phy, intel_dsi->phys) tmp &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); - } I915_WRITE(ICL_DPCLKA_CFGCR0, tmp); mutex_unlock(&dev_priv->dpll_lock); @@ -598,22 +592,19 @@ static void 
gen11_dsi_map_pll(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); struct intel_shared_dpll *pll = crtc_state->shared_dpll; - enum port port; enum phy phy; u32 val; mutex_lock(&dev_priv->dpll_lock); val = I915_READ(ICL_DPCLKA_CFGCR0); - for_each_dsi_port(port, intel_dsi->ports) { - phy = intel_port_to_phy(dev_priv, port); + for_each_dsi_phy(phy, intel_dsi->phys) { val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy); val |= ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy); } I915_WRITE(ICL_DPCLKA_CFGCR0, val); - for_each_dsi_port(port, intel_dsi->ports) { - phy = intel_port_to_phy(dev_priv, port); + for_each_dsi_phy(phy, intel_dsi->phys) { val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); } I915_WRITE(ICL_DPCLKA_CFGCR0, val); diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c index d3d5244765e6..ac8218a040ab 100644 --- a/drivers/gpu/drm/i915/display/intel_combo_phy.c +++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c @@ -6,13 +6,13 @@ #include "intel_combo_phy.h" #include "intel_drv.h" -#define for_each_combo_port(__dev_priv, __port) \ - for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \ - for_each_if(intel_port_is_combophy(__dev_priv, __port)) +#define for_each_combo_phy(__dev_priv, __phy) \ + for ((__phy) = PHY_A; (__phy) < I915_MAX_PHYS; (__phy)++) \ + for_each_if(intel_phy_is_combo(__dev_priv, __phy)) -#define for_each_combo_port_reverse(__dev_priv, __port) \ - for ((__port) = I915_MAX_PORTS; (__port)-- > PORT_A;) \ - for_each_if(intel_port_is_combophy(__dev_priv, __port)) +#define for_each_combo_phy_reverse(__dev_priv, __phy) \ + for ((__phy) = I915_MAX_PHYS; (__phy)-- > PHY_A;) \ + for_each_if(intel_phy_is_combo(__dev_priv, __phy)) enum { PROCMON_0_85V_DOT_0, @@ -38,18 +38,17 @@ static const struct cnl_procmon { }; /* - * CNL has just one set of registers, while ICL has two sets: one for port A and - * the other for port B. The CNL registers are equivalent to the ICL port A - * registers, that's why we call the ICL macros even though the function has CNL - * on its name. + * CNL has just one set of registers, while gen11 has a set for each combo PHY. + * The CNL registers are equivalent to the gen11 PHY A registers, that's why we + * call the ICL macros even though the function has CNL on its name. 
*/ static const struct cnl_procmon * -cnl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum port port) +cnl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum phy phy) { const struct cnl_procmon *procmon; u32 val; - val = I915_READ(ICL_PORT_COMP_DW3(port)); + val = I915_READ(ICL_PORT_COMP_DW3(phy)); switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) { default: MISSING_CASE(val); @@ -75,32 +74,32 @@ cnl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum port port) } static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv, - enum port port) + enum phy phy) { const struct cnl_procmon *procmon; u32 val; - procmon = cnl_get_procmon_ref_values(dev_priv, port); + procmon = cnl_get_procmon_ref_values(dev_priv, phy); - val = I915_READ(ICL_PORT_COMP_DW1(port)); + val = I915_READ(ICL_PORT_COMP_DW1(phy)); val &= ~((0xff << 16) | 0xff); val |= procmon->dw1; - I915_WRITE(ICL_PORT_COMP_DW1(port), val); + I915_WRITE(ICL_PORT_COMP_DW1(phy), val); - I915_WRITE(ICL_PORT_COMP_DW9(port), procmon->dw9); - I915_WRITE(ICL_PORT_COMP_DW10(port), procmon->dw10); + I915_WRITE(ICL_PORT_COMP_DW9(phy), procmon->dw9); + I915_WRITE(ICL_PORT_COMP_DW10(phy), procmon->dw10); } static bool check_phy_reg(struct drm_i915_private *dev_priv, - enum port port, i915_reg_t reg, u32 mask, + enum phy phy, i915_reg_t reg, u32 mask, u32 expected_val) { u32 val = I915_READ(reg); if ((val & mask) != expected_val) { - DRM_DEBUG_DRIVER("Port %c combo PHY reg %08x state mismatch: " + DRM_DEBUG_DRIVER("Combo PHY %c reg %08x state mismatch: " "current %08x mask %08x expected %08x\n", - port_name(port), + phy_name(phy), reg.reg, val, mask, expected_val); return false; } @@ -109,18 +108,18 @@ static bool check_phy_reg(struct drm_i915_private *dev_priv, } static bool cnl_verify_procmon_ref_values(struct drm_i915_private *dev_priv, - enum port port) + enum phy phy) { const struct cnl_procmon *procmon; bool ret; - procmon = cnl_get_procmon_ref_values(dev_priv, port); + procmon = cnl_get_procmon_ref_values(dev_priv, phy); - ret = check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW1(port), + ret = check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW1(phy), (0xff << 16) | 0xff, procmon->dw1); - ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW9(port), + ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW9(phy), -1U, procmon->dw9); - ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW10(port), + ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW10(phy), -1U, procmon->dw10); return ret; @@ -134,15 +133,15 @@ static bool cnl_combo_phy_enabled(struct drm_i915_private *dev_priv) static bool cnl_combo_phy_verify_state(struct drm_i915_private *dev_priv) { - enum port port = PORT_A; + enum phy phy = PHY_A; bool ret; if (!cnl_combo_phy_enabled(dev_priv)) return false; - ret = cnl_verify_procmon_ref_values(dev_priv, port); + ret = cnl_verify_procmon_ref_values(dev_priv, phy); - ret &= check_phy_reg(dev_priv, port, CNL_PORT_CL1CM_DW5, + ret &= check_phy_reg(dev_priv, phy, CNL_PORT_CL1CM_DW5, CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE); return ret; @@ -157,7 +156,7 @@ static void cnl_combo_phys_init(struct drm_i915_private *dev_priv) I915_WRITE(CHICKEN_MISC_2, val); /* Dummy PORT_A to get the correct CNL register from the ICL macro */ - cnl_set_procmon_ref_values(dev_priv, PORT_A); + cnl_set_procmon_ref_values(dev_priv, PHY_A); val = I915_READ(CNL_PORT_COMP_DW0); val |= COMP_INIT; @@ -181,39 +180,39 @@ static void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv) } static bool 
icl_combo_phy_enabled(struct drm_i915_private *dev_priv, - enum port port) + enum phy phy) { /* The PHY C added by EHL has no PHY_MISC register */ - if (IS_ELKHARTLAKE(dev_priv) && port == PORT_C) - return I915_READ(ICL_PORT_COMP_DW0(port)) & COMP_INIT; + if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_C) + return I915_READ(ICL_PORT_COMP_DW0(phy)) & COMP_INIT; else - return !(I915_READ(ICL_PHY_MISC(port)) & + return !(I915_READ(ICL_PHY_MISC(phy)) & ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN) && - (I915_READ(ICL_PORT_COMP_DW0(port)) & COMP_INIT); + (I915_READ(ICL_PORT_COMP_DW0(phy)) & COMP_INIT); } static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv, - enum port port) + enum phy phy) { bool ret; - if (!icl_combo_phy_enabled(dev_priv, port)) + if (!icl_combo_phy_enabled(dev_priv, phy)) return false; - ret = cnl_verify_procmon_ref_values(dev_priv, port); + ret = cnl_verify_procmon_ref_values(dev_priv, phy); - if (port == PORT_A) - ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW8(port), + if (phy == PHY_A) + ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW8(phy), IREFGEN, IREFGEN); - ret &= check_phy_reg(dev_priv, port, ICL_PORT_CL_DW5(port), + ret &= check_phy_reg(dev_priv, phy, ICL_PORT_CL_DW5(phy), CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE); return ret; } void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv, - enum port port, bool is_dsi, + enum phy phy, bool is_dsi, int lane_count, bool lane_reversal) { u8 lane_mask; @@ -258,10 +257,10 @@ void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv, } } - val = I915_READ(ICL_PORT_CL_DW10(port)); + val = I915_READ(ICL_PORT_CL_DW10(phy)); val &= ~PWR_DOWN_LN_MASK; val |= lane_mask << PWR_DOWN_LN_SHIFT; - I915_WRITE(ICL_PORT_CL_DW10(port), val); + I915_WRITE(ICL_PORT_CL_DW10(phy), val); } static u32 ehl_combo_phy_a_mux(struct drm_i915_private *i915, u32 val) @@ -292,14 +291,14 @@ static u32 ehl_combo_phy_a_mux(struct drm_i915_private *i915, u32 val) static void icl_combo_phys_init(struct drm_i915_private *dev_priv) { - enum port port; + enum phy phy; - for_each_combo_port(dev_priv, port) { + for_each_combo_phy(dev_priv, phy) { u32 val; - if (icl_combo_phy_verify_state(dev_priv, port)) { - DRM_DEBUG_DRIVER("Port %c combo PHY already enabled, won't reprogram it.\n", - port_name(port)); + if (icl_combo_phy_verify_state(dev_priv, phy)) { + DRM_DEBUG_DRIVER("Combo PHY %c already enabled, won't reprogram it.\n", + phy_name(phy)); continue; } @@ -308,7 +307,7 @@ static void icl_combo_phys_init(struct drm_i915_private *dev_priv) * register for it and no need to program the * DE_IO_COMP_PWR_DOWN setting on PHY C. */ - if (IS_ELKHARTLAKE(dev_priv) && port == PORT_C) + if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_C) goto skip_phy_misc; /* @@ -319,59 +318,59 @@ static void icl_combo_phys_init(struct drm_i915_private *dev_priv) * based on whether our VBT indicates the presence of any * "internal" child devices. 
*/ - val = I915_READ(ICL_PHY_MISC(port)); - if (IS_ELKHARTLAKE(dev_priv) && port == PORT_A) + val = I915_READ(ICL_PHY_MISC(phy)); + if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_A) val = ehl_combo_phy_a_mux(dev_priv, val); val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN; - I915_WRITE(ICL_PHY_MISC(port), val); + I915_WRITE(ICL_PHY_MISC(phy), val); skip_phy_misc: - cnl_set_procmon_ref_values(dev_priv, port); + cnl_set_procmon_ref_values(dev_priv, phy); - if (port == PORT_A) { - val = I915_READ(ICL_PORT_COMP_DW8(port)); + if (phy == PHY_A) { + val = I915_READ(ICL_PORT_COMP_DW8(phy)); val |= IREFGEN; - I915_WRITE(ICL_PORT_COMP_DW8(port), val); + I915_WRITE(ICL_PORT_COMP_DW8(phy), val); } - val = I915_READ(ICL_PORT_COMP_DW0(port)); + val = I915_READ(ICL_PORT_COMP_DW0(phy)); val |= COMP_INIT; - I915_WRITE(ICL_PORT_COMP_DW0(port), val); + I915_WRITE(ICL_PORT_COMP_DW0(phy), val); - val = I915_READ(ICL_PORT_CL_DW5(port)); + val = I915_READ(ICL_PORT_CL_DW5(phy)); val |= CL_POWER_DOWN_ENABLE; - I915_WRITE(ICL_PORT_CL_DW5(port), val); + I915_WRITE(ICL_PORT_CL_DW5(phy), val); } } static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv) { - enum port port; + enum phy phy; - for_each_combo_port_reverse(dev_priv, port) { + for_each_combo_phy_reverse(dev_priv, phy) { u32 val; - if (port == PORT_A && - !icl_combo_phy_verify_state(dev_priv, port)) - DRM_WARN("Port %c combo PHY HW state changed unexpectedly\n", - port_name(port)); + if (phy == PHY_A && + !icl_combo_phy_verify_state(dev_priv, phy)) + DRM_WARN("Combo PHY %c HW state changed unexpectedly\n", + phy_name(phy)); /* * Although EHL adds a combo PHY C, there's no PHY_MISC * register for it and no need to program the * DE_IO_COMP_PWR_DOWN setting on PHY C. */ - if (IS_ELKHARTLAKE(dev_priv) && port == PORT_C) + if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_C) goto skip_phy_misc; - val = I915_READ(ICL_PHY_MISC(port)); + val = I915_READ(ICL_PHY_MISC(phy)); val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN; - I915_WRITE(ICL_PHY_MISC(port), val); + I915_WRITE(ICL_PHY_MISC(phy), val); skip_phy_misc: - val = I915_READ(ICL_PORT_COMP_DW0(port)); + val = I915_READ(ICL_PORT_COMP_DW0(phy)); val &= ~COMP_INIT; - I915_WRITE(ICL_PORT_COMP_DW0(port), val); + I915_WRITE(ICL_PORT_COMP_DW0(phy), val); } } diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.h b/drivers/gpu/drm/i915/display/intel_combo_phy.h index e6e195a83b19..660886f86c59 100644 --- a/drivers/gpu/drm/i915/display/intel_combo_phy.h +++ b/drivers/gpu/drm/i915/display/intel_combo_phy.h @@ -7,14 +7,14 @@ #define __INTEL_COMBO_PHY_H__ #include -#include struct drm_i915_private; +enum phy; void intel_combo_phy_init(struct drm_i915_private *dev_priv); void intel_combo_phy_uninit(struct drm_i915_private *dev_priv); void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv, - enum port port, bool is_dsi, + enum phy phy, bool is_dsi, int lane_count, bool lane_reversal); #endif /* __INTEL_COMBO_PHY_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index e5b55b4928bb..b5c30a02761b 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -2414,7 +2414,7 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, } static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv, - u32 level, enum port port, int type, + u32 level, enum phy phy, int type, int rate) { const struct cnl_ddi_buf_trans *ddi_translations = NULL; @@ -2432,41 +2432,41 @@ static void icl_ddi_combo_vswing_program(struct 
drm_i915_private *dev_priv, } /* Set PORT_TX_DW5 */ - val = I915_READ(ICL_PORT_TX_DW5_LN0(port)); + val = I915_READ(ICL_PORT_TX_DW5_LN0(phy)); val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK | TAP2_DISABLE | TAP3_DISABLE); val |= SCALING_MODE_SEL(0x2); val |= RTERM_SELECT(0x6); val |= TAP3_DISABLE; - I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); + I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), val); /* Program PORT_TX_DW2 */ - val = I915_READ(ICL_PORT_TX_DW2_LN0(port)); + val = I915_READ(ICL_PORT_TX_DW2_LN0(phy)); val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | RCOMP_SCALAR_MASK); val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel); val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel); /* Program Rcomp scalar for every table entry */ val |= RCOMP_SCALAR(0x98); - I915_WRITE(ICL_PORT_TX_DW2_GRP(port), val); + I915_WRITE(ICL_PORT_TX_DW2_GRP(phy), val); /* Program PORT_TX_DW4 */ /* We cannot write to GRP. It would overwrite individual loadgen. */ for (ln = 0; ln <= 3; ln++) { - val = I915_READ(ICL_PORT_TX_DW4_LN(ln, port)); + val = I915_READ(ICL_PORT_TX_DW4_LN(ln, phy)); val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | CURSOR_COEFF_MASK); val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1); val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2); val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff); - I915_WRITE(ICL_PORT_TX_DW4_LN(ln, port), val); + I915_WRITE(ICL_PORT_TX_DW4_LN(ln, phy), val); } /* Program PORT_TX_DW7 */ - val = I915_READ(ICL_PORT_TX_DW7_LN0(port)); + val = I915_READ(ICL_PORT_TX_DW7_LN0(phy)); val &= ~N_SCALAR_MASK; val |= N_SCALAR(ddi_translations[level].dw7_n_scalar); - I915_WRITE(ICL_PORT_TX_DW7_GRP(port), val); + I915_WRITE(ICL_PORT_TX_DW7_GRP(phy), val); } static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, @@ -2474,7 +2474,7 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, enum intel_output_type type) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum port port = encoder->port; + enum phy phy = intel_port_to_phy(dev_priv, encoder->port); int width = 0; int rate = 0; u32 val; @@ -2495,12 +2495,12 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, * set PORT_PCS_DW1 cmnkeeper_enable to 1b, * else clear to 0b. */ - val = I915_READ(ICL_PORT_PCS_DW1_LN0(port)); + val = I915_READ(ICL_PORT_PCS_DW1_LN0(phy)); if (type == INTEL_OUTPUT_HDMI) val &= ~COMMON_KEEPER_EN; else val |= COMMON_KEEPER_EN; - I915_WRITE(ICL_PORT_PCS_DW1_GRP(port), val); + I915_WRITE(ICL_PORT_PCS_DW1_GRP(phy), val); /* 2. Program loadgen select */ /* @@ -2510,33 +2510,33 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, * > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0) */ for (ln = 0; ln <= 3; ln++) { - val = I915_READ(ICL_PORT_TX_DW4_LN(ln, port)); + val = I915_READ(ICL_PORT_TX_DW4_LN(ln, phy)); val &= ~LOADGEN_SELECT; if ((rate <= 600000 && width == 4 && ln >= 1) || (rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) { val |= LOADGEN_SELECT; } - I915_WRITE(ICL_PORT_TX_DW4_LN(ln, port), val); + I915_WRITE(ICL_PORT_TX_DW4_LN(ln, phy), val); } /* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */ - val = I915_READ(ICL_PORT_CL_DW5(port)); + val = I915_READ(ICL_PORT_CL_DW5(phy)); val |= SUS_CLOCK_CONFIG; - I915_WRITE(ICL_PORT_CL_DW5(port), val); + I915_WRITE(ICL_PORT_CL_DW5(phy), val); /* 4. 
Clear training enable to change swing values */ - val = I915_READ(ICL_PORT_TX_DW5_LN0(port)); + val = I915_READ(ICL_PORT_TX_DW5_LN0(phy)); val &= ~TX_TRAINING_EN; - I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); + I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), val); /* 5. Program swing and de-emphasis */ - icl_ddi_combo_vswing_program(dev_priv, level, port, type, rate); + icl_ddi_combo_vswing_program(dev_priv, level, phy, type, rate); /* 6. Set training enable to trigger update */ - val = I915_READ(ICL_PORT_TX_DW5_LN0(port)); + val = I915_READ(ICL_PORT_TX_DW5_LN0(phy)); val |= TX_TRAINING_EN; - I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); + I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), val); } static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, @@ -3121,6 +3121,7 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; + enum phy phy = intel_port_to_phy(dev_priv, port); struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST); int level = intel_ddi_dp_level(intel_dp); @@ -3156,7 +3157,7 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL; - intel_combo_phy_power_up_lanes(dev_priv, port, false, + intel_combo_phy_power_up_lanes(dev_priv, phy, false, crtc_state->lane_count, lane_reversal); } diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index d53285fb883f..8a4a57ef82a2 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -268,6 +268,10 @@ enum phy { for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \ for_each_if((__ports_mask) & BIT(__port)) +#define for_each_phy_masked(__phy, __phys_mask) \ + for ((__phy) = PHY_A; (__phy) < I915_MAX_PHYS; (__phy)++) \ + for_each_if((__phys_mask) & BIT(__phy)) + #define for_each_crtc(dev, crtc) \ list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head) diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 7437fc71d289..a24d1859b37b 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -397,7 +397,7 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv, hsw_wait_for_power_well_disable(dev_priv, power_well); } -#define ICL_AUX_PW_TO_PORT(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A) +#define ICL_AUX_PW_TO_PHY(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A) static void icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, @@ -405,21 +405,21 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, { const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; int pw_idx = power_well->desc->hsw.idx; - enum port port = ICL_AUX_PW_TO_PORT(pw_idx); + enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx); u32 val; val = I915_READ(regs->driver); I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx)); - val = I915_READ(ICL_PORT_CL_DW12(port)); - I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX); + val = I915_READ(ICL_PORT_CL_DW12(phy)); + I915_WRITE(ICL_PORT_CL_DW12(phy), val | ICL_LANE_ENABLE_AUX); hsw_wait_for_power_well_enable(dev_priv, power_well); /* Display WA #1178: icl */ if (IS_ICELAKE(dev_priv) && 
pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B && - !intel_bios_is_port_edp(dev_priv, port)) { + !intel_bios_is_port_edp(dev_priv, (enum port)phy)) { val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx)); val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS; I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val); @@ -432,11 +432,11 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, { const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; int pw_idx = power_well->desc->hsw.idx; - enum port port = ICL_AUX_PW_TO_PORT(pw_idx); + enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx); u32 val; - val = I915_READ(ICL_PORT_CL_DW12(port)); - I915_WRITE(ICL_PORT_CL_DW12(port), val & ~ICL_LANE_ENABLE_AUX); + val = I915_READ(ICL_PORT_CL_DW12(phy)); + I915_WRITE(ICL_PORT_CL_DW12(phy), val & ~ICL_LANE_ENABLE_AUX); val = I915_READ(regs->driver); I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx)); diff --git a/drivers/gpu/drm/i915/display/intel_dsi.h b/drivers/gpu/drm/i915/display/intel_dsi.h index 6d20434636cd..1cd24bd46518 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi.h +++ b/drivers/gpu/drm/i915/display/intel_dsi.h @@ -49,8 +49,11 @@ struct intel_dsi { struct intel_connector *attached_connector; - /* bit mask of ports being driven */ - u16 ports; + /* bit mask of ports (vlv dsi) or phys (icl dsi) being driven */ + union { + u16 ports; /* VLV DSI */ + u16 phys; /* ICL DSI */ + }; /* if true, use HS mode, otherwise LP */ bool hs; @@ -132,7 +135,10 @@ static inline struct intel_dsi_host *to_intel_dsi_host(struct mipi_dsi_host *h) return container_of(h, struct intel_dsi_host, base); } -#define for_each_dsi_port(__port, __ports_mask) for_each_port_masked(__port, __ports_mask) +#define for_each_dsi_port(__port, __ports_mask) \ + for_each_port_masked(__port, __ports_mask) +#define for_each_dsi_phy(__phy, __phys_mask) \ + for_each_phy_masked(__phy, __phys_mask) static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder) { diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index d3fc575a94db..95b9ca1fda2e 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1794,20 +1794,20 @@ enum i915_power_well_id { #define _ICL_COMBOPHY_A 0x162000 #define _ICL_COMBOPHY_B 0x6C000 #define _EHL_COMBOPHY_C 0x160000 -#define _ICL_COMBOPHY(port) _PICK(port, _ICL_COMBOPHY_A, \ +#define _ICL_COMBOPHY(phy) _PICK(phy, _ICL_COMBOPHY_A, \ _ICL_COMBOPHY_B, \ _EHL_COMBOPHY_C) /* CNL/ICL Port CL_DW registers */ -#define _ICL_PORT_CL_DW(dw, port) (_ICL_COMBOPHY(port) + \ +#define _ICL_PORT_CL_DW(dw, phy) (_ICL_COMBOPHY(phy) + \ 4 * (dw)) #define CNL_PORT_CL1CM_DW5 _MMIO(0x162014) -#define ICL_PORT_CL_DW5(port) _MMIO(_ICL_PORT_CL_DW(5, port)) +#define ICL_PORT_CL_DW5(phy) _MMIO(_ICL_PORT_CL_DW(5, phy)) #define CL_POWER_DOWN_ENABLE (1 << 4) #define SUS_CLOCK_CONFIG (3 << 0) -#define ICL_PORT_CL_DW10(port) _MMIO(_ICL_PORT_CL_DW(10, port)) +#define ICL_PORT_CL_DW10(phy) _MMIO(_ICL_PORT_CL_DW(10, phy)) #define PG_SEQ_DELAY_OVERRIDE_MASK (3 << 25) #define PG_SEQ_DELAY_OVERRIDE_SHIFT 25 #define PG_SEQ_DELAY_OVERRIDE_ENABLE (1 << 24) @@ -1822,23 +1822,23 @@ enum i915_power_well_id { #define PWR_DOWN_LN_MASK (0xf << 4) #define PWR_DOWN_LN_SHIFT 4 -#define ICL_PORT_CL_DW12(port) _MMIO(_ICL_PORT_CL_DW(12, port)) +#define ICL_PORT_CL_DW12(phy) _MMIO(_ICL_PORT_CL_DW(12, phy)) #define ICL_LANE_ENABLE_AUX (1 << 0) /* CNL/ICL Port COMP_DW registers */ #define _ICL_PORT_COMP 0x100 -#define _ICL_PORT_COMP_DW(dw, port) 
(_ICL_COMBOPHY(port) + \ +#define _ICL_PORT_COMP_DW(dw, phy) (_ICL_COMBOPHY(phy) + \ _ICL_PORT_COMP + 4 * (dw)) #define CNL_PORT_COMP_DW0 _MMIO(0x162100) -#define ICL_PORT_COMP_DW0(port) _MMIO(_ICL_PORT_COMP_DW(0, port)) +#define ICL_PORT_COMP_DW0(phy) _MMIO(_ICL_PORT_COMP_DW(0, phy)) #define COMP_INIT (1 << 31) #define CNL_PORT_COMP_DW1 _MMIO(0x162104) -#define ICL_PORT_COMP_DW1(port) _MMIO(_ICL_PORT_COMP_DW(1, port)) +#define ICL_PORT_COMP_DW1(phy) _MMIO(_ICL_PORT_COMP_DW(1, phy)) #define CNL_PORT_COMP_DW3 _MMIO(0x16210c) -#define ICL_PORT_COMP_DW3(port) _MMIO(_ICL_PORT_COMP_DW(3, port)) +#define ICL_PORT_COMP_DW3(phy) _MMIO(_ICL_PORT_COMP_DW(3, phy)) #define PROCESS_INFO_DOT_0 (0 << 26) #define PROCESS_INFO_DOT_1 (1 << 26) #define PROCESS_INFO_DOT_4 (2 << 26) @@ -1850,14 +1850,14 @@ enum i915_power_well_id { #define VOLTAGE_INFO_MASK (3 << 24) #define VOLTAGE_INFO_SHIFT 24 -#define ICL_PORT_COMP_DW8(port) _MMIO(_ICL_PORT_COMP_DW(8, port)) +#define ICL_PORT_COMP_DW8(phy) _MMIO(_ICL_PORT_COMP_DW(8, phy)) #define IREFGEN (1 << 24) #define CNL_PORT_COMP_DW9 _MMIO(0x162124) -#define ICL_PORT_COMP_DW9(port) _MMIO(_ICL_PORT_COMP_DW(9, port)) +#define ICL_PORT_COMP_DW9(phy) _MMIO(_ICL_PORT_COMP_DW(9, phy)) #define CNL_PORT_COMP_DW10 _MMIO(0x162128) -#define ICL_PORT_COMP_DW10(port) _MMIO(_ICL_PORT_COMP_DW(10, port)) +#define ICL_PORT_COMP_DW10(phy) _MMIO(_ICL_PORT_COMP_DW(10, phy)) /* CNL/ICL Port PCS registers */ #define _CNL_PORT_PCS_DW1_GRP_AE 0x162304 @@ -1870,14 +1870,14 @@ enum i915_power_well_id { #define _CNL_PORT_PCS_DW1_LN0_C 0x162C04 #define _CNL_PORT_PCS_DW1_LN0_D 0x162E04 #define _CNL_PORT_PCS_DW1_LN0_F 0x162804 -#define CNL_PORT_PCS_DW1_GRP(port) _MMIO(_PICK(port, \ +#define CNL_PORT_PCS_DW1_GRP(phy) _MMIO(_PICK(phy, \ _CNL_PORT_PCS_DW1_GRP_AE, \ _CNL_PORT_PCS_DW1_GRP_B, \ _CNL_PORT_PCS_DW1_GRP_C, \ _CNL_PORT_PCS_DW1_GRP_D, \ _CNL_PORT_PCS_DW1_GRP_AE, \ _CNL_PORT_PCS_DW1_GRP_F)) -#define CNL_PORT_PCS_DW1_LN0(port) _MMIO(_PICK(port, \ +#define CNL_PORT_PCS_DW1_LN0(phy) _MMIO(_PICK(phy, \ _CNL_PORT_PCS_DW1_LN0_AE, \ _CNL_PORT_PCS_DW1_LN0_B, \ _CNL_PORT_PCS_DW1_LN0_C, \ @@ -1888,15 +1888,15 @@ enum i915_power_well_id { #define _ICL_PORT_PCS_AUX 0x300 #define _ICL_PORT_PCS_GRP 0x600 #define _ICL_PORT_PCS_LN(ln) (0x800 + (ln) * 0x100) -#define _ICL_PORT_PCS_DW_AUX(dw, port) (_ICL_COMBOPHY(port) + \ +#define _ICL_PORT_PCS_DW_AUX(dw, phy) (_ICL_COMBOPHY(phy) + \ _ICL_PORT_PCS_AUX + 4 * (dw)) -#define _ICL_PORT_PCS_DW_GRP(dw, port) (_ICL_COMBOPHY(port) + \ +#define _ICL_PORT_PCS_DW_GRP(dw, phy) (_ICL_COMBOPHY(phy) + \ _ICL_PORT_PCS_GRP + 4 * (dw)) -#define _ICL_PORT_PCS_DW_LN(dw, ln, port) (_ICL_COMBOPHY(port) + \ +#define _ICL_PORT_PCS_DW_LN(dw, ln, phy) (_ICL_COMBOPHY(phy) + \ _ICL_PORT_PCS_LN(ln) + 4 * (dw)) -#define ICL_PORT_PCS_DW1_AUX(port) _MMIO(_ICL_PORT_PCS_DW_AUX(1, port)) -#define ICL_PORT_PCS_DW1_GRP(port) _MMIO(_ICL_PORT_PCS_DW_GRP(1, port)) -#define ICL_PORT_PCS_DW1_LN0(port) _MMIO(_ICL_PORT_PCS_DW_LN(1, 0, port)) +#define ICL_PORT_PCS_DW1_AUX(phy) _MMIO(_ICL_PORT_PCS_DW_AUX(1, phy)) +#define ICL_PORT_PCS_DW1_GRP(phy) _MMIO(_ICL_PORT_PCS_DW_GRP(1, phy)) +#define ICL_PORT_PCS_DW1_LN0(phy) _MMIO(_ICL_PORT_PCS_DW_LN(1, 0, phy)) #define COMMON_KEEPER_EN (1 << 26) #define LATENCY_OPTIM_MASK (0x3 << 2) #define LATENCY_OPTIM_VAL(x) ((x) << 2) @@ -1933,18 +1933,18 @@ enum i915_power_well_id { #define _ICL_PORT_TX_GRP 0x680 #define _ICL_PORT_TX_LN(ln) (0x880 + (ln) * 0x100) -#define _ICL_PORT_TX_DW_AUX(dw, port) (_ICL_COMBOPHY(port) + \ +#define _ICL_PORT_TX_DW_AUX(dw, phy) 
(_ICL_COMBOPHY(phy) + \ _ICL_PORT_TX_AUX + 4 * (dw)) -#define _ICL_PORT_TX_DW_GRP(dw, port) (_ICL_COMBOPHY(port) + \ +#define _ICL_PORT_TX_DW_GRP(dw, phy) (_ICL_COMBOPHY(phy) + \ _ICL_PORT_TX_GRP + 4 * (dw)) -#define _ICL_PORT_TX_DW_LN(dw, ln, port) (_ICL_COMBOPHY(port) + \ +#define _ICL_PORT_TX_DW_LN(dw, ln, phy) (_ICL_COMBOPHY(phy) + \ _ICL_PORT_TX_LN(ln) + 4 * (dw)) #define CNL_PORT_TX_DW2_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(2, port)) #define CNL_PORT_TX_DW2_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(2, port)) -#define ICL_PORT_TX_DW2_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(2, port)) -#define ICL_PORT_TX_DW2_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(2, port)) -#define ICL_PORT_TX_DW2_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(2, 0, port)) +#define ICL_PORT_TX_DW2_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(2, phy)) +#define ICL_PORT_TX_DW2_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(2, phy)) +#define ICL_PORT_TX_DW2_LN0(phy) _MMIO(_ICL_PORT_TX_DW_LN(2, 0, phy)) #define SWING_SEL_UPPER(x) (((x) >> 3) << 15) #define SWING_SEL_UPPER_MASK (1 << 15) #define SWING_SEL_LOWER(x) (((x) & 0x7) << 11) @@ -1961,10 +1961,10 @@ enum i915_power_well_id { #define CNL_PORT_TX_DW4_LN(ln, port) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)) + \ ((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \ _CNL_PORT_TX_DW4_LN0_AE))) -#define ICL_PORT_TX_DW4_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(4, port)) -#define ICL_PORT_TX_DW4_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(4, port)) -#define ICL_PORT_TX_DW4_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(4, 0, port)) -#define ICL_PORT_TX_DW4_LN(ln, port) _MMIO(_ICL_PORT_TX_DW_LN(4, ln, port)) +#define ICL_PORT_TX_DW4_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(4, phy)) +#define ICL_PORT_TX_DW4_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(4, phy)) +#define ICL_PORT_TX_DW4_LN0(phy) _MMIO(_ICL_PORT_TX_DW_LN(4, 0, phy)) +#define ICL_PORT_TX_DW4_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(4, ln, phy)) #define LOADGEN_SELECT (1 << 31) #define POST_CURSOR_1(x) ((x) << 12) #define POST_CURSOR_1_MASK (0x3F << 12) @@ -1975,9 +1975,9 @@ enum i915_power_well_id { #define CNL_PORT_TX_DW5_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(5, port)) #define CNL_PORT_TX_DW5_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(5, port)) -#define ICL_PORT_TX_DW5_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(5, port)) -#define ICL_PORT_TX_DW5_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(5, port)) -#define ICL_PORT_TX_DW5_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(5, 0, port)) +#define ICL_PORT_TX_DW5_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(5, phy)) +#define ICL_PORT_TX_DW5_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(5, phy)) +#define ICL_PORT_TX_DW5_LN0(phy) _MMIO(_ICL_PORT_TX_DW_LN(5, 0, phy)) #define TX_TRAINING_EN (1 << 31) #define TAP2_DISABLE (1 << 30) #define TAP3_DISABLE (1 << 29) @@ -1988,10 +1988,10 @@ enum i915_power_well_id { #define CNL_PORT_TX_DW7_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(7, (port))) #define CNL_PORT_TX_DW7_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(7, (port))) -#define ICL_PORT_TX_DW7_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(7, port)) -#define ICL_PORT_TX_DW7_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(7, port)) -#define ICL_PORT_TX_DW7_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(7, 0, port)) -#define ICL_PORT_TX_DW7_LN(ln, port) _MMIO(_ICL_PORT_TX_DW_LN(7, ln, port)) +#define ICL_PORT_TX_DW7_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(7, phy)) +#define ICL_PORT_TX_DW7_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(7, phy)) +#define ICL_PORT_TX_DW7_LN0(phy) _MMIO(_ICL_PORT_TX_DW_LN(7, 0, phy)) +#define ICL_PORT_TX_DW7_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(7, ln, phy)) #define N_SCALAR(x) ((x) << 24) #define N_SCALAR_MASK (0x7F << 24) -- cgit v1.2.3 From d8fe2ab6bb283dd9b511ea9bea52874fafd3de61 Mon 
Sep 17 00:00:00 2001 From: Matt Roper Date: Tue, 9 Jul 2019 11:39:33 -0700 Subject: drm/i915: Transition port type checks to phy checks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Transition the remaining uses of intel_port_is_* over to the equivalent intel_phy_is_* functions and drop the port functions. v5: Fix a call in a debug function that's only called when CONFIG_DRM_I915_DEBUG_RUNTIME_PM is on. (CI) Cc: José Roberto de Souza Cc: Lucas De Marchi Signed-off-by: Matt Roper Reviewed-by: Ville Syrjälä Reviewed-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190709183934.445-5-matthew.d.roper@intel.com --- drivers/gpu/drm/i915/display/intel_bios.c | 4 ++- drivers/gpu/drm/i915/display/intel_ddi.c | 38 ++++++++++++++-------- drivers/gpu/drm/i915/display/intel_display.c | 38 ++++++---------------- drivers/gpu/drm/i915/display/intel_display_power.c | 4 ++- drivers/gpu/drm/i915/display/intel_dp.c | 15 +++++---- drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 11 ++++--- drivers/gpu/drm/i915/intel_drv.h | 2 -- 7 files changed, 55 insertions(+), 57 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index 0c9808132d67..4fdbb5c35d87 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -28,6 +28,7 @@ #include #include +#include "display/intel_display.h" #include "display/intel_gmbus.h" #include "i915_drv.h" @@ -1733,12 +1734,13 @@ init_vbt_missing_defaults(struct drm_i915_private *dev_priv) for (port = PORT_A; port < I915_MAX_PORTS; port++) { struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port]; + enum phy phy = intel_port_to_phy(dev_priv, port); /* * VBT has the TypeC mode (native,TBT/USB) and we don't want * to detect it. 
*/ - if (intel_port_is_tc(dev_priv, port)) + if (intel_phy_is_tc(dev_priv, phy)) continue; info->supports_dvi = (port != PORT_A && port != PORT_E); diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index b5c30a02761b..1662e5c2be1c 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -868,11 +868,12 @@ icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate, static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port) { int n_entries, level, default_entry; + enum phy phy = intel_port_to_phy(dev_priv, port); level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift; if (INTEL_GEN(dev_priv) >= 11) { - if (intel_port_is_combophy(dev_priv, port)) + if (intel_phy_is_combo(dev_priv, phy)) icl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI, 0, &n_entries); else @@ -1487,9 +1488,10 @@ static void icl_ddi_clock_get(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state; enum port port = encoder->port; + enum phy phy = intel_port_to_phy(dev_priv, port); int link_clock; - if (intel_port_is_combophy(dev_priv, port)) { + if (intel_phy_is_combo(dev_priv, phy)) { link_clock = cnl_calc_wrpll_link(dev_priv, pll_state); } else { enum intel_dpll_id pll_id = intel_get_shared_dpll_id(dev_priv, @@ -2086,6 +2088,7 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_digital_port *dig_port; + enum phy phy = intel_port_to_phy(dev_priv, encoder->port); /* * TODO: Add support for MST encoders. Atm, the following should never @@ -2103,7 +2106,7 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder, * ports. 
*/ if (intel_crtc_has_dp_encoder(crtc_state) || - intel_port_is_tc(dev_priv, encoder->port)) + intel_phy_is_tc(dev_priv, phy)) intel_display_power_get(dev_priv, intel_ddi_main_link_aux_domain(dig_port)); @@ -2228,10 +2231,11 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder) struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); enum port port = encoder->port; + enum phy phy = intel_port_to_phy(dev_priv, port); int n_entries; if (INTEL_GEN(dev_priv) >= 11) { - if (intel_port_is_combophy(dev_priv, port)) + if (intel_phy_is_combo(dev_priv, phy)) icl_get_combo_buf_trans(dev_priv, encoder->type, intel_dp->link_rate, &n_entries); else @@ -2664,9 +2668,9 @@ static void icl_ddi_vswing_sequence(struct intel_encoder *encoder, enum intel_output_type type) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum port port = encoder->port; + enum phy phy = intel_port_to_phy(dev_priv, encoder->port); - if (intel_port_is_combophy(dev_priv, port)) + if (intel_phy_is_combo(dev_priv, phy)) icl_combo_phy_ddi_vswing_sequence(encoder, level, type); else icl_mg_phy_ddi_vswing_sequence(encoder, link_clock, level); @@ -2877,6 +2881,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; + enum phy phy = intel_port_to_phy(dev_priv, port); u32 val; const struct intel_shared_dpll *pll = crtc_state->shared_dpll; @@ -2886,7 +2891,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder, mutex_lock(&dev_priv->dpll_lock); if (INTEL_GEN(dev_priv) >= 11) { - if (!intel_port_is_combophy(dev_priv, port)) + if (!intel_phy_is_combo(dev_priv, phy)) I915_WRITE(DDI_CLK_SEL(port), icl_pll_to_ddi_clk_sel(encoder, crtc_state)); } else if (IS_CANNONLAKE(dev_priv)) { @@ -2926,9 +2931,10 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; + enum phy phy = intel_port_to_phy(dev_priv, port); if (INTEL_GEN(dev_priv) >= 11) { - if (!intel_port_is_combophy(dev_priv, port)) + if (!intel_phy_is_combo(dev_priv, phy)) I915_WRITE(DDI_CLK_SEL(port), DDI_CLK_SEL_NONE); } else if (IS_CANNONLAKE(dev_priv)) { I915_WRITE(DPCLKA_CFGCR0, I915_READ(DPCLKA_CFGCR0) | @@ -3135,7 +3141,7 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, intel_ddi_clk_select(encoder, crtc_state); - if (!intel_port_is_tc(dev_priv, port) || + if (!intel_phy_is_tc(dev_priv, phy) || dig_port->tc_mode != TC_PORT_TBT_ALT) intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); @@ -3153,7 +3159,7 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, else intel_prepare_dp_ddi_buffers(encoder, crtc_state); - if (intel_port_is_combophy(dev_priv, port)) { + if (intel_phy_is_combo(dev_priv, phy)) { bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL; @@ -3305,6 +3311,7 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder, struct intel_dp *intel_dp = &dig_port->dp; bool is_mst = intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST); + enum phy phy = intel_port_to_phy(dev_priv, encoder->port); if (!is_mst) { intel_ddi_disable_pipe_clock(old_crtc_state); @@ -3320,7 +3327,7 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder, intel_edp_panel_vdd_on(intel_dp); intel_edp_panel_off(intel_dp); - if (!intel_port_is_tc(dev_priv, encoder->port) || + if 
(!intel_phy_is_tc(dev_priv, phy) || dig_port->tc_mode != TC_PORT_TBT_ALT) intel_display_power_put_unchecked(dev_priv, dig_port->ddi_io_power_domain); @@ -3639,7 +3646,8 @@ intel_ddi_pre_pll_enable(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); - bool is_tc_port = intel_port_is_tc(dev_priv, encoder->port); + enum phy phy = intel_port_to_phy(dev_priv, encoder->port); + bool is_tc_port = intel_phy_is_tc(dev_priv, phy); if (is_tc_port) intel_tc_port_get_link(dig_port, crtc_state->lane_count); @@ -3666,7 +3674,8 @@ intel_ddi_post_pll_disable(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); - bool is_tc_port = intel_port_is_tc(dev_priv, encoder->port); + enum phy phy = intel_port_to_phy(dev_priv, encoder->port); + bool is_tc_port = intel_phy_is_tc(dev_priv, phy); if (intel_crtc_has_dp_encoder(crtc_state) || is_tc_port) intel_display_power_put_unchecked(dev_priv, @@ -4178,6 +4187,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) struct drm_encoder *encoder; bool init_hdmi, init_dp, init_lspcon = false; enum pipe pipe; + enum phy phy = intel_port_to_phy(dev_priv, port); init_hdmi = port_info->supports_dvi || port_info->supports_hdmi; init_dp = port_info->supports_dp; @@ -4241,7 +4251,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port); intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port); - if (intel_port_is_tc(dev_priv, port)) { + if (intel_phy_is_tc(dev_priv, phy)) { bool is_legacy = !port_info->supports_typec_usb && !port_info->supports_tbt; diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 44c79f8bd028..c2ed4bd8d56b 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -6671,20 +6671,6 @@ static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state) I915_WRITE(BCLRPAT(crtc->pipe), 0); } -bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port) -{ - if (port == PORT_NONE) - return false; - - if (IS_ELKHARTLAKE(dev_priv)) - return port <= PORT_C; - - if (INTEL_GEN(dev_priv) >= 11) - return port <= PORT_B; - - return false; -} - bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy) { if (phy == PHY_NONE) @@ -6699,14 +6685,6 @@ bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy) return false; } -bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port) -{ - if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv)) - return port >= PORT_C && port <= PORT_F; - - return false; -} - bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy) { if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv)) @@ -6756,8 +6734,9 @@ enum intel_display_power_domain intel_aux_power_domain(struct intel_digital_port *dig_port) { struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port); - if (intel_port_is_tc(dev_priv, dig_port->base.port) && + if (intel_phy_is_tc(dev_priv, phy) && dig_port->tc_mode == TC_PORT_TBT_ALT) { switch (dig_port->aux_ch) { case AUX_CH_C: @@ -10091,16 +10070,17 @@ static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv, enum port 
port, struct intel_crtc_state *pipe_config) { + enum phy phy = intel_port_to_phy(dev_priv, port); enum icl_port_dpll_id port_dpll_id; enum intel_dpll_id id; u32 temp; - if (intel_port_is_combophy(dev_priv, port)) { + if (intel_phy_is_combo(dev_priv, phy)) { temp = I915_READ(ICL_DPCLKA_CFGCR0) & - DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port); - id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port); + ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy); + id = temp >> ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy); port_dpll_id = ICL_PORT_DPLL_DEFAULT; - } else if (intel_port_is_tc(dev_priv, port)) { + } else if (intel_phy_is_tc(dev_priv, phy)) { u32 clk_sel = I915_READ(DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK; if (clk_sel == DDI_CLK_SEL_MG) { @@ -16962,9 +16942,11 @@ intel_modeset_setup_hw_state(struct drm_device *dev, /* Sanitize the TypeC port mode upfront, encoders depend on this */ for_each_intel_encoder(dev, encoder) { + enum phy phy = intel_port_to_phy(dev_priv, encoder->port); + /* We need to sanitize only the MST primary port. */ if (encoder->type != INTEL_OUTPUT_DP_MST && - intel_port_is_tc(dev_priv, encoder->port)) + intel_phy_is_tc(dev_priv, phy)) intel_tc_port_sanitize(enc_to_dig_port(&encoder->base)); } diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index a24d1859b37b..7e22a2704843 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -489,7 +489,9 @@ static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv, aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well); for_each_intel_encoder(&dev_priv->drm, encoder) { - if (!intel_port_is_tc(dev_priv, encoder->port)) + enum phy phy = intel_port_to_phy(dev_priv, encoder->port); + + if (!intel_phy_is_tc(dev_priv, phy)) continue; /* We'll check the MST primary port */ diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 0bdb7ecc5a81..a9db16de2999 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -297,9 +297,9 @@ static int icl_max_source_rate(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); - enum port port = dig_port->base.port; + enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port); - if (intel_port_is_combophy(dev_priv, port) && + if (intel_phy_is_combo(dev_priv, phy) && !IS_ELKHARTLAKE(dev_priv) && !intel_dp_is_edp(intel_dp)) return 540000; @@ -1192,7 +1192,8 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); struct intel_uncore *uncore = &i915->uncore; - bool is_tc_port = intel_port_is_tc(i915, intel_dig_port->base.port); + enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port); + bool is_tc_port = intel_phy_is_tc(i915, phy); i915_reg_t ch_ctl, ch_data[5]; u32 aux_clock_divider; enum intel_display_power_domain aux_domain = @@ -5211,10 +5212,11 @@ static bool icl_digital_port_connected(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + enum phy phy = intel_port_to_phy(dev_priv, encoder->port); - if (intel_port_is_combophy(dev_priv, encoder->port)) + if (intel_phy_is_combo(dev_priv, phy)) return icl_combo_port_connected(dev_priv, dig_port); - else if (intel_port_is_tc(dev_priv, encoder->port)) + else 
if (intel_phy_is_tc(dev_priv, phy)) return intel_tc_port_connected(dig_port); else MISSING_CASE(encoder->hpd_pin); @@ -7113,6 +7115,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, struct drm_device *dev = intel_encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); enum port port = intel_encoder->port; + enum phy phy = intel_port_to_phy(dev_priv, port); int type; /* Initialize the work for modeset in case of link train failure */ @@ -7139,7 +7142,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, * Currently we don't support eDP on TypeC ports, although in * theory it could work on TypeC legacy ports. */ - WARN_ON(intel_port_is_tc(dev_priv, port)); + WARN_ON(intel_phy_is_tc(dev_priv, phy)); type = DRM_MODE_CONNECTOR_eDP; } else { type = DRM_MODE_CONNECTOR_DisplayPort; diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index 30d7500eb66c..fc6f3c52629a 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -2584,7 +2584,8 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state, struct skl_wrpll_params pll_params = { 0 }; bool ret; - if (intel_port_is_tc(dev_priv, encoder->port)) + if (intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, + encoder->port))) ret = icl_calc_tbt_pll(crtc_state, &pll_params); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) || intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) @@ -3004,14 +3005,14 @@ static bool icl_get_dplls(struct intel_atomic_state *state, struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); - enum port port = encoder->port; + enum phy phy = intel_port_to_phy(dev_priv, encoder->port); - if (intel_port_is_combophy(dev_priv, port)) + if (intel_phy_is_combo(dev_priv, phy)) return icl_get_combo_phy_dpll(state, crtc, encoder); - else if (intel_port_is_tc(dev_priv, port)) + else if (intel_phy_is_tc(dev_priv, phy)) return icl_get_tc_phy_dplls(state, crtc, encoder); - MISSING_CASE(port); + MISSING_CASE(phy); return false; } diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 815c26c0b98c..770f9f6aad84 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1492,9 +1492,7 @@ void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv); void intel_encoder_destroy(struct drm_encoder *encoder); struct drm_display_mode * intel_encoder_current_mode(struct intel_encoder *encoder); -bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port); bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy); -bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port); bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy); enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port); -- cgit v1.2.3 From 719d240026022547e3d908e29f4926bf63584c17 Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Tue, 9 Jul 2019 11:39:34 -0700 Subject: drm/i915/ehl: Enable DDI-D MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit EHL has four DDI's (DDI-A and DDI-D share combo PHY A). 
Cc: José Roberto de Souza Signed-off-by: Matt Roper Reviewed-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190709183934.445-6-matthew.d.roper@intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index c2ed4bd8d56b..0286b97caa22 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -15308,6 +15308,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) intel_ddi_init(dev_priv, PORT_A); intel_ddi_init(dev_priv, PORT_B); intel_ddi_init(dev_priv, PORT_C); + intel_ddi_init(dev_priv, PORT_D); icl_dsi_init(dev_priv); } else if (INTEL_GEN(dev_priv) >= 11) { intel_ddi_init(dev_priv, PORT_A); -- cgit v1.2.3 From 47c4bdd6d3ea771f31ab2ef48219d713e808970f Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 10 Jul 2019 21:04:28 +0100 Subject: drm/i915/gt: Drop the duplicate icl workaround The extra w/a was revived in the backmerge that was meant to fix it! Fixes: 88c90e800675 ("Merge drm/drm-next into drm-intel-next-queued") Signed-off-by: Chris Wilson Cc: Rodrigo Vivi Reviewed-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20190710200428.3275-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_workarounds.c | 6 ------ 1 file changed, 6 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index f6fd6905ee6f..9e069286d3ce 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -531,12 +531,6 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine, { struct drm_i915_private *i915 = engine->i915; - /* WaDisableBankHangMode:icl */ - wa_write(wal, - GEN8_L3CNTLREG, - intel_uncore_read(engine->uncore, GEN8_L3CNTLREG) | - GEN8_ERRDETBCTRL); - /* WaDisableBankHangMode:icl */ wa_write(wal, GEN8_L3CNTLREG, -- cgit v1.2.3 From bf1315b830a4ea2e3842400982cf66f15b40b7ec Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 10 Jul 2019 17:14:13 +0100 Subject: drm/i915/selftests: Ensure we don't clamp a random offset to 32b Specify that we do want a 64b value for sizeof(u32) as we want to compute the mask of the upper 62bits. 
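
For illustration only, not part of the patch: a standalone user-space program sketching the type-promotion pitfall this change avoids. The mask32 variable models a build where size_t is only 32 bits wide (an assumption made explicit here), in which case the old `offset &= -sizeof(u32)` zero-extends a 32-bit mask and wipes the upper half of the 64-bit offset; round_down() masks in the offset's own type instead.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t offset = 0x123456789abcdef0ULL;

	/*
	 * Where size_t is 32 bits, -sizeof(uint32_t) is the 32-bit value
	 * 0xfffffffc; mask32 models that case explicitly.  The AND
	 * zero-extends mask32 to 64 bits and clears offset's upper half.
	 */
	uint32_t mask32 = (uint32_t)-sizeof(uint32_t);
	uint64_t clamped = offset & mask32;

	/*
	 * Masking in the offset's own 64-bit type, which is what
	 * round_down(offset, alignof_dword) does via __typeof__, only
	 * clears the low alignment bits.
	 */
	uint64_t rounded = offset & ~(uint64_t)(sizeof(uint32_t) - 1);

	printf("clamped=0x%016llx rounded=0x%016llx\n",
	       (unsigned long long)clamped,
	       (unsigned long long)rounded);
	return 0;
}
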
v2: Use round_down() for automatic type promotion Signed-off-by: Chris Wilson Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190710161413.7115-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c | 2 +- drivers/gpu/drm/i915/gt/intel_gpu_commands.h | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index 3abe15a08b6d..695bfb18b0d4 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -1539,7 +1539,7 @@ static int igt_vm_isolation(void *arg) div64_u64_rem(i915_prandom_u64_state(&prng), vm_total, &offset); - offset &= -sizeof(u32); + offset = round_down(offset, alignof_dword); offset += I915_GTT_PAGE_SIZE; err = write_to_scratch(ctx_a, engine, diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h index eec31e36aca7..69f34737325f 100644 --- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h +++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h @@ -7,6 +7,13 @@ #ifndef _INTEL_GPU_COMMANDS_H_ #define _INTEL_GPU_COMMANDS_H_ +/* + * Target address alignments required for GPU access e.g. + * MI_STORE_DWORD_IMM. + */ +#define alignof_dword 4 +#define alignof_qword 8 + /* * Instruction field definitions used by the command parser */ -- cgit v1.2.3 From 71b0846c17b9c86ffa9a18f09f8cfda4fe41ee86 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 9 Jul 2019 17:54:26 -0700 Subject: drm/i915/guc: Remove preemption support for current fw MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Preemption via GuC submission is not being supported with its current legacy incarnation. The current FW does support a similar pre-emption flow via H2G, but it is class-based instead of being instance-based, which doesn't fit well with the i915 tracking. To fix this, the firmware is being updated to better support our needs with a new flow, so we can safely remove the old code. 
v2 (Daniele): resurrect & rebase, reword commit message, remove preempt_context as well Signed-off-by: Chris Wilson Signed-off-by: Daniele Ceraolo Spurio Cc: Chris Wilson Cc: Michal Wajdeczko Cc: Matthew Brost Cc: John Harrison Acked-by: Matthew Brost Reviewed-by: Michał Winiarski Link: https://patchwork.freedesktop.org/patch/msgid/20190710005437.3496-2-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_context.c | 17 -- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 13 -- drivers/gpu/drm/i915/gt/intel_engine_types.h | 1 - drivers/gpu/drm/i915/gt/intel_gt_pm.c | 4 - drivers/gpu/drm/i915/i915_debugfs.c | 5 - drivers/gpu/drm/i915/i915_drv.h | 2 - drivers/gpu/drm/i915/intel_guc.c | 31 ---- drivers/gpu/drm/i915/intel_guc.h | 9 -- drivers/gpu/drm/i915/intel_guc_submission.c | 231 +-------------------------- drivers/gpu/drm/i915/selftests/intel_guc.c | 31 +--- 10 files changed, 14 insertions(+), 330 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index e367dce2a696..078592912d97 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -644,18 +644,12 @@ static void init_contexts(struct drm_i915_private *i915) init_llist_head(&i915->contexts.free_list); } -static bool needs_preempt_context(struct drm_i915_private *i915) -{ - return USES_GUC_SUBMISSION(i915); -} - int i915_gem_contexts_init(struct drm_i915_private *dev_priv) { struct i915_gem_context *ctx; /* Reassure ourselves we are only called once */ GEM_BUG_ON(dev_priv->kernel_context); - GEM_BUG_ON(dev_priv->preempt_context); init_contexts(dev_priv); @@ -676,15 +670,6 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv) GEM_BUG_ON(!atomic_read(&ctx->hw_id_pin_count)); dev_priv->kernel_context = ctx; - /* highest priority; preempting task */ - if (needs_preempt_context(dev_priv)) { - ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX); - if (!IS_ERR(ctx)) - dev_priv->preempt_context = ctx; - else - DRM_ERROR("Failed to create preempt context; disabling preemption\n"); - } - DRM_DEBUG_DRIVER("%s context support initialized\n", DRIVER_CAPS(dev_priv)->has_logical_contexts ? "logical" : "fake"); @@ -695,8 +680,6 @@ void i915_gem_contexts_fini(struct drm_i915_private *i915) { lockdep_assert_held(&i915->drm.struct_mutex); - if (i915->preempt_context) - destroy_kernel_context(&i915->preempt_context); destroy_kernel_context(&i915->kernel_context); /* Must free all deferred contexts (via flush_workqueue) first */ diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index bdf279fa3b2e..76b5c068a26d 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -841,15 +841,6 @@ int intel_engine_init_common(struct intel_engine_cs *engine) if (ret) return ret; - /* - * Similarly the preempt context must always be available so that - * we can interrupt the engine at any time. However, as preemption - * is optional, we allow it to fail. 
- */ - if (i915->preempt_context) - pin_context(i915->preempt_context, engine, - &engine->preempt_context); - ret = measure_breadcrumb_dw(engine); if (ret < 0) goto err_unpin; @@ -861,8 +852,6 @@ int intel_engine_init_common(struct intel_engine_cs *engine) return 0; err_unpin: - if (engine->preempt_context) - intel_context_unpin(engine->preempt_context); intel_context_unpin(engine->kernel_context); return ret; } @@ -887,8 +876,6 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine) if (engine->default_state) i915_gem_object_put(engine->default_state); - if (engine->preempt_context) - intel_context_unpin(engine->preempt_context); intel_context_unpin(engine->kernel_context); GEM_BUG_ON(!llist_empty(&engine->barrier_tasks)); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index 7e056114344e..8be63019d707 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -288,7 +288,6 @@ struct intel_engine_cs { struct llist_head barrier_tasks; struct intel_context *kernel_context; /* pinned */ - struct intel_context *preempt_context; /* pinned; optional */ intel_engine_mask_t saturated; /* submitting semaphores too late? */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index 36ba80e6a0b7..da81b3a92d16 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -145,10 +145,6 @@ int intel_gt_resume(struct intel_gt *gt) if (ce) ce->ops->reset(ce); - ce = engine->preempt_context; - if (ce) - ce->ops->reset(ce); - engine->serial++; /* kernel context lost */ err = engine->resume(engine); diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 3e4f58f19362..b4d195677877 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -2010,11 +2010,6 @@ static int i915_guc_info(struct seq_file *m, void *data) seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client); i915_guc_client_info(m, dev_priv, guc->execbuf_client); - if (guc->preempt_client) { - seq_printf(m, "\nGuC preempt client @ %p:\n", - guc->preempt_client); - i915_guc_client_info(m, dev_priv, guc->preempt_client); - } /* Add more as required ... 
*/ diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index a9381e404fd5..2fa1d35efcb8 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1378,8 +1378,6 @@ struct drm_i915_private { struct intel_engine_cs *engine[I915_NUM_ENGINES]; /* Context used internally to idle the GPU and setup initial state */ struct i915_gem_context *kernel_context; - /* Context only to be used for injecting preemption commands */ - struct i915_gem_context *preempt_context; struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1] [MAX_ENGINE_INSTANCE + 1]; diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c index c40a6efdd33a..501b74f44374 100644 --- a/drivers/gpu/drm/i915/intel_guc.c +++ b/drivers/gpu/drm/i915/intel_guc.c @@ -101,8 +101,6 @@ void intel_guc_init_early(struct intel_guc *guc) static int guc_init_wq(struct intel_guc *guc) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); - /* * GuC log buffer flush work item has to do register access to * send the ack to GuC and this work item, if not synced before @@ -122,31 +120,6 @@ static int guc_init_wq(struct intel_guc *guc) return -ENOMEM; } - /* - * Even though both sending GuC action, and adding a new workitem to - * GuC workqueue are serialized (each with its own locking), since - * we're using mutliple engines, it's possible that we're going to - * issue a preempt request with two (or more - each for different - * engine) workitems in GuC queue. In this situation, GuC may submit - * all of them, which will make us very confused. - * Our preemption contexts may even already be complete - before we - * even had the chance to sent the preempt action to GuC!. Rather - * than introducing yet another lock, we can just use ordered workqueue - * to make sure we're always sending a single preemption request with a - * single workitem. - */ - if (HAS_LOGICAL_RING_PREEMPTION(dev_priv) && - USES_GUC_SUBMISSION(dev_priv)) { - guc->preempt_wq = alloc_ordered_workqueue("i915-guc_preempt", - WQ_HIGHPRI); - if (!guc->preempt_wq) { - destroy_workqueue(guc->log.relay.flush_wq); - DRM_ERROR("Couldn't allocate workqueue for GuC " - "preemption\n"); - return -ENOMEM; - } - } - return 0; } @@ -154,10 +127,6 @@ static void guc_fini_wq(struct intel_guc *guc) { struct workqueue_struct *wq; - wq = fetch_and_zero(&guc->preempt_wq); - if (wq) - destroy_workqueue(wq); - wq = fetch_and_zero(&guc->log.relay.flush_wq); if (wq) destroy_workqueue(wq); diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h index d91c96679dbb..ec1038c1f50e 100644 --- a/drivers/gpu/drm/i915/intel_guc.h +++ b/drivers/gpu/drm/i915/intel_guc.h @@ -37,11 +37,6 @@ struct __guc_ads_blob; -struct guc_preempt_work { - struct work_struct work; - struct intel_engine_cs *engine; -}; - /* * Top level structure of GuC. It handles firmware loading and manages client * pool and doorbells. 
intel_guc owns a intel_guc_client to replace the legacy @@ -76,10 +71,6 @@ struct intel_guc { void *shared_data_vaddr; struct intel_guc_client *execbuf_client; - struct intel_guc_client *preempt_client; - - struct guc_preempt_work preempt_work[I915_NUM_ENGINES]; - struct workqueue_struct *preempt_wq; DECLARE_BITMAP(doorbell_bitmap, GUC_NUM_DOORBELLS); /* Cyclic counter mod pagesize */ diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c index f104b94c14ef..8520bb224175 100644 --- a/drivers/gpu/drm/i915/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/intel_guc_submission.c @@ -46,11 +46,10 @@ enum { * * GuC client: * A intel_guc_client refers to a submission path through GuC. Currently, there - * are two clients. One of them (the execbuf_client) is charged with all - * submissions to the GuC, the other one (preempt_client) is responsible for - * preempting the execbuf_client. This struct is the owner of a doorbell, a - * process descriptor and a workqueue (all of them inside a single gem object - * that contains all required pages for these elements). + * is only one client, which is charged with all submissions to the GuC. This + * struct is the owner of a doorbell, a process descriptor and a workqueue (all + * of them inside a single gem object that contains all required pages for these + * elements). * * GuC stage descriptor: * During initialization, the driver allocates a static pool of 1024 such @@ -88,12 +87,6 @@ enum { * */ -static inline u32 intel_hws_preempt_done_address(struct intel_engine_cs *engine) -{ - return (i915_ggtt_offset(engine->status_page.vma) + - I915_GEM_HWS_PREEMPT_ADDR); -} - static inline struct i915_priolist *to_priolist(struct rb_node *rb) { return rb_entry(rb, struct i915_priolist, node); @@ -563,126 +556,6 @@ static void flush_ggtt_writes(struct i915_vma *vma) intel_uncore_posting_read_fw(&i915->uncore, GUC_STATUS); } -static void inject_preempt_context(struct work_struct *work) -{ - struct guc_preempt_work *preempt_work = - container_of(work, typeof(*preempt_work), work); - struct intel_engine_cs *engine = preempt_work->engine; - struct intel_guc *guc = container_of(preempt_work, typeof(*guc), - preempt_work[engine->id]); - struct intel_guc_client *client = guc->preempt_client; - struct guc_stage_desc *stage_desc = __get_stage_desc(client); - struct intel_context *ce = engine->preempt_context; - u32 data[7]; - - if (!ce->ring->emit) { /* recreate upon load/resume */ - u32 addr = intel_hws_preempt_done_address(engine); - u32 *cs; - - cs = ce->ring->vaddr; - if (engine->class == RENDER_CLASS) { - cs = gen8_emit_ggtt_write_rcs(cs, - GUC_PREEMPT_FINISHED, - addr, - PIPE_CONTROL_CS_STALL); - } else { - cs = gen8_emit_ggtt_write(cs, - GUC_PREEMPT_FINISHED, - addr, - 0); - *cs++ = MI_NOOP; - *cs++ = MI_NOOP; - } - *cs++ = MI_USER_INTERRUPT; - *cs++ = MI_NOOP; - - ce->ring->emit = GUC_PREEMPT_BREADCRUMB_BYTES; - GEM_BUG_ON((void *)cs - ce->ring->vaddr != ce->ring->emit); - - flush_ggtt_writes(ce->ring->vma); - } - - spin_lock_irq(&client->wq_lock); - guc_wq_item_append(client, engine->guc_id, lower_32_bits(ce->lrc_desc), - GUC_PREEMPT_BREADCRUMB_BYTES / sizeof(u64), 0); - spin_unlock_irq(&client->wq_lock); - - /* - * If GuC firmware performs an engine reset while that engine had - * a preemption pending, it will set the terminated attribute bit - * on our preemption stage descriptor. 
GuC firmware retains all - * pending work items for a high-priority GuC client, unlike the - * normal-priority GuC client where work items are dropped. It - * wants to make sure the preempt-to-idle work doesn't run when - * scheduling resumes, and uses this bit to inform its scheduler - * and presumably us as well. Our job is to clear it for the next - * preemption after reset, otherwise that and future preemptions - * will never complete. We'll just clear it every time. - */ - stage_desc->attribute &= ~GUC_STAGE_DESC_ATTR_TERMINATED; - - data[0] = INTEL_GUC_ACTION_REQUEST_PREEMPTION; - data[1] = client->stage_id; - data[2] = INTEL_GUC_PREEMPT_OPTION_DROP_WORK_Q | - INTEL_GUC_PREEMPT_OPTION_DROP_SUBMIT_Q; - data[3] = engine->guc_id; - data[4] = guc->execbuf_client->priority; - data[5] = guc->execbuf_client->stage_id; - data[6] = intel_guc_ggtt_offset(guc, guc->shared_data); - - if (WARN_ON(intel_guc_send(guc, data, ARRAY_SIZE(data)))) { - intel_write_status_page(engine, - I915_GEM_HWS_PREEMPT, - GUC_PREEMPT_NONE); - tasklet_schedule(&engine->execlists.tasklet); - } - - (void)I915_SELFTEST_ONLY(engine->execlists.preempt_hang.count++); -} - -/* - * We're using user interrupt and HWSP value to mark that preemption has - * finished and GPU is idle. Normally, we could unwind and continue similar to - * execlists submission path. Unfortunately, with GuC we also need to wait for - * it to finish its own postprocessing, before attempting to submit. Otherwise - * GuC may silently ignore our submissions, and thus we risk losing request at - * best, executing out-of-order and causing kernel panic at worst. - */ -#define GUC_PREEMPT_POSTPROCESS_DELAY_MS 10 -static void wait_for_guc_preempt_report(struct intel_engine_cs *engine) -{ - struct intel_guc *guc = &engine->i915->guc; - struct guc_shared_ctx_data *data = guc->shared_data_vaddr; - struct guc_ctx_report *report = - &data->preempt_ctx_report[engine->guc_id]; - - if (wait_for_atomic(report->report_return_status == - INTEL_GUC_REPORT_STATUS_COMPLETE, - GUC_PREEMPT_POSTPROCESS_DELAY_MS)) - DRM_ERROR("Timed out waiting for GuC preemption report\n"); - /* - * GuC is expecting that we're also going to clear the affected context - * counter, let's also reset the return status to not depend on GuC - * resetting it after recieving another preempt action - */ - report->affected_count = 0; - report->report_return_status = INTEL_GUC_REPORT_STATUS_UNKNOWN; -} - -static void complete_preempt_context(struct intel_engine_cs *engine) -{ - struct intel_engine_execlists *execlists = &engine->execlists; - - if (inject_preempt_hang(execlists)) - return; - - execlists_cancel_port_requests(execlists); - execlists_unwind_incomplete_requests(execlists); - - wait_for_guc_preempt_report(engine); - intel_write_status_page(engine, I915_GEM_HWS_PREEMPT, GUC_PREEMPT_NONE); -} - static void guc_submit(struct intel_engine_cs *engine, struct i915_request **out, struct i915_request **end) @@ -707,16 +580,6 @@ static inline int rq_prio(const struct i915_request *rq) return rq->sched.attr.priority | __NO_PREEMPTION; } -static inline int effective_prio(const struct i915_request *rq) -{ - int prio = rq_prio(rq); - - if (i915_request_has_nopreempt(rq)) - prio = I915_PRIORITY_UNPREEMPTABLE; - - return prio; -} - static struct i915_request *schedule_in(struct i915_request *rq, int idx) { trace_i915_request_in(rq, idx); @@ -752,22 +615,6 @@ static void __guc_dequeue(struct intel_engine_cs *engine) lockdep_assert_held(&engine->active.lock); if (last) { - if 
(intel_engine_has_preemption(engine)) { - struct guc_preempt_work *preempt_work = - &engine->i915->guc.preempt_work[engine->id]; - int prio = execlists->queue_priority_hint; - - if (i915_scheduler_need_preempt(prio, - effective_prio(last))) { - intel_write_status_page(engine, - I915_GEM_HWS_PREEMPT, - GUC_PREEMPT_INPROGRESS); - queue_work(engine->i915->guc.preempt_wq, - &preempt_work->work); - return; - } - } - if (*++first) return; @@ -831,12 +678,7 @@ static void guc_submission_tasklet(unsigned long data) memmove(execlists->inflight, port, rem * sizeof(*port)); } - if (intel_read_status_page(engine, I915_GEM_HWS_PREEMPT) == - GUC_PREEMPT_FINISHED) - complete_preempt_context(engine); - - if (!intel_read_status_page(engine, I915_GEM_HWS_PREEMPT)) - __guc_dequeue(engine); + __guc_dequeue(engine); spin_unlock_irqrestore(&engine->active.lock, flags); } @@ -857,16 +699,6 @@ static void guc_reset_prepare(struct intel_engine_cs *engine) * prevents the race. */ __tasklet_disable_sync_once(&execlists->tasklet); - - /* - * We're using worker to queue preemption requests from the tasklet in - * GuC submission mode. - * Even though tasklet was disabled, we may still have a worker queued. - * Let's make sure that all workers scheduled before disabling the - * tasklet are completed before continuing with the reset. - */ - if (engine->i915->guc.preempt_wq) - flush_workqueue(engine->i915->guc.preempt_wq); } static void guc_reset(struct intel_engine_cs *engine, bool stalled) @@ -1123,7 +955,6 @@ static int guc_clients_create(struct intel_guc *guc) struct intel_guc_client *client; GEM_BUG_ON(guc->execbuf_client); - GEM_BUG_ON(guc->preempt_client); client = guc_client_alloc(dev_priv, INTEL_INFO(dev_priv)->engine_mask, @@ -1135,20 +966,6 @@ static int guc_clients_create(struct intel_guc *guc) } guc->execbuf_client = client; - if (dev_priv->preempt_context) { - client = guc_client_alloc(dev_priv, - INTEL_INFO(dev_priv)->engine_mask, - GUC_CLIENT_PRIORITY_KMD_HIGH, - dev_priv->preempt_context); - if (IS_ERR(client)) { - DRM_ERROR("Failed to create GuC client for preemption!\n"); - guc_client_free(guc->execbuf_client); - guc->execbuf_client = NULL; - return PTR_ERR(client); - } - guc->preempt_client = client; - } - return 0; } @@ -1156,10 +973,6 @@ static void guc_clients_destroy(struct intel_guc *guc) { struct intel_guc_client *client; - client = fetch_and_zero(&guc->preempt_client); - if (client) - guc_client_free(client); - client = fetch_and_zero(&guc->execbuf_client); if (client) guc_client_free(client); @@ -1202,28 +1015,11 @@ static void __guc_client_disable(struct intel_guc_client *client) static int guc_clients_enable(struct intel_guc *guc) { - int ret; - - ret = __guc_client_enable(guc->execbuf_client); - if (ret) - return ret; - - if (guc->preempt_client) { - ret = __guc_client_enable(guc->preempt_client); - if (ret) { - __guc_client_disable(guc->execbuf_client); - return ret; - } - } - - return 0; + return __guc_client_enable(guc->execbuf_client); } static void guc_clients_disable(struct intel_guc *guc) { - if (guc->preempt_client) - __guc_client_disable(guc->preempt_client); - if (guc->execbuf_client) __guc_client_disable(guc->execbuf_client); } @@ -1234,9 +1030,6 @@ static void guc_clients_disable(struct intel_guc *guc) */ int intel_guc_submission_init(struct intel_guc *guc) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); - struct intel_engine_cs *engine; - enum intel_engine_id id; int ret; if (guc->stage_desc_pool) @@ -1256,11 +1049,6 @@ int intel_guc_submission_init(struct intel_guc 
*guc) if (ret) goto err_pool; - for_each_engine(engine, dev_priv, id) { - guc->preempt_work[id].engine = engine; - INIT_WORK(&guc->preempt_work[id].work, inject_preempt_context); - } - return 0; err_pool: @@ -1270,13 +1058,6 @@ err_pool: void intel_guc_submission_fini(struct intel_guc *guc) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); - struct intel_engine_cs *engine; - enum intel_engine_id id; - - for_each_engine(engine, dev_priv, id) - cancel_work_sync(&guc->preempt_work[id].work); - guc_clients_destroy(guc); WARN_ON(!guc_verify_doorbells(guc)); diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c index 6ca8584cd64c..1a1915e44f6b 100644 --- a/drivers/gpu/drm/i915/selftests/intel_guc.c +++ b/drivers/gpu/drm/i915/selftests/intel_guc.c @@ -103,13 +103,10 @@ static int ring_doorbell_nop(struct intel_guc_client *client) /* * Basic client sanity check, handy to validate create_clients. */ -static int validate_client(struct intel_guc_client *client, - int client_priority, - bool is_preempt_client) +static int validate_client(struct intel_guc_client *client, int client_priority) { struct drm_i915_private *dev_priv = guc_to_i915(client->guc); - struct i915_gem_context *ctx_owner = is_preempt_client ? - dev_priv->preempt_context : dev_priv->kernel_context; + struct i915_gem_context *ctx_owner = dev_priv->kernel_context; if (client->owner != ctx_owner || client->engines != INTEL_INFO(dev_priv)->engine_mask || @@ -163,7 +160,7 @@ static int igt_guc_clients(void *args) */ guc_clients_disable(guc); guc_clients_destroy(guc); - if (guc->execbuf_client || guc->preempt_client) { + if (guc->execbuf_client) { pr_err("guc_clients_destroy lied!\n"); err = -EINVAL; goto unlock; @@ -177,24 +174,14 @@ static int igt_guc_clients(void *args) GEM_BUG_ON(!guc->execbuf_client); err = validate_client(guc->execbuf_client, - GUC_CLIENT_PRIORITY_KMD_NORMAL, false); + GUC_CLIENT_PRIORITY_KMD_NORMAL); if (err) { pr_err("execbug client validation failed\n"); goto out; } - if (guc->preempt_client) { - err = validate_client(guc->preempt_client, - GUC_CLIENT_PRIORITY_KMD_HIGH, true); - if (err) { - pr_err("preempt client validation failed\n"); - goto out; - } - } - - /* each client should now have reserved a doorbell */ - if (!has_doorbell(guc->execbuf_client) || - (guc->preempt_client && !has_doorbell(guc->preempt_client))) { + /* the client should now have reserved a doorbell */ + if (!has_doorbell(guc->execbuf_client)) { pr_err("guc_clients_create didn't reserve doorbells\n"); err = -EINVAL; goto out; @@ -204,8 +191,7 @@ static int igt_guc_clients(void *args) guc_clients_enable(guc); /* each client should now have received a doorbell */ - if (!client_doorbell_in_sync(guc->execbuf_client) || - !client_doorbell_in_sync(guc->preempt_client)) { + if (!client_doorbell_in_sync(guc->execbuf_client)) { pr_err("failed to initialize the doorbells\n"); err = -EINVAL; goto out; @@ -300,8 +286,7 @@ static int igt_guc_doorbells(void *arg) goto out; } - err = validate_client(clients[i], - i % GUC_CLIENT_PRIORITY_NUM, false); + err = validate_client(clients[i], i % GUC_CLIENT_PRIORITY_NUM); if (err) { pr_err("[%d] client_alloc sanity check failed!\n", i); err = -EINVAL; -- cgit v1.2.3 From aebf052bb6474f21179f2469b5c3172c4f5e4fd1 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Tue, 9 Jul 2019 17:54:27 -0700 Subject: drm/i915/guc: Simplify guc client MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We originally added support, 
in some cases partial, for different modes of operations via guc clients: - proxy vs direct submission; - variable engine mask per-client. We only ever used one flow (all submissions via a single proxy), so the other code paths haven't been exercised and are most likely non-functional. The guc firmware interface is also in the process of being updated to better fit the i915 flow and our client abstraction will need to change accordingly (or possibly go away entirely), so these old unused paths can be considered dead and removed. Signed-off-by: Daniele Ceraolo Spurio Cc: Chris Wilson Cc: Michal Wajdeczko Cc: Matthew Brost Cc: John Harrison Acked-by: Matthew Brost Reviewed-by: Michał Winiarski Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190710005437.3496-3-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/i915_debugfs.c | 3 +- drivers/gpu/drm/i915/intel_guc_submission.c | 73 ++--------------------------- drivers/gpu/drm/i915/intel_guc_submission.h | 2 - drivers/gpu/drm/i915/selftests/intel_guc.c | 12 +---- 4 files changed, 8 insertions(+), 82 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index b4d195677877..dc65a6131a5b 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -2021,7 +2021,6 @@ static int i915_guc_stage_pool(struct seq_file *m, void *data) struct drm_i915_private *dev_priv = node_to_i915(m->private); const struct intel_guc *guc = &dev_priv->guc; struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr; - struct intel_guc_client *client = guc->execbuf_client; intel_engine_mask_t tmp; int index; @@ -2051,7 +2050,7 @@ static int i915_guc_stage_pool(struct seq_file *m, void *data) desc->wq_addr, desc->wq_size); seq_putc(m, '\n'); - for_each_engine_masked(engine, dev_priv, client->engines, tmp) { + for_each_engine(engine, dev_priv, tmp) { u32 guc_engine_id = engine->guc_id; struct guc_execlist_context *lrc = &desc->lrc[guc_engine_id]; diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c index 8520bb224175..30692f8289bd 100644 --- a/drivers/gpu/drm/i915/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/intel_guc_submission.c @@ -363,10 +363,7 @@ static void guc_stage_desc_pool_destroy(struct intel_guc *guc) static void guc_stage_desc_init(struct intel_guc_client *client) { struct intel_guc *guc = client->guc; - struct i915_gem_context *ctx = client->owner; - struct i915_gem_engines_iter it; struct guc_stage_desc *desc; - struct intel_context *ce; u32 gfx_addr; desc = __get_stage_desc(client); @@ -380,55 +377,6 @@ static void guc_stage_desc_init(struct intel_guc_client *client) desc->priority = client->priority; desc->db_id = client->doorbell_id; - for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { - struct guc_execlist_context *lrc; - - if (!(ce->engine->mask & client->engines)) - continue; - - /* TODO: We have a design issue to be solved here. Only when we - * receive the first batch, we know which engine is used by the - * user. But here GuC expects the lrc and ring to be pinned. It - * is not an issue for default context, which is the only one - * for now who owns a GuC client. But for future owner of GuC - * client, need to make sure lrc is pinned prior to enter here. - */ - if (!ce->state) - break; /* XXX: continue? 
*/ - - /* - * XXX: When this is a GUC_STAGE_DESC_ATTR_KERNEL client (proxy - * submission or, in other words, not using a direct submission - * model) the KMD's LRCA is not used for any work submission. - * Instead, the GuC uses the LRCA of the user mode context (see - * guc_add_request below). - */ - lrc = &desc->lrc[ce->engine->guc_id]; - lrc->context_desc = lower_32_bits(ce->lrc_desc); - - /* The state page is after PPHWSP */ - lrc->ring_lrca = intel_guc_ggtt_offset(guc, ce->state) + - LRC_STATE_PN * PAGE_SIZE; - - /* XXX: In direct submission, the GuC wants the HW context id - * here. In proxy submission, it wants the stage id - */ - lrc->context_id = (client->stage_id << GUC_ELC_CTXID_OFFSET) | - (ce->engine->guc_id << GUC_ELC_ENGINE_OFFSET); - - lrc->ring_begin = intel_guc_ggtt_offset(guc, ce->ring->vma); - lrc->ring_end = lrc->ring_begin + ce->ring->size - 1; - lrc->ring_next_free_location = lrc->ring_begin; - lrc->ring_current_tail_pointer_value = 0; - - desc->engines_used |= BIT(ce->engine->guc_id); - } - i915_gem_context_unlock_engines(ctx); - - DRM_DEBUG_DRIVER("Host engines 0x%x => GuC engines used 0x%x\n", - client->engines, desc->engines_used); - WARN_ON(desc->engines_used == 0); - /* * The doorbell, process descriptor, and workqueue are all parts * of the client object, which the GuC will reference via the GGTT @@ -836,8 +784,7 @@ static bool guc_verify_doorbells(struct intel_guc *guc) /** * guc_client_alloc() - Allocate an intel_guc_client - * @dev_priv: driver private data structure - * @engines: The set of engines to enable for this client + * @guc: the intel_guc structure * @priority: four levels priority _CRITICAL, _HIGH, _NORMAL and _LOW * The kernel client to replace ExecList submission is created with * NORMAL priority. Priority of a client for scheduler can be HIGH, @@ -848,13 +795,9 @@ static bool guc_verify_doorbells(struct intel_guc *guc) * Return: An intel_guc_client object if success, else NULL. 
*/ static struct intel_guc_client * -guc_client_alloc(struct drm_i915_private *dev_priv, - u32 engines, - u32 priority, - struct i915_gem_context *ctx) +guc_client_alloc(struct intel_guc *guc, u32 priority) { struct intel_guc_client *client; - struct intel_guc *guc = &dev_priv->guc; struct i915_vma *vma; void *vaddr; int ret; @@ -864,8 +807,6 @@ guc_client_alloc(struct drm_i915_private *dev_priv, return ERR_PTR(-ENOMEM); client->guc = guc; - client->owner = ctx; - client->engines = engines; client->priority = priority; client->doorbell_id = GUC_DOORBELL_INVALID; spin_lock_init(&client->wq_lock); @@ -910,8 +851,8 @@ guc_client_alloc(struct drm_i915_private *dev_priv, else client->proc_desc_offset = (GUC_DB_SIZE / 2); - DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: stage_id %u\n", - priority, client, client->engines, client->stage_id); + DRM_DEBUG_DRIVER("new priority %u client %p: stage_id %u\n", + priority, client, client->stage_id); DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%lx\n", client->doorbell_id, client->doorbell_offset); @@ -951,15 +892,11 @@ static inline bool ctx_save_restore_disabled(struct intel_context *ce) static int guc_clients_create(struct intel_guc *guc) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); struct intel_guc_client *client; GEM_BUG_ON(guc->execbuf_client); - client = guc_client_alloc(dev_priv, - INTEL_INFO(dev_priv)->engine_mask, - GUC_CLIENT_PRIORITY_KMD_NORMAL, - dev_priv->kernel_context); + client = guc_client_alloc(guc, GUC_CLIENT_PRIORITY_KMD_NORMAL); if (IS_ERR(client)) { DRM_ERROR("Failed to create GuC client for submission!\n"); return PTR_ERR(client); diff --git a/drivers/gpu/drm/i915/intel_guc_submission.h b/drivers/gpu/drm/i915/intel_guc_submission.h index 7d823a513b9c..87a38cb6faf3 100644 --- a/drivers/gpu/drm/i915/intel_guc_submission.h +++ b/drivers/gpu/drm/i915/intel_guc_submission.h @@ -58,11 +58,9 @@ struct drm_i915_private; struct intel_guc_client { struct i915_vma *vma; void *vaddr; - struct i915_gem_context *owner; struct intel_guc *guc; /* bitmap of (host) engine ids */ - u32 engines; u32 priority; u32 stage_id; u32 proc_desc_offset; diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c index 1a1915e44f6b..6ca76f5a98d4 100644 --- a/drivers/gpu/drm/i915/selftests/intel_guc.c +++ b/drivers/gpu/drm/i915/selftests/intel_guc.c @@ -105,12 +105,7 @@ static int ring_doorbell_nop(struct intel_guc_client *client) */ static int validate_client(struct intel_guc_client *client, int client_priority) { - struct drm_i915_private *dev_priv = guc_to_i915(client->guc); - struct i915_gem_context *ctx_owner = dev_priv->kernel_context; - - if (client->owner != ctx_owner || - client->engines != INTEL_INFO(dev_priv)->engine_mask || - client->priority != client_priority || + if (client->priority != client_priority || client->doorbell_id == GUC_DOORBELL_INVALID) return -EINVAL; else @@ -247,10 +242,7 @@ static int igt_guc_doorbells(void *arg) goto unlock; for (i = 0; i < ATTEMPTS; i++) { - clients[i] = guc_client_alloc(dev_priv, - INTEL_INFO(dev_priv)->engine_mask, - i % GUC_CLIENT_PRIORITY_NUM, - dev_priv->kernel_context); + clients[i] = guc_client_alloc(guc, i % GUC_CLIENT_PRIORITY_NUM); if (!clients[i]) { pr_err("[%d] No guc client\n", i); -- cgit v1.2.3 From 5a5efbf4ef504a8136963a8ae10a3c0a1d59b479 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 19 Jun 2019 21:03:07 +0300 Subject: drm/i915/sdvo: Fix handling if zero hbuf size MIME-Version: 1.0 Content-Type: text/plain; 
charset=UTF-8 Content-Transfer-Encoding: 8bit The spec says: "A value of 0 indicates that this buffer does not exist" So we should not convert a hbuf_size of 0 into 1. Also pull the relevant code into a helper to avoid making the same mistake multiple times. And while at it fix the debug prints to not say "hbuf_len" twice. v2: s/%i/%u/ in the debug (Imre) Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190619180312.31817-1-ville.syrjala@linux.intel.com Reviewed-by: Imre Deak --- drivers/gpu/drm/i915/display/intel_sdvo.c | 32 ++++++++++++++++++------------- 1 file changed, 19 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index 3fe8eaef6bd8..213843a93c4e 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -951,6 +951,20 @@ static bool intel_sdvo_set_audio_state(struct intel_sdvo *intel_sdvo, &audio_state, 1); } +static bool intel_sdvo_get_hbuf_size(struct intel_sdvo *intel_sdvo, + u8 *hbuf_size) +{ + if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO, + hbuf_size, 1)) + return false; + + /* Buffer size is 0 based, hooray! However zero means zero. */ + if (*hbuf_size) + (*hbuf_size)++; + + return true; +} + #if 0 static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo) { @@ -994,14 +1008,10 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo, set_buf_index, 2)) return false; - if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO, - &hbuf_size, 1)) + if (!intel_sdvo_get_hbuf_size(intel_sdvo, &hbuf_size)) return false; - /* Buffer size is 0 based, hooray! */ - hbuf_size++; - - DRM_DEBUG_KMS("writing sdvo hbuf: %i, hbuf_size %i, hbuf_size: %i\n", + DRM_DEBUG_KMS("writing sdvo hbuf: %i, length %u, hbuf_size: %i\n", if_index, length, hbuf_size); if (hbuf_size < length) @@ -1052,14 +1062,10 @@ static ssize_t intel_sdvo_read_infoframe(struct intel_sdvo *intel_sdvo, if (tx_rate == SDVO_HBUF_TX_DISABLED) return 0; - if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO, - &hbuf_size, 1)) - return -ENXIO; - - /* Buffer size is 0 based, hooray! */ - hbuf_size++; + if (!intel_sdvo_get_hbuf_size(intel_sdvo, &hbuf_size)) + return false; - DRM_DEBUG_KMS("reading sdvo hbuf: %i, hbuf_size %i, hbuf_size: %i\n", + DRM_DEBUG_KMS("reading sdvo hbuf: %i, length %u, hbuf_size: %i\n", if_index, length, hbuf_size); hbuf_size = min_t(unsigned int, length, hbuf_size); -- cgit v1.2.3 From f63dfc1464380833c6a2c8daa1b767c6020565af Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 11 Jul 2019 07:51:59 +0100 Subject: drm/i915/selftests: Hold the vma manager lock while modifying mmap_offset Right idea, wrong lock. We already drop struct_mutex before we free the mmap_offset when freeing the object, so we need to take the vma manager lock when manipulating the mmap_offset address space for our selftests. 
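The shape of the fix is a common one: wrap the borrowed lock in small named helpers so every call site reads as "lock the mmap_offset address space". A rough user-space sketch of that wrapper pattern (pthread-based, names hypothetical, not the driver code):

  #include <pthread.h>

  /* Stand-in for the rwlock inside the VMA offset manager. */
  static pthread_rwlock_t vm_lock = PTHREAD_RWLOCK_INITIALIZER;

  /* Named wrappers make each call site read as "lock the offset space". */
  static void mmap_offset_lock(void)
  {
          pthread_rwlock_wrlock(&vm_lock);
  }

  static void mmap_offset_unlock(void)
  {
          pthread_rwlock_unlock(&vm_lock);
  }

  /* Any manipulation of the offset address space goes under the wrappers. */
  static void trim_offset_space(void)
  {
          mmap_offset_lock();
          /* ... reserve/remove nodes in the offset space here ... */
          mmap_offset_unlock();
  }
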
Fixes: 8221d21b0664 ("drm/i915/selftests: Lock the drm_mm while modifying") Signed-off-by: Chris Wilson Cc: Imre Deak Reviewed-by: Imre Deak Link: https://patchwork.freedesktop.org/patch/msgid/20190711065215.4004-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index b95fdc2b6bfc..5635cbb4af22 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -401,6 +401,18 @@ static void restore_retire_worker(struct drm_i915_private *i915) i915_gem_shrinker_register(i915); } +static void mmap_offset_lock(struct drm_i915_private *i915) + __acquires(&i915->drm.vma_offset_manager->vm_lock) +{ + write_lock(&i915->drm.vma_offset_manager->vm_lock); +} + +static void mmap_offset_unlock(struct drm_i915_private *i915) + __releases(&i915->drm.vma_offset_manager->vm_lock) +{ + write_unlock(&i915->drm.vma_offset_manager->vm_lock); +} + static int igt_mmap_offset_exhaustion(void *arg) { struct drm_i915_private *i915 = arg; @@ -419,9 +431,9 @@ static int igt_mmap_offset_exhaustion(void *arg) drm_mm_for_each_hole(hole, mm, hole_start, hole_end) { resv.start = hole_start; resv.size = hole_end - hole_start - 1; /* PAGE_SIZE units */ - mutex_lock(&i915->drm.struct_mutex); + mmap_offset_lock(i915); err = drm_mm_reserve_node(mm, &resv); - mutex_unlock(&i915->drm.struct_mutex); + mmap_offset_unlock(i915); if (err) { pr_err("Failed to trim VMA manager, err=%d\n", err); goto out_park; @@ -485,9 +497,9 @@ static int igt_mmap_offset_exhaustion(void *arg) } out: - mutex_lock(&i915->drm.struct_mutex); + mmap_offset_lock(i915); drm_mm_remove_node(&resv); - mutex_unlock(&i915->drm.struct_mutex); + mmap_offset_unlock(i915); out_park: restore_retire_worker(i915); return err; -- cgit v1.2.3 From 13ce609243c7fbc17d8b272629242bc25a5401bd Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 1 Jul 2019 19:15:34 +0300 Subject: drm/i915: Use the "display core" power domain in vlv/chv set_cdclk() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The PFI credit programming performed during cdclk change on vlv/chv requires access to a register in the disp2d power well. So far we've abused pipe-A power domain for this, but now we have the more appropriate "display core" domain so let's make use of it. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190701161534.6671-1-ville.syrjala@linux.intel.com Reviewed-by: Imre Deak --- drivers/gpu/drm/i915/display/intel_cdclk.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 0b8b8ae3b7fc..d0581a1ac243 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -545,10 +545,10 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv, /* There are cases where we can end up here with power domains * off and a CDCLK frequency other than the minimum, like when * issuing a modeset without actually changing any display after - * a system suspend. So grab the PIPE-A domain, which covers + * a system suspend. So grab the display core domain, which covers * the HW blocks needed for the following programming. 
*/ - wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A); + wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE); vlv_iosf_sb_get(dev_priv, BIT(VLV_IOSF_SB_CCK) | @@ -606,7 +606,7 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv, vlv_program_pfi_credits(dev_priv); - intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref); + intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref); } static void chv_set_cdclk(struct drm_i915_private *dev_priv, @@ -631,10 +631,10 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv, /* There are cases where we can end up here with power domains * off and a CDCLK frequency other than the minimum, like when * issuing a modeset without actually changing any display after - * a system suspend. So grab the PIPE-A domain, which covers + * a system suspend. So grab the display core domain, which covers * the HW blocks needed for the following programming. */ - wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A); + wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE); vlv_punit_get(dev_priv); val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); @@ -653,7 +653,7 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv, vlv_program_pfi_credits(dev_priv); - intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref); + intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref); } static int bdw_calc_cdclk(int min_cdclk) -- cgit v1.2.3 From 88016a9fb7417cb99c2e58e96ff943d59fa27099 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 1 Jul 2019 19:05:45 +0300 Subject: drm/i915: Check crtc_state->wm.need_postvbl_update before grabbing wm.mutex MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit wm.mutex does not protect the crtc state so no point in grabbing it to check crtc_state->wm.need_postvbl_update. Also do a bit of s/intel_crtc/crtc/ while at it. 
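The underlying rule generalizes: if a flag is not protected by the lock, test it before taking the lock and return early, so the lock only covers what it actually guards. A minimal user-space sketch of that shape (pthread-based, names hypothetical, not the i915 code):

  #include <pthread.h>
  #include <stdbool.h>

  struct wm_state {
          bool need_postvbl_update;   /* lives in the atomic state, not under the lock */
          int optimal;
  };

  static pthread_mutex_t wm_mutex = PTHREAD_MUTEX_INITIALIZER;
  static int active_wm;               /* this is what wm_mutex actually protects */

  static void optimize_watermarks(const struct wm_state *state)
  {
          /* The flag is not protected by wm_mutex, so test it first... */
          if (!state->need_postvbl_update)
                  return;

          /* ...and only take the lock for the part that needs it. */
          pthread_mutex_lock(&wm_mutex);
          active_wm = state->optimal;
          pthread_mutex_unlock(&wm_mutex);
  }
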
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190701160550.24205-1-ville.syrjala@linux.intel.com Reviewed-by: Imre Deak --- drivers/gpu/drm/i915/intel_pm.c | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 0cecea228546..22472f2bd31b 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -1566,13 +1566,13 @@ static void g4x_optimize_watermarks(struct intel_atomic_state *state, struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); if (!crtc_state->wm.need_postvbl_update) return; mutex_lock(&dev_priv->wm.wm_mutex); - intel_crtc->wm.active.g4x = crtc_state->wm.g4x.optimal; + crtc->wm.active.g4x = crtc_state->wm.g4x.optimal; g4x_program_watermarks(dev_priv); mutex_unlock(&dev_priv->wm.wm_mutex); } @@ -2185,13 +2185,13 @@ static void vlv_optimize_watermarks(struct intel_atomic_state *state, struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); if (!crtc_state->wm.need_postvbl_update) return; mutex_lock(&dev_priv->wm.wm_mutex); - intel_crtc->wm.active.vlv = crtc_state->wm.vlv.optimal; + crtc->wm.active.vlv = crtc_state->wm.vlv.optimal; vlv_program_watermarks(dev_priv); mutex_unlock(&dev_priv->wm.wm_mutex); } @@ -5723,10 +5723,10 @@ static void ilk_initial_watermarks(struct intel_atomic_state *state, struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); mutex_lock(&dev_priv->wm.wm_mutex); - intel_crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate; + crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate; ilk_program_watermarks(dev_priv); mutex_unlock(&dev_priv->wm.wm_mutex); } @@ -5735,13 +5735,14 @@ static void ilk_optimize_watermarks(struct intel_atomic_state *state, struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + + if (!crtc_state->wm.need_postvbl_update) + return; mutex_lock(&dev_priv->wm.wm_mutex); - if (crtc_state->wm.need_postvbl_update) { - intel_crtc->wm.active.ilk = crtc_state->wm.ilk.optimal; - ilk_program_watermarks(dev_priv); - } + crtc->wm.active.ilk = crtc_state->wm.ilk.optimal; + ilk_program_watermarks(dev_priv); mutex_unlock(&dev_priv->wm.wm_mutex); } -- cgit v1.2.3 From afe0c21b6228ac241a9e537f71db418771225090 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 1 Jul 2019 19:05:46 +0300 Subject: drm/i915: Simplify modeset_get_crtc_power_domains() arguments MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Pass just the crtc state to modeset_get_crtc_power_domains(). We can get the crtc from therein. 
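Dropping the extra argument works because the state already carries a back-pointer to its owner, so callers only need to pass the state. A tiny sketch of that shape (hypothetical names, not the real i915 structures):

  struct crtc {
          int pipe;
  };

  struct crtc_state {
          struct crtc *crtc;           /* back-pointer to the owning CRTC */
          unsigned long long domains;
  };

  /* Only the state is passed; the owner is recovered from the back-pointer. */
  static unsigned long long get_crtc_power_domains(const struct crtc_state *state)
  {
          const struct crtc *crtc = state->crtc;

          return 1ULL << crtc->pipe;   /* stand-in for the real domain mask */
  }
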
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190701160550.24205-2-ville.syrjala@linux.intel.com Reviewed-by: Imre Deak --- drivers/gpu/drm/i915/display/intel_display.c | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 0286b97caa22..fc654d1a164e 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -6772,11 +6772,10 @@ intel_aux_power_domain(struct intel_digital_port *dig_port) } } -static u64 get_crtc_power_domains(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state) +static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct drm_encoder *encoder; enum pipe pipe = crtc->pipe; u64 mask; @@ -6791,7 +6790,8 @@ static u64 get_crtc_power_domains(struct intel_crtc *crtc, crtc_state->pch_pfit.force_thru) mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe)); - drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) { + drm_for_each_encoder_mask(encoder, &dev_priv->drm, + crtc_state->base.encoder_mask) { struct intel_encoder *intel_encoder = to_intel_encoder(encoder); mask |= BIT_ULL(intel_encoder->power_domain); @@ -6807,16 +6807,16 @@ static u64 get_crtc_power_domains(struct intel_crtc *crtc, } static u64 -modeset_get_crtc_power_domains(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state) +modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum intel_display_power_domain domain; u64 domains, new_domains, old_domains; old_domains = crtc->enabled_power_domains; crtc->enabled_power_domains = new_domains = - get_crtc_power_domains(crtc, crtc_state); + get_crtc_power_domains(crtc_state); domains = new_domains & ~old_domains; @@ -13906,8 +13906,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) new_crtc_state->update_pipe) { put_domains[crtc->pipe] = - modeset_get_crtc_power_domains(crtc, - new_crtc_state); + modeset_get_crtc_power_domains(new_crtc_state); } if (!needs_modeset(new_crtc_state)) @@ -17011,7 +17010,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev, u64 put_domains; crtc_state = to_intel_crtc_state(crtc->base.state); - put_domains = modeset_get_crtc_power_domains(crtc, crtc_state); + put_domains = modeset_get_crtc_power_domains(crtc_state); if (WARN_ON(put_domains)) modeset_put_power_domains(dev_priv, put_domains); } -- cgit v1.2.3 From 13d723a1177d63f3aa9cea8bc9aae9f7617adc4f Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 1 Jul 2019 19:05:47 +0300 Subject: drm/i915: Polish intel_shared_dpll_swap_state() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use swap() instead of hand rolling it in intel_shared_dpll_swap_state(), and pass in the intel_atomic_state instead of drm_atomic_state. Makes the code less convoluted. 
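For reference, swap() is a small macro that performs the three-step exchange for you; a user-space approximation of it (not the kernel header itself):

  #include <stdio.h>

  /* User-space approximation of the kernel's swap() helper. */
  #define swap(a, b) \
          do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

  int main(void)
  {
          int saved = 1, pending = 2;

          /* Replaces the hand-rolled tmp = a; a = b; b = tmp; sequence. */
          swap(saved, pending);

          printf("saved=%d pending=%d\n", saved, pending);
          return 0;
  }
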
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190701160550.24205-3-ville.syrjala@linux.intel.com Reviewed-by: Imre Deak --- drivers/gpu/drm/i915/display/intel_display.c | 2 +- drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 19 +++++++------------ drivers/gpu/drm/i915/display/intel_dpll_mgr.h | 3 +-- 3 files changed, 9 insertions(+), 15 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index fc654d1a164e..e820082b65ea 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -14187,7 +14187,7 @@ static int intel_atomic_commit(struct drm_device *dev, return ret; } dev_priv->wm.distrust_bios_wm = false; - intel_shared_dpll_swap_state(state); + intel_shared_dpll_swap_state(intel_state); intel_atomic_track_fbs(state); if (intel_state->modeset) { diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index fc6f3c52629a..5065f21fd82b 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -347,25 +347,20 @@ static void intel_put_dpll(struct intel_atomic_state *state, * i.e. it also puts the current state into @state, even though there is no * need for that at this moment. */ -void intel_shared_dpll_swap_state(struct drm_atomic_state *state) +void intel_shared_dpll_swap_state(struct intel_atomic_state *state) { - struct drm_i915_private *dev_priv = to_i915(state->dev); - struct intel_shared_dpll_state *shared_dpll; - struct intel_shared_dpll *pll; + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_shared_dpll_state *shared_dpll = state->shared_dpll; enum intel_dpll_id i; - if (!to_intel_atomic_state(state)->dpll_set) + if (!state->dpll_set) return; - shared_dpll = to_intel_atomic_state(state)->shared_dpll; for (i = 0; i < dev_priv->num_shared_dpll; i++) { - struct intel_shared_dpll_state tmp; - - pll = &dev_priv->shared_dplls[i]; + struct intel_shared_dpll *pll = + &dev_priv->shared_dplls[i]; - tmp = pll->state; - pll->state = shared_dpll[i]; - shared_dpll[i] = tmp; + swap(pll->state, shared_dpll[i]); } } diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h index 4c2c5e93aff3..ed5fae964736 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h @@ -37,7 +37,6 @@ (void) (&__a == &__b); \ __a > __b ? 
(__a - __b) : (__b - __a); }) -struct drm_atomic_state; struct drm_device; struct drm_i915_private; struct intel_atomic_state; @@ -358,7 +357,7 @@ void intel_update_active_dpll(struct intel_atomic_state *state, void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state); void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state); void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state); -void intel_shared_dpll_swap_state(struct drm_atomic_state *state); +void intel_shared_dpll_swap_state(struct intel_atomic_state *state); void intel_shared_dpll_init(struct drm_device *dev); void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv, -- cgit v1.2.3 From e3b4089c68ed59fcda9220db02c8ccfe9c49ccd7 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 1 Jul 2019 19:05:48 +0300 Subject: drm/i915: Polish intel_atomic_track_fbs() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Streamline the code a bit by using intel_ types instead of drm_ types in intel_atomic_track_fbs(). Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190701160550.24205-4-ville.syrjala@linux.intel.com Reviewed-by: Imre Deak --- drivers/gpu/drm/i915/display/intel_display.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index e820082b65ea..1de34b96f8bc 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -14101,16 +14101,17 @@ intel_atomic_commit_ready(struct i915_sw_fence *fence, return NOTIFY_DONE; } -static void intel_atomic_track_fbs(struct drm_atomic_state *state) +static void intel_atomic_track_fbs(struct intel_atomic_state *state) { - struct drm_plane_state *old_plane_state, *new_plane_state; - struct drm_plane *plane; + struct intel_plane_state *old_plane_state, *new_plane_state; + struct intel_plane *plane; int i; - for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) - i915_gem_track_fb(intel_fb_obj(old_plane_state->fb), - intel_fb_obj(new_plane_state->fb), - to_intel_plane(plane)->frontbuffer_bit); + for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, + new_plane_state, i) + i915_gem_track_fb(intel_fb_obj(old_plane_state->base.fb), + intel_fb_obj(new_plane_state->base.fb), + plane->frontbuffer_bit); } /** @@ -14188,7 +14189,7 @@ static int intel_atomic_commit(struct drm_device *dev, } dev_priv->wm.distrust_bios_wm = false; intel_shared_dpll_swap_state(intel_state); - intel_atomic_track_fbs(state); + intel_atomic_track_fbs(intel_state); if (intel_state->modeset) { memcpy(dev_priv->min_cdclk, intel_state->min_cdclk, -- cgit v1.2.3 From 6a64e985d242921a9b6050bddc5aa716c02aa6ef Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 1 Jul 2019 19:05:49 +0300 Subject: drm/i915: Use intel_ types in intel_{lock,modeset}_all_pipes() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Streamline the code a bit by using intel_ types instead of the drm_ types. 
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190701160550.24205-5-ville.syrjala@linux.intel.com Reviewed-by: Imre Deak --- drivers/gpu/drm/i915/display/intel_display.c | 38 +++++++++++++++------------- 1 file changed, 21 insertions(+), 17 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 1de34b96f8bc..feb7f407813c 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -13382,15 +13382,16 @@ static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state) return 0; } -static int intel_lock_all_pipes(struct drm_atomic_state *state) +static int intel_lock_all_pipes(struct intel_atomic_state *state) { - struct drm_crtc *crtc; + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_crtc *crtc; /* Add all pipes to the state */ - for_each_crtc(state->dev, crtc) { - struct drm_crtc_state *crtc_state; + for_each_intel_crtc(&dev_priv->drm, crtc) { + struct intel_crtc_state *crtc_state; - crtc_state = drm_atomic_get_crtc_state(state, crtc); + crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); } @@ -13398,32 +13399,35 @@ static int intel_lock_all_pipes(struct drm_atomic_state *state) return 0; } -static int intel_modeset_all_pipes(struct drm_atomic_state *state) +static int intel_modeset_all_pipes(struct intel_atomic_state *state) { - struct drm_crtc *crtc; + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_crtc *crtc; /* * Add all pipes to the state, and force * a modeset on all the active ones. */ - for_each_crtc(state->dev, crtc) { - struct drm_crtc_state *crtc_state; + for_each_intel_crtc(&dev_priv->drm, crtc) { + struct intel_crtc_state *crtc_state; int ret; - crtc_state = drm_atomic_get_crtc_state(state, crtc); + crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); - if (!crtc_state->active || needs_modeset(to_intel_crtc_state(crtc_state))) + if (!crtc_state->base.active || needs_modeset(crtc_state)) continue; - crtc_state->mode_changed = true; + crtc_state->base.mode_changed = true; - ret = drm_atomic_add_affected_connectors(state, crtc); + ret = drm_atomic_add_affected_connectors(&state->base, + &crtc->base); if (ret) return ret; - ret = drm_atomic_add_affected_planes(state, crtc); + ret = drm_atomic_add_affected_planes(&state->base, + &crtc->base); if (ret) return ret; } @@ -13485,7 +13489,7 @@ static int intel_modeset_checks(struct intel_atomic_state *state) */ if (intel_cdclk_changed(&dev_priv->cdclk.logical, &state->cdclk.logical)) { - ret = intel_lock_all_pipes(&state->base); + ret = intel_lock_all_pipes(state); if (ret < 0) return ret; } @@ -13508,14 +13512,14 @@ static int intel_modeset_checks(struct intel_atomic_state *state) intel_cdclk_needs_cd2x_update(dev_priv, &dev_priv->cdclk.actual, &state->cdclk.actual)) { - ret = intel_lock_all_pipes(&state->base); + ret = intel_lock_all_pipes(state); if (ret < 0) return ret; state->cdclk.pipe = pipe; } else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual, &state->cdclk.actual)) { - ret = intel_modeset_all_pipes(&state->base); + ret = intel_modeset_all_pipes(state); if (ret < 0) return ret; -- cgit v1.2.3 From a85fb46777c0e078075e7f4880a1f82178d66c1a Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 1 Jul 2019 19:05:50 +0300 Subject: drm/i915: Use 
intel_ types in intel_atomic_commit() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make life less annoying by favoring the intel_ types over the drm_ types in intel_atomic_commit(). Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190701160550.24205-6-ville.syrjala@linux.intel.com Reviewed-by: Imre Deak --- drivers/gpu/drm/i915/display/intel_display.c | 76 ++++++++++++++-------------- 1 file changed, 37 insertions(+), 39 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index feb7f407813c..a44d661eb535 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -13686,10 +13686,10 @@ static int intel_atomic_check(struct drm_device *dev, return ret; } -static int intel_atomic_prepare_commit(struct drm_device *dev, - struct drm_atomic_state *state) +static int intel_atomic_prepare_commit(struct intel_atomic_state *state) { - return drm_atomic_helper_prepare_planes(dev, state); + return drm_atomic_helper_prepare_planes(state->base.dev, + &state->base); } u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc) @@ -14131,17 +14131,17 @@ static void intel_atomic_track_fbs(struct intel_atomic_state *state) * Zero for success or -errno. */ static int intel_atomic_commit(struct drm_device *dev, - struct drm_atomic_state *state, + struct drm_atomic_state *_state, bool nonblock) { - struct intel_atomic_state *intel_state = to_intel_atomic_state(state); + struct intel_atomic_state *state = to_intel_atomic_state(_state); struct drm_i915_private *dev_priv = to_i915(dev); int ret = 0; - intel_state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); + state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); - drm_atomic_state_get(state); - i915_sw_fence_init(&intel_state->commit_ready, + drm_atomic_state_get(&state->base); + i915_sw_fence_init(&state->commit_ready, intel_atomic_commit_ready); /* @@ -14161,65 +14161,63 @@ static int intel_atomic_commit(struct drm_device *dev, * FIXME doing watermarks and fb cleanup from a vblank worker * (assuming we had any) would solve these problems. 
*/ - if (INTEL_GEN(dev_priv) < 9 && state->legacy_cursor_update) { + if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) { struct intel_crtc_state *new_crtc_state; struct intel_crtc *crtc; int i; - for_each_new_intel_crtc_in_state(intel_state, crtc, new_crtc_state, i) + for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) if (new_crtc_state->wm.need_postvbl_update || new_crtc_state->update_wm_post) - state->legacy_cursor_update = false; + state->base.legacy_cursor_update = false; } - ret = intel_atomic_prepare_commit(dev, state); + ret = intel_atomic_prepare_commit(state); if (ret) { DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret); - i915_sw_fence_commit(&intel_state->commit_ready); - intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref); + i915_sw_fence_commit(&state->commit_ready); + intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); return ret; } - ret = drm_atomic_helper_setup_commit(state, nonblock); + ret = drm_atomic_helper_setup_commit(&state->base, nonblock); if (!ret) - ret = drm_atomic_helper_swap_state(state, true); + ret = drm_atomic_helper_swap_state(&state->base, true); if (ret) { - i915_sw_fence_commit(&intel_state->commit_ready); + i915_sw_fence_commit(&state->commit_ready); - drm_atomic_helper_cleanup_planes(dev, state); - intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref); + drm_atomic_helper_cleanup_planes(dev, &state->base); + intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); return ret; } dev_priv->wm.distrust_bios_wm = false; - intel_shared_dpll_swap_state(intel_state); - intel_atomic_track_fbs(intel_state); + intel_shared_dpll_swap_state(state); + intel_atomic_track_fbs(state); - if (intel_state->modeset) { - memcpy(dev_priv->min_cdclk, intel_state->min_cdclk, - sizeof(intel_state->min_cdclk)); - memcpy(dev_priv->min_voltage_level, - intel_state->min_voltage_level, - sizeof(intel_state->min_voltage_level)); - dev_priv->active_crtcs = intel_state->active_crtcs; - dev_priv->cdclk.force_min_cdclk = - intel_state->cdclk.force_min_cdclk; + if (state->modeset) { + memcpy(dev_priv->min_cdclk, state->min_cdclk, + sizeof(state->min_cdclk)); + memcpy(dev_priv->min_voltage_level, state->min_voltage_level, + sizeof(state->min_voltage_level)); + dev_priv->active_crtcs = state->active_crtcs; + dev_priv->cdclk.force_min_cdclk = state->cdclk.force_min_cdclk; - intel_cdclk_swap_state(intel_state); + intel_cdclk_swap_state(state); } - drm_atomic_state_get(state); - INIT_WORK(&state->commit_work, intel_atomic_commit_work); + drm_atomic_state_get(&state->base); + INIT_WORK(&state->base.commit_work, intel_atomic_commit_work); - i915_sw_fence_commit(&intel_state->commit_ready); - if (nonblock && intel_state->modeset) { - queue_work(dev_priv->modeset_wq, &state->commit_work); + i915_sw_fence_commit(&state->commit_ready); + if (nonblock && state->modeset) { + queue_work(dev_priv->modeset_wq, &state->base.commit_work); } else if (nonblock) { - queue_work(system_unbound_wq, &state->commit_work); + queue_work(system_unbound_wq, &state->base.commit_work); } else { - if (intel_state->modeset) + if (state->modeset) flush_workqueue(dev_priv->modeset_wq); - intel_atomic_commit_tail(intel_state); + intel_atomic_commit_tail(state); } return 0; -- cgit v1.2.3 From b12d5944fc285730d75bdd8f7cbc28d22c26f36f Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 10 Jul 2019 16:49:37 +0300 Subject: drm/i915: Don't pass stack garbage to pcode in the second data register MIME-Version: 1.0 Content-Type: text/plain; 
charset=UTF-8 Content-Transfer-Encoding: 8bit Zero initialize val2 so that we don't pass stack garbage to the pcode qgv read command. I suspect in this case pcode just ignores the initial value in that registers, but better safe than sorry. Cc: Dan Carpenter Reported-by: Dan Carpenter Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190710134937.25835-1-ville.syrjala@linux.intel.com Reviewed-by: Chris Wilson --- drivers/gpu/drm/i915/display/intel_bw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index 7b908e10d32e..ee52c5b4643b 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -65,7 +65,7 @@ static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv, struct intel_qgv_point *sp, int point) { - u32 val = 0, val2; + u32 val = 0, val2 = 0; int ret; ret = sandybridge_pcode_read(dev_priv, -- cgit v1.2.3 From ddafc0f7564051121285e8143948f7a5269b8eb6 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 11 Jul 2019 17:24:15 +0100 Subject: drm/i915/guc: Drop redundant ctx param from kerneldoc drivers/gpu/drm/i915/intel_guc_submission.c:799: warning: Excess function parameter 'ctx' description in 'guc_client_alloc' Signed-off-by: Chris Wilson Cc: Daniele Ceraolo Spurio Reviewed-by: Daniele Ceraolo Spurio Link: https://patchwork.freedesktop.org/patch/msgid/20190711162415.2938-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_guc_submission.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c index 30692f8289bd..b663b5fe51a8 100644 --- a/drivers/gpu/drm/i915/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/intel_guc_submission.c @@ -789,8 +789,6 @@ static bool guc_verify_doorbells(struct intel_guc *guc) * The kernel client to replace ExecList submission is created with * NORMAL priority. Priority of a client for scheduler can be HIGH, * while a preemption context can use CRITICAL. - * @ctx: the context that owns the client (we use the default render - * context) * * Return: An intel_guc_client object if success, else NULL. */ -- cgit v1.2.3 From 86c9640b3a1ede3215fe8fc1848abb54a36aeac8 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Wed, 10 Jul 2019 20:12:30 +0300 Subject: drm/i915: Copy name string into ring buffer for intel_update/disable_plane tracepoints MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently the intel_update_plane and intel_disable_plane tracepoints record the address of plane->name in the ring buffer, and then when reading the ring buffer uses %s to get the name. The issue with this, is that those two events can be minutes, hours or even days apart. It is very dangerous to dereference a string pointer without knowing if it still exists or not. The proper way to handle this is to use the __string() macro in the tracepoint which will save the string into the ring buffer at the time of recording. Then there's no worries if the original string still exists in memory when the ring buffer is read. 
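The hazard is easy to reproduce outside tracing too: a record that stores only the pointer can dangle, while a record that copies the bytes at recording time stays valid on its own. A small user-space sketch of the difference (hypothetical names, not the TP_ macros):

  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  struct event_ptr {
          const char *name;    /* only the pointer is recorded: may dangle */
  };

  struct event_copy {
          char name[32];       /* the bytes are recorded: stays valid */
  };

  int main(void)
  {
          char *plane_name = strdup("plane 1A");
          struct event_ptr by_ptr = { .name = plane_name };
          struct event_copy by_copy;

          snprintf(by_copy.name, sizeof(by_copy.name), "%s", plane_name);

          /* The plane (and its name) may be freed long before readout. */
          free(plane_name);

          /* by_ptr.name now points at freed memory; by_copy.name does not. */
          printf("recorded name: %s\n", by_copy.name);
          (void)by_ptr;
          return 0;
  }
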
Signed-off-by: Steven Rostedt (VMware) [vsyrjala: Rebase on top of drm-tip] Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190710171230.7471-1-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/i915_trace.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index cce426b23a24..da18b8d6b80c 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -293,16 +293,16 @@ TRACE_EVENT(intel_update_plane, TP_STRUCT__entry( __field(enum pipe, pipe) - __field(const char *, name) __field(u32, frame) __field(u32, scanline) __array(int, src, 4) __array(int, dst, 4) + __string(name, plane->name) ), TP_fast_assign( + __assign_str(name, plane->name); __entry->pipe = crtc->pipe; - __entry->name = plane->name; __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); memcpy(__entry->src, &plane->state->src, sizeof(__entry->src)); @@ -310,7 +310,7 @@ TRACE_EVENT(intel_update_plane, ), TP_printk("pipe %c, plane %s, frame=%u, scanline=%u, " DRM_RECT_FP_FMT " -> " DRM_RECT_FMT, - pipe_name(__entry->pipe), __entry->name, + pipe_name(__entry->pipe), __get_str(name), __entry->frame, __entry->scanline, DRM_RECT_FP_ARG((const struct drm_rect *)__entry->src), DRM_RECT_ARG((const struct drm_rect *)__entry->dst)) @@ -322,20 +322,20 @@ TRACE_EVENT(intel_disable_plane, TP_STRUCT__entry( __field(enum pipe, pipe) - __field(const char *, name) __field(u32, frame) __field(u32, scanline) + __string(name, plane->name) ), TP_fast_assign( + __assign_str(name, plane->name); __entry->pipe = crtc->pipe; - __entry->name = plane->name; __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); ), TP_printk("pipe %c, plane %s, frame=%u, scanline=%u", - pipe_name(__entry->pipe), __entry->name, + pipe_name(__entry->pipe), __get_str(name), __entry->frame, __entry->scanline) ); -- cgit v1.2.3 From ddb3d12afa92beb67027e84bbc994b0868446022 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 10 Jul 2019 15:58:51 +0300 Subject: drm/i915: Don't overestimate 4:2:0 link symbol clock MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With 4:2:0 output the LS clock can be half of what it is with 4:4:4. Make that happen. 
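The halving comes from the sampling: 4:4:4 carries three components per pixel while 4:2:0 averages 1.5, so the effective bits per pixel, and with it the required link data rate, drops by half. A rough sketch of that arithmetic only (deliberately simplified, not the intel_dp helpers):

  /*
   * Simplified bookkeeping: 4:4:4 carries 3 components per pixel, 4:2:0
   * averages 1.5, so the transported bpp (and the required link data
   * rate) is halved.  Just the arithmetic, not the intel_dp code.
   */
  static int required_link_data_rate(int pixel_clock_khz, int pipe_bpp,
                                     int ycbcr420)
  {
          int output_bpp = ycbcr420 ? pipe_bpp / 2 : pipe_bpp;

          /* bits per second -> bytes per second, rounded up */
          return (pixel_clock_khz * output_bpp + 7) / 8;
  }
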
Cc: Gwan-gyeong Mun Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190710125851.3275-1-ville.syrjala@linux.intel.com Reviewed-by: Gwan-gyeong Mun Tested-by: Gwan-gyeong Mun --- drivers/gpu/drm/i915/display/intel_dp.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index a9db16de2999..24592d985bcb 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -1854,8 +1854,10 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp, int mode_rate, link_clock, link_avail; for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) { + int output_bpp = intel_dp_output_bpp(pipe_config, bpp); + mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, - bpp); + output_bpp); for (clock = limits->min_clock; clock <= limits->max_clock; clock++) { for (lane_count = limits->min_lane_count; -- cgit v1.2.3 From f1f1d4fa5869c8b0b3e7f9e9a8a2f1983f10123a Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Thu, 11 Jul 2019 10:30:55 -0700 Subject: drm/i915: Add 4th pipe and transcoder MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add pipe D and transcoder D to prepare for platforms having them. Cc: Rodrigo Vivi Signed-off-by: Lucas De Marchi Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190711173115.28296-2-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 3 ++- drivers/gpu/drm/i915/display/intel_display.h | 4 ++++ drivers/gpu/drm/i915/i915_reg.h | 3 +++ 3 files changed, 9 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index a44d661eb535..c13784b81fb1 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -17191,7 +17191,7 @@ struct intel_display_error_state { u32 vtotal; u32 vblank; u32 vsync; - } transcoder[4]; + } transcoder[5]; }; struct intel_display_error_state * @@ -17202,6 +17202,7 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv) TRANSCODER_A, TRANSCODER_B, TRANSCODER_C, + TRANSCODER_D, TRANSCODER_EDP, }; int i; diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index 8a4a57ef82a2..1f75b0a627fd 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -58,6 +58,7 @@ enum pipe { PIPE_A = 0, PIPE_B, PIPE_C, + PIPE_D, _PIPE_EDP, I915_MAX_PIPES = _PIPE_EDP @@ -75,6 +76,7 @@ enum transcoder { TRANSCODER_A = PIPE_A, TRANSCODER_B = PIPE_B, TRANSCODER_C = PIPE_C, + TRANSCODER_D = PIPE_D, /* * The following transcoders can map to any pipe, their enum value @@ -98,6 +100,8 @@ static inline const char *transcoder_name(enum transcoder transcoder) return "B"; case TRANSCODER_C: return "C"; + case TRANSCODER_D: + return "D"; case TRANSCODER_EDP: return "EDP"; case TRANSCODER_DSI_A: diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 95b9ca1fda2e..6fe3d74f99ff 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4217,6 +4217,7 @@ enum { #define TRANSCODER_B_OFFSET 0x61000 #define TRANSCODER_C_OFFSET 0x62000 #define CHV_TRANSCODER_C_OFFSET 0x63000 +#define TRANSCODER_D_OFFSET 0x63000 #define TRANSCODER_EDP_OFFSET 0x6f000 
#define TRANSCODER_DSI0_OFFSET 0x6b000 #define TRANSCODER_DSI1_OFFSET 0x6b800 @@ -5763,6 +5764,7 @@ enum { #define PIPE_A_OFFSET 0x70000 #define PIPE_B_OFFSET 0x71000 #define PIPE_C_OFFSET 0x72000 +#define PIPE_D_OFFSET 0x73000 #define CHV_PIPE_C_OFFSET 0x74000 /* * There's actually no pipe EDP. Some pipe registers have @@ -9346,6 +9348,7 @@ enum skl_power_gate { #define _TRANS_DDI_FUNC_CTL_A 0x60400 #define _TRANS_DDI_FUNC_CTL_B 0x61400 #define _TRANS_DDI_FUNC_CTL_C 0x62400 +#define _TRANS_DDI_FUNC_CTL_D 0x63400 #define _TRANS_DDI_FUNC_CTL_EDP 0x6F400 #define _TRANS_DDI_FUNC_CTL_DSI0 0x6b400 #define _TRANS_DDI_FUNC_CTL_DSI1 0x6bc00 -- cgit v1.2.3 From abd3a0fe040d51fc2943855169558b4b0b3311c6 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Thu, 11 Jul 2019 10:30:56 -0700 Subject: drm/i915/tgl: add initial Tiger Lake definitions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Tiger Lake is a Intel® Processor containing Intel® HD Graphics. This is just an initial Tiger Lake definition. PCI IDs, generic support and new features coming in following patches. v2 (Lucas): - Remove modular FIA - feature will be re-introduced in future Cc: Joonas Lahtinen Cc: Rodrigo Vivi Signed-off-by: Daniele Ceraolo Spurio Signed-off-by: Lucas De Marchi Reviewed-by: Anusha Srivatsa Link: https://patchwork.freedesktop.org/patch/msgid/20190711173115.28296-3-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/i915_pci.c | 29 +++++++++++++++++++++++++++++ drivers/gpu/drm/i915/intel_device_info.c | 1 + drivers/gpu/drm/i915/intel_device_info.h | 2 ++ 4 files changed, 33 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 2fa1d35efcb8..e89a553cc902 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2086,6 +2086,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define IS_CANNONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_CANNONLAKE) #define IS_ICELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ICELAKE) #define IS_ELKHARTLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE) +#define IS_TIGERLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_TIGERLAKE) #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \ (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00) #define IS_BDW_ULT(dev_priv) \ diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 94b588e0a1dd..da926485845d 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -765,6 +765,35 @@ static const struct intel_device_info intel_elkhartlake_info = { .ppgtt_size = 36, }; +#define GEN12_FEATURES \ + GEN11_FEATURES, \ + GEN(12), \ + .pipe_offsets = { \ + [TRANSCODER_A] = PIPE_A_OFFSET, \ + [TRANSCODER_B] = PIPE_B_OFFSET, \ + [TRANSCODER_C] = PIPE_C_OFFSET, \ + [TRANSCODER_D] = PIPE_D_OFFSET, \ + [TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET, \ + [TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET, \ + }, \ + .trans_offsets = { \ + [TRANSCODER_A] = TRANSCODER_A_OFFSET, \ + [TRANSCODER_B] = TRANSCODER_B_OFFSET, \ + [TRANSCODER_C] = TRANSCODER_C_OFFSET, \ + [TRANSCODER_D] = TRANSCODER_D_OFFSET, \ + [TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, \ + [TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \ + } + +static const struct intel_device_info intel_tigerlake_12_info = { + GEN12_FEATURES, + PLATFORM(INTEL_TIGERLAKE), + .num_pipes = 4, + .require_force_probe = 1, + .engine_mask = + BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), +}; + 
#undef GEN #undef PLATFORM diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index e64536e1fd1b..e0d9a7a37994 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -58,6 +58,7 @@ static const char * const platform_names[] = { PLATFORM_NAME(CANNONLAKE), PLATFORM_NAME(ICELAKE), PLATFORM_NAME(ELKHARTLAKE), + PLATFORM_NAME(TIGERLAKE), }; #undef PLATFORM_NAME diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index ddafc819bf30..468582484758 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -78,6 +78,8 @@ enum intel_platform { /* gen11 */ INTEL_ICELAKE, INTEL_ELKHARTLAKE, + /* gen12 */ + INTEL_TIGERLAKE, INTEL_MAX_PLATFORMS }; -- cgit v1.2.3 From 7f02889292e2e3fe6390b98d4ebc3dd67f20a812 Mon Sep 17 00:00:00 2001 From: Radhakrishna Sripada Date: Thu, 11 Jul 2019 10:30:57 -0700 Subject: drm/i915/tgl: Introduce Tiger Lake PCH Add the enum additions to TGP. Cc: Rodrigo Vivi Cc: Joonas Lahtinen Cc: David Weinehall Cc: James Ausmus Signed-off-by: Radhakrishna Sripada Signed-off-by: Lucas De Marchi Reviewed-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20190711173115.28296-4-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/i915_drv.c | 4 ++++ drivers/gpu/drm/i915/i915_drv.h | 3 +++ 2 files changed, 7 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 12182d2fc03c..93441f2aaf15 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -224,6 +224,10 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id) DRM_DEBUG_KMS("Found Mule Creek Canyon PCH\n"); WARN_ON(!IS_ELKHARTLAKE(dev_priv)); return PCH_MCC; + case INTEL_PCH_TGP_DEVICE_ID_TYPE: + DRM_DEBUG_KMS("Found Tiger Lake LP PCH\n"); + WARN_ON(!IS_TIGERLAKE(dev_priv)); + return PCH_TGP; default: return PCH_NONE; } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index e89a553cc902..56527a7a1666 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -536,6 +536,7 @@ enum intel_pch { PCH_CNP, /* Cannon/Comet Lake PCH */ PCH_ICP, /* Ice Lake PCH */ PCH_MCC, /* Mule Creek Canyon PCH */ + PCH_TGP, /* Tiger Lake PCH */ }; #define QUIRK_LVDS_SSC_DISABLE (1<<1) @@ -2320,6 +2321,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480 #define INTEL_PCH_MCC_DEVICE_ID_TYPE 0x4B00 #define INTEL_PCH_MCC2_DEVICE_ID_TYPE 0x3880 +#define INTEL_PCH_TGP_DEVICE_ID_TYPE 0xA080 #define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100 #define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */ @@ -2327,6 +2329,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type) #define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id) #define HAS_PCH_MCC(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_MCC) +#define HAS_PCH_TGP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_TGP) #define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP) #define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP) #define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT) -- cgit v1.2.3 From d8df6bec1a30097f65781faf4877e4da7a520002 Mon Sep 17 00:00:00 2001 From: Mahesh Kumar Date: Thu, 11 Jul 2019 10:30:58 -0700 Subject: drm/i915/tgl: Add TGL PCH 
detection in virtualized environment Assume PCH_TGP when platform is TGL. Cc: Rodrigo Vivi Signed-off-by: Mahesh Kumar Signed-off-by: Lucas De Marchi Reviewed-by: Anusha Srivatsa Link: https://patchwork.freedesktop.org/patch/msgid/20190711173115.28296-5-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/i915_drv.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 93441f2aaf15..8502a2e4268e 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -255,7 +255,9 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv) * make an educated guess as to which PCH is really there. */ - if (IS_ELKHARTLAKE(dev_priv)) + if (IS_TIGERLAKE(dev_priv)) + id = INTEL_PCH_TGP_DEVICE_ID_TYPE; + else if (IS_ELKHARTLAKE(dev_priv)) id = INTEL_PCH_MCC_DEVICE_ID_TYPE; else if (IS_ICELAKE(dev_priv)) id = INTEL_PCH_ICP_DEVICE_ID_TYPE; -- cgit v1.2.3 From 9747f0c2fb9e1a6d7c907d34a373924c84093afa Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Thu, 11 Jul 2019 10:30:59 -0700 Subject: drm/i915/tgl: Add TGL PCI IDs Current list of PCI IDs for Tiger Lake. Cc: Rodrigo Vivi Signed-off-by: Lucas De Marchi Reviewed-by: Rodrigo Vivi Reviewed-by: Mika Kahola Link: https://patchwork.freedesktop.org/patch/msgid/20190711173115.28296-6-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/i915_pci.c | 1 + include/drm/i915_pciids.h | 10 ++++++++++ 2 files changed, 11 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index da926485845d..e83c94cf2744 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -865,6 +865,7 @@ static const struct pci_device_id pciidlist[] = { INTEL_CNL_IDS(&intel_cannonlake_info), INTEL_ICL_11_IDS(&intel_icelake_11_info), INTEL_EHL_IDS(&intel_elkhartlake_info), + INTEL_TGL_12_IDS(&intel_tigerlake_12_info), {0, 0, 0} }; MODULE_DEVICE_TABLE(pci, pciidlist); diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index 6c342ac470c8..a70c982ddff9 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -583,4 +583,14 @@ INTEL_VGA_DEVICE(0x4551, info), \ INTEL_VGA_DEVICE(0x4541, info) +/* TGL */ +#define INTEL_TGL_12_IDS(info) \ + INTEL_VGA_DEVICE(0x9A49, info), \ + INTEL_VGA_DEVICE(0x9A40, info), \ + INTEL_VGA_DEVICE(0x9A59, info), \ + INTEL_VGA_DEVICE(0x9A60, info), \ + INTEL_VGA_DEVICE(0x9A68, info), \ + INTEL_VGA_DEVICE(0x9A70, info), \ + INTEL_VGA_DEVICE(0x9A78, info) + #endif /* _I915_PCIIDS_H */ -- cgit v1.2.3 From 7ff0fca4964ff19d8e16669d8d3070018cecdd0f Mon Sep 17 00:00:00 2001 From: José Roberto de Souza Date: Thu, 11 Jul 2019 10:31:00 -0700 Subject: drm/i915/tgl: Check if pipe D is fused MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On Tiger Lake there is one more pipe - check if it's fused. 
Signed-off-by: José Roberto de Souza Signed-off-by: Lucas De Marchi Reviewed-by: Mika Kahola Link: https://patchwork.freedesktop.org/patch/msgid/20190711173115.28296-7-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/i915_reg.h | 1 + drivers/gpu/drm/i915/intel_device_info.c | 3 +++ 2 files changed, 4 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 6fe3d74f99ff..94e76fa9d114 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -7633,6 +7633,7 @@ enum { #define SKL_DFSM_PIPE_A_DISABLE (1 << 30) #define SKL_DFSM_PIPE_B_DISABLE (1 << 21) #define SKL_DFSM_PIPE_C_DISABLE (1 << 28) +#define TGL_DFSM_PIPE_D_DISABLE (1 << 22) #define SKL_DSSM _MMIO(0x51004) #define CNL_DSSM_CDCLK_PLL_REFCLK_24MHz (1 << 31) diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index e0d9a7a37994..f99c9fd497b2 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -938,6 +938,9 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) enabled_mask &= ~BIT(PIPE_B); if (dfsm & SKL_DFSM_PIPE_C_DISABLE) enabled_mask &= ~BIT(PIPE_C); + if (INTEL_GEN(dev_priv) >= 12 && + (dfsm & TGL_DFSM_PIPE_D_DISABLE)) + enabled_mask &= ~BIT(PIPE_D); /* * At least one pipe should be enabled and if there are -- cgit v1.2.3 From 276199e6be6a4058e74828728eb25179b1461978 Mon Sep 17 00:00:00 2001 From: José Roberto de Souza Date: Thu, 11 Jul 2019 10:31:01 -0700 Subject: drm/i915/tgl: rename TRANSCODER_EDP_VDSC to use on transcoder A MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On TGL the special EDP transcoder is gone and it should be handled by transcoder A. 
v2 (Lucas): - Reuse POWER_DOMAIN_TRANSCODER_EDP_VDSC (suggested by Ville) - Use crtc->dev since new_crtc_state->state may be NULL on atomic commit (suggested by Maarten) v3 (Lucas): - Rename power domain so it's clear it can also be used for transcoder A in TGL (requested by José and Manasi) Cc: Imre Deak Signed-off-by: José Roberto de Souza Signed-off-by: Lucas De Marchi Acked-by: José Roberto de Souza Reviewed-by: Manasi Navare Link: https://patchwork.freedesktop.org/patch/msgid/20190711173115.28296-8-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/display/intel_display_power.c | 6 +++--- drivers/gpu/drm/i915/display/intel_display_power.h | 3 ++- drivers/gpu/drm/i915/display/intel_vdsc.c | 14 ++++++++++---- 3 files changed, 15 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 7e22a2704843..6a5e0d0724cb 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -48,8 +48,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain) return "TRANSCODER_C"; case POWER_DOMAIN_TRANSCODER_EDP: return "TRANSCODER_EDP"; - case POWER_DOMAIN_TRANSCODER_EDP_VDSC: - return "TRANSCODER_EDP_VDSC"; + case POWER_DOMAIN_TRANSCODER_VDSC_PW2: + return "TRANSCODER_VDSC_PW2"; case POWER_DOMAIN_TRANSCODER_DSI_A: return "TRANSCODER_DSI_A"; case POWER_DOMAIN_TRANSCODER_DSI_C: @@ -2450,7 +2450,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, */ #define ICL_PW_2_POWER_DOMAINS ( \ ICL_PW_3_POWER_DOMAINS | \ - BIT_ULL(POWER_DOMAIN_TRANSCODER_EDP_VDSC) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \ BIT_ULL(POWER_DOMAIN_INIT)) /* * - KVMR (HW control) diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h index 8f43f7051a16..cc6956132ebc 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.h +++ b/drivers/gpu/drm/i915/display/intel_display_power.h @@ -25,7 +25,8 @@ enum intel_display_power_domain { POWER_DOMAIN_TRANSCODER_B, POWER_DOMAIN_TRANSCODER_C, POWER_DOMAIN_TRANSCODER_EDP, - POWER_DOMAIN_TRANSCODER_EDP_VDSC, + /* VDSC/joining for TRANSCODER_EDP (ICL) or TRANSCODER_A (TGL) */ + POWER_DOMAIN_TRANSCODER_VDSC_PW2, POWER_DOMAIN_TRANSCODER_DSI_A, POWER_DOMAIN_TRANSCODER_DSI_C, POWER_DOMAIN_PORT_DDI_A_LANES, diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c index ffec807b8960..4ab19c432ef5 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc.c +++ b/drivers/gpu/drm/i915/display/intel_vdsc.c @@ -459,17 +459,23 @@ int intel_dp_compute_dsc_params(struct intel_dp *intel_dp, enum intel_display_power_domain intel_dsc_power_domain(const struct intel_crtc_state *crtc_state) { + struct drm_i915_private *i915 = to_i915(crtc_state->base.crtc->dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; /* - * On ICL VDSC/joining for eDP transcoder uses a separate power well PW2 - * This requires POWER_DOMAIN_TRANSCODER_EDP_VDSC power domain. + * On ICL VDSC/joining for eDP transcoder uses a separate power well, + * PW2. This requires POWER_DOMAIN_TRANSCODER_VDSC_PW2 power domain. * For any other transcoder, VDSC/joining uses the power well associated * with the pipe/transcoder in use. Hence another reference on the * transcoder power domain will suffice. + * + * On TGL we have the same mapping, but for transcoder A (the special + * TRANSCODER_EDP is gone). 
*/ - if (cpu_transcoder == TRANSCODER_EDP) - return POWER_DOMAIN_TRANSCODER_EDP_VDSC; + if (INTEL_GEN(i915) >= 12 && cpu_transcoder == TRANSCODER_A) + return POWER_DOMAIN_TRANSCODER_VDSC_PW2; + else if (cpu_transcoder == TRANSCODER_EDP) + return POWER_DOMAIN_TRANSCODER_VDSC_PW2; else return POWER_DOMAIN_TRANSCODER(cpu_transcoder); } -- cgit v1.2.3 From 656409bbaf8792c015708e567d4ebcd7fb4e7728 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Thu, 11 Jul 2019 10:31:02 -0700 Subject: drm/i915/tgl: Add power well support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The patch adds the new power wells introduced by TGL (GEN 12) and maps these to existing/new power domains. The changes for GEN 12 wrt to GEN 11 are the following: - Transcoder#EDP removed from power well#1 (Transcoder#A used in low-power mode instead) - Transcoder#A is now backed by power well#1 instead of power well#3 - The DDI#B/C combo PHY ports are now backed by power well#1 instead of power well#3 - New power well#5 added for pipe#D functionality (TODO) - 2 additional TC ports (TC#5-6) backed by power well#3, 2 port specific IO power wells (only for the non-TBT modes) and 4 port specific AUX power wells (2-2 for TBT vs. non-TBT modes) - Power well#2 backs now VDSC/joining for pipe#A instead of VDSC for eDP and MIPI DSI (TODO) On TGL Port DDI#C changed to be a combo PHY (native DP/HDMI) and BSpec has renamed ports DDI#D-F to TC#4-6 respectively. Thus on ICL we have the following naming for ports: - Combo PHYs (native DP/HDMI): DDI#A-B - TBT/non-TBT (TC altmode, native DP/HDMI) PHYs: DDI#C-F Starting from GEN 12 we have the following naming for ports: - Combo PHYs (native DP/HDMI): DDI#A-C - TBT/non-TBT (TC altmode, native DP/HDMI) PHYs: DDI TC#1-6 To save some space in the power domain enum the power domain naming in the driver reflects the above change, that is power domains TC#1-3 are added as aliases for DDI#D-F and new power domains are reserved for TC#4-6. v2 (Lucas): - Separate out the bits and definitions for TGL from the ICL ones. 
Fix use of TRANSCODER_EDP_VDSC, that is now the correct define since we don't define TRANSCODER_A_VDSC power domain to spare a one bit in the bitmask (suggested by Ville) v3 (Lucas): - Fix missing squashes on v2 - Rebase on renamed TRANSCODER_EDP_VDSC Cc: Ville Syrjälä Cc: Anusha Srivatsa Cc: Rodrigo Vivi Cc: José Roberto de Souza Signed-off-by: Imre Deak Signed-off-by: Lucas De Marchi Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190711173115.28296-9-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/display/intel_display_power.c | 474 ++++++++++++++++++++- drivers/gpu/drm/i915/display/intel_display_power.h | 26 +- drivers/gpu/drm/i915/i915_debugfs.c | 3 +- drivers/gpu/drm/i915/i915_reg.h | 20 +- 4 files changed, 506 insertions(+), 17 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 6a5e0d0724cb..2d91cd70b05b 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -23,8 +23,11 @@ bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, enum i915_power_well_id power_well_id); const char * -intel_display_power_domain_str(enum intel_display_power_domain domain) +intel_display_power_domain_str(struct drm_i915_private *i915, + enum intel_display_power_domain domain) { + bool ddi_tc_ports = IS_GEN(i915, 12); + switch (domain) { case POWER_DOMAIN_DISPLAY_CORE: return "DISPLAY_CORE"; @@ -61,11 +64,23 @@ intel_display_power_domain_str(enum intel_display_power_domain domain) case POWER_DOMAIN_PORT_DDI_C_LANES: return "PORT_DDI_C_LANES"; case POWER_DOMAIN_PORT_DDI_D_LANES: - return "PORT_DDI_D_LANES"; + BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_D_LANES != + POWER_DOMAIN_PORT_DDI_TC1_LANES); + return ddi_tc_ports ? "PORT_DDI_TC1_LANES" : "PORT_DDI_D_LANES"; case POWER_DOMAIN_PORT_DDI_E_LANES: - return "PORT_DDI_E_LANES"; + BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_E_LANES != + POWER_DOMAIN_PORT_DDI_TC2_LANES); + return ddi_tc_ports ? "PORT_DDI_TC2_LANES" : "PORT_DDI_E_LANES"; case POWER_DOMAIN_PORT_DDI_F_LANES: - return "PORT_DDI_F_LANES"; + BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_F_LANES != + POWER_DOMAIN_PORT_DDI_TC3_LANES); + return ddi_tc_ports ? "PORT_DDI_TC3_LANES" : "PORT_DDI_F_LANES"; + case POWER_DOMAIN_PORT_DDI_TC4_LANES: + return "PORT_DDI_TC4_LANES"; + case POWER_DOMAIN_PORT_DDI_TC5_LANES: + return "PORT_DDI_TC5_LANES"; + case POWER_DOMAIN_PORT_DDI_TC6_LANES: + return "PORT_DDI_TC6_LANES"; case POWER_DOMAIN_PORT_DDI_A_IO: return "PORT_DDI_A_IO"; case POWER_DOMAIN_PORT_DDI_B_IO: @@ -73,11 +88,23 @@ intel_display_power_domain_str(enum intel_display_power_domain domain) case POWER_DOMAIN_PORT_DDI_C_IO: return "PORT_DDI_C_IO"; case POWER_DOMAIN_PORT_DDI_D_IO: - return "PORT_DDI_D_IO"; + BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_D_IO != + POWER_DOMAIN_PORT_DDI_TC1_IO); + return ddi_tc_ports ? "PORT_DDI_TC1_IO" : "PORT_DDI_D_IO"; case POWER_DOMAIN_PORT_DDI_E_IO: - return "PORT_DDI_E_IO"; + BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_E_IO != + POWER_DOMAIN_PORT_DDI_TC2_IO); + return ddi_tc_ports ? "PORT_DDI_TC2_IO" : "PORT_DDI_E_IO"; case POWER_DOMAIN_PORT_DDI_F_IO: - return "PORT_DDI_F_IO"; + BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_F_IO != + POWER_DOMAIN_PORT_DDI_TC3_IO); + return ddi_tc_ports ? 
"PORT_DDI_TC3_IO" : "PORT_DDI_F_IO"; + case POWER_DOMAIN_PORT_DDI_TC4_IO: + return "PORT_DDI_TC4_IO"; + case POWER_DOMAIN_PORT_DDI_TC5_IO: + return "PORT_DDI_TC5_IO"; + case POWER_DOMAIN_PORT_DDI_TC6_IO: + return "PORT_DDI_TC6_IO"; case POWER_DOMAIN_PORT_DSI: return "PORT_DSI"; case POWER_DOMAIN_PORT_CRT: @@ -95,11 +122,20 @@ intel_display_power_domain_str(enum intel_display_power_domain domain) case POWER_DOMAIN_AUX_C: return "AUX_C"; case POWER_DOMAIN_AUX_D: - return "AUX_D"; + BUILD_BUG_ON(POWER_DOMAIN_AUX_D != POWER_DOMAIN_AUX_TC1); + return ddi_tc_ports ? "AUX_TC1" : "AUX_D"; case POWER_DOMAIN_AUX_E: - return "AUX_E"; + BUILD_BUG_ON(POWER_DOMAIN_AUX_E != POWER_DOMAIN_AUX_TC2); + return ddi_tc_ports ? "AUX_TC2" : "AUX_E"; case POWER_DOMAIN_AUX_F: - return "AUX_F"; + BUILD_BUG_ON(POWER_DOMAIN_AUX_F != POWER_DOMAIN_AUX_TC3); + return ddi_tc_ports ? "AUX_TC3" : "AUX_F"; + case POWER_DOMAIN_AUX_TC4: + return "AUX_TC4"; + case POWER_DOMAIN_AUX_TC5: + return "AUX_TC5"; + case POWER_DOMAIN_AUX_TC6: + return "AUX_TC6"; case POWER_DOMAIN_AUX_IO_A: return "AUX_IO_A"; case POWER_DOMAIN_AUX_TBT1: @@ -110,6 +146,10 @@ intel_display_power_domain_str(enum intel_display_power_domain domain) return "AUX_TBT3"; case POWER_DOMAIN_AUX_TBT4: return "AUX_TBT4"; + case POWER_DOMAIN_AUX_TBT5: + return "AUX_TBT5"; + case POWER_DOMAIN_AUX_TBT6: + return "AUX_TBT6"; case POWER_DOMAIN_GMBUS: return "GMBUS"; case POWER_DOMAIN_INIT: @@ -1666,12 +1706,15 @@ __async_put_domains_state_ok(struct i915_power_domains *power_domains) static void print_power_domains(struct i915_power_domains *power_domains, const char *prefix, u64 mask) { + struct drm_i915_private *i915 = + container_of(power_domains, struct drm_i915_private, + power_domains); enum intel_display_power_domain domain; DRM_DEBUG_DRIVER("%s (%lu):\n", prefix, hweight64(mask)); for_each_power_domain(domain, mask) DRM_DEBUG_DRIVER("%s use_count %d\n", - intel_display_power_domain_str(domain), + intel_display_power_domain_str(i915, domain), power_domains->domain_use_count[domain]); } @@ -1841,7 +1884,7 @@ __intel_display_power_put_domain(struct drm_i915_private *dev_priv, { struct i915_power_domains *power_domains; struct i915_power_well *power_well; - const char *name = intel_display_power_domain_str(domain); + const char *name = intel_display_power_domain_str(dev_priv, domain); power_domains = &dev_priv->power_domains; @@ -2497,6 +2540,88 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, #define ICL_AUX_TBT4_IO_POWER_DOMAINS ( \ BIT_ULL(POWER_DOMAIN_AUX_TBT4)) +/* TODO: TGL_PW_5_POWER_DOMAINS: PIPE_D */ +#define TGL_PW_4_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PIPE_C) | \ + BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define TGL_PW_3_POWER_DOMAINS ( \ + TGL_PW_4_POWER_DOMAINS | \ + BIT_ULL(POWER_DOMAIN_PIPE_B) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ + /* TODO: TRANSCODER_D */ \ + BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_TC1_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_TC1_IO) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_TC2_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_TC2_IO) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_TC3_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_TC3_IO) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_TC4_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_TC4_IO) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_TC5_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_TC5_IO) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_TC6_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_TC6_IO) | \ + 
BIT_ULL(POWER_DOMAIN_AUX_TC1) | \ + BIT_ULL(POWER_DOMAIN_AUX_TC2) | \ + BIT_ULL(POWER_DOMAIN_AUX_TC3) | \ + BIT_ULL(POWER_DOMAIN_AUX_TC4) | \ + BIT_ULL(POWER_DOMAIN_AUX_TC5) | \ + BIT_ULL(POWER_DOMAIN_AUX_TC6) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \ + BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \ + BIT_ULL(POWER_DOMAIN_VGA) | \ + BIT_ULL(POWER_DOMAIN_AUDIO) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define TGL_PW_2_POWER_DOMAINS ( \ + TGL_PW_3_POWER_DOMAINS | \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define TGL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ + TGL_PW_2_POWER_DOMAINS | \ + BIT_ULL(POWER_DOMAIN_MODESET) | \ + BIT_ULL(POWER_DOMAIN_AUX_A) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + +#define TGL_DDI_IO_TC1_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_TC1_IO)) +#define TGL_DDI_IO_TC2_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_TC2_IO)) +#define TGL_DDI_IO_TC3_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_TC3_IO)) +#define TGL_DDI_IO_TC4_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_TC4_IO)) +#define TGL_DDI_IO_TC5_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_TC5_IO)) +#define TGL_DDI_IO_TC6_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_TC6_IO)) + +#define TGL_AUX_TC1_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_TC1)) +#define TGL_AUX_TC2_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_TC2)) +#define TGL_AUX_TC3_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_TC3)) +#define TGL_AUX_TC4_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_TC4)) +#define TGL_AUX_TC5_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_TC5)) +#define TGL_AUX_TC6_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_TC6)) +#define TGL_AUX_TBT5_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_TBT5)) +#define TGL_AUX_TBT6_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_TBT6)) + static const struct i915_power_well_ops i9xx_always_on_power_well_ops = { .sync_hw = i9xx_power_well_sync_hw_noop, .enable = i9xx_always_on_power_well_noop, @@ -3454,6 +3579,324 @@ static const struct i915_power_well_desc icl_power_wells[] = { }, }; +static const struct i915_power_well_desc tgl_power_wells[] = { + { + .name = "always-on", + .always_on = true, + .domains = POWER_DOMAIN_MASK, + .ops = &i9xx_always_on_power_well_ops, + .id = DISP_PW_ID_NONE, + }, + { + .name = "power well 1", + /* Handled by the DMC firmware */ + .always_on = true, + .domains = 0, + .ops = &hsw_power_well_ops, + .id = SKL_DISP_PW_1, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_PW_1, + .hsw.has_fuses = true, + }, + }, + { + .name = "DC off", + .domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS, + .ops = &gen9_dc_off_power_well_ops, + .id = DISP_PW_ID_NONE, + }, + { + .name = "power well 2", + .domains = TGL_PW_2_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = SKL_DISP_PW_2, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_PW_2, + .hsw.has_fuses = true, + }, + }, + { + .name = "power well 3", + .domains = TGL_PW_3_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_PW_3, + .hsw.irq_pipe_mask = BIT(PIPE_B), + .hsw.has_vga = true, + .hsw.has_fuses = true, + }, + }, + { + .name = "DDI A IO", + .domains = ICL_DDI_IO_A_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_DDI_A, + } 
+ }, + { + .name = "DDI B IO", + .domains = ICL_DDI_IO_B_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_DDI_B, + } + }, + { + .name = "DDI C IO", + .domains = ICL_DDI_IO_C_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_DDI_C, + } + }, + { + .name = "DDI TC1 IO", + .domains = TGL_DDI_IO_TC1_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, + }, + }, + { + .name = "DDI TC2 IO", + .domains = TGL_DDI_IO_TC2_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, + }, + }, + { + .name = "DDI TC3 IO", + .domains = TGL_DDI_IO_TC3_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3, + }, + }, + { + .name = "DDI TC4 IO", + .domains = TGL_DDI_IO_TC4_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4, + }, + }, + { + .name = "DDI TC5 IO", + .domains = TGL_DDI_IO_TC5_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = TGL_PW_CTL_IDX_DDI_TC5, + }, + }, + { + .name = "DDI TC6 IO", + .domains = TGL_DDI_IO_TC6_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_ddi_power_well_regs, + .hsw.idx = TGL_PW_CTL_IDX_DDI_TC6, + }, + }, + { + .name = "AUX A", + .domains = ICL_AUX_A_IO_POWER_DOMAINS, + .ops = &icl_combo_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_A, + }, + }, + { + .name = "AUX B", + .domains = ICL_AUX_B_IO_POWER_DOMAINS, + .ops = &icl_combo_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_B, + }, + }, + { + .name = "AUX C", + .domains = ICL_AUX_C_IO_POWER_DOMAINS, + .ops = &icl_combo_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_AUX_C, + }, + }, + { + .name = "AUX TC1", + .domains = TGL_AUX_TC1_IO_POWER_DOMAINS, + .ops = &icl_tc_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, + .hsw.is_tc_tbt = false, + }, + }, + { + .name = "AUX TC2", + .domains = TGL_AUX_TC2_IO_POWER_DOMAINS, + .ops = &icl_tc_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, + .hsw.is_tc_tbt = false, + }, + }, + { + .name = "AUX TC3", + .domains = TGL_AUX_TC3_IO_POWER_DOMAINS, + .ops = &icl_tc_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3, + .hsw.is_tc_tbt = false, + }, + }, + { + .name = "AUX TC4", + .domains = TGL_AUX_TC4_IO_POWER_DOMAINS, + .ops = &icl_tc_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4, + .hsw.is_tc_tbt = false, + }, + }, + { + .name = "AUX TC5", + .domains = TGL_AUX_TC5_IO_POWER_DOMAINS, + .ops = &icl_tc_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = 
&icl_aux_power_well_regs, + .hsw.idx = TGL_PW_CTL_IDX_AUX_TC5, + .hsw.is_tc_tbt = false, + }, + }, + { + .name = "AUX TC6", + .domains = TGL_AUX_TC6_IO_POWER_DOMAINS, + .ops = &icl_tc_phy_aux_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = TGL_PW_CTL_IDX_AUX_TC6, + .hsw.is_tc_tbt = false, + }, + }, + { + .name = "AUX TBT1", + .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1, + .hsw.is_tc_tbt = true, + }, + }, + { + .name = "AUX TBT2", + .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2, + .hsw.is_tc_tbt = true, + }, + }, + { + .name = "AUX TBT3", + .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3, + .hsw.is_tc_tbt = true, + }, + }, + { + .name = "AUX TBT4", + .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4, + .hsw.is_tc_tbt = true, + }, + }, + { + .name = "AUX TBT5", + .domains = TGL_AUX_TBT5_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5, + .hsw.is_tc_tbt = true, + }, + }, + { + .name = "AUX TBT6", + .domains = TGL_AUX_TBT6_IO_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &icl_aux_power_well_regs, + .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6, + .hsw.is_tc_tbt = true, + }, + }, + { + .name = "power well 4", + .domains = TGL_PW_4_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = ICL_PW_CTL_IDX_PW_4, + .hsw.has_fuses = true, + .hsw.irq_pipe_mask = BIT(PIPE_C), + } + }, + /* TODO: power well 5 for pipe D */ +}; + static int sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv, int disable_power_well) @@ -3581,7 +4024,9 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) * The enabling order will be from lower to higher indexed wells, * the disabling order is reversed. 
*/ - if (IS_GEN(dev_priv, 11)) { + if (IS_GEN(dev_priv, 12)) { + err = set_power_wells(power_domains, tgl_power_wells); + } else if (IS_GEN(dev_priv, 11)) { err = set_power_wells(power_domains, icl_power_wells); } else if (IS_CANNONLAKE(dev_priv)) { err = set_power_wells(power_domains, cnl_power_wells); @@ -4645,7 +5090,8 @@ static void intel_power_domains_dump_info(struct drm_i915_private *i915) for_each_power_domain(domain, power_well->desc->domains) DRM_DEBUG_DRIVER(" %-23s %d\n", - intel_display_power_domain_str(domain), + intel_display_power_domain_str(i915, + domain), power_domains->domain_use_count[domain]); } } diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h index cc6956132ebc..54ad4f0b0886 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.h +++ b/drivers/gpu/drm/i915/display/intel_display_power.h @@ -33,14 +33,29 @@ enum intel_display_power_domain { POWER_DOMAIN_PORT_DDI_B_LANES, POWER_DOMAIN_PORT_DDI_C_LANES, POWER_DOMAIN_PORT_DDI_D_LANES, + POWER_DOMAIN_PORT_DDI_TC1_LANES = POWER_DOMAIN_PORT_DDI_D_LANES, POWER_DOMAIN_PORT_DDI_E_LANES, + POWER_DOMAIN_PORT_DDI_TC2_LANES = POWER_DOMAIN_PORT_DDI_E_LANES, POWER_DOMAIN_PORT_DDI_F_LANES, + POWER_DOMAIN_PORT_DDI_TC3_LANES = POWER_DOMAIN_PORT_DDI_F_LANES, + POWER_DOMAIN_PORT_DDI_TC4_LANES, + POWER_DOMAIN_PORT_DDI_TC5_LANES, + POWER_DOMAIN_PORT_DDI_TC6_LANES, POWER_DOMAIN_PORT_DDI_A_IO, POWER_DOMAIN_PORT_DDI_B_IO, POWER_DOMAIN_PORT_DDI_C_IO, POWER_DOMAIN_PORT_DDI_D_IO, + POWER_DOMAIN_PORT_DDI_TC1_IO = POWER_DOMAIN_PORT_DDI_D_IO, POWER_DOMAIN_PORT_DDI_E_IO, + POWER_DOMAIN_PORT_DDI_TC2_IO = POWER_DOMAIN_PORT_DDI_E_IO, POWER_DOMAIN_PORT_DDI_F_IO, + POWER_DOMAIN_PORT_DDI_TC3_IO = POWER_DOMAIN_PORT_DDI_F_IO, + POWER_DOMAIN_PORT_DDI_G_IO, + POWER_DOMAIN_PORT_DDI_TC4_IO = POWER_DOMAIN_PORT_DDI_G_IO, + POWER_DOMAIN_PORT_DDI_H_IO, + POWER_DOMAIN_PORT_DDI_TC5_IO = POWER_DOMAIN_PORT_DDI_H_IO, + POWER_DOMAIN_PORT_DDI_I_IO, + POWER_DOMAIN_PORT_DDI_TC6_IO = POWER_DOMAIN_PORT_DDI_I_IO, POWER_DOMAIN_PORT_DSI, POWER_DOMAIN_PORT_CRT, POWER_DOMAIN_PORT_OTHER, @@ -50,13 +65,21 @@ enum intel_display_power_domain { POWER_DOMAIN_AUX_B, POWER_DOMAIN_AUX_C, POWER_DOMAIN_AUX_D, + POWER_DOMAIN_AUX_TC1 = POWER_DOMAIN_AUX_D, POWER_DOMAIN_AUX_E, + POWER_DOMAIN_AUX_TC2 = POWER_DOMAIN_AUX_E, POWER_DOMAIN_AUX_F, + POWER_DOMAIN_AUX_TC3 = POWER_DOMAIN_AUX_F, + POWER_DOMAIN_AUX_TC4, + POWER_DOMAIN_AUX_TC5, + POWER_DOMAIN_AUX_TC6, POWER_DOMAIN_AUX_IO_A, POWER_DOMAIN_AUX_TBT1, POWER_DOMAIN_AUX_TBT2, POWER_DOMAIN_AUX_TBT3, POWER_DOMAIN_AUX_TBT4, + POWER_DOMAIN_AUX_TBT5, + POWER_DOMAIN_AUX_TBT6, POWER_DOMAIN_GMBUS, POWER_DOMAIN_MODESET, POWER_DOMAIN_GT_IRQ, @@ -229,7 +252,8 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume); void bxt_display_core_uninit(struct drm_i915_private *dev_priv); const char * -intel_display_power_domain_str(enum intel_display_power_domain domain); +intel_display_power_domain_str(struct drm_i915_private *i915, + enum intel_display_power_domain domain); bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain); diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index dc65a6131a5b..41245acb0a0f 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -2466,7 +2466,8 @@ static int i915_power_domain_info(struct seq_file *m, void *unused) for_each_power_domain(power_domain, power_well->desc->domains) seq_printf(m, " %-23s %d\n", 
- intel_display_power_domain_str(power_domain), + intel_display_power_domain_str(dev_priv, + power_domain), power_domains->domain_use_count[power_domain]); } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 94e76fa9d114..7df1584e7ff1 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -9147,7 +9147,7 @@ enum { #define GLK_PW_CTL_IDX_DDI_A 1 #define SKL_PW_CTL_IDX_MISC_IO 0 -/* ICL - power wells */ +/* ICL/TGL - power wells */ #define ICL_PW_CTL_IDX_PW_4 3 #define ICL_PW_CTL_IDX_PW_3 2 #define ICL_PW_CTL_IDX_PW_2 1 @@ -9156,13 +9156,25 @@ enum { #define ICL_PWR_WELL_CTL_AUX1 _MMIO(0x45440) #define ICL_PWR_WELL_CTL_AUX2 _MMIO(0x45444) #define ICL_PWR_WELL_CTL_AUX4 _MMIO(0x4544C) +#define TGL_PW_CTL_IDX_AUX_TBT6 14 +#define TGL_PW_CTL_IDX_AUX_TBT5 13 +#define TGL_PW_CTL_IDX_AUX_TBT4 12 #define ICL_PW_CTL_IDX_AUX_TBT4 11 +#define TGL_PW_CTL_IDX_AUX_TBT3 11 #define ICL_PW_CTL_IDX_AUX_TBT3 10 +#define TGL_PW_CTL_IDX_AUX_TBT2 10 #define ICL_PW_CTL_IDX_AUX_TBT2 9 +#define TGL_PW_CTL_IDX_AUX_TBT1 9 #define ICL_PW_CTL_IDX_AUX_TBT1 8 +#define TGL_PW_CTL_IDX_AUX_TC6 8 +#define TGL_PW_CTL_IDX_AUX_TC5 7 +#define TGL_PW_CTL_IDX_AUX_TC4 6 #define ICL_PW_CTL_IDX_AUX_F 5 +#define TGL_PW_CTL_IDX_AUX_TC3 5 #define ICL_PW_CTL_IDX_AUX_E 4 +#define TGL_PW_CTL_IDX_AUX_TC2 4 #define ICL_PW_CTL_IDX_AUX_D 3 +#define TGL_PW_CTL_IDX_AUX_TC1 3 #define ICL_PW_CTL_IDX_AUX_C 2 #define ICL_PW_CTL_IDX_AUX_B 1 #define ICL_PW_CTL_IDX_AUX_A 0 @@ -9170,9 +9182,15 @@ enum { #define ICL_PWR_WELL_CTL_DDI1 _MMIO(0x45450) #define ICL_PWR_WELL_CTL_DDI2 _MMIO(0x45454) #define ICL_PWR_WELL_CTL_DDI4 _MMIO(0x4545C) +#define TGL_PW_CTL_IDX_DDI_TC6 8 +#define TGL_PW_CTL_IDX_DDI_TC5 7 +#define TGL_PW_CTL_IDX_DDI_TC4 6 #define ICL_PW_CTL_IDX_DDI_F 5 +#define TGL_PW_CTL_IDX_DDI_TC3 5 #define ICL_PW_CTL_IDX_DDI_E 4 +#define TGL_PW_CTL_IDX_DDI_TC2 4 #define ICL_PW_CTL_IDX_DDI_D 3 +#define TGL_PW_CTL_IDX_DDI_TC1 3 #define ICL_PW_CTL_IDX_DDI_C 2 #define ICL_PW_CTL_IDX_DDI_B 1 #define ICL_PW_CTL_IDX_DDI_A 0 -- cgit v1.2.3 From 1db27a7291195057e3a20fb9998e2d365ee897f9 Mon Sep 17 00:00:00 2001 From: Mika Kahola Date: Thu, 11 Jul 2019 10:31:03 -0700 Subject: drm/i915/tgl: Add power well to support 4th pipe Add power well 5 to support 4th pipe and transcoder on TGL. 
Cc: James Ausmus Cc: Imre Deak Signed-off-by: Mika Kahola Signed-off-by: Lucas De Marchi Reviewed-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20190711173115.28296-10-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/display/intel_display_power.c | 28 +++++++++++++++++++--- drivers/gpu/drm/i915/display/intel_display_power.h | 3 +++ drivers/gpu/drm/i915/i915_reg.h | 1 + 3 files changed, 29 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 2d91cd70b05b..12aa9ce08d95 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -37,18 +37,24 @@ intel_display_power_domain_str(struct drm_i915_private *i915, return "PIPE_B"; case POWER_DOMAIN_PIPE_C: return "PIPE_C"; + case POWER_DOMAIN_PIPE_D: + return "PIPE_D"; case POWER_DOMAIN_PIPE_A_PANEL_FITTER: return "PIPE_A_PANEL_FITTER"; case POWER_DOMAIN_PIPE_B_PANEL_FITTER: return "PIPE_B_PANEL_FITTER"; case POWER_DOMAIN_PIPE_C_PANEL_FITTER: return "PIPE_C_PANEL_FITTER"; + case POWER_DOMAIN_PIPE_D_PANEL_FITTER: + return "PIPE_D_PANEL_FITTER"; case POWER_DOMAIN_TRANSCODER_A: return "TRANSCODER_A"; case POWER_DOMAIN_TRANSCODER_B: return "TRANSCODER_B"; case POWER_DOMAIN_TRANSCODER_C: return "TRANSCODER_C"; + case POWER_DOMAIN_TRANSCODER_D: + return "TRANSCODER_D"; case POWER_DOMAIN_TRANSCODER_EDP: return "TRANSCODER_EDP"; case POWER_DOMAIN_TRANSCODER_VDSC_PW2: @@ -2540,8 +2546,13 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, #define ICL_AUX_TBT4_IO_POWER_DOMAINS ( \ BIT_ULL(POWER_DOMAIN_AUX_TBT4)) -/* TODO: TGL_PW_5_POWER_DOMAINS: PIPE_D */ +#define TGL_PW_5_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PIPE_D) | \ + BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) | \ + BIT_ULL(POWER_DOMAIN_INIT)) + #define TGL_PW_4_POWER_DOMAINS ( \ + TGL_PW_5_POWER_DOMAINS | \ BIT_ULL(POWER_DOMAIN_PIPE_C) | \ BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ BIT_ULL(POWER_DOMAIN_INIT)) @@ -2551,7 +2562,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, BIT_ULL(POWER_DOMAIN_PIPE_B) | \ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ - /* TODO: TRANSCODER_D */ \ + BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ BIT_ULL(POWER_DOMAIN_PORT_DDI_TC1_LANES) | \ BIT_ULL(POWER_DOMAIN_PORT_DDI_TC1_IO) | \ @@ -3894,7 +3905,18 @@ static const struct i915_power_well_desc tgl_power_wells[] = { .hsw.irq_pipe_mask = BIT(PIPE_C), } }, - /* TODO: power well 5 for pipe D */ + { + .name = "power well 5", + .domains = TGL_PW_5_POWER_DOMAINS, + .ops = &hsw_power_well_ops, + .id = DISP_PW_ID_NONE, + { + .hsw.regs = &hsw_power_well_regs, + .hsw.idx = TGL_PW_CTL_IDX_PW_5, + .hsw.has_fuses = true, + .hsw.irq_pipe_mask = BIT(PIPE_D), + }, + }, }; static int diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h index 54ad4f0b0886..a264f18c95f1 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.h +++ b/drivers/gpu/drm/i915/display/intel_display_power.h @@ -18,12 +18,15 @@ enum intel_display_power_domain { POWER_DOMAIN_PIPE_A, POWER_DOMAIN_PIPE_B, POWER_DOMAIN_PIPE_C, + POWER_DOMAIN_PIPE_D, POWER_DOMAIN_PIPE_A_PANEL_FITTER, POWER_DOMAIN_PIPE_B_PANEL_FITTER, POWER_DOMAIN_PIPE_C_PANEL_FITTER, + POWER_DOMAIN_PIPE_D_PANEL_FITTER, POWER_DOMAIN_TRANSCODER_A, POWER_DOMAIN_TRANSCODER_B, POWER_DOMAIN_TRANSCODER_C, + 
POWER_DOMAIN_TRANSCODER_D, POWER_DOMAIN_TRANSCODER_EDP, /* VDSC/joining for TRANSCODER_EDP (ICL) or TRANSCODER_A (TGL) */ POWER_DOMAIN_TRANSCODER_VDSC_PW2, diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 7df1584e7ff1..ca70be40a467 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -9148,6 +9148,7 @@ enum { #define SKL_PW_CTL_IDX_MISC_IO 0 /* ICL/TGL - power wells */ +#define TGL_PW_CTL_IDX_PW_5 4 #define ICL_PW_CTL_IDX_PW_4 3 #define ICL_PW_CTL_IDX_PW_3 2 #define ICL_PW_CTL_IDX_PW_2 1 -- cgit v1.2.3 From 68ff39c3f8c01b1e640d111abdcf814804a6b236 Mon Sep 17 00:00:00 2001 From: Vandita Kulkarni Date: Thu, 11 Jul 2019 10:31:04 -0700 Subject: drm/i915/tgl: Add new pll ids Add 2 new PLLs for additional TC ports. The names for the PLLs on TGL changed, but most registers remained the same, like MGPLL5_ENABLE, MGPLL6_ENABLE. So continue to use the name from ICL. Cc: Madhav Chauhan Cc: Rodrigo Vivi Signed-off-by: Vandita Kulkarni Signed-off-by: Lucas De Marchi Reviewed-by: Anusha Srivatsa Link: https://patchwork.freedesktop.org/patch/msgid/20190711173115.28296-11-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/display/intel_dpll_mgr.h | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h index ed5fae964736..e7588799fce5 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h @@ -111,11 +111,11 @@ enum intel_dpll_id { /** - * @DPLL_ID_ICL_DPLL0: ICL combo PHY DPLL0 + * @DPLL_ID_ICL_DPLL0: ICL/TGL combo PHY DPLL0 */ DPLL_ID_ICL_DPLL0 = 0, /** - * @DPLL_ID_ICL_DPLL1: ICL combo PHY DPLL1 + * @DPLL_ID_ICL_DPLL1: ICL/TGL combo PHY DPLL1 */ DPLL_ID_ICL_DPLL1 = 1, /** @@ -123,27 +123,40 @@ enum intel_dpll_id { */ DPLL_ID_EHL_DPLL4 = 2, /** - * @DPLL_ID_ICL_TBTPLL: ICL TBT PLL + * @DPLL_ID_ICL_TBTPLL: ICL/TGL TBT PLL */ DPLL_ID_ICL_TBTPLL = 2, /** - * @DPLL_ID_ICL_MGPLL1: ICL MG PLL 1 port 1 (C) + * @DPLL_ID_ICL_MGPLL1: ICL MG PLL 1 port 1 (C), + * TGL TC PLL 1 port 1 (TC1) */ DPLL_ID_ICL_MGPLL1 = 3, /** * @DPLL_ID_ICL_MGPLL2: ICL MG PLL 1 port 2 (D) + * TGL TC PLL 1 port 2 (TC2) */ DPLL_ID_ICL_MGPLL2 = 4, /** * @DPLL_ID_ICL_MGPLL3: ICL MG PLL 1 port 3 (E) + * TGL TC PLL 1 port 3 (TC3) */ DPLL_ID_ICL_MGPLL3 = 5, /** * @DPLL_ID_ICL_MGPLL4: ICL MG PLL 1 port 4 (F) + * TGL TC PLL 1 port 4 (TC4) */ DPLL_ID_ICL_MGPLL4 = 6, + /** + * @DPLL_ID_TGL_TCPLL5: TGL TC PLL port 5 (TC5) + */ + DPLL_ID_TGL_MGPLL5 = 7, + /** + * @DPLL_ID_TGL_TCPLL6: TGL TC PLL port 6 (TC6) + */ + DPLL_ID_TGL_MGPLL6 = 8, }; -#define I915_NUM_PLLS 7 + +#define I915_NUM_PLLS 9 enum icl_port_dpll_id { ICL_PORT_DPLL_DEFAULT, -- cgit v1.2.3 From c9014a2c7937e40858b858200a35432075638ede Mon Sep 17 00:00:00 2001 From: Vandita Kulkarni Date: Thu, 11 Jul 2019 10:31:05 -0700 Subject: drm/i915/tgl: Add pll manager Add a new pll array for Tiger Lake. The TC pll functions for type C will be covered in later patches after its phy is implemented. 
Cc: Madhav Chauhan Cc: Rodrigo Vivi Signed-off-by: Vandita Kulkarni Signed-off-by: Lucas De Marchi Reviewed-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20190711173115.28296-12-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index 5065f21fd82b..9f96d29c5060 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -3461,6 +3461,21 @@ static const struct intel_dpll_mgr ehl_pll_mgr = { .dump_hw_state = icl_dump_hw_state, }; +static const struct dpll_info tgl_plls[] = { + { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 }, + { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 }, + { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 }, + /* TODO: Add typeC plls */ + { }, +}; + +static const struct intel_dpll_mgr tgl_pll_mgr = { + .dpll_info = tgl_plls, + .get_dplls = icl_get_dplls, + .put_dplls = icl_put_dplls, + .dump_hw_state = icl_dump_hw_state, +}; + /** * intel_shared_dpll_init - Initialize shared DPLLs * @dev: drm device @@ -3474,7 +3489,9 @@ void intel_shared_dpll_init(struct drm_device *dev) const struct dpll_info *dpll_info; int i; - if (IS_ELKHARTLAKE(dev_priv)) + if (INTEL_GEN(dev_priv) >= 12) + dpll_mgr = &tgl_pll_mgr; + else if (IS_ELKHARTLAKE(dev_priv)) dpll_mgr = &ehl_pll_mgr; else if (INTEL_GEN(dev_priv) >= 11) dpll_mgr = &icl_pll_mgr; -- cgit v1.2.3 From 6c8337dafaa9a328216c62a79c9a03176af3ce70 Mon Sep 17 00:00:00 2001 From: Vandita Kulkarni Date: Thu, 11 Jul 2019 10:31:06 -0700 Subject: drm/i915/tgl: Add additional ports for Tiger Lake MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There are 2 new additional typeC ports in Tiger Lake and PORT-C is now a combophy port. This results in 6 typeC ports and 3 combophy ports. These 6 TC ports can be DP alternate mode, DP over thunderbolt, native DP on legacy DP connector or native HDMI on legacy connector. 
v2: Rebase on new modular FIA code (Lucas) v3: Also add new port in port_identifier(), even though it can't possibly be used there (requested by José) v4: Add conversion port->tc_port in helper function after introction of phy namespace (Lucas) Cc: Anusha Srivatsa Signed-off-by: Vandita Kulkarni Signed-off-by: Lucas De Marchi Reviewed-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20190711173115.28296-13-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/display/intel_ddi.c | 12 ++++++++++++ drivers/gpu/drm/i915/display/intel_display.c | 3 +++ drivers/gpu/drm/i915/display/intel_display.h | 8 ++++++++ include/drm/i915_component.h | 2 +- include/drm/i915_drm.h | 3 +++ 5 files changed, 27 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 1662e5c2be1c..8445244aa593 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -4286,6 +4286,18 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) intel_dig_port->ddi_io_power_domain = POWER_DOMAIN_PORT_DDI_F_IO; break; + case PORT_G: + intel_dig_port->ddi_io_power_domain = + POWER_DOMAIN_PORT_DDI_G_IO; + break; + case PORT_H: + intel_dig_port->ddi_io_power_domain = + POWER_DOMAIN_PORT_DDI_H_IO; + break; + case PORT_I: + intel_dig_port->ddi_io_power_domain = + POWER_DOMAIN_PORT_DDI_I_IO; + break; default: MISSING_CASE(port); } diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index c13784b81fb1..13ff4177beba 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -6706,6 +6706,9 @@ enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port) if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port))) return PORT_TC_NONE; + if (INTEL_GEN(dev_priv) >= 12) + return port - PORT_D; + return port - PORT_C; } diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index 1f75b0a627fd..72ce27079a56 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -177,6 +177,12 @@ static inline const char *port_identifier(enum port port) return "Port E"; case PORT_F: return "Port F"; + case PORT_G: + return "Port G"; + case PORT_H: + return "Port H"; + case PORT_I: + return "Port I"; default: return ""; } @@ -189,6 +195,8 @@ enum tc_port { PORT_TC2, PORT_TC3, PORT_TC4, + PORT_TC5, + PORT_TC6, I915_MAX_TC_PORTS }; diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h index dcb95bd9dee6..55c3b123581b 100644 --- a/include/drm/i915_component.h +++ b/include/drm/i915_component.h @@ -34,7 +34,7 @@ enum i915_component_type { /* MAX_PORT is the number of port * It must be sync with I915_MAX_PORTS defined i915_drv.h */ -#define MAX_PORTS 6 +#define MAX_PORTS 9 /** * struct i915_audio_component - Used for direct communication between i915 and hda drivers diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index 7523e9a7b6e2..eb30062359d1 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h @@ -109,6 +109,9 @@ enum port { PORT_D, PORT_E, PORT_F, + PORT_G, + PORT_H, + PORT_I, I915_MAX_PORTS }; -- cgit v1.2.3 From 5c71970889d7d58d9fe26f92f454d7fa5db52202 Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Thu, 11 Jul 2019 10:31:07 -0700 Subject: drm/i915/tgl: Add additional PHYs for Tiger Lake Tiger Lake has 
up to 3 combo phys and 6 TC phys. Extend the helper conversion functions from port to phy. Signed-off-by: Lucas De Marchi Reviewed-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20190711173115.28296-14-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 5 ++++- drivers/gpu/drm/i915/display/intel_display.h | 3 +++ 2 files changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 13ff4177beba..60459b9d1ec5 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -6676,7 +6676,7 @@ bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy) if (phy == PHY_NONE) return false; - if (IS_ELKHARTLAKE(dev_priv)) + if (IS_ELKHARTLAKE(dev_priv) || INTEL_GEN(dev_priv) >= 12) return phy <= PHY_C; if (INTEL_GEN(dev_priv) >= 11) @@ -6687,6 +6687,9 @@ bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy) bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy) { + if (INTEL_GEN(dev_priv) >= 12) + return phy >= PHY_D && phy <= PHY_I; + if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv)) return phy >= PHY_C && phy <= PHY_F; diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index 72ce27079a56..92931dc26470 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -249,6 +249,9 @@ enum phy { PHY_D, PHY_E, PHY_F, + PHY_G, + PHY_H, + PHY_I, I915_MAX_PHYS }; -- cgit v1.2.3 From 55cd5048e176318911db97668f562c1599982dfb Mon Sep 17 00:00:00 2001 From: Mahesh Kumar Date: Thu, 11 Jul 2019 10:31:08 -0700 Subject: drm/i915/tgl: init ddi port A-C for Tiger Lake MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch initializes DDI PORT A, B & C for Tiger lake. Other TC ports need to be initialized later once corresponding code is there. 
Cc: Madhav Chauhan Signed-off-by: Mahesh Kumar Signed-off-by: Lucas De Marchi Reviewed-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190711173115.28296-15-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 60459b9d1ec5..0a15c0868a0c 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -15312,13 +15312,18 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) if (!HAS_DISPLAY(dev_priv)) return; - if (IS_ELKHARTLAKE(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 12) { + /* TODO: initialize TC ports as well */ + intel_ddi_init(dev_priv, PORT_A); + intel_ddi_init(dev_priv, PORT_B); + intel_ddi_init(dev_priv, PORT_C); + } else if (IS_ELKHARTLAKE(dev_priv)) { intel_ddi_init(dev_priv, PORT_A); intel_ddi_init(dev_priv, PORT_B); intel_ddi_init(dev_priv, PORT_C); intel_ddi_init(dev_priv, PORT_D); icl_dsi_init(dev_priv); - } else if (INTEL_GEN(dev_priv) >= 11) { + } else if (IS_GEN(dev_priv, 11)) { intel_ddi_init(dev_priv, PORT_A); intel_ddi_init(dev_priv, PORT_B); intel_ddi_init(dev_priv, PORT_C); -- cgit v1.2.3 From deea06b47574de81da72a2d2fa1ace4e119ca02a Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Thu, 11 Jul 2019 14:35:17 -0700 Subject: drm/i915/tgl: apply Display WA #1178 to fix type C dongles Add port C to workaround to cover Tiger Lake. Cc: Rodrigo Vivi Signed-off-by: Lucas De Marchi Reviewed-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20190708231629.9296-22-lucas.demarchi@intel.com Reviewed-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20190711213517.13674-1-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/display/intel_display_power.c | 12 +++++++++--- drivers/gpu/drm/i915/i915_reg.h | 4 +++- 2 files changed, 12 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 12aa9ce08d95..d25fd5a25199 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -453,6 +453,7 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, int pw_idx = power_well->desc->hsw.idx; enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx); u32 val; + int wa_idx_max; val = I915_READ(regs->driver); I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx)); @@ -462,9 +463,14 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, hsw_wait_for_power_well_enable(dev_priv, power_well); - /* Display WA #1178: icl */ - if (IS_ICELAKE(dev_priv) && - pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B && + /* Display WA #1178: icl, tgl */ + if (IS_TIGERLAKE(dev_priv)) + wa_idx_max = ICL_PW_CTL_IDX_AUX_C; + else + wa_idx_max = ICL_PW_CTL_IDX_AUX_B; + + if (!IS_ELKHARTLAKE(dev_priv) && + pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= wa_idx_max && !intel_bios_is_port_edp(dev_priv, (enum port)phy)) { val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx)); val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index ca70be40a467..ad96c5b4975c 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -9244,9 +9244,11 @@ enum 
skl_power_gate { #define _ICL_AUX_REG_IDX(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A) #define _ICL_AUX_ANAOVRD1_A 0x162398 #define _ICL_AUX_ANAOVRD1_B 0x6C398 +#define _TGL_AUX_ANAOVRD1_C 0x160398 #define ICL_AUX_ANAOVRD1(pw_idx) _MMIO(_PICK(_ICL_AUX_REG_IDX(pw_idx), \ _ICL_AUX_ANAOVRD1_A, \ - _ICL_AUX_ANAOVRD1_B)) + _ICL_AUX_ANAOVRD1_B, \ + _TGL_AUX_ANAOVRD1_C)) #define ICL_AUX_ANAOVRD1_LDO_BYPASS (1 << 7) #define ICL_AUX_ANAOVRD1_ENABLE (1 << 0) -- cgit v1.2.3 From 30fcc338bc66f930dc7f692205474edb7ca6a607 Mon Sep 17 00:00:00 2001 From: Rodrigo Vivi Date: Thu, 11 Jul 2019 10:31:10 -0700 Subject: drm/i915/gen12: MBUS B credit change MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Previously, the recommended B credit for all platforms was 24 / number of pipes, which would give 6 for newer platforms with 4 pipes. However 6 is not enough and we need 12 on these cases. We also need a different BW credit for these platforms. Cc: Arthur J Runyan Signed-off-by: Rodrigo Vivi Signed-off-by: Lucas De Marchi Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190711173115.28296-17-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 0a15c0868a0c..79ae8f4e5213 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -6423,8 +6423,14 @@ static void icl_pipe_mbus_enable(struct intel_crtc *crtc) u32 val; val = MBUS_DBOX_A_CREDIT(2); - val |= MBUS_DBOX_BW_CREDIT(1); - val |= MBUS_DBOX_B_CREDIT(8); + + if (INTEL_GEN(dev_priv) >= 12) { + val |= MBUS_DBOX_BW_CREDIT(2); + val |= MBUS_DBOX_B_CREDIT(12); + } else { + val |= MBUS_DBOX_BW_CREDIT(1); + val |= MBUS_DBOX_B_CREDIT(8); + } I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val); } -- cgit v1.2.3 From 3fd53262f0dd81361efcb5ccba8312525ee6c645 Mon Sep 17 00:00:00 2001 From: Mahesh Kumar Date: Thu, 11 Jul 2019 10:31:11 -0700 Subject: drm/i915/tgl: Add gmbus gpio pin to port mapping MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add default GPIO pin mapping for all ports. Tiger Lake has 3 combophy ports and 6 TC ports, gpio pin1-3 are mapped to combophy & pin9-14 are mapped to TC ports. 
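As a usage sketch only, not part of the patch: once the table below is in place, a TGP pin can be validated and its adapter fetched the usual way. GMBUS_PIN_14_TC6_TGP comes from the i915_reg.h hunk below; intel_gmbus_get_adapter() is the existing lookup helper and is assumed to need no change here.

	struct i2c_adapter *adapter = NULL;

	/* e.g. the DDC adapter behind the sixth TC port on a TGP PCH */
	if (intel_gmbus_is_valid_pin(dev_priv, GMBUS_PIN_14_TC6_TGP))
		adapter = intel_gmbus_get_adapter(dev_priv, GMBUS_PIN_14_TC6_TGP);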
Cc: Anusha Srivatsa Cc: Rodrigo Vivi Signed-off-by: Mahesh Kumar Signed-off-by: Lucas De Marchi Reviewed-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190711173115.28296-18-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/display/intel_display.h | 2 ++ drivers/gpu/drm/i915/display/intel_gmbus.c | 20 ++++++++++++++++++-- drivers/gpu/drm/i915/i915_reg.h | 4 +++- 3 files changed, 23 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index 92931dc26470..67743eea4a50 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -45,6 +45,8 @@ enum i915_gpio { GPIOK, GPIOL, GPIOM, + GPION, + GPIOO, }; /* diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c index 4f6a9bd5af47..b42c79aea61a 100644 --- a/drivers/gpu/drm/i915/display/intel_gmbus.c +++ b/drivers/gpu/drm/i915/display/intel_gmbus.c @@ -94,11 +94,25 @@ static const struct gmbus_pin gmbus_pins_mcc[] = { [GMBUS_PIN_9_TC1_ICP] = { "dpc", GPIOJ }, }; +static const struct gmbus_pin gmbus_pins_tgp[] = { + [GMBUS_PIN_1_BXT] = { "dpa", GPIOB }, + [GMBUS_PIN_2_BXT] = { "dpb", GPIOC }, + [GMBUS_PIN_3_BXT] = { "dpc", GPIOD }, + [GMBUS_PIN_9_TC1_ICP] = { "tc1", GPIOJ }, + [GMBUS_PIN_10_TC2_ICP] = { "tc2", GPIOK }, + [GMBUS_PIN_11_TC3_ICP] = { "tc3", GPIOL }, + [GMBUS_PIN_12_TC4_ICP] = { "tc4", GPIOM }, + [GMBUS_PIN_13_TC5_TGP] = { "tc5", GPION }, + [GMBUS_PIN_14_TC6_TGP] = { "tc6", GPIOO }, +}; + /* pin is expected to be valid */ static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *dev_priv, unsigned int pin) { - if (HAS_PCH_MCC(dev_priv)) + if (HAS_PCH_TGP(dev_priv)) + return &gmbus_pins_tgp[pin]; + else if (HAS_PCH_MCC(dev_priv)) return &gmbus_pins_mcc[pin]; else if (HAS_PCH_ICP(dev_priv)) return &gmbus_pins_icp[pin]; @@ -119,7 +133,9 @@ bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv, { unsigned int size; - if (HAS_PCH_MCC(dev_priv)) + if (HAS_PCH_TGP(dev_priv)) + size = ARRAY_SIZE(gmbus_pins_tgp); + else if (HAS_PCH_MCC(dev_priv)) size = ARRAY_SIZE(gmbus_pins_mcc); else if (HAS_PCH_ICP(dev_priv)) size = ARRAY_SIZE(gmbus_pins_icp); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index ad96c5b4975c..62ac8a119602 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -3254,8 +3254,10 @@ enum i915_power_well_id { #define GMBUS_PIN_10_TC2_ICP 10 #define GMBUS_PIN_11_TC3_ICP 11 #define GMBUS_PIN_12_TC4_ICP 12 +#define GMBUS_PIN_13_TC5_TGP 13 +#define GMBUS_PIN_14_TC6_TGP 14 -#define GMBUS_NUM_PINS 13 /* including 0 */ +#define GMBUS_NUM_PINS 15 /* including 0 */ #define GMBUS1 _MMIO(dev_priv->gpio_mmio_base + 0x5104) /* command/status */ #define GMBUS_SW_CLR_INT (1 << 31) #define GMBUS_SW_RDY (1 << 30) -- cgit v1.2.3 From fb81cbe46988cddfd9e6fd2bb9f7ce39cea0c380 Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Thu, 11 Jul 2019 10:31:12 -0700 Subject: drm/i915/tgl: port to ddc pin mapping Make the icl function generic so it is based on phy type and can be applied to tgl as well. I checked if this could not apply to EHL as well, but unfortunately there the HPD and DDC/GMBUS pins for DDI C are mapped to TypeC Port 1 even though it doesn't have TC phy. 
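Spelled out, the phy-based mapping below works out as follows on TGL (pin numbers and names taken from the GMBUS_PIN_* definitions and the gmbus_pins_tgp[] table added earlier in this series; the split of three combo DDIs plus six TC ports is the expected TGL layout):

	/*
	 *   combo:  PORT_A -> GMBUS_PIN_1_BXT + PORT_A       = pin 1  ("dpa")
	 *           PORT_C -> GMBUS_PIN_1_BXT + PORT_C       = pin 3  ("dpc")
	 *   TC:     1st TC port -> GMBUS_PIN_9_TC1_ICP + 0   = pin 9  ("tc1")
	 *           6th TC port -> GMBUS_PIN_9_TC1_ICP + 5   = pin 14 ("tc6")
	 */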
v2: don't add a separate function for TGL, but rather reuse the ICL one (suggested by Rodrigo) v3: rebase after the introduction of enum phy and use it for the conversions Cc: Anusha Srivatsa Cc: Rodrigo Vivi Signed-off-by: Lucas De Marchi Reviewed-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20190711173115.28296-19-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/display/intel_hdmi.c | 36 ++++++++----------------------- 1 file changed, 9 insertions(+), 27 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index 0ebec69bbbfc..77af0dfd93ce 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -2930,33 +2930,15 @@ static u8 cnp_port_to_ddc_pin(struct drm_i915_private *dev_priv, static u8 icl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port) { - u8 ddc_pin; + enum phy phy = intel_port_to_phy(dev_priv, port); - switch (port) { - case PORT_A: - ddc_pin = GMBUS_PIN_1_BXT; - break; - case PORT_B: - ddc_pin = GMBUS_PIN_2_BXT; - break; - case PORT_C: - ddc_pin = GMBUS_PIN_9_TC1_ICP; - break; - case PORT_D: - ddc_pin = GMBUS_PIN_10_TC2_ICP; - break; - case PORT_E: - ddc_pin = GMBUS_PIN_11_TC3_ICP; - break; - case PORT_F: - ddc_pin = GMBUS_PIN_12_TC4_ICP; - break; - default: - MISSING_CASE(port); - ddc_pin = GMBUS_PIN_2_BXT; - break; - } - return ddc_pin; + if (intel_phy_is_combo(dev_priv, phy)) + return GMBUS_PIN_1_BXT + port; + else if (intel_phy_is_tc(dev_priv, phy)) + return GMBUS_PIN_9_TC1_ICP + intel_port_to_tc(dev_priv, port); + + WARN(1, "Unknown port:%c\n", port_name(port)); + return GMBUS_PIN_2_BXT; } static u8 mcc_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port) @@ -3019,7 +3001,7 @@ static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv, if (HAS_PCH_MCC(dev_priv)) ddc_pin = mcc_port_to_ddc_pin(dev_priv, port); - else if (HAS_PCH_ICP(dev_priv)) + else if (HAS_PCH_TGP(dev_priv) || HAS_PCH_ICP(dev_priv)) ddc_pin = icl_port_to_ddc_pin(dev_priv, port); else if (HAS_PCH_CNP(dev_priv)) ddc_pin = cnp_port_to_ddc_pin(dev_priv, port); -- cgit v1.2.3 From d757535e3116c1a5f024bfef1ab3544a525f03fb Mon Sep 17 00:00:00 2001 From: Mahesh Kumar Date: Thu, 11 Jul 2019 10:31:13 -0700 Subject: drm/i915/tgl: Add vbt value mapping for DDC Bus pin MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add VBT-value to DDC bus pin mapping for the same. 
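To make the new table concrete, a couple of worked lookups, purely illustrative (values follow from the enum vbt_gmbus_ddi and GMBUS_PIN_* definitions in the hunks below):

	/*
	 *   VBT ddc_pin = ICL_DDC_BUS_DDI_A  (0x1) -> GMBUS_PIN_1_BXT      (pin 1)
	 *   VBT ddc_pin = TGL_DDC_BUS_DDI_C  (0x3) -> GMBUS_PIN_3_BXT      (pin 3)
	 *   VBT ddc_pin = TGL_DDC_BUS_PORT_5 (0x8) -> GMBUS_PIN_13_TC5_TGP (pin 13)
	 */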
Signed-off-by: Mahesh Kumar Signed-off-by: Lucas De Marchi Reviewed-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190711173115.28296-20-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/display/intel_bios.c | 17 ++++++++++++++++- drivers/gpu/drm/i915/display/intel_vbt_defs.h | 3 +++ 2 files changed, 19 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index 4fdbb5c35d87..2fe68f72b88f 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -1355,12 +1355,27 @@ static const u8 mcc_ddc_pin_map[] = { [MCC_DDC_BUS_DDI_C] = GMBUS_PIN_9_TC1_ICP, }; +static const u8 tgp_ddc_pin_map[] = { + [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT, + [ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT, + [TGL_DDC_BUS_DDI_C] = GMBUS_PIN_3_BXT, + [ICL_DDC_BUS_PORT_1] = GMBUS_PIN_9_TC1_ICP, + [ICL_DDC_BUS_PORT_2] = GMBUS_PIN_10_TC2_ICP, + [ICL_DDC_BUS_PORT_3] = GMBUS_PIN_11_TC3_ICP, + [ICL_DDC_BUS_PORT_4] = GMBUS_PIN_12_TC4_ICP, + [TGL_DDC_BUS_PORT_5] = GMBUS_PIN_13_TC5_TGP, + [TGL_DDC_BUS_PORT_6] = GMBUS_PIN_14_TC6_TGP, +}; + static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin) { const u8 *ddc_pin_map; int n_entries; - if (HAS_PCH_MCC(dev_priv)) { + if (HAS_PCH_TGP(dev_priv)) { + ddc_pin_map = tgp_ddc_pin_map; + n_entries = ARRAY_SIZE(tgp_ddc_pin_map); + } else if (HAS_PCH_MCC(dev_priv)) { ddc_pin_map = mcc_ddc_pin_map; n_entries = ARRAY_SIZE(mcc_ddc_pin_map); } else if (HAS_PCH_ICP(dev_priv)) { diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h index 2f4894e9a03d..93f5c9d204d6 100644 --- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h @@ -310,10 +310,13 @@ enum vbt_gmbus_ddi { DDC_BUS_DDI_F, ICL_DDC_BUS_DDI_A = 0x1, ICL_DDC_BUS_DDI_B, + TGL_DDC_BUS_DDI_C, ICL_DDC_BUS_PORT_1 = 0x4, ICL_DDC_BUS_PORT_2, ICL_DDC_BUS_PORT_3, ICL_DDC_BUS_PORT_4, + TGL_DDC_BUS_PORT_5, + TGL_DDC_BUS_PORT_6, MCC_DDC_BUS_DDI_A = 0x1, MCC_DDC_BUS_DDI_B, MCC_DDC_BUS_DDI_C = 0x4, -- cgit v1.2.3 From 36ca5335f202bd54faf38b37fed1b99078e1839e Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Thu, 11 Jul 2019 10:31:14 -0700 Subject: drm/i915/tgl: Add DPLL registers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On TGL the port programming for combophy is very similar to ICL, so adapt the callers to possibly use the different register values. 
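The selection pattern the callers gain below (TGL_DPLL_CFGCR*(id) on gen12, ICL_DPLL_CFGCR*(id) otherwise) could equally be factored into a tiny helper; a sketch only, not part of the patch, with dpll_cfgcr0_reg() being a hypothetical name:

	static i915_reg_t dpll_cfgcr0_reg(struct drm_i915_private *dev_priv,
					  enum intel_dpll_id id)
	{
		return INTEL_GEN(dev_priv) >= 12 ? TGL_DPLL_CFGCR0(id) :
						   ICL_DPLL_CFGCR0(id);
	}

	/* e.g. hw_state->cfgcr0 = I915_READ(dpll_cfgcr0_reg(dev_priv, id)); */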
v2 (Lucas): Add TODO with about DPLL4 (requested by Ville) Cc: Vandita Kulkarni Cc: Rodrigo Vivi Signed-off-by: Lucas De Marchi Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190711173115.28296-21-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 24 +++++++++++++++++++----- drivers/gpu/drm/i915/i915_reg.h | 17 +++++++++++++++++ 2 files changed, 36 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index 9f96d29c5060..267e6d7df706 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -3119,8 +3119,13 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv, if (!(val & PLL_ENABLE)) goto out; - hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id)); - hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id)); + if (INTEL_GEN(dev_priv) >= 12) { + hw_state->cfgcr0 = I915_READ(TGL_DPLL_CFGCR0(id)); + hw_state->cfgcr1 = I915_READ(TGL_DPLL_CFGCR1(id)); + } else { + hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id)); + hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id)); + } ret = true; out: @@ -3154,10 +3159,19 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv, { struct intel_dpll_hw_state *hw_state = &pll->state.hw_state; const enum intel_dpll_id id = pll->info->id; + i915_reg_t cfgcr0_reg, cfgcr1_reg; + + if (INTEL_GEN(dev_priv) >= 12) { + cfgcr0_reg = TGL_DPLL_CFGCR0(id); + cfgcr1_reg = TGL_DPLL_CFGCR1(id); + } else { + cfgcr0_reg = ICL_DPLL_CFGCR0(id); + cfgcr1_reg = ICL_DPLL_CFGCR1(id); + } - I915_WRITE(ICL_DPLL_CFGCR0(id), hw_state->cfgcr0); - I915_WRITE(ICL_DPLL_CFGCR1(id), hw_state->cfgcr1); - POSTING_READ(ICL_DPLL_CFGCR1(id)); + I915_WRITE(cfgcr0_reg, hw_state->cfgcr0); + I915_WRITE(cfgcr1_reg, hw_state->cfgcr1); + POSTING_READ(cfgcr1_reg); } static void icl_mg_pll_write(struct drm_i915_private *dev_priv, diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 62ac8a119602..c8277862bbbe 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -242,6 +242,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c)) #define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c)) #define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c)) +#define _MMIO_PLL3(pll, a, b, c) _MMIO(_PICK(pll, a, b, c)) /* * Device info offset array based helpers for groups of registers with unevenly @@ -9955,6 +9956,22 @@ enum skl_power_gate { #define ICL_DPLL_CFGCR1(pll) _MMIO_PLL(pll, _ICL_DPLL0_CFGCR1, \ _ICL_DPLL1_CFGCR1) +#define _TGL_DPLL0_CFGCR0 0x164284 +#define _TGL_DPLL1_CFGCR0 0x16428C +/* TODO: add DPLL4 */ +#define _TGL_TBTPLL_CFGCR0 0x16429C +#define TGL_DPLL_CFGCR0(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR0, \ + _TGL_DPLL1_CFGCR0, \ + _TGL_TBTPLL_CFGCR0) + +#define _TGL_DPLL0_CFGCR1 0x164288 +#define _TGL_DPLL1_CFGCR1 0x164290 +/* TODO: add DPLL4 */ +#define _TGL_TBTPLL_CFGCR1 0x1642A0 +#define TGL_DPLL_CFGCR1(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR1, \ + _TGL_DPLL1_CFGCR1, \ + _TGL_TBTPLL_CFGCR1) + /* BXT display engine PLL */ #define BXT_DE_PLL_CTL _MMIO(0x6d000) #define BXT_DE_PLL_RATIO(x) (x) /* {60,65,100} * 19.2MHz */ -- cgit v1.2.3 From a1c5f1510b3f39d57a6eaa9d75c70e5beaa952ff Mon Sep 17 00:00:00 2001 From: José Roberto de Souza Date: Thu, 11 Jul 2019 10:31:15 -0700 Subject: drm/i915/tgl: Update DPLL clock reference register 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This register definition changed from ICL and has now another meaning. Use the right bits on TGL. Signed-off-by: José Roberto de Souza Signed-off-by: Lucas De Marchi Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190711173115.28296-22-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 8 ++++++-- drivers/gpu/drm/i915/i915_reg.h | 1 + 2 files changed, 7 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index 267e6d7df706..319a26a1ec10 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -2597,8 +2597,12 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state, cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params.qdiv_ratio) | DPLL_CFGCR1_QDIV_MODE(pll_params.qdiv_mode) | DPLL_CFGCR1_KDIV(pll_params.kdiv) | - DPLL_CFGCR1_PDIV(pll_params.pdiv) | - DPLL_CFGCR1_CENTRAL_FREQ_8400; + DPLL_CFGCR1_PDIV(pll_params.pdiv); + + if (INTEL_GEN(dev_priv) >= 12) + cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL; + else + cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400; memset(pll_state, 0, sizeof(*pll_state)); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index c8277862bbbe..3ff659a180e6 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -9944,6 +9944,7 @@ enum skl_power_gate { #define DPLL_CFGCR1_PDIV_7 (8 << 2) #define DPLL_CFGCR1_CENTRAL_FREQ (3 << 0) #define DPLL_CFGCR1_CENTRAL_FREQ_8400 (3 << 0) +#define TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL (0 << 0) #define CNL_DPLL_CFGCR1(pll) _MMIO_PLL(pll, _CNL_DPLL0_CFGCR1, _CNL_DPLL1_CFGCR1) #define _ICL_DPLL0_CFGCR0 0x164000 -- cgit v1.2.3 From 1e2b7f497c28a47793a95d8f0cc8e135899827f1 Mon Sep 17 00:00:00 2001 From: John Harrison Date: Fri, 12 Jul 2019 00:07:43 -0700 Subject: drm/i915: Add test for invalid flag bits in whitelist entries As per review feedback by Tvrtko, added a check that no invalid bits are being set in the whitelist flags fields. Also updated the read/write access definitions to make it clearer that they are an enum field not a set of single bit flags. 
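A concrete illustration of why the access field has to be treated as a two-bit enum rather than as independent flag bits (values as defined in the i915_reg.h hunk below):

	/*
	 *   RING_FORCE_TO_NONPRIV_ACCESS_RD | RING_FORCE_TO_NONPRIV_ACCESS_WR
	 *     == (1 << 28) | (2 << 28)
	 *     == (3 << 28)
	 *     == RING_FORCE_TO_NONPRIV_ACCESS_INVALID
	 *
	 * i.e. naively OR'ing "read" and "write" yields the reserved encoding,
	 * which is exactly what is_nonpriv_flags_valid() now rejects.
	 */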
Signed-off-by: John Harrison CC: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Signed-off-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190712070745.35239-2-John.C.Harrison@Intel.com --- drivers/gpu/drm/i915/gt/intel_workarounds.c | 29 ++++++++++++++++++++------ drivers/gpu/drm/i915/gt/selftest_workarounds.c | 14 +++++++++---- drivers/gpu/drm/i915/i915_reg.h | 12 ++++++++--- 3 files changed, 42 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 9e069286d3ce..95be0f108f26 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -1011,6 +1011,20 @@ bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from) return wa_list_verify(gt->uncore, >->i915->gt_wa_list, from); } +static inline bool is_nonpriv_flags_valid(u32 flags) +{ + /* Check only valid flag bits are set */ + if (flags & ~RING_FORCE_TO_NONPRIV_MASK_VALID) + return false; + + /* NB: Only 3 out of 4 enum values are valid for access field */ + if ((flags & RING_FORCE_TO_NONPRIV_ACCESS_MASK) == + RING_FORCE_TO_NONPRIV_ACCESS_INVALID) + return false; + + return true; +} + static void whitelist_reg_ext(struct i915_wa_list *wal, i915_reg_t reg, u32 flags) { @@ -1021,6 +1035,9 @@ whitelist_reg_ext(struct i915_wa_list *wal, i915_reg_t reg, u32 flags) if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS)) return; + if (GEM_DEBUG_WARN_ON(!is_nonpriv_flags_valid(flags))) + return; + wa.reg.reg |= flags; _wa_add(wal, &wa); } @@ -1028,7 +1045,7 @@ whitelist_reg_ext(struct i915_wa_list *wal, i915_reg_t reg, u32 flags) static void whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg) { - whitelist_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_RW); + whitelist_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_ACCESS_RW); } static void gen9_whitelist_build(struct i915_wa_list *w) @@ -1109,7 +1126,7 @@ static void cfl_whitelist_build(struct intel_engine_cs *engine) * - PS_DEPTH_COUNT_UDW */ whitelist_reg_ext(w, PS_INVOCATION_COUNT, - RING_FORCE_TO_NONPRIV_RD | + RING_FORCE_TO_NONPRIV_ACCESS_RD | RING_FORCE_TO_NONPRIV_RANGE_4); } @@ -1149,20 +1166,20 @@ static void icl_whitelist_build(struct intel_engine_cs *engine) * - PS_DEPTH_COUNT_UDW */ whitelist_reg_ext(w, PS_INVOCATION_COUNT, - RING_FORCE_TO_NONPRIV_RD | + RING_FORCE_TO_NONPRIV_ACCESS_RD | RING_FORCE_TO_NONPRIV_RANGE_4); break; case VIDEO_DECODE_CLASS: /* hucStatusRegOffset */ whitelist_reg_ext(w, _MMIO(0x2000 + engine->mmio_base), - RING_FORCE_TO_NONPRIV_RD); + RING_FORCE_TO_NONPRIV_ACCESS_RD); /* hucUKernelHdrInfoRegOffset */ whitelist_reg_ext(w, _MMIO(0x2014 + engine->mmio_base), - RING_FORCE_TO_NONPRIV_RD); + RING_FORCE_TO_NONPRIV_ACCESS_RD); /* hucStatus2RegOffset */ whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base), - RING_FORCE_TO_NONPRIV_RD); + RING_FORCE_TO_NONPRIV_ACCESS_RD); break; default: diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c index fa01ea7855de..466dcc8214c3 100644 --- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c +++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c @@ -397,6 +397,10 @@ static bool wo_register(struct intel_engine_cs *engine, u32 reg) enum intel_platform platform = INTEL_INFO(engine->i915)->platform; int i; + if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) == + RING_FORCE_TO_NONPRIV_ACCESS_WR) + return true; + for (i = 0; i < ARRAY_SIZE(wo_registers); i++) { if (wo_registers[i].platform == 
platform && wo_registers[i].reg == reg) @@ -408,7 +412,8 @@ static bool wo_register(struct intel_engine_cs *engine, u32 reg) static bool ro_register(u32 reg) { - if (reg & RING_FORCE_TO_NONPRIV_RD) + if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) == + RING_FORCE_TO_NONPRIV_ACCESS_RD) return true; return false; @@ -760,8 +765,8 @@ static int read_whitelisted_registers(struct i915_gem_context *ctx, u64 offset = results->node.start + sizeof(u32) * i; u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg); - /* Clear RD only and WR only flags */ - reg &= ~(RING_FORCE_TO_NONPRIV_RD | RING_FORCE_TO_NONPRIV_WR); + /* Clear access permission field */ + reg &= ~RING_FORCE_TO_NONPRIV_ACCESS_MASK; *cs++ = srm; *cs++ = reg; @@ -931,7 +936,8 @@ check_whitelisted_registers(struct intel_engine_cs *engine, for (i = 0; i < engine->whitelist.count; i++) { const struct i915_wa *wa = &engine->whitelist.list[i]; - if (i915_mmio_reg_offset(wa->reg) & RING_FORCE_TO_NONPRIV_RD) + if (i915_mmio_reg_offset(wa->reg) & + RING_FORCE_TO_NONPRIV_ACCESS_RD) continue; if (!fn(engine, a[i], b[i], wa->reg)) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 3ff659a180e6..e14c9b76c2d0 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2522,13 +2522,19 @@ enum i915_power_well_id { #define RING_WAIT_SEMAPHORE (1 << 10) /* gen6+ */ #define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base) + 0x4D0) + (i) * 4) -#define RING_FORCE_TO_NONPRIV_RW (0 << 28) /* CFL+ & Gen11+ */ -#define RING_FORCE_TO_NONPRIV_RD (1 << 28) -#define RING_FORCE_TO_NONPRIV_WR (2 << 28) +#define RING_FORCE_TO_NONPRIV_ACCESS_RW (0 << 28) /* CFL+ & Gen11+ */ +#define RING_FORCE_TO_NONPRIV_ACCESS_RD (1 << 28) +#define RING_FORCE_TO_NONPRIV_ACCESS_WR (2 << 28) +#define RING_FORCE_TO_NONPRIV_ACCESS_INVALID (3 << 28) +#define RING_FORCE_TO_NONPRIV_ACCESS_MASK (3 << 28) #define RING_FORCE_TO_NONPRIV_RANGE_1 (0 << 0) /* CFL+ & Gen11+ */ #define RING_FORCE_TO_NONPRIV_RANGE_4 (1 << 0) #define RING_FORCE_TO_NONPRIV_RANGE_16 (2 << 0) #define RING_FORCE_TO_NONPRIV_RANGE_64 (3 << 0) +#define RING_FORCE_TO_NONPRIV_RANGE_MASK (3 << 0) +#define RING_FORCE_TO_NONPRIV_MASK_VALID \ + (RING_FORCE_TO_NONPRIV_RANGE_MASK \ + | RING_FORCE_TO_NONPRIV_ACCESS_MASK) #define RING_MAX_NONPRIV_SLOTS 12 #define GEN7_TLB_RD_ADDR _MMIO(0x4700) -- cgit v1.2.3 From aee20aaed8876e90fa6f500ee5637423a3ef9ab9 Mon Sep 17 00:00:00 2001 From: John Harrison Date: Fri, 12 Jul 2019 00:07:44 -0700 Subject: drm/i915: Implement read-only support in whitelist selftest Newer hardware supports extra feature in the whitelist registers. This patch updates the selftest to test that entries marked as read only are actually read only. v2: Removed all use of 'rsvd' for read-only registers to avoid ambiguous code or error messages. 
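In short, the expectation logic reduces to the following for the two kinds of entries (a sketch of the intent; the real code is in the diff below):

	/*
	 * Read-only register: nothing written to it may stick, so every
	 * iteration expects the original value:
	 *
	 *     expect = results[0];
	 *
	 * Writable register: the expected value keeps being folded through
	 * reg_write(expect, value, rsvd) as before.
	 */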
Signed-off-by: John Harrison CC: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Signed-off-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190712070745.35239-3-John.C.Harrison@Intel.com --- drivers/gpu/drm/i915/gt/selftest_workarounds.c | 49 ++++++++++++++++++-------- 1 file changed, 35 insertions(+), 14 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c index 466dcc8214c3..fd1d47ba4b10 100644 --- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c +++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c @@ -485,12 +485,12 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx, u32 srm, lrm, rsvd; u32 expect; int idx; + bool ro_reg; if (wo_register(engine, reg)) continue; - if (ro_register(reg)) - continue; + ro_reg = ro_register(reg); srm = MI_STORE_REGISTER_MEM; lrm = MI_LOAD_REGISTER_MEM; @@ -591,24 +591,35 @@ err_request: } GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff); - rsvd = results[ARRAY_SIZE(values)]; /* detect write masking */ - if (!rsvd) { - pr_err("%s: Unable to write to whitelisted register %x\n", - engine->name, reg); - err = -EINVAL; - goto out_unpin; + if (!ro_reg) { + /* detect write masking */ + rsvd = results[ARRAY_SIZE(values)]; + if (!rsvd) { + pr_err("%s: Unable to write to whitelisted register %x\n", + engine->name, reg); + err = -EINVAL; + goto out_unpin; + } } expect = results[0]; idx = 1; for (v = 0; v < ARRAY_SIZE(values); v++) { - expect = reg_write(expect, values[v], rsvd); + if (ro_reg) + expect = results[0]; + else + expect = reg_write(expect, values[v], rsvd); + if (results[idx] != expect) err++; idx++; } for (v = 0; v < ARRAY_SIZE(values); v++) { - expect = reg_write(expect, ~values[v], rsvd); + if (ro_reg) + expect = results[0]; + else + expect = reg_write(expect, ~values[v], rsvd); + if (results[idx] != expect) err++; idx++; @@ -617,15 +628,22 @@ err_request: pr_err("%s: %d mismatch between values written to whitelisted register [%x], and values read back!\n", engine->name, err, reg); - pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n", - engine->name, reg, results[0], rsvd); + if (ro_reg) + pr_info("%s: Whitelisted read-only register: %x, original value %08x\n", + engine->name, reg, results[0]); + else + pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n", + engine->name, reg, results[0], rsvd); expect = results[0]; idx = 1; for (v = 0; v < ARRAY_SIZE(values); v++) { u32 w = values[v]; - expect = reg_write(expect, w, rsvd); + if (ro_reg) + expect = results[0]; + else + expect = reg_write(expect, w, rsvd); pr_info("Wrote %08x, read %08x, expect %08x\n", w, results[idx], expect); idx++; @@ -633,7 +651,10 @@ err_request: for (v = 0; v < ARRAY_SIZE(values); v++) { u32 w = ~values[v]; - expect = reg_write(expect, w, rsvd); + if (ro_reg) + expect = results[0]; + else + expect = reg_write(expect, w, rsvd); pr_info("Wrote %08x, read %08x, expect %08x\n", w, results[idx], expect); idx++; -- cgit v1.2.3 From 3e1f0a518d22c361d8e947e94a189ba0c9b3db7a Mon Sep 17 00:00:00 2001 From: John Harrison Date: Fri, 12 Jul 2019 00:07:45 -0700 Subject: drm/i915: Add engine name to workaround debug print There is a debug message in the workaround initialisation path that reports how many entries were added of each type. However, whitelist workarounds exist for multiple engines but the type name is just 'whitelist'. Tvrtko suggested adding the engine name to make the message more useful. 
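For illustration, with the engine name added the debug line changes roughly as follows (the workaround count and the rcs0 engine name are made up for the example):

	Initialized 3 whitelist workarounds            (old format)
	Initialized 3 whitelist workarounds on rcs0    (new format)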
v2: Updated the similar message in the workaround reset selftest. Signed-off-by: John Harrison CC: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Signed-off-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190712070745.35239-4-John.C.Harrison@Intel.com --- drivers/gpu/drm/i915/gt/intel_workarounds.c | 15 ++++++++------- drivers/gpu/drm/i915/gt/intel_workarounds_types.h | 1 + drivers/gpu/drm/i915/gt/selftest_workarounds.c | 17 +++++------------ 3 files changed, 14 insertions(+), 19 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 95be0f108f26..3b1fc7c8faa8 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -50,9 +50,10 @@ * - Public functions to init or apply the given workaround type. */ -static void wa_init_start(struct i915_wa_list *wal, const char *name) +static void wa_init_start(struct i915_wa_list *wal, const char *name, const char *engine_name) { wal->name = name; + wal->engine_name = engine_name; } #define WA_LIST_CHUNK (1 << 4) @@ -74,8 +75,8 @@ static void wa_init_finish(struct i915_wa_list *wal) if (!wal->count) return; - DRM_DEBUG_DRIVER("Initialized %u %s workarounds\n", - wal->wa_count, wal->name); + DRM_DEBUG_DRIVER("Initialized %u %s workarounds on %s\n", + wal->wa_count, wal->name, wal->engine_name); } static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa) @@ -591,7 +592,7 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine, if (engine->class != RENDER_CLASS) return; - wa_init_start(wal, name); + wa_init_start(wal, name, engine->name); if (IS_GEN(i915, 11)) icl_ctx_workarounds_init(engine, wal); @@ -921,7 +922,7 @@ void intel_gt_init_workarounds(struct drm_i915_private *i915) { struct i915_wa_list *wal = &i915->gt_wa_list; - wa_init_start(wal, "GT"); + wa_init_start(wal, "GT", "global"); gt_init_workarounds(i915, wal); wa_init_finish(wal); } @@ -1192,7 +1193,7 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine) struct drm_i915_private *i915 = engine->i915; struct i915_wa_list *w = &engine->whitelist; - wa_init_start(w, "whitelist"); + wa_init_start(w, "whitelist", engine->name); if (IS_GEN(i915, 11)) icl_whitelist_build(engine); @@ -1384,7 +1385,7 @@ void intel_engine_init_workarounds(struct intel_engine_cs *engine) if (INTEL_GEN(engine->i915) < 8) return; - wa_init_start(wal, engine->name); + wa_init_start(wal, "engine", engine->name); engine_init_workarounds(engine, wal); wa_init_finish(wal); } diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds_types.h b/drivers/gpu/drm/i915/gt/intel_workarounds_types.h index 42ac1fb99572..e27ab1b710b3 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds_types.h +++ b/drivers/gpu/drm/i915/gt/intel_workarounds_types.h @@ -20,6 +20,7 @@ struct i915_wa { struct i915_wa_list { const char *name; + const char *engine_name; struct i915_wa *list; unsigned int count; unsigned int wa_count; diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c index fd1d47ba4b10..a0d8f1bfe0ad 100644 --- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c +++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c @@ -25,11 +25,9 @@ static const struct wo_register { { INTEL_GEMINILAKE, 0x731c } }; -#define REF_NAME_MAX (INTEL_ENGINE_CS_MAX_NAME + 8) struct wa_lists { struct i915_wa_list gt_wa_list; struct { - char name[REF_NAME_MAX]; struct i915_wa_list wa_list; struct i915_wa_list ctx_wa_list; 
} engine[I915_NUM_ENGINES]; @@ -43,25 +41,20 @@ reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists) memset(lists, 0, sizeof(*lists)); - wa_init_start(&lists->gt_wa_list, "GT_REF"); + wa_init_start(&lists->gt_wa_list, "GT_REF", "global"); gt_init_workarounds(i915, &lists->gt_wa_list); wa_init_finish(&lists->gt_wa_list); for_each_engine(engine, i915, id) { struct i915_wa_list *wal = &lists->engine[id].wa_list; - char *name = lists->engine[id].name; - snprintf(name, REF_NAME_MAX, "%s_REF", engine->name); - - wa_init_start(wal, name); + wa_init_start(wal, "REF", engine->name); engine_init_workarounds(engine, wal); wa_init_finish(wal); - snprintf(name, REF_NAME_MAX, "%s_CTX_REF", engine->name); - __intel_engine_init_ctx_wa(engine, &lists->engine[id].ctx_wa_list, - name); + "CTX_REF"); } } @@ -292,8 +285,8 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine, intel_wakeref_t wakeref; int err; - pr_info("Checking %d whitelisted registers (RING_NONPRIV) [%s]\n", - engine->whitelist.count, name); + pr_info("Checking %d whitelisted registers on %s (RING_NONPRIV) [%s]\n", + engine->whitelist.count, engine->name, name); ctx = kernel_context(i915); if (IS_ERR(ctx)) -- cgit v1.2.3 From 6eebfe8a10a62139d681e2f1af1386252742278b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 12 Jul 2019 08:58:18 +0100 Subject: drm/i915/gtt: Use shallow dma pages for scratch We only use the dma pages for scratch, and so do not need to allocate the extra storage for the shadow page directory. v2: Refrain from reintroducing I915_PDES Signed-off-by: Chris Wilson Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190712075818.20616-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_gtt.c | 196 +++++++++++++++--------------------- drivers/gpu/drm/i915/i915_gem_gtt.h | 23 +++-- 2 files changed, 100 insertions(+), 119 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 236c964dd761..1e6021e75993 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -594,26 +594,17 @@ static void cleanup_page_dma(struct i915_address_space *vm, #define kmap_atomic_px(px) kmap_atomic(px_base(px)->page) -#define fill_px(vm, px, v) fill_page_dma((vm), px_base(px), (v)) -#define fill32_px(vm, px, v) fill_page_dma_32((vm), px_base(px), (v)) - -static void fill_page_dma(struct i915_address_space *vm, - struct i915_page_dma *p, - const u64 val) +static void +fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count) { - u64 * const vaddr = kmap_atomic(p->page); - - memset64(vaddr, val, PAGE_SIZE / sizeof(val)); - - kunmap_atomic(vaddr); + kunmap_atomic(memset64(kmap_atomic(p->page), val, count)); } -static void fill_page_dma_32(struct i915_address_space *vm, - struct i915_page_dma *p, - const u32 v) -{ - fill_page_dma(vm, p, (u64)v << 32 | v); -} +#define fill_px(px, v) fill_page_dma(px_base(px), (v), PAGE_SIZE / sizeof(u64)) +#define fill32_px(px, v) do { \ + u64 v__ = lower_32_bits(v); \ + fill_px((px), v__ << 32 | v__); \ +} while (0) static int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp) @@ -687,6 +678,21 @@ static void cleanup_scratch_page(struct i915_address_space *vm) __free_pages(p->page, order); } +static void free_scratch(struct i915_address_space *vm) +{ + if (!vm->scratch_page.daddr) /* set to 0 on clones */ + return; + + if (vm->scratch_pdp.daddr) + cleanup_page_dma(vm, 
&vm->scratch_pdp); + if (vm->scratch_pd.daddr) + cleanup_page_dma(vm, &vm->scratch_pd); + if (vm->scratch_pt.daddr) + cleanup_page_dma(vm, &vm->scratch_pt); + + cleanup_scratch_page(vm); +} + static struct i915_page_table *alloc_pt(struct i915_address_space *vm) { struct i915_page_table *pt; @@ -711,18 +717,6 @@ static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt) kfree(pt); } -static void gen8_initialize_pt(struct i915_address_space *vm, - struct i915_page_table *pt) -{ - fill_px(vm, pt, vm->scratch_pte); -} - -static void gen6_initialize_pt(struct i915_address_space *vm, - struct i915_page_table *pt) -{ - fill32_px(vm, pt, vm->scratch_pte); -} - static struct i915_page_directory *__alloc_pd(void) { struct i915_page_directory *pd; @@ -765,9 +759,11 @@ static void free_pd(struct i915_address_space *vm, kfree(pd); } -#define init_pd(vm, pd, to) { \ - fill_px((vm), (pd), gen8_pde_encode(px_dma(to), I915_CACHE_LLC)); \ - memset_p((pd)->entry, (to), 512); \ +static void init_pd(struct i915_page_directory *pd, + struct i915_page_dma *scratch) +{ + fill_px(pd, gen8_pde_encode(scratch->daddr, I915_CACHE_LLC)); + memset_p(pd->entry, scratch, 512); } static inline void @@ -869,12 +865,11 @@ static void gen8_ppgtt_clear_pd(struct i915_address_space *vm, u32 pde; gen8_for_each_pde(pt, pd, start, length, pde) { - GEM_BUG_ON(pt == vm->scratch_pt); + GEM_BUG_ON(px_base(pt) == &vm->scratch_pt); atomic_inc(&pt->used); gen8_ppgtt_clear_pt(vm, pt, start, length); - if (release_pd_entry(pd, pde, &pt->used, - px_base(vm->scratch_pt))) + if (release_pd_entry(pd, pde, &pt->used, &vm->scratch_pt)) free_pt(vm, pt); } } @@ -890,12 +885,11 @@ static void gen8_ppgtt_clear_pdp(struct i915_address_space *vm, unsigned int pdpe; gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { - GEM_BUG_ON(pd == vm->scratch_pd); + GEM_BUG_ON(px_base(pd) == &vm->scratch_pd); atomic_inc(&pd->used); gen8_ppgtt_clear_pd(vm, pd, start, length); - if (release_pd_entry(pdp, pdpe, &pd->used, - px_base(vm->scratch_pd))) + if (release_pd_entry(pdp, pdpe, &pd->used, &vm->scratch_pd)) free_pd(vm, pd); } } @@ -921,12 +915,11 @@ static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm, GEM_BUG_ON(!i915_vm_is_4lvl(vm)); gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) { - GEM_BUG_ON(pdp == vm->scratch_pdp); + GEM_BUG_ON(px_base(pdp) == &vm->scratch_pdp); atomic_inc(&pdp->used); gen8_ppgtt_clear_pdp(vm, pdp, start, length); - if (release_pd_entry(pml4, pml4e, &pdp->used, - px_base(vm->scratch_pdp))) + if (release_pd_entry(pml4, pml4e, &pdp->used, &vm->scratch_pdp)) free_pd(vm, pdp); } } @@ -1181,7 +1174,7 @@ static void gen8_free_page_tables(struct i915_address_space *vm, int i; for (i = 0; i < I915_PDES; i++) { - if (pd->entry[i] != vm->scratch_pt) + if (pd->entry[i] != &vm->scratch_pt) free_pt(vm, pd->entry[i]); } } @@ -1218,37 +1211,34 @@ static int gen8_init_scratch(struct i915_address_space *vm) I915_CACHE_LLC, vm->has_read_only); - vm->scratch_pt = alloc_pt(vm); - if (IS_ERR(vm->scratch_pt)) { - ret = PTR_ERR(vm->scratch_pt); + if (unlikely(setup_page_dma(vm, &vm->scratch_pt))) { + ret = -ENOMEM; goto free_scratch_page; } + fill_px(&vm->scratch_pt, vm->scratch_pte); - vm->scratch_pd = alloc_pd(vm); - if (IS_ERR(vm->scratch_pd)) { - ret = PTR_ERR(vm->scratch_pd); + if (unlikely(setup_page_dma(vm, &vm->scratch_pd))) { + ret = -ENOMEM; goto free_pt; } + fill_px(&vm->scratch_pd, + gen8_pde_encode(vm->scratch_pt.daddr, I915_CACHE_LLC)); if (i915_vm_is_4lvl(vm)) { - vm->scratch_pdp = alloc_pd(vm); - if 
(IS_ERR(vm->scratch_pdp)) { - ret = PTR_ERR(vm->scratch_pdp); + if (unlikely(setup_page_dma(vm, &vm->scratch_pdp))) { + ret = -ENOMEM; goto free_pd; } + fill_px(&vm->scratch_pdp, + gen8_pde_encode(vm->scratch_pd.daddr, I915_CACHE_LLC)); } - gen8_initialize_pt(vm, vm->scratch_pt); - init_pd(vm, vm->scratch_pd, vm->scratch_pt); - if (i915_vm_is_4lvl(vm)) - init_pd(vm, vm->scratch_pdp, vm->scratch_pd); - return 0; free_pd: - free_pd(vm, vm->scratch_pd); + cleanup_page_dma(vm, &vm->scratch_pd); free_pt: - free_pt(vm, vm->scratch_pt); + cleanup_page_dma(vm, &vm->scratch_pt); free_scratch_page: cleanup_scratch_page(vm); @@ -1292,18 +1282,6 @@ static int gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create) return 0; } -static void gen8_free_scratch(struct i915_address_space *vm) -{ - if (!vm->scratch_page.daddr) - return; - - if (i915_vm_is_4lvl(vm)) - free_pd(vm, vm->scratch_pdp); - free_pd(vm, vm->scratch_pd); - free_pt(vm, vm->scratch_pt); - cleanup_scratch_page(vm); -} - static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm, struct i915_page_directory *pdp) { @@ -1311,7 +1289,7 @@ static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm, int i; for (i = 0; i < pdpes; i++) { - if (pdp->entry[i] == vm->scratch_pd) + if (pdp->entry[i] == &vm->scratch_pd) continue; gen8_free_page_tables(vm, pdp->entry[i]); @@ -1329,7 +1307,7 @@ static void gen8_ppgtt_cleanup_4lvl(struct i915_ppgtt *ppgtt) for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) { struct i915_page_directory *pdp = i915_pdp_entry(pml4, i); - if (pdp == ppgtt->vm.scratch_pdp) + if (px_base(pdp) == &ppgtt->vm.scratch_pdp) continue; gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, pdp); @@ -1351,7 +1329,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm) else gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, ppgtt->pd); - gen8_free_scratch(vm); + free_scratch(vm); } static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm, @@ -1367,7 +1345,7 @@ static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm, gen8_for_each_pde(pt, pd, start, length, pde) { const int count = gen8_pte_count(start, length); - if (pt == vm->scratch_pt) { + if (px_base(pt) == &vm->scratch_pt) { spin_unlock(&pd->lock); pt = fetch_and_zero(&alloc); @@ -1379,10 +1357,10 @@ static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm, } if (count < GEN8_PTES || intel_vgpu_active(vm->i915)) - gen8_initialize_pt(vm, pt); + fill_px(pt, vm->scratch_pte); spin_lock(&pd->lock); - if (pd->entry[pde] == vm->scratch_pt) { + if (pd->entry[pde] == &vm->scratch_pt) { set_pd_entry(pd, pde, pt); } else { alloc = pt; @@ -1414,7 +1392,7 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm, spin_lock(&pdp->lock); gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { - if (pd == vm->scratch_pd) { + if (px_base(pd) == &vm->scratch_pd) { spin_unlock(&pdp->lock); pd = fetch_and_zero(&alloc); @@ -1425,10 +1403,10 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm, goto unwind; } - init_pd(vm, pd, vm->scratch_pt); + init_pd(pd, &vm->scratch_pt); spin_lock(&pdp->lock); - if (pdp->entry[pdpe] == vm->scratch_pd) { + if (pdp->entry[pdpe] == &vm->scratch_pd) { set_pd_entry(pdp, pdpe, pd); } else { alloc = pd; @@ -1449,7 +1427,7 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm, goto out; unwind_pd: - if (release_pd_entry(pdp, pdpe, &pd->used, px_base(vm->scratch_pd))) + if (release_pd_entry(pdp, pdpe, &pd->used, &vm->scratch_pd)) free_pd(vm, pd); unwind: gen8_ppgtt_clear_pdp(vm, pdp, from, start - from); @@ -1478,7 +1456,7 @@ static int 
gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm, spin_lock(&pml4->lock); gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) { - if (pdp == vm->scratch_pdp) { + if (px_base(pdp) == &vm->scratch_pdp) { spin_unlock(&pml4->lock); pdp = fetch_and_zero(&alloc); @@ -1489,10 +1467,10 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm, goto unwind; } - init_pd(vm, pdp, vm->scratch_pd); + init_pd(pdp, &vm->scratch_pd); spin_lock(&pml4->lock); - if (pml4->entry[pml4e] == vm->scratch_pdp) { + if (pml4->entry[pml4e] == &vm->scratch_pdp) { set_pd_entry(pml4, pml4e, pdp); } else { alloc = pdp; @@ -1513,7 +1491,7 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm, goto out; unwind_pdp: - if (release_pd_entry(pml4, pml4e, &pdp->used, px_base(vm->scratch_pdp))) + if (release_pd_entry(pml4, pml4e, &pdp->used, &vm->scratch_pdp)) free_pd(vm, pdp); unwind: gen8_ppgtt_clear_4lvl(vm, from, start - from); @@ -1537,7 +1515,7 @@ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt) if (IS_ERR(pd)) goto unwind; - init_pd(vm, pd, vm->scratch_pt); + init_pd(pd, &vm->scratch_pt); set_pd_entry(pdp, pdpe, pd); } @@ -1568,10 +1546,10 @@ static void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt) static void init_pd_n(struct i915_address_space *vm, struct i915_page_directory *pd, - struct i915_page_directory *to, + struct i915_page_dma *to, const unsigned int entries) { - const u64 daddr = gen8_pde_encode(px_dma(to), I915_CACHE_LLC); + const u64 daddr = gen8_pde_encode(to->daddr, I915_CACHE_LLC); u64 * const vaddr = kmap_atomic(pd->base.page); memset64(vaddr, daddr, entries); @@ -1588,7 +1566,7 @@ gen8_alloc_top_pd(struct i915_address_space *vm) if (i915_vm_is_4lvl(vm)) { pd = alloc_pd(vm); if (!IS_ERR(pd)) - init_pd(vm, pd, vm->scratch_pdp); + init_pd(pd, &vm->scratch_pdp); return pd; } @@ -1605,7 +1583,7 @@ gen8_alloc_top_pd(struct i915_address_space *vm) return ERR_PTR(-ENOMEM); } - init_pd_n(vm, pd, vm->scratch_pd, GEN8_3LVL_PDPES); + init_pd_n(vm, pd, &vm->scratch_pd, GEN8_3LVL_PDPES); return pd; } @@ -1678,7 +1656,7 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915) err_free_pd: free_pd(&ppgtt->vm, ppgtt->pd); err_free_scratch: - gen8_free_scratch(&ppgtt->vm); + free_scratch(&ppgtt->vm); err_free: kfree(ppgtt); return ERR_PTR(err); @@ -1763,7 +1741,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm, const unsigned int count = min(num_entries, GEN6_PTES - pte); gen6_pte_t *vaddr; - GEM_BUG_ON(pt == vm->scratch_pt); + GEM_BUG_ON(px_base(pt) == &vm->scratch_pt); num_entries -= count; @@ -1800,7 +1778,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, struct sgt_dma iter = sgt_dma(vma); gen6_pte_t *vaddr; - GEM_BUG_ON(i915_pt_entry(pd, act_pt) == vm->scratch_pt); + GEM_BUG_ON(pd->entry[act_pt] == &vm->scratch_pt); vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt)); do { @@ -1845,7 +1823,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, gen6_for_each_pde(pt, pd, start, length, pde) { const unsigned int count = gen6_pte_count(start, length); - if (pt == vm->scratch_pt) { + if (px_base(pt) == &vm->scratch_pt) { spin_unlock(&pd->lock); pt = fetch_and_zero(&alloc); @@ -1856,10 +1834,10 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, goto unwind_out; } - gen6_initialize_pt(vm, pt); + fill32_px(pt, vm->scratch_pte); spin_lock(&pd->lock); - if (pd->entry[pde] == vm->scratch_pt) { + if (pd->entry[pde] == &vm->scratch_pt) { pd->entry[pde] = pt; if 
(i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND)) { @@ -1908,26 +1886,18 @@ static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt) I915_CACHE_NONE, PTE_READ_ONLY); - vm->scratch_pt = alloc_pt(vm); - if (IS_ERR(vm->scratch_pt)) { + if (unlikely(setup_page_dma(vm, &vm->scratch_pt))) { cleanup_scratch_page(vm); - return PTR_ERR(vm->scratch_pt); + return -ENOMEM; } - - gen6_initialize_pt(vm, vm->scratch_pt); + fill32_px(&vm->scratch_pt, vm->scratch_pte); gen6_for_all_pdes(unused, pd, pde) - pd->entry[pde] = vm->scratch_pt; + pd->entry[pde] = &vm->scratch_pt; return 0; } -static void gen6_ppgtt_free_scratch(struct i915_address_space *vm) -{ - free_pt(vm, vm->scratch_pt); - cleanup_scratch_page(vm); -} - static void gen6_ppgtt_free_pd(struct gen6_ppgtt *ppgtt) { struct i915_page_directory * const pd = ppgtt->base.pd; @@ -1935,7 +1905,7 @@ static void gen6_ppgtt_free_pd(struct gen6_ppgtt *ppgtt) u32 pde; gen6_for_all_pdes(pt, pd, pde) - if (pt != ppgtt->base.vm.scratch_pt) + if (px_base(pt) != &ppgtt->base.vm.scratch_pt) free_pt(&ppgtt->base.vm, pt); } @@ -1950,7 +1920,7 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm) mutex_unlock(&i915->drm.struct_mutex); gen6_ppgtt_free_pd(ppgtt); - gen6_ppgtt_free_scratch(vm); + free_scratch(vm); kfree(ppgtt->base.pd); } @@ -1993,7 +1963,7 @@ static void pd_vma_unbind(struct i915_vma *vma) { struct gen6_ppgtt *ppgtt = vma->private; struct i915_page_directory * const pd = ppgtt->base.pd; - struct i915_page_table * const scratch_pt = ppgtt->base.vm.scratch_pt; + struct i915_page_dma * const scratch = &ppgtt->base.vm.scratch_pt; struct i915_page_table *pt; unsigned int pde; @@ -2002,11 +1972,11 @@ static void pd_vma_unbind(struct i915_vma *vma) /* Free all no longer used page tables */ gen6_for_all_pdes(pt, ppgtt->base.pd, pde) { - if (atomic_read(&pt->used) || pt == scratch_pt) + if (px_base(pt) == scratch || atomic_read(&pt->used)) continue; free_pt(&ppgtt->base.vm, pt); - pd->entry[pde] = scratch_pt; + pd->entry[pde] = scratch; } ppgtt->scan_for_unused_pt = false; @@ -2148,7 +2118,7 @@ static struct i915_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915) return &ppgtt->base; err_scratch: - gen6_ppgtt_free_scratch(&ppgtt->base.vm); + free_scratch(&ppgtt->base.vm); err_pd: kfree(ppgtt->base.pd); err_free: diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 57a68ef4eda7..91d8b4c20c61 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -240,9 +240,6 @@ struct i915_page_dma { }; }; -#define px_base(px) (&(px)->base) -#define px_dma(px) (px_base(px)->daddr) - struct i915_page_table { struct i915_page_dma base; atomic_t used; @@ -255,6 +252,20 @@ struct i915_page_directory { void *entry[512]; }; +#define __px_choose_expr(x, type, expr, other) \ + __builtin_choose_expr( \ + __builtin_types_compatible_p(typeof(x), type) || \ + __builtin_types_compatible_p(typeof(x), const type), \ + ({ type __x = (type)(x); expr; }), \ + other) + +#define px_base(px) \ + __px_choose_expr(px, struct i915_page_dma *, __x, \ + __px_choose_expr(px, struct i915_page_table *, &__x->base, \ + __px_choose_expr(px, struct i915_page_directory *, &__x->base, \ + (void)0))) +#define px_dma(px) (px_base(px)->daddr) + struct i915_vma_ops { /* Map an object into an address space with the given cache flags. 
*/ int (*bind_vma)(struct i915_vma *vma, @@ -304,9 +315,9 @@ struct i915_address_space { u64 scratch_pte; int scratch_order; struct i915_page_dma scratch_page; - struct i915_page_table *scratch_pt; - struct i915_page_directory *scratch_pd; - struct i915_page_directory *scratch_pdp; /* GEN8+ & 48b PPGTT */ + struct i915_page_dma scratch_pt; + struct i915_page_dma scratch_pd; + struct i915_page_dma scratch_pdp; /* GEN8+ & 48b PPGTT */ /** * List of vma currently bound. -- cgit v1.2.3 From 57a7e30546b2f8de4925406ff55eef11da05615b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 12 Jul 2019 10:43:22 +0100 Subject: drm/i915/gtt: Wrap page_table with page_directory The page directory extends the page table with the shadow entries. Make the page directory struct embed the page table for easier code reuse. Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190712094327.24437-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_ringbuffer.c | 2 +- drivers/gpu/drm/i915/i915_gem_gtt.c | 100 +++++++++++++---------------- drivers/gpu/drm/i915/i915_gem_gtt.h | 16 +++-- 3 files changed, 57 insertions(+), 61 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c index a98652e4055c..2873276879cb 100644 --- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c @@ -1512,7 +1512,7 @@ static int load_pd_dir(struct i915_request *rq, const struct i915_ppgtt *ppgtt) *cs++ = MI_LOAD_REGISTER_IMM(1); *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base)); - *cs++ = ppgtt->pd->base.ggtt_offset << 10; + *cs++ = px_base(ppgtt->pd)->ggtt_offset << 10; intel_ring_advance(rq, cs); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 1e6021e75993..b2aa44103fb0 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -107,6 +107,8 @@ * */ +#define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt) + static int i915_get_ggtt_vma_pages(struct i915_vma *vma); @@ -711,28 +713,17 @@ static struct i915_page_table *alloc_pt(struct i915_address_space *vm) return pt; } -static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt) -{ - cleanup_page_dma(vm, &pt->base); - kfree(pt); -} - static struct i915_page_directory *__alloc_pd(void) { struct i915_page_directory *pd; pd = kmalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL); - if (unlikely(!pd)) return NULL; - memset(&pd->base, 0, sizeof(pd->base)); - atomic_set(&pd->used, 0); + atomic_set(px_used(pd), 0); spin_lock_init(&pd->lock); - /* for safety */ - pd->entry[0] = NULL; - return pd; } @@ -744,7 +735,7 @@ static struct i915_page_directory *alloc_pd(struct i915_address_space *vm) if (unlikely(!pd)) return ERR_PTR(-ENOMEM); - if (unlikely(setup_page_dma(vm, &pd->base))) { + if (unlikely(setup_page_dma(vm, px_base(pd)))) { kfree(pd); return ERR_PTR(-ENOMEM); } @@ -752,13 +743,14 @@ static struct i915_page_directory *alloc_pd(struct i915_address_space *vm) return pd; } -static void free_pd(struct i915_address_space *vm, - struct i915_page_directory *pd) +static void free_pd(struct i915_address_space *vm, struct i915_page_dma *pd) { - cleanup_page_dma(vm, &pd->base); + cleanup_page_dma(vm, pd); kfree(pd); } +#define free_px(vm, px) free_pd(vm, px_base(px)) + static void init_pd(struct i915_page_directory *pd, struct i915_page_dma *scratch) { @@ -783,9 +775,9 @@ 
__set_pd_entry(struct i915_page_directory * const pd, struct i915_page_dma * const to, u64 (*encode)(const dma_addr_t, const enum i915_cache_level)) { - GEM_BUG_ON(atomic_read(&pd->used) > 512); + GEM_BUG_ON(atomic_read(px_used(pd)) > 512); - atomic_inc(&pd->used); + atomic_inc(px_used(pd)); pd->entry[pde] = to; write_dma_entry(px_base(pd), pde, encode(to->daddr, I915_CACHE_LLC)); } @@ -796,11 +788,11 @@ __clear_pd_entry(struct i915_page_directory * const pd, struct i915_page_dma * const to, u64 (*encode)(const dma_addr_t, const enum i915_cache_level)) { - GEM_BUG_ON(atomic_read(&pd->used) == 0); + GEM_BUG_ON(atomic_read(px_used(pd)) == 0); write_dma_entry(px_base(pd), pde, encode(to->daddr, I915_CACHE_LLC)); pd->entry[pde] = to; - atomic_dec(&pd->used); + atomic_dec(px_used(pd)); } #define set_pd_entry(pd, pde, to) \ @@ -812,13 +804,13 @@ __clear_pd_entry(struct i915_page_directory * const pd, static bool release_pd_entry(struct i915_page_directory * const pd, const unsigned short pde, - atomic_t *counter, + struct i915_page_table * const pt, struct i915_page_dma * const scratch) { bool free = false; spin_lock(&pd->lock); - if (atomic_dec_and_test(counter)) { + if (atomic_dec_and_test(&pt->used)) { clear_pd_entry(pd, pde, scratch); free = true; } @@ -869,8 +861,8 @@ static void gen8_ppgtt_clear_pd(struct i915_address_space *vm, atomic_inc(&pt->used); gen8_ppgtt_clear_pt(vm, pt, start, length); - if (release_pd_entry(pd, pde, &pt->used, &vm->scratch_pt)) - free_pt(vm, pt); + if (release_pd_entry(pd, pde, pt, &vm->scratch_pt)) + free_px(vm, pt); } } @@ -887,10 +879,10 @@ static void gen8_ppgtt_clear_pdp(struct i915_address_space *vm, gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { GEM_BUG_ON(px_base(pd) == &vm->scratch_pd); - atomic_inc(&pd->used); + atomic_inc(px_used(pd)); gen8_ppgtt_clear_pd(vm, pd, start, length); - if (release_pd_entry(pdp, pdpe, &pd->used, &vm->scratch_pd)) - free_pd(vm, pd); + if (release_pd_entry(pdp, pdpe, &pd->pt, &vm->scratch_pd)) + free_px(vm, pd); } } @@ -917,10 +909,10 @@ static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm, gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) { GEM_BUG_ON(px_base(pdp) == &vm->scratch_pdp); - atomic_inc(&pdp->used); + atomic_inc(px_used(pdp)); gen8_ppgtt_clear_pdp(vm, pdp, start, length); - if (release_pd_entry(pml4, pml4e, &pdp->used, &vm->scratch_pdp)) - free_pd(vm, pdp); + if (release_pd_entry(pml4, pml4e, &pdp->pt, &vm->scratch_pdp)) + free_px(vm, pdp); } } @@ -1175,7 +1167,7 @@ static void gen8_free_page_tables(struct i915_address_space *vm, for (i = 0; i < I915_PDES; i++) { if (pd->entry[i] != &vm->scratch_pt) - free_pt(vm, pd->entry[i]); + free_pd(vm, pd->entry[i]); } } @@ -1253,9 +1245,9 @@ static int gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create) int i; if (create) - atomic_inc(&ppgtt->pd->used); /* never remove */ + atomic_inc(px_used(ppgtt->pd)); /* never remove */ else - atomic_dec(&ppgtt->pd->used); + atomic_dec(px_used(ppgtt->pd)); if (i915_vm_is_4lvl(vm)) { const u64 daddr = px_dma(ppgtt->pd); @@ -1296,7 +1288,7 @@ static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm, free_pd(vm, pdp->entry[i]); } - free_pd(vm, pdp); + free_px(vm, pdp); } static void gen8_ppgtt_cleanup_4lvl(struct i915_ppgtt *ppgtt) @@ -1313,7 +1305,7 @@ static void gen8_ppgtt_cleanup_4lvl(struct i915_ppgtt *ppgtt) gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, pdp); } - free_pd(&ppgtt->vm, pml4); + free_px(&ppgtt->vm, pml4); } static void gen8_ppgtt_cleanup(struct i915_address_space *vm) @@ -1377,7 +1369,7 @@ 
unwind: gen8_ppgtt_clear_pd(vm, pd, from, start - from); out: if (alloc) - free_pt(vm, alloc); + free_px(vm, alloc); return ret; } @@ -1413,7 +1405,7 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm, pd = pdp->entry[pdpe]; } } - atomic_inc(&pd->used); + atomic_inc(px_used(pd)); spin_unlock(&pdp->lock); ret = gen8_ppgtt_alloc_pd(vm, pd, start, length); @@ -1421,19 +1413,19 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm, goto unwind_pd; spin_lock(&pdp->lock); - atomic_dec(&pd->used); + atomic_dec(px_used(pd)); } spin_unlock(&pdp->lock); goto out; unwind_pd: - if (release_pd_entry(pdp, pdpe, &pd->used, &vm->scratch_pd)) - free_pd(vm, pd); + if (release_pd_entry(pdp, pdpe, &pd->pt, &vm->scratch_pd)) + free_px(vm, pd); unwind: gen8_ppgtt_clear_pdp(vm, pdp, from, start - from); out: if (alloc) - free_pd(vm, alloc); + free_px(vm, alloc); return ret; } @@ -1477,7 +1469,7 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm, pdp = pml4->entry[pml4e]; } } - atomic_inc(&pdp->used); + atomic_inc(px_used(pdp)); spin_unlock(&pml4->lock); ret = gen8_ppgtt_alloc_pdp(vm, pdp, start, length); @@ -1485,19 +1477,19 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm, goto unwind_pdp; spin_lock(&pml4->lock); - atomic_dec(&pdp->used); + atomic_dec(px_used(pdp)); } spin_unlock(&pml4->lock); goto out; unwind_pdp: - if (release_pd_entry(pml4, pml4e, &pdp->used, &vm->scratch_pdp)) - free_pd(vm, pdp); + if (release_pd_entry(pml4, pml4e, &pdp->pt, &vm->scratch_pdp)) + free_px(vm, pdp); unwind: gen8_ppgtt_clear_4lvl(vm, from, start - from); out: if (alloc) - free_pd(vm, alloc); + free_px(vm, alloc); return ret; } @@ -1523,7 +1515,7 @@ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt) unwind: gen8_ppgtt_clear_pdp(vm, pdp, from, start - from); - atomic_set(&pdp->used, 0); + atomic_set(px_used(pdp), 0); return -ENOMEM; } @@ -1550,7 +1542,7 @@ static void init_pd_n(struct i915_address_space *vm, const unsigned int entries) { const u64 daddr = gen8_pde_encode(to->daddr, I915_CACHE_LLC); - u64 * const vaddr = kmap_atomic(pd->base.page); + u64 * const vaddr = kmap_atomic_px(pd); memset64(vaddr, daddr, entries); kunmap_atomic(vaddr); @@ -1578,7 +1570,7 @@ gen8_alloc_top_pd(struct i915_address_space *vm) pd->entry[GEN8_3LVL_PDPES] = NULL; - if (unlikely(setup_page_dma(vm, &pd->base))) { + if (unlikely(setup_page_dma(vm, px_base(pd)))) { kfree(pd); return ERR_PTR(-ENOMEM); } @@ -1654,7 +1646,7 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915) return ppgtt; err_free_pd: - free_pd(&ppgtt->vm, ppgtt->pd); + free_px(&ppgtt->vm, ppgtt->pd); err_free_scratch: free_scratch(&ppgtt->vm); err_free: @@ -1865,7 +1857,7 @@ unwind_out: gen6_ppgtt_clear_range(vm, from, start - from); out: if (alloc) - free_pt(vm, alloc); + free_px(vm, alloc); intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref); return ret; } @@ -1906,7 +1898,7 @@ static void gen6_ppgtt_free_pd(struct gen6_ppgtt *ppgtt) gen6_for_all_pdes(pt, pd, pde) if (px_base(pt) != &ppgtt->base.vm.scratch_pt) - free_pt(&ppgtt->base.vm, pt); + free_px(&ppgtt->base.vm, pt); } static void gen6_ppgtt_cleanup(struct i915_address_space *vm) @@ -1947,7 +1939,7 @@ static int pd_vma_bind(struct i915_vma *vma, struct i915_page_table *pt; unsigned int pde; - ppgtt->base.pd->base.ggtt_offset = ggtt_offset * sizeof(gen6_pte_t); + px_base(ppgtt->base.pd)->ggtt_offset = ggtt_offset * sizeof(gen6_pte_t); ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset; gen6_for_all_pdes(pt, 
ppgtt->base.pd, pde) @@ -1975,7 +1967,7 @@ static void pd_vma_unbind(struct i915_vma *vma) if (px_base(pt) == scratch || atomic_read(&pt->used)) continue; - free_pt(&ppgtt->base.vm, pt); + free_px(&ppgtt->base.vm, pt); pd->entry[pde] = scratch; } diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 91d8b4c20c61..48bb8c5125e3 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -246,8 +246,7 @@ struct i915_page_table { }; struct i915_page_directory { - struct i915_page_dma base; - atomic_t used; + struct i915_page_table pt; spinlock_t lock; void *entry[512]; }; @@ -262,10 +261,16 @@ struct i915_page_directory { #define px_base(px) \ __px_choose_expr(px, struct i915_page_dma *, __x, \ __px_choose_expr(px, struct i915_page_table *, &__x->base, \ - __px_choose_expr(px, struct i915_page_directory *, &__x->base, \ + __px_choose_expr(px, struct i915_page_directory *, &__x->pt.base, \ (void)0))) #define px_dma(px) (px_base(px)->daddr) +#define px_pt(px) \ + __px_choose_expr(px, struct i915_page_table *, __x, \ + __px_choose_expr(px, struct i915_page_directory *, &__x->pt, \ + (void)0)) +#define px_used(px) (&px_pt(px)->used) + struct i915_vma_ops { /* Map an object into an address space with the given cache flags. */ int (*bind_vma)(struct i915_vma *vma, @@ -600,10 +605,9 @@ static inline u64 gen8_pte_count(u64 address, u64 length) static inline dma_addr_t i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n) { - struct i915_page_directory *pd; + struct i915_page_dma *pt = ppgtt->pd->entry[n]; - pd = i915_pdp_entry(ppgtt->pd, n); - return px_dma(pd); + return px_dma(pt); } static inline struct i915_ggtt * -- cgit v1.2.3 From a9abea97856b47bb9a85d60b34461444cb9a2d1e Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 12 Jul 2019 10:43:23 +0100 Subject: drm/i915/gtt: Reorder gen8 ppgtt free/clear/alloc In preparation for refactoring the free/clear/alloc, first move the code around so that we can avoid forward declarations in the next set of patches. Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190712094327.24437-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_gtt.c | 673 ++++++++++++++++++------------------ 1 file changed, 337 insertions(+), 336 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index b2aa44103fb0..abb2e3e4bbbc 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -830,6 +830,104 @@ static void mark_tlbs_dirty(struct i915_ppgtt *ppgtt) ppgtt->pd_dirty_engines = ALL_ENGINES; } +static int gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create) +{ + struct i915_address_space *vm = &ppgtt->vm; + struct drm_i915_private *dev_priv = vm->i915; + enum vgt_g2v_type msg; + int i; + + if (create) + atomic_inc(px_used(ppgtt->pd)); /* never remove */ + else + atomic_dec(px_used(ppgtt->pd)); + + if (i915_vm_is_4lvl(vm)) { + const u64 daddr = px_dma(ppgtt->pd); + + I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr)); + I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr)); + + msg = (create ? 
VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE : + VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY); + } else { + for (i = 0; i < GEN8_3LVL_PDPES; i++) { + const u64 daddr = i915_page_dir_dma_addr(ppgtt, i); + + I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr)); + I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr)); + } + + msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE : + VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY); + } + + I915_WRITE(vgtif_reg(g2v_notify), msg); + + return 0; +} + +static void gen8_free_page_tables(struct i915_address_space *vm, + struct i915_page_directory *pd) +{ + int i; + + for (i = 0; i < I915_PDES; i++) { + if (pd->entry[i] != &vm->scratch_pt) + free_pd(vm, pd->entry[i]); + } +} + +static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm, + struct i915_page_directory *pdp) +{ + const unsigned int pdpes = i915_pdpes_per_pdp(vm); + int i; + + for (i = 0; i < pdpes; i++) { + if (pdp->entry[i] == &vm->scratch_pd) + continue; + + gen8_free_page_tables(vm, pdp->entry[i]); + free_pd(vm, pdp->entry[i]); + } + + free_px(vm, pdp); +} + +static void gen8_ppgtt_cleanup_4lvl(struct i915_ppgtt *ppgtt) +{ + struct i915_page_directory * const pml4 = ppgtt->pd; + int i; + + for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) { + struct i915_page_directory *pdp = i915_pdp_entry(pml4, i); + + if (px_base(pdp) == &ppgtt->vm.scratch_pdp) + continue; + + gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, pdp); + } + + free_px(&ppgtt->vm, pml4); +} + +static void gen8_ppgtt_cleanup(struct i915_address_space *vm) +{ + struct drm_i915_private *i915 = vm->i915; + struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); + + if (intel_vgpu_active(i915)) + gen8_ppgtt_notify_vgt(ppgtt, false); + + if (i915_vm_is_4lvl(vm)) + gen8_ppgtt_cleanup_4lvl(ppgtt); + else + gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, ppgtt->pd); + + free_scratch(vm); +} + /* Removes entries from a single page table, releasing it if it's empty. * Caller can use the return value to update higher-level entries. 
*/ @@ -916,95 +1014,265 @@ static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm, } } -static inline struct sgt_dma { - struct scatterlist *sg; - dma_addr_t dma, max; -} sgt_dma(struct i915_vma *vma) { - struct scatterlist *sg = vma->pages->sgl; - dma_addr_t addr = sg_dma_address(sg); - return (struct sgt_dma) { sg, addr, addr + sg->length }; -} - -struct gen8_insert_pte { - u16 pml4e; - u16 pdpe; - u16 pde; - u16 pte; -}; -static __always_inline struct gen8_insert_pte gen8_insert_pte(u64 start) +static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm, + struct i915_page_directory *pd, + u64 start, u64 length) { - return (struct gen8_insert_pte) { - gen8_pml4e_index(start), - gen8_pdpe_index(start), - gen8_pde_index(start), - gen8_pte_index(start), - }; -} + struct i915_page_table *pt, *alloc = NULL; + u64 from = start; + unsigned int pde; + int ret = 0; -static __always_inline bool -gen8_ppgtt_insert_pte_entries(struct i915_ppgtt *ppgtt, - struct i915_page_directory *pdp, - struct sgt_dma *iter, - struct gen8_insert_pte *idx, - enum i915_cache_level cache_level, - u32 flags) -{ - struct i915_page_directory *pd; - const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags); - gen8_pte_t *vaddr; - bool ret; + spin_lock(&pd->lock); + gen8_for_each_pde(pt, pd, start, length, pde) { + const int count = gen8_pte_count(start, length); - GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm)); - pd = i915_pd_entry(pdp, idx->pdpe); - vaddr = kmap_atomic_px(i915_pt_entry(pd, idx->pde)); - do { - vaddr[idx->pte] = pte_encode | iter->dma; + if (px_base(pt) == &vm->scratch_pt) { + spin_unlock(&pd->lock); - iter->dma += I915_GTT_PAGE_SIZE; - if (iter->dma >= iter->max) { - iter->sg = __sg_next(iter->sg); - if (!iter->sg) { - ret = false; - break; + pt = fetch_and_zero(&alloc); + if (!pt) + pt = alloc_pt(vm); + if (IS_ERR(pt)) { + ret = PTR_ERR(pt); + goto unwind; } - iter->dma = sg_dma_address(iter->sg); - iter->max = iter->dma + iter->sg->length; + if (count < GEN8_PTES || intel_vgpu_active(vm->i915)) + fill_px(pt, vm->scratch_pte); + + spin_lock(&pd->lock); + if (pd->entry[pde] == &vm->scratch_pt) { + set_pd_entry(pd, pde, pt); + } else { + alloc = pt; + pt = pd->entry[pde]; + } } - if (++idx->pte == GEN8_PTES) { - idx->pte = 0; + atomic_add(count, &pt->used); + } + spin_unlock(&pd->lock); + goto out; - if (++idx->pde == I915_PDES) { - idx->pde = 0; +unwind: + gen8_ppgtt_clear_pd(vm, pd, from, start - from); +out: + if (alloc) + free_px(vm, alloc); + return ret; +} - /* Limited by sg length for 3lvl */ - if (++idx->pdpe == GEN8_PML4ES_PER_PML4) { - idx->pdpe = 0; - ret = true; - break; - } +static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm, + struct i915_page_directory *pdp, + u64 start, u64 length) +{ + struct i915_page_directory *pd, *alloc = NULL; + u64 from = start; + unsigned int pdpe; + int ret = 0; - GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm)); - pd = pdp->entry[idx->pdpe]; + spin_lock(&pdp->lock); + gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { + if (px_base(pd) == &vm->scratch_pd) { + spin_unlock(&pdp->lock); + + pd = fetch_and_zero(&alloc); + if (!pd) + pd = alloc_pd(vm); + if (IS_ERR(pd)) { + ret = PTR_ERR(pd); + goto unwind; } - kunmap_atomic(vaddr); - vaddr = kmap_atomic_px(i915_pt_entry(pd, idx->pde)); + init_pd(pd, &vm->scratch_pt); + + spin_lock(&pdp->lock); + if (pdp->entry[pdpe] == &vm->scratch_pd) { + set_pd_entry(pdp, pdpe, pd); + } else { + alloc = pd; + pd = pdp->entry[pdpe]; + } } - } while (1); - kunmap_atomic(vaddr); + 
atomic_inc(px_used(pd)); + spin_unlock(&pdp->lock); + + ret = gen8_ppgtt_alloc_pd(vm, pd, start, length); + if (unlikely(ret)) + goto unwind_pd; + + spin_lock(&pdp->lock); + atomic_dec(px_used(pd)); + } + spin_unlock(&pdp->lock); + goto out; +unwind_pd: + if (release_pd_entry(pdp, pdpe, &pd->pt, &vm->scratch_pd)) + free_px(vm, pd); +unwind: + gen8_ppgtt_clear_pdp(vm, pdp, from, start - from); +out: + if (alloc) + free_px(vm, alloc); return ret; } -static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm, - struct i915_vma *vma, - enum i915_cache_level cache_level, - u32 flags) +static int gen8_ppgtt_alloc_3lvl(struct i915_address_space *vm, + u64 start, u64 length) { - struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); + return gen8_ppgtt_alloc_pdp(vm, + i915_vm_to_ppgtt(vm)->pd, start, length); +} + +static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm, + u64 start, u64 length) +{ + struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); + struct i915_page_directory * const pml4 = ppgtt->pd; + struct i915_page_directory *pdp, *alloc = NULL; + u64 from = start; + int ret = 0; + u32 pml4e; + + spin_lock(&pml4->lock); + gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) { + if (px_base(pdp) == &vm->scratch_pdp) { + spin_unlock(&pml4->lock); + + pdp = fetch_and_zero(&alloc); + if (!pdp) + pdp = alloc_pd(vm); + if (IS_ERR(pdp)) { + ret = PTR_ERR(pdp); + goto unwind; + } + + init_pd(pdp, &vm->scratch_pd); + + spin_lock(&pml4->lock); + if (pml4->entry[pml4e] == &vm->scratch_pdp) { + set_pd_entry(pml4, pml4e, pdp); + } else { + alloc = pdp; + pdp = pml4->entry[pml4e]; + } + } + atomic_inc(px_used(pdp)); + spin_unlock(&pml4->lock); + + ret = gen8_ppgtt_alloc_pdp(vm, pdp, start, length); + if (unlikely(ret)) + goto unwind_pdp; + + spin_lock(&pml4->lock); + atomic_dec(px_used(pdp)); + } + spin_unlock(&pml4->lock); + goto out; + +unwind_pdp: + if (release_pd_entry(pml4, pml4e, &pdp->pt, &vm->scratch_pdp)) + free_px(vm, pdp); +unwind: + gen8_ppgtt_clear_4lvl(vm, from, start - from); +out: + if (alloc) + free_px(vm, alloc); + return ret; +} + +static inline struct sgt_dma { + struct scatterlist *sg; + dma_addr_t dma, max; +} sgt_dma(struct i915_vma *vma) { + struct scatterlist *sg = vma->pages->sgl; + dma_addr_t addr = sg_dma_address(sg); + return (struct sgt_dma) { sg, addr, addr + sg->length }; +} + +struct gen8_insert_pte { + u16 pml4e; + u16 pdpe; + u16 pde; + u16 pte; +}; + +static __always_inline struct gen8_insert_pte gen8_insert_pte(u64 start) +{ + return (struct gen8_insert_pte) { + gen8_pml4e_index(start), + gen8_pdpe_index(start), + gen8_pde_index(start), + gen8_pte_index(start), + }; +} + +static __always_inline bool +gen8_ppgtt_insert_pte_entries(struct i915_ppgtt *ppgtt, + struct i915_page_directory *pdp, + struct sgt_dma *iter, + struct gen8_insert_pte *idx, + enum i915_cache_level cache_level, + u32 flags) +{ + struct i915_page_directory *pd; + const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags); + gen8_pte_t *vaddr; + bool ret; + + GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm)); + pd = i915_pd_entry(pdp, idx->pdpe); + vaddr = kmap_atomic_px(i915_pt_entry(pd, idx->pde)); + do { + vaddr[idx->pte] = pte_encode | iter->dma; + + iter->dma += I915_GTT_PAGE_SIZE; + if (iter->dma >= iter->max) { + iter->sg = __sg_next(iter->sg); + if (!iter->sg) { + ret = false; + break; + } + + iter->dma = sg_dma_address(iter->sg); + iter->max = iter->dma + iter->sg->length; + } + + if (++idx->pte == GEN8_PTES) { + idx->pte = 0; + + if (++idx->pde == I915_PDES) { + 
idx->pde = 0; + + /* Limited by sg length for 3lvl */ + if (++idx->pdpe == GEN8_PML4ES_PER_PML4) { + idx->pdpe = 0; + ret = true; + break; + } + + GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm)); + pd = pdp->entry[idx->pdpe]; + } + + kunmap_atomic(vaddr); + vaddr = kmap_atomic_px(i915_pt_entry(pd, idx->pde)); + } + } while (1); + kunmap_atomic(vaddr); + + return ret; +} + +static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm, + struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 flags) +{ + struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); struct sgt_dma iter = sgt_dma(vma); struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start); @@ -1160,17 +1428,6 @@ static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm, } } -static void gen8_free_page_tables(struct i915_address_space *vm, - struct i915_page_directory *pd) -{ - int i; - - for (i = 0; i < I915_PDES; i++) { - if (pd->entry[i] != &vm->scratch_pt) - free_pd(vm, pd->entry[i]); - } -} - static int gen8_init_scratch(struct i915_address_space *vm) { int ret; @@ -1237,262 +1494,6 @@ free_scratch_page: return ret; } -static int gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create) -{ - struct i915_address_space *vm = &ppgtt->vm; - struct drm_i915_private *dev_priv = vm->i915; - enum vgt_g2v_type msg; - int i; - - if (create) - atomic_inc(px_used(ppgtt->pd)); /* never remove */ - else - atomic_dec(px_used(ppgtt->pd)); - - if (i915_vm_is_4lvl(vm)) { - const u64 daddr = px_dma(ppgtt->pd); - - I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr)); - I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr)); - - msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE : - VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY); - } else { - for (i = 0; i < GEN8_3LVL_PDPES; i++) { - const u64 daddr = i915_page_dir_dma_addr(ppgtt, i); - - I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr)); - I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr)); - } - - msg = (create ? 
VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE : - VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY); - } - - I915_WRITE(vgtif_reg(g2v_notify), msg); - - return 0; -} - -static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm, - struct i915_page_directory *pdp) -{ - const unsigned int pdpes = i915_pdpes_per_pdp(vm); - int i; - - for (i = 0; i < pdpes; i++) { - if (pdp->entry[i] == &vm->scratch_pd) - continue; - - gen8_free_page_tables(vm, pdp->entry[i]); - free_pd(vm, pdp->entry[i]); - } - - free_px(vm, pdp); -} - -static void gen8_ppgtt_cleanup_4lvl(struct i915_ppgtt *ppgtt) -{ - struct i915_page_directory * const pml4 = ppgtt->pd; - int i; - - for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) { - struct i915_page_directory *pdp = i915_pdp_entry(pml4, i); - - if (px_base(pdp) == &ppgtt->vm.scratch_pdp) - continue; - - gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, pdp); - } - - free_px(&ppgtt->vm, pml4); -} - -static void gen8_ppgtt_cleanup(struct i915_address_space *vm) -{ - struct drm_i915_private *i915 = vm->i915; - struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); - - if (intel_vgpu_active(i915)) - gen8_ppgtt_notify_vgt(ppgtt, false); - - if (i915_vm_is_4lvl(vm)) - gen8_ppgtt_cleanup_4lvl(ppgtt); - else - gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, ppgtt->pd); - - free_scratch(vm); -} - -static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm, - struct i915_page_directory *pd, - u64 start, u64 length) -{ - struct i915_page_table *pt, *alloc = NULL; - u64 from = start; - unsigned int pde; - int ret = 0; - - spin_lock(&pd->lock); - gen8_for_each_pde(pt, pd, start, length, pde) { - const int count = gen8_pte_count(start, length); - - if (px_base(pt) == &vm->scratch_pt) { - spin_unlock(&pd->lock); - - pt = fetch_and_zero(&alloc); - if (!pt) - pt = alloc_pt(vm); - if (IS_ERR(pt)) { - ret = PTR_ERR(pt); - goto unwind; - } - - if (count < GEN8_PTES || intel_vgpu_active(vm->i915)) - fill_px(pt, vm->scratch_pte); - - spin_lock(&pd->lock); - if (pd->entry[pde] == &vm->scratch_pt) { - set_pd_entry(pd, pde, pt); - } else { - alloc = pt; - pt = pd->entry[pde]; - } - } - - atomic_add(count, &pt->used); - } - spin_unlock(&pd->lock); - goto out; - -unwind: - gen8_ppgtt_clear_pd(vm, pd, from, start - from); -out: - if (alloc) - free_px(vm, alloc); - return ret; -} - -static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm, - struct i915_page_directory *pdp, - u64 start, u64 length) -{ - struct i915_page_directory *pd, *alloc = NULL; - u64 from = start; - unsigned int pdpe; - int ret = 0; - - spin_lock(&pdp->lock); - gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { - if (px_base(pd) == &vm->scratch_pd) { - spin_unlock(&pdp->lock); - - pd = fetch_and_zero(&alloc); - if (!pd) - pd = alloc_pd(vm); - if (IS_ERR(pd)) { - ret = PTR_ERR(pd); - goto unwind; - } - - init_pd(pd, &vm->scratch_pt); - - spin_lock(&pdp->lock); - if (pdp->entry[pdpe] == &vm->scratch_pd) { - set_pd_entry(pdp, pdpe, pd); - } else { - alloc = pd; - pd = pdp->entry[pdpe]; - } - } - atomic_inc(px_used(pd)); - spin_unlock(&pdp->lock); - - ret = gen8_ppgtt_alloc_pd(vm, pd, start, length); - if (unlikely(ret)) - goto unwind_pd; - - spin_lock(&pdp->lock); - atomic_dec(px_used(pd)); - } - spin_unlock(&pdp->lock); - goto out; - -unwind_pd: - if (release_pd_entry(pdp, pdpe, &pd->pt, &vm->scratch_pd)) - free_px(vm, pd); -unwind: - gen8_ppgtt_clear_pdp(vm, pdp, from, start - from); -out: - if (alloc) - free_px(vm, alloc); - return ret; -} - -static int gen8_ppgtt_alloc_3lvl(struct i915_address_space *vm, - u64 start, u64 length) -{ - return gen8_ppgtt_alloc_pdp(vm, - 
i915_vm_to_ppgtt(vm)->pd, start, length); -} - -static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm, - u64 start, u64 length) -{ - struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); - struct i915_page_directory * const pml4 = ppgtt->pd; - struct i915_page_directory *pdp, *alloc = NULL; - u64 from = start; - int ret = 0; - u32 pml4e; - - spin_lock(&pml4->lock); - gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) { - if (px_base(pdp) == &vm->scratch_pdp) { - spin_unlock(&pml4->lock); - - pdp = fetch_and_zero(&alloc); - if (!pdp) - pdp = alloc_pd(vm); - if (IS_ERR(pdp)) { - ret = PTR_ERR(pdp); - goto unwind; - } - - init_pd(pdp, &vm->scratch_pd); - - spin_lock(&pml4->lock); - if (pml4->entry[pml4e] == &vm->scratch_pdp) { - set_pd_entry(pml4, pml4e, pdp); - } else { - alloc = pdp; - pdp = pml4->entry[pml4e]; - } - } - atomic_inc(px_used(pdp)); - spin_unlock(&pml4->lock); - - ret = gen8_ppgtt_alloc_pdp(vm, pdp, start, length); - if (unlikely(ret)) - goto unwind_pdp; - - spin_lock(&pml4->lock); - atomic_dec(px_used(pdp)); - } - spin_unlock(&pml4->lock); - goto out; - -unwind_pdp: - if (release_pd_entry(pml4, pml4e, &pdp->pt, &vm->scratch_pdp)) - free_px(vm, pdp); -unwind: - gen8_ppgtt_clear_4lvl(vm, from, start - from); -out: - if (alloc) - free_px(vm, alloc); - return ret; -} - static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt) { struct i915_address_space *vm = &ppgtt->vm; -- cgit v1.2.3 From 18c7962b8cf24a5a2a757db16f535ff8c5191966 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 12 Jul 2019 10:43:24 +0100 Subject: drm/i915/gtt: Markup i915_ppgtt height This will be useful to consolidate recursive code. Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190712094327.24437-3-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_gtt.c | 2 ++ drivers/gpu/drm/i915/i915_gem_gtt.h | 1 + 2 files changed, 3 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index abb2e3e4bbbc..4544af1d3a3c 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1598,6 +1598,7 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915) return ERR_PTR(-ENOMEM); ppgtt_init(ppgtt, &i915->gt); + ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2; /* * From bdw, there is hw support for read-only pages in the PPGTT. @@ -2084,6 +2085,7 @@ static struct i915_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915) return ERR_PTR(-ENOMEM); ppgtt_init(&ppgtt->base, &i915->gt); + ppgtt->base.vm.top = 1; ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range; ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 48bb8c5125e3..36162bb7561b 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -323,6 +323,7 @@ struct i915_address_space { struct i915_page_dma scratch_pt; struct i915_page_dma scratch_pd; struct i915_page_dma scratch_pdp; /* GEN8+ & 48b PPGTT */ + unsigned int top; /** * List of vma currently bound. -- cgit v1.2.3 From 2776326457562f971b7b38f4683e298e55252ccc Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 12 Jul 2019 10:43:25 +0100 Subject: drm/i915/gtt: Compute the radix for gen8 page table levels The radix levels of each page directory are easily determined so replace the numerous hardcoded constants with precomputed derived constants. 
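(Aside, not part of the patch: the derived constants boil down to 9 address bits per directory level stacked on top of the 12-bit page offset, since a 4KiB page holds 512 u64 entries. A minimal standalone sketch of that decomposition, with the driver's i915_pde_index() helper replaced by an explicit shift-and-mask and an arbitrary example address, could look like the following.)

/* Standalone illustration, not driver code: decompose a gen8 GPU
 * address into its per-level directory indices using the same derived
 * constants described above. The example address is arbitrary.
 */
#include <stdint.h>
#include <stdio.h>

#define GEN8_PAGE_SIZE	4096ull
#define GEN8_PTE_SHIFT	12	/* ilog2(GEN8_PAGE_SIZE) */
#define GEN8_PDES	(GEN8_PAGE_SIZE / sizeof(uint64_t))	/* 512 */
#define PD_SHIFT(lvl)	((lvl) * 9)	/* 9 == ilog2(GEN8_PDES) */
#define PTE_SHIFT(lvl)	(GEN8_PTE_SHIFT + PD_SHIFT(lvl))
#define PTE_INDEX(a, lvl)	(((a) >> PTE_SHIFT(lvl)) & (GEN8_PDES - 1))

int main(void)
{
	uint64_t addr = 0x0000123456789000ull;	/* arbitrary 48b address */
	int lvl;

	/* top == 3 for a 4-level ppgtt: pml4e, pdpe, pde, then pte */
	for (lvl = 3; lvl >= 0; lvl--)
		printf("level %d index: %llu\n", lvl,
		       (unsigned long long)PTE_INDEX(addr, lvl));

	return 0;
}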
Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190712094327.24437-4-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_gtt.c | 42 +++++++++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/i915_gem_gtt.h | 1 - 2 files changed, 42 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 4544af1d3a3c..1a60403e1a70 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -867,6 +867,48 @@ static int gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create) return 0; } +/* Index shifts into the pagetable are offset by GEN8_PTE_SHIFT [12] */ +#define GEN8_PAGE_SIZE (SZ_4K) /* page and page-directory sizes are the same */ +#define GEN8_PTE_SHIFT (ilog2(GEN8_PAGE_SIZE)) +#define GEN8_PDES (GEN8_PAGE_SIZE / sizeof(u64)) +#define gen8_pd_shift(lvl) ((lvl) * ilog2(GEN8_PDES)) +#define gen8_pd_index(i, lvl) i915_pde_index((i), gen8_pd_shift(lvl)) +#define __gen8_pte_shift(lvl) (GEN8_PTE_SHIFT + gen8_pd_shift(lvl)) +#define __gen8_pte_index(a, lvl) i915_pde_index((a), __gen8_pte_shift(lvl)) + +static inline unsigned int +gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx) +{ + const int shift = gen8_pd_shift(lvl); + const u64 mask = ~0ull << gen8_pd_shift(lvl + 1); + + GEM_BUG_ON(start >= end); + end += ~mask >> gen8_pd_shift(1); + + *idx = i915_pde_index(start, shift); + if ((start ^ end) & mask) + return GEN8_PDES - *idx; + else + return i915_pde_index(end, shift) - *idx; +} + +static inline bool gen8_pd_contains(u64 start, u64 end, int lvl) +{ + const u64 mask = ~0ull << gen8_pd_shift(lvl + 1); + + GEM_BUG_ON(start >= end); + return (start ^ end) & mask && (start & ~mask) == 0; +} + +static inline unsigned int gen8_pt_count(u64 start, u64 end) +{ + GEM_BUG_ON(start >= end); + if ((start ^ end) >> gen8_pd_shift(1)) + return GEN8_PDES - (start & (GEN8_PDES - 1)); + else + return end - start; +} + static void gen8_free_page_tables(struct i915_address_space *vm, struct i915_page_directory *pd) { diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 36162bb7561b..fb33f251ef9a 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -124,7 +124,6 @@ typedef u64 gen8_pte_t; #define GEN8_3LVL_PDPES 4 #define GEN8_PDE_SHIFT 21 #define GEN8_PDE_MASK 0x1ff -#define GEN8_PTE_SHIFT 12 #define GEN8_PTE_MASK 0x1ff #define GEN8_PTES I915_PTES(sizeof(gen8_pte_t)) -- cgit v1.2.3 From c03cbe4c0a838ce03266a8c579ef8d13181907b6 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 12 Jul 2019 10:43:26 +0100 Subject: drm/i915/gtt: Convert vm->scratch into an array Each level has its own scratch. Make the levels more obvious by forgoing the fancy similarly names and replace them with a number. 0 is the bottom most level, the physical page used for actual data; 1+ are the page directories. 
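(Aside, not part of the patch: only level 0 is a real data page; every directory level above it is a page whose slots all point one level down, while its own encode value is what a parent slot would hold. A toy model of that chain, using invented dma addresses and a fake PRESENT bit in place of the real gen8_pte_encode()/gen8_pde_encode(), could look like the following.)

/* Toy model, not driver code: build the per-level scratch chain the
 * way the new gen8 scratch setup loop does, with made-up addresses
 * and a fake PRESENT bit standing in for the real pte/pde encodings.
 */
#include <stdint.h>
#include <stdio.h>

#define TOP	3		/* 4-level ppgtt: scratch[0..3] */
#define PRESENT	0x1ull

struct scratch {
	uint64_t daddr;		/* this level's (pretend) dma address */
	uint64_t fill;		/* value every slot of this page holds */
	uint64_t encode;	/* value a parent slot uses to point here */
};

int main(void)
{
	struct scratch scratch[TOP + 1];
	int i;

	scratch[0].daddr = 0x1000;			/* zeroed data page */
	scratch[0].fill = 0;
	scratch[0].encode = scratch[0].daddr | PRESENT;	/* "pte encode" */

	for (i = 1; i <= TOP; i++) {
		scratch[i].daddr = 0x1000ull * (i + 1);
		/* fill_px(): every slot points at the level below */
		scratch[i].fill = scratch[i - 1].encode;
		/* "pde encode" of this page, used by the level above */
		scratch[i].encode = scratch[i].daddr | PRESENT;
	}

	for (i = 0; i <= TOP; i++)
		printf("scratch[%d]: daddr=%#llx fill=%#llx encode=%#llx\n",
		       i, (unsigned long long)scratch[i].daddr,
		       (unsigned long long)scratch[i].fill,
		       (unsigned long long)scratch[i].encode);

	return 0;
}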
Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190712094327.24437-5-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_gtt.c | 205 ++++++++++++++++-------------------- drivers/gpu/drm/i915/i915_gem_gtt.h | 16 +-- 2 files changed, 100 insertions(+), 121 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 1a60403e1a70..16bcf2163ae0 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -633,7 +633,7 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp) gfp |= __GFP_ZERO | __GFP_RETRY_MAYFAIL; do { - int order = get_order(size); + unsigned int order = get_order(size); struct page *page; dma_addr_t addr; @@ -652,8 +652,8 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp) if (unlikely(!IS_ALIGNED(addr, size))) goto unmap_page; - vm->scratch_page.page = page; - vm->scratch_page.daddr = addr; + vm->scratch[0].base.page = page; + vm->scratch[0].base.daddr = addr; vm->scratch_order = order; return 0; @@ -672,8 +672,8 @@ skip: static void cleanup_scratch_page(struct i915_address_space *vm) { - struct i915_page_dma *p = &vm->scratch_page; - int order = vm->scratch_order; + struct i915_page_dma *p = px_base(&vm->scratch[0]); + unsigned int order = vm->scratch_order; dma_unmap_page(vm->dma, p->daddr, BIT(order) << PAGE_SHIFT, PCI_DMA_BIDIRECTIONAL); @@ -682,15 +682,16 @@ static void cleanup_scratch_page(struct i915_address_space *vm) static void free_scratch(struct i915_address_space *vm) { - if (!vm->scratch_page.daddr) /* set to 0 on clones */ + int i; + + if (!px_dma(&vm->scratch[0])) /* set to 0 on clones */ return; - if (vm->scratch_pdp.daddr) - cleanup_page_dma(vm, &vm->scratch_pdp); - if (vm->scratch_pd.daddr) - cleanup_page_dma(vm, &vm->scratch_pd); - if (vm->scratch_pt.daddr) - cleanup_page_dma(vm, &vm->scratch_pt); + for (i = 1; i <= vm->top; i++) { + if (!px_dma(&vm->scratch[i])) + break; + cleanup_page_dma(vm, px_base(&vm->scratch[i])); + } cleanup_scratch_page(vm); } @@ -752,9 +753,9 @@ static void free_pd(struct i915_address_space *vm, struct i915_page_dma *pd) #define free_px(vm, px) free_pd(vm, px_base(px)) static void init_pd(struct i915_page_directory *pd, - struct i915_page_dma *scratch) + struct i915_page_scratch *scratch) { - fill_px(pd, gen8_pde_encode(scratch->daddr, I915_CACHE_LLC)); + fill_px(pd, scratch->encode); memset_p(pd->entry, scratch, 512); } @@ -782,30 +783,26 @@ __set_pd_entry(struct i915_page_directory * const pd, write_dma_entry(px_base(pd), pde, encode(to->daddr, I915_CACHE_LLC)); } +#define set_pd_entry(pd, pde, to) \ + __set_pd_entry((pd), (pde), px_base(to), gen8_pde_encode) + static inline void -__clear_pd_entry(struct i915_page_directory * const pd, - const unsigned short pde, - struct i915_page_dma * const to, - u64 (*encode)(const dma_addr_t, const enum i915_cache_level)) +clear_pd_entry(struct i915_page_directory * const pd, + const unsigned short pde, + struct i915_page_scratch * const scratch) { GEM_BUG_ON(atomic_read(px_used(pd)) == 0); - write_dma_entry(px_base(pd), pde, encode(to->daddr, I915_CACHE_LLC)); - pd->entry[pde] = to; + write_dma_entry(px_base(pd), pde, scratch->encode); + pd->entry[pde] = scratch; atomic_dec(px_used(pd)); } -#define set_pd_entry(pd, pde, to) \ - __set_pd_entry((pd), (pde), px_base(to), gen8_pde_encode) - -#define clear_pd_entry(pd, pde, to) \ - __clear_pd_entry((pd), (pde), (to), gen8_pde_encode) - static bool 
release_pd_entry(struct i915_page_directory * const pd, const unsigned short pde, struct i915_page_table * const pt, - struct i915_page_dma * const scratch) + struct i915_page_scratch * const scratch) { bool free = false; @@ -915,7 +912,7 @@ static void gen8_free_page_tables(struct i915_address_space *vm, int i; for (i = 0; i < I915_PDES; i++) { - if (pd->entry[i] != &vm->scratch_pt) + if (pd->entry[i] != &vm->scratch[1]) free_pd(vm, pd->entry[i]); } } @@ -927,7 +924,7 @@ static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm, int i; for (i = 0; i < pdpes; i++) { - if (pdp->entry[i] == &vm->scratch_pd) + if (pdp->entry[i] == &vm->scratch[2]) continue; gen8_free_page_tables(vm, pdp->entry[i]); @@ -945,7 +942,7 @@ static void gen8_ppgtt_cleanup_4lvl(struct i915_ppgtt *ppgtt) for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) { struct i915_page_directory *pdp = i915_pdp_entry(pml4, i); - if (px_base(pdp) == &ppgtt->vm.scratch_pdp) + if (px_base(pdp) == px_base(&ppgtt->vm.scratch[3])) continue; gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, pdp); @@ -981,7 +978,9 @@ static void gen8_ppgtt_clear_pt(const struct i915_address_space *vm, gen8_pte_t *vaddr; vaddr = kmap_atomic_px(pt); - memset64(vaddr + gen8_pte_index(start), vm->scratch_pte, num_entries); + memset64(vaddr + gen8_pte_index(start), + vm->scratch[0].encode, + num_entries); kunmap_atomic(vaddr); GEM_BUG_ON(num_entries > atomic_read(&pt->used)); @@ -997,11 +996,11 @@ static void gen8_ppgtt_clear_pd(struct i915_address_space *vm, u32 pde; gen8_for_each_pde(pt, pd, start, length, pde) { - GEM_BUG_ON(px_base(pt) == &vm->scratch_pt); + GEM_BUG_ON(px_base(pt) == px_base(&vm->scratch[1])); atomic_inc(&pt->used); gen8_ppgtt_clear_pt(vm, pt, start, length); - if (release_pd_entry(pd, pde, pt, &vm->scratch_pt)) + if (release_pd_entry(pd, pde, pt, &vm->scratch[1])) free_px(vm, pt); } } @@ -1017,11 +1016,11 @@ static void gen8_ppgtt_clear_pdp(struct i915_address_space *vm, unsigned int pdpe; gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { - GEM_BUG_ON(px_base(pd) == &vm->scratch_pd); + GEM_BUG_ON(px_base(pd) == px_base(&vm->scratch[2])); atomic_inc(px_used(pd)); gen8_ppgtt_clear_pd(vm, pd, start, length); - if (release_pd_entry(pdp, pdpe, &pd->pt, &vm->scratch_pd)) + if (release_pd_entry(pdp, pdpe, &pd->pt, &vm->scratch[2])) free_px(vm, pd); } } @@ -1047,16 +1046,15 @@ static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm, GEM_BUG_ON(!i915_vm_is_4lvl(vm)); gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) { - GEM_BUG_ON(px_base(pdp) == &vm->scratch_pdp); + GEM_BUG_ON(px_base(pdp) == px_base(&vm->scratch[3])); atomic_inc(px_used(pdp)); gen8_ppgtt_clear_pdp(vm, pdp, start, length); - if (release_pd_entry(pml4, pml4e, &pdp->pt, &vm->scratch_pdp)) + if (release_pd_entry(pml4, pml4e, &pdp->pt, &vm->scratch[3])) free_px(vm, pdp); } } - static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm, struct i915_page_directory *pd, u64 start, u64 length) @@ -1070,7 +1068,7 @@ static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm, gen8_for_each_pde(pt, pd, start, length, pde) { const int count = gen8_pte_count(start, length); - if (px_base(pt) == &vm->scratch_pt) { + if (px_base(pt) == px_base(&vm->scratch[1])) { spin_unlock(&pd->lock); pt = fetch_and_zero(&alloc); @@ -1082,10 +1080,10 @@ static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm, } if (count < GEN8_PTES || intel_vgpu_active(vm->i915)) - fill_px(pt, vm->scratch_pte); + fill_px(pt, vm->scratch[0].encode); spin_lock(&pd->lock); - if (pd->entry[pde] == &vm->scratch_pt) { + if 
(pd->entry[pde] == &vm->scratch[1]) { set_pd_entry(pd, pde, pt); } else { alloc = pt; @@ -1117,7 +1115,7 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm, spin_lock(&pdp->lock); gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { - if (px_base(pd) == &vm->scratch_pd) { + if (px_base(pd) == px_base(&vm->scratch[2])) { spin_unlock(&pdp->lock); pd = fetch_and_zero(&alloc); @@ -1128,10 +1126,10 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm, goto unwind; } - init_pd(pd, &vm->scratch_pt); + init_pd(pd, &vm->scratch[1]); spin_lock(&pdp->lock); - if (pdp->entry[pdpe] == &vm->scratch_pd) { + if (pdp->entry[pdpe] == &vm->scratch[2]) { set_pd_entry(pdp, pdpe, pd); } else { alloc = pd; @@ -1152,7 +1150,7 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm, goto out; unwind_pd: - if (release_pd_entry(pdp, pdpe, &pd->pt, &vm->scratch_pd)) + if (release_pd_entry(pdp, pdpe, &pd->pt, &vm->scratch[2])) free_px(vm, pd); unwind: gen8_ppgtt_clear_pdp(vm, pdp, from, start - from); @@ -1181,7 +1179,7 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm, spin_lock(&pml4->lock); gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) { - if (px_base(pdp) == &vm->scratch_pdp) { + if (px_base(pdp) == px_base(&vm->scratch[3])) { spin_unlock(&pml4->lock); pdp = fetch_and_zero(&alloc); @@ -1192,10 +1190,10 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm, goto unwind; } - init_pd(pdp, &vm->scratch_pd); + init_pd(pdp, &vm->scratch[2]); spin_lock(&pml4->lock); - if (pml4->entry[pml4e] == &vm->scratch_pdp) { + if (pml4->entry[pml4e] == &vm->scratch[3]) { set_pd_entry(pml4, pml4e, pdp); } else { alloc = pdp; @@ -1216,7 +1214,7 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm, goto out; unwind_pdp: - if (release_pd_entry(pml4, pml4e, &pdp->pt, &vm->scratch_pdp)) + if (release_pd_entry(pml4, pml4e, &pdp->pt, &vm->scratch[3])) free_px(vm, pdp); unwind: gen8_ppgtt_clear_4lvl(vm, from, start - from); @@ -1430,7 +1428,7 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma, if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) { u16 i; - encode = vma->vm->scratch_pte; + encode = vma->vm->scratch[0].encode; vaddr = kmap_atomic_px(i915_pt_entry(pd, idx.pde)); @@ -1473,6 +1471,7 @@ static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm, static int gen8_init_scratch(struct i915_address_space *vm) { int ret; + int i; /* * If everybody agrees to not to write into the scratch page, @@ -1486,10 +1485,8 @@ static int gen8_init_scratch(struct i915_address_space *vm) GEM_BUG_ON(!clone->has_read_only); vm->scratch_order = clone->scratch_order; - vm->scratch_pte = clone->scratch_pte; - vm->scratch_pt = clone->scratch_pt; - vm->scratch_pd = clone->scratch_pd; - vm->scratch_pdp = clone->scratch_pdp; + memcpy(vm->scratch, clone->scratch, sizeof(vm->scratch)); + px_dma(&vm->scratch[0]) = 0; /* no xfer of ownership */ return 0; } @@ -1497,43 +1494,25 @@ static int gen8_init_scratch(struct i915_address_space *vm) if (ret) return ret; - vm->scratch_pte = - gen8_pte_encode(vm->scratch_page.daddr, - I915_CACHE_LLC, - vm->has_read_only); + vm->scratch[0].encode = + gen8_pte_encode(px_dma(&vm->scratch[0]), + I915_CACHE_LLC, vm->has_read_only); - if (unlikely(setup_page_dma(vm, &vm->scratch_pt))) { - ret = -ENOMEM; - goto free_scratch_page; - } - fill_px(&vm->scratch_pt, vm->scratch_pte); + for (i = 1; i <= vm->top; i++) { + if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[i])))) + goto free_scratch; - if (unlikely(setup_page_dma(vm, 
&vm->scratch_pd))) { - ret = -ENOMEM; - goto free_pt; - } - fill_px(&vm->scratch_pd, - gen8_pde_encode(vm->scratch_pt.daddr, I915_CACHE_LLC)); - - if (i915_vm_is_4lvl(vm)) { - if (unlikely(setup_page_dma(vm, &vm->scratch_pdp))) { - ret = -ENOMEM; - goto free_pd; - } - fill_px(&vm->scratch_pdp, - gen8_pde_encode(vm->scratch_pd.daddr, I915_CACHE_LLC)); + fill_px(&vm->scratch[i], vm->scratch[i - 1].encode); + vm->scratch[i].encode = + gen8_pde_encode(px_dma(&vm->scratch[i]), + I915_CACHE_LLC); } return 0; -free_pd: - cleanup_page_dma(vm, &vm->scratch_pd); -free_pt: - cleanup_page_dma(vm, &vm->scratch_pt); -free_scratch_page: - cleanup_scratch_page(vm); - - return ret; +free_scratch: + free_scratch(vm); + return -ENOMEM; } static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt) @@ -1550,7 +1529,7 @@ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt) if (IS_ERR(pd)) goto unwind; - init_pd(pd, &vm->scratch_pt); + init_pd(pd, &vm->scratch[1]); set_pd_entry(pdp, pdpe, pd); } @@ -1581,16 +1560,15 @@ static void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt) static void init_pd_n(struct i915_address_space *vm, struct i915_page_directory *pd, - struct i915_page_dma *to, + struct i915_page_scratch *scratch, const unsigned int entries) { - const u64 daddr = gen8_pde_encode(to->daddr, I915_CACHE_LLC); u64 * const vaddr = kmap_atomic_px(pd); - memset64(vaddr, daddr, entries); + memset64(vaddr, scratch->encode, entries); kunmap_atomic(vaddr); - memset_p(pd->entry, to, entries); + memset_p(pd->entry, scratch, entries); } static struct i915_page_directory * @@ -1601,7 +1579,7 @@ gen8_alloc_top_pd(struct i915_address_space *vm) if (i915_vm_is_4lvl(vm)) { pd = alloc_pd(vm); if (!IS_ERR(pd)) - init_pd(pd, &vm->scratch_pdp); + init_pd(pd, &vm->scratch[3]); return pd; } @@ -1618,7 +1596,7 @@ gen8_alloc_top_pd(struct i915_address_space *vm) return ERR_PTR(-ENOMEM); } - init_pd_n(vm, pd, &vm->scratch_pd, GEN8_3LVL_PDPES); + init_pd_n(vm, pd, &vm->scratch[2], GEN8_3LVL_PDPES); return pd; } @@ -1766,7 +1744,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm, { struct gen6_ppgtt * const ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); const unsigned int first_entry = start / I915_GTT_PAGE_SIZE; - const gen6_pte_t scratch_pte = vm->scratch_pte; + const gen6_pte_t scratch_pte = vm->scratch[0].encode; unsigned int pde = first_entry / GEN6_PTES; unsigned int pte = first_entry % GEN6_PTES; unsigned int num_entries = length / I915_GTT_PAGE_SIZE; @@ -1777,7 +1755,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm, const unsigned int count = min(num_entries, GEN6_PTES - pte); gen6_pte_t *vaddr; - GEM_BUG_ON(px_base(pt) == &vm->scratch_pt); + GEM_BUG_ON(px_base(pt) == px_base(&vm->scratch[1])); num_entries -= count; @@ -1814,7 +1792,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, struct sgt_dma iter = sgt_dma(vma); gen6_pte_t *vaddr; - GEM_BUG_ON(pd->entry[act_pt] == &vm->scratch_pt); + GEM_BUG_ON(pd->entry[act_pt] == &vm->scratch[1]); vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt)); do { @@ -1859,7 +1837,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, gen6_for_each_pde(pt, pd, start, length, pde) { const unsigned int count = gen6_pte_count(start, length); - if (px_base(pt) == &vm->scratch_pt) { + if (px_base(pt) == px_base(&vm->scratch[1])) { spin_unlock(&pd->lock); pt = fetch_and_zero(&alloc); @@ -1870,10 +1848,10 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, goto unwind_out; } - 
fill32_px(pt, vm->scratch_pte); + fill32_px(pt, vm->scratch[0].encode); spin_lock(&pd->lock); - if (pd->entry[pde] == &vm->scratch_pt) { + if (pd->entry[pde] == &vm->scratch[1]) { pd->entry[pde] = pt; if (i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND)) { @@ -1910,26 +1888,23 @@ static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt) { struct i915_address_space * const vm = &ppgtt->base.vm; struct i915_page_directory * const pd = ppgtt->base.pd; - struct i915_page_table *unused; - u32 pde; int ret; ret = setup_scratch_page(vm, __GFP_HIGHMEM); if (ret) return ret; - vm->scratch_pte = vm->pte_encode(vm->scratch_page.daddr, - I915_CACHE_NONE, - PTE_READ_ONLY); + vm->scratch[0].encode = + vm->pte_encode(px_dma(&vm->scratch[0]), + I915_CACHE_NONE, PTE_READ_ONLY); - if (unlikely(setup_page_dma(vm, &vm->scratch_pt))) { + if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[1])))) { cleanup_scratch_page(vm); return -ENOMEM; } - fill32_px(&vm->scratch_pt, vm->scratch_pte); - gen6_for_all_pdes(unused, pd, pde) - pd->entry[pde] = &vm->scratch_pt; + fill32_px(&vm->scratch[1], vm->scratch[0].encode); + memset_p(pd->entry, &vm->scratch[1], I915_PDES); return 0; } @@ -1937,11 +1912,13 @@ static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt) static void gen6_ppgtt_free_pd(struct gen6_ppgtt *ppgtt) { struct i915_page_directory * const pd = ppgtt->base.pd; + struct i915_page_dma * const scratch = + px_base(&ppgtt->base.vm.scratch[1]); struct i915_page_table *pt; u32 pde; gen6_for_all_pdes(pt, pd, pde) - if (px_base(pt) != &ppgtt->base.vm.scratch_pt) + if (px_base(pt) != scratch) free_px(&ppgtt->base.vm, pt); } @@ -1999,7 +1976,8 @@ static void pd_vma_unbind(struct i915_vma *vma) { struct gen6_ppgtt *ppgtt = vma->private; struct i915_page_directory * const pd = ppgtt->base.pd; - struct i915_page_dma * const scratch = &ppgtt->base.vm.scratch_pt; + struct i915_page_dma * const scratch = + px_base(&ppgtt->base.vm.scratch[1]); struct i915_page_table *pt; unsigned int pde; @@ -2405,7 +2383,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm, struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); unsigned first_entry = start / I915_GTT_PAGE_SIZE; unsigned num_entries = length / I915_GTT_PAGE_SIZE; - const gen8_pte_t scratch_pte = vm->scratch_pte; + const gen8_pte_t scratch_pte = vm->scratch[0].encode; gen8_pte_t __iomem *gtt_base = (gen8_pte_t __iomem *)ggtt->gsm + first_entry; const int max_entries = ggtt_total_entries(ggtt) - first_entry; @@ -2530,8 +2508,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm, first_entry, num_entries, max_entries)) num_entries = max_entries; - scratch_pte = vm->scratch_pte; - + scratch_pte = vm->scratch[0].encode; for (i = 0; i < num_entries; i++) iowrite32(scratch_pte, >t_base[i]); } @@ -3005,8 +2982,8 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) return ret; } - ggtt->vm.scratch_pte = - ggtt->vm.pte_encode(ggtt->vm.scratch_page.daddr, + ggtt->vm.scratch[0].encode = + ggtt->vm.pte_encode(px_dma(&ggtt->vm.scratch[0]), I915_CACHE_NONE, 0); return 0; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index fb33f251ef9a..1a43ac4a9249 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -239,6 +239,11 @@ struct i915_page_dma { }; }; +struct i915_page_scratch { + struct i915_page_dma base; + u64 encode; +}; + struct i915_page_table { struct i915_page_dma base; atomic_t used; @@ -259,9 +264,10 @@ struct i915_page_directory { #define px_base(px) \ 
__px_choose_expr(px, struct i915_page_dma *, __x, \ + __px_choose_expr(px, struct i915_page_scratch *, &__x->base, \ __px_choose_expr(px, struct i915_page_table *, &__x->base, \ __px_choose_expr(px, struct i915_page_directory *, &__x->pt.base, \ - (void)0))) + (void)0)))) #define px_dma(px) (px_base(px)->daddr) #define px_pt(px) \ @@ -316,12 +322,8 @@ struct i915_address_space { #define VM_CLASS_GGTT 0 #define VM_CLASS_PPGTT 1 - u64 scratch_pte; - int scratch_order; - struct i915_page_dma scratch_page; - struct i915_page_dma scratch_pt; - struct i915_page_dma scratch_pd; - struct i915_page_dma scratch_pdp; /* GEN8+ & 48b PPGTT */ + struct i915_page_scratch scratch[4]; + unsigned int scratch_order; unsigned int top; /** -- cgit v1.2.3 From 6239901c576d4df87d6463c4f163bd432995d540 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 12 Jul 2019 10:43:27 +0100 Subject: drm/i915/gtt: Use NULL to encode scratch shadow entries We can simplify our gtt walking code by comparing against NULL for scratch entries as opposed to looking up the distinct per-level scratch pointer. The only caveat is to remember to protect external parties and map the NULL to the scratch top pd. Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190712094327.24437-6-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_gtt.c | 107 +++++++++++------------------------- drivers/gpu/drm/i915/i915_gem_gtt.h | 2 +- 2 files changed, 33 insertions(+), 76 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 16bcf2163ae0..753090a7729e 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -710,7 +710,6 @@ static struct i915_page_table *alloc_pt(struct i915_address_space *vm) } atomic_set(&pt->used, 0); - return pt; } @@ -718,13 +717,11 @@ static struct i915_page_directory *__alloc_pd(void) { struct i915_page_directory *pd; - pd = kmalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL); + pd = kzalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL); if (unlikely(!pd)) return NULL; - atomic_set(px_used(pd), 0); spin_lock_init(&pd->lock); - return pd; } @@ -752,63 +749,56 @@ static void free_pd(struct i915_address_space *vm, struct i915_page_dma *pd) #define free_px(vm, px) free_pd(vm, px_base(px)) -static void init_pd(struct i915_page_directory *pd, - struct i915_page_scratch *scratch) -{ - fill_px(pd, scratch->encode); - memset_p(pd->entry, scratch, 512); -} - static inline void write_dma_entry(struct i915_page_dma * const pdma, - const unsigned short pde, + const unsigned short idx, const u64 encoded_entry) { u64 * const vaddr = kmap_atomic(pdma->page); - vaddr[pde] = encoded_entry; + vaddr[idx] = encoded_entry; kunmap_atomic(vaddr); } static inline void __set_pd_entry(struct i915_page_directory * const pd, - const unsigned short pde, + const unsigned short idx, struct i915_page_dma * const to, u64 (*encode)(const dma_addr_t, const enum i915_cache_level)) { GEM_BUG_ON(atomic_read(px_used(pd)) > 512); atomic_inc(px_used(pd)); - pd->entry[pde] = to; - write_dma_entry(px_base(pd), pde, encode(to->daddr, I915_CACHE_LLC)); + pd->entry[idx] = to; + write_dma_entry(px_base(pd), idx, encode(to->daddr, I915_CACHE_LLC)); } -#define set_pd_entry(pd, pde, to) \ - __set_pd_entry((pd), (pde), px_base(to), gen8_pde_encode) +#define set_pd_entry(pd, idx, to) \ + __set_pd_entry((pd), (idx), px_base(to), gen8_pde_encode) static inline void clear_pd_entry(struct i915_page_directory * const pd, 
- const unsigned short pde, - struct i915_page_scratch * const scratch) + const unsigned short idx, + const struct i915_page_scratch * const scratch) { GEM_BUG_ON(atomic_read(px_used(pd)) == 0); - write_dma_entry(px_base(pd), pde, scratch->encode); - pd->entry[pde] = scratch; + write_dma_entry(px_base(pd), idx, scratch->encode); + pd->entry[idx] = NULL; atomic_dec(px_used(pd)); } static bool release_pd_entry(struct i915_page_directory * const pd, - const unsigned short pde, + const unsigned short idx, struct i915_page_table * const pt, - struct i915_page_scratch * const scratch) + const struct i915_page_scratch * const scratch) { bool free = false; spin_lock(&pd->lock); if (atomic_dec_and_test(&pt->used)) { - clear_pd_entry(pd, pde, scratch); + clear_pd_entry(pd, idx, scratch); free = true; } spin_unlock(&pd->lock); @@ -912,7 +902,7 @@ static void gen8_free_page_tables(struct i915_address_space *vm, int i; for (i = 0; i < I915_PDES; i++) { - if (pd->entry[i] != &vm->scratch[1]) + if (pd->entry[i]) free_pd(vm, pd->entry[i]); } } @@ -924,7 +914,7 @@ static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm, int i; for (i = 0; i < pdpes; i++) { - if (pdp->entry[i] == &vm->scratch[2]) + if (!pdp->entry[i]) continue; gen8_free_page_tables(vm, pdp->entry[i]); @@ -942,7 +932,7 @@ static void gen8_ppgtt_cleanup_4lvl(struct i915_ppgtt *ppgtt) for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) { struct i915_page_directory *pdp = i915_pdp_entry(pml4, i); - if (px_base(pdp) == px_base(&ppgtt->vm.scratch[3])) + if (!pdp) continue; gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, pdp); @@ -996,8 +986,6 @@ static void gen8_ppgtt_clear_pd(struct i915_address_space *vm, u32 pde; gen8_for_each_pde(pt, pd, start, length, pde) { - GEM_BUG_ON(px_base(pt) == px_base(&vm->scratch[1])); - atomic_inc(&pt->used); gen8_ppgtt_clear_pt(vm, pt, start, length); if (release_pd_entry(pd, pde, pt, &vm->scratch[1])) @@ -1016,8 +1004,6 @@ static void gen8_ppgtt_clear_pdp(struct i915_address_space *vm, unsigned int pdpe; gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { - GEM_BUG_ON(px_base(pd) == px_base(&vm->scratch[2])); - atomic_inc(px_used(pd)); gen8_ppgtt_clear_pd(vm, pd, start, length); if (release_pd_entry(pdp, pdpe, &pd->pt, &vm->scratch[2])) @@ -1046,8 +1032,6 @@ static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm, GEM_BUG_ON(!i915_vm_is_4lvl(vm)); gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) { - GEM_BUG_ON(px_base(pdp) == px_base(&vm->scratch[3])); - atomic_inc(px_used(pdp)); gen8_ppgtt_clear_pdp(vm, pdp, start, length); if (release_pd_entry(pml4, pml4e, &pdp->pt, &vm->scratch[3])) @@ -1068,7 +1052,7 @@ static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm, gen8_for_each_pde(pt, pd, start, length, pde) { const int count = gen8_pte_count(start, length); - if (px_base(pt) == px_base(&vm->scratch[1])) { + if (!pt) { spin_unlock(&pd->lock); pt = fetch_and_zero(&alloc); @@ -1083,7 +1067,7 @@ static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm, fill_px(pt, vm->scratch[0].encode); spin_lock(&pd->lock); - if (pd->entry[pde] == &vm->scratch[1]) { + if (!pd->entry[pde]) { set_pd_entry(pd, pde, pt); } else { alloc = pt; @@ -1115,7 +1099,7 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm, spin_lock(&pdp->lock); gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { - if (px_base(pd) == px_base(&vm->scratch[2])) { + if (!pd) { spin_unlock(&pdp->lock); pd = fetch_and_zero(&alloc); @@ -1126,10 +1110,10 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm, goto unwind; } - 
init_pd(pd, &vm->scratch[1]); + fill_px(pd, vm->scratch[1].encode); spin_lock(&pdp->lock); - if (pdp->entry[pdpe] == &vm->scratch[2]) { + if (!pdp->entry[pdpe]) { set_pd_entry(pdp, pdpe, pd); } else { alloc = pd; @@ -1179,7 +1163,7 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm, spin_lock(&pml4->lock); gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) { - if (px_base(pdp) == px_base(&vm->scratch[3])) { + if (!pdp) { spin_unlock(&pml4->lock); pdp = fetch_and_zero(&alloc); @@ -1190,10 +1174,10 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm, goto unwind; } - init_pd(pdp, &vm->scratch[2]); + fill_px(pdp, vm->scratch[2].encode); spin_lock(&pml4->lock); - if (pml4->entry[pml4e] == &vm->scratch[3]) { + if (!pml4->entry[pml4e]) { set_pd_entry(pml4, pml4e, pdp); } else { alloc = pdp; @@ -1529,7 +1513,7 @@ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt) if (IS_ERR(pd)) goto unwind; - init_pd(pd, &vm->scratch[1]); + fill_px(pd, vm->scratch[1].encode); set_pd_entry(pdp, pdpe, pd); } @@ -1558,46 +1542,19 @@ static void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt) ppgtt->vm.vma_ops.clear_pages = clear_pages; } -static void init_pd_n(struct i915_address_space *vm, - struct i915_page_directory *pd, - struct i915_page_scratch *scratch, - const unsigned int entries) -{ - u64 * const vaddr = kmap_atomic_px(pd); - - memset64(vaddr, scratch->encode, entries); - kunmap_atomic(vaddr); - - memset_p(pd->entry, scratch, entries); -} - static struct i915_page_directory * gen8_alloc_top_pd(struct i915_address_space *vm) { + const unsigned int count = vm->total >> __gen8_pte_shift(vm->top); struct i915_page_directory *pd; - if (i915_vm_is_4lvl(vm)) { - pd = alloc_pd(vm); - if (!IS_ERR(pd)) - init_pd(pd, &vm->scratch[3]); + GEM_BUG_ON(count > ARRAY_SIZE(pd->entry)); + pd = alloc_pd(vm); + if (IS_ERR(pd)) return pd; - } - - /* 3lvl */ - pd = __alloc_pd(); - if (!pd) - return ERR_PTR(-ENOMEM); - - pd->entry[GEN8_3LVL_PDPES] = NULL; - - if (unlikely(setup_page_dma(vm, px_base(pd)))) { - kfree(pd); - return ERR_PTR(-ENOMEM); - } - - init_pd_n(vm, pd, &vm->scratch[2], GEN8_3LVL_PDPES); + fill_page_dma(px_base(pd), vm->scratch[vm->top].encode, count); return pd; } diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 1a43ac4a9249..b30ffe333852 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -609,7 +609,7 @@ i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n) { struct i915_page_dma *pt = ppgtt->pd->entry[n]; - return px_dma(pt); + return px_dma(pt ?: px_base(&ppgtt->vm.scratch[ppgtt->vm.top])); } static inline struct i915_ggtt * -- cgit v1.2.3 From b5893ffc274be966f95aa35f35916fa8725af154 Mon Sep 17 00:00:00 2001 From: Janusz Krzysztofik Date: Fri, 12 Jul 2019 13:24:25 +0200 Subject: drm/i915: Drop extern qualifiers from header function prototypes Follow dim checkpatch recommendation so it doesn't complain on that now and again on header file modifications. 
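(Aside, not part of the patch: 'extern' is already the implied storage class for a function declaration, so dropping it changes nothing about the declared symbol; checkpatch simply prefers the shorter form. A tiny illustration with hypothetical names follows.)

/* Illustration only, hypothetical names: both prototypes declare
 * exactly the same external function.
 */
struct foo;			/* made-up type for the example */

extern int frob(struct foo *f);	/* redundant 'extern' */
int frob(struct foo *f);	/* equivalent, preferred form */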
v2: drop testing leftover (Chris) Signed-off-by: Janusz Krzysztofik Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190712112429.740-2-janusz.krzysztofik@linux.intel.com --- drivers/gpu/drm/i915/gem/i915_gem_object.h | 2 +- drivers/gpu/drm/i915/gvt/gtt.h | 13 +++++---- drivers/gpu/drm/i915/i915_drv.h | 47 ++++++++++++++---------------- drivers/gpu/drm/i915/i915_irq.h | 4 +-- drivers/gpu/drm/i915/oa/i915_oa_bdw.h | 2 +- drivers/gpu/drm/i915/oa/i915_oa_bxt.h | 2 +- drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h | 2 +- drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h | 2 +- drivers/gpu/drm/i915/oa/i915_oa_chv.h | 2 +- drivers/gpu/drm/i915/oa/i915_oa_cnl.h | 2 +- drivers/gpu/drm/i915/oa/i915_oa_glk.h | 2 +- drivers/gpu/drm/i915/oa/i915_oa_hsw.h | 2 +- drivers/gpu/drm/i915/oa/i915_oa_icl.h | 2 +- drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h | 2 +- drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h | 2 +- drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h | 2 +- drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h | 2 +- drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h | 2 +- include/drm/i915_drm.h | 10 +++---- 19 files changed, 51 insertions(+), 53 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index 20754c15412a..67aea07ea019 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -81,7 +81,7 @@ i915_gem_object_lookup(struct drm_file *file, u32 handle) } __deprecated -extern struct drm_gem_object * +struct drm_gem_object * drm_gem_object_lookup(struct drm_file *file, u32 handle); __attribute__((nonnull)) diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index 42d0394f0de2..88789316807d 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h @@ -205,17 +205,18 @@ struct intel_vgpu_gtt { struct intel_vgpu_scratch_pt scratch_pt[GTT_TYPE_MAX]; }; -extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu); -extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu); +int intel_vgpu_init_gtt(struct intel_vgpu *vgpu); +void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu); void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old); void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu); -extern int intel_gvt_init_gtt(struct intel_gvt *gvt); +int intel_gvt_init_gtt(struct intel_gvt *gvt); void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu); -extern void intel_gvt_clean_gtt(struct intel_gvt *gvt); +void intel_gvt_clean_gtt(struct intel_gvt *gvt); -extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu, - int page_table_level, void *root_entry); +struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu, + int page_table_level, + void *root_entry); struct intel_vgpu_oos_page { struct intel_vgpu_ppgtt_spt *spt; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 56527a7a1666..1a6b4e14a405 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2390,19 +2390,17 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level, __i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__) #ifdef CONFIG_COMPAT -extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, - unsigned long arg); +long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); #else #define i915_compat_ioctl NULL #endif extern const struct dev_pm_ops i915_pm_ops; -extern int i915_driver_load(struct 
pci_dev *pdev, - const struct pci_device_id *ent); -extern void i915_driver_unload(struct drm_device *dev); +int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent); +void i915_driver_unload(struct drm_device *dev); -extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine); -extern void intel_hangcheck_init(struct drm_i915_private *dev_priv); +void intel_engine_init_hangcheck(struct intel_engine_cs *engine); +void intel_hangcheck_init(struct drm_i915_private *dev_priv); int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv); @@ -2672,14 +2670,14 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine, bool is_master); /* i915_perf.c */ -extern void i915_perf_init(struct drm_i915_private *dev_priv); -extern void i915_perf_fini(struct drm_i915_private *dev_priv); -extern void i915_perf_register(struct drm_i915_private *dev_priv); -extern void i915_perf_unregister(struct drm_i915_private *dev_priv); +void i915_perf_init(struct drm_i915_private *dev_priv); +void i915_perf_fini(struct drm_i915_private *dev_priv); +void i915_perf_register(struct drm_i915_private *dev_priv); +void i915_perf_unregister(struct drm_i915_private *dev_priv); /* i915_suspend.c */ -extern int i915_save_state(struct drm_i915_private *dev_priv); -extern int i915_restore_state(struct drm_i915_private *dev_priv); +int i915_save_state(struct drm_i915_private *dev_priv); +int i915_restore_state(struct drm_i915_private *dev_priv); /* i915_sysfs.c */ void i915_setup_sysfs(struct drm_i915_private *dev_priv); @@ -2693,23 +2691,22 @@ mkwrite_device_info(struct drm_i915_private *dev_priv) } /* modesetting */ -extern void intel_modeset_init_hw(struct drm_device *dev); -extern int intel_modeset_init(struct drm_device *dev); -extern void intel_modeset_cleanup(struct drm_device *dev); -extern int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, - bool state); -extern void intel_display_resume(struct drm_device *dev); -extern void i915_redisable_vga(struct drm_i915_private *dev_priv); -extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv); -extern void intel_init_pch_refclk(struct drm_i915_private *dev_priv); +void intel_modeset_init_hw(struct drm_device *dev); +int intel_modeset_init(struct drm_device *dev); +void intel_modeset_cleanup(struct drm_device *dev); +int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state); +void intel_display_resume(struct drm_device *dev); +void i915_redisable_vga(struct drm_i915_private *dev_priv); +void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv); +void intel_init_pch_refclk(struct drm_i915_private *dev_priv); int i915_reg_read_ioctl(struct drm_device *dev, void *data, struct drm_file *file); -extern struct intel_display_error_state * +struct intel_display_error_state * intel_display_capture_error_state(struct drm_i915_private *dev_priv); -extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, - struct intel_display_error_state *error); +void intel_display_print_error_state(struct drm_i915_error_state_buf *e, + struct intel_display_error_state *error); #define __I915_REG_OP(op__, dev_priv__, ...) 
\ intel_uncore_##op__(&(dev_priv__)->uncore, __VA_ARGS__) diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h index d93fa4e75442..4f803f910177 100644 --- a/drivers/gpu/drm/i915/i915_irq.h +++ b/drivers/gpu/drm/i915/i915_irq.h @@ -13,8 +13,8 @@ struct drm_i915_private; struct intel_crtc; -extern void intel_irq_init(struct drm_i915_private *dev_priv); -extern void intel_irq_fini(struct drm_i915_private *dev_priv); +void intel_irq_init(struct drm_i915_private *dev_priv); +void intel_irq_fini(struct drm_i915_private *dev_priv); int intel_irq_install(struct drm_i915_private *dev_priv); void intel_irq_uninstall(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/oa/i915_oa_bdw.h b/drivers/gpu/drm/i915/oa/i915_oa_bdw.h index 0e667f1a8aa1..b5ed68882588 100644 --- a/drivers/gpu/drm/i915/oa/i915_oa_bdw.h +++ b/drivers/gpu/drm/i915/oa/i915_oa_bdw.h @@ -10,6 +10,6 @@ #ifndef __I915_OA_BDW_H__ #define __I915_OA_BDW_H__ -extern void i915_perf_load_test_config_bdw(struct drm_i915_private *dev_priv); +void i915_perf_load_test_config_bdw(struct drm_i915_private *dev_priv); #endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_bxt.h b/drivers/gpu/drm/i915/oa/i915_oa_bxt.h index 679e92cf4f1d..43c3e4ab030a 100644 --- a/drivers/gpu/drm/i915/oa/i915_oa_bxt.h +++ b/drivers/gpu/drm/i915/oa/i915_oa_bxt.h @@ -10,6 +10,6 @@ #ifndef __I915_OA_BXT_H__ #define __I915_OA_BXT_H__ -extern void i915_perf_load_test_config_bxt(struct drm_i915_private *dev_priv); +void i915_perf_load_test_config_bxt(struct drm_i915_private *dev_priv); #endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h b/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h index 4d6025559bbe..1b4b563bc585 100644 --- a/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h +++ b/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h @@ -10,6 +10,6 @@ #ifndef __I915_OA_CFLGT2_H__ #define __I915_OA_CFLGT2_H__ -extern void i915_perf_load_test_config_cflgt2(struct drm_i915_private *dev_priv); +void i915_perf_load_test_config_cflgt2(struct drm_i915_private *dev_priv); #endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h b/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h index 0697f4077402..500565e055cd 100644 --- a/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h +++ b/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h @@ -10,6 +10,6 @@ #ifndef __I915_OA_CFLGT3_H__ #define __I915_OA_CFLGT3_H__ -extern void i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv); +void i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv); #endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_chv.h b/drivers/gpu/drm/i915/oa/i915_oa_chv.h index 0986eae3135f..ad85d6a6a573 100644 --- a/drivers/gpu/drm/i915/oa/i915_oa_chv.h +++ b/drivers/gpu/drm/i915/oa/i915_oa_chv.h @@ -10,6 +10,6 @@ #ifndef __I915_OA_CHV_H__ #define __I915_OA_CHV_H__ -extern void i915_perf_load_test_config_chv(struct drm_i915_private *dev_priv); +void i915_perf_load_test_config_chv(struct drm_i915_private *dev_priv); #endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cnl.h b/drivers/gpu/drm/i915/oa/i915_oa_cnl.h index e830a406aff2..9faaca38b587 100644 --- a/drivers/gpu/drm/i915/oa/i915_oa_cnl.h +++ b/drivers/gpu/drm/i915/oa/i915_oa_cnl.h @@ -10,6 +10,6 @@ #ifndef __I915_OA_CNL_H__ #define __I915_OA_CNL_H__ -extern void i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv); +void i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv); #endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_glk.h b/drivers/gpu/drm/i915/oa/i915_oa_glk.h index 06dedf991edb..cc13a1e9fd3e 100644 --- 
a/drivers/gpu/drm/i915/oa/i915_oa_glk.h +++ b/drivers/gpu/drm/i915/oa/i915_oa_glk.h @@ -10,6 +10,6 @@ #ifndef __I915_OA_GLK_H__ #define __I915_OA_GLK_H__ -extern void i915_perf_load_test_config_glk(struct drm_i915_private *dev_priv); +void i915_perf_load_test_config_glk(struct drm_i915_private *dev_priv); #endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_hsw.h b/drivers/gpu/drm/i915/oa/i915_oa_hsw.h index 3d0c870cd0bd..f0ddcc79c761 100644 --- a/drivers/gpu/drm/i915/oa/i915_oa_hsw.h +++ b/drivers/gpu/drm/i915/oa/i915_oa_hsw.h @@ -10,6 +10,6 @@ #ifndef __I915_OA_HSW_H__ #define __I915_OA_HSW_H__ -extern void i915_perf_load_test_config_hsw(struct drm_i915_private *dev_priv); +void i915_perf_load_test_config_hsw(struct drm_i915_private *dev_priv); #endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_icl.h b/drivers/gpu/drm/i915/oa/i915_oa_icl.h index 24eaa97d61ba..e501651d385b 100644 --- a/drivers/gpu/drm/i915/oa/i915_oa_icl.h +++ b/drivers/gpu/drm/i915/oa/i915_oa_icl.h @@ -10,6 +10,6 @@ #ifndef __I915_OA_ICL_H__ #define __I915_OA_ICL_H__ -extern void i915_perf_load_test_config_icl(struct drm_i915_private *dev_priv); +void i915_perf_load_test_config_icl(struct drm_i915_private *dev_priv); #endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h b/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h index a55398a904de..dc460e6e0fae 100644 --- a/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h +++ b/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h @@ -10,6 +10,6 @@ #ifndef __I915_OA_KBLGT2_H__ #define __I915_OA_KBLGT2_H__ -extern void i915_perf_load_test_config_kblgt2(struct drm_i915_private *dev_priv); +void i915_perf_load_test_config_kblgt2(struct drm_i915_private *dev_priv); #endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h b/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h index 3ddd3483b7cc..5926992b735a 100644 --- a/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h +++ b/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h @@ -10,6 +10,6 @@ #ifndef __I915_OA_KBLGT3_H__ #define __I915_OA_KBLGT3_H__ -extern void i915_perf_load_test_config_kblgt3(struct drm_i915_private *dev_priv); +void i915_perf_load_test_config_kblgt3(struct drm_i915_private *dev_priv); #endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h b/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h index be6256037239..353db35b36c1 100644 --- a/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h +++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h @@ -10,6 +10,6 @@ #ifndef __I915_OA_SKLGT2_H__ #define __I915_OA_SKLGT2_H__ -extern void i915_perf_load_test_config_sklgt2(struct drm_i915_private *dev_priv); +void i915_perf_load_test_config_sklgt2(struct drm_i915_private *dev_priv); #endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h b/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h index 650beb068e56..52f94c674b62 100644 --- a/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h +++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h @@ -10,6 +10,6 @@ #ifndef __I915_OA_SKLGT3_H__ #define __I915_OA_SKLGT3_H__ -extern void i915_perf_load_test_config_sklgt3(struct drm_i915_private *dev_priv); +void i915_perf_load_test_config_sklgt3(struct drm_i915_private *dev_priv); #endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h b/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h index 8dcf849d131e..8e364820cc63 100644 --- a/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h +++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h @@ -10,6 +10,6 @@ #ifndef __I915_OA_SKLGT4_H__ #define __I915_OA_SKLGT4_H__ -extern void i915_perf_load_test_config_sklgt4(struct drm_i915_private *dev_priv); +void i915_perf_load_test_config_sklgt4(struct 
drm_i915_private *dev_priv); #endif diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index eb30062359d1..23274cf92712 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h @@ -30,11 +30,11 @@ #include /* For use by IPS driver */ -extern unsigned long i915_read_mch_val(void); -extern bool i915_gpu_raise(void); -extern bool i915_gpu_lower(void); -extern bool i915_gpu_busy(void); -extern bool i915_gpu_turbo_disable(void); +unsigned long i915_read_mch_val(void); +bool i915_gpu_raise(void); +bool i915_gpu_lower(void); +bool i915_gpu_busy(void); +bool i915_gpu_turbo_disable(void); /* Exported from arch/x86/kernel/early-quirks.c */ extern struct resource intel_graphics_stolen_res; -- cgit v1.2.3 From b01558e56f84866dcad5f3f99819ec560d639f09 Mon Sep 17 00:00:00 2001 From: Janusz Krzysztofik Date: Fri, 12 Jul 2019 13:24:26 +0200 Subject: drm/i915: Rename "_load"/"_unload" to match PCI entry points Current names of i915_driver_load/unload() functions originate in legacy DRM stubs. Reduce nomenclature ambiguity by renaming them to match their current use as helpers called from PCI entry points. Suggested by: Chris Wilson Signed-off-by: Janusz Krzysztofik Reviewed-by: Chris Wilson Reviewed-by: Joonas Lahtinen Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190712112429.740-3-janusz.krzysztofik@linux.intel.com --- drivers/gpu/drm/i915/i915_drv.c | 8 ++++---- drivers/gpu/drm/i915/i915_drv.h | 4 ++-- drivers/gpu/drm/i915/i915_pci.c | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 8502a2e4268e..9488aa3cb9cc 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1876,17 +1876,17 @@ static void i915_driver_destroy(struct drm_i915_private *i915) } /** - * i915_driver_load - setup chip and create an initial config + * i915_driver_probe - setup chip and create an initial config * @pdev: PCI device * @ent: matching PCI ID entry * - * The driver load routine has to do several things: + * The driver probe routine has to do several things: * - drive output discovery via intel_modeset_init() * - initialize the memory manager * - allocate initial config memory * - setup the DRM framebuffer with the allocated memory */ -int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) +int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { const struct intel_device_info *match_info = (struct intel_device_info *)ent->driver_data; @@ -1952,7 +1952,7 @@ out_fini: return ret; } -void i915_driver_unload(struct drm_device *dev) +void i915_driver_remove(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); struct pci_dev *pdev = dev_priv->drm.pdev; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 1a6b4e14a405..b644f54abec2 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2396,8 +2396,8 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); #endif extern const struct dev_pm_ops i915_pm_ops; -int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent); -void i915_driver_unload(struct drm_device *dev); +int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent); +void i915_driver_remove(struct drm_device *dev); void intel_engine_init_hangcheck(struct intel_engine_cs *engine); void intel_hangcheck_init(struct drm_i915_private 
*dev_priv); diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index e83c94cf2744..49558727b5f3 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -878,7 +878,7 @@ static void i915_pci_remove(struct pci_dev *pdev) if (!dev) /* driver load aborted, nothing to cleanup */ return; - i915_driver_unload(dev); + i915_driver_remove(dev); drm_dev_put(dev); pci_set_drvdata(pdev, NULL); @@ -953,7 +953,7 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (vga_switcheroo_client_probe_defer(pdev)) return -EPROBE_DEFER; - err = i915_driver_load(pdev, ent); + err = i915_driver_probe(pdev, ent); if (err) return err; -- cgit v1.2.3 From f2db53f14d3d1cde1d4c3b5b5aaba556c953f2f5 Mon Sep 17 00:00:00 2001 From: Janusz Krzysztofik Date: Fri, 12 Jul 2019 13:24:27 +0200 Subject: drm/i915: Replace "_load" with "_probe" consequently Use the "_probe" nomenclature not only in i915_driver_probe() helper name but also in other related function / variable names for consistency. Only the userspace exposed name of a related module parameter is left untouched. Signed-off-by: Janusz Krzysztofik Reviewed-by: Chris Wilson Reviewed-by: Joonas Lahtinen Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190712112429.740-4-janusz.krzysztofik@linux.intel.com --- drivers/gpu/drm/i915/display/intel_connector.c | 2 +- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 2 +- drivers/gpu/drm/i915/i915_drv.c | 20 ++++++++++---------- drivers/gpu/drm/i915/i915_drv.h | 10 +++++----- drivers/gpu/drm/i915/i915_gem.c | 8 ++++---- drivers/gpu/drm/i915/i915_pci.c | 2 +- drivers/gpu/drm/i915/intel_gvt.c | 2 +- drivers/gpu/drm/i915/intel_uncore.c | 2 +- drivers/gpu/drm/i915/intel_wopcm.c | 2 +- 9 files changed, 25 insertions(+), 25 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c index 41310f8e5a2a..d0163d86c42a 100644 --- a/drivers/gpu/drm/i915/display/intel_connector.c +++ b/drivers/gpu/drm/i915/display/intel_connector.c @@ -118,7 +118,7 @@ int intel_connector_register(struct drm_connector *connector) if (ret) goto err; - if (i915_inject_load_failure()) { + if (i915_inject_probe_failure()) { ret = -EFAULT; goto err_backlight; } diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 76b5c068a26d..2dc1917b9d30 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -426,7 +426,7 @@ int intel_engines_init_mmio(struct drm_i915_private *i915) WARN_ON(engine_mask & GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES)); - if (i915_inject_load_failure()) + if (i915_inject_probe_failure()) return -ENODEV; for (i = 0; i < ARRAY_SIZE(intel_engines); i++) { diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 9488aa3cb9cc..7371571d735d 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -81,14 +81,14 @@ static struct drm_driver driver; #if IS_ENABLED(CONFIG_DRM_I915_DEBUG) -static unsigned int i915_load_fail_count; +static unsigned int i915_probe_fail_count; -bool __i915_inject_load_failure(const char *func, int line) +bool __i915_inject_probe_failure(const char *func, int line) { - if (i915_load_fail_count >= i915_modparams.inject_load_failure) + if (i915_probe_fail_count >= i915_modparams.inject_load_failure) return false; - if (++i915_load_fail_count == 
i915_modparams.inject_load_failure) { + if (++i915_probe_fail_count == i915_modparams.inject_load_failure) { DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n", i915_modparams.inject_load_failure, func, line); i915_modparams.inject_load_failure = 0; @@ -100,7 +100,7 @@ bool __i915_inject_load_failure(const char *func, int line) bool i915_error_injected(void) { - return i915_load_fail_count && !i915_modparams.inject_load_failure; + return i915_probe_fail_count && !i915_modparams.inject_load_failure; } #endif @@ -687,7 +687,7 @@ static int i915_load_modeset_init(struct drm_device *dev) struct pci_dev *pdev = dev_priv->drm.pdev; int ret; - if (i915_inject_load_failure()) + if (i915_inject_probe_failure()) return -ENODEV; if (HAS_DISPLAY(dev_priv)) { @@ -903,7 +903,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv) { int ret = 0; - if (i915_inject_load_failure()) + if (i915_inject_probe_failure()) return -ENODEV; intel_device_info_subplatform_init(dev_priv); @@ -997,7 +997,7 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv) { int ret; - if (i915_inject_load_failure()) + if (i915_inject_probe_failure()) return -ENODEV; if (i915_get_bridge_dev(dev_priv)) @@ -1541,7 +1541,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) struct pci_dev *pdev = dev_priv->drm.pdev; int ret; - if (i915_inject_load_failure()) + if (i915_inject_probe_failure()) return -ENODEV; intel_device_info_runtime_init(dev_priv); @@ -1947,7 +1947,7 @@ out_runtime_pm_put: out_pci_disable: pci_disable_device(pdev); out_fini: - i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret); + i915_probe_error(dev_priv, "Device initialization failed (%d)\n", ret); i915_driver_destroy(dev_priv); return ret; } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index b644f54abec2..af07d8d4ac10 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -122,20 +122,20 @@ #if IS_ENABLED(CONFIG_DRM_I915_DEBUG) -bool __i915_inject_load_failure(const char *func, int line); -#define i915_inject_load_failure() \ - __i915_inject_load_failure(__func__, __LINE__) +bool __i915_inject_probe_failure(const char *func, int line); +#define i915_inject_probe_failure() \ + __i915_inject_probe_failure(__func__, __LINE__) bool i915_error_injected(void); #else -#define i915_inject_load_failure() false +#define i915_inject_probe_failure() false #define i915_error_injected() false #endif -#define i915_load_error(i915, fmt, ...) \ +#define i915_probe_error(i915, fmt, ...) \ __i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \ fmt, ##__VA_ARGS__) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 7ade42b8ec99..015208741405 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1515,12 +1515,12 @@ int i915_gem_init(struct drm_i915_private *dev_priv) if (ret) goto err_gt; - if (i915_inject_load_failure()) { + if (i915_inject_probe_failure()) { ret = -ENODEV; goto err_gt; } - if (i915_inject_load_failure()) { + if (i915_inject_probe_failure()) { ret = -EIO; goto err_gt; } @@ -1582,8 +1582,8 @@ err_uc_misc: * for all other failure, such as an allocation failure, bail. 
*/ if (!i915_reset_failed(dev_priv)) { - i915_load_error(dev_priv, - "Failed to initialize GPU, declaring it wedged!\n"); + i915_probe_error(dev_priv, + "Failed to initialize GPU, declaring it wedged!\n"); i915_gem_set_wedged(dev_priv); } diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 49558727b5f3..95763ad92287 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -957,7 +957,7 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) return err; - if (i915_inject_load_failure()) { + if (i915_inject_probe_failure()) { i915_pci_remove(pdev); return -ENODEV; } diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c index 1d7d26e4cf14..842ee26effd4 100644 --- a/drivers/gpu/drm/i915/intel_gvt.c +++ b/drivers/gpu/drm/i915/intel_gvt.c @@ -95,7 +95,7 @@ int intel_gvt_init(struct drm_i915_private *dev_priv) { int ret; - if (i915_inject_load_failure()) + if (i915_inject_probe_failure()) return -ENODEV; if (!i915_modparams.enable_gvt) { diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 4015e964c6fc..475ab3d4d91d 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -1331,7 +1331,7 @@ static int __fw_domain_init(struct intel_uncore *uncore, GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT); GEM_BUG_ON(uncore->fw_domain[domain_id]); - if (i915_inject_load_failure()) + if (i915_inject_probe_failure()) return -ENOMEM; d = kzalloc(sizeof(*d), GFP_KERNEL); diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c index 8c850785e4b4..a6bc15bc7be3 100644 --- a/drivers/gpu/drm/i915/intel_wopcm.c +++ b/drivers/gpu/drm/i915/intel_wopcm.c @@ -177,7 +177,7 @@ int intel_wopcm_init(struct intel_wopcm *wopcm) GEM_BUG_ON(!wopcm->size); - if (i915_inject_load_failure()) + if (i915_inject_probe_failure()) return -E2BIG; if (guc_fw_size >= wopcm->size) { -- cgit v1.2.3 From 3b58a94551368924d8a49baa349bc7694fbd7ddd Mon Sep 17 00:00:00 2001 From: Janusz Krzysztofik Date: Fri, 12 Jul 2019 13:24:28 +0200 Subject: drm/i915: Propagate "_release" function name suffix down Replace mixed "_fini"/"_cleanup"/"_cleanup_hw" suffixes found in names of functions called from i915_driver_release() with "_release" suffix consistently. This provides better code readability, especially helpful when trying to work out which phase the code is in. Functions names starting with "i915_driver_", i.e., those defined in drivers/gpu/dri/i915/i915_drv.c, just have their "cleanup" or "fini" parts of their names replaced with the "_release" suffix, while names of functions coming from other source files have been suffixed with "_driver_release" to avoid ambiguity with other possible .release entry points. 
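To make the intended split concrete, here is a minimal, hypothetical skeleton (the example_* functions are invented for illustration; i915_driver_remove(), drm_dev_put(), i915_driver_late_release() and i915_ggtt_driver_release() are the real symbols touched by this series). The point of the naming is that PCI unbind runs the *_remove helpers immediately, while the *_release helpers only run once the last reference to the drm_device is dropped:

  /* Illustrative sketch only -- not part of the patch. */
  static void example_pci_remove(struct pci_dev *pdev)
  {
          struct drm_device *dev = pci_get_drvdata(pdev);

          i915_driver_remove(dev);        /* unbind: *_remove helpers run here */
          drm_dev_put(dev);               /* final unref may happen later */
  }

  static void example_drm_release(struct drm_device *dev)
  {
          /*
           * Only at this point do the *_release helpers run, e.g.
           * i915_driver_late_release() and i915_ggtt_driver_release().
           */
  }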
v2: early_probe pairs better with late_release (Chris) v3: fix typo in commit message (Joonas) Suggested-by: Chris Wilson Signed-off-by: Janusz Krzysztofik Reviewed-by: Chris Wilson Reviewed-by: Joonas Lahtinen Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190712112429.740-5-janusz.krzysztofik@linux.intel.com --- drivers/gpu/drm/i915/i915_drv.c | 33 +++++++++++++++++---------------- drivers/gpu/drm/i915/i915_drv.h | 2 +- drivers/gpu/drm/i915/i915_gem.c | 2 +- drivers/gpu/drm/i915/i915_gem_gtt.c | 4 ++-- drivers/gpu/drm/i915/i915_gem_gtt.h | 2 +- drivers/gpu/drm/i915/intel_runtime_pm.c | 2 +- drivers/gpu/drm/i915/intel_runtime_pm.h | 2 +- 7 files changed, 24 insertions(+), 23 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 7371571d735d..60dad4450e28 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -758,7 +758,7 @@ static int i915_load_modeset_init(struct drm_device *dev) cleanup_gem: i915_gem_suspend(dev_priv); i915_gem_fini_hw(dev_priv); - i915_gem_fini(dev_priv); + i915_gem_driver_release(dev_priv); cleanup_modeset: intel_modeset_cleanup(dev); cleanup_irq: @@ -968,10 +968,11 @@ err_engines: } /** - * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early() + * i915_driver_late_release - cleanup the setup done in + * i915_driver_init_early() * @dev_priv: device private */ -static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv) +static void i915_driver_late_release(struct drm_i915_private *dev_priv) { intel_irq_fini(dev_priv); intel_power_domains_cleanup(dev_priv); @@ -1034,10 +1035,10 @@ err_bridge: } /** - * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio() + * i915_driver_mmio_release - cleanup the setup done in i915_driver_init_mmio() * @dev_priv: device private */ -static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv) +static void i915_driver_mmio_release(struct drm_i915_private *dev_priv) { intel_teardown_mchbar(dev_priv); intel_uncore_fini_mmio(&dev_priv->uncore); @@ -1690,7 +1691,7 @@ err_msi: pci_disable_msi(pdev); pm_qos_remove_request(&dev_priv->pm_qos); err_ggtt: - i915_ggtt_cleanup_hw(dev_priv); + i915_ggtt_driver_release(dev_priv); err_perf: i915_perf_fini(dev_priv); return ret; @@ -1935,15 +1936,15 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) out_cleanup_hw: i915_driver_cleanup_hw(dev_priv); - i915_ggtt_cleanup_hw(dev_priv); + i915_ggtt_driver_release(dev_priv); /* Paranoia: make sure we have disabled everything before we exit. */ intel_sanitize_gt_powersave(dev_priv); out_cleanup_mmio: - i915_driver_cleanup_mmio(dev_priv); + i915_driver_mmio_release(dev_priv); out_runtime_pm_put: enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); - i915_driver_cleanup_early(dev_priv); + i915_driver_late_release(dev_priv); out_pci_disable: pci_disable_device(pdev); out_fini: @@ -2006,19 +2007,19 @@ static void i915_driver_release(struct drm_device *dev) disable_rpm_wakeref_asserts(rpm); - i915_gem_fini(dev_priv); + i915_gem_driver_release(dev_priv); - i915_ggtt_cleanup_hw(dev_priv); + i915_ggtt_driver_release(dev_priv); /* Paranoia: make sure we have disabled everything before we exit. 
*/ intel_sanitize_gt_powersave(dev_priv); - i915_driver_cleanup_mmio(dev_priv); + i915_driver_mmio_release(dev_priv); enable_rpm_wakeref_asserts(rpm); - intel_runtime_pm_cleanup(rpm); + intel_runtime_pm_driver_release(rpm); - i915_driver_cleanup_early(dev_priv); + i915_driver_late_release(dev_priv); i915_driver_destroy(dev_priv); } @@ -2211,7 +2212,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) out: enable_rpm_wakeref_asserts(rpm); if (!dev_priv->uncore.user_forcewake.count) - intel_runtime_pm_cleanup(rpm); + intel_runtime_pm_driver_release(rpm); return ret; } @@ -2975,7 +2976,7 @@ static int intel_runtime_suspend(struct device *kdev) } enable_rpm_wakeref_asserts(rpm); - intel_runtime_pm_cleanup(rpm); + intel_runtime_pm_driver_release(rpm); if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore)) DRM_ERROR("Unclaimed access detected prior to suspending\n"); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index af07d8d4ac10..60e1e84a5862 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2538,7 +2538,7 @@ void i915_gem_init_mmio(struct drm_i915_private *i915); int __must_check i915_gem_init(struct drm_i915_private *dev_priv); int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv); void i915_gem_fini_hw(struct drm_i915_private *dev_priv); -void i915_gem_fini(struct drm_i915_private *dev_priv); +void i915_gem_driver_release(struct drm_i915_private *dev_priv); int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv, unsigned int flags, long timeout); void i915_gem_suspend(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 015208741405..51a0fbaa781b 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1620,7 +1620,7 @@ void i915_gem_fini_hw(struct drm_i915_private *dev_priv) i915_gem_drain_freed_objects(dev_priv); } -void i915_gem_fini(struct drm_i915_private *dev_priv) +void i915_gem_driver_release(struct drm_i915_private *dev_priv) { mutex_lock(&dev_priv->drm.struct_mutex); intel_engines_cleanup(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 753090a7729e..7352b9ef6c0a 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2851,10 +2851,10 @@ static void ggtt_cleanup_hw(struct i915_ggtt *ggtt) } /** - * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization + * i915_ggtt_driver_release - Clean up GGTT hardware initialization * @i915: i915 device */ -void i915_ggtt_cleanup_hw(struct drm_i915_private *i915) +void i915_ggtt_driver_release(struct drm_i915_private *i915) { struct pagevec *pvec; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index b30ffe333852..a450349b3a50 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -634,7 +634,7 @@ int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv); void i915_ggtt_enable_guc(struct drm_i915_private *i915); void i915_ggtt_disable_guc(struct drm_i915_private *i915); int i915_init_ggtt(struct drm_i915_private *dev_priv); -void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv); +void i915_ggtt_driver_release(struct drm_i915_private *dev_priv); int i915_ppgtt_init_hw(struct intel_gt *gt); diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 8d1aebc3e857..b2a05850ea42 100644 --- 
a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -592,7 +592,7 @@ void intel_runtime_pm_disable(struct intel_runtime_pm *rpm) pm_runtime_put(kdev); } -void intel_runtime_pm_cleanup(struct intel_runtime_pm *rpm) +void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm) { int count = atomic_read(&rpm->wakeref_count); diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h index 473c4850c01d..89f8d284239a 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.h +++ b/drivers/gpu/drm/i915/intel_runtime_pm.h @@ -173,7 +173,7 @@ enable_rpm_wakeref_asserts(struct intel_runtime_pm *rpm) void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm); void intel_runtime_pm_enable(struct intel_runtime_pm *rpm); void intel_runtime_pm_disable(struct intel_runtime_pm *rpm); -void intel_runtime_pm_cleanup(struct intel_runtime_pm *rpm); +void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm); intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm); intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm); -- cgit v1.2.3 From 78dae1ac35dd0c50d0f118e886b1f466cb9311fe Mon Sep 17 00:00:00 2001 From: Janusz Krzysztofik Date: Fri, 12 Jul 2019 13:24:29 +0200 Subject: drm/i915: Propagate "_remove" function name suffix down Similar to the "_release" case, consistently replace mixed "_cleanup"/"_fini"/"_fini_hw" components found in names of functions called from i915_driver_remove() with "_remove" or "_driver_remove" suffixes for better code readability. Signed-off-by: Janusz Krzysztofik Reviewed-by: Chris Wilson Reviewed-by: Joonas Lahtinen Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190712112429.740-6-janusz.krzysztofik@linux.intel.com --- drivers/gpu/drm/i915/display/intel_bios.c | 4 ++-- drivers/gpu/drm/i915/display/intel_bios.h | 2 +- drivers/gpu/drm/i915/display/intel_display.c | 2 +- drivers/gpu/drm/i915/display/intel_display_power.c | 6 +++--- drivers/gpu/drm/i915/display/intel_display_power.h | 2 +- drivers/gpu/drm/i915/i915_drv.c | 24 +++++++++++----------- drivers/gpu/drm/i915/i915_drv.h | 4 ++-- drivers/gpu/drm/i915/i915_gem.c | 2 +- drivers/gpu/drm/i915/intel_gvt.c | 5 +++-- drivers/gpu/drm/i915/intel_gvt.h | 5 +++-- 10 files changed, 29 insertions(+), 27 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index 2fe68f72b88f..21501d565327 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -1908,10 +1908,10 @@ out: } /** - * intel_bios_cleanup - Free any resources allocated by intel_bios_init() + * intel_bios_driver_remove - Free any resources allocated by intel_bios_init() * @dev_priv: i915 device instance */ -void intel_bios_cleanup(struct drm_i915_private *dev_priv) +void intel_bios_driver_remove(struct drm_i915_private *dev_priv) { kfree(dev_priv->vbt.child_dev); dev_priv->vbt.child_dev = NULL; diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h index 0b7be6389a07..4969189e620f 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.h +++ b/drivers/gpu/drm/i915/display/intel_bios.h @@ -228,7 +228,7 @@ struct mipi_pps_data { } __packed; void intel_bios_init(struct drm_i915_private *dev_priv); -void intel_bios_cleanup(struct drm_i915_private *dev_priv); +void intel_bios_driver_remove(struct drm_i915_private *dev_priv); bool 
intel_bios_is_valid_vbt(const void *buf, size_t size); bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv); bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin); diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 79ae8f4e5213..e12671ca1886 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -17092,7 +17092,7 @@ static void intel_hpd_poll_fini(struct drm_device *dev) drm_connector_list_iter_end(&conn_iter); } -void intel_modeset_cleanup(struct drm_device *dev) +void intel_modeset_driver_remove(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index d25fd5a25199..93a148684c53 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -4902,7 +4902,7 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv); * * It will return with power domains disabled (to be enabled later by * intel_power_domains_enable()) and must be paired with - * intel_power_domains_fini_hw(). + * intel_power_domains_driver_remove(). */ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume) { @@ -4954,7 +4954,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume) } /** - * intel_power_domains_fini_hw - deinitialize hw power domain state + * intel_power_domains_driver_remove - deinitialize hw power domain state * @i915: i915 device instance * * De-initializes the display power domain HW state. It also ensures that the @@ -4964,7 +4964,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume) * intel_power_domains_disable()) and must be paired with * intel_power_domains_init_hw(). 
*/ -void intel_power_domains_fini_hw(struct drm_i915_private *i915) +void intel_power_domains_driver_remove(struct drm_i915_private *i915) { intel_wakeref_t wakeref __maybe_unused = fetch_and_zero(&i915->power_domains.wakeref); diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h index a264f18c95f1..e4d2c1ba24b0 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.h +++ b/drivers/gpu/drm/i915/display/intel_display_power.h @@ -241,7 +241,7 @@ void gen9_enable_dc5(struct drm_i915_private *dev_priv); int intel_power_domains_init(struct drm_i915_private *dev_priv); void intel_power_domains_cleanup(struct drm_i915_private *dev_priv); void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume); -void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv); +void intel_power_domains_driver_remove(struct drm_i915_private *dev_priv); void icl_display_core_init(struct drm_i915_private *dev_priv, bool resume); void icl_display_core_uninit(struct drm_i915_private *dev_priv); void intel_power_domains_enable(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 60dad4450e28..3af8e602befe 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -757,16 +757,16 @@ static int i915_load_modeset_init(struct drm_device *dev) cleanup_gem: i915_gem_suspend(dev_priv); - i915_gem_fini_hw(dev_priv); + i915_gem_driver_remove(dev_priv); i915_gem_driver_release(dev_priv); cleanup_modeset: - intel_modeset_cleanup(dev); + intel_modeset_driver_remove(dev); cleanup_irq: intel_irq_uninstall(dev_priv); intel_gmbus_teardown(dev_priv); cleanup_csr: intel_csr_ucode_fini(dev_priv); - intel_power_domains_fini_hw(dev_priv); + intel_power_domains_driver_remove(dev_priv); vga_switcheroo_unregister_client(pdev); cleanup_vga_client: vga_client_register(pdev, NULL, NULL, NULL); @@ -1698,10 +1698,10 @@ err_perf: } /** - * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw() + * i915_driver_hw_remove - cleanup the setup done in i915_driver_hw_probe() * @dev_priv: device private */ -static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv) +static void i915_driver_hw_remove(struct drm_i915_private *dev_priv) { struct pci_dev *pdev = dev_priv->drm.pdev; @@ -1935,7 +1935,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; out_cleanup_hw: - i915_driver_cleanup_hw(dev_priv); + i915_driver_hw_remove(dev_priv); i915_ggtt_driver_release(dev_priv); /* Paranoia: make sure we have disabled everything before we exit. 
*/ @@ -1976,11 +1976,11 @@ void i915_driver_remove(struct drm_device *dev) drm_atomic_helper_shutdown(dev); - intel_gvt_cleanup(dev_priv); + intel_gvt_driver_remove(dev_priv); - intel_modeset_cleanup(dev); + intel_modeset_driver_remove(dev); - intel_bios_cleanup(dev_priv); + intel_bios_driver_remove(dev_priv); vga_switcheroo_unregister_client(pdev); vga_client_register(pdev, NULL, NULL, NULL); @@ -1991,11 +1991,11 @@ void i915_driver_remove(struct drm_device *dev) cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); i915_reset_error_state(dev_priv); - i915_gem_fini_hw(dev_priv); + i915_gem_driver_remove(dev_priv); - intel_power_domains_fini_hw(dev_priv); + intel_power_domains_driver_remove(dev_priv); - i915_driver_cleanup_hw(dev_priv); + i915_driver_hw_remove(dev_priv); enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 60e1e84a5862..7e1ee30d74a9 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2537,7 +2537,7 @@ bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv); void i915_gem_init_mmio(struct drm_i915_private *i915); int __must_check i915_gem_init(struct drm_i915_private *dev_priv); int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv); -void i915_gem_fini_hw(struct drm_i915_private *dev_priv); +void i915_gem_driver_remove(struct drm_i915_private *dev_priv); void i915_gem_driver_release(struct drm_i915_private *dev_priv); int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv, unsigned int flags, long timeout); @@ -2693,7 +2693,7 @@ mkwrite_device_info(struct drm_i915_private *dev_priv) /* modesetting */ void intel_modeset_init_hw(struct drm_device *dev); int intel_modeset_init(struct drm_device *dev); -void intel_modeset_cleanup(struct drm_device *dev); +void intel_modeset_driver_remove(struct drm_device *dev); int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state); void intel_display_resume(struct drm_device *dev); void i915_redisable_vga(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 51a0fbaa781b..37fe2ed2f582 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1600,7 +1600,7 @@ err_uc_misc: return ret; } -void i915_gem_fini_hw(struct drm_i915_private *dev_priv) +void i915_gem_driver_remove(struct drm_i915_private *dev_priv) { GEM_BUG_ON(dev_priv->gt.awake); diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c index 842ee26effd4..c66b2d8a6219 100644 --- a/drivers/gpu/drm/i915/intel_gvt.c +++ b/drivers/gpu/drm/i915/intel_gvt.c @@ -122,13 +122,14 @@ bail: } /** - * intel_gvt_cleanup - cleanup GVT components when i915 driver is unloading + * intel_gvt_driver_remove - cleanup GVT components when i915 driver is + * unbinding * @dev_priv: drm i915 private * * * This function is called at the i915 driver unloading stage, to shutdown * GVT components and release the related resources. 
*/ -void intel_gvt_cleanup(struct drm_i915_private *dev_priv) +void intel_gvt_driver_remove(struct drm_i915_private *dev_priv) { if (!intel_gvt_active(dev_priv)) return; diff --git a/drivers/gpu/drm/i915/intel_gvt.h b/drivers/gpu/drm/i915/intel_gvt.h index 85ce37eb7cd6..502fad8a8652 100644 --- a/drivers/gpu/drm/i915/intel_gvt.h +++ b/drivers/gpu/drm/i915/intel_gvt.h @@ -28,7 +28,7 @@ struct drm_i915_private; #ifdef CONFIG_DRM_I915_GVT int intel_gvt_init(struct drm_i915_private *dev_priv); -void intel_gvt_cleanup(struct drm_i915_private *dev_priv); +void intel_gvt_driver_remove(struct drm_i915_private *dev_priv); int intel_gvt_init_device(struct drm_i915_private *dev_priv); void intel_gvt_clean_device(struct drm_i915_private *dev_priv); int intel_gvt_init_host(void); @@ -38,7 +38,8 @@ static inline int intel_gvt_init(struct drm_i915_private *dev_priv) { return 0; } -static inline void intel_gvt_cleanup(struct drm_i915_private *dev_priv) + +static inline void intel_gvt_driver_remove(struct drm_i915_private *dev_priv) { } -- cgit v1.2.3 From 0b61b8b07f6bc54b3e255bc40f63c3adfa968594 Mon Sep 17 00:00:00 2001 From: Janusz Krzysztofik Date: Fri, 12 Jul 2019 13:24:30 +0200 Subject: drm/i915: Propagate "_probe" function name suffix down Similar to the "_release" and "_remove" cases, consequently replace "_init" components of names of functions called from i915_driver_probe() with "_probe" suffixes for better code readability. Signed-off-by: Janusz Krzysztofik Reviewed-by: Chris Wilson Reviewed-by: Joonas Lahtinen Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190712112429.740-7-janusz.krzysztofik@linux.intel.com --- drivers/gpu/drm/i915/i915_drv.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 3af8e602befe..e2d1bed9454c 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -681,7 +681,7 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = { .can_switch = i915_switcheroo_can_switch, }; -static int i915_load_modeset_init(struct drm_device *dev) +static int i915_driver_modeset_probe(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); struct pci_dev *pdev = dev_priv->drm.pdev; @@ -890,7 +890,7 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv) } /** - * i915_driver_init_early - setup state not requiring device access + * i915_driver_early_probe - setup state not requiring device access * @dev_priv: device private * * Initialize everything that is a "SW-only" state, that is state not @@ -899,7 +899,7 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv) * system memory allocation, setting up device specific attributes and * function hooks not requiring accessing the device. 
*/ -static int i915_driver_init_early(struct drm_i915_private *dev_priv) +static int i915_driver_early_probe(struct drm_i915_private *dev_priv) { int ret = 0; @@ -969,7 +969,7 @@ err_engines: /** * i915_driver_late_release - cleanup the setup done in - * i915_driver_init_early() + * i915_driver_early_probe() * @dev_priv: device private */ static void i915_driver_late_release(struct drm_i915_private *dev_priv) @@ -986,7 +986,7 @@ static void i915_driver_late_release(struct drm_i915_private *dev_priv) } /** - * i915_driver_init_mmio - setup device MMIO + * i915_driver_mmio_probe - setup device MMIO * @dev_priv: device private * * Setup minimal device state necessary for MMIO accesses later in the @@ -994,7 +994,7 @@ static void i915_driver_late_release(struct drm_i915_private *dev_priv) * side effects or exposing the driver via kernel internal or user space * interfaces. */ -static int i915_driver_init_mmio(struct drm_i915_private *dev_priv) +static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv) { int ret; @@ -1035,7 +1035,7 @@ err_bridge: } /** - * i915_driver_mmio_release - cleanup the setup done in i915_driver_init_mmio() + * i915_driver_mmio_release - cleanup the setup done in i915_driver_mmio_probe() * @dev_priv: device private */ static void i915_driver_mmio_release(struct drm_i915_private *dev_priv) @@ -1531,13 +1531,13 @@ static void edram_detect(struct drm_i915_private *dev_priv) } /** - * i915_driver_init_hw - setup state requiring device access + * i915_driver_hw_probe - setup state requiring device access * @dev_priv: device private * * Setup state that requires accessing the device, but doesn't require * exposing the driver via kernel internal or userspace interfaces. */ -static int i915_driver_init_hw(struct drm_i915_private *dev_priv) +static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) { struct pci_dev *pdev = dev_priv->drm.pdev; int ret; @@ -1906,7 +1906,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto out_fini; - ret = i915_driver_init_early(dev_priv); + ret = i915_driver_early_probe(dev_priv); if (ret < 0) goto out_pci_disable; @@ -1914,15 +1914,15 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) i915_detect_vgpu(dev_priv); - ret = i915_driver_init_mmio(dev_priv); + ret = i915_driver_mmio_probe(dev_priv); if (ret < 0) goto out_runtime_pm_put; - ret = i915_driver_init_hw(dev_priv); + ret = i915_driver_hw_probe(dev_priv); if (ret < 0) goto out_cleanup_mmio; - ret = i915_load_modeset_init(&dev_priv->drm); + ret = i915_driver_modeset_probe(&dev_priv->drm); if (ret < 0) goto out_cleanup_hw; -- cgit v1.2.3 From 87d855e8cfa48613423357587dfbfe29bec23abe Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Fri, 12 Jul 2019 11:14:44 +0000 Subject: drm/i915/guc: Don't enable GuC/HuC in auto mode on pre-Gen11 We are about to change default setting of "enable_guc" modparam from 0(disabled) to -1(auto). As we only want to turn on GuC/HuC on Gen11+, keep it off for older gens. Note that it would be still possible to enable GuC/HuC on these old platforms using explicit "enable_guc=2" modparam. 
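For reference, the decision that auto mode now resolves to can be summarized by the following hypothetical helper (guc_auto_policy() is a made-up name; HAS_GUC(), INTEL_GEN() and ENABLE_GUC_LOAD_HUC are the symbols used by the real __get_platform_enable_guc() in the diff below, which additionally requires both the GuC and HuC firmware blobs to be selected):

  /* Illustrative sketch only -- not the in-tree helper. */
  static int guc_auto_policy(struct drm_i915_private *i915)
  {
          if (!HAS_GUC(i915))
                  return 0;               /* no GuC hardware at all */

          if (INTEL_GEN(i915) < 11)
                  return 0;               /* pre-Gen11 stays off in auto mode */

          return ENABLE_GUC_LOAD_HUC;     /* Gen11+: let GuC authenticate HuC */
  }

An explicit modparam value such as enable_guc=2 is used as given rather than being routed through the auto-mode helper, which is what keeps the manual override on legacy platforms working.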
Signed-off-by: Michal Wajdeczko Cc: Jani Nikula Cc: Joonas Lahtinen Cc: Rodrigo Vivi Cc: Chris Wilson Cc: Daniele Ceraolo Spurio Reviewed-by: Joonas Lahtinen Reviewed-by: Rodrigo Vivi Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190712111445.21040-2-michal.wajdeczko@intel.com --- drivers/gpu/drm/i915/intel_uc.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c index fdf00f1ebb57..72bbcb2597fd 100644 --- a/drivers/gpu/drm/i915/intel_uc.c +++ b/drivers/gpu/drm/i915/intel_uc.c @@ -58,12 +58,16 @@ static int __get_platform_enable_guc(struct drm_i915_private *i915) struct intel_uc_fw *huc_fw = &i915->huc.fw; int enable_guc = 0; - /* Default is to use HuC if we know GuC and HuC firmwares */ + if (!HAS_GUC(i915)) + return 0; + + /* We don't want to enable GuC/HuC on pre-Gen11 by default */ + if (INTEL_GEN(i915) < 11) + return 0; + if (intel_uc_fw_is_selected(guc_fw) && intel_uc_fw_is_selected(huc_fw)) enable_guc |= ENABLE_GUC_LOAD_HUC; - /* Any platform specific fine-tuning can be done here */ - return enable_guc; } -- cgit v1.2.3 From f774f09649192f326fa030564afd3f8f5d82c1e4 Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Fri, 12 Jul 2019 11:14:45 +0000 Subject: drm/i915/guc: Turn on GuC/HuC auto mode Using "enable_guc" modparam auto mode (-1) will let driver decide on which platforms and in which configuration we want to use GuC/HuC firmwares. Today driver will enable HuC firmware authentication by GuC only on Gen11+ platforms as HuC firmware is required to unlock advanced video codecs in media driver. Legacy platforms with GuC/HuC are not affected by this change as for them driver still defaults to disabled(0) in auto mode. Signed-off-by: Michal Wajdeczko Cc: Jani Nikula Cc: Joonas Lahtinen Cc: Rodrigo Vivi Cc: Chris Wilson Cc: Daniele Ceraolo Spurio Reviewed-by: Joonas Lahtinen Reviewed-by: Rodrigo Vivi Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190712111445.21040-3-michal.wajdeczko@intel.com --- drivers/gpu/drm/i915/i915_params.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h index d29ade3b7de6..5736c55694fe 100644 --- a/drivers/gpu/drm/i915/i915_params.h +++ b/drivers/gpu/drm/i915/i915_params.h @@ -54,7 +54,7 @@ struct drm_printer; param(int, disable_power_well, -1) \ param(int, enable_ips, 1) \ param(int, invert_brightness, 0) \ - param(int, enable_guc, 0) \ + param(int, enable_guc, -1) \ param(int, guc_log_level, -1) \ param(char *, guc_firmware_path, NULL) \ param(char *, huc_firmware_path, NULL) \ -- cgit v1.2.3 From eaa2b31be152514f638e7c1c482e0ea0920a6aab Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Tue, 28 May 2019 17:06:50 +0300 Subject: drm/i915: Skip SINK_COUNT read on CH7511 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit CH7511 doesn't update SINK_COUNT properly so in order to detect the device as connected we have to ignore SINK_COUNT. In order to have access to the quirk list early enough we must move the drm_dp_read_desc() call to happen earlier. We can also skip re-reading this on eDP since we know it won't change. Cc: David S. 
Cc: Peteris Rudzusiks Tested-by: Peteris Rudzusiks Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=105406 Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190528140650.19230-2-ville.syrjala@linux.intel.com Acked-by: Jani Nikula #irc --- drivers/gpu/drm/i915/display/intel_dp.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 24592d985bcb..ca333b3c7415 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -4221,8 +4221,14 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) if (!intel_dp_read_dpcd(intel_dp)) return false; - /* Don't clobber cached eDP rates. */ + /* + * Don't clobber cached eDP rates. Also skip re-reading + * the OUI/ID since we know it won't change. + */ if (!intel_dp_is_edp(intel_dp)) { + drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, + drm_dp_is_branch(intel_dp->dpcd)); + intel_dp_set_sink_rates(intel_dp); intel_dp_set_common_rates(intel_dp); } @@ -4231,7 +4237,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) * Some eDP panels do not set a valid value for sink count, that is why * it don't care about read it here and in intel_edp_init_dpcd(). */ - if (!intel_dp_is_edp(intel_dp)) { + if (!intel_dp_is_edp(intel_dp) && + !drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_SINK_COUNT)) { u8 count; ssize_t r; @@ -5377,9 +5384,6 @@ intel_dp_detect(struct drm_connector *connector, if (INTEL_GEN(dev_priv) >= 11) intel_dp_get_dsc_sink_cap(intel_dp); - drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, - drm_dp_is_branch(intel_dp->dpcd)); - intel_dp_configure_mst(intel_dp); if (intel_dp->is_mst) { -- cgit v1.2.3 From 801404632c4ba08adce04cb9e81a1d34bfb14b17 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 12 Jul 2019 14:42:34 +0100 Subject: drm/i915/display: Drop kerneldoc for 'intel_atomic_commit' MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit intel_atomic_commit() is not for use internally, but only as an entry point from the core drm atomic helper (drm_atomic_commit). Squelches the warning for: drivers/gpu/drm/i915/display/intel_display.c:14148: warning: Function parameter or member '_state' not described in 'intel_atomic_commit' drivers/gpu/drm/i915/display/intel_display.c:14148: warning: Excess function parameter 'state' description in 'intel_atomic_commit' Signed-off-by: Chris Wilson Cc: Daniel Vetter Cc: Maarten Lankhorst Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190712134234.29893-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/display/intel_display.c | 12 ------------ 1 file changed, 12 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index e12671ca1886..9758523f0837 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -14130,18 +14130,6 @@ static void intel_atomic_track_fbs(struct intel_atomic_state *state) plane->frontbuffer_bit); } -/** - * intel_atomic_commit - commit validated state object - * @dev: DRM device - * @state: the top-level driver state object - * @nonblock: nonblocking commit - * - * This function commits a top-level state object that has been validated - * with drm_atomic_helper_check(). - * - * RETURNS - * Zero for success or -errno. 
- */ static int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state, bool nonblock) -- cgit v1.2.3 From 1eda701eace2ddac0f2e2e7176ff6b504e815453 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 12 Jul 2019 12:27:22 +0100 Subject: drm/i915/gtt: Recursive cleanup for gen8 With an explicit level, we can refactor the separate cleanup functions as a simple recursive function. We take the opportunity to pass down the size of each level so that we can deal with the different sizes of top-level and avoid over allocating for 32/36-bit vm. Signed-off-by: Chris Wilson Reviewed-by: Abdiel Janulgue Link: https://patchwork.freedesktop.org/patch/msgid/20190712112725.2892-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_gtt.c | 93 +++++++++++++------------------------ 1 file changed, 32 insertions(+), 61 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 7352b9ef6c0a..3a156f0c44f7 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -713,11 +713,11 @@ static struct i915_page_table *alloc_pt(struct i915_address_space *vm) return pt; } -static struct i915_page_directory *__alloc_pd(void) +static struct i915_page_directory *__alloc_pd(size_t sz) { struct i915_page_directory *pd; - pd = kzalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL); + pd = kzalloc(sz, I915_GFP_ALLOW_FAIL); if (unlikely(!pd)) return NULL; @@ -729,7 +729,7 @@ static struct i915_page_directory *alloc_pd(struct i915_address_space *vm) { struct i915_page_directory *pd; - pd = __alloc_pd(); + pd = __alloc_pd(sizeof(*pd)); if (unlikely(!pd)) return ERR_PTR(-ENOMEM); @@ -766,7 +766,7 @@ __set_pd_entry(struct i915_page_directory * const pd, struct i915_page_dma * const to, u64 (*encode)(const dma_addr_t, const enum i915_cache_level)) { - GEM_BUG_ON(atomic_read(px_used(pd)) > 512); + GEM_BUG_ON(atomic_read(px_used(pd)) > ARRAY_SIZE(pd->entry)); atomic_inc(px_used(pd)); pd->entry[idx] = to; @@ -896,64 +896,34 @@ static inline unsigned int gen8_pt_count(u64 start, u64 end) return end - start; } -static void gen8_free_page_tables(struct i915_address_space *vm, - struct i915_page_directory *pd) +static void __gen8_ppgtt_cleanup(struct i915_address_space *vm, + struct i915_page_directory *pd, + int count, int lvl) { - int i; - - for (i = 0; i < I915_PDES; i++) { - if (pd->entry[i]) - free_pd(vm, pd->entry[i]); - } -} - -static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm, - struct i915_page_directory *pdp) -{ - const unsigned int pdpes = i915_pdpes_per_pdp(vm); - int i; - - for (i = 0; i < pdpes; i++) { - if (!pdp->entry[i]) - continue; - - gen8_free_page_tables(vm, pdp->entry[i]); - free_pd(vm, pdp->entry[i]); - } - - free_px(vm, pdp); -} - -static void gen8_ppgtt_cleanup_4lvl(struct i915_ppgtt *ppgtt) -{ - struct i915_page_directory * const pml4 = ppgtt->pd; - int i; - - for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) { - struct i915_page_directory *pdp = i915_pdp_entry(pml4, i); + if (lvl) { + void **pde = pd->entry; - if (!pdp) - continue; + do { + if (!*pde) + continue; - gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, pdp); + __gen8_ppgtt_cleanup(vm, *pde, GEN8_PDES, lvl - 1); + } while (pde++, --count); } - free_px(&ppgtt->vm, pml4); + free_px(vm, pd); } static void gen8_ppgtt_cleanup(struct i915_address_space *vm) { - struct drm_i915_private *i915 = vm->i915; struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); - if (intel_vgpu_active(i915)) + if (intel_vgpu_active(vm->i915)) 
gen8_ppgtt_notify_vgt(ppgtt, false); - if (i915_vm_is_4lvl(vm)) - gen8_ppgtt_cleanup_4lvl(ppgtt); - else - gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, ppgtt->pd); - + __gen8_ppgtt_cleanup(vm, ppgtt->pd, + vm->total >> __gen8_pte_shift(vm->top), + vm->top); free_scratch(vm); } @@ -1505,24 +1475,18 @@ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt) struct i915_page_directory *pdp = ppgtt->pd; struct i915_page_directory *pd; u64 start = 0, length = ppgtt->vm.total; - u64 from = start; unsigned int pdpe; gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { pd = alloc_pd(vm); if (IS_ERR(pd)) - goto unwind; + return PTR_ERR(pd); fill_px(pd, vm->scratch[1].encode); set_pd_entry(pdp, pdpe, pd); } return 0; - -unwind: - gen8_ppgtt_clear_pdp(vm, pdp, from, start - from); - atomic_set(px_used(pdp), 0); - return -ENOMEM; } static void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt) @@ -1550,9 +1514,14 @@ gen8_alloc_top_pd(struct i915_address_space *vm) GEM_BUG_ON(count > ARRAY_SIZE(pd->entry)); - pd = alloc_pd(vm); - if (IS_ERR(pd)) - return pd; + pd = __alloc_pd(offsetof(typeof(*pd), entry[count])); + if (unlikely(!pd)) + return ERR_PTR(-ENOMEM); + + if (unlikely(setup_page_dma(vm, px_base(pd)))) { + kfree(pd); + return ERR_PTR(-ENOMEM); + } fill_page_dma(px_base(pd), vm->scratch[vm->top].encode, count); return pd; @@ -1625,7 +1594,9 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915) return ppgtt; err_free_pd: - free_px(&ppgtt->vm, ppgtt->pd); + __gen8_ppgtt_cleanup(&ppgtt->vm, ppgtt->pd, + ppgtt->vm.total >> __gen8_pte_shift(ppgtt->vm.top), + ppgtt->vm.top); err_free_scratch: free_scratch(&ppgtt->vm); err_free: @@ -2071,7 +2042,7 @@ static struct i915_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915) ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode; - ppgtt->base.pd = __alloc_pd(); + ppgtt->base.pd = __alloc_pd(sizeof(*ppgtt->base.pd)); if (!ppgtt->base.pd) { err = -ENOMEM; goto err_free; -- cgit v1.2.3 From 4c2be3c5ebfd98fc588f6bc5f53fa2ef516c02ea Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 12 Jul 2019 12:27:23 +0100 Subject: drm/i915/gtt: Recursive ppgtt clear for gen8 With an explicit level, we can refactor the separate clear functions as a simple recursive function. The additional knowledge of the level allows us to spot when we can free an entire subtree at once. Signed-off-by: Chris Wilson Reviewed-by: Abdiel Janulgue Link: https://patchwork.freedesktop.org/patch/msgid/20190712112725.2892-3-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/Kconfig.debug | 14 ++++ drivers/gpu/drm/i915/i915_gem_gtt.c | 154 +++++++++++++++++++++--------------- 2 files changed, 104 insertions(+), 64 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug index 8d922bb4d953..4cdc0181a093 100644 --- a/drivers/gpu/drm/i915/Kconfig.debug +++ b/drivers/gpu/drm/i915/Kconfig.debug @@ -94,6 +94,20 @@ config DRM_I915_TRACE_GEM If in doubt, say "N". +config DRM_I915_TRACE_GTT + bool "Insert extra ftrace output from the GTT internals" + depends on DRM_I915_DEBUG_GEM + select TRACING + default n + help + Enable additional and verbose debugging output that will spam + ordinary tests, but may be vital for post-mortem debugging when + used with /proc/sys/kernel/ftrace_dump_on_oops + + Recommended for driver developers only. + + If in doubt, say "N". 
+ config DRM_I915_SW_FENCE_DEBUG_OBJECTS bool "Enable additional driver debugging for fence objects" depends on DRM_I915 diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 3a156f0c44f7..6da564e27a64 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -46,6 +46,12 @@ #define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN) +#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GTT) +#define DBG(...) trace_printk(__VA_ARGS__) +#else +#define DBG(...) +#endif + /** * DOC: Global GTT views * @@ -796,6 +802,9 @@ release_pd_entry(struct i915_page_directory * const pd, { bool free = false; + if (atomic_add_unless(&pt->used, -1, 1)) + return false; + spin_lock(&pd->lock); if (atomic_dec_and_test(&pt->used)) { clear_pd_entry(pd, idx, scratch); @@ -927,86 +936,101 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm) free_scratch(vm); } -/* Removes entries from a single page table, releasing it if it's empty. - * Caller can use the return value to update higher-level entries. - */ -static void gen8_ppgtt_clear_pt(const struct i915_address_space *vm, - struct i915_page_table *pt, - u64 start, u64 length) +static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm, + struct i915_page_directory * const pd, + u64 start, const u64 end, int lvl) { - const unsigned int num_entries = gen8_pte_count(start, length); - gen8_pte_t *vaddr; + const struct i915_page_scratch * const scratch = &vm->scratch[lvl]; + unsigned int idx, len; - vaddr = kmap_atomic_px(pt); - memset64(vaddr + gen8_pte_index(start), - vm->scratch[0].encode, - num_entries); - kunmap_atomic(vaddr); + len = gen8_pd_range(start, end, lvl--, &idx); + DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d}\n", + __func__, vm, lvl + 1, start, end, + idx, len, atomic_read(px_used(pd))); + GEM_BUG_ON(!len || len >= atomic_read(px_used(pd))); - GEM_BUG_ON(num_entries > atomic_read(&pt->used)); + do { + struct i915_page_table *pt = pd->entry[idx]; + + if (atomic_fetch_inc(&pt->used) >> gen8_pd_shift(1) && + gen8_pd_contains(start, end, lvl)) { + DBG("%s(%p):{ lvl:%d, idx:%d, start:%llx, end:%llx } removing pd\n", + __func__, vm, lvl + 1, idx, start, end); + clear_pd_entry(pd, idx, scratch); + __gen8_ppgtt_cleanup(vm, as_pd(pt), I915_PDES, lvl); + start += (u64)I915_PDES << gen8_pd_shift(lvl); + continue; + } - atomic_sub(num_entries, &pt->used); -} + if (lvl) { + start = __gen8_ppgtt_clear(vm, as_pd(pt), + start, end, lvl); + } else { + unsigned int count; + u64 *vaddr; -static void gen8_ppgtt_clear_pd(struct i915_address_space *vm, - struct i915_page_directory *pd, - u64 start, u64 length) -{ - struct i915_page_table *pt; - u32 pde; + count = gen8_pt_count(start, end); + DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d} removing pte\n", + __func__, vm, lvl, start, end, + gen8_pd_index(start, 0), count, + atomic_read(&pt->used)); + GEM_BUG_ON(!count || count >= atomic_read(&pt->used)); - gen8_for_each_pde(pt, pd, start, length, pde) { - atomic_inc(&pt->used); - gen8_ppgtt_clear_pt(vm, pt, start, length); - if (release_pd_entry(pd, pde, pt, &vm->scratch[1])) + vaddr = kmap_atomic_px(pt); + memset64(vaddr + gen8_pd_index(start, 0), + vm->scratch[0].encode, + count); + kunmap_atomic(vaddr); + + atomic_sub(count, &pt->used); + start += count; + } + + if (release_pd_entry(pd, idx, pt, scratch)) free_px(vm, pt); - } + } while (idx++, --len); + + return start; } -/* Removes entries from a single page dir pointer, releasing 
it if it's empty. - * Caller can use the return value to update higher-level entries - */ -static void gen8_ppgtt_clear_pdp(struct i915_address_space *vm, - struct i915_page_directory * const pdp, - u64 start, u64 length) +static void gen8_ppgtt_clear(struct i915_address_space *vm, + u64 start, u64 length) { - struct i915_page_directory *pd; - unsigned int pdpe; + GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT))); + GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT))); - gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { - atomic_inc(px_used(pd)); - gen8_ppgtt_clear_pd(vm, pd, start, length); - if (release_pd_entry(pdp, pdpe, &pd->pt, &vm->scratch[2])) - free_px(vm, pd); - } + start >>= GEN8_PTE_SHIFT; + length >>= GEN8_PTE_SHIFT; + GEM_BUG_ON(length == 0); + + __gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd, + start, start + length, vm->top); } -static void gen8_ppgtt_clear_3lvl(struct i915_address_space *vm, - u64 start, u64 length) +static void gen8_ppgtt_clear_pd(struct i915_address_space *vm, + struct i915_page_directory *pd, + u64 start, u64 length) { - gen8_ppgtt_clear_pdp(vm, i915_vm_to_ppgtt(vm)->pd, start, length); + GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT))); + GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT))); + + start >>= GEN8_PTE_SHIFT; + length >>= GEN8_PTE_SHIFT; + + __gen8_ppgtt_clear(vm, pd, start, start + length, 1); } -/* Removes entries from a single pml4. - * This is the top-level structure in 4-level page tables used on gen8+. - * Empty entries are always scratch pml4e. - */ -static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm, - u64 start, u64 length) +static void gen8_ppgtt_clear_pdp(struct i915_address_space *vm, + struct i915_page_directory * const pdp, + u64 start, u64 length) { - struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); - struct i915_page_directory * const pml4 = ppgtt->pd; - struct i915_page_directory *pdp; - unsigned int pml4e; + GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT))); + GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT))); - GEM_BUG_ON(!i915_vm_is_4lvl(vm)); + start >>= GEN8_PTE_SHIFT; + length >>= GEN8_PTE_SHIFT; - gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) { - atomic_inc(px_used(pdp)); - gen8_ppgtt_clear_pdp(vm, pdp, start, length); - if (release_pd_entry(pml4, pml4e, &pdp->pt, &vm->scratch[3])) - free_px(vm, pdp); - } + __gen8_ppgtt_clear(vm, pdp, start, start + length, 2); } static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm, @@ -1171,7 +1195,7 @@ unwind_pdp: if (release_pd_entry(pml4, pml4e, &pdp->pt, &vm->scratch[3])) free_px(vm, pdp); unwind: - gen8_ppgtt_clear_4lvl(vm, from, start - from); + gen8_ppgtt_clear(vm, from, start - from); out: if (alloc) free_px(vm, alloc); @@ -1484,6 +1508,7 @@ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt) fill_px(pd, vm->scratch[1].encode); set_pd_entry(pdp, pdpe, pd); + atomic_inc(px_used(pd)); /* keep pinned */ } return 0; @@ -1524,6 +1549,7 @@ gen8_alloc_top_pd(struct i915_address_space *vm) } fill_page_dma(px_base(pd), vm->scratch[vm->top].encode, count); + atomic_inc(px_used(pd)); /* mark as pinned */ return pd; } @@ -1573,7 +1599,6 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915) if (i915_vm_is_4lvl(&ppgtt->vm)) { ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_4lvl; ppgtt->vm.insert_entries = gen8_ppgtt_insert_4lvl; - ppgtt->vm.clear_range = gen8_ppgtt_clear_4lvl; } else { if (intel_vgpu_active(i915)) { err = gen8_preallocate_top_level_pdp(ppgtt); @@ -1583,9 +1608,10 @@ 
static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915) ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_3lvl; ppgtt->vm.insert_entries = gen8_ppgtt_insert_3lvl; - ppgtt->vm.clear_range = gen8_ppgtt_clear_3lvl; } + ppgtt->vm.clear_range = gen8_ppgtt_clear; + if (intel_vgpu_active(i915)) gen8_ppgtt_notify_vgt(ppgtt, true); -- cgit v1.2.3 From 0caf625777300d318565eb29a71d71a8de2ff1d2 Mon Sep 17 00:00:00 2001 From: Anusha Srivatsa Date: Thu, 11 Jul 2019 22:57:05 -0700 Subject: drm/i915: Add modular FIA MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some platforms may have Modular FIA. If Modular FIA is used in the SOC, then Display Driver will access the additional instances of FIA based on pre-assigned offset in GTTMADDR space. Each Modular FIA instance has its own IOSF Sideband Port ID and it houses only 2 Type-C Port. In SOC that has more than two Type-C Ports, there are multiple instances of Modular FIA. Gunit will need to use different destination ID when it access different pair of Type-C Port. The DFLEXDPSP register has Modular FIA bit starting on Tiger Lake. If Modular FIA is used in the SOC, this register bit exists in all the instances of Modular FIA. IOM FW is required to program only the MF bit in first FIA instance that houses the Type-C Port 0 and Port 1, for Display Driver to read from. v2 (Lucas): - Move all accesses to FIA to be contained in intel_tc.c, along with display_fia that is now called tc_phy_fia - Save the fia instance number on intel_digital_port, so we don't have to query if modular FIA is used on every access v3 (Lucas): Make function static v4 (Lucas): Move enum phy_fia to the header and use it in intel_digital_port (suggested by Ville) v5 (Lucas): Add comment about the mapping between FIA and TC port (suggested by Stuart) Cc: Jani Nikula Signed-off-by: Anusha Srivatsa Signed-off-by: Lucas De Marchi Acked-by: Ville Syrjälä Reviewed-by: Stuart Summers Link: https://patchwork.freedesktop.org/patch/msgid/20190712055706.12143-2-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/display/intel_display.h | 6 ++++ drivers/gpu/drm/i915/display/intel_tc.c | 47 +++++++++++++++++++++++----- drivers/gpu/drm/i915/i915_reg.h | 13 +++++--- drivers/gpu/drm/i915/intel_device_info.h | 1 + drivers/gpu/drm/i915/intel_drv.h | 1 + 5 files changed, 56 insertions(+), 12 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index 67743eea4a50..d2c718f25478 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -260,6 +260,12 @@ enum phy { #define phy_name(a) ((a) + 'A') +enum phy_fia { + FIA1, + FIA2, + FIA3, +}; + #define for_each_pipe(__dev_priv, __p) \ for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index f44ee4bfe7c8..c96a81c2416c 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -22,6 +22,28 @@ static const char *tc_port_mode_name(enum tc_port_mode mode) return names[mode]; } +static bool has_modular_fia(struct drm_i915_private *i915) +{ + if (!INTEL_INFO(i915)->display.has_modular_fia) + return false; + + return intel_uncore_read(&i915->uncore, + PORT_TX_DFLEXDPSP(FIA1)) & MODULAR_FIA_MASK; +} + +static enum phy_fia tc_port_to_fia(struct drm_i915_private *i915, + enum tc_port tc_port) +{ + if 
(!has_modular_fia(i915)) + return FIA1; + + /* + * Each Modular FIA instance houses 2 TC ports. In SOC that has more + * than two TC ports, there are multiple instances of Modular FIA. + */ + return tc_port / 2; +} + u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); @@ -29,7 +51,8 @@ u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port) struct intel_uncore *uncore = &i915->uncore; u32 lane_mask; - lane_mask = intel_uncore_read(uncore, PORT_TX_DFLEXDPSP); + lane_mask = intel_uncore_read(uncore, + PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia)); WARN_ON(lane_mask == 0xffffffff); @@ -78,7 +101,8 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port, WARN_ON(lane_reversal && dig_port->tc_mode != TC_PORT_LEGACY); - val = intel_uncore_read(uncore, PORT_TX_DFLEXDPMLE1); + val = intel_uncore_read(uncore, + PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia)); val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc_port); switch (required_lanes) { @@ -97,7 +121,8 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port, MISSING_CASE(required_lanes); } - intel_uncore_write(uncore, PORT_TX_DFLEXDPMLE1, val); + intel_uncore_write(uncore, + PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia), val); } static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port, @@ -129,7 +154,8 @@ static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port) u32 mask = 0; u32 val; - val = intel_uncore_read(uncore, PORT_TX_DFLEXDPSP); + val = intel_uncore_read(uncore, + PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia)); if (val == 0xffffffff) { DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, nothing connected\n", @@ -159,7 +185,8 @@ static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port) struct intel_uncore *uncore = &i915->uncore; u32 val; - val = intel_uncore_read(uncore, PORT_TX_DFLEXDPPMS); + val = intel_uncore_read(uncore, + PORT_TX_DFLEXDPPMS(dig_port->tc_phy_fia)); if (val == 0xffffffff) { DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, assuming not complete\n", dig_port->tc_port_name); @@ -177,7 +204,8 @@ static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port, struct intel_uncore *uncore = &i915->uncore; u32 val; - val = intel_uncore_read(uncore, PORT_TX_DFLEXDPCSSS); + val = intel_uncore_read(uncore, + PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia)); if (val == 0xffffffff) { DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, can't set safe-mode to %s\n", dig_port->tc_port_name, @@ -190,7 +218,8 @@ static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port, if (!enable) val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc_port); - intel_uncore_write(uncore, PORT_TX_DFLEXDPCSSS, val); + intel_uncore_write(uncore, + PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia), val); if (enable && wait_for(!icl_tc_phy_status_complete(dig_port), 10)) DRM_DEBUG_KMS("Port %s: PHY complete clear timed out\n", @@ -206,7 +235,8 @@ static bool icl_tc_phy_is_in_safe_mode(struct intel_digital_port *dig_port) struct intel_uncore *uncore = &i915->uncore; u32 val; - val = intel_uncore_read(uncore, PORT_TX_DFLEXDPCSSS); + val = intel_uncore_read(uncore, + PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia)); if (val == 0xffffffff) { DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, assume safe mode\n", dig_port->tc_port_name); @@ -503,4 +533,5 @@ void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy) mutex_init(&dig_port->tc_lock); dig_port->tc_legacy_port = is_legacy; dig_port->tc_link_refcount = 0; + dig_port->tc_phy_fia = 
tc_port_to_fia(i915, tc_port); } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index e14c9b76c2d0..0dd4506323f2 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2204,9 +2204,13 @@ enum i915_power_well_id { #define DW6_OLDO_DYN_PWR_DOWN_EN (1 << 28) #define FIA1_BASE 0x163000 +#define FIA2_BASE 0x16E000 +#define FIA3_BASE 0x16F000 +#define _FIA(fia) _PICK((fia), FIA1_BASE, FIA2_BASE, FIA3_BASE) +#define _MMIO_FIA(fia, off) _MMIO(_FIA(fia) + (off)) /* ICL PHY DFLEX registers */ -#define PORT_TX_DFLEXDPMLE1 _MMIO(FIA1_BASE + 0x008C0) +#define PORT_TX_DFLEXDPMLE1(fia) _MMIO_FIA((fia), 0x008C0) #define DFLEXDPMLE1_DPMLETC_MASK(tc_port) (0xf << (4 * (tc_port))) #define DFLEXDPMLE1_DPMLETC_ML0(tc_port) (1 << (4 * (tc_port))) #define DFLEXDPMLE1_DPMLETC_ML1_0(tc_port) (3 << (4 * (tc_port))) @@ -11535,17 +11539,18 @@ enum skl_power_gate { _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB, \ _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC) -#define PORT_TX_DFLEXDPSP _MMIO(FIA1_BASE + 0x008A0) +#define PORT_TX_DFLEXDPSP(fia) _MMIO_FIA((fia), 0x008A0) +#define MODULAR_FIA_MASK (1 << 4) #define TC_LIVE_STATE_TBT(tc_port) (1 << ((tc_port) * 8 + 6)) #define TC_LIVE_STATE_TC(tc_port) (1 << ((tc_port) * 8 + 5)) #define DP_LANE_ASSIGNMENT_SHIFT(tc_port) ((tc_port) * 8) #define DP_LANE_ASSIGNMENT_MASK(tc_port) (0xf << ((tc_port) * 8)) #define DP_LANE_ASSIGNMENT(tc_port, x) ((x) << ((tc_port) * 8)) -#define PORT_TX_DFLEXDPPMS _MMIO(FIA1_BASE + 0x00890) +#define PORT_TX_DFLEXDPPMS(fia) _MMIO_FIA((fia), 0x00890) #define DP_PHY_MODE_STATUS_COMPLETED(tc_port) (1 << (tc_port)) -#define PORT_TX_DFLEXDPCSSS _MMIO(FIA1_BASE + 0x00894) +#define PORT_TX_DFLEXDPCSSS(fia) _MMIO_FIA((fia), 0x00894) #define DP_PHY_MODE_STATUS_NOT_SAFE(tc_port) (1 << (tc_port)) #endif /* _I915_REG_H_ */ diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index 468582484758..45a9badc9b8e 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -138,6 +138,7 @@ enum intel_ppgtt_type { func(has_gmch); \ func(has_hotplug); \ func(has_ipc); \ + func(has_modular_fia); \ func(has_overlay); \ func(has_psr); \ func(overlay_needs_physical); \ diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 770f9f6aad84..e8ecbd55476e 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1245,6 +1245,7 @@ struct intel_digital_port { bool tc_legacy_port:1; char tc_port_name[8]; enum tc_port_mode tc_mode; + enum phy_fia tc_phy_fia; void (*write_infoframe)(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, -- cgit v1.2.3 From 139ab811511ce46d29dd0a2a861d0ac0f04d2fe6 Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Thu, 11 Jul 2019 22:57:06 -0700 Subject: drm/i915/tgl: add modular FIA to device info Tiger Lake has modular FIA bit indicating if we are using it, so add to the device info. 
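For illustration only, the FIA selection described across these two patches reduces to a small amount of logic: read the MF bit once from the first FIA instance, and if it is set, map each pair of Type-C ports onto its own FIA. The sketch below restates that mapping in standalone C; the names are simplified stand-ins, not the driver's real helpers (those live in intel_tc.c as has_modular_fia() and tc_port_to_fia()).

/* Sketch of the Type-C port -> FIA instance mapping (simplified stand-in). */
enum phy_fia_sketch { FIA1_S, FIA2_S, FIA3_S };

static enum phy_fia_sketch tc_port_to_fia_sketch(int tc_port, int has_modular_fia)
{
	/* Without modular FIA, every TC port sits behind the single FIA1. */
	if (!has_modular_fia)
		return FIA1_S;

	/*
	 * Each modular FIA instance houses two TC ports, so ports 0-1 use
	 * FIA1, ports 2-3 use FIA2, and so on.
	 */
	return (enum phy_fia_sketch)(tc_port / 2);
}

In the driver itself the modular-FIA boolean comes from the MODULAR_FIA_MASK bit of PORT_TX_DFLEXDPSP(FIA1), gated on the display.has_modular_fia device-info flag that the patch below sets for Tiger Lake.
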
Signed-off-by: Lucas De Marchi Reviewed-by: Stuart Summers Link: https://patchwork.freedesktop.org/patch/msgid/20190712055706.12143-3-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/i915_pci.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 95763ad92287..40076ba431d4 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -790,6 +790,7 @@ static const struct intel_device_info intel_tigerlake_12_info = { PLATFORM(INTEL_TIGERLAKE), .num_pipes = 4, .require_force_probe = 1, + .display.has_modular_fia = 1, .engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), }; -- cgit v1.2.3 From cb823ed9915b0d4064f3f51e936fbe13c089948a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 12 Jul 2019 20:29:53 +0100 Subject: drm/i915/gt: Use intel_gt as the primary object for handling resets Having taken the first step in encapsulating the functionality by moving the related files under gt/, the next step is to start encapsulating by passing around the relevant structs rather than the global drm_i915_private. In this step, we pass intel_gt to intel_reset.c Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Cc: Daniele Ceraolo Spurio Reviewed-by: Daniele Ceraolo Spurio Link: https://patchwork.freedesktop.org/patch/msgid/20190712192953.9187-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/display/intel_display.c | 22 +- drivers/gpu/drm/i915/gem/i915_gem_context.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_mman.c | 8 +- drivers/gpu/drm/i915/gem/i915_gem_pm.c | 25 +- drivers/gpu/drm/i915/gem/i915_gem_throttle.c | 2 +- drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 20 +- .../drm/i915/gem/selftests/i915_gem_client_blt.c | 4 +- .../drm/i915/gem/selftests/i915_gem_coherency.c | 6 +- .../gpu/drm/i915/gem/selftests/i915_gem_context.c | 17 +- drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 2 +- .../drm/i915/gem/selftests/i915_gem_object_blt.c | 4 +- drivers/gpu/drm/i915/gt/intel_engine.h | 8 +- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 16 +- drivers/gpu/drm/i915/gt/intel_engine_pm.c | 3 +- drivers/gpu/drm/i915/gt/intel_gt.c | 7 + drivers/gpu/drm/i915/gt/intel_gt.h | 12 + drivers/gpu/drm/i915/gt/intel_gt_pm.c | 22 +- drivers/gpu/drm/i915/gt/intel_gt_types.h | 12 + drivers/gpu/drm/i915/gt/intel_hangcheck.c | 68 +-- drivers/gpu/drm/i915/gt/intel_lrc.c | 2 +- drivers/gpu/drm/i915/gt/intel_reset.c | 440 ++++++++++---------- drivers/gpu/drm/i915/gt/intel_reset.h | 73 ++-- drivers/gpu/drm/i915/gt/intel_reset_types.h | 50 +++ drivers/gpu/drm/i915/gt/intel_ringbuffer.c | 2 +- drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 461 +++++++++++---------- drivers/gpu/drm/i915/gt/selftest_lrc.c | 38 +- drivers/gpu/drm/i915/gt/selftest_reset.c | 97 +++-- drivers/gpu/drm/i915/gt/selftest_timeline.c | 3 +- drivers/gpu/drm/i915/gt/selftest_workarounds.c | 33 +- drivers/gpu/drm/i915/i915_debugfs.c | 63 ++- drivers/gpu/drm/i915/i915_drv.c | 5 +- drivers/gpu/drm/i915/i915_drv.h | 35 +- drivers/gpu/drm/i915/i915_gem.c | 31 +- drivers/gpu/drm/i915/i915_gpu_error.h | 52 +-- drivers/gpu/drm/i915/i915_request.c | 5 +- drivers/gpu/drm/i915/i915_selftest.h | 9 + drivers/gpu/drm/i915/intel_guc_submission.c | 2 +- drivers/gpu/drm/i915/intel_uc.c | 2 +- drivers/gpu/drm/i915/selftests/i915_active.c | 3 +- drivers/gpu/drm/i915/selftests/i915_gem.c | 3 +- drivers/gpu/drm/i915/selftests/i915_gem_evict.c | 3 +- 
drivers/gpu/drm/i915/selftests/i915_request.c | 4 +- drivers/gpu/drm/i915/selftests/i915_selftest.c | 23 +- drivers/gpu/drm/i915/selftests/igt_flush_test.c | 5 +- drivers/gpu/drm/i915/selftests/igt_reset.c | 38 +- drivers/gpu/drm/i915/selftests/igt_reset.h | 10 +- drivers/gpu/drm/i915/selftests/igt_wedge_me.h | 58 --- drivers/gpu/drm/i915/selftests/mock_gem_device.c | 5 - 49 files changed, 903 insertions(+), 914 deletions(-) create mode 100644 drivers/gpu/drm/i915/gt/intel_reset_types.h delete mode 100644 drivers/gpu/drm/i915/selftests/igt_wedge_me.h (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 9758523f0837..e25b82d07d4f 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -4271,12 +4271,13 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv) return; /* We have a modeset vs reset deadlock, defensively unbreak it. */ - set_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags); - wake_up_all(&dev_priv->gpu_error.wait_queue); + set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags); + smp_mb__after_atomic(); + wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET); if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) { DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n"); - i915_gem_set_wedged(dev_priv); + intel_gt_set_wedged(&dev_priv->gt); } /* @@ -4322,7 +4323,7 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) int ret; /* reset doesn't touch the display */ - if (!test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags)) + if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags)) return; state = fetch_and_zero(&dev_priv->modeset_restore_state); @@ -4362,7 +4363,7 @@ unlock: drm_modeset_acquire_fini(ctx); mutex_unlock(&dev->mode_config.mutex); - clear_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags); + clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags); } static void icl_set_pipe_chicken(struct intel_crtc *crtc) @@ -13873,18 +13874,21 @@ static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_stat for (;;) { prepare_to_wait(&intel_state->commit_ready.wait, &wait_fence, TASK_UNINTERRUPTIBLE); - prepare_to_wait(&dev_priv->gpu_error.wait_queue, + prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags, + I915_RESET_MODESET), &wait_reset, TASK_UNINTERRUPTIBLE); - if (i915_sw_fence_done(&intel_state->commit_ready) - || test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags)) + if (i915_sw_fence_done(&intel_state->commit_ready) || + test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags)) break; schedule(); } finish_wait(&intel_state->commit_ready.wait, &wait_fence); - finish_wait(&dev_priv->gpu_error.wait_queue, &wait_reset); + finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags, + I915_RESET_MODESET), + &wait_reset); } static void intel_atomic_cleanup_work(struct work_struct *work) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index 078592912d97..c5f8bfa3f7b0 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -2127,7 +2127,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN) return -EINVAL; - ret = i915_terminally_wedged(i915); + ret = intel_gt_terminally_wedged(&i915->gt); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 
b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 1c5dfbfad71b..8a2047c4e7c3 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -2130,7 +2130,7 @@ static int eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce) * ABI: Before userspace accesses the GPU (e.g. execbuffer), report * EIO if the GPU is already wedged. */ - err = i915_terminally_wedged(eb->i915); + err = intel_gt_terminally_wedged(ce->engine->gt); if (err) return err; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index 391621ee3cbb..a564c1e4231b 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -7,6 +7,8 @@ #include #include +#include "gt/intel_gt.h" + #include "i915_drv.h" #include "i915_gem_gtt.h" #include "i915_gem_ioctls.h" @@ -246,7 +248,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf) wakeref = intel_runtime_pm_get(rpm); - srcu = i915_reset_trylock(i915); + srcu = intel_gt_reset_trylock(ggtt->vm.gt); if (srcu < 0) { ret = srcu; goto err_rpm; @@ -326,7 +328,7 @@ err_unpin: err_unlock: mutex_unlock(&dev->struct_mutex); err_reset: - i915_reset_unlock(i915, srcu); + intel_gt_reset_unlock(ggtt->vm.gt, srcu); err_rpm: intel_runtime_pm_put(rpm, wakeref); i915_gem_object_unpin_pages(obj); @@ -339,7 +341,7 @@ err: * fail). But any other -EIO isn't ours (e.g. swap in failure) * and so needs to be reported. */ - if (!i915_terminally_wedged(i915)) + if (!intel_gt_is_wedged(ggtt->vm.gt)) return VM_FAULT_SIGBUS; /* else: fall through */ case -EAGAIN: diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c index 4d774376f5b8..9ee6edbad4c5 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c @@ -5,6 +5,7 @@ */ #include "gem/i915_gem_pm.h" +#include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" #include "i915_drv.h" @@ -106,18 +107,18 @@ static int pm_notifier(struct notifier_block *nb, return NOTIFY_OK; } -static bool switch_to_kernel_context_sync(struct drm_i915_private *i915) +static bool switch_to_kernel_context_sync(struct intel_gt *gt) { - bool result = !i915_terminally_wedged(i915); + bool result = !intel_gt_is_wedged(gt); do { - if (i915_gem_wait_for_idle(i915, + if (i915_gem_wait_for_idle(gt->i915, I915_WAIT_LOCKED | I915_WAIT_FOR_IDLE_BOOST, I915_GEM_IDLE_TIMEOUT) == -ETIME) { /* XXX hide warning from gem_eio */ if (i915_modparams.reset) { - dev_err(i915->drm.dev, + dev_err(gt->i915->drm.dev, "Failed to idle engines, declaring wedged!\n"); GEM_TRACE_DUMP(); } @@ -126,18 +127,18 @@ static bool switch_to_kernel_context_sync(struct drm_i915_private *i915) * Forcibly cancel outstanding work and leave * the gpu quiet. */ - i915_gem_set_wedged(i915); + intel_gt_set_wedged(gt); result = false; } - } while (i915_retire_requests(i915) && result); + } while (i915_retire_requests(gt->i915) && result); - GEM_BUG_ON(i915->gt.awake); + GEM_BUG_ON(gt->awake); return result; } bool i915_gem_load_power_context(struct drm_i915_private *i915) { - return switch_to_kernel_context_sync(i915); + return switch_to_kernel_context_sync(&i915->gt); } void i915_gem_suspend(struct drm_i915_private *i915) @@ -158,7 +159,7 @@ void i915_gem_suspend(struct drm_i915_private *i915) * state. Fortunately, the kernel_context is disposable and we do * not rely on its state. 
*/ - switch_to_kernel_context_sync(i915); + switch_to_kernel_context_sync(&i915->gt); mutex_unlock(&i915->drm.struct_mutex); @@ -169,7 +170,7 @@ void i915_gem_suspend(struct drm_i915_private *i915) GEM_BUG_ON(i915->gt.awake); flush_work(&i915->gem.idle_work); - cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work); + cancel_delayed_work_sync(&i915->gt.hangcheck.work); i915_gem_drain_freed_objects(i915); @@ -277,10 +278,10 @@ out_unlock: return; err_wedged: - if (!i915_reset_failed(i915)) { + if (!intel_gt_is_wedged(&i915->gt)) { dev_err(i915->drm.dev, "Failed to re-initialize GPU, declaring it wedged!\n"); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(&i915->gt); } goto out_unlock; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c index adb3074d9ce2..1e372420771b 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c @@ -41,7 +41,7 @@ i915_gem_throttle_ioctl(struct drm_device *dev, void *data, long ret; /* ABI: return -EIO if already wedged */ - ret = i915_terminally_wedged(to_i915(dev)); + ret = intel_gt_terminally_wedged(&to_i915(dev)->gt); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c index 86eed4c3ae2b..6cbd4a668c9a 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c @@ -1753,7 +1753,7 @@ out_unlock: return err; } -int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv) +int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(igt_shrink_thp), @@ -1768,22 +1768,22 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv) intel_wakeref_t wakeref; int err; - if (!HAS_PPGTT(dev_priv)) { + if (!HAS_PPGTT(i915)) { pr_info("PPGTT not supported, skipping live-selftests\n"); return 0; } - if (i915_terminally_wedged(dev_priv)) + if (intel_gt_is_wedged(&i915->gt)) return 0; - file = mock_file(dev_priv); + file = mock_file(i915); if (IS_ERR(file)) return PTR_ERR(file); - mutex_lock(&dev_priv->drm.struct_mutex); - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); + mutex_lock(&i915->drm.struct_mutex); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); - ctx = live_context(dev_priv, file); + ctx = live_context(i915, file); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); goto out_unlock; @@ -1795,10 +1795,10 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv) err = i915_subtests(tests, ctx); out_unlock: - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); - mutex_unlock(&dev_priv->drm.struct_mutex); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); + mutex_unlock(&i915->drm.struct_mutex); - mock_file_free(dev_priv, file); + mock_file_free(i915, file); return err; } diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c index fa79233093eb..275c28926067 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c @@ -5,6 +5,8 @@ #include "i915_selftest.h" +#include "gt/intel_gt.h" + #include "selftests/igt_flush_test.h" #include "selftests/mock_drm.h" #include "mock_context.h" @@ -101,7 +103,7 @@ int i915_gem_client_blt_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_client_fill), }; - if (i915_terminally_wedged(i915)) + if (intel_gt_is_wedged(&i915->gt)) 
return 0; if (!HAS_ENGINE(i915, BCS0)) diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c index 861f32be7d46..a1a4b53cdc4a 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c @@ -6,6 +6,8 @@ #include +#include "gt/intel_gt.h" + #include "i915_selftest.h" #include "selftests/i915_random.h" @@ -242,12 +244,12 @@ static bool always_valid(struct drm_i915_private *i915) static bool needs_fence_registers(struct drm_i915_private *i915) { - return !i915_terminally_wedged(i915); + return !intel_gt_is_wedged(&i915->gt); } static bool needs_mi_store_dword(struct drm_i915_private *i915) { - if (i915_terminally_wedged(i915)) + if (intel_gt_is_wedged(&i915->gt)) return false; if (!HAS_ENGINE(i915, RCS0)) diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index 695bfb18b0d4..db7856f0f31e 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -7,6 +7,7 @@ #include #include "gem/i915_gem_pm.h" +#include "gt/intel_gt.h" #include "gt/intel_reset.h" #include "i915_selftest.h" @@ -83,7 +84,7 @@ static int live_nop_switch(void *arg) } if (i915_request_wait(rq, 0, HZ / 5) < 0) { pr_err("Failed to populated %d contexts\n", nctx); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(&i915->gt); err = -EIO; goto out_unlock; } @@ -127,7 +128,7 @@ static int live_nop_switch(void *arg) if (i915_request_wait(rq, 0, HZ / 5) < 0) { pr_err("Switching between %ld contexts timed out\n", prime); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(&i915->gt); break; } @@ -956,7 +957,7 @@ __sseu_finish(struct drm_i915_private *i915, int ret = 0; if (flags & TEST_RESET) { - ret = i915_reset_engine(ce->engine, "sseu"); + ret = intel_engine_reset(ce->engine, "sseu"); if (ret) goto out; } @@ -1059,7 +1060,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915, return PTR_ERR(file); if (flags & TEST_RESET) - igt_global_reset_lock(i915); + igt_global_reset_lock(&i915->gt); mutex_lock(&i915->drm.struct_mutex); @@ -1120,7 +1121,7 @@ out_unlock: mutex_unlock(&i915->drm.struct_mutex); if (flags & TEST_RESET) - igt_global_reset_unlock(i915); + igt_global_reset_unlock(&i915->gt); mock_file_free(i915, file); @@ -1722,7 +1723,7 @@ int i915_gem_context_mock_selftests(void) return err; } -int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv) +int i915_gem_context_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(live_nop_switch), @@ -1733,8 +1734,8 @@ int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv) SUBTEST(igt_vm_isolation), }; - if (i915_terminally_wedged(dev_priv)) + if (intel_gt_is_wedged(&i915->gt)) return 0; - return i915_live_subtests(tests, dev_priv); + return i915_live_subtests(tests, i915); } diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index 5635cbb4af22..01857c12f12f 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -478,7 +478,7 @@ static int igt_mmap_offset_exhaustion(void *arg) /* Now fill with busy dead objects that we expect to reap */ for (loop = 0; loop < 3; loop++) { - if (i915_terminally_wedged(i915)) + if (intel_gt_is_wedged(&i915->gt)) break; obj = i915_gem_object_create_internal(i915, PAGE_SIZE); 
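
Stepping back from the mechanical conversions in this patch: the pattern is simply that reset and hangcheck state now hang off the narrower intel_gt structure (which keeps a back-pointer to the device), and helpers take that structure instead of the global drm_i915_private. Below is a minimal, self-contained sketch of the idea, using hypothetical names rather than the driver's actual definitions (the real ones are added in gt/intel_gt_types.h and gt/intel_reset.* by this patch).

#include <stdbool.h>

/* Stand-ins for the narrower per-GT state introduced by this series. */
struct sketch_reset {
	unsigned long flags;		/* wedged / reset-in-progress bits */
};

struct sketch_gt {
	struct sketch_device *i915;	/* back-pointer kept for legacy call sites */
	struct sketch_reset reset;
};

#define SKETCH_WEDGED_BIT 0

/* Helpers are keyed on the GT, mirroring the new intel_gt_is_wedged(). */
static bool sketch_gt_is_wedged(const struct sketch_gt *gt)
{
	return gt->reset.flags & (1UL << SKETCH_WEDGED_BIT);
}

Call sites that used to take the whole device (i915_terminally_wedged(i915), igt_global_reset_lock(i915), ...) become GT-based (intel_gt_is_wedged(&i915->gt), igt_global_reset_lock(&i915->gt)), which is exactly the shape of the surrounding hunks.
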
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c index 11d37238c62c..19843acc84d3 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c @@ -3,6 +3,8 @@ * Copyright © 2019 Intel Corporation */ +#include "gt/intel_gt.h" + #include "i915_selftest.h" #include "selftests/igt_flush_test.h" @@ -95,7 +97,7 @@ int i915_gem_object_blt_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_fill_blt), }; - if (i915_terminally_wedged(i915)) + if (intel_gt_is_wedged(&i915->gt)) return 0; if (!HAS_ENGINE(i915, BCS0)) diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h index 0331e9ac2485..db5c73ce86ee 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine.h +++ b/drivers/gpu/drm/i915/gt/intel_engine.h @@ -411,8 +411,8 @@ gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags) return cs; } -static inline void intel_engine_reset(struct intel_engine_cs *engine, - bool stalled) +static inline void __intel_engine_reset(struct intel_engine_cs *engine, + bool stalled) { if (engine->reset.reset) engine->reset.reset(engine, stalled); @@ -420,9 +420,9 @@ static inline void intel_engine_reset(struct intel_engine_cs *engine, } bool intel_engine_is_idle(struct intel_engine_cs *engine); -bool intel_engines_are_idle(struct drm_i915_private *dev_priv); +bool intel_engines_are_idle(struct intel_gt *gt); -void intel_engines_reset_default_submission(struct drm_i915_private *i915); +void intel_engines_reset_default_submission(struct intel_gt *gt); unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915); bool intel_engine_can_store_dword(struct intel_engine_cs *engine); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 2dc1917b9d30..022e00eb79ad 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -1138,7 +1138,7 @@ static bool ring_is_idle(struct intel_engine_cs *engine) bool intel_engine_is_idle(struct intel_engine_cs *engine) { /* More white lies, if wedged, hw state is inconsistent */ - if (i915_reset_failed(engine->i915)) + if (intel_gt_is_wedged(engine->gt)) return true; if (!intel_engine_pm_is_awake(engine)) @@ -1174,7 +1174,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine) return ring_is_idle(engine); } -bool intel_engines_are_idle(struct drm_i915_private *i915) +bool intel_engines_are_idle(struct intel_gt *gt) { struct intel_engine_cs *engine; enum intel_engine_id id; @@ -1183,14 +1183,14 @@ bool intel_engines_are_idle(struct drm_i915_private *i915) * If the driver is wedged, HW state may be very inconsistent and * report that it is still busy, even though we have stopped using it. 
*/ - if (i915_reset_failed(i915)) + if (intel_gt_is_wedged(gt)) return true; /* Already parked (and passed an idleness test); must still be idle */ - if (!READ_ONCE(i915->gt.awake)) + if (!READ_ONCE(gt->awake)) return true; - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt->i915, id) { if (!intel_engine_is_idle(engine)) return false; } @@ -1198,12 +1198,12 @@ bool intel_engines_are_idle(struct drm_i915_private *i915) return true; } -void intel_engines_reset_default_submission(struct drm_i915_private *i915) +void intel_engines_reset_default_submission(struct intel_gt *gt) { struct intel_engine_cs *engine; enum intel_engine_id id; - for_each_engine(engine, i915, id) + for_each_engine(engine, gt->i915, id) engine->set_default_submission(engine); } @@ -1480,7 +1480,7 @@ void intel_engine_dump(struct intel_engine_cs *engine, va_end(ap); } - if (i915_reset_failed(engine->i915)) + if (intel_gt_is_wedged(engine->gt)) drm_printf(m, "*** WEDGED ***\n"); drm_printf(m, "\tAwake? %d\n", atomic_read(&engine->wakeref.count)); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c index 84e432abe8e0..e74fbf04a68d 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c @@ -8,6 +8,7 @@ #include "intel_engine.h" #include "intel_engine_pm.h" +#include "intel_gt.h" #include "intel_gt_pm.h" static int __engine_unpark(struct intel_wakeref *wf) @@ -66,7 +67,7 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine) return true; /* GPU is pointing to the void, as good as in the kernel context. */ - if (i915_reset_failed(engine->i915)) + if (intel_gt_is_wedged(engine->gt)) return true; /* diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index 8cca6b22b386..f7e69db4019d 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -19,6 +19,8 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915) spin_lock_init(>->closed_lock); + intel_gt_init_hangcheck(gt); + intel_gt_init_reset(gt); intel_gt_pm_init_early(gt); } @@ -241,3 +243,8 @@ void intel_gt_fini_scratch(struct intel_gt *gt) { i915_vma_unpin_and_release(>->scratch, 0); } + +void intel_gt_cleanup_early(struct intel_gt *gt) +{ + intel_gt_fini_reset(gt); +} diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h index 1093dcf36f63..49c0085385a0 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.h +++ b/drivers/gpu/drm/i915/gt/intel_gt.h @@ -8,12 +8,15 @@ #include "intel_engine_types.h" #include "intel_gt_types.h" +#include "intel_reset.h" struct drm_i915_private; void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915); void intel_gt_init_hw(struct drm_i915_private *i915); +void intel_gt_cleanup_early(struct intel_gt *gt); + void intel_gt_check_and_clear_faults(struct intel_gt *gt); void intel_gt_clear_error_registers(struct intel_gt *gt, intel_engine_mask_t engine_mask); @@ -21,6 +24,8 @@ void intel_gt_clear_error_registers(struct intel_gt *gt, void intel_gt_flush_ggtt_writes(struct intel_gt *gt); void intel_gt_chipset_flush(struct intel_gt *gt); +void intel_gt_init_hangcheck(struct intel_gt *gt); + int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size); void intel_gt_fini_scratch(struct intel_gt *gt); @@ -30,4 +35,11 @@ static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt, return i915_ggtt_offset(gt->scratch) + field; } +static inline bool intel_gt_is_wedged(struct intel_gt *gt) +{ 
+ return __intel_reset_failed(>->reset); +} + +void intel_gt_queue_hangcheck(struct intel_gt *gt); + #endif /* __INTEL_GT_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index da81b3a92d16..61ed912341f1 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -5,7 +5,9 @@ */ #include "i915_drv.h" +#include "i915_params.h" #include "intel_engine_pm.h" +#include "intel_gt.h" #include "intel_gt_pm.h" #include "intel_pm.h" #include "intel_wakeref.h" @@ -17,8 +19,8 @@ static void pm_notify(struct drm_i915_private *i915, int state) static int intel_gt_unpark(struct intel_wakeref *wf) { - struct drm_i915_private *i915 = - container_of(wf, typeof(*i915), gt.wakeref); + struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref); + struct drm_i915_private *i915 = gt->i915; GEM_TRACE("\n"); @@ -33,8 +35,8 @@ static int intel_gt_unpark(struct intel_wakeref *wf) * Work around it by grabbing a GT IRQ power domain whilst there is any * GT activity, preventing any DC state transitions. */ - i915->gt.awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ); - GEM_BUG_ON(!i915->gt.awake); + gt->awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ); + GEM_BUG_ON(!gt->awake); intel_enable_gt_powersave(i915); @@ -44,7 +46,7 @@ static int intel_gt_unpark(struct intel_wakeref *wf) i915_pmu_gt_unparked(i915); - i915_queue_hangcheck(i915); + intel_gt_queue_hangcheck(gt); pm_notify(i915, INTEL_GT_UNPARK); @@ -91,12 +93,12 @@ void intel_gt_pm_init_early(struct intel_gt *gt) BLOCKING_INIT_NOTIFIER_HEAD(>->pm_notifications); } -static bool reset_engines(struct drm_i915_private *i915) +static bool reset_engines(struct intel_gt *gt) { - if (INTEL_INFO(i915)->gpu_reset_clobbers_display) + if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display) return false; - return intel_gpu_reset(i915, ALL_ENGINES) == 0; + return __intel_gt_reset(gt, ALL_ENGINES) == 0; } /** @@ -116,11 +118,11 @@ void intel_gt_sanitize(struct intel_gt *gt, bool force) GEM_TRACE("\n"); - if (!reset_engines(gt->i915) && !force) + if (!reset_engines(gt) && !force) return; for_each_engine(engine, gt->i915, id) - intel_engine_reset(engine, false); + __intel_engine_reset(engine, false); } int intel_gt_resume(struct intel_gt *gt) diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h index 3563ce970102..1ffbc3ec6ef3 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h @@ -14,12 +14,21 @@ #include #include "i915_vma.h" +#include "intel_reset_types.h" #include "intel_wakeref.h" struct drm_i915_private; struct i915_ggtt; struct intel_uncore; +struct intel_hangcheck { + /* For hangcheck timer */ +#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ +#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD) + + struct delayed_work work; +}; + struct intel_gt { struct drm_i915_private *i915; struct intel_uncore *uncore; @@ -41,6 +50,9 @@ struct intel_gt { struct list_head closed_vma; spinlock_t closed_lock; /* guards the list of closed_vma */ + struct intel_hangcheck hangcheck; + struct intel_reset reset; + /** * Is the GPU currently considered idle, or busy executing * userspace requests? 
Whilst idle, we allow runtime power diff --git a/drivers/gpu/drm/i915/gt/intel_hangcheck.c b/drivers/gpu/drm/i915/gt/intel_hangcheck.c index 797d8ef0969c..05d042cdefe2 100644 --- a/drivers/gpu/drm/i915/gt/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/intel_hangcheck.c @@ -22,8 +22,10 @@ * */ -#include "intel_reset.h" #include "i915_drv.h" +#include "intel_engine.h" +#include "intel_gt.h" +#include "intel_reset.h" struct hangcheck { u64 acthd; @@ -100,7 +102,6 @@ head_stuck(struct intel_engine_cs *engine, u64 acthd) static enum intel_engine_hangcheck_action engine_stuck(struct intel_engine_cs *engine, u64 acthd) { - struct drm_i915_private *dev_priv = engine->i915; enum intel_engine_hangcheck_action ha; u32 tmp; @@ -108,7 +109,7 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd) if (ha != ENGINE_DEAD) return ha; - if (IS_GEN(dev_priv, 2)) + if (IS_GEN(engine->i915, 2)) return ENGINE_DEAD; /* Is the chip hanging on a WAIT_FOR_EVENT? @@ -118,8 +119,8 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd) */ tmp = ENGINE_READ(engine, RING_CTL); if (tmp & RING_WAIT) { - i915_handle_error(dev_priv, engine->mask, 0, - "stuck wait on %s", engine->name); + intel_gt_handle_error(engine->gt, engine->mask, 0, + "stuck wait on %s", engine->name); ENGINE_WRITE(engine, RING_CTL, tmp); return ENGINE_WAIT_KICK; } @@ -219,7 +220,7 @@ static void hangcheck_accumulate_sample(struct intel_engine_cs *engine, I915_ENGINE_WEDGED_TIMEOUT); } -static void hangcheck_declare_hang(struct drm_i915_private *i915, +static void hangcheck_declare_hang(struct intel_gt *gt, intel_engine_mask_t hung, intel_engine_mask_t stuck) { @@ -235,12 +236,12 @@ static void hangcheck_declare_hang(struct drm_i915_private *i915, hung &= ~stuck; len = scnprintf(msg, sizeof(msg), "%s on ", stuck == hung ? "no progress" : "hang"); - for_each_engine_masked(engine, i915, hung, tmp) + for_each_engine_masked(engine, gt->i915, hung, tmp) len += scnprintf(msg + len, sizeof(msg) - len, "%s, ", engine->name); msg[len-2] = '\0'; - return i915_handle_error(i915, hung, I915_ERROR_CAPTURE, "%s", msg); + return intel_gt_handle_error(gt, hung, I915_ERROR_CAPTURE, "%s", msg); } /* @@ -251,11 +252,10 @@ static void hangcheck_declare_hang(struct drm_i915_private *i915, * we kick the ring. If we see no progress on three subsequent calls * we assume chip is wedged and try to fix it by resetting the chip. */ -static void i915_hangcheck_elapsed(struct work_struct *work) +static void hangcheck_elapsed(struct work_struct *work) { - struct drm_i915_private *dev_priv = - container_of(work, typeof(*dev_priv), - gpu_error.hangcheck_work.work); + struct intel_gt *gt = + container_of(work, typeof(*gt), hangcheck.work.work); intel_engine_mask_t hung = 0, stuck = 0, wedged = 0; struct intel_engine_cs *engine; enum intel_engine_id id; @@ -264,13 +264,13 @@ static void i915_hangcheck_elapsed(struct work_struct *work) if (!i915_modparams.enable_hangcheck) return; - if (!READ_ONCE(dev_priv->gt.awake)) + if (!READ_ONCE(gt->awake)) return; - if (i915_terminally_wedged(dev_priv)) + if (intel_gt_is_wedged(gt)) return; - wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm); + wakeref = intel_runtime_pm_get_if_in_use(>->i915->runtime_pm); if (!wakeref) return; @@ -278,9 +278,9 @@ static void i915_hangcheck_elapsed(struct work_struct *work) * periodically arm the mmio checker to see if we are triggering * any invalid access. 
*/ - intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore); + intel_uncore_arm_unclaimed_mmio_detection(gt->uncore); - for_each_engine(engine, dev_priv, id) { + for_each_engine(engine, gt->i915, id) { struct hangcheck hc; intel_engine_signal_breadcrumbs(engine); @@ -302,7 +302,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work) if (GEM_SHOW_DEBUG() && (hung | stuck)) { struct drm_printer p = drm_debug_printer("hangcheck"); - for_each_engine(engine, dev_priv, id) { + for_each_engine(engine, gt->i915, id) { if (intel_engine_is_idle(engine)) continue; @@ -311,20 +311,37 @@ static void i915_hangcheck_elapsed(struct work_struct *work) } if (wedged) { - dev_err(dev_priv->drm.dev, + dev_err(gt->i915->drm.dev, "GPU recovery timed out," " cancelling all in-flight rendering.\n"); GEM_TRACE_DUMP(); - i915_gem_set_wedged(dev_priv); + intel_gt_set_wedged(gt); } if (hung) - hangcheck_declare_hang(dev_priv, hung, stuck); + hangcheck_declare_hang(gt, hung, stuck); - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); + intel_runtime_pm_put(>->i915->runtime_pm, wakeref); /* Reset timer in case GPU hangs without another request being added */ - i915_queue_hangcheck(dev_priv); + intel_gt_queue_hangcheck(gt); +} + +void intel_gt_queue_hangcheck(struct intel_gt *gt) +{ + unsigned long delay; + + if (unlikely(!i915_modparams.enable_hangcheck)) + return; + + /* + * Don't continually defer the hangcheck so that it is always run at + * least once after work has been scheduled on any ring. Otherwise, + * we will ignore a hung ring if a second ring is kept busy. + */ + + delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES); + queue_delayed_work(system_long_wq, >->hangcheck.work, delay); } void intel_engine_init_hangcheck(struct intel_engine_cs *engine) @@ -333,10 +350,9 @@ void intel_engine_init_hangcheck(struct intel_engine_cs *engine) engine->hangcheck.action_timestamp = jiffies; } -void intel_hangcheck_init(struct drm_i915_private *i915) +void intel_gt_init_hangcheck(struct intel_gt *gt) { - INIT_DELAYED_WORK(&i915->gpu_error.hangcheck_work, - i915_hangcheck_elapsed); + INIT_DELAYED_WORK(>->hangcheck.work, hangcheck_elapsed); } #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 270ef417dd1a..9e0992498087 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -2293,7 +2293,7 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled) * and have to at least restore the RING register in the context * image back to the expected values to skip over the guilty request. */ - i915_reset_request(rq, stalled); + __i915_request_reset(rq, stalled); if (!stalled) goto out_replay; diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index adfdb908587f..7b150ec3d10a 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -114,7 +114,7 @@ static void context_mark_innocent(struct i915_gem_context *ctx) atomic_inc(&ctx->active_count); } -void i915_reset_request(struct i915_request *rq, bool guilty) +void __i915_request_reset(struct i915_request *rq, bool guilty) { GEM_TRACE("%s rq=%llx:%lld, guilty? 
%s\n", rq->engine->name, @@ -164,16 +164,15 @@ static void gen3_stop_engine(struct intel_engine_cs *engine) intel_uncore_read_fw(uncore, RING_HEAD(base))); } -static void i915_stop_engines(struct drm_i915_private *i915, - intel_engine_mask_t engine_mask) +static void stop_engines(struct intel_gt *gt, intel_engine_mask_t engine_mask) { struct intel_engine_cs *engine; intel_engine_mask_t tmp; - if (INTEL_GEN(i915) < 3) + if (INTEL_GEN(gt->i915) < 3) return; - for_each_engine_masked(engine, i915, engine_mask, tmp) + for_each_engine_masked(engine, gt->i915, engine_mask, tmp) gen3_stop_engine(engine); } @@ -185,11 +184,11 @@ static bool i915_in_reset(struct pci_dev *pdev) return gdrst & GRDOM_RESET_STATUS; } -static int i915_do_reset(struct drm_i915_private *i915, +static int i915_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask, unsigned int retry) { - struct pci_dev *pdev = i915->drm.pdev; + struct pci_dev *pdev = gt->i915->drm.pdev; int err; /* Assert reset for at least 20 usec, and wait for acknowledgement. */ @@ -214,22 +213,22 @@ static bool g4x_reset_complete(struct pci_dev *pdev) return (gdrst & GRDOM_RESET_ENABLE) == 0; } -static int g33_do_reset(struct drm_i915_private *i915, +static int g33_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask, unsigned int retry) { - struct pci_dev *pdev = i915->drm.pdev; + struct pci_dev *pdev = gt->i915->drm.pdev; pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE); return wait_for_atomic(g4x_reset_complete(pdev), 50); } -static int g4x_do_reset(struct drm_i915_private *i915, +static int g4x_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask, unsigned int retry) { - struct pci_dev *pdev = i915->drm.pdev; - struct intel_uncore *uncore = &i915->uncore; + struct pci_dev *pdev = gt->i915->drm.pdev; + struct intel_uncore *uncore = gt->uncore; int ret; /* WaVcpClkGateDisableForMediaReset:ctg,elk */ @@ -261,11 +260,11 @@ out: return ret; } -static int ironlake_do_reset(struct drm_i915_private *i915, +static int ironlake_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask, unsigned int retry) { - struct intel_uncore *uncore = &i915->uncore; + struct intel_uncore *uncore = gt->uncore; int ret; intel_uncore_write_fw(uncore, ILK_GDSR, @@ -297,10 +296,9 @@ out: } /* Reset the hardware domains (GENX_GRDOM_*) specified by mask */ -static int gen6_hw_domain_reset(struct drm_i915_private *i915, - u32 hw_domain_mask) +static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask) { - struct intel_uncore *uncore = &i915->uncore; + struct intel_uncore *uncore = gt->uncore; int err; /* @@ -322,7 +320,7 @@ static int gen6_hw_domain_reset(struct drm_i915_private *i915, return err; } -static int gen6_reset_engines(struct drm_i915_private *i915, +static int gen6_reset_engines(struct intel_gt *gt, intel_engine_mask_t engine_mask, unsigned int retry) { @@ -342,13 +340,13 @@ static int gen6_reset_engines(struct drm_i915_private *i915, intel_engine_mask_t tmp; hw_mask = 0; - for_each_engine_masked(engine, i915, engine_mask, tmp) { + for_each_engine_masked(engine, gt->i915, engine_mask, tmp) { GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask)); hw_mask |= hw_engine_mask[engine->id]; } } - return gen6_hw_domain_reset(i915, hw_mask); + return gen6_hw_domain_reset(gt, hw_mask); } static u32 gen11_lock_sfc(struct intel_engine_cs *engine) @@ -446,7 +444,7 @@ static void gen11_unlock_sfc(struct intel_engine_cs *engine) rmw_clear_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit); } -static int gen11_reset_engines(struct 
drm_i915_private *i915, +static int gen11_reset_engines(struct intel_gt *gt, intel_engine_mask_t engine_mask, unsigned int retry) { @@ -469,17 +467,17 @@ static int gen11_reset_engines(struct drm_i915_private *i915, hw_mask = GEN11_GRDOM_FULL; } else { hw_mask = 0; - for_each_engine_masked(engine, i915, engine_mask, tmp) { + for_each_engine_masked(engine, gt->i915, engine_mask, tmp) { GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask)); hw_mask |= hw_engine_mask[engine->id]; hw_mask |= gen11_lock_sfc(engine); } } - ret = gen6_hw_domain_reset(i915, hw_mask); + ret = gen6_hw_domain_reset(gt, hw_mask); if (engine_mask != ALL_ENGINES) - for_each_engine_masked(engine, i915, engine_mask, tmp) + for_each_engine_masked(engine, gt->i915, engine_mask, tmp) gen11_unlock_sfc(engine); return ret; @@ -529,7 +527,7 @@ static void gen8_engine_reset_cancel(struct intel_engine_cs *engine) _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET)); } -static int gen8_reset_engines(struct drm_i915_private *i915, +static int gen8_reset_engines(struct intel_gt *gt, intel_engine_mask_t engine_mask, unsigned int retry) { @@ -538,7 +536,7 @@ static int gen8_reset_engines(struct drm_i915_private *i915, intel_engine_mask_t tmp; int ret; - for_each_engine_masked(engine, i915, engine_mask, tmp) { + for_each_engine_masked(engine, gt->i915, engine_mask, tmp) { ret = gen8_engine_reset_prepare(engine); if (ret && !reset_non_ready) goto skip_reset; @@ -554,23 +552,23 @@ static int gen8_reset_engines(struct drm_i915_private *i915, * We rather take context corruption instead of * failed reset with a wedged driver/gpu. And * active bb execution case should be covered by - * i915_stop_engines we have before the reset. + * stop_engines() we have before the reset. */ } - if (INTEL_GEN(i915) >= 11) - ret = gen11_reset_engines(i915, engine_mask, retry); + if (INTEL_GEN(gt->i915) >= 11) + ret = gen11_reset_engines(gt, engine_mask, retry); else - ret = gen6_reset_engines(i915, engine_mask, retry); + ret = gen6_reset_engines(gt, engine_mask, retry); skip_reset: - for_each_engine_masked(engine, i915, engine_mask, tmp) + for_each_engine_masked(engine, gt->i915, engine_mask, tmp) gen8_engine_reset_cancel(engine); return ret; } -typedef int (*reset_func)(struct drm_i915_private *, +typedef int (*reset_func)(struct intel_gt *, intel_engine_mask_t engine_mask, unsigned int retry); @@ -592,15 +590,14 @@ static reset_func intel_get_gpu_reset(struct drm_i915_private *i915) return NULL; } -int intel_gpu_reset(struct drm_i915_private *i915, - intel_engine_mask_t engine_mask) +int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask) { const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1; reset_func reset; int ret = -ETIMEDOUT; int retry; - reset = intel_get_gpu_reset(i915); + reset = intel_get_gpu_reset(gt->i915); if (!reset) return -ENODEV; @@ -608,7 +605,7 @@ int intel_gpu_reset(struct drm_i915_private *i915, * If the power well sleeps during the reset, the reset * request may be dropped and never completes (causing -EIO). 
*/ - intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL); + intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL); for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) { /* * We stop engines, otherwise we might get failed reset and a @@ -625,14 +622,14 @@ int intel_gpu_reset(struct drm_i915_private *i915, * FIXME: Wa for more modern gens needs to be validated */ if (retry) - i915_stop_engines(i915, engine_mask); + stop_engines(gt, engine_mask); GEM_TRACE("engine_mask=%x\n", engine_mask); preempt_disable(); - ret = reset(i915, engine_mask, retry); + ret = reset(gt, engine_mask, retry); preempt_enable(); } - intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL); + intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL); return ret; } @@ -650,17 +647,17 @@ bool intel_has_reset_engine(struct drm_i915_private *i915) return INTEL_INFO(i915)->has_reset_engine && i915_modparams.reset >= 2; } -int intel_reset_guc(struct drm_i915_private *i915) +int intel_reset_guc(struct intel_gt *gt) { u32 guc_domain = - INTEL_GEN(i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC; + INTEL_GEN(gt->i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC; int ret; - GEM_BUG_ON(!HAS_GUC(i915)); + GEM_BUG_ON(!HAS_GUC(gt->i915)); - intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL); - ret = gen6_hw_domain_reset(i915, guc_domain); - intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL); + intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL); + ret = gen6_hw_domain_reset(gt, guc_domain); + intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL); return ret; } @@ -682,56 +679,55 @@ static void reset_prepare_engine(struct intel_engine_cs *engine) engine->reset.prepare(engine); } -static void revoke_mmaps(struct drm_i915_private *i915) +static void revoke_mmaps(struct intel_gt *gt) { int i; - for (i = 0; i < i915->ggtt.num_fences; i++) { + for (i = 0; i < gt->ggtt->num_fences; i++) { struct drm_vma_offset_node *node; struct i915_vma *vma; u64 vma_offset; - vma = READ_ONCE(i915->ggtt.fence_regs[i].vma); + vma = READ_ONCE(gt->ggtt->fence_regs[i].vma); if (!vma) continue; if (!i915_vma_has_userfault(vma)) continue; - GEM_BUG_ON(vma->fence != &i915->ggtt.fence_regs[i]); + GEM_BUG_ON(vma->fence != >->ggtt->fence_regs[i]); node = &vma->obj->base.vma_node; vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT; - unmap_mapping_range(i915->drm.anon_inode->i_mapping, + unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping, drm_vma_node_offset_addr(node) + vma_offset, vma->size, 1); } } -static intel_engine_mask_t reset_prepare(struct drm_i915_private *i915) +static intel_engine_mask_t reset_prepare(struct intel_gt *gt) { struct intel_engine_cs *engine; intel_engine_mask_t awake = 0; enum intel_engine_id id; - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt->i915, id) { if (intel_engine_pm_get_if_awake(engine)) awake |= engine->mask; reset_prepare_engine(engine); } - intel_uc_reset_prepare(i915); + intel_uc_reset_prepare(gt->i915); return awake; } -static void gt_revoke(struct drm_i915_private *i915) +static void gt_revoke(struct intel_gt *gt) { - revoke_mmaps(i915); + revoke_mmaps(gt); } -static int gt_reset(struct drm_i915_private *i915, - intel_engine_mask_t stalled_mask) +static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask) { struct intel_engine_cs *engine; enum intel_engine_id id; @@ -741,14 +737,14 @@ static int gt_reset(struct drm_i915_private *i915, * Everything depends on having the GTT running, so we need to start * there. 
*/ - err = i915_ggtt_enable_hw(i915); + err = i915_ggtt_enable_hw(gt->i915); if (err) return err; - for_each_engine(engine, i915, id) - intel_engine_reset(engine, stalled_mask & engine->mask); + for_each_engine(engine, gt->i915, id) + __intel_engine_reset(engine, stalled_mask & engine->mask); - i915_gem_restore_fences(i915); + i915_gem_restore_fences(gt->i915); return err; } @@ -761,13 +757,12 @@ static void reset_finish_engine(struct intel_engine_cs *engine) intel_engine_signal_breadcrumbs(engine); } -static void reset_finish(struct drm_i915_private *i915, - intel_engine_mask_t awake) +static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake) { struct intel_engine_cs *engine; enum intel_engine_id id; - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt->i915, id) { reset_finish_engine(engine); if (awake & engine->mask) intel_engine_pm_put(engine); @@ -791,20 +786,19 @@ static void nop_submit_request(struct i915_request *request) intel_engine_queue_breadcrumbs(engine); } -static void __i915_gem_set_wedged(struct drm_i915_private *i915) +static void __intel_gt_set_wedged(struct intel_gt *gt) { - struct i915_gpu_error *error = &i915->gpu_error; struct intel_engine_cs *engine; intel_engine_mask_t awake; enum intel_engine_id id; - if (test_bit(I915_WEDGED, &error->flags)) + if (test_bit(I915_WEDGED, >->reset.flags)) return; - if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(i915)) { + if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(gt)) { struct drm_printer p = drm_debug_printer(__func__); - for_each_engine(engine, i915, id) + for_each_engine(engine, gt->i915, id) intel_engine_dump(engine, &p, "%s\n", engine->name); } @@ -815,17 +809,17 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915) * rolling the global seqno forward (since this would complete requests * for which we haven't set the fence error to EIO yet). */ - awake = reset_prepare(i915); + awake = reset_prepare(gt); /* Even if the GPU reset fails, it should still stop the engines */ - if (!INTEL_INFO(i915)->gpu_reset_clobbers_display) - intel_gpu_reset(i915, ALL_ENGINES); + if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display) + __intel_gt_reset(gt, ALL_ENGINES); - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt->i915, id) { engine->submit_request = nop_submit_request; engine->schedule = NULL; } - i915->caps.scheduler = 0; + gt->i915->caps.scheduler = 0; /* * Make sure no request can slip through without getting completed by @@ -833,37 +827,36 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915) * in nop_submit_request. 
*/ synchronize_rcu_expedited(); - set_bit(I915_WEDGED, &error->flags); + set_bit(I915_WEDGED, >->reset.flags); /* Mark all executing requests as skipped */ - for_each_engine(engine, i915, id) + for_each_engine(engine, gt->i915, id) engine->cancel_requests(engine); - reset_finish(i915, awake); + reset_finish(gt, awake); GEM_TRACE("end\n"); } -void i915_gem_set_wedged(struct drm_i915_private *i915) +void intel_gt_set_wedged(struct intel_gt *gt) { - struct i915_gpu_error *error = &i915->gpu_error; intel_wakeref_t wakeref; - mutex_lock(&error->wedge_mutex); - with_intel_runtime_pm(&i915->runtime_pm, wakeref) - __i915_gem_set_wedged(i915); - mutex_unlock(&error->wedge_mutex); + mutex_lock(>->reset.mutex); + with_intel_runtime_pm(>->i915->runtime_pm, wakeref) + __intel_gt_set_wedged(gt); + mutex_unlock(>->reset.mutex); } -static bool __i915_gem_unset_wedged(struct drm_i915_private *i915) +static bool __intel_gt_unset_wedged(struct intel_gt *gt) { - struct i915_gpu_error *error = &i915->gpu_error; + struct intel_gt_timelines *timelines = >->timelines; struct intel_timeline *tl; - if (!test_bit(I915_WEDGED, &error->flags)) + if (!test_bit(I915_WEDGED, >->reset.flags)) return true; - if (!i915->gt.scratch) /* Never full initialised, recovery impossible */ + if (!gt->scratch) /* Never full initialised, recovery impossible */ return false; GEM_TRACE("start\n"); @@ -878,8 +871,8 @@ static bool __i915_gem_unset_wedged(struct drm_i915_private *i915) * * No more can be submitted until we reset the wedged bit. */ - mutex_lock(&i915->gt.timelines.mutex); - list_for_each_entry(tl, &i915->gt.timelines.active_list, link) { + mutex_lock(&timelines->mutex); + list_for_each_entry(tl, &timelines->active_list, link) { struct i915_request *rq; rq = i915_active_request_get_unlocked(&tl->last_request); @@ -896,9 +889,9 @@ static bool __i915_gem_unset_wedged(struct drm_i915_private *i915) dma_fence_default_wait(&rq->fence, false, MAX_SCHEDULE_TIMEOUT); i915_request_put(rq); } - mutex_unlock(&i915->gt.timelines.mutex); + mutex_unlock(&timelines->mutex); - intel_gt_sanitize(&i915->gt, false); + intel_gt_sanitize(gt, false); /* * Undo nop_submit_request. We prevent all new i915 requests from @@ -909,53 +902,51 @@ static bool __i915_gem_unset_wedged(struct drm_i915_private *i915) * the nop_submit_request on reset, we can do this from normal * context and do not require stop_machine(). 
*/ - intel_engines_reset_default_submission(i915); + intel_engines_reset_default_submission(gt); GEM_TRACE("end\n"); smp_mb__before_atomic(); /* complete takeover before enabling execbuf */ - clear_bit(I915_WEDGED, &i915->gpu_error.flags); + clear_bit(I915_WEDGED, >->reset.flags); return true; } -bool i915_gem_unset_wedged(struct drm_i915_private *i915) +bool intel_gt_unset_wedged(struct intel_gt *gt) { - struct i915_gpu_error *error = &i915->gpu_error; bool result; - mutex_lock(&error->wedge_mutex); - result = __i915_gem_unset_wedged(i915); - mutex_unlock(&error->wedge_mutex); + mutex_lock(>->reset.mutex); + result = __intel_gt_unset_wedged(gt); + mutex_unlock(>->reset.mutex); return result; } -static int do_reset(struct drm_i915_private *i915, - intel_engine_mask_t stalled_mask) +static int do_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask) { int err, i; - gt_revoke(i915); + gt_revoke(gt); - err = intel_gpu_reset(i915, ALL_ENGINES); + err = __intel_gt_reset(gt, ALL_ENGINES); for (i = 0; err && i < RESET_MAX_RETRIES; i++) { msleep(10 * (i + 1)); - err = intel_gpu_reset(i915, ALL_ENGINES); + err = __intel_gt_reset(gt, ALL_ENGINES); } if (err) return err; - return gt_reset(i915, stalled_mask); + return gt_reset(gt, stalled_mask); } -static int resume(struct drm_i915_private *i915) +static int resume(struct intel_gt *gt) { struct intel_engine_cs *engine; enum intel_engine_id id; int ret; - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt->i915, id) { ret = engine->resume(engine); if (ret) return ret; @@ -965,8 +956,8 @@ static int resume(struct drm_i915_private *i915) } /** - * i915_reset - reset chip after a hang - * @i915: #drm_i915_private to reset + * intel_gt_reset - reset chip after a hang + * @gt: #intel_gt to reset * @stalled_mask: mask of the stalled engines with the guilty requests * @reason: user error message for why we are resetting * @@ -981,50 +972,50 @@ static int resume(struct drm_i915_private *i915) * - re-init interrupt state * - re-init display */ -void i915_reset(struct drm_i915_private *i915, - intel_engine_mask_t stalled_mask, - const char *reason) +void intel_gt_reset(struct intel_gt *gt, + intel_engine_mask_t stalled_mask, + const char *reason) { - struct i915_gpu_error *error = &i915->gpu_error; intel_engine_mask_t awake; int ret; - GEM_TRACE("flags=%lx\n", error->flags); + GEM_TRACE("flags=%lx\n", gt->reset.flags); might_sleep(); - GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags)); - mutex_lock(&error->wedge_mutex); + GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, >->reset.flags)); + mutex_lock(>->reset.mutex); /* Clear any previous failed attempts at recovery. Time to try again. 
*/ - if (!__i915_gem_unset_wedged(i915)) + if (!__intel_gt_unset_wedged(gt)) goto unlock; if (reason) - dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason); - error->reset_count++; + dev_notice(gt->i915->drm.dev, + "Resetting chip for %s\n", reason); + atomic_inc(>->i915->gpu_error.reset_count); - awake = reset_prepare(i915); + awake = reset_prepare(gt); - if (!intel_has_gpu_reset(i915)) { + if (!intel_has_gpu_reset(gt->i915)) { if (i915_modparams.reset) - dev_err(i915->drm.dev, "GPU reset not supported\n"); + dev_err(gt->i915->drm.dev, "GPU reset not supported\n"); else DRM_DEBUG_DRIVER("GPU reset disabled\n"); goto error; } - if (INTEL_INFO(i915)->gpu_reset_clobbers_display) - intel_runtime_pm_disable_interrupts(i915); + if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display) + intel_runtime_pm_disable_interrupts(gt->i915); - if (do_reset(i915, stalled_mask)) { - dev_err(i915->drm.dev, "Failed to reset chip\n"); + if (do_reset(gt, stalled_mask)) { + dev_err(gt->i915->drm.dev, "Failed to reset chip\n"); goto taint; } - if (INTEL_INFO(i915)->gpu_reset_clobbers_display) - intel_runtime_pm_enable_interrupts(i915); + if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display) + intel_runtime_pm_enable_interrupts(gt->i915); - intel_overlay_reset(i915); + intel_overlay_reset(gt->i915); /* * Next we need to restore the context, but we don't use those @@ -1034,23 +1025,23 @@ void i915_reset(struct drm_i915_private *i915, * was running at the time of the reset (i.e. we weren't VT * switched away). */ - ret = i915_gem_init_hw(i915); + ret = i915_gem_init_hw(gt->i915); if (ret) { DRM_ERROR("Failed to initialise HW following reset (%d)\n", ret); goto taint; } - ret = resume(i915); + ret = resume(gt); if (ret) goto taint; - i915_queue_hangcheck(i915); + intel_gt_queue_hangcheck(gt); finish: - reset_finish(i915, awake); + reset_finish(gt, awake); unlock: - mutex_unlock(&error->wedge_mutex); + mutex_unlock(>->reset.mutex); return; taint: @@ -1068,18 +1059,17 @@ taint: */ add_taint_for_CI(TAINT_WARN); error: - __i915_gem_set_wedged(i915); + __intel_gt_set_wedged(gt); goto finish; } -static inline int intel_gt_reset_engine(struct drm_i915_private *i915, - struct intel_engine_cs *engine) +static inline int intel_gt_reset_engine(struct intel_engine_cs *engine) { - return intel_gpu_reset(i915, engine->mask); + return __intel_gt_reset(engine->gt, engine->mask); } /** - * i915_reset_engine - reset GPU engine to recover from a hang + * intel_engine_reset - reset GPU engine to recover from a hang * @engine: engine to reset * @msg: reason for GPU reset; or NULL for no dev_notice() * @@ -1091,13 +1081,13 @@ static inline int intel_gt_reset_engine(struct drm_i915_private *i915, * - reset engine (which will force the engine to idle) * - re-init/configure engine */ -int i915_reset_engine(struct intel_engine_cs *engine, const char *msg) +int intel_engine_reset(struct intel_engine_cs *engine, const char *msg) { - struct i915_gpu_error *error = &engine->i915->gpu_error; + struct intel_gt *gt = engine->gt; int ret; - GEM_TRACE("%s flags=%lx\n", engine->name, error->flags); - GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags)); + GEM_TRACE("%s flags=%lx\n", engine->name, gt->reset.flags); + GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, >->reset.flags)); if (!intel_engine_pm_get_if_awake(engine)) return 0; @@ -1107,10 +1097,10 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg) if (msg) dev_notice(engine->i915->drm.dev, "Resetting %s for %s\n", engine->name, msg); - 
error->reset_engine_count[engine->id]++; + atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]); if (!engine->i915->guc.execbuf_client) - ret = intel_gt_reset_engine(engine->i915, engine); + ret = intel_gt_reset_engine(engine); else ret = intel_guc_reset_engine(&engine->i915->guc, engine); if (ret) { @@ -1126,7 +1116,7 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg) * active request and can drop it, adjust head to skip the offending * request to resume executing remaining requests in the queue. */ - intel_engine_reset(engine, true); + __intel_engine_reset(engine, true); /* * The engine and its registers (and workarounds in case of render) @@ -1142,16 +1132,15 @@ out: return ret; } -static void i915_reset_device(struct drm_i915_private *i915, - u32 engine_mask, - const char *reason) +static void intel_gt_reset_global(struct intel_gt *gt, + u32 engine_mask, + const char *reason) { - struct i915_gpu_error *error = &i915->gpu_error; - struct kobject *kobj = &i915->drm.primary->kdev->kobj; + struct kobject *kobj = >->i915->drm.primary->kdev->kobj; char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; - struct i915_wedge_me w; + struct intel_wedge_me w; kobject_uevent_env(kobj, KOBJ_CHANGE, error_event); @@ -1159,24 +1148,24 @@ static void i915_reset_device(struct drm_i915_private *i915, kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event); /* Use a watchdog to ensure that our reset completes */ - i915_wedge_on_timeout(&w, i915, 5 * HZ) { - intel_prepare_reset(i915); + intel_wedge_on_timeout(&w, gt, 5 * HZ) { + intel_prepare_reset(gt->i915); /* Flush everyone using a resource about to be clobbered */ - synchronize_srcu_expedited(&error->reset_backoff_srcu); + synchronize_srcu_expedited(>->reset.backoff_srcu); - i915_reset(i915, engine_mask, reason); + intel_gt_reset(gt, engine_mask, reason); - intel_finish_reset(i915); + intel_finish_reset(gt->i915); } - if (!test_bit(I915_WEDGED, &error->flags)) + if (!test_bit(I915_WEDGED, >->reset.flags)) kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event); } /** - * i915_handle_error - handle a gpu error - * @i915: i915 device private + * intel_gt_handle_error - handle a gpu error + * @gt: the intel_gt * @engine_mask: mask representing engines that are hung * @flags: control flags * @fmt: Error message format string @@ -1187,12 +1176,11 @@ static void i915_reset_device(struct drm_i915_private *i915, * so userspace knows something bad happened (should trigger collection * of a ring dump etc.). */ -void i915_handle_error(struct drm_i915_private *i915, - intel_engine_mask_t engine_mask, - unsigned long flags, - const char *fmt, ...) +void intel_gt_handle_error(struct intel_gt *gt, + intel_engine_mask_t engine_mask, + unsigned long flags, + const char *fmt, ...) { - struct i915_gpu_error *error = &i915->gpu_error; struct intel_engine_cs *engine; intel_wakeref_t wakeref; intel_engine_mask_t tmp; @@ -1216,33 +1204,31 @@ void i915_handle_error(struct drm_i915_private *i915, * isn't the case at least when we get here by doing a * simulated reset via debugfs, so get an RPM reference. 
*/ - wakeref = intel_runtime_pm_get(&i915->runtime_pm); + wakeref = intel_runtime_pm_get(>->i915->runtime_pm); - engine_mask &= INTEL_INFO(i915)->engine_mask; + engine_mask &= INTEL_INFO(gt->i915)->engine_mask; if (flags & I915_ERROR_CAPTURE) { - i915_capture_error_state(i915, engine_mask, msg); - intel_gt_clear_error_registers(&i915->gt, engine_mask); + i915_capture_error_state(gt->i915, engine_mask, msg); + intel_gt_clear_error_registers(gt, engine_mask); } /* * Try engine reset when available. We fall back to full reset if * single reset fails. */ - if (intel_has_reset_engine(i915) && !__i915_wedged(error)) { - for_each_engine_masked(engine, i915, engine_mask, tmp) { + if (intel_has_reset_engine(gt->i915) && !intel_gt_is_wedged(gt)) { + for_each_engine_masked(engine, gt->i915, engine_mask, tmp) { BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE); if (test_and_set_bit(I915_RESET_ENGINE + engine->id, - &error->flags)) + >->reset.flags)) continue; - if (i915_reset_engine(engine, msg) == 0) + if (intel_engine_reset(engine, msg) == 0) engine_mask &= ~engine->mask; - clear_bit(I915_RESET_ENGINE + engine->id, - &error->flags); - wake_up_bit(&error->flags, - I915_RESET_ENGINE + engine->id); + clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id, + >->reset.flags); } } @@ -1250,9 +1236,9 @@ void i915_handle_error(struct drm_i915_private *i915, goto out; /* Full reset needs the mutex, stop any other user trying to do so. */ - if (test_and_set_bit(I915_RESET_BACKOFF, &error->flags)) { - wait_event(error->reset_queue, - !test_bit(I915_RESET_BACKOFF, &error->flags)); + if (test_and_set_bit(I915_RESET_BACKOFF, >->reset.flags)) { + wait_event(gt->reset.queue, + !test_bit(I915_RESET_BACKOFF, >->reset.flags)); goto out; /* piggy-back on the other reset */ } @@ -1260,113 +1246,119 @@ void i915_handle_error(struct drm_i915_private *i915, synchronize_rcu_expedited(); /* Prevent any other reset-engine attempt. 
*/ - for_each_engine(engine, i915, tmp) { + for_each_engine(engine, gt->i915, tmp) { while (test_and_set_bit(I915_RESET_ENGINE + engine->id, - &error->flags)) - wait_on_bit(&error->flags, + >->reset.flags)) + wait_on_bit(>->reset.flags, I915_RESET_ENGINE + engine->id, TASK_UNINTERRUPTIBLE); } - i915_reset_device(i915, engine_mask, msg); + intel_gt_reset_global(gt, engine_mask, msg); - for_each_engine(engine, i915, tmp) { - clear_bit(I915_RESET_ENGINE + engine->id, - &error->flags); - } - - clear_bit(I915_RESET_BACKOFF, &error->flags); - wake_up_all(&error->reset_queue); + for_each_engine(engine, gt->i915, tmp) + clear_bit_unlock(I915_RESET_ENGINE + engine->id, + >->reset.flags); + clear_bit_unlock(I915_RESET_BACKOFF, >->reset.flags); + smp_mb__after_atomic(); + wake_up_all(>->reset.queue); out: - intel_runtime_pm_put(&i915->runtime_pm, wakeref); + intel_runtime_pm_put(>->i915->runtime_pm, wakeref); } -int i915_reset_trylock(struct drm_i915_private *i915) +int intel_gt_reset_trylock(struct intel_gt *gt) { - struct i915_gpu_error *error = &i915->gpu_error; int srcu; - might_lock(&error->reset_backoff_srcu); + might_lock(>->reset.backoff_srcu); might_sleep(); rcu_read_lock(); - while (test_bit(I915_RESET_BACKOFF, &error->flags)) { + while (test_bit(I915_RESET_BACKOFF, >->reset.flags)) { rcu_read_unlock(); - if (wait_event_interruptible(error->reset_queue, + if (wait_event_interruptible(gt->reset.queue, !test_bit(I915_RESET_BACKOFF, - &error->flags))) + >->reset.flags))) return -EINTR; rcu_read_lock(); } - srcu = srcu_read_lock(&error->reset_backoff_srcu); + srcu = srcu_read_lock(>->reset.backoff_srcu); rcu_read_unlock(); return srcu; } -void i915_reset_unlock(struct drm_i915_private *i915, int tag) -__releases(&i915->gpu_error.reset_backoff_srcu) +void intel_gt_reset_unlock(struct intel_gt *gt, int tag) +__releases(>->reset.backoff_srcu) { - struct i915_gpu_error *error = &i915->gpu_error; - - srcu_read_unlock(&error->reset_backoff_srcu, tag); + srcu_read_unlock(>->reset.backoff_srcu, tag); } -int i915_terminally_wedged(struct drm_i915_private *i915) +int intel_gt_terminally_wedged(struct intel_gt *gt) { - struct i915_gpu_error *error = &i915->gpu_error; - might_sleep(); - if (!__i915_wedged(error)) + if (!intel_gt_is_wedged(gt)) return 0; /* Reset still in progress? Maybe we will recover? */ - if (!test_bit(I915_RESET_BACKOFF, &error->flags)) + if (!test_bit(I915_RESET_BACKOFF, >->reset.flags)) return -EIO; /* XXX intel_reset_finish() still takes struct_mutex!!! */ - if (mutex_is_locked(&i915->drm.struct_mutex)) + if (mutex_is_locked(>->i915->drm.struct_mutex)) return -EAGAIN; - if (wait_event_interruptible(error->reset_queue, + if (wait_event_interruptible(gt->reset.queue, !test_bit(I915_RESET_BACKOFF, - &error->flags))) + >->reset.flags))) return -EINTR; - return __i915_wedged(error) ? -EIO : 0; + return intel_gt_is_wedged(gt) ? 
-EIO : 0; +} + +void intel_gt_init_reset(struct intel_gt *gt) +{ + init_waitqueue_head(>->reset.queue); + mutex_init(>->reset.mutex); + init_srcu_struct(>->reset.backoff_srcu); +} + +void intel_gt_fini_reset(struct intel_gt *gt) +{ + cleanup_srcu_struct(>->reset.backoff_srcu); } -static void i915_wedge_me(struct work_struct *work) +static void intel_wedge_me(struct work_struct *work) { - struct i915_wedge_me *w = container_of(work, typeof(*w), work.work); + struct intel_wedge_me *w = container_of(work, typeof(*w), work.work); - dev_err(w->i915->drm.dev, + dev_err(w->gt->i915->drm.dev, "%s timed out, cancelling all in-flight rendering.\n", w->name); - i915_gem_set_wedged(w->i915); + intel_gt_set_wedged(w->gt); } -void __i915_init_wedge(struct i915_wedge_me *w, - struct drm_i915_private *i915, - long timeout, - const char *name) +void __intel_init_wedge(struct intel_wedge_me *w, + struct intel_gt *gt, + long timeout, + const char *name) { - w->i915 = i915; + w->gt = gt; w->name = name; - INIT_DELAYED_WORK_ONSTACK(&w->work, i915_wedge_me); + INIT_DELAYED_WORK_ONSTACK(&w->work, intel_wedge_me); schedule_delayed_work(&w->work, timeout); } -void __i915_fini_wedge(struct i915_wedge_me *w) +void __intel_fini_wedge(struct intel_wedge_me *w) { cancel_delayed_work_sync(&w->work); destroy_delayed_work_on_stack(&w->work); - w->i915 = NULL; + w->gt = NULL; } #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) diff --git a/drivers/gpu/drm/i915/gt/intel_reset.h b/drivers/gpu/drm/i915/gt/intel_reset.h index 03fba0ab3868..37a987b17108 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.h +++ b/drivers/gpu/drm/i915/gt/intel_reset.h @@ -11,56 +11,67 @@ #include #include -#include "gt/intel_engine_types.h" +#include "intel_engine_types.h" +#include "intel_reset_types.h" struct drm_i915_private; struct i915_request; struct intel_engine_cs; +struct intel_gt; struct intel_guc; +void intel_gt_init_reset(struct intel_gt *gt); +void intel_gt_fini_reset(struct intel_gt *gt); + __printf(4, 5) -void i915_handle_error(struct drm_i915_private *i915, - intel_engine_mask_t engine_mask, - unsigned long flags, - const char *fmt, ...); +void intel_gt_handle_error(struct intel_gt *gt, + intel_engine_mask_t engine_mask, + unsigned long flags, + const char *fmt, ...); #define I915_ERROR_CAPTURE BIT(0) -void i915_reset(struct drm_i915_private *i915, - intel_engine_mask_t stalled_mask, - const char *reason); -int i915_reset_engine(struct intel_engine_cs *engine, - const char *reason); - -void i915_reset_request(struct i915_request *rq, bool guilty); +void intel_gt_reset(struct intel_gt *gt, + intel_engine_mask_t stalled_mask, + const char *reason); +int intel_engine_reset(struct intel_engine_cs *engine, + const char *reason); -int __must_check i915_reset_trylock(struct drm_i915_private *i915); -void i915_reset_unlock(struct drm_i915_private *i915, int tag); +void __i915_request_reset(struct i915_request *rq, bool guilty); -int i915_terminally_wedged(struct drm_i915_private *i915); +int __must_check intel_gt_reset_trylock(struct intel_gt *gt); +void intel_gt_reset_unlock(struct intel_gt *gt, int tag); -bool intel_has_gpu_reset(struct drm_i915_private *i915); -bool intel_has_reset_engine(struct drm_i915_private *i915); +void intel_gt_set_wedged(struct intel_gt *gt); +bool intel_gt_unset_wedged(struct intel_gt *gt); +int intel_gt_terminally_wedged(struct intel_gt *gt); -int intel_gpu_reset(struct drm_i915_private *i915, - intel_engine_mask_t engine_mask); +int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask); -int 
intel_reset_guc(struct drm_i915_private *i915); +int intel_reset_guc(struct intel_gt *gt); -struct i915_wedge_me { +struct intel_wedge_me { struct delayed_work work; - struct drm_i915_private *i915; + struct intel_gt *gt; const char *name; }; -void __i915_init_wedge(struct i915_wedge_me *w, - struct drm_i915_private *i915, - long timeout, - const char *name); -void __i915_fini_wedge(struct i915_wedge_me *w); +void __intel_init_wedge(struct intel_wedge_me *w, + struct intel_gt *gt, + long timeout, + const char *name); +void __intel_fini_wedge(struct intel_wedge_me *w); -#define i915_wedge_on_timeout(W, DEV, TIMEOUT) \ - for (__i915_init_wedge((W), (DEV), (TIMEOUT), __func__); \ - (W)->i915; \ - __i915_fini_wedge((W))) +#define intel_wedge_on_timeout(W, GT, TIMEOUT) \ + for (__intel_init_wedge((W), (GT), (TIMEOUT), __func__); \ + (W)->gt; \ + __intel_fini_wedge((W))) + +static inline bool __intel_reset_failed(const struct intel_reset *reset) +{ + return unlikely(test_bit(I915_WEDGED, &reset->flags)); +} + +bool intel_has_gpu_reset(struct drm_i915_private *i915); +bool intel_has_reset_engine(struct drm_i915_private *i915); #endif /* I915_RESET_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_reset_types.h b/drivers/gpu/drm/i915/gt/intel_reset_types.h new file mode 100644 index 000000000000..31968356e0c0 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_reset_types.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef __INTEL_RESET_TYPES_H_ +#define __INTEL_RESET_TYPES_H_ + +#include +#include +#include + +struct intel_reset { + /** + * flags: Control various stages of the GPU reset + * + * #I915_RESET_BACKOFF - When we start a global reset, we need to + * serialise with any other users attempting to do the same, and + * any global resources that may be clobber by the reset (such as + * FENCE registers). + * + * #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to + * acquire the struct_mutex to reset an engine, we need an explicit + * flag to prevent two concurrent reset attempts in the same engine. + * As the number of engines continues to grow, allocate the flags from + * the most significant bits. + * + * #I915_WEDGED - If reset fails and we can no longer use the GPU, + * we set the #I915_WEDGED bit. Prior to command submission, e.g. + * i915_request_alloc(), this bit is checked and the sequence + * aborted (with -EIO reported to userspace) if set. + */ + unsigned long flags; +#define I915_RESET_BACKOFF 0 +#define I915_RESET_MODESET 1 +#define I915_RESET_ENGINE 2 +#define I915_WEDGED (BITS_PER_LONG - 1) + + struct mutex mutex; /* serialises wedging/unwedging */ + + /** + * Waitqueue to signal when the reset has completed. Used by clients + * that wait for dev_priv->mm.wedged to settle. + */ + wait_queue_head_t queue; + + struct srcu_struct backoff_srcu; +}; + +#endif /* _INTEL_RESET_TYPES_H_ */ diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c index 2873276879cb..f1e571fa2e6d 100644 --- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c @@ -795,7 +795,7 @@ static void reset_ring(struct intel_engine_cs *engine, bool stalled) * If the request was innocent, we try to replay the request * with the restored context. 
*/ - i915_reset_request(rq, stalled); + __i915_request_reset(rq, stalled); GEM_BUG_ON(rq->ring != engine->buffer); head = rq->head; diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c index 2d9cc3cd1f27..e2fa38a1ff0f 100644 --- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c @@ -32,7 +32,6 @@ #include "selftests/i915_random.h" #include "selftests/igt_flush_test.h" #include "selftests/igt_reset.h" -#include "selftests/igt_wedge_me.h" #include "selftests/igt_atomic.h" #include "selftests/mock_drm.h" @@ -43,7 +42,6 @@ #define IGT_IDLE_TIMEOUT 50 /* ms; time to wait after flushing between tests */ struct hang { - struct drm_i915_private *i915; struct intel_gt *gt; struct drm_i915_gem_object *hws; struct drm_i915_gem_object *obj; @@ -52,27 +50,27 @@ struct hang { u32 *batch; }; -static int hang_init(struct hang *h, struct drm_i915_private *i915) +static int hang_init(struct hang *h, struct intel_gt *gt) { void *vaddr; int err; memset(h, 0, sizeof(*h)); - h->i915 = i915; + h->gt = gt; - h->ctx = kernel_context(i915); + h->ctx = kernel_context(gt->i915); if (IS_ERR(h->ctx)) return PTR_ERR(h->ctx); GEM_BUG_ON(i915_gem_context_is_bannable(h->ctx)); - h->hws = i915_gem_object_create_internal(i915, PAGE_SIZE); + h->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); if (IS_ERR(h->hws)) { err = PTR_ERR(h->hws); goto err_ctx; } - h->obj = i915_gem_object_create_internal(i915, PAGE_SIZE); + h->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); if (IS_ERR(h->obj)) { err = PTR_ERR(h->obj); goto err_hws; @@ -87,7 +85,7 @@ static int hang_init(struct hang *h, struct drm_i915_private *i915) h->seqno = memset(vaddr, 0xff, PAGE_SIZE); vaddr = i915_gem_object_pin_map(h->obj, - i915_coherent_map_type(i915)); + i915_coherent_map_type(gt->i915)); if (IS_ERR(vaddr)) { err = PTR_ERR(vaddr); goto err_unpin_hws; @@ -129,7 +127,7 @@ static int move_to_active(struct i915_vma *vma, static struct i915_request * hang_create_request(struct hang *h, struct intel_engine_cs *engine) { - struct drm_i915_private *i915 = h->i915; + struct intel_gt *gt = h->gt; struct i915_address_space *vm = h->ctx->vm ?: &engine->gt->ggtt->vm; struct drm_i915_gem_object *obj; struct i915_request *rq = NULL; @@ -139,11 +137,11 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine) u32 *batch; int err; - obj = i915_gem_object_create_internal(i915, PAGE_SIZE); + obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); if (IS_ERR(obj)) return ERR_CAST(obj); - vaddr = i915_gem_object_pin_map(obj, i915_coherent_map_type(i915)); + vaddr = i915_gem_object_pin_map(obj, i915_coherent_map_type(gt->i915)); if (IS_ERR(vaddr)) { i915_gem_object_put(obj); return ERR_CAST(vaddr); @@ -186,7 +184,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine) goto cancel_rq; batch = h->batch; - if (INTEL_GEN(i915) >= 8) { + if (INTEL_GEN(gt->i915) >= 8) { *batch++ = MI_STORE_DWORD_IMM_GEN4; *batch++ = lower_32_bits(hws_address(hws, rq)); *batch++ = upper_32_bits(hws_address(hws, rq)); @@ -200,7 +198,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine) *batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1; *batch++ = lower_32_bits(vma->node.start); *batch++ = upper_32_bits(vma->node.start); - } else if (INTEL_GEN(i915) >= 6) { + } else if (INTEL_GEN(gt->i915) >= 6) { *batch++ = MI_STORE_DWORD_IMM_GEN4; *batch++ = 0; *batch++ = lower_32_bits(hws_address(hws, rq)); @@ -213,7 +211,7 @@ 
hang_create_request(struct hang *h, struct intel_engine_cs *engine) *batch++ = MI_ARB_CHECK; *batch++ = MI_BATCH_BUFFER_START | 1 << 8; *batch++ = lower_32_bits(vma->node.start); - } else if (INTEL_GEN(i915) >= 4) { + } else if (INTEL_GEN(gt->i915) >= 4) { *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; *batch++ = 0; *batch++ = lower_32_bits(hws_address(hws, rq)); @@ -249,7 +247,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine) } flags = 0; - if (INTEL_GEN(i915) <= 5) + if (INTEL_GEN(gt->i915) <= 5) flags |= I915_DISPATCH_SECURE; err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags); @@ -286,7 +284,7 @@ static void hang_fini(struct hang *h) kernel_context_close(h->ctx); - igt_flush_test(h->i915, I915_WAIT_LOCKED); + igt_flush_test(h->gt->i915, I915_WAIT_LOCKED); } static bool wait_until_running(struct hang *h, struct i915_request *rq) @@ -301,7 +299,7 @@ static bool wait_until_running(struct hang *h, struct i915_request *rq) static int igt_hang_sanitycheck(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; struct i915_request *rq; struct intel_engine_cs *engine; enum intel_engine_id id; @@ -310,13 +308,13 @@ static int igt_hang_sanitycheck(void *arg) /* Basic check that we can execute our hanging batch */ - mutex_lock(&i915->drm.struct_mutex); - err = hang_init(&h, i915); + mutex_lock(>->i915->drm.struct_mutex); + err = hang_init(&h, gt); if (err) goto unlock; - for_each_engine(engine, i915, id) { - struct igt_wedge_me w; + for_each_engine(engine, gt->i915, id) { + struct intel_wedge_me w; long timeout; if (!intel_engine_can_store_dword(engine)) @@ -338,10 +336,10 @@ static int igt_hang_sanitycheck(void *arg) i915_request_add(rq); timeout = 0; - igt_wedge_on_timeout(&w, i915, HZ / 10 /* 100ms timeout*/) + intel_wedge_on_timeout(&w, gt, HZ / 10 /* 100ms */) timeout = i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT); - if (i915_reset_failed(i915)) + if (intel_gt_is_wedged(gt)) timeout = -EIO; i915_request_put(rq); @@ -357,7 +355,7 @@ static int igt_hang_sanitycheck(void *arg) fini: hang_fini(&h); unlock: - mutex_unlock(&i915->drm.struct_mutex); + mutex_unlock(>->i915->drm.struct_mutex); return err; } @@ -368,7 +366,8 @@ static bool wait_for_idle(struct intel_engine_cs *engine) static int igt_reset_nop(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; + struct i915_gpu_error *global = >->i915->gpu_error; struct intel_engine_cs *engine; struct i915_gem_context *ctx; unsigned int reset_count, count; @@ -379,25 +378,25 @@ static int igt_reset_nop(void *arg) /* Check that we can reset during non-user portions of requests */ - file = mock_file(i915); + file = mock_file(gt->i915); if (IS_ERR(file)) return PTR_ERR(file); - mutex_lock(&i915->drm.struct_mutex); - ctx = live_context(i915, file); - mutex_unlock(&i915->drm.struct_mutex); + mutex_lock(>->i915->drm.struct_mutex); + ctx = live_context(gt->i915, file); + mutex_unlock(>->i915->drm.struct_mutex); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); goto out; } i915_gem_context_clear_bannable(ctx); - reset_count = i915_reset_count(&i915->gpu_error); + reset_count = i915_reset_count(global); count = 0; do { - mutex_lock(&i915->drm.struct_mutex); + mutex_lock(>->i915->drm.struct_mutex); - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt->i915, id) { int i; for (i = 0; i < 16; i++) { @@ -413,43 +412,43 @@ static int igt_reset_nop(void *arg) } } - igt_global_reset_lock(i915); - i915_reset(i915, ALL_ENGINES, NULL); - igt_global_reset_unlock(i915); 
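The hunks above move the selftest onto the intel_gt-based reset entry points. For orientation, the same calling sequence is reduced to its essentials in the sketch below; it uses only names visible in these hunks, while check_full_reset() itself and the header paths in the comments are assumptions for illustration, not part of the patch:

/*
 * Illustrative sketch only: drive a full GPU reset through the reworked
 * intel_gt interface, mirroring igt_reset_nop() above. Helper name and
 * include locations are assumed for the sketch.
 */
#include "gt/intel_reset.h"		/* intel_gt_reset(), intel_gt_is_wedged() */
#include "selftests/igt_reset.h"	/* igt_global_reset_lock()/unlock() */

static int check_full_reset(struct intel_gt *gt)
{
	struct i915_gpu_error *global = &gt->i915->gpu_error;
	unsigned int before = i915_reset_count(global);

	/* Was: igt_global_reset_lock(i915); i915_reset(i915, ALL_ENGINES, NULL); */
	igt_global_reset_lock(gt);
	intel_gt_reset(gt, ALL_ENGINES, NULL);
	igt_global_reset_unlock(gt);

	/* Was: i915_reset_failed(i915) */
	if (intel_gt_is_wedged(gt))
		return -EIO;

	/* Exactly one additional global reset should have been recorded. */
	if (i915_reset_count(global) != before + 1)
		return -EINVAL;

	return 0;
}

This is the same pair of checks igt_reset_nop() performs on every iteration: the device must not be wedged afterwards, and i915_reset_count() must have advanced by the expected amount.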
+ igt_global_reset_lock(gt); + intel_gt_reset(gt, ALL_ENGINES, NULL); + igt_global_reset_unlock(gt); - mutex_unlock(&i915->drm.struct_mutex); - if (i915_reset_failed(i915)) { + mutex_unlock(>->i915->drm.struct_mutex); + if (intel_gt_is_wedged(gt)) { err = -EIO; break; } - if (i915_reset_count(&i915->gpu_error) != - reset_count + ++count) { + if (i915_reset_count(global) != reset_count + ++count) { pr_err("Full GPU reset not recorded!\n"); err = -EINVAL; break; } - err = igt_flush_test(i915, 0); + err = igt_flush_test(gt->i915, 0); if (err) break; } while (time_before(jiffies, end_time)); pr_info("%s: %d resets\n", __func__, count); - mutex_lock(&i915->drm.struct_mutex); - err = igt_flush_test(i915, I915_WAIT_LOCKED); - mutex_unlock(&i915->drm.struct_mutex); + mutex_lock(>->i915->drm.struct_mutex); + err = igt_flush_test(gt->i915, I915_WAIT_LOCKED); + mutex_unlock(>->i915->drm.struct_mutex); out: - mock_file_free(i915, file); - if (i915_reset_failed(i915)) + mock_file_free(gt->i915, file); + if (intel_gt_is_wedged(gt)) err = -EIO; return err; } static int igt_reset_nop_engine(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; + struct i915_gpu_error *global = >->i915->gpu_error; struct intel_engine_cs *engine; struct i915_gem_context *ctx; enum intel_engine_id id; @@ -458,33 +457,32 @@ static int igt_reset_nop_engine(void *arg) /* Check that we can engine-reset during non-user portions */ - if (!intel_has_reset_engine(i915)) + if (!intel_has_reset_engine(gt->i915)) return 0; - file = mock_file(i915); + file = mock_file(gt->i915); if (IS_ERR(file)) return PTR_ERR(file); - mutex_lock(&i915->drm.struct_mutex); - ctx = live_context(i915, file); - mutex_unlock(&i915->drm.struct_mutex); + mutex_lock(>->i915->drm.struct_mutex); + ctx = live_context(gt->i915, file); + mutex_unlock(>->i915->drm.struct_mutex); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); goto out; } i915_gem_context_clear_bannable(ctx); - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt->i915, id) { unsigned int reset_count, reset_engine_count; unsigned int count; IGT_TIMEOUT(end_time); - reset_count = i915_reset_count(&i915->gpu_error); - reset_engine_count = i915_reset_engine_count(&i915->gpu_error, - engine); + reset_count = i915_reset_count(global); + reset_engine_count = i915_reset_engine_count(global, engine); count = 0; - set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); + set_bit(I915_RESET_ENGINE + id, >->reset.flags); do { int i; @@ -495,7 +493,7 @@ static int igt_reset_nop_engine(void *arg) break; } - mutex_lock(&i915->drm.struct_mutex); + mutex_lock(>->i915->drm.struct_mutex); for (i = 0; i < 16; i++) { struct i915_request *rq; @@ -507,20 +505,20 @@ static int igt_reset_nop_engine(void *arg) i915_request_add(rq); } - err = i915_reset_engine(engine, NULL); - mutex_unlock(&i915->drm.struct_mutex); + err = intel_engine_reset(engine, NULL); + mutex_unlock(>->i915->drm.struct_mutex); if (err) { pr_err("i915_reset_engine failed\n"); break; } - if (i915_reset_count(&i915->gpu_error) != reset_count) { + if (i915_reset_count(global) != reset_count) { pr_err("Full GPU reset recorded! 
(engine reset expected)\n"); err = -EINVAL; break; } - if (i915_reset_engine_count(&i915->gpu_error, engine) != + if (i915_reset_engine_count(global, engine) != reset_engine_count + ++count) { pr_err("%s engine reset not recorded!\n", engine->name); @@ -528,30 +526,31 @@ static int igt_reset_nop_engine(void *arg) break; } } while (time_before(jiffies, end_time)); - clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); + clear_bit(I915_RESET_ENGINE + id, >->reset.flags); pr_info("%s(%s): %d resets\n", __func__, engine->name, count); if (err) break; - err = igt_flush_test(i915, 0); + err = igt_flush_test(gt->i915, 0); if (err) break; } - mutex_lock(&i915->drm.struct_mutex); - err = igt_flush_test(i915, I915_WAIT_LOCKED); - mutex_unlock(&i915->drm.struct_mutex); + mutex_lock(>->i915->drm.struct_mutex); + err = igt_flush_test(gt->i915, I915_WAIT_LOCKED); + mutex_unlock(>->i915->drm.struct_mutex); out: - mock_file_free(i915, file); - if (i915_reset_failed(i915)) + mock_file_free(gt->i915, file); + if (intel_gt_is_wedged(gt)) err = -EIO; return err; } -static int __igt_reset_engine(struct drm_i915_private *i915, bool active) +static int __igt_reset_engine(struct intel_gt *gt, bool active) { + struct i915_gpu_error *global = >->i915->gpu_error; struct intel_engine_cs *engine; enum intel_engine_id id; struct hang h; @@ -559,18 +558,18 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active) /* Check that we can issue an engine reset on an idle engine (no-op) */ - if (!intel_has_reset_engine(i915)) + if (!intel_has_reset_engine(gt->i915)) return 0; if (active) { - mutex_lock(&i915->drm.struct_mutex); - err = hang_init(&h, i915); - mutex_unlock(&i915->drm.struct_mutex); + mutex_lock(>->i915->drm.struct_mutex); + err = hang_init(&h, gt); + mutex_unlock(>->i915->drm.struct_mutex); if (err) return err; } - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt->i915, id) { unsigned int reset_count, reset_engine_count; IGT_TIMEOUT(end_time); @@ -584,30 +583,29 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active) break; } - reset_count = i915_reset_count(&i915->gpu_error); - reset_engine_count = i915_reset_engine_count(&i915->gpu_error, - engine); + reset_count = i915_reset_count(global); + reset_engine_count = i915_reset_engine_count(global, engine); intel_engine_pm_get(engine); - set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); + set_bit(I915_RESET_ENGINE + id, >->reset.flags); do { if (active) { struct i915_request *rq; - mutex_lock(&i915->drm.struct_mutex); + mutex_lock(>->i915->drm.struct_mutex); rq = hang_create_request(&h, engine); if (IS_ERR(rq)) { err = PTR_ERR(rq); - mutex_unlock(&i915->drm.struct_mutex); + mutex_unlock(>->i915->drm.struct_mutex); break; } i915_request_get(rq); i915_request_add(rq); - mutex_unlock(&i915->drm.struct_mutex); + mutex_unlock(>->i915->drm.struct_mutex); if (!wait_until_running(&h, rq)) { - struct drm_printer p = drm_info_printer(i915->drm.dev); + struct drm_printer p = drm_info_printer(gt->i915->drm.dev); pr_err("%s: Failed to start request %llx, at %x\n", __func__, rq->fence.seqno, hws_seqno(&h, rq)); @@ -622,19 +620,19 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active) i915_request_put(rq); } - err = i915_reset_engine(engine, NULL); + err = intel_engine_reset(engine, NULL); if (err) { pr_err("i915_reset_engine failed\n"); break; } - if (i915_reset_count(&i915->gpu_error) != reset_count) { + if (i915_reset_count(global) != reset_count) { pr_err("Full GPU reset recorded! 
(engine reset expected)\n"); err = -EINVAL; break; } - if (i915_reset_engine_count(&i915->gpu_error, engine) != + if (i915_reset_engine_count(global, engine) != ++reset_engine_count) { pr_err("%s engine reset not recorded!\n", engine->name); @@ -642,24 +640,24 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active) break; } } while (time_before(jiffies, end_time)); - clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); + clear_bit(I915_RESET_ENGINE + id, >->reset.flags); intel_engine_pm_put(engine); if (err) break; - err = igt_flush_test(i915, 0); + err = igt_flush_test(gt->i915, 0); if (err) break; } - if (i915_reset_failed(i915)) + if (intel_gt_is_wedged(gt)) err = -EIO; if (active) { - mutex_lock(&i915->drm.struct_mutex); + mutex_lock(>->i915->drm.struct_mutex); hang_fini(&h); - mutex_unlock(&i915->drm.struct_mutex); + mutex_unlock(>->i915->drm.struct_mutex); } return err; @@ -701,7 +699,7 @@ static int active_request_put(struct i915_request *rq) rq->fence.seqno); GEM_TRACE_DUMP(); - i915_gem_set_wedged(rq->i915); + intel_gt_set_wedged(rq->engine->gt); err = -EIO; } @@ -778,10 +776,11 @@ err_file: return err; } -static int __igt_reset_engines(struct drm_i915_private *i915, +static int __igt_reset_engines(struct intel_gt *gt, const char *test_name, unsigned int flags) { + struct i915_gpu_error *global = >->i915->gpu_error; struct intel_engine_cs *engine, *other; enum intel_engine_id id, tmp; struct hang h; @@ -791,13 +790,13 @@ static int __igt_reset_engines(struct drm_i915_private *i915, * with any other engine. */ - if (!intel_has_reset_engine(i915)) + if (!intel_has_reset_engine(gt->i915)) return 0; if (flags & TEST_ACTIVE) { - mutex_lock(&i915->drm.struct_mutex); - err = hang_init(&h, i915); - mutex_unlock(&i915->drm.struct_mutex); + mutex_lock(>->i915->drm.struct_mutex); + err = hang_init(&h, gt); + mutex_unlock(>->i915->drm.struct_mutex); if (err) return err; @@ -805,9 +804,9 @@ static int __igt_reset_engines(struct drm_i915_private *i915, h.ctx->sched.priority = 1024; } - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt->i915, id) { struct active_engine threads[I915_NUM_ENGINES] = {}; - unsigned long global = i915_reset_count(&i915->gpu_error); + unsigned long device = i915_reset_count(global); unsigned long count = 0, reported; IGT_TIMEOUT(end_time); @@ -823,12 +822,11 @@ static int __igt_reset_engines(struct drm_i915_private *i915, } memset(threads, 0, sizeof(threads)); - for_each_engine(other, i915, tmp) { + for_each_engine(other, gt->i915, tmp) { struct task_struct *tsk; threads[tmp].resets = - i915_reset_engine_count(&i915->gpu_error, - other); + i915_reset_engine_count(global, other); if (!(flags & TEST_OTHERS)) continue; @@ -851,25 +849,25 @@ static int __igt_reset_engines(struct drm_i915_private *i915, } intel_engine_pm_get(engine); - set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); + set_bit(I915_RESET_ENGINE + id, >->reset.flags); do { struct i915_request *rq = NULL; if (flags & TEST_ACTIVE) { - mutex_lock(&i915->drm.struct_mutex); + mutex_lock(>->i915->drm.struct_mutex); rq = hang_create_request(&h, engine); if (IS_ERR(rq)) { err = PTR_ERR(rq); - mutex_unlock(&i915->drm.struct_mutex); + mutex_unlock(>->i915->drm.struct_mutex); break; } i915_request_get(rq); i915_request_add(rq); - mutex_unlock(&i915->drm.struct_mutex); + mutex_unlock(>->i915->drm.struct_mutex); if (!wait_until_running(&h, rq)) { - struct drm_printer p = drm_info_printer(i915->drm.dev); + struct drm_printer p = drm_info_printer(gt->i915->drm.dev); 
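Per-engine resets keep the same bit protocol as before; only the home of the flags changes, from i915->gpu_error.flags to gt->reset.flags (struct intel_reset). A minimal sketch of that protocol, using only calls visible in the hunks above (reset_one_engine() is an illustrative name, not driver API, and the include locations are assumed):

/*
 * Illustrative sketch only: claim the per-engine reset bit, reset the
 * engine, then release the bit, as the selftests above do.
 */
#include <linux/bitops.h>	/* test_and_set_bit(), clear_bit() */
#include "gt/intel_reset.h"	/* intel_engine_reset(), I915_RESET_ENGINE */

static int reset_one_engine(struct intel_engine_cs *engine)
{
	struct intel_gt *gt = engine->gt;
	int err;

	if (!intel_has_reset_engine(gt->i915))
		return -ENODEV;

	/* Was: test_and_set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags) */
	if (test_and_set_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags))
		return -EBUSY;	/* another reset of this engine is in flight */

	/* Was: i915_reset_engine(engine, NULL) */
	err = intel_engine_reset(engine, NULL);

	/*
	 * The driver path, intel_gt_handle_error(), releases the bit with
	 * clear_and_wake_up_bit() so waiters on gt->reset.flags are woken;
	 * plain clear_bit() is what the selftests use and suffices here.
	 */
	clear_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags);

	return err;
}

Note that intel_engine_reset() asserts the corresponding I915_RESET_ENGINE bit is already set, so the claim/release ordering above is required.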
pr_err("%s: Failed to start request %llx, at %x\n", __func__, rq->fence.seqno, hws_seqno(&h, rq)); @@ -882,7 +880,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915, } } - err = i915_reset_engine(engine, NULL); + err = intel_engine_reset(engine, NULL); if (err) { pr_err("i915_reset_engine(%s:%s): failed, err=%d\n", engine->name, test_name, err); @@ -894,7 +892,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915, if (rq) { if (i915_request_wait(rq, 0, HZ / 5) < 0) { struct drm_printer p = - drm_info_printer(i915->drm.dev); + drm_info_printer(gt->i915->drm.dev); pr_err("i915_reset_engine(%s:%s):" " failed to complete request after reset\n", @@ -904,7 +902,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915, i915_request_put(rq); GEM_TRACE_DUMP(); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(gt); err = -EIO; break; } @@ -914,7 +912,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915, if (!(flags & TEST_SELF) && !wait_for_idle(engine)) { struct drm_printer p = - drm_info_printer(i915->drm.dev); + drm_info_printer(gt->i915->drm.dev); pr_err("i915_reset_engine(%s:%s):" " failed to idle after reset\n", @@ -926,12 +924,12 @@ static int __igt_reset_engines(struct drm_i915_private *i915, break; } } while (time_before(jiffies, end_time)); - clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); + clear_bit(I915_RESET_ENGINE + id, >->reset.flags); intel_engine_pm_put(engine); pr_info("i915_reset_engine(%s:%s): %lu resets\n", engine->name, test_name, count); - reported = i915_reset_engine_count(&i915->gpu_error, engine); + reported = i915_reset_engine_count(global, engine); reported -= threads[engine->id].resets; if (reported != count) { pr_err("i915_reset_engine(%s:%s): reset %lu times, but reported %lu\n", @@ -941,7 +939,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915, } unwind: - for_each_engine(other, i915, tmp) { + for_each_engine(other, gt->i915, tmp) { int ret; if (!threads[tmp].task) @@ -956,22 +954,21 @@ unwind: } put_task_struct(threads[tmp].task); - if (other != engine && + if (other->uabi_class != engine->uabi_class && threads[tmp].resets != - i915_reset_engine_count(&i915->gpu_error, other)) { + i915_reset_engine_count(global, other)) { pr_err("Innocent engine %s was reset (count=%ld)\n", other->name, - i915_reset_engine_count(&i915->gpu_error, - other) - + i915_reset_engine_count(global, other) - threads[tmp].resets); if (!err) err = -EINVAL; } } - if (global != i915_reset_count(&i915->gpu_error)) { + if (device != i915_reset_count(global)) { pr_err("Global reset (count=%ld)!\n", - i915_reset_count(&i915->gpu_error) - global); + i915_reset_count(global) - device); if (!err) err = -EINVAL; } @@ -979,20 +976,20 @@ unwind: if (err) break; - mutex_lock(&i915->drm.struct_mutex); - err = igt_flush_test(i915, I915_WAIT_LOCKED); - mutex_unlock(&i915->drm.struct_mutex); + mutex_lock(>->i915->drm.struct_mutex); + err = igt_flush_test(gt->i915, I915_WAIT_LOCKED); + mutex_unlock(>->i915->drm.struct_mutex); if (err) break; } - if (i915_reset_failed(i915)) + if (intel_gt_is_wedged(gt)) err = -EIO; if (flags & TEST_ACTIVE) { - mutex_lock(&i915->drm.struct_mutex); + mutex_lock(>->i915->drm.struct_mutex); hang_fini(&h); - mutex_unlock(&i915->drm.struct_mutex); + mutex_unlock(>->i915->drm.struct_mutex); } return err; @@ -1018,13 +1015,13 @@ static int igt_reset_engines(void *arg) }, { } }; - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; typeof(*phases) *p; int err; for (p = phases; p->name; p++) { if 
(p->flags & TEST_PRIORITY) { - if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY)) + if (!(gt->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY)) continue; } @@ -1036,38 +1033,39 @@ static int igt_reset_engines(void *arg) return 0; } -static u32 fake_hangcheck(struct drm_i915_private *i915, - intel_engine_mask_t mask) +static u32 fake_hangcheck(struct intel_gt *gt, intel_engine_mask_t mask) { - u32 count = i915_reset_count(&i915->gpu_error); + u32 count = i915_reset_count(>->i915->gpu_error); - i915_reset(i915, mask, NULL); + intel_gt_reset(gt, mask, NULL); return count; } static int igt_reset_wait(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; + struct i915_gpu_error *global = >->i915->gpu_error; + struct intel_engine_cs *engine = gt->i915->engine[RCS0]; struct i915_request *rq; unsigned int reset_count; struct hang h; long timeout; int err; - if (!intel_engine_can_store_dword(i915->engine[RCS0])) + if (!engine || !intel_engine_can_store_dword(engine)) return 0; /* Check that we detect a stuck waiter and issue a reset */ - igt_global_reset_lock(i915); + igt_global_reset_lock(gt); - mutex_lock(&i915->drm.struct_mutex); - err = hang_init(&h, i915); + mutex_lock(>->i915->drm.struct_mutex); + err = hang_init(&h, gt); if (err) goto unlock; - rq = hang_create_request(&h, i915->engine[RCS0]); + rq = hang_create_request(&h, engine); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto fini; @@ -1077,19 +1075,19 @@ static int igt_reset_wait(void *arg) i915_request_add(rq); if (!wait_until_running(&h, rq)) { - struct drm_printer p = drm_info_printer(i915->drm.dev); + struct drm_printer p = drm_info_printer(gt->i915->drm.dev); pr_err("%s: Failed to start request %llx, at %x\n", __func__, rq->fence.seqno, hws_seqno(&h, rq)); intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(gt); err = -EIO; goto out_rq; } - reset_count = fake_hangcheck(i915, ALL_ENGINES); + reset_count = fake_hangcheck(gt, ALL_ENGINES); timeout = i915_request_wait(rq, 0, 10); if (timeout < 0) { @@ -1099,7 +1097,7 @@ static int igt_reset_wait(void *arg) goto out_rq; } - if (i915_reset_count(&i915->gpu_error) == reset_count) { + if (i915_reset_count(global) == reset_count) { pr_err("No GPU reset recorded!\n"); err = -EINVAL; goto out_rq; @@ -1110,10 +1108,10 @@ out_rq: fini: hang_fini(&h); unlock: - mutex_unlock(&i915->drm.struct_mutex); - igt_global_reset_unlock(i915); + mutex_unlock(>->i915->drm.struct_mutex); + igt_global_reset_unlock(gt); - if (i915_reset_failed(i915)) + if (intel_gt_is_wedged(gt)) return -EIO; return err; @@ -1172,11 +1170,12 @@ out_unlock: return err; } -static int __igt_reset_evict_vma(struct drm_i915_private *i915, +static int __igt_reset_evict_vma(struct intel_gt *gt, struct i915_address_space *vm, int (*fn)(void *), unsigned int flags) { + struct intel_engine_cs *engine = gt->i915->engine[RCS0]; struct drm_i915_gem_object *obj; struct task_struct *tsk = NULL; struct i915_request *rq; @@ -1184,17 +1183,17 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915, struct hang h; int err; - if (!intel_engine_can_store_dword(i915->engine[RCS0])) + if (!engine || !intel_engine_can_store_dword(engine)) return 0; /* Check that we can recover an unbind stuck on a hanging request */ - mutex_lock(&i915->drm.struct_mutex); - err = hang_init(&h, i915); + mutex_lock(>->i915->drm.struct_mutex); + err = hang_init(&h, gt); if (err) goto unlock; - obj = i915_gem_object_create_internal(i915, SZ_1M); + obj = 
i915_gem_object_create_internal(gt->i915, SZ_1M); if (IS_ERR(obj)) { err = PTR_ERR(obj); goto fini; @@ -1214,7 +1213,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915, goto out_obj; } - rq = hang_create_request(&h, i915->engine[RCS0]); + rq = hang_create_request(&h, engine); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto out_obj; @@ -1252,16 +1251,16 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915, if (err) goto out_rq; - mutex_unlock(&i915->drm.struct_mutex); + mutex_unlock(>->i915->drm.struct_mutex); if (!wait_until_running(&h, rq)) { - struct drm_printer p = drm_info_printer(i915->drm.dev); + struct drm_printer p = drm_info_printer(gt->i915->drm.dev); pr_err("%s: Failed to start request %llx, at %x\n", __func__, rq->fence.seqno, hws_seqno(&h, rq)); intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(gt); goto out_reset; } @@ -1278,31 +1277,31 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915, wait_for_completion(&arg.completion); if (wait_for(!list_empty(&rq->fence.cb_list), 10)) { - struct drm_printer p = drm_info_printer(i915->drm.dev); + struct drm_printer p = drm_info_printer(gt->i915->drm.dev); pr_err("igt/evict_vma kthread did not wait\n"); intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(gt); goto out_reset; } out_reset: - igt_global_reset_lock(i915); - fake_hangcheck(rq->i915, rq->engine->mask); - igt_global_reset_unlock(i915); + igt_global_reset_lock(gt); + fake_hangcheck(gt, rq->engine->mask); + igt_global_reset_unlock(gt); if (tsk) { - struct igt_wedge_me w; + struct intel_wedge_me w; /* The reset, even indirectly, should take less than 10ms. */ - igt_wedge_on_timeout(&w, i915, HZ / 10 /* 100ms timeout*/) + intel_wedge_on_timeout(&w, gt, HZ / 10 /* 100ms */) err = kthread_stop(tsk); put_task_struct(tsk); } - mutex_lock(&i915->drm.struct_mutex); + mutex_lock(>->i915->drm.struct_mutex); out_rq: i915_request_put(rq); out_obj: @@ -1310,9 +1309,9 @@ out_obj: fini: hang_fini(&h); unlock: - mutex_unlock(&i915->drm.struct_mutex); + mutex_unlock(>->i915->drm.struct_mutex); - if (i915_reset_failed(i915)) + if (intel_gt_is_wedged(gt)) return -EIO; return err; @@ -1320,26 +1319,26 @@ unlock: static int igt_reset_evict_ggtt(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; - return __igt_reset_evict_vma(i915, &i915->ggtt.vm, + return __igt_reset_evict_vma(gt, >->ggtt->vm, evict_vma, EXEC_OBJECT_WRITE); } static int igt_reset_evict_ppgtt(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; struct i915_gem_context *ctx; struct drm_file *file; int err; - file = mock_file(i915); + file = mock_file(gt->i915); if (IS_ERR(file)) return PTR_ERR(file); - mutex_lock(&i915->drm.struct_mutex); - ctx = live_context(i915, file); - mutex_unlock(&i915->drm.struct_mutex); + mutex_lock(>->i915->drm.struct_mutex); + ctx = live_context(gt->i915, file); + mutex_unlock(>->i915->drm.struct_mutex); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); goto out; @@ -1347,29 +1346,29 @@ static int igt_reset_evict_ppgtt(void *arg) err = 0; if (ctx->vm) /* aliasing == global gtt locking, covered above */ - err = __igt_reset_evict_vma(i915, ctx->vm, + err = __igt_reset_evict_vma(gt, ctx->vm, evict_vma, EXEC_OBJECT_WRITE); out: - mock_file_free(i915, file); + mock_file_free(gt->i915, file); return err; } static int igt_reset_evict_fence(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt 
*gt = arg; - return __igt_reset_evict_vma(i915, &i915->ggtt.vm, + return __igt_reset_evict_vma(gt, >->ggtt->vm, evict_fence, EXEC_OBJECT_NEEDS_FENCE); } -static int wait_for_others(struct drm_i915_private *i915, +static int wait_for_others(struct intel_gt *gt, struct intel_engine_cs *exclude) { struct intel_engine_cs *engine; enum intel_engine_id id; - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt->i915, id) { if (engine == exclude) continue; @@ -1382,7 +1381,8 @@ static int wait_for_others(struct drm_i915_private *i915, static int igt_reset_queue(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; + struct i915_gpu_error *global = >->i915->gpu_error; struct intel_engine_cs *engine; enum intel_engine_id id; struct hang h; @@ -1390,14 +1390,14 @@ static int igt_reset_queue(void *arg) /* Check that we replay pending requests following a hang */ - igt_global_reset_lock(i915); + igt_global_reset_lock(gt); - mutex_lock(&i915->drm.struct_mutex); - err = hang_init(&h, i915); + mutex_lock(>->i915->drm.struct_mutex); + err = hang_init(&h, gt); if (err) goto unlock; - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt->i915, id) { struct i915_request *prev; IGT_TIMEOUT(end_time); unsigned int count; @@ -1438,7 +1438,7 @@ static int igt_reset_queue(void *arg) * (hangcheck), or we focus on resetting just one * engine and so avoid repeatedly resetting innocents. */ - err = wait_for_others(i915, engine); + err = wait_for_others(gt, engine); if (err) { pr_err("%s(%s): Failed to idle other inactive engines after device reset\n", __func__, engine->name); @@ -1446,12 +1446,12 @@ static int igt_reset_queue(void *arg) i915_request_put(prev); GEM_TRACE_DUMP(); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(gt); goto fini; } if (!wait_until_running(&h, prev)) { - struct drm_printer p = drm_info_printer(i915->drm.dev); + struct drm_printer p = drm_info_printer(gt->i915->drm.dev); pr_err("%s(%s): Failed to start request %llx, at %x\n", __func__, engine->name, @@ -1462,13 +1462,13 @@ static int igt_reset_queue(void *arg) i915_request_put(rq); i915_request_put(prev); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(gt); err = -EIO; goto fini; } - reset_count = fake_hangcheck(i915, BIT(id)); + reset_count = fake_hangcheck(gt, BIT(id)); if (prev->fence.error != -EIO) { pr_err("GPU reset not recorded on hanging request [fence.error=%d]!\n", @@ -1488,7 +1488,7 @@ static int igt_reset_queue(void *arg) goto fini; } - if (i915_reset_count(&i915->gpu_error) == reset_count) { + if (i915_reset_count(global) == reset_count) { pr_err("No GPU reset recorded!\n"); i915_request_put(rq); i915_request_put(prev); @@ -1507,7 +1507,7 @@ static int igt_reset_queue(void *arg) i915_request_put(prev); - err = igt_flush_test(i915, I915_WAIT_LOCKED); + err = igt_flush_test(gt->i915, I915_WAIT_LOCKED); if (err) break; } @@ -1515,10 +1515,10 @@ static int igt_reset_queue(void *arg) fini: hang_fini(&h); unlock: - mutex_unlock(&i915->drm.struct_mutex); - igt_global_reset_unlock(i915); + mutex_unlock(>->i915->drm.struct_mutex); + igt_global_reset_unlock(gt); - if (i915_reset_failed(i915)) + if (intel_gt_is_wedged(gt)) return -EIO; return err; @@ -1526,8 +1526,9 @@ unlock: static int igt_handle_error(void *arg) { - struct drm_i915_private *i915 = arg; - struct intel_engine_cs *engine = i915->engine[RCS0]; + struct intel_gt *gt = arg; + struct i915_gpu_error *global = >->i915->gpu_error; + struct intel_engine_cs *engine = gt->i915->engine[RCS0]; struct hang h; struct i915_request 
*rq; struct i915_gpu_state *error; @@ -1535,15 +1536,15 @@ static int igt_handle_error(void *arg) /* Check that we can issue a global GPU and engine reset */ - if (!intel_has_reset_engine(i915)) + if (!intel_has_reset_engine(gt->i915)) return 0; if (!engine || !intel_engine_can_store_dword(engine)) return 0; - mutex_lock(&i915->drm.struct_mutex); + mutex_lock(>->i915->drm.struct_mutex); - err = hang_init(&h, i915); + err = hang_init(&h, gt); if (err) goto err_unlock; @@ -1557,28 +1558,28 @@ static int igt_handle_error(void *arg) i915_request_add(rq); if (!wait_until_running(&h, rq)) { - struct drm_printer p = drm_info_printer(i915->drm.dev); + struct drm_printer p = drm_info_printer(gt->i915->drm.dev); pr_err("%s: Failed to start request %llx, at %x\n", __func__, rq->fence.seqno, hws_seqno(&h, rq)); intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(gt); err = -EIO; goto err_request; } - mutex_unlock(&i915->drm.struct_mutex); + mutex_unlock(>->i915->drm.struct_mutex); /* Temporarily disable error capture */ - error = xchg(&i915->gpu_error.first_error, (void *)-1); + error = xchg(&global->first_error, (void *)-1); - i915_handle_error(i915, engine->mask, 0, NULL); + intel_gt_handle_error(gt, engine->mask, 0, NULL); - xchg(&i915->gpu_error.first_error, error); + xchg(&global->first_error, error); - mutex_lock(&i915->drm.struct_mutex); + mutex_lock(>->i915->drm.struct_mutex); if (rq->fence.error != -EIO) { pr_err("Guilty request not identified!\n"); @@ -1591,7 +1592,7 @@ err_request: err_fini: hang_fini(&h); err_unlock: - mutex_unlock(&i915->drm.struct_mutex); + mutex_unlock(>->i915->drm.struct_mutex); return err; } @@ -1608,7 +1609,7 @@ static int __igt_atomic_reset_engine(struct intel_engine_cs *engine, tasklet_disable_nosync(t); p->critical_section_begin(); - err = i915_reset_engine(engine, NULL); + err = intel_engine_reset(engine, NULL); p->critical_section_end(); tasklet_enable(t); @@ -1623,7 +1624,6 @@ static int __igt_atomic_reset_engine(struct intel_engine_cs *engine, static int igt_atomic_reset_engine(struct intel_engine_cs *engine, const struct igt_atomic_section *p) { - struct drm_i915_private *i915 = engine->i915; struct i915_request *rq; struct hang h; int err; @@ -1632,7 +1632,7 @@ static int igt_atomic_reset_engine(struct intel_engine_cs *engine, if (err) return err; - err = hang_init(&h, i915); + err = hang_init(&h, engine->gt); if (err) return err; @@ -1651,16 +1651,16 @@ static int igt_atomic_reset_engine(struct intel_engine_cs *engine, pr_err("%s(%s): Failed to start request %llx, at %x\n", __func__, engine->name, rq->fence.seqno, hws_seqno(&h, rq)); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(engine->gt); err = -EIO; } if (err == 0) { - struct igt_wedge_me w; + struct intel_wedge_me w; - igt_wedge_on_timeout(&w, i915, HZ / 20 /* 50ms timeout*/) + intel_wedge_on_timeout(&w, engine->gt, HZ / 20 /* 50ms */) i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT); - if (i915_reset_failed(i915)) + if (intel_gt_is_wedged(engine->gt)) err = -EIO; } @@ -1672,30 +1672,30 @@ out: static int igt_reset_engines_atomic(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; const typeof(*igt_atomic_phases) *p; int err = 0; /* Check that the engines resets are usable from atomic context */ - if (!intel_has_reset_engine(i915)) + if (!intel_has_reset_engine(gt->i915)) return 0; - if (USES_GUC_SUBMISSION(i915)) + if (USES_GUC_SUBMISSION(gt->i915)) return 0; - igt_global_reset_lock(i915); - 
mutex_lock(&i915->drm.struct_mutex); + igt_global_reset_lock(gt); + mutex_lock(>->i915->drm.struct_mutex); /* Flush any requests before we get started and check basics */ - if (!igt_force_reset(i915)) + if (!igt_force_reset(gt)) goto unlock; for (p = igt_atomic_phases; p->name; p++) { struct intel_engine_cs *engine; enum intel_engine_id id; - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt->i915, id) { err = igt_atomic_reset_engine(engine, p); if (err) goto out; @@ -1704,11 +1704,11 @@ static int igt_reset_engines_atomic(void *arg) out: /* As we poke around the guts, do a full reset before continuing. */ - igt_force_reset(i915); + igt_force_reset(gt); unlock: - mutex_unlock(&i915->drm.struct_mutex); - igt_global_reset_unlock(i915); + mutex_unlock(>->i915->drm.struct_mutex); + igt_global_reset_unlock(gt); return err; } @@ -1730,28 +1730,29 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_reset_evict_fence), SUBTEST(igt_handle_error), }; + struct intel_gt *gt = &i915->gt; intel_wakeref_t wakeref; bool saved_hangcheck; int err; - if (!intel_has_gpu_reset(i915)) + if (!intel_has_gpu_reset(gt->i915)) return 0; - if (i915_terminally_wedged(i915)) + if (intel_gt_is_wedged(gt)) return -EIO; /* we're long past hope of a successful reset */ - wakeref = intel_runtime_pm_get(&i915->runtime_pm); + wakeref = intel_runtime_pm_get(>->i915->runtime_pm); saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck); - drain_delayed_work(&i915->gpu_error.hangcheck_work); /* flush param */ + drain_delayed_work(>->hangcheck.work); /* flush param */ - err = i915_live_subtests(tests, i915); + err = intel_gt_live_subtests(tests, gt); - mutex_lock(&i915->drm.struct_mutex); - igt_flush_test(i915, I915_WAIT_LOCKED); - mutex_unlock(&i915->drm.struct_mutex); + mutex_lock(>->i915->drm.struct_mutex); + igt_flush_test(gt->i915, I915_WAIT_LOCKED); + mutex_unlock(>->i915->drm.struct_mutex); i915_modparams.enable_hangcheck = saved_hangcheck; - intel_runtime_pm_put(&i915->runtime_pm, wakeref); + intel_runtime_pm_put(>->i915->runtime_pm, wakeref); return err; } diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index b9b881ab8e7c..678e9b2edf8d 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -55,7 +55,7 @@ static int live_sanitycheck(void *arg) if (!igt_wait_for_spinner(&spin, rq)) { GEM_TRACE("spinner failed to start\n"); GEM_TRACE_DUMP(); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(&i915->gt); err = -EIO; goto err_ctx; } @@ -211,7 +211,7 @@ slice_semaphore_queue(struct intel_engine_cs *outer, pr_err("Failed to slice along semaphore chain of length (%d, %d)!\n", count, n); GEM_TRACE_DUMP(); - i915_gem_set_wedged(outer->i915); + intel_gt_set_wedged(outer->gt); err = -EIO; } @@ -445,7 +445,7 @@ static int live_busywait_preempt(void *arg) intel_engine_dump(engine, &p, "%s\n", engine->name); GEM_TRACE_DUMP(); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(&i915->gt); err = -EIO; goto err_vma; } @@ -534,7 +534,7 @@ static int live_preempt(void *arg) if (!igt_wait_for_spinner(&spin_lo, rq)) { GEM_TRACE("lo spinner failed to start\n"); GEM_TRACE_DUMP(); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(&i915->gt); err = -EIO; goto err_ctx_lo; } @@ -551,7 +551,7 @@ static int live_preempt(void *arg) if (!igt_wait_for_spinner(&spin_hi, rq)) { GEM_TRACE("hi spinner failed to start\n"); GEM_TRACE_DUMP(); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(&i915->gt); err = -EIO; 
goto err_ctx_lo; } @@ -688,7 +688,7 @@ err_unlock: err_wedged: igt_spinner_end(&spin_hi); igt_spinner_end(&spin_lo); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(&i915->gt); err = -EIO; goto err_ctx_lo; } @@ -824,7 +824,7 @@ err_unlock: err_wedged: igt_spinner_end(&b.spin); igt_spinner_end(&a.spin); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(&i915->gt); err = -EIO; goto err_client_b; } @@ -934,7 +934,7 @@ err_unlock: err_wedged: igt_spinner_end(&b.spin); igt_spinner_end(&a.spin); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(&i915->gt); err = -EIO; goto err_client_b; } @@ -1105,7 +1105,7 @@ err_unlock: err_wedged: for (i = 0; i < ARRAY_SIZE(client); i++) igt_spinner_end(&client[i].spin); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(&i915->gt); err = -EIO; goto err_client_3; } @@ -1251,7 +1251,7 @@ err_unlock: err_wedged: igt_spinner_end(&hi.spin); igt_spinner_end(&lo.spin); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(&i915->gt); err = -EIO; goto err_client_lo; } @@ -1310,7 +1310,7 @@ static int live_preempt_hang(void *arg) if (!igt_wait_for_spinner(&spin_lo, rq)) { GEM_TRACE("lo spinner failed to start\n"); GEM_TRACE_DUMP(); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(&i915->gt); err = -EIO; goto err_ctx_lo; } @@ -1332,21 +1332,21 @@ static int live_preempt_hang(void *arg) HZ / 10)) { pr_err("Preemption did not occur within timeout!"); GEM_TRACE_DUMP(); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(&i915->gt); err = -EIO; goto err_ctx_lo; } - set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); - i915_reset_engine(engine, NULL); - clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); + set_bit(I915_RESET_ENGINE + id, &i915->gt.reset.flags); + intel_engine_reset(engine, NULL); + clear_bit(I915_RESET_ENGINE + id, &i915->gt.reset.flags); engine->execlists.preempt_hang.inject_hang = false; if (!igt_wait_for_spinner(&spin_hi, rq)) { GEM_TRACE("hi spinner failed to start\n"); GEM_TRACE_DUMP(); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(&i915->gt); err = -EIO; goto err_ctx_lo; } @@ -1726,7 +1726,7 @@ static int nop_virtual_engine(struct drm_i915_private *i915, request[nc]->fence.context, request[nc]->fence.seqno); GEM_TRACE_DUMP(); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(&i915->gt); break; } } @@ -1873,7 +1873,7 @@ static int mask_virtual_engine(struct drm_i915_private *i915, request[n]->fence.context, request[n]->fence.seqno); GEM_TRACE_DUMP(); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(&i915->gt); err = -EIO; goto out; } @@ -2150,7 +2150,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915) if (!HAS_EXECLISTS(i915)) return 0; - if (i915_terminally_wedged(i915)) + if (intel_gt_is_wedged(&i915->gt)) return 0; return i915_live_subtests(tests, i915); diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c index 672e32e1ef95..00a4f60cdfd5 100644 --- a/drivers/gpu/drm/i915/gt/selftest_reset.c +++ b/drivers/gpu/drm/i915/gt/selftest_reset.c @@ -9,26 +9,29 @@ static int igt_global_reset(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; unsigned int reset_count; + intel_wakeref_t wakeref; int err = 0; /* Check that we can issue a global GPU reset */ - igt_global_reset_lock(i915); + igt_global_reset_lock(gt); + wakeref = intel_runtime_pm_get(>->i915->runtime_pm); - reset_count = i915_reset_count(&i915->gpu_error); + reset_count = i915_reset_count(>->i915->gpu_error); - i915_reset(i915, ALL_ENGINES, NULL); + intel_gt_reset(gt, ALL_ENGINES, 
NULL); - if (i915_reset_count(&i915->gpu_error) == reset_count) { + if (i915_reset_count(>->i915->gpu_error) == reset_count) { pr_err("No GPU reset recorded!\n"); err = -EINVAL; } - igt_global_reset_unlock(i915); + intel_runtime_pm_put(>->i915->runtime_pm, wakeref); + igt_global_reset_unlock(gt); - if (i915_reset_failed(i915)) + if (intel_gt_is_wedged(gt)) err = -EIO; return err; @@ -36,72 +39,72 @@ static int igt_global_reset(void *arg) static int igt_wedged_reset(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; intel_wakeref_t wakeref; /* Check that we can recover a wedged device with a GPU reset */ - igt_global_reset_lock(i915); - wakeref = intel_runtime_pm_get(&i915->runtime_pm); + igt_global_reset_lock(gt); + wakeref = intel_runtime_pm_get(>->i915->runtime_pm); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(gt); - GEM_BUG_ON(!i915_reset_failed(i915)); - i915_reset(i915, ALL_ENGINES, NULL); + GEM_BUG_ON(!intel_gt_is_wedged(gt)); + intel_gt_reset(gt, ALL_ENGINES, NULL); - intel_runtime_pm_put(&i915->runtime_pm, wakeref); - igt_global_reset_unlock(i915); + intel_runtime_pm_put(>->i915->runtime_pm, wakeref); + igt_global_reset_unlock(gt); - return i915_reset_failed(i915) ? -EIO : 0; + return intel_gt_is_wedged(gt) ? -EIO : 0; } static int igt_atomic_reset(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; const typeof(*igt_atomic_phases) *p; int err = 0; /* Check that the resets are usable from atomic context */ - intel_gt_pm_get(&i915->gt); - igt_global_reset_lock(i915); + intel_gt_pm_get(gt); + igt_global_reset_lock(gt); /* Flush any requests before we get started and check basics */ - if (!igt_force_reset(i915)) + if (!igt_force_reset(gt)) goto unlock; for (p = igt_atomic_phases; p->name; p++) { intel_engine_mask_t awake; - GEM_TRACE("intel_gpu_reset under %s\n", p->name); + GEM_TRACE("__intel_gt_reset under %s\n", p->name); - awake = reset_prepare(i915); + awake = reset_prepare(gt); p->critical_section_begin(); - err = intel_gpu_reset(i915, ALL_ENGINES); + err = __intel_gt_reset(gt, ALL_ENGINES); p->critical_section_end(); - reset_finish(i915, awake); + reset_finish(gt, awake); if (err) { - pr_err("intel_gpu_reset failed under %s\n", p->name); + pr_err("__intel_gt_reset failed under %s\n", p->name); break; } } /* As we poke around the guts, do a full reset before continuing. 
*/ - igt_force_reset(i915); + igt_force_reset(gt); unlock: - igt_global_reset_unlock(i915); - intel_gt_pm_put(&i915->gt); + igt_global_reset_unlock(gt); + intel_gt_pm_put(gt); return err; } static int igt_atomic_engine_reset(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; const typeof(*igt_atomic_phases) *p; struct intel_engine_cs *engine; enum intel_engine_id id; @@ -109,33 +112,33 @@ static int igt_atomic_engine_reset(void *arg) /* Check that the resets are usable from atomic context */ - if (!intel_has_reset_engine(i915)) + if (!intel_has_reset_engine(gt->i915)) return 0; - if (USES_GUC_SUBMISSION(i915)) + if (USES_GUC_SUBMISSION(gt->i915)) return 0; - intel_gt_pm_get(&i915->gt); - igt_global_reset_lock(i915); + intel_gt_pm_get(gt); + igt_global_reset_lock(gt); /* Flush any requests before we get started and check basics */ - if (!igt_force_reset(i915)) + if (!igt_force_reset(gt)) goto out_unlock; - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt->i915, id) { tasklet_disable_nosync(&engine->execlists.tasklet); intel_engine_pm_get(engine); for (p = igt_atomic_phases; p->name; p++) { - GEM_TRACE("i915_reset_engine(%s) under %s\n", + GEM_TRACE("intel_engine_reset(%s) under %s\n", engine->name, p->name); p->critical_section_begin(); - err = i915_reset_engine(engine, NULL); + err = intel_engine_reset(engine, NULL); p->critical_section_end(); if (err) { - pr_err("i915_reset_engine(%s) failed under %s\n", + pr_err("intel_engine_reset(%s) failed under %s\n", engine->name, p->name); break; } @@ -148,11 +151,11 @@ static int igt_atomic_engine_reset(void *arg) } /* As we poke around the guts, do a full reset before continuing. */ - igt_force_reset(i915); + igt_force_reset(gt); out_unlock: - igt_global_reset_unlock(i915); - intel_gt_pm_put(&i915->gt); + igt_global_reset_unlock(gt); + intel_gt_pm_put(gt); return err; } @@ -165,17 +168,13 @@ int intel_reset_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_atomic_reset), SUBTEST(igt_atomic_engine_reset), }; - intel_wakeref_t wakeref; - int err = 0; + struct intel_gt *gt = &i915->gt; - if (!intel_has_gpu_reset(i915)) + if (!intel_has_gpu_reset(gt->i915)) return 0; - if (i915_terminally_wedged(i915)) + if (intel_gt_is_wedged(gt)) return -EIO; /* we're long past hope of a successful reset */ - with_intel_runtime_pm(&i915->runtime_pm, wakeref) - err = i915_subtests(tests, i915); - - return err; + return intel_gt_live_subtests(tests, gt); } diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c index eae3b1963bf7..f0a840030382 100644 --- a/drivers/gpu/drm/i915/gt/selftest_timeline.c +++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c @@ -7,6 +7,7 @@ #include #include "gem/i915_gem_pm.h" +#include "intel_gt.h" #include "../selftests/i915_random.h" #include "../i915_selftest.h" @@ -836,7 +837,7 @@ int intel_timeline_live_selftests(struct drm_i915_private *i915) SUBTEST(live_hwsp_wrap), }; - if (i915_terminally_wedged(i915)) + if (intel_gt_is_wedged(&i915->gt)) return 0; return i915_live_subtests(tests, i915); diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c index a0d8f1bfe0ad..ab147985fa74 100644 --- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c +++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c @@ -12,7 +12,6 @@ #include "selftests/igt_flush_test.h" #include "selftests/igt_reset.h" #include "selftests/igt_spinner.h" -#include "selftests/igt_wedge_me.h" #include "selftests/mock_drm.h" 
#include "gem/selftests/igt_gem_utils.h" @@ -178,7 +177,7 @@ static int check_whitelist(struct i915_gem_context *ctx, struct intel_engine_cs *engine) { struct drm_i915_gem_object *results; - struct igt_wedge_me wedge; + struct intel_wedge_me wedge; u32 *vaddr; int err; int i; @@ -189,10 +188,10 @@ static int check_whitelist(struct i915_gem_context *ctx, err = 0; i915_gem_object_lock(results); - igt_wedge_on_timeout(&wedge, ctx->i915, HZ / 5) /* a safety net! */ + intel_wedge_on_timeout(&wedge, &ctx->i915->gt, HZ / 5) /* safety net! */ err = i915_gem_object_set_to_cpu_domain(results, false); i915_gem_object_unlock(results); - if (i915_terminally_wedged(ctx->i915)) + if (intel_gt_is_wedged(&ctx->i915->gt)) err = -EIO; if (err) goto out_put; @@ -225,13 +224,13 @@ out_put: static int do_device_reset(struct intel_engine_cs *engine) { - i915_reset(engine->i915, engine->mask, "live_workarounds"); + intel_gt_reset(engine->gt, engine->mask, "live_workarounds"); return 0; } static int do_engine_reset(struct intel_engine_cs *engine) { - return i915_reset_engine(engine, "live_workarounds"); + return intel_engine_reset(engine, "live_workarounds"); } static int @@ -572,7 +571,7 @@ err_request: if (i915_request_wait(rq, 0, HZ / 5) < 0) { pr_err("%s: Futzing %x timedout; cancelling test\n", engine->name, reg); - i915_gem_set_wedged(ctx->i915); + intel_gt_set_wedged(&ctx->i915->gt); err = -EIO; goto out_batch; } @@ -730,7 +729,7 @@ static int live_reset_whitelist(void *arg) if (!engine || engine->whitelist.count == 0) return 0; - igt_global_reset_lock(i915); + igt_global_reset_lock(&i915->gt); if (intel_has_reset_engine(i915)) { err = check_whitelist_across_reset(engine, @@ -749,7 +748,7 @@ static int live_reset_whitelist(void *arg) } out: - igt_global_reset_unlock(i915); + igt_global_reset_unlock(&i915->gt); return err; } @@ -1118,7 +1117,7 @@ live_gpu_reset_workarounds(void *arg) pr_info("Verifying after GPU reset...\n"); - igt_global_reset_lock(i915); + igt_global_reset_lock(&i915->gt); wakeref = intel_runtime_pm_get(&i915->runtime_pm); reference_lists_init(i915, &lists); @@ -1127,7 +1126,7 @@ live_gpu_reset_workarounds(void *arg) if (!ok) goto out; - i915_reset(i915, ALL_ENGINES, "live_workarounds"); + intel_gt_reset(&i915->gt, ALL_ENGINES, "live_workarounds"); ok = verify_wa_lists(ctx, &lists, "after reset"); @@ -1135,7 +1134,7 @@ out: kernel_context_close(ctx); reference_lists_fini(i915, &lists); intel_runtime_pm_put(&i915->runtime_pm, wakeref); - igt_global_reset_unlock(i915); + igt_global_reset_unlock(&i915->gt); return ok ? 
0 : -ESRCH; } @@ -1160,7 +1159,7 @@ live_engine_reset_workarounds(void *arg) if (IS_ERR(ctx)) return PTR_ERR(ctx); - igt_global_reset_lock(i915); + igt_global_reset_lock(&i915->gt); wakeref = intel_runtime_pm_get(&i915->runtime_pm); reference_lists_init(i915, &lists); @@ -1176,7 +1175,7 @@ live_engine_reset_workarounds(void *arg) goto err; } - i915_reset_engine(engine, "live_workarounds"); + intel_engine_reset(engine, "live_workarounds"); ok = verify_wa_lists(ctx, &lists, "after idle reset"); if (!ok) { @@ -1204,7 +1203,7 @@ live_engine_reset_workarounds(void *arg) goto err; } - i915_reset_engine(engine, "live_workarounds"); + intel_engine_reset(engine, "live_workarounds"); igt_spinner_end(&spin); igt_spinner_fini(&spin); @@ -1219,7 +1218,7 @@ live_engine_reset_workarounds(void *arg) err: reference_lists_fini(i915, &lists); intel_runtime_pm_put(&i915->runtime_pm, wakeref); - igt_global_reset_unlock(i915); + igt_global_reset_unlock(&i915->gt); kernel_context_close(ctx); igt_flush_test(i915, I915_WAIT_LOCKED); @@ -1238,7 +1237,7 @@ int intel_workarounds_live_selftests(struct drm_i915_private *i915) }; int err; - if (i915_terminally_wedged(i915)) + if (intel_gt_is_wedged(&i915->gt)) return 0; mutex_lock(&i915->drm.struct_mutex); diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 41245acb0a0f..a1843e3de6d7 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1074,15 +1074,16 @@ static void i915_instdone_info(struct drm_i915_private *dev_priv, static int i915_hangcheck_info(struct seq_file *m, void *unused) { - struct drm_i915_private *dev_priv = node_to_i915(m->private); + struct drm_i915_private *i915 = node_to_i915(m->private); + struct intel_gt *gt = &i915->gt; struct intel_engine_cs *engine; intel_wakeref_t wakeref; enum intel_engine_id id; - seq_printf(m, "Reset flags: %lx\n", dev_priv->gpu_error.flags); - if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags)) + seq_printf(m, "Reset flags: %lx\n", gt->reset.flags); + if (test_bit(I915_WEDGED, >->reset.flags)) seq_puts(m, "\tWedged\n"); - if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) + if (test_bit(I915_RESET_BACKOFF, >->reset.flags)) seq_puts(m, "\tDevice (global) reset in progress\n"); if (!i915_modparams.enable_hangcheck) { @@ -1090,19 +1091,19 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) return 0; } - if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer)) + if (timer_pending(>->hangcheck.work.timer)) seq_printf(m, "Hangcheck active, timer fires in %dms\n", - jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires - + jiffies_to_msecs(gt->hangcheck.work.timer.expires - jiffies)); - else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) + else if (delayed_work_pending(>->hangcheck.work)) seq_puts(m, "Hangcheck active, work pending\n"); else seq_puts(m, "Hangcheck inactive\n"); - seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake)); + seq_printf(m, "GT active? 
%s\n", yesno(gt->awake)); - with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) { - for_each_engine(engine, dev_priv, id) { + with_intel_runtime_pm(&i915->runtime_pm, wakeref) { + for_each_engine(engine, i915, id) { struct intel_instdone instdone; seq_printf(m, "%s: %d ms ago\n", @@ -1117,10 +1118,10 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) intel_engine_get_instdone(engine, &instdone); seq_puts(m, "\tinstdone read =\n"); - i915_instdone_info(dev_priv, m, &instdone); + i915_instdone_info(i915, m, &instdone); seq_puts(m, "\tinstdone accu =\n"); - i915_instdone_info(dev_priv, m, + i915_instdone_info(i915, m, &engine->hangcheck.instdone); } } @@ -1128,23 +1129,6 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) return 0; } -static int i915_reset_info(struct seq_file *m, void *unused) -{ - struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct i915_gpu_error *error = &dev_priv->gpu_error; - struct intel_engine_cs *engine; - enum intel_engine_id id; - - seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error)); - - for_each_engine(engine, dev_priv, id) { - seq_printf(m, "%s = %u\n", engine->name, - i915_reset_engine_count(error, engine)); - } - - return 0; -} - static int ironlake_drpc_info(struct seq_file *m) { struct drm_i915_private *i915 = node_to_i915(m->private); @@ -3616,7 +3600,8 @@ static const struct file_operations i915_cur_wm_latency_fops = { static int i915_wedged_get(void *data, u64 *val) { - int ret = i915_terminally_wedged(data); + struct drm_i915_private *i915 = data; + int ret = intel_gt_terminally_wedged(&i915->gt); switch (ret) { case -EIO: @@ -3636,11 +3621,11 @@ i915_wedged_set(void *data, u64 val) struct drm_i915_private *i915 = data; /* Flush any previous reset before applying for a new one */ - wait_event(i915->gpu_error.reset_queue, - !test_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags)); + wait_event(i915->gt.reset.queue, + !test_bit(I915_RESET_BACKOFF, &i915->gt.reset.flags)); - i915_handle_error(i915, val, I915_ERROR_CAPTURE, - "Manually set wedged engine mask = %llx", val); + intel_gt_handle_error(&i915->gt, val, I915_ERROR_CAPTURE, + "Manually set wedged engine mask = %llx", val); return 0; } @@ -3683,8 +3668,9 @@ i915_drop_caches_set(void *data, u64 val) val, val & DROP_ALL); if (val & DROP_RESET_ACTIVE && - wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) - i915_gem_set_wedged(i915); + wait_for(intel_engines_are_idle(&i915->gt), + I915_IDLE_ENGINES_TIMEOUT)) + intel_gt_set_wedged(&i915->gt); /* No need to check and wait for gpu resets, only libdrm auto-restarts * on ioctls on -EAGAIN. 
*/ @@ -3719,8 +3705,8 @@ i915_drop_caches_set(void *data, u64 val) mutex_unlock(&i915->drm.struct_mutex); } - if (val & DROP_RESET_ACTIVE && i915_terminally_wedged(i915)) - i915_handle_error(i915, ALL_ENGINES, 0, NULL); + if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(&i915->gt)) + intel_gt_handle_error(&i915->gt, ALL_ENGINES, 0, NULL); fs_reclaim_acquire(GFP_KERNEL); if (val & DROP_BOUND) @@ -4375,7 +4361,6 @@ static const struct drm_info_list i915_debugfs_list[] = { {"i915_huc_load_status", i915_huc_load_status_info, 0}, {"i915_frequency_info", i915_frequency_info, 0}, {"i915_hangcheck_info", i915_hangcheck_info, 0}, - {"i915_reset_info", i915_reset_info, 0}, {"i915_drpc_info", i915_drpc_info, 0}, {"i915_emon_status", i915_emon_status, 0}, {"i915_ring_freq_table", i915_ring_freq_table, 0}, diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index e2d1bed9454c..6953732719e2 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -947,7 +947,6 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv) if (ret < 0) goto err_uc; intel_irq_init(dev_priv); - intel_hangcheck_init(dev_priv); intel_init_display_hooks(dev_priv); intel_init_clock_gating_hooks(dev_priv); intel_init_audio_hooks(dev_priv); @@ -1967,7 +1966,7 @@ void i915_driver_remove(struct drm_device *dev) * all in-flight requests so that we can quickly unbind the active * resources. */ - i915_gem_set_wedged(dev_priv); + intel_gt_set_wedged(&dev_priv->gt); /* Flush any external code that still may be under the RCU lock */ synchronize_rcu(); @@ -1988,7 +1987,7 @@ void i915_driver_remove(struct drm_device *dev) intel_csr_ucode_fini(dev_priv); /* Free error state after interrupts are fully disabled. */ - cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); + cancel_delayed_work_sync(&dev_priv->gt.hangcheck.work); i915_reset_error_state(dev_priv); i915_gem_driver_remove(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 7e1ee30d74a9..1a0b114da77e 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2400,28 +2400,10 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent); void i915_driver_remove(struct drm_device *dev); void intel_engine_init_hangcheck(struct intel_engine_cs *engine); -void intel_hangcheck_init(struct drm_i915_private *dev_priv); int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv); -static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv) -{ - unsigned long delay; - - if (unlikely(!i915_modparams.enable_hangcheck)) - return; - - /* Don't continually defer the hangcheck so that it is always run at - * least once after work has been scheduled on any ring. Otherwise, - * we will ignore a hung ring if a second ring is kept busy. 
- */ - - delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES); - queue_delayed_work(system_long_wq, - &dev_priv->gpu_error.hangcheck_work, delay); -} - static inline bool intel_gvt_active(struct drm_i915_private *dev_priv) { return dev_priv->gvt; @@ -2510,30 +2492,17 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old, int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno); -static inline bool __i915_wedged(struct i915_gpu_error *error) -{ - return unlikely(test_bit(I915_WEDGED, &error->flags)); -} - -static inline bool i915_reset_failed(struct drm_i915_private *i915) -{ - return __i915_wedged(&i915->gpu_error); -} - static inline u32 i915_reset_count(struct i915_gpu_error *error) { - return READ_ONCE(error->reset_count); + return atomic_read(&error->reset_count); } static inline u32 i915_reset_engine_count(struct i915_gpu_error *error, struct intel_engine_cs *engine) { - return READ_ONCE(error->reset_engine_count[engine->id]); + return atomic_read(&error->reset_engine_count[engine->uabi_class]); } -void i915_gem_set_wedged(struct drm_i915_private *dev_priv); -bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv); - void i915_gem_init_mmio(struct drm_i915_private *i915); int __must_check i915_gem_init(struct drm_i915_private *dev_priv); int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 37fe2ed2f582..e24955b5ebc2 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -894,13 +894,13 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915) } } -static int wait_for_engines(struct drm_i915_private *i915) +static int wait_for_engines(struct intel_gt *gt) { - if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) { - dev_err(i915->drm.dev, + if (wait_for(intel_engines_are_idle(gt), I915_IDLE_ENGINES_TIMEOUT)) { + dev_err(gt->i915->drm.dev, "Failed to idle engines, declaring wedged!\n"); GEM_TRACE_DUMP(); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(gt); return -EIO; } @@ -971,7 +971,7 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, lockdep_assert_held(&i915->drm.struct_mutex); - err = wait_for_engines(i915); + err = wait_for_engines(&i915->gt); if (err) return err; @@ -1149,8 +1149,8 @@ void i915_gem_sanitize(struct drm_i915_private *i915) * back to defaults, recovering from whatever wedged state we left it * in and so worth trying to use the device once more. */ - if (i915_terminally_wedged(i915)) - i915_gem_unset_wedged(i915); + if (intel_gt_is_wedged(&i915->gt)) + intel_gt_unset_wedged(&i915->gt); /* * If we inherit context state from the BIOS or earlier occupants @@ -1202,7 +1202,7 @@ int i915_gem_init_hw(struct drm_i915_private *i915) int ret; BUG_ON(!i915->kernel_context); - ret = i915_terminally_wedged(i915); + ret = intel_gt_terminally_wedged(gt); if (ret) return ret; @@ -1384,7 +1384,7 @@ err_active: * and ready to be torn-down. The quickest way we can accomplish * this is by declaring ourselves wedged. */ - i915_gem_set_wedged(i915); + intel_gt_set_wedged(&i915->gt); goto out_ctx; } @@ -1539,7 +1539,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv) err_gt: mutex_unlock(&dev_priv->drm.struct_mutex); - i915_gem_set_wedged(dev_priv); + intel_gt_set_wedged(&dev_priv->gt); i915_gem_suspend(dev_priv); i915_gem_suspend_late(dev_priv); @@ -1581,10 +1581,10 @@ err_uc_misc: * wedged. 
But we only want to do this where the GPU is angry, * for all other failure, such as an allocation failure, bail. */ - if (!i915_reset_failed(dev_priv)) { + if (!intel_gt_is_wedged(&dev_priv->gt)) { i915_probe_error(dev_priv, "Failed to initialize GPU, declaring it wedged!\n"); - i915_gem_set_wedged(dev_priv); + intel_gt_set_wedged(&dev_priv->gt); } /* Minimal basic recovery for KMS */ @@ -1666,11 +1666,6 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv) i915_gem_init__mm(dev_priv); i915_gem_init__pm(dev_priv); - init_waitqueue_head(&dev_priv->gpu_error.wait_queue); - init_waitqueue_head(&dev_priv->gpu_error.reset_queue); - mutex_init(&dev_priv->gpu_error.wedge_mutex); - init_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu); - atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0); spin_lock_init(&dev_priv->fb_tracking.lock); @@ -1689,7 +1684,7 @@ void i915_gem_cleanup_early(struct drm_i915_private *dev_priv) GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count)); WARN_ON(dev_priv->mm.shrink_count); - cleanup_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu); + intel_gt_cleanup_early(&dev_priv->gt); i915_gemfs_fini(dev_priv); } diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h index 2ecd0c6a1c94..7dfbfda48733 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.h +++ b/drivers/gpu/drm/i915/i915_gpu_error.h @@ -7,6 +7,7 @@ #ifndef _I915_GPU_ERROR_H_ #define _I915_GPU_ERROR_H_ +#include #include #include #include @@ -180,12 +181,6 @@ struct i915_gpu_state { }; struct i915_gpu_error { - /* For hangcheck timer */ -#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ -#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD) - - struct delayed_work hangcheck_work; - /* For reset and error_state handling. */ spinlock_t lock; /* Protected by the above dev->gpu_error.lock. */ @@ -193,52 +188,11 @@ struct i915_gpu_error { atomic_t pending_fb_pin; - /** - * flags: Control various stages of the GPU reset - * - * #I915_RESET_BACKOFF - When we start a global reset, we need to - * serialise with any other users attempting to do the same, and - * any global resources that may be clobber by the reset (such as - * FENCE registers). - * - * #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to - * acquire the struct_mutex to reset an engine, we need an explicit - * flag to prevent two concurrent reset attempts in the same engine. - * As the number of engines continues to grow, allocate the flags from - * the most significant bits. - * - * #I915_WEDGED - If reset fails and we can no longer use the GPU, - * we set the #I915_WEDGED bit. Prior to command submission, e.g. - * i915_request_alloc(), this bit is checked and the sequence - * aborted (with -EIO reported to userspace) if set. - */ - unsigned long flags; -#define I915_RESET_BACKOFF 0 -#define I915_RESET_MODESET 1 -#define I915_RESET_ENGINE 2 -#define I915_WEDGED (BITS_PER_LONG - 1) - /** Number of times the device has been reset (global) */ - u32 reset_count; + atomic_t reset_count; /** Number of times an engine has been reset */ - u32 reset_engine_count[I915_NUM_ENGINES]; - - struct mutex wedge_mutex; /* serialises wedging/unwedging */ - - /** - * Waitqueue to signal when a hang is detected. Used to for waiters - * to release the struct_mutex for the reset to procede. - */ - wait_queue_head_t wait_queue; - - /** - * Waitqueue to signal when the reset has completed. Used by clients - * that wait for dev_priv->mm.wedged to settle. 
- */ - wait_queue_head_t reset_queue; - - struct srcu_struct reset_backoff_srcu; + atomic_t reset_engine_count[I915_NUM_ENGINES]; }; struct drm_i915_error_state_buf { diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 222c9c56e9de..8ac7d14ec8c9 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -1401,8 +1401,7 @@ long i915_request_wait(struct i915_request *rq, * serialise wait/reset with an explicit lock, we do want * lockdep to detect potential dependency cycles. */ - mutex_acquire(&rq->i915->gpu_error.wedge_mutex.dep_map, - 0, 0, _THIS_IP_); + mutex_acquire(&rq->engine->gt->reset.mutex.dep_map, 0, 0, _THIS_IP_); /* * Optimistic spin before touching IRQs. @@ -1480,7 +1479,7 @@ long i915_request_wait(struct i915_request *rq, dma_fence_remove_callback(&rq->fence, &wait.cb); out: - mutex_release(&rq->i915->gpu_error.wedge_mutex.dep_map, 0, _THIS_IP_); + mutex_release(&rq->engine->gt->reset.mutex.dep_map, 0, _THIS_IP_); trace_i915_request_wait_end(rq); return timeout; } diff --git a/drivers/gpu/drm/i915/i915_selftest.h b/drivers/gpu/drm/i915/i915_selftest.h index d9b17b9e6993..acdf6eb9e262 100644 --- a/drivers/gpu/drm/i915/i915_selftest.h +++ b/drivers/gpu/drm/i915/i915_selftest.h @@ -72,6 +72,9 @@ int __i915_nop_teardown(int err, void *data); int __i915_live_setup(void *data); int __i915_live_teardown(int err, void *data); +int __intel_gt_live_setup(void *data); +int __intel_gt_live_teardown(int err, void *data); + int __i915_subtests(const char *caller, int (*setup)(void *data), int (*teardown)(int err, void *data), @@ -88,6 +91,12 @@ int __i915_subtests(const char *caller, __i915_live_setup, __i915_live_teardown, \ T, ARRAY_SIZE(T), data); \ }) +#define intel_gt_live_subtests(T, data) ({ \ + typecheck(struct intel_gt *, data); \ + __i915_subtests(__func__, \ + __intel_gt_live_setup, __intel_gt_live_teardown, \ + T, ARRAY_SIZE(T), data); \ +}) #define SUBTEST(x) { x, #x } diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c index b663b5fe51a8..d86336aa4178 100644 --- a/drivers/gpu/drm/i915/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/intel_guc_submission.c @@ -667,7 +667,7 @@ static void guc_reset(struct intel_engine_cs *engine, bool stalled) if (!i915_request_started(rq)) stalled = false; - i915_reset_request(rq, stalled); + __i915_request_reset(rq, stalled); intel_lr_context_reset(engine, rq->hw_context, rq->head, stalled); out_unlock: diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c index 72bbcb2597fd..bc589efd3a6d 100644 --- a/drivers/gpu/drm/i915/intel_uc.c +++ b/drivers/gpu/drm/i915/intel_uc.c @@ -38,7 +38,7 @@ static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv) int ret; u32 guc_status; - ret = intel_reset_guc(dev_priv); + ret = intel_reset_guc(&dev_priv->gt); if (ret) { DRM_ERROR("Failed to reset GuC, ret = %d\n", ret); return ret; diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c index 84fce379c0de..e5cd5d47e380 100644 --- a/drivers/gpu/drm/i915/selftests/i915_active.c +++ b/drivers/gpu/drm/i915/selftests/i915_active.c @@ -7,6 +7,7 @@ #include #include "gem/i915_gem_pm.h" +#include "gt/intel_gt.h" #include "i915_selftest.h" @@ -221,7 +222,7 @@ int i915_active_live_selftests(struct drm_i915_private *i915) SUBTEST(live_active_retire), }; - if (i915_terminally_wedged(i915)) + if (intel_gt_is_wedged(&i915->gt)) return 0; return i915_subtests(tests, 
i915); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c index b8ffae481730..bb6dd54a6ff3 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem.c @@ -8,6 +8,7 @@ #include "gem/selftests/igt_gem_utils.h" #include "gem/selftests/mock_context.h" +#include "gt/intel_gt.h" #include "i915_selftest.h" @@ -206,7 +207,7 @@ int i915_gem_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_gem_hibernate), }; - if (i915_terminally_wedged(i915)) + if (intel_gt_is_wedged(&i915->gt)) return 0; return i915_live_subtests(tests, i915); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c index a3cb0aade6f1..b6449d0a8c17 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c @@ -25,6 +25,7 @@ #include "gem/i915_gem_pm.h" #include "gem/selftests/igt_gem_utils.h" #include "gem/selftests/mock_context.h" +#include "gt/intel_gt.h" #include "i915_selftest.h" @@ -557,7 +558,7 @@ int i915_gem_evict_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_evict_contexts), }; - if (i915_terminally_wedged(i915)) + if (intel_gt_is_wedged(&i915->gt)) return 0; return i915_subtests(tests, i915); diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 1bbfc43d4a9e..86c299663934 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -380,7 +380,7 @@ static int __igt_breadcrumbs_smoketest(void *arg) t->engine->name); GEM_TRACE_DUMP(); - i915_gem_set_wedged(t->engine->i915); + intel_gt_set_wedged(t->engine->gt); GEM_BUG_ON(!i915_request_completed(rq)); i915_sw_fence_wait(wait); err = -EIO; @@ -1234,7 +1234,7 @@ int i915_request_live_selftests(struct drm_i915_private *i915) SUBTEST(live_breadcrumbs_smoketest), }; - if (i915_terminally_wedged(i915)) + if (intel_gt_is_wedged(&i915->gt)) return 0; return i915_subtests(tests, i915); diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c index f46ccf817ad5..db9c645bbdfe 100644 --- a/drivers/gpu/drm/i915/selftests/i915_selftest.c +++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c @@ -256,7 +256,7 @@ int __i915_live_setup(void *data) { struct drm_i915_private *i915 = data; - return i915_terminally_wedged(i915); + return intel_gt_terminally_wedged(&i915->gt); } int __i915_live_teardown(int err, void *data) @@ -273,6 +273,27 @@ int __i915_live_teardown(int err, void *data) return err; } +int __intel_gt_live_setup(void *data) +{ + struct intel_gt *gt = data; + + return intel_gt_terminally_wedged(gt); +} + +int __intel_gt_live_teardown(int err, void *data) +{ + struct intel_gt *gt = data; + + mutex_lock(>->i915->drm.struct_mutex); + if (igt_flush_test(gt->i915, I915_WAIT_LOCKED)) + err = -EIO; + mutex_unlock(>->i915->drm.struct_mutex); + + i915_gem_drain_freed_objects(gt->i915); + + return err; +} + int __i915_subtests(const char *caller, int (*setup)(void *data), int (*teardown)(int err, void *data), diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c index 5bfd1b2626a2..d3b5eb402d33 100644 --- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c +++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c @@ -5,6 +5,7 @@ */ #include "gem/i915_gem_context.h" +#include "gt/intel_gt.h" #include "i915_drv.h" #include "i915_selftest.h" @@ -13,7 +14,7 @@ int 
igt_flush_test(struct drm_i915_private *i915, unsigned int flags) { - int ret = i915_terminally_wedged(i915) ? -EIO : 0; + int ret = intel_gt_is_wedged(&i915->gt) ? -EIO : 0; int repeat = !!(flags & I915_WAIT_LOCKED); cond_resched(); @@ -27,7 +28,7 @@ int igt_flush_test(struct drm_i915_private *i915, unsigned int flags) __builtin_return_address(0)); GEM_TRACE_DUMP(); - i915_gem_set_wedged(i915); + intel_gt_set_wedged(&i915->gt); repeat = 0; ret = -EIO; } diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.c b/drivers/gpu/drm/i915/selftests/igt_reset.c index 587df6fd4ffe..7ec8f8b049c6 100644 --- a/drivers/gpu/drm/i915/selftests/igt_reset.c +++ b/drivers/gpu/drm/i915/selftests/igt_reset.c @@ -7,47 +7,45 @@ #include "igt_reset.h" #include "gt/intel_engine.h" +#include "gt/intel_gt.h" #include "../i915_drv.h" -void igt_global_reset_lock(struct drm_i915_private *i915) +void igt_global_reset_lock(struct intel_gt *gt) { struct intel_engine_cs *engine; enum intel_engine_id id; - pr_debug("%s: current gpu_error=%08lx\n", - __func__, i915->gpu_error.flags); + pr_debug("%s: current gpu_error=%08lx\n", __func__, gt->reset.flags); - while (test_and_set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags)) - wait_event(i915->gpu_error.reset_queue, - !test_bit(I915_RESET_BACKOFF, - &i915->gpu_error.flags)); + while (test_and_set_bit(I915_RESET_BACKOFF, >->reset.flags)) + wait_event(gt->reset.queue, + !test_bit(I915_RESET_BACKOFF, >->reset.flags)); - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt->i915, id) { while (test_and_set_bit(I915_RESET_ENGINE + id, - &i915->gpu_error.flags)) - wait_on_bit(&i915->gpu_error.flags, - I915_RESET_ENGINE + id, + >->reset.flags)) + wait_on_bit(>->reset.flags, I915_RESET_ENGINE + id, TASK_UNINTERRUPTIBLE); } } -void igt_global_reset_unlock(struct drm_i915_private *i915) +void igt_global_reset_unlock(struct intel_gt *gt) { struct intel_engine_cs *engine; enum intel_engine_id id; - for_each_engine(engine, i915, id) - clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); + for_each_engine(engine, gt->i915, id) + clear_bit(I915_RESET_ENGINE + id, >->reset.flags); - clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags); - wake_up_all(&i915->gpu_error.reset_queue); + clear_bit(I915_RESET_BACKOFF, >->reset.flags); + wake_up_all(>->reset.queue); } -bool igt_force_reset(struct drm_i915_private *i915) +bool igt_force_reset(struct intel_gt *gt) { - i915_gem_set_wedged(i915); - i915_reset(i915, 0, NULL); + intel_gt_set_wedged(gt); + intel_gt_reset(gt, 0, NULL); - return !i915_reset_failed(i915); + return !intel_gt_is_wedged(gt); } diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.h b/drivers/gpu/drm/i915/selftests/igt_reset.h index 363bd853e50f..851873b67ab3 100644 --- a/drivers/gpu/drm/i915/selftests/igt_reset.h +++ b/drivers/gpu/drm/i915/selftests/igt_reset.h @@ -7,10 +7,12 @@ #ifndef __I915_SELFTESTS_IGT_RESET_H__ #define __I915_SELFTESTS_IGT_RESET_H__ -#include "../i915_drv.h" +#include -void igt_global_reset_lock(struct drm_i915_private *i915); -void igt_global_reset_unlock(struct drm_i915_private *i915); -bool igt_force_reset(struct drm_i915_private *i915); +struct intel_gt; + +void igt_global_reset_lock(struct intel_gt *gt); +void igt_global_reset_unlock(struct intel_gt *gt); +bool igt_force_reset(struct intel_gt *gt); #endif diff --git a/drivers/gpu/drm/i915/selftests/igt_wedge_me.h b/drivers/gpu/drm/i915/selftests/igt_wedge_me.h deleted file mode 100644 index 08e5ff11bbd9..000000000000 --- a/drivers/gpu/drm/i915/selftests/igt_wedge_me.h +++ /dev/null 
@@ -1,58 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation - */ - -#ifndef IGT_WEDGE_ME_H -#define IGT_WEDGE_ME_H - -#include - -#include "../i915_gem.h" - -struct drm_i915_private; - -struct igt_wedge_me { - struct delayed_work work; - struct drm_i915_private *i915; - const char *name; -}; - -static void __igt_wedge_me(struct work_struct *work) -{ - struct igt_wedge_me *w = container_of(work, typeof(*w), work.work); - - pr_err("%s timed out, cancelling test.\n", w->name); - - GEM_TRACE("%s timed out.\n", w->name); - GEM_TRACE_DUMP(); - - i915_gem_set_wedged(w->i915); -} - -static void __igt_init_wedge(struct igt_wedge_me *w, - struct drm_i915_private *i915, - long timeout, - const char *name) -{ - w->i915 = i915; - w->name = name; - - INIT_DELAYED_WORK_ONSTACK(&w->work, __igt_wedge_me); - schedule_delayed_work(&w->work, timeout); -} - -static void __igt_fini_wedge(struct igt_wedge_me *w) -{ - cancel_delayed_work_sync(&w->work); - destroy_delayed_work_on_stack(&w->work); - w->i915 = NULL; -} - -#define igt_wedge_on_timeout(W, DEV, TIMEOUT) \ - for (__igt_init_wedge((W), (DEV), (TIMEOUT), __func__); \ - (W)->i915; \ - __igt_fini_wedge((W))) - -#endif /* IGT_WEDGE_ME_H */ diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 2741805b56c2..fd4cc4809eb8 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -183,11 +183,6 @@ struct drm_i915_private *mock_gem_device(void) intel_gt_init_early(&i915->gt, i915); atomic_inc(&i915->gt.wakeref.count); /* disable; no hw support */ - init_waitqueue_head(&i915->gpu_error.wait_queue); - init_waitqueue_head(&i915->gpu_error.reset_queue); - init_srcu_struct(&i915->gpu_error.reset_backoff_srcu); - mutex_init(&i915->gpu_error.wedge_mutex); - i915->wq = alloc_ordered_workqueue("mock", 0); if (!i915->wq) goto err_drv; -- cgit v1.2.3 From a30eff49d08ca32b6c4e573a4b07c13b4881f2d3 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 13 Jul 2019 11:00:06 +0100 Subject: drm/i915/guc: Use system workqueue for log capture MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We only employ a single task for log capture, and created a workqueue for the purpose of ensuring we had a high priority queue for low latency. We can simply use the system_highpri_wq and avoid the complication with creating our own admist the maze of mutexes. (Currently we create the wq early before we even know we need it in order to avoid trying to create it on demand while we hold the logging mutex.) 
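For reference, the replacement pattern is just the stock workqueue API; a minimal sketch (illustrative only — the struct and function names below are not the driver's) looks like:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct log_relay {
	struct work_struct flush_work;	/* one outstanding flush at a time */
};

static void log_relay_flush_worker(struct work_struct *work)
{
	struct log_relay *relay = container_of(work, struct log_relay, flush_work);

	/* Read out the log buffer and ack the firmware here. */
	(void)relay;
}

static void log_relay_init(struct log_relay *relay)
{
	INIT_WORK(&relay->flush_work, log_relay_flush_worker);
}

static void log_relay_flush(struct log_relay *relay)
{
	/*
	 * Nothing to allocate, flush or destroy on our side:
	 * system_highpri_wq is shared and already runs its work items at
	 * high priority, so a driver-private WQ_HIGHPRI workqueue is
	 * unnecessary for a single work item.
	 */
	queue_work(system_highpri_wq, &relay->flush_work);
}
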
Signed-off-by: Chris Wilson Cc: Daniele Ceraolo Spurio Cc: Michal Wajdeczko Cc: Michał Winiarski Reviewed-by: Daniele Ceraolo Spurio Reviewed-by: Michal Wajdeczko Link: https://patchwork.freedesktop.org/patch/msgid/20190713100016.8026-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_guc.c | 39 ------------------------------------ drivers/gpu/drm/i915/intel_guc_log.c | 4 ++-- drivers/gpu/drm/i915/intel_guc_log.h | 1 - 3 files changed, 2 insertions(+), 42 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c index 501b74f44374..183ab9b03ed0 100644 --- a/drivers/gpu/drm/i915/intel_guc.c +++ b/drivers/gpu/drm/i915/intel_guc.c @@ -99,47 +99,9 @@ void intel_guc_init_early(struct intel_guc *guc) } } -static int guc_init_wq(struct intel_guc *guc) -{ - /* - * GuC log buffer flush work item has to do register access to - * send the ack to GuC and this work item, if not synced before - * suspend, can potentially get executed after the GFX device is - * suspended. - * By marking the WQ as freezable, we don't have to bother about - * flushing of this work item from the suspend hooks, the pending - * work item if any will be either executed before the suspend - * or scheduled later on resume. This way the handling of work - * item can be kept same between system suspend & rpm suspend. - */ - guc->log.relay.flush_wq = - alloc_ordered_workqueue("i915-guc_log", - WQ_HIGHPRI | WQ_FREEZABLE); - if (!guc->log.relay.flush_wq) { - DRM_ERROR("Couldn't allocate workqueue for GuC log\n"); - return -ENOMEM; - } - - return 0; -} - -static void guc_fini_wq(struct intel_guc *guc) -{ - struct workqueue_struct *wq; - - wq = fetch_and_zero(&guc->log.relay.flush_wq); - if (wq) - destroy_workqueue(wq); -} - int intel_guc_init_misc(struct intel_guc *guc) { struct drm_i915_private *i915 = guc_to_i915(guc); - int ret; - - ret = guc_init_wq(guc); - if (ret) - return ret; intel_uc_fw_fetch(i915, &guc->fw); @@ -149,7 +111,6 @@ int intel_guc_init_misc(struct intel_guc *guc) void intel_guc_fini_misc(struct intel_guc *guc) { intel_uc_fw_cleanup_fetch(&guc->fw); - guc_fini_wq(guc); } static int guc_shared_data_create(struct intel_guc *guc) diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c index 06c09ac52c74..9be5d3a6fb5f 100644 --- a/drivers/gpu/drm/i915/intel_guc_log.c +++ b/drivers/gpu/drm/i915/intel_guc_log.c @@ -578,7 +578,7 @@ int intel_guc_log_relay_open(struct intel_guc_log *log) * the flush notification. This means that we need to unconditionally * flush on relay enabling, since GuC only notifies us once. 
*/ - queue_work(log->relay.flush_wq, &log->relay.flush_work); + queue_work(system_highpri_wq, &log->relay.flush_work); return 0; @@ -628,5 +628,5 @@ void intel_guc_log_relay_close(struct intel_guc_log *log) void intel_guc_log_handle_flush_event(struct intel_guc_log *log) { - queue_work(log->relay.flush_wq, &log->relay.flush_work); + queue_work(system_highpri_wq, &log->relay.flush_work); } diff --git a/drivers/gpu/drm/i915/intel_guc_log.h b/drivers/gpu/drm/i915/intel_guc_log.h index 7bc763f10c03..1969572f1f79 100644 --- a/drivers/gpu/drm/i915/intel_guc_log.h +++ b/drivers/gpu/drm/i915/intel_guc_log.h @@ -66,7 +66,6 @@ struct intel_guc_log { struct i915_vma *vma; struct { void *buf_addr; - struct workqueue_struct *flush_wq; struct work_struct flush_work; struct rchan *channel; struct mutex lock; -- cgit v1.2.3 From e3f503f1c7331a4f895b97426e7d769a89e864db Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Sat, 13 Jul 2019 11:00:07 +0100 Subject: drm/i915/uc: replace uc init/fini misc The "misc" terminology doesn't clearly explain what we intend to cover in this phase. The only thing we used ot do in there apart from FW fetch was initializing the log workqueue, with the latter being required only in the very rare case where we enable the log relay. As we no longer create our own workqueue, piggybacking on the system_highpri_wq instead, we can rename the function to clarify that they only fetch/release the blobs. v2: only create log wq when needed (Michal), reword commit msg accordingly v3: after rebase the wq is gone, reword commit msg accordingly Signed-off-by: Daniele Ceraolo Spurio Cc: Michal Wajdeczko Reviewed-by: Chris Wilson Reviewed-by: Michal Wajdeczko Link: https://patchwork.freedesktop.org/patch/msgid/20190713100016.8026-2-chris@chris-wilson.co.uk Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 12 +++++------- drivers/gpu/drm/i915/intel_guc.c | 14 -------------- drivers/gpu/drm/i915/intel_guc.h | 2 -- drivers/gpu/drm/i915/intel_huc.c | 8 -------- drivers/gpu/drm/i915/intel_huc.h | 6 ------ drivers/gpu/drm/i915/intel_uc.c | 34 ++++++++-------------------------- drivers/gpu/drm/i915/intel_uc.h | 4 ++-- 7 files changed, 15 insertions(+), 65 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index e24955b5ebc2..f62dbd8c86de 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1433,13 +1433,11 @@ int i915_gem_init(struct drm_i915_private *dev_priv) if (ret) return ret; - ret = intel_uc_init_misc(dev_priv); - if (ret) - return ret; + intel_uc_fetch_firmwares(dev_priv); ret = intel_wopcm_init(&dev_priv->wopcm); if (ret) - goto err_uc_misc; + goto err_uc_fw; /* This is just a security blanket to placate dragons. 
* On some systems, we very sporadically observe that the first TLBs @@ -1565,8 +1563,8 @@ err_unlock: intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); mutex_unlock(&dev_priv->drm.struct_mutex); -err_uc_misc: - intel_uc_fini_misc(dev_priv); +err_uc_fw: + intel_uc_cleanup_firmwares(dev_priv); if (ret != -EIO) { i915_gem_cleanup_userptr(dev_priv); @@ -1632,7 +1630,7 @@ void i915_gem_driver_release(struct drm_i915_private *dev_priv) intel_cleanup_gt_powersave(dev_priv); - intel_uc_fini_misc(dev_priv); + intel_uc_cleanup_firmwares(dev_priv); i915_gem_cleanup_userptr(dev_priv); intel_timelines_fini(dev_priv); diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c index 183ab9b03ed0..4173b35bf104 100644 --- a/drivers/gpu/drm/i915/intel_guc.c +++ b/drivers/gpu/drm/i915/intel_guc.c @@ -99,20 +99,6 @@ void intel_guc_init_early(struct intel_guc *guc) } } -int intel_guc_init_misc(struct intel_guc *guc) -{ - struct drm_i915_private *i915 = guc_to_i915(guc); - - intel_uc_fw_fetch(i915, &guc->fw); - - return 0; -} - -void intel_guc_fini_misc(struct intel_guc *guc) -{ - intel_uc_fw_cleanup_fetch(&guc->fw); -} - static int guc_shared_data_create(struct intel_guc *guc) { struct i915_vma *vma; diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h index ec1038c1f50e..91d538fd5f65 100644 --- a/drivers/gpu/drm/i915/intel_guc.h +++ b/drivers/gpu/drm/i915/intel_guc.h @@ -153,10 +153,8 @@ static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc, void intel_guc_init_early(struct intel_guc *guc); void intel_guc_init_send_regs(struct intel_guc *guc); void intel_guc_init_params(struct intel_guc *guc); -int intel_guc_init_misc(struct intel_guc *guc); int intel_guc_init(struct intel_guc *guc); void intel_guc_fini(struct intel_guc *guc); -void intel_guc_fini_misc(struct intel_guc *guc); int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len, u32 *response_buf, u32 response_buf_size); int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len, diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c index fb6f693d3cac..2a41ee89a16d 100644 --- a/drivers/gpu/drm/i915/intel_huc.c +++ b/drivers/gpu/drm/i915/intel_huc.c @@ -44,14 +44,6 @@ void intel_huc_init_early(struct intel_huc *huc) } } -int intel_huc_init_misc(struct intel_huc *huc) -{ - struct drm_i915_private *i915 = huc_to_i915(huc); - - intel_uc_fw_fetch(i915, &huc->fw); - return 0; -} - static int intel_huc_rsa_data_create(struct intel_huc *huc) { struct drm_i915_private *i915 = huc_to_i915(huc); diff --git a/drivers/gpu/drm/i915/intel_huc.h b/drivers/gpu/drm/i915/intel_huc.h index 2a6c94e79f17..9fa3d4629f2e 100644 --- a/drivers/gpu/drm/i915/intel_huc.h +++ b/drivers/gpu/drm/i915/intel_huc.h @@ -45,17 +45,11 @@ struct intel_huc { }; void intel_huc_init_early(struct intel_huc *huc); -int intel_huc_init_misc(struct intel_huc *huc); int intel_huc_init(struct intel_huc *huc); void intel_huc_fini(struct intel_huc *huc); int intel_huc_auth(struct intel_huc *huc); int intel_huc_check_status(struct intel_huc *huc); -static inline void intel_huc_fini_misc(struct intel_huc *huc) -{ - intel_uc_fw_cleanup_fetch(&huc->fw); -} - static inline int intel_huc_sanitize(struct intel_huc *huc) { intel_uc_fw_sanitize(&huc->fw); diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c index bc589efd3a6d..00baaccc2f1c 100644 --- a/drivers/gpu/drm/i915/intel_uc.c +++ b/drivers/gpu/drm/i915/intel_uc.c @@ -349,44 +349,26 @@ static void 
guc_disable_communication(struct intel_guc *guc) DRM_INFO("GuC communication disabled\n"); } -int intel_uc_init_misc(struct drm_i915_private *i915) +void intel_uc_fetch_firmwares(struct drm_i915_private *i915) { - struct intel_guc *guc = &i915->guc; - struct intel_huc *huc = &i915->huc; - int ret; - if (!USES_GUC(i915)) - return 0; - - ret = intel_guc_init_misc(guc); - if (ret) - return ret; - - if (USES_HUC(i915)) { - ret = intel_huc_init_misc(huc); - if (ret) - goto err_guc; - } + return; - return 0; + intel_uc_fw_fetch(i915, &i915->guc.fw); -err_guc: - intel_guc_fini_misc(guc); - return ret; + if (USES_HUC(i915)) + intel_uc_fw_fetch(i915, &i915->huc.fw); } -void intel_uc_fini_misc(struct drm_i915_private *i915) +void intel_uc_cleanup_firmwares(struct drm_i915_private *i915) { - struct intel_guc *guc = &i915->guc; - struct intel_huc *huc = &i915->huc; - if (!USES_GUC(i915)) return; if (USES_HUC(i915)) - intel_huc_fini_misc(huc); + intel_uc_fw_cleanup_fetch(&i915->huc.fw); - intel_guc_fini_misc(guc); + intel_uc_fw_cleanup_fetch(&i915->guc.fw); } int intel_uc_init(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h index 3ea06c87dfcd..5a1383e192dd 100644 --- a/drivers/gpu/drm/i915/intel_uc.h +++ b/drivers/gpu/drm/i915/intel_uc.h @@ -31,8 +31,8 @@ void intel_uc_init_early(struct drm_i915_private *dev_priv); void intel_uc_cleanup_early(struct drm_i915_private *dev_priv); void intel_uc_init_mmio(struct drm_i915_private *dev_priv); -int intel_uc_init_misc(struct drm_i915_private *dev_priv); -void intel_uc_fini_misc(struct drm_i915_private *dev_priv); +void intel_uc_fetch_firmwares(struct drm_i915_private *dev_priv); +void intel_uc_cleanup_firmwares(struct drm_i915_private *dev_priv); void intel_uc_sanitize(struct drm_i915_private *dev_priv); int intel_uc_init_hw(struct drm_i915_private *dev_priv); void intel_uc_fini_hw(struct drm_i915_private *dev_priv); -- cgit v1.2.3 From bb2881f8bdde127188bb5c3e6382a6157668d579 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Sat, 13 Jul 2019 11:00:08 +0100 Subject: drm/i915/uc: introduce intel_uc_fw_supported Instead of always checking in the device config is GuC and HuC are supported or not, we can save the state in the uc_fw structure and avoid going through i915 every time from the low-level uc management code. while at it FIRMWARE_NONE has been renamed to better indicate that we haven't started the fetch/load yet, but we might have already selected a blob. 
Signed-off-by: Daniele Ceraolo Spurio Cc: Michal Wajdeczko Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190713100016.8026-3-chris@chris-wilson.co.uk Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_guc_fw.c | 6 +++++- drivers/gpu/drm/i915/intel_huc_fw.c | 6 +++++- drivers/gpu/drm/i915/intel_uc.c | 25 +++++++++++++------------ drivers/gpu/drm/i915/intel_uc_fw.c | 4 +++- drivers/gpu/drm/i915/intel_uc_fw.h | 30 +++++++++++++++++++++++++----- 5 files changed, 51 insertions(+), 20 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c index db1e0daca7db..ee95d4960c5c 100644 --- a/drivers/gpu/drm/i915/intel_guc_fw.c +++ b/drivers/gpu/drm/i915/intel_guc_fw.c @@ -79,8 +79,12 @@ static void guc_fw_select(struct intel_uc_fw *guc_fw) GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC); - if (!HAS_GUC(i915)) + if (!HAS_GUC(i915)) { + guc_fw->fetch_status = INTEL_UC_FIRMWARE_NOT_SUPPORTED; return; + } + + guc_fw->fetch_status = INTEL_UC_FIRMWARE_NOT_STARTED; if (i915_modparams.guc_firmware_path) { guc_fw->path = i915_modparams.guc_firmware_path; diff --git a/drivers/gpu/drm/i915/intel_huc_fw.c b/drivers/gpu/drm/i915/intel_huc_fw.c index 05cbf8338f53..06e726ba9863 100644 --- a/drivers/gpu/drm/i915/intel_huc_fw.c +++ b/drivers/gpu/drm/i915/intel_huc_fw.c @@ -73,8 +73,12 @@ static void huc_fw_select(struct intel_uc_fw *huc_fw) GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC); - if (!HAS_HUC(dev_priv)) + if (!HAS_HUC(dev_priv)) { + huc_fw->fetch_status = INTEL_UC_FIRMWARE_NOT_SUPPORTED; return; + } + + huc_fw->fetch_status = INTEL_UC_FIRMWARE_NOT_STARTED; if (i915_modparams.huc_firmware_path) { huc_fw->path = i915_modparams.huc_firmware_path; diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c index 00baaccc2f1c..e2b20f8e88cf 100644 --- a/drivers/gpu/drm/i915/intel_uc.c +++ b/drivers/gpu/drm/i915/intel_uc.c @@ -75,7 +75,8 @@ static int __get_default_guc_log_level(struct drm_i915_private *i915) { int guc_log_level; - if (!HAS_GUC(i915) || !intel_uc_is_using_guc(i915)) + if (!intel_uc_fw_supported(&i915->guc.fw) || + !intel_uc_is_using_guc(i915)) guc_log_level = GUC_LOG_LEVEL_DISABLED; else if (IS_ENABLED(CONFIG_DRM_I915_DEBUG) || IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) @@ -123,16 +124,16 @@ static void sanitize_options_early(struct drm_i915_private *i915) if (intel_uc_is_using_guc(i915) && !intel_uc_fw_is_selected(guc_fw)) { DRM_WARN("Incompatible option detected: %s=%d, %s!\n", "enable_guc", i915_modparams.enable_guc, - !HAS_GUC(i915) ? "no GuC hardware" : - "no GuC firmware"); + !intel_uc_fw_supported(guc_fw) ? + "no GuC hardware" : "no GuC firmware"); } /* Verify HuC firmware availability */ if (intel_uc_is_using_huc(i915) && !intel_uc_fw_is_selected(huc_fw)) { DRM_WARN("Incompatible option detected: %s=%d, %s!\n", "enable_guc", i915_modparams.enable_guc, - !HAS_HUC(i915) ? "no HuC hardware" : - "no HuC firmware"); + !intel_uc_fw_supported(huc_fw) ? + "no HuC hardware" : "no HuC firmware"); } /* XXX: GuC submission is unavailable for now */ @@ -152,8 +153,8 @@ static void sanitize_options_early(struct drm_i915_private *i915) if (i915_modparams.guc_log_level > 0 && !intel_uc_is_using_guc(i915)) { DRM_WARN("Incompatible option detected: %s=%d, %s!\n", "guc_log_level", i915_modparams.guc_log_level, - !HAS_GUC(i915) ? "no GuC hardware" : - "GuC not enabled"); + !intel_uc_fw_supported(guc_fw) ? 
+ "no GuC hardware" : "GuC not enabled"); i915_modparams.guc_log_level = 0; } @@ -380,7 +381,7 @@ int intel_uc_init(struct drm_i915_private *i915) if (!USES_GUC(i915)) return 0; - if (!HAS_GUC(i915)) + if (!intel_uc_fw_supported(&guc->fw)) return -ENODEV; /* XXX: GuC submission is unavailable for now */ @@ -423,7 +424,7 @@ void intel_uc_fini(struct drm_i915_private *i915) if (!USES_GUC(i915)) return; - GEM_BUG_ON(!HAS_GUC(i915)); + GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw)); if (USES_GUC_SUBMISSION(i915)) intel_guc_submission_fini(guc); @@ -439,7 +440,7 @@ static void __uc_sanitize(struct drm_i915_private *i915) struct intel_guc *guc = &i915->guc; struct intel_huc *huc = &i915->huc; - GEM_BUG_ON(!HAS_GUC(i915)); + GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw)); intel_huc_sanitize(huc); intel_guc_sanitize(guc); @@ -464,7 +465,7 @@ int intel_uc_init_hw(struct drm_i915_private *i915) if (!USES_GUC(i915)) return 0; - GEM_BUG_ON(!HAS_GUC(i915)); + GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw)); guc_reset_interrupts(guc); @@ -561,7 +562,7 @@ void intel_uc_fini_hw(struct drm_i915_private *i915) if (!intel_guc_is_loaded(guc)) return; - GEM_BUG_ON(!HAS_GUC(i915)); + GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw)); if (USES_GUC_SUBMISSION(i915)) intel_guc_submission_disable(guc); diff --git a/drivers/gpu/drm/i915/intel_uc_fw.c b/drivers/gpu/drm/i915/intel_uc_fw.c index f342ddd47df8..8ce7210907c0 100644 --- a/drivers/gpu/drm/i915/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/intel_uc_fw.c @@ -47,6 +47,8 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv, size_t size; int err; + GEM_BUG_ON(!intel_uc_fw_supported(uc_fw)); + if (!uc_fw->path) { dev_info(dev_priv->drm.dev, "%s: No firmware was defined for %s!\n", @@ -328,7 +330,7 @@ void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw) if (obj) i915_gem_object_put(obj); - uc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE; + uc_fw->fetch_status = INTEL_UC_FIRMWARE_NOT_STARTED; } /** diff --git a/drivers/gpu/drm/i915/intel_uc_fw.h b/drivers/gpu/drm/i915/intel_uc_fw.h index 24e66469153c..833d04d06576 100644 --- a/drivers/gpu/drm/i915/intel_uc_fw.h +++ b/drivers/gpu/drm/i915/intel_uc_fw.h @@ -26,6 +26,7 @@ #define _INTEL_UC_FW_H_ #include +#include "i915_gem.h" struct drm_printer; struct drm_i915_private; @@ -34,8 +35,10 @@ struct drm_i915_private; #define INTEL_UC_FIRMWARE_URL "https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/tree/i915" enum intel_uc_fw_status { + INTEL_UC_FIRMWARE_NOT_SUPPORTED = -2, /* no uc HW */ INTEL_UC_FIRMWARE_FAIL = -1, - INTEL_UC_FIRMWARE_NONE = 0, + INTEL_UC_FIRMWARE_UNINITIALIZED = 0, /* used to catch checks done too early */ + INTEL_UC_FIRMWARE_NOT_STARTED = 1, INTEL_UC_FIRMWARE_PENDING, INTEL_UC_FIRMWARE_SUCCESS }; @@ -79,10 +82,14 @@ static inline const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status) { switch (status) { + case INTEL_UC_FIRMWARE_NOT_SUPPORTED: + return "N/A - uc HW not available"; case INTEL_UC_FIRMWARE_FAIL: return "FAIL"; - case INTEL_UC_FIRMWARE_NONE: - return "NONE"; + case INTEL_UC_FIRMWARE_UNINITIALIZED: + return "UNINITIALIZED"; + case INTEL_UC_FIRMWARE_NOT_STARTED: + return "NOT_STARTED"; case INTEL_UC_FIRMWARE_PENDING: return "PENDING"; case INTEL_UC_FIRMWARE_SUCCESS: @@ -106,9 +113,15 @@ static inline void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type) { + /* + * we use FIRMWARE_UNINITIALIZED to detect checks against fetch_status + * before we're looked at the HW caps to see if we have uc support + */ + 
BUILD_BUG_ON(INTEL_UC_FIRMWARE_UNINITIALIZED); + uc_fw->path = NULL; - uc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE; - uc_fw->load_status = INTEL_UC_FIRMWARE_NONE; + uc_fw->fetch_status = INTEL_UC_FIRMWARE_UNINITIALIZED; + uc_fw->load_status = INTEL_UC_FIRMWARE_NOT_STARTED; uc_fw->type = type; } @@ -122,6 +135,13 @@ static inline bool intel_uc_fw_is_loaded(struct intel_uc_fw *uc_fw) return uc_fw->load_status == INTEL_UC_FIRMWARE_SUCCESS; } +static inline bool intel_uc_fw_supported(struct intel_uc_fw *uc_fw) +{ + /* shouldn't call this before checking hw/blob availability */ + GEM_BUG_ON(uc_fw->fetch_status == INTEL_UC_FIRMWARE_UNINITIALIZED); + return uc_fw->fetch_status != INTEL_UC_FIRMWARE_NOT_SUPPORTED; +} + static inline void intel_uc_fw_sanitize(struct intel_uc_fw *uc_fw) { if (intel_uc_fw_is_loaded(uc_fw)) -- cgit v1.2.3 From 9cbd51c2c0edbafdaab7f0fa7569d1f455113a9b Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Sat, 13 Jul 2019 11:00:09 +0100 Subject: drm/i915/guc: move guc irq functions to intel_guc parameter No functional change, just moving the guc_to_i915 from the caller into the irq function. This will help with the upcoming move of guc under intel_gt. Signed-off-by: Daniele Ceraolo Spurio Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190713100016.8026-4-chris@chris-wilson.co.uk Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_irq.c | 40 ++++++++++++++++++++++++++-------------- drivers/gpu/drm/i915/i915_irq.h | 13 +++++++------ drivers/gpu/drm/i915/intel_guc.h | 6 +++--- drivers/gpu/drm/i915/intel_uc.c | 6 +++--- 4 files changed, 39 insertions(+), 26 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 7c5ba5cbea34..831d185c07d2 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -599,8 +599,10 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv) gen6_reset_rps_interrupts(dev_priv); } -void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv) +void gen9_reset_guc_interrupts(struct intel_guc *guc) { + struct drm_i915_private *dev_priv = guc_to_i915(guc); + assert_rpm_wakelock_held(&dev_priv->runtime_pm); spin_lock_irq(&dev_priv->irq_lock); @@ -608,61 +610,71 @@ void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv) spin_unlock_irq(&dev_priv->irq_lock); } -void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv) +void gen9_enable_guc_interrupts(struct intel_guc *guc) { + struct drm_i915_private *dev_priv = guc_to_i915(guc); + assert_rpm_wakelock_held(&dev_priv->runtime_pm); spin_lock_irq(&dev_priv->irq_lock); - if (!dev_priv->guc.interrupts.enabled) { + if (!guc->interrupts.enabled) { WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_guc_events); - dev_priv->guc.interrupts.enabled = true; + guc->interrupts.enabled = true; gen6_enable_pm_irq(&dev_priv->gt, dev_priv->pm_guc_events); } spin_unlock_irq(&dev_priv->irq_lock); } -void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv) +void gen9_disable_guc_interrupts(struct intel_guc *guc) { + struct drm_i915_private *dev_priv = guc_to_i915(guc); + assert_rpm_wakelock_held(&dev_priv->runtime_pm); spin_lock_irq(&dev_priv->irq_lock); - dev_priv->guc.interrupts.enabled = false; + guc->interrupts.enabled = false; gen6_disable_pm_irq(&dev_priv->gt, dev_priv->pm_guc_events); spin_unlock_irq(&dev_priv->irq_lock); intel_synchronize_irq(dev_priv); - gen9_reset_guc_interrupts(dev_priv); + 
gen9_reset_guc_interrupts(guc); } -void gen11_reset_guc_interrupts(struct drm_i915_private *i915) +void gen11_reset_guc_interrupts(struct intel_guc *guc) { + struct drm_i915_private *i915 = guc_to_i915(guc); + spin_lock_irq(&i915->irq_lock); gen11_reset_one_iir(&i915->gt, 0, GEN11_GUC); spin_unlock_irq(&i915->irq_lock); } -void gen11_enable_guc_interrupts(struct drm_i915_private *dev_priv) +void gen11_enable_guc_interrupts(struct intel_guc *guc) { + struct drm_i915_private *dev_priv = guc_to_i915(guc); + spin_lock_irq(&dev_priv->irq_lock); - if (!dev_priv->guc.interrupts.enabled) { + if (!guc->interrupts.enabled) { u32 events = REG_FIELD_PREP(ENGINE1_MASK, GEN11_GUC_INTR_GUC2HOST); WARN_ON_ONCE(gen11_reset_one_iir(&dev_priv->gt, 0, GEN11_GUC)); I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, events); I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~events); - dev_priv->guc.interrupts.enabled = true; + guc->interrupts.enabled = true; } spin_unlock_irq(&dev_priv->irq_lock); } -void gen11_disable_guc_interrupts(struct drm_i915_private *dev_priv) +void gen11_disable_guc_interrupts(struct intel_guc *guc) { + struct drm_i915_private *dev_priv = guc_to_i915(guc); + spin_lock_irq(&dev_priv->irq_lock); - dev_priv->guc.interrupts.enabled = false; + guc->interrupts.enabled = false; I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~0); I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, 0); @@ -670,7 +682,7 @@ void gen11_disable_guc_interrupts(struct drm_i915_private *dev_priv) spin_unlock_irq(&dev_priv->irq_lock); intel_synchronize_irq(dev_priv); - gen11_reset_guc_interrupts(dev_priv); + gen11_reset_guc_interrupts(guc); } /** diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h index 4f803f910177..8918809cd805 100644 --- a/drivers/gpu/drm/i915/i915_irq.h +++ b/drivers/gpu/drm/i915/i915_irq.h @@ -12,6 +12,7 @@ struct drm_i915_private; struct intel_crtc; +struct intel_guc; void intel_irq_init(struct drm_i915_private *dev_priv); void intel_irq_fini(struct drm_i915_private *dev_priv); @@ -112,12 +113,12 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, u8 pipe_mask); void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, u8 pipe_mask); -void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv); -void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv); -void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv); -void gen11_reset_guc_interrupts(struct drm_i915_private *i915); -void gen11_enable_guc_interrupts(struct drm_i915_private *i915); -void gen11_disable_guc_interrupts(struct drm_i915_private *i915); +void gen9_reset_guc_interrupts(struct intel_guc *guc); +void gen9_enable_guc_interrupts(struct intel_guc *guc); +void gen9_disable_guc_interrupts(struct intel_guc *guc); +void gen11_reset_guc_interrupts(struct intel_guc *guc); +void gen11_enable_guc_interrupts(struct intel_guc *guc); +void gen11_disable_guc_interrupts(struct intel_guc *guc); bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, bool in_vblank_irq, int *vpos, int *hpos, diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h index 91d538fd5f65..6852352381ce 100644 --- a/drivers/gpu/drm/i915/intel_guc.h +++ b/drivers/gpu/drm/i915/intel_guc.h @@ -56,9 +56,9 @@ struct intel_guc { struct { bool enabled; - void (*reset)(struct drm_i915_private *i915); - void (*enable)(struct drm_i915_private *i915); - void (*disable)(struct drm_i915_private *i915); + void (*reset)(struct intel_guc *guc); + void (*enable)(struct intel_guc *guc); + void (*disable)(struct 
intel_guc *guc); } interrupts; struct i915_vma *ads_vma; diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c index e2b20f8e88cf..4ea7661705b1 100644 --- a/drivers/gpu/drm/i915/intel_uc.c +++ b/drivers/gpu/drm/i915/intel_uc.c @@ -272,17 +272,17 @@ static void guc_handle_mmio_msg(struct intel_guc *guc) static void guc_reset_interrupts(struct intel_guc *guc) { - guc->interrupts.reset(guc_to_i915(guc)); + guc->interrupts.reset(guc); } static void guc_enable_interrupts(struct intel_guc *guc) { - guc->interrupts.enable(guc_to_i915(guc)); + guc->interrupts.enable(guc); } static void guc_disable_interrupts(struct intel_guc *guc) { - guc->interrupts.disable(guc_to_i915(guc)); + guc->interrupts.disable(guc); } static int guc_enable_communication(struct intel_guc *guc) -- cgit v1.2.3 From 633023a4e61885c0c9ceefed0865ec0063940616 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Sat, 13 Jul 2019 11:00:10 +0100 Subject: drm/i915/guc: unify guc irq handling The 16-bit guc irq vector is unchanged across gens, the only thing that moved is its position (from the upper 16 bits of the PM regs to its own register). Instead of duplicating all defines and functions to handle the 2 different positions, we can work on the vector and shift it as appropriate. While at it, update the handler to work on intel_guc. Signed-off-by: Daniele Ceraolo Spurio Cc: Michal Wajdeczko Reviewed-by: Michal Wajdeczko Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190713100016.8026-5-chris@chris-wilson.co.uk Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_irq.c | 24 +++++++++--------------- drivers/gpu/drm/i915/i915_reg.h | 10 ---------- drivers/gpu/drm/i915/intel_guc_reg.h | 32 ++++++++++++++++---------------- 3 files changed, 25 insertions(+), 41 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 831d185c07d2..42d6d8bfac70 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -264,7 +264,7 @@ static void gen2_irq_init(struct intel_uncore *uncore, gen2_irq_init((uncore), imr_val, ier_val) static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); -static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); +static void guc_irq_handler(struct intel_guc *guc, u16 guc_iir); /* For display hotplug interrupt */ static inline void @@ -658,8 +658,7 @@ void gen11_enable_guc_interrupts(struct intel_guc *guc) spin_lock_irq(&dev_priv->irq_lock); if (!guc->interrupts.enabled) { - u32 events = REG_FIELD_PREP(ENGINE1_MASK, - GEN11_GUC_INTR_GUC2HOST); + u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST); WARN_ON_ONCE(gen11_reset_one_iir(&dev_priv->gt, 0, GEN11_GUC)); I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, events); @@ -1656,7 +1655,7 @@ static void gen8_gt_irq_handler(struct drm_i915_private *i915, if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) { gen6_rps_irq_handler(i915, gt_iir[2]); - gen9_guc_irq_handler(i915, gt_iir[2]); + guc_irq_handler(&i915->guc, gt_iir[2] >> 16); } } @@ -1955,16 +1954,10 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir); } -static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir) +static void guc_irq_handler(struct intel_guc *guc, u16 iir) { - if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT) - intel_guc_to_host_event_handler(&dev_priv->guc); -} - -static void 
gen11_guc_irq_handler(struct drm_i915_private *i915, u16 iir) -{ - if (iir & GEN11_GUC_INTR_GUC2HOST) - intel_guc_to_host_event_handler(&i915->guc); + if (iir & GUC_INTR_GUC2HOST) + intel_guc_to_host_event_handler(guc); } static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv) @@ -3092,7 +3085,7 @@ gen11_other_irq_handler(struct intel_gt *gt, const u8 instance, struct drm_i915_private *i915 = gt->i915; if (instance == OTHER_GUC_INSTANCE) - return gen11_guc_irq_handler(i915, iir); + return guc_irq_handler(&i915->guc, iir); if (instance == OTHER_GTPM_INSTANCE) return gen11_rps_irq_handler(gt, iir); @@ -4764,8 +4757,9 @@ void intel_irq_init(struct drm_i915_private *dev_priv) for (i = 0; i < MAX_L3_SLICES; ++i) dev_priv->l3_parity.remap_info[i] = NULL; + /* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */ if (HAS_GUC_SCHED(dev_priv) && INTEL_GEN(dev_priv) < 11) - dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT; + dev_priv->pm_guc_events = GUC_INTR_GUC2HOST << 16; /* Let's track the enabled rps events */ if (IS_VALLEYVIEW(dev_priv)) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 0dd4506323f2..fdd9bc01e694 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -7357,16 +7357,6 @@ enum { #define GEN8_GT_IIR(which) _MMIO(0x44308 + (0x10 * (which))) #define GEN8_GT_IER(which) _MMIO(0x4430c + (0x10 * (which))) -#define GEN9_GUC_TO_HOST_INT_EVENT (1 << 31) -#define GEN9_GUC_EXEC_ERROR_EVENT (1 << 30) -#define GEN9_GUC_DISPLAY_EVENT (1 << 29) -#define GEN9_GUC_SEMA_SIGNAL_EVENT (1 << 28) -#define GEN9_GUC_IOMMU_MSG_EVENT (1 << 27) -#define GEN9_GUC_DB_RING_EVENT (1 << 26) -#define GEN9_GUC_DMA_DONE_EVENT (1 << 25) -#define GEN9_GUC_FATAL_ERROR_EVENT (1 << 24) -#define GEN9_GUC_NOTIFICATION_EVENT (1 << 23) - #define GEN8_RCS_IRQ_SHIFT 0 #define GEN8_BCS_IRQ_SHIFT 16 #define GEN8_VCS0_IRQ_SHIFT 0 /* NB: VCS1 in bspec! 
*/ diff --git a/drivers/gpu/drm/i915/intel_guc_reg.h b/drivers/gpu/drm/i915/intel_guc_reg.h index a5ab7bc5504c..e3cbb23299ce 100644 --- a/drivers/gpu/drm/i915/intel_guc_reg.h +++ b/drivers/gpu/drm/i915/intel_guc_reg.h @@ -141,21 +141,21 @@ struct guc_doorbell_info { #define GUC_PM_P24C_IER _MMIO(0xC55C) /* GuC Interrupt Vector */ -#define GEN11_GUC_INTR_GUC2HOST (1 << 15) -#define GEN11_GUC_INTR_EXEC_ERROR (1 << 14) -#define GEN11_GUC_INTR_DISPLAY_EVENT (1 << 13) -#define GEN11_GUC_INTR_SEM_SIG (1 << 12) -#define GEN11_GUC_INTR_IOMMU2GUC (1 << 11) -#define GEN11_GUC_INTR_DOORBELL_RANG (1 << 10) -#define GEN11_GUC_INTR_DMA_DONE (1 << 9) -#define GEN11_GUC_INTR_FATAL_ERROR (1 << 8) -#define GEN11_GUC_INTR_NOTIF_ERROR (1 << 7) -#define GEN11_GUC_INTR_SW_INT_6 (1 << 6) -#define GEN11_GUC_INTR_SW_INT_5 (1 << 5) -#define GEN11_GUC_INTR_SW_INT_4 (1 << 4) -#define GEN11_GUC_INTR_SW_INT_3 (1 << 3) -#define GEN11_GUC_INTR_SW_INT_2 (1 << 2) -#define GEN11_GUC_INTR_SW_INT_1 (1 << 1) -#define GEN11_GUC_INTR_SW_INT_0 (1 << 0) +#define GUC_INTR_GUC2HOST BIT(15) +#define GUC_INTR_EXEC_ERROR BIT(14) +#define GUC_INTR_DISPLAY_EVENT BIT(13) +#define GUC_INTR_SEM_SIG BIT(12) +#define GUC_INTR_IOMMU2GUC BIT(11) +#define GUC_INTR_DOORBELL_RANG BIT(10) +#define GUC_INTR_DMA_DONE BIT(9) +#define GUC_INTR_FATAL_ERROR BIT(8) +#define GUC_INTR_NOTIF_ERROR BIT(7) +#define GUC_INTR_SW_INT_6 BIT(6) +#define GUC_INTR_SW_INT_5 BIT(5) +#define GUC_INTR_SW_INT_4 BIT(4) +#define GUC_INTR_SW_INT_3 BIT(3) +#define GUC_INTR_SW_INT_2 BIT(2) +#define GUC_INTR_SW_INT_1 BIT(1) +#define GUC_INTR_SW_INT_0 BIT(0) #endif -- cgit v1.2.3 From 0f261b241d9cd01d58dce85a35df26fd38320b10 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Sat, 13 Jul 2019 11:00:11 +0100 Subject: drm/i915/uc: move GuC and HuC files under gt/uc/ Both microcontrollers are part of the GT HW and are closely related to GT operations. 
To keep all the files cleanly together, they've been placed in their own subdir inside the gt/ folder Signed-off-by: Daniele Ceraolo Spurio Cc: Michal Wajdeczko Cc: Chris Wilson Acked-by: Michal Wajdeczko Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190713100016.8026-6-chris@chris-wilson.co.uk Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/Makefile | 21 +- drivers/gpu/drm/i915/Makefile.header-test | 4 - drivers/gpu/drm/i915/gt/intel_reset.c | 2 +- drivers/gpu/drm/i915/gt/uc/Makefile | 5 + drivers/gpu/drm/i915/gt/uc/Makefile.header-test | 16 + drivers/gpu/drm/i915/gt/uc/intel_guc.c | 603 +++++++++++ drivers/gpu/drm/i915/gt/uc/intel_guc.h | 202 ++++ drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c | 202 ++++ drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h | 34 + drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c | 915 ++++++++++++++++ drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h | 111 ++ drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c | 319 ++++++ drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h | 33 + drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h | 691 ++++++++++++ drivers/gpu/drm/i915/gt/uc/intel_guc_log.c | 632 +++++++++++ drivers/gpu/drm/i915/gt/uc/intel_guc_log.h | 99 ++ drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h | 161 +++ drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 1165 +++++++++++++++++++++ drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h | 87 ++ drivers/gpu/drm/i915/gt/uc/intel_huc.c | 174 +++ drivers/gpu/drm/i915/gt/uc/intel_huc.h | 59 ++ drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c | 219 ++++ drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h | 15 + drivers/gpu/drm/i915/gt/uc/intel_uc.c | 635 +++++++++++ drivers/gpu/drm/i915/gt/uc/intel_uc.h | 64 ++ drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 359 +++++++ drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h | 177 ++++ drivers/gpu/drm/i915/gt/uc/selftest_guc.c | 336 ++++++ drivers/gpu/drm/i915/i915_debugfs.c | 2 +- drivers/gpu/drm/i915/i915_drv.c | 2 +- drivers/gpu/drm/i915/i915_drv.h | 2 +- drivers/gpu/drm/i915/i915_gpu_error.h | 2 +- drivers/gpu/drm/i915/intel_guc.c | 603 ----------- drivers/gpu/drm/i915/intel_guc.h | 202 ---- drivers/gpu/drm/i915/intel_guc_ads.c | 202 ---- drivers/gpu/drm/i915/intel_guc_ads.h | 34 - drivers/gpu/drm/i915/intel_guc_ct.c | 915 ---------------- drivers/gpu/drm/i915/intel_guc_ct.h | 111 -- drivers/gpu/drm/i915/intel_guc_fw.c | 319 ------ drivers/gpu/drm/i915/intel_guc_fw.h | 33 - drivers/gpu/drm/i915/intel_guc_fwif.h | 691 ------------ drivers/gpu/drm/i915/intel_guc_log.c | 632 ----------- drivers/gpu/drm/i915/intel_guc_log.h | 99 -- drivers/gpu/drm/i915/intel_guc_reg.h | 161 --- drivers/gpu/drm/i915/intel_guc_submission.c | 1164 -------------------- drivers/gpu/drm/i915/intel_guc_submission.h | 87 -- drivers/gpu/drm/i915/intel_huc.c | 174 --- drivers/gpu/drm/i915/intel_huc.h | 59 -- drivers/gpu/drm/i915/intel_huc_fw.c | 219 ---- drivers/gpu/drm/i915/intel_huc_fw.h | 15 - drivers/gpu/drm/i915/intel_uc.c | 635 ----------- drivers/gpu/drm/i915/intel_uc.h | 64 -- drivers/gpu/drm/i915/intel_uc_fw.c | 359 ------- drivers/gpu/drm/i915/intel_uc_fw.h | 177 ---- drivers/gpu/drm/i915/selftests/intel_guc.c | 336 ------ 55 files changed, 7329 insertions(+), 7310 deletions(-) create mode 100644 drivers/gpu/drm/i915/gt/uc/Makefile create mode 100644 drivers/gpu/drm/i915/gt/uc/Makefile.header-test create mode 100644 drivers/gpu/drm/i915/gt/uc/intel_guc.c create mode 100644 drivers/gpu/drm/i915/gt/uc/intel_guc.h create mode 100644 drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c create mode 100644 
drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h create mode 100644 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c create mode 100644 drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h create mode 100644 drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c create mode 100644 drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h create mode 100644 drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h create mode 100644 drivers/gpu/drm/i915/gt/uc/intel_guc_log.c create mode 100644 drivers/gpu/drm/i915/gt/uc/intel_guc_log.h create mode 100644 drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h create mode 100644 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c create mode 100644 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h create mode 100644 drivers/gpu/drm/i915/gt/uc/intel_huc.c create mode 100644 drivers/gpu/drm/i915/gt/uc/intel_huc.h create mode 100644 drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c create mode 100644 drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h create mode 100644 drivers/gpu/drm/i915/gt/uc/intel_uc.c create mode 100644 drivers/gpu/drm/i915/gt/uc/intel_uc.h create mode 100644 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c create mode 100644 drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h create mode 100644 drivers/gpu/drm/i915/gt/uc/selftest_guc.c delete mode 100644 drivers/gpu/drm/i915/intel_guc.c delete mode 100644 drivers/gpu/drm/i915/intel_guc.h delete mode 100644 drivers/gpu/drm/i915/intel_guc_ads.c delete mode 100644 drivers/gpu/drm/i915/intel_guc_ads.h delete mode 100644 drivers/gpu/drm/i915/intel_guc_ct.c delete mode 100644 drivers/gpu/drm/i915/intel_guc_ct.h delete mode 100644 drivers/gpu/drm/i915/intel_guc_fw.c delete mode 100644 drivers/gpu/drm/i915/intel_guc_fw.h delete mode 100644 drivers/gpu/drm/i915/intel_guc_fwif.h delete mode 100644 drivers/gpu/drm/i915/intel_guc_log.c delete mode 100644 drivers/gpu/drm/i915/intel_guc_log.h delete mode 100644 drivers/gpu/drm/i915/intel_guc_reg.h delete mode 100644 drivers/gpu/drm/i915/intel_guc_submission.c delete mode 100644 drivers/gpu/drm/i915/intel_guc_submission.h delete mode 100644 drivers/gpu/drm/i915/intel_huc.c delete mode 100644 drivers/gpu/drm/i915/intel_huc.h delete mode 100644 drivers/gpu/drm/i915/intel_huc_fw.c delete mode 100644 drivers/gpu/drm/i915/intel_huc_fw.h delete mode 100644 drivers/gpu/drm/i915/intel_uc.c delete mode 100644 drivers/gpu/drm/i915/intel_uc.h delete mode 100644 drivers/gpu/drm/i915/intel_uc_fw.c delete mode 100644 drivers/gpu/drm/i915/intel_uc_fw.h delete mode 100644 drivers/gpu/drm/i915/selftests/intel_guc.c (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 5266dbeab01f..524516251a40 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -139,16 +139,17 @@ i915-y += \ intel_wopcm.o # general-purpose microcontroller (GuC) support -i915-y += intel_uc.o \ - intel_uc_fw.o \ - intel_guc.o \ - intel_guc_ads.o \ - intel_guc_ct.o \ - intel_guc_fw.o \ - intel_guc_log.o \ - intel_guc_submission.o \ - intel_huc.o \ - intel_huc_fw.o +obj-y += gt/uc/ +i915-y += gt/uc/intel_uc.o \ + gt/uc/intel_uc_fw.o \ + gt/uc/intel_guc.o \ + gt/uc/intel_guc_ads.o \ + gt/uc/intel_guc_ct.o \ + gt/uc/intel_guc_fw.o \ + gt/uc/intel_guc_log.o \ + gt/uc/intel_guc_submission.o \ + gt/uc/intel_huc.o \ + gt/uc/intel_huc_fw.o # modesetting core code obj-y += display/ diff --git a/drivers/gpu/drm/i915/Makefile.header-test b/drivers/gpu/drm/i915/Makefile.header-test index 2fd61869bdaa..88ad1ad31c9b 100644 --- a/drivers/gpu/drm/i915/Makefile.header-test +++ b/drivers/gpu/drm/i915/Makefile.header-test @@ -19,14 
+19,10 @@ header_test := \ i915_vgpu.h \ intel_csr.h \ intel_drv.h \ - intel_guc_ct.h \ - intel_guc_fwif.h \ - intel_guc_reg.h \ intel_gvt.h \ intel_pm.h \ intel_runtime_pm.h \ intel_sideband.h \ - intel_uc_fw.h \ intel_uncore.h \ intel_wakeref.h diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 7b150ec3d10a..8490d4a991ad 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -19,7 +19,7 @@ #include "intel_gt_pm.h" #include "intel_reset.h" -#include "intel_guc.h" +#include "uc/intel_guc.h" #define RESET_MAX_RETRIES 3 diff --git a/drivers/gpu/drm/i915/gt/uc/Makefile b/drivers/gpu/drm/i915/gt/uc/Makefile new file mode 100644 index 000000000000..db9718aa3ee9 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/Makefile @@ -0,0 +1,5 @@ +# For building individual subdir files on the command line +subdir-ccflags-y += -I$(srctree)/$(src)/../.. + +# Extra header tests +include $(src)/Makefile.header-test diff --git a/drivers/gpu/drm/i915/gt/uc/Makefile.header-test b/drivers/gpu/drm/i915/gt/uc/Makefile.header-test new file mode 100644 index 000000000000..61e06cbb4b32 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/Makefile.header-test @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: MIT +# Copyright © 2019 Intel Corporation + +# Test the headers are compilable as standalone units +header_test := $(notdir $(wildcard $(src)/*.h)) + +quiet_cmd_header_test = HDRTEST $@ + cmd_header_test = echo "\#include \"$( $@ + +header_test_%.c: %.h + $(call cmd,header_test) + +extra-$(CONFIG_DRM_I915_WERROR) += \ + $(foreach h,$(header_test),$(patsubst %.h,header_test_%.o,$(h))) + +clean-files += $(foreach h,$(header_test),$(patsubst %.h,header_test_%.c,$(h))) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c new file mode 100644 index 000000000000..4173b35bf104 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -0,0 +1,603 @@ +/* + * Copyright © 2014-2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + */ + +#include "intel_guc.h" +#include "intel_guc_ads.h" +#include "intel_guc_submission.h" +#include "i915_drv.h" + +static void gen8_guc_raise_irq(struct intel_guc *guc) +{ + struct drm_i915_private *dev_priv = guc_to_i915(guc); + + I915_WRITE(GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER); +} + +static void gen11_guc_raise_irq(struct intel_guc *guc) +{ + struct drm_i915_private *dev_priv = guc_to_i915(guc); + + I915_WRITE(GEN11_GUC_HOST_INTERRUPT, 0); +} + +static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i) +{ + GEM_BUG_ON(!guc->send_regs.base); + GEM_BUG_ON(!guc->send_regs.count); + GEM_BUG_ON(i >= guc->send_regs.count); + + return _MMIO(guc->send_regs.base + 4 * i); +} + +void intel_guc_init_send_regs(struct intel_guc *guc) +{ + struct drm_i915_private *dev_priv = guc_to_i915(guc); + enum forcewake_domains fw_domains = 0; + unsigned int i; + + if (INTEL_GEN(dev_priv) >= 11) { + guc->send_regs.base = + i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0)); + guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT; + } else { + guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0)); + guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN; + BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT); + } + + for (i = 0; i < guc->send_regs.count; i++) { + fw_domains |= intel_uncore_forcewake_for_reg(&dev_priv->uncore, + guc_send_reg(guc, i), + FW_REG_READ | FW_REG_WRITE); + } + guc->send_regs.fw_domains = fw_domains; +} + +void intel_guc_init_early(struct intel_guc *guc) +{ + struct drm_i915_private *i915 = guc_to_i915(guc); + + intel_guc_fw_init_early(guc); + intel_guc_ct_init_early(&guc->ct); + intel_guc_log_init_early(&guc->log); + + mutex_init(&guc->send_mutex); + spin_lock_init(&guc->irq_lock); + guc->send = intel_guc_send_nop; + guc->handler = intel_guc_to_host_event_handler_nop; + if (INTEL_GEN(i915) >= 11) { + guc->notify = gen11_guc_raise_irq; + guc->interrupts.reset = gen11_reset_guc_interrupts; + guc->interrupts.enable = gen11_enable_guc_interrupts; + guc->interrupts.disable = gen11_disable_guc_interrupts; + } else { + guc->notify = gen8_guc_raise_irq; + guc->interrupts.reset = gen9_reset_guc_interrupts; + guc->interrupts.enable = gen9_enable_guc_interrupts; + guc->interrupts.disable = gen9_disable_guc_interrupts; + } +} + +static int guc_shared_data_create(struct intel_guc *guc) +{ + struct i915_vma *vma; + void *vaddr; + + vma = intel_guc_allocate_vma(guc, PAGE_SIZE); + if (IS_ERR(vma)) + return PTR_ERR(vma); + + vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB); + if (IS_ERR(vaddr)) { + i915_vma_unpin_and_release(&vma, 0); + return PTR_ERR(vaddr); + } + + guc->shared_data = vma; + guc->shared_data_vaddr = vaddr; + + return 0; +} + +static void guc_shared_data_destroy(struct intel_guc *guc) +{ + i915_vma_unpin_and_release(&guc->shared_data, I915_VMA_RELEASE_MAP); +} + +int intel_guc_init(struct intel_guc *guc) +{ + struct drm_i915_private *dev_priv = guc_to_i915(guc); + int ret; + + ret = intel_uc_fw_init(&guc->fw); + if (ret) + goto err_fetch; + + ret = guc_shared_data_create(guc); + if (ret) + goto err_fw; + GEM_BUG_ON(!guc->shared_data); + + ret = intel_guc_log_create(&guc->log); + if (ret) + goto err_shared; + + ret = intel_guc_ads_create(guc); + if (ret) + goto err_log; + GEM_BUG_ON(!guc->ads_vma); + + ret = intel_guc_ct_init(&guc->ct); + if (ret) + goto err_ads; + + /* We need to notify the guc whenever we change the GGTT */ + i915_ggtt_enable_guc(dev_priv); + + return 0; + +err_ads: + intel_guc_ads_destroy(guc); +err_log: + intel_guc_log_destroy(&guc->log); +err_shared: + 
guc_shared_data_destroy(guc); +err_fw: + intel_uc_fw_fini(&guc->fw); +err_fetch: + intel_uc_fw_cleanup_fetch(&guc->fw); + return ret; +} + +void intel_guc_fini(struct intel_guc *guc) +{ + struct drm_i915_private *dev_priv = guc_to_i915(guc); + + i915_ggtt_disable_guc(dev_priv); + + intel_guc_ct_fini(&guc->ct); + + intel_guc_ads_destroy(guc); + intel_guc_log_destroy(&guc->log); + guc_shared_data_destroy(guc); + intel_uc_fw_fini(&guc->fw); + intel_uc_fw_cleanup_fetch(&guc->fw); +} + +static u32 guc_ctl_debug_flags(struct intel_guc *guc) +{ + u32 level = intel_guc_log_get_level(&guc->log); + u32 flags = 0; + + if (!GUC_LOG_LEVEL_IS_VERBOSE(level)) + flags |= GUC_LOG_DISABLED; + else + flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) << + GUC_LOG_VERBOSITY_SHIFT; + + return flags; +} + +static u32 guc_ctl_feature_flags(struct intel_guc *guc) +{ + u32 flags = 0; + + if (!USES_GUC_SUBMISSION(guc_to_i915(guc))) + flags |= GUC_CTL_DISABLE_SCHEDULER; + + return flags; +} + +static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc) +{ + u32 flags = 0; + + if (USES_GUC_SUBMISSION(guc_to_i915(guc))) { + u32 ctxnum, base; + + base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool); + ctxnum = GUC_MAX_STAGE_DESCRIPTORS / 16; + + base >>= PAGE_SHIFT; + flags |= (base << GUC_CTL_BASE_ADDR_SHIFT) | + (ctxnum << GUC_CTL_CTXNUM_IN16_SHIFT); + } + return flags; +} + +static u32 guc_ctl_log_params_flags(struct intel_guc *guc) +{ + u32 offset = intel_guc_ggtt_offset(guc, guc->log.vma) >> PAGE_SHIFT; + u32 flags; + + #if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0) + #define UNIT SZ_1M + #define FLAG GUC_LOG_ALLOC_IN_MEGABYTE + #else + #define UNIT SZ_4K + #define FLAG 0 + #endif + + BUILD_BUG_ON(!CRASH_BUFFER_SIZE); + BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, UNIT)); + BUILD_BUG_ON(!DPC_BUFFER_SIZE); + BUILD_BUG_ON(!IS_ALIGNED(DPC_BUFFER_SIZE, UNIT)); + BUILD_BUG_ON(!ISR_BUFFER_SIZE); + BUILD_BUG_ON(!IS_ALIGNED(ISR_BUFFER_SIZE, UNIT)); + + BUILD_BUG_ON((CRASH_BUFFER_SIZE / UNIT - 1) > + (GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT)); + BUILD_BUG_ON((DPC_BUFFER_SIZE / UNIT - 1) > + (GUC_LOG_DPC_MASK >> GUC_LOG_DPC_SHIFT)); + BUILD_BUG_ON((ISR_BUFFER_SIZE / UNIT - 1) > + (GUC_LOG_ISR_MASK >> GUC_LOG_ISR_SHIFT)); + + flags = GUC_LOG_VALID | + GUC_LOG_NOTIFY_ON_HALF_FULL | + FLAG | + ((CRASH_BUFFER_SIZE / UNIT - 1) << GUC_LOG_CRASH_SHIFT) | + ((DPC_BUFFER_SIZE / UNIT - 1) << GUC_LOG_DPC_SHIFT) | + ((ISR_BUFFER_SIZE / UNIT - 1) << GUC_LOG_ISR_SHIFT) | + (offset << GUC_LOG_BUF_ADDR_SHIFT); + + #undef UNIT + #undef FLAG + + return flags; +} + +static u32 guc_ctl_ads_flags(struct intel_guc *guc) +{ + u32 ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT; + u32 flags = ads << GUC_ADS_ADDR_SHIFT; + + return flags; +} + +/* + * Initialise the GuC parameter block before starting the firmware + * transfer. These parameters are read by the firmware on startup + * and cannot be changed thereafter. 
+ */ +void intel_guc_init_params(struct intel_guc *guc) +{ + struct drm_i915_private *dev_priv = guc_to_i915(guc); + u32 params[GUC_CTL_MAX_DWORDS]; + int i; + + memset(params, 0, sizeof(params)); + + params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc); + params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc); + params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc); + params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc); + params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc); + + for (i = 0; i < GUC_CTL_MAX_DWORDS; i++) + DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]); + + /* + * All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and + * they are power context saved so it's ok to release forcewake + * when we are done here and take it again at xfer time. + */ + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_BLITTER); + + I915_WRITE(SOFT_SCRATCH(0), 0); + + for (i = 0; i < GUC_CTL_MAX_DWORDS; i++) + I915_WRITE(SOFT_SCRATCH(1 + i), params[i]); + + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_BLITTER); +} + +int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len, + u32 *response_buf, u32 response_buf_size) +{ + WARN(1, "Unexpected send: action=%#x\n", *action); + return -ENODEV; +} + +void intel_guc_to_host_event_handler_nop(struct intel_guc *guc) +{ + WARN(1, "Unexpected event: no suitable handler\n"); +} + +/* + * This function implements the MMIO based host to GuC interface. + */ +int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len, + u32 *response_buf, u32 response_buf_size) +{ + struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct intel_uncore *uncore = &dev_priv->uncore; + u32 status; + int i; + int ret; + + GEM_BUG_ON(!len); + GEM_BUG_ON(len > guc->send_regs.count); + + /* We expect only action code */ + GEM_BUG_ON(*action & ~INTEL_GUC_MSG_CODE_MASK); + + /* If CT is available, we expect to use MMIO only during init/fini */ + GEM_BUG_ON(*action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER && + *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER); + + mutex_lock(&guc->send_mutex); + intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains); + + for (i = 0; i < len; i++) + intel_uncore_write(uncore, guc_send_reg(guc, i), action[i]); + + intel_uncore_posting_read(uncore, guc_send_reg(guc, i - 1)); + + intel_guc_notify(guc); + + /* + * No GuC command should ever take longer than 10ms. + * Fast commands should still complete in 10us. 
+ */ + ret = __intel_wait_for_register_fw(uncore, + guc_send_reg(guc, 0), + INTEL_GUC_MSG_TYPE_MASK, + INTEL_GUC_MSG_TYPE_RESPONSE << + INTEL_GUC_MSG_TYPE_SHIFT, + 10, 10, &status); + /* If GuC explicitly returned an error, convert it to -EIO */ + if (!ret && !INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(status)) + ret = -EIO; + + if (ret) { + DRM_ERROR("MMIO: GuC action %#x failed with error %d %#x\n", + action[0], ret, status); + goto out; + } + + if (response_buf) { + int count = min(response_buf_size, guc->send_regs.count - 1); + + for (i = 0; i < count; i++) + response_buf[i] = I915_READ(guc_send_reg(guc, i + 1)); + } + + /* Use data from the GuC response as our return value */ + ret = INTEL_GUC_MSG_TO_DATA(status); + +out: + intel_uncore_forcewake_put(uncore, guc->send_regs.fw_domains); + mutex_unlock(&guc->send_mutex); + + return ret; +} + +int intel_guc_to_host_process_recv_msg(struct intel_guc *guc, + const u32 *payload, u32 len) +{ + u32 msg; + + if (unlikely(!len)) + return -EPROTO; + + /* Make sure to handle only enabled messages */ + msg = payload[0] & guc->msg_enabled_mask; + + if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER | + INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED)) + intel_guc_log_handle_flush_event(&guc->log); + + return 0; +} + +int intel_guc_sample_forcewake(struct intel_guc *guc) +{ + struct drm_i915_private *dev_priv = guc_to_i915(guc); + u32 action[2]; + + action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE; + /* WaRsDisableCoarsePowerGating:skl,cnl */ + if (!HAS_RC6(dev_priv) || NEEDS_WaRsDisableCoarsePowerGating(dev_priv)) + action[1] = 0; + else + /* bit 0 and 1 are for Render and Media domain separately */ + action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA; + + return intel_guc_send(guc, action, ARRAY_SIZE(action)); +} + +/** + * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode + * @guc: intel_guc structure + * @rsa_offset: rsa offset w.r.t ggtt base of huc vma + * + * Triggers a HuC firmware authentication request to the GuC via intel_guc_send + * INTEL_GUC_ACTION_AUTHENTICATE_HUC interface. This function is invoked by + * intel_huc_auth(). + * + * Return: non-zero code on error + */ +int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset) +{ + u32 action[] = { + INTEL_GUC_ACTION_AUTHENTICATE_HUC, + rsa_offset + }; + + return intel_guc_send(guc, action, ARRAY_SIZE(action)); +} + +/** + * intel_guc_suspend() - notify GuC entering suspend state + * @guc: the guc + */ +int intel_guc_suspend(struct intel_guc *guc) +{ + struct drm_i915_private *dev_priv = guc_to_i915(guc); + int ret; + u32 status; + u32 action[] = { + INTEL_GUC_ACTION_ENTER_S_STATE, + GUC_POWER_D1, /* any value greater than GUC_POWER_D0 */ + }; + + /* + * The ENTER_S_STATE action queues the save/restore operation in GuC FW + * and then returns, so waiting on the H2G is not enough to guarantee + * GuC is done. When all the processing is done, GuC writes + * INTEL_GUC_SLEEP_STATE_SUCCESS to scratch register 14, so we can poll + * on that. Note that GuC does not ensure that the value in the register + * is different from INTEL_GUC_SLEEP_STATE_SUCCESS while the action is + * in progress so we need to take care of that ourselves as well. 
+ */ + + I915_WRITE(SOFT_SCRATCH(14), INTEL_GUC_SLEEP_STATE_INVALID_MASK); + + ret = intel_guc_send(guc, action, ARRAY_SIZE(action)); + if (ret) + return ret; + + ret = __intel_wait_for_register(&dev_priv->uncore, SOFT_SCRATCH(14), + INTEL_GUC_SLEEP_STATE_INVALID_MASK, + 0, 0, 10, &status); + if (ret) + return ret; + + if (status != INTEL_GUC_SLEEP_STATE_SUCCESS) { + DRM_ERROR("GuC failed to change sleep state. " + "action=0x%x, err=%u\n", + action[0], status); + return -EIO; + } + + return 0; +} + +/** + * intel_guc_reset_engine() - ask GuC to reset an engine + * @guc: intel_guc structure + * @engine: engine to be reset + */ +int intel_guc_reset_engine(struct intel_guc *guc, + struct intel_engine_cs *engine) +{ + u32 data[7]; + + GEM_BUG_ON(!guc->execbuf_client); + + data[0] = INTEL_GUC_ACTION_REQUEST_ENGINE_RESET; + data[1] = engine->guc_id; + data[2] = 0; + data[3] = 0; + data[4] = 0; + data[5] = guc->execbuf_client->stage_id; + data[6] = intel_guc_ggtt_offset(guc, guc->shared_data); + + return intel_guc_send(guc, data, ARRAY_SIZE(data)); +} + +/** + * intel_guc_resume() - notify GuC resuming from suspend state + * @guc: the guc + */ +int intel_guc_resume(struct intel_guc *guc) +{ + u32 action[] = { + INTEL_GUC_ACTION_EXIT_S_STATE, + GUC_POWER_D0, + }; + + return intel_guc_send(guc, action, ARRAY_SIZE(action)); +} + +/** + * DOC: GuC Address Space + * + * The layout of GuC address space is shown below: + * + * :: + * + * +===========> +====================+ <== FFFF_FFFF + * ^ | Reserved | + * | +====================+ <== GUC_GGTT_TOP + * | | | + * | | DRAM | + * GuC | | + * Address +===> +====================+ <== GuC ggtt_pin_bias + * Space ^ | | + * | | | | + * | GuC | GuC | + * | WOPCM | WOPCM | + * | Size | | + * | | | | + * v v | | + * +=======+===> +====================+ <== 0000_0000 + * + * The lower part of GuC Address Space [0, ggtt_pin_bias) is mapped to GuC WOPCM + * while upper part of GuC Address Space [ggtt_pin_bias, GUC_GGTT_TOP) is mapped + * to DRAM. The value of the GuC ggtt_pin_bias is the GuC WOPCM size. + */ + +/** + * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage + * @guc: the guc + * @size: size of area to allocate (both virtual space and memory) + * + * This is a wrapper to create an object for use with the GuC. In order to + * use it inside the GuC, an object needs to be pinned lifetime, so we allocate + * both some backing storage and a range inside the Global GTT. We must pin + * it in the GGTT somewhere other than than [0, GUC ggtt_pin_bias) because that + * range is reserved inside GuC. + * + * Return: A i915_vma if successful, otherwise an ERR_PTR. 
+ */ +struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size) +{ + struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + u64 flags; + int ret; + + obj = i915_gem_object_create_shmem(dev_priv, size); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + vma = i915_vma_instance(obj, &dev_priv->ggtt.vm, NULL); + if (IS_ERR(vma)) + goto err; + + flags = PIN_GLOBAL | PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma); + ret = i915_vma_pin(vma, 0, 0, flags); + if (ret) { + vma = ERR_PTR(ret); + goto err; + } + + return vma; + +err: + i915_gem_object_put(obj); + return vma; +} diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h new file mode 100644 index 000000000000..6852352381ce --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h @@ -0,0 +1,202 @@ +/* + * Copyright © 2014-2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef _INTEL_GUC_H_ +#define _INTEL_GUC_H_ + +#include "intel_uncore.h" +#include "intel_guc_fw.h" +#include "intel_guc_fwif.h" +#include "intel_guc_ct.h" +#include "intel_guc_log.h" +#include "intel_guc_reg.h" +#include "intel_uc_fw.h" +#include "i915_utils.h" +#include "i915_vma.h" + +struct __guc_ads_blob; + +/* + * Top level structure of GuC. It handles firmware loading and manages client + * pool and doorbells. intel_guc owns a intel_guc_client to replace the legacy + * ExecList submission. 
+ */ +struct intel_guc { + struct intel_uc_fw fw; + struct intel_guc_log log; + struct intel_guc_ct ct; + + /* Log snapshot if GuC errors during load */ + struct drm_i915_gem_object *load_err_log; + + /* intel_guc_recv interrupt related state */ + spinlock_t irq_lock; + unsigned int msg_enabled_mask; + + struct { + bool enabled; + void (*reset)(struct intel_guc *guc); + void (*enable)(struct intel_guc *guc); + void (*disable)(struct intel_guc *guc); + } interrupts; + + struct i915_vma *ads_vma; + struct __guc_ads_blob *ads_blob; + + struct i915_vma *stage_desc_pool; + void *stage_desc_pool_vaddr; + struct ida stage_ids; + struct i915_vma *shared_data; + void *shared_data_vaddr; + + struct intel_guc_client *execbuf_client; + + DECLARE_BITMAP(doorbell_bitmap, GUC_NUM_DOORBELLS); + /* Cyclic counter mod pagesize */ + u32 db_cacheline; + + /* GuC's FW specific registers used in MMIO send */ + struct { + u32 base; + unsigned int count; + enum forcewake_domains fw_domains; + } send_regs; + + /* Store msg (e.g. log flush) that we see while CTBs are disabled */ + u32 mmio_msg; + + /* To serialize the intel_guc_send actions */ + struct mutex send_mutex; + + /* GuC's FW specific send function */ + int (*send)(struct intel_guc *guc, const u32 *data, u32 len, + u32 *response_buf, u32 response_buf_size); + + /* GuC's FW specific event handler function */ + void (*handler)(struct intel_guc *guc); + + /* GuC's FW specific notify function */ + void (*notify)(struct intel_guc *guc); +}; + +static +inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len) +{ + return guc->send(guc, action, len, NULL, 0); +} + +static inline int +intel_guc_send_and_receive(struct intel_guc *guc, const u32 *action, u32 len, + u32 *response_buf, u32 response_buf_size) +{ + return guc->send(guc, action, len, response_buf, response_buf_size); +} + +static inline void intel_guc_notify(struct intel_guc *guc) +{ + guc->notify(guc); +} + +static inline void intel_guc_to_host_event_handler(struct intel_guc *guc) +{ + guc->handler(guc); +} + +/* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */ +#define GUC_GGTT_TOP 0xFEE00000 + +/** + * intel_guc_ggtt_offset() - Get and validate the GGTT offset of @vma + * @guc: intel_guc structure. + * @vma: i915 graphics virtual memory area. + * + * GuC does not allow any gfx GGTT address that falls into range + * [0, ggtt.pin_bias), which is reserved for Boot ROM, SRAM and WOPCM. + * Currently, in order to exclude [0, ggtt.pin_bias) address space from + * GGTT, all gfx objects used by GuC are allocated with intel_guc_allocate_vma() + * and pinned with PIN_OFFSET_BIAS along with the value of ggtt.pin_bias. + * + * Return: GGTT offset of the @vma. 
+ */ +static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc, + struct i915_vma *vma) +{ + u32 offset = i915_ggtt_offset(vma); + + GEM_BUG_ON(offset < i915_ggtt_pin_bias(vma)); + GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP)); + + return offset; +} + +void intel_guc_init_early(struct intel_guc *guc); +void intel_guc_init_send_regs(struct intel_guc *guc); +void intel_guc_init_params(struct intel_guc *guc); +int intel_guc_init(struct intel_guc *guc); +void intel_guc_fini(struct intel_guc *guc); +int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len, + u32 *response_buf, u32 response_buf_size); +int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len, + u32 *response_buf, u32 response_buf_size); +void intel_guc_to_host_event_handler(struct intel_guc *guc); +void intel_guc_to_host_event_handler_nop(struct intel_guc *guc); +int intel_guc_to_host_process_recv_msg(struct intel_guc *guc, + const u32 *payload, u32 len); +int intel_guc_sample_forcewake(struct intel_guc *guc); +int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset); +int intel_guc_suspend(struct intel_guc *guc); +int intel_guc_resume(struct intel_guc *guc); +struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size); + +static inline bool intel_guc_is_loaded(struct intel_guc *guc) +{ + return intel_uc_fw_is_loaded(&guc->fw); +} + +static inline int intel_guc_sanitize(struct intel_guc *guc) +{ + intel_uc_fw_sanitize(&guc->fw); + guc->mmio_msg = 0; + + return 0; +} + +static inline void intel_guc_enable_msg(struct intel_guc *guc, u32 mask) +{ + spin_lock_irq(&guc->irq_lock); + guc->msg_enabled_mask |= mask; + spin_unlock_irq(&guc->irq_lock); +} + +static inline void intel_guc_disable_msg(struct intel_guc *guc, u32 mask) +{ + spin_lock_irq(&guc->irq_lock); + guc->msg_enabled_mask &= ~mask; + spin_unlock_irq(&guc->irq_lock); +} + +int intel_guc_reset_engine(struct intel_guc *guc, + struct intel_engine_cs *engine); + +#endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c new file mode 100644 index 000000000000..69859d1e047f --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c @@ -0,0 +1,202 @@ +/* + * Copyright © 2014-2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + */ + +#include "intel_guc_ads.h" +#include "intel_uc.h" +#include "i915_drv.h" + +/* + * The Additional Data Struct (ADS) has pointers for different buffers used by + * the GuC. One single gem object contains the ADS struct itself (guc_ads), the + * scheduling policies (guc_policies), a structure describing a collection of + * register sets (guc_mmio_reg_state) and some extra pages for the GuC to save + * its internal state for sleep. + */ + +static void guc_policy_init(struct guc_policy *policy) +{ + policy->execution_quantum = POLICY_DEFAULT_EXECUTION_QUANTUM_US; + policy->preemption_time = POLICY_DEFAULT_PREEMPTION_TIME_US; + policy->fault_time = POLICY_DEFAULT_FAULT_TIME_US; + policy->policy_flags = 0; +} + +static void guc_policies_init(struct guc_policies *policies) +{ + struct guc_policy *policy; + u32 p, i; + + policies->dpc_promote_time = POLICY_DEFAULT_DPC_PROMOTE_TIME_US; + policies->max_num_work_items = POLICY_MAX_NUM_WI; + + for (p = 0; p < GUC_CLIENT_PRIORITY_NUM; p++) { + for (i = 0; i < GUC_MAX_ENGINE_CLASSES; i++) { + policy = &policies->policy[p][i]; + + guc_policy_init(policy); + } + } + + policies->is_valid = 1; +} + +static void guc_ct_pool_entries_init(struct guc_ct_pool_entry *pool, u32 num) +{ + memset(pool, 0, num * sizeof(*pool)); +} + +/* + * The first 80 dwords of the register state context, containing the + * execlists and ppgtt registers. + */ +#define LR_HW_CONTEXT_SIZE (80 * sizeof(u32)) + +/* The ads obj includes the struct itself and buffers passed to GuC */ +struct __guc_ads_blob { + struct guc_ads ads; + struct guc_policies policies; + struct guc_mmio_reg_state reg_state; + struct guc_gt_system_info system_info; + struct guc_clients_info clients_info; + struct guc_ct_pool_entry ct_pool[GUC_CT_POOL_SIZE]; + u8 reg_state_buffer[GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE]; +} __packed; + +static void __guc_ads_init(struct intel_guc *guc) +{ + struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct __guc_ads_blob *blob = guc->ads_blob; + const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE; + u32 base; + u8 engine_class; + + /* GuC scheduling policies */ + guc_policies_init(&blob->policies); + + /* + * GuC expects a per-engine-class context image and size + * (minus hwsp and ring context). The context image will be + * used to reinitialize engines after a reset. It must exist + * and be pinned in the GGTT, so that the address won't change after + * we have told GuC where to find it. The context size will be used + * to validate that the LRC base + size fall within allowed GGTT. + */ + for (engine_class = 0; engine_class <= MAX_ENGINE_CLASS; ++engine_class) { + if (engine_class == OTHER_CLASS) + continue; + /* + * TODO: Set context pointer to default state to allow + * GuC to re-init guilty contexts after internal reset. 
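		 *
		 * For a sense of scale (editorial arithmetic, not from the
		 * patch): assuming LRC_PPHWSP_SZ is a single page, on a
		 * 4 KiB page system skipped_size is
		 * 4096 + 80 * sizeof(u32) = 4416 bytes, which is subtracted
		 * from intel_engine_context_size() for each engine class
		 * below.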
+ */ + blob->ads.golden_context_lrca[engine_class] = 0; + blob->ads.eng_state_size[engine_class] = + intel_engine_context_size(dev_priv, engine_class) - + skipped_size; + } + + /* System info */ + blob->system_info.slice_enabled = hweight8(RUNTIME_INFO(dev_priv)->sseu.slice_mask); + blob->system_info.rcs_enabled = 1; + blob->system_info.bcs_enabled = 1; + + blob->system_info.vdbox_enable_mask = VDBOX_MASK(dev_priv); + blob->system_info.vebox_enable_mask = VEBOX_MASK(dev_priv); + blob->system_info.vdbox_sfc_support_mask = RUNTIME_INFO(dev_priv)->vdbox_sfc_access; + + base = intel_guc_ggtt_offset(guc, guc->ads_vma); + + /* Clients info */ + guc_ct_pool_entries_init(blob->ct_pool, ARRAY_SIZE(blob->ct_pool)); + + blob->clients_info.clients_num = 1; + blob->clients_info.ct_pool_addr = base + ptr_offset(blob, ct_pool); + blob->clients_info.ct_pool_count = ARRAY_SIZE(blob->ct_pool); + + /* ADS */ + blob->ads.scheduler_policies = base + ptr_offset(blob, policies); + blob->ads.reg_state_buffer = base + ptr_offset(blob, reg_state_buffer); + blob->ads.reg_state_addr = base + ptr_offset(blob, reg_state); + blob->ads.gt_system_info = base + ptr_offset(blob, system_info); + blob->ads.clients_info = base + ptr_offset(blob, clients_info); + + i915_gem_object_flush_map(guc->ads_vma->obj); +} + +/** + * intel_guc_ads_create() - allocates and initializes GuC ADS. + * @guc: intel_guc struct + * + * GuC needs memory block (Additional Data Struct), where it will store + * some data. Allocate and initialize such memory block for GuC use. + */ +int intel_guc_ads_create(struct intel_guc *guc) +{ + const u32 size = PAGE_ALIGN(sizeof(struct __guc_ads_blob)); + struct i915_vma *vma; + void *blob; + int ret; + + GEM_BUG_ON(guc->ads_vma); + + vma = intel_guc_allocate_vma(guc, size); + if (IS_ERR(vma)) + return PTR_ERR(vma); + + blob = i915_gem_object_pin_map(vma->obj, I915_MAP_WB); + if (IS_ERR(blob)) { + ret = PTR_ERR(blob); + goto err_vma; + } + + guc->ads_vma = vma; + guc->ads_blob = blob; + + __guc_ads_init(guc); + + return 0; + +err_vma: + i915_vma_unpin_and_release(&guc->ads_vma, 0); + return ret; +} + +void intel_guc_ads_destroy(struct intel_guc *guc) +{ + i915_vma_unpin_and_release(&guc->ads_vma, I915_VMA_RELEASE_MAP); +} + +/** + * intel_guc_ads_reset() - prepares GuC Additional Data Struct for reuse + * @guc: intel_guc struct + * + * GuC stores some data in ADS, which might be stale after a reset. + * Reinitialize whole ADS in case any part of it was corrupted during + * previous GuC run. + */ +void intel_guc_ads_reset(struct intel_guc *guc) +{ + if (!guc->ads_vma) + return; + __guc_ads_init(guc); +} diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h new file mode 100644 index 000000000000..7f40f9cd5fb9 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h @@ -0,0 +1,34 @@ +/* + * Copyright © 2014-2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef _INTEL_GUC_ADS_H_ +#define _INTEL_GUC_ADS_H_ + +struct intel_guc; + +int intel_guc_ads_create(struct intel_guc *guc); +void intel_guc_ads_destroy(struct intel_guc *guc); +void intel_guc_ads_reset(struct intel_guc *guc); + +#endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c new file mode 100644 index 000000000000..9e383a47609f --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c @@ -0,0 +1,915 @@ +/* + * Copyright © 2016-2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "i915_drv.h" +#include "intel_guc_ct.h" + +#ifdef CONFIG_DRM_I915_DEBUG_GUC +#define CT_DEBUG_DRIVER(...) DRM_DEBUG_DRIVER(__VA_ARGS__) +#else +#define CT_DEBUG_DRIVER(...) 
do { } while (0) +#endif + +struct ct_request { + struct list_head link; + u32 fence; + u32 status; + u32 response_len; + u32 *response_buf; +}; + +struct ct_incoming_request { + struct list_head link; + u32 msg[]; +}; + +enum { CTB_SEND = 0, CTB_RECV = 1 }; + +enum { CTB_OWNER_HOST = 0 }; + +static void ct_incoming_request_worker_func(struct work_struct *w); + +/** + * intel_guc_ct_init_early - Initialize CT state without requiring device access + * @ct: pointer to CT struct + */ +void intel_guc_ct_init_early(struct intel_guc_ct *ct) +{ + /* we're using static channel owners */ + ct->host_channel.owner = CTB_OWNER_HOST; + + spin_lock_init(&ct->lock); + INIT_LIST_HEAD(&ct->pending_requests); + INIT_LIST_HEAD(&ct->incoming_requests); + INIT_WORK(&ct->worker, ct_incoming_request_worker_func); +} + +static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct) +{ + return container_of(ct, struct intel_guc, ct); +} + +static inline const char *guc_ct_buffer_type_to_str(u32 type) +{ + switch (type) { + case INTEL_GUC_CT_BUFFER_TYPE_SEND: + return "SEND"; + case INTEL_GUC_CT_BUFFER_TYPE_RECV: + return "RECV"; + default: + return ""; + } +} + +static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc, + u32 cmds_addr, u32 size, u32 owner) +{ + CT_DEBUG_DRIVER("CT: desc %p init addr=%#x size=%u owner=%u\n", + desc, cmds_addr, size, owner); + memset(desc, 0, sizeof(*desc)); + desc->addr = cmds_addr; + desc->size = size; + desc->owner = owner; +} + +static void guc_ct_buffer_desc_reset(struct guc_ct_buffer_desc *desc) +{ + CT_DEBUG_DRIVER("CT: desc %p reset head=%u tail=%u\n", + desc, desc->head, desc->tail); + desc->head = 0; + desc->tail = 0; + desc->is_in_error = 0; +} + +static int guc_action_register_ct_buffer(struct intel_guc *guc, + u32 desc_addr, + u32 type) +{ + u32 action[] = { + INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER, + desc_addr, + sizeof(struct guc_ct_buffer_desc), + type + }; + int err; + + /* Can't use generic send(), CT registration must go over MMIO */ + err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0); + if (err) + DRM_ERROR("CT: register %s buffer failed; err=%d\n", + guc_ct_buffer_type_to_str(type), err); + return err; +} + +static int guc_action_deregister_ct_buffer(struct intel_guc *guc, + u32 owner, + u32 type) +{ + u32 action[] = { + INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER, + owner, + type + }; + int err; + + /* Can't use generic send(), CT deregistration must go over MMIO */ + err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0); + if (err) + DRM_ERROR("CT: deregister %s buffer failed; owner=%d err=%d\n", + guc_ct_buffer_type_to_str(type), owner, err); + return err; +} + +static int ctch_init(struct intel_guc *guc, + struct intel_guc_ct_channel *ctch) +{ + struct i915_vma *vma; + void *blob; + int err; + int i; + + GEM_BUG_ON(ctch->vma); + + /* We allocate 1 page to hold both descriptors and both buffers. + * ___________..................... + * |desc (SEND)| : + * |___________| PAGE/4 + * :___________....................: + * |desc (RECV)| : + * |___________| PAGE/4 + * :_______________________________: + * |cmds (SEND) | + * | PAGE/4 + * |_______________________________| + * |cmds (RECV) | + * | PAGE/4 + * |_______________________________| + * + * Each message can use a maximum of 32 dwords and we don't expect to + * have more than 1 in flight at any time, so we have enough space. 
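	 *
	 * Working the layout through for a 4 KiB page (editorial note, not
	 * from the patch): the SEND descriptor sits at offset 0, the RECV
	 * descriptor at 1024, the SEND commands at 2048 and the RECV commands
	 * at 3072, each region being PAGE_SIZE/4 = 1024 bytes; this matches
	 * the pointer arithmetic in the "store pointers to desc and cmds"
	 * loop below.
	 *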
+ * Some logic further ahead will rely on the fact that there is only 1 + * page and that it is always mapped, so if the size is changed the + * other code will need updating as well. + */ + + /* allocate vma */ + vma = intel_guc_allocate_vma(guc, PAGE_SIZE); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err_out; + } + ctch->vma = vma; + + /* map first page */ + blob = i915_gem_object_pin_map(vma->obj, I915_MAP_WB); + if (IS_ERR(blob)) { + err = PTR_ERR(blob); + goto err_vma; + } + CT_DEBUG_DRIVER("CT: vma base=%#x\n", + intel_guc_ggtt_offset(guc, ctch->vma)); + + /* store pointers to desc and cmds */ + for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) { + GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV)); + ctch->ctbs[i].desc = blob + PAGE_SIZE/4 * i; + ctch->ctbs[i].cmds = blob + PAGE_SIZE/4 * i + PAGE_SIZE/2; + } + + return 0; + +err_vma: + i915_vma_unpin_and_release(&ctch->vma, 0); +err_out: + CT_DEBUG_DRIVER("CT: channel %d initialization failed; err=%d\n", + ctch->owner, err); + return err; +} + +static void ctch_fini(struct intel_guc *guc, + struct intel_guc_ct_channel *ctch) +{ + GEM_BUG_ON(ctch->enabled); + + i915_vma_unpin_and_release(&ctch->vma, I915_VMA_RELEASE_MAP); +} + +static int ctch_enable(struct intel_guc *guc, + struct intel_guc_ct_channel *ctch) +{ + u32 base; + int err; + int i; + + GEM_BUG_ON(!ctch->vma); + + GEM_BUG_ON(ctch->enabled); + + /* vma should be already allocated and map'ed */ + base = intel_guc_ggtt_offset(guc, ctch->vma); + + /* (re)initialize descriptors + * cmds buffers are in the second half of the blob page + */ + for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) { + GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV)); + guc_ct_buffer_desc_init(ctch->ctbs[i].desc, + base + PAGE_SIZE/4 * i + PAGE_SIZE/2, + PAGE_SIZE/4, + ctch->owner); + } + + /* register buffers, starting wirh RECV buffer + * descriptors are in first half of the blob + */ + err = guc_action_register_ct_buffer(guc, + base + PAGE_SIZE/4 * CTB_RECV, + INTEL_GUC_CT_BUFFER_TYPE_RECV); + if (unlikely(err)) + goto err_out; + + err = guc_action_register_ct_buffer(guc, + base + PAGE_SIZE/4 * CTB_SEND, + INTEL_GUC_CT_BUFFER_TYPE_SEND); + if (unlikely(err)) + goto err_deregister; + + ctch->enabled = true; + + return 0; + +err_deregister: + guc_action_deregister_ct_buffer(guc, + ctch->owner, + INTEL_GUC_CT_BUFFER_TYPE_RECV); +err_out: + DRM_ERROR("CT: can't open channel %d; err=%d\n", ctch->owner, err); + return err; +} + +static void ctch_disable(struct intel_guc *guc, + struct intel_guc_ct_channel *ctch) +{ + GEM_BUG_ON(!ctch->enabled); + + ctch->enabled = false; + + guc_action_deregister_ct_buffer(guc, + ctch->owner, + INTEL_GUC_CT_BUFFER_TYPE_SEND); + guc_action_deregister_ct_buffer(guc, + ctch->owner, + INTEL_GUC_CT_BUFFER_TYPE_RECV); +} + +static u32 ctch_get_next_fence(struct intel_guc_ct_channel *ctch) +{ + /* For now it's trivial */ + return ++ctch->next_fence; +} + +/** + * DOC: CTB Host to GuC request + * + * Format of the CTB Host to GuC request message is as follows:: + * + * +------------+---------+---------+---------+---------+ + * | msg[0] | [1] | [2] | ... | [n-1] | + * +------------+---------+---------+---------+---------+ + * | MESSAGE | MESSAGE PAYLOAD | + * + HEADER +---------+---------+---------+---------+ + * | | 0 | 1 | ... 
| n | + * +============+=========+=========+=========+=========+ + * | len >= 1 | FENCE | request specific data | + * +------+-----+---------+---------+---------+---------+ + * + * ^-----------------len-------------------^ + */ + +static int ctb_write(struct intel_guc_ct_buffer *ctb, + const u32 *action, + u32 len /* in dwords */, + u32 fence, + bool want_response) +{ + struct guc_ct_buffer_desc *desc = ctb->desc; + u32 head = desc->head / 4; /* in dwords */ + u32 tail = desc->tail / 4; /* in dwords */ + u32 size = desc->size / 4; /* in dwords */ + u32 used; /* in dwords */ + u32 header; + u32 *cmds = ctb->cmds; + unsigned int i; + + GEM_BUG_ON(desc->size % 4); + GEM_BUG_ON(desc->head % 4); + GEM_BUG_ON(desc->tail % 4); + GEM_BUG_ON(tail >= size); + + /* + * tail == head condition indicates empty. GuC FW does not support + * using up the entire buffer to get tail == head meaning full. + */ + if (tail < head) + used = (size - head) + tail; + else + used = tail - head; + + /* make sure there is a space including extra dw for the fence */ + if (unlikely(used + len + 1 >= size)) + return -ENOSPC; + + /* + * Write the message. The format is the following: + * DW0: header (including action code) + * DW1: fence + * DW2+: action data + */ + header = (len << GUC_CT_MSG_LEN_SHIFT) | + (GUC_CT_MSG_WRITE_FENCE_TO_DESC) | + (want_response ? GUC_CT_MSG_SEND_STATUS : 0) | + (action[0] << GUC_CT_MSG_ACTION_SHIFT); + + CT_DEBUG_DRIVER("CT: writing %*ph %*ph %*ph\n", + 4, &header, 4, &fence, + 4 * (len - 1), &action[1]); + + cmds[tail] = header; + tail = (tail + 1) % size; + + cmds[tail] = fence; + tail = (tail + 1) % size; + + for (i = 1; i < len; i++) { + cmds[tail] = action[i]; + tail = (tail + 1) % size; + } + + /* now update desc tail (back in bytes) */ + desc->tail = tail * 4; + GEM_BUG_ON(desc->tail > desc->size); + + return 0; +} + +/** + * wait_for_ctb_desc_update - Wait for the CT buffer descriptor update. + * @desc: buffer descriptor + * @fence: response fence + * @status: placeholder for status + * + * Guc will update CT buffer descriptor with new fence and status + * after processing the command identified by the fence. Wait for + * specified fence and then read from the descriptor status of the + * command. + * + * Return: + * * 0 response received (status is valid) + * * -ETIMEDOUT no response within hardcoded timeout + * * -EPROTO no response, CT buffer is in error + */ +static int wait_for_ctb_desc_update(struct guc_ct_buffer_desc *desc, + u32 fence, + u32 *status) +{ + int err; + + /* + * Fast commands should complete in less than 10us, so sample quickly + * up to that length of time, then switch to a slower sleep-wait loop. + * No GuC command should ever take longer than 10ms. + */ +#define done (READ_ONCE(desc->fence) == fence) + err = wait_for_us(done, 10); + if (err) + err = wait_for(done, 10); +#undef done + + if (unlikely(err)) { + DRM_ERROR("CT: fence %u failed; reported fence=%u\n", + fence, desc->fence); + + if (WARN_ON(desc->is_in_error)) { + /* Something went wrong with the messaging, try to reset + * the buffer and hope for the best + */ + guc_ct_buffer_desc_reset(desc); + err = -EPROTO; + } + } + + *status = desc->status; + return err; +} + +/** + * wait_for_ct_request_update - Wait for CT request state update. + * @req: pointer to pending request + * @status: placeholder for status + * + * For each sent request, Guc shall send bac CT response message. + * Our message handler will update status of tracked request once + * response message with given fence is received. 
Wait here and + * check for valid response status value. + * + * Return: + * * 0 response received (status is valid) + * * -ETIMEDOUT no response within hardcoded timeout + */ +static int wait_for_ct_request_update(struct ct_request *req, u32 *status) +{ + int err; + + /* + * Fast commands should complete in less than 10us, so sample quickly + * up to that length of time, then switch to a slower sleep-wait loop. + * No GuC command should ever take longer than 10ms. + */ +#define done INTEL_GUC_MSG_IS_RESPONSE(READ_ONCE(req->status)) + err = wait_for_us(done, 10); + if (err) + err = wait_for(done, 10); +#undef done + + if (unlikely(err)) + DRM_ERROR("CT: fence %u err %d\n", req->fence, err); + + *status = req->status; + return err; +} + +static int ctch_send(struct intel_guc_ct *ct, + struct intel_guc_ct_channel *ctch, + const u32 *action, + u32 len, + u32 *response_buf, + u32 response_buf_size, + u32 *status) +{ + struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_SEND]; + struct guc_ct_buffer_desc *desc = ctb->desc; + struct ct_request request; + unsigned long flags; + u32 fence; + int err; + + GEM_BUG_ON(!ctch->enabled); + GEM_BUG_ON(!len); + GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK); + GEM_BUG_ON(!response_buf && response_buf_size); + + fence = ctch_get_next_fence(ctch); + request.fence = fence; + request.status = 0; + request.response_len = response_buf_size; + request.response_buf = response_buf; + + spin_lock_irqsave(&ct->lock, flags); + list_add_tail(&request.link, &ct->pending_requests); + spin_unlock_irqrestore(&ct->lock, flags); + + err = ctb_write(ctb, action, len, fence, !!response_buf); + if (unlikely(err)) + goto unlink; + + intel_guc_notify(ct_to_guc(ct)); + + if (response_buf) + err = wait_for_ct_request_update(&request, status); + else + err = wait_for_ctb_desc_update(desc, fence, status); + if (unlikely(err)) + goto unlink; + + if (!INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(*status)) { + err = -EIO; + goto unlink; + } + + if (response_buf) { + /* There shall be no data in the status */ + WARN_ON(INTEL_GUC_MSG_TO_DATA(request.status)); + /* Return actual response len */ + err = request.response_len; + } else { + /* There shall be no response payload */ + WARN_ON(request.response_len); + /* Return data decoded from the status dword */ + err = INTEL_GUC_MSG_TO_DATA(*status); + } + +unlink: + spin_lock_irqsave(&ct->lock, flags); + list_del(&request.link); + spin_unlock_irqrestore(&ct->lock, flags); + + return err; +} + +/* + * Command Transport (CT) buffer based GuC send function. 
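 *
 * A minimal caller sketch (for illustration, not from the patch), assuming
 * the CT channel is enabled and guc->send has been pointed at this function;
 * action_code and param stand for a real GuC action and its payload:
 *
 *	u32 request[] = { action_code, param };
 *	u32 response[8];
 *	int ret;
 *
 *	ret = intel_guc_send_and_receive(guc, request, ARRAY_SIZE(request),
 *					 response, ARRAY_SIZE(response));
 *
 * A negative ret is an error; otherwise ctch_send() above returns either the
 * number of response dwords copied into the buffer or, when no response
 * buffer is supplied, the data decoded from the status dword.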
+ */ +int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len, + u32 *response_buf, u32 response_buf_size) +{ + struct intel_guc_ct *ct = &guc->ct; + struct intel_guc_ct_channel *ctch = &ct->host_channel; + u32 status = ~0; /* undefined */ + int ret; + + mutex_lock(&guc->send_mutex); + + ret = ctch_send(ct, ctch, action, len, response_buf, response_buf_size, + &status); + if (unlikely(ret < 0)) { + DRM_ERROR("CT: send action %#X failed; err=%d status=%#X\n", + action[0], ret, status); + } else if (unlikely(ret)) { + CT_DEBUG_DRIVER("CT: send action %#x returned %d (%#x)\n", + action[0], ret, ret); + } + + mutex_unlock(&guc->send_mutex); + return ret; +} + +static inline unsigned int ct_header_get_len(u32 header) +{ + return (header >> GUC_CT_MSG_LEN_SHIFT) & GUC_CT_MSG_LEN_MASK; +} + +static inline unsigned int ct_header_get_action(u32 header) +{ + return (header >> GUC_CT_MSG_ACTION_SHIFT) & GUC_CT_MSG_ACTION_MASK; +} + +static inline bool ct_header_is_response(u32 header) +{ + return !!(header & GUC_CT_MSG_IS_RESPONSE); +} + +static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data) +{ + struct guc_ct_buffer_desc *desc = ctb->desc; + u32 head = desc->head / 4; /* in dwords */ + u32 tail = desc->tail / 4; /* in dwords */ + u32 size = desc->size / 4; /* in dwords */ + u32 *cmds = ctb->cmds; + s32 available; /* in dwords */ + unsigned int len; + unsigned int i; + + GEM_BUG_ON(desc->size % 4); + GEM_BUG_ON(desc->head % 4); + GEM_BUG_ON(desc->tail % 4); + GEM_BUG_ON(tail >= size); + GEM_BUG_ON(head >= size); + + /* tail == head condition indicates empty */ + available = tail - head; + if (unlikely(available == 0)) + return -ENODATA; + + /* beware of buffer wrap case */ + if (unlikely(available < 0)) + available += size; + CT_DEBUG_DRIVER("CT: available %d (%u:%u)\n", available, head, tail); + GEM_BUG_ON(available < 0); + + data[0] = cmds[head]; + head = (head + 1) % size; + + /* message len with header */ + len = ct_header_get_len(data[0]) + 1; + if (unlikely(len > (u32)available)) { + DRM_ERROR("CT: incomplete message %*ph %*ph %*ph\n", + 4, data, + 4 * (head + available - 1 > size ? + size - head : available - 1), &cmds[head], + 4 * (head + available - 1 > size ? + available - 1 - size + head : 0), &cmds[0]); + return -EPROTO; + } + + for (i = 1; i < len; i++) { + data[i] = cmds[head]; + head = (head + 1) % size; + } + CT_DEBUG_DRIVER("CT: received %*ph\n", 4 * len, data); + + desc->head = head * 4; + return 0; +} + +/** + * DOC: CTB GuC to Host response + * + * Format of the CTB GuC to Host response message is as follows:: + * + * +------------+---------+---------+---------+---------+---------+ + * | msg[0] | [1] | [2] | [3] | ... | [n-1] | + * +------------+---------+---------+---------+---------+---------+ + * | MESSAGE | MESSAGE PAYLOAD | + * + HEADER +---------+---------+---------+---------+---------+ + * | | 0 | 1 | 2 | ... 
| n | + * +============+=========+=========+=========+=========+=========+ + * | len >= 2 | FENCE | STATUS | response specific data | + * +------+-----+---------+---------+---------+---------+---------+ + * + * ^-----------------------len-----------------------^ + */ + +static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg) +{ + u32 header = msg[0]; + u32 len = ct_header_get_len(header); + u32 msglen = len + 1; /* total message length including header */ + u32 fence; + u32 status; + u32 datalen; + struct ct_request *req; + bool found = false; + + GEM_BUG_ON(!ct_header_is_response(header)); + GEM_BUG_ON(!in_irq()); + + /* Response payload shall at least include fence and status */ + if (unlikely(len < 2)) { + DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg); + return -EPROTO; + } + + fence = msg[1]; + status = msg[2]; + datalen = len - 2; + + /* Format of the status follows RESPONSE message */ + if (unlikely(!INTEL_GUC_MSG_IS_RESPONSE(status))) { + DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg); + return -EPROTO; + } + + CT_DEBUG_DRIVER("CT: response fence %u status %#x\n", fence, status); + + spin_lock(&ct->lock); + list_for_each_entry(req, &ct->pending_requests, link) { + if (unlikely(fence != req->fence)) { + CT_DEBUG_DRIVER("CT: request %u awaits response\n", + req->fence); + continue; + } + if (unlikely(datalen > req->response_len)) { + DRM_ERROR("CT: response %u too long %*ph\n", + req->fence, 4 * msglen, msg); + datalen = 0; + } + if (datalen) + memcpy(req->response_buf, msg + 3, 4 * datalen); + req->response_len = datalen; + WRITE_ONCE(req->status, status); + found = true; + break; + } + spin_unlock(&ct->lock); + + if (!found) + DRM_ERROR("CT: unsolicited response %*ph\n", 4 * msglen, msg); + return 0; +} + +static void ct_process_request(struct intel_guc_ct *ct, + u32 action, u32 len, const u32 *payload) +{ + struct intel_guc *guc = ct_to_guc(ct); + int ret; + + CT_DEBUG_DRIVER("CT: request %x %*ph\n", action, 4 * len, payload); + + switch (action) { + case INTEL_GUC_ACTION_DEFAULT: + ret = intel_guc_to_host_process_recv_msg(guc, payload, len); + if (unlikely(ret)) + goto fail_unexpected; + break; + + default: +fail_unexpected: + DRM_ERROR("CT: unexpected request %x %*ph\n", + action, 4 * len, payload); + break; + } +} + +static bool ct_process_incoming_requests(struct intel_guc_ct *ct) +{ + unsigned long flags; + struct ct_incoming_request *request; + u32 header; + u32 *payload; + bool done; + + spin_lock_irqsave(&ct->lock, flags); + request = list_first_entry_or_null(&ct->incoming_requests, + struct ct_incoming_request, link); + if (request) + list_del(&request->link); + done = !!list_empty(&ct->incoming_requests); + spin_unlock_irqrestore(&ct->lock, flags); + + if (!request) + return true; + + header = request->msg[0]; + payload = &request->msg[1]; + ct_process_request(ct, + ct_header_get_action(header), + ct_header_get_len(header), + payload); + + kfree(request); + return done; +} + +static void ct_incoming_request_worker_func(struct work_struct *w) +{ + struct intel_guc_ct *ct = container_of(w, struct intel_guc_ct, worker); + bool done; + + done = ct_process_incoming_requests(ct); + if (!done) + queue_work(system_unbound_wq, &ct->worker); +} + +/** + * DOC: CTB GuC to Host request + * + * Format of the CTB GuC to Host request message is as follows:: + * + * +------------+---------+---------+---------+---------+---------+ + * | msg[0] | [1] | [2] | [3] | ... 
| [n-1] | + * +------------+---------+---------+---------+---------+---------+ + * | MESSAGE | MESSAGE PAYLOAD | + * + HEADER +---------+---------+---------+---------+---------+ + * | | 0 | 1 | 2 | ... | n | + * +============+=========+=========+=========+=========+=========+ + * | len | request specific data | + * +------+-----+---------+---------+---------+---------+---------+ + * + * ^-----------------------len-----------------------^ + */ + +static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg) +{ + u32 header = msg[0]; + u32 len = ct_header_get_len(header); + u32 msglen = len + 1; /* total message length including header */ + struct ct_incoming_request *request; + unsigned long flags; + + GEM_BUG_ON(ct_header_is_response(header)); + + request = kmalloc(sizeof(*request) + 4 * msglen, GFP_ATOMIC); + if (unlikely(!request)) { + DRM_ERROR("CT: dropping request %*ph\n", 4 * msglen, msg); + return 0; /* XXX: -ENOMEM ? */ + } + memcpy(request->msg, msg, 4 * msglen); + + spin_lock_irqsave(&ct->lock, flags); + list_add_tail(&request->link, &ct->incoming_requests); + spin_unlock_irqrestore(&ct->lock, flags); + + queue_work(system_unbound_wq, &ct->worker); + return 0; +} + +static void ct_process_host_channel(struct intel_guc_ct *ct) +{ + struct intel_guc_ct_channel *ctch = &ct->host_channel; + struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_RECV]; + u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */ + int err = 0; + + if (!ctch->enabled) + return; + + do { + err = ctb_read(ctb, msg); + if (err) + break; + + if (ct_header_is_response(msg[0])) + err = ct_handle_response(ct, msg); + else + err = ct_handle_request(ct, msg); + } while (!err); + + if (GEM_WARN_ON(err == -EPROTO)) { + DRM_ERROR("CT: corrupted message detected!\n"); + ctb->desc->is_in_error = 1; + } +} + +/* + * When we're communicating with the GuC over CT, GuC uses events + * to notify us about new messages being posted on the RECV buffer. + */ +void intel_guc_to_host_event_handler_ct(struct intel_guc *guc) +{ + struct intel_guc_ct *ct = &guc->ct; + + ct_process_host_channel(ct); +} + +/** + * intel_guc_ct_init - Init CT communication + * @ct: pointer to CT struct + * + * Allocate memory required for communication via + * the CT channel. + * + * Return: 0 on success, a negative errno code on failure. + */ +int intel_guc_ct_init(struct intel_guc_ct *ct) +{ + struct intel_guc *guc = ct_to_guc(ct); + struct intel_guc_ct_channel *ctch = &ct->host_channel; + int err; + + err = ctch_init(guc, ctch); + if (unlikely(err)) { + DRM_ERROR("CT: can't open channel %d; err=%d\n", + ctch->owner, err); + return err; + } + + GEM_BUG_ON(!ctch->vma); + return 0; +} + +/** + * intel_guc_ct_fini - Fini CT communication + * @ct: pointer to CT struct + * + * Deallocate memory required for communication via + * the CT channel. + */ +void intel_guc_ct_fini(struct intel_guc_ct *ct) +{ + struct intel_guc *guc = ct_to_guc(ct); + struct intel_guc_ct_channel *ctch = &ct->host_channel; + + ctch_fini(guc, ctch); +} + +/** + * intel_guc_ct_enable - Enable buffer based command transport. + * @ct: pointer to CT struct + * + * Return: 0 on success, a negative errno code on failure. + */ +int intel_guc_ct_enable(struct intel_guc_ct *ct) +{ + struct intel_guc *guc = ct_to_guc(ct); + struct intel_guc_ct_channel *ctch = &ct->host_channel; + + if (ctch->enabled) + return 0; + + return ctch_enable(guc, ctch); +} + +/** + * intel_guc_ct_disable - Disable buffer based command transport. 
+ * @ct: pointer to CT struct + */ +void intel_guc_ct_disable(struct intel_guc_ct *ct) +{ + struct intel_guc *guc = ct_to_guc(ct); + struct intel_guc_ct_channel *ctch = &ct->host_channel; + + if (!ctch->enabled) + return; + + ctch_disable(guc, ctch); +} diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h new file mode 100644 index 000000000000..8c1f6d133168 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h @@ -0,0 +1,111 @@ +/* + * Copyright © 2016-2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifndef _INTEL_GUC_CT_H_ +#define _INTEL_GUC_CT_H_ + +#include +#include + +#include "intel_guc_fwif.h" + +struct i915_vma; +struct intel_guc; + +/** + * DOC: Command Transport (CT). + * + * Buffer based command transport is a replacement for MMIO based mechanism. + * It can be used to perform both host-2-guc and guc-to-host communication. + */ + +/** Represents single command transport buffer. + * + * A single command transport buffer consists of two parts, the header + * record (command transport buffer descriptor) and the actual buffer which + * holds the commands. + * + * @desc: pointer to the buffer descriptor + * @cmds: pointer to the commands buffer + */ +struct intel_guc_ct_buffer { + struct guc_ct_buffer_desc *desc; + u32 *cmds; +}; + +/** Represents pair of command transport buffers. + * + * Buffers go in pairs to allow bi-directional communication. + * To simplify the code we place both of them in the same vma. + * Buffers from the same pair must share unique owner id. + * + * @vma: pointer to the vma with pair of CT buffers + * @ctbs: buffers for sending(0) and receiving(1) commands + * @owner: unique identifier + * @next_fence: fence to be used with next send command + */ +struct intel_guc_ct_channel { + struct i915_vma *vma; + struct intel_guc_ct_buffer ctbs[2]; + u32 owner; + u32 next_fence; + bool enabled; +}; + +/** Holds all command transport channels. 
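 *
 * The intended lifecycle, pieced together from the entry points declared
 * below, appears to be: intel_guc_ct_init_early(), then intel_guc_ct_init(),
 * then intel_guc_ct_enable(), after which intel_guc_send_ct() and
 * intel_guc_to_host_event_handler_ct() may be used, followed by
 * intel_guc_ct_disable() and intel_guc_ct_fini() on teardown (editorial
 * summary, not from the patch).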
+ * + * @host_channel: main channel used by the host + */ +struct intel_guc_ct { + struct intel_guc_ct_channel host_channel; + /* other channels are tbd */ + + /** @lock: protects pending requests list */ + spinlock_t lock; + + /** @pending_requests: list of requests waiting for response */ + struct list_head pending_requests; + + /** @incoming_requests: list of incoming requests */ + struct list_head incoming_requests; + + /** @worker: worker for handling incoming requests */ + struct work_struct worker; +}; + +void intel_guc_ct_init_early(struct intel_guc_ct *ct); +int intel_guc_ct_init(struct intel_guc_ct *ct); +void intel_guc_ct_fini(struct intel_guc_ct *ct); +int intel_guc_ct_enable(struct intel_guc_ct *ct); +void intel_guc_ct_disable(struct intel_guc_ct *ct); + +static inline void intel_guc_ct_stop(struct intel_guc_ct *ct) +{ + ct->host_channel.enabled = false; +} + +int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len, + u32 *response_buf, u32 response_buf_size); +void intel_guc_to_host_event_handler_ct(struct intel_guc *guc); + +#endif /* _INTEL_GUC_CT_H_ */ diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c new file mode 100644 index 000000000000..ee95d4960c5c --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c @@ -0,0 +1,319 @@ +/* + * Copyright © 2014 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Vinit Azad + * Ben Widawsky + * Dave Gordon + * Alex Dai + */ + +#include "intel_guc_fw.h" +#include "i915_drv.h" + +#define __MAKE_GUC_FW_PATH(KEY) \ + "i915/" \ + __stringify(KEY##_GUC_FW_PREFIX) "_guc_" \ + __stringify(KEY##_GUC_FW_MAJOR) "." \ + __stringify(KEY##_GUC_FW_MINOR) "." 
\ + __stringify(KEY##_GUC_FW_PATCH) ".bin" + +#define SKL_GUC_FW_PREFIX skl +#define SKL_GUC_FW_MAJOR 33 +#define SKL_GUC_FW_MINOR 0 +#define SKL_GUC_FW_PATCH 0 +#define SKL_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(SKL) +MODULE_FIRMWARE(SKL_GUC_FIRMWARE_PATH); + +#define BXT_GUC_FW_PREFIX bxt +#define BXT_GUC_FW_MAJOR 33 +#define BXT_GUC_FW_MINOR 0 +#define BXT_GUC_FW_PATCH 0 +#define BXT_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(BXT) +MODULE_FIRMWARE(BXT_GUC_FIRMWARE_PATH); + +#define KBL_GUC_FW_PREFIX kbl +#define KBL_GUC_FW_MAJOR 33 +#define KBL_GUC_FW_MINOR 0 +#define KBL_GUC_FW_PATCH 0 +#define KBL_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(KBL) +MODULE_FIRMWARE(KBL_GUC_FIRMWARE_PATH); + +#define GLK_GUC_FW_PREFIX glk +#define GLK_GUC_FW_MAJOR 33 +#define GLK_GUC_FW_MINOR 0 +#define GLK_GUC_FW_PATCH 0 +#define GLK_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(GLK) +MODULE_FIRMWARE(GLK_GUC_FIRMWARE_PATH); + +#define ICL_GUC_FW_PREFIX icl +#define ICL_GUC_FW_MAJOR 33 +#define ICL_GUC_FW_MINOR 0 +#define ICL_GUC_FW_PATCH 0 +#define ICL_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(ICL) +MODULE_FIRMWARE(ICL_GUC_FIRMWARE_PATH); + +static void guc_fw_select(struct intel_uc_fw *guc_fw) +{ + struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw); + struct drm_i915_private *i915 = guc_to_i915(guc); + + GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC); + + if (!HAS_GUC(i915)) { + guc_fw->fetch_status = INTEL_UC_FIRMWARE_NOT_SUPPORTED; + return; + } + + guc_fw->fetch_status = INTEL_UC_FIRMWARE_NOT_STARTED; + + if (i915_modparams.guc_firmware_path) { + guc_fw->path = i915_modparams.guc_firmware_path; + guc_fw->major_ver_wanted = 0; + guc_fw->minor_ver_wanted = 0; + } else if (IS_ICELAKE(i915)) { + guc_fw->path = ICL_GUC_FIRMWARE_PATH; + guc_fw->major_ver_wanted = ICL_GUC_FW_MAJOR; + guc_fw->minor_ver_wanted = ICL_GUC_FW_MINOR; + } else if (IS_GEMINILAKE(i915)) { + guc_fw->path = GLK_GUC_FIRMWARE_PATH; + guc_fw->major_ver_wanted = GLK_GUC_FW_MAJOR; + guc_fw->minor_ver_wanted = GLK_GUC_FW_MINOR; + } else if (IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) { + guc_fw->path = KBL_GUC_FIRMWARE_PATH; + guc_fw->major_ver_wanted = KBL_GUC_FW_MAJOR; + guc_fw->minor_ver_wanted = KBL_GUC_FW_MINOR; + } else if (IS_BROXTON(i915)) { + guc_fw->path = BXT_GUC_FIRMWARE_PATH; + guc_fw->major_ver_wanted = BXT_GUC_FW_MAJOR; + guc_fw->minor_ver_wanted = BXT_GUC_FW_MINOR; + } else if (IS_SKYLAKE(i915)) { + guc_fw->path = SKL_GUC_FIRMWARE_PATH; + guc_fw->major_ver_wanted = SKL_GUC_FW_MAJOR; + guc_fw->minor_ver_wanted = SKL_GUC_FW_MINOR; + } +} + +/** + * intel_guc_fw_init_early() - initializes GuC firmware struct + * @guc: intel_guc struct + * + * On platforms with GuC selects firmware for uploading + */ +void intel_guc_fw_init_early(struct intel_guc *guc) +{ + struct intel_uc_fw *guc_fw = &guc->fw; + + intel_uc_fw_init_early(guc_fw, INTEL_UC_FW_TYPE_GUC); + guc_fw_select(guc_fw); +} + +static void guc_prepare_xfer(struct intel_guc *guc) +{ + struct drm_i915_private *dev_priv = guc_to_i915(guc); + + /* Must program this register before loading the ucode with DMA */ + I915_WRITE(GUC_SHIM_CONTROL, GUC_DISABLE_SRAM_INIT_TO_ZEROES | + GUC_ENABLE_READ_CACHE_LOGIC | + GUC_ENABLE_MIA_CACHING | + GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA | + GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA | + GUC_ENABLE_MIA_CLOCK_GATING); + + if (IS_GEN9_LP(dev_priv)) + I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE); + else + I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE); + + if (IS_GEN(dev_priv, 9)) { + /* DOP Clock Gating Enable for GuC clocks */ + I915_WRITE(GEN7_MISCCPCTL, 
(GEN8_DOP_CLOCK_GATE_GUC_ENABLE | + I915_READ(GEN7_MISCCPCTL))); + + /* allows for 5us (in 10ns units) before GT can go to RC6 */ + I915_WRITE(GUC_ARAT_C6DIS, 0x1FF); + } +} + +/* Copy RSA signature from the fw image to HW for verification */ +static void guc_xfer_rsa(struct intel_guc *guc) +{ + struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct intel_uc_fw *fw = &guc->fw; + struct sg_table *pages = fw->obj->mm.pages; + u32 rsa[UOS_RSA_SCRATCH_COUNT]; + int i; + + sg_pcopy_to_buffer(pages->sgl, pages->nents, + rsa, sizeof(rsa), fw->rsa_offset); + + for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++) + I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]); +} + +static bool guc_xfer_completed(struct intel_guc *guc, u32 *status) +{ + struct drm_i915_private *dev_priv = guc_to_i915(guc); + + /* Did we complete the xfer? */ + *status = I915_READ(DMA_CTRL); + return !(*status & START_DMA); +} + +/* + * Read the GuC status register (GUC_STATUS) and store it in the + * specified location; then return a boolean indicating whether + * the value matches either of two values representing completion + * of the GuC boot process. + * + * This is used for polling the GuC status in a wait_for() + * loop below. + */ +static inline bool guc_ready(struct intel_guc *guc, u32 *status) +{ + struct drm_i915_private *dev_priv = guc_to_i915(guc); + u32 val = I915_READ(GUC_STATUS); + u32 uk_val = val & GS_UKERNEL_MASK; + + *status = val; + return (uk_val == GS_UKERNEL_READY) || + ((val & GS_MIA_CORE_STATE) && (uk_val == GS_UKERNEL_LAPIC_DONE)); +} + +static int guc_wait_ucode(struct intel_guc *guc) +{ + struct drm_i915_private *i915 = guc_to_i915(guc); + u32 status; + int ret; + + /* + * Wait for the GuC to start up. + * NB: Docs recommend not using the interrupt for completion. + * Measurements indicate this should take no more than 20ms, so a + * timeout here indicates that the GuC has failed and is unusable. + * (Higher levels of the driver may decide to reset the GuC and + * attempt the ucode load again if this happens.) + */ + ret = wait_for(guc_ready(guc, &status), 100); + DRM_DEBUG_DRIVER("GuC status %#x\n", status); + + if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) { + DRM_ERROR("GuC firmware signature verification failed\n"); + ret = -ENOEXEC; + } + + if ((status & GS_UKERNEL_MASK) == GS_UKERNEL_EXCEPTION) { + DRM_ERROR("GuC firmware exception. EIP: %#x\n", + intel_uncore_read(&i915->uncore, SOFT_SCRATCH(13))); + ret = -ENXIO; + } + + if (ret == 0 && !guc_xfer_completed(guc, &status)) { + DRM_ERROR("GuC is ready, but the xfer %08x is incomplete\n", + status); + ret = -ENXIO; + } + + return ret; +} + +/* + * Transfer the firmware image to RAM for execution by the microcontroller. + * + * Architecturally, the DMA engine is bidirectional, and can potentially even + * transfer between GTT locations. This functionality is left out of the API + * for now as there is no need for it. + */ +static int guc_xfer_ucode(struct intel_guc *guc) +{ + struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct intel_uc_fw *guc_fw = &guc->fw; + unsigned long offset; + + /* + * The header plus uCode will be copied to WOPCM via DMA, excluding any + * other components + */ + I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size); + + /* Set the source address for the new blob */ + offset = intel_uc_fw_ggtt_offset(guc_fw) + guc_fw->header_offset; + I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset)); + I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF); + + /* + * Set the DMA destination. 
Current uCode expects the code to be + * loaded at 8k; locations below this are used for the stack. + */ + I915_WRITE(DMA_ADDR_1_LOW, 0x2000); + I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM); + + /* Finally start the DMA */ + I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA)); + + return guc_wait_ucode(guc); +} +/* + * Load the GuC firmware blob into the MinuteIA. + */ +static int guc_fw_xfer(struct intel_uc_fw *guc_fw) +{ + struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw); + struct drm_i915_private *dev_priv = guc_to_i915(guc); + int ret; + + GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC); + + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); + + guc_prepare_xfer(guc); + + /* + * Note that GuC needs the CSS header plus uKernel code to be copied + * by the DMA engine in one operation, whereas the RSA signature is + * loaded via MMIO. + */ + guc_xfer_rsa(guc); + + ret = guc_xfer_ucode(guc); + + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); + + return ret; +} + +/** + * intel_guc_fw_upload() - load GuC uCode to device + * @guc: intel_guc structure + * + * Called from intel_uc_init_hw() during driver load, resume from sleep and + * after a GPU reset. + * + * The firmware image should have already been fetched into memory, so only + * check that fetch succeeded, and then transfer the image to the h/w. + * + * Return: non-zero code on error + */ +int intel_guc_fw_upload(struct intel_guc *guc) +{ + return intel_uc_fw_upload(&guc->fw, guc_fw_xfer); +} diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h new file mode 100644 index 000000000000..4ec5d3d9e2b0 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h @@ -0,0 +1,33 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + */ + +#ifndef _INTEL_GUC_FW_H_ +#define _INTEL_GUC_FW_H_ + +struct intel_guc; + +void intel_guc_fw_init_early(struct intel_guc *guc); +int intel_guc_fw_upload(struct intel_guc *guc); + +#endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h new file mode 100644 index 000000000000..30cca3a29323 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h @@ -0,0 +1,691 @@ +/* + * Copyright © 2014 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ +#ifndef _INTEL_GUC_FWIF_H +#define _INTEL_GUC_FWIF_H + +#include +#include +#include + +#define GUC_CLIENT_PRIORITY_KMD_HIGH 0 +#define GUC_CLIENT_PRIORITY_HIGH 1 +#define GUC_CLIENT_PRIORITY_KMD_NORMAL 2 +#define GUC_CLIENT_PRIORITY_NORMAL 3 +#define GUC_CLIENT_PRIORITY_NUM 4 + +#define GUC_MAX_STAGE_DESCRIPTORS 1024 +#define GUC_INVALID_STAGE_ID GUC_MAX_STAGE_DESCRIPTORS + +#define GUC_RENDER_ENGINE 0 +#define GUC_VIDEO_ENGINE 1 +#define GUC_BLITTER_ENGINE 2 +#define GUC_VIDEOENHANCE_ENGINE 3 +#define GUC_VIDEO_ENGINE2 4 +#define GUC_MAX_ENGINES_NUM (GUC_VIDEO_ENGINE2 + 1) + +#define GUC_MAX_ENGINE_CLASSES 5 +#define GUC_MAX_INSTANCES_PER_CLASS 16 + +#define GUC_DOORBELL_INVALID 256 + +#define GUC_DB_SIZE (PAGE_SIZE) +#define GUC_WQ_SIZE (PAGE_SIZE * 2) + +/* Work queue item header definitions */ +#define WQ_STATUS_ACTIVE 1 +#define WQ_STATUS_SUSPENDED 2 +#define WQ_STATUS_CMD_ERROR 3 +#define WQ_STATUS_ENGINE_ID_NOT_USED 4 +#define WQ_STATUS_SUSPENDED_FROM_RESET 5 +#define WQ_TYPE_SHIFT 0 +#define WQ_TYPE_BATCH_BUF (0x1 << WQ_TYPE_SHIFT) +#define WQ_TYPE_PSEUDO (0x2 << WQ_TYPE_SHIFT) +#define WQ_TYPE_INORDER (0x3 << WQ_TYPE_SHIFT) +#define WQ_TYPE_NOOP (0x4 << WQ_TYPE_SHIFT) +#define WQ_TARGET_SHIFT 10 +#define WQ_LEN_SHIFT 16 +#define WQ_NO_WCFLUSH_WAIT (1 << 27) +#define WQ_PRESENT_WORKLOAD (1 << 28) + +#define WQ_RING_TAIL_SHIFT 20 +#define WQ_RING_TAIL_MAX 0x7FF /* 2^11 QWords */ +#define WQ_RING_TAIL_MASK (WQ_RING_TAIL_MAX << WQ_RING_TAIL_SHIFT) + +#define GUC_STAGE_DESC_ATTR_ACTIVE BIT(0) +#define GUC_STAGE_DESC_ATTR_PENDING_DB BIT(1) +#define GUC_STAGE_DESC_ATTR_KERNEL BIT(2) +#define GUC_STAGE_DESC_ATTR_PREEMPT BIT(3) +#define GUC_STAGE_DESC_ATTR_RESET BIT(4) +#define GUC_STAGE_DESC_ATTR_WQLOCKED BIT(5) +#define GUC_STAGE_DESC_ATTR_PCH BIT(6) +#define GUC_STAGE_DESC_ATTR_TERMINATED BIT(7) + +/* New GuC control data */ +#define GUC_CTL_CTXINFO 0 +#define GUC_CTL_CTXNUM_IN16_SHIFT 0 +#define 
GUC_CTL_BASE_ADDR_SHIFT 12 + +#define GUC_CTL_LOG_PARAMS 1 +#define GUC_LOG_VALID (1 << 0) +#define GUC_LOG_NOTIFY_ON_HALF_FULL (1 << 1) +#define GUC_LOG_ALLOC_IN_MEGABYTE (1 << 3) +#define GUC_LOG_CRASH_SHIFT 4 +#define GUC_LOG_CRASH_MASK (0x3 << GUC_LOG_CRASH_SHIFT) +#define GUC_LOG_DPC_SHIFT 6 +#define GUC_LOG_DPC_MASK (0x7 << GUC_LOG_DPC_SHIFT) +#define GUC_LOG_ISR_SHIFT 9 +#define GUC_LOG_ISR_MASK (0x7 << GUC_LOG_ISR_SHIFT) +#define GUC_LOG_BUF_ADDR_SHIFT 12 + +#define GUC_CTL_WA 2 +#define GUC_CTL_FEATURE 3 +#define GUC_CTL_DISABLE_SCHEDULER (1 << 14) + +#define GUC_CTL_DEBUG 4 +#define GUC_LOG_VERBOSITY_SHIFT 0 +#define GUC_LOG_VERBOSITY_LOW (0 << GUC_LOG_VERBOSITY_SHIFT) +#define GUC_LOG_VERBOSITY_MED (1 << GUC_LOG_VERBOSITY_SHIFT) +#define GUC_LOG_VERBOSITY_HIGH (2 << GUC_LOG_VERBOSITY_SHIFT) +#define GUC_LOG_VERBOSITY_ULTRA (3 << GUC_LOG_VERBOSITY_SHIFT) +/* Verbosity range-check limits, without the shift */ +#define GUC_LOG_VERBOSITY_MIN 0 +#define GUC_LOG_VERBOSITY_MAX 3 +#define GUC_LOG_VERBOSITY_MASK 0x0000000f +#define GUC_LOG_DESTINATION_MASK (3 << 4) +#define GUC_LOG_DISABLED (1 << 6) +#define GUC_PROFILE_ENABLED (1 << 7) + +#define GUC_CTL_ADS 5 +#define GUC_ADS_ADDR_SHIFT 1 +#define GUC_ADS_ADDR_MASK (0xFFFFF << GUC_ADS_ADDR_SHIFT) + +#define GUC_CTL_MAX_DWORDS (SOFT_SCRATCH_COUNT - 2) /* [1..14] */ + +/** + * DOC: GuC Firmware Layout + * + * The GuC firmware layout looks like this: + * + * +-------------------------------+ + * | uc_css_header | + * | | + * | contains major/minor version | + * +-------------------------------+ + * | uCode | + * +-------------------------------+ + * | RSA signature | + * +-------------------------------+ + * | modulus key | + * +-------------------------------+ + * | exponent val | + * +-------------------------------+ + * + * The firmware may or may not have modulus key and exponent data. The header, + * uCode and RSA signature are must-have components that will be used by driver. + * Length of each components, which is all in dwords, can be found in header. + * In the case that modulus and exponent are not present in fw, a.k.a truncated + * image, the length value still appears in header. + * + * Driver will do some basic fw size validation based on the following rules: + * + * 1. Header, uCode and RSA are must-have components. + * 2. All firmware components, if they present, are in the sequence illustrated + * in the layout table above. + * 3. Length info of each component can be found in header, in dwords. + * 4. Modulus and exponent key are not required by driver. They may not appear + * in fw. So driver will load a truncated firmware in this case. + * + * HuC firmware layout is same as GuC firmware. + * Only HuC version information is saved in a different way. + */ + +struct uc_css_header { + u32 module_type; + /* header_size includes all non-uCode bits, including css_header, rsa + * key, modulus key and exponent data. 
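	 * Put differently (editorial note, not from the patch, and consistent
	 * with the size_dw comment just below): the uCode payload length can
	 * be recovered as (size_dw - header_size_dw) dwords.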
*/ + u32 header_size_dw; + u32 header_version; + u32 module_id; + u32 module_vendor; + u32 date; +#define CSS_DATE_DAY (0xFF << 0) +#define CSS_DATE_MONTH (0xFF << 8) +#define CSS_DATE_YEAR (0xFFFF << 16) + u32 size_dw; /* uCode plus header_size_dw */ + u32 key_size_dw; + u32 modulus_size_dw; + u32 exponent_size_dw; + u32 time; +#define CSS_TIME_HOUR (0xFF << 0) +#define CSS_DATE_MIN (0xFF << 8) +#define CSS_DATE_SEC (0xFFFF << 16) + char username[8]; + char buildnumber[12]; + u32 sw_version; +#define CSS_SW_VERSION_GUC_MAJOR (0xFF << 16) +#define CSS_SW_VERSION_GUC_MINOR (0xFF << 8) +#define CSS_SW_VERSION_GUC_PATCH (0xFF << 0) +#define CSS_SW_VERSION_HUC_MAJOR (0xFFFF << 16) +#define CSS_SW_VERSION_HUC_MINOR (0xFFFF << 0) + u32 reserved[14]; + u32 header_info; +} __packed; + +/* Work item for submitting workloads into work queue of GuC. */ +struct guc_wq_item { + u32 header; + u32 context_desc; + u32 submit_element_info; + u32 fence_id; +} __packed; + +struct guc_process_desc { + u32 stage_id; + u64 db_base_addr; + u32 head; + u32 tail; + u32 error_offset; + u64 wq_base_addr; + u32 wq_size_bytes; + u32 wq_status; + u32 engine_presence; + u32 priority; + u32 reserved[30]; +} __packed; + +/* engine id and context id is packed into guc_execlist_context.context_id*/ +#define GUC_ELC_CTXID_OFFSET 0 +#define GUC_ELC_ENGINE_OFFSET 29 + +/* The execlist context including software and HW information */ +struct guc_execlist_context { + u32 context_desc; + u32 context_id; + u32 ring_status; + u32 ring_lrca; + u32 ring_begin; + u32 ring_end; + u32 ring_next_free_location; + u32 ring_current_tail_pointer_value; + u8 engine_state_submit_value; + u8 engine_state_wait_value; + u16 pagefault_count; + u16 engine_submit_queue_count; +} __packed; + +/* + * This structure describes a stage set arranged for a particular communication + * between uKernel (GuC) and Driver (KMD). Technically, this is known as a + * "GuC Context descriptor" in the specs, but we use the term "stage descriptor" + * to avoid confusion with all the other things already named "context" in the + * driver. A static pool of these descriptors are stored inside a GEM object + * (stage_desc_pool) which is held for the entire lifetime of our interaction + * with the GuC, being allocated before the GuC is loaded with its firmware. + */ +struct guc_stage_desc { + u32 sched_common_area; + u32 stage_id; + u32 pas_id; + u8 engines_used; + u64 db_trigger_cpu; + u32 db_trigger_uk; + u64 db_trigger_phy; + u16 db_id; + + struct guc_execlist_context lrc[GUC_MAX_ENGINES_NUM]; + + u8 attribute; + + u32 priority; + + u32 wq_sampled_tail_offset; + u32 wq_total_submit_enqueues; + + u32 process_desc; + u32 wq_addr; + u32 wq_size; + + u32 engine_presence; + + u8 engine_suspended; + + u8 reserved0[3]; + u64 reserved1[1]; + + u64 desc_private; +} __packed; + +/** + * DOC: CTB based communication + * + * The CTB (command transport buffer) communication between Host and GuC + * is based on u32 data stream written to the shared buffer. One buffer can + * be used to transmit data only in one direction (one-directional channel). + * + * Current status of the each buffer is stored in the buffer descriptor. + * Buffer descriptor holds tail and head fields that represents active data + * stream. 
The tail field is updated by the data producer (sender), and head + * field is updated by the data consumer (receiver):: + * + * +------------+ + * | DESCRIPTOR | +=================+============+========+ + * +============+ | | MESSAGE(s) | | + * | address |--------->+=================+============+========+ + * +------------+ + * | head | ^-----head--------^ + * +------------+ + * | tail | ^---------tail-----------------^ + * +------------+ + * | size | ^---------------size--------------------^ + * +------------+ + * + * Each message in data stream starts with the single u32 treated as a header, + * followed by optional set of u32 data that makes message specific payload:: + * + * +------------+---------+---------+---------+ + * | MESSAGE | + * +------------+---------+---------+---------+ + * | msg[0] | [1] | ... | [n-1] | + * +------------+---------+---------+---------+ + * | MESSAGE | MESSAGE PAYLOAD | + * + HEADER +---------+---------+---------+ + * | | 0 | ... | n | + * +======+=====+=========+=========+=========+ + * | 31:16| code| | | | + * +------+-----+ | | | + * | 15:5|flags| | | | + * +------+-----+ | | | + * | 4:0| len| | | | + * +------+-----+---------+---------+---------+ + * + * ^-------------len-------------^ + * + * The message header consists of: + * + * - **len**, indicates length of the message payload (in u32) + * - **code**, indicates message code + * - **flags**, holds various bits to control message handling + */ + +/* + * Describes single command transport buffer. + * Used by both guc-master and clients. + */ +struct guc_ct_buffer_desc { + u32 addr; /* gfx address */ + u64 host_private; /* host private data */ + u32 size; /* size in bytes */ + u32 head; /* offset updated by GuC*/ + u32 tail; /* offset updated by owner */ + u32 is_in_error; /* error indicator */ + u32 fence; /* fence updated by GuC */ + u32 status; /* status updated by GuC */ + u32 owner; /* id of the channel owner */ + u32 owner_sub_id; /* owner-defined field for extra tracking */ + u32 reserved[5]; +} __packed; + +/* Type of command transport buffer */ +#define INTEL_GUC_CT_BUFFER_TYPE_SEND 0x0u +#define INTEL_GUC_CT_BUFFER_TYPE_RECV 0x1u + +/* + * Definition of the command transport message header (DW0) + * + * bit[4..0] message len (in dwords) + * bit[7..5] reserved + * bit[8] response (G2H only) + * bit[8] write fence to desc (H2G only) + * bit[9] write status to H2G buff (H2G only) + * bit[10] send status back via G2H (H2G only) + * bit[15..11] reserved + * bit[31..16] action code + */ +#define GUC_CT_MSG_LEN_SHIFT 0 +#define GUC_CT_MSG_LEN_MASK 0x1F +#define GUC_CT_MSG_IS_RESPONSE (1 << 8) +#define GUC_CT_MSG_WRITE_FENCE_TO_DESC (1 << 8) +#define GUC_CT_MSG_WRITE_STATUS_TO_BUFF (1 << 9) +#define GUC_CT_MSG_SEND_STATUS (1 << 10) +#define GUC_CT_MSG_ACTION_SHIFT 16 +#define GUC_CT_MSG_ACTION_MASK 0xFFFF + +#define GUC_FORCEWAKE_RENDER (1 << 0) +#define GUC_FORCEWAKE_MEDIA (1 << 1) + +#define GUC_POWER_UNSPECIFIED 0 +#define GUC_POWER_D0 1 +#define GUC_POWER_D1 2 +#define GUC_POWER_D2 3 +#define GUC_POWER_D3 4 + +/* Scheduling policy settings */ + +/* Reset engine upon preempt failure */ +#define POLICY_RESET_ENGINE (1<<0) +/* Preempt to idle on quantum expiry */ +#define POLICY_PREEMPT_TO_IDLE (1<<1) + +#define POLICY_MAX_NUM_WI 15 +#define POLICY_DEFAULT_DPC_PROMOTE_TIME_US 500000 +#define POLICY_DEFAULT_EXECUTION_QUANTUM_US 1000000 +#define POLICY_DEFAULT_PREEMPTION_TIME_US 500000 +#define POLICY_DEFAULT_FAULT_TIME_US 250000 + +struct guc_policy { + /* Time for one workload to execute. 
(in micro seconds) */ + u32 execution_quantum; + /* Time to wait for a preemption request to completed before issuing a + * reset. (in micro seconds). */ + u32 preemption_time; + /* How much time to allow to run after the first fault is observed. + * Then preempt afterwards. (in micro seconds) */ + u32 fault_time; + u32 policy_flags; + u32 reserved[8]; +} __packed; + +struct guc_policies { + struct guc_policy policy[GUC_CLIENT_PRIORITY_NUM][GUC_MAX_ENGINE_CLASSES]; + u32 submission_queue_depth[GUC_MAX_ENGINE_CLASSES]; + /* In micro seconds. How much time to allow before DPC processing is + * called back via interrupt (to prevent DPC queue drain starving). + * Typically 1000s of micro seconds (example only, not granularity). */ + u32 dpc_promote_time; + + /* Must be set to take these new values. */ + u32 is_valid; + + /* Max number of WIs to process per call. A large value may keep CS + * idle. */ + u32 max_num_work_items; + + u32 reserved[4]; +} __packed; + +/* GuC MMIO reg state struct */ + + +#define GUC_REGSET_MAX_REGISTERS 64 +#define GUC_S3_SAVE_SPACE_PAGES 10 + +struct guc_mmio_reg { + u32 offset; + u32 value; + u32 flags; +#define GUC_REGSET_MASKED (1 << 0) +} __packed; + +struct guc_mmio_regset { + struct guc_mmio_reg registers[GUC_REGSET_MAX_REGISTERS]; + u32 values_valid; + u32 number_of_registers; +} __packed; + +/* GuC register sets */ +struct guc_mmio_reg_state { + struct guc_mmio_regset engine_reg[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS]; + u32 reserved[98]; +} __packed; + +/* HW info */ +struct guc_gt_system_info { + u32 slice_enabled; + u32 rcs_enabled; + u32 reserved0; + u32 bcs_enabled; + u32 vdbox_enable_mask; + u32 vdbox_sfc_support_mask; + u32 vebox_enable_mask; + u32 reserved[9]; +} __packed; + +/* Clients info */ +struct guc_ct_pool_entry { + struct guc_ct_buffer_desc desc; + u32 reserved[7]; +} __packed; + +#define GUC_CT_POOL_SIZE 2 + +struct guc_clients_info { + u32 clients_num; + u32 reserved0[13]; + u32 ct_pool_addr; + u32 ct_pool_count; + u32 reserved[4]; +} __packed; + +/* GuC Additional Data Struct */ +struct guc_ads { + u32 reg_state_addr; + u32 reg_state_buffer; + u32 scheduler_policies; + u32 gt_system_info; + u32 clients_info; + u32 control_data; + u32 golden_context_lrca[GUC_MAX_ENGINE_CLASSES]; + u32 eng_state_size[GUC_MAX_ENGINE_CLASSES]; + u32 reserved[16]; +} __packed; + +/* GuC logging structures */ + +enum guc_log_buffer_type { + GUC_ISR_LOG_BUFFER, + GUC_DPC_LOG_BUFFER, + GUC_CRASH_DUMP_LOG_BUFFER, + GUC_MAX_LOG_BUFFER +}; + +/** + * struct guc_log_buffer_state - GuC log buffer state + * + * Below state structure is used for coordination of retrieval of GuC firmware + * logs. Separate state is maintained for each log buffer type. + * read_ptr points to the location where i915 read last in log buffer and + * is read only for GuC firmware. write_ptr is incremented by GuC with number + * of bytes written for each log entry and is read only for i915. + * When any type of log buffer becomes half full, GuC sends a flush interrupt. + * GuC firmware expects that while it is writing to 2nd half of the buffer, + * first half would get consumed by Host and then get a flush completed + * acknowledgment from Host, so that it does not end up doing any overwrite + * causing loss of logs. So when buffer gets half filled & i915 has requested + * for interrupt, GuC will set flush_to_file field, set the sampled_write_ptr + * to the value of write_ptr and raise the interrupt. 
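In outline, the host-side handling described next could look like the sketch below. It assumes state points at the per-type guc_log_buffer_state declared further down, and copy_out() and send_flush_complete() are stand-ins for the real copy and flush-complete acknowledgment paths.

static void host_handle_flush(struct guc_log_buffer_state *state,
			      void (*copy_out)(u32 from, u32 to),
			      void (*send_flush_complete)(void))
{
	u32 latest = state->sampled_write_ptr;

	/* Consume everything GuC had written when it raised the interrupt. */
	copy_out(state->read_ptr, latest);

	/* Publish progress and clear the flush request ... */
	state->read_ptr = latest;
	state->flush_to_file = 0;

	/* ... then acknowledge, so GuC may wrap into the consumed half. */
	send_flush_complete();
}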
+ * On receiving the interrupt i915 should read the buffer, clear flush_to_file + * field and also update read_ptr with the value of sample_write_ptr, before + * sending an acknowledgment to GuC. marker & version fields are for internal + * usage of GuC and opaque to i915. buffer_full_cnt field is incremented every + * time GuC detects the log buffer overflow. + */ +struct guc_log_buffer_state { + u32 marker[2]; + u32 read_ptr; + u32 write_ptr; + u32 size; + u32 sampled_write_ptr; + union { + struct { + u32 flush_to_file:1; + u32 buffer_full_cnt:4; + u32 reserved:27; + }; + u32 flags; + }; + u32 version; +} __packed; + +struct guc_ctx_report { + u32 report_return_status; + u32 reserved1[64]; + u32 affected_count; + u32 reserved2[2]; +} __packed; + +/* GuC Shared Context Data Struct */ +struct guc_shared_ctx_data { + u32 addr_of_last_preempted_data_low; + u32 addr_of_last_preempted_data_high; + u32 addr_of_last_preempted_data_high_tmp; + u32 padding; + u32 is_mapped_to_proxy; + u32 proxy_ctx_id; + u32 engine_reset_ctx_id; + u32 media_reset_count; + u32 reserved1[8]; + u32 uk_last_ctx_switch_reason; + u32 was_reset; + u32 lrca_gpu_addr; + u64 execlist_ctx; + u32 reserved2[66]; + struct guc_ctx_report preempt_ctx_report[GUC_MAX_ENGINES_NUM]; +} __packed; + +/** + * DOC: MMIO based communication + * + * The MMIO based communication between Host and GuC uses software scratch + * registers, where first register holds data treated as message header, + * and other registers are used to hold message payload. + * + * For Gen9+, GuC uses software scratch registers 0xC180-0xC1B8, + * but no H2G command takes more than 8 parameters and the GuC FW + * itself uses an 8-element array to store the H2G message. + * + * +-----------+---------+---------+---------+ + * | MMIO[0] | MMIO[1] | ... | MMIO[n] | + * +-----------+---------+---------+---------+ + * | header | optional payload | + * +======+====+=========+=========+=========+ + * | 31:28|type| | | | + * +------+----+ | | | + * | 27:16|data| | | | + * +------+----+ | | | + * | 15:0|code| | | | + * +------+----+---------+---------+---------+ + * + * The message header consists of: + * + * - **type**, indicates message type + * - **code**, indicates message code, is specific for **type** + * - **data**, indicates message data, optional, depends on **code** + * + * The following message **types** are supported: + * + * - **REQUEST**, indicates Host-to-GuC request, requested GuC action code + * must be priovided in **code** field. Optional action specific parameters + * can be provided in remaining payload registers or **data** field. + * + * - **RESPONSE**, indicates GuC-to-Host response from earlier GuC request, + * action response status will be provided in **code** field. Optional + * response data can be returned in remaining payload registers or **data** + * field. 
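Since the header is plain bit-packing into the first scratch register, a request header can be composed and a reply checked with the masks defined just below. A minimal sketch, assuming the payload fits in the remaining scratch registers; the real send path is intel_guc_send().

static u32 guc_mmio_request_header(u32 action, u32 data)
{
	/* REQUEST type in 31:28, optional data in 27:16, action code in 15:0 */
	return (INTEL_GUC_MSG_TYPE_REQUEST << INTEL_GUC_MSG_TYPE_SHIFT) |
	       ((data << INTEL_GUC_MSG_DATA_SHIFT) & INTEL_GUC_MSG_DATA_MASK) |
	       ((action << INTEL_GUC_MSG_CODE_SHIFT) & INTEL_GUC_MSG_CODE_MASK);
}

static bool guc_mmio_reply_is_success(u32 msg)
{
	/* A RESPONSE whose status code reports SUCCESS */
	return INTEL_GUC_MSG_IS_RESPONSE(msg) &&
	       INTEL_GUC_MSG_TO_CODE(msg) == INTEL_GUC_RESPONSE_STATUS_SUCCESS;
}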
+ */ + +#define GUC_MAX_MMIO_MSG_LEN 8 + +#define INTEL_GUC_MSG_TYPE_SHIFT 28 +#define INTEL_GUC_MSG_TYPE_MASK (0xF << INTEL_GUC_MSG_TYPE_SHIFT) +#define INTEL_GUC_MSG_DATA_SHIFT 16 +#define INTEL_GUC_MSG_DATA_MASK (0xFFF << INTEL_GUC_MSG_DATA_SHIFT) +#define INTEL_GUC_MSG_CODE_SHIFT 0 +#define INTEL_GUC_MSG_CODE_MASK (0xFFFF << INTEL_GUC_MSG_CODE_SHIFT) + +#define __INTEL_GUC_MSG_GET(T, m) \ + (((m) & INTEL_GUC_MSG_ ## T ## _MASK) >> INTEL_GUC_MSG_ ## T ## _SHIFT) +#define INTEL_GUC_MSG_TO_TYPE(m) __INTEL_GUC_MSG_GET(TYPE, m) +#define INTEL_GUC_MSG_TO_DATA(m) __INTEL_GUC_MSG_GET(DATA, m) +#define INTEL_GUC_MSG_TO_CODE(m) __INTEL_GUC_MSG_GET(CODE, m) + +enum intel_guc_msg_type { + INTEL_GUC_MSG_TYPE_REQUEST = 0x0, + INTEL_GUC_MSG_TYPE_RESPONSE = 0xF, +}; + +#define __INTEL_GUC_MSG_TYPE_IS(T, m) \ + (INTEL_GUC_MSG_TO_TYPE(m) == INTEL_GUC_MSG_TYPE_ ## T) +#define INTEL_GUC_MSG_IS_REQUEST(m) __INTEL_GUC_MSG_TYPE_IS(REQUEST, m) +#define INTEL_GUC_MSG_IS_RESPONSE(m) __INTEL_GUC_MSG_TYPE_IS(RESPONSE, m) + +enum intel_guc_action { + INTEL_GUC_ACTION_DEFAULT = 0x0, + INTEL_GUC_ACTION_REQUEST_PREEMPTION = 0x2, + INTEL_GUC_ACTION_REQUEST_ENGINE_RESET = 0x3, + INTEL_GUC_ACTION_ALLOCATE_DOORBELL = 0x10, + INTEL_GUC_ACTION_DEALLOCATE_DOORBELL = 0x20, + INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE = 0x30, + INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH = 0x302, + INTEL_GUC_ACTION_ENTER_S_STATE = 0x501, + INTEL_GUC_ACTION_EXIT_S_STATE = 0x502, + INTEL_GUC_ACTION_SLPC_REQUEST = 0x3003, + INTEL_GUC_ACTION_SAMPLE_FORCEWAKE = 0x3005, + INTEL_GUC_ACTION_AUTHENTICATE_HUC = 0x4000, + INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER = 0x4505, + INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER = 0x4506, + INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x0E000, + INTEL_GUC_ACTION_LIMIT +}; + +enum intel_guc_preempt_options { + INTEL_GUC_PREEMPT_OPTION_DROP_WORK_Q = 0x4, + INTEL_GUC_PREEMPT_OPTION_DROP_SUBMIT_Q = 0x8, +}; + +enum intel_guc_report_status { + INTEL_GUC_REPORT_STATUS_UNKNOWN = 0x0, + INTEL_GUC_REPORT_STATUS_ACKED = 0x1, + INTEL_GUC_REPORT_STATUS_ERROR = 0x2, + INTEL_GUC_REPORT_STATUS_COMPLETE = 0x4, +}; + +enum intel_guc_sleep_state_status { + INTEL_GUC_SLEEP_STATE_SUCCESS = 0x1, + INTEL_GUC_SLEEP_STATE_PREEMPT_TO_IDLE_FAILED = 0x2, + INTEL_GUC_SLEEP_STATE_ENGINE_RESET_FAILED = 0x3 +#define INTEL_GUC_SLEEP_STATE_INVALID_MASK 0x80000000 +}; + +#define GUC_LOG_CONTROL_LOGGING_ENABLED (1 << 0) +#define GUC_LOG_CONTROL_VERBOSITY_SHIFT 4 +#define GUC_LOG_CONTROL_VERBOSITY_MASK (0xF << GUC_LOG_CONTROL_VERBOSITY_SHIFT) +#define GUC_LOG_CONTROL_DEFAULT_LOGGING (1 << 8) + +enum intel_guc_response_status { + INTEL_GUC_RESPONSE_STATUS_SUCCESS = 0x0, + INTEL_GUC_RESPONSE_STATUS_GENERIC_FAIL = 0xF000, +}; + +#define INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(m) \ + (typecheck(u32, (m)) && \ + ((m) & (INTEL_GUC_MSG_TYPE_MASK | INTEL_GUC_MSG_CODE_MASK)) == \ + ((INTEL_GUC_MSG_TYPE_RESPONSE << INTEL_GUC_MSG_TYPE_SHIFT) | \ + (INTEL_GUC_RESPONSE_STATUS_SUCCESS << INTEL_GUC_MSG_CODE_SHIFT))) + +/* This action will be programmed in C1BC - SOFT_SCRATCH_15_REG */ +enum intel_guc_recv_message { + INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED = BIT(1), + INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER = BIT(3) +}; + +#endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c new file mode 100644 index 000000000000..9be5d3a6fb5f --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c @@ -0,0 +1,632 @@ +/* + * Copyright © 2014-2017 Intel Corporation + * + * Permission is hereby granted, free of charge, 
to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#include + +#include "intel_guc_log.h" +#include "i915_drv.h" + +static void guc_log_capture_logs(struct intel_guc_log *log); + +/** + * DOC: GuC firmware log + * + * Firmware log is enabled by setting i915.guc_log_level to the positive level. + * Log data is printed out via reading debugfs i915_guc_log_dump. Reading from + * i915_guc_load_status will print out firmware loading status and scratch + * registers value. + */ + +static int guc_action_flush_log_complete(struct intel_guc *guc) +{ + u32 action[] = { + INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE + }; + + return intel_guc_send(guc, action, ARRAY_SIZE(action)); +} + +static int guc_action_flush_log(struct intel_guc *guc) +{ + u32 action[] = { + INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH, + 0 + }; + + return intel_guc_send(guc, action, ARRAY_SIZE(action)); +} + +static int guc_action_control_log(struct intel_guc *guc, bool enable, + bool default_logging, u32 verbosity) +{ + u32 action[] = { + INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING, + (enable ? GUC_LOG_CONTROL_LOGGING_ENABLED : 0) | + (verbosity << GUC_LOG_CONTROL_VERBOSITY_SHIFT) | + (default_logging ? GUC_LOG_CONTROL_DEFAULT_LOGGING : 0) + }; + + GEM_BUG_ON(verbosity > GUC_LOG_VERBOSITY_MAX); + + return intel_guc_send(guc, action, ARRAY_SIZE(action)); +} + +static inline struct intel_guc *log_to_guc(struct intel_guc_log *log) +{ + return container_of(log, struct intel_guc, log); +} + +static void guc_log_enable_flush_events(struct intel_guc_log *log) +{ + intel_guc_enable_msg(log_to_guc(log), + INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER | + INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED); +} + +static void guc_log_disable_flush_events(struct intel_guc_log *log) +{ + intel_guc_disable_msg(log_to_guc(log), + INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER | + INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED); +} + +/* + * Sub buffer switch callback. Called whenever relay has to switch to a new + * sub buffer, relay stays on the same sub buffer if 0 is returned. + */ +static int subbuf_start_callback(struct rchan_buf *buf, + void *subbuf, + void *prev_subbuf, + size_t prev_padding) +{ + /* + * Use no-overwrite mode by default, where relay will stop accepting + * new data if there are no empty sub buffers left. + * There is no strict synchronization enforced by relay between Consumer + * and Producer. 
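The guc_action_*() helpers above follow the usual one-shot H2G pattern: fill a small u32 action array and hand it to intel_guc_send(). A minimal sketch of that pattern; the choice of SAMPLE_FORCEWAKE and its single flags parameter are illustrative assumptions, not part of this file.

static int guc_action_sample_forcewake(struct intel_guc *guc, u32 flags)
{
	u32 action[] = {
		INTEL_GUC_ACTION_SAMPLE_FORCEWAKE,
		flags	/* e.g. GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA */
	};

	/* intel_guc_send() writes the action to scratch and waits for the reply */
	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}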
In overwrite mode, there is a possibility of getting + * inconsistent/garbled data, the producer could be writing on to the + * same sub buffer from which Consumer is reading. This can't be avoided + * unless Consumer is fast enough and can always run in tandem with + * Producer. + */ + if (relay_buf_full(buf)) + return 0; + + return 1; +} + +/* + * file_create() callback. Creates relay file in debugfs. + */ +static struct dentry *create_buf_file_callback(const char *filename, + struct dentry *parent, + umode_t mode, + struct rchan_buf *buf, + int *is_global) +{ + struct dentry *buf_file; + + /* + * This to enable the use of a single buffer for the relay channel and + * correspondingly have a single file exposed to User, through which + * it can collect the logs in order without any post-processing. + * Need to set 'is_global' even if parent is NULL for early logging. + */ + *is_global = 1; + + if (!parent) + return NULL; + + buf_file = debugfs_create_file(filename, mode, + parent, buf, &relay_file_operations); + if (IS_ERR(buf_file)) + return NULL; + + return buf_file; +} + +/* + * file_remove() default callback. Removes relay file in debugfs. + */ +static int remove_buf_file_callback(struct dentry *dentry) +{ + debugfs_remove(dentry); + return 0; +} + +/* relay channel callbacks */ +static struct rchan_callbacks relay_callbacks = { + .subbuf_start = subbuf_start_callback, + .create_buf_file = create_buf_file_callback, + .remove_buf_file = remove_buf_file_callback, +}; + +static void guc_move_to_next_buf(struct intel_guc_log *log) +{ + /* + * Make sure the updates made in the sub buffer are visible when + * Consumer sees the following update to offset inside the sub buffer. + */ + smp_wmb(); + + /* All data has been written, so now move the offset of sub buffer. */ + relay_reserve(log->relay.channel, log->vma->obj->base.size); + + /* Switch to the next sub buffer */ + relay_flush(log->relay.channel); +} + +static void *guc_get_write_buffer(struct intel_guc_log *log) +{ + /* + * Just get the base address of a new sub buffer and copy data into it + * ourselves. NULL will be returned in no-overwrite mode, if all sub + * buffers are full. Could have used the relay_write() to indirectly + * copy the data, but that would have been bit convoluted, as we need to + * write to only certain locations inside a sub buffer which cannot be + * done without using relay_reserve() along with relay_write(). So its + * better to use relay_reserve() alone. 
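For completeness, the relay file created by create_buf_file_callback() can be drained from userspace with an ordinary read loop. A sketch under the assumption that the file appears under the DRM debugfs directory as guc_log0; the exact name and minor number are not guaranteed by anything in this patch.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/kernel/debug/dri/0/guc_log0", O_RDONLY);	/* assumed path */

	if (fd < 0) {
		perror("open guc_log relay file");
		return 1;
	}

	/* relay hands out whole sub-buffers; just append them to stdout. */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);

	close(fd);
	return 0;
}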
+ */ + return relay_reserve(log->relay.channel, 0); +} + +static bool guc_check_log_buf_overflow(struct intel_guc_log *log, + enum guc_log_buffer_type type, + unsigned int full_cnt) +{ + unsigned int prev_full_cnt = log->stats[type].sampled_overflow; + bool overflow = false; + + if (full_cnt != prev_full_cnt) { + overflow = true; + + log->stats[type].overflow = full_cnt; + log->stats[type].sampled_overflow += full_cnt - prev_full_cnt; + + if (full_cnt < prev_full_cnt) { + /* buffer_full_cnt is a 4 bit counter */ + log->stats[type].sampled_overflow += 16; + } + + dev_notice_ratelimited(guc_to_i915(log_to_guc(log))->drm.dev, + "GuC log buffer overflow\n"); + } + + return overflow; +} + +static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type) +{ + switch (type) { + case GUC_ISR_LOG_BUFFER: + return ISR_BUFFER_SIZE; + case GUC_DPC_LOG_BUFFER: + return DPC_BUFFER_SIZE; + case GUC_CRASH_DUMP_LOG_BUFFER: + return CRASH_BUFFER_SIZE; + default: + MISSING_CASE(type); + } + + return 0; +} + +static void guc_read_update_log_buffer(struct intel_guc_log *log) +{ + unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt; + struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state; + struct guc_log_buffer_state log_buf_state_local; + enum guc_log_buffer_type type; + void *src_data, *dst_data; + bool new_overflow; + + mutex_lock(&log->relay.lock); + + if (WARN_ON(!intel_guc_log_relay_enabled(log))) + goto out_unlock; + + /* Get the pointer to shared GuC log buffer */ + log_buf_state = src_data = log->relay.buf_addr; + + /* Get the pointer to local buffer to store the logs */ + log_buf_snapshot_state = dst_data = guc_get_write_buffer(log); + + if (unlikely(!log_buf_snapshot_state)) { + /* + * Used rate limited to avoid deluge of messages, logs might be + * getting consumed by User at a slow rate. + */ + DRM_ERROR_RATELIMITED("no sub-buffer to capture logs\n"); + log->relay.full_count++; + + goto out_unlock; + } + + /* Actual logs are present from the 2nd page */ + src_data += PAGE_SIZE; + dst_data += PAGE_SIZE; + + for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) { + /* + * Make a copy of the state structure, inside GuC log buffer + * (which is uncached mapped), on the stack to avoid reading + * from it multiple times. + */ + memcpy(&log_buf_state_local, log_buf_state, + sizeof(struct guc_log_buffer_state)); + buffer_size = guc_get_log_buffer_size(type); + read_offset = log_buf_state_local.read_ptr; + write_offset = log_buf_state_local.sampled_write_ptr; + full_cnt = log_buf_state_local.buffer_full_cnt; + + /* Bookkeeping stuff */ + log->stats[type].flush += log_buf_state_local.flush_to_file; + new_overflow = guc_check_log_buf_overflow(log, type, full_cnt); + + /* Update the state of shared log buffer */ + log_buf_state->read_ptr = write_offset; + log_buf_state->flush_to_file = 0; + log_buf_state++; + + /* First copy the state structure in snapshot buffer */ + memcpy(log_buf_snapshot_state, &log_buf_state_local, + sizeof(struct guc_log_buffer_state)); + + /* + * The write pointer could have been updated by GuC firmware, + * after sending the flush interrupt to Host, for consistency + * set write pointer value to same value of sampled_write_ptr + * in the snapshot buffer. + */ + log_buf_snapshot_state->write_ptr = write_offset; + log_buf_snapshot_state++; + + /* Now copy the actual logs. 
*/ + if (unlikely(new_overflow)) { + /* copy the whole buffer in case of overflow */ + read_offset = 0; + write_offset = buffer_size; + } else if (unlikely((read_offset > buffer_size) || + (write_offset > buffer_size))) { + DRM_ERROR("invalid log buffer state\n"); + /* copy whole buffer as offsets are unreliable */ + read_offset = 0; + write_offset = buffer_size; + } + + /* Just copy the newly written data */ + if (read_offset > write_offset) { + i915_memcpy_from_wc(dst_data, src_data, write_offset); + bytes_to_copy = buffer_size - read_offset; + } else { + bytes_to_copy = write_offset - read_offset; + } + i915_memcpy_from_wc(dst_data + read_offset, + src_data + read_offset, bytes_to_copy); + + src_data += buffer_size; + dst_data += buffer_size; + } + + guc_move_to_next_buf(log); + +out_unlock: + mutex_unlock(&log->relay.lock); +} + +static void capture_logs_work(struct work_struct *work) +{ + struct intel_guc_log *log = + container_of(work, struct intel_guc_log, relay.flush_work); + + guc_log_capture_logs(log); +} + +static int guc_log_map(struct intel_guc_log *log) +{ + void *vaddr; + + lockdep_assert_held(&log->relay.lock); + + if (!log->vma) + return -ENODEV; + + /* + * Create a WC (Uncached for read) vmalloc mapping of log + * buffer pages, so that we can directly get the data + * (up-to-date) from memory. + */ + vaddr = i915_gem_object_pin_map(log->vma->obj, I915_MAP_WC); + if (IS_ERR(vaddr)) + return PTR_ERR(vaddr); + + log->relay.buf_addr = vaddr; + + return 0; +} + +static void guc_log_unmap(struct intel_guc_log *log) +{ + lockdep_assert_held(&log->relay.lock); + + i915_gem_object_unpin_map(log->vma->obj); + log->relay.buf_addr = NULL; +} + +void intel_guc_log_init_early(struct intel_guc_log *log) +{ + mutex_init(&log->relay.lock); + INIT_WORK(&log->relay.flush_work, capture_logs_work); +} + +static int guc_log_relay_create(struct intel_guc_log *log) +{ + struct intel_guc *guc = log_to_guc(log); + struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct rchan *guc_log_relay_chan; + size_t n_subbufs, subbuf_size; + int ret; + + lockdep_assert_held(&log->relay.lock); + + /* Keep the size of sub buffers same as shared log buffer */ + subbuf_size = log->vma->size; + + /* + * Store up to 8 snapshots, which is large enough to buffer sufficient + * boot time logs and provides enough leeway to User, in terms of + * latency, for consuming the logs from relay. Also doesn't take + * up too much memory. + */ + n_subbufs = 8; + + guc_log_relay_chan = relay_open("guc_log", + dev_priv->drm.primary->debugfs_root, + subbuf_size, n_subbufs, + &relay_callbacks, dev_priv); + if (!guc_log_relay_chan) { + DRM_ERROR("Couldn't create relay chan for GuC logging\n"); + + ret = -ENOMEM; + return ret; + } + + GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size); + log->relay.channel = guc_log_relay_chan; + + return 0; +} + +static void guc_log_relay_destroy(struct intel_guc_log *log) +{ + lockdep_assert_held(&log->relay.lock); + + relay_close(log->relay.channel); + log->relay.channel = NULL; +} + +static void guc_log_capture_logs(struct intel_guc_log *log) +{ + struct intel_guc *guc = log_to_guc(log); + struct drm_i915_private *dev_priv = guc_to_i915(guc); + intel_wakeref_t wakeref; + + guc_read_update_log_buffer(log); + + /* + * Generally device is expected to be active only at this + * time, so get/put should be really quick. 
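The copy loop above drains a circular region in at most two spans. A generic sketch of that pattern, with copy() standing in for i915_memcpy_from_wc():

static void drain_circular(void *dst, const void *src, size_t size,
			   size_t read, size_t write,
			   void (*copy)(void *, const void *, size_t))
{
	if (read > write) {
		/* wrapped: copy [0, write) and then [read, size) */
		copy(dst, src, write);
		copy((char *)dst + read, (const char *)src + read,
		     size - read);
	} else {
		/* linear: copy [read, write) */
		copy((char *)dst + read, (const char *)src + read,
		     write - read);
	}
}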
+ */ + with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) + guc_action_flush_log_complete(guc); +} + +int intel_guc_log_create(struct intel_guc_log *log) +{ + struct intel_guc *guc = log_to_guc(log); + struct i915_vma *vma; + u32 guc_log_size; + int ret; + + GEM_BUG_ON(log->vma); + + /* + * GuC Log buffer Layout + * + * +===============================+ 00B + * | Crash dump state header | + * +-------------------------------+ 32B + * | DPC state header | + * +-------------------------------+ 64B + * | ISR state header | + * +-------------------------------+ 96B + * | | + * +===============================+ PAGE_SIZE (4KB) + * | Crash Dump logs | + * +===============================+ + CRASH_SIZE + * | DPC logs | + * +===============================+ + DPC_SIZE + * | ISR logs | + * +===============================+ + ISR_SIZE + */ + guc_log_size = PAGE_SIZE + CRASH_BUFFER_SIZE + DPC_BUFFER_SIZE + + ISR_BUFFER_SIZE; + + vma = intel_guc_allocate_vma(guc, guc_log_size); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + goto err; + } + + log->vma = vma; + + log->level = i915_modparams.guc_log_level; + + return 0; + +err: + DRM_ERROR("Failed to allocate GuC log buffer. %d\n", ret); + return ret; +} + +void intel_guc_log_destroy(struct intel_guc_log *log) +{ + i915_vma_unpin_and_release(&log->vma, 0); +} + +int intel_guc_log_set_level(struct intel_guc_log *log, u32 level) +{ + struct intel_guc *guc = log_to_guc(log); + struct drm_i915_private *dev_priv = guc_to_i915(guc); + intel_wakeref_t wakeref; + int ret = 0; + + BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN != 0); + GEM_BUG_ON(!log->vma); + + /* + * GuC is recognizing log levels starting from 0 to max, we're using 0 + * as indication that logging should be disabled. + */ + if (level < GUC_LOG_LEVEL_DISABLED || level > GUC_LOG_LEVEL_MAX) + return -EINVAL; + + mutex_lock(&dev_priv->drm.struct_mutex); + + if (log->level == level) + goto out_unlock; + + with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) + ret = guc_action_control_log(guc, + GUC_LOG_LEVEL_IS_VERBOSE(level), + GUC_LOG_LEVEL_IS_ENABLED(level), + GUC_LOG_LEVEL_TO_VERBOSITY(level)); + if (ret) { + DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret); + goto out_unlock; + } + + log->level = level; + +out_unlock: + mutex_unlock(&dev_priv->drm.struct_mutex); + + return ret; +} + +bool intel_guc_log_relay_enabled(const struct intel_guc_log *log) +{ + return log->relay.buf_addr; +} + +int intel_guc_log_relay_open(struct intel_guc_log *log) +{ + int ret; + + mutex_lock(&log->relay.lock); + + if (intel_guc_log_relay_enabled(log)) { + ret = -EEXIST; + goto out_unlock; + } + + /* + * We require SSE 4.1 for fast reads from the GuC log buffer and + * it should be present on the chipsets supporting GuC based + * submisssions. + */ + if (!i915_has_memcpy_from_wc()) { + ret = -ENXIO; + goto out_unlock; + } + + ret = guc_log_relay_create(log); + if (ret) + goto out_unlock; + + ret = guc_log_map(log); + if (ret) + goto out_relay; + + mutex_unlock(&log->relay.lock); + + guc_log_enable_flush_events(log); + + /* + * When GuC is logging without us relaying to userspace, we're ignoring + * the flush notification. This means that we need to unconditionally + * flush on relay enabling, since GuC only notifies us once. 
+ */ + queue_work(system_highpri_wq, &log->relay.flush_work); + + return 0; + +out_relay: + guc_log_relay_destroy(log); +out_unlock: + mutex_unlock(&log->relay.lock); + + return ret; +} + +void intel_guc_log_relay_flush(struct intel_guc_log *log) +{ + struct intel_guc *guc = log_to_guc(log); + struct drm_i915_private *i915 = guc_to_i915(guc); + intel_wakeref_t wakeref; + + /* + * Before initiating the forceful flush, wait for any pending/ongoing + * flush to complete otherwise forceful flush may not actually happen. + */ + flush_work(&log->relay.flush_work); + + with_intel_runtime_pm(&i915->runtime_pm, wakeref) + guc_action_flush_log(guc); + + /* GuC would have updated log buffer by now, so capture it */ + guc_log_capture_logs(log); +} + +void intel_guc_log_relay_close(struct intel_guc_log *log) +{ + struct intel_guc *guc = log_to_guc(log); + struct drm_i915_private *i915 = guc_to_i915(guc); + + guc_log_disable_flush_events(log); + intel_synchronize_irq(i915); + + flush_work(&log->relay.flush_work); + + mutex_lock(&log->relay.lock); + GEM_BUG_ON(!intel_guc_log_relay_enabled(log)); + guc_log_unmap(log); + guc_log_relay_destroy(log); + mutex_unlock(&log->relay.lock); +} + +void intel_guc_log_handle_flush_event(struct intel_guc_log *log) +{ + queue_work(system_highpri_wq, &log->relay.flush_work); +} diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h new file mode 100644 index 000000000000..1969572f1f79 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h @@ -0,0 +1,99 @@ +/* + * Copyright © 2014-2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef _INTEL_GUC_LOG_H_ +#define _INTEL_GUC_LOG_H_ + +#include +#include +#include + +#include "intel_guc_fwif.h" +#include "i915_gem.h" + +struct intel_guc; + +#ifdef CONFIG_DRM_I915_DEBUG_GUC +#define CRASH_BUFFER_SIZE SZ_2M +#define DPC_BUFFER_SIZE SZ_8M +#define ISR_BUFFER_SIZE SZ_8M +#else +#define CRASH_BUFFER_SIZE SZ_8K +#define DPC_BUFFER_SIZE SZ_32K +#define ISR_BUFFER_SIZE SZ_32K +#endif + +/* + * While we're using plain log level in i915, GuC controls are much more... + * "elaborate"? We have a couple of bits for verbosity, separate bit for actual + * log enabling, and separate bit for default logging - which "conveniently" + * ignores the enable bit. 
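A small worked example of how a plain i915 log level decomposes through the helpers defined just below; the value 3 is only an example. intel_guc_log_set_level() feeds exactly these pieces into guc_action_control_log().

static void guc_log_level_example(void)
{
	u32 level = 3;						/* example i915 level */

	bool enabled = GUC_LOG_LEVEL_IS_ENABLED(level);		/* true */
	bool verbose = GUC_LOG_LEVEL_IS_VERBOSE(level);		/* true */
	u32 verbosity = GUC_LOG_LEVEL_TO_VERBOSITY(level);	/* 1 */

	(void)enabled;
	(void)verbose;
	(void)verbosity;
}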
+ */ +#define GUC_LOG_LEVEL_DISABLED 0 +#define GUC_LOG_LEVEL_NON_VERBOSE 1 +#define GUC_LOG_LEVEL_IS_ENABLED(x) ((x) > GUC_LOG_LEVEL_DISABLED) +#define GUC_LOG_LEVEL_IS_VERBOSE(x) ((x) > GUC_LOG_LEVEL_NON_VERBOSE) +#define GUC_LOG_LEVEL_TO_VERBOSITY(x) ({ \ + typeof(x) _x = (x); \ + GUC_LOG_LEVEL_IS_VERBOSE(_x) ? _x - 2 : 0; \ +}) +#define GUC_VERBOSITY_TO_LOG_LEVEL(x) ((x) + 2) +#define GUC_LOG_LEVEL_MAX GUC_VERBOSITY_TO_LOG_LEVEL(GUC_LOG_VERBOSITY_MAX) + +struct intel_guc_log { + u32 level; + struct i915_vma *vma; + struct { + void *buf_addr; + struct work_struct flush_work; + struct rchan *channel; + struct mutex lock; + u32 full_count; + } relay; + /* logging related stats */ + struct { + u32 sampled_overflow; + u32 overflow; + u32 flush; + } stats[GUC_MAX_LOG_BUFFER]; +}; + +void intel_guc_log_init_early(struct intel_guc_log *log); +int intel_guc_log_create(struct intel_guc_log *log); +void intel_guc_log_destroy(struct intel_guc_log *log); + +int intel_guc_log_set_level(struct intel_guc_log *log, u32 level); +bool intel_guc_log_relay_enabled(const struct intel_guc_log *log); +int intel_guc_log_relay_open(struct intel_guc_log *log); +void intel_guc_log_relay_flush(struct intel_guc_log *log); +void intel_guc_log_relay_close(struct intel_guc_log *log); + +void intel_guc_log_handle_flush_event(struct intel_guc_log *log); + +static inline u32 intel_guc_log_get_level(struct intel_guc_log *log) +{ + return log->level; +} + +#endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h new file mode 100644 index 000000000000..e3cbb23299ce --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h @@ -0,0 +1,161 @@ +/* + * Copyright © 2014 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + */ +#ifndef _INTEL_GUC_REG_H_ +#define _INTEL_GUC_REG_H_ + +#include +#include + +#include "i915_reg.h" + +/* Definitions of GuC H/W registers, bits, etc */ + +#define GUC_STATUS _MMIO(0xc000) +#define GS_RESET_SHIFT 0 +#define GS_MIA_IN_RESET (0x01 << GS_RESET_SHIFT) +#define GS_BOOTROM_SHIFT 1 +#define GS_BOOTROM_MASK (0x7F << GS_BOOTROM_SHIFT) +#define GS_BOOTROM_RSA_FAILED (0x50 << GS_BOOTROM_SHIFT) +#define GS_BOOTROM_JUMP_PASSED (0x76 << GS_BOOTROM_SHIFT) +#define GS_UKERNEL_SHIFT 8 +#define GS_UKERNEL_MASK (0xFF << GS_UKERNEL_SHIFT) +#define GS_UKERNEL_LAPIC_DONE (0x30 << GS_UKERNEL_SHIFT) +#define GS_UKERNEL_DPC_ERROR (0x60 << GS_UKERNEL_SHIFT) +#define GS_UKERNEL_EXCEPTION (0x70 << GS_UKERNEL_SHIFT) +#define GS_UKERNEL_READY (0xF0 << GS_UKERNEL_SHIFT) +#define GS_MIA_SHIFT 16 +#define GS_MIA_MASK (0x07 << GS_MIA_SHIFT) +#define GS_MIA_CORE_STATE (0x01 << GS_MIA_SHIFT) +#define GS_MIA_HALT_REQUESTED (0x02 << GS_MIA_SHIFT) +#define GS_MIA_ISR_ENTRY (0x04 << GS_MIA_SHIFT) +#define GS_AUTH_STATUS_SHIFT 30 +#define GS_AUTH_STATUS_MASK (0x03 << GS_AUTH_STATUS_SHIFT) +#define GS_AUTH_STATUS_BAD (0x01 << GS_AUTH_STATUS_SHIFT) +#define GS_AUTH_STATUS_GOOD (0x02 << GS_AUTH_STATUS_SHIFT) + +#define SOFT_SCRATCH(n) _MMIO(0xc180 + (n) * 4) +#define SOFT_SCRATCH_COUNT 16 + +#define GEN11_SOFT_SCRATCH(n) _MMIO(0x190240 + (n) * 4) +#define GEN11_SOFT_SCRATCH_COUNT 4 + +#define UOS_RSA_SCRATCH(i) _MMIO(0xc200 + (i) * 4) +#define UOS_RSA_SCRATCH_COUNT 64 + +#define DMA_ADDR_0_LOW _MMIO(0xc300) +#define DMA_ADDR_0_HIGH _MMIO(0xc304) +#define DMA_ADDR_1_LOW _MMIO(0xc308) +#define DMA_ADDR_1_HIGH _MMIO(0xc30c) +#define DMA_ADDRESS_SPACE_WOPCM (7 << 16) +#define DMA_ADDRESS_SPACE_GTT (8 << 16) +#define DMA_COPY_SIZE _MMIO(0xc310) +#define DMA_CTRL _MMIO(0xc314) +#define HUC_UKERNEL (1<<9) +#define UOS_MOVE (1<<4) +#define START_DMA (1<<0) +#define DMA_GUC_WOPCM_OFFSET _MMIO(0xc340) +#define GUC_WOPCM_OFFSET_VALID (1<<0) +#define HUC_LOADING_AGENT_VCR (0<<1) +#define HUC_LOADING_AGENT_GUC (1<<1) +#define GUC_WOPCM_OFFSET_SHIFT 14 +#define GUC_WOPCM_OFFSET_MASK (0x3ffff << GUC_WOPCM_OFFSET_SHIFT) +#define GUC_MAX_IDLE_COUNT _MMIO(0xC3E4) + +#define HUC_STATUS2 _MMIO(0xD3B0) +#define HUC_FW_VERIFIED (1<<7) + +#define GEN11_HUC_KERNEL_LOAD_INFO _MMIO(0xC1DC) +#define HUC_LOAD_SUCCESSFUL (1 << 0) + +#define GUC_WOPCM_SIZE _MMIO(0xc050) +#define GUC_WOPCM_SIZE_LOCKED (1<<0) +#define GUC_WOPCM_SIZE_SHIFT 12 +#define GUC_WOPCM_SIZE_MASK (0xfffff << GUC_WOPCM_SIZE_SHIFT) + +#define GEN8_GT_PM_CONFIG _MMIO(0x138140) +#define GEN9LP_GT_PM_CONFIG _MMIO(0x138140) +#define GEN9_GT_PM_CONFIG _MMIO(0x13816c) +#define GT_DOORBELL_ENABLE (1<<0) + +#define GEN8_GTCR _MMIO(0x4274) +#define GEN8_GTCR_INVALIDATE (1<<0) + +#define GUC_ARAT_C6DIS _MMIO(0xA178) + +#define GUC_SHIM_CONTROL _MMIO(0xc064) +#define GUC_DISABLE_SRAM_INIT_TO_ZEROES (1<<0) +#define GUC_ENABLE_READ_CACHE_LOGIC (1<<1) +#define GUC_ENABLE_MIA_CACHING (1<<2) +#define GUC_GEN10_MSGCH_ENABLE (1<<4) +#define GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA (1<<9) +#define GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA (1<<10) +#define GUC_ENABLE_MIA_CLOCK_GATING (1<<15) +#define GUC_GEN10_SHIM_WC_ENABLE (1<<21) + +#define GUC_SEND_INTERRUPT _MMIO(0xc4c8) +#define GUC_SEND_TRIGGER (1<<0) +#define GEN11_GUC_HOST_INTERRUPT _MMIO(0x1901f0) + +#define GUC_NUM_DOORBELLS 256 + +/* format of the HW-monitored doorbell cacheline */ +struct guc_doorbell_info { + u32 db_status; +#define GUC_DOORBELL_DISABLED 0 +#define GUC_DOORBELL_ENABLED 1 + + u32 cookie; + u32 reserved[14]; +} 
__packed; + +#define GEN8_DRBREGL(x) _MMIO(0x1000 + (x) * 8) +#define GEN8_DRB_VALID (1<<0) +#define GEN8_DRBREGU(x) _MMIO(0x1000 + (x) * 8 + 4) + +#define DE_GUCRMR _MMIO(0x44054) + +#define GUC_BCS_RCS_IER _MMIO(0xC550) +#define GUC_VCS2_VCS1_IER _MMIO(0xC554) +#define GUC_WD_VECS_IER _MMIO(0xC558) +#define GUC_PM_P24C_IER _MMIO(0xC55C) + +/* GuC Interrupt Vector */ +#define GUC_INTR_GUC2HOST BIT(15) +#define GUC_INTR_EXEC_ERROR BIT(14) +#define GUC_INTR_DISPLAY_EVENT BIT(13) +#define GUC_INTR_SEM_SIG BIT(12) +#define GUC_INTR_IOMMU2GUC BIT(11) +#define GUC_INTR_DOORBELL_RANG BIT(10) +#define GUC_INTR_DMA_DONE BIT(9) +#define GUC_INTR_FATAL_ERROR BIT(8) +#define GUC_INTR_NOTIF_ERROR BIT(7) +#define GUC_INTR_SW_INT_6 BIT(6) +#define GUC_INTR_SW_INT_5 BIT(5) +#define GUC_INTR_SW_INT_4 BIT(4) +#define GUC_INTR_SW_INT_3 BIT(3) +#define GUC_INTR_SW_INT_2 BIT(2) +#define GUC_INTR_SW_INT_1 BIT(1) +#define GUC_INTR_SW_INT_0 BIT(0) + +#endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c new file mode 100644 index 000000000000..e1e4e683ce21 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -0,0 +1,1165 @@ +/* + * Copyright © 2014 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#include + +#include "gem/i915_gem_context.h" + +#include "gt/intel_context.h" +#include "gt/intel_engine_pm.h" +#include "gt/intel_lrc_reg.h" +#include "intel_guc_submission.h" + +#include "i915_drv.h" + +enum { + GUC_PREEMPT_NONE = 0, + GUC_PREEMPT_INPROGRESS, + GUC_PREEMPT_FINISHED, +}; +#define GUC_PREEMPT_BREADCRUMB_DWORDS 0x8 +#define GUC_PREEMPT_BREADCRUMB_BYTES \ + (sizeof(u32) * GUC_PREEMPT_BREADCRUMB_DWORDS) + +/** + * DOC: GuC-based command submission + * + * GuC client: + * A intel_guc_client refers to a submission path through GuC. Currently, there + * is only one client, which is charged with all submissions to the GuC. This + * struct is the owner of a doorbell, a process descriptor and a workqueue (all + * of them inside a single gem object that contains all required pages for these + * elements). + * + * GuC stage descriptor: + * During initialization, the driver allocates a static pool of 1024 such + * descriptors, and shares them with the GuC. + * Currently, there exists a 1:1 mapping between a intel_guc_client and a + * guc_stage_desc (via the client's stage_id), so effectively only one + * gets used. 
This stage descriptor lets the GuC know about the doorbell, + * workqueue and process descriptor. Theoretically, it also lets the GuC + * know about our HW contexts (context ID, etc...), but we actually + * employ a kind of submission where the GuC uses the LRCA sent via the work + * item instead (the single guc_stage_desc associated to execbuf client + * contains information about the default kernel context only, but this is + * essentially unused). This is called a "proxy" submission. + * + * The Scratch registers: + * There are 16 MMIO-based registers start from 0xC180. The kernel driver writes + * a value to the action register (SOFT_SCRATCH_0) along with any data. It then + * triggers an interrupt on the GuC via another register write (0xC4C8). + * Firmware writes a success/fail code back to the action register after + * processes the request. The kernel driver polls waiting for this update and + * then proceeds. + * See intel_guc_send() + * + * Doorbells: + * Doorbells are interrupts to uKernel. A doorbell is a single cache line (QW) + * mapped into process space. + * + * Work Items: + * There are several types of work items that the host may place into a + * workqueue, each with its own requirements and limitations. Currently only + * WQ_TYPE_INORDER is needed to support legacy submission via GuC, which + * represents in-order queue. The kernel driver packs ring tail pointer and an + * ELSP context descriptor dword into Work Item. + * See guc_add_request() + * + */ + +static inline struct i915_priolist *to_priolist(struct rb_node *rb) +{ + return rb_entry(rb, struct i915_priolist, node); +} + +static inline bool is_high_priority(struct intel_guc_client *client) +{ + return (client->priority == GUC_CLIENT_PRIORITY_KMD_HIGH || + client->priority == GUC_CLIENT_PRIORITY_HIGH); +} + +static int reserve_doorbell(struct intel_guc_client *client) +{ + unsigned long offset; + unsigned long end; + u16 id; + + GEM_BUG_ON(client->doorbell_id != GUC_DOORBELL_INVALID); + + /* + * The bitmap tracks which doorbell registers are currently in use. + * It is split into two halves; the first half is used for normal + * priority contexts, the second half for high-priority ones. 
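The split described above works out to two contiguous id ranges. A minimal sketch, using GUC_NUM_DOORBELLS from intel_guc_reg.h; the surrounding reserve_doorbell() performs the real bitmap search.

static void doorbell_id_range(bool high_priority, u16 *start, u16 *end)
{
	if (high_priority) {
		/* second half of the id space */
		*start = GUC_NUM_DOORBELLS / 2;
		*end = GUC_NUM_DOORBELLS;
	} else {
		/* first half, for normal priority clients */
		*start = 0;
		*end = GUC_NUM_DOORBELLS / 2;
	}
}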
+ */ + offset = 0; + end = GUC_NUM_DOORBELLS / 2; + if (is_high_priority(client)) { + offset = end; + end += offset; + } + + id = find_next_zero_bit(client->guc->doorbell_bitmap, end, offset); + if (id == end) + return -ENOSPC; + + __set_bit(id, client->guc->doorbell_bitmap); + client->doorbell_id = id; + DRM_DEBUG_DRIVER("client %u (high prio=%s) reserved doorbell: %d\n", + client->stage_id, yesno(is_high_priority(client)), + id); + return 0; +} + +static bool has_doorbell(struct intel_guc_client *client) +{ + if (client->doorbell_id == GUC_DOORBELL_INVALID) + return false; + + return test_bit(client->doorbell_id, client->guc->doorbell_bitmap); +} + +static void unreserve_doorbell(struct intel_guc_client *client) +{ + GEM_BUG_ON(!has_doorbell(client)); + + __clear_bit(client->doorbell_id, client->guc->doorbell_bitmap); + client->doorbell_id = GUC_DOORBELL_INVALID; +} + +/* + * Tell the GuC to allocate or deallocate a specific doorbell + */ + +static int __guc_allocate_doorbell(struct intel_guc *guc, u32 stage_id) +{ + u32 action[] = { + INTEL_GUC_ACTION_ALLOCATE_DOORBELL, + stage_id + }; + + return intel_guc_send(guc, action, ARRAY_SIZE(action)); +} + +static int __guc_deallocate_doorbell(struct intel_guc *guc, u32 stage_id) +{ + u32 action[] = { + INTEL_GUC_ACTION_DEALLOCATE_DOORBELL, + stage_id + }; + + return intel_guc_send(guc, action, ARRAY_SIZE(action)); +} + +static struct guc_stage_desc *__get_stage_desc(struct intel_guc_client *client) +{ + struct guc_stage_desc *base = client->guc->stage_desc_pool_vaddr; + + return &base[client->stage_id]; +} + +/* + * Initialise, update, or clear doorbell data shared with the GuC + * + * These functions modify shared data and so need access to the mapped + * client object which contains the page being used for the doorbell + */ + +static void __update_doorbell_desc(struct intel_guc_client *client, u16 new_id) +{ + struct guc_stage_desc *desc; + + /* Update the GuC's idea of the doorbell ID */ + desc = __get_stage_desc(client); + desc->db_id = new_id; +} + +static struct guc_doorbell_info *__get_doorbell(struct intel_guc_client *client) +{ + return client->vaddr + client->doorbell_offset; +} + +static bool __doorbell_valid(struct intel_guc *guc, u16 db_id) +{ + struct drm_i915_private *dev_priv = guc_to_i915(guc); + + GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS); + return I915_READ(GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID; +} + +static void __init_doorbell(struct intel_guc_client *client) +{ + struct guc_doorbell_info *doorbell; + + doorbell = __get_doorbell(client); + doorbell->db_status = GUC_DOORBELL_ENABLED; + doorbell->cookie = 0; +} + +static void __fini_doorbell(struct intel_guc_client *client) +{ + struct guc_doorbell_info *doorbell; + u16 db_id = client->doorbell_id; + + doorbell = __get_doorbell(client); + doorbell->db_status = GUC_DOORBELL_DISABLED; + + /* Doorbell release flow requires that we wait for GEN8_DRB_VALID bit + * to go to zero after updating db_status before we call the GuC to + * release the doorbell + */ + if (wait_for_us(!__doorbell_valid(client->guc, db_id), 10)) + WARN_ONCE(true, "Doorbell never became invalid after disable\n"); +} + +static int create_doorbell(struct intel_guc_client *client) +{ + int ret; + + if (WARN_ON(!has_doorbell(client))) + return -ENODEV; /* internal setup error, should never happen */ + + __update_doorbell_desc(client, client->doorbell_id); + __init_doorbell(client); + + ret = __guc_allocate_doorbell(client->guc, client->stage_id); + if (ret) { + __fini_doorbell(client); + 
__update_doorbell_desc(client, GUC_DOORBELL_INVALID); + DRM_DEBUG_DRIVER("Couldn't create client %u doorbell: %d\n", + client->stage_id, ret); + return ret; + } + + return 0; +} + +static int destroy_doorbell(struct intel_guc_client *client) +{ + int ret; + + GEM_BUG_ON(!has_doorbell(client)); + + __fini_doorbell(client); + ret = __guc_deallocate_doorbell(client->guc, client->stage_id); + if (ret) + DRM_ERROR("Couldn't destroy client %u doorbell: %d\n", + client->stage_id, ret); + + __update_doorbell_desc(client, GUC_DOORBELL_INVALID); + + return ret; +} + +static unsigned long __select_cacheline(struct intel_guc *guc) +{ + unsigned long offset; + + /* Doorbell uses a single cache line within a page */ + offset = offset_in_page(guc->db_cacheline); + + /* Moving to next cache line to reduce contention */ + guc->db_cacheline += cache_line_size(); + + DRM_DEBUG_DRIVER("reserved cacheline 0x%lx, next 0x%x, linesize %u\n", + offset, guc->db_cacheline, cache_line_size()); + return offset; +} + +static inline struct guc_process_desc * +__get_process_desc(struct intel_guc_client *client) +{ + return client->vaddr + client->proc_desc_offset; +} + +/* + * Initialise the process descriptor shared with the GuC firmware. + */ +static void guc_proc_desc_init(struct intel_guc_client *client) +{ + struct guc_process_desc *desc; + + desc = memset(__get_process_desc(client), 0, sizeof(*desc)); + + /* + * XXX: pDoorbell and WQVBaseAddress are pointers in process address + * space for ring3 clients (set them as in mmap_ioctl) or kernel + * space for kernel clients (map on demand instead? May make debug + * easier to have it mapped). + */ + desc->wq_base_addr = 0; + desc->db_base_addr = 0; + + desc->stage_id = client->stage_id; + desc->wq_size_bytes = GUC_WQ_SIZE; + desc->wq_status = WQ_STATUS_ACTIVE; + desc->priority = client->priority; +} + +static void guc_proc_desc_fini(struct intel_guc_client *client) +{ + struct guc_process_desc *desc; + + desc = __get_process_desc(client); + memset(desc, 0, sizeof(*desc)); +} + +static int guc_stage_desc_pool_create(struct intel_guc *guc) +{ + struct i915_vma *vma; + void *vaddr; + + vma = intel_guc_allocate_vma(guc, + PAGE_ALIGN(sizeof(struct guc_stage_desc) * + GUC_MAX_STAGE_DESCRIPTORS)); + if (IS_ERR(vma)) + return PTR_ERR(vma); + + vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB); + if (IS_ERR(vaddr)) { + i915_vma_unpin_and_release(&vma, 0); + return PTR_ERR(vaddr); + } + + guc->stage_desc_pool = vma; + guc->stage_desc_pool_vaddr = vaddr; + ida_init(&guc->stage_ids); + + return 0; +} + +static void guc_stage_desc_pool_destroy(struct intel_guc *guc) +{ + ida_destroy(&guc->stage_ids); + i915_vma_unpin_and_release(&guc->stage_desc_pool, I915_VMA_RELEASE_MAP); +} + +/* + * Initialise/clear the stage descriptor shared with the GuC firmware. + * + * This descriptor tells the GuC where (in GGTT space) to find the important + * data structures relating to this client (doorbell, process descriptor, + * write queue, etc). 
+ */ +static void guc_stage_desc_init(struct intel_guc_client *client) +{ + struct intel_guc *guc = client->guc; + struct guc_stage_desc *desc; + u32 gfx_addr; + + desc = __get_stage_desc(client); + memset(desc, 0, sizeof(*desc)); + + desc->attribute = GUC_STAGE_DESC_ATTR_ACTIVE | + GUC_STAGE_DESC_ATTR_KERNEL; + if (is_high_priority(client)) + desc->attribute |= GUC_STAGE_DESC_ATTR_PREEMPT; + desc->stage_id = client->stage_id; + desc->priority = client->priority; + desc->db_id = client->doorbell_id; + + /* + * The doorbell, process descriptor, and workqueue are all parts + * of the client object, which the GuC will reference via the GGTT + */ + gfx_addr = intel_guc_ggtt_offset(guc, client->vma); + desc->db_trigger_phy = sg_dma_address(client->vma->pages->sgl) + + client->doorbell_offset; + desc->db_trigger_cpu = ptr_to_u64(__get_doorbell(client)); + desc->db_trigger_uk = gfx_addr + client->doorbell_offset; + desc->process_desc = gfx_addr + client->proc_desc_offset; + desc->wq_addr = gfx_addr + GUC_DB_SIZE; + desc->wq_size = GUC_WQ_SIZE; + + desc->desc_private = ptr_to_u64(client); +} + +static void guc_stage_desc_fini(struct intel_guc_client *client) +{ + struct guc_stage_desc *desc; + + desc = __get_stage_desc(client); + memset(desc, 0, sizeof(*desc)); +} + +/* Construct a Work Item and append it to the GuC's Work Queue */ +static void guc_wq_item_append(struct intel_guc_client *client, + u32 target_engine, u32 context_desc, + u32 ring_tail, u32 fence_id) +{ + /* wqi_len is in DWords, and does not include the one-word header */ + const size_t wqi_size = sizeof(struct guc_wq_item); + const u32 wqi_len = wqi_size / sizeof(u32) - 1; + struct guc_process_desc *desc = __get_process_desc(client); + struct guc_wq_item *wqi; + u32 wq_off; + + lockdep_assert_held(&client->wq_lock); + + /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we + * should not have the case where structure wqi is across page, neither + * wrapped to the beginning. This simplifies the implementation below. + * + * XXX: if not the case, we need save data to a temp wqi and copy it to + * workqueue buffer dw by dw. + */ + BUILD_BUG_ON(wqi_size != 16); + + /* We expect the WQ to be active if we're appending items to it */ + GEM_BUG_ON(desc->wq_status != WQ_STATUS_ACTIVE); + + /* Free space is guaranteed. */ + wq_off = READ_ONCE(desc->tail); + GEM_BUG_ON(CIRC_SPACE(wq_off, READ_ONCE(desc->head), + GUC_WQ_SIZE) < wqi_size); + GEM_BUG_ON(wq_off & (wqi_size - 1)); + + /* WQ starts from the page after doorbell / process_desc */ + wqi = client->vaddr + wq_off + GUC_DB_SIZE; + + if (I915_SELFTEST_ONLY(client->use_nop_wqi)) { + wqi->header = WQ_TYPE_NOOP | (wqi_len << WQ_LEN_SHIFT); + } else { + /* Now fill in the 4-word work queue item */ + wqi->header = WQ_TYPE_INORDER | + (wqi_len << WQ_LEN_SHIFT) | + (target_engine << WQ_TARGET_SHIFT) | + WQ_NO_WCFLUSH_WAIT; + wqi->context_desc = context_desc; + wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT; + GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX); + wqi->fence_id = fence_id; + } + + /* Make the update visible to GuC */ + WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1)); +} + +static void guc_ring_doorbell(struct intel_guc_client *client) +{ + struct guc_doorbell_info *db; + u32 cookie; + + lockdep_assert_held(&client->wq_lock); + + /* pointer of current doorbell cacheline */ + db = __get_doorbell(client); + + /* + * We're not expecting the doorbell cookie to change behind our back, + * we also need to treat 0 as a reserved value. 
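The increment below relies on the GNU ?: extension to skip the reserved value. A minimal sketch of the same computation as a standalone helper:

static u32 next_doorbell_cookie(u32 cookie)
{
	/* yields cookie + 1 unless that wraps to the reserved value 0 */
	return cookie + 1 ?: cookie + 2;
}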
+ */ + cookie = READ_ONCE(db->cookie); + WARN_ON_ONCE(xchg(&db->cookie, cookie + 1 ?: cookie + 2) != cookie); + + /* XXX: doorbell was lost and need to acquire it again */ + GEM_BUG_ON(db->db_status != GUC_DOORBELL_ENABLED); +} + +static void guc_add_request(struct intel_guc *guc, struct i915_request *rq) +{ + struct intel_guc_client *client = guc->execbuf_client; + struct intel_engine_cs *engine = rq->engine; + u32 ctx_desc = lower_32_bits(rq->hw_context->lrc_desc); + u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64); + + guc_wq_item_append(client, engine->guc_id, ctx_desc, + ring_tail, rq->fence.seqno); + guc_ring_doorbell(client); + + client->submissions[engine->id] += 1; +} + +/* + * When we're doing submissions using regular execlists backend, writing to + * ELSP from CPU side is enough to make sure that writes to ringbuffer pages + * pinned in mappable aperture portion of GGTT are visible to command streamer. + * Writes done by GuC on our behalf are not guaranteeing such ordering, + * therefore, to ensure the flush, we're issuing a POSTING READ. + */ +static void flush_ggtt_writes(struct i915_vma *vma) +{ + struct drm_i915_private *i915 = vma->vm->i915; + + if (i915_vma_is_map_and_fenceable(vma)) + intel_uncore_posting_read_fw(&i915->uncore, GUC_STATUS); +} + +static void guc_submit(struct intel_engine_cs *engine, + struct i915_request **out, + struct i915_request **end) +{ + struct intel_guc *guc = &engine->i915->guc; + struct intel_guc_client *client = guc->execbuf_client; + + spin_lock(&client->wq_lock); + + do { + struct i915_request *rq = *out++; + + flush_ggtt_writes(rq->ring->vma); + guc_add_request(guc, rq); + } while (out != end); + + spin_unlock(&client->wq_lock); +} + +static inline int rq_prio(const struct i915_request *rq) +{ + return rq->sched.attr.priority | __NO_PREEMPTION; +} + +static struct i915_request *schedule_in(struct i915_request *rq, int idx) +{ + trace_i915_request_in(rq, idx); + + if (!rq->hw_context->inflight) + rq->hw_context->inflight = rq->engine; + intel_context_inflight_inc(rq->hw_context); + + return i915_request_get(rq); +} + +static void schedule_out(struct i915_request *rq) +{ + trace_i915_request_out(rq); + + intel_context_inflight_dec(rq->hw_context); + if (!intel_context_inflight_count(rq->hw_context)) + rq->hw_context->inflight = NULL; + + i915_request_put(rq); +} + +static void __guc_dequeue(struct intel_engine_cs *engine) +{ + struct intel_engine_execlists * const execlists = &engine->execlists; + struct i915_request **first = execlists->inflight; + struct i915_request ** const last_port = first + execlists->port_mask; + struct i915_request *last = first[0]; + struct i915_request **port; + bool submit = false; + struct rb_node *rb; + + lockdep_assert_held(&engine->active.lock); + + if (last) { + if (*++first) + return; + + last = NULL; + } + + port = first; + while ((rb = rb_first_cached(&execlists->queue))) { + struct i915_priolist *p = to_priolist(rb); + struct i915_request *rq, *rn; + int i; + + priolist_for_each_request_consume(rq, rn, p, i) { + if (last && rq->hw_context != last->hw_context) { + if (port == last_port) + goto done; + + *port = schedule_in(last, + port - execlists->inflight); + port++; + } + + list_del_init(&rq->sched.link); + __i915_request_submit(rq); + submit = true; + last = rq; + } + + rb_erase_cached(&p->node, &execlists->queue); + i915_priolist_free(p); + } +done: + execlists->queue_priority_hint = + rb ? 
to_priolist(rb)->priority : INT_MIN; + if (submit) { + *port = schedule_in(last, port - execlists->inflight); + *++port = NULL; + guc_submit(engine, first, port); + } + execlists->active = execlists->inflight; +} + +static void guc_submission_tasklet(unsigned long data) +{ + struct intel_engine_cs * const engine = (struct intel_engine_cs *)data; + struct intel_engine_execlists * const execlists = &engine->execlists; + struct i915_request **port, *rq; + unsigned long flags; + + spin_lock_irqsave(&engine->active.lock, flags); + + for (port = execlists->inflight; (rq = *port); port++) { + if (!i915_request_completed(rq)) + break; + + schedule_out(rq); + } + if (port != execlists->inflight) { + int idx = port - execlists->inflight; + int rem = ARRAY_SIZE(execlists->inflight) - idx; + memmove(execlists->inflight, port, rem * sizeof(*port)); + } + + __guc_dequeue(engine); + + spin_unlock_irqrestore(&engine->active.lock, flags); +} + +static void guc_reset_prepare(struct intel_engine_cs *engine) +{ + struct intel_engine_execlists * const execlists = &engine->execlists; + + GEM_TRACE("%s\n", engine->name); + + /* + * Prevent request submission to the hardware until we have + * completed the reset in i915_gem_reset_finish(). If a request + * is completed by one engine, it may then queue a request + * to a second via its execlists->tasklet *just* as we are + * calling engine->init_hw() and also writing the ELSP. + * Turning off the execlists->tasklet until the reset is over + * prevents the race. + */ + __tasklet_disable_sync_once(&execlists->tasklet); +} + +static void guc_reset(struct intel_engine_cs *engine, bool stalled) +{ + struct intel_engine_execlists * const execlists = &engine->execlists; + struct i915_request *rq; + unsigned long flags; + + spin_lock_irqsave(&engine->active.lock, flags); + + execlists_cancel_port_requests(execlists); + + /* Push back any incomplete requests for replay after the reset. */ + rq = execlists_unwind_incomplete_requests(execlists); + if (!rq) + goto out_unlock; + + if (!i915_request_started(rq)) + stalled = false; + + __i915_request_reset(rq, stalled); + intel_lr_context_reset(engine, rq->hw_context, rq->head, stalled); + +out_unlock: + spin_unlock_irqrestore(&engine->active.lock, flags); +} + +static void guc_cancel_requests(struct intel_engine_cs *engine) +{ + struct intel_engine_execlists * const execlists = &engine->execlists; + struct i915_request *rq, *rn; + struct rb_node *rb; + unsigned long flags; + + GEM_TRACE("%s\n", engine->name); + + /* + * Before we call engine->cancel_requests(), we should have exclusive + * access to the submission state. This is arranged for us by the + * caller disabling the interrupt generation, the tasklet and other + * threads that may then access the same state, giving us a free hand + * to reset state. However, we still need to let lockdep be aware that + * we know this state may be accessed in hardirq context, so we + * disable the irq around this manipulation and we want to keep + * the spinlock focused on its duties and not accidentally conflate + * coverage to the submission's irq state. (Similarly, although we + * shouldn't need to disable irq around the manipulation of the + * submission's irq state, we also wish to remind ourselves that + * it is irq state.) + */ + spin_lock_irqsave(&engine->active.lock, flags); + + /* Cancel the requests on the HW and clear the ELSP tracker. */ + execlists_cancel_port_requests(execlists); + + /* Mark all executing requests as skipped. 
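guc_submission_tasklet() above retires completed requests from the head of the NULL-terminated inflight[] array and then slides the survivors down with memmove(). A user-space sketch of that compaction step (the array size and request shape are invented for illustration):

#include <stdio.h>
#include <string.h>

#define NUM_PORTS 4 /* illustrative size of execlists->inflight[] */

struct toy_request { int id; int completed; };

/*
 * Retire completed requests from the front of the NULL-terminated array,
 * then memmove the remainder down, as guc_submission_tasklet() does.
 */
static void retire_completed(struct toy_request *inflight[])
{
	struct toy_request **port = inflight;

	while (*port && (*port)->completed)
		port++; /* the driver calls schedule_out() here */

	if (port != inflight) {
		int idx = port - inflight;
		int rem = NUM_PORTS - idx;

		memmove(inflight, port, rem * sizeof(*port));
	}
}

int main(void)
{
	struct toy_request done = { 1, 1 }, busy = { 2, 0 };
	struct toy_request *inflight[NUM_PORTS] = { &done, &busy, NULL, NULL };

	retire_completed(inflight);
	printf("oldest outstanding request: %d\n", inflight[0]->id); /* prints 2 */
	return 0;
}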
*/ + list_for_each_entry(rq, &engine->active.requests, sched.link) { + if (!i915_request_signaled(rq)) + dma_fence_set_error(&rq->fence, -EIO); + + i915_request_mark_complete(rq); + } + + /* Flush the queued requests to the timeline list (for retiring). */ + while ((rb = rb_first_cached(&execlists->queue))) { + struct i915_priolist *p = to_priolist(rb); + int i; + + priolist_for_each_request_consume(rq, rn, p, i) { + list_del_init(&rq->sched.link); + __i915_request_submit(rq); + dma_fence_set_error(&rq->fence, -EIO); + i915_request_mark_complete(rq); + } + + rb_erase_cached(&p->node, &execlists->queue); + i915_priolist_free(p); + } + + /* Remaining _unready_ requests will be nop'ed when submitted */ + + execlists->queue_priority_hint = INT_MIN; + execlists->queue = RB_ROOT_CACHED; + + spin_unlock_irqrestore(&engine->active.lock, flags); +} + +static void guc_reset_finish(struct intel_engine_cs *engine) +{ + struct intel_engine_execlists * const execlists = &engine->execlists; + + if (__tasklet_enable(&execlists->tasklet)) + /* And kick in case we missed a new request submission. */ + tasklet_hi_schedule(&execlists->tasklet); + + GEM_TRACE("%s: depth->%d\n", engine->name, + atomic_read(&execlists->tasklet.count)); +} + +/* + * Everything below here is concerned with setup & teardown, and is + * therefore not part of the somewhat time-critical batch-submission + * path of guc_submit() above. + */ + +/* Check that a doorbell register is in the expected state */ +static bool doorbell_ok(struct intel_guc *guc, u16 db_id) +{ + bool valid; + + GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS); + + valid = __doorbell_valid(guc, db_id); + + if (test_bit(db_id, guc->doorbell_bitmap) == valid) + return true; + + DRM_DEBUG_DRIVER("Doorbell %u has unexpected state: valid=%s\n", + db_id, yesno(valid)); + + return false; +} + +static bool guc_verify_doorbells(struct intel_guc *guc) +{ + bool doorbells_ok = true; + u16 db_id; + + for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id) + if (!doorbell_ok(guc, db_id)) + doorbells_ok = false; + + return doorbells_ok; +} + +/** + * guc_client_alloc() - Allocate an intel_guc_client + * @guc: the intel_guc structure + * @priority: four levels priority _CRITICAL, _HIGH, _NORMAL and _LOW + * The kernel client to replace ExecList submission is created with + * NORMAL priority. Priority of a client for scheduler can be HIGH, + * while a preemption context can use CRITICAL. + * + * Return: An intel_guc_client object if success, else NULL. + */ +static struct intel_guc_client * +guc_client_alloc(struct intel_guc *guc, u32 priority) +{ + struct intel_guc_client *client; + struct i915_vma *vma; + void *vaddr; + int ret; + + client = kzalloc(sizeof(*client), GFP_KERNEL); + if (!client) + return ERR_PTR(-ENOMEM); + + client->guc = guc; + client->priority = priority; + client->doorbell_id = GUC_DOORBELL_INVALID; + spin_lock_init(&client->wq_lock); + + ret = ida_simple_get(&guc->stage_ids, 0, GUC_MAX_STAGE_DESCRIPTORS, + GFP_KERNEL); + if (ret < 0) + goto err_client; + + client->stage_id = ret; + + /* The first page is doorbell/proc_desc. Two followed pages are wq. */ + vma = intel_guc_allocate_vma(guc, GUC_DB_SIZE + GUC_WQ_SIZE); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + goto err_id; + } + + /* We'll keep just the first (doorbell/proc) page permanently kmap'd. 
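doorbell_ok() above cross-checks the driver's doorbell bitmap against the state the hardware reports for each doorbell id. A reduced sketch of that consistency check (the bitmap width and the simulated hardware view are assumptions):

#include <stdbool.h>
#include <stdio.h>

#define NUM_DOORBELLS 256 /* mirrors GUC_NUM_DOORBELLS */

/* The software bitmap and the (simulated) hardware view must agree. */
static bool doorbell_consistent(const unsigned char *bitmap, const bool *hw_valid,
				unsigned int db_id)
{
	bool sw_allocated = bitmap[db_id / 8] & (1u << (db_id % 8));

	return sw_allocated == hw_valid[db_id];
}

int main(void)
{
	unsigned char bitmap[NUM_DOORBELLS / 8] = { 0 };
	bool hw_valid[NUM_DOORBELLS] = { false };

	bitmap[0] |= 1u << 3; /* we believe doorbell 3 is allocated... */
	hw_valid[3] = false;  /* ...but the hardware says otherwise */

	printf("doorbell 3 consistent? %s\n",
	       doorbell_consistent(bitmap, hw_valid, 3) ? "yes" : "no");
	return 0;
}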
*/ + client->vma = vma; + + vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB); + if (IS_ERR(vaddr)) { + ret = PTR_ERR(vaddr); + goto err_vma; + } + client->vaddr = vaddr; + + ret = reserve_doorbell(client); + if (ret) + goto err_vaddr; + + client->doorbell_offset = __select_cacheline(guc); + + /* + * Since the doorbell only requires a single cacheline, we can save + * space by putting the application process descriptor in the same + * page. Use the half of the page that doesn't include the doorbell. + */ + if (client->doorbell_offset >= (GUC_DB_SIZE / 2)) + client->proc_desc_offset = 0; + else + client->proc_desc_offset = (GUC_DB_SIZE / 2); + + DRM_DEBUG_DRIVER("new priority %u client %p: stage_id %u\n", + priority, client, client->stage_id); + DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%lx\n", + client->doorbell_id, client->doorbell_offset); + + return client; + +err_vaddr: + i915_gem_object_unpin_map(client->vma->obj); +err_vma: + i915_vma_unpin_and_release(&client->vma, 0); +err_id: + ida_simple_remove(&guc->stage_ids, client->stage_id); +err_client: + kfree(client); + return ERR_PTR(ret); +} + +static void guc_client_free(struct intel_guc_client *client) +{ + unreserve_doorbell(client); + i915_vma_unpin_and_release(&client->vma, I915_VMA_RELEASE_MAP); + ida_simple_remove(&client->guc->stage_ids, client->stage_id); + kfree(client); +} + +static inline bool ctx_save_restore_disabled(struct intel_context *ce) +{ + u32 sr = ce->lrc_reg_state[CTX_CONTEXT_CONTROL + 1]; + +#define SR_DISABLED \ + _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | \ + CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT) + + return (sr & SR_DISABLED) == SR_DISABLED; + +#undef SR_DISABLED +} + +static int guc_clients_create(struct intel_guc *guc) +{ + struct intel_guc_client *client; + + GEM_BUG_ON(guc->execbuf_client); + + client = guc_client_alloc(guc, GUC_CLIENT_PRIORITY_KMD_NORMAL); + if (IS_ERR(client)) { + DRM_ERROR("Failed to create GuC client for submission!\n"); + return PTR_ERR(client); + } + guc->execbuf_client = client; + + return 0; +} + +static void guc_clients_destroy(struct intel_guc *guc) +{ + struct intel_guc_client *client; + + client = fetch_and_zero(&guc->execbuf_client); + if (client) + guc_client_free(client); +} + +static int __guc_client_enable(struct intel_guc_client *client) +{ + int ret; + + guc_proc_desc_init(client); + guc_stage_desc_init(client); + + ret = create_doorbell(client); + if (ret) + goto fail; + + return 0; + +fail: + guc_stage_desc_fini(client); + guc_proc_desc_fini(client); + return ret; +} + +static void __guc_client_disable(struct intel_guc_client *client) +{ + /* + * By the time we're here, GuC may have already been reset. if that is + * the case, instead of trying (in vain) to communicate with it, let's + * just cleanup the doorbell HW and our internal state. + */ + if (intel_guc_is_loaded(client->guc)) + destroy_doorbell(client); + else + __fini_doorbell(client); + + guc_stage_desc_fini(client); + guc_proc_desc_fini(client); +} + +static int guc_clients_enable(struct intel_guc *guc) +{ + return __guc_client_enable(guc->execbuf_client); +} + +static void guc_clients_disable(struct intel_guc *guc) +{ + if (guc->execbuf_client) + __guc_client_disable(guc->execbuf_client); +} + +/* + * Set up the memory resources to be shared with the GuC (via the GGTT) + * at firmware loading time. 
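guc_client_alloc() above parks the process descriptor in whichever half of the first page the doorbell cache line does not occupy, so a single permanently mapped page serves both. A tiny sketch of that placement rule, assuming the one-page GUC_DB_SIZE used here is 4 KiB:

#include <stdio.h>

#define DB_SIZE 4096UL /* assumed: first page of the client object (doorbell + proc desc) */

/* Put the process descriptor in the half of the page the doorbell is not in. */
static unsigned long proc_desc_offset(unsigned long doorbell_offset)
{
	return doorbell_offset >= DB_SIZE / 2 ? 0 : DB_SIZE / 2;
}

int main(void)
{
	printf("doorbell at 0x040 -> proc desc at 0x%lx\n", proc_desc_offset(0x040));
	printf("doorbell at 0x840 -> proc desc at 0x%lx\n", proc_desc_offset(0x840));
	return 0;
}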
+ */ +int intel_guc_submission_init(struct intel_guc *guc) +{ + int ret; + + if (guc->stage_desc_pool) + return 0; + + ret = guc_stage_desc_pool_create(guc); + if (ret) + return ret; + /* + * Keep static analysers happy, let them know that we allocated the + * vma after testing that it didn't exist earlier. + */ + GEM_BUG_ON(!guc->stage_desc_pool); + + WARN_ON(!guc_verify_doorbells(guc)); + ret = guc_clients_create(guc); + if (ret) + goto err_pool; + + return 0; + +err_pool: + guc_stage_desc_pool_destroy(guc); + return ret; +} + +void intel_guc_submission_fini(struct intel_guc *guc) +{ + guc_clients_destroy(guc); + WARN_ON(!guc_verify_doorbells(guc)); + + if (guc->stage_desc_pool) + guc_stage_desc_pool_destroy(guc); +} + +static void guc_interrupts_capture(struct drm_i915_private *dev_priv) +{ + struct intel_rps *rps = &dev_priv->gt_pm.rps; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int irqs; + + /* tell all command streamers to forward interrupts (but not vblank) + * to GuC + */ + irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING); + for_each_engine(engine, dev_priv, id) + ENGINE_WRITE(engine, RING_MODE_GEN7, irqs); + + /* route USER_INTERRUPT to Host, all others are sent to GuC. */ + irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | + GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT; + /* These three registers have the same bit definitions */ + I915_WRITE(GUC_BCS_RCS_IER, ~irqs); + I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs); + I915_WRITE(GUC_WD_VECS_IER, ~irqs); + + /* + * The REDIRECT_TO_GUC bit of the PMINTRMSK register directs all + * (unmasked) PM interrupts to the GuC. All other bits of this + * register *disable* generation of a specific interrupt. + * + * 'pm_intrmsk_mbz' indicates bits that are NOT to be set when + * writing to the PM interrupt mask register, i.e. interrupts + * that must not be disabled. + * + * If the GuC is handling these interrupts, then we must not let + * the PM code disable ANY interrupt that the GuC is expecting. + * So for each ENABLED (0) bit in this register, we must SET the + * bit in pm_intrmsk_mbz so that it's left enabled for the GuC. + * GuC needs ARAT expired interrupt unmasked hence it is set in + * pm_intrmsk_mbz. + * + * Here we CLEAR REDIRECT_TO_GUC bit in pm_intrmsk_mbz, which will + * result in the register bit being left SET! + */ + rps->pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK; + rps->pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC; +} + +static void guc_interrupts_release(struct drm_i915_private *dev_priv) +{ + struct intel_rps *rps = &dev_priv->gt_pm.rps; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int irqs; + + /* + * tell all command streamers NOT to forward interrupts or vblank + * to GuC. 
+ */ + irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER); + irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING); + for_each_engine(engine, dev_priv, id) + ENGINE_WRITE(engine, RING_MODE_GEN7, irqs); + + /* route all GT interrupts to the host */ + I915_WRITE(GUC_BCS_RCS_IER, 0); + I915_WRITE(GUC_VCS2_VCS1_IER, 0); + I915_WRITE(GUC_WD_VECS_IER, 0); + + rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC; + rps->pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK; +} + +static void guc_submission_park(struct intel_engine_cs *engine) +{ + intel_engine_park(engine); + intel_engine_unpin_breadcrumbs_irq(engine); + engine->flags &= ~I915_ENGINE_NEEDS_BREADCRUMB_TASKLET; +} + +static void guc_submission_unpark(struct intel_engine_cs *engine) +{ + engine->flags |= I915_ENGINE_NEEDS_BREADCRUMB_TASKLET; + intel_engine_pin_breadcrumbs_irq(engine); +} + +static void guc_set_default_submission(struct intel_engine_cs *engine) +{ + /* + * We inherit a bunch of functions from execlists that we'd like + * to keep using: + * + * engine->submit_request = execlists_submit_request; + * engine->cancel_requests = execlists_cancel_requests; + * engine->schedule = execlists_schedule; + * + * But we need to override the actual submission backend in order + * to talk to the GuC. + */ + intel_execlists_set_default_submission(engine); + + engine->execlists.tasklet.func = guc_submission_tasklet; + + engine->park = guc_submission_park; + engine->unpark = guc_submission_unpark; + + engine->reset.prepare = guc_reset_prepare; + engine->reset.reset = guc_reset; + engine->reset.finish = guc_reset_finish; + + engine->cancel_requests = guc_cancel_requests; + + engine->flags &= ~I915_ENGINE_SUPPORTS_STATS; +} + +int intel_guc_submission_enable(struct intel_guc *guc) +{ + struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err; + + /* + * We're using GuC work items for submitting work through GuC. Since + * we're coalescing multiple requests from a single context into a + * single work item prior to assigning it to execlist_port, we can + * never have more work items than the total number of ports (for all + * engines). The GuC firmware is controlling the HEAD of work queue, + * and it is guaranteed that it will remove the work item from the + * queue before our request is completed. 
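The sizing argument in the comment just above — every port on every engine can hold at most one coalesced work item, so the worst case must fit in the work queue — is exactly what the BUILD_BUG_ON below encodes. A stand-alone restatement with made-up counts (the real values come from the engine and GuC ABI headers):

#include <assert.h>
#include <stdio.h>

/* Invented counts for illustration; the driver derives them from
 * ARRAY_SIZE(execlists.inflight), I915_NUM_ENGINES and the GuC ABI. */
#define NUM_PORTS    2
#define NUM_ENGINES  8
#define WQ_ITEM_SIZE 16         /* a 4-dword work queue item */
#define WQ_SIZE      (2 * 4096) /* two pages of work queue */

/* Same invariant as the BUILD_BUG_ON in intel_guc_submission_enable(). */
static_assert(NUM_PORTS * WQ_ITEM_SIZE * NUM_ENGINES <= WQ_SIZE,
	      "work queue too small for the worst-case burst");

int main(void)
{
	printf("worst case %d bytes of %d available\n",
	       NUM_PORTS * WQ_ITEM_SIZE * NUM_ENGINES, WQ_SIZE);
	return 0;
}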
+ */ + BUILD_BUG_ON(ARRAY_SIZE(engine->execlists.inflight) * + sizeof(struct guc_wq_item) * + I915_NUM_ENGINES > GUC_WQ_SIZE); + + GEM_BUG_ON(!guc->execbuf_client); + + err = guc_clients_enable(guc); + if (err) + return err; + + /* Take over from manual control of ELSP (execlists) */ + guc_interrupts_capture(dev_priv); + + for_each_engine(engine, dev_priv, id) { + engine->set_default_submission = guc_set_default_submission; + engine->set_default_submission(engine); + } + + return 0; +} + +void intel_guc_submission_disable(struct intel_guc *guc) +{ + struct drm_i915_private *dev_priv = guc_to_i915(guc); + + GEM_BUG_ON(dev_priv->gt.awake); /* GT should be parked first */ + + guc_interrupts_release(dev_priv); + guc_clients_disable(guc); +} + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "selftest_guc.c" +#endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h new file mode 100644 index 000000000000..87a38cb6faf3 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h @@ -0,0 +1,87 @@ +/* + * Copyright © 2014-2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef _INTEL_GUC_SUBMISSION_H_ +#define _INTEL_GUC_SUBMISSION_H_ + +#include + +#include "gt/intel_engine_types.h" + +#include "i915_gem.h" +#include "i915_selftest.h" + +struct drm_i915_private; + +/* + * This structure primarily describes the GEM object shared with the GuC. + * The specs sometimes refer to this object as a "GuC context", but we use + * the term "client" to avoid confusion with hardware contexts. This + * GEM object is held for the entire lifetime of our interaction with + * the GuC, being allocated before the GuC is loaded with its firmware. + * Because there's no way to update the address used by the GuC after + * initialisation, the shared object must stay pinned into the GGTT as + * long as the GuC is in use. We also keep the first page (only) mapped + * into kernel address space, as it includes shared data that must be + * updated on every request submission. + * + * The single GEM object described here is actually made up of several + * separate areas, as far as the GuC is concerned. The first page (kept + * kmap'd) includes the "process descriptor" which holds sequence data for + * the doorbell, and one cacheline which actually *is* the doorbell; a + * write to this will "ring the doorbell" (i.e. send an interrupt to the + * GuC). 
The subsequent pages of the client object constitute the work + * queue (a circular array of work items), again described in the process + * descriptor. Work queue pages are mapped momentarily as required. + */ +struct intel_guc_client { + struct i915_vma *vma; + void *vaddr; + struct intel_guc *guc; + + /* bitmap of (host) engine ids */ + u32 priority; + u32 stage_id; + u32 proc_desc_offset; + + u16 doorbell_id; + unsigned long doorbell_offset; + + /* Protects GuC client's WQ access */ + spinlock_t wq_lock; + /* Per-engine counts of GuC submissions */ + u64 submissions[I915_NUM_ENGINES]; + + /* For testing purposes, use nop WQ items instead of real ones */ + I915_SELFTEST_DECLARE(bool use_nop_wqi); +}; + +int intel_guc_submission_init(struct intel_guc *guc); +int intel_guc_submission_enable(struct intel_guc *guc); +void intel_guc_submission_disable(struct intel_guc *guc); +void intel_guc_submission_fini(struct intel_guc *guc); +int intel_guc_preempt_work_create(struct intel_guc *guc); +void intel_guc_preempt_work_destroy(struct intel_guc *guc); + +#endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c new file mode 100644 index 000000000000..2a41ee89a16d --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c @@ -0,0 +1,174 @@ +/* + * Copyright © 2016-2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#include + +#include "intel_huc.h" +#include "i915_drv.h" + +void intel_huc_init_early(struct intel_huc *huc) +{ + struct drm_i915_private *i915 = huc_to_i915(huc); + + intel_huc_fw_init_early(huc); + + if (INTEL_GEN(i915) >= 11) { + huc->status.reg = GEN11_HUC_KERNEL_LOAD_INFO; + huc->status.mask = HUC_LOAD_SUCCESSFUL; + huc->status.value = HUC_LOAD_SUCCESSFUL; + } else { + huc->status.reg = HUC_STATUS2; + huc->status.mask = HUC_FW_VERIFIED; + huc->status.value = HUC_FW_VERIFIED; + } +} + +static int intel_huc_rsa_data_create(struct intel_huc *huc) +{ + struct drm_i915_private *i915 = huc_to_i915(huc); + struct intel_guc *guc = &i915->guc; + struct i915_vma *vma; + void *vaddr; + + /* + * HuC firmware will sit above GUC_GGTT_TOP and will not map + * through GTT. Unfortunately, this means GuC cannot perform + * the HuC auth. as the rsa offset now falls within the GuC + * inaccessible range. We resort to perma-pinning an additional + * vma within the accessible range that only contains the rsa + * signature. 
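The comment above explains that the HuC image sits above the GuC-accessible part of the GGTT, which is why only the RSA signature is copied into a separately pinned, GuC-visible vma. A small sketch of the accessibility rule; the GUC_GGTT_TOP value below is an assumption for illustration, not taken from this patch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GUC_GGTT_TOP 0xFEE00000ull /* assumed limit; the driver defines this in its GuC headers */

/* The GuC can only dereference GGTT offsets below GUC_GGTT_TOP. */
static bool guc_can_access(uint64_t ggtt_offset)
{
	return ggtt_offset < GUC_GGTT_TOP;
}

int main(void)
{
	printf("HuC image above the limit: accessible=%d\n",
	       guc_can_access(0xFF000000ull));
	printf("pinned RSA copy lower down: accessible=%d\n",
	       guc_can_access(0x00100000ull));
	return 0;
}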
The GuC can use this extra pinning to perform + * the authentication since its GGTT offset will be GuC + * accessible. + */ + vma = intel_guc_allocate_vma(guc, PAGE_SIZE); + if (IS_ERR(vma)) + return PTR_ERR(vma); + + vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB); + if (IS_ERR(vaddr)) { + i915_vma_unpin_and_release(&vma, 0); + return PTR_ERR(vaddr); + } + + huc->rsa_data = vma; + huc->rsa_data_vaddr = vaddr; + + return 0; +} + +static void intel_huc_rsa_data_destroy(struct intel_huc *huc) +{ + i915_vma_unpin_and_release(&huc->rsa_data, I915_VMA_RELEASE_MAP); +} + +int intel_huc_init(struct intel_huc *huc) +{ + int err; + + err = intel_huc_rsa_data_create(huc); + if (err) + return err; + + return intel_uc_fw_init(&huc->fw); +} + +void intel_huc_fini(struct intel_huc *huc) +{ + intel_uc_fw_fini(&huc->fw); + intel_huc_rsa_data_destroy(huc); +} + +/** + * intel_huc_auth() - Authenticate HuC uCode + * @huc: intel_huc structure + * + * Called after HuC and GuC firmware loading during intel_uc_init_hw(). + * + * This function pins HuC firmware image object into GGTT. + * Then it invokes GuC action to authenticate passing the offset to RSA + * signature through intel_guc_auth_huc(). It then waits for 50ms for + * firmware verification ACK and unpins the object. + */ +int intel_huc_auth(struct intel_huc *huc) +{ + struct drm_i915_private *i915 = huc_to_i915(huc); + struct intel_guc *guc = &i915->guc; + int ret; + + if (huc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS) + return -ENOEXEC; + + ret = intel_guc_auth_huc(guc, + intel_guc_ggtt_offset(guc, huc->rsa_data)); + if (ret) { + DRM_ERROR("HuC: GuC did not ack Auth request %d\n", ret); + goto fail; + } + + /* Check authentication status, it should be done by now */ + ret = __intel_wait_for_register(&i915->uncore, + huc->status.reg, + huc->status.mask, + huc->status.value, + 2, 50, NULL); + if (ret) { + DRM_ERROR("HuC: Firmware not verified %d\n", ret); + goto fail; + } + + return 0; + +fail: + huc->fw.load_status = INTEL_UC_FIRMWARE_FAIL; + + DRM_ERROR("HuC: Authentication failed %d\n", ret); + return ret; +} + +/** + * intel_huc_check_status() - check HuC status + * @huc: intel_huc structure + * + * This function reads status register to verify if HuC + * firmware was successfully loaded. + * + * Returns: 1 if HuC firmware is loaded and verified, + * 0 if HuC firmware is not loaded and -ENODEV if HuC + * is not present on this platform. 
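intel_huc_check_status() below boils down to a masked register compare: the HuC counts as verified when (reg & mask) == value, with the triplet picked per platform in intel_huc_init_early(). A reduced sketch of that check (the bit layout here is hypothetical):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Per-platform status descriptor, in the spirit of huc->status. */
struct huc_status_check {
	uint32_t mask;
	uint32_t value;
};

static bool huc_verified(uint32_t reg, const struct huc_status_check *c)
{
	return (reg & c->mask) == c->value;
}

int main(void)
{
	/* Hypothetical layout: bit 1 means "firmware verified". */
	const struct huc_status_check check = { .mask = 0x2, .value = 0x2 };

	printf("reg=0x0 -> verified=%d\n", huc_verified(0x0, &check));
	printf("reg=0x3 -> verified=%d\n", huc_verified(0x3, &check));
	return 0;
}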
+ */ +int intel_huc_check_status(struct intel_huc *huc) +{ + struct drm_i915_private *dev_priv = huc_to_i915(huc); + intel_wakeref_t wakeref; + bool status = false; + + if (!HAS_HUC(dev_priv)) + return -ENODEV; + + with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) + status = (I915_READ(huc->status.reg) & huc->status.mask) == + huc->status.value; + + return status; +} diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h new file mode 100644 index 000000000000..9fa3d4629f2e --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h @@ -0,0 +1,59 @@ +/* + * Copyright © 2014-2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef _INTEL_HUC_H_ +#define _INTEL_HUC_H_ + +#include "i915_reg.h" +#include "intel_uc_fw.h" +#include "intel_huc_fw.h" + +struct intel_huc { + /* Generic uC firmware management */ + struct intel_uc_fw fw; + + /* HuC-specific additions */ + struct i915_vma *rsa_data; + void *rsa_data_vaddr; + + struct { + i915_reg_t reg; + u32 mask; + u32 value; + } status; +}; + +void intel_huc_init_early(struct intel_huc *huc); +int intel_huc_init(struct intel_huc *huc); +void intel_huc_fini(struct intel_huc *huc); +int intel_huc_auth(struct intel_huc *huc); +int intel_huc_check_status(struct intel_huc *huc); + +static inline int intel_huc_sanitize(struct intel_huc *huc) +{ + intel_uc_fw_sanitize(&huc->fw); + return 0; +} + +#endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c new file mode 100644 index 000000000000..06e726ba9863 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c @@ -0,0 +1,219 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2014-2018 Intel Corporation + */ + +#include "intel_huc_fw.h" +#include "i915_drv.h" + +/** + * DOC: HuC Firmware + * + * Motivation: + * GEN9 introduces a new dedicated firmware for usage in media HEVC (High + * Efficiency Video Coding) operations. Userspace can use the firmware + * capabilities by adding HuC specific commands to batch buffers. + * + * Implementation: + * The same firmware loader is used as the GuC. However, the actual + * loading to HW is deferred until GEM initialization is done. + * + * Note that HuC firmware loading must be done before GuC loading. 
+ */ + +#define BXT_HUC_FW_MAJOR 01 +#define BXT_HUC_FW_MINOR 8 +#define BXT_BLD_NUM 2893 + +#define SKL_HUC_FW_MAJOR 01 +#define SKL_HUC_FW_MINOR 07 +#define SKL_BLD_NUM 1398 + +#define KBL_HUC_FW_MAJOR 02 +#define KBL_HUC_FW_MINOR 00 +#define KBL_BLD_NUM 1810 + +#define GLK_HUC_FW_MAJOR 03 +#define GLK_HUC_FW_MINOR 01 +#define GLK_BLD_NUM 2893 + +#define ICL_HUC_FW_MAJOR 8 +#define ICL_HUC_FW_MINOR 4 +#define ICL_BLD_NUM 3238 + +#define HUC_FW_PATH(platform, major, minor, bld_num) \ + "i915/" __stringify(platform) "_huc_ver" __stringify(major) "_" \ + __stringify(minor) "_" __stringify(bld_num) ".bin" + +#define I915_SKL_HUC_UCODE HUC_FW_PATH(skl, SKL_HUC_FW_MAJOR, \ + SKL_HUC_FW_MINOR, SKL_BLD_NUM) +MODULE_FIRMWARE(I915_SKL_HUC_UCODE); + +#define I915_BXT_HUC_UCODE HUC_FW_PATH(bxt, BXT_HUC_FW_MAJOR, \ + BXT_HUC_FW_MINOR, BXT_BLD_NUM) +MODULE_FIRMWARE(I915_BXT_HUC_UCODE); + +#define I915_KBL_HUC_UCODE HUC_FW_PATH(kbl, KBL_HUC_FW_MAJOR, \ + KBL_HUC_FW_MINOR, KBL_BLD_NUM) +MODULE_FIRMWARE(I915_KBL_HUC_UCODE); + +#define I915_GLK_HUC_UCODE HUC_FW_PATH(glk, GLK_HUC_FW_MAJOR, \ + GLK_HUC_FW_MINOR, GLK_BLD_NUM) +MODULE_FIRMWARE(I915_GLK_HUC_UCODE); + +#define I915_ICL_HUC_UCODE HUC_FW_PATH(icl, ICL_HUC_FW_MAJOR, \ + ICL_HUC_FW_MINOR, ICL_BLD_NUM) +MODULE_FIRMWARE(I915_ICL_HUC_UCODE); + +static void huc_fw_select(struct intel_uc_fw *huc_fw) +{ + struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw); + struct drm_i915_private *dev_priv = huc_to_i915(huc); + + GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC); + + if (!HAS_HUC(dev_priv)) { + huc_fw->fetch_status = INTEL_UC_FIRMWARE_NOT_SUPPORTED; + return; + } + + huc_fw->fetch_status = INTEL_UC_FIRMWARE_NOT_STARTED; + + if (i915_modparams.huc_firmware_path) { + huc_fw->path = i915_modparams.huc_firmware_path; + huc_fw->major_ver_wanted = 0; + huc_fw->minor_ver_wanted = 0; + } else if (IS_SKYLAKE(dev_priv)) { + huc_fw->path = I915_SKL_HUC_UCODE; + huc_fw->major_ver_wanted = SKL_HUC_FW_MAJOR; + huc_fw->minor_ver_wanted = SKL_HUC_FW_MINOR; + } else if (IS_BROXTON(dev_priv)) { + huc_fw->path = I915_BXT_HUC_UCODE; + huc_fw->major_ver_wanted = BXT_HUC_FW_MAJOR; + huc_fw->minor_ver_wanted = BXT_HUC_FW_MINOR; + } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) { + huc_fw->path = I915_KBL_HUC_UCODE; + huc_fw->major_ver_wanted = KBL_HUC_FW_MAJOR; + huc_fw->minor_ver_wanted = KBL_HUC_FW_MINOR; + } else if (IS_GEMINILAKE(dev_priv)) { + huc_fw->path = I915_GLK_HUC_UCODE; + huc_fw->major_ver_wanted = GLK_HUC_FW_MAJOR; + huc_fw->minor_ver_wanted = GLK_HUC_FW_MINOR; + } else if (IS_ICELAKE(dev_priv)) { + huc_fw->path = I915_ICL_HUC_UCODE; + huc_fw->major_ver_wanted = ICL_HUC_FW_MAJOR; + huc_fw->minor_ver_wanted = ICL_HUC_FW_MINOR; + } +} + +/** + * intel_huc_fw_init_early() - initializes HuC firmware struct + * @huc: intel_huc struct + * + * On platforms with HuC selects firmware for uploading + */ +void intel_huc_fw_init_early(struct intel_huc *huc) +{ + struct intel_uc_fw *huc_fw = &huc->fw; + + intel_uc_fw_init_early(huc_fw, INTEL_UC_FW_TYPE_HUC); + huc_fw_select(huc_fw); +} + +static void huc_xfer_rsa(struct intel_huc *huc) +{ + struct intel_uc_fw *fw = &huc->fw; + struct sg_table *pages = fw->obj->mm.pages; + + /* + * HuC firmware image is outside GuC accessible range. 
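The HUC_FW_PATH macro above builds the firmware file name purely by stringification, which is why the version defines keep their leading zeros. The same macro shape compiles in user space; a sketch using the SKL values from this file:

#include <stdio.h>

#define __stringify_1(x) #x
#define __stringify(x)   __stringify_1(x)

/* Same shape as the driver's HUC_FW_PATH macro. */
#define HUC_FW_PATH(platform, major, minor, bld_num) \
	"i915/" __stringify(platform) "_huc_ver" __stringify(major) "_" \
	__stringify(minor) "_" __stringify(bld_num) ".bin"

int main(void)
{
	puts(HUC_FW_PATH(skl, 01, 07, 1398)); /* i915/skl_huc_ver01_07_1398.bin */
	return 0;
}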
+ * Copy the RSA signature out of the image into + * the perma-pinned region set aside for it + */ + sg_pcopy_to_buffer(pages->sgl, pages->nents, + huc->rsa_data_vaddr, fw->rsa_size, + fw->rsa_offset); +} + +static int huc_xfer_ucode(struct intel_huc *huc) +{ + struct intel_uc_fw *huc_fw = &huc->fw; + struct drm_i915_private *dev_priv = huc_to_i915(huc); + struct intel_uncore *uncore = &dev_priv->uncore; + unsigned long offset = 0; + u32 size; + int ret; + + GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC); + + intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); + + /* Set the source address for the uCode */ + offset = intel_uc_fw_ggtt_offset(huc_fw) + + huc_fw->header_offset; + intel_uncore_write(uncore, DMA_ADDR_0_LOW, + lower_32_bits(offset)); + intel_uncore_write(uncore, DMA_ADDR_0_HIGH, + upper_32_bits(offset) & 0xFFFF); + + /* + * Hardware doesn't look at destination address for HuC. Set it to 0, + * but still program the correct address space. + */ + intel_uncore_write(uncore, DMA_ADDR_1_LOW, 0); + intel_uncore_write(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM); + + size = huc_fw->header_size + huc_fw->ucode_size; + intel_uncore_write(uncore, DMA_COPY_SIZE, size); + + /* Start the DMA */ + intel_uncore_write(uncore, DMA_CTRL, + _MASKED_BIT_ENABLE(HUC_UKERNEL | START_DMA)); + + /* Wait for DMA to finish */ + ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100); + + DRM_DEBUG_DRIVER("HuC DMA transfer wait over with ret %d\n", ret); + + /* Disable the bits once DMA is over */ + intel_uncore_write(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(HUC_UKERNEL)); + + intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); + + return ret; +} + +/** + * huc_fw_xfer() - DMA's the firmware + * @huc_fw: the firmware descriptor + * + * Transfer the firmware image to RAM for execution by the microcontroller. + * + * Return: 0 on success, non-zero on failure + */ +static int huc_fw_xfer(struct intel_uc_fw *huc_fw) +{ + struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw); + + huc_xfer_rsa(huc); + + return huc_xfer_ucode(huc); +} + +/** + * intel_huc_fw_upload() - load HuC uCode to device + * @huc: intel_huc structure + * + * Called from intel_uc_init_hw() during driver load, resume from sleep and + * after a GPU reset. Note that HuC must be loaded before GuC. + * + * The firmware image should have already been fetched into memory, so only + * check that fetch succeeded, and then transfer the image to the h/w. 
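huc_xfer_ucode() above programs the DMA source as a split GGTT address: the low 32 bits go into DMA_ADDR_0_LOW and only the bottom 16 bits of the upper half go into DMA_ADDR_0_HIGH. A quick sketch of that split (the sample offset is invented; register names are only echoed in the output):

#include <stdint.h>
#include <stdio.h>

/* Split a GGTT offset the way huc_xfer_ucode() feeds the DMA registers. */
static void split_dma_addr(uint64_t offset, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)offset;                  /* lower_32_bits() */
	*hi = (uint32_t)(offset >> 32) & 0xFFFF; /* upper_32_bits() & 0xFFFF */
}

int main(void)
{
	uint32_t lo, hi;

	split_dma_addr(0x0000000180002000ull, &lo, &hi);
	printf("DMA_ADDR_0_LOW=0x%08x DMA_ADDR_0_HIGH=0x%04x\n", lo, hi);
	return 0;
}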
+ * + * Return: non-zero code on error + */ +int intel_huc_fw_upload(struct intel_huc *huc) +{ + return intel_uc_fw_upload(&huc->fw, huc_fw_xfer); +} diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h new file mode 100644 index 000000000000..8a00a0ebddc5 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2014-2018 Intel Corporation + */ + +#ifndef _INTEL_HUC_FW_H_ +#define _INTEL_HUC_FW_H_ + +struct intel_huc; + +void intel_huc_fw_init_early(struct intel_huc *huc); +int intel_huc_fw_upload(struct intel_huc *huc); + +#endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c new file mode 100644 index 000000000000..4ea7661705b1 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -0,0 +1,635 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#include "gt/intel_reset.h" +#include "intel_uc.h" +#include "intel_guc.h" +#include "intel_guc_ads.h" +#include "intel_guc_submission.h" +#include "i915_drv.h" + +static void guc_free_load_err_log(struct intel_guc *guc); + +/* Reset GuC providing us with fresh state for both GuC and HuC. 
+ */ +static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv) +{ + int ret; + u32 guc_status; + + ret = intel_reset_guc(&dev_priv->gt); + if (ret) { + DRM_ERROR("Failed to reset GuC, ret = %d\n", ret); + return ret; + } + + guc_status = I915_READ(GUC_STATUS); + WARN(!(guc_status & GS_MIA_IN_RESET), + "GuC status: 0x%x, MIA core expected to be in reset\n", + guc_status); + + return ret; +} + +static int __get_platform_enable_guc(struct drm_i915_private *i915) +{ + struct intel_uc_fw *guc_fw = &i915->guc.fw; + struct intel_uc_fw *huc_fw = &i915->huc.fw; + int enable_guc = 0; + + if (!HAS_GUC(i915)) + return 0; + + /* We don't want to enable GuC/HuC on pre-Gen11 by default */ + if (INTEL_GEN(i915) < 11) + return 0; + + if (intel_uc_fw_is_selected(guc_fw) && intel_uc_fw_is_selected(huc_fw)) + enable_guc |= ENABLE_GUC_LOAD_HUC; + + return enable_guc; +} + +static int __get_default_guc_log_level(struct drm_i915_private *i915) +{ + int guc_log_level; + + if (!intel_uc_fw_supported(&i915->guc.fw) || + !intel_uc_is_using_guc(i915)) + guc_log_level = GUC_LOG_LEVEL_DISABLED; + else if (IS_ENABLED(CONFIG_DRM_I915_DEBUG) || + IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) + guc_log_level = GUC_LOG_LEVEL_MAX; + else + guc_log_level = GUC_LOG_LEVEL_NON_VERBOSE; + + /* Any platform specific fine-tuning can be done here */ + + return guc_log_level; +} + +/** + * sanitize_options_early - sanitize uC related modparam options + * @i915: device private + * + * In case of "enable_guc" option this function will attempt to modify + * it only if it was initially set to "auto(-1)". Default value for this + * modparam varies between platforms and it is hardcoded in driver code. + * Any other modparam value is only monitored against availability of the + * related hardware or firmware definitions. + * + * In case of "guc_log_level" option this function will attempt to modify + * it only if it was initially set to "auto(-1)" or if initial value was + * "enable(1..4)" on platforms without the GuC. Default value for this + * modparam varies between platforms and is usually set to "disable(0)" + * unless GuC is enabled on given platform and the driver is compiled with + * debug config when this modparam will default to "enable(1..4)". + */ +static void sanitize_options_early(struct drm_i915_private *i915) +{ + struct intel_uc_fw *guc_fw = &i915->guc.fw; + struct intel_uc_fw *huc_fw = &i915->huc.fw; + + /* A negative value means "use platform default" */ + if (i915_modparams.enable_guc < 0) + i915_modparams.enable_guc = __get_platform_enable_guc(i915); + + DRM_DEBUG_DRIVER("enable_guc=%d (submission:%s huc:%s)\n", + i915_modparams.enable_guc, + yesno(intel_uc_is_using_guc_submission(i915)), + yesno(intel_uc_is_using_huc(i915))); + + /* Verify GuC firmware availability */ + if (intel_uc_is_using_guc(i915) && !intel_uc_fw_is_selected(guc_fw)) { + DRM_WARN("Incompatible option detected: %s=%d, %s!\n", + "enable_guc", i915_modparams.enable_guc, + !intel_uc_fw_supported(guc_fw) ? + "no GuC hardware" : "no GuC firmware"); + } + + /* Verify HuC firmware availability */ + if (intel_uc_is_using_huc(i915) && !intel_uc_fw_is_selected(huc_fw)) { + DRM_WARN("Incompatible option detected: %s=%d, %s!\n", + "enable_guc", i915_modparams.enable_guc, + !intel_uc_fw_supported(huc_fw) ? 
+ "no HuC hardware" : "no HuC firmware"); + } + + /* XXX: GuC submission is unavailable for now */ + if (intel_uc_is_using_guc_submission(i915)) { + DRM_INFO("Incompatible option detected: %s=%d, %s!\n", + "enable_guc", i915_modparams.enable_guc, + "GuC submission not supported"); + DRM_INFO("Switching to non-GuC submission mode!\n"); + i915_modparams.enable_guc &= ~ENABLE_GUC_SUBMISSION; + } + + /* A negative value means "use platform/config default" */ + if (i915_modparams.guc_log_level < 0) + i915_modparams.guc_log_level = + __get_default_guc_log_level(i915); + + if (i915_modparams.guc_log_level > 0 && !intel_uc_is_using_guc(i915)) { + DRM_WARN("Incompatible option detected: %s=%d, %s!\n", + "guc_log_level", i915_modparams.guc_log_level, + !intel_uc_fw_supported(guc_fw) ? + "no GuC hardware" : "GuC not enabled"); + i915_modparams.guc_log_level = 0; + } + + if (i915_modparams.guc_log_level > GUC_LOG_LEVEL_MAX) { + DRM_WARN("Incompatible option detected: %s=%d, %s!\n", + "guc_log_level", i915_modparams.guc_log_level, + "verbosity too high"); + i915_modparams.guc_log_level = GUC_LOG_LEVEL_MAX; + } + + DRM_DEBUG_DRIVER("guc_log_level=%d (enabled:%s, verbose:%s, verbosity:%d)\n", + i915_modparams.guc_log_level, + yesno(i915_modparams.guc_log_level), + yesno(GUC_LOG_LEVEL_IS_VERBOSE(i915_modparams.guc_log_level)), + GUC_LOG_LEVEL_TO_VERBOSITY(i915_modparams.guc_log_level)); + + /* Make sure that sanitization was done */ + GEM_BUG_ON(i915_modparams.enable_guc < 0); + GEM_BUG_ON(i915_modparams.guc_log_level < 0); +} + +void intel_uc_init_early(struct drm_i915_private *i915) +{ + struct intel_guc *guc = &i915->guc; + struct intel_huc *huc = &i915->huc; + + intel_guc_init_early(guc); + intel_huc_init_early(huc); + + sanitize_options_early(i915); +} + +void intel_uc_cleanup_early(struct drm_i915_private *i915) +{ + struct intel_guc *guc = &i915->guc; + + guc_free_load_err_log(guc); +} + +/** + * intel_uc_init_mmio - setup uC MMIO access + * @i915: device private + * + * Setup minimal state necessary for MMIO accesses later in the + * initialization sequence. + */ +void intel_uc_init_mmio(struct drm_i915_private *i915) +{ + intel_guc_init_send_regs(&i915->guc); +} + +static void guc_capture_load_err_log(struct intel_guc *guc) +{ + if (!guc->log.vma || !intel_guc_log_get_level(&guc->log)) + return; + + if (!guc->load_err_log) + guc->load_err_log = i915_gem_object_get(guc->log.vma->obj); + + return; +} + +static void guc_free_load_err_log(struct intel_guc *guc) +{ + if (guc->load_err_log) + i915_gem_object_put(guc->load_err_log); +} + +/* + * Events triggered while CT buffers are disabled are logged in the SCRATCH_15 + * register using the same bits used in the CT message payload. Since our + * communication channel with guc is turned off at this point, we can save the + * message and handle it after we turn it back on. + */ +static void guc_clear_mmio_msg(struct intel_guc *guc) +{ + intel_uncore_write(&guc_to_i915(guc)->uncore, SOFT_SCRATCH(15), 0); +} + +static void guc_get_mmio_msg(struct intel_guc *guc) +{ + u32 val; + + spin_lock_irq(&guc->irq_lock); + + val = intel_uncore_read(&guc_to_i915(guc)->uncore, SOFT_SCRATCH(15)); + guc->mmio_msg |= val & guc->msg_enabled_mask; + + /* + * clear all events, including the ones we're not currently servicing, + * to make sure we don't try to process a stale message if we enable + * handling of more events later. 
+ */ + guc_clear_mmio_msg(guc); + + spin_unlock_irq(&guc->irq_lock); +} + +static void guc_handle_mmio_msg(struct intel_guc *guc) +{ + struct drm_i915_private *i915 = guc_to_i915(guc); + + /* we need communication to be enabled to reply to GuC */ + GEM_BUG_ON(guc->handler == intel_guc_to_host_event_handler_nop); + + if (!guc->mmio_msg) + return; + + spin_lock_irq(&i915->irq_lock); + intel_guc_to_host_process_recv_msg(guc, &guc->mmio_msg, 1); + spin_unlock_irq(&i915->irq_lock); + + guc->mmio_msg = 0; +} + +static void guc_reset_interrupts(struct intel_guc *guc) +{ + guc->interrupts.reset(guc); +} + +static void guc_enable_interrupts(struct intel_guc *guc) +{ + guc->interrupts.enable(guc); +} + +static void guc_disable_interrupts(struct intel_guc *guc) +{ + guc->interrupts.disable(guc); +} + +static int guc_enable_communication(struct intel_guc *guc) +{ + struct drm_i915_private *i915 = guc_to_i915(guc); + int ret; + + ret = intel_guc_ct_enable(&guc->ct); + if (ret) + return ret; + + guc->send = intel_guc_send_ct; + guc->handler = intel_guc_to_host_event_handler_ct; + + /* check for mmio messages received before/during the CT enable */ + guc_get_mmio_msg(guc); + guc_handle_mmio_msg(guc); + + guc_enable_interrupts(guc); + + /* check for CT messages received before we enabled interrupts */ + spin_lock_irq(&i915->irq_lock); + intel_guc_to_host_event_handler_ct(guc); + spin_unlock_irq(&i915->irq_lock); + + DRM_INFO("GuC communication enabled\n"); + + return 0; +} + +static void guc_stop_communication(struct intel_guc *guc) +{ + intel_guc_ct_stop(&guc->ct); + + guc->send = intel_guc_send_nop; + guc->handler = intel_guc_to_host_event_handler_nop; + + guc_clear_mmio_msg(guc); +} + +static void guc_disable_communication(struct intel_guc *guc) +{ + /* + * Events generated during or after CT disable are logged by guc in + * via mmio. Make sure the register is clear before disabling CT since + * all events we cared about have already been processed via CT. + */ + guc_clear_mmio_msg(guc); + + guc_disable_interrupts(guc); + + guc->send = intel_guc_send_nop; + guc->handler = intel_guc_to_host_event_handler_nop; + + intel_guc_ct_disable(&guc->ct); + + /* + * Check for messages received during/after the CT disable. We do not + * expect any messages to have arrived via CT between the interrupt + * disable and the CT disable because GuC should've been idle until we + * triggered the CT disable protocol. 
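guc_enable_communication() and guc_disable_communication() above swap the guc->send and guc->handler function pointers between the CT-backed implementations and nop stubs, so callers never need to check whether the channel is up. A reduced sketch of that pattern (the struct layout and the error value are invented for the example):

#include <stdio.h>

struct toy_guc {
	int (*send)(struct toy_guc *guc, const char *action);
};

static int send_nop(struct toy_guc *guc, const char *action)
{
	(void)guc;
	(void)action;
	return -1; /* channel is down: report an error instead of touching hardware */
}

static int send_ct(struct toy_guc *guc, const char *action)
{
	(void)guc;
	printf("CT send: %s\n", action);
	return 0;
}

static void enable_communication(struct toy_guc *guc)  { guc->send = send_ct; }
static void disable_communication(struct toy_guc *guc) { guc->send = send_nop; }

int main(void)
{
	struct toy_guc guc = { .send = send_nop };

	enable_communication(&guc);
	guc.send(&guc, "sample action");
	disable_communication(&guc);
	printf("send while disabled -> %d\n", guc.send(&guc, "sample action"));
	return 0;
}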
+ */ + guc_get_mmio_msg(guc); + + DRM_INFO("GuC communication disabled\n"); +} + +void intel_uc_fetch_firmwares(struct drm_i915_private *i915) +{ + if (!USES_GUC(i915)) + return; + + intel_uc_fw_fetch(i915, &i915->guc.fw); + + if (USES_HUC(i915)) + intel_uc_fw_fetch(i915, &i915->huc.fw); +} + +void intel_uc_cleanup_firmwares(struct drm_i915_private *i915) +{ + if (!USES_GUC(i915)) + return; + + if (USES_HUC(i915)) + intel_uc_fw_cleanup_fetch(&i915->huc.fw); + + intel_uc_fw_cleanup_fetch(&i915->guc.fw); +} + +int intel_uc_init(struct drm_i915_private *i915) +{ + struct intel_guc *guc = &i915->guc; + struct intel_huc *huc = &i915->huc; + int ret; + + if (!USES_GUC(i915)) + return 0; + + if (!intel_uc_fw_supported(&guc->fw)) + return -ENODEV; + + /* XXX: GuC submission is unavailable for now */ + GEM_BUG_ON(USES_GUC_SUBMISSION(i915)); + + ret = intel_guc_init(guc); + if (ret) + return ret; + + if (USES_HUC(i915)) { + ret = intel_huc_init(huc); + if (ret) + goto err_guc; + } + + if (USES_GUC_SUBMISSION(i915)) { + /* + * This is stuff we need to have available at fw load time + * if we are planning to enable submission later + */ + ret = intel_guc_submission_init(guc); + if (ret) + goto err_huc; + } + + return 0; + +err_huc: + if (USES_HUC(i915)) + intel_huc_fini(huc); +err_guc: + intel_guc_fini(guc); + return ret; +} + +void intel_uc_fini(struct drm_i915_private *i915) +{ + struct intel_guc *guc = &i915->guc; + + if (!USES_GUC(i915)) + return; + + GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw)); + + if (USES_GUC_SUBMISSION(i915)) + intel_guc_submission_fini(guc); + + if (USES_HUC(i915)) + intel_huc_fini(&i915->huc); + + intel_guc_fini(guc); +} + +static void __uc_sanitize(struct drm_i915_private *i915) +{ + struct intel_guc *guc = &i915->guc; + struct intel_huc *huc = &i915->huc; + + GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw)); + + intel_huc_sanitize(huc); + intel_guc_sanitize(guc); + + __intel_uc_reset_hw(i915); +} + +void intel_uc_sanitize(struct drm_i915_private *i915) +{ + if (!USES_GUC(i915)) + return; + + __uc_sanitize(i915); +} + +int intel_uc_init_hw(struct drm_i915_private *i915) +{ + struct intel_guc *guc = &i915->guc; + struct intel_huc *huc = &i915->huc; + int ret, attempts; + + if (!USES_GUC(i915)) + return 0; + + GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw)); + + guc_reset_interrupts(guc); + + /* WaEnableuKernelHeaderValidFix:skl */ + /* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */ + if (IS_GEN(i915, 9)) + attempts = 3; + else + attempts = 1; + + while (attempts--) { + /* + * Always reset the GuC just before (re)loading, so + * that the state and timing are fairly predictable + */ + ret = __intel_uc_reset_hw(i915); + if (ret) + goto err_out; + + if (USES_HUC(i915)) { + ret = intel_huc_fw_upload(huc); + if (ret) + goto err_out; + } + + intel_guc_ads_reset(guc); + intel_guc_init_params(guc); + ret = intel_guc_fw_upload(guc); + if (ret == 0) + break; + + DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and " + "retry %d more time(s)\n", ret, attempts); + } + + /* Did we succeded or run out of retries? 
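intel_uc_init_hw() above wraps the GuC/HuC upload in a small retry loop: Gen9 parts get three attempts, everything else one, and the GuC is reset before every try. A compact sketch of that control flow (the loader callback and error codes are placeholders):

#include <stdbool.h>
#include <stdio.h>

/* Reset-and-retry shape of intel_uc_init_hw(): a few attempts on Gen9, one otherwise. */
static int load_with_retries(bool is_gen9, int (*try_load)(int attempts_left))
{
	int attempts = is_gen9 ? 3 : 1;
	int ret = -1;

	while (attempts--) {
		/* the driver resets the GuC here so each attempt starts clean */
		ret = try_load(attempts);
		if (ret == 0)
			break;
		printf("load failed (%d), attempts left: %d\n", ret, attempts);
	}

	return ret;
}

static int flaky_load(int attempts_left)
{
	return attempts_left == 0 ? 0 : -5; /* succeed only on the final attempt */
}

int main(void)
{
	printf("final result: %d\n", load_with_retries(true, flaky_load));
	return 0;
}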
*/ + if (ret) + goto err_log_capture; + + ret = guc_enable_communication(guc); + if (ret) + goto err_log_capture; + + if (USES_HUC(i915)) { + ret = intel_huc_auth(huc); + if (ret) + goto err_communication; + } + + ret = intel_guc_sample_forcewake(guc); + if (ret) + goto err_communication; + + if (USES_GUC_SUBMISSION(i915)) { + ret = intel_guc_submission_enable(guc); + if (ret) + goto err_communication; + } + + dev_info(i915->drm.dev, "GuC firmware version %u.%u\n", + guc->fw.major_ver_found, guc->fw.minor_ver_found); + dev_info(i915->drm.dev, "GuC submission %s\n", + enableddisabled(USES_GUC_SUBMISSION(i915))); + dev_info(i915->drm.dev, "HuC %s\n", + enableddisabled(USES_HUC(i915))); + + return 0; + + /* + * We've failed to load the firmware :( + */ +err_communication: + guc_disable_communication(guc); +err_log_capture: + guc_capture_load_err_log(guc); +err_out: + __uc_sanitize(i915); + + /* + * Note that there is no fallback as either user explicitly asked for + * the GuC or driver default option was to run with the GuC enabled. + */ + if (GEM_WARN_ON(ret == -EIO)) + ret = -EINVAL; + + dev_err(i915->drm.dev, "GuC initialization failed %d\n", ret); + return ret; +} + +void intel_uc_fini_hw(struct drm_i915_private *i915) +{ + struct intel_guc *guc = &i915->guc; + + if (!intel_guc_is_loaded(guc)) + return; + + GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw)); + + if (USES_GUC_SUBMISSION(i915)) + intel_guc_submission_disable(guc); + + guc_disable_communication(guc); + __uc_sanitize(i915); +} + +/** + * intel_uc_reset_prepare - Prepare for reset + * @i915: device private + * + * Preparing for full gpu reset. + */ +void intel_uc_reset_prepare(struct drm_i915_private *i915) +{ + struct intel_guc *guc = &i915->guc; + + if (!intel_guc_is_loaded(guc)) + return; + + guc_stop_communication(guc); + __uc_sanitize(i915); +} + +void intel_uc_runtime_suspend(struct drm_i915_private *i915) +{ + struct intel_guc *guc = &i915->guc; + int err; + + if (!intel_guc_is_loaded(guc)) + return; + + err = intel_guc_suspend(guc); + if (err) + DRM_DEBUG_DRIVER("Failed to suspend GuC, err=%d", err); + + guc_disable_communication(guc); +} + +void intel_uc_suspend(struct drm_i915_private *i915) +{ + struct intel_guc *guc = &i915->guc; + intel_wakeref_t wakeref; + + if (!intel_guc_is_loaded(guc)) + return; + + with_intel_runtime_pm(&i915->runtime_pm, wakeref) + intel_uc_runtime_suspend(i915); +} + +int intel_uc_resume(struct drm_i915_private *i915) +{ + struct intel_guc *guc = &i915->guc; + int err; + + if (!intel_guc_is_loaded(guc)) + return 0; + + guc_enable_communication(guc); + + err = intel_guc_resume(guc); + if (err) { + DRM_DEBUG_DRIVER("Failed to resume GuC, err=%d", err); + return err; + } + + return 0; +} diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_uc.h new file mode 100644 index 000000000000..5a1383e192dd --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.h @@ -0,0 +1,64 @@ +/* + * Copyright © 2014 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall 
be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ +#ifndef _INTEL_UC_H_ +#define _INTEL_UC_H_ + +#include "intel_guc.h" +#include "intel_huc.h" +#include "i915_params.h" + +void intel_uc_init_early(struct drm_i915_private *dev_priv); +void intel_uc_cleanup_early(struct drm_i915_private *dev_priv); +void intel_uc_init_mmio(struct drm_i915_private *dev_priv); +void intel_uc_fetch_firmwares(struct drm_i915_private *dev_priv); +void intel_uc_cleanup_firmwares(struct drm_i915_private *dev_priv); +void intel_uc_sanitize(struct drm_i915_private *dev_priv); +int intel_uc_init_hw(struct drm_i915_private *dev_priv); +void intel_uc_fini_hw(struct drm_i915_private *dev_priv); +int intel_uc_init(struct drm_i915_private *dev_priv); +void intel_uc_fini(struct drm_i915_private *dev_priv); +void intel_uc_reset_prepare(struct drm_i915_private *i915); +void intel_uc_suspend(struct drm_i915_private *i915); +void intel_uc_runtime_suspend(struct drm_i915_private *i915); +int intel_uc_resume(struct drm_i915_private *dev_priv); + +static inline bool intel_uc_is_using_guc(struct drm_i915_private *i915) +{ + GEM_BUG_ON(i915_modparams.enable_guc < 0); + return i915_modparams.enable_guc > 0; +} + +static inline bool intel_uc_is_using_guc_submission(struct drm_i915_private *i915) +{ + GEM_BUG_ON(i915_modparams.enable_guc < 0); + return i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION; +} + +static inline bool intel_uc_is_using_huc(struct drm_i915_private *i915) +{ + GEM_BUG_ON(i915_modparams.enable_guc < 0); + return i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC; +} + +#endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c new file mode 100644 index 000000000000..8ce7210907c0 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -0,0 +1,359 @@ +/* + * Copyright © 2016-2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + */ + +#include +#include +#include + +#include "intel_uc_fw.h" +#include "i915_drv.h" + +/** + * intel_uc_fw_fetch - fetch uC firmware + * + * @dev_priv: device private + * @uc_fw: uC firmware + * + * Fetch uC firmware into GEM obj. + */ +void intel_uc_fw_fetch(struct drm_i915_private *dev_priv, + struct intel_uc_fw *uc_fw) +{ + struct pci_dev *pdev = dev_priv->drm.pdev; + struct drm_i915_gem_object *obj; + const struct firmware *fw = NULL; + struct uc_css_header *css; + size_t size; + int err; + + GEM_BUG_ON(!intel_uc_fw_supported(uc_fw)); + + if (!uc_fw->path) { + dev_info(dev_priv->drm.dev, + "%s: No firmware was defined for %s!\n", + intel_uc_fw_type_repr(uc_fw->type), + intel_platform_name(INTEL_INFO(dev_priv)->platform)); + return; + } + + DRM_DEBUG_DRIVER("%s fw fetch %s\n", + intel_uc_fw_type_repr(uc_fw->type), uc_fw->path); + + uc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING; + DRM_DEBUG_DRIVER("%s fw fetch %s\n", + intel_uc_fw_type_repr(uc_fw->type), + intel_uc_fw_status_repr(uc_fw->fetch_status)); + + err = request_firmware(&fw, uc_fw->path, &pdev->dev); + if (err) { + DRM_DEBUG_DRIVER("%s fw request_firmware err=%d\n", + intel_uc_fw_type_repr(uc_fw->type), err); + goto fail; + } + + DRM_DEBUG_DRIVER("%s fw size %zu ptr %p\n", + intel_uc_fw_type_repr(uc_fw->type), fw->size, fw); + + /* Check the size of the blob before examining buffer contents */ + if (fw->size < sizeof(struct uc_css_header)) { + DRM_WARN("%s: Unexpected firmware size (%zu, min %zu)\n", + intel_uc_fw_type_repr(uc_fw->type), + fw->size, sizeof(struct uc_css_header)); + err = -ENODATA; + goto fail; + } + + css = (struct uc_css_header *)fw->data; + + /* Firmware bits always start from header */ + uc_fw->header_offset = 0; + uc_fw->header_size = (css->header_size_dw - css->modulus_size_dw - + css->key_size_dw - css->exponent_size_dw) * + sizeof(u32); + + if (uc_fw->header_size != sizeof(struct uc_css_header)) { + DRM_WARN("%s: Mismatched firmware header definition\n", + intel_uc_fw_type_repr(uc_fw->type)); + err = -ENOEXEC; + goto fail; + } + + /* then, uCode */ + uc_fw->ucode_offset = uc_fw->header_offset + uc_fw->header_size; + uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32); + + /* now RSA */ + if (css->key_size_dw != UOS_RSA_SCRATCH_COUNT) { + DRM_WARN("%s: Mismatched firmware RSA key size (%u)\n", + intel_uc_fw_type_repr(uc_fw->type), css->key_size_dw); + err = -ENOEXEC; + goto fail; + } + uc_fw->rsa_offset = uc_fw->ucode_offset + uc_fw->ucode_size; + uc_fw->rsa_size = css->key_size_dw * sizeof(u32); + + /* At least, it should have header, uCode and RSA. Size of all three. 
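+	 * As a purely illustrative example (the numbers below are hypothetical,
+	 * not taken from any real blob): a 128-byte CSS header, a 128 KiB uCode
+	 * image and a 256-byte RSA signature would give
+	 *
+	 *	size = 128 + (128 * 1024) + 256 = 131456 bytes
+	 *
+	 * and any blob shorter than that is rejected as truncated just below.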
*/ + size = uc_fw->header_size + uc_fw->ucode_size + uc_fw->rsa_size; + if (fw->size < size) { + DRM_WARN("%s: Truncated firmware (%zu, expected %zu)\n", + intel_uc_fw_type_repr(uc_fw->type), fw->size, size); + err = -ENOEXEC; + goto fail; + } + + /* Get version numbers from the CSS header */ + switch (uc_fw->type) { + case INTEL_UC_FW_TYPE_GUC: + uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_GUC_MAJOR, + css->sw_version); + uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_GUC_MINOR, + css->sw_version); + break; + + case INTEL_UC_FW_TYPE_HUC: + uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_HUC_MAJOR, + css->sw_version); + uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_HUC_MINOR, + css->sw_version); + break; + + default: + MISSING_CASE(uc_fw->type); + break; + } + + DRM_DEBUG_DRIVER("%s fw version %u.%u (wanted %u.%u)\n", + intel_uc_fw_type_repr(uc_fw->type), + uc_fw->major_ver_found, uc_fw->minor_ver_found, + uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted); + + if (uc_fw->major_ver_wanted == 0 && uc_fw->minor_ver_wanted == 0) { + DRM_NOTE("%s: Skipping firmware version check\n", + intel_uc_fw_type_repr(uc_fw->type)); + } else if (uc_fw->major_ver_found != uc_fw->major_ver_wanted || + uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) { + DRM_NOTE("%s: Wrong firmware version (%u.%u, required %u.%u)\n", + intel_uc_fw_type_repr(uc_fw->type), + uc_fw->major_ver_found, uc_fw->minor_ver_found, + uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted); + err = -ENOEXEC; + goto fail; + } + + obj = i915_gem_object_create_shmem_from_data(dev_priv, + fw->data, fw->size); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + DRM_DEBUG_DRIVER("%s fw object_create err=%d\n", + intel_uc_fw_type_repr(uc_fw->type), err); + goto fail; + } + + uc_fw->obj = obj; + uc_fw->size = fw->size; + uc_fw->fetch_status = INTEL_UC_FIRMWARE_SUCCESS; + DRM_DEBUG_DRIVER("%s fw fetch %s\n", + intel_uc_fw_type_repr(uc_fw->type), + intel_uc_fw_status_repr(uc_fw->fetch_status)); + + release_firmware(fw); + return; + +fail: + uc_fw->fetch_status = INTEL_UC_FIRMWARE_FAIL; + DRM_DEBUG_DRIVER("%s fw fetch %s\n", + intel_uc_fw_type_repr(uc_fw->type), + intel_uc_fw_status_repr(uc_fw->fetch_status)); + + DRM_WARN("%s: Failed to fetch firmware %s (error %d)\n", + intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err); + DRM_INFO("%s: Firmware can be downloaded from %s\n", + intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL); + + release_firmware(fw); /* OK even if fw is NULL */ +} + +static void intel_uc_fw_ggtt_bind(struct intel_uc_fw *uc_fw) +{ + struct drm_i915_gem_object *obj = uc_fw->obj; + struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt; + struct i915_vma dummy = { + .node.start = intel_uc_fw_ggtt_offset(uc_fw), + .node.size = obj->base.size, + .pages = obj->mm.pages, + .vm = &ggtt->vm, + }; + + GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); + GEM_BUG_ON(dummy.node.size > ggtt->uc_fw.size); + + /* uc_fw->obj cache domains were not controlled across suspend */ + drm_clflush_sg(dummy.pages); + + ggtt->vm.insert_entries(&ggtt->vm, &dummy, I915_CACHE_NONE, 0); +} + +static void intel_uc_fw_ggtt_unbind(struct intel_uc_fw *uc_fw) +{ + struct drm_i915_gem_object *obj = uc_fw->obj; + struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt; + u64 start = intel_uc_fw_ggtt_offset(uc_fw); + + ggtt->vm.clear_range(&ggtt->vm, start, obj->base.size); +} + +/** + * intel_uc_fw_upload - load uC firmware using custom loader + * @uc_fw: uC firmware + * @xfer: custom uC firmware loader function + * + * Loads uC firmware using custom 
loader and updates internal flags. + * + * Return: 0 on success, non-zero on failure. + */ +int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, + int (*xfer)(struct intel_uc_fw *uc_fw)) +{ + int err; + + DRM_DEBUG_DRIVER("%s fw load %s\n", + intel_uc_fw_type_repr(uc_fw->type), uc_fw->path); + + if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS) + return -ENOEXEC; + + uc_fw->load_status = INTEL_UC_FIRMWARE_PENDING; + DRM_DEBUG_DRIVER("%s fw load %s\n", + intel_uc_fw_type_repr(uc_fw->type), + intel_uc_fw_status_repr(uc_fw->load_status)); + + /* Call custom loader */ + intel_uc_fw_ggtt_bind(uc_fw); + err = xfer(uc_fw); + intel_uc_fw_ggtt_unbind(uc_fw); + if (err) + goto fail; + + uc_fw->load_status = INTEL_UC_FIRMWARE_SUCCESS; + DRM_DEBUG_DRIVER("%s fw load %s\n", + intel_uc_fw_type_repr(uc_fw->type), + intel_uc_fw_status_repr(uc_fw->load_status)); + + DRM_INFO("%s: Loaded firmware %s (version %u.%u)\n", + intel_uc_fw_type_repr(uc_fw->type), + uc_fw->path, + uc_fw->major_ver_found, uc_fw->minor_ver_found); + + return 0; + +fail: + uc_fw->load_status = INTEL_UC_FIRMWARE_FAIL; + DRM_DEBUG_DRIVER("%s fw load %s\n", + intel_uc_fw_type_repr(uc_fw->type), + intel_uc_fw_status_repr(uc_fw->load_status)); + + DRM_WARN("%s: Failed to load firmware %s (error %d)\n", + intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err); + + return err; +} + +int intel_uc_fw_init(struct intel_uc_fw *uc_fw) +{ + int err; + + if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS) + return -ENOEXEC; + + err = i915_gem_object_pin_pages(uc_fw->obj); + if (err) + DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n", + intel_uc_fw_type_repr(uc_fw->type), err); + + return err; +} + +void intel_uc_fw_fini(struct intel_uc_fw *uc_fw) +{ + if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS) + return; + + i915_gem_object_unpin_pages(uc_fw->obj); +} + +u32 intel_uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw) +{ + struct drm_i915_private *i915 = to_i915(uc_fw->obj->base.dev); + struct i915_ggtt *ggtt = &i915->ggtt; + struct drm_mm_node *node = &ggtt->uc_fw; + + GEM_BUG_ON(!node->allocated); + GEM_BUG_ON(upper_32_bits(node->start)); + GEM_BUG_ON(upper_32_bits(node->start + node->size - 1)); + + return lower_32_bits(node->start); +} + +/** + * intel_uc_fw_cleanup_fetch - cleanup uC firmware + * + * @uc_fw: uC firmware + * + * Cleans up uC firmware by releasing the firmware GEM obj. + */ +void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw) +{ + struct drm_i915_gem_object *obj; + + obj = fetch_and_zero(&uc_fw->obj); + if (obj) + i915_gem_object_put(obj); + + uc_fw->fetch_status = INTEL_UC_FIRMWARE_NOT_STARTED; +} + +/** + * intel_uc_fw_dump - dump information about uC firmware + * @uc_fw: uC firmware + * @p: the &drm_printer + * + * Pretty printer for uC firmware. 
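+ *
+ * For example, a debugfs node could route this dump into a seq_file through
+ * a &drm_printer; guc_fw_info() below is only a hypothetical caller used to
+ * illustrate the calling convention, not a function added by this patch:
+ *
+ *	static void guc_fw_info(struct drm_i915_private *i915, struct seq_file *m)
+ *	{
+ *		struct drm_printer p = drm_seq_file_printer(m);
+ *
+ *		intel_uc_fw_dump(&i915->guc.fw, &p);
+ *	}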
+ */ +void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p) +{ + drm_printf(p, "%s firmware: %s\n", + intel_uc_fw_type_repr(uc_fw->type), uc_fw->path); + drm_printf(p, "\tstatus: fetch %s, load %s\n", + intel_uc_fw_status_repr(uc_fw->fetch_status), + intel_uc_fw_status_repr(uc_fw->load_status)); + drm_printf(p, "\tversion: wanted %u.%u, found %u.%u\n", + uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted, + uc_fw->major_ver_found, uc_fw->minor_ver_found); + drm_printf(p, "\theader: offset %u, size %u\n", + uc_fw->header_offset, uc_fw->header_size); + drm_printf(p, "\tuCode: offset %u, size %u\n", + uc_fw->ucode_offset, uc_fw->ucode_size); + drm_printf(p, "\tRSA: offset %u, size %u\n", + uc_fw->rsa_offset, uc_fw->rsa_size); +} diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h new file mode 100644 index 000000000000..833d04d06576 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h @@ -0,0 +1,177 @@ +/* + * Copyright © 2014-2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef _INTEL_UC_FW_H_ +#define _INTEL_UC_FW_H_ + +#include +#include "i915_gem.h" + +struct drm_printer; +struct drm_i915_private; + +/* Home of GuC, HuC and DMC firmwares */ +#define INTEL_UC_FIRMWARE_URL "https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/tree/i915" + +enum intel_uc_fw_status { + INTEL_UC_FIRMWARE_NOT_SUPPORTED = -2, /* no uc HW */ + INTEL_UC_FIRMWARE_FAIL = -1, + INTEL_UC_FIRMWARE_UNINITIALIZED = 0, /* used to catch checks done too early */ + INTEL_UC_FIRMWARE_NOT_STARTED = 1, + INTEL_UC_FIRMWARE_PENDING, + INTEL_UC_FIRMWARE_SUCCESS +}; + +enum intel_uc_fw_type { + INTEL_UC_FW_TYPE_GUC, + INTEL_UC_FW_TYPE_HUC +}; + +/* + * This structure encapsulates all the data needed during the process + * of fetching, caching, and loading the firmware image into the uC. + */ +struct intel_uc_fw { + const char *path; + size_t size; + struct drm_i915_gem_object *obj; + enum intel_uc_fw_status fetch_status; + enum intel_uc_fw_status load_status; + + /* + * The firmware build process will generate a version header file with major and + * minor version defined. The versions are built into CSS header of firmware. + * i915 kernel driver set the minimal firmware version required per platform. 
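+ *
+ * For example (the values are purely illustrative, not a real requirement
+ * table), a platform-specific firmware entry might end up as:
+ *
+ *	uc_fw->path = "i915/skl_guc_33.0.0.bin";
+ *	uc_fw->major_ver_wanted = 33;
+ *	uc_fw->minor_ver_wanted = 0;
+ *
+ * while major_ver_found/minor_ver_found are filled from the CSS header of
+ * the fetched blob and checked against the wanted values in
+ * intel_uc_fw_fetch().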
+ */ + u16 major_ver_wanted; + u16 minor_ver_wanted; + u16 major_ver_found; + u16 minor_ver_found; + + enum intel_uc_fw_type type; + u32 header_size; + u32 header_offset; + u32 rsa_size; + u32 rsa_offset; + u32 ucode_size; + u32 ucode_offset; +}; + +static inline +const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status) +{ + switch (status) { + case INTEL_UC_FIRMWARE_NOT_SUPPORTED: + return "N/A - uc HW not available"; + case INTEL_UC_FIRMWARE_FAIL: + return "FAIL"; + case INTEL_UC_FIRMWARE_UNINITIALIZED: + return "UNINITIALIZED"; + case INTEL_UC_FIRMWARE_NOT_STARTED: + return "NOT_STARTED"; + case INTEL_UC_FIRMWARE_PENDING: + return "PENDING"; + case INTEL_UC_FIRMWARE_SUCCESS: + return "SUCCESS"; + } + return ""; +} + +static inline const char *intel_uc_fw_type_repr(enum intel_uc_fw_type type) +{ + switch (type) { + case INTEL_UC_FW_TYPE_GUC: + return "GuC"; + case INTEL_UC_FW_TYPE_HUC: + return "HuC"; + } + return "uC"; +} + +static inline +void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw, + enum intel_uc_fw_type type) +{ + /* + * we use FIRMWARE_UNINITIALIZED to detect checks against fetch_status + * before we're looked at the HW caps to see if we have uc support + */ + BUILD_BUG_ON(INTEL_UC_FIRMWARE_UNINITIALIZED); + + uc_fw->path = NULL; + uc_fw->fetch_status = INTEL_UC_FIRMWARE_UNINITIALIZED; + uc_fw->load_status = INTEL_UC_FIRMWARE_NOT_STARTED; + uc_fw->type = type; +} + +static inline bool intel_uc_fw_is_selected(struct intel_uc_fw *uc_fw) +{ + return uc_fw->path != NULL; +} + +static inline bool intel_uc_fw_is_loaded(struct intel_uc_fw *uc_fw) +{ + return uc_fw->load_status == INTEL_UC_FIRMWARE_SUCCESS; +} + +static inline bool intel_uc_fw_supported(struct intel_uc_fw *uc_fw) +{ + /* shouldn't call this before checking hw/blob availability */ + GEM_BUG_ON(uc_fw->fetch_status == INTEL_UC_FIRMWARE_UNINITIALIZED); + return uc_fw->fetch_status != INTEL_UC_FIRMWARE_NOT_SUPPORTED; +} + +static inline void intel_uc_fw_sanitize(struct intel_uc_fw *uc_fw) +{ + if (intel_uc_fw_is_loaded(uc_fw)) + uc_fw->load_status = INTEL_UC_FIRMWARE_PENDING; +} + +/** + * intel_uc_fw_get_upload_size() - Get size of firmware needed to be uploaded. + * @uc_fw: uC firmware. + * + * Get the size of the firmware and header that will be uploaded to WOPCM. + * + * Return: Upload firmware size, or zero on firmware fetch failure. 
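+ *
+ * For example, a caller such as the WOPCM partitioning code can size the
+ * GuC and HuC regions from the fetched blobs (a sketch only, with error
+ * handling omitted):
+ *
+ *	u32 guc_fw_size = intel_uc_fw_get_upload_size(&i915->guc.fw);
+ *	u32 huc_fw_size = intel_uc_fw_get_upload_size(&i915->huc.fw);
+ *
+ *	if (!guc_fw_size)
+ *		return;	/* fetch failed, nothing to place in WOPCM */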
+ */ +static inline u32 intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw) +{ + if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS) + return 0; + + return uc_fw->header_size + uc_fw->ucode_size; +} + +void intel_uc_fw_fetch(struct drm_i915_private *dev_priv, + struct intel_uc_fw *uc_fw); +void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw); +int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, + int (*xfer)(struct intel_uc_fw *uc_fw)); +int intel_uc_fw_init(struct intel_uc_fw *uc_fw); +void intel_uc_fw_fini(struct intel_uc_fw *uc_fw); +u32 intel_uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw); +void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p); + +#endif diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c new file mode 100644 index 000000000000..6ca76f5a98d4 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c @@ -0,0 +1,336 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + */ + +#include "i915_selftest.h" +#include "gem/i915_gem_pm.h" + +/* max doorbell number + negative test for each client type */ +#define ATTEMPTS (GUC_NUM_DOORBELLS + GUC_CLIENT_PRIORITY_NUM) + +static struct intel_guc_client *clients[ATTEMPTS]; + +static bool available_dbs(struct intel_guc *guc, u32 priority) +{ + unsigned long offset; + unsigned long end; + u16 id; + + /* first half is used for normal priority, second half for high */ + offset = 0; + end = GUC_NUM_DOORBELLS / 2; + if (priority <= GUC_CLIENT_PRIORITY_HIGH) { + offset = end; + end += offset; + } + + id = find_next_zero_bit(guc->doorbell_bitmap, end, offset); + if (id < end) + return true; + + return false; +} + +static int check_all_doorbells(struct intel_guc *guc) +{ + u16 db_id; + + pr_info_once("Max number of doorbells: %d", GUC_NUM_DOORBELLS); + for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id) { + if (!doorbell_ok(guc, db_id)) { + pr_err("doorbell %d, not ok\n", db_id); + return -EIO; + } + } + + return 0; +} + +static int ring_doorbell_nop(struct intel_guc_client *client) +{ + struct guc_process_desc *desc = __get_process_desc(client); + int err; + + client->use_nop_wqi = true; + + spin_lock_irq(&client->wq_lock); + + guc_wq_item_append(client, 0, 0, 0, 0); + guc_ring_doorbell(client); + + spin_unlock_irq(&client->wq_lock); + + client->use_nop_wqi = false; + + /* if there are no issues GuC will update the WQ head and keep the + * WQ in active status + */ + err = wait_for(READ_ONCE(desc->head) == READ_ONCE(desc->tail), 10); + if (err) { + pr_err("doorbell %u ring failed!\n", client->doorbell_id); + return -EIO; + } + + if (desc->wq_status != WQ_STATUS_ACTIVE) { + pr_err("doorbell %u ring put WQ in bad state (%u)!\n", + client->doorbell_id, desc->wq_status); + return -EIO; + } + + return 0; +} + +/* + * Basic client sanity check, handy to validate create_clients. + */ +static int validate_client(struct intel_guc_client *client, int client_priority) +{ + if (client->priority != client_priority || + client->doorbell_id == GUC_DOORBELL_INVALID) + return -EINVAL; + else + return 0; +} + +static bool client_doorbell_in_sync(struct intel_guc_client *client) +{ + return !client || doorbell_ok(client->guc, client->doorbell_id); +} + +/* + * Check that we're able to synchronize guc_clients with their doorbells + * + * We're creating clients and reserving doorbells once, at module load. During + * module lifetime, GuC, doorbell HW, and i915 state may go out of sync due to + * GuC being reset. In other words - GuC clients are still around, but the + * status of their doorbells may be incorrect. This is the reason behind + * validating that the doorbells status expected by the driver matches what the + * GuC/HW have. + */ +static int igt_guc_clients(void *args) +{ + struct drm_i915_private *dev_priv = args; + intel_wakeref_t wakeref; + struct intel_guc *guc; + int err = 0; + + GEM_BUG_ON(!HAS_GUC(dev_priv)); + mutex_lock(&dev_priv->drm.struct_mutex); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); + + guc = &dev_priv->guc; + if (!guc) { + pr_err("No guc object!\n"); + err = -EINVAL; + goto unlock; + } + + err = check_all_doorbells(guc); + if (err) + goto unlock; + + /* + * Get rid of clients created during driver load because the test will + * recreate them. 
+ */ + guc_clients_disable(guc); + guc_clients_destroy(guc); + if (guc->execbuf_client) { + pr_err("guc_clients_destroy lied!\n"); + err = -EINVAL; + goto unlock; + } + + err = guc_clients_create(guc); + if (err) { + pr_err("Failed to create clients\n"); + goto unlock; + } + GEM_BUG_ON(!guc->execbuf_client); + + err = validate_client(guc->execbuf_client, + GUC_CLIENT_PRIORITY_KMD_NORMAL); + if (err) { + pr_err("execbug client validation failed\n"); + goto out; + } + + /* the client should now have reserved a doorbell */ + if (!has_doorbell(guc->execbuf_client)) { + pr_err("guc_clients_create didn't reserve doorbells\n"); + err = -EINVAL; + goto out; + } + + /* Now enable the clients */ + guc_clients_enable(guc); + + /* each client should now have received a doorbell */ + if (!client_doorbell_in_sync(guc->execbuf_client)) { + pr_err("failed to initialize the doorbells\n"); + err = -EINVAL; + goto out; + } + + /* + * Basic test - an attempt to reallocate a valid doorbell to the + * client it is currently assigned should not cause a failure. + */ + err = create_doorbell(guc->execbuf_client); + +out: + /* + * Leave clean state for other test, plus the driver always destroy the + * clients during unload. + */ + guc_clients_disable(guc); + guc_clients_destroy(guc); + guc_clients_create(guc); + guc_clients_enable(guc); +unlock: + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); + mutex_unlock(&dev_priv->drm.struct_mutex); + return err; +} + +/* + * Create as many clients as number of doorbells. Note that there's already + * client(s)/doorbell(s) created during driver load, but this test creates + * its own and do not interact with the existing ones. + */ +static int igt_guc_doorbells(void *arg) +{ + struct drm_i915_private *dev_priv = arg; + intel_wakeref_t wakeref; + struct intel_guc *guc; + int i, err = 0; + u16 db_id; + + GEM_BUG_ON(!HAS_GUC(dev_priv)); + mutex_lock(&dev_priv->drm.struct_mutex); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); + + guc = &dev_priv->guc; + if (!guc) { + pr_err("No guc object!\n"); + err = -EINVAL; + goto unlock; + } + + err = check_all_doorbells(guc); + if (err) + goto unlock; + + for (i = 0; i < ATTEMPTS; i++) { + clients[i] = guc_client_alloc(guc, i % GUC_CLIENT_PRIORITY_NUM); + + if (!clients[i]) { + pr_err("[%d] No guc client\n", i); + err = -EINVAL; + goto out; + } + + if (IS_ERR(clients[i])) { + if (PTR_ERR(clients[i]) != -ENOSPC) { + pr_err("[%d] unexpected error\n", i); + err = PTR_ERR(clients[i]); + goto out; + } + + if (available_dbs(guc, i % GUC_CLIENT_PRIORITY_NUM)) { + pr_err("[%d] non-db related alloc fail\n", i); + err = -EINVAL; + goto out; + } + + /* expected, ran out of dbs for this client type */ + continue; + } + + /* + * The check below is only valid because we keep a doorbell + * assigned during the whole life of the client. 
+ */ + if (clients[i]->stage_id >= GUC_NUM_DOORBELLS) { + pr_err("[%d] more clients than doorbells (%d >= %d)\n", + i, clients[i]->stage_id, GUC_NUM_DOORBELLS); + err = -EINVAL; + goto out; + } + + err = validate_client(clients[i], i % GUC_CLIENT_PRIORITY_NUM); + if (err) { + pr_err("[%d] client_alloc sanity check failed!\n", i); + err = -EINVAL; + goto out; + } + + db_id = clients[i]->doorbell_id; + + err = __guc_client_enable(clients[i]); + if (err) { + pr_err("[%d] Failed to create a doorbell\n", i); + goto out; + } + + /* doorbell id shouldn't change, we are holding the mutex */ + if (db_id != clients[i]->doorbell_id) { + pr_err("[%d] doorbell id changed (%d != %d)\n", + i, db_id, clients[i]->doorbell_id); + err = -EINVAL; + goto out; + } + + err = check_all_doorbells(guc); + if (err) + goto out; + + err = ring_doorbell_nop(clients[i]); + if (err) + goto out; + } + +out: + for (i = 0; i < ATTEMPTS; i++) + if (!IS_ERR_OR_NULL(clients[i])) { + __guc_client_disable(clients[i]); + guc_client_free(clients[i]); + } +unlock: + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); + mutex_unlock(&dev_priv->drm.struct_mutex); + return err; +} + +int intel_guc_live_selftest(struct drm_i915_private *dev_priv) +{ + static const struct i915_subtest tests[] = { + SUBTEST(igt_guc_clients), + SUBTEST(igt_guc_doorbells), + }; + + if (!USES_GUC_SUBMISSION(dev_priv)) + return 0; + + return i915_subtests(tests, dev_priv); +} diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index a1843e3de6d7..63024c0d0cd9 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -40,12 +40,12 @@ #include "gem/i915_gem_context.h" #include "gt/intel_reset.h" +#include "gt/uc/intel_guc_submission.h" #include "i915_debugfs.h" #include "i915_irq.h" #include "intel_csr.h" #include "intel_drv.h" -#include "intel_guc_submission.h" #include "intel_pm.h" #include "intel_sideband.h" diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 6953732719e2..44e13b9c8e25 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -65,6 +65,7 @@ #include "gt/intel_gt_pm.h" #include "gt/intel_reset.h" #include "gt/intel_workarounds.h" +#include "gt/uc/intel_uc.h" #include "i915_debugfs.h" #include "i915_drv.h" @@ -76,7 +77,6 @@ #include "intel_csr.h" #include "intel_drv.h" #include "intel_pm.h" -#include "intel_uc.h" static struct drm_driver driver; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 1a0b114da77e..b604fee623ab 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -74,10 +74,10 @@ #include "gt/intel_engine.h" #include "gt/intel_gt_types.h" #include "gt/intel_workarounds.h" +#include "gt/uc/intel_uc.h" #include "intel_device_info.h" #include "intel_runtime_pm.h" -#include "intel_uc.h" #include "intel_uncore.h" #include "intel_wakeref.h" #include "intel_wopcm.h" diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h index 7dfbfda48733..85f06bc5da05 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.h +++ b/drivers/gpu/drm/i915/i915_gpu_error.h @@ -15,9 +15,9 @@ #include #include "gt/intel_engine.h" +#include "gt/uc/intel_uc_fw.h" #include "intel_device_info.h" -#include "intel_uc_fw.h" #include "i915_gem.h" #include "i915_gem_gtt.h" diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c deleted file mode 100644 index 4173b35bf104..000000000000 --- 
a/drivers/gpu/drm/i915/intel_guc.c +++ /dev/null @@ -1,603 +0,0 @@ -/* - * Copyright © 2014-2017 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - */ - -#include "intel_guc.h" -#include "intel_guc_ads.h" -#include "intel_guc_submission.h" -#include "i915_drv.h" - -static void gen8_guc_raise_irq(struct intel_guc *guc) -{ - struct drm_i915_private *dev_priv = guc_to_i915(guc); - - I915_WRITE(GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER); -} - -static void gen11_guc_raise_irq(struct intel_guc *guc) -{ - struct drm_i915_private *dev_priv = guc_to_i915(guc); - - I915_WRITE(GEN11_GUC_HOST_INTERRUPT, 0); -} - -static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i) -{ - GEM_BUG_ON(!guc->send_regs.base); - GEM_BUG_ON(!guc->send_regs.count); - GEM_BUG_ON(i >= guc->send_regs.count); - - return _MMIO(guc->send_regs.base + 4 * i); -} - -void intel_guc_init_send_regs(struct intel_guc *guc) -{ - struct drm_i915_private *dev_priv = guc_to_i915(guc); - enum forcewake_domains fw_domains = 0; - unsigned int i; - - if (INTEL_GEN(dev_priv) >= 11) { - guc->send_regs.base = - i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0)); - guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT; - } else { - guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0)); - guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN; - BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT); - } - - for (i = 0; i < guc->send_regs.count; i++) { - fw_domains |= intel_uncore_forcewake_for_reg(&dev_priv->uncore, - guc_send_reg(guc, i), - FW_REG_READ | FW_REG_WRITE); - } - guc->send_regs.fw_domains = fw_domains; -} - -void intel_guc_init_early(struct intel_guc *guc) -{ - struct drm_i915_private *i915 = guc_to_i915(guc); - - intel_guc_fw_init_early(guc); - intel_guc_ct_init_early(&guc->ct); - intel_guc_log_init_early(&guc->log); - - mutex_init(&guc->send_mutex); - spin_lock_init(&guc->irq_lock); - guc->send = intel_guc_send_nop; - guc->handler = intel_guc_to_host_event_handler_nop; - if (INTEL_GEN(i915) >= 11) { - guc->notify = gen11_guc_raise_irq; - guc->interrupts.reset = gen11_reset_guc_interrupts; - guc->interrupts.enable = gen11_enable_guc_interrupts; - guc->interrupts.disable = gen11_disable_guc_interrupts; - } else { - guc->notify = gen8_guc_raise_irq; - guc->interrupts.reset = gen9_reset_guc_interrupts; - guc->interrupts.enable = gen9_enable_guc_interrupts; - guc->interrupts.disable = gen9_disable_guc_interrupts; - } -} - -static int guc_shared_data_create(struct 
intel_guc *guc) -{ - struct i915_vma *vma; - void *vaddr; - - vma = intel_guc_allocate_vma(guc, PAGE_SIZE); - if (IS_ERR(vma)) - return PTR_ERR(vma); - - vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB); - if (IS_ERR(vaddr)) { - i915_vma_unpin_and_release(&vma, 0); - return PTR_ERR(vaddr); - } - - guc->shared_data = vma; - guc->shared_data_vaddr = vaddr; - - return 0; -} - -static void guc_shared_data_destroy(struct intel_guc *guc) -{ - i915_vma_unpin_and_release(&guc->shared_data, I915_VMA_RELEASE_MAP); -} - -int intel_guc_init(struct intel_guc *guc) -{ - struct drm_i915_private *dev_priv = guc_to_i915(guc); - int ret; - - ret = intel_uc_fw_init(&guc->fw); - if (ret) - goto err_fetch; - - ret = guc_shared_data_create(guc); - if (ret) - goto err_fw; - GEM_BUG_ON(!guc->shared_data); - - ret = intel_guc_log_create(&guc->log); - if (ret) - goto err_shared; - - ret = intel_guc_ads_create(guc); - if (ret) - goto err_log; - GEM_BUG_ON(!guc->ads_vma); - - ret = intel_guc_ct_init(&guc->ct); - if (ret) - goto err_ads; - - /* We need to notify the guc whenever we change the GGTT */ - i915_ggtt_enable_guc(dev_priv); - - return 0; - -err_ads: - intel_guc_ads_destroy(guc); -err_log: - intel_guc_log_destroy(&guc->log); -err_shared: - guc_shared_data_destroy(guc); -err_fw: - intel_uc_fw_fini(&guc->fw); -err_fetch: - intel_uc_fw_cleanup_fetch(&guc->fw); - return ret; -} - -void intel_guc_fini(struct intel_guc *guc) -{ - struct drm_i915_private *dev_priv = guc_to_i915(guc); - - i915_ggtt_disable_guc(dev_priv); - - intel_guc_ct_fini(&guc->ct); - - intel_guc_ads_destroy(guc); - intel_guc_log_destroy(&guc->log); - guc_shared_data_destroy(guc); - intel_uc_fw_fini(&guc->fw); - intel_uc_fw_cleanup_fetch(&guc->fw); -} - -static u32 guc_ctl_debug_flags(struct intel_guc *guc) -{ - u32 level = intel_guc_log_get_level(&guc->log); - u32 flags = 0; - - if (!GUC_LOG_LEVEL_IS_VERBOSE(level)) - flags |= GUC_LOG_DISABLED; - else - flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) << - GUC_LOG_VERBOSITY_SHIFT; - - return flags; -} - -static u32 guc_ctl_feature_flags(struct intel_guc *guc) -{ - u32 flags = 0; - - if (!USES_GUC_SUBMISSION(guc_to_i915(guc))) - flags |= GUC_CTL_DISABLE_SCHEDULER; - - return flags; -} - -static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc) -{ - u32 flags = 0; - - if (USES_GUC_SUBMISSION(guc_to_i915(guc))) { - u32 ctxnum, base; - - base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool); - ctxnum = GUC_MAX_STAGE_DESCRIPTORS / 16; - - base >>= PAGE_SHIFT; - flags |= (base << GUC_CTL_BASE_ADDR_SHIFT) | - (ctxnum << GUC_CTL_CTXNUM_IN16_SHIFT); - } - return flags; -} - -static u32 guc_ctl_log_params_flags(struct intel_guc *guc) -{ - u32 offset = intel_guc_ggtt_offset(guc, guc->log.vma) >> PAGE_SHIFT; - u32 flags; - - #if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0) - #define UNIT SZ_1M - #define FLAG GUC_LOG_ALLOC_IN_MEGABYTE - #else - #define UNIT SZ_4K - #define FLAG 0 - #endif - - BUILD_BUG_ON(!CRASH_BUFFER_SIZE); - BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, UNIT)); - BUILD_BUG_ON(!DPC_BUFFER_SIZE); - BUILD_BUG_ON(!IS_ALIGNED(DPC_BUFFER_SIZE, UNIT)); - BUILD_BUG_ON(!ISR_BUFFER_SIZE); - BUILD_BUG_ON(!IS_ALIGNED(ISR_BUFFER_SIZE, UNIT)); - - BUILD_BUG_ON((CRASH_BUFFER_SIZE / UNIT - 1) > - (GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT)); - BUILD_BUG_ON((DPC_BUFFER_SIZE / UNIT - 1) > - (GUC_LOG_DPC_MASK >> GUC_LOG_DPC_SHIFT)); - BUILD_BUG_ON((ISR_BUFFER_SIZE / UNIT - 1) > - (GUC_LOG_ISR_MASK >> GUC_LOG_ISR_SHIFT)); - - flags = GUC_LOG_VALID | - GUC_LOG_NOTIFY_ON_HALF_FULL | - FLAG | - ((CRASH_BUFFER_SIZE / 
UNIT - 1) << GUC_LOG_CRASH_SHIFT) | - ((DPC_BUFFER_SIZE / UNIT - 1) << GUC_LOG_DPC_SHIFT) | - ((ISR_BUFFER_SIZE / UNIT - 1) << GUC_LOG_ISR_SHIFT) | - (offset << GUC_LOG_BUF_ADDR_SHIFT); - - #undef UNIT - #undef FLAG - - return flags; -} - -static u32 guc_ctl_ads_flags(struct intel_guc *guc) -{ - u32 ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT; - u32 flags = ads << GUC_ADS_ADDR_SHIFT; - - return flags; -} - -/* - * Initialise the GuC parameter block before starting the firmware - * transfer. These parameters are read by the firmware on startup - * and cannot be changed thereafter. - */ -void intel_guc_init_params(struct intel_guc *guc) -{ - struct drm_i915_private *dev_priv = guc_to_i915(guc); - u32 params[GUC_CTL_MAX_DWORDS]; - int i; - - memset(params, 0, sizeof(params)); - - params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc); - params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc); - params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc); - params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc); - params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc); - - for (i = 0; i < GUC_CTL_MAX_DWORDS; i++) - DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]); - - /* - * All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and - * they are power context saved so it's ok to release forcewake - * when we are done here and take it again at xfer time. - */ - intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_BLITTER); - - I915_WRITE(SOFT_SCRATCH(0), 0); - - for (i = 0; i < GUC_CTL_MAX_DWORDS; i++) - I915_WRITE(SOFT_SCRATCH(1 + i), params[i]); - - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_BLITTER); -} - -int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len, - u32 *response_buf, u32 response_buf_size) -{ - WARN(1, "Unexpected send: action=%#x\n", *action); - return -ENODEV; -} - -void intel_guc_to_host_event_handler_nop(struct intel_guc *guc) -{ - WARN(1, "Unexpected event: no suitable handler\n"); -} - -/* - * This function implements the MMIO based host to GuC interface. - */ -int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len, - u32 *response_buf, u32 response_buf_size) -{ - struct drm_i915_private *dev_priv = guc_to_i915(guc); - struct intel_uncore *uncore = &dev_priv->uncore; - u32 status; - int i; - int ret; - - GEM_BUG_ON(!len); - GEM_BUG_ON(len > guc->send_regs.count); - - /* We expect only action code */ - GEM_BUG_ON(*action & ~INTEL_GUC_MSG_CODE_MASK); - - /* If CT is available, we expect to use MMIO only during init/fini */ - GEM_BUG_ON(*action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER && - *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER); - - mutex_lock(&guc->send_mutex); - intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains); - - for (i = 0; i < len; i++) - intel_uncore_write(uncore, guc_send_reg(guc, i), action[i]); - - intel_uncore_posting_read(uncore, guc_send_reg(guc, i - 1)); - - intel_guc_notify(guc); - - /* - * No GuC command should ever take longer than 10ms. - * Fast commands should still complete in 10us. 
- */ - ret = __intel_wait_for_register_fw(uncore, - guc_send_reg(guc, 0), - INTEL_GUC_MSG_TYPE_MASK, - INTEL_GUC_MSG_TYPE_RESPONSE << - INTEL_GUC_MSG_TYPE_SHIFT, - 10, 10, &status); - /* If GuC explicitly returned an error, convert it to -EIO */ - if (!ret && !INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(status)) - ret = -EIO; - - if (ret) { - DRM_ERROR("MMIO: GuC action %#x failed with error %d %#x\n", - action[0], ret, status); - goto out; - } - - if (response_buf) { - int count = min(response_buf_size, guc->send_regs.count - 1); - - for (i = 0; i < count; i++) - response_buf[i] = I915_READ(guc_send_reg(guc, i + 1)); - } - - /* Use data from the GuC response as our return value */ - ret = INTEL_GUC_MSG_TO_DATA(status); - -out: - intel_uncore_forcewake_put(uncore, guc->send_regs.fw_domains); - mutex_unlock(&guc->send_mutex); - - return ret; -} - -int intel_guc_to_host_process_recv_msg(struct intel_guc *guc, - const u32 *payload, u32 len) -{ - u32 msg; - - if (unlikely(!len)) - return -EPROTO; - - /* Make sure to handle only enabled messages */ - msg = payload[0] & guc->msg_enabled_mask; - - if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER | - INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED)) - intel_guc_log_handle_flush_event(&guc->log); - - return 0; -} - -int intel_guc_sample_forcewake(struct intel_guc *guc) -{ - struct drm_i915_private *dev_priv = guc_to_i915(guc); - u32 action[2]; - - action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE; - /* WaRsDisableCoarsePowerGating:skl,cnl */ - if (!HAS_RC6(dev_priv) || NEEDS_WaRsDisableCoarsePowerGating(dev_priv)) - action[1] = 0; - else - /* bit 0 and 1 are for Render and Media domain separately */ - action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA; - - return intel_guc_send(guc, action, ARRAY_SIZE(action)); -} - -/** - * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode - * @guc: intel_guc structure - * @rsa_offset: rsa offset w.r.t ggtt base of huc vma - * - * Triggers a HuC firmware authentication request to the GuC via intel_guc_send - * INTEL_GUC_ACTION_AUTHENTICATE_HUC interface. This function is invoked by - * intel_huc_auth(). - * - * Return: non-zero code on error - */ -int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset) -{ - u32 action[] = { - INTEL_GUC_ACTION_AUTHENTICATE_HUC, - rsa_offset - }; - - return intel_guc_send(guc, action, ARRAY_SIZE(action)); -} - -/** - * intel_guc_suspend() - notify GuC entering suspend state - * @guc: the guc - */ -int intel_guc_suspend(struct intel_guc *guc) -{ - struct drm_i915_private *dev_priv = guc_to_i915(guc); - int ret; - u32 status; - u32 action[] = { - INTEL_GUC_ACTION_ENTER_S_STATE, - GUC_POWER_D1, /* any value greater than GUC_POWER_D0 */ - }; - - /* - * The ENTER_S_STATE action queues the save/restore operation in GuC FW - * and then returns, so waiting on the H2G is not enough to guarantee - * GuC is done. When all the processing is done, GuC writes - * INTEL_GUC_SLEEP_STATE_SUCCESS to scratch register 14, so we can poll - * on that. Note that GuC does not ensure that the value in the register - * is different from INTEL_GUC_SLEEP_STATE_SUCCESS while the action is - * in progress so we need to take care of that ourselves as well. 
- */ - - I915_WRITE(SOFT_SCRATCH(14), INTEL_GUC_SLEEP_STATE_INVALID_MASK); - - ret = intel_guc_send(guc, action, ARRAY_SIZE(action)); - if (ret) - return ret; - - ret = __intel_wait_for_register(&dev_priv->uncore, SOFT_SCRATCH(14), - INTEL_GUC_SLEEP_STATE_INVALID_MASK, - 0, 0, 10, &status); - if (ret) - return ret; - - if (status != INTEL_GUC_SLEEP_STATE_SUCCESS) { - DRM_ERROR("GuC failed to change sleep state. " - "action=0x%x, err=%u\n", - action[0], status); - return -EIO; - } - - return 0; -} - -/** - * intel_guc_reset_engine() - ask GuC to reset an engine - * @guc: intel_guc structure - * @engine: engine to be reset - */ -int intel_guc_reset_engine(struct intel_guc *guc, - struct intel_engine_cs *engine) -{ - u32 data[7]; - - GEM_BUG_ON(!guc->execbuf_client); - - data[0] = INTEL_GUC_ACTION_REQUEST_ENGINE_RESET; - data[1] = engine->guc_id; - data[2] = 0; - data[3] = 0; - data[4] = 0; - data[5] = guc->execbuf_client->stage_id; - data[6] = intel_guc_ggtt_offset(guc, guc->shared_data); - - return intel_guc_send(guc, data, ARRAY_SIZE(data)); -} - -/** - * intel_guc_resume() - notify GuC resuming from suspend state - * @guc: the guc - */ -int intel_guc_resume(struct intel_guc *guc) -{ - u32 action[] = { - INTEL_GUC_ACTION_EXIT_S_STATE, - GUC_POWER_D0, - }; - - return intel_guc_send(guc, action, ARRAY_SIZE(action)); -} - -/** - * DOC: GuC Address Space - * - * The layout of GuC address space is shown below: - * - * :: - * - * +===========> +====================+ <== FFFF_FFFF - * ^ | Reserved | - * | +====================+ <== GUC_GGTT_TOP - * | | | - * | | DRAM | - * GuC | | - * Address +===> +====================+ <== GuC ggtt_pin_bias - * Space ^ | | - * | | | | - * | GuC | GuC | - * | WOPCM | WOPCM | - * | Size | | - * | | | | - * v v | | - * +=======+===> +====================+ <== 0000_0000 - * - * The lower part of GuC Address Space [0, ggtt_pin_bias) is mapped to GuC WOPCM - * while upper part of GuC Address Space [ggtt_pin_bias, GUC_GGTT_TOP) is mapped - * to DRAM. The value of the GuC ggtt_pin_bias is the GuC WOPCM size. - */ - -/** - * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage - * @guc: the guc - * @size: size of area to allocate (both virtual space and memory) - * - * This is a wrapper to create an object for use with the GuC. In order to - * use it inside the GuC, an object needs to be pinned lifetime, so we allocate - * both some backing storage and a range inside the Global GTT. We must pin - * it in the GGTT somewhere other than than [0, GUC ggtt_pin_bias) because that - * range is reserved inside GuC. - * - * Return: A i915_vma if successful, otherwise an ERR_PTR. 
- */ -struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size) -{ - struct drm_i915_private *dev_priv = guc_to_i915(guc); - struct drm_i915_gem_object *obj; - struct i915_vma *vma; - u64 flags; - int ret; - - obj = i915_gem_object_create_shmem(dev_priv, size); - if (IS_ERR(obj)) - return ERR_CAST(obj); - - vma = i915_vma_instance(obj, &dev_priv->ggtt.vm, NULL); - if (IS_ERR(vma)) - goto err; - - flags = PIN_GLOBAL | PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma); - ret = i915_vma_pin(vma, 0, 0, flags); - if (ret) { - vma = ERR_PTR(ret); - goto err; - } - - return vma; - -err: - i915_gem_object_put(obj); - return vma; -} diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h deleted file mode 100644 index 6852352381ce..000000000000 --- a/drivers/gpu/drm/i915/intel_guc.h +++ /dev/null @@ -1,202 +0,0 @@ -/* - * Copyright © 2014-2017 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - */ - -#ifndef _INTEL_GUC_H_ -#define _INTEL_GUC_H_ - -#include "intel_uncore.h" -#include "intel_guc_fw.h" -#include "intel_guc_fwif.h" -#include "intel_guc_ct.h" -#include "intel_guc_log.h" -#include "intel_guc_reg.h" -#include "intel_uc_fw.h" -#include "i915_utils.h" -#include "i915_vma.h" - -struct __guc_ads_blob; - -/* - * Top level structure of GuC. It handles firmware loading and manages client - * pool and doorbells. intel_guc owns a intel_guc_client to replace the legacy - * ExecList submission. 
- */ -struct intel_guc { - struct intel_uc_fw fw; - struct intel_guc_log log; - struct intel_guc_ct ct; - - /* Log snapshot if GuC errors during load */ - struct drm_i915_gem_object *load_err_log; - - /* intel_guc_recv interrupt related state */ - spinlock_t irq_lock; - unsigned int msg_enabled_mask; - - struct { - bool enabled; - void (*reset)(struct intel_guc *guc); - void (*enable)(struct intel_guc *guc); - void (*disable)(struct intel_guc *guc); - } interrupts; - - struct i915_vma *ads_vma; - struct __guc_ads_blob *ads_blob; - - struct i915_vma *stage_desc_pool; - void *stage_desc_pool_vaddr; - struct ida stage_ids; - struct i915_vma *shared_data; - void *shared_data_vaddr; - - struct intel_guc_client *execbuf_client; - - DECLARE_BITMAP(doorbell_bitmap, GUC_NUM_DOORBELLS); - /* Cyclic counter mod pagesize */ - u32 db_cacheline; - - /* GuC's FW specific registers used in MMIO send */ - struct { - u32 base; - unsigned int count; - enum forcewake_domains fw_domains; - } send_regs; - - /* Store msg (e.g. log flush) that we see while CTBs are disabled */ - u32 mmio_msg; - - /* To serialize the intel_guc_send actions */ - struct mutex send_mutex; - - /* GuC's FW specific send function */ - int (*send)(struct intel_guc *guc, const u32 *data, u32 len, - u32 *response_buf, u32 response_buf_size); - - /* GuC's FW specific event handler function */ - void (*handler)(struct intel_guc *guc); - - /* GuC's FW specific notify function */ - void (*notify)(struct intel_guc *guc); -}; - -static -inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len) -{ - return guc->send(guc, action, len, NULL, 0); -} - -static inline int -intel_guc_send_and_receive(struct intel_guc *guc, const u32 *action, u32 len, - u32 *response_buf, u32 response_buf_size) -{ - return guc->send(guc, action, len, response_buf, response_buf_size); -} - -static inline void intel_guc_notify(struct intel_guc *guc) -{ - guc->notify(guc); -} - -static inline void intel_guc_to_host_event_handler(struct intel_guc *guc) -{ - guc->handler(guc); -} - -/* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */ -#define GUC_GGTT_TOP 0xFEE00000 - -/** - * intel_guc_ggtt_offset() - Get and validate the GGTT offset of @vma - * @guc: intel_guc structure. - * @vma: i915 graphics virtual memory area. - * - * GuC does not allow any gfx GGTT address that falls into range - * [0, ggtt.pin_bias), which is reserved for Boot ROM, SRAM and WOPCM. - * Currently, in order to exclude [0, ggtt.pin_bias) address space from - * GGTT, all gfx objects used by GuC are allocated with intel_guc_allocate_vma() - * and pinned with PIN_OFFSET_BIAS along with the value of ggtt.pin_bias. - * - * Return: GGTT offset of the @vma. 
- */ -static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc, - struct i915_vma *vma) -{ - u32 offset = i915_ggtt_offset(vma); - - GEM_BUG_ON(offset < i915_ggtt_pin_bias(vma)); - GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP)); - - return offset; -} - -void intel_guc_init_early(struct intel_guc *guc); -void intel_guc_init_send_regs(struct intel_guc *guc); -void intel_guc_init_params(struct intel_guc *guc); -int intel_guc_init(struct intel_guc *guc); -void intel_guc_fini(struct intel_guc *guc); -int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len, - u32 *response_buf, u32 response_buf_size); -int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len, - u32 *response_buf, u32 response_buf_size); -void intel_guc_to_host_event_handler(struct intel_guc *guc); -void intel_guc_to_host_event_handler_nop(struct intel_guc *guc); -int intel_guc_to_host_process_recv_msg(struct intel_guc *guc, - const u32 *payload, u32 len); -int intel_guc_sample_forcewake(struct intel_guc *guc); -int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset); -int intel_guc_suspend(struct intel_guc *guc); -int intel_guc_resume(struct intel_guc *guc); -struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size); - -static inline bool intel_guc_is_loaded(struct intel_guc *guc) -{ - return intel_uc_fw_is_loaded(&guc->fw); -} - -static inline int intel_guc_sanitize(struct intel_guc *guc) -{ - intel_uc_fw_sanitize(&guc->fw); - guc->mmio_msg = 0; - - return 0; -} - -static inline void intel_guc_enable_msg(struct intel_guc *guc, u32 mask) -{ - spin_lock_irq(&guc->irq_lock); - guc->msg_enabled_mask |= mask; - spin_unlock_irq(&guc->irq_lock); -} - -static inline void intel_guc_disable_msg(struct intel_guc *guc, u32 mask) -{ - spin_lock_irq(&guc->irq_lock); - guc->msg_enabled_mask &= ~mask; - spin_unlock_irq(&guc->irq_lock); -} - -int intel_guc_reset_engine(struct intel_guc *guc, - struct intel_engine_cs *engine); - -#endif diff --git a/drivers/gpu/drm/i915/intel_guc_ads.c b/drivers/gpu/drm/i915/intel_guc_ads.c deleted file mode 100644 index 69859d1e047f..000000000000 --- a/drivers/gpu/drm/i915/intel_guc_ads.c +++ /dev/null @@ -1,202 +0,0 @@ -/* - * Copyright © 2014-2017 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. 
- * - */ - -#include "intel_guc_ads.h" -#include "intel_uc.h" -#include "i915_drv.h" - -/* - * The Additional Data Struct (ADS) has pointers for different buffers used by - * the GuC. One single gem object contains the ADS struct itself (guc_ads), the - * scheduling policies (guc_policies), a structure describing a collection of - * register sets (guc_mmio_reg_state) and some extra pages for the GuC to save - * its internal state for sleep. - */ - -static void guc_policy_init(struct guc_policy *policy) -{ - policy->execution_quantum = POLICY_DEFAULT_EXECUTION_QUANTUM_US; - policy->preemption_time = POLICY_DEFAULT_PREEMPTION_TIME_US; - policy->fault_time = POLICY_DEFAULT_FAULT_TIME_US; - policy->policy_flags = 0; -} - -static void guc_policies_init(struct guc_policies *policies) -{ - struct guc_policy *policy; - u32 p, i; - - policies->dpc_promote_time = POLICY_DEFAULT_DPC_PROMOTE_TIME_US; - policies->max_num_work_items = POLICY_MAX_NUM_WI; - - for (p = 0; p < GUC_CLIENT_PRIORITY_NUM; p++) { - for (i = 0; i < GUC_MAX_ENGINE_CLASSES; i++) { - policy = &policies->policy[p][i]; - - guc_policy_init(policy); - } - } - - policies->is_valid = 1; -} - -static void guc_ct_pool_entries_init(struct guc_ct_pool_entry *pool, u32 num) -{ - memset(pool, 0, num * sizeof(*pool)); -} - -/* - * The first 80 dwords of the register state context, containing the - * execlists and ppgtt registers. - */ -#define LR_HW_CONTEXT_SIZE (80 * sizeof(u32)) - -/* The ads obj includes the struct itself and buffers passed to GuC */ -struct __guc_ads_blob { - struct guc_ads ads; - struct guc_policies policies; - struct guc_mmio_reg_state reg_state; - struct guc_gt_system_info system_info; - struct guc_clients_info clients_info; - struct guc_ct_pool_entry ct_pool[GUC_CT_POOL_SIZE]; - u8 reg_state_buffer[GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE]; -} __packed; - -static void __guc_ads_init(struct intel_guc *guc) -{ - struct drm_i915_private *dev_priv = guc_to_i915(guc); - struct __guc_ads_blob *blob = guc->ads_blob; - const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE; - u32 base; - u8 engine_class; - - /* GuC scheduling policies */ - guc_policies_init(&blob->policies); - - /* - * GuC expects a per-engine-class context image and size - * (minus hwsp and ring context). The context image will be - * used to reinitialize engines after a reset. It must exist - * and be pinned in the GGTT, so that the address won't change after - * we have told GuC where to find it. The context size will be used - * to validate that the LRC base + size fall within allowed GGTT. - */ - for (engine_class = 0; engine_class <= MAX_ENGINE_CLASS; ++engine_class) { - if (engine_class == OTHER_CLASS) - continue; - /* - * TODO: Set context pointer to default state to allow - * GuC to re-init guilty contexts after internal reset. 
- */ - blob->ads.golden_context_lrca[engine_class] = 0; - blob->ads.eng_state_size[engine_class] = - intel_engine_context_size(dev_priv, engine_class) - - skipped_size; - } - - /* System info */ - blob->system_info.slice_enabled = hweight8(RUNTIME_INFO(dev_priv)->sseu.slice_mask); - blob->system_info.rcs_enabled = 1; - blob->system_info.bcs_enabled = 1; - - blob->system_info.vdbox_enable_mask = VDBOX_MASK(dev_priv); - blob->system_info.vebox_enable_mask = VEBOX_MASK(dev_priv); - blob->system_info.vdbox_sfc_support_mask = RUNTIME_INFO(dev_priv)->vdbox_sfc_access; - - base = intel_guc_ggtt_offset(guc, guc->ads_vma); - - /* Clients info */ - guc_ct_pool_entries_init(blob->ct_pool, ARRAY_SIZE(blob->ct_pool)); - - blob->clients_info.clients_num = 1; - blob->clients_info.ct_pool_addr = base + ptr_offset(blob, ct_pool); - blob->clients_info.ct_pool_count = ARRAY_SIZE(blob->ct_pool); - - /* ADS */ - blob->ads.scheduler_policies = base + ptr_offset(blob, policies); - blob->ads.reg_state_buffer = base + ptr_offset(blob, reg_state_buffer); - blob->ads.reg_state_addr = base + ptr_offset(blob, reg_state); - blob->ads.gt_system_info = base + ptr_offset(blob, system_info); - blob->ads.clients_info = base + ptr_offset(blob, clients_info); - - i915_gem_object_flush_map(guc->ads_vma->obj); -} - -/** - * intel_guc_ads_create() - allocates and initializes GuC ADS. - * @guc: intel_guc struct - * - * GuC needs memory block (Additional Data Struct), where it will store - * some data. Allocate and initialize such memory block for GuC use. - */ -int intel_guc_ads_create(struct intel_guc *guc) -{ - const u32 size = PAGE_ALIGN(sizeof(struct __guc_ads_blob)); - struct i915_vma *vma; - void *blob; - int ret; - - GEM_BUG_ON(guc->ads_vma); - - vma = intel_guc_allocate_vma(guc, size); - if (IS_ERR(vma)) - return PTR_ERR(vma); - - blob = i915_gem_object_pin_map(vma->obj, I915_MAP_WB); - if (IS_ERR(blob)) { - ret = PTR_ERR(blob); - goto err_vma; - } - - guc->ads_vma = vma; - guc->ads_blob = blob; - - __guc_ads_init(guc); - - return 0; - -err_vma: - i915_vma_unpin_and_release(&guc->ads_vma, 0); - return ret; -} - -void intel_guc_ads_destroy(struct intel_guc *guc) -{ - i915_vma_unpin_and_release(&guc->ads_vma, I915_VMA_RELEASE_MAP); -} - -/** - * intel_guc_ads_reset() - prepares GuC Additional Data Struct for reuse - * @guc: intel_guc struct - * - * GuC stores some data in ADS, which might be stale after a reset. - * Reinitialize whole ADS in case any part of it was corrupted during - * previous GuC run. - */ -void intel_guc_ads_reset(struct intel_guc *guc) -{ - if (!guc->ads_vma) - return; - __guc_ads_init(guc); -} diff --git a/drivers/gpu/drm/i915/intel_guc_ads.h b/drivers/gpu/drm/i915/intel_guc_ads.h deleted file mode 100644 index 7f40f9cd5fb9..000000000000 --- a/drivers/gpu/drm/i915/intel_guc_ads.h +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright © 2014-2017 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - */ - -#ifndef _INTEL_GUC_ADS_H_ -#define _INTEL_GUC_ADS_H_ - -struct intel_guc; - -int intel_guc_ads_create(struct intel_guc *guc); -void intel_guc_ads_destroy(struct intel_guc *guc); -void intel_guc_ads_reset(struct intel_guc *guc); - -#endif diff --git a/drivers/gpu/drm/i915/intel_guc_ct.c b/drivers/gpu/drm/i915/intel_guc_ct.c deleted file mode 100644 index 9e383a47609f..000000000000 --- a/drivers/gpu/drm/i915/intel_guc_ct.c +++ /dev/null @@ -1,915 +0,0 @@ -/* - * Copyright © 2016-2017 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - */ - -#include "i915_drv.h" -#include "intel_guc_ct.h" - -#ifdef CONFIG_DRM_I915_DEBUG_GUC -#define CT_DEBUG_DRIVER(...) DRM_DEBUG_DRIVER(__VA_ARGS__) -#else -#define CT_DEBUG_DRIVER(...) 
do { } while (0) -#endif - -struct ct_request { - struct list_head link; - u32 fence; - u32 status; - u32 response_len; - u32 *response_buf; -}; - -struct ct_incoming_request { - struct list_head link; - u32 msg[]; -}; - -enum { CTB_SEND = 0, CTB_RECV = 1 }; - -enum { CTB_OWNER_HOST = 0 }; - -static void ct_incoming_request_worker_func(struct work_struct *w); - -/** - * intel_guc_ct_init_early - Initialize CT state without requiring device access - * @ct: pointer to CT struct - */ -void intel_guc_ct_init_early(struct intel_guc_ct *ct) -{ - /* we're using static channel owners */ - ct->host_channel.owner = CTB_OWNER_HOST; - - spin_lock_init(&ct->lock); - INIT_LIST_HEAD(&ct->pending_requests); - INIT_LIST_HEAD(&ct->incoming_requests); - INIT_WORK(&ct->worker, ct_incoming_request_worker_func); -} - -static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct) -{ - return container_of(ct, struct intel_guc, ct); -} - -static inline const char *guc_ct_buffer_type_to_str(u32 type) -{ - switch (type) { - case INTEL_GUC_CT_BUFFER_TYPE_SEND: - return "SEND"; - case INTEL_GUC_CT_BUFFER_TYPE_RECV: - return "RECV"; - default: - return ""; - } -} - -static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc, - u32 cmds_addr, u32 size, u32 owner) -{ - CT_DEBUG_DRIVER("CT: desc %p init addr=%#x size=%u owner=%u\n", - desc, cmds_addr, size, owner); - memset(desc, 0, sizeof(*desc)); - desc->addr = cmds_addr; - desc->size = size; - desc->owner = owner; -} - -static void guc_ct_buffer_desc_reset(struct guc_ct_buffer_desc *desc) -{ - CT_DEBUG_DRIVER("CT: desc %p reset head=%u tail=%u\n", - desc, desc->head, desc->tail); - desc->head = 0; - desc->tail = 0; - desc->is_in_error = 0; -} - -static int guc_action_register_ct_buffer(struct intel_guc *guc, - u32 desc_addr, - u32 type) -{ - u32 action[] = { - INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER, - desc_addr, - sizeof(struct guc_ct_buffer_desc), - type - }; - int err; - - /* Can't use generic send(), CT registration must go over MMIO */ - err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0); - if (err) - DRM_ERROR("CT: register %s buffer failed; err=%d\n", - guc_ct_buffer_type_to_str(type), err); - return err; -} - -static int guc_action_deregister_ct_buffer(struct intel_guc *guc, - u32 owner, - u32 type) -{ - u32 action[] = { - INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER, - owner, - type - }; - int err; - - /* Can't use generic send(), CT deregistration must go over MMIO */ - err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0); - if (err) - DRM_ERROR("CT: deregister %s buffer failed; owner=%d err=%d\n", - guc_ct_buffer_type_to_str(type), owner, err); - return err; -} - -static int ctch_init(struct intel_guc *guc, - struct intel_guc_ct_channel *ctch) -{ - struct i915_vma *vma; - void *blob; - int err; - int i; - - GEM_BUG_ON(ctch->vma); - - /* We allocate 1 page to hold both descriptors and both buffers. - * ___________..................... - * |desc (SEND)| : - * |___________| PAGE/4 - * :___________....................: - * |desc (RECV)| : - * |___________| PAGE/4 - * :_______________________________: - * |cmds (SEND) | - * | PAGE/4 - * |_______________________________| - * |cmds (RECV) | - * | PAGE/4 - * |_______________________________| - * - * Each message can use a maximum of 32 dwords and we don't expect to - * have more than 1 in flight at any time, so we have enough space. 
- * Some logic further ahead will rely on the fact that there is only 1 - * page and that it is always mapped, so if the size is changed the - * other code will need updating as well. - */ - - /* allocate vma */ - vma = intel_guc_allocate_vma(guc, PAGE_SIZE); - if (IS_ERR(vma)) { - err = PTR_ERR(vma); - goto err_out; - } - ctch->vma = vma; - - /* map first page */ - blob = i915_gem_object_pin_map(vma->obj, I915_MAP_WB); - if (IS_ERR(blob)) { - err = PTR_ERR(blob); - goto err_vma; - } - CT_DEBUG_DRIVER("CT: vma base=%#x\n", - intel_guc_ggtt_offset(guc, ctch->vma)); - - /* store pointers to desc and cmds */ - for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) { - GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV)); - ctch->ctbs[i].desc = blob + PAGE_SIZE/4 * i; - ctch->ctbs[i].cmds = blob + PAGE_SIZE/4 * i + PAGE_SIZE/2; - } - - return 0; - -err_vma: - i915_vma_unpin_and_release(&ctch->vma, 0); -err_out: - CT_DEBUG_DRIVER("CT: channel %d initialization failed; err=%d\n", - ctch->owner, err); - return err; -} - -static void ctch_fini(struct intel_guc *guc, - struct intel_guc_ct_channel *ctch) -{ - GEM_BUG_ON(ctch->enabled); - - i915_vma_unpin_and_release(&ctch->vma, I915_VMA_RELEASE_MAP); -} - -static int ctch_enable(struct intel_guc *guc, - struct intel_guc_ct_channel *ctch) -{ - u32 base; - int err; - int i; - - GEM_BUG_ON(!ctch->vma); - - GEM_BUG_ON(ctch->enabled); - - /* vma should be already allocated and map'ed */ - base = intel_guc_ggtt_offset(guc, ctch->vma); - - /* (re)initialize descriptors - * cmds buffers are in the second half of the blob page - */ - for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) { - GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV)); - guc_ct_buffer_desc_init(ctch->ctbs[i].desc, - base + PAGE_SIZE/4 * i + PAGE_SIZE/2, - PAGE_SIZE/4, - ctch->owner); - } - - /* register buffers, starting wirh RECV buffer - * descriptors are in first half of the blob - */ - err = guc_action_register_ct_buffer(guc, - base + PAGE_SIZE/4 * CTB_RECV, - INTEL_GUC_CT_BUFFER_TYPE_RECV); - if (unlikely(err)) - goto err_out; - - err = guc_action_register_ct_buffer(guc, - base + PAGE_SIZE/4 * CTB_SEND, - INTEL_GUC_CT_BUFFER_TYPE_SEND); - if (unlikely(err)) - goto err_deregister; - - ctch->enabled = true; - - return 0; - -err_deregister: - guc_action_deregister_ct_buffer(guc, - ctch->owner, - INTEL_GUC_CT_BUFFER_TYPE_RECV); -err_out: - DRM_ERROR("CT: can't open channel %d; err=%d\n", ctch->owner, err); - return err; -} - -static void ctch_disable(struct intel_guc *guc, - struct intel_guc_ct_channel *ctch) -{ - GEM_BUG_ON(!ctch->enabled); - - ctch->enabled = false; - - guc_action_deregister_ct_buffer(guc, - ctch->owner, - INTEL_GUC_CT_BUFFER_TYPE_SEND); - guc_action_deregister_ct_buffer(guc, - ctch->owner, - INTEL_GUC_CT_BUFFER_TYPE_RECV); -} - -static u32 ctch_get_next_fence(struct intel_guc_ct_channel *ctch) -{ - /* For now it's trivial */ - return ++ctch->next_fence; -} - -/** - * DOC: CTB Host to GuC request - * - * Format of the CTB Host to GuC request message is as follows:: - * - * +------------+---------+---------+---------+---------+ - * | msg[0] | [1] | [2] | ... | [n-1] | - * +------------+---------+---------+---------+---------+ - * | MESSAGE | MESSAGE PAYLOAD | - * + HEADER +---------+---------+---------+---------+ - * | | 0 | 1 | ... 
| n | - * +============+=========+=========+=========+=========+ - * | len >= 1 | FENCE | request specific data | - * +------+-----+---------+---------+---------+---------+ - * - * ^-----------------len-------------------^ - */ - -static int ctb_write(struct intel_guc_ct_buffer *ctb, - const u32 *action, - u32 len /* in dwords */, - u32 fence, - bool want_response) -{ - struct guc_ct_buffer_desc *desc = ctb->desc; - u32 head = desc->head / 4; /* in dwords */ - u32 tail = desc->tail / 4; /* in dwords */ - u32 size = desc->size / 4; /* in dwords */ - u32 used; /* in dwords */ - u32 header; - u32 *cmds = ctb->cmds; - unsigned int i; - - GEM_BUG_ON(desc->size % 4); - GEM_BUG_ON(desc->head % 4); - GEM_BUG_ON(desc->tail % 4); - GEM_BUG_ON(tail >= size); - - /* - * tail == head condition indicates empty. GuC FW does not support - * using up the entire buffer to get tail == head meaning full. - */ - if (tail < head) - used = (size - head) + tail; - else - used = tail - head; - - /* make sure there is a space including extra dw for the fence */ - if (unlikely(used + len + 1 >= size)) - return -ENOSPC; - - /* - * Write the message. The format is the following: - * DW0: header (including action code) - * DW1: fence - * DW2+: action data - */ - header = (len << GUC_CT_MSG_LEN_SHIFT) | - (GUC_CT_MSG_WRITE_FENCE_TO_DESC) | - (want_response ? GUC_CT_MSG_SEND_STATUS : 0) | - (action[0] << GUC_CT_MSG_ACTION_SHIFT); - - CT_DEBUG_DRIVER("CT: writing %*ph %*ph %*ph\n", - 4, &header, 4, &fence, - 4 * (len - 1), &action[1]); - - cmds[tail] = header; - tail = (tail + 1) % size; - - cmds[tail] = fence; - tail = (tail + 1) % size; - - for (i = 1; i < len; i++) { - cmds[tail] = action[i]; - tail = (tail + 1) % size; - } - - /* now update desc tail (back in bytes) */ - desc->tail = tail * 4; - GEM_BUG_ON(desc->tail > desc->size); - - return 0; -} - -/** - * wait_for_ctb_desc_update - Wait for the CT buffer descriptor update. - * @desc: buffer descriptor - * @fence: response fence - * @status: placeholder for status - * - * Guc will update CT buffer descriptor with new fence and status - * after processing the command identified by the fence. Wait for - * specified fence and then read from the descriptor status of the - * command. - * - * Return: - * * 0 response received (status is valid) - * * -ETIMEDOUT no response within hardcoded timeout - * * -EPROTO no response, CT buffer is in error - */ -static int wait_for_ctb_desc_update(struct guc_ct_buffer_desc *desc, - u32 fence, - u32 *status) -{ - int err; - - /* - * Fast commands should complete in less than 10us, so sample quickly - * up to that length of time, then switch to a slower sleep-wait loop. - * No GuC command should ever take longer than 10ms. - */ -#define done (READ_ONCE(desc->fence) == fence) - err = wait_for_us(done, 10); - if (err) - err = wait_for(done, 10); -#undef done - - if (unlikely(err)) { - DRM_ERROR("CT: fence %u failed; reported fence=%u\n", - fence, desc->fence); - - if (WARN_ON(desc->is_in_error)) { - /* Something went wrong with the messaging, try to reset - * the buffer and hope for the best - */ - guc_ct_buffer_desc_reset(desc); - err = -EPROTO; - } - } - - *status = desc->status; - return err; -} - -/** - * wait_for_ct_request_update - Wait for CT request state update. - * @req: pointer to pending request - * @status: placeholder for status - * - * For each sent request, Guc shall send bac CT response message. - * Our message handler will update status of tracked request once - * response message with given fence is received. 
Wait here and - * check for valid response status value. - * - * Return: - * * 0 response received (status is valid) - * * -ETIMEDOUT no response within hardcoded timeout - */ -static int wait_for_ct_request_update(struct ct_request *req, u32 *status) -{ - int err; - - /* - * Fast commands should complete in less than 10us, so sample quickly - * up to that length of time, then switch to a slower sleep-wait loop. - * No GuC command should ever take longer than 10ms. - */ -#define done INTEL_GUC_MSG_IS_RESPONSE(READ_ONCE(req->status)) - err = wait_for_us(done, 10); - if (err) - err = wait_for(done, 10); -#undef done - - if (unlikely(err)) - DRM_ERROR("CT: fence %u err %d\n", req->fence, err); - - *status = req->status; - return err; -} - -static int ctch_send(struct intel_guc_ct *ct, - struct intel_guc_ct_channel *ctch, - const u32 *action, - u32 len, - u32 *response_buf, - u32 response_buf_size, - u32 *status) -{ - struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_SEND]; - struct guc_ct_buffer_desc *desc = ctb->desc; - struct ct_request request; - unsigned long flags; - u32 fence; - int err; - - GEM_BUG_ON(!ctch->enabled); - GEM_BUG_ON(!len); - GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK); - GEM_BUG_ON(!response_buf && response_buf_size); - - fence = ctch_get_next_fence(ctch); - request.fence = fence; - request.status = 0; - request.response_len = response_buf_size; - request.response_buf = response_buf; - - spin_lock_irqsave(&ct->lock, flags); - list_add_tail(&request.link, &ct->pending_requests); - spin_unlock_irqrestore(&ct->lock, flags); - - err = ctb_write(ctb, action, len, fence, !!response_buf); - if (unlikely(err)) - goto unlink; - - intel_guc_notify(ct_to_guc(ct)); - - if (response_buf) - err = wait_for_ct_request_update(&request, status); - else - err = wait_for_ctb_desc_update(desc, fence, status); - if (unlikely(err)) - goto unlink; - - if (!INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(*status)) { - err = -EIO; - goto unlink; - } - - if (response_buf) { - /* There shall be no data in the status */ - WARN_ON(INTEL_GUC_MSG_TO_DATA(request.status)); - /* Return actual response len */ - err = request.response_len; - } else { - /* There shall be no response payload */ - WARN_ON(request.response_len); - /* Return data decoded from the status dword */ - err = INTEL_GUC_MSG_TO_DATA(*status); - } - -unlink: - spin_lock_irqsave(&ct->lock, flags); - list_del(&request.link); - spin_unlock_irqrestore(&ct->lock, flags); - - return err; -} - -/* - * Command Transport (CT) buffer based GuC send function. 
- */ -int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len, - u32 *response_buf, u32 response_buf_size) -{ - struct intel_guc_ct *ct = &guc->ct; - struct intel_guc_ct_channel *ctch = &ct->host_channel; - u32 status = ~0; /* undefined */ - int ret; - - mutex_lock(&guc->send_mutex); - - ret = ctch_send(ct, ctch, action, len, response_buf, response_buf_size, - &status); - if (unlikely(ret < 0)) { - DRM_ERROR("CT: send action %#X failed; err=%d status=%#X\n", - action[0], ret, status); - } else if (unlikely(ret)) { - CT_DEBUG_DRIVER("CT: send action %#x returned %d (%#x)\n", - action[0], ret, ret); - } - - mutex_unlock(&guc->send_mutex); - return ret; -} - -static inline unsigned int ct_header_get_len(u32 header) -{ - return (header >> GUC_CT_MSG_LEN_SHIFT) & GUC_CT_MSG_LEN_MASK; -} - -static inline unsigned int ct_header_get_action(u32 header) -{ - return (header >> GUC_CT_MSG_ACTION_SHIFT) & GUC_CT_MSG_ACTION_MASK; -} - -static inline bool ct_header_is_response(u32 header) -{ - return !!(header & GUC_CT_MSG_IS_RESPONSE); -} - -static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data) -{ - struct guc_ct_buffer_desc *desc = ctb->desc; - u32 head = desc->head / 4; /* in dwords */ - u32 tail = desc->tail / 4; /* in dwords */ - u32 size = desc->size / 4; /* in dwords */ - u32 *cmds = ctb->cmds; - s32 available; /* in dwords */ - unsigned int len; - unsigned int i; - - GEM_BUG_ON(desc->size % 4); - GEM_BUG_ON(desc->head % 4); - GEM_BUG_ON(desc->tail % 4); - GEM_BUG_ON(tail >= size); - GEM_BUG_ON(head >= size); - - /* tail == head condition indicates empty */ - available = tail - head; - if (unlikely(available == 0)) - return -ENODATA; - - /* beware of buffer wrap case */ - if (unlikely(available < 0)) - available += size; - CT_DEBUG_DRIVER("CT: available %d (%u:%u)\n", available, head, tail); - GEM_BUG_ON(available < 0); - - data[0] = cmds[head]; - head = (head + 1) % size; - - /* message len with header */ - len = ct_header_get_len(data[0]) + 1; - if (unlikely(len > (u32)available)) { - DRM_ERROR("CT: incomplete message %*ph %*ph %*ph\n", - 4, data, - 4 * (head + available - 1 > size ? - size - head : available - 1), &cmds[head], - 4 * (head + available - 1 > size ? - available - 1 - size + head : 0), &cmds[0]); - return -EPROTO; - } - - for (i = 1; i < len; i++) { - data[i] = cmds[head]; - head = (head + 1) % size; - } - CT_DEBUG_DRIVER("CT: received %*ph\n", 4 * len, data); - - desc->head = head * 4; - return 0; -} - -/** - * DOC: CTB GuC to Host response - * - * Format of the CTB GuC to Host response message is as follows:: - * - * +------------+---------+---------+---------+---------+---------+ - * | msg[0] | [1] | [2] | [3] | ... | [n-1] | - * +------------+---------+---------+---------+---------+---------+ - * | MESSAGE | MESSAGE PAYLOAD | - * + HEADER +---------+---------+---------+---------+---------+ - * | | 0 | 1 | 2 | ... 
| n | - * +============+=========+=========+=========+=========+=========+ - * | len >= 2 | FENCE | STATUS | response specific data | - * +------+-----+---------+---------+---------+---------+---------+ - * - * ^-----------------------len-----------------------^ - */ - -static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg) -{ - u32 header = msg[0]; - u32 len = ct_header_get_len(header); - u32 msglen = len + 1; /* total message length including header */ - u32 fence; - u32 status; - u32 datalen; - struct ct_request *req; - bool found = false; - - GEM_BUG_ON(!ct_header_is_response(header)); - GEM_BUG_ON(!in_irq()); - - /* Response payload shall at least include fence and status */ - if (unlikely(len < 2)) { - DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg); - return -EPROTO; - } - - fence = msg[1]; - status = msg[2]; - datalen = len - 2; - - /* Format of the status follows RESPONSE message */ - if (unlikely(!INTEL_GUC_MSG_IS_RESPONSE(status))) { - DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg); - return -EPROTO; - } - - CT_DEBUG_DRIVER("CT: response fence %u status %#x\n", fence, status); - - spin_lock(&ct->lock); - list_for_each_entry(req, &ct->pending_requests, link) { - if (unlikely(fence != req->fence)) { - CT_DEBUG_DRIVER("CT: request %u awaits response\n", - req->fence); - continue; - } - if (unlikely(datalen > req->response_len)) { - DRM_ERROR("CT: response %u too long %*ph\n", - req->fence, 4 * msglen, msg); - datalen = 0; - } - if (datalen) - memcpy(req->response_buf, msg + 3, 4 * datalen); - req->response_len = datalen; - WRITE_ONCE(req->status, status); - found = true; - break; - } - spin_unlock(&ct->lock); - - if (!found) - DRM_ERROR("CT: unsolicited response %*ph\n", 4 * msglen, msg); - return 0; -} - -static void ct_process_request(struct intel_guc_ct *ct, - u32 action, u32 len, const u32 *payload) -{ - struct intel_guc *guc = ct_to_guc(ct); - int ret; - - CT_DEBUG_DRIVER("CT: request %x %*ph\n", action, 4 * len, payload); - - switch (action) { - case INTEL_GUC_ACTION_DEFAULT: - ret = intel_guc_to_host_process_recv_msg(guc, payload, len); - if (unlikely(ret)) - goto fail_unexpected; - break; - - default: -fail_unexpected: - DRM_ERROR("CT: unexpected request %x %*ph\n", - action, 4 * len, payload); - break; - } -} - -static bool ct_process_incoming_requests(struct intel_guc_ct *ct) -{ - unsigned long flags; - struct ct_incoming_request *request; - u32 header; - u32 *payload; - bool done; - - spin_lock_irqsave(&ct->lock, flags); - request = list_first_entry_or_null(&ct->incoming_requests, - struct ct_incoming_request, link); - if (request) - list_del(&request->link); - done = !!list_empty(&ct->incoming_requests); - spin_unlock_irqrestore(&ct->lock, flags); - - if (!request) - return true; - - header = request->msg[0]; - payload = &request->msg[1]; - ct_process_request(ct, - ct_header_get_action(header), - ct_header_get_len(header), - payload); - - kfree(request); - return done; -} - -static void ct_incoming_request_worker_func(struct work_struct *w) -{ - struct intel_guc_ct *ct = container_of(w, struct intel_guc_ct, worker); - bool done; - - done = ct_process_incoming_requests(ct); - if (!done) - queue_work(system_unbound_wq, &ct->worker); -} - -/** - * DOC: CTB GuC to Host request - * - * Format of the CTB GuC to Host request message is as follows:: - * - * +------------+---------+---------+---------+---------+---------+ - * | msg[0] | [1] | [2] | [3] | ... 
| [n-1] | - * +------------+---------+---------+---------+---------+---------+ - * | MESSAGE | MESSAGE PAYLOAD | - * + HEADER +---------+---------+---------+---------+---------+ - * | | 0 | 1 | 2 | ... | n | - * +============+=========+=========+=========+=========+=========+ - * | len | request specific data | - * +------+-----+---------+---------+---------+---------+---------+ - * - * ^-----------------------len-----------------------^ - */ - -static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg) -{ - u32 header = msg[0]; - u32 len = ct_header_get_len(header); - u32 msglen = len + 1; /* total message length including header */ - struct ct_incoming_request *request; - unsigned long flags; - - GEM_BUG_ON(ct_header_is_response(header)); - - request = kmalloc(sizeof(*request) + 4 * msglen, GFP_ATOMIC); - if (unlikely(!request)) { - DRM_ERROR("CT: dropping request %*ph\n", 4 * msglen, msg); - return 0; /* XXX: -ENOMEM ? */ - } - memcpy(request->msg, msg, 4 * msglen); - - spin_lock_irqsave(&ct->lock, flags); - list_add_tail(&request->link, &ct->incoming_requests); - spin_unlock_irqrestore(&ct->lock, flags); - - queue_work(system_unbound_wq, &ct->worker); - return 0; -} - -static void ct_process_host_channel(struct intel_guc_ct *ct) -{ - struct intel_guc_ct_channel *ctch = &ct->host_channel; - struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_RECV]; - u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */ - int err = 0; - - if (!ctch->enabled) - return; - - do { - err = ctb_read(ctb, msg); - if (err) - break; - - if (ct_header_is_response(msg[0])) - err = ct_handle_response(ct, msg); - else - err = ct_handle_request(ct, msg); - } while (!err); - - if (GEM_WARN_ON(err == -EPROTO)) { - DRM_ERROR("CT: corrupted message detected!\n"); - ctb->desc->is_in_error = 1; - } -} - -/* - * When we're communicating with the GuC over CT, GuC uses events - * to notify us about new messages being posted on the RECV buffer. - */ -void intel_guc_to_host_event_handler_ct(struct intel_guc *guc) -{ - struct intel_guc_ct *ct = &guc->ct; - - ct_process_host_channel(ct); -} - -/** - * intel_guc_ct_init - Init CT communication - * @ct: pointer to CT struct - * - * Allocate memory required for communication via - * the CT channel. - * - * Return: 0 on success, a negative errno code on failure. - */ -int intel_guc_ct_init(struct intel_guc_ct *ct) -{ - struct intel_guc *guc = ct_to_guc(ct); - struct intel_guc_ct_channel *ctch = &ct->host_channel; - int err; - - err = ctch_init(guc, ctch); - if (unlikely(err)) { - DRM_ERROR("CT: can't open channel %d; err=%d\n", - ctch->owner, err); - return err; - } - - GEM_BUG_ON(!ctch->vma); - return 0; -} - -/** - * intel_guc_ct_fini - Fini CT communication - * @ct: pointer to CT struct - * - * Deallocate memory required for communication via - * the CT channel. - */ -void intel_guc_ct_fini(struct intel_guc_ct *ct) -{ - struct intel_guc *guc = ct_to_guc(ct); - struct intel_guc_ct_channel *ctch = &ct->host_channel; - - ctch_fini(guc, ctch); -} - -/** - * intel_guc_ct_enable - Enable buffer based command transport. - * @ct: pointer to CT struct - * - * Return: 0 on success, a negative errno code on failure. - */ -int intel_guc_ct_enable(struct intel_guc_ct *ct) -{ - struct intel_guc *guc = ct_to_guc(ct); - struct intel_guc_ct_channel *ctch = &ct->host_channel; - - if (ctch->enabled) - return 0; - - return ctch_enable(guc, ctch); -} - -/** - * intel_guc_ct_disable - Disable buffer based command transport. 
- * @ct: pointer to CT struct - */ -void intel_guc_ct_disable(struct intel_guc_ct *ct) -{ - struct intel_guc *guc = ct_to_guc(ct); - struct intel_guc_ct_channel *ctch = &ct->host_channel; - - if (!ctch->enabled) - return; - - ctch_disable(guc, ctch); -} diff --git a/drivers/gpu/drm/i915/intel_guc_ct.h b/drivers/gpu/drm/i915/intel_guc_ct.h deleted file mode 100644 index 8c1f6d133168..000000000000 --- a/drivers/gpu/drm/i915/intel_guc_ct.h +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Copyright © 2016-2017 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - */ - -#ifndef _INTEL_GUC_CT_H_ -#define _INTEL_GUC_CT_H_ - -#include -#include - -#include "intel_guc_fwif.h" - -struct i915_vma; -struct intel_guc; - -/** - * DOC: Command Transport (CT). - * - * Buffer based command transport is a replacement for MMIO based mechanism. - * It can be used to perform both host-2-guc and guc-to-host communication. - */ - -/** Represents single command transport buffer. - * - * A single command transport buffer consists of two parts, the header - * record (command transport buffer descriptor) and the actual buffer which - * holds the commands. - * - * @desc: pointer to the buffer descriptor - * @cmds: pointer to the commands buffer - */ -struct intel_guc_ct_buffer { - struct guc_ct_buffer_desc *desc; - u32 *cmds; -}; - -/** Represents pair of command transport buffers. - * - * Buffers go in pairs to allow bi-directional communication. - * To simplify the code we place both of them in the same vma. - * Buffers from the same pair must share unique owner id. - * - * @vma: pointer to the vma with pair of CT buffers - * @ctbs: buffers for sending(0) and receiving(1) commands - * @owner: unique identifier - * @next_fence: fence to be used with next send command - */ -struct intel_guc_ct_channel { - struct i915_vma *vma; - struct intel_guc_ct_buffer ctbs[2]; - u32 owner; - u32 next_fence; - bool enabled; -}; - -/** Holds all command transport channels. 
- * - * @host_channel: main channel used by the host - */ -struct intel_guc_ct { - struct intel_guc_ct_channel host_channel; - /* other channels are tbd */ - - /** @lock: protects pending requests list */ - spinlock_t lock; - - /** @pending_requests: list of requests waiting for response */ - struct list_head pending_requests; - - /** @incoming_requests: list of incoming requests */ - struct list_head incoming_requests; - - /** @worker: worker for handling incoming requests */ - struct work_struct worker; -}; - -void intel_guc_ct_init_early(struct intel_guc_ct *ct); -int intel_guc_ct_init(struct intel_guc_ct *ct); -void intel_guc_ct_fini(struct intel_guc_ct *ct); -int intel_guc_ct_enable(struct intel_guc_ct *ct); -void intel_guc_ct_disable(struct intel_guc_ct *ct); - -static inline void intel_guc_ct_stop(struct intel_guc_ct *ct) -{ - ct->host_channel.enabled = false; -} - -int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len, - u32 *response_buf, u32 response_buf_size); -void intel_guc_to_host_event_handler_ct(struct intel_guc *guc); - -#endif /* _INTEL_GUC_CT_H_ */ diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c deleted file mode 100644 index ee95d4960c5c..000000000000 --- a/drivers/gpu/drm/i915/intel_guc_fw.c +++ /dev/null @@ -1,319 +0,0 @@ -/* - * Copyright © 2014 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - * Authors: - * Vinit Azad - * Ben Widawsky - * Dave Gordon - * Alex Dai - */ - -#include "intel_guc_fw.h" -#include "i915_drv.h" - -#define __MAKE_GUC_FW_PATH(KEY) \ - "i915/" \ - __stringify(KEY##_GUC_FW_PREFIX) "_guc_" \ - __stringify(KEY##_GUC_FW_MAJOR) "." \ - __stringify(KEY##_GUC_FW_MINOR) "." 
\ - __stringify(KEY##_GUC_FW_PATCH) ".bin" - -#define SKL_GUC_FW_PREFIX skl -#define SKL_GUC_FW_MAJOR 33 -#define SKL_GUC_FW_MINOR 0 -#define SKL_GUC_FW_PATCH 0 -#define SKL_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(SKL) -MODULE_FIRMWARE(SKL_GUC_FIRMWARE_PATH); - -#define BXT_GUC_FW_PREFIX bxt -#define BXT_GUC_FW_MAJOR 33 -#define BXT_GUC_FW_MINOR 0 -#define BXT_GUC_FW_PATCH 0 -#define BXT_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(BXT) -MODULE_FIRMWARE(BXT_GUC_FIRMWARE_PATH); - -#define KBL_GUC_FW_PREFIX kbl -#define KBL_GUC_FW_MAJOR 33 -#define KBL_GUC_FW_MINOR 0 -#define KBL_GUC_FW_PATCH 0 -#define KBL_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(KBL) -MODULE_FIRMWARE(KBL_GUC_FIRMWARE_PATH); - -#define GLK_GUC_FW_PREFIX glk -#define GLK_GUC_FW_MAJOR 33 -#define GLK_GUC_FW_MINOR 0 -#define GLK_GUC_FW_PATCH 0 -#define GLK_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(GLK) -MODULE_FIRMWARE(GLK_GUC_FIRMWARE_PATH); - -#define ICL_GUC_FW_PREFIX icl -#define ICL_GUC_FW_MAJOR 33 -#define ICL_GUC_FW_MINOR 0 -#define ICL_GUC_FW_PATCH 0 -#define ICL_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(ICL) -MODULE_FIRMWARE(ICL_GUC_FIRMWARE_PATH); - -static void guc_fw_select(struct intel_uc_fw *guc_fw) -{ - struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw); - struct drm_i915_private *i915 = guc_to_i915(guc); - - GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC); - - if (!HAS_GUC(i915)) { - guc_fw->fetch_status = INTEL_UC_FIRMWARE_NOT_SUPPORTED; - return; - } - - guc_fw->fetch_status = INTEL_UC_FIRMWARE_NOT_STARTED; - - if (i915_modparams.guc_firmware_path) { - guc_fw->path = i915_modparams.guc_firmware_path; - guc_fw->major_ver_wanted = 0; - guc_fw->minor_ver_wanted = 0; - } else if (IS_ICELAKE(i915)) { - guc_fw->path = ICL_GUC_FIRMWARE_PATH; - guc_fw->major_ver_wanted = ICL_GUC_FW_MAJOR; - guc_fw->minor_ver_wanted = ICL_GUC_FW_MINOR; - } else if (IS_GEMINILAKE(i915)) { - guc_fw->path = GLK_GUC_FIRMWARE_PATH; - guc_fw->major_ver_wanted = GLK_GUC_FW_MAJOR; - guc_fw->minor_ver_wanted = GLK_GUC_FW_MINOR; - } else if (IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) { - guc_fw->path = KBL_GUC_FIRMWARE_PATH; - guc_fw->major_ver_wanted = KBL_GUC_FW_MAJOR; - guc_fw->minor_ver_wanted = KBL_GUC_FW_MINOR; - } else if (IS_BROXTON(i915)) { - guc_fw->path = BXT_GUC_FIRMWARE_PATH; - guc_fw->major_ver_wanted = BXT_GUC_FW_MAJOR; - guc_fw->minor_ver_wanted = BXT_GUC_FW_MINOR; - } else if (IS_SKYLAKE(i915)) { - guc_fw->path = SKL_GUC_FIRMWARE_PATH; - guc_fw->major_ver_wanted = SKL_GUC_FW_MAJOR; - guc_fw->minor_ver_wanted = SKL_GUC_FW_MINOR; - } -} - -/** - * intel_guc_fw_init_early() - initializes GuC firmware struct - * @guc: intel_guc struct - * - * On platforms with GuC selects firmware for uploading - */ -void intel_guc_fw_init_early(struct intel_guc *guc) -{ - struct intel_uc_fw *guc_fw = &guc->fw; - - intel_uc_fw_init_early(guc_fw, INTEL_UC_FW_TYPE_GUC); - guc_fw_select(guc_fw); -} - -static void guc_prepare_xfer(struct intel_guc *guc) -{ - struct drm_i915_private *dev_priv = guc_to_i915(guc); - - /* Must program this register before loading the ucode with DMA */ - I915_WRITE(GUC_SHIM_CONTROL, GUC_DISABLE_SRAM_INIT_TO_ZEROES | - GUC_ENABLE_READ_CACHE_LOGIC | - GUC_ENABLE_MIA_CACHING | - GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA | - GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA | - GUC_ENABLE_MIA_CLOCK_GATING); - - if (IS_GEN9_LP(dev_priv)) - I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE); - else - I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE); - - if (IS_GEN(dev_priv, 9)) { - /* DOP Clock Gating Enable for GuC clocks */ - I915_WRITE(GEN7_MISCCPCTL, 
(GEN8_DOP_CLOCK_GATE_GUC_ENABLE | - I915_READ(GEN7_MISCCPCTL))); - - /* allows for 5us (in 10ns units) before GT can go to RC6 */ - I915_WRITE(GUC_ARAT_C6DIS, 0x1FF); - } -} - -/* Copy RSA signature from the fw image to HW for verification */ -static void guc_xfer_rsa(struct intel_guc *guc) -{ - struct drm_i915_private *dev_priv = guc_to_i915(guc); - struct intel_uc_fw *fw = &guc->fw; - struct sg_table *pages = fw->obj->mm.pages; - u32 rsa[UOS_RSA_SCRATCH_COUNT]; - int i; - - sg_pcopy_to_buffer(pages->sgl, pages->nents, - rsa, sizeof(rsa), fw->rsa_offset); - - for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++) - I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]); -} - -static bool guc_xfer_completed(struct intel_guc *guc, u32 *status) -{ - struct drm_i915_private *dev_priv = guc_to_i915(guc); - - /* Did we complete the xfer? */ - *status = I915_READ(DMA_CTRL); - return !(*status & START_DMA); -} - -/* - * Read the GuC status register (GUC_STATUS) and store it in the - * specified location; then return a boolean indicating whether - * the value matches either of two values representing completion - * of the GuC boot process. - * - * This is used for polling the GuC status in a wait_for() - * loop below. - */ -static inline bool guc_ready(struct intel_guc *guc, u32 *status) -{ - struct drm_i915_private *dev_priv = guc_to_i915(guc); - u32 val = I915_READ(GUC_STATUS); - u32 uk_val = val & GS_UKERNEL_MASK; - - *status = val; - return (uk_val == GS_UKERNEL_READY) || - ((val & GS_MIA_CORE_STATE) && (uk_val == GS_UKERNEL_LAPIC_DONE)); -} - -static int guc_wait_ucode(struct intel_guc *guc) -{ - struct drm_i915_private *i915 = guc_to_i915(guc); - u32 status; - int ret; - - /* - * Wait for the GuC to start up. - * NB: Docs recommend not using the interrupt for completion. - * Measurements indicate this should take no more than 20ms, so a - * timeout here indicates that the GuC has failed and is unusable. - * (Higher levels of the driver may decide to reset the GuC and - * attempt the ucode load again if this happens.) - */ - ret = wait_for(guc_ready(guc, &status), 100); - DRM_DEBUG_DRIVER("GuC status %#x\n", status); - - if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) { - DRM_ERROR("GuC firmware signature verification failed\n"); - ret = -ENOEXEC; - } - - if ((status & GS_UKERNEL_MASK) == GS_UKERNEL_EXCEPTION) { - DRM_ERROR("GuC firmware exception. EIP: %#x\n", - intel_uncore_read(&i915->uncore, SOFT_SCRATCH(13))); - ret = -ENXIO; - } - - if (ret == 0 && !guc_xfer_completed(guc, &status)) { - DRM_ERROR("GuC is ready, but the xfer %08x is incomplete\n", - status); - ret = -ENXIO; - } - - return ret; -} - -/* - * Transfer the firmware image to RAM for execution by the microcontroller. - * - * Architecturally, the DMA engine is bidirectional, and can potentially even - * transfer between GTT locations. This functionality is left out of the API - * for now as there is no need for it. - */ -static int guc_xfer_ucode(struct intel_guc *guc) -{ - struct drm_i915_private *dev_priv = guc_to_i915(guc); - struct intel_uc_fw *guc_fw = &guc->fw; - unsigned long offset; - - /* - * The header plus uCode will be copied to WOPCM via DMA, excluding any - * other components - */ - I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size); - - /* Set the source address for the new blob */ - offset = intel_uc_fw_ggtt_offset(guc_fw) + guc_fw->header_offset; - I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset)); - I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF); - - /* - * Set the DMA destination. 
Current uCode expects the code to be - * loaded at 8k; locations below this are used for the stack. - */ - I915_WRITE(DMA_ADDR_1_LOW, 0x2000); - I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM); - - /* Finally start the DMA */ - I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA)); - - return guc_wait_ucode(guc); -} -/* - * Load the GuC firmware blob into the MinuteIA. - */ -static int guc_fw_xfer(struct intel_uc_fw *guc_fw) -{ - struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw); - struct drm_i915_private *dev_priv = guc_to_i915(guc); - int ret; - - GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC); - - intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); - - guc_prepare_xfer(guc); - - /* - * Note that GuC needs the CSS header plus uKernel code to be copied - * by the DMA engine in one operation, whereas the RSA signature is - * loaded via MMIO. - */ - guc_xfer_rsa(guc); - - ret = guc_xfer_ucode(guc); - - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); - - return ret; -} - -/** - * intel_guc_fw_upload() - load GuC uCode to device - * @guc: intel_guc structure - * - * Called from intel_uc_init_hw() during driver load, resume from sleep and - * after a GPU reset. - * - * The firmware image should have already been fetched into memory, so only - * check that fetch succeeded, and then transfer the image to the h/w. - * - * Return: non-zero code on error - */ -int intel_guc_fw_upload(struct intel_guc *guc) -{ - return intel_uc_fw_upload(&guc->fw, guc_fw_xfer); -} diff --git a/drivers/gpu/drm/i915/intel_guc_fw.h b/drivers/gpu/drm/i915/intel_guc_fw.h deleted file mode 100644 index 4ec5d3d9e2b0..000000000000 --- a/drivers/gpu/drm/i915/intel_guc_fw.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright © 2017 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. 
- * - */ - -#ifndef _INTEL_GUC_FW_H_ -#define _INTEL_GUC_FW_H_ - -struct intel_guc; - -void intel_guc_fw_init_early(struct intel_guc *guc); -int intel_guc_fw_upload(struct intel_guc *guc); - -#endif diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h deleted file mode 100644 index 30cca3a29323..000000000000 --- a/drivers/gpu/drm/i915/intel_guc_fwif.h +++ /dev/null @@ -1,691 +0,0 @@ -/* - * Copyright © 2014 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - */ -#ifndef _INTEL_GUC_FWIF_H -#define _INTEL_GUC_FWIF_H - -#include -#include -#include - -#define GUC_CLIENT_PRIORITY_KMD_HIGH 0 -#define GUC_CLIENT_PRIORITY_HIGH 1 -#define GUC_CLIENT_PRIORITY_KMD_NORMAL 2 -#define GUC_CLIENT_PRIORITY_NORMAL 3 -#define GUC_CLIENT_PRIORITY_NUM 4 - -#define GUC_MAX_STAGE_DESCRIPTORS 1024 -#define GUC_INVALID_STAGE_ID GUC_MAX_STAGE_DESCRIPTORS - -#define GUC_RENDER_ENGINE 0 -#define GUC_VIDEO_ENGINE 1 -#define GUC_BLITTER_ENGINE 2 -#define GUC_VIDEOENHANCE_ENGINE 3 -#define GUC_VIDEO_ENGINE2 4 -#define GUC_MAX_ENGINES_NUM (GUC_VIDEO_ENGINE2 + 1) - -#define GUC_MAX_ENGINE_CLASSES 5 -#define GUC_MAX_INSTANCES_PER_CLASS 16 - -#define GUC_DOORBELL_INVALID 256 - -#define GUC_DB_SIZE (PAGE_SIZE) -#define GUC_WQ_SIZE (PAGE_SIZE * 2) - -/* Work queue item header definitions */ -#define WQ_STATUS_ACTIVE 1 -#define WQ_STATUS_SUSPENDED 2 -#define WQ_STATUS_CMD_ERROR 3 -#define WQ_STATUS_ENGINE_ID_NOT_USED 4 -#define WQ_STATUS_SUSPENDED_FROM_RESET 5 -#define WQ_TYPE_SHIFT 0 -#define WQ_TYPE_BATCH_BUF (0x1 << WQ_TYPE_SHIFT) -#define WQ_TYPE_PSEUDO (0x2 << WQ_TYPE_SHIFT) -#define WQ_TYPE_INORDER (0x3 << WQ_TYPE_SHIFT) -#define WQ_TYPE_NOOP (0x4 << WQ_TYPE_SHIFT) -#define WQ_TARGET_SHIFT 10 -#define WQ_LEN_SHIFT 16 -#define WQ_NO_WCFLUSH_WAIT (1 << 27) -#define WQ_PRESENT_WORKLOAD (1 << 28) - -#define WQ_RING_TAIL_SHIFT 20 -#define WQ_RING_TAIL_MAX 0x7FF /* 2^11 QWords */ -#define WQ_RING_TAIL_MASK (WQ_RING_TAIL_MAX << WQ_RING_TAIL_SHIFT) - -#define GUC_STAGE_DESC_ATTR_ACTIVE BIT(0) -#define GUC_STAGE_DESC_ATTR_PENDING_DB BIT(1) -#define GUC_STAGE_DESC_ATTR_KERNEL BIT(2) -#define GUC_STAGE_DESC_ATTR_PREEMPT BIT(3) -#define GUC_STAGE_DESC_ATTR_RESET BIT(4) -#define GUC_STAGE_DESC_ATTR_WQLOCKED BIT(5) -#define GUC_STAGE_DESC_ATTR_PCH BIT(6) -#define GUC_STAGE_DESC_ATTR_TERMINATED BIT(7) - -/* New GuC control data */ -#define GUC_CTL_CTXINFO 0 -#define GUC_CTL_CTXNUM_IN16_SHIFT 0 -#define 
GUC_CTL_BASE_ADDR_SHIFT 12 - -#define GUC_CTL_LOG_PARAMS 1 -#define GUC_LOG_VALID (1 << 0) -#define GUC_LOG_NOTIFY_ON_HALF_FULL (1 << 1) -#define GUC_LOG_ALLOC_IN_MEGABYTE (1 << 3) -#define GUC_LOG_CRASH_SHIFT 4 -#define GUC_LOG_CRASH_MASK (0x3 << GUC_LOG_CRASH_SHIFT) -#define GUC_LOG_DPC_SHIFT 6 -#define GUC_LOG_DPC_MASK (0x7 << GUC_LOG_DPC_SHIFT) -#define GUC_LOG_ISR_SHIFT 9 -#define GUC_LOG_ISR_MASK (0x7 << GUC_LOG_ISR_SHIFT) -#define GUC_LOG_BUF_ADDR_SHIFT 12 - -#define GUC_CTL_WA 2 -#define GUC_CTL_FEATURE 3 -#define GUC_CTL_DISABLE_SCHEDULER (1 << 14) - -#define GUC_CTL_DEBUG 4 -#define GUC_LOG_VERBOSITY_SHIFT 0 -#define GUC_LOG_VERBOSITY_LOW (0 << GUC_LOG_VERBOSITY_SHIFT) -#define GUC_LOG_VERBOSITY_MED (1 << GUC_LOG_VERBOSITY_SHIFT) -#define GUC_LOG_VERBOSITY_HIGH (2 << GUC_LOG_VERBOSITY_SHIFT) -#define GUC_LOG_VERBOSITY_ULTRA (3 << GUC_LOG_VERBOSITY_SHIFT) -/* Verbosity range-check limits, without the shift */ -#define GUC_LOG_VERBOSITY_MIN 0 -#define GUC_LOG_VERBOSITY_MAX 3 -#define GUC_LOG_VERBOSITY_MASK 0x0000000f -#define GUC_LOG_DESTINATION_MASK (3 << 4) -#define GUC_LOG_DISABLED (1 << 6) -#define GUC_PROFILE_ENABLED (1 << 7) - -#define GUC_CTL_ADS 5 -#define GUC_ADS_ADDR_SHIFT 1 -#define GUC_ADS_ADDR_MASK (0xFFFFF << GUC_ADS_ADDR_SHIFT) - -#define GUC_CTL_MAX_DWORDS (SOFT_SCRATCH_COUNT - 2) /* [1..14] */ - -/** - * DOC: GuC Firmware Layout - * - * The GuC firmware layout looks like this: - * - * +-------------------------------+ - * | uc_css_header | - * | | - * | contains major/minor version | - * +-------------------------------+ - * | uCode | - * +-------------------------------+ - * | RSA signature | - * +-------------------------------+ - * | modulus key | - * +-------------------------------+ - * | exponent val | - * +-------------------------------+ - * - * The firmware may or may not have modulus key and exponent data. The header, - * uCode and RSA signature are must-have components that will be used by driver. - * Length of each components, which is all in dwords, can be found in header. - * In the case that modulus and exponent are not present in fw, a.k.a truncated - * image, the length value still appears in header. - * - * Driver will do some basic fw size validation based on the following rules: - * - * 1. Header, uCode and RSA are must-have components. - * 2. All firmware components, if they present, are in the sequence illustrated - * in the layout table above. - * 3. Length info of each component can be found in header, in dwords. - * 4. Modulus and exponent key are not required by driver. They may not appear - * in fw. So driver will load a truncated firmware in this case. - * - * HuC firmware layout is same as GuC firmware. - * Only HuC version information is saved in a different way. - */ - -struct uc_css_header { - u32 module_type; - /* header_size includes all non-uCode bits, including css_header, rsa - * key, modulus key and exponent data. 
*/ - u32 header_size_dw; - u32 header_version; - u32 module_id; - u32 module_vendor; - u32 date; -#define CSS_DATE_DAY (0xFF << 0) -#define CSS_DATE_MONTH (0xFF << 8) -#define CSS_DATE_YEAR (0xFFFF << 16) - u32 size_dw; /* uCode plus header_size_dw */ - u32 key_size_dw; - u32 modulus_size_dw; - u32 exponent_size_dw; - u32 time; -#define CSS_TIME_HOUR (0xFF << 0) -#define CSS_DATE_MIN (0xFF << 8) -#define CSS_DATE_SEC (0xFFFF << 16) - char username[8]; - char buildnumber[12]; - u32 sw_version; -#define CSS_SW_VERSION_GUC_MAJOR (0xFF << 16) -#define CSS_SW_VERSION_GUC_MINOR (0xFF << 8) -#define CSS_SW_VERSION_GUC_PATCH (0xFF << 0) -#define CSS_SW_VERSION_HUC_MAJOR (0xFFFF << 16) -#define CSS_SW_VERSION_HUC_MINOR (0xFFFF << 0) - u32 reserved[14]; - u32 header_info; -} __packed; - -/* Work item for submitting workloads into work queue of GuC. */ -struct guc_wq_item { - u32 header; - u32 context_desc; - u32 submit_element_info; - u32 fence_id; -} __packed; - -struct guc_process_desc { - u32 stage_id; - u64 db_base_addr; - u32 head; - u32 tail; - u32 error_offset; - u64 wq_base_addr; - u32 wq_size_bytes; - u32 wq_status; - u32 engine_presence; - u32 priority; - u32 reserved[30]; -} __packed; - -/* engine id and context id is packed into guc_execlist_context.context_id*/ -#define GUC_ELC_CTXID_OFFSET 0 -#define GUC_ELC_ENGINE_OFFSET 29 - -/* The execlist context including software and HW information */ -struct guc_execlist_context { - u32 context_desc; - u32 context_id; - u32 ring_status; - u32 ring_lrca; - u32 ring_begin; - u32 ring_end; - u32 ring_next_free_location; - u32 ring_current_tail_pointer_value; - u8 engine_state_submit_value; - u8 engine_state_wait_value; - u16 pagefault_count; - u16 engine_submit_queue_count; -} __packed; - -/* - * This structure describes a stage set arranged for a particular communication - * between uKernel (GuC) and Driver (KMD). Technically, this is known as a - * "GuC Context descriptor" in the specs, but we use the term "stage descriptor" - * to avoid confusion with all the other things already named "context" in the - * driver. A static pool of these descriptors are stored inside a GEM object - * (stage_desc_pool) which is held for the entire lifetime of our interaction - * with the GuC, being allocated before the GuC is loaded with its firmware. - */ -struct guc_stage_desc { - u32 sched_common_area; - u32 stage_id; - u32 pas_id; - u8 engines_used; - u64 db_trigger_cpu; - u32 db_trigger_uk; - u64 db_trigger_phy; - u16 db_id; - - struct guc_execlist_context lrc[GUC_MAX_ENGINES_NUM]; - - u8 attribute; - - u32 priority; - - u32 wq_sampled_tail_offset; - u32 wq_total_submit_enqueues; - - u32 process_desc; - u32 wq_addr; - u32 wq_size; - - u32 engine_presence; - - u8 engine_suspended; - - u8 reserved0[3]; - u64 reserved1[1]; - - u64 desc_private; -} __packed; - -/** - * DOC: CTB based communication - * - * The CTB (command transport buffer) communication between Host and GuC - * is based on u32 data stream written to the shared buffer. One buffer can - * be used to transmit data only in one direction (one-directional channel). - * - * Current status of the each buffer is stored in the buffer descriptor. - * Buffer descriptor holds tail and head fields that represents active data - * stream. 
The tail field is updated by the data producer (sender), and head - * field is updated by the data consumer (receiver):: - * - * +------------+ - * | DESCRIPTOR | +=================+============+========+ - * +============+ | | MESSAGE(s) | | - * | address |--------->+=================+============+========+ - * +------------+ - * | head | ^-----head--------^ - * +------------+ - * | tail | ^---------tail-----------------^ - * +------------+ - * | size | ^---------------size--------------------^ - * +------------+ - * - * Each message in data stream starts with the single u32 treated as a header, - * followed by optional set of u32 data that makes message specific payload:: - * - * +------------+---------+---------+---------+ - * | MESSAGE | - * +------------+---------+---------+---------+ - * | msg[0] | [1] | ... | [n-1] | - * +------------+---------+---------+---------+ - * | MESSAGE | MESSAGE PAYLOAD | - * + HEADER +---------+---------+---------+ - * | | 0 | ... | n | - * +======+=====+=========+=========+=========+ - * | 31:16| code| | | | - * +------+-----+ | | | - * | 15:5|flags| | | | - * +------+-----+ | | | - * | 4:0| len| | | | - * +------+-----+---------+---------+---------+ - * - * ^-------------len-------------^ - * - * The message header consists of: - * - * - **len**, indicates length of the message payload (in u32) - * - **code**, indicates message code - * - **flags**, holds various bits to control message handling - */ - -/* - * Describes single command transport buffer. - * Used by both guc-master and clients. - */ -struct guc_ct_buffer_desc { - u32 addr; /* gfx address */ - u64 host_private; /* host private data */ - u32 size; /* size in bytes */ - u32 head; /* offset updated by GuC*/ - u32 tail; /* offset updated by owner */ - u32 is_in_error; /* error indicator */ - u32 fence; /* fence updated by GuC */ - u32 status; /* status updated by GuC */ - u32 owner; /* id of the channel owner */ - u32 owner_sub_id; /* owner-defined field for extra tracking */ - u32 reserved[5]; -} __packed; - -/* Type of command transport buffer */ -#define INTEL_GUC_CT_BUFFER_TYPE_SEND 0x0u -#define INTEL_GUC_CT_BUFFER_TYPE_RECV 0x1u - -/* - * Definition of the command transport message header (DW0) - * - * bit[4..0] message len (in dwords) - * bit[7..5] reserved - * bit[8] response (G2H only) - * bit[8] write fence to desc (H2G only) - * bit[9] write status to H2G buff (H2G only) - * bit[10] send status back via G2H (H2G only) - * bit[15..11] reserved - * bit[31..16] action code - */ -#define GUC_CT_MSG_LEN_SHIFT 0 -#define GUC_CT_MSG_LEN_MASK 0x1F -#define GUC_CT_MSG_IS_RESPONSE (1 << 8) -#define GUC_CT_MSG_WRITE_FENCE_TO_DESC (1 << 8) -#define GUC_CT_MSG_WRITE_STATUS_TO_BUFF (1 << 9) -#define GUC_CT_MSG_SEND_STATUS (1 << 10) -#define GUC_CT_MSG_ACTION_SHIFT 16 -#define GUC_CT_MSG_ACTION_MASK 0xFFFF - -#define GUC_FORCEWAKE_RENDER (1 << 0) -#define GUC_FORCEWAKE_MEDIA (1 << 1) - -#define GUC_POWER_UNSPECIFIED 0 -#define GUC_POWER_D0 1 -#define GUC_POWER_D1 2 -#define GUC_POWER_D2 3 -#define GUC_POWER_D3 4 - -/* Scheduling policy settings */ - -/* Reset engine upon preempt failure */ -#define POLICY_RESET_ENGINE (1<<0) -/* Preempt to idle on quantum expiry */ -#define POLICY_PREEMPT_TO_IDLE (1<<1) - -#define POLICY_MAX_NUM_WI 15 -#define POLICY_DEFAULT_DPC_PROMOTE_TIME_US 500000 -#define POLICY_DEFAULT_EXECUTION_QUANTUM_US 1000000 -#define POLICY_DEFAULT_PREEMPTION_TIME_US 500000 -#define POLICY_DEFAULT_FAULT_TIME_US 250000 - -struct guc_policy { - /* Time for one workload to execute. 
(in micro seconds) */ - u32 execution_quantum; - /* Time to wait for a preemption request to completed before issuing a - * reset. (in micro seconds). */ - u32 preemption_time; - /* How much time to allow to run after the first fault is observed. - * Then preempt afterwards. (in micro seconds) */ - u32 fault_time; - u32 policy_flags; - u32 reserved[8]; -} __packed; - -struct guc_policies { - struct guc_policy policy[GUC_CLIENT_PRIORITY_NUM][GUC_MAX_ENGINE_CLASSES]; - u32 submission_queue_depth[GUC_MAX_ENGINE_CLASSES]; - /* In micro seconds. How much time to allow before DPC processing is - * called back via interrupt (to prevent DPC queue drain starving). - * Typically 1000s of micro seconds (example only, not granularity). */ - u32 dpc_promote_time; - - /* Must be set to take these new values. */ - u32 is_valid; - - /* Max number of WIs to process per call. A large value may keep CS - * idle. */ - u32 max_num_work_items; - - u32 reserved[4]; -} __packed; - -/* GuC MMIO reg state struct */ - - -#define GUC_REGSET_MAX_REGISTERS 64 -#define GUC_S3_SAVE_SPACE_PAGES 10 - -struct guc_mmio_reg { - u32 offset; - u32 value; - u32 flags; -#define GUC_REGSET_MASKED (1 << 0) -} __packed; - -struct guc_mmio_regset { - struct guc_mmio_reg registers[GUC_REGSET_MAX_REGISTERS]; - u32 values_valid; - u32 number_of_registers; -} __packed; - -/* GuC register sets */ -struct guc_mmio_reg_state { - struct guc_mmio_regset engine_reg[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS]; - u32 reserved[98]; -} __packed; - -/* HW info */ -struct guc_gt_system_info { - u32 slice_enabled; - u32 rcs_enabled; - u32 reserved0; - u32 bcs_enabled; - u32 vdbox_enable_mask; - u32 vdbox_sfc_support_mask; - u32 vebox_enable_mask; - u32 reserved[9]; -} __packed; - -/* Clients info */ -struct guc_ct_pool_entry { - struct guc_ct_buffer_desc desc; - u32 reserved[7]; -} __packed; - -#define GUC_CT_POOL_SIZE 2 - -struct guc_clients_info { - u32 clients_num; - u32 reserved0[13]; - u32 ct_pool_addr; - u32 ct_pool_count; - u32 reserved[4]; -} __packed; - -/* GuC Additional Data Struct */ -struct guc_ads { - u32 reg_state_addr; - u32 reg_state_buffer; - u32 scheduler_policies; - u32 gt_system_info; - u32 clients_info; - u32 control_data; - u32 golden_context_lrca[GUC_MAX_ENGINE_CLASSES]; - u32 eng_state_size[GUC_MAX_ENGINE_CLASSES]; - u32 reserved[16]; -} __packed; - -/* GuC logging structures */ - -enum guc_log_buffer_type { - GUC_ISR_LOG_BUFFER, - GUC_DPC_LOG_BUFFER, - GUC_CRASH_DUMP_LOG_BUFFER, - GUC_MAX_LOG_BUFFER -}; - -/** - * struct guc_log_buffer_state - GuC log buffer state - * - * Below state structure is used for coordination of retrieval of GuC firmware - * logs. Separate state is maintained for each log buffer type. - * read_ptr points to the location where i915 read last in log buffer and - * is read only for GuC firmware. write_ptr is incremented by GuC with number - * of bytes written for each log entry and is read only for i915. - * When any type of log buffer becomes half full, GuC sends a flush interrupt. - * GuC firmware expects that while it is writing to 2nd half of the buffer, - * first half would get consumed by Host and then get a flush completed - * acknowledgment from Host, so that it does not end up doing any overwrite - * causing loss of logs. So when buffer gets half filled & i915 has requested - * for interrupt, GuC will set flush_to_file field, set the sampled_write_ptr - * to the value of write_ptr and raise the interrupt. 
- * On receiving the interrupt i915 should read the buffer, clear flush_to_file
- * field and also update read_ptr with the value of sampled_write_ptr, before
- * sending an acknowledgment to GuC. marker & version fields are for internal
- * usage of GuC and opaque to i915. buffer_full_cnt field is incremented every
- * time GuC detects the log buffer overflow.
- */
-struct guc_log_buffer_state {
-	u32 marker[2];
-	u32 read_ptr;
-	u32 write_ptr;
-	u32 size;
-	u32 sampled_write_ptr;
-	union {
-		struct {
-			u32 flush_to_file:1;
-			u32 buffer_full_cnt:4;
-			u32 reserved:27;
-		};
-		u32 flags;
-	};
-	u32 version;
-} __packed;
-
-struct guc_ctx_report {
-	u32 report_return_status;
-	u32 reserved1[64];
-	u32 affected_count;
-	u32 reserved2[2];
-} __packed;
-
-/* GuC Shared Context Data Struct */
-struct guc_shared_ctx_data {
-	u32 addr_of_last_preempted_data_low;
-	u32 addr_of_last_preempted_data_high;
-	u32 addr_of_last_preempted_data_high_tmp;
-	u32 padding;
-	u32 is_mapped_to_proxy;
-	u32 proxy_ctx_id;
-	u32 engine_reset_ctx_id;
-	u32 media_reset_count;
-	u32 reserved1[8];
-	u32 uk_last_ctx_switch_reason;
-	u32 was_reset;
-	u32 lrca_gpu_addr;
-	u64 execlist_ctx;
-	u32 reserved2[66];
-	struct guc_ctx_report preempt_ctx_report[GUC_MAX_ENGINES_NUM];
-} __packed;
-
-/**
- * DOC: MMIO based communication
- *
- * The MMIO based communication between Host and GuC uses software scratch
- * registers, where the first register holds data treated as the message header,
- * and the other registers are used to hold the message payload.
- *
- * For Gen9+, GuC uses software scratch registers 0xC180-0xC1B8,
- * but no H2G command takes more than 8 parameters and the GuC FW
- * itself uses an 8-element array to store the H2G message.
- *
- *      +-----------+---------+---------+---------+
- *      |  MMIO[0]  | MMIO[1] |   ...   | MMIO[n] |
- *      +-----------+---------+---------+---------+
- *      | header    |      optional payload       |
- *      +======+====+=========+=========+=========+
- *      | 31:28|type|         |         |         |
- *      +------+----+         |         |         |
- *      | 27:16|data|         |         |         |
- *      +------+----+         |         |         |
- *      |  15:0|code|         |         |         |
- *      +------+----+---------+---------+---------+
- *
- * The message header consists of:
- *
- * - **type**, indicates message type
- * - **code**, indicates message code, is specific for **type**
- * - **data**, indicates message data, optional, depends on **code**
- *
- * The following message **types** are supported:
- *
- * - **REQUEST**, indicates Host-to-GuC request, requested GuC action code
- *   must be provided in **code** field. Optional action specific parameters
- *   can be provided in remaining payload registers or **data** field.
- *
- * - **RESPONSE**, indicates GuC-to-Host response from earlier GuC request,
- *   action response status will be provided in **code** field. Optional
- *   response data can be returned in remaining payload registers or **data**
- *   field.
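
As a quick aside (not part of the original header), the packing implied by the layout above can be sketched in plain C. The shift and mask values mirror the INTEL_GUC_MSG_* definitions that follow; pack_msg() and msg_is_success() are hypothetical names used only for illustration.

    #include <stdint.h>

    #define MSG_TYPE_SHIFT 28   /* bits 31:28 - message type */
    #define MSG_DATA_SHIFT 16   /* bits 27:16 - optional data */
    #define MSG_CODE_SHIFT 0    /* bits 15:0  - action or status code */

    /* Pack a header: type in the top nibble, data and code below it. */
    static uint32_t pack_msg(uint32_t type, uint32_t data, uint32_t code)
    {
            return ((type & 0xf) << MSG_TYPE_SHIFT) |
                   ((data & 0xfff) << MSG_DATA_SHIFT) |
                   ((code & 0xffff) << MSG_CODE_SHIFT);
    }

    /* A reply is a success when type == RESPONSE (0xF) and code == 0. */
    static int msg_is_success(uint32_t msg)
    {
            return (msg >> MSG_TYPE_SHIFT) == 0xf && (msg & 0xffff) == 0;
    }
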
- */ - -#define GUC_MAX_MMIO_MSG_LEN 8 - -#define INTEL_GUC_MSG_TYPE_SHIFT 28 -#define INTEL_GUC_MSG_TYPE_MASK (0xF << INTEL_GUC_MSG_TYPE_SHIFT) -#define INTEL_GUC_MSG_DATA_SHIFT 16 -#define INTEL_GUC_MSG_DATA_MASK (0xFFF << INTEL_GUC_MSG_DATA_SHIFT) -#define INTEL_GUC_MSG_CODE_SHIFT 0 -#define INTEL_GUC_MSG_CODE_MASK (0xFFFF << INTEL_GUC_MSG_CODE_SHIFT) - -#define __INTEL_GUC_MSG_GET(T, m) \ - (((m) & INTEL_GUC_MSG_ ## T ## _MASK) >> INTEL_GUC_MSG_ ## T ## _SHIFT) -#define INTEL_GUC_MSG_TO_TYPE(m) __INTEL_GUC_MSG_GET(TYPE, m) -#define INTEL_GUC_MSG_TO_DATA(m) __INTEL_GUC_MSG_GET(DATA, m) -#define INTEL_GUC_MSG_TO_CODE(m) __INTEL_GUC_MSG_GET(CODE, m) - -enum intel_guc_msg_type { - INTEL_GUC_MSG_TYPE_REQUEST = 0x0, - INTEL_GUC_MSG_TYPE_RESPONSE = 0xF, -}; - -#define __INTEL_GUC_MSG_TYPE_IS(T, m) \ - (INTEL_GUC_MSG_TO_TYPE(m) == INTEL_GUC_MSG_TYPE_ ## T) -#define INTEL_GUC_MSG_IS_REQUEST(m) __INTEL_GUC_MSG_TYPE_IS(REQUEST, m) -#define INTEL_GUC_MSG_IS_RESPONSE(m) __INTEL_GUC_MSG_TYPE_IS(RESPONSE, m) - -enum intel_guc_action { - INTEL_GUC_ACTION_DEFAULT = 0x0, - INTEL_GUC_ACTION_REQUEST_PREEMPTION = 0x2, - INTEL_GUC_ACTION_REQUEST_ENGINE_RESET = 0x3, - INTEL_GUC_ACTION_ALLOCATE_DOORBELL = 0x10, - INTEL_GUC_ACTION_DEALLOCATE_DOORBELL = 0x20, - INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE = 0x30, - INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH = 0x302, - INTEL_GUC_ACTION_ENTER_S_STATE = 0x501, - INTEL_GUC_ACTION_EXIT_S_STATE = 0x502, - INTEL_GUC_ACTION_SLPC_REQUEST = 0x3003, - INTEL_GUC_ACTION_SAMPLE_FORCEWAKE = 0x3005, - INTEL_GUC_ACTION_AUTHENTICATE_HUC = 0x4000, - INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER = 0x4505, - INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER = 0x4506, - INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x0E000, - INTEL_GUC_ACTION_LIMIT -}; - -enum intel_guc_preempt_options { - INTEL_GUC_PREEMPT_OPTION_DROP_WORK_Q = 0x4, - INTEL_GUC_PREEMPT_OPTION_DROP_SUBMIT_Q = 0x8, -}; - -enum intel_guc_report_status { - INTEL_GUC_REPORT_STATUS_UNKNOWN = 0x0, - INTEL_GUC_REPORT_STATUS_ACKED = 0x1, - INTEL_GUC_REPORT_STATUS_ERROR = 0x2, - INTEL_GUC_REPORT_STATUS_COMPLETE = 0x4, -}; - -enum intel_guc_sleep_state_status { - INTEL_GUC_SLEEP_STATE_SUCCESS = 0x1, - INTEL_GUC_SLEEP_STATE_PREEMPT_TO_IDLE_FAILED = 0x2, - INTEL_GUC_SLEEP_STATE_ENGINE_RESET_FAILED = 0x3 -#define INTEL_GUC_SLEEP_STATE_INVALID_MASK 0x80000000 -}; - -#define GUC_LOG_CONTROL_LOGGING_ENABLED (1 << 0) -#define GUC_LOG_CONTROL_VERBOSITY_SHIFT 4 -#define GUC_LOG_CONTROL_VERBOSITY_MASK (0xF << GUC_LOG_CONTROL_VERBOSITY_SHIFT) -#define GUC_LOG_CONTROL_DEFAULT_LOGGING (1 << 8) - -enum intel_guc_response_status { - INTEL_GUC_RESPONSE_STATUS_SUCCESS = 0x0, - INTEL_GUC_RESPONSE_STATUS_GENERIC_FAIL = 0xF000, -}; - -#define INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(m) \ - (typecheck(u32, (m)) && \ - ((m) & (INTEL_GUC_MSG_TYPE_MASK | INTEL_GUC_MSG_CODE_MASK)) == \ - ((INTEL_GUC_MSG_TYPE_RESPONSE << INTEL_GUC_MSG_TYPE_SHIFT) | \ - (INTEL_GUC_RESPONSE_STATUS_SUCCESS << INTEL_GUC_MSG_CODE_SHIFT))) - -/* This action will be programmed in C1BC - SOFT_SCRATCH_15_REG */ -enum intel_guc_recv_message { - INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED = BIT(1), - INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER = BIT(3) -}; - -#endif diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c deleted file mode 100644 index 9be5d3a6fb5f..000000000000 --- a/drivers/gpu/drm/i915/intel_guc_log.c +++ /dev/null @@ -1,632 +0,0 @@ -/* - * Copyright © 2014-2017 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person 
obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - */ - -#include - -#include "intel_guc_log.h" -#include "i915_drv.h" - -static void guc_log_capture_logs(struct intel_guc_log *log); - -/** - * DOC: GuC firmware log - * - * Firmware log is enabled by setting i915.guc_log_level to the positive level. - * Log data is printed out via reading debugfs i915_guc_log_dump. Reading from - * i915_guc_load_status will print out firmware loading status and scratch - * registers value. - */ - -static int guc_action_flush_log_complete(struct intel_guc *guc) -{ - u32 action[] = { - INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE - }; - - return intel_guc_send(guc, action, ARRAY_SIZE(action)); -} - -static int guc_action_flush_log(struct intel_guc *guc) -{ - u32 action[] = { - INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH, - 0 - }; - - return intel_guc_send(guc, action, ARRAY_SIZE(action)); -} - -static int guc_action_control_log(struct intel_guc *guc, bool enable, - bool default_logging, u32 verbosity) -{ - u32 action[] = { - INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING, - (enable ? GUC_LOG_CONTROL_LOGGING_ENABLED : 0) | - (verbosity << GUC_LOG_CONTROL_VERBOSITY_SHIFT) | - (default_logging ? GUC_LOG_CONTROL_DEFAULT_LOGGING : 0) - }; - - GEM_BUG_ON(verbosity > GUC_LOG_VERBOSITY_MAX); - - return intel_guc_send(guc, action, ARRAY_SIZE(action)); -} - -static inline struct intel_guc *log_to_guc(struct intel_guc_log *log) -{ - return container_of(log, struct intel_guc, log); -} - -static void guc_log_enable_flush_events(struct intel_guc_log *log) -{ - intel_guc_enable_msg(log_to_guc(log), - INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER | - INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED); -} - -static void guc_log_disable_flush_events(struct intel_guc_log *log) -{ - intel_guc_disable_msg(log_to_guc(log), - INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER | - INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED); -} - -/* - * Sub buffer switch callback. Called whenever relay has to switch to a new - * sub buffer, relay stays on the same sub buffer if 0 is returned. - */ -static int subbuf_start_callback(struct rchan_buf *buf, - void *subbuf, - void *prev_subbuf, - size_t prev_padding) -{ - /* - * Use no-overwrite mode by default, where relay will stop accepting - * new data if there are no empty sub buffers left. - * There is no strict synchronization enforced by relay between Consumer - * and Producer. In overwrite mode, there is a possibility of getting - * inconsistent/garbled data, the producer could be writing on to the - * same sub buffer from which Consumer is reading. 
This can't be avoided
- * unless Consumer is fast enough and can always run in tandem with
- * Producer.
- */
-	if (relay_buf_full(buf))
-		return 0;
-
-	return 1;
-}
-
-/*
- * file_create() callback. Creates relay file in debugfs.
- */
-static struct dentry *create_buf_file_callback(const char *filename,
-					       struct dentry *parent,
-					       umode_t mode,
-					       struct rchan_buf *buf,
-					       int *is_global)
-{
-	struct dentry *buf_file;
-
-	/*
-	 * This is to enable the use of a single buffer for the relay channel and
-	 * correspondingly have a single file exposed to User, through which
-	 * it can collect the logs in order without any post-processing.
-	 * Need to set 'is_global' even if parent is NULL for early logging.
-	 */
-	*is_global = 1;
-
-	if (!parent)
-		return NULL;
-
-	buf_file = debugfs_create_file(filename, mode,
-				       parent, buf, &relay_file_operations);
-	if (IS_ERR(buf_file))
-		return NULL;
-
-	return buf_file;
-}
-
-/*
- * file_remove() default callback. Removes relay file in debugfs.
- */
-static int remove_buf_file_callback(struct dentry *dentry)
-{
-	debugfs_remove(dentry);
-	return 0;
-}
-
-/* relay channel callbacks */
-static struct rchan_callbacks relay_callbacks = {
-	.subbuf_start = subbuf_start_callback,
-	.create_buf_file = create_buf_file_callback,
-	.remove_buf_file = remove_buf_file_callback,
-};
-
-static void guc_move_to_next_buf(struct intel_guc_log *log)
-{
-	/*
-	 * Make sure the updates made in the sub buffer are visible when
-	 * Consumer sees the following update to offset inside the sub buffer.
-	 */
-	smp_wmb();
-
-	/* All data has been written, so now move the offset of sub buffer. */
-	relay_reserve(log->relay.channel, log->vma->obj->base.size);
-
-	/* Switch to the next sub buffer */
-	relay_flush(log->relay.channel);
-}
-
-static void *guc_get_write_buffer(struct intel_guc_log *log)
-{
-	/*
-	 * Just get the base address of a new sub buffer and copy data into it
-	 * ourselves. NULL will be returned in no-overwrite mode, if all sub
-	 * buffers are full. We could have used relay_write() to indirectly
-	 * copy the data, but that would have been a bit convoluted, as we need to
-	 * write to only certain locations inside a sub buffer which cannot be
-	 * done without using relay_reserve() along with relay_write(). So it's
-	 * better to use relay_reserve() alone.
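
Purely as an illustration of the reserve-and-copy pattern described above (this is not the driver's code; the channel pointer and snapshot length are assumed to come from the caller), the idea is roughly:

    #include <linux/relay.h>
    #include <linux/string.h>

    /* Copy one snapshot into the current sub buffer by hand, then push it out. */
    static void copy_snapshot(struct rchan *chan, const void *src, size_t len)
    {
            void *dst;

            /* Base of the current sub buffer; NULL in no-overwrite mode when full. */
            dst = relay_reserve(chan, 0);
            if (!dst)
                    return;

            memcpy(dst, src, len);

            /* Account for the bytes written, then switch to the next sub buffer. */
            relay_reserve(chan, len);
            relay_flush(chan);
    }
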
- */ - return relay_reserve(log->relay.channel, 0); -} - -static bool guc_check_log_buf_overflow(struct intel_guc_log *log, - enum guc_log_buffer_type type, - unsigned int full_cnt) -{ - unsigned int prev_full_cnt = log->stats[type].sampled_overflow; - bool overflow = false; - - if (full_cnt != prev_full_cnt) { - overflow = true; - - log->stats[type].overflow = full_cnt; - log->stats[type].sampled_overflow += full_cnt - prev_full_cnt; - - if (full_cnt < prev_full_cnt) { - /* buffer_full_cnt is a 4 bit counter */ - log->stats[type].sampled_overflow += 16; - } - - dev_notice_ratelimited(guc_to_i915(log_to_guc(log))->drm.dev, - "GuC log buffer overflow\n"); - } - - return overflow; -} - -static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type) -{ - switch (type) { - case GUC_ISR_LOG_BUFFER: - return ISR_BUFFER_SIZE; - case GUC_DPC_LOG_BUFFER: - return DPC_BUFFER_SIZE; - case GUC_CRASH_DUMP_LOG_BUFFER: - return CRASH_BUFFER_SIZE; - default: - MISSING_CASE(type); - } - - return 0; -} - -static void guc_read_update_log_buffer(struct intel_guc_log *log) -{ - unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt; - struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state; - struct guc_log_buffer_state log_buf_state_local; - enum guc_log_buffer_type type; - void *src_data, *dst_data; - bool new_overflow; - - mutex_lock(&log->relay.lock); - - if (WARN_ON(!intel_guc_log_relay_enabled(log))) - goto out_unlock; - - /* Get the pointer to shared GuC log buffer */ - log_buf_state = src_data = log->relay.buf_addr; - - /* Get the pointer to local buffer to store the logs */ - log_buf_snapshot_state = dst_data = guc_get_write_buffer(log); - - if (unlikely(!log_buf_snapshot_state)) { - /* - * Used rate limited to avoid deluge of messages, logs might be - * getting consumed by User at a slow rate. - */ - DRM_ERROR_RATELIMITED("no sub-buffer to capture logs\n"); - log->relay.full_count++; - - goto out_unlock; - } - - /* Actual logs are present from the 2nd page */ - src_data += PAGE_SIZE; - dst_data += PAGE_SIZE; - - for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) { - /* - * Make a copy of the state structure, inside GuC log buffer - * (which is uncached mapped), on the stack to avoid reading - * from it multiple times. - */ - memcpy(&log_buf_state_local, log_buf_state, - sizeof(struct guc_log_buffer_state)); - buffer_size = guc_get_log_buffer_size(type); - read_offset = log_buf_state_local.read_ptr; - write_offset = log_buf_state_local.sampled_write_ptr; - full_cnt = log_buf_state_local.buffer_full_cnt; - - /* Bookkeeping stuff */ - log->stats[type].flush += log_buf_state_local.flush_to_file; - new_overflow = guc_check_log_buf_overflow(log, type, full_cnt); - - /* Update the state of shared log buffer */ - log_buf_state->read_ptr = write_offset; - log_buf_state->flush_to_file = 0; - log_buf_state++; - - /* First copy the state structure in snapshot buffer */ - memcpy(log_buf_snapshot_state, &log_buf_state_local, - sizeof(struct guc_log_buffer_state)); - - /* - * The write pointer could have been updated by GuC firmware, - * after sending the flush interrupt to Host, for consistency - * set write pointer value to same value of sampled_write_ptr - * in the snapshot buffer. - */ - log_buf_snapshot_state->write_ptr = write_offset; - log_buf_snapshot_state++; - - /* Now copy the actual logs. 
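
The copy performed just below follows the usual circular-buffer rule: the unread bytes live in [read_ptr, sampled_write_ptr), wrapping at the buffer size. A standalone sketch of that rule (illustrative only; offsets are assumed to be within bounds):

    #include <stddef.h>
    #include <string.h>

    /* Copy the unread span of a circular buffer into a same-sized snapshot,
     * preserving offsets, and return the number of bytes copied.
     */
    static size_t copy_new_log_data(void *dst, const void *src, size_t size,
                                    size_t read, size_t write)
    {
            if (read > write) {
                    /* Wrapped: copy the leading part, then the tail. */
                    memcpy(dst, src, write);
                    memcpy((char *)dst + read, (const char *)src + read,
                           size - read);
                    return write + (size - read);
            }

            memcpy((char *)dst + read, (const char *)src + read, write - read);
            return write - read;
    }
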
*/ - if (unlikely(new_overflow)) { - /* copy the whole buffer in case of overflow */ - read_offset = 0; - write_offset = buffer_size; - } else if (unlikely((read_offset > buffer_size) || - (write_offset > buffer_size))) { - DRM_ERROR("invalid log buffer state\n"); - /* copy whole buffer as offsets are unreliable */ - read_offset = 0; - write_offset = buffer_size; - } - - /* Just copy the newly written data */ - if (read_offset > write_offset) { - i915_memcpy_from_wc(dst_data, src_data, write_offset); - bytes_to_copy = buffer_size - read_offset; - } else { - bytes_to_copy = write_offset - read_offset; - } - i915_memcpy_from_wc(dst_data + read_offset, - src_data + read_offset, bytes_to_copy); - - src_data += buffer_size; - dst_data += buffer_size; - } - - guc_move_to_next_buf(log); - -out_unlock: - mutex_unlock(&log->relay.lock); -} - -static void capture_logs_work(struct work_struct *work) -{ - struct intel_guc_log *log = - container_of(work, struct intel_guc_log, relay.flush_work); - - guc_log_capture_logs(log); -} - -static int guc_log_map(struct intel_guc_log *log) -{ - void *vaddr; - - lockdep_assert_held(&log->relay.lock); - - if (!log->vma) - return -ENODEV; - - /* - * Create a WC (Uncached for read) vmalloc mapping of log - * buffer pages, so that we can directly get the data - * (up-to-date) from memory. - */ - vaddr = i915_gem_object_pin_map(log->vma->obj, I915_MAP_WC); - if (IS_ERR(vaddr)) - return PTR_ERR(vaddr); - - log->relay.buf_addr = vaddr; - - return 0; -} - -static void guc_log_unmap(struct intel_guc_log *log) -{ - lockdep_assert_held(&log->relay.lock); - - i915_gem_object_unpin_map(log->vma->obj); - log->relay.buf_addr = NULL; -} - -void intel_guc_log_init_early(struct intel_guc_log *log) -{ - mutex_init(&log->relay.lock); - INIT_WORK(&log->relay.flush_work, capture_logs_work); -} - -static int guc_log_relay_create(struct intel_guc_log *log) -{ - struct intel_guc *guc = log_to_guc(log); - struct drm_i915_private *dev_priv = guc_to_i915(guc); - struct rchan *guc_log_relay_chan; - size_t n_subbufs, subbuf_size; - int ret; - - lockdep_assert_held(&log->relay.lock); - - /* Keep the size of sub buffers same as shared log buffer */ - subbuf_size = log->vma->size; - - /* - * Store up to 8 snapshots, which is large enough to buffer sufficient - * boot time logs and provides enough leeway to User, in terms of - * latency, for consuming the logs from relay. Also doesn't take - * up too much memory. - */ - n_subbufs = 8; - - guc_log_relay_chan = relay_open("guc_log", - dev_priv->drm.primary->debugfs_root, - subbuf_size, n_subbufs, - &relay_callbacks, dev_priv); - if (!guc_log_relay_chan) { - DRM_ERROR("Couldn't create relay chan for GuC logging\n"); - - ret = -ENOMEM; - return ret; - } - - GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size); - log->relay.channel = guc_log_relay_chan; - - return 0; -} - -static void guc_log_relay_destroy(struct intel_guc_log *log) -{ - lockdep_assert_held(&log->relay.lock); - - relay_close(log->relay.channel); - log->relay.channel = NULL; -} - -static void guc_log_capture_logs(struct intel_guc_log *log) -{ - struct intel_guc *guc = log_to_guc(log); - struct drm_i915_private *dev_priv = guc_to_i915(guc); - intel_wakeref_t wakeref; - - guc_read_update_log_buffer(log); - - /* - * Generally device is expected to be active only at this - * time, so get/put should be really quick. 
- */ - with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) - guc_action_flush_log_complete(guc); -} - -int intel_guc_log_create(struct intel_guc_log *log) -{ - struct intel_guc *guc = log_to_guc(log); - struct i915_vma *vma; - u32 guc_log_size; - int ret; - - GEM_BUG_ON(log->vma); - - /* - * GuC Log buffer Layout - * - * +===============================+ 00B - * | Crash dump state header | - * +-------------------------------+ 32B - * | DPC state header | - * +-------------------------------+ 64B - * | ISR state header | - * +-------------------------------+ 96B - * | | - * +===============================+ PAGE_SIZE (4KB) - * | Crash Dump logs | - * +===============================+ + CRASH_SIZE - * | DPC logs | - * +===============================+ + DPC_SIZE - * | ISR logs | - * +===============================+ + ISR_SIZE - */ - guc_log_size = PAGE_SIZE + CRASH_BUFFER_SIZE + DPC_BUFFER_SIZE + - ISR_BUFFER_SIZE; - - vma = intel_guc_allocate_vma(guc, guc_log_size); - if (IS_ERR(vma)) { - ret = PTR_ERR(vma); - goto err; - } - - log->vma = vma; - - log->level = i915_modparams.guc_log_level; - - return 0; - -err: - DRM_ERROR("Failed to allocate GuC log buffer. %d\n", ret); - return ret; -} - -void intel_guc_log_destroy(struct intel_guc_log *log) -{ - i915_vma_unpin_and_release(&log->vma, 0); -} - -int intel_guc_log_set_level(struct intel_guc_log *log, u32 level) -{ - struct intel_guc *guc = log_to_guc(log); - struct drm_i915_private *dev_priv = guc_to_i915(guc); - intel_wakeref_t wakeref; - int ret = 0; - - BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN != 0); - GEM_BUG_ON(!log->vma); - - /* - * GuC is recognizing log levels starting from 0 to max, we're using 0 - * as indication that logging should be disabled. - */ - if (level < GUC_LOG_LEVEL_DISABLED || level > GUC_LOG_LEVEL_MAX) - return -EINVAL; - - mutex_lock(&dev_priv->drm.struct_mutex); - - if (log->level == level) - goto out_unlock; - - with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) - ret = guc_action_control_log(guc, - GUC_LOG_LEVEL_IS_VERBOSE(level), - GUC_LOG_LEVEL_IS_ENABLED(level), - GUC_LOG_LEVEL_TO_VERBOSITY(level)); - if (ret) { - DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret); - goto out_unlock; - } - - log->level = level; - -out_unlock: - mutex_unlock(&dev_priv->drm.struct_mutex); - - return ret; -} - -bool intel_guc_log_relay_enabled(const struct intel_guc_log *log) -{ - return log->relay.buf_addr; -} - -int intel_guc_log_relay_open(struct intel_guc_log *log) -{ - int ret; - - mutex_lock(&log->relay.lock); - - if (intel_guc_log_relay_enabled(log)) { - ret = -EEXIST; - goto out_unlock; - } - - /* - * We require SSE 4.1 for fast reads from the GuC log buffer and - * it should be present on the chipsets supporting GuC based - * submisssions. - */ - if (!i915_has_memcpy_from_wc()) { - ret = -ENXIO; - goto out_unlock; - } - - ret = guc_log_relay_create(log); - if (ret) - goto out_unlock; - - ret = guc_log_map(log); - if (ret) - goto out_relay; - - mutex_unlock(&log->relay.lock); - - guc_log_enable_flush_events(log); - - /* - * When GuC is logging without us relaying to userspace, we're ignoring - * the flush notification. This means that we need to unconditionally - * flush on relay enabling, since GuC only notifies us once. 
- */ - queue_work(system_highpri_wq, &log->relay.flush_work); - - return 0; - -out_relay: - guc_log_relay_destroy(log); -out_unlock: - mutex_unlock(&log->relay.lock); - - return ret; -} - -void intel_guc_log_relay_flush(struct intel_guc_log *log) -{ - struct intel_guc *guc = log_to_guc(log); - struct drm_i915_private *i915 = guc_to_i915(guc); - intel_wakeref_t wakeref; - - /* - * Before initiating the forceful flush, wait for any pending/ongoing - * flush to complete otherwise forceful flush may not actually happen. - */ - flush_work(&log->relay.flush_work); - - with_intel_runtime_pm(&i915->runtime_pm, wakeref) - guc_action_flush_log(guc); - - /* GuC would have updated log buffer by now, so capture it */ - guc_log_capture_logs(log); -} - -void intel_guc_log_relay_close(struct intel_guc_log *log) -{ - struct intel_guc *guc = log_to_guc(log); - struct drm_i915_private *i915 = guc_to_i915(guc); - - guc_log_disable_flush_events(log); - intel_synchronize_irq(i915); - - flush_work(&log->relay.flush_work); - - mutex_lock(&log->relay.lock); - GEM_BUG_ON(!intel_guc_log_relay_enabled(log)); - guc_log_unmap(log); - guc_log_relay_destroy(log); - mutex_unlock(&log->relay.lock); -} - -void intel_guc_log_handle_flush_event(struct intel_guc_log *log) -{ - queue_work(system_highpri_wq, &log->relay.flush_work); -} diff --git a/drivers/gpu/drm/i915/intel_guc_log.h b/drivers/gpu/drm/i915/intel_guc_log.h deleted file mode 100644 index 1969572f1f79..000000000000 --- a/drivers/gpu/drm/i915/intel_guc_log.h +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright © 2014-2017 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - */ - -#ifndef _INTEL_GUC_LOG_H_ -#define _INTEL_GUC_LOG_H_ - -#include -#include -#include - -#include "intel_guc_fwif.h" -#include "i915_gem.h" - -struct intel_guc; - -#ifdef CONFIG_DRM_I915_DEBUG_GUC -#define CRASH_BUFFER_SIZE SZ_2M -#define DPC_BUFFER_SIZE SZ_8M -#define ISR_BUFFER_SIZE SZ_8M -#else -#define CRASH_BUFFER_SIZE SZ_8K -#define DPC_BUFFER_SIZE SZ_32K -#define ISR_BUFFER_SIZE SZ_32K -#endif - -/* - * While we're using plain log level in i915, GuC controls are much more... - * "elaborate"? We have a couple of bits for verbosity, separate bit for actual - * log enabling, and separate bit for default logging - which "conveniently" - * ignores the enable bit. 
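
For orientation (mirroring the GUC_LOG_LEVEL_* helpers defined just below; the verbosity ceiling itself comes from GUC_LOG_VERBOSITY_MAX elsewhere), the level scheme boils down to something like this hypothetical helper:

    /* 0 = disabled, 1 = enabled but not verbose, N >= 2 = verbose at N - 2. */
    static unsigned int log_level_to_verbosity(unsigned int level)
    {
            return level > 1 ? level - 2 : 0;
    }
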
- */ -#define GUC_LOG_LEVEL_DISABLED 0 -#define GUC_LOG_LEVEL_NON_VERBOSE 1 -#define GUC_LOG_LEVEL_IS_ENABLED(x) ((x) > GUC_LOG_LEVEL_DISABLED) -#define GUC_LOG_LEVEL_IS_VERBOSE(x) ((x) > GUC_LOG_LEVEL_NON_VERBOSE) -#define GUC_LOG_LEVEL_TO_VERBOSITY(x) ({ \ - typeof(x) _x = (x); \ - GUC_LOG_LEVEL_IS_VERBOSE(_x) ? _x - 2 : 0; \ -}) -#define GUC_VERBOSITY_TO_LOG_LEVEL(x) ((x) + 2) -#define GUC_LOG_LEVEL_MAX GUC_VERBOSITY_TO_LOG_LEVEL(GUC_LOG_VERBOSITY_MAX) - -struct intel_guc_log { - u32 level; - struct i915_vma *vma; - struct { - void *buf_addr; - struct work_struct flush_work; - struct rchan *channel; - struct mutex lock; - u32 full_count; - } relay; - /* logging related stats */ - struct { - u32 sampled_overflow; - u32 overflow; - u32 flush; - } stats[GUC_MAX_LOG_BUFFER]; -}; - -void intel_guc_log_init_early(struct intel_guc_log *log); -int intel_guc_log_create(struct intel_guc_log *log); -void intel_guc_log_destroy(struct intel_guc_log *log); - -int intel_guc_log_set_level(struct intel_guc_log *log, u32 level); -bool intel_guc_log_relay_enabled(const struct intel_guc_log *log); -int intel_guc_log_relay_open(struct intel_guc_log *log); -void intel_guc_log_relay_flush(struct intel_guc_log *log); -void intel_guc_log_relay_close(struct intel_guc_log *log); - -void intel_guc_log_handle_flush_event(struct intel_guc_log *log); - -static inline u32 intel_guc_log_get_level(struct intel_guc_log *log) -{ - return log->level; -} - -#endif diff --git a/drivers/gpu/drm/i915/intel_guc_reg.h b/drivers/gpu/drm/i915/intel_guc_reg.h deleted file mode 100644 index e3cbb23299ce..000000000000 --- a/drivers/gpu/drm/i915/intel_guc_reg.h +++ /dev/null @@ -1,161 +0,0 @@ -/* - * Copyright © 2014 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. 
- * - */ -#ifndef _INTEL_GUC_REG_H_ -#define _INTEL_GUC_REG_H_ - -#include -#include - -#include "i915_reg.h" - -/* Definitions of GuC H/W registers, bits, etc */ - -#define GUC_STATUS _MMIO(0xc000) -#define GS_RESET_SHIFT 0 -#define GS_MIA_IN_RESET (0x01 << GS_RESET_SHIFT) -#define GS_BOOTROM_SHIFT 1 -#define GS_BOOTROM_MASK (0x7F << GS_BOOTROM_SHIFT) -#define GS_BOOTROM_RSA_FAILED (0x50 << GS_BOOTROM_SHIFT) -#define GS_BOOTROM_JUMP_PASSED (0x76 << GS_BOOTROM_SHIFT) -#define GS_UKERNEL_SHIFT 8 -#define GS_UKERNEL_MASK (0xFF << GS_UKERNEL_SHIFT) -#define GS_UKERNEL_LAPIC_DONE (0x30 << GS_UKERNEL_SHIFT) -#define GS_UKERNEL_DPC_ERROR (0x60 << GS_UKERNEL_SHIFT) -#define GS_UKERNEL_EXCEPTION (0x70 << GS_UKERNEL_SHIFT) -#define GS_UKERNEL_READY (0xF0 << GS_UKERNEL_SHIFT) -#define GS_MIA_SHIFT 16 -#define GS_MIA_MASK (0x07 << GS_MIA_SHIFT) -#define GS_MIA_CORE_STATE (0x01 << GS_MIA_SHIFT) -#define GS_MIA_HALT_REQUESTED (0x02 << GS_MIA_SHIFT) -#define GS_MIA_ISR_ENTRY (0x04 << GS_MIA_SHIFT) -#define GS_AUTH_STATUS_SHIFT 30 -#define GS_AUTH_STATUS_MASK (0x03 << GS_AUTH_STATUS_SHIFT) -#define GS_AUTH_STATUS_BAD (0x01 << GS_AUTH_STATUS_SHIFT) -#define GS_AUTH_STATUS_GOOD (0x02 << GS_AUTH_STATUS_SHIFT) - -#define SOFT_SCRATCH(n) _MMIO(0xc180 + (n) * 4) -#define SOFT_SCRATCH_COUNT 16 - -#define GEN11_SOFT_SCRATCH(n) _MMIO(0x190240 + (n) * 4) -#define GEN11_SOFT_SCRATCH_COUNT 4 - -#define UOS_RSA_SCRATCH(i) _MMIO(0xc200 + (i) * 4) -#define UOS_RSA_SCRATCH_COUNT 64 - -#define DMA_ADDR_0_LOW _MMIO(0xc300) -#define DMA_ADDR_0_HIGH _MMIO(0xc304) -#define DMA_ADDR_1_LOW _MMIO(0xc308) -#define DMA_ADDR_1_HIGH _MMIO(0xc30c) -#define DMA_ADDRESS_SPACE_WOPCM (7 << 16) -#define DMA_ADDRESS_SPACE_GTT (8 << 16) -#define DMA_COPY_SIZE _MMIO(0xc310) -#define DMA_CTRL _MMIO(0xc314) -#define HUC_UKERNEL (1<<9) -#define UOS_MOVE (1<<4) -#define START_DMA (1<<0) -#define DMA_GUC_WOPCM_OFFSET _MMIO(0xc340) -#define GUC_WOPCM_OFFSET_VALID (1<<0) -#define HUC_LOADING_AGENT_VCR (0<<1) -#define HUC_LOADING_AGENT_GUC (1<<1) -#define GUC_WOPCM_OFFSET_SHIFT 14 -#define GUC_WOPCM_OFFSET_MASK (0x3ffff << GUC_WOPCM_OFFSET_SHIFT) -#define GUC_MAX_IDLE_COUNT _MMIO(0xC3E4) - -#define HUC_STATUS2 _MMIO(0xD3B0) -#define HUC_FW_VERIFIED (1<<7) - -#define GEN11_HUC_KERNEL_LOAD_INFO _MMIO(0xC1DC) -#define HUC_LOAD_SUCCESSFUL (1 << 0) - -#define GUC_WOPCM_SIZE _MMIO(0xc050) -#define GUC_WOPCM_SIZE_LOCKED (1<<0) -#define GUC_WOPCM_SIZE_SHIFT 12 -#define GUC_WOPCM_SIZE_MASK (0xfffff << GUC_WOPCM_SIZE_SHIFT) - -#define GEN8_GT_PM_CONFIG _MMIO(0x138140) -#define GEN9LP_GT_PM_CONFIG _MMIO(0x138140) -#define GEN9_GT_PM_CONFIG _MMIO(0x13816c) -#define GT_DOORBELL_ENABLE (1<<0) - -#define GEN8_GTCR _MMIO(0x4274) -#define GEN8_GTCR_INVALIDATE (1<<0) - -#define GUC_ARAT_C6DIS _MMIO(0xA178) - -#define GUC_SHIM_CONTROL _MMIO(0xc064) -#define GUC_DISABLE_SRAM_INIT_TO_ZEROES (1<<0) -#define GUC_ENABLE_READ_CACHE_LOGIC (1<<1) -#define GUC_ENABLE_MIA_CACHING (1<<2) -#define GUC_GEN10_MSGCH_ENABLE (1<<4) -#define GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA (1<<9) -#define GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA (1<<10) -#define GUC_ENABLE_MIA_CLOCK_GATING (1<<15) -#define GUC_GEN10_SHIM_WC_ENABLE (1<<21) - -#define GUC_SEND_INTERRUPT _MMIO(0xc4c8) -#define GUC_SEND_TRIGGER (1<<0) -#define GEN11_GUC_HOST_INTERRUPT _MMIO(0x1901f0) - -#define GUC_NUM_DOORBELLS 256 - -/* format of the HW-monitored doorbell cacheline */ -struct guc_doorbell_info { - u32 db_status; -#define GUC_DOORBELL_DISABLED 0 -#define GUC_DOORBELL_ENABLED 1 - - u32 cookie; - u32 reserved[14]; -} 
__packed; - -#define GEN8_DRBREGL(x) _MMIO(0x1000 + (x) * 8) -#define GEN8_DRB_VALID (1<<0) -#define GEN8_DRBREGU(x) _MMIO(0x1000 + (x) * 8 + 4) - -#define DE_GUCRMR _MMIO(0x44054) - -#define GUC_BCS_RCS_IER _MMIO(0xC550) -#define GUC_VCS2_VCS1_IER _MMIO(0xC554) -#define GUC_WD_VECS_IER _MMIO(0xC558) -#define GUC_PM_P24C_IER _MMIO(0xC55C) - -/* GuC Interrupt Vector */ -#define GUC_INTR_GUC2HOST BIT(15) -#define GUC_INTR_EXEC_ERROR BIT(14) -#define GUC_INTR_DISPLAY_EVENT BIT(13) -#define GUC_INTR_SEM_SIG BIT(12) -#define GUC_INTR_IOMMU2GUC BIT(11) -#define GUC_INTR_DOORBELL_RANG BIT(10) -#define GUC_INTR_DMA_DONE BIT(9) -#define GUC_INTR_FATAL_ERROR BIT(8) -#define GUC_INTR_NOTIF_ERROR BIT(7) -#define GUC_INTR_SW_INT_6 BIT(6) -#define GUC_INTR_SW_INT_5 BIT(5) -#define GUC_INTR_SW_INT_4 BIT(4) -#define GUC_INTR_SW_INT_3 BIT(3) -#define GUC_INTR_SW_INT_2 BIT(2) -#define GUC_INTR_SW_INT_1 BIT(1) -#define GUC_INTR_SW_INT_0 BIT(0) - -#endif diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c deleted file mode 100644 index d86336aa4178..000000000000 --- a/drivers/gpu/drm/i915/intel_guc_submission.c +++ /dev/null @@ -1,1164 +0,0 @@ -/* - * Copyright © 2014 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - */ - -#include - -#include "gt/intel_engine_pm.h" -#include "gt/intel_lrc_reg.h" -#include "gt/intel_context.h" -#include "gem/i915_gem_context.h" - -#include "intel_guc_submission.h" -#include "i915_drv.h" - -enum { - GUC_PREEMPT_NONE = 0, - GUC_PREEMPT_INPROGRESS, - GUC_PREEMPT_FINISHED, -}; -#define GUC_PREEMPT_BREADCRUMB_DWORDS 0x8 -#define GUC_PREEMPT_BREADCRUMB_BYTES \ - (sizeof(u32) * GUC_PREEMPT_BREADCRUMB_DWORDS) - -/** - * DOC: GuC-based command submission - * - * GuC client: - * A intel_guc_client refers to a submission path through GuC. Currently, there - * is only one client, which is charged with all submissions to the GuC. This - * struct is the owner of a doorbell, a process descriptor and a workqueue (all - * of them inside a single gem object that contains all required pages for these - * elements). - * - * GuC stage descriptor: - * During initialization, the driver allocates a static pool of 1024 such - * descriptors, and shares them with the GuC. - * Currently, there exists a 1:1 mapping between a intel_guc_client and a - * guc_stage_desc (via the client's stage_id), so effectively only one - * gets used. 
This stage descriptor lets the GuC know about the doorbell,
- * workqueue and process descriptor. Theoretically, it also lets the GuC
- * know about our HW contexts (context ID, etc...), but we actually
- * employ a kind of submission where the GuC uses the LRCA sent via the work
- * item instead (the single guc_stage_desc associated with the execbuf client
- * contains information about the default kernel context only, but this is
- * essentially unused). This is called a "proxy" submission.
- *
- * The Scratch registers:
- * There are 16 MMIO-based registers starting from 0xC180. The kernel driver writes
- * a value to the action register (SOFT_SCRATCH_0) along with any data. It then
- * triggers an interrupt on the GuC via another register write (0xC4C8).
- * Firmware writes a success/fail code back to the action register after it
- * processes the request. The kernel driver polls waiting for this update and
- * then proceeds.
- * See intel_guc_send()
- *
- * Doorbells:
- * Doorbells are interrupts to uKernel. A doorbell is a single cache line (QW)
- * mapped into process space.
- *
- * Work Items:
- * There are several types of work items that the host may place into a
- * workqueue, each with its own requirements and limitations. Currently only
- * WQ_TYPE_INORDER is needed to support legacy submission via GuC, which
- * represents an in-order queue. The kernel driver packs the ring tail pointer
- * and an ELSP context descriptor dword into a Work Item.
- * See guc_add_request()
- *
- */
-
-static inline struct i915_priolist *to_priolist(struct rb_node *rb)
-{
-	return rb_entry(rb, struct i915_priolist, node);
-}
-
-static inline bool is_high_priority(struct intel_guc_client *client)
-{
-	return (client->priority == GUC_CLIENT_PRIORITY_KMD_HIGH ||
-		client->priority == GUC_CLIENT_PRIORITY_HIGH);
-}
-
-static int reserve_doorbell(struct intel_guc_client *client)
-{
-	unsigned long offset;
-	unsigned long end;
-	u16 id;
-
-	GEM_BUG_ON(client->doorbell_id != GUC_DOORBELL_INVALID);
-
-	/*
-	 * The bitmap tracks which doorbell registers are currently in use.
-	 * It is split into two halves; the first half is used for normal
-	 * priority contexts, the second half for high-priority ones.
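
As an aside, that split can be written out on its own; this hypothetical helper only illustrates the window that the bitmap search below operates over:

    /* [0, n/2) for normal priority clients, [n/2, n) for high priority ones. */
    static void doorbell_search_window(bool high_priority, unsigned int n,
                                       unsigned int *start, unsigned int *end)
    {
            *start = high_priority ? n / 2 : 0;
            *end = *start + n / 2;
    }
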
- */ - offset = 0; - end = GUC_NUM_DOORBELLS / 2; - if (is_high_priority(client)) { - offset = end; - end += offset; - } - - id = find_next_zero_bit(client->guc->doorbell_bitmap, end, offset); - if (id == end) - return -ENOSPC; - - __set_bit(id, client->guc->doorbell_bitmap); - client->doorbell_id = id; - DRM_DEBUG_DRIVER("client %u (high prio=%s) reserved doorbell: %d\n", - client->stage_id, yesno(is_high_priority(client)), - id); - return 0; -} - -static bool has_doorbell(struct intel_guc_client *client) -{ - if (client->doorbell_id == GUC_DOORBELL_INVALID) - return false; - - return test_bit(client->doorbell_id, client->guc->doorbell_bitmap); -} - -static void unreserve_doorbell(struct intel_guc_client *client) -{ - GEM_BUG_ON(!has_doorbell(client)); - - __clear_bit(client->doorbell_id, client->guc->doorbell_bitmap); - client->doorbell_id = GUC_DOORBELL_INVALID; -} - -/* - * Tell the GuC to allocate or deallocate a specific doorbell - */ - -static int __guc_allocate_doorbell(struct intel_guc *guc, u32 stage_id) -{ - u32 action[] = { - INTEL_GUC_ACTION_ALLOCATE_DOORBELL, - stage_id - }; - - return intel_guc_send(guc, action, ARRAY_SIZE(action)); -} - -static int __guc_deallocate_doorbell(struct intel_guc *guc, u32 stage_id) -{ - u32 action[] = { - INTEL_GUC_ACTION_DEALLOCATE_DOORBELL, - stage_id - }; - - return intel_guc_send(guc, action, ARRAY_SIZE(action)); -} - -static struct guc_stage_desc *__get_stage_desc(struct intel_guc_client *client) -{ - struct guc_stage_desc *base = client->guc->stage_desc_pool_vaddr; - - return &base[client->stage_id]; -} - -/* - * Initialise, update, or clear doorbell data shared with the GuC - * - * These functions modify shared data and so need access to the mapped - * client object which contains the page being used for the doorbell - */ - -static void __update_doorbell_desc(struct intel_guc_client *client, u16 new_id) -{ - struct guc_stage_desc *desc; - - /* Update the GuC's idea of the doorbell ID */ - desc = __get_stage_desc(client); - desc->db_id = new_id; -} - -static struct guc_doorbell_info *__get_doorbell(struct intel_guc_client *client) -{ - return client->vaddr + client->doorbell_offset; -} - -static bool __doorbell_valid(struct intel_guc *guc, u16 db_id) -{ - struct drm_i915_private *dev_priv = guc_to_i915(guc); - - GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS); - return I915_READ(GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID; -} - -static void __init_doorbell(struct intel_guc_client *client) -{ - struct guc_doorbell_info *doorbell; - - doorbell = __get_doorbell(client); - doorbell->db_status = GUC_DOORBELL_ENABLED; - doorbell->cookie = 0; -} - -static void __fini_doorbell(struct intel_guc_client *client) -{ - struct guc_doorbell_info *doorbell; - u16 db_id = client->doorbell_id; - - doorbell = __get_doorbell(client); - doorbell->db_status = GUC_DOORBELL_DISABLED; - - /* Doorbell release flow requires that we wait for GEN8_DRB_VALID bit - * to go to zero after updating db_status before we call the GuC to - * release the doorbell - */ - if (wait_for_us(!__doorbell_valid(client->guc, db_id), 10)) - WARN_ONCE(true, "Doorbell never became invalid after disable\n"); -} - -static int create_doorbell(struct intel_guc_client *client) -{ - int ret; - - if (WARN_ON(!has_doorbell(client))) - return -ENODEV; /* internal setup error, should never happen */ - - __update_doorbell_desc(client, client->doorbell_id); - __init_doorbell(client); - - ret = __guc_allocate_doorbell(client->guc, client->stage_id); - if (ret) { - __fini_doorbell(client); - 
__update_doorbell_desc(client, GUC_DOORBELL_INVALID); - DRM_DEBUG_DRIVER("Couldn't create client %u doorbell: %d\n", - client->stage_id, ret); - return ret; - } - - return 0; -} - -static int destroy_doorbell(struct intel_guc_client *client) -{ - int ret; - - GEM_BUG_ON(!has_doorbell(client)); - - __fini_doorbell(client); - ret = __guc_deallocate_doorbell(client->guc, client->stage_id); - if (ret) - DRM_ERROR("Couldn't destroy client %u doorbell: %d\n", - client->stage_id, ret); - - __update_doorbell_desc(client, GUC_DOORBELL_INVALID); - - return ret; -} - -static unsigned long __select_cacheline(struct intel_guc *guc) -{ - unsigned long offset; - - /* Doorbell uses a single cache line within a page */ - offset = offset_in_page(guc->db_cacheline); - - /* Moving to next cache line to reduce contention */ - guc->db_cacheline += cache_line_size(); - - DRM_DEBUG_DRIVER("reserved cacheline 0x%lx, next 0x%x, linesize %u\n", - offset, guc->db_cacheline, cache_line_size()); - return offset; -} - -static inline struct guc_process_desc * -__get_process_desc(struct intel_guc_client *client) -{ - return client->vaddr + client->proc_desc_offset; -} - -/* - * Initialise the process descriptor shared with the GuC firmware. - */ -static void guc_proc_desc_init(struct intel_guc_client *client) -{ - struct guc_process_desc *desc; - - desc = memset(__get_process_desc(client), 0, sizeof(*desc)); - - /* - * XXX: pDoorbell and WQVBaseAddress are pointers in process address - * space for ring3 clients (set them as in mmap_ioctl) or kernel - * space for kernel clients (map on demand instead? May make debug - * easier to have it mapped). - */ - desc->wq_base_addr = 0; - desc->db_base_addr = 0; - - desc->stage_id = client->stage_id; - desc->wq_size_bytes = GUC_WQ_SIZE; - desc->wq_status = WQ_STATUS_ACTIVE; - desc->priority = client->priority; -} - -static void guc_proc_desc_fini(struct intel_guc_client *client) -{ - struct guc_process_desc *desc; - - desc = __get_process_desc(client); - memset(desc, 0, sizeof(*desc)); -} - -static int guc_stage_desc_pool_create(struct intel_guc *guc) -{ - struct i915_vma *vma; - void *vaddr; - - vma = intel_guc_allocate_vma(guc, - PAGE_ALIGN(sizeof(struct guc_stage_desc) * - GUC_MAX_STAGE_DESCRIPTORS)); - if (IS_ERR(vma)) - return PTR_ERR(vma); - - vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB); - if (IS_ERR(vaddr)) { - i915_vma_unpin_and_release(&vma, 0); - return PTR_ERR(vaddr); - } - - guc->stage_desc_pool = vma; - guc->stage_desc_pool_vaddr = vaddr; - ida_init(&guc->stage_ids); - - return 0; -} - -static void guc_stage_desc_pool_destroy(struct intel_guc *guc) -{ - ida_destroy(&guc->stage_ids); - i915_vma_unpin_and_release(&guc->stage_desc_pool, I915_VMA_RELEASE_MAP); -} - -/* - * Initialise/clear the stage descriptor shared with the GuC firmware. - * - * This descriptor tells the GuC where (in GGTT space) to find the important - * data structures relating to this client (doorbell, process descriptor, - * write queue, etc). 
- */ -static void guc_stage_desc_init(struct intel_guc_client *client) -{ - struct intel_guc *guc = client->guc; - struct guc_stage_desc *desc; - u32 gfx_addr; - - desc = __get_stage_desc(client); - memset(desc, 0, sizeof(*desc)); - - desc->attribute = GUC_STAGE_DESC_ATTR_ACTIVE | - GUC_STAGE_DESC_ATTR_KERNEL; - if (is_high_priority(client)) - desc->attribute |= GUC_STAGE_DESC_ATTR_PREEMPT; - desc->stage_id = client->stage_id; - desc->priority = client->priority; - desc->db_id = client->doorbell_id; - - /* - * The doorbell, process descriptor, and workqueue are all parts - * of the client object, which the GuC will reference via the GGTT - */ - gfx_addr = intel_guc_ggtt_offset(guc, client->vma); - desc->db_trigger_phy = sg_dma_address(client->vma->pages->sgl) + - client->doorbell_offset; - desc->db_trigger_cpu = ptr_to_u64(__get_doorbell(client)); - desc->db_trigger_uk = gfx_addr + client->doorbell_offset; - desc->process_desc = gfx_addr + client->proc_desc_offset; - desc->wq_addr = gfx_addr + GUC_DB_SIZE; - desc->wq_size = GUC_WQ_SIZE; - - desc->desc_private = ptr_to_u64(client); -} - -static void guc_stage_desc_fini(struct intel_guc_client *client) -{ - struct guc_stage_desc *desc; - - desc = __get_stage_desc(client); - memset(desc, 0, sizeof(*desc)); -} - -/* Construct a Work Item and append it to the GuC's Work Queue */ -static void guc_wq_item_append(struct intel_guc_client *client, - u32 target_engine, u32 context_desc, - u32 ring_tail, u32 fence_id) -{ - /* wqi_len is in DWords, and does not include the one-word header */ - const size_t wqi_size = sizeof(struct guc_wq_item); - const u32 wqi_len = wqi_size / sizeof(u32) - 1; - struct guc_process_desc *desc = __get_process_desc(client); - struct guc_wq_item *wqi; - u32 wq_off; - - lockdep_assert_held(&client->wq_lock); - - /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we - * should not have the case where structure wqi is across page, neither - * wrapped to the beginning. This simplifies the implementation below. - * - * XXX: if not the case, we need save data to a temp wqi and copy it to - * workqueue buffer dw by dw. - */ - BUILD_BUG_ON(wqi_size != 16); - - /* We expect the WQ to be active if we're appending items to it */ - GEM_BUG_ON(desc->wq_status != WQ_STATUS_ACTIVE); - - /* Free space is guaranteed. */ - wq_off = READ_ONCE(desc->tail); - GEM_BUG_ON(CIRC_SPACE(wq_off, READ_ONCE(desc->head), - GUC_WQ_SIZE) < wqi_size); - GEM_BUG_ON(wq_off & (wqi_size - 1)); - - /* WQ starts from the page after doorbell / process_desc */ - wqi = client->vaddr + wq_off + GUC_DB_SIZE; - - if (I915_SELFTEST_ONLY(client->use_nop_wqi)) { - wqi->header = WQ_TYPE_NOOP | (wqi_len << WQ_LEN_SHIFT); - } else { - /* Now fill in the 4-word work queue item */ - wqi->header = WQ_TYPE_INORDER | - (wqi_len << WQ_LEN_SHIFT) | - (target_engine << WQ_TARGET_SHIFT) | - WQ_NO_WCFLUSH_WAIT; - wqi->context_desc = context_desc; - wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT; - GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX); - wqi->fence_id = fence_id; - } - - /* Make the update visible to GuC */ - WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1)); -} - -static void guc_ring_doorbell(struct intel_guc_client *client) -{ - struct guc_doorbell_info *db; - u32 cookie; - - lockdep_assert_held(&client->wq_lock); - - /* pointer of current doorbell cacheline */ - db = __get_doorbell(client); - - /* - * We're not expecting the doorbell cookie to change behind our back, - * we also need to treat 0 as a reserved value. 
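
The update just below leans on the GNU "?:" shorthand; written out long-hand (illustration only, not the driver's helper), the intent is:

    /* Advance the cookie, skipping the reserved value 0 on wrap-around. */
    static u32 next_cookie(u32 cookie)
    {
            u32 next = cookie + 1;

            return next ? next : cookie + 2;
    }
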
- */ - cookie = READ_ONCE(db->cookie); - WARN_ON_ONCE(xchg(&db->cookie, cookie + 1 ?: cookie + 2) != cookie); - - /* XXX: doorbell was lost and need to acquire it again */ - GEM_BUG_ON(db->db_status != GUC_DOORBELL_ENABLED); -} - -static void guc_add_request(struct intel_guc *guc, struct i915_request *rq) -{ - struct intel_guc_client *client = guc->execbuf_client; - struct intel_engine_cs *engine = rq->engine; - u32 ctx_desc = lower_32_bits(rq->hw_context->lrc_desc); - u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64); - - guc_wq_item_append(client, engine->guc_id, ctx_desc, - ring_tail, rq->fence.seqno); - guc_ring_doorbell(client); - - client->submissions[engine->id] += 1; -} - -/* - * When we're doing submissions using regular execlists backend, writing to - * ELSP from CPU side is enough to make sure that writes to ringbuffer pages - * pinned in mappable aperture portion of GGTT are visible to command streamer. - * Writes done by GuC on our behalf are not guaranteeing such ordering, - * therefore, to ensure the flush, we're issuing a POSTING READ. - */ -static void flush_ggtt_writes(struct i915_vma *vma) -{ - struct drm_i915_private *i915 = vma->vm->i915; - - if (i915_vma_is_map_and_fenceable(vma)) - intel_uncore_posting_read_fw(&i915->uncore, GUC_STATUS); -} - -static void guc_submit(struct intel_engine_cs *engine, - struct i915_request **out, - struct i915_request **end) -{ - struct intel_guc *guc = &engine->i915->guc; - struct intel_guc_client *client = guc->execbuf_client; - - spin_lock(&client->wq_lock); - - do { - struct i915_request *rq = *out++; - - flush_ggtt_writes(rq->ring->vma); - guc_add_request(guc, rq); - } while (out != end); - - spin_unlock(&client->wq_lock); -} - -static inline int rq_prio(const struct i915_request *rq) -{ - return rq->sched.attr.priority | __NO_PREEMPTION; -} - -static struct i915_request *schedule_in(struct i915_request *rq, int idx) -{ - trace_i915_request_in(rq, idx); - - if (!rq->hw_context->inflight) - rq->hw_context->inflight = rq->engine; - intel_context_inflight_inc(rq->hw_context); - - return i915_request_get(rq); -} - -static void schedule_out(struct i915_request *rq) -{ - trace_i915_request_out(rq); - - intel_context_inflight_dec(rq->hw_context); - if (!intel_context_inflight_count(rq->hw_context)) - rq->hw_context->inflight = NULL; - - i915_request_put(rq); -} - -static void __guc_dequeue(struct intel_engine_cs *engine) -{ - struct intel_engine_execlists * const execlists = &engine->execlists; - struct i915_request **first = execlists->inflight; - struct i915_request ** const last_port = first + execlists->port_mask; - struct i915_request *last = first[0]; - struct i915_request **port; - bool submit = false; - struct rb_node *rb; - - lockdep_assert_held(&engine->active.lock); - - if (last) { - if (*++first) - return; - - last = NULL; - } - - port = first; - while ((rb = rb_first_cached(&execlists->queue))) { - struct i915_priolist *p = to_priolist(rb); - struct i915_request *rq, *rn; - int i; - - priolist_for_each_request_consume(rq, rn, p, i) { - if (last && rq->hw_context != last->hw_context) { - if (port == last_port) - goto done; - - *port = schedule_in(last, - port - execlists->inflight); - port++; - } - - list_del_init(&rq->sched.link); - __i915_request_submit(rq); - submit = true; - last = rq; - } - - rb_erase_cached(&p->node, &execlists->queue); - i915_priolist_free(p); - } -done: - execlists->queue_priority_hint = - rb ? 
to_priolist(rb)->priority : INT_MIN; - if (submit) { - *port = schedule_in(last, port - execlists->inflight); - *++port = NULL; - guc_submit(engine, first, port); - } - execlists->active = execlists->inflight; -} - -static void guc_submission_tasklet(unsigned long data) -{ - struct intel_engine_cs * const engine = (struct intel_engine_cs *)data; - struct intel_engine_execlists * const execlists = &engine->execlists; - struct i915_request **port, *rq; - unsigned long flags; - - spin_lock_irqsave(&engine->active.lock, flags); - - for (port = execlists->inflight; (rq = *port); port++) { - if (!i915_request_completed(rq)) - break; - - schedule_out(rq); - } - if (port != execlists->inflight) { - int idx = port - execlists->inflight; - int rem = ARRAY_SIZE(execlists->inflight) - idx; - memmove(execlists->inflight, port, rem * sizeof(*port)); - } - - __guc_dequeue(engine); - - spin_unlock_irqrestore(&engine->active.lock, flags); -} - -static void guc_reset_prepare(struct intel_engine_cs *engine) -{ - struct intel_engine_execlists * const execlists = &engine->execlists; - - GEM_TRACE("%s\n", engine->name); - - /* - * Prevent request submission to the hardware until we have - * completed the reset in i915_gem_reset_finish(). If a request - * is completed by one engine, it may then queue a request - * to a second via its execlists->tasklet *just* as we are - * calling engine->init_hw() and also writing the ELSP. - * Turning off the execlists->tasklet until the reset is over - * prevents the race. - */ - __tasklet_disable_sync_once(&execlists->tasklet); -} - -static void guc_reset(struct intel_engine_cs *engine, bool stalled) -{ - struct intel_engine_execlists * const execlists = &engine->execlists; - struct i915_request *rq; - unsigned long flags; - - spin_lock_irqsave(&engine->active.lock, flags); - - execlists_cancel_port_requests(execlists); - - /* Push back any incomplete requests for replay after the reset. */ - rq = execlists_unwind_incomplete_requests(execlists); - if (!rq) - goto out_unlock; - - if (!i915_request_started(rq)) - stalled = false; - - __i915_request_reset(rq, stalled); - intel_lr_context_reset(engine, rq->hw_context, rq->head, stalled); - -out_unlock: - spin_unlock_irqrestore(&engine->active.lock, flags); -} - -static void guc_cancel_requests(struct intel_engine_cs *engine) -{ - struct intel_engine_execlists * const execlists = &engine->execlists; - struct i915_request *rq, *rn; - struct rb_node *rb; - unsigned long flags; - - GEM_TRACE("%s\n", engine->name); - - /* - * Before we call engine->cancel_requests(), we should have exclusive - * access to the submission state. This is arranged for us by the - * caller disabling the interrupt generation, the tasklet and other - * threads that may then access the same state, giving us a free hand - * to reset state. However, we still need to let lockdep be aware that - * we know this state may be accessed in hardirq context, so we - * disable the irq around this manipulation and we want to keep - * the spinlock focused on its duties and not accidentally conflate - * coverage to the submission's irq state. (Similarly, although we - * shouldn't need to disable irq around the manipulation of the - * submission's irq state, we also wish to remind ourselves that - * it is irq state.) - */ - spin_lock_irqsave(&engine->active.lock, flags); - - /* Cancel the requests on the HW and clear the ELSP tracker. */ - execlists_cancel_port_requests(execlists); - - /* Mark all executing requests as skipped. 
*/ - list_for_each_entry(rq, &engine->active.requests, sched.link) { - if (!i915_request_signaled(rq)) - dma_fence_set_error(&rq->fence, -EIO); - - i915_request_mark_complete(rq); - } - - /* Flush the queued requests to the timeline list (for retiring). */ - while ((rb = rb_first_cached(&execlists->queue))) { - struct i915_priolist *p = to_priolist(rb); - int i; - - priolist_for_each_request_consume(rq, rn, p, i) { - list_del_init(&rq->sched.link); - __i915_request_submit(rq); - dma_fence_set_error(&rq->fence, -EIO); - i915_request_mark_complete(rq); - } - - rb_erase_cached(&p->node, &execlists->queue); - i915_priolist_free(p); - } - - /* Remaining _unready_ requests will be nop'ed when submitted */ - - execlists->queue_priority_hint = INT_MIN; - execlists->queue = RB_ROOT_CACHED; - - spin_unlock_irqrestore(&engine->active.lock, flags); -} - -static void guc_reset_finish(struct intel_engine_cs *engine) -{ - struct intel_engine_execlists * const execlists = &engine->execlists; - - if (__tasklet_enable(&execlists->tasklet)) - /* And kick in case we missed a new request submission. */ - tasklet_hi_schedule(&execlists->tasklet); - - GEM_TRACE("%s: depth->%d\n", engine->name, - atomic_read(&execlists->tasklet.count)); -} - -/* - * Everything below here is concerned with setup & teardown, and is - * therefore not part of the somewhat time-critical batch-submission - * path of guc_submit() above. - */ - -/* Check that a doorbell register is in the expected state */ -static bool doorbell_ok(struct intel_guc *guc, u16 db_id) -{ - bool valid; - - GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS); - - valid = __doorbell_valid(guc, db_id); - - if (test_bit(db_id, guc->doorbell_bitmap) == valid) - return true; - - DRM_DEBUG_DRIVER("Doorbell %u has unexpected state: valid=%s\n", - db_id, yesno(valid)); - - return false; -} - -static bool guc_verify_doorbells(struct intel_guc *guc) -{ - bool doorbells_ok = true; - u16 db_id; - - for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id) - if (!doorbell_ok(guc, db_id)) - doorbells_ok = false; - - return doorbells_ok; -} - -/** - * guc_client_alloc() - Allocate an intel_guc_client - * @guc: the intel_guc structure - * @priority: four levels priority _CRITICAL, _HIGH, _NORMAL and _LOW - * The kernel client to replace ExecList submission is created with - * NORMAL priority. Priority of a client for scheduler can be HIGH, - * while a preemption context can use CRITICAL. - * - * Return: An intel_guc_client object if success, else NULL. - */ -static struct intel_guc_client * -guc_client_alloc(struct intel_guc *guc, u32 priority) -{ - struct intel_guc_client *client; - struct i915_vma *vma; - void *vaddr; - int ret; - - client = kzalloc(sizeof(*client), GFP_KERNEL); - if (!client) - return ERR_PTR(-ENOMEM); - - client->guc = guc; - client->priority = priority; - client->doorbell_id = GUC_DOORBELL_INVALID; - spin_lock_init(&client->wq_lock); - - ret = ida_simple_get(&guc->stage_ids, 0, GUC_MAX_STAGE_DESCRIPTORS, - GFP_KERNEL); - if (ret < 0) - goto err_client; - - client->stage_id = ret; - - /* The first page is doorbell/proc_desc. Two followed pages are wq. */ - vma = intel_guc_allocate_vma(guc, GUC_DB_SIZE + GUC_WQ_SIZE); - if (IS_ERR(vma)) { - ret = PTR_ERR(vma); - goto err_id; - } - - /* We'll keep just the first (doorbell/proc) page permanently kmap'd. 
*/ - client->vma = vma; - - vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB); - if (IS_ERR(vaddr)) { - ret = PTR_ERR(vaddr); - goto err_vma; - } - client->vaddr = vaddr; - - ret = reserve_doorbell(client); - if (ret) - goto err_vaddr; - - client->doorbell_offset = __select_cacheline(guc); - - /* - * Since the doorbell only requires a single cacheline, we can save - * space by putting the application process descriptor in the same - * page. Use the half of the page that doesn't include the doorbell. - */ - if (client->doorbell_offset >= (GUC_DB_SIZE / 2)) - client->proc_desc_offset = 0; - else - client->proc_desc_offset = (GUC_DB_SIZE / 2); - - DRM_DEBUG_DRIVER("new priority %u client %p: stage_id %u\n", - priority, client, client->stage_id); - DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%lx\n", - client->doorbell_id, client->doorbell_offset); - - return client; - -err_vaddr: - i915_gem_object_unpin_map(client->vma->obj); -err_vma: - i915_vma_unpin_and_release(&client->vma, 0); -err_id: - ida_simple_remove(&guc->stage_ids, client->stage_id); -err_client: - kfree(client); - return ERR_PTR(ret); -} - -static void guc_client_free(struct intel_guc_client *client) -{ - unreserve_doorbell(client); - i915_vma_unpin_and_release(&client->vma, I915_VMA_RELEASE_MAP); - ida_simple_remove(&client->guc->stage_ids, client->stage_id); - kfree(client); -} - -static inline bool ctx_save_restore_disabled(struct intel_context *ce) -{ - u32 sr = ce->lrc_reg_state[CTX_CONTEXT_CONTROL + 1]; - -#define SR_DISABLED \ - _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | \ - CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT) - - return (sr & SR_DISABLED) == SR_DISABLED; - -#undef SR_DISABLED -} - -static int guc_clients_create(struct intel_guc *guc) -{ - struct intel_guc_client *client; - - GEM_BUG_ON(guc->execbuf_client); - - client = guc_client_alloc(guc, GUC_CLIENT_PRIORITY_KMD_NORMAL); - if (IS_ERR(client)) { - DRM_ERROR("Failed to create GuC client for submission!\n"); - return PTR_ERR(client); - } - guc->execbuf_client = client; - - return 0; -} - -static void guc_clients_destroy(struct intel_guc *guc) -{ - struct intel_guc_client *client; - - client = fetch_and_zero(&guc->execbuf_client); - if (client) - guc_client_free(client); -} - -static int __guc_client_enable(struct intel_guc_client *client) -{ - int ret; - - guc_proc_desc_init(client); - guc_stage_desc_init(client); - - ret = create_doorbell(client); - if (ret) - goto fail; - - return 0; - -fail: - guc_stage_desc_fini(client); - guc_proc_desc_fini(client); - return ret; -} - -static void __guc_client_disable(struct intel_guc_client *client) -{ - /* - * By the time we're here, GuC may have already been reset. if that is - * the case, instead of trying (in vain) to communicate with it, let's - * just cleanup the doorbell HW and our internal state. - */ - if (intel_guc_is_loaded(client->guc)) - destroy_doorbell(client); - else - __fini_doorbell(client); - - guc_stage_desc_fini(client); - guc_proc_desc_fini(client); -} - -static int guc_clients_enable(struct intel_guc *guc) -{ - return __guc_client_enable(guc->execbuf_client); -} - -static void guc_clients_disable(struct intel_guc *guc) -{ - if (guc->execbuf_client) - __guc_client_disable(guc->execbuf_client); -} - -/* - * Set up the memory resources to be shared with the GuC (via the GGTT) - * at firmware loading time. 
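Because the doorbell occupies a single cacheline of the first page, guc_client_alloc() above places the process descriptor in whichever half of that page the doorbell does not use. A userspace sketch of that placement rule, assuming the doorbell area is one 4 KiB page:

#include <assert.h>
#include <stdio.h>

#define GUC_DB_SIZE     4096u   /* assumption: the doorbell area is one page */

/* Put the process descriptor in the half of the page without the doorbell. */
static unsigned int proc_desc_offset(unsigned int doorbell_offset)
{
        assert(doorbell_offset < GUC_DB_SIZE);

        return doorbell_offset >= GUC_DB_SIZE / 2 ? 0 : GUC_DB_SIZE / 2;
}

int main(void)
{
        printf("doorbell at 0x%x -> proc desc at 0x%x\n", 0x40u, proc_desc_offset(0x40u));
        printf("doorbell at 0x%x -> proc desc at 0x%x\n", 0xa00u, proc_desc_offset(0xa00u));
        return 0;
}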
- */ -int intel_guc_submission_init(struct intel_guc *guc) -{ - int ret; - - if (guc->stage_desc_pool) - return 0; - - ret = guc_stage_desc_pool_create(guc); - if (ret) - return ret; - /* - * Keep static analysers happy, let them know that we allocated the - * vma after testing that it didn't exist earlier. - */ - GEM_BUG_ON(!guc->stage_desc_pool); - - WARN_ON(!guc_verify_doorbells(guc)); - ret = guc_clients_create(guc); - if (ret) - goto err_pool; - - return 0; - -err_pool: - guc_stage_desc_pool_destroy(guc); - return ret; -} - -void intel_guc_submission_fini(struct intel_guc *guc) -{ - guc_clients_destroy(guc); - WARN_ON(!guc_verify_doorbells(guc)); - - if (guc->stage_desc_pool) - guc_stage_desc_pool_destroy(guc); -} - -static void guc_interrupts_capture(struct drm_i915_private *dev_priv) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - struct intel_engine_cs *engine; - enum intel_engine_id id; - int irqs; - - /* tell all command streamers to forward interrupts (but not vblank) - * to GuC - */ - irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING); - for_each_engine(engine, dev_priv, id) - ENGINE_WRITE(engine, RING_MODE_GEN7, irqs); - - /* route USER_INTERRUPT to Host, all others are sent to GuC. */ - irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | - GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT; - /* These three registers have the same bit definitions */ - I915_WRITE(GUC_BCS_RCS_IER, ~irqs); - I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs); - I915_WRITE(GUC_WD_VECS_IER, ~irqs); - - /* - * The REDIRECT_TO_GUC bit of the PMINTRMSK register directs all - * (unmasked) PM interrupts to the GuC. All other bits of this - * register *disable* generation of a specific interrupt. - * - * 'pm_intrmsk_mbz' indicates bits that are NOT to be set when - * writing to the PM interrupt mask register, i.e. interrupts - * that must not be disabled. - * - * If the GuC is handling these interrupts, then we must not let - * the PM code disable ANY interrupt that the GuC is expecting. - * So for each ENABLED (0) bit in this register, we must SET the - * bit in pm_intrmsk_mbz so that it's left enabled for the GuC. - * GuC needs ARAT expired interrupt unmasked hence it is set in - * pm_intrmsk_mbz. - * - * Here we CLEAR REDIRECT_TO_GUC bit in pm_intrmsk_mbz, which will - * result in the register bit being left SET! - */ - rps->pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK; - rps->pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC; -} - -static void guc_interrupts_release(struct drm_i915_private *dev_priv) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - struct intel_engine_cs *engine; - enum intel_engine_id id; - int irqs; - - /* - * tell all command streamers NOT to forward interrupts or vblank - * to GuC. 
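The RING_MODE and IER programming above (and in guc_interrupts_release() just below) uses i915's masked-register convention: the upper 16 bits of the written value select which of the lower 16 bits the hardware actually latches. The helper macros are sketched here from their intent rather than copied from i915_reg.h, and the bit position is purely illustrative:

#include <stdint.h>
#include <stdio.h>

/* Upper halfword = write-enable mask, lower halfword = new bit values. */
#define MASKED_FIELD(mask, value)       (((uint32_t)(mask) << 16) | (value))
#define MASKED_BIT_ENABLE(bit)          MASKED_FIELD((bit), (bit))
#define MASKED_BIT_DISABLE(bit)         MASKED_FIELD((bit), 0)

#define GFX_INTERRUPT_STEERING          (1u << 4)       /* illustrative bit position */

int main(void)
{
        printf("enable steering:  0x%08x\n", MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING));
        printf("disable steering: 0x%08x\n", MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING));
        return 0;
}

Untouched bits keep their value because their mask bits stay clear, which is why the code above can write a single engine-mode bit without a read-modify-write.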
- */ - irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER); - irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING); - for_each_engine(engine, dev_priv, id) - ENGINE_WRITE(engine, RING_MODE_GEN7, irqs); - - /* route all GT interrupts to the host */ - I915_WRITE(GUC_BCS_RCS_IER, 0); - I915_WRITE(GUC_VCS2_VCS1_IER, 0); - I915_WRITE(GUC_WD_VECS_IER, 0); - - rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC; - rps->pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK; -} - -static void guc_submission_park(struct intel_engine_cs *engine) -{ - intel_engine_park(engine); - intel_engine_unpin_breadcrumbs_irq(engine); - engine->flags &= ~I915_ENGINE_NEEDS_BREADCRUMB_TASKLET; -} - -static void guc_submission_unpark(struct intel_engine_cs *engine) -{ - engine->flags |= I915_ENGINE_NEEDS_BREADCRUMB_TASKLET; - intel_engine_pin_breadcrumbs_irq(engine); -} - -static void guc_set_default_submission(struct intel_engine_cs *engine) -{ - /* - * We inherit a bunch of functions from execlists that we'd like - * to keep using: - * - * engine->submit_request = execlists_submit_request; - * engine->cancel_requests = execlists_cancel_requests; - * engine->schedule = execlists_schedule; - * - * But we need to override the actual submission backend in order - * to talk to the GuC. - */ - intel_execlists_set_default_submission(engine); - - engine->execlists.tasklet.func = guc_submission_tasklet; - - engine->park = guc_submission_park; - engine->unpark = guc_submission_unpark; - - engine->reset.prepare = guc_reset_prepare; - engine->reset.reset = guc_reset; - engine->reset.finish = guc_reset_finish; - - engine->cancel_requests = guc_cancel_requests; - - engine->flags &= ~I915_ENGINE_SUPPORTS_STATS; -} - -int intel_guc_submission_enable(struct intel_guc *guc) -{ - struct drm_i915_private *dev_priv = guc_to_i915(guc); - struct intel_engine_cs *engine; - enum intel_engine_id id; - int err; - - /* - * We're using GuC work items for submitting work through GuC. Since - * we're coalescing multiple requests from a single context into a - * single work item prior to assigning it to execlist_port, we can - * never have more work items than the total number of ports (for all - * engines). The GuC firmware is controlling the HEAD of work queue, - * and it is guaranteed that it will remove the work item from the - * queue before our request is completed. 
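The BUILD_BUG_ON immediately below this comment encodes the sizing argument: with at most one work item per inflight port per engine, the shared work queue must have room for all of them. The same compile-time check in isolation, using placeholder sizes rather than the real GuC definitions:

#include <stdio.h>

/* Placeholder numbers; the driver's real values live in the GuC headers. */
#define GUC_WQ_SIZE             (2 * 4096)
#define NUM_INFLIGHT_PORTS      2
#define NUM_ENGINES             8
#define WQ_ITEM_SIZE            32      /* sizeof(struct guc_wq_item) stand-in */

/* Fail the build if every engine filling every port could overflow the WQ. */
_Static_assert(NUM_INFLIGHT_PORTS * WQ_ITEM_SIZE * NUM_ENGINES <= GUC_WQ_SIZE,
               "work queue too small for worst-case submission");

int main(void)
{
        printf("worst case usage: %d of %d bytes\n",
               NUM_INFLIGHT_PORTS * WQ_ITEM_SIZE * NUM_ENGINES, GUC_WQ_SIZE);
        return 0;
}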
- */ - BUILD_BUG_ON(ARRAY_SIZE(engine->execlists.inflight) * - sizeof(struct guc_wq_item) * - I915_NUM_ENGINES > GUC_WQ_SIZE); - - GEM_BUG_ON(!guc->execbuf_client); - - err = guc_clients_enable(guc); - if (err) - return err; - - /* Take over from manual control of ELSP (execlists) */ - guc_interrupts_capture(dev_priv); - - for_each_engine(engine, dev_priv, id) { - engine->set_default_submission = guc_set_default_submission; - engine->set_default_submission(engine); - } - - return 0; -} - -void intel_guc_submission_disable(struct intel_guc *guc) -{ - struct drm_i915_private *dev_priv = guc_to_i915(guc); - - GEM_BUG_ON(dev_priv->gt.awake); /* GT should be parked first */ - - guc_interrupts_release(dev_priv); - guc_clients_disable(guc); -} - -#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) -#include "selftests/intel_guc.c" -#endif diff --git a/drivers/gpu/drm/i915/intel_guc_submission.h b/drivers/gpu/drm/i915/intel_guc_submission.h deleted file mode 100644 index 87a38cb6faf3..000000000000 --- a/drivers/gpu/drm/i915/intel_guc_submission.h +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright © 2014-2017 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - */ - -#ifndef _INTEL_GUC_SUBMISSION_H_ -#define _INTEL_GUC_SUBMISSION_H_ - -#include - -#include "gt/intel_engine_types.h" - -#include "i915_gem.h" -#include "i915_selftest.h" - -struct drm_i915_private; - -/* - * This structure primarily describes the GEM object shared with the GuC. - * The specs sometimes refer to this object as a "GuC context", but we use - * the term "client" to avoid confusion with hardware contexts. This - * GEM object is held for the entire lifetime of our interaction with - * the GuC, being allocated before the GuC is loaded with its firmware. - * Because there's no way to update the address used by the GuC after - * initialisation, the shared object must stay pinned into the GGTT as - * long as the GuC is in use. We also keep the first page (only) mapped - * into kernel address space, as it includes shared data that must be - * updated on every request submission. - * - * The single GEM object described here is actually made up of several - * separate areas, as far as the GuC is concerned. The first page (kept - * kmap'd) includes the "process descriptor" which holds sequence data for - * the doorbell, and one cacheline which actually *is* the doorbell; a - * write to this will "ring the doorbell" (i.e. send an interrupt to the - * GuC). 
The subsequent pages of the client object constitute the work - * queue (a circular array of work items), again described in the process - * descriptor. Work queue pages are mapped momentarily as required. - */ -struct intel_guc_client { - struct i915_vma *vma; - void *vaddr; - struct intel_guc *guc; - - /* bitmap of (host) engine ids */ - u32 priority; - u32 stage_id; - u32 proc_desc_offset; - - u16 doorbell_id; - unsigned long doorbell_offset; - - /* Protects GuC client's WQ access */ - spinlock_t wq_lock; - /* Per-engine counts of GuC submissions */ - u64 submissions[I915_NUM_ENGINES]; - - /* For testing purposes, use nop WQ items instead of real ones */ - I915_SELFTEST_DECLARE(bool use_nop_wqi); -}; - -int intel_guc_submission_init(struct intel_guc *guc); -int intel_guc_submission_enable(struct intel_guc *guc); -void intel_guc_submission_disable(struct intel_guc *guc); -void intel_guc_submission_fini(struct intel_guc *guc); -int intel_guc_preempt_work_create(struct intel_guc *guc); -void intel_guc_preempt_work_destroy(struct intel_guc *guc); - -#endif diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c deleted file mode 100644 index 2a41ee89a16d..000000000000 --- a/drivers/gpu/drm/i915/intel_huc.c +++ /dev/null @@ -1,174 +0,0 @@ -/* - * Copyright © 2016-2017 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - */ - -#include - -#include "intel_huc.h" -#include "i915_drv.h" - -void intel_huc_init_early(struct intel_huc *huc) -{ - struct drm_i915_private *i915 = huc_to_i915(huc); - - intel_huc_fw_init_early(huc); - - if (INTEL_GEN(i915) >= 11) { - huc->status.reg = GEN11_HUC_KERNEL_LOAD_INFO; - huc->status.mask = HUC_LOAD_SUCCESSFUL; - huc->status.value = HUC_LOAD_SUCCESSFUL; - } else { - huc->status.reg = HUC_STATUS2; - huc->status.mask = HUC_FW_VERIFIED; - huc->status.value = HUC_FW_VERIFIED; - } -} - -static int intel_huc_rsa_data_create(struct intel_huc *huc) -{ - struct drm_i915_private *i915 = huc_to_i915(huc); - struct intel_guc *guc = &i915->guc; - struct i915_vma *vma; - void *vaddr; - - /* - * HuC firmware will sit above GUC_GGTT_TOP and will not map - * through GTT. Unfortunately, this means GuC cannot perform - * the HuC auth. as the rsa offset now falls within the GuC - * inaccessible range. We resort to perma-pinning an additional - * vma within the accessible range that only contains the rsa - * signature. 
The GuC can use this extra pinning to perform - * the authentication since its GGTT offset will be GuC - * accessible. - */ - vma = intel_guc_allocate_vma(guc, PAGE_SIZE); - if (IS_ERR(vma)) - return PTR_ERR(vma); - - vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB); - if (IS_ERR(vaddr)) { - i915_vma_unpin_and_release(&vma, 0); - return PTR_ERR(vaddr); - } - - huc->rsa_data = vma; - huc->rsa_data_vaddr = vaddr; - - return 0; -} - -static void intel_huc_rsa_data_destroy(struct intel_huc *huc) -{ - i915_vma_unpin_and_release(&huc->rsa_data, I915_VMA_RELEASE_MAP); -} - -int intel_huc_init(struct intel_huc *huc) -{ - int err; - - err = intel_huc_rsa_data_create(huc); - if (err) - return err; - - return intel_uc_fw_init(&huc->fw); -} - -void intel_huc_fini(struct intel_huc *huc) -{ - intel_uc_fw_fini(&huc->fw); - intel_huc_rsa_data_destroy(huc); -} - -/** - * intel_huc_auth() - Authenticate HuC uCode - * @huc: intel_huc structure - * - * Called after HuC and GuC firmware loading during intel_uc_init_hw(). - * - * This function pins HuC firmware image object into GGTT. - * Then it invokes GuC action to authenticate passing the offset to RSA - * signature through intel_guc_auth_huc(). It then waits for 50ms for - * firmware verification ACK and unpins the object. - */ -int intel_huc_auth(struct intel_huc *huc) -{ - struct drm_i915_private *i915 = huc_to_i915(huc); - struct intel_guc *guc = &i915->guc; - int ret; - - if (huc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS) - return -ENOEXEC; - - ret = intel_guc_auth_huc(guc, - intel_guc_ggtt_offset(guc, huc->rsa_data)); - if (ret) { - DRM_ERROR("HuC: GuC did not ack Auth request %d\n", ret); - goto fail; - } - - /* Check authentication status, it should be done by now */ - ret = __intel_wait_for_register(&i915->uncore, - huc->status.reg, - huc->status.mask, - huc->status.value, - 2, 50, NULL); - if (ret) { - DRM_ERROR("HuC: Firmware not verified %d\n", ret); - goto fail; - } - - return 0; - -fail: - huc->fw.load_status = INTEL_UC_FIRMWARE_FAIL; - - DRM_ERROR("HuC: Authentication failed %d\n", ret); - return ret; -} - -/** - * intel_huc_check_status() - check HuC status - * @huc: intel_huc structure - * - * This function reads status register to verify if HuC - * firmware was successfully loaded. - * - * Returns: 1 if HuC firmware is loaded and verified, - * 0 if HuC firmware is not loaded and -ENODEV if HuC - * is not present on this platform. 
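intel_huc_auth() above waits for the HuC status register to report the verified bit by polling with a mask and an expected value. A self-contained sketch of that mask/value polling pattern; the register read is simulated, the bit position is invented, and the fast/slow wait phases of __intel_wait_for_register() are reduced to a simple attempt counter:

#include <stdio.h>

#define HUC_FW_VERIFIED_BIT     (1u << 7)       /* illustrative bit */

static unsigned int fake_status;        /* stands in for a HUC_STATUS read */

static unsigned int read_status(void)
{
        /* Pretend firmware verification completes after a few polls. */
        static int polls;

        if (++polls == 3)
                fake_status |= HUC_FW_VERIFIED_BIT;
        return fake_status;
}

/* Poll until (reg & mask) == value or we run out of attempts. */
static int wait_for_status(unsigned int mask, unsigned int value, int attempts)
{
        while (attempts--) {
                if ((read_status() & mask) == value)
                        return 0;
        }
        return -1;      /* -ETIMEDOUT in the real code */
}

int main(void)
{
        printf("auth %s\n",
               wait_for_status(HUC_FW_VERIFIED_BIT, HUC_FW_VERIFIED_BIT, 50) ?
               "timed out" : "ok");
        return 0;
}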
- */ -int intel_huc_check_status(struct intel_huc *huc) -{ - struct drm_i915_private *dev_priv = huc_to_i915(huc); - intel_wakeref_t wakeref; - bool status = false; - - if (!HAS_HUC(dev_priv)) - return -ENODEV; - - with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) - status = (I915_READ(huc->status.reg) & huc->status.mask) == - huc->status.value; - - return status; -} diff --git a/drivers/gpu/drm/i915/intel_huc.h b/drivers/gpu/drm/i915/intel_huc.h deleted file mode 100644 index 9fa3d4629f2e..000000000000 --- a/drivers/gpu/drm/i915/intel_huc.h +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright © 2014-2017 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - */ - -#ifndef _INTEL_HUC_H_ -#define _INTEL_HUC_H_ - -#include "i915_reg.h" -#include "intel_uc_fw.h" -#include "intel_huc_fw.h" - -struct intel_huc { - /* Generic uC firmware management */ - struct intel_uc_fw fw; - - /* HuC-specific additions */ - struct i915_vma *rsa_data; - void *rsa_data_vaddr; - - struct { - i915_reg_t reg; - u32 mask; - u32 value; - } status; -}; - -void intel_huc_init_early(struct intel_huc *huc); -int intel_huc_init(struct intel_huc *huc); -void intel_huc_fini(struct intel_huc *huc); -int intel_huc_auth(struct intel_huc *huc); -int intel_huc_check_status(struct intel_huc *huc); - -static inline int intel_huc_sanitize(struct intel_huc *huc) -{ - intel_uc_fw_sanitize(&huc->fw); - return 0; -} - -#endif diff --git a/drivers/gpu/drm/i915/intel_huc_fw.c b/drivers/gpu/drm/i915/intel_huc_fw.c deleted file mode 100644 index 06e726ba9863..000000000000 --- a/drivers/gpu/drm/i915/intel_huc_fw.c +++ /dev/null @@ -1,219 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2014-2018 Intel Corporation - */ - -#include "intel_huc_fw.h" -#include "i915_drv.h" - -/** - * DOC: HuC Firmware - * - * Motivation: - * GEN9 introduces a new dedicated firmware for usage in media HEVC (High - * Efficiency Video Coding) operations. Userspace can use the firmware - * capabilities by adding HuC specific commands to batch buffers. - * - * Implementation: - * The same firmware loader is used as the GuC. However, the actual - * loading to HW is deferred until GEM initialization is done. - * - * Note that HuC firmware loading must be done before GuC loading. 
- */ - -#define BXT_HUC_FW_MAJOR 01 -#define BXT_HUC_FW_MINOR 8 -#define BXT_BLD_NUM 2893 - -#define SKL_HUC_FW_MAJOR 01 -#define SKL_HUC_FW_MINOR 07 -#define SKL_BLD_NUM 1398 - -#define KBL_HUC_FW_MAJOR 02 -#define KBL_HUC_FW_MINOR 00 -#define KBL_BLD_NUM 1810 - -#define GLK_HUC_FW_MAJOR 03 -#define GLK_HUC_FW_MINOR 01 -#define GLK_BLD_NUM 2893 - -#define ICL_HUC_FW_MAJOR 8 -#define ICL_HUC_FW_MINOR 4 -#define ICL_BLD_NUM 3238 - -#define HUC_FW_PATH(platform, major, minor, bld_num) \ - "i915/" __stringify(platform) "_huc_ver" __stringify(major) "_" \ - __stringify(minor) "_" __stringify(bld_num) ".bin" - -#define I915_SKL_HUC_UCODE HUC_FW_PATH(skl, SKL_HUC_FW_MAJOR, \ - SKL_HUC_FW_MINOR, SKL_BLD_NUM) -MODULE_FIRMWARE(I915_SKL_HUC_UCODE); - -#define I915_BXT_HUC_UCODE HUC_FW_PATH(bxt, BXT_HUC_FW_MAJOR, \ - BXT_HUC_FW_MINOR, BXT_BLD_NUM) -MODULE_FIRMWARE(I915_BXT_HUC_UCODE); - -#define I915_KBL_HUC_UCODE HUC_FW_PATH(kbl, KBL_HUC_FW_MAJOR, \ - KBL_HUC_FW_MINOR, KBL_BLD_NUM) -MODULE_FIRMWARE(I915_KBL_HUC_UCODE); - -#define I915_GLK_HUC_UCODE HUC_FW_PATH(glk, GLK_HUC_FW_MAJOR, \ - GLK_HUC_FW_MINOR, GLK_BLD_NUM) -MODULE_FIRMWARE(I915_GLK_HUC_UCODE); - -#define I915_ICL_HUC_UCODE HUC_FW_PATH(icl, ICL_HUC_FW_MAJOR, \ - ICL_HUC_FW_MINOR, ICL_BLD_NUM) -MODULE_FIRMWARE(I915_ICL_HUC_UCODE); - -static void huc_fw_select(struct intel_uc_fw *huc_fw) -{ - struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw); - struct drm_i915_private *dev_priv = huc_to_i915(huc); - - GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC); - - if (!HAS_HUC(dev_priv)) { - huc_fw->fetch_status = INTEL_UC_FIRMWARE_NOT_SUPPORTED; - return; - } - - huc_fw->fetch_status = INTEL_UC_FIRMWARE_NOT_STARTED; - - if (i915_modparams.huc_firmware_path) { - huc_fw->path = i915_modparams.huc_firmware_path; - huc_fw->major_ver_wanted = 0; - huc_fw->minor_ver_wanted = 0; - } else if (IS_SKYLAKE(dev_priv)) { - huc_fw->path = I915_SKL_HUC_UCODE; - huc_fw->major_ver_wanted = SKL_HUC_FW_MAJOR; - huc_fw->minor_ver_wanted = SKL_HUC_FW_MINOR; - } else if (IS_BROXTON(dev_priv)) { - huc_fw->path = I915_BXT_HUC_UCODE; - huc_fw->major_ver_wanted = BXT_HUC_FW_MAJOR; - huc_fw->minor_ver_wanted = BXT_HUC_FW_MINOR; - } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) { - huc_fw->path = I915_KBL_HUC_UCODE; - huc_fw->major_ver_wanted = KBL_HUC_FW_MAJOR; - huc_fw->minor_ver_wanted = KBL_HUC_FW_MINOR; - } else if (IS_GEMINILAKE(dev_priv)) { - huc_fw->path = I915_GLK_HUC_UCODE; - huc_fw->major_ver_wanted = GLK_HUC_FW_MAJOR; - huc_fw->minor_ver_wanted = GLK_HUC_FW_MINOR; - } else if (IS_ICELAKE(dev_priv)) { - huc_fw->path = I915_ICL_HUC_UCODE; - huc_fw->major_ver_wanted = ICL_HUC_FW_MAJOR; - huc_fw->minor_ver_wanted = ICL_HUC_FW_MINOR; - } -} - -/** - * intel_huc_fw_init_early() - initializes HuC firmware struct - * @huc: intel_huc struct - * - * On platforms with HuC selects firmware for uploading - */ -void intel_huc_fw_init_early(struct intel_huc *huc) -{ - struct intel_uc_fw *huc_fw = &huc->fw; - - intel_uc_fw_init_early(huc_fw, INTEL_UC_FW_TYPE_HUC); - huc_fw_select(huc_fw); -} - -static void huc_xfer_rsa(struct intel_huc *huc) -{ - struct intel_uc_fw *fw = &huc->fw; - struct sg_table *pages = fw->obj->mm.pages; - - /* - * HuC firmware image is outside GuC accessible range. 
- * Copy the RSA signature out of the image into - * the perma-pinned region set aside for it - */ - sg_pcopy_to_buffer(pages->sgl, pages->nents, - huc->rsa_data_vaddr, fw->rsa_size, - fw->rsa_offset); -} - -static int huc_xfer_ucode(struct intel_huc *huc) -{ - struct intel_uc_fw *huc_fw = &huc->fw; - struct drm_i915_private *dev_priv = huc_to_i915(huc); - struct intel_uncore *uncore = &dev_priv->uncore; - unsigned long offset = 0; - u32 size; - int ret; - - GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC); - - intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); - - /* Set the source address for the uCode */ - offset = intel_uc_fw_ggtt_offset(huc_fw) + - huc_fw->header_offset; - intel_uncore_write(uncore, DMA_ADDR_0_LOW, - lower_32_bits(offset)); - intel_uncore_write(uncore, DMA_ADDR_0_HIGH, - upper_32_bits(offset) & 0xFFFF); - - /* - * Hardware doesn't look at destination address for HuC. Set it to 0, - * but still program the correct address space. - */ - intel_uncore_write(uncore, DMA_ADDR_1_LOW, 0); - intel_uncore_write(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM); - - size = huc_fw->header_size + huc_fw->ucode_size; - intel_uncore_write(uncore, DMA_COPY_SIZE, size); - - /* Start the DMA */ - intel_uncore_write(uncore, DMA_CTRL, - _MASKED_BIT_ENABLE(HUC_UKERNEL | START_DMA)); - - /* Wait for DMA to finish */ - ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100); - - DRM_DEBUG_DRIVER("HuC DMA transfer wait over with ret %d\n", ret); - - /* Disable the bits once DMA is over */ - intel_uncore_write(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(HUC_UKERNEL)); - - intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); - - return ret; -} - -/** - * huc_fw_xfer() - DMA's the firmware - * @huc_fw: the firmware descriptor - * - * Transfer the firmware image to RAM for execution by the microcontroller. - * - * Return: 0 on success, non-zero on failure - */ -static int huc_fw_xfer(struct intel_uc_fw *huc_fw) -{ - struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw); - - huc_xfer_rsa(huc); - - return huc_xfer_ucode(huc); -} - -/** - * intel_huc_fw_upload() - load HuC uCode to device - * @huc: intel_huc structure - * - * Called from intel_uc_init_hw() during driver load, resume from sleep and - * after a GPU reset. Note that HuC must be loaded before GuC. - * - * The firmware image should have already been fetched into memory, so only - * check that fetch succeeded, and then transfer the image to the h/w. 
- * - * Return: non-zero code on error - */ -int intel_huc_fw_upload(struct intel_huc *huc) -{ - return intel_uc_fw_upload(&huc->fw, huc_fw_xfer); -} diff --git a/drivers/gpu/drm/i915/intel_huc_fw.h b/drivers/gpu/drm/i915/intel_huc_fw.h deleted file mode 100644 index 8a00a0ebddc5..000000000000 --- a/drivers/gpu/drm/i915/intel_huc_fw.h +++ /dev/null @@ -1,15 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2014-2018 Intel Corporation - */ - -#ifndef _INTEL_HUC_FW_H_ -#define _INTEL_HUC_FW_H_ - -struct intel_huc; - -void intel_huc_fw_init_early(struct intel_huc *huc); -int intel_huc_fw_upload(struct intel_huc *huc); - -#endif diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c deleted file mode 100644 index 4ea7661705b1..000000000000 --- a/drivers/gpu/drm/i915/intel_uc.c +++ /dev/null @@ -1,635 +0,0 @@ -/* - * Copyright © 2016 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - */ - -#include "gt/intel_reset.h" -#include "intel_uc.h" -#include "intel_guc.h" -#include "intel_guc_ads.h" -#include "intel_guc_submission.h" -#include "i915_drv.h" - -static void guc_free_load_err_log(struct intel_guc *guc); - -/* Reset GuC providing us with fresh state for both GuC and HuC. 
- */ -static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv) -{ - int ret; - u32 guc_status; - - ret = intel_reset_guc(&dev_priv->gt); - if (ret) { - DRM_ERROR("Failed to reset GuC, ret = %d\n", ret); - return ret; - } - - guc_status = I915_READ(GUC_STATUS); - WARN(!(guc_status & GS_MIA_IN_RESET), - "GuC status: 0x%x, MIA core expected to be in reset\n", - guc_status); - - return ret; -} - -static int __get_platform_enable_guc(struct drm_i915_private *i915) -{ - struct intel_uc_fw *guc_fw = &i915->guc.fw; - struct intel_uc_fw *huc_fw = &i915->huc.fw; - int enable_guc = 0; - - if (!HAS_GUC(i915)) - return 0; - - /* We don't want to enable GuC/HuC on pre-Gen11 by default */ - if (INTEL_GEN(i915) < 11) - return 0; - - if (intel_uc_fw_is_selected(guc_fw) && intel_uc_fw_is_selected(huc_fw)) - enable_guc |= ENABLE_GUC_LOAD_HUC; - - return enable_guc; -} - -static int __get_default_guc_log_level(struct drm_i915_private *i915) -{ - int guc_log_level; - - if (!intel_uc_fw_supported(&i915->guc.fw) || - !intel_uc_is_using_guc(i915)) - guc_log_level = GUC_LOG_LEVEL_DISABLED; - else if (IS_ENABLED(CONFIG_DRM_I915_DEBUG) || - IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) - guc_log_level = GUC_LOG_LEVEL_MAX; - else - guc_log_level = GUC_LOG_LEVEL_NON_VERBOSE; - - /* Any platform specific fine-tuning can be done here */ - - return guc_log_level; -} - -/** - * sanitize_options_early - sanitize uC related modparam options - * @i915: device private - * - * In case of "enable_guc" option this function will attempt to modify - * it only if it was initially set to "auto(-1)". Default value for this - * modparam varies between platforms and it is hardcoded in driver code. - * Any other modparam value is only monitored against availability of the - * related hardware or firmware definitions. - * - * In case of "guc_log_level" option this function will attempt to modify - * it only if it was initially set to "auto(-1)" or if initial value was - * "enable(1..4)" on platforms without the GuC. Default value for this - * modparam varies between platforms and is usually set to "disable(0)" - * unless GuC is enabled on given platform and the driver is compiled with - * debug config when this modparam will default to "enable(1..4)". - */ -static void sanitize_options_early(struct drm_i915_private *i915) -{ - struct intel_uc_fw *guc_fw = &i915->guc.fw; - struct intel_uc_fw *huc_fw = &i915->huc.fw; - - /* A negative value means "use platform default" */ - if (i915_modparams.enable_guc < 0) - i915_modparams.enable_guc = __get_platform_enable_guc(i915); - - DRM_DEBUG_DRIVER("enable_guc=%d (submission:%s huc:%s)\n", - i915_modparams.enable_guc, - yesno(intel_uc_is_using_guc_submission(i915)), - yesno(intel_uc_is_using_huc(i915))); - - /* Verify GuC firmware availability */ - if (intel_uc_is_using_guc(i915) && !intel_uc_fw_is_selected(guc_fw)) { - DRM_WARN("Incompatible option detected: %s=%d, %s!\n", - "enable_guc", i915_modparams.enable_guc, - !intel_uc_fw_supported(guc_fw) ? - "no GuC hardware" : "no GuC firmware"); - } - - /* Verify HuC firmware availability */ - if (intel_uc_is_using_huc(i915) && !intel_uc_fw_is_selected(huc_fw)) { - DRM_WARN("Incompatible option detected: %s=%d, %s!\n", - "enable_guc", i915_modparams.enable_guc, - !intel_uc_fw_supported(huc_fw) ? 
- "no HuC hardware" : "no HuC firmware"); - } - - /* XXX: GuC submission is unavailable for now */ - if (intel_uc_is_using_guc_submission(i915)) { - DRM_INFO("Incompatible option detected: %s=%d, %s!\n", - "enable_guc", i915_modparams.enable_guc, - "GuC submission not supported"); - DRM_INFO("Switching to non-GuC submission mode!\n"); - i915_modparams.enable_guc &= ~ENABLE_GUC_SUBMISSION; - } - - /* A negative value means "use platform/config default" */ - if (i915_modparams.guc_log_level < 0) - i915_modparams.guc_log_level = - __get_default_guc_log_level(i915); - - if (i915_modparams.guc_log_level > 0 && !intel_uc_is_using_guc(i915)) { - DRM_WARN("Incompatible option detected: %s=%d, %s!\n", - "guc_log_level", i915_modparams.guc_log_level, - !intel_uc_fw_supported(guc_fw) ? - "no GuC hardware" : "GuC not enabled"); - i915_modparams.guc_log_level = 0; - } - - if (i915_modparams.guc_log_level > GUC_LOG_LEVEL_MAX) { - DRM_WARN("Incompatible option detected: %s=%d, %s!\n", - "guc_log_level", i915_modparams.guc_log_level, - "verbosity too high"); - i915_modparams.guc_log_level = GUC_LOG_LEVEL_MAX; - } - - DRM_DEBUG_DRIVER("guc_log_level=%d (enabled:%s, verbose:%s, verbosity:%d)\n", - i915_modparams.guc_log_level, - yesno(i915_modparams.guc_log_level), - yesno(GUC_LOG_LEVEL_IS_VERBOSE(i915_modparams.guc_log_level)), - GUC_LOG_LEVEL_TO_VERBOSITY(i915_modparams.guc_log_level)); - - /* Make sure that sanitization was done */ - GEM_BUG_ON(i915_modparams.enable_guc < 0); - GEM_BUG_ON(i915_modparams.guc_log_level < 0); -} - -void intel_uc_init_early(struct drm_i915_private *i915) -{ - struct intel_guc *guc = &i915->guc; - struct intel_huc *huc = &i915->huc; - - intel_guc_init_early(guc); - intel_huc_init_early(huc); - - sanitize_options_early(i915); -} - -void intel_uc_cleanup_early(struct drm_i915_private *i915) -{ - struct intel_guc *guc = &i915->guc; - - guc_free_load_err_log(guc); -} - -/** - * intel_uc_init_mmio - setup uC MMIO access - * @i915: device private - * - * Setup minimal state necessary for MMIO accesses later in the - * initialization sequence. - */ -void intel_uc_init_mmio(struct drm_i915_private *i915) -{ - intel_guc_init_send_regs(&i915->guc); -} - -static void guc_capture_load_err_log(struct intel_guc *guc) -{ - if (!guc->log.vma || !intel_guc_log_get_level(&guc->log)) - return; - - if (!guc->load_err_log) - guc->load_err_log = i915_gem_object_get(guc->log.vma->obj); - - return; -} - -static void guc_free_load_err_log(struct intel_guc *guc) -{ - if (guc->load_err_log) - i915_gem_object_put(guc->load_err_log); -} - -/* - * Events triggered while CT buffers are disabled are logged in the SCRATCH_15 - * register using the same bits used in the CT message payload. Since our - * communication channel with guc is turned off at this point, we can save the - * message and handle it after we turn it back on. - */ -static void guc_clear_mmio_msg(struct intel_guc *guc) -{ - intel_uncore_write(&guc_to_i915(guc)->uncore, SOFT_SCRATCH(15), 0); -} - -static void guc_get_mmio_msg(struct intel_guc *guc) -{ - u32 val; - - spin_lock_irq(&guc->irq_lock); - - val = intel_uncore_read(&guc_to_i915(guc)->uncore, SOFT_SCRATCH(15)); - guc->mmio_msg |= val & guc->msg_enabled_mask; - - /* - * clear all events, including the ones we're not currently servicing, - * to make sure we don't try to process a stale message if we enable - * handling of more events later. 
- */ - guc_clear_mmio_msg(guc); - - spin_unlock_irq(&guc->irq_lock); -} - -static void guc_handle_mmio_msg(struct intel_guc *guc) -{ - struct drm_i915_private *i915 = guc_to_i915(guc); - - /* we need communication to be enabled to reply to GuC */ - GEM_BUG_ON(guc->handler == intel_guc_to_host_event_handler_nop); - - if (!guc->mmio_msg) - return; - - spin_lock_irq(&i915->irq_lock); - intel_guc_to_host_process_recv_msg(guc, &guc->mmio_msg, 1); - spin_unlock_irq(&i915->irq_lock); - - guc->mmio_msg = 0; -} - -static void guc_reset_interrupts(struct intel_guc *guc) -{ - guc->interrupts.reset(guc); -} - -static void guc_enable_interrupts(struct intel_guc *guc) -{ - guc->interrupts.enable(guc); -} - -static void guc_disable_interrupts(struct intel_guc *guc) -{ - guc->interrupts.disable(guc); -} - -static int guc_enable_communication(struct intel_guc *guc) -{ - struct drm_i915_private *i915 = guc_to_i915(guc); - int ret; - - ret = intel_guc_ct_enable(&guc->ct); - if (ret) - return ret; - - guc->send = intel_guc_send_ct; - guc->handler = intel_guc_to_host_event_handler_ct; - - /* check for mmio messages received before/during the CT enable */ - guc_get_mmio_msg(guc); - guc_handle_mmio_msg(guc); - - guc_enable_interrupts(guc); - - /* check for CT messages received before we enabled interrupts */ - spin_lock_irq(&i915->irq_lock); - intel_guc_to_host_event_handler_ct(guc); - spin_unlock_irq(&i915->irq_lock); - - DRM_INFO("GuC communication enabled\n"); - - return 0; -} - -static void guc_stop_communication(struct intel_guc *guc) -{ - intel_guc_ct_stop(&guc->ct); - - guc->send = intel_guc_send_nop; - guc->handler = intel_guc_to_host_event_handler_nop; - - guc_clear_mmio_msg(guc); -} - -static void guc_disable_communication(struct intel_guc *guc) -{ - /* - * Events generated during or after CT disable are logged by guc in - * via mmio. Make sure the register is clear before disabling CT since - * all events we cared about have already been processed via CT. - */ - guc_clear_mmio_msg(guc); - - guc_disable_interrupts(guc); - - guc->send = intel_guc_send_nop; - guc->handler = intel_guc_to_host_event_handler_nop; - - intel_guc_ct_disable(&guc->ct); - - /* - * Check for messages received during/after the CT disable. We do not - * expect any messages to have arrived via CT between the interrupt - * disable and the CT disable because GuC should've been idle until we - * triggered the CT disable protocol. 
- */ - guc_get_mmio_msg(guc); - - DRM_INFO("GuC communication disabled\n"); -} - -void intel_uc_fetch_firmwares(struct drm_i915_private *i915) -{ - if (!USES_GUC(i915)) - return; - - intel_uc_fw_fetch(i915, &i915->guc.fw); - - if (USES_HUC(i915)) - intel_uc_fw_fetch(i915, &i915->huc.fw); -} - -void intel_uc_cleanup_firmwares(struct drm_i915_private *i915) -{ - if (!USES_GUC(i915)) - return; - - if (USES_HUC(i915)) - intel_uc_fw_cleanup_fetch(&i915->huc.fw); - - intel_uc_fw_cleanup_fetch(&i915->guc.fw); -} - -int intel_uc_init(struct drm_i915_private *i915) -{ - struct intel_guc *guc = &i915->guc; - struct intel_huc *huc = &i915->huc; - int ret; - - if (!USES_GUC(i915)) - return 0; - - if (!intel_uc_fw_supported(&guc->fw)) - return -ENODEV; - - /* XXX: GuC submission is unavailable for now */ - GEM_BUG_ON(USES_GUC_SUBMISSION(i915)); - - ret = intel_guc_init(guc); - if (ret) - return ret; - - if (USES_HUC(i915)) { - ret = intel_huc_init(huc); - if (ret) - goto err_guc; - } - - if (USES_GUC_SUBMISSION(i915)) { - /* - * This is stuff we need to have available at fw load time - * if we are planning to enable submission later - */ - ret = intel_guc_submission_init(guc); - if (ret) - goto err_huc; - } - - return 0; - -err_huc: - if (USES_HUC(i915)) - intel_huc_fini(huc); -err_guc: - intel_guc_fini(guc); - return ret; -} - -void intel_uc_fini(struct drm_i915_private *i915) -{ - struct intel_guc *guc = &i915->guc; - - if (!USES_GUC(i915)) - return; - - GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw)); - - if (USES_GUC_SUBMISSION(i915)) - intel_guc_submission_fini(guc); - - if (USES_HUC(i915)) - intel_huc_fini(&i915->huc); - - intel_guc_fini(guc); -} - -static void __uc_sanitize(struct drm_i915_private *i915) -{ - struct intel_guc *guc = &i915->guc; - struct intel_huc *huc = &i915->huc; - - GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw)); - - intel_huc_sanitize(huc); - intel_guc_sanitize(guc); - - __intel_uc_reset_hw(i915); -} - -void intel_uc_sanitize(struct drm_i915_private *i915) -{ - if (!USES_GUC(i915)) - return; - - __uc_sanitize(i915); -} - -int intel_uc_init_hw(struct drm_i915_private *i915) -{ - struct intel_guc *guc = &i915->guc; - struct intel_huc *huc = &i915->huc; - int ret, attempts; - - if (!USES_GUC(i915)) - return 0; - - GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw)); - - guc_reset_interrupts(guc); - - /* WaEnableuKernelHeaderValidFix:skl */ - /* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */ - if (IS_GEN(i915, 9)) - attempts = 3; - else - attempts = 1; - - while (attempts--) { - /* - * Always reset the GuC just before (re)loading, so - * that the state and timing are fairly predictable - */ - ret = __intel_uc_reset_hw(i915); - if (ret) - goto err_out; - - if (USES_HUC(i915)) { - ret = intel_huc_fw_upload(huc); - if (ret) - goto err_out; - } - - intel_guc_ads_reset(guc); - intel_guc_init_params(guc); - ret = intel_guc_fw_upload(guc); - if (ret == 0) - break; - - DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and " - "retry %d more time(s)\n", ret, attempts); - } - - /* Did we succeded or run out of retries? 
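intel_uc_init_hw() above retries the whole reset-and-load cycle a platform-dependent number of times, three on Gen9 and once elsewhere, before giving up. Stripped of the hardware specifics, the retry shape looks roughly like this, with load_attempt() standing in for the reset plus HuC and GuC upload sequence:

#include <stdbool.h>
#include <stdio.h>

static int load_attempt(int try)
{
        /* Pretend the first upload fails and the second succeeds. */
        return try == 0 ? -1 : 0;
}

static int load_with_retries(bool is_gen9)
{
        int attempts = is_gen9 ? 3 : 1; /* WaEnableGuCBootHashCheckNotSet */
        int ret = -1;

        for (int try = 0; attempts--; try++) {
                ret = load_attempt(try);
                if (ret == 0)
                        break;
                printf("load failed (%d); %d retr%s left\n",
                       ret, attempts, attempts == 1 ? "y" : "ies");
        }
        return ret;
}

int main(void)
{
        printf("final result: %d\n", load_with_retries(true));
        return 0;
}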
*/ - if (ret) - goto err_log_capture; - - ret = guc_enable_communication(guc); - if (ret) - goto err_log_capture; - - if (USES_HUC(i915)) { - ret = intel_huc_auth(huc); - if (ret) - goto err_communication; - } - - ret = intel_guc_sample_forcewake(guc); - if (ret) - goto err_communication; - - if (USES_GUC_SUBMISSION(i915)) { - ret = intel_guc_submission_enable(guc); - if (ret) - goto err_communication; - } - - dev_info(i915->drm.dev, "GuC firmware version %u.%u\n", - guc->fw.major_ver_found, guc->fw.minor_ver_found); - dev_info(i915->drm.dev, "GuC submission %s\n", - enableddisabled(USES_GUC_SUBMISSION(i915))); - dev_info(i915->drm.dev, "HuC %s\n", - enableddisabled(USES_HUC(i915))); - - return 0; - - /* - * We've failed to load the firmware :( - */ -err_communication: - guc_disable_communication(guc); -err_log_capture: - guc_capture_load_err_log(guc); -err_out: - __uc_sanitize(i915); - - /* - * Note that there is no fallback as either user explicitly asked for - * the GuC or driver default option was to run with the GuC enabled. - */ - if (GEM_WARN_ON(ret == -EIO)) - ret = -EINVAL; - - dev_err(i915->drm.dev, "GuC initialization failed %d\n", ret); - return ret; -} - -void intel_uc_fini_hw(struct drm_i915_private *i915) -{ - struct intel_guc *guc = &i915->guc; - - if (!intel_guc_is_loaded(guc)) - return; - - GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw)); - - if (USES_GUC_SUBMISSION(i915)) - intel_guc_submission_disable(guc); - - guc_disable_communication(guc); - __uc_sanitize(i915); -} - -/** - * intel_uc_reset_prepare - Prepare for reset - * @i915: device private - * - * Preparing for full gpu reset. - */ -void intel_uc_reset_prepare(struct drm_i915_private *i915) -{ - struct intel_guc *guc = &i915->guc; - - if (!intel_guc_is_loaded(guc)) - return; - - guc_stop_communication(guc); - __uc_sanitize(i915); -} - -void intel_uc_runtime_suspend(struct drm_i915_private *i915) -{ - struct intel_guc *guc = &i915->guc; - int err; - - if (!intel_guc_is_loaded(guc)) - return; - - err = intel_guc_suspend(guc); - if (err) - DRM_DEBUG_DRIVER("Failed to suspend GuC, err=%d", err); - - guc_disable_communication(guc); -} - -void intel_uc_suspend(struct drm_i915_private *i915) -{ - struct intel_guc *guc = &i915->guc; - intel_wakeref_t wakeref; - - if (!intel_guc_is_loaded(guc)) - return; - - with_intel_runtime_pm(&i915->runtime_pm, wakeref) - intel_uc_runtime_suspend(i915); -} - -int intel_uc_resume(struct drm_i915_private *i915) -{ - struct intel_guc *guc = &i915->guc; - int err; - - if (!intel_guc_is_loaded(guc)) - return 0; - - guc_enable_communication(guc); - - err = intel_guc_resume(guc); - if (err) { - DRM_DEBUG_DRIVER("Failed to resume GuC, err=%d", err); - return err; - } - - return 0; -} diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h deleted file mode 100644 index 5a1383e192dd..000000000000 --- a/drivers/gpu/drm/i915/intel_uc.h +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright © 2014 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in 
all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - */ -#ifndef _INTEL_UC_H_ -#define _INTEL_UC_H_ - -#include "intel_guc.h" -#include "intel_huc.h" -#include "i915_params.h" - -void intel_uc_init_early(struct drm_i915_private *dev_priv); -void intel_uc_cleanup_early(struct drm_i915_private *dev_priv); -void intel_uc_init_mmio(struct drm_i915_private *dev_priv); -void intel_uc_fetch_firmwares(struct drm_i915_private *dev_priv); -void intel_uc_cleanup_firmwares(struct drm_i915_private *dev_priv); -void intel_uc_sanitize(struct drm_i915_private *dev_priv); -int intel_uc_init_hw(struct drm_i915_private *dev_priv); -void intel_uc_fini_hw(struct drm_i915_private *dev_priv); -int intel_uc_init(struct drm_i915_private *dev_priv); -void intel_uc_fini(struct drm_i915_private *dev_priv); -void intel_uc_reset_prepare(struct drm_i915_private *i915); -void intel_uc_suspend(struct drm_i915_private *i915); -void intel_uc_runtime_suspend(struct drm_i915_private *i915); -int intel_uc_resume(struct drm_i915_private *dev_priv); - -static inline bool intel_uc_is_using_guc(struct drm_i915_private *i915) -{ - GEM_BUG_ON(i915_modparams.enable_guc < 0); - return i915_modparams.enable_guc > 0; -} - -static inline bool intel_uc_is_using_guc_submission(struct drm_i915_private *i915) -{ - GEM_BUG_ON(i915_modparams.enable_guc < 0); - return i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION; -} - -static inline bool intel_uc_is_using_huc(struct drm_i915_private *i915) -{ - GEM_BUG_ON(i915_modparams.enable_guc < 0); - return i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC; -} - -#endif diff --git a/drivers/gpu/drm/i915/intel_uc_fw.c b/drivers/gpu/drm/i915/intel_uc_fw.c deleted file mode 100644 index 8ce7210907c0..000000000000 --- a/drivers/gpu/drm/i915/intel_uc_fw.c +++ /dev/null @@ -1,359 +0,0 @@ -/* - * Copyright © 2016-2017 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. 
- * - */ - -#include -#include -#include - -#include "intel_uc_fw.h" -#include "i915_drv.h" - -/** - * intel_uc_fw_fetch - fetch uC firmware - * - * @dev_priv: device private - * @uc_fw: uC firmware - * - * Fetch uC firmware into GEM obj. - */ -void intel_uc_fw_fetch(struct drm_i915_private *dev_priv, - struct intel_uc_fw *uc_fw) -{ - struct pci_dev *pdev = dev_priv->drm.pdev; - struct drm_i915_gem_object *obj; - const struct firmware *fw = NULL; - struct uc_css_header *css; - size_t size; - int err; - - GEM_BUG_ON(!intel_uc_fw_supported(uc_fw)); - - if (!uc_fw->path) { - dev_info(dev_priv->drm.dev, - "%s: No firmware was defined for %s!\n", - intel_uc_fw_type_repr(uc_fw->type), - intel_platform_name(INTEL_INFO(dev_priv)->platform)); - return; - } - - DRM_DEBUG_DRIVER("%s fw fetch %s\n", - intel_uc_fw_type_repr(uc_fw->type), uc_fw->path); - - uc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING; - DRM_DEBUG_DRIVER("%s fw fetch %s\n", - intel_uc_fw_type_repr(uc_fw->type), - intel_uc_fw_status_repr(uc_fw->fetch_status)); - - err = request_firmware(&fw, uc_fw->path, &pdev->dev); - if (err) { - DRM_DEBUG_DRIVER("%s fw request_firmware err=%d\n", - intel_uc_fw_type_repr(uc_fw->type), err); - goto fail; - } - - DRM_DEBUG_DRIVER("%s fw size %zu ptr %p\n", - intel_uc_fw_type_repr(uc_fw->type), fw->size, fw); - - /* Check the size of the blob before examining buffer contents */ - if (fw->size < sizeof(struct uc_css_header)) { - DRM_WARN("%s: Unexpected firmware size (%zu, min %zu)\n", - intel_uc_fw_type_repr(uc_fw->type), - fw->size, sizeof(struct uc_css_header)); - err = -ENODATA; - goto fail; - } - - css = (struct uc_css_header *)fw->data; - - /* Firmware bits always start from header */ - uc_fw->header_offset = 0; - uc_fw->header_size = (css->header_size_dw - css->modulus_size_dw - - css->key_size_dw - css->exponent_size_dw) * - sizeof(u32); - - if (uc_fw->header_size != sizeof(struct uc_css_header)) { - DRM_WARN("%s: Mismatched firmware header definition\n", - intel_uc_fw_type_repr(uc_fw->type)); - err = -ENOEXEC; - goto fail; - } - - /* then, uCode */ - uc_fw->ucode_offset = uc_fw->header_offset + uc_fw->header_size; - uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32); - - /* now RSA */ - if (css->key_size_dw != UOS_RSA_SCRATCH_COUNT) { - DRM_WARN("%s: Mismatched firmware RSA key size (%u)\n", - intel_uc_fw_type_repr(uc_fw->type), css->key_size_dw); - err = -ENOEXEC; - goto fail; - } - uc_fw->rsa_offset = uc_fw->ucode_offset + uc_fw->ucode_size; - uc_fw->rsa_size = css->key_size_dw * sizeof(u32); - - /* At least, it should have header, uCode and RSA. Size of all three. 
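The fetch path above derives every blob offset from dword counts in the CSS header: the header ends where the uCode begins, and the RSA key follows the uCode. A standalone recomputation of that layout; the field names follow the code above, while the numbers in main() are invented:

#include <stdio.h>

struct css_counts {             /* subset of struct uc_css_header */
        unsigned int header_size_dw;
        unsigned int size_dw;   /* total image size in dwords */
        unsigned int modulus_size_dw;
        unsigned int exponent_size_dw;
        unsigned int key_size_dw;
};

struct fw_layout {
        unsigned int header_offset, header_size;
        unsigned int ucode_offset, ucode_size;
        unsigned int rsa_offset, rsa_size;
};

static struct fw_layout compute_layout(const struct css_counts *css)
{
        struct fw_layout l;

        l.header_offset = 0;    /* firmware bits always start from the header */
        l.header_size = (css->header_size_dw - css->modulus_size_dw -
                         css->key_size_dw - css->exponent_size_dw) * 4;
        l.ucode_offset = l.header_offset + l.header_size;
        l.ucode_size = (css->size_dw - css->header_size_dw) * 4;
        l.rsa_offset = l.ucode_offset + l.ucode_size;
        l.rsa_size = css->key_size_dw * 4;
        return l;
}

int main(void)
{
        struct css_counts css = {
                .header_size_dw = 0xa1, .size_dw = 0x4000,
                .modulus_size_dw = 0x40, .exponent_size_dw = 0x1, .key_size_dw = 0x40,
        };
        struct fw_layout l = compute_layout(&css);

        printf("header %u+%u, ucode %u+%u, rsa %u+%u, total %u bytes\n",
               l.header_offset, l.header_size, l.ucode_offset, l.ucode_size,
               l.rsa_offset, l.rsa_size,
               l.header_size + l.ucode_size + l.rsa_size);
        return 0;
}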
*/ - size = uc_fw->header_size + uc_fw->ucode_size + uc_fw->rsa_size; - if (fw->size < size) { - DRM_WARN("%s: Truncated firmware (%zu, expected %zu)\n", - intel_uc_fw_type_repr(uc_fw->type), fw->size, size); - err = -ENOEXEC; - goto fail; - } - - /* Get version numbers from the CSS header */ - switch (uc_fw->type) { - case INTEL_UC_FW_TYPE_GUC: - uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_GUC_MAJOR, - css->sw_version); - uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_GUC_MINOR, - css->sw_version); - break; - - case INTEL_UC_FW_TYPE_HUC: - uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_HUC_MAJOR, - css->sw_version); - uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_HUC_MINOR, - css->sw_version); - break; - - default: - MISSING_CASE(uc_fw->type); - break; - } - - DRM_DEBUG_DRIVER("%s fw version %u.%u (wanted %u.%u)\n", - intel_uc_fw_type_repr(uc_fw->type), - uc_fw->major_ver_found, uc_fw->minor_ver_found, - uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted); - - if (uc_fw->major_ver_wanted == 0 && uc_fw->minor_ver_wanted == 0) { - DRM_NOTE("%s: Skipping firmware version check\n", - intel_uc_fw_type_repr(uc_fw->type)); - } else if (uc_fw->major_ver_found != uc_fw->major_ver_wanted || - uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) { - DRM_NOTE("%s: Wrong firmware version (%u.%u, required %u.%u)\n", - intel_uc_fw_type_repr(uc_fw->type), - uc_fw->major_ver_found, uc_fw->minor_ver_found, - uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted); - err = -ENOEXEC; - goto fail; - } - - obj = i915_gem_object_create_shmem_from_data(dev_priv, - fw->data, fw->size); - if (IS_ERR(obj)) { - err = PTR_ERR(obj); - DRM_DEBUG_DRIVER("%s fw object_create err=%d\n", - intel_uc_fw_type_repr(uc_fw->type), err); - goto fail; - } - - uc_fw->obj = obj; - uc_fw->size = fw->size; - uc_fw->fetch_status = INTEL_UC_FIRMWARE_SUCCESS; - DRM_DEBUG_DRIVER("%s fw fetch %s\n", - intel_uc_fw_type_repr(uc_fw->type), - intel_uc_fw_status_repr(uc_fw->fetch_status)); - - release_firmware(fw); - return; - -fail: - uc_fw->fetch_status = INTEL_UC_FIRMWARE_FAIL; - DRM_DEBUG_DRIVER("%s fw fetch %s\n", - intel_uc_fw_type_repr(uc_fw->type), - intel_uc_fw_status_repr(uc_fw->fetch_status)); - - DRM_WARN("%s: Failed to fetch firmware %s (error %d)\n", - intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err); - DRM_INFO("%s: Firmware can be downloaded from %s\n", - intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL); - - release_firmware(fw); /* OK even if fw is NULL */ -} - -static void intel_uc_fw_ggtt_bind(struct intel_uc_fw *uc_fw) -{ - struct drm_i915_gem_object *obj = uc_fw->obj; - struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt; - struct i915_vma dummy = { - .node.start = intel_uc_fw_ggtt_offset(uc_fw), - .node.size = obj->base.size, - .pages = obj->mm.pages, - .vm = &ggtt->vm, - }; - - GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); - GEM_BUG_ON(dummy.node.size > ggtt->uc_fw.size); - - /* uc_fw->obj cache domains were not controlled across suspend */ - drm_clflush_sg(dummy.pages); - - ggtt->vm.insert_entries(&ggtt->vm, &dummy, I915_CACHE_NONE, 0); -} - -static void intel_uc_fw_ggtt_unbind(struct intel_uc_fw *uc_fw) -{ - struct drm_i915_gem_object *obj = uc_fw->obj; - struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt; - u64 start = intel_uc_fw_ggtt_offset(uc_fw); - - ggtt->vm.clear_range(&ggtt->vm, start, obj->base.size); -} - -/** - * intel_uc_fw_upload - load uC firmware using custom loader - * @uc_fw: uC firmware - * @xfer: custom uC firmware loader function - * - * Loads uC firmware using custom 
loader and updates internal flags. - * - * Return: 0 on success, non-zero on failure. - */ -int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, - int (*xfer)(struct intel_uc_fw *uc_fw)) -{ - int err; - - DRM_DEBUG_DRIVER("%s fw load %s\n", - intel_uc_fw_type_repr(uc_fw->type), uc_fw->path); - - if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS) - return -ENOEXEC; - - uc_fw->load_status = INTEL_UC_FIRMWARE_PENDING; - DRM_DEBUG_DRIVER("%s fw load %s\n", - intel_uc_fw_type_repr(uc_fw->type), - intel_uc_fw_status_repr(uc_fw->load_status)); - - /* Call custom loader */ - intel_uc_fw_ggtt_bind(uc_fw); - err = xfer(uc_fw); - intel_uc_fw_ggtt_unbind(uc_fw); - if (err) - goto fail; - - uc_fw->load_status = INTEL_UC_FIRMWARE_SUCCESS; - DRM_DEBUG_DRIVER("%s fw load %s\n", - intel_uc_fw_type_repr(uc_fw->type), - intel_uc_fw_status_repr(uc_fw->load_status)); - - DRM_INFO("%s: Loaded firmware %s (version %u.%u)\n", - intel_uc_fw_type_repr(uc_fw->type), - uc_fw->path, - uc_fw->major_ver_found, uc_fw->minor_ver_found); - - return 0; - -fail: - uc_fw->load_status = INTEL_UC_FIRMWARE_FAIL; - DRM_DEBUG_DRIVER("%s fw load %s\n", - intel_uc_fw_type_repr(uc_fw->type), - intel_uc_fw_status_repr(uc_fw->load_status)); - - DRM_WARN("%s: Failed to load firmware %s (error %d)\n", - intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err); - - return err; -} - -int intel_uc_fw_init(struct intel_uc_fw *uc_fw) -{ - int err; - - if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS) - return -ENOEXEC; - - err = i915_gem_object_pin_pages(uc_fw->obj); - if (err) - DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n", - intel_uc_fw_type_repr(uc_fw->type), err); - - return err; -} - -void intel_uc_fw_fini(struct intel_uc_fw *uc_fw) -{ - if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS) - return; - - i915_gem_object_unpin_pages(uc_fw->obj); -} - -u32 intel_uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw) -{ - struct drm_i915_private *i915 = to_i915(uc_fw->obj->base.dev); - struct i915_ggtt *ggtt = &i915->ggtt; - struct drm_mm_node *node = &ggtt->uc_fw; - - GEM_BUG_ON(!node->allocated); - GEM_BUG_ON(upper_32_bits(node->start)); - GEM_BUG_ON(upper_32_bits(node->start + node->size - 1)); - - return lower_32_bits(node->start); -} - -/** - * intel_uc_fw_cleanup_fetch - cleanup uC firmware - * - * @uc_fw: uC firmware - * - * Cleans up uC firmware by releasing the firmware GEM obj. - */ -void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw) -{ - struct drm_i915_gem_object *obj; - - obj = fetch_and_zero(&uc_fw->obj); - if (obj) - i915_gem_object_put(obj); - - uc_fw->fetch_status = INTEL_UC_FIRMWARE_NOT_STARTED; -} - -/** - * intel_uc_fw_dump - dump information about uC firmware - * @uc_fw: uC firmware - * @p: the &drm_printer - * - * Pretty printer for uC firmware. 
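intel_uc_fw_upload() above only advances load_status, and only once fetch_status has already reached SUCCESS; both fields walk the same small state machine. A sketch of those transitions using a reduced version of the status enum defined in intel_uc_fw.h further below:

#include <stdio.h>

enum fw_status {        /* reduced version of enum intel_uc_fw_status */
        FW_FAIL = -1,
        FW_UNINITIALIZED = 0,
        FW_NOT_STARTED,
        FW_PENDING,
        FW_SUCCESS,
};

struct fw {
        enum fw_status fetch_status;
        enum fw_status load_status;
};

/* xfer() stands in for the platform-specific DMA upload. */
static int upload(struct fw *fw, int (*xfer)(struct fw *fw))
{
        if (fw->fetch_status != FW_SUCCESS)
                return -1;      /* -ENOEXEC: nothing usable was fetched */

        fw->load_status = FW_PENDING;
        if (xfer(fw)) {
                fw->load_status = FW_FAIL;
                return -2;
        }
        fw->load_status = FW_SUCCESS;
        return 0;
}

static int fake_xfer(struct fw *fw) { (void)fw; return 0; }

int main(void)
{
        struct fw fw = { .fetch_status = FW_SUCCESS, .load_status = FW_NOT_STARTED };

        printf("upload: %d, load_status: %d\n", upload(&fw, fake_xfer), fw.load_status);
        return 0;
}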
- */ -void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p) -{ - drm_printf(p, "%s firmware: %s\n", - intel_uc_fw_type_repr(uc_fw->type), uc_fw->path); - drm_printf(p, "\tstatus: fetch %s, load %s\n", - intel_uc_fw_status_repr(uc_fw->fetch_status), - intel_uc_fw_status_repr(uc_fw->load_status)); - drm_printf(p, "\tversion: wanted %u.%u, found %u.%u\n", - uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted, - uc_fw->major_ver_found, uc_fw->minor_ver_found); - drm_printf(p, "\theader: offset %u, size %u\n", - uc_fw->header_offset, uc_fw->header_size); - drm_printf(p, "\tuCode: offset %u, size %u\n", - uc_fw->ucode_offset, uc_fw->ucode_size); - drm_printf(p, "\tRSA: offset %u, size %u\n", - uc_fw->rsa_offset, uc_fw->rsa_size); -} diff --git a/drivers/gpu/drm/i915/intel_uc_fw.h b/drivers/gpu/drm/i915/intel_uc_fw.h deleted file mode 100644 index 833d04d06576..000000000000 --- a/drivers/gpu/drm/i915/intel_uc_fw.h +++ /dev/null @@ -1,177 +0,0 @@ -/* - * Copyright © 2014-2017 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - */ - -#ifndef _INTEL_UC_FW_H_ -#define _INTEL_UC_FW_H_ - -#include -#include "i915_gem.h" - -struct drm_printer; -struct drm_i915_private; - -/* Home of GuC, HuC and DMC firmwares */ -#define INTEL_UC_FIRMWARE_URL "https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/tree/i915" - -enum intel_uc_fw_status { - INTEL_UC_FIRMWARE_NOT_SUPPORTED = -2, /* no uc HW */ - INTEL_UC_FIRMWARE_FAIL = -1, - INTEL_UC_FIRMWARE_UNINITIALIZED = 0, /* used to catch checks done too early */ - INTEL_UC_FIRMWARE_NOT_STARTED = 1, - INTEL_UC_FIRMWARE_PENDING, - INTEL_UC_FIRMWARE_SUCCESS -}; - -enum intel_uc_fw_type { - INTEL_UC_FW_TYPE_GUC, - INTEL_UC_FW_TYPE_HUC -}; - -/* - * This structure encapsulates all the data needed during the process - * of fetching, caching, and loading the firmware image into the uC. - */ -struct intel_uc_fw { - const char *path; - size_t size; - struct drm_i915_gem_object *obj; - enum intel_uc_fw_status fetch_status; - enum intel_uc_fw_status load_status; - - /* - * The firmware build process will generate a version header file with major and - * minor version defined. The versions are built into CSS header of firmware. - * i915 kernel driver set the minimal firmware version required per platform. 
- */ - u16 major_ver_wanted; - u16 minor_ver_wanted; - u16 major_ver_found; - u16 minor_ver_found; - - enum intel_uc_fw_type type; - u32 header_size; - u32 header_offset; - u32 rsa_size; - u32 rsa_offset; - u32 ucode_size; - u32 ucode_offset; -}; - -static inline -const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status) -{ - switch (status) { - case INTEL_UC_FIRMWARE_NOT_SUPPORTED: - return "N/A - uc HW not available"; - case INTEL_UC_FIRMWARE_FAIL: - return "FAIL"; - case INTEL_UC_FIRMWARE_UNINITIALIZED: - return "UNINITIALIZED"; - case INTEL_UC_FIRMWARE_NOT_STARTED: - return "NOT_STARTED"; - case INTEL_UC_FIRMWARE_PENDING: - return "PENDING"; - case INTEL_UC_FIRMWARE_SUCCESS: - return "SUCCESS"; - } - return ""; -} - -static inline const char *intel_uc_fw_type_repr(enum intel_uc_fw_type type) -{ - switch (type) { - case INTEL_UC_FW_TYPE_GUC: - return "GuC"; - case INTEL_UC_FW_TYPE_HUC: - return "HuC"; - } - return "uC"; -} - -static inline -void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw, - enum intel_uc_fw_type type) -{ - /* - * we use FIRMWARE_UNINITIALIZED to detect checks against fetch_status - * before we're looked at the HW caps to see if we have uc support - */ - BUILD_BUG_ON(INTEL_UC_FIRMWARE_UNINITIALIZED); - - uc_fw->path = NULL; - uc_fw->fetch_status = INTEL_UC_FIRMWARE_UNINITIALIZED; - uc_fw->load_status = INTEL_UC_FIRMWARE_NOT_STARTED; - uc_fw->type = type; -} - -static inline bool intel_uc_fw_is_selected(struct intel_uc_fw *uc_fw) -{ - return uc_fw->path != NULL; -} - -static inline bool intel_uc_fw_is_loaded(struct intel_uc_fw *uc_fw) -{ - return uc_fw->load_status == INTEL_UC_FIRMWARE_SUCCESS; -} - -static inline bool intel_uc_fw_supported(struct intel_uc_fw *uc_fw) -{ - /* shouldn't call this before checking hw/blob availability */ - GEM_BUG_ON(uc_fw->fetch_status == INTEL_UC_FIRMWARE_UNINITIALIZED); - return uc_fw->fetch_status != INTEL_UC_FIRMWARE_NOT_SUPPORTED; -} - -static inline void intel_uc_fw_sanitize(struct intel_uc_fw *uc_fw) -{ - if (intel_uc_fw_is_loaded(uc_fw)) - uc_fw->load_status = INTEL_UC_FIRMWARE_PENDING; -} - -/** - * intel_uc_fw_get_upload_size() - Get size of firmware needed to be uploaded. - * @uc_fw: uC firmware. - * - * Get the size of the firmware and header that will be uploaded to WOPCM. - * - * Return: Upload firmware size, or zero on firmware fetch failure. 
- */ -static inline u32 intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw) -{ - if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS) - return 0; - - return uc_fw->header_size + uc_fw->ucode_size; -} - -void intel_uc_fw_fetch(struct drm_i915_private *dev_priv, - struct intel_uc_fw *uc_fw); -void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw); -int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, - int (*xfer)(struct intel_uc_fw *uc_fw)); -int intel_uc_fw_init(struct intel_uc_fw *uc_fw); -void intel_uc_fw_fini(struct intel_uc_fw *uc_fw); -u32 intel_uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw); -void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p); - -#endif diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c deleted file mode 100644 index 6ca76f5a98d4..000000000000 --- a/drivers/gpu/drm/i915/selftests/intel_guc.c +++ /dev/null @@ -1,336 +0,0 @@ -/* - * Copyright © 2017 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. 
- * - */ - -#include "i915_selftest.h" -#include "gem/i915_gem_pm.h" - -/* max doorbell number + negative test for each client type */ -#define ATTEMPTS (GUC_NUM_DOORBELLS + GUC_CLIENT_PRIORITY_NUM) - -static struct intel_guc_client *clients[ATTEMPTS]; - -static bool available_dbs(struct intel_guc *guc, u32 priority) -{ - unsigned long offset; - unsigned long end; - u16 id; - - /* first half is used for normal priority, second half for high */ - offset = 0; - end = GUC_NUM_DOORBELLS / 2; - if (priority <= GUC_CLIENT_PRIORITY_HIGH) { - offset = end; - end += offset; - } - - id = find_next_zero_bit(guc->doorbell_bitmap, end, offset); - if (id < end) - return true; - - return false; -} - -static int check_all_doorbells(struct intel_guc *guc) -{ - u16 db_id; - - pr_info_once("Max number of doorbells: %d", GUC_NUM_DOORBELLS); - for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id) { - if (!doorbell_ok(guc, db_id)) { - pr_err("doorbell %d, not ok\n", db_id); - return -EIO; - } - } - - return 0; -} - -static int ring_doorbell_nop(struct intel_guc_client *client) -{ - struct guc_process_desc *desc = __get_process_desc(client); - int err; - - client->use_nop_wqi = true; - - spin_lock_irq(&client->wq_lock); - - guc_wq_item_append(client, 0, 0, 0, 0); - guc_ring_doorbell(client); - - spin_unlock_irq(&client->wq_lock); - - client->use_nop_wqi = false; - - /* if there are no issues GuC will update the WQ head and keep the - * WQ in active status - */ - err = wait_for(READ_ONCE(desc->head) == READ_ONCE(desc->tail), 10); - if (err) { - pr_err("doorbell %u ring failed!\n", client->doorbell_id); - return -EIO; - } - - if (desc->wq_status != WQ_STATUS_ACTIVE) { - pr_err("doorbell %u ring put WQ in bad state (%u)!\n", - client->doorbell_id, desc->wq_status); - return -EIO; - } - - return 0; -} - -/* - * Basic client sanity check, handy to validate create_clients. - */ -static int validate_client(struct intel_guc_client *client, int client_priority) -{ - if (client->priority != client_priority || - client->doorbell_id == GUC_DOORBELL_INVALID) - return -EINVAL; - else - return 0; -} - -static bool client_doorbell_in_sync(struct intel_guc_client *client) -{ - return !client || doorbell_ok(client->guc, client->doorbell_id); -} - -/* - * Check that we're able to synchronize guc_clients with their doorbells - * - * We're creating clients and reserving doorbells once, at module load. During - * module lifetime, GuC, doorbell HW, and i915 state may go out of sync due to - * GuC being reset. In other words - GuC clients are still around, but the - * status of their doorbells may be incorrect. This is the reason behind - * validating that the doorbells status expected by the driver matches what the - * GuC/HW have. - */ -static int igt_guc_clients(void *args) -{ - struct drm_i915_private *dev_priv = args; - intel_wakeref_t wakeref; - struct intel_guc *guc; - int err = 0; - - GEM_BUG_ON(!HAS_GUC(dev_priv)); - mutex_lock(&dev_priv->drm.struct_mutex); - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); - - guc = &dev_priv->guc; - if (!guc) { - pr_err("No guc object!\n"); - err = -EINVAL; - goto unlock; - } - - err = check_all_doorbells(guc); - if (err) - goto unlock; - - /* - * Get rid of clients created during driver load because the test will - * recreate them. 
- */ - guc_clients_disable(guc); - guc_clients_destroy(guc); - if (guc->execbuf_client) { - pr_err("guc_clients_destroy lied!\n"); - err = -EINVAL; - goto unlock; - } - - err = guc_clients_create(guc); - if (err) { - pr_err("Failed to create clients\n"); - goto unlock; - } - GEM_BUG_ON(!guc->execbuf_client); - - err = validate_client(guc->execbuf_client, - GUC_CLIENT_PRIORITY_KMD_NORMAL); - if (err) { - pr_err("execbug client validation failed\n"); - goto out; - } - - /* the client should now have reserved a doorbell */ - if (!has_doorbell(guc->execbuf_client)) { - pr_err("guc_clients_create didn't reserve doorbells\n"); - err = -EINVAL; - goto out; - } - - /* Now enable the clients */ - guc_clients_enable(guc); - - /* each client should now have received a doorbell */ - if (!client_doorbell_in_sync(guc->execbuf_client)) { - pr_err("failed to initialize the doorbells\n"); - err = -EINVAL; - goto out; - } - - /* - * Basic test - an attempt to reallocate a valid doorbell to the - * client it is currently assigned should not cause a failure. - */ - err = create_doorbell(guc->execbuf_client); - -out: - /* - * Leave clean state for other test, plus the driver always destroy the - * clients during unload. - */ - guc_clients_disable(guc); - guc_clients_destroy(guc); - guc_clients_create(guc); - guc_clients_enable(guc); -unlock: - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); - mutex_unlock(&dev_priv->drm.struct_mutex); - return err; -} - -/* - * Create as many clients as number of doorbells. Note that there's already - * client(s)/doorbell(s) created during driver load, but this test creates - * its own and do not interact with the existing ones. - */ -static int igt_guc_doorbells(void *arg) -{ - struct drm_i915_private *dev_priv = arg; - intel_wakeref_t wakeref; - struct intel_guc *guc; - int i, err = 0; - u16 db_id; - - GEM_BUG_ON(!HAS_GUC(dev_priv)); - mutex_lock(&dev_priv->drm.struct_mutex); - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); - - guc = &dev_priv->guc; - if (!guc) { - pr_err("No guc object!\n"); - err = -EINVAL; - goto unlock; - } - - err = check_all_doorbells(guc); - if (err) - goto unlock; - - for (i = 0; i < ATTEMPTS; i++) { - clients[i] = guc_client_alloc(guc, i % GUC_CLIENT_PRIORITY_NUM); - - if (!clients[i]) { - pr_err("[%d] No guc client\n", i); - err = -EINVAL; - goto out; - } - - if (IS_ERR(clients[i])) { - if (PTR_ERR(clients[i]) != -ENOSPC) { - pr_err("[%d] unexpected error\n", i); - err = PTR_ERR(clients[i]); - goto out; - } - - if (available_dbs(guc, i % GUC_CLIENT_PRIORITY_NUM)) { - pr_err("[%d] non-db related alloc fail\n", i); - err = -EINVAL; - goto out; - } - - /* expected, ran out of dbs for this client type */ - continue; - } - - /* - * The check below is only valid because we keep a doorbell - * assigned during the whole life of the client. 
- */ - if (clients[i]->stage_id >= GUC_NUM_DOORBELLS) { - pr_err("[%d] more clients than doorbells (%d >= %d)\n", - i, clients[i]->stage_id, GUC_NUM_DOORBELLS); - err = -EINVAL; - goto out; - } - - err = validate_client(clients[i], i % GUC_CLIENT_PRIORITY_NUM); - if (err) { - pr_err("[%d] client_alloc sanity check failed!\n", i); - err = -EINVAL; - goto out; - } - - db_id = clients[i]->doorbell_id; - - err = __guc_client_enable(clients[i]); - if (err) { - pr_err("[%d] Failed to create a doorbell\n", i); - goto out; - } - - /* doorbell id shouldn't change, we are holding the mutex */ - if (db_id != clients[i]->doorbell_id) { - pr_err("[%d] doorbell id changed (%d != %d)\n", - i, db_id, clients[i]->doorbell_id); - err = -EINVAL; - goto out; - } - - err = check_all_doorbells(guc); - if (err) - goto out; - - err = ring_doorbell_nop(clients[i]); - if (err) - goto out; - } - -out: - for (i = 0; i < ATTEMPTS; i++) - if (!IS_ERR_OR_NULL(clients[i])) { - __guc_client_disable(clients[i]); - guc_client_free(clients[i]); - } -unlock: - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); - mutex_unlock(&dev_priv->drm.struct_mutex); - return err; -} - -int intel_guc_live_selftest(struct drm_i915_private *dev_priv) -{ - static const struct i915_subtest tests[] = { - SUBTEST(igt_guc_clients), - SUBTEST(igt_guc_doorbells), - }; - - if (!USES_GUC_SUBMISSION(dev_priv)) - return 0; - - return i915_subtests(tests, dev_priv); -} -- cgit v1.2.3 From 8b5689d7e3ca889a7e55c79bc335b33e3f170a18 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Sat, 13 Jul 2019 11:00:12 +0100 Subject: drm/i915/uc: move GuC/HuC inside intel_gt under a new intel_uc Being part of the GT HW, it make sense to keep the guc/huc structures inside the GT structure. To help with the encapsulation work done by the following patches, both structures are placed inside a new intel_uc container. Although this results in code with ugly nested dereferences (i915->gt.uc.guc...), it saves us the extra work required in moving the structures twice (i915 -> gt -> uc). The following patches will reduce the number of places where we try to access the guc/huc structures directly from i915 and reduce the ugliness. 
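For reference, a condensed sketch of the resulting containment (only the uc-related members are shown; the full definitions are in the hunks below, and struct intel_guc / struct intel_huc themselves are unchanged):

    struct intel_uc {
            struct intel_guc guc;
            struct intel_huc huc;
    };

    struct intel_gt {
            /* ... uncore, ggtt, timelines, ... */
            struct intel_uc uc;
    };

so a dereference that used to read i915->guc.fw now reads i915->gt.uc.guc.fw.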
Signed-off-by: Daniele Ceraolo Spurio Cc: Michal Wajdeczko Cc: Chris Wilson Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190713100016.8026-7-chris@chris-wilson.co.uk Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/gt/intel_gt_types.h | 4 ++ drivers/gpu/drm/i915/gt/intel_reset.c | 6 +-- drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 2 +- drivers/gpu/drm/i915/gt/uc/intel_huc.c | 4 +- drivers/gpu/drm/i915/gt/uc/intel_uc.c | 52 +++++++++++------------ drivers/gpu/drm/i915/gt/uc/intel_uc.h | 5 +++ drivers/gpu/drm/i915/gt/uc/selftest_guc.c | 4 +- drivers/gpu/drm/i915/i915_debugfs.c | 26 ++++++------ drivers/gpu/drm/i915/i915_drv.c | 2 +- drivers/gpu/drm/i915/i915_drv.h | 7 +-- drivers/gpu/drm/i915/i915_gpu_error.c | 11 ++--- drivers/gpu/drm/i915/i915_irq.c | 6 +-- drivers/gpu/drm/i915/intel_wopcm.c | 4 +- 13 files changed, 69 insertions(+), 64 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h index 1ffbc3ec6ef3..d3b90c6ee8cf 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h @@ -13,6 +13,8 @@ #include #include +#include "uc/intel_uc.h" + #include "i915_vma.h" #include "intel_reset_types.h" #include "intel_wakeref.h" @@ -34,6 +36,8 @@ struct intel_gt { struct intel_uncore *uncore; struct i915_ggtt *ggtt; + struct intel_uc uc; + struct intel_gt_timelines { struct mutex mutex; /* protects list */ struct list_head active_list; diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 8490d4a991ad..bdd34a1d739a 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -1099,14 +1099,14 @@ int intel_engine_reset(struct intel_engine_cs *engine, const char *msg) "Resetting %s for %s\n", engine->name, msg); atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]); - if (!engine->i915->guc.execbuf_client) + if (!engine->gt->uc.guc.execbuf_client) ret = intel_gt_reset_engine(engine); else - ret = intel_guc_reset_engine(&engine->i915->guc, engine); + ret = intel_guc_reset_engine(&engine->gt->uc.guc, engine); if (ret) { /* If we fail here, we expect to fallback to a global reset */ DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n", - engine->i915->guc.execbuf_client ? "GuC " : "", + engine->gt->uc.guc.execbuf_client ? 
"GuC " : "", engine->name, ret); goto out; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index e1e4e683ce21..fa22d377ae48 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -509,7 +509,7 @@ static void guc_submit(struct intel_engine_cs *engine, struct i915_request **out, struct i915_request **end) { - struct intel_guc *guc = &engine->i915->guc; + struct intel_guc *guc = &engine->gt->uc.guc; struct intel_guc_client *client = guc->execbuf_client; spin_lock(&client->wq_lock); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c index 2a41ee89a16d..581c9c3d4fc0 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c @@ -47,7 +47,7 @@ void intel_huc_init_early(struct intel_huc *huc) static int intel_huc_rsa_data_create(struct intel_huc *huc) { struct drm_i915_private *i915 = huc_to_i915(huc); - struct intel_guc *guc = &i915->guc; + struct intel_guc *guc = &i915->gt.uc.guc; struct i915_vma *vma; void *vaddr; @@ -113,7 +113,7 @@ void intel_huc_fini(struct intel_huc *huc) int intel_huc_auth(struct intel_huc *huc) { struct drm_i915_private *i915 = huc_to_i915(huc); - struct intel_guc *guc = &i915->guc; + struct intel_guc *guc = &i915->gt.uc.guc; int ret; if (huc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index 4ea7661705b1..f47ee7601a66 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -54,8 +54,8 @@ static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv) static int __get_platform_enable_guc(struct drm_i915_private *i915) { - struct intel_uc_fw *guc_fw = &i915->guc.fw; - struct intel_uc_fw *huc_fw = &i915->huc.fw; + struct intel_uc_fw *guc_fw = &i915->gt.uc.guc.fw; + struct intel_uc_fw *huc_fw = &i915->gt.uc.huc.fw; int enable_guc = 0; if (!HAS_GUC(i915)) @@ -75,7 +75,7 @@ static int __get_default_guc_log_level(struct drm_i915_private *i915) { int guc_log_level; - if (!intel_uc_fw_supported(&i915->guc.fw) || + if (!intel_uc_fw_supported(&i915->gt.uc.guc.fw) || !intel_uc_is_using_guc(i915)) guc_log_level = GUC_LOG_LEVEL_DISABLED; else if (IS_ENABLED(CONFIG_DRM_I915_DEBUG) || @@ -108,8 +108,8 @@ static int __get_default_guc_log_level(struct drm_i915_private *i915) */ static void sanitize_options_early(struct drm_i915_private *i915) { - struct intel_uc_fw *guc_fw = &i915->guc.fw; - struct intel_uc_fw *huc_fw = &i915->huc.fw; + struct intel_uc_fw *guc_fw = &i915->gt.uc.guc.fw; + struct intel_uc_fw *huc_fw = &i915->gt.uc.huc.fw; /* A negative value means "use platform default" */ if (i915_modparams.enable_guc < 0) @@ -178,8 +178,8 @@ static void sanitize_options_early(struct drm_i915_private *i915) void intel_uc_init_early(struct drm_i915_private *i915) { - struct intel_guc *guc = &i915->guc; - struct intel_huc *huc = &i915->huc; + struct intel_guc *guc = &i915->gt.uc.guc; + struct intel_huc *huc = &i915->gt.uc.huc; intel_guc_init_early(guc); intel_huc_init_early(huc); @@ -189,7 +189,7 @@ void intel_uc_init_early(struct drm_i915_private *i915) void intel_uc_cleanup_early(struct drm_i915_private *i915) { - struct intel_guc *guc = &i915->guc; + struct intel_guc *guc = &i915->gt.uc.guc; guc_free_load_err_log(guc); } @@ -203,7 +203,7 @@ void intel_uc_cleanup_early(struct drm_i915_private *i915) */ void intel_uc_init_mmio(struct 
drm_i915_private *i915) { - intel_guc_init_send_regs(&i915->guc); + intel_guc_init_send_regs(&i915->gt.uc.guc); } static void guc_capture_load_err_log(struct intel_guc *guc) @@ -355,10 +355,10 @@ void intel_uc_fetch_firmwares(struct drm_i915_private *i915) if (!USES_GUC(i915)) return; - intel_uc_fw_fetch(i915, &i915->guc.fw); + intel_uc_fw_fetch(i915, &i915->gt.uc.guc.fw); if (USES_HUC(i915)) - intel_uc_fw_fetch(i915, &i915->huc.fw); + intel_uc_fw_fetch(i915, &i915->gt.uc.huc.fw); } void intel_uc_cleanup_firmwares(struct drm_i915_private *i915) @@ -367,15 +367,15 @@ void intel_uc_cleanup_firmwares(struct drm_i915_private *i915) return; if (USES_HUC(i915)) - intel_uc_fw_cleanup_fetch(&i915->huc.fw); + intel_uc_fw_cleanup_fetch(&i915->gt.uc.huc.fw); - intel_uc_fw_cleanup_fetch(&i915->guc.fw); + intel_uc_fw_cleanup_fetch(&i915->gt.uc.guc.fw); } int intel_uc_init(struct drm_i915_private *i915) { - struct intel_guc *guc = &i915->guc; - struct intel_huc *huc = &i915->huc; + struct intel_guc *guc = &i915->gt.uc.guc; + struct intel_huc *huc = &i915->gt.uc.huc; int ret; if (!USES_GUC(i915)) @@ -419,7 +419,7 @@ err_guc: void intel_uc_fini(struct drm_i915_private *i915) { - struct intel_guc *guc = &i915->guc; + struct intel_guc *guc = &i915->gt.uc.guc; if (!USES_GUC(i915)) return; @@ -430,15 +430,15 @@ void intel_uc_fini(struct drm_i915_private *i915) intel_guc_submission_fini(guc); if (USES_HUC(i915)) - intel_huc_fini(&i915->huc); + intel_huc_fini(&i915->gt.uc.huc); intel_guc_fini(guc); } static void __uc_sanitize(struct drm_i915_private *i915) { - struct intel_guc *guc = &i915->guc; - struct intel_huc *huc = &i915->huc; + struct intel_guc *guc = &i915->gt.uc.guc; + struct intel_huc *huc = &i915->gt.uc.huc; GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw)); @@ -458,8 +458,8 @@ void intel_uc_sanitize(struct drm_i915_private *i915) int intel_uc_init_hw(struct drm_i915_private *i915) { - struct intel_guc *guc = &i915->guc; - struct intel_huc *huc = &i915->huc; + struct intel_guc *guc = &i915->gt.uc.guc; + struct intel_huc *huc = &i915->gt.uc.huc; int ret, attempts; if (!USES_GUC(i915)) @@ -557,7 +557,7 @@ err_out: void intel_uc_fini_hw(struct drm_i915_private *i915) { - struct intel_guc *guc = &i915->guc; + struct intel_guc *guc = &i915->gt.uc.guc; if (!intel_guc_is_loaded(guc)) return; @@ -579,7 +579,7 @@ void intel_uc_fini_hw(struct drm_i915_private *i915) */ void intel_uc_reset_prepare(struct drm_i915_private *i915) { - struct intel_guc *guc = &i915->guc; + struct intel_guc *guc = &i915->gt.uc.guc; if (!intel_guc_is_loaded(guc)) return; @@ -590,7 +590,7 @@ void intel_uc_reset_prepare(struct drm_i915_private *i915) void intel_uc_runtime_suspend(struct drm_i915_private *i915) { - struct intel_guc *guc = &i915->guc; + struct intel_guc *guc = &i915->gt.uc.guc; int err; if (!intel_guc_is_loaded(guc)) @@ -605,7 +605,7 @@ void intel_uc_runtime_suspend(struct drm_i915_private *i915) void intel_uc_suspend(struct drm_i915_private *i915) { - struct intel_guc *guc = &i915->guc; + struct intel_guc *guc = &i915->gt.uc.guc; intel_wakeref_t wakeref; if (!intel_guc_is_loaded(guc)) @@ -617,7 +617,7 @@ void intel_uc_suspend(struct drm_i915_private *i915) int intel_uc_resume(struct drm_i915_private *i915) { - struct intel_guc *guc = &i915->guc; + struct intel_guc *guc = &i915->gt.uc.guc; int err; if (!intel_guc_is_loaded(guc)) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_uc.h index 5a1383e192dd..a2fdc84afff2 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.h +++ 
b/drivers/gpu/drm/i915/gt/uc/intel_uc.h @@ -28,6 +28,11 @@ #include "intel_huc.h" #include "i915_params.h" +struct intel_uc { + struct intel_guc guc; + struct intel_huc huc; +}; + void intel_uc_init_early(struct drm_i915_private *dev_priv); void intel_uc_cleanup_early(struct drm_i915_private *dev_priv); void intel_uc_init_mmio(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c index 6ca76f5a98d4..93f7c930ab18 100644 --- a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c @@ -138,7 +138,7 @@ static int igt_guc_clients(void *args) mutex_lock(&dev_priv->drm.struct_mutex); wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); - guc = &dev_priv->guc; + guc = &dev_priv->gt.uc.guc; if (!guc) { pr_err("No guc object!\n"); err = -EINVAL; @@ -230,7 +230,7 @@ static int igt_guc_doorbells(void *arg) mutex_lock(&dev_priv->drm.struct_mutex); wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); - guc = &dev_priv->guc; + guc = &dev_priv->gt.uc.guc; if (!guc) { pr_err("No guc object!\n"); err = -EINVAL; diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 63024c0d0cd9..0ac9b8d5e8b9 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1869,7 +1869,7 @@ static int i915_huc_load_status_info(struct seq_file *m, void *data) return -ENODEV; p = drm_seq_file_printer(m); - intel_uc_fw_dump(&dev_priv->huc.fw, &p); + intel_uc_fw_dump(&dev_priv->gt.uc.huc.fw, &p); with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2)); @@ -1887,7 +1887,7 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data) return -ENODEV; p = drm_seq_file_printer(m); - intel_uc_fw_dump(&dev_priv->guc.fw, &p); + intel_uc_fw_dump(&dev_priv->gt.uc.guc.fw, &p); with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) { u32 tmp = I915_READ(GUC_STATUS); @@ -1930,7 +1930,7 @@ stringify_guc_log_type(enum guc_log_buffer_type type) static void i915_guc_log_info(struct seq_file *m, struct drm_i915_private *dev_priv) { - struct intel_guc_log *log = &dev_priv->guc.log; + struct intel_guc_log *log = &dev_priv->gt.uc.guc.log; enum guc_log_buffer_type type; if (!intel_guc_log_relay_enabled(log)) { @@ -1976,7 +1976,7 @@ static void i915_guc_client_info(struct seq_file *m, static int i915_guc_info(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = node_to_i915(m->private); - const struct intel_guc *guc = &dev_priv->guc; + const struct intel_guc *guc = &dev_priv->gt.uc.guc; if (!USES_GUC(dev_priv)) return -ENODEV; @@ -2003,7 +2003,7 @@ static int i915_guc_info(struct seq_file *m, void *data) static int i915_guc_stage_pool(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = node_to_i915(m->private); - const struct intel_guc *guc = &dev_priv->guc; + const struct intel_guc *guc = &dev_priv->gt.uc.guc; struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr; intel_engine_mask_t tmp; int index; @@ -2066,9 +2066,9 @@ static int i915_guc_log_dump(struct seq_file *m, void *data) return -ENODEV; if (dump_load_err) - obj = dev_priv->guc.load_err_log; - else if (dev_priv->guc.log.vma) - obj = dev_priv->guc.log.vma->obj; + obj = dev_priv->gt.uc.guc.load_err_log; + else if (dev_priv->gt.uc.guc.log.vma) + obj = dev_priv->gt.uc.guc.log.vma->obj; if (!obj) return 0; @@ -2099,7 +2099,7 @@ static int i915_guc_log_level_get(void *data, u64 *val) if 
(!USES_GUC(dev_priv)) return -ENODEV; - *val = intel_guc_log_get_level(&dev_priv->guc.log); + *val = intel_guc_log_get_level(&dev_priv->gt.uc.guc.log); return 0; } @@ -2111,7 +2111,7 @@ static int i915_guc_log_level_set(void *data, u64 val) if (!USES_GUC(dev_priv)) return -ENODEV; - return intel_guc_log_set_level(&dev_priv->guc.log, val); + return intel_guc_log_set_level(&dev_priv->gt.uc.guc.log, val); } DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops, @@ -2125,9 +2125,9 @@ static int i915_guc_log_relay_open(struct inode *inode, struct file *file) if (!USES_GUC(dev_priv)) return -ENODEV; - file->private_data = &dev_priv->guc.log; + file->private_data = &dev_priv->gt.uc.guc.log; - return intel_guc_log_relay_open(&dev_priv->guc.log); + return intel_guc_log_relay_open(&dev_priv->gt.uc.guc.log); } static ssize_t @@ -2147,7 +2147,7 @@ static int i915_guc_log_relay_release(struct inode *inode, struct file *file) { struct drm_i915_private *dev_priv = inode->i_private; - intel_guc_log_relay_close(&dev_priv->guc.log); + intel_guc_log_relay_close(&dev_priv->gt.uc.guc.log); return 0; } diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 44e13b9c8e25..434819deac5b 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -426,7 +426,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data, value = sseu->min_eu_in_pool; break; case I915_PARAM_HUC_STATUS: - value = intel_huc_check_status(&dev_priv->huc); + value = intel_huc_check_status(&dev_priv->gt.uc.huc); if (value < 0) return value; break; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index b604fee623ab..1d2e9f6ee253 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1349,9 +1349,6 @@ struct drm_i915_private { struct intel_wopcm wopcm; - struct intel_huc huc; - struct intel_guc guc; - struct intel_csr csr; struct intel_gmbus gmbus[GMBUS_NUM_PINS]; @@ -1908,12 +1905,12 @@ static inline struct drm_i915_private *wopcm_to_i915(struct intel_wopcm *wopcm) static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc) { - return container_of(guc, struct drm_i915_private, guc); + return container_of(guc, struct drm_i915_private, gt.uc.guc); } static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc) { - return container_of(huc, struct drm_i915_private, huc); + return container_of(huc, struct drm_i915_private, gt.uc.huc); } /* Simple iterator over all initialised engines */ diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 5489cd879315..78e388fa059c 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -1552,21 +1552,22 @@ static void capture_uc_state(struct i915_gpu_state *error) { struct drm_i915_private *i915 = error->i915; struct i915_error_uc *error_uc = &error->uc; + struct intel_uc *uc = &i915->gt.uc; /* Capturing uC state won't be useful if there is no GuC */ if (!error->device_info.has_guc) return; - error_uc->guc_fw = i915->guc.fw; - error_uc->huc_fw = i915->huc.fw; + error_uc->guc_fw = uc->guc.fw; + error_uc->huc_fw = uc->huc.fw; /* Non-default firmware paths will be specified by the modparam. * As modparams are generally accesible from the userspace make * explicit copies of the firmware paths. 
*/ - error_uc->guc_fw.path = kstrdup(i915->guc.fw.path, GFP_ATOMIC); - error_uc->huc_fw.path = kstrdup(i915->huc.fw.path, GFP_ATOMIC); - error_uc->guc_log = i915_error_object_create(i915, i915->guc.log.vma); + error_uc->guc_fw.path = kstrdup(uc->guc.fw.path, GFP_ATOMIC); + error_uc->huc_fw.path = kstrdup(uc->huc.fw.path, GFP_ATOMIC); + error_uc->guc_log = i915_error_object_create(i915, uc->guc.log.vma); } /* Capture all registers which don't fit into another category. */ diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 42d6d8bfac70..78c748cb9df8 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1655,7 +1655,7 @@ static void gen8_gt_irq_handler(struct drm_i915_private *i915, if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) { gen6_rps_irq_handler(i915, gt_iir[2]); - guc_irq_handler(&i915->guc, gt_iir[2] >> 16); + guc_irq_handler(&i915->gt.uc.guc, gt_iir[2] >> 16); } } @@ -3082,10 +3082,8 @@ static void gen11_other_irq_handler(struct intel_gt *gt, const u8 instance, const u16 iir) { - struct drm_i915_private *i915 = gt->i915; - if (instance == OTHER_GUC_INSTANCE) - return guc_irq_handler(&i915->guc, iir); + return guc_irq_handler(>->uc.guc, iir); if (instance == OTHER_GTPM_INSTANCE) return gen11_rps_irq_handler(gt, iir); diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c index a6bc15bc7be3..fafd4e6a1147 100644 --- a/drivers/gpu/drm/i915/intel_wopcm.c +++ b/drivers/gpu/drm/i915/intel_wopcm.c @@ -164,8 +164,8 @@ static inline int check_hw_restriction(struct drm_i915_private *i915, int intel_wopcm_init(struct intel_wopcm *wopcm) { struct drm_i915_private *i915 = wopcm_to_i915(wopcm); - u32 guc_fw_size = intel_uc_fw_get_upload_size(&i915->guc.fw); - u32 huc_fw_size = intel_uc_fw_get_upload_size(&i915->huc.fw); + u32 guc_fw_size = intel_uc_fw_get_upload_size(&i915->gt.uc.guc.fw); + u32 huc_fw_size = intel_uc_fw_get_upload_size(&i915->gt.uc.huc.fw); u32 ctx_rsvd = context_reserved_size(i915); u32 guc_wopcm_base; u32 guc_wopcm_size; -- cgit v1.2.3 From ca7b2c1bbede6187342dd0bec8666ab4481fa6ad Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Sat, 13 Jul 2019 11:00:13 +0100 Subject: drm/i915/uc: Move intel functions to intel_uc All the intel_uc_* can now be moved to work on the intel_uc structure for better encapsulation of uc-related actions. Note: I've introduced uc_to_gt instead of uc_to_i915 because the aim is to move everything to be gt-focused in the medium term, so we would've had to replace it soon anyway. 
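The uc_to_gt() helper mentioned above is a plain container_of() lookup from the embedded member back to its enclosing GT, and call sites simply switch from the device pointer to the embedded container; a condensed before/after taken from the hunks below:

    static inline struct intel_gt *uc_to_gt(struct intel_uc *uc)
    {
            return container_of(uc, struct intel_gt, uc);
    }

    -       intel_uc_suspend(i915);
    +       intel_uc_suspend(&i915->gt.uc);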
Signed-off-by: Daniele Ceraolo Spurio Cc: Michal Wajdeczko Acked-by: Michal Wajdeczko Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190713100016.8026-8-chris@chris-wilson.co.uk Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/gem/i915_gem_pm.c | 6 +- drivers/gpu/drm/i915/gt/intel_gt.h | 5 + drivers/gpu/drm/i915/gt/intel_reset.c | 2 +- drivers/gpu/drm/i915/gt/uc/intel_uc.c | 188 ++++++++++++++++----------------- drivers/gpu/drm/i915/gt/uc/intel_uc.h | 34 +++--- drivers/gpu/drm/i915/i915_drv.c | 14 +-- drivers/gpu/drm/i915/i915_drv.h | 6 +- drivers/gpu/drm/i915/i915_gem.c | 18 ++-- 8 files changed, 139 insertions(+), 134 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c index 9ee6edbad4c5..8faf262278ae 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c @@ -174,7 +174,7 @@ void i915_gem_suspend(struct drm_i915_private *i915) i915_gem_drain_freed_objects(i915); - intel_uc_suspend(i915); + intel_uc_suspend(&i915->gt.uc); } static struct drm_i915_gem_object *first_mm_object(struct list_head *list) @@ -239,7 +239,7 @@ void i915_gem_suspend_late(struct drm_i915_private *i915) } spin_unlock_irqrestore(&i915->mm.obj_lock, flags); - intel_uc_sanitize(i915); + intel_uc_sanitize(&i915->gt.uc); i915_gem_sanitize(i915); } @@ -266,7 +266,7 @@ void i915_gem_resume(struct drm_i915_private *i915) if (intel_gt_resume(&i915->gt)) goto err_wedged; - intel_uc_resume(i915); + intel_uc_resume(&i915->gt.uc); /* Always reload a context for powersaving. */ if (!i915_gem_load_power_context(i915)) diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h index 49c0085385a0..26c94521ad1b 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.h +++ b/drivers/gpu/drm/i915/gt/intel_gt.h @@ -12,6 +12,11 @@ struct drm_i915_private; +static inline struct intel_gt *uc_to_gt(struct intel_uc *uc) +{ + return container_of(uc, struct intel_gt, uc); +} + void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915); void intel_gt_init_hw(struct drm_i915_private *i915); diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index bdd34a1d739a..7ddedfb16aa2 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -717,7 +717,7 @@ static intel_engine_mask_t reset_prepare(struct intel_gt *gt) reset_prepare_engine(engine); } - intel_uc_reset_prepare(gt->i915); + intel_uc_reset_prepare(>->uc); return awake; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index f47ee7601a66..0c43d547bc94 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -22,19 +22,22 @@ * */ +#include "gt/intel_gt.h" #include "gt/intel_reset.h" -#include "intel_uc.h" #include "intel_guc.h" #include "intel_guc_ads.h" #include "intel_guc_submission.h" +#include "intel_uc.h" + #include "i915_drv.h" static void guc_free_load_err_log(struct intel_guc *guc); /* Reset GuC providing us with fresh state for both GuC and HuC. 
*/ -static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv) +static int __intel_uc_reset_hw(struct intel_uc *uc) { + struct drm_i915_private *dev_priv = uc_to_gt(uc)->i915; int ret; u32 guc_status; @@ -52,17 +55,17 @@ static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv) return ret; } -static int __get_platform_enable_guc(struct drm_i915_private *i915) +static int __get_platform_enable_guc(struct intel_uc *uc) { - struct intel_uc_fw *guc_fw = &i915->gt.uc.guc.fw; - struct intel_uc_fw *huc_fw = &i915->gt.uc.huc.fw; + struct intel_uc_fw *guc_fw = &uc->guc.fw; + struct intel_uc_fw *huc_fw = &uc->huc.fw; int enable_guc = 0; - if (!HAS_GUC(i915)) + if (!HAS_GUC(uc_to_gt(uc)->i915)) return 0; /* We don't want to enable GuC/HuC on pre-Gen11 by default */ - if (INTEL_GEN(i915) < 11) + if (INTEL_GEN(uc_to_gt(uc)->i915) < 11) return 0; if (intel_uc_fw_is_selected(guc_fw) && intel_uc_fw_is_selected(huc_fw)) @@ -71,12 +74,11 @@ static int __get_platform_enable_guc(struct drm_i915_private *i915) return enable_guc; } -static int __get_default_guc_log_level(struct drm_i915_private *i915) +static int __get_default_guc_log_level(struct intel_uc *uc) { int guc_log_level; - if (!intel_uc_fw_supported(&i915->gt.uc.guc.fw) || - !intel_uc_is_using_guc(i915)) + if (!intel_uc_fw_supported(&uc->guc.fw) || !intel_uc_is_using_guc(uc)) guc_log_level = GUC_LOG_LEVEL_DISABLED; else if (IS_ENABLED(CONFIG_DRM_I915_DEBUG) || IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) @@ -91,7 +93,7 @@ static int __get_default_guc_log_level(struct drm_i915_private *i915) /** * sanitize_options_early - sanitize uC related modparam options - * @i915: device private + * @uc: the intel_uc structure * * In case of "enable_guc" option this function will attempt to modify * it only if it was initially set to "auto(-1)". Default value for this @@ -106,22 +108,22 @@ static int __get_default_guc_log_level(struct drm_i915_private *i915) * unless GuC is enabled on given platform and the driver is compiled with * debug config when this modparam will default to "enable(1..4)". */ -static void sanitize_options_early(struct drm_i915_private *i915) +static void sanitize_options_early(struct intel_uc *uc) { - struct intel_uc_fw *guc_fw = &i915->gt.uc.guc.fw; - struct intel_uc_fw *huc_fw = &i915->gt.uc.huc.fw; + struct intel_uc_fw *guc_fw = &uc->guc.fw; + struct intel_uc_fw *huc_fw = &uc->huc.fw; /* A negative value means "use platform default" */ if (i915_modparams.enable_guc < 0) - i915_modparams.enable_guc = __get_platform_enable_guc(i915); + i915_modparams.enable_guc = __get_platform_enable_guc(uc); DRM_DEBUG_DRIVER("enable_guc=%d (submission:%s huc:%s)\n", i915_modparams.enable_guc, - yesno(intel_uc_is_using_guc_submission(i915)), - yesno(intel_uc_is_using_huc(i915))); + yesno(intel_uc_is_using_guc_submission(uc)), + yesno(intel_uc_is_using_huc(uc))); /* Verify GuC firmware availability */ - if (intel_uc_is_using_guc(i915) && !intel_uc_fw_is_selected(guc_fw)) { + if (intel_uc_is_using_guc(uc) && !intel_uc_fw_is_selected(guc_fw)) { DRM_WARN("Incompatible option detected: %s=%d, %s!\n", "enable_guc", i915_modparams.enable_guc, !intel_uc_fw_supported(guc_fw) ? 
@@ -129,7 +131,7 @@ static void sanitize_options_early(struct drm_i915_private *i915) } /* Verify HuC firmware availability */ - if (intel_uc_is_using_huc(i915) && !intel_uc_fw_is_selected(huc_fw)) { + if (intel_uc_is_using_huc(uc) && !intel_uc_fw_is_selected(huc_fw)) { DRM_WARN("Incompatible option detected: %s=%d, %s!\n", "enable_guc", i915_modparams.enable_guc, !intel_uc_fw_supported(huc_fw) ? @@ -137,7 +139,7 @@ static void sanitize_options_early(struct drm_i915_private *i915) } /* XXX: GuC submission is unavailable for now */ - if (intel_uc_is_using_guc_submission(i915)) { + if (intel_uc_is_using_guc_submission(uc)) { DRM_INFO("Incompatible option detected: %s=%d, %s!\n", "enable_guc", i915_modparams.enable_guc, "GuC submission not supported"); @@ -148,9 +150,9 @@ static void sanitize_options_early(struct drm_i915_private *i915) /* A negative value means "use platform/config default" */ if (i915_modparams.guc_log_level < 0) i915_modparams.guc_log_level = - __get_default_guc_log_level(i915); + __get_default_guc_log_level(uc); - if (i915_modparams.guc_log_level > 0 && !intel_uc_is_using_guc(i915)) { + if (i915_modparams.guc_log_level > 0 && !intel_uc_is_using_guc(uc)) { DRM_WARN("Incompatible option detected: %s=%d, %s!\n", "guc_log_level", i915_modparams.guc_log_level, !intel_uc_fw_supported(guc_fw) ? @@ -176,34 +178,29 @@ static void sanitize_options_early(struct drm_i915_private *i915) GEM_BUG_ON(i915_modparams.guc_log_level < 0); } -void intel_uc_init_early(struct drm_i915_private *i915) +void intel_uc_init_early(struct intel_uc *uc) { - struct intel_guc *guc = &i915->gt.uc.guc; - struct intel_huc *huc = &i915->gt.uc.huc; + intel_guc_init_early(&uc->guc); + intel_huc_init_early(&uc->huc); - intel_guc_init_early(guc); - intel_huc_init_early(huc); - - sanitize_options_early(i915); + sanitize_options_early(uc); } -void intel_uc_cleanup_early(struct drm_i915_private *i915) +void intel_uc_cleanup_early(struct intel_uc *uc) { - struct intel_guc *guc = &i915->gt.uc.guc; - - guc_free_load_err_log(guc); + guc_free_load_err_log(&uc->guc); } /** * intel_uc_init_mmio - setup uC MMIO access - * @i915: device private + * @uc: the intel_uc structure * * Setup minimal state necessary for MMIO accesses later in the * initialization sequence. 
*/ -void intel_uc_init_mmio(struct drm_i915_private *i915) +void intel_uc_init_mmio(struct intel_uc *uc) { - intel_guc_init_send_regs(&i915->gt.uc.guc); + intel_guc_init_send_regs(&uc->guc); } static void guc_capture_load_err_log(struct intel_guc *guc) @@ -350,54 +347,56 @@ static void guc_disable_communication(struct intel_guc *guc) DRM_INFO("GuC communication disabled\n"); } -void intel_uc_fetch_firmwares(struct drm_i915_private *i915) +void intel_uc_fetch_firmwares(struct intel_uc *uc) { - if (!USES_GUC(i915)) + struct drm_i915_private *i915 = uc_to_gt(uc)->i915; + + if (!intel_uc_is_using_guc(uc)) return; - intel_uc_fw_fetch(i915, &i915->gt.uc.guc.fw); + intel_uc_fw_fetch(i915, &uc->guc.fw); - if (USES_HUC(i915)) - intel_uc_fw_fetch(i915, &i915->gt.uc.huc.fw); + if (intel_uc_is_using_huc(uc)) + intel_uc_fw_fetch(i915, &uc->huc.fw); } -void intel_uc_cleanup_firmwares(struct drm_i915_private *i915) +void intel_uc_cleanup_firmwares(struct intel_uc *uc) { - if (!USES_GUC(i915)) + if (!intel_uc_is_using_guc(uc)) return; - if (USES_HUC(i915)) - intel_uc_fw_cleanup_fetch(&i915->gt.uc.huc.fw); + if (intel_uc_is_using_huc(uc)) + intel_uc_fw_cleanup_fetch(&uc->huc.fw); - intel_uc_fw_cleanup_fetch(&i915->gt.uc.guc.fw); + intel_uc_fw_cleanup_fetch(&uc->guc.fw); } -int intel_uc_init(struct drm_i915_private *i915) +int intel_uc_init(struct intel_uc *uc) { - struct intel_guc *guc = &i915->gt.uc.guc; - struct intel_huc *huc = &i915->gt.uc.huc; + struct intel_guc *guc = &uc->guc; + struct intel_huc *huc = &uc->huc; int ret; - if (!USES_GUC(i915)) + if (!intel_uc_is_using_guc(uc)) return 0; if (!intel_uc_fw_supported(&guc->fw)) return -ENODEV; /* XXX: GuC submission is unavailable for now */ - GEM_BUG_ON(USES_GUC_SUBMISSION(i915)); + GEM_BUG_ON(intel_uc_is_using_guc_submission(uc)); ret = intel_guc_init(guc); if (ret) return ret; - if (USES_HUC(i915)) { + if (intel_uc_is_using_huc(uc)) { ret = intel_huc_init(huc); if (ret) goto err_guc; } - if (USES_GUC_SUBMISSION(i915)) { + if (intel_uc_is_using_guc_submission(uc)) { /* * This is stuff we need to have available at fw load time * if we are planning to enable submission later @@ -410,59 +409,60 @@ int intel_uc_init(struct drm_i915_private *i915) return 0; err_huc: - if (USES_HUC(i915)) + if (intel_uc_is_using_huc(uc)) intel_huc_fini(huc); err_guc: intel_guc_fini(guc); return ret; } -void intel_uc_fini(struct drm_i915_private *i915) +void intel_uc_fini(struct intel_uc *uc) { - struct intel_guc *guc = &i915->gt.uc.guc; + struct intel_guc *guc = &uc->guc; - if (!USES_GUC(i915)) + if (!intel_uc_is_using_guc(uc)) return; GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw)); - if (USES_GUC_SUBMISSION(i915)) + if (intel_uc_is_using_guc_submission(uc)) intel_guc_submission_fini(guc); - if (USES_HUC(i915)) - intel_huc_fini(&i915->gt.uc.huc); + if (intel_uc_is_using_huc(uc)) + intel_huc_fini(&uc->huc); intel_guc_fini(guc); } -static void __uc_sanitize(struct drm_i915_private *i915) +static void __uc_sanitize(struct intel_uc *uc) { - struct intel_guc *guc = &i915->gt.uc.guc; - struct intel_huc *huc = &i915->gt.uc.huc; + struct intel_guc *guc = &uc->guc; + struct intel_huc *huc = &uc->huc; GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw)); intel_huc_sanitize(huc); intel_guc_sanitize(guc); - __intel_uc_reset_hw(i915); + __intel_uc_reset_hw(uc); } -void intel_uc_sanitize(struct drm_i915_private *i915) +void intel_uc_sanitize(struct intel_uc *uc) { - if (!USES_GUC(i915)) + if (!intel_uc_is_using_guc(uc)) return; - __uc_sanitize(i915); + __uc_sanitize(uc); } -int 
intel_uc_init_hw(struct drm_i915_private *i915) +int intel_uc_init_hw(struct intel_uc *uc) { - struct intel_guc *guc = &i915->gt.uc.guc; - struct intel_huc *huc = &i915->gt.uc.huc; + struct drm_i915_private *i915 = uc_to_gt(uc)->i915; + struct intel_guc *guc = &uc->guc; + struct intel_huc *huc = &uc->huc; int ret, attempts; - if (!USES_GUC(i915)) + if (!intel_uc_is_using_guc(uc)) return 0; GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw)); @@ -481,11 +481,11 @@ int intel_uc_init_hw(struct drm_i915_private *i915) * Always reset the GuC just before (re)loading, so * that the state and timing are fairly predictable */ - ret = __intel_uc_reset_hw(i915); + ret = __intel_uc_reset_hw(uc); if (ret) goto err_out; - if (USES_HUC(i915)) { + if (intel_uc_is_using_huc(uc)) { ret = intel_huc_fw_upload(huc); if (ret) goto err_out; @@ -509,7 +509,7 @@ int intel_uc_init_hw(struct drm_i915_private *i915) if (ret) goto err_log_capture; - if (USES_HUC(i915)) { + if (intel_uc_is_using_huc(uc)) { ret = intel_huc_auth(huc); if (ret) goto err_communication; @@ -519,7 +519,7 @@ int intel_uc_init_hw(struct drm_i915_private *i915) if (ret) goto err_communication; - if (USES_GUC_SUBMISSION(i915)) { + if (intel_uc_is_using_guc_submission(uc)) { ret = intel_guc_submission_enable(guc); if (ret) goto err_communication; @@ -528,9 +528,9 @@ int intel_uc_init_hw(struct drm_i915_private *i915) dev_info(i915->drm.dev, "GuC firmware version %u.%u\n", guc->fw.major_ver_found, guc->fw.minor_ver_found); dev_info(i915->drm.dev, "GuC submission %s\n", - enableddisabled(USES_GUC_SUBMISSION(i915))); + enableddisabled(intel_uc_is_using_guc_submission(uc))); dev_info(i915->drm.dev, "HuC %s\n", - enableddisabled(USES_HUC(i915))); + enableddisabled(intel_uc_is_using_huc(uc))); return 0; @@ -542,7 +542,7 @@ err_communication: err_log_capture: guc_capture_load_err_log(guc); err_out: - __uc_sanitize(i915); + __uc_sanitize(uc); /* * Note that there is no fallback as either user explicitly asked for @@ -555,42 +555,42 @@ err_out: return ret; } -void intel_uc_fini_hw(struct drm_i915_private *i915) +void intel_uc_fini_hw(struct intel_uc *uc) { - struct intel_guc *guc = &i915->gt.uc.guc; + struct intel_guc *guc = &uc->guc; if (!intel_guc_is_loaded(guc)) return; GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw)); - if (USES_GUC_SUBMISSION(i915)) + if (intel_uc_is_using_guc_submission(uc)) intel_guc_submission_disable(guc); guc_disable_communication(guc); - __uc_sanitize(i915); + __uc_sanitize(uc); } /** * intel_uc_reset_prepare - Prepare for reset - * @i915: device private + * @uc: the intel_uc structure * * Preparing for full gpu reset. 
*/ -void intel_uc_reset_prepare(struct drm_i915_private *i915) +void intel_uc_reset_prepare(struct intel_uc *uc) { - struct intel_guc *guc = &i915->gt.uc.guc; + struct intel_guc *guc = &uc->guc; if (!intel_guc_is_loaded(guc)) return; guc_stop_communication(guc); - __uc_sanitize(i915); + __uc_sanitize(uc); } -void intel_uc_runtime_suspend(struct drm_i915_private *i915) +void intel_uc_runtime_suspend(struct intel_uc *uc) { - struct intel_guc *guc = &i915->gt.uc.guc; + struct intel_guc *guc = &uc->guc; int err; if (!intel_guc_is_loaded(guc)) @@ -603,21 +603,21 @@ void intel_uc_runtime_suspend(struct drm_i915_private *i915) guc_disable_communication(guc); } -void intel_uc_suspend(struct drm_i915_private *i915) +void intel_uc_suspend(struct intel_uc *uc) { - struct intel_guc *guc = &i915->gt.uc.guc; + struct intel_guc *guc = &uc->guc; intel_wakeref_t wakeref; if (!intel_guc_is_loaded(guc)) return; - with_intel_runtime_pm(&i915->runtime_pm, wakeref) - intel_uc_runtime_suspend(i915); + with_intel_runtime_pm(&uc_to_gt(uc)->i915->runtime_pm, wakeref) + intel_uc_runtime_suspend(uc); } -int intel_uc_resume(struct drm_i915_private *i915) +int intel_uc_resume(struct intel_uc *uc) { - struct intel_guc *guc = &i915->gt.uc.guc; + struct intel_guc *guc = &uc->guc; int err; if (!intel_guc_is_loaded(guc)) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_uc.h index a2fdc84afff2..fe3362fd7706 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.h @@ -33,34 +33,34 @@ struct intel_uc { struct intel_huc huc; }; -void intel_uc_init_early(struct drm_i915_private *dev_priv); -void intel_uc_cleanup_early(struct drm_i915_private *dev_priv); -void intel_uc_init_mmio(struct drm_i915_private *dev_priv); -void intel_uc_fetch_firmwares(struct drm_i915_private *dev_priv); -void intel_uc_cleanup_firmwares(struct drm_i915_private *dev_priv); -void intel_uc_sanitize(struct drm_i915_private *dev_priv); -int intel_uc_init_hw(struct drm_i915_private *dev_priv); -void intel_uc_fini_hw(struct drm_i915_private *dev_priv); -int intel_uc_init(struct drm_i915_private *dev_priv); -void intel_uc_fini(struct drm_i915_private *dev_priv); -void intel_uc_reset_prepare(struct drm_i915_private *i915); -void intel_uc_suspend(struct drm_i915_private *i915); -void intel_uc_runtime_suspend(struct drm_i915_private *i915); -int intel_uc_resume(struct drm_i915_private *dev_priv); +void intel_uc_init_early(struct intel_uc *uc); +void intel_uc_cleanup_early(struct intel_uc *uc); +void intel_uc_init_mmio(struct intel_uc *uc); +void intel_uc_fetch_firmwares(struct intel_uc *uc); +void intel_uc_cleanup_firmwares(struct intel_uc *uc); +void intel_uc_sanitize(struct intel_uc *uc); +int intel_uc_init_hw(struct intel_uc *uc); +void intel_uc_fini_hw(struct intel_uc *uc); +int intel_uc_init(struct intel_uc *uc); +void intel_uc_fini(struct intel_uc *uc); +void intel_uc_reset_prepare(struct intel_uc *uc); +void intel_uc_suspend(struct intel_uc *uc); +void intel_uc_runtime_suspend(struct intel_uc *uc); +int intel_uc_resume(struct intel_uc *uc); -static inline bool intel_uc_is_using_guc(struct drm_i915_private *i915) +static inline bool intel_uc_is_using_guc(struct intel_uc *uc) { GEM_BUG_ON(i915_modparams.enable_guc < 0); return i915_modparams.enable_guc > 0; } -static inline bool intel_uc_is_using_guc_submission(struct drm_i915_private *i915) +static inline bool intel_uc_is_using_guc_submission(struct intel_uc *uc) { GEM_BUG_ON(i915_modparams.enable_guc < 0); return 
i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION; } -static inline bool intel_uc_is_using_huc(struct drm_i915_private *i915) +static inline bool intel_uc_is_using_huc(struct intel_uc *uc) { GEM_BUG_ON(i915_modparams.enable_guc < 0); return i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC; diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 434819deac5b..0caf2f3ce279 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -940,7 +940,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv) intel_detect_pch(dev_priv); intel_wopcm_init_early(&dev_priv->wopcm); - intel_uc_init_early(dev_priv); + intel_uc_init_early(&dev_priv->gt.uc); intel_pm_setup(dev_priv); intel_init_dpio(dev_priv); ret = intel_power_domains_init(dev_priv); @@ -957,7 +957,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv) return 0; err_uc: - intel_uc_cleanup_early(dev_priv); + intel_uc_cleanup_early(&dev_priv->gt.uc); i915_gem_cleanup_early(dev_priv); err_workqueues: i915_workqueues_cleanup(dev_priv); @@ -975,7 +975,7 @@ static void i915_driver_late_release(struct drm_i915_private *dev_priv) { intel_irq_fini(dev_priv); intel_power_domains_cleanup(dev_priv); - intel_uc_cleanup_early(dev_priv); + intel_uc_cleanup_early(&dev_priv->gt.uc); i915_gem_cleanup_early(dev_priv); i915_workqueues_cleanup(dev_priv); i915_engines_cleanup(dev_priv); @@ -1014,7 +1014,7 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv) intel_uncore_prune_mmio_domains(&dev_priv->uncore); - intel_uc_init_mmio(dev_priv); + intel_uc_init_mmio(&dev_priv->gt.uc); ret = intel_engines_init_mmio(dev_priv); if (ret) @@ -2939,7 +2939,7 @@ static int intel_runtime_suspend(struct device *kdev) */ i915_gem_runtime_suspend(dev_priv); - intel_uc_runtime_suspend(dev_priv); + intel_uc_runtime_suspend(&dev_priv->gt.uc); intel_runtime_pm_disable_interrupts(dev_priv); @@ -2964,7 +2964,7 @@ static int intel_runtime_suspend(struct device *kdev) intel_runtime_pm_enable_interrupts(dev_priv); - intel_uc_resume(dev_priv); + intel_uc_resume(&dev_priv->gt.uc); intel_gt_init_swizzling(&dev_priv->gt); i915_gem_restore_fences(dev_priv); @@ -3062,7 +3062,7 @@ static int intel_runtime_resume(struct device *kdev) intel_runtime_pm_enable_interrupts(dev_priv); - intel_uc_resume(dev_priv); + intel_uc_resume(&dev_priv->gt.uc); /* * No point of rolling back things in case of an error, as the best diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 1d2e9f6ee253..7d99e5364f15 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2295,9 +2295,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_HUC_UCODE(dev_priv) (HAS_GUC(dev_priv)) /* Having a GuC is not the same as using a GuC */ -#define USES_GUC(dev_priv) intel_uc_is_using_guc(dev_priv) -#define USES_GUC_SUBMISSION(dev_priv) intel_uc_is_using_guc_submission(dev_priv) -#define USES_HUC(dev_priv) intel_uc_is_using_huc(dev_priv) +#define USES_GUC(dev_priv) intel_uc_is_using_guc(&(dev_priv)->gt.uc) +#define USES_GUC_SUBMISSION(dev_priv) intel_uc_is_using_guc_submission(&(dev_priv)->gt.uc) +#define USES_HUC(dev_priv) intel_uc_is_using_huc(&(dev_priv)->gt.uc) #define HAS_POOLED_EU(dev_priv) (INTEL_INFO(dev_priv)->has_pooled_eu) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index f62dbd8c86de..a207b90924e4 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1248,7 +1248,7 @@ int 
i915_gem_init_hw(struct drm_i915_private *i915) } /* We can't enable contexts until all firmware is loaded */ - ret = intel_uc_init_hw(i915); + ret = intel_uc_init_hw(&i915->gt.uc); if (ret) { DRM_ERROR("Enabling uc failed (%d)\n", ret); goto out; @@ -1433,7 +1433,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv) if (ret) return ret; - intel_uc_fetch_firmwares(dev_priv); + intel_uc_fetch_firmwares(&dev_priv->gt.uc); ret = intel_wopcm_init(&dev_priv->wopcm); if (ret) @@ -1481,7 +1481,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv) intel_init_gt_powersave(dev_priv); - ret = intel_uc_init(dev_priv); + ret = intel_uc_init(&dev_priv->gt.uc); if (ret) goto err_pm; @@ -1545,9 +1545,9 @@ err_gt: mutex_lock(&dev_priv->drm.struct_mutex); err_init_hw: - intel_uc_fini_hw(dev_priv); + intel_uc_fini_hw(&dev_priv->gt.uc); err_uc_init: - intel_uc_fini(dev_priv); + intel_uc_fini(&dev_priv->gt.uc); err_pm: if (ret != -EIO) { intel_cleanup_gt_powersave(dev_priv); @@ -1564,7 +1564,7 @@ err_unlock: mutex_unlock(&dev_priv->drm.struct_mutex); err_uc_fw: - intel_uc_cleanup_firmwares(dev_priv); + intel_uc_cleanup_firmwares(&dev_priv->gt.uc); if (ret != -EIO) { i915_gem_cleanup_userptr(dev_priv); @@ -1611,8 +1611,8 @@ void i915_gem_driver_remove(struct drm_i915_private *dev_priv) i915_gem_drain_workqueue(dev_priv); mutex_lock(&dev_priv->drm.struct_mutex); - intel_uc_fini_hw(dev_priv); - intel_uc_fini(dev_priv); + intel_uc_fini_hw(&dev_priv->gt.uc); + intel_uc_fini(&dev_priv->gt.uc); mutex_unlock(&dev_priv->drm.struct_mutex); i915_gem_drain_freed_objects(dev_priv); @@ -1630,7 +1630,7 @@ void i915_gem_driver_release(struct drm_i915_private *dev_priv) intel_cleanup_gt_powersave(dev_priv); - intel_uc_cleanup_firmwares(dev_priv); + intel_uc_cleanup_firmwares(&dev_priv->gt.uc); i915_gem_cleanup_userptr(dev_priv); intel_timelines_fini(dev_priv); -- cgit v1.2.3 From 84b1ca2f0e68e89186f552ab839dc8da7c3ddc87 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Sat, 13 Jul 2019 11:00:14 +0100 Subject: drm/i915/uc: prefer intel_gt over i915 in GuC/HuC paths With our HW interface logic moving from i915 to gt and with GuC and HuC being part of the gt HW, it makes sense to use the intel_gt structure instead of i915 as our reference object in GuC/HuC paths. 
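In practice the conversion boils down to replacing the guc_to_i915()/huc_to_i915() upcasts (and the I915_WRITE()-style accessors that depend on dev_priv) with the new guc_to_gt()/huc_to_gt() helpers and the gt's uncore. A minimal before/after sketch, taken from the gen8_guc_raise_irq() hunk below:

	/* before: detour through drm_i915_private */
	struct drm_i915_private *dev_priv = guc_to_i915(guc);
	I915_WRITE(GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);

	/* after: stay within the gt and write through its uncore */
	struct intel_gt *gt = guc_to_gt(guc);
	intel_uncore_write(gt->uncore, GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);

The container_of()-based guc_to_gt()/huc_to_gt() helpers added to gt/intel_gt.h are what make this possible without bouncing through i915.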
Signed-off-by: Daniele Ceraolo Spurio Cc: Michal Wajdeczko Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190713100016.8026-9-chris@chris-wilson.co.uk Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/gt/intel_gt.h | 10 +++ drivers/gpu/drm/i915/gt/uc/intel_guc.c | 52 ++++++++------- drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c | 81 ++++++++++++----------- drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 43 ++++++------ drivers/gpu/drm/i915/gt/uc/intel_huc.c | 22 +++--- drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c | 4 +- drivers/gpu/drm/i915/gt/uc/intel_uc.c | 4 +- drivers/gpu/drm/i915/i915_gem_gtt.c | 8 +-- drivers/gpu/drm/i915/i915_gem_gtt.h | 4 +- 9 files changed, 120 insertions(+), 108 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h index 26c94521ad1b..640bb0531f5b 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.h +++ b/drivers/gpu/drm/i915/gt/intel_gt.h @@ -17,6 +17,16 @@ static inline struct intel_gt *uc_to_gt(struct intel_uc *uc) return container_of(uc, struct intel_gt, uc); } +static inline struct intel_gt *guc_to_gt(struct intel_guc *guc) +{ + return container_of(guc, struct intel_gt, uc.guc); +} + +static inline struct intel_gt *huc_to_gt(struct intel_huc *huc) +{ + return container_of(huc, struct intel_gt, uc.huc); +} + void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915); void intel_gt_init_hw(struct drm_i915_private *i915); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index 4173b35bf104..6b56f39072b1 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -22,6 +22,7 @@ * */ +#include "gt/intel_gt.h" #include "intel_guc.h" #include "intel_guc_ads.h" #include "intel_guc_submission.h" @@ -29,16 +30,16 @@ static void gen8_guc_raise_irq(struct intel_guc *guc) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct intel_gt *gt = guc_to_gt(guc); - I915_WRITE(GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER); + intel_uncore_write(gt->uncore, GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER); } static void gen11_guc_raise_irq(struct intel_guc *guc) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct intel_gt *gt = guc_to_gt(guc); - I915_WRITE(GEN11_GUC_HOST_INTERRUPT, 0); + intel_uncore_write(gt->uncore, GEN11_GUC_HOST_INTERRUPT, 0); } static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i) @@ -52,11 +53,11 @@ static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i) void intel_guc_init_send_regs(struct intel_guc *guc) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct intel_gt *gt = guc_to_gt(guc); enum forcewake_domains fw_domains = 0; unsigned int i; - if (INTEL_GEN(dev_priv) >= 11) { + if (INTEL_GEN(gt->i915) >= 11) { guc->send_regs.base = i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0)); guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT; @@ -67,7 +68,7 @@ void intel_guc_init_send_regs(struct intel_guc *guc) } for (i = 0; i < guc->send_regs.count; i++) { - fw_domains |= intel_uncore_forcewake_for_reg(&dev_priv->uncore, + fw_domains |= intel_uncore_forcewake_for_reg(gt->uncore, guc_send_reg(guc, i), FW_REG_READ | FW_REG_WRITE); } @@ -127,7 +128,7 @@ static void guc_shared_data_destroy(struct intel_guc *guc) int intel_guc_init(struct intel_guc *guc) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct intel_gt *gt = guc_to_gt(guc); int ret; ret = intel_uc_fw_init(&guc->fw); @@ -153,7 +154,7 @@ int 
intel_guc_init(struct intel_guc *guc) goto err_ads; /* We need to notify the guc whenever we change the GGTT */ - i915_ggtt_enable_guc(dev_priv); + i915_ggtt_enable_guc(gt->ggtt); return 0; @@ -172,9 +173,9 @@ err_fetch: void intel_guc_fini(struct intel_guc *guc) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct intel_gt *gt = guc_to_gt(guc); - i915_ggtt_disable_guc(dev_priv); + i915_ggtt_disable_guc(gt->ggtt); intel_guc_ct_fini(&guc->ct); @@ -282,7 +283,7 @@ static u32 guc_ctl_ads_flags(struct intel_guc *guc) */ void intel_guc_init_params(struct intel_guc *guc) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct intel_uncore *uncore = guc_to_gt(guc)->uncore; u32 params[GUC_CTL_MAX_DWORDS]; int i; @@ -302,14 +303,14 @@ void intel_guc_init_params(struct intel_guc *guc) * they are power context saved so it's ok to release forcewake * when we are done here and take it again at xfer time. */ - intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_BLITTER); + intel_uncore_forcewake_get(uncore, FORCEWAKE_BLITTER); - I915_WRITE(SOFT_SCRATCH(0), 0); + intel_uncore_write(uncore, SOFT_SCRATCH(0), 0); for (i = 0; i < GUC_CTL_MAX_DWORDS; i++) - I915_WRITE(SOFT_SCRATCH(1 + i), params[i]); + intel_uncore_write(uncore, SOFT_SCRATCH(1 + i), params[i]); - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_BLITTER); + intel_uncore_forcewake_put(uncore, FORCEWAKE_BLITTER); } int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len, @@ -330,8 +331,7 @@ void intel_guc_to_host_event_handler_nop(struct intel_guc *guc) int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len, u32 *response_buf, u32 response_buf_size) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); - struct intel_uncore *uncore = &dev_priv->uncore; + struct intel_uncore *uncore = guc_to_gt(guc)->uncore; u32 status; int i; int ret; @@ -380,7 +380,8 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len, int count = min(response_buf_size, guc->send_regs.count - 1); for (i = 0; i < count; i++) - response_buf[i] = I915_READ(guc_send_reg(guc, i + 1)); + response_buf[i] = intel_uncore_read(uncore, + guc_send_reg(guc, i + 1)); } /* Use data from the GuC response as our return value */ @@ -454,7 +455,7 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset) */ int intel_guc_suspend(struct intel_guc *guc) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct intel_uncore *uncore = guc_to_gt(guc)->uncore; int ret; u32 status; u32 action[] = { @@ -472,13 +473,14 @@ int intel_guc_suspend(struct intel_guc *guc) * in progress so we need to take care of that ourselves as well. 
*/ - I915_WRITE(SOFT_SCRATCH(14), INTEL_GUC_SLEEP_STATE_INVALID_MASK); + intel_uncore_write(uncore, SOFT_SCRATCH(14), + INTEL_GUC_SLEEP_STATE_INVALID_MASK); ret = intel_guc_send(guc, action, ARRAY_SIZE(action)); if (ret) return ret; - ret = __intel_wait_for_register(&dev_priv->uncore, SOFT_SCRATCH(14), + ret = __intel_wait_for_register(uncore, SOFT_SCRATCH(14), INTEL_GUC_SLEEP_STATE_INVALID_MASK, 0, 0, 10, &status); if (ret) @@ -574,17 +576,17 @@ int intel_guc_resume(struct intel_guc *guc) */ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct intel_gt *gt = guc_to_gt(guc); struct drm_i915_gem_object *obj; struct i915_vma *vma; u64 flags; int ret; - obj = i915_gem_object_create_shmem(dev_priv, size); + obj = i915_gem_object_create_shmem(gt->i915, size); if (IS_ERR(obj)) return ERR_CAST(obj); - vma = i915_vma_instance(obj, &dev_priv->ggtt.vm, NULL); + vma = i915_vma_instance(obj, >->ggtt->vm, NULL); if (IS_ERR(vma)) goto err; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c index ee95d4960c5c..98305e3fd42c 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c @@ -27,6 +27,7 @@ * Alex Dai */ +#include "gt/intel_gt.h" #include "intel_guc_fw.h" #include "i915_drv.h" @@ -129,35 +130,37 @@ void intel_guc_fw_init_early(struct intel_guc *guc) static void guc_prepare_xfer(struct intel_guc *guc) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct intel_gt *gt = guc_to_gt(guc); + struct intel_uncore *uncore = gt->uncore; + u32 shim_flags = GUC_DISABLE_SRAM_INIT_TO_ZEROES | + GUC_ENABLE_READ_CACHE_LOGIC | + GUC_ENABLE_MIA_CACHING | + GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA | + GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA | + GUC_ENABLE_MIA_CLOCK_GATING; /* Must program this register before loading the ucode with DMA */ - I915_WRITE(GUC_SHIM_CONTROL, GUC_DISABLE_SRAM_INIT_TO_ZEROES | - GUC_ENABLE_READ_CACHE_LOGIC | - GUC_ENABLE_MIA_CACHING | - GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA | - GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA | - GUC_ENABLE_MIA_CLOCK_GATING); - - if (IS_GEN9_LP(dev_priv)) - I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE); + intel_uncore_write(uncore, GUC_SHIM_CONTROL, shim_flags); + + if (IS_GEN9_LP(gt->i915)) + intel_uncore_write(uncore, GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE); else - I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE); + intel_uncore_write(uncore, GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE); - if (IS_GEN(dev_priv, 9)) { + if (IS_GEN(gt->i915, 9)) { /* DOP Clock Gating Enable for GuC clocks */ - I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE | - I915_READ(GEN7_MISCCPCTL))); + intel_uncore_rmw(uncore, GEN7_MISCCPCTL, + 0, GEN8_DOP_CLOCK_GATE_GUC_ENABLE); /* allows for 5us (in 10ns units) before GT can go to RC6 */ - I915_WRITE(GUC_ARAT_C6DIS, 0x1FF); + intel_uncore_write(uncore, GUC_ARAT_C6DIS, 0x1FF); } } /* Copy RSA signature from the fw image to HW for verification */ static void guc_xfer_rsa(struct intel_guc *guc) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct intel_uncore *uncore = guc_to_gt(guc)->uncore; struct intel_uc_fw *fw = &guc->fw; struct sg_table *pages = fw->obj->mm.pages; u32 rsa[UOS_RSA_SCRATCH_COUNT]; @@ -167,15 +170,13 @@ static void guc_xfer_rsa(struct intel_guc *guc) rsa, sizeof(rsa), fw->rsa_offset); for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++) - I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]); + intel_uncore_write(uncore, UOS_RSA_SCRATCH(i), 
rsa[i]); } -static bool guc_xfer_completed(struct intel_guc *guc, u32 *status) +static bool guc_xfer_completed(struct intel_uncore *uncore, u32 *status) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); - /* Did we complete the xfer? */ - *status = I915_READ(DMA_CTRL); + *status = intel_uncore_read(uncore, DMA_CTRL); return !(*status & START_DMA); } @@ -188,10 +189,9 @@ static bool guc_xfer_completed(struct intel_guc *guc, u32 *status) * This is used for polling the GuC status in a wait_for() * loop below. */ -static inline bool guc_ready(struct intel_guc *guc, u32 *status) +static inline bool guc_ready(struct intel_uncore *uncore, u32 *status) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); - u32 val = I915_READ(GUC_STATUS); + u32 val = intel_uncore_read(uncore, GUC_STATUS); u32 uk_val = val & GS_UKERNEL_MASK; *status = val; @@ -199,9 +199,8 @@ static inline bool guc_ready(struct intel_guc *guc, u32 *status) ((val & GS_MIA_CORE_STATE) && (uk_val == GS_UKERNEL_LAPIC_DONE)); } -static int guc_wait_ucode(struct intel_guc *guc) +static int guc_wait_ucode(struct intel_uncore *uncore) { - struct drm_i915_private *i915 = guc_to_i915(guc); u32 status; int ret; @@ -213,7 +212,7 @@ static int guc_wait_ucode(struct intel_guc *guc) * (Higher levels of the driver may decide to reset the GuC and * attempt the ucode load again if this happens.) */ - ret = wait_for(guc_ready(guc, &status), 100); + ret = wait_for(guc_ready(uncore, &status), 100); DRM_DEBUG_DRIVER("GuC status %#x\n", status); if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) { @@ -223,11 +222,11 @@ static int guc_wait_ucode(struct intel_guc *guc) if ((status & GS_UKERNEL_MASK) == GS_UKERNEL_EXCEPTION) { DRM_ERROR("GuC firmware exception. EIP: %#x\n", - intel_uncore_read(&i915->uncore, SOFT_SCRATCH(13))); + intel_uncore_read(uncore, SOFT_SCRATCH(13))); ret = -ENXIO; } - if (ret == 0 && !guc_xfer_completed(guc, &status)) { + if (ret == 0 && !guc_xfer_completed(uncore, &status)) { DRM_ERROR("GuC is ready, but the xfer %08x is incomplete\n", status); ret = -ENXIO; @@ -245,7 +244,7 @@ static int guc_wait_ucode(struct intel_guc *guc) */ static int guc_xfer_ucode(struct intel_guc *guc) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct intel_uncore *uncore = guc_to_gt(guc)->uncore; struct intel_uc_fw *guc_fw = &guc->fw; unsigned long offset; @@ -253,24 +252,26 @@ static int guc_xfer_ucode(struct intel_guc *guc) * The header plus uCode will be copied to WOPCM via DMA, excluding any * other components */ - I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size); + intel_uncore_write(uncore, DMA_COPY_SIZE, + guc_fw->header_size + guc_fw->ucode_size); /* Set the source address for the new blob */ offset = intel_uc_fw_ggtt_offset(guc_fw) + guc_fw->header_offset; - I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset)); - I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF); + intel_uncore_write(uncore, DMA_ADDR_0_LOW, lower_32_bits(offset)); + intel_uncore_write(uncore, DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF); /* * Set the DMA destination. Current uCode expects the code to be * loaded at 8k; locations below this are used for the stack. 
*/ - I915_WRITE(DMA_ADDR_1_LOW, 0x2000); - I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM); + intel_uncore_write(uncore, DMA_ADDR_1_LOW, 0x2000); + intel_uncore_write(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM); /* Finally start the DMA */ - I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA)); + intel_uncore_write(uncore, DMA_CTRL, + _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA)); - return guc_wait_ucode(guc); + return guc_wait_ucode(uncore); } /* * Load the GuC firmware blob into the MinuteIA. @@ -278,12 +279,12 @@ static int guc_xfer_ucode(struct intel_guc *guc) static int guc_fw_xfer(struct intel_uc_fw *guc_fw) { struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw); - struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct intel_uncore *uncore = guc_to_gt(guc)->uncore; int ret; GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC); - intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); + intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); guc_prepare_xfer(guc); @@ -296,7 +297,7 @@ static int guc_fw_xfer(struct intel_uc_fw *guc_fw) ret = guc_xfer_ucode(guc); - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); + intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); return ret; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index fa22d377ae48..a0f2a01365bc 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -28,6 +28,7 @@ #include "gt/intel_context.h" #include "gt/intel_engine_pm.h" +#include "gt/intel_gt.h" #include "gt/intel_lrc_reg.h" #include "intel_guc_submission.h" @@ -201,10 +202,10 @@ static struct guc_doorbell_info *__get_doorbell(struct intel_guc_client *client) static bool __doorbell_valid(struct intel_guc *guc, u16 db_id) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct intel_uncore *uncore = guc_to_gt(guc)->uncore; GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS); - return I915_READ(GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID; + return intel_uncore_read(uncore, GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID; } static void __init_doorbell(struct intel_guc_client *client) @@ -1001,9 +1002,10 @@ void intel_guc_submission_fini(struct intel_guc *guc) guc_stage_desc_pool_destroy(guc); } -static void guc_interrupts_capture(struct drm_i915_private *dev_priv) +static void guc_interrupts_capture(struct intel_gt *gt) { - struct intel_rps *rps = &dev_priv->gt_pm.rps; + struct intel_rps *rps = >->i915->gt_pm.rps; + struct intel_uncore *uncore = gt->uncore; struct intel_engine_cs *engine; enum intel_engine_id id; int irqs; @@ -1012,16 +1014,16 @@ static void guc_interrupts_capture(struct drm_i915_private *dev_priv) * to GuC */ irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING); - for_each_engine(engine, dev_priv, id) + for_each_engine(engine, gt->i915, id) ENGINE_WRITE(engine, RING_MODE_GEN7, irqs); /* route USER_INTERRUPT to Host, all others are sent to GuC. 
*/ irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT; /* These three registers have the same bit definitions */ - I915_WRITE(GUC_BCS_RCS_IER, ~irqs); - I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs); - I915_WRITE(GUC_WD_VECS_IER, ~irqs); + intel_uncore_write(uncore, GUC_BCS_RCS_IER, ~irqs); + intel_uncore_write(uncore, GUC_VCS2_VCS1_IER, ~irqs); + intel_uncore_write(uncore, GUC_WD_VECS_IER, ~irqs); /* * The REDIRECT_TO_GUC bit of the PMINTRMSK register directs all @@ -1046,9 +1048,10 @@ static void guc_interrupts_capture(struct drm_i915_private *dev_priv) rps->pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC; } -static void guc_interrupts_release(struct drm_i915_private *dev_priv) +static void guc_interrupts_release(struct intel_gt *gt) { - struct intel_rps *rps = &dev_priv->gt_pm.rps; + struct intel_rps *rps = >->i915->gt_pm.rps; + struct intel_uncore *uncore = gt->uncore; struct intel_engine_cs *engine; enum intel_engine_id id; int irqs; @@ -1059,13 +1062,13 @@ static void guc_interrupts_release(struct drm_i915_private *dev_priv) */ irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER); irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING); - for_each_engine(engine, dev_priv, id) + for_each_engine(engine, gt->i915, id) ENGINE_WRITE(engine, RING_MODE_GEN7, irqs); /* route all GT interrupts to the host */ - I915_WRITE(GUC_BCS_RCS_IER, 0); - I915_WRITE(GUC_VCS2_VCS1_IER, 0); - I915_WRITE(GUC_WD_VECS_IER, 0); + intel_uncore_write(uncore, GUC_BCS_RCS_IER, 0); + intel_uncore_write(uncore, GUC_VCS2_VCS1_IER, 0); + intel_uncore_write(uncore, GUC_WD_VECS_IER, 0); rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC; rps->pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK; @@ -1115,7 +1118,7 @@ static void guc_set_default_submission(struct intel_engine_cs *engine) int intel_guc_submission_enable(struct intel_guc *guc) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct intel_gt *gt = guc_to_gt(guc); struct intel_engine_cs *engine; enum intel_engine_id id; int err; @@ -1140,9 +1143,9 @@ int intel_guc_submission_enable(struct intel_guc *guc) return err; /* Take over from manual control of ELSP (execlists) */ - guc_interrupts_capture(dev_priv); + guc_interrupts_capture(gt); - for_each_engine(engine, dev_priv, id) { + for_each_engine(engine, gt->i915, id) { engine->set_default_submission = guc_set_default_submission; engine->set_default_submission(engine); } @@ -1152,11 +1155,11 @@ int intel_guc_submission_enable(struct intel_guc *guc) void intel_guc_submission_disable(struct intel_guc *guc) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct intel_gt *gt = guc_to_gt(guc); - GEM_BUG_ON(dev_priv->gt.awake); /* GT should be parked first */ + GEM_BUG_ON(gt->awake); /* GT should be parked first */ - guc_interrupts_release(dev_priv); + guc_interrupts_release(gt); guc_clients_disable(guc); } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c index 581c9c3d4fc0..99f0fa2e8ff5 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c @@ -24,6 +24,7 @@ #include +#include "gt/intel_gt.h" #include "intel_huc.h" #include "i915_drv.h" @@ -46,8 +47,8 @@ void intel_huc_init_early(struct intel_huc *huc) static int intel_huc_rsa_data_create(struct intel_huc *huc) { - struct drm_i915_private *i915 = huc_to_i915(huc); - struct intel_guc *guc = &i915->gt.uc.guc; + struct intel_gt *gt = huc_to_gt(huc); + struct intel_guc *guc = >->uc.guc; struct i915_vma 
*vma; void *vaddr; @@ -112,8 +113,8 @@ void intel_huc_fini(struct intel_huc *huc) */ int intel_huc_auth(struct intel_huc *huc) { - struct drm_i915_private *i915 = huc_to_i915(huc); - struct intel_guc *guc = &i915->gt.uc.guc; + struct intel_gt *gt = huc_to_gt(huc); + struct intel_guc *guc = >->uc.guc; int ret; if (huc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS) @@ -127,7 +128,7 @@ int intel_huc_auth(struct intel_huc *huc) } /* Check authentication status, it should be done by now */ - ret = __intel_wait_for_register(&i915->uncore, + ret = __intel_wait_for_register(gt->uncore, huc->status.reg, huc->status.mask, huc->status.value, @@ -159,16 +160,15 @@ fail: */ int intel_huc_check_status(struct intel_huc *huc) { - struct drm_i915_private *dev_priv = huc_to_i915(huc); + struct intel_gt *gt = huc_to_gt(huc); intel_wakeref_t wakeref; bool status = false; - if (!HAS_HUC(dev_priv)) + if (!intel_uc_is_using_huc(>->uc)) return -ENODEV; - with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) - status = (I915_READ(huc->status.reg) & huc->status.mask) == - huc->status.value; + with_intel_runtime_pm(>->i915->runtime_pm, wakeref) + status = intel_uncore_read(gt->uncore, huc->status.reg); - return status; + return (status & huc->status.mask) == huc->status.value; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c index 06e726ba9863..becfd34a26c1 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c @@ -4,6 +4,7 @@ * Copyright © 2014-2018 Intel Corporation */ +#include "gt/intel_gt.h" #include "intel_huc_fw.h" #include "i915_drv.h" @@ -139,8 +140,7 @@ static void huc_xfer_rsa(struct intel_huc *huc) static int huc_xfer_ucode(struct intel_huc *huc) { struct intel_uc_fw *huc_fw = &huc->fw; - struct drm_i915_private *dev_priv = huc_to_i915(huc); - struct intel_uncore *uncore = &dev_priv->uncore; + struct intel_uncore *uncore = huc_to_gt(huc)->uncore; unsigned long offset = 0; u32 size; int ret; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index 0c43d547bc94..41a770865477 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -228,7 +228,7 @@ static void guc_free_load_err_log(struct intel_guc *guc) */ static void guc_clear_mmio_msg(struct intel_guc *guc) { - intel_uncore_write(&guc_to_i915(guc)->uncore, SOFT_SCRATCH(15), 0); + intel_uncore_write(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15), 0); } static void guc_get_mmio_msg(struct intel_guc *guc) @@ -237,7 +237,7 @@ static void guc_get_mmio_msg(struct intel_guc *guc) spin_lock_irq(&guc->irq_lock); - val = intel_uncore_read(&guc_to_i915(guc)->uncore, SOFT_SCRATCH(15)); + val = intel_uncore_read(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15)); guc->mmio_msg |= val & guc->msg_enabled_mask; /* diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 6da564e27a64..1e3ccdf7fe1b 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -3333,10 +3333,8 @@ int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv) return 0; } -void i915_ggtt_enable_guc(struct drm_i915_private *i915) +void i915_ggtt_enable_guc(struct i915_ggtt *ggtt) { - struct i915_ggtt *ggtt = &i915->ggtt; - GEM_BUG_ON(ggtt->invalidate != gen6_ggtt_invalidate); ggtt->invalidate = guc_ggtt_invalidate; @@ -3344,10 +3342,8 @@ void i915_ggtt_enable_guc(struct drm_i915_private *i915) ggtt->invalidate(ggtt); } -void i915_ggtt_disable_guc(struct 
drm_i915_private *i915) +void i915_ggtt_disable_guc(struct i915_ggtt *ggtt) { - struct i915_ggtt *ggtt = &i915->ggtt; - /* XXX Temporary pardon for error unload */ if (ggtt->invalidate == gen6_ggtt_invalidate) return; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index a450349b3a50..de156634a889 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -631,8 +631,8 @@ i915_vm_to_ppgtt(struct i915_address_space *vm) int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv); int i915_ggtt_init_hw(struct drm_i915_private *dev_priv); int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv); -void i915_ggtt_enable_guc(struct drm_i915_private *i915); -void i915_ggtt_disable_guc(struct drm_i915_private *i915); +void i915_ggtt_enable_guc(struct i915_ggtt *ggtt); +void i915_ggtt_disable_guc(struct i915_ggtt *ggtt); int i915_init_ggtt(struct drm_i915_private *dev_priv); void i915_ggtt_driver_release(struct drm_i915_private *dev_priv); -- cgit v1.2.3 From 2239e6dff2067c23f0afb7fab62ef139dc957d48 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Sat, 13 Jul 2019 11:00:15 +0100 Subject: drm/i915/guc: prefer intel_gt in guc interrupt functions We can get rid of a few more guc_to_i915 and start compartmentalizing interrupt management a bit more. We should be able to move more code in the future once the gt_pm code is also moved across to gt. Signed-off-by: Daniele Ceraolo Spurio Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190713100016.8026-10-chris@chris-wilson.co.uk Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/gt/intel_gt_types.h | 2 + drivers/gpu/drm/i915/i915_drv.h | 1 - drivers/gpu/drm/i915/i915_irq.c | 73 +++++++++++++++++--------------- 3 files changed, 42 insertions(+), 34 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h index d3b90c6ee8cf..34d4a868e4f1 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h @@ -74,6 +74,8 @@ struct intel_gt { u32 pm_imr; u32 pm_ier; + + u32 pm_guc_events; }; enum intel_gt_scratch_field { diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 7d99e5364f15..c4e4d136c937 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1400,7 +1400,6 @@ struct drm_i915_private { }; u32 gt_irq_mask; u32 pm_rps_events; - u32 pm_guc_events; u32 pipestat_irq_mask[I915_MAX_PIPES]; struct i915_hotplug hotplug; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 78c748cb9df8..91f8c81028c3 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -42,6 +42,8 @@ #include "display/intel_lpe_audio.h" #include "display/intel_psr.h" +#include "gt/intel_gt.h" + #include "i915_drv.h" #include "i915_irq.h" #include "i915_trace.h" @@ -601,85 +603,90 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv) void gen9_reset_guc_interrupts(struct intel_guc *guc) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct intel_gt *gt = guc_to_gt(guc); + struct drm_i915_private *i915 = gt->i915; - assert_rpm_wakelock_held(&dev_priv->runtime_pm); + assert_rpm_wakelock_held(&i915->runtime_pm); - spin_lock_irq(&dev_priv->irq_lock); - gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events); - spin_unlock_irq(&dev_priv->irq_lock); + spin_lock_irq(&i915->irq_lock); + gen6_reset_pm_iir(i915, 
gt->pm_guc_events); + spin_unlock_irq(&i915->irq_lock); } void gen9_enable_guc_interrupts(struct intel_guc *guc) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct intel_gt *gt = guc_to_gt(guc); + struct drm_i915_private *i915 = gt->i915; - assert_rpm_wakelock_held(&dev_priv->runtime_pm); + assert_rpm_wakelock_held(&i915->runtime_pm); - spin_lock_irq(&dev_priv->irq_lock); + spin_lock_irq(&i915->irq_lock); if (!guc->interrupts.enabled) { - WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & - dev_priv->pm_guc_events); + WARN_ON_ONCE(intel_uncore_read(gt->uncore, gen6_pm_iir(i915)) & + gt->pm_guc_events); guc->interrupts.enabled = true; - gen6_enable_pm_irq(&dev_priv->gt, dev_priv->pm_guc_events); + gen6_enable_pm_irq(gt, gt->pm_guc_events); } - spin_unlock_irq(&dev_priv->irq_lock); + spin_unlock_irq(&i915->irq_lock); } void gen9_disable_guc_interrupts(struct intel_guc *guc) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct intel_gt *gt = guc_to_gt(guc); + struct drm_i915_private *i915 = gt->i915; - assert_rpm_wakelock_held(&dev_priv->runtime_pm); + assert_rpm_wakelock_held(&i915->runtime_pm); - spin_lock_irq(&dev_priv->irq_lock); + spin_lock_irq(&i915->irq_lock); guc->interrupts.enabled = false; - gen6_disable_pm_irq(&dev_priv->gt, dev_priv->pm_guc_events); + gen6_disable_pm_irq(gt, gt->pm_guc_events); - spin_unlock_irq(&dev_priv->irq_lock); - intel_synchronize_irq(dev_priv); + spin_unlock_irq(&i915->irq_lock); + intel_synchronize_irq(i915); gen9_reset_guc_interrupts(guc); } void gen11_reset_guc_interrupts(struct intel_guc *guc) { - struct drm_i915_private *i915 = guc_to_i915(guc); + struct intel_gt *gt = guc_to_gt(guc); + struct drm_i915_private *i915 = gt->i915; spin_lock_irq(&i915->irq_lock); - gen11_reset_one_iir(&i915->gt, 0, GEN11_GUC); + gen11_reset_one_iir(gt, 0, GEN11_GUC); spin_unlock_irq(&i915->irq_lock); } void gen11_enable_guc_interrupts(struct intel_guc *guc) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct intel_gt *gt = guc_to_gt(guc); - spin_lock_irq(&dev_priv->irq_lock); + spin_lock_irq(>->i915->irq_lock); if (!guc->interrupts.enabled) { u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST); - WARN_ON_ONCE(gen11_reset_one_iir(&dev_priv->gt, 0, GEN11_GUC)); - I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, events); - I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~events); + WARN_ON_ONCE(gen11_reset_one_iir(gt, 0, GEN11_GUC)); + intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, events); + intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~events); guc->interrupts.enabled = true; } - spin_unlock_irq(&dev_priv->irq_lock); + spin_unlock_irq(>->i915->irq_lock); } void gen11_disable_guc_interrupts(struct intel_guc *guc) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct intel_gt *gt = guc_to_gt(guc); + struct drm_i915_private *i915 = gt->i915; - spin_lock_irq(&dev_priv->irq_lock); + spin_lock_irq(&i915->irq_lock); guc->interrupts.enabled = false; - I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~0); - I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, 0); + intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0); + intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0); - spin_unlock_irq(&dev_priv->irq_lock); - intel_synchronize_irq(dev_priv); + spin_unlock_irq(&i915->irq_lock); + intel_synchronize_irq(i915); gen11_reset_guc_interrupts(guc); } @@ -4757,7 +4764,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv) /* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */ if (HAS_GUC_SCHED(dev_priv) && 
INTEL_GEN(dev_priv) < 11) - dev_priv->pm_guc_events = GUC_INTR_GUC2HOST << 16; + dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16; /* Let's track the enabled rps events */ if (IS_VALLEYVIEW(dev_priv)) -- cgit v1.2.3 From a8120bc23eeae8f09a49167e32fa79ce5336546e Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Sat, 13 Jul 2019 11:00:16 +0100 Subject: drm/i915/uc: kill uc_to_i915 Get rid of them to avoid more users being added while the guc code transitions to use gt more than i915. Signed-off-by: Daniele Ceraolo Spurio Cc: Michal Wajdeczko Acked-by: Michal Wajdeczko Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190713100016.8026-11-chris@chris-wilson.co.uk Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/gt/uc/intel_guc.c | 8 ++++---- drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c | 3 ++- drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c | 2 +- drivers/gpu/drm/i915/gt/uc/intel_guc_log.c | 13 +++++++------ drivers/gpu/drm/i915/gt/uc/intel_huc.c | 2 +- drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c | 2 +- drivers/gpu/drm/i915/gt/uc/intel_uc.c | 4 ++-- drivers/gpu/drm/i915/i915_drv.h | 10 ---------- 8 files changed, 18 insertions(+), 26 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index 6b56f39072b1..83f2c197375f 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -77,7 +77,7 @@ void intel_guc_init_send_regs(struct intel_guc *guc) void intel_guc_init_early(struct intel_guc *guc) { - struct drm_i915_private *i915 = guc_to_i915(guc); + struct drm_i915_private *i915 = guc_to_gt(guc)->i915; intel_guc_fw_init_early(guc); intel_guc_ct_init_early(&guc->ct); @@ -204,7 +204,7 @@ static u32 guc_ctl_feature_flags(struct intel_guc *guc) { u32 flags = 0; - if (!USES_GUC_SUBMISSION(guc_to_i915(guc))) + if (!intel_uc_is_using_guc_submission(&guc_to_gt(guc)->uc)) flags |= GUC_CTL_DISABLE_SCHEDULER; return flags; @@ -214,7 +214,7 @@ static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc) { u32 flags = 0; - if (USES_GUC_SUBMISSION(guc_to_i915(guc))) { + if (intel_uc_is_using_guc_submission(&guc_to_gt(guc)->uc)) { u32 ctxnum, base; base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool); @@ -414,7 +414,7 @@ int intel_guc_to_host_process_recv_msg(struct intel_guc *guc, int intel_guc_sample_forcewake(struct intel_guc *guc) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915; u32 action[2]; action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c index 69859d1e047f..a0da80241f22 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c @@ -22,6 +22,7 @@ * */ +#include "gt/intel_gt.h" #include "intel_guc_ads.h" #include "intel_uc.h" #include "i915_drv.h" @@ -85,7 +86,7 @@ struct __guc_ads_blob { static void __guc_ads_init(struct intel_guc *guc) { - struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915; struct __guc_ads_blob *blob = guc->ads_blob; const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE; u32 base; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c index 98305e3fd42c..3dfa40fdbe99 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c @@ -76,7 +76,7 @@ 
MODULE_FIRMWARE(ICL_GUC_FIRMWARE_PATH); static void guc_fw_select(struct intel_uc_fw *guc_fw) { struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw); - struct drm_i915_private *i915 = guc_to_i915(guc); + struct drm_i915_private *i915 = guc_to_gt(guc)->i915; GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c index 9be5d3a6fb5f..77fda1e85d3b 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c @@ -24,6 +24,7 @@ #include +#include "gt/intel_gt.h" #include "intel_guc_log.h" #include "i915_drv.h" @@ -209,7 +210,7 @@ static bool guc_check_log_buf_overflow(struct intel_guc_log *log, log->stats[type].sampled_overflow += 16; } - dev_notice_ratelimited(guc_to_i915(log_to_guc(log))->drm.dev, + dev_notice_ratelimited(guc_to_gt(log_to_guc(log))->i915->drm.dev, "GuC log buffer overflow\n"); } @@ -383,7 +384,7 @@ void intel_guc_log_init_early(struct intel_guc_log *log) static int guc_log_relay_create(struct intel_guc_log *log) { struct intel_guc *guc = log_to_guc(log); - struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915; struct rchan *guc_log_relay_chan; size_t n_subbufs, subbuf_size; int ret; @@ -429,7 +430,7 @@ static void guc_log_relay_destroy(struct intel_guc_log *log) static void guc_log_capture_logs(struct intel_guc_log *log) { struct intel_guc *guc = log_to_guc(log); - struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915; intel_wakeref_t wakeref; guc_read_update_log_buffer(log); @@ -498,7 +499,7 @@ void intel_guc_log_destroy(struct intel_guc_log *log) int intel_guc_log_set_level(struct intel_guc_log *log, u32 level) { struct intel_guc *guc = log_to_guc(log); - struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915; intel_wakeref_t wakeref; int ret = 0; @@ -593,7 +594,7 @@ out_unlock: void intel_guc_log_relay_flush(struct intel_guc_log *log) { struct intel_guc *guc = log_to_guc(log); - struct drm_i915_private *i915 = guc_to_i915(guc); + struct drm_i915_private *i915 = guc_to_gt(guc)->i915; intel_wakeref_t wakeref; /* @@ -612,7 +613,7 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log) void intel_guc_log_relay_close(struct intel_guc_log *log) { struct intel_guc *guc = log_to_guc(log); - struct drm_i915_private *i915 = guc_to_i915(guc); + struct drm_i915_private *i915 = guc_to_gt(guc)->i915; guc_log_disable_flush_events(log); intel_synchronize_irq(i915); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c index 99f0fa2e8ff5..bc14439173d7 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c @@ -30,7 +30,7 @@ void intel_huc_init_early(struct intel_huc *huc) { - struct drm_i915_private *i915 = huc_to_i915(huc); + struct drm_i915_private *i915 = huc_to_gt(huc)->i915; intel_huc_fw_init_early(huc); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c index becfd34a26c1..543854c42d9d 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c @@ -70,7 +70,7 @@ MODULE_FIRMWARE(I915_ICL_HUC_UCODE); static void huc_fw_select(struct intel_uc_fw *huc_fw) { struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw); - struct drm_i915_private *dev_priv = huc_to_i915(huc); + struct 
drm_i915_private *dev_priv = huc_to_gt(huc)->i915; GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index 41a770865477..5ebb0a534718 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -252,7 +252,7 @@ static void guc_get_mmio_msg(struct intel_guc *guc) static void guc_handle_mmio_msg(struct intel_guc *guc) { - struct drm_i915_private *i915 = guc_to_i915(guc); + struct drm_i915_private *i915 = guc_to_gt(guc)->i915; /* we need communication to be enabled to reply to GuC */ GEM_BUG_ON(guc->handler == intel_guc_to_host_event_handler_nop); @@ -284,7 +284,7 @@ static void guc_disable_interrupts(struct intel_guc *guc) static int guc_enable_communication(struct intel_guc *guc) { - struct drm_i915_private *i915 = guc_to_i915(guc); + struct drm_i915_private *i915 = guc_to_gt(guc)->i915; int ret; ret = intel_guc_ct_enable(&guc->ct); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index c4e4d136c937..d14e07315512 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1902,16 +1902,6 @@ static inline struct drm_i915_private *wopcm_to_i915(struct intel_wopcm *wopcm) return container_of(wopcm, struct drm_i915_private, wopcm); } -static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc) -{ - return container_of(guc, struct drm_i915_private, gt.uc.guc); -} - -static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc) -{ - return container_of(huc, struct drm_i915_private, gt.uc.huc); -} - /* Simple iterator over all initialised engines */ #define for_each_engine(engine__, dev_priv__, id__) \ for ((id__) = 0; \ -- cgit v1.2.3 From 506927ec8bbbdd99261cec4cd28cd0fd54e02218 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 12 Jul 2019 09:25:49 +0100 Subject: drm/i915/selftests: Ignore self-preemption suppression under gvt GVT forces single port submission of individual requests. We do not enjoy the context amalgamation that the test depends upon for setting up the test (where port 0 has a large number of requests with a priority change somewhere in the middle). Under single request submission of gvt it is quite able for the preemption event to occur while another context is active and so there be a real need to act upon that preemption. 
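The fix is therefore to skip the selftest when running as a vGPU client; a sketch of the guard added below:

	if (intel_vgpu_active(i915))
		return 0; /* GVT forces single port & request submission */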
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=111108 Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Cc: Zhenyu Wang Acked-by: Zhenyu Wang Link: https://patchwork.freedesktop.org/patch/msgid/20190712082549.25053-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/selftest_lrc.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index 678e9b2edf8d..6b6cea76cc8b 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -854,6 +854,9 @@ static int live_suppress_self_preempt(void *arg) if (USES_GUC_SUBMISSION(i915)) return 0; /* presume black blox */ + if (intel_vgpu_active(i915)) + return 0; /* GVT forces single port & request submission */ + mutex_lock(&i915->drm.struct_mutex); wakeref = intel_runtime_pm_get(&i915->runtime_pm); -- cgit v1.2.3 From 48cf0a1ec1701787945008c945aa35bb6cfacb70 Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Fri, 12 Jul 2019 15:16:41 -0700 Subject: drm/i915/ehl: Map MCC pins based on PHY, not port MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now that we distinguish between phy and port(ddi), mcc_port_to_ddc_pin should use the phy, not the DDI, for determining DDC pins. We're only converting the MCC function at the moment since EHL is the only platform that has configurations where port!=phy. Cc: José Roberto de Souza Signed-off-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20190712221641.21031-1-matthew.d.roper@intel.com Reviewed-by: José Roberto de Souza --- drivers/gpu/drm/i915/display/intel_hdmi.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index 77af0dfd93ce..098ea2c5d831 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -2943,20 +2943,21 @@ static u8 icl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port) static u8 mcc_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port) { + enum phy phy = intel_port_to_phy(dev_priv, port); u8 ddc_pin; - switch (port) { - case PORT_A: + switch (phy) { + case PHY_A: ddc_pin = GMBUS_PIN_1_BXT; break; - case PORT_B: + case PHY_B: ddc_pin = GMBUS_PIN_2_BXT; break; - case PORT_C: + case PHY_C: ddc_pin = GMBUS_PIN_9_TC1_ICP; break; default: - MISSING_CASE(port); + MISSING_CASE(phy); ddc_pin = GMBUS_PIN_1_BXT; break; } -- cgit v1.2.3 From 3944709df8e9298225fc2b29e53ee8e6f4b26618 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Thu, 11 Jul 2019 17:53:42 -0700 Subject: drm/i915: Add support for retrying hotplug MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There is some scenarios that we are aware that sink probe can fail, so lets add the infrastructure to let hotplug() hook to request another probe after some time. 
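In rough outline (names as used in the hunks below, locking elided), the per-encoder hotplug() hook now reports a tri-state result and the work handler re-queues itself as delayed work for any pin that asked for a retry:

	switch (intel_encoder->hotplug(intel_encoder, intel_connector,
				       hpd_event_bits & hpd_bit)) {
	case INTEL_HOTPLUG_UNCHANGED:
		break;
	case INTEL_HOTPLUG_CHANGED:
		changed |= hpd_bit;
		break;
	case INTEL_HOTPLUG_RETRY:
		retry |= hpd_bit;
		break;
	}

	/* pins that did change need no retry; the rest get reprobed later */
	retry &= ~changed;
	if (retry)
		mod_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work,
				 msecs_to_jiffies(HPD_RETRY_DELAY));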
v2: Handle shared HPD pins (Imre) v3: Rebased v4: Renamed INTEL_HOTPLUG_NOCHANGE to INTEL_HOTPLUG_UNCHANGED to keep it consistent(Rodrigo) v5: Making the working queue used explicit through all the callers to hotplug_work (Ville) Tested-by: Timo Aaltonen Cc: Ville Syrjälä Reviewed-by: Rodrigo Vivi Signed-off-by: José Roberto de Souza Signed-off-by: Jani Nikula Signed-off-by: Imre Deak Link: https://patchwork.freedesktop.org/patch/msgid/20190712005343.24571-1-jose.souza@intel.com --- drivers/gpu/drm/i915/display/intel_ddi.c | 12 +++--- drivers/gpu/drm/i915/display/intel_dp.c | 12 +++--- drivers/gpu/drm/i915/display/intel_hotplug.c | 59 ++++++++++++++++++++++------ drivers/gpu/drm/i915/display/intel_hotplug.h | 5 ++- drivers/gpu/drm/i915/display/intel_sdvo.c | 8 ++-- drivers/gpu/drm/i915/i915_debugfs.c | 2 +- drivers/gpu/drm/i915/i915_drv.h | 3 +- drivers/gpu/drm/i915/intel_drv.h | 11 +++++- 8 files changed, 80 insertions(+), 32 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 8445244aa593..c89d0c7543dc 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -4070,14 +4070,16 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder, return modeset_pipe(&crtc->base, ctx); } -static bool intel_ddi_hotplug(struct intel_encoder *encoder, - struct intel_connector *connector) +static enum intel_hotplug_state +intel_ddi_hotplug(struct intel_encoder *encoder, + struct intel_connector *connector, + bool irq_received) { struct drm_modeset_acquire_ctx ctx; - bool changed; + enum intel_hotplug_state state; int ret; - changed = intel_encoder_hotplug(encoder, connector); + state = intel_encoder_hotplug(encoder, connector, irq_received); drm_modeset_acquire_init(&ctx, 0); @@ -4099,7 +4101,7 @@ static bool intel_ddi_hotplug(struct intel_encoder *encoder, drm_modeset_acquire_fini(&ctx); WARN(ret, "Acquiring modeset locks failed with %i\n", ret); - return changed; + return state; } static struct intel_connector * diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index ca333b3c7415..14c191d9c147 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -4863,14 +4863,16 @@ int intel_dp_retrain_link(struct intel_encoder *encoder, * retrain the link to get a picture. That's in case no * userspace component reacted to intermittent HPD dip. 
*/ -static bool intel_dp_hotplug(struct intel_encoder *encoder, - struct intel_connector *connector) +static enum intel_hotplug_state +intel_dp_hotplug(struct intel_encoder *encoder, + struct intel_connector *connector, + bool irq_received) { struct drm_modeset_acquire_ctx ctx; - bool changed; + enum intel_hotplug_state state; int ret; - changed = intel_encoder_hotplug(encoder, connector); + state = intel_encoder_hotplug(encoder, connector, irq_received); drm_modeset_acquire_init(&ctx, 0); @@ -4889,7 +4891,7 @@ static bool intel_dp_hotplug(struct intel_encoder *encoder, drm_modeset_acquire_fini(&ctx); WARN(ret, "Acquiring modeset locks failed with %i\n", ret); - return changed; + return state; } static void intel_dp_check_service_irq(struct intel_dp *intel_dp) diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c index ea3de4acc850..342587d91d57 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug.c @@ -112,6 +112,7 @@ enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv, #define HPD_STORM_DETECT_PERIOD 1000 #define HPD_STORM_REENABLE_DELAY (2 * 60 * 1000) +#define HPD_RETRY_DELAY 1000 /** * intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin @@ -266,8 +267,10 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); } -bool intel_encoder_hotplug(struct intel_encoder *encoder, - struct intel_connector *connector) +enum intel_hotplug_state +intel_encoder_hotplug(struct intel_encoder *encoder, + struct intel_connector *connector, + bool irq_received) { struct drm_device *dev = connector->base.dev; enum drm_connector_status old_status; @@ -279,7 +282,7 @@ bool intel_encoder_hotplug(struct intel_encoder *encoder, drm_helper_probe_detect(&connector->base, NULL, false); if (old_status == connector->base.status) - return false; + return INTEL_HOTPLUG_UNCHANGED; DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", connector->base.base.id, @@ -287,7 +290,7 @@ bool intel_encoder_hotplug(struct intel_encoder *encoder, drm_get_connector_status_name(old_status), drm_get_connector_status_name(connector->base.status)); - return true; + return INTEL_HOTPLUG_CHANGED; } static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder) @@ -339,7 +342,7 @@ static void i915_digport_work_func(struct work_struct *work) spin_lock_irq(&dev_priv->irq_lock); dev_priv->hotplug.event_bits |= old_bits; spin_unlock_irq(&dev_priv->irq_lock); - schedule_work(&dev_priv->hotplug.hotplug_work); + queue_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work, 0); } } @@ -349,14 +352,16 @@ static void i915_digport_work_func(struct work_struct *work) static void i915_hotplug_work_func(struct work_struct *work) { struct drm_i915_private *dev_priv = - container_of(work, struct drm_i915_private, hotplug.hotplug_work); + container_of(work, struct drm_i915_private, + hotplug.hotplug_work.work); struct drm_device *dev = &dev_priv->drm; struct intel_connector *intel_connector; struct intel_encoder *intel_encoder; struct drm_connector *connector; struct drm_connector_list_iter conn_iter; - bool changed = false; + u32 changed = 0, retry = 0; u32 hpd_event_bits; + u32 hpd_retry_bits; mutex_lock(&dev->mode_config.mutex); DRM_DEBUG_KMS("running encoder hotplug functions\n"); @@ -365,6 +370,8 @@ static void i915_hotplug_work_func(struct work_struct *work) hpd_event_bits = dev_priv->hotplug.event_bits; 
dev_priv->hotplug.event_bits = 0; + hpd_retry_bits = dev_priv->hotplug.retry_bits; + dev_priv->hotplug.retry_bits = 0; /* Enable polling for connectors which had HPD IRQ storms */ intel_hpd_irq_storm_switch_to_polling(dev_priv); @@ -373,16 +380,29 @@ static void i915_hotplug_work_func(struct work_struct *work) drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { + u32 hpd_bit; + intel_connector = to_intel_connector(connector); if (!intel_connector->encoder) continue; intel_encoder = intel_connector->encoder; - if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) { + hpd_bit = BIT(intel_encoder->hpd_pin); + if ((hpd_event_bits | hpd_retry_bits) & hpd_bit) { DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n", connector->name, intel_encoder->hpd_pin); - changed |= intel_encoder->hotplug(intel_encoder, - intel_connector); + switch (intel_encoder->hotplug(intel_encoder, + intel_connector, + hpd_event_bits & hpd_bit)) { + case INTEL_HOTPLUG_UNCHANGED: + break; + case INTEL_HOTPLUG_CHANGED: + changed |= hpd_bit; + break; + case INTEL_HOTPLUG_RETRY: + retry |= hpd_bit; + break; + } } } drm_connector_list_iter_end(&conn_iter); @@ -390,6 +410,17 @@ static void i915_hotplug_work_func(struct work_struct *work) if (changed) drm_kms_helper_hotplug_event(dev); + + /* Remove shared HPD pins that have changed */ + retry &= ~changed; + if (retry) { + spin_lock_irq(&dev_priv->irq_lock); + dev_priv->hotplug.retry_bits |= retry; + spin_unlock_irq(&dev_priv->irq_lock); + + mod_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work, + msecs_to_jiffies(HPD_RETRY_DELAY)); + } } @@ -516,7 +547,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, if (queue_dig) queue_work(dev_priv->hotplug.dp_wq, &dev_priv->hotplug.dig_port_work); if (queue_hp) - schedule_work(&dev_priv->hotplug.hotplug_work); + queue_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work, 0); } /** @@ -636,7 +667,8 @@ void intel_hpd_poll_init(struct drm_i915_private *dev_priv) void intel_hpd_init_work(struct drm_i915_private *dev_priv) { - INIT_WORK(&dev_priv->hotplug.hotplug_work, i915_hotplug_work_func); + INIT_DELAYED_WORK(&dev_priv->hotplug.hotplug_work, + i915_hotplug_work_func); INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func); INIT_WORK(&dev_priv->hotplug.poll_init_work, i915_hpd_poll_init_work); INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work, @@ -650,11 +682,12 @@ void intel_hpd_cancel_work(struct drm_i915_private *dev_priv) dev_priv->hotplug.long_port_mask = 0; dev_priv->hotplug.short_port_mask = 0; dev_priv->hotplug.event_bits = 0; + dev_priv->hotplug.retry_bits = 0; spin_unlock_irq(&dev_priv->irq_lock); cancel_work_sync(&dev_priv->hotplug.dig_port_work); - cancel_work_sync(&dev_priv->hotplug.hotplug_work); + cancel_delayed_work_sync(&dev_priv->hotplug.hotplug_work); cancel_work_sync(&dev_priv->hotplug.poll_init_work); cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work); } diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.h b/drivers/gpu/drm/i915/display/intel_hotplug.h index 805f897dbb7a..b0cd447b7fbc 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.h +++ b/drivers/gpu/drm/i915/display/intel_hotplug.h @@ -15,8 +15,9 @@ struct intel_connector; struct intel_encoder; void intel_hpd_poll_init(struct drm_i915_private *dev_priv); -bool intel_encoder_hotplug(struct intel_encoder *encoder, - struct intel_connector *connector); +enum intel_hotplug_state intel_encoder_hotplug(struct intel_encoder *encoder, + struct intel_connector 
*connector, + bool irq_received); void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 pin_mask, u32 long_mask); void intel_hpd_init(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index 213843a93c4e..c5e2dfd7ef80 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -1921,12 +1921,14 @@ static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder) &intel_sdvo->hotplug_active, 2); } -static bool intel_sdvo_hotplug(struct intel_encoder *encoder, - struct intel_connector *connector) +static enum intel_hotplug_state +intel_sdvo_hotplug(struct intel_encoder *encoder, + struct intel_connector *connector, + bool irq_received) { intel_sdvo_enable_hotplug(encoder); - return intel_encoder_hotplug(encoder, connector); + return intel_encoder_hotplug(encoder, connector, irq_received); } static bool diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 0ac9b8d5e8b9..6b84d04a6a28 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -4071,7 +4071,7 @@ static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data) */ intel_synchronize_irq(dev_priv); flush_work(&dev_priv->hotplug.dig_port_work); - flush_work(&dev_priv->hotplug.hotplug_work); + flush_delayed_work(&dev_priv->hotplug.hotplug_work); seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold); seq_printf(m, "Detected: %s\n", diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index d14e07315512..a86a6ea3849f 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -163,7 +163,7 @@ enum hpd_pin { #define HPD_STORM_DEFAULT_THRESHOLD 50 struct i915_hotplug { - struct work_struct hotplug_work; + struct delayed_work hotplug_work; struct { unsigned long last_jiffies; @@ -175,6 +175,7 @@ struct i915_hotplug { } state; } stats[HPD_NUM_PINS]; u32 event_bits; + u32 retry_bits; struct delayed_work reenable_work; u32 long_port_mask; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index e8ecbd55476e..c4016164c34e 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -101,14 +101,21 @@ struct intel_fbdev { struct mutex hpd_lock; }; +enum intel_hotplug_state { + INTEL_HOTPLUG_UNCHANGED, + INTEL_HOTPLUG_CHANGED, + INTEL_HOTPLUG_RETRY, +}; + struct intel_encoder { struct drm_encoder base; enum intel_output_type type; enum port port; unsigned int cloneable; - bool (*hotplug)(struct intel_encoder *encoder, - struct intel_connector *connector); + enum intel_hotplug_state (*hotplug)(struct intel_encoder *encoder, + struct intel_connector *connector, + bool irq_received); enum intel_output_type (*compute_output_type)(struct intel_encoder *, struct intel_crtc_state *, struct drm_connector_state *); -- cgit v1.2.3 From bb80c9255770fa1ed54e889a6bee628bdd0f6762 Mon Sep 17 00:00:00 2001 From: José Roberto de Souza Date: Thu, 11 Jul 2019 17:53:43 -0700 Subject: drm/i915: Enable hotplug retry MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Right now we are aware of two cases that needs another hotplug retry: - Unpowered type-c dongles - HDMI slow unplug Both have a complete explanation in the code to schedule another run of the hotplug handler. 
It could have more checks to just trigger the retry in those two specific cases but why would sink signal a long pulse if there is no change? Also the drawback of running the hotplug handler again is really low and that could fix another cases that we are not aware. Also retrying for old DP ports(non-DDI) to make it consistent and not cause CI failures if those systems are connected to chamelium boards that will be used to simulate the issues reported in here. v2: Also retrying for old DP ports(non-DDI)(Imre) v4: Renamed INTEL_HOTPLUG_NOCHANGE to INTEL_HOTPLUG_UNCHANGED to keep it consistent(Rodrigo) Tested-by: Timo Aaltonen Cc: Ville Syrjälä Cc: Imre Deak Cc: Jani Nikula Reviewed-by: Imre Deak Signed-off-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190712005343.24571-2-jose.souza@intel.com --- drivers/gpu/drm/i915/display/intel_ddi.c | 21 +++++++++++++++++++++ drivers/gpu/drm/i915/display/intel_dp.c | 7 +++++++ drivers/gpu/drm/i915/display/intel_hdmi.c | 28 +++++++++++++++++++++++++++- 3 files changed, 55 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index c89d0c7543dc..18bc0f2690c9 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -4075,6 +4075,7 @@ intel_ddi_hotplug(struct intel_encoder *encoder, struct intel_connector *connector, bool irq_received) { + struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); struct drm_modeset_acquire_ctx ctx; enum intel_hotplug_state state; int ret; @@ -4101,6 +4102,26 @@ intel_ddi_hotplug(struct intel_encoder *encoder, drm_modeset_acquire_fini(&ctx); WARN(ret, "Acquiring modeset locks failed with %i\n", ret); + /* + * Unpowered type-c dongles can take some time to boot and be + * responsible, so here giving some time to those dongles to power up + * and then retrying the probe. + * + * On many platforms the HDMI live state signal is known to be + * unreliable, so we can't use it to detect if a sink is connected or + * not. Instead we detect if it's connected based on whether we can + * read the EDID or not. That in turn has a problem during disconnect, + * since the HPD interrupt may be raised before the DDC lines get + * disconnected (due to how the required length of DDC vs. HPD + * connector pins are specified) and so we'll still be able to get a + * valid EDID. To solve this schedule another detection cycle if this + * time around we didn't detect any change in the sink's connection + * status. + */ + if (state == INTEL_HOTPLUG_UNCHANGED && irq_received && + !dig_port->dp.is_mst) + state = INTEL_HOTPLUG_RETRY; + return state; } diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 14c191d9c147..0eb5d66f87a7 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -4891,6 +4891,13 @@ intel_dp_hotplug(struct intel_encoder *encoder, drm_modeset_acquire_fini(&ctx); WARN(ret, "Acquiring modeset locks failed with %i\n", ret); + /* + * Keeping it consistent with intel_ddi_hotplug() and + * intel_hdmi_hotplug(). 
+ */ + if (state == INTEL_HOTPLUG_UNCHANGED && irq_received) + state = INTEL_HOTPLUG_RETRY; + return state; } diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index 098ea2c5d831..9bf28de10401 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -3126,6 +3126,32 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, DRM_DEBUG_KMS("CEC notifier get failed\n"); } +static enum intel_hotplug_state +intel_hdmi_hotplug(struct intel_encoder *encoder, + struct intel_connector *connector, bool irq_received) +{ + enum intel_hotplug_state state; + + state = intel_encoder_hotplug(encoder, connector, irq_received); + + /* + * On many platforms the HDMI live state signal is known to be + * unreliable, so we can't use it to detect if a sink is connected or + * not. Instead we detect if it's connected based on whether we can + * read the EDID or not. That in turn has a problem during disconnect, + * since the HPD interrupt may be raised before the DDC lines get + * disconnected (due to how the required length of DDC vs. HPD + * connector pins are specified) and so we'll still be able to get a + * valid EDID. To solve this schedule another detection cycle if this + * time around we didn't detect any change in the sink's connection + * status. + */ + if (state == INTEL_HOTPLUG_UNCHANGED && irq_received) + state = INTEL_HOTPLUG_RETRY; + + return state; +} + void intel_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg, enum port port) { @@ -3149,7 +3175,7 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv, &intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS, "HDMI %c", port_name(port)); - intel_encoder->hotplug = intel_encoder_hotplug; + intel_encoder->hotplug = intel_hdmi_hotplug; intel_encoder->compute_config = intel_hdmi_compute_config; if (HAS_PCH_SPLIT(dev_priv)) { intel_encoder->disable = pch_disable_hdmi; -- cgit v1.2.3 From 7aaddd96d5febcf5b24357a326b3038d49a20532 Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Thu, 11 Jul 2019 05:13:13 +0300 Subject: drm/modes: Don't apply cmdline's rotation if it wasn't specified The rotation mode from cmdline shouldn't be taken into account if it wasn't specified in the cmdline. This fixes ignored default display orientation when display mode is given using cmdline without the rotation being specified. Fixes: 1bf4e09227c3 ("drm/modes: Allow to specify rotation and reflection on the commandline") Signed-off-by: Dmitry Osipenko Signed-off-by: Maxime Ripard Link: https://patchwork.freedesktop.org/patch/msgid/20190711021313.29445-1-digetx@gmail.com --- drivers/gpu/drm/drm_client_modeset.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c index 56d36779d213..c8922b7cac09 100644 --- a/drivers/gpu/drm/drm_client_modeset.c +++ b/drivers/gpu/drm/drm_client_modeset.c @@ -859,7 +859,7 @@ bool drm_client_rotation(struct drm_mode_set *modeset, unsigned int *rotation) * simple XOR between the two handle the addition nicely. 
*/ cmdline = &connector->cmdline_mode; - if (cmdline->specified) { + if (cmdline->specified && cmdline->rotation_reflection) { unsigned int cmdline_rest, panel_rest; unsigned int cmdline_rot, panel_rot; unsigned int sum_rot, sum_rest; -- cgit v1.2.3 From cfe7288c276e359eebf057699fe86c2f8af14224 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 15 Jul 2019 09:09:28 +0100 Subject: drm/i915: Lock the engine while dumping the active request We cannot let the request be retired and freed while we are trying to dump it during error capture. It is not sufficient just to grab a reference to the request, as during retirement we may free the ring which we are also dumping. So take the engine lock to prevent retiring and freeing of the request. Reported-by: Alex Shumsky Fixes: 83c317832eb1 ("drm/i915: Dump the ringbuffer of the active request for debugging") Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Cc: Joonas Lahtinen Cc: Alex Shumsky Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190715080946.15593-6-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 11 ++++------- drivers/gpu/drm/i915/i915_gpu_error.c | 6 ++++-- 2 files changed, 8 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 022e00eb79ad..9570659aa360 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -1471,6 +1471,7 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct i915_gpu_error * const error = &engine->i915->gpu_error; struct i915_request *rq; intel_wakeref_t wakeref; + unsigned long flags; if (header) { va_list ap; @@ -1490,10 +1491,9 @@ void intel_engine_dump(struct intel_engine_cs *engine, i915_reset_engine_count(error, engine), i915_reset_count(error)); - rcu_read_lock(); - drm_printf(m, "\tRequests:\n"); + spin_lock_irqsave(&engine->active.lock, flags); rq = intel_engine_find_active_request(engine); if (rq) { print_request(m, rq, "\t\tactive "); @@ -1513,8 +1513,7 @@ void intel_engine_dump(struct intel_engine_cs *engine, print_request_ring(m, rq); } - - rcu_read_unlock(); + spin_unlock_irqrestore(&engine->active.lock, flags); wakeref = intel_runtime_pm_get_if_in_use(&engine->i915->runtime_pm); if (wakeref) { @@ -1676,7 +1675,6 @@ struct i915_request * intel_engine_find_active_request(struct intel_engine_cs *engine) { struct i915_request *request, *active = NULL; - unsigned long flags; /* * We are called by the error capture, reset and to dump engine @@ -1689,7 +1687,7 @@ intel_engine_find_active_request(struct intel_engine_cs *engine) * At all other times, we must assume the GPU is still running, but * we only care about the snapshot of this moment. 
*/ - spin_lock_irqsave(&engine->active.lock, flags); + lockdep_assert_held(&engine->active.lock); list_for_each_entry(request, &engine->active.requests, sched.link) { if (i915_request_completed(request)) continue; @@ -1704,7 +1702,6 @@ intel_engine_find_active_request(struct intel_engine_cs *engine) active = request; break; } - spin_unlock_irqrestore(&engine->active.lock, flags); return active; } diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 78e388fa059c..c5b89bf4d616 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -1411,6 +1411,7 @@ static void gem_record_rings(struct i915_gpu_state *error) struct intel_engine_cs *engine = i915->engine[i]; struct drm_i915_error_engine *ee = &error->engine[i]; struct i915_request *request; + unsigned long flags; ee->engine_id = -1; @@ -1422,10 +1423,11 @@ static void gem_record_rings(struct i915_gpu_state *error) error_record_engine_registers(error, engine, ee); error_record_engine_execlists(engine, ee); + spin_lock_irqsave(&engine->active.lock, flags); request = intel_engine_find_active_request(engine); if (request) { struct i915_gem_context *ctx = request->gem_context; - struct intel_ring *ring; + struct intel_ring *ring = request->ring; ee->vm = ctx->vm ?: &engine->gt->ggtt->vm; @@ -1455,7 +1457,6 @@ static void gem_record_rings(struct i915_gpu_state *error) ee->rq_post = request->postfix; ee->rq_tail = request->tail; - ring = request->ring; ee->cpu_ring_head = ring->head; ee->cpu_ring_tail = ring->tail; ee->ringbuffer = @@ -1463,6 +1464,7 @@ static void gem_record_rings(struct i915_gpu_state *error) engine_record_requests(engine, request, ee); } + spin_unlock_irqrestore(&engine->active.lock, flags); ee->hws_page = i915_error_object_create(i915, -- cgit v1.2.3 From 09975b861aa0c5cc0f1a6f9543807f344c4c12ae Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 9 Jul 2019 10:12:33 +0100 Subject: drm/i915/execlists: Disable preemption under GVT Preempt-to-busy uses a GPU semaphore to enforce an idle-barrier across preemption, but mediated gvt does not fully support semaphores. v2: Fiddle around with the flags and settle on using has-semaphores for the core bits so that we retain the ability to preempt our own semaphores. Signed-off-by: Chris Wilson Cc: Zhenyu Wang Cc: Xiaolin Zhang Cc: Tvrtko Ursulin Cc: Mika Kuoppala Acked-by: Zhenyu Wang Link: https://patchwork.freedesktop.org/patch/msgid/20190709091233.8573-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 4 ++-- drivers/gpu/drm/i915/gt/intel_lrc.c | 24 ++++++++++++++++++------ drivers/gpu/drm/i915/gt/selftest_lrc.c | 6 ++++++ 3 files changed, 26 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 9570659aa360..c0bc9cb7f228 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -829,6 +829,8 @@ int intel_engine_init_common(struct intel_engine_cs *engine) struct drm_i915_private *i915 = engine->i915; int ret; + engine->set_default_submission(engine); + /* We may need to do things with the shrinker which * require us to immediately switch back to the default * context. 
This can cause a problem as pinning the @@ -847,8 +849,6 @@ int intel_engine_init_common(struct intel_engine_cs *engine) engine->emit_fini_breadcrumb_dw = ret; - engine->set_default_submission(engine); - return 0; err_unpin: diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 9e0992498087..a220575a69bc 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -306,6 +306,9 @@ static inline bool need_preempt(const struct intel_engine_cs *engine, { int last_prio; + if (!intel_engine_has_semaphores(engine)) + return false; + /* * Check if the current priority hint merits a preemption attempt. * @@ -904,6 +907,9 @@ need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq) { int hint; + if (!intel_engine_has_semaphores(engine)) + return false; + if (list_is_last(&rq->sched.link, &engine->active.requests)) return false; @@ -2656,7 +2662,8 @@ static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs) *cs++ = MI_USER_INTERRUPT; *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; - cs = emit_preempt_busywait(request, cs); + if (intel_engine_has_semaphores(request->engine)) + cs = emit_preempt_busywait(request, cs); request->tail = intel_ring_offset(request, cs); assert_ring_tail_valid(request->ring, request->tail); @@ -2680,7 +2687,8 @@ static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs) *cs++ = MI_USER_INTERRUPT; *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; - cs = emit_preempt_busywait(request, cs); + if (intel_engine_has_semaphores(request->engine)) + cs = emit_preempt_busywait(request, cs); request->tail = intel_ring_offset(request, cs); assert_ring_tail_valid(request->ring, request->tail); @@ -2728,10 +2736,11 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine) engine->unpark = NULL; engine->flags |= I915_ENGINE_SUPPORTS_STATS; - if (!intel_vgpu_active(engine->i915)) + if (!intel_vgpu_active(engine->i915)) { engine->flags |= I915_ENGINE_HAS_SEMAPHORES; - if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) - engine->flags |= I915_ENGINE_HAS_PREEMPTION; + if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) + engine->flags |= I915_ENGINE_HAS_PREEMPTION; + } } static void execlists_destroy(struct intel_engine_cs *engine) @@ -3419,7 +3428,6 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx, ve->base.class = OTHER_CLASS; ve->base.uabi_class = I915_ENGINE_CLASS_INVALID; ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL; - ve->base.flags = I915_ENGINE_IS_VIRTUAL; /* * The decision on whether to submit a request using semaphores @@ -3516,8 +3524,12 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx, ve->base.emit_fini_breadcrumb = sibling->emit_fini_breadcrumb; ve->base.emit_fini_breadcrumb_dw = sibling->emit_fini_breadcrumb_dw; + + ve->base.flags = sibling->flags; } + ve->base.flags |= I915_ENGINE_IS_VIRTUAL; + return &ve->context; err_put: diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index 6b6cea76cc8b..60f27e52d267 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -269,6 +269,9 @@ static int live_timeslice_preempt(void *arg) enum intel_engine_id id; for_each_engine(engine, i915, id) { + if (!intel_engine_has_preemption(engine)) + continue; + memset(vaddr, 0, PAGE_SIZE); err = slice_semaphore_queue(engine, vma, count); @@ -354,6 +357,9 @@ static int live_busywait_preempt(void *arg) struct igt_live_test t; u32 *cs; + if 
(!intel_engine_has_preemption(engine)) + continue; + if (!intel_engine_can_store_dword(engine)) continue; -- cgit v1.2.3 From 8a98e839601f83fc1b31963224d9e1279e5d122f Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 12 Jul 2019 12:27:24 +0100 Subject: drm/i915/gtt: Recursive ppgtt alloc for gen8 Refactor the separate allocation routines into a single recursive function. Signed-off-by: Chris Wilson Reviewed-by: Abdiel Janulgue Link: https://patchwork.freedesktop.org/patch/msgid/20190712112725.2892-4-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_gtt.c | 272 +++++++++++++----------------------- 1 file changed, 97 insertions(+), 175 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 1e3ccdf7fe1b..49de6d39488f 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1007,199 +1007,119 @@ static void gen8_ppgtt_clear(struct i915_address_space *vm, start, start + length, vm->top); } -static void gen8_ppgtt_clear_pd(struct i915_address_space *vm, - struct i915_page_directory *pd, - u64 start, u64 length) -{ - GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT))); - GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT))); - - start >>= GEN8_PTE_SHIFT; - length >>= GEN8_PTE_SHIFT; - - __gen8_ppgtt_clear(vm, pd, start, start + length, 1); -} - -static void gen8_ppgtt_clear_pdp(struct i915_address_space *vm, - struct i915_page_directory * const pdp, - u64 start, u64 length) -{ - GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT))); - GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT))); - - start >>= GEN8_PTE_SHIFT; - length >>= GEN8_PTE_SHIFT; - - __gen8_ppgtt_clear(vm, pdp, start, start + length, 2); -} - -static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm, - struct i915_page_directory *pd, - u64 start, u64 length) +static int __gen8_ppgtt_alloc(struct i915_address_space * const vm, + struct i915_page_directory * const pd, + u64 * const start, u64 end, int lvl) { - struct i915_page_table *pt, *alloc = NULL; - u64 from = start; - unsigned int pde; + const struct i915_page_scratch * const scratch = &vm->scratch[lvl]; + struct i915_page_table *alloc = NULL; + unsigned int idx, len; int ret = 0; + len = gen8_pd_range(*start, end, lvl--, &idx); + DBG("%s(%p):{lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d}\n", + __func__, vm, lvl + 1, *start, end, + idx, len, atomic_read(px_used(pd))); + GEM_BUG_ON(!len || (idx + len - 1) >> gen8_pd_shift(1)); + spin_lock(&pd->lock); - gen8_for_each_pde(pt, pd, start, length, pde) { - const int count = gen8_pte_count(start, length); + GEM_BUG_ON(!atomic_read(px_used(pd))); /* Must be pinned! 
*/ + do { + struct i915_page_table *pt = pd->entry[idx]; if (!pt) { spin_unlock(&pd->lock); - pt = fetch_and_zero(&alloc); - if (!pt) - pt = alloc_pt(vm); - if (IS_ERR(pt)) { - ret = PTR_ERR(pt); - goto unwind; - } + DBG("%s(%p):{ lvl:%d, idx:%d } allocating new tree\n", + __func__, vm, lvl + 1, idx); - if (count < GEN8_PTES || intel_vgpu_active(vm->i915)) - fill_px(pt, vm->scratch[0].encode); + pt = fetch_and_zero(&alloc); + if (lvl) { + if (!pt) { + pt = &alloc_pd(vm)->pt; + if (IS_ERR(pt)) { + ret = PTR_ERR(pt); + goto out; + } + } - spin_lock(&pd->lock); - if (!pd->entry[pde]) { - set_pd_entry(pd, pde, pt); + fill_px(pt, vm->scratch[lvl].encode); } else { - alloc = pt; - pt = pd->entry[pde]; - } - } - - atomic_add(count, &pt->used); - } - spin_unlock(&pd->lock); - goto out; - -unwind: - gen8_ppgtt_clear_pd(vm, pd, from, start - from); -out: - if (alloc) - free_px(vm, alloc); - return ret; -} - -static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm, - struct i915_page_directory *pdp, - u64 start, u64 length) -{ - struct i915_page_directory *pd, *alloc = NULL; - u64 from = start; - unsigned int pdpe; - int ret = 0; + if (!pt) { + pt = alloc_pt(vm); + if (IS_ERR(pt)) { + ret = PTR_ERR(pt); + goto out; + } + } - spin_lock(&pdp->lock); - gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { - if (!pd) { - spin_unlock(&pdp->lock); - - pd = fetch_and_zero(&alloc); - if (!pd) - pd = alloc_pd(vm); - if (IS_ERR(pd)) { - ret = PTR_ERR(pd); - goto unwind; + if (intel_vgpu_active(vm->i915) || + gen8_pt_count(*start, end) < I915_PDES) + fill_px(pt, vm->scratch[lvl].encode); } - fill_px(pd, vm->scratch[1].encode); + spin_lock(&pd->lock); + if (likely(!pd->entry[idx])) + set_pd_entry(pd, idx, pt); + else + alloc = pt, pt = pd->entry[idx]; + } - spin_lock(&pdp->lock); - if (!pdp->entry[pdpe]) { - set_pd_entry(pdp, pdpe, pd); - } else { - alloc = pd; - pd = pdp->entry[pdpe]; + if (lvl) { + atomic_inc(&pt->used); + spin_unlock(&pd->lock); + + ret = __gen8_ppgtt_alloc(vm, as_pd(pt), + start, end, lvl); + if (unlikely(ret)) { + if (release_pd_entry(pd, idx, pt, scratch)) + free_px(vm, pt); + goto out; } - } - atomic_inc(px_used(pd)); - spin_unlock(&pdp->lock); - ret = gen8_ppgtt_alloc_pd(vm, pd, start, length); - if (unlikely(ret)) - goto unwind_pd; + spin_lock(&pd->lock); + atomic_dec(&pt->used); + GEM_BUG_ON(!atomic_read(&pt->used)); + } else { + unsigned int count = gen8_pt_count(*start, end); - spin_lock(&pdp->lock); - atomic_dec(px_used(pd)); - } - spin_unlock(&pdp->lock); - goto out; + DBG("%s(%p):{lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d} inserting pte\n", + __func__, vm, lvl, *start, end, + gen8_pd_index(*start, 0), count, + atomic_read(&pt->used)); -unwind_pd: - if (release_pd_entry(pdp, pdpe, &pd->pt, &vm->scratch[2])) - free_px(vm, pd); -unwind: - gen8_ppgtt_clear_pdp(vm, pdp, from, start - from); + atomic_add(count, &pt->used); + GEM_BUG_ON(atomic_read(&pt->used) > I915_PDES); + *start += count; + } + } while (idx++, --len); + spin_unlock(&pd->lock); out: if (alloc) free_px(vm, alloc); return ret; } -static int gen8_ppgtt_alloc_3lvl(struct i915_address_space *vm, - u64 start, u64 length) -{ - return gen8_ppgtt_alloc_pdp(vm, - i915_vm_to_ppgtt(vm)->pd, start, length); -} - -static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm, - u64 start, u64 length) +static int gen8_ppgtt_alloc(struct i915_address_space *vm, + u64 start, u64 length) { - struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); - struct i915_page_directory * const pml4 = ppgtt->pd; - struct 
i915_page_directory *pdp, *alloc = NULL; u64 from = start; - int ret = 0; - u32 pml4e; - - spin_lock(&pml4->lock); - gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) { - if (!pdp) { - spin_unlock(&pml4->lock); - - pdp = fetch_and_zero(&alloc); - if (!pdp) - pdp = alloc_pd(vm); - if (IS_ERR(pdp)) { - ret = PTR_ERR(pdp); - goto unwind; - } - - fill_px(pdp, vm->scratch[2].encode); + int err; - spin_lock(&pml4->lock); - if (!pml4->entry[pml4e]) { - set_pd_entry(pml4, pml4e, pdp); - } else { - alloc = pdp; - pdp = pml4->entry[pml4e]; - } - } - atomic_inc(px_used(pdp)); - spin_unlock(&pml4->lock); + GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT))); + GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT))); - ret = gen8_ppgtt_alloc_pdp(vm, pdp, start, length); - if (unlikely(ret)) - goto unwind_pdp; + start >>= GEN8_PTE_SHIFT; + length >>= GEN8_PTE_SHIFT; + GEM_BUG_ON(length == 0); - spin_lock(&pml4->lock); - atomic_dec(px_used(pdp)); - } - spin_unlock(&pml4->lock); - goto out; + err = __gen8_ppgtt_alloc(vm, i915_vm_to_ppgtt(vm)->pd, + &start, start + length, vm->top); + if (unlikely(err)) + __gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd, + from, start, vm->top); -unwind_pdp: - if (release_pd_entry(pml4, pml4e, &pdp->pt, &vm->scratch[3])) - free_px(vm, pdp); -unwind: - gen8_ppgtt_clear(vm, from, start - from); -out: - if (alloc) - free_px(vm, alloc); - return ret; + return err; } static inline struct sgt_dma { @@ -1496,19 +1416,22 @@ free_scratch: static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt) { struct i915_address_space *vm = &ppgtt->vm; - struct i915_page_directory *pdp = ppgtt->pd; - struct i915_page_directory *pd; - u64 start = 0, length = ppgtt->vm.total; - unsigned int pdpe; + struct i915_page_directory *pd = ppgtt->pd; + unsigned int idx; + + GEM_BUG_ON(vm->top != 2); + GEM_BUG_ON((vm->total >> __gen8_pte_shift(2)) != GEN8_3LVL_PDPES); + + for (idx = 0; idx < GEN8_3LVL_PDPES; idx++) { + struct i915_page_directory *pde; - gen8_for_each_pdpe(pd, pdp, start, length, pdpe) { - pd = alloc_pd(vm); - if (IS_ERR(pd)) - return PTR_ERR(pd); + pde = alloc_pd(vm); + if (IS_ERR(pde)) + return PTR_ERR(pde); - fill_px(pd, vm->scratch[1].encode); - set_pd_entry(pdp, pdpe, pd); - atomic_inc(px_used(pd)); /* keep pinned */ + fill_px(pde, vm->scratch[1].encode); + set_pd_entry(pd, idx, pde); + atomic_inc(px_used(pde)); /* keep pinned */ } return 0; @@ -1597,7 +1520,6 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915) } if (i915_vm_is_4lvl(&ppgtt->vm)) { - ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_4lvl; ppgtt->vm.insert_entries = gen8_ppgtt_insert_4lvl; } else { if (intel_vgpu_active(i915)) { @@ -1606,10 +1528,10 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915) goto err_free_pd; } - ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_3lvl; ppgtt->vm.insert_entries = gen8_ppgtt_insert_3lvl; } + ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc; ppgtt->vm.clear_range = gen8_ppgtt_clear; if (intel_vgpu_active(i915)) -- cgit v1.2.3 From bea5faf7be325bba63d82b5d37e31b43140ddabf Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 12 Jul 2019 12:27:25 +0100 Subject: drm/i915/gtt: Tidy up ppgtt insertion for gen8 Apply the new radix shift helpers to extract the multi-level indices cleanly when inserting pte into the gtt tree. 
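For reference, a standalone sketch of the radix layout those helpers index into, following the 47:39 | 38:30 | 29:21 | 20:12 | 11:0 comment kept in i915_gem_gtt.h below. The helper name and constants here are simplified stand-ins for illustration, not the kernel's inlines:

#include <stdint.h>
#include <stdio.h>

#define GTT_PAGE_SHIFT  12              /* bits 11:0 are the page offset */
#define PD_SHIFT(lvl)   (GTT_PAGE_SHIFT + 9 * (lvl))

/* Index selecting the entry at page-table level 'lvl' (0 = PTE, 3 = PML4E). */
static unsigned int pd_index(uint64_t addr, int lvl)
{
        return (addr >> PD_SHIFT(lvl)) & 0x1ff; /* 512 entries per level */
}

int main(void)
{
        uint64_t addr = 0x0000123456789000ull;
        int lvl;

        for (lvl = 3; lvl >= 0; lvl--)
                printf("lvl %d index: %u\n", lvl, pd_index(addr, lvl));
        return 0;
}

Extracting each level's index with successive 9-bit shifts of a single value is what lets the insertion loop below drop the old per-level gen8_insert_pte bookkeeping structure.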
Signed-off-by: Chris Wilson Reviewed-by: Abdiel Janulgue Link: https://patchwork.freedesktop.org/patch/msgid/20190712112725.2892-5-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_gtt.c | 115 ++++++++++++++---------------------- drivers/gpu/drm/i915/i915_gem_gtt.h | 90 ++-------------------------- 2 files changed, 48 insertions(+), 157 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 49de6d39488f..220aba5a94d2 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1131,47 +1131,28 @@ static inline struct sgt_dma { return (struct sgt_dma) { sg, addr, addr + sg->length }; } -struct gen8_insert_pte { - u16 pml4e; - u16 pdpe; - u16 pde; - u16 pte; -}; - -static __always_inline struct gen8_insert_pte gen8_insert_pte(u64 start) -{ - return (struct gen8_insert_pte) { - gen8_pml4e_index(start), - gen8_pdpe_index(start), - gen8_pde_index(start), - gen8_pte_index(start), - }; -} - -static __always_inline bool +static __always_inline u64 gen8_ppgtt_insert_pte_entries(struct i915_ppgtt *ppgtt, struct i915_page_directory *pdp, struct sgt_dma *iter, - struct gen8_insert_pte *idx, + u64 idx, enum i915_cache_level cache_level, u32 flags) { struct i915_page_directory *pd; const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags); gen8_pte_t *vaddr; - bool ret; - GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm)); - pd = i915_pd_entry(pdp, idx->pdpe); - vaddr = kmap_atomic_px(i915_pt_entry(pd, idx->pde)); + pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2)); + vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1))); do { - vaddr[idx->pte] = pte_encode | iter->dma; + vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma; iter->dma += I915_GTT_PAGE_SIZE; if (iter->dma >= iter->max) { iter->sg = __sg_next(iter->sg); if (!iter->sg) { - ret = false; + idx = 0; break; } @@ -1179,30 +1160,22 @@ gen8_ppgtt_insert_pte_entries(struct i915_ppgtt *ppgtt, iter->max = iter->dma + iter->sg->length; } - if (++idx->pte == GEN8_PTES) { - idx->pte = 0; - - if (++idx->pde == I915_PDES) { - idx->pde = 0; - + if (gen8_pd_index(++idx, 0) == 0) { + if (gen8_pd_index(idx, 1) == 0) { /* Limited by sg length for 3lvl */ - if (++idx->pdpe == GEN8_PML4ES_PER_PML4) { - idx->pdpe = 0; - ret = true; + if (gen8_pd_index(idx, 2) == 0) break; - } - GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm)); - pd = pdp->entry[idx->pdpe]; + pd = pdp->entry[gen8_pd_index(idx, 2)]; } kunmap_atomic(vaddr); - vaddr = kmap_atomic_px(i915_pt_entry(pd, idx->pde)); + vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1))); } } while (1); kunmap_atomic(vaddr); - return ret; + return idx; } static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm, @@ -1212,9 +1185,9 @@ static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm, { struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); struct sgt_dma iter = sgt_dma(vma); - struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start); - gen8_ppgtt_insert_pte_entries(ppgtt, ppgtt->pd, &iter, &idx, + gen8_ppgtt_insert_pte_entries(ppgtt, ppgtt->pd, &iter, + vma->node.start >> GEN8_PTE_SHIFT, cache_level, flags); vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; @@ -1231,39 +1204,38 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma, dma_addr_t rem = iter->sg->length; do { - struct gen8_insert_pte idx = gen8_insert_pte(start); struct i915_page_directory *pdp = - i915_pdp_entry(pml4, idx.pml4e); - struct i915_page_directory *pd = 
i915_pd_entry(pdp, idx.pdpe); - unsigned int page_size; - bool maybe_64K = false; + i915_pd_entry(pml4, __gen8_pte_index(start, 3)); + struct i915_page_directory *pd = + i915_pd_entry(pdp, __gen8_pte_index(start, 2)); gen8_pte_t encode = pte_encode; + unsigned int maybe_64K = -1; + unsigned int page_size; gen8_pte_t *vaddr; - u16 index, max; + u16 index; if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M && IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) && - rem >= I915_GTT_PAGE_SIZE_2M && !idx.pte) { - index = idx.pde; - max = I915_PDES; - page_size = I915_GTT_PAGE_SIZE_2M; - + rem >= I915_GTT_PAGE_SIZE_2M && + !__gen8_pte_index(start, 0)) { + index = __gen8_pte_index(start, 1); encode |= GEN8_PDE_PS_2M; + page_size = I915_GTT_PAGE_SIZE_2M; vaddr = kmap_atomic_px(pd); } else { - struct i915_page_table *pt = i915_pt_entry(pd, idx.pde); + struct i915_page_table *pt = + i915_pt_entry(pd, __gen8_pte_index(start, 1)); - index = idx.pte; - max = GEN8_PTES; + index = __gen8_pte_index(start, 0); page_size = I915_GTT_PAGE_SIZE; if (!index && vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K && IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) && (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) || - rem >= (max - index) * I915_GTT_PAGE_SIZE)) - maybe_64K = true; + rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE)) + maybe_64K = __gen8_pte_index(start, 1); vaddr = kmap_atomic_px(pt); } @@ -1284,16 +1256,16 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma, iter->dma = sg_dma_address(iter->sg); iter->max = iter->dma + rem; - if (maybe_64K && index < max && + if (maybe_64K != -1 && index < I915_PDES && !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) && (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) || - rem >= (max - index) * I915_GTT_PAGE_SIZE))) - maybe_64K = false; + rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))) + maybe_64K = -1; if (unlikely(!IS_ALIGNED(iter->dma, page_size))) break; } - } while (rem >= page_size && index < max); + } while (rem >= page_size && index < I915_PDES); kunmap_atomic(vaddr); @@ -1303,14 +1275,14 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma, * it and have reached the end of the sg table and we have * enough padding. 
*/ - if (maybe_64K && - (index == max || + if (maybe_64K != -1 && + (index == I915_PDES || (i915_vm_has_scratch_64K(vma->vm) && !iter->sg && IS_ALIGNED(vma->node.start + vma->node.size, I915_GTT_PAGE_SIZE_2M)))) { vaddr = kmap_atomic_px(pd); - vaddr[idx.pde] |= GEN8_PDE_IPS_64K; + vaddr[maybe_64K] |= GEN8_PDE_IPS_64K; kunmap_atomic(vaddr); page_size = I915_GTT_PAGE_SIZE_64K; @@ -1327,8 +1299,7 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma, u16 i; encode = vma->vm->scratch[0].encode; - vaddr = kmap_atomic_px(i915_pt_entry(pd, - idx.pde)); + vaddr = kmap_atomic_px(i915_pt_entry(pd, maybe_64K)); for (i = 1; i < index; i += 16) memset64(vaddr + i, encode, 15); @@ -1354,13 +1325,13 @@ static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm, gen8_ppgtt_insert_huge_entries(vma, pml4, &iter, cache_level, flags); } else { - struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start); + u64 idx = vma->node.start >> GEN8_PTE_SHIFT; - while (gen8_ppgtt_insert_pte_entries(ppgtt, - i915_pdp_entry(pml4, idx.pml4e++), - &iter, &idx, cache_level, - flags)) - GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4); + while ((idx = gen8_ppgtt_insert_pte_entries(ppgtt, + i915_pd_entry(pml4, gen8_pd_index(idx, 3)), + &iter, idx, cache_level, + flags))) + ; vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; } diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index de156634a889..cea59ef1a365 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -115,29 +115,18 @@ typedef u64 gen8_pte_t; #define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0)) #define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr) -/* GEN8 32b style address is defined as a 3 level page table: +/* + * GEN8 32b style address is defined as a 3 level page table: * 31:30 | 29:21 | 20:12 | 11:0 * PDPE | PDE | PTE | offset * The difference as compared to normal x86 3 level page table is the PDPEs are * programmed via register. 
- */ -#define GEN8_3LVL_PDPES 4 -#define GEN8_PDE_SHIFT 21 -#define GEN8_PDE_MASK 0x1ff -#define GEN8_PTE_MASK 0x1ff -#define GEN8_PTES I915_PTES(sizeof(gen8_pte_t)) - -/* GEN8 48b style address is defined as a 4 level page table: + * + * GEN8 48b style address is defined as a 4 level page table: * 47:39 | 38:30 | 29:21 | 20:12 | 11:0 * PML4E | PDPE | PDE | PTE | offset */ -#define GEN8_PML4ES_PER_PML4 512 -#define GEN8_PML4E_SHIFT 39 -#define GEN8_PML4E_MASK (GEN8_PML4ES_PER_PML4 - 1) -#define GEN8_PDPE_SHIFT 30 -/* NB: GEN8_PDPE_MASK is untrue for 32b platforms, but it has no impact on 32b page - * tables */ -#define GEN8_PDPE_MASK 0x1ff +#define GEN8_3LVL_PDPES 4 #define PPAT_UNCACHED (_PAGE_PWT | _PAGE_PCD) #define PPAT_CACHED_PDE 0 /* WB LLC */ @@ -521,15 +510,6 @@ static inline u32 gen6_pde_index(u32 addr) return i915_pde_index(addr, GEN6_PDE_SHIFT); } -static inline unsigned int -i915_pdpes_per_pdp(const struct i915_address_space *vm) -{ - if (i915_vm_is_4lvl(vm)) - return GEN8_PML4ES_PER_PML4; - - return GEN8_3LVL_PDPES; -} - static inline struct i915_page_table * i915_pt_entry(const struct i915_page_directory * const pd, const unsigned short n) @@ -544,66 +524,6 @@ i915_pd_entry(const struct i915_page_directory * const pdp, return pdp->entry[n]; } -static inline struct i915_page_directory * -i915_pdp_entry(const struct i915_page_directory * const pml4, - const unsigned short n) -{ - return pml4->entry[n]; -} - -/* Equivalent to the gen6 version, For each pde iterates over every pde - * between from start until start + length. On gen8+ it simply iterates - * over every page directory entry in a page directory. - */ -#define gen8_for_each_pde(pt, pd, start, length, iter) \ - for (iter = gen8_pde_index(start); \ - length > 0 && iter < I915_PDES && \ - (pt = i915_pt_entry(pd, iter), true); \ - ({ u64 temp = ALIGN(start+1, 1 << GEN8_PDE_SHIFT); \ - temp = min(temp - start, length); \ - start += temp, length -= temp; }), ++iter) - -#define gen8_for_each_pdpe(pd, pdp, start, length, iter) \ - for (iter = gen8_pdpe_index(start); \ - length > 0 && iter < i915_pdpes_per_pdp(vm) && \ - (pd = i915_pd_entry(pdp, iter), true); \ - ({ u64 temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT); \ - temp = min(temp - start, length); \ - start += temp, length -= temp; }), ++iter) - -#define gen8_for_each_pml4e(pdp, pml4, start, length, iter) \ - for (iter = gen8_pml4e_index(start); \ - length > 0 && iter < GEN8_PML4ES_PER_PML4 && \ - (pdp = i915_pdp_entry(pml4, iter), true); \ - ({ u64 temp = ALIGN(start+1, 1ULL << GEN8_PML4E_SHIFT); \ - temp = min(temp - start, length); \ - start += temp, length -= temp; }), ++iter) - -static inline u32 gen8_pte_index(u64 address) -{ - return i915_pte_index(address, GEN8_PDE_SHIFT); -} - -static inline u32 gen8_pde_index(u64 address) -{ - return i915_pde_index(address, GEN8_PDE_SHIFT); -} - -static inline u32 gen8_pdpe_index(u64 address) -{ - return (address >> GEN8_PDPE_SHIFT) & GEN8_PDPE_MASK; -} - -static inline u32 gen8_pml4e_index(u64 address) -{ - return (address >> GEN8_PML4E_SHIFT) & GEN8_PML4E_MASK; -} - -static inline u64 gen8_pte_count(u64 address, u64 length) -{ - return i915_pte_count(address, length, GEN8_PDE_SHIFT); -} - static inline dma_addr_t i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n) { -- cgit v1.2.3 From a9877da2d62958972548e98ecad0413d482b5eac Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 16 Jul 2019 22:34:43 +0100 Subject: drm/i915/oa: Reconfigure contexts on the fly Avoid a global idle barrier by reconfiguring 
each context by rewriting them with MI_STORE_DWORD from the kernel context. v2: We only need to determine the desired register values once, they are the same for all contexts. v3: Don't remove the kernel context from the list of known GEM contexts; the world is not ready for that yet. Signed-off-by: Chris Wilson Cc: Lionel Landwerlin Cc: Tvrtko Ursulin Reviewed-by: Lionel Landwerlin Link: https://patchwork.freedesktop.org/patch/msgid/20190716213443.9874-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_context.c | 23 +-- drivers/gpu/drm/i915/gt/intel_context.c | 25 +++ drivers/gpu/drm/i915/gt/intel_context.h | 3 + drivers/gpu/drm/i915/gt/intel_lrc.c | 7 +- drivers/gpu/drm/i915/i915_perf.c | 243 +++++++++++++++++++++------- 5 files changed, 221 insertions(+), 80 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index c5f8bfa3f7b0..ffb59d96d4d8 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -1173,26 +1173,11 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu) if (IS_ERR(rq)) return PTR_ERR(rq); - /* Queue this switch after all other activity by this context. */ - ret = i915_active_request_set(&ce->ring->timeline->last_request, rq); - if (ret) - goto out_add; - - /* - * Guarantee context image and the timeline remains pinned until the - * modifying request is retired by setting the ce activity tracker. - * - * But we only need to take one pin on the account of it. Or in other - * words transfer the pinned ce object to tracked active request. - */ - GEM_BUG_ON(i915_active_is_idle(&ce->active)); - ret = i915_active_ref(&ce->active, rq->fence.context, rq); - if (ret) - goto out_add; - - ret = gen8_emit_rpcs_config(rq, ce, sseu); + /* Serialise with the remote context */ + ret = intel_context_prepare_remote_request(ce, rq); + if (ret == 0) + ret = gen8_emit_rpcs_config(rq, ce, sseu); -out_add: i915_request_add(rq); return ret; } diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 1110fc8f657a..b667e2b35804 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -239,6 +239,31 @@ void intel_context_exit_engine(struct intel_context *ce) intel_engine_pm_put(ce->engine); } +int intel_context_prepare_remote_request(struct intel_context *ce, + struct i915_request *rq) +{ + struct intel_timeline *tl = ce->ring->timeline; + int err; + + /* Only suitable for use in remotely modifying this context */ + GEM_BUG_ON(rq->hw_context == ce); + + /* Queue this switch after all other activity by this context. */ + err = i915_active_request_set(&tl->last_request, rq); + if (err) + return err; + + /* + * Guarantee context image and the timeline remains pinned until the + * modifying request is retired by setting the ce activity tracker. + * + * But we only need to take one pin on the account of it. Or in other + * words transfer the pinned ce object to tracked active request. 
+ */ + GEM_BUG_ON(i915_active_is_idle(&ce->active)); + return i915_active_ref(&ce->active, rq->fence.context, rq); +} + struct i915_request *intel_context_create_request(struct intel_context *ce) { struct i915_request *rq; diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h index 40cd8320fcc3..b41c610c2ce6 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.h +++ b/drivers/gpu/drm/i915/gt/intel_context.h @@ -139,6 +139,9 @@ static inline void intel_context_timeline_unlock(struct intel_context *ce) mutex_unlock(&ce->ring->timeline->mutex); } +int intel_context_prepare_remote_request(struct intel_context *ce, + struct i915_request *rq); + struct i915_request *intel_context_create_request(struct intel_context *ce); #endif /* __INTEL_CONTEXT_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index a220575a69bc..f35a57d6d34a 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -1576,9 +1576,12 @@ __execlists_update_reg_state(struct intel_context *ce, regs[CTX_RING_TAIL + 1] = ring->tail; /* RPCS */ - if (engine->class == RENDER_CLASS) + if (engine->class == RENDER_CLASS) { regs[CTX_R_PWR_CLK_STATE + 1] = intel_sseu_make_rpcs(engine->i915, &ce->sseu); + + i915_oa_init_reg_state(engine, ce, regs); + } } static int @@ -3001,8 +3004,6 @@ static void execlists_init_reg_state(u32 *regs, if (rcs) { regs[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1); CTX_REG(regs, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, 0); - - i915_oa_init_reg_state(engine, ce, regs); } regs[CTX_END] = MI_BATCH_BUFFER_END; diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 007826ded9b3..ab82ccba896b 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -1636,6 +1636,27 @@ static void hsw_disable_metric_set(struct drm_i915_private *dev_priv) ~GT_NOA_ENABLE)); } +static u32 oa_config_flex_reg(const struct i915_oa_config *oa_config, + i915_reg_t reg) +{ + u32 mmio = i915_mmio_reg_offset(reg); + int i; + + /* + * This arbitrary default will select the 'EU FPU0 Pipeline + * Active' event. In the future it's anticipated that there + * will be an explicit 'No Event' we can select, but not yet... + */ + if (!oa_config) + return 0; + + for (i = 0; i < oa_config->flex_regs_len; i++) { + if (i915_mmio_reg_offset(oa_config->flex_regs[i].addr) == mmio) + return oa_config->flex_regs[i].value; + } + + return 0; +} /* * NB: It must always remain pointer safe to run this even if the OA unit * has been disabled. @@ -1669,28 +1690,8 @@ gen8_update_reg_state_unlocked(struct intel_context *ce, GEN8_OA_COUNTER_RESUME); for (i = 0; i < ARRAY_SIZE(flex_regs); i++) { - u32 state_offset = ctx_flexeu0 + i * 2; - u32 mmio = i915_mmio_reg_offset(flex_regs[i]); - - /* - * This arbitrary default will select the 'EU FPU0 Pipeline - * Active' event. In the future it's anticipated that there - * will be an explicit 'No Event' we can select, but not yet... 
- */ - u32 value = 0; - - if (oa_config) { - u32 j; - - for (j = 0; j < oa_config->flex_regs_len; j++) { - if (i915_mmio_reg_offset(oa_config->flex_regs[j].addr) == mmio) { - value = oa_config->flex_regs[j].value; - break; - } - } - } - - CTX_REG(reg_state, state_offset, flex_regs[i], value); + CTX_REG(reg_state, ctx_flexeu0 + i * 2, flex_regs[i], + oa_config_flex_reg(oa_config, flex_regs[i])); } CTX_REG(reg_state, @@ -1698,6 +1699,99 @@ gen8_update_reg_state_unlocked(struct intel_context *ce, intel_sseu_make_rpcs(i915, &ce->sseu)); } +struct flex { + i915_reg_t reg; + u32 offset; + u32 value; +}; + +static int +gen8_store_flex(struct i915_request *rq, + struct intel_context *ce, + const struct flex *flex, unsigned int count) +{ + u32 offset; + u32 *cs; + + cs = intel_ring_begin(rq, 4 * count); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + offset = i915_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE; + do { + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *cs++ = offset + (flex->offset + 1) * sizeof(u32); + *cs++ = 0; + *cs++ = flex->value; + } while (flex++, --count); + + intel_ring_advance(rq, cs); + + return 0; +} + +static int +gen8_load_flex(struct i915_request *rq, + struct intel_context *ce, + const struct flex *flex, unsigned int count) +{ + u32 *cs; + + GEM_BUG_ON(!count || count > 63); + + cs = intel_ring_begin(rq, 2 * count + 2); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + *cs++ = MI_LOAD_REGISTER_IMM(count); + do { + *cs++ = i915_mmio_reg_offset(flex->reg); + *cs++ = flex->value; + } while (flex++, --count); + *cs++ = MI_NOOP; + + intel_ring_advance(rq, cs); + + return 0; +} + +static int gen8_modify_context(struct intel_context *ce, + const struct flex *flex, unsigned int count) +{ + struct i915_request *rq; + int err; + + lockdep_assert_held(&ce->pin_mutex); + + rq = i915_request_create(ce->engine->kernel_context); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + /* Serialise with the remote context */ + err = intel_context_prepare_remote_request(ce, rq); + if (err == 0) + err = gen8_store_flex(rq, ce, flex, count); + + i915_request_add(rq); + return err; +} + +static int gen8_modify_self(struct intel_context *ce, + const struct flex *flex, unsigned int count) +{ + struct i915_request *rq; + int err; + + rq = i915_request_create(ce); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + err = gen8_load_flex(rq, ce, flex, count); + + i915_request_add(rq); + return err; +} + /* * Manages updating the per-context aspects of the OA stream * configuration across all contexts. @@ -1722,15 +1816,43 @@ gen8_update_reg_state_unlocked(struct intel_context *ce, * * Note: it's only the RCS/Render context that has any OA state. */ -static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv, +static int gen8_configure_all_contexts(struct drm_i915_private *i915, const struct i915_oa_config *oa_config) { - unsigned int map_type = i915_coherent_map_type(dev_priv); + /* The MMIO offsets for Flex EU registers aren't contiguous */ + const u32 ctx_flexeu0 = i915->perf.oa.ctx_flexeu0_offset; +#define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N)) + struct flex regs[] = { + { + GEN8_R_PWR_CLK_STATE, + CTX_R_PWR_CLK_STATE, + }, + { + GEN8_OACTXCONTROL, + i915->perf.oa.ctx_oactxctrl_offset, + ((i915->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | + (i915->perf.oa.periodic ? 
GEN8_OA_TIMER_ENABLE : 0) | + GEN8_OA_COUNTER_RESUME) + }, + { EU_PERF_CNTL0, ctx_flexeuN(0) }, + { EU_PERF_CNTL1, ctx_flexeuN(1) }, + { EU_PERF_CNTL2, ctx_flexeuN(2) }, + { EU_PERF_CNTL3, ctx_flexeuN(3) }, + { EU_PERF_CNTL4, ctx_flexeuN(4) }, + { EU_PERF_CNTL5, ctx_flexeuN(5) }, + { EU_PERF_CNTL6, ctx_flexeuN(6) }, + }; +#undef ctx_flexeuN + struct intel_engine_cs *engine; struct i915_gem_context *ctx; - struct i915_request *rq; - int ret; + enum intel_engine_id id; + int err; + int i; + + for (i = 2; i < ARRAY_SIZE(regs); i++) + regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg); - lockdep_assert_held(&dev_priv->drm.struct_mutex); + lockdep_assert_held(&i915->drm.struct_mutex); /* * The OA register config is setup through the context image. This image @@ -1742,58 +1864,63 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv, * this might leave small interval of time where the OA unit is * configured at an invalid sampling period. * - * So far the best way to work around this issue seems to be draining - * the GPU from any submitted work. + * Note that since we emit all requests from a single ring, there + * is still an implicit global barrier here that may cause a high + * priority context to wait for an otherwise independent low priority + * context. Contexts idle at the time of reconfiguration are not + * trapped behind the barrier. */ - ret = i915_gem_wait_for_idle(dev_priv, - I915_WAIT_LOCKED, - MAX_SCHEDULE_TIMEOUT); - if (ret) - return ret; - - /* Update all contexts now that we've stalled the submission. */ - list_for_each_entry(ctx, &dev_priv->contexts.list, link) { + list_for_each_entry(ctx, &i915->contexts.list, link) { struct i915_gem_engines_iter it; struct intel_context *ce; + if (ctx == i915->kernel_context) + continue; + for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { - u32 *regs; + GEM_BUG_ON(ce == ce->engine->kernel_context); if (ce->engine->class != RENDER_CLASS) continue; - /* OA settings will be set upon first use */ - if (!ce->state) - continue; - - regs = i915_gem_object_pin_map(ce->state->obj, - map_type); - if (IS_ERR(regs)) { - i915_gem_context_unlock_engines(ctx); - return PTR_ERR(regs); - } + err = intel_context_lock_pinned(ce); + if (err) + break; - ce->state->obj->mm.dirty = true; - regs += LRC_STATE_PN * PAGE_SIZE / sizeof(*regs); + regs[0].value = intel_sseu_make_rpcs(i915, &ce->sseu); - gen8_update_reg_state_unlocked(ce, regs, oa_config); + /* Otherwise OA settings will be set upon first use */ + if (intel_context_is_pinned(ce)) + err = gen8_modify_context(ce, regs, ARRAY_SIZE(regs)); - i915_gem_object_unpin_map(ce->state->obj); + intel_context_unlock_pinned(ce); + if (err) + break; } i915_gem_context_unlock_engines(ctx); + if (err) + return err; } /* - * Apply the configuration by doing one context restore of the edited - * context image. + * After updating all other contexts, we need to modify ourselves. + * If we don't modify the kernel_context, we do not get events while + * idle. 
*/ - rq = i915_request_create(dev_priv->engine[RCS0]->kernel_context); - if (IS_ERR(rq)) - return PTR_ERR(rq); + for_each_engine(engine, i915, id) { + struct intel_context *ce = engine->kernel_context; - i915_request_add(rq); + if (engine->class != RENDER_CLASS) + continue; + + regs[0].value = intel_sseu_make_rpcs(i915, &ce->sseu); + + err = gen8_modify_self(ce, regs, ARRAY_SIZE(regs)); + if (err) + return err; + } return 0; } -- cgit v1.2.3 From bff9e34c678552eb172916d9288913e8bd8cc9d1 Mon Sep 17 00:00:00 2001 From: Mauro Carvalho Chehab Date: Mon, 15 Jul 2019 05:31:06 -0300 Subject: docs: fix broken doc references due to renames Some files got renamed but probably due to some merge conflicts, a few references still point to the old locations. Signed-off-by: Mauro Carvalho Chehab --- Documentation/RCU/rculist_nulls.txt | 2 +- Documentation/devicetree/bindings/arm/idle-states.txt | 2 +- Documentation/locking/spinlocks.rst | 4 ++-- Documentation/memory-barriers.txt | 2 +- Documentation/translations/ko_KR/memory-barriers.txt | 2 +- Documentation/watchdog/hpwdt.rst | 2 +- MAINTAINERS | 8 ++++---- drivers/gpu/drm/drm_modes.c | 2 +- drivers/i2c/busses/i2c-nvidia-gpu.c | 2 +- drivers/scsi/hpsa.c | 4 ++-- 10 files changed, 15 insertions(+), 15 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/Documentation/RCU/rculist_nulls.txt b/Documentation/RCU/rculist_nulls.txt index 8151f0195f76..23f115dc87cf 100644 --- a/Documentation/RCU/rculist_nulls.txt +++ b/Documentation/RCU/rculist_nulls.txt @@ -1,7 +1,7 @@ Using hlist_nulls to protect read-mostly linked lists and objects using SLAB_TYPESAFE_BY_RCU allocations. -Please read the basics in Documentation/RCU/listRCU.txt +Please read the basics in Documentation/RCU/listRCU.rst Using special makers (called 'nulls') is a convenient way to solve following problem : diff --git a/Documentation/devicetree/bindings/arm/idle-states.txt b/Documentation/devicetree/bindings/arm/idle-states.txt index 326f29b270ad..2d325bed37e5 100644 --- a/Documentation/devicetree/bindings/arm/idle-states.txt +++ b/Documentation/devicetree/bindings/arm/idle-states.txt @@ -703,4 +703,4 @@ cpus { https://www.devicetree.org/specifications/ [6] ARM Linux Kernel documentation - Booting AArch64 Linux - Documentation/arm64/booting.txt + Documentation/arm64/booting.rst diff --git a/Documentation/locking/spinlocks.rst b/Documentation/locking/spinlocks.rst index 098107fb7d86..e93ec6645238 100644 --- a/Documentation/locking/spinlocks.rst +++ b/Documentation/locking/spinlocks.rst @@ -82,7 +82,7 @@ itself. The read lock allows many concurrent readers. Anything that **changes** the list will have to get the write lock. NOTE! RCU is better for list traversal, but requires careful - attention to design detail (see Documentation/RCU/listRCU.txt). + attention to design detail (see Documentation/RCU/listRCU.rst). Also, you cannot "upgrade" a read-lock to a write-lock, so if you at _any_ time need to do any changes (even if you don't do it every time), you have @@ -90,7 +90,7 @@ to get the write-lock at the very beginning. NOTE! We are working hard to remove reader-writer spinlocks in most cases, so please don't add a new one without consensus. (Instead, see - Documentation/RCU/rcu.txt for complete information.) + Documentation/RCU/rcu.rst for complete information.) 
---- diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt index 045bb8148fe9..1adbb8a371c7 100644 --- a/Documentation/memory-barriers.txt +++ b/Documentation/memory-barriers.txt @@ -548,7 +548,7 @@ There are certain things that the Linux kernel memory barriers do not guarantee: [*] For information on bus mastering DMA and coherency please read: - Documentation/PCI/pci.rst + Documentation/driver-api/pci/pci.rst Documentation/DMA-API-HOWTO.txt Documentation/DMA-API.txt diff --git a/Documentation/translations/ko_KR/memory-barriers.txt b/Documentation/translations/ko_KR/memory-barriers.txt index a33c2a536542..2774624ee843 100644 --- a/Documentation/translations/ko_KR/memory-barriers.txt +++ b/Documentation/translations/ko_KR/memory-barriers.txt @@ -569,7 +569,7 @@ ACQUIRE 는 해당 오퍼레이션의 로드 부분에만 적용되고 RELEASE [*] 버스 마스터링 DMA 와 일관성에 대해서는 다음을 참고하시기 바랍니다: - Documentation/PCI/pci.rst + Documentation/driver-api/pci/pci.rst Documentation/DMA-API-HOWTO.txt Documentation/DMA-API.txt diff --git a/Documentation/watchdog/hpwdt.rst b/Documentation/watchdog/hpwdt.rst index 94a96371113e..49c647dba8aa 100644 --- a/Documentation/watchdog/hpwdt.rst +++ b/Documentation/watchdog/hpwdt.rst @@ -59,7 +59,7 @@ Last reviewed: 08/20/2018 and loop forever. This is generally not what a watchdog user wants. For those wishing to learn more please see: - Documentation/kdump/kdump.rst + Documentation/admin-guide/kdump/kdump.rst Documentation/admin-guide/kernel-parameters.txt (panic=) Your Linux Distribution specific documentation. diff --git a/MAINTAINERS b/MAINTAINERS index 8671909ee75c..5fe6fd597138 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -899,7 +899,7 @@ L: linux-iio@vger.kernel.org W: http://ez.analog.com/community/linux-device-drivers S: Supported F: drivers/iio/adc/ad7124.c -F: Documentation/devicetree/bindings/iio/adc/adi,ad7124.txt +F: Documentation/devicetree/bindings/iio/adc/adi,ad7124.yaml ANALOG DEVICES INC AD7606 DRIVER M: Stefan Popa @@ -4189,7 +4189,7 @@ M: Jens Axboe L: cgroups@vger.kernel.org L: linux-block@vger.kernel.org T: git git://git.kernel.dk/linux-block -F: Documentation/cgroup-v1/blkio-controller.rst +F: Documentation/admin-guide/cgroup-v1/blkio-controller.rst F: block/blk-cgroup.c F: include/linux/blk-cgroup.h F: block/blk-throttle.c @@ -6848,7 +6848,7 @@ R: Sagi Shahar R: Jon Olson L: netdev@vger.kernel.org S: Supported -F: Documentation/networking/device_drivers/google/gve.txt +F: Documentation/networking/device_drivers/google/gve.rst F: drivers/net/ethernet/google GPD POCKET FAN DRIVER @@ -12096,7 +12096,7 @@ M: Juergen Gross M: Alok Kataria L: virtualization@lists.linux-foundation.org S: Supported -F: Documentation/virtual/paravirt_ops.txt +F: Documentation/virtual/paravirt_ops.rst F: arch/*/kernel/paravirt* F: arch/*/include/asm/paravirt*.h F: include/linux/hypervisor.h diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 57e6408288c8..4645af681ef8 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -1680,7 +1680,7 @@ static int drm_mode_parse_cmdline_options(char *str, size_t len, * * Additionals options can be provided following the mode, using a comma to * separate each option. Valid options can be found in - * Documentation/fb/modedb.txt. + * Documentation/fb/modedb.rst. * * The intermediate drm_cmdline_mode structure is required to store additional * options from the command line modline like the force-enable/disable flag. 
diff --git a/drivers/i2c/busses/i2c-nvidia-gpu.c b/drivers/i2c/busses/i2c-nvidia-gpu.c index cfc76b5de726..5a1235fd86bb 100644 --- a/drivers/i2c/busses/i2c-nvidia-gpu.c +++ b/drivers/i2c/busses/i2c-nvidia-gpu.c @@ -364,7 +364,7 @@ static void gpu_i2c_remove(struct pci_dev *pdev) /* * We need gpu_i2c_suspend() even if it is stub, for runtime pm to work * correctly. Without it, lspci shows runtime pm status as "D0" for the card. - * Documentation/power/pci.txt also insists for driver to provide this. + * Documentation/power/pci.rst also insists for driver to provide this. */ static __maybe_unused int gpu_i2c_suspend(struct device *dev) { diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 43a6b5350775..eaf6177ac9ee 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -7798,7 +7798,7 @@ static void hpsa_free_pci_init(struct ctlr_info *h) hpsa_disable_interrupt_mode(h); /* pci_init 2 */ /* * call pci_disable_device before pci_release_regions per - * Documentation/PCI/pci.rst + * Documentation/driver-api/pci/pci.rst */ pci_disable_device(h->pdev); /* pci_init 1 */ pci_release_regions(h->pdev); /* pci_init 2 */ @@ -7881,7 +7881,7 @@ clean2: /* intmode+region, pci */ clean1: /* * call pci_disable_device before pci_release_regions per - * Documentation/PCI/pci.rst + * Documentation/driver-api/pci/pci.rst */ pci_disable_device(h->pdev); pci_release_regions(h->pdev); -- cgit v1.2.3 From 0629d4da1f159778063767fb0ac1c951034c5477 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Wed, 17 Jul 2019 11:44:18 +0100 Subject: drm/i915: Update description of i915.enable_guc modparam Commit f774f0964919 ("drm/i915/guc: Turn on GuC/HuC auto mode") changed the default from 0 to -1 but forgot to update the description. Signed-off-by: Tvrtko Ursulin Fixes: f774f0964919 ("drm/i915/guc: Turn on GuC/HuC auto mode") Cc: Michal Wajdeczko Cc: Jani Nikula Cc: Joonas Lahtinen Cc: Rodrigo Vivi Cc: Chris Wilson Cc: Daniele Ceraolo Spurio Cc: Jani Nikula Cc: intel-gfx@lists.freedesktop.org Reviewed-by: Michal Wajdeczko Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190717104418.23809-1-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_params.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index 296452f9efe4..b4f481e1e6b6 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -146,7 +146,7 @@ i915_param_named_unsafe(edp_vswing, int, 0400, i915_param_named_unsafe(enable_guc, int, 0400, "Enable GuC load for GuC submission and/or HuC load. " "Required functionality can be selected using bitmask values. " - "(-1=auto, 0=disable [default], 1=GuC submission, 2=HuC load)"); + "(-1=auto [default], 0=disable, 1=GuC submission, 2=HuC load)"); i915_param_named(guc_log_level, int, 0400, "GuC firmware logging level. Requires GuC to be loaded. " -- cgit v1.2.3 From fff8102aaed59014cb2d8034bdca231185496b16 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 16 Jul 2019 13:49:29 +0100 Subject: drm/i915/execlists: Process interrupted context on reset By stopping the rings, we may trigger an arbitration point resulting in a premature context-switch (i.e. a completion event before the request is actually complete). This clears the active context before the reset, but we must remember to rewind the incomplete context for replay upon resume. 
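As a rough sketch of the control-flow change in the intel_lrc.c hunk below (stub types and prints, not the kernel function): when no request is marked in-flight we now still fall through to the unwind step instead of returning early, so incomplete work is pushed back for replay:

#include <stdio.h>

struct rq_stub { int premature_completion; };

/* Stand-in for execlists_active(): NULL when nothing appears in flight. */
static struct rq_stub *active_stub(struct rq_stub *rq)
{
        return rq && !rq->premature_completion ? rq : NULL;
}

static void reset_sketch(struct rq_stub *rq)
{
        if (!active_stub(rq))
                goto unwind;    /* was a bare return, skipping the rewind */

        printf("rewind the active context's RING registers\n");

unwind:
        /* Push back any incomplete requests for replay after the reset. */
        printf("__unwind_incomplete_requests()\n");
}

int main(void)
{
        struct rq_stub rq = { .premature_completion = 1 };

        reset_sketch(&rq);      /* completion event arrived early: still replayed */
        return 0;
}

The real change is only the early return becoming a goto to the existing unwind path, as the diff shows.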
Fixes: 1863e3020ab5 ("drm/i915/execlists: Always reset the context's RING registers") Signed-off-by: Chris Wilson Cc: Mika Kuoppala Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190716124931.5870-3-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_lrc.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index f35a57d6d34a..6564b5da224f 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -1425,7 +1425,8 @@ static void process_csb(struct intel_engine_cs *engine) * coherent (visible from the CPU) before the * user interrupt and CSB is processed. */ - GEM_BUG_ON(!i915_request_completed(*execlists->active)); + GEM_BUG_ON(!i915_request_completed(*execlists->active) && + !reset_in_progress(execlists)); execlists_schedule_out(*execlists->active++); GEM_BUG_ON(execlists->active - execlists->inflight > @@ -2251,7 +2252,7 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled) */ rq = execlists_active(execlists); if (!rq) - return; + goto unwind; ce = rq->hw_context; GEM_BUG_ON(i915_active_is_idle(&ce->active)); @@ -2328,6 +2329,7 @@ out_replay: intel_ring_update_space(ce->ring); __execlists_update_reg_state(ce, engine); +unwind: /* Push back any incomplete requests for replay after the reset. */ __unwind_incomplete_requests(engine); } -- cgit v1.2.3 From c30d5dc653cbc78f9b634b7b72e25057a68c527c Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 16 Jul 2019 13:49:28 +0100 Subject: drm/i915/gt: Push engine stopping into reset-prepare Push the engine stop into the back reset_prepare (where it already was!) This allows us to avoid dangerously setting the RING registers to 0 for logical contexts. If we clear the register on a live context, those invalid register values are recorded in the logical context state and replayed (with hilarious results). Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190716124931.5870-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_lrc.c | 16 +++++++-- drivers/gpu/drm/i915/gt/intel_reset.c | 58 ------------------------------ drivers/gpu/drm/i915/gt/intel_ringbuffer.c | 40 ++++++++++++++++++++- 3 files changed, 53 insertions(+), 61 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 6564b5da224f..d076d9148b6d 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -2183,11 +2183,23 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine) __tasklet_disable_sync_once(&execlists->tasklet); GEM_BUG_ON(!reset_in_progress(execlists)); - intel_engine_stop_cs(engine); - /* And flush any current direct submission. */ spin_lock_irqsave(&engine->active.lock, flags); spin_unlock_irqrestore(&engine->active.lock, flags); + + /* + * We stop engines, otherwise we might get failed reset and a + * dead gpu (on elk). Also as modern gpu as kbl can suffer + * from system hang if batchbuffer is progressing when + * the reset is issued, regardless of READY_TO_RESET ack. + * Thus assume it is best to stop engines on all gens + * where we have a gpu reset. 
+ * + * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES) + * + * FIXME: Wa for more modern gens needs to be validated + */ + intel_engine_stop_cs(engine); } static void reset_csb_pointers(struct intel_engine_cs *engine) diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 7ddedfb16aa2..55e2ddcbd215 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -135,47 +135,6 @@ void __i915_request_reset(struct i915_request *rq, bool guilty) } } -static void gen3_stop_engine(struct intel_engine_cs *engine) -{ - struct intel_uncore *uncore = engine->uncore; - const u32 base = engine->mmio_base; - - GEM_TRACE("%s\n", engine->name); - - if (intel_engine_stop_cs(engine)) - GEM_TRACE("%s: timed out on STOP_RING\n", engine->name); - - intel_uncore_write_fw(uncore, - RING_HEAD(base), - intel_uncore_read_fw(uncore, RING_TAIL(base))); - intel_uncore_posting_read_fw(uncore, RING_HEAD(base)); /* paranoia */ - - intel_uncore_write_fw(uncore, RING_HEAD(base), 0); - intel_uncore_write_fw(uncore, RING_TAIL(base), 0); - intel_uncore_posting_read_fw(uncore, RING_TAIL(base)); - - /* The ring must be empty before it is disabled */ - intel_uncore_write_fw(uncore, RING_CTL(base), 0); - - /* Check acts as a post */ - if (intel_uncore_read_fw(uncore, RING_HEAD(base))) - GEM_TRACE("%s: ring head [%x] not parked\n", - engine->name, - intel_uncore_read_fw(uncore, RING_HEAD(base))); -} - -static void stop_engines(struct intel_gt *gt, intel_engine_mask_t engine_mask) -{ - struct intel_engine_cs *engine; - intel_engine_mask_t tmp; - - if (INTEL_GEN(gt->i915) < 3) - return; - - for_each_engine_masked(engine, gt->i915, engine_mask, tmp) - gen3_stop_engine(engine); -} - static bool i915_in_reset(struct pci_dev *pdev) { u8 gdrst; @@ -607,23 +566,6 @@ int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask) */ intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL); for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) { - /* - * We stop engines, otherwise we might get failed reset and a - * dead gpu (on elk). Also as modern gpu as kbl can suffer - * from system hang if batchbuffer is progressing when - * the reset is issued, regardless of READY_TO_RESET ack. - * Thus assume it is best to stop engines on all gens - * where we have a gpu reset. - * - * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES) - * - * WaMediaResetMainRingCleanup:ctg,elk (presumably) - * - * FIXME: Wa for more modern gens needs to be validated - */ - if (retry) - stop_engines(gt, engine_mask); - GEM_TRACE("engine_mask=%x\n", engine_mask); preempt_disable(); ret = reset(gt, engine_mask, retry); diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c index f1e571fa2e6d..213df144be15 100644 --- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c @@ -739,7 +739,45 @@ out: static void reset_prepare(struct intel_engine_cs *engine) { - intel_engine_stop_cs(engine); + struct intel_uncore *uncore = engine->uncore; + const u32 base = engine->mmio_base; + + /* + * We stop engines, otherwise we might get failed reset and a + * dead gpu (on elk). Also as modern gpu as kbl can suffer + * from system hang if batchbuffer is progressing when + * the reset is issued, regardless of READY_TO_RESET ack. + * Thus assume it is best to stop engines on all gens + * where we have a gpu reset. 
+ * + * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES) + * + * WaMediaResetMainRingCleanup:ctg,elk (presumably) + * + * FIXME: Wa for more modern gens needs to be validated + */ + GEM_TRACE("%s\n", engine->name); + + if (intel_engine_stop_cs(engine)) + GEM_TRACE("%s: timed out on STOP_RING\n", engine->name); + + intel_uncore_write_fw(uncore, + RING_HEAD(base), + intel_uncore_read_fw(uncore, RING_TAIL(base))); + intel_uncore_posting_read_fw(uncore, RING_HEAD(base)); /* paranoia */ + + intel_uncore_write_fw(uncore, RING_HEAD(base), 0); + intel_uncore_write_fw(uncore, RING_TAIL(base), 0); + intel_uncore_posting_read_fw(uncore, RING_TAIL(base)); + + /* The ring must be empty before it is disabled */ + intel_uncore_write_fw(uncore, RING_CTL(base), 0); + + /* Check acts as a post */ + if (intel_uncore_read_fw(uncore, RING_HEAD(base))) + GEM_TRACE("%s: ring head [%x] not parked\n", + engine->name, + intel_uncore_read_fw(uncore, RING_HEAD(base))); } static void reset_ring(struct intel_engine_cs *engine, bool stalled) -- cgit v1.2.3 From 5270130db8c8694f7d97cc4fdc9440fb30bc2192 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 26 Jun 2019 21:03:40 +0300 Subject: drm/i915: Add gen8_de_pipe_fault_mask() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reduce the clutter a bit by introducing gen8_de_pipe_fault_mask(). Signed-off-by: Ville Syrjälä Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190626180344.26314-2-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/i915_irq.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 91f8c81028c3..11c73af92597 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -2844,6 +2844,14 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv) return mask; } +static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv) +{ + if (INTEL_GEN(dev_priv) >= 9) + return GEN9_DE_PIPE_IRQ_FAULT_ERRORS; + else + return GEN8_DE_PIPE_IRQ_FAULT_ERRORS; +} + static irqreturn_t gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) { @@ -2956,12 +2964,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) if (iir & GEN8_PIPE_FIFO_UNDERRUN) intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); - fault_errors = iir; - if (INTEL_GEN(dev_priv) >= 9) - fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; - else - fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; - + fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv); if (fault_errors) DRM_ERROR("Fault errors on pipe %c: 0x%08x\n", pipe_name(pipe), -- cgit v1.2.3 From 9a36a6517d5cc8bf7d9c1fde9058269701802e31 Mon Sep 17 00:00:00 2001 From: Vivek Kasireddy Date: Tue, 16 Jul 2019 19:13:16 -0700 Subject: drm/i915/ehl: Use an id of 4 while accessing DPLL4's CR0 and CR1 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Although, DPLL4 enable and disable is associated with MGPLL1_ENABLE register, we can use ICL_DPLL_CFGCR0/CR1 macros to access this dpll's CR0 and CR1 registers by passing an id of 4 to these macros. 
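For illustration only, the same index override could be factored into a small helper; this is a hedged sketch (the helper name is hypothetical, and the patch itself open-codes the check at both call sites):

static int ehl_dpll_cfgcr_index(struct drm_i915_private *dev_priv,
				enum intel_dpll_id id)
{
	/*
	 * DPLL4 is enabled/disabled via MGPLL1_ENABLE, but its CFGCR0/CR1
	 * registers are addressed with index 4 in the ICL_DPLL_CFGCR* macros.
	 */
	if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4)
		return 4;

	return id;
}

With such a helper, both icl_pll_get_hw_state() and icl_dpll_write() could use ICL_DPLL_CFGCR0(ehl_dpll_cfgcr_index(dev_priv, id)) instead of duplicating the branch.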
Reported-by: Ville Syrjälä Cc: Ville Syrjälä Cc: José Roberto de Souza Cc: Matt Roper Cc: Imre Deak Signed-off-by: Vivek Kasireddy Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190717021316.18610-1-vivek.kasireddy@intel.com --- drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index 319a26a1ec10..f9bdf8514a53 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -3127,8 +3127,13 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv, hw_state->cfgcr0 = I915_READ(TGL_DPLL_CFGCR0(id)); hw_state->cfgcr1 = I915_READ(TGL_DPLL_CFGCR1(id)); } else { - hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id)); - hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id)); + if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) { + hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(4)); + hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(4)); + } else { + hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id)); + hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id)); + } } ret = true; @@ -3169,8 +3174,13 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv, cfgcr0_reg = TGL_DPLL_CFGCR0(id); cfgcr1_reg = TGL_DPLL_CFGCR1(id); } else { - cfgcr0_reg = ICL_DPLL_CFGCR0(id); - cfgcr1_reg = ICL_DPLL_CFGCR1(id); + if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) { + cfgcr0_reg = ICL_DPLL_CFGCR0(4); + cfgcr1_reg = ICL_DPLL_CFGCR1(4); + } else { + cfgcr0_reg = ICL_DPLL_CFGCR0(id); + cfgcr1_reg = ICL_DPLL_CFGCR1(id); + } } I915_WRITE(cfgcr0_reg, hw_state->cfgcr0); -- cgit v1.2.3 From bffb31f73b29a60ef693842d8744950c2819851d Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 17 Jul 2019 14:45:36 +0300 Subject: drm/i915: Make sure cdclk is high enough for DP audio on VLV/CHV MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On VLV/CHV there is some kind of linkage between the cdclk frequency and the DP link frequency. The spec says: "For DP audio configuration, cdclk frequency shall be set to meet the following requirements: DP Link Frequency(MHz) | Cdclk frequency(MHz) 270 | 320 or higher 162 | 200 or higher" I suspect that would more accurately be expressed as "cdclk >= DP link clock", and in any case we can express it like that in the code because of the limited set of cdclk (200, 266, 320, 400 MHz) and link frequencies (162 and 270 MHz) we support. Without this we can end up in a situation where the cdclk is too low and enabling DP audio will kill the pipe. Happens eg. with 2560x1440 modes where the 266MHz cdclk is sufficient to pump the pixels (241.5 MHz dotclock) but is too low for the DP audio due to the link frequency being 270 MHz. 
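A self-contained illustration of the clamp, using the rounded cdclk steps quoted above and kHz units (this is not the driver code):

#include <stdio.h>

/* Rounded VLV/CHV cdclk steps from the commit message, in kHz. */
static const int cdclk_steps[] = { 200000, 266000, 320000, 400000 };

static int pick_cdclk(int min_cdclk)
{
	unsigned int i;

	for (i = 0; i < sizeof(cdclk_steps) / sizeof(cdclk_steps[0]); i++)
		if (cdclk_steps[i] >= min_cdclk)
			return cdclk_steps[i];

	return cdclk_steps[3];
}

int main(void)
{
	int min_cdclk = 241500;		/* 2560x1440 dotclock, kHz */
	int port_clock = 270000;	/* 2.7 GHz DP link, kHz */

	printf("pixels only:   %d\n", pick_cdclk(min_cdclk));	/* 266000 */

	/* The fix: min_cdclk = max(port_clock, min_cdclk) when DP audio is on. */
	if (port_clock > min_cdclk)
		min_cdclk = port_clock;
	printf("with DP audio: %d\n", pick_cdclk(min_cdclk));	/* 320000 */

	return 0;
}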
v2: Spell out the cdclk and link frequencies we actually support Cc: stable@vger.kernel.org Tested-by: Stefan Gottwald Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=111149 Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190717114536.22937-1-ville.syrjala@linux.intel.com Acked-by: Chris Wilson --- drivers/gpu/drm/i915/display/intel_cdclk.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index d0581a1ac243..93b0d190c184 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -2262,6 +2262,17 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state) if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9) min_cdclk = max(2 * 96000, min_cdclk); + /* + * "For DP audio configuration, cdclk frequency shall be set to + * meet the following requirements: + * DP Link Frequency(MHz) | Cdclk frequency(MHz) + * 270 | 320 or higher + * 162 | 200 or higher" + */ + if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && + intel_crtc_has_dp_encoder(crtc_state) && crtc_state->has_audio) + min_cdclk = max(crtc_state->port_clock, min_cdclk); + /* * On Valleyview some DSI panels lose (v|h)sync when the clock is lower * than 320000KHz. -- cgit v1.2.3 From d45a4dd527068f0d7ee6cdd9e13c03b3f9e59dba Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 18 Jul 2019 15:54:04 +0100 Subject: drm/i915: Drop wmb() inside pread_gtt MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Inside pread, we only ever read from the GTT so the serialising wmb() instructions around the GGTT PTE updates are pointless. Signed-off-by: Chris Wilson Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190718145407.21352-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index a207b90924e4..fed0bc421a55 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -395,11 +395,9 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj, unsigned page_length = PAGE_SIZE - page_offset; page_length = remain < page_length ? remain : page_length; if (node.allocated) { - wmb(); ggtt->vm.insert_page(&ggtt->vm, i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT), node.start, I915_CACHE_NONE, 0); - wmb(); } else { page_base += offset & PAGE_MASK; } @@ -419,7 +417,6 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj, out_unpin: mutex_lock(&i915->drm.struct_mutex); if (node.allocated) { - wmb(); ggtt->vm.clear_range(&ggtt->vm, node.start, node.size); remove_mappable_node(&node); } else { -- cgit v1.2.3 From bdae33b8b82bb379a5b11040b0b37df25c7871c9 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 18 Jul 2019 15:54:05 +0100 Subject: drm/i915: Use maximum write flush for pwrite_gtt MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As recently disovered by forcing big-core (!llc) machines to use the GTT paths, we need our full GTT write flush before manipulating the GTT PTE or else the writes may be directed to the wrong page. 
Signed-off-by: Chris Wilson Cc: Joonas Lahtinen Cc: Matthew Auld Cc: Ville Syrjälä Cc: stable@vger.kernel.org Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190718145407.21352-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index fed0bc421a55..c6ba350e6e4f 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -610,7 +610,8 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, unsigned int page_length = PAGE_SIZE - page_offset; page_length = remain < page_length ? remain : page_length; if (node.allocated) { - wmb(); /* flush the write before we modify the GGTT */ + /* flush the write before we modify the GGTT */ + intel_gt_flush_ggtt_writes(ggtt->vm.gt); ggtt->vm.insert_page(&ggtt->vm, i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT), node.start, I915_CACHE_NONE, 0); @@ -639,8 +640,8 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, i915_gem_object_unlock_fence(obj, fence); out_unpin: mutex_lock(&i915->drm.struct_mutex); + intel_gt_flush_ggtt_writes(ggtt->vm.gt); if (node.allocated) { - wmb(); ggtt->vm.clear_range(&ggtt->vm, node.start, node.size); remove_mappable_node(&node); } else { -- cgit v1.2.3 From b5ea9c9337007d6e700280c8a60b4e10d070fb53 Mon Sep 17 00:00:00 2001 From: Dhinakaran Pandiyan Date: Wed, 17 Jul 2019 15:34:51 -0700 Subject: drm/i915/vbt: Fix VBT parsing for the PSR section MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A single 32-bit PSR2 training pattern field follows the sixteen element array of PSR table entries in the VBT spec. But, we incorrectly define this PSR2 field for each of the PSR table entries. As a result, the PSR1 training pattern duration for any panel_type != 0 will be parsed incorrectly. Secondly, PSR2 training pattern durations for VBTs with bdb version >= 226 will also be wrong. 
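A minimal standalone sketch of the layout problem; the structures are trimmed to the wakeup-time fields only, so the absolute sizes differ from the real VBT, but the stride/offset effect is the same:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Wrong: the single 32-bit PSR2 field repeated inside every entry. */
struct psr_entry_wrong {
	uint16_t tp1_wakeup_time;
	uint16_t tp2_tp3_wakeup_time;
	uint32_t psr2_tp2_tp3_wakeup_time;
} __attribute__((packed));

/* Right: sixteen fixed-size entries, then one 32-bit field holding
 * 2 bits of PSR2 TP2/TP3 wakeup time per panel. */
struct psr_entry {
	uint16_t tp1_wakeup_time;
	uint16_t tp2_tp3_wakeup_time;
} __attribute__((packed));

struct bdb_psr_fixed {
	struct psr_entry psr_table[16];
	uint32_t psr2_tp2_tp3_wakeup_time;
} __attribute__((packed));

int main(void)
{
	/* The larger stride shifts every entry after index 0, which is why
	 * panel_type != 0 parsed the PSR1 durations from the wrong bytes. */
	printf("entry stride: wrong=%zu fixed=%zu\n",
	       sizeof(struct psr_entry_wrong), sizeof(struct psr_entry));
	printf("psr2 field offset in fixed layout: %zu\n",
	       offsetof(struct bdb_psr_fixed, psr2_tp2_tp3_wakeup_time));
	return 0;
}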
Cc: Rodrigo Vivi Cc: José Roberto de Souza Cc: stable@vger.kernel.org Cc: stable@vger.kernel.org #v5.2 Fixes: 88a0d9606aff ("drm/i915/vbt: Parse and use the new field with PSR2 TP2/3 wakeup time") Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=111088 Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=204183 Signed-off-by: Dhinakaran Pandiyan Reviewed-by: Ville Syrjälä Reviewed-by: José Roberto de Souza Acked-by: Rodrigo Vivi Tested-by: François Guerraz Signed-off-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20190717223451.2595-1-dhinakaran.pandiyan@intel.com --- drivers/gpu/drm/i915/display/intel_bios.c | 2 +- drivers/gpu/drm/i915/display/intel_vbt_defs.h | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index 21501d565327..b416b394b641 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -766,7 +766,7 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) } if (bdb->version >= 226) { - u32 wakeup_time = psr_table->psr2_tp2_tp3_wakeup_time; + u32 wakeup_time = psr->psr2_tp2_tp3_wakeup_time; wakeup_time = (wakeup_time >> (2 * panel_type)) & 0x3; switch (wakeup_time) { diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h index 93f5c9d204d6..09cd37fb0b1c 100644 --- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h @@ -481,13 +481,13 @@ struct psr_table { /* TP wake up time in multiple of 100 */ u16 tp1_wakeup_time; u16 tp2_tp3_wakeup_time; - - /* PSR2 TP2/TP3 wakeup time for 16 panels */ - u32 psr2_tp2_tp3_wakeup_time; } __packed; struct bdb_psr { struct psr_table psr_table[16]; + + /* PSR2 TP2/TP3 wakeup time for 16 panels */ + u32 psr2_tp2_tp3_wakeup_time; } __packed; /* -- cgit v1.2.3 From ad28e02420beae459bf48be14de5de1d76e79704 Mon Sep 17 00:00:00 2001 From: Joseph Greathouse Date: Wed, 17 Jul 2019 11:55:22 -0500 Subject: drm/amdgpu: Default disable GDS for compute VMIDs The GDS and GWS blocks default to allowing all VMIDs to access all entries. Graphics VMIDs can handle setting these limits when the driver launches work. However, compute workloads under HWS control don't go through the kernel driver. Instead, HWS firmware should set these limits when a process is put into a VMID slot. Disable access to these devices by default by turning off all mask bits (for OA) and setting BASE=SIZE=0 (for GDS and GWS) for all compute VMIDs. If a process wants to use these resources, they can request this from the HWS firmware (when such capabilities are enabled). HWS will then handle setting the base and limit for the process when it is assigned to a VMID. This will also prevent user kernels from getting 'stuck' in GWS by accident if they write GWS-using code but HWS firmware is not set up to handle GWS reset. Until HWS is enabled to handle GWS properly, all GWS accesses will MEM_VIOL fault the kernel. 
v2: Move initialization outside of SRBM mutex Signed-off-by: Joseph Greathouse Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 9 +++++++++ drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 9 +++++++++ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 9 +++++++++ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 9 +++++++++ 4 files changed, 36 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 1675d5837c3c..3df50c9c2fb0 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -1441,6 +1441,15 @@ static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev) } nv_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); + + /* Initialize all compute VMIDs to have no GDS, GWS, or OA + acccess. These should be enabled by FW for target VMIDs. */ + for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) { + WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0); + WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0); + WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0); + WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0); + } } static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 0db9f488da7e..21187275dfd3 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -1879,6 +1879,15 @@ static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev) } cik_srbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); + + /* Initialize all compute VMIDs to have no GDS, GWS, or OA + acccess. These should be enabled by FW for target VMIDs. */ + for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) { + WREG32(amdgpu_gds_reg_offset[i].mem_base, 0); + WREG32(amdgpu_gds_reg_offset[i].mem_size, 0); + WREG32(amdgpu_gds_reg_offset[i].gws, 0); + WREG32(amdgpu_gds_reg_offset[i].oa, 0); + } } static void gfx_v7_0_config_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 5f401b41ef7c..751567f78567 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -3706,6 +3706,15 @@ static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev) } vi_srbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); + + /* Initialize all compute VMIDs to have no GDS, GWS, or OA + acccess. These should be enabled by FW for target VMIDs. */ + for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) { + WREG32(amdgpu_gds_reg_offset[i].mem_base, 0); + WREG32(amdgpu_gds_reg_offset[i].mem_size, 0); + WREG32(amdgpu_gds_reg_offset[i].gws, 0); + WREG32(amdgpu_gds_reg_offset[i].oa, 0); + } } static void gfx_v8_0_config_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index f4c4eea62526..1cf639a51178 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1918,6 +1918,15 @@ static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev) } soc15_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); + + /* Initialize all compute VMIDs to have no GDS, GWS, or OA + acccess. These should be enabled by FW for target VMIDs. 
*/ + for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) { + WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0); + WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0); + WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0); + WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0); + } } static void gfx_v9_0_constants_init(struct amdgpu_device *adev) -- cgit v1.2.3 From 23d66e75d7c75f108754297f6eae77348a7cd544 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Wed, 17 Jul 2019 16:32:27 +0800 Subject: drm/amd/powerplay: report bootup clock as max supported on dpm disabled With gfxclk or uclk dpm disabled, it's reasonable to report bootup clock as the max supported. Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index f1565c448de5..768aae2e20da 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -137,12 +137,37 @@ int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, { int ret = 0, clk_id = 0; uint32_t param = 0; + uint32_t clock_limit; if (!min && !max) return -EINVAL; - if (!smu_clk_dpm_is_enabled(smu, clk_type)) + if (!smu_clk_dpm_is_enabled(smu, clk_type)) { + switch (clk_type) { + case SMU_MCLK: + case SMU_UCLK: + clock_limit = smu->smu_table.boot_values.uclk; + break; + case SMU_GFXCLK: + case SMU_SCLK: + clock_limit = smu->smu_table.boot_values.gfxclk; + break; + case SMU_SOCCLK: + clock_limit = smu->smu_table.boot_values.socclk; + break; + default: + clock_limit = 0; + break; + } + + /* clock in Mhz unit */ + if (min) + *min = clock_limit / 100; + if (max) + *max = clock_limit / 100; + return 0; + } mutex_lock(&smu->mutex); clk_id = smu_clk_get_index(smu, clk_type); -- cgit v1.2.3 From 5f872b723a451a26ad0f1d29541df9de5d23529d Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Thu, 18 Jul 2019 12:49:15 +0800 Subject: drm/amdgpu: do not create ras debugfs/sysfs node for ASICs that don't have ras ability driver shouldn't init any ras debugfs/sysfs node for ASICs that don't have ras hardware ability Signed-off-by: Hawking Zhang Reviewed-by: Feifei Xu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 1a4412e47810..3a9ece450b31 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -1557,6 +1557,12 @@ int amdgpu_ras_init(struct amdgpu_device *adev) amdgpu_ras_check_supported(adev, &con->hw_supported, &con->supported); + if (!con->hw_supported) { + amdgpu_ras_set_context(adev, NULL); + kfree(con); + return 0; + } + con->features = 0; INIT_LIST_HEAD(&con->head); /* Might need get this flag from vbios. */ -- cgit v1.2.3 From 59d9c0ab7169346d53b12db6a5e986a26a28c479 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Thu, 18 Jul 2019 12:52:56 +0800 Subject: drm/amdgpu: disable GFX RAS by default GFX RAS has not been stablized yet. disable GFX ras until it is fully funcitonal. 
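For reference, 0xfffffffb is 0xffffffff with bit 2 cleared; assuming the RAS block enumeration starts UMC = 0, SDMA = 1, GFX = 2 (as in amdgpu_ras.h at the time), that is exactly the GFX block. A self-contained check:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed ordering of the first RAS blocks -- an assumption for this
 * illustration, not a definition taken from the header. */
enum ras_block { RAS_BLOCK_UMC, RAS_BLOCK_SDMA, RAS_BLOCK_GFX };

int main(void)
{
	uint32_t ras_mask = 0xffffffffu & ~(1u << RAS_BLOCK_GFX);

	assert(ras_mask == 0xfffffffbu);
	printf("default ras_mask with GFX masked off: 0x%08x\n",
	       (unsigned int)ras_mask);
	return 0;
}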
Signed-off-by: Hawking Zhang Reviewed-by: Feifei Xu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index f2e8b4238efd..5376328d3fd0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -148,7 +148,7 @@ struct amdgpu_mgpu_info mgpu_info = { .mutex = __MUTEX_INITIALIZER(mgpu_info.mutex), }; int amdgpu_ras_enable = -1; -uint amdgpu_ras_mask = 0xffffffff; +uint amdgpu_ras_mask = 0xfffffffb; /** * DOC: vramlimit (int) -- cgit v1.2.3 From 29bd650809225f51ba475c556f43e53e392c44e3 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Thu, 18 Jul 2019 13:59:38 +0800 Subject: drm/amdgpu: only allow error injection to UMC IP block error injection to other IP blocks (except UMC) will be enabled until RAS feature stablize on those IP blocks Signed-off-by: Hawking Zhang Reviewed-by: Feifei Xu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 3a9ece450b31..fc346eb1aacd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -689,6 +689,12 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev, if (!obj) return -EINVAL; + if (block_info.block_id != TA_RAS_BLOCK__UMC) { + DRM_INFO("%s error injection is not supported yet\n", + ras_block_str(info->head.block)); + return -EINVAL; + } + ret = psp_ras_trigger_error(&adev->psp, &block_info); if (ret) DRM_ERROR("RAS ERROR: inject %s error failed ret %d\n", -- cgit v1.2.3 From 578a4daa1cd61f9783b5d0f566d6ec0a2cb9f6a3 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Thu, 18 Jul 2019 16:03:46 +0800 Subject: drm/amdgpu: drop ras self test this function is not needed any more. error injection is the only way to validate ras but it can't be executed in amdgpu_ras_init, where gpu is even not initialized Signed-off-by: Hawking Zhang Reviewed-by: Feifei Xu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 7 ------- 1 file changed, 7 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index fc346eb1aacd..fac7aa2c244f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -136,11 +136,6 @@ static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev, static int amdgpu_ras_release_vram(struct amdgpu_device *adev, struct amdgpu_bo **bo_ptr); -static void amdgpu_ras_self_test(struct amdgpu_device *adev) -{ - /* TODO */ -} - static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { @@ -1582,8 +1577,6 @@ int amdgpu_ras_init(struct amdgpu_device *adev) if (amdgpu_ras_fs_init(adev)) goto fs_out; - amdgpu_ras_self_test(adev); - DRM_INFO("RAS INFO: ras initialized successfully, " "hardware ability[%x] ras_mask[%x]\n", con->hw_supported, con->supported); -- cgit v1.2.3 From 1a195ed5f197fcfd1d99ceedd469857fcd7d8c4f Mon Sep 17 00:00:00 2001 From: Kevin Wang Date: Thu, 18 Jul 2019 15:46:55 +0800 Subject: drm/amd/powerplay: change sysfs pp_dpm_xxx format for navi10 v2: set average clock value on level 1 when current clock equal min or max clock (fine grained dpm support). 
the navi10 gfxclk (sclk) support fine grained DPM, so use level 1 to show current dpm freq in sysfs pp_dpm_xxx Signed-off-by: Kevin Wang Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 47 ++++++++++++++++++++++++++---- 1 file changed, 41 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index 2dae0ae0829e..8293b5216aad 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -626,11 +626,26 @@ static int navi10_get_current_clk_freq_by_table(struct smu_context *smu, return ret; } +static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu_clk_type clk_type) +{ + PPTable_t *pptable = smu->smu_table.driver_pptable; + DpmDescriptor_t *dpm_desc = NULL; + uint32_t clk_index = 0; + + clk_index = smu_clk_get_index(smu, clk_type); + dpm_desc = &pptable->DpmDescriptor[clk_index]; + + /* 0 - Fine grained DPM, 1 - Discrete DPM */ + return dpm_desc->SnapToDiscrete == 0 ? true : false; +} + static int navi10_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { int i, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; + uint32_t freq_values[3] = {0}; + uint32_t mark_index = 0; switch (clk_type) { case SMU_GFXCLK: @@ -643,22 +658,42 @@ static int navi10_print_clk_levels(struct smu_context *smu, ret = smu_get_current_clk_freq(smu, clk_type, &cur_value); if (ret) return size; + /* 10KHz -> MHz */ cur_value = cur_value / 100; - size += sprintf(buf, "current clk: %uMhz\n", cur_value); - ret = smu_get_dpm_level_count(smu, clk_type, &count); if (ret) return size; - for (i = 0; i < count; i++) { - ret = smu_get_dpm_freq_by_index(smu, clk_type, i, &value); + if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) { + for (i = 0; i < count; i++) { + ret = smu_get_dpm_freq_by_index(smu, clk_type, i, &value); + if (ret) + return size; + + size += sprintf(buf + size, "%d: %uMhz %s\n", i, value, + cur_value == value ? "*" : ""); + } + } else { + ret = smu_get_dpm_freq_by_index(smu, clk_type, 0, &freq_values[0]); + if (ret) + return size; + ret = smu_get_dpm_freq_by_index(smu, clk_type, count - 1, &freq_values[2]); if (ret) return size; - size += sprintf(buf + size, "%d: %uMhz %s\n", i, value, - cur_value == value ? "*" : ""); + freq_values[1] = cur_value; + mark_index = cur_value == freq_values[0] ? 0 : + cur_value == freq_values[2] ? 2 : 1; + if (mark_index != 1) + freq_values[1] = (freq_values[0] + freq_values[2]) / 2; + + for (i = 0; i < 3; i++) { + size += sprintf(buf + size, "%d: %uMhz %s\n", i, freq_values[i], + i == mark_index ? "*" : ""); + } + } break; default: -- cgit v1.2.3 From 3457b3055e43fbe457d9779c1362d56f19a888e7 Mon Sep 17 00:00:00 2001 From: Fuqian Huang Date: Thu, 11 Jul 2019 11:10:21 +0800 Subject: drm/ttm: use the same attributes when freeing d_page->vaddr MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In function __ttm_dma_alloc_page(), d_page->addr is allocated by dma_alloc_attrs() but freed with use dma_free_coherent() in __ttm_dma_free_page(). Use the correct dma_free_attrs() to free d_page->vaddr. 
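The rule being applied here: a buffer obtained with dma_alloc_attrs() must be released with dma_free_attrs() using the same device, size and attrs; dma_free_coherent() is just the attrs == 0 case. A hedged sketch of the pairing in kernel context (helper names are made up, this is not the TTM code itself):

#include <linux/dma-mapping.h>

static void *pool_page_alloc(struct device *dev, size_t size,
			     dma_addr_t *dma, bool huge)
{
	unsigned long attrs = huge ? DMA_ATTR_NO_WARN : 0;

	/* Whatever attrs are passed at allocation time ... */
	return dma_alloc_attrs(dev, size, dma, GFP_KERNEL, attrs);
}

static void pool_page_free(struct device *dev, size_t size, void *vaddr,
			   dma_addr_t dma, bool huge)
{
	unsigned long attrs = huge ? DMA_ATTR_NO_WARN : 0;

	/* ... must be passed to the matching free as well. */
	dma_free_attrs(dev, size, vaddr, dma, attrs);
}

In the TTM fix the "huge" decision corresponds to the pool's IS_HUGE type flag.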
Signed-off-by: Fuqian Huang Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c index d594f7520b7b..7d78e6deac89 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c @@ -285,9 +285,13 @@ static int ttm_set_pages_caching(struct dma_pool *pool, static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page) { + unsigned long attrs = 0; dma_addr_t dma = d_page->dma; d_page->vaddr &= ~VADDR_FLAG_HUGE_POOL; - dma_free_coherent(pool->dev, pool->size, (void *)d_page->vaddr, dma); + if (pool->type & IS_HUGE) + attrs = DMA_ATTR_NO_WARN; + + dma_free_attrs(pool->dev, pool->size, (void *)d_page->vaddr, dma, attrs); kfree(d_page); d_page = NULL; -- cgit v1.2.3 From c6d5245d41de3a9786707b34189c41d6412fd0ba Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Thu, 18 Jul 2019 11:38:46 -0400 Subject: drm/amdgpu: use VCN firmware offset for cache window Since we are using the signed FW now, and also using PSP firmware loading, but it's still potential to break driver when loading FW directly instead of PSP, so we should add offset. Signed-off-by: Leo Liu Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index 988c0adaca91..1cfc2620b2dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -372,11 +372,8 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev) WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, upper_32_bits(adev->vcn.gpu_addr)); offset = size; - /* No signed header for now from firmware WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, AMDGPU_UVD_FIRMWARE_OFFSET >> 3); - */ - WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0); } WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size); -- cgit v1.2.3 From 61f33f6aa88388e36ff8ef27a40b4a173c1511d1 Mon Sep 17 00:00:00 2001 From: Jun Lei Date: Fri, 31 May 2019 15:14:13 -0400 Subject: drm/amd/display: initialize p_state to proper value [why] On some modes SMU will be in infinite loop state at boot, this is because driver assumes p_state_support is false, but this is the opposite of the assumed boot state by SMU. 
we optimize away notifying SMU about no pstate, and so they will get stuck [how] when we init clk manager, init pstate to true, so it matches driver load assumption Signed-off-by: Jun Lei Reviewed-by: Tony Cheng Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c index 08a774fc7b67..740f5db22bb5 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c @@ -301,6 +301,8 @@ void dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr, void dcn2_init_clocks(struct clk_mgr *clk_mgr) { memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); + // Assumption is that boot state always supports pstate + clk_mgr->clks.p_state_change_support = true; } void dcn2_enable_pme_wa(struct clk_mgr *clk_mgr_base) -- cgit v1.2.3 From 0bd8ac7ed5f9a1a26c722c6cdbc4cb178d36cc03 Mon Sep 17 00:00:00 2001 From: Jun Lei Date: Mon, 3 Jun 2019 08:13:12 -0400 Subject: drm/amd/display: fix up HUBBUB hw programming for VM [why] Some values were not being converted or bit-shifted properly for HW registers, causing black screen [how] Fix up the values before programming HW Signed-off-by: Jun Lei Reviewed-by: Anthony Koo Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c | 17 ++++++++--------- drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h | 4 ++-- 2 files changed, 10 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c index ece6e136437b..c72a9ff57f15 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c @@ -366,25 +366,24 @@ int hubbub2_init_dchub_sys_ctx(struct hubbub *hubbub, struct dcn_vmid_page_table_config phys_config; REG_SET(DCN_VM_FB_LOCATION_BASE, 0, - FB_BASE, pa_config->system_aperture.fb_base); + FB_BASE, pa_config->system_aperture.fb_base >> 24); REG_SET(DCN_VM_FB_LOCATION_TOP, 0, - FB_TOP, pa_config->system_aperture.fb_top); + FB_TOP, pa_config->system_aperture.fb_top >> 24); REG_SET(DCN_VM_FB_OFFSET, 0, - FB_OFFSET, pa_config->system_aperture.fb_offset); + FB_OFFSET, pa_config->system_aperture.fb_offset >> 24); REG_SET(DCN_VM_AGP_BOT, 0, - AGP_BOT, pa_config->system_aperture.agp_bot); + AGP_BOT, pa_config->system_aperture.agp_bot >> 24); REG_SET(DCN_VM_AGP_TOP, 0, - AGP_TOP, pa_config->system_aperture.agp_top); + AGP_TOP, pa_config->system_aperture.agp_top >> 24); REG_SET(DCN_VM_AGP_BASE, 0, - AGP_BASE, pa_config->system_aperture.agp_base); + AGP_BASE, pa_config->system_aperture.agp_base >> 24); if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) { - phys_config.depth = 1; - phys_config.block_size = 4096; phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12; phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12; phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr; - + phys_config.depth = 0; + phys_config.block_size = 0; // Init VMID 0 based on PA config dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config); } diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h 
b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index 959f5b654611..1ea505f7a05a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -61,8 +61,8 @@ enum dcn_hubbub_page_table_depth { }; enum dcn_hubbub_page_table_block_size { - DCN_PAGE_TABLE_BLOCK_SIZE_4KB, - DCN_PAGE_TABLE_BLOCK_SIZE_64KB + DCN_PAGE_TABLE_BLOCK_SIZE_4KB = 0, + DCN_PAGE_TABLE_BLOCK_SIZE_64KB = 4 }; struct dcn_hubbub_phys_addr_config { -- cgit v1.2.3 From 5d109be38b23c8859ec78a2ed7c254ccd569719d Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Thu, 30 May 2019 15:47:51 -0400 Subject: drm/amd/display: fix dsc disable A regression caused dsc to never get disabled in certain situations. Signed-off-by: Dmytro Laktyushkin Reviewed-by: Nikola Cornij Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 0b84a322b8a2..94f2f9fc6956 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -1740,8 +1740,11 @@ static void dcn20_reset_back_end_for_pipe( else if (pipe_ctx->stream_res.audio) { dc->hwss.disable_audio_stream(pipe_ctx, FREE_ACQUIRED_RESOURCE); } - } +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT + else if (pipe_ctx->stream_res.dsc) + dp_set_dsc_enable(pipe_ctx, false); +#endif /* by upper caller loop, parent pipe: pipe0, will be reset last. * back end share by all pipes and will be disable only when disable -- cgit v1.2.3 From db31af12a5169f4ac26acec759c1d872eef26554 Mon Sep 17 00:00:00 2001 From: Jun Lei Date: Mon, 3 Jun 2019 11:37:44 -0400 Subject: drm/amd/display: cap DCFCLK hardmin to 507 for NV10 [why] Due to limitation in SMU/PPLIB, it is not possible to know Fmax @ Vmin for DCFCLK. This causes issues at high display configurations where extra headroom of DCFCLK can enable P-state switching [how] Use existing override logic. If override not defined, then force min = 507 Signed-off-by: Jun Lei Reviewed-by: Eric Yang Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index d200bc3cec71..b949e202d6cb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -2643,6 +2643,10 @@ static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_ if (dc->bb_overrides.min_dcfclk_mhz > 0) min_dcfclk = dc->bb_overrides.min_dcfclk_mhz; + else + // Accounting for SOC/DCF relationship, we can go as high as + // 506Mhz in Vmin. We need to code 507 since SMU will round down to 506. + min_dcfclk = 507; for (i = 0; i < num_states; i++) { int min_fclk_required_by_uclk; -- cgit v1.2.3 From 5b25e5f1a97284020abee7348427f89abdb674e8 Mon Sep 17 00:00:00 2001 From: Harmanprit Tatla Date: Tue, 4 Jun 2019 14:12:21 -0400 Subject: drm/amd/display: No audio endpoint for Dell MST display [Why] There are certain MST displays (i.e. Dell P2715Q) that although have the MST feature set to off may still report it is a branch device and a non-zero value for downstream port present. 
This can lead to us incorrectly classifying a dp dongle connection as being active and disabling the audio endpoint for the display. [How] Modified the placement and condition used to assign the is_branch_dev bit. Signed-off-by: Harmanprit Tatla Reviewed-by: Aric Cyr Acked-by: Anthony Koo Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 056be4c34a98..3e00c88bd2b6 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -2230,11 +2230,18 @@ static void get_active_converter_info( link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE; ddc_service_set_dongle_type(link->ddc, link->dpcd_caps.dongle_type); + link->dpcd_caps.is_branch_dev = false; return; } /* DPCD 0x5 bit 0 = 1, it indicate it's branch device */ - link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT; + if (ds_port.fields.PORT_TYPE == DOWNSTREAM_DP) { + link->dpcd_caps.is_branch_dev = false; + } + + else { + link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT; + } switch (ds_port.fields.PORT_TYPE) { case DOWNSTREAM_VGA: -- cgit v1.2.3 From 90bbf6374b88bdc1411fd83b24d87513ba23d519 Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Tue, 4 Jun 2019 14:48:33 -0400 Subject: drm/amd/display: Set default block_size, even in unexpected cases We're not expected to enter the default case, but not returning a default value here is incorrect. Signed-off-by: Dmytro Laktyushkin Reviewed-by: Eric Bernstein Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c | 1 + drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c index c72a9ff57f15..6e2dbd03f9bf 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c @@ -337,6 +337,7 @@ static enum dcn_hubbub_page_table_block_size page_table_block_size_to_hw(unsigne break; default: ASSERT(false); + block_size = page_table_block_size; break; } diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index 1ea505f7a05a..9502478c4a1b 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -62,7 +62,7 @@ enum dcn_hubbub_page_table_depth { enum dcn_hubbub_page_table_block_size { DCN_PAGE_TABLE_BLOCK_SIZE_4KB = 0, - DCN_PAGE_TABLE_BLOCK_SIZE_64KB = 4 + DCN_PAGE_TABLE_BLOCK_SIZE_64KB = 4, }; struct dcn_hubbub_phys_addr_config { -- cgit v1.2.3 From 61011e63f87fe5dd0ebff787cd78df4d7d66aec5 Mon Sep 17 00:00:00 2001 From: Nikola Cornij Date: Wed, 5 Jun 2019 14:29:47 -0400 Subject: drm/amd/display: Set one 4:2:0-related PPS field as recommended by DSC spec [why] 'second_line_offset_adj' was mistakenly left at zero, even though DSC spec v1.2a recommends setting this field to 512 for 4:2:0. 
[how] Set 'second_line_offset_adj' to 512 for 4:2:0 and leave at zero otherwise Signed-off-by: Nikola Cornij Reviewed-by: Eric Bernstein Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c b/drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c index 67089765780b..340ef4d41ebd 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c @@ -377,6 +377,12 @@ int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg) vdsc_cfg->rc_bits = (hrd_delay * vdsc_cfg->bits_per_pixel) / 16; vdsc_cfg->initial_dec_delay = hrd_delay - vdsc_cfg->initial_xmit_delay; + /* As per DSC spec v1.2a recommendation: */ + if (vdsc_cfg->native_420) + vdsc_cfg->second_line_offset_adj = 512; + else + vdsc_cfg->second_line_offset_adj = 0; + return 0; } EXPORT_SYMBOL(drm_dsc_compute_rc_parameters); -- cgit v1.2.3 From 4a876eecf6a5bfbe05ca6358e1b6a484e27ce32f Mon Sep 17 00:00:00 2001 From: Jun Lei Date: Wed, 5 Jun 2019 10:53:40 -0400 Subject: drm/amd/display: swap system aperture high/low [why] Currently logical values are swapped in HW, causing system aperture to be undefined, so VA and PA cannot co-exist [how] program values correctly Signed-off-by: Jun Lei Reviewed-by: Yongqiang Sun Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 94f2f9fc6956..710727e5d0f8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -1153,8 +1153,8 @@ void dcn20_enable_plane( apt.sys_default.quad_part = 0; - apt.sys_high.quad_part = dc->vm_pa_config.system_aperture.start_addr; - apt.sys_low.quad_part = dc->vm_pa_config.system_aperture.end_addr; + apt.sys_low.quad_part = dc->vm_pa_config.system_aperture.start_addr; + apt.sys_high.quad_part = dc->vm_pa_config.system_aperture.end_addr; // Program system aperture settings pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings(pipe_ctx->plane_res.hubp, &apt); -- cgit v1.2.3 From 4fc1609bcd5475a9cef1caeb10a04106f4f85fac Mon Sep 17 00:00:00 2001 From: Samson Tam Date: Tue, 4 Jun 2019 15:52:59 -0400 Subject: drm/amd/display: skip retrain in dc_link_set_preferred_link_settings() if using passive dongle [Why] Fixes issue when we have a display connected using a passive dongle and then emulate over it using a DP connection at 1 x 1.62 Ghz. System hangs because register bus returns back 0xFFFFFFFF for all register reads after setting register DIG_BE_CNTL in dcn10_link_encoder_connect_dig_be_to_fe(). Hang occurs later when trying to do a register read. [How] At the start of the emulation, dc_link_set_preferred_link_settings() and dp_retrain_link_dp_test() is called, even though it is connected using a passive dongle. Add an extra condition in dp_retrain_link_dp_test() to check for link->dongle_max_pix_clk > 0. This is the only way we know if the connection is using passive dongle so we don't retrain DP. 
Signed-off-by: Samson Tam Reviewed-by: Jun Lei Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 8dbf759eba45..435d50356bad 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -2984,8 +2984,10 @@ void dc_link_set_preferred_link_settings(struct dc *dc, /* Retrain with preferred link settings only relevant for * DP signal type + * Check for non-DP signal or if passive dongle present */ - if (!dc_is_dp_signal(link->connector_signal)) + if (!dc_is_dp_signal(link->connector_signal) || + link->dongle_max_pix_clk > 0) return; for (i = 0; i < MAX_PIPES; i++) { -- cgit v1.2.3 From 492d9ec244923420af96db6b69ad7d575859aa92 Mon Sep 17 00:00:00 2001 From: Murton Liu Date: Mon, 10 Jun 2019 17:55:28 -0400 Subject: drm/amd/display: Clock does not lower in Updateplanes [why] We reset the optimized_required in atomic_plane_disable flag immediately after it is set in atomic_plane_disconnect, causing us to never have flag set during next flip in UpdatePlanes. [how] Optimize directly after each time plane is removed. Signed-off-by: Murton Liu Reviewed-by: Tony Cheng Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index e50a696fcb5d..0c4340404e24 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -2516,6 +2516,12 @@ static void dcn10_apply_ctx_for_surface( if (removed_pipe[i]) dcn10_disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]); + for (i = 0; i < dc->res_pool->pipe_count; i++) + if (removed_pipe[i]) { + dc->hwss.optimize_bandwidth(dc, context); + break; + } + if (dc->hwseq->wa.DEGVIDCN10_254) hubbub1_wm_change_req_wa(dc->res_pool->hubbub); } -- cgit v1.2.3 From 7f6964c5a05e6593bda3a4bcb5581d0b72fc71cb Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Wed, 5 Jun 2019 15:02:04 -0400 Subject: drm/amd/display: Copy max_clks_by_state after dce_clk_mgr_construct [Why] For DCE110, DCE112 and DCE120 the max_clks_by_state for the clk_mgr are copied from their respective table before the call to dce_clk_mgr_construct, but then dce_clk_mgr_construct overwrites these with the dce80_max_clks_by_state. [How] Copy these after we call dce_clk_mgr_construct so we're using the right tables. 
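The ordering bug is generic enough to demonstrate standalone; a minimal illustration (not the DC code) of why the per-ASIC copy has to come after the shared constructor:

#include <stdio.h>
#include <string.h>

struct clk_mgr { int max_clks[4]; };

/* The shared constructor fills in defaults, like dce_clk_mgr_construct()
 * filling in the DCE8 table (values here are made up). */
static void clk_mgr_construct(struct clk_mgr *m)
{
	static const int dce80_defaults[4] = { 100, 200, 300, 400 };

	memcpy(m->max_clks, dce80_defaults, sizeof(dce80_defaults));
}

int main(void)
{
	static const int dce112_table[4] = { 150, 250, 350, 450 };
	struct clk_mgr m;

	/* Buggy order: the per-ASIC copy is clobbered by the constructor. */
	memcpy(m.max_clks, dce112_table, sizeof(dce112_table));
	clk_mgr_construct(&m);
	printf("buggy order: %d\n", m.max_clks[0]);	/* 100 */

	/* Fixed order: construct first, then override with the ASIC table. */
	clk_mgr_construct(&m);
	memcpy(m.max_clks, dce112_table, sizeof(dce112_table));
	printf("fixed order: %d\n", m.max_clks[0]);	/* 150 */

	return 0;
}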
Signed-off-by: Nicholas Kazlauskas Reviewed-by: David Francis Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c | 4 ++-- drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c | 4 ++-- drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c index c1a92c16535c..5cc3acccda2a 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c @@ -262,12 +262,12 @@ void dce110_clk_mgr_construct( struct dc_context *ctx, struct clk_mgr_internal *clk_mgr) { + dce_clk_mgr_construct(ctx, clk_mgr); + memcpy(clk_mgr->max_clks_by_state, dce110_max_clks_by_state, sizeof(dce110_max_clks_by_state)); - dce_clk_mgr_construct(ctx, clk_mgr); - clk_mgr->regs = &disp_clk_regs; clk_mgr->clk_mgr_shift = &disp_clk_shift; clk_mgr->clk_mgr_mask = &disp_clk_mask; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c index 778392c73187..7c746ef1e32e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c @@ -226,12 +226,12 @@ void dce112_clk_mgr_construct( struct dc_context *ctx, struct clk_mgr_internal *clk_mgr) { + dce_clk_mgr_construct(ctx, clk_mgr); + memcpy(clk_mgr->max_clks_by_state, dce112_max_clks_by_state, sizeof(dce112_max_clks_by_state)); - dce_clk_mgr_construct(ctx, clk_mgr); - clk_mgr->regs = &disp_clk_regs; clk_mgr->clk_mgr_shift = &disp_clk_shift; clk_mgr->clk_mgr_mask = &disp_clk_mask; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c index 906310c3e2eb..5399b8cf6b75 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c @@ -127,12 +127,12 @@ static struct clk_mgr_funcs dce120_funcs = { void dce120_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_mgr) { + dce_clk_mgr_construct(ctx, clk_mgr); + memcpy(clk_mgr->max_clks_by_state, dce120_max_clks_by_state, sizeof(dce120_max_clks_by_state)); - dce_clk_mgr_construct(ctx, clk_mgr); - clk_mgr->base.dprefclk_khz = 600000; clk_mgr->base.funcs = &dce120_funcs; } -- cgit v1.2.3 From dd5d9348da02dd83dbb235e55aa0acb3f48ccc95 Mon Sep 17 00:00:00 2001 From: Wenjing Liu Date: Tue, 11 Jun 2019 18:18:36 -0400 Subject: drm/amd/display: wait for the whole frame after global unlock [why] The current code will not wait for the entire frame after global unlock. This causes dsc dynamic target bpp update corruption when there is a surface update immediately happens after this. [how] Wait for the entire whole frame after unlock before continuing the rest of stream and surface update. 
Signed-off-by: Wenjing Liu Reviewed-by: Jun Lei Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 710727e5d0f8..e5e78aa930a6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -1242,6 +1242,8 @@ void dcn20_pipe_control_lock_global( CRTC_STATE_VACTIVE); pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK); + pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, + CRTC_STATE_VACTIVE); pipe->stream_res.tg->funcs->lock_doublebuffer_disable( pipe->stream_res.tg); } -- cgit v1.2.3 From ca6f188cdf80de09b92174cf5fb2716021264222 Mon Sep 17 00:00:00 2001 From: Julian Parkin Date: Thu, 13 Jun 2019 12:49:37 -0400 Subject: drm/amd/display: Poll for GPUVM context ready (v2) [Why] Hardware docs state that we must wait until the GPUVM context is ready after programming it. [How] Poll until the valid bit of PAGE_TABLE_BASE_ADDR_LO32 is set to 1 after programming it. v2: fix include for udelay (Alex) Signed-off-by: Julian Parkin Reviewed-by: Charlene Liu Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c | 37 +++++++++++++++++++++++ 1 file changed, 37 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c index 27679ef6ebe8..96c263223315 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c @@ -23,6 +23,8 @@ * */ +#include + #include "dcn20_vmid.h" #include "reg_helper.h" @@ -36,6 +38,38 @@ #define FN(reg_name, field_name) \ vmid->shifts->field_name, vmid->masks->field_name +static void dcn20_wait_for_vmid_ready(struct dcn20_vmid *vmid) +{ + /* According the hardware spec, we need to poll for the lowest + * bit of PAGE_TABLE_BASE_ADDR_LO32 = 1 any time a GPUVM + * context is updated. We can't use REG_WAIT here since we + * don't have a seperate field to wait on. 
+ * + * TODO: Confirm timeout / poll interval with hardware team + */ + + int max_times = 10000; + int delay_us = 5; + int i; + + for (i = 0; i < max_times; ++i) { + uint32_t entry_lo32; + + REG_GET(PAGE_TABLE_BASE_ADDR_LO32, + VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_LO32, + &entry_lo32); + + if (entry_lo32 & 0x1) + return; + + udelay(delay_us); + } + + /* VM setup timed out */ + DC_LOG_WARNING("Timeout while waiting for GPUVM context update\n"); + ASSERT(0); +} + void dcn20_vmid_setup(struct dcn20_vmid *vmid, const struct dcn_vmid_page_table_config *config) { REG_SET(PAGE_TABLE_START_ADDR_HI32, 0, @@ -54,6 +88,9 @@ void dcn20_vmid_setup(struct dcn20_vmid *vmid, const struct dcn_vmid_page_table_ REG_SET(PAGE_TABLE_BASE_ADDR_HI32, 0, VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_HI32, (config->page_table_base_addr >> 32) & 0xFFFFFFFF); + /* Note: per hardware spec PAGE_TABLE_BASE_ADDR_LO32 must be programmed last in sequence */ REG_SET(PAGE_TABLE_BASE_ADDR_LO32, 0, VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_LO32, config->page_table_base_addr & 0xFFFFFFFF); + + dcn20_wait_for_vmid_ready(vmid); } -- cgit v1.2.3 From c7990daebe71d11a9e360b5c3b0ecd1846a3a4bb Mon Sep 17 00:00:00 2001 From: SivapiriyanKumarasamy Date: Fri, 14 Jun 2019 15:04:00 -0400 Subject: drm/amd/display: Wait for backlight programming completion in set backlight level [WHY] Currently we don't wait for blacklight programming completion in DMCU when setting backlight level. Some sequences such as PSR static screen event trigger reprogramming requires it to be complete. [How] Add generic wait for dmcu command completion in set backlight level. Signed-off-by: SivapiriyanKumarasamy Reviewed-by: Anthony Koo Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dce/dce_abm.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c index f8903bcabe49..58bd131d5b48 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c @@ -239,6 +239,10 @@ static void dmcu_set_backlight_level( s2 |= (backlight_8_bit << ATOM_S2_CURRENT_BL_LEVEL_SHIFT); REG_WRITE(BIOS_SCRATCH_2, s2); + + /* waitDMCUReadyForCmd */ + REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, + 0, 1, 80000); } static void dce_abm_init(struct abm *abm) -- cgit v1.2.3 From 288af96df16b629552c5bcc9ec0f0191c6198a72 Mon Sep 17 00:00:00 2001 From: Ilya Bakoulin Date: Wed, 29 May 2019 18:52:17 -0400 Subject: drm/amd/display: Check for valid stream_encode Before accessing it's vtable, check that stream_encoder is non-null. 
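The guard being added is the standard defensive pattern for optional vtable hooks: check the object and the hook before calling through the function pointer. A minimal sketch (types reduced to the bare minimum, not the dc structures):

#include <stdio.h>
#include <stddef.h>

struct stream_encoder;

struct stream_encoder_funcs {
	void (*set_dynamic_metadata)(struct stream_encoder *enc, int enable);
};

struct stream_encoder {
	const struct stream_encoder_funcs *funcs;
};

static void set_dynamic_metadata_safe(struct stream_encoder *enc, int enable)
{
	/* Guard both the object and the optional hook before dereferencing. */
	if (enc && enc->funcs && enc->funcs->set_dynamic_metadata)
		enc->funcs->set_dynamic_metadata(enc, enable);
	else
		printf("no stream encoder bound, skipping dynamic metadata\n");
}

int main(void)
{
	set_dynamic_metadata_safe(NULL, 1); /* without the check this would crash */
	return 0;
}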
Signed-off-by: Ilya Bakoulin Reviewed-by: Eric Bernstein Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index af7f8be230f7..352862370390 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -612,7 +612,8 @@ bool dc_stream_set_dynamic_metadata(struct dc *dc, pipe_ctx->stream->dmdata_address = attr->address; - if (pipe_ctx->stream_res.stream_enc->funcs->set_dynamic_metadata != NULL) { + if (pipe_ctx->stream_res.stream_enc && + pipe_ctx->stream_res.stream_enc->funcs->set_dynamic_metadata != NULL) { if (pipe_ctx->stream->dmdata_address.quad_part != 0) { /* if using dynamic meta, don't set up generic infopackets */ pipe_ctx->stream_res.encoder_info_frame.hdrsmd.valid = false; -- cgit v1.2.3 From 12d0e503dd9e0576487b2f20577717b2a36fe0c5 Mon Sep 17 00:00:00 2001 From: Derek Lai Date: Tue, 18 Jun 2019 14:55:57 +0800 Subject: drm/amd/display: Read max down spread [Why] When launch D10.2, driver will write DPCD 0x107 with 0x00 [How] Read MAX_DOWNSPREAD (0x0003h) then keep in current link settings Signed-off-by: Derek Lai Reviewed-by: Tony Cheng Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 435d50356bad..652960c5548a 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -532,6 +532,7 @@ static void read_edp_current_link_settings_on_detect(struct dc_link *link) uint32_t read_dpcd_retry_cnt = 10; enum dc_status status = DC_ERROR_UNEXPECTED; int i; + union max_down_spread max_down_spread = { {0} }; // Read DPCD 00101h to find out the number of lanes currently set for (i = 0; i < read_dpcd_retry_cnt; i++) { @@ -576,6 +577,12 @@ static void read_edp_current_link_settings_on_detect(struct dc_link *link) link->cur_link_settings.link_rate = link_bw_set; link->cur_link_settings.use_link_rate_set = false; } + // Read DPCD 00003h to find the max down spread. + core_link_read_dpcd(link, DP_MAX_DOWNSPREAD, + &max_down_spread.raw, sizeof(max_down_spread)); + link->cur_link_settings.link_spread = + max_down_spread.bits.MAX_DOWN_SPREAD ? + LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED; } static bool detect_dp( -- cgit v1.2.3 From 19f876967a98db63fbfca7e0d9f55099f52189e0 Mon Sep 17 00:00:00 2001 From: Alvin Lee Date: Thu, 20 Jun 2019 13:03:25 -0400 Subject: drm/amd/display: Disable Audio on reinitialize hardware [Why] When we recover from hang, we do not want to skip the audio enable call. 
[How] Disable audio in dc_reinitialize_hardware Signed-off-by: Alvin Lee Reviewed-by: Jun Lei Acked-by: Leo Li Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c | 1 + .../drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 20 ++++++++++++++++---- 2 files changed, 17 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c index 740f5db22bb5..50bfb5921de0 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c @@ -333,6 +333,7 @@ void dcn20_clk_mgr_construct( struct dccg *dccg) { clk_mgr->base.ctx = ctx; + clk_mgr->pp_smu = pp_smu; clk_mgr->base.funcs = &dcn2_funcs; clk_mgr->regs = &clk_mgr_regs; clk_mgr->clk_mgr_shift = &clk_mgr_shift; diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 858a58856ebd..8005989c1263 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -965,11 +965,17 @@ void hwss_edp_backlight_control( void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx) { /* notify audio driver for audio modes of monitor */ - struct dc *core_dc = pipe_ctx->stream->ctx->dc; + struct dc *core_dc; struct pp_smu_funcs *pp_smu = NULL; - struct clk_mgr *clk_mgr = core_dc->clk_mgr; + struct clk_mgr *clk_mgr; unsigned int i, num_audio = 1; + if (!pipe_ctx->stream) + return; + + core_dc = pipe_ctx->stream->ctx->dc; + clk_mgr = core_dc->clk_mgr; + if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == true) return; @@ -999,9 +1005,15 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx) void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option) { - struct dc *dc = pipe_ctx->stream->ctx->dc; + struct dc *dc; struct pp_smu_funcs *pp_smu = NULL; - struct clk_mgr *clk_mgr = dc->clk_mgr; + struct clk_mgr *clk_mgr; + + if (!pipe_ctx || !pipe_ctx->stream) + return; + + dc = pipe_ctx->stream->ctx->dc; + clk_mgr = dc->clk_mgr; if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == false) return; -- cgit v1.2.3 From 1ca068ed34d6b39d336c1b0d618ed73ba8f04548 Mon Sep 17 00:00:00 2001 From: Zi Yu Liao Date: Thu, 20 Jun 2019 10:55:26 -0400 Subject: drm/amd/display: fix DMCU hang when going into Modern Standby [why] When the system is going into suspend, set_backlight gets called after the eDP got blanked. Since smooth brightness is enabled, the driver will make a call into the DMCU to ramp the brightness. The DMCU would try to enable ABM to do so. But since the display is blanked, this ends up causing ABM1_ACE_DBUF_REG_UPDATE_PENDING to get stuck at 1, which results in a dead lock in the DMCU firmware. [how] Disable brightness ramping when the eDP display is blanked. 
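In effect the fix forces the ramp duration to zero whenever the pipe has no plane bound. A tiny sketch of that decision (the helper name is illustrative):

#include <stdio.h>
#include <stdbool.h>

/*
 * Choose the brightness ramp length for a backlight update. With the panel
 * blanked (no plane on the pipe) ramping must be skipped, since the ramp
 * relies on ABM, which cannot complete without an active stream.
 */
static unsigned int pick_frame_ramp(bool pipe_has_plane, unsigned int requested_ramp)
{
	return pipe_has_plane ? requested_ramp : 0;
}

int main(void)
{
	printf("ramp while active : %u frames\n", pick_frame_ramp(true, 60));
	printf("ramp while blanked: %u frames\n", pick_frame_ramp(false, 60));
	return 0;
}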
Signed-off-by: Zi Yu Liao Reviewed-by: Eric Yang Acked-by: Anthony Koo Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 652960c5548a..f9bed7c65b43 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -2336,7 +2336,7 @@ bool dc_link_set_backlight_level(const struct dc_link *link, if (core_dc->current_state->res_ctx.pipe_ctx[i].stream) { if (core_dc->current_state->res_ctx. pipe_ctx[i].stream->link - == link) + == link) { /* DMCU -1 for all controller id values, * therefore +1 here */ @@ -2344,6 +2344,13 @@ bool dc_link_set_backlight_level(const struct dc_link *link, core_dc->current_state-> res_ctx.pipe_ctx[i].stream_res.tg->inst + 1; + + /* Disable brightness ramping when the display is blanked + * as it can hang the DMCU + */ + if (core_dc->current_state->res_ctx.pipe_ctx[i].plane_state == NULL) + frame_ramp = 0; + } } } abm->funcs->set_backlight_level_pwm( -- cgit v1.2.3 From 74eda776d7a4e69ec7aa1ce30a87636f14220fbb Mon Sep 17 00:00:00 2001 From: Tai Man Date: Fri, 7 Jun 2019 17:32:27 -0400 Subject: drm/amd/display: use encoder's engine id to find matched free audio device [Why] On some platforms, the encoder id 3 is not populated. So the encoders are not stored in right order as index (id: 0, 1, 2, 4, 5) at pool. This would cause encoders id 4 & id 5 to fail when finding corresponding audio device, defaulting to the first available audio device. As result, we cannot stream audio into two DP ports with encoders id 4 & id 5. [How] It need to create enough audio device objects (0 - 5) to perform matching. Then use encoder engine id to find matched audio device. Signed-off-by: Tai Man Reviewed-by: Charlene Liu Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 51a78283a86d..a0e29c37ab69 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -258,7 +258,7 @@ bool resource_construct( * PORT_CONNECTIVITY == 1 (as instructed by HW team). */ update_num_audio(&straps, &num_audio, &pool->audio_support); - for (i = 0; i < pool->pipe_count && i < num_audio; i++) { + for (i = 0; i < caps->num_audio; i++) { struct audio *aud = create_funcs->create_audio(ctx, i); if (aud == NULL) { @@ -1669,6 +1669,12 @@ static struct audio *find_first_free_audio( return pool->audios[i]; } } + + /* use engine id to find free audio */ + if ((id < pool->audio_count) && (res_ctx->is_audio_acquired[id] == false)) { + return pool->audios[id]; + } + /*not found the matching one, first come first serve*/ for (i = 0; i < pool->audio_count; i++) { if (res_ctx->is_audio_acquired[i] == false) { -- cgit v1.2.3 From 09fc26c1718fe7a552866d4eda84e2cc5f9c9c78 Mon Sep 17 00:00:00 2001 From: Fatemeh Darbehani Date: Fri, 21 Jun 2019 17:44:50 -0400 Subject: drm/amd/display: Change min_h_sync_width from 8 to 4 [Why] Some display's hsync width is lower than the minimum dcn20 is set to support right now. This will cause optc1_validate_timing to fail which eventually will result in wrong set mode. 
This was set to 8 as per HW team's request for no valid reason. [How] Changing min_h_sync_width to 4 will let us validate timing for preffered mode and light up the headset. This change was made to Vega 10 before for a similar issue. Signed-off-by: Fatemeh Darbehani Reviewed-by: Joshua Aberback Acked-by: Aric Cyr Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c index 26a66ccf6e72..1ae973962d53 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c @@ -535,7 +535,7 @@ void dcn20_timing_generator_init(struct optc *optc1) optc1->min_h_blank = 32; optc1->min_v_blank = 3; optc1->min_v_blank_interlace = 5; - optc1->min_h_sync_width = 8; + optc1->min_h_sync_width = 4;// Minimum HSYNC = 8 pixels asked By HW in the first place for no actual reason. Oculus Rift S will not light up with 8 as it's hsyncWidth is 6. Changing it to 4 to fix that issue. optc1->min_v_sync_width = 1; optc1->comb_opp_id = 0xf; } -- cgit v1.2.3 From 39fee5f60ce069cfba55fc3a8ba55faacae330b9 Mon Sep 17 00:00:00 2001 From: Alvin Lee Date: Mon, 24 Jun 2019 09:49:44 -0400 Subject: drm/amd/display: Wait for flip to complete [why] In pipe split issue occurs when we program immediate flip while vsync flip is pending [how] Don't program immediate flip until flip is no longer pending Signed-off-by: Alvin Lee Reviewed-by: Jaehyun Chung Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index e5e78aa930a6..d810c8940129 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -1265,6 +1265,17 @@ void dcn20_pipe_control_lock( if (pipe->plane_state != NULL) flip_immediate = pipe->plane_state->flip_immediate; + if (flip_immediate && lock) { + while (pipe->plane_res.hubp->funcs->hubp_is_flip_pending(pipe->plane_res.hubp)) { + udelay(1); + } + + if (pipe->bottom_pipe != NULL) + while (pipe->bottom_pipe->plane_res.hubp->funcs->hubp_is_flip_pending(pipe->bottom_pipe->plane_res.hubp)) { + udelay(1); + } + } + /* In flip immediate and pipe splitting case, we need to use GSL * for synchronization. Only do setup on locking and on flip type change. */ -- cgit v1.2.3 From feb7eb522e0a7a22c1e60d386bd3c3bfa1d5e4f7 Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Mon, 24 Jun 2019 18:18:58 -0400 Subject: drm/amd/display: put back front end initialization sequence [Why] Seamless boot optimization removed proper front end power off sequence. In driver disable enable case, this causes driver to power gate hubp and dpp while there is still memory fetching going on, this can cause invalid memory requests to be generated which will hang data fabric. 
[How] Put back proper front end power off sequence Signed-off-by: Eric Yang Reviewed-by: Anthony Koo Acked-by: Leo Li Acked-by: Tony Cheng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 0c4340404e24..2118ea21d7e9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -1195,16 +1195,7 @@ static void dcn10_init_hw(struct dc *dc) * everything down. */ if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) { - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct hubp *hubp = dc->res_pool->hubps[i]; - struct dpp *dpp = dc->res_pool->dpps[i]; - - hubp->funcs->hubp_init(hubp); - dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst; - plane_atomic_power_down(dc, dpp, hubp); - } - - apply_DEGVIDCN10_253_wa(dc); + dc->hwss.init_pipes(dc, dc->current_state); } for (i = 0; i < dc->res_pool->audio_count; i++) { @@ -1375,10 +1366,6 @@ static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx, return result; } - - - - static bool dcn10_set_output_transfer_func(struct pipe_ctx *pipe_ctx, const struct dc_stream_state *stream) -- cgit v1.2.3 From 67fd6c0d2de8e51e84ff3fa6e68bbd524f823e49 Mon Sep 17 00:00:00 2001 From: Derek Lai Date: Tue, 2 Jul 2019 17:50:41 +0800 Subject: drm/amd/display: allocate 4 ddc engines for RV2 [Why] Driver will create 0, 1, and 2 ddc engines for RV2, but some platforms used 0, 1, and 3. [How] Still allocate 4 ddc engines for RV2. Signed-off-by: Derek Lai Reviewed-by: Aric Cyr Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 1a20461c2937..a12530a3ab9c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -508,7 +508,7 @@ static const struct resource_caps rv2_res_cap = { .num_audio = 3, .num_stream_encoder = 3, .num_pll = 3, - .num_ddc = 3, + .num_ddc = 4, }; static const struct dc_plane_cap plane_cap = { -- cgit v1.2.3 From 0905f32977268149f06e3ce6ea4bd6d374dd891f Mon Sep 17 00:00:00 2001 From: Julian Parkin Date: Tue, 25 Jun 2019 14:55:53 -0400 Subject: drm/amd/display: Fix dc_create failure handling and 666 color depths [Why] It is possible (but very unlikely) that constructing dc fails before current_state is created. We support 666 color depth in some scenarios, but this isn't handled in get_norm_pix_clk. It uses exactly the same pixel clock as the 888 case. [How] Check for non null current_state before destructing. Add case for 666 color depth to get_norm_pix_clk to avoid assertion. 
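The color-depth half of the fix is a deliberate switch fall-through: 6 bpc shares the 8 bpc pixel clock. A compact sketch of that mapping (the 10 bpc scaling shown here is only for contrast and is illustrative):

#include <stdio.h>

enum color_depth { COLOR_DEPTH_666, COLOR_DEPTH_888, COLOR_DEPTH_101010 };

/* Normalize a pixel clock for a color depth; 6 bpc reuses the 8 bpc value. */
static int norm_pix_clk(int pix_clk, enum color_depth depth)
{
	switch (depth) {
	case COLOR_DEPTH_666: /* deliberate fall-through: same clock as 888 */
	case COLOR_DEPTH_888:
		return pix_clk;
	case COLOR_DEPTH_101010:
		return pix_clk * 30 / 24; /* 30 bits per pixel vs. 24 */
	default:
		return pix_clk;
	}
}

int main(void)
{
	printf("666: %d kHz\n", norm_pix_clk(148500, COLOR_DEPTH_666));
	printf("888: %d kHz\n", norm_pix_clk(148500, COLOR_DEPTH_888));
	return 0;
}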
Signed-off-by: Julian Parkin Reviewed-by: Charlene Liu Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 6 ++++-- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 1 + 2 files changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 4ef4dc63e221..fa20201eef3a 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -502,8 +502,10 @@ void dc_stream_set_static_screen_events(struct dc *dc, static void destruct(struct dc *dc) { - dc_release_state(dc->current_state); - dc->current_state = NULL; + if (dc->current_state) { + dc_release_state(dc->current_state); + dc->current_state = NULL; + } destroy_links(dc); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index a0e29c37ab69..87f97b3a4106 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1839,6 +1839,7 @@ static int get_norm_pix_clk(const struct dc_crtc_timing *timing) pix_clk /= 2; if (timing->pixel_encoding != PIXEL_ENCODING_YCBCR422) { switch (timing->display_color_depth) { + case COLOR_DEPTH_666: case COLOR_DEPTH_888: normalized_pix_clk = pix_clk; break; -- cgit v1.2.3 From 6ac25e6d5b2fbf251e9fa2f4131d42c815b43867 Mon Sep 17 00:00:00 2001 From: Alvin Lee Date: Thu, 4 Jul 2019 15:17:42 -0400 Subject: drm/amd/display: Only enable audio if speaker allocation exists [Why] In dm_helpers_parse_edid_caps, there is a corner case where no speakers can be allocated even though the audio mode count is greater than 0. Enabling audio when no speaker allocations exists can cause issues in the video stream. [How] Add a check to not enable audio unless one or more speaker allocations exist (since doing this can cause issues in the video stream). Signed-off-by: Alvin Lee Reviewed-by: Jun Lei Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 87f97b3a4106..2ceaab4fb5de 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1986,7 +1986,7 @@ enum dc_status resource_map_pool_resources( /* TODO: Add check if ASIC support and EDID audio */ if (!stream->converter_disable_audio && dc_is_audio_capable_signal(pipe_ctx->stream->signal) && - stream->audio_info.mode_count) { + stream->audio_info.mode_count && stream->audio_info.flags.all) { pipe_ctx->stream_res.audio = find_first_free_audio( &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id); -- cgit v1.2.3 From 58caae27df41d2ef3b1ae3bd06284da5b85ac288 Mon Sep 17 00:00:00 2001 From: Zhan Liu Date: Tue, 2 Jul 2019 15:17:07 -0400 Subject: drm/amd/display: drop ASSERT() if eDP panel is not connected [Why] For boards that support eDP but do not have a physical eDP display connected an ASSERT will be thrown. This is not a critical failure and shouldn't be treated as such. [How] Drop the assertion. 
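Dropping the assertion means the failed DPCD read is treated as an expected, recoverable condition rather than a programming error. A short illustration of that style of handling (not the driver code):

#include <stdio.h>

enum dc_status { DC_OK, DC_ERROR_UNEXPECTED };

static void handle_edp_read(enum dc_status status)
{
	if (status != DC_OK) {
		/* Expected on boards that wire up eDP but ship without a panel:
		 * log it and continue instead of asserting. */
		printf("eDP DPCD read failed (%d), likely no panel; continuing\n", status);
		return;
	}
	printf("eDP link settings read back OK\n");
}

int main(void)
{
	handle_edp_read(DC_ERROR_UNEXPECTED); /* no assert, no crash */
	handle_edp_read(DC_OK);
	return 0;
}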
Signed-off-by: Zhan Liu Reviewed-by: Nicholas Kazlauskas Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index f9bed7c65b43..909b8f03346e 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -554,8 +554,6 @@ static void read_edp_current_link_settings_on_detect(struct dc_link *link) msleep(8); } - ASSERT(status == DC_OK); - // Read DPCD 00100h to find if standard link rates are set core_link_read_dpcd(link, DP_LINK_BW_SET, &link_bw_set, sizeof(link_bw_set)); -- cgit v1.2.3 From 7352193a33dfc9b69ba3bf6a8caea925b96243b1 Mon Sep 17 00:00:00 2001 From: Tai Man Date: Fri, 28 Jun 2019 11:40:38 -0400 Subject: drm/amd/display: Increase size of audios array [Why] The audios array defined in "struct resource_pool" is only 6 (MAX_PIPES) but the max number of audio devices (num_audio) is 7. In some projects, it will run out of audios array. [How] Incraese the audios array size to 7. Signed-off-by: Tai Man Reviewed-by: Joshua Aberback Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/inc/core_types.h | 2 +- drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index c89393c19232..a148ffde8b12 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -212,7 +212,7 @@ struct resource_pool { struct clock_source *clock_sources[MAX_CLOCK_SOURCES]; unsigned int clk_src_count; - struct audio *audios[MAX_PIPES]; + struct audio *audios[MAX_AUDIOS]; unsigned int audio_count; struct audio_support audio_support; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h index 8759ec03aede..f82365e2d03c 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h @@ -34,6 +34,7 @@ * Data types shared between different Virtual HW blocks ******************************************************************************/ +#define MAX_AUDIOS 7 #define MAX_PIPES 6 #if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define MAX_DWB_PIPES 1 -- cgit v1.2.3 From b791f9dc2de4f9e66aae28451d1284d054542614 Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Sat, 29 Jun 2019 14:38:04 -0400 Subject: drm/amd/display: do not read link setting if edp not connected [Why] Previously assume eDP sink present if connector present. Do not need to enforce this restriction. Fix issue where driver attempt to read link setting even though no edp connected. {How] Only read link setting after reading connection status. 
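The reordering simply moves the DPCD read behind the connection check. A minimal sketch of the resulting control flow (the functions are stand-ins, not the dc_link API):

#include <stdio.h>
#include <stdbool.h>

/* Stand-ins for the connection-status query and the DPCD read. */
static bool edp_sink_present(void) { return false; }

static void read_current_link_settings(void)
{
	printf("reading DPCD link rate / lane count\n");
}

static void detect_edp(void)
{
	/* Read link settings only once detection confirms a sink is there;
	 * reading first (the old order) issues AUX traffic to nothing. */
	if (!edp_sink_present()) {
		printf("no eDP sink detected, skipping link-setting read\n");
		return;
	}
	read_current_link_settings();
}

int main(void)
{
	detect_edp();
	return 0;
}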
Signed-off-by: Eric Yang Reviewed-by: Yongqiang Sun Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 909b8f03346e..355b4ba12796 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -722,13 +722,6 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) return false; } - if (link->connector_signal == SIGNAL_TYPE_EDP) { - /* On detect, we want to make sure current link settings are - * up to date, especially if link was powered on by GOP. - */ - read_edp_current_link_settings_on_detect(link); - } - prev_sink = link->local_sink; if (prev_sink != NULL) { dc_sink_retain(prev_sink); @@ -770,6 +763,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) } case SIGNAL_TYPE_EDP: { + read_edp_current_link_settings_on_detect(link); detect_edp_sink_caps(link); sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX; -- cgit v1.2.3 From b70666934b41c081489d5ff3c5bf017796545d35 Mon Sep 17 00:00:00 2001 From: Dale Zhao Date: Wed, 10 Jul 2019 17:36:53 +0800 Subject: drm/amd/display: handle active dongle port type is DP++ or DP case [Why]: Some active dongles have DP++ port and DP port at the same time. Current code doesn't cover DP++ case and processes as default DVI case, in which audio is disabled. Because of dual mode, DP case is also treat as DVI case for the other port. [How]: According DP 1.4 spec, add DP++ procedure similar with HDMI case. Also add None dongle type for DP case. Signed-off-by: Dale Zhao Reviewed-by: Wenjing Liu Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 30 ++++++++++++++++-------- drivers/gpu/drm/amd/display/include/dpcd_defs.h | 2 +- 2 files changed, 21 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 3e00c88bd2b6..2c7aaed907b9 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -2247,8 +2247,8 @@ static void get_active_converter_info( case DOWNSTREAM_VGA: link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_VGA_CONVERTER; break; - case DOWNSTREAM_DVI_HDMI: - /* At this point we don't know is it DVI or HDMI, + case DOWNSTREAM_DVI_HDMI_DP_PLUS_PLUS: + /* At this point we don't know is it DVI or HDMI or DP++, * assume DVI.*/ link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_DVI_CONVERTER; break; @@ -2265,6 +2265,10 @@ static void get_active_converter_info( det_caps, sizeof(det_caps)); switch (port_caps->bits.DWN_STRM_PORTX_TYPE) { + /*Handle DP case as DONGLE_NONE*/ + case DOWN_STREAM_DETAILED_DP: + link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE; + break; case DOWN_STREAM_DETAILED_VGA: link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_VGA_CONVERTER; @@ -2274,6 +2278,8 @@ static void get_active_converter_info( DISPLAY_DONGLE_DP_DVI_CONVERTER; break; case DOWN_STREAM_DETAILED_HDMI: + case DOWN_STREAM_DETAILED_DP_PLUS_PLUS: + /*Handle DP++ active converter case, process DP++ case as HDMI case according DP1.4 spec*/ link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_HDMI_CONVERTER; @@ -2289,14 +2295,18 @@ static void get_active_converter_info( 
link->dpcd_caps.dongle_caps.is_dp_hdmi_s3d_converter = hdmi_caps.bits.FRAME_SEQ_TO_FRAME_PACK; - link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_pass_through = - hdmi_caps.bits.YCrCr422_PASS_THROUGH; - link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_pass_through = - hdmi_caps.bits.YCrCr420_PASS_THROUGH; - link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_converter = - hdmi_caps.bits.YCrCr422_CONVERSION; - link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_converter = - hdmi_caps.bits.YCrCr420_CONVERSION; + /*YCBCR capability only for HDMI case*/ + if (port_caps->bits.DWN_STRM_PORTX_TYPE + == DOWN_STREAM_DETAILED_HDMI) { + link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_pass_through = + hdmi_caps.bits.YCrCr422_PASS_THROUGH; + link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_pass_through = + hdmi_caps.bits.YCrCr420_PASS_THROUGH; + link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_converter = + hdmi_caps.bits.YCrCr422_CONVERSION; + link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_converter = + hdmi_caps.bits.YCrCr420_CONVERSION; + } link->dpcd_caps.dongle_caps.dp_hdmi_max_bpc = translate_dpcd_max_bpc( diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h index 1c66166d0a94..2c90d1b46c8b 100644 --- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h +++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h @@ -43,7 +43,7 @@ enum dpcd_revision { enum dpcd_downstream_port_type { DOWNSTREAM_DP = 0, DOWNSTREAM_VGA, - DOWNSTREAM_DVI_HDMI, + DOWNSTREAM_DVI_HDMI_DP_PLUS_PLUS,/* DVI, HDMI, DP++ */ DOWNSTREAM_NONDDC /* has no EDID (TV,CV) */ }; -- cgit v1.2.3 From 87dbad02d2254b741c71ce859c451fb1ae6f5340 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Mon, 17 Dec 2018 18:00:26 +0800 Subject: drm/amdgpu: add navi14 asic type Add CHIP_NAVI14 to the list of asic types. Signed-off-by: Xiaojie Yuan Reviewed-by: Alex Deucher Reviewed-by: Jack Xiao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 + include/drm/amd_asic_type.h | 1 + 2 files changed, 2 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 5a7f893cf724..ba87964c6515 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -99,6 +99,7 @@ static const char *amdgpu_asic_name[] = { "VEGA20", "RAVEN", "NAVI10", + "NAVI14", "LAST", }; diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h index bcc2bcf32886..0c4766af04af 100644 --- a/include/drm/amd_asic_type.h +++ b/include/drm/amd_asic_type.h @@ -50,6 +50,7 @@ enum amd_asic_type { CHIP_VEGA20, CHIP_RAVEN, CHIP_NAVI10, + CHIP_NAVI14, CHIP_LAST, }; -- cgit v1.2.3 From ed42cfe1ac905ebca2f59e4d9bed230e8e1599f5 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Mon, 17 Dec 2018 18:01:38 +0800 Subject: drm/amdgpu: add gpu_info firmware for navi14 Add navi14 to case statement to load the GPU info firmware. 
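The change is just one more arm in the chip-name switch that builds the firmware path. A self-contained sketch of that lookup (only the two names from this series are included; the helper itself is illustrative):

#include <stdio.h>

enum amd_asic_type { CHIP_NAVI10, CHIP_NAVI14, CHIP_LAST };

/* Build the gpu_info firmware path for a given ASIC. */
static int gpu_info_fw_name(enum amd_asic_type type, char *buf, size_t len)
{
	const char *chip_name;

	switch (type) {
	case CHIP_NAVI10:
		chip_name = "navi10";
		break;
	case CHIP_NAVI14:
		chip_name = "navi14";
		break;
	default:
		return -1;
	}

	snprintf(buf, len, "amdgpu/%s_gpu_info.bin", chip_name);
	return 0;
}

int main(void)
{
	char fw_name[64];

	if (gpu_info_fw_name(CHIP_NAVI14, fw_name, sizeof(fw_name)) == 0)
		printf("%s\n", fw_name); /* amdgpu/navi14_gpu_info.bin */
	return 0;
}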
Signed-off-by: Xiaojie Yuan Reviewed-by: Alex Deucher Reviewed-by: Jack Xiao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index ba87964c6515..52a66f82e845 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -71,6 +71,7 @@ MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin"); +MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin"); #define AMDGPU_RESUME_MS 2000 @@ -1388,6 +1389,9 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) case CHIP_NAVI10: chip_name = "navi10"; break; + case CHIP_NAVI14: + chip_name = "navi14"; + break; } snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name); -- cgit v1.2.3 From 7ecb5cd451b7bce8196bf40c6e10c5f266b3994d Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Mon, 17 Dec 2018 18:02:43 +0800 Subject: drm/amdgpu: set asic family and ip blocks for navi14 same with navi10 Signed-off-by: Xiaojie Yuan Reviewed-by: Alex Deucher Reviewed-by: Jack Xiao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 52a66f82e845..b0179094c8dc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1544,6 +1544,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) return r; break; case CHIP_NAVI10: + case CHIP_NAVI14: adev->family = AMDGPU_FAMILY_NV; r = nv_set_ip_blocks(adev); -- cgit v1.2.3 From e2d2607f90732e59153c5cb74a31b86259d83bc2 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Mon, 17 Dec 2018 18:04:19 +0800 Subject: drm/amdgpu: add navi14 ucode loading method Same as navi10. Signed-off-by: Xiaojie Yuan Reviewed-by: Alex Deucher Reviewed-by: Jack Xiao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index bfaa0eac3213..9e99736aa984 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -351,6 +351,7 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type) case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_NAVI10: + case CHIP_NAVI14: if (!load_type) return AMDGPU_FW_LOAD_DIRECT; else -- cgit v1.2.3 From a0f6d926f139d6fe20ca39f4a27ba0c51458bfd1 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Mon, 17 Dec 2018 18:24:03 +0800 Subject: drm/amdgpu/soc15: initialize reg base for navi14 (v2) Initialize the IP register base offsets for navi14. 
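The tables added below follow the SOC15-style layout: every IP block gets per-instance arrays of segment base offsets, and a register address is resolved as the base of an (instance, segment) pair plus the register's offset within that segment. A simplified sketch of that lookup (the struct layout mirrors the new header; the sample values and helper name are illustrative):

#include <stdio.h>
#include <stdint.h>

#define MAX_INSTANCE 7
#define MAX_SEGMENT  5

struct IP_BASE_INSTANCE { uint32_t segment[MAX_SEGMENT]; };
struct IP_BASE { struct IP_BASE_INSTANCE instance[MAX_INSTANCE]; };

/* One sample table in the style of navi14_ip_offset.h. */
static const struct IP_BASE GC_BASE = {
	{ { { 0x00001260, 0x0000A000, 0x02402C00, 0, 0 } } }
};

/* Resolve a dword register offset: base of (instance, segment) + register offset. */
static uint32_t reg_offset(const struct IP_BASE *ip, int inst, int seg, uint32_t reg)
{
	return ip->instance[inst].segment[seg] + reg;
}

int main(void)
{
	/* e.g. a register living in GC instance 0, segment 1 */
	printf("0x%08X\n", (unsigned int)reg_offset(&GC_BASE, 0, 1, 0x123));
	return 0;
}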
v2: squash in MP, CLK, THM updates Signed-off-by: Xiaojie Yuan Reviewed-by: Alex Deucher Reviewed-by: Jack Xiao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 2 +- drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c | 53 ++ drivers/gpu/drm/amd/amdgpu/nv.c | 3 + drivers/gpu/drm/amd/amdgpu/nv.h | 1 + drivers/gpu/drm/amd/include/navi14_ip_offset.h | 1119 ++++++++++++++++++++++++ 5 files changed, 1177 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c create mode 100644 drivers/gpu/drm/amd/include/navi14_ip_offset.h (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 56e084367b93..3f5329906fce 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -66,7 +66,7 @@ amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce amdgpu-y += \ vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \ - vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o navi10_reg_init.o + vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o navi10_reg_init.o navi14_reg_init.o # add DF block amdgpu-y += \ diff --git a/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c b/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c new file mode 100644 index 000000000000..28f3d6490649 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c @@ -0,0 +1,53 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "amdgpu.h" +#include "nv.h" + +#include "soc15_common.h" +#include "soc15_hw_ip.h" +#include "navi14_ip_offset.h" + +int navi14_reg_base_init(struct amdgpu_device *adev) +{ + /* HW has more IP blocks, only initialized the blocke needed by driver */ + uint32_t i; + for (i = 0 ; i < MAX_INSTANCE ; ++i) { + adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); + adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i])); + adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i])); + adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i])); + adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIF0_BASE.instance[i])); + adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i])); + adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i])); + adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(UVD0_BASE.instance[i])); + adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i])); + adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DMU_BASE.instance[i])); + adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i])); + adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); + adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); + adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i])); + adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i])); + adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i])); + } + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 662612f89c70..9f67739165b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -370,6 +370,9 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) case CHIP_NAVI10: navi10_reg_base_init(adev); break; + case CHIP_NAVI14: + navi14_reg_base_init(adev); + break; default: return -EINVAL; } diff --git a/drivers/gpu/drm/amd/amdgpu/nv.h b/drivers/gpu/drm/amd/amdgpu/nv.h index 639c54933cc5..332d5cdc308e 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.h +++ b/drivers/gpu/drm/amd/amdgpu/nv.h @@ -30,4 +30,5 @@ void nv_grbm_select(struct amdgpu_device *adev, u32 me, u32 pipe, u32 queue, u32 vmid); int nv_set_ip_blocks(struct amdgpu_device *adev); int navi10_reg_base_init(struct amdgpu_device *adev); +int navi14_reg_base_init(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/include/navi14_ip_offset.h b/drivers/gpu/drm/amd/include/navi14_ip_offset.h new file mode 100644 index 000000000000..ecdd9eabe0dc --- /dev/null +++ b/drivers/gpu/drm/amd/include/navi14_ip_offset.h @@ -0,0 +1,1119 @@ +/* + * Copyright (C) 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef _navi14_ip_offset_HEADER +#define _navi14_ip_offset_HEADER + +#define MAX_INSTANCE 7 +#define MAX_SEGMENT 5 + + +struct IP_BASE_INSTANCE +{ + unsigned int segment[MAX_SEGMENT]; +}; + +struct IP_BASE +{ + struct IP_BASE_INSTANCE instance[MAX_INSTANCE]; +}; + + +static const struct IP_BASE ATHUB_BASE ={ { { { 0x00000C00, 0x02408C00, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE CLK_BASE ={ { { { 0x00016C00, 0x02401800, 0, 0, 0 } }, + { { 0x00016E00, 0x02401C00, 0, 0, 0 } }, + { { 0x00017000, 0x02402000, 0, 0, 0 } }, + { { 0x00017200, 0x02402400, 0, 0, 0 } }, + { { 0x0001B000, 0x0242D800, 0, 0, 0 } }, + { { 0x00017E00, 0x0240BC00, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE DF_BASE ={ { { { 0x00007000, 0x0240B800, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE DIO_BASE ={ { { { 0x02404000, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE DMU_BASE ={ { { { 0x00000012, 0x000000C0, 0x000034C0, 0x00009000, 0x02403C00 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE DPCS_BASE ={ { { { 0x00000012, 0x000000C0, 0x000034C0, 0x00009000, 0x02403C00 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE FUSE_BASE ={ { { { 0x00017400, 0x02401400, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE GC_BASE ={ { { { 0x00001260, 0x0000A000, 0x02402C00, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE HDA_BASE ={ { { { 0x004C0000, 0x02404800, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE HDP_BASE ={ { { { 0x00000F20, 0x0240A400, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE MMHUB_BASE ={ { { { 0x0001A000, 0x02408800, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE MP0_BASE ={ { { { 0x00016000, 0x00DC0000, 0x00E00000, 0x00E40000, 0x0243FC00 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 
0, 0 } } } }; +static const struct IP_BASE MP1_BASE ={ { { { 0x00016000, 0x00DC0000, 0x00E00000, 0x00E40000, 0x0243FC00 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE NBIF0_BASE ={ { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0x0241B000 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE OSSSYS_BASE ={ { { { 0x000010A0, 0x0240A000, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE PCIE0_BASE ={ { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0x0241B000 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE SDMA_BASE ={ { { { 0x00001260, 0x0000A000, 0x02402C00, 0, 0 } }, + { { 0x00001260, 0x0000A000, 0x02402C00, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE SMUIO_BASE ={ { { { 0x00016800, 0x00016A00, 0x00440000, 0x02401000, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE THM_BASE ={ { { { 0x00016600, 0x02400C00, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE UMC_BASE ={ { { { 0x00014000, 0x02425800, 0, 0, 0 } }, + { { 0x00054000, 0x02425C00, 0, 0, 0 } }, + { { 0x00094000, 0x02426000, 0, 0, 0 } }, + { { 0x000D4000, 0x02426400, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE USB0_BASE ={ { { { 0x0242A800, 0x05B00000, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE UVD0_BASE ={ { { { 0x00007800, 0x00007E00, 0x02403000, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; + + +#define ATHUB_BASE__INST0_SEG0 0x00000C00 +#define ATHUB_BASE__INST0_SEG1 0x02408C00 +#define ATHUB_BASE__INST0_SEG2 0 +#define ATHUB_BASE__INST0_SEG3 0 +#define ATHUB_BASE__INST0_SEG4 0 + +#define ATHUB_BASE__INST1_SEG0 0 +#define ATHUB_BASE__INST1_SEG1 0 +#define ATHUB_BASE__INST1_SEG2 0 +#define ATHUB_BASE__INST1_SEG3 0 +#define ATHUB_BASE__INST1_SEG4 0 + +#define ATHUB_BASE__INST2_SEG0 0 +#define ATHUB_BASE__INST2_SEG1 0 +#define ATHUB_BASE__INST2_SEG2 0 +#define ATHUB_BASE__INST2_SEG3 0 +#define ATHUB_BASE__INST2_SEG4 0 + +#define ATHUB_BASE__INST3_SEG0 0 +#define ATHUB_BASE__INST3_SEG1 0 +#define ATHUB_BASE__INST3_SEG2 0 +#define ATHUB_BASE__INST3_SEG3 0 +#define ATHUB_BASE__INST3_SEG4 0 + +#define ATHUB_BASE__INST4_SEG0 0 +#define ATHUB_BASE__INST4_SEG1 0 +#define ATHUB_BASE__INST4_SEG2 0 +#define ATHUB_BASE__INST4_SEG3 0 +#define ATHUB_BASE__INST4_SEG4 0 + +#define ATHUB_BASE__INST5_SEG0 0 +#define ATHUB_BASE__INST5_SEG1 0 
+#define ATHUB_BASE__INST5_SEG2 0 +#define ATHUB_BASE__INST5_SEG3 0 +#define ATHUB_BASE__INST5_SEG4 0 + +#define ATHUB_BASE__INST6_SEG0 0 +#define ATHUB_BASE__INST6_SEG1 0 +#define ATHUB_BASE__INST6_SEG2 0 +#define ATHUB_BASE__INST6_SEG3 0 +#define ATHUB_BASE__INST6_SEG4 0 + +#define CLK_BASE__INST0_SEG0 0x00016C00 +#define CLK_BASE__INST0_SEG1 0x02401800 +#define CLK_BASE__INST0_SEG2 0 +#define CLK_BASE__INST0_SEG3 0 +#define CLK_BASE__INST0_SEG4 0 + +#define CLK_BASE__INST1_SEG0 0x00016E00 +#define CLK_BASE__INST1_SEG1 0x02401C00 +#define CLK_BASE__INST1_SEG2 0 +#define CLK_BASE__INST1_SEG3 0 +#define CLK_BASE__INST1_SEG4 0 + +#define CLK_BASE__INST2_SEG0 0x00017000 +#define CLK_BASE__INST2_SEG1 0x02402000 +#define CLK_BASE__INST2_SEG2 0 +#define CLK_BASE__INST2_SEG3 0 +#define CLK_BASE__INST2_SEG4 0 + +#define CLK_BASE__INST3_SEG0 0x00017200 +#define CLK_BASE__INST3_SEG1 0x02402400 +#define CLK_BASE__INST3_SEG2 0 +#define CLK_BASE__INST3_SEG3 0 +#define CLK_BASE__INST3_SEG4 0 + +#define CLK_BASE__INST4_SEG0 0x0001B000 +#define CLK_BASE__INST4_SEG1 0x0242D800 +#define CLK_BASE__INST4_SEG2 0 +#define CLK_BASE__INST4_SEG3 0 +#define CLK_BASE__INST4_SEG4 0 + +#define CLK_BASE__INST5_SEG0 0x00017E00 +#define CLK_BASE__INST5_SEG1 0x0240BC00 +#define CLK_BASE__INST5_SEG2 0 +#define CLK_BASE__INST5_SEG3 0 +#define CLK_BASE__INST5_SEG4 0 + +#define CLK_BASE__INST6_SEG0 0 +#define CLK_BASE__INST6_SEG1 0 +#define CLK_BASE__INST6_SEG2 0 +#define CLK_BASE__INST6_SEG3 0 +#define CLK_BASE__INST6_SEG4 0 + +#define DF_BASE__INST0_SEG0 0x00007000 +#define DF_BASE__INST0_SEG1 0x0240B800 +#define DF_BASE__INST0_SEG2 0 +#define DF_BASE__INST0_SEG3 0 +#define DF_BASE__INST0_SEG4 0 + +#define DF_BASE__INST1_SEG0 0 +#define DF_BASE__INST1_SEG1 0 +#define DF_BASE__INST1_SEG2 0 +#define DF_BASE__INST1_SEG3 0 +#define DF_BASE__INST1_SEG4 0 + +#define DF_BASE__INST2_SEG0 0 +#define DF_BASE__INST2_SEG1 0 +#define DF_BASE__INST2_SEG2 0 +#define DF_BASE__INST2_SEG3 0 +#define DF_BASE__INST2_SEG4 0 + +#define DF_BASE__INST3_SEG0 0 +#define DF_BASE__INST3_SEG1 0 +#define DF_BASE__INST3_SEG2 0 +#define DF_BASE__INST3_SEG3 0 +#define DF_BASE__INST3_SEG4 0 + +#define DF_BASE__INST4_SEG0 0 +#define DF_BASE__INST4_SEG1 0 +#define DF_BASE__INST4_SEG2 0 +#define DF_BASE__INST4_SEG3 0 +#define DF_BASE__INST4_SEG4 0 + +#define DF_BASE__INST5_SEG0 0 +#define DF_BASE__INST5_SEG1 0 +#define DF_BASE__INST5_SEG2 0 +#define DF_BASE__INST5_SEG3 0 +#define DF_BASE__INST5_SEG4 0 + +#define DF_BASE__INST6_SEG0 0 +#define DF_BASE__INST6_SEG1 0 +#define DF_BASE__INST6_SEG2 0 +#define DF_BASE__INST6_SEG3 0 +#define DF_BASE__INST6_SEG4 0 + +#define DIO_BASE__INST0_SEG0 0x02404000 +#define DIO_BASE__INST0_SEG1 0 +#define DIO_BASE__INST0_SEG2 0 +#define DIO_BASE__INST0_SEG3 0 +#define DIO_BASE__INST0_SEG4 0 + +#define DIO_BASE__INST1_SEG0 0 +#define DIO_BASE__INST1_SEG1 0 +#define DIO_BASE__INST1_SEG2 0 +#define DIO_BASE__INST1_SEG3 0 +#define DIO_BASE__INST1_SEG4 0 + +#define DIO_BASE__INST2_SEG0 0 +#define DIO_BASE__INST2_SEG1 0 +#define DIO_BASE__INST2_SEG2 0 +#define DIO_BASE__INST2_SEG3 0 +#define DIO_BASE__INST2_SEG4 0 + +#define DIO_BASE__INST3_SEG0 0 +#define DIO_BASE__INST3_SEG1 0 +#define DIO_BASE__INST3_SEG2 0 +#define DIO_BASE__INST3_SEG3 0 +#define DIO_BASE__INST3_SEG4 0 + +#define DIO_BASE__INST4_SEG0 0 +#define DIO_BASE__INST4_SEG1 0 +#define DIO_BASE__INST4_SEG2 0 +#define DIO_BASE__INST4_SEG3 0 +#define DIO_BASE__INST4_SEG4 0 + +#define DIO_BASE__INST5_SEG0 0 +#define DIO_BASE__INST5_SEG1 0 +#define DIO_BASE__INST5_SEG2 0 
+#define DIO_BASE__INST5_SEG3 0 +#define DIO_BASE__INST5_SEG4 0 + +#define DIO_BASE__INST6_SEG0 0 +#define DIO_BASE__INST6_SEG1 0 +#define DIO_BASE__INST6_SEG2 0 +#define DIO_BASE__INST6_SEG3 0 +#define DIO_BASE__INST6_SEG4 0 + +#define DMU_BASE__INST0_SEG0 0x00000012 +#define DMU_BASE__INST0_SEG1 0x000000C0 +#define DMU_BASE__INST0_SEG2 0x000034C0 +#define DMU_BASE__INST0_SEG3 0x00009000 +#define DMU_BASE__INST0_SEG4 0x02403C00 + +#define DMU_BASE__INST1_SEG0 0 +#define DMU_BASE__INST1_SEG1 0 +#define DMU_BASE__INST1_SEG2 0 +#define DMU_BASE__INST1_SEG3 0 +#define DMU_BASE__INST1_SEG4 0 + +#define DMU_BASE__INST2_SEG0 0 +#define DMU_BASE__INST2_SEG1 0 +#define DMU_BASE__INST2_SEG2 0 +#define DMU_BASE__INST2_SEG3 0 +#define DMU_BASE__INST2_SEG4 0 + +#define DMU_BASE__INST3_SEG0 0 +#define DMU_BASE__INST3_SEG1 0 +#define DMU_BASE__INST3_SEG2 0 +#define DMU_BASE__INST3_SEG3 0 +#define DMU_BASE__INST3_SEG4 0 + +#define DMU_BASE__INST4_SEG0 0 +#define DMU_BASE__INST4_SEG1 0 +#define DMU_BASE__INST4_SEG2 0 +#define DMU_BASE__INST4_SEG3 0 +#define DMU_BASE__INST4_SEG4 0 + +#define DMU_BASE__INST5_SEG0 0 +#define DMU_BASE__INST5_SEG1 0 +#define DMU_BASE__INST5_SEG2 0 +#define DMU_BASE__INST5_SEG3 0 +#define DMU_BASE__INST5_SEG4 0 + +#define DMU_BASE__INST6_SEG0 0 +#define DMU_BASE__INST6_SEG1 0 +#define DMU_BASE__INST6_SEG2 0 +#define DMU_BASE__INST6_SEG3 0 +#define DMU_BASE__INST6_SEG4 0 + +#define DPCS_BASE__INST0_SEG0 0x00000012 +#define DPCS_BASE__INST0_SEG1 0x000000C0 +#define DPCS_BASE__INST0_SEG2 0x000034C0 +#define DPCS_BASE__INST0_SEG3 0x00009000 +#define DPCS_BASE__INST0_SEG4 0x02403C00 + +#define DPCS_BASE__INST1_SEG0 0 +#define DPCS_BASE__INST1_SEG1 0 +#define DPCS_BASE__INST1_SEG2 0 +#define DPCS_BASE__INST1_SEG3 0 +#define DPCS_BASE__INST1_SEG4 0 + +#define DPCS_BASE__INST2_SEG0 0 +#define DPCS_BASE__INST2_SEG1 0 +#define DPCS_BASE__INST2_SEG2 0 +#define DPCS_BASE__INST2_SEG3 0 +#define DPCS_BASE__INST2_SEG4 0 + +#define DPCS_BASE__INST3_SEG0 0 +#define DPCS_BASE__INST3_SEG1 0 +#define DPCS_BASE__INST3_SEG2 0 +#define DPCS_BASE__INST3_SEG3 0 +#define DPCS_BASE__INST3_SEG4 0 + +#define DPCS_BASE__INST4_SEG0 0 +#define DPCS_BASE__INST4_SEG1 0 +#define DPCS_BASE__INST4_SEG2 0 +#define DPCS_BASE__INST4_SEG3 0 +#define DPCS_BASE__INST4_SEG4 0 + +#define DPCS_BASE__INST5_SEG0 0 +#define DPCS_BASE__INST5_SEG1 0 +#define DPCS_BASE__INST5_SEG2 0 +#define DPCS_BASE__INST5_SEG3 0 +#define DPCS_BASE__INST5_SEG4 0 + +#define DPCS_BASE__INST6_SEG0 0 +#define DPCS_BASE__INST6_SEG1 0 +#define DPCS_BASE__INST6_SEG2 0 +#define DPCS_BASE__INST6_SEG3 0 +#define DPCS_BASE__INST6_SEG4 0 + +#define FUSE_BASE__INST0_SEG0 0x00017400 +#define FUSE_BASE__INST0_SEG1 0x02401400 +#define FUSE_BASE__INST0_SEG2 0 +#define FUSE_BASE__INST0_SEG3 0 +#define FUSE_BASE__INST0_SEG4 0 + +#define FUSE_BASE__INST1_SEG0 0 +#define FUSE_BASE__INST1_SEG1 0 +#define FUSE_BASE__INST1_SEG2 0 +#define FUSE_BASE__INST1_SEG3 0 +#define FUSE_BASE__INST1_SEG4 0 + +#define FUSE_BASE__INST2_SEG0 0 +#define FUSE_BASE__INST2_SEG1 0 +#define FUSE_BASE__INST2_SEG2 0 +#define FUSE_BASE__INST2_SEG3 0 +#define FUSE_BASE__INST2_SEG4 0 + +#define FUSE_BASE__INST3_SEG0 0 +#define FUSE_BASE__INST3_SEG1 0 +#define FUSE_BASE__INST3_SEG2 0 +#define FUSE_BASE__INST3_SEG3 0 +#define FUSE_BASE__INST3_SEG4 0 + +#define FUSE_BASE__INST4_SEG0 0 +#define FUSE_BASE__INST4_SEG1 0 +#define FUSE_BASE__INST4_SEG2 0 +#define FUSE_BASE__INST4_SEG3 0 +#define FUSE_BASE__INST4_SEG4 0 + +#define FUSE_BASE__INST5_SEG0 0 +#define FUSE_BASE__INST5_SEG1 0 +#define 
FUSE_BASE__INST5_SEG2 0 +#define FUSE_BASE__INST5_SEG3 0 +#define FUSE_BASE__INST5_SEG4 0 + +#define FUSE_BASE__INST6_SEG0 0 +#define FUSE_BASE__INST6_SEG1 0 +#define FUSE_BASE__INST6_SEG2 0 +#define FUSE_BASE__INST6_SEG3 0 +#define FUSE_BASE__INST6_SEG4 0 + +#define GC_BASE__INST0_SEG0 0x00001260 +#define GC_BASE__INST0_SEG1 0x0000A000 +#define GC_BASE__INST0_SEG2 0x02402C00 +#define GC_BASE__INST0_SEG3 0 +#define GC_BASE__INST0_SEG4 0 + +#define GC_BASE__INST1_SEG0 0 +#define GC_BASE__INST1_SEG1 0 +#define GC_BASE__INST1_SEG2 0 +#define GC_BASE__INST1_SEG3 0 +#define GC_BASE__INST1_SEG4 0 + +#define GC_BASE__INST2_SEG0 0 +#define GC_BASE__INST2_SEG1 0 +#define GC_BASE__INST2_SEG2 0 +#define GC_BASE__INST2_SEG3 0 +#define GC_BASE__INST2_SEG4 0 + +#define GC_BASE__INST3_SEG0 0 +#define GC_BASE__INST3_SEG1 0 +#define GC_BASE__INST3_SEG2 0 +#define GC_BASE__INST3_SEG3 0 +#define GC_BASE__INST3_SEG4 0 + +#define GC_BASE__INST4_SEG0 0 +#define GC_BASE__INST4_SEG1 0 +#define GC_BASE__INST4_SEG2 0 +#define GC_BASE__INST4_SEG3 0 +#define GC_BASE__INST4_SEG4 0 + +#define GC_BASE__INST5_SEG0 0 +#define GC_BASE__INST5_SEG1 0 +#define GC_BASE__INST5_SEG2 0 +#define GC_BASE__INST5_SEG3 0 +#define GC_BASE__INST5_SEG4 0 + +#define GC_BASE__INST6_SEG0 0 +#define GC_BASE__INST6_SEG1 0 +#define GC_BASE__INST6_SEG2 0 +#define GC_BASE__INST6_SEG3 0 +#define GC_BASE__INST6_SEG4 0 + +#define HDA_BASE__INST0_SEG0 0x004C0000 +#define HDA_BASE__INST0_SEG1 0x02404800 +#define HDA_BASE__INST0_SEG2 0 +#define HDA_BASE__INST0_SEG3 0 +#define HDA_BASE__INST0_SEG4 0 + +#define HDA_BASE__INST1_SEG0 0 +#define HDA_BASE__INST1_SEG1 0 +#define HDA_BASE__INST1_SEG2 0 +#define HDA_BASE__INST1_SEG3 0 +#define HDA_BASE__INST1_SEG4 0 + +#define HDA_BASE__INST2_SEG0 0 +#define HDA_BASE__INST2_SEG1 0 +#define HDA_BASE__INST2_SEG2 0 +#define HDA_BASE__INST2_SEG3 0 +#define HDA_BASE__INST2_SEG4 0 + +#define HDA_BASE__INST3_SEG0 0 +#define HDA_BASE__INST3_SEG1 0 +#define HDA_BASE__INST3_SEG2 0 +#define HDA_BASE__INST3_SEG3 0 +#define HDA_BASE__INST3_SEG4 0 + +#define HDA_BASE__INST4_SEG0 0 +#define HDA_BASE__INST4_SEG1 0 +#define HDA_BASE__INST4_SEG2 0 +#define HDA_BASE__INST4_SEG3 0 +#define HDA_BASE__INST4_SEG4 0 + +#define HDA_BASE__INST5_SEG0 0 +#define HDA_BASE__INST5_SEG1 0 +#define HDA_BASE__INST5_SEG2 0 +#define HDA_BASE__INST5_SEG3 0 +#define HDA_BASE__INST5_SEG4 0 + +#define HDA_BASE__INST6_SEG0 0 +#define HDA_BASE__INST6_SEG1 0 +#define HDA_BASE__INST6_SEG2 0 +#define HDA_BASE__INST6_SEG3 0 +#define HDA_BASE__INST6_SEG4 0 + +#define HDP_BASE__INST0_SEG0 0x00000F20 +#define HDP_BASE__INST0_SEG1 0x0240A400 +#define HDP_BASE__INST0_SEG2 0 +#define HDP_BASE__INST0_SEG3 0 +#define HDP_BASE__INST0_SEG4 0 + +#define HDP_BASE__INST1_SEG0 0 +#define HDP_BASE__INST1_SEG1 0 +#define HDP_BASE__INST1_SEG2 0 +#define HDP_BASE__INST1_SEG3 0 +#define HDP_BASE__INST1_SEG4 0 + +#define HDP_BASE__INST2_SEG0 0 +#define HDP_BASE__INST2_SEG1 0 +#define HDP_BASE__INST2_SEG2 0 +#define HDP_BASE__INST2_SEG3 0 +#define HDP_BASE__INST2_SEG4 0 + +#define HDP_BASE__INST3_SEG0 0 +#define HDP_BASE__INST3_SEG1 0 +#define HDP_BASE__INST3_SEG2 0 +#define HDP_BASE__INST3_SEG3 0 +#define HDP_BASE__INST3_SEG4 0 + +#define HDP_BASE__INST4_SEG0 0 +#define HDP_BASE__INST4_SEG1 0 +#define HDP_BASE__INST4_SEG2 0 +#define HDP_BASE__INST4_SEG3 0 +#define HDP_BASE__INST4_SEG4 0 + +#define HDP_BASE__INST5_SEG0 0 +#define HDP_BASE__INST5_SEG1 0 +#define HDP_BASE__INST5_SEG2 0 +#define HDP_BASE__INST5_SEG3 0 +#define HDP_BASE__INST5_SEG4 0 + +#define 
HDP_BASE__INST6_SEG0 0 +#define HDP_BASE__INST6_SEG1 0 +#define HDP_BASE__INST6_SEG2 0 +#define HDP_BASE__INST6_SEG3 0 +#define HDP_BASE__INST6_SEG4 0 + +#define MMHUB_BASE__INST0_SEG0 0x0001A000 +#define MMHUB_BASE__INST0_SEG1 0x02408800 +#define MMHUB_BASE__INST0_SEG2 0 +#define MMHUB_BASE__INST0_SEG3 0 +#define MMHUB_BASE__INST0_SEG4 0 + +#define MMHUB_BASE__INST1_SEG0 0 +#define MMHUB_BASE__INST1_SEG1 0 +#define MMHUB_BASE__INST1_SEG2 0 +#define MMHUB_BASE__INST1_SEG3 0 +#define MMHUB_BASE__INST1_SEG4 0 + +#define MMHUB_BASE__INST2_SEG0 0 +#define MMHUB_BASE__INST2_SEG1 0 +#define MMHUB_BASE__INST2_SEG2 0 +#define MMHUB_BASE__INST2_SEG3 0 +#define MMHUB_BASE__INST2_SEG4 0 + +#define MMHUB_BASE__INST3_SEG0 0 +#define MMHUB_BASE__INST3_SEG1 0 +#define MMHUB_BASE__INST3_SEG2 0 +#define MMHUB_BASE__INST3_SEG3 0 +#define MMHUB_BASE__INST3_SEG4 0 + +#define MMHUB_BASE__INST4_SEG0 0 +#define MMHUB_BASE__INST4_SEG1 0 +#define MMHUB_BASE__INST4_SEG2 0 +#define MMHUB_BASE__INST4_SEG3 0 +#define MMHUB_BASE__INST4_SEG4 0 + +#define MMHUB_BASE__INST5_SEG0 0 +#define MMHUB_BASE__INST5_SEG1 0 +#define MMHUB_BASE__INST5_SEG2 0 +#define MMHUB_BASE__INST5_SEG3 0 +#define MMHUB_BASE__INST5_SEG4 0 + +#define MMHUB_BASE__INST6_SEG0 0 +#define MMHUB_BASE__INST6_SEG1 0 +#define MMHUB_BASE__INST6_SEG2 0 +#define MMHUB_BASE__INST6_SEG3 0 +#define MMHUB_BASE__INST6_SEG4 0 + +#define MP0_BASE__INST0_SEG0 0x00016000 +#define MP0_BASE__INST0_SEG1 0x00DC0000 +#define MP0_BASE__INST0_SEG2 0x00E00000 +#define MP0_BASE__INST0_SEG3 0x00E40000 +#define MP0_BASE__INST0_SEG4 0x0243FC00 + +#define MP0_BASE__INST1_SEG0 0 +#define MP0_BASE__INST1_SEG1 0 +#define MP0_BASE__INST1_SEG2 0 +#define MP0_BASE__INST1_SEG3 0 +#define MP0_BASE__INST1_SEG4 0 + +#define MP0_BASE__INST2_SEG0 0 +#define MP0_BASE__INST2_SEG1 0 +#define MP0_BASE__INST2_SEG2 0 +#define MP0_BASE__INST2_SEG3 0 +#define MP0_BASE__INST2_SEG4 0 + +#define MP0_BASE__INST3_SEG0 0 +#define MP0_BASE__INST3_SEG1 0 +#define MP0_BASE__INST3_SEG2 0 +#define MP0_BASE__INST3_SEG3 0 +#define MP0_BASE__INST3_SEG4 0 + +#define MP0_BASE__INST4_SEG0 0 +#define MP0_BASE__INST4_SEG1 0 +#define MP0_BASE__INST4_SEG2 0 +#define MP0_BASE__INST4_SEG3 0 +#define MP0_BASE__INST4_SEG4 0 + +#define MP0_BASE__INST5_SEG0 0 +#define MP0_BASE__INST5_SEG1 0 +#define MP0_BASE__INST5_SEG2 0 +#define MP0_BASE__INST5_SEG3 0 +#define MP0_BASE__INST5_SEG4 0 + +#define MP0_BASE__INST6_SEG0 0 +#define MP0_BASE__INST6_SEG1 0 +#define MP0_BASE__INST6_SEG2 0 +#define MP0_BASE__INST6_SEG3 0 +#define MP0_BASE__INST6_SEG4 0 + +#define MP1_BASE__INST0_SEG0 0x00016000 +#define MP1_BASE__INST0_SEG1 0x00DC0000 +#define MP1_BASE__INST0_SEG2 0x00E00000 +#define MP1_BASE__INST0_SEG3 0x00E40000 +#define MP1_BASE__INST0_SEG4 0x0243FC00 + +#define MP1_BASE__INST1_SEG0 0 +#define MP1_BASE__INST1_SEG1 0 +#define MP1_BASE__INST1_SEG2 0 +#define MP1_BASE__INST1_SEG3 0 +#define MP1_BASE__INST1_SEG4 0 + +#define MP1_BASE__INST2_SEG0 0 +#define MP1_BASE__INST2_SEG1 0 +#define MP1_BASE__INST2_SEG2 0 +#define MP1_BASE__INST2_SEG3 0 +#define MP1_BASE__INST2_SEG4 0 + +#define MP1_BASE__INST3_SEG0 0 +#define MP1_BASE__INST3_SEG1 0 +#define MP1_BASE__INST3_SEG2 0 +#define MP1_BASE__INST3_SEG3 0 +#define MP1_BASE__INST3_SEG4 0 + +#define MP1_BASE__INST4_SEG0 0 +#define MP1_BASE__INST4_SEG1 0 +#define MP1_BASE__INST4_SEG2 0 +#define MP1_BASE__INST4_SEG3 0 +#define MP1_BASE__INST4_SEG4 0 + +#define MP1_BASE__INST5_SEG0 0 +#define MP1_BASE__INST5_SEG1 0 +#define MP1_BASE__INST5_SEG2 0 +#define MP1_BASE__INST5_SEG3 0 +#define 
MP1_BASE__INST5_SEG4 0 + +#define MP1_BASE__INST6_SEG0 0 +#define MP1_BASE__INST6_SEG1 0 +#define MP1_BASE__INST6_SEG2 0 +#define MP1_BASE__INST6_SEG3 0 +#define MP1_BASE__INST6_SEG4 0 + +#define NBIF0_BASE__INST0_SEG0 0x00000000 +#define NBIF0_BASE__INST0_SEG1 0x00000014 +#define NBIF0_BASE__INST0_SEG2 0x00000D20 +#define NBIF0_BASE__INST0_SEG3 0x00010400 +#define NBIF0_BASE__INST0_SEG4 0x0241B000 + +#define NBIF0_BASE__INST1_SEG0 0 +#define NBIF0_BASE__INST1_SEG1 0 +#define NBIF0_BASE__INST1_SEG2 0 +#define NBIF0_BASE__INST1_SEG3 0 +#define NBIF0_BASE__INST1_SEG4 0 + +#define NBIF0_BASE__INST2_SEG0 0 +#define NBIF0_BASE__INST2_SEG1 0 +#define NBIF0_BASE__INST2_SEG2 0 +#define NBIF0_BASE__INST2_SEG3 0 +#define NBIF0_BASE__INST2_SEG4 0 + +#define NBIF0_BASE__INST3_SEG0 0 +#define NBIF0_BASE__INST3_SEG1 0 +#define NBIF0_BASE__INST3_SEG2 0 +#define NBIF0_BASE__INST3_SEG3 0 +#define NBIF0_BASE__INST3_SEG4 0 + +#define NBIF0_BASE__INST4_SEG0 0 +#define NBIF0_BASE__INST4_SEG1 0 +#define NBIF0_BASE__INST4_SEG2 0 +#define NBIF0_BASE__INST4_SEG3 0 +#define NBIF0_BASE__INST4_SEG4 0 + +#define NBIF0_BASE__INST5_SEG0 0 +#define NBIF0_BASE__INST5_SEG1 0 +#define NBIF0_BASE__INST5_SEG2 0 +#define NBIF0_BASE__INST5_SEG3 0 +#define NBIF0_BASE__INST5_SEG4 0 + +#define NBIF0_BASE__INST6_SEG0 0 +#define NBIF0_BASE__INST6_SEG1 0 +#define NBIF0_BASE__INST6_SEG2 0 +#define NBIF0_BASE__INST6_SEG3 0 +#define NBIF0_BASE__INST6_SEG4 0 + +#define OSSSYS_BASE__INST0_SEG0 0x000010A0 +#define OSSSYS_BASE__INST0_SEG1 0x0240A000 +#define OSSSYS_BASE__INST0_SEG2 0 +#define OSSSYS_BASE__INST0_SEG3 0 +#define OSSSYS_BASE__INST0_SEG4 0 + +#define OSSSYS_BASE__INST1_SEG0 0 +#define OSSSYS_BASE__INST1_SEG1 0 +#define OSSSYS_BASE__INST1_SEG2 0 +#define OSSSYS_BASE__INST1_SEG3 0 +#define OSSSYS_BASE__INST1_SEG4 0 + +#define OSSSYS_BASE__INST2_SEG0 0 +#define OSSSYS_BASE__INST2_SEG1 0 +#define OSSSYS_BASE__INST2_SEG2 0 +#define OSSSYS_BASE__INST2_SEG3 0 +#define OSSSYS_BASE__INST2_SEG4 0 + +#define OSSSYS_BASE__INST3_SEG0 0 +#define OSSSYS_BASE__INST3_SEG1 0 +#define OSSSYS_BASE__INST3_SEG2 0 +#define OSSSYS_BASE__INST3_SEG3 0 +#define OSSSYS_BASE__INST3_SEG4 0 + +#define OSSSYS_BASE__INST4_SEG0 0 +#define OSSSYS_BASE__INST4_SEG1 0 +#define OSSSYS_BASE__INST4_SEG2 0 +#define OSSSYS_BASE__INST4_SEG3 0 +#define OSSSYS_BASE__INST4_SEG4 0 + +#define OSSSYS_BASE__INST5_SEG0 0 +#define OSSSYS_BASE__INST5_SEG1 0 +#define OSSSYS_BASE__INST5_SEG2 0 +#define OSSSYS_BASE__INST5_SEG3 0 +#define OSSSYS_BASE__INST5_SEG4 0 + +#define OSSSYS_BASE__INST6_SEG0 0 +#define OSSSYS_BASE__INST6_SEG1 0 +#define OSSSYS_BASE__INST6_SEG2 0 +#define OSSSYS_BASE__INST6_SEG3 0 +#define OSSSYS_BASE__INST6_SEG4 0 + +#define PCIE0_BASE__INST0_SEG0 0x00000000 +#define PCIE0_BASE__INST0_SEG1 0x00000014 +#define PCIE0_BASE__INST0_SEG2 0x00000D20 +#define PCIE0_BASE__INST0_SEG3 0x00010400 +#define PCIE0_BASE__INST0_SEG4 0x0241B000 + +#define PCIE0_BASE__INST1_SEG0 0 +#define PCIE0_BASE__INST1_SEG1 0 +#define PCIE0_BASE__INST1_SEG2 0 +#define PCIE0_BASE__INST1_SEG3 0 +#define PCIE0_BASE__INST1_SEG4 0 + +#define PCIE0_BASE__INST2_SEG0 0 +#define PCIE0_BASE__INST2_SEG1 0 +#define PCIE0_BASE__INST2_SEG2 0 +#define PCIE0_BASE__INST2_SEG3 0 +#define PCIE0_BASE__INST2_SEG4 0 + +#define PCIE0_BASE__INST3_SEG0 0 +#define PCIE0_BASE__INST3_SEG1 0 +#define PCIE0_BASE__INST3_SEG2 0 +#define PCIE0_BASE__INST3_SEG3 0 +#define PCIE0_BASE__INST3_SEG4 0 + +#define PCIE0_BASE__INST4_SEG0 0 +#define PCIE0_BASE__INST4_SEG1 0 +#define PCIE0_BASE__INST4_SEG2 0 +#define 
PCIE0_BASE__INST4_SEG3 0 +#define PCIE0_BASE__INST4_SEG4 0 + +#define PCIE0_BASE__INST5_SEG0 0 +#define PCIE0_BASE__INST5_SEG1 0 +#define PCIE0_BASE__INST5_SEG2 0 +#define PCIE0_BASE__INST5_SEG3 0 +#define PCIE0_BASE__INST5_SEG4 0 + +#define PCIE0_BASE__INST6_SEG0 0 +#define PCIE0_BASE__INST6_SEG1 0 +#define PCIE0_BASE__INST6_SEG2 0 +#define PCIE0_BASE__INST6_SEG3 0 +#define PCIE0_BASE__INST6_SEG4 0 + +#define SDMA_BASE__INST0_SEG0 0x00001260 +#define SDMA_BASE__INST0_SEG1 0x0000A000 +#define SDMA_BASE__INST0_SEG2 0x02402C00 +#define SDMA_BASE__INST0_SEG3 0 +#define SDMA_BASE__INST0_SEG4 0 + +#define SDMA_BASE__INST1_SEG0 0x00001260 +#define SDMA_BASE__INST1_SEG1 0x0000A000 +#define SDMA_BASE__INST1_SEG2 0x02402C00 +#define SDMA_BASE__INST1_SEG3 0 +#define SDMA_BASE__INST1_SEG4 0 + +#define SDMA_BASE__INST2_SEG0 0 +#define SDMA_BASE__INST2_SEG1 0 +#define SDMA_BASE__INST2_SEG2 0 +#define SDMA_BASE__INST2_SEG3 0 +#define SDMA_BASE__INST2_SEG4 0 + +#define SDMA_BASE__INST3_SEG0 0 +#define SDMA_BASE__INST3_SEG1 0 +#define SDMA_BASE__INST3_SEG2 0 +#define SDMA_BASE__INST3_SEG3 0 +#define SDMA_BASE__INST3_SEG4 0 + +#define SDMA_BASE__INST4_SEG0 0 +#define SDMA_BASE__INST4_SEG1 0 +#define SDMA_BASE__INST4_SEG2 0 +#define SDMA_BASE__INST4_SEG3 0 +#define SDMA_BASE__INST4_SEG4 0 + +#define SDMA_BASE__INST5_SEG0 0 +#define SDMA_BASE__INST5_SEG1 0 +#define SDMA_BASE__INST5_SEG2 0 +#define SDMA_BASE__INST5_SEG3 0 +#define SDMA_BASE__INST5_SEG4 0 + +#define SDMA_BASE__INST6_SEG0 0 +#define SDMA_BASE__INST6_SEG1 0 +#define SDMA_BASE__INST6_SEG2 0 +#define SDMA_BASE__INST6_SEG3 0 +#define SDMA_BASE__INST6_SEG4 0 + +#define SMUIO_BASE__INST0_SEG0 0x00016800 +#define SMUIO_BASE__INST0_SEG1 0x00016A00 +#define SMUIO_BASE__INST0_SEG2 0x00440000 +#define SMUIO_BASE__INST0_SEG3 0x02401000 +#define SMUIO_BASE__INST0_SEG4 0 + +#define SMUIO_BASE__INST1_SEG0 0 +#define SMUIO_BASE__INST1_SEG1 0 +#define SMUIO_BASE__INST1_SEG2 0 +#define SMUIO_BASE__INST1_SEG3 0 +#define SMUIO_BASE__INST1_SEG4 0 + +#define SMUIO_BASE__INST2_SEG0 0 +#define SMUIO_BASE__INST2_SEG1 0 +#define SMUIO_BASE__INST2_SEG2 0 +#define SMUIO_BASE__INST2_SEG3 0 +#define SMUIO_BASE__INST2_SEG4 0 + +#define SMUIO_BASE__INST3_SEG0 0 +#define SMUIO_BASE__INST3_SEG1 0 +#define SMUIO_BASE__INST3_SEG2 0 +#define SMUIO_BASE__INST3_SEG3 0 +#define SMUIO_BASE__INST3_SEG4 0 + +#define SMUIO_BASE__INST4_SEG0 0 +#define SMUIO_BASE__INST4_SEG1 0 +#define SMUIO_BASE__INST4_SEG2 0 +#define SMUIO_BASE__INST4_SEG3 0 +#define SMUIO_BASE__INST4_SEG4 0 + +#define SMUIO_BASE__INST5_SEG0 0 +#define SMUIO_BASE__INST5_SEG1 0 +#define SMUIO_BASE__INST5_SEG2 0 +#define SMUIO_BASE__INST5_SEG3 0 +#define SMUIO_BASE__INST5_SEG4 0 + +#define SMUIO_BASE__INST6_SEG0 0 +#define SMUIO_BASE__INST6_SEG1 0 +#define SMUIO_BASE__INST6_SEG2 0 +#define SMUIO_BASE__INST6_SEG3 0 +#define SMUIO_BASE__INST6_SEG4 0 + +#define THM_BASE__INST0_SEG0 0x00016600 +#define THM_BASE__INST0_SEG1 0x02400C00 +#define THM_BASE__INST0_SEG2 0 +#define THM_BASE__INST0_SEG3 0 +#define THM_BASE__INST0_SEG4 0 + +#define THM_BASE__INST1_SEG0 0 +#define THM_BASE__INST1_SEG1 0 +#define THM_BASE__INST1_SEG2 0 +#define THM_BASE__INST1_SEG3 0 +#define THM_BASE__INST1_SEG4 0 + +#define THM_BASE__INST2_SEG0 0 +#define THM_BASE__INST2_SEG1 0 +#define THM_BASE__INST2_SEG2 0 +#define THM_BASE__INST2_SEG3 0 +#define THM_BASE__INST2_SEG4 0 + +#define THM_BASE__INST3_SEG0 0 +#define THM_BASE__INST3_SEG1 0 +#define THM_BASE__INST3_SEG2 0 +#define THM_BASE__INST3_SEG3 0 +#define THM_BASE__INST3_SEG4 0 + +#define 
THM_BASE__INST4_SEG0 0 +#define THM_BASE__INST4_SEG1 0 +#define THM_BASE__INST4_SEG2 0 +#define THM_BASE__INST4_SEG3 0 +#define THM_BASE__INST4_SEG4 0 + +#define THM_BASE__INST5_SEG0 0 +#define THM_BASE__INST5_SEG1 0 +#define THM_BASE__INST5_SEG2 0 +#define THM_BASE__INST5_SEG3 0 +#define THM_BASE__INST5_SEG4 0 + +#define THM_BASE__INST6_SEG0 0 +#define THM_BASE__INST6_SEG1 0 +#define THM_BASE__INST6_SEG2 0 +#define THM_BASE__INST6_SEG3 0 +#define THM_BASE__INST6_SEG4 0 + +#define UMC_BASE__INST0_SEG0 0x00014000 +#define UMC_BASE__INST0_SEG1 0x02425800 +#define UMC_BASE__INST0_SEG2 0 +#define UMC_BASE__INST0_SEG3 0 +#define UMC_BASE__INST0_SEG4 0 + +#define UMC_BASE__INST1_SEG0 0x00054000 +#define UMC_BASE__INST1_SEG1 0x02425C00 +#define UMC_BASE__INST1_SEG2 0 +#define UMC_BASE__INST1_SEG3 0 +#define UMC_BASE__INST1_SEG4 0 + +#define UMC_BASE__INST2_SEG0 0x00094000 +#define UMC_BASE__INST2_SEG1 0x02426000 +#define UMC_BASE__INST2_SEG2 0 +#define UMC_BASE__INST2_SEG3 0 +#define UMC_BASE__INST2_SEG4 0 + +#define UMC_BASE__INST3_SEG0 0x000D4000 +#define UMC_BASE__INST3_SEG1 0x02426400 +#define UMC_BASE__INST3_SEG2 0 +#define UMC_BASE__INST3_SEG3 0 +#define UMC_BASE__INST3_SEG4 0 + +#define UMC_BASE__INST4_SEG0 0 +#define UMC_BASE__INST4_SEG1 0 +#define UMC_BASE__INST4_SEG2 0 +#define UMC_BASE__INST4_SEG3 0 +#define UMC_BASE__INST4_SEG4 0 + +#define UMC_BASE__INST5_SEG0 0 +#define UMC_BASE__INST5_SEG1 0 +#define UMC_BASE__INST5_SEG2 0 +#define UMC_BASE__INST5_SEG3 0 +#define UMC_BASE__INST5_SEG4 0 + +#define UMC_BASE__INST6_SEG0 0 +#define UMC_BASE__INST6_SEG1 0 +#define UMC_BASE__INST6_SEG2 0 +#define UMC_BASE__INST6_SEG3 0 +#define UMC_BASE__INST6_SEG4 0 + +#define USB0_BASE__INST0_SEG0 0x0242A800 +#define USB0_BASE__INST0_SEG1 0x05B00000 +#define USB0_BASE__INST0_SEG2 0 +#define USB0_BASE__INST0_SEG3 0 +#define USB0_BASE__INST0_SEG4 0 + +#define USB0_BASE__INST1_SEG0 0 +#define USB0_BASE__INST1_SEG1 0 +#define USB0_BASE__INST1_SEG2 0 +#define USB0_BASE__INST1_SEG3 0 +#define USB0_BASE__INST1_SEG4 0 + +#define USB0_BASE__INST2_SEG0 0 +#define USB0_BASE__INST2_SEG1 0 +#define USB0_BASE__INST2_SEG2 0 +#define USB0_BASE__INST2_SEG3 0 +#define USB0_BASE__INST2_SEG4 0 + +#define USB0_BASE__INST3_SEG0 0 +#define USB0_BASE__INST3_SEG1 0 +#define USB0_BASE__INST3_SEG2 0 +#define USB0_BASE__INST3_SEG3 0 +#define USB0_BASE__INST3_SEG4 0 + +#define USB0_BASE__INST4_SEG0 0 +#define USB0_BASE__INST4_SEG1 0 +#define USB0_BASE__INST4_SEG2 0 +#define USB0_BASE__INST4_SEG3 0 +#define USB0_BASE__INST4_SEG4 0 + +#define USB0_BASE__INST5_SEG0 0 +#define USB0_BASE__INST5_SEG1 0 +#define USB0_BASE__INST5_SEG2 0 +#define USB0_BASE__INST5_SEG3 0 +#define USB0_BASE__INST5_SEG4 0 + +#define USB0_BASE__INST6_SEG0 0 +#define USB0_BASE__INST6_SEG1 0 +#define USB0_BASE__INST6_SEG2 0 +#define USB0_BASE__INST6_SEG3 0 +#define USB0_BASE__INST6_SEG4 0 + +#define UVD0_BASE__INST0_SEG0 0x00007800 +#define UVD0_BASE__INST0_SEG1 0x00007E00 +#define UVD0_BASE__INST0_SEG2 0x02403000 +#define UVD0_BASE__INST0_SEG3 0 +#define UVD0_BASE__INST0_SEG4 0 + +#define UVD0_BASE__INST1_SEG0 0 +#define UVD0_BASE__INST1_SEG1 0 +#define UVD0_BASE__INST1_SEG2 0 +#define UVD0_BASE__INST1_SEG3 0 +#define UVD0_BASE__INST1_SEG4 0 + +#define UVD0_BASE__INST2_SEG0 0 +#define UVD0_BASE__INST2_SEG1 0 +#define UVD0_BASE__INST2_SEG2 0 +#define UVD0_BASE__INST2_SEG3 0 +#define UVD0_BASE__INST2_SEG4 0 + +#define UVD0_BASE__INST3_SEG0 0 +#define UVD0_BASE__INST3_SEG1 0 +#define UVD0_BASE__INST3_SEG2 0 +#define UVD0_BASE__INST3_SEG3 0 +#define 
UVD0_BASE__INST3_SEG4 0 + +#define UVD0_BASE__INST4_SEG0 0 +#define UVD0_BASE__INST4_SEG1 0 +#define UVD0_BASE__INST4_SEG2 0 +#define UVD0_BASE__INST4_SEG3 0 +#define UVD0_BASE__INST4_SEG4 0 + +#define UVD0_BASE__INST5_SEG0 0 +#define UVD0_BASE__INST5_SEG1 0 +#define UVD0_BASE__INST5_SEG2 0 +#define UVD0_BASE__INST5_SEG3 0 +#define UVD0_BASE__INST5_SEG4 0 + +#define UVD0_BASE__INST6_SEG0 0 +#define UVD0_BASE__INST6_SEG1 0 +#define UVD0_BASE__INST6_SEG2 0 +#define UVD0_BASE__INST6_SEG3 0 +#define UVD0_BASE__INST6_SEG4 0 + +#endif -- cgit v1.2.3 From c20697674d3264f1cb2094eeec41c36d8f3f6320 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Wed, 5 Jun 2019 17:58:57 +0800 Subject: drm/amdgpu/discovery: init reg base offset via ip discovery for navi14 Add IP discovery for navi14. Signed-off-by: Xiaojie Yuan Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c b/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c index 28f3d6490649..864668a7f1d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c @@ -29,8 +29,20 @@ int navi14_reg_base_init(struct amdgpu_device *adev) { - /* HW has more IP blocks, only initialized the blocke needed by driver */ - uint32_t i; + int r, i; + + if (amdgpu_discovery) { + r = amdgpu_discovery_reg_base_init(adev); + if (r) { + DRM_WARN("failed to init reg base from ip discovery table, " + "fallback to legacy init method\n"); + goto legacy_init; + } + + return 0; + } + +legacy_init: for (i = 0 ; i < MAX_INSTANCE ; ++i) { adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i])); @@ -49,5 +61,6 @@ int navi14_reg_base_init(struct amdgpu_device *adev) adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i])); adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i])); } + return 0; } -- cgit v1.2.3 From c8ff09bf41f851e6e9bb2a9f8353f6c78f80f3c1 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Tue, 25 Dec 2018 14:44:23 +0800 Subject: drm/amdgpu: increase max instance number for hw ip max instance number is 6 for navi10 and 7 for navi14, and we increase the reg_offset array size to avoid out-of-bound access Signed-off-by: Xiaojie Yuan Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 8199d201b43a..ca82fef421e1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -731,7 +731,7 @@ enum amd_hw_ip_block_type { MAX_HWIP }; -#define HWIP_MAX_INSTANCE 6 +#define HWIP_MAX_INSTANCE 7 struct amd_powerplay { void *pp_handle; -- cgit v1.2.3 From 05d72b8d36bceed5142f676b592d1a35fc23f584 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Mon, 17 Dec 2018 18:19:42 +0800 Subject: drm/amdgpu/gmc10: add navi14 support same as navi10 Signed-off-by: Xiaojie Yuan Reviewed-by: Alex Deucher Reviewed-by: Jack Xiao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index 
5eeb72fcc123..8a1e23c6eee6 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -524,6 +524,7 @@ static int gmc_v10_0_mc_init(struct amdgpu_device *adev) if (amdgpu_gart_size == -1) { switch (adev->asic_type) { case CHIP_NAVI10: + case CHIP_NAVI14: default: adev->gmc.gart_size = 512ULL << 20; break; @@ -601,9 +602,10 @@ static int gmc_v10_0_sw_init(void *handle) adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev); switch (adev->asic_type) { case CHIP_NAVI10: + case CHIP_NAVI14: /* * To fulfill 4-level page support, - * vm size is 256TB (48bit), maximum size of Navi10, + * vm size is 256TB (48bit), maximum size of Navi10/Navi14, * block size 512 (9bit) */ amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); @@ -717,6 +719,7 @@ static void gmc_v10_0_init_golden_registers(struct amdgpu_device *adev) { switch (adev->asic_type) { case CHIP_NAVI10: + case CHIP_NAVI14: break; default: break; -- cgit v1.2.3 From 6041f2a281e50728390da740ae9ef78bf579911e Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Mon, 17 Dec 2018 18:05:32 +0800 Subject: drm/amdgpu/sdma5: add support for navi14 firmware Add support for navi14 sdma firmware files. Signed-off-by: Xiaojie Yuan Reviewed-by: Alex Deucher Reviewed-by: Jack Xiao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index 3747c3f1f0cc..1be7f3e4d650 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -42,6 +42,9 @@ MODULE_FIRMWARE("amdgpu/navi10_sdma.bin"); MODULE_FIRMWARE("amdgpu/navi10_sdma1.bin"); +MODULE_FIRMWARE("amdgpu/navi14_sdma.bin"); +MODULE_FIRMWARE("amdgpu/navi14_sdma1.bin"); + #define SDMA1_REG_OFFSET 0x600 #define SDMA0_HYP_DEC_REG_START 0x5880 #define SDMA0_HYP_DEC_REG_END 0x5893 @@ -143,6 +146,9 @@ static int sdma_v5_0_init_microcode(struct amdgpu_device *adev) case CHIP_NAVI10: chip_name = "navi10"; break; + case CHIP_NAVI14: + chip_name = "navi14"; + break; default: BUG(); } -- cgit v1.2.3 From 06823925ad6b4fe14e652e3a386ac1472f371fa9 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Mon, 17 Dec 2018 18:07:22 +0800 Subject: drm/amdgpu/sdma5: add placeholder for navi14 golden settings To be filled in once they are available. 
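A note on how these golden-settings tables are consumed, since the empty golden_settings_sdma_nv14[] placeholder added below may look odd on its own: the driver walks each table and applies a read-modify-write per entry, so a zero-length table is simply a no-op. The sketch below is modelled on soc15_program_register_sequence() and the soc15_reg_golden layout used throughout these patches; it approximates the behaviour and is not the verbatim upstream helper.

/* Sketch only: apply a table of (hwip, instance, segment, reg, and_mask,
 * or_mask) entries.  An empty placeholder table is harmless because
 * ARRAY_SIZE() is 0 and the loop body never runs. */
static void program_golden_registers(struct amdgpu_device *adev,
				     const struct soc15_reg_golden *regs,
				     u32 array_size)
{
	u32 i, reg, tmp;

	for (i = 0; i < array_size; i++) {
		const struct soc15_reg_golden *e = &regs[i];

		/* reg_offset[][][] is what the reg-base-init / IP-discovery
		 * patches earlier in this series fill in */
		reg = adev->reg_offset[e->hwip][e->instance][e->segment] + e->reg;

		tmp = RREG32(reg);
		tmp &= ~e->and_mask;	/* clear only the masked bits           */
		tmp |= e->or_mask;	/* program the recommended golden value */
		WREG32(reg, tmp);
	}
}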
Signed-off-by: Xiaojie Yuan Reviewed-by: Alex Deucher Reviewed-by: Jack Xiao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index 1be7f3e4d650..caf34dd3c573 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -85,6 +85,9 @@ static const struct soc15_reg_golden golden_settings_sdma_5[] = { static const struct soc15_reg_golden golden_settings_sdma_nv10[] = { }; +static const struct soc15_reg_golden golden_settings_sdma_nv14[] = { +}; + static u32 sdma_v5_0_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 internal_offset) { u32 base; @@ -114,6 +117,14 @@ static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_sdma_nv10, (const u32)ARRAY_SIZE(golden_settings_sdma_nv10)); break; + case CHIP_NAVI14: + soc15_program_register_sequence(adev, + golden_settings_sdma_5, + (const u32)ARRAY_SIZE(golden_settings_sdma_5)); + soc15_program_register_sequence(adev, + golden_settings_sdma_nv14, + (const u32)ARRAY_SIZE(golden_settings_sdma_nv14)); + break; default: break; } -- cgit v1.2.3 From c049af3e850be62d3add8c902a9f5c45dcae8373 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Tue, 12 Feb 2019 02:58:06 +0000 Subject: drm/amdgpu/sdma5: add sdma5_0 golden settings for navi14 Add settings for navi14. Signed-off-by: Xiaojie Yuan Reviewed-by: Snow Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index caf34dd3c573..9ef36eb3ecfd 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -62,7 +62,6 @@ static const struct soc15_reg_golden golden_settings_sdma_5[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), @@ -74,7 +73,6 @@ static const struct soc15_reg_golden golden_settings_sdma_5[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), @@ -83,9 +81,13 @@ static const struct soc15_reg_golden golden_settings_sdma_5[] = { }; static const struct soc15_reg_golden golden_settings_sdma_nv10[] = { + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 
0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000), }; static const struct soc15_reg_golden golden_settings_sdma_nv14[] = { + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), }; static u32 sdma_v5_0_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 internal_offset) -- cgit v1.2.3 From a994b742b89df02f15193f4074f6a43e026262cb Mon Sep 17 00:00:00 2001 From: tiancyin Date: Tue, 21 May 2019 14:43:48 +0800 Subject: drm/amdgpu/sdma5: update sdma5 golden settings for navi14 add new registers: mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL Reviewed-by: Xiaojie Yuan Signed-off-by: tiancyin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index 9ef36eb3ecfd..dbfb1845297d 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -62,6 +62,7 @@ static const struct soc15_reg_golden golden_settings_sdma_5[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), @@ -73,6 +74,7 @@ static const struct soc15_reg_golden golden_settings_sdma_5[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), -- cgit v1.2.3 From 9571710f0c4e4ad7ef98b1ae491fe3009e827b1a Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Fri, 22 Mar 2019 13:14:25 +0800 Subject: drm/amdgpu/sdma5: set clock gating for navi14 same as navi10. Signed-off-by: Xiaojie Yuan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index dbfb1845297d..89893261f145 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -1493,6 +1493,7 @@ static int sdma_v5_0_set_clockgating_state(void *handle, switch (adev->asic_type) { case CHIP_NAVI10: + case CHIP_NAVI14: sdma_v5_0_update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE ? 
true : false); sdma_v5_0_update_medium_grain_light_sleep(adev, -- cgit v1.2.3 From 654bcee0e4ea837ce661b4c305ade229f757093e Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Mon, 17 Dec 2018 18:08:28 +0800 Subject: drm/amdgpu/gfx10: add support for navi14 firmware Add support for navi14 CP firmware files. Signed-off-by: Xiaojie Yuan Reviewed-by: Alex Deucher Reviewed-by: Jack Xiao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 1675d5837c3c..d61d70e0151c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -63,6 +63,13 @@ MODULE_FIRMWARE("amdgpu/navi10_mec.bin"); MODULE_FIRMWARE("amdgpu/navi10_mec2.bin"); MODULE_FIRMWARE("amdgpu/navi10_rlc.bin"); +MODULE_FIRMWARE("amdgpu/navi14_ce.bin"); +MODULE_FIRMWARE("amdgpu/navi14_pfp.bin"); +MODULE_FIRMWARE("amdgpu/navi14_me.bin"); +MODULE_FIRMWARE("amdgpu/navi14_mec.bin"); +MODULE_FIRMWARE("amdgpu/navi14_mec2.bin"); +MODULE_FIRMWARE("amdgpu/navi14_rlc.bin"); + static const struct soc15_reg_golden golden_settings_gc_10_1[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x00400014), @@ -481,6 +488,9 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev) case CHIP_NAVI10: chip_name = "navi10"; break; + case CHIP_NAVI14: + chip_name = "navi14"; + break; default: BUG(); } -- cgit v1.2.3 From 47b67bd7d426f67d595ec2af358fd8ecad91ab61 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Tue, 16 Jul 2019 13:22:04 -0500 Subject: drm/amdgpu/gfx10: add placeholder for navi14 golden settings To be filled in once available. Signed-off-by: Xiaojie Yuan Reviewed-by: Alex Deucher Reviewed-by: Jack Xiao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index d61d70e0151c..0741ae36e661 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -116,6 +116,11 @@ static const struct soc15_reg_golden golden_settings_gc_10_0_nv10[] = /* Pending on emulation bring up */ }; +static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] = +{ + /* Pending on emulation bring up */ +}; + #define DEFAULT_SH_MEM_CONFIG \ ((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \ (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \ @@ -257,6 +262,14 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_gc_10_0_nv10, (const u32)ARRAY_SIZE(golden_settings_gc_10_0_nv10)); break; + case CHIP_NAVI14: + soc15_program_register_sequence(adev, + golden_settings_gc_10_1, + (const u32)ARRAY_SIZE(golden_settings_gc_10_1)); + soc15_program_register_sequence(adev, + golden_settings_gc_10_1_nv14, + (const u32)ARRAY_SIZE(golden_settings_gc_10_1_nv14)); + break; default: break; } -- cgit v1.2.3 From 4bd80a4663c2b20cc92752ac73794458a86d1dab Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Mon, 17 Dec 2018 18:21:35 +0800 Subject: drm/amdgpu/gfx10: add gfx config for navi14 Add gfx config details for navi14. 
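As an aside on the MODULE_FIRMWARE("amdgpu/navi14_*.bin") entries and the chip_name = "navi14" cases added a couple of patches above: the firmware file name is normally built from chip_name at init time. A minimal sketch of that pattern, assuming the usual request_firmware() flow in gfx_v10_0_init_microcode() (abbreviated, error handling trimmed; not the verbatim upstream code):

	char fw_name[40];
	int err;

	/* chip_name is "navi10" or "navi14" depending on adev->asic_type */
	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
	err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
	/* ...repeated for the _me, _ce, _mec, _mec2 and _rlc images... */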
Signed-off-by: Xiaojie Yuan Reviewed-by: Alex Deucher Reviewed-by: Jack Xiao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 0741ae36e661..0767827b8509 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -1056,6 +1056,14 @@ static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev) adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG); break; + case CHIP_NAVI14: + adev->gfx.config.max_hw_contexts = 8; + adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; + adev->gfx.config.sc_prim_fifo_size_backend = 0x100; + adev->gfx.config.sc_hiz_tile_fifo_size = 0x80; + adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; + gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG); + break; default: BUG(); break; -- cgit v1.2.3 From 41957a8ea9e0fb2d57b9d3b429f197ad96d42ac1 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Mon, 17 Dec 2018 18:22:16 +0800 Subject: drm/amdgpu/gfx10: add clockgating support for navi14 Same as navi10. Signed-off-by: Xiaojie Yuan Reviewed-by: Alex Deucher Reviewed-by: Jack Xiao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 0767827b8509..18f302054c3d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -4078,6 +4078,7 @@ static int gfx_v10_0_set_clockgating_state(void *handle, switch (adev->asic_type) { case CHIP_NAVI10: + case CHIP_NAVI14: gfx_v10_0_update_gfx_clock_gating(adev, state == AMD_CG_STATE_GATE ? true : false); break; -- cgit v1.2.3 From 0c090023c639577e1a00438e21a112cffb3843d9 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Tue, 25 Dec 2018 14:45:21 +0800 Subject: drm/amdgpu: add me/mec configurations for navi14 Add navi14 to appropriate cases. Signed-off-by: Xiaojie Yuan Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 18f302054c3d..df07643220c9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -1164,6 +1164,7 @@ static int gfx_v10_0_sw_init(void *handle) switch (adev->asic_type) { case CHIP_NAVI10: + case CHIP_NAVI14: adev->gfx.me.num_me = 1; adev->gfx.me.num_pipe_per_me = 2; adev->gfx.me.num_queue_per_pipe = 1; -- cgit v1.2.3 From e938ded64810151b99717b635219dbbc201970f1 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Tue, 25 Dec 2018 14:45:57 +0800 Subject: drm/amdgpu: set rlc funcs for navi14 Same as navi10. 
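The gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG) read in the hunk above is where the per-IP offset tables from the reg-base-init / IP-discovery patches earlier in this series actually get used. Roughly, the accessor resolves a register like this (assumed shape of the soc15_common.h macros, not quoted verbatim):

/* The segment index comes from the register's *_BASE_IDX define; the base
 * address for that segment comes from adev->reg_offset[][][], filled in by
 * navi14_reg_base_init() or amdgpu_discovery_reg_base_init(). */
#define SOC15_REG_OFFSET(ip, inst, reg) \
	(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)

#define RREG32_SOC15(ip, inst, reg) \
	RREG32(SOC15_REG_OFFSET(ip, inst, reg))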
Signed-off-by: Xiaojie Yuan Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index df07643220c9..913f76864136 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -5120,6 +5120,7 @@ static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev) { switch (adev->asic_type) { case CHIP_NAVI10: + case CHIP_NAVI14: adev->gfx.rlc.funcs = &gfx_v10_0_rlc_funcs; break; default: -- cgit v1.2.3 From d55c193dbd43c8e548c93b69cda7f5a60e70c596 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Tue, 29 Jan 2019 22:36:15 +0800 Subject: drm/amdgpu/gfx10: set tcp harvest for navi14 Update settings for navi14. Signed-off-by: Xiaojie Yuan Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 913f76864136..225be8973536 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -1485,6 +1485,7 @@ static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev) /* * GCRD_TARGETS_DISABLE field contains * for Navi10: GL1C=[18:15], SQC=[14:10], TCP=[9:0] + * for Navi14: GL1C=[21:18], SQC=[17:12], TCP=[11:0] */ u32 gcrd_targets_disable_mask = amdgpu_gfx_create_bitmask( 2 * max_wgp_per_sh + /* TCP */ @@ -1493,6 +1494,7 @@ static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev) /* * UTCL1_UTCL0_INVREQ_DISABLE field contains * for Navi10: SQG=[24], RMI=[23:20], SQC=[19:10], TCP=[9:0] + * for Navi14: SQG=[28], RMI=[27:24], SQC=[23:12], TCP=[11:0] */ u32 utcl_invreq_disable_mask = amdgpu_gfx_create_bitmask( 2 * max_wgp_per_sh + /* TCP */ @@ -1500,7 +1502,7 @@ static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev) 4 + /* RMI */ 1); /* SQG */ - if (adev->asic_type == CHIP_NAVI10) { + if (adev->asic_type == CHIP_NAVI10 || adev->asic_type == CHIP_NAVI14) { mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { -- cgit v1.2.3 From 58acab6629e114733dd1e2664b1be24647f5f536 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Thu, 14 Feb 2019 09:06:10 +0800 Subject: drm/amdgpu/gfx: add definition of mmCGTT_GS_NGG_CLK_CTRL Needed for clockgating. Signed-off-by: Xiaojie Yuan Reviewed-by: Feifei Xu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 225be8973536..37eb5f3cae19 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -56,6 +56,9 @@ #define F32_CE_PROGRAM_RAM_SIZE 65536 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L +#define mmCGTT_GS_NGG_CLK_CTRL 0x5087 +#define mmCGTT_GS_NGG_CLK_CTRL_BASE_IDX 1 + MODULE_FIRMWARE("amdgpu/navi10_ce.bin"); MODULE_FIRMWARE("amdgpu/navi10_pfp.bin"); MODULE_FIRMWARE("amdgpu/navi10_me.bin"); -- cgit v1.2.3 From 7417846725e9bd4801c5434600d233fa93e2bea4 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Tue, 12 Feb 2019 02:56:57 +0000 Subject: drm/amdgpu/gfx10: add gfx v10_1_1 golden settings for navi14 Add golden settings for navi14. 
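To make one of the entries in the hunk that follows concrete, here is the worked arithmetic for a single golden-settings line under the read-modify-write sketch given earlier (illustration only):

/* SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_IA_CLK_CTRL, 0xffff0fff, 0xffff0100)
 *
 *   old = RREG32(mmCGTT_IA_CLK_CTRL);
 *   new = (old & ~0xffff0fff) | 0xffff0100
 *       = (old &  0x0000f000) | 0xffff0100
 *
 * i.e. bits [15:12] keep whatever value they already hold and everything
 * else is forced to the recommended setting; an entry with an and_mask of
 * 0xffffffff overwrites the whole register. */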
Signed-off-by: Xiaojie Yuan Reviewed-by: Snow Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 42 ++++++++++++++++++++++++++++++++-- 1 file changed, 40 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 37eb5f3cae19..d2ca4af5fcf5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -119,6 +119,44 @@ static const struct soc15_reg_golden golden_settings_gc_10_0_nv10[] = /* Pending on emulation bring up */ }; +static const struct soc15_reg_golden golden_settings_gc_10_1_1[] = +{ + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x003c0014), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_GS_NGG_CLK_CTRL, 0xffff8fff, 0xffff8100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_IA_CLK_CTRL, 0xffff0fff, 0xffff0100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0xf8ff0fff, 0x60000100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0x40000ff0, 0x40000100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_WD_CLK_CTRL, 0xffff8fff, 0xffff8100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_PIPE_STEER, 0xffffffff, 0xe4e4e4e4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_VC5_ENABLE, 0x00000002, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0x800007ff, 0x000005ff), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG, 0xffffffff, 0x20000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xffffffff, 0x00000420), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04800000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DFSM_TILES_IN_FLIGHT, 0x0000ffff, 0x0000003f), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_LAST_OF_BURST_CONFIG, 0xffffffff, 0x03860204), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1800ff, 0x00000043), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff0ffff, 0x00000500), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PRIV_CONTROL, 0x000007ff, 0x000001fe), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0xffffffff, 0xe4e4e4e4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x13203120), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x13203120), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffe7), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffe7), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL2, 0xffffffff, 0x1402002f), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xffffbfff, 0x00000188), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x08000009), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_SX_EXPORT_BUFFER_SIZES, 0xffffffff, 0x08000400), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_SX_SCOREBOARD_BUFFER_SIZES, 0xffffffff, 0x00400040), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00000000), +}; + static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] = { /* Pending on emulation bring up */ @@ -267,8 +305,8 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev) break; case CHIP_NAVI14: soc15_program_register_sequence(adev, - 
golden_settings_gc_10_1, - (const u32)ARRAY_SIZE(golden_settings_gc_10_1)); + golden_settings_gc_10_1_1, + (const u32)ARRAY_SIZE(golden_settings_gc_10_1_1)); soc15_program_register_sequence(adev, golden_settings_gc_10_1_nv14, (const u32)ARRAY_SIZE(golden_settings_gc_10_1_nv14)); -- cgit v1.2.3 From be184b4ccc92be23de8ca8bce94d41c3f706121a Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Thu, 14 Feb 2019 07:12:00 +0800 Subject: drm/amdgpu/gfx: update gc_v10_1_1 golden setting Updated settings for hw team. Signed-off-by: Xiaojie Yuan Reviewed-by: Feifei Xu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index d2ca4af5fcf5..85641336423f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -145,7 +145,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_1[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffe7), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffe7), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL2, 0xffffffff, 0x1402002f), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xffffbfff, 0x00000188), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xffffbfff, 0x00002188), SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x08000009), SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101), -- cgit v1.2.3 From 4904ede127925df6225b1d96621c0b9e83a74f59 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Fri, 29 Mar 2019 19:46:46 +0800 Subject: drm/amdgpu/gfx10: update gfx golden settings for navi14 Add updated settings from hw team. 
Signed-off-by: Xiaojie Yuan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 85641336423f..648d612d4347 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -124,6 +124,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_1[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x003c0014), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_GS_NGG_CLK_CTRL, 0xffff8fff, 0xffff8100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_IA_CLK_CTRL, 0xffff0fff, 0xffff0100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xc0000000, 0xc0000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0xf8ff0fff, 0x60000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0x40000ff0, 0x40000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100), @@ -133,6 +134,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_1[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0x800007ff, 0x000005ff), SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG, 0xffffffff, 0x20000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xffffffff, 0x00000420), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x00000200, 0x00000200), SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04800000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DFSM_TILES_IN_FLIGHT, 0x0000ffff, 0x0000003f), SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_LAST_OF_BURST_CONFIG, 0xffffffff, 0x03860204), @@ -144,14 +146,14 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_1[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x13203120), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffe7), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffe7), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CGTT_SCLK_CTRL, 0xffff0fff, 0x10000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL2, 0xffffffff, 0x1402002f), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xffffbfff, 0x00002188), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xffffbfff, 0x00000188), SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x08000009), SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_SX_EXPORT_BUFFER_SIZES, 0xffffffff, 0x08000400), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_SX_SCOREBOARD_BUFFER_SIZES, 0xffffffff, 0x00400040), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000133, 0x00000130), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00000000), -- cgit v1.2.3 From 3ddec51511fc6ba84595565b6f48badabd0e47dc Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Tue, 2 Jul 2019 14:20:04 -0500 Subject: drm/amdgpu/gfx10: update gfx golden settings for navi14 Updated settings from hw team. 
Signed-off-by: Tao Zhou Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 648d612d4347..72762de47dc0 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -135,28 +135,28 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_1[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG, 0xffffffff, 0x20000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xffffffff, 0x00000420), SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x00000200, 0x00000200), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04800000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04900000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DFSM_TILES_IN_FLIGHT, 0x0000ffff, 0x0000003f), SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_LAST_OF_BURST_CONFIG, 0xffffffff, 0x03860204), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1800ff, 0x00000043), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff0ffff, 0x00000500), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PRIV_CONTROL, 0x000007ff, 0x000001fe), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0xffffffff, 0xe4e4e4e4), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x13203120), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x13203120), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffe7), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffe7), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CGTT_SCLK_CTRL, 0xffff0fff, 0x10000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL2, 0xffffffff, 0x1402002f), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xffffbfff, 0x00000188), SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x08000009), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00400000, 0x04440000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000133, 0x00000130), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0x60000010, 0x479c0010), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00800000, 0x00800000), }; static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] = -- cgit v1.2.3 From b8cb98cd3e2b600794585958dbea53d433d86736 Mon Sep 17 00:00:00 2001 From: Jack Xiao Date: Tue, 28 May 2019 13:27:11 +0800 Subject: drm/amdgpu/gfx10: fix programming of SC_HIZ_TILE_FIFO_SIZE field max fifo size is 128 and PA_SC_FIFO_SIZE[20:15]=SC_HIZ_TILE_FIFO_SIZE field is programmed in units of two entries, but 6 bits is insufficient to hold value 128/2 = 64, so set this field as 0 which is interpreted by the hardware as maximum physical fifo size(128). 
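Spelling out the arithmetic from the commit message (illustration only; the macro names below are invented for the example and do not exist in the driver):

#define SC_HIZ_TILE_FIFO_DEPTH      128				/* physical entries */
#define SC_HIZ_TILE_FIFO_UNITS      (SC_HIZ_TILE_FIFO_DEPTH / 2)	/* 64 */
#define SC_HIZ_TILE_FIFO_FIELD_MAX  ((1 << 6) - 1)			/* 63 */

/* 64 > 63: the real value cannot be encoded in the 6-bit
 * PA_SC_FIFO_SIZE[20:15] field, so 0 is written instead, which the hardware
 * interprets as "use the maximum physical fifo size (128)". */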
Signed-off-by: Xiaojie Yuan Signed-off-by: Jack Xiao Acked-by: Alex Deucher Reviewed-by: Jack Xiao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 72762de47dc0..0cf7c3faa91f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -1103,7 +1103,7 @@ static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev) adev->gfx.config.max_hw_contexts = 8; adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; adev->gfx.config.sc_prim_fifo_size_backend = 0x100; - adev->gfx.config.sc_hiz_tile_fifo_size = 0x80; + adev->gfx.config.sc_hiz_tile_fifo_size = 0x0; adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG); break; -- cgit v1.2.3 From 5e71e011ff84cd467a6c255e8ea8dcd77ec442ff Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Mon, 17 Dec 2018 18:23:27 +0800 Subject: drm/amdgpu/soc15: add support for navi14 same as navi10 Signed-off-by: Xiaojie Yuan Reviewed-by: Alex Deucher Reviewed-by: Jack Xiao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nv.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 9f67739165b9..dfc247cb44a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -559,6 +559,11 @@ static int nv_common_early_init(void *handle) AMD_PG_SUPPORT_ATHUB; adev->external_rev_id = adev->rev_id + 0x1; break; + case CHIP_NAVI14: + adev->cg_flags = 0; + adev->pg_flags = 0; + adev->external_rev_id = adev->rev_id + 0x1; /* ??? */ + break; default: /* FIXME: not supported yet */ return -EINVAL; @@ -751,6 +756,7 @@ static int nv_common_set_clockgating_state(void *handle, switch (adev->asic_type) { case CHIP_NAVI10: + case CHIP_NAVI14: adev->nbio_funcs->update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE ? 
true : false); adev->nbio_funcs->update_medium_grain_light_sleep(adev, -- cgit v1.2.3 From a29bfd1283abddae6a58686a934acc7102cf6778 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Wed, 19 Dec 2018 20:39:37 +0800 Subject: drm/amdgpu: add ip blocks for navi14 Add the initial IP blocks for navi14 Signed-off-by: Xiaojie Yuan Reviewed-by: Alex Deucher Reviewed-by: Jack Xiao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nv.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index dfc247cb44a8..81aa154fd449 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -405,6 +405,13 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) if (adev->enable_mes) amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block); break; + case CHIP_NAVI14: + amdgpu_device_ip_block_add(adev, &nv_common_ip_block); + amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); + amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); + amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); + amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block); + break; default: return -EINVAL; } -- cgit v1.2.3 From 691f69b6a6b283e488ac91581640fd4e8fdb8c4a Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Wed, 16 Jan 2019 10:23:17 +0800 Subject: drm/amdgpu: enable virtual display for navi14 Virtual display is a sw based kms interface for virtualization and emulation. Signed-off-by: Xiaojie Yuan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 1 + drivers/gpu/drm/amd/amdgpu/nv.c | 2 ++ 2 files changed, 3 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index 3cc0a16649f9..30a08f8ea4bc 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -458,6 +458,7 @@ static int dce_virtual_hw_init(void *handle) case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_NAVI10: + case CHIP_NAVI14: break; default: DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", adev->asic_type); diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 81aa154fd449..dabe650252b8 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -409,6 +409,8 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &nv_common_ip_block); amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); + if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) + amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block); break; -- cgit v1.2.3 From 82522b2d7f54a80daecf46a5931ac9bebcb90307 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Mon, 8 Jul 2019 14:03:15 -0500 Subject: drm/amdgpu/psp: add psp support for navi14 (v3) Same as navi10. 
v2: squash in logic fix (Colin Ian King) v3: squash in logic simplification (Alex) Signed-off-by: Xiaojie Yuan Reviewed-by: Snow Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 1 + drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 4 ++++ 2 files changed, 5 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index c027e5e7713e..1f9105a6c050 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -57,6 +57,7 @@ static int psp_early_init(void *handle) psp->autoload_supported = false; break; case CHIP_NAVI10: + case CHIP_NAVI14: psp_v11_0_set_psp_funcs(psp); psp->autoload_supported = true; break; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index 41b72588adcf..878221afad45 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -43,6 +43,7 @@ MODULE_FIRMWARE("amdgpu/vega20_asd.bin"); MODULE_FIRMWARE("amdgpu/vega20_ta.bin"); MODULE_FIRMWARE("amdgpu/navi10_sos.bin"); MODULE_FIRMWARE("amdgpu/navi10_asd.bin"); +MODULE_FIRMWARE("amdgpu/navi14_sos.bin"); /* address block */ #define smnMP1_FIRMWARE_FLAGS 0x3010024 @@ -72,6 +73,9 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) case CHIP_NAVI10: chip_name = "navi10"; break; + case CHIP_NAVI14: + chip_name = "navi14"; + break; default: BUG(); } -- cgit v1.2.3 From 4adc0732fceaa2166c3bbd6ff5d8f40b74783cf0 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Sun, 10 Feb 2019 21:45:32 +0000 Subject: drm/amdgpu: enable psp ip block for navi14 Same as navi10. Signed-off-by: Xiaojie Yuan Reviewed-by: Snow Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nv.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index dabe650252b8..95df863bc8c0 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -409,6 +409,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &nv_common_ip_block); amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); + amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); -- cgit v1.2.3 From 7840d8babe71d5bd447465779fd73ff59855d2fa Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Tue, 12 Feb 2019 03:12:31 +0000 Subject: drm/amdgpu/psp: start rlc autoload after psp received rlcg for navi14 Update for navi14. 
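A hypothetical helper restating the intent of the check added in the hunk that follows (the function below is not part of the patch; the identifiers it uses are the ones visible in the diff):

static bool psp_received_last_gfx_fw(struct amdgpu_device *adev,
				     struct amdgpu_firmware_info *ucode)
{
	/* RLC autoload starts once the PSP has received the last piece of
	 * GFX firmware; the existing trigger is the RLC SRM restore list. */
	if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM)
		return true;

	/* The patch below makes the RLC_G ucode the trigger on navi14. */
	return adev->asic_type == CHIP_NAVI14 &&
	       ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G;
}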
Signed-off-by: Xiaojie Yuan Reviewed-by: Feifei Xu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 1f9105a6c050..409725f40802 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -998,7 +998,9 @@ out: return ret; /* Start rlc autoload after psp recieved all the gfx firmware */ - if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM) { + if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM || + (adev->asic_type == CHIP_NAVI14 && + ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G)) { ret = psp_rlc_autoload(psp); if (ret) { DRM_ERROR("Failed to start rlc autoload\n"); -- cgit v1.2.3 From b02ff126898939a685c9a066b5d80dde3ff1c66f Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Wed, 13 Feb 2019 03:44:10 +0800 Subject: drm/amdgpu/smu11: add support for navi14 Same as navi10. Signed-off-by: Xiaojie Yuan Reviewed-by: Feifei Xu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 1 + drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 5 +++++ 2 files changed, 6 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index f1565c448de5..d977d68320c9 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -538,6 +538,7 @@ static int smu_set_funcs(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_VEGA20: case CHIP_NAVI10: + case CHIP_NAVI14: if (adev->pm.pp_feature & PP_OVERDRIVE_MASK) smu->od_enabled = true; smu_v11_0_set_smu_funcs(smu); diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index 95c7c4dae523..0e9eead6ad29 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -46,6 +46,7 @@ MODULE_FIRMWARE("amdgpu/vega20_smc.bin"); MODULE_FIRMWARE("amdgpu/navi10_smc.bin"); +MODULE_FIRMWARE("amdgpu/navi14_smc.bin"); #define SMU11_VOLTAGE_SCALE 4 @@ -157,6 +158,9 @@ static int smu_v11_0_init_microcode(struct smu_context *smu) case CHIP_NAVI10: chip_name = "navi10"; break; + case CHIP_NAVI14: + chip_name = "navi14"; + break; default: BUG(); } @@ -1799,6 +1803,7 @@ void smu_v11_0_set_smu_funcs(struct smu_context *smu) vega20_set_ppt_funcs(smu); break; case CHIP_NAVI10: + case CHIP_NAVI14: navi10_set_ppt_funcs(smu); break; default: -- cgit v1.2.3 From ab5e51211cbe08843c0f0e796601903c094f6db4 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Wed, 13 Feb 2019 03:34:54 +0800 Subject: drm/amdgpu: enable sw smu ip for navi14 same as navi10. 
Signed-off-by: Xiaojie Yuan Reviewed-by: Feifei Xu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nv.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 95df863bc8c0..b6deebacbfae 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -410,6 +410,8 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); + if (is_support_sw_smu(adev)) + amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); -- cgit v1.2.3 From fce651e393a3d130dfb2e34de6afc9afc565b2f2 Mon Sep 17 00:00:00 2001 From: Bhawanpreet Lakha Date: Tue, 2 Jul 2019 10:41:40 -0500 Subject: drm/amd/display: add nv14 cases to amdgpu_dm Mostly shared with navi10. Signed-off-by: Bhawanpreet Lakha Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 4a29f72334d0..08b7149e9b70 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2359,6 +2359,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) case CHIP_RAVEN: #if defined(CONFIG_DRM_AMD_DC_DCN2_0) case CHIP_NAVI10: + case CHIP_NAVI14: #endif if (dcn10_register_irq_handlers(dm->adev)) { DRM_ERROR("DM: Failed to initialize IRQ\n"); @@ -2519,6 +2520,11 @@ static int dm_early_init(void *handle) adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 6; break; + case CHIP_NAVI14: + adev->mode_info.num_crtc = 5; + adev->mode_info.num_hpd = 5; + adev->mode_info.num_dig = 5; + break; #endif default: DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type); @@ -2814,6 +2820,7 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev, adev->asic_type == CHIP_VEGA20 || #if defined(CONFIG_DRM_AMD_DC_DCN2_0) adev->asic_type == CHIP_NAVI10 || + adev->asic_type == CHIP_NAVI14 || #endif adev->asic_type == CHIP_RAVEN) { /* Fill GFX9 params */ -- cgit v1.2.3 From 2ebe177344d79bc61294d4365376978610d316c3 Mon Sep 17 00:00:00 2001 From: Bhawanpreet Lakha Date: Tue, 26 Feb 2019 14:38:59 -0500 Subject: drm/amd/display: add NAVI14 in resource construct Change the pipe count to 5 if the ASIC is NV14. This is a temporary patch; there was some refactoring in the DML part of the code which is not in this branch. For now this is good enough, and we can implement this properly once we have an updated branch.
Signed-off-by: Bhawanpreet Lakha Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 23 ++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index d200bc3cec71..293268d33929 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -695,6 +695,16 @@ static const struct dc_plane_cap plane_cap = { .fp16 = 1 } }; +static const struct resource_caps res_cap_nv14 = { + .num_timing_generator = 5, + .num_opp = 5, + .num_video_plane = 5, + .num_audio = 6, + .num_stream_encoder = 5, + .num_pll = 5, + .num_dwb = 1, + .num_ddc = 5, +}; static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = true, @@ -2878,17 +2888,22 @@ static bool construct( struct irq_service_init_data init_data; ctx->dc_bios->regs = &bios_regs; - - pool->base.res_cap = &res_cap_nv10; pool->base.funcs = &dcn20_res_pool_funcs; + if (ASICREV_IS_NAVI14_M(ctx->asic_id.hw_internal_rev)) { + pool->base.res_cap = &res_cap_nv14; + pool->base.pipe_count = 5; + pool->base.mpcc_count = 5; + } else { + pool->base.res_cap = &res_cap_nv10; + pool->base.pipe_count = 6; + pool->base.mpcc_count = 6; + } /************************************************* * Resource + asic cap harcoding * *************************************************/ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; - pool->base.pipe_count = 6; - pool->base.mpcc_count = 6; dc->caps.max_downscale_ratio = 200; dc->caps.i2c_speed_in_khz = 100; dc->caps.max_cursor_size = 256; -- cgit v1.2.3 From 8fceceb69e5370af575d0496f2e25bd035ad69b6 Mon Sep 17 00:00:00 2001 From: Bhawanpreet Lakha Date: Tue, 26 Feb 2019 13:38:17 -0500 Subject: drm/amd/display: add dm block enable DC for navi14. Signed-off-by: Bhawanpreet Lakha Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 + drivers/gpu/drm/amd/amdgpu/nv.c | 2 ++ 2 files changed, 3 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index b0179094c8dc..6aa3c3e5bd50 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2432,6 +2432,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) #endif #if defined(CONFIG_DRM_AMD_DC_DCN2_0) case CHIP_NAVI10: + case CHIP_NAVI14: #endif return amdgpu_dc != 0; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index b6deebacbfae..102927c5f8f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -414,6 +414,8 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); + else if (amdgpu_device_has_dc_support(adev)) + amdgpu_device_ip_block_add(adev, &dm_ip_block); amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block); break; -- cgit v1.2.3 From c6e764b40a78b6ffdd70431b03cacc999c069e9d Mon Sep 17 00:00:00 2001 From: Bhawanpreet Lakha Date: Tue, 2 Jul 2019 10:43:55 -0500 Subject: drm/amd/display: add ASICREV defines v2 Add revs for navi10 and 14. 
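A minimal standalone sketch of how the new revision checks behave (the enum values and comparisons follow the dal_asic_id.h hunk below; the small test harness around them is purely illustrative and not part of the change):

#include <stdio.h>

enum {
        NV_NAVI10_P_A0 = 1,
        NV_NAVI14_M_A0 = 20,
        NV_UNKNOWN = 0xFF
};

#define ASICREV_IS_NAVI10_P(eChipRev) ((eChipRev) < NV_NAVI14_M_A0)
#define ASICREV_IS_NAVI14_M(eChipRev) (((eChipRev) >= NV_NAVI14_M_A0) && ((eChipRev) < NV_UNKNOWN))

int main(void)
{
        /* Internal revisions 1..19 classify as Navi10, 20..0xfe as Navi14. */
        unsigned int revs[] = { 1, 19, 20, 0xfe, 0xff };
        unsigned int i;

        for (i = 0; i < sizeof(revs) / sizeof(revs[0]); i++)
                printf("rev 0x%02x: navi10=%d navi14=%d\n", revs[i],
                       ASICREV_IS_NAVI10_P(revs[i]),
                       ASICREV_IS_NAVI14_M(revs[i]));
        return 0;
}

These half-open ranges are what the dcn20 resource construct earlier in this series keys off when it switches to the 5-pipe caps for NV14.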
Signed-off-by: Bhawanpreet Lakha Reviewed-by: Xiaojie Yuan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/include/dal_asic_id.h | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h index 887e6a8597c4..94b7d5ec155b 100644 --- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h +++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h @@ -151,6 +151,14 @@ #define FAMILY_NV 143 /* DCN 2*/ +enum { + NV_NAVI10_P_A0 = 1, + NV_NAVI14_M_A0 = 20, + NV_UNKNOWN = 0xFF +}; + +#define ASICREV_IS_NAVI10_P(eChipRev) (eChipRev < NV_NAVI14_M_A0) +#define ASICREV_IS_NAVI14_M(eChipRev) ((eChipRev >= NV_NAVI14_M_A0) && (eChipRev < NV_UNKNOWN)) #endif /* -- cgit v1.2.3 From e470d287c3f10d03eef25efecad11791691b737c Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Fri, 15 Mar 2019 19:10:47 +0800 Subject: drm/amdgpu: skip to load ta firmware for navi14 Not relevant on navi14. Signed-off-by: Xiaojie Yuan Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index 878221afad45..f6a22da2b397 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -162,6 +162,7 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) } break; case CHIP_NAVI10: + case CHIP_NAVI14: break; default: BUG(); -- cgit v1.2.3 From baebcf2e911267b2f5e4cdff3b822d0648ce33c5 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Fri, 15 Mar 2019 19:15:21 +0800 Subject: drm/amd/display: skip to load dmcu firmware for navi14 not needed for navi14 at the moment. Signed-off-by: Xiaojie Yuan Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 08b7149e9b70..e1871ad3c840 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -809,6 +809,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev) case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_NAVI10: + case CHIP_NAVI14: return 0; case CHIP_RAVEN: if (ASICREV_IS_PICASSO(adev->external_rev_id)) -- cgit v1.2.3 From e149a2f6ed2c84d10087e4a4f9bb1e7d8893a965 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Fri, 1 Mar 2019 16:23:55 -0500 Subject: drm/amdgpu: Enable VCN on navi14 Add navi14 vcn firmware, and enable VCN on navi14. 
Signed-off-by: James Zhu Reviewed-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 5 +++++ drivers/gpu/drm/amd/amdgpu/nv.c | 1 + 2 files changed, 6 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 2e12eeb314a7..1ff0f9e9df1e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -47,11 +47,13 @@ #define FIRMWARE_PICASSO "amdgpu/picasso_vcn.bin" #define FIRMWARE_RAVEN2 "amdgpu/raven2_vcn.bin" #define FIRMWARE_NAVI10 "amdgpu/navi10_vcn.bin" +#define FIRMWARE_NAVI14 "amdgpu/navi14_vcn.bin" MODULE_FIRMWARE(FIRMWARE_RAVEN); MODULE_FIRMWARE(FIRMWARE_PICASSO); MODULE_FIRMWARE(FIRMWARE_RAVEN2); MODULE_FIRMWARE(FIRMWARE_NAVI10); +MODULE_FIRMWARE(FIRMWARE_NAVI14); static void amdgpu_vcn_idle_work_handler(struct work_struct *work); @@ -80,6 +82,9 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) adev->vcn.indirect_sram = true; break; + case CHIP_NAVI14: + fw_name = FIRMWARE_NAVI14; + break; default: return -EINVAL; } diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 102927c5f8f2..eadf86b5f0f0 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -418,6 +418,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &dm_ip_block); amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block); + amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); break; default: return -EINVAL; -- cgit v1.2.3 From 8687b47e3ae0fade2b37ab2653a0eb66a5db9e59 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Wed, 20 Mar 2019 12:37:45 +0800 Subject: drm/amdgpu: declare asd firmware for navi14 So the dependency gets properly tracked. Signed-off-by: Xiaojie Yuan Reviewed-by: Snow Zhang Reviewed-by: Jack Xiao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index f6a22da2b397..2ccd9489a41d 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -44,6 +44,7 @@ MODULE_FIRMWARE("amdgpu/vega20_ta.bin"); MODULE_FIRMWARE("amdgpu/navi10_sos.bin"); MODULE_FIRMWARE("amdgpu/navi10_asd.bin"); MODULE_FIRMWARE("amdgpu/navi14_sos.bin"); +MODULE_FIRMWARE("amdgpu/navi14_asd.bin"); /* address block */ #define smnMP1_FIRMWARE_FLAGS 0x3010024 -- cgit v1.2.3 From 408c49de9b8858a6bad725b6d123b0ea22f38df3 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Fri, 22 Mar 2019 13:03:01 +0800 Subject: drm/amdgpu/mmhub2: set clock gating for navi14 same as navi10. Signed-off-by: Xiaojie Yuan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c index 0f9549f19ade..a5c7ed1f37eb 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c @@ -406,6 +406,7 @@ int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev, switch (adev->asic_type) { case CHIP_NAVI10: + case CHIP_NAVI14: mmhub_v2_0_update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE ? 
true : false); mmhub_v2_0_update_medium_grain_light_sleep(adev, -- cgit v1.2.3 From 29e6fd7c86bf13840be40ca507eae25ab1a860d5 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Fri, 22 Mar 2019 13:10:03 +0800 Subject: drm/amdgpu/athub2: set clock gating for navi14 same as navi10. Signed-off-by: Xiaojie Yuan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/athub_v2_0.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c index 89b32b6b81c8..7e6c0bc3e8dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c @@ -74,6 +74,7 @@ int athub_v2_0_set_clockgating(struct amdgpu_device *adev, switch (adev->asic_type) { case CHIP_NAVI10: + case CHIP_NAVI14: athub_v2_0_update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE ? true : false); athub_v2_0_update_medium_grain_light_sleep(adev, -- cgit v1.2.3 From d0c39f8cbfaa4bd599328920d7d13541fd9bbb10 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Wed, 20 Mar 2019 16:12:54 +0800 Subject: drm/amdgpu: enable clock gatings for navi14 Set appropriate CG flags for navi14. Signed-off-by: Xiaojie Yuan Acked-by: Alex Deucher Reviewed-by: Jack Xiao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nv.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index eadf86b5f0f0..9e5a01663354 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -575,7 +575,20 @@ static int nv_common_early_init(void *handle) adev->external_rev_id = adev->rev_id + 0x1; break; case CHIP_NAVI14: - adev->cg_flags = 0; + adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | + AMD_CG_SUPPORT_GFX_CGCG | + AMD_CG_SUPPORT_IH_CG | + AMD_CG_SUPPORT_HDP_MGCG | + AMD_CG_SUPPORT_HDP_LS | + AMD_CG_SUPPORT_SDMA_MGCG | + AMD_CG_SUPPORT_SDMA_LS | + AMD_CG_SUPPORT_MC_MGCG | + AMD_CG_SUPPORT_MC_LS | + AMD_CG_SUPPORT_ATHUB_MGCG | + AMD_CG_SUPPORT_ATHUB_LS | + AMD_CG_SUPPORT_VCN_MGCG | + AMD_CG_SUPPORT_BIF_MGCG | + AMD_CG_SUPPORT_BIF_LS; adev->pg_flags = 0; adev->external_rev_id = adev->rev_id + 0x1; /* ??? */ break; -- cgit v1.2.3 From 5e0f378d8afb8d2f62a01933b43a296aa117adea Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Thu, 28 Mar 2019 16:43:16 +0800 Subject: drm/amdgpu: enable async gfx ring for navi14 Same as navi10. 
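The hunks below relax the asic_type test from == CHIP_NAVI10 to >= CHIP_NAVI10, which leans on the ordering of the asic-type enum: every chip enumerated at or after CHIP_NAVI10 takes the async gfx ring MQD path (when amdgpu_async_gfx_ring is enabled). A minimal sketch of that ordering assumption (the enum here is abbreviated and purely illustrative, not the driver's real amd_asic_type):

#include <stdbool.h>
#include <stdio.h>

/* Abbreviated stand-in for the asic-type enum; only the relative
 * ordering matters for a ">= CHIP_NAVI10" style check. */
enum asic_type_sketch {
        SKETCH_VEGA20,
        SKETCH_RAVEN,
        SKETCH_NAVI10,
        SKETCH_NAVI14,
};

static bool uses_async_gfx_ring(enum asic_type_sketch type)
{
        return type >= SKETCH_NAVI10;
}

int main(void)
{
        printf("raven:  %d\n", uses_async_gfx_ring(SKETCH_RAVEN));
        printf("navi10: %d\n", uses_async_gfx_ring(SKETCH_NAVI10));
        printf("navi14: %d\n", uses_async_gfx_ring(SKETCH_NAVI14));
        return 0;
}

The upside is that any later entry appended after CHIP_NAVI10 inherits the behavior automatically, which is what this change wants for navi14.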
Signed-off-by: Xiaojie Yuan Reviewed-by: Jack Xiao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 74066e1466f7..f9bef3154b99 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -389,7 +389,7 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name); } - if (adev->asic_type == CHIP_NAVI10 && amdgpu_async_gfx_ring) { + if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) { /* create MQD for each KGQ */ for (i = 0; i < adev->gfx.num_gfx_rings; i++) { ring = &adev->gfx.gfx_ring[i]; @@ -437,7 +437,7 @@ void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev) struct amdgpu_ring *ring = NULL; int i; - if (adev->asic_type == CHIP_NAVI10 && amdgpu_async_gfx_ring) { + if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) { for (i = 0; i < adev->gfx.num_gfx_rings; i++) { ring = &adev->gfx.gfx_ring[i]; kfree(adev->gfx.me.mqd_backup[i]); @@ -456,7 +456,7 @@ void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev) } ring = &adev->gfx.kiq.ring; - if (adev->asic_type == CHIP_NAVI10 && amdgpu_async_gfx_ring) + if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) kfree(adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS]); kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]); amdgpu_bo_free_kernel(&ring->mqd_obj, -- cgit v1.2.3 From c5cc14e34d62cac69d39a556c1b4761099404dfe Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Thu, 18 Apr 2019 17:46:17 +0800 Subject: drm/amd/display: disable display writeback for navi14 not used. Signed-off-by: Xiaojie Yuan Acked-by: Alex Deucher Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 293268d33929..b537536d6488 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -702,7 +702,7 @@ static const struct resource_caps res_cap_nv14 = { .num_audio = 6, .num_stream_encoder = 5, .num_pll = 5, - .num_dwb = 1, + .num_dwb = 0, .num_ddc = 5, }; -- cgit v1.2.3 From 03917df7e510b4987c0b5bf7e05edf356c03e939 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Tue, 2 Jul 2019 12:49:41 -0500 Subject: drm/amdgpu/nv: set vcn pg flag for navi14 Enable VCN power gating by default. Signed-off-by: Xiaojie Yuan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 9e5a01663354..3813ed6b9e79 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -589,7 +589,7 @@ static int nv_common_early_init(void *handle) AMD_CG_SUPPORT_VCN_MGCG | AMD_CG_SUPPORT_BIF_MGCG | AMD_CG_SUPPORT_BIF_LS; - adev->pg_flags = 0; + adev->pg_flags = AMD_PG_SUPPORT_VCN; adev->external_rev_id = adev->rev_id + 0x1; /* ??? 
*/ break; default: -- cgit v1.2.3 From e017bb8035ce890ff5a4cded2e8b91b814fb7b21 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Mon, 13 May 2019 17:02:12 +0800 Subject: drm/amd/powerplay: disable gfxoff for navi14 gfxoff doesn't work on navi14 yet, so disable it for now Signed-off-by: Xiaojie Yuan Reviewed-by: Jack Xiao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index d977d68320c9..1ac9db3aa5bc 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -559,6 +559,9 @@ static int smu_early_init(void *handle) smu->pm_enabled = !!amdgpu_dpm; mutex_init(&smu->mutex); + if (adev->asic_type == CHIP_NAVI14) + adev->pm.pp_feature &= ~PP_GFXOFF_MASK; + return smu_set_funcs(adev); } -- cgit v1.2.3 From 0377b08823418e418bcd50d950d2baaeb8b7a1aa Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Tue, 2 Jul 2019 12:52:52 -0500 Subject: drm/amdgpu/vcn: enable indirect DPG SRAM mode for navi14 Enable VCN dynamic powergating for navi14. Signed-off-by: Xiaojie Yuan Reviewed-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 3 +++ drivers/gpu/drm/amd/amdgpu/nv.c | 3 ++- 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 1ff0f9e9df1e..3e1a360dee54 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -84,6 +84,9 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) break; case CHIP_NAVI14: fw_name = FIRMWARE_NAVI14; + if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && + (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) + adev->vcn.indirect_sram = true; break; default: return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 3813ed6b9e79..800321f99857 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -589,7 +589,8 @@ static int nv_common_early_init(void *handle) AMD_CG_SUPPORT_VCN_MGCG | AMD_CG_SUPPORT_BIF_MGCG | AMD_CG_SUPPORT_BIF_LS; - adev->pg_flags = AMD_PG_SUPPORT_VCN; + adev->pg_flags = AMD_PG_SUPPORT_VCN | + AMD_PG_SUPPORT_VCN_DPG; adev->external_rev_id = adev->rev_id + 0x1; /* ??? */ break; default: -- cgit v1.2.3 From ba02636de54e7c2f8d549401ce9c9f508a05ef7a Mon Sep 17 00:00:00 2001 From: Jack Xiao Date: Fri, 5 Jul 2019 16:00:08 -0500 Subject: drm/amdgpu: enable gfxoff code path for navi14 Based on navi10 gfxoff logic, enable the related code path for navi14. 
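This drops the earlier navi14 workarounds: the special RLC_G autoload trigger goes away and smu_early_init() no longer clears PP_GFXOFF_MASK, so the generic feature-bit gate in smu_v11_0_gfx_off_control() is reached again. A minimal sketch of that gate (the mask value and helper names here are illustrative stand-ins, not the driver's actual definitions):

#include <stdio.h>

#define PP_GFXOFF_MASK_SKETCH (1u << 0)  /* illustrative bit position only */

struct pm_state_sketch {
        unsigned int pp_feature;
};

/* Mirrors the shape of the gate in smu_v11_0_gfx_off_control():
 * if the gfxoff feature bit was cleared at early init, bail out. */
static int gfx_off_control_sketch(struct pm_state_sketch *pm, int enable)
{
        if (!(pm->pp_feature & PP_GFXOFF_MASK_SKETCH)) {
                printf("gfxoff request ignored (feature masked off)\n");
                return 0;
        }
        printf("gfxoff %s forwarded to the SMU\n", enable ? "enable" : "disable");
        return 0;
}

int main(void)
{
        struct pm_state_sketch pm = { .pp_feature = PP_GFXOFF_MASK_SKETCH };

        /* What the earlier "disable gfxoff for navi14" patch did at early init: */
        pm.pp_feature &= ~PP_GFXOFF_MASK_SKETCH;
        gfx_off_control_sketch(&pm, 1);

        /* With that workaround dropped, the bit stays set and the navi14
         * case added below reaches the SMU path: */
        pm.pp_feature |= PP_GFXOFF_MASK_SKETCH;
        gfx_off_control_sketch(&pm, 1);
        return 0;
}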
Signed-off-by: Jack Xiao Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 4 +--- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 1 + drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 3 --- drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 1 + 4 files changed, 3 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 409725f40802..1f9105a6c050 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -998,9 +998,7 @@ out: return ret; /* Start rlc autoload after psp recieved all the gfx firmware */ - if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM || - (adev->asic_type == CHIP_NAVI14 && - ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G)) { + if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM) { ret = psp_rlc_autoload(psp); if (ret) { DRM_ERROR("Failed to start rlc autoload\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 0cf7c3faa91f..ec11bfded772 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -4105,6 +4105,7 @@ static int gfx_v10_0_set_powergating_state(void *handle, bool enable = (state == AMD_PG_STATE_GATE) ? true : false; switch (adev->asic_type) { case CHIP_NAVI10: + case CHIP_NAVI14: if (!enable) { amdgpu_gfx_off_ctrl(adev, false); cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work); diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 1ac9db3aa5bc..d977d68320c9 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -559,9 +559,6 @@ static int smu_early_init(void *handle) smu->pm_enabled = !!amdgpu_dpm; mutex_init(&smu->mutex); - if (adev->asic_type == CHIP_NAVI14) - adev->pm.pp_feature &= ~PP_GFXOFF_MASK; - return smu_set_funcs(adev); } diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index 0e9eead6ad29..1315958e5d81 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -1359,6 +1359,7 @@ static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable) case CHIP_VEGA20: break; case CHIP_NAVI10: + case CHIP_NAVI14: if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) return 0; mutex_lock(&smu->mutex); -- cgit v1.2.3 From 2665ec41718a8514c85f2becf83c58b8c5041ed6 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 2 Jul 2019 14:35:36 -0500 Subject: drm/amdgpu: disable concurrent flushes on Navi14 Same thing applies to navi14 as navi10. Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index 57b3d8a9bef3..529065b83885 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -368,7 +368,8 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm, * are broken on Navi10 and Navi14. */ if (needs_flush && (adev->asic_type < CHIP_VEGA10 || - adev->asic_type == CHIP_NAVI10)) + adev->asic_type == CHIP_NAVI10 || + adev->asic_type == CHIP_NAVI14)) continue; /* Good, we can use this VMID. 
Remember this submission as -- cgit v1.2.3 From d1daf8502e890dc4fc3fc17b57ae106397ad316c3 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 2 Jul 2019 14:42:25 -0500 Subject: drm/amdgpu: consolidate navi14 IP init It's the same as navi10. Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nv.c | 16 +--------------- 1 file changed, 1 insertion(+), 15 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 800321f99857..10ec0e81ee58 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -383,6 +383,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_NAVI10: + case CHIP_NAVI14: amdgpu_device_ip_block_add(adev, &nv_common_ip_block); amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); @@ -405,21 +406,6 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) if (adev->enable_mes) amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block); break; - case CHIP_NAVI14: - amdgpu_device_ip_block_add(adev, &nv_common_ip_block); - amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); - amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); - if (is_support_sw_smu(adev)) - amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); - if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); - else if (amdgpu_device_has_dc_support(adev)) - amdgpu_device_ip_block_add(adev, &dm_ip_block); - amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block); - amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); - break; default: return -EINVAL; } -- cgit v1.2.3 From 0e96cf7f676972b1154c4c2b68e878266a405a84 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Wed, 29 Aug 2018 16:49:19 +0800 Subject: drm/amdgpu: add mmhub 9.4.1 header files for Arcturus MMHUB is the GPU memory hub used by SDMA and VCN. Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- .../include/asic_reg/mmhub/mmhub_9_4_1_default.h | 3933 ++ .../include/asic_reg/mmhub/mmhub_9_4_1_offset.h | 7753 ++++ .../include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h | 44884 +++++++++++++++++++ 3 files changed, 56570 insertions(+) create mode 100644 drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_default.h create mode 100644 drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h create mode 100644 drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_default.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_default.h new file mode 100644 index 000000000000..ec631c816d18 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_default.h @@ -0,0 +1,3933 @@ +/* + * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef _mmhub_9_4_1_DEFAULT_HEADER +#define _mmhub_9_4_1_DEFAULT_HEADER + + +// addressBlock: mmhub_dagb_dagbdec0 +#define mmDAGB0_RDCLI0_DEFAULT 0xfe5fe0f9 +#define mmDAGB0_RDCLI1_DEFAULT 0xfe5fe0f9 +#define mmDAGB0_RDCLI2_DEFAULT 0xfe5fe0f9 +#define mmDAGB0_RDCLI3_DEFAULT 0xfe5fe0f9 +#define mmDAGB0_RDCLI4_DEFAULT 0xfe5fe0f9 +#define mmDAGB0_RDCLI5_DEFAULT 0xfe5fe0f9 +#define mmDAGB0_RDCLI6_DEFAULT 0xfe5fe0f9 +#define mmDAGB0_RDCLI7_DEFAULT 0xfe5fe0f9 +#define mmDAGB0_RDCLI8_DEFAULT 0xfe5fe0f9 +#define mmDAGB0_RDCLI9_DEFAULT 0xfe5fe0f9 +#define mmDAGB0_RDCLI10_DEFAULT 0xfe5fe0f9 +#define mmDAGB0_RDCLI11_DEFAULT 0xfe5fe0f9 +#define mmDAGB0_RDCLI12_DEFAULT 0xfe5fe0f9 +#define mmDAGB0_RDCLI13_DEFAULT 0xfe5fe0f9 +#define mmDAGB0_RDCLI14_DEFAULT 0xfe5fe0f9 +#define mmDAGB0_RDCLI15_DEFAULT 0xfe5fe0f9 +#define mmDAGB0_RD_CNTL_DEFAULT 0x03527df8 +#define mmDAGB0_RD_GMI_CNTL_DEFAULT 0x00003045 +#define mmDAGB0_RD_ADDR_DAGB_DEFAULT 0x00000039 +#define mmDAGB0_RD_OUTPUT_DAGB_MAX_BURST_DEFAULT 0x88888888 +#define mmDAGB0_RD_OUTPUT_DAGB_LAZY_TIMER_DEFAULT 0x11111111 +#define mmDAGB0_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB0_L1TLB_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB0_ATCVM_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB0_RD_ADDR_DAGB_MAX_BURST0_DEFAULT 0x88888888 +#define mmDAGB0_RD_ADDR_DAGB_LAZY_TIMER0_DEFAULT 0x11111111 +#define mmDAGB0_RD_ADDR_DAGB_MAX_BURST1_DEFAULT 0x88888888 +#define mmDAGB0_RD_ADDR_DAGB_LAZY_TIMER1_DEFAULT 0x11111111 +#define mmDAGB0_RD_VC0_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB0_RD_VC1_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB0_RD_VC2_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB0_RD_VC3_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB0_RD_VC4_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB0_RD_VC5_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB0_RD_VC6_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB0_RD_VC7_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB0_RD_CNTL_MISC_DEFAULT 0x69a0e408 +#define mmDAGB0_RD_TLB_CREDIT_DEFAULT 0x2f7bdef7 +#define mmDAGB0_RDCLI_ASK_PENDING_DEFAULT 0x00000000 +#define mmDAGB0_RDCLI_GO_PENDING_DEFAULT 0x00000000 +#define mmDAGB0_RDCLI_GBLSEND_PENDING_DEFAULT 0x00000000 +#define mmDAGB0_RDCLI_TLB_PENDING_DEFAULT 0x00000000 +#define mmDAGB0_RDCLI_OARB_PENDING_DEFAULT 0x00000000 +#define mmDAGB0_RDCLI_OSD_PENDING_DEFAULT 0x00000000 +#define mmDAGB0_WRCLI0_DEFAULT 0xfe5fe0f9 +#define mmDAGB0_WRCLI1_DEFAULT 0xfe5fe0f9 +#define mmDAGB0_WRCLI2_DEFAULT 0xfe5fe0f9 +#define mmDAGB0_WRCLI3_DEFAULT 0xfe5fe0f9 
+#define mmDAGB0_WRCLI4_DEFAULT 0xfe5fe0f9 +#define mmDAGB0_WRCLI5_DEFAULT 0xfe5fe0f9 +#define mmDAGB0_WRCLI6_DEFAULT 0xfe5fe0f9 +#define mmDAGB0_WRCLI7_DEFAULT 0xfe5fe0f9 +#define mmDAGB0_WRCLI8_DEFAULT 0xfe5fe0f9 +#define mmDAGB0_WRCLI9_DEFAULT 0xfe5fe0f9 +#define mmDAGB0_WRCLI10_DEFAULT 0xfe5fe0f9 +#define mmDAGB0_WRCLI11_DEFAULT 0xfe5fe0f9 +#define mmDAGB0_WRCLI12_DEFAULT 0xfe5fe0f9 +#define mmDAGB0_WRCLI13_DEFAULT 0xfe5fe0f9 +#define mmDAGB0_WRCLI14_DEFAULT 0xfe5fe0f9 +#define mmDAGB0_WRCLI15_DEFAULT 0xfe5fe0f9 +#define mmDAGB0_WR_CNTL_DEFAULT 0x03527df8 +#define mmDAGB0_WR_GMI_CNTL_DEFAULT 0x00003045 +#define mmDAGB0_WR_ADDR_DAGB_DEFAULT 0x00000039 +#define mmDAGB0_WR_OUTPUT_DAGB_MAX_BURST_DEFAULT 0x88888888 +#define mmDAGB0_WR_OUTPUT_DAGB_LAZY_TIMER_DEFAULT 0x11111111 +#define mmDAGB0_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB0_L1TLB_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB0_ATCVM_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB0_WR_ADDR_DAGB_MAX_BURST0_DEFAULT 0x88888888 +#define mmDAGB0_WR_ADDR_DAGB_LAZY_TIMER0_DEFAULT 0x11111111 +#define mmDAGB0_WR_ADDR_DAGB_MAX_BURST1_DEFAULT 0x88888888 +#define mmDAGB0_WR_ADDR_DAGB_LAZY_TIMER1_DEFAULT 0x11111111 +#define mmDAGB0_WR_DATA_DAGB_DEFAULT 0x00000001 +#define mmDAGB0_WR_DATA_DAGB_MAX_BURST0_DEFAULT 0x11111111 +#define mmDAGB0_WR_DATA_DAGB_LAZY_TIMER0_DEFAULT 0x00000000 +#define mmDAGB0_WR_DATA_DAGB_MAX_BURST1_DEFAULT 0x11111111 +#define mmDAGB0_WR_DATA_DAGB_LAZY_TIMER1_DEFAULT 0x00000000 +#define mmDAGB0_WR_VC0_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB0_WR_VC1_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB0_WR_VC2_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB0_WR_VC3_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB0_WR_VC4_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB0_WR_VC5_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB0_WR_VC6_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB0_WR_VC7_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB0_WR_CNTL_MISC_DEFAULT 0x69a0e408 +#define mmDAGB0_WR_TLB_CREDIT_DEFAULT 0x2f7bdef7 +#define mmDAGB0_WR_DATA_CREDIT_DEFAULT 0x60606070 +#define mmDAGB0_WR_MISC_CREDIT_DEFAULT 0x0078dc88 +#define mmDAGB0_WRCLI_ASK_PENDING_DEFAULT 0x00000000 +#define mmDAGB0_WRCLI_GO_PENDING_DEFAULT 0x00000000 +#define mmDAGB0_WRCLI_GBLSEND_PENDING_DEFAULT 0x00000000 +#define mmDAGB0_WRCLI_TLB_PENDING_DEFAULT 0x00000000 +#define mmDAGB0_WRCLI_OARB_PENDING_DEFAULT 0x00000000 +#define mmDAGB0_WRCLI_OSD_PENDING_DEFAULT 0x00000000 +#define mmDAGB0_WRCLI_DBUS_ASK_PENDING_DEFAULT 0x00000000 +#define mmDAGB0_WRCLI_DBUS_GO_PENDING_DEFAULT 0x00000000 +#define mmDAGB0_DAGB_DLY_DEFAULT 0x00000000 +#define mmDAGB0_CNTL_MISC_DEFAULT 0xcf7c1ffa +#define mmDAGB0_CNTL_MISC2_DEFAULT 0x003c0000 +#define mmDAGB0_FIFO_EMPTY_DEFAULT 0x00ffffff +#define mmDAGB0_FIFO_FULL_DEFAULT 0x00000000 +#define mmDAGB0_WR_CREDITS_FULL_DEFAULT 0x1fffffff +#define mmDAGB0_RD_CREDITS_FULL_DEFAULT 0x0003ffff +#define mmDAGB0_PERFCOUNTER_LO_DEFAULT 0x00000000 +#define mmDAGB0_PERFCOUNTER_HI_DEFAULT 0x00000000 +#define mmDAGB0_PERFCOUNTER0_CFG_DEFAULT 0x00000000 +#define mmDAGB0_PERFCOUNTER1_CFG_DEFAULT 0x00000000 +#define mmDAGB0_PERFCOUNTER2_CFG_DEFAULT 0x00000000 +#define mmDAGB0_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000 +#define mmDAGB0_RESERVE0_DEFAULT 0xffffffff +#define mmDAGB0_RESERVE1_DEFAULT 0xffffffff +#define mmDAGB0_RESERVE2_DEFAULT 0xffffffff +#define mmDAGB0_RESERVE3_DEFAULT 0xffffffff +#define mmDAGB0_RESERVE4_DEFAULT 0xffffffff +#define mmDAGB0_RESERVE5_DEFAULT 0xffffffff +#define mmDAGB0_RESERVE6_DEFAULT 0xffffffff +#define mmDAGB0_RESERVE7_DEFAULT 0xffffffff +#define 
mmDAGB0_RESERVE8_DEFAULT 0xffffffff +#define mmDAGB0_RESERVE9_DEFAULT 0xffffffff +#define mmDAGB0_RESERVE10_DEFAULT 0xffffffff +#define mmDAGB0_RESERVE11_DEFAULT 0xffffffff +#define mmDAGB0_RESERVE12_DEFAULT 0xffffffff +#define mmDAGB0_RESERVE13_DEFAULT 0xffffffff + + +// addressBlock: mmhub_dagb_dagbdec1 +#define mmDAGB1_RDCLI0_DEFAULT 0xfe5fe0f9 +#define mmDAGB1_RDCLI1_DEFAULT 0xfe5fe0f9 +#define mmDAGB1_RDCLI2_DEFAULT 0xfe5fe0f9 +#define mmDAGB1_RDCLI3_DEFAULT 0xfe5fe0f9 +#define mmDAGB1_RDCLI4_DEFAULT 0xfe5fe0f9 +#define mmDAGB1_RDCLI5_DEFAULT 0xfe5fe0f9 +#define mmDAGB1_RDCLI6_DEFAULT 0xfe5fe0f9 +#define mmDAGB1_RDCLI7_DEFAULT 0xfe5fe0f9 +#define mmDAGB1_RDCLI8_DEFAULT 0xfe5fe0f9 +#define mmDAGB1_RDCLI9_DEFAULT 0xfe5fe0f9 +#define mmDAGB1_RDCLI10_DEFAULT 0xfe5fe0f9 +#define mmDAGB1_RDCLI11_DEFAULT 0xfe5fe0f9 +#define mmDAGB1_RDCLI12_DEFAULT 0xfe5fe0f9 +#define mmDAGB1_RDCLI13_DEFAULT 0xfe5fe0f9 +#define mmDAGB1_RDCLI14_DEFAULT 0xfe5fe0f9 +#define mmDAGB1_RDCLI15_DEFAULT 0xfe5fe0f9 +#define mmDAGB1_RD_CNTL_DEFAULT 0x03527df8 +#define mmDAGB1_RD_GMI_CNTL_DEFAULT 0x00003045 +#define mmDAGB1_RD_ADDR_DAGB_DEFAULT 0x00000039 +#define mmDAGB1_RD_OUTPUT_DAGB_MAX_BURST_DEFAULT 0x88888888 +#define mmDAGB1_RD_OUTPUT_DAGB_LAZY_TIMER_DEFAULT 0x11111111 +#define mmDAGB1_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB1_L1TLB_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB1_ATCVM_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB1_RD_ADDR_DAGB_MAX_BURST0_DEFAULT 0x88888888 +#define mmDAGB1_RD_ADDR_DAGB_LAZY_TIMER0_DEFAULT 0x11111111 +#define mmDAGB1_RD_ADDR_DAGB_MAX_BURST1_DEFAULT 0x88888888 +#define mmDAGB1_RD_ADDR_DAGB_LAZY_TIMER1_DEFAULT 0x11111111 +#define mmDAGB1_RD_VC0_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB1_RD_VC1_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB1_RD_VC2_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB1_RD_VC3_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB1_RD_VC4_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB1_RD_VC5_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB1_RD_VC6_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB1_RD_VC7_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB1_RD_CNTL_MISC_DEFAULT 0x69a0e408 +#define mmDAGB1_RD_TLB_CREDIT_DEFAULT 0x2f7bdef7 +#define mmDAGB1_RDCLI_ASK_PENDING_DEFAULT 0x00000000 +#define mmDAGB1_RDCLI_GO_PENDING_DEFAULT 0x00000000 +#define mmDAGB1_RDCLI_GBLSEND_PENDING_DEFAULT 0x00000000 +#define mmDAGB1_RDCLI_TLB_PENDING_DEFAULT 0x00000000 +#define mmDAGB1_RDCLI_OARB_PENDING_DEFAULT 0x00000000 +#define mmDAGB1_RDCLI_OSD_PENDING_DEFAULT 0x00000000 +#define mmDAGB1_WRCLI0_DEFAULT 0xfe5fe0f9 +#define mmDAGB1_WRCLI1_DEFAULT 0xfe5fe0f9 +#define mmDAGB1_WRCLI2_DEFAULT 0xfe5fe0f9 +#define mmDAGB1_WRCLI3_DEFAULT 0xfe5fe0f9 +#define mmDAGB1_WRCLI4_DEFAULT 0xfe5fe0f9 +#define mmDAGB1_WRCLI5_DEFAULT 0xfe5fe0f9 +#define mmDAGB1_WRCLI6_DEFAULT 0xfe5fe0f9 +#define mmDAGB1_WRCLI7_DEFAULT 0xfe5fe0f9 +#define mmDAGB1_WRCLI8_DEFAULT 0xfe5fe0f9 +#define mmDAGB1_WRCLI9_DEFAULT 0xfe5fe0f9 +#define mmDAGB1_WRCLI10_DEFAULT 0xfe5fe0f9 +#define mmDAGB1_WRCLI11_DEFAULT 0xfe5fe0f9 +#define mmDAGB1_WRCLI12_DEFAULT 0xfe5fe0f9 +#define mmDAGB1_WRCLI13_DEFAULT 0xfe5fe0f9 +#define mmDAGB1_WRCLI14_DEFAULT 0xfe5fe0f9 +#define mmDAGB1_WRCLI15_DEFAULT 0xfe5fe0f9 +#define mmDAGB1_WR_CNTL_DEFAULT 0x03527df8 +#define mmDAGB1_WR_GMI_CNTL_DEFAULT 0x00003045 +#define mmDAGB1_WR_ADDR_DAGB_DEFAULT 0x00000039 +#define mmDAGB1_WR_OUTPUT_DAGB_MAX_BURST_DEFAULT 0x88888888 +#define mmDAGB1_WR_OUTPUT_DAGB_LAZY_TIMER_DEFAULT 0x11111111 +#define mmDAGB1_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB1_L1TLB_WR_CGTT_CLK_CTRL_DEFAULT 
0x00000100 +#define mmDAGB1_ATCVM_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB1_WR_ADDR_DAGB_MAX_BURST0_DEFAULT 0x88888888 +#define mmDAGB1_WR_ADDR_DAGB_LAZY_TIMER0_DEFAULT 0x11111111 +#define mmDAGB1_WR_ADDR_DAGB_MAX_BURST1_DEFAULT 0x88888888 +#define mmDAGB1_WR_ADDR_DAGB_LAZY_TIMER1_DEFAULT 0x11111111 +#define mmDAGB1_WR_DATA_DAGB_DEFAULT 0x00000001 +#define mmDAGB1_WR_DATA_DAGB_MAX_BURST0_DEFAULT 0x11111111 +#define mmDAGB1_WR_DATA_DAGB_LAZY_TIMER0_DEFAULT 0x00000000 +#define mmDAGB1_WR_DATA_DAGB_MAX_BURST1_DEFAULT 0x11111111 +#define mmDAGB1_WR_DATA_DAGB_LAZY_TIMER1_DEFAULT 0x00000000 +#define mmDAGB1_WR_VC0_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB1_WR_VC1_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB1_WR_VC2_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB1_WR_VC3_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB1_WR_VC4_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB1_WR_VC5_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB1_WR_VC6_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB1_WR_VC7_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB1_WR_CNTL_MISC_DEFAULT 0x69a0e408 +#define mmDAGB1_WR_TLB_CREDIT_DEFAULT 0x2f7bdef7 +#define mmDAGB1_WR_DATA_CREDIT_DEFAULT 0x60606070 +#define mmDAGB1_WR_MISC_CREDIT_DEFAULT 0x0078dc88 +#define mmDAGB1_WRCLI_ASK_PENDING_DEFAULT 0x00000000 +#define mmDAGB1_WRCLI_GO_PENDING_DEFAULT 0x00000000 +#define mmDAGB1_WRCLI_GBLSEND_PENDING_DEFAULT 0x00000000 +#define mmDAGB1_WRCLI_TLB_PENDING_DEFAULT 0x00000000 +#define mmDAGB1_WRCLI_OARB_PENDING_DEFAULT 0x00000000 +#define mmDAGB1_WRCLI_OSD_PENDING_DEFAULT 0x00000000 +#define mmDAGB1_WRCLI_DBUS_ASK_PENDING_DEFAULT 0x00000000 +#define mmDAGB1_WRCLI_DBUS_GO_PENDING_DEFAULT 0x00000000 +#define mmDAGB1_DAGB_DLY_DEFAULT 0x00000000 +#define mmDAGB1_CNTL_MISC_DEFAULT 0xcf7c1ffa +#define mmDAGB1_CNTL_MISC2_DEFAULT 0x003c0000 +#define mmDAGB1_FIFO_EMPTY_DEFAULT 0x00ffffff +#define mmDAGB1_FIFO_FULL_DEFAULT 0x00000000 +#define mmDAGB1_WR_CREDITS_FULL_DEFAULT 0x1fffffff +#define mmDAGB1_RD_CREDITS_FULL_DEFAULT 0x0003ffff +#define mmDAGB1_PERFCOUNTER_LO_DEFAULT 0x00000000 +#define mmDAGB1_PERFCOUNTER_HI_DEFAULT 0x00000000 +#define mmDAGB1_PERFCOUNTER0_CFG_DEFAULT 0x00000000 +#define mmDAGB1_PERFCOUNTER1_CFG_DEFAULT 0x00000000 +#define mmDAGB1_PERFCOUNTER2_CFG_DEFAULT 0x00000000 +#define mmDAGB1_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000 +#define mmDAGB1_RESERVE0_DEFAULT 0xffffffff +#define mmDAGB1_RESERVE1_DEFAULT 0xffffffff +#define mmDAGB1_RESERVE2_DEFAULT 0xffffffff +#define mmDAGB1_RESERVE3_DEFAULT 0xffffffff +#define mmDAGB1_RESERVE4_DEFAULT 0xffffffff +#define mmDAGB1_RESERVE5_DEFAULT 0xffffffff +#define mmDAGB1_RESERVE6_DEFAULT 0xffffffff +#define mmDAGB1_RESERVE7_DEFAULT 0xffffffff +#define mmDAGB1_RESERVE8_DEFAULT 0xffffffff +#define mmDAGB1_RESERVE9_DEFAULT 0xffffffff +#define mmDAGB1_RESERVE10_DEFAULT 0xffffffff +#define mmDAGB1_RESERVE11_DEFAULT 0xffffffff +#define mmDAGB1_RESERVE12_DEFAULT 0xffffffff +#define mmDAGB1_RESERVE13_DEFAULT 0xffffffff + + +// addressBlock: mmhub_dagb_dagbdec2 +#define mmDAGB2_RDCLI0_DEFAULT 0xfe5fe0f9 +#define mmDAGB2_RDCLI1_DEFAULT 0xfe5fe0f9 +#define mmDAGB2_RDCLI2_DEFAULT 0xfe5fe0f9 +#define mmDAGB2_RDCLI3_DEFAULT 0xfe5fe0f9 +#define mmDAGB2_RDCLI4_DEFAULT 0xfe5fe0f9 +#define mmDAGB2_RDCLI5_DEFAULT 0xfe5fe0f9 +#define mmDAGB2_RDCLI6_DEFAULT 0xfe5fe0f9 +#define mmDAGB2_RDCLI7_DEFAULT 0xfe5fe0f9 +#define mmDAGB2_RDCLI8_DEFAULT 0xfe5fe0f9 +#define mmDAGB2_RDCLI9_DEFAULT 0xfe5fe0f9 +#define mmDAGB2_RDCLI10_DEFAULT 0xfe5fe0f9 +#define mmDAGB2_RDCLI11_DEFAULT 0xfe5fe0f9 +#define mmDAGB2_RDCLI12_DEFAULT 0xfe5fe0f9 +#define 
mmDAGB2_RDCLI13_DEFAULT 0xfe5fe0f9 +#define mmDAGB2_RDCLI14_DEFAULT 0xfe5fe0f9 +#define mmDAGB2_RDCLI15_DEFAULT 0xfe5fe0f9 +#define mmDAGB2_RD_CNTL_DEFAULT 0x03527df8 +#define mmDAGB2_RD_GMI_CNTL_DEFAULT 0x00003045 +#define mmDAGB2_RD_ADDR_DAGB_DEFAULT 0x00000039 +#define mmDAGB2_RD_OUTPUT_DAGB_MAX_BURST_DEFAULT 0x88888888 +#define mmDAGB2_RD_OUTPUT_DAGB_LAZY_TIMER_DEFAULT 0x11111111 +#define mmDAGB2_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB2_L1TLB_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB2_ATCVM_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB2_RD_ADDR_DAGB_MAX_BURST0_DEFAULT 0x88888888 +#define mmDAGB2_RD_ADDR_DAGB_LAZY_TIMER0_DEFAULT 0x11111111 +#define mmDAGB2_RD_ADDR_DAGB_MAX_BURST1_DEFAULT 0x88888888 +#define mmDAGB2_RD_ADDR_DAGB_LAZY_TIMER1_DEFAULT 0x11111111 +#define mmDAGB2_RD_VC0_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB2_RD_VC1_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB2_RD_VC2_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB2_RD_VC3_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB2_RD_VC4_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB2_RD_VC5_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB2_RD_VC6_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB2_RD_VC7_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB2_RD_CNTL_MISC_DEFAULT 0x69a0e408 +#define mmDAGB2_RD_TLB_CREDIT_DEFAULT 0x2f7bdef7 +#define mmDAGB2_RDCLI_ASK_PENDING_DEFAULT 0x00000000 +#define mmDAGB2_RDCLI_GO_PENDING_DEFAULT 0x00000000 +#define mmDAGB2_RDCLI_GBLSEND_PENDING_DEFAULT 0x00000000 +#define mmDAGB2_RDCLI_TLB_PENDING_DEFAULT 0x00000000 +#define mmDAGB2_RDCLI_OARB_PENDING_DEFAULT 0x00000000 +#define mmDAGB2_RDCLI_OSD_PENDING_DEFAULT 0x00000000 +#define mmDAGB2_WRCLI0_DEFAULT 0xfe5fe0f9 +#define mmDAGB2_WRCLI1_DEFAULT 0xfe5fe0f9 +#define mmDAGB2_WRCLI2_DEFAULT 0xfe5fe0f9 +#define mmDAGB2_WRCLI3_DEFAULT 0xfe5fe0f9 +#define mmDAGB2_WRCLI4_DEFAULT 0xfe5fe0f9 +#define mmDAGB2_WRCLI5_DEFAULT 0xfe5fe0f9 +#define mmDAGB2_WRCLI6_DEFAULT 0xfe5fe0f9 +#define mmDAGB2_WRCLI7_DEFAULT 0xfe5fe0f9 +#define mmDAGB2_WRCLI8_DEFAULT 0xfe5fe0f9 +#define mmDAGB2_WRCLI9_DEFAULT 0xfe5fe0f9 +#define mmDAGB2_WRCLI10_DEFAULT 0xfe5fe0f9 +#define mmDAGB2_WRCLI11_DEFAULT 0xfe5fe0f9 +#define mmDAGB2_WRCLI12_DEFAULT 0xfe5fe0f9 +#define mmDAGB2_WRCLI13_DEFAULT 0xfe5fe0f9 +#define mmDAGB2_WRCLI14_DEFAULT 0xfe5fe0f9 +#define mmDAGB2_WRCLI15_DEFAULT 0xfe5fe0f9 +#define mmDAGB2_WR_CNTL_DEFAULT 0x03527df8 +#define mmDAGB2_WR_GMI_CNTL_DEFAULT 0x00003045 +#define mmDAGB2_WR_ADDR_DAGB_DEFAULT 0x00000039 +#define mmDAGB2_WR_OUTPUT_DAGB_MAX_BURST_DEFAULT 0x88888888 +#define mmDAGB2_WR_OUTPUT_DAGB_LAZY_TIMER_DEFAULT 0x11111111 +#define mmDAGB2_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB2_L1TLB_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB2_ATCVM_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB2_WR_ADDR_DAGB_MAX_BURST0_DEFAULT 0x88888888 +#define mmDAGB2_WR_ADDR_DAGB_LAZY_TIMER0_DEFAULT 0x11111111 +#define mmDAGB2_WR_ADDR_DAGB_MAX_BURST1_DEFAULT 0x88888888 +#define mmDAGB2_WR_ADDR_DAGB_LAZY_TIMER1_DEFAULT 0x11111111 +#define mmDAGB2_WR_DATA_DAGB_DEFAULT 0x00000001 +#define mmDAGB2_WR_DATA_DAGB_MAX_BURST0_DEFAULT 0x11111111 +#define mmDAGB2_WR_DATA_DAGB_LAZY_TIMER0_DEFAULT 0x00000000 +#define mmDAGB2_WR_DATA_DAGB_MAX_BURST1_DEFAULT 0x11111111 +#define mmDAGB2_WR_DATA_DAGB_LAZY_TIMER1_DEFAULT 0x00000000 +#define mmDAGB2_WR_VC0_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB2_WR_VC1_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB2_WR_VC2_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB2_WR_VC3_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB2_WR_VC4_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB2_WR_VC5_CNTL_DEFAULT 
0xff2ff082 +#define mmDAGB2_WR_VC6_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB2_WR_VC7_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB2_WR_CNTL_MISC_DEFAULT 0x69a0e408 +#define mmDAGB2_WR_TLB_CREDIT_DEFAULT 0x2f7bdef7 +#define mmDAGB2_WR_DATA_CREDIT_DEFAULT 0x60606070 +#define mmDAGB2_WR_MISC_CREDIT_DEFAULT 0x0078dc88 +#define mmDAGB2_WRCLI_ASK_PENDING_DEFAULT 0x00000000 +#define mmDAGB2_WRCLI_GO_PENDING_DEFAULT 0x00000000 +#define mmDAGB2_WRCLI_GBLSEND_PENDING_DEFAULT 0x00000000 +#define mmDAGB2_WRCLI_TLB_PENDING_DEFAULT 0x00000000 +#define mmDAGB2_WRCLI_OARB_PENDING_DEFAULT 0x00000000 +#define mmDAGB2_WRCLI_OSD_PENDING_DEFAULT 0x00000000 +#define mmDAGB2_WRCLI_DBUS_ASK_PENDING_DEFAULT 0x00000000 +#define mmDAGB2_WRCLI_DBUS_GO_PENDING_DEFAULT 0x00000000 +#define mmDAGB2_DAGB_DLY_DEFAULT 0x00000000 +#define mmDAGB2_CNTL_MISC_DEFAULT 0xcf7c1ffa +#define mmDAGB2_CNTL_MISC2_DEFAULT 0x003c0000 +#define mmDAGB2_FIFO_EMPTY_DEFAULT 0x00ffffff +#define mmDAGB2_FIFO_FULL_DEFAULT 0x00000000 +#define mmDAGB2_WR_CREDITS_FULL_DEFAULT 0x1fffffff +#define mmDAGB2_RD_CREDITS_FULL_DEFAULT 0x0003ffff +#define mmDAGB2_PERFCOUNTER_LO_DEFAULT 0x00000000 +#define mmDAGB2_PERFCOUNTER_HI_DEFAULT 0x00000000 +#define mmDAGB2_PERFCOUNTER0_CFG_DEFAULT 0x00000000 +#define mmDAGB2_PERFCOUNTER1_CFG_DEFAULT 0x00000000 +#define mmDAGB2_PERFCOUNTER2_CFG_DEFAULT 0x00000000 +#define mmDAGB2_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000 +#define mmDAGB2_RESERVE0_DEFAULT 0xffffffff +#define mmDAGB2_RESERVE1_DEFAULT 0xffffffff +#define mmDAGB2_RESERVE2_DEFAULT 0xffffffff +#define mmDAGB2_RESERVE3_DEFAULT 0xffffffff +#define mmDAGB2_RESERVE4_DEFAULT 0xffffffff +#define mmDAGB2_RESERVE5_DEFAULT 0xffffffff +#define mmDAGB2_RESERVE6_DEFAULT 0xffffffff +#define mmDAGB2_RESERVE7_DEFAULT 0xffffffff +#define mmDAGB2_RESERVE8_DEFAULT 0xffffffff +#define mmDAGB2_RESERVE9_DEFAULT 0xffffffff +#define mmDAGB2_RESERVE10_DEFAULT 0xffffffff +#define mmDAGB2_RESERVE11_DEFAULT 0xffffffff +#define mmDAGB2_RESERVE12_DEFAULT 0xffffffff +#define mmDAGB2_RESERVE13_DEFAULT 0xffffffff + + +// addressBlock: mmhub_dagb_dagbdec3 +#define mmDAGB3_RDCLI0_DEFAULT 0xfe5fe0f9 +#define mmDAGB3_RDCLI1_DEFAULT 0xfe5fe0f9 +#define mmDAGB3_RDCLI2_DEFAULT 0xfe5fe0f9 +#define mmDAGB3_RDCLI3_DEFAULT 0xfe5fe0f9 +#define mmDAGB3_RDCLI4_DEFAULT 0xfe5fe0f9 +#define mmDAGB3_RDCLI5_DEFAULT 0xfe5fe0f9 +#define mmDAGB3_RDCLI6_DEFAULT 0xfe5fe0f9 +#define mmDAGB3_RDCLI7_DEFAULT 0xfe5fe0f9 +#define mmDAGB3_RDCLI8_DEFAULT 0xfe5fe0f9 +#define mmDAGB3_RDCLI9_DEFAULT 0xfe5fe0f9 +#define mmDAGB3_RDCLI10_DEFAULT 0xfe5fe0f9 +#define mmDAGB3_RDCLI11_DEFAULT 0xfe5fe0f9 +#define mmDAGB3_RDCLI12_DEFAULT 0xfe5fe0f9 +#define mmDAGB3_RDCLI13_DEFAULT 0xfe5fe0f9 +#define mmDAGB3_RDCLI14_DEFAULT 0xfe5fe0f9 +#define mmDAGB3_RDCLI15_DEFAULT 0xfe5fe0f9 +#define mmDAGB3_RD_CNTL_DEFAULT 0x03527df8 +#define mmDAGB3_RD_GMI_CNTL_DEFAULT 0x00003045 +#define mmDAGB3_RD_ADDR_DAGB_DEFAULT 0x00000039 +#define mmDAGB3_RD_OUTPUT_DAGB_MAX_BURST_DEFAULT 0x88888888 +#define mmDAGB3_RD_OUTPUT_DAGB_LAZY_TIMER_DEFAULT 0x11111111 +#define mmDAGB3_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB3_L1TLB_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB3_ATCVM_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB3_RD_ADDR_DAGB_MAX_BURST0_DEFAULT 0x88888888 +#define mmDAGB3_RD_ADDR_DAGB_LAZY_TIMER0_DEFAULT 0x11111111 +#define mmDAGB3_RD_ADDR_DAGB_MAX_BURST1_DEFAULT 0x88888888 +#define mmDAGB3_RD_ADDR_DAGB_LAZY_TIMER1_DEFAULT 0x11111111 +#define mmDAGB3_RD_VC0_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB3_RD_VC1_CNTL_DEFAULT 0xff2ff082 
+#define mmDAGB3_RD_VC2_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB3_RD_VC3_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB3_RD_VC4_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB3_RD_VC5_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB3_RD_VC6_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB3_RD_VC7_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB3_RD_CNTL_MISC_DEFAULT 0x69a0e408 +#define mmDAGB3_RD_TLB_CREDIT_DEFAULT 0x2f7bdef7 +#define mmDAGB3_RDCLI_ASK_PENDING_DEFAULT 0x00000000 +#define mmDAGB3_RDCLI_GO_PENDING_DEFAULT 0x00000000 +#define mmDAGB3_RDCLI_GBLSEND_PENDING_DEFAULT 0x00000000 +#define mmDAGB3_RDCLI_TLB_PENDING_DEFAULT 0x00000000 +#define mmDAGB3_RDCLI_OARB_PENDING_DEFAULT 0x00000000 +#define mmDAGB3_RDCLI_OSD_PENDING_DEFAULT 0x00000000 +#define mmDAGB3_WRCLI0_DEFAULT 0xfe5fe0f9 +#define mmDAGB3_WRCLI1_DEFAULT 0xfe5fe0f9 +#define mmDAGB3_WRCLI2_DEFAULT 0xfe5fe0f9 +#define mmDAGB3_WRCLI3_DEFAULT 0xfe5fe0f9 +#define mmDAGB3_WRCLI4_DEFAULT 0xfe5fe0f9 +#define mmDAGB3_WRCLI5_DEFAULT 0xfe5fe0f9 +#define mmDAGB3_WRCLI6_DEFAULT 0xfe5fe0f9 +#define mmDAGB3_WRCLI7_DEFAULT 0xfe5fe0f9 +#define mmDAGB3_WRCLI8_DEFAULT 0xfe5fe0f9 +#define mmDAGB3_WRCLI9_DEFAULT 0xfe5fe0f9 +#define mmDAGB3_WRCLI10_DEFAULT 0xfe5fe0f9 +#define mmDAGB3_WRCLI11_DEFAULT 0xfe5fe0f9 +#define mmDAGB3_WRCLI12_DEFAULT 0xfe5fe0f9 +#define mmDAGB3_WRCLI13_DEFAULT 0xfe5fe0f9 +#define mmDAGB3_WRCLI14_DEFAULT 0xfe5fe0f9 +#define mmDAGB3_WRCLI15_DEFAULT 0xfe5fe0f9 +#define mmDAGB3_WR_CNTL_DEFAULT 0x03527df8 +#define mmDAGB3_WR_GMI_CNTL_DEFAULT 0x00003045 +#define mmDAGB3_WR_ADDR_DAGB_DEFAULT 0x00000039 +#define mmDAGB3_WR_OUTPUT_DAGB_MAX_BURST_DEFAULT 0x88888888 +#define mmDAGB3_WR_OUTPUT_DAGB_LAZY_TIMER_DEFAULT 0x11111111 +#define mmDAGB3_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB3_L1TLB_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB3_ATCVM_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB3_WR_ADDR_DAGB_MAX_BURST0_DEFAULT 0x88888888 +#define mmDAGB3_WR_ADDR_DAGB_LAZY_TIMER0_DEFAULT 0x11111111 +#define mmDAGB3_WR_ADDR_DAGB_MAX_BURST1_DEFAULT 0x88888888 +#define mmDAGB3_WR_ADDR_DAGB_LAZY_TIMER1_DEFAULT 0x11111111 +#define mmDAGB3_WR_DATA_DAGB_DEFAULT 0x00000001 +#define mmDAGB3_WR_DATA_DAGB_MAX_BURST0_DEFAULT 0x11111111 +#define mmDAGB3_WR_DATA_DAGB_LAZY_TIMER0_DEFAULT 0x00000000 +#define mmDAGB3_WR_DATA_DAGB_MAX_BURST1_DEFAULT 0x11111111 +#define mmDAGB3_WR_DATA_DAGB_LAZY_TIMER1_DEFAULT 0x00000000 +#define mmDAGB3_WR_VC0_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB3_WR_VC1_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB3_WR_VC2_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB3_WR_VC3_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB3_WR_VC4_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB3_WR_VC5_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB3_WR_VC6_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB3_WR_VC7_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB3_WR_CNTL_MISC_DEFAULT 0x69a0e408 +#define mmDAGB3_WR_TLB_CREDIT_DEFAULT 0x2f7bdef7 +#define mmDAGB3_WR_DATA_CREDIT_DEFAULT 0x60606070 +#define mmDAGB3_WR_MISC_CREDIT_DEFAULT 0x0078dc88 +#define mmDAGB3_WRCLI_ASK_PENDING_DEFAULT 0x00000000 +#define mmDAGB3_WRCLI_GO_PENDING_DEFAULT 0x00000000 +#define mmDAGB3_WRCLI_GBLSEND_PENDING_DEFAULT 0x00000000 +#define mmDAGB3_WRCLI_TLB_PENDING_DEFAULT 0x00000000 +#define mmDAGB3_WRCLI_OARB_PENDING_DEFAULT 0x00000000 +#define mmDAGB3_WRCLI_OSD_PENDING_DEFAULT 0x00000000 +#define mmDAGB3_WRCLI_DBUS_ASK_PENDING_DEFAULT 0x00000000 +#define mmDAGB3_WRCLI_DBUS_GO_PENDING_DEFAULT 0x00000000 +#define mmDAGB3_DAGB_DLY_DEFAULT 0x00000000 +#define mmDAGB3_CNTL_MISC_DEFAULT 0xcf7c1ffa +#define mmDAGB3_CNTL_MISC2_DEFAULT 0x003c0000 +#define 
mmDAGB3_FIFO_EMPTY_DEFAULT 0x00ffffff +#define mmDAGB3_FIFO_FULL_DEFAULT 0x00000000 +#define mmDAGB3_WR_CREDITS_FULL_DEFAULT 0x1fffffff +#define mmDAGB3_RD_CREDITS_FULL_DEFAULT 0x0003ffff +#define mmDAGB3_PERFCOUNTER_LO_DEFAULT 0x00000000 +#define mmDAGB3_PERFCOUNTER_HI_DEFAULT 0x00000000 +#define mmDAGB3_PERFCOUNTER0_CFG_DEFAULT 0x00000000 +#define mmDAGB3_PERFCOUNTER1_CFG_DEFAULT 0x00000000 +#define mmDAGB3_PERFCOUNTER2_CFG_DEFAULT 0x00000000 +#define mmDAGB3_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000 +#define mmDAGB3_RESERVE0_DEFAULT 0xffffffff +#define mmDAGB3_RESERVE1_DEFAULT 0xffffffff +#define mmDAGB3_RESERVE2_DEFAULT 0xffffffff +#define mmDAGB3_RESERVE3_DEFAULT 0xffffffff +#define mmDAGB3_RESERVE4_DEFAULT 0xffffffff +#define mmDAGB3_RESERVE5_DEFAULT 0xffffffff +#define mmDAGB3_RESERVE6_DEFAULT 0xffffffff +#define mmDAGB3_RESERVE7_DEFAULT 0xffffffff +#define mmDAGB3_RESERVE8_DEFAULT 0xffffffff +#define mmDAGB3_RESERVE9_DEFAULT 0xffffffff +#define mmDAGB3_RESERVE10_DEFAULT 0xffffffff +#define mmDAGB3_RESERVE11_DEFAULT 0xffffffff +#define mmDAGB3_RESERVE12_DEFAULT 0xffffffff +#define mmDAGB3_RESERVE13_DEFAULT 0xffffffff + + +// addressBlock: mmhub_dagb_dagbdec4 +#define mmDAGB4_RDCLI0_DEFAULT 0xfe5fe0f9 +#define mmDAGB4_RDCLI1_DEFAULT 0xfe5fe0f9 +#define mmDAGB4_RDCLI2_DEFAULT 0xfe5fe0f9 +#define mmDAGB4_RDCLI3_DEFAULT 0xfe5fe0f9 +#define mmDAGB4_RDCLI4_DEFAULT 0xfe5fe0f9 +#define mmDAGB4_RDCLI5_DEFAULT 0xfe5fe0f9 +#define mmDAGB4_RDCLI6_DEFAULT 0xfe5fe0f9 +#define mmDAGB4_RDCLI7_DEFAULT 0xfe5fe0f9 +#define mmDAGB4_RDCLI8_DEFAULT 0xfe5fe0f9 +#define mmDAGB4_RDCLI9_DEFAULT 0xfe5fe0f9 +#define mmDAGB4_RDCLI10_DEFAULT 0xfe5fe0f9 +#define mmDAGB4_RDCLI11_DEFAULT 0xfe5fe0f9 +#define mmDAGB4_RDCLI12_DEFAULT 0xfe5fe0f9 +#define mmDAGB4_RDCLI13_DEFAULT 0xfe5fe0f9 +#define mmDAGB4_RDCLI14_DEFAULT 0xfe5fe0f9 +#define mmDAGB4_RDCLI15_DEFAULT 0xfe5fe0f9 +#define mmDAGB4_RD_CNTL_DEFAULT 0x03527df8 +#define mmDAGB4_RD_GMI_CNTL_DEFAULT 0x00003045 +#define mmDAGB4_RD_ADDR_DAGB_DEFAULT 0x00000039 +#define mmDAGB4_RD_OUTPUT_DAGB_MAX_BURST_DEFAULT 0x88888888 +#define mmDAGB4_RD_OUTPUT_DAGB_LAZY_TIMER_DEFAULT 0x11111111 +#define mmDAGB4_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB4_L1TLB_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB4_ATCVM_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB4_RD_ADDR_DAGB_MAX_BURST0_DEFAULT 0x88888888 +#define mmDAGB4_RD_ADDR_DAGB_LAZY_TIMER0_DEFAULT 0x11111111 +#define mmDAGB4_RD_ADDR_DAGB_MAX_BURST1_DEFAULT 0x88888888 +#define mmDAGB4_RD_ADDR_DAGB_LAZY_TIMER1_DEFAULT 0x11111111 +#define mmDAGB4_RD_VC0_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB4_RD_VC1_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB4_RD_VC2_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB4_RD_VC3_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB4_RD_VC4_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB4_RD_VC5_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB4_RD_VC6_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB4_RD_VC7_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB4_RD_CNTL_MISC_DEFAULT 0x69a0e408 +#define mmDAGB4_RD_TLB_CREDIT_DEFAULT 0x2f7bdef7 +#define mmDAGB4_RDCLI_ASK_PENDING_DEFAULT 0x00000000 +#define mmDAGB4_RDCLI_GO_PENDING_DEFAULT 0x00000000 +#define mmDAGB4_RDCLI_GBLSEND_PENDING_DEFAULT 0x00000000 +#define mmDAGB4_RDCLI_TLB_PENDING_DEFAULT 0x00000000 +#define mmDAGB4_RDCLI_OARB_PENDING_DEFAULT 0x00000000 +#define mmDAGB4_RDCLI_OSD_PENDING_DEFAULT 0x00000000 +#define mmDAGB4_WRCLI0_DEFAULT 0xfe5fe0f9 +#define mmDAGB4_WRCLI1_DEFAULT 0xfe5fe0f9 +#define mmDAGB4_WRCLI2_DEFAULT 0xfe5fe0f9 +#define mmDAGB4_WRCLI3_DEFAULT 0xfe5fe0f9 +#define 
mmDAGB4_WRCLI4_DEFAULT 0xfe5fe0f9 +#define mmDAGB4_WRCLI5_DEFAULT 0xfe5fe0f9 +#define mmDAGB4_WRCLI6_DEFAULT 0xfe5fe0f9 +#define mmDAGB4_WRCLI7_DEFAULT 0xfe5fe0f9 +#define mmDAGB4_WRCLI8_DEFAULT 0xfe5fe0f9 +#define mmDAGB4_WRCLI9_DEFAULT 0xfe5fe0f9 +#define mmDAGB4_WRCLI10_DEFAULT 0xfe5fe0f9 +#define mmDAGB4_WRCLI11_DEFAULT 0xfe5fe0f9 +#define mmDAGB4_WRCLI12_DEFAULT 0xfe5fe0f9 +#define mmDAGB4_WRCLI13_DEFAULT 0xfe5fe0f9 +#define mmDAGB4_WRCLI14_DEFAULT 0xfe5fe0f9 +#define mmDAGB4_WRCLI15_DEFAULT 0xfe5fe0f9 +#define mmDAGB4_WR_CNTL_DEFAULT 0x03527df8 +#define mmDAGB4_WR_GMI_CNTL_DEFAULT 0x00003045 +#define mmDAGB4_WR_ADDR_DAGB_DEFAULT 0x00000039 +#define mmDAGB4_WR_OUTPUT_DAGB_MAX_BURST_DEFAULT 0x88888888 +#define mmDAGB4_WR_OUTPUT_DAGB_LAZY_TIMER_DEFAULT 0x11111111 +#define mmDAGB4_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB4_L1TLB_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB4_ATCVM_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB4_WR_ADDR_DAGB_MAX_BURST0_DEFAULT 0x88888888 +#define mmDAGB4_WR_ADDR_DAGB_LAZY_TIMER0_DEFAULT 0x11111111 +#define mmDAGB4_WR_ADDR_DAGB_MAX_BURST1_DEFAULT 0x88888888 +#define mmDAGB4_WR_ADDR_DAGB_LAZY_TIMER1_DEFAULT 0x11111111 +#define mmDAGB4_WR_DATA_DAGB_DEFAULT 0x00000001 +#define mmDAGB4_WR_DATA_DAGB_MAX_BURST0_DEFAULT 0x11111111 +#define mmDAGB4_WR_DATA_DAGB_LAZY_TIMER0_DEFAULT 0x00000000 +#define mmDAGB4_WR_DATA_DAGB_MAX_BURST1_DEFAULT 0x11111111 +#define mmDAGB4_WR_DATA_DAGB_LAZY_TIMER1_DEFAULT 0x00000000 +#define mmDAGB4_WR_VC0_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB4_WR_VC1_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB4_WR_VC2_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB4_WR_VC3_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB4_WR_VC4_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB4_WR_VC5_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB4_WR_VC6_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB4_WR_VC7_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB4_WR_CNTL_MISC_DEFAULT 0x69a0e408 +#define mmDAGB4_WR_TLB_CREDIT_DEFAULT 0x2f7bdef7 +#define mmDAGB4_WR_DATA_CREDIT_DEFAULT 0x60606070 +#define mmDAGB4_WR_MISC_CREDIT_DEFAULT 0x0078dc88 +#define mmDAGB4_WRCLI_ASK_PENDING_DEFAULT 0x00000000 +#define mmDAGB4_WRCLI_GO_PENDING_DEFAULT 0x00000000 +#define mmDAGB4_WRCLI_GBLSEND_PENDING_DEFAULT 0x00000000 +#define mmDAGB4_WRCLI_TLB_PENDING_DEFAULT 0x00000000 +#define mmDAGB4_WRCLI_OARB_PENDING_DEFAULT 0x00000000 +#define mmDAGB4_WRCLI_OSD_PENDING_DEFAULT 0x00000000 +#define mmDAGB4_WRCLI_DBUS_ASK_PENDING_DEFAULT 0x00000000 +#define mmDAGB4_WRCLI_DBUS_GO_PENDING_DEFAULT 0x00000000 +#define mmDAGB4_DAGB_DLY_DEFAULT 0x00000000 +#define mmDAGB4_CNTL_MISC_DEFAULT 0xcf7c1ffa +#define mmDAGB4_CNTL_MISC2_DEFAULT 0x003c0000 +#define mmDAGB4_FIFO_EMPTY_DEFAULT 0x00ffffff +#define mmDAGB4_FIFO_FULL_DEFAULT 0x00000000 +#define mmDAGB4_WR_CREDITS_FULL_DEFAULT 0x1fffffff +#define mmDAGB4_RD_CREDITS_FULL_DEFAULT 0x0003ffff +#define mmDAGB4_PERFCOUNTER_LO_DEFAULT 0x00000000 +#define mmDAGB4_PERFCOUNTER_HI_DEFAULT 0x00000000 +#define mmDAGB4_PERFCOUNTER0_CFG_DEFAULT 0x00000000 +#define mmDAGB4_PERFCOUNTER1_CFG_DEFAULT 0x00000000 +#define mmDAGB4_PERFCOUNTER2_CFG_DEFAULT 0x00000000 +#define mmDAGB4_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000 +#define mmDAGB4_RESERVE0_DEFAULT 0xffffffff +#define mmDAGB4_RESERVE1_DEFAULT 0xffffffff +#define mmDAGB4_RESERVE2_DEFAULT 0xffffffff +#define mmDAGB4_RESERVE3_DEFAULT 0xffffffff +#define mmDAGB4_RESERVE4_DEFAULT 0xffffffff +#define mmDAGB4_RESERVE5_DEFAULT 0xffffffff +#define mmDAGB4_RESERVE6_DEFAULT 0xffffffff +#define mmDAGB4_RESERVE7_DEFAULT 0xffffffff +#define 
mmDAGB4_RESERVE8_DEFAULT 0xffffffff +#define mmDAGB4_RESERVE9_DEFAULT 0xffffffff +#define mmDAGB4_RESERVE10_DEFAULT 0xffffffff +#define mmDAGB4_RESERVE11_DEFAULT 0xffffffff +#define mmDAGB4_RESERVE12_DEFAULT 0xffffffff +#define mmDAGB4_RESERVE13_DEFAULT 0xffffffff + + +// addressBlock: mmhub_ea_mmeadec0 +#define mmMMEA0_DRAM_RD_CLI2GRP_MAP0_DEFAULT 0x55555555 +#define mmMMEA0_DRAM_RD_CLI2GRP_MAP1_DEFAULT 0x55555555 +#define mmMMEA0_DRAM_WR_CLI2GRP_MAP0_DEFAULT 0x55555555 +#define mmMMEA0_DRAM_WR_CLI2GRP_MAP1_DEFAULT 0x55555555 +#define mmMMEA0_DRAM_RD_GRP2VC_MAP_DEFAULT 0x00000e25 +#define mmMMEA0_DRAM_WR_GRP2VC_MAP_DEFAULT 0x00000e25 +#define mmMMEA0_DRAM_RD_LAZY_DEFAULT 0x78000924 +#define mmMMEA0_DRAM_WR_LAZY_DEFAULT 0x78000924 +#define mmMMEA0_DRAM_RD_CAM_CNTL_DEFAULT 0x16db4444 +#define mmMMEA0_DRAM_WR_CAM_CNTL_DEFAULT 0x16db4444 +#define mmMMEA0_DRAM_PAGE_BURST_DEFAULT 0x20002000 +#define mmMMEA0_DRAM_RD_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA0_DRAM_WR_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA0_DRAM_RD_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA0_DRAM_WR_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA0_DRAM_RD_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA0_DRAM_WR_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA0_DRAM_RD_PRI_URGENCY_DEFAULT 0x0000fdb6 +#define mmMMEA0_DRAM_WR_PRI_URGENCY_DEFAULT 0x0000fdb6 +#define mmMMEA0_DRAM_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA0_DRAM_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA0_DRAM_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA0_DRAM_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA0_DRAM_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA0_DRAM_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA0_GMI_RD_CLI2GRP_MAP0_DEFAULT 0x00000000 +#define mmMMEA0_GMI_RD_CLI2GRP_MAP1_DEFAULT 0x00000000 +#define mmMMEA0_GMI_WR_CLI2GRP_MAP0_DEFAULT 0x00000000 +#define mmMMEA0_GMI_WR_CLI2GRP_MAP1_DEFAULT 0x00000000 +#define mmMMEA0_GMI_RD_GRP2VC_MAP_DEFAULT 0x00000fff +#define mmMMEA0_GMI_WR_GRP2VC_MAP_DEFAULT 0x00000fff +#define mmMMEA0_GMI_RD_LAZY_DEFAULT 0x78000924 +#define mmMMEA0_GMI_WR_LAZY_DEFAULT 0x78000924 +#define mmMMEA0_GMI_RD_CAM_CNTL_DEFAULT 0x16db4444 +#define mmMMEA0_GMI_WR_CAM_CNTL_DEFAULT 0x16db4444 +#define mmMMEA0_GMI_PAGE_BURST_DEFAULT 0x20002000 +#define mmMMEA0_GMI_RD_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA0_GMI_WR_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA0_GMI_RD_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA0_GMI_WR_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA0_GMI_RD_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA0_GMI_WR_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA0_GMI_RD_PRI_URGENCY_DEFAULT 0x0000fdb6 +#define mmMMEA0_GMI_WR_PRI_URGENCY_DEFAULT 0x0000fdb6 +#define mmMMEA0_GMI_RD_PRI_URGENCY_MASKING_DEFAULT 0xffffffff +#define mmMMEA0_GMI_WR_PRI_URGENCY_MASKING_DEFAULT 0xffffffff +#define mmMMEA0_GMI_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA0_GMI_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA0_GMI_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA0_GMI_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA0_GMI_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA0_GMI_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA0_ADDRNORM_BASE_ADDR0_DEFAULT 0x00000000 +#define mmMMEA0_ADDRNORM_LIMIT_ADDR0_DEFAULT 0x00000000 +#define mmMMEA0_ADDRNORM_BASE_ADDR1_DEFAULT 0x00000000 +#define mmMMEA0_ADDRNORM_LIMIT_ADDR1_DEFAULT 0x00000000 +#define mmMMEA0_ADDRNORM_OFFSET_ADDR1_DEFAULT 0x00000000 +#define mmMMEA0_ADDRNORM_BASE_ADDR2_DEFAULT 0x00000000 +#define 
mmMMEA0_ADDRNORM_LIMIT_ADDR2_DEFAULT 0x00000000 +#define mmMMEA0_ADDRNORM_BASE_ADDR3_DEFAULT 0x00000000 +#define mmMMEA0_ADDRNORM_LIMIT_ADDR3_DEFAULT 0x00000000 +#define mmMMEA0_ADDRNORM_OFFSET_ADDR3_DEFAULT 0x00000000 +#define mmMMEA0_ADDRNORM_BASE_ADDR4_DEFAULT 0x00000000 +#define mmMMEA0_ADDRNORM_LIMIT_ADDR4_DEFAULT 0x00000000 +#define mmMMEA0_ADDRNORM_BASE_ADDR5_DEFAULT 0x00000000 +#define mmMMEA0_ADDRNORM_LIMIT_ADDR5_DEFAULT 0x00000000 +#define mmMMEA0_ADDRNORM_OFFSET_ADDR5_DEFAULT 0x00000000 +#define mmMMEA0_ADDRNORMDRAM_HOLE_CNTL_DEFAULT 0x00000000 +#define mmMMEA0_ADDRNORMGMI_HOLE_CNTL_DEFAULT 0x00000000 +#define mmMMEA0_ADDRNORMDRAM_NP2_CHANNEL_CFG_DEFAULT 0x00000000 +#define mmMMEA0_ADDRNORMGMI_NP2_CHANNEL_CFG_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDEC_BANK_CFG_DEFAULT 0x000003cf +#define mmMMEA0_ADDRDEC_MISC_CFG_DEFAULT 0xfffff000 +#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK0_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK1_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK2_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK3_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK4_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK5_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_PC_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_PC2_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_CS0_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_CS1_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDECDRAM_HARVEST_ENABLE_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDECGMI_ADDR_HASH_BANK0_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDECGMI_ADDR_HASH_BANK1_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDECGMI_ADDR_HASH_BANK2_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDECGMI_ADDR_HASH_BANK3_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDECGMI_ADDR_HASH_BANK4_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDECGMI_ADDR_HASH_BANK5_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDECGMI_ADDR_HASH_PC_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDECGMI_ADDR_HASH_PC2_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDECGMI_ADDR_HASH_CS0_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDECGMI_ADDR_HASH_CS1_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDECGMI_HARVEST_ENABLE_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDEC0_BASE_ADDR_CS0_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDEC0_BASE_ADDR_CS1_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDEC0_BASE_ADDR_CS2_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDEC0_BASE_ADDR_CS3_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDEC0_BASE_ADDR_SECCS0_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDEC0_BASE_ADDR_SECCS1_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDEC0_BASE_ADDR_SECCS2_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDEC0_BASE_ADDR_SECCS3_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDEC0_ADDR_MASK_CS01_DEFAULT 0xfffffffe +#define mmMMEA0_ADDRDEC0_ADDR_MASK_CS23_DEFAULT 0xfffffffe +#define mmMMEA0_ADDRDEC0_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe +#define mmMMEA0_ADDRDEC0_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe +#define mmMMEA0_ADDRDEC0_ADDR_CFG_CS01_DEFAULT 0x00050408 +#define mmMMEA0_ADDRDEC0_ADDR_CFG_CS23_DEFAULT 0x00050408 +#define mmMMEA0_ADDRDEC0_ADDR_SEL_CS01_DEFAULT 0x04076543 +#define mmMMEA0_ADDRDEC0_ADDR_SEL_CS23_DEFAULT 0x04076543 +#define mmMMEA0_ADDRDEC0_ADDR_SEL2_CS01_DEFAULT 0x00000008 +#define mmMMEA0_ADDRDEC0_ADDR_SEL2_CS23_DEFAULT 0x00000008 +#define mmMMEA0_ADDRDEC0_COL_SEL_LO_CS01_DEFAULT 0x87654321 +#define mmMMEA0_ADDRDEC0_COL_SEL_LO_CS23_DEFAULT 0x87654321 +#define mmMMEA0_ADDRDEC0_COL_SEL_HI_CS01_DEFAULT 0xa9876543 +#define mmMMEA0_ADDRDEC0_COL_SEL_HI_CS23_DEFAULT 
0xa9876543 +#define mmMMEA0_ADDRDEC0_RM_SEL_CS01_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDEC0_RM_SEL_CS23_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDEC0_RM_SEL_SECCS01_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDEC0_RM_SEL_SECCS23_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDEC1_BASE_ADDR_CS0_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDEC1_BASE_ADDR_CS1_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDEC1_BASE_ADDR_CS2_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDEC1_BASE_ADDR_CS3_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDEC1_BASE_ADDR_SECCS0_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDEC1_BASE_ADDR_SECCS1_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDEC1_BASE_ADDR_SECCS2_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDEC1_BASE_ADDR_SECCS3_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDEC1_ADDR_MASK_CS01_DEFAULT 0xfffffffe +#define mmMMEA0_ADDRDEC1_ADDR_MASK_CS23_DEFAULT 0xfffffffe +#define mmMMEA0_ADDRDEC1_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe +#define mmMMEA0_ADDRDEC1_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe +#define mmMMEA0_ADDRDEC1_ADDR_CFG_CS01_DEFAULT 0x00050408 +#define mmMMEA0_ADDRDEC1_ADDR_CFG_CS23_DEFAULT 0x00050408 +#define mmMMEA0_ADDRDEC1_ADDR_SEL_CS01_DEFAULT 0x04076543 +#define mmMMEA0_ADDRDEC1_ADDR_SEL_CS23_DEFAULT 0x04076543 +#define mmMMEA0_ADDRDEC1_ADDR_SEL2_CS01_DEFAULT 0x00000008 +#define mmMMEA0_ADDRDEC1_ADDR_SEL2_CS23_DEFAULT 0x00000008 +#define mmMMEA0_ADDRDEC1_COL_SEL_LO_CS01_DEFAULT 0x87654321 +#define mmMMEA0_ADDRDEC1_COL_SEL_LO_CS23_DEFAULT 0x87654321 +#define mmMMEA0_ADDRDEC1_COL_SEL_HI_CS01_DEFAULT 0xa9876543 +#define mmMMEA0_ADDRDEC1_COL_SEL_HI_CS23_DEFAULT 0xa9876543 +#define mmMMEA0_ADDRDEC1_RM_SEL_CS01_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDEC1_RM_SEL_CS23_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDEC1_RM_SEL_SECCS01_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDEC1_RM_SEL_SECCS23_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDEC2_BASE_ADDR_CS0_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDEC2_BASE_ADDR_CS1_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDEC2_BASE_ADDR_CS2_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDEC2_BASE_ADDR_CS3_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDEC2_BASE_ADDR_SECCS0_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDEC2_BASE_ADDR_SECCS1_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDEC2_BASE_ADDR_SECCS2_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDEC2_BASE_ADDR_SECCS3_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDEC2_ADDR_MASK_CS01_DEFAULT 0xfffffffe +#define mmMMEA0_ADDRDEC2_ADDR_MASK_CS23_DEFAULT 0xfffffffe +#define mmMMEA0_ADDRDEC2_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe +#define mmMMEA0_ADDRDEC2_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe +#define mmMMEA0_ADDRDEC2_ADDR_CFG_CS01_DEFAULT 0x00050408 +#define mmMMEA0_ADDRDEC2_ADDR_CFG_CS23_DEFAULT 0x00050408 +#define mmMMEA0_ADDRDEC2_ADDR_SEL_CS01_DEFAULT 0x04076543 +#define mmMMEA0_ADDRDEC2_ADDR_SEL_CS23_DEFAULT 0x04076543 +#define mmMMEA0_ADDRDEC2_ADDR_SEL2_CS01_DEFAULT 0x00000008 +#define mmMMEA0_ADDRDEC2_ADDR_SEL2_CS23_DEFAULT 0x00000008 +#define mmMMEA0_ADDRDEC2_COL_SEL_LO_CS01_DEFAULT 0x87654321 +#define mmMMEA0_ADDRDEC2_COL_SEL_LO_CS23_DEFAULT 0x87654321 +#define mmMMEA0_ADDRDEC2_COL_SEL_HI_CS01_DEFAULT 0xa9876543 +#define mmMMEA0_ADDRDEC2_COL_SEL_HI_CS23_DEFAULT 0xa9876543 +#define mmMMEA0_ADDRDEC2_RM_SEL_CS01_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDEC2_RM_SEL_CS23_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDEC2_RM_SEL_SECCS01_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDEC2_RM_SEL_SECCS23_DEFAULT 0x00000000 +#define mmMMEA0_ADDRNORMDRAM_GLOBAL_CNTL_DEFAULT 0x00600000 +#define mmMMEA0_ADDRNORMGMI_GLOBAL_CNTL_DEFAULT 0x00600000 +#define mmMMEA0_IO_RD_CLI2GRP_MAP0_DEFAULT 0xe4e4e4e4 
+#define mmMMEA0_IO_RD_CLI2GRP_MAP1_DEFAULT 0xe4e4e4e4 +#define mmMMEA0_IO_WR_CLI2GRP_MAP0_DEFAULT 0xe4e4e4e4 +#define mmMMEA0_IO_WR_CLI2GRP_MAP1_DEFAULT 0xe4e4e4e4 +#define mmMMEA0_IO_RD_COMBINE_FLUSH_DEFAULT 0x00007777 +#define mmMMEA0_IO_WR_COMBINE_FLUSH_DEFAULT 0x00007777 +#define mmMMEA0_IO_GROUP_BURST_DEFAULT 0x1f031f03 +#define mmMMEA0_IO_RD_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA0_IO_WR_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA0_IO_RD_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA0_IO_WR_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA0_IO_RD_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA0_IO_WR_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA0_IO_RD_PRI_URGENCY_DEFAULT 0x00000492 +#define mmMMEA0_IO_WR_PRI_URGENCY_DEFAULT 0x00000492 +#define mmMMEA0_IO_RD_PRI_URGENCY_MASKING_DEFAULT 0xffffffff +#define mmMMEA0_IO_WR_PRI_URGENCY_MASKING_DEFAULT 0xffffffff +#define mmMMEA0_IO_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA0_IO_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA0_IO_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA0_IO_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA0_IO_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA0_IO_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA0_SDP_ARB_DRAM_DEFAULT 0x00101e40 +#define mmMMEA0_SDP_ARB_GMI_DEFAULT 0x00101e40 +#define mmMMEA0_SDP_ARB_FINAL_DEFAULT 0x00007fff +#define mmMMEA0_SDP_DRAM_PRIORITY_DEFAULT 0x00000000 +#define mmMMEA0_SDP_GMI_PRIORITY_DEFAULT 0x00000000 +#define mmMMEA0_SDP_IO_PRIORITY_DEFAULT 0x00000000 +#define mmMMEA0_SDP_CREDITS_DEFAULT 0x000101bf +#define mmMMEA0_SDP_TAG_RESERVE0_DEFAULT 0x00000000 +#define mmMMEA0_SDP_TAG_RESERVE1_DEFAULT 0x00000000 +#define mmMMEA0_SDP_VCC_RESERVE0_DEFAULT 0x00000000 +#define mmMMEA0_SDP_VCC_RESERVE1_DEFAULT 0x00000000 +#define mmMMEA0_SDP_VCD_RESERVE0_DEFAULT 0x00000000 +#define mmMMEA0_SDP_VCD_RESERVE1_DEFAULT 0x00000000 +#define mmMMEA0_SDP_REQ_CNTL_DEFAULT 0x0000001f +#define mmMMEA0_MISC_DEFAULT 0x0c00a070 +#define mmMMEA0_LATENCY_SAMPLING_DEFAULT 0x00000000 +#define mmMMEA0_PERFCOUNTER_LO_DEFAULT 0x00000000 +#define mmMMEA0_PERFCOUNTER_HI_DEFAULT 0x00000000 +#define mmMMEA0_PERFCOUNTER0_CFG_DEFAULT 0x00000000 +#define mmMMEA0_PERFCOUNTER1_CFG_DEFAULT 0x00000000 +#define mmMMEA0_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000 +#define mmMMEA0_EDC_CNT_DEFAULT 0x00000000 +#define mmMMEA0_EDC_CNT2_DEFAULT 0x00000000 +#define mmMMEA0_DSM_CNTL_DEFAULT 0x00000000 +#define mmMMEA0_DSM_CNTLA_DEFAULT 0x00000000 +#define mmMMEA0_DSM_CNTLB_DEFAULT 0x00000000 +#define mmMMEA0_DSM_CNTL2_DEFAULT 0x00000000 +#define mmMMEA0_DSM_CNTL2A_DEFAULT 0x00000000 +#define mmMMEA0_DSM_CNTL2B_DEFAULT 0x00000000 +#define mmMMEA0_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmMMEA0_EDC_MODE_DEFAULT 0x00000000 +#define mmMMEA0_ERR_STATUS_DEFAULT 0x00000300 +#define mmMMEA0_MISC2_DEFAULT 0x00000000 +#define mmMMEA0_ADDRDEC_SELECT_DEFAULT 0x00000000 +#define mmMMEA0_EDC_CNT3_DEFAULT 0x00000000 + + +// addressBlock: mmhub_ea_mmeadec1 +#define mmMMEA1_DRAM_RD_CLI2GRP_MAP0_DEFAULT 0x55555555 +#define mmMMEA1_DRAM_RD_CLI2GRP_MAP1_DEFAULT 0x55555555 +#define mmMMEA1_DRAM_WR_CLI2GRP_MAP0_DEFAULT 0x55555555 +#define mmMMEA1_DRAM_WR_CLI2GRP_MAP1_DEFAULT 0x55555555 +#define mmMMEA1_DRAM_RD_GRP2VC_MAP_DEFAULT 0x00000e25 +#define mmMMEA1_DRAM_WR_GRP2VC_MAP_DEFAULT 0x00000e25 +#define mmMMEA1_DRAM_RD_LAZY_DEFAULT 0x78000924 +#define mmMMEA1_DRAM_WR_LAZY_DEFAULT 0x78000924 +#define mmMMEA1_DRAM_RD_CAM_CNTL_DEFAULT 0x16db4444 +#define mmMMEA1_DRAM_WR_CAM_CNTL_DEFAULT 0x16db4444 +#define 
mmMMEA1_DRAM_PAGE_BURST_DEFAULT 0x20002000 +#define mmMMEA1_DRAM_RD_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA1_DRAM_WR_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA1_DRAM_RD_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA1_DRAM_WR_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA1_DRAM_RD_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA1_DRAM_WR_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA1_DRAM_RD_PRI_URGENCY_DEFAULT 0x0000fdb6 +#define mmMMEA1_DRAM_WR_PRI_URGENCY_DEFAULT 0x0000fdb6 +#define mmMMEA1_DRAM_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA1_DRAM_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA1_DRAM_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA1_DRAM_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA1_DRAM_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA1_DRAM_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA1_GMI_RD_CLI2GRP_MAP0_DEFAULT 0x00000000 +#define mmMMEA1_GMI_RD_CLI2GRP_MAP1_DEFAULT 0x00000000 +#define mmMMEA1_GMI_WR_CLI2GRP_MAP0_DEFAULT 0x00000000 +#define mmMMEA1_GMI_WR_CLI2GRP_MAP1_DEFAULT 0x00000000 +#define mmMMEA1_GMI_RD_GRP2VC_MAP_DEFAULT 0x00000fff +#define mmMMEA1_GMI_WR_GRP2VC_MAP_DEFAULT 0x00000fff +#define mmMMEA1_GMI_RD_LAZY_DEFAULT 0x78000924 +#define mmMMEA1_GMI_WR_LAZY_DEFAULT 0x78000924 +#define mmMMEA1_GMI_RD_CAM_CNTL_DEFAULT 0x16db4444 +#define mmMMEA1_GMI_WR_CAM_CNTL_DEFAULT 0x16db4444 +#define mmMMEA1_GMI_PAGE_BURST_DEFAULT 0x20002000 +#define mmMMEA1_GMI_RD_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA1_GMI_WR_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA1_GMI_RD_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA1_GMI_WR_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA1_GMI_RD_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA1_GMI_WR_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA1_GMI_RD_PRI_URGENCY_DEFAULT 0x0000fdb6 +#define mmMMEA1_GMI_WR_PRI_URGENCY_DEFAULT 0x0000fdb6 +#define mmMMEA1_GMI_RD_PRI_URGENCY_MASKING_DEFAULT 0xffffffff +#define mmMMEA1_GMI_WR_PRI_URGENCY_MASKING_DEFAULT 0xffffffff +#define mmMMEA1_GMI_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA1_GMI_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA1_GMI_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA1_GMI_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA1_GMI_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA1_GMI_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA1_ADDRNORM_BASE_ADDR0_DEFAULT 0x00000000 +#define mmMMEA1_ADDRNORM_LIMIT_ADDR0_DEFAULT 0x00000000 +#define mmMMEA1_ADDRNORM_BASE_ADDR1_DEFAULT 0x00000000 +#define mmMMEA1_ADDRNORM_LIMIT_ADDR1_DEFAULT 0x00000000 +#define mmMMEA1_ADDRNORM_OFFSET_ADDR1_DEFAULT 0x00000000 +#define mmMMEA1_ADDRNORM_BASE_ADDR2_DEFAULT 0x00000000 +#define mmMMEA1_ADDRNORM_LIMIT_ADDR2_DEFAULT 0x00000000 +#define mmMMEA1_ADDRNORM_BASE_ADDR3_DEFAULT 0x00000000 +#define mmMMEA1_ADDRNORM_LIMIT_ADDR3_DEFAULT 0x00000000 +#define mmMMEA1_ADDRNORM_OFFSET_ADDR3_DEFAULT 0x00000000 +#define mmMMEA1_ADDRNORM_BASE_ADDR4_DEFAULT 0x00000000 +#define mmMMEA1_ADDRNORM_LIMIT_ADDR4_DEFAULT 0x00000000 +#define mmMMEA1_ADDRNORM_BASE_ADDR5_DEFAULT 0x00000000 +#define mmMMEA1_ADDRNORM_LIMIT_ADDR5_DEFAULT 0x00000000 +#define mmMMEA1_ADDRNORM_OFFSET_ADDR5_DEFAULT 0x00000000 +#define mmMMEA1_ADDRNORMDRAM_HOLE_CNTL_DEFAULT 0x00000000 +#define mmMMEA1_ADDRNORMGMI_HOLE_CNTL_DEFAULT 0x00000000 +#define mmMMEA1_ADDRNORMDRAM_NP2_CHANNEL_CFG_DEFAULT 0x00000000 +#define mmMMEA1_ADDRNORMGMI_NP2_CHANNEL_CFG_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDEC_BANK_CFG_DEFAULT 0x000003cf +#define mmMMEA1_ADDRDEC_MISC_CFG_DEFAULT 0xfffff000 +#define 
mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK0_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK1_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK2_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK3_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK4_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK5_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_PC_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_PC2_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_CS0_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_CS1_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDECDRAM_HARVEST_ENABLE_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDECGMI_ADDR_HASH_BANK0_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDECGMI_ADDR_HASH_BANK1_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDECGMI_ADDR_HASH_BANK2_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDECGMI_ADDR_HASH_BANK3_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDECGMI_ADDR_HASH_BANK4_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDECGMI_ADDR_HASH_BANK5_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDECGMI_ADDR_HASH_PC_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDECGMI_ADDR_HASH_PC2_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDECGMI_ADDR_HASH_CS0_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDECGMI_ADDR_HASH_CS1_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDECGMI_HARVEST_ENABLE_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDEC0_BASE_ADDR_CS0_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDEC0_BASE_ADDR_CS1_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDEC0_BASE_ADDR_CS2_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDEC0_BASE_ADDR_CS3_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDEC0_BASE_ADDR_SECCS0_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDEC0_BASE_ADDR_SECCS1_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDEC0_BASE_ADDR_SECCS2_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDEC0_BASE_ADDR_SECCS3_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDEC0_ADDR_MASK_CS01_DEFAULT 0xfffffffe +#define mmMMEA1_ADDRDEC0_ADDR_MASK_CS23_DEFAULT 0xfffffffe +#define mmMMEA1_ADDRDEC0_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe +#define mmMMEA1_ADDRDEC0_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe +#define mmMMEA1_ADDRDEC0_ADDR_CFG_CS01_DEFAULT 0x00050408 +#define mmMMEA1_ADDRDEC0_ADDR_CFG_CS23_DEFAULT 0x00050408 +#define mmMMEA1_ADDRDEC0_ADDR_SEL_CS01_DEFAULT 0x04076543 +#define mmMMEA1_ADDRDEC0_ADDR_SEL_CS23_DEFAULT 0x04076543 +#define mmMMEA1_ADDRDEC0_ADDR_SEL2_CS01_DEFAULT 0x00000008 +#define mmMMEA1_ADDRDEC0_ADDR_SEL2_CS23_DEFAULT 0x00000008 +#define mmMMEA1_ADDRDEC0_COL_SEL_LO_CS01_DEFAULT 0x87654321 +#define mmMMEA1_ADDRDEC0_COL_SEL_LO_CS23_DEFAULT 0x87654321 +#define mmMMEA1_ADDRDEC0_COL_SEL_HI_CS01_DEFAULT 0xa9876543 +#define mmMMEA1_ADDRDEC0_COL_SEL_HI_CS23_DEFAULT 0xa9876543 +#define mmMMEA1_ADDRDEC0_RM_SEL_CS01_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDEC0_RM_SEL_CS23_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDEC0_RM_SEL_SECCS01_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDEC0_RM_SEL_SECCS23_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDEC1_BASE_ADDR_CS0_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDEC1_BASE_ADDR_CS1_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDEC1_BASE_ADDR_CS2_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDEC1_BASE_ADDR_CS3_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDEC1_BASE_ADDR_SECCS0_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDEC1_BASE_ADDR_SECCS1_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDEC1_BASE_ADDR_SECCS2_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDEC1_BASE_ADDR_SECCS3_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDEC1_ADDR_MASK_CS01_DEFAULT 0xfffffffe +#define mmMMEA1_ADDRDEC1_ADDR_MASK_CS23_DEFAULT 0xfffffffe +#define 
mmMMEA1_ADDRDEC1_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe +#define mmMMEA1_ADDRDEC1_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe +#define mmMMEA1_ADDRDEC1_ADDR_CFG_CS01_DEFAULT 0x00050408 +#define mmMMEA1_ADDRDEC1_ADDR_CFG_CS23_DEFAULT 0x00050408 +#define mmMMEA1_ADDRDEC1_ADDR_SEL_CS01_DEFAULT 0x04076543 +#define mmMMEA1_ADDRDEC1_ADDR_SEL_CS23_DEFAULT 0x04076543 +#define mmMMEA1_ADDRDEC1_ADDR_SEL2_CS01_DEFAULT 0x00000008 +#define mmMMEA1_ADDRDEC1_ADDR_SEL2_CS23_DEFAULT 0x00000008 +#define mmMMEA1_ADDRDEC1_COL_SEL_LO_CS01_DEFAULT 0x87654321 +#define mmMMEA1_ADDRDEC1_COL_SEL_LO_CS23_DEFAULT 0x87654321 +#define mmMMEA1_ADDRDEC1_COL_SEL_HI_CS01_DEFAULT 0xa9876543 +#define mmMMEA1_ADDRDEC1_COL_SEL_HI_CS23_DEFAULT 0xa9876543 +#define mmMMEA1_ADDRDEC1_RM_SEL_CS01_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDEC1_RM_SEL_CS23_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDEC1_RM_SEL_SECCS01_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDEC1_RM_SEL_SECCS23_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDEC2_BASE_ADDR_CS0_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDEC2_BASE_ADDR_CS1_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDEC2_BASE_ADDR_CS2_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDEC2_BASE_ADDR_CS3_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDEC2_BASE_ADDR_SECCS0_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDEC2_BASE_ADDR_SECCS1_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDEC2_BASE_ADDR_SECCS2_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDEC2_BASE_ADDR_SECCS3_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDEC2_ADDR_MASK_CS01_DEFAULT 0xfffffffe +#define mmMMEA1_ADDRDEC2_ADDR_MASK_CS23_DEFAULT 0xfffffffe +#define mmMMEA1_ADDRDEC2_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe +#define mmMMEA1_ADDRDEC2_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe +#define mmMMEA1_ADDRDEC2_ADDR_CFG_CS01_DEFAULT 0x00050408 +#define mmMMEA1_ADDRDEC2_ADDR_CFG_CS23_DEFAULT 0x00050408 +#define mmMMEA1_ADDRDEC2_ADDR_SEL_CS01_DEFAULT 0x04076543 +#define mmMMEA1_ADDRDEC2_ADDR_SEL_CS23_DEFAULT 0x04076543 +#define mmMMEA1_ADDRDEC2_ADDR_SEL2_CS01_DEFAULT 0x00000008 +#define mmMMEA1_ADDRDEC2_ADDR_SEL2_CS23_DEFAULT 0x00000008 +#define mmMMEA1_ADDRDEC2_COL_SEL_LO_CS01_DEFAULT 0x87654321 +#define mmMMEA1_ADDRDEC2_COL_SEL_LO_CS23_DEFAULT 0x87654321 +#define mmMMEA1_ADDRDEC2_COL_SEL_HI_CS01_DEFAULT 0xa9876543 +#define mmMMEA1_ADDRDEC2_COL_SEL_HI_CS23_DEFAULT 0xa9876543 +#define mmMMEA1_ADDRDEC2_RM_SEL_CS01_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDEC2_RM_SEL_CS23_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDEC2_RM_SEL_SECCS01_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDEC2_RM_SEL_SECCS23_DEFAULT 0x00000000 +#define mmMMEA1_ADDRNORMDRAM_GLOBAL_CNTL_DEFAULT 0x00600000 +#define mmMMEA1_ADDRNORMGMI_GLOBAL_CNTL_DEFAULT 0x00600000 +#define mmMMEA1_IO_RD_CLI2GRP_MAP0_DEFAULT 0xe4e4e4e4 +#define mmMMEA1_IO_RD_CLI2GRP_MAP1_DEFAULT 0xe4e4e4e4 +#define mmMMEA1_IO_WR_CLI2GRP_MAP0_DEFAULT 0xe4e4e4e4 +#define mmMMEA1_IO_WR_CLI2GRP_MAP1_DEFAULT 0xe4e4e4e4 +#define mmMMEA1_IO_RD_COMBINE_FLUSH_DEFAULT 0x00007777 +#define mmMMEA1_IO_WR_COMBINE_FLUSH_DEFAULT 0x00007777 +#define mmMMEA1_IO_GROUP_BURST_DEFAULT 0x1f031f03 +#define mmMMEA1_IO_RD_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA1_IO_WR_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA1_IO_RD_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA1_IO_WR_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA1_IO_RD_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA1_IO_WR_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA1_IO_RD_PRI_URGENCY_DEFAULT 0x00000492 +#define mmMMEA1_IO_WR_PRI_URGENCY_DEFAULT 0x00000492 +#define mmMMEA1_IO_RD_PRI_URGENCY_MASKING_DEFAULT 0xffffffff +#define mmMMEA1_IO_WR_PRI_URGENCY_MASKING_DEFAULT 
0xffffffff +#define mmMMEA1_IO_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA1_IO_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA1_IO_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA1_IO_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA1_IO_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA1_IO_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA1_SDP_ARB_DRAM_DEFAULT 0x00101e40 +#define mmMMEA1_SDP_ARB_GMI_DEFAULT 0x00101e40 +#define mmMMEA1_SDP_ARB_FINAL_DEFAULT 0x00007fff +#define mmMMEA1_SDP_DRAM_PRIORITY_DEFAULT 0x00000000 +#define mmMMEA1_SDP_GMI_PRIORITY_DEFAULT 0x00000000 +#define mmMMEA1_SDP_IO_PRIORITY_DEFAULT 0x00000000 +#define mmMMEA1_SDP_CREDITS_DEFAULT 0x000101bf +#define mmMMEA1_SDP_TAG_RESERVE0_DEFAULT 0x00000000 +#define mmMMEA1_SDP_TAG_RESERVE1_DEFAULT 0x00000000 +#define mmMMEA1_SDP_VCC_RESERVE0_DEFAULT 0x00000000 +#define mmMMEA1_SDP_VCC_RESERVE1_DEFAULT 0x00000000 +#define mmMMEA1_SDP_VCD_RESERVE0_DEFAULT 0x00000000 +#define mmMMEA1_SDP_VCD_RESERVE1_DEFAULT 0x00000000 +#define mmMMEA1_SDP_REQ_CNTL_DEFAULT 0x0000001f +#define mmMMEA1_MISC_DEFAULT 0x0c00a070 +#define mmMMEA1_LATENCY_SAMPLING_DEFAULT 0x00000000 +#define mmMMEA1_PERFCOUNTER_LO_DEFAULT 0x00000000 +#define mmMMEA1_PERFCOUNTER_HI_DEFAULT 0x00000000 +#define mmMMEA1_PERFCOUNTER0_CFG_DEFAULT 0x00000000 +#define mmMMEA1_PERFCOUNTER1_CFG_DEFAULT 0x00000000 +#define mmMMEA1_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000 +#define mmMMEA1_EDC_CNT_DEFAULT 0x00000000 +#define mmMMEA1_EDC_CNT2_DEFAULT 0x00000000 +#define mmMMEA1_DSM_CNTL_DEFAULT 0x00000000 +#define mmMMEA1_DSM_CNTLA_DEFAULT 0x00000000 +#define mmMMEA1_DSM_CNTLB_DEFAULT 0x00000000 +#define mmMMEA1_DSM_CNTL2_DEFAULT 0x00000000 +#define mmMMEA1_DSM_CNTL2A_DEFAULT 0x00000000 +#define mmMMEA1_DSM_CNTL2B_DEFAULT 0x00000000 +#define mmMMEA1_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmMMEA1_EDC_MODE_DEFAULT 0x00000000 +#define mmMMEA1_ERR_STATUS_DEFAULT 0x00000300 +#define mmMMEA1_MISC2_DEFAULT 0x00000000 +#define mmMMEA1_ADDRDEC_SELECT_DEFAULT 0x00000000 +#define mmMMEA1_EDC_CNT3_DEFAULT 0x00000000 + + +// addressBlock: mmhub_ea_mmeadec2 +#define mmMMEA2_DRAM_RD_CLI2GRP_MAP0_DEFAULT 0x55555555 +#define mmMMEA2_DRAM_RD_CLI2GRP_MAP1_DEFAULT 0x55555555 +#define mmMMEA2_DRAM_WR_CLI2GRP_MAP0_DEFAULT 0x55555555 +#define mmMMEA2_DRAM_WR_CLI2GRP_MAP1_DEFAULT 0x55555555 +#define mmMMEA2_DRAM_RD_GRP2VC_MAP_DEFAULT 0x00000e25 +#define mmMMEA2_DRAM_WR_GRP2VC_MAP_DEFAULT 0x00000e25 +#define mmMMEA2_DRAM_RD_LAZY_DEFAULT 0x78000924 +#define mmMMEA2_DRAM_WR_LAZY_DEFAULT 0x78000924 +#define mmMMEA2_DRAM_RD_CAM_CNTL_DEFAULT 0x16db4444 +#define mmMMEA2_DRAM_WR_CAM_CNTL_DEFAULT 0x16db4444 +#define mmMMEA2_DRAM_PAGE_BURST_DEFAULT 0x20002000 +#define mmMMEA2_DRAM_RD_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA2_DRAM_WR_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA2_DRAM_RD_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA2_DRAM_WR_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA2_DRAM_RD_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA2_DRAM_WR_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA2_DRAM_RD_PRI_URGENCY_DEFAULT 0x0000fdb6 +#define mmMMEA2_DRAM_WR_PRI_URGENCY_DEFAULT 0x0000fdb6 +#define mmMMEA2_DRAM_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA2_DRAM_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA2_DRAM_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA2_DRAM_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA2_DRAM_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA2_DRAM_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA2_GMI_RD_CLI2GRP_MAP0_DEFAULT 
0x00000000 +#define mmMMEA2_GMI_RD_CLI2GRP_MAP1_DEFAULT 0x00000000 +#define mmMMEA2_GMI_WR_CLI2GRP_MAP0_DEFAULT 0x00000000 +#define mmMMEA2_GMI_WR_CLI2GRP_MAP1_DEFAULT 0x00000000 +#define mmMMEA2_GMI_RD_GRP2VC_MAP_DEFAULT 0x00000fff +#define mmMMEA2_GMI_WR_GRP2VC_MAP_DEFAULT 0x00000fff +#define mmMMEA2_GMI_RD_LAZY_DEFAULT 0x78000924 +#define mmMMEA2_GMI_WR_LAZY_DEFAULT 0x78000924 +#define mmMMEA2_GMI_RD_CAM_CNTL_DEFAULT 0x16db4444 +#define mmMMEA2_GMI_WR_CAM_CNTL_DEFAULT 0x16db4444 +#define mmMMEA2_GMI_PAGE_BURST_DEFAULT 0x20002000 +#define mmMMEA2_GMI_RD_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA2_GMI_WR_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA2_GMI_RD_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA2_GMI_WR_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA2_GMI_RD_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA2_GMI_WR_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA2_GMI_RD_PRI_URGENCY_DEFAULT 0x0000fdb6 +#define mmMMEA2_GMI_WR_PRI_URGENCY_DEFAULT 0x0000fdb6 +#define mmMMEA2_GMI_RD_PRI_URGENCY_MASKING_DEFAULT 0xffffffff +#define mmMMEA2_GMI_WR_PRI_URGENCY_MASKING_DEFAULT 0xffffffff +#define mmMMEA2_GMI_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA2_GMI_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA2_GMI_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA2_GMI_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA2_GMI_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA2_GMI_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA2_ADDRNORM_BASE_ADDR0_DEFAULT 0x00000000 +#define mmMMEA2_ADDRNORM_LIMIT_ADDR0_DEFAULT 0x00000000 +#define mmMMEA2_ADDRNORM_BASE_ADDR1_DEFAULT 0x00000000 +#define mmMMEA2_ADDRNORM_LIMIT_ADDR1_DEFAULT 0x00000000 +#define mmMMEA2_ADDRNORM_OFFSET_ADDR1_DEFAULT 0x00000000 +#define mmMMEA2_ADDRNORM_BASE_ADDR2_DEFAULT 0x00000000 +#define mmMMEA2_ADDRNORM_LIMIT_ADDR2_DEFAULT 0x00000000 +#define mmMMEA2_ADDRNORM_BASE_ADDR3_DEFAULT 0x00000000 +#define mmMMEA2_ADDRNORM_LIMIT_ADDR3_DEFAULT 0x00000000 +#define mmMMEA2_ADDRNORM_OFFSET_ADDR3_DEFAULT 0x00000000 +#define mmMMEA2_ADDRNORM_BASE_ADDR4_DEFAULT 0x00000000 +#define mmMMEA2_ADDRNORM_LIMIT_ADDR4_DEFAULT 0x00000000 +#define mmMMEA2_ADDRNORM_BASE_ADDR5_DEFAULT 0x00000000 +#define mmMMEA2_ADDRNORM_LIMIT_ADDR5_DEFAULT 0x00000000 +#define mmMMEA2_ADDRNORM_OFFSET_ADDR5_DEFAULT 0x00000000 +#define mmMMEA2_ADDRNORMDRAM_HOLE_CNTL_DEFAULT 0x00000000 +#define mmMMEA2_ADDRNORMGMI_HOLE_CNTL_DEFAULT 0x00000000 +#define mmMMEA2_ADDRNORMDRAM_NP2_CHANNEL_CFG_DEFAULT 0x00000000 +#define mmMMEA2_ADDRNORMGMI_NP2_CHANNEL_CFG_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDEC_BANK_CFG_DEFAULT 0x000003cf +#define mmMMEA2_ADDRDEC_MISC_CFG_DEFAULT 0xfffff000 +#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_BANK0_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_BANK1_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_BANK2_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_BANK3_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_BANK4_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_BANK5_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_PC_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_PC2_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_CS0_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_CS1_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDECDRAM_HARVEST_ENABLE_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDECGMI_ADDR_HASH_BANK0_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDECGMI_ADDR_HASH_BANK1_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDECGMI_ADDR_HASH_BANK2_DEFAULT 0x00000000 +#define 
mmMMEA2_ADDRDECGMI_ADDR_HASH_BANK3_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDECGMI_ADDR_HASH_BANK4_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDECGMI_ADDR_HASH_BANK5_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDECGMI_ADDR_HASH_PC_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDECGMI_ADDR_HASH_PC2_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDECGMI_ADDR_HASH_CS0_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDECGMI_ADDR_HASH_CS1_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDECGMI_HARVEST_ENABLE_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDEC0_BASE_ADDR_CS0_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDEC0_BASE_ADDR_CS1_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDEC0_BASE_ADDR_CS2_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDEC0_BASE_ADDR_CS3_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDEC0_BASE_ADDR_SECCS0_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDEC0_BASE_ADDR_SECCS1_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDEC0_BASE_ADDR_SECCS2_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDEC0_BASE_ADDR_SECCS3_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDEC0_ADDR_MASK_CS01_DEFAULT 0xfffffffe +#define mmMMEA2_ADDRDEC0_ADDR_MASK_CS23_DEFAULT 0xfffffffe +#define mmMMEA2_ADDRDEC0_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe +#define mmMMEA2_ADDRDEC0_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe +#define mmMMEA2_ADDRDEC0_ADDR_CFG_CS01_DEFAULT 0x00050408 +#define mmMMEA2_ADDRDEC0_ADDR_CFG_CS23_DEFAULT 0x00050408 +#define mmMMEA2_ADDRDEC0_ADDR_SEL_CS01_DEFAULT 0x04076543 +#define mmMMEA2_ADDRDEC0_ADDR_SEL_CS23_DEFAULT 0x04076543 +#define mmMMEA2_ADDRDEC0_ADDR_SEL2_CS01_DEFAULT 0x00000008 +#define mmMMEA2_ADDRDEC0_ADDR_SEL2_CS23_DEFAULT 0x00000008 +#define mmMMEA2_ADDRDEC0_COL_SEL_LO_CS01_DEFAULT 0x87654321 +#define mmMMEA2_ADDRDEC0_COL_SEL_LO_CS23_DEFAULT 0x87654321 +#define mmMMEA2_ADDRDEC0_COL_SEL_HI_CS01_DEFAULT 0xa9876543 +#define mmMMEA2_ADDRDEC0_COL_SEL_HI_CS23_DEFAULT 0xa9876543 +#define mmMMEA2_ADDRDEC0_RM_SEL_CS01_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDEC0_RM_SEL_CS23_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDEC0_RM_SEL_SECCS01_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDEC0_RM_SEL_SECCS23_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDEC1_BASE_ADDR_CS0_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDEC1_BASE_ADDR_CS1_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDEC1_BASE_ADDR_CS2_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDEC1_BASE_ADDR_CS3_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDEC1_BASE_ADDR_SECCS0_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDEC1_BASE_ADDR_SECCS1_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDEC1_BASE_ADDR_SECCS2_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDEC1_BASE_ADDR_SECCS3_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDEC1_ADDR_MASK_CS01_DEFAULT 0xfffffffe +#define mmMMEA2_ADDRDEC1_ADDR_MASK_CS23_DEFAULT 0xfffffffe +#define mmMMEA2_ADDRDEC1_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe +#define mmMMEA2_ADDRDEC1_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe +#define mmMMEA2_ADDRDEC1_ADDR_CFG_CS01_DEFAULT 0x00050408 +#define mmMMEA2_ADDRDEC1_ADDR_CFG_CS23_DEFAULT 0x00050408 +#define mmMMEA2_ADDRDEC1_ADDR_SEL_CS01_DEFAULT 0x04076543 +#define mmMMEA2_ADDRDEC1_ADDR_SEL_CS23_DEFAULT 0x04076543 +#define mmMMEA2_ADDRDEC1_ADDR_SEL2_CS01_DEFAULT 0x00000008 +#define mmMMEA2_ADDRDEC1_ADDR_SEL2_CS23_DEFAULT 0x00000008 +#define mmMMEA2_ADDRDEC1_COL_SEL_LO_CS01_DEFAULT 0x87654321 +#define mmMMEA2_ADDRDEC1_COL_SEL_LO_CS23_DEFAULT 0x87654321 +#define mmMMEA2_ADDRDEC1_COL_SEL_HI_CS01_DEFAULT 0xa9876543 +#define mmMMEA2_ADDRDEC1_COL_SEL_HI_CS23_DEFAULT 0xa9876543 +#define mmMMEA2_ADDRDEC1_RM_SEL_CS01_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDEC1_RM_SEL_CS23_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDEC1_RM_SEL_SECCS01_DEFAULT 0x00000000 
+#define mmMMEA2_ADDRDEC1_RM_SEL_SECCS23_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDEC2_BASE_ADDR_CS0_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDEC2_BASE_ADDR_CS1_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDEC2_BASE_ADDR_CS2_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDEC2_BASE_ADDR_CS3_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDEC2_BASE_ADDR_SECCS0_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDEC2_BASE_ADDR_SECCS1_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDEC2_BASE_ADDR_SECCS2_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDEC2_BASE_ADDR_SECCS3_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDEC2_ADDR_MASK_CS01_DEFAULT 0xfffffffe +#define mmMMEA2_ADDRDEC2_ADDR_MASK_CS23_DEFAULT 0xfffffffe +#define mmMMEA2_ADDRDEC2_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe +#define mmMMEA2_ADDRDEC2_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe +#define mmMMEA2_ADDRDEC2_ADDR_CFG_CS01_DEFAULT 0x00050408 +#define mmMMEA2_ADDRDEC2_ADDR_CFG_CS23_DEFAULT 0x00050408 +#define mmMMEA2_ADDRDEC2_ADDR_SEL_CS01_DEFAULT 0x04076543 +#define mmMMEA2_ADDRDEC2_ADDR_SEL_CS23_DEFAULT 0x04076543 +#define mmMMEA2_ADDRDEC2_ADDR_SEL2_CS01_DEFAULT 0x00000008 +#define mmMMEA2_ADDRDEC2_ADDR_SEL2_CS23_DEFAULT 0x00000008 +#define mmMMEA2_ADDRDEC2_COL_SEL_LO_CS01_DEFAULT 0x87654321 +#define mmMMEA2_ADDRDEC2_COL_SEL_LO_CS23_DEFAULT 0x87654321 +#define mmMMEA2_ADDRDEC2_COL_SEL_HI_CS01_DEFAULT 0xa9876543 +#define mmMMEA2_ADDRDEC2_COL_SEL_HI_CS23_DEFAULT 0xa9876543 +#define mmMMEA2_ADDRDEC2_RM_SEL_CS01_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDEC2_RM_SEL_CS23_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDEC2_RM_SEL_SECCS01_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDEC2_RM_SEL_SECCS23_DEFAULT 0x00000000 +#define mmMMEA2_ADDRNORMDRAM_GLOBAL_CNTL_DEFAULT 0x00600000 +#define mmMMEA2_ADDRNORMGMI_GLOBAL_CNTL_DEFAULT 0x00600000 +#define mmMMEA2_IO_RD_CLI2GRP_MAP0_DEFAULT 0xe4e4e4e4 +#define mmMMEA2_IO_RD_CLI2GRP_MAP1_DEFAULT 0xe4e4e4e4 +#define mmMMEA2_IO_WR_CLI2GRP_MAP0_DEFAULT 0xe4e4e4e4 +#define mmMMEA2_IO_WR_CLI2GRP_MAP1_DEFAULT 0xe4e4e4e4 +#define mmMMEA2_IO_RD_COMBINE_FLUSH_DEFAULT 0x00007777 +#define mmMMEA2_IO_WR_COMBINE_FLUSH_DEFAULT 0x00007777 +#define mmMMEA2_IO_GROUP_BURST_DEFAULT 0x1f031f03 +#define mmMMEA2_IO_RD_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA2_IO_WR_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA2_IO_RD_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA2_IO_WR_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA2_IO_RD_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA2_IO_WR_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA2_IO_RD_PRI_URGENCY_DEFAULT 0x00000492 +#define mmMMEA2_IO_WR_PRI_URGENCY_DEFAULT 0x00000492 +#define mmMMEA2_IO_RD_PRI_URGENCY_MASKING_DEFAULT 0xffffffff +#define mmMMEA2_IO_WR_PRI_URGENCY_MASKING_DEFAULT 0xffffffff +#define mmMMEA2_IO_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA2_IO_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA2_IO_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA2_IO_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA2_IO_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA2_IO_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA2_SDP_ARB_DRAM_DEFAULT 0x00101e40 +#define mmMMEA2_SDP_ARB_GMI_DEFAULT 0x00101e40 +#define mmMMEA2_SDP_ARB_FINAL_DEFAULT 0x00007fff +#define mmMMEA2_SDP_DRAM_PRIORITY_DEFAULT 0x00000000 +#define mmMMEA2_SDP_GMI_PRIORITY_DEFAULT 0x00000000 +#define mmMMEA2_SDP_IO_PRIORITY_DEFAULT 0x00000000 +#define mmMMEA2_SDP_CREDITS_DEFAULT 0x000101bf +#define mmMMEA2_SDP_TAG_RESERVE0_DEFAULT 0x00000000 +#define mmMMEA2_SDP_TAG_RESERVE1_DEFAULT 0x00000000 +#define mmMMEA2_SDP_VCC_RESERVE0_DEFAULT 0x00000000 +#define 
mmMMEA2_SDP_VCC_RESERVE1_DEFAULT 0x00000000 +#define mmMMEA2_SDP_VCD_RESERVE0_DEFAULT 0x00000000 +#define mmMMEA2_SDP_VCD_RESERVE1_DEFAULT 0x00000000 +#define mmMMEA2_SDP_REQ_CNTL_DEFAULT 0x0000001f +#define mmMMEA2_MISC_DEFAULT 0x0c00a070 +#define mmMMEA2_LATENCY_SAMPLING_DEFAULT 0x00000000 +#define mmMMEA2_PERFCOUNTER_LO_DEFAULT 0x00000000 +#define mmMMEA2_PERFCOUNTER_HI_DEFAULT 0x00000000 +#define mmMMEA2_PERFCOUNTER0_CFG_DEFAULT 0x00000000 +#define mmMMEA2_PERFCOUNTER1_CFG_DEFAULT 0x00000000 +#define mmMMEA2_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000 +#define mmMMEA2_EDC_CNT_DEFAULT 0x00000000 +#define mmMMEA2_EDC_CNT2_DEFAULT 0x00000000 +#define mmMMEA2_DSM_CNTL_DEFAULT 0x00000000 +#define mmMMEA2_DSM_CNTLA_DEFAULT 0x00000000 +#define mmMMEA2_DSM_CNTLB_DEFAULT 0x00000000 +#define mmMMEA2_DSM_CNTL2_DEFAULT 0x00000000 +#define mmMMEA2_DSM_CNTL2A_DEFAULT 0x00000000 +#define mmMMEA2_DSM_CNTL2B_DEFAULT 0x00000000 +#define mmMMEA2_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmMMEA2_EDC_MODE_DEFAULT 0x00000000 +#define mmMMEA2_ERR_STATUS_DEFAULT 0x00000300 +#define mmMMEA2_MISC2_DEFAULT 0x00000000 +#define mmMMEA2_ADDRDEC_SELECT_DEFAULT 0x00000000 +#define mmMMEA2_EDC_CNT3_DEFAULT 0x00000000 + + +// addressBlock: mmhub_ea_mmeadec3 +#define mmMMEA3_DRAM_RD_CLI2GRP_MAP0_DEFAULT 0x55555555 +#define mmMMEA3_DRAM_RD_CLI2GRP_MAP1_DEFAULT 0x55555555 +#define mmMMEA3_DRAM_WR_CLI2GRP_MAP0_DEFAULT 0x55555555 +#define mmMMEA3_DRAM_WR_CLI2GRP_MAP1_DEFAULT 0x55555555 +#define mmMMEA3_DRAM_RD_GRP2VC_MAP_DEFAULT 0x00000e25 +#define mmMMEA3_DRAM_WR_GRP2VC_MAP_DEFAULT 0x00000e25 +#define mmMMEA3_DRAM_RD_LAZY_DEFAULT 0x78000924 +#define mmMMEA3_DRAM_WR_LAZY_DEFAULT 0x78000924 +#define mmMMEA3_DRAM_RD_CAM_CNTL_DEFAULT 0x16db4444 +#define mmMMEA3_DRAM_WR_CAM_CNTL_DEFAULT 0x16db4444 +#define mmMMEA3_DRAM_PAGE_BURST_DEFAULT 0x20002000 +#define mmMMEA3_DRAM_RD_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA3_DRAM_WR_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA3_DRAM_RD_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA3_DRAM_WR_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA3_DRAM_RD_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA3_DRAM_WR_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA3_DRAM_RD_PRI_URGENCY_DEFAULT 0x0000fdb6 +#define mmMMEA3_DRAM_WR_PRI_URGENCY_DEFAULT 0x0000fdb6 +#define mmMMEA3_DRAM_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA3_DRAM_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA3_DRAM_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA3_DRAM_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA3_DRAM_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA3_DRAM_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA3_GMI_RD_CLI2GRP_MAP0_DEFAULT 0x00000000 +#define mmMMEA3_GMI_RD_CLI2GRP_MAP1_DEFAULT 0x00000000 +#define mmMMEA3_GMI_WR_CLI2GRP_MAP0_DEFAULT 0x00000000 +#define mmMMEA3_GMI_WR_CLI2GRP_MAP1_DEFAULT 0x00000000 +#define mmMMEA3_GMI_RD_GRP2VC_MAP_DEFAULT 0x00000fff +#define mmMMEA3_GMI_WR_GRP2VC_MAP_DEFAULT 0x00000fff +#define mmMMEA3_GMI_RD_LAZY_DEFAULT 0x78000924 +#define mmMMEA3_GMI_WR_LAZY_DEFAULT 0x78000924 +#define mmMMEA3_GMI_RD_CAM_CNTL_DEFAULT 0x16db4444 +#define mmMMEA3_GMI_WR_CAM_CNTL_DEFAULT 0x16db4444 +#define mmMMEA3_GMI_PAGE_BURST_DEFAULT 0x20002000 +#define mmMMEA3_GMI_RD_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA3_GMI_WR_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA3_GMI_RD_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA3_GMI_WR_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA3_GMI_RD_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA3_GMI_WR_PRI_FIXED_DEFAULT 0x00000924 +#define 
mmMMEA3_GMI_RD_PRI_URGENCY_DEFAULT 0x0000fdb6 +#define mmMMEA3_GMI_WR_PRI_URGENCY_DEFAULT 0x0000fdb6 +#define mmMMEA3_GMI_RD_PRI_URGENCY_MASKING_DEFAULT 0xffffffff +#define mmMMEA3_GMI_WR_PRI_URGENCY_MASKING_DEFAULT 0xffffffff +#define mmMMEA3_GMI_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA3_GMI_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA3_GMI_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA3_GMI_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA3_GMI_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA3_GMI_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA3_ADDRNORM_BASE_ADDR0_DEFAULT 0x00000000 +#define mmMMEA3_ADDRNORM_LIMIT_ADDR0_DEFAULT 0x00000000 +#define mmMMEA3_ADDRNORM_BASE_ADDR1_DEFAULT 0x00000000 +#define mmMMEA3_ADDRNORM_LIMIT_ADDR1_DEFAULT 0x00000000 +#define mmMMEA3_ADDRNORM_OFFSET_ADDR1_DEFAULT 0x00000000 +#define mmMMEA3_ADDRNORM_BASE_ADDR2_DEFAULT 0x00000000 +#define mmMMEA3_ADDRNORM_LIMIT_ADDR2_DEFAULT 0x00000000 +#define mmMMEA3_ADDRNORM_BASE_ADDR3_DEFAULT 0x00000000 +#define mmMMEA3_ADDRNORM_LIMIT_ADDR3_DEFAULT 0x00000000 +#define mmMMEA3_ADDRNORM_OFFSET_ADDR3_DEFAULT 0x00000000 +#define mmMMEA3_ADDRNORM_BASE_ADDR4_DEFAULT 0x00000000 +#define mmMMEA3_ADDRNORM_LIMIT_ADDR4_DEFAULT 0x00000000 +#define mmMMEA3_ADDRNORM_BASE_ADDR5_DEFAULT 0x00000000 +#define mmMMEA3_ADDRNORM_LIMIT_ADDR5_DEFAULT 0x00000000 +#define mmMMEA3_ADDRNORM_OFFSET_ADDR5_DEFAULT 0x00000000 +#define mmMMEA3_ADDRNORMDRAM_HOLE_CNTL_DEFAULT 0x00000000 +#define mmMMEA3_ADDRNORMGMI_HOLE_CNTL_DEFAULT 0x00000000 +#define mmMMEA3_ADDRNORMDRAM_NP2_CHANNEL_CFG_DEFAULT 0x00000000 +#define mmMMEA3_ADDRNORMGMI_NP2_CHANNEL_CFG_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDEC_BANK_CFG_DEFAULT 0x000003cf +#define mmMMEA3_ADDRDEC_MISC_CFG_DEFAULT 0xfffff000 +#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_BANK0_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_BANK1_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_BANK2_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_BANK3_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_BANK4_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_BANK5_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_PC_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_PC2_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_CS0_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_CS1_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDECDRAM_HARVEST_ENABLE_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDECGMI_ADDR_HASH_BANK0_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDECGMI_ADDR_HASH_BANK1_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDECGMI_ADDR_HASH_BANK2_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDECGMI_ADDR_HASH_BANK3_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDECGMI_ADDR_HASH_BANK4_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDECGMI_ADDR_HASH_BANK5_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDECGMI_ADDR_HASH_PC_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDECGMI_ADDR_HASH_PC2_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDECGMI_ADDR_HASH_CS0_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDECGMI_ADDR_HASH_CS1_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDECGMI_HARVEST_ENABLE_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDEC0_BASE_ADDR_CS0_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDEC0_BASE_ADDR_CS1_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDEC0_BASE_ADDR_CS2_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDEC0_BASE_ADDR_CS3_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDEC0_BASE_ADDR_SECCS0_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDEC0_BASE_ADDR_SECCS1_DEFAULT 0x00000000 +#define 
mmMMEA3_ADDRDEC0_BASE_ADDR_SECCS2_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDEC0_BASE_ADDR_SECCS3_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDEC0_ADDR_MASK_CS01_DEFAULT 0xfffffffe +#define mmMMEA3_ADDRDEC0_ADDR_MASK_CS23_DEFAULT 0xfffffffe +#define mmMMEA3_ADDRDEC0_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe +#define mmMMEA3_ADDRDEC0_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe +#define mmMMEA3_ADDRDEC0_ADDR_CFG_CS01_DEFAULT 0x00050408 +#define mmMMEA3_ADDRDEC0_ADDR_CFG_CS23_DEFAULT 0x00050408 +#define mmMMEA3_ADDRDEC0_ADDR_SEL_CS01_DEFAULT 0x04076543 +#define mmMMEA3_ADDRDEC0_ADDR_SEL_CS23_DEFAULT 0x04076543 +#define mmMMEA3_ADDRDEC0_ADDR_SEL2_CS01_DEFAULT 0x00000008 +#define mmMMEA3_ADDRDEC0_ADDR_SEL2_CS23_DEFAULT 0x00000008 +#define mmMMEA3_ADDRDEC0_COL_SEL_LO_CS01_DEFAULT 0x87654321 +#define mmMMEA3_ADDRDEC0_COL_SEL_LO_CS23_DEFAULT 0x87654321 +#define mmMMEA3_ADDRDEC0_COL_SEL_HI_CS01_DEFAULT 0xa9876543 +#define mmMMEA3_ADDRDEC0_COL_SEL_HI_CS23_DEFAULT 0xa9876543 +#define mmMMEA3_ADDRDEC0_RM_SEL_CS01_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDEC0_RM_SEL_CS23_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDEC0_RM_SEL_SECCS01_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDEC0_RM_SEL_SECCS23_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDEC1_BASE_ADDR_CS0_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDEC1_BASE_ADDR_CS1_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDEC1_BASE_ADDR_CS2_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDEC1_BASE_ADDR_CS3_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDEC1_BASE_ADDR_SECCS0_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDEC1_BASE_ADDR_SECCS1_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDEC1_BASE_ADDR_SECCS2_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDEC1_BASE_ADDR_SECCS3_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDEC1_ADDR_MASK_CS01_DEFAULT 0xfffffffe +#define mmMMEA3_ADDRDEC1_ADDR_MASK_CS23_DEFAULT 0xfffffffe +#define mmMMEA3_ADDRDEC1_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe +#define mmMMEA3_ADDRDEC1_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe +#define mmMMEA3_ADDRDEC1_ADDR_CFG_CS01_DEFAULT 0x00050408 +#define mmMMEA3_ADDRDEC1_ADDR_CFG_CS23_DEFAULT 0x00050408 +#define mmMMEA3_ADDRDEC1_ADDR_SEL_CS01_DEFAULT 0x04076543 +#define mmMMEA3_ADDRDEC1_ADDR_SEL_CS23_DEFAULT 0x04076543 +#define mmMMEA3_ADDRDEC1_ADDR_SEL2_CS01_DEFAULT 0x00000008 +#define mmMMEA3_ADDRDEC1_ADDR_SEL2_CS23_DEFAULT 0x00000008 +#define mmMMEA3_ADDRDEC1_COL_SEL_LO_CS01_DEFAULT 0x87654321 +#define mmMMEA3_ADDRDEC1_COL_SEL_LO_CS23_DEFAULT 0x87654321 +#define mmMMEA3_ADDRDEC1_COL_SEL_HI_CS01_DEFAULT 0xa9876543 +#define mmMMEA3_ADDRDEC1_COL_SEL_HI_CS23_DEFAULT 0xa9876543 +#define mmMMEA3_ADDRDEC1_RM_SEL_CS01_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDEC1_RM_SEL_CS23_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDEC1_RM_SEL_SECCS01_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDEC1_RM_SEL_SECCS23_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDEC2_BASE_ADDR_CS0_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDEC2_BASE_ADDR_CS1_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDEC2_BASE_ADDR_CS2_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDEC2_BASE_ADDR_CS3_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDEC2_BASE_ADDR_SECCS0_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDEC2_BASE_ADDR_SECCS1_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDEC2_BASE_ADDR_SECCS2_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDEC2_BASE_ADDR_SECCS3_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDEC2_ADDR_MASK_CS01_DEFAULT 0xfffffffe +#define mmMMEA3_ADDRDEC2_ADDR_MASK_CS23_DEFAULT 0xfffffffe +#define mmMMEA3_ADDRDEC2_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe +#define mmMMEA3_ADDRDEC2_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe +#define mmMMEA3_ADDRDEC2_ADDR_CFG_CS01_DEFAULT 0x00050408 
+#define mmMMEA3_ADDRDEC2_ADDR_CFG_CS23_DEFAULT 0x00050408 +#define mmMMEA3_ADDRDEC2_ADDR_SEL_CS01_DEFAULT 0x04076543 +#define mmMMEA3_ADDRDEC2_ADDR_SEL_CS23_DEFAULT 0x04076543 +#define mmMMEA3_ADDRDEC2_ADDR_SEL2_CS01_DEFAULT 0x00000008 +#define mmMMEA3_ADDRDEC2_ADDR_SEL2_CS23_DEFAULT 0x00000008 +#define mmMMEA3_ADDRDEC2_COL_SEL_LO_CS01_DEFAULT 0x87654321 +#define mmMMEA3_ADDRDEC2_COL_SEL_LO_CS23_DEFAULT 0x87654321 +#define mmMMEA3_ADDRDEC2_COL_SEL_HI_CS01_DEFAULT 0xa9876543 +#define mmMMEA3_ADDRDEC2_COL_SEL_HI_CS23_DEFAULT 0xa9876543 +#define mmMMEA3_ADDRDEC2_RM_SEL_CS01_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDEC2_RM_SEL_CS23_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDEC2_RM_SEL_SECCS01_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDEC2_RM_SEL_SECCS23_DEFAULT 0x00000000 +#define mmMMEA3_ADDRNORMDRAM_GLOBAL_CNTL_DEFAULT 0x00600000 +#define mmMMEA3_ADDRNORMGMI_GLOBAL_CNTL_DEFAULT 0x00600000 +#define mmMMEA3_IO_RD_CLI2GRP_MAP0_DEFAULT 0xe4e4e4e4 +#define mmMMEA3_IO_RD_CLI2GRP_MAP1_DEFAULT 0xe4e4e4e4 +#define mmMMEA3_IO_WR_CLI2GRP_MAP0_DEFAULT 0xe4e4e4e4 +#define mmMMEA3_IO_WR_CLI2GRP_MAP1_DEFAULT 0xe4e4e4e4 +#define mmMMEA3_IO_RD_COMBINE_FLUSH_DEFAULT 0x00007777 +#define mmMMEA3_IO_WR_COMBINE_FLUSH_DEFAULT 0x00007777 +#define mmMMEA3_IO_GROUP_BURST_DEFAULT 0x1f031f03 +#define mmMMEA3_IO_RD_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA3_IO_WR_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA3_IO_RD_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA3_IO_WR_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA3_IO_RD_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA3_IO_WR_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA3_IO_RD_PRI_URGENCY_DEFAULT 0x00000492 +#define mmMMEA3_IO_WR_PRI_URGENCY_DEFAULT 0x00000492 +#define mmMMEA3_IO_RD_PRI_URGENCY_MASKING_DEFAULT 0xffffffff +#define mmMMEA3_IO_WR_PRI_URGENCY_MASKING_DEFAULT 0xffffffff +#define mmMMEA3_IO_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA3_IO_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA3_IO_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA3_IO_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA3_IO_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA3_IO_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA3_SDP_ARB_DRAM_DEFAULT 0x00101e40 +#define mmMMEA3_SDP_ARB_GMI_DEFAULT 0x00101e40 +#define mmMMEA3_SDP_ARB_FINAL_DEFAULT 0x00007fff +#define mmMMEA3_SDP_DRAM_PRIORITY_DEFAULT 0x00000000 +#define mmMMEA3_SDP_GMI_PRIORITY_DEFAULT 0x00000000 +#define mmMMEA3_SDP_IO_PRIORITY_DEFAULT 0x00000000 +#define mmMMEA3_SDP_CREDITS_DEFAULT 0x000101bf +#define mmMMEA3_SDP_TAG_RESERVE0_DEFAULT 0x00000000 +#define mmMMEA3_SDP_TAG_RESERVE1_DEFAULT 0x00000000 +#define mmMMEA3_SDP_VCC_RESERVE0_DEFAULT 0x00000000 +#define mmMMEA3_SDP_VCC_RESERVE1_DEFAULT 0x00000000 +#define mmMMEA3_SDP_VCD_RESERVE0_DEFAULT 0x00000000 +#define mmMMEA3_SDP_VCD_RESERVE1_DEFAULT 0x00000000 +#define mmMMEA3_SDP_REQ_CNTL_DEFAULT 0x0000001f +#define mmMMEA3_MISC_DEFAULT 0x0c00a070 +#define mmMMEA3_LATENCY_SAMPLING_DEFAULT 0x00000000 +#define mmMMEA3_PERFCOUNTER_LO_DEFAULT 0x00000000 +#define mmMMEA3_PERFCOUNTER_HI_DEFAULT 0x00000000 +#define mmMMEA3_PERFCOUNTER0_CFG_DEFAULT 0x00000000 +#define mmMMEA3_PERFCOUNTER1_CFG_DEFAULT 0x00000000 +#define mmMMEA3_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000 +#define mmMMEA3_EDC_CNT_DEFAULT 0x00000000 +#define mmMMEA3_EDC_CNT2_DEFAULT 0x00000000 +#define mmMMEA3_DSM_CNTL_DEFAULT 0x00000000 +#define mmMMEA3_DSM_CNTLA_DEFAULT 0x00000000 +#define mmMMEA3_DSM_CNTLB_DEFAULT 0x00000000 +#define mmMMEA3_DSM_CNTL2_DEFAULT 0x00000000 +#define mmMMEA3_DSM_CNTL2A_DEFAULT 
0x00000000 +#define mmMMEA3_DSM_CNTL2B_DEFAULT 0x00000000 +#define mmMMEA3_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmMMEA3_EDC_MODE_DEFAULT 0x00000000 +#define mmMMEA3_ERR_STATUS_DEFAULT 0x00000300 +#define mmMMEA3_MISC2_DEFAULT 0x00000000 +#define mmMMEA3_ADDRDEC_SELECT_DEFAULT 0x00000000 +#define mmMMEA3_EDC_CNT3_DEFAULT 0x00000000 + + +// addressBlock: mmhub_ea_mmeadec4 +#define mmMMEA4_DRAM_RD_CLI2GRP_MAP0_DEFAULT 0x55555555 +#define mmMMEA4_DRAM_RD_CLI2GRP_MAP1_DEFAULT 0x55555555 +#define mmMMEA4_DRAM_WR_CLI2GRP_MAP0_DEFAULT 0x55555555 +#define mmMMEA4_DRAM_WR_CLI2GRP_MAP1_DEFAULT 0x55555555 +#define mmMMEA4_DRAM_RD_GRP2VC_MAP_DEFAULT 0x00000e25 +#define mmMMEA4_DRAM_WR_GRP2VC_MAP_DEFAULT 0x00000e25 +#define mmMMEA4_DRAM_RD_LAZY_DEFAULT 0x78000924 +#define mmMMEA4_DRAM_WR_LAZY_DEFAULT 0x78000924 +#define mmMMEA4_DRAM_RD_CAM_CNTL_DEFAULT 0x16db4444 +#define mmMMEA4_DRAM_WR_CAM_CNTL_DEFAULT 0x16db4444 +#define mmMMEA4_DRAM_PAGE_BURST_DEFAULT 0x20002000 +#define mmMMEA4_DRAM_RD_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA4_DRAM_WR_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA4_DRAM_RD_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA4_DRAM_WR_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA4_DRAM_RD_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA4_DRAM_WR_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA4_DRAM_RD_PRI_URGENCY_DEFAULT 0x0000fdb6 +#define mmMMEA4_DRAM_WR_PRI_URGENCY_DEFAULT 0x0000fdb6 +#define mmMMEA4_DRAM_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA4_DRAM_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA4_DRAM_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA4_DRAM_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA4_DRAM_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA4_DRAM_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA4_GMI_RD_CLI2GRP_MAP0_DEFAULT 0x00000000 +#define mmMMEA4_GMI_RD_CLI2GRP_MAP1_DEFAULT 0x00000000 +#define mmMMEA4_GMI_WR_CLI2GRP_MAP0_DEFAULT 0x00000000 +#define mmMMEA4_GMI_WR_CLI2GRP_MAP1_DEFAULT 0x00000000 +#define mmMMEA4_GMI_RD_GRP2VC_MAP_DEFAULT 0x00000fff +#define mmMMEA4_GMI_WR_GRP2VC_MAP_DEFAULT 0x00000fff +#define mmMMEA4_GMI_RD_LAZY_DEFAULT 0x78000924 +#define mmMMEA4_GMI_WR_LAZY_DEFAULT 0x78000924 +#define mmMMEA4_GMI_RD_CAM_CNTL_DEFAULT 0x16db4444 +#define mmMMEA4_GMI_WR_CAM_CNTL_DEFAULT 0x16db4444 +#define mmMMEA4_GMI_PAGE_BURST_DEFAULT 0x20002000 +#define mmMMEA4_GMI_RD_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA4_GMI_WR_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA4_GMI_RD_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA4_GMI_WR_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA4_GMI_RD_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA4_GMI_WR_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA4_GMI_RD_PRI_URGENCY_DEFAULT 0x0000fdb6 +#define mmMMEA4_GMI_WR_PRI_URGENCY_DEFAULT 0x0000fdb6 +#define mmMMEA4_GMI_RD_PRI_URGENCY_MASKING_DEFAULT 0xffffffff +#define mmMMEA4_GMI_WR_PRI_URGENCY_MASKING_DEFAULT 0xffffffff +#define mmMMEA4_GMI_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA4_GMI_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA4_GMI_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA4_GMI_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA4_GMI_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA4_GMI_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA4_ADDRNORM_BASE_ADDR0_DEFAULT 0x00000000 +#define mmMMEA4_ADDRNORM_LIMIT_ADDR0_DEFAULT 0x00000000 +#define mmMMEA4_ADDRNORM_BASE_ADDR1_DEFAULT 0x00000000 +#define mmMMEA4_ADDRNORM_LIMIT_ADDR1_DEFAULT 0x00000000 +#define mmMMEA4_ADDRNORM_OFFSET_ADDR1_DEFAULT 0x00000000 +#define 
mmMMEA4_ADDRNORM_BASE_ADDR2_DEFAULT 0x00000000 +#define mmMMEA4_ADDRNORM_LIMIT_ADDR2_DEFAULT 0x00000000 +#define mmMMEA4_ADDRNORM_BASE_ADDR3_DEFAULT 0x00000000 +#define mmMMEA4_ADDRNORM_LIMIT_ADDR3_DEFAULT 0x00000000 +#define mmMMEA4_ADDRNORM_OFFSET_ADDR3_DEFAULT 0x00000000 +#define mmMMEA4_ADDRNORM_BASE_ADDR4_DEFAULT 0x00000000 +#define mmMMEA4_ADDRNORM_LIMIT_ADDR4_DEFAULT 0x00000000 +#define mmMMEA4_ADDRNORM_BASE_ADDR5_DEFAULT 0x00000000 +#define mmMMEA4_ADDRNORM_LIMIT_ADDR5_DEFAULT 0x00000000 +#define mmMMEA4_ADDRNORM_OFFSET_ADDR5_DEFAULT 0x00000000 +#define mmMMEA4_ADDRNORMDRAM_HOLE_CNTL_DEFAULT 0x00000000 +#define mmMMEA4_ADDRNORMGMI_HOLE_CNTL_DEFAULT 0x00000000 +#define mmMMEA4_ADDRNORMDRAM_NP2_CHANNEL_CFG_DEFAULT 0x00000000 +#define mmMMEA4_ADDRNORMGMI_NP2_CHANNEL_CFG_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDEC_BANK_CFG_DEFAULT 0x000003cf +#define mmMMEA4_ADDRDEC_MISC_CFG_DEFAULT 0xfffff000 +#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_BANK0_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_BANK1_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_BANK2_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_BANK3_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_BANK4_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_BANK5_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_PC_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_PC2_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_CS0_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_CS1_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDECDRAM_HARVEST_ENABLE_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDECGMI_ADDR_HASH_BANK0_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDECGMI_ADDR_HASH_BANK1_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDECGMI_ADDR_HASH_BANK2_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDECGMI_ADDR_HASH_BANK3_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDECGMI_ADDR_HASH_BANK4_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDECGMI_ADDR_HASH_BANK5_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDECGMI_ADDR_HASH_PC_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDECGMI_ADDR_HASH_PC2_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDECGMI_ADDR_HASH_CS0_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDECGMI_ADDR_HASH_CS1_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDECGMI_HARVEST_ENABLE_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDEC0_BASE_ADDR_CS0_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDEC0_BASE_ADDR_CS1_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDEC0_BASE_ADDR_CS2_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDEC0_BASE_ADDR_CS3_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDEC0_BASE_ADDR_SECCS0_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDEC0_BASE_ADDR_SECCS1_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDEC0_BASE_ADDR_SECCS2_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDEC0_BASE_ADDR_SECCS3_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDEC0_ADDR_MASK_CS01_DEFAULT 0xfffffffe +#define mmMMEA4_ADDRDEC0_ADDR_MASK_CS23_DEFAULT 0xfffffffe +#define mmMMEA4_ADDRDEC0_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe +#define mmMMEA4_ADDRDEC0_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe +#define mmMMEA4_ADDRDEC0_ADDR_CFG_CS01_DEFAULT 0x00050408 +#define mmMMEA4_ADDRDEC0_ADDR_CFG_CS23_DEFAULT 0x00050408 +#define mmMMEA4_ADDRDEC0_ADDR_SEL_CS01_DEFAULT 0x04076543 +#define mmMMEA4_ADDRDEC0_ADDR_SEL_CS23_DEFAULT 0x04076543 +#define mmMMEA4_ADDRDEC0_ADDR_SEL2_CS01_DEFAULT 0x00000008 +#define mmMMEA4_ADDRDEC0_ADDR_SEL2_CS23_DEFAULT 0x00000008 +#define mmMMEA4_ADDRDEC0_COL_SEL_LO_CS01_DEFAULT 0x87654321 +#define mmMMEA4_ADDRDEC0_COL_SEL_LO_CS23_DEFAULT 0x87654321 +#define mmMMEA4_ADDRDEC0_COL_SEL_HI_CS01_DEFAULT 
0xa9876543 +#define mmMMEA4_ADDRDEC0_COL_SEL_HI_CS23_DEFAULT 0xa9876543 +#define mmMMEA4_ADDRDEC0_RM_SEL_CS01_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDEC0_RM_SEL_CS23_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDEC0_RM_SEL_SECCS01_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDEC0_RM_SEL_SECCS23_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDEC1_BASE_ADDR_CS0_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDEC1_BASE_ADDR_CS1_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDEC1_BASE_ADDR_CS2_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDEC1_BASE_ADDR_CS3_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDEC1_BASE_ADDR_SECCS0_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDEC1_BASE_ADDR_SECCS1_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDEC1_BASE_ADDR_SECCS2_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDEC1_BASE_ADDR_SECCS3_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDEC1_ADDR_MASK_CS01_DEFAULT 0xfffffffe +#define mmMMEA4_ADDRDEC1_ADDR_MASK_CS23_DEFAULT 0xfffffffe +#define mmMMEA4_ADDRDEC1_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe +#define mmMMEA4_ADDRDEC1_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe +#define mmMMEA4_ADDRDEC1_ADDR_CFG_CS01_DEFAULT 0x00050408 +#define mmMMEA4_ADDRDEC1_ADDR_CFG_CS23_DEFAULT 0x00050408 +#define mmMMEA4_ADDRDEC1_ADDR_SEL_CS01_DEFAULT 0x04076543 +#define mmMMEA4_ADDRDEC1_ADDR_SEL_CS23_DEFAULT 0x04076543 +#define mmMMEA4_ADDRDEC1_ADDR_SEL2_CS01_DEFAULT 0x00000008 +#define mmMMEA4_ADDRDEC1_ADDR_SEL2_CS23_DEFAULT 0x00000008 +#define mmMMEA4_ADDRDEC1_COL_SEL_LO_CS01_DEFAULT 0x87654321 +#define mmMMEA4_ADDRDEC1_COL_SEL_LO_CS23_DEFAULT 0x87654321 +#define mmMMEA4_ADDRDEC1_COL_SEL_HI_CS01_DEFAULT 0xa9876543 +#define mmMMEA4_ADDRDEC1_COL_SEL_HI_CS23_DEFAULT 0xa9876543 +#define mmMMEA4_ADDRDEC1_RM_SEL_CS01_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDEC1_RM_SEL_CS23_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDEC1_RM_SEL_SECCS01_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDEC1_RM_SEL_SECCS23_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDEC2_BASE_ADDR_CS0_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDEC2_BASE_ADDR_CS1_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDEC2_BASE_ADDR_CS2_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDEC2_BASE_ADDR_CS3_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDEC2_BASE_ADDR_SECCS0_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDEC2_BASE_ADDR_SECCS1_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDEC2_BASE_ADDR_SECCS2_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDEC2_BASE_ADDR_SECCS3_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDEC2_ADDR_MASK_CS01_DEFAULT 0xfffffffe +#define mmMMEA4_ADDRDEC2_ADDR_MASK_CS23_DEFAULT 0xfffffffe +#define mmMMEA4_ADDRDEC2_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe +#define mmMMEA4_ADDRDEC2_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe +#define mmMMEA4_ADDRDEC2_ADDR_CFG_CS01_DEFAULT 0x00050408 +#define mmMMEA4_ADDRDEC2_ADDR_CFG_CS23_DEFAULT 0x00050408 +#define mmMMEA4_ADDRDEC2_ADDR_SEL_CS01_DEFAULT 0x04076543 +#define mmMMEA4_ADDRDEC2_ADDR_SEL_CS23_DEFAULT 0x04076543 +#define mmMMEA4_ADDRDEC2_ADDR_SEL2_CS01_DEFAULT 0x00000008 +#define mmMMEA4_ADDRDEC2_ADDR_SEL2_CS23_DEFAULT 0x00000008 +#define mmMMEA4_ADDRDEC2_COL_SEL_LO_CS01_DEFAULT 0x87654321 +#define mmMMEA4_ADDRDEC2_COL_SEL_LO_CS23_DEFAULT 0x87654321 +#define mmMMEA4_ADDRDEC2_COL_SEL_HI_CS01_DEFAULT 0xa9876543 +#define mmMMEA4_ADDRDEC2_COL_SEL_HI_CS23_DEFAULT 0xa9876543 +#define mmMMEA4_ADDRDEC2_RM_SEL_CS01_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDEC2_RM_SEL_CS23_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDEC2_RM_SEL_SECCS01_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDEC2_RM_SEL_SECCS23_DEFAULT 0x00000000 +#define mmMMEA4_ADDRNORMDRAM_GLOBAL_CNTL_DEFAULT 0x00600000 +#define mmMMEA4_ADDRNORMGMI_GLOBAL_CNTL_DEFAULT 0x00600000 
+#define mmMMEA4_IO_RD_CLI2GRP_MAP0_DEFAULT 0xe4e4e4e4 +#define mmMMEA4_IO_RD_CLI2GRP_MAP1_DEFAULT 0xe4e4e4e4 +#define mmMMEA4_IO_WR_CLI2GRP_MAP0_DEFAULT 0xe4e4e4e4 +#define mmMMEA4_IO_WR_CLI2GRP_MAP1_DEFAULT 0xe4e4e4e4 +#define mmMMEA4_IO_RD_COMBINE_FLUSH_DEFAULT 0x00007777 +#define mmMMEA4_IO_WR_COMBINE_FLUSH_DEFAULT 0x00007777 +#define mmMMEA4_IO_GROUP_BURST_DEFAULT 0x1f031f03 +#define mmMMEA4_IO_RD_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA4_IO_WR_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA4_IO_RD_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA4_IO_WR_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA4_IO_RD_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA4_IO_WR_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA4_IO_RD_PRI_URGENCY_DEFAULT 0x00000492 +#define mmMMEA4_IO_WR_PRI_URGENCY_DEFAULT 0x00000492 +#define mmMMEA4_IO_RD_PRI_URGENCY_MASKING_DEFAULT 0xffffffff +#define mmMMEA4_IO_WR_PRI_URGENCY_MASKING_DEFAULT 0xffffffff +#define mmMMEA4_IO_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA4_IO_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA4_IO_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA4_IO_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA4_IO_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA4_IO_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA4_SDP_ARB_DRAM_DEFAULT 0x00101e40 +#define mmMMEA4_SDP_ARB_GMI_DEFAULT 0x00101e40 +#define mmMMEA4_SDP_ARB_FINAL_DEFAULT 0x00007fff +#define mmMMEA4_SDP_DRAM_PRIORITY_DEFAULT 0x00000000 +#define mmMMEA4_SDP_GMI_PRIORITY_DEFAULT 0x00000000 +#define mmMMEA4_SDP_IO_PRIORITY_DEFAULT 0x00000000 +#define mmMMEA4_SDP_CREDITS_DEFAULT 0x000101bf +#define mmMMEA4_SDP_TAG_RESERVE0_DEFAULT 0x00000000 +#define mmMMEA4_SDP_TAG_RESERVE1_DEFAULT 0x00000000 +#define mmMMEA4_SDP_VCC_RESERVE0_DEFAULT 0x00000000 +#define mmMMEA4_SDP_VCC_RESERVE1_DEFAULT 0x00000000 +#define mmMMEA4_SDP_VCD_RESERVE0_DEFAULT 0x00000000 +#define mmMMEA4_SDP_VCD_RESERVE1_DEFAULT 0x00000000 +#define mmMMEA4_SDP_REQ_CNTL_DEFAULT 0x0000001f +#define mmMMEA4_MISC_DEFAULT 0x0c00a070 +#define mmMMEA4_LATENCY_SAMPLING_DEFAULT 0x00000000 +#define mmMMEA4_PERFCOUNTER_LO_DEFAULT 0x00000000 +#define mmMMEA4_PERFCOUNTER_HI_DEFAULT 0x00000000 +#define mmMMEA4_PERFCOUNTER0_CFG_DEFAULT 0x00000000 +#define mmMMEA4_PERFCOUNTER1_CFG_DEFAULT 0x00000000 +#define mmMMEA4_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000 +#define mmMMEA4_EDC_CNT_DEFAULT 0x00000000 +#define mmMMEA4_EDC_CNT2_DEFAULT 0x00000000 +#define mmMMEA4_DSM_CNTL_DEFAULT 0x00000000 +#define mmMMEA4_DSM_CNTLA_DEFAULT 0x00000000 +#define mmMMEA4_DSM_CNTLB_DEFAULT 0x00000000 +#define mmMMEA4_DSM_CNTL2_DEFAULT 0x00000000 +#define mmMMEA4_DSM_CNTL2A_DEFAULT 0x00000000 +#define mmMMEA4_DSM_CNTL2B_DEFAULT 0x00000000 +#define mmMMEA4_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmMMEA4_EDC_MODE_DEFAULT 0x00000000 +#define mmMMEA4_ERR_STATUS_DEFAULT 0x00000300 +#define mmMMEA4_MISC2_DEFAULT 0x00000000 +#define mmMMEA4_ADDRDEC_SELECT_DEFAULT 0x00000000 +#define mmMMEA4_EDC_CNT3_DEFAULT 0x00000000 + + +// addressBlock: mmhub_pctldec0 +#define mmPCTL0_CTRL_DEFAULT 0x00011040 +#define mmPCTL0_MMHUB_DEEPSLEEP_IB_DEFAULT 0x00000000 +#define mmPCTL0_MMHUB_DEEPSLEEP_OVERRIDE_DEFAULT 0x00000000 +#define mmPCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB_DEFAULT 0x00000000 +#define mmPCTL0_PG_IGNORE_DEEPSLEEP_DEFAULT 0x00000000 +#define mmPCTL0_PG_IGNORE_DEEPSLEEP_IB_DEFAULT 0x00000000 +#define mmPCTL0_SLICE0_CFG_DAGB_BUSY_DEFAULT 0x00000000 +#define mmPCTL0_SLICE0_CFG_DS_ALLOW_DEFAULT 0x00000000 +#define mmPCTL0_SLICE0_CFG_DS_ALLOW_IB_DEFAULT 0x00000000 +#define 
mmPCTL0_SLICE1_CFG_DAGB_BUSY_DEFAULT 0x00000000 +#define mmPCTL0_SLICE1_CFG_DS_ALLOW_DEFAULT 0x00000000 +#define mmPCTL0_SLICE1_CFG_DS_ALLOW_IB_DEFAULT 0x00000000 +#define mmPCTL0_SLICE2_CFG_DAGB_BUSY_DEFAULT 0x00000000 +#define mmPCTL0_SLICE2_CFG_DS_ALLOW_DEFAULT 0x00000000 +#define mmPCTL0_SLICE2_CFG_DS_ALLOW_IB_DEFAULT 0x00000000 +#define mmPCTL0_SLICE3_CFG_DAGB_BUSY_DEFAULT 0x00000000 +#define mmPCTL0_SLICE3_CFG_DS_ALLOW_DEFAULT 0x00000000 +#define mmPCTL0_SLICE3_CFG_DS_ALLOW_IB_DEFAULT 0x00000000 +#define mmPCTL0_SLICE4_CFG_DAGB_BUSY_DEFAULT 0x00000000 +#define mmPCTL0_SLICE4_CFG_DS_ALLOW_DEFAULT 0x00000000 +#define mmPCTL0_SLICE4_CFG_DS_ALLOW_IB_DEFAULT 0x00000000 +#define mmPCTL0_UTCL2_MISC_DEFAULT 0x00011000 +#define mmPCTL0_SLICE0_MISC_DEFAULT 0x00000800 +#define mmPCTL0_SLICE1_MISC_DEFAULT 0x00000800 +#define mmPCTL0_SLICE2_MISC_DEFAULT 0x00000800 +#define mmPCTL0_SLICE3_MISC_DEFAULT 0x00000800 +#define mmPCTL0_SLICE4_MISC_DEFAULT 0x00000800 +#define mmPCTL0_UTCL2_RENG_EXECUTE_DEFAULT 0x00000000 +#define mmPCTL0_SLICE0_RENG_EXECUTE_DEFAULT 0x00000000 +#define mmPCTL0_SLICE1_RENG_EXECUTE_DEFAULT 0x00000000 +#define mmPCTL0_SLICE2_RENG_EXECUTE_DEFAULT 0x00000000 +#define mmPCTL0_SLICE3_RENG_EXECUTE_DEFAULT 0x00000000 +#define mmPCTL0_SLICE4_RENG_EXECUTE_DEFAULT 0x00000000 +#define mmPCTL0_UTCL2_RENG_RAM_INDEX_DEFAULT 0x00000000 +#define mmPCTL0_UTCL2_RENG_RAM_DATA_DEFAULT 0x00000000 +#define mmPCTL0_SLICE0_RENG_RAM_INDEX_DEFAULT 0x00000000 +#define mmPCTL0_SLICE0_RENG_RAM_DATA_DEFAULT 0x00000000 +#define mmPCTL0_SLICE1_RENG_RAM_INDEX_DEFAULT 0x00000000 +#define mmPCTL0_SLICE1_RENG_RAM_DATA_DEFAULT 0x00000000 +#define mmPCTL0_SLICE2_RENG_RAM_INDEX_DEFAULT 0x00000000 +#define mmPCTL0_SLICE2_RENG_RAM_DATA_DEFAULT 0x00000000 +#define mmPCTL0_SLICE3_RENG_RAM_INDEX_DEFAULT 0x00000000 +#define mmPCTL0_SLICE3_RENG_RAM_DATA_DEFAULT 0x00000000 +#define mmPCTL0_SLICE4_RENG_RAM_INDEX_DEFAULT 0x00000000 +#define mmPCTL0_SLICE4_RENG_RAM_DATA_DEFAULT 0x00000000 +#define mmPCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE0_DEFAULT 0x00000000 +#define mmPCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE1_DEFAULT 0x00000000 +#define mmPCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE2_DEFAULT 0x00000000 +#define mmPCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE3_DEFAULT 0x00000000 +#define mmPCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE4_DEFAULT 0x00000000 +#define mmPCTL0_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET0_DEFAULT 0xffffffff +#define mmPCTL0_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET1_DEFAULT 0xffffffff +#define mmPCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE0_DEFAULT 0x00000000 +#define mmPCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE1_DEFAULT 0x00000000 +#define mmPCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE2_DEFAULT 0x00000000 +#define mmPCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE3_DEFAULT 0x00000000 +#define mmPCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE4_DEFAULT 0x00000000 +#define mmPCTL0_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET0_DEFAULT 0xffffffff +#define mmPCTL0_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET1_DEFAULT 0xffffffff +#define mmPCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE0_DEFAULT 0x00000000 +#define mmPCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE1_DEFAULT 0x00000000 +#define mmPCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE2_DEFAULT 0x00000000 +#define mmPCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE3_DEFAULT 0x00000000 +#define mmPCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE4_DEFAULT 0x00000000 +#define mmPCTL0_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET0_DEFAULT 0xffffffff +#define mmPCTL0_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET1_DEFAULT 0xffffffff +#define mmPCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE0_DEFAULT 
0x00000000 +#define mmPCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE1_DEFAULT 0x00000000 +#define mmPCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE2_DEFAULT 0x00000000 +#define mmPCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE3_DEFAULT 0x00000000 +#define mmPCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE4_DEFAULT 0x00000000 +#define mmPCTL0_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET0_DEFAULT 0xffffffff +#define mmPCTL0_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET1_DEFAULT 0xffffffff +#define mmPCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE0_DEFAULT 0x00000000 +#define mmPCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE1_DEFAULT 0x00000000 +#define mmPCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE2_DEFAULT 0x00000000 +#define mmPCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE3_DEFAULT 0x00000000 +#define mmPCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE4_DEFAULT 0x00000000 +#define mmPCTL0_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET0_DEFAULT 0xffffffff +#define mmPCTL0_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET1_DEFAULT 0xffffffff +#define mmPCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE0_DEFAULT 0x00000000 +#define mmPCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE1_DEFAULT 0x00000000 +#define mmPCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE2_DEFAULT 0x00000000 +#define mmPCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE3_DEFAULT 0x00000000 +#define mmPCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE4_DEFAULT 0x00000000 +#define mmPCTL0_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET0_DEFAULT 0xffffffff +#define mmPCTL0_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET1_DEFAULT 0xffffffff + + +// addressBlock: mmhub_l1tlb_vml1dec +#define mmVML1_0_MC_VM_MX_L1_TLB0_STATUS_DEFAULT 0x00000000 +#define mmVML1_0_MC_VM_MX_L1_TLB1_STATUS_DEFAULT 0x00000000 +#define mmVML1_0_MC_VM_MX_L1_TLB2_STATUS_DEFAULT 0x00000000 +#define mmVML1_0_MC_VM_MX_L1_TLB3_STATUS_DEFAULT 0x00000000 +#define mmVML1_0_MC_VM_MX_L1_TLB4_STATUS_DEFAULT 0x00000000 +#define mmVML1_0_MC_VM_MX_L1_TLB5_STATUS_DEFAULT 0x00000000 +#define mmVML1_0_MC_VM_MX_L1_TLB6_STATUS_DEFAULT 0x00000000 +#define mmVML1_0_MC_VM_MX_L1_TLB7_STATUS_DEFAULT 0x00000000 + + +// addressBlock: mmhub_l1tlb_vml1pldec +#define mmVML1PL0_MC_VM_MX_L1_PERFCOUNTER0_CFG_DEFAULT 0x00000000 +#define mmVML1PL0_MC_VM_MX_L1_PERFCOUNTER1_CFG_DEFAULT 0x00000000 +#define mmVML1PL0_MC_VM_MX_L1_PERFCOUNTER2_CFG_DEFAULT 0x00000000 +#define mmVML1PL0_MC_VM_MX_L1_PERFCOUNTER3_CFG_DEFAULT 0x00000000 +#define mmVML1PL0_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000 + + +// addressBlock: mmhub_l1tlb_vml1prdec +#define mmVML1PR0_MC_VM_MX_L1_PERFCOUNTER_LO_DEFAULT 0x00000000 +#define mmVML1PR0_MC_VM_MX_L1_PERFCOUNTER_HI_DEFAULT 0x00000000 + + +// addressBlock: mmhub_utcl2_atcl2dec +#define mmATCL2_0_ATC_L2_CNTL_DEFAULT 0x0001c0c9 +#define mmATCL2_0_ATC_L2_CNTL2_DEFAULT 0x00600100 +#define mmATCL2_0_ATC_L2_CACHE_DATA0_DEFAULT 0x00000000 +#define mmATCL2_0_ATC_L2_CACHE_DATA1_DEFAULT 0x00000000 +#define mmATCL2_0_ATC_L2_CACHE_DATA2_DEFAULT 0x00000000 +#define mmATCL2_0_ATC_L2_CNTL3_DEFAULT 0x000001f8 +#define mmATCL2_0_ATC_L2_STATUS_DEFAULT 0x00000000 +#define mmATCL2_0_ATC_L2_STATUS2_DEFAULT 0x00000000 +#define mmATCL2_0_ATC_L2_STATUS3_DEFAULT 0x00000000 +#define mmATCL2_0_ATC_L2_MISC_CG_DEFAULT 0x00000200 +#define mmATCL2_0_ATC_L2_MEM_POWER_LS_DEFAULT 0x00000208 +#define mmATCL2_0_ATC_L2_CGTT_CLK_CTRL_DEFAULT 0x00000080 +#define mmATCL2_0_ATC_L2_CACHE_4K_DSM_INDEX_DEFAULT 0x00000000 +#define mmATCL2_0_ATC_L2_CACHE_2M_DSM_INDEX_DEFAULT 0x00000000 +#define mmATCL2_0_ATC_L2_CACHE_4K_DSM_CNTL_DEFAULT 0x00000000 +#define mmATCL2_0_ATC_L2_CACHE_2M_DSM_CNTL_DEFAULT 0x00000000 +#define mmATCL2_0_ATC_L2_CNTL4_DEFAULT 0x00000000 +#define 
mmATCL2_0_ATC_L2_MM_GROUP_RT_CLASSES_DEFAULT 0x00000005 + + +// addressBlock: mmhub_utcl2_vml2pfdec +#define mmVML2PF0_VM_L2_CNTL_DEFAULT 0x00080602 +#define mmVML2PF0_VM_L2_CNTL2_DEFAULT 0x00000000 +#define mmVML2PF0_VM_L2_CNTL3_DEFAULT 0x80100007 +#define mmVML2PF0_VM_L2_STATUS_DEFAULT 0x00000000 +#define mmVML2PF0_VM_DUMMY_PAGE_FAULT_CNTL_DEFAULT 0x00000090 +#define mmVML2PF0_VM_DUMMY_PAGE_FAULT_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2PF0_VM_DUMMY_PAGE_FAULT_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL_DEFAULT 0x3ffffffc +#define mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2_DEFAULT 0x000a0000 +#define mmVML2PF0_VM_L2_PROTECTION_FAULT_MM_CNTL3_DEFAULT 0xffffffff +#define mmVML2PF0_VM_L2_PROTECTION_FAULT_MM_CNTL4_DEFAULT 0xffffffff +#define mmVML2PF0_VM_L2_PROTECTION_FAULT_STATUS_DEFAULT 0x00000000 +#define mmVML2PF0_VM_L2_PROTECTION_FAULT_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2PF0_VM_L2_PROTECTION_FAULT_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2PF0_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32_DEFAULT 0x00000000 +#define mmVML2PF0_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32_DEFAULT 0x00000000 +#define mmVML2PF0_VM_L2_CNTL4_DEFAULT 0x000000c1 +#define mmVML2PF0_VM_L2_MM_GROUP_RT_CLASSES_DEFAULT 0x00000000 +#define mmVML2PF0_VM_L2_BANK_SELECT_RESERVED_CID_DEFAULT 0x00000000 +#define mmVML2PF0_VM_L2_BANK_SELECT_RESERVED_CID2_DEFAULT 0x00000000 +#define mmVML2PF0_VM_L2_CACHE_PARITY_CNTL_DEFAULT 0x00000000 +#define mmVML2PF0_VM_L2_CGTT_CLK_CTRL_DEFAULT 0x00000080 + + +// addressBlock: mmhub_utcl2_vml2vcdec +#define mmVML2VC0_VM_CONTEXT0_CNTL_DEFAULT 0x007ffe80 +#define mmVML2VC0_VM_CONTEXT1_CNTL_DEFAULT 0x007ffe80 +#define mmVML2VC0_VM_CONTEXT2_CNTL_DEFAULT 0x007ffe80 +#define mmVML2VC0_VM_CONTEXT3_CNTL_DEFAULT 0x007ffe80 +#define mmVML2VC0_VM_CONTEXT4_CNTL_DEFAULT 0x007ffe80 +#define mmVML2VC0_VM_CONTEXT5_CNTL_DEFAULT 0x007ffe80 +#define mmVML2VC0_VM_CONTEXT6_CNTL_DEFAULT 0x007ffe80 +#define mmVML2VC0_VM_CONTEXT7_CNTL_DEFAULT 0x007ffe80 +#define mmVML2VC0_VM_CONTEXT8_CNTL_DEFAULT 0x007ffe80 +#define mmVML2VC0_VM_CONTEXT9_CNTL_DEFAULT 0x007ffe80 +#define mmVML2VC0_VM_CONTEXT10_CNTL_DEFAULT 0x007ffe80 +#define mmVML2VC0_VM_CONTEXT11_CNTL_DEFAULT 0x007ffe80 +#define mmVML2VC0_VM_CONTEXT12_CNTL_DEFAULT 0x007ffe80 +#define mmVML2VC0_VM_CONTEXT13_CNTL_DEFAULT 0x007ffe80 +#define mmVML2VC0_VM_CONTEXT14_CNTL_DEFAULT 0x007ffe80 +#define mmVML2VC0_VM_CONTEXT15_CNTL_DEFAULT 0x007ffe80 +#define mmVML2VC0_VM_CONTEXTS_DISABLE_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG0_SEM_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG1_SEM_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG2_SEM_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG3_SEM_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG4_SEM_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG5_SEM_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG6_SEM_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG7_SEM_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG8_SEM_DEFAULT 
0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG9_SEM_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG10_SEM_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG11_SEM_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG12_SEM_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG13_SEM_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG14_SEM_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG15_SEM_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG16_SEM_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG17_SEM_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG0_REQ_DEFAULT 0x017c0000 +#define mmVML2VC0_VM_INVALIDATE_ENG1_REQ_DEFAULT 0x017c0000 +#define mmVML2VC0_VM_INVALIDATE_ENG2_REQ_DEFAULT 0x017c0000 +#define mmVML2VC0_VM_INVALIDATE_ENG3_REQ_DEFAULT 0x017c0000 +#define mmVML2VC0_VM_INVALIDATE_ENG4_REQ_DEFAULT 0x017c0000 +#define mmVML2VC0_VM_INVALIDATE_ENG5_REQ_DEFAULT 0x017c0000 +#define mmVML2VC0_VM_INVALIDATE_ENG6_REQ_DEFAULT 0x017c0000 +#define mmVML2VC0_VM_INVALIDATE_ENG7_REQ_DEFAULT 0x017c0000 +#define mmVML2VC0_VM_INVALIDATE_ENG8_REQ_DEFAULT 0x017c0000 +#define mmVML2VC0_VM_INVALIDATE_ENG9_REQ_DEFAULT 0x017c0000 +#define mmVML2VC0_VM_INVALIDATE_ENG10_REQ_DEFAULT 0x017c0000 +#define mmVML2VC0_VM_INVALIDATE_ENG11_REQ_DEFAULT 0x017c0000 +#define mmVML2VC0_VM_INVALIDATE_ENG12_REQ_DEFAULT 0x017c0000 +#define mmVML2VC0_VM_INVALIDATE_ENG13_REQ_DEFAULT 0x017c0000 +#define mmVML2VC0_VM_INVALIDATE_ENG14_REQ_DEFAULT 0x017c0000 +#define mmVML2VC0_VM_INVALIDATE_ENG15_REQ_DEFAULT 0x017c0000 +#define mmVML2VC0_VM_INVALIDATE_ENG16_REQ_DEFAULT 0x017c0000 +#define mmVML2VC0_VM_INVALIDATE_ENG17_REQ_DEFAULT 0x017c0000 +#define mmVML2VC0_VM_INVALIDATE_ENG0_ACK_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG1_ACK_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG2_ACK_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG3_ACK_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG4_ACK_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG5_ACK_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG6_ACK_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG7_ACK_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG8_ACK_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG9_ACK_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG10_ACK_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG11_ACK_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG12_ACK_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG13_ACK_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG14_ACK_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG15_ACK_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG16_ACK_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG17_ACK_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG1_ADDR_RANGE_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG1_ADDR_RANGE_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG2_ADDR_RANGE_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG2_ADDR_RANGE_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG3_ADDR_RANGE_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG3_ADDR_RANGE_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG4_ADDR_RANGE_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG4_ADDR_RANGE_HI32_DEFAULT 0x00000000 +#define 
mmVML2VC0_VM_INVALIDATE_ENG5_ADDR_RANGE_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG5_ADDR_RANGE_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG6_ADDR_RANGE_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG6_ADDR_RANGE_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG7_ADDR_RANGE_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG7_ADDR_RANGE_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG8_ADDR_RANGE_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG8_ADDR_RANGE_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG9_ADDR_RANGE_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG9_ADDR_RANGE_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG10_ADDR_RANGE_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG10_ADDR_RANGE_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG11_ADDR_RANGE_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG11_ADDR_RANGE_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG12_ADDR_RANGE_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG12_ADDR_RANGE_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG13_ADDR_RANGE_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG13_ADDR_RANGE_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG14_ADDR_RANGE_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG14_ADDR_RANGE_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG15_ADDR_RANGE_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG15_ADDR_RANGE_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG17_ADDR_RANGE_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_INVALIDATE_ENG17_ADDR_RANGE_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000 +#define 
mmVML2VC0_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000 +#define 
mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC0_VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000 + + +// addressBlock: mmhub_utcl2_vmsharedpfdec +#define mmVMSHAREDPF0_MC_VM_NB_MMIOBASE_DEFAULT 0x00000000 +#define mmVMSHAREDPF0_MC_VM_NB_MMIOLIMIT_DEFAULT 0x00000000 +#define mmVMSHAREDPF0_MC_VM_NB_PCI_CTRL_DEFAULT 0x00000000 +#define mmVMSHAREDPF0_MC_VM_NB_PCI_ARB_DEFAULT 0x00000008 +#define mmVMSHAREDPF0_MC_VM_NB_TOP_OF_DRAM_SLOT1_DEFAULT 0x00000000 +#define mmVMSHAREDPF0_MC_VM_NB_LOWER_TOP_OF_DRAM2_DEFAULT 0x00000000 +#define mmVMSHAREDPF0_MC_VM_NB_UPPER_TOP_OF_DRAM2_DEFAULT 0x00000000 +#define mmVMSHAREDPF0_MC_VM_FB_OFFSET_DEFAULT 0x00000000 +#define mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB_DEFAULT 0x00000000 +#define mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB_DEFAULT 0x00000000 +#define mmVMSHAREDPF0_MC_VM_STEERING_DEFAULT 0x00000001 +#define mmVMSHAREDPF0_MC_SHARED_VIRT_RESET_REQ_DEFAULT 0x00000000 +#define mmVMSHAREDPF0_MC_MEM_POWER_LS_DEFAULT 0x00000208 +#define mmVMSHAREDPF0_MC_VM_CACHEABLE_DRAM_ADDRESS_START_DEFAULT 0x00000000 +#define mmVMSHAREDPF0_MC_VM_CACHEABLE_DRAM_ADDRESS_END_DEFAULT 0x00000000 +#define mmVMSHAREDPF0_MC_VM_APT_CNTL_DEFAULT 0x00000000 +#define mmVMSHAREDPF0_MC_VM_LOCAL_HBM_ADDRESS_START_DEFAULT 0x00000000 +#define mmVMSHAREDPF0_MC_VM_LOCAL_HBM_ADDRESS_END_DEFAULT 0x000fffff +#define mmVMSHAREDPF0_MC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL_DEFAULT 0x00000000 +#define mmVMSHAREDPF0_MC_VM_XGMI_LFB_CNTL_DEFAULT 
0x00000000 +#define mmVMSHAREDPF0_MC_VM_XGMI_LFB_SIZE_DEFAULT 0x00000000 +#define mmVMSHAREDPF0_MC_VM_CACHEABLE_DRAM_CNTL_DEFAULT 0x00000000 + + +// addressBlock: mmhub_utcl2_vmsharedvcdec +#define mmVMSHAREDVC0_MC_VM_FB_LOCATION_BASE_DEFAULT 0x00000000 +#define mmVMSHAREDVC0_MC_VM_FB_LOCATION_TOP_DEFAULT 0x00000000 +#define mmVMSHAREDVC0_MC_VM_AGP_TOP_DEFAULT 0x00000000 +#define mmVMSHAREDVC0_MC_VM_AGP_BOT_DEFAULT 0x00000000 +#define mmVMSHAREDVC0_MC_VM_AGP_BASE_DEFAULT 0x00000000 +#define mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_LOW_ADDR_DEFAULT 0x00000000 +#define mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_HIGH_ADDR_DEFAULT 0x00000000 +#define mmVMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL_DEFAULT 0x00002501 + + +// addressBlock: mmhub_utcl2_vmsharedhvdec +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF0_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF1_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF2_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF3_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF4_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF5_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF6_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF7_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF8_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF9_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF10_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF11_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF12_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF13_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF14_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF15_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_VM_IOMMU_MMIO_CNTRL_1_DEFAULT 0x00000100 +#define mmVMSHAREDHV0_MC_VM_MARC_BASE_LO_0_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_MC_VM_MARC_BASE_LO_1_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_MC_VM_MARC_BASE_LO_2_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_MC_VM_MARC_BASE_LO_3_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_MC_VM_MARC_BASE_HI_0_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_MC_VM_MARC_BASE_HI_1_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_MC_VM_MARC_BASE_HI_2_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_MC_VM_MARC_BASE_HI_3_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_LO_0_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_LO_1_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_LO_2_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_LO_3_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_HI_0_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_HI_1_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_HI_2_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_HI_3_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_MC_VM_MARC_LEN_LO_0_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_MC_VM_MARC_LEN_LO_1_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_MC_VM_MARC_LEN_LO_2_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_MC_VM_MARC_LEN_LO_3_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_MC_VM_MARC_LEN_HI_0_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_MC_VM_MARC_LEN_HI_1_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_MC_VM_MARC_LEN_HI_2_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_MC_VM_MARC_LEN_HI_3_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_VM_IOMMU_CONTROL_REGISTER_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_VM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER_DEFAULT 
0x00000000 +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_0_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_1_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_2_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_3_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_4_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_5_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_6_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_7_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_8_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_9_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_10_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_11_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_12_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_13_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_14_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_15_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_UTCL2_CGTT_CLK_CTRL_DEFAULT 0x00000080 +#define mmVMSHAREDHV0_MC_SHARED_ACTIVE_FCN_ID_DEFAULT 0x00000000 +#define mmVMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE_DEFAULT 0x00000000 + + +// addressBlock: mmhub_utcl2_atcl2pfcntrdec +#define mmATCL2PFCNTR0_ATC_L2_PERFCOUNTER_LO_DEFAULT 0x00000000 +#define mmATCL2PFCNTR0_ATC_L2_PERFCOUNTER_HI_DEFAULT 0x00000000 + + +// addressBlock: mmhub_utcl2_atcl2pfcntldec +#define mmATCL2PFCNTL0_ATC_L2_PERFCOUNTER0_CFG_DEFAULT 0x00000000 +#define mmATCL2PFCNTL0_ATC_L2_PERFCOUNTER1_CFG_DEFAULT 0x00000000 +#define mmATCL2PFCNTL0_ATC_L2_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000 + + +// addressBlock: mmhub_utcl2_vml2pldec +#define mmVML2PL0_MC_VM_L2_PERFCOUNTER0_CFG_DEFAULT 0x00000000 +#define mmVML2PL0_MC_VM_L2_PERFCOUNTER1_CFG_DEFAULT 0x00000000 +#define mmVML2PL0_MC_VM_L2_PERFCOUNTER2_CFG_DEFAULT 0x00000000 +#define mmVML2PL0_MC_VM_L2_PERFCOUNTER3_CFG_DEFAULT 0x00000000 +#define mmVML2PL0_MC_VM_L2_PERFCOUNTER4_CFG_DEFAULT 0x00000000 +#define mmVML2PL0_MC_VM_L2_PERFCOUNTER5_CFG_DEFAULT 0x00000000 +#define mmVML2PL0_MC_VM_L2_PERFCOUNTER6_CFG_DEFAULT 0x00000000 +#define mmVML2PL0_MC_VM_L2_PERFCOUNTER7_CFG_DEFAULT 0x00000000 +#define mmVML2PL0_MC_VM_L2_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000 + + +// addressBlock: mmhub_utcl2_vml2prdec +#define mmVML2PR0_MC_VM_L2_PERFCOUNTER_LO_DEFAULT 0x00000000 +#define mmVML2PR0_MC_VM_L2_PERFCOUNTER_HI_DEFAULT 0x00000000 + + +// addressBlock: mmhub_dagb_dagbdec5 +#define mmDAGB5_RDCLI0_DEFAULT 0xfe5fe0f9 +#define mmDAGB5_RDCLI1_DEFAULT 0xfe5fe0f9 +#define mmDAGB5_RDCLI2_DEFAULT 0xfe5fe0f9 +#define mmDAGB5_RDCLI3_DEFAULT 0xfe5fe0f9 +#define mmDAGB5_RDCLI4_DEFAULT 0xfe5fe0f9 +#define mmDAGB5_RDCLI5_DEFAULT 0xfe5fe0f9 +#define mmDAGB5_RDCLI6_DEFAULT 0xfe5fe0f9 +#define mmDAGB5_RDCLI7_DEFAULT 0xfe5fe0f9 +#define mmDAGB5_RDCLI8_DEFAULT 0xfe5fe0f9 +#define mmDAGB5_RDCLI9_DEFAULT 0xfe5fe0f9 +#define mmDAGB5_RDCLI10_DEFAULT 0xfe5fe0f9 +#define mmDAGB5_RDCLI11_DEFAULT 0xfe5fe0f9 +#define mmDAGB5_RDCLI12_DEFAULT 0xfe5fe0f9 +#define mmDAGB5_RDCLI13_DEFAULT 0xfe5fe0f9 +#define mmDAGB5_RDCLI14_DEFAULT 0xfe5fe0f9 +#define mmDAGB5_RDCLI15_DEFAULT 0xfe5fe0f9 +#define mmDAGB5_RD_CNTL_DEFAULT 0x03527df8 +#define mmDAGB5_RD_GMI_CNTL_DEFAULT 0x00003045 +#define mmDAGB5_RD_ADDR_DAGB_DEFAULT 0x00000039 +#define mmDAGB5_RD_OUTPUT_DAGB_MAX_BURST_DEFAULT 0x88888888 +#define mmDAGB5_RD_OUTPUT_DAGB_LAZY_TIMER_DEFAULT 0x11111111 +#define 
mmDAGB5_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB5_L1TLB_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB5_ATCVM_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB5_RD_ADDR_DAGB_MAX_BURST0_DEFAULT 0x88888888 +#define mmDAGB5_RD_ADDR_DAGB_LAZY_TIMER0_DEFAULT 0x11111111 +#define mmDAGB5_RD_ADDR_DAGB_MAX_BURST1_DEFAULT 0x88888888 +#define mmDAGB5_RD_ADDR_DAGB_LAZY_TIMER1_DEFAULT 0x11111111 +#define mmDAGB5_RD_VC0_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB5_RD_VC1_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB5_RD_VC2_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB5_RD_VC3_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB5_RD_VC4_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB5_RD_VC5_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB5_RD_VC6_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB5_RD_VC7_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB5_RD_CNTL_MISC_DEFAULT 0x69a0e408 +#define mmDAGB5_RD_TLB_CREDIT_DEFAULT 0x2f7bdef7 +#define mmDAGB5_RDCLI_ASK_PENDING_DEFAULT 0x00000000 +#define mmDAGB5_RDCLI_GO_PENDING_DEFAULT 0x00000000 +#define mmDAGB5_RDCLI_GBLSEND_PENDING_DEFAULT 0x00000000 +#define mmDAGB5_RDCLI_TLB_PENDING_DEFAULT 0x00000000 +#define mmDAGB5_RDCLI_OARB_PENDING_DEFAULT 0x00000000 +#define mmDAGB5_RDCLI_OSD_PENDING_DEFAULT 0x00000000 +#define mmDAGB5_WRCLI0_DEFAULT 0xfe5fe0f9 +#define mmDAGB5_WRCLI1_DEFAULT 0xfe5fe0f9 +#define mmDAGB5_WRCLI2_DEFAULT 0xfe5fe0f9 +#define mmDAGB5_WRCLI3_DEFAULT 0xfe5fe0f9 +#define mmDAGB5_WRCLI4_DEFAULT 0xfe5fe0f9 +#define mmDAGB5_WRCLI5_DEFAULT 0xfe5fe0f9 +#define mmDAGB5_WRCLI6_DEFAULT 0xfe5fe0f9 +#define mmDAGB5_WRCLI7_DEFAULT 0xfe5fe0f9 +#define mmDAGB5_WRCLI8_DEFAULT 0xfe5fe0f9 +#define mmDAGB5_WRCLI9_DEFAULT 0xfe5fe0f9 +#define mmDAGB5_WRCLI10_DEFAULT 0xfe5fe0f9 +#define mmDAGB5_WRCLI11_DEFAULT 0xfe5fe0f9 +#define mmDAGB5_WRCLI12_DEFAULT 0xfe5fe0f9 +#define mmDAGB5_WRCLI13_DEFAULT 0xfe5fe0f9 +#define mmDAGB5_WRCLI14_DEFAULT 0xfe5fe0f9 +#define mmDAGB5_WRCLI15_DEFAULT 0xfe5fe0f9 +#define mmDAGB5_WR_CNTL_DEFAULT 0x03527df8 +#define mmDAGB5_WR_GMI_CNTL_DEFAULT 0x00003045 +#define mmDAGB5_WR_ADDR_DAGB_DEFAULT 0x00000039 +#define mmDAGB5_WR_OUTPUT_DAGB_MAX_BURST_DEFAULT 0x88888888 +#define mmDAGB5_WR_OUTPUT_DAGB_LAZY_TIMER_DEFAULT 0x11111111 +#define mmDAGB5_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB5_L1TLB_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB5_ATCVM_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB5_WR_ADDR_DAGB_MAX_BURST0_DEFAULT 0x88888888 +#define mmDAGB5_WR_ADDR_DAGB_LAZY_TIMER0_DEFAULT 0x11111111 +#define mmDAGB5_WR_ADDR_DAGB_MAX_BURST1_DEFAULT 0x88888888 +#define mmDAGB5_WR_ADDR_DAGB_LAZY_TIMER1_DEFAULT 0x11111111 +#define mmDAGB5_WR_DATA_DAGB_DEFAULT 0x00000001 +#define mmDAGB5_WR_DATA_DAGB_MAX_BURST0_DEFAULT 0x11111111 +#define mmDAGB5_WR_DATA_DAGB_LAZY_TIMER0_DEFAULT 0x00000000 +#define mmDAGB5_WR_DATA_DAGB_MAX_BURST1_DEFAULT 0x11111111 +#define mmDAGB5_WR_DATA_DAGB_LAZY_TIMER1_DEFAULT 0x00000000 +#define mmDAGB5_WR_VC0_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB5_WR_VC1_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB5_WR_VC2_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB5_WR_VC3_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB5_WR_VC4_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB5_WR_VC5_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB5_WR_VC6_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB5_WR_VC7_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB5_WR_CNTL_MISC_DEFAULT 0x69a0e408 +#define mmDAGB5_WR_TLB_CREDIT_DEFAULT 0x2f7bdef7 +#define mmDAGB5_WR_DATA_CREDIT_DEFAULT 0x60606070 +#define mmDAGB5_WR_MISC_CREDIT_DEFAULT 0x0078dc88 +#define mmDAGB5_WRCLI_ASK_PENDING_DEFAULT 0x00000000 +#define 
mmDAGB5_WRCLI_GO_PENDING_DEFAULT 0x00000000 +#define mmDAGB5_WRCLI_GBLSEND_PENDING_DEFAULT 0x00000000 +#define mmDAGB5_WRCLI_TLB_PENDING_DEFAULT 0x00000000 +#define mmDAGB5_WRCLI_OARB_PENDING_DEFAULT 0x00000000 +#define mmDAGB5_WRCLI_OSD_PENDING_DEFAULT 0x00000000 +#define mmDAGB5_WRCLI_DBUS_ASK_PENDING_DEFAULT 0x00000000 +#define mmDAGB5_WRCLI_DBUS_GO_PENDING_DEFAULT 0x00000000 +#define mmDAGB5_DAGB_DLY_DEFAULT 0x00000000 +#define mmDAGB5_CNTL_MISC_DEFAULT 0xcf7c1ffa +#define mmDAGB5_CNTL_MISC2_DEFAULT 0x003c0000 +#define mmDAGB5_FIFO_EMPTY_DEFAULT 0x00ffffff +#define mmDAGB5_FIFO_FULL_DEFAULT 0x00000000 +#define mmDAGB5_WR_CREDITS_FULL_DEFAULT 0x1fffffff +#define mmDAGB5_RD_CREDITS_FULL_DEFAULT 0x0003ffff +#define mmDAGB5_PERFCOUNTER_LO_DEFAULT 0x00000000 +#define mmDAGB5_PERFCOUNTER_HI_DEFAULT 0x00000000 +#define mmDAGB5_PERFCOUNTER0_CFG_DEFAULT 0x00000000 +#define mmDAGB5_PERFCOUNTER1_CFG_DEFAULT 0x00000000 +#define mmDAGB5_PERFCOUNTER2_CFG_DEFAULT 0x00000000 +#define mmDAGB5_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000 +#define mmDAGB5_RESERVE0_DEFAULT 0xffffffff +#define mmDAGB5_RESERVE1_DEFAULT 0xffffffff +#define mmDAGB5_RESERVE2_DEFAULT 0xffffffff +#define mmDAGB5_RESERVE3_DEFAULT 0xffffffff +#define mmDAGB5_RESERVE4_DEFAULT 0xffffffff +#define mmDAGB5_RESERVE5_DEFAULT 0xffffffff +#define mmDAGB5_RESERVE6_DEFAULT 0xffffffff +#define mmDAGB5_RESERVE7_DEFAULT 0xffffffff +#define mmDAGB5_RESERVE8_DEFAULT 0xffffffff +#define mmDAGB5_RESERVE9_DEFAULT 0xffffffff +#define mmDAGB5_RESERVE10_DEFAULT 0xffffffff +#define mmDAGB5_RESERVE11_DEFAULT 0xffffffff +#define mmDAGB5_RESERVE12_DEFAULT 0xffffffff +#define mmDAGB5_RESERVE13_DEFAULT 0xffffffff + + +// addressBlock: mmhub_dagb_dagbdec6 +#define mmDAGB6_RDCLI0_DEFAULT 0xfe5fe0f9 +#define mmDAGB6_RDCLI1_DEFAULT 0xfe5fe0f9 +#define mmDAGB6_RDCLI2_DEFAULT 0xfe5fe0f9 +#define mmDAGB6_RDCLI3_DEFAULT 0xfe5fe0f9 +#define mmDAGB6_RDCLI4_DEFAULT 0xfe5fe0f9 +#define mmDAGB6_RDCLI5_DEFAULT 0xfe5fe0f9 +#define mmDAGB6_RDCLI6_DEFAULT 0xfe5fe0f9 +#define mmDAGB6_RDCLI7_DEFAULT 0xfe5fe0f9 +#define mmDAGB6_RDCLI8_DEFAULT 0xfe5fe0f9 +#define mmDAGB6_RDCLI9_DEFAULT 0xfe5fe0f9 +#define mmDAGB6_RDCLI10_DEFAULT 0xfe5fe0f9 +#define mmDAGB6_RDCLI11_DEFAULT 0xfe5fe0f9 +#define mmDAGB6_RDCLI12_DEFAULT 0xfe5fe0f9 +#define mmDAGB6_RDCLI13_DEFAULT 0xfe5fe0f9 +#define mmDAGB6_RDCLI14_DEFAULT 0xfe5fe0f9 +#define mmDAGB6_RDCLI15_DEFAULT 0xfe5fe0f9 +#define mmDAGB6_RD_CNTL_DEFAULT 0x03527df8 +#define mmDAGB6_RD_GMI_CNTL_DEFAULT 0x00003045 +#define mmDAGB6_RD_ADDR_DAGB_DEFAULT 0x00000039 +#define mmDAGB6_RD_OUTPUT_DAGB_MAX_BURST_DEFAULT 0x88888888 +#define mmDAGB6_RD_OUTPUT_DAGB_LAZY_TIMER_DEFAULT 0x11111111 +#define mmDAGB6_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB6_L1TLB_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB6_ATCVM_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB6_RD_ADDR_DAGB_MAX_BURST0_DEFAULT 0x88888888 +#define mmDAGB6_RD_ADDR_DAGB_LAZY_TIMER0_DEFAULT 0x11111111 +#define mmDAGB6_RD_ADDR_DAGB_MAX_BURST1_DEFAULT 0x88888888 +#define mmDAGB6_RD_ADDR_DAGB_LAZY_TIMER1_DEFAULT 0x11111111 +#define mmDAGB6_RD_VC0_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB6_RD_VC1_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB6_RD_VC2_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB6_RD_VC3_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB6_RD_VC4_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB6_RD_VC5_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB6_RD_VC6_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB6_RD_VC7_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB6_RD_CNTL_MISC_DEFAULT 0x69a0e408 +#define mmDAGB6_RD_TLB_CREDIT_DEFAULT 
0x2f7bdef7 +#define mmDAGB6_RDCLI_ASK_PENDING_DEFAULT 0x00000000 +#define mmDAGB6_RDCLI_GO_PENDING_DEFAULT 0x00000000 +#define mmDAGB6_RDCLI_GBLSEND_PENDING_DEFAULT 0x00000000 +#define mmDAGB6_RDCLI_TLB_PENDING_DEFAULT 0x00000000 +#define mmDAGB6_RDCLI_OARB_PENDING_DEFAULT 0x00000000 +#define mmDAGB6_RDCLI_OSD_PENDING_DEFAULT 0x00000000 +#define mmDAGB6_WRCLI0_DEFAULT 0xfe5fe0f9 +#define mmDAGB6_WRCLI1_DEFAULT 0xfe5fe0f9 +#define mmDAGB6_WRCLI2_DEFAULT 0xfe5fe0f9 +#define mmDAGB6_WRCLI3_DEFAULT 0xfe5fe0f9 +#define mmDAGB6_WRCLI4_DEFAULT 0xfe5fe0f9 +#define mmDAGB6_WRCLI5_DEFAULT 0xfe5fe0f9 +#define mmDAGB6_WRCLI6_DEFAULT 0xfe5fe0f9 +#define mmDAGB6_WRCLI7_DEFAULT 0xfe5fe0f9 +#define mmDAGB6_WRCLI8_DEFAULT 0xfe5fe0f9 +#define mmDAGB6_WRCLI9_DEFAULT 0xfe5fe0f9 +#define mmDAGB6_WRCLI10_DEFAULT 0xfe5fe0f9 +#define mmDAGB6_WRCLI11_DEFAULT 0xfe5fe0f9 +#define mmDAGB6_WRCLI12_DEFAULT 0xfe5fe0f9 +#define mmDAGB6_WRCLI13_DEFAULT 0xfe5fe0f9 +#define mmDAGB6_WRCLI14_DEFAULT 0xfe5fe0f9 +#define mmDAGB6_WRCLI15_DEFAULT 0xfe5fe0f9 +#define mmDAGB6_WR_CNTL_DEFAULT 0x03527df8 +#define mmDAGB6_WR_GMI_CNTL_DEFAULT 0x00003045 +#define mmDAGB6_WR_ADDR_DAGB_DEFAULT 0x00000039 +#define mmDAGB6_WR_OUTPUT_DAGB_MAX_BURST_DEFAULT 0x88888888 +#define mmDAGB6_WR_OUTPUT_DAGB_LAZY_TIMER_DEFAULT 0x11111111 +#define mmDAGB6_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB6_L1TLB_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB6_ATCVM_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB6_WR_ADDR_DAGB_MAX_BURST0_DEFAULT 0x88888888 +#define mmDAGB6_WR_ADDR_DAGB_LAZY_TIMER0_DEFAULT 0x11111111 +#define mmDAGB6_WR_ADDR_DAGB_MAX_BURST1_DEFAULT 0x88888888 +#define mmDAGB6_WR_ADDR_DAGB_LAZY_TIMER1_DEFAULT 0x11111111 +#define mmDAGB6_WR_DATA_DAGB_DEFAULT 0x00000001 +#define mmDAGB6_WR_DATA_DAGB_MAX_BURST0_DEFAULT 0x11111111 +#define mmDAGB6_WR_DATA_DAGB_LAZY_TIMER0_DEFAULT 0x00000000 +#define mmDAGB6_WR_DATA_DAGB_MAX_BURST1_DEFAULT 0x11111111 +#define mmDAGB6_WR_DATA_DAGB_LAZY_TIMER1_DEFAULT 0x00000000 +#define mmDAGB6_WR_VC0_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB6_WR_VC1_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB6_WR_VC2_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB6_WR_VC3_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB6_WR_VC4_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB6_WR_VC5_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB6_WR_VC6_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB6_WR_VC7_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB6_WR_CNTL_MISC_DEFAULT 0x69a0e408 +#define mmDAGB6_WR_TLB_CREDIT_DEFAULT 0x2f7bdef7 +#define mmDAGB6_WR_DATA_CREDIT_DEFAULT 0x60606070 +#define mmDAGB6_WR_MISC_CREDIT_DEFAULT 0x0078dc88 +#define mmDAGB6_WRCLI_ASK_PENDING_DEFAULT 0x00000000 +#define mmDAGB6_WRCLI_GO_PENDING_DEFAULT 0x00000000 +#define mmDAGB6_WRCLI_GBLSEND_PENDING_DEFAULT 0x00000000 +#define mmDAGB6_WRCLI_TLB_PENDING_DEFAULT 0x00000000 +#define mmDAGB6_WRCLI_OARB_PENDING_DEFAULT 0x00000000 +#define mmDAGB6_WRCLI_OSD_PENDING_DEFAULT 0x00000000 +#define mmDAGB6_WRCLI_DBUS_ASK_PENDING_DEFAULT 0x00000000 +#define mmDAGB6_WRCLI_DBUS_GO_PENDING_DEFAULT 0x00000000 +#define mmDAGB6_DAGB_DLY_DEFAULT 0x00000000 +#define mmDAGB6_CNTL_MISC_DEFAULT 0xcf7c1ffa +#define mmDAGB6_CNTL_MISC2_DEFAULT 0x003c0000 +#define mmDAGB6_FIFO_EMPTY_DEFAULT 0x00ffffff +#define mmDAGB6_FIFO_FULL_DEFAULT 0x00000000 +#define mmDAGB6_WR_CREDITS_FULL_DEFAULT 0x1fffffff +#define mmDAGB6_RD_CREDITS_FULL_DEFAULT 0x0003ffff +#define mmDAGB6_PERFCOUNTER_LO_DEFAULT 0x00000000 +#define mmDAGB6_PERFCOUNTER_HI_DEFAULT 0x00000000 +#define mmDAGB6_PERFCOUNTER0_CFG_DEFAULT 0x00000000 +#define 
mmDAGB6_PERFCOUNTER1_CFG_DEFAULT 0x00000000 +#define mmDAGB6_PERFCOUNTER2_CFG_DEFAULT 0x00000000 +#define mmDAGB6_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000 +#define mmDAGB6_RESERVE0_DEFAULT 0xffffffff +#define mmDAGB6_RESERVE1_DEFAULT 0xffffffff +#define mmDAGB6_RESERVE2_DEFAULT 0xffffffff +#define mmDAGB6_RESERVE3_DEFAULT 0xffffffff +#define mmDAGB6_RESERVE4_DEFAULT 0xffffffff +#define mmDAGB6_RESERVE5_DEFAULT 0xffffffff +#define mmDAGB6_RESERVE6_DEFAULT 0xffffffff +#define mmDAGB6_RESERVE7_DEFAULT 0xffffffff +#define mmDAGB6_RESERVE8_DEFAULT 0xffffffff +#define mmDAGB6_RESERVE9_DEFAULT 0xffffffff +#define mmDAGB6_RESERVE10_DEFAULT 0xffffffff +#define mmDAGB6_RESERVE11_DEFAULT 0xffffffff +#define mmDAGB6_RESERVE12_DEFAULT 0xffffffff +#define mmDAGB6_RESERVE13_DEFAULT 0xffffffff + + +// addressBlock: mmhub_dagb_dagbdec7 +#define mmDAGB7_RDCLI0_DEFAULT 0xfe5fe0f9 +#define mmDAGB7_RDCLI1_DEFAULT 0xfe5fe0f9 +#define mmDAGB7_RDCLI2_DEFAULT 0xfe5fe0f9 +#define mmDAGB7_RDCLI3_DEFAULT 0xfe5fe0f9 +#define mmDAGB7_RDCLI4_DEFAULT 0xfe5fe0f9 +#define mmDAGB7_RDCLI5_DEFAULT 0xfe5fe0f9 +#define mmDAGB7_RDCLI6_DEFAULT 0xfe5fe0f9 +#define mmDAGB7_RDCLI7_DEFAULT 0xfe5fe0f9 +#define mmDAGB7_RDCLI8_DEFAULT 0xfe5fe0f9 +#define mmDAGB7_RDCLI9_DEFAULT 0xfe5fe0f9 +#define mmDAGB7_RDCLI10_DEFAULT 0xfe5fe0f9 +#define mmDAGB7_RDCLI11_DEFAULT 0xfe5fe0f9 +#define mmDAGB7_RDCLI12_DEFAULT 0xfe5fe0f9 +#define mmDAGB7_RDCLI13_DEFAULT 0xfe5fe0f9 +#define mmDAGB7_RDCLI14_DEFAULT 0xfe5fe0f9 +#define mmDAGB7_RDCLI15_DEFAULT 0xfe5fe0f9 +#define mmDAGB7_RD_CNTL_DEFAULT 0x03527df8 +#define mmDAGB7_RD_GMI_CNTL_DEFAULT 0x00003045 +#define mmDAGB7_RD_ADDR_DAGB_DEFAULT 0x00000039 +#define mmDAGB7_RD_OUTPUT_DAGB_MAX_BURST_DEFAULT 0x88888888 +#define mmDAGB7_RD_OUTPUT_DAGB_LAZY_TIMER_DEFAULT 0x11111111 +#define mmDAGB7_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB7_L1TLB_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB7_ATCVM_RD_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB7_RD_ADDR_DAGB_MAX_BURST0_DEFAULT 0x88888888 +#define mmDAGB7_RD_ADDR_DAGB_LAZY_TIMER0_DEFAULT 0x11111111 +#define mmDAGB7_RD_ADDR_DAGB_MAX_BURST1_DEFAULT 0x88888888 +#define mmDAGB7_RD_ADDR_DAGB_LAZY_TIMER1_DEFAULT 0x11111111 +#define mmDAGB7_RD_VC0_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB7_RD_VC1_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB7_RD_VC2_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB7_RD_VC3_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB7_RD_VC4_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB7_RD_VC5_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB7_RD_VC6_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB7_RD_VC7_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB7_RD_CNTL_MISC_DEFAULT 0x69a0e408 +#define mmDAGB7_RD_TLB_CREDIT_DEFAULT 0x2f7bdef7 +#define mmDAGB7_RDCLI_ASK_PENDING_DEFAULT 0x00000000 +#define mmDAGB7_RDCLI_GO_PENDING_DEFAULT 0x00000000 +#define mmDAGB7_RDCLI_GBLSEND_PENDING_DEFAULT 0x00000000 +#define mmDAGB7_RDCLI_TLB_PENDING_DEFAULT 0x00000000 +#define mmDAGB7_RDCLI_OARB_PENDING_DEFAULT 0x00000000 +#define mmDAGB7_RDCLI_OSD_PENDING_DEFAULT 0x00000000 +#define mmDAGB7_WRCLI0_DEFAULT 0xfe5fe0f9 +#define mmDAGB7_WRCLI1_DEFAULT 0xfe5fe0f9 +#define mmDAGB7_WRCLI2_DEFAULT 0xfe5fe0f9 +#define mmDAGB7_WRCLI3_DEFAULT 0xfe5fe0f9 +#define mmDAGB7_WRCLI4_DEFAULT 0xfe5fe0f9 +#define mmDAGB7_WRCLI5_DEFAULT 0xfe5fe0f9 +#define mmDAGB7_WRCLI6_DEFAULT 0xfe5fe0f9 +#define mmDAGB7_WRCLI7_DEFAULT 0xfe5fe0f9 +#define mmDAGB7_WRCLI8_DEFAULT 0xfe5fe0f9 +#define mmDAGB7_WRCLI9_DEFAULT 0xfe5fe0f9 +#define mmDAGB7_WRCLI10_DEFAULT 0xfe5fe0f9 +#define mmDAGB7_WRCLI11_DEFAULT 0xfe5fe0f9 +#define 
mmDAGB7_WRCLI12_DEFAULT 0xfe5fe0f9 +#define mmDAGB7_WRCLI13_DEFAULT 0xfe5fe0f9 +#define mmDAGB7_WRCLI14_DEFAULT 0xfe5fe0f9 +#define mmDAGB7_WRCLI15_DEFAULT 0xfe5fe0f9 +#define mmDAGB7_WR_CNTL_DEFAULT 0x03527df8 +#define mmDAGB7_WR_GMI_CNTL_DEFAULT 0x00003045 +#define mmDAGB7_WR_ADDR_DAGB_DEFAULT 0x00000039 +#define mmDAGB7_WR_OUTPUT_DAGB_MAX_BURST_DEFAULT 0x88888888 +#define mmDAGB7_WR_OUTPUT_DAGB_LAZY_TIMER_DEFAULT 0x11111111 +#define mmDAGB7_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB7_L1TLB_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB7_ATCVM_WR_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmDAGB7_WR_ADDR_DAGB_MAX_BURST0_DEFAULT 0x88888888 +#define mmDAGB7_WR_ADDR_DAGB_LAZY_TIMER0_DEFAULT 0x11111111 +#define mmDAGB7_WR_ADDR_DAGB_MAX_BURST1_DEFAULT 0x88888888 +#define mmDAGB7_WR_ADDR_DAGB_LAZY_TIMER1_DEFAULT 0x11111111 +#define mmDAGB7_WR_DATA_DAGB_DEFAULT 0x00000001 +#define mmDAGB7_WR_DATA_DAGB_MAX_BURST0_DEFAULT 0x11111111 +#define mmDAGB7_WR_DATA_DAGB_LAZY_TIMER0_DEFAULT 0x00000000 +#define mmDAGB7_WR_DATA_DAGB_MAX_BURST1_DEFAULT 0x11111111 +#define mmDAGB7_WR_DATA_DAGB_LAZY_TIMER1_DEFAULT 0x00000000 +#define mmDAGB7_WR_VC0_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB7_WR_VC1_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB7_WR_VC2_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB7_WR_VC3_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB7_WR_VC4_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB7_WR_VC5_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB7_WR_VC6_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB7_WR_VC7_CNTL_DEFAULT 0xff2ff082 +#define mmDAGB7_WR_CNTL_MISC_DEFAULT 0x69a0e408 +#define mmDAGB7_WR_TLB_CREDIT_DEFAULT 0x2f7bdef7 +#define mmDAGB7_WR_DATA_CREDIT_DEFAULT 0x60606070 +#define mmDAGB7_WR_MISC_CREDIT_DEFAULT 0x0078dc88 +#define mmDAGB7_WRCLI_ASK_PENDING_DEFAULT 0x00000000 +#define mmDAGB7_WRCLI_GO_PENDING_DEFAULT 0x00000000 +#define mmDAGB7_WRCLI_GBLSEND_PENDING_DEFAULT 0x00000000 +#define mmDAGB7_WRCLI_TLB_PENDING_DEFAULT 0x00000000 +#define mmDAGB7_WRCLI_OARB_PENDING_DEFAULT 0x00000000 +#define mmDAGB7_WRCLI_OSD_PENDING_DEFAULT 0x00000000 +#define mmDAGB7_WRCLI_DBUS_ASK_PENDING_DEFAULT 0x00000000 +#define mmDAGB7_WRCLI_DBUS_GO_PENDING_DEFAULT 0x00000000 +#define mmDAGB7_DAGB_DLY_DEFAULT 0x00000000 +#define mmDAGB7_CNTL_MISC_DEFAULT 0xcf7c1ffa +#define mmDAGB7_CNTL_MISC2_DEFAULT 0x003c0000 +#define mmDAGB7_FIFO_EMPTY_DEFAULT 0x00ffffff +#define mmDAGB7_FIFO_FULL_DEFAULT 0x00000000 +#define mmDAGB7_WR_CREDITS_FULL_DEFAULT 0x1fffffff +#define mmDAGB7_RD_CREDITS_FULL_DEFAULT 0x0003ffff +#define mmDAGB7_PERFCOUNTER_LO_DEFAULT 0x00000000 +#define mmDAGB7_PERFCOUNTER_HI_DEFAULT 0x00000000 +#define mmDAGB7_PERFCOUNTER0_CFG_DEFAULT 0x00000000 +#define mmDAGB7_PERFCOUNTER1_CFG_DEFAULT 0x00000000 +#define mmDAGB7_PERFCOUNTER2_CFG_DEFAULT 0x00000000 +#define mmDAGB7_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000 +#define mmDAGB7_RESERVE0_DEFAULT 0xffffffff +#define mmDAGB7_RESERVE1_DEFAULT 0xffffffff +#define mmDAGB7_RESERVE2_DEFAULT 0xffffffff +#define mmDAGB7_RESERVE3_DEFAULT 0xffffffff +#define mmDAGB7_RESERVE4_DEFAULT 0xffffffff +#define mmDAGB7_RESERVE5_DEFAULT 0xffffffff +#define mmDAGB7_RESERVE6_DEFAULT 0xffffffff +#define mmDAGB7_RESERVE7_DEFAULT 0xffffffff +#define mmDAGB7_RESERVE8_DEFAULT 0xffffffff +#define mmDAGB7_RESERVE9_DEFAULT 0xffffffff +#define mmDAGB7_RESERVE10_DEFAULT 0xffffffff +#define mmDAGB7_RESERVE11_DEFAULT 0xffffffff +#define mmDAGB7_RESERVE12_DEFAULT 0xffffffff +#define mmDAGB7_RESERVE13_DEFAULT 0xffffffff + + +// addressBlock: mmhub_ea_mmeadec5 +#define mmMMEA5_DRAM_RD_CLI2GRP_MAP0_DEFAULT 0x55555555 
+#define mmMMEA5_DRAM_RD_CLI2GRP_MAP1_DEFAULT 0x55555555 +#define mmMMEA5_DRAM_WR_CLI2GRP_MAP0_DEFAULT 0x55555555 +#define mmMMEA5_DRAM_WR_CLI2GRP_MAP1_DEFAULT 0x55555555 +#define mmMMEA5_DRAM_RD_GRP2VC_MAP_DEFAULT 0x00000e25 +#define mmMMEA5_DRAM_WR_GRP2VC_MAP_DEFAULT 0x00000e25 +#define mmMMEA5_DRAM_RD_LAZY_DEFAULT 0x78000924 +#define mmMMEA5_DRAM_WR_LAZY_DEFAULT 0x78000924 +#define mmMMEA5_DRAM_RD_CAM_CNTL_DEFAULT 0x16db4444 +#define mmMMEA5_DRAM_WR_CAM_CNTL_DEFAULT 0x16db4444 +#define mmMMEA5_DRAM_PAGE_BURST_DEFAULT 0x20002000 +#define mmMMEA5_DRAM_RD_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA5_DRAM_WR_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA5_DRAM_RD_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA5_DRAM_WR_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA5_DRAM_RD_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA5_DRAM_WR_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA5_DRAM_RD_PRI_URGENCY_DEFAULT 0x0000fdb6 +#define mmMMEA5_DRAM_WR_PRI_URGENCY_DEFAULT 0x0000fdb6 +#define mmMMEA5_DRAM_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA5_DRAM_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA5_DRAM_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA5_DRAM_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA5_DRAM_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA5_DRAM_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA5_GMI_RD_CLI2GRP_MAP0_DEFAULT 0x00000000 +#define mmMMEA5_GMI_RD_CLI2GRP_MAP1_DEFAULT 0x00000000 +#define mmMMEA5_GMI_WR_CLI2GRP_MAP0_DEFAULT 0x00000000 +#define mmMMEA5_GMI_WR_CLI2GRP_MAP1_DEFAULT 0x00000000 +#define mmMMEA5_GMI_RD_GRP2VC_MAP_DEFAULT 0x00000fff +#define mmMMEA5_GMI_WR_GRP2VC_MAP_DEFAULT 0x00000fff +#define mmMMEA5_GMI_RD_LAZY_DEFAULT 0x78000924 +#define mmMMEA5_GMI_WR_LAZY_DEFAULT 0x78000924 +#define mmMMEA5_GMI_RD_CAM_CNTL_DEFAULT 0x16db4444 +#define mmMMEA5_GMI_WR_CAM_CNTL_DEFAULT 0x16db4444 +#define mmMMEA5_GMI_PAGE_BURST_DEFAULT 0x20002000 +#define mmMMEA5_GMI_RD_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA5_GMI_WR_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA5_GMI_RD_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA5_GMI_WR_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA5_GMI_RD_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA5_GMI_WR_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA5_GMI_RD_PRI_URGENCY_DEFAULT 0x0000fdb6 +#define mmMMEA5_GMI_WR_PRI_URGENCY_DEFAULT 0x0000fdb6 +#define mmMMEA5_GMI_RD_PRI_URGENCY_MASKING_DEFAULT 0xffffffff +#define mmMMEA5_GMI_WR_PRI_URGENCY_MASKING_DEFAULT 0xffffffff +#define mmMMEA5_GMI_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA5_GMI_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA5_GMI_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA5_GMI_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA5_GMI_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA5_GMI_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA5_ADDRNORM_BASE_ADDR0_DEFAULT 0x00000000 +#define mmMMEA5_ADDRNORM_LIMIT_ADDR0_DEFAULT 0x00000000 +#define mmMMEA5_ADDRNORM_BASE_ADDR1_DEFAULT 0x00000000 +#define mmMMEA5_ADDRNORM_LIMIT_ADDR1_DEFAULT 0x00000000 +#define mmMMEA5_ADDRNORM_OFFSET_ADDR1_DEFAULT 0x00000000 +#define mmMMEA5_ADDRNORM_BASE_ADDR2_DEFAULT 0x00000000 +#define mmMMEA5_ADDRNORM_LIMIT_ADDR2_DEFAULT 0x00000000 +#define mmMMEA5_ADDRNORM_BASE_ADDR3_DEFAULT 0x00000000 +#define mmMMEA5_ADDRNORM_LIMIT_ADDR3_DEFAULT 0x00000000 +#define mmMMEA5_ADDRNORM_OFFSET_ADDR3_DEFAULT 0x00000000 +#define mmMMEA5_ADDRNORM_BASE_ADDR4_DEFAULT 0x00000000 +#define mmMMEA5_ADDRNORM_LIMIT_ADDR4_DEFAULT 0x00000000 +#define mmMMEA5_ADDRNORM_BASE_ADDR5_DEFAULT 0x00000000 
+#define mmMMEA5_ADDRNORM_LIMIT_ADDR5_DEFAULT 0x00000000 +#define mmMMEA5_ADDRNORM_OFFSET_ADDR5_DEFAULT 0x00000000 +#define mmMMEA5_ADDRNORMDRAM_HOLE_CNTL_DEFAULT 0x00000000 +#define mmMMEA5_ADDRNORMGMI_HOLE_CNTL_DEFAULT 0x00000000 +#define mmMMEA5_ADDRNORMDRAM_NP2_CHANNEL_CFG_DEFAULT 0x00000000 +#define mmMMEA5_ADDRNORMGMI_NP2_CHANNEL_CFG_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDEC_BANK_CFG_DEFAULT 0x000003cf +#define mmMMEA5_ADDRDEC_MISC_CFG_DEFAULT 0xfffff000 +#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_BANK0_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_BANK1_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_BANK2_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_BANK3_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_BANK4_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_BANK5_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_PC_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_PC2_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_CS0_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_CS1_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDECDRAM_HARVEST_ENABLE_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDECGMI_ADDR_HASH_BANK0_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDECGMI_ADDR_HASH_BANK1_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDECGMI_ADDR_HASH_BANK2_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDECGMI_ADDR_HASH_BANK3_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDECGMI_ADDR_HASH_BANK4_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDECGMI_ADDR_HASH_BANK5_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDECGMI_ADDR_HASH_PC_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDECGMI_ADDR_HASH_PC2_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDECGMI_ADDR_HASH_CS0_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDECGMI_ADDR_HASH_CS1_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDECGMI_HARVEST_ENABLE_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDEC0_BASE_ADDR_CS0_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDEC0_BASE_ADDR_CS1_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDEC0_BASE_ADDR_CS2_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDEC0_BASE_ADDR_CS3_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDEC0_BASE_ADDR_SECCS0_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDEC0_BASE_ADDR_SECCS1_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDEC0_BASE_ADDR_SECCS2_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDEC0_BASE_ADDR_SECCS3_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDEC0_ADDR_MASK_CS01_DEFAULT 0xfffffffe +#define mmMMEA5_ADDRDEC0_ADDR_MASK_CS23_DEFAULT 0xfffffffe +#define mmMMEA5_ADDRDEC0_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe +#define mmMMEA5_ADDRDEC0_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe +#define mmMMEA5_ADDRDEC0_ADDR_CFG_CS01_DEFAULT 0x00050408 +#define mmMMEA5_ADDRDEC0_ADDR_CFG_CS23_DEFAULT 0x00050408 +#define mmMMEA5_ADDRDEC0_ADDR_SEL_CS01_DEFAULT 0x04076543 +#define mmMMEA5_ADDRDEC0_ADDR_SEL_CS23_DEFAULT 0x04076543 +#define mmMMEA5_ADDRDEC0_ADDR_SEL2_CS01_DEFAULT 0x00000008 +#define mmMMEA5_ADDRDEC0_ADDR_SEL2_CS23_DEFAULT 0x00000008 +#define mmMMEA5_ADDRDEC0_COL_SEL_LO_CS01_DEFAULT 0x87654321 +#define mmMMEA5_ADDRDEC0_COL_SEL_LO_CS23_DEFAULT 0x87654321 +#define mmMMEA5_ADDRDEC0_COL_SEL_HI_CS01_DEFAULT 0xa9876543 +#define mmMMEA5_ADDRDEC0_COL_SEL_HI_CS23_DEFAULT 0xa9876543 +#define mmMMEA5_ADDRDEC0_RM_SEL_CS01_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDEC0_RM_SEL_CS23_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDEC0_RM_SEL_SECCS01_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDEC0_RM_SEL_SECCS23_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDEC1_BASE_ADDR_CS0_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDEC1_BASE_ADDR_CS1_DEFAULT 0x00000000 +#define 
mmMMEA5_ADDRDEC1_BASE_ADDR_CS2_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDEC1_BASE_ADDR_CS3_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDEC1_BASE_ADDR_SECCS0_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDEC1_BASE_ADDR_SECCS1_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDEC1_BASE_ADDR_SECCS2_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDEC1_BASE_ADDR_SECCS3_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDEC1_ADDR_MASK_CS01_DEFAULT 0xfffffffe +#define mmMMEA5_ADDRDEC1_ADDR_MASK_CS23_DEFAULT 0xfffffffe +#define mmMMEA5_ADDRDEC1_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe +#define mmMMEA5_ADDRDEC1_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe +#define mmMMEA5_ADDRDEC1_ADDR_CFG_CS01_DEFAULT 0x00050408 +#define mmMMEA5_ADDRDEC1_ADDR_CFG_CS23_DEFAULT 0x00050408 +#define mmMMEA5_ADDRDEC1_ADDR_SEL_CS01_DEFAULT 0x04076543 +#define mmMMEA5_ADDRDEC1_ADDR_SEL_CS23_DEFAULT 0x04076543 +#define mmMMEA5_ADDRDEC1_ADDR_SEL2_CS01_DEFAULT 0x00000008 +#define mmMMEA5_ADDRDEC1_ADDR_SEL2_CS23_DEFAULT 0x00000008 +#define mmMMEA5_ADDRDEC1_COL_SEL_LO_CS01_DEFAULT 0x87654321 +#define mmMMEA5_ADDRDEC1_COL_SEL_LO_CS23_DEFAULT 0x87654321 +#define mmMMEA5_ADDRDEC1_COL_SEL_HI_CS01_DEFAULT 0xa9876543 +#define mmMMEA5_ADDRDEC1_COL_SEL_HI_CS23_DEFAULT 0xa9876543 +#define mmMMEA5_ADDRDEC1_RM_SEL_CS01_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDEC1_RM_SEL_CS23_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDEC1_RM_SEL_SECCS01_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDEC1_RM_SEL_SECCS23_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDEC2_BASE_ADDR_CS0_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDEC2_BASE_ADDR_CS1_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDEC2_BASE_ADDR_CS2_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDEC2_BASE_ADDR_CS3_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDEC2_BASE_ADDR_SECCS0_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDEC2_BASE_ADDR_SECCS1_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDEC2_BASE_ADDR_SECCS2_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDEC2_BASE_ADDR_SECCS3_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDEC2_ADDR_MASK_CS01_DEFAULT 0xfffffffe +#define mmMMEA5_ADDRDEC2_ADDR_MASK_CS23_DEFAULT 0xfffffffe +#define mmMMEA5_ADDRDEC2_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe +#define mmMMEA5_ADDRDEC2_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe +#define mmMMEA5_ADDRDEC2_ADDR_CFG_CS01_DEFAULT 0x00050408 +#define mmMMEA5_ADDRDEC2_ADDR_CFG_CS23_DEFAULT 0x00050408 +#define mmMMEA5_ADDRDEC2_ADDR_SEL_CS01_DEFAULT 0x04076543 +#define mmMMEA5_ADDRDEC2_ADDR_SEL_CS23_DEFAULT 0x04076543 +#define mmMMEA5_ADDRDEC2_ADDR_SEL2_CS01_DEFAULT 0x00000008 +#define mmMMEA5_ADDRDEC2_ADDR_SEL2_CS23_DEFAULT 0x00000008 +#define mmMMEA5_ADDRDEC2_COL_SEL_LO_CS01_DEFAULT 0x87654321 +#define mmMMEA5_ADDRDEC2_COL_SEL_LO_CS23_DEFAULT 0x87654321 +#define mmMMEA5_ADDRDEC2_COL_SEL_HI_CS01_DEFAULT 0xa9876543 +#define mmMMEA5_ADDRDEC2_COL_SEL_HI_CS23_DEFAULT 0xa9876543 +#define mmMMEA5_ADDRDEC2_RM_SEL_CS01_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDEC2_RM_SEL_CS23_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDEC2_RM_SEL_SECCS01_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDEC2_RM_SEL_SECCS23_DEFAULT 0x00000000 +#define mmMMEA5_ADDRNORMDRAM_GLOBAL_CNTL_DEFAULT 0x00600000 +#define mmMMEA5_ADDRNORMGMI_GLOBAL_CNTL_DEFAULT 0x00600000 +#define mmMMEA5_IO_RD_CLI2GRP_MAP0_DEFAULT 0xe4e4e4e4 +#define mmMMEA5_IO_RD_CLI2GRP_MAP1_DEFAULT 0xe4e4e4e4 +#define mmMMEA5_IO_WR_CLI2GRP_MAP0_DEFAULT 0xe4e4e4e4 +#define mmMMEA5_IO_WR_CLI2GRP_MAP1_DEFAULT 0xe4e4e4e4 +#define mmMMEA5_IO_RD_COMBINE_FLUSH_DEFAULT 0x00007777 +#define mmMMEA5_IO_WR_COMBINE_FLUSH_DEFAULT 0x00007777 +#define mmMMEA5_IO_GROUP_BURST_DEFAULT 0x1f031f03 +#define mmMMEA5_IO_RD_PRI_AGE_DEFAULT 0x00db6249 
+#define mmMMEA5_IO_WR_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA5_IO_RD_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA5_IO_WR_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA5_IO_RD_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA5_IO_WR_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA5_IO_RD_PRI_URGENCY_DEFAULT 0x00000492 +#define mmMMEA5_IO_WR_PRI_URGENCY_DEFAULT 0x00000492 +#define mmMMEA5_IO_RD_PRI_URGENCY_MASKING_DEFAULT 0xffffffff +#define mmMMEA5_IO_WR_PRI_URGENCY_MASKING_DEFAULT 0xffffffff +#define mmMMEA5_IO_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA5_IO_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA5_IO_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA5_IO_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA5_IO_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA5_IO_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA5_SDP_ARB_DRAM_DEFAULT 0x00101e40 +#define mmMMEA5_SDP_ARB_GMI_DEFAULT 0x00101e40 +#define mmMMEA5_SDP_ARB_FINAL_DEFAULT 0x00007fff +#define mmMMEA5_SDP_DRAM_PRIORITY_DEFAULT 0x00000000 +#define mmMMEA5_SDP_GMI_PRIORITY_DEFAULT 0x00000000 +#define mmMMEA5_SDP_IO_PRIORITY_DEFAULT 0x00000000 +#define mmMMEA5_SDP_CREDITS_DEFAULT 0x000101bf +#define mmMMEA5_SDP_TAG_RESERVE0_DEFAULT 0x00000000 +#define mmMMEA5_SDP_TAG_RESERVE1_DEFAULT 0x00000000 +#define mmMMEA5_SDP_VCC_RESERVE0_DEFAULT 0x00000000 +#define mmMMEA5_SDP_VCC_RESERVE1_DEFAULT 0x00000000 +#define mmMMEA5_SDP_VCD_RESERVE0_DEFAULT 0x00000000 +#define mmMMEA5_SDP_VCD_RESERVE1_DEFAULT 0x00000000 +#define mmMMEA5_SDP_REQ_CNTL_DEFAULT 0x0000001f +#define mmMMEA5_MISC_DEFAULT 0x0c00a070 +#define mmMMEA5_LATENCY_SAMPLING_DEFAULT 0x00000000 +#define mmMMEA5_PERFCOUNTER_LO_DEFAULT 0x00000000 +#define mmMMEA5_PERFCOUNTER_HI_DEFAULT 0x00000000 +#define mmMMEA5_PERFCOUNTER0_CFG_DEFAULT 0x00000000 +#define mmMMEA5_PERFCOUNTER1_CFG_DEFAULT 0x00000000 +#define mmMMEA5_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000 +#define mmMMEA5_EDC_CNT_DEFAULT 0x00000000 +#define mmMMEA5_EDC_CNT2_DEFAULT 0x00000000 +#define mmMMEA5_DSM_CNTL_DEFAULT 0x00000000 +#define mmMMEA5_DSM_CNTLA_DEFAULT 0x00000000 +#define mmMMEA5_DSM_CNTLB_DEFAULT 0x00000000 +#define mmMMEA5_DSM_CNTL2_DEFAULT 0x00000000 +#define mmMMEA5_DSM_CNTL2A_DEFAULT 0x00000000 +#define mmMMEA5_DSM_CNTL2B_DEFAULT 0x00000000 +#define mmMMEA5_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmMMEA5_EDC_MODE_DEFAULT 0x00000000 +#define mmMMEA5_ERR_STATUS_DEFAULT 0x00000300 +#define mmMMEA5_MISC2_DEFAULT 0x00000000 +#define mmMMEA5_ADDRDEC_SELECT_DEFAULT 0x00000000 +#define mmMMEA5_EDC_CNT3_DEFAULT 0x00000000 + + +// addressBlock: mmhub_ea_mmeadec6 +#define mmMMEA6_DRAM_RD_CLI2GRP_MAP0_DEFAULT 0x55555555 +#define mmMMEA6_DRAM_RD_CLI2GRP_MAP1_DEFAULT 0x55555555 +#define mmMMEA6_DRAM_WR_CLI2GRP_MAP0_DEFAULT 0x55555555 +#define mmMMEA6_DRAM_WR_CLI2GRP_MAP1_DEFAULT 0x55555555 +#define mmMMEA6_DRAM_RD_GRP2VC_MAP_DEFAULT 0x00000e25 +#define mmMMEA6_DRAM_WR_GRP2VC_MAP_DEFAULT 0x00000e25 +#define mmMMEA6_DRAM_RD_LAZY_DEFAULT 0x78000924 +#define mmMMEA6_DRAM_WR_LAZY_DEFAULT 0x78000924 +#define mmMMEA6_DRAM_RD_CAM_CNTL_DEFAULT 0x16db4444 +#define mmMMEA6_DRAM_WR_CAM_CNTL_DEFAULT 0x16db4444 +#define mmMMEA6_DRAM_PAGE_BURST_DEFAULT 0x20002000 +#define mmMMEA6_DRAM_RD_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA6_DRAM_WR_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA6_DRAM_RD_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA6_DRAM_WR_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA6_DRAM_RD_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA6_DRAM_WR_PRI_FIXED_DEFAULT 0x00000924 +#define 
mmMMEA6_DRAM_RD_PRI_URGENCY_DEFAULT 0x0000fdb6 +#define mmMMEA6_DRAM_WR_PRI_URGENCY_DEFAULT 0x0000fdb6 +#define mmMMEA6_DRAM_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA6_DRAM_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA6_DRAM_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA6_DRAM_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA6_DRAM_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA6_DRAM_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA6_GMI_RD_CLI2GRP_MAP0_DEFAULT 0x00000000 +#define mmMMEA6_GMI_RD_CLI2GRP_MAP1_DEFAULT 0x00000000 +#define mmMMEA6_GMI_WR_CLI2GRP_MAP0_DEFAULT 0x00000000 +#define mmMMEA6_GMI_WR_CLI2GRP_MAP1_DEFAULT 0x00000000 +#define mmMMEA6_GMI_RD_GRP2VC_MAP_DEFAULT 0x00000fff +#define mmMMEA6_GMI_WR_GRP2VC_MAP_DEFAULT 0x00000fff +#define mmMMEA6_GMI_RD_LAZY_DEFAULT 0x78000924 +#define mmMMEA6_GMI_WR_LAZY_DEFAULT 0x78000924 +#define mmMMEA6_GMI_RD_CAM_CNTL_DEFAULT 0x16db4444 +#define mmMMEA6_GMI_WR_CAM_CNTL_DEFAULT 0x16db4444 +#define mmMMEA6_GMI_PAGE_BURST_DEFAULT 0x20002000 +#define mmMMEA6_GMI_RD_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA6_GMI_WR_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA6_GMI_RD_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA6_GMI_WR_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA6_GMI_RD_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA6_GMI_WR_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA6_GMI_RD_PRI_URGENCY_DEFAULT 0x0000fdb6 +#define mmMMEA6_GMI_WR_PRI_URGENCY_DEFAULT 0x0000fdb6 +#define mmMMEA6_GMI_RD_PRI_URGENCY_MASKING_DEFAULT 0xffffffff +#define mmMMEA6_GMI_WR_PRI_URGENCY_MASKING_DEFAULT 0xffffffff +#define mmMMEA6_GMI_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA6_GMI_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA6_GMI_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA6_GMI_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA6_GMI_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA6_GMI_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA6_ADDRNORM_BASE_ADDR0_DEFAULT 0x00000000 +#define mmMMEA6_ADDRNORM_LIMIT_ADDR0_DEFAULT 0x00000000 +#define mmMMEA6_ADDRNORM_BASE_ADDR1_DEFAULT 0x00000000 +#define mmMMEA6_ADDRNORM_LIMIT_ADDR1_DEFAULT 0x00000000 +#define mmMMEA6_ADDRNORM_OFFSET_ADDR1_DEFAULT 0x00000000 +#define mmMMEA6_ADDRNORM_BASE_ADDR2_DEFAULT 0x00000000 +#define mmMMEA6_ADDRNORM_LIMIT_ADDR2_DEFAULT 0x00000000 +#define mmMMEA6_ADDRNORM_BASE_ADDR3_DEFAULT 0x00000000 +#define mmMMEA6_ADDRNORM_LIMIT_ADDR3_DEFAULT 0x00000000 +#define mmMMEA6_ADDRNORM_OFFSET_ADDR3_DEFAULT 0x00000000 +#define mmMMEA6_ADDRNORM_BASE_ADDR4_DEFAULT 0x00000000 +#define mmMMEA6_ADDRNORM_LIMIT_ADDR4_DEFAULT 0x00000000 +#define mmMMEA6_ADDRNORM_BASE_ADDR5_DEFAULT 0x00000000 +#define mmMMEA6_ADDRNORM_LIMIT_ADDR5_DEFAULT 0x00000000 +#define mmMMEA6_ADDRNORM_OFFSET_ADDR5_DEFAULT 0x00000000 +#define mmMMEA6_ADDRNORMDRAM_HOLE_CNTL_DEFAULT 0x00000000 +#define mmMMEA6_ADDRNORMGMI_HOLE_CNTL_DEFAULT 0x00000000 +#define mmMMEA6_ADDRNORMDRAM_NP2_CHANNEL_CFG_DEFAULT 0x00000000 +#define mmMMEA6_ADDRNORMGMI_NP2_CHANNEL_CFG_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDEC_BANK_CFG_DEFAULT 0x000003cf +#define mmMMEA6_ADDRDEC_MISC_CFG_DEFAULT 0xfffff000 +#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_BANK0_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_BANK1_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_BANK2_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_BANK3_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_BANK4_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_BANK5_DEFAULT 0x00000000 +#define 
mmMMEA6_ADDRDECDRAM_ADDR_HASH_PC_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_PC2_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_CS0_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_CS1_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDECDRAM_HARVEST_ENABLE_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDECGMI_ADDR_HASH_BANK0_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDECGMI_ADDR_HASH_BANK1_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDECGMI_ADDR_HASH_BANK2_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDECGMI_ADDR_HASH_BANK3_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDECGMI_ADDR_HASH_BANK4_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDECGMI_ADDR_HASH_BANK5_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDECGMI_ADDR_HASH_PC_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDECGMI_ADDR_HASH_PC2_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDECGMI_ADDR_HASH_CS0_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDECGMI_ADDR_HASH_CS1_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDECGMI_HARVEST_ENABLE_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDEC0_BASE_ADDR_CS0_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDEC0_BASE_ADDR_CS1_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDEC0_BASE_ADDR_CS2_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDEC0_BASE_ADDR_CS3_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDEC0_BASE_ADDR_SECCS0_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDEC0_BASE_ADDR_SECCS1_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDEC0_BASE_ADDR_SECCS2_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDEC0_BASE_ADDR_SECCS3_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDEC0_ADDR_MASK_CS01_DEFAULT 0xfffffffe +#define mmMMEA6_ADDRDEC0_ADDR_MASK_CS23_DEFAULT 0xfffffffe +#define mmMMEA6_ADDRDEC0_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe +#define mmMMEA6_ADDRDEC0_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe +#define mmMMEA6_ADDRDEC0_ADDR_CFG_CS01_DEFAULT 0x00050408 +#define mmMMEA6_ADDRDEC0_ADDR_CFG_CS23_DEFAULT 0x00050408 +#define mmMMEA6_ADDRDEC0_ADDR_SEL_CS01_DEFAULT 0x04076543 +#define mmMMEA6_ADDRDEC0_ADDR_SEL_CS23_DEFAULT 0x04076543 +#define mmMMEA6_ADDRDEC0_ADDR_SEL2_CS01_DEFAULT 0x00000008 +#define mmMMEA6_ADDRDEC0_ADDR_SEL2_CS23_DEFAULT 0x00000008 +#define mmMMEA6_ADDRDEC0_COL_SEL_LO_CS01_DEFAULT 0x87654321 +#define mmMMEA6_ADDRDEC0_COL_SEL_LO_CS23_DEFAULT 0x87654321 +#define mmMMEA6_ADDRDEC0_COL_SEL_HI_CS01_DEFAULT 0xa9876543 +#define mmMMEA6_ADDRDEC0_COL_SEL_HI_CS23_DEFAULT 0xa9876543 +#define mmMMEA6_ADDRDEC0_RM_SEL_CS01_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDEC0_RM_SEL_CS23_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDEC0_RM_SEL_SECCS01_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDEC0_RM_SEL_SECCS23_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDEC1_BASE_ADDR_CS0_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDEC1_BASE_ADDR_CS1_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDEC1_BASE_ADDR_CS2_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDEC1_BASE_ADDR_CS3_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDEC1_BASE_ADDR_SECCS0_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDEC1_BASE_ADDR_SECCS1_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDEC1_BASE_ADDR_SECCS2_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDEC1_BASE_ADDR_SECCS3_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDEC1_ADDR_MASK_CS01_DEFAULT 0xfffffffe +#define mmMMEA6_ADDRDEC1_ADDR_MASK_CS23_DEFAULT 0xfffffffe +#define mmMMEA6_ADDRDEC1_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe +#define mmMMEA6_ADDRDEC1_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe +#define mmMMEA6_ADDRDEC1_ADDR_CFG_CS01_DEFAULT 0x00050408 +#define mmMMEA6_ADDRDEC1_ADDR_CFG_CS23_DEFAULT 0x00050408 +#define mmMMEA6_ADDRDEC1_ADDR_SEL_CS01_DEFAULT 0x04076543 +#define mmMMEA6_ADDRDEC1_ADDR_SEL_CS23_DEFAULT 0x04076543 +#define 
mmMMEA6_ADDRDEC1_ADDR_SEL2_CS01_DEFAULT 0x00000008 +#define mmMMEA6_ADDRDEC1_ADDR_SEL2_CS23_DEFAULT 0x00000008 +#define mmMMEA6_ADDRDEC1_COL_SEL_LO_CS01_DEFAULT 0x87654321 +#define mmMMEA6_ADDRDEC1_COL_SEL_LO_CS23_DEFAULT 0x87654321 +#define mmMMEA6_ADDRDEC1_COL_SEL_HI_CS01_DEFAULT 0xa9876543 +#define mmMMEA6_ADDRDEC1_COL_SEL_HI_CS23_DEFAULT 0xa9876543 +#define mmMMEA6_ADDRDEC1_RM_SEL_CS01_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDEC1_RM_SEL_CS23_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDEC1_RM_SEL_SECCS01_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDEC1_RM_SEL_SECCS23_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDEC2_BASE_ADDR_CS0_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDEC2_BASE_ADDR_CS1_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDEC2_BASE_ADDR_CS2_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDEC2_BASE_ADDR_CS3_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDEC2_BASE_ADDR_SECCS0_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDEC2_BASE_ADDR_SECCS1_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDEC2_BASE_ADDR_SECCS2_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDEC2_BASE_ADDR_SECCS3_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDEC2_ADDR_MASK_CS01_DEFAULT 0xfffffffe +#define mmMMEA6_ADDRDEC2_ADDR_MASK_CS23_DEFAULT 0xfffffffe +#define mmMMEA6_ADDRDEC2_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe +#define mmMMEA6_ADDRDEC2_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe +#define mmMMEA6_ADDRDEC2_ADDR_CFG_CS01_DEFAULT 0x00050408 +#define mmMMEA6_ADDRDEC2_ADDR_CFG_CS23_DEFAULT 0x00050408 +#define mmMMEA6_ADDRDEC2_ADDR_SEL_CS01_DEFAULT 0x04076543 +#define mmMMEA6_ADDRDEC2_ADDR_SEL_CS23_DEFAULT 0x04076543 +#define mmMMEA6_ADDRDEC2_ADDR_SEL2_CS01_DEFAULT 0x00000008 +#define mmMMEA6_ADDRDEC2_ADDR_SEL2_CS23_DEFAULT 0x00000008 +#define mmMMEA6_ADDRDEC2_COL_SEL_LO_CS01_DEFAULT 0x87654321 +#define mmMMEA6_ADDRDEC2_COL_SEL_LO_CS23_DEFAULT 0x87654321 +#define mmMMEA6_ADDRDEC2_COL_SEL_HI_CS01_DEFAULT 0xa9876543 +#define mmMMEA6_ADDRDEC2_COL_SEL_HI_CS23_DEFAULT 0xa9876543 +#define mmMMEA6_ADDRDEC2_RM_SEL_CS01_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDEC2_RM_SEL_CS23_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDEC2_RM_SEL_SECCS01_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDEC2_RM_SEL_SECCS23_DEFAULT 0x00000000 +#define mmMMEA6_ADDRNORMDRAM_GLOBAL_CNTL_DEFAULT 0x00600000 +#define mmMMEA6_ADDRNORMGMI_GLOBAL_CNTL_DEFAULT 0x00600000 +#define mmMMEA6_IO_RD_CLI2GRP_MAP0_DEFAULT 0xe4e4e4e4 +#define mmMMEA6_IO_RD_CLI2GRP_MAP1_DEFAULT 0xe4e4e4e4 +#define mmMMEA6_IO_WR_CLI2GRP_MAP0_DEFAULT 0xe4e4e4e4 +#define mmMMEA6_IO_WR_CLI2GRP_MAP1_DEFAULT 0xe4e4e4e4 +#define mmMMEA6_IO_RD_COMBINE_FLUSH_DEFAULT 0x00007777 +#define mmMMEA6_IO_WR_COMBINE_FLUSH_DEFAULT 0x00007777 +#define mmMMEA6_IO_GROUP_BURST_DEFAULT 0x1f031f03 +#define mmMMEA6_IO_RD_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA6_IO_WR_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA6_IO_RD_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA6_IO_WR_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA6_IO_RD_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA6_IO_WR_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA6_IO_RD_PRI_URGENCY_DEFAULT 0x00000492 +#define mmMMEA6_IO_WR_PRI_URGENCY_DEFAULT 0x00000492 +#define mmMMEA6_IO_RD_PRI_URGENCY_MASKING_DEFAULT 0xffffffff +#define mmMMEA6_IO_WR_PRI_URGENCY_MASKING_DEFAULT 0xffffffff +#define mmMMEA6_IO_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA6_IO_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA6_IO_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA6_IO_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA6_IO_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA6_IO_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define 
mmMMEA6_SDP_ARB_DRAM_DEFAULT 0x00101e40 +#define mmMMEA6_SDP_ARB_GMI_DEFAULT 0x00101e40 +#define mmMMEA6_SDP_ARB_FINAL_DEFAULT 0x00007fff +#define mmMMEA6_SDP_DRAM_PRIORITY_DEFAULT 0x00000000 +#define mmMMEA6_SDP_GMI_PRIORITY_DEFAULT 0x00000000 +#define mmMMEA6_SDP_IO_PRIORITY_DEFAULT 0x00000000 +#define mmMMEA6_SDP_CREDITS_DEFAULT 0x000101bf +#define mmMMEA6_SDP_TAG_RESERVE0_DEFAULT 0x00000000 +#define mmMMEA6_SDP_TAG_RESERVE1_DEFAULT 0x00000000 +#define mmMMEA6_SDP_VCC_RESERVE0_DEFAULT 0x00000000 +#define mmMMEA6_SDP_VCC_RESERVE1_DEFAULT 0x00000000 +#define mmMMEA6_SDP_VCD_RESERVE0_DEFAULT 0x00000000 +#define mmMMEA6_SDP_VCD_RESERVE1_DEFAULT 0x00000000 +#define mmMMEA6_SDP_REQ_CNTL_DEFAULT 0x0000001f +#define mmMMEA6_MISC_DEFAULT 0x0c00a070 +#define mmMMEA6_LATENCY_SAMPLING_DEFAULT 0x00000000 +#define mmMMEA6_PERFCOUNTER_LO_DEFAULT 0x00000000 +#define mmMMEA6_PERFCOUNTER_HI_DEFAULT 0x00000000 +#define mmMMEA6_PERFCOUNTER0_CFG_DEFAULT 0x00000000 +#define mmMMEA6_PERFCOUNTER1_CFG_DEFAULT 0x00000000 +#define mmMMEA6_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000 +#define mmMMEA6_EDC_CNT_DEFAULT 0x00000000 +#define mmMMEA6_EDC_CNT2_DEFAULT 0x00000000 +#define mmMMEA6_DSM_CNTL_DEFAULT 0x00000000 +#define mmMMEA6_DSM_CNTLA_DEFAULT 0x00000000 +#define mmMMEA6_DSM_CNTLB_DEFAULT 0x00000000 +#define mmMMEA6_DSM_CNTL2_DEFAULT 0x00000000 +#define mmMMEA6_DSM_CNTL2A_DEFAULT 0x00000000 +#define mmMMEA6_DSM_CNTL2B_DEFAULT 0x00000000 +#define mmMMEA6_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmMMEA6_EDC_MODE_DEFAULT 0x00000000 +#define mmMMEA6_ERR_STATUS_DEFAULT 0x00000300 +#define mmMMEA6_MISC2_DEFAULT 0x00000000 +#define mmMMEA6_ADDRDEC_SELECT_DEFAULT 0x00000000 +#define mmMMEA6_EDC_CNT3_DEFAULT 0x00000000 + + +// addressBlock: mmhub_ea_mmeadec7 +#define mmMMEA7_DRAM_RD_CLI2GRP_MAP0_DEFAULT 0x55555555 +#define mmMMEA7_DRAM_RD_CLI2GRP_MAP1_DEFAULT 0x55555555 +#define mmMMEA7_DRAM_WR_CLI2GRP_MAP0_DEFAULT 0x55555555 +#define mmMMEA7_DRAM_WR_CLI2GRP_MAP1_DEFAULT 0x55555555 +#define mmMMEA7_DRAM_RD_GRP2VC_MAP_DEFAULT 0x00000e25 +#define mmMMEA7_DRAM_WR_GRP2VC_MAP_DEFAULT 0x00000e25 +#define mmMMEA7_DRAM_RD_LAZY_DEFAULT 0x78000924 +#define mmMMEA7_DRAM_WR_LAZY_DEFAULT 0x78000924 +#define mmMMEA7_DRAM_RD_CAM_CNTL_DEFAULT 0x16db4444 +#define mmMMEA7_DRAM_WR_CAM_CNTL_DEFAULT 0x16db4444 +#define mmMMEA7_DRAM_PAGE_BURST_DEFAULT 0x20002000 +#define mmMMEA7_DRAM_RD_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA7_DRAM_WR_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA7_DRAM_RD_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA7_DRAM_WR_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA7_DRAM_RD_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA7_DRAM_WR_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA7_DRAM_RD_PRI_URGENCY_DEFAULT 0x0000fdb6 +#define mmMMEA7_DRAM_WR_PRI_URGENCY_DEFAULT 0x0000fdb6 +#define mmMMEA7_DRAM_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA7_DRAM_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA7_DRAM_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA7_DRAM_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA7_DRAM_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA7_DRAM_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA7_GMI_RD_CLI2GRP_MAP0_DEFAULT 0x00000000 +#define mmMMEA7_GMI_RD_CLI2GRP_MAP1_DEFAULT 0x00000000 +#define mmMMEA7_GMI_WR_CLI2GRP_MAP0_DEFAULT 0x00000000 +#define mmMMEA7_GMI_WR_CLI2GRP_MAP1_DEFAULT 0x00000000 +#define mmMMEA7_GMI_RD_GRP2VC_MAP_DEFAULT 0x00000fff +#define mmMMEA7_GMI_WR_GRP2VC_MAP_DEFAULT 0x00000fff +#define mmMMEA7_GMI_RD_LAZY_DEFAULT 0x78000924 +#define 
mmMMEA7_GMI_WR_LAZY_DEFAULT 0x78000924 +#define mmMMEA7_GMI_RD_CAM_CNTL_DEFAULT 0x16db4444 +#define mmMMEA7_GMI_WR_CAM_CNTL_DEFAULT 0x16db4444 +#define mmMMEA7_GMI_PAGE_BURST_DEFAULT 0x20002000 +#define mmMMEA7_GMI_RD_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA7_GMI_WR_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA7_GMI_RD_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA7_GMI_WR_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA7_GMI_RD_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA7_GMI_WR_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA7_GMI_RD_PRI_URGENCY_DEFAULT 0x0000fdb6 +#define mmMMEA7_GMI_WR_PRI_URGENCY_DEFAULT 0x0000fdb6 +#define mmMMEA7_GMI_RD_PRI_URGENCY_MASKING_DEFAULT 0xffffffff +#define mmMMEA7_GMI_WR_PRI_URGENCY_MASKING_DEFAULT 0xffffffff +#define mmMMEA7_GMI_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA7_GMI_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA7_GMI_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA7_GMI_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA7_GMI_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA7_GMI_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA7_ADDRNORM_BASE_ADDR0_DEFAULT 0x00000000 +#define mmMMEA7_ADDRNORM_LIMIT_ADDR0_DEFAULT 0x00000000 +#define mmMMEA7_ADDRNORM_BASE_ADDR1_DEFAULT 0x00000000 +#define mmMMEA7_ADDRNORM_LIMIT_ADDR1_DEFAULT 0x00000000 +#define mmMMEA7_ADDRNORM_OFFSET_ADDR1_DEFAULT 0x00000000 +#define mmMMEA7_ADDRNORM_BASE_ADDR2_DEFAULT 0x00000000 +#define mmMMEA7_ADDRNORM_LIMIT_ADDR2_DEFAULT 0x00000000 +#define mmMMEA7_ADDRNORM_BASE_ADDR3_DEFAULT 0x00000000 +#define mmMMEA7_ADDRNORM_LIMIT_ADDR3_DEFAULT 0x00000000 +#define mmMMEA7_ADDRNORM_OFFSET_ADDR3_DEFAULT 0x00000000 +#define mmMMEA7_ADDRNORM_BASE_ADDR4_DEFAULT 0x00000000 +#define mmMMEA7_ADDRNORM_LIMIT_ADDR4_DEFAULT 0x00000000 +#define mmMMEA7_ADDRNORM_BASE_ADDR5_DEFAULT 0x00000000 +#define mmMMEA7_ADDRNORM_LIMIT_ADDR5_DEFAULT 0x00000000 +#define mmMMEA7_ADDRNORM_OFFSET_ADDR5_DEFAULT 0x00000000 +#define mmMMEA7_ADDRNORMDRAM_HOLE_CNTL_DEFAULT 0x00000000 +#define mmMMEA7_ADDRNORMGMI_HOLE_CNTL_DEFAULT 0x00000000 +#define mmMMEA7_ADDRNORMDRAM_NP2_CHANNEL_CFG_DEFAULT 0x00000000 +#define mmMMEA7_ADDRNORMGMI_NP2_CHANNEL_CFG_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDEC_BANK_CFG_DEFAULT 0x000003cf +#define mmMMEA7_ADDRDEC_MISC_CFG_DEFAULT 0xfffff000 +#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_BANK0_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_BANK1_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_BANK2_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_BANK3_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_BANK4_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_BANK5_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_PC_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_PC2_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_CS0_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_CS1_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDECDRAM_HARVEST_ENABLE_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDECGMI_ADDR_HASH_BANK0_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDECGMI_ADDR_HASH_BANK1_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDECGMI_ADDR_HASH_BANK2_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDECGMI_ADDR_HASH_BANK3_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDECGMI_ADDR_HASH_BANK4_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDECGMI_ADDR_HASH_BANK5_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDECGMI_ADDR_HASH_PC_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDECGMI_ADDR_HASH_PC2_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDECGMI_ADDR_HASH_CS0_DEFAULT 0x00000000 
+#define mmMMEA7_ADDRDECGMI_ADDR_HASH_CS1_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDECGMI_HARVEST_ENABLE_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDEC0_BASE_ADDR_CS0_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDEC0_BASE_ADDR_CS1_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDEC0_BASE_ADDR_CS2_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDEC0_BASE_ADDR_CS3_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDEC0_BASE_ADDR_SECCS0_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDEC0_BASE_ADDR_SECCS1_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDEC0_BASE_ADDR_SECCS2_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDEC0_BASE_ADDR_SECCS3_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDEC0_ADDR_MASK_CS01_DEFAULT 0xfffffffe +#define mmMMEA7_ADDRDEC0_ADDR_MASK_CS23_DEFAULT 0xfffffffe +#define mmMMEA7_ADDRDEC0_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe +#define mmMMEA7_ADDRDEC0_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe +#define mmMMEA7_ADDRDEC0_ADDR_CFG_CS01_DEFAULT 0x00050408 +#define mmMMEA7_ADDRDEC0_ADDR_CFG_CS23_DEFAULT 0x00050408 +#define mmMMEA7_ADDRDEC0_ADDR_SEL_CS01_DEFAULT 0x04076543 +#define mmMMEA7_ADDRDEC0_ADDR_SEL_CS23_DEFAULT 0x04076543 +#define mmMMEA7_ADDRDEC0_ADDR_SEL2_CS01_DEFAULT 0x00000008 +#define mmMMEA7_ADDRDEC0_ADDR_SEL2_CS23_DEFAULT 0x00000008 +#define mmMMEA7_ADDRDEC0_COL_SEL_LO_CS01_DEFAULT 0x87654321 +#define mmMMEA7_ADDRDEC0_COL_SEL_LO_CS23_DEFAULT 0x87654321 +#define mmMMEA7_ADDRDEC0_COL_SEL_HI_CS01_DEFAULT 0xa9876543 +#define mmMMEA7_ADDRDEC0_COL_SEL_HI_CS23_DEFAULT 0xa9876543 +#define mmMMEA7_ADDRDEC0_RM_SEL_CS01_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDEC0_RM_SEL_CS23_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDEC0_RM_SEL_SECCS01_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDEC0_RM_SEL_SECCS23_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDEC1_BASE_ADDR_CS0_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDEC1_BASE_ADDR_CS1_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDEC1_BASE_ADDR_CS2_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDEC1_BASE_ADDR_CS3_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDEC1_BASE_ADDR_SECCS0_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDEC1_BASE_ADDR_SECCS1_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDEC1_BASE_ADDR_SECCS2_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDEC1_BASE_ADDR_SECCS3_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDEC1_ADDR_MASK_CS01_DEFAULT 0xfffffffe +#define mmMMEA7_ADDRDEC1_ADDR_MASK_CS23_DEFAULT 0xfffffffe +#define mmMMEA7_ADDRDEC1_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe +#define mmMMEA7_ADDRDEC1_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe +#define mmMMEA7_ADDRDEC1_ADDR_CFG_CS01_DEFAULT 0x00050408 +#define mmMMEA7_ADDRDEC1_ADDR_CFG_CS23_DEFAULT 0x00050408 +#define mmMMEA7_ADDRDEC1_ADDR_SEL_CS01_DEFAULT 0x04076543 +#define mmMMEA7_ADDRDEC1_ADDR_SEL_CS23_DEFAULT 0x04076543 +#define mmMMEA7_ADDRDEC1_ADDR_SEL2_CS01_DEFAULT 0x00000008 +#define mmMMEA7_ADDRDEC1_ADDR_SEL2_CS23_DEFAULT 0x00000008 +#define mmMMEA7_ADDRDEC1_COL_SEL_LO_CS01_DEFAULT 0x87654321 +#define mmMMEA7_ADDRDEC1_COL_SEL_LO_CS23_DEFAULT 0x87654321 +#define mmMMEA7_ADDRDEC1_COL_SEL_HI_CS01_DEFAULT 0xa9876543 +#define mmMMEA7_ADDRDEC1_COL_SEL_HI_CS23_DEFAULT 0xa9876543 +#define mmMMEA7_ADDRDEC1_RM_SEL_CS01_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDEC1_RM_SEL_CS23_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDEC1_RM_SEL_SECCS01_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDEC1_RM_SEL_SECCS23_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDEC2_BASE_ADDR_CS0_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDEC2_BASE_ADDR_CS1_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDEC2_BASE_ADDR_CS2_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDEC2_BASE_ADDR_CS3_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDEC2_BASE_ADDR_SECCS0_DEFAULT 0x00000000 
+#define mmMMEA7_ADDRDEC2_BASE_ADDR_SECCS1_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDEC2_BASE_ADDR_SECCS2_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDEC2_BASE_ADDR_SECCS3_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDEC2_ADDR_MASK_CS01_DEFAULT 0xfffffffe +#define mmMMEA7_ADDRDEC2_ADDR_MASK_CS23_DEFAULT 0xfffffffe +#define mmMMEA7_ADDRDEC2_ADDR_MASK_SECCS01_DEFAULT 0xfffffffe +#define mmMMEA7_ADDRDEC2_ADDR_MASK_SECCS23_DEFAULT 0xfffffffe +#define mmMMEA7_ADDRDEC2_ADDR_CFG_CS01_DEFAULT 0x00050408 +#define mmMMEA7_ADDRDEC2_ADDR_CFG_CS23_DEFAULT 0x00050408 +#define mmMMEA7_ADDRDEC2_ADDR_SEL_CS01_DEFAULT 0x04076543 +#define mmMMEA7_ADDRDEC2_ADDR_SEL_CS23_DEFAULT 0x04076543 +#define mmMMEA7_ADDRDEC2_ADDR_SEL2_CS01_DEFAULT 0x00000008 +#define mmMMEA7_ADDRDEC2_ADDR_SEL2_CS23_DEFAULT 0x00000008 +#define mmMMEA7_ADDRDEC2_COL_SEL_LO_CS01_DEFAULT 0x87654321 +#define mmMMEA7_ADDRDEC2_COL_SEL_LO_CS23_DEFAULT 0x87654321 +#define mmMMEA7_ADDRDEC2_COL_SEL_HI_CS01_DEFAULT 0xa9876543 +#define mmMMEA7_ADDRDEC2_COL_SEL_HI_CS23_DEFAULT 0xa9876543 +#define mmMMEA7_ADDRDEC2_RM_SEL_CS01_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDEC2_RM_SEL_CS23_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDEC2_RM_SEL_SECCS01_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDEC2_RM_SEL_SECCS23_DEFAULT 0x00000000 +#define mmMMEA7_ADDRNORMDRAM_GLOBAL_CNTL_DEFAULT 0x00600000 +#define mmMMEA7_ADDRNORMGMI_GLOBAL_CNTL_DEFAULT 0x00600000 +#define mmMMEA7_IO_RD_CLI2GRP_MAP0_DEFAULT 0xe4e4e4e4 +#define mmMMEA7_IO_RD_CLI2GRP_MAP1_DEFAULT 0xe4e4e4e4 +#define mmMMEA7_IO_WR_CLI2GRP_MAP0_DEFAULT 0xe4e4e4e4 +#define mmMMEA7_IO_WR_CLI2GRP_MAP1_DEFAULT 0xe4e4e4e4 +#define mmMMEA7_IO_RD_COMBINE_FLUSH_DEFAULT 0x00007777 +#define mmMMEA7_IO_WR_COMBINE_FLUSH_DEFAULT 0x00007777 +#define mmMMEA7_IO_GROUP_BURST_DEFAULT 0x1f031f03 +#define mmMMEA7_IO_RD_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA7_IO_WR_PRI_AGE_DEFAULT 0x00db6249 +#define mmMMEA7_IO_RD_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA7_IO_WR_PRI_QUEUING_DEFAULT 0x00000db6 +#define mmMMEA7_IO_RD_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA7_IO_WR_PRI_FIXED_DEFAULT 0x00000924 +#define mmMMEA7_IO_RD_PRI_URGENCY_DEFAULT 0x00000492 +#define mmMMEA7_IO_WR_PRI_URGENCY_DEFAULT 0x00000492 +#define mmMMEA7_IO_RD_PRI_URGENCY_MASKING_DEFAULT 0xffffffff +#define mmMMEA7_IO_WR_PRI_URGENCY_MASKING_DEFAULT 0xffffffff +#define mmMMEA7_IO_RD_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA7_IO_RD_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA7_IO_RD_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA7_IO_WR_PRI_QUANT_PRI1_DEFAULT 0x3f3f3f3f +#define mmMMEA7_IO_WR_PRI_QUANT_PRI2_DEFAULT 0x7f7f7f7f +#define mmMMEA7_IO_WR_PRI_QUANT_PRI3_DEFAULT 0xffffffff +#define mmMMEA7_SDP_ARB_DRAM_DEFAULT 0x00101e40 +#define mmMMEA7_SDP_ARB_GMI_DEFAULT 0x00101e40 +#define mmMMEA7_SDP_ARB_FINAL_DEFAULT 0x00007fff +#define mmMMEA7_SDP_DRAM_PRIORITY_DEFAULT 0x00000000 +#define mmMMEA7_SDP_GMI_PRIORITY_DEFAULT 0x00000000 +#define mmMMEA7_SDP_IO_PRIORITY_DEFAULT 0x00000000 +#define mmMMEA7_SDP_CREDITS_DEFAULT 0x000101bf +#define mmMMEA7_SDP_TAG_RESERVE0_DEFAULT 0x00000000 +#define mmMMEA7_SDP_TAG_RESERVE1_DEFAULT 0x00000000 +#define mmMMEA7_SDP_VCC_RESERVE0_DEFAULT 0x00000000 +#define mmMMEA7_SDP_VCC_RESERVE1_DEFAULT 0x00000000 +#define mmMMEA7_SDP_VCD_RESERVE0_DEFAULT 0x00000000 +#define mmMMEA7_SDP_VCD_RESERVE1_DEFAULT 0x00000000 +#define mmMMEA7_SDP_REQ_CNTL_DEFAULT 0x0000001f +#define mmMMEA7_MISC_DEFAULT 0x0c00a070 +#define mmMMEA7_LATENCY_SAMPLING_DEFAULT 0x00000000 +#define mmMMEA7_PERFCOUNTER_LO_DEFAULT 0x00000000 +#define 
mmMMEA7_PERFCOUNTER_HI_DEFAULT 0x00000000 +#define mmMMEA7_PERFCOUNTER0_CFG_DEFAULT 0x00000000 +#define mmMMEA7_PERFCOUNTER1_CFG_DEFAULT 0x00000000 +#define mmMMEA7_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000 +#define mmMMEA7_EDC_CNT_DEFAULT 0x00000000 +#define mmMMEA7_EDC_CNT2_DEFAULT 0x00000000 +#define mmMMEA7_DSM_CNTL_DEFAULT 0x00000000 +#define mmMMEA7_DSM_CNTLA_DEFAULT 0x00000000 +#define mmMMEA7_DSM_CNTLB_DEFAULT 0x00000000 +#define mmMMEA7_DSM_CNTL2_DEFAULT 0x00000000 +#define mmMMEA7_DSM_CNTL2A_DEFAULT 0x00000000 +#define mmMMEA7_DSM_CNTL2B_DEFAULT 0x00000000 +#define mmMMEA7_CGTT_CLK_CTRL_DEFAULT 0x00000100 +#define mmMMEA7_EDC_MODE_DEFAULT 0x00000000 +#define mmMMEA7_ERR_STATUS_DEFAULT 0x00000300 +#define mmMMEA7_MISC2_DEFAULT 0x00000000 +#define mmMMEA7_ADDRDEC_SELECT_DEFAULT 0x00000000 +#define mmMMEA7_EDC_CNT3_DEFAULT 0x00000000 + + +// addressBlock: mmhub_pctldec1 +#define mmPCTL1_CTRL_DEFAULT 0x00011040 +#define mmPCTL1_MMHUB_DEEPSLEEP_IB_DEFAULT 0x00000000 +#define mmPCTL1_MMHUB_DEEPSLEEP_OVERRIDE_DEFAULT 0x00000000 +#define mmPCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB_DEFAULT 0x00000000 +#define mmPCTL1_PG_IGNORE_DEEPSLEEP_DEFAULT 0x00000000 +#define mmPCTL1_PG_IGNORE_DEEPSLEEP_IB_DEFAULT 0x00000000 +#define mmPCTL1_SLICE0_CFG_DAGB_BUSY_DEFAULT 0x00000000 +#define mmPCTL1_SLICE0_CFG_DS_ALLOW_DEFAULT 0x00000000 +#define mmPCTL1_SLICE0_CFG_DS_ALLOW_IB_DEFAULT 0x00000000 +#define mmPCTL1_SLICE1_CFG_DAGB_BUSY_DEFAULT 0x00000000 +#define mmPCTL1_SLICE1_CFG_DS_ALLOW_DEFAULT 0x00000000 +#define mmPCTL1_SLICE1_CFG_DS_ALLOW_IB_DEFAULT 0x00000000 +#define mmPCTL1_SLICE2_CFG_DAGB_BUSY_DEFAULT 0x00000000 +#define mmPCTL1_SLICE2_CFG_DS_ALLOW_DEFAULT 0x00000000 +#define mmPCTL1_SLICE2_CFG_DS_ALLOW_IB_DEFAULT 0x00000000 +#define mmPCTL1_SLICE3_CFG_DAGB_BUSY_DEFAULT 0x00000000 +#define mmPCTL1_SLICE3_CFG_DS_ALLOW_DEFAULT 0x00000000 +#define mmPCTL1_SLICE3_CFG_DS_ALLOW_IB_DEFAULT 0x00000000 +#define mmPCTL1_SLICE4_CFG_DAGB_BUSY_DEFAULT 0x00000000 +#define mmPCTL1_SLICE4_CFG_DS_ALLOW_DEFAULT 0x00000000 +#define mmPCTL1_SLICE4_CFG_DS_ALLOW_IB_DEFAULT 0x00000000 +#define mmPCTL1_UTCL2_MISC_DEFAULT 0x00011000 +#define mmPCTL1_SLICE0_MISC_DEFAULT 0x00000800 +#define mmPCTL1_SLICE1_MISC_DEFAULT 0x00000800 +#define mmPCTL1_SLICE2_MISC_DEFAULT 0x00000800 +#define mmPCTL1_SLICE3_MISC_DEFAULT 0x00000800 +#define mmPCTL1_SLICE4_MISC_DEFAULT 0x00000800 +#define mmPCTL1_UTCL2_RENG_EXECUTE_DEFAULT 0x00000000 +#define mmPCTL1_SLICE0_RENG_EXECUTE_DEFAULT 0x00000000 +#define mmPCTL1_SLICE1_RENG_EXECUTE_DEFAULT 0x00000000 +#define mmPCTL1_SLICE2_RENG_EXECUTE_DEFAULT 0x00000000 +#define mmPCTL1_SLICE3_RENG_EXECUTE_DEFAULT 0x00000000 +#define mmPCTL1_SLICE4_RENG_EXECUTE_DEFAULT 0x00000000 +#define mmPCTL1_UTCL2_RENG_RAM_INDEX_DEFAULT 0x00000000 +#define mmPCTL1_UTCL2_RENG_RAM_DATA_DEFAULT 0x00000000 +#define mmPCTL1_SLICE0_RENG_RAM_INDEX_DEFAULT 0x00000000 +#define mmPCTL1_SLICE0_RENG_RAM_DATA_DEFAULT 0x00000000 +#define mmPCTL1_SLICE1_RENG_RAM_INDEX_DEFAULT 0x00000000 +#define mmPCTL1_SLICE1_RENG_RAM_DATA_DEFAULT 0x00000000 +#define mmPCTL1_SLICE2_RENG_RAM_INDEX_DEFAULT 0x00000000 +#define mmPCTL1_SLICE2_RENG_RAM_DATA_DEFAULT 0x00000000 +#define mmPCTL1_SLICE3_RENG_RAM_INDEX_DEFAULT 0x00000000 +#define mmPCTL1_SLICE3_RENG_RAM_DATA_DEFAULT 0x00000000 +#define mmPCTL1_SLICE4_RENG_RAM_INDEX_DEFAULT 0x00000000 +#define mmPCTL1_SLICE4_RENG_RAM_DATA_DEFAULT 0x00000000 +#define mmPCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE0_DEFAULT 0x00000000 +#define mmPCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE1_DEFAULT 0x00000000 +#define 
mmPCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE2_DEFAULT 0x00000000 +#define mmPCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE3_DEFAULT 0x00000000 +#define mmPCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE4_DEFAULT 0x00000000 +#define mmPCTL1_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET0_DEFAULT 0xffffffff +#define mmPCTL1_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET1_DEFAULT 0xffffffff +#define mmPCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE0_DEFAULT 0x00000000 +#define mmPCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE1_DEFAULT 0x00000000 +#define mmPCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE2_DEFAULT 0x00000000 +#define mmPCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE3_DEFAULT 0x00000000 +#define mmPCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE4_DEFAULT 0x00000000 +#define mmPCTL1_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET0_DEFAULT 0xffffffff +#define mmPCTL1_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET1_DEFAULT 0xffffffff +#define mmPCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE0_DEFAULT 0x00000000 +#define mmPCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE1_DEFAULT 0x00000000 +#define mmPCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE2_DEFAULT 0x00000000 +#define mmPCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE3_DEFAULT 0x00000000 +#define mmPCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE4_DEFAULT 0x00000000 +#define mmPCTL1_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET0_DEFAULT 0xffffffff +#define mmPCTL1_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET1_DEFAULT 0xffffffff +#define mmPCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE0_DEFAULT 0x00000000 +#define mmPCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE1_DEFAULT 0x00000000 +#define mmPCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE2_DEFAULT 0x00000000 +#define mmPCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE3_DEFAULT 0x00000000 +#define mmPCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE4_DEFAULT 0x00000000 +#define mmPCTL1_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET0_DEFAULT 0xffffffff +#define mmPCTL1_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET1_DEFAULT 0xffffffff +#define mmPCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE0_DEFAULT 0x00000000 +#define mmPCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE1_DEFAULT 0x00000000 +#define mmPCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE2_DEFAULT 0x00000000 +#define mmPCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE3_DEFAULT 0x00000000 +#define mmPCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE4_DEFAULT 0x00000000 +#define mmPCTL1_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET0_DEFAULT 0xffffffff +#define mmPCTL1_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET1_DEFAULT 0xffffffff +#define mmPCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE0_DEFAULT 0x00000000 +#define mmPCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE1_DEFAULT 0x00000000 +#define mmPCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE2_DEFAULT 0x00000000 +#define mmPCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE3_DEFAULT 0x00000000 +#define mmPCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE4_DEFAULT 0x00000000 +#define mmPCTL1_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET0_DEFAULT 0xffffffff +#define mmPCTL1_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET1_DEFAULT 0xffffffff + + +// addressBlock: mmhub_l1tlb_vml1dec:1 +#define mmVML1_1_MC_VM_MX_L1_TLB0_STATUS_DEFAULT 0x00000000 +#define mmVML1_1_MC_VM_MX_L1_TLB1_STATUS_DEFAULT 0x00000000 +#define mmVML1_1_MC_VM_MX_L1_TLB2_STATUS_DEFAULT 0x00000000 +#define mmVML1_1_MC_VM_MX_L1_TLB3_STATUS_DEFAULT 0x00000000 +#define mmVML1_1_MC_VM_MX_L1_TLB4_STATUS_DEFAULT 0x00000000 +#define mmVML1_1_MC_VM_MX_L1_TLB5_STATUS_DEFAULT 0x00000000 +#define mmVML1_1_MC_VM_MX_L1_TLB6_STATUS_DEFAULT 0x00000000 +#define mmVML1_1_MC_VM_MX_L1_TLB7_STATUS_DEFAULT 0x00000000 +#define mmVML1_1_MC_VM_MX_L1_TMZ_CNTL_DEFAULT 0x00000000 + + +// addressBlock: mmhub_l1tlb_vml1pldec:1 +#define mmVML1PL1_MC_VM_MX_L1_PERFCOUNTER0_CFG_DEFAULT 
0x00000000 +#define mmVML1PL1_MC_VM_MX_L1_PERFCOUNTER1_CFG_DEFAULT 0x00000000 +#define mmVML1PL1_MC_VM_MX_L1_PERFCOUNTER2_CFG_DEFAULT 0x00000000 +#define mmVML1PL1_MC_VM_MX_L1_PERFCOUNTER3_CFG_DEFAULT 0x00000000 +#define mmVML1PL1_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000 + + +// addressBlock: mmhub_l1tlb_vml1prdec:1 +#define mmVML1PR1_MC_VM_MX_L1_PERFCOUNTER_LO_DEFAULT 0x00000000 +#define mmVML1PR1_MC_VM_MX_L1_PERFCOUNTER_HI_DEFAULT 0x00000000 + + +// addressBlock: mmhub_utcl2_atcl2dec:1 +#define mmATCL2_1_ATC_L2_CNTL_DEFAULT 0x0001c0c9 +#define mmATCL2_1_ATC_L2_CNTL2_DEFAULT 0x00600100 +#define mmATCL2_1_ATC_L2_CACHE_DATA0_DEFAULT 0x00000000 +#define mmATCL2_1_ATC_L2_CACHE_DATA1_DEFAULT 0x00000000 +#define mmATCL2_1_ATC_L2_CACHE_DATA2_DEFAULT 0x00000000 +#define mmATCL2_1_ATC_L2_CNTL3_DEFAULT 0x000001f8 +#define mmATCL2_1_ATC_L2_STATUS_DEFAULT 0x00000000 +#define mmATCL2_1_ATC_L2_STATUS2_DEFAULT 0x00000000 +#define mmATCL2_1_ATC_L2_STATUS3_DEFAULT 0x00000000 +#define mmATCL2_1_ATC_L2_MISC_CG_DEFAULT 0x00000200 +#define mmATCL2_1_ATC_L2_MEM_POWER_LS_DEFAULT 0x00000208 +#define mmATCL2_1_ATC_L2_CGTT_CLK_CTRL_DEFAULT 0x00000080 +#define mmATCL2_1_ATC_L2_CACHE_4K_DSM_INDEX_DEFAULT 0x00000000 +#define mmATCL2_1_ATC_L2_CACHE_2M_DSM_INDEX_DEFAULT 0x00000000 +#define mmATCL2_1_ATC_L2_CACHE_4K_DSM_CNTL_DEFAULT 0x00000000 +#define mmATCL2_1_ATC_L2_CACHE_2M_DSM_CNTL_DEFAULT 0x00000000 +#define mmATCL2_1_ATC_L2_CNTL4_DEFAULT 0x00000000 +#define mmATCL2_1_ATC_L2_MM_GROUP_RT_CLASSES_DEFAULT 0x00000005 + + +// addressBlock: mmhub_utcl2_vml2pfdec:1 +#define mmVML2PF1_VM_L2_CNTL_DEFAULT 0x00080602 +#define mmVML2PF1_VM_L2_CNTL2_DEFAULT 0x00000000 +#define mmVML2PF1_VM_L2_CNTL3_DEFAULT 0x80100007 +#define mmVML2PF1_VM_L2_STATUS_DEFAULT 0x00000000 +#define mmVML2PF1_VM_DUMMY_PAGE_FAULT_CNTL_DEFAULT 0x00000090 +#define mmVML2PF1_VM_DUMMY_PAGE_FAULT_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2PF1_VM_DUMMY_PAGE_FAULT_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2PF1_VM_L2_PROTECTION_FAULT_CNTL_DEFAULT 0x3ffffffc +#define mmVML2PF1_VM_L2_PROTECTION_FAULT_CNTL2_DEFAULT 0x000a0000 +#define mmVML2PF1_VM_L2_PROTECTION_FAULT_MM_CNTL3_DEFAULT 0xffffffff +#define mmVML2PF1_VM_L2_PROTECTION_FAULT_MM_CNTL4_DEFAULT 0xffffffff +#define mmVML2PF1_VM_L2_PROTECTION_FAULT_STATUS_DEFAULT 0x00000000 +#define mmVML2PF1_VM_L2_PROTECTION_FAULT_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2PF1_VM_L2_PROTECTION_FAULT_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2PF1_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2PF1_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2PF1_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32_DEFAULT 0x00000000 +#define mmVML2PF1_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32_DEFAULT 0x00000000 +#define mmVML2PF1_VM_L2_CNTL4_DEFAULT 0x000000c1 +#define mmVML2PF1_VM_L2_MM_GROUP_RT_CLASSES_DEFAULT 0x00000000 +#define mmVML2PF1_VM_L2_BANK_SELECT_RESERVED_CID_DEFAULT 0x00000000 +#define mmVML2PF1_VM_L2_BANK_SELECT_RESERVED_CID2_DEFAULT 0x00000000 +#define mmVML2PF1_VM_L2_CACHE_PARITY_CNTL_DEFAULT 0x00000000 +#define mmVML2PF1_VM_L2_CGTT_CLK_CTRL_DEFAULT 0x00000080 + + +// addressBlock: mmhub_utcl2_vml2vcdec:1 +#define 
mmVML2VC1_VM_CONTEXT0_CNTL_DEFAULT 0x007ffe80 +#define mmVML2VC1_VM_CONTEXT1_CNTL_DEFAULT 0x007ffe80 +#define mmVML2VC1_VM_CONTEXT2_CNTL_DEFAULT 0x007ffe80 +#define mmVML2VC1_VM_CONTEXT3_CNTL_DEFAULT 0x007ffe80 +#define mmVML2VC1_VM_CONTEXT4_CNTL_DEFAULT 0x007ffe80 +#define mmVML2VC1_VM_CONTEXT5_CNTL_DEFAULT 0x007ffe80 +#define mmVML2VC1_VM_CONTEXT6_CNTL_DEFAULT 0x007ffe80 +#define mmVML2VC1_VM_CONTEXT7_CNTL_DEFAULT 0x007ffe80 +#define mmVML2VC1_VM_CONTEXT8_CNTL_DEFAULT 0x007ffe80 +#define mmVML2VC1_VM_CONTEXT9_CNTL_DEFAULT 0x007ffe80 +#define mmVML2VC1_VM_CONTEXT10_CNTL_DEFAULT 0x007ffe80 +#define mmVML2VC1_VM_CONTEXT11_CNTL_DEFAULT 0x007ffe80 +#define mmVML2VC1_VM_CONTEXT12_CNTL_DEFAULT 0x007ffe80 +#define mmVML2VC1_VM_CONTEXT13_CNTL_DEFAULT 0x007ffe80 +#define mmVML2VC1_VM_CONTEXT14_CNTL_DEFAULT 0x007ffe80 +#define mmVML2VC1_VM_CONTEXT15_CNTL_DEFAULT 0x007ffe80 +#define mmVML2VC1_VM_CONTEXTS_DISABLE_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG0_SEM_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG1_SEM_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG2_SEM_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG3_SEM_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG4_SEM_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG5_SEM_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG6_SEM_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG7_SEM_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG8_SEM_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG9_SEM_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG10_SEM_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG11_SEM_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG12_SEM_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG13_SEM_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG14_SEM_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG15_SEM_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG16_SEM_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG17_SEM_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG0_REQ_DEFAULT 0x017c0000 +#define mmVML2VC1_VM_INVALIDATE_ENG1_REQ_DEFAULT 0x017c0000 +#define mmVML2VC1_VM_INVALIDATE_ENG2_REQ_DEFAULT 0x017c0000 +#define mmVML2VC1_VM_INVALIDATE_ENG3_REQ_DEFAULT 0x017c0000 +#define mmVML2VC1_VM_INVALIDATE_ENG4_REQ_DEFAULT 0x017c0000 +#define mmVML2VC1_VM_INVALIDATE_ENG5_REQ_DEFAULT 0x017c0000 +#define mmVML2VC1_VM_INVALIDATE_ENG6_REQ_DEFAULT 0x017c0000 +#define mmVML2VC1_VM_INVALIDATE_ENG7_REQ_DEFAULT 0x017c0000 +#define mmVML2VC1_VM_INVALIDATE_ENG8_REQ_DEFAULT 0x017c0000 +#define mmVML2VC1_VM_INVALIDATE_ENG9_REQ_DEFAULT 0x017c0000 +#define mmVML2VC1_VM_INVALIDATE_ENG10_REQ_DEFAULT 0x017c0000 +#define mmVML2VC1_VM_INVALIDATE_ENG11_REQ_DEFAULT 0x017c0000 +#define mmVML2VC1_VM_INVALIDATE_ENG12_REQ_DEFAULT 0x017c0000 +#define mmVML2VC1_VM_INVALIDATE_ENG13_REQ_DEFAULT 0x017c0000 +#define mmVML2VC1_VM_INVALIDATE_ENG14_REQ_DEFAULT 0x017c0000 +#define mmVML2VC1_VM_INVALIDATE_ENG15_REQ_DEFAULT 0x017c0000 +#define mmVML2VC1_VM_INVALIDATE_ENG16_REQ_DEFAULT 0x017c0000 +#define mmVML2VC1_VM_INVALIDATE_ENG17_REQ_DEFAULT 0x017c0000 +#define mmVML2VC1_VM_INVALIDATE_ENG0_ACK_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG1_ACK_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG2_ACK_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG3_ACK_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG4_ACK_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG5_ACK_DEFAULT 0x00000000 +#define 
mmVML2VC1_VM_INVALIDATE_ENG6_ACK_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG7_ACK_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG8_ACK_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG9_ACK_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG10_ACK_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG11_ACK_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG12_ACK_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG13_ACK_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG14_ACK_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG15_ACK_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG16_ACK_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG17_ACK_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG0_ADDR_RANGE_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG0_ADDR_RANGE_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG1_ADDR_RANGE_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG1_ADDR_RANGE_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG2_ADDR_RANGE_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG2_ADDR_RANGE_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG3_ADDR_RANGE_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG3_ADDR_RANGE_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG4_ADDR_RANGE_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG4_ADDR_RANGE_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG5_ADDR_RANGE_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG5_ADDR_RANGE_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG6_ADDR_RANGE_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG6_ADDR_RANGE_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG7_ADDR_RANGE_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG7_ADDR_RANGE_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG8_ADDR_RANGE_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG8_ADDR_RANGE_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG9_ADDR_RANGE_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG9_ADDR_RANGE_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG10_ADDR_RANGE_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG10_ADDR_RANGE_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG11_ADDR_RANGE_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG11_ADDR_RANGE_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG12_ADDR_RANGE_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG12_ADDR_RANGE_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG13_ADDR_RANGE_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG13_ADDR_RANGE_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG14_ADDR_RANGE_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG14_ADDR_RANGE_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG15_ADDR_RANGE_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG15_ADDR_RANGE_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG17_ADDR_RANGE_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_INVALIDATE_ENG17_ADDR_RANGE_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000 +#define 
mmVML2VC1_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000 +#define 
mmVML2VC1_VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000 +#define 
mmVML2VC1_VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32_DEFAULT 0x00000000 +#define mmVML2VC1_VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32_DEFAULT 0x00000000 + + +// addressBlock: mmhub_utcl2_vmsharedpfdec:1 +#define mmVMSHAREDPF1_MC_VM_NB_MMIOBASE_DEFAULT 0x00000000 +#define mmVMSHAREDPF1_MC_VM_NB_MMIOLIMIT_DEFAULT 0x00000000 +#define mmVMSHAREDPF1_MC_VM_NB_PCI_CTRL_DEFAULT 0x00000000 +#define mmVMSHAREDPF1_MC_VM_NB_PCI_ARB_DEFAULT 0x00000008 +#define mmVMSHAREDPF1_MC_VM_NB_TOP_OF_DRAM_SLOT1_DEFAULT 0x00000000 +#define mmVMSHAREDPF1_MC_VM_NB_LOWER_TOP_OF_DRAM2_DEFAULT 0x00000000 +#define mmVMSHAREDPF1_MC_VM_NB_UPPER_TOP_OF_DRAM2_DEFAULT 0x00000000 +#define mmVMSHAREDPF1_MC_VM_FB_OFFSET_DEFAULT 0x00000000 +#define mmVMSHAREDPF1_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB_DEFAULT 0x00000000 +#define mmVMSHAREDPF1_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB_DEFAULT 0x00000000 +#define mmVMSHAREDPF1_MC_VM_STEERING_DEFAULT 0x00000001 +#define mmVMSHAREDPF1_MC_SHARED_VIRT_RESET_REQ_DEFAULT 0x00000000 +#define mmVMSHAREDPF1_MC_MEM_POWER_LS_DEFAULT 0x00000208 +#define mmVMSHAREDPF1_MC_VM_CACHEABLE_DRAM_ADDRESS_START_DEFAULT 0x00000000 +#define mmVMSHAREDPF1_MC_VM_CACHEABLE_DRAM_ADDRESS_END_DEFAULT 0x00000000 +#define mmVMSHAREDPF1_MC_VM_APT_CNTL_DEFAULT 0x00000000 +#define mmVMSHAREDPF1_MC_VM_LOCAL_HBM_ADDRESS_START_DEFAULT 0x00000000 +#define mmVMSHAREDPF1_MC_VM_LOCAL_HBM_ADDRESS_END_DEFAULT 0x000fffff +#define mmVMSHAREDPF1_MC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL_DEFAULT 0x00000000 +#define mmVMSHAREDPF1_MC_VM_XGMI_LFB_CNTL_DEFAULT 0x00000000 +#define mmVMSHAREDPF1_MC_VM_XGMI_LFB_SIZE_DEFAULT 0x00000000 +#define mmVMSHAREDPF1_MC_VM_CACHEABLE_DRAM_CNTL_DEFAULT 0x00000000 + + +// addressBlock: mmhub_utcl2_vmsharedvcdec:1 +#define mmVMSHAREDVC1_MC_VM_FB_LOCATION_BASE_DEFAULT 0x00000000 +#define mmVMSHAREDVC1_MC_VM_FB_LOCATION_TOP_DEFAULT 0x00000000 +#define mmVMSHAREDVC1_MC_VM_AGP_TOP_DEFAULT 0x00000000 +#define mmVMSHAREDVC1_MC_VM_AGP_BOT_DEFAULT 0x00000000 +#define mmVMSHAREDVC1_MC_VM_AGP_BASE_DEFAULT 0x00000000 +#define mmVMSHAREDVC1_MC_VM_SYSTEM_APERTURE_LOW_ADDR_DEFAULT 0x00000000 +#define mmVMSHAREDVC1_MC_VM_SYSTEM_APERTURE_HIGH_ADDR_DEFAULT 0x00000000 +#define mmVMSHAREDVC1_MC_VM_MX_L1_TLB_CNTL_DEFAULT 0x00002501 + + +// addressBlock: mmhub_utcl2_vmsharedhvdec:1 +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF0_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF1_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF2_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF3_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF4_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF5_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF6_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF7_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF8_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF9_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF10_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF11_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF12_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF13_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF14_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF15_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_VM_IOMMU_MMIO_CNTRL_1_DEFAULT 0x00000100 +#define mmVMSHAREDHV1_MC_VM_MARC_BASE_LO_0_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_MC_VM_MARC_BASE_LO_1_DEFAULT 0x00000000 +#define 
mmVMSHAREDHV1_MC_VM_MARC_BASE_LO_2_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_MC_VM_MARC_BASE_LO_3_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_MC_VM_MARC_BASE_HI_0_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_MC_VM_MARC_BASE_HI_1_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_MC_VM_MARC_BASE_HI_2_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_MC_VM_MARC_BASE_HI_3_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_LO_0_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_LO_1_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_LO_2_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_LO_3_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_HI_0_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_HI_1_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_HI_2_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_HI_3_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_MC_VM_MARC_LEN_LO_0_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_MC_VM_MARC_LEN_LO_1_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_MC_VM_MARC_LEN_LO_2_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_MC_VM_MARC_LEN_LO_3_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_MC_VM_MARC_LEN_HI_0_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_MC_VM_MARC_LEN_HI_1_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_MC_VM_MARC_LEN_HI_2_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_MC_VM_MARC_LEN_HI_3_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_VM_IOMMU_CONTROL_REGISTER_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_VM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_0_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_1_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_2_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_3_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_4_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_5_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_6_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_7_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_8_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_9_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_10_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_11_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_12_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_13_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_14_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_15_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_UTCL2_CGTT_CLK_CTRL_DEFAULT 0x00000080 +#define mmVMSHAREDHV1_MC_SHARED_ACTIVE_FCN_ID_DEFAULT 0x00000000 +#define mmVMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE_DEFAULT 0x00000000 + + +// addressBlock: mmhub_utcl2_atcl2pfcntrdec:1 +#define mmATCL2PFCNTR1_ATC_L2_PERFCOUNTER_LO_DEFAULT 0x00000000 +#define mmATCL2PFCNTR1_ATC_L2_PERFCOUNTER_HI_DEFAULT 0x00000000 + + +// addressBlock: mmhub_utcl2_atcl2pfcntldec:1 +#define mmATCL2PFCNTL1_ATC_L2_PERFCOUNTER0_CFG_DEFAULT 0x00000000 +#define mmATCL2PFCNTL1_ATC_L2_PERFCOUNTER1_CFG_DEFAULT 0x00000000 +#define mmATCL2PFCNTL1_ATC_L2_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000 + + +// addressBlock: mmhub_utcl2_vml2pldec:1 +#define mmVML2PL1_MC_VM_L2_PERFCOUNTER0_CFG_DEFAULT 0x00000000 +#define mmVML2PL1_MC_VM_L2_PERFCOUNTER1_CFG_DEFAULT 0x00000000 +#define mmVML2PL1_MC_VM_L2_PERFCOUNTER2_CFG_DEFAULT 0x00000000 +#define mmVML2PL1_MC_VM_L2_PERFCOUNTER3_CFG_DEFAULT 
0x00000000
+#define mmVML2PL1_MC_VM_L2_PERFCOUNTER4_CFG_DEFAULT 0x00000000
+#define mmVML2PL1_MC_VM_L2_PERFCOUNTER5_CFG_DEFAULT 0x00000000
+#define mmVML2PL1_MC_VM_L2_PERFCOUNTER6_CFG_DEFAULT 0x00000000
+#define mmVML2PL1_MC_VM_L2_PERFCOUNTER7_CFG_DEFAULT 0x00000000
+#define mmVML2PL1_MC_VM_L2_PERFCOUNTER_RSLT_CNTL_DEFAULT 0x04000000
+
+
+// addressBlock: mmhub_utcl2_vml2prdec:1
+#define mmVML2PR1_MC_VM_L2_PERFCOUNTER_LO_DEFAULT 0x00000000
+#define mmVML2PR1_MC_VM_L2_PERFCOUNTER_HI_DEFAULT 0x00000000
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h
new file mode 100644
index 000000000000..d8632ccf3494
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h
@@ -0,0 +1,7753 @@
+/*
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ +#ifndef _mmhub_9_4_1_OFFSET_HEADER +#define _mmhub_9_4_1_OFFSET_HEADER + + + +// addressBlock: mmhub_dagb_dagbdec0 +// base address: 0x68000 +#define mmDAGB0_RDCLI0 0x0000 +#define mmDAGB0_RDCLI0_BASE_IDX 1 +#define mmDAGB0_RDCLI1 0x0001 +#define mmDAGB0_RDCLI1_BASE_IDX 1 +#define mmDAGB0_RDCLI2 0x0002 +#define mmDAGB0_RDCLI2_BASE_IDX 1 +#define mmDAGB0_RDCLI3 0x0003 +#define mmDAGB0_RDCLI3_BASE_IDX 1 +#define mmDAGB0_RDCLI4 0x0004 +#define mmDAGB0_RDCLI4_BASE_IDX 1 +#define mmDAGB0_RDCLI5 0x0005 +#define mmDAGB0_RDCLI5_BASE_IDX 1 +#define mmDAGB0_RDCLI6 0x0006 +#define mmDAGB0_RDCLI6_BASE_IDX 1 +#define mmDAGB0_RDCLI7 0x0007 +#define mmDAGB0_RDCLI7_BASE_IDX 1 +#define mmDAGB0_RDCLI8 0x0008 +#define mmDAGB0_RDCLI8_BASE_IDX 1 +#define mmDAGB0_RDCLI9 0x0009 +#define mmDAGB0_RDCLI9_BASE_IDX 1 +#define mmDAGB0_RDCLI10 0x000a +#define mmDAGB0_RDCLI10_BASE_IDX 1 +#define mmDAGB0_RDCLI11 0x000b +#define mmDAGB0_RDCLI11_BASE_IDX 1 +#define mmDAGB0_RDCLI12 0x000c +#define mmDAGB0_RDCLI12_BASE_IDX 1 +#define mmDAGB0_RDCLI13 0x000d +#define mmDAGB0_RDCLI13_BASE_IDX 1 +#define mmDAGB0_RDCLI14 0x000e +#define mmDAGB0_RDCLI14_BASE_IDX 1 +#define mmDAGB0_RDCLI15 0x000f +#define mmDAGB0_RDCLI15_BASE_IDX 1 +#define mmDAGB0_RD_CNTL 0x0010 +#define mmDAGB0_RD_CNTL_BASE_IDX 1 +#define mmDAGB0_RD_GMI_CNTL 0x0011 +#define mmDAGB0_RD_GMI_CNTL_BASE_IDX 1 +#define mmDAGB0_RD_ADDR_DAGB 0x0012 +#define mmDAGB0_RD_ADDR_DAGB_BASE_IDX 1 +#define mmDAGB0_RD_OUTPUT_DAGB_MAX_BURST 0x0013 +#define mmDAGB0_RD_OUTPUT_DAGB_MAX_BURST_BASE_IDX 1 +#define mmDAGB0_RD_OUTPUT_DAGB_LAZY_TIMER 0x0014 +#define mmDAGB0_RD_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 1 +#define mmDAGB0_RD_CGTT_CLK_CTRL 0x0015 +#define mmDAGB0_RD_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB0_L1TLB_RD_CGTT_CLK_CTRL 0x0016 +#define mmDAGB0_L1TLB_RD_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB0_ATCVM_RD_CGTT_CLK_CTRL 0x0017 +#define mmDAGB0_ATCVM_RD_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB0_RD_ADDR_DAGB_MAX_BURST0 0x0018 +#define mmDAGB0_RD_ADDR_DAGB_MAX_BURST0_BASE_IDX 1 +#define mmDAGB0_RD_ADDR_DAGB_LAZY_TIMER0 0x0019 +#define mmDAGB0_RD_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 1 +#define mmDAGB0_RD_ADDR_DAGB_MAX_BURST1 0x001a +#define mmDAGB0_RD_ADDR_DAGB_MAX_BURST1_BASE_IDX 1 +#define mmDAGB0_RD_ADDR_DAGB_LAZY_TIMER1 0x001b +#define mmDAGB0_RD_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 1 +#define mmDAGB0_RD_VC0_CNTL 0x001c +#define mmDAGB0_RD_VC0_CNTL_BASE_IDX 1 +#define mmDAGB0_RD_VC1_CNTL 0x001d +#define mmDAGB0_RD_VC1_CNTL_BASE_IDX 1 +#define mmDAGB0_RD_VC2_CNTL 0x001e +#define mmDAGB0_RD_VC2_CNTL_BASE_IDX 1 +#define mmDAGB0_RD_VC3_CNTL 0x001f +#define mmDAGB0_RD_VC3_CNTL_BASE_IDX 1 +#define mmDAGB0_RD_VC4_CNTL 0x0020 +#define mmDAGB0_RD_VC4_CNTL_BASE_IDX 1 +#define mmDAGB0_RD_VC5_CNTL 0x0021 +#define mmDAGB0_RD_VC5_CNTL_BASE_IDX 1 +#define mmDAGB0_RD_VC6_CNTL 0x0022 +#define mmDAGB0_RD_VC6_CNTL_BASE_IDX 1 +#define mmDAGB0_RD_VC7_CNTL 0x0023 +#define mmDAGB0_RD_VC7_CNTL_BASE_IDX 1 +#define mmDAGB0_RD_CNTL_MISC 0x0024 +#define mmDAGB0_RD_CNTL_MISC_BASE_IDX 1 +#define mmDAGB0_RD_TLB_CREDIT 0x0025 +#define mmDAGB0_RD_TLB_CREDIT_BASE_IDX 1 +#define mmDAGB0_RDCLI_ASK_PENDING 0x0026 +#define mmDAGB0_RDCLI_ASK_PENDING_BASE_IDX 1 +#define mmDAGB0_RDCLI_GO_PENDING 0x0027 +#define mmDAGB0_RDCLI_GO_PENDING_BASE_IDX 1 +#define mmDAGB0_RDCLI_GBLSEND_PENDING 0x0028 +#define mmDAGB0_RDCLI_GBLSEND_PENDING_BASE_IDX 1 +#define mmDAGB0_RDCLI_TLB_PENDING 0x0029 +#define mmDAGB0_RDCLI_TLB_PENDING_BASE_IDX 1 +#define mmDAGB0_RDCLI_OARB_PENDING 0x002a +#define mmDAGB0_RDCLI_OARB_PENDING_BASE_IDX 1 +#define 
mmDAGB0_RDCLI_OSD_PENDING 0x002b +#define mmDAGB0_RDCLI_OSD_PENDING_BASE_IDX 1 +#define mmDAGB0_WRCLI0 0x002c +#define mmDAGB0_WRCLI0_BASE_IDX 1 +#define mmDAGB0_WRCLI1 0x002d +#define mmDAGB0_WRCLI1_BASE_IDX 1 +#define mmDAGB0_WRCLI2 0x002e +#define mmDAGB0_WRCLI2_BASE_IDX 1 +#define mmDAGB0_WRCLI3 0x002f +#define mmDAGB0_WRCLI3_BASE_IDX 1 +#define mmDAGB0_WRCLI4 0x0030 +#define mmDAGB0_WRCLI4_BASE_IDX 1 +#define mmDAGB0_WRCLI5 0x0031 +#define mmDAGB0_WRCLI5_BASE_IDX 1 +#define mmDAGB0_WRCLI6 0x0032 +#define mmDAGB0_WRCLI6_BASE_IDX 1 +#define mmDAGB0_WRCLI7 0x0033 +#define mmDAGB0_WRCLI7_BASE_IDX 1 +#define mmDAGB0_WRCLI8 0x0034 +#define mmDAGB0_WRCLI8_BASE_IDX 1 +#define mmDAGB0_WRCLI9 0x0035 +#define mmDAGB0_WRCLI9_BASE_IDX 1 +#define mmDAGB0_WRCLI10 0x0036 +#define mmDAGB0_WRCLI10_BASE_IDX 1 +#define mmDAGB0_WRCLI11 0x0037 +#define mmDAGB0_WRCLI11_BASE_IDX 1 +#define mmDAGB0_WRCLI12 0x0038 +#define mmDAGB0_WRCLI12_BASE_IDX 1 +#define mmDAGB0_WRCLI13 0x0039 +#define mmDAGB0_WRCLI13_BASE_IDX 1 +#define mmDAGB0_WRCLI14 0x003a +#define mmDAGB0_WRCLI14_BASE_IDX 1 +#define mmDAGB0_WRCLI15 0x003b +#define mmDAGB0_WRCLI15_BASE_IDX 1 +#define mmDAGB0_WR_CNTL 0x003c +#define mmDAGB0_WR_CNTL_BASE_IDX 1 +#define mmDAGB0_WR_GMI_CNTL 0x003d +#define mmDAGB0_WR_GMI_CNTL_BASE_IDX 1 +#define mmDAGB0_WR_ADDR_DAGB 0x003e +#define mmDAGB0_WR_ADDR_DAGB_BASE_IDX 1 +#define mmDAGB0_WR_OUTPUT_DAGB_MAX_BURST 0x003f +#define mmDAGB0_WR_OUTPUT_DAGB_MAX_BURST_BASE_IDX 1 +#define mmDAGB0_WR_OUTPUT_DAGB_LAZY_TIMER 0x0040 +#define mmDAGB0_WR_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 1 +#define mmDAGB0_WR_CGTT_CLK_CTRL 0x0041 +#define mmDAGB0_WR_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB0_L1TLB_WR_CGTT_CLK_CTRL 0x0042 +#define mmDAGB0_L1TLB_WR_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB0_ATCVM_WR_CGTT_CLK_CTRL 0x0043 +#define mmDAGB0_ATCVM_WR_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB0_WR_ADDR_DAGB_MAX_BURST0 0x0044 +#define mmDAGB0_WR_ADDR_DAGB_MAX_BURST0_BASE_IDX 1 +#define mmDAGB0_WR_ADDR_DAGB_LAZY_TIMER0 0x0045 +#define mmDAGB0_WR_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 1 +#define mmDAGB0_WR_ADDR_DAGB_MAX_BURST1 0x0046 +#define mmDAGB0_WR_ADDR_DAGB_MAX_BURST1_BASE_IDX 1 +#define mmDAGB0_WR_ADDR_DAGB_LAZY_TIMER1 0x0047 +#define mmDAGB0_WR_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 1 +#define mmDAGB0_WR_DATA_DAGB 0x0048 +#define mmDAGB0_WR_DATA_DAGB_BASE_IDX 1 +#define mmDAGB0_WR_DATA_DAGB_MAX_BURST0 0x0049 +#define mmDAGB0_WR_DATA_DAGB_MAX_BURST0_BASE_IDX 1 +#define mmDAGB0_WR_DATA_DAGB_LAZY_TIMER0 0x004a +#define mmDAGB0_WR_DATA_DAGB_LAZY_TIMER0_BASE_IDX 1 +#define mmDAGB0_WR_DATA_DAGB_MAX_BURST1 0x004b +#define mmDAGB0_WR_DATA_DAGB_MAX_BURST1_BASE_IDX 1 +#define mmDAGB0_WR_DATA_DAGB_LAZY_TIMER1 0x004c +#define mmDAGB0_WR_DATA_DAGB_LAZY_TIMER1_BASE_IDX 1 +#define mmDAGB0_WR_VC0_CNTL 0x004d +#define mmDAGB0_WR_VC0_CNTL_BASE_IDX 1 +#define mmDAGB0_WR_VC1_CNTL 0x004e +#define mmDAGB0_WR_VC1_CNTL_BASE_IDX 1 +#define mmDAGB0_WR_VC2_CNTL 0x004f +#define mmDAGB0_WR_VC2_CNTL_BASE_IDX 1 +#define mmDAGB0_WR_VC3_CNTL 0x0050 +#define mmDAGB0_WR_VC3_CNTL_BASE_IDX 1 +#define mmDAGB0_WR_VC4_CNTL 0x0051 +#define mmDAGB0_WR_VC4_CNTL_BASE_IDX 1 +#define mmDAGB0_WR_VC5_CNTL 0x0052 +#define mmDAGB0_WR_VC5_CNTL_BASE_IDX 1 +#define mmDAGB0_WR_VC6_CNTL 0x0053 +#define mmDAGB0_WR_VC6_CNTL_BASE_IDX 1 +#define mmDAGB0_WR_VC7_CNTL 0x0054 +#define mmDAGB0_WR_VC7_CNTL_BASE_IDX 1 +#define mmDAGB0_WR_CNTL_MISC 0x0055 +#define mmDAGB0_WR_CNTL_MISC_BASE_IDX 1 +#define mmDAGB0_WR_TLB_CREDIT 0x0056 +#define mmDAGB0_WR_TLB_CREDIT_BASE_IDX 1 +#define mmDAGB0_WR_DATA_CREDIT 0x0057 
+#define mmDAGB0_WR_DATA_CREDIT_BASE_IDX 1 +#define mmDAGB0_WR_MISC_CREDIT 0x0058 +#define mmDAGB0_WR_MISC_CREDIT_BASE_IDX 1 +#define mmDAGB0_WRCLI_ASK_PENDING 0x005d +#define mmDAGB0_WRCLI_ASK_PENDING_BASE_IDX 1 +#define mmDAGB0_WRCLI_GO_PENDING 0x005e +#define mmDAGB0_WRCLI_GO_PENDING_BASE_IDX 1 +#define mmDAGB0_WRCLI_GBLSEND_PENDING 0x005f +#define mmDAGB0_WRCLI_GBLSEND_PENDING_BASE_IDX 1 +#define mmDAGB0_WRCLI_TLB_PENDING 0x0060 +#define mmDAGB0_WRCLI_TLB_PENDING_BASE_IDX 1 +#define mmDAGB0_WRCLI_OARB_PENDING 0x0061 +#define mmDAGB0_WRCLI_OARB_PENDING_BASE_IDX 1 +#define mmDAGB0_WRCLI_OSD_PENDING 0x0062 +#define mmDAGB0_WRCLI_OSD_PENDING_BASE_IDX 1 +#define mmDAGB0_WRCLI_DBUS_ASK_PENDING 0x0063 +#define mmDAGB0_WRCLI_DBUS_ASK_PENDING_BASE_IDX 1 +#define mmDAGB0_WRCLI_DBUS_GO_PENDING 0x0064 +#define mmDAGB0_WRCLI_DBUS_GO_PENDING_BASE_IDX 1 +#define mmDAGB0_DAGB_DLY 0x0065 +#define mmDAGB0_DAGB_DLY_BASE_IDX 1 +#define mmDAGB0_CNTL_MISC 0x0066 +#define mmDAGB0_CNTL_MISC_BASE_IDX 1 +#define mmDAGB0_CNTL_MISC2 0x0067 +#define mmDAGB0_CNTL_MISC2_BASE_IDX 1 +#define mmDAGB0_FIFO_EMPTY 0x0068 +#define mmDAGB0_FIFO_EMPTY_BASE_IDX 1 +#define mmDAGB0_FIFO_FULL 0x0069 +#define mmDAGB0_FIFO_FULL_BASE_IDX 1 +#define mmDAGB0_WR_CREDITS_FULL 0x006a +#define mmDAGB0_WR_CREDITS_FULL_BASE_IDX 1 +#define mmDAGB0_RD_CREDITS_FULL 0x006b +#define mmDAGB0_RD_CREDITS_FULL_BASE_IDX 1 +#define mmDAGB0_PERFCOUNTER_LO 0x006c +#define mmDAGB0_PERFCOUNTER_LO_BASE_IDX 1 +#define mmDAGB0_PERFCOUNTER_HI 0x006d +#define mmDAGB0_PERFCOUNTER_HI_BASE_IDX 1 +#define mmDAGB0_PERFCOUNTER0_CFG 0x006e +#define mmDAGB0_PERFCOUNTER0_CFG_BASE_IDX 1 +#define mmDAGB0_PERFCOUNTER1_CFG 0x006f +#define mmDAGB0_PERFCOUNTER1_CFG_BASE_IDX 1 +#define mmDAGB0_PERFCOUNTER2_CFG 0x0070 +#define mmDAGB0_PERFCOUNTER2_CFG_BASE_IDX 1 +#define mmDAGB0_PERFCOUNTER_RSLT_CNTL 0x0071 +#define mmDAGB0_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1 +#define mmDAGB0_RESERVE0 0x0072 +#define mmDAGB0_RESERVE0_BASE_IDX 1 +#define mmDAGB0_RESERVE1 0x0073 +#define mmDAGB0_RESERVE1_BASE_IDX 1 +#define mmDAGB0_RESERVE2 0x0074 +#define mmDAGB0_RESERVE2_BASE_IDX 1 +#define mmDAGB0_RESERVE3 0x0075 +#define mmDAGB0_RESERVE3_BASE_IDX 1 +#define mmDAGB0_RESERVE4 0x0076 +#define mmDAGB0_RESERVE4_BASE_IDX 1 +#define mmDAGB0_RESERVE5 0x0077 +#define mmDAGB0_RESERVE5_BASE_IDX 1 +#define mmDAGB0_RESERVE6 0x0078 +#define mmDAGB0_RESERVE6_BASE_IDX 1 +#define mmDAGB0_RESERVE7 0x0079 +#define mmDAGB0_RESERVE7_BASE_IDX 1 +#define mmDAGB0_RESERVE8 0x007a +#define mmDAGB0_RESERVE8_BASE_IDX 1 +#define mmDAGB0_RESERVE9 0x007b +#define mmDAGB0_RESERVE9_BASE_IDX 1 +#define mmDAGB0_RESERVE10 0x007c +#define mmDAGB0_RESERVE10_BASE_IDX 1 +#define mmDAGB0_RESERVE11 0x007d +#define mmDAGB0_RESERVE11_BASE_IDX 1 +#define mmDAGB0_RESERVE12 0x007e +#define mmDAGB0_RESERVE12_BASE_IDX 1 +#define mmDAGB0_RESERVE13 0x007f +#define mmDAGB0_RESERVE13_BASE_IDX 1 + + +// addressBlock: mmhub_dagb_dagbdec1 +// base address: 0x68200 +#define mmDAGB1_RDCLI0 0x0080 +#define mmDAGB1_RDCLI0_BASE_IDX 1 +#define mmDAGB1_RDCLI1 0x0081 +#define mmDAGB1_RDCLI1_BASE_IDX 1 +#define mmDAGB1_RDCLI2 0x0082 +#define mmDAGB1_RDCLI2_BASE_IDX 1 +#define mmDAGB1_RDCLI3 0x0083 +#define mmDAGB1_RDCLI3_BASE_IDX 1 +#define mmDAGB1_RDCLI4 0x0084 +#define mmDAGB1_RDCLI4_BASE_IDX 1 +#define mmDAGB1_RDCLI5 0x0085 +#define mmDAGB1_RDCLI5_BASE_IDX 1 +#define mmDAGB1_RDCLI6 0x0086 +#define mmDAGB1_RDCLI6_BASE_IDX 1 +#define mmDAGB1_RDCLI7 0x0087 +#define mmDAGB1_RDCLI7_BASE_IDX 1 +#define mmDAGB1_RDCLI8 0x0088 +#define mmDAGB1_RDCLI8_BASE_IDX 1 
+#define mmDAGB1_RDCLI9 0x0089 +#define mmDAGB1_RDCLI9_BASE_IDX 1 +#define mmDAGB1_RDCLI10 0x008a +#define mmDAGB1_RDCLI10_BASE_IDX 1 +#define mmDAGB1_RDCLI11 0x008b +#define mmDAGB1_RDCLI11_BASE_IDX 1 +#define mmDAGB1_RDCLI12 0x008c +#define mmDAGB1_RDCLI12_BASE_IDX 1 +#define mmDAGB1_RDCLI13 0x008d +#define mmDAGB1_RDCLI13_BASE_IDX 1 +#define mmDAGB1_RDCLI14 0x008e +#define mmDAGB1_RDCLI14_BASE_IDX 1 +#define mmDAGB1_RDCLI15 0x008f +#define mmDAGB1_RDCLI15_BASE_IDX 1 +#define mmDAGB1_RD_CNTL 0x0090 +#define mmDAGB1_RD_CNTL_BASE_IDX 1 +#define mmDAGB1_RD_GMI_CNTL 0x0091 +#define mmDAGB1_RD_GMI_CNTL_BASE_IDX 1 +#define mmDAGB1_RD_ADDR_DAGB 0x0092 +#define mmDAGB1_RD_ADDR_DAGB_BASE_IDX 1 +#define mmDAGB1_RD_OUTPUT_DAGB_MAX_BURST 0x0093 +#define mmDAGB1_RD_OUTPUT_DAGB_MAX_BURST_BASE_IDX 1 +#define mmDAGB1_RD_OUTPUT_DAGB_LAZY_TIMER 0x0094 +#define mmDAGB1_RD_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 1 +#define mmDAGB1_RD_CGTT_CLK_CTRL 0x0095 +#define mmDAGB1_RD_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB1_L1TLB_RD_CGTT_CLK_CTRL 0x0096 +#define mmDAGB1_L1TLB_RD_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB1_ATCVM_RD_CGTT_CLK_CTRL 0x0097 +#define mmDAGB1_ATCVM_RD_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB1_RD_ADDR_DAGB_MAX_BURST0 0x0098 +#define mmDAGB1_RD_ADDR_DAGB_MAX_BURST0_BASE_IDX 1 +#define mmDAGB1_RD_ADDR_DAGB_LAZY_TIMER0 0x0099 +#define mmDAGB1_RD_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 1 +#define mmDAGB1_RD_ADDR_DAGB_MAX_BURST1 0x009a +#define mmDAGB1_RD_ADDR_DAGB_MAX_BURST1_BASE_IDX 1 +#define mmDAGB1_RD_ADDR_DAGB_LAZY_TIMER1 0x009b +#define mmDAGB1_RD_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 1 +#define mmDAGB1_RD_VC0_CNTL 0x009c +#define mmDAGB1_RD_VC0_CNTL_BASE_IDX 1 +#define mmDAGB1_RD_VC1_CNTL 0x009d +#define mmDAGB1_RD_VC1_CNTL_BASE_IDX 1 +#define mmDAGB1_RD_VC2_CNTL 0x009e +#define mmDAGB1_RD_VC2_CNTL_BASE_IDX 1 +#define mmDAGB1_RD_VC3_CNTL 0x009f +#define mmDAGB1_RD_VC3_CNTL_BASE_IDX 1 +#define mmDAGB1_RD_VC4_CNTL 0x00a0 +#define mmDAGB1_RD_VC4_CNTL_BASE_IDX 1 +#define mmDAGB1_RD_VC5_CNTL 0x00a1 +#define mmDAGB1_RD_VC5_CNTL_BASE_IDX 1 +#define mmDAGB1_RD_VC6_CNTL 0x00a2 +#define mmDAGB1_RD_VC6_CNTL_BASE_IDX 1 +#define mmDAGB1_RD_VC7_CNTL 0x00a3 +#define mmDAGB1_RD_VC7_CNTL_BASE_IDX 1 +#define mmDAGB1_RD_CNTL_MISC 0x00a4 +#define mmDAGB1_RD_CNTL_MISC_BASE_IDX 1 +#define mmDAGB1_RD_TLB_CREDIT 0x00a5 +#define mmDAGB1_RD_TLB_CREDIT_BASE_IDX 1 +#define mmDAGB1_RDCLI_ASK_PENDING 0x00a6 +#define mmDAGB1_RDCLI_ASK_PENDING_BASE_IDX 1 +#define mmDAGB1_RDCLI_GO_PENDING 0x00a7 +#define mmDAGB1_RDCLI_GO_PENDING_BASE_IDX 1 +#define mmDAGB1_RDCLI_GBLSEND_PENDING 0x00a8 +#define mmDAGB1_RDCLI_GBLSEND_PENDING_BASE_IDX 1 +#define mmDAGB1_RDCLI_TLB_PENDING 0x00a9 +#define mmDAGB1_RDCLI_TLB_PENDING_BASE_IDX 1 +#define mmDAGB1_RDCLI_OARB_PENDING 0x00aa +#define mmDAGB1_RDCLI_OARB_PENDING_BASE_IDX 1 +#define mmDAGB1_RDCLI_OSD_PENDING 0x00ab +#define mmDAGB1_RDCLI_OSD_PENDING_BASE_IDX 1 +#define mmDAGB1_WRCLI0 0x00ac +#define mmDAGB1_WRCLI0_BASE_IDX 1 +#define mmDAGB1_WRCLI1 0x00ad +#define mmDAGB1_WRCLI1_BASE_IDX 1 +#define mmDAGB1_WRCLI2 0x00ae +#define mmDAGB1_WRCLI2_BASE_IDX 1 +#define mmDAGB1_WRCLI3 0x00af +#define mmDAGB1_WRCLI3_BASE_IDX 1 +#define mmDAGB1_WRCLI4 0x00b0 +#define mmDAGB1_WRCLI4_BASE_IDX 1 +#define mmDAGB1_WRCLI5 0x00b1 +#define mmDAGB1_WRCLI5_BASE_IDX 1 +#define mmDAGB1_WRCLI6 0x00b2 +#define mmDAGB1_WRCLI6_BASE_IDX 1 +#define mmDAGB1_WRCLI7 0x00b3 +#define mmDAGB1_WRCLI7_BASE_IDX 1 +#define mmDAGB1_WRCLI8 0x00b4 +#define mmDAGB1_WRCLI8_BASE_IDX 1 +#define mmDAGB1_WRCLI9 0x00b5 +#define mmDAGB1_WRCLI9_BASE_IDX 1 
+#define mmDAGB1_WRCLI10 0x00b6 +#define mmDAGB1_WRCLI10_BASE_IDX 1 +#define mmDAGB1_WRCLI11 0x00b7 +#define mmDAGB1_WRCLI11_BASE_IDX 1 +#define mmDAGB1_WRCLI12 0x00b8 +#define mmDAGB1_WRCLI12_BASE_IDX 1 +#define mmDAGB1_WRCLI13 0x00b9 +#define mmDAGB1_WRCLI13_BASE_IDX 1 +#define mmDAGB1_WRCLI14 0x00ba +#define mmDAGB1_WRCLI14_BASE_IDX 1 +#define mmDAGB1_WRCLI15 0x00bb +#define mmDAGB1_WRCLI15_BASE_IDX 1 +#define mmDAGB1_WR_CNTL 0x00bc +#define mmDAGB1_WR_CNTL_BASE_IDX 1 +#define mmDAGB1_WR_GMI_CNTL 0x00bd +#define mmDAGB1_WR_GMI_CNTL_BASE_IDX 1 +#define mmDAGB1_WR_ADDR_DAGB 0x00be +#define mmDAGB1_WR_ADDR_DAGB_BASE_IDX 1 +#define mmDAGB1_WR_OUTPUT_DAGB_MAX_BURST 0x00bf +#define mmDAGB1_WR_OUTPUT_DAGB_MAX_BURST_BASE_IDX 1 +#define mmDAGB1_WR_OUTPUT_DAGB_LAZY_TIMER 0x00c0 +#define mmDAGB1_WR_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 1 +#define mmDAGB1_WR_CGTT_CLK_CTRL 0x00c1 +#define mmDAGB1_WR_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB1_L1TLB_WR_CGTT_CLK_CTRL 0x00c2 +#define mmDAGB1_L1TLB_WR_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB1_ATCVM_WR_CGTT_CLK_CTRL 0x00c3 +#define mmDAGB1_ATCVM_WR_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB1_WR_ADDR_DAGB_MAX_BURST0 0x00c4 +#define mmDAGB1_WR_ADDR_DAGB_MAX_BURST0_BASE_IDX 1 +#define mmDAGB1_WR_ADDR_DAGB_LAZY_TIMER0 0x00c5 +#define mmDAGB1_WR_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 1 +#define mmDAGB1_WR_ADDR_DAGB_MAX_BURST1 0x00c6 +#define mmDAGB1_WR_ADDR_DAGB_MAX_BURST1_BASE_IDX 1 +#define mmDAGB1_WR_ADDR_DAGB_LAZY_TIMER1 0x00c7 +#define mmDAGB1_WR_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 1 +#define mmDAGB1_WR_DATA_DAGB 0x00c8 +#define mmDAGB1_WR_DATA_DAGB_BASE_IDX 1 +#define mmDAGB1_WR_DATA_DAGB_MAX_BURST0 0x00c9 +#define mmDAGB1_WR_DATA_DAGB_MAX_BURST0_BASE_IDX 1 +#define mmDAGB1_WR_DATA_DAGB_LAZY_TIMER0 0x00ca +#define mmDAGB1_WR_DATA_DAGB_LAZY_TIMER0_BASE_IDX 1 +#define mmDAGB1_WR_DATA_DAGB_MAX_BURST1 0x00cb +#define mmDAGB1_WR_DATA_DAGB_MAX_BURST1_BASE_IDX 1 +#define mmDAGB1_WR_DATA_DAGB_LAZY_TIMER1 0x00cc +#define mmDAGB1_WR_DATA_DAGB_LAZY_TIMER1_BASE_IDX 1 +#define mmDAGB1_WR_VC0_CNTL 0x00cd +#define mmDAGB1_WR_VC0_CNTL_BASE_IDX 1 +#define mmDAGB1_WR_VC1_CNTL 0x00ce +#define mmDAGB1_WR_VC1_CNTL_BASE_IDX 1 +#define mmDAGB1_WR_VC2_CNTL 0x00cf +#define mmDAGB1_WR_VC2_CNTL_BASE_IDX 1 +#define mmDAGB1_WR_VC3_CNTL 0x00d0 +#define mmDAGB1_WR_VC3_CNTL_BASE_IDX 1 +#define mmDAGB1_WR_VC4_CNTL 0x00d1 +#define mmDAGB1_WR_VC4_CNTL_BASE_IDX 1 +#define mmDAGB1_WR_VC5_CNTL 0x00d2 +#define mmDAGB1_WR_VC5_CNTL_BASE_IDX 1 +#define mmDAGB1_WR_VC6_CNTL 0x00d3 +#define mmDAGB1_WR_VC6_CNTL_BASE_IDX 1 +#define mmDAGB1_WR_VC7_CNTL 0x00d4 +#define mmDAGB1_WR_VC7_CNTL_BASE_IDX 1 +#define mmDAGB1_WR_CNTL_MISC 0x00d5 +#define mmDAGB1_WR_CNTL_MISC_BASE_IDX 1 +#define mmDAGB1_WR_TLB_CREDIT 0x00d6 +#define mmDAGB1_WR_TLB_CREDIT_BASE_IDX 1 +#define mmDAGB1_WR_DATA_CREDIT 0x00d7 +#define mmDAGB1_WR_DATA_CREDIT_BASE_IDX 1 +#define mmDAGB1_WR_MISC_CREDIT 0x00d8 +#define mmDAGB1_WR_MISC_CREDIT_BASE_IDX 1 +#define mmDAGB1_WRCLI_ASK_PENDING 0x00dd +#define mmDAGB1_WRCLI_ASK_PENDING_BASE_IDX 1 +#define mmDAGB1_WRCLI_GO_PENDING 0x00de +#define mmDAGB1_WRCLI_GO_PENDING_BASE_IDX 1 +#define mmDAGB1_WRCLI_GBLSEND_PENDING 0x00df +#define mmDAGB1_WRCLI_GBLSEND_PENDING_BASE_IDX 1 +#define mmDAGB1_WRCLI_TLB_PENDING 0x00e0 +#define mmDAGB1_WRCLI_TLB_PENDING_BASE_IDX 1 +#define mmDAGB1_WRCLI_OARB_PENDING 0x00e1 +#define mmDAGB1_WRCLI_OARB_PENDING_BASE_IDX 1 +#define mmDAGB1_WRCLI_OSD_PENDING 0x00e2 +#define mmDAGB1_WRCLI_OSD_PENDING_BASE_IDX 1 +#define mmDAGB1_WRCLI_DBUS_ASK_PENDING 0x00e3 +#define 
mmDAGB1_WRCLI_DBUS_ASK_PENDING_BASE_IDX 1 +#define mmDAGB1_WRCLI_DBUS_GO_PENDING 0x00e4 +#define mmDAGB1_WRCLI_DBUS_GO_PENDING_BASE_IDX 1 +#define mmDAGB1_DAGB_DLY 0x00e5 +#define mmDAGB1_DAGB_DLY_BASE_IDX 1 +#define mmDAGB1_CNTL_MISC 0x00e6 +#define mmDAGB1_CNTL_MISC_BASE_IDX 1 +#define mmDAGB1_CNTL_MISC2 0x00e7 +#define mmDAGB1_CNTL_MISC2_BASE_IDX 1 +#define mmDAGB1_FIFO_EMPTY 0x00e8 +#define mmDAGB1_FIFO_EMPTY_BASE_IDX 1 +#define mmDAGB1_FIFO_FULL 0x00e9 +#define mmDAGB1_FIFO_FULL_BASE_IDX 1 +#define mmDAGB1_WR_CREDITS_FULL 0x00ea +#define mmDAGB1_WR_CREDITS_FULL_BASE_IDX 1 +#define mmDAGB1_RD_CREDITS_FULL 0x00eb +#define mmDAGB1_RD_CREDITS_FULL_BASE_IDX 1 +#define mmDAGB1_PERFCOUNTER_LO 0x00ec +#define mmDAGB1_PERFCOUNTER_LO_BASE_IDX 1 +#define mmDAGB1_PERFCOUNTER_HI 0x00ed +#define mmDAGB1_PERFCOUNTER_HI_BASE_IDX 1 +#define mmDAGB1_PERFCOUNTER0_CFG 0x00ee +#define mmDAGB1_PERFCOUNTER0_CFG_BASE_IDX 1 +#define mmDAGB1_PERFCOUNTER1_CFG 0x00ef +#define mmDAGB1_PERFCOUNTER1_CFG_BASE_IDX 1 +#define mmDAGB1_PERFCOUNTER2_CFG 0x00f0 +#define mmDAGB1_PERFCOUNTER2_CFG_BASE_IDX 1 +#define mmDAGB1_PERFCOUNTER_RSLT_CNTL 0x00f1 +#define mmDAGB1_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1 +#define mmDAGB1_RESERVE0 0x00f2 +#define mmDAGB1_RESERVE0_BASE_IDX 1 +#define mmDAGB1_RESERVE1 0x00f3 +#define mmDAGB1_RESERVE1_BASE_IDX 1 +#define mmDAGB1_RESERVE2 0x00f4 +#define mmDAGB1_RESERVE2_BASE_IDX 1 +#define mmDAGB1_RESERVE3 0x00f5 +#define mmDAGB1_RESERVE3_BASE_IDX 1 +#define mmDAGB1_RESERVE4 0x00f6 +#define mmDAGB1_RESERVE4_BASE_IDX 1 +#define mmDAGB1_RESERVE5 0x00f7 +#define mmDAGB1_RESERVE5_BASE_IDX 1 +#define mmDAGB1_RESERVE6 0x00f8 +#define mmDAGB1_RESERVE6_BASE_IDX 1 +#define mmDAGB1_RESERVE7 0x00f9 +#define mmDAGB1_RESERVE7_BASE_IDX 1 +#define mmDAGB1_RESERVE8 0x00fa +#define mmDAGB1_RESERVE8_BASE_IDX 1 +#define mmDAGB1_RESERVE9 0x00fb +#define mmDAGB1_RESERVE9_BASE_IDX 1 +#define mmDAGB1_RESERVE10 0x00fc +#define mmDAGB1_RESERVE10_BASE_IDX 1 +#define mmDAGB1_RESERVE11 0x00fd +#define mmDAGB1_RESERVE11_BASE_IDX 1 +#define mmDAGB1_RESERVE12 0x00fe +#define mmDAGB1_RESERVE12_BASE_IDX 1 +#define mmDAGB1_RESERVE13 0x00ff +#define mmDAGB1_RESERVE13_BASE_IDX 1 + + +// addressBlock: mmhub_dagb_dagbdec2 +// base address: 0x68400 +#define mmDAGB2_RDCLI0 0x0100 +#define mmDAGB2_RDCLI0_BASE_IDX 1 +#define mmDAGB2_RDCLI1 0x0101 +#define mmDAGB2_RDCLI1_BASE_IDX 1 +#define mmDAGB2_RDCLI2 0x0102 +#define mmDAGB2_RDCLI2_BASE_IDX 1 +#define mmDAGB2_RDCLI3 0x0103 +#define mmDAGB2_RDCLI3_BASE_IDX 1 +#define mmDAGB2_RDCLI4 0x0104 +#define mmDAGB2_RDCLI4_BASE_IDX 1 +#define mmDAGB2_RDCLI5 0x0105 +#define mmDAGB2_RDCLI5_BASE_IDX 1 +#define mmDAGB2_RDCLI6 0x0106 +#define mmDAGB2_RDCLI6_BASE_IDX 1 +#define mmDAGB2_RDCLI7 0x0107 +#define mmDAGB2_RDCLI7_BASE_IDX 1 +#define mmDAGB2_RDCLI8 0x0108 +#define mmDAGB2_RDCLI8_BASE_IDX 1 +#define mmDAGB2_RDCLI9 0x0109 +#define mmDAGB2_RDCLI9_BASE_IDX 1 +#define mmDAGB2_RDCLI10 0x010a +#define mmDAGB2_RDCLI10_BASE_IDX 1 +#define mmDAGB2_RDCLI11 0x010b +#define mmDAGB2_RDCLI11_BASE_IDX 1 +#define mmDAGB2_RDCLI12 0x010c +#define mmDAGB2_RDCLI12_BASE_IDX 1 +#define mmDAGB2_RDCLI13 0x010d +#define mmDAGB2_RDCLI13_BASE_IDX 1 +#define mmDAGB2_RDCLI14 0x010e +#define mmDAGB2_RDCLI14_BASE_IDX 1 +#define mmDAGB2_RDCLI15 0x010f +#define mmDAGB2_RDCLI15_BASE_IDX 1 +#define mmDAGB2_RD_CNTL 0x0110 +#define mmDAGB2_RD_CNTL_BASE_IDX 1 +#define mmDAGB2_RD_GMI_CNTL 0x0111 +#define mmDAGB2_RD_GMI_CNTL_BASE_IDX 1 +#define mmDAGB2_RD_ADDR_DAGB 0x0112 +#define mmDAGB2_RD_ADDR_DAGB_BASE_IDX 1 +#define 
mmDAGB2_RD_OUTPUT_DAGB_MAX_BURST 0x0113 +#define mmDAGB2_RD_OUTPUT_DAGB_MAX_BURST_BASE_IDX 1 +#define mmDAGB2_RD_OUTPUT_DAGB_LAZY_TIMER 0x0114 +#define mmDAGB2_RD_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 1 +#define mmDAGB2_RD_CGTT_CLK_CTRL 0x0115 +#define mmDAGB2_RD_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB2_L1TLB_RD_CGTT_CLK_CTRL 0x0116 +#define mmDAGB2_L1TLB_RD_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB2_ATCVM_RD_CGTT_CLK_CTRL 0x0117 +#define mmDAGB2_ATCVM_RD_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB2_RD_ADDR_DAGB_MAX_BURST0 0x0118 +#define mmDAGB2_RD_ADDR_DAGB_MAX_BURST0_BASE_IDX 1 +#define mmDAGB2_RD_ADDR_DAGB_LAZY_TIMER0 0x0119 +#define mmDAGB2_RD_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 1 +#define mmDAGB2_RD_ADDR_DAGB_MAX_BURST1 0x011a +#define mmDAGB2_RD_ADDR_DAGB_MAX_BURST1_BASE_IDX 1 +#define mmDAGB2_RD_ADDR_DAGB_LAZY_TIMER1 0x011b +#define mmDAGB2_RD_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 1 +#define mmDAGB2_RD_VC0_CNTL 0x011c +#define mmDAGB2_RD_VC0_CNTL_BASE_IDX 1 +#define mmDAGB2_RD_VC1_CNTL 0x011d +#define mmDAGB2_RD_VC1_CNTL_BASE_IDX 1 +#define mmDAGB2_RD_VC2_CNTL 0x011e +#define mmDAGB2_RD_VC2_CNTL_BASE_IDX 1 +#define mmDAGB2_RD_VC3_CNTL 0x011f +#define mmDAGB2_RD_VC3_CNTL_BASE_IDX 1 +#define mmDAGB2_RD_VC4_CNTL 0x0120 +#define mmDAGB2_RD_VC4_CNTL_BASE_IDX 1 +#define mmDAGB2_RD_VC5_CNTL 0x0121 +#define mmDAGB2_RD_VC5_CNTL_BASE_IDX 1 +#define mmDAGB2_RD_VC6_CNTL 0x0122 +#define mmDAGB2_RD_VC6_CNTL_BASE_IDX 1 +#define mmDAGB2_RD_VC7_CNTL 0x0123 +#define mmDAGB2_RD_VC7_CNTL_BASE_IDX 1 +#define mmDAGB2_RD_CNTL_MISC 0x0124 +#define mmDAGB2_RD_CNTL_MISC_BASE_IDX 1 +#define mmDAGB2_RD_TLB_CREDIT 0x0125 +#define mmDAGB2_RD_TLB_CREDIT_BASE_IDX 1 +#define mmDAGB2_RDCLI_ASK_PENDING 0x0126 +#define mmDAGB2_RDCLI_ASK_PENDING_BASE_IDX 1 +#define mmDAGB2_RDCLI_GO_PENDING 0x0127 +#define mmDAGB2_RDCLI_GO_PENDING_BASE_IDX 1 +#define mmDAGB2_RDCLI_GBLSEND_PENDING 0x0128 +#define mmDAGB2_RDCLI_GBLSEND_PENDING_BASE_IDX 1 +#define mmDAGB2_RDCLI_TLB_PENDING 0x0129 +#define mmDAGB2_RDCLI_TLB_PENDING_BASE_IDX 1 +#define mmDAGB2_RDCLI_OARB_PENDING 0x012a +#define mmDAGB2_RDCLI_OARB_PENDING_BASE_IDX 1 +#define mmDAGB2_RDCLI_OSD_PENDING 0x012b +#define mmDAGB2_RDCLI_OSD_PENDING_BASE_IDX 1 +#define mmDAGB2_WRCLI0 0x012c +#define mmDAGB2_WRCLI0_BASE_IDX 1 +#define mmDAGB2_WRCLI1 0x012d +#define mmDAGB2_WRCLI1_BASE_IDX 1 +#define mmDAGB2_WRCLI2 0x012e +#define mmDAGB2_WRCLI2_BASE_IDX 1 +#define mmDAGB2_WRCLI3 0x012f +#define mmDAGB2_WRCLI3_BASE_IDX 1 +#define mmDAGB2_WRCLI4 0x0130 +#define mmDAGB2_WRCLI4_BASE_IDX 1 +#define mmDAGB2_WRCLI5 0x0131 +#define mmDAGB2_WRCLI5_BASE_IDX 1 +#define mmDAGB2_WRCLI6 0x0132 +#define mmDAGB2_WRCLI6_BASE_IDX 1 +#define mmDAGB2_WRCLI7 0x0133 +#define mmDAGB2_WRCLI7_BASE_IDX 1 +#define mmDAGB2_WRCLI8 0x0134 +#define mmDAGB2_WRCLI8_BASE_IDX 1 +#define mmDAGB2_WRCLI9 0x0135 +#define mmDAGB2_WRCLI9_BASE_IDX 1 +#define mmDAGB2_WRCLI10 0x0136 +#define mmDAGB2_WRCLI10_BASE_IDX 1 +#define mmDAGB2_WRCLI11 0x0137 +#define mmDAGB2_WRCLI11_BASE_IDX 1 +#define mmDAGB2_WRCLI12 0x0138 +#define mmDAGB2_WRCLI12_BASE_IDX 1 +#define mmDAGB2_WRCLI13 0x0139 +#define mmDAGB2_WRCLI13_BASE_IDX 1 +#define mmDAGB2_WRCLI14 0x013a +#define mmDAGB2_WRCLI14_BASE_IDX 1 +#define mmDAGB2_WRCLI15 0x013b +#define mmDAGB2_WRCLI15_BASE_IDX 1 +#define mmDAGB2_WR_CNTL 0x013c +#define mmDAGB2_WR_CNTL_BASE_IDX 1 +#define mmDAGB2_WR_GMI_CNTL 0x013d +#define mmDAGB2_WR_GMI_CNTL_BASE_IDX 1 +#define mmDAGB2_WR_ADDR_DAGB 0x013e +#define mmDAGB2_WR_ADDR_DAGB_BASE_IDX 1 +#define mmDAGB2_WR_OUTPUT_DAGB_MAX_BURST 0x013f +#define 
mmDAGB2_WR_OUTPUT_DAGB_MAX_BURST_BASE_IDX 1 +#define mmDAGB2_WR_OUTPUT_DAGB_LAZY_TIMER 0x0140 +#define mmDAGB2_WR_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 1 +#define mmDAGB2_WR_CGTT_CLK_CTRL 0x0141 +#define mmDAGB2_WR_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB2_L1TLB_WR_CGTT_CLK_CTRL 0x0142 +#define mmDAGB2_L1TLB_WR_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB2_ATCVM_WR_CGTT_CLK_CTRL 0x0143 +#define mmDAGB2_ATCVM_WR_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB2_WR_ADDR_DAGB_MAX_BURST0 0x0144 +#define mmDAGB2_WR_ADDR_DAGB_MAX_BURST0_BASE_IDX 1 +#define mmDAGB2_WR_ADDR_DAGB_LAZY_TIMER0 0x0145 +#define mmDAGB2_WR_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 1 +#define mmDAGB2_WR_ADDR_DAGB_MAX_BURST1 0x0146 +#define mmDAGB2_WR_ADDR_DAGB_MAX_BURST1_BASE_IDX 1 +#define mmDAGB2_WR_ADDR_DAGB_LAZY_TIMER1 0x0147 +#define mmDAGB2_WR_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 1 +#define mmDAGB2_WR_DATA_DAGB 0x0148 +#define mmDAGB2_WR_DATA_DAGB_BASE_IDX 1 +#define mmDAGB2_WR_DATA_DAGB_MAX_BURST0 0x0149 +#define mmDAGB2_WR_DATA_DAGB_MAX_BURST0_BASE_IDX 1 +#define mmDAGB2_WR_DATA_DAGB_LAZY_TIMER0 0x014a +#define mmDAGB2_WR_DATA_DAGB_LAZY_TIMER0_BASE_IDX 1 +#define mmDAGB2_WR_DATA_DAGB_MAX_BURST1 0x014b +#define mmDAGB2_WR_DATA_DAGB_MAX_BURST1_BASE_IDX 1 +#define mmDAGB2_WR_DATA_DAGB_LAZY_TIMER1 0x014c +#define mmDAGB2_WR_DATA_DAGB_LAZY_TIMER1_BASE_IDX 1 +#define mmDAGB2_WR_VC0_CNTL 0x014d +#define mmDAGB2_WR_VC0_CNTL_BASE_IDX 1 +#define mmDAGB2_WR_VC1_CNTL 0x014e +#define mmDAGB2_WR_VC1_CNTL_BASE_IDX 1 +#define mmDAGB2_WR_VC2_CNTL 0x014f +#define mmDAGB2_WR_VC2_CNTL_BASE_IDX 1 +#define mmDAGB2_WR_VC3_CNTL 0x0150 +#define mmDAGB2_WR_VC3_CNTL_BASE_IDX 1 +#define mmDAGB2_WR_VC4_CNTL 0x0151 +#define mmDAGB2_WR_VC4_CNTL_BASE_IDX 1 +#define mmDAGB2_WR_VC5_CNTL 0x0152 +#define mmDAGB2_WR_VC5_CNTL_BASE_IDX 1 +#define mmDAGB2_WR_VC6_CNTL 0x0153 +#define mmDAGB2_WR_VC6_CNTL_BASE_IDX 1 +#define mmDAGB2_WR_VC7_CNTL 0x0154 +#define mmDAGB2_WR_VC7_CNTL_BASE_IDX 1 +#define mmDAGB2_WR_CNTL_MISC 0x0155 +#define mmDAGB2_WR_CNTL_MISC_BASE_IDX 1 +#define mmDAGB2_WR_TLB_CREDIT 0x0156 +#define mmDAGB2_WR_TLB_CREDIT_BASE_IDX 1 +#define mmDAGB2_WR_DATA_CREDIT 0x0157 +#define mmDAGB2_WR_DATA_CREDIT_BASE_IDX 1 +#define mmDAGB2_WR_MISC_CREDIT 0x0158 +#define mmDAGB2_WR_MISC_CREDIT_BASE_IDX 1 +#define mmDAGB2_WRCLI_ASK_PENDING 0x015d +#define mmDAGB2_WRCLI_ASK_PENDING_BASE_IDX 1 +#define mmDAGB2_WRCLI_GO_PENDING 0x015e +#define mmDAGB2_WRCLI_GO_PENDING_BASE_IDX 1 +#define mmDAGB2_WRCLI_GBLSEND_PENDING 0x015f +#define mmDAGB2_WRCLI_GBLSEND_PENDING_BASE_IDX 1 +#define mmDAGB2_WRCLI_TLB_PENDING 0x0160 +#define mmDAGB2_WRCLI_TLB_PENDING_BASE_IDX 1 +#define mmDAGB2_WRCLI_OARB_PENDING 0x0161 +#define mmDAGB2_WRCLI_OARB_PENDING_BASE_IDX 1 +#define mmDAGB2_WRCLI_OSD_PENDING 0x0162 +#define mmDAGB2_WRCLI_OSD_PENDING_BASE_IDX 1 +#define mmDAGB2_WRCLI_DBUS_ASK_PENDING 0x0163 +#define mmDAGB2_WRCLI_DBUS_ASK_PENDING_BASE_IDX 1 +#define mmDAGB2_WRCLI_DBUS_GO_PENDING 0x0164 +#define mmDAGB2_WRCLI_DBUS_GO_PENDING_BASE_IDX 1 +#define mmDAGB2_DAGB_DLY 0x0165 +#define mmDAGB2_DAGB_DLY_BASE_IDX 1 +#define mmDAGB2_CNTL_MISC 0x0166 +#define mmDAGB2_CNTL_MISC_BASE_IDX 1 +#define mmDAGB2_CNTL_MISC2 0x0167 +#define mmDAGB2_CNTL_MISC2_BASE_IDX 1 +#define mmDAGB2_FIFO_EMPTY 0x0168 +#define mmDAGB2_FIFO_EMPTY_BASE_IDX 1 +#define mmDAGB2_FIFO_FULL 0x0169 +#define mmDAGB2_FIFO_FULL_BASE_IDX 1 +#define mmDAGB2_WR_CREDITS_FULL 0x016a +#define mmDAGB2_WR_CREDITS_FULL_BASE_IDX 1 +#define mmDAGB2_RD_CREDITS_FULL 0x016b +#define mmDAGB2_RD_CREDITS_FULL_BASE_IDX 1 +#define mmDAGB2_PERFCOUNTER_LO 0x016c +#define 
mmDAGB2_PERFCOUNTER_LO_BASE_IDX 1 +#define mmDAGB2_PERFCOUNTER_HI 0x016d +#define mmDAGB2_PERFCOUNTER_HI_BASE_IDX 1 +#define mmDAGB2_PERFCOUNTER0_CFG 0x016e +#define mmDAGB2_PERFCOUNTER0_CFG_BASE_IDX 1 +#define mmDAGB2_PERFCOUNTER1_CFG 0x016f +#define mmDAGB2_PERFCOUNTER1_CFG_BASE_IDX 1 +#define mmDAGB2_PERFCOUNTER2_CFG 0x0170 +#define mmDAGB2_PERFCOUNTER2_CFG_BASE_IDX 1 +#define mmDAGB2_PERFCOUNTER_RSLT_CNTL 0x0171 +#define mmDAGB2_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1 +#define mmDAGB2_RESERVE0 0x0172 +#define mmDAGB2_RESERVE0_BASE_IDX 1 +#define mmDAGB2_RESERVE1 0x0173 +#define mmDAGB2_RESERVE1_BASE_IDX 1 +#define mmDAGB2_RESERVE2 0x0174 +#define mmDAGB2_RESERVE2_BASE_IDX 1 +#define mmDAGB2_RESERVE3 0x0175 +#define mmDAGB2_RESERVE3_BASE_IDX 1 +#define mmDAGB2_RESERVE4 0x0176 +#define mmDAGB2_RESERVE4_BASE_IDX 1 +#define mmDAGB2_RESERVE5 0x0177 +#define mmDAGB2_RESERVE5_BASE_IDX 1 +#define mmDAGB2_RESERVE6 0x0178 +#define mmDAGB2_RESERVE6_BASE_IDX 1 +#define mmDAGB2_RESERVE7 0x0179 +#define mmDAGB2_RESERVE7_BASE_IDX 1 +#define mmDAGB2_RESERVE8 0x017a +#define mmDAGB2_RESERVE8_BASE_IDX 1 +#define mmDAGB2_RESERVE9 0x017b +#define mmDAGB2_RESERVE9_BASE_IDX 1 +#define mmDAGB2_RESERVE10 0x017c +#define mmDAGB2_RESERVE10_BASE_IDX 1 +#define mmDAGB2_RESERVE11 0x017d +#define mmDAGB2_RESERVE11_BASE_IDX 1 +#define mmDAGB2_RESERVE12 0x017e +#define mmDAGB2_RESERVE12_BASE_IDX 1 +#define mmDAGB2_RESERVE13 0x017f +#define mmDAGB2_RESERVE13_BASE_IDX 1 + + +// addressBlock: mmhub_dagb_dagbdec3 +// base address: 0x68600 +#define mmDAGB3_RDCLI0 0x0180 +#define mmDAGB3_RDCLI0_BASE_IDX 1 +#define mmDAGB3_RDCLI1 0x0181 +#define mmDAGB3_RDCLI1_BASE_IDX 1 +#define mmDAGB3_RDCLI2 0x0182 +#define mmDAGB3_RDCLI2_BASE_IDX 1 +#define mmDAGB3_RDCLI3 0x0183 +#define mmDAGB3_RDCLI3_BASE_IDX 1 +#define mmDAGB3_RDCLI4 0x0184 +#define mmDAGB3_RDCLI4_BASE_IDX 1 +#define mmDAGB3_RDCLI5 0x0185 +#define mmDAGB3_RDCLI5_BASE_IDX 1 +#define mmDAGB3_RDCLI6 0x0186 +#define mmDAGB3_RDCLI6_BASE_IDX 1 +#define mmDAGB3_RDCLI7 0x0187 +#define mmDAGB3_RDCLI7_BASE_IDX 1 +#define mmDAGB3_RDCLI8 0x0188 +#define mmDAGB3_RDCLI8_BASE_IDX 1 +#define mmDAGB3_RDCLI9 0x0189 +#define mmDAGB3_RDCLI9_BASE_IDX 1 +#define mmDAGB3_RDCLI10 0x018a +#define mmDAGB3_RDCLI10_BASE_IDX 1 +#define mmDAGB3_RDCLI11 0x018b +#define mmDAGB3_RDCLI11_BASE_IDX 1 +#define mmDAGB3_RDCLI12 0x018c +#define mmDAGB3_RDCLI12_BASE_IDX 1 +#define mmDAGB3_RDCLI13 0x018d +#define mmDAGB3_RDCLI13_BASE_IDX 1 +#define mmDAGB3_RDCLI14 0x018e +#define mmDAGB3_RDCLI14_BASE_IDX 1 +#define mmDAGB3_RDCLI15 0x018f +#define mmDAGB3_RDCLI15_BASE_IDX 1 +#define mmDAGB3_RD_CNTL 0x0190 +#define mmDAGB3_RD_CNTL_BASE_IDX 1 +#define mmDAGB3_RD_GMI_CNTL 0x0191 +#define mmDAGB3_RD_GMI_CNTL_BASE_IDX 1 +#define mmDAGB3_RD_ADDR_DAGB 0x0192 +#define mmDAGB3_RD_ADDR_DAGB_BASE_IDX 1 +#define mmDAGB3_RD_OUTPUT_DAGB_MAX_BURST 0x0193 +#define mmDAGB3_RD_OUTPUT_DAGB_MAX_BURST_BASE_IDX 1 +#define mmDAGB3_RD_OUTPUT_DAGB_LAZY_TIMER 0x0194 +#define mmDAGB3_RD_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 1 +#define mmDAGB3_RD_CGTT_CLK_CTRL 0x0195 +#define mmDAGB3_RD_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB3_L1TLB_RD_CGTT_CLK_CTRL 0x0196 +#define mmDAGB3_L1TLB_RD_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB3_ATCVM_RD_CGTT_CLK_CTRL 0x0197 +#define mmDAGB3_ATCVM_RD_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB3_RD_ADDR_DAGB_MAX_BURST0 0x0198 +#define mmDAGB3_RD_ADDR_DAGB_MAX_BURST0_BASE_IDX 1 +#define mmDAGB3_RD_ADDR_DAGB_LAZY_TIMER0 0x0199 +#define mmDAGB3_RD_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 1 +#define mmDAGB3_RD_ADDR_DAGB_MAX_BURST1 0x019a 
+#define mmDAGB3_RD_ADDR_DAGB_MAX_BURST1_BASE_IDX 1 +#define mmDAGB3_RD_ADDR_DAGB_LAZY_TIMER1 0x019b +#define mmDAGB3_RD_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 1 +#define mmDAGB3_RD_VC0_CNTL 0x019c +#define mmDAGB3_RD_VC0_CNTL_BASE_IDX 1 +#define mmDAGB3_RD_VC1_CNTL 0x019d +#define mmDAGB3_RD_VC1_CNTL_BASE_IDX 1 +#define mmDAGB3_RD_VC2_CNTL 0x019e +#define mmDAGB3_RD_VC2_CNTL_BASE_IDX 1 +#define mmDAGB3_RD_VC3_CNTL 0x019f +#define mmDAGB3_RD_VC3_CNTL_BASE_IDX 1 +#define mmDAGB3_RD_VC4_CNTL 0x01a0 +#define mmDAGB3_RD_VC4_CNTL_BASE_IDX 1 +#define mmDAGB3_RD_VC5_CNTL 0x01a1 +#define mmDAGB3_RD_VC5_CNTL_BASE_IDX 1 +#define mmDAGB3_RD_VC6_CNTL 0x01a2 +#define mmDAGB3_RD_VC6_CNTL_BASE_IDX 1 +#define mmDAGB3_RD_VC7_CNTL 0x01a3 +#define mmDAGB3_RD_VC7_CNTL_BASE_IDX 1 +#define mmDAGB3_RD_CNTL_MISC 0x01a4 +#define mmDAGB3_RD_CNTL_MISC_BASE_IDX 1 +#define mmDAGB3_RD_TLB_CREDIT 0x01a5 +#define mmDAGB3_RD_TLB_CREDIT_BASE_IDX 1 +#define mmDAGB3_RDCLI_ASK_PENDING 0x01a6 +#define mmDAGB3_RDCLI_ASK_PENDING_BASE_IDX 1 +#define mmDAGB3_RDCLI_GO_PENDING 0x01a7 +#define mmDAGB3_RDCLI_GO_PENDING_BASE_IDX 1 +#define mmDAGB3_RDCLI_GBLSEND_PENDING 0x01a8 +#define mmDAGB3_RDCLI_GBLSEND_PENDING_BASE_IDX 1 +#define mmDAGB3_RDCLI_TLB_PENDING 0x01a9 +#define mmDAGB3_RDCLI_TLB_PENDING_BASE_IDX 1 +#define mmDAGB3_RDCLI_OARB_PENDING 0x01aa +#define mmDAGB3_RDCLI_OARB_PENDING_BASE_IDX 1 +#define mmDAGB3_RDCLI_OSD_PENDING 0x01ab +#define mmDAGB3_RDCLI_OSD_PENDING_BASE_IDX 1 +#define mmDAGB3_WRCLI0 0x01ac +#define mmDAGB3_WRCLI0_BASE_IDX 1 +#define mmDAGB3_WRCLI1 0x01ad +#define mmDAGB3_WRCLI1_BASE_IDX 1 +#define mmDAGB3_WRCLI2 0x01ae +#define mmDAGB3_WRCLI2_BASE_IDX 1 +#define mmDAGB3_WRCLI3 0x01af +#define mmDAGB3_WRCLI3_BASE_IDX 1 +#define mmDAGB3_WRCLI4 0x01b0 +#define mmDAGB3_WRCLI4_BASE_IDX 1 +#define mmDAGB3_WRCLI5 0x01b1 +#define mmDAGB3_WRCLI5_BASE_IDX 1 +#define mmDAGB3_WRCLI6 0x01b2 +#define mmDAGB3_WRCLI6_BASE_IDX 1 +#define mmDAGB3_WRCLI7 0x01b3 +#define mmDAGB3_WRCLI7_BASE_IDX 1 +#define mmDAGB3_WRCLI8 0x01b4 +#define mmDAGB3_WRCLI8_BASE_IDX 1 +#define mmDAGB3_WRCLI9 0x01b5 +#define mmDAGB3_WRCLI9_BASE_IDX 1 +#define mmDAGB3_WRCLI10 0x01b6 +#define mmDAGB3_WRCLI10_BASE_IDX 1 +#define mmDAGB3_WRCLI11 0x01b7 +#define mmDAGB3_WRCLI11_BASE_IDX 1 +#define mmDAGB3_WRCLI12 0x01b8 +#define mmDAGB3_WRCLI12_BASE_IDX 1 +#define mmDAGB3_WRCLI13 0x01b9 +#define mmDAGB3_WRCLI13_BASE_IDX 1 +#define mmDAGB3_WRCLI14 0x01ba +#define mmDAGB3_WRCLI14_BASE_IDX 1 +#define mmDAGB3_WRCLI15 0x01bb +#define mmDAGB3_WRCLI15_BASE_IDX 1 +#define mmDAGB3_WR_CNTL 0x01bc +#define mmDAGB3_WR_CNTL_BASE_IDX 1 +#define mmDAGB3_WR_GMI_CNTL 0x01bd +#define mmDAGB3_WR_GMI_CNTL_BASE_IDX 1 +#define mmDAGB3_WR_ADDR_DAGB 0x01be +#define mmDAGB3_WR_ADDR_DAGB_BASE_IDX 1 +#define mmDAGB3_WR_OUTPUT_DAGB_MAX_BURST 0x01bf +#define mmDAGB3_WR_OUTPUT_DAGB_MAX_BURST_BASE_IDX 1 +#define mmDAGB3_WR_OUTPUT_DAGB_LAZY_TIMER 0x01c0 +#define mmDAGB3_WR_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 1 +#define mmDAGB3_WR_CGTT_CLK_CTRL 0x01c1 +#define mmDAGB3_WR_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB3_L1TLB_WR_CGTT_CLK_CTRL 0x01c2 +#define mmDAGB3_L1TLB_WR_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB3_ATCVM_WR_CGTT_CLK_CTRL 0x01c3 +#define mmDAGB3_ATCVM_WR_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB3_WR_ADDR_DAGB_MAX_BURST0 0x01c4 +#define mmDAGB3_WR_ADDR_DAGB_MAX_BURST0_BASE_IDX 1 +#define mmDAGB3_WR_ADDR_DAGB_LAZY_TIMER0 0x01c5 +#define mmDAGB3_WR_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 1 +#define mmDAGB3_WR_ADDR_DAGB_MAX_BURST1 0x01c6 +#define mmDAGB3_WR_ADDR_DAGB_MAX_BURST1_BASE_IDX 1 +#define 
mmDAGB3_WR_ADDR_DAGB_LAZY_TIMER1 0x01c7 +#define mmDAGB3_WR_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 1 +#define mmDAGB3_WR_DATA_DAGB 0x01c8 +#define mmDAGB3_WR_DATA_DAGB_BASE_IDX 1 +#define mmDAGB3_WR_DATA_DAGB_MAX_BURST0 0x01c9 +#define mmDAGB3_WR_DATA_DAGB_MAX_BURST0_BASE_IDX 1 +#define mmDAGB3_WR_DATA_DAGB_LAZY_TIMER0 0x01ca +#define mmDAGB3_WR_DATA_DAGB_LAZY_TIMER0_BASE_IDX 1 +#define mmDAGB3_WR_DATA_DAGB_MAX_BURST1 0x01cb +#define mmDAGB3_WR_DATA_DAGB_MAX_BURST1_BASE_IDX 1 +#define mmDAGB3_WR_DATA_DAGB_LAZY_TIMER1 0x01cc +#define mmDAGB3_WR_DATA_DAGB_LAZY_TIMER1_BASE_IDX 1 +#define mmDAGB3_WR_VC0_CNTL 0x01cd +#define mmDAGB3_WR_VC0_CNTL_BASE_IDX 1 +#define mmDAGB3_WR_VC1_CNTL 0x01ce +#define mmDAGB3_WR_VC1_CNTL_BASE_IDX 1 +#define mmDAGB3_WR_VC2_CNTL 0x01cf +#define mmDAGB3_WR_VC2_CNTL_BASE_IDX 1 +#define mmDAGB3_WR_VC3_CNTL 0x01d0 +#define mmDAGB3_WR_VC3_CNTL_BASE_IDX 1 +#define mmDAGB3_WR_VC4_CNTL 0x01d1 +#define mmDAGB3_WR_VC4_CNTL_BASE_IDX 1 +#define mmDAGB3_WR_VC5_CNTL 0x01d2 +#define mmDAGB3_WR_VC5_CNTL_BASE_IDX 1 +#define mmDAGB3_WR_VC6_CNTL 0x01d3 +#define mmDAGB3_WR_VC6_CNTL_BASE_IDX 1 +#define mmDAGB3_WR_VC7_CNTL 0x01d4 +#define mmDAGB3_WR_VC7_CNTL_BASE_IDX 1 +#define mmDAGB3_WR_CNTL_MISC 0x01d5 +#define mmDAGB3_WR_CNTL_MISC_BASE_IDX 1 +#define mmDAGB3_WR_TLB_CREDIT 0x01d6 +#define mmDAGB3_WR_TLB_CREDIT_BASE_IDX 1 +#define mmDAGB3_WR_DATA_CREDIT 0x01d7 +#define mmDAGB3_WR_DATA_CREDIT_BASE_IDX 1 +#define mmDAGB3_WR_MISC_CREDIT 0x01d8 +#define mmDAGB3_WR_MISC_CREDIT_BASE_IDX 1 +#define mmDAGB3_WRCLI_ASK_PENDING 0x01dd +#define mmDAGB3_WRCLI_ASK_PENDING_BASE_IDX 1 +#define mmDAGB3_WRCLI_GO_PENDING 0x01de +#define mmDAGB3_WRCLI_GO_PENDING_BASE_IDX 1 +#define mmDAGB3_WRCLI_GBLSEND_PENDING 0x01df +#define mmDAGB3_WRCLI_GBLSEND_PENDING_BASE_IDX 1 +#define mmDAGB3_WRCLI_TLB_PENDING 0x01e0 +#define mmDAGB3_WRCLI_TLB_PENDING_BASE_IDX 1 +#define mmDAGB3_WRCLI_OARB_PENDING 0x01e1 +#define mmDAGB3_WRCLI_OARB_PENDING_BASE_IDX 1 +#define mmDAGB3_WRCLI_OSD_PENDING 0x01e2 +#define mmDAGB3_WRCLI_OSD_PENDING_BASE_IDX 1 +#define mmDAGB3_WRCLI_DBUS_ASK_PENDING 0x01e3 +#define mmDAGB3_WRCLI_DBUS_ASK_PENDING_BASE_IDX 1 +#define mmDAGB3_WRCLI_DBUS_GO_PENDING 0x01e4 +#define mmDAGB3_WRCLI_DBUS_GO_PENDING_BASE_IDX 1 +#define mmDAGB3_DAGB_DLY 0x01e5 +#define mmDAGB3_DAGB_DLY_BASE_IDX 1 +#define mmDAGB3_CNTL_MISC 0x01e6 +#define mmDAGB3_CNTL_MISC_BASE_IDX 1 +#define mmDAGB3_CNTL_MISC2 0x01e7 +#define mmDAGB3_CNTL_MISC2_BASE_IDX 1 +#define mmDAGB3_FIFO_EMPTY 0x01e8 +#define mmDAGB3_FIFO_EMPTY_BASE_IDX 1 +#define mmDAGB3_FIFO_FULL 0x01e9 +#define mmDAGB3_FIFO_FULL_BASE_IDX 1 +#define mmDAGB3_WR_CREDITS_FULL 0x01ea +#define mmDAGB3_WR_CREDITS_FULL_BASE_IDX 1 +#define mmDAGB3_RD_CREDITS_FULL 0x01eb +#define mmDAGB3_RD_CREDITS_FULL_BASE_IDX 1 +#define mmDAGB3_PERFCOUNTER_LO 0x01ec +#define mmDAGB3_PERFCOUNTER_LO_BASE_IDX 1 +#define mmDAGB3_PERFCOUNTER_HI 0x01ed +#define mmDAGB3_PERFCOUNTER_HI_BASE_IDX 1 +#define mmDAGB3_PERFCOUNTER0_CFG 0x01ee +#define mmDAGB3_PERFCOUNTER0_CFG_BASE_IDX 1 +#define mmDAGB3_PERFCOUNTER1_CFG 0x01ef +#define mmDAGB3_PERFCOUNTER1_CFG_BASE_IDX 1 +#define mmDAGB3_PERFCOUNTER2_CFG 0x01f0 +#define mmDAGB3_PERFCOUNTER2_CFG_BASE_IDX 1 +#define mmDAGB3_PERFCOUNTER_RSLT_CNTL 0x01f1 +#define mmDAGB3_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1 +#define mmDAGB3_RESERVE0 0x01f2 +#define mmDAGB3_RESERVE0_BASE_IDX 1 +#define mmDAGB3_RESERVE1 0x01f3 +#define mmDAGB3_RESERVE1_BASE_IDX 1 +#define mmDAGB3_RESERVE2 0x01f4 +#define mmDAGB3_RESERVE2_BASE_IDX 1 +#define mmDAGB3_RESERVE3 0x01f5 +#define 
mmDAGB3_RESERVE3_BASE_IDX 1 +#define mmDAGB3_RESERVE4 0x01f6 +#define mmDAGB3_RESERVE4_BASE_IDX 1 +#define mmDAGB3_RESERVE5 0x01f7 +#define mmDAGB3_RESERVE5_BASE_IDX 1 +#define mmDAGB3_RESERVE6 0x01f8 +#define mmDAGB3_RESERVE6_BASE_IDX 1 +#define mmDAGB3_RESERVE7 0x01f9 +#define mmDAGB3_RESERVE7_BASE_IDX 1 +#define mmDAGB3_RESERVE8 0x01fa +#define mmDAGB3_RESERVE8_BASE_IDX 1 +#define mmDAGB3_RESERVE9 0x01fb +#define mmDAGB3_RESERVE9_BASE_IDX 1 +#define mmDAGB3_RESERVE10 0x01fc +#define mmDAGB3_RESERVE10_BASE_IDX 1 +#define mmDAGB3_RESERVE11 0x01fd +#define mmDAGB3_RESERVE11_BASE_IDX 1 +#define mmDAGB3_RESERVE12 0x01fe +#define mmDAGB3_RESERVE12_BASE_IDX 1 +#define mmDAGB3_RESERVE13 0x01ff +#define mmDAGB3_RESERVE13_BASE_IDX 1 + + +// addressBlock: mmhub_dagb_dagbdec4 +// base address: 0x68800 +#define mmDAGB4_RDCLI0 0x0200 +#define mmDAGB4_RDCLI0_BASE_IDX 1 +#define mmDAGB4_RDCLI1 0x0201 +#define mmDAGB4_RDCLI1_BASE_IDX 1 +#define mmDAGB4_RDCLI2 0x0202 +#define mmDAGB4_RDCLI2_BASE_IDX 1 +#define mmDAGB4_RDCLI3 0x0203 +#define mmDAGB4_RDCLI3_BASE_IDX 1 +#define mmDAGB4_RDCLI4 0x0204 +#define mmDAGB4_RDCLI4_BASE_IDX 1 +#define mmDAGB4_RDCLI5 0x0205 +#define mmDAGB4_RDCLI5_BASE_IDX 1 +#define mmDAGB4_RDCLI6 0x0206 +#define mmDAGB4_RDCLI6_BASE_IDX 1 +#define mmDAGB4_RDCLI7 0x0207 +#define mmDAGB4_RDCLI7_BASE_IDX 1 +#define mmDAGB4_RDCLI8 0x0208 +#define mmDAGB4_RDCLI8_BASE_IDX 1 +#define mmDAGB4_RDCLI9 0x0209 +#define mmDAGB4_RDCLI9_BASE_IDX 1 +#define mmDAGB4_RDCLI10 0x020a +#define mmDAGB4_RDCLI10_BASE_IDX 1 +#define mmDAGB4_RDCLI11 0x020b +#define mmDAGB4_RDCLI11_BASE_IDX 1 +#define mmDAGB4_RDCLI12 0x020c +#define mmDAGB4_RDCLI12_BASE_IDX 1 +#define mmDAGB4_RDCLI13 0x020d +#define mmDAGB4_RDCLI13_BASE_IDX 1 +#define mmDAGB4_RDCLI14 0x020e +#define mmDAGB4_RDCLI14_BASE_IDX 1 +#define mmDAGB4_RDCLI15 0x020f +#define mmDAGB4_RDCLI15_BASE_IDX 1 +#define mmDAGB4_RD_CNTL 0x0210 +#define mmDAGB4_RD_CNTL_BASE_IDX 1 +#define mmDAGB4_RD_GMI_CNTL 0x0211 +#define mmDAGB4_RD_GMI_CNTL_BASE_IDX 1 +#define mmDAGB4_RD_ADDR_DAGB 0x0212 +#define mmDAGB4_RD_ADDR_DAGB_BASE_IDX 1 +#define mmDAGB4_RD_OUTPUT_DAGB_MAX_BURST 0x0213 +#define mmDAGB4_RD_OUTPUT_DAGB_MAX_BURST_BASE_IDX 1 +#define mmDAGB4_RD_OUTPUT_DAGB_LAZY_TIMER 0x0214 +#define mmDAGB4_RD_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 1 +#define mmDAGB4_RD_CGTT_CLK_CTRL 0x0215 +#define mmDAGB4_RD_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB4_L1TLB_RD_CGTT_CLK_CTRL 0x0216 +#define mmDAGB4_L1TLB_RD_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB4_ATCVM_RD_CGTT_CLK_CTRL 0x0217 +#define mmDAGB4_ATCVM_RD_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB4_RD_ADDR_DAGB_MAX_BURST0 0x0218 +#define mmDAGB4_RD_ADDR_DAGB_MAX_BURST0_BASE_IDX 1 +#define mmDAGB4_RD_ADDR_DAGB_LAZY_TIMER0 0x0219 +#define mmDAGB4_RD_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 1 +#define mmDAGB4_RD_ADDR_DAGB_MAX_BURST1 0x021a +#define mmDAGB4_RD_ADDR_DAGB_MAX_BURST1_BASE_IDX 1 +#define mmDAGB4_RD_ADDR_DAGB_LAZY_TIMER1 0x021b +#define mmDAGB4_RD_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 1 +#define mmDAGB4_RD_VC0_CNTL 0x021c +#define mmDAGB4_RD_VC0_CNTL_BASE_IDX 1 +#define mmDAGB4_RD_VC1_CNTL 0x021d +#define mmDAGB4_RD_VC1_CNTL_BASE_IDX 1 +#define mmDAGB4_RD_VC2_CNTL 0x021e +#define mmDAGB4_RD_VC2_CNTL_BASE_IDX 1 +#define mmDAGB4_RD_VC3_CNTL 0x021f +#define mmDAGB4_RD_VC3_CNTL_BASE_IDX 1 +#define mmDAGB4_RD_VC4_CNTL 0x0220 +#define mmDAGB4_RD_VC4_CNTL_BASE_IDX 1 +#define mmDAGB4_RD_VC5_CNTL 0x0221 +#define mmDAGB4_RD_VC5_CNTL_BASE_IDX 1 +#define mmDAGB4_RD_VC6_CNTL 0x0222 +#define mmDAGB4_RD_VC6_CNTL_BASE_IDX 1 +#define mmDAGB4_RD_VC7_CNTL 0x0223 
+#define mmDAGB4_RD_VC7_CNTL_BASE_IDX 1 +#define mmDAGB4_RD_CNTL_MISC 0x0224 +#define mmDAGB4_RD_CNTL_MISC_BASE_IDX 1 +#define mmDAGB4_RD_TLB_CREDIT 0x0225 +#define mmDAGB4_RD_TLB_CREDIT_BASE_IDX 1 +#define mmDAGB4_RDCLI_ASK_PENDING 0x0226 +#define mmDAGB4_RDCLI_ASK_PENDING_BASE_IDX 1 +#define mmDAGB4_RDCLI_GO_PENDING 0x0227 +#define mmDAGB4_RDCLI_GO_PENDING_BASE_IDX 1 +#define mmDAGB4_RDCLI_GBLSEND_PENDING 0x0228 +#define mmDAGB4_RDCLI_GBLSEND_PENDING_BASE_IDX 1 +#define mmDAGB4_RDCLI_TLB_PENDING 0x0229 +#define mmDAGB4_RDCLI_TLB_PENDING_BASE_IDX 1 +#define mmDAGB4_RDCLI_OARB_PENDING 0x022a +#define mmDAGB4_RDCLI_OARB_PENDING_BASE_IDX 1 +#define mmDAGB4_RDCLI_OSD_PENDING 0x022b +#define mmDAGB4_RDCLI_OSD_PENDING_BASE_IDX 1 +#define mmDAGB4_WRCLI0 0x022c +#define mmDAGB4_WRCLI0_BASE_IDX 1 +#define mmDAGB4_WRCLI1 0x022d +#define mmDAGB4_WRCLI1_BASE_IDX 1 +#define mmDAGB4_WRCLI2 0x022e +#define mmDAGB4_WRCLI2_BASE_IDX 1 +#define mmDAGB4_WRCLI3 0x022f +#define mmDAGB4_WRCLI3_BASE_IDX 1 +#define mmDAGB4_WRCLI4 0x0230 +#define mmDAGB4_WRCLI4_BASE_IDX 1 +#define mmDAGB4_WRCLI5 0x0231 +#define mmDAGB4_WRCLI5_BASE_IDX 1 +#define mmDAGB4_WRCLI6 0x0232 +#define mmDAGB4_WRCLI6_BASE_IDX 1 +#define mmDAGB4_WRCLI7 0x0233 +#define mmDAGB4_WRCLI7_BASE_IDX 1 +#define mmDAGB4_WRCLI8 0x0234 +#define mmDAGB4_WRCLI8_BASE_IDX 1 +#define mmDAGB4_WRCLI9 0x0235 +#define mmDAGB4_WRCLI9_BASE_IDX 1 +#define mmDAGB4_WRCLI10 0x0236 +#define mmDAGB4_WRCLI10_BASE_IDX 1 +#define mmDAGB4_WRCLI11 0x0237 +#define mmDAGB4_WRCLI11_BASE_IDX 1 +#define mmDAGB4_WRCLI12 0x0238 +#define mmDAGB4_WRCLI12_BASE_IDX 1 +#define mmDAGB4_WRCLI13 0x0239 +#define mmDAGB4_WRCLI13_BASE_IDX 1 +#define mmDAGB4_WRCLI14 0x023a +#define mmDAGB4_WRCLI14_BASE_IDX 1 +#define mmDAGB4_WRCLI15 0x023b +#define mmDAGB4_WRCLI15_BASE_IDX 1 +#define mmDAGB4_WR_CNTL 0x023c +#define mmDAGB4_WR_CNTL_BASE_IDX 1 +#define mmDAGB4_WR_GMI_CNTL 0x023d +#define mmDAGB4_WR_GMI_CNTL_BASE_IDX 1 +#define mmDAGB4_WR_ADDR_DAGB 0x023e +#define mmDAGB4_WR_ADDR_DAGB_BASE_IDX 1 +#define mmDAGB4_WR_OUTPUT_DAGB_MAX_BURST 0x023f +#define mmDAGB4_WR_OUTPUT_DAGB_MAX_BURST_BASE_IDX 1 +#define mmDAGB4_WR_OUTPUT_DAGB_LAZY_TIMER 0x0240 +#define mmDAGB4_WR_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 1 +#define mmDAGB4_WR_CGTT_CLK_CTRL 0x0241 +#define mmDAGB4_WR_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB4_L1TLB_WR_CGTT_CLK_CTRL 0x0242 +#define mmDAGB4_L1TLB_WR_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB4_ATCVM_WR_CGTT_CLK_CTRL 0x0243 +#define mmDAGB4_ATCVM_WR_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB4_WR_ADDR_DAGB_MAX_BURST0 0x0244 +#define mmDAGB4_WR_ADDR_DAGB_MAX_BURST0_BASE_IDX 1 +#define mmDAGB4_WR_ADDR_DAGB_LAZY_TIMER0 0x0245 +#define mmDAGB4_WR_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 1 +#define mmDAGB4_WR_ADDR_DAGB_MAX_BURST1 0x0246 +#define mmDAGB4_WR_ADDR_DAGB_MAX_BURST1_BASE_IDX 1 +#define mmDAGB4_WR_ADDR_DAGB_LAZY_TIMER1 0x0247 +#define mmDAGB4_WR_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 1 +#define mmDAGB4_WR_DATA_DAGB 0x0248 +#define mmDAGB4_WR_DATA_DAGB_BASE_IDX 1 +#define mmDAGB4_WR_DATA_DAGB_MAX_BURST0 0x0249 +#define mmDAGB4_WR_DATA_DAGB_MAX_BURST0_BASE_IDX 1 +#define mmDAGB4_WR_DATA_DAGB_LAZY_TIMER0 0x024a +#define mmDAGB4_WR_DATA_DAGB_LAZY_TIMER0_BASE_IDX 1 +#define mmDAGB4_WR_DATA_DAGB_MAX_BURST1 0x024b +#define mmDAGB4_WR_DATA_DAGB_MAX_BURST1_BASE_IDX 1 +#define mmDAGB4_WR_DATA_DAGB_LAZY_TIMER1 0x024c +#define mmDAGB4_WR_DATA_DAGB_LAZY_TIMER1_BASE_IDX 1 +#define mmDAGB4_WR_VC0_CNTL 0x024d +#define mmDAGB4_WR_VC0_CNTL_BASE_IDX 1 +#define mmDAGB4_WR_VC1_CNTL 0x024e +#define mmDAGB4_WR_VC1_CNTL_BASE_IDX 1 
+#define mmDAGB4_WR_VC2_CNTL 0x024f +#define mmDAGB4_WR_VC2_CNTL_BASE_IDX 1 +#define mmDAGB4_WR_VC3_CNTL 0x0250 +#define mmDAGB4_WR_VC3_CNTL_BASE_IDX 1 +#define mmDAGB4_WR_VC4_CNTL 0x0251 +#define mmDAGB4_WR_VC4_CNTL_BASE_IDX 1 +#define mmDAGB4_WR_VC5_CNTL 0x0252 +#define mmDAGB4_WR_VC5_CNTL_BASE_IDX 1 +#define mmDAGB4_WR_VC6_CNTL 0x0253 +#define mmDAGB4_WR_VC6_CNTL_BASE_IDX 1 +#define mmDAGB4_WR_VC7_CNTL 0x0254 +#define mmDAGB4_WR_VC7_CNTL_BASE_IDX 1 +#define mmDAGB4_WR_CNTL_MISC 0x0255 +#define mmDAGB4_WR_CNTL_MISC_BASE_IDX 1 +#define mmDAGB4_WR_TLB_CREDIT 0x0256 +#define mmDAGB4_WR_TLB_CREDIT_BASE_IDX 1 +#define mmDAGB4_WR_DATA_CREDIT 0x0257 +#define mmDAGB4_WR_DATA_CREDIT_BASE_IDX 1 +#define mmDAGB4_WR_MISC_CREDIT 0x0258 +#define mmDAGB4_WR_MISC_CREDIT_BASE_IDX 1 +#define mmDAGB4_WRCLI_ASK_PENDING 0x025d +#define mmDAGB4_WRCLI_ASK_PENDING_BASE_IDX 1 +#define mmDAGB4_WRCLI_GO_PENDING 0x025e +#define mmDAGB4_WRCLI_GO_PENDING_BASE_IDX 1 +#define mmDAGB4_WRCLI_GBLSEND_PENDING 0x025f +#define mmDAGB4_WRCLI_GBLSEND_PENDING_BASE_IDX 1 +#define mmDAGB4_WRCLI_TLB_PENDING 0x0260 +#define mmDAGB4_WRCLI_TLB_PENDING_BASE_IDX 1 +#define mmDAGB4_WRCLI_OARB_PENDING 0x0261 +#define mmDAGB4_WRCLI_OARB_PENDING_BASE_IDX 1 +#define mmDAGB4_WRCLI_OSD_PENDING 0x0262 +#define mmDAGB4_WRCLI_OSD_PENDING_BASE_IDX 1 +#define mmDAGB4_WRCLI_DBUS_ASK_PENDING 0x0263 +#define mmDAGB4_WRCLI_DBUS_ASK_PENDING_BASE_IDX 1 +#define mmDAGB4_WRCLI_DBUS_GO_PENDING 0x0264 +#define mmDAGB4_WRCLI_DBUS_GO_PENDING_BASE_IDX 1 +#define mmDAGB4_DAGB_DLY 0x0265 +#define mmDAGB4_DAGB_DLY_BASE_IDX 1 +#define mmDAGB4_CNTL_MISC 0x0266 +#define mmDAGB4_CNTL_MISC_BASE_IDX 1 +#define mmDAGB4_CNTL_MISC2 0x0267 +#define mmDAGB4_CNTL_MISC2_BASE_IDX 1 +#define mmDAGB4_FIFO_EMPTY 0x0268 +#define mmDAGB4_FIFO_EMPTY_BASE_IDX 1 +#define mmDAGB4_FIFO_FULL 0x0269 +#define mmDAGB4_FIFO_FULL_BASE_IDX 1 +#define mmDAGB4_WR_CREDITS_FULL 0x026a +#define mmDAGB4_WR_CREDITS_FULL_BASE_IDX 1 +#define mmDAGB4_RD_CREDITS_FULL 0x026b +#define mmDAGB4_RD_CREDITS_FULL_BASE_IDX 1 +#define mmDAGB4_PERFCOUNTER_LO 0x026c +#define mmDAGB4_PERFCOUNTER_LO_BASE_IDX 1 +#define mmDAGB4_PERFCOUNTER_HI 0x026d +#define mmDAGB4_PERFCOUNTER_HI_BASE_IDX 1 +#define mmDAGB4_PERFCOUNTER0_CFG 0x026e +#define mmDAGB4_PERFCOUNTER0_CFG_BASE_IDX 1 +#define mmDAGB4_PERFCOUNTER1_CFG 0x026f +#define mmDAGB4_PERFCOUNTER1_CFG_BASE_IDX 1 +#define mmDAGB4_PERFCOUNTER2_CFG 0x0270 +#define mmDAGB4_PERFCOUNTER2_CFG_BASE_IDX 1 +#define mmDAGB4_PERFCOUNTER_RSLT_CNTL 0x0271 +#define mmDAGB4_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1 +#define mmDAGB4_RESERVE0 0x0272 +#define mmDAGB4_RESERVE0_BASE_IDX 1 +#define mmDAGB4_RESERVE1 0x0273 +#define mmDAGB4_RESERVE1_BASE_IDX 1 +#define mmDAGB4_RESERVE2 0x0274 +#define mmDAGB4_RESERVE2_BASE_IDX 1 +#define mmDAGB4_RESERVE3 0x0275 +#define mmDAGB4_RESERVE3_BASE_IDX 1 +#define mmDAGB4_RESERVE4 0x0276 +#define mmDAGB4_RESERVE4_BASE_IDX 1 +#define mmDAGB4_RESERVE5 0x0277 +#define mmDAGB4_RESERVE5_BASE_IDX 1 +#define mmDAGB4_RESERVE6 0x0278 +#define mmDAGB4_RESERVE6_BASE_IDX 1 +#define mmDAGB4_RESERVE7 0x0279 +#define mmDAGB4_RESERVE7_BASE_IDX 1 +#define mmDAGB4_RESERVE8 0x027a +#define mmDAGB4_RESERVE8_BASE_IDX 1 +#define mmDAGB4_RESERVE9 0x027b +#define mmDAGB4_RESERVE9_BASE_IDX 1 +#define mmDAGB4_RESERVE10 0x027c +#define mmDAGB4_RESERVE10_BASE_IDX 1 +#define mmDAGB4_RESERVE11 0x027d +#define mmDAGB4_RESERVE11_BASE_IDX 1 +#define mmDAGB4_RESERVE12 0x027e +#define mmDAGB4_RESERVE12_BASE_IDX 1 +#define mmDAGB4_RESERVE13 0x027f +#define mmDAGB4_RESERVE13_BASE_IDX 1 + + +// 
addressBlock: mmhub_ea_mmeadec0 +// base address: 0x68a00 +#define mmMMEA0_DRAM_RD_CLI2GRP_MAP0 0x0280 +#define mmMMEA0_DRAM_RD_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA0_DRAM_RD_CLI2GRP_MAP1 0x0281 +#define mmMMEA0_DRAM_RD_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA0_DRAM_WR_CLI2GRP_MAP0 0x0282 +#define mmMMEA0_DRAM_WR_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA0_DRAM_WR_CLI2GRP_MAP1 0x0283 +#define mmMMEA0_DRAM_WR_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA0_DRAM_RD_GRP2VC_MAP 0x0284 +#define mmMMEA0_DRAM_RD_GRP2VC_MAP_BASE_IDX 1 +#define mmMMEA0_DRAM_WR_GRP2VC_MAP 0x0285 +#define mmMMEA0_DRAM_WR_GRP2VC_MAP_BASE_IDX 1 +#define mmMMEA0_DRAM_RD_LAZY 0x0286 +#define mmMMEA0_DRAM_RD_LAZY_BASE_IDX 1 +#define mmMMEA0_DRAM_WR_LAZY 0x0287 +#define mmMMEA0_DRAM_WR_LAZY_BASE_IDX 1 +#define mmMMEA0_DRAM_RD_CAM_CNTL 0x0288 +#define mmMMEA0_DRAM_RD_CAM_CNTL_BASE_IDX 1 +#define mmMMEA0_DRAM_WR_CAM_CNTL 0x0289 +#define mmMMEA0_DRAM_WR_CAM_CNTL_BASE_IDX 1 +#define mmMMEA0_DRAM_PAGE_BURST 0x028a +#define mmMMEA0_DRAM_PAGE_BURST_BASE_IDX 1 +#define mmMMEA0_DRAM_RD_PRI_AGE 0x028b +#define mmMMEA0_DRAM_RD_PRI_AGE_BASE_IDX 1 +#define mmMMEA0_DRAM_WR_PRI_AGE 0x028c +#define mmMMEA0_DRAM_WR_PRI_AGE_BASE_IDX 1 +#define mmMMEA0_DRAM_RD_PRI_QUEUING 0x028d +#define mmMMEA0_DRAM_RD_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA0_DRAM_WR_PRI_QUEUING 0x028e +#define mmMMEA0_DRAM_WR_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA0_DRAM_RD_PRI_FIXED 0x028f +#define mmMMEA0_DRAM_RD_PRI_FIXED_BASE_IDX 1 +#define mmMMEA0_DRAM_WR_PRI_FIXED 0x0290 +#define mmMMEA0_DRAM_WR_PRI_FIXED_BASE_IDX 1 +#define mmMMEA0_DRAM_RD_PRI_URGENCY 0x0291 +#define mmMMEA0_DRAM_RD_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA0_DRAM_WR_PRI_URGENCY 0x0292 +#define mmMMEA0_DRAM_WR_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA0_DRAM_RD_PRI_QUANT_PRI1 0x0293 +#define mmMMEA0_DRAM_RD_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA0_DRAM_RD_PRI_QUANT_PRI2 0x0294 +#define mmMMEA0_DRAM_RD_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA0_DRAM_RD_PRI_QUANT_PRI3 0x0295 +#define mmMMEA0_DRAM_RD_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA0_DRAM_WR_PRI_QUANT_PRI1 0x0296 +#define mmMMEA0_DRAM_WR_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA0_DRAM_WR_PRI_QUANT_PRI2 0x0297 +#define mmMMEA0_DRAM_WR_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA0_DRAM_WR_PRI_QUANT_PRI3 0x0298 +#define mmMMEA0_DRAM_WR_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA0_GMI_RD_CLI2GRP_MAP0 0x0299 +#define mmMMEA0_GMI_RD_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA0_GMI_RD_CLI2GRP_MAP1 0x029a +#define mmMMEA0_GMI_RD_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA0_GMI_WR_CLI2GRP_MAP0 0x029b +#define mmMMEA0_GMI_WR_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA0_GMI_WR_CLI2GRP_MAP1 0x029c +#define mmMMEA0_GMI_WR_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA0_GMI_RD_GRP2VC_MAP 0x029d +#define mmMMEA0_GMI_RD_GRP2VC_MAP_BASE_IDX 1 +#define mmMMEA0_GMI_WR_GRP2VC_MAP 0x029e +#define mmMMEA0_GMI_WR_GRP2VC_MAP_BASE_IDX 1 +#define mmMMEA0_GMI_RD_LAZY 0x029f +#define mmMMEA0_GMI_RD_LAZY_BASE_IDX 1 +#define mmMMEA0_GMI_WR_LAZY 0x02a0 +#define mmMMEA0_GMI_WR_LAZY_BASE_IDX 1 +#define mmMMEA0_GMI_RD_CAM_CNTL 0x02a1 +#define mmMMEA0_GMI_RD_CAM_CNTL_BASE_IDX 1 +#define mmMMEA0_GMI_WR_CAM_CNTL 0x02a2 +#define mmMMEA0_GMI_WR_CAM_CNTL_BASE_IDX 1 +#define mmMMEA0_GMI_PAGE_BURST 0x02a3 +#define mmMMEA0_GMI_PAGE_BURST_BASE_IDX 1 +#define mmMMEA0_GMI_RD_PRI_AGE 0x02a4 +#define mmMMEA0_GMI_RD_PRI_AGE_BASE_IDX 1 +#define mmMMEA0_GMI_WR_PRI_AGE 0x02a5 +#define mmMMEA0_GMI_WR_PRI_AGE_BASE_IDX 1 +#define mmMMEA0_GMI_RD_PRI_QUEUING 0x02a6 +#define mmMMEA0_GMI_RD_PRI_QUEUING_BASE_IDX 1 +#define 
mmMMEA0_GMI_WR_PRI_QUEUING 0x02a7 +#define mmMMEA0_GMI_WR_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA0_GMI_RD_PRI_FIXED 0x02a8 +#define mmMMEA0_GMI_RD_PRI_FIXED_BASE_IDX 1 +#define mmMMEA0_GMI_WR_PRI_FIXED 0x02a9 +#define mmMMEA0_GMI_WR_PRI_FIXED_BASE_IDX 1 +#define mmMMEA0_GMI_RD_PRI_URGENCY 0x02aa +#define mmMMEA0_GMI_RD_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA0_GMI_WR_PRI_URGENCY 0x02ab +#define mmMMEA0_GMI_WR_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA0_GMI_RD_PRI_URGENCY_MASKING 0x02ac +#define mmMMEA0_GMI_RD_PRI_URGENCY_MASKING_BASE_IDX 1 +#define mmMMEA0_GMI_WR_PRI_URGENCY_MASKING 0x02ad +#define mmMMEA0_GMI_WR_PRI_URGENCY_MASKING_BASE_IDX 1 +#define mmMMEA0_GMI_RD_PRI_QUANT_PRI1 0x02ae +#define mmMMEA0_GMI_RD_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA0_GMI_RD_PRI_QUANT_PRI2 0x02af +#define mmMMEA0_GMI_RD_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA0_GMI_RD_PRI_QUANT_PRI3 0x02b0 +#define mmMMEA0_GMI_RD_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA0_GMI_WR_PRI_QUANT_PRI1 0x02b1 +#define mmMMEA0_GMI_WR_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA0_GMI_WR_PRI_QUANT_PRI2 0x02b2 +#define mmMMEA0_GMI_WR_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA0_GMI_WR_PRI_QUANT_PRI3 0x02b3 +#define mmMMEA0_GMI_WR_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA0_ADDRNORM_BASE_ADDR0 0x02b4 +#define mmMMEA0_ADDRNORM_BASE_ADDR0_BASE_IDX 1 +#define mmMMEA0_ADDRNORM_LIMIT_ADDR0 0x02b5 +#define mmMMEA0_ADDRNORM_LIMIT_ADDR0_BASE_IDX 1 +#define mmMMEA0_ADDRNORM_BASE_ADDR1 0x02b6 +#define mmMMEA0_ADDRNORM_BASE_ADDR1_BASE_IDX 1 +#define mmMMEA0_ADDRNORM_LIMIT_ADDR1 0x02b7 +#define mmMMEA0_ADDRNORM_LIMIT_ADDR1_BASE_IDX 1 +#define mmMMEA0_ADDRNORM_OFFSET_ADDR1 0x02b8 +#define mmMMEA0_ADDRNORM_OFFSET_ADDR1_BASE_IDX 1 +#define mmMMEA0_ADDRNORM_BASE_ADDR2 0x02b9 +#define mmMMEA0_ADDRNORM_BASE_ADDR2_BASE_IDX 1 +#define mmMMEA0_ADDRNORM_LIMIT_ADDR2 0x02ba +#define mmMMEA0_ADDRNORM_LIMIT_ADDR2_BASE_IDX 1 +#define mmMMEA0_ADDRNORM_BASE_ADDR3 0x02bb +#define mmMMEA0_ADDRNORM_BASE_ADDR3_BASE_IDX 1 +#define mmMMEA0_ADDRNORM_LIMIT_ADDR3 0x02bc +#define mmMMEA0_ADDRNORM_LIMIT_ADDR3_BASE_IDX 1 +#define mmMMEA0_ADDRNORM_OFFSET_ADDR3 0x02bd +#define mmMMEA0_ADDRNORM_OFFSET_ADDR3_BASE_IDX 1 +#define mmMMEA0_ADDRNORM_BASE_ADDR4 0x02be +#define mmMMEA0_ADDRNORM_BASE_ADDR4_BASE_IDX 1 +#define mmMMEA0_ADDRNORM_LIMIT_ADDR4 0x02bf +#define mmMMEA0_ADDRNORM_LIMIT_ADDR4_BASE_IDX 1 +#define mmMMEA0_ADDRNORM_BASE_ADDR5 0x02c0 +#define mmMMEA0_ADDRNORM_BASE_ADDR5_BASE_IDX 1 +#define mmMMEA0_ADDRNORM_LIMIT_ADDR5 0x02c1 +#define mmMMEA0_ADDRNORM_LIMIT_ADDR5_BASE_IDX 1 +#define mmMMEA0_ADDRNORM_OFFSET_ADDR5 0x02c2 +#define mmMMEA0_ADDRNORM_OFFSET_ADDR5_BASE_IDX 1 +#define mmMMEA0_ADDRNORMDRAM_HOLE_CNTL 0x02c3 +#define mmMMEA0_ADDRNORMDRAM_HOLE_CNTL_BASE_IDX 1 +#define mmMMEA0_ADDRNORMGMI_HOLE_CNTL 0x02c4 +#define mmMMEA0_ADDRNORMGMI_HOLE_CNTL_BASE_IDX 1 +#define mmMMEA0_ADDRNORMDRAM_NP2_CHANNEL_CFG 0x02c5 +#define mmMMEA0_ADDRNORMDRAM_NP2_CHANNEL_CFG_BASE_IDX 1 +#define mmMMEA0_ADDRNORMGMI_NP2_CHANNEL_CFG 0x02c6 +#define mmMMEA0_ADDRNORMGMI_NP2_CHANNEL_CFG_BASE_IDX 1 +#define mmMMEA0_ADDRDEC_BANK_CFG 0x02c7 +#define mmMMEA0_ADDRDEC_BANK_CFG_BASE_IDX 1 +#define mmMMEA0_ADDRDEC_MISC_CFG 0x02c8 +#define mmMMEA0_ADDRDEC_MISC_CFG_BASE_IDX 1 +#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK0 0x02c9 +#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK0_BASE_IDX 1 +#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK1 0x02ca +#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK1_BASE_IDX 1 +#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK2 0x02cb +#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK2_BASE_IDX 1 +#define 
mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK3 0x02cc +#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK3_BASE_IDX 1 +#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK4 0x02cd +#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK4_BASE_IDX 1 +#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK5 0x02ce +#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_BANK5_BASE_IDX 1 +#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_PC 0x02cf +#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_PC_BASE_IDX 1 +#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_PC2 0x02d0 +#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_PC2_BASE_IDX 1 +#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_CS0 0x02d1 +#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_CS0_BASE_IDX 1 +#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_CS1 0x02d2 +#define mmMMEA0_ADDRDECDRAM_ADDR_HASH_CS1_BASE_IDX 1 +#define mmMMEA0_ADDRDECDRAM_HARVEST_ENABLE 0x02d3 +#define mmMMEA0_ADDRDECDRAM_HARVEST_ENABLE_BASE_IDX 1 +#define mmMMEA0_ADDRDECGMI_ADDR_HASH_BANK0 0x02d4 +#define mmMMEA0_ADDRDECGMI_ADDR_HASH_BANK0_BASE_IDX 1 +#define mmMMEA0_ADDRDECGMI_ADDR_HASH_BANK1 0x02d5 +#define mmMMEA0_ADDRDECGMI_ADDR_HASH_BANK1_BASE_IDX 1 +#define mmMMEA0_ADDRDECGMI_ADDR_HASH_BANK2 0x02d6 +#define mmMMEA0_ADDRDECGMI_ADDR_HASH_BANK2_BASE_IDX 1 +#define mmMMEA0_ADDRDECGMI_ADDR_HASH_BANK3 0x02d7 +#define mmMMEA0_ADDRDECGMI_ADDR_HASH_BANK3_BASE_IDX 1 +#define mmMMEA0_ADDRDECGMI_ADDR_HASH_BANK4 0x02d8 +#define mmMMEA0_ADDRDECGMI_ADDR_HASH_BANK4_BASE_IDX 1 +#define mmMMEA0_ADDRDECGMI_ADDR_HASH_BANK5 0x02d9 +#define mmMMEA0_ADDRDECGMI_ADDR_HASH_BANK5_BASE_IDX 1 +#define mmMMEA0_ADDRDECGMI_ADDR_HASH_PC 0x02da +#define mmMMEA0_ADDRDECGMI_ADDR_HASH_PC_BASE_IDX 1 +#define mmMMEA0_ADDRDECGMI_ADDR_HASH_PC2 0x02db +#define mmMMEA0_ADDRDECGMI_ADDR_HASH_PC2_BASE_IDX 1 +#define mmMMEA0_ADDRDECGMI_ADDR_HASH_CS0 0x02dc +#define mmMMEA0_ADDRDECGMI_ADDR_HASH_CS0_BASE_IDX 1 +#define mmMMEA0_ADDRDECGMI_ADDR_HASH_CS1 0x02dd +#define mmMMEA0_ADDRDECGMI_ADDR_HASH_CS1_BASE_IDX 1 +#define mmMMEA0_ADDRDECGMI_HARVEST_ENABLE 0x02de +#define mmMMEA0_ADDRDECGMI_HARVEST_ENABLE_BASE_IDX 1 +#define mmMMEA0_ADDRDEC0_BASE_ADDR_CS0 0x02df +#define mmMMEA0_ADDRDEC0_BASE_ADDR_CS0_BASE_IDX 1 +#define mmMMEA0_ADDRDEC0_BASE_ADDR_CS1 0x02e0 +#define mmMMEA0_ADDRDEC0_BASE_ADDR_CS1_BASE_IDX 1 +#define mmMMEA0_ADDRDEC0_BASE_ADDR_CS2 0x02e1 +#define mmMMEA0_ADDRDEC0_BASE_ADDR_CS2_BASE_IDX 1 +#define mmMMEA0_ADDRDEC0_BASE_ADDR_CS3 0x02e2 +#define mmMMEA0_ADDRDEC0_BASE_ADDR_CS3_BASE_IDX 1 +#define mmMMEA0_ADDRDEC0_BASE_ADDR_SECCS0 0x02e3 +#define mmMMEA0_ADDRDEC0_BASE_ADDR_SECCS0_BASE_IDX 1 +#define mmMMEA0_ADDRDEC0_BASE_ADDR_SECCS1 0x02e4 +#define mmMMEA0_ADDRDEC0_BASE_ADDR_SECCS1_BASE_IDX 1 +#define mmMMEA0_ADDRDEC0_BASE_ADDR_SECCS2 0x02e5 +#define mmMMEA0_ADDRDEC0_BASE_ADDR_SECCS2_BASE_IDX 1 +#define mmMMEA0_ADDRDEC0_BASE_ADDR_SECCS3 0x02e6 +#define mmMMEA0_ADDRDEC0_BASE_ADDR_SECCS3_BASE_IDX 1 +#define mmMMEA0_ADDRDEC0_ADDR_MASK_CS01 0x02e7 +#define mmMMEA0_ADDRDEC0_ADDR_MASK_CS01_BASE_IDX 1 +#define mmMMEA0_ADDRDEC0_ADDR_MASK_CS23 0x02e8 +#define mmMMEA0_ADDRDEC0_ADDR_MASK_CS23_BASE_IDX 1 +#define mmMMEA0_ADDRDEC0_ADDR_MASK_SECCS01 0x02e9 +#define mmMMEA0_ADDRDEC0_ADDR_MASK_SECCS01_BASE_IDX 1 +#define mmMMEA0_ADDRDEC0_ADDR_MASK_SECCS23 0x02ea +#define mmMMEA0_ADDRDEC0_ADDR_MASK_SECCS23_BASE_IDX 1 +#define mmMMEA0_ADDRDEC0_ADDR_CFG_CS01 0x02eb +#define mmMMEA0_ADDRDEC0_ADDR_CFG_CS01_BASE_IDX 1 +#define mmMMEA0_ADDRDEC0_ADDR_CFG_CS23 0x02ec +#define mmMMEA0_ADDRDEC0_ADDR_CFG_CS23_BASE_IDX 1 +#define mmMMEA0_ADDRDEC0_ADDR_SEL_CS01 0x02ed +#define mmMMEA0_ADDRDEC0_ADDR_SEL_CS01_BASE_IDX 1 +#define mmMMEA0_ADDRDEC0_ADDR_SEL_CS23 0x02ee +#define 
mmMMEA0_ADDRDEC0_ADDR_SEL_CS23_BASE_IDX 1 +#define mmMMEA0_ADDRDEC0_ADDR_SEL2_CS01 0x02ef +#define mmMMEA0_ADDRDEC0_ADDR_SEL2_CS01_BASE_IDX 1 +#define mmMMEA0_ADDRDEC0_ADDR_SEL2_CS23 0x02f0 +#define mmMMEA0_ADDRDEC0_ADDR_SEL2_CS23_BASE_IDX 1 +#define mmMMEA0_ADDRDEC0_COL_SEL_LO_CS01 0x02f1 +#define mmMMEA0_ADDRDEC0_COL_SEL_LO_CS01_BASE_IDX 1 +#define mmMMEA0_ADDRDEC0_COL_SEL_LO_CS23 0x02f2 +#define mmMMEA0_ADDRDEC0_COL_SEL_LO_CS23_BASE_IDX 1 +#define mmMMEA0_ADDRDEC0_COL_SEL_HI_CS01 0x02f3 +#define mmMMEA0_ADDRDEC0_COL_SEL_HI_CS01_BASE_IDX 1 +#define mmMMEA0_ADDRDEC0_COL_SEL_HI_CS23 0x02f4 +#define mmMMEA0_ADDRDEC0_COL_SEL_HI_CS23_BASE_IDX 1 +#define mmMMEA0_ADDRDEC0_RM_SEL_CS01 0x02f5 +#define mmMMEA0_ADDRDEC0_RM_SEL_CS01_BASE_IDX 1 +#define mmMMEA0_ADDRDEC0_RM_SEL_CS23 0x02f6 +#define mmMMEA0_ADDRDEC0_RM_SEL_CS23_BASE_IDX 1 +#define mmMMEA0_ADDRDEC0_RM_SEL_SECCS01 0x02f7 +#define mmMMEA0_ADDRDEC0_RM_SEL_SECCS01_BASE_IDX 1 +#define mmMMEA0_ADDRDEC0_RM_SEL_SECCS23 0x02f8 +#define mmMMEA0_ADDRDEC0_RM_SEL_SECCS23_BASE_IDX 1 +#define mmMMEA0_ADDRDEC1_BASE_ADDR_CS0 0x02f9 +#define mmMMEA0_ADDRDEC1_BASE_ADDR_CS0_BASE_IDX 1 +#define mmMMEA0_ADDRDEC1_BASE_ADDR_CS1 0x02fa +#define mmMMEA0_ADDRDEC1_BASE_ADDR_CS1_BASE_IDX 1 +#define mmMMEA0_ADDRDEC1_BASE_ADDR_CS2 0x02fb +#define mmMMEA0_ADDRDEC1_BASE_ADDR_CS2_BASE_IDX 1 +#define mmMMEA0_ADDRDEC1_BASE_ADDR_CS3 0x02fc +#define mmMMEA0_ADDRDEC1_BASE_ADDR_CS3_BASE_IDX 1 +#define mmMMEA0_ADDRDEC1_BASE_ADDR_SECCS0 0x02fd +#define mmMMEA0_ADDRDEC1_BASE_ADDR_SECCS0_BASE_IDX 1 +#define mmMMEA0_ADDRDEC1_BASE_ADDR_SECCS1 0x02fe +#define mmMMEA0_ADDRDEC1_BASE_ADDR_SECCS1_BASE_IDX 1 +#define mmMMEA0_ADDRDEC1_BASE_ADDR_SECCS2 0x02ff +#define mmMMEA0_ADDRDEC1_BASE_ADDR_SECCS2_BASE_IDX 1 +#define mmMMEA0_ADDRDEC1_BASE_ADDR_SECCS3 0x0300 +#define mmMMEA0_ADDRDEC1_BASE_ADDR_SECCS3_BASE_IDX 1 +#define mmMMEA0_ADDRDEC1_ADDR_MASK_CS01 0x0301 +#define mmMMEA0_ADDRDEC1_ADDR_MASK_CS01_BASE_IDX 1 +#define mmMMEA0_ADDRDEC1_ADDR_MASK_CS23 0x0302 +#define mmMMEA0_ADDRDEC1_ADDR_MASK_CS23_BASE_IDX 1 +#define mmMMEA0_ADDRDEC1_ADDR_MASK_SECCS01 0x0303 +#define mmMMEA0_ADDRDEC1_ADDR_MASK_SECCS01_BASE_IDX 1 +#define mmMMEA0_ADDRDEC1_ADDR_MASK_SECCS23 0x0304 +#define mmMMEA0_ADDRDEC1_ADDR_MASK_SECCS23_BASE_IDX 1 +#define mmMMEA0_ADDRDEC1_ADDR_CFG_CS01 0x0305 +#define mmMMEA0_ADDRDEC1_ADDR_CFG_CS01_BASE_IDX 1 +#define mmMMEA0_ADDRDEC1_ADDR_CFG_CS23 0x0306 +#define mmMMEA0_ADDRDEC1_ADDR_CFG_CS23_BASE_IDX 1 +#define mmMMEA0_ADDRDEC1_ADDR_SEL_CS01 0x0307 +#define mmMMEA0_ADDRDEC1_ADDR_SEL_CS01_BASE_IDX 1 +#define mmMMEA0_ADDRDEC1_ADDR_SEL_CS23 0x0308 +#define mmMMEA0_ADDRDEC1_ADDR_SEL_CS23_BASE_IDX 1 +#define mmMMEA0_ADDRDEC1_ADDR_SEL2_CS01 0x0309 +#define mmMMEA0_ADDRDEC1_ADDR_SEL2_CS01_BASE_IDX 1 +#define mmMMEA0_ADDRDEC1_ADDR_SEL2_CS23 0x030a +#define mmMMEA0_ADDRDEC1_ADDR_SEL2_CS23_BASE_IDX 1 +#define mmMMEA0_ADDRDEC1_COL_SEL_LO_CS01 0x030b +#define mmMMEA0_ADDRDEC1_COL_SEL_LO_CS01_BASE_IDX 1 +#define mmMMEA0_ADDRDEC1_COL_SEL_LO_CS23 0x030c +#define mmMMEA0_ADDRDEC1_COL_SEL_LO_CS23_BASE_IDX 1 +#define mmMMEA0_ADDRDEC1_COL_SEL_HI_CS01 0x030d +#define mmMMEA0_ADDRDEC1_COL_SEL_HI_CS01_BASE_IDX 1 +#define mmMMEA0_ADDRDEC1_COL_SEL_HI_CS23 0x030e +#define mmMMEA0_ADDRDEC1_COL_SEL_HI_CS23_BASE_IDX 1 +#define mmMMEA0_ADDRDEC1_RM_SEL_CS01 0x030f +#define mmMMEA0_ADDRDEC1_RM_SEL_CS01_BASE_IDX 1 +#define mmMMEA0_ADDRDEC1_RM_SEL_CS23 0x0310 +#define mmMMEA0_ADDRDEC1_RM_SEL_CS23_BASE_IDX 1 +#define mmMMEA0_ADDRDEC1_RM_SEL_SECCS01 0x0311 +#define mmMMEA0_ADDRDEC1_RM_SEL_SECCS01_BASE_IDX 1 +#define 
mmMMEA0_ADDRDEC1_RM_SEL_SECCS23 0x0312 +#define mmMMEA0_ADDRDEC1_RM_SEL_SECCS23_BASE_IDX 1 +#define mmMMEA0_ADDRDEC2_BASE_ADDR_CS0 0x0313 +#define mmMMEA0_ADDRDEC2_BASE_ADDR_CS0_BASE_IDX 1 +#define mmMMEA0_ADDRDEC2_BASE_ADDR_CS1 0x0314 +#define mmMMEA0_ADDRDEC2_BASE_ADDR_CS1_BASE_IDX 1 +#define mmMMEA0_ADDRDEC2_BASE_ADDR_CS2 0x0315 +#define mmMMEA0_ADDRDEC2_BASE_ADDR_CS2_BASE_IDX 1 +#define mmMMEA0_ADDRDEC2_BASE_ADDR_CS3 0x0316 +#define mmMMEA0_ADDRDEC2_BASE_ADDR_CS3_BASE_IDX 1 +#define mmMMEA0_ADDRDEC2_BASE_ADDR_SECCS0 0x0317 +#define mmMMEA0_ADDRDEC2_BASE_ADDR_SECCS0_BASE_IDX 1 +#define mmMMEA0_ADDRDEC2_BASE_ADDR_SECCS1 0x0318 +#define mmMMEA0_ADDRDEC2_BASE_ADDR_SECCS1_BASE_IDX 1 +#define mmMMEA0_ADDRDEC2_BASE_ADDR_SECCS2 0x0319 +#define mmMMEA0_ADDRDEC2_BASE_ADDR_SECCS2_BASE_IDX 1 +#define mmMMEA0_ADDRDEC2_BASE_ADDR_SECCS3 0x031a +#define mmMMEA0_ADDRDEC2_BASE_ADDR_SECCS3_BASE_IDX 1 +#define mmMMEA0_ADDRDEC2_ADDR_MASK_CS01 0x031b +#define mmMMEA0_ADDRDEC2_ADDR_MASK_CS01_BASE_IDX 1 +#define mmMMEA0_ADDRDEC2_ADDR_MASK_CS23 0x031c +#define mmMMEA0_ADDRDEC2_ADDR_MASK_CS23_BASE_IDX 1 +#define mmMMEA0_ADDRDEC2_ADDR_MASK_SECCS01 0x031d +#define mmMMEA0_ADDRDEC2_ADDR_MASK_SECCS01_BASE_IDX 1 +#define mmMMEA0_ADDRDEC2_ADDR_MASK_SECCS23 0x031e +#define mmMMEA0_ADDRDEC2_ADDR_MASK_SECCS23_BASE_IDX 1 +#define mmMMEA0_ADDRDEC2_ADDR_CFG_CS01 0x031f +#define mmMMEA0_ADDRDEC2_ADDR_CFG_CS01_BASE_IDX 1 +#define mmMMEA0_ADDRDEC2_ADDR_CFG_CS23 0x0320 +#define mmMMEA0_ADDRDEC2_ADDR_CFG_CS23_BASE_IDX 1 +#define mmMMEA0_ADDRDEC2_ADDR_SEL_CS01 0x0321 +#define mmMMEA0_ADDRDEC2_ADDR_SEL_CS01_BASE_IDX 1 +#define mmMMEA0_ADDRDEC2_ADDR_SEL_CS23 0x0322 +#define mmMMEA0_ADDRDEC2_ADDR_SEL_CS23_BASE_IDX 1 +#define mmMMEA0_ADDRDEC2_ADDR_SEL2_CS01 0x0323 +#define mmMMEA0_ADDRDEC2_ADDR_SEL2_CS01_BASE_IDX 1 +#define mmMMEA0_ADDRDEC2_ADDR_SEL2_CS23 0x0324 +#define mmMMEA0_ADDRDEC2_ADDR_SEL2_CS23_BASE_IDX 1 +#define mmMMEA0_ADDRDEC2_COL_SEL_LO_CS01 0x0325 +#define mmMMEA0_ADDRDEC2_COL_SEL_LO_CS01_BASE_IDX 1 +#define mmMMEA0_ADDRDEC2_COL_SEL_LO_CS23 0x0326 +#define mmMMEA0_ADDRDEC2_COL_SEL_LO_CS23_BASE_IDX 1 +#define mmMMEA0_ADDRDEC2_COL_SEL_HI_CS01 0x0327 +#define mmMMEA0_ADDRDEC2_COL_SEL_HI_CS01_BASE_IDX 1 +#define mmMMEA0_ADDRDEC2_COL_SEL_HI_CS23 0x0328 +#define mmMMEA0_ADDRDEC2_COL_SEL_HI_CS23_BASE_IDX 1 +#define mmMMEA0_ADDRDEC2_RM_SEL_CS01 0x0329 +#define mmMMEA0_ADDRDEC2_RM_SEL_CS01_BASE_IDX 1 +#define mmMMEA0_ADDRDEC2_RM_SEL_CS23 0x032a +#define mmMMEA0_ADDRDEC2_RM_SEL_CS23_BASE_IDX 1 +#define mmMMEA0_ADDRDEC2_RM_SEL_SECCS01 0x032b +#define mmMMEA0_ADDRDEC2_RM_SEL_SECCS01_BASE_IDX 1 +#define mmMMEA0_ADDRDEC2_RM_SEL_SECCS23 0x032c +#define mmMMEA0_ADDRDEC2_RM_SEL_SECCS23_BASE_IDX 1 +#define mmMMEA0_ADDRNORMDRAM_GLOBAL_CNTL 0x032d +#define mmMMEA0_ADDRNORMDRAM_GLOBAL_CNTL_BASE_IDX 1 +#define mmMMEA0_ADDRNORMGMI_GLOBAL_CNTL 0x032e +#define mmMMEA0_ADDRNORMGMI_GLOBAL_CNTL_BASE_IDX 1 +#define mmMMEA0_IO_RD_CLI2GRP_MAP0 0x0355 +#define mmMMEA0_IO_RD_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA0_IO_RD_CLI2GRP_MAP1 0x0356 +#define mmMMEA0_IO_RD_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA0_IO_WR_CLI2GRP_MAP0 0x0357 +#define mmMMEA0_IO_WR_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA0_IO_WR_CLI2GRP_MAP1 0x0358 +#define mmMMEA0_IO_WR_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA0_IO_RD_COMBINE_FLUSH 0x0359 +#define mmMMEA0_IO_RD_COMBINE_FLUSH_BASE_IDX 1 +#define mmMMEA0_IO_WR_COMBINE_FLUSH 0x035a +#define mmMMEA0_IO_WR_COMBINE_FLUSH_BASE_IDX 1 +#define mmMMEA0_IO_GROUP_BURST 0x035b +#define mmMMEA0_IO_GROUP_BURST_BASE_IDX 1 +#define 
mmMMEA0_IO_RD_PRI_AGE 0x035c +#define mmMMEA0_IO_RD_PRI_AGE_BASE_IDX 1 +#define mmMMEA0_IO_WR_PRI_AGE 0x035d +#define mmMMEA0_IO_WR_PRI_AGE_BASE_IDX 1 +#define mmMMEA0_IO_RD_PRI_QUEUING 0x035e +#define mmMMEA0_IO_RD_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA0_IO_WR_PRI_QUEUING 0x035f +#define mmMMEA0_IO_WR_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA0_IO_RD_PRI_FIXED 0x0360 +#define mmMMEA0_IO_RD_PRI_FIXED_BASE_IDX 1 +#define mmMMEA0_IO_WR_PRI_FIXED 0x0361 +#define mmMMEA0_IO_WR_PRI_FIXED_BASE_IDX 1 +#define mmMMEA0_IO_RD_PRI_URGENCY 0x0362 +#define mmMMEA0_IO_RD_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA0_IO_WR_PRI_URGENCY 0x0363 +#define mmMMEA0_IO_WR_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA0_IO_RD_PRI_URGENCY_MASKING 0x0364 +#define mmMMEA0_IO_RD_PRI_URGENCY_MASKING_BASE_IDX 1 +#define mmMMEA0_IO_WR_PRI_URGENCY_MASKING 0x0365 +#define mmMMEA0_IO_WR_PRI_URGENCY_MASKING_BASE_IDX 1 +#define mmMMEA0_IO_RD_PRI_QUANT_PRI1 0x0366 +#define mmMMEA0_IO_RD_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA0_IO_RD_PRI_QUANT_PRI2 0x0367 +#define mmMMEA0_IO_RD_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA0_IO_RD_PRI_QUANT_PRI3 0x0368 +#define mmMMEA0_IO_RD_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA0_IO_WR_PRI_QUANT_PRI1 0x0369 +#define mmMMEA0_IO_WR_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA0_IO_WR_PRI_QUANT_PRI2 0x036a +#define mmMMEA0_IO_WR_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA0_IO_WR_PRI_QUANT_PRI3 0x036b +#define mmMMEA0_IO_WR_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA0_SDP_ARB_DRAM 0x036c +#define mmMMEA0_SDP_ARB_DRAM_BASE_IDX 1 +#define mmMMEA0_SDP_ARB_GMI 0x036d +#define mmMMEA0_SDP_ARB_GMI_BASE_IDX 1 +#define mmMMEA0_SDP_ARB_FINAL 0x036e +#define mmMMEA0_SDP_ARB_FINAL_BASE_IDX 1 +#define mmMMEA0_SDP_DRAM_PRIORITY 0x036f +#define mmMMEA0_SDP_DRAM_PRIORITY_BASE_IDX 1 +#define mmMMEA0_SDP_GMI_PRIORITY 0x0370 +#define mmMMEA0_SDP_GMI_PRIORITY_BASE_IDX 1 +#define mmMMEA0_SDP_IO_PRIORITY 0x0371 +#define mmMMEA0_SDP_IO_PRIORITY_BASE_IDX 1 +#define mmMMEA0_SDP_CREDITS 0x0372 +#define mmMMEA0_SDP_CREDITS_BASE_IDX 1 +#define mmMMEA0_SDP_TAG_RESERVE0 0x0373 +#define mmMMEA0_SDP_TAG_RESERVE0_BASE_IDX 1 +#define mmMMEA0_SDP_TAG_RESERVE1 0x0374 +#define mmMMEA0_SDP_TAG_RESERVE1_BASE_IDX 1 +#define mmMMEA0_SDP_VCC_RESERVE0 0x0375 +#define mmMMEA0_SDP_VCC_RESERVE0_BASE_IDX 1 +#define mmMMEA0_SDP_VCC_RESERVE1 0x0376 +#define mmMMEA0_SDP_VCC_RESERVE1_BASE_IDX 1 +#define mmMMEA0_SDP_VCD_RESERVE0 0x0377 +#define mmMMEA0_SDP_VCD_RESERVE0_BASE_IDX 1 +#define mmMMEA0_SDP_VCD_RESERVE1 0x0378 +#define mmMMEA0_SDP_VCD_RESERVE1_BASE_IDX 1 +#define mmMMEA0_SDP_REQ_CNTL 0x0379 +#define mmMMEA0_SDP_REQ_CNTL_BASE_IDX 1 +#define mmMMEA0_MISC 0x037a +#define mmMMEA0_MISC_BASE_IDX 1 +#define mmMMEA0_LATENCY_SAMPLING 0x037b +#define mmMMEA0_LATENCY_SAMPLING_BASE_IDX 1 +#define mmMMEA0_PERFCOUNTER_LO 0x037c +#define mmMMEA0_PERFCOUNTER_LO_BASE_IDX 1 +#define mmMMEA0_PERFCOUNTER_HI 0x037d +#define mmMMEA0_PERFCOUNTER_HI_BASE_IDX 1 +#define mmMMEA0_PERFCOUNTER0_CFG 0x037e +#define mmMMEA0_PERFCOUNTER0_CFG_BASE_IDX 1 +#define mmMMEA0_PERFCOUNTER1_CFG 0x037f +#define mmMMEA0_PERFCOUNTER1_CFG_BASE_IDX 1 +#define mmMMEA0_PERFCOUNTER_RSLT_CNTL 0x0380 +#define mmMMEA0_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1 +#define mmMMEA0_EDC_CNT 0x0386 +#define mmMMEA0_EDC_CNT_BASE_IDX 1 +#define mmMMEA0_EDC_CNT2 0x0387 +#define mmMMEA0_EDC_CNT2_BASE_IDX 1 +#define mmMMEA0_DSM_CNTL 0x0388 +#define mmMMEA0_DSM_CNTL_BASE_IDX 1 +#define mmMMEA0_DSM_CNTLA 0x0389 +#define mmMMEA0_DSM_CNTLA_BASE_IDX 1 +#define mmMMEA0_DSM_CNTLB 0x038a +#define mmMMEA0_DSM_CNTLB_BASE_IDX 1 +#define 
mmMMEA0_DSM_CNTL2 0x038b +#define mmMMEA0_DSM_CNTL2_BASE_IDX 1 +#define mmMMEA0_DSM_CNTL2A 0x038c +#define mmMMEA0_DSM_CNTL2A_BASE_IDX 1 +#define mmMMEA0_DSM_CNTL2B 0x038d +#define mmMMEA0_DSM_CNTL2B_BASE_IDX 1 +#define mmMMEA0_CGTT_CLK_CTRL 0x038f +#define mmMMEA0_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmMMEA0_EDC_MODE 0x0390 +#define mmMMEA0_EDC_MODE_BASE_IDX 1 +#define mmMMEA0_ERR_STATUS 0x0391 +#define mmMMEA0_ERR_STATUS_BASE_IDX 1 +#define mmMMEA0_MISC2 0x0392 +#define mmMMEA0_MISC2_BASE_IDX 1 +#define mmMMEA0_ADDRDEC_SELECT 0x0393 +#define mmMMEA0_ADDRDEC_SELECT_BASE_IDX 1 +#define mmMMEA0_EDC_CNT3 0x0394 +#define mmMMEA0_EDC_CNT3_BASE_IDX 1 + + +// addressBlock: mmhub_ea_mmeadec1 +// base address: 0x68f00 +#define mmMMEA1_DRAM_RD_CLI2GRP_MAP0 0x03c0 +#define mmMMEA1_DRAM_RD_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA1_DRAM_RD_CLI2GRP_MAP1 0x03c1 +#define mmMMEA1_DRAM_RD_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA1_DRAM_WR_CLI2GRP_MAP0 0x03c2 +#define mmMMEA1_DRAM_WR_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA1_DRAM_WR_CLI2GRP_MAP1 0x03c3 +#define mmMMEA1_DRAM_WR_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA1_DRAM_RD_GRP2VC_MAP 0x03c4 +#define mmMMEA1_DRAM_RD_GRP2VC_MAP_BASE_IDX 1 +#define mmMMEA1_DRAM_WR_GRP2VC_MAP 0x03c5 +#define mmMMEA1_DRAM_WR_GRP2VC_MAP_BASE_IDX 1 +#define mmMMEA1_DRAM_RD_LAZY 0x03c6 +#define mmMMEA1_DRAM_RD_LAZY_BASE_IDX 1 +#define mmMMEA1_DRAM_WR_LAZY 0x03c7 +#define mmMMEA1_DRAM_WR_LAZY_BASE_IDX 1 +#define mmMMEA1_DRAM_RD_CAM_CNTL 0x03c8 +#define mmMMEA1_DRAM_RD_CAM_CNTL_BASE_IDX 1 +#define mmMMEA1_DRAM_WR_CAM_CNTL 0x03c9 +#define mmMMEA1_DRAM_WR_CAM_CNTL_BASE_IDX 1 +#define mmMMEA1_DRAM_PAGE_BURST 0x03ca +#define mmMMEA1_DRAM_PAGE_BURST_BASE_IDX 1 +#define mmMMEA1_DRAM_RD_PRI_AGE 0x03cb +#define mmMMEA1_DRAM_RD_PRI_AGE_BASE_IDX 1 +#define mmMMEA1_DRAM_WR_PRI_AGE 0x03cc +#define mmMMEA1_DRAM_WR_PRI_AGE_BASE_IDX 1 +#define mmMMEA1_DRAM_RD_PRI_QUEUING 0x03cd +#define mmMMEA1_DRAM_RD_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA1_DRAM_WR_PRI_QUEUING 0x03ce +#define mmMMEA1_DRAM_WR_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA1_DRAM_RD_PRI_FIXED 0x03cf +#define mmMMEA1_DRAM_RD_PRI_FIXED_BASE_IDX 1 +#define mmMMEA1_DRAM_WR_PRI_FIXED 0x03d0 +#define mmMMEA1_DRAM_WR_PRI_FIXED_BASE_IDX 1 +#define mmMMEA1_DRAM_RD_PRI_URGENCY 0x03d1 +#define mmMMEA1_DRAM_RD_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA1_DRAM_WR_PRI_URGENCY 0x03d2 +#define mmMMEA1_DRAM_WR_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA1_DRAM_RD_PRI_QUANT_PRI1 0x03d3 +#define mmMMEA1_DRAM_RD_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA1_DRAM_RD_PRI_QUANT_PRI2 0x03d4 +#define mmMMEA1_DRAM_RD_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA1_DRAM_RD_PRI_QUANT_PRI3 0x03d5 +#define mmMMEA1_DRAM_RD_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA1_DRAM_WR_PRI_QUANT_PRI1 0x03d6 +#define mmMMEA1_DRAM_WR_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA1_DRAM_WR_PRI_QUANT_PRI2 0x03d7 +#define mmMMEA1_DRAM_WR_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA1_DRAM_WR_PRI_QUANT_PRI3 0x03d8 +#define mmMMEA1_DRAM_WR_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA1_GMI_RD_CLI2GRP_MAP0 0x03d9 +#define mmMMEA1_GMI_RD_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA1_GMI_RD_CLI2GRP_MAP1 0x03da +#define mmMMEA1_GMI_RD_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA1_GMI_WR_CLI2GRP_MAP0 0x03db +#define mmMMEA1_GMI_WR_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA1_GMI_WR_CLI2GRP_MAP1 0x03dc +#define mmMMEA1_GMI_WR_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA1_GMI_RD_GRP2VC_MAP 0x03dd +#define mmMMEA1_GMI_RD_GRP2VC_MAP_BASE_IDX 1 +#define mmMMEA1_GMI_WR_GRP2VC_MAP 0x03de +#define mmMMEA1_GMI_WR_GRP2VC_MAP_BASE_IDX 1 +#define 
mmMMEA1_GMI_RD_LAZY 0x03df +#define mmMMEA1_GMI_RD_LAZY_BASE_IDX 1 +#define mmMMEA1_GMI_WR_LAZY 0x03e0 +#define mmMMEA1_GMI_WR_LAZY_BASE_IDX 1 +#define mmMMEA1_GMI_RD_CAM_CNTL 0x03e1 +#define mmMMEA1_GMI_RD_CAM_CNTL_BASE_IDX 1 +#define mmMMEA1_GMI_WR_CAM_CNTL 0x03e2 +#define mmMMEA1_GMI_WR_CAM_CNTL_BASE_IDX 1 +#define mmMMEA1_GMI_PAGE_BURST 0x03e3 +#define mmMMEA1_GMI_PAGE_BURST_BASE_IDX 1 +#define mmMMEA1_GMI_RD_PRI_AGE 0x03e4 +#define mmMMEA1_GMI_RD_PRI_AGE_BASE_IDX 1 +#define mmMMEA1_GMI_WR_PRI_AGE 0x03e5 +#define mmMMEA1_GMI_WR_PRI_AGE_BASE_IDX 1 +#define mmMMEA1_GMI_RD_PRI_QUEUING 0x03e6 +#define mmMMEA1_GMI_RD_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA1_GMI_WR_PRI_QUEUING 0x03e7 +#define mmMMEA1_GMI_WR_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA1_GMI_RD_PRI_FIXED 0x03e8 +#define mmMMEA1_GMI_RD_PRI_FIXED_BASE_IDX 1 +#define mmMMEA1_GMI_WR_PRI_FIXED 0x03e9 +#define mmMMEA1_GMI_WR_PRI_FIXED_BASE_IDX 1 +#define mmMMEA1_GMI_RD_PRI_URGENCY 0x03ea +#define mmMMEA1_GMI_RD_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA1_GMI_WR_PRI_URGENCY 0x03eb +#define mmMMEA1_GMI_WR_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA1_GMI_RD_PRI_URGENCY_MASKING 0x03ec +#define mmMMEA1_GMI_RD_PRI_URGENCY_MASKING_BASE_IDX 1 +#define mmMMEA1_GMI_WR_PRI_URGENCY_MASKING 0x03ed +#define mmMMEA1_GMI_WR_PRI_URGENCY_MASKING_BASE_IDX 1 +#define mmMMEA1_GMI_RD_PRI_QUANT_PRI1 0x03ee +#define mmMMEA1_GMI_RD_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA1_GMI_RD_PRI_QUANT_PRI2 0x03ef +#define mmMMEA1_GMI_RD_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA1_GMI_RD_PRI_QUANT_PRI3 0x03f0 +#define mmMMEA1_GMI_RD_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA1_GMI_WR_PRI_QUANT_PRI1 0x03f1 +#define mmMMEA1_GMI_WR_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA1_GMI_WR_PRI_QUANT_PRI2 0x03f2 +#define mmMMEA1_GMI_WR_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA1_GMI_WR_PRI_QUANT_PRI3 0x03f3 +#define mmMMEA1_GMI_WR_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA1_ADDRNORM_BASE_ADDR0 0x03f4 +#define mmMMEA1_ADDRNORM_BASE_ADDR0_BASE_IDX 1 +#define mmMMEA1_ADDRNORM_LIMIT_ADDR0 0x03f5 +#define mmMMEA1_ADDRNORM_LIMIT_ADDR0_BASE_IDX 1 +#define mmMMEA1_ADDRNORM_BASE_ADDR1 0x03f6 +#define mmMMEA1_ADDRNORM_BASE_ADDR1_BASE_IDX 1 +#define mmMMEA1_ADDRNORM_LIMIT_ADDR1 0x03f7 +#define mmMMEA1_ADDRNORM_LIMIT_ADDR1_BASE_IDX 1 +#define mmMMEA1_ADDRNORM_OFFSET_ADDR1 0x03f8 +#define mmMMEA1_ADDRNORM_OFFSET_ADDR1_BASE_IDX 1 +#define mmMMEA1_ADDRNORM_BASE_ADDR2 0x03f9 +#define mmMMEA1_ADDRNORM_BASE_ADDR2_BASE_IDX 1 +#define mmMMEA1_ADDRNORM_LIMIT_ADDR2 0x03fa +#define mmMMEA1_ADDRNORM_LIMIT_ADDR2_BASE_IDX 1 +#define mmMMEA1_ADDRNORM_BASE_ADDR3 0x03fb +#define mmMMEA1_ADDRNORM_BASE_ADDR3_BASE_IDX 1 +#define mmMMEA1_ADDRNORM_LIMIT_ADDR3 0x03fc +#define mmMMEA1_ADDRNORM_LIMIT_ADDR3_BASE_IDX 1 +#define mmMMEA1_ADDRNORM_OFFSET_ADDR3 0x03fd +#define mmMMEA1_ADDRNORM_OFFSET_ADDR3_BASE_IDX 1 +#define mmMMEA1_ADDRNORM_BASE_ADDR4 0x03fe +#define mmMMEA1_ADDRNORM_BASE_ADDR4_BASE_IDX 1 +#define mmMMEA1_ADDRNORM_LIMIT_ADDR4 0x03ff +#define mmMMEA1_ADDRNORM_LIMIT_ADDR4_BASE_IDX 1 +#define mmMMEA1_ADDRNORM_BASE_ADDR5 0x0400 +#define mmMMEA1_ADDRNORM_BASE_ADDR5_BASE_IDX 1 +#define mmMMEA1_ADDRNORM_LIMIT_ADDR5 0x0401 +#define mmMMEA1_ADDRNORM_LIMIT_ADDR5_BASE_IDX 1 +#define mmMMEA1_ADDRNORM_OFFSET_ADDR5 0x0402 +#define mmMMEA1_ADDRNORM_OFFSET_ADDR5_BASE_IDX 1 +#define mmMMEA1_ADDRNORMDRAM_HOLE_CNTL 0x0403 +#define mmMMEA1_ADDRNORMDRAM_HOLE_CNTL_BASE_IDX 1 +#define mmMMEA1_ADDRNORMGMI_HOLE_CNTL 0x0404 +#define mmMMEA1_ADDRNORMGMI_HOLE_CNTL_BASE_IDX 1 +#define mmMMEA1_ADDRNORMDRAM_NP2_CHANNEL_CFG 0x0405 +#define 
mmMMEA1_ADDRNORMDRAM_NP2_CHANNEL_CFG_BASE_IDX 1 +#define mmMMEA1_ADDRNORMGMI_NP2_CHANNEL_CFG 0x0406 +#define mmMMEA1_ADDRNORMGMI_NP2_CHANNEL_CFG_BASE_IDX 1 +#define mmMMEA1_ADDRDEC_BANK_CFG 0x0407 +#define mmMMEA1_ADDRDEC_BANK_CFG_BASE_IDX 1 +#define mmMMEA1_ADDRDEC_MISC_CFG 0x0408 +#define mmMMEA1_ADDRDEC_MISC_CFG_BASE_IDX 1 +#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK0 0x0409 +#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK0_BASE_IDX 1 +#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK1 0x040a +#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK1_BASE_IDX 1 +#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK2 0x040b +#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK2_BASE_IDX 1 +#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK3 0x040c +#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK3_BASE_IDX 1 +#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK4 0x040d +#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK4_BASE_IDX 1 +#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK5 0x040e +#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_BANK5_BASE_IDX 1 +#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_PC 0x040f +#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_PC_BASE_IDX 1 +#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_PC2 0x0410 +#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_PC2_BASE_IDX 1 +#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_CS0 0x0411 +#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_CS0_BASE_IDX 1 +#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_CS1 0x0412 +#define mmMMEA1_ADDRDECDRAM_ADDR_HASH_CS1_BASE_IDX 1 +#define mmMMEA1_ADDRDECDRAM_HARVEST_ENABLE 0x0413 +#define mmMMEA1_ADDRDECDRAM_HARVEST_ENABLE_BASE_IDX 1 +#define mmMMEA1_ADDRDECGMI_ADDR_HASH_BANK0 0x0414 +#define mmMMEA1_ADDRDECGMI_ADDR_HASH_BANK0_BASE_IDX 1 +#define mmMMEA1_ADDRDECGMI_ADDR_HASH_BANK1 0x0415 +#define mmMMEA1_ADDRDECGMI_ADDR_HASH_BANK1_BASE_IDX 1 +#define mmMMEA1_ADDRDECGMI_ADDR_HASH_BANK2 0x0416 +#define mmMMEA1_ADDRDECGMI_ADDR_HASH_BANK2_BASE_IDX 1 +#define mmMMEA1_ADDRDECGMI_ADDR_HASH_BANK3 0x0417 +#define mmMMEA1_ADDRDECGMI_ADDR_HASH_BANK3_BASE_IDX 1 +#define mmMMEA1_ADDRDECGMI_ADDR_HASH_BANK4 0x0418 +#define mmMMEA1_ADDRDECGMI_ADDR_HASH_BANK4_BASE_IDX 1 +#define mmMMEA1_ADDRDECGMI_ADDR_HASH_BANK5 0x0419 +#define mmMMEA1_ADDRDECGMI_ADDR_HASH_BANK5_BASE_IDX 1 +#define mmMMEA1_ADDRDECGMI_ADDR_HASH_PC 0x041a +#define mmMMEA1_ADDRDECGMI_ADDR_HASH_PC_BASE_IDX 1 +#define mmMMEA1_ADDRDECGMI_ADDR_HASH_PC2 0x041b +#define mmMMEA1_ADDRDECGMI_ADDR_HASH_PC2_BASE_IDX 1 +#define mmMMEA1_ADDRDECGMI_ADDR_HASH_CS0 0x041c +#define mmMMEA1_ADDRDECGMI_ADDR_HASH_CS0_BASE_IDX 1 +#define mmMMEA1_ADDRDECGMI_ADDR_HASH_CS1 0x041d +#define mmMMEA1_ADDRDECGMI_ADDR_HASH_CS1_BASE_IDX 1 +#define mmMMEA1_ADDRDECGMI_HARVEST_ENABLE 0x041e +#define mmMMEA1_ADDRDECGMI_HARVEST_ENABLE_BASE_IDX 1 +#define mmMMEA1_ADDRDEC0_BASE_ADDR_CS0 0x041f +#define mmMMEA1_ADDRDEC0_BASE_ADDR_CS0_BASE_IDX 1 +#define mmMMEA1_ADDRDEC0_BASE_ADDR_CS1 0x0420 +#define mmMMEA1_ADDRDEC0_BASE_ADDR_CS1_BASE_IDX 1 +#define mmMMEA1_ADDRDEC0_BASE_ADDR_CS2 0x0421 +#define mmMMEA1_ADDRDEC0_BASE_ADDR_CS2_BASE_IDX 1 +#define mmMMEA1_ADDRDEC0_BASE_ADDR_CS3 0x0422 +#define mmMMEA1_ADDRDEC0_BASE_ADDR_CS3_BASE_IDX 1 +#define mmMMEA1_ADDRDEC0_BASE_ADDR_SECCS0 0x0423 +#define mmMMEA1_ADDRDEC0_BASE_ADDR_SECCS0_BASE_IDX 1 +#define mmMMEA1_ADDRDEC0_BASE_ADDR_SECCS1 0x0424 +#define mmMMEA1_ADDRDEC0_BASE_ADDR_SECCS1_BASE_IDX 1 +#define mmMMEA1_ADDRDEC0_BASE_ADDR_SECCS2 0x0425 +#define mmMMEA1_ADDRDEC0_BASE_ADDR_SECCS2_BASE_IDX 1 +#define mmMMEA1_ADDRDEC0_BASE_ADDR_SECCS3 0x0426 +#define mmMMEA1_ADDRDEC0_BASE_ADDR_SECCS3_BASE_IDX 1 +#define mmMMEA1_ADDRDEC0_ADDR_MASK_CS01 0x0427 +#define mmMMEA1_ADDRDEC0_ADDR_MASK_CS01_BASE_IDX 1 
+#define mmMMEA1_ADDRDEC0_ADDR_MASK_CS23 0x0428 +#define mmMMEA1_ADDRDEC0_ADDR_MASK_CS23_BASE_IDX 1 +#define mmMMEA1_ADDRDEC0_ADDR_MASK_SECCS01 0x0429 +#define mmMMEA1_ADDRDEC0_ADDR_MASK_SECCS01_BASE_IDX 1 +#define mmMMEA1_ADDRDEC0_ADDR_MASK_SECCS23 0x042a +#define mmMMEA1_ADDRDEC0_ADDR_MASK_SECCS23_BASE_IDX 1 +#define mmMMEA1_ADDRDEC0_ADDR_CFG_CS01 0x042b +#define mmMMEA1_ADDRDEC0_ADDR_CFG_CS01_BASE_IDX 1 +#define mmMMEA1_ADDRDEC0_ADDR_CFG_CS23 0x042c +#define mmMMEA1_ADDRDEC0_ADDR_CFG_CS23_BASE_IDX 1 +#define mmMMEA1_ADDRDEC0_ADDR_SEL_CS01 0x042d +#define mmMMEA1_ADDRDEC0_ADDR_SEL_CS01_BASE_IDX 1 +#define mmMMEA1_ADDRDEC0_ADDR_SEL_CS23 0x042e +#define mmMMEA1_ADDRDEC0_ADDR_SEL_CS23_BASE_IDX 1 +#define mmMMEA1_ADDRDEC0_ADDR_SEL2_CS01 0x042f +#define mmMMEA1_ADDRDEC0_ADDR_SEL2_CS01_BASE_IDX 1 +#define mmMMEA1_ADDRDEC0_ADDR_SEL2_CS23 0x0430 +#define mmMMEA1_ADDRDEC0_ADDR_SEL2_CS23_BASE_IDX 1 +#define mmMMEA1_ADDRDEC0_COL_SEL_LO_CS01 0x0431 +#define mmMMEA1_ADDRDEC0_COL_SEL_LO_CS01_BASE_IDX 1 +#define mmMMEA1_ADDRDEC0_COL_SEL_LO_CS23 0x0432 +#define mmMMEA1_ADDRDEC0_COL_SEL_LO_CS23_BASE_IDX 1 +#define mmMMEA1_ADDRDEC0_COL_SEL_HI_CS01 0x0433 +#define mmMMEA1_ADDRDEC0_COL_SEL_HI_CS01_BASE_IDX 1 +#define mmMMEA1_ADDRDEC0_COL_SEL_HI_CS23 0x0434 +#define mmMMEA1_ADDRDEC0_COL_SEL_HI_CS23_BASE_IDX 1 +#define mmMMEA1_ADDRDEC0_RM_SEL_CS01 0x0435 +#define mmMMEA1_ADDRDEC0_RM_SEL_CS01_BASE_IDX 1 +#define mmMMEA1_ADDRDEC0_RM_SEL_CS23 0x0436 +#define mmMMEA1_ADDRDEC0_RM_SEL_CS23_BASE_IDX 1 +#define mmMMEA1_ADDRDEC0_RM_SEL_SECCS01 0x0437 +#define mmMMEA1_ADDRDEC0_RM_SEL_SECCS01_BASE_IDX 1 +#define mmMMEA1_ADDRDEC0_RM_SEL_SECCS23 0x0438 +#define mmMMEA1_ADDRDEC0_RM_SEL_SECCS23_BASE_IDX 1 +#define mmMMEA1_ADDRDEC1_BASE_ADDR_CS0 0x0439 +#define mmMMEA1_ADDRDEC1_BASE_ADDR_CS0_BASE_IDX 1 +#define mmMMEA1_ADDRDEC1_BASE_ADDR_CS1 0x043a +#define mmMMEA1_ADDRDEC1_BASE_ADDR_CS1_BASE_IDX 1 +#define mmMMEA1_ADDRDEC1_BASE_ADDR_CS2 0x043b +#define mmMMEA1_ADDRDEC1_BASE_ADDR_CS2_BASE_IDX 1 +#define mmMMEA1_ADDRDEC1_BASE_ADDR_CS3 0x043c +#define mmMMEA1_ADDRDEC1_BASE_ADDR_CS3_BASE_IDX 1 +#define mmMMEA1_ADDRDEC1_BASE_ADDR_SECCS0 0x043d +#define mmMMEA1_ADDRDEC1_BASE_ADDR_SECCS0_BASE_IDX 1 +#define mmMMEA1_ADDRDEC1_BASE_ADDR_SECCS1 0x043e +#define mmMMEA1_ADDRDEC1_BASE_ADDR_SECCS1_BASE_IDX 1 +#define mmMMEA1_ADDRDEC1_BASE_ADDR_SECCS2 0x043f +#define mmMMEA1_ADDRDEC1_BASE_ADDR_SECCS2_BASE_IDX 1 +#define mmMMEA1_ADDRDEC1_BASE_ADDR_SECCS3 0x0440 +#define mmMMEA1_ADDRDEC1_BASE_ADDR_SECCS3_BASE_IDX 1 +#define mmMMEA1_ADDRDEC1_ADDR_MASK_CS01 0x0441 +#define mmMMEA1_ADDRDEC1_ADDR_MASK_CS01_BASE_IDX 1 +#define mmMMEA1_ADDRDEC1_ADDR_MASK_CS23 0x0442 +#define mmMMEA1_ADDRDEC1_ADDR_MASK_CS23_BASE_IDX 1 +#define mmMMEA1_ADDRDEC1_ADDR_MASK_SECCS01 0x0443 +#define mmMMEA1_ADDRDEC1_ADDR_MASK_SECCS01_BASE_IDX 1 +#define mmMMEA1_ADDRDEC1_ADDR_MASK_SECCS23 0x0444 +#define mmMMEA1_ADDRDEC1_ADDR_MASK_SECCS23_BASE_IDX 1 +#define mmMMEA1_ADDRDEC1_ADDR_CFG_CS01 0x0445 +#define mmMMEA1_ADDRDEC1_ADDR_CFG_CS01_BASE_IDX 1 +#define mmMMEA1_ADDRDEC1_ADDR_CFG_CS23 0x0446 +#define mmMMEA1_ADDRDEC1_ADDR_CFG_CS23_BASE_IDX 1 +#define mmMMEA1_ADDRDEC1_ADDR_SEL_CS01 0x0447 +#define mmMMEA1_ADDRDEC1_ADDR_SEL_CS01_BASE_IDX 1 +#define mmMMEA1_ADDRDEC1_ADDR_SEL_CS23 0x0448 +#define mmMMEA1_ADDRDEC1_ADDR_SEL_CS23_BASE_IDX 1 +#define mmMMEA1_ADDRDEC1_ADDR_SEL2_CS01 0x0449 +#define mmMMEA1_ADDRDEC1_ADDR_SEL2_CS01_BASE_IDX 1 +#define mmMMEA1_ADDRDEC1_ADDR_SEL2_CS23 0x044a +#define mmMMEA1_ADDRDEC1_ADDR_SEL2_CS23_BASE_IDX 1 +#define mmMMEA1_ADDRDEC1_COL_SEL_LO_CS01 
0x044b +#define mmMMEA1_ADDRDEC1_COL_SEL_LO_CS01_BASE_IDX 1 +#define mmMMEA1_ADDRDEC1_COL_SEL_LO_CS23 0x044c +#define mmMMEA1_ADDRDEC1_COL_SEL_LO_CS23_BASE_IDX 1 +#define mmMMEA1_ADDRDEC1_COL_SEL_HI_CS01 0x044d +#define mmMMEA1_ADDRDEC1_COL_SEL_HI_CS01_BASE_IDX 1 +#define mmMMEA1_ADDRDEC1_COL_SEL_HI_CS23 0x044e +#define mmMMEA1_ADDRDEC1_COL_SEL_HI_CS23_BASE_IDX 1 +#define mmMMEA1_ADDRDEC1_RM_SEL_CS01 0x044f +#define mmMMEA1_ADDRDEC1_RM_SEL_CS01_BASE_IDX 1 +#define mmMMEA1_ADDRDEC1_RM_SEL_CS23 0x0450 +#define mmMMEA1_ADDRDEC1_RM_SEL_CS23_BASE_IDX 1 +#define mmMMEA1_ADDRDEC1_RM_SEL_SECCS01 0x0451 +#define mmMMEA1_ADDRDEC1_RM_SEL_SECCS01_BASE_IDX 1 +#define mmMMEA1_ADDRDEC1_RM_SEL_SECCS23 0x0452 +#define mmMMEA1_ADDRDEC1_RM_SEL_SECCS23_BASE_IDX 1 +#define mmMMEA1_ADDRDEC2_BASE_ADDR_CS0 0x0453 +#define mmMMEA1_ADDRDEC2_BASE_ADDR_CS0_BASE_IDX 1 +#define mmMMEA1_ADDRDEC2_BASE_ADDR_CS1 0x0454 +#define mmMMEA1_ADDRDEC2_BASE_ADDR_CS1_BASE_IDX 1 +#define mmMMEA1_ADDRDEC2_BASE_ADDR_CS2 0x0455 +#define mmMMEA1_ADDRDEC2_BASE_ADDR_CS2_BASE_IDX 1 +#define mmMMEA1_ADDRDEC2_BASE_ADDR_CS3 0x0456 +#define mmMMEA1_ADDRDEC2_BASE_ADDR_CS3_BASE_IDX 1 +#define mmMMEA1_ADDRDEC2_BASE_ADDR_SECCS0 0x0457 +#define mmMMEA1_ADDRDEC2_BASE_ADDR_SECCS0_BASE_IDX 1 +#define mmMMEA1_ADDRDEC2_BASE_ADDR_SECCS1 0x0458 +#define mmMMEA1_ADDRDEC2_BASE_ADDR_SECCS1_BASE_IDX 1 +#define mmMMEA1_ADDRDEC2_BASE_ADDR_SECCS2 0x0459 +#define mmMMEA1_ADDRDEC2_BASE_ADDR_SECCS2_BASE_IDX 1 +#define mmMMEA1_ADDRDEC2_BASE_ADDR_SECCS3 0x045a +#define mmMMEA1_ADDRDEC2_BASE_ADDR_SECCS3_BASE_IDX 1 +#define mmMMEA1_ADDRDEC2_ADDR_MASK_CS01 0x045b +#define mmMMEA1_ADDRDEC2_ADDR_MASK_CS01_BASE_IDX 1 +#define mmMMEA1_ADDRDEC2_ADDR_MASK_CS23 0x045c +#define mmMMEA1_ADDRDEC2_ADDR_MASK_CS23_BASE_IDX 1 +#define mmMMEA1_ADDRDEC2_ADDR_MASK_SECCS01 0x045d +#define mmMMEA1_ADDRDEC2_ADDR_MASK_SECCS01_BASE_IDX 1 +#define mmMMEA1_ADDRDEC2_ADDR_MASK_SECCS23 0x045e +#define mmMMEA1_ADDRDEC2_ADDR_MASK_SECCS23_BASE_IDX 1 +#define mmMMEA1_ADDRDEC2_ADDR_CFG_CS01 0x045f +#define mmMMEA1_ADDRDEC2_ADDR_CFG_CS01_BASE_IDX 1 +#define mmMMEA1_ADDRDEC2_ADDR_CFG_CS23 0x0460 +#define mmMMEA1_ADDRDEC2_ADDR_CFG_CS23_BASE_IDX 1 +#define mmMMEA1_ADDRDEC2_ADDR_SEL_CS01 0x0461 +#define mmMMEA1_ADDRDEC2_ADDR_SEL_CS01_BASE_IDX 1 +#define mmMMEA1_ADDRDEC2_ADDR_SEL_CS23 0x0462 +#define mmMMEA1_ADDRDEC2_ADDR_SEL_CS23_BASE_IDX 1 +#define mmMMEA1_ADDRDEC2_ADDR_SEL2_CS01 0x0463 +#define mmMMEA1_ADDRDEC2_ADDR_SEL2_CS01_BASE_IDX 1 +#define mmMMEA1_ADDRDEC2_ADDR_SEL2_CS23 0x0464 +#define mmMMEA1_ADDRDEC2_ADDR_SEL2_CS23_BASE_IDX 1 +#define mmMMEA1_ADDRDEC2_COL_SEL_LO_CS01 0x0465 +#define mmMMEA1_ADDRDEC2_COL_SEL_LO_CS01_BASE_IDX 1 +#define mmMMEA1_ADDRDEC2_COL_SEL_LO_CS23 0x0466 +#define mmMMEA1_ADDRDEC2_COL_SEL_LO_CS23_BASE_IDX 1 +#define mmMMEA1_ADDRDEC2_COL_SEL_HI_CS01 0x0467 +#define mmMMEA1_ADDRDEC2_COL_SEL_HI_CS01_BASE_IDX 1 +#define mmMMEA1_ADDRDEC2_COL_SEL_HI_CS23 0x0468 +#define mmMMEA1_ADDRDEC2_COL_SEL_HI_CS23_BASE_IDX 1 +#define mmMMEA1_ADDRDEC2_RM_SEL_CS01 0x0469 +#define mmMMEA1_ADDRDEC2_RM_SEL_CS01_BASE_IDX 1 +#define mmMMEA1_ADDRDEC2_RM_SEL_CS23 0x046a +#define mmMMEA1_ADDRDEC2_RM_SEL_CS23_BASE_IDX 1 +#define mmMMEA1_ADDRDEC2_RM_SEL_SECCS01 0x046b +#define mmMMEA1_ADDRDEC2_RM_SEL_SECCS01_BASE_IDX 1 +#define mmMMEA1_ADDRDEC2_RM_SEL_SECCS23 0x046c +#define mmMMEA1_ADDRDEC2_RM_SEL_SECCS23_BASE_IDX 1 +#define mmMMEA1_ADDRNORMDRAM_GLOBAL_CNTL 0x046d +#define mmMMEA1_ADDRNORMDRAM_GLOBAL_CNTL_BASE_IDX 1 +#define mmMMEA1_ADDRNORMGMI_GLOBAL_CNTL 0x046e +#define 
mmMMEA1_ADDRNORMGMI_GLOBAL_CNTL_BASE_IDX 1 +#define mmMMEA1_IO_RD_CLI2GRP_MAP0 0x0495 +#define mmMMEA1_IO_RD_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA1_IO_RD_CLI2GRP_MAP1 0x0496 +#define mmMMEA1_IO_RD_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA1_IO_WR_CLI2GRP_MAP0 0x0497 +#define mmMMEA1_IO_WR_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA1_IO_WR_CLI2GRP_MAP1 0x0498 +#define mmMMEA1_IO_WR_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA1_IO_RD_COMBINE_FLUSH 0x0499 +#define mmMMEA1_IO_RD_COMBINE_FLUSH_BASE_IDX 1 +#define mmMMEA1_IO_WR_COMBINE_FLUSH 0x049a +#define mmMMEA1_IO_WR_COMBINE_FLUSH_BASE_IDX 1 +#define mmMMEA1_IO_GROUP_BURST 0x049b +#define mmMMEA1_IO_GROUP_BURST_BASE_IDX 1 +#define mmMMEA1_IO_RD_PRI_AGE 0x049c +#define mmMMEA1_IO_RD_PRI_AGE_BASE_IDX 1 +#define mmMMEA1_IO_WR_PRI_AGE 0x049d +#define mmMMEA1_IO_WR_PRI_AGE_BASE_IDX 1 +#define mmMMEA1_IO_RD_PRI_QUEUING 0x049e +#define mmMMEA1_IO_RD_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA1_IO_WR_PRI_QUEUING 0x049f +#define mmMMEA1_IO_WR_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA1_IO_RD_PRI_FIXED 0x04a0 +#define mmMMEA1_IO_RD_PRI_FIXED_BASE_IDX 1 +#define mmMMEA1_IO_WR_PRI_FIXED 0x04a1 +#define mmMMEA1_IO_WR_PRI_FIXED_BASE_IDX 1 +#define mmMMEA1_IO_RD_PRI_URGENCY 0x04a2 +#define mmMMEA1_IO_RD_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA1_IO_WR_PRI_URGENCY 0x04a3 +#define mmMMEA1_IO_WR_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA1_IO_RD_PRI_URGENCY_MASKING 0x04a4 +#define mmMMEA1_IO_RD_PRI_URGENCY_MASKING_BASE_IDX 1 +#define mmMMEA1_IO_WR_PRI_URGENCY_MASKING 0x04a5 +#define mmMMEA1_IO_WR_PRI_URGENCY_MASKING_BASE_IDX 1 +#define mmMMEA1_IO_RD_PRI_QUANT_PRI1 0x04a6 +#define mmMMEA1_IO_RD_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA1_IO_RD_PRI_QUANT_PRI2 0x04a7 +#define mmMMEA1_IO_RD_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA1_IO_RD_PRI_QUANT_PRI3 0x04a8 +#define mmMMEA1_IO_RD_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA1_IO_WR_PRI_QUANT_PRI1 0x04a9 +#define mmMMEA1_IO_WR_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA1_IO_WR_PRI_QUANT_PRI2 0x04aa +#define mmMMEA1_IO_WR_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA1_IO_WR_PRI_QUANT_PRI3 0x04ab +#define mmMMEA1_IO_WR_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA1_SDP_ARB_DRAM 0x04ac +#define mmMMEA1_SDP_ARB_DRAM_BASE_IDX 1 +#define mmMMEA1_SDP_ARB_GMI 0x04ad +#define mmMMEA1_SDP_ARB_GMI_BASE_IDX 1 +#define mmMMEA1_SDP_ARB_FINAL 0x04ae +#define mmMMEA1_SDP_ARB_FINAL_BASE_IDX 1 +#define mmMMEA1_SDP_DRAM_PRIORITY 0x04af +#define mmMMEA1_SDP_DRAM_PRIORITY_BASE_IDX 1 +#define mmMMEA1_SDP_GMI_PRIORITY 0x04b0 +#define mmMMEA1_SDP_GMI_PRIORITY_BASE_IDX 1 +#define mmMMEA1_SDP_IO_PRIORITY 0x04b1 +#define mmMMEA1_SDP_IO_PRIORITY_BASE_IDX 1 +#define mmMMEA1_SDP_CREDITS 0x04b2 +#define mmMMEA1_SDP_CREDITS_BASE_IDX 1 +#define mmMMEA1_SDP_TAG_RESERVE0 0x04b3 +#define mmMMEA1_SDP_TAG_RESERVE0_BASE_IDX 1 +#define mmMMEA1_SDP_TAG_RESERVE1 0x04b4 +#define mmMMEA1_SDP_TAG_RESERVE1_BASE_IDX 1 +#define mmMMEA1_SDP_VCC_RESERVE0 0x04b5 +#define mmMMEA1_SDP_VCC_RESERVE0_BASE_IDX 1 +#define mmMMEA1_SDP_VCC_RESERVE1 0x04b6 +#define mmMMEA1_SDP_VCC_RESERVE1_BASE_IDX 1 +#define mmMMEA1_SDP_VCD_RESERVE0 0x04b7 +#define mmMMEA1_SDP_VCD_RESERVE0_BASE_IDX 1 +#define mmMMEA1_SDP_VCD_RESERVE1 0x04b8 +#define mmMMEA1_SDP_VCD_RESERVE1_BASE_IDX 1 +#define mmMMEA1_SDP_REQ_CNTL 0x04b9 +#define mmMMEA1_SDP_REQ_CNTL_BASE_IDX 1 +#define mmMMEA1_MISC 0x04ba +#define mmMMEA1_MISC_BASE_IDX 1 +#define mmMMEA1_LATENCY_SAMPLING 0x04bb +#define mmMMEA1_LATENCY_SAMPLING_BASE_IDX 1 +#define mmMMEA1_PERFCOUNTER_LO 0x04bc +#define mmMMEA1_PERFCOUNTER_LO_BASE_IDX 1 +#define mmMMEA1_PERFCOUNTER_HI 0x04bd 
+#define mmMMEA1_PERFCOUNTER_HI_BASE_IDX 1 +#define mmMMEA1_PERFCOUNTER0_CFG 0x04be +#define mmMMEA1_PERFCOUNTER0_CFG_BASE_IDX 1 +#define mmMMEA1_PERFCOUNTER1_CFG 0x04bf +#define mmMMEA1_PERFCOUNTER1_CFG_BASE_IDX 1 +#define mmMMEA1_PERFCOUNTER_RSLT_CNTL 0x04c0 +#define mmMMEA1_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1 +#define mmMMEA1_EDC_CNT 0x04c6 +#define mmMMEA1_EDC_CNT_BASE_IDX 1 +#define mmMMEA1_EDC_CNT2 0x04c7 +#define mmMMEA1_EDC_CNT2_BASE_IDX 1 +#define mmMMEA1_DSM_CNTL 0x04c8 +#define mmMMEA1_DSM_CNTL_BASE_IDX 1 +#define mmMMEA1_DSM_CNTLA 0x04c9 +#define mmMMEA1_DSM_CNTLA_BASE_IDX 1 +#define mmMMEA1_DSM_CNTLB 0x04ca +#define mmMMEA1_DSM_CNTLB_BASE_IDX 1 +#define mmMMEA1_DSM_CNTL2 0x04cb +#define mmMMEA1_DSM_CNTL2_BASE_IDX 1 +#define mmMMEA1_DSM_CNTL2A 0x04cc +#define mmMMEA1_DSM_CNTL2A_BASE_IDX 1 +#define mmMMEA1_DSM_CNTL2B 0x04cd +#define mmMMEA1_DSM_CNTL2B_BASE_IDX 1 +#define mmMMEA1_CGTT_CLK_CTRL 0x04cf +#define mmMMEA1_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmMMEA1_EDC_MODE 0x04d0 +#define mmMMEA1_EDC_MODE_BASE_IDX 1 +#define mmMMEA1_ERR_STATUS 0x04d1 +#define mmMMEA1_ERR_STATUS_BASE_IDX 1 +#define mmMMEA1_MISC2 0x04d2 +#define mmMMEA1_MISC2_BASE_IDX 1 +#define mmMMEA1_ADDRDEC_SELECT 0x04d3 +#define mmMMEA1_ADDRDEC_SELECT_BASE_IDX 1 +#define mmMMEA1_EDC_CNT3 0x04d4 +#define mmMMEA1_EDC_CNT3_BASE_IDX 1 + + +// addressBlock: mmhub_ea_mmeadec2 +// base address: 0x69400 +#define mmMMEA2_DRAM_RD_CLI2GRP_MAP0 0x0500 +#define mmMMEA2_DRAM_RD_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA2_DRAM_RD_CLI2GRP_MAP1 0x0501 +#define mmMMEA2_DRAM_RD_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA2_DRAM_WR_CLI2GRP_MAP0 0x0502 +#define mmMMEA2_DRAM_WR_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA2_DRAM_WR_CLI2GRP_MAP1 0x0503 +#define mmMMEA2_DRAM_WR_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA2_DRAM_RD_GRP2VC_MAP 0x0504 +#define mmMMEA2_DRAM_RD_GRP2VC_MAP_BASE_IDX 1 +#define mmMMEA2_DRAM_WR_GRP2VC_MAP 0x0505 +#define mmMMEA2_DRAM_WR_GRP2VC_MAP_BASE_IDX 1 +#define mmMMEA2_DRAM_RD_LAZY 0x0506 +#define mmMMEA2_DRAM_RD_LAZY_BASE_IDX 1 +#define mmMMEA2_DRAM_WR_LAZY 0x0507 +#define mmMMEA2_DRAM_WR_LAZY_BASE_IDX 1 +#define mmMMEA2_DRAM_RD_CAM_CNTL 0x0508 +#define mmMMEA2_DRAM_RD_CAM_CNTL_BASE_IDX 1 +#define mmMMEA2_DRAM_WR_CAM_CNTL 0x0509 +#define mmMMEA2_DRAM_WR_CAM_CNTL_BASE_IDX 1 +#define mmMMEA2_DRAM_PAGE_BURST 0x050a +#define mmMMEA2_DRAM_PAGE_BURST_BASE_IDX 1 +#define mmMMEA2_DRAM_RD_PRI_AGE 0x050b +#define mmMMEA2_DRAM_RD_PRI_AGE_BASE_IDX 1 +#define mmMMEA2_DRAM_WR_PRI_AGE 0x050c +#define mmMMEA2_DRAM_WR_PRI_AGE_BASE_IDX 1 +#define mmMMEA2_DRAM_RD_PRI_QUEUING 0x050d +#define mmMMEA2_DRAM_RD_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA2_DRAM_WR_PRI_QUEUING 0x050e +#define mmMMEA2_DRAM_WR_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA2_DRAM_RD_PRI_FIXED 0x050f +#define mmMMEA2_DRAM_RD_PRI_FIXED_BASE_IDX 1 +#define mmMMEA2_DRAM_WR_PRI_FIXED 0x0510 +#define mmMMEA2_DRAM_WR_PRI_FIXED_BASE_IDX 1 +#define mmMMEA2_DRAM_RD_PRI_URGENCY 0x0511 +#define mmMMEA2_DRAM_RD_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA2_DRAM_WR_PRI_URGENCY 0x0512 +#define mmMMEA2_DRAM_WR_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA2_DRAM_RD_PRI_QUANT_PRI1 0x0513 +#define mmMMEA2_DRAM_RD_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA2_DRAM_RD_PRI_QUANT_PRI2 0x0514 +#define mmMMEA2_DRAM_RD_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA2_DRAM_RD_PRI_QUANT_PRI3 0x0515 +#define mmMMEA2_DRAM_RD_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA2_DRAM_WR_PRI_QUANT_PRI1 0x0516 +#define mmMMEA2_DRAM_WR_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA2_DRAM_WR_PRI_QUANT_PRI2 0x0517 +#define 
mmMMEA2_DRAM_WR_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA2_DRAM_WR_PRI_QUANT_PRI3 0x0518 +#define mmMMEA2_DRAM_WR_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA2_GMI_RD_CLI2GRP_MAP0 0x0519 +#define mmMMEA2_GMI_RD_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA2_GMI_RD_CLI2GRP_MAP1 0x051a +#define mmMMEA2_GMI_RD_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA2_GMI_WR_CLI2GRP_MAP0 0x051b +#define mmMMEA2_GMI_WR_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA2_GMI_WR_CLI2GRP_MAP1 0x051c +#define mmMMEA2_GMI_WR_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA2_GMI_RD_GRP2VC_MAP 0x051d +#define mmMMEA2_GMI_RD_GRP2VC_MAP_BASE_IDX 1 +#define mmMMEA2_GMI_WR_GRP2VC_MAP 0x051e +#define mmMMEA2_GMI_WR_GRP2VC_MAP_BASE_IDX 1 +#define mmMMEA2_GMI_RD_LAZY 0x051f +#define mmMMEA2_GMI_RD_LAZY_BASE_IDX 1 +#define mmMMEA2_GMI_WR_LAZY 0x0520 +#define mmMMEA2_GMI_WR_LAZY_BASE_IDX 1 +#define mmMMEA2_GMI_RD_CAM_CNTL 0x0521 +#define mmMMEA2_GMI_RD_CAM_CNTL_BASE_IDX 1 +#define mmMMEA2_GMI_WR_CAM_CNTL 0x0522 +#define mmMMEA2_GMI_WR_CAM_CNTL_BASE_IDX 1 +#define mmMMEA2_GMI_PAGE_BURST 0x0523 +#define mmMMEA2_GMI_PAGE_BURST_BASE_IDX 1 +#define mmMMEA2_GMI_RD_PRI_AGE 0x0524 +#define mmMMEA2_GMI_RD_PRI_AGE_BASE_IDX 1 +#define mmMMEA2_GMI_WR_PRI_AGE 0x0525 +#define mmMMEA2_GMI_WR_PRI_AGE_BASE_IDX 1 +#define mmMMEA2_GMI_RD_PRI_QUEUING 0x0526 +#define mmMMEA2_GMI_RD_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA2_GMI_WR_PRI_QUEUING 0x0527 +#define mmMMEA2_GMI_WR_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA2_GMI_RD_PRI_FIXED 0x0528 +#define mmMMEA2_GMI_RD_PRI_FIXED_BASE_IDX 1 +#define mmMMEA2_GMI_WR_PRI_FIXED 0x0529 +#define mmMMEA2_GMI_WR_PRI_FIXED_BASE_IDX 1 +#define mmMMEA2_GMI_RD_PRI_URGENCY 0x052a +#define mmMMEA2_GMI_RD_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA2_GMI_WR_PRI_URGENCY 0x052b +#define mmMMEA2_GMI_WR_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA2_GMI_RD_PRI_URGENCY_MASKING 0x052c +#define mmMMEA2_GMI_RD_PRI_URGENCY_MASKING_BASE_IDX 1 +#define mmMMEA2_GMI_WR_PRI_URGENCY_MASKING 0x052d +#define mmMMEA2_GMI_WR_PRI_URGENCY_MASKING_BASE_IDX 1 +#define mmMMEA2_GMI_RD_PRI_QUANT_PRI1 0x052e +#define mmMMEA2_GMI_RD_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA2_GMI_RD_PRI_QUANT_PRI2 0x052f +#define mmMMEA2_GMI_RD_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA2_GMI_RD_PRI_QUANT_PRI3 0x0530 +#define mmMMEA2_GMI_RD_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA2_GMI_WR_PRI_QUANT_PRI1 0x0531 +#define mmMMEA2_GMI_WR_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA2_GMI_WR_PRI_QUANT_PRI2 0x0532 +#define mmMMEA2_GMI_WR_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA2_GMI_WR_PRI_QUANT_PRI3 0x0533 +#define mmMMEA2_GMI_WR_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA2_ADDRNORM_BASE_ADDR0 0x0534 +#define mmMMEA2_ADDRNORM_BASE_ADDR0_BASE_IDX 1 +#define mmMMEA2_ADDRNORM_LIMIT_ADDR0 0x0535 +#define mmMMEA2_ADDRNORM_LIMIT_ADDR0_BASE_IDX 1 +#define mmMMEA2_ADDRNORM_BASE_ADDR1 0x0536 +#define mmMMEA2_ADDRNORM_BASE_ADDR1_BASE_IDX 1 +#define mmMMEA2_ADDRNORM_LIMIT_ADDR1 0x0537 +#define mmMMEA2_ADDRNORM_LIMIT_ADDR1_BASE_IDX 1 +#define mmMMEA2_ADDRNORM_OFFSET_ADDR1 0x0538 +#define mmMMEA2_ADDRNORM_OFFSET_ADDR1_BASE_IDX 1 +#define mmMMEA2_ADDRNORM_BASE_ADDR2 0x0539 +#define mmMMEA2_ADDRNORM_BASE_ADDR2_BASE_IDX 1 +#define mmMMEA2_ADDRNORM_LIMIT_ADDR2 0x053a +#define mmMMEA2_ADDRNORM_LIMIT_ADDR2_BASE_IDX 1 +#define mmMMEA2_ADDRNORM_BASE_ADDR3 0x053b +#define mmMMEA2_ADDRNORM_BASE_ADDR3_BASE_IDX 1 +#define mmMMEA2_ADDRNORM_LIMIT_ADDR3 0x053c +#define mmMMEA2_ADDRNORM_LIMIT_ADDR3_BASE_IDX 1 +#define mmMMEA2_ADDRNORM_OFFSET_ADDR3 0x053d +#define mmMMEA2_ADDRNORM_OFFSET_ADDR3_BASE_IDX 1 +#define mmMMEA2_ADDRNORM_BASE_ADDR4 0x053e 
+#define mmMMEA2_ADDRNORM_BASE_ADDR4_BASE_IDX 1 +#define mmMMEA2_ADDRNORM_LIMIT_ADDR4 0x053f +#define mmMMEA2_ADDRNORM_LIMIT_ADDR4_BASE_IDX 1 +#define mmMMEA2_ADDRNORM_BASE_ADDR5 0x0540 +#define mmMMEA2_ADDRNORM_BASE_ADDR5_BASE_IDX 1 +#define mmMMEA2_ADDRNORM_LIMIT_ADDR5 0x0541 +#define mmMMEA2_ADDRNORM_LIMIT_ADDR5_BASE_IDX 1 +#define mmMMEA2_ADDRNORM_OFFSET_ADDR5 0x0542 +#define mmMMEA2_ADDRNORM_OFFSET_ADDR5_BASE_IDX 1 +#define mmMMEA2_ADDRNORMDRAM_HOLE_CNTL 0x0543 +#define mmMMEA2_ADDRNORMDRAM_HOLE_CNTL_BASE_IDX 1 +#define mmMMEA2_ADDRNORMGMI_HOLE_CNTL 0x0544 +#define mmMMEA2_ADDRNORMGMI_HOLE_CNTL_BASE_IDX 1 +#define mmMMEA2_ADDRNORMDRAM_NP2_CHANNEL_CFG 0x0545 +#define mmMMEA2_ADDRNORMDRAM_NP2_CHANNEL_CFG_BASE_IDX 1 +#define mmMMEA2_ADDRNORMGMI_NP2_CHANNEL_CFG 0x0546 +#define mmMMEA2_ADDRNORMGMI_NP2_CHANNEL_CFG_BASE_IDX 1 +#define mmMMEA2_ADDRDEC_BANK_CFG 0x0547 +#define mmMMEA2_ADDRDEC_BANK_CFG_BASE_IDX 1 +#define mmMMEA2_ADDRDEC_MISC_CFG 0x0548 +#define mmMMEA2_ADDRDEC_MISC_CFG_BASE_IDX 1 +#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_BANK0 0x0549 +#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_BANK0_BASE_IDX 1 +#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_BANK1 0x054a +#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_BANK1_BASE_IDX 1 +#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_BANK2 0x054b +#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_BANK2_BASE_IDX 1 +#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_BANK3 0x054c +#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_BANK3_BASE_IDX 1 +#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_BANK4 0x054d +#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_BANK4_BASE_IDX 1 +#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_BANK5 0x054e +#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_BANK5_BASE_IDX 1 +#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_PC 0x054f +#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_PC_BASE_IDX 1 +#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_PC2 0x0550 +#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_PC2_BASE_IDX 1 +#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_CS0 0x0551 +#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_CS0_BASE_IDX 1 +#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_CS1 0x0552 +#define mmMMEA2_ADDRDECDRAM_ADDR_HASH_CS1_BASE_IDX 1 +#define mmMMEA2_ADDRDECDRAM_HARVEST_ENABLE 0x0553 +#define mmMMEA2_ADDRDECDRAM_HARVEST_ENABLE_BASE_IDX 1 +#define mmMMEA2_ADDRDECGMI_ADDR_HASH_BANK0 0x0554 +#define mmMMEA2_ADDRDECGMI_ADDR_HASH_BANK0_BASE_IDX 1 +#define mmMMEA2_ADDRDECGMI_ADDR_HASH_BANK1 0x0555 +#define mmMMEA2_ADDRDECGMI_ADDR_HASH_BANK1_BASE_IDX 1 +#define mmMMEA2_ADDRDECGMI_ADDR_HASH_BANK2 0x0556 +#define mmMMEA2_ADDRDECGMI_ADDR_HASH_BANK2_BASE_IDX 1 +#define mmMMEA2_ADDRDECGMI_ADDR_HASH_BANK3 0x0557 +#define mmMMEA2_ADDRDECGMI_ADDR_HASH_BANK3_BASE_IDX 1 +#define mmMMEA2_ADDRDECGMI_ADDR_HASH_BANK4 0x0558 +#define mmMMEA2_ADDRDECGMI_ADDR_HASH_BANK4_BASE_IDX 1 +#define mmMMEA2_ADDRDECGMI_ADDR_HASH_BANK5 0x0559 +#define mmMMEA2_ADDRDECGMI_ADDR_HASH_BANK5_BASE_IDX 1 +#define mmMMEA2_ADDRDECGMI_ADDR_HASH_PC 0x055a +#define mmMMEA2_ADDRDECGMI_ADDR_HASH_PC_BASE_IDX 1 +#define mmMMEA2_ADDRDECGMI_ADDR_HASH_PC2 0x055b +#define mmMMEA2_ADDRDECGMI_ADDR_HASH_PC2_BASE_IDX 1 +#define mmMMEA2_ADDRDECGMI_ADDR_HASH_CS0 0x055c +#define mmMMEA2_ADDRDECGMI_ADDR_HASH_CS0_BASE_IDX 1 +#define mmMMEA2_ADDRDECGMI_ADDR_HASH_CS1 0x055d +#define mmMMEA2_ADDRDECGMI_ADDR_HASH_CS1_BASE_IDX 1 +#define mmMMEA2_ADDRDECGMI_HARVEST_ENABLE 0x055e +#define mmMMEA2_ADDRDECGMI_HARVEST_ENABLE_BASE_IDX 1 +#define mmMMEA2_ADDRDEC0_BASE_ADDR_CS0 0x055f +#define mmMMEA2_ADDRDEC0_BASE_ADDR_CS0_BASE_IDX 1 +#define mmMMEA2_ADDRDEC0_BASE_ADDR_CS1 0x0560 +#define mmMMEA2_ADDRDEC0_BASE_ADDR_CS1_BASE_IDX 1 +#define 
mmMMEA2_ADDRDEC0_BASE_ADDR_CS2 0x0561 +#define mmMMEA2_ADDRDEC0_BASE_ADDR_CS2_BASE_IDX 1 +#define mmMMEA2_ADDRDEC0_BASE_ADDR_CS3 0x0562 +#define mmMMEA2_ADDRDEC0_BASE_ADDR_CS3_BASE_IDX 1 +#define mmMMEA2_ADDRDEC0_BASE_ADDR_SECCS0 0x0563 +#define mmMMEA2_ADDRDEC0_BASE_ADDR_SECCS0_BASE_IDX 1 +#define mmMMEA2_ADDRDEC0_BASE_ADDR_SECCS1 0x0564 +#define mmMMEA2_ADDRDEC0_BASE_ADDR_SECCS1_BASE_IDX 1 +#define mmMMEA2_ADDRDEC0_BASE_ADDR_SECCS2 0x0565 +#define mmMMEA2_ADDRDEC0_BASE_ADDR_SECCS2_BASE_IDX 1 +#define mmMMEA2_ADDRDEC0_BASE_ADDR_SECCS3 0x0566 +#define mmMMEA2_ADDRDEC0_BASE_ADDR_SECCS3_BASE_IDX 1 +#define mmMMEA2_ADDRDEC0_ADDR_MASK_CS01 0x0567 +#define mmMMEA2_ADDRDEC0_ADDR_MASK_CS01_BASE_IDX 1 +#define mmMMEA2_ADDRDEC0_ADDR_MASK_CS23 0x0568 +#define mmMMEA2_ADDRDEC0_ADDR_MASK_CS23_BASE_IDX 1 +#define mmMMEA2_ADDRDEC0_ADDR_MASK_SECCS01 0x0569 +#define mmMMEA2_ADDRDEC0_ADDR_MASK_SECCS01_BASE_IDX 1 +#define mmMMEA2_ADDRDEC0_ADDR_MASK_SECCS23 0x056a +#define mmMMEA2_ADDRDEC0_ADDR_MASK_SECCS23_BASE_IDX 1 +#define mmMMEA2_ADDRDEC0_ADDR_CFG_CS01 0x056b +#define mmMMEA2_ADDRDEC0_ADDR_CFG_CS01_BASE_IDX 1 +#define mmMMEA2_ADDRDEC0_ADDR_CFG_CS23 0x056c +#define mmMMEA2_ADDRDEC0_ADDR_CFG_CS23_BASE_IDX 1 +#define mmMMEA2_ADDRDEC0_ADDR_SEL_CS01 0x056d +#define mmMMEA2_ADDRDEC0_ADDR_SEL_CS01_BASE_IDX 1 +#define mmMMEA2_ADDRDEC0_ADDR_SEL_CS23 0x056e +#define mmMMEA2_ADDRDEC0_ADDR_SEL_CS23_BASE_IDX 1 +#define mmMMEA2_ADDRDEC0_ADDR_SEL2_CS01 0x056f +#define mmMMEA2_ADDRDEC0_ADDR_SEL2_CS01_BASE_IDX 1 +#define mmMMEA2_ADDRDEC0_ADDR_SEL2_CS23 0x0570 +#define mmMMEA2_ADDRDEC0_ADDR_SEL2_CS23_BASE_IDX 1 +#define mmMMEA2_ADDRDEC0_COL_SEL_LO_CS01 0x0571 +#define mmMMEA2_ADDRDEC0_COL_SEL_LO_CS01_BASE_IDX 1 +#define mmMMEA2_ADDRDEC0_COL_SEL_LO_CS23 0x0572 +#define mmMMEA2_ADDRDEC0_COL_SEL_LO_CS23_BASE_IDX 1 +#define mmMMEA2_ADDRDEC0_COL_SEL_HI_CS01 0x0573 +#define mmMMEA2_ADDRDEC0_COL_SEL_HI_CS01_BASE_IDX 1 +#define mmMMEA2_ADDRDEC0_COL_SEL_HI_CS23 0x0574 +#define mmMMEA2_ADDRDEC0_COL_SEL_HI_CS23_BASE_IDX 1 +#define mmMMEA2_ADDRDEC0_RM_SEL_CS01 0x0575 +#define mmMMEA2_ADDRDEC0_RM_SEL_CS01_BASE_IDX 1 +#define mmMMEA2_ADDRDEC0_RM_SEL_CS23 0x0576 +#define mmMMEA2_ADDRDEC0_RM_SEL_CS23_BASE_IDX 1 +#define mmMMEA2_ADDRDEC0_RM_SEL_SECCS01 0x0577 +#define mmMMEA2_ADDRDEC0_RM_SEL_SECCS01_BASE_IDX 1 +#define mmMMEA2_ADDRDEC0_RM_SEL_SECCS23 0x0578 +#define mmMMEA2_ADDRDEC0_RM_SEL_SECCS23_BASE_IDX 1 +#define mmMMEA2_ADDRDEC1_BASE_ADDR_CS0 0x0579 +#define mmMMEA2_ADDRDEC1_BASE_ADDR_CS0_BASE_IDX 1 +#define mmMMEA2_ADDRDEC1_BASE_ADDR_CS1 0x057a +#define mmMMEA2_ADDRDEC1_BASE_ADDR_CS1_BASE_IDX 1 +#define mmMMEA2_ADDRDEC1_BASE_ADDR_CS2 0x057b +#define mmMMEA2_ADDRDEC1_BASE_ADDR_CS2_BASE_IDX 1 +#define mmMMEA2_ADDRDEC1_BASE_ADDR_CS3 0x057c +#define mmMMEA2_ADDRDEC1_BASE_ADDR_CS3_BASE_IDX 1 +#define mmMMEA2_ADDRDEC1_BASE_ADDR_SECCS0 0x057d +#define mmMMEA2_ADDRDEC1_BASE_ADDR_SECCS0_BASE_IDX 1 +#define mmMMEA2_ADDRDEC1_BASE_ADDR_SECCS1 0x057e +#define mmMMEA2_ADDRDEC1_BASE_ADDR_SECCS1_BASE_IDX 1 +#define mmMMEA2_ADDRDEC1_BASE_ADDR_SECCS2 0x057f +#define mmMMEA2_ADDRDEC1_BASE_ADDR_SECCS2_BASE_IDX 1 +#define mmMMEA2_ADDRDEC1_BASE_ADDR_SECCS3 0x0580 +#define mmMMEA2_ADDRDEC1_BASE_ADDR_SECCS3_BASE_IDX 1 +#define mmMMEA2_ADDRDEC1_ADDR_MASK_CS01 0x0581 +#define mmMMEA2_ADDRDEC1_ADDR_MASK_CS01_BASE_IDX 1 +#define mmMMEA2_ADDRDEC1_ADDR_MASK_CS23 0x0582 +#define mmMMEA2_ADDRDEC1_ADDR_MASK_CS23_BASE_IDX 1 +#define mmMMEA2_ADDRDEC1_ADDR_MASK_SECCS01 0x0583 +#define mmMMEA2_ADDRDEC1_ADDR_MASK_SECCS01_BASE_IDX 1 +#define 
mmMMEA2_ADDRDEC1_ADDR_MASK_SECCS23 0x0584 +#define mmMMEA2_ADDRDEC1_ADDR_MASK_SECCS23_BASE_IDX 1 +#define mmMMEA2_ADDRDEC1_ADDR_CFG_CS01 0x0585 +#define mmMMEA2_ADDRDEC1_ADDR_CFG_CS01_BASE_IDX 1 +#define mmMMEA2_ADDRDEC1_ADDR_CFG_CS23 0x0586 +#define mmMMEA2_ADDRDEC1_ADDR_CFG_CS23_BASE_IDX 1 +#define mmMMEA2_ADDRDEC1_ADDR_SEL_CS01 0x0587 +#define mmMMEA2_ADDRDEC1_ADDR_SEL_CS01_BASE_IDX 1 +#define mmMMEA2_ADDRDEC1_ADDR_SEL_CS23 0x0588 +#define mmMMEA2_ADDRDEC1_ADDR_SEL_CS23_BASE_IDX 1 +#define mmMMEA2_ADDRDEC1_ADDR_SEL2_CS01 0x0589 +#define mmMMEA2_ADDRDEC1_ADDR_SEL2_CS01_BASE_IDX 1 +#define mmMMEA2_ADDRDEC1_ADDR_SEL2_CS23 0x058a +#define mmMMEA2_ADDRDEC1_ADDR_SEL2_CS23_BASE_IDX 1 +#define mmMMEA2_ADDRDEC1_COL_SEL_LO_CS01 0x058b +#define mmMMEA2_ADDRDEC1_COL_SEL_LO_CS01_BASE_IDX 1 +#define mmMMEA2_ADDRDEC1_COL_SEL_LO_CS23 0x058c +#define mmMMEA2_ADDRDEC1_COL_SEL_LO_CS23_BASE_IDX 1 +#define mmMMEA2_ADDRDEC1_COL_SEL_HI_CS01 0x058d +#define mmMMEA2_ADDRDEC1_COL_SEL_HI_CS01_BASE_IDX 1 +#define mmMMEA2_ADDRDEC1_COL_SEL_HI_CS23 0x058e +#define mmMMEA2_ADDRDEC1_COL_SEL_HI_CS23_BASE_IDX 1 +#define mmMMEA2_ADDRDEC1_RM_SEL_CS01 0x058f +#define mmMMEA2_ADDRDEC1_RM_SEL_CS01_BASE_IDX 1 +#define mmMMEA2_ADDRDEC1_RM_SEL_CS23 0x0590 +#define mmMMEA2_ADDRDEC1_RM_SEL_CS23_BASE_IDX 1 +#define mmMMEA2_ADDRDEC1_RM_SEL_SECCS01 0x0591 +#define mmMMEA2_ADDRDEC1_RM_SEL_SECCS01_BASE_IDX 1 +#define mmMMEA2_ADDRDEC1_RM_SEL_SECCS23 0x0592 +#define mmMMEA2_ADDRDEC1_RM_SEL_SECCS23_BASE_IDX 1 +#define mmMMEA2_ADDRDEC2_BASE_ADDR_CS0 0x0593 +#define mmMMEA2_ADDRDEC2_BASE_ADDR_CS0_BASE_IDX 1 +#define mmMMEA2_ADDRDEC2_BASE_ADDR_CS1 0x0594 +#define mmMMEA2_ADDRDEC2_BASE_ADDR_CS1_BASE_IDX 1 +#define mmMMEA2_ADDRDEC2_BASE_ADDR_CS2 0x0595 +#define mmMMEA2_ADDRDEC2_BASE_ADDR_CS2_BASE_IDX 1 +#define mmMMEA2_ADDRDEC2_BASE_ADDR_CS3 0x0596 +#define mmMMEA2_ADDRDEC2_BASE_ADDR_CS3_BASE_IDX 1 +#define mmMMEA2_ADDRDEC2_BASE_ADDR_SECCS0 0x0597 +#define mmMMEA2_ADDRDEC2_BASE_ADDR_SECCS0_BASE_IDX 1 +#define mmMMEA2_ADDRDEC2_BASE_ADDR_SECCS1 0x0598 +#define mmMMEA2_ADDRDEC2_BASE_ADDR_SECCS1_BASE_IDX 1 +#define mmMMEA2_ADDRDEC2_BASE_ADDR_SECCS2 0x0599 +#define mmMMEA2_ADDRDEC2_BASE_ADDR_SECCS2_BASE_IDX 1 +#define mmMMEA2_ADDRDEC2_BASE_ADDR_SECCS3 0x059a +#define mmMMEA2_ADDRDEC2_BASE_ADDR_SECCS3_BASE_IDX 1 +#define mmMMEA2_ADDRDEC2_ADDR_MASK_CS01 0x059b +#define mmMMEA2_ADDRDEC2_ADDR_MASK_CS01_BASE_IDX 1 +#define mmMMEA2_ADDRDEC2_ADDR_MASK_CS23 0x059c +#define mmMMEA2_ADDRDEC2_ADDR_MASK_CS23_BASE_IDX 1 +#define mmMMEA2_ADDRDEC2_ADDR_MASK_SECCS01 0x059d +#define mmMMEA2_ADDRDEC2_ADDR_MASK_SECCS01_BASE_IDX 1 +#define mmMMEA2_ADDRDEC2_ADDR_MASK_SECCS23 0x059e +#define mmMMEA2_ADDRDEC2_ADDR_MASK_SECCS23_BASE_IDX 1 +#define mmMMEA2_ADDRDEC2_ADDR_CFG_CS01 0x059f +#define mmMMEA2_ADDRDEC2_ADDR_CFG_CS01_BASE_IDX 1 +#define mmMMEA2_ADDRDEC2_ADDR_CFG_CS23 0x05a0 +#define mmMMEA2_ADDRDEC2_ADDR_CFG_CS23_BASE_IDX 1 +#define mmMMEA2_ADDRDEC2_ADDR_SEL_CS01 0x05a1 +#define mmMMEA2_ADDRDEC2_ADDR_SEL_CS01_BASE_IDX 1 +#define mmMMEA2_ADDRDEC2_ADDR_SEL_CS23 0x05a2 +#define mmMMEA2_ADDRDEC2_ADDR_SEL_CS23_BASE_IDX 1 +#define mmMMEA2_ADDRDEC2_ADDR_SEL2_CS01 0x05a3 +#define mmMMEA2_ADDRDEC2_ADDR_SEL2_CS01_BASE_IDX 1 +#define mmMMEA2_ADDRDEC2_ADDR_SEL2_CS23 0x05a4 +#define mmMMEA2_ADDRDEC2_ADDR_SEL2_CS23_BASE_IDX 1 +#define mmMMEA2_ADDRDEC2_COL_SEL_LO_CS01 0x05a5 +#define mmMMEA2_ADDRDEC2_COL_SEL_LO_CS01_BASE_IDX 1 +#define mmMMEA2_ADDRDEC2_COL_SEL_LO_CS23 0x05a6 +#define mmMMEA2_ADDRDEC2_COL_SEL_LO_CS23_BASE_IDX 1 +#define mmMMEA2_ADDRDEC2_COL_SEL_HI_CS01 0x05a7 
+#define mmMMEA2_ADDRDEC2_COL_SEL_HI_CS01_BASE_IDX 1 +#define mmMMEA2_ADDRDEC2_COL_SEL_HI_CS23 0x05a8 +#define mmMMEA2_ADDRDEC2_COL_SEL_HI_CS23_BASE_IDX 1 +#define mmMMEA2_ADDRDEC2_RM_SEL_CS01 0x05a9 +#define mmMMEA2_ADDRDEC2_RM_SEL_CS01_BASE_IDX 1 +#define mmMMEA2_ADDRDEC2_RM_SEL_CS23 0x05aa +#define mmMMEA2_ADDRDEC2_RM_SEL_CS23_BASE_IDX 1 +#define mmMMEA2_ADDRDEC2_RM_SEL_SECCS01 0x05ab +#define mmMMEA2_ADDRDEC2_RM_SEL_SECCS01_BASE_IDX 1 +#define mmMMEA2_ADDRDEC2_RM_SEL_SECCS23 0x05ac +#define mmMMEA2_ADDRDEC2_RM_SEL_SECCS23_BASE_IDX 1 +#define mmMMEA2_ADDRNORMDRAM_GLOBAL_CNTL 0x05ad +#define mmMMEA2_ADDRNORMDRAM_GLOBAL_CNTL_BASE_IDX 1 +#define mmMMEA2_ADDRNORMGMI_GLOBAL_CNTL 0x05ae +#define mmMMEA2_ADDRNORMGMI_GLOBAL_CNTL_BASE_IDX 1 +#define mmMMEA2_IO_RD_CLI2GRP_MAP0 0x05d5 +#define mmMMEA2_IO_RD_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA2_IO_RD_CLI2GRP_MAP1 0x05d6 +#define mmMMEA2_IO_RD_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA2_IO_WR_CLI2GRP_MAP0 0x05d7 +#define mmMMEA2_IO_WR_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA2_IO_WR_CLI2GRP_MAP1 0x05d8 +#define mmMMEA2_IO_WR_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA2_IO_RD_COMBINE_FLUSH 0x05d9 +#define mmMMEA2_IO_RD_COMBINE_FLUSH_BASE_IDX 1 +#define mmMMEA2_IO_WR_COMBINE_FLUSH 0x05da +#define mmMMEA2_IO_WR_COMBINE_FLUSH_BASE_IDX 1 +#define mmMMEA2_IO_GROUP_BURST 0x05db +#define mmMMEA2_IO_GROUP_BURST_BASE_IDX 1 +#define mmMMEA2_IO_RD_PRI_AGE 0x05dc +#define mmMMEA2_IO_RD_PRI_AGE_BASE_IDX 1 +#define mmMMEA2_IO_WR_PRI_AGE 0x05dd +#define mmMMEA2_IO_WR_PRI_AGE_BASE_IDX 1 +#define mmMMEA2_IO_RD_PRI_QUEUING 0x05de +#define mmMMEA2_IO_RD_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA2_IO_WR_PRI_QUEUING 0x05df +#define mmMMEA2_IO_WR_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA2_IO_RD_PRI_FIXED 0x05e0 +#define mmMMEA2_IO_RD_PRI_FIXED_BASE_IDX 1 +#define mmMMEA2_IO_WR_PRI_FIXED 0x05e1 +#define mmMMEA2_IO_WR_PRI_FIXED_BASE_IDX 1 +#define mmMMEA2_IO_RD_PRI_URGENCY 0x05e2 +#define mmMMEA2_IO_RD_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA2_IO_WR_PRI_URGENCY 0x05e3 +#define mmMMEA2_IO_WR_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA2_IO_RD_PRI_URGENCY_MASKING 0x05e4 +#define mmMMEA2_IO_RD_PRI_URGENCY_MASKING_BASE_IDX 1 +#define mmMMEA2_IO_WR_PRI_URGENCY_MASKING 0x05e5 +#define mmMMEA2_IO_WR_PRI_URGENCY_MASKING_BASE_IDX 1 +#define mmMMEA2_IO_RD_PRI_QUANT_PRI1 0x05e6 +#define mmMMEA2_IO_RD_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA2_IO_RD_PRI_QUANT_PRI2 0x05e7 +#define mmMMEA2_IO_RD_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA2_IO_RD_PRI_QUANT_PRI3 0x05e8 +#define mmMMEA2_IO_RD_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA2_IO_WR_PRI_QUANT_PRI1 0x05e9 +#define mmMMEA2_IO_WR_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA2_IO_WR_PRI_QUANT_PRI2 0x05ea +#define mmMMEA2_IO_WR_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA2_IO_WR_PRI_QUANT_PRI3 0x05eb +#define mmMMEA2_IO_WR_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA2_SDP_ARB_DRAM 0x05ec +#define mmMMEA2_SDP_ARB_DRAM_BASE_IDX 1 +#define mmMMEA2_SDP_ARB_GMI 0x05ed +#define mmMMEA2_SDP_ARB_GMI_BASE_IDX 1 +#define mmMMEA2_SDP_ARB_FINAL 0x05ee +#define mmMMEA2_SDP_ARB_FINAL_BASE_IDX 1 +#define mmMMEA2_SDP_DRAM_PRIORITY 0x05ef +#define mmMMEA2_SDP_DRAM_PRIORITY_BASE_IDX 1 +#define mmMMEA2_SDP_GMI_PRIORITY 0x05f0 +#define mmMMEA2_SDP_GMI_PRIORITY_BASE_IDX 1 +#define mmMMEA2_SDP_IO_PRIORITY 0x05f1 +#define mmMMEA2_SDP_IO_PRIORITY_BASE_IDX 1 +#define mmMMEA2_SDP_CREDITS 0x05f2 +#define mmMMEA2_SDP_CREDITS_BASE_IDX 1 +#define mmMMEA2_SDP_TAG_RESERVE0 0x05f3 +#define mmMMEA2_SDP_TAG_RESERVE0_BASE_IDX 1 +#define mmMMEA2_SDP_TAG_RESERVE1 0x05f4 +#define 
mmMMEA2_SDP_TAG_RESERVE1_BASE_IDX 1 +#define mmMMEA2_SDP_VCC_RESERVE0 0x05f5 +#define mmMMEA2_SDP_VCC_RESERVE0_BASE_IDX 1 +#define mmMMEA2_SDP_VCC_RESERVE1 0x05f6 +#define mmMMEA2_SDP_VCC_RESERVE1_BASE_IDX 1 +#define mmMMEA2_SDP_VCD_RESERVE0 0x05f7 +#define mmMMEA2_SDP_VCD_RESERVE0_BASE_IDX 1 +#define mmMMEA2_SDP_VCD_RESERVE1 0x05f8 +#define mmMMEA2_SDP_VCD_RESERVE1_BASE_IDX 1 +#define mmMMEA2_SDP_REQ_CNTL 0x05f9 +#define mmMMEA2_SDP_REQ_CNTL_BASE_IDX 1 +#define mmMMEA2_MISC 0x05fa +#define mmMMEA2_MISC_BASE_IDX 1 +#define mmMMEA2_LATENCY_SAMPLING 0x05fb +#define mmMMEA2_LATENCY_SAMPLING_BASE_IDX 1 +#define mmMMEA2_PERFCOUNTER_LO 0x05fc +#define mmMMEA2_PERFCOUNTER_LO_BASE_IDX 1 +#define mmMMEA2_PERFCOUNTER_HI 0x05fd +#define mmMMEA2_PERFCOUNTER_HI_BASE_IDX 1 +#define mmMMEA2_PERFCOUNTER0_CFG 0x05fe +#define mmMMEA2_PERFCOUNTER0_CFG_BASE_IDX 1 +#define mmMMEA2_PERFCOUNTER1_CFG 0x05ff +#define mmMMEA2_PERFCOUNTER1_CFG_BASE_IDX 1 +#define mmMMEA2_PERFCOUNTER_RSLT_CNTL 0x0600 +#define mmMMEA2_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1 +#define mmMMEA2_EDC_CNT 0x0606 +#define mmMMEA2_EDC_CNT_BASE_IDX 1 +#define mmMMEA2_EDC_CNT2 0x0607 +#define mmMMEA2_EDC_CNT2_BASE_IDX 1 +#define mmMMEA2_DSM_CNTL 0x0608 +#define mmMMEA2_DSM_CNTL_BASE_IDX 1 +#define mmMMEA2_DSM_CNTLA 0x0609 +#define mmMMEA2_DSM_CNTLA_BASE_IDX 1 +#define mmMMEA2_DSM_CNTLB 0x060a +#define mmMMEA2_DSM_CNTLB_BASE_IDX 1 +#define mmMMEA2_DSM_CNTL2 0x060b +#define mmMMEA2_DSM_CNTL2_BASE_IDX 1 +#define mmMMEA2_DSM_CNTL2A 0x060c +#define mmMMEA2_DSM_CNTL2A_BASE_IDX 1 +#define mmMMEA2_DSM_CNTL2B 0x060d +#define mmMMEA2_DSM_CNTL2B_BASE_IDX 1 +#define mmMMEA2_CGTT_CLK_CTRL 0x060f +#define mmMMEA2_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmMMEA2_EDC_MODE 0x0610 +#define mmMMEA2_EDC_MODE_BASE_IDX 1 +#define mmMMEA2_ERR_STATUS 0x0611 +#define mmMMEA2_ERR_STATUS_BASE_IDX 1 +#define mmMMEA2_MISC2 0x0612 +#define mmMMEA2_MISC2_BASE_IDX 1 +#define mmMMEA2_ADDRDEC_SELECT 0x0613 +#define mmMMEA2_ADDRDEC_SELECT_BASE_IDX 1 +#define mmMMEA2_EDC_CNT3 0x0614 +#define mmMMEA2_EDC_CNT3_BASE_IDX 1 + + +// addressBlock: mmhub_ea_mmeadec3 +// base address: 0x69900 +#define mmMMEA3_DRAM_RD_CLI2GRP_MAP0 0x0640 +#define mmMMEA3_DRAM_RD_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA3_DRAM_RD_CLI2GRP_MAP1 0x0641 +#define mmMMEA3_DRAM_RD_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA3_DRAM_WR_CLI2GRP_MAP0 0x0642 +#define mmMMEA3_DRAM_WR_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA3_DRAM_WR_CLI2GRP_MAP1 0x0643 +#define mmMMEA3_DRAM_WR_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA3_DRAM_RD_GRP2VC_MAP 0x0644 +#define mmMMEA3_DRAM_RD_GRP2VC_MAP_BASE_IDX 1 +#define mmMMEA3_DRAM_WR_GRP2VC_MAP 0x0645 +#define mmMMEA3_DRAM_WR_GRP2VC_MAP_BASE_IDX 1 +#define mmMMEA3_DRAM_RD_LAZY 0x0646 +#define mmMMEA3_DRAM_RD_LAZY_BASE_IDX 1 +#define mmMMEA3_DRAM_WR_LAZY 0x0647 +#define mmMMEA3_DRAM_WR_LAZY_BASE_IDX 1 +#define mmMMEA3_DRAM_RD_CAM_CNTL 0x0648 +#define mmMMEA3_DRAM_RD_CAM_CNTL_BASE_IDX 1 +#define mmMMEA3_DRAM_WR_CAM_CNTL 0x0649 +#define mmMMEA3_DRAM_WR_CAM_CNTL_BASE_IDX 1 +#define mmMMEA3_DRAM_PAGE_BURST 0x064a +#define mmMMEA3_DRAM_PAGE_BURST_BASE_IDX 1 +#define mmMMEA3_DRAM_RD_PRI_AGE 0x064b +#define mmMMEA3_DRAM_RD_PRI_AGE_BASE_IDX 1 +#define mmMMEA3_DRAM_WR_PRI_AGE 0x064c +#define mmMMEA3_DRAM_WR_PRI_AGE_BASE_IDX 1 +#define mmMMEA3_DRAM_RD_PRI_QUEUING 0x064d +#define mmMMEA3_DRAM_RD_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA3_DRAM_WR_PRI_QUEUING 0x064e +#define mmMMEA3_DRAM_WR_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA3_DRAM_RD_PRI_FIXED 0x064f +#define mmMMEA3_DRAM_RD_PRI_FIXED_BASE_IDX 1 +#define 
mmMMEA3_DRAM_WR_PRI_FIXED 0x0650 +#define mmMMEA3_DRAM_WR_PRI_FIXED_BASE_IDX 1 +#define mmMMEA3_DRAM_RD_PRI_URGENCY 0x0651 +#define mmMMEA3_DRAM_RD_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA3_DRAM_WR_PRI_URGENCY 0x0652 +#define mmMMEA3_DRAM_WR_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA3_DRAM_RD_PRI_QUANT_PRI1 0x0653 +#define mmMMEA3_DRAM_RD_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA3_DRAM_RD_PRI_QUANT_PRI2 0x0654 +#define mmMMEA3_DRAM_RD_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA3_DRAM_RD_PRI_QUANT_PRI3 0x0655 +#define mmMMEA3_DRAM_RD_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA3_DRAM_WR_PRI_QUANT_PRI1 0x0656 +#define mmMMEA3_DRAM_WR_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA3_DRAM_WR_PRI_QUANT_PRI2 0x0657 +#define mmMMEA3_DRAM_WR_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA3_DRAM_WR_PRI_QUANT_PRI3 0x0658 +#define mmMMEA3_DRAM_WR_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA3_GMI_RD_CLI2GRP_MAP0 0x0659 +#define mmMMEA3_GMI_RD_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA3_GMI_RD_CLI2GRP_MAP1 0x065a +#define mmMMEA3_GMI_RD_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA3_GMI_WR_CLI2GRP_MAP0 0x065b +#define mmMMEA3_GMI_WR_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA3_GMI_WR_CLI2GRP_MAP1 0x065c +#define mmMMEA3_GMI_WR_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA3_GMI_RD_GRP2VC_MAP 0x065d +#define mmMMEA3_GMI_RD_GRP2VC_MAP_BASE_IDX 1 +#define mmMMEA3_GMI_WR_GRP2VC_MAP 0x065e +#define mmMMEA3_GMI_WR_GRP2VC_MAP_BASE_IDX 1 +#define mmMMEA3_GMI_RD_LAZY 0x065f +#define mmMMEA3_GMI_RD_LAZY_BASE_IDX 1 +#define mmMMEA3_GMI_WR_LAZY 0x0660 +#define mmMMEA3_GMI_WR_LAZY_BASE_IDX 1 +#define mmMMEA3_GMI_RD_CAM_CNTL 0x0661 +#define mmMMEA3_GMI_RD_CAM_CNTL_BASE_IDX 1 +#define mmMMEA3_GMI_WR_CAM_CNTL 0x0662 +#define mmMMEA3_GMI_WR_CAM_CNTL_BASE_IDX 1 +#define mmMMEA3_GMI_PAGE_BURST 0x0663 +#define mmMMEA3_GMI_PAGE_BURST_BASE_IDX 1 +#define mmMMEA3_GMI_RD_PRI_AGE 0x0664 +#define mmMMEA3_GMI_RD_PRI_AGE_BASE_IDX 1 +#define mmMMEA3_GMI_WR_PRI_AGE 0x0665 +#define mmMMEA3_GMI_WR_PRI_AGE_BASE_IDX 1 +#define mmMMEA3_GMI_RD_PRI_QUEUING 0x0666 +#define mmMMEA3_GMI_RD_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA3_GMI_WR_PRI_QUEUING 0x0667 +#define mmMMEA3_GMI_WR_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA3_GMI_RD_PRI_FIXED 0x0668 +#define mmMMEA3_GMI_RD_PRI_FIXED_BASE_IDX 1 +#define mmMMEA3_GMI_WR_PRI_FIXED 0x0669 +#define mmMMEA3_GMI_WR_PRI_FIXED_BASE_IDX 1 +#define mmMMEA3_GMI_RD_PRI_URGENCY 0x066a +#define mmMMEA3_GMI_RD_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA3_GMI_WR_PRI_URGENCY 0x066b +#define mmMMEA3_GMI_WR_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA3_GMI_RD_PRI_URGENCY_MASKING 0x066c +#define mmMMEA3_GMI_RD_PRI_URGENCY_MASKING_BASE_IDX 1 +#define mmMMEA3_GMI_WR_PRI_URGENCY_MASKING 0x066d +#define mmMMEA3_GMI_WR_PRI_URGENCY_MASKING_BASE_IDX 1 +#define mmMMEA3_GMI_RD_PRI_QUANT_PRI1 0x066e +#define mmMMEA3_GMI_RD_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA3_GMI_RD_PRI_QUANT_PRI2 0x066f +#define mmMMEA3_GMI_RD_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA3_GMI_RD_PRI_QUANT_PRI3 0x0670 +#define mmMMEA3_GMI_RD_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA3_GMI_WR_PRI_QUANT_PRI1 0x0671 +#define mmMMEA3_GMI_WR_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA3_GMI_WR_PRI_QUANT_PRI2 0x0672 +#define mmMMEA3_GMI_WR_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA3_GMI_WR_PRI_QUANT_PRI3 0x0673 +#define mmMMEA3_GMI_WR_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA3_ADDRNORM_BASE_ADDR0 0x0674 +#define mmMMEA3_ADDRNORM_BASE_ADDR0_BASE_IDX 1 +#define mmMMEA3_ADDRNORM_LIMIT_ADDR0 0x0675 +#define mmMMEA3_ADDRNORM_LIMIT_ADDR0_BASE_IDX 1 +#define mmMMEA3_ADDRNORM_BASE_ADDR1 0x0676 +#define 
mmMMEA3_ADDRNORM_BASE_ADDR1_BASE_IDX 1 +#define mmMMEA3_ADDRNORM_LIMIT_ADDR1 0x0677 +#define mmMMEA3_ADDRNORM_LIMIT_ADDR1_BASE_IDX 1 +#define mmMMEA3_ADDRNORM_OFFSET_ADDR1 0x0678 +#define mmMMEA3_ADDRNORM_OFFSET_ADDR1_BASE_IDX 1 +#define mmMMEA3_ADDRNORM_BASE_ADDR2 0x0679 +#define mmMMEA3_ADDRNORM_BASE_ADDR2_BASE_IDX 1 +#define mmMMEA3_ADDRNORM_LIMIT_ADDR2 0x067a +#define mmMMEA3_ADDRNORM_LIMIT_ADDR2_BASE_IDX 1 +#define mmMMEA3_ADDRNORM_BASE_ADDR3 0x067b +#define mmMMEA3_ADDRNORM_BASE_ADDR3_BASE_IDX 1 +#define mmMMEA3_ADDRNORM_LIMIT_ADDR3 0x067c +#define mmMMEA3_ADDRNORM_LIMIT_ADDR3_BASE_IDX 1 +#define mmMMEA3_ADDRNORM_OFFSET_ADDR3 0x067d +#define mmMMEA3_ADDRNORM_OFFSET_ADDR3_BASE_IDX 1 +#define mmMMEA3_ADDRNORM_BASE_ADDR4 0x067e +#define mmMMEA3_ADDRNORM_BASE_ADDR4_BASE_IDX 1 +#define mmMMEA3_ADDRNORM_LIMIT_ADDR4 0x067f +#define mmMMEA3_ADDRNORM_LIMIT_ADDR4_BASE_IDX 1 +#define mmMMEA3_ADDRNORM_BASE_ADDR5 0x0680 +#define mmMMEA3_ADDRNORM_BASE_ADDR5_BASE_IDX 1 +#define mmMMEA3_ADDRNORM_LIMIT_ADDR5 0x0681 +#define mmMMEA3_ADDRNORM_LIMIT_ADDR5_BASE_IDX 1 +#define mmMMEA3_ADDRNORM_OFFSET_ADDR5 0x0682 +#define mmMMEA3_ADDRNORM_OFFSET_ADDR5_BASE_IDX 1 +#define mmMMEA3_ADDRNORMDRAM_HOLE_CNTL 0x0683 +#define mmMMEA3_ADDRNORMDRAM_HOLE_CNTL_BASE_IDX 1 +#define mmMMEA3_ADDRNORMGMI_HOLE_CNTL 0x0684 +#define mmMMEA3_ADDRNORMGMI_HOLE_CNTL_BASE_IDX 1 +#define mmMMEA3_ADDRNORMDRAM_NP2_CHANNEL_CFG 0x0685 +#define mmMMEA3_ADDRNORMDRAM_NP2_CHANNEL_CFG_BASE_IDX 1 +#define mmMMEA3_ADDRNORMGMI_NP2_CHANNEL_CFG 0x0686 +#define mmMMEA3_ADDRNORMGMI_NP2_CHANNEL_CFG_BASE_IDX 1 +#define mmMMEA3_ADDRDEC_BANK_CFG 0x0687 +#define mmMMEA3_ADDRDEC_BANK_CFG_BASE_IDX 1 +#define mmMMEA3_ADDRDEC_MISC_CFG 0x0688 +#define mmMMEA3_ADDRDEC_MISC_CFG_BASE_IDX 1 +#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_BANK0 0x0689 +#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_BANK0_BASE_IDX 1 +#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_BANK1 0x068a +#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_BANK1_BASE_IDX 1 +#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_BANK2 0x068b +#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_BANK2_BASE_IDX 1 +#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_BANK3 0x068c +#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_BANK3_BASE_IDX 1 +#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_BANK4 0x068d +#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_BANK4_BASE_IDX 1 +#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_BANK5 0x068e +#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_BANK5_BASE_IDX 1 +#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_PC 0x068f +#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_PC_BASE_IDX 1 +#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_PC2 0x0690 +#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_PC2_BASE_IDX 1 +#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_CS0 0x0691 +#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_CS0_BASE_IDX 1 +#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_CS1 0x0692 +#define mmMMEA3_ADDRDECDRAM_ADDR_HASH_CS1_BASE_IDX 1 +#define mmMMEA3_ADDRDECDRAM_HARVEST_ENABLE 0x0693 +#define mmMMEA3_ADDRDECDRAM_HARVEST_ENABLE_BASE_IDX 1 +#define mmMMEA3_ADDRDECGMI_ADDR_HASH_BANK0 0x0694 +#define mmMMEA3_ADDRDECGMI_ADDR_HASH_BANK0_BASE_IDX 1 +#define mmMMEA3_ADDRDECGMI_ADDR_HASH_BANK1 0x0695 +#define mmMMEA3_ADDRDECGMI_ADDR_HASH_BANK1_BASE_IDX 1 +#define mmMMEA3_ADDRDECGMI_ADDR_HASH_BANK2 0x0696 +#define mmMMEA3_ADDRDECGMI_ADDR_HASH_BANK2_BASE_IDX 1 +#define mmMMEA3_ADDRDECGMI_ADDR_HASH_BANK3 0x0697 +#define mmMMEA3_ADDRDECGMI_ADDR_HASH_BANK3_BASE_IDX 1 +#define mmMMEA3_ADDRDECGMI_ADDR_HASH_BANK4 0x0698 +#define mmMMEA3_ADDRDECGMI_ADDR_HASH_BANK4_BASE_IDX 1 +#define mmMMEA3_ADDRDECGMI_ADDR_HASH_BANK5 0x0699 +#define mmMMEA3_ADDRDECGMI_ADDR_HASH_BANK5_BASE_IDX 1 
+#define mmMMEA3_ADDRDECGMI_ADDR_HASH_PC 0x069a +#define mmMMEA3_ADDRDECGMI_ADDR_HASH_PC_BASE_IDX 1 +#define mmMMEA3_ADDRDECGMI_ADDR_HASH_PC2 0x069b +#define mmMMEA3_ADDRDECGMI_ADDR_HASH_PC2_BASE_IDX 1 +#define mmMMEA3_ADDRDECGMI_ADDR_HASH_CS0 0x069c +#define mmMMEA3_ADDRDECGMI_ADDR_HASH_CS0_BASE_IDX 1 +#define mmMMEA3_ADDRDECGMI_ADDR_HASH_CS1 0x069d +#define mmMMEA3_ADDRDECGMI_ADDR_HASH_CS1_BASE_IDX 1 +#define mmMMEA3_ADDRDECGMI_HARVEST_ENABLE 0x069e +#define mmMMEA3_ADDRDECGMI_HARVEST_ENABLE_BASE_IDX 1 +#define mmMMEA3_ADDRDEC0_BASE_ADDR_CS0 0x069f +#define mmMMEA3_ADDRDEC0_BASE_ADDR_CS0_BASE_IDX 1 +#define mmMMEA3_ADDRDEC0_BASE_ADDR_CS1 0x06a0 +#define mmMMEA3_ADDRDEC0_BASE_ADDR_CS1_BASE_IDX 1 +#define mmMMEA3_ADDRDEC0_BASE_ADDR_CS2 0x06a1 +#define mmMMEA3_ADDRDEC0_BASE_ADDR_CS2_BASE_IDX 1 +#define mmMMEA3_ADDRDEC0_BASE_ADDR_CS3 0x06a2 +#define mmMMEA3_ADDRDEC0_BASE_ADDR_CS3_BASE_IDX 1 +#define mmMMEA3_ADDRDEC0_BASE_ADDR_SECCS0 0x06a3 +#define mmMMEA3_ADDRDEC0_BASE_ADDR_SECCS0_BASE_IDX 1 +#define mmMMEA3_ADDRDEC0_BASE_ADDR_SECCS1 0x06a4 +#define mmMMEA3_ADDRDEC0_BASE_ADDR_SECCS1_BASE_IDX 1 +#define mmMMEA3_ADDRDEC0_BASE_ADDR_SECCS2 0x06a5 +#define mmMMEA3_ADDRDEC0_BASE_ADDR_SECCS2_BASE_IDX 1 +#define mmMMEA3_ADDRDEC0_BASE_ADDR_SECCS3 0x06a6 +#define mmMMEA3_ADDRDEC0_BASE_ADDR_SECCS3_BASE_IDX 1 +#define mmMMEA3_ADDRDEC0_ADDR_MASK_CS01 0x06a7 +#define mmMMEA3_ADDRDEC0_ADDR_MASK_CS01_BASE_IDX 1 +#define mmMMEA3_ADDRDEC0_ADDR_MASK_CS23 0x06a8 +#define mmMMEA3_ADDRDEC0_ADDR_MASK_CS23_BASE_IDX 1 +#define mmMMEA3_ADDRDEC0_ADDR_MASK_SECCS01 0x06a9 +#define mmMMEA3_ADDRDEC0_ADDR_MASK_SECCS01_BASE_IDX 1 +#define mmMMEA3_ADDRDEC0_ADDR_MASK_SECCS23 0x06aa +#define mmMMEA3_ADDRDEC0_ADDR_MASK_SECCS23_BASE_IDX 1 +#define mmMMEA3_ADDRDEC0_ADDR_CFG_CS01 0x06ab +#define mmMMEA3_ADDRDEC0_ADDR_CFG_CS01_BASE_IDX 1 +#define mmMMEA3_ADDRDEC0_ADDR_CFG_CS23 0x06ac +#define mmMMEA3_ADDRDEC0_ADDR_CFG_CS23_BASE_IDX 1 +#define mmMMEA3_ADDRDEC0_ADDR_SEL_CS01 0x06ad +#define mmMMEA3_ADDRDEC0_ADDR_SEL_CS01_BASE_IDX 1 +#define mmMMEA3_ADDRDEC0_ADDR_SEL_CS23 0x06ae +#define mmMMEA3_ADDRDEC0_ADDR_SEL_CS23_BASE_IDX 1 +#define mmMMEA3_ADDRDEC0_ADDR_SEL2_CS01 0x06af +#define mmMMEA3_ADDRDEC0_ADDR_SEL2_CS01_BASE_IDX 1 +#define mmMMEA3_ADDRDEC0_ADDR_SEL2_CS23 0x06b0 +#define mmMMEA3_ADDRDEC0_ADDR_SEL2_CS23_BASE_IDX 1 +#define mmMMEA3_ADDRDEC0_COL_SEL_LO_CS01 0x06b1 +#define mmMMEA3_ADDRDEC0_COL_SEL_LO_CS01_BASE_IDX 1 +#define mmMMEA3_ADDRDEC0_COL_SEL_LO_CS23 0x06b2 +#define mmMMEA3_ADDRDEC0_COL_SEL_LO_CS23_BASE_IDX 1 +#define mmMMEA3_ADDRDEC0_COL_SEL_HI_CS01 0x06b3 +#define mmMMEA3_ADDRDEC0_COL_SEL_HI_CS01_BASE_IDX 1 +#define mmMMEA3_ADDRDEC0_COL_SEL_HI_CS23 0x06b4 +#define mmMMEA3_ADDRDEC0_COL_SEL_HI_CS23_BASE_IDX 1 +#define mmMMEA3_ADDRDEC0_RM_SEL_CS01 0x06b5 +#define mmMMEA3_ADDRDEC0_RM_SEL_CS01_BASE_IDX 1 +#define mmMMEA3_ADDRDEC0_RM_SEL_CS23 0x06b6 +#define mmMMEA3_ADDRDEC0_RM_SEL_CS23_BASE_IDX 1 +#define mmMMEA3_ADDRDEC0_RM_SEL_SECCS01 0x06b7 +#define mmMMEA3_ADDRDEC0_RM_SEL_SECCS01_BASE_IDX 1 +#define mmMMEA3_ADDRDEC0_RM_SEL_SECCS23 0x06b8 +#define mmMMEA3_ADDRDEC0_RM_SEL_SECCS23_BASE_IDX 1 +#define mmMMEA3_ADDRDEC1_BASE_ADDR_CS0 0x06b9 +#define mmMMEA3_ADDRDEC1_BASE_ADDR_CS0_BASE_IDX 1 +#define mmMMEA3_ADDRDEC1_BASE_ADDR_CS1 0x06ba +#define mmMMEA3_ADDRDEC1_BASE_ADDR_CS1_BASE_IDX 1 +#define mmMMEA3_ADDRDEC1_BASE_ADDR_CS2 0x06bb +#define mmMMEA3_ADDRDEC1_BASE_ADDR_CS2_BASE_IDX 1 +#define mmMMEA3_ADDRDEC1_BASE_ADDR_CS3 0x06bc +#define mmMMEA3_ADDRDEC1_BASE_ADDR_CS3_BASE_IDX 1 +#define mmMMEA3_ADDRDEC1_BASE_ADDR_SECCS0 
0x06bd +#define mmMMEA3_ADDRDEC1_BASE_ADDR_SECCS0_BASE_IDX 1 +#define mmMMEA3_ADDRDEC1_BASE_ADDR_SECCS1 0x06be +#define mmMMEA3_ADDRDEC1_BASE_ADDR_SECCS1_BASE_IDX 1 +#define mmMMEA3_ADDRDEC1_BASE_ADDR_SECCS2 0x06bf +#define mmMMEA3_ADDRDEC1_BASE_ADDR_SECCS2_BASE_IDX 1 +#define mmMMEA3_ADDRDEC1_BASE_ADDR_SECCS3 0x06c0 +#define mmMMEA3_ADDRDEC1_BASE_ADDR_SECCS3_BASE_IDX 1 +#define mmMMEA3_ADDRDEC1_ADDR_MASK_CS01 0x06c1 +#define mmMMEA3_ADDRDEC1_ADDR_MASK_CS01_BASE_IDX 1 +#define mmMMEA3_ADDRDEC1_ADDR_MASK_CS23 0x06c2 +#define mmMMEA3_ADDRDEC1_ADDR_MASK_CS23_BASE_IDX 1 +#define mmMMEA3_ADDRDEC1_ADDR_MASK_SECCS01 0x06c3 +#define mmMMEA3_ADDRDEC1_ADDR_MASK_SECCS01_BASE_IDX 1 +#define mmMMEA3_ADDRDEC1_ADDR_MASK_SECCS23 0x06c4 +#define mmMMEA3_ADDRDEC1_ADDR_MASK_SECCS23_BASE_IDX 1 +#define mmMMEA3_ADDRDEC1_ADDR_CFG_CS01 0x06c5 +#define mmMMEA3_ADDRDEC1_ADDR_CFG_CS01_BASE_IDX 1 +#define mmMMEA3_ADDRDEC1_ADDR_CFG_CS23 0x06c6 +#define mmMMEA3_ADDRDEC1_ADDR_CFG_CS23_BASE_IDX 1 +#define mmMMEA3_ADDRDEC1_ADDR_SEL_CS01 0x06c7 +#define mmMMEA3_ADDRDEC1_ADDR_SEL_CS01_BASE_IDX 1 +#define mmMMEA3_ADDRDEC1_ADDR_SEL_CS23 0x06c8 +#define mmMMEA3_ADDRDEC1_ADDR_SEL_CS23_BASE_IDX 1 +#define mmMMEA3_ADDRDEC1_ADDR_SEL2_CS01 0x06c9 +#define mmMMEA3_ADDRDEC1_ADDR_SEL2_CS01_BASE_IDX 1 +#define mmMMEA3_ADDRDEC1_ADDR_SEL2_CS23 0x06ca +#define mmMMEA3_ADDRDEC1_ADDR_SEL2_CS23_BASE_IDX 1 +#define mmMMEA3_ADDRDEC1_COL_SEL_LO_CS01 0x06cb +#define mmMMEA3_ADDRDEC1_COL_SEL_LO_CS01_BASE_IDX 1 +#define mmMMEA3_ADDRDEC1_COL_SEL_LO_CS23 0x06cc +#define mmMMEA3_ADDRDEC1_COL_SEL_LO_CS23_BASE_IDX 1 +#define mmMMEA3_ADDRDEC1_COL_SEL_HI_CS01 0x06cd +#define mmMMEA3_ADDRDEC1_COL_SEL_HI_CS01_BASE_IDX 1 +#define mmMMEA3_ADDRDEC1_COL_SEL_HI_CS23 0x06ce +#define mmMMEA3_ADDRDEC1_COL_SEL_HI_CS23_BASE_IDX 1 +#define mmMMEA3_ADDRDEC1_RM_SEL_CS01 0x06cf +#define mmMMEA3_ADDRDEC1_RM_SEL_CS01_BASE_IDX 1 +#define mmMMEA3_ADDRDEC1_RM_SEL_CS23 0x06d0 +#define mmMMEA3_ADDRDEC1_RM_SEL_CS23_BASE_IDX 1 +#define mmMMEA3_ADDRDEC1_RM_SEL_SECCS01 0x06d1 +#define mmMMEA3_ADDRDEC1_RM_SEL_SECCS01_BASE_IDX 1 +#define mmMMEA3_ADDRDEC1_RM_SEL_SECCS23 0x06d2 +#define mmMMEA3_ADDRDEC1_RM_SEL_SECCS23_BASE_IDX 1 +#define mmMMEA3_ADDRDEC2_BASE_ADDR_CS0 0x06d3 +#define mmMMEA3_ADDRDEC2_BASE_ADDR_CS0_BASE_IDX 1 +#define mmMMEA3_ADDRDEC2_BASE_ADDR_CS1 0x06d4 +#define mmMMEA3_ADDRDEC2_BASE_ADDR_CS1_BASE_IDX 1 +#define mmMMEA3_ADDRDEC2_BASE_ADDR_CS2 0x06d5 +#define mmMMEA3_ADDRDEC2_BASE_ADDR_CS2_BASE_IDX 1 +#define mmMMEA3_ADDRDEC2_BASE_ADDR_CS3 0x06d6 +#define mmMMEA3_ADDRDEC2_BASE_ADDR_CS3_BASE_IDX 1 +#define mmMMEA3_ADDRDEC2_BASE_ADDR_SECCS0 0x06d7 +#define mmMMEA3_ADDRDEC2_BASE_ADDR_SECCS0_BASE_IDX 1 +#define mmMMEA3_ADDRDEC2_BASE_ADDR_SECCS1 0x06d8 +#define mmMMEA3_ADDRDEC2_BASE_ADDR_SECCS1_BASE_IDX 1 +#define mmMMEA3_ADDRDEC2_BASE_ADDR_SECCS2 0x06d9 +#define mmMMEA3_ADDRDEC2_BASE_ADDR_SECCS2_BASE_IDX 1 +#define mmMMEA3_ADDRDEC2_BASE_ADDR_SECCS3 0x06da +#define mmMMEA3_ADDRDEC2_BASE_ADDR_SECCS3_BASE_IDX 1 +#define mmMMEA3_ADDRDEC2_ADDR_MASK_CS01 0x06db +#define mmMMEA3_ADDRDEC2_ADDR_MASK_CS01_BASE_IDX 1 +#define mmMMEA3_ADDRDEC2_ADDR_MASK_CS23 0x06dc +#define mmMMEA3_ADDRDEC2_ADDR_MASK_CS23_BASE_IDX 1 +#define mmMMEA3_ADDRDEC2_ADDR_MASK_SECCS01 0x06dd +#define mmMMEA3_ADDRDEC2_ADDR_MASK_SECCS01_BASE_IDX 1 +#define mmMMEA3_ADDRDEC2_ADDR_MASK_SECCS23 0x06de +#define mmMMEA3_ADDRDEC2_ADDR_MASK_SECCS23_BASE_IDX 1 +#define mmMMEA3_ADDRDEC2_ADDR_CFG_CS01 0x06df +#define mmMMEA3_ADDRDEC2_ADDR_CFG_CS01_BASE_IDX 1 +#define mmMMEA3_ADDRDEC2_ADDR_CFG_CS23 0x06e0 +#define 
mmMMEA3_ADDRDEC2_ADDR_CFG_CS23_BASE_IDX 1 +#define mmMMEA3_ADDRDEC2_ADDR_SEL_CS01 0x06e1 +#define mmMMEA3_ADDRDEC2_ADDR_SEL_CS01_BASE_IDX 1 +#define mmMMEA3_ADDRDEC2_ADDR_SEL_CS23 0x06e2 +#define mmMMEA3_ADDRDEC2_ADDR_SEL_CS23_BASE_IDX 1 +#define mmMMEA3_ADDRDEC2_ADDR_SEL2_CS01 0x06e3 +#define mmMMEA3_ADDRDEC2_ADDR_SEL2_CS01_BASE_IDX 1 +#define mmMMEA3_ADDRDEC2_ADDR_SEL2_CS23 0x06e4 +#define mmMMEA3_ADDRDEC2_ADDR_SEL2_CS23_BASE_IDX 1 +#define mmMMEA3_ADDRDEC2_COL_SEL_LO_CS01 0x06e5 +#define mmMMEA3_ADDRDEC2_COL_SEL_LO_CS01_BASE_IDX 1 +#define mmMMEA3_ADDRDEC2_COL_SEL_LO_CS23 0x06e6 +#define mmMMEA3_ADDRDEC2_COL_SEL_LO_CS23_BASE_IDX 1 +#define mmMMEA3_ADDRDEC2_COL_SEL_HI_CS01 0x06e7 +#define mmMMEA3_ADDRDEC2_COL_SEL_HI_CS01_BASE_IDX 1 +#define mmMMEA3_ADDRDEC2_COL_SEL_HI_CS23 0x06e8 +#define mmMMEA3_ADDRDEC2_COL_SEL_HI_CS23_BASE_IDX 1 +#define mmMMEA3_ADDRDEC2_RM_SEL_CS01 0x06e9 +#define mmMMEA3_ADDRDEC2_RM_SEL_CS01_BASE_IDX 1 +#define mmMMEA3_ADDRDEC2_RM_SEL_CS23 0x06ea +#define mmMMEA3_ADDRDEC2_RM_SEL_CS23_BASE_IDX 1 +#define mmMMEA3_ADDRDEC2_RM_SEL_SECCS01 0x06eb +#define mmMMEA3_ADDRDEC2_RM_SEL_SECCS01_BASE_IDX 1 +#define mmMMEA3_ADDRDEC2_RM_SEL_SECCS23 0x06ec +#define mmMMEA3_ADDRDEC2_RM_SEL_SECCS23_BASE_IDX 1 +#define mmMMEA3_ADDRNORMDRAM_GLOBAL_CNTL 0x06ed +#define mmMMEA3_ADDRNORMDRAM_GLOBAL_CNTL_BASE_IDX 1 +#define mmMMEA3_ADDRNORMGMI_GLOBAL_CNTL 0x06ee +#define mmMMEA3_ADDRNORMGMI_GLOBAL_CNTL_BASE_IDX 1 +#define mmMMEA3_IO_RD_CLI2GRP_MAP0 0x0715 +#define mmMMEA3_IO_RD_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA3_IO_RD_CLI2GRP_MAP1 0x0716 +#define mmMMEA3_IO_RD_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA3_IO_WR_CLI2GRP_MAP0 0x0717 +#define mmMMEA3_IO_WR_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA3_IO_WR_CLI2GRP_MAP1 0x0718 +#define mmMMEA3_IO_WR_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA3_IO_RD_COMBINE_FLUSH 0x0719 +#define mmMMEA3_IO_RD_COMBINE_FLUSH_BASE_IDX 1 +#define mmMMEA3_IO_WR_COMBINE_FLUSH 0x071a +#define mmMMEA3_IO_WR_COMBINE_FLUSH_BASE_IDX 1 +#define mmMMEA3_IO_GROUP_BURST 0x071b +#define mmMMEA3_IO_GROUP_BURST_BASE_IDX 1 +#define mmMMEA3_IO_RD_PRI_AGE 0x071c +#define mmMMEA3_IO_RD_PRI_AGE_BASE_IDX 1 +#define mmMMEA3_IO_WR_PRI_AGE 0x071d +#define mmMMEA3_IO_WR_PRI_AGE_BASE_IDX 1 +#define mmMMEA3_IO_RD_PRI_QUEUING 0x071e +#define mmMMEA3_IO_RD_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA3_IO_WR_PRI_QUEUING 0x071f +#define mmMMEA3_IO_WR_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA3_IO_RD_PRI_FIXED 0x0720 +#define mmMMEA3_IO_RD_PRI_FIXED_BASE_IDX 1 +#define mmMMEA3_IO_WR_PRI_FIXED 0x0721 +#define mmMMEA3_IO_WR_PRI_FIXED_BASE_IDX 1 +#define mmMMEA3_IO_RD_PRI_URGENCY 0x0722 +#define mmMMEA3_IO_RD_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA3_IO_WR_PRI_URGENCY 0x0723 +#define mmMMEA3_IO_WR_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA3_IO_RD_PRI_URGENCY_MASKING 0x0724 +#define mmMMEA3_IO_RD_PRI_URGENCY_MASKING_BASE_IDX 1 +#define mmMMEA3_IO_WR_PRI_URGENCY_MASKING 0x0725 +#define mmMMEA3_IO_WR_PRI_URGENCY_MASKING_BASE_IDX 1 +#define mmMMEA3_IO_RD_PRI_QUANT_PRI1 0x0726 +#define mmMMEA3_IO_RD_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA3_IO_RD_PRI_QUANT_PRI2 0x0727 +#define mmMMEA3_IO_RD_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA3_IO_RD_PRI_QUANT_PRI3 0x0728 +#define mmMMEA3_IO_RD_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA3_IO_WR_PRI_QUANT_PRI1 0x0729 +#define mmMMEA3_IO_WR_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA3_IO_WR_PRI_QUANT_PRI2 0x072a +#define mmMMEA3_IO_WR_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA3_IO_WR_PRI_QUANT_PRI3 0x072b +#define mmMMEA3_IO_WR_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA3_SDP_ARB_DRAM 0x072c 
+#define mmMMEA3_SDP_ARB_DRAM_BASE_IDX 1 +#define mmMMEA3_SDP_ARB_GMI 0x072d +#define mmMMEA3_SDP_ARB_GMI_BASE_IDX 1 +#define mmMMEA3_SDP_ARB_FINAL 0x072e +#define mmMMEA3_SDP_ARB_FINAL_BASE_IDX 1 +#define mmMMEA3_SDP_DRAM_PRIORITY 0x072f +#define mmMMEA3_SDP_DRAM_PRIORITY_BASE_IDX 1 +#define mmMMEA3_SDP_GMI_PRIORITY 0x0730 +#define mmMMEA3_SDP_GMI_PRIORITY_BASE_IDX 1 +#define mmMMEA3_SDP_IO_PRIORITY 0x0731 +#define mmMMEA3_SDP_IO_PRIORITY_BASE_IDX 1 +#define mmMMEA3_SDP_CREDITS 0x0732 +#define mmMMEA3_SDP_CREDITS_BASE_IDX 1 +#define mmMMEA3_SDP_TAG_RESERVE0 0x0733 +#define mmMMEA3_SDP_TAG_RESERVE0_BASE_IDX 1 +#define mmMMEA3_SDP_TAG_RESERVE1 0x0734 +#define mmMMEA3_SDP_TAG_RESERVE1_BASE_IDX 1 +#define mmMMEA3_SDP_VCC_RESERVE0 0x0735 +#define mmMMEA3_SDP_VCC_RESERVE0_BASE_IDX 1 +#define mmMMEA3_SDP_VCC_RESERVE1 0x0736 +#define mmMMEA3_SDP_VCC_RESERVE1_BASE_IDX 1 +#define mmMMEA3_SDP_VCD_RESERVE0 0x0737 +#define mmMMEA3_SDP_VCD_RESERVE0_BASE_IDX 1 +#define mmMMEA3_SDP_VCD_RESERVE1 0x0738 +#define mmMMEA3_SDP_VCD_RESERVE1_BASE_IDX 1 +#define mmMMEA3_SDP_REQ_CNTL 0x0739 +#define mmMMEA3_SDP_REQ_CNTL_BASE_IDX 1 +#define mmMMEA3_MISC 0x073a +#define mmMMEA3_MISC_BASE_IDX 1 +#define mmMMEA3_LATENCY_SAMPLING 0x073b +#define mmMMEA3_LATENCY_SAMPLING_BASE_IDX 1 +#define mmMMEA3_PERFCOUNTER_LO 0x073c +#define mmMMEA3_PERFCOUNTER_LO_BASE_IDX 1 +#define mmMMEA3_PERFCOUNTER_HI 0x073d +#define mmMMEA3_PERFCOUNTER_HI_BASE_IDX 1 +#define mmMMEA3_PERFCOUNTER0_CFG 0x073e +#define mmMMEA3_PERFCOUNTER0_CFG_BASE_IDX 1 +#define mmMMEA3_PERFCOUNTER1_CFG 0x073f +#define mmMMEA3_PERFCOUNTER1_CFG_BASE_IDX 1 +#define mmMMEA3_PERFCOUNTER_RSLT_CNTL 0x0740 +#define mmMMEA3_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1 +#define mmMMEA3_EDC_CNT 0x0746 +#define mmMMEA3_EDC_CNT_BASE_IDX 1 +#define mmMMEA3_EDC_CNT2 0x0747 +#define mmMMEA3_EDC_CNT2_BASE_IDX 1 +#define mmMMEA3_DSM_CNTL 0x0748 +#define mmMMEA3_DSM_CNTL_BASE_IDX 1 +#define mmMMEA3_DSM_CNTLA 0x0749 +#define mmMMEA3_DSM_CNTLA_BASE_IDX 1 +#define mmMMEA3_DSM_CNTLB 0x074a +#define mmMMEA3_DSM_CNTLB_BASE_IDX 1 +#define mmMMEA3_DSM_CNTL2 0x074b +#define mmMMEA3_DSM_CNTL2_BASE_IDX 1 +#define mmMMEA3_DSM_CNTL2A 0x074c +#define mmMMEA3_DSM_CNTL2A_BASE_IDX 1 +#define mmMMEA3_DSM_CNTL2B 0x074d +#define mmMMEA3_DSM_CNTL2B_BASE_IDX 1 +#define mmMMEA3_CGTT_CLK_CTRL 0x074f +#define mmMMEA3_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmMMEA3_EDC_MODE 0x0750 +#define mmMMEA3_EDC_MODE_BASE_IDX 1 +#define mmMMEA3_ERR_STATUS 0x0751 +#define mmMMEA3_ERR_STATUS_BASE_IDX 1 +#define mmMMEA3_MISC2 0x0752 +#define mmMMEA3_MISC2_BASE_IDX 1 +#define mmMMEA3_ADDRDEC_SELECT 0x0753 +#define mmMMEA3_ADDRDEC_SELECT_BASE_IDX 1 +#define mmMMEA3_EDC_CNT3 0x0754 +#define mmMMEA3_EDC_CNT3_BASE_IDX 1 + + +// addressBlock: mmhub_ea_mmeadec4 +// base address: 0x69e00 +#define mmMMEA4_DRAM_RD_CLI2GRP_MAP0 0x0780 +#define mmMMEA4_DRAM_RD_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA4_DRAM_RD_CLI2GRP_MAP1 0x0781 +#define mmMMEA4_DRAM_RD_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA4_DRAM_WR_CLI2GRP_MAP0 0x0782 +#define mmMMEA4_DRAM_WR_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA4_DRAM_WR_CLI2GRP_MAP1 0x0783 +#define mmMMEA4_DRAM_WR_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA4_DRAM_RD_GRP2VC_MAP 0x0784 +#define mmMMEA4_DRAM_RD_GRP2VC_MAP_BASE_IDX 1 +#define mmMMEA4_DRAM_WR_GRP2VC_MAP 0x0785 +#define mmMMEA4_DRAM_WR_GRP2VC_MAP_BASE_IDX 1 +#define mmMMEA4_DRAM_RD_LAZY 0x0786 +#define mmMMEA4_DRAM_RD_LAZY_BASE_IDX 1 +#define mmMMEA4_DRAM_WR_LAZY 0x0787 +#define mmMMEA4_DRAM_WR_LAZY_BASE_IDX 1 +#define mmMMEA4_DRAM_RD_CAM_CNTL 0x0788 +#define 
mmMMEA4_DRAM_RD_CAM_CNTL_BASE_IDX 1 +#define mmMMEA4_DRAM_WR_CAM_CNTL 0x0789 +#define mmMMEA4_DRAM_WR_CAM_CNTL_BASE_IDX 1 +#define mmMMEA4_DRAM_PAGE_BURST 0x078a +#define mmMMEA4_DRAM_PAGE_BURST_BASE_IDX 1 +#define mmMMEA4_DRAM_RD_PRI_AGE 0x078b +#define mmMMEA4_DRAM_RD_PRI_AGE_BASE_IDX 1 +#define mmMMEA4_DRAM_WR_PRI_AGE 0x078c +#define mmMMEA4_DRAM_WR_PRI_AGE_BASE_IDX 1 +#define mmMMEA4_DRAM_RD_PRI_QUEUING 0x078d +#define mmMMEA4_DRAM_RD_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA4_DRAM_WR_PRI_QUEUING 0x078e +#define mmMMEA4_DRAM_WR_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA4_DRAM_RD_PRI_FIXED 0x078f +#define mmMMEA4_DRAM_RD_PRI_FIXED_BASE_IDX 1 +#define mmMMEA4_DRAM_WR_PRI_FIXED 0x0790 +#define mmMMEA4_DRAM_WR_PRI_FIXED_BASE_IDX 1 +#define mmMMEA4_DRAM_RD_PRI_URGENCY 0x0791 +#define mmMMEA4_DRAM_RD_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA4_DRAM_WR_PRI_URGENCY 0x0792 +#define mmMMEA4_DRAM_WR_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA4_DRAM_RD_PRI_QUANT_PRI1 0x0793 +#define mmMMEA4_DRAM_RD_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA4_DRAM_RD_PRI_QUANT_PRI2 0x0794 +#define mmMMEA4_DRAM_RD_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA4_DRAM_RD_PRI_QUANT_PRI3 0x0795 +#define mmMMEA4_DRAM_RD_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA4_DRAM_WR_PRI_QUANT_PRI1 0x0796 +#define mmMMEA4_DRAM_WR_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA4_DRAM_WR_PRI_QUANT_PRI2 0x0797 +#define mmMMEA4_DRAM_WR_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA4_DRAM_WR_PRI_QUANT_PRI3 0x0798 +#define mmMMEA4_DRAM_WR_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA4_GMI_RD_CLI2GRP_MAP0 0x0799 +#define mmMMEA4_GMI_RD_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA4_GMI_RD_CLI2GRP_MAP1 0x079a +#define mmMMEA4_GMI_RD_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA4_GMI_WR_CLI2GRP_MAP0 0x079b +#define mmMMEA4_GMI_WR_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA4_GMI_WR_CLI2GRP_MAP1 0x079c +#define mmMMEA4_GMI_WR_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA4_GMI_RD_GRP2VC_MAP 0x079d +#define mmMMEA4_GMI_RD_GRP2VC_MAP_BASE_IDX 1 +#define mmMMEA4_GMI_WR_GRP2VC_MAP 0x079e +#define mmMMEA4_GMI_WR_GRP2VC_MAP_BASE_IDX 1 +#define mmMMEA4_GMI_RD_LAZY 0x079f +#define mmMMEA4_GMI_RD_LAZY_BASE_IDX 1 +#define mmMMEA4_GMI_WR_LAZY 0x07a0 +#define mmMMEA4_GMI_WR_LAZY_BASE_IDX 1 +#define mmMMEA4_GMI_RD_CAM_CNTL 0x07a1 +#define mmMMEA4_GMI_RD_CAM_CNTL_BASE_IDX 1 +#define mmMMEA4_GMI_WR_CAM_CNTL 0x07a2 +#define mmMMEA4_GMI_WR_CAM_CNTL_BASE_IDX 1 +#define mmMMEA4_GMI_PAGE_BURST 0x07a3 +#define mmMMEA4_GMI_PAGE_BURST_BASE_IDX 1 +#define mmMMEA4_GMI_RD_PRI_AGE 0x07a4 +#define mmMMEA4_GMI_RD_PRI_AGE_BASE_IDX 1 +#define mmMMEA4_GMI_WR_PRI_AGE 0x07a5 +#define mmMMEA4_GMI_WR_PRI_AGE_BASE_IDX 1 +#define mmMMEA4_GMI_RD_PRI_QUEUING 0x07a6 +#define mmMMEA4_GMI_RD_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA4_GMI_WR_PRI_QUEUING 0x07a7 +#define mmMMEA4_GMI_WR_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA4_GMI_RD_PRI_FIXED 0x07a8 +#define mmMMEA4_GMI_RD_PRI_FIXED_BASE_IDX 1 +#define mmMMEA4_GMI_WR_PRI_FIXED 0x07a9 +#define mmMMEA4_GMI_WR_PRI_FIXED_BASE_IDX 1 +#define mmMMEA4_GMI_RD_PRI_URGENCY 0x07aa +#define mmMMEA4_GMI_RD_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA4_GMI_WR_PRI_URGENCY 0x07ab +#define mmMMEA4_GMI_WR_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA4_GMI_RD_PRI_URGENCY_MASKING 0x07ac +#define mmMMEA4_GMI_RD_PRI_URGENCY_MASKING_BASE_IDX 1 +#define mmMMEA4_GMI_WR_PRI_URGENCY_MASKING 0x07ad +#define mmMMEA4_GMI_WR_PRI_URGENCY_MASKING_BASE_IDX 1 +#define mmMMEA4_GMI_RD_PRI_QUANT_PRI1 0x07ae +#define mmMMEA4_GMI_RD_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA4_GMI_RD_PRI_QUANT_PRI2 0x07af +#define mmMMEA4_GMI_RD_PRI_QUANT_PRI2_BASE_IDX 1 
+#define mmMMEA4_GMI_RD_PRI_QUANT_PRI3 0x07b0 +#define mmMMEA4_GMI_RD_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA4_GMI_WR_PRI_QUANT_PRI1 0x07b1 +#define mmMMEA4_GMI_WR_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA4_GMI_WR_PRI_QUANT_PRI2 0x07b2 +#define mmMMEA4_GMI_WR_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA4_GMI_WR_PRI_QUANT_PRI3 0x07b3 +#define mmMMEA4_GMI_WR_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA4_ADDRNORM_BASE_ADDR0 0x07b4 +#define mmMMEA4_ADDRNORM_BASE_ADDR0_BASE_IDX 1 +#define mmMMEA4_ADDRNORM_LIMIT_ADDR0 0x07b5 +#define mmMMEA4_ADDRNORM_LIMIT_ADDR0_BASE_IDX 1 +#define mmMMEA4_ADDRNORM_BASE_ADDR1 0x07b6 +#define mmMMEA4_ADDRNORM_BASE_ADDR1_BASE_IDX 1 +#define mmMMEA4_ADDRNORM_LIMIT_ADDR1 0x07b7 +#define mmMMEA4_ADDRNORM_LIMIT_ADDR1_BASE_IDX 1 +#define mmMMEA4_ADDRNORM_OFFSET_ADDR1 0x07b8 +#define mmMMEA4_ADDRNORM_OFFSET_ADDR1_BASE_IDX 1 +#define mmMMEA4_ADDRNORM_BASE_ADDR2 0x07b9 +#define mmMMEA4_ADDRNORM_BASE_ADDR2_BASE_IDX 1 +#define mmMMEA4_ADDRNORM_LIMIT_ADDR2 0x07ba +#define mmMMEA4_ADDRNORM_LIMIT_ADDR2_BASE_IDX 1 +#define mmMMEA4_ADDRNORM_BASE_ADDR3 0x07bb +#define mmMMEA4_ADDRNORM_BASE_ADDR3_BASE_IDX 1 +#define mmMMEA4_ADDRNORM_LIMIT_ADDR3 0x07bc +#define mmMMEA4_ADDRNORM_LIMIT_ADDR3_BASE_IDX 1 +#define mmMMEA4_ADDRNORM_OFFSET_ADDR3 0x07bd +#define mmMMEA4_ADDRNORM_OFFSET_ADDR3_BASE_IDX 1 +#define mmMMEA4_ADDRNORM_BASE_ADDR4 0x07be +#define mmMMEA4_ADDRNORM_BASE_ADDR4_BASE_IDX 1 +#define mmMMEA4_ADDRNORM_LIMIT_ADDR4 0x07bf +#define mmMMEA4_ADDRNORM_LIMIT_ADDR4_BASE_IDX 1 +#define mmMMEA4_ADDRNORM_BASE_ADDR5 0x07c0 +#define mmMMEA4_ADDRNORM_BASE_ADDR5_BASE_IDX 1 +#define mmMMEA4_ADDRNORM_LIMIT_ADDR5 0x07c1 +#define mmMMEA4_ADDRNORM_LIMIT_ADDR5_BASE_IDX 1 +#define mmMMEA4_ADDRNORM_OFFSET_ADDR5 0x07c2 +#define mmMMEA4_ADDRNORM_OFFSET_ADDR5_BASE_IDX 1 +#define mmMMEA4_ADDRNORMDRAM_HOLE_CNTL 0x07c3 +#define mmMMEA4_ADDRNORMDRAM_HOLE_CNTL_BASE_IDX 1 +#define mmMMEA4_ADDRNORMGMI_HOLE_CNTL 0x07c4 +#define mmMMEA4_ADDRNORMGMI_HOLE_CNTL_BASE_IDX 1 +#define mmMMEA4_ADDRNORMDRAM_NP2_CHANNEL_CFG 0x07c5 +#define mmMMEA4_ADDRNORMDRAM_NP2_CHANNEL_CFG_BASE_IDX 1 +#define mmMMEA4_ADDRNORMGMI_NP2_CHANNEL_CFG 0x07c6 +#define mmMMEA4_ADDRNORMGMI_NP2_CHANNEL_CFG_BASE_IDX 1 +#define mmMMEA4_ADDRDEC_BANK_CFG 0x07c7 +#define mmMMEA4_ADDRDEC_BANK_CFG_BASE_IDX 1 +#define mmMMEA4_ADDRDEC_MISC_CFG 0x07c8 +#define mmMMEA4_ADDRDEC_MISC_CFG_BASE_IDX 1 +#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_BANK0 0x07c9 +#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_BANK0_BASE_IDX 1 +#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_BANK1 0x07ca +#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_BANK1_BASE_IDX 1 +#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_BANK2 0x07cb +#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_BANK2_BASE_IDX 1 +#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_BANK3 0x07cc +#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_BANK3_BASE_IDX 1 +#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_BANK4 0x07cd +#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_BANK4_BASE_IDX 1 +#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_BANK5 0x07ce +#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_BANK5_BASE_IDX 1 +#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_PC 0x07cf +#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_PC_BASE_IDX 1 +#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_PC2 0x07d0 +#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_PC2_BASE_IDX 1 +#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_CS0 0x07d1 +#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_CS0_BASE_IDX 1 +#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_CS1 0x07d2 +#define mmMMEA4_ADDRDECDRAM_ADDR_HASH_CS1_BASE_IDX 1 +#define mmMMEA4_ADDRDECDRAM_HARVEST_ENABLE 0x07d3 +#define mmMMEA4_ADDRDECDRAM_HARVEST_ENABLE_BASE_IDX 1 +#define 
mmMMEA4_ADDRDECGMI_ADDR_HASH_BANK0 0x07d4 +#define mmMMEA4_ADDRDECGMI_ADDR_HASH_BANK0_BASE_IDX 1 +#define mmMMEA4_ADDRDECGMI_ADDR_HASH_BANK1 0x07d5 +#define mmMMEA4_ADDRDECGMI_ADDR_HASH_BANK1_BASE_IDX 1 +#define mmMMEA4_ADDRDECGMI_ADDR_HASH_BANK2 0x07d6 +#define mmMMEA4_ADDRDECGMI_ADDR_HASH_BANK2_BASE_IDX 1 +#define mmMMEA4_ADDRDECGMI_ADDR_HASH_BANK3 0x07d7 +#define mmMMEA4_ADDRDECGMI_ADDR_HASH_BANK3_BASE_IDX 1 +#define mmMMEA4_ADDRDECGMI_ADDR_HASH_BANK4 0x07d8 +#define mmMMEA4_ADDRDECGMI_ADDR_HASH_BANK4_BASE_IDX 1 +#define mmMMEA4_ADDRDECGMI_ADDR_HASH_BANK5 0x07d9 +#define mmMMEA4_ADDRDECGMI_ADDR_HASH_BANK5_BASE_IDX 1 +#define mmMMEA4_ADDRDECGMI_ADDR_HASH_PC 0x07da +#define mmMMEA4_ADDRDECGMI_ADDR_HASH_PC_BASE_IDX 1 +#define mmMMEA4_ADDRDECGMI_ADDR_HASH_PC2 0x07db +#define mmMMEA4_ADDRDECGMI_ADDR_HASH_PC2_BASE_IDX 1 +#define mmMMEA4_ADDRDECGMI_ADDR_HASH_CS0 0x07dc +#define mmMMEA4_ADDRDECGMI_ADDR_HASH_CS0_BASE_IDX 1 +#define mmMMEA4_ADDRDECGMI_ADDR_HASH_CS1 0x07dd +#define mmMMEA4_ADDRDECGMI_ADDR_HASH_CS1_BASE_IDX 1 +#define mmMMEA4_ADDRDECGMI_HARVEST_ENABLE 0x07de +#define mmMMEA4_ADDRDECGMI_HARVEST_ENABLE_BASE_IDX 1 +#define mmMMEA4_ADDRDEC0_BASE_ADDR_CS0 0x07df +#define mmMMEA4_ADDRDEC0_BASE_ADDR_CS0_BASE_IDX 1 +#define mmMMEA4_ADDRDEC0_BASE_ADDR_CS1 0x07e0 +#define mmMMEA4_ADDRDEC0_BASE_ADDR_CS1_BASE_IDX 1 +#define mmMMEA4_ADDRDEC0_BASE_ADDR_CS2 0x07e1 +#define mmMMEA4_ADDRDEC0_BASE_ADDR_CS2_BASE_IDX 1 +#define mmMMEA4_ADDRDEC0_BASE_ADDR_CS3 0x07e2 +#define mmMMEA4_ADDRDEC0_BASE_ADDR_CS3_BASE_IDX 1 +#define mmMMEA4_ADDRDEC0_BASE_ADDR_SECCS0 0x07e3 +#define mmMMEA4_ADDRDEC0_BASE_ADDR_SECCS0_BASE_IDX 1 +#define mmMMEA4_ADDRDEC0_BASE_ADDR_SECCS1 0x07e4 +#define mmMMEA4_ADDRDEC0_BASE_ADDR_SECCS1_BASE_IDX 1 +#define mmMMEA4_ADDRDEC0_BASE_ADDR_SECCS2 0x07e5 +#define mmMMEA4_ADDRDEC0_BASE_ADDR_SECCS2_BASE_IDX 1 +#define mmMMEA4_ADDRDEC0_BASE_ADDR_SECCS3 0x07e6 +#define mmMMEA4_ADDRDEC0_BASE_ADDR_SECCS3_BASE_IDX 1 +#define mmMMEA4_ADDRDEC0_ADDR_MASK_CS01 0x07e7 +#define mmMMEA4_ADDRDEC0_ADDR_MASK_CS01_BASE_IDX 1 +#define mmMMEA4_ADDRDEC0_ADDR_MASK_CS23 0x07e8 +#define mmMMEA4_ADDRDEC0_ADDR_MASK_CS23_BASE_IDX 1 +#define mmMMEA4_ADDRDEC0_ADDR_MASK_SECCS01 0x07e9 +#define mmMMEA4_ADDRDEC0_ADDR_MASK_SECCS01_BASE_IDX 1 +#define mmMMEA4_ADDRDEC0_ADDR_MASK_SECCS23 0x07ea +#define mmMMEA4_ADDRDEC0_ADDR_MASK_SECCS23_BASE_IDX 1 +#define mmMMEA4_ADDRDEC0_ADDR_CFG_CS01 0x07eb +#define mmMMEA4_ADDRDEC0_ADDR_CFG_CS01_BASE_IDX 1 +#define mmMMEA4_ADDRDEC0_ADDR_CFG_CS23 0x07ec +#define mmMMEA4_ADDRDEC0_ADDR_CFG_CS23_BASE_IDX 1 +#define mmMMEA4_ADDRDEC0_ADDR_SEL_CS01 0x07ed +#define mmMMEA4_ADDRDEC0_ADDR_SEL_CS01_BASE_IDX 1 +#define mmMMEA4_ADDRDEC0_ADDR_SEL_CS23 0x07ee +#define mmMMEA4_ADDRDEC0_ADDR_SEL_CS23_BASE_IDX 1 +#define mmMMEA4_ADDRDEC0_ADDR_SEL2_CS01 0x07ef +#define mmMMEA4_ADDRDEC0_ADDR_SEL2_CS01_BASE_IDX 1 +#define mmMMEA4_ADDRDEC0_ADDR_SEL2_CS23 0x07f0 +#define mmMMEA4_ADDRDEC0_ADDR_SEL2_CS23_BASE_IDX 1 +#define mmMMEA4_ADDRDEC0_COL_SEL_LO_CS01 0x07f1 +#define mmMMEA4_ADDRDEC0_COL_SEL_LO_CS01_BASE_IDX 1 +#define mmMMEA4_ADDRDEC0_COL_SEL_LO_CS23 0x07f2 +#define mmMMEA4_ADDRDEC0_COL_SEL_LO_CS23_BASE_IDX 1 +#define mmMMEA4_ADDRDEC0_COL_SEL_HI_CS01 0x07f3 +#define mmMMEA4_ADDRDEC0_COL_SEL_HI_CS01_BASE_IDX 1 +#define mmMMEA4_ADDRDEC0_COL_SEL_HI_CS23 0x07f4 +#define mmMMEA4_ADDRDEC0_COL_SEL_HI_CS23_BASE_IDX 1 +#define mmMMEA4_ADDRDEC0_RM_SEL_CS01 0x07f5 +#define mmMMEA4_ADDRDEC0_RM_SEL_CS01_BASE_IDX 1 +#define mmMMEA4_ADDRDEC0_RM_SEL_CS23 0x07f6 +#define mmMMEA4_ADDRDEC0_RM_SEL_CS23_BASE_IDX 1 +#define 
mmMMEA4_ADDRDEC0_RM_SEL_SECCS01 0x07f7 +#define mmMMEA4_ADDRDEC0_RM_SEL_SECCS01_BASE_IDX 1 +#define mmMMEA4_ADDRDEC0_RM_SEL_SECCS23 0x07f8 +#define mmMMEA4_ADDRDEC0_RM_SEL_SECCS23_BASE_IDX 1 +#define mmMMEA4_ADDRDEC1_BASE_ADDR_CS0 0x07f9 +#define mmMMEA4_ADDRDEC1_BASE_ADDR_CS0_BASE_IDX 1 +#define mmMMEA4_ADDRDEC1_BASE_ADDR_CS1 0x07fa +#define mmMMEA4_ADDRDEC1_BASE_ADDR_CS1_BASE_IDX 1 +#define mmMMEA4_ADDRDEC1_BASE_ADDR_CS2 0x07fb +#define mmMMEA4_ADDRDEC1_BASE_ADDR_CS2_BASE_IDX 1 +#define mmMMEA4_ADDRDEC1_BASE_ADDR_CS3 0x07fc +#define mmMMEA4_ADDRDEC1_BASE_ADDR_CS3_BASE_IDX 1 +#define mmMMEA4_ADDRDEC1_BASE_ADDR_SECCS0 0x07fd +#define mmMMEA4_ADDRDEC1_BASE_ADDR_SECCS0_BASE_IDX 1 +#define mmMMEA4_ADDRDEC1_BASE_ADDR_SECCS1 0x07fe +#define mmMMEA4_ADDRDEC1_BASE_ADDR_SECCS1_BASE_IDX 1 +#define mmMMEA4_ADDRDEC1_BASE_ADDR_SECCS2 0x07ff +#define mmMMEA4_ADDRDEC1_BASE_ADDR_SECCS2_BASE_IDX 1 +#define mmMMEA4_ADDRDEC1_BASE_ADDR_SECCS3 0x0800 +#define mmMMEA4_ADDRDEC1_BASE_ADDR_SECCS3_BASE_IDX 1 +#define mmMMEA4_ADDRDEC1_ADDR_MASK_CS01 0x0801 +#define mmMMEA4_ADDRDEC1_ADDR_MASK_CS01_BASE_IDX 1 +#define mmMMEA4_ADDRDEC1_ADDR_MASK_CS23 0x0802 +#define mmMMEA4_ADDRDEC1_ADDR_MASK_CS23_BASE_IDX 1 +#define mmMMEA4_ADDRDEC1_ADDR_MASK_SECCS01 0x0803 +#define mmMMEA4_ADDRDEC1_ADDR_MASK_SECCS01_BASE_IDX 1 +#define mmMMEA4_ADDRDEC1_ADDR_MASK_SECCS23 0x0804 +#define mmMMEA4_ADDRDEC1_ADDR_MASK_SECCS23_BASE_IDX 1 +#define mmMMEA4_ADDRDEC1_ADDR_CFG_CS01 0x0805 +#define mmMMEA4_ADDRDEC1_ADDR_CFG_CS01_BASE_IDX 1 +#define mmMMEA4_ADDRDEC1_ADDR_CFG_CS23 0x0806 +#define mmMMEA4_ADDRDEC1_ADDR_CFG_CS23_BASE_IDX 1 +#define mmMMEA4_ADDRDEC1_ADDR_SEL_CS01 0x0807 +#define mmMMEA4_ADDRDEC1_ADDR_SEL_CS01_BASE_IDX 1 +#define mmMMEA4_ADDRDEC1_ADDR_SEL_CS23 0x0808 +#define mmMMEA4_ADDRDEC1_ADDR_SEL_CS23_BASE_IDX 1 +#define mmMMEA4_ADDRDEC1_ADDR_SEL2_CS01 0x0809 +#define mmMMEA4_ADDRDEC1_ADDR_SEL2_CS01_BASE_IDX 1 +#define mmMMEA4_ADDRDEC1_ADDR_SEL2_CS23 0x080a +#define mmMMEA4_ADDRDEC1_ADDR_SEL2_CS23_BASE_IDX 1 +#define mmMMEA4_ADDRDEC1_COL_SEL_LO_CS01 0x080b +#define mmMMEA4_ADDRDEC1_COL_SEL_LO_CS01_BASE_IDX 1 +#define mmMMEA4_ADDRDEC1_COL_SEL_LO_CS23 0x080c +#define mmMMEA4_ADDRDEC1_COL_SEL_LO_CS23_BASE_IDX 1 +#define mmMMEA4_ADDRDEC1_COL_SEL_HI_CS01 0x080d +#define mmMMEA4_ADDRDEC1_COL_SEL_HI_CS01_BASE_IDX 1 +#define mmMMEA4_ADDRDEC1_COL_SEL_HI_CS23 0x080e +#define mmMMEA4_ADDRDEC1_COL_SEL_HI_CS23_BASE_IDX 1 +#define mmMMEA4_ADDRDEC1_RM_SEL_CS01 0x080f +#define mmMMEA4_ADDRDEC1_RM_SEL_CS01_BASE_IDX 1 +#define mmMMEA4_ADDRDEC1_RM_SEL_CS23 0x0810 +#define mmMMEA4_ADDRDEC1_RM_SEL_CS23_BASE_IDX 1 +#define mmMMEA4_ADDRDEC1_RM_SEL_SECCS01 0x0811 +#define mmMMEA4_ADDRDEC1_RM_SEL_SECCS01_BASE_IDX 1 +#define mmMMEA4_ADDRDEC1_RM_SEL_SECCS23 0x0812 +#define mmMMEA4_ADDRDEC1_RM_SEL_SECCS23_BASE_IDX 1 +#define mmMMEA4_ADDRDEC2_BASE_ADDR_CS0 0x0813 +#define mmMMEA4_ADDRDEC2_BASE_ADDR_CS0_BASE_IDX 1 +#define mmMMEA4_ADDRDEC2_BASE_ADDR_CS1 0x0814 +#define mmMMEA4_ADDRDEC2_BASE_ADDR_CS1_BASE_IDX 1 +#define mmMMEA4_ADDRDEC2_BASE_ADDR_CS2 0x0815 +#define mmMMEA4_ADDRDEC2_BASE_ADDR_CS2_BASE_IDX 1 +#define mmMMEA4_ADDRDEC2_BASE_ADDR_CS3 0x0816 +#define mmMMEA4_ADDRDEC2_BASE_ADDR_CS3_BASE_IDX 1 +#define mmMMEA4_ADDRDEC2_BASE_ADDR_SECCS0 0x0817 +#define mmMMEA4_ADDRDEC2_BASE_ADDR_SECCS0_BASE_IDX 1 +#define mmMMEA4_ADDRDEC2_BASE_ADDR_SECCS1 0x0818 +#define mmMMEA4_ADDRDEC2_BASE_ADDR_SECCS1_BASE_IDX 1 +#define mmMMEA4_ADDRDEC2_BASE_ADDR_SECCS2 0x0819 +#define mmMMEA4_ADDRDEC2_BASE_ADDR_SECCS2_BASE_IDX 1 +#define mmMMEA4_ADDRDEC2_BASE_ADDR_SECCS3 0x081a 
+#define mmMMEA4_ADDRDEC2_BASE_ADDR_SECCS3_BASE_IDX 1 +#define mmMMEA4_ADDRDEC2_ADDR_MASK_CS01 0x081b +#define mmMMEA4_ADDRDEC2_ADDR_MASK_CS01_BASE_IDX 1 +#define mmMMEA4_ADDRDEC2_ADDR_MASK_CS23 0x081c +#define mmMMEA4_ADDRDEC2_ADDR_MASK_CS23_BASE_IDX 1 +#define mmMMEA4_ADDRDEC2_ADDR_MASK_SECCS01 0x081d +#define mmMMEA4_ADDRDEC2_ADDR_MASK_SECCS01_BASE_IDX 1 +#define mmMMEA4_ADDRDEC2_ADDR_MASK_SECCS23 0x081e +#define mmMMEA4_ADDRDEC2_ADDR_MASK_SECCS23_BASE_IDX 1 +#define mmMMEA4_ADDRDEC2_ADDR_CFG_CS01 0x081f +#define mmMMEA4_ADDRDEC2_ADDR_CFG_CS01_BASE_IDX 1 +#define mmMMEA4_ADDRDEC2_ADDR_CFG_CS23 0x0820 +#define mmMMEA4_ADDRDEC2_ADDR_CFG_CS23_BASE_IDX 1 +#define mmMMEA4_ADDRDEC2_ADDR_SEL_CS01 0x0821 +#define mmMMEA4_ADDRDEC2_ADDR_SEL_CS01_BASE_IDX 1 +#define mmMMEA4_ADDRDEC2_ADDR_SEL_CS23 0x0822 +#define mmMMEA4_ADDRDEC2_ADDR_SEL_CS23_BASE_IDX 1 +#define mmMMEA4_ADDRDEC2_ADDR_SEL2_CS01 0x0823 +#define mmMMEA4_ADDRDEC2_ADDR_SEL2_CS01_BASE_IDX 1 +#define mmMMEA4_ADDRDEC2_ADDR_SEL2_CS23 0x0824 +#define mmMMEA4_ADDRDEC2_ADDR_SEL2_CS23_BASE_IDX 1 +#define mmMMEA4_ADDRDEC2_COL_SEL_LO_CS01 0x0825 +#define mmMMEA4_ADDRDEC2_COL_SEL_LO_CS01_BASE_IDX 1 +#define mmMMEA4_ADDRDEC2_COL_SEL_LO_CS23 0x0826 +#define mmMMEA4_ADDRDEC2_COL_SEL_LO_CS23_BASE_IDX 1 +#define mmMMEA4_ADDRDEC2_COL_SEL_HI_CS01 0x0827 +#define mmMMEA4_ADDRDEC2_COL_SEL_HI_CS01_BASE_IDX 1 +#define mmMMEA4_ADDRDEC2_COL_SEL_HI_CS23 0x0828 +#define mmMMEA4_ADDRDEC2_COL_SEL_HI_CS23_BASE_IDX 1 +#define mmMMEA4_ADDRDEC2_RM_SEL_CS01 0x0829 +#define mmMMEA4_ADDRDEC2_RM_SEL_CS01_BASE_IDX 1 +#define mmMMEA4_ADDRDEC2_RM_SEL_CS23 0x082a +#define mmMMEA4_ADDRDEC2_RM_SEL_CS23_BASE_IDX 1 +#define mmMMEA4_ADDRDEC2_RM_SEL_SECCS01 0x082b +#define mmMMEA4_ADDRDEC2_RM_SEL_SECCS01_BASE_IDX 1 +#define mmMMEA4_ADDRDEC2_RM_SEL_SECCS23 0x082c +#define mmMMEA4_ADDRDEC2_RM_SEL_SECCS23_BASE_IDX 1 +#define mmMMEA4_ADDRNORMDRAM_GLOBAL_CNTL 0x082d +#define mmMMEA4_ADDRNORMDRAM_GLOBAL_CNTL_BASE_IDX 1 +#define mmMMEA4_ADDRNORMGMI_GLOBAL_CNTL 0x082e +#define mmMMEA4_ADDRNORMGMI_GLOBAL_CNTL_BASE_IDX 1 +#define mmMMEA4_IO_RD_CLI2GRP_MAP0 0x0855 +#define mmMMEA4_IO_RD_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA4_IO_RD_CLI2GRP_MAP1 0x0856 +#define mmMMEA4_IO_RD_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA4_IO_WR_CLI2GRP_MAP0 0x0857 +#define mmMMEA4_IO_WR_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA4_IO_WR_CLI2GRP_MAP1 0x0858 +#define mmMMEA4_IO_WR_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA4_IO_RD_COMBINE_FLUSH 0x0859 +#define mmMMEA4_IO_RD_COMBINE_FLUSH_BASE_IDX 1 +#define mmMMEA4_IO_WR_COMBINE_FLUSH 0x085a +#define mmMMEA4_IO_WR_COMBINE_FLUSH_BASE_IDX 1 +#define mmMMEA4_IO_GROUP_BURST 0x085b +#define mmMMEA4_IO_GROUP_BURST_BASE_IDX 1 +#define mmMMEA4_IO_RD_PRI_AGE 0x085c +#define mmMMEA4_IO_RD_PRI_AGE_BASE_IDX 1 +#define mmMMEA4_IO_WR_PRI_AGE 0x085d +#define mmMMEA4_IO_WR_PRI_AGE_BASE_IDX 1 +#define mmMMEA4_IO_RD_PRI_QUEUING 0x085e +#define mmMMEA4_IO_RD_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA4_IO_WR_PRI_QUEUING 0x085f +#define mmMMEA4_IO_WR_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA4_IO_RD_PRI_FIXED 0x0860 +#define mmMMEA4_IO_RD_PRI_FIXED_BASE_IDX 1 +#define mmMMEA4_IO_WR_PRI_FIXED 0x0861 +#define mmMMEA4_IO_WR_PRI_FIXED_BASE_IDX 1 +#define mmMMEA4_IO_RD_PRI_URGENCY 0x0862 +#define mmMMEA4_IO_RD_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA4_IO_WR_PRI_URGENCY 0x0863 +#define mmMMEA4_IO_WR_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA4_IO_RD_PRI_URGENCY_MASKING 0x0864 +#define mmMMEA4_IO_RD_PRI_URGENCY_MASKING_BASE_IDX 1 +#define mmMMEA4_IO_WR_PRI_URGENCY_MASKING 0x0865 +#define 
mmMMEA4_IO_WR_PRI_URGENCY_MASKING_BASE_IDX 1 +#define mmMMEA4_IO_RD_PRI_QUANT_PRI1 0x0866 +#define mmMMEA4_IO_RD_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA4_IO_RD_PRI_QUANT_PRI2 0x0867 +#define mmMMEA4_IO_RD_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA4_IO_RD_PRI_QUANT_PRI3 0x0868 +#define mmMMEA4_IO_RD_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA4_IO_WR_PRI_QUANT_PRI1 0x0869 +#define mmMMEA4_IO_WR_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA4_IO_WR_PRI_QUANT_PRI2 0x086a +#define mmMMEA4_IO_WR_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA4_IO_WR_PRI_QUANT_PRI3 0x086b +#define mmMMEA4_IO_WR_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA4_SDP_ARB_DRAM 0x086c +#define mmMMEA4_SDP_ARB_DRAM_BASE_IDX 1 +#define mmMMEA4_SDP_ARB_GMI 0x086d +#define mmMMEA4_SDP_ARB_GMI_BASE_IDX 1 +#define mmMMEA4_SDP_ARB_FINAL 0x086e +#define mmMMEA4_SDP_ARB_FINAL_BASE_IDX 1 +#define mmMMEA4_SDP_DRAM_PRIORITY 0x086f +#define mmMMEA4_SDP_DRAM_PRIORITY_BASE_IDX 1 +#define mmMMEA4_SDP_GMI_PRIORITY 0x0870 +#define mmMMEA4_SDP_GMI_PRIORITY_BASE_IDX 1 +#define mmMMEA4_SDP_IO_PRIORITY 0x0871 +#define mmMMEA4_SDP_IO_PRIORITY_BASE_IDX 1 +#define mmMMEA4_SDP_CREDITS 0x0872 +#define mmMMEA4_SDP_CREDITS_BASE_IDX 1 +#define mmMMEA4_SDP_TAG_RESERVE0 0x0873 +#define mmMMEA4_SDP_TAG_RESERVE0_BASE_IDX 1 +#define mmMMEA4_SDP_TAG_RESERVE1 0x0874 +#define mmMMEA4_SDP_TAG_RESERVE1_BASE_IDX 1 +#define mmMMEA4_SDP_VCC_RESERVE0 0x0875 +#define mmMMEA4_SDP_VCC_RESERVE0_BASE_IDX 1 +#define mmMMEA4_SDP_VCC_RESERVE1 0x0876 +#define mmMMEA4_SDP_VCC_RESERVE1_BASE_IDX 1 +#define mmMMEA4_SDP_VCD_RESERVE0 0x0877 +#define mmMMEA4_SDP_VCD_RESERVE0_BASE_IDX 1 +#define mmMMEA4_SDP_VCD_RESERVE1 0x0878 +#define mmMMEA4_SDP_VCD_RESERVE1_BASE_IDX 1 +#define mmMMEA4_SDP_REQ_CNTL 0x0879 +#define mmMMEA4_SDP_REQ_CNTL_BASE_IDX 1 +#define mmMMEA4_MISC 0x087a +#define mmMMEA4_MISC_BASE_IDX 1 +#define mmMMEA4_LATENCY_SAMPLING 0x087b +#define mmMMEA4_LATENCY_SAMPLING_BASE_IDX 1 +#define mmMMEA4_PERFCOUNTER_LO 0x087c +#define mmMMEA4_PERFCOUNTER_LO_BASE_IDX 1 +#define mmMMEA4_PERFCOUNTER_HI 0x087d +#define mmMMEA4_PERFCOUNTER_HI_BASE_IDX 1 +#define mmMMEA4_PERFCOUNTER0_CFG 0x087e +#define mmMMEA4_PERFCOUNTER0_CFG_BASE_IDX 1 +#define mmMMEA4_PERFCOUNTER1_CFG 0x087f +#define mmMMEA4_PERFCOUNTER1_CFG_BASE_IDX 1 +#define mmMMEA4_PERFCOUNTER_RSLT_CNTL 0x0880 +#define mmMMEA4_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1 +#define mmMMEA4_EDC_CNT 0x0886 +#define mmMMEA4_EDC_CNT_BASE_IDX 1 +#define mmMMEA4_EDC_CNT2 0x0887 +#define mmMMEA4_EDC_CNT2_BASE_IDX 1 +#define mmMMEA4_DSM_CNTL 0x0888 +#define mmMMEA4_DSM_CNTL_BASE_IDX 1 +#define mmMMEA4_DSM_CNTLA 0x0889 +#define mmMMEA4_DSM_CNTLA_BASE_IDX 1 +#define mmMMEA4_DSM_CNTLB 0x088a +#define mmMMEA4_DSM_CNTLB_BASE_IDX 1 +#define mmMMEA4_DSM_CNTL2 0x088b +#define mmMMEA4_DSM_CNTL2_BASE_IDX 1 +#define mmMMEA4_DSM_CNTL2A 0x088c +#define mmMMEA4_DSM_CNTL2A_BASE_IDX 1 +#define mmMMEA4_DSM_CNTL2B 0x088d +#define mmMMEA4_DSM_CNTL2B_BASE_IDX 1 +#define mmMMEA4_CGTT_CLK_CTRL 0x088f +#define mmMMEA4_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmMMEA4_EDC_MODE 0x0890 +#define mmMMEA4_EDC_MODE_BASE_IDX 1 +#define mmMMEA4_ERR_STATUS 0x0891 +#define mmMMEA4_ERR_STATUS_BASE_IDX 1 +#define mmMMEA4_MISC2 0x0892 +#define mmMMEA4_MISC2_BASE_IDX 1 +#define mmMMEA4_ADDRDEC_SELECT 0x0893 +#define mmMMEA4_ADDRDEC_SELECT_BASE_IDX 1 +#define mmMMEA4_EDC_CNT3 0x0894 +#define mmMMEA4_EDC_CNT3_BASE_IDX 1 + + +// addressBlock: mmhub_pctldec0 +// base address: 0x6a300 +#define mmPCTL0_CTRL 0x08c0 +#define mmPCTL0_CTRL_BASE_IDX 1 +#define mmPCTL0_MMHUB_DEEPSLEEP_IB 0x08c1 +#define 
mmPCTL0_MMHUB_DEEPSLEEP_IB_BASE_IDX 1 +#define mmPCTL0_MMHUB_DEEPSLEEP_OVERRIDE 0x08c2 +#define mmPCTL0_MMHUB_DEEPSLEEP_OVERRIDE_BASE_IDX 1 +#define mmPCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB 0x08c3 +#define mmPCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB_BASE_IDX 1 +#define mmPCTL0_PG_IGNORE_DEEPSLEEP 0x08c4 +#define mmPCTL0_PG_IGNORE_DEEPSLEEP_BASE_IDX 1 +#define mmPCTL0_PG_IGNORE_DEEPSLEEP_IB 0x08c5 +#define mmPCTL0_PG_IGNORE_DEEPSLEEP_IB_BASE_IDX 1 +#define mmPCTL0_SLICE0_CFG_DAGB_BUSY 0x08c6 +#define mmPCTL0_SLICE0_CFG_DAGB_BUSY_BASE_IDX 1 +#define mmPCTL0_SLICE0_CFG_DS_ALLOW 0x08c7 +#define mmPCTL0_SLICE0_CFG_DS_ALLOW_BASE_IDX 1 +#define mmPCTL0_SLICE0_CFG_DS_ALLOW_IB 0x08c8 +#define mmPCTL0_SLICE0_CFG_DS_ALLOW_IB_BASE_IDX 1 +#define mmPCTL0_SLICE1_CFG_DAGB_BUSY 0x08c9 +#define mmPCTL0_SLICE1_CFG_DAGB_BUSY_BASE_IDX 1 +#define mmPCTL0_SLICE1_CFG_DS_ALLOW 0x08ca +#define mmPCTL0_SLICE1_CFG_DS_ALLOW_BASE_IDX 1 +#define mmPCTL0_SLICE1_CFG_DS_ALLOW_IB 0x08cb +#define mmPCTL0_SLICE1_CFG_DS_ALLOW_IB_BASE_IDX 1 +#define mmPCTL0_SLICE2_CFG_DAGB_BUSY 0x08cc +#define mmPCTL0_SLICE2_CFG_DAGB_BUSY_BASE_IDX 1 +#define mmPCTL0_SLICE2_CFG_DS_ALLOW 0x08cd +#define mmPCTL0_SLICE2_CFG_DS_ALLOW_BASE_IDX 1 +#define mmPCTL0_SLICE2_CFG_DS_ALLOW_IB 0x08ce +#define mmPCTL0_SLICE2_CFG_DS_ALLOW_IB_BASE_IDX 1 +#define mmPCTL0_SLICE3_CFG_DAGB_BUSY 0x08cf +#define mmPCTL0_SLICE3_CFG_DAGB_BUSY_BASE_IDX 1 +#define mmPCTL0_SLICE3_CFG_DS_ALLOW 0x08d0 +#define mmPCTL0_SLICE3_CFG_DS_ALLOW_BASE_IDX 1 +#define mmPCTL0_SLICE3_CFG_DS_ALLOW_IB 0x08d1 +#define mmPCTL0_SLICE3_CFG_DS_ALLOW_IB_BASE_IDX 1 +#define mmPCTL0_SLICE4_CFG_DAGB_BUSY 0x08d2 +#define mmPCTL0_SLICE4_CFG_DAGB_BUSY_BASE_IDX 1 +#define mmPCTL0_SLICE4_CFG_DS_ALLOW 0x08d3 +#define mmPCTL0_SLICE4_CFG_DS_ALLOW_BASE_IDX 1 +#define mmPCTL0_SLICE4_CFG_DS_ALLOW_IB 0x08d4 +#define mmPCTL0_SLICE4_CFG_DS_ALLOW_IB_BASE_IDX 1 +#define mmPCTL0_UTCL2_MISC 0x08d5 +#define mmPCTL0_UTCL2_MISC_BASE_IDX 1 +#define mmPCTL0_SLICE0_MISC 0x08d6 +#define mmPCTL0_SLICE0_MISC_BASE_IDX 1 +#define mmPCTL0_SLICE1_MISC 0x08d7 +#define mmPCTL0_SLICE1_MISC_BASE_IDX 1 +#define mmPCTL0_SLICE2_MISC 0x08d8 +#define mmPCTL0_SLICE2_MISC_BASE_IDX 1 +#define mmPCTL0_SLICE3_MISC 0x08d9 +#define mmPCTL0_SLICE3_MISC_BASE_IDX 1 +#define mmPCTL0_SLICE4_MISC 0x08da +#define mmPCTL0_SLICE4_MISC_BASE_IDX 1 +#define mmPCTL0_UTCL2_RENG_EXECUTE 0x08db +#define mmPCTL0_UTCL2_RENG_EXECUTE_BASE_IDX 1 +#define mmPCTL0_SLICE0_RENG_EXECUTE 0x08dc +#define mmPCTL0_SLICE0_RENG_EXECUTE_BASE_IDX 1 +#define mmPCTL0_SLICE1_RENG_EXECUTE 0x08dd +#define mmPCTL0_SLICE1_RENG_EXECUTE_BASE_IDX 1 +#define mmPCTL0_SLICE2_RENG_EXECUTE 0x08de +#define mmPCTL0_SLICE2_RENG_EXECUTE_BASE_IDX 1 +#define mmPCTL0_SLICE3_RENG_EXECUTE 0x08df +#define mmPCTL0_SLICE3_RENG_EXECUTE_BASE_IDX 1 +#define mmPCTL0_SLICE4_RENG_EXECUTE 0x08e0 +#define mmPCTL0_SLICE4_RENG_EXECUTE_BASE_IDX 1 +#define mmPCTL0_UTCL2_RENG_RAM_INDEX 0x08e1 +#define mmPCTL0_UTCL2_RENG_RAM_INDEX_BASE_IDX 1 +#define mmPCTL0_UTCL2_RENG_RAM_DATA 0x08e2 +#define mmPCTL0_UTCL2_RENG_RAM_DATA_BASE_IDX 1 +#define mmPCTL0_SLICE0_RENG_RAM_INDEX 0x08e3 +#define mmPCTL0_SLICE0_RENG_RAM_INDEX_BASE_IDX 1 +#define mmPCTL0_SLICE0_RENG_RAM_DATA 0x08e4 +#define mmPCTL0_SLICE0_RENG_RAM_DATA_BASE_IDX 1 +#define mmPCTL0_SLICE1_RENG_RAM_INDEX 0x08e5 +#define mmPCTL0_SLICE1_RENG_RAM_INDEX_BASE_IDX 1 +#define mmPCTL0_SLICE1_RENG_RAM_DATA 0x08e6 +#define mmPCTL0_SLICE1_RENG_RAM_DATA_BASE_IDX 1 +#define mmPCTL0_SLICE2_RENG_RAM_INDEX 0x08e7 +#define mmPCTL0_SLICE2_RENG_RAM_INDEX_BASE_IDX 1 +#define 
mmPCTL0_SLICE2_RENG_RAM_DATA 0x08e8 +#define mmPCTL0_SLICE2_RENG_RAM_DATA_BASE_IDX 1 +#define mmPCTL0_SLICE3_RENG_RAM_INDEX 0x08e9 +#define mmPCTL0_SLICE3_RENG_RAM_INDEX_BASE_IDX 1 +#define mmPCTL0_SLICE3_RENG_RAM_DATA 0x08ea +#define mmPCTL0_SLICE3_RENG_RAM_DATA_BASE_IDX 1 +#define mmPCTL0_SLICE4_RENG_RAM_INDEX 0x08eb +#define mmPCTL0_SLICE4_RENG_RAM_INDEX_BASE_IDX 1 +#define mmPCTL0_SLICE4_RENG_RAM_DATA 0x08ec +#define mmPCTL0_SLICE4_RENG_RAM_DATA_BASE_IDX 1 +#define mmPCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE0 0x08ed +#define mmPCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE0_BASE_IDX 1 +#define mmPCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE1 0x08ee +#define mmPCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE1_BASE_IDX 1 +#define mmPCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE2 0x08ef +#define mmPCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE2_BASE_IDX 1 +#define mmPCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE3 0x08f0 +#define mmPCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE3_BASE_IDX 1 +#define mmPCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE4 0x08f1 +#define mmPCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE4_BASE_IDX 1 +#define mmPCTL0_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET0 0x08f2 +#define mmPCTL0_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET0_BASE_IDX 1 +#define mmPCTL0_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET1 0x08f3 +#define mmPCTL0_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET1_BASE_IDX 1 +#define mmPCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE0 0x08f4 +#define mmPCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE0_BASE_IDX 1 +#define mmPCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE1 0x08f5 +#define mmPCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE1_BASE_IDX 1 +#define mmPCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE2 0x08f6 +#define mmPCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE2_BASE_IDX 1 +#define mmPCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE3 0x08f7 +#define mmPCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE3_BASE_IDX 1 +#define mmPCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE4 0x08f8 +#define mmPCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE4_BASE_IDX 1 +#define mmPCTL0_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET0 0x08f9 +#define mmPCTL0_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET0_BASE_IDX 1 +#define mmPCTL0_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET1 0x08fa +#define mmPCTL0_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET1_BASE_IDX 1 +#define mmPCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE0 0x08fb +#define mmPCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE0_BASE_IDX 1 +#define mmPCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE1 0x08fc +#define mmPCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE1_BASE_IDX 1 +#define mmPCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE2 0x08fd +#define mmPCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE2_BASE_IDX 1 +#define mmPCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE3 0x08fe +#define mmPCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE3_BASE_IDX 1 +#define mmPCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE4 0x08ff +#define mmPCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE4_BASE_IDX 1 +#define mmPCTL0_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET0 0x0900 +#define mmPCTL0_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET0_BASE_IDX 1 +#define mmPCTL0_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET1 0x0901 +#define mmPCTL0_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET1_BASE_IDX 1 +#define mmPCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE0 0x0902 +#define mmPCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE0_BASE_IDX 1 +#define mmPCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE1 0x0903 +#define mmPCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE1_BASE_IDX 1 +#define mmPCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE2 0x0904 +#define mmPCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE2_BASE_IDX 1 +#define mmPCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE3 0x0905 +#define mmPCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE3_BASE_IDX 1 +#define 
mmPCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE4 0x0906 +#define mmPCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE4_BASE_IDX 1 +#define mmPCTL0_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET0 0x0907 +#define mmPCTL0_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET0_BASE_IDX 1 +#define mmPCTL0_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET1 0x0908 +#define mmPCTL0_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET1_BASE_IDX 1 +#define mmPCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE0 0x0909 +#define mmPCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE0_BASE_IDX 1 +#define mmPCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE1 0x090a +#define mmPCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE1_BASE_IDX 1 +#define mmPCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE2 0x090b +#define mmPCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE2_BASE_IDX 1 +#define mmPCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE3 0x090c +#define mmPCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE3_BASE_IDX 1 +#define mmPCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE4 0x090d +#define mmPCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE4_BASE_IDX 1 +#define mmPCTL0_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET0 0x090e +#define mmPCTL0_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET0_BASE_IDX 1 +#define mmPCTL0_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET1 0x090f +#define mmPCTL0_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET1_BASE_IDX 1 +#define mmPCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE0 0x0910 +#define mmPCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE0_BASE_IDX 1 +#define mmPCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE1 0x0911 +#define mmPCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE1_BASE_IDX 1 +#define mmPCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE2 0x0912 +#define mmPCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE2_BASE_IDX 1 +#define mmPCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE3 0x0913 +#define mmPCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE3_BASE_IDX 1 +#define mmPCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE4 0x0914 +#define mmPCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE4_BASE_IDX 1 +#define mmPCTL0_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET0 0x0915 +#define mmPCTL0_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET0_BASE_IDX 1 +#define mmPCTL0_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET1 0x0916 +#define mmPCTL0_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET1_BASE_IDX 1 + + +// addressBlock: mmhub_l1tlb_vml1dec +// base address: 0x6a500 +#define mmVML1_0_MC_VM_MX_L1_TLB0_STATUS 0x0948 +#define mmVML1_0_MC_VM_MX_L1_TLB0_STATUS_BASE_IDX 1 +#define mmVML1_0_MC_VM_MX_L1_TLB1_STATUS 0x0949 +#define mmVML1_0_MC_VM_MX_L1_TLB1_STATUS_BASE_IDX 1 +#define mmVML1_0_MC_VM_MX_L1_TLB2_STATUS 0x094a +#define mmVML1_0_MC_VM_MX_L1_TLB2_STATUS_BASE_IDX 1 +#define mmVML1_0_MC_VM_MX_L1_TLB3_STATUS 0x094b +#define mmVML1_0_MC_VM_MX_L1_TLB3_STATUS_BASE_IDX 1 +#define mmVML1_0_MC_VM_MX_L1_TLB4_STATUS 0x094c +#define mmVML1_0_MC_VM_MX_L1_TLB4_STATUS_BASE_IDX 1 +#define mmVML1_0_MC_VM_MX_L1_TLB5_STATUS 0x094d +#define mmVML1_0_MC_VM_MX_L1_TLB5_STATUS_BASE_IDX 1 +#define mmVML1_0_MC_VM_MX_L1_TLB6_STATUS 0x094e +#define mmVML1_0_MC_VM_MX_L1_TLB6_STATUS_BASE_IDX 1 +#define mmVML1_0_MC_VM_MX_L1_TLB7_STATUS 0x094f +#define mmVML1_0_MC_VM_MX_L1_TLB7_STATUS_BASE_IDX 1 + + +// addressBlock: mmhub_l1tlb_vml1pldec +// base address: 0x6a580 +#define mmVML1PL0_MC_VM_MX_L1_PERFCOUNTER0_CFG 0x0960 +#define mmVML1PL0_MC_VM_MX_L1_PERFCOUNTER0_CFG_BASE_IDX 1 +#define mmVML1PL0_MC_VM_MX_L1_PERFCOUNTER1_CFG 0x0961 +#define mmVML1PL0_MC_VM_MX_L1_PERFCOUNTER1_CFG_BASE_IDX 1 +#define mmVML1PL0_MC_VM_MX_L1_PERFCOUNTER2_CFG 0x0962 +#define mmVML1PL0_MC_VM_MX_L1_PERFCOUNTER2_CFG_BASE_IDX 1 +#define mmVML1PL0_MC_VM_MX_L1_PERFCOUNTER3_CFG 0x0963 +#define mmVML1PL0_MC_VM_MX_L1_PERFCOUNTER3_CFG_BASE_IDX 1 +#define 
mmVML1PL0_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL 0x0964 +#define mmVML1PL0_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1 + + +// addressBlock: mmhub_l1tlb_vml1prdec +// base address: 0x6a5c0 +#define mmVML1PR0_MC_VM_MX_L1_PERFCOUNTER_LO 0x0970 +#define mmVML1PR0_MC_VM_MX_L1_PERFCOUNTER_LO_BASE_IDX 1 +#define mmVML1PR0_MC_VM_MX_L1_PERFCOUNTER_HI 0x0971 +#define mmVML1PR0_MC_VM_MX_L1_PERFCOUNTER_HI_BASE_IDX 1 + + +// addressBlock: mmhub_utcl2_atcl2dec +// base address: 0x6a600 +#define mmATCL2_0_ATC_L2_CNTL 0x0980 +#define mmATCL2_0_ATC_L2_CNTL_BASE_IDX 1 +#define mmATCL2_0_ATC_L2_CNTL2 0x0981 +#define mmATCL2_0_ATC_L2_CNTL2_BASE_IDX 1 +#define mmATCL2_0_ATC_L2_CACHE_DATA0 0x0984 +#define mmATCL2_0_ATC_L2_CACHE_DATA0_BASE_IDX 1 +#define mmATCL2_0_ATC_L2_CACHE_DATA1 0x0985 +#define mmATCL2_0_ATC_L2_CACHE_DATA1_BASE_IDX 1 +#define mmATCL2_0_ATC_L2_CACHE_DATA2 0x0986 +#define mmATCL2_0_ATC_L2_CACHE_DATA2_BASE_IDX 1 +#define mmATCL2_0_ATC_L2_CNTL3 0x0987 +#define mmATCL2_0_ATC_L2_CNTL3_BASE_IDX 1 +#define mmATCL2_0_ATC_L2_STATUS 0x0988 +#define mmATCL2_0_ATC_L2_STATUS_BASE_IDX 1 +#define mmATCL2_0_ATC_L2_STATUS2 0x0989 +#define mmATCL2_0_ATC_L2_STATUS2_BASE_IDX 1 +#define mmATCL2_0_ATC_L2_STATUS3 0x098a +#define mmATCL2_0_ATC_L2_STATUS3_BASE_IDX 1 +#define mmATCL2_0_ATC_L2_MISC_CG 0x098b +#define mmATCL2_0_ATC_L2_MISC_CG_BASE_IDX 1 +#define mmATCL2_0_ATC_L2_MEM_POWER_LS 0x098c +#define mmATCL2_0_ATC_L2_MEM_POWER_LS_BASE_IDX 1 +#define mmATCL2_0_ATC_L2_CGTT_CLK_CTRL 0x098d +#define mmATCL2_0_ATC_L2_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmATCL2_0_ATC_L2_CACHE_4K_DSM_INDEX 0x098e +#define mmATCL2_0_ATC_L2_CACHE_4K_DSM_INDEX_BASE_IDX 1 +#define mmATCL2_0_ATC_L2_CACHE_2M_DSM_INDEX 0x098f +#define mmATCL2_0_ATC_L2_CACHE_2M_DSM_INDEX_BASE_IDX 1 +#define mmATCL2_0_ATC_L2_CACHE_4K_DSM_CNTL 0x0990 +#define mmATCL2_0_ATC_L2_CACHE_4K_DSM_CNTL_BASE_IDX 1 +#define mmATCL2_0_ATC_L2_CACHE_2M_DSM_CNTL 0x0991 +#define mmATCL2_0_ATC_L2_CACHE_2M_DSM_CNTL_BASE_IDX 1 +#define mmATCL2_0_ATC_L2_CNTL4 0x0992 +#define mmATCL2_0_ATC_L2_CNTL4_BASE_IDX 1 +#define mmATCL2_0_ATC_L2_MM_GROUP_RT_CLASSES 0x0993 +#define mmATCL2_0_ATC_L2_MM_GROUP_RT_CLASSES_BASE_IDX 1 + + +// addressBlock: mmhub_utcl2_vml2pfdec +// base address: 0x6a700 +#define mmVML2PF0_VM_L2_CNTL 0x09c0 +#define mmVML2PF0_VM_L2_CNTL_BASE_IDX 1 +#define mmVML2PF0_VM_L2_CNTL2 0x09c1 +#define mmVML2PF0_VM_L2_CNTL2_BASE_IDX 1 +#define mmVML2PF0_VM_L2_CNTL3 0x09c2 +#define mmVML2PF0_VM_L2_CNTL3_BASE_IDX 1 +#define mmVML2PF0_VM_L2_STATUS 0x09c3 +#define mmVML2PF0_VM_L2_STATUS_BASE_IDX 1 +#define mmVML2PF0_VM_DUMMY_PAGE_FAULT_CNTL 0x09c4 +#define mmVML2PF0_VM_DUMMY_PAGE_FAULT_CNTL_BASE_IDX 1 +#define mmVML2PF0_VM_DUMMY_PAGE_FAULT_ADDR_LO32 0x09c5 +#define mmVML2PF0_VM_DUMMY_PAGE_FAULT_ADDR_LO32_BASE_IDX 1 +#define mmVML2PF0_VM_DUMMY_PAGE_FAULT_ADDR_HI32 0x09c6 +#define mmVML2PF0_VM_DUMMY_PAGE_FAULT_ADDR_HI32_BASE_IDX 1 +#define mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL 0x09c7 +#define mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL_BASE_IDX 1 +#define mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2 0x09c8 +#define mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2_BASE_IDX 1 +#define mmVML2PF0_VM_L2_PROTECTION_FAULT_MM_CNTL3 0x09c9 +#define mmVML2PF0_VM_L2_PROTECTION_FAULT_MM_CNTL3_BASE_IDX 1 +#define mmVML2PF0_VM_L2_PROTECTION_FAULT_MM_CNTL4 0x09ca +#define mmVML2PF0_VM_L2_PROTECTION_FAULT_MM_CNTL4_BASE_IDX 1 +#define mmVML2PF0_VM_L2_PROTECTION_FAULT_STATUS 0x09cb +#define mmVML2PF0_VM_L2_PROTECTION_FAULT_STATUS_BASE_IDX 1 +#define mmVML2PF0_VM_L2_PROTECTION_FAULT_ADDR_LO32 0x09cc +#define 
mmVML2PF0_VM_L2_PROTECTION_FAULT_ADDR_LO32_BASE_IDX 1 +#define mmVML2PF0_VM_L2_PROTECTION_FAULT_ADDR_HI32 0x09cd +#define mmVML2PF0_VM_L2_PROTECTION_FAULT_ADDR_HI32_BASE_IDX 1 +#define mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32 0x09ce +#define mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32_BASE_IDX 1 +#define mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32 0x09cf +#define mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32_BASE_IDX 1 +#define mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32 0x09d1 +#define mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32_BASE_IDX 1 +#define mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32 0x09d2 +#define mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32_BASE_IDX 1 +#define mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32 0x09d3 +#define mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32_BASE_IDX 1 +#define mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32 0x09d4 +#define mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32_BASE_IDX 1 +#define mmVML2PF0_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32 0x09d5 +#define mmVML2PF0_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32_BASE_IDX 1 +#define mmVML2PF0_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32 0x09d6 +#define mmVML2PF0_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32_BASE_IDX 1 +#define mmVML2PF0_VM_L2_CNTL4 0x09d7 +#define mmVML2PF0_VM_L2_CNTL4_BASE_IDX 1 +#define mmVML2PF0_VM_L2_MM_GROUP_RT_CLASSES 0x09d8 +#define mmVML2PF0_VM_L2_MM_GROUP_RT_CLASSES_BASE_IDX 1 +#define mmVML2PF0_VM_L2_BANK_SELECT_RESERVED_CID 0x09d9 +#define mmVML2PF0_VM_L2_BANK_SELECT_RESERVED_CID_BASE_IDX 1 +#define mmVML2PF0_VM_L2_BANK_SELECT_RESERVED_CID2 0x09da +#define mmVML2PF0_VM_L2_BANK_SELECT_RESERVED_CID2_BASE_IDX 1 +#define mmVML2PF0_VM_L2_CACHE_PARITY_CNTL 0x09db +#define mmVML2PF0_VM_L2_CACHE_PARITY_CNTL_BASE_IDX 1 +#define mmVML2PF0_VM_L2_CGTT_CLK_CTRL 0x09de +#define mmVML2PF0_VM_L2_CGTT_CLK_CTRL_BASE_IDX 1 + + +// addressBlock: mmhub_utcl2_vml2vcdec +// base address: 0x6a800 +#define mmVML2VC0_VM_CONTEXT0_CNTL 0x0a00 +#define mmVML2VC0_VM_CONTEXT0_CNTL_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT1_CNTL 0x0a01 +#define mmVML2VC0_VM_CONTEXT1_CNTL_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT2_CNTL 0x0a02 +#define mmVML2VC0_VM_CONTEXT2_CNTL_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT3_CNTL 0x0a03 +#define mmVML2VC0_VM_CONTEXT3_CNTL_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT4_CNTL 0x0a04 +#define mmVML2VC0_VM_CONTEXT4_CNTL_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT5_CNTL 0x0a05 +#define mmVML2VC0_VM_CONTEXT5_CNTL_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT6_CNTL 0x0a06 +#define mmVML2VC0_VM_CONTEXT6_CNTL_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT7_CNTL 0x0a07 +#define mmVML2VC0_VM_CONTEXT7_CNTL_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT8_CNTL 0x0a08 +#define mmVML2VC0_VM_CONTEXT8_CNTL_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT9_CNTL 0x0a09 +#define mmVML2VC0_VM_CONTEXT9_CNTL_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT10_CNTL 0x0a0a +#define mmVML2VC0_VM_CONTEXT10_CNTL_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT11_CNTL 0x0a0b +#define mmVML2VC0_VM_CONTEXT11_CNTL_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT12_CNTL 0x0a0c +#define mmVML2VC0_VM_CONTEXT12_CNTL_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT13_CNTL 0x0a0d +#define mmVML2VC0_VM_CONTEXT13_CNTL_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT14_CNTL 0x0a0e +#define mmVML2VC0_VM_CONTEXT14_CNTL_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT15_CNTL 0x0a0f +#define mmVML2VC0_VM_CONTEXT15_CNTL_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXTS_DISABLE 0x0a10 +#define 
mmVML2VC0_VM_CONTEXTS_DISABLE_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG0_SEM 0x0a11 +#define mmVML2VC0_VM_INVALIDATE_ENG0_SEM_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG1_SEM 0x0a12 +#define mmVML2VC0_VM_INVALIDATE_ENG1_SEM_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG2_SEM 0x0a13 +#define mmVML2VC0_VM_INVALIDATE_ENG2_SEM_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG3_SEM 0x0a14 +#define mmVML2VC0_VM_INVALIDATE_ENG3_SEM_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG4_SEM 0x0a15 +#define mmVML2VC0_VM_INVALIDATE_ENG4_SEM_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG5_SEM 0x0a16 +#define mmVML2VC0_VM_INVALIDATE_ENG5_SEM_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG6_SEM 0x0a17 +#define mmVML2VC0_VM_INVALIDATE_ENG6_SEM_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG7_SEM 0x0a18 +#define mmVML2VC0_VM_INVALIDATE_ENG7_SEM_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG8_SEM 0x0a19 +#define mmVML2VC0_VM_INVALIDATE_ENG8_SEM_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG9_SEM 0x0a1a +#define mmVML2VC0_VM_INVALIDATE_ENG9_SEM_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG10_SEM 0x0a1b +#define mmVML2VC0_VM_INVALIDATE_ENG10_SEM_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG11_SEM 0x0a1c +#define mmVML2VC0_VM_INVALIDATE_ENG11_SEM_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG12_SEM 0x0a1d +#define mmVML2VC0_VM_INVALIDATE_ENG12_SEM_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG13_SEM 0x0a1e +#define mmVML2VC0_VM_INVALIDATE_ENG13_SEM_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG14_SEM 0x0a1f +#define mmVML2VC0_VM_INVALIDATE_ENG14_SEM_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG15_SEM 0x0a20 +#define mmVML2VC0_VM_INVALIDATE_ENG15_SEM_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG16_SEM 0x0a21 +#define mmVML2VC0_VM_INVALIDATE_ENG16_SEM_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG17_SEM 0x0a22 +#define mmVML2VC0_VM_INVALIDATE_ENG17_SEM_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG0_REQ 0x0a23 +#define mmVML2VC0_VM_INVALIDATE_ENG0_REQ_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG1_REQ 0x0a24 +#define mmVML2VC0_VM_INVALIDATE_ENG1_REQ_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG2_REQ 0x0a25 +#define mmVML2VC0_VM_INVALIDATE_ENG2_REQ_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG3_REQ 0x0a26 +#define mmVML2VC0_VM_INVALIDATE_ENG3_REQ_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG4_REQ 0x0a27 +#define mmVML2VC0_VM_INVALIDATE_ENG4_REQ_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG5_REQ 0x0a28 +#define mmVML2VC0_VM_INVALIDATE_ENG5_REQ_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG6_REQ 0x0a29 +#define mmVML2VC0_VM_INVALIDATE_ENG6_REQ_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG7_REQ 0x0a2a +#define mmVML2VC0_VM_INVALIDATE_ENG7_REQ_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG8_REQ 0x0a2b +#define mmVML2VC0_VM_INVALIDATE_ENG8_REQ_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG9_REQ 0x0a2c +#define mmVML2VC0_VM_INVALIDATE_ENG9_REQ_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG10_REQ 0x0a2d +#define mmVML2VC0_VM_INVALIDATE_ENG10_REQ_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG11_REQ 0x0a2e +#define mmVML2VC0_VM_INVALIDATE_ENG11_REQ_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG12_REQ 0x0a2f +#define mmVML2VC0_VM_INVALIDATE_ENG12_REQ_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG13_REQ 0x0a30 +#define mmVML2VC0_VM_INVALIDATE_ENG13_REQ_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG14_REQ 0x0a31 +#define mmVML2VC0_VM_INVALIDATE_ENG14_REQ_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG15_REQ 0x0a32 +#define mmVML2VC0_VM_INVALIDATE_ENG15_REQ_BASE_IDX 1 +#define 
mmVML2VC0_VM_INVALIDATE_ENG16_REQ 0x0a33 +#define mmVML2VC0_VM_INVALIDATE_ENG16_REQ_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG17_REQ 0x0a34 +#define mmVML2VC0_VM_INVALIDATE_ENG17_REQ_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG0_ACK 0x0a35 +#define mmVML2VC0_VM_INVALIDATE_ENG0_ACK_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG1_ACK 0x0a36 +#define mmVML2VC0_VM_INVALIDATE_ENG1_ACK_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG2_ACK 0x0a37 +#define mmVML2VC0_VM_INVALIDATE_ENG2_ACK_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG3_ACK 0x0a38 +#define mmVML2VC0_VM_INVALIDATE_ENG3_ACK_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG4_ACK 0x0a39 +#define mmVML2VC0_VM_INVALIDATE_ENG4_ACK_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG5_ACK 0x0a3a +#define mmVML2VC0_VM_INVALIDATE_ENG5_ACK_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG6_ACK 0x0a3b +#define mmVML2VC0_VM_INVALIDATE_ENG6_ACK_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG7_ACK 0x0a3c +#define mmVML2VC0_VM_INVALIDATE_ENG7_ACK_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG8_ACK 0x0a3d +#define mmVML2VC0_VM_INVALIDATE_ENG8_ACK_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG9_ACK 0x0a3e +#define mmVML2VC0_VM_INVALIDATE_ENG9_ACK_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG10_ACK 0x0a3f +#define mmVML2VC0_VM_INVALIDATE_ENG10_ACK_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG11_ACK 0x0a40 +#define mmVML2VC0_VM_INVALIDATE_ENG11_ACK_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG12_ACK 0x0a41 +#define mmVML2VC0_VM_INVALIDATE_ENG12_ACK_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG13_ACK 0x0a42 +#define mmVML2VC0_VM_INVALIDATE_ENG13_ACK_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG14_ACK 0x0a43 +#define mmVML2VC0_VM_INVALIDATE_ENG14_ACK_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG15_ACK 0x0a44 +#define mmVML2VC0_VM_INVALIDATE_ENG15_ACK_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG16_ACK 0x0a45 +#define mmVML2VC0_VM_INVALIDATE_ENG16_ACK_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG17_ACK 0x0a46 +#define mmVML2VC0_VM_INVALIDATE_ENG17_ACK_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_LO32 0x0a47 +#define mmVML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_HI32 0x0a48 +#define mmVML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG1_ADDR_RANGE_LO32 0x0a49 +#define mmVML2VC0_VM_INVALIDATE_ENG1_ADDR_RANGE_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG1_ADDR_RANGE_HI32 0x0a4a +#define mmVML2VC0_VM_INVALIDATE_ENG1_ADDR_RANGE_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG2_ADDR_RANGE_LO32 0x0a4b +#define mmVML2VC0_VM_INVALIDATE_ENG2_ADDR_RANGE_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG2_ADDR_RANGE_HI32 0x0a4c +#define mmVML2VC0_VM_INVALIDATE_ENG2_ADDR_RANGE_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG3_ADDR_RANGE_LO32 0x0a4d +#define mmVML2VC0_VM_INVALIDATE_ENG3_ADDR_RANGE_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG3_ADDR_RANGE_HI32 0x0a4e +#define mmVML2VC0_VM_INVALIDATE_ENG3_ADDR_RANGE_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG4_ADDR_RANGE_LO32 0x0a4f +#define mmVML2VC0_VM_INVALIDATE_ENG4_ADDR_RANGE_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG4_ADDR_RANGE_HI32 0x0a50 +#define mmVML2VC0_VM_INVALIDATE_ENG4_ADDR_RANGE_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG5_ADDR_RANGE_LO32 0x0a51 +#define mmVML2VC0_VM_INVALIDATE_ENG5_ADDR_RANGE_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG5_ADDR_RANGE_HI32 0x0a52 +#define 
mmVML2VC0_VM_INVALIDATE_ENG5_ADDR_RANGE_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG6_ADDR_RANGE_LO32 0x0a53 +#define mmVML2VC0_VM_INVALIDATE_ENG6_ADDR_RANGE_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG6_ADDR_RANGE_HI32 0x0a54 +#define mmVML2VC0_VM_INVALIDATE_ENG6_ADDR_RANGE_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG7_ADDR_RANGE_LO32 0x0a55 +#define mmVML2VC0_VM_INVALIDATE_ENG7_ADDR_RANGE_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG7_ADDR_RANGE_HI32 0x0a56 +#define mmVML2VC0_VM_INVALIDATE_ENG7_ADDR_RANGE_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG8_ADDR_RANGE_LO32 0x0a57 +#define mmVML2VC0_VM_INVALIDATE_ENG8_ADDR_RANGE_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG8_ADDR_RANGE_HI32 0x0a58 +#define mmVML2VC0_VM_INVALIDATE_ENG8_ADDR_RANGE_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG9_ADDR_RANGE_LO32 0x0a59 +#define mmVML2VC0_VM_INVALIDATE_ENG9_ADDR_RANGE_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG9_ADDR_RANGE_HI32 0x0a5a +#define mmVML2VC0_VM_INVALIDATE_ENG9_ADDR_RANGE_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG10_ADDR_RANGE_LO32 0x0a5b +#define mmVML2VC0_VM_INVALIDATE_ENG10_ADDR_RANGE_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG10_ADDR_RANGE_HI32 0x0a5c +#define mmVML2VC0_VM_INVALIDATE_ENG10_ADDR_RANGE_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG11_ADDR_RANGE_LO32 0x0a5d +#define mmVML2VC0_VM_INVALIDATE_ENG11_ADDR_RANGE_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG11_ADDR_RANGE_HI32 0x0a5e +#define mmVML2VC0_VM_INVALIDATE_ENG11_ADDR_RANGE_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG12_ADDR_RANGE_LO32 0x0a5f +#define mmVML2VC0_VM_INVALIDATE_ENG12_ADDR_RANGE_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG12_ADDR_RANGE_HI32 0x0a60 +#define mmVML2VC0_VM_INVALIDATE_ENG12_ADDR_RANGE_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG13_ADDR_RANGE_LO32 0x0a61 +#define mmVML2VC0_VM_INVALIDATE_ENG13_ADDR_RANGE_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG13_ADDR_RANGE_HI32 0x0a62 +#define mmVML2VC0_VM_INVALIDATE_ENG13_ADDR_RANGE_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG14_ADDR_RANGE_LO32 0x0a63 +#define mmVML2VC0_VM_INVALIDATE_ENG14_ADDR_RANGE_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG14_ADDR_RANGE_HI32 0x0a64 +#define mmVML2VC0_VM_INVALIDATE_ENG14_ADDR_RANGE_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG15_ADDR_RANGE_LO32 0x0a65 +#define mmVML2VC0_VM_INVALIDATE_ENG15_ADDR_RANGE_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG15_ADDR_RANGE_HI32 0x0a66 +#define mmVML2VC0_VM_INVALIDATE_ENG15_ADDR_RANGE_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32 0x0a67 +#define mmVML2VC0_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32 0x0a68 +#define mmVML2VC0_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG17_ADDR_RANGE_LO32 0x0a69 +#define mmVML2VC0_VM_INVALIDATE_ENG17_ADDR_RANGE_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_INVALIDATE_ENG17_ADDR_RANGE_HI32 0x0a6a +#define mmVML2VC0_VM_INVALIDATE_ENG17_ADDR_RANGE_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32 0x0a6b +#define mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32 0x0a6c +#define mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 0x0a6d +#define mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1 +#define 
mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32 0x0a6e +#define mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32 0x0a6f +#define mmVML2VC0_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32 0x0a70 +#define mmVML2VC0_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32 0x0a71 +#define mmVML2VC0_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32 0x0a72 +#define mmVML2VC0_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32 0x0a73 +#define mmVML2VC0_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32 0x0a74 +#define mmVML2VC0_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32 0x0a75 +#define mmVML2VC0_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32 0x0a76 +#define mmVML2VC0_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32 0x0a77 +#define mmVML2VC0_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32 0x0a78 +#define mmVML2VC0_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32 0x0a79 +#define mmVML2VC0_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32 0x0a7a +#define mmVML2VC0_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32 0x0a7b +#define mmVML2VC0_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32 0x0a7c +#define mmVML2VC0_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32 0x0a7d +#define mmVML2VC0_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32 0x0a7e +#define mmVML2VC0_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32 0x0a7f +#define mmVML2VC0_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32 0x0a80 +#define mmVML2VC0_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32 0x0a81 +#define mmVML2VC0_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32 0x0a82 +#define mmVML2VC0_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32 0x0a83 +#define mmVML2VC0_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32 0x0a84 +#define mmVML2VC0_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32 0x0a85 +#define mmVML2VC0_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32 0x0a86 +#define mmVML2VC0_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32 0x0a87 +#define mmVML2VC0_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32 0x0a88 +#define 
mmVML2VC0_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32 0x0a89 +#define mmVML2VC0_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32 0x0a8a +#define mmVML2VC0_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32 0x0a8b +#define mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32 0x0a8c +#define mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32 0x0a8d +#define mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32 0x0a8e +#define mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32 0x0a8f +#define mmVML2VC0_VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32 0x0a90 +#define mmVML2VC0_VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32 0x0a91 +#define mmVML2VC0_VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32 0x0a92 +#define mmVML2VC0_VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32 0x0a93 +#define mmVML2VC0_VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32 0x0a94 +#define mmVML2VC0_VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32 0x0a95 +#define mmVML2VC0_VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32 0x0a96 +#define mmVML2VC0_VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32 0x0a97 +#define mmVML2VC0_VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32 0x0a98 +#define mmVML2VC0_VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32 0x0a99 +#define mmVML2VC0_VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32 0x0a9a +#define mmVML2VC0_VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32 0x0a9b +#define mmVML2VC0_VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32 0x0a9c +#define mmVML2VC0_VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32 0x0a9d +#define mmVML2VC0_VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32 0x0a9e +#define mmVML2VC0_VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32 0x0a9f +#define mmVML2VC0_VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32 0x0aa0 +#define mmVML2VC0_VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32 0x0aa1 +#define mmVML2VC0_VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32 0x0aa2 +#define mmVML2VC0_VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1 
+#define mmVML2VC0_VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32 0x0aa3 +#define mmVML2VC0_VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32 0x0aa4 +#define mmVML2VC0_VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32 0x0aa5 +#define mmVML2VC0_VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32 0x0aa6 +#define mmVML2VC0_VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32 0x0aa7 +#define mmVML2VC0_VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32 0x0aa8 +#define mmVML2VC0_VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32 0x0aa9 +#define mmVML2VC0_VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32 0x0aaa +#define mmVML2VC0_VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32 0x0aab +#define mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32 0x0aac +#define mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32 0x0aad +#define mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32 0x0aae +#define mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32 0x0aaf +#define mmVML2VC0_VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32 0x0ab0 +#define mmVML2VC0_VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32 0x0ab1 +#define mmVML2VC0_VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32 0x0ab2 +#define mmVML2VC0_VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32 0x0ab3 +#define mmVML2VC0_VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32 0x0ab4 +#define mmVML2VC0_VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32 0x0ab5 +#define mmVML2VC0_VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32 0x0ab6 +#define mmVML2VC0_VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32 0x0ab7 +#define mmVML2VC0_VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32 0x0ab8 +#define mmVML2VC0_VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32 0x0ab9 +#define mmVML2VC0_VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32 0x0aba +#define mmVML2VC0_VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32 0x0abb +#define mmVML2VC0_VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32 0x0abc +#define mmVML2VC0_VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32 0x0abd +#define 
mmVML2VC0_VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32 0x0abe +#define mmVML2VC0_VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32 0x0abf +#define mmVML2VC0_VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32 0x0ac0 +#define mmVML2VC0_VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32 0x0ac1 +#define mmVML2VC0_VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32 0x0ac2 +#define mmVML2VC0_VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32 0x0ac3 +#define mmVML2VC0_VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32 0x0ac4 +#define mmVML2VC0_VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32 0x0ac5 +#define mmVML2VC0_VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32 0x0ac6 +#define mmVML2VC0_VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32 0x0ac7 +#define mmVML2VC0_VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32 0x0ac8 +#define mmVML2VC0_VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32 0x0ac9 +#define mmVML2VC0_VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC0_VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32 0x0aca +#define mmVML2VC0_VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1 + + +// addressBlock: mmhub_utcl2_vmsharedpfdec +// base address: 0x6ab90 +#define mmVMSHAREDPF0_MC_VM_NB_MMIOBASE 0x0ae4 +#define mmVMSHAREDPF0_MC_VM_NB_MMIOBASE_BASE_IDX 1 +#define mmVMSHAREDPF0_MC_VM_NB_MMIOLIMIT 0x0ae5 +#define mmVMSHAREDPF0_MC_VM_NB_MMIOLIMIT_BASE_IDX 1 +#define mmVMSHAREDPF0_MC_VM_NB_PCI_CTRL 0x0ae6 +#define mmVMSHAREDPF0_MC_VM_NB_PCI_CTRL_BASE_IDX 1 +#define mmVMSHAREDPF0_MC_VM_NB_PCI_ARB 0x0ae7 +#define mmVMSHAREDPF0_MC_VM_NB_PCI_ARB_BASE_IDX 1 +#define mmVMSHAREDPF0_MC_VM_NB_TOP_OF_DRAM_SLOT1 0x0ae8 +#define mmVMSHAREDPF0_MC_VM_NB_TOP_OF_DRAM_SLOT1_BASE_IDX 1 +#define mmVMSHAREDPF0_MC_VM_NB_LOWER_TOP_OF_DRAM2 0x0ae9 +#define mmVMSHAREDPF0_MC_VM_NB_LOWER_TOP_OF_DRAM2_BASE_IDX 1 +#define mmVMSHAREDPF0_MC_VM_NB_UPPER_TOP_OF_DRAM2 0x0aea +#define mmVMSHAREDPF0_MC_VM_NB_UPPER_TOP_OF_DRAM2_BASE_IDX 1 +#define mmVMSHAREDPF0_MC_VM_FB_OFFSET 0x0aeb +#define mmVMSHAREDPF0_MC_VM_FB_OFFSET_BASE_IDX 1 +#define mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB 0x0aec +#define mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB_BASE_IDX 1 +#define mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB 0x0aed +#define mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB_BASE_IDX 1 +#define mmVMSHAREDPF0_MC_VM_STEERING 0x0aee +#define mmVMSHAREDPF0_MC_VM_STEERING_BASE_IDX 1 +#define mmVMSHAREDPF0_MC_SHARED_VIRT_RESET_REQ 0x0aef +#define mmVMSHAREDPF0_MC_SHARED_VIRT_RESET_REQ_BASE_IDX 1 +#define mmVMSHAREDPF0_MC_MEM_POWER_LS 0x0af0 +#define mmVMSHAREDPF0_MC_MEM_POWER_LS_BASE_IDX 1 +#define mmVMSHAREDPF0_MC_VM_CACHEABLE_DRAM_ADDRESS_START 0x0af1 +#define mmVMSHAREDPF0_MC_VM_CACHEABLE_DRAM_ADDRESS_START_BASE_IDX 1 +#define mmVMSHAREDPF0_MC_VM_CACHEABLE_DRAM_ADDRESS_END 0x0af2 +#define mmVMSHAREDPF0_MC_VM_CACHEABLE_DRAM_ADDRESS_END_BASE_IDX 1 
+#define mmVMSHAREDPF0_MC_VM_APT_CNTL 0x0af3 +#define mmVMSHAREDPF0_MC_VM_APT_CNTL_BASE_IDX 1 +#define mmVMSHAREDPF0_MC_VM_LOCAL_HBM_ADDRESS_START 0x0af4 +#define mmVMSHAREDPF0_MC_VM_LOCAL_HBM_ADDRESS_START_BASE_IDX 1 +#define mmVMSHAREDPF0_MC_VM_LOCAL_HBM_ADDRESS_END 0x0af5 +#define mmVMSHAREDPF0_MC_VM_LOCAL_HBM_ADDRESS_END_BASE_IDX 1 +#define mmVMSHAREDPF0_MC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL 0x0af6 +#define mmVMSHAREDPF0_MC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL_BASE_IDX 1 +#define mmVMSHAREDPF0_MC_VM_XGMI_LFB_CNTL 0x0af7 +#define mmVMSHAREDPF0_MC_VM_XGMI_LFB_CNTL_BASE_IDX 1 +#define mmVMSHAREDPF0_MC_VM_XGMI_LFB_SIZE 0x0af8 +#define mmVMSHAREDPF0_MC_VM_XGMI_LFB_SIZE_BASE_IDX 1 +#define mmVMSHAREDPF0_MC_VM_CACHEABLE_DRAM_CNTL 0x0af9 +#define mmVMSHAREDPF0_MC_VM_CACHEABLE_DRAM_CNTL_BASE_IDX 1 + + +// addressBlock: mmhub_utcl2_vmsharedvcdec +// base address: 0x6ac00 +#define mmVMSHAREDVC0_MC_VM_FB_LOCATION_BASE 0x0b00 +#define mmVMSHAREDVC0_MC_VM_FB_LOCATION_BASE_BASE_IDX 1 +#define mmVMSHAREDVC0_MC_VM_FB_LOCATION_TOP 0x0b01 +#define mmVMSHAREDVC0_MC_VM_FB_LOCATION_TOP_BASE_IDX 1 +#define mmVMSHAREDVC0_MC_VM_AGP_TOP 0x0b02 +#define mmVMSHAREDVC0_MC_VM_AGP_TOP_BASE_IDX 1 +#define mmVMSHAREDVC0_MC_VM_AGP_BOT 0x0b03 +#define mmVMSHAREDVC0_MC_VM_AGP_BOT_BASE_IDX 1 +#define mmVMSHAREDVC0_MC_VM_AGP_BASE 0x0b04 +#define mmVMSHAREDVC0_MC_VM_AGP_BASE_BASE_IDX 1 +#define mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x0b05 +#define mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_LOW_ADDR_BASE_IDX 1 +#define mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x0b06 +#define mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_HIGH_ADDR_BASE_IDX 1 +#define mmVMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL 0x0b07 +#define mmVMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL_BASE_IDX 1 + + +// addressBlock: mmhub_utcl2_vmsharedhvdec +// base address: 0x6ac80 +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF0 0x0b20 +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF0_BASE_IDX 1 +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF1 0x0b21 +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF1_BASE_IDX 1 +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF2 0x0b22 +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF2_BASE_IDX 1 +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF3 0x0b23 +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF3_BASE_IDX 1 +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF4 0x0b24 +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF4_BASE_IDX 1 +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF5 0x0b25 +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF5_BASE_IDX 1 +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF6 0x0b26 +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF6_BASE_IDX 1 +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF7 0x0b27 +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF7_BASE_IDX 1 +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF8 0x0b28 +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF8_BASE_IDX 1 +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF9 0x0b29 +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF9_BASE_IDX 1 +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF10 0x0b2a +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF10_BASE_IDX 1 +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF11 0x0b2b +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF11_BASE_IDX 1 +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF12 0x0b2c +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF12_BASE_IDX 1 +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF13 0x0b2d +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF13_BASE_IDX 1 +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF14 0x0b2e +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF14_BASE_IDX 1 +#define 
mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF15 0x0b2f +#define mmVMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF15_BASE_IDX 1 +#define mmVMSHAREDHV0_VM_IOMMU_MMIO_CNTRL_1 0x0b30 +#define mmVMSHAREDHV0_VM_IOMMU_MMIO_CNTRL_1_BASE_IDX 1 +#define mmVMSHAREDHV0_MC_VM_MARC_BASE_LO_0 0x0b31 +#define mmVMSHAREDHV0_MC_VM_MARC_BASE_LO_0_BASE_IDX 1 +#define mmVMSHAREDHV0_MC_VM_MARC_BASE_LO_1 0x0b32 +#define mmVMSHAREDHV0_MC_VM_MARC_BASE_LO_1_BASE_IDX 1 +#define mmVMSHAREDHV0_MC_VM_MARC_BASE_LO_2 0x0b33 +#define mmVMSHAREDHV0_MC_VM_MARC_BASE_LO_2_BASE_IDX 1 +#define mmVMSHAREDHV0_MC_VM_MARC_BASE_LO_3 0x0b34 +#define mmVMSHAREDHV0_MC_VM_MARC_BASE_LO_3_BASE_IDX 1 +#define mmVMSHAREDHV0_MC_VM_MARC_BASE_HI_0 0x0b35 +#define mmVMSHAREDHV0_MC_VM_MARC_BASE_HI_0_BASE_IDX 1 +#define mmVMSHAREDHV0_MC_VM_MARC_BASE_HI_1 0x0b36 +#define mmVMSHAREDHV0_MC_VM_MARC_BASE_HI_1_BASE_IDX 1 +#define mmVMSHAREDHV0_MC_VM_MARC_BASE_HI_2 0x0b37 +#define mmVMSHAREDHV0_MC_VM_MARC_BASE_HI_2_BASE_IDX 1 +#define mmVMSHAREDHV0_MC_VM_MARC_BASE_HI_3 0x0b38 +#define mmVMSHAREDHV0_MC_VM_MARC_BASE_HI_3_BASE_IDX 1 +#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_LO_0 0x0b39 +#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_LO_0_BASE_IDX 1 +#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_LO_1 0x0b3a +#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_LO_1_BASE_IDX 1 +#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_LO_2 0x0b3b +#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_LO_2_BASE_IDX 1 +#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_LO_3 0x0b3c +#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_LO_3_BASE_IDX 1 +#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_HI_0 0x0b3d +#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_HI_0_BASE_IDX 1 +#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_HI_1 0x0b3e +#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_HI_1_BASE_IDX 1 +#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_HI_2 0x0b3f +#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_HI_2_BASE_IDX 1 +#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_HI_3 0x0b40 +#define mmVMSHAREDHV0_MC_VM_MARC_RELOC_HI_3_BASE_IDX 1 +#define mmVMSHAREDHV0_MC_VM_MARC_LEN_LO_0 0x0b41 +#define mmVMSHAREDHV0_MC_VM_MARC_LEN_LO_0_BASE_IDX 1 +#define mmVMSHAREDHV0_MC_VM_MARC_LEN_LO_1 0x0b42 +#define mmVMSHAREDHV0_MC_VM_MARC_LEN_LO_1_BASE_IDX 1 +#define mmVMSHAREDHV0_MC_VM_MARC_LEN_LO_2 0x0b43 +#define mmVMSHAREDHV0_MC_VM_MARC_LEN_LO_2_BASE_IDX 1 +#define mmVMSHAREDHV0_MC_VM_MARC_LEN_LO_3 0x0b44 +#define mmVMSHAREDHV0_MC_VM_MARC_LEN_LO_3_BASE_IDX 1 +#define mmVMSHAREDHV0_MC_VM_MARC_LEN_HI_0 0x0b45 +#define mmVMSHAREDHV0_MC_VM_MARC_LEN_HI_0_BASE_IDX 1 +#define mmVMSHAREDHV0_MC_VM_MARC_LEN_HI_1 0x0b46 +#define mmVMSHAREDHV0_MC_VM_MARC_LEN_HI_1_BASE_IDX 1 +#define mmVMSHAREDHV0_MC_VM_MARC_LEN_HI_2 0x0b47 +#define mmVMSHAREDHV0_MC_VM_MARC_LEN_HI_2_BASE_IDX 1 +#define mmVMSHAREDHV0_MC_VM_MARC_LEN_HI_3 0x0b48 +#define mmVMSHAREDHV0_MC_VM_MARC_LEN_HI_3_BASE_IDX 1 +#define mmVMSHAREDHV0_VM_IOMMU_CONTROL_REGISTER 0x0b49 +#define mmVMSHAREDHV0_VM_IOMMU_CONTROL_REGISTER_BASE_IDX 1 +#define mmVMSHAREDHV0_VM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER 0x0b4a +#define mmVMSHAREDHV0_VM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER_BASE_IDX 1 +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL 0x0b4b +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_BASE_IDX 1 +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_0 0x0b4c +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_0_BASE_IDX 1 +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_1 0x0b4d +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_1_BASE_IDX 1 +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_2 0x0b4e +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_2_BASE_IDX 1 +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_3 0x0b4f +#define 
mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_3_BASE_IDX 1 +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_4 0x0b50 +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_4_BASE_IDX 1 +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_5 0x0b51 +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_5_BASE_IDX 1 +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_6 0x0b52 +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_6_BASE_IDX 1 +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_7 0x0b53 +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_7_BASE_IDX 1 +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_8 0x0b54 +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_8_BASE_IDX 1 +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_9 0x0b55 +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_9_BASE_IDX 1 +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_10 0x0b56 +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_10_BASE_IDX 1 +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_11 0x0b57 +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_11_BASE_IDX 1 +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_12 0x0b58 +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_12_BASE_IDX 1 +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_13 0x0b59 +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_13_BASE_IDX 1 +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_14 0x0b5a +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_14_BASE_IDX 1 +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_15 0x0b5b +#define mmVMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_15_BASE_IDX 1 +#define mmVMSHAREDHV0_UTCL2_CGTT_CLK_CTRL 0x0b5c +#define mmVMSHAREDHV0_UTCL2_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmVMSHAREDHV0_MC_SHARED_ACTIVE_FCN_ID 0x0b5d +#define mmVMSHAREDHV0_MC_SHARED_ACTIVE_FCN_ID_BASE_IDX 1 +#define mmVMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE 0x0b5e +#define mmVMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE_BASE_IDX 1 + + +// addressBlock: mmhub_utcl2_atcl2pfcntrdec +// base address: 0x6adc0 +#define mmATCL2PFCNTR0_ATC_L2_PERFCOUNTER_LO 0x0b70 +#define mmATCL2PFCNTR0_ATC_L2_PERFCOUNTER_LO_BASE_IDX 1 +#define mmATCL2PFCNTR0_ATC_L2_PERFCOUNTER_HI 0x0b71 +#define mmATCL2PFCNTR0_ATC_L2_PERFCOUNTER_HI_BASE_IDX 1 + + +// addressBlock: mmhub_utcl2_atcl2pfcntldec +// base address: 0x6add0 +#define mmATCL2PFCNTL0_ATC_L2_PERFCOUNTER0_CFG 0x0b74 +#define mmATCL2PFCNTL0_ATC_L2_PERFCOUNTER0_CFG_BASE_IDX 1 +#define mmATCL2PFCNTL0_ATC_L2_PERFCOUNTER1_CFG 0x0b75 +#define mmATCL2PFCNTL0_ATC_L2_PERFCOUNTER1_CFG_BASE_IDX 1 +#define mmATCL2PFCNTL0_ATC_L2_PERFCOUNTER_RSLT_CNTL 0x0b76 +#define mmATCL2PFCNTL0_ATC_L2_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1 + + +// addressBlock: mmhub_utcl2_vml2pldec +// base address: 0x6ae00 +#define mmVML2PL0_MC_VM_L2_PERFCOUNTER0_CFG 0x0b80 +#define mmVML2PL0_MC_VM_L2_PERFCOUNTER0_CFG_BASE_IDX 1 +#define mmVML2PL0_MC_VM_L2_PERFCOUNTER1_CFG 0x0b81 +#define mmVML2PL0_MC_VM_L2_PERFCOUNTER1_CFG_BASE_IDX 1 +#define mmVML2PL0_MC_VM_L2_PERFCOUNTER2_CFG 0x0b82 +#define mmVML2PL0_MC_VM_L2_PERFCOUNTER2_CFG_BASE_IDX 1 +#define mmVML2PL0_MC_VM_L2_PERFCOUNTER3_CFG 0x0b83 +#define mmVML2PL0_MC_VM_L2_PERFCOUNTER3_CFG_BASE_IDX 1 +#define mmVML2PL0_MC_VM_L2_PERFCOUNTER4_CFG 0x0b84 +#define mmVML2PL0_MC_VM_L2_PERFCOUNTER4_CFG_BASE_IDX 1 +#define mmVML2PL0_MC_VM_L2_PERFCOUNTER5_CFG 0x0b85 +#define mmVML2PL0_MC_VM_L2_PERFCOUNTER5_CFG_BASE_IDX 1 +#define mmVML2PL0_MC_VM_L2_PERFCOUNTER6_CFG 0x0b86 +#define mmVML2PL0_MC_VM_L2_PERFCOUNTER6_CFG_BASE_IDX 1 +#define mmVML2PL0_MC_VM_L2_PERFCOUNTER7_CFG 0x0b87 +#define mmVML2PL0_MC_VM_L2_PERFCOUNTER7_CFG_BASE_IDX 1 +#define mmVML2PL0_MC_VM_L2_PERFCOUNTER_RSLT_CNTL 0x0b88 +#define mmVML2PL0_MC_VM_L2_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1 + + +// addressBlock: mmhub_utcl2_vml2prdec +// base address: 0x6ae40 +#define 
mmVML2PR0_MC_VM_L2_PERFCOUNTER_LO 0x0b90 +#define mmVML2PR0_MC_VM_L2_PERFCOUNTER_LO_BASE_IDX 1 +#define mmVML2PR0_MC_VM_L2_PERFCOUNTER_HI 0x0b91 +#define mmVML2PR0_MC_VM_L2_PERFCOUNTER_HI_BASE_IDX 1 + + +// addressBlock: mmhub_dagb_dagbdec5 +// base address: 0x74000 +#define mmDAGB5_RDCLI0 0x3000 +#define mmDAGB5_RDCLI0_BASE_IDX 1 +#define mmDAGB5_RDCLI1 0x3001 +#define mmDAGB5_RDCLI1_BASE_IDX 1 +#define mmDAGB5_RDCLI2 0x3002 +#define mmDAGB5_RDCLI2_BASE_IDX 1 +#define mmDAGB5_RDCLI3 0x3003 +#define mmDAGB5_RDCLI3_BASE_IDX 1 +#define mmDAGB5_RDCLI4 0x3004 +#define mmDAGB5_RDCLI4_BASE_IDX 1 +#define mmDAGB5_RDCLI5 0x3005 +#define mmDAGB5_RDCLI5_BASE_IDX 1 +#define mmDAGB5_RDCLI6 0x3006 +#define mmDAGB5_RDCLI6_BASE_IDX 1 +#define mmDAGB5_RDCLI7 0x3007 +#define mmDAGB5_RDCLI7_BASE_IDX 1 +#define mmDAGB5_RDCLI8 0x3008 +#define mmDAGB5_RDCLI8_BASE_IDX 1 +#define mmDAGB5_RDCLI9 0x3009 +#define mmDAGB5_RDCLI9_BASE_IDX 1 +#define mmDAGB5_RDCLI10 0x300a +#define mmDAGB5_RDCLI10_BASE_IDX 1 +#define mmDAGB5_RDCLI11 0x300b +#define mmDAGB5_RDCLI11_BASE_IDX 1 +#define mmDAGB5_RDCLI12 0x300c +#define mmDAGB5_RDCLI12_BASE_IDX 1 +#define mmDAGB5_RDCLI13 0x300d +#define mmDAGB5_RDCLI13_BASE_IDX 1 +#define mmDAGB5_RDCLI14 0x300e +#define mmDAGB5_RDCLI14_BASE_IDX 1 +#define mmDAGB5_RDCLI15 0x300f +#define mmDAGB5_RDCLI15_BASE_IDX 1 +#define mmDAGB5_RD_CNTL 0x3010 +#define mmDAGB5_RD_CNTL_BASE_IDX 1 +#define mmDAGB5_RD_GMI_CNTL 0x3011 +#define mmDAGB5_RD_GMI_CNTL_BASE_IDX 1 +#define mmDAGB5_RD_ADDR_DAGB 0x3012 +#define mmDAGB5_RD_ADDR_DAGB_BASE_IDX 1 +#define mmDAGB5_RD_OUTPUT_DAGB_MAX_BURST 0x3013 +#define mmDAGB5_RD_OUTPUT_DAGB_MAX_BURST_BASE_IDX 1 +#define mmDAGB5_RD_OUTPUT_DAGB_LAZY_TIMER 0x3014 +#define mmDAGB5_RD_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 1 +#define mmDAGB5_RD_CGTT_CLK_CTRL 0x3015 +#define mmDAGB5_RD_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB5_L1TLB_RD_CGTT_CLK_CTRL 0x3016 +#define mmDAGB5_L1TLB_RD_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB5_ATCVM_RD_CGTT_CLK_CTRL 0x3017 +#define mmDAGB5_ATCVM_RD_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB5_RD_ADDR_DAGB_MAX_BURST0 0x3018 +#define mmDAGB5_RD_ADDR_DAGB_MAX_BURST0_BASE_IDX 1 +#define mmDAGB5_RD_ADDR_DAGB_LAZY_TIMER0 0x3019 +#define mmDAGB5_RD_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 1 +#define mmDAGB5_RD_ADDR_DAGB_MAX_BURST1 0x301a +#define mmDAGB5_RD_ADDR_DAGB_MAX_BURST1_BASE_IDX 1 +#define mmDAGB5_RD_ADDR_DAGB_LAZY_TIMER1 0x301b +#define mmDAGB5_RD_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 1 +#define mmDAGB5_RD_VC0_CNTL 0x301c +#define mmDAGB5_RD_VC0_CNTL_BASE_IDX 1 +#define mmDAGB5_RD_VC1_CNTL 0x301d +#define mmDAGB5_RD_VC1_CNTL_BASE_IDX 1 +#define mmDAGB5_RD_VC2_CNTL 0x301e +#define mmDAGB5_RD_VC2_CNTL_BASE_IDX 1 +#define mmDAGB5_RD_VC3_CNTL 0x301f +#define mmDAGB5_RD_VC3_CNTL_BASE_IDX 1 +#define mmDAGB5_RD_VC4_CNTL 0x3020 +#define mmDAGB5_RD_VC4_CNTL_BASE_IDX 1 +#define mmDAGB5_RD_VC5_CNTL 0x3021 +#define mmDAGB5_RD_VC5_CNTL_BASE_IDX 1 +#define mmDAGB5_RD_VC6_CNTL 0x3022 +#define mmDAGB5_RD_VC6_CNTL_BASE_IDX 1 +#define mmDAGB5_RD_VC7_CNTL 0x3023 +#define mmDAGB5_RD_VC7_CNTL_BASE_IDX 1 +#define mmDAGB5_RD_CNTL_MISC 0x3024 +#define mmDAGB5_RD_CNTL_MISC_BASE_IDX 1 +#define mmDAGB5_RD_TLB_CREDIT 0x3025 +#define mmDAGB5_RD_TLB_CREDIT_BASE_IDX 1 +#define mmDAGB5_RDCLI_ASK_PENDING 0x3026 +#define mmDAGB5_RDCLI_ASK_PENDING_BASE_IDX 1 +#define mmDAGB5_RDCLI_GO_PENDING 0x3027 +#define mmDAGB5_RDCLI_GO_PENDING_BASE_IDX 1 +#define mmDAGB5_RDCLI_GBLSEND_PENDING 0x3028 +#define mmDAGB5_RDCLI_GBLSEND_PENDING_BASE_IDX 1 +#define mmDAGB5_RDCLI_TLB_PENDING 0x3029 +#define 
mmDAGB5_RDCLI_TLB_PENDING_BASE_IDX 1 +#define mmDAGB5_RDCLI_OARB_PENDING 0x302a +#define mmDAGB5_RDCLI_OARB_PENDING_BASE_IDX 1 +#define mmDAGB5_RDCLI_OSD_PENDING 0x302b +#define mmDAGB5_RDCLI_OSD_PENDING_BASE_IDX 1 +#define mmDAGB5_WRCLI0 0x302c +#define mmDAGB5_WRCLI0_BASE_IDX 1 +#define mmDAGB5_WRCLI1 0x302d +#define mmDAGB5_WRCLI1_BASE_IDX 1 +#define mmDAGB5_WRCLI2 0x302e +#define mmDAGB5_WRCLI2_BASE_IDX 1 +#define mmDAGB5_WRCLI3 0x302f +#define mmDAGB5_WRCLI3_BASE_IDX 1 +#define mmDAGB5_WRCLI4 0x3030 +#define mmDAGB5_WRCLI4_BASE_IDX 1 +#define mmDAGB5_WRCLI5 0x3031 +#define mmDAGB5_WRCLI5_BASE_IDX 1 +#define mmDAGB5_WRCLI6 0x3032 +#define mmDAGB5_WRCLI6_BASE_IDX 1 +#define mmDAGB5_WRCLI7 0x3033 +#define mmDAGB5_WRCLI7_BASE_IDX 1 +#define mmDAGB5_WRCLI8 0x3034 +#define mmDAGB5_WRCLI8_BASE_IDX 1 +#define mmDAGB5_WRCLI9 0x3035 +#define mmDAGB5_WRCLI9_BASE_IDX 1 +#define mmDAGB5_WRCLI10 0x3036 +#define mmDAGB5_WRCLI10_BASE_IDX 1 +#define mmDAGB5_WRCLI11 0x3037 +#define mmDAGB5_WRCLI11_BASE_IDX 1 +#define mmDAGB5_WRCLI12 0x3038 +#define mmDAGB5_WRCLI12_BASE_IDX 1 +#define mmDAGB5_WRCLI13 0x3039 +#define mmDAGB5_WRCLI13_BASE_IDX 1 +#define mmDAGB5_WRCLI14 0x303a +#define mmDAGB5_WRCLI14_BASE_IDX 1 +#define mmDAGB5_WRCLI15 0x303b +#define mmDAGB5_WRCLI15_BASE_IDX 1 +#define mmDAGB5_WR_CNTL 0x303c +#define mmDAGB5_WR_CNTL_BASE_IDX 1 +#define mmDAGB5_WR_GMI_CNTL 0x303d +#define mmDAGB5_WR_GMI_CNTL_BASE_IDX 1 +#define mmDAGB5_WR_ADDR_DAGB 0x303e +#define mmDAGB5_WR_ADDR_DAGB_BASE_IDX 1 +#define mmDAGB5_WR_OUTPUT_DAGB_MAX_BURST 0x303f +#define mmDAGB5_WR_OUTPUT_DAGB_MAX_BURST_BASE_IDX 1 +#define mmDAGB5_WR_OUTPUT_DAGB_LAZY_TIMER 0x3040 +#define mmDAGB5_WR_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 1 +#define mmDAGB5_WR_CGTT_CLK_CTRL 0x3041 +#define mmDAGB5_WR_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB5_L1TLB_WR_CGTT_CLK_CTRL 0x3042 +#define mmDAGB5_L1TLB_WR_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB5_ATCVM_WR_CGTT_CLK_CTRL 0x3043 +#define mmDAGB5_ATCVM_WR_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB5_WR_ADDR_DAGB_MAX_BURST0 0x3044 +#define mmDAGB5_WR_ADDR_DAGB_MAX_BURST0_BASE_IDX 1 +#define mmDAGB5_WR_ADDR_DAGB_LAZY_TIMER0 0x3045 +#define mmDAGB5_WR_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 1 +#define mmDAGB5_WR_ADDR_DAGB_MAX_BURST1 0x3046 +#define mmDAGB5_WR_ADDR_DAGB_MAX_BURST1_BASE_IDX 1 +#define mmDAGB5_WR_ADDR_DAGB_LAZY_TIMER1 0x3047 +#define mmDAGB5_WR_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 1 +#define mmDAGB5_WR_DATA_DAGB 0x3048 +#define mmDAGB5_WR_DATA_DAGB_BASE_IDX 1 +#define mmDAGB5_WR_DATA_DAGB_MAX_BURST0 0x3049 +#define mmDAGB5_WR_DATA_DAGB_MAX_BURST0_BASE_IDX 1 +#define mmDAGB5_WR_DATA_DAGB_LAZY_TIMER0 0x304a +#define mmDAGB5_WR_DATA_DAGB_LAZY_TIMER0_BASE_IDX 1 +#define mmDAGB5_WR_DATA_DAGB_MAX_BURST1 0x304b +#define mmDAGB5_WR_DATA_DAGB_MAX_BURST1_BASE_IDX 1 +#define mmDAGB5_WR_DATA_DAGB_LAZY_TIMER1 0x304c +#define mmDAGB5_WR_DATA_DAGB_LAZY_TIMER1_BASE_IDX 1 +#define mmDAGB5_WR_VC0_CNTL 0x304d +#define mmDAGB5_WR_VC0_CNTL_BASE_IDX 1 +#define mmDAGB5_WR_VC1_CNTL 0x304e +#define mmDAGB5_WR_VC1_CNTL_BASE_IDX 1 +#define mmDAGB5_WR_VC2_CNTL 0x304f +#define mmDAGB5_WR_VC2_CNTL_BASE_IDX 1 +#define mmDAGB5_WR_VC3_CNTL 0x3050 +#define mmDAGB5_WR_VC3_CNTL_BASE_IDX 1 +#define mmDAGB5_WR_VC4_CNTL 0x3051 +#define mmDAGB5_WR_VC4_CNTL_BASE_IDX 1 +#define mmDAGB5_WR_VC5_CNTL 0x3052 +#define mmDAGB5_WR_VC5_CNTL_BASE_IDX 1 +#define mmDAGB5_WR_VC6_CNTL 0x3053 +#define mmDAGB5_WR_VC6_CNTL_BASE_IDX 1 +#define mmDAGB5_WR_VC7_CNTL 0x3054 +#define mmDAGB5_WR_VC7_CNTL_BASE_IDX 1 +#define mmDAGB5_WR_CNTL_MISC 0x3055 +#define 
mmDAGB5_WR_CNTL_MISC_BASE_IDX 1 +#define mmDAGB5_WR_TLB_CREDIT 0x3056 +#define mmDAGB5_WR_TLB_CREDIT_BASE_IDX 1 +#define mmDAGB5_WR_DATA_CREDIT 0x3057 +#define mmDAGB5_WR_DATA_CREDIT_BASE_IDX 1 +#define mmDAGB5_WR_MISC_CREDIT 0x3058 +#define mmDAGB5_WR_MISC_CREDIT_BASE_IDX 1 +#define mmDAGB5_WRCLI_ASK_PENDING 0x305d +#define mmDAGB5_WRCLI_ASK_PENDING_BASE_IDX 1 +#define mmDAGB5_WRCLI_GO_PENDING 0x305e +#define mmDAGB5_WRCLI_GO_PENDING_BASE_IDX 1 +#define mmDAGB5_WRCLI_GBLSEND_PENDING 0x305f +#define mmDAGB5_WRCLI_GBLSEND_PENDING_BASE_IDX 1 +#define mmDAGB5_WRCLI_TLB_PENDING 0x3060 +#define mmDAGB5_WRCLI_TLB_PENDING_BASE_IDX 1 +#define mmDAGB5_WRCLI_OARB_PENDING 0x3061 +#define mmDAGB5_WRCLI_OARB_PENDING_BASE_IDX 1 +#define mmDAGB5_WRCLI_OSD_PENDING 0x3062 +#define mmDAGB5_WRCLI_OSD_PENDING_BASE_IDX 1 +#define mmDAGB5_WRCLI_DBUS_ASK_PENDING 0x3063 +#define mmDAGB5_WRCLI_DBUS_ASK_PENDING_BASE_IDX 1 +#define mmDAGB5_WRCLI_DBUS_GO_PENDING 0x3064 +#define mmDAGB5_WRCLI_DBUS_GO_PENDING_BASE_IDX 1 +#define mmDAGB5_DAGB_DLY 0x3065 +#define mmDAGB5_DAGB_DLY_BASE_IDX 1 +#define mmDAGB5_CNTL_MISC 0x3066 +#define mmDAGB5_CNTL_MISC_BASE_IDX 1 +#define mmDAGB5_CNTL_MISC2 0x3067 +#define mmDAGB5_CNTL_MISC2_BASE_IDX 1 +#define mmDAGB5_FIFO_EMPTY 0x3068 +#define mmDAGB5_FIFO_EMPTY_BASE_IDX 1 +#define mmDAGB5_FIFO_FULL 0x3069 +#define mmDAGB5_FIFO_FULL_BASE_IDX 1 +#define mmDAGB5_WR_CREDITS_FULL 0x306a +#define mmDAGB5_WR_CREDITS_FULL_BASE_IDX 1 +#define mmDAGB5_RD_CREDITS_FULL 0x306b +#define mmDAGB5_RD_CREDITS_FULL_BASE_IDX 1 +#define mmDAGB5_PERFCOUNTER_LO 0x306c +#define mmDAGB5_PERFCOUNTER_LO_BASE_IDX 1 +#define mmDAGB5_PERFCOUNTER_HI 0x306d +#define mmDAGB5_PERFCOUNTER_HI_BASE_IDX 1 +#define mmDAGB5_PERFCOUNTER0_CFG 0x306e +#define mmDAGB5_PERFCOUNTER0_CFG_BASE_IDX 1 +#define mmDAGB5_PERFCOUNTER1_CFG 0x306f +#define mmDAGB5_PERFCOUNTER1_CFG_BASE_IDX 1 +#define mmDAGB5_PERFCOUNTER2_CFG 0x3070 +#define mmDAGB5_PERFCOUNTER2_CFG_BASE_IDX 1 +#define mmDAGB5_PERFCOUNTER_RSLT_CNTL 0x3071 +#define mmDAGB5_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1 +#define mmDAGB5_RESERVE0 0x3072 +#define mmDAGB5_RESERVE0_BASE_IDX 1 +#define mmDAGB5_RESERVE1 0x3073 +#define mmDAGB5_RESERVE1_BASE_IDX 1 +#define mmDAGB5_RESERVE2 0x3074 +#define mmDAGB5_RESERVE2_BASE_IDX 1 +#define mmDAGB5_RESERVE3 0x3075 +#define mmDAGB5_RESERVE3_BASE_IDX 1 +#define mmDAGB5_RESERVE4 0x3076 +#define mmDAGB5_RESERVE4_BASE_IDX 1 +#define mmDAGB5_RESERVE5 0x3077 +#define mmDAGB5_RESERVE5_BASE_IDX 1 +#define mmDAGB5_RESERVE6 0x3078 +#define mmDAGB5_RESERVE6_BASE_IDX 1 +#define mmDAGB5_RESERVE7 0x3079 +#define mmDAGB5_RESERVE7_BASE_IDX 1 +#define mmDAGB5_RESERVE8 0x307a +#define mmDAGB5_RESERVE8_BASE_IDX 1 +#define mmDAGB5_RESERVE9 0x307b +#define mmDAGB5_RESERVE9_BASE_IDX 1 +#define mmDAGB5_RESERVE10 0x307c +#define mmDAGB5_RESERVE10_BASE_IDX 1 +#define mmDAGB5_RESERVE11 0x307d +#define mmDAGB5_RESERVE11_BASE_IDX 1 +#define mmDAGB5_RESERVE12 0x307e +#define mmDAGB5_RESERVE12_BASE_IDX 1 +#define mmDAGB5_RESERVE13 0x307f +#define mmDAGB5_RESERVE13_BASE_IDX 1 + + +// addressBlock: mmhub_dagb_dagbdec6 +// base address: 0x74200 +#define mmDAGB6_RDCLI0 0x3080 +#define mmDAGB6_RDCLI0_BASE_IDX 1 +#define mmDAGB6_RDCLI1 0x3081 +#define mmDAGB6_RDCLI1_BASE_IDX 1 +#define mmDAGB6_RDCLI2 0x3082 +#define mmDAGB6_RDCLI2_BASE_IDX 1 +#define mmDAGB6_RDCLI3 0x3083 +#define mmDAGB6_RDCLI3_BASE_IDX 1 +#define mmDAGB6_RDCLI4 0x3084 +#define mmDAGB6_RDCLI4_BASE_IDX 1 +#define mmDAGB6_RDCLI5 0x3085 +#define mmDAGB6_RDCLI5_BASE_IDX 1 +#define mmDAGB6_RDCLI6 0x3086 +#define 
mmDAGB6_RDCLI6_BASE_IDX 1 +#define mmDAGB6_RDCLI7 0x3087 +#define mmDAGB6_RDCLI7_BASE_IDX 1 +#define mmDAGB6_RDCLI8 0x3088 +#define mmDAGB6_RDCLI8_BASE_IDX 1 +#define mmDAGB6_RDCLI9 0x3089 +#define mmDAGB6_RDCLI9_BASE_IDX 1 +#define mmDAGB6_RDCLI10 0x308a +#define mmDAGB6_RDCLI10_BASE_IDX 1 +#define mmDAGB6_RDCLI11 0x308b +#define mmDAGB6_RDCLI11_BASE_IDX 1 +#define mmDAGB6_RDCLI12 0x308c +#define mmDAGB6_RDCLI12_BASE_IDX 1 +#define mmDAGB6_RDCLI13 0x308d +#define mmDAGB6_RDCLI13_BASE_IDX 1 +#define mmDAGB6_RDCLI14 0x308e +#define mmDAGB6_RDCLI14_BASE_IDX 1 +#define mmDAGB6_RDCLI15 0x308f +#define mmDAGB6_RDCLI15_BASE_IDX 1 +#define mmDAGB6_RD_CNTL 0x3090 +#define mmDAGB6_RD_CNTL_BASE_IDX 1 +#define mmDAGB6_RD_GMI_CNTL 0x3091 +#define mmDAGB6_RD_GMI_CNTL_BASE_IDX 1 +#define mmDAGB6_RD_ADDR_DAGB 0x3092 +#define mmDAGB6_RD_ADDR_DAGB_BASE_IDX 1 +#define mmDAGB6_RD_OUTPUT_DAGB_MAX_BURST 0x3093 +#define mmDAGB6_RD_OUTPUT_DAGB_MAX_BURST_BASE_IDX 1 +#define mmDAGB6_RD_OUTPUT_DAGB_LAZY_TIMER 0x3094 +#define mmDAGB6_RD_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 1 +#define mmDAGB6_RD_CGTT_CLK_CTRL 0x3095 +#define mmDAGB6_RD_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB6_L1TLB_RD_CGTT_CLK_CTRL 0x3096 +#define mmDAGB6_L1TLB_RD_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB6_ATCVM_RD_CGTT_CLK_CTRL 0x3097 +#define mmDAGB6_ATCVM_RD_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB6_RD_ADDR_DAGB_MAX_BURST0 0x3098 +#define mmDAGB6_RD_ADDR_DAGB_MAX_BURST0_BASE_IDX 1 +#define mmDAGB6_RD_ADDR_DAGB_LAZY_TIMER0 0x3099 +#define mmDAGB6_RD_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 1 +#define mmDAGB6_RD_ADDR_DAGB_MAX_BURST1 0x309a +#define mmDAGB6_RD_ADDR_DAGB_MAX_BURST1_BASE_IDX 1 +#define mmDAGB6_RD_ADDR_DAGB_LAZY_TIMER1 0x309b +#define mmDAGB6_RD_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 1 +#define mmDAGB6_RD_VC0_CNTL 0x309c +#define mmDAGB6_RD_VC0_CNTL_BASE_IDX 1 +#define mmDAGB6_RD_VC1_CNTL 0x309d +#define mmDAGB6_RD_VC1_CNTL_BASE_IDX 1 +#define mmDAGB6_RD_VC2_CNTL 0x309e +#define mmDAGB6_RD_VC2_CNTL_BASE_IDX 1 +#define mmDAGB6_RD_VC3_CNTL 0x309f +#define mmDAGB6_RD_VC3_CNTL_BASE_IDX 1 +#define mmDAGB6_RD_VC4_CNTL 0x30a0 +#define mmDAGB6_RD_VC4_CNTL_BASE_IDX 1 +#define mmDAGB6_RD_VC5_CNTL 0x30a1 +#define mmDAGB6_RD_VC5_CNTL_BASE_IDX 1 +#define mmDAGB6_RD_VC6_CNTL 0x30a2 +#define mmDAGB6_RD_VC6_CNTL_BASE_IDX 1 +#define mmDAGB6_RD_VC7_CNTL 0x30a3 +#define mmDAGB6_RD_VC7_CNTL_BASE_IDX 1 +#define mmDAGB6_RD_CNTL_MISC 0x30a4 +#define mmDAGB6_RD_CNTL_MISC_BASE_IDX 1 +#define mmDAGB6_RD_TLB_CREDIT 0x30a5 +#define mmDAGB6_RD_TLB_CREDIT_BASE_IDX 1 +#define mmDAGB6_RDCLI_ASK_PENDING 0x30a6 +#define mmDAGB6_RDCLI_ASK_PENDING_BASE_IDX 1 +#define mmDAGB6_RDCLI_GO_PENDING 0x30a7 +#define mmDAGB6_RDCLI_GO_PENDING_BASE_IDX 1 +#define mmDAGB6_RDCLI_GBLSEND_PENDING 0x30a8 +#define mmDAGB6_RDCLI_GBLSEND_PENDING_BASE_IDX 1 +#define mmDAGB6_RDCLI_TLB_PENDING 0x30a9 +#define mmDAGB6_RDCLI_TLB_PENDING_BASE_IDX 1 +#define mmDAGB6_RDCLI_OARB_PENDING 0x30aa +#define mmDAGB6_RDCLI_OARB_PENDING_BASE_IDX 1 +#define mmDAGB6_RDCLI_OSD_PENDING 0x30ab +#define mmDAGB6_RDCLI_OSD_PENDING_BASE_IDX 1 +#define mmDAGB6_WRCLI0 0x30ac +#define mmDAGB6_WRCLI0_BASE_IDX 1 +#define mmDAGB6_WRCLI1 0x30ad +#define mmDAGB6_WRCLI1_BASE_IDX 1 +#define mmDAGB6_WRCLI2 0x30ae +#define mmDAGB6_WRCLI2_BASE_IDX 1 +#define mmDAGB6_WRCLI3 0x30af +#define mmDAGB6_WRCLI3_BASE_IDX 1 +#define mmDAGB6_WRCLI4 0x30b0 +#define mmDAGB6_WRCLI4_BASE_IDX 1 +#define mmDAGB6_WRCLI5 0x30b1 +#define mmDAGB6_WRCLI5_BASE_IDX 1 +#define mmDAGB6_WRCLI6 0x30b2 +#define mmDAGB6_WRCLI6_BASE_IDX 1 +#define mmDAGB6_WRCLI7 0x30b3 +#define 
mmDAGB6_WRCLI7_BASE_IDX 1 +#define mmDAGB6_WRCLI8 0x30b4 +#define mmDAGB6_WRCLI8_BASE_IDX 1 +#define mmDAGB6_WRCLI9 0x30b5 +#define mmDAGB6_WRCLI9_BASE_IDX 1 +#define mmDAGB6_WRCLI10 0x30b6 +#define mmDAGB6_WRCLI10_BASE_IDX 1 +#define mmDAGB6_WRCLI11 0x30b7 +#define mmDAGB6_WRCLI11_BASE_IDX 1 +#define mmDAGB6_WRCLI12 0x30b8 +#define mmDAGB6_WRCLI12_BASE_IDX 1 +#define mmDAGB6_WRCLI13 0x30b9 +#define mmDAGB6_WRCLI13_BASE_IDX 1 +#define mmDAGB6_WRCLI14 0x30ba +#define mmDAGB6_WRCLI14_BASE_IDX 1 +#define mmDAGB6_WRCLI15 0x30bb +#define mmDAGB6_WRCLI15_BASE_IDX 1 +#define mmDAGB6_WR_CNTL 0x30bc +#define mmDAGB6_WR_CNTL_BASE_IDX 1 +#define mmDAGB6_WR_GMI_CNTL 0x30bd +#define mmDAGB6_WR_GMI_CNTL_BASE_IDX 1 +#define mmDAGB6_WR_ADDR_DAGB 0x30be +#define mmDAGB6_WR_ADDR_DAGB_BASE_IDX 1 +#define mmDAGB6_WR_OUTPUT_DAGB_MAX_BURST 0x30bf +#define mmDAGB6_WR_OUTPUT_DAGB_MAX_BURST_BASE_IDX 1 +#define mmDAGB6_WR_OUTPUT_DAGB_LAZY_TIMER 0x30c0 +#define mmDAGB6_WR_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 1 +#define mmDAGB6_WR_CGTT_CLK_CTRL 0x30c1 +#define mmDAGB6_WR_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB6_L1TLB_WR_CGTT_CLK_CTRL 0x30c2 +#define mmDAGB6_L1TLB_WR_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB6_ATCVM_WR_CGTT_CLK_CTRL 0x30c3 +#define mmDAGB6_ATCVM_WR_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB6_WR_ADDR_DAGB_MAX_BURST0 0x30c4 +#define mmDAGB6_WR_ADDR_DAGB_MAX_BURST0_BASE_IDX 1 +#define mmDAGB6_WR_ADDR_DAGB_LAZY_TIMER0 0x30c5 +#define mmDAGB6_WR_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 1 +#define mmDAGB6_WR_ADDR_DAGB_MAX_BURST1 0x30c6 +#define mmDAGB6_WR_ADDR_DAGB_MAX_BURST1_BASE_IDX 1 +#define mmDAGB6_WR_ADDR_DAGB_LAZY_TIMER1 0x30c7 +#define mmDAGB6_WR_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 1 +#define mmDAGB6_WR_DATA_DAGB 0x30c8 +#define mmDAGB6_WR_DATA_DAGB_BASE_IDX 1 +#define mmDAGB6_WR_DATA_DAGB_MAX_BURST0 0x30c9 +#define mmDAGB6_WR_DATA_DAGB_MAX_BURST0_BASE_IDX 1 +#define mmDAGB6_WR_DATA_DAGB_LAZY_TIMER0 0x30ca +#define mmDAGB6_WR_DATA_DAGB_LAZY_TIMER0_BASE_IDX 1 +#define mmDAGB6_WR_DATA_DAGB_MAX_BURST1 0x30cb +#define mmDAGB6_WR_DATA_DAGB_MAX_BURST1_BASE_IDX 1 +#define mmDAGB6_WR_DATA_DAGB_LAZY_TIMER1 0x30cc +#define mmDAGB6_WR_DATA_DAGB_LAZY_TIMER1_BASE_IDX 1 +#define mmDAGB6_WR_VC0_CNTL 0x30cd +#define mmDAGB6_WR_VC0_CNTL_BASE_IDX 1 +#define mmDAGB6_WR_VC1_CNTL 0x30ce +#define mmDAGB6_WR_VC1_CNTL_BASE_IDX 1 +#define mmDAGB6_WR_VC2_CNTL 0x30cf +#define mmDAGB6_WR_VC2_CNTL_BASE_IDX 1 +#define mmDAGB6_WR_VC3_CNTL 0x30d0 +#define mmDAGB6_WR_VC3_CNTL_BASE_IDX 1 +#define mmDAGB6_WR_VC4_CNTL 0x30d1 +#define mmDAGB6_WR_VC4_CNTL_BASE_IDX 1 +#define mmDAGB6_WR_VC5_CNTL 0x30d2 +#define mmDAGB6_WR_VC5_CNTL_BASE_IDX 1 +#define mmDAGB6_WR_VC6_CNTL 0x30d3 +#define mmDAGB6_WR_VC6_CNTL_BASE_IDX 1 +#define mmDAGB6_WR_VC7_CNTL 0x30d4 +#define mmDAGB6_WR_VC7_CNTL_BASE_IDX 1 +#define mmDAGB6_WR_CNTL_MISC 0x30d5 +#define mmDAGB6_WR_CNTL_MISC_BASE_IDX 1 +#define mmDAGB6_WR_TLB_CREDIT 0x30d6 +#define mmDAGB6_WR_TLB_CREDIT_BASE_IDX 1 +#define mmDAGB6_WR_DATA_CREDIT 0x30d7 +#define mmDAGB6_WR_DATA_CREDIT_BASE_IDX 1 +#define mmDAGB6_WR_MISC_CREDIT 0x30d8 +#define mmDAGB6_WR_MISC_CREDIT_BASE_IDX 1 +#define mmDAGB6_WRCLI_ASK_PENDING 0x30dd +#define mmDAGB6_WRCLI_ASK_PENDING_BASE_IDX 1 +#define mmDAGB6_WRCLI_GO_PENDING 0x30de +#define mmDAGB6_WRCLI_GO_PENDING_BASE_IDX 1 +#define mmDAGB6_WRCLI_GBLSEND_PENDING 0x30df +#define mmDAGB6_WRCLI_GBLSEND_PENDING_BASE_IDX 1 +#define mmDAGB6_WRCLI_TLB_PENDING 0x30e0 +#define mmDAGB6_WRCLI_TLB_PENDING_BASE_IDX 1 +#define mmDAGB6_WRCLI_OARB_PENDING 0x30e1 +#define mmDAGB6_WRCLI_OARB_PENDING_BASE_IDX 1 +#define 
mmDAGB6_WRCLI_OSD_PENDING 0x30e2 +#define mmDAGB6_WRCLI_OSD_PENDING_BASE_IDX 1 +#define mmDAGB6_WRCLI_DBUS_ASK_PENDING 0x30e3 +#define mmDAGB6_WRCLI_DBUS_ASK_PENDING_BASE_IDX 1 +#define mmDAGB6_WRCLI_DBUS_GO_PENDING 0x30e4 +#define mmDAGB6_WRCLI_DBUS_GO_PENDING_BASE_IDX 1 +#define mmDAGB6_DAGB_DLY 0x30e5 +#define mmDAGB6_DAGB_DLY_BASE_IDX 1 +#define mmDAGB6_CNTL_MISC 0x30e6 +#define mmDAGB6_CNTL_MISC_BASE_IDX 1 +#define mmDAGB6_CNTL_MISC2 0x30e7 +#define mmDAGB6_CNTL_MISC2_BASE_IDX 1 +#define mmDAGB6_FIFO_EMPTY 0x30e8 +#define mmDAGB6_FIFO_EMPTY_BASE_IDX 1 +#define mmDAGB6_FIFO_FULL 0x30e9 +#define mmDAGB6_FIFO_FULL_BASE_IDX 1 +#define mmDAGB6_WR_CREDITS_FULL 0x30ea +#define mmDAGB6_WR_CREDITS_FULL_BASE_IDX 1 +#define mmDAGB6_RD_CREDITS_FULL 0x30eb +#define mmDAGB6_RD_CREDITS_FULL_BASE_IDX 1 +#define mmDAGB6_PERFCOUNTER_LO 0x30ec +#define mmDAGB6_PERFCOUNTER_LO_BASE_IDX 1 +#define mmDAGB6_PERFCOUNTER_HI 0x30ed +#define mmDAGB6_PERFCOUNTER_HI_BASE_IDX 1 +#define mmDAGB6_PERFCOUNTER0_CFG 0x30ee +#define mmDAGB6_PERFCOUNTER0_CFG_BASE_IDX 1 +#define mmDAGB6_PERFCOUNTER1_CFG 0x30ef +#define mmDAGB6_PERFCOUNTER1_CFG_BASE_IDX 1 +#define mmDAGB6_PERFCOUNTER2_CFG 0x30f0 +#define mmDAGB6_PERFCOUNTER2_CFG_BASE_IDX 1 +#define mmDAGB6_PERFCOUNTER_RSLT_CNTL 0x30f1 +#define mmDAGB6_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1 +#define mmDAGB6_RESERVE0 0x30f2 +#define mmDAGB6_RESERVE0_BASE_IDX 1 +#define mmDAGB6_RESERVE1 0x30f3 +#define mmDAGB6_RESERVE1_BASE_IDX 1 +#define mmDAGB6_RESERVE2 0x30f4 +#define mmDAGB6_RESERVE2_BASE_IDX 1 +#define mmDAGB6_RESERVE3 0x30f5 +#define mmDAGB6_RESERVE3_BASE_IDX 1 +#define mmDAGB6_RESERVE4 0x30f6 +#define mmDAGB6_RESERVE4_BASE_IDX 1 +#define mmDAGB6_RESERVE5 0x30f7 +#define mmDAGB6_RESERVE5_BASE_IDX 1 +#define mmDAGB6_RESERVE6 0x30f8 +#define mmDAGB6_RESERVE6_BASE_IDX 1 +#define mmDAGB6_RESERVE7 0x30f9 +#define mmDAGB6_RESERVE7_BASE_IDX 1 +#define mmDAGB6_RESERVE8 0x30fa +#define mmDAGB6_RESERVE8_BASE_IDX 1 +#define mmDAGB6_RESERVE9 0x30fb +#define mmDAGB6_RESERVE9_BASE_IDX 1 +#define mmDAGB6_RESERVE10 0x30fc +#define mmDAGB6_RESERVE10_BASE_IDX 1 +#define mmDAGB6_RESERVE11 0x30fd +#define mmDAGB6_RESERVE11_BASE_IDX 1 +#define mmDAGB6_RESERVE12 0x30fe +#define mmDAGB6_RESERVE12_BASE_IDX 1 +#define mmDAGB6_RESERVE13 0x30ff +#define mmDAGB6_RESERVE13_BASE_IDX 1 + + +// addressBlock: mmhub_dagb_dagbdec7 +// base address: 0x74400 +#define mmDAGB7_RDCLI0 0x3100 +#define mmDAGB7_RDCLI0_BASE_IDX 1 +#define mmDAGB7_RDCLI1 0x3101 +#define mmDAGB7_RDCLI1_BASE_IDX 1 +#define mmDAGB7_RDCLI2 0x3102 +#define mmDAGB7_RDCLI2_BASE_IDX 1 +#define mmDAGB7_RDCLI3 0x3103 +#define mmDAGB7_RDCLI3_BASE_IDX 1 +#define mmDAGB7_RDCLI4 0x3104 +#define mmDAGB7_RDCLI4_BASE_IDX 1 +#define mmDAGB7_RDCLI5 0x3105 +#define mmDAGB7_RDCLI5_BASE_IDX 1 +#define mmDAGB7_RDCLI6 0x3106 +#define mmDAGB7_RDCLI6_BASE_IDX 1 +#define mmDAGB7_RDCLI7 0x3107 +#define mmDAGB7_RDCLI7_BASE_IDX 1 +#define mmDAGB7_RDCLI8 0x3108 +#define mmDAGB7_RDCLI8_BASE_IDX 1 +#define mmDAGB7_RDCLI9 0x3109 +#define mmDAGB7_RDCLI9_BASE_IDX 1 +#define mmDAGB7_RDCLI10 0x310a +#define mmDAGB7_RDCLI10_BASE_IDX 1 +#define mmDAGB7_RDCLI11 0x310b +#define mmDAGB7_RDCLI11_BASE_IDX 1 +#define mmDAGB7_RDCLI12 0x310c +#define mmDAGB7_RDCLI12_BASE_IDX 1 +#define mmDAGB7_RDCLI13 0x310d +#define mmDAGB7_RDCLI13_BASE_IDX 1 +#define mmDAGB7_RDCLI14 0x310e +#define mmDAGB7_RDCLI14_BASE_IDX 1 +#define mmDAGB7_RDCLI15 0x310f +#define mmDAGB7_RDCLI15_BASE_IDX 1 +#define mmDAGB7_RD_CNTL 0x3110 +#define mmDAGB7_RD_CNTL_BASE_IDX 1 +#define mmDAGB7_RD_GMI_CNTL 0x3111 
+#define mmDAGB7_RD_GMI_CNTL_BASE_IDX 1 +#define mmDAGB7_RD_ADDR_DAGB 0x3112 +#define mmDAGB7_RD_ADDR_DAGB_BASE_IDX 1 +#define mmDAGB7_RD_OUTPUT_DAGB_MAX_BURST 0x3113 +#define mmDAGB7_RD_OUTPUT_DAGB_MAX_BURST_BASE_IDX 1 +#define mmDAGB7_RD_OUTPUT_DAGB_LAZY_TIMER 0x3114 +#define mmDAGB7_RD_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 1 +#define mmDAGB7_RD_CGTT_CLK_CTRL 0x3115 +#define mmDAGB7_RD_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB7_L1TLB_RD_CGTT_CLK_CTRL 0x3116 +#define mmDAGB7_L1TLB_RD_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB7_ATCVM_RD_CGTT_CLK_CTRL 0x3117 +#define mmDAGB7_ATCVM_RD_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB7_RD_ADDR_DAGB_MAX_BURST0 0x3118 +#define mmDAGB7_RD_ADDR_DAGB_MAX_BURST0_BASE_IDX 1 +#define mmDAGB7_RD_ADDR_DAGB_LAZY_TIMER0 0x3119 +#define mmDAGB7_RD_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 1 +#define mmDAGB7_RD_ADDR_DAGB_MAX_BURST1 0x311a +#define mmDAGB7_RD_ADDR_DAGB_MAX_BURST1_BASE_IDX 1 +#define mmDAGB7_RD_ADDR_DAGB_LAZY_TIMER1 0x311b +#define mmDAGB7_RD_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 1 +#define mmDAGB7_RD_VC0_CNTL 0x311c +#define mmDAGB7_RD_VC0_CNTL_BASE_IDX 1 +#define mmDAGB7_RD_VC1_CNTL 0x311d +#define mmDAGB7_RD_VC1_CNTL_BASE_IDX 1 +#define mmDAGB7_RD_VC2_CNTL 0x311e +#define mmDAGB7_RD_VC2_CNTL_BASE_IDX 1 +#define mmDAGB7_RD_VC3_CNTL 0x311f +#define mmDAGB7_RD_VC3_CNTL_BASE_IDX 1 +#define mmDAGB7_RD_VC4_CNTL 0x3120 +#define mmDAGB7_RD_VC4_CNTL_BASE_IDX 1 +#define mmDAGB7_RD_VC5_CNTL 0x3121 +#define mmDAGB7_RD_VC5_CNTL_BASE_IDX 1 +#define mmDAGB7_RD_VC6_CNTL 0x3122 +#define mmDAGB7_RD_VC6_CNTL_BASE_IDX 1 +#define mmDAGB7_RD_VC7_CNTL 0x3123 +#define mmDAGB7_RD_VC7_CNTL_BASE_IDX 1 +#define mmDAGB7_RD_CNTL_MISC 0x3124 +#define mmDAGB7_RD_CNTL_MISC_BASE_IDX 1 +#define mmDAGB7_RD_TLB_CREDIT 0x3125 +#define mmDAGB7_RD_TLB_CREDIT_BASE_IDX 1 +#define mmDAGB7_RDCLI_ASK_PENDING 0x3126 +#define mmDAGB7_RDCLI_ASK_PENDING_BASE_IDX 1 +#define mmDAGB7_RDCLI_GO_PENDING 0x3127 +#define mmDAGB7_RDCLI_GO_PENDING_BASE_IDX 1 +#define mmDAGB7_RDCLI_GBLSEND_PENDING 0x3128 +#define mmDAGB7_RDCLI_GBLSEND_PENDING_BASE_IDX 1 +#define mmDAGB7_RDCLI_TLB_PENDING 0x3129 +#define mmDAGB7_RDCLI_TLB_PENDING_BASE_IDX 1 +#define mmDAGB7_RDCLI_OARB_PENDING 0x312a +#define mmDAGB7_RDCLI_OARB_PENDING_BASE_IDX 1 +#define mmDAGB7_RDCLI_OSD_PENDING 0x312b +#define mmDAGB7_RDCLI_OSD_PENDING_BASE_IDX 1 +#define mmDAGB7_WRCLI0 0x312c +#define mmDAGB7_WRCLI0_BASE_IDX 1 +#define mmDAGB7_WRCLI1 0x312d +#define mmDAGB7_WRCLI1_BASE_IDX 1 +#define mmDAGB7_WRCLI2 0x312e +#define mmDAGB7_WRCLI2_BASE_IDX 1 +#define mmDAGB7_WRCLI3 0x312f +#define mmDAGB7_WRCLI3_BASE_IDX 1 +#define mmDAGB7_WRCLI4 0x3130 +#define mmDAGB7_WRCLI4_BASE_IDX 1 +#define mmDAGB7_WRCLI5 0x3131 +#define mmDAGB7_WRCLI5_BASE_IDX 1 +#define mmDAGB7_WRCLI6 0x3132 +#define mmDAGB7_WRCLI6_BASE_IDX 1 +#define mmDAGB7_WRCLI7 0x3133 +#define mmDAGB7_WRCLI7_BASE_IDX 1 +#define mmDAGB7_WRCLI8 0x3134 +#define mmDAGB7_WRCLI8_BASE_IDX 1 +#define mmDAGB7_WRCLI9 0x3135 +#define mmDAGB7_WRCLI9_BASE_IDX 1 +#define mmDAGB7_WRCLI10 0x3136 +#define mmDAGB7_WRCLI10_BASE_IDX 1 +#define mmDAGB7_WRCLI11 0x3137 +#define mmDAGB7_WRCLI11_BASE_IDX 1 +#define mmDAGB7_WRCLI12 0x3138 +#define mmDAGB7_WRCLI12_BASE_IDX 1 +#define mmDAGB7_WRCLI13 0x3139 +#define mmDAGB7_WRCLI13_BASE_IDX 1 +#define mmDAGB7_WRCLI14 0x313a +#define mmDAGB7_WRCLI14_BASE_IDX 1 +#define mmDAGB7_WRCLI15 0x313b +#define mmDAGB7_WRCLI15_BASE_IDX 1 +#define mmDAGB7_WR_CNTL 0x313c +#define mmDAGB7_WR_CNTL_BASE_IDX 1 +#define mmDAGB7_WR_GMI_CNTL 0x313d +#define mmDAGB7_WR_GMI_CNTL_BASE_IDX 1 +#define mmDAGB7_WR_ADDR_DAGB 
0x313e +#define mmDAGB7_WR_ADDR_DAGB_BASE_IDX 1 +#define mmDAGB7_WR_OUTPUT_DAGB_MAX_BURST 0x313f +#define mmDAGB7_WR_OUTPUT_DAGB_MAX_BURST_BASE_IDX 1 +#define mmDAGB7_WR_OUTPUT_DAGB_LAZY_TIMER 0x3140 +#define mmDAGB7_WR_OUTPUT_DAGB_LAZY_TIMER_BASE_IDX 1 +#define mmDAGB7_WR_CGTT_CLK_CTRL 0x3141 +#define mmDAGB7_WR_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB7_L1TLB_WR_CGTT_CLK_CTRL 0x3142 +#define mmDAGB7_L1TLB_WR_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB7_ATCVM_WR_CGTT_CLK_CTRL 0x3143 +#define mmDAGB7_ATCVM_WR_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmDAGB7_WR_ADDR_DAGB_MAX_BURST0 0x3144 +#define mmDAGB7_WR_ADDR_DAGB_MAX_BURST0_BASE_IDX 1 +#define mmDAGB7_WR_ADDR_DAGB_LAZY_TIMER0 0x3145 +#define mmDAGB7_WR_ADDR_DAGB_LAZY_TIMER0_BASE_IDX 1 +#define mmDAGB7_WR_ADDR_DAGB_MAX_BURST1 0x3146 +#define mmDAGB7_WR_ADDR_DAGB_MAX_BURST1_BASE_IDX 1 +#define mmDAGB7_WR_ADDR_DAGB_LAZY_TIMER1 0x3147 +#define mmDAGB7_WR_ADDR_DAGB_LAZY_TIMER1_BASE_IDX 1 +#define mmDAGB7_WR_DATA_DAGB 0x3148 +#define mmDAGB7_WR_DATA_DAGB_BASE_IDX 1 +#define mmDAGB7_WR_DATA_DAGB_MAX_BURST0 0x3149 +#define mmDAGB7_WR_DATA_DAGB_MAX_BURST0_BASE_IDX 1 +#define mmDAGB7_WR_DATA_DAGB_LAZY_TIMER0 0x314a +#define mmDAGB7_WR_DATA_DAGB_LAZY_TIMER0_BASE_IDX 1 +#define mmDAGB7_WR_DATA_DAGB_MAX_BURST1 0x314b +#define mmDAGB7_WR_DATA_DAGB_MAX_BURST1_BASE_IDX 1 +#define mmDAGB7_WR_DATA_DAGB_LAZY_TIMER1 0x314c +#define mmDAGB7_WR_DATA_DAGB_LAZY_TIMER1_BASE_IDX 1 +#define mmDAGB7_WR_VC0_CNTL 0x314d +#define mmDAGB7_WR_VC0_CNTL_BASE_IDX 1 +#define mmDAGB7_WR_VC1_CNTL 0x314e +#define mmDAGB7_WR_VC1_CNTL_BASE_IDX 1 +#define mmDAGB7_WR_VC2_CNTL 0x314f +#define mmDAGB7_WR_VC2_CNTL_BASE_IDX 1 +#define mmDAGB7_WR_VC3_CNTL 0x3150 +#define mmDAGB7_WR_VC3_CNTL_BASE_IDX 1 +#define mmDAGB7_WR_VC4_CNTL 0x3151 +#define mmDAGB7_WR_VC4_CNTL_BASE_IDX 1 +#define mmDAGB7_WR_VC5_CNTL 0x3152 +#define mmDAGB7_WR_VC5_CNTL_BASE_IDX 1 +#define mmDAGB7_WR_VC6_CNTL 0x3153 +#define mmDAGB7_WR_VC6_CNTL_BASE_IDX 1 +#define mmDAGB7_WR_VC7_CNTL 0x3154 +#define mmDAGB7_WR_VC7_CNTL_BASE_IDX 1 +#define mmDAGB7_WR_CNTL_MISC 0x3155 +#define mmDAGB7_WR_CNTL_MISC_BASE_IDX 1 +#define mmDAGB7_WR_TLB_CREDIT 0x3156 +#define mmDAGB7_WR_TLB_CREDIT_BASE_IDX 1 +#define mmDAGB7_WR_DATA_CREDIT 0x3157 +#define mmDAGB7_WR_DATA_CREDIT_BASE_IDX 1 +#define mmDAGB7_WR_MISC_CREDIT 0x3158 +#define mmDAGB7_WR_MISC_CREDIT_BASE_IDX 1 +#define mmDAGB7_WRCLI_ASK_PENDING 0x315d +#define mmDAGB7_WRCLI_ASK_PENDING_BASE_IDX 1 +#define mmDAGB7_WRCLI_GO_PENDING 0x315e +#define mmDAGB7_WRCLI_GO_PENDING_BASE_IDX 1 +#define mmDAGB7_WRCLI_GBLSEND_PENDING 0x315f +#define mmDAGB7_WRCLI_GBLSEND_PENDING_BASE_IDX 1 +#define mmDAGB7_WRCLI_TLB_PENDING 0x3160 +#define mmDAGB7_WRCLI_TLB_PENDING_BASE_IDX 1 +#define mmDAGB7_WRCLI_OARB_PENDING 0x3161 +#define mmDAGB7_WRCLI_OARB_PENDING_BASE_IDX 1 +#define mmDAGB7_WRCLI_OSD_PENDING 0x3162 +#define mmDAGB7_WRCLI_OSD_PENDING_BASE_IDX 1 +#define mmDAGB7_WRCLI_DBUS_ASK_PENDING 0x3163 +#define mmDAGB7_WRCLI_DBUS_ASK_PENDING_BASE_IDX 1 +#define mmDAGB7_WRCLI_DBUS_GO_PENDING 0x3164 +#define mmDAGB7_WRCLI_DBUS_GO_PENDING_BASE_IDX 1 +#define mmDAGB7_DAGB_DLY 0x3165 +#define mmDAGB7_DAGB_DLY_BASE_IDX 1 +#define mmDAGB7_CNTL_MISC 0x3166 +#define mmDAGB7_CNTL_MISC_BASE_IDX 1 +#define mmDAGB7_CNTL_MISC2 0x3167 +#define mmDAGB7_CNTL_MISC2_BASE_IDX 1 +#define mmDAGB7_FIFO_EMPTY 0x3168 +#define mmDAGB7_FIFO_EMPTY_BASE_IDX 1 +#define mmDAGB7_FIFO_FULL 0x3169 +#define mmDAGB7_FIFO_FULL_BASE_IDX 1 +#define mmDAGB7_WR_CREDITS_FULL 0x316a +#define mmDAGB7_WR_CREDITS_FULL_BASE_IDX 1 +#define 
mmDAGB7_RD_CREDITS_FULL 0x316b +#define mmDAGB7_RD_CREDITS_FULL_BASE_IDX 1 +#define mmDAGB7_PERFCOUNTER_LO 0x316c +#define mmDAGB7_PERFCOUNTER_LO_BASE_IDX 1 +#define mmDAGB7_PERFCOUNTER_HI 0x316d +#define mmDAGB7_PERFCOUNTER_HI_BASE_IDX 1 +#define mmDAGB7_PERFCOUNTER0_CFG 0x316e +#define mmDAGB7_PERFCOUNTER0_CFG_BASE_IDX 1 +#define mmDAGB7_PERFCOUNTER1_CFG 0x316f +#define mmDAGB7_PERFCOUNTER1_CFG_BASE_IDX 1 +#define mmDAGB7_PERFCOUNTER2_CFG 0x3170 +#define mmDAGB7_PERFCOUNTER2_CFG_BASE_IDX 1 +#define mmDAGB7_PERFCOUNTER_RSLT_CNTL 0x3171 +#define mmDAGB7_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1 +#define mmDAGB7_RESERVE0 0x3172 +#define mmDAGB7_RESERVE0_BASE_IDX 1 +#define mmDAGB7_RESERVE1 0x3173 +#define mmDAGB7_RESERVE1_BASE_IDX 1 +#define mmDAGB7_RESERVE2 0x3174 +#define mmDAGB7_RESERVE2_BASE_IDX 1 +#define mmDAGB7_RESERVE3 0x3175 +#define mmDAGB7_RESERVE3_BASE_IDX 1 +#define mmDAGB7_RESERVE4 0x3176 +#define mmDAGB7_RESERVE4_BASE_IDX 1 +#define mmDAGB7_RESERVE5 0x3177 +#define mmDAGB7_RESERVE5_BASE_IDX 1 +#define mmDAGB7_RESERVE6 0x3178 +#define mmDAGB7_RESERVE6_BASE_IDX 1 +#define mmDAGB7_RESERVE7 0x3179 +#define mmDAGB7_RESERVE7_BASE_IDX 1 +#define mmDAGB7_RESERVE8 0x317a +#define mmDAGB7_RESERVE8_BASE_IDX 1 +#define mmDAGB7_RESERVE9 0x317b +#define mmDAGB7_RESERVE9_BASE_IDX 1 +#define mmDAGB7_RESERVE10 0x317c +#define mmDAGB7_RESERVE10_BASE_IDX 1 +#define mmDAGB7_RESERVE11 0x317d +#define mmDAGB7_RESERVE11_BASE_IDX 1 +#define mmDAGB7_RESERVE12 0x317e +#define mmDAGB7_RESERVE12_BASE_IDX 1 +#define mmDAGB7_RESERVE13 0x317f +#define mmDAGB7_RESERVE13_BASE_IDX 1 + + +// addressBlock: mmhub_ea_mmeadec5 +// base address: 0x74a00 +#define mmMMEA5_DRAM_RD_CLI2GRP_MAP0 0x3280 +#define mmMMEA5_DRAM_RD_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA5_DRAM_RD_CLI2GRP_MAP1 0x3281 +#define mmMMEA5_DRAM_RD_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA5_DRAM_WR_CLI2GRP_MAP0 0x3282 +#define mmMMEA5_DRAM_WR_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA5_DRAM_WR_CLI2GRP_MAP1 0x3283 +#define mmMMEA5_DRAM_WR_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA5_DRAM_RD_GRP2VC_MAP 0x3284 +#define mmMMEA5_DRAM_RD_GRP2VC_MAP_BASE_IDX 1 +#define mmMMEA5_DRAM_WR_GRP2VC_MAP 0x3285 +#define mmMMEA5_DRAM_WR_GRP2VC_MAP_BASE_IDX 1 +#define mmMMEA5_DRAM_RD_LAZY 0x3286 +#define mmMMEA5_DRAM_RD_LAZY_BASE_IDX 1 +#define mmMMEA5_DRAM_WR_LAZY 0x3287 +#define mmMMEA5_DRAM_WR_LAZY_BASE_IDX 1 +#define mmMMEA5_DRAM_RD_CAM_CNTL 0x3288 +#define mmMMEA5_DRAM_RD_CAM_CNTL_BASE_IDX 1 +#define mmMMEA5_DRAM_WR_CAM_CNTL 0x3289 +#define mmMMEA5_DRAM_WR_CAM_CNTL_BASE_IDX 1 +#define mmMMEA5_DRAM_PAGE_BURST 0x328a +#define mmMMEA5_DRAM_PAGE_BURST_BASE_IDX 1 +#define mmMMEA5_DRAM_RD_PRI_AGE 0x328b +#define mmMMEA5_DRAM_RD_PRI_AGE_BASE_IDX 1 +#define mmMMEA5_DRAM_WR_PRI_AGE 0x328c +#define mmMMEA5_DRAM_WR_PRI_AGE_BASE_IDX 1 +#define mmMMEA5_DRAM_RD_PRI_QUEUING 0x328d +#define mmMMEA5_DRAM_RD_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA5_DRAM_WR_PRI_QUEUING 0x328e +#define mmMMEA5_DRAM_WR_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA5_DRAM_RD_PRI_FIXED 0x328f +#define mmMMEA5_DRAM_RD_PRI_FIXED_BASE_IDX 1 +#define mmMMEA5_DRAM_WR_PRI_FIXED 0x3290 +#define mmMMEA5_DRAM_WR_PRI_FIXED_BASE_IDX 1 +#define mmMMEA5_DRAM_RD_PRI_URGENCY 0x3291 +#define mmMMEA5_DRAM_RD_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA5_DRAM_WR_PRI_URGENCY 0x3292 +#define mmMMEA5_DRAM_WR_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA5_DRAM_RD_PRI_QUANT_PRI1 0x3293 +#define mmMMEA5_DRAM_RD_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA5_DRAM_RD_PRI_QUANT_PRI2 0x3294 +#define mmMMEA5_DRAM_RD_PRI_QUANT_PRI2_BASE_IDX 1 +#define 
mmMMEA5_DRAM_RD_PRI_QUANT_PRI3 0x3295 +#define mmMMEA5_DRAM_RD_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA5_DRAM_WR_PRI_QUANT_PRI1 0x3296 +#define mmMMEA5_DRAM_WR_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA5_DRAM_WR_PRI_QUANT_PRI2 0x3297 +#define mmMMEA5_DRAM_WR_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA5_DRAM_WR_PRI_QUANT_PRI3 0x3298 +#define mmMMEA5_DRAM_WR_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA5_GMI_RD_CLI2GRP_MAP0 0x3299 +#define mmMMEA5_GMI_RD_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA5_GMI_RD_CLI2GRP_MAP1 0x329a +#define mmMMEA5_GMI_RD_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA5_GMI_WR_CLI2GRP_MAP0 0x329b +#define mmMMEA5_GMI_WR_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA5_GMI_WR_CLI2GRP_MAP1 0x329c +#define mmMMEA5_GMI_WR_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA5_GMI_RD_GRP2VC_MAP 0x329d +#define mmMMEA5_GMI_RD_GRP2VC_MAP_BASE_IDX 1 +#define mmMMEA5_GMI_WR_GRP2VC_MAP 0x329e +#define mmMMEA5_GMI_WR_GRP2VC_MAP_BASE_IDX 1 +#define mmMMEA5_GMI_RD_LAZY 0x329f +#define mmMMEA5_GMI_RD_LAZY_BASE_IDX 1 +#define mmMMEA5_GMI_WR_LAZY 0x32a0 +#define mmMMEA5_GMI_WR_LAZY_BASE_IDX 1 +#define mmMMEA5_GMI_RD_CAM_CNTL 0x32a1 +#define mmMMEA5_GMI_RD_CAM_CNTL_BASE_IDX 1 +#define mmMMEA5_GMI_WR_CAM_CNTL 0x32a2 +#define mmMMEA5_GMI_WR_CAM_CNTL_BASE_IDX 1 +#define mmMMEA5_GMI_PAGE_BURST 0x32a3 +#define mmMMEA5_GMI_PAGE_BURST_BASE_IDX 1 +#define mmMMEA5_GMI_RD_PRI_AGE 0x32a4 +#define mmMMEA5_GMI_RD_PRI_AGE_BASE_IDX 1 +#define mmMMEA5_GMI_WR_PRI_AGE 0x32a5 +#define mmMMEA5_GMI_WR_PRI_AGE_BASE_IDX 1 +#define mmMMEA5_GMI_RD_PRI_QUEUING 0x32a6 +#define mmMMEA5_GMI_RD_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA5_GMI_WR_PRI_QUEUING 0x32a7 +#define mmMMEA5_GMI_WR_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA5_GMI_RD_PRI_FIXED 0x32a8 +#define mmMMEA5_GMI_RD_PRI_FIXED_BASE_IDX 1 +#define mmMMEA5_GMI_WR_PRI_FIXED 0x32a9 +#define mmMMEA5_GMI_WR_PRI_FIXED_BASE_IDX 1 +#define mmMMEA5_GMI_RD_PRI_URGENCY 0x32aa +#define mmMMEA5_GMI_RD_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA5_GMI_WR_PRI_URGENCY 0x32ab +#define mmMMEA5_GMI_WR_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA5_GMI_RD_PRI_URGENCY_MASKING 0x32ac +#define mmMMEA5_GMI_RD_PRI_URGENCY_MASKING_BASE_IDX 1 +#define mmMMEA5_GMI_WR_PRI_URGENCY_MASKING 0x32ad +#define mmMMEA5_GMI_WR_PRI_URGENCY_MASKING_BASE_IDX 1 +#define mmMMEA5_GMI_RD_PRI_QUANT_PRI1 0x32ae +#define mmMMEA5_GMI_RD_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA5_GMI_RD_PRI_QUANT_PRI2 0x32af +#define mmMMEA5_GMI_RD_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA5_GMI_RD_PRI_QUANT_PRI3 0x32b0 +#define mmMMEA5_GMI_RD_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA5_GMI_WR_PRI_QUANT_PRI1 0x32b1 +#define mmMMEA5_GMI_WR_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA5_GMI_WR_PRI_QUANT_PRI2 0x32b2 +#define mmMMEA5_GMI_WR_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA5_GMI_WR_PRI_QUANT_PRI3 0x32b3 +#define mmMMEA5_GMI_WR_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA5_ADDRNORM_BASE_ADDR0 0x32b4 +#define mmMMEA5_ADDRNORM_BASE_ADDR0_BASE_IDX 1 +#define mmMMEA5_ADDRNORM_LIMIT_ADDR0 0x32b5 +#define mmMMEA5_ADDRNORM_LIMIT_ADDR0_BASE_IDX 1 +#define mmMMEA5_ADDRNORM_BASE_ADDR1 0x32b6 +#define mmMMEA5_ADDRNORM_BASE_ADDR1_BASE_IDX 1 +#define mmMMEA5_ADDRNORM_LIMIT_ADDR1 0x32b7 +#define mmMMEA5_ADDRNORM_LIMIT_ADDR1_BASE_IDX 1 +#define mmMMEA5_ADDRNORM_OFFSET_ADDR1 0x32b8 +#define mmMMEA5_ADDRNORM_OFFSET_ADDR1_BASE_IDX 1 +#define mmMMEA5_ADDRNORM_BASE_ADDR2 0x32b9 +#define mmMMEA5_ADDRNORM_BASE_ADDR2_BASE_IDX 1 +#define mmMMEA5_ADDRNORM_LIMIT_ADDR2 0x32ba +#define mmMMEA5_ADDRNORM_LIMIT_ADDR2_BASE_IDX 1 +#define mmMMEA5_ADDRNORM_BASE_ADDR3 0x32bb +#define 
mmMMEA5_ADDRNORM_BASE_ADDR3_BASE_IDX 1 +#define mmMMEA5_ADDRNORM_LIMIT_ADDR3 0x32bc +#define mmMMEA5_ADDRNORM_LIMIT_ADDR3_BASE_IDX 1 +#define mmMMEA5_ADDRNORM_OFFSET_ADDR3 0x32bd +#define mmMMEA5_ADDRNORM_OFFSET_ADDR3_BASE_IDX 1 +#define mmMMEA5_ADDRNORM_BASE_ADDR4 0x32be +#define mmMMEA5_ADDRNORM_BASE_ADDR4_BASE_IDX 1 +#define mmMMEA5_ADDRNORM_LIMIT_ADDR4 0x32bf +#define mmMMEA5_ADDRNORM_LIMIT_ADDR4_BASE_IDX 1 +#define mmMMEA5_ADDRNORM_BASE_ADDR5 0x32c0 +#define mmMMEA5_ADDRNORM_BASE_ADDR5_BASE_IDX 1 +#define mmMMEA5_ADDRNORM_LIMIT_ADDR5 0x32c1 +#define mmMMEA5_ADDRNORM_LIMIT_ADDR5_BASE_IDX 1 +#define mmMMEA5_ADDRNORM_OFFSET_ADDR5 0x32c2 +#define mmMMEA5_ADDRNORM_OFFSET_ADDR5_BASE_IDX 1 +#define mmMMEA5_ADDRNORMDRAM_HOLE_CNTL 0x32c3 +#define mmMMEA5_ADDRNORMDRAM_HOLE_CNTL_BASE_IDX 1 +#define mmMMEA5_ADDRNORMGMI_HOLE_CNTL 0x32c4 +#define mmMMEA5_ADDRNORMGMI_HOLE_CNTL_BASE_IDX 1 +#define mmMMEA5_ADDRNORMDRAM_NP2_CHANNEL_CFG 0x32c5 +#define mmMMEA5_ADDRNORMDRAM_NP2_CHANNEL_CFG_BASE_IDX 1 +#define mmMMEA5_ADDRNORMGMI_NP2_CHANNEL_CFG 0x32c6 +#define mmMMEA5_ADDRNORMGMI_NP2_CHANNEL_CFG_BASE_IDX 1 +#define mmMMEA5_ADDRDEC_BANK_CFG 0x32c7 +#define mmMMEA5_ADDRDEC_BANK_CFG_BASE_IDX 1 +#define mmMMEA5_ADDRDEC_MISC_CFG 0x32c8 +#define mmMMEA5_ADDRDEC_MISC_CFG_BASE_IDX 1 +#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_BANK0 0x32c9 +#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_BANK0_BASE_IDX 1 +#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_BANK1 0x32ca +#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_BANK1_BASE_IDX 1 +#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_BANK2 0x32cb +#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_BANK2_BASE_IDX 1 +#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_BANK3 0x32cc +#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_BANK3_BASE_IDX 1 +#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_BANK4 0x32cd +#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_BANK4_BASE_IDX 1 +#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_BANK5 0x32ce +#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_BANK5_BASE_IDX 1 +#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_PC 0x32cf +#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_PC_BASE_IDX 1 +#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_PC2 0x32d0 +#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_PC2_BASE_IDX 1 +#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_CS0 0x32d1 +#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_CS0_BASE_IDX 1 +#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_CS1 0x32d2 +#define mmMMEA5_ADDRDECDRAM_ADDR_HASH_CS1_BASE_IDX 1 +#define mmMMEA5_ADDRDECDRAM_HARVEST_ENABLE 0x32d3 +#define mmMMEA5_ADDRDECDRAM_HARVEST_ENABLE_BASE_IDX 1 +#define mmMMEA5_ADDRDECGMI_ADDR_HASH_BANK0 0x32d4 +#define mmMMEA5_ADDRDECGMI_ADDR_HASH_BANK0_BASE_IDX 1 +#define mmMMEA5_ADDRDECGMI_ADDR_HASH_BANK1 0x32d5 +#define mmMMEA5_ADDRDECGMI_ADDR_HASH_BANK1_BASE_IDX 1 +#define mmMMEA5_ADDRDECGMI_ADDR_HASH_BANK2 0x32d6 +#define mmMMEA5_ADDRDECGMI_ADDR_HASH_BANK2_BASE_IDX 1 +#define mmMMEA5_ADDRDECGMI_ADDR_HASH_BANK3 0x32d7 +#define mmMMEA5_ADDRDECGMI_ADDR_HASH_BANK3_BASE_IDX 1 +#define mmMMEA5_ADDRDECGMI_ADDR_HASH_BANK4 0x32d8 +#define mmMMEA5_ADDRDECGMI_ADDR_HASH_BANK4_BASE_IDX 1 +#define mmMMEA5_ADDRDECGMI_ADDR_HASH_BANK5 0x32d9 +#define mmMMEA5_ADDRDECGMI_ADDR_HASH_BANK5_BASE_IDX 1 +#define mmMMEA5_ADDRDECGMI_ADDR_HASH_PC 0x32da +#define mmMMEA5_ADDRDECGMI_ADDR_HASH_PC_BASE_IDX 1 +#define mmMMEA5_ADDRDECGMI_ADDR_HASH_PC2 0x32db +#define mmMMEA5_ADDRDECGMI_ADDR_HASH_PC2_BASE_IDX 1 +#define mmMMEA5_ADDRDECGMI_ADDR_HASH_CS0 0x32dc +#define mmMMEA5_ADDRDECGMI_ADDR_HASH_CS0_BASE_IDX 1 +#define mmMMEA5_ADDRDECGMI_ADDR_HASH_CS1 0x32dd +#define mmMMEA5_ADDRDECGMI_ADDR_HASH_CS1_BASE_IDX 1 +#define mmMMEA5_ADDRDECGMI_HARVEST_ENABLE 0x32de +#define 
mmMMEA5_ADDRDECGMI_HARVEST_ENABLE_BASE_IDX 1 +#define mmMMEA5_ADDRDEC0_BASE_ADDR_CS0 0x32df +#define mmMMEA5_ADDRDEC0_BASE_ADDR_CS0_BASE_IDX 1 +#define mmMMEA5_ADDRDEC0_BASE_ADDR_CS1 0x32e0 +#define mmMMEA5_ADDRDEC0_BASE_ADDR_CS1_BASE_IDX 1 +#define mmMMEA5_ADDRDEC0_BASE_ADDR_CS2 0x32e1 +#define mmMMEA5_ADDRDEC0_BASE_ADDR_CS2_BASE_IDX 1 +#define mmMMEA5_ADDRDEC0_BASE_ADDR_CS3 0x32e2 +#define mmMMEA5_ADDRDEC0_BASE_ADDR_CS3_BASE_IDX 1 +#define mmMMEA5_ADDRDEC0_BASE_ADDR_SECCS0 0x32e3 +#define mmMMEA5_ADDRDEC0_BASE_ADDR_SECCS0_BASE_IDX 1 +#define mmMMEA5_ADDRDEC0_BASE_ADDR_SECCS1 0x32e4 +#define mmMMEA5_ADDRDEC0_BASE_ADDR_SECCS1_BASE_IDX 1 +#define mmMMEA5_ADDRDEC0_BASE_ADDR_SECCS2 0x32e5 +#define mmMMEA5_ADDRDEC0_BASE_ADDR_SECCS2_BASE_IDX 1 +#define mmMMEA5_ADDRDEC0_BASE_ADDR_SECCS3 0x32e6 +#define mmMMEA5_ADDRDEC0_BASE_ADDR_SECCS3_BASE_IDX 1 +#define mmMMEA5_ADDRDEC0_ADDR_MASK_CS01 0x32e7 +#define mmMMEA5_ADDRDEC0_ADDR_MASK_CS01_BASE_IDX 1 +#define mmMMEA5_ADDRDEC0_ADDR_MASK_CS23 0x32e8 +#define mmMMEA5_ADDRDEC0_ADDR_MASK_CS23_BASE_IDX 1 +#define mmMMEA5_ADDRDEC0_ADDR_MASK_SECCS01 0x32e9 +#define mmMMEA5_ADDRDEC0_ADDR_MASK_SECCS01_BASE_IDX 1 +#define mmMMEA5_ADDRDEC0_ADDR_MASK_SECCS23 0x32ea +#define mmMMEA5_ADDRDEC0_ADDR_MASK_SECCS23_BASE_IDX 1 +#define mmMMEA5_ADDRDEC0_ADDR_CFG_CS01 0x32eb +#define mmMMEA5_ADDRDEC0_ADDR_CFG_CS01_BASE_IDX 1 +#define mmMMEA5_ADDRDEC0_ADDR_CFG_CS23 0x32ec +#define mmMMEA5_ADDRDEC0_ADDR_CFG_CS23_BASE_IDX 1 +#define mmMMEA5_ADDRDEC0_ADDR_SEL_CS01 0x32ed +#define mmMMEA5_ADDRDEC0_ADDR_SEL_CS01_BASE_IDX 1 +#define mmMMEA5_ADDRDEC0_ADDR_SEL_CS23 0x32ee +#define mmMMEA5_ADDRDEC0_ADDR_SEL_CS23_BASE_IDX 1 +#define mmMMEA5_ADDRDEC0_ADDR_SEL2_CS01 0x32ef +#define mmMMEA5_ADDRDEC0_ADDR_SEL2_CS01_BASE_IDX 1 +#define mmMMEA5_ADDRDEC0_ADDR_SEL2_CS23 0x32f0 +#define mmMMEA5_ADDRDEC0_ADDR_SEL2_CS23_BASE_IDX 1 +#define mmMMEA5_ADDRDEC0_COL_SEL_LO_CS01 0x32f1 +#define mmMMEA5_ADDRDEC0_COL_SEL_LO_CS01_BASE_IDX 1 +#define mmMMEA5_ADDRDEC0_COL_SEL_LO_CS23 0x32f2 +#define mmMMEA5_ADDRDEC0_COL_SEL_LO_CS23_BASE_IDX 1 +#define mmMMEA5_ADDRDEC0_COL_SEL_HI_CS01 0x32f3 +#define mmMMEA5_ADDRDEC0_COL_SEL_HI_CS01_BASE_IDX 1 +#define mmMMEA5_ADDRDEC0_COL_SEL_HI_CS23 0x32f4 +#define mmMMEA5_ADDRDEC0_COL_SEL_HI_CS23_BASE_IDX 1 +#define mmMMEA5_ADDRDEC0_RM_SEL_CS01 0x32f5 +#define mmMMEA5_ADDRDEC0_RM_SEL_CS01_BASE_IDX 1 +#define mmMMEA5_ADDRDEC0_RM_SEL_CS23 0x32f6 +#define mmMMEA5_ADDRDEC0_RM_SEL_CS23_BASE_IDX 1 +#define mmMMEA5_ADDRDEC0_RM_SEL_SECCS01 0x32f7 +#define mmMMEA5_ADDRDEC0_RM_SEL_SECCS01_BASE_IDX 1 +#define mmMMEA5_ADDRDEC0_RM_SEL_SECCS23 0x32f8 +#define mmMMEA5_ADDRDEC0_RM_SEL_SECCS23_BASE_IDX 1 +#define mmMMEA5_ADDRDEC1_BASE_ADDR_CS0 0x32f9 +#define mmMMEA5_ADDRDEC1_BASE_ADDR_CS0_BASE_IDX 1 +#define mmMMEA5_ADDRDEC1_BASE_ADDR_CS1 0x32fa +#define mmMMEA5_ADDRDEC1_BASE_ADDR_CS1_BASE_IDX 1 +#define mmMMEA5_ADDRDEC1_BASE_ADDR_CS2 0x32fb +#define mmMMEA5_ADDRDEC1_BASE_ADDR_CS2_BASE_IDX 1 +#define mmMMEA5_ADDRDEC1_BASE_ADDR_CS3 0x32fc +#define mmMMEA5_ADDRDEC1_BASE_ADDR_CS3_BASE_IDX 1 +#define mmMMEA5_ADDRDEC1_BASE_ADDR_SECCS0 0x32fd +#define mmMMEA5_ADDRDEC1_BASE_ADDR_SECCS0_BASE_IDX 1 +#define mmMMEA5_ADDRDEC1_BASE_ADDR_SECCS1 0x32fe +#define mmMMEA5_ADDRDEC1_BASE_ADDR_SECCS1_BASE_IDX 1 +#define mmMMEA5_ADDRDEC1_BASE_ADDR_SECCS2 0x32ff +#define mmMMEA5_ADDRDEC1_BASE_ADDR_SECCS2_BASE_IDX 1 +#define mmMMEA5_ADDRDEC1_BASE_ADDR_SECCS3 0x3300 +#define mmMMEA5_ADDRDEC1_BASE_ADDR_SECCS3_BASE_IDX 1 +#define mmMMEA5_ADDRDEC1_ADDR_MASK_CS01 0x3301 +#define 
mmMMEA5_ADDRDEC1_ADDR_MASK_CS01_BASE_IDX 1 +#define mmMMEA5_ADDRDEC1_ADDR_MASK_CS23 0x3302 +#define mmMMEA5_ADDRDEC1_ADDR_MASK_CS23_BASE_IDX 1 +#define mmMMEA5_ADDRDEC1_ADDR_MASK_SECCS01 0x3303 +#define mmMMEA5_ADDRDEC1_ADDR_MASK_SECCS01_BASE_IDX 1 +#define mmMMEA5_ADDRDEC1_ADDR_MASK_SECCS23 0x3304 +#define mmMMEA5_ADDRDEC1_ADDR_MASK_SECCS23_BASE_IDX 1 +#define mmMMEA5_ADDRDEC1_ADDR_CFG_CS01 0x3305 +#define mmMMEA5_ADDRDEC1_ADDR_CFG_CS01_BASE_IDX 1 +#define mmMMEA5_ADDRDEC1_ADDR_CFG_CS23 0x3306 +#define mmMMEA5_ADDRDEC1_ADDR_CFG_CS23_BASE_IDX 1 +#define mmMMEA5_ADDRDEC1_ADDR_SEL_CS01 0x3307 +#define mmMMEA5_ADDRDEC1_ADDR_SEL_CS01_BASE_IDX 1 +#define mmMMEA5_ADDRDEC1_ADDR_SEL_CS23 0x3308 +#define mmMMEA5_ADDRDEC1_ADDR_SEL_CS23_BASE_IDX 1 +#define mmMMEA5_ADDRDEC1_ADDR_SEL2_CS01 0x3309 +#define mmMMEA5_ADDRDEC1_ADDR_SEL2_CS01_BASE_IDX 1 +#define mmMMEA5_ADDRDEC1_ADDR_SEL2_CS23 0x330a +#define mmMMEA5_ADDRDEC1_ADDR_SEL2_CS23_BASE_IDX 1 +#define mmMMEA5_ADDRDEC1_COL_SEL_LO_CS01 0x330b +#define mmMMEA5_ADDRDEC1_COL_SEL_LO_CS01_BASE_IDX 1 +#define mmMMEA5_ADDRDEC1_COL_SEL_LO_CS23 0x330c +#define mmMMEA5_ADDRDEC1_COL_SEL_LO_CS23_BASE_IDX 1 +#define mmMMEA5_ADDRDEC1_COL_SEL_HI_CS01 0x330d +#define mmMMEA5_ADDRDEC1_COL_SEL_HI_CS01_BASE_IDX 1 +#define mmMMEA5_ADDRDEC1_COL_SEL_HI_CS23 0x330e +#define mmMMEA5_ADDRDEC1_COL_SEL_HI_CS23_BASE_IDX 1 +#define mmMMEA5_ADDRDEC1_RM_SEL_CS01 0x330f +#define mmMMEA5_ADDRDEC1_RM_SEL_CS01_BASE_IDX 1 +#define mmMMEA5_ADDRDEC1_RM_SEL_CS23 0x3310 +#define mmMMEA5_ADDRDEC1_RM_SEL_CS23_BASE_IDX 1 +#define mmMMEA5_ADDRDEC1_RM_SEL_SECCS01 0x3311 +#define mmMMEA5_ADDRDEC1_RM_SEL_SECCS01_BASE_IDX 1 +#define mmMMEA5_ADDRDEC1_RM_SEL_SECCS23 0x3312 +#define mmMMEA5_ADDRDEC1_RM_SEL_SECCS23_BASE_IDX 1 +#define mmMMEA5_ADDRDEC2_BASE_ADDR_CS0 0x3313 +#define mmMMEA5_ADDRDEC2_BASE_ADDR_CS0_BASE_IDX 1 +#define mmMMEA5_ADDRDEC2_BASE_ADDR_CS1 0x3314 +#define mmMMEA5_ADDRDEC2_BASE_ADDR_CS1_BASE_IDX 1 +#define mmMMEA5_ADDRDEC2_BASE_ADDR_CS2 0x3315 +#define mmMMEA5_ADDRDEC2_BASE_ADDR_CS2_BASE_IDX 1 +#define mmMMEA5_ADDRDEC2_BASE_ADDR_CS3 0x3316 +#define mmMMEA5_ADDRDEC2_BASE_ADDR_CS3_BASE_IDX 1 +#define mmMMEA5_ADDRDEC2_BASE_ADDR_SECCS0 0x3317 +#define mmMMEA5_ADDRDEC2_BASE_ADDR_SECCS0_BASE_IDX 1 +#define mmMMEA5_ADDRDEC2_BASE_ADDR_SECCS1 0x3318 +#define mmMMEA5_ADDRDEC2_BASE_ADDR_SECCS1_BASE_IDX 1 +#define mmMMEA5_ADDRDEC2_BASE_ADDR_SECCS2 0x3319 +#define mmMMEA5_ADDRDEC2_BASE_ADDR_SECCS2_BASE_IDX 1 +#define mmMMEA5_ADDRDEC2_BASE_ADDR_SECCS3 0x331a +#define mmMMEA5_ADDRDEC2_BASE_ADDR_SECCS3_BASE_IDX 1 +#define mmMMEA5_ADDRDEC2_ADDR_MASK_CS01 0x331b +#define mmMMEA5_ADDRDEC2_ADDR_MASK_CS01_BASE_IDX 1 +#define mmMMEA5_ADDRDEC2_ADDR_MASK_CS23 0x331c +#define mmMMEA5_ADDRDEC2_ADDR_MASK_CS23_BASE_IDX 1 +#define mmMMEA5_ADDRDEC2_ADDR_MASK_SECCS01 0x331d +#define mmMMEA5_ADDRDEC2_ADDR_MASK_SECCS01_BASE_IDX 1 +#define mmMMEA5_ADDRDEC2_ADDR_MASK_SECCS23 0x331e +#define mmMMEA5_ADDRDEC2_ADDR_MASK_SECCS23_BASE_IDX 1 +#define mmMMEA5_ADDRDEC2_ADDR_CFG_CS01 0x331f +#define mmMMEA5_ADDRDEC2_ADDR_CFG_CS01_BASE_IDX 1 +#define mmMMEA5_ADDRDEC2_ADDR_CFG_CS23 0x3320 +#define mmMMEA5_ADDRDEC2_ADDR_CFG_CS23_BASE_IDX 1 +#define mmMMEA5_ADDRDEC2_ADDR_SEL_CS01 0x3321 +#define mmMMEA5_ADDRDEC2_ADDR_SEL_CS01_BASE_IDX 1 +#define mmMMEA5_ADDRDEC2_ADDR_SEL_CS23 0x3322 +#define mmMMEA5_ADDRDEC2_ADDR_SEL_CS23_BASE_IDX 1 +#define mmMMEA5_ADDRDEC2_ADDR_SEL2_CS01 0x3323 +#define mmMMEA5_ADDRDEC2_ADDR_SEL2_CS01_BASE_IDX 1 +#define mmMMEA5_ADDRDEC2_ADDR_SEL2_CS23 0x3324 +#define mmMMEA5_ADDRDEC2_ADDR_SEL2_CS23_BASE_IDX 1 
+#define mmMMEA5_ADDRDEC2_COL_SEL_LO_CS01 0x3325 +#define mmMMEA5_ADDRDEC2_COL_SEL_LO_CS01_BASE_IDX 1 +#define mmMMEA5_ADDRDEC2_COL_SEL_LO_CS23 0x3326 +#define mmMMEA5_ADDRDEC2_COL_SEL_LO_CS23_BASE_IDX 1 +#define mmMMEA5_ADDRDEC2_COL_SEL_HI_CS01 0x3327 +#define mmMMEA5_ADDRDEC2_COL_SEL_HI_CS01_BASE_IDX 1 +#define mmMMEA5_ADDRDEC2_COL_SEL_HI_CS23 0x3328 +#define mmMMEA5_ADDRDEC2_COL_SEL_HI_CS23_BASE_IDX 1 +#define mmMMEA5_ADDRDEC2_RM_SEL_CS01 0x3329 +#define mmMMEA5_ADDRDEC2_RM_SEL_CS01_BASE_IDX 1 +#define mmMMEA5_ADDRDEC2_RM_SEL_CS23 0x332a +#define mmMMEA5_ADDRDEC2_RM_SEL_CS23_BASE_IDX 1 +#define mmMMEA5_ADDRDEC2_RM_SEL_SECCS01 0x332b +#define mmMMEA5_ADDRDEC2_RM_SEL_SECCS01_BASE_IDX 1 +#define mmMMEA5_ADDRDEC2_RM_SEL_SECCS23 0x332c +#define mmMMEA5_ADDRDEC2_RM_SEL_SECCS23_BASE_IDX 1 +#define mmMMEA5_ADDRNORMDRAM_GLOBAL_CNTL 0x332d +#define mmMMEA5_ADDRNORMDRAM_GLOBAL_CNTL_BASE_IDX 1 +#define mmMMEA5_ADDRNORMGMI_GLOBAL_CNTL 0x332e +#define mmMMEA5_ADDRNORMGMI_GLOBAL_CNTL_BASE_IDX 1 +#define mmMMEA5_IO_RD_CLI2GRP_MAP0 0x3355 +#define mmMMEA5_IO_RD_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA5_IO_RD_CLI2GRP_MAP1 0x3356 +#define mmMMEA5_IO_RD_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA5_IO_WR_CLI2GRP_MAP0 0x3357 +#define mmMMEA5_IO_WR_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA5_IO_WR_CLI2GRP_MAP1 0x3358 +#define mmMMEA5_IO_WR_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA5_IO_RD_COMBINE_FLUSH 0x3359 +#define mmMMEA5_IO_RD_COMBINE_FLUSH_BASE_IDX 1 +#define mmMMEA5_IO_WR_COMBINE_FLUSH 0x335a +#define mmMMEA5_IO_WR_COMBINE_FLUSH_BASE_IDX 1 +#define mmMMEA5_IO_GROUP_BURST 0x335b +#define mmMMEA5_IO_GROUP_BURST_BASE_IDX 1 +#define mmMMEA5_IO_RD_PRI_AGE 0x335c +#define mmMMEA5_IO_RD_PRI_AGE_BASE_IDX 1 +#define mmMMEA5_IO_WR_PRI_AGE 0x335d +#define mmMMEA5_IO_WR_PRI_AGE_BASE_IDX 1 +#define mmMMEA5_IO_RD_PRI_QUEUING 0x335e +#define mmMMEA5_IO_RD_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA5_IO_WR_PRI_QUEUING 0x335f +#define mmMMEA5_IO_WR_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA5_IO_RD_PRI_FIXED 0x3360 +#define mmMMEA5_IO_RD_PRI_FIXED_BASE_IDX 1 +#define mmMMEA5_IO_WR_PRI_FIXED 0x3361 +#define mmMMEA5_IO_WR_PRI_FIXED_BASE_IDX 1 +#define mmMMEA5_IO_RD_PRI_URGENCY 0x3362 +#define mmMMEA5_IO_RD_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA5_IO_WR_PRI_URGENCY 0x3363 +#define mmMMEA5_IO_WR_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA5_IO_RD_PRI_URGENCY_MASKING 0x3364 +#define mmMMEA5_IO_RD_PRI_URGENCY_MASKING_BASE_IDX 1 +#define mmMMEA5_IO_WR_PRI_URGENCY_MASKING 0x3365 +#define mmMMEA5_IO_WR_PRI_URGENCY_MASKING_BASE_IDX 1 +#define mmMMEA5_IO_RD_PRI_QUANT_PRI1 0x3366 +#define mmMMEA5_IO_RD_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA5_IO_RD_PRI_QUANT_PRI2 0x3367 +#define mmMMEA5_IO_RD_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA5_IO_RD_PRI_QUANT_PRI3 0x3368 +#define mmMMEA5_IO_RD_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA5_IO_WR_PRI_QUANT_PRI1 0x3369 +#define mmMMEA5_IO_WR_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA5_IO_WR_PRI_QUANT_PRI2 0x336a +#define mmMMEA5_IO_WR_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA5_IO_WR_PRI_QUANT_PRI3 0x336b +#define mmMMEA5_IO_WR_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA5_SDP_ARB_DRAM 0x336c +#define mmMMEA5_SDP_ARB_DRAM_BASE_IDX 1 +#define mmMMEA5_SDP_ARB_GMI 0x336d +#define mmMMEA5_SDP_ARB_GMI_BASE_IDX 1 +#define mmMMEA5_SDP_ARB_FINAL 0x336e +#define mmMMEA5_SDP_ARB_FINAL_BASE_IDX 1 +#define mmMMEA5_SDP_DRAM_PRIORITY 0x336f +#define mmMMEA5_SDP_DRAM_PRIORITY_BASE_IDX 1 +#define mmMMEA5_SDP_GMI_PRIORITY 0x3370 +#define mmMMEA5_SDP_GMI_PRIORITY_BASE_IDX 1 +#define mmMMEA5_SDP_IO_PRIORITY 0x3371 +#define 
mmMMEA5_SDP_IO_PRIORITY_BASE_IDX 1 +#define mmMMEA5_SDP_CREDITS 0x3372 +#define mmMMEA5_SDP_CREDITS_BASE_IDX 1 +#define mmMMEA5_SDP_TAG_RESERVE0 0x3373 +#define mmMMEA5_SDP_TAG_RESERVE0_BASE_IDX 1 +#define mmMMEA5_SDP_TAG_RESERVE1 0x3374 +#define mmMMEA5_SDP_TAG_RESERVE1_BASE_IDX 1 +#define mmMMEA5_SDP_VCC_RESERVE0 0x3375 +#define mmMMEA5_SDP_VCC_RESERVE0_BASE_IDX 1 +#define mmMMEA5_SDP_VCC_RESERVE1 0x3376 +#define mmMMEA5_SDP_VCC_RESERVE1_BASE_IDX 1 +#define mmMMEA5_SDP_VCD_RESERVE0 0x3377 +#define mmMMEA5_SDP_VCD_RESERVE0_BASE_IDX 1 +#define mmMMEA5_SDP_VCD_RESERVE1 0x3378 +#define mmMMEA5_SDP_VCD_RESERVE1_BASE_IDX 1 +#define mmMMEA5_SDP_REQ_CNTL 0x3379 +#define mmMMEA5_SDP_REQ_CNTL_BASE_IDX 1 +#define mmMMEA5_MISC 0x337a +#define mmMMEA5_MISC_BASE_IDX 1 +#define mmMMEA5_LATENCY_SAMPLING 0x337b +#define mmMMEA5_LATENCY_SAMPLING_BASE_IDX 1 +#define mmMMEA5_PERFCOUNTER_LO 0x337c +#define mmMMEA5_PERFCOUNTER_LO_BASE_IDX 1 +#define mmMMEA5_PERFCOUNTER_HI 0x337d +#define mmMMEA5_PERFCOUNTER_HI_BASE_IDX 1 +#define mmMMEA5_PERFCOUNTER0_CFG 0x337e +#define mmMMEA5_PERFCOUNTER0_CFG_BASE_IDX 1 +#define mmMMEA5_PERFCOUNTER1_CFG 0x337f +#define mmMMEA5_PERFCOUNTER1_CFG_BASE_IDX 1 +#define mmMMEA5_PERFCOUNTER_RSLT_CNTL 0x3380 +#define mmMMEA5_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1 +#define mmMMEA5_EDC_CNT 0x3386 +#define mmMMEA5_EDC_CNT_BASE_IDX 1 +#define mmMMEA5_EDC_CNT2 0x3387 +#define mmMMEA5_EDC_CNT2_BASE_IDX 1 +#define mmMMEA5_DSM_CNTL 0x3388 +#define mmMMEA5_DSM_CNTL_BASE_IDX 1 +#define mmMMEA5_DSM_CNTLA 0x3389 +#define mmMMEA5_DSM_CNTLA_BASE_IDX 1 +#define mmMMEA5_DSM_CNTLB 0x338a +#define mmMMEA5_DSM_CNTLB_BASE_IDX 1 +#define mmMMEA5_DSM_CNTL2 0x338b +#define mmMMEA5_DSM_CNTL2_BASE_IDX 1 +#define mmMMEA5_DSM_CNTL2A 0x338c +#define mmMMEA5_DSM_CNTL2A_BASE_IDX 1 +#define mmMMEA5_DSM_CNTL2B 0x338d +#define mmMMEA5_DSM_CNTL2B_BASE_IDX 1 +#define mmMMEA5_CGTT_CLK_CTRL 0x338f +#define mmMMEA5_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmMMEA5_EDC_MODE 0x3390 +#define mmMMEA5_EDC_MODE_BASE_IDX 1 +#define mmMMEA5_ERR_STATUS 0x3391 +#define mmMMEA5_ERR_STATUS_BASE_IDX 1 +#define mmMMEA5_MISC2 0x3392 +#define mmMMEA5_MISC2_BASE_IDX 1 +#define mmMMEA5_ADDRDEC_SELECT 0x3393 +#define mmMMEA5_ADDRDEC_SELECT_BASE_IDX 1 +#define mmMMEA5_EDC_CNT3 0x3394 +#define mmMMEA5_EDC_CNT3_BASE_IDX 1 + + +// addressBlock: mmhub_ea_mmeadec6 +// base address: 0x74f00 +#define mmMMEA6_DRAM_RD_CLI2GRP_MAP0 0x33c0 +#define mmMMEA6_DRAM_RD_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA6_DRAM_RD_CLI2GRP_MAP1 0x33c1 +#define mmMMEA6_DRAM_RD_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA6_DRAM_WR_CLI2GRP_MAP0 0x33c2 +#define mmMMEA6_DRAM_WR_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA6_DRAM_WR_CLI2GRP_MAP1 0x33c3 +#define mmMMEA6_DRAM_WR_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA6_DRAM_RD_GRP2VC_MAP 0x33c4 +#define mmMMEA6_DRAM_RD_GRP2VC_MAP_BASE_IDX 1 +#define mmMMEA6_DRAM_WR_GRP2VC_MAP 0x33c5 +#define mmMMEA6_DRAM_WR_GRP2VC_MAP_BASE_IDX 1 +#define mmMMEA6_DRAM_RD_LAZY 0x33c6 +#define mmMMEA6_DRAM_RD_LAZY_BASE_IDX 1 +#define mmMMEA6_DRAM_WR_LAZY 0x33c7 +#define mmMMEA6_DRAM_WR_LAZY_BASE_IDX 1 +#define mmMMEA6_DRAM_RD_CAM_CNTL 0x33c8 +#define mmMMEA6_DRAM_RD_CAM_CNTL_BASE_IDX 1 +#define mmMMEA6_DRAM_WR_CAM_CNTL 0x33c9 +#define mmMMEA6_DRAM_WR_CAM_CNTL_BASE_IDX 1 +#define mmMMEA6_DRAM_PAGE_BURST 0x33ca +#define mmMMEA6_DRAM_PAGE_BURST_BASE_IDX 1 +#define mmMMEA6_DRAM_RD_PRI_AGE 0x33cb +#define mmMMEA6_DRAM_RD_PRI_AGE_BASE_IDX 1 +#define mmMMEA6_DRAM_WR_PRI_AGE 0x33cc +#define mmMMEA6_DRAM_WR_PRI_AGE_BASE_IDX 1 +#define mmMMEA6_DRAM_RD_PRI_QUEUING 0x33cd +#define 
mmMMEA6_DRAM_RD_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA6_DRAM_WR_PRI_QUEUING 0x33ce +#define mmMMEA6_DRAM_WR_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA6_DRAM_RD_PRI_FIXED 0x33cf +#define mmMMEA6_DRAM_RD_PRI_FIXED_BASE_IDX 1 +#define mmMMEA6_DRAM_WR_PRI_FIXED 0x33d0 +#define mmMMEA6_DRAM_WR_PRI_FIXED_BASE_IDX 1 +#define mmMMEA6_DRAM_RD_PRI_URGENCY 0x33d1 +#define mmMMEA6_DRAM_RD_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA6_DRAM_WR_PRI_URGENCY 0x33d2 +#define mmMMEA6_DRAM_WR_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA6_DRAM_RD_PRI_QUANT_PRI1 0x33d3 +#define mmMMEA6_DRAM_RD_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA6_DRAM_RD_PRI_QUANT_PRI2 0x33d4 +#define mmMMEA6_DRAM_RD_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA6_DRAM_RD_PRI_QUANT_PRI3 0x33d5 +#define mmMMEA6_DRAM_RD_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA6_DRAM_WR_PRI_QUANT_PRI1 0x33d6 +#define mmMMEA6_DRAM_WR_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA6_DRAM_WR_PRI_QUANT_PRI2 0x33d7 +#define mmMMEA6_DRAM_WR_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA6_DRAM_WR_PRI_QUANT_PRI3 0x33d8 +#define mmMMEA6_DRAM_WR_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA6_GMI_RD_CLI2GRP_MAP0 0x33d9 +#define mmMMEA6_GMI_RD_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA6_GMI_RD_CLI2GRP_MAP1 0x33da +#define mmMMEA6_GMI_RD_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA6_GMI_WR_CLI2GRP_MAP0 0x33db +#define mmMMEA6_GMI_WR_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA6_GMI_WR_CLI2GRP_MAP1 0x33dc +#define mmMMEA6_GMI_WR_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA6_GMI_RD_GRP2VC_MAP 0x33dd +#define mmMMEA6_GMI_RD_GRP2VC_MAP_BASE_IDX 1 +#define mmMMEA6_GMI_WR_GRP2VC_MAP 0x33de +#define mmMMEA6_GMI_WR_GRP2VC_MAP_BASE_IDX 1 +#define mmMMEA6_GMI_RD_LAZY 0x33df +#define mmMMEA6_GMI_RD_LAZY_BASE_IDX 1 +#define mmMMEA6_GMI_WR_LAZY 0x33e0 +#define mmMMEA6_GMI_WR_LAZY_BASE_IDX 1 +#define mmMMEA6_GMI_RD_CAM_CNTL 0x33e1 +#define mmMMEA6_GMI_RD_CAM_CNTL_BASE_IDX 1 +#define mmMMEA6_GMI_WR_CAM_CNTL 0x33e2 +#define mmMMEA6_GMI_WR_CAM_CNTL_BASE_IDX 1 +#define mmMMEA6_GMI_PAGE_BURST 0x33e3 +#define mmMMEA6_GMI_PAGE_BURST_BASE_IDX 1 +#define mmMMEA6_GMI_RD_PRI_AGE 0x33e4 +#define mmMMEA6_GMI_RD_PRI_AGE_BASE_IDX 1 +#define mmMMEA6_GMI_WR_PRI_AGE 0x33e5 +#define mmMMEA6_GMI_WR_PRI_AGE_BASE_IDX 1 +#define mmMMEA6_GMI_RD_PRI_QUEUING 0x33e6 +#define mmMMEA6_GMI_RD_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA6_GMI_WR_PRI_QUEUING 0x33e7 +#define mmMMEA6_GMI_WR_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA6_GMI_RD_PRI_FIXED 0x33e8 +#define mmMMEA6_GMI_RD_PRI_FIXED_BASE_IDX 1 +#define mmMMEA6_GMI_WR_PRI_FIXED 0x33e9 +#define mmMMEA6_GMI_WR_PRI_FIXED_BASE_IDX 1 +#define mmMMEA6_GMI_RD_PRI_URGENCY 0x33ea +#define mmMMEA6_GMI_RD_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA6_GMI_WR_PRI_URGENCY 0x33eb +#define mmMMEA6_GMI_WR_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA6_GMI_RD_PRI_URGENCY_MASKING 0x33ec +#define mmMMEA6_GMI_RD_PRI_URGENCY_MASKING_BASE_IDX 1 +#define mmMMEA6_GMI_WR_PRI_URGENCY_MASKING 0x33ed +#define mmMMEA6_GMI_WR_PRI_URGENCY_MASKING_BASE_IDX 1 +#define mmMMEA6_GMI_RD_PRI_QUANT_PRI1 0x33ee +#define mmMMEA6_GMI_RD_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA6_GMI_RD_PRI_QUANT_PRI2 0x33ef +#define mmMMEA6_GMI_RD_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA6_GMI_RD_PRI_QUANT_PRI3 0x33f0 +#define mmMMEA6_GMI_RD_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA6_GMI_WR_PRI_QUANT_PRI1 0x33f1 +#define mmMMEA6_GMI_WR_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA6_GMI_WR_PRI_QUANT_PRI2 0x33f2 +#define mmMMEA6_GMI_WR_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA6_GMI_WR_PRI_QUANT_PRI3 0x33f3 +#define mmMMEA6_GMI_WR_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA6_ADDRNORM_BASE_ADDR0 0x33f4 
+#define mmMMEA6_ADDRNORM_BASE_ADDR0_BASE_IDX 1 +#define mmMMEA6_ADDRNORM_LIMIT_ADDR0 0x33f5 +#define mmMMEA6_ADDRNORM_LIMIT_ADDR0_BASE_IDX 1 +#define mmMMEA6_ADDRNORM_BASE_ADDR1 0x33f6 +#define mmMMEA6_ADDRNORM_BASE_ADDR1_BASE_IDX 1 +#define mmMMEA6_ADDRNORM_LIMIT_ADDR1 0x33f7 +#define mmMMEA6_ADDRNORM_LIMIT_ADDR1_BASE_IDX 1 +#define mmMMEA6_ADDRNORM_OFFSET_ADDR1 0x33f8 +#define mmMMEA6_ADDRNORM_OFFSET_ADDR1_BASE_IDX 1 +#define mmMMEA6_ADDRNORM_BASE_ADDR2 0x33f9 +#define mmMMEA6_ADDRNORM_BASE_ADDR2_BASE_IDX 1 +#define mmMMEA6_ADDRNORM_LIMIT_ADDR2 0x33fa +#define mmMMEA6_ADDRNORM_LIMIT_ADDR2_BASE_IDX 1 +#define mmMMEA6_ADDRNORM_BASE_ADDR3 0x33fb +#define mmMMEA6_ADDRNORM_BASE_ADDR3_BASE_IDX 1 +#define mmMMEA6_ADDRNORM_LIMIT_ADDR3 0x33fc +#define mmMMEA6_ADDRNORM_LIMIT_ADDR3_BASE_IDX 1 +#define mmMMEA6_ADDRNORM_OFFSET_ADDR3 0x33fd +#define mmMMEA6_ADDRNORM_OFFSET_ADDR3_BASE_IDX 1 +#define mmMMEA6_ADDRNORM_BASE_ADDR4 0x33fe +#define mmMMEA6_ADDRNORM_BASE_ADDR4_BASE_IDX 1 +#define mmMMEA6_ADDRNORM_LIMIT_ADDR4 0x33ff +#define mmMMEA6_ADDRNORM_LIMIT_ADDR4_BASE_IDX 1 +#define mmMMEA6_ADDRNORM_BASE_ADDR5 0x3400 +#define mmMMEA6_ADDRNORM_BASE_ADDR5_BASE_IDX 1 +#define mmMMEA6_ADDRNORM_LIMIT_ADDR5 0x3401 +#define mmMMEA6_ADDRNORM_LIMIT_ADDR5_BASE_IDX 1 +#define mmMMEA6_ADDRNORM_OFFSET_ADDR5 0x3402 +#define mmMMEA6_ADDRNORM_OFFSET_ADDR5_BASE_IDX 1 +#define mmMMEA6_ADDRNORMDRAM_HOLE_CNTL 0x3403 +#define mmMMEA6_ADDRNORMDRAM_HOLE_CNTL_BASE_IDX 1 +#define mmMMEA6_ADDRNORMGMI_HOLE_CNTL 0x3404 +#define mmMMEA6_ADDRNORMGMI_HOLE_CNTL_BASE_IDX 1 +#define mmMMEA6_ADDRNORMDRAM_NP2_CHANNEL_CFG 0x3405 +#define mmMMEA6_ADDRNORMDRAM_NP2_CHANNEL_CFG_BASE_IDX 1 +#define mmMMEA6_ADDRNORMGMI_NP2_CHANNEL_CFG 0x3406 +#define mmMMEA6_ADDRNORMGMI_NP2_CHANNEL_CFG_BASE_IDX 1 +#define mmMMEA6_ADDRDEC_BANK_CFG 0x3407 +#define mmMMEA6_ADDRDEC_BANK_CFG_BASE_IDX 1 +#define mmMMEA6_ADDRDEC_MISC_CFG 0x3408 +#define mmMMEA6_ADDRDEC_MISC_CFG_BASE_IDX 1 +#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_BANK0 0x3409 +#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_BANK0_BASE_IDX 1 +#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_BANK1 0x340a +#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_BANK1_BASE_IDX 1 +#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_BANK2 0x340b +#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_BANK2_BASE_IDX 1 +#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_BANK3 0x340c +#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_BANK3_BASE_IDX 1 +#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_BANK4 0x340d +#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_BANK4_BASE_IDX 1 +#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_BANK5 0x340e +#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_BANK5_BASE_IDX 1 +#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_PC 0x340f +#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_PC_BASE_IDX 1 +#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_PC2 0x3410 +#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_PC2_BASE_IDX 1 +#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_CS0 0x3411 +#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_CS0_BASE_IDX 1 +#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_CS1 0x3412 +#define mmMMEA6_ADDRDECDRAM_ADDR_HASH_CS1_BASE_IDX 1 +#define mmMMEA6_ADDRDECDRAM_HARVEST_ENABLE 0x3413 +#define mmMMEA6_ADDRDECDRAM_HARVEST_ENABLE_BASE_IDX 1 +#define mmMMEA6_ADDRDECGMI_ADDR_HASH_BANK0 0x3414 +#define mmMMEA6_ADDRDECGMI_ADDR_HASH_BANK0_BASE_IDX 1 +#define mmMMEA6_ADDRDECGMI_ADDR_HASH_BANK1 0x3415 +#define mmMMEA6_ADDRDECGMI_ADDR_HASH_BANK1_BASE_IDX 1 +#define mmMMEA6_ADDRDECGMI_ADDR_HASH_BANK2 0x3416 +#define mmMMEA6_ADDRDECGMI_ADDR_HASH_BANK2_BASE_IDX 1 +#define mmMMEA6_ADDRDECGMI_ADDR_HASH_BANK3 0x3417 +#define mmMMEA6_ADDRDECGMI_ADDR_HASH_BANK3_BASE_IDX 1 +#define 
mmMMEA6_ADDRDECGMI_ADDR_HASH_BANK4 0x3418 +#define mmMMEA6_ADDRDECGMI_ADDR_HASH_BANK4_BASE_IDX 1 +#define mmMMEA6_ADDRDECGMI_ADDR_HASH_BANK5 0x3419 +#define mmMMEA6_ADDRDECGMI_ADDR_HASH_BANK5_BASE_IDX 1 +#define mmMMEA6_ADDRDECGMI_ADDR_HASH_PC 0x341a +#define mmMMEA6_ADDRDECGMI_ADDR_HASH_PC_BASE_IDX 1 +#define mmMMEA6_ADDRDECGMI_ADDR_HASH_PC2 0x341b +#define mmMMEA6_ADDRDECGMI_ADDR_HASH_PC2_BASE_IDX 1 +#define mmMMEA6_ADDRDECGMI_ADDR_HASH_CS0 0x341c +#define mmMMEA6_ADDRDECGMI_ADDR_HASH_CS0_BASE_IDX 1 +#define mmMMEA6_ADDRDECGMI_ADDR_HASH_CS1 0x341d +#define mmMMEA6_ADDRDECGMI_ADDR_HASH_CS1_BASE_IDX 1 +#define mmMMEA6_ADDRDECGMI_HARVEST_ENABLE 0x341e +#define mmMMEA6_ADDRDECGMI_HARVEST_ENABLE_BASE_IDX 1 +#define mmMMEA6_ADDRDEC0_BASE_ADDR_CS0 0x341f +#define mmMMEA6_ADDRDEC0_BASE_ADDR_CS0_BASE_IDX 1 +#define mmMMEA6_ADDRDEC0_BASE_ADDR_CS1 0x3420 +#define mmMMEA6_ADDRDEC0_BASE_ADDR_CS1_BASE_IDX 1 +#define mmMMEA6_ADDRDEC0_BASE_ADDR_CS2 0x3421 +#define mmMMEA6_ADDRDEC0_BASE_ADDR_CS2_BASE_IDX 1 +#define mmMMEA6_ADDRDEC0_BASE_ADDR_CS3 0x3422 +#define mmMMEA6_ADDRDEC0_BASE_ADDR_CS3_BASE_IDX 1 +#define mmMMEA6_ADDRDEC0_BASE_ADDR_SECCS0 0x3423 +#define mmMMEA6_ADDRDEC0_BASE_ADDR_SECCS0_BASE_IDX 1 +#define mmMMEA6_ADDRDEC0_BASE_ADDR_SECCS1 0x3424 +#define mmMMEA6_ADDRDEC0_BASE_ADDR_SECCS1_BASE_IDX 1 +#define mmMMEA6_ADDRDEC0_BASE_ADDR_SECCS2 0x3425 +#define mmMMEA6_ADDRDEC0_BASE_ADDR_SECCS2_BASE_IDX 1 +#define mmMMEA6_ADDRDEC0_BASE_ADDR_SECCS3 0x3426 +#define mmMMEA6_ADDRDEC0_BASE_ADDR_SECCS3_BASE_IDX 1 +#define mmMMEA6_ADDRDEC0_ADDR_MASK_CS01 0x3427 +#define mmMMEA6_ADDRDEC0_ADDR_MASK_CS01_BASE_IDX 1 +#define mmMMEA6_ADDRDEC0_ADDR_MASK_CS23 0x3428 +#define mmMMEA6_ADDRDEC0_ADDR_MASK_CS23_BASE_IDX 1 +#define mmMMEA6_ADDRDEC0_ADDR_MASK_SECCS01 0x3429 +#define mmMMEA6_ADDRDEC0_ADDR_MASK_SECCS01_BASE_IDX 1 +#define mmMMEA6_ADDRDEC0_ADDR_MASK_SECCS23 0x342a +#define mmMMEA6_ADDRDEC0_ADDR_MASK_SECCS23_BASE_IDX 1 +#define mmMMEA6_ADDRDEC0_ADDR_CFG_CS01 0x342b +#define mmMMEA6_ADDRDEC0_ADDR_CFG_CS01_BASE_IDX 1 +#define mmMMEA6_ADDRDEC0_ADDR_CFG_CS23 0x342c +#define mmMMEA6_ADDRDEC0_ADDR_CFG_CS23_BASE_IDX 1 +#define mmMMEA6_ADDRDEC0_ADDR_SEL_CS01 0x342d +#define mmMMEA6_ADDRDEC0_ADDR_SEL_CS01_BASE_IDX 1 +#define mmMMEA6_ADDRDEC0_ADDR_SEL_CS23 0x342e +#define mmMMEA6_ADDRDEC0_ADDR_SEL_CS23_BASE_IDX 1 +#define mmMMEA6_ADDRDEC0_ADDR_SEL2_CS01 0x342f +#define mmMMEA6_ADDRDEC0_ADDR_SEL2_CS01_BASE_IDX 1 +#define mmMMEA6_ADDRDEC0_ADDR_SEL2_CS23 0x3430 +#define mmMMEA6_ADDRDEC0_ADDR_SEL2_CS23_BASE_IDX 1 +#define mmMMEA6_ADDRDEC0_COL_SEL_LO_CS01 0x3431 +#define mmMMEA6_ADDRDEC0_COL_SEL_LO_CS01_BASE_IDX 1 +#define mmMMEA6_ADDRDEC0_COL_SEL_LO_CS23 0x3432 +#define mmMMEA6_ADDRDEC0_COL_SEL_LO_CS23_BASE_IDX 1 +#define mmMMEA6_ADDRDEC0_COL_SEL_HI_CS01 0x3433 +#define mmMMEA6_ADDRDEC0_COL_SEL_HI_CS01_BASE_IDX 1 +#define mmMMEA6_ADDRDEC0_COL_SEL_HI_CS23 0x3434 +#define mmMMEA6_ADDRDEC0_COL_SEL_HI_CS23_BASE_IDX 1 +#define mmMMEA6_ADDRDEC0_RM_SEL_CS01 0x3435 +#define mmMMEA6_ADDRDEC0_RM_SEL_CS01_BASE_IDX 1 +#define mmMMEA6_ADDRDEC0_RM_SEL_CS23 0x3436 +#define mmMMEA6_ADDRDEC0_RM_SEL_CS23_BASE_IDX 1 +#define mmMMEA6_ADDRDEC0_RM_SEL_SECCS01 0x3437 +#define mmMMEA6_ADDRDEC0_RM_SEL_SECCS01_BASE_IDX 1 +#define mmMMEA6_ADDRDEC0_RM_SEL_SECCS23 0x3438 +#define mmMMEA6_ADDRDEC0_RM_SEL_SECCS23_BASE_IDX 1 +#define mmMMEA6_ADDRDEC1_BASE_ADDR_CS0 0x3439 +#define mmMMEA6_ADDRDEC1_BASE_ADDR_CS0_BASE_IDX 1 +#define mmMMEA6_ADDRDEC1_BASE_ADDR_CS1 0x343a +#define mmMMEA6_ADDRDEC1_BASE_ADDR_CS1_BASE_IDX 1 +#define 
mmMMEA6_ADDRDEC1_BASE_ADDR_CS2 0x343b +#define mmMMEA6_ADDRDEC1_BASE_ADDR_CS2_BASE_IDX 1 +#define mmMMEA6_ADDRDEC1_BASE_ADDR_CS3 0x343c +#define mmMMEA6_ADDRDEC1_BASE_ADDR_CS3_BASE_IDX 1 +#define mmMMEA6_ADDRDEC1_BASE_ADDR_SECCS0 0x343d +#define mmMMEA6_ADDRDEC1_BASE_ADDR_SECCS0_BASE_IDX 1 +#define mmMMEA6_ADDRDEC1_BASE_ADDR_SECCS1 0x343e +#define mmMMEA6_ADDRDEC1_BASE_ADDR_SECCS1_BASE_IDX 1 +#define mmMMEA6_ADDRDEC1_BASE_ADDR_SECCS2 0x343f +#define mmMMEA6_ADDRDEC1_BASE_ADDR_SECCS2_BASE_IDX 1 +#define mmMMEA6_ADDRDEC1_BASE_ADDR_SECCS3 0x3440 +#define mmMMEA6_ADDRDEC1_BASE_ADDR_SECCS3_BASE_IDX 1 +#define mmMMEA6_ADDRDEC1_ADDR_MASK_CS01 0x3441 +#define mmMMEA6_ADDRDEC1_ADDR_MASK_CS01_BASE_IDX 1 +#define mmMMEA6_ADDRDEC1_ADDR_MASK_CS23 0x3442 +#define mmMMEA6_ADDRDEC1_ADDR_MASK_CS23_BASE_IDX 1 +#define mmMMEA6_ADDRDEC1_ADDR_MASK_SECCS01 0x3443 +#define mmMMEA6_ADDRDEC1_ADDR_MASK_SECCS01_BASE_IDX 1 +#define mmMMEA6_ADDRDEC1_ADDR_MASK_SECCS23 0x3444 +#define mmMMEA6_ADDRDEC1_ADDR_MASK_SECCS23_BASE_IDX 1 +#define mmMMEA6_ADDRDEC1_ADDR_CFG_CS01 0x3445 +#define mmMMEA6_ADDRDEC1_ADDR_CFG_CS01_BASE_IDX 1 +#define mmMMEA6_ADDRDEC1_ADDR_CFG_CS23 0x3446 +#define mmMMEA6_ADDRDEC1_ADDR_CFG_CS23_BASE_IDX 1 +#define mmMMEA6_ADDRDEC1_ADDR_SEL_CS01 0x3447 +#define mmMMEA6_ADDRDEC1_ADDR_SEL_CS01_BASE_IDX 1 +#define mmMMEA6_ADDRDEC1_ADDR_SEL_CS23 0x3448 +#define mmMMEA6_ADDRDEC1_ADDR_SEL_CS23_BASE_IDX 1 +#define mmMMEA6_ADDRDEC1_ADDR_SEL2_CS01 0x3449 +#define mmMMEA6_ADDRDEC1_ADDR_SEL2_CS01_BASE_IDX 1 +#define mmMMEA6_ADDRDEC1_ADDR_SEL2_CS23 0x344a +#define mmMMEA6_ADDRDEC1_ADDR_SEL2_CS23_BASE_IDX 1 +#define mmMMEA6_ADDRDEC1_COL_SEL_LO_CS01 0x344b +#define mmMMEA6_ADDRDEC1_COL_SEL_LO_CS01_BASE_IDX 1 +#define mmMMEA6_ADDRDEC1_COL_SEL_LO_CS23 0x344c +#define mmMMEA6_ADDRDEC1_COL_SEL_LO_CS23_BASE_IDX 1 +#define mmMMEA6_ADDRDEC1_COL_SEL_HI_CS01 0x344d +#define mmMMEA6_ADDRDEC1_COL_SEL_HI_CS01_BASE_IDX 1 +#define mmMMEA6_ADDRDEC1_COL_SEL_HI_CS23 0x344e +#define mmMMEA6_ADDRDEC1_COL_SEL_HI_CS23_BASE_IDX 1 +#define mmMMEA6_ADDRDEC1_RM_SEL_CS01 0x344f +#define mmMMEA6_ADDRDEC1_RM_SEL_CS01_BASE_IDX 1 +#define mmMMEA6_ADDRDEC1_RM_SEL_CS23 0x3450 +#define mmMMEA6_ADDRDEC1_RM_SEL_CS23_BASE_IDX 1 +#define mmMMEA6_ADDRDEC1_RM_SEL_SECCS01 0x3451 +#define mmMMEA6_ADDRDEC1_RM_SEL_SECCS01_BASE_IDX 1 +#define mmMMEA6_ADDRDEC1_RM_SEL_SECCS23 0x3452 +#define mmMMEA6_ADDRDEC1_RM_SEL_SECCS23_BASE_IDX 1 +#define mmMMEA6_ADDRDEC2_BASE_ADDR_CS0 0x3453 +#define mmMMEA6_ADDRDEC2_BASE_ADDR_CS0_BASE_IDX 1 +#define mmMMEA6_ADDRDEC2_BASE_ADDR_CS1 0x3454 +#define mmMMEA6_ADDRDEC2_BASE_ADDR_CS1_BASE_IDX 1 +#define mmMMEA6_ADDRDEC2_BASE_ADDR_CS2 0x3455 +#define mmMMEA6_ADDRDEC2_BASE_ADDR_CS2_BASE_IDX 1 +#define mmMMEA6_ADDRDEC2_BASE_ADDR_CS3 0x3456 +#define mmMMEA6_ADDRDEC2_BASE_ADDR_CS3_BASE_IDX 1 +#define mmMMEA6_ADDRDEC2_BASE_ADDR_SECCS0 0x3457 +#define mmMMEA6_ADDRDEC2_BASE_ADDR_SECCS0_BASE_IDX 1 +#define mmMMEA6_ADDRDEC2_BASE_ADDR_SECCS1 0x3458 +#define mmMMEA6_ADDRDEC2_BASE_ADDR_SECCS1_BASE_IDX 1 +#define mmMMEA6_ADDRDEC2_BASE_ADDR_SECCS2 0x3459 +#define mmMMEA6_ADDRDEC2_BASE_ADDR_SECCS2_BASE_IDX 1 +#define mmMMEA6_ADDRDEC2_BASE_ADDR_SECCS3 0x345a +#define mmMMEA6_ADDRDEC2_BASE_ADDR_SECCS3_BASE_IDX 1 +#define mmMMEA6_ADDRDEC2_ADDR_MASK_CS01 0x345b +#define mmMMEA6_ADDRDEC2_ADDR_MASK_CS01_BASE_IDX 1 +#define mmMMEA6_ADDRDEC2_ADDR_MASK_CS23 0x345c +#define mmMMEA6_ADDRDEC2_ADDR_MASK_CS23_BASE_IDX 1 +#define mmMMEA6_ADDRDEC2_ADDR_MASK_SECCS01 0x345d +#define mmMMEA6_ADDRDEC2_ADDR_MASK_SECCS01_BASE_IDX 1 +#define 
mmMMEA6_ADDRDEC2_ADDR_MASK_SECCS23 0x345e +#define mmMMEA6_ADDRDEC2_ADDR_MASK_SECCS23_BASE_IDX 1 +#define mmMMEA6_ADDRDEC2_ADDR_CFG_CS01 0x345f +#define mmMMEA6_ADDRDEC2_ADDR_CFG_CS01_BASE_IDX 1 +#define mmMMEA6_ADDRDEC2_ADDR_CFG_CS23 0x3460 +#define mmMMEA6_ADDRDEC2_ADDR_CFG_CS23_BASE_IDX 1 +#define mmMMEA6_ADDRDEC2_ADDR_SEL_CS01 0x3461 +#define mmMMEA6_ADDRDEC2_ADDR_SEL_CS01_BASE_IDX 1 +#define mmMMEA6_ADDRDEC2_ADDR_SEL_CS23 0x3462 +#define mmMMEA6_ADDRDEC2_ADDR_SEL_CS23_BASE_IDX 1 +#define mmMMEA6_ADDRDEC2_ADDR_SEL2_CS01 0x3463 +#define mmMMEA6_ADDRDEC2_ADDR_SEL2_CS01_BASE_IDX 1 +#define mmMMEA6_ADDRDEC2_ADDR_SEL2_CS23 0x3464 +#define mmMMEA6_ADDRDEC2_ADDR_SEL2_CS23_BASE_IDX 1 +#define mmMMEA6_ADDRDEC2_COL_SEL_LO_CS01 0x3465 +#define mmMMEA6_ADDRDEC2_COL_SEL_LO_CS01_BASE_IDX 1 +#define mmMMEA6_ADDRDEC2_COL_SEL_LO_CS23 0x3466 +#define mmMMEA6_ADDRDEC2_COL_SEL_LO_CS23_BASE_IDX 1 +#define mmMMEA6_ADDRDEC2_COL_SEL_HI_CS01 0x3467 +#define mmMMEA6_ADDRDEC2_COL_SEL_HI_CS01_BASE_IDX 1 +#define mmMMEA6_ADDRDEC2_COL_SEL_HI_CS23 0x3468 +#define mmMMEA6_ADDRDEC2_COL_SEL_HI_CS23_BASE_IDX 1 +#define mmMMEA6_ADDRDEC2_RM_SEL_CS01 0x3469 +#define mmMMEA6_ADDRDEC2_RM_SEL_CS01_BASE_IDX 1 +#define mmMMEA6_ADDRDEC2_RM_SEL_CS23 0x346a +#define mmMMEA6_ADDRDEC2_RM_SEL_CS23_BASE_IDX 1 +#define mmMMEA6_ADDRDEC2_RM_SEL_SECCS01 0x346b +#define mmMMEA6_ADDRDEC2_RM_SEL_SECCS01_BASE_IDX 1 +#define mmMMEA6_ADDRDEC2_RM_SEL_SECCS23 0x346c +#define mmMMEA6_ADDRDEC2_RM_SEL_SECCS23_BASE_IDX 1 +#define mmMMEA6_ADDRNORMDRAM_GLOBAL_CNTL 0x346d +#define mmMMEA6_ADDRNORMDRAM_GLOBAL_CNTL_BASE_IDX 1 +#define mmMMEA6_ADDRNORMGMI_GLOBAL_CNTL 0x346e +#define mmMMEA6_ADDRNORMGMI_GLOBAL_CNTL_BASE_IDX 1 +#define mmMMEA6_IO_RD_CLI2GRP_MAP0 0x3495 +#define mmMMEA6_IO_RD_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA6_IO_RD_CLI2GRP_MAP1 0x3496 +#define mmMMEA6_IO_RD_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA6_IO_WR_CLI2GRP_MAP0 0x3497 +#define mmMMEA6_IO_WR_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA6_IO_WR_CLI2GRP_MAP1 0x3498 +#define mmMMEA6_IO_WR_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA6_IO_RD_COMBINE_FLUSH 0x3499 +#define mmMMEA6_IO_RD_COMBINE_FLUSH_BASE_IDX 1 +#define mmMMEA6_IO_WR_COMBINE_FLUSH 0x349a +#define mmMMEA6_IO_WR_COMBINE_FLUSH_BASE_IDX 1 +#define mmMMEA6_IO_GROUP_BURST 0x349b +#define mmMMEA6_IO_GROUP_BURST_BASE_IDX 1 +#define mmMMEA6_IO_RD_PRI_AGE 0x349c +#define mmMMEA6_IO_RD_PRI_AGE_BASE_IDX 1 +#define mmMMEA6_IO_WR_PRI_AGE 0x349d +#define mmMMEA6_IO_WR_PRI_AGE_BASE_IDX 1 +#define mmMMEA6_IO_RD_PRI_QUEUING 0x349e +#define mmMMEA6_IO_RD_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA6_IO_WR_PRI_QUEUING 0x349f +#define mmMMEA6_IO_WR_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA6_IO_RD_PRI_FIXED 0x34a0 +#define mmMMEA6_IO_RD_PRI_FIXED_BASE_IDX 1 +#define mmMMEA6_IO_WR_PRI_FIXED 0x34a1 +#define mmMMEA6_IO_WR_PRI_FIXED_BASE_IDX 1 +#define mmMMEA6_IO_RD_PRI_URGENCY 0x34a2 +#define mmMMEA6_IO_RD_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA6_IO_WR_PRI_URGENCY 0x34a3 +#define mmMMEA6_IO_WR_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA6_IO_RD_PRI_URGENCY_MASKING 0x34a4 +#define mmMMEA6_IO_RD_PRI_URGENCY_MASKING_BASE_IDX 1 +#define mmMMEA6_IO_WR_PRI_URGENCY_MASKING 0x34a5 +#define mmMMEA6_IO_WR_PRI_URGENCY_MASKING_BASE_IDX 1 +#define mmMMEA6_IO_RD_PRI_QUANT_PRI1 0x34a6 +#define mmMMEA6_IO_RD_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA6_IO_RD_PRI_QUANT_PRI2 0x34a7 +#define mmMMEA6_IO_RD_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA6_IO_RD_PRI_QUANT_PRI3 0x34a8 +#define mmMMEA6_IO_RD_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA6_IO_WR_PRI_QUANT_PRI1 0x34a9 +#define 
mmMMEA6_IO_WR_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA6_IO_WR_PRI_QUANT_PRI2 0x34aa +#define mmMMEA6_IO_WR_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA6_IO_WR_PRI_QUANT_PRI3 0x34ab +#define mmMMEA6_IO_WR_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA6_SDP_ARB_DRAM 0x34ac +#define mmMMEA6_SDP_ARB_DRAM_BASE_IDX 1 +#define mmMMEA6_SDP_ARB_GMI 0x34ad +#define mmMMEA6_SDP_ARB_GMI_BASE_IDX 1 +#define mmMMEA6_SDP_ARB_FINAL 0x34ae +#define mmMMEA6_SDP_ARB_FINAL_BASE_IDX 1 +#define mmMMEA6_SDP_DRAM_PRIORITY 0x34af +#define mmMMEA6_SDP_DRAM_PRIORITY_BASE_IDX 1 +#define mmMMEA6_SDP_GMI_PRIORITY 0x34b0 +#define mmMMEA6_SDP_GMI_PRIORITY_BASE_IDX 1 +#define mmMMEA6_SDP_IO_PRIORITY 0x34b1 +#define mmMMEA6_SDP_IO_PRIORITY_BASE_IDX 1 +#define mmMMEA6_SDP_CREDITS 0x34b2 +#define mmMMEA6_SDP_CREDITS_BASE_IDX 1 +#define mmMMEA6_SDP_TAG_RESERVE0 0x34b3 +#define mmMMEA6_SDP_TAG_RESERVE0_BASE_IDX 1 +#define mmMMEA6_SDP_TAG_RESERVE1 0x34b4 +#define mmMMEA6_SDP_TAG_RESERVE1_BASE_IDX 1 +#define mmMMEA6_SDP_VCC_RESERVE0 0x34b5 +#define mmMMEA6_SDP_VCC_RESERVE0_BASE_IDX 1 +#define mmMMEA6_SDP_VCC_RESERVE1 0x34b6 +#define mmMMEA6_SDP_VCC_RESERVE1_BASE_IDX 1 +#define mmMMEA6_SDP_VCD_RESERVE0 0x34b7 +#define mmMMEA6_SDP_VCD_RESERVE0_BASE_IDX 1 +#define mmMMEA6_SDP_VCD_RESERVE1 0x34b8 +#define mmMMEA6_SDP_VCD_RESERVE1_BASE_IDX 1 +#define mmMMEA6_SDP_REQ_CNTL 0x34b9 +#define mmMMEA6_SDP_REQ_CNTL_BASE_IDX 1 +#define mmMMEA6_MISC 0x34ba +#define mmMMEA6_MISC_BASE_IDX 1 +#define mmMMEA6_LATENCY_SAMPLING 0x34bb +#define mmMMEA6_LATENCY_SAMPLING_BASE_IDX 1 +#define mmMMEA6_PERFCOUNTER_LO 0x34bc +#define mmMMEA6_PERFCOUNTER_LO_BASE_IDX 1 +#define mmMMEA6_PERFCOUNTER_HI 0x34bd +#define mmMMEA6_PERFCOUNTER_HI_BASE_IDX 1 +#define mmMMEA6_PERFCOUNTER0_CFG 0x34be +#define mmMMEA6_PERFCOUNTER0_CFG_BASE_IDX 1 +#define mmMMEA6_PERFCOUNTER1_CFG 0x34bf +#define mmMMEA6_PERFCOUNTER1_CFG_BASE_IDX 1 +#define mmMMEA6_PERFCOUNTER_RSLT_CNTL 0x34c0 +#define mmMMEA6_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1 +#define mmMMEA6_EDC_CNT 0x34c6 +#define mmMMEA6_EDC_CNT_BASE_IDX 1 +#define mmMMEA6_EDC_CNT2 0x34c7 +#define mmMMEA6_EDC_CNT2_BASE_IDX 1 +#define mmMMEA6_DSM_CNTL 0x34c8 +#define mmMMEA6_DSM_CNTL_BASE_IDX 1 +#define mmMMEA6_DSM_CNTLA 0x34c9 +#define mmMMEA6_DSM_CNTLA_BASE_IDX 1 +#define mmMMEA6_DSM_CNTLB 0x34ca +#define mmMMEA6_DSM_CNTLB_BASE_IDX 1 +#define mmMMEA6_DSM_CNTL2 0x34cb +#define mmMMEA6_DSM_CNTL2_BASE_IDX 1 +#define mmMMEA6_DSM_CNTL2A 0x34cc +#define mmMMEA6_DSM_CNTL2A_BASE_IDX 1 +#define mmMMEA6_DSM_CNTL2B 0x34cd +#define mmMMEA6_DSM_CNTL2B_BASE_IDX 1 +#define mmMMEA6_CGTT_CLK_CTRL 0x34cf +#define mmMMEA6_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmMMEA6_EDC_MODE 0x34d0 +#define mmMMEA6_EDC_MODE_BASE_IDX 1 +#define mmMMEA6_ERR_STATUS 0x34d1 +#define mmMMEA6_ERR_STATUS_BASE_IDX 1 +#define mmMMEA6_MISC2 0x34d2 +#define mmMMEA6_MISC2_BASE_IDX 1 +#define mmMMEA6_ADDRDEC_SELECT 0x34d3 +#define mmMMEA6_ADDRDEC_SELECT_BASE_IDX 1 +#define mmMMEA6_EDC_CNT3 0x34d4 +#define mmMMEA6_EDC_CNT3_BASE_IDX 1 + + +// addressBlock: mmhub_ea_mmeadec7 +// base address: 0x75400 +#define mmMMEA7_DRAM_RD_CLI2GRP_MAP0 0x3500 +#define mmMMEA7_DRAM_RD_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA7_DRAM_RD_CLI2GRP_MAP1 0x3501 +#define mmMMEA7_DRAM_RD_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA7_DRAM_WR_CLI2GRP_MAP0 0x3502 +#define mmMMEA7_DRAM_WR_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA7_DRAM_WR_CLI2GRP_MAP1 0x3503 +#define mmMMEA7_DRAM_WR_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA7_DRAM_RD_GRP2VC_MAP 0x3504 +#define mmMMEA7_DRAM_RD_GRP2VC_MAP_BASE_IDX 1 +#define mmMMEA7_DRAM_WR_GRP2VC_MAP 
0x3505 +#define mmMMEA7_DRAM_WR_GRP2VC_MAP_BASE_IDX 1 +#define mmMMEA7_DRAM_RD_LAZY 0x3506 +#define mmMMEA7_DRAM_RD_LAZY_BASE_IDX 1 +#define mmMMEA7_DRAM_WR_LAZY 0x3507 +#define mmMMEA7_DRAM_WR_LAZY_BASE_IDX 1 +#define mmMMEA7_DRAM_RD_CAM_CNTL 0x3508 +#define mmMMEA7_DRAM_RD_CAM_CNTL_BASE_IDX 1 +#define mmMMEA7_DRAM_WR_CAM_CNTL 0x3509 +#define mmMMEA7_DRAM_WR_CAM_CNTL_BASE_IDX 1 +#define mmMMEA7_DRAM_PAGE_BURST 0x350a +#define mmMMEA7_DRAM_PAGE_BURST_BASE_IDX 1 +#define mmMMEA7_DRAM_RD_PRI_AGE 0x350b +#define mmMMEA7_DRAM_RD_PRI_AGE_BASE_IDX 1 +#define mmMMEA7_DRAM_WR_PRI_AGE 0x350c +#define mmMMEA7_DRAM_WR_PRI_AGE_BASE_IDX 1 +#define mmMMEA7_DRAM_RD_PRI_QUEUING 0x350d +#define mmMMEA7_DRAM_RD_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA7_DRAM_WR_PRI_QUEUING 0x350e +#define mmMMEA7_DRAM_WR_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA7_DRAM_RD_PRI_FIXED 0x350f +#define mmMMEA7_DRAM_RD_PRI_FIXED_BASE_IDX 1 +#define mmMMEA7_DRAM_WR_PRI_FIXED 0x3510 +#define mmMMEA7_DRAM_WR_PRI_FIXED_BASE_IDX 1 +#define mmMMEA7_DRAM_RD_PRI_URGENCY 0x3511 +#define mmMMEA7_DRAM_RD_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA7_DRAM_WR_PRI_URGENCY 0x3512 +#define mmMMEA7_DRAM_WR_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA7_DRAM_RD_PRI_QUANT_PRI1 0x3513 +#define mmMMEA7_DRAM_RD_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA7_DRAM_RD_PRI_QUANT_PRI2 0x3514 +#define mmMMEA7_DRAM_RD_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA7_DRAM_RD_PRI_QUANT_PRI3 0x3515 +#define mmMMEA7_DRAM_RD_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA7_DRAM_WR_PRI_QUANT_PRI1 0x3516 +#define mmMMEA7_DRAM_WR_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA7_DRAM_WR_PRI_QUANT_PRI2 0x3517 +#define mmMMEA7_DRAM_WR_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA7_DRAM_WR_PRI_QUANT_PRI3 0x3518 +#define mmMMEA7_DRAM_WR_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA7_GMI_RD_CLI2GRP_MAP0 0x3519 +#define mmMMEA7_GMI_RD_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA7_GMI_RD_CLI2GRP_MAP1 0x351a +#define mmMMEA7_GMI_RD_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA7_GMI_WR_CLI2GRP_MAP0 0x351b +#define mmMMEA7_GMI_WR_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA7_GMI_WR_CLI2GRP_MAP1 0x351c +#define mmMMEA7_GMI_WR_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA7_GMI_RD_GRP2VC_MAP 0x351d +#define mmMMEA7_GMI_RD_GRP2VC_MAP_BASE_IDX 1 +#define mmMMEA7_GMI_WR_GRP2VC_MAP 0x351e +#define mmMMEA7_GMI_WR_GRP2VC_MAP_BASE_IDX 1 +#define mmMMEA7_GMI_RD_LAZY 0x351f +#define mmMMEA7_GMI_RD_LAZY_BASE_IDX 1 +#define mmMMEA7_GMI_WR_LAZY 0x3520 +#define mmMMEA7_GMI_WR_LAZY_BASE_IDX 1 +#define mmMMEA7_GMI_RD_CAM_CNTL 0x3521 +#define mmMMEA7_GMI_RD_CAM_CNTL_BASE_IDX 1 +#define mmMMEA7_GMI_WR_CAM_CNTL 0x3522 +#define mmMMEA7_GMI_WR_CAM_CNTL_BASE_IDX 1 +#define mmMMEA7_GMI_PAGE_BURST 0x3523 +#define mmMMEA7_GMI_PAGE_BURST_BASE_IDX 1 +#define mmMMEA7_GMI_RD_PRI_AGE 0x3524 +#define mmMMEA7_GMI_RD_PRI_AGE_BASE_IDX 1 +#define mmMMEA7_GMI_WR_PRI_AGE 0x3525 +#define mmMMEA7_GMI_WR_PRI_AGE_BASE_IDX 1 +#define mmMMEA7_GMI_RD_PRI_QUEUING 0x3526 +#define mmMMEA7_GMI_RD_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA7_GMI_WR_PRI_QUEUING 0x3527 +#define mmMMEA7_GMI_WR_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA7_GMI_RD_PRI_FIXED 0x3528 +#define mmMMEA7_GMI_RD_PRI_FIXED_BASE_IDX 1 +#define mmMMEA7_GMI_WR_PRI_FIXED 0x3529 +#define mmMMEA7_GMI_WR_PRI_FIXED_BASE_IDX 1 +#define mmMMEA7_GMI_RD_PRI_URGENCY 0x352a +#define mmMMEA7_GMI_RD_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA7_GMI_WR_PRI_URGENCY 0x352b +#define mmMMEA7_GMI_WR_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA7_GMI_RD_PRI_URGENCY_MASKING 0x352c +#define mmMMEA7_GMI_RD_PRI_URGENCY_MASKING_BASE_IDX 1 +#define 
mmMMEA7_GMI_WR_PRI_URGENCY_MASKING 0x352d +#define mmMMEA7_GMI_WR_PRI_URGENCY_MASKING_BASE_IDX 1 +#define mmMMEA7_GMI_RD_PRI_QUANT_PRI1 0x352e +#define mmMMEA7_GMI_RD_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA7_GMI_RD_PRI_QUANT_PRI2 0x352f +#define mmMMEA7_GMI_RD_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA7_GMI_RD_PRI_QUANT_PRI3 0x3530 +#define mmMMEA7_GMI_RD_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA7_GMI_WR_PRI_QUANT_PRI1 0x3531 +#define mmMMEA7_GMI_WR_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA7_GMI_WR_PRI_QUANT_PRI2 0x3532 +#define mmMMEA7_GMI_WR_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA7_GMI_WR_PRI_QUANT_PRI3 0x3533 +#define mmMMEA7_GMI_WR_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA7_ADDRNORM_BASE_ADDR0 0x3534 +#define mmMMEA7_ADDRNORM_BASE_ADDR0_BASE_IDX 1 +#define mmMMEA7_ADDRNORM_LIMIT_ADDR0 0x3535 +#define mmMMEA7_ADDRNORM_LIMIT_ADDR0_BASE_IDX 1 +#define mmMMEA7_ADDRNORM_BASE_ADDR1 0x3536 +#define mmMMEA7_ADDRNORM_BASE_ADDR1_BASE_IDX 1 +#define mmMMEA7_ADDRNORM_LIMIT_ADDR1 0x3537 +#define mmMMEA7_ADDRNORM_LIMIT_ADDR1_BASE_IDX 1 +#define mmMMEA7_ADDRNORM_OFFSET_ADDR1 0x3538 +#define mmMMEA7_ADDRNORM_OFFSET_ADDR1_BASE_IDX 1 +#define mmMMEA7_ADDRNORM_BASE_ADDR2 0x3539 +#define mmMMEA7_ADDRNORM_BASE_ADDR2_BASE_IDX 1 +#define mmMMEA7_ADDRNORM_LIMIT_ADDR2 0x353a +#define mmMMEA7_ADDRNORM_LIMIT_ADDR2_BASE_IDX 1 +#define mmMMEA7_ADDRNORM_BASE_ADDR3 0x353b +#define mmMMEA7_ADDRNORM_BASE_ADDR3_BASE_IDX 1 +#define mmMMEA7_ADDRNORM_LIMIT_ADDR3 0x353c +#define mmMMEA7_ADDRNORM_LIMIT_ADDR3_BASE_IDX 1 +#define mmMMEA7_ADDRNORM_OFFSET_ADDR3 0x353d +#define mmMMEA7_ADDRNORM_OFFSET_ADDR3_BASE_IDX 1 +#define mmMMEA7_ADDRNORM_BASE_ADDR4 0x353e +#define mmMMEA7_ADDRNORM_BASE_ADDR4_BASE_IDX 1 +#define mmMMEA7_ADDRNORM_LIMIT_ADDR4 0x353f +#define mmMMEA7_ADDRNORM_LIMIT_ADDR4_BASE_IDX 1 +#define mmMMEA7_ADDRNORM_BASE_ADDR5 0x3540 +#define mmMMEA7_ADDRNORM_BASE_ADDR5_BASE_IDX 1 +#define mmMMEA7_ADDRNORM_LIMIT_ADDR5 0x3541 +#define mmMMEA7_ADDRNORM_LIMIT_ADDR5_BASE_IDX 1 +#define mmMMEA7_ADDRNORM_OFFSET_ADDR5 0x3542 +#define mmMMEA7_ADDRNORM_OFFSET_ADDR5_BASE_IDX 1 +#define mmMMEA7_ADDRNORMDRAM_HOLE_CNTL 0x3543 +#define mmMMEA7_ADDRNORMDRAM_HOLE_CNTL_BASE_IDX 1 +#define mmMMEA7_ADDRNORMGMI_HOLE_CNTL 0x3544 +#define mmMMEA7_ADDRNORMGMI_HOLE_CNTL_BASE_IDX 1 +#define mmMMEA7_ADDRNORMDRAM_NP2_CHANNEL_CFG 0x3545 +#define mmMMEA7_ADDRNORMDRAM_NP2_CHANNEL_CFG_BASE_IDX 1 +#define mmMMEA7_ADDRNORMGMI_NP2_CHANNEL_CFG 0x3546 +#define mmMMEA7_ADDRNORMGMI_NP2_CHANNEL_CFG_BASE_IDX 1 +#define mmMMEA7_ADDRDEC_BANK_CFG 0x3547 +#define mmMMEA7_ADDRDEC_BANK_CFG_BASE_IDX 1 +#define mmMMEA7_ADDRDEC_MISC_CFG 0x3548 +#define mmMMEA7_ADDRDEC_MISC_CFG_BASE_IDX 1 +#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_BANK0 0x3549 +#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_BANK0_BASE_IDX 1 +#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_BANK1 0x354a +#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_BANK1_BASE_IDX 1 +#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_BANK2 0x354b +#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_BANK2_BASE_IDX 1 +#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_BANK3 0x354c +#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_BANK3_BASE_IDX 1 +#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_BANK4 0x354d +#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_BANK4_BASE_IDX 1 +#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_BANK5 0x354e +#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_BANK5_BASE_IDX 1 +#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_PC 0x354f +#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_PC_BASE_IDX 1 +#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_PC2 0x3550 +#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_PC2_BASE_IDX 1 +#define 
mmMMEA7_ADDRDECDRAM_ADDR_HASH_CS0 0x3551 +#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_CS0_BASE_IDX 1 +#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_CS1 0x3552 +#define mmMMEA7_ADDRDECDRAM_ADDR_HASH_CS1_BASE_IDX 1 +#define mmMMEA7_ADDRDECDRAM_HARVEST_ENABLE 0x3553 +#define mmMMEA7_ADDRDECDRAM_HARVEST_ENABLE_BASE_IDX 1 +#define mmMMEA7_ADDRDECGMI_ADDR_HASH_BANK0 0x3554 +#define mmMMEA7_ADDRDECGMI_ADDR_HASH_BANK0_BASE_IDX 1 +#define mmMMEA7_ADDRDECGMI_ADDR_HASH_BANK1 0x3555 +#define mmMMEA7_ADDRDECGMI_ADDR_HASH_BANK1_BASE_IDX 1 +#define mmMMEA7_ADDRDECGMI_ADDR_HASH_BANK2 0x3556 +#define mmMMEA7_ADDRDECGMI_ADDR_HASH_BANK2_BASE_IDX 1 +#define mmMMEA7_ADDRDECGMI_ADDR_HASH_BANK3 0x3557 +#define mmMMEA7_ADDRDECGMI_ADDR_HASH_BANK3_BASE_IDX 1 +#define mmMMEA7_ADDRDECGMI_ADDR_HASH_BANK4 0x3558 +#define mmMMEA7_ADDRDECGMI_ADDR_HASH_BANK4_BASE_IDX 1 +#define mmMMEA7_ADDRDECGMI_ADDR_HASH_BANK5 0x3559 +#define mmMMEA7_ADDRDECGMI_ADDR_HASH_BANK5_BASE_IDX 1 +#define mmMMEA7_ADDRDECGMI_ADDR_HASH_PC 0x355a +#define mmMMEA7_ADDRDECGMI_ADDR_HASH_PC_BASE_IDX 1 +#define mmMMEA7_ADDRDECGMI_ADDR_HASH_PC2 0x355b +#define mmMMEA7_ADDRDECGMI_ADDR_HASH_PC2_BASE_IDX 1 +#define mmMMEA7_ADDRDECGMI_ADDR_HASH_CS0 0x355c +#define mmMMEA7_ADDRDECGMI_ADDR_HASH_CS0_BASE_IDX 1 +#define mmMMEA7_ADDRDECGMI_ADDR_HASH_CS1 0x355d +#define mmMMEA7_ADDRDECGMI_ADDR_HASH_CS1_BASE_IDX 1 +#define mmMMEA7_ADDRDECGMI_HARVEST_ENABLE 0x355e +#define mmMMEA7_ADDRDECGMI_HARVEST_ENABLE_BASE_IDX 1 +#define mmMMEA7_ADDRDEC0_BASE_ADDR_CS0 0x355f +#define mmMMEA7_ADDRDEC0_BASE_ADDR_CS0_BASE_IDX 1 +#define mmMMEA7_ADDRDEC0_BASE_ADDR_CS1 0x3560 +#define mmMMEA7_ADDRDEC0_BASE_ADDR_CS1_BASE_IDX 1 +#define mmMMEA7_ADDRDEC0_BASE_ADDR_CS2 0x3561 +#define mmMMEA7_ADDRDEC0_BASE_ADDR_CS2_BASE_IDX 1 +#define mmMMEA7_ADDRDEC0_BASE_ADDR_CS3 0x3562 +#define mmMMEA7_ADDRDEC0_BASE_ADDR_CS3_BASE_IDX 1 +#define mmMMEA7_ADDRDEC0_BASE_ADDR_SECCS0 0x3563 +#define mmMMEA7_ADDRDEC0_BASE_ADDR_SECCS0_BASE_IDX 1 +#define mmMMEA7_ADDRDEC0_BASE_ADDR_SECCS1 0x3564 +#define mmMMEA7_ADDRDEC0_BASE_ADDR_SECCS1_BASE_IDX 1 +#define mmMMEA7_ADDRDEC0_BASE_ADDR_SECCS2 0x3565 +#define mmMMEA7_ADDRDEC0_BASE_ADDR_SECCS2_BASE_IDX 1 +#define mmMMEA7_ADDRDEC0_BASE_ADDR_SECCS3 0x3566 +#define mmMMEA7_ADDRDEC0_BASE_ADDR_SECCS3_BASE_IDX 1 +#define mmMMEA7_ADDRDEC0_ADDR_MASK_CS01 0x3567 +#define mmMMEA7_ADDRDEC0_ADDR_MASK_CS01_BASE_IDX 1 +#define mmMMEA7_ADDRDEC0_ADDR_MASK_CS23 0x3568 +#define mmMMEA7_ADDRDEC0_ADDR_MASK_CS23_BASE_IDX 1 +#define mmMMEA7_ADDRDEC0_ADDR_MASK_SECCS01 0x3569 +#define mmMMEA7_ADDRDEC0_ADDR_MASK_SECCS01_BASE_IDX 1 +#define mmMMEA7_ADDRDEC0_ADDR_MASK_SECCS23 0x356a +#define mmMMEA7_ADDRDEC0_ADDR_MASK_SECCS23_BASE_IDX 1 +#define mmMMEA7_ADDRDEC0_ADDR_CFG_CS01 0x356b +#define mmMMEA7_ADDRDEC0_ADDR_CFG_CS01_BASE_IDX 1 +#define mmMMEA7_ADDRDEC0_ADDR_CFG_CS23 0x356c +#define mmMMEA7_ADDRDEC0_ADDR_CFG_CS23_BASE_IDX 1 +#define mmMMEA7_ADDRDEC0_ADDR_SEL_CS01 0x356d +#define mmMMEA7_ADDRDEC0_ADDR_SEL_CS01_BASE_IDX 1 +#define mmMMEA7_ADDRDEC0_ADDR_SEL_CS23 0x356e +#define mmMMEA7_ADDRDEC0_ADDR_SEL_CS23_BASE_IDX 1 +#define mmMMEA7_ADDRDEC0_ADDR_SEL2_CS01 0x356f +#define mmMMEA7_ADDRDEC0_ADDR_SEL2_CS01_BASE_IDX 1 +#define mmMMEA7_ADDRDEC0_ADDR_SEL2_CS23 0x3570 +#define mmMMEA7_ADDRDEC0_ADDR_SEL2_CS23_BASE_IDX 1 +#define mmMMEA7_ADDRDEC0_COL_SEL_LO_CS01 0x3571 +#define mmMMEA7_ADDRDEC0_COL_SEL_LO_CS01_BASE_IDX 1 +#define mmMMEA7_ADDRDEC0_COL_SEL_LO_CS23 0x3572 +#define mmMMEA7_ADDRDEC0_COL_SEL_LO_CS23_BASE_IDX 1 +#define mmMMEA7_ADDRDEC0_COL_SEL_HI_CS01 0x3573 +#define 
mmMMEA7_ADDRDEC0_COL_SEL_HI_CS01_BASE_IDX 1 +#define mmMMEA7_ADDRDEC0_COL_SEL_HI_CS23 0x3574 +#define mmMMEA7_ADDRDEC0_COL_SEL_HI_CS23_BASE_IDX 1 +#define mmMMEA7_ADDRDEC0_RM_SEL_CS01 0x3575 +#define mmMMEA7_ADDRDEC0_RM_SEL_CS01_BASE_IDX 1 +#define mmMMEA7_ADDRDEC0_RM_SEL_CS23 0x3576 +#define mmMMEA7_ADDRDEC0_RM_SEL_CS23_BASE_IDX 1 +#define mmMMEA7_ADDRDEC0_RM_SEL_SECCS01 0x3577 +#define mmMMEA7_ADDRDEC0_RM_SEL_SECCS01_BASE_IDX 1 +#define mmMMEA7_ADDRDEC0_RM_SEL_SECCS23 0x3578 +#define mmMMEA7_ADDRDEC0_RM_SEL_SECCS23_BASE_IDX 1 +#define mmMMEA7_ADDRDEC1_BASE_ADDR_CS0 0x3579 +#define mmMMEA7_ADDRDEC1_BASE_ADDR_CS0_BASE_IDX 1 +#define mmMMEA7_ADDRDEC1_BASE_ADDR_CS1 0x357a +#define mmMMEA7_ADDRDEC1_BASE_ADDR_CS1_BASE_IDX 1 +#define mmMMEA7_ADDRDEC1_BASE_ADDR_CS2 0x357b +#define mmMMEA7_ADDRDEC1_BASE_ADDR_CS2_BASE_IDX 1 +#define mmMMEA7_ADDRDEC1_BASE_ADDR_CS3 0x357c +#define mmMMEA7_ADDRDEC1_BASE_ADDR_CS3_BASE_IDX 1 +#define mmMMEA7_ADDRDEC1_BASE_ADDR_SECCS0 0x357d +#define mmMMEA7_ADDRDEC1_BASE_ADDR_SECCS0_BASE_IDX 1 +#define mmMMEA7_ADDRDEC1_BASE_ADDR_SECCS1 0x357e +#define mmMMEA7_ADDRDEC1_BASE_ADDR_SECCS1_BASE_IDX 1 +#define mmMMEA7_ADDRDEC1_BASE_ADDR_SECCS2 0x357f +#define mmMMEA7_ADDRDEC1_BASE_ADDR_SECCS2_BASE_IDX 1 +#define mmMMEA7_ADDRDEC1_BASE_ADDR_SECCS3 0x3580 +#define mmMMEA7_ADDRDEC1_BASE_ADDR_SECCS3_BASE_IDX 1 +#define mmMMEA7_ADDRDEC1_ADDR_MASK_CS01 0x3581 +#define mmMMEA7_ADDRDEC1_ADDR_MASK_CS01_BASE_IDX 1 +#define mmMMEA7_ADDRDEC1_ADDR_MASK_CS23 0x3582 +#define mmMMEA7_ADDRDEC1_ADDR_MASK_CS23_BASE_IDX 1 +#define mmMMEA7_ADDRDEC1_ADDR_MASK_SECCS01 0x3583 +#define mmMMEA7_ADDRDEC1_ADDR_MASK_SECCS01_BASE_IDX 1 +#define mmMMEA7_ADDRDEC1_ADDR_MASK_SECCS23 0x3584 +#define mmMMEA7_ADDRDEC1_ADDR_MASK_SECCS23_BASE_IDX 1 +#define mmMMEA7_ADDRDEC1_ADDR_CFG_CS01 0x3585 +#define mmMMEA7_ADDRDEC1_ADDR_CFG_CS01_BASE_IDX 1 +#define mmMMEA7_ADDRDEC1_ADDR_CFG_CS23 0x3586 +#define mmMMEA7_ADDRDEC1_ADDR_CFG_CS23_BASE_IDX 1 +#define mmMMEA7_ADDRDEC1_ADDR_SEL_CS01 0x3587 +#define mmMMEA7_ADDRDEC1_ADDR_SEL_CS01_BASE_IDX 1 +#define mmMMEA7_ADDRDEC1_ADDR_SEL_CS23 0x3588 +#define mmMMEA7_ADDRDEC1_ADDR_SEL_CS23_BASE_IDX 1 +#define mmMMEA7_ADDRDEC1_ADDR_SEL2_CS01 0x3589 +#define mmMMEA7_ADDRDEC1_ADDR_SEL2_CS01_BASE_IDX 1 +#define mmMMEA7_ADDRDEC1_ADDR_SEL2_CS23 0x358a +#define mmMMEA7_ADDRDEC1_ADDR_SEL2_CS23_BASE_IDX 1 +#define mmMMEA7_ADDRDEC1_COL_SEL_LO_CS01 0x358b +#define mmMMEA7_ADDRDEC1_COL_SEL_LO_CS01_BASE_IDX 1 +#define mmMMEA7_ADDRDEC1_COL_SEL_LO_CS23 0x358c +#define mmMMEA7_ADDRDEC1_COL_SEL_LO_CS23_BASE_IDX 1 +#define mmMMEA7_ADDRDEC1_COL_SEL_HI_CS01 0x358d +#define mmMMEA7_ADDRDEC1_COL_SEL_HI_CS01_BASE_IDX 1 +#define mmMMEA7_ADDRDEC1_COL_SEL_HI_CS23 0x358e +#define mmMMEA7_ADDRDEC1_COL_SEL_HI_CS23_BASE_IDX 1 +#define mmMMEA7_ADDRDEC1_RM_SEL_CS01 0x358f +#define mmMMEA7_ADDRDEC1_RM_SEL_CS01_BASE_IDX 1 +#define mmMMEA7_ADDRDEC1_RM_SEL_CS23 0x3590 +#define mmMMEA7_ADDRDEC1_RM_SEL_CS23_BASE_IDX 1 +#define mmMMEA7_ADDRDEC1_RM_SEL_SECCS01 0x3591 +#define mmMMEA7_ADDRDEC1_RM_SEL_SECCS01_BASE_IDX 1 +#define mmMMEA7_ADDRDEC1_RM_SEL_SECCS23 0x3592 +#define mmMMEA7_ADDRDEC1_RM_SEL_SECCS23_BASE_IDX 1 +#define mmMMEA7_ADDRDEC2_BASE_ADDR_CS0 0x3593 +#define mmMMEA7_ADDRDEC2_BASE_ADDR_CS0_BASE_IDX 1 +#define mmMMEA7_ADDRDEC2_BASE_ADDR_CS1 0x3594 +#define mmMMEA7_ADDRDEC2_BASE_ADDR_CS1_BASE_IDX 1 +#define mmMMEA7_ADDRDEC2_BASE_ADDR_CS2 0x3595 +#define mmMMEA7_ADDRDEC2_BASE_ADDR_CS2_BASE_IDX 1 +#define mmMMEA7_ADDRDEC2_BASE_ADDR_CS3 0x3596 +#define mmMMEA7_ADDRDEC2_BASE_ADDR_CS3_BASE_IDX 1 +#define 
mmMMEA7_ADDRDEC2_BASE_ADDR_SECCS0 0x3597 +#define mmMMEA7_ADDRDEC2_BASE_ADDR_SECCS0_BASE_IDX 1 +#define mmMMEA7_ADDRDEC2_BASE_ADDR_SECCS1 0x3598 +#define mmMMEA7_ADDRDEC2_BASE_ADDR_SECCS1_BASE_IDX 1 +#define mmMMEA7_ADDRDEC2_BASE_ADDR_SECCS2 0x3599 +#define mmMMEA7_ADDRDEC2_BASE_ADDR_SECCS2_BASE_IDX 1 +#define mmMMEA7_ADDRDEC2_BASE_ADDR_SECCS3 0x359a +#define mmMMEA7_ADDRDEC2_BASE_ADDR_SECCS3_BASE_IDX 1 +#define mmMMEA7_ADDRDEC2_ADDR_MASK_CS01 0x359b +#define mmMMEA7_ADDRDEC2_ADDR_MASK_CS01_BASE_IDX 1 +#define mmMMEA7_ADDRDEC2_ADDR_MASK_CS23 0x359c +#define mmMMEA7_ADDRDEC2_ADDR_MASK_CS23_BASE_IDX 1 +#define mmMMEA7_ADDRDEC2_ADDR_MASK_SECCS01 0x359d +#define mmMMEA7_ADDRDEC2_ADDR_MASK_SECCS01_BASE_IDX 1 +#define mmMMEA7_ADDRDEC2_ADDR_MASK_SECCS23 0x359e +#define mmMMEA7_ADDRDEC2_ADDR_MASK_SECCS23_BASE_IDX 1 +#define mmMMEA7_ADDRDEC2_ADDR_CFG_CS01 0x359f +#define mmMMEA7_ADDRDEC2_ADDR_CFG_CS01_BASE_IDX 1 +#define mmMMEA7_ADDRDEC2_ADDR_CFG_CS23 0x35a0 +#define mmMMEA7_ADDRDEC2_ADDR_CFG_CS23_BASE_IDX 1 +#define mmMMEA7_ADDRDEC2_ADDR_SEL_CS01 0x35a1 +#define mmMMEA7_ADDRDEC2_ADDR_SEL_CS01_BASE_IDX 1 +#define mmMMEA7_ADDRDEC2_ADDR_SEL_CS23 0x35a2 +#define mmMMEA7_ADDRDEC2_ADDR_SEL_CS23_BASE_IDX 1 +#define mmMMEA7_ADDRDEC2_ADDR_SEL2_CS01 0x35a3 +#define mmMMEA7_ADDRDEC2_ADDR_SEL2_CS01_BASE_IDX 1 +#define mmMMEA7_ADDRDEC2_ADDR_SEL2_CS23 0x35a4 +#define mmMMEA7_ADDRDEC2_ADDR_SEL2_CS23_BASE_IDX 1 +#define mmMMEA7_ADDRDEC2_COL_SEL_LO_CS01 0x35a5 +#define mmMMEA7_ADDRDEC2_COL_SEL_LO_CS01_BASE_IDX 1 +#define mmMMEA7_ADDRDEC2_COL_SEL_LO_CS23 0x35a6 +#define mmMMEA7_ADDRDEC2_COL_SEL_LO_CS23_BASE_IDX 1 +#define mmMMEA7_ADDRDEC2_COL_SEL_HI_CS01 0x35a7 +#define mmMMEA7_ADDRDEC2_COL_SEL_HI_CS01_BASE_IDX 1 +#define mmMMEA7_ADDRDEC2_COL_SEL_HI_CS23 0x35a8 +#define mmMMEA7_ADDRDEC2_COL_SEL_HI_CS23_BASE_IDX 1 +#define mmMMEA7_ADDRDEC2_RM_SEL_CS01 0x35a9 +#define mmMMEA7_ADDRDEC2_RM_SEL_CS01_BASE_IDX 1 +#define mmMMEA7_ADDRDEC2_RM_SEL_CS23 0x35aa +#define mmMMEA7_ADDRDEC2_RM_SEL_CS23_BASE_IDX 1 +#define mmMMEA7_ADDRDEC2_RM_SEL_SECCS01 0x35ab +#define mmMMEA7_ADDRDEC2_RM_SEL_SECCS01_BASE_IDX 1 +#define mmMMEA7_ADDRDEC2_RM_SEL_SECCS23 0x35ac +#define mmMMEA7_ADDRDEC2_RM_SEL_SECCS23_BASE_IDX 1 +#define mmMMEA7_ADDRNORMDRAM_GLOBAL_CNTL 0x35ad +#define mmMMEA7_ADDRNORMDRAM_GLOBAL_CNTL_BASE_IDX 1 +#define mmMMEA7_ADDRNORMGMI_GLOBAL_CNTL 0x35ae +#define mmMMEA7_ADDRNORMGMI_GLOBAL_CNTL_BASE_IDX 1 +#define mmMMEA7_IO_RD_CLI2GRP_MAP0 0x35d5 +#define mmMMEA7_IO_RD_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA7_IO_RD_CLI2GRP_MAP1 0x35d6 +#define mmMMEA7_IO_RD_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA7_IO_WR_CLI2GRP_MAP0 0x35d7 +#define mmMMEA7_IO_WR_CLI2GRP_MAP0_BASE_IDX 1 +#define mmMMEA7_IO_WR_CLI2GRP_MAP1 0x35d8 +#define mmMMEA7_IO_WR_CLI2GRP_MAP1_BASE_IDX 1 +#define mmMMEA7_IO_RD_COMBINE_FLUSH 0x35d9 +#define mmMMEA7_IO_RD_COMBINE_FLUSH_BASE_IDX 1 +#define mmMMEA7_IO_WR_COMBINE_FLUSH 0x35da +#define mmMMEA7_IO_WR_COMBINE_FLUSH_BASE_IDX 1 +#define mmMMEA7_IO_GROUP_BURST 0x35db +#define mmMMEA7_IO_GROUP_BURST_BASE_IDX 1 +#define mmMMEA7_IO_RD_PRI_AGE 0x35dc +#define mmMMEA7_IO_RD_PRI_AGE_BASE_IDX 1 +#define mmMMEA7_IO_WR_PRI_AGE 0x35dd +#define mmMMEA7_IO_WR_PRI_AGE_BASE_IDX 1 +#define mmMMEA7_IO_RD_PRI_QUEUING 0x35de +#define mmMMEA7_IO_RD_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA7_IO_WR_PRI_QUEUING 0x35df +#define mmMMEA7_IO_WR_PRI_QUEUING_BASE_IDX 1 +#define mmMMEA7_IO_RD_PRI_FIXED 0x35e0 +#define mmMMEA7_IO_RD_PRI_FIXED_BASE_IDX 1 +#define mmMMEA7_IO_WR_PRI_FIXED 0x35e1 +#define mmMMEA7_IO_WR_PRI_FIXED_BASE_IDX 1 +#define 
mmMMEA7_IO_RD_PRI_URGENCY 0x35e2 +#define mmMMEA7_IO_RD_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA7_IO_WR_PRI_URGENCY 0x35e3 +#define mmMMEA7_IO_WR_PRI_URGENCY_BASE_IDX 1 +#define mmMMEA7_IO_RD_PRI_URGENCY_MASKING 0x35e4 +#define mmMMEA7_IO_RD_PRI_URGENCY_MASKING_BASE_IDX 1 +#define mmMMEA7_IO_WR_PRI_URGENCY_MASKING 0x35e5 +#define mmMMEA7_IO_WR_PRI_URGENCY_MASKING_BASE_IDX 1 +#define mmMMEA7_IO_RD_PRI_QUANT_PRI1 0x35e6 +#define mmMMEA7_IO_RD_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA7_IO_RD_PRI_QUANT_PRI2 0x35e7 +#define mmMMEA7_IO_RD_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA7_IO_RD_PRI_QUANT_PRI3 0x35e8 +#define mmMMEA7_IO_RD_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA7_IO_WR_PRI_QUANT_PRI1 0x35e9 +#define mmMMEA7_IO_WR_PRI_QUANT_PRI1_BASE_IDX 1 +#define mmMMEA7_IO_WR_PRI_QUANT_PRI2 0x35ea +#define mmMMEA7_IO_WR_PRI_QUANT_PRI2_BASE_IDX 1 +#define mmMMEA7_IO_WR_PRI_QUANT_PRI3 0x35eb +#define mmMMEA7_IO_WR_PRI_QUANT_PRI3_BASE_IDX 1 +#define mmMMEA7_SDP_ARB_DRAM 0x35ec +#define mmMMEA7_SDP_ARB_DRAM_BASE_IDX 1 +#define mmMMEA7_SDP_ARB_GMI 0x35ed +#define mmMMEA7_SDP_ARB_GMI_BASE_IDX 1 +#define mmMMEA7_SDP_ARB_FINAL 0x35ee +#define mmMMEA7_SDP_ARB_FINAL_BASE_IDX 1 +#define mmMMEA7_SDP_DRAM_PRIORITY 0x35ef +#define mmMMEA7_SDP_DRAM_PRIORITY_BASE_IDX 1 +#define mmMMEA7_SDP_GMI_PRIORITY 0x35f0 +#define mmMMEA7_SDP_GMI_PRIORITY_BASE_IDX 1 +#define mmMMEA7_SDP_IO_PRIORITY 0x35f1 +#define mmMMEA7_SDP_IO_PRIORITY_BASE_IDX 1 +#define mmMMEA7_SDP_CREDITS 0x35f2 +#define mmMMEA7_SDP_CREDITS_BASE_IDX 1 +#define mmMMEA7_SDP_TAG_RESERVE0 0x35f3 +#define mmMMEA7_SDP_TAG_RESERVE0_BASE_IDX 1 +#define mmMMEA7_SDP_TAG_RESERVE1 0x35f4 +#define mmMMEA7_SDP_TAG_RESERVE1_BASE_IDX 1 +#define mmMMEA7_SDP_VCC_RESERVE0 0x35f5 +#define mmMMEA7_SDP_VCC_RESERVE0_BASE_IDX 1 +#define mmMMEA7_SDP_VCC_RESERVE1 0x35f6 +#define mmMMEA7_SDP_VCC_RESERVE1_BASE_IDX 1 +#define mmMMEA7_SDP_VCD_RESERVE0 0x35f7 +#define mmMMEA7_SDP_VCD_RESERVE0_BASE_IDX 1 +#define mmMMEA7_SDP_VCD_RESERVE1 0x35f8 +#define mmMMEA7_SDP_VCD_RESERVE1_BASE_IDX 1 +#define mmMMEA7_SDP_REQ_CNTL 0x35f9 +#define mmMMEA7_SDP_REQ_CNTL_BASE_IDX 1 +#define mmMMEA7_MISC 0x35fa +#define mmMMEA7_MISC_BASE_IDX 1 +#define mmMMEA7_LATENCY_SAMPLING 0x35fb +#define mmMMEA7_LATENCY_SAMPLING_BASE_IDX 1 +#define mmMMEA7_PERFCOUNTER_LO 0x35fc +#define mmMMEA7_PERFCOUNTER_LO_BASE_IDX 1 +#define mmMMEA7_PERFCOUNTER_HI 0x35fd +#define mmMMEA7_PERFCOUNTER_HI_BASE_IDX 1 +#define mmMMEA7_PERFCOUNTER0_CFG 0x35fe +#define mmMMEA7_PERFCOUNTER0_CFG_BASE_IDX 1 +#define mmMMEA7_PERFCOUNTER1_CFG 0x35ff +#define mmMMEA7_PERFCOUNTER1_CFG_BASE_IDX 1 +#define mmMMEA7_PERFCOUNTER_RSLT_CNTL 0x3600 +#define mmMMEA7_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1 +#define mmMMEA7_EDC_CNT 0x3606 +#define mmMMEA7_EDC_CNT_BASE_IDX 1 +#define mmMMEA7_EDC_CNT2 0x3607 +#define mmMMEA7_EDC_CNT2_BASE_IDX 1 +#define mmMMEA7_DSM_CNTL 0x3608 +#define mmMMEA7_DSM_CNTL_BASE_IDX 1 +#define mmMMEA7_DSM_CNTLA 0x3609 +#define mmMMEA7_DSM_CNTLA_BASE_IDX 1 +#define mmMMEA7_DSM_CNTLB 0x360a +#define mmMMEA7_DSM_CNTLB_BASE_IDX 1 +#define mmMMEA7_DSM_CNTL2 0x360b +#define mmMMEA7_DSM_CNTL2_BASE_IDX 1 +#define mmMMEA7_DSM_CNTL2A 0x360c +#define mmMMEA7_DSM_CNTL2A_BASE_IDX 1 +#define mmMMEA7_DSM_CNTL2B 0x360d +#define mmMMEA7_DSM_CNTL2B_BASE_IDX 1 +#define mmMMEA7_CGTT_CLK_CTRL 0x360f +#define mmMMEA7_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmMMEA7_EDC_MODE 0x3610 +#define mmMMEA7_EDC_MODE_BASE_IDX 1 +#define mmMMEA7_ERR_STATUS 0x3611 +#define mmMMEA7_ERR_STATUS_BASE_IDX 1 +#define mmMMEA7_MISC2 0x3612 +#define mmMMEA7_MISC2_BASE_IDX 1 +#define 
mmMMEA7_ADDRDEC_SELECT 0x3613 +#define mmMMEA7_ADDRDEC_SELECT_BASE_IDX 1 +#define mmMMEA7_EDC_CNT3 0x3614 +#define mmMMEA7_EDC_CNT3_BASE_IDX 1 + + +// addressBlock: mmhub_pctldec1 +// base address: 0x76300 +#define mmPCTL1_CTRL 0x38c0 +#define mmPCTL1_CTRL_BASE_IDX 1 +#define mmPCTL1_MMHUB_DEEPSLEEP_IB 0x38c1 +#define mmPCTL1_MMHUB_DEEPSLEEP_IB_BASE_IDX 1 +#define mmPCTL1_MMHUB_DEEPSLEEP_OVERRIDE 0x38c2 +#define mmPCTL1_MMHUB_DEEPSLEEP_OVERRIDE_BASE_IDX 1 +#define mmPCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB 0x38c3 +#define mmPCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB_BASE_IDX 1 +#define mmPCTL1_PG_IGNORE_DEEPSLEEP 0x38c4 +#define mmPCTL1_PG_IGNORE_DEEPSLEEP_BASE_IDX 1 +#define mmPCTL1_PG_IGNORE_DEEPSLEEP_IB 0x38c5 +#define mmPCTL1_PG_IGNORE_DEEPSLEEP_IB_BASE_IDX 1 +#define mmPCTL1_SLICE0_CFG_DAGB_BUSY 0x38c6 +#define mmPCTL1_SLICE0_CFG_DAGB_BUSY_BASE_IDX 1 +#define mmPCTL1_SLICE0_CFG_DS_ALLOW 0x38c7 +#define mmPCTL1_SLICE0_CFG_DS_ALLOW_BASE_IDX 1 +#define mmPCTL1_SLICE0_CFG_DS_ALLOW_IB 0x38c8 +#define mmPCTL1_SLICE0_CFG_DS_ALLOW_IB_BASE_IDX 1 +#define mmPCTL1_SLICE1_CFG_DAGB_BUSY 0x38c9 +#define mmPCTL1_SLICE1_CFG_DAGB_BUSY_BASE_IDX 1 +#define mmPCTL1_SLICE1_CFG_DS_ALLOW 0x38ca +#define mmPCTL1_SLICE1_CFG_DS_ALLOW_BASE_IDX 1 +#define mmPCTL1_SLICE1_CFG_DS_ALLOW_IB 0x38cb +#define mmPCTL1_SLICE1_CFG_DS_ALLOW_IB_BASE_IDX 1 +#define mmPCTL1_SLICE2_CFG_DAGB_BUSY 0x38cc +#define mmPCTL1_SLICE2_CFG_DAGB_BUSY_BASE_IDX 1 +#define mmPCTL1_SLICE2_CFG_DS_ALLOW 0x38cd +#define mmPCTL1_SLICE2_CFG_DS_ALLOW_BASE_IDX 1 +#define mmPCTL1_SLICE2_CFG_DS_ALLOW_IB 0x38ce +#define mmPCTL1_SLICE2_CFG_DS_ALLOW_IB_BASE_IDX 1 +#define mmPCTL1_SLICE3_CFG_DAGB_BUSY 0x38cf +#define mmPCTL1_SLICE3_CFG_DAGB_BUSY_BASE_IDX 1 +#define mmPCTL1_SLICE3_CFG_DS_ALLOW 0x38d0 +#define mmPCTL1_SLICE3_CFG_DS_ALLOW_BASE_IDX 1 +#define mmPCTL1_SLICE3_CFG_DS_ALLOW_IB 0x38d1 +#define mmPCTL1_SLICE3_CFG_DS_ALLOW_IB_BASE_IDX 1 +#define mmPCTL1_SLICE4_CFG_DAGB_BUSY 0x38d2 +#define mmPCTL1_SLICE4_CFG_DAGB_BUSY_BASE_IDX 1 +#define mmPCTL1_SLICE4_CFG_DS_ALLOW 0x38d3 +#define mmPCTL1_SLICE4_CFG_DS_ALLOW_BASE_IDX 1 +#define mmPCTL1_SLICE4_CFG_DS_ALLOW_IB 0x38d4 +#define mmPCTL1_SLICE4_CFG_DS_ALLOW_IB_BASE_IDX 1 +#define mmPCTL1_UTCL2_MISC 0x38d5 +#define mmPCTL1_UTCL2_MISC_BASE_IDX 1 +#define mmPCTL1_SLICE0_MISC 0x38d6 +#define mmPCTL1_SLICE0_MISC_BASE_IDX 1 +#define mmPCTL1_SLICE1_MISC 0x38d7 +#define mmPCTL1_SLICE1_MISC_BASE_IDX 1 +#define mmPCTL1_SLICE2_MISC 0x38d8 +#define mmPCTL1_SLICE2_MISC_BASE_IDX 1 +#define mmPCTL1_SLICE3_MISC 0x38d9 +#define mmPCTL1_SLICE3_MISC_BASE_IDX 1 +#define mmPCTL1_SLICE4_MISC 0x38da +#define mmPCTL1_SLICE4_MISC_BASE_IDX 1 +#define mmPCTL1_UTCL2_RENG_EXECUTE 0x38db +#define mmPCTL1_UTCL2_RENG_EXECUTE_BASE_IDX 1 +#define mmPCTL1_SLICE0_RENG_EXECUTE 0x38dc +#define mmPCTL1_SLICE0_RENG_EXECUTE_BASE_IDX 1 +#define mmPCTL1_SLICE1_RENG_EXECUTE 0x38dd +#define mmPCTL1_SLICE1_RENG_EXECUTE_BASE_IDX 1 +#define mmPCTL1_SLICE2_RENG_EXECUTE 0x38de +#define mmPCTL1_SLICE2_RENG_EXECUTE_BASE_IDX 1 +#define mmPCTL1_SLICE3_RENG_EXECUTE 0x38df +#define mmPCTL1_SLICE3_RENG_EXECUTE_BASE_IDX 1 +#define mmPCTL1_SLICE4_RENG_EXECUTE 0x38e0 +#define mmPCTL1_SLICE4_RENG_EXECUTE_BASE_IDX 1 +#define mmPCTL1_UTCL2_RENG_RAM_INDEX 0x38e1 +#define mmPCTL1_UTCL2_RENG_RAM_INDEX_BASE_IDX 1 +#define mmPCTL1_UTCL2_RENG_RAM_DATA 0x38e2 +#define mmPCTL1_UTCL2_RENG_RAM_DATA_BASE_IDX 1 +#define mmPCTL1_SLICE0_RENG_RAM_INDEX 0x38e3 +#define mmPCTL1_SLICE0_RENG_RAM_INDEX_BASE_IDX 1 +#define mmPCTL1_SLICE0_RENG_RAM_DATA 0x38e4 +#define 
mmPCTL1_SLICE0_RENG_RAM_DATA_BASE_IDX 1 +#define mmPCTL1_SLICE1_RENG_RAM_INDEX 0x38e5 +#define mmPCTL1_SLICE1_RENG_RAM_INDEX_BASE_IDX 1 +#define mmPCTL1_SLICE1_RENG_RAM_DATA 0x38e6 +#define mmPCTL1_SLICE1_RENG_RAM_DATA_BASE_IDX 1 +#define mmPCTL1_SLICE2_RENG_RAM_INDEX 0x38e7 +#define mmPCTL1_SLICE2_RENG_RAM_INDEX_BASE_IDX 1 +#define mmPCTL1_SLICE2_RENG_RAM_DATA 0x38e8 +#define mmPCTL1_SLICE2_RENG_RAM_DATA_BASE_IDX 1 +#define mmPCTL1_SLICE3_RENG_RAM_INDEX 0x38e9 +#define mmPCTL1_SLICE3_RENG_RAM_INDEX_BASE_IDX 1 +#define mmPCTL1_SLICE3_RENG_RAM_DATA 0x38ea +#define mmPCTL1_SLICE3_RENG_RAM_DATA_BASE_IDX 1 +#define mmPCTL1_SLICE4_RENG_RAM_INDEX 0x38eb +#define mmPCTL1_SLICE4_RENG_RAM_INDEX_BASE_IDX 1 +#define mmPCTL1_SLICE4_RENG_RAM_DATA 0x38ec +#define mmPCTL1_SLICE4_RENG_RAM_DATA_BASE_IDX 1 +#define mmPCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE0 0x38ed +#define mmPCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE0_BASE_IDX 1 +#define mmPCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE1 0x38ee +#define mmPCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE1_BASE_IDX 1 +#define mmPCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE2 0x38ef +#define mmPCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE2_BASE_IDX 1 +#define mmPCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE3 0x38f0 +#define mmPCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE3_BASE_IDX 1 +#define mmPCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE4 0x38f1 +#define mmPCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE4_BASE_IDX 1 +#define mmPCTL1_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET0 0x38f2 +#define mmPCTL1_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET0_BASE_IDX 1 +#define mmPCTL1_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET1 0x38f3 +#define mmPCTL1_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET1_BASE_IDX 1 +#define mmPCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE0 0x38f4 +#define mmPCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE0_BASE_IDX 1 +#define mmPCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE1 0x38f5 +#define mmPCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE1_BASE_IDX 1 +#define mmPCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE2 0x38f6 +#define mmPCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE2_BASE_IDX 1 +#define mmPCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE3 0x38f7 +#define mmPCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE3_BASE_IDX 1 +#define mmPCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE4 0x38f8 +#define mmPCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE4_BASE_IDX 1 +#define mmPCTL1_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET0 0x38f9 +#define mmPCTL1_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET0_BASE_IDX 1 +#define mmPCTL1_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET1 0x38fa +#define mmPCTL1_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET1_BASE_IDX 1 +#define mmPCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE0 0x38fb +#define mmPCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE0_BASE_IDX 1 +#define mmPCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE1 0x38fc +#define mmPCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE1_BASE_IDX 1 +#define mmPCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE2 0x38fd +#define mmPCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE2_BASE_IDX 1 +#define mmPCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE3 0x38fe +#define mmPCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE3_BASE_IDX 1 +#define mmPCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE4 0x38ff +#define mmPCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE4_BASE_IDX 1 +#define mmPCTL1_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET0 0x3900 +#define mmPCTL1_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET0_BASE_IDX 1 +#define mmPCTL1_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET1 0x3901 +#define mmPCTL1_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET1_BASE_IDX 1 +#define mmPCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE0 0x3902 +#define mmPCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE0_BASE_IDX 1 +#define 
mmPCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE1 0x3903 +#define mmPCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE1_BASE_IDX 1 +#define mmPCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE2 0x3904 +#define mmPCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE2_BASE_IDX 1 +#define mmPCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE3 0x3905 +#define mmPCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE3_BASE_IDX 1 +#define mmPCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE4 0x3906 +#define mmPCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE4_BASE_IDX 1 +#define mmPCTL1_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET0 0x3907 +#define mmPCTL1_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET0_BASE_IDX 1 +#define mmPCTL1_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET1 0x3908 +#define mmPCTL1_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET1_BASE_IDX 1 +#define mmPCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE0 0x3909 +#define mmPCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE0_BASE_IDX 1 +#define mmPCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE1 0x390a +#define mmPCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE1_BASE_IDX 1 +#define mmPCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE2 0x390b +#define mmPCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE2_BASE_IDX 1 +#define mmPCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE3 0x390c +#define mmPCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE3_BASE_IDX 1 +#define mmPCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE4 0x390d +#define mmPCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE4_BASE_IDX 1 +#define mmPCTL1_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET0 0x390e +#define mmPCTL1_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET0_BASE_IDX 1 +#define mmPCTL1_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET1 0x390f +#define mmPCTL1_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET1_BASE_IDX 1 +#define mmPCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE0 0x3910 +#define mmPCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE0_BASE_IDX 1 +#define mmPCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE1 0x3911 +#define mmPCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE1_BASE_IDX 1 +#define mmPCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE2 0x3912 +#define mmPCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE2_BASE_IDX 1 +#define mmPCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE3 0x3913 +#define mmPCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE3_BASE_IDX 1 +#define mmPCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE4 0x3914 +#define mmPCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE4_BASE_IDX 1 +#define mmPCTL1_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET0 0x3915 +#define mmPCTL1_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET0_BASE_IDX 1 +#define mmPCTL1_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET1 0x3916 +#define mmPCTL1_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET1_BASE_IDX 1 + + +// addressBlock: mmhub_l1tlb_vml1dec:1 +// base address: 0x76500 +#define mmVML1_1_MC_VM_MX_L1_TLB0_STATUS 0x3948 +#define mmVML1_1_MC_VM_MX_L1_TLB0_STATUS_BASE_IDX 1 +#define mmVML1_1_MC_VM_MX_L1_TLB1_STATUS 0x3949 +#define mmVML1_1_MC_VM_MX_L1_TLB1_STATUS_BASE_IDX 1 +#define mmVML1_1_MC_VM_MX_L1_TLB2_STATUS 0x394a +#define mmVML1_1_MC_VM_MX_L1_TLB2_STATUS_BASE_IDX 1 +#define mmVML1_1_MC_VM_MX_L1_TLB3_STATUS 0x394b +#define mmVML1_1_MC_VM_MX_L1_TLB3_STATUS_BASE_IDX 1 +#define mmVML1_1_MC_VM_MX_L1_TLB4_STATUS 0x394c +#define mmVML1_1_MC_VM_MX_L1_TLB4_STATUS_BASE_IDX 1 +#define mmVML1_1_MC_VM_MX_L1_TLB5_STATUS 0x394d +#define mmVML1_1_MC_VM_MX_L1_TLB5_STATUS_BASE_IDX 1 +#define mmVML1_1_MC_VM_MX_L1_TLB6_STATUS 0x394e +#define mmVML1_1_MC_VM_MX_L1_TLB6_STATUS_BASE_IDX 1 +#define mmVML1_1_MC_VM_MX_L1_TLB7_STATUS 0x394f +#define mmVML1_1_MC_VM_MX_L1_TLB7_STATUS_BASE_IDX 1 + + +// addressBlock: mmhub_l1tlb_vml1pldec:1 +// base address: 0x76580 +#define mmVML1PL1_MC_VM_MX_L1_PERFCOUNTER0_CFG 0x3960 +#define mmVML1PL1_MC_VM_MX_L1_PERFCOUNTER0_CFG_BASE_IDX 1 +#define 
mmVML1PL1_MC_VM_MX_L1_PERFCOUNTER1_CFG 0x3961 +#define mmVML1PL1_MC_VM_MX_L1_PERFCOUNTER1_CFG_BASE_IDX 1 +#define mmVML1PL1_MC_VM_MX_L1_PERFCOUNTER2_CFG 0x3962 +#define mmVML1PL1_MC_VM_MX_L1_PERFCOUNTER2_CFG_BASE_IDX 1 +#define mmVML1PL1_MC_VM_MX_L1_PERFCOUNTER3_CFG 0x3963 +#define mmVML1PL1_MC_VM_MX_L1_PERFCOUNTER3_CFG_BASE_IDX 1 +#define mmVML1PL1_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL 0x3964 +#define mmVML1PL1_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1 + + +// addressBlock: mmhub_l1tlb_vml1prdec:1 +// base address: 0x765c0 +#define mmVML1PR1_MC_VM_MX_L1_PERFCOUNTER_LO 0x3970 +#define mmVML1PR1_MC_VM_MX_L1_PERFCOUNTER_LO_BASE_IDX 1 +#define mmVML1PR1_MC_VM_MX_L1_PERFCOUNTER_HI 0x3971 +#define mmVML1PR1_MC_VM_MX_L1_PERFCOUNTER_HI_BASE_IDX 1 + + +// addressBlock: mmhub_utcl2_atcl2dec:1 +// base address: 0x76600 +#define mmATCL2_1_ATC_L2_CNTL 0x3980 +#define mmATCL2_1_ATC_L2_CNTL_BASE_IDX 1 +#define mmATCL2_1_ATC_L2_CNTL2 0x3981 +#define mmATCL2_1_ATC_L2_CNTL2_BASE_IDX 1 +#define mmATCL2_1_ATC_L2_CACHE_DATA0 0x3984 +#define mmATCL2_1_ATC_L2_CACHE_DATA0_BASE_IDX 1 +#define mmATCL2_1_ATC_L2_CACHE_DATA1 0x3985 +#define mmATCL2_1_ATC_L2_CACHE_DATA1_BASE_IDX 1 +#define mmATCL2_1_ATC_L2_CACHE_DATA2 0x3986 +#define mmATCL2_1_ATC_L2_CACHE_DATA2_BASE_IDX 1 +#define mmATCL2_1_ATC_L2_CNTL3 0x3987 +#define mmATCL2_1_ATC_L2_CNTL3_BASE_IDX 1 +#define mmATCL2_1_ATC_L2_STATUS 0x3988 +#define mmATCL2_1_ATC_L2_STATUS_BASE_IDX 1 +#define mmATCL2_1_ATC_L2_STATUS2 0x3989 +#define mmATCL2_1_ATC_L2_STATUS2_BASE_IDX 1 +#define mmATCL2_1_ATC_L2_STATUS3 0x398a +#define mmATCL2_1_ATC_L2_STATUS3_BASE_IDX 1 +#define mmATCL2_1_ATC_L2_MISC_CG 0x398b +#define mmATCL2_1_ATC_L2_MISC_CG_BASE_IDX 1 +#define mmATCL2_1_ATC_L2_MEM_POWER_LS 0x398c +#define mmATCL2_1_ATC_L2_MEM_POWER_LS_BASE_IDX 1 +#define mmATCL2_1_ATC_L2_CGTT_CLK_CTRL 0x398d +#define mmATCL2_1_ATC_L2_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmATCL2_1_ATC_L2_CACHE_4K_DSM_INDEX 0x398e +#define mmATCL2_1_ATC_L2_CACHE_4K_DSM_INDEX_BASE_IDX 1 +#define mmATCL2_1_ATC_L2_CACHE_2M_DSM_INDEX 0x398f +#define mmATCL2_1_ATC_L2_CACHE_2M_DSM_INDEX_BASE_IDX 1 +#define mmATCL2_1_ATC_L2_CACHE_4K_DSM_CNTL 0x3990 +#define mmATCL2_1_ATC_L2_CACHE_4K_DSM_CNTL_BASE_IDX 1 +#define mmATCL2_1_ATC_L2_CACHE_2M_DSM_CNTL 0x3991 +#define mmATCL2_1_ATC_L2_CACHE_2M_DSM_CNTL_BASE_IDX 1 +#define mmATCL2_1_ATC_L2_CNTL4 0x3992 +#define mmATCL2_1_ATC_L2_CNTL4_BASE_IDX 1 +#define mmATCL2_1_ATC_L2_MM_GROUP_RT_CLASSES 0x3993 +#define mmATCL2_1_ATC_L2_MM_GROUP_RT_CLASSES_BASE_IDX 1 + + +// addressBlock: mmhub_utcl2_vml2pfdec:1 +// base address: 0x76700 +#define mmVML2PF1_VM_L2_CNTL 0x39c0 +#define mmVML2PF1_VM_L2_CNTL_BASE_IDX 1 +#define mmVML2PF1_VM_L2_CNTL2 0x39c1 +#define mmVML2PF1_VM_L2_CNTL2_BASE_IDX 1 +#define mmVML2PF1_VM_L2_CNTL3 0x39c2 +#define mmVML2PF1_VM_L2_CNTL3_BASE_IDX 1 +#define mmVML2PF1_VM_L2_STATUS 0x39c3 +#define mmVML2PF1_VM_L2_STATUS_BASE_IDX 1 +#define mmVML2PF1_VM_DUMMY_PAGE_FAULT_CNTL 0x39c4 +#define mmVML2PF1_VM_DUMMY_PAGE_FAULT_CNTL_BASE_IDX 1 +#define mmVML2PF1_VM_DUMMY_PAGE_FAULT_ADDR_LO32 0x39c5 +#define mmVML2PF1_VM_DUMMY_PAGE_FAULT_ADDR_LO32_BASE_IDX 1 +#define mmVML2PF1_VM_DUMMY_PAGE_FAULT_ADDR_HI32 0x39c6 +#define mmVML2PF1_VM_DUMMY_PAGE_FAULT_ADDR_HI32_BASE_IDX 1 +#define mmVML2PF1_VM_L2_PROTECTION_FAULT_CNTL 0x39c7 +#define mmVML2PF1_VM_L2_PROTECTION_FAULT_CNTL_BASE_IDX 1 +#define mmVML2PF1_VM_L2_PROTECTION_FAULT_CNTL2 0x39c8 +#define mmVML2PF1_VM_L2_PROTECTION_FAULT_CNTL2_BASE_IDX 1 +#define mmVML2PF1_VM_L2_PROTECTION_FAULT_MM_CNTL3 0x39c9 +#define 
mmVML2PF1_VM_L2_PROTECTION_FAULT_MM_CNTL3_BASE_IDX 1 +#define mmVML2PF1_VM_L2_PROTECTION_FAULT_MM_CNTL4 0x39ca +#define mmVML2PF1_VM_L2_PROTECTION_FAULT_MM_CNTL4_BASE_IDX 1 +#define mmVML2PF1_VM_L2_PROTECTION_FAULT_STATUS 0x39cb +#define mmVML2PF1_VM_L2_PROTECTION_FAULT_STATUS_BASE_IDX 1 +#define mmVML2PF1_VM_L2_PROTECTION_FAULT_ADDR_LO32 0x39cc +#define mmVML2PF1_VM_L2_PROTECTION_FAULT_ADDR_LO32_BASE_IDX 1 +#define mmVML2PF1_VM_L2_PROTECTION_FAULT_ADDR_HI32 0x39cd +#define mmVML2PF1_VM_L2_PROTECTION_FAULT_ADDR_HI32_BASE_IDX 1 +#define mmVML2PF1_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32 0x39ce +#define mmVML2PF1_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32_BASE_IDX 1 +#define mmVML2PF1_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32 0x39cf +#define mmVML2PF1_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32_BASE_IDX 1 +#define mmVML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32 0x39d1 +#define mmVML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32_BASE_IDX 1 +#define mmVML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32 0x39d2 +#define mmVML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32_BASE_IDX 1 +#define mmVML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32 0x39d3 +#define mmVML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32_BASE_IDX 1 +#define mmVML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32 0x39d4 +#define mmVML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32_BASE_IDX 1 +#define mmVML2PF1_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32 0x39d5 +#define mmVML2PF1_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32_BASE_IDX 1 +#define mmVML2PF1_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32 0x39d6 +#define mmVML2PF1_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32_BASE_IDX 1 +#define mmVML2PF1_VM_L2_CNTL4 0x39d7 +#define mmVML2PF1_VM_L2_CNTL4_BASE_IDX 1 +#define mmVML2PF1_VM_L2_MM_GROUP_RT_CLASSES 0x39d8 +#define mmVML2PF1_VM_L2_MM_GROUP_RT_CLASSES_BASE_IDX 1 +#define mmVML2PF1_VM_L2_BANK_SELECT_RESERVED_CID 0x39d9 +#define mmVML2PF1_VM_L2_BANK_SELECT_RESERVED_CID_BASE_IDX 1 +#define mmVML2PF1_VM_L2_BANK_SELECT_RESERVED_CID2 0x39da +#define mmVML2PF1_VM_L2_BANK_SELECT_RESERVED_CID2_BASE_IDX 1 +#define mmVML2PF1_VM_L2_CACHE_PARITY_CNTL 0x39db +#define mmVML2PF1_VM_L2_CACHE_PARITY_CNTL_BASE_IDX 1 +#define mmVML2PF1_VM_L2_CGTT_CLK_CTRL 0x39de +#define mmVML2PF1_VM_L2_CGTT_CLK_CTRL_BASE_IDX 1 + + +// addressBlock: mmhub_utcl2_vml2vcdec:1 +// base address: 0x76800 +#define mmVML2VC1_VM_CONTEXT0_CNTL 0x3a00 +#define mmVML2VC1_VM_CONTEXT0_CNTL_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT1_CNTL 0x3a01 +#define mmVML2VC1_VM_CONTEXT1_CNTL_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT2_CNTL 0x3a02 +#define mmVML2VC1_VM_CONTEXT2_CNTL_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT3_CNTL 0x3a03 +#define mmVML2VC1_VM_CONTEXT3_CNTL_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT4_CNTL 0x3a04 +#define mmVML2VC1_VM_CONTEXT4_CNTL_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT5_CNTL 0x3a05 +#define mmVML2VC1_VM_CONTEXT5_CNTL_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT6_CNTL 0x3a06 +#define mmVML2VC1_VM_CONTEXT6_CNTL_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT7_CNTL 0x3a07 +#define mmVML2VC1_VM_CONTEXT7_CNTL_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT8_CNTL 0x3a08 +#define mmVML2VC1_VM_CONTEXT8_CNTL_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT9_CNTL 0x3a09 +#define mmVML2VC1_VM_CONTEXT9_CNTL_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT10_CNTL 0x3a0a +#define mmVML2VC1_VM_CONTEXT10_CNTL_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT11_CNTL 0x3a0b +#define mmVML2VC1_VM_CONTEXT11_CNTL_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT12_CNTL 0x3a0c +#define 
mmVML2VC1_VM_CONTEXT12_CNTL_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT13_CNTL 0x3a0d +#define mmVML2VC1_VM_CONTEXT13_CNTL_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT14_CNTL 0x3a0e +#define mmVML2VC1_VM_CONTEXT14_CNTL_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT15_CNTL 0x3a0f +#define mmVML2VC1_VM_CONTEXT15_CNTL_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXTS_DISABLE 0x3a10 +#define mmVML2VC1_VM_CONTEXTS_DISABLE_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG0_SEM 0x3a11 +#define mmVML2VC1_VM_INVALIDATE_ENG0_SEM_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG1_SEM 0x3a12 +#define mmVML2VC1_VM_INVALIDATE_ENG1_SEM_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG2_SEM 0x3a13 +#define mmVML2VC1_VM_INVALIDATE_ENG2_SEM_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG3_SEM 0x3a14 +#define mmVML2VC1_VM_INVALIDATE_ENG3_SEM_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG4_SEM 0x3a15 +#define mmVML2VC1_VM_INVALIDATE_ENG4_SEM_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG5_SEM 0x3a16 +#define mmVML2VC1_VM_INVALIDATE_ENG5_SEM_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG6_SEM 0x3a17 +#define mmVML2VC1_VM_INVALIDATE_ENG6_SEM_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG7_SEM 0x3a18 +#define mmVML2VC1_VM_INVALIDATE_ENG7_SEM_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG8_SEM 0x3a19 +#define mmVML2VC1_VM_INVALIDATE_ENG8_SEM_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG9_SEM 0x3a1a +#define mmVML2VC1_VM_INVALIDATE_ENG9_SEM_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG10_SEM 0x3a1b +#define mmVML2VC1_VM_INVALIDATE_ENG10_SEM_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG11_SEM 0x3a1c +#define mmVML2VC1_VM_INVALIDATE_ENG11_SEM_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG12_SEM 0x3a1d +#define mmVML2VC1_VM_INVALIDATE_ENG12_SEM_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG13_SEM 0x3a1e +#define mmVML2VC1_VM_INVALIDATE_ENG13_SEM_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG14_SEM 0x3a1f +#define mmVML2VC1_VM_INVALIDATE_ENG14_SEM_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG15_SEM 0x3a20 +#define mmVML2VC1_VM_INVALIDATE_ENG15_SEM_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG16_SEM 0x3a21 +#define mmVML2VC1_VM_INVALIDATE_ENG16_SEM_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG17_SEM 0x3a22 +#define mmVML2VC1_VM_INVALIDATE_ENG17_SEM_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG0_REQ 0x3a23 +#define mmVML2VC1_VM_INVALIDATE_ENG0_REQ_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG1_REQ 0x3a24 +#define mmVML2VC1_VM_INVALIDATE_ENG1_REQ_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG2_REQ 0x3a25 +#define mmVML2VC1_VM_INVALIDATE_ENG2_REQ_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG3_REQ 0x3a26 +#define mmVML2VC1_VM_INVALIDATE_ENG3_REQ_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG4_REQ 0x3a27 +#define mmVML2VC1_VM_INVALIDATE_ENG4_REQ_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG5_REQ 0x3a28 +#define mmVML2VC1_VM_INVALIDATE_ENG5_REQ_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG6_REQ 0x3a29 +#define mmVML2VC1_VM_INVALIDATE_ENG6_REQ_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG7_REQ 0x3a2a +#define mmVML2VC1_VM_INVALIDATE_ENG7_REQ_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG8_REQ 0x3a2b +#define mmVML2VC1_VM_INVALIDATE_ENG8_REQ_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG9_REQ 0x3a2c +#define mmVML2VC1_VM_INVALIDATE_ENG9_REQ_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG10_REQ 0x3a2d +#define mmVML2VC1_VM_INVALIDATE_ENG10_REQ_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG11_REQ 0x3a2e +#define mmVML2VC1_VM_INVALIDATE_ENG11_REQ_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG12_REQ 0x3a2f +#define 
mmVML2VC1_VM_INVALIDATE_ENG12_REQ_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG13_REQ 0x3a30 +#define mmVML2VC1_VM_INVALIDATE_ENG13_REQ_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG14_REQ 0x3a31 +#define mmVML2VC1_VM_INVALIDATE_ENG14_REQ_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG15_REQ 0x3a32 +#define mmVML2VC1_VM_INVALIDATE_ENG15_REQ_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG16_REQ 0x3a33 +#define mmVML2VC1_VM_INVALIDATE_ENG16_REQ_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG17_REQ 0x3a34 +#define mmVML2VC1_VM_INVALIDATE_ENG17_REQ_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG0_ACK 0x3a35 +#define mmVML2VC1_VM_INVALIDATE_ENG0_ACK_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG1_ACK 0x3a36 +#define mmVML2VC1_VM_INVALIDATE_ENG1_ACK_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG2_ACK 0x3a37 +#define mmVML2VC1_VM_INVALIDATE_ENG2_ACK_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG3_ACK 0x3a38 +#define mmVML2VC1_VM_INVALIDATE_ENG3_ACK_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG4_ACK 0x3a39 +#define mmVML2VC1_VM_INVALIDATE_ENG4_ACK_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG5_ACK 0x3a3a +#define mmVML2VC1_VM_INVALIDATE_ENG5_ACK_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG6_ACK 0x3a3b +#define mmVML2VC1_VM_INVALIDATE_ENG6_ACK_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG7_ACK 0x3a3c +#define mmVML2VC1_VM_INVALIDATE_ENG7_ACK_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG8_ACK 0x3a3d +#define mmVML2VC1_VM_INVALIDATE_ENG8_ACK_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG9_ACK 0x3a3e +#define mmVML2VC1_VM_INVALIDATE_ENG9_ACK_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG10_ACK 0x3a3f +#define mmVML2VC1_VM_INVALIDATE_ENG10_ACK_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG11_ACK 0x3a40 +#define mmVML2VC1_VM_INVALIDATE_ENG11_ACK_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG12_ACK 0x3a41 +#define mmVML2VC1_VM_INVALIDATE_ENG12_ACK_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG13_ACK 0x3a42 +#define mmVML2VC1_VM_INVALIDATE_ENG13_ACK_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG14_ACK 0x3a43 +#define mmVML2VC1_VM_INVALIDATE_ENG14_ACK_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG15_ACK 0x3a44 +#define mmVML2VC1_VM_INVALIDATE_ENG15_ACK_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG16_ACK 0x3a45 +#define mmVML2VC1_VM_INVALIDATE_ENG16_ACK_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG17_ACK 0x3a46 +#define mmVML2VC1_VM_INVALIDATE_ENG17_ACK_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG0_ADDR_RANGE_LO32 0x3a47 +#define mmVML2VC1_VM_INVALIDATE_ENG0_ADDR_RANGE_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG0_ADDR_RANGE_HI32 0x3a48 +#define mmVML2VC1_VM_INVALIDATE_ENG0_ADDR_RANGE_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG1_ADDR_RANGE_LO32 0x3a49 +#define mmVML2VC1_VM_INVALIDATE_ENG1_ADDR_RANGE_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG1_ADDR_RANGE_HI32 0x3a4a +#define mmVML2VC1_VM_INVALIDATE_ENG1_ADDR_RANGE_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG2_ADDR_RANGE_LO32 0x3a4b +#define mmVML2VC1_VM_INVALIDATE_ENG2_ADDR_RANGE_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG2_ADDR_RANGE_HI32 0x3a4c +#define mmVML2VC1_VM_INVALIDATE_ENG2_ADDR_RANGE_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG3_ADDR_RANGE_LO32 0x3a4d +#define mmVML2VC1_VM_INVALIDATE_ENG3_ADDR_RANGE_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG3_ADDR_RANGE_HI32 0x3a4e +#define mmVML2VC1_VM_INVALIDATE_ENG3_ADDR_RANGE_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG4_ADDR_RANGE_LO32 0x3a4f +#define mmVML2VC1_VM_INVALIDATE_ENG4_ADDR_RANGE_LO32_BASE_IDX 1 
+#define mmVML2VC1_VM_INVALIDATE_ENG4_ADDR_RANGE_HI32 0x3a50 +#define mmVML2VC1_VM_INVALIDATE_ENG4_ADDR_RANGE_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG5_ADDR_RANGE_LO32 0x3a51 +#define mmVML2VC1_VM_INVALIDATE_ENG5_ADDR_RANGE_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG5_ADDR_RANGE_HI32 0x3a52 +#define mmVML2VC1_VM_INVALIDATE_ENG5_ADDR_RANGE_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG6_ADDR_RANGE_LO32 0x3a53 +#define mmVML2VC1_VM_INVALIDATE_ENG6_ADDR_RANGE_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG6_ADDR_RANGE_HI32 0x3a54 +#define mmVML2VC1_VM_INVALIDATE_ENG6_ADDR_RANGE_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG7_ADDR_RANGE_LO32 0x3a55 +#define mmVML2VC1_VM_INVALIDATE_ENG7_ADDR_RANGE_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG7_ADDR_RANGE_HI32 0x3a56 +#define mmVML2VC1_VM_INVALIDATE_ENG7_ADDR_RANGE_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG8_ADDR_RANGE_LO32 0x3a57 +#define mmVML2VC1_VM_INVALIDATE_ENG8_ADDR_RANGE_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG8_ADDR_RANGE_HI32 0x3a58 +#define mmVML2VC1_VM_INVALIDATE_ENG8_ADDR_RANGE_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG9_ADDR_RANGE_LO32 0x3a59 +#define mmVML2VC1_VM_INVALIDATE_ENG9_ADDR_RANGE_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG9_ADDR_RANGE_HI32 0x3a5a +#define mmVML2VC1_VM_INVALIDATE_ENG9_ADDR_RANGE_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG10_ADDR_RANGE_LO32 0x3a5b +#define mmVML2VC1_VM_INVALIDATE_ENG10_ADDR_RANGE_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG10_ADDR_RANGE_HI32 0x3a5c +#define mmVML2VC1_VM_INVALIDATE_ENG10_ADDR_RANGE_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG11_ADDR_RANGE_LO32 0x3a5d +#define mmVML2VC1_VM_INVALIDATE_ENG11_ADDR_RANGE_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG11_ADDR_RANGE_HI32 0x3a5e +#define mmVML2VC1_VM_INVALIDATE_ENG11_ADDR_RANGE_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG12_ADDR_RANGE_LO32 0x3a5f +#define mmVML2VC1_VM_INVALIDATE_ENG12_ADDR_RANGE_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG12_ADDR_RANGE_HI32 0x3a60 +#define mmVML2VC1_VM_INVALIDATE_ENG12_ADDR_RANGE_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG13_ADDR_RANGE_LO32 0x3a61 +#define mmVML2VC1_VM_INVALIDATE_ENG13_ADDR_RANGE_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG13_ADDR_RANGE_HI32 0x3a62 +#define mmVML2VC1_VM_INVALIDATE_ENG13_ADDR_RANGE_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG14_ADDR_RANGE_LO32 0x3a63 +#define mmVML2VC1_VM_INVALIDATE_ENG14_ADDR_RANGE_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG14_ADDR_RANGE_HI32 0x3a64 +#define mmVML2VC1_VM_INVALIDATE_ENG14_ADDR_RANGE_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG15_ADDR_RANGE_LO32 0x3a65 +#define mmVML2VC1_VM_INVALIDATE_ENG15_ADDR_RANGE_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG15_ADDR_RANGE_HI32 0x3a66 +#define mmVML2VC1_VM_INVALIDATE_ENG15_ADDR_RANGE_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32 0x3a67 +#define mmVML2VC1_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32 0x3a68 +#define mmVML2VC1_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG17_ADDR_RANGE_LO32 0x3a69 +#define mmVML2VC1_VM_INVALIDATE_ENG17_ADDR_RANGE_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_INVALIDATE_ENG17_ADDR_RANGE_HI32 0x3a6a +#define mmVML2VC1_VM_INVALIDATE_ENG17_ADDR_RANGE_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32 0x3a6b +#define 
mmVML2VC1_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32 0x3a6c +#define mmVML2VC1_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 0x3a6d +#define mmVML2VC1_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32 0x3a6e +#define mmVML2VC1_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32 0x3a6f +#define mmVML2VC1_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32 0x3a70 +#define mmVML2VC1_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32 0x3a71 +#define mmVML2VC1_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32 0x3a72 +#define mmVML2VC1_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32 0x3a73 +#define mmVML2VC1_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32 0x3a74 +#define mmVML2VC1_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32 0x3a75 +#define mmVML2VC1_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32 0x3a76 +#define mmVML2VC1_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32 0x3a77 +#define mmVML2VC1_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32 0x3a78 +#define mmVML2VC1_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32 0x3a79 +#define mmVML2VC1_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32 0x3a7a +#define mmVML2VC1_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32 0x3a7b +#define mmVML2VC1_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32 0x3a7c +#define mmVML2VC1_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32 0x3a7d +#define mmVML2VC1_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32 0x3a7e +#define mmVML2VC1_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32 0x3a7f +#define mmVML2VC1_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32 0x3a80 +#define mmVML2VC1_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32 0x3a81 +#define mmVML2VC1_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32 0x3a82 +#define mmVML2VC1_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32 0x3a83 +#define mmVML2VC1_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32 0x3a84 +#define mmVML2VC1_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32 0x3a85 +#define mmVML2VC1_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1 +#define 
mmVML2VC1_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32 0x3a86 +#define mmVML2VC1_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32 0x3a87 +#define mmVML2VC1_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32 0x3a88 +#define mmVML2VC1_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32 0x3a89 +#define mmVML2VC1_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32 0x3a8a +#define mmVML2VC1_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32 0x3a8b +#define mmVML2VC1_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32 0x3a8c +#define mmVML2VC1_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32 0x3a8d +#define mmVML2VC1_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32 0x3a8e +#define mmVML2VC1_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32 0x3a8f +#define mmVML2VC1_VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32 0x3a90 +#define mmVML2VC1_VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32 0x3a91 +#define mmVML2VC1_VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32 0x3a92 +#define mmVML2VC1_VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32 0x3a93 +#define mmVML2VC1_VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32 0x3a94 +#define mmVML2VC1_VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32 0x3a95 +#define mmVML2VC1_VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32 0x3a96 +#define mmVML2VC1_VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32 0x3a97 +#define mmVML2VC1_VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32 0x3a98 +#define mmVML2VC1_VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32 0x3a99 +#define mmVML2VC1_VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32 0x3a9a +#define mmVML2VC1_VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32 0x3a9b +#define mmVML2VC1_VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32 0x3a9c +#define mmVML2VC1_VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32 0x3a9d +#define mmVML2VC1_VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32 0x3a9e +#define mmVML2VC1_VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32 0x3a9f +#define mmVML2VC1_VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32 0x3aa0 +#define 
mmVML2VC1_VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32 0x3aa1 +#define mmVML2VC1_VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32 0x3aa2 +#define mmVML2VC1_VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32 0x3aa3 +#define mmVML2VC1_VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32 0x3aa4 +#define mmVML2VC1_VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32 0x3aa5 +#define mmVML2VC1_VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32 0x3aa6 +#define mmVML2VC1_VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32 0x3aa7 +#define mmVML2VC1_VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32 0x3aa8 +#define mmVML2VC1_VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32 0x3aa9 +#define mmVML2VC1_VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32 0x3aaa +#define mmVML2VC1_VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32 0x3aab +#define mmVML2VC1_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32 0x3aac +#define mmVML2VC1_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32 0x3aad +#define mmVML2VC1_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32 0x3aae +#define mmVML2VC1_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32 0x3aaf +#define mmVML2VC1_VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32 0x3ab0 +#define mmVML2VC1_VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32 0x3ab1 +#define mmVML2VC1_VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32 0x3ab2 +#define mmVML2VC1_VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32 0x3ab3 +#define mmVML2VC1_VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32 0x3ab4 +#define mmVML2VC1_VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32 0x3ab5 +#define mmVML2VC1_VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32 0x3ab6 +#define mmVML2VC1_VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32 0x3ab7 +#define mmVML2VC1_VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32 0x3ab8 +#define mmVML2VC1_VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32 0x3ab9 +#define mmVML2VC1_VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32 0x3aba +#define mmVML2VC1_VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1 +#define 
mmVML2VC1_VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32 0x3abb +#define mmVML2VC1_VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32 0x3abc +#define mmVML2VC1_VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32 0x3abd +#define mmVML2VC1_VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32 0x3abe +#define mmVML2VC1_VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32 0x3abf +#define mmVML2VC1_VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32 0x3ac0 +#define mmVML2VC1_VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32 0x3ac1 +#define mmVML2VC1_VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32 0x3ac2 +#define mmVML2VC1_VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32 0x3ac3 +#define mmVML2VC1_VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32 0x3ac4 +#define mmVML2VC1_VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32 0x3ac5 +#define mmVML2VC1_VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32 0x3ac6 +#define mmVML2VC1_VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32 0x3ac7 +#define mmVML2VC1_VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32 0x3ac8 +#define mmVML2VC1_VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32 0x3ac9 +#define mmVML2VC1_VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 1 +#define mmVML2VC1_VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32 0x3aca +#define mmVML2VC1_VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 1 + + +// addressBlock: mmhub_utcl2_vmsharedpfdec:1 +// base address: 0x76b90 +#define mmVMSHAREDPF1_MC_VM_NB_MMIOBASE 0x3ae4 +#define mmVMSHAREDPF1_MC_VM_NB_MMIOBASE_BASE_IDX 1 +#define mmVMSHAREDPF1_MC_VM_NB_MMIOLIMIT 0x3ae5 +#define mmVMSHAREDPF1_MC_VM_NB_MMIOLIMIT_BASE_IDX 1 +#define mmVMSHAREDPF1_MC_VM_NB_PCI_CTRL 0x3ae6 +#define mmVMSHAREDPF1_MC_VM_NB_PCI_CTRL_BASE_IDX 1 +#define mmVMSHAREDPF1_MC_VM_NB_PCI_ARB 0x3ae7 +#define mmVMSHAREDPF1_MC_VM_NB_PCI_ARB_BASE_IDX 1 +#define mmVMSHAREDPF1_MC_VM_NB_TOP_OF_DRAM_SLOT1 0x3ae8 +#define mmVMSHAREDPF1_MC_VM_NB_TOP_OF_DRAM_SLOT1_BASE_IDX 1 +#define mmVMSHAREDPF1_MC_VM_NB_LOWER_TOP_OF_DRAM2 0x3ae9 +#define mmVMSHAREDPF1_MC_VM_NB_LOWER_TOP_OF_DRAM2_BASE_IDX 1 +#define mmVMSHAREDPF1_MC_VM_NB_UPPER_TOP_OF_DRAM2 0x3aea +#define mmVMSHAREDPF1_MC_VM_NB_UPPER_TOP_OF_DRAM2_BASE_IDX 1 +#define mmVMSHAREDPF1_MC_VM_FB_OFFSET 0x3aeb +#define mmVMSHAREDPF1_MC_VM_FB_OFFSET_BASE_IDX 1 +#define mmVMSHAREDPF1_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB 0x3aec +#define mmVMSHAREDPF1_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB_BASE_IDX 1 +#define mmVMSHAREDPF1_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB 0x3aed +#define mmVMSHAREDPF1_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB_BASE_IDX 1 +#define mmVMSHAREDPF1_MC_VM_STEERING 0x3aee +#define mmVMSHAREDPF1_MC_VM_STEERING_BASE_IDX 1 +#define mmVMSHAREDPF1_MC_SHARED_VIRT_RESET_REQ 0x3aef +#define mmVMSHAREDPF1_MC_SHARED_VIRT_RESET_REQ_BASE_IDX 1 +#define 
mmVMSHAREDPF1_MC_MEM_POWER_LS 0x3af0 +#define mmVMSHAREDPF1_MC_MEM_POWER_LS_BASE_IDX 1 +#define mmVMSHAREDPF1_MC_VM_CACHEABLE_DRAM_ADDRESS_START 0x3af1 +#define mmVMSHAREDPF1_MC_VM_CACHEABLE_DRAM_ADDRESS_START_BASE_IDX 1 +#define mmVMSHAREDPF1_MC_VM_CACHEABLE_DRAM_ADDRESS_END 0x3af2 +#define mmVMSHAREDPF1_MC_VM_CACHEABLE_DRAM_ADDRESS_END_BASE_IDX 1 +#define mmVMSHAREDPF1_MC_VM_APT_CNTL 0x3af3 +#define mmVMSHAREDPF1_MC_VM_APT_CNTL_BASE_IDX 1 +#define mmVMSHAREDPF1_MC_VM_LOCAL_HBM_ADDRESS_START 0x3af4 +#define mmVMSHAREDPF1_MC_VM_LOCAL_HBM_ADDRESS_START_BASE_IDX 1 +#define mmVMSHAREDPF1_MC_VM_LOCAL_HBM_ADDRESS_END 0x3af5 +#define mmVMSHAREDPF1_MC_VM_LOCAL_HBM_ADDRESS_END_BASE_IDX 1 +#define mmVMSHAREDPF1_MC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL 0x3af6 +#define mmVMSHAREDPF1_MC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL_BASE_IDX 1 +#define mmVMSHAREDPF1_MC_VM_XGMI_LFB_CNTL 0x3af7 +#define mmVMSHAREDPF1_MC_VM_XGMI_LFB_CNTL_BASE_IDX 1 +#define mmVMSHAREDPF1_MC_VM_XGMI_LFB_SIZE 0x3af8 +#define mmVMSHAREDPF1_MC_VM_XGMI_LFB_SIZE_BASE_IDX 1 +#define mmVMSHAREDPF1_MC_VM_CACHEABLE_DRAM_CNTL 0x3af9 +#define mmVMSHAREDPF1_MC_VM_CACHEABLE_DRAM_CNTL_BASE_IDX 1 + + +// addressBlock: mmhub_utcl2_vmsharedvcdec:1 +// base address: 0x76c00 +#define mmVMSHAREDVC1_MC_VM_FB_LOCATION_BASE 0x3b00 +#define mmVMSHAREDVC1_MC_VM_FB_LOCATION_BASE_BASE_IDX 1 +#define mmVMSHAREDVC1_MC_VM_FB_LOCATION_TOP 0x3b01 +#define mmVMSHAREDVC1_MC_VM_FB_LOCATION_TOP_BASE_IDX 1 +#define mmVMSHAREDVC1_MC_VM_AGP_TOP 0x3b02 +#define mmVMSHAREDVC1_MC_VM_AGP_TOP_BASE_IDX 1 +#define mmVMSHAREDVC1_MC_VM_AGP_BOT 0x3b03 +#define mmVMSHAREDVC1_MC_VM_AGP_BOT_BASE_IDX 1 +#define mmVMSHAREDVC1_MC_VM_AGP_BASE 0x3b04 +#define mmVMSHAREDVC1_MC_VM_AGP_BASE_BASE_IDX 1 +#define mmVMSHAREDVC1_MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x3b05 +#define mmVMSHAREDVC1_MC_VM_SYSTEM_APERTURE_LOW_ADDR_BASE_IDX 1 +#define mmVMSHAREDVC1_MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x3b06 +#define mmVMSHAREDVC1_MC_VM_SYSTEM_APERTURE_HIGH_ADDR_BASE_IDX 1 +#define mmVMSHAREDVC1_MC_VM_MX_L1_TLB_CNTL 0x3b07 +#define mmVMSHAREDVC1_MC_VM_MX_L1_TLB_CNTL_BASE_IDX 1 + + +// addressBlock: mmhub_utcl2_vmsharedhvdec:1 +// base address: 0x76c80 +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF0 0x3b20 +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF0_BASE_IDX 1 +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF1 0x3b21 +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF1_BASE_IDX 1 +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF2 0x3b22 +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF2_BASE_IDX 1 +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF3 0x3b23 +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF3_BASE_IDX 1 +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF4 0x3b24 +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF4_BASE_IDX 1 +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF5 0x3b25 +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF5_BASE_IDX 1 +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF6 0x3b26 +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF6_BASE_IDX 1 +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF7 0x3b27 +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF7_BASE_IDX 1 +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF8 0x3b28 +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF8_BASE_IDX 1 +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF9 0x3b29 +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF9_BASE_IDX 1 +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF10 0x3b2a +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF10_BASE_IDX 1 +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF11 0x3b2b +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF11_BASE_IDX 1 +#define 
mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF12 0x3b2c +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF12_BASE_IDX 1 +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF13 0x3b2d +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF13_BASE_IDX 1 +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF14 0x3b2e +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF14_BASE_IDX 1 +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF15 0x3b2f +#define mmVMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF15_BASE_IDX 1 +#define mmVMSHAREDHV1_VM_IOMMU_MMIO_CNTRL_1 0x3b30 +#define mmVMSHAREDHV1_VM_IOMMU_MMIO_CNTRL_1_BASE_IDX 1 +#define mmVMSHAREDHV1_MC_VM_MARC_BASE_LO_0 0x3b31 +#define mmVMSHAREDHV1_MC_VM_MARC_BASE_LO_0_BASE_IDX 1 +#define mmVMSHAREDHV1_MC_VM_MARC_BASE_LO_1 0x3b32 +#define mmVMSHAREDHV1_MC_VM_MARC_BASE_LO_1_BASE_IDX 1 +#define mmVMSHAREDHV1_MC_VM_MARC_BASE_LO_2 0x3b33 +#define mmVMSHAREDHV1_MC_VM_MARC_BASE_LO_2_BASE_IDX 1 +#define mmVMSHAREDHV1_MC_VM_MARC_BASE_LO_3 0x3b34 +#define mmVMSHAREDHV1_MC_VM_MARC_BASE_LO_3_BASE_IDX 1 +#define mmVMSHAREDHV1_MC_VM_MARC_BASE_HI_0 0x3b35 +#define mmVMSHAREDHV1_MC_VM_MARC_BASE_HI_0_BASE_IDX 1 +#define mmVMSHAREDHV1_MC_VM_MARC_BASE_HI_1 0x3b36 +#define mmVMSHAREDHV1_MC_VM_MARC_BASE_HI_1_BASE_IDX 1 +#define mmVMSHAREDHV1_MC_VM_MARC_BASE_HI_2 0x3b37 +#define mmVMSHAREDHV1_MC_VM_MARC_BASE_HI_2_BASE_IDX 1 +#define mmVMSHAREDHV1_MC_VM_MARC_BASE_HI_3 0x3b38 +#define mmVMSHAREDHV1_MC_VM_MARC_BASE_HI_3_BASE_IDX 1 +#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_LO_0 0x3b39 +#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_LO_0_BASE_IDX 1 +#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_LO_1 0x3b3a +#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_LO_1_BASE_IDX 1 +#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_LO_2 0x3b3b +#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_LO_2_BASE_IDX 1 +#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_LO_3 0x3b3c +#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_LO_3_BASE_IDX 1 +#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_HI_0 0x3b3d +#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_HI_0_BASE_IDX 1 +#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_HI_1 0x3b3e +#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_HI_1_BASE_IDX 1 +#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_HI_2 0x3b3f +#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_HI_2_BASE_IDX 1 +#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_HI_3 0x3b40 +#define mmVMSHAREDHV1_MC_VM_MARC_RELOC_HI_3_BASE_IDX 1 +#define mmVMSHAREDHV1_MC_VM_MARC_LEN_LO_0 0x3b41 +#define mmVMSHAREDHV1_MC_VM_MARC_LEN_LO_0_BASE_IDX 1 +#define mmVMSHAREDHV1_MC_VM_MARC_LEN_LO_1 0x3b42 +#define mmVMSHAREDHV1_MC_VM_MARC_LEN_LO_1_BASE_IDX 1 +#define mmVMSHAREDHV1_MC_VM_MARC_LEN_LO_2 0x3b43 +#define mmVMSHAREDHV1_MC_VM_MARC_LEN_LO_2_BASE_IDX 1 +#define mmVMSHAREDHV1_MC_VM_MARC_LEN_LO_3 0x3b44 +#define mmVMSHAREDHV1_MC_VM_MARC_LEN_LO_3_BASE_IDX 1 +#define mmVMSHAREDHV1_MC_VM_MARC_LEN_HI_0 0x3b45 +#define mmVMSHAREDHV1_MC_VM_MARC_LEN_HI_0_BASE_IDX 1 +#define mmVMSHAREDHV1_MC_VM_MARC_LEN_HI_1 0x3b46 +#define mmVMSHAREDHV1_MC_VM_MARC_LEN_HI_1_BASE_IDX 1 +#define mmVMSHAREDHV1_MC_VM_MARC_LEN_HI_2 0x3b47 +#define mmVMSHAREDHV1_MC_VM_MARC_LEN_HI_2_BASE_IDX 1 +#define mmVMSHAREDHV1_MC_VM_MARC_LEN_HI_3 0x3b48 +#define mmVMSHAREDHV1_MC_VM_MARC_LEN_HI_3_BASE_IDX 1 +#define mmVMSHAREDHV1_VM_IOMMU_CONTROL_REGISTER 0x3b49 +#define mmVMSHAREDHV1_VM_IOMMU_CONTROL_REGISTER_BASE_IDX 1 +#define mmVMSHAREDHV1_VM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER 0x3b4a +#define mmVMSHAREDHV1_VM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER_BASE_IDX 1 +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL 0x3b4b +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_BASE_IDX 1 +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_0 0x3b4c +#define 
mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_0_BASE_IDX 1 +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_1 0x3b4d +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_1_BASE_IDX 1 +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_2 0x3b4e +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_2_BASE_IDX 1 +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_3 0x3b4f +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_3_BASE_IDX 1 +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_4 0x3b50 +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_4_BASE_IDX 1 +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_5 0x3b51 +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_5_BASE_IDX 1 +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_6 0x3b52 +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_6_BASE_IDX 1 +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_7 0x3b53 +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_7_BASE_IDX 1 +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_8 0x3b54 +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_8_BASE_IDX 1 +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_9 0x3b55 +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_9_BASE_IDX 1 +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_10 0x3b56 +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_10_BASE_IDX 1 +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_11 0x3b57 +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_11_BASE_IDX 1 +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_12 0x3b58 +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_12_BASE_IDX 1 +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_13 0x3b59 +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_13_BASE_IDX 1 +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_14 0x3b5a +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_14_BASE_IDX 1 +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_15 0x3b5b +#define mmVMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_15_BASE_IDX 1 +#define mmVMSHAREDHV1_UTCL2_CGTT_CLK_CTRL 0x3b5c +#define mmVMSHAREDHV1_UTCL2_CGTT_CLK_CTRL_BASE_IDX 1 +#define mmVMSHAREDHV1_MC_SHARED_ACTIVE_FCN_ID 0x3b5d +#define mmVMSHAREDHV1_MC_SHARED_ACTIVE_FCN_ID_BASE_IDX 1 +#define mmVMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE 0x3b5e +#define mmVMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE_BASE_IDX 1 + + +// addressBlock: mmhub_utcl2_atcl2pfcntrdec:1 +// base address: 0x76dc0 +#define mmATCL2PFCNTR1_ATC_L2_PERFCOUNTER_LO 0x3b70 +#define mmATCL2PFCNTR1_ATC_L2_PERFCOUNTER_LO_BASE_IDX 1 +#define mmATCL2PFCNTR1_ATC_L2_PERFCOUNTER_HI 0x3b71 +#define mmATCL2PFCNTR1_ATC_L2_PERFCOUNTER_HI_BASE_IDX 1 + + +// addressBlock: mmhub_utcl2_atcl2pfcntldec:1 +// base address: 0x76dd0 +#define mmATCL2PFCNTL1_ATC_L2_PERFCOUNTER0_CFG 0x3b74 +#define mmATCL2PFCNTL1_ATC_L2_PERFCOUNTER0_CFG_BASE_IDX 1 +#define mmATCL2PFCNTL1_ATC_L2_PERFCOUNTER1_CFG 0x3b75 +#define mmATCL2PFCNTL1_ATC_L2_PERFCOUNTER1_CFG_BASE_IDX 1 +#define mmATCL2PFCNTL1_ATC_L2_PERFCOUNTER_RSLT_CNTL 0x3b76 +#define mmATCL2PFCNTL1_ATC_L2_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1 + + +// addressBlock: mmhub_utcl2_vml2pldec:1 +// base address: 0x76e00 +#define mmVML2PL1_MC_VM_L2_PERFCOUNTER0_CFG 0x3b80 +#define mmVML2PL1_MC_VM_L2_PERFCOUNTER0_CFG_BASE_IDX 1 +#define mmVML2PL1_MC_VM_L2_PERFCOUNTER1_CFG 0x3b81 +#define mmVML2PL1_MC_VM_L2_PERFCOUNTER1_CFG_BASE_IDX 1 +#define mmVML2PL1_MC_VM_L2_PERFCOUNTER2_CFG 0x3b82 +#define mmVML2PL1_MC_VM_L2_PERFCOUNTER2_CFG_BASE_IDX 1 +#define mmVML2PL1_MC_VM_L2_PERFCOUNTER3_CFG 0x3b83 +#define mmVML2PL1_MC_VM_L2_PERFCOUNTER3_CFG_BASE_IDX 1 +#define mmVML2PL1_MC_VM_L2_PERFCOUNTER4_CFG 0x3b84 +#define mmVML2PL1_MC_VM_L2_PERFCOUNTER4_CFG_BASE_IDX 1 +#define mmVML2PL1_MC_VM_L2_PERFCOUNTER5_CFG 0x3b85 +#define mmVML2PL1_MC_VM_L2_PERFCOUNTER5_CFG_BASE_IDX 1 +#define mmVML2PL1_MC_VM_L2_PERFCOUNTER6_CFG 0x3b86 +#define 
mmVML2PL1_MC_VM_L2_PERFCOUNTER6_CFG_BASE_IDX 1
+#define mmVML2PL1_MC_VM_L2_PERFCOUNTER7_CFG 0x3b87
+#define mmVML2PL1_MC_VM_L2_PERFCOUNTER7_CFG_BASE_IDX 1
+#define mmVML2PL1_MC_VM_L2_PERFCOUNTER_RSLT_CNTL 0x3b88
+#define mmVML2PL1_MC_VM_L2_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+
+
+// addressBlock: mmhub_utcl2_vml2prdec:1
+// base address: 0x76e40
+#define mmVML2PR1_MC_VM_L2_PERFCOUNTER_LO 0x3b90
+#define mmVML2PR1_MC_VM_L2_PERFCOUNTER_LO_BASE_IDX 1
+#define mmVML2PR1_MC_VM_L2_PERFCOUNTER_HI 0x3b91
+#define mmVML2PR1_MC_VM_L2_PERFCOUNTER_HI_BASE_IDX 1
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h
new file mode 100644
index 000000000000..40dfbf16bd34
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h
@@ -0,0 +1,44884 @@
+/*
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ +#ifndef _mmhub_9_4_1_SH_MASK_HEADER +#define _mmhub_9_4_1_SH_MASK_HEADER + + +// addressBlock: mmhub_dagb_dagbdec0 +//DAGB0_RDCLI0 +#define DAGB0_RDCLI0__VIRT_CHAN__SHIFT 0x0 +#define DAGB0_RDCLI0__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB0_RDCLI0__URG_HIGH__SHIFT 0x4 +#define DAGB0_RDCLI0__URG_LOW__SHIFT 0x8 +#define DAGB0_RDCLI0__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB0_RDCLI0__MAX_BW__SHIFT 0xd +#define DAGB0_RDCLI0__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB0_RDCLI0__MIN_BW__SHIFT 0x16 +#define DAGB0_RDCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB0_RDCLI0__MAX_OSD__SHIFT 0x1a +#define DAGB0_RDCLI0__VIRT_CHAN_MASK 0x00000007L +#define DAGB0_RDCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB0_RDCLI0__URG_HIGH_MASK 0x000000F0L +#define DAGB0_RDCLI0__URG_LOW_MASK 0x00000F00L +#define DAGB0_RDCLI0__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB0_RDCLI0__MAX_BW_MASK 0x001FE000L +#define DAGB0_RDCLI0__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB0_RDCLI0__MIN_BW_MASK 0x01C00000L +#define DAGB0_RDCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB0_RDCLI0__MAX_OSD_MASK 0xFC000000L +//DAGB0_RDCLI1 +#define DAGB0_RDCLI1__VIRT_CHAN__SHIFT 0x0 +#define DAGB0_RDCLI1__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB0_RDCLI1__URG_HIGH__SHIFT 0x4 +#define DAGB0_RDCLI1__URG_LOW__SHIFT 0x8 +#define DAGB0_RDCLI1__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB0_RDCLI1__MAX_BW__SHIFT 0xd +#define DAGB0_RDCLI1__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB0_RDCLI1__MIN_BW__SHIFT 0x16 +#define DAGB0_RDCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB0_RDCLI1__MAX_OSD__SHIFT 0x1a +#define DAGB0_RDCLI1__VIRT_CHAN_MASK 0x00000007L +#define DAGB0_RDCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB0_RDCLI1__URG_HIGH_MASK 0x000000F0L +#define DAGB0_RDCLI1__URG_LOW_MASK 0x00000F00L +#define DAGB0_RDCLI1__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB0_RDCLI1__MAX_BW_MASK 0x001FE000L +#define DAGB0_RDCLI1__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB0_RDCLI1__MIN_BW_MASK 0x01C00000L +#define DAGB0_RDCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB0_RDCLI1__MAX_OSD_MASK 0xFC000000L +//DAGB0_RDCLI2 +#define DAGB0_RDCLI2__VIRT_CHAN__SHIFT 0x0 +#define DAGB0_RDCLI2__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB0_RDCLI2__URG_HIGH__SHIFT 0x4 +#define DAGB0_RDCLI2__URG_LOW__SHIFT 0x8 +#define DAGB0_RDCLI2__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB0_RDCLI2__MAX_BW__SHIFT 0xd +#define DAGB0_RDCLI2__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB0_RDCLI2__MIN_BW__SHIFT 0x16 +#define DAGB0_RDCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB0_RDCLI2__MAX_OSD__SHIFT 0x1a +#define DAGB0_RDCLI2__VIRT_CHAN_MASK 0x00000007L +#define DAGB0_RDCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB0_RDCLI2__URG_HIGH_MASK 0x000000F0L +#define DAGB0_RDCLI2__URG_LOW_MASK 0x00000F00L +#define DAGB0_RDCLI2__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB0_RDCLI2__MAX_BW_MASK 0x001FE000L +#define DAGB0_RDCLI2__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB0_RDCLI2__MIN_BW_MASK 0x01C00000L +#define DAGB0_RDCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB0_RDCLI2__MAX_OSD_MASK 0xFC000000L +//DAGB0_RDCLI3 +#define DAGB0_RDCLI3__VIRT_CHAN__SHIFT 0x0 +#define DAGB0_RDCLI3__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB0_RDCLI3__URG_HIGH__SHIFT 0x4 +#define DAGB0_RDCLI3__URG_LOW__SHIFT 0x8 +#define DAGB0_RDCLI3__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB0_RDCLI3__MAX_BW__SHIFT 0xd +#define DAGB0_RDCLI3__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB0_RDCLI3__MIN_BW__SHIFT 0x16 +#define DAGB0_RDCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB0_RDCLI3__MAX_OSD__SHIFT 0x1a 
+#define DAGB0_RDCLI3__VIRT_CHAN_MASK 0x00000007L +#define DAGB0_RDCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB0_RDCLI3__URG_HIGH_MASK 0x000000F0L +#define DAGB0_RDCLI3__URG_LOW_MASK 0x00000F00L +#define DAGB0_RDCLI3__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB0_RDCLI3__MAX_BW_MASK 0x001FE000L +#define DAGB0_RDCLI3__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB0_RDCLI3__MIN_BW_MASK 0x01C00000L +#define DAGB0_RDCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB0_RDCLI3__MAX_OSD_MASK 0xFC000000L +//DAGB0_RDCLI4 +#define DAGB0_RDCLI4__VIRT_CHAN__SHIFT 0x0 +#define DAGB0_RDCLI4__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB0_RDCLI4__URG_HIGH__SHIFT 0x4 +#define DAGB0_RDCLI4__URG_LOW__SHIFT 0x8 +#define DAGB0_RDCLI4__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB0_RDCLI4__MAX_BW__SHIFT 0xd +#define DAGB0_RDCLI4__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB0_RDCLI4__MIN_BW__SHIFT 0x16 +#define DAGB0_RDCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB0_RDCLI4__MAX_OSD__SHIFT 0x1a +#define DAGB0_RDCLI4__VIRT_CHAN_MASK 0x00000007L +#define DAGB0_RDCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB0_RDCLI4__URG_HIGH_MASK 0x000000F0L +#define DAGB0_RDCLI4__URG_LOW_MASK 0x00000F00L +#define DAGB0_RDCLI4__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB0_RDCLI4__MAX_BW_MASK 0x001FE000L +#define DAGB0_RDCLI4__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB0_RDCLI4__MIN_BW_MASK 0x01C00000L +#define DAGB0_RDCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB0_RDCLI4__MAX_OSD_MASK 0xFC000000L +//DAGB0_RDCLI5 +#define DAGB0_RDCLI5__VIRT_CHAN__SHIFT 0x0 +#define DAGB0_RDCLI5__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB0_RDCLI5__URG_HIGH__SHIFT 0x4 +#define DAGB0_RDCLI5__URG_LOW__SHIFT 0x8 +#define DAGB0_RDCLI5__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB0_RDCLI5__MAX_BW__SHIFT 0xd +#define DAGB0_RDCLI5__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB0_RDCLI5__MIN_BW__SHIFT 0x16 +#define DAGB0_RDCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB0_RDCLI5__MAX_OSD__SHIFT 0x1a +#define DAGB0_RDCLI5__VIRT_CHAN_MASK 0x00000007L +#define DAGB0_RDCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB0_RDCLI5__URG_HIGH_MASK 0x000000F0L +#define DAGB0_RDCLI5__URG_LOW_MASK 0x00000F00L +#define DAGB0_RDCLI5__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB0_RDCLI5__MAX_BW_MASK 0x001FE000L +#define DAGB0_RDCLI5__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB0_RDCLI5__MIN_BW_MASK 0x01C00000L +#define DAGB0_RDCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB0_RDCLI5__MAX_OSD_MASK 0xFC000000L +//DAGB0_RDCLI6 +#define DAGB0_RDCLI6__VIRT_CHAN__SHIFT 0x0 +#define DAGB0_RDCLI6__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB0_RDCLI6__URG_HIGH__SHIFT 0x4 +#define DAGB0_RDCLI6__URG_LOW__SHIFT 0x8 +#define DAGB0_RDCLI6__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB0_RDCLI6__MAX_BW__SHIFT 0xd +#define DAGB0_RDCLI6__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB0_RDCLI6__MIN_BW__SHIFT 0x16 +#define DAGB0_RDCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB0_RDCLI6__MAX_OSD__SHIFT 0x1a +#define DAGB0_RDCLI6__VIRT_CHAN_MASK 0x00000007L +#define DAGB0_RDCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB0_RDCLI6__URG_HIGH_MASK 0x000000F0L +#define DAGB0_RDCLI6__URG_LOW_MASK 0x00000F00L +#define DAGB0_RDCLI6__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB0_RDCLI6__MAX_BW_MASK 0x001FE000L +#define DAGB0_RDCLI6__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB0_RDCLI6__MIN_BW_MASK 0x01C00000L +#define DAGB0_RDCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB0_RDCLI6__MAX_OSD_MASK 0xFC000000L +//DAGB0_RDCLI7 +#define DAGB0_RDCLI7__VIRT_CHAN__SHIFT 0x0 +#define 
DAGB0_RDCLI7__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB0_RDCLI7__URG_HIGH__SHIFT 0x4 +#define DAGB0_RDCLI7__URG_LOW__SHIFT 0x8 +#define DAGB0_RDCLI7__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB0_RDCLI7__MAX_BW__SHIFT 0xd +#define DAGB0_RDCLI7__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB0_RDCLI7__MIN_BW__SHIFT 0x16 +#define DAGB0_RDCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB0_RDCLI7__MAX_OSD__SHIFT 0x1a +#define DAGB0_RDCLI7__VIRT_CHAN_MASK 0x00000007L +#define DAGB0_RDCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB0_RDCLI7__URG_HIGH_MASK 0x000000F0L +#define DAGB0_RDCLI7__URG_LOW_MASK 0x00000F00L +#define DAGB0_RDCLI7__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB0_RDCLI7__MAX_BW_MASK 0x001FE000L +#define DAGB0_RDCLI7__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB0_RDCLI7__MIN_BW_MASK 0x01C00000L +#define DAGB0_RDCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB0_RDCLI7__MAX_OSD_MASK 0xFC000000L +//DAGB0_RDCLI8 +#define DAGB0_RDCLI8__VIRT_CHAN__SHIFT 0x0 +#define DAGB0_RDCLI8__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB0_RDCLI8__URG_HIGH__SHIFT 0x4 +#define DAGB0_RDCLI8__URG_LOW__SHIFT 0x8 +#define DAGB0_RDCLI8__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB0_RDCLI8__MAX_BW__SHIFT 0xd +#define DAGB0_RDCLI8__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB0_RDCLI8__MIN_BW__SHIFT 0x16 +#define DAGB0_RDCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB0_RDCLI8__MAX_OSD__SHIFT 0x1a +#define DAGB0_RDCLI8__VIRT_CHAN_MASK 0x00000007L +#define DAGB0_RDCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB0_RDCLI8__URG_HIGH_MASK 0x000000F0L +#define DAGB0_RDCLI8__URG_LOW_MASK 0x00000F00L +#define DAGB0_RDCLI8__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB0_RDCLI8__MAX_BW_MASK 0x001FE000L +#define DAGB0_RDCLI8__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB0_RDCLI8__MIN_BW_MASK 0x01C00000L +#define DAGB0_RDCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB0_RDCLI8__MAX_OSD_MASK 0xFC000000L +//DAGB0_RDCLI9 +#define DAGB0_RDCLI9__VIRT_CHAN__SHIFT 0x0 +#define DAGB0_RDCLI9__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB0_RDCLI9__URG_HIGH__SHIFT 0x4 +#define DAGB0_RDCLI9__URG_LOW__SHIFT 0x8 +#define DAGB0_RDCLI9__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB0_RDCLI9__MAX_BW__SHIFT 0xd +#define DAGB0_RDCLI9__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB0_RDCLI9__MIN_BW__SHIFT 0x16 +#define DAGB0_RDCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB0_RDCLI9__MAX_OSD__SHIFT 0x1a +#define DAGB0_RDCLI9__VIRT_CHAN_MASK 0x00000007L +#define DAGB0_RDCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB0_RDCLI9__URG_HIGH_MASK 0x000000F0L +#define DAGB0_RDCLI9__URG_LOW_MASK 0x00000F00L +#define DAGB0_RDCLI9__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB0_RDCLI9__MAX_BW_MASK 0x001FE000L +#define DAGB0_RDCLI9__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB0_RDCLI9__MIN_BW_MASK 0x01C00000L +#define DAGB0_RDCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB0_RDCLI9__MAX_OSD_MASK 0xFC000000L +//DAGB0_RDCLI10 +#define DAGB0_RDCLI10__VIRT_CHAN__SHIFT 0x0 +#define DAGB0_RDCLI10__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB0_RDCLI10__URG_HIGH__SHIFT 0x4 +#define DAGB0_RDCLI10__URG_LOW__SHIFT 0x8 +#define DAGB0_RDCLI10__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB0_RDCLI10__MAX_BW__SHIFT 0xd +#define DAGB0_RDCLI10__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB0_RDCLI10__MIN_BW__SHIFT 0x16 +#define DAGB0_RDCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB0_RDCLI10__MAX_OSD__SHIFT 0x1a +#define DAGB0_RDCLI10__VIRT_CHAN_MASK 0x00000007L +#define DAGB0_RDCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB0_RDCLI10__URG_HIGH_MASK 0x000000F0L +#define 
DAGB0_RDCLI10__URG_LOW_MASK 0x00000F00L +#define DAGB0_RDCLI10__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB0_RDCLI10__MAX_BW_MASK 0x001FE000L +#define DAGB0_RDCLI10__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB0_RDCLI10__MIN_BW_MASK 0x01C00000L +#define DAGB0_RDCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB0_RDCLI10__MAX_OSD_MASK 0xFC000000L +//DAGB0_RDCLI11 +#define DAGB0_RDCLI11__VIRT_CHAN__SHIFT 0x0 +#define DAGB0_RDCLI11__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB0_RDCLI11__URG_HIGH__SHIFT 0x4 +#define DAGB0_RDCLI11__URG_LOW__SHIFT 0x8 +#define DAGB0_RDCLI11__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB0_RDCLI11__MAX_BW__SHIFT 0xd +#define DAGB0_RDCLI11__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB0_RDCLI11__MIN_BW__SHIFT 0x16 +#define DAGB0_RDCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB0_RDCLI11__MAX_OSD__SHIFT 0x1a +#define DAGB0_RDCLI11__VIRT_CHAN_MASK 0x00000007L +#define DAGB0_RDCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB0_RDCLI11__URG_HIGH_MASK 0x000000F0L +#define DAGB0_RDCLI11__URG_LOW_MASK 0x00000F00L +#define DAGB0_RDCLI11__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB0_RDCLI11__MAX_BW_MASK 0x001FE000L +#define DAGB0_RDCLI11__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB0_RDCLI11__MIN_BW_MASK 0x01C00000L +#define DAGB0_RDCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB0_RDCLI11__MAX_OSD_MASK 0xFC000000L +//DAGB0_RDCLI12 +#define DAGB0_RDCLI12__VIRT_CHAN__SHIFT 0x0 +#define DAGB0_RDCLI12__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB0_RDCLI12__URG_HIGH__SHIFT 0x4 +#define DAGB0_RDCLI12__URG_LOW__SHIFT 0x8 +#define DAGB0_RDCLI12__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB0_RDCLI12__MAX_BW__SHIFT 0xd +#define DAGB0_RDCLI12__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB0_RDCLI12__MIN_BW__SHIFT 0x16 +#define DAGB0_RDCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB0_RDCLI12__MAX_OSD__SHIFT 0x1a +#define DAGB0_RDCLI12__VIRT_CHAN_MASK 0x00000007L +#define DAGB0_RDCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB0_RDCLI12__URG_HIGH_MASK 0x000000F0L +#define DAGB0_RDCLI12__URG_LOW_MASK 0x00000F00L +#define DAGB0_RDCLI12__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB0_RDCLI12__MAX_BW_MASK 0x001FE000L +#define DAGB0_RDCLI12__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB0_RDCLI12__MIN_BW_MASK 0x01C00000L +#define DAGB0_RDCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB0_RDCLI12__MAX_OSD_MASK 0xFC000000L +//DAGB0_RDCLI13 +#define DAGB0_RDCLI13__VIRT_CHAN__SHIFT 0x0 +#define DAGB0_RDCLI13__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB0_RDCLI13__URG_HIGH__SHIFT 0x4 +#define DAGB0_RDCLI13__URG_LOW__SHIFT 0x8 +#define DAGB0_RDCLI13__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB0_RDCLI13__MAX_BW__SHIFT 0xd +#define DAGB0_RDCLI13__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB0_RDCLI13__MIN_BW__SHIFT 0x16 +#define DAGB0_RDCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB0_RDCLI13__MAX_OSD__SHIFT 0x1a +#define DAGB0_RDCLI13__VIRT_CHAN_MASK 0x00000007L +#define DAGB0_RDCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB0_RDCLI13__URG_HIGH_MASK 0x000000F0L +#define DAGB0_RDCLI13__URG_LOW_MASK 0x00000F00L +#define DAGB0_RDCLI13__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB0_RDCLI13__MAX_BW_MASK 0x001FE000L +#define DAGB0_RDCLI13__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB0_RDCLI13__MIN_BW_MASK 0x01C00000L +#define DAGB0_RDCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB0_RDCLI13__MAX_OSD_MASK 0xFC000000L +//DAGB0_RDCLI14 +#define DAGB0_RDCLI14__VIRT_CHAN__SHIFT 0x0 +#define DAGB0_RDCLI14__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB0_RDCLI14__URG_HIGH__SHIFT 0x4 +#define 
DAGB0_RDCLI14__URG_LOW__SHIFT 0x8 +#define DAGB0_RDCLI14__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB0_RDCLI14__MAX_BW__SHIFT 0xd +#define DAGB0_RDCLI14__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB0_RDCLI14__MIN_BW__SHIFT 0x16 +#define DAGB0_RDCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB0_RDCLI14__MAX_OSD__SHIFT 0x1a +#define DAGB0_RDCLI14__VIRT_CHAN_MASK 0x00000007L +#define DAGB0_RDCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB0_RDCLI14__URG_HIGH_MASK 0x000000F0L +#define DAGB0_RDCLI14__URG_LOW_MASK 0x00000F00L +#define DAGB0_RDCLI14__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB0_RDCLI14__MAX_BW_MASK 0x001FE000L +#define DAGB0_RDCLI14__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB0_RDCLI14__MIN_BW_MASK 0x01C00000L +#define DAGB0_RDCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB0_RDCLI14__MAX_OSD_MASK 0xFC000000L +//DAGB0_RDCLI15 +#define DAGB0_RDCLI15__VIRT_CHAN__SHIFT 0x0 +#define DAGB0_RDCLI15__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB0_RDCLI15__URG_HIGH__SHIFT 0x4 +#define DAGB0_RDCLI15__URG_LOW__SHIFT 0x8 +#define DAGB0_RDCLI15__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB0_RDCLI15__MAX_BW__SHIFT 0xd +#define DAGB0_RDCLI15__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB0_RDCLI15__MIN_BW__SHIFT 0x16 +#define DAGB0_RDCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB0_RDCLI15__MAX_OSD__SHIFT 0x1a +#define DAGB0_RDCLI15__VIRT_CHAN_MASK 0x00000007L +#define DAGB0_RDCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB0_RDCLI15__URG_HIGH_MASK 0x000000F0L +#define DAGB0_RDCLI15__URG_LOW_MASK 0x00000F00L +#define DAGB0_RDCLI15__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB0_RDCLI15__MAX_BW_MASK 0x001FE000L +#define DAGB0_RDCLI15__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB0_RDCLI15__MIN_BW_MASK 0x01C00000L +#define DAGB0_RDCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB0_RDCLI15__MAX_OSD_MASK 0xFC000000L +//DAGB0_RD_CNTL +#define DAGB0_RD_CNTL__SCLK_FREQ__SHIFT 0x0 +#define DAGB0_RD_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4 +#define DAGB0_RD_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa +#define DAGB0_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10 +#define DAGB0_RD_CNTL__IO_LEVEL__SHIFT 0x11 +#define DAGB0_RD_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14 +#define DAGB0_RD_CNTL__SHARE_VC_NUM__SHIFT 0x17 +#define DAGB0_RD_CNTL__SCLK_FREQ_MASK 0x0000000FL +#define DAGB0_RD_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L +#define DAGB0_RD_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L +#define DAGB0_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L +#define DAGB0_RD_CNTL__IO_LEVEL_MASK 0x000E0000L +#define DAGB0_RD_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L +#define DAGB0_RD_CNTL__SHARE_VC_NUM_MASK 0x03800000L +//DAGB0_RD_GMI_CNTL +#define DAGB0_RD_GMI_CNTL__EA_CREDIT__SHIFT 0x0 +#define DAGB0_RD_GMI_CNTL__LEVEL__SHIFT 0x6 +#define DAGB0_RD_GMI_CNTL__MAX_BURST__SHIFT 0x9 +#define DAGB0_RD_GMI_CNTL__LAZY_TIMER__SHIFT 0xd +#define DAGB0_RD_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL +#define DAGB0_RD_GMI_CNTL__LEVEL_MASK 0x000001C0L +#define DAGB0_RD_GMI_CNTL__MAX_BURST_MASK 0x00001E00L +#define DAGB0_RD_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L +//DAGB0_RD_ADDR_DAGB +#define DAGB0_RD_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0 +#define DAGB0_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3 +#define DAGB0_RD_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6 +#define DAGB0_RD_ADDR_DAGB__WHOAMI__SHIFT 0x7 +#define DAGB0_RD_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L +#define DAGB0_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L +#define DAGB0_RD_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L +#define DAGB0_RD_ADDR_DAGB__WHOAMI_MASK 0x00001F80L 
+//DAGB0_RD_OUTPUT_DAGB_MAX_BURST +#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0 +#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4 +#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8 +#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc +#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10 +#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14 +#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18 +#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c +#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL +#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L +#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L +#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L +#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L +#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L +#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L +#define DAGB0_RD_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L +//DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER +#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0 +#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4 +#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8 +#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc +#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10 +#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14 +#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18 +#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c +#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL +#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L +#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L +#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L +#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L +#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L +#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L +#define DAGB0_RD_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L +//DAGB0_RD_CGTT_CLK_CTRL +#define DAGB0_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB0_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB0_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB0_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB0_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB0_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB0_L1TLB_RD_CGTT_CLK_CTRL +#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define 
DAGB0_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB0_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB0_ATCVM_RD_CGTT_CLK_CTRL +#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB0_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB0_RD_ADDR_DAGB_MAX_BURST0 +#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0 +#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4 +#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8 +#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc +#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10 +#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14 +#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18 +#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c +#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL +#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L +#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L +#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L +#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L +#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L +#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L +#define DAGB0_RD_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L +//DAGB0_RD_ADDR_DAGB_LAZY_TIMER0 +#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0 +#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4 +#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8 +#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc +#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10 +#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14 +#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18 +#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c +#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL +#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L +#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L +#define 
DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L +#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L +#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L +#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L +#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L +//DAGB0_RD_ADDR_DAGB_MAX_BURST1 +#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0 +#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4 +#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8 +#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc +#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10 +#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14 +#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18 +#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c +#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL +#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L +#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L +#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L +#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L +#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L +#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L +#define DAGB0_RD_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L +//DAGB0_RD_ADDR_DAGB_LAZY_TIMER1 +#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0 +#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4 +#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8 +#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc +#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10 +#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14 +#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18 +#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c +#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL +#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L +#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L +#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L +#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L +#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L +#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L +#define DAGB0_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L +//DAGB0_RD_VC0_CNTL +#define DAGB0_RD_VC0_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB0_RD_VC0_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB0_RD_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB0_RD_VC0_CNTL__MAX_BW__SHIFT 0xc +#define DAGB0_RD_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB0_RD_VC0_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB0_RD_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB0_RD_VC0_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB0_RD_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB0_RD_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB0_RD_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB0_RD_VC0_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB0_RD_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB0_RD_VC0_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB0_RD_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB0_RD_VC0_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB0_RD_VC1_CNTL +#define DAGB0_RD_VC1_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB0_RD_VC1_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB0_RD_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB0_RD_VC1_CNTL__MAX_BW__SHIFT 0xc +#define 
DAGB0_RD_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB0_RD_VC1_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB0_RD_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB0_RD_VC1_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB0_RD_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB0_RD_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB0_RD_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB0_RD_VC1_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB0_RD_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB0_RD_VC1_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB0_RD_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB0_RD_VC1_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB0_RD_VC2_CNTL +#define DAGB0_RD_VC2_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB0_RD_VC2_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB0_RD_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB0_RD_VC2_CNTL__MAX_BW__SHIFT 0xc +#define DAGB0_RD_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB0_RD_VC2_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB0_RD_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB0_RD_VC2_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB0_RD_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB0_RD_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB0_RD_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB0_RD_VC2_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB0_RD_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB0_RD_VC2_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB0_RD_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB0_RD_VC2_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB0_RD_VC3_CNTL +#define DAGB0_RD_VC3_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB0_RD_VC3_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB0_RD_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB0_RD_VC3_CNTL__MAX_BW__SHIFT 0xc +#define DAGB0_RD_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB0_RD_VC3_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB0_RD_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB0_RD_VC3_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB0_RD_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB0_RD_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB0_RD_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB0_RD_VC3_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB0_RD_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB0_RD_VC3_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB0_RD_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB0_RD_VC3_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB0_RD_VC4_CNTL +#define DAGB0_RD_VC4_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB0_RD_VC4_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB0_RD_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB0_RD_VC4_CNTL__MAX_BW__SHIFT 0xc +#define DAGB0_RD_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB0_RD_VC4_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB0_RD_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB0_RD_VC4_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB0_RD_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB0_RD_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB0_RD_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB0_RD_VC4_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB0_RD_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB0_RD_VC4_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB0_RD_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB0_RD_VC4_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB0_RD_VC5_CNTL +#define DAGB0_RD_VC5_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB0_RD_VC5_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB0_RD_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB0_RD_VC5_CNTL__MAX_BW__SHIFT 0xc +#define 
DAGB0_RD_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB0_RD_VC5_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB0_RD_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB0_RD_VC5_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB0_RD_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB0_RD_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB0_RD_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB0_RD_VC5_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB0_RD_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB0_RD_VC5_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB0_RD_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB0_RD_VC5_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB0_RD_VC6_CNTL +#define DAGB0_RD_VC6_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB0_RD_VC6_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB0_RD_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB0_RD_VC6_CNTL__MAX_BW__SHIFT 0xc +#define DAGB0_RD_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB0_RD_VC6_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB0_RD_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB0_RD_VC6_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB0_RD_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB0_RD_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB0_RD_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB0_RD_VC6_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB0_RD_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB0_RD_VC6_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB0_RD_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB0_RD_VC6_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB0_RD_VC7_CNTL +#define DAGB0_RD_VC7_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB0_RD_VC7_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB0_RD_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB0_RD_VC7_CNTL__MAX_BW__SHIFT 0xc +#define DAGB0_RD_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB0_RD_VC7_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB0_RD_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB0_RD_VC7_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB0_RD_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB0_RD_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB0_RD_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB0_RD_VC7_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB0_RD_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB0_RD_VC7_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB0_RD_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB0_RD_VC7_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB0_RD_CNTL_MISC +#define DAGB0_RD_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0 +#define DAGB0_RD_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6 +#define DAGB0_RD_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd +#define DAGB0_RD_CNTL_MISC__STOR_CC_LEGACY_MODE__SHIFT 0x13 +#define DAGB0_RD_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14 +#define DAGB0_RD_CNTL_MISC__UTCL2_CID__SHIFT 0x15 +#define DAGB0_RD_CNTL_MISC__RDRET_FIFO_CREDITS__SHIFT 0x1a +#define DAGB0_RD_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL +#define DAGB0_RD_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L +#define DAGB0_RD_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L +#define DAGB0_RD_CNTL_MISC__STOR_CC_LEGACY_MODE_MASK 0x00080000L +#define DAGB0_RD_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L +#define DAGB0_RD_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L +#define DAGB0_RD_CNTL_MISC__RDRET_FIFO_CREDITS_MASK 0xFC000000L +//DAGB0_RD_TLB_CREDIT +#define DAGB0_RD_TLB_CREDIT__TLB0__SHIFT 0x0 +#define DAGB0_RD_TLB_CREDIT__TLB1__SHIFT 0x5 +#define DAGB0_RD_TLB_CREDIT__TLB2__SHIFT 0xa +#define DAGB0_RD_TLB_CREDIT__TLB3__SHIFT 0xf +#define DAGB0_RD_TLB_CREDIT__TLB4__SHIFT 0x14 +#define 
DAGB0_RD_TLB_CREDIT__TLB5__SHIFT 0x19 +#define DAGB0_RD_TLB_CREDIT__TLB0_MASK 0x0000001FL +#define DAGB0_RD_TLB_CREDIT__TLB1_MASK 0x000003E0L +#define DAGB0_RD_TLB_CREDIT__TLB2_MASK 0x00007C00L +#define DAGB0_RD_TLB_CREDIT__TLB3_MASK 0x000F8000L +#define DAGB0_RD_TLB_CREDIT__TLB4_MASK 0x01F00000L +#define DAGB0_RD_TLB_CREDIT__TLB5_MASK 0x3E000000L +//DAGB0_RDCLI_ASK_PENDING +#define DAGB0_RDCLI_ASK_PENDING__BUSY__SHIFT 0x0 +#define DAGB0_RDCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB0_RDCLI_GO_PENDING +#define DAGB0_RDCLI_GO_PENDING__BUSY__SHIFT 0x0 +#define DAGB0_RDCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB0_RDCLI_GBLSEND_PENDING +#define DAGB0_RDCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0 +#define DAGB0_RDCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB0_RDCLI_TLB_PENDING +#define DAGB0_RDCLI_TLB_PENDING__BUSY__SHIFT 0x0 +#define DAGB0_RDCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB0_RDCLI_OARB_PENDING +#define DAGB0_RDCLI_OARB_PENDING__BUSY__SHIFT 0x0 +#define DAGB0_RDCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB0_RDCLI_OSD_PENDING +#define DAGB0_RDCLI_OSD_PENDING__BUSY__SHIFT 0x0 +#define DAGB0_RDCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB0_WRCLI0 +#define DAGB0_WRCLI0__VIRT_CHAN__SHIFT 0x0 +#define DAGB0_WRCLI0__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB0_WRCLI0__URG_HIGH__SHIFT 0x4 +#define DAGB0_WRCLI0__URG_LOW__SHIFT 0x8 +#define DAGB0_WRCLI0__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB0_WRCLI0__MAX_BW__SHIFT 0xd +#define DAGB0_WRCLI0__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB0_WRCLI0__MIN_BW__SHIFT 0x16 +#define DAGB0_WRCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB0_WRCLI0__MAX_OSD__SHIFT 0x1a +#define DAGB0_WRCLI0__VIRT_CHAN_MASK 0x00000007L +#define DAGB0_WRCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB0_WRCLI0__URG_HIGH_MASK 0x000000F0L +#define DAGB0_WRCLI0__URG_LOW_MASK 0x00000F00L +#define DAGB0_WRCLI0__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB0_WRCLI0__MAX_BW_MASK 0x001FE000L +#define DAGB0_WRCLI0__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB0_WRCLI0__MIN_BW_MASK 0x01C00000L +#define DAGB0_WRCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB0_WRCLI0__MAX_OSD_MASK 0xFC000000L +//DAGB0_WRCLI1 +#define DAGB0_WRCLI1__VIRT_CHAN__SHIFT 0x0 +#define DAGB0_WRCLI1__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB0_WRCLI1__URG_HIGH__SHIFT 0x4 +#define DAGB0_WRCLI1__URG_LOW__SHIFT 0x8 +#define DAGB0_WRCLI1__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB0_WRCLI1__MAX_BW__SHIFT 0xd +#define DAGB0_WRCLI1__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB0_WRCLI1__MIN_BW__SHIFT 0x16 +#define DAGB0_WRCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB0_WRCLI1__MAX_OSD__SHIFT 0x1a +#define DAGB0_WRCLI1__VIRT_CHAN_MASK 0x00000007L +#define DAGB0_WRCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB0_WRCLI1__URG_HIGH_MASK 0x000000F0L +#define DAGB0_WRCLI1__URG_LOW_MASK 0x00000F00L +#define DAGB0_WRCLI1__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB0_WRCLI1__MAX_BW_MASK 0x001FE000L +#define DAGB0_WRCLI1__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB0_WRCLI1__MIN_BW_MASK 0x01C00000L +#define DAGB0_WRCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB0_WRCLI1__MAX_OSD_MASK 0xFC000000L +//DAGB0_WRCLI2 +#define DAGB0_WRCLI2__VIRT_CHAN__SHIFT 0x0 +#define DAGB0_WRCLI2__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB0_WRCLI2__URG_HIGH__SHIFT 0x4 +#define DAGB0_WRCLI2__URG_LOW__SHIFT 0x8 +#define DAGB0_WRCLI2__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB0_WRCLI2__MAX_BW__SHIFT 0xd +#define DAGB0_WRCLI2__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB0_WRCLI2__MIN_BW__SHIFT 0x16 +#define 
DAGB0_WRCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB0_WRCLI2__MAX_OSD__SHIFT 0x1a +#define DAGB0_WRCLI2__VIRT_CHAN_MASK 0x00000007L +#define DAGB0_WRCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB0_WRCLI2__URG_HIGH_MASK 0x000000F0L +#define DAGB0_WRCLI2__URG_LOW_MASK 0x00000F00L +#define DAGB0_WRCLI2__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB0_WRCLI2__MAX_BW_MASK 0x001FE000L +#define DAGB0_WRCLI2__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB0_WRCLI2__MIN_BW_MASK 0x01C00000L +#define DAGB0_WRCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB0_WRCLI2__MAX_OSD_MASK 0xFC000000L +//DAGB0_WRCLI3 +#define DAGB0_WRCLI3__VIRT_CHAN__SHIFT 0x0 +#define DAGB0_WRCLI3__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB0_WRCLI3__URG_HIGH__SHIFT 0x4 +#define DAGB0_WRCLI3__URG_LOW__SHIFT 0x8 +#define DAGB0_WRCLI3__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB0_WRCLI3__MAX_BW__SHIFT 0xd +#define DAGB0_WRCLI3__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB0_WRCLI3__MIN_BW__SHIFT 0x16 +#define DAGB0_WRCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB0_WRCLI3__MAX_OSD__SHIFT 0x1a +#define DAGB0_WRCLI3__VIRT_CHAN_MASK 0x00000007L +#define DAGB0_WRCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB0_WRCLI3__URG_HIGH_MASK 0x000000F0L +#define DAGB0_WRCLI3__URG_LOW_MASK 0x00000F00L +#define DAGB0_WRCLI3__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB0_WRCLI3__MAX_BW_MASK 0x001FE000L +#define DAGB0_WRCLI3__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB0_WRCLI3__MIN_BW_MASK 0x01C00000L +#define DAGB0_WRCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB0_WRCLI3__MAX_OSD_MASK 0xFC000000L +//DAGB0_WRCLI4 +#define DAGB0_WRCLI4__VIRT_CHAN__SHIFT 0x0 +#define DAGB0_WRCLI4__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB0_WRCLI4__URG_HIGH__SHIFT 0x4 +#define DAGB0_WRCLI4__URG_LOW__SHIFT 0x8 +#define DAGB0_WRCLI4__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB0_WRCLI4__MAX_BW__SHIFT 0xd +#define DAGB0_WRCLI4__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB0_WRCLI4__MIN_BW__SHIFT 0x16 +#define DAGB0_WRCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB0_WRCLI4__MAX_OSD__SHIFT 0x1a +#define DAGB0_WRCLI4__VIRT_CHAN_MASK 0x00000007L +#define DAGB0_WRCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB0_WRCLI4__URG_HIGH_MASK 0x000000F0L +#define DAGB0_WRCLI4__URG_LOW_MASK 0x00000F00L +#define DAGB0_WRCLI4__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB0_WRCLI4__MAX_BW_MASK 0x001FE000L +#define DAGB0_WRCLI4__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB0_WRCLI4__MIN_BW_MASK 0x01C00000L +#define DAGB0_WRCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB0_WRCLI4__MAX_OSD_MASK 0xFC000000L +//DAGB0_WRCLI5 +#define DAGB0_WRCLI5__VIRT_CHAN__SHIFT 0x0 +#define DAGB0_WRCLI5__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB0_WRCLI5__URG_HIGH__SHIFT 0x4 +#define DAGB0_WRCLI5__URG_LOW__SHIFT 0x8 +#define DAGB0_WRCLI5__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB0_WRCLI5__MAX_BW__SHIFT 0xd +#define DAGB0_WRCLI5__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB0_WRCLI5__MIN_BW__SHIFT 0x16 +#define DAGB0_WRCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB0_WRCLI5__MAX_OSD__SHIFT 0x1a +#define DAGB0_WRCLI5__VIRT_CHAN_MASK 0x00000007L +#define DAGB0_WRCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB0_WRCLI5__URG_HIGH_MASK 0x000000F0L +#define DAGB0_WRCLI5__URG_LOW_MASK 0x00000F00L +#define DAGB0_WRCLI5__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB0_WRCLI5__MAX_BW_MASK 0x001FE000L +#define DAGB0_WRCLI5__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB0_WRCLI5__MIN_BW_MASK 0x01C00000L +#define DAGB0_WRCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB0_WRCLI5__MAX_OSD_MASK 
0xFC000000L +//DAGB0_WRCLI6 +#define DAGB0_WRCLI6__VIRT_CHAN__SHIFT 0x0 +#define DAGB0_WRCLI6__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB0_WRCLI6__URG_HIGH__SHIFT 0x4 +#define DAGB0_WRCLI6__URG_LOW__SHIFT 0x8 +#define DAGB0_WRCLI6__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB0_WRCLI6__MAX_BW__SHIFT 0xd +#define DAGB0_WRCLI6__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB0_WRCLI6__MIN_BW__SHIFT 0x16 +#define DAGB0_WRCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB0_WRCLI6__MAX_OSD__SHIFT 0x1a +#define DAGB0_WRCLI6__VIRT_CHAN_MASK 0x00000007L +#define DAGB0_WRCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB0_WRCLI6__URG_HIGH_MASK 0x000000F0L +#define DAGB0_WRCLI6__URG_LOW_MASK 0x00000F00L +#define DAGB0_WRCLI6__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB0_WRCLI6__MAX_BW_MASK 0x001FE000L +#define DAGB0_WRCLI6__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB0_WRCLI6__MIN_BW_MASK 0x01C00000L +#define DAGB0_WRCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB0_WRCLI6__MAX_OSD_MASK 0xFC000000L +//DAGB0_WRCLI7 +#define DAGB0_WRCLI7__VIRT_CHAN__SHIFT 0x0 +#define DAGB0_WRCLI7__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB0_WRCLI7__URG_HIGH__SHIFT 0x4 +#define DAGB0_WRCLI7__URG_LOW__SHIFT 0x8 +#define DAGB0_WRCLI7__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB0_WRCLI7__MAX_BW__SHIFT 0xd +#define DAGB0_WRCLI7__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB0_WRCLI7__MIN_BW__SHIFT 0x16 +#define DAGB0_WRCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB0_WRCLI7__MAX_OSD__SHIFT 0x1a +#define DAGB0_WRCLI7__VIRT_CHAN_MASK 0x00000007L +#define DAGB0_WRCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB0_WRCLI7__URG_HIGH_MASK 0x000000F0L +#define DAGB0_WRCLI7__URG_LOW_MASK 0x00000F00L +#define DAGB0_WRCLI7__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB0_WRCLI7__MAX_BW_MASK 0x001FE000L +#define DAGB0_WRCLI7__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB0_WRCLI7__MIN_BW_MASK 0x01C00000L +#define DAGB0_WRCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB0_WRCLI7__MAX_OSD_MASK 0xFC000000L +//DAGB0_WRCLI8 +#define DAGB0_WRCLI8__VIRT_CHAN__SHIFT 0x0 +#define DAGB0_WRCLI8__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB0_WRCLI8__URG_HIGH__SHIFT 0x4 +#define DAGB0_WRCLI8__URG_LOW__SHIFT 0x8 +#define DAGB0_WRCLI8__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB0_WRCLI8__MAX_BW__SHIFT 0xd +#define DAGB0_WRCLI8__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB0_WRCLI8__MIN_BW__SHIFT 0x16 +#define DAGB0_WRCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB0_WRCLI8__MAX_OSD__SHIFT 0x1a +#define DAGB0_WRCLI8__VIRT_CHAN_MASK 0x00000007L +#define DAGB0_WRCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB0_WRCLI8__URG_HIGH_MASK 0x000000F0L +#define DAGB0_WRCLI8__URG_LOW_MASK 0x00000F00L +#define DAGB0_WRCLI8__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB0_WRCLI8__MAX_BW_MASK 0x001FE000L +#define DAGB0_WRCLI8__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB0_WRCLI8__MIN_BW_MASK 0x01C00000L +#define DAGB0_WRCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB0_WRCLI8__MAX_OSD_MASK 0xFC000000L +//DAGB0_WRCLI9 +#define DAGB0_WRCLI9__VIRT_CHAN__SHIFT 0x0 +#define DAGB0_WRCLI9__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB0_WRCLI9__URG_HIGH__SHIFT 0x4 +#define DAGB0_WRCLI9__URG_LOW__SHIFT 0x8 +#define DAGB0_WRCLI9__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB0_WRCLI9__MAX_BW__SHIFT 0xd +#define DAGB0_WRCLI9__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB0_WRCLI9__MIN_BW__SHIFT 0x16 +#define DAGB0_WRCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB0_WRCLI9__MAX_OSD__SHIFT 0x1a +#define DAGB0_WRCLI9__VIRT_CHAN_MASK 0x00000007L +#define DAGB0_WRCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L 
+#define DAGB0_WRCLI9__URG_HIGH_MASK 0x000000F0L +#define DAGB0_WRCLI9__URG_LOW_MASK 0x00000F00L +#define DAGB0_WRCLI9__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB0_WRCLI9__MAX_BW_MASK 0x001FE000L +#define DAGB0_WRCLI9__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB0_WRCLI9__MIN_BW_MASK 0x01C00000L +#define DAGB0_WRCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB0_WRCLI9__MAX_OSD_MASK 0xFC000000L +//DAGB0_WRCLI10 +#define DAGB0_WRCLI10__VIRT_CHAN__SHIFT 0x0 +#define DAGB0_WRCLI10__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB0_WRCLI10__URG_HIGH__SHIFT 0x4 +#define DAGB0_WRCLI10__URG_LOW__SHIFT 0x8 +#define DAGB0_WRCLI10__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB0_WRCLI10__MAX_BW__SHIFT 0xd +#define DAGB0_WRCLI10__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB0_WRCLI10__MIN_BW__SHIFT 0x16 +#define DAGB0_WRCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB0_WRCLI10__MAX_OSD__SHIFT 0x1a +#define DAGB0_WRCLI10__VIRT_CHAN_MASK 0x00000007L +#define DAGB0_WRCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB0_WRCLI10__URG_HIGH_MASK 0x000000F0L +#define DAGB0_WRCLI10__URG_LOW_MASK 0x00000F00L +#define DAGB0_WRCLI10__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB0_WRCLI10__MAX_BW_MASK 0x001FE000L +#define DAGB0_WRCLI10__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB0_WRCLI10__MIN_BW_MASK 0x01C00000L +#define DAGB0_WRCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB0_WRCLI10__MAX_OSD_MASK 0xFC000000L +//DAGB0_WRCLI11 +#define DAGB0_WRCLI11__VIRT_CHAN__SHIFT 0x0 +#define DAGB0_WRCLI11__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB0_WRCLI11__URG_HIGH__SHIFT 0x4 +#define DAGB0_WRCLI11__URG_LOW__SHIFT 0x8 +#define DAGB0_WRCLI11__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB0_WRCLI11__MAX_BW__SHIFT 0xd +#define DAGB0_WRCLI11__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB0_WRCLI11__MIN_BW__SHIFT 0x16 +#define DAGB0_WRCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB0_WRCLI11__MAX_OSD__SHIFT 0x1a +#define DAGB0_WRCLI11__VIRT_CHAN_MASK 0x00000007L +#define DAGB0_WRCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB0_WRCLI11__URG_HIGH_MASK 0x000000F0L +#define DAGB0_WRCLI11__URG_LOW_MASK 0x00000F00L +#define DAGB0_WRCLI11__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB0_WRCLI11__MAX_BW_MASK 0x001FE000L +#define DAGB0_WRCLI11__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB0_WRCLI11__MIN_BW_MASK 0x01C00000L +#define DAGB0_WRCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB0_WRCLI11__MAX_OSD_MASK 0xFC000000L +//DAGB0_WRCLI12 +#define DAGB0_WRCLI12__VIRT_CHAN__SHIFT 0x0 +#define DAGB0_WRCLI12__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB0_WRCLI12__URG_HIGH__SHIFT 0x4 +#define DAGB0_WRCLI12__URG_LOW__SHIFT 0x8 +#define DAGB0_WRCLI12__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB0_WRCLI12__MAX_BW__SHIFT 0xd +#define DAGB0_WRCLI12__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB0_WRCLI12__MIN_BW__SHIFT 0x16 +#define DAGB0_WRCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB0_WRCLI12__MAX_OSD__SHIFT 0x1a +#define DAGB0_WRCLI12__VIRT_CHAN_MASK 0x00000007L +#define DAGB0_WRCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB0_WRCLI12__URG_HIGH_MASK 0x000000F0L +#define DAGB0_WRCLI12__URG_LOW_MASK 0x00000F00L +#define DAGB0_WRCLI12__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB0_WRCLI12__MAX_BW_MASK 0x001FE000L +#define DAGB0_WRCLI12__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB0_WRCLI12__MIN_BW_MASK 0x01C00000L +#define DAGB0_WRCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB0_WRCLI12__MAX_OSD_MASK 0xFC000000L +//DAGB0_WRCLI13 +#define DAGB0_WRCLI13__VIRT_CHAN__SHIFT 0x0 +#define DAGB0_WRCLI13__CHECK_TLB_CREDIT__SHIFT 0x3 +#define 
DAGB0_WRCLI13__URG_HIGH__SHIFT 0x4 +#define DAGB0_WRCLI13__URG_LOW__SHIFT 0x8 +#define DAGB0_WRCLI13__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB0_WRCLI13__MAX_BW__SHIFT 0xd +#define DAGB0_WRCLI13__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB0_WRCLI13__MIN_BW__SHIFT 0x16 +#define DAGB0_WRCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB0_WRCLI13__MAX_OSD__SHIFT 0x1a +#define DAGB0_WRCLI13__VIRT_CHAN_MASK 0x00000007L +#define DAGB0_WRCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB0_WRCLI13__URG_HIGH_MASK 0x000000F0L +#define DAGB0_WRCLI13__URG_LOW_MASK 0x00000F00L +#define DAGB0_WRCLI13__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB0_WRCLI13__MAX_BW_MASK 0x001FE000L +#define DAGB0_WRCLI13__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB0_WRCLI13__MIN_BW_MASK 0x01C00000L +#define DAGB0_WRCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB0_WRCLI13__MAX_OSD_MASK 0xFC000000L +//DAGB0_WRCLI14 +#define DAGB0_WRCLI14__VIRT_CHAN__SHIFT 0x0 +#define DAGB0_WRCLI14__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB0_WRCLI14__URG_HIGH__SHIFT 0x4 +#define DAGB0_WRCLI14__URG_LOW__SHIFT 0x8 +#define DAGB0_WRCLI14__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB0_WRCLI14__MAX_BW__SHIFT 0xd +#define DAGB0_WRCLI14__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB0_WRCLI14__MIN_BW__SHIFT 0x16 +#define DAGB0_WRCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB0_WRCLI14__MAX_OSD__SHIFT 0x1a +#define DAGB0_WRCLI14__VIRT_CHAN_MASK 0x00000007L +#define DAGB0_WRCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB0_WRCLI14__URG_HIGH_MASK 0x000000F0L +#define DAGB0_WRCLI14__URG_LOW_MASK 0x00000F00L +#define DAGB0_WRCLI14__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB0_WRCLI14__MAX_BW_MASK 0x001FE000L +#define DAGB0_WRCLI14__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB0_WRCLI14__MIN_BW_MASK 0x01C00000L +#define DAGB0_WRCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB0_WRCLI14__MAX_OSD_MASK 0xFC000000L +//DAGB0_WRCLI15 +#define DAGB0_WRCLI15__VIRT_CHAN__SHIFT 0x0 +#define DAGB0_WRCLI15__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB0_WRCLI15__URG_HIGH__SHIFT 0x4 +#define DAGB0_WRCLI15__URG_LOW__SHIFT 0x8 +#define DAGB0_WRCLI15__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB0_WRCLI15__MAX_BW__SHIFT 0xd +#define DAGB0_WRCLI15__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB0_WRCLI15__MIN_BW__SHIFT 0x16 +#define DAGB0_WRCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB0_WRCLI15__MAX_OSD__SHIFT 0x1a +#define DAGB0_WRCLI15__VIRT_CHAN_MASK 0x00000007L +#define DAGB0_WRCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB0_WRCLI15__URG_HIGH_MASK 0x000000F0L +#define DAGB0_WRCLI15__URG_LOW_MASK 0x00000F00L +#define DAGB0_WRCLI15__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB0_WRCLI15__MAX_BW_MASK 0x001FE000L +#define DAGB0_WRCLI15__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB0_WRCLI15__MIN_BW_MASK 0x01C00000L +#define DAGB0_WRCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB0_WRCLI15__MAX_OSD_MASK 0xFC000000L +//DAGB0_WR_CNTL +#define DAGB0_WR_CNTL__SCLK_FREQ__SHIFT 0x0 +#define DAGB0_WR_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4 +#define DAGB0_WR_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa +#define DAGB0_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10 +#define DAGB0_WR_CNTL__IO_LEVEL__SHIFT 0x11 +#define DAGB0_WR_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14 +#define DAGB0_WR_CNTL__SHARE_VC_NUM__SHIFT 0x17 +#define DAGB0_WR_CNTL__SCLK_FREQ_MASK 0x0000000FL +#define DAGB0_WR_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L +#define DAGB0_WR_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L +#define DAGB0_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L +#define DAGB0_WR_CNTL__IO_LEVEL_MASK 
0x000E0000L +#define DAGB0_WR_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L +#define DAGB0_WR_CNTL__SHARE_VC_NUM_MASK 0x03800000L +//DAGB0_WR_GMI_CNTL +#define DAGB0_WR_GMI_CNTL__EA_CREDIT__SHIFT 0x0 +#define DAGB0_WR_GMI_CNTL__LEVEL__SHIFT 0x6 +#define DAGB0_WR_GMI_CNTL__MAX_BURST__SHIFT 0x9 +#define DAGB0_WR_GMI_CNTL__LAZY_TIMER__SHIFT 0xd +#define DAGB0_WR_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL +#define DAGB0_WR_GMI_CNTL__LEVEL_MASK 0x000001C0L +#define DAGB0_WR_GMI_CNTL__MAX_BURST_MASK 0x00001E00L +#define DAGB0_WR_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L +//DAGB0_WR_ADDR_DAGB +#define DAGB0_WR_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0 +#define DAGB0_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3 +#define DAGB0_WR_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6 +#define DAGB0_WR_ADDR_DAGB__WHOAMI__SHIFT 0x7 +#define DAGB0_WR_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L +#define DAGB0_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L +#define DAGB0_WR_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L +#define DAGB0_WR_ADDR_DAGB__WHOAMI_MASK 0x00001F80L +//DAGB0_WR_OUTPUT_DAGB_MAX_BURST +#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0 +#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4 +#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8 +#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc +#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10 +#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14 +#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18 +#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c +#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL +#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L +#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L +#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L +#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L +#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L +#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L +#define DAGB0_WR_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L +//DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER +#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0 +#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4 +#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8 +#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc +#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10 +#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14 +#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18 +#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c +#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL +#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L +#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L +#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L +#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L +#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L +#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L +#define DAGB0_WR_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L +//DAGB0_WR_CGTT_CLK_CTRL +#define DAGB0_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB0_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB0_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define 
DAGB0_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB0_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB0_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB0_L1TLB_WR_CGTT_CLK_CTRL +#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB0_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB0_ATCVM_WR_CGTT_CLK_CTRL +#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB0_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB0_WR_ADDR_DAGB_MAX_BURST0 +#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0 +#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4 +#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8 +#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc +#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10 +#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14 +#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18 +#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c +#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL +#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L +#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L +#define 
DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L +#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L +#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L +#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L +#define DAGB0_WR_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L +//DAGB0_WR_ADDR_DAGB_LAZY_TIMER0 +#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0 +#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4 +#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8 +#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc +#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10 +#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14 +#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18 +#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c +#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL +#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L +#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L +#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L +#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L +#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L +#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L +#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L +//DAGB0_WR_ADDR_DAGB_MAX_BURST1 +#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0 +#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4 +#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8 +#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc +#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10 +#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14 +#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18 +#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c +#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL +#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L +#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L +#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L +#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L +#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L +#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L +#define DAGB0_WR_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L +//DAGB0_WR_ADDR_DAGB_LAZY_TIMER1 +#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0 +#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4 +#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8 +#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc +#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10 +#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14 +#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18 +#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c +#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL +#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L +#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L +#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L +#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L +#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L +#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L +#define DAGB0_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L +//DAGB0_WR_DATA_DAGB +#define DAGB0_WR_DATA_DAGB__DAGB_ENABLE__SHIFT 0x0 +#define 
DAGB0_WR_DATA_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3 +#define DAGB0_WR_DATA_DAGB__DISABLE_SELF_INIT__SHIFT 0x6 +#define DAGB0_WR_DATA_DAGB__WHOAMI__SHIFT 0x7 +#define DAGB0_WR_DATA_DAGB__DAGB_ENABLE_MASK 0x00000007L +#define DAGB0_WR_DATA_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L +#define DAGB0_WR_DATA_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L +#define DAGB0_WR_DATA_DAGB__WHOAMI_MASK 0x00001F80L +//DAGB0_WR_DATA_DAGB_MAX_BURST0 +#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0 +#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4 +#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8 +#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc +#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10 +#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14 +#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18 +#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c +#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL +#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L +#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L +#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L +#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L +#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L +#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L +#define DAGB0_WR_DATA_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L +//DAGB0_WR_DATA_DAGB_LAZY_TIMER0 +#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0 +#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4 +#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8 +#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc +#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10 +#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14 +#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18 +#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c +#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL +#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L +#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L +#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L +#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L +#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L +#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L +#define DAGB0_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L +//DAGB0_WR_DATA_DAGB_MAX_BURST1 +#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0 +#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4 +#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8 +#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc +#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10 +#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14 +#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18 +#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c +#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL +#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L +#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L +#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L +#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L +#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L +#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L +#define DAGB0_WR_DATA_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L +//DAGB0_WR_DATA_DAGB_LAZY_TIMER1 +#define 
DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0 +#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4 +#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8 +#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc +#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10 +#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14 +#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18 +#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c +#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL +#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L +#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L +#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L +#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L +#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L +#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L +#define DAGB0_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L +//DAGB0_WR_VC0_CNTL +#define DAGB0_WR_VC0_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB0_WR_VC0_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB0_WR_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB0_WR_VC0_CNTL__MAX_BW__SHIFT 0xc +#define DAGB0_WR_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB0_WR_VC0_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB0_WR_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB0_WR_VC0_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB0_WR_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB0_WR_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB0_WR_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB0_WR_VC0_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB0_WR_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB0_WR_VC0_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB0_WR_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB0_WR_VC0_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB0_WR_VC1_CNTL +#define DAGB0_WR_VC1_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB0_WR_VC1_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB0_WR_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB0_WR_VC1_CNTL__MAX_BW__SHIFT 0xc +#define DAGB0_WR_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB0_WR_VC1_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB0_WR_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB0_WR_VC1_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB0_WR_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB0_WR_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB0_WR_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB0_WR_VC1_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB0_WR_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB0_WR_VC1_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB0_WR_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB0_WR_VC1_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB0_WR_VC2_CNTL +#define DAGB0_WR_VC2_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB0_WR_VC2_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB0_WR_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB0_WR_VC2_CNTL__MAX_BW__SHIFT 0xc +#define DAGB0_WR_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB0_WR_VC2_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB0_WR_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB0_WR_VC2_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB0_WR_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB0_WR_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB0_WR_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB0_WR_VC2_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB0_WR_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB0_WR_VC2_CNTL__MIN_BW_MASK 0x00E00000L +#define 
DAGB0_WR_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB0_WR_VC2_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB0_WR_VC3_CNTL +#define DAGB0_WR_VC3_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB0_WR_VC3_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB0_WR_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB0_WR_VC3_CNTL__MAX_BW__SHIFT 0xc +#define DAGB0_WR_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB0_WR_VC3_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB0_WR_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB0_WR_VC3_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB0_WR_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB0_WR_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB0_WR_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB0_WR_VC3_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB0_WR_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB0_WR_VC3_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB0_WR_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB0_WR_VC3_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB0_WR_VC4_CNTL +#define DAGB0_WR_VC4_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB0_WR_VC4_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB0_WR_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB0_WR_VC4_CNTL__MAX_BW__SHIFT 0xc +#define DAGB0_WR_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB0_WR_VC4_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB0_WR_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB0_WR_VC4_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB0_WR_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB0_WR_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB0_WR_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB0_WR_VC4_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB0_WR_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB0_WR_VC4_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB0_WR_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB0_WR_VC4_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB0_WR_VC5_CNTL +#define DAGB0_WR_VC5_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB0_WR_VC5_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB0_WR_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB0_WR_VC5_CNTL__MAX_BW__SHIFT 0xc +#define DAGB0_WR_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB0_WR_VC5_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB0_WR_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB0_WR_VC5_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB0_WR_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB0_WR_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB0_WR_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB0_WR_VC5_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB0_WR_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB0_WR_VC5_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB0_WR_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB0_WR_VC5_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB0_WR_VC6_CNTL +#define DAGB0_WR_VC6_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB0_WR_VC6_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB0_WR_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB0_WR_VC6_CNTL__MAX_BW__SHIFT 0xc +#define DAGB0_WR_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB0_WR_VC6_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB0_WR_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB0_WR_VC6_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB0_WR_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB0_WR_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB0_WR_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB0_WR_VC6_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB0_WR_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB0_WR_VC6_CNTL__MIN_BW_MASK 0x00E00000L +#define 
DAGB0_WR_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB0_WR_VC6_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB0_WR_VC7_CNTL +#define DAGB0_WR_VC7_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB0_WR_VC7_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB0_WR_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB0_WR_VC7_CNTL__MAX_BW__SHIFT 0xc +#define DAGB0_WR_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB0_WR_VC7_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB0_WR_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB0_WR_VC7_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB0_WR_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB0_WR_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB0_WR_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB0_WR_VC7_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB0_WR_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB0_WR_VC7_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB0_WR_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB0_WR_VC7_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB0_WR_CNTL_MISC +#define DAGB0_WR_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0 +#define DAGB0_WR_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6 +#define DAGB0_WR_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd +#define DAGB0_WR_CNTL_MISC__STOR_CC_LEGACY_MODE__SHIFT 0x13 +#define DAGB0_WR_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14 +#define DAGB0_WR_CNTL_MISC__UTCL2_CID__SHIFT 0x15 +#define DAGB0_WR_CNTL_MISC__RDRET_FIFO_CREDITS__SHIFT 0x1a +#define DAGB0_WR_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL +#define DAGB0_WR_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L +#define DAGB0_WR_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L +#define DAGB0_WR_CNTL_MISC__STOR_CC_LEGACY_MODE_MASK 0x00080000L +#define DAGB0_WR_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L +#define DAGB0_WR_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L +#define DAGB0_WR_CNTL_MISC__RDRET_FIFO_CREDITS_MASK 0xFC000000L +//DAGB0_WR_TLB_CREDIT +#define DAGB0_WR_TLB_CREDIT__TLB0__SHIFT 0x0 +#define DAGB0_WR_TLB_CREDIT__TLB1__SHIFT 0x5 +#define DAGB0_WR_TLB_CREDIT__TLB2__SHIFT 0xa +#define DAGB0_WR_TLB_CREDIT__TLB3__SHIFT 0xf +#define DAGB0_WR_TLB_CREDIT__TLB4__SHIFT 0x14 +#define DAGB0_WR_TLB_CREDIT__TLB5__SHIFT 0x19 +#define DAGB0_WR_TLB_CREDIT__TLB0_MASK 0x0000001FL +#define DAGB0_WR_TLB_CREDIT__TLB1_MASK 0x000003E0L +#define DAGB0_WR_TLB_CREDIT__TLB2_MASK 0x00007C00L +#define DAGB0_WR_TLB_CREDIT__TLB3_MASK 0x000F8000L +#define DAGB0_WR_TLB_CREDIT__TLB4_MASK 0x01F00000L +#define DAGB0_WR_TLB_CREDIT__TLB5_MASK 0x3E000000L +//DAGB0_WR_DATA_CREDIT +#define DAGB0_WR_DATA_CREDIT__DLOCK_VC_CREDITS__SHIFT 0x0 +#define DAGB0_WR_DATA_CREDIT__LARGE_BURST_CREDITS__SHIFT 0x8 +#define DAGB0_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS__SHIFT 0x10 +#define DAGB0_WR_DATA_CREDIT__SMALL_BURST_CREDITS__SHIFT 0x18 +#define DAGB0_WR_DATA_CREDIT__DLOCK_VC_CREDITS_MASK 0x000000FFL +#define DAGB0_WR_DATA_CREDIT__LARGE_BURST_CREDITS_MASK 0x0000FF00L +#define DAGB0_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS_MASK 0x00FF0000L +#define DAGB0_WR_DATA_CREDIT__SMALL_BURST_CREDITS_MASK 0xFF000000L +//DAGB0_WR_MISC_CREDIT +#define DAGB0_WR_MISC_CREDIT__ATOMIC_CREDIT__SHIFT 0x0 +#define DAGB0_WR_MISC_CREDIT__DLOCK_VC_NUM__SHIFT 0x6 +#define DAGB0_WR_MISC_CREDIT__OSD_CREDIT__SHIFT 0x9 +#define DAGB0_WR_MISC_CREDIT__OSD_DLOCK_CREDIT__SHIFT 0x10 +#define DAGB0_WR_MISC_CREDIT__ATOMIC_CREDIT_MASK 0x0000003FL +#define DAGB0_WR_MISC_CREDIT__DLOCK_VC_NUM_MASK 0x000001C0L +#define DAGB0_WR_MISC_CREDIT__OSD_CREDIT_MASK 0x0000FE00L +#define DAGB0_WR_MISC_CREDIT__OSD_DLOCK_CREDIT_MASK 0x007F0000L +//DAGB0_WRCLI_ASK_PENDING +#define 
DAGB0_WRCLI_ASK_PENDING__BUSY__SHIFT 0x0 +#define DAGB0_WRCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB0_WRCLI_GO_PENDING +#define DAGB0_WRCLI_GO_PENDING__BUSY__SHIFT 0x0 +#define DAGB0_WRCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB0_WRCLI_GBLSEND_PENDING +#define DAGB0_WRCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0 +#define DAGB0_WRCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB0_WRCLI_TLB_PENDING +#define DAGB0_WRCLI_TLB_PENDING__BUSY__SHIFT 0x0 +#define DAGB0_WRCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB0_WRCLI_OARB_PENDING +#define DAGB0_WRCLI_OARB_PENDING__BUSY__SHIFT 0x0 +#define DAGB0_WRCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB0_WRCLI_OSD_PENDING +#define DAGB0_WRCLI_OSD_PENDING__BUSY__SHIFT 0x0 +#define DAGB0_WRCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB0_WRCLI_DBUS_ASK_PENDING +#define DAGB0_WRCLI_DBUS_ASK_PENDING__BUSY__SHIFT 0x0 +#define DAGB0_WRCLI_DBUS_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB0_WRCLI_DBUS_GO_PENDING +#define DAGB0_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0 +#define DAGB0_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB0_DAGB_DLY +#define DAGB0_DAGB_DLY__DLY__SHIFT 0x0 +#define DAGB0_DAGB_DLY__CLI__SHIFT 0x8 +#define DAGB0_DAGB_DLY__POS__SHIFT 0x10 +#define DAGB0_DAGB_DLY__DLY_MASK 0x000000FFL +#define DAGB0_DAGB_DLY__CLI_MASK 0x0000FF00L +#define DAGB0_DAGB_DLY__POS_MASK 0x000F0000L +//DAGB0_CNTL_MISC +#define DAGB0_CNTL_MISC__EA_VC0_REMAP__SHIFT 0x0 +#define DAGB0_CNTL_MISC__EA_VC1_REMAP__SHIFT 0x3 +#define DAGB0_CNTL_MISC__EA_VC2_REMAP__SHIFT 0x6 +#define DAGB0_CNTL_MISC__EA_VC3_REMAP__SHIFT 0x9 +#define DAGB0_CNTL_MISC__EA_VC4_REMAP__SHIFT 0xc +#define DAGB0_CNTL_MISC__EA_VC5_REMAP__SHIFT 0xf +#define DAGB0_CNTL_MISC__EA_VC6_REMAP__SHIFT 0x12 +#define DAGB0_CNTL_MISC__EA_VC7_REMAP__SHIFT 0x15 +#define DAGB0_CNTL_MISC__BW_INIT_CYCLE__SHIFT 0x18 +#define DAGB0_CNTL_MISC__BW_RW_GAP_CYCLE__SHIFT 0x1e +#define DAGB0_CNTL_MISC__EA_VC0_REMAP_MASK 0x00000007L +#define DAGB0_CNTL_MISC__EA_VC1_REMAP_MASK 0x00000038L +#define DAGB0_CNTL_MISC__EA_VC2_REMAP_MASK 0x000001C0L +#define DAGB0_CNTL_MISC__EA_VC3_REMAP_MASK 0x00000E00L +#define DAGB0_CNTL_MISC__EA_VC4_REMAP_MASK 0x00007000L +#define DAGB0_CNTL_MISC__EA_VC5_REMAP_MASK 0x00038000L +#define DAGB0_CNTL_MISC__EA_VC6_REMAP_MASK 0x001C0000L +#define DAGB0_CNTL_MISC__EA_VC7_REMAP_MASK 0x00E00000L +#define DAGB0_CNTL_MISC__BW_INIT_CYCLE_MASK 0x3F000000L +#define DAGB0_CNTL_MISC__BW_RW_GAP_CYCLE_MASK 0xC0000000L +//DAGB0_CNTL_MISC2 +#define DAGB0_CNTL_MISC2__URG_BOOST_ENABLE__SHIFT 0x0 +#define DAGB0_CNTL_MISC2__URG_HALT_ENABLE__SHIFT 0x1 +#define DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG__SHIFT 0x2 +#define DAGB0_CNTL_MISC2__DISABLE_WRRET_CG__SHIFT 0x3 +#define DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG__SHIFT 0x4 +#define DAGB0_CNTL_MISC2__DISABLE_RDRET_CG__SHIFT 0x5 +#define DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG__SHIFT 0x6 +#define DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG__SHIFT 0x7 +#define DAGB0_CNTL_MISC2__DISABLE_EAWRREQ_BUSY__SHIFT 0x8 +#define DAGB0_CNTL_MISC2__DISABLE_EARDREQ_BUSY__SHIFT 0x9 +#define DAGB0_CNTL_MISC2__SWAP_CTL__SHIFT 0xa +#define DAGB0_CNTL_MISC2__RDRET_FIFO_PERF__SHIFT 0xb +#define DAGB0_CNTL_MISC2__RDRET_FIFO_DLOCK_CREDITS__SHIFT 0x11 +#define DAGB0_CNTL_MISC2__URG_BOOST_ENABLE_MASK 0x00000001L +#define DAGB0_CNTL_MISC2__URG_HALT_ENABLE_MASK 0x00000002L +#define DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK 0x00000004L +#define DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK 0x00000008L +#define DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK 0x00000010L +#define DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK 0x00000020L +#define 
DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK 0x00000040L +#define DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK 0x00000080L +#define DAGB0_CNTL_MISC2__DISABLE_EAWRREQ_BUSY_MASK 0x00000100L +#define DAGB0_CNTL_MISC2__DISABLE_EARDREQ_BUSY_MASK 0x00000200L +#define DAGB0_CNTL_MISC2__SWAP_CTL_MASK 0x00000400L +#define DAGB0_CNTL_MISC2__RDRET_FIFO_PERF_MASK 0x00000800L +#define DAGB0_CNTL_MISC2__RDRET_FIFO_DLOCK_CREDITS_MASK 0x007E0000L +//DAGB0_FIFO_EMPTY +#define DAGB0_FIFO_EMPTY__EMPTY__SHIFT 0x0 +#define DAGB0_FIFO_EMPTY__EMPTY_MASK 0x00FFFFFFL +//DAGB0_FIFO_FULL +#define DAGB0_FIFO_FULL__FULL__SHIFT 0x0 +#define DAGB0_FIFO_FULL__FULL_MASK 0x007FFFFFL +//DAGB0_WR_CREDITS_FULL +#define DAGB0_WR_CREDITS_FULL__FULL__SHIFT 0x0 +#define DAGB0_WR_CREDITS_FULL__FULL_MASK 0x1FFFFFFFL +//DAGB0_RD_CREDITS_FULL +#define DAGB0_RD_CREDITS_FULL__FULL__SHIFT 0x0 +#define DAGB0_RD_CREDITS_FULL__FULL_MASK 0x0003FFFFL +//DAGB0_PERFCOUNTER_LO +#define DAGB0_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0 +#define DAGB0_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL +//DAGB0_PERFCOUNTER_HI +#define DAGB0_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0 +#define DAGB0_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10 +#define DAGB0_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL +#define DAGB0_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L +//DAGB0_PERFCOUNTER0_CFG +#define DAGB0_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0 +#define DAGB0_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8 +#define DAGB0_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18 +#define DAGB0_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c +#define DAGB0_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d +#define DAGB0_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL +#define DAGB0_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define DAGB0_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L +#define DAGB0_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L +#define DAGB0_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L +//DAGB0_PERFCOUNTER1_CFG +#define DAGB0_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0 +#define DAGB0_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8 +#define DAGB0_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18 +#define DAGB0_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c +#define DAGB0_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d +#define DAGB0_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL +#define DAGB0_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define DAGB0_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L +#define DAGB0_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L +#define DAGB0_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L +//DAGB0_PERFCOUNTER2_CFG +#define DAGB0_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0 +#define DAGB0_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8 +#define DAGB0_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18 +#define DAGB0_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c +#define DAGB0_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d +#define DAGB0_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL +#define DAGB0_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define DAGB0_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L +#define DAGB0_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L +#define DAGB0_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L +//DAGB0_PERFCOUNTER_RSLT_CNTL +#define DAGB0_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0 +#define DAGB0_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8 +#define DAGB0_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10 +#define DAGB0_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18 +#define DAGB0_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19 +#define DAGB0_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a +#define DAGB0_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL 
+#define DAGB0_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L +#define DAGB0_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L +#define DAGB0_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L +#define DAGB0_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L +#define DAGB0_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L +//DAGB0_RESERVE0 +#define DAGB0_RESERVE0__RESERVE__SHIFT 0x0 +#define DAGB0_RESERVE0__RESERVE_MASK 0xFFFFFFFFL +//DAGB0_RESERVE1 +#define DAGB0_RESERVE1__RESERVE__SHIFT 0x0 +#define DAGB0_RESERVE1__RESERVE_MASK 0xFFFFFFFFL +//DAGB0_RESERVE2 +#define DAGB0_RESERVE2__RESERVE__SHIFT 0x0 +#define DAGB0_RESERVE2__RESERVE_MASK 0xFFFFFFFFL +//DAGB0_RESERVE3 +#define DAGB0_RESERVE3__RESERVE__SHIFT 0x0 +#define DAGB0_RESERVE3__RESERVE_MASK 0xFFFFFFFFL +//DAGB0_RESERVE4 +#define DAGB0_RESERVE4__RESERVE__SHIFT 0x0 +#define DAGB0_RESERVE4__RESERVE_MASK 0xFFFFFFFFL +//DAGB0_RESERVE5 +#define DAGB0_RESERVE5__RESERVE__SHIFT 0x0 +#define DAGB0_RESERVE5__RESERVE_MASK 0xFFFFFFFFL +//DAGB0_RESERVE6 +#define DAGB0_RESERVE6__RESERVE__SHIFT 0x0 +#define DAGB0_RESERVE6__RESERVE_MASK 0xFFFFFFFFL +//DAGB0_RESERVE7 +#define DAGB0_RESERVE7__RESERVE__SHIFT 0x0 +#define DAGB0_RESERVE7__RESERVE_MASK 0xFFFFFFFFL +//DAGB0_RESERVE8 +#define DAGB0_RESERVE8__RESERVE__SHIFT 0x0 +#define DAGB0_RESERVE8__RESERVE_MASK 0xFFFFFFFFL +//DAGB0_RESERVE9 +#define DAGB0_RESERVE9__RESERVE__SHIFT 0x0 +#define DAGB0_RESERVE9__RESERVE_MASK 0xFFFFFFFFL +//DAGB0_RESERVE10 +#define DAGB0_RESERVE10__RESERVE__SHIFT 0x0 +#define DAGB0_RESERVE10__RESERVE_MASK 0xFFFFFFFFL +//DAGB0_RESERVE11 +#define DAGB0_RESERVE11__RESERVE__SHIFT 0x0 +#define DAGB0_RESERVE11__RESERVE_MASK 0xFFFFFFFFL +//DAGB0_RESERVE12 +#define DAGB0_RESERVE12__RESERVE__SHIFT 0x0 +#define DAGB0_RESERVE12__RESERVE_MASK 0xFFFFFFFFL +//DAGB0_RESERVE13 +#define DAGB0_RESERVE13__RESERVE__SHIFT 0x0 +#define DAGB0_RESERVE13__RESERVE_MASK 0xFFFFFFFFL + + +// addressBlock: mmhub_dagb_dagbdec1 +//DAGB1_RDCLI0 +#define DAGB1_RDCLI0__VIRT_CHAN__SHIFT 0x0 +#define DAGB1_RDCLI0__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB1_RDCLI0__URG_HIGH__SHIFT 0x4 +#define DAGB1_RDCLI0__URG_LOW__SHIFT 0x8 +#define DAGB1_RDCLI0__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB1_RDCLI0__MAX_BW__SHIFT 0xd +#define DAGB1_RDCLI0__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB1_RDCLI0__MIN_BW__SHIFT 0x16 +#define DAGB1_RDCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB1_RDCLI0__MAX_OSD__SHIFT 0x1a +#define DAGB1_RDCLI0__VIRT_CHAN_MASK 0x00000007L +#define DAGB1_RDCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB1_RDCLI0__URG_HIGH_MASK 0x000000F0L +#define DAGB1_RDCLI0__URG_LOW_MASK 0x00000F00L +#define DAGB1_RDCLI0__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB1_RDCLI0__MAX_BW_MASK 0x001FE000L +#define DAGB1_RDCLI0__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB1_RDCLI0__MIN_BW_MASK 0x01C00000L +#define DAGB1_RDCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB1_RDCLI0__MAX_OSD_MASK 0xFC000000L +//DAGB1_RDCLI1 +#define DAGB1_RDCLI1__VIRT_CHAN__SHIFT 0x0 +#define DAGB1_RDCLI1__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB1_RDCLI1__URG_HIGH__SHIFT 0x4 +#define DAGB1_RDCLI1__URG_LOW__SHIFT 0x8 +#define DAGB1_RDCLI1__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB1_RDCLI1__MAX_BW__SHIFT 0xd +#define DAGB1_RDCLI1__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB1_RDCLI1__MIN_BW__SHIFT 0x16 +#define DAGB1_RDCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB1_RDCLI1__MAX_OSD__SHIFT 0x1a +#define DAGB1_RDCLI1__VIRT_CHAN_MASK 0x00000007L +#define DAGB1_RDCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L +#define 
DAGB1_RDCLI1__URG_HIGH_MASK 0x000000F0L +#define DAGB1_RDCLI1__URG_LOW_MASK 0x00000F00L +#define DAGB1_RDCLI1__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB1_RDCLI1__MAX_BW_MASK 0x001FE000L +#define DAGB1_RDCLI1__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB1_RDCLI1__MIN_BW_MASK 0x01C00000L +#define DAGB1_RDCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB1_RDCLI1__MAX_OSD_MASK 0xFC000000L +//DAGB1_RDCLI2 +#define DAGB1_RDCLI2__VIRT_CHAN__SHIFT 0x0 +#define DAGB1_RDCLI2__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB1_RDCLI2__URG_HIGH__SHIFT 0x4 +#define DAGB1_RDCLI2__URG_LOW__SHIFT 0x8 +#define DAGB1_RDCLI2__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB1_RDCLI2__MAX_BW__SHIFT 0xd +#define DAGB1_RDCLI2__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB1_RDCLI2__MIN_BW__SHIFT 0x16 +#define DAGB1_RDCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB1_RDCLI2__MAX_OSD__SHIFT 0x1a +#define DAGB1_RDCLI2__VIRT_CHAN_MASK 0x00000007L +#define DAGB1_RDCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB1_RDCLI2__URG_HIGH_MASK 0x000000F0L +#define DAGB1_RDCLI2__URG_LOW_MASK 0x00000F00L +#define DAGB1_RDCLI2__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB1_RDCLI2__MAX_BW_MASK 0x001FE000L +#define DAGB1_RDCLI2__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB1_RDCLI2__MIN_BW_MASK 0x01C00000L +#define DAGB1_RDCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB1_RDCLI2__MAX_OSD_MASK 0xFC000000L +//DAGB1_RDCLI3 +#define DAGB1_RDCLI3__VIRT_CHAN__SHIFT 0x0 +#define DAGB1_RDCLI3__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB1_RDCLI3__URG_HIGH__SHIFT 0x4 +#define DAGB1_RDCLI3__URG_LOW__SHIFT 0x8 +#define DAGB1_RDCLI3__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB1_RDCLI3__MAX_BW__SHIFT 0xd +#define DAGB1_RDCLI3__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB1_RDCLI3__MIN_BW__SHIFT 0x16 +#define DAGB1_RDCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB1_RDCLI3__MAX_OSD__SHIFT 0x1a +#define DAGB1_RDCLI3__VIRT_CHAN_MASK 0x00000007L +#define DAGB1_RDCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB1_RDCLI3__URG_HIGH_MASK 0x000000F0L +#define DAGB1_RDCLI3__URG_LOW_MASK 0x00000F00L +#define DAGB1_RDCLI3__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB1_RDCLI3__MAX_BW_MASK 0x001FE000L +#define DAGB1_RDCLI3__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB1_RDCLI3__MIN_BW_MASK 0x01C00000L +#define DAGB1_RDCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB1_RDCLI3__MAX_OSD_MASK 0xFC000000L +//DAGB1_RDCLI4 +#define DAGB1_RDCLI4__VIRT_CHAN__SHIFT 0x0 +#define DAGB1_RDCLI4__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB1_RDCLI4__URG_HIGH__SHIFT 0x4 +#define DAGB1_RDCLI4__URG_LOW__SHIFT 0x8 +#define DAGB1_RDCLI4__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB1_RDCLI4__MAX_BW__SHIFT 0xd +#define DAGB1_RDCLI4__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB1_RDCLI4__MIN_BW__SHIFT 0x16 +#define DAGB1_RDCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB1_RDCLI4__MAX_OSD__SHIFT 0x1a +#define DAGB1_RDCLI4__VIRT_CHAN_MASK 0x00000007L +#define DAGB1_RDCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB1_RDCLI4__URG_HIGH_MASK 0x000000F0L +#define DAGB1_RDCLI4__URG_LOW_MASK 0x00000F00L +#define DAGB1_RDCLI4__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB1_RDCLI4__MAX_BW_MASK 0x001FE000L +#define DAGB1_RDCLI4__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB1_RDCLI4__MIN_BW_MASK 0x01C00000L +#define DAGB1_RDCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB1_RDCLI4__MAX_OSD_MASK 0xFC000000L +//DAGB1_RDCLI5 +#define DAGB1_RDCLI5__VIRT_CHAN__SHIFT 0x0 +#define DAGB1_RDCLI5__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB1_RDCLI5__URG_HIGH__SHIFT 0x4 +#define DAGB1_RDCLI5__URG_LOW__SHIFT 0x8 
+#define DAGB1_RDCLI5__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB1_RDCLI5__MAX_BW__SHIFT 0xd +#define DAGB1_RDCLI5__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB1_RDCLI5__MIN_BW__SHIFT 0x16 +#define DAGB1_RDCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB1_RDCLI5__MAX_OSD__SHIFT 0x1a +#define DAGB1_RDCLI5__VIRT_CHAN_MASK 0x00000007L +#define DAGB1_RDCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB1_RDCLI5__URG_HIGH_MASK 0x000000F0L +#define DAGB1_RDCLI5__URG_LOW_MASK 0x00000F00L +#define DAGB1_RDCLI5__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB1_RDCLI5__MAX_BW_MASK 0x001FE000L +#define DAGB1_RDCLI5__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB1_RDCLI5__MIN_BW_MASK 0x01C00000L +#define DAGB1_RDCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB1_RDCLI5__MAX_OSD_MASK 0xFC000000L +//DAGB1_RDCLI6 +#define DAGB1_RDCLI6__VIRT_CHAN__SHIFT 0x0 +#define DAGB1_RDCLI6__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB1_RDCLI6__URG_HIGH__SHIFT 0x4 +#define DAGB1_RDCLI6__URG_LOW__SHIFT 0x8 +#define DAGB1_RDCLI6__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB1_RDCLI6__MAX_BW__SHIFT 0xd +#define DAGB1_RDCLI6__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB1_RDCLI6__MIN_BW__SHIFT 0x16 +#define DAGB1_RDCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB1_RDCLI6__MAX_OSD__SHIFT 0x1a +#define DAGB1_RDCLI6__VIRT_CHAN_MASK 0x00000007L +#define DAGB1_RDCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB1_RDCLI6__URG_HIGH_MASK 0x000000F0L +#define DAGB1_RDCLI6__URG_LOW_MASK 0x00000F00L +#define DAGB1_RDCLI6__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB1_RDCLI6__MAX_BW_MASK 0x001FE000L +#define DAGB1_RDCLI6__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB1_RDCLI6__MIN_BW_MASK 0x01C00000L +#define DAGB1_RDCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB1_RDCLI6__MAX_OSD_MASK 0xFC000000L +//DAGB1_RDCLI7 +#define DAGB1_RDCLI7__VIRT_CHAN__SHIFT 0x0 +#define DAGB1_RDCLI7__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB1_RDCLI7__URG_HIGH__SHIFT 0x4 +#define DAGB1_RDCLI7__URG_LOW__SHIFT 0x8 +#define DAGB1_RDCLI7__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB1_RDCLI7__MAX_BW__SHIFT 0xd +#define DAGB1_RDCLI7__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB1_RDCLI7__MIN_BW__SHIFT 0x16 +#define DAGB1_RDCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB1_RDCLI7__MAX_OSD__SHIFT 0x1a +#define DAGB1_RDCLI7__VIRT_CHAN_MASK 0x00000007L +#define DAGB1_RDCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB1_RDCLI7__URG_HIGH_MASK 0x000000F0L +#define DAGB1_RDCLI7__URG_LOW_MASK 0x00000F00L +#define DAGB1_RDCLI7__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB1_RDCLI7__MAX_BW_MASK 0x001FE000L +#define DAGB1_RDCLI7__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB1_RDCLI7__MIN_BW_MASK 0x01C00000L +#define DAGB1_RDCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB1_RDCLI7__MAX_OSD_MASK 0xFC000000L +//DAGB1_RDCLI8 +#define DAGB1_RDCLI8__VIRT_CHAN__SHIFT 0x0 +#define DAGB1_RDCLI8__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB1_RDCLI8__URG_HIGH__SHIFT 0x4 +#define DAGB1_RDCLI8__URG_LOW__SHIFT 0x8 +#define DAGB1_RDCLI8__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB1_RDCLI8__MAX_BW__SHIFT 0xd +#define DAGB1_RDCLI8__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB1_RDCLI8__MIN_BW__SHIFT 0x16 +#define DAGB1_RDCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB1_RDCLI8__MAX_OSD__SHIFT 0x1a +#define DAGB1_RDCLI8__VIRT_CHAN_MASK 0x00000007L +#define DAGB1_RDCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB1_RDCLI8__URG_HIGH_MASK 0x000000F0L +#define DAGB1_RDCLI8__URG_LOW_MASK 0x00000F00L +#define DAGB1_RDCLI8__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB1_RDCLI8__MAX_BW_MASK 0x001FE000L +#define 
DAGB1_RDCLI8__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB1_RDCLI8__MIN_BW_MASK 0x01C00000L +#define DAGB1_RDCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB1_RDCLI8__MAX_OSD_MASK 0xFC000000L +//DAGB1_RDCLI9 +#define DAGB1_RDCLI9__VIRT_CHAN__SHIFT 0x0 +#define DAGB1_RDCLI9__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB1_RDCLI9__URG_HIGH__SHIFT 0x4 +#define DAGB1_RDCLI9__URG_LOW__SHIFT 0x8 +#define DAGB1_RDCLI9__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB1_RDCLI9__MAX_BW__SHIFT 0xd +#define DAGB1_RDCLI9__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB1_RDCLI9__MIN_BW__SHIFT 0x16 +#define DAGB1_RDCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB1_RDCLI9__MAX_OSD__SHIFT 0x1a +#define DAGB1_RDCLI9__VIRT_CHAN_MASK 0x00000007L +#define DAGB1_RDCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB1_RDCLI9__URG_HIGH_MASK 0x000000F0L +#define DAGB1_RDCLI9__URG_LOW_MASK 0x00000F00L +#define DAGB1_RDCLI9__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB1_RDCLI9__MAX_BW_MASK 0x001FE000L +#define DAGB1_RDCLI9__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB1_RDCLI9__MIN_BW_MASK 0x01C00000L +#define DAGB1_RDCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB1_RDCLI9__MAX_OSD_MASK 0xFC000000L +//DAGB1_RDCLI10 +#define DAGB1_RDCLI10__VIRT_CHAN__SHIFT 0x0 +#define DAGB1_RDCLI10__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB1_RDCLI10__URG_HIGH__SHIFT 0x4 +#define DAGB1_RDCLI10__URG_LOW__SHIFT 0x8 +#define DAGB1_RDCLI10__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB1_RDCLI10__MAX_BW__SHIFT 0xd +#define DAGB1_RDCLI10__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB1_RDCLI10__MIN_BW__SHIFT 0x16 +#define DAGB1_RDCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB1_RDCLI10__MAX_OSD__SHIFT 0x1a +#define DAGB1_RDCLI10__VIRT_CHAN_MASK 0x00000007L +#define DAGB1_RDCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB1_RDCLI10__URG_HIGH_MASK 0x000000F0L +#define DAGB1_RDCLI10__URG_LOW_MASK 0x00000F00L +#define DAGB1_RDCLI10__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB1_RDCLI10__MAX_BW_MASK 0x001FE000L +#define DAGB1_RDCLI10__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB1_RDCLI10__MIN_BW_MASK 0x01C00000L +#define DAGB1_RDCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB1_RDCLI10__MAX_OSD_MASK 0xFC000000L +//DAGB1_RDCLI11 +#define DAGB1_RDCLI11__VIRT_CHAN__SHIFT 0x0 +#define DAGB1_RDCLI11__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB1_RDCLI11__URG_HIGH__SHIFT 0x4 +#define DAGB1_RDCLI11__URG_LOW__SHIFT 0x8 +#define DAGB1_RDCLI11__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB1_RDCLI11__MAX_BW__SHIFT 0xd +#define DAGB1_RDCLI11__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB1_RDCLI11__MIN_BW__SHIFT 0x16 +#define DAGB1_RDCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB1_RDCLI11__MAX_OSD__SHIFT 0x1a +#define DAGB1_RDCLI11__VIRT_CHAN_MASK 0x00000007L +#define DAGB1_RDCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB1_RDCLI11__URG_HIGH_MASK 0x000000F0L +#define DAGB1_RDCLI11__URG_LOW_MASK 0x00000F00L +#define DAGB1_RDCLI11__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB1_RDCLI11__MAX_BW_MASK 0x001FE000L +#define DAGB1_RDCLI11__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB1_RDCLI11__MIN_BW_MASK 0x01C00000L +#define DAGB1_RDCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB1_RDCLI11__MAX_OSD_MASK 0xFC000000L +//DAGB1_RDCLI12 +#define DAGB1_RDCLI12__VIRT_CHAN__SHIFT 0x0 +#define DAGB1_RDCLI12__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB1_RDCLI12__URG_HIGH__SHIFT 0x4 +#define DAGB1_RDCLI12__URG_LOW__SHIFT 0x8 +#define DAGB1_RDCLI12__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB1_RDCLI12__MAX_BW__SHIFT 0xd +#define DAGB1_RDCLI12__MIN_BW_ENABLE__SHIFT 0x15 +#define 
DAGB1_RDCLI12__MIN_BW__SHIFT 0x16 +#define DAGB1_RDCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB1_RDCLI12__MAX_OSD__SHIFT 0x1a +#define DAGB1_RDCLI12__VIRT_CHAN_MASK 0x00000007L +#define DAGB1_RDCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB1_RDCLI12__URG_HIGH_MASK 0x000000F0L +#define DAGB1_RDCLI12__URG_LOW_MASK 0x00000F00L +#define DAGB1_RDCLI12__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB1_RDCLI12__MAX_BW_MASK 0x001FE000L +#define DAGB1_RDCLI12__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB1_RDCLI12__MIN_BW_MASK 0x01C00000L +#define DAGB1_RDCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB1_RDCLI12__MAX_OSD_MASK 0xFC000000L +//DAGB1_RDCLI13 +#define DAGB1_RDCLI13__VIRT_CHAN__SHIFT 0x0 +#define DAGB1_RDCLI13__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB1_RDCLI13__URG_HIGH__SHIFT 0x4 +#define DAGB1_RDCLI13__URG_LOW__SHIFT 0x8 +#define DAGB1_RDCLI13__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB1_RDCLI13__MAX_BW__SHIFT 0xd +#define DAGB1_RDCLI13__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB1_RDCLI13__MIN_BW__SHIFT 0x16 +#define DAGB1_RDCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB1_RDCLI13__MAX_OSD__SHIFT 0x1a +#define DAGB1_RDCLI13__VIRT_CHAN_MASK 0x00000007L +#define DAGB1_RDCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB1_RDCLI13__URG_HIGH_MASK 0x000000F0L +#define DAGB1_RDCLI13__URG_LOW_MASK 0x00000F00L +#define DAGB1_RDCLI13__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB1_RDCLI13__MAX_BW_MASK 0x001FE000L +#define DAGB1_RDCLI13__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB1_RDCLI13__MIN_BW_MASK 0x01C00000L +#define DAGB1_RDCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB1_RDCLI13__MAX_OSD_MASK 0xFC000000L +//DAGB1_RDCLI14 +#define DAGB1_RDCLI14__VIRT_CHAN__SHIFT 0x0 +#define DAGB1_RDCLI14__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB1_RDCLI14__URG_HIGH__SHIFT 0x4 +#define DAGB1_RDCLI14__URG_LOW__SHIFT 0x8 +#define DAGB1_RDCLI14__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB1_RDCLI14__MAX_BW__SHIFT 0xd +#define DAGB1_RDCLI14__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB1_RDCLI14__MIN_BW__SHIFT 0x16 +#define DAGB1_RDCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB1_RDCLI14__MAX_OSD__SHIFT 0x1a +#define DAGB1_RDCLI14__VIRT_CHAN_MASK 0x00000007L +#define DAGB1_RDCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB1_RDCLI14__URG_HIGH_MASK 0x000000F0L +#define DAGB1_RDCLI14__URG_LOW_MASK 0x00000F00L +#define DAGB1_RDCLI14__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB1_RDCLI14__MAX_BW_MASK 0x001FE000L +#define DAGB1_RDCLI14__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB1_RDCLI14__MIN_BW_MASK 0x01C00000L +#define DAGB1_RDCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB1_RDCLI14__MAX_OSD_MASK 0xFC000000L +//DAGB1_RDCLI15 +#define DAGB1_RDCLI15__VIRT_CHAN__SHIFT 0x0 +#define DAGB1_RDCLI15__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB1_RDCLI15__URG_HIGH__SHIFT 0x4 +#define DAGB1_RDCLI15__URG_LOW__SHIFT 0x8 +#define DAGB1_RDCLI15__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB1_RDCLI15__MAX_BW__SHIFT 0xd +#define DAGB1_RDCLI15__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB1_RDCLI15__MIN_BW__SHIFT 0x16 +#define DAGB1_RDCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB1_RDCLI15__MAX_OSD__SHIFT 0x1a +#define DAGB1_RDCLI15__VIRT_CHAN_MASK 0x00000007L +#define DAGB1_RDCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB1_RDCLI15__URG_HIGH_MASK 0x000000F0L +#define DAGB1_RDCLI15__URG_LOW_MASK 0x00000F00L +#define DAGB1_RDCLI15__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB1_RDCLI15__MAX_BW_MASK 0x001FE000L +#define DAGB1_RDCLI15__MIN_BW_ENABLE_MASK 0x00200000L +#define 
DAGB1_RDCLI15__MIN_BW_MASK 0x01C00000L +#define DAGB1_RDCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB1_RDCLI15__MAX_OSD_MASK 0xFC000000L +//DAGB1_RD_CNTL +#define DAGB1_RD_CNTL__SCLK_FREQ__SHIFT 0x0 +#define DAGB1_RD_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4 +#define DAGB1_RD_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa +#define DAGB1_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10 +#define DAGB1_RD_CNTL__IO_LEVEL__SHIFT 0x11 +#define DAGB1_RD_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14 +#define DAGB1_RD_CNTL__SHARE_VC_NUM__SHIFT 0x17 +#define DAGB1_RD_CNTL__SCLK_FREQ_MASK 0x0000000FL +#define DAGB1_RD_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L +#define DAGB1_RD_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L +#define DAGB1_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L +#define DAGB1_RD_CNTL__IO_LEVEL_MASK 0x000E0000L +#define DAGB1_RD_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L +#define DAGB1_RD_CNTL__SHARE_VC_NUM_MASK 0x03800000L +//DAGB1_RD_GMI_CNTL +#define DAGB1_RD_GMI_CNTL__EA_CREDIT__SHIFT 0x0 +#define DAGB1_RD_GMI_CNTL__LEVEL__SHIFT 0x6 +#define DAGB1_RD_GMI_CNTL__MAX_BURST__SHIFT 0x9 +#define DAGB1_RD_GMI_CNTL__LAZY_TIMER__SHIFT 0xd +#define DAGB1_RD_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL +#define DAGB1_RD_GMI_CNTL__LEVEL_MASK 0x000001C0L +#define DAGB1_RD_GMI_CNTL__MAX_BURST_MASK 0x00001E00L +#define DAGB1_RD_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L +//DAGB1_RD_ADDR_DAGB +#define DAGB1_RD_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0 +#define DAGB1_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3 +#define DAGB1_RD_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6 +#define DAGB1_RD_ADDR_DAGB__WHOAMI__SHIFT 0x7 +#define DAGB1_RD_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L +#define DAGB1_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L +#define DAGB1_RD_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L +#define DAGB1_RD_ADDR_DAGB__WHOAMI_MASK 0x00001F80L +//DAGB1_RD_OUTPUT_DAGB_MAX_BURST +#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0 +#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4 +#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8 +#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc +#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10 +#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14 +#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18 +#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c +#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL +#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L +#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L +#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L +#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L +#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L +#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L +#define DAGB1_RD_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L +//DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER +#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0 +#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4 +#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8 +#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc +#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10 +#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14 +#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18 +#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c +#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL +#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L +#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L +#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 
0x0000F000L +#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L +#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L +#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L +#define DAGB1_RD_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L +//DAGB1_RD_CGTT_CLK_CTRL +#define DAGB1_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB1_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB1_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB1_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB1_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB1_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB1_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB1_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB1_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB1_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB1_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB1_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB1_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB1_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB1_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB1_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB1_L1TLB_RD_CGTT_CLK_CTRL +#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB1_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB1_ATCVM_RD_CGTT_CLK_CTRL +#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB1_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define 
DAGB1_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB1_RD_ADDR_DAGB_MAX_BURST0 +#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0 +#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4 +#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8 +#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc +#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10 +#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14 +#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18 +#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c +#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL +#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L +#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L +#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L +#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L +#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L +#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L +#define DAGB1_RD_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L +//DAGB1_RD_ADDR_DAGB_LAZY_TIMER0 +#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0 +#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4 +#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8 +#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc +#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10 +#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14 +#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18 +#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c +#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL +#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L +#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L +#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L +#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L +#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L +#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L +#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L +//DAGB1_RD_ADDR_DAGB_MAX_BURST1 +#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0 +#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4 +#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8 +#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc +#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10 +#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14 +#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18 +#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c +#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL +#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L +#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L +#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L +#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L +#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L +#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L +#define DAGB1_RD_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L +//DAGB1_RD_ADDR_DAGB_LAZY_TIMER1 +#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0 +#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4 +#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8 +#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc +#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10 +#define 
DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14 +#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18 +#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c +#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL +#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L +#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L +#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L +#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L +#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L +#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L +#define DAGB1_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L +//DAGB1_RD_VC0_CNTL +#define DAGB1_RD_VC0_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB1_RD_VC0_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB1_RD_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB1_RD_VC0_CNTL__MAX_BW__SHIFT 0xc +#define DAGB1_RD_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB1_RD_VC0_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB1_RD_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB1_RD_VC0_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB1_RD_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB1_RD_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB1_RD_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB1_RD_VC0_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB1_RD_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB1_RD_VC0_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB1_RD_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB1_RD_VC0_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB1_RD_VC1_CNTL +#define DAGB1_RD_VC1_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB1_RD_VC1_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB1_RD_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB1_RD_VC1_CNTL__MAX_BW__SHIFT 0xc +#define DAGB1_RD_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB1_RD_VC1_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB1_RD_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB1_RD_VC1_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB1_RD_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB1_RD_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB1_RD_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB1_RD_VC1_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB1_RD_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB1_RD_VC1_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB1_RD_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB1_RD_VC1_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB1_RD_VC2_CNTL +#define DAGB1_RD_VC2_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB1_RD_VC2_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB1_RD_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB1_RD_VC2_CNTL__MAX_BW__SHIFT 0xc +#define DAGB1_RD_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB1_RD_VC2_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB1_RD_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB1_RD_VC2_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB1_RD_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB1_RD_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB1_RD_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB1_RD_VC2_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB1_RD_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB1_RD_VC2_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB1_RD_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB1_RD_VC2_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB1_RD_VC3_CNTL +#define DAGB1_RD_VC3_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB1_RD_VC3_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB1_RD_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define 
DAGB1_RD_VC3_CNTL__MAX_BW__SHIFT 0xc +#define DAGB1_RD_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB1_RD_VC3_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB1_RD_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB1_RD_VC3_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB1_RD_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB1_RD_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB1_RD_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB1_RD_VC3_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB1_RD_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB1_RD_VC3_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB1_RD_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB1_RD_VC3_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB1_RD_VC4_CNTL +#define DAGB1_RD_VC4_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB1_RD_VC4_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB1_RD_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB1_RD_VC4_CNTL__MAX_BW__SHIFT 0xc +#define DAGB1_RD_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB1_RD_VC4_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB1_RD_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB1_RD_VC4_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB1_RD_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB1_RD_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB1_RD_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB1_RD_VC4_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB1_RD_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB1_RD_VC4_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB1_RD_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB1_RD_VC4_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB1_RD_VC5_CNTL +#define DAGB1_RD_VC5_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB1_RD_VC5_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB1_RD_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB1_RD_VC5_CNTL__MAX_BW__SHIFT 0xc +#define DAGB1_RD_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB1_RD_VC5_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB1_RD_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB1_RD_VC5_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB1_RD_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB1_RD_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB1_RD_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB1_RD_VC5_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB1_RD_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB1_RD_VC5_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB1_RD_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB1_RD_VC5_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB1_RD_VC6_CNTL +#define DAGB1_RD_VC6_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB1_RD_VC6_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB1_RD_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB1_RD_VC6_CNTL__MAX_BW__SHIFT 0xc +#define DAGB1_RD_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB1_RD_VC6_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB1_RD_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB1_RD_VC6_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB1_RD_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB1_RD_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB1_RD_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB1_RD_VC6_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB1_RD_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB1_RD_VC6_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB1_RD_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB1_RD_VC6_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB1_RD_VC7_CNTL +#define DAGB1_RD_VC7_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB1_RD_VC7_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB1_RD_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB1_RD_VC7_CNTL__MAX_BW__SHIFT 0xc 
+#define DAGB1_RD_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB1_RD_VC7_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB1_RD_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB1_RD_VC7_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB1_RD_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB1_RD_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB1_RD_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB1_RD_VC7_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB1_RD_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB1_RD_VC7_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB1_RD_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB1_RD_VC7_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB1_RD_CNTL_MISC +#define DAGB1_RD_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0 +#define DAGB1_RD_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6 +#define DAGB1_RD_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd +#define DAGB1_RD_CNTL_MISC__STOR_CC_LEGACY_MODE__SHIFT 0x13 +#define DAGB1_RD_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14 +#define DAGB1_RD_CNTL_MISC__UTCL2_CID__SHIFT 0x15 +#define DAGB1_RD_CNTL_MISC__RDRET_FIFO_CREDITS__SHIFT 0x1a +#define DAGB1_RD_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL +#define DAGB1_RD_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L +#define DAGB1_RD_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L +#define DAGB1_RD_CNTL_MISC__STOR_CC_LEGACY_MODE_MASK 0x00080000L +#define DAGB1_RD_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L +#define DAGB1_RD_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L +#define DAGB1_RD_CNTL_MISC__RDRET_FIFO_CREDITS_MASK 0xFC000000L +//DAGB1_RD_TLB_CREDIT +#define DAGB1_RD_TLB_CREDIT__TLB0__SHIFT 0x0 +#define DAGB1_RD_TLB_CREDIT__TLB1__SHIFT 0x5 +#define DAGB1_RD_TLB_CREDIT__TLB2__SHIFT 0xa +#define DAGB1_RD_TLB_CREDIT__TLB3__SHIFT 0xf +#define DAGB1_RD_TLB_CREDIT__TLB4__SHIFT 0x14 +#define DAGB1_RD_TLB_CREDIT__TLB5__SHIFT 0x19 +#define DAGB1_RD_TLB_CREDIT__TLB0_MASK 0x0000001FL +#define DAGB1_RD_TLB_CREDIT__TLB1_MASK 0x000003E0L +#define DAGB1_RD_TLB_CREDIT__TLB2_MASK 0x00007C00L +#define DAGB1_RD_TLB_CREDIT__TLB3_MASK 0x000F8000L +#define DAGB1_RD_TLB_CREDIT__TLB4_MASK 0x01F00000L +#define DAGB1_RD_TLB_CREDIT__TLB5_MASK 0x3E000000L +//DAGB1_RDCLI_ASK_PENDING +#define DAGB1_RDCLI_ASK_PENDING__BUSY__SHIFT 0x0 +#define DAGB1_RDCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB1_RDCLI_GO_PENDING +#define DAGB1_RDCLI_GO_PENDING__BUSY__SHIFT 0x0 +#define DAGB1_RDCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB1_RDCLI_GBLSEND_PENDING +#define DAGB1_RDCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0 +#define DAGB1_RDCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB1_RDCLI_TLB_PENDING +#define DAGB1_RDCLI_TLB_PENDING__BUSY__SHIFT 0x0 +#define DAGB1_RDCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB1_RDCLI_OARB_PENDING +#define DAGB1_RDCLI_OARB_PENDING__BUSY__SHIFT 0x0 +#define DAGB1_RDCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB1_RDCLI_OSD_PENDING +#define DAGB1_RDCLI_OSD_PENDING__BUSY__SHIFT 0x0 +#define DAGB1_RDCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB1_WRCLI0 +#define DAGB1_WRCLI0__VIRT_CHAN__SHIFT 0x0 +#define DAGB1_WRCLI0__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB1_WRCLI0__URG_HIGH__SHIFT 0x4 +#define DAGB1_WRCLI0__URG_LOW__SHIFT 0x8 +#define DAGB1_WRCLI0__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB1_WRCLI0__MAX_BW__SHIFT 0xd +#define DAGB1_WRCLI0__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB1_WRCLI0__MIN_BW__SHIFT 0x16 +#define DAGB1_WRCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB1_WRCLI0__MAX_OSD__SHIFT 0x1a +#define DAGB1_WRCLI0__VIRT_CHAN_MASK 0x00000007L +#define DAGB1_WRCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB1_WRCLI0__URG_HIGH_MASK 
0x000000F0L +#define DAGB1_WRCLI0__URG_LOW_MASK 0x00000F00L +#define DAGB1_WRCLI0__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB1_WRCLI0__MAX_BW_MASK 0x001FE000L +#define DAGB1_WRCLI0__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB1_WRCLI0__MIN_BW_MASK 0x01C00000L +#define DAGB1_WRCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB1_WRCLI0__MAX_OSD_MASK 0xFC000000L +//DAGB1_WRCLI1 +#define DAGB1_WRCLI1__VIRT_CHAN__SHIFT 0x0 +#define DAGB1_WRCLI1__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB1_WRCLI1__URG_HIGH__SHIFT 0x4 +#define DAGB1_WRCLI1__URG_LOW__SHIFT 0x8 +#define DAGB1_WRCLI1__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB1_WRCLI1__MAX_BW__SHIFT 0xd +#define DAGB1_WRCLI1__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB1_WRCLI1__MIN_BW__SHIFT 0x16 +#define DAGB1_WRCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB1_WRCLI1__MAX_OSD__SHIFT 0x1a +#define DAGB1_WRCLI1__VIRT_CHAN_MASK 0x00000007L +#define DAGB1_WRCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB1_WRCLI1__URG_HIGH_MASK 0x000000F0L +#define DAGB1_WRCLI1__URG_LOW_MASK 0x00000F00L +#define DAGB1_WRCLI1__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB1_WRCLI1__MAX_BW_MASK 0x001FE000L +#define DAGB1_WRCLI1__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB1_WRCLI1__MIN_BW_MASK 0x01C00000L +#define DAGB1_WRCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB1_WRCLI1__MAX_OSD_MASK 0xFC000000L +//DAGB1_WRCLI2 +#define DAGB1_WRCLI2__VIRT_CHAN__SHIFT 0x0 +#define DAGB1_WRCLI2__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB1_WRCLI2__URG_HIGH__SHIFT 0x4 +#define DAGB1_WRCLI2__URG_LOW__SHIFT 0x8 +#define DAGB1_WRCLI2__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB1_WRCLI2__MAX_BW__SHIFT 0xd +#define DAGB1_WRCLI2__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB1_WRCLI2__MIN_BW__SHIFT 0x16 +#define DAGB1_WRCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB1_WRCLI2__MAX_OSD__SHIFT 0x1a +#define DAGB1_WRCLI2__VIRT_CHAN_MASK 0x00000007L +#define DAGB1_WRCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB1_WRCLI2__URG_HIGH_MASK 0x000000F0L +#define DAGB1_WRCLI2__URG_LOW_MASK 0x00000F00L +#define DAGB1_WRCLI2__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB1_WRCLI2__MAX_BW_MASK 0x001FE000L +#define DAGB1_WRCLI2__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB1_WRCLI2__MIN_BW_MASK 0x01C00000L +#define DAGB1_WRCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB1_WRCLI2__MAX_OSD_MASK 0xFC000000L +//DAGB1_WRCLI3 +#define DAGB1_WRCLI3__VIRT_CHAN__SHIFT 0x0 +#define DAGB1_WRCLI3__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB1_WRCLI3__URG_HIGH__SHIFT 0x4 +#define DAGB1_WRCLI3__URG_LOW__SHIFT 0x8 +#define DAGB1_WRCLI3__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB1_WRCLI3__MAX_BW__SHIFT 0xd +#define DAGB1_WRCLI3__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB1_WRCLI3__MIN_BW__SHIFT 0x16 +#define DAGB1_WRCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB1_WRCLI3__MAX_OSD__SHIFT 0x1a +#define DAGB1_WRCLI3__VIRT_CHAN_MASK 0x00000007L +#define DAGB1_WRCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB1_WRCLI3__URG_HIGH_MASK 0x000000F0L +#define DAGB1_WRCLI3__URG_LOW_MASK 0x00000F00L +#define DAGB1_WRCLI3__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB1_WRCLI3__MAX_BW_MASK 0x001FE000L +#define DAGB1_WRCLI3__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB1_WRCLI3__MIN_BW_MASK 0x01C00000L +#define DAGB1_WRCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB1_WRCLI3__MAX_OSD_MASK 0xFC000000L +//DAGB1_WRCLI4 +#define DAGB1_WRCLI4__VIRT_CHAN__SHIFT 0x0 +#define DAGB1_WRCLI4__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB1_WRCLI4__URG_HIGH__SHIFT 0x4 +#define DAGB1_WRCLI4__URG_LOW__SHIFT 0x8 +#define 
DAGB1_WRCLI4__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB1_WRCLI4__MAX_BW__SHIFT 0xd +#define DAGB1_WRCLI4__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB1_WRCLI4__MIN_BW__SHIFT 0x16 +#define DAGB1_WRCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB1_WRCLI4__MAX_OSD__SHIFT 0x1a +#define DAGB1_WRCLI4__VIRT_CHAN_MASK 0x00000007L +#define DAGB1_WRCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB1_WRCLI4__URG_HIGH_MASK 0x000000F0L +#define DAGB1_WRCLI4__URG_LOW_MASK 0x00000F00L +#define DAGB1_WRCLI4__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB1_WRCLI4__MAX_BW_MASK 0x001FE000L +#define DAGB1_WRCLI4__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB1_WRCLI4__MIN_BW_MASK 0x01C00000L +#define DAGB1_WRCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB1_WRCLI4__MAX_OSD_MASK 0xFC000000L +//DAGB1_WRCLI5 +#define DAGB1_WRCLI5__VIRT_CHAN__SHIFT 0x0 +#define DAGB1_WRCLI5__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB1_WRCLI5__URG_HIGH__SHIFT 0x4 +#define DAGB1_WRCLI5__URG_LOW__SHIFT 0x8 +#define DAGB1_WRCLI5__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB1_WRCLI5__MAX_BW__SHIFT 0xd +#define DAGB1_WRCLI5__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB1_WRCLI5__MIN_BW__SHIFT 0x16 +#define DAGB1_WRCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB1_WRCLI5__MAX_OSD__SHIFT 0x1a +#define DAGB1_WRCLI5__VIRT_CHAN_MASK 0x00000007L +#define DAGB1_WRCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB1_WRCLI5__URG_HIGH_MASK 0x000000F0L +#define DAGB1_WRCLI5__URG_LOW_MASK 0x00000F00L +#define DAGB1_WRCLI5__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB1_WRCLI5__MAX_BW_MASK 0x001FE000L +#define DAGB1_WRCLI5__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB1_WRCLI5__MIN_BW_MASK 0x01C00000L +#define DAGB1_WRCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB1_WRCLI5__MAX_OSD_MASK 0xFC000000L +//DAGB1_WRCLI6 +#define DAGB1_WRCLI6__VIRT_CHAN__SHIFT 0x0 +#define DAGB1_WRCLI6__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB1_WRCLI6__URG_HIGH__SHIFT 0x4 +#define DAGB1_WRCLI6__URG_LOW__SHIFT 0x8 +#define DAGB1_WRCLI6__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB1_WRCLI6__MAX_BW__SHIFT 0xd +#define DAGB1_WRCLI6__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB1_WRCLI6__MIN_BW__SHIFT 0x16 +#define DAGB1_WRCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB1_WRCLI6__MAX_OSD__SHIFT 0x1a +#define DAGB1_WRCLI6__VIRT_CHAN_MASK 0x00000007L +#define DAGB1_WRCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB1_WRCLI6__URG_HIGH_MASK 0x000000F0L +#define DAGB1_WRCLI6__URG_LOW_MASK 0x00000F00L +#define DAGB1_WRCLI6__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB1_WRCLI6__MAX_BW_MASK 0x001FE000L +#define DAGB1_WRCLI6__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB1_WRCLI6__MIN_BW_MASK 0x01C00000L +#define DAGB1_WRCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB1_WRCLI6__MAX_OSD_MASK 0xFC000000L +//DAGB1_WRCLI7 +#define DAGB1_WRCLI7__VIRT_CHAN__SHIFT 0x0 +#define DAGB1_WRCLI7__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB1_WRCLI7__URG_HIGH__SHIFT 0x4 +#define DAGB1_WRCLI7__URG_LOW__SHIFT 0x8 +#define DAGB1_WRCLI7__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB1_WRCLI7__MAX_BW__SHIFT 0xd +#define DAGB1_WRCLI7__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB1_WRCLI7__MIN_BW__SHIFT 0x16 +#define DAGB1_WRCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB1_WRCLI7__MAX_OSD__SHIFT 0x1a +#define DAGB1_WRCLI7__VIRT_CHAN_MASK 0x00000007L +#define DAGB1_WRCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB1_WRCLI7__URG_HIGH_MASK 0x000000F0L +#define DAGB1_WRCLI7__URG_LOW_MASK 0x00000F00L +#define DAGB1_WRCLI7__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB1_WRCLI7__MAX_BW_MASK 0x001FE000L +#define 
DAGB1_WRCLI7__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB1_WRCLI7__MIN_BW_MASK 0x01C00000L +#define DAGB1_WRCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB1_WRCLI7__MAX_OSD_MASK 0xFC000000L +//DAGB1_WRCLI8 +#define DAGB1_WRCLI8__VIRT_CHAN__SHIFT 0x0 +#define DAGB1_WRCLI8__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB1_WRCLI8__URG_HIGH__SHIFT 0x4 +#define DAGB1_WRCLI8__URG_LOW__SHIFT 0x8 +#define DAGB1_WRCLI8__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB1_WRCLI8__MAX_BW__SHIFT 0xd +#define DAGB1_WRCLI8__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB1_WRCLI8__MIN_BW__SHIFT 0x16 +#define DAGB1_WRCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB1_WRCLI8__MAX_OSD__SHIFT 0x1a +#define DAGB1_WRCLI8__VIRT_CHAN_MASK 0x00000007L +#define DAGB1_WRCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB1_WRCLI8__URG_HIGH_MASK 0x000000F0L +#define DAGB1_WRCLI8__URG_LOW_MASK 0x00000F00L +#define DAGB1_WRCLI8__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB1_WRCLI8__MAX_BW_MASK 0x001FE000L +#define DAGB1_WRCLI8__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB1_WRCLI8__MIN_BW_MASK 0x01C00000L +#define DAGB1_WRCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB1_WRCLI8__MAX_OSD_MASK 0xFC000000L +//DAGB1_WRCLI9 +#define DAGB1_WRCLI9__VIRT_CHAN__SHIFT 0x0 +#define DAGB1_WRCLI9__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB1_WRCLI9__URG_HIGH__SHIFT 0x4 +#define DAGB1_WRCLI9__URG_LOW__SHIFT 0x8 +#define DAGB1_WRCLI9__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB1_WRCLI9__MAX_BW__SHIFT 0xd +#define DAGB1_WRCLI9__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB1_WRCLI9__MIN_BW__SHIFT 0x16 +#define DAGB1_WRCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB1_WRCLI9__MAX_OSD__SHIFT 0x1a +#define DAGB1_WRCLI9__VIRT_CHAN_MASK 0x00000007L +#define DAGB1_WRCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB1_WRCLI9__URG_HIGH_MASK 0x000000F0L +#define DAGB1_WRCLI9__URG_LOW_MASK 0x00000F00L +#define DAGB1_WRCLI9__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB1_WRCLI9__MAX_BW_MASK 0x001FE000L +#define DAGB1_WRCLI9__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB1_WRCLI9__MIN_BW_MASK 0x01C00000L +#define DAGB1_WRCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB1_WRCLI9__MAX_OSD_MASK 0xFC000000L +//DAGB1_WRCLI10 +#define DAGB1_WRCLI10__VIRT_CHAN__SHIFT 0x0 +#define DAGB1_WRCLI10__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB1_WRCLI10__URG_HIGH__SHIFT 0x4 +#define DAGB1_WRCLI10__URG_LOW__SHIFT 0x8 +#define DAGB1_WRCLI10__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB1_WRCLI10__MAX_BW__SHIFT 0xd +#define DAGB1_WRCLI10__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB1_WRCLI10__MIN_BW__SHIFT 0x16 +#define DAGB1_WRCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB1_WRCLI10__MAX_OSD__SHIFT 0x1a +#define DAGB1_WRCLI10__VIRT_CHAN_MASK 0x00000007L +#define DAGB1_WRCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB1_WRCLI10__URG_HIGH_MASK 0x000000F0L +#define DAGB1_WRCLI10__URG_LOW_MASK 0x00000F00L +#define DAGB1_WRCLI10__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB1_WRCLI10__MAX_BW_MASK 0x001FE000L +#define DAGB1_WRCLI10__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB1_WRCLI10__MIN_BW_MASK 0x01C00000L +#define DAGB1_WRCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB1_WRCLI10__MAX_OSD_MASK 0xFC000000L +//DAGB1_WRCLI11 +#define DAGB1_WRCLI11__VIRT_CHAN__SHIFT 0x0 +#define DAGB1_WRCLI11__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB1_WRCLI11__URG_HIGH__SHIFT 0x4 +#define DAGB1_WRCLI11__URG_LOW__SHIFT 0x8 +#define DAGB1_WRCLI11__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB1_WRCLI11__MAX_BW__SHIFT 0xd +#define DAGB1_WRCLI11__MIN_BW_ENABLE__SHIFT 0x15 +#define 
DAGB1_WRCLI11__MIN_BW__SHIFT 0x16 +#define DAGB1_WRCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB1_WRCLI11__MAX_OSD__SHIFT 0x1a +#define DAGB1_WRCLI11__VIRT_CHAN_MASK 0x00000007L +#define DAGB1_WRCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB1_WRCLI11__URG_HIGH_MASK 0x000000F0L +#define DAGB1_WRCLI11__URG_LOW_MASK 0x00000F00L +#define DAGB1_WRCLI11__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB1_WRCLI11__MAX_BW_MASK 0x001FE000L +#define DAGB1_WRCLI11__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB1_WRCLI11__MIN_BW_MASK 0x01C00000L +#define DAGB1_WRCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB1_WRCLI11__MAX_OSD_MASK 0xFC000000L +//DAGB1_WRCLI12 +#define DAGB1_WRCLI12__VIRT_CHAN__SHIFT 0x0 +#define DAGB1_WRCLI12__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB1_WRCLI12__URG_HIGH__SHIFT 0x4 +#define DAGB1_WRCLI12__URG_LOW__SHIFT 0x8 +#define DAGB1_WRCLI12__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB1_WRCLI12__MAX_BW__SHIFT 0xd +#define DAGB1_WRCLI12__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB1_WRCLI12__MIN_BW__SHIFT 0x16 +#define DAGB1_WRCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB1_WRCLI12__MAX_OSD__SHIFT 0x1a +#define DAGB1_WRCLI12__VIRT_CHAN_MASK 0x00000007L +#define DAGB1_WRCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB1_WRCLI12__URG_HIGH_MASK 0x000000F0L +#define DAGB1_WRCLI12__URG_LOW_MASK 0x00000F00L +#define DAGB1_WRCLI12__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB1_WRCLI12__MAX_BW_MASK 0x001FE000L +#define DAGB1_WRCLI12__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB1_WRCLI12__MIN_BW_MASK 0x01C00000L +#define DAGB1_WRCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB1_WRCLI12__MAX_OSD_MASK 0xFC000000L +//DAGB1_WRCLI13 +#define DAGB1_WRCLI13__VIRT_CHAN__SHIFT 0x0 +#define DAGB1_WRCLI13__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB1_WRCLI13__URG_HIGH__SHIFT 0x4 +#define DAGB1_WRCLI13__URG_LOW__SHIFT 0x8 +#define DAGB1_WRCLI13__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB1_WRCLI13__MAX_BW__SHIFT 0xd +#define DAGB1_WRCLI13__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB1_WRCLI13__MIN_BW__SHIFT 0x16 +#define DAGB1_WRCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB1_WRCLI13__MAX_OSD__SHIFT 0x1a +#define DAGB1_WRCLI13__VIRT_CHAN_MASK 0x00000007L +#define DAGB1_WRCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB1_WRCLI13__URG_HIGH_MASK 0x000000F0L +#define DAGB1_WRCLI13__URG_LOW_MASK 0x00000F00L +#define DAGB1_WRCLI13__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB1_WRCLI13__MAX_BW_MASK 0x001FE000L +#define DAGB1_WRCLI13__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB1_WRCLI13__MIN_BW_MASK 0x01C00000L +#define DAGB1_WRCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB1_WRCLI13__MAX_OSD_MASK 0xFC000000L +//DAGB1_WRCLI14 +#define DAGB1_WRCLI14__VIRT_CHAN__SHIFT 0x0 +#define DAGB1_WRCLI14__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB1_WRCLI14__URG_HIGH__SHIFT 0x4 +#define DAGB1_WRCLI14__URG_LOW__SHIFT 0x8 +#define DAGB1_WRCLI14__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB1_WRCLI14__MAX_BW__SHIFT 0xd +#define DAGB1_WRCLI14__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB1_WRCLI14__MIN_BW__SHIFT 0x16 +#define DAGB1_WRCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB1_WRCLI14__MAX_OSD__SHIFT 0x1a +#define DAGB1_WRCLI14__VIRT_CHAN_MASK 0x00000007L +#define DAGB1_WRCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB1_WRCLI14__URG_HIGH_MASK 0x000000F0L +#define DAGB1_WRCLI14__URG_LOW_MASK 0x00000F00L +#define DAGB1_WRCLI14__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB1_WRCLI14__MAX_BW_MASK 0x001FE000L +#define DAGB1_WRCLI14__MIN_BW_ENABLE_MASK 0x00200000L +#define 
DAGB1_WRCLI14__MIN_BW_MASK 0x01C00000L +#define DAGB1_WRCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB1_WRCLI14__MAX_OSD_MASK 0xFC000000L +//DAGB1_WRCLI15 +#define DAGB1_WRCLI15__VIRT_CHAN__SHIFT 0x0 +#define DAGB1_WRCLI15__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB1_WRCLI15__URG_HIGH__SHIFT 0x4 +#define DAGB1_WRCLI15__URG_LOW__SHIFT 0x8 +#define DAGB1_WRCLI15__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB1_WRCLI15__MAX_BW__SHIFT 0xd +#define DAGB1_WRCLI15__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB1_WRCLI15__MIN_BW__SHIFT 0x16 +#define DAGB1_WRCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB1_WRCLI15__MAX_OSD__SHIFT 0x1a +#define DAGB1_WRCLI15__VIRT_CHAN_MASK 0x00000007L +#define DAGB1_WRCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB1_WRCLI15__URG_HIGH_MASK 0x000000F0L +#define DAGB1_WRCLI15__URG_LOW_MASK 0x00000F00L +#define DAGB1_WRCLI15__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB1_WRCLI15__MAX_BW_MASK 0x001FE000L +#define DAGB1_WRCLI15__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB1_WRCLI15__MIN_BW_MASK 0x01C00000L +#define DAGB1_WRCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB1_WRCLI15__MAX_OSD_MASK 0xFC000000L +//DAGB1_WR_CNTL +#define DAGB1_WR_CNTL__SCLK_FREQ__SHIFT 0x0 +#define DAGB1_WR_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4 +#define DAGB1_WR_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa +#define DAGB1_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10 +#define DAGB1_WR_CNTL__IO_LEVEL__SHIFT 0x11 +#define DAGB1_WR_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14 +#define DAGB1_WR_CNTL__SHARE_VC_NUM__SHIFT 0x17 +#define DAGB1_WR_CNTL__SCLK_FREQ_MASK 0x0000000FL +#define DAGB1_WR_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L +#define DAGB1_WR_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L +#define DAGB1_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L +#define DAGB1_WR_CNTL__IO_LEVEL_MASK 0x000E0000L +#define DAGB1_WR_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L +#define DAGB1_WR_CNTL__SHARE_VC_NUM_MASK 0x03800000L +//DAGB1_WR_GMI_CNTL +#define DAGB1_WR_GMI_CNTL__EA_CREDIT__SHIFT 0x0 +#define DAGB1_WR_GMI_CNTL__LEVEL__SHIFT 0x6 +#define DAGB1_WR_GMI_CNTL__MAX_BURST__SHIFT 0x9 +#define DAGB1_WR_GMI_CNTL__LAZY_TIMER__SHIFT 0xd +#define DAGB1_WR_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL +#define DAGB1_WR_GMI_CNTL__LEVEL_MASK 0x000001C0L +#define DAGB1_WR_GMI_CNTL__MAX_BURST_MASK 0x00001E00L +#define DAGB1_WR_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L +//DAGB1_WR_ADDR_DAGB +#define DAGB1_WR_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0 +#define DAGB1_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3 +#define DAGB1_WR_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6 +#define DAGB1_WR_ADDR_DAGB__WHOAMI__SHIFT 0x7 +#define DAGB1_WR_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L +#define DAGB1_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L +#define DAGB1_WR_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L +#define DAGB1_WR_ADDR_DAGB__WHOAMI_MASK 0x00001F80L +//DAGB1_WR_OUTPUT_DAGB_MAX_BURST +#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0 +#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4 +#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8 +#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc +#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10 +#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14 +#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18 +#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c +#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL +#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L +#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L +#define 
DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L +#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L +#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L +#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L +#define DAGB1_WR_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L +//DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER +#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0 +#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4 +#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8 +#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc +#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10 +#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14 +#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18 +#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c +#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL +#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L +#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L +#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L +#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L +#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L +#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L +#define DAGB1_WR_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L +//DAGB1_WR_CGTT_CLK_CTRL +#define DAGB1_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB1_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB1_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB1_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB1_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB1_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB1_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB1_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB1_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB1_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB1_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB1_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB1_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB1_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB1_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB1_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB1_L1TLB_WR_CGTT_CLK_CTRL +#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB1_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB1_ATCVM_WR_CGTT_CLK_CTRL +#define 
DAGB1_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB1_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB1_WR_ADDR_DAGB_MAX_BURST0 +#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0 +#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4 +#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8 +#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc +#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10 +#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14 +#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18 +#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c +#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL +#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L +#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L +#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L +#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L +#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L +#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L +#define DAGB1_WR_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L +//DAGB1_WR_ADDR_DAGB_LAZY_TIMER0 +#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0 +#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4 +#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8 +#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc +#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10 +#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14 +#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18 +#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c +#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL +#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L +#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L +#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L +#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L +#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L +#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L +#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L +//DAGB1_WR_ADDR_DAGB_MAX_BURST1 +#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0 +#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4 +#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8 +#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc +#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10 +#define 
DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14 +#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18 +#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c +#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL +#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L +#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L +#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L +#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L +#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L +#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L +#define DAGB1_WR_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L +//DAGB1_WR_ADDR_DAGB_LAZY_TIMER1 +#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0 +#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4 +#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8 +#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc +#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10 +#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14 +#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18 +#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c +#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL +#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L +#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L +#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L +#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L +#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L +#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L +#define DAGB1_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L +//DAGB1_WR_DATA_DAGB +#define DAGB1_WR_DATA_DAGB__DAGB_ENABLE__SHIFT 0x0 +#define DAGB1_WR_DATA_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3 +#define DAGB1_WR_DATA_DAGB__DISABLE_SELF_INIT__SHIFT 0x6 +#define DAGB1_WR_DATA_DAGB__WHOAMI__SHIFT 0x7 +#define DAGB1_WR_DATA_DAGB__DAGB_ENABLE_MASK 0x00000007L +#define DAGB1_WR_DATA_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L +#define DAGB1_WR_DATA_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L +#define DAGB1_WR_DATA_DAGB__WHOAMI_MASK 0x00001F80L +//DAGB1_WR_DATA_DAGB_MAX_BURST0 +#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0 +#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4 +#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8 +#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc +#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10 +#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14 +#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18 +#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c +#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL +#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L +#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L +#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L +#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L +#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L +#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L +#define DAGB1_WR_DATA_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L +//DAGB1_WR_DATA_DAGB_LAZY_TIMER0 +#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0 +#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4 +#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8 +#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc +#define 
DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10 +#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14 +#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18 +#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c +#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL +#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L +#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L +#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L +#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L +#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L +#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L +#define DAGB1_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L +//DAGB1_WR_DATA_DAGB_MAX_BURST1 +#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0 +#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4 +#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8 +#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc +#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10 +#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14 +#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18 +#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c +#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL +#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L +#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L +#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L +#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L +#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L +#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L +#define DAGB1_WR_DATA_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L +//DAGB1_WR_DATA_DAGB_LAZY_TIMER1 +#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0 +#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4 +#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8 +#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc +#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10 +#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14 +#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18 +#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c +#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL +#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L +#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L +#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L +#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L +#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L +#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L +#define DAGB1_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L +//DAGB1_WR_VC0_CNTL +#define DAGB1_WR_VC0_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB1_WR_VC0_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB1_WR_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB1_WR_VC0_CNTL__MAX_BW__SHIFT 0xc +#define DAGB1_WR_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB1_WR_VC0_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB1_WR_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB1_WR_VC0_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB1_WR_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB1_WR_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB1_WR_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB1_WR_VC0_CNTL__MAX_BW_MASK 0x000FF000L +#define 
DAGB1_WR_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB1_WR_VC0_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB1_WR_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB1_WR_VC0_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB1_WR_VC1_CNTL +#define DAGB1_WR_VC1_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB1_WR_VC1_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB1_WR_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB1_WR_VC1_CNTL__MAX_BW__SHIFT 0xc +#define DAGB1_WR_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB1_WR_VC1_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB1_WR_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB1_WR_VC1_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB1_WR_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB1_WR_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB1_WR_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB1_WR_VC1_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB1_WR_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB1_WR_VC1_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB1_WR_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB1_WR_VC1_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB1_WR_VC2_CNTL +#define DAGB1_WR_VC2_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB1_WR_VC2_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB1_WR_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB1_WR_VC2_CNTL__MAX_BW__SHIFT 0xc +#define DAGB1_WR_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB1_WR_VC2_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB1_WR_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB1_WR_VC2_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB1_WR_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB1_WR_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB1_WR_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB1_WR_VC2_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB1_WR_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB1_WR_VC2_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB1_WR_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB1_WR_VC2_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB1_WR_VC3_CNTL +#define DAGB1_WR_VC3_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB1_WR_VC3_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB1_WR_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB1_WR_VC3_CNTL__MAX_BW__SHIFT 0xc +#define DAGB1_WR_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB1_WR_VC3_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB1_WR_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB1_WR_VC3_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB1_WR_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB1_WR_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB1_WR_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB1_WR_VC3_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB1_WR_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB1_WR_VC3_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB1_WR_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB1_WR_VC3_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB1_WR_VC4_CNTL +#define DAGB1_WR_VC4_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB1_WR_VC4_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB1_WR_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB1_WR_VC4_CNTL__MAX_BW__SHIFT 0xc +#define DAGB1_WR_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB1_WR_VC4_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB1_WR_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB1_WR_VC4_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB1_WR_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB1_WR_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB1_WR_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB1_WR_VC4_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB1_WR_VC4_CNTL__MIN_BW_ENABLE_MASK 
0x00100000L +#define DAGB1_WR_VC4_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB1_WR_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB1_WR_VC4_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB1_WR_VC5_CNTL +#define DAGB1_WR_VC5_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB1_WR_VC5_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB1_WR_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB1_WR_VC5_CNTL__MAX_BW__SHIFT 0xc +#define DAGB1_WR_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB1_WR_VC5_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB1_WR_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB1_WR_VC5_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB1_WR_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB1_WR_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB1_WR_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB1_WR_VC5_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB1_WR_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB1_WR_VC5_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB1_WR_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB1_WR_VC5_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB1_WR_VC6_CNTL +#define DAGB1_WR_VC6_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB1_WR_VC6_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB1_WR_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB1_WR_VC6_CNTL__MAX_BW__SHIFT 0xc +#define DAGB1_WR_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB1_WR_VC6_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB1_WR_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB1_WR_VC6_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB1_WR_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB1_WR_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB1_WR_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB1_WR_VC6_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB1_WR_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB1_WR_VC6_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB1_WR_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB1_WR_VC6_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB1_WR_VC7_CNTL +#define DAGB1_WR_VC7_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB1_WR_VC7_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB1_WR_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB1_WR_VC7_CNTL__MAX_BW__SHIFT 0xc +#define DAGB1_WR_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB1_WR_VC7_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB1_WR_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB1_WR_VC7_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB1_WR_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB1_WR_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB1_WR_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB1_WR_VC7_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB1_WR_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB1_WR_VC7_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB1_WR_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB1_WR_VC7_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB1_WR_CNTL_MISC +#define DAGB1_WR_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0 +#define DAGB1_WR_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6 +#define DAGB1_WR_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd +#define DAGB1_WR_CNTL_MISC__STOR_CC_LEGACY_MODE__SHIFT 0x13 +#define DAGB1_WR_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14 +#define DAGB1_WR_CNTL_MISC__UTCL2_CID__SHIFT 0x15 +#define DAGB1_WR_CNTL_MISC__RDRET_FIFO_CREDITS__SHIFT 0x1a +#define DAGB1_WR_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL +#define DAGB1_WR_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L +#define DAGB1_WR_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L +#define DAGB1_WR_CNTL_MISC__STOR_CC_LEGACY_MODE_MASK 0x00080000L +#define DAGB1_WR_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L 
+#define DAGB1_WR_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L +#define DAGB1_WR_CNTL_MISC__RDRET_FIFO_CREDITS_MASK 0xFC000000L +//DAGB1_WR_TLB_CREDIT +#define DAGB1_WR_TLB_CREDIT__TLB0__SHIFT 0x0 +#define DAGB1_WR_TLB_CREDIT__TLB1__SHIFT 0x5 +#define DAGB1_WR_TLB_CREDIT__TLB2__SHIFT 0xa +#define DAGB1_WR_TLB_CREDIT__TLB3__SHIFT 0xf +#define DAGB1_WR_TLB_CREDIT__TLB4__SHIFT 0x14 +#define DAGB1_WR_TLB_CREDIT__TLB5__SHIFT 0x19 +#define DAGB1_WR_TLB_CREDIT__TLB0_MASK 0x0000001FL +#define DAGB1_WR_TLB_CREDIT__TLB1_MASK 0x000003E0L +#define DAGB1_WR_TLB_CREDIT__TLB2_MASK 0x00007C00L +#define DAGB1_WR_TLB_CREDIT__TLB3_MASK 0x000F8000L +#define DAGB1_WR_TLB_CREDIT__TLB4_MASK 0x01F00000L +#define DAGB1_WR_TLB_CREDIT__TLB5_MASK 0x3E000000L +//DAGB1_WR_DATA_CREDIT +#define DAGB1_WR_DATA_CREDIT__DLOCK_VC_CREDITS__SHIFT 0x0 +#define DAGB1_WR_DATA_CREDIT__LARGE_BURST_CREDITS__SHIFT 0x8 +#define DAGB1_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS__SHIFT 0x10 +#define DAGB1_WR_DATA_CREDIT__SMALL_BURST_CREDITS__SHIFT 0x18 +#define DAGB1_WR_DATA_CREDIT__DLOCK_VC_CREDITS_MASK 0x000000FFL +#define DAGB1_WR_DATA_CREDIT__LARGE_BURST_CREDITS_MASK 0x0000FF00L +#define DAGB1_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS_MASK 0x00FF0000L +#define DAGB1_WR_DATA_CREDIT__SMALL_BURST_CREDITS_MASK 0xFF000000L +//DAGB1_WR_MISC_CREDIT +#define DAGB1_WR_MISC_CREDIT__ATOMIC_CREDIT__SHIFT 0x0 +#define DAGB1_WR_MISC_CREDIT__DLOCK_VC_NUM__SHIFT 0x6 +#define DAGB1_WR_MISC_CREDIT__OSD_CREDIT__SHIFT 0x9 +#define DAGB1_WR_MISC_CREDIT__OSD_DLOCK_CREDIT__SHIFT 0x10 +#define DAGB1_WR_MISC_CREDIT__ATOMIC_CREDIT_MASK 0x0000003FL +#define DAGB1_WR_MISC_CREDIT__DLOCK_VC_NUM_MASK 0x000001C0L +#define DAGB1_WR_MISC_CREDIT__OSD_CREDIT_MASK 0x0000FE00L +#define DAGB1_WR_MISC_CREDIT__OSD_DLOCK_CREDIT_MASK 0x007F0000L +//DAGB1_WRCLI_ASK_PENDING +#define DAGB1_WRCLI_ASK_PENDING__BUSY__SHIFT 0x0 +#define DAGB1_WRCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB1_WRCLI_GO_PENDING +#define DAGB1_WRCLI_GO_PENDING__BUSY__SHIFT 0x0 +#define DAGB1_WRCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB1_WRCLI_GBLSEND_PENDING +#define DAGB1_WRCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0 +#define DAGB1_WRCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB1_WRCLI_TLB_PENDING +#define DAGB1_WRCLI_TLB_PENDING__BUSY__SHIFT 0x0 +#define DAGB1_WRCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB1_WRCLI_OARB_PENDING +#define DAGB1_WRCLI_OARB_PENDING__BUSY__SHIFT 0x0 +#define DAGB1_WRCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB1_WRCLI_OSD_PENDING +#define DAGB1_WRCLI_OSD_PENDING__BUSY__SHIFT 0x0 +#define DAGB1_WRCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB1_WRCLI_DBUS_ASK_PENDING +#define DAGB1_WRCLI_DBUS_ASK_PENDING__BUSY__SHIFT 0x0 +#define DAGB1_WRCLI_DBUS_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB1_WRCLI_DBUS_GO_PENDING +#define DAGB1_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0 +#define DAGB1_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB1_DAGB_DLY +#define DAGB1_DAGB_DLY__DLY__SHIFT 0x0 +#define DAGB1_DAGB_DLY__CLI__SHIFT 0x8 +#define DAGB1_DAGB_DLY__POS__SHIFT 0x10 +#define DAGB1_DAGB_DLY__DLY_MASK 0x000000FFL +#define DAGB1_DAGB_DLY__CLI_MASK 0x0000FF00L +#define DAGB1_DAGB_DLY__POS_MASK 0x000F0000L +//DAGB1_CNTL_MISC +#define DAGB1_CNTL_MISC__EA_VC0_REMAP__SHIFT 0x0 +#define DAGB1_CNTL_MISC__EA_VC1_REMAP__SHIFT 0x3 +#define DAGB1_CNTL_MISC__EA_VC2_REMAP__SHIFT 0x6 +#define DAGB1_CNTL_MISC__EA_VC3_REMAP__SHIFT 0x9 +#define DAGB1_CNTL_MISC__EA_VC4_REMAP__SHIFT 0xc +#define DAGB1_CNTL_MISC__EA_VC5_REMAP__SHIFT 0xf +#define DAGB1_CNTL_MISC__EA_VC6_REMAP__SHIFT 0x12 +#define 
DAGB1_CNTL_MISC__EA_VC7_REMAP__SHIFT 0x15 +#define DAGB1_CNTL_MISC__BW_INIT_CYCLE__SHIFT 0x18 +#define DAGB1_CNTL_MISC__BW_RW_GAP_CYCLE__SHIFT 0x1e +#define DAGB1_CNTL_MISC__EA_VC0_REMAP_MASK 0x00000007L +#define DAGB1_CNTL_MISC__EA_VC1_REMAP_MASK 0x00000038L +#define DAGB1_CNTL_MISC__EA_VC2_REMAP_MASK 0x000001C0L +#define DAGB1_CNTL_MISC__EA_VC3_REMAP_MASK 0x00000E00L +#define DAGB1_CNTL_MISC__EA_VC4_REMAP_MASK 0x00007000L +#define DAGB1_CNTL_MISC__EA_VC5_REMAP_MASK 0x00038000L +#define DAGB1_CNTL_MISC__EA_VC6_REMAP_MASK 0x001C0000L +#define DAGB1_CNTL_MISC__EA_VC7_REMAP_MASK 0x00E00000L +#define DAGB1_CNTL_MISC__BW_INIT_CYCLE_MASK 0x3F000000L +#define DAGB1_CNTL_MISC__BW_RW_GAP_CYCLE_MASK 0xC0000000L +//DAGB1_CNTL_MISC2 +#define DAGB1_CNTL_MISC2__URG_BOOST_ENABLE__SHIFT 0x0 +#define DAGB1_CNTL_MISC2__URG_HALT_ENABLE__SHIFT 0x1 +#define DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG__SHIFT 0x2 +#define DAGB1_CNTL_MISC2__DISABLE_WRRET_CG__SHIFT 0x3 +#define DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG__SHIFT 0x4 +#define DAGB1_CNTL_MISC2__DISABLE_RDRET_CG__SHIFT 0x5 +#define DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG__SHIFT 0x6 +#define DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG__SHIFT 0x7 +#define DAGB1_CNTL_MISC2__DISABLE_EAWRREQ_BUSY__SHIFT 0x8 +#define DAGB1_CNTL_MISC2__DISABLE_EARDREQ_BUSY__SHIFT 0x9 +#define DAGB1_CNTL_MISC2__SWAP_CTL__SHIFT 0xa +#define DAGB1_CNTL_MISC2__RDRET_FIFO_PERF__SHIFT 0xb +#define DAGB1_CNTL_MISC2__RDRET_FIFO_DLOCK_CREDITS__SHIFT 0x11 +#define DAGB1_CNTL_MISC2__URG_BOOST_ENABLE_MASK 0x00000001L +#define DAGB1_CNTL_MISC2__URG_HALT_ENABLE_MASK 0x00000002L +#define DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK 0x00000004L +#define DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK 0x00000008L +#define DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK 0x00000010L +#define DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK 0x00000020L +#define DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK 0x00000040L +#define DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK 0x00000080L +#define DAGB1_CNTL_MISC2__DISABLE_EAWRREQ_BUSY_MASK 0x00000100L +#define DAGB1_CNTL_MISC2__DISABLE_EARDREQ_BUSY_MASK 0x00000200L +#define DAGB1_CNTL_MISC2__SWAP_CTL_MASK 0x00000400L +#define DAGB1_CNTL_MISC2__RDRET_FIFO_PERF_MASK 0x00000800L +#define DAGB1_CNTL_MISC2__RDRET_FIFO_DLOCK_CREDITS_MASK 0x007E0000L +//DAGB1_FIFO_EMPTY +#define DAGB1_FIFO_EMPTY__EMPTY__SHIFT 0x0 +#define DAGB1_FIFO_EMPTY__EMPTY_MASK 0x00FFFFFFL +//DAGB1_FIFO_FULL +#define DAGB1_FIFO_FULL__FULL__SHIFT 0x0 +#define DAGB1_FIFO_FULL__FULL_MASK 0x007FFFFFL +//DAGB1_WR_CREDITS_FULL +#define DAGB1_WR_CREDITS_FULL__FULL__SHIFT 0x0 +#define DAGB1_WR_CREDITS_FULL__FULL_MASK 0x1FFFFFFFL +//DAGB1_RD_CREDITS_FULL +#define DAGB1_RD_CREDITS_FULL__FULL__SHIFT 0x0 +#define DAGB1_RD_CREDITS_FULL__FULL_MASK 0x0003FFFFL +//DAGB1_PERFCOUNTER_LO +#define DAGB1_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0 +#define DAGB1_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL +//DAGB1_PERFCOUNTER_HI +#define DAGB1_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0 +#define DAGB1_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10 +#define DAGB1_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL +#define DAGB1_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L +//DAGB1_PERFCOUNTER0_CFG +#define DAGB1_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0 +#define DAGB1_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8 +#define DAGB1_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18 +#define DAGB1_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c +#define DAGB1_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d +#define DAGB1_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL +#define DAGB1_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define 
DAGB1_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L +#define DAGB1_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L +#define DAGB1_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L +//DAGB1_PERFCOUNTER1_CFG +#define DAGB1_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0 +#define DAGB1_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8 +#define DAGB1_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18 +#define DAGB1_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c +#define DAGB1_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d +#define DAGB1_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL +#define DAGB1_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define DAGB1_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L +#define DAGB1_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L +#define DAGB1_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L +//DAGB1_PERFCOUNTER2_CFG +#define DAGB1_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0 +#define DAGB1_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8 +#define DAGB1_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18 +#define DAGB1_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c +#define DAGB1_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d +#define DAGB1_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL +#define DAGB1_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define DAGB1_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L +#define DAGB1_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L +#define DAGB1_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L +//DAGB1_PERFCOUNTER_RSLT_CNTL +#define DAGB1_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0 +#define DAGB1_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8 +#define DAGB1_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10 +#define DAGB1_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18 +#define DAGB1_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19 +#define DAGB1_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a +#define DAGB1_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL +#define DAGB1_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L +#define DAGB1_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L +#define DAGB1_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L +#define DAGB1_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L +#define DAGB1_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L +//DAGB1_RESERVE0 +#define DAGB1_RESERVE0__RESERVE__SHIFT 0x0 +#define DAGB1_RESERVE0__RESERVE_MASK 0xFFFFFFFFL +//DAGB1_RESERVE1 +#define DAGB1_RESERVE1__RESERVE__SHIFT 0x0 +#define DAGB1_RESERVE1__RESERVE_MASK 0xFFFFFFFFL +//DAGB1_RESERVE2 +#define DAGB1_RESERVE2__RESERVE__SHIFT 0x0 +#define DAGB1_RESERVE2__RESERVE_MASK 0xFFFFFFFFL +//DAGB1_RESERVE3 +#define DAGB1_RESERVE3__RESERVE__SHIFT 0x0 +#define DAGB1_RESERVE3__RESERVE_MASK 0xFFFFFFFFL +//DAGB1_RESERVE4 +#define DAGB1_RESERVE4__RESERVE__SHIFT 0x0 +#define DAGB1_RESERVE4__RESERVE_MASK 0xFFFFFFFFL +//DAGB1_RESERVE5 +#define DAGB1_RESERVE5__RESERVE__SHIFT 0x0 +#define DAGB1_RESERVE5__RESERVE_MASK 0xFFFFFFFFL +//DAGB1_RESERVE6 +#define DAGB1_RESERVE6__RESERVE__SHIFT 0x0 +#define DAGB1_RESERVE6__RESERVE_MASK 0xFFFFFFFFL +//DAGB1_RESERVE7 +#define DAGB1_RESERVE7__RESERVE__SHIFT 0x0 +#define DAGB1_RESERVE7__RESERVE_MASK 0xFFFFFFFFL +//DAGB1_RESERVE8 +#define DAGB1_RESERVE8__RESERVE__SHIFT 0x0 +#define DAGB1_RESERVE8__RESERVE_MASK 0xFFFFFFFFL +//DAGB1_RESERVE9 +#define DAGB1_RESERVE9__RESERVE__SHIFT 0x0 +#define DAGB1_RESERVE9__RESERVE_MASK 0xFFFFFFFFL +//DAGB1_RESERVE10 +#define DAGB1_RESERVE10__RESERVE__SHIFT 0x0 +#define DAGB1_RESERVE10__RESERVE_MASK 0xFFFFFFFFL +//DAGB1_RESERVE11 +#define DAGB1_RESERVE11__RESERVE__SHIFT 0x0 +#define DAGB1_RESERVE11__RESERVE_MASK 0xFFFFFFFFL +//DAGB1_RESERVE12 
+#define DAGB1_RESERVE12__RESERVE__SHIFT 0x0 +#define DAGB1_RESERVE12__RESERVE_MASK 0xFFFFFFFFL +//DAGB1_RESERVE13 +#define DAGB1_RESERVE13__RESERVE__SHIFT 0x0 +#define DAGB1_RESERVE13__RESERVE_MASK 0xFFFFFFFFL + + +// addressBlock: mmhub_dagb_dagbdec2 +//DAGB2_RDCLI0 +#define DAGB2_RDCLI0__VIRT_CHAN__SHIFT 0x0 +#define DAGB2_RDCLI0__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB2_RDCLI0__URG_HIGH__SHIFT 0x4 +#define DAGB2_RDCLI0__URG_LOW__SHIFT 0x8 +#define DAGB2_RDCLI0__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB2_RDCLI0__MAX_BW__SHIFT 0xd +#define DAGB2_RDCLI0__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB2_RDCLI0__MIN_BW__SHIFT 0x16 +#define DAGB2_RDCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB2_RDCLI0__MAX_OSD__SHIFT 0x1a +#define DAGB2_RDCLI0__VIRT_CHAN_MASK 0x00000007L +#define DAGB2_RDCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB2_RDCLI0__URG_HIGH_MASK 0x000000F0L +#define DAGB2_RDCLI0__URG_LOW_MASK 0x00000F00L +#define DAGB2_RDCLI0__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB2_RDCLI0__MAX_BW_MASK 0x001FE000L +#define DAGB2_RDCLI0__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB2_RDCLI0__MIN_BW_MASK 0x01C00000L +#define DAGB2_RDCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB2_RDCLI0__MAX_OSD_MASK 0xFC000000L +//DAGB2_RDCLI1 +#define DAGB2_RDCLI1__VIRT_CHAN__SHIFT 0x0 +#define DAGB2_RDCLI1__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB2_RDCLI1__URG_HIGH__SHIFT 0x4 +#define DAGB2_RDCLI1__URG_LOW__SHIFT 0x8 +#define DAGB2_RDCLI1__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB2_RDCLI1__MAX_BW__SHIFT 0xd +#define DAGB2_RDCLI1__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB2_RDCLI1__MIN_BW__SHIFT 0x16 +#define DAGB2_RDCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB2_RDCLI1__MAX_OSD__SHIFT 0x1a +#define DAGB2_RDCLI1__VIRT_CHAN_MASK 0x00000007L +#define DAGB2_RDCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB2_RDCLI1__URG_HIGH_MASK 0x000000F0L +#define DAGB2_RDCLI1__URG_LOW_MASK 0x00000F00L +#define DAGB2_RDCLI1__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB2_RDCLI1__MAX_BW_MASK 0x001FE000L +#define DAGB2_RDCLI1__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB2_RDCLI1__MIN_BW_MASK 0x01C00000L +#define DAGB2_RDCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB2_RDCLI1__MAX_OSD_MASK 0xFC000000L +//DAGB2_RDCLI2 +#define DAGB2_RDCLI2__VIRT_CHAN__SHIFT 0x0 +#define DAGB2_RDCLI2__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB2_RDCLI2__URG_HIGH__SHIFT 0x4 +#define DAGB2_RDCLI2__URG_LOW__SHIFT 0x8 +#define DAGB2_RDCLI2__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB2_RDCLI2__MAX_BW__SHIFT 0xd +#define DAGB2_RDCLI2__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB2_RDCLI2__MIN_BW__SHIFT 0x16 +#define DAGB2_RDCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB2_RDCLI2__MAX_OSD__SHIFT 0x1a +#define DAGB2_RDCLI2__VIRT_CHAN_MASK 0x00000007L +#define DAGB2_RDCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB2_RDCLI2__URG_HIGH_MASK 0x000000F0L +#define DAGB2_RDCLI2__URG_LOW_MASK 0x00000F00L +#define DAGB2_RDCLI2__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB2_RDCLI2__MAX_BW_MASK 0x001FE000L +#define DAGB2_RDCLI2__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB2_RDCLI2__MIN_BW_MASK 0x01C00000L +#define DAGB2_RDCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB2_RDCLI2__MAX_OSD_MASK 0xFC000000L +//DAGB2_RDCLI3 +#define DAGB2_RDCLI3__VIRT_CHAN__SHIFT 0x0 +#define DAGB2_RDCLI3__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB2_RDCLI3__URG_HIGH__SHIFT 0x4 +#define DAGB2_RDCLI3__URG_LOW__SHIFT 0x8 +#define DAGB2_RDCLI3__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB2_RDCLI3__MAX_BW__SHIFT 0xd +#define DAGB2_RDCLI3__MIN_BW_ENABLE__SHIFT 0x15 +#define 
DAGB2_RDCLI3__MIN_BW__SHIFT 0x16 +#define DAGB2_RDCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB2_RDCLI3__MAX_OSD__SHIFT 0x1a +#define DAGB2_RDCLI3__VIRT_CHAN_MASK 0x00000007L +#define DAGB2_RDCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB2_RDCLI3__URG_HIGH_MASK 0x000000F0L +#define DAGB2_RDCLI3__URG_LOW_MASK 0x00000F00L +#define DAGB2_RDCLI3__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB2_RDCLI3__MAX_BW_MASK 0x001FE000L +#define DAGB2_RDCLI3__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB2_RDCLI3__MIN_BW_MASK 0x01C00000L +#define DAGB2_RDCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB2_RDCLI3__MAX_OSD_MASK 0xFC000000L +//DAGB2_RDCLI4 +#define DAGB2_RDCLI4__VIRT_CHAN__SHIFT 0x0 +#define DAGB2_RDCLI4__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB2_RDCLI4__URG_HIGH__SHIFT 0x4 +#define DAGB2_RDCLI4__URG_LOW__SHIFT 0x8 +#define DAGB2_RDCLI4__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB2_RDCLI4__MAX_BW__SHIFT 0xd +#define DAGB2_RDCLI4__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB2_RDCLI4__MIN_BW__SHIFT 0x16 +#define DAGB2_RDCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB2_RDCLI4__MAX_OSD__SHIFT 0x1a +#define DAGB2_RDCLI4__VIRT_CHAN_MASK 0x00000007L +#define DAGB2_RDCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB2_RDCLI4__URG_HIGH_MASK 0x000000F0L +#define DAGB2_RDCLI4__URG_LOW_MASK 0x00000F00L +#define DAGB2_RDCLI4__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB2_RDCLI4__MAX_BW_MASK 0x001FE000L +#define DAGB2_RDCLI4__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB2_RDCLI4__MIN_BW_MASK 0x01C00000L +#define DAGB2_RDCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB2_RDCLI4__MAX_OSD_MASK 0xFC000000L +//DAGB2_RDCLI5 +#define DAGB2_RDCLI5__VIRT_CHAN__SHIFT 0x0 +#define DAGB2_RDCLI5__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB2_RDCLI5__URG_HIGH__SHIFT 0x4 +#define DAGB2_RDCLI5__URG_LOW__SHIFT 0x8 +#define DAGB2_RDCLI5__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB2_RDCLI5__MAX_BW__SHIFT 0xd +#define DAGB2_RDCLI5__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB2_RDCLI5__MIN_BW__SHIFT 0x16 +#define DAGB2_RDCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB2_RDCLI5__MAX_OSD__SHIFT 0x1a +#define DAGB2_RDCLI5__VIRT_CHAN_MASK 0x00000007L +#define DAGB2_RDCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB2_RDCLI5__URG_HIGH_MASK 0x000000F0L +#define DAGB2_RDCLI5__URG_LOW_MASK 0x00000F00L +#define DAGB2_RDCLI5__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB2_RDCLI5__MAX_BW_MASK 0x001FE000L +#define DAGB2_RDCLI5__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB2_RDCLI5__MIN_BW_MASK 0x01C00000L +#define DAGB2_RDCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB2_RDCLI5__MAX_OSD_MASK 0xFC000000L +//DAGB2_RDCLI6 +#define DAGB2_RDCLI6__VIRT_CHAN__SHIFT 0x0 +#define DAGB2_RDCLI6__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB2_RDCLI6__URG_HIGH__SHIFT 0x4 +#define DAGB2_RDCLI6__URG_LOW__SHIFT 0x8 +#define DAGB2_RDCLI6__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB2_RDCLI6__MAX_BW__SHIFT 0xd +#define DAGB2_RDCLI6__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB2_RDCLI6__MIN_BW__SHIFT 0x16 +#define DAGB2_RDCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB2_RDCLI6__MAX_OSD__SHIFT 0x1a +#define DAGB2_RDCLI6__VIRT_CHAN_MASK 0x00000007L +#define DAGB2_RDCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB2_RDCLI6__URG_HIGH_MASK 0x000000F0L +#define DAGB2_RDCLI6__URG_LOW_MASK 0x00000F00L +#define DAGB2_RDCLI6__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB2_RDCLI6__MAX_BW_MASK 0x001FE000L +#define DAGB2_RDCLI6__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB2_RDCLI6__MIN_BW_MASK 0x01C00000L +#define DAGB2_RDCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L 
+#define DAGB2_RDCLI6__MAX_OSD_MASK 0xFC000000L +//DAGB2_RDCLI7 +#define DAGB2_RDCLI7__VIRT_CHAN__SHIFT 0x0 +#define DAGB2_RDCLI7__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB2_RDCLI7__URG_HIGH__SHIFT 0x4 +#define DAGB2_RDCLI7__URG_LOW__SHIFT 0x8 +#define DAGB2_RDCLI7__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB2_RDCLI7__MAX_BW__SHIFT 0xd +#define DAGB2_RDCLI7__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB2_RDCLI7__MIN_BW__SHIFT 0x16 +#define DAGB2_RDCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB2_RDCLI7__MAX_OSD__SHIFT 0x1a +#define DAGB2_RDCLI7__VIRT_CHAN_MASK 0x00000007L +#define DAGB2_RDCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB2_RDCLI7__URG_HIGH_MASK 0x000000F0L +#define DAGB2_RDCLI7__URG_LOW_MASK 0x00000F00L +#define DAGB2_RDCLI7__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB2_RDCLI7__MAX_BW_MASK 0x001FE000L +#define DAGB2_RDCLI7__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB2_RDCLI7__MIN_BW_MASK 0x01C00000L +#define DAGB2_RDCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB2_RDCLI7__MAX_OSD_MASK 0xFC000000L +//DAGB2_RDCLI8 +#define DAGB2_RDCLI8__VIRT_CHAN__SHIFT 0x0 +#define DAGB2_RDCLI8__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB2_RDCLI8__URG_HIGH__SHIFT 0x4 +#define DAGB2_RDCLI8__URG_LOW__SHIFT 0x8 +#define DAGB2_RDCLI8__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB2_RDCLI8__MAX_BW__SHIFT 0xd +#define DAGB2_RDCLI8__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB2_RDCLI8__MIN_BW__SHIFT 0x16 +#define DAGB2_RDCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB2_RDCLI8__MAX_OSD__SHIFT 0x1a +#define DAGB2_RDCLI8__VIRT_CHAN_MASK 0x00000007L +#define DAGB2_RDCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB2_RDCLI8__URG_HIGH_MASK 0x000000F0L +#define DAGB2_RDCLI8__URG_LOW_MASK 0x00000F00L +#define DAGB2_RDCLI8__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB2_RDCLI8__MAX_BW_MASK 0x001FE000L +#define DAGB2_RDCLI8__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB2_RDCLI8__MIN_BW_MASK 0x01C00000L +#define DAGB2_RDCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB2_RDCLI8__MAX_OSD_MASK 0xFC000000L +//DAGB2_RDCLI9 +#define DAGB2_RDCLI9__VIRT_CHAN__SHIFT 0x0 +#define DAGB2_RDCLI9__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB2_RDCLI9__URG_HIGH__SHIFT 0x4 +#define DAGB2_RDCLI9__URG_LOW__SHIFT 0x8 +#define DAGB2_RDCLI9__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB2_RDCLI9__MAX_BW__SHIFT 0xd +#define DAGB2_RDCLI9__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB2_RDCLI9__MIN_BW__SHIFT 0x16 +#define DAGB2_RDCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB2_RDCLI9__MAX_OSD__SHIFT 0x1a +#define DAGB2_RDCLI9__VIRT_CHAN_MASK 0x00000007L +#define DAGB2_RDCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB2_RDCLI9__URG_HIGH_MASK 0x000000F0L +#define DAGB2_RDCLI9__URG_LOW_MASK 0x00000F00L +#define DAGB2_RDCLI9__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB2_RDCLI9__MAX_BW_MASK 0x001FE000L +#define DAGB2_RDCLI9__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB2_RDCLI9__MIN_BW_MASK 0x01C00000L +#define DAGB2_RDCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB2_RDCLI9__MAX_OSD_MASK 0xFC000000L +//DAGB2_RDCLI10 +#define DAGB2_RDCLI10__VIRT_CHAN__SHIFT 0x0 +#define DAGB2_RDCLI10__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB2_RDCLI10__URG_HIGH__SHIFT 0x4 +#define DAGB2_RDCLI10__URG_LOW__SHIFT 0x8 +#define DAGB2_RDCLI10__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB2_RDCLI10__MAX_BW__SHIFT 0xd +#define DAGB2_RDCLI10__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB2_RDCLI10__MIN_BW__SHIFT 0x16 +#define DAGB2_RDCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB2_RDCLI10__MAX_OSD__SHIFT 0x1a +#define DAGB2_RDCLI10__VIRT_CHAN_MASK 0x00000007L +#define 
DAGB2_RDCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB2_RDCLI10__URG_HIGH_MASK 0x000000F0L +#define DAGB2_RDCLI10__URG_LOW_MASK 0x00000F00L +#define DAGB2_RDCLI10__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB2_RDCLI10__MAX_BW_MASK 0x001FE000L +#define DAGB2_RDCLI10__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB2_RDCLI10__MIN_BW_MASK 0x01C00000L +#define DAGB2_RDCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB2_RDCLI10__MAX_OSD_MASK 0xFC000000L +//DAGB2_RDCLI11 +#define DAGB2_RDCLI11__VIRT_CHAN__SHIFT 0x0 +#define DAGB2_RDCLI11__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB2_RDCLI11__URG_HIGH__SHIFT 0x4 +#define DAGB2_RDCLI11__URG_LOW__SHIFT 0x8 +#define DAGB2_RDCLI11__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB2_RDCLI11__MAX_BW__SHIFT 0xd +#define DAGB2_RDCLI11__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB2_RDCLI11__MIN_BW__SHIFT 0x16 +#define DAGB2_RDCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB2_RDCLI11__MAX_OSD__SHIFT 0x1a +#define DAGB2_RDCLI11__VIRT_CHAN_MASK 0x00000007L +#define DAGB2_RDCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB2_RDCLI11__URG_HIGH_MASK 0x000000F0L +#define DAGB2_RDCLI11__URG_LOW_MASK 0x00000F00L +#define DAGB2_RDCLI11__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB2_RDCLI11__MAX_BW_MASK 0x001FE000L +#define DAGB2_RDCLI11__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB2_RDCLI11__MIN_BW_MASK 0x01C00000L +#define DAGB2_RDCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB2_RDCLI11__MAX_OSD_MASK 0xFC000000L +//DAGB2_RDCLI12 +#define DAGB2_RDCLI12__VIRT_CHAN__SHIFT 0x0 +#define DAGB2_RDCLI12__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB2_RDCLI12__URG_HIGH__SHIFT 0x4 +#define DAGB2_RDCLI12__URG_LOW__SHIFT 0x8 +#define DAGB2_RDCLI12__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB2_RDCLI12__MAX_BW__SHIFT 0xd +#define DAGB2_RDCLI12__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB2_RDCLI12__MIN_BW__SHIFT 0x16 +#define DAGB2_RDCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB2_RDCLI12__MAX_OSD__SHIFT 0x1a +#define DAGB2_RDCLI12__VIRT_CHAN_MASK 0x00000007L +#define DAGB2_RDCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB2_RDCLI12__URG_HIGH_MASK 0x000000F0L +#define DAGB2_RDCLI12__URG_LOW_MASK 0x00000F00L +#define DAGB2_RDCLI12__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB2_RDCLI12__MAX_BW_MASK 0x001FE000L +#define DAGB2_RDCLI12__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB2_RDCLI12__MIN_BW_MASK 0x01C00000L +#define DAGB2_RDCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB2_RDCLI12__MAX_OSD_MASK 0xFC000000L +//DAGB2_RDCLI13 +#define DAGB2_RDCLI13__VIRT_CHAN__SHIFT 0x0 +#define DAGB2_RDCLI13__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB2_RDCLI13__URG_HIGH__SHIFT 0x4 +#define DAGB2_RDCLI13__URG_LOW__SHIFT 0x8 +#define DAGB2_RDCLI13__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB2_RDCLI13__MAX_BW__SHIFT 0xd +#define DAGB2_RDCLI13__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB2_RDCLI13__MIN_BW__SHIFT 0x16 +#define DAGB2_RDCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB2_RDCLI13__MAX_OSD__SHIFT 0x1a +#define DAGB2_RDCLI13__VIRT_CHAN_MASK 0x00000007L +#define DAGB2_RDCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB2_RDCLI13__URG_HIGH_MASK 0x000000F0L +#define DAGB2_RDCLI13__URG_LOW_MASK 0x00000F00L +#define DAGB2_RDCLI13__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB2_RDCLI13__MAX_BW_MASK 0x001FE000L +#define DAGB2_RDCLI13__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB2_RDCLI13__MIN_BW_MASK 0x01C00000L +#define DAGB2_RDCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB2_RDCLI13__MAX_OSD_MASK 0xFC000000L +//DAGB2_RDCLI14 +#define DAGB2_RDCLI14__VIRT_CHAN__SHIFT 0x0 +#define 
DAGB2_RDCLI14__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB2_RDCLI14__URG_HIGH__SHIFT 0x4 +#define DAGB2_RDCLI14__URG_LOW__SHIFT 0x8 +#define DAGB2_RDCLI14__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB2_RDCLI14__MAX_BW__SHIFT 0xd +#define DAGB2_RDCLI14__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB2_RDCLI14__MIN_BW__SHIFT 0x16 +#define DAGB2_RDCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB2_RDCLI14__MAX_OSD__SHIFT 0x1a +#define DAGB2_RDCLI14__VIRT_CHAN_MASK 0x00000007L +#define DAGB2_RDCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB2_RDCLI14__URG_HIGH_MASK 0x000000F0L +#define DAGB2_RDCLI14__URG_LOW_MASK 0x00000F00L +#define DAGB2_RDCLI14__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB2_RDCLI14__MAX_BW_MASK 0x001FE000L +#define DAGB2_RDCLI14__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB2_RDCLI14__MIN_BW_MASK 0x01C00000L +#define DAGB2_RDCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB2_RDCLI14__MAX_OSD_MASK 0xFC000000L +//DAGB2_RDCLI15 +#define DAGB2_RDCLI15__VIRT_CHAN__SHIFT 0x0 +#define DAGB2_RDCLI15__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB2_RDCLI15__URG_HIGH__SHIFT 0x4 +#define DAGB2_RDCLI15__URG_LOW__SHIFT 0x8 +#define DAGB2_RDCLI15__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB2_RDCLI15__MAX_BW__SHIFT 0xd +#define DAGB2_RDCLI15__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB2_RDCLI15__MIN_BW__SHIFT 0x16 +#define DAGB2_RDCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB2_RDCLI15__MAX_OSD__SHIFT 0x1a +#define DAGB2_RDCLI15__VIRT_CHAN_MASK 0x00000007L +#define DAGB2_RDCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB2_RDCLI15__URG_HIGH_MASK 0x000000F0L +#define DAGB2_RDCLI15__URG_LOW_MASK 0x00000F00L +#define DAGB2_RDCLI15__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB2_RDCLI15__MAX_BW_MASK 0x001FE000L +#define DAGB2_RDCLI15__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB2_RDCLI15__MIN_BW_MASK 0x01C00000L +#define DAGB2_RDCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB2_RDCLI15__MAX_OSD_MASK 0xFC000000L +//DAGB2_RD_CNTL +#define DAGB2_RD_CNTL__SCLK_FREQ__SHIFT 0x0 +#define DAGB2_RD_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4 +#define DAGB2_RD_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa +#define DAGB2_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10 +#define DAGB2_RD_CNTL__IO_LEVEL__SHIFT 0x11 +#define DAGB2_RD_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14 +#define DAGB2_RD_CNTL__SHARE_VC_NUM__SHIFT 0x17 +#define DAGB2_RD_CNTL__SCLK_FREQ_MASK 0x0000000FL +#define DAGB2_RD_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L +#define DAGB2_RD_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L +#define DAGB2_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L +#define DAGB2_RD_CNTL__IO_LEVEL_MASK 0x000E0000L +#define DAGB2_RD_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L +#define DAGB2_RD_CNTL__SHARE_VC_NUM_MASK 0x03800000L +//DAGB2_RD_GMI_CNTL +#define DAGB2_RD_GMI_CNTL__EA_CREDIT__SHIFT 0x0 +#define DAGB2_RD_GMI_CNTL__LEVEL__SHIFT 0x6 +#define DAGB2_RD_GMI_CNTL__MAX_BURST__SHIFT 0x9 +#define DAGB2_RD_GMI_CNTL__LAZY_TIMER__SHIFT 0xd +#define DAGB2_RD_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL +#define DAGB2_RD_GMI_CNTL__LEVEL_MASK 0x000001C0L +#define DAGB2_RD_GMI_CNTL__MAX_BURST_MASK 0x00001E00L +#define DAGB2_RD_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L +//DAGB2_RD_ADDR_DAGB +#define DAGB2_RD_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0 +#define DAGB2_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3 +#define DAGB2_RD_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6 +#define DAGB2_RD_ADDR_DAGB__WHOAMI__SHIFT 0x7 +#define DAGB2_RD_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L +#define DAGB2_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L +#define 
DAGB2_RD_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L +#define DAGB2_RD_ADDR_DAGB__WHOAMI_MASK 0x00001F80L +//DAGB2_RD_OUTPUT_DAGB_MAX_BURST +#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0 +#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4 +#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8 +#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc +#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10 +#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14 +#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18 +#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c +#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL +#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L +#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L +#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L +#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L +#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L +#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L +#define DAGB2_RD_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L +//DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER +#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0 +#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4 +#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8 +#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc +#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10 +#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14 +#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18 +#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c +#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL +#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L +#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L +#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L +#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L +#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L +#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L +#define DAGB2_RD_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L +//DAGB2_RD_CGTT_CLK_CTRL +#define DAGB2_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB2_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB2_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB2_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB2_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB2_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB2_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB2_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB2_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB2_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB2_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB2_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB2_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB2_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB2_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB2_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB2_L1TLB_RD_CGTT_CLK_CTRL +#define DAGB2_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB2_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB2_L1TLB_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB2_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB2_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define 
DAGB2_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB2_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB2_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB2_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB2_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB2_L1TLB_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB2_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB2_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB2_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB2_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB2_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB2_ATCVM_RD_CGTT_CLK_CTRL +#define DAGB2_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB2_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB2_ATCVM_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB2_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB2_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB2_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB2_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB2_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB2_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB2_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB2_ATCVM_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB2_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB2_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB2_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB2_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB2_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB2_RD_ADDR_DAGB_MAX_BURST0 +#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0 +#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4 +#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8 +#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc +#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10 +#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14 +#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18 +#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c +#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL +#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L +#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L +#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L +#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L +#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L +#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L +#define DAGB2_RD_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L +//DAGB2_RD_ADDR_DAGB_LAZY_TIMER0 +#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0 +#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4 +#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8 +#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc +#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10 +#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14 +#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18 +#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c +#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL +#define 
DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L +#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L +#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L +#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L +#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L +#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L +#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L +//DAGB2_RD_ADDR_DAGB_MAX_BURST1 +#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0 +#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4 +#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8 +#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc +#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10 +#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14 +#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18 +#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c +#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL +#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L +#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L +#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L +#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L +#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L +#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L +#define DAGB2_RD_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L +//DAGB2_RD_ADDR_DAGB_LAZY_TIMER1 +#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0 +#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4 +#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8 +#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc +#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10 +#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14 +#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18 +#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c +#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL +#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L +#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L +#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L +#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L +#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L +#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L +#define DAGB2_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L +//DAGB2_RD_VC0_CNTL +#define DAGB2_RD_VC0_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB2_RD_VC0_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB2_RD_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB2_RD_VC0_CNTL__MAX_BW__SHIFT 0xc +#define DAGB2_RD_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB2_RD_VC0_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB2_RD_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB2_RD_VC0_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB2_RD_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB2_RD_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB2_RD_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB2_RD_VC0_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB2_RD_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB2_RD_VC0_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB2_RD_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB2_RD_VC0_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB2_RD_VC1_CNTL +#define DAGB2_RD_VC1_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB2_RD_VC1_CNTL__EA_CREDIT__SHIFT 0x5 
+#define DAGB2_RD_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB2_RD_VC1_CNTL__MAX_BW__SHIFT 0xc +#define DAGB2_RD_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB2_RD_VC1_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB2_RD_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB2_RD_VC1_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB2_RD_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB2_RD_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB2_RD_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB2_RD_VC1_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB2_RD_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB2_RD_VC1_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB2_RD_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB2_RD_VC1_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB2_RD_VC2_CNTL +#define DAGB2_RD_VC2_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB2_RD_VC2_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB2_RD_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB2_RD_VC2_CNTL__MAX_BW__SHIFT 0xc +#define DAGB2_RD_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB2_RD_VC2_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB2_RD_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB2_RD_VC2_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB2_RD_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB2_RD_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB2_RD_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB2_RD_VC2_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB2_RD_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB2_RD_VC2_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB2_RD_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB2_RD_VC2_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB2_RD_VC3_CNTL +#define DAGB2_RD_VC3_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB2_RD_VC3_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB2_RD_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB2_RD_VC3_CNTL__MAX_BW__SHIFT 0xc +#define DAGB2_RD_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB2_RD_VC3_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB2_RD_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB2_RD_VC3_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB2_RD_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB2_RD_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB2_RD_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB2_RD_VC3_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB2_RD_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB2_RD_VC3_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB2_RD_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB2_RD_VC3_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB2_RD_VC4_CNTL +#define DAGB2_RD_VC4_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB2_RD_VC4_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB2_RD_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB2_RD_VC4_CNTL__MAX_BW__SHIFT 0xc +#define DAGB2_RD_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB2_RD_VC4_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB2_RD_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB2_RD_VC4_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB2_RD_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB2_RD_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB2_RD_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB2_RD_VC4_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB2_RD_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB2_RD_VC4_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB2_RD_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB2_RD_VC4_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB2_RD_VC5_CNTL +#define DAGB2_RD_VC5_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB2_RD_VC5_CNTL__EA_CREDIT__SHIFT 0x5 +#define 
DAGB2_RD_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB2_RD_VC5_CNTL__MAX_BW__SHIFT 0xc +#define DAGB2_RD_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB2_RD_VC5_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB2_RD_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB2_RD_VC5_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB2_RD_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB2_RD_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB2_RD_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB2_RD_VC5_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB2_RD_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB2_RD_VC5_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB2_RD_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB2_RD_VC5_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB2_RD_VC6_CNTL +#define DAGB2_RD_VC6_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB2_RD_VC6_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB2_RD_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB2_RD_VC6_CNTL__MAX_BW__SHIFT 0xc +#define DAGB2_RD_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB2_RD_VC6_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB2_RD_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB2_RD_VC6_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB2_RD_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB2_RD_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB2_RD_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB2_RD_VC6_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB2_RD_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB2_RD_VC6_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB2_RD_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB2_RD_VC6_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB2_RD_VC7_CNTL +#define DAGB2_RD_VC7_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB2_RD_VC7_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB2_RD_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB2_RD_VC7_CNTL__MAX_BW__SHIFT 0xc +#define DAGB2_RD_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB2_RD_VC7_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB2_RD_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB2_RD_VC7_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB2_RD_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB2_RD_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB2_RD_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB2_RD_VC7_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB2_RD_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB2_RD_VC7_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB2_RD_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB2_RD_VC7_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB2_RD_CNTL_MISC +#define DAGB2_RD_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0 +#define DAGB2_RD_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6 +#define DAGB2_RD_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd +#define DAGB2_RD_CNTL_MISC__STOR_CC_LEGACY_MODE__SHIFT 0x13 +#define DAGB2_RD_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14 +#define DAGB2_RD_CNTL_MISC__UTCL2_CID__SHIFT 0x15 +#define DAGB2_RD_CNTL_MISC__RDRET_FIFO_CREDITS__SHIFT 0x1a +#define DAGB2_RD_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL +#define DAGB2_RD_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L +#define DAGB2_RD_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L +#define DAGB2_RD_CNTL_MISC__STOR_CC_LEGACY_MODE_MASK 0x00080000L +#define DAGB2_RD_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L +#define DAGB2_RD_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L +#define DAGB2_RD_CNTL_MISC__RDRET_FIFO_CREDITS_MASK 0xFC000000L +//DAGB2_RD_TLB_CREDIT +#define DAGB2_RD_TLB_CREDIT__TLB0__SHIFT 0x0 +#define DAGB2_RD_TLB_CREDIT__TLB1__SHIFT 0x5 +#define DAGB2_RD_TLB_CREDIT__TLB2__SHIFT 0xa +#define 
DAGB2_RD_TLB_CREDIT__TLB3__SHIFT 0xf +#define DAGB2_RD_TLB_CREDIT__TLB4__SHIFT 0x14 +#define DAGB2_RD_TLB_CREDIT__TLB5__SHIFT 0x19 +#define DAGB2_RD_TLB_CREDIT__TLB0_MASK 0x0000001FL +#define DAGB2_RD_TLB_CREDIT__TLB1_MASK 0x000003E0L +#define DAGB2_RD_TLB_CREDIT__TLB2_MASK 0x00007C00L +#define DAGB2_RD_TLB_CREDIT__TLB3_MASK 0x000F8000L +#define DAGB2_RD_TLB_CREDIT__TLB4_MASK 0x01F00000L +#define DAGB2_RD_TLB_CREDIT__TLB5_MASK 0x3E000000L +//DAGB2_RDCLI_ASK_PENDING +#define DAGB2_RDCLI_ASK_PENDING__BUSY__SHIFT 0x0 +#define DAGB2_RDCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB2_RDCLI_GO_PENDING +#define DAGB2_RDCLI_GO_PENDING__BUSY__SHIFT 0x0 +#define DAGB2_RDCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB2_RDCLI_GBLSEND_PENDING +#define DAGB2_RDCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0 +#define DAGB2_RDCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB2_RDCLI_TLB_PENDING +#define DAGB2_RDCLI_TLB_PENDING__BUSY__SHIFT 0x0 +#define DAGB2_RDCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB2_RDCLI_OARB_PENDING +#define DAGB2_RDCLI_OARB_PENDING__BUSY__SHIFT 0x0 +#define DAGB2_RDCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB2_RDCLI_OSD_PENDING +#define DAGB2_RDCLI_OSD_PENDING__BUSY__SHIFT 0x0 +#define DAGB2_RDCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB2_WRCLI0 +#define DAGB2_WRCLI0__VIRT_CHAN__SHIFT 0x0 +#define DAGB2_WRCLI0__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB2_WRCLI0__URG_HIGH__SHIFT 0x4 +#define DAGB2_WRCLI0__URG_LOW__SHIFT 0x8 +#define DAGB2_WRCLI0__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB2_WRCLI0__MAX_BW__SHIFT 0xd +#define DAGB2_WRCLI0__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB2_WRCLI0__MIN_BW__SHIFT 0x16 +#define DAGB2_WRCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB2_WRCLI0__MAX_OSD__SHIFT 0x1a +#define DAGB2_WRCLI0__VIRT_CHAN_MASK 0x00000007L +#define DAGB2_WRCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB2_WRCLI0__URG_HIGH_MASK 0x000000F0L +#define DAGB2_WRCLI0__URG_LOW_MASK 0x00000F00L +#define DAGB2_WRCLI0__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB2_WRCLI0__MAX_BW_MASK 0x001FE000L +#define DAGB2_WRCLI0__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB2_WRCLI0__MIN_BW_MASK 0x01C00000L +#define DAGB2_WRCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB2_WRCLI0__MAX_OSD_MASK 0xFC000000L +//DAGB2_WRCLI1 +#define DAGB2_WRCLI1__VIRT_CHAN__SHIFT 0x0 +#define DAGB2_WRCLI1__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB2_WRCLI1__URG_HIGH__SHIFT 0x4 +#define DAGB2_WRCLI1__URG_LOW__SHIFT 0x8 +#define DAGB2_WRCLI1__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB2_WRCLI1__MAX_BW__SHIFT 0xd +#define DAGB2_WRCLI1__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB2_WRCLI1__MIN_BW__SHIFT 0x16 +#define DAGB2_WRCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB2_WRCLI1__MAX_OSD__SHIFT 0x1a +#define DAGB2_WRCLI1__VIRT_CHAN_MASK 0x00000007L +#define DAGB2_WRCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB2_WRCLI1__URG_HIGH_MASK 0x000000F0L +#define DAGB2_WRCLI1__URG_LOW_MASK 0x00000F00L +#define DAGB2_WRCLI1__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB2_WRCLI1__MAX_BW_MASK 0x001FE000L +#define DAGB2_WRCLI1__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB2_WRCLI1__MIN_BW_MASK 0x01C00000L +#define DAGB2_WRCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB2_WRCLI1__MAX_OSD_MASK 0xFC000000L +//DAGB2_WRCLI2 +#define DAGB2_WRCLI2__VIRT_CHAN__SHIFT 0x0 +#define DAGB2_WRCLI2__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB2_WRCLI2__URG_HIGH__SHIFT 0x4 +#define DAGB2_WRCLI2__URG_LOW__SHIFT 0x8 +#define DAGB2_WRCLI2__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB2_WRCLI2__MAX_BW__SHIFT 0xd +#define DAGB2_WRCLI2__MIN_BW_ENABLE__SHIFT 
0x15 +#define DAGB2_WRCLI2__MIN_BW__SHIFT 0x16 +#define DAGB2_WRCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB2_WRCLI2__MAX_OSD__SHIFT 0x1a +#define DAGB2_WRCLI2__VIRT_CHAN_MASK 0x00000007L +#define DAGB2_WRCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB2_WRCLI2__URG_HIGH_MASK 0x000000F0L +#define DAGB2_WRCLI2__URG_LOW_MASK 0x00000F00L +#define DAGB2_WRCLI2__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB2_WRCLI2__MAX_BW_MASK 0x001FE000L +#define DAGB2_WRCLI2__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB2_WRCLI2__MIN_BW_MASK 0x01C00000L +#define DAGB2_WRCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB2_WRCLI2__MAX_OSD_MASK 0xFC000000L +//DAGB2_WRCLI3 +#define DAGB2_WRCLI3__VIRT_CHAN__SHIFT 0x0 +#define DAGB2_WRCLI3__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB2_WRCLI3__URG_HIGH__SHIFT 0x4 +#define DAGB2_WRCLI3__URG_LOW__SHIFT 0x8 +#define DAGB2_WRCLI3__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB2_WRCLI3__MAX_BW__SHIFT 0xd +#define DAGB2_WRCLI3__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB2_WRCLI3__MIN_BW__SHIFT 0x16 +#define DAGB2_WRCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB2_WRCLI3__MAX_OSD__SHIFT 0x1a +#define DAGB2_WRCLI3__VIRT_CHAN_MASK 0x00000007L +#define DAGB2_WRCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB2_WRCLI3__URG_HIGH_MASK 0x000000F0L +#define DAGB2_WRCLI3__URG_LOW_MASK 0x00000F00L +#define DAGB2_WRCLI3__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB2_WRCLI3__MAX_BW_MASK 0x001FE000L +#define DAGB2_WRCLI3__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB2_WRCLI3__MIN_BW_MASK 0x01C00000L +#define DAGB2_WRCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB2_WRCLI3__MAX_OSD_MASK 0xFC000000L +//DAGB2_WRCLI4 +#define DAGB2_WRCLI4__VIRT_CHAN__SHIFT 0x0 +#define DAGB2_WRCLI4__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB2_WRCLI4__URG_HIGH__SHIFT 0x4 +#define DAGB2_WRCLI4__URG_LOW__SHIFT 0x8 +#define DAGB2_WRCLI4__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB2_WRCLI4__MAX_BW__SHIFT 0xd +#define DAGB2_WRCLI4__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB2_WRCLI4__MIN_BW__SHIFT 0x16 +#define DAGB2_WRCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB2_WRCLI4__MAX_OSD__SHIFT 0x1a +#define DAGB2_WRCLI4__VIRT_CHAN_MASK 0x00000007L +#define DAGB2_WRCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB2_WRCLI4__URG_HIGH_MASK 0x000000F0L +#define DAGB2_WRCLI4__URG_LOW_MASK 0x00000F00L +#define DAGB2_WRCLI4__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB2_WRCLI4__MAX_BW_MASK 0x001FE000L +#define DAGB2_WRCLI4__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB2_WRCLI4__MIN_BW_MASK 0x01C00000L +#define DAGB2_WRCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB2_WRCLI4__MAX_OSD_MASK 0xFC000000L +//DAGB2_WRCLI5 +#define DAGB2_WRCLI5__VIRT_CHAN__SHIFT 0x0 +#define DAGB2_WRCLI5__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB2_WRCLI5__URG_HIGH__SHIFT 0x4 +#define DAGB2_WRCLI5__URG_LOW__SHIFT 0x8 +#define DAGB2_WRCLI5__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB2_WRCLI5__MAX_BW__SHIFT 0xd +#define DAGB2_WRCLI5__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB2_WRCLI5__MIN_BW__SHIFT 0x16 +#define DAGB2_WRCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB2_WRCLI5__MAX_OSD__SHIFT 0x1a +#define DAGB2_WRCLI5__VIRT_CHAN_MASK 0x00000007L +#define DAGB2_WRCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB2_WRCLI5__URG_HIGH_MASK 0x000000F0L +#define DAGB2_WRCLI5__URG_LOW_MASK 0x00000F00L +#define DAGB2_WRCLI5__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB2_WRCLI5__MAX_BW_MASK 0x001FE000L +#define DAGB2_WRCLI5__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB2_WRCLI5__MIN_BW_MASK 0x01C00000L +#define DAGB2_WRCLI5__OSD_LIMITER_ENABLE_MASK 
0x02000000L +#define DAGB2_WRCLI5__MAX_OSD_MASK 0xFC000000L +//DAGB2_WRCLI6 +#define DAGB2_WRCLI6__VIRT_CHAN__SHIFT 0x0 +#define DAGB2_WRCLI6__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB2_WRCLI6__URG_HIGH__SHIFT 0x4 +#define DAGB2_WRCLI6__URG_LOW__SHIFT 0x8 +#define DAGB2_WRCLI6__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB2_WRCLI6__MAX_BW__SHIFT 0xd +#define DAGB2_WRCLI6__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB2_WRCLI6__MIN_BW__SHIFT 0x16 +#define DAGB2_WRCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB2_WRCLI6__MAX_OSD__SHIFT 0x1a +#define DAGB2_WRCLI6__VIRT_CHAN_MASK 0x00000007L +#define DAGB2_WRCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB2_WRCLI6__URG_HIGH_MASK 0x000000F0L +#define DAGB2_WRCLI6__URG_LOW_MASK 0x00000F00L +#define DAGB2_WRCLI6__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB2_WRCLI6__MAX_BW_MASK 0x001FE000L +#define DAGB2_WRCLI6__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB2_WRCLI6__MIN_BW_MASK 0x01C00000L +#define DAGB2_WRCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB2_WRCLI6__MAX_OSD_MASK 0xFC000000L +//DAGB2_WRCLI7 +#define DAGB2_WRCLI7__VIRT_CHAN__SHIFT 0x0 +#define DAGB2_WRCLI7__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB2_WRCLI7__URG_HIGH__SHIFT 0x4 +#define DAGB2_WRCLI7__URG_LOW__SHIFT 0x8 +#define DAGB2_WRCLI7__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB2_WRCLI7__MAX_BW__SHIFT 0xd +#define DAGB2_WRCLI7__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB2_WRCLI7__MIN_BW__SHIFT 0x16 +#define DAGB2_WRCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB2_WRCLI7__MAX_OSD__SHIFT 0x1a +#define DAGB2_WRCLI7__VIRT_CHAN_MASK 0x00000007L +#define DAGB2_WRCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB2_WRCLI7__URG_HIGH_MASK 0x000000F0L +#define DAGB2_WRCLI7__URG_LOW_MASK 0x00000F00L +#define DAGB2_WRCLI7__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB2_WRCLI7__MAX_BW_MASK 0x001FE000L +#define DAGB2_WRCLI7__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB2_WRCLI7__MIN_BW_MASK 0x01C00000L +#define DAGB2_WRCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB2_WRCLI7__MAX_OSD_MASK 0xFC000000L +//DAGB2_WRCLI8 +#define DAGB2_WRCLI8__VIRT_CHAN__SHIFT 0x0 +#define DAGB2_WRCLI8__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB2_WRCLI8__URG_HIGH__SHIFT 0x4 +#define DAGB2_WRCLI8__URG_LOW__SHIFT 0x8 +#define DAGB2_WRCLI8__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB2_WRCLI8__MAX_BW__SHIFT 0xd +#define DAGB2_WRCLI8__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB2_WRCLI8__MIN_BW__SHIFT 0x16 +#define DAGB2_WRCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB2_WRCLI8__MAX_OSD__SHIFT 0x1a +#define DAGB2_WRCLI8__VIRT_CHAN_MASK 0x00000007L +#define DAGB2_WRCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB2_WRCLI8__URG_HIGH_MASK 0x000000F0L +#define DAGB2_WRCLI8__URG_LOW_MASK 0x00000F00L +#define DAGB2_WRCLI8__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB2_WRCLI8__MAX_BW_MASK 0x001FE000L +#define DAGB2_WRCLI8__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB2_WRCLI8__MIN_BW_MASK 0x01C00000L +#define DAGB2_WRCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB2_WRCLI8__MAX_OSD_MASK 0xFC000000L +//DAGB2_WRCLI9 +#define DAGB2_WRCLI9__VIRT_CHAN__SHIFT 0x0 +#define DAGB2_WRCLI9__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB2_WRCLI9__URG_HIGH__SHIFT 0x4 +#define DAGB2_WRCLI9__URG_LOW__SHIFT 0x8 +#define DAGB2_WRCLI9__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB2_WRCLI9__MAX_BW__SHIFT 0xd +#define DAGB2_WRCLI9__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB2_WRCLI9__MIN_BW__SHIFT 0x16 +#define DAGB2_WRCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB2_WRCLI9__MAX_OSD__SHIFT 0x1a +#define DAGB2_WRCLI9__VIRT_CHAN_MASK 0x00000007L +#define 
DAGB2_WRCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB2_WRCLI9__URG_HIGH_MASK 0x000000F0L +#define DAGB2_WRCLI9__URG_LOW_MASK 0x00000F00L +#define DAGB2_WRCLI9__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB2_WRCLI9__MAX_BW_MASK 0x001FE000L +#define DAGB2_WRCLI9__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB2_WRCLI9__MIN_BW_MASK 0x01C00000L +#define DAGB2_WRCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB2_WRCLI9__MAX_OSD_MASK 0xFC000000L +//DAGB2_WRCLI10 +#define DAGB2_WRCLI10__VIRT_CHAN__SHIFT 0x0 +#define DAGB2_WRCLI10__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB2_WRCLI10__URG_HIGH__SHIFT 0x4 +#define DAGB2_WRCLI10__URG_LOW__SHIFT 0x8 +#define DAGB2_WRCLI10__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB2_WRCLI10__MAX_BW__SHIFT 0xd +#define DAGB2_WRCLI10__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB2_WRCLI10__MIN_BW__SHIFT 0x16 +#define DAGB2_WRCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB2_WRCLI10__MAX_OSD__SHIFT 0x1a +#define DAGB2_WRCLI10__VIRT_CHAN_MASK 0x00000007L +#define DAGB2_WRCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB2_WRCLI10__URG_HIGH_MASK 0x000000F0L +#define DAGB2_WRCLI10__URG_LOW_MASK 0x00000F00L +#define DAGB2_WRCLI10__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB2_WRCLI10__MAX_BW_MASK 0x001FE000L +#define DAGB2_WRCLI10__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB2_WRCLI10__MIN_BW_MASK 0x01C00000L +#define DAGB2_WRCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB2_WRCLI10__MAX_OSD_MASK 0xFC000000L +//DAGB2_WRCLI11 +#define DAGB2_WRCLI11__VIRT_CHAN__SHIFT 0x0 +#define DAGB2_WRCLI11__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB2_WRCLI11__URG_HIGH__SHIFT 0x4 +#define DAGB2_WRCLI11__URG_LOW__SHIFT 0x8 +#define DAGB2_WRCLI11__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB2_WRCLI11__MAX_BW__SHIFT 0xd +#define DAGB2_WRCLI11__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB2_WRCLI11__MIN_BW__SHIFT 0x16 +#define DAGB2_WRCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB2_WRCLI11__MAX_OSD__SHIFT 0x1a +#define DAGB2_WRCLI11__VIRT_CHAN_MASK 0x00000007L +#define DAGB2_WRCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB2_WRCLI11__URG_HIGH_MASK 0x000000F0L +#define DAGB2_WRCLI11__URG_LOW_MASK 0x00000F00L +#define DAGB2_WRCLI11__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB2_WRCLI11__MAX_BW_MASK 0x001FE000L +#define DAGB2_WRCLI11__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB2_WRCLI11__MIN_BW_MASK 0x01C00000L +#define DAGB2_WRCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB2_WRCLI11__MAX_OSD_MASK 0xFC000000L +//DAGB2_WRCLI12 +#define DAGB2_WRCLI12__VIRT_CHAN__SHIFT 0x0 +#define DAGB2_WRCLI12__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB2_WRCLI12__URG_HIGH__SHIFT 0x4 +#define DAGB2_WRCLI12__URG_LOW__SHIFT 0x8 +#define DAGB2_WRCLI12__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB2_WRCLI12__MAX_BW__SHIFT 0xd +#define DAGB2_WRCLI12__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB2_WRCLI12__MIN_BW__SHIFT 0x16 +#define DAGB2_WRCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB2_WRCLI12__MAX_OSD__SHIFT 0x1a +#define DAGB2_WRCLI12__VIRT_CHAN_MASK 0x00000007L +#define DAGB2_WRCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB2_WRCLI12__URG_HIGH_MASK 0x000000F0L +#define DAGB2_WRCLI12__URG_LOW_MASK 0x00000F00L +#define DAGB2_WRCLI12__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB2_WRCLI12__MAX_BW_MASK 0x001FE000L +#define DAGB2_WRCLI12__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB2_WRCLI12__MIN_BW_MASK 0x01C00000L +#define DAGB2_WRCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB2_WRCLI12__MAX_OSD_MASK 0xFC000000L +//DAGB2_WRCLI13 +#define DAGB2_WRCLI13__VIRT_CHAN__SHIFT 0x0 +#define 
DAGB2_WRCLI13__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB2_WRCLI13__URG_HIGH__SHIFT 0x4 +#define DAGB2_WRCLI13__URG_LOW__SHIFT 0x8 +#define DAGB2_WRCLI13__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB2_WRCLI13__MAX_BW__SHIFT 0xd +#define DAGB2_WRCLI13__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB2_WRCLI13__MIN_BW__SHIFT 0x16 +#define DAGB2_WRCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB2_WRCLI13__MAX_OSD__SHIFT 0x1a +#define DAGB2_WRCLI13__VIRT_CHAN_MASK 0x00000007L +#define DAGB2_WRCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB2_WRCLI13__URG_HIGH_MASK 0x000000F0L +#define DAGB2_WRCLI13__URG_LOW_MASK 0x00000F00L +#define DAGB2_WRCLI13__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB2_WRCLI13__MAX_BW_MASK 0x001FE000L +#define DAGB2_WRCLI13__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB2_WRCLI13__MIN_BW_MASK 0x01C00000L +#define DAGB2_WRCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB2_WRCLI13__MAX_OSD_MASK 0xFC000000L +//DAGB2_WRCLI14 +#define DAGB2_WRCLI14__VIRT_CHAN__SHIFT 0x0 +#define DAGB2_WRCLI14__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB2_WRCLI14__URG_HIGH__SHIFT 0x4 +#define DAGB2_WRCLI14__URG_LOW__SHIFT 0x8 +#define DAGB2_WRCLI14__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB2_WRCLI14__MAX_BW__SHIFT 0xd +#define DAGB2_WRCLI14__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB2_WRCLI14__MIN_BW__SHIFT 0x16 +#define DAGB2_WRCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB2_WRCLI14__MAX_OSD__SHIFT 0x1a +#define DAGB2_WRCLI14__VIRT_CHAN_MASK 0x00000007L +#define DAGB2_WRCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB2_WRCLI14__URG_HIGH_MASK 0x000000F0L +#define DAGB2_WRCLI14__URG_LOW_MASK 0x00000F00L +#define DAGB2_WRCLI14__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB2_WRCLI14__MAX_BW_MASK 0x001FE000L +#define DAGB2_WRCLI14__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB2_WRCLI14__MIN_BW_MASK 0x01C00000L +#define DAGB2_WRCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB2_WRCLI14__MAX_OSD_MASK 0xFC000000L +//DAGB2_WRCLI15 +#define DAGB2_WRCLI15__VIRT_CHAN__SHIFT 0x0 +#define DAGB2_WRCLI15__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB2_WRCLI15__URG_HIGH__SHIFT 0x4 +#define DAGB2_WRCLI15__URG_LOW__SHIFT 0x8 +#define DAGB2_WRCLI15__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB2_WRCLI15__MAX_BW__SHIFT 0xd +#define DAGB2_WRCLI15__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB2_WRCLI15__MIN_BW__SHIFT 0x16 +#define DAGB2_WRCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB2_WRCLI15__MAX_OSD__SHIFT 0x1a +#define DAGB2_WRCLI15__VIRT_CHAN_MASK 0x00000007L +#define DAGB2_WRCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB2_WRCLI15__URG_HIGH_MASK 0x000000F0L +#define DAGB2_WRCLI15__URG_LOW_MASK 0x00000F00L +#define DAGB2_WRCLI15__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB2_WRCLI15__MAX_BW_MASK 0x001FE000L +#define DAGB2_WRCLI15__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB2_WRCLI15__MIN_BW_MASK 0x01C00000L +#define DAGB2_WRCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB2_WRCLI15__MAX_OSD_MASK 0xFC000000L +//DAGB2_WR_CNTL +#define DAGB2_WR_CNTL__SCLK_FREQ__SHIFT 0x0 +#define DAGB2_WR_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4 +#define DAGB2_WR_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa +#define DAGB2_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10 +#define DAGB2_WR_CNTL__IO_LEVEL__SHIFT 0x11 +#define DAGB2_WR_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14 +#define DAGB2_WR_CNTL__SHARE_VC_NUM__SHIFT 0x17 +#define DAGB2_WR_CNTL__SCLK_FREQ_MASK 0x0000000FL +#define DAGB2_WR_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L +#define DAGB2_WR_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L +#define DAGB2_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 
0x00010000L +#define DAGB2_WR_CNTL__IO_LEVEL_MASK 0x000E0000L +#define DAGB2_WR_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L +#define DAGB2_WR_CNTL__SHARE_VC_NUM_MASK 0x03800000L +//DAGB2_WR_GMI_CNTL +#define DAGB2_WR_GMI_CNTL__EA_CREDIT__SHIFT 0x0 +#define DAGB2_WR_GMI_CNTL__LEVEL__SHIFT 0x6 +#define DAGB2_WR_GMI_CNTL__MAX_BURST__SHIFT 0x9 +#define DAGB2_WR_GMI_CNTL__LAZY_TIMER__SHIFT 0xd +#define DAGB2_WR_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL +#define DAGB2_WR_GMI_CNTL__LEVEL_MASK 0x000001C0L +#define DAGB2_WR_GMI_CNTL__MAX_BURST_MASK 0x00001E00L +#define DAGB2_WR_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L +//DAGB2_WR_ADDR_DAGB +#define DAGB2_WR_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0 +#define DAGB2_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3 +#define DAGB2_WR_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6 +#define DAGB2_WR_ADDR_DAGB__WHOAMI__SHIFT 0x7 +#define DAGB2_WR_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L +#define DAGB2_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L +#define DAGB2_WR_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L +#define DAGB2_WR_ADDR_DAGB__WHOAMI_MASK 0x00001F80L +//DAGB2_WR_OUTPUT_DAGB_MAX_BURST +#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0 +#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4 +#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8 +#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc +#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10 +#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14 +#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18 +#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c +#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL +#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L +#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L +#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L +#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L +#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L +#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L +#define DAGB2_WR_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L +//DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER +#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0 +#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4 +#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8 +#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc +#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10 +#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14 +#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18 +#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c +#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL +#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L +#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L +#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L +#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L +#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L +#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L +#define DAGB2_WR_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L +//DAGB2_WR_CGTT_CLK_CTRL +#define DAGB2_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB2_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB2_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB2_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB2_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB2_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB2_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define 
DAGB2_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB2_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB2_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB2_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB2_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB2_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB2_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB2_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB2_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB2_L1TLB_WR_CGTT_CLK_CTRL +#define DAGB2_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB2_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB2_L1TLB_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB2_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB2_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB2_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB2_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB2_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB2_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB2_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB2_L1TLB_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB2_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB2_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB2_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB2_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB2_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB2_ATCVM_WR_CGTT_CLK_CTRL +#define DAGB2_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB2_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB2_ATCVM_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB2_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB2_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB2_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB2_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB2_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB2_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB2_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB2_ATCVM_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB2_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB2_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB2_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB2_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB2_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB2_WR_ADDR_DAGB_MAX_BURST0 +#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0 +#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4 +#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8 +#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc +#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10 +#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14 +#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18 +#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c +#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL +#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L +#define 
DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L +#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L +#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L +#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L +#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L +#define DAGB2_WR_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L +//DAGB2_WR_ADDR_DAGB_LAZY_TIMER0 +#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0 +#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4 +#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8 +#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc +#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10 +#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14 +#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18 +#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c +#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL +#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L +#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L +#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L +#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L +#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L +#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L +#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L +//DAGB2_WR_ADDR_DAGB_MAX_BURST1 +#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0 +#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4 +#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8 +#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc +#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10 +#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14 +#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18 +#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c +#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL +#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L +#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L +#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L +#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L +#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L +#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L +#define DAGB2_WR_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L +//DAGB2_WR_ADDR_DAGB_LAZY_TIMER1 +#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0 +#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4 +#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8 +#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc +#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10 +#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14 +#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18 +#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c +#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL +#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L +#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L +#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L +#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L +#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L +#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L +#define DAGB2_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L +//DAGB2_WR_DATA_DAGB 
+#define DAGB2_WR_DATA_DAGB__DAGB_ENABLE__SHIFT 0x0 +#define DAGB2_WR_DATA_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3 +#define DAGB2_WR_DATA_DAGB__DISABLE_SELF_INIT__SHIFT 0x6 +#define DAGB2_WR_DATA_DAGB__WHOAMI__SHIFT 0x7 +#define DAGB2_WR_DATA_DAGB__DAGB_ENABLE_MASK 0x00000007L +#define DAGB2_WR_DATA_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L +#define DAGB2_WR_DATA_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L +#define DAGB2_WR_DATA_DAGB__WHOAMI_MASK 0x00001F80L +//DAGB2_WR_DATA_DAGB_MAX_BURST0 +#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0 +#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4 +#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8 +#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc +#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10 +#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14 +#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18 +#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c +#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL +#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L +#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L +#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L +#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L +#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L +#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L +#define DAGB2_WR_DATA_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L +//DAGB2_WR_DATA_DAGB_LAZY_TIMER0 +#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0 +#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4 +#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8 +#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc +#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10 +#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14 +#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18 +#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c +#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL +#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L +#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L +#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L +#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L +#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L +#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L +#define DAGB2_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L +//DAGB2_WR_DATA_DAGB_MAX_BURST1 +#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0 +#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4 +#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8 +#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc +#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10 +#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14 +#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18 +#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c +#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL +#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L +#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L +#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L +#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L +#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L +#define DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L +#define 
DAGB2_WR_DATA_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L +//DAGB2_WR_DATA_DAGB_LAZY_TIMER1 +#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0 +#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4 +#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8 +#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc +#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10 +#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14 +#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18 +#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c +#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL +#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L +#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L +#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L +#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L +#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L +#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L +#define DAGB2_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L +//DAGB2_WR_VC0_CNTL +#define DAGB2_WR_VC0_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB2_WR_VC0_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB2_WR_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB2_WR_VC0_CNTL__MAX_BW__SHIFT 0xc +#define DAGB2_WR_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB2_WR_VC0_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB2_WR_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB2_WR_VC0_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB2_WR_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB2_WR_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB2_WR_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB2_WR_VC0_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB2_WR_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB2_WR_VC0_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB2_WR_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB2_WR_VC0_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB2_WR_VC1_CNTL +#define DAGB2_WR_VC1_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB2_WR_VC1_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB2_WR_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB2_WR_VC1_CNTL__MAX_BW__SHIFT 0xc +#define DAGB2_WR_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB2_WR_VC1_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB2_WR_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB2_WR_VC1_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB2_WR_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB2_WR_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB2_WR_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB2_WR_VC1_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB2_WR_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB2_WR_VC1_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB2_WR_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB2_WR_VC1_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB2_WR_VC2_CNTL +#define DAGB2_WR_VC2_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB2_WR_VC2_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB2_WR_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB2_WR_VC2_CNTL__MAX_BW__SHIFT 0xc +#define DAGB2_WR_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB2_WR_VC2_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB2_WR_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB2_WR_VC2_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB2_WR_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB2_WR_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB2_WR_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB2_WR_VC2_CNTL__MAX_BW_MASK 0x000FF000L +#define 
DAGB2_WR_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB2_WR_VC2_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB2_WR_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB2_WR_VC2_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB2_WR_VC3_CNTL +#define DAGB2_WR_VC3_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB2_WR_VC3_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB2_WR_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB2_WR_VC3_CNTL__MAX_BW__SHIFT 0xc +#define DAGB2_WR_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB2_WR_VC3_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB2_WR_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB2_WR_VC3_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB2_WR_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB2_WR_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB2_WR_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB2_WR_VC3_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB2_WR_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB2_WR_VC3_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB2_WR_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB2_WR_VC3_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB2_WR_VC4_CNTL +#define DAGB2_WR_VC4_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB2_WR_VC4_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB2_WR_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB2_WR_VC4_CNTL__MAX_BW__SHIFT 0xc +#define DAGB2_WR_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB2_WR_VC4_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB2_WR_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB2_WR_VC4_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB2_WR_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB2_WR_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB2_WR_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB2_WR_VC4_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB2_WR_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB2_WR_VC4_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB2_WR_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB2_WR_VC4_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB2_WR_VC5_CNTL +#define DAGB2_WR_VC5_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB2_WR_VC5_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB2_WR_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB2_WR_VC5_CNTL__MAX_BW__SHIFT 0xc +#define DAGB2_WR_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB2_WR_VC5_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB2_WR_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB2_WR_VC5_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB2_WR_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB2_WR_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB2_WR_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB2_WR_VC5_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB2_WR_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB2_WR_VC5_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB2_WR_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB2_WR_VC5_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB2_WR_VC6_CNTL +#define DAGB2_WR_VC6_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB2_WR_VC6_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB2_WR_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB2_WR_VC6_CNTL__MAX_BW__SHIFT 0xc +#define DAGB2_WR_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB2_WR_VC6_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB2_WR_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB2_WR_VC6_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB2_WR_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB2_WR_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB2_WR_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB2_WR_VC6_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB2_WR_VC6_CNTL__MIN_BW_ENABLE_MASK 
0x00100000L +#define DAGB2_WR_VC6_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB2_WR_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB2_WR_VC6_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB2_WR_VC7_CNTL +#define DAGB2_WR_VC7_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB2_WR_VC7_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB2_WR_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB2_WR_VC7_CNTL__MAX_BW__SHIFT 0xc +#define DAGB2_WR_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB2_WR_VC7_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB2_WR_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB2_WR_VC7_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB2_WR_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB2_WR_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB2_WR_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB2_WR_VC7_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB2_WR_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB2_WR_VC7_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB2_WR_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB2_WR_VC7_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB2_WR_CNTL_MISC +#define DAGB2_WR_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0 +#define DAGB2_WR_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6 +#define DAGB2_WR_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd +#define DAGB2_WR_CNTL_MISC__STOR_CC_LEGACY_MODE__SHIFT 0x13 +#define DAGB2_WR_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14 +#define DAGB2_WR_CNTL_MISC__UTCL2_CID__SHIFT 0x15 +#define DAGB2_WR_CNTL_MISC__RDRET_FIFO_CREDITS__SHIFT 0x1a +#define DAGB2_WR_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL +#define DAGB2_WR_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L +#define DAGB2_WR_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L +#define DAGB2_WR_CNTL_MISC__STOR_CC_LEGACY_MODE_MASK 0x00080000L +#define DAGB2_WR_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L +#define DAGB2_WR_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L +#define DAGB2_WR_CNTL_MISC__RDRET_FIFO_CREDITS_MASK 0xFC000000L +//DAGB2_WR_TLB_CREDIT +#define DAGB2_WR_TLB_CREDIT__TLB0__SHIFT 0x0 +#define DAGB2_WR_TLB_CREDIT__TLB1__SHIFT 0x5 +#define DAGB2_WR_TLB_CREDIT__TLB2__SHIFT 0xa +#define DAGB2_WR_TLB_CREDIT__TLB3__SHIFT 0xf +#define DAGB2_WR_TLB_CREDIT__TLB4__SHIFT 0x14 +#define DAGB2_WR_TLB_CREDIT__TLB5__SHIFT 0x19 +#define DAGB2_WR_TLB_CREDIT__TLB0_MASK 0x0000001FL +#define DAGB2_WR_TLB_CREDIT__TLB1_MASK 0x000003E0L +#define DAGB2_WR_TLB_CREDIT__TLB2_MASK 0x00007C00L +#define DAGB2_WR_TLB_CREDIT__TLB3_MASK 0x000F8000L +#define DAGB2_WR_TLB_CREDIT__TLB4_MASK 0x01F00000L +#define DAGB2_WR_TLB_CREDIT__TLB5_MASK 0x3E000000L +//DAGB2_WR_DATA_CREDIT +#define DAGB2_WR_DATA_CREDIT__DLOCK_VC_CREDITS__SHIFT 0x0 +#define DAGB2_WR_DATA_CREDIT__LARGE_BURST_CREDITS__SHIFT 0x8 +#define DAGB2_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS__SHIFT 0x10 +#define DAGB2_WR_DATA_CREDIT__SMALL_BURST_CREDITS__SHIFT 0x18 +#define DAGB2_WR_DATA_CREDIT__DLOCK_VC_CREDITS_MASK 0x000000FFL +#define DAGB2_WR_DATA_CREDIT__LARGE_BURST_CREDITS_MASK 0x0000FF00L +#define DAGB2_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS_MASK 0x00FF0000L +#define DAGB2_WR_DATA_CREDIT__SMALL_BURST_CREDITS_MASK 0xFF000000L +//DAGB2_WR_MISC_CREDIT +#define DAGB2_WR_MISC_CREDIT__ATOMIC_CREDIT__SHIFT 0x0 +#define DAGB2_WR_MISC_CREDIT__DLOCK_VC_NUM__SHIFT 0x6 +#define DAGB2_WR_MISC_CREDIT__OSD_CREDIT__SHIFT 0x9 +#define DAGB2_WR_MISC_CREDIT__OSD_DLOCK_CREDIT__SHIFT 0x10 +#define DAGB2_WR_MISC_CREDIT__ATOMIC_CREDIT_MASK 0x0000003FL +#define DAGB2_WR_MISC_CREDIT__DLOCK_VC_NUM_MASK 0x000001C0L +#define DAGB2_WR_MISC_CREDIT__OSD_CREDIT_MASK 0x0000FE00L +#define DAGB2_WR_MISC_CREDIT__OSD_DLOCK_CREDIT_MASK 
0x007F0000L +//DAGB2_WRCLI_ASK_PENDING +#define DAGB2_WRCLI_ASK_PENDING__BUSY__SHIFT 0x0 +#define DAGB2_WRCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB2_WRCLI_GO_PENDING +#define DAGB2_WRCLI_GO_PENDING__BUSY__SHIFT 0x0 +#define DAGB2_WRCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB2_WRCLI_GBLSEND_PENDING +#define DAGB2_WRCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0 +#define DAGB2_WRCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB2_WRCLI_TLB_PENDING +#define DAGB2_WRCLI_TLB_PENDING__BUSY__SHIFT 0x0 +#define DAGB2_WRCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB2_WRCLI_OARB_PENDING +#define DAGB2_WRCLI_OARB_PENDING__BUSY__SHIFT 0x0 +#define DAGB2_WRCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB2_WRCLI_OSD_PENDING +#define DAGB2_WRCLI_OSD_PENDING__BUSY__SHIFT 0x0 +#define DAGB2_WRCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB2_WRCLI_DBUS_ASK_PENDING +#define DAGB2_WRCLI_DBUS_ASK_PENDING__BUSY__SHIFT 0x0 +#define DAGB2_WRCLI_DBUS_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB2_WRCLI_DBUS_GO_PENDING +#define DAGB2_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0 +#define DAGB2_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB2_DAGB_DLY +#define DAGB2_DAGB_DLY__DLY__SHIFT 0x0 +#define DAGB2_DAGB_DLY__CLI__SHIFT 0x8 +#define DAGB2_DAGB_DLY__POS__SHIFT 0x10 +#define DAGB2_DAGB_DLY__DLY_MASK 0x000000FFL +#define DAGB2_DAGB_DLY__CLI_MASK 0x0000FF00L +#define DAGB2_DAGB_DLY__POS_MASK 0x000F0000L +//DAGB2_CNTL_MISC +#define DAGB2_CNTL_MISC__EA_VC0_REMAP__SHIFT 0x0 +#define DAGB2_CNTL_MISC__EA_VC1_REMAP__SHIFT 0x3 +#define DAGB2_CNTL_MISC__EA_VC2_REMAP__SHIFT 0x6 +#define DAGB2_CNTL_MISC__EA_VC3_REMAP__SHIFT 0x9 +#define DAGB2_CNTL_MISC__EA_VC4_REMAP__SHIFT 0xc +#define DAGB2_CNTL_MISC__EA_VC5_REMAP__SHIFT 0xf +#define DAGB2_CNTL_MISC__EA_VC6_REMAP__SHIFT 0x12 +#define DAGB2_CNTL_MISC__EA_VC7_REMAP__SHIFT 0x15 +#define DAGB2_CNTL_MISC__BW_INIT_CYCLE__SHIFT 0x18 +#define DAGB2_CNTL_MISC__BW_RW_GAP_CYCLE__SHIFT 0x1e +#define DAGB2_CNTL_MISC__EA_VC0_REMAP_MASK 0x00000007L +#define DAGB2_CNTL_MISC__EA_VC1_REMAP_MASK 0x00000038L +#define DAGB2_CNTL_MISC__EA_VC2_REMAP_MASK 0x000001C0L +#define DAGB2_CNTL_MISC__EA_VC3_REMAP_MASK 0x00000E00L +#define DAGB2_CNTL_MISC__EA_VC4_REMAP_MASK 0x00007000L +#define DAGB2_CNTL_MISC__EA_VC5_REMAP_MASK 0x00038000L +#define DAGB2_CNTL_MISC__EA_VC6_REMAP_MASK 0x001C0000L +#define DAGB2_CNTL_MISC__EA_VC7_REMAP_MASK 0x00E00000L +#define DAGB2_CNTL_MISC__BW_INIT_CYCLE_MASK 0x3F000000L +#define DAGB2_CNTL_MISC__BW_RW_GAP_CYCLE_MASK 0xC0000000L +//DAGB2_CNTL_MISC2 +#define DAGB2_CNTL_MISC2__URG_BOOST_ENABLE__SHIFT 0x0 +#define DAGB2_CNTL_MISC2__URG_HALT_ENABLE__SHIFT 0x1 +#define DAGB2_CNTL_MISC2__DISABLE_WRREQ_CG__SHIFT 0x2 +#define DAGB2_CNTL_MISC2__DISABLE_WRRET_CG__SHIFT 0x3 +#define DAGB2_CNTL_MISC2__DISABLE_RDREQ_CG__SHIFT 0x4 +#define DAGB2_CNTL_MISC2__DISABLE_RDRET_CG__SHIFT 0x5 +#define DAGB2_CNTL_MISC2__DISABLE_TLBWR_CG__SHIFT 0x6 +#define DAGB2_CNTL_MISC2__DISABLE_TLBRD_CG__SHIFT 0x7 +#define DAGB2_CNTL_MISC2__DISABLE_EAWRREQ_BUSY__SHIFT 0x8 +#define DAGB2_CNTL_MISC2__DISABLE_EARDREQ_BUSY__SHIFT 0x9 +#define DAGB2_CNTL_MISC2__SWAP_CTL__SHIFT 0xa +#define DAGB2_CNTL_MISC2__RDRET_FIFO_PERF__SHIFT 0xb +#define DAGB2_CNTL_MISC2__RDRET_FIFO_DLOCK_CREDITS__SHIFT 0x11 +#define DAGB2_CNTL_MISC2__URG_BOOST_ENABLE_MASK 0x00000001L +#define DAGB2_CNTL_MISC2__URG_HALT_ENABLE_MASK 0x00000002L +#define DAGB2_CNTL_MISC2__DISABLE_WRREQ_CG_MASK 0x00000004L +#define DAGB2_CNTL_MISC2__DISABLE_WRRET_CG_MASK 0x00000008L +#define DAGB2_CNTL_MISC2__DISABLE_RDREQ_CG_MASK 0x00000010L +#define 
DAGB2_CNTL_MISC2__DISABLE_RDRET_CG_MASK 0x00000020L +#define DAGB2_CNTL_MISC2__DISABLE_TLBWR_CG_MASK 0x00000040L +#define DAGB2_CNTL_MISC2__DISABLE_TLBRD_CG_MASK 0x00000080L +#define DAGB2_CNTL_MISC2__DISABLE_EAWRREQ_BUSY_MASK 0x00000100L +#define DAGB2_CNTL_MISC2__DISABLE_EARDREQ_BUSY_MASK 0x00000200L +#define DAGB2_CNTL_MISC2__SWAP_CTL_MASK 0x00000400L +#define DAGB2_CNTL_MISC2__RDRET_FIFO_PERF_MASK 0x00000800L +#define DAGB2_CNTL_MISC2__RDRET_FIFO_DLOCK_CREDITS_MASK 0x007E0000L +//DAGB2_FIFO_EMPTY +#define DAGB2_FIFO_EMPTY__EMPTY__SHIFT 0x0 +#define DAGB2_FIFO_EMPTY__EMPTY_MASK 0x00FFFFFFL +//DAGB2_FIFO_FULL +#define DAGB2_FIFO_FULL__FULL__SHIFT 0x0 +#define DAGB2_FIFO_FULL__FULL_MASK 0x007FFFFFL +//DAGB2_WR_CREDITS_FULL +#define DAGB2_WR_CREDITS_FULL__FULL__SHIFT 0x0 +#define DAGB2_WR_CREDITS_FULL__FULL_MASK 0x1FFFFFFFL +//DAGB2_RD_CREDITS_FULL +#define DAGB2_RD_CREDITS_FULL__FULL__SHIFT 0x0 +#define DAGB2_RD_CREDITS_FULL__FULL_MASK 0x0003FFFFL +//DAGB2_PERFCOUNTER_LO +#define DAGB2_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0 +#define DAGB2_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL +//DAGB2_PERFCOUNTER_HI +#define DAGB2_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0 +#define DAGB2_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10 +#define DAGB2_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL +#define DAGB2_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L +//DAGB2_PERFCOUNTER0_CFG +#define DAGB2_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0 +#define DAGB2_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8 +#define DAGB2_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18 +#define DAGB2_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c +#define DAGB2_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d +#define DAGB2_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL +#define DAGB2_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define DAGB2_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L +#define DAGB2_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L +#define DAGB2_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L +//DAGB2_PERFCOUNTER1_CFG +#define DAGB2_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0 +#define DAGB2_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8 +#define DAGB2_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18 +#define DAGB2_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c +#define DAGB2_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d +#define DAGB2_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL +#define DAGB2_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define DAGB2_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L +#define DAGB2_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L +#define DAGB2_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L +//DAGB2_PERFCOUNTER2_CFG +#define DAGB2_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0 +#define DAGB2_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8 +#define DAGB2_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18 +#define DAGB2_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c +#define DAGB2_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d +#define DAGB2_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL +#define DAGB2_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define DAGB2_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L +#define DAGB2_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L +#define DAGB2_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L +//DAGB2_PERFCOUNTER_RSLT_CNTL +#define DAGB2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0 +#define DAGB2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8 +#define DAGB2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10 +#define DAGB2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18 +#define DAGB2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19 +#define DAGB2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a +#define 
DAGB2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL +#define DAGB2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L +#define DAGB2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L +#define DAGB2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L +#define DAGB2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L +#define DAGB2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L +//DAGB2_RESERVE0 +#define DAGB2_RESERVE0__RESERVE__SHIFT 0x0 +#define DAGB2_RESERVE0__RESERVE_MASK 0xFFFFFFFFL +//DAGB2_RESERVE1 +#define DAGB2_RESERVE1__RESERVE__SHIFT 0x0 +#define DAGB2_RESERVE1__RESERVE_MASK 0xFFFFFFFFL +//DAGB2_RESERVE2 +#define DAGB2_RESERVE2__RESERVE__SHIFT 0x0 +#define DAGB2_RESERVE2__RESERVE_MASK 0xFFFFFFFFL +//DAGB2_RESERVE3 +#define DAGB2_RESERVE3__RESERVE__SHIFT 0x0 +#define DAGB2_RESERVE3__RESERVE_MASK 0xFFFFFFFFL +//DAGB2_RESERVE4 +#define DAGB2_RESERVE4__RESERVE__SHIFT 0x0 +#define DAGB2_RESERVE4__RESERVE_MASK 0xFFFFFFFFL +//DAGB2_RESERVE5 +#define DAGB2_RESERVE5__RESERVE__SHIFT 0x0 +#define DAGB2_RESERVE5__RESERVE_MASK 0xFFFFFFFFL +//DAGB2_RESERVE6 +#define DAGB2_RESERVE6__RESERVE__SHIFT 0x0 +#define DAGB2_RESERVE6__RESERVE_MASK 0xFFFFFFFFL +//DAGB2_RESERVE7 +#define DAGB2_RESERVE7__RESERVE__SHIFT 0x0 +#define DAGB2_RESERVE7__RESERVE_MASK 0xFFFFFFFFL +//DAGB2_RESERVE8 +#define DAGB2_RESERVE8__RESERVE__SHIFT 0x0 +#define DAGB2_RESERVE8__RESERVE_MASK 0xFFFFFFFFL +//DAGB2_RESERVE9 +#define DAGB2_RESERVE9__RESERVE__SHIFT 0x0 +#define DAGB2_RESERVE9__RESERVE_MASK 0xFFFFFFFFL +//DAGB2_RESERVE10 +#define DAGB2_RESERVE10__RESERVE__SHIFT 0x0 +#define DAGB2_RESERVE10__RESERVE_MASK 0xFFFFFFFFL +//DAGB2_RESERVE11 +#define DAGB2_RESERVE11__RESERVE__SHIFT 0x0 +#define DAGB2_RESERVE11__RESERVE_MASK 0xFFFFFFFFL +//DAGB2_RESERVE12 +#define DAGB2_RESERVE12__RESERVE__SHIFT 0x0 +#define DAGB2_RESERVE12__RESERVE_MASK 0xFFFFFFFFL +//DAGB2_RESERVE13 +#define DAGB2_RESERVE13__RESERVE__SHIFT 0x0 +#define DAGB2_RESERVE13__RESERVE_MASK 0xFFFFFFFFL + + +// addressBlock: mmhub_dagb_dagbdec3 +//DAGB3_RDCLI0 +#define DAGB3_RDCLI0__VIRT_CHAN__SHIFT 0x0 +#define DAGB3_RDCLI0__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB3_RDCLI0__URG_HIGH__SHIFT 0x4 +#define DAGB3_RDCLI0__URG_LOW__SHIFT 0x8 +#define DAGB3_RDCLI0__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB3_RDCLI0__MAX_BW__SHIFT 0xd +#define DAGB3_RDCLI0__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB3_RDCLI0__MIN_BW__SHIFT 0x16 +#define DAGB3_RDCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB3_RDCLI0__MAX_OSD__SHIFT 0x1a +#define DAGB3_RDCLI0__VIRT_CHAN_MASK 0x00000007L +#define DAGB3_RDCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB3_RDCLI0__URG_HIGH_MASK 0x000000F0L +#define DAGB3_RDCLI0__URG_LOW_MASK 0x00000F00L +#define DAGB3_RDCLI0__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB3_RDCLI0__MAX_BW_MASK 0x001FE000L +#define DAGB3_RDCLI0__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB3_RDCLI0__MIN_BW_MASK 0x01C00000L +#define DAGB3_RDCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB3_RDCLI0__MAX_OSD_MASK 0xFC000000L +//DAGB3_RDCLI1 +#define DAGB3_RDCLI1__VIRT_CHAN__SHIFT 0x0 +#define DAGB3_RDCLI1__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB3_RDCLI1__URG_HIGH__SHIFT 0x4 +#define DAGB3_RDCLI1__URG_LOW__SHIFT 0x8 +#define DAGB3_RDCLI1__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB3_RDCLI1__MAX_BW__SHIFT 0xd +#define DAGB3_RDCLI1__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB3_RDCLI1__MIN_BW__SHIFT 0x16 +#define DAGB3_RDCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB3_RDCLI1__MAX_OSD__SHIFT 0x1a +#define DAGB3_RDCLI1__VIRT_CHAN_MASK 0x00000007L +#define 
DAGB3_RDCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB3_RDCLI1__URG_HIGH_MASK 0x000000F0L +#define DAGB3_RDCLI1__URG_LOW_MASK 0x00000F00L +#define DAGB3_RDCLI1__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB3_RDCLI1__MAX_BW_MASK 0x001FE000L +#define DAGB3_RDCLI1__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB3_RDCLI1__MIN_BW_MASK 0x01C00000L +#define DAGB3_RDCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB3_RDCLI1__MAX_OSD_MASK 0xFC000000L +//DAGB3_RDCLI2 +#define DAGB3_RDCLI2__VIRT_CHAN__SHIFT 0x0 +#define DAGB3_RDCLI2__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB3_RDCLI2__URG_HIGH__SHIFT 0x4 +#define DAGB3_RDCLI2__URG_LOW__SHIFT 0x8 +#define DAGB3_RDCLI2__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB3_RDCLI2__MAX_BW__SHIFT 0xd +#define DAGB3_RDCLI2__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB3_RDCLI2__MIN_BW__SHIFT 0x16 +#define DAGB3_RDCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB3_RDCLI2__MAX_OSD__SHIFT 0x1a +#define DAGB3_RDCLI2__VIRT_CHAN_MASK 0x00000007L +#define DAGB3_RDCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB3_RDCLI2__URG_HIGH_MASK 0x000000F0L +#define DAGB3_RDCLI2__URG_LOW_MASK 0x00000F00L +#define DAGB3_RDCLI2__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB3_RDCLI2__MAX_BW_MASK 0x001FE000L +#define DAGB3_RDCLI2__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB3_RDCLI2__MIN_BW_MASK 0x01C00000L +#define DAGB3_RDCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB3_RDCLI2__MAX_OSD_MASK 0xFC000000L +//DAGB3_RDCLI3 +#define DAGB3_RDCLI3__VIRT_CHAN__SHIFT 0x0 +#define DAGB3_RDCLI3__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB3_RDCLI3__URG_HIGH__SHIFT 0x4 +#define DAGB3_RDCLI3__URG_LOW__SHIFT 0x8 +#define DAGB3_RDCLI3__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB3_RDCLI3__MAX_BW__SHIFT 0xd +#define DAGB3_RDCLI3__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB3_RDCLI3__MIN_BW__SHIFT 0x16 +#define DAGB3_RDCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB3_RDCLI3__MAX_OSD__SHIFT 0x1a +#define DAGB3_RDCLI3__VIRT_CHAN_MASK 0x00000007L +#define DAGB3_RDCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB3_RDCLI3__URG_HIGH_MASK 0x000000F0L +#define DAGB3_RDCLI3__URG_LOW_MASK 0x00000F00L +#define DAGB3_RDCLI3__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB3_RDCLI3__MAX_BW_MASK 0x001FE000L +#define DAGB3_RDCLI3__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB3_RDCLI3__MIN_BW_MASK 0x01C00000L +#define DAGB3_RDCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB3_RDCLI3__MAX_OSD_MASK 0xFC000000L +//DAGB3_RDCLI4 +#define DAGB3_RDCLI4__VIRT_CHAN__SHIFT 0x0 +#define DAGB3_RDCLI4__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB3_RDCLI4__URG_HIGH__SHIFT 0x4 +#define DAGB3_RDCLI4__URG_LOW__SHIFT 0x8 +#define DAGB3_RDCLI4__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB3_RDCLI4__MAX_BW__SHIFT 0xd +#define DAGB3_RDCLI4__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB3_RDCLI4__MIN_BW__SHIFT 0x16 +#define DAGB3_RDCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB3_RDCLI4__MAX_OSD__SHIFT 0x1a +#define DAGB3_RDCLI4__VIRT_CHAN_MASK 0x00000007L +#define DAGB3_RDCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB3_RDCLI4__URG_HIGH_MASK 0x000000F0L +#define DAGB3_RDCLI4__URG_LOW_MASK 0x00000F00L +#define DAGB3_RDCLI4__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB3_RDCLI4__MAX_BW_MASK 0x001FE000L +#define DAGB3_RDCLI4__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB3_RDCLI4__MIN_BW_MASK 0x01C00000L +#define DAGB3_RDCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB3_RDCLI4__MAX_OSD_MASK 0xFC000000L +//DAGB3_RDCLI5 +#define DAGB3_RDCLI5__VIRT_CHAN__SHIFT 0x0 +#define DAGB3_RDCLI5__CHECK_TLB_CREDIT__SHIFT 0x3 +#define 
DAGB3_RDCLI5__URG_HIGH__SHIFT 0x4 +#define DAGB3_RDCLI5__URG_LOW__SHIFT 0x8 +#define DAGB3_RDCLI5__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB3_RDCLI5__MAX_BW__SHIFT 0xd +#define DAGB3_RDCLI5__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB3_RDCLI5__MIN_BW__SHIFT 0x16 +#define DAGB3_RDCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB3_RDCLI5__MAX_OSD__SHIFT 0x1a +#define DAGB3_RDCLI5__VIRT_CHAN_MASK 0x00000007L +#define DAGB3_RDCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB3_RDCLI5__URG_HIGH_MASK 0x000000F0L +#define DAGB3_RDCLI5__URG_LOW_MASK 0x00000F00L +#define DAGB3_RDCLI5__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB3_RDCLI5__MAX_BW_MASK 0x001FE000L +#define DAGB3_RDCLI5__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB3_RDCLI5__MIN_BW_MASK 0x01C00000L +#define DAGB3_RDCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB3_RDCLI5__MAX_OSD_MASK 0xFC000000L +//DAGB3_RDCLI6 +#define DAGB3_RDCLI6__VIRT_CHAN__SHIFT 0x0 +#define DAGB3_RDCLI6__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB3_RDCLI6__URG_HIGH__SHIFT 0x4 +#define DAGB3_RDCLI6__URG_LOW__SHIFT 0x8 +#define DAGB3_RDCLI6__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB3_RDCLI6__MAX_BW__SHIFT 0xd +#define DAGB3_RDCLI6__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB3_RDCLI6__MIN_BW__SHIFT 0x16 +#define DAGB3_RDCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB3_RDCLI6__MAX_OSD__SHIFT 0x1a +#define DAGB3_RDCLI6__VIRT_CHAN_MASK 0x00000007L +#define DAGB3_RDCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB3_RDCLI6__URG_HIGH_MASK 0x000000F0L +#define DAGB3_RDCLI6__URG_LOW_MASK 0x00000F00L +#define DAGB3_RDCLI6__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB3_RDCLI6__MAX_BW_MASK 0x001FE000L +#define DAGB3_RDCLI6__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB3_RDCLI6__MIN_BW_MASK 0x01C00000L +#define DAGB3_RDCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB3_RDCLI6__MAX_OSD_MASK 0xFC000000L +//DAGB3_RDCLI7 +#define DAGB3_RDCLI7__VIRT_CHAN__SHIFT 0x0 +#define DAGB3_RDCLI7__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB3_RDCLI7__URG_HIGH__SHIFT 0x4 +#define DAGB3_RDCLI7__URG_LOW__SHIFT 0x8 +#define DAGB3_RDCLI7__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB3_RDCLI7__MAX_BW__SHIFT 0xd +#define DAGB3_RDCLI7__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB3_RDCLI7__MIN_BW__SHIFT 0x16 +#define DAGB3_RDCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB3_RDCLI7__MAX_OSD__SHIFT 0x1a +#define DAGB3_RDCLI7__VIRT_CHAN_MASK 0x00000007L +#define DAGB3_RDCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB3_RDCLI7__URG_HIGH_MASK 0x000000F0L +#define DAGB3_RDCLI7__URG_LOW_MASK 0x00000F00L +#define DAGB3_RDCLI7__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB3_RDCLI7__MAX_BW_MASK 0x001FE000L +#define DAGB3_RDCLI7__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB3_RDCLI7__MIN_BW_MASK 0x01C00000L +#define DAGB3_RDCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB3_RDCLI7__MAX_OSD_MASK 0xFC000000L +//DAGB3_RDCLI8 +#define DAGB3_RDCLI8__VIRT_CHAN__SHIFT 0x0 +#define DAGB3_RDCLI8__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB3_RDCLI8__URG_HIGH__SHIFT 0x4 +#define DAGB3_RDCLI8__URG_LOW__SHIFT 0x8 +#define DAGB3_RDCLI8__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB3_RDCLI8__MAX_BW__SHIFT 0xd +#define DAGB3_RDCLI8__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB3_RDCLI8__MIN_BW__SHIFT 0x16 +#define DAGB3_RDCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB3_RDCLI8__MAX_OSD__SHIFT 0x1a +#define DAGB3_RDCLI8__VIRT_CHAN_MASK 0x00000007L +#define DAGB3_RDCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB3_RDCLI8__URG_HIGH_MASK 0x000000F0L +#define DAGB3_RDCLI8__URG_LOW_MASK 0x00000F00L +#define 
DAGB3_RDCLI8__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB3_RDCLI8__MAX_BW_MASK 0x001FE000L +#define DAGB3_RDCLI8__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB3_RDCLI8__MIN_BW_MASK 0x01C00000L +#define DAGB3_RDCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB3_RDCLI8__MAX_OSD_MASK 0xFC000000L +//DAGB3_RDCLI9 +#define DAGB3_RDCLI9__VIRT_CHAN__SHIFT 0x0 +#define DAGB3_RDCLI9__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB3_RDCLI9__URG_HIGH__SHIFT 0x4 +#define DAGB3_RDCLI9__URG_LOW__SHIFT 0x8 +#define DAGB3_RDCLI9__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB3_RDCLI9__MAX_BW__SHIFT 0xd +#define DAGB3_RDCLI9__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB3_RDCLI9__MIN_BW__SHIFT 0x16 +#define DAGB3_RDCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB3_RDCLI9__MAX_OSD__SHIFT 0x1a +#define DAGB3_RDCLI9__VIRT_CHAN_MASK 0x00000007L +#define DAGB3_RDCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB3_RDCLI9__URG_HIGH_MASK 0x000000F0L +#define DAGB3_RDCLI9__URG_LOW_MASK 0x00000F00L +#define DAGB3_RDCLI9__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB3_RDCLI9__MAX_BW_MASK 0x001FE000L +#define DAGB3_RDCLI9__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB3_RDCLI9__MIN_BW_MASK 0x01C00000L +#define DAGB3_RDCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB3_RDCLI9__MAX_OSD_MASK 0xFC000000L +//DAGB3_RDCLI10 +#define DAGB3_RDCLI10__VIRT_CHAN__SHIFT 0x0 +#define DAGB3_RDCLI10__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB3_RDCLI10__URG_HIGH__SHIFT 0x4 +#define DAGB3_RDCLI10__URG_LOW__SHIFT 0x8 +#define DAGB3_RDCLI10__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB3_RDCLI10__MAX_BW__SHIFT 0xd +#define DAGB3_RDCLI10__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB3_RDCLI10__MIN_BW__SHIFT 0x16 +#define DAGB3_RDCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB3_RDCLI10__MAX_OSD__SHIFT 0x1a +#define DAGB3_RDCLI10__VIRT_CHAN_MASK 0x00000007L +#define DAGB3_RDCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB3_RDCLI10__URG_HIGH_MASK 0x000000F0L +#define DAGB3_RDCLI10__URG_LOW_MASK 0x00000F00L +#define DAGB3_RDCLI10__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB3_RDCLI10__MAX_BW_MASK 0x001FE000L +#define DAGB3_RDCLI10__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB3_RDCLI10__MIN_BW_MASK 0x01C00000L +#define DAGB3_RDCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB3_RDCLI10__MAX_OSD_MASK 0xFC000000L +//DAGB3_RDCLI11 +#define DAGB3_RDCLI11__VIRT_CHAN__SHIFT 0x0 +#define DAGB3_RDCLI11__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB3_RDCLI11__URG_HIGH__SHIFT 0x4 +#define DAGB3_RDCLI11__URG_LOW__SHIFT 0x8 +#define DAGB3_RDCLI11__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB3_RDCLI11__MAX_BW__SHIFT 0xd +#define DAGB3_RDCLI11__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB3_RDCLI11__MIN_BW__SHIFT 0x16 +#define DAGB3_RDCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB3_RDCLI11__MAX_OSD__SHIFT 0x1a +#define DAGB3_RDCLI11__VIRT_CHAN_MASK 0x00000007L +#define DAGB3_RDCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB3_RDCLI11__URG_HIGH_MASK 0x000000F0L +#define DAGB3_RDCLI11__URG_LOW_MASK 0x00000F00L +#define DAGB3_RDCLI11__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB3_RDCLI11__MAX_BW_MASK 0x001FE000L +#define DAGB3_RDCLI11__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB3_RDCLI11__MIN_BW_MASK 0x01C00000L +#define DAGB3_RDCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB3_RDCLI11__MAX_OSD_MASK 0xFC000000L +//DAGB3_RDCLI12 +#define DAGB3_RDCLI12__VIRT_CHAN__SHIFT 0x0 +#define DAGB3_RDCLI12__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB3_RDCLI12__URG_HIGH__SHIFT 0x4 +#define DAGB3_RDCLI12__URG_LOW__SHIFT 0x8 +#define DAGB3_RDCLI12__MAX_BW_ENABLE__SHIFT 0xc 
+#define DAGB3_RDCLI12__MAX_BW__SHIFT 0xd +#define DAGB3_RDCLI12__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB3_RDCLI12__MIN_BW__SHIFT 0x16 +#define DAGB3_RDCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB3_RDCLI12__MAX_OSD__SHIFT 0x1a +#define DAGB3_RDCLI12__VIRT_CHAN_MASK 0x00000007L +#define DAGB3_RDCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB3_RDCLI12__URG_HIGH_MASK 0x000000F0L +#define DAGB3_RDCLI12__URG_LOW_MASK 0x00000F00L +#define DAGB3_RDCLI12__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB3_RDCLI12__MAX_BW_MASK 0x001FE000L +#define DAGB3_RDCLI12__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB3_RDCLI12__MIN_BW_MASK 0x01C00000L +#define DAGB3_RDCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB3_RDCLI12__MAX_OSD_MASK 0xFC000000L +//DAGB3_RDCLI13 +#define DAGB3_RDCLI13__VIRT_CHAN__SHIFT 0x0 +#define DAGB3_RDCLI13__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB3_RDCLI13__URG_HIGH__SHIFT 0x4 +#define DAGB3_RDCLI13__URG_LOW__SHIFT 0x8 +#define DAGB3_RDCLI13__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB3_RDCLI13__MAX_BW__SHIFT 0xd +#define DAGB3_RDCLI13__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB3_RDCLI13__MIN_BW__SHIFT 0x16 +#define DAGB3_RDCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB3_RDCLI13__MAX_OSD__SHIFT 0x1a +#define DAGB3_RDCLI13__VIRT_CHAN_MASK 0x00000007L +#define DAGB3_RDCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB3_RDCLI13__URG_HIGH_MASK 0x000000F0L +#define DAGB3_RDCLI13__URG_LOW_MASK 0x00000F00L +#define DAGB3_RDCLI13__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB3_RDCLI13__MAX_BW_MASK 0x001FE000L +#define DAGB3_RDCLI13__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB3_RDCLI13__MIN_BW_MASK 0x01C00000L +#define DAGB3_RDCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB3_RDCLI13__MAX_OSD_MASK 0xFC000000L +//DAGB3_RDCLI14 +#define DAGB3_RDCLI14__VIRT_CHAN__SHIFT 0x0 +#define DAGB3_RDCLI14__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB3_RDCLI14__URG_HIGH__SHIFT 0x4 +#define DAGB3_RDCLI14__URG_LOW__SHIFT 0x8 +#define DAGB3_RDCLI14__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB3_RDCLI14__MAX_BW__SHIFT 0xd +#define DAGB3_RDCLI14__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB3_RDCLI14__MIN_BW__SHIFT 0x16 +#define DAGB3_RDCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB3_RDCLI14__MAX_OSD__SHIFT 0x1a +#define DAGB3_RDCLI14__VIRT_CHAN_MASK 0x00000007L +#define DAGB3_RDCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB3_RDCLI14__URG_HIGH_MASK 0x000000F0L +#define DAGB3_RDCLI14__URG_LOW_MASK 0x00000F00L +#define DAGB3_RDCLI14__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB3_RDCLI14__MAX_BW_MASK 0x001FE000L +#define DAGB3_RDCLI14__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB3_RDCLI14__MIN_BW_MASK 0x01C00000L +#define DAGB3_RDCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB3_RDCLI14__MAX_OSD_MASK 0xFC000000L +//DAGB3_RDCLI15 +#define DAGB3_RDCLI15__VIRT_CHAN__SHIFT 0x0 +#define DAGB3_RDCLI15__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB3_RDCLI15__URG_HIGH__SHIFT 0x4 +#define DAGB3_RDCLI15__URG_LOW__SHIFT 0x8 +#define DAGB3_RDCLI15__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB3_RDCLI15__MAX_BW__SHIFT 0xd +#define DAGB3_RDCLI15__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB3_RDCLI15__MIN_BW__SHIFT 0x16 +#define DAGB3_RDCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB3_RDCLI15__MAX_OSD__SHIFT 0x1a +#define DAGB3_RDCLI15__VIRT_CHAN_MASK 0x00000007L +#define DAGB3_RDCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB3_RDCLI15__URG_HIGH_MASK 0x000000F0L +#define DAGB3_RDCLI15__URG_LOW_MASK 0x00000F00L +#define DAGB3_RDCLI15__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB3_RDCLI15__MAX_BW_MASK 
0x001FE000L +#define DAGB3_RDCLI15__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB3_RDCLI15__MIN_BW_MASK 0x01C00000L +#define DAGB3_RDCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB3_RDCLI15__MAX_OSD_MASK 0xFC000000L +//DAGB3_RD_CNTL +#define DAGB3_RD_CNTL__SCLK_FREQ__SHIFT 0x0 +#define DAGB3_RD_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4 +#define DAGB3_RD_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa +#define DAGB3_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10 +#define DAGB3_RD_CNTL__IO_LEVEL__SHIFT 0x11 +#define DAGB3_RD_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14 +#define DAGB3_RD_CNTL__SHARE_VC_NUM__SHIFT 0x17 +#define DAGB3_RD_CNTL__SCLK_FREQ_MASK 0x0000000FL +#define DAGB3_RD_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L +#define DAGB3_RD_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L +#define DAGB3_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L +#define DAGB3_RD_CNTL__IO_LEVEL_MASK 0x000E0000L +#define DAGB3_RD_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L +#define DAGB3_RD_CNTL__SHARE_VC_NUM_MASK 0x03800000L +//DAGB3_RD_GMI_CNTL +#define DAGB3_RD_GMI_CNTL__EA_CREDIT__SHIFT 0x0 +#define DAGB3_RD_GMI_CNTL__LEVEL__SHIFT 0x6 +#define DAGB3_RD_GMI_CNTL__MAX_BURST__SHIFT 0x9 +#define DAGB3_RD_GMI_CNTL__LAZY_TIMER__SHIFT 0xd +#define DAGB3_RD_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL +#define DAGB3_RD_GMI_CNTL__LEVEL_MASK 0x000001C0L +#define DAGB3_RD_GMI_CNTL__MAX_BURST_MASK 0x00001E00L +#define DAGB3_RD_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L +//DAGB3_RD_ADDR_DAGB +#define DAGB3_RD_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0 +#define DAGB3_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3 +#define DAGB3_RD_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6 +#define DAGB3_RD_ADDR_DAGB__WHOAMI__SHIFT 0x7 +#define DAGB3_RD_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L +#define DAGB3_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L +#define DAGB3_RD_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L +#define DAGB3_RD_ADDR_DAGB__WHOAMI_MASK 0x00001F80L +//DAGB3_RD_OUTPUT_DAGB_MAX_BURST +#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0 +#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4 +#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8 +#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc +#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10 +#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14 +#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18 +#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c +#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL +#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L +#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L +#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L +#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L +#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L +#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L +#define DAGB3_RD_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L +//DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER +#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0 +#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4 +#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8 +#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc +#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10 +#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14 +#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18 +#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c +#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL +#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L +#define 
DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L +#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L +#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L +#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L +#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L +#define DAGB3_RD_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L +//DAGB3_RD_CGTT_CLK_CTRL +#define DAGB3_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB3_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB3_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB3_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB3_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB3_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB3_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB3_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB3_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB3_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB3_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB3_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB3_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB3_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB3_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB3_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB3_L1TLB_RD_CGTT_CLK_CTRL +#define DAGB3_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB3_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB3_L1TLB_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB3_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB3_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB3_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB3_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB3_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB3_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB3_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB3_L1TLB_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB3_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB3_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB3_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB3_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB3_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB3_ATCVM_RD_CGTT_CLK_CTRL +#define DAGB3_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB3_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB3_ATCVM_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB3_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB3_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB3_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB3_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB3_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB3_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB3_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB3_ATCVM_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB3_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB3_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB3_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 
0x20000000L +#define DAGB3_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB3_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB3_RD_ADDR_DAGB_MAX_BURST0 +#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0 +#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4 +#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8 +#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc +#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10 +#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14 +#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18 +#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c +#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL +#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L +#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L +#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L +#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L +#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L +#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L +#define DAGB3_RD_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L +//DAGB3_RD_ADDR_DAGB_LAZY_TIMER0 +#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0 +#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4 +#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8 +#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc +#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10 +#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14 +#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18 +#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c +#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL +#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L +#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L +#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L +#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L +#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L +#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L +#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L +//DAGB3_RD_ADDR_DAGB_MAX_BURST1 +#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0 +#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4 +#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8 +#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc +#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10 +#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14 +#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18 +#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c +#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL +#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L +#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L +#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L +#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L +#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L +#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L +#define DAGB3_RD_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L +//DAGB3_RD_ADDR_DAGB_LAZY_TIMER1 +#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0 +#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4 +#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8 +#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 
0xc +#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10 +#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14 +#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18 +#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c +#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL +#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L +#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L +#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L +#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L +#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L +#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L +#define DAGB3_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L +//DAGB3_RD_VC0_CNTL +#define DAGB3_RD_VC0_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB3_RD_VC0_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB3_RD_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB3_RD_VC0_CNTL__MAX_BW__SHIFT 0xc +#define DAGB3_RD_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB3_RD_VC0_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB3_RD_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB3_RD_VC0_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB3_RD_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB3_RD_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB3_RD_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB3_RD_VC0_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB3_RD_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB3_RD_VC0_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB3_RD_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB3_RD_VC0_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB3_RD_VC1_CNTL +#define DAGB3_RD_VC1_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB3_RD_VC1_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB3_RD_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB3_RD_VC1_CNTL__MAX_BW__SHIFT 0xc +#define DAGB3_RD_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB3_RD_VC1_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB3_RD_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB3_RD_VC1_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB3_RD_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB3_RD_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB3_RD_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB3_RD_VC1_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB3_RD_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB3_RD_VC1_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB3_RD_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB3_RD_VC1_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB3_RD_VC2_CNTL +#define DAGB3_RD_VC2_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB3_RD_VC2_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB3_RD_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB3_RD_VC2_CNTL__MAX_BW__SHIFT 0xc +#define DAGB3_RD_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB3_RD_VC2_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB3_RD_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB3_RD_VC2_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB3_RD_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB3_RD_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB3_RD_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB3_RD_VC2_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB3_RD_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB3_RD_VC2_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB3_RD_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB3_RD_VC2_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB3_RD_VC3_CNTL +#define DAGB3_RD_VC3_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB3_RD_VC3_CNTL__EA_CREDIT__SHIFT 0x5 +#define 
DAGB3_RD_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB3_RD_VC3_CNTL__MAX_BW__SHIFT 0xc +#define DAGB3_RD_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB3_RD_VC3_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB3_RD_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB3_RD_VC3_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB3_RD_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB3_RD_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB3_RD_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB3_RD_VC3_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB3_RD_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB3_RD_VC3_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB3_RD_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB3_RD_VC3_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB3_RD_VC4_CNTL +#define DAGB3_RD_VC4_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB3_RD_VC4_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB3_RD_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB3_RD_VC4_CNTL__MAX_BW__SHIFT 0xc +#define DAGB3_RD_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB3_RD_VC4_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB3_RD_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB3_RD_VC4_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB3_RD_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB3_RD_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB3_RD_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB3_RD_VC4_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB3_RD_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB3_RD_VC4_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB3_RD_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB3_RD_VC4_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB3_RD_VC5_CNTL +#define DAGB3_RD_VC5_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB3_RD_VC5_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB3_RD_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB3_RD_VC5_CNTL__MAX_BW__SHIFT 0xc +#define DAGB3_RD_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB3_RD_VC5_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB3_RD_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB3_RD_VC5_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB3_RD_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB3_RD_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB3_RD_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB3_RD_VC5_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB3_RD_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB3_RD_VC5_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB3_RD_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB3_RD_VC5_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB3_RD_VC6_CNTL +#define DAGB3_RD_VC6_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB3_RD_VC6_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB3_RD_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB3_RD_VC6_CNTL__MAX_BW__SHIFT 0xc +#define DAGB3_RD_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB3_RD_VC6_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB3_RD_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB3_RD_VC6_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB3_RD_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB3_RD_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB3_RD_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB3_RD_VC6_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB3_RD_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB3_RD_VC6_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB3_RD_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB3_RD_VC6_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB3_RD_VC7_CNTL +#define DAGB3_RD_VC7_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB3_RD_VC7_CNTL__EA_CREDIT__SHIFT 0x5 +#define 
DAGB3_RD_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB3_RD_VC7_CNTL__MAX_BW__SHIFT 0xc +#define DAGB3_RD_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB3_RD_VC7_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB3_RD_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB3_RD_VC7_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB3_RD_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB3_RD_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB3_RD_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB3_RD_VC7_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB3_RD_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB3_RD_VC7_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB3_RD_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB3_RD_VC7_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB3_RD_CNTL_MISC +#define DAGB3_RD_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0 +#define DAGB3_RD_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6 +#define DAGB3_RD_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd +#define DAGB3_RD_CNTL_MISC__STOR_CC_LEGACY_MODE__SHIFT 0x13 +#define DAGB3_RD_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14 +#define DAGB3_RD_CNTL_MISC__UTCL2_CID__SHIFT 0x15 +#define DAGB3_RD_CNTL_MISC__RDRET_FIFO_CREDITS__SHIFT 0x1a +#define DAGB3_RD_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL +#define DAGB3_RD_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L +#define DAGB3_RD_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L +#define DAGB3_RD_CNTL_MISC__STOR_CC_LEGACY_MODE_MASK 0x00080000L +#define DAGB3_RD_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L +#define DAGB3_RD_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L +#define DAGB3_RD_CNTL_MISC__RDRET_FIFO_CREDITS_MASK 0xFC000000L +//DAGB3_RD_TLB_CREDIT +#define DAGB3_RD_TLB_CREDIT__TLB0__SHIFT 0x0 +#define DAGB3_RD_TLB_CREDIT__TLB1__SHIFT 0x5 +#define DAGB3_RD_TLB_CREDIT__TLB2__SHIFT 0xa +#define DAGB3_RD_TLB_CREDIT__TLB3__SHIFT 0xf +#define DAGB3_RD_TLB_CREDIT__TLB4__SHIFT 0x14 +#define DAGB3_RD_TLB_CREDIT__TLB5__SHIFT 0x19 +#define DAGB3_RD_TLB_CREDIT__TLB0_MASK 0x0000001FL +#define DAGB3_RD_TLB_CREDIT__TLB1_MASK 0x000003E0L +#define DAGB3_RD_TLB_CREDIT__TLB2_MASK 0x00007C00L +#define DAGB3_RD_TLB_CREDIT__TLB3_MASK 0x000F8000L +#define DAGB3_RD_TLB_CREDIT__TLB4_MASK 0x01F00000L +#define DAGB3_RD_TLB_CREDIT__TLB5_MASK 0x3E000000L +//DAGB3_RDCLI_ASK_PENDING +#define DAGB3_RDCLI_ASK_PENDING__BUSY__SHIFT 0x0 +#define DAGB3_RDCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB3_RDCLI_GO_PENDING +#define DAGB3_RDCLI_GO_PENDING__BUSY__SHIFT 0x0 +#define DAGB3_RDCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB3_RDCLI_GBLSEND_PENDING +#define DAGB3_RDCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0 +#define DAGB3_RDCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB3_RDCLI_TLB_PENDING +#define DAGB3_RDCLI_TLB_PENDING__BUSY__SHIFT 0x0 +#define DAGB3_RDCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB3_RDCLI_OARB_PENDING +#define DAGB3_RDCLI_OARB_PENDING__BUSY__SHIFT 0x0 +#define DAGB3_RDCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB3_RDCLI_OSD_PENDING +#define DAGB3_RDCLI_OSD_PENDING__BUSY__SHIFT 0x0 +#define DAGB3_RDCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB3_WRCLI0 +#define DAGB3_WRCLI0__VIRT_CHAN__SHIFT 0x0 +#define DAGB3_WRCLI0__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB3_WRCLI0__URG_HIGH__SHIFT 0x4 +#define DAGB3_WRCLI0__URG_LOW__SHIFT 0x8 +#define DAGB3_WRCLI0__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB3_WRCLI0__MAX_BW__SHIFT 0xd +#define DAGB3_WRCLI0__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB3_WRCLI0__MIN_BW__SHIFT 0x16 +#define DAGB3_WRCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB3_WRCLI0__MAX_OSD__SHIFT 0x1a +#define DAGB3_WRCLI0__VIRT_CHAN_MASK 0x00000007L 
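/*
 * Editorial aside, not part of the patch: a minimal, self-contained C
 * sketch of how the generated __SHIFT/_MASK pairs in these headers are
 * conventionally combined to read and update one field of a 32-bit
 * register value. The GET_FIELD/SET_FIELD helpers below are hypothetical
 * stand-ins (the amdgpu driver has its own accessors built on the same
 * <REG>__<FIELD>__SHIFT / <REG>__<FIELD>_MASK naming convention); only the
 * two DAGB3_WRCLI0 values are copied from the definitions in this block.
 */
#include <stdint.h>
#include <stdio.h>

/* Field position and mask, copied from the DAGB3_WRCLI0 definitions. */
#define DAGB3_WRCLI0__URG_HIGH__SHIFT 0x4
#define DAGB3_WRCLI0__URG_HIGH_MASK   0x000000F0L

/* Generic accessors keyed off the <REG>__<FIELD> naming convention. */
#define GET_FIELD(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)
#define SET_FIELD(val, reg, field, fv) \
	(((val) & ~(uint32_t)reg##__##field##_MASK) | \
	 (((uint32_t)(fv) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

int main(void)
{
	uint32_t regval = 0;

	/* Pack URG_HIGH = 0xA into bits [7:4]; regval becomes 0x000000A0. */
	regval = SET_FIELD(regval, DAGB3_WRCLI0, URG_HIGH, 0xA);
	printf("URG_HIGH = 0x%x\n",
	       (unsigned)GET_FIELD(regval, DAGB3_WRCLI0, URG_HIGH));
	return 0;
}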
+#define DAGB3_WRCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB3_WRCLI0__URG_HIGH_MASK 0x000000F0L +#define DAGB3_WRCLI0__URG_LOW_MASK 0x00000F00L +#define DAGB3_WRCLI0__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB3_WRCLI0__MAX_BW_MASK 0x001FE000L +#define DAGB3_WRCLI0__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB3_WRCLI0__MIN_BW_MASK 0x01C00000L +#define DAGB3_WRCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB3_WRCLI0__MAX_OSD_MASK 0xFC000000L +//DAGB3_WRCLI1 +#define DAGB3_WRCLI1__VIRT_CHAN__SHIFT 0x0 +#define DAGB3_WRCLI1__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB3_WRCLI1__URG_HIGH__SHIFT 0x4 +#define DAGB3_WRCLI1__URG_LOW__SHIFT 0x8 +#define DAGB3_WRCLI1__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB3_WRCLI1__MAX_BW__SHIFT 0xd +#define DAGB3_WRCLI1__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB3_WRCLI1__MIN_BW__SHIFT 0x16 +#define DAGB3_WRCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB3_WRCLI1__MAX_OSD__SHIFT 0x1a +#define DAGB3_WRCLI1__VIRT_CHAN_MASK 0x00000007L +#define DAGB3_WRCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB3_WRCLI1__URG_HIGH_MASK 0x000000F0L +#define DAGB3_WRCLI1__URG_LOW_MASK 0x00000F00L +#define DAGB3_WRCLI1__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB3_WRCLI1__MAX_BW_MASK 0x001FE000L +#define DAGB3_WRCLI1__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB3_WRCLI1__MIN_BW_MASK 0x01C00000L +#define DAGB3_WRCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB3_WRCLI1__MAX_OSD_MASK 0xFC000000L +//DAGB3_WRCLI2 +#define DAGB3_WRCLI2__VIRT_CHAN__SHIFT 0x0 +#define DAGB3_WRCLI2__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB3_WRCLI2__URG_HIGH__SHIFT 0x4 +#define DAGB3_WRCLI2__URG_LOW__SHIFT 0x8 +#define DAGB3_WRCLI2__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB3_WRCLI2__MAX_BW__SHIFT 0xd +#define DAGB3_WRCLI2__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB3_WRCLI2__MIN_BW__SHIFT 0x16 +#define DAGB3_WRCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB3_WRCLI2__MAX_OSD__SHIFT 0x1a +#define DAGB3_WRCLI2__VIRT_CHAN_MASK 0x00000007L +#define DAGB3_WRCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB3_WRCLI2__URG_HIGH_MASK 0x000000F0L +#define DAGB3_WRCLI2__URG_LOW_MASK 0x00000F00L +#define DAGB3_WRCLI2__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB3_WRCLI2__MAX_BW_MASK 0x001FE000L +#define DAGB3_WRCLI2__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB3_WRCLI2__MIN_BW_MASK 0x01C00000L +#define DAGB3_WRCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB3_WRCLI2__MAX_OSD_MASK 0xFC000000L +//DAGB3_WRCLI3 +#define DAGB3_WRCLI3__VIRT_CHAN__SHIFT 0x0 +#define DAGB3_WRCLI3__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB3_WRCLI3__URG_HIGH__SHIFT 0x4 +#define DAGB3_WRCLI3__URG_LOW__SHIFT 0x8 +#define DAGB3_WRCLI3__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB3_WRCLI3__MAX_BW__SHIFT 0xd +#define DAGB3_WRCLI3__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB3_WRCLI3__MIN_BW__SHIFT 0x16 +#define DAGB3_WRCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB3_WRCLI3__MAX_OSD__SHIFT 0x1a +#define DAGB3_WRCLI3__VIRT_CHAN_MASK 0x00000007L +#define DAGB3_WRCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB3_WRCLI3__URG_HIGH_MASK 0x000000F0L +#define DAGB3_WRCLI3__URG_LOW_MASK 0x00000F00L +#define DAGB3_WRCLI3__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB3_WRCLI3__MAX_BW_MASK 0x001FE000L +#define DAGB3_WRCLI3__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB3_WRCLI3__MIN_BW_MASK 0x01C00000L +#define DAGB3_WRCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB3_WRCLI3__MAX_OSD_MASK 0xFC000000L +//DAGB3_WRCLI4 +#define DAGB3_WRCLI4__VIRT_CHAN__SHIFT 0x0 +#define DAGB3_WRCLI4__CHECK_TLB_CREDIT__SHIFT 0x3 +#define 
DAGB3_WRCLI4__URG_HIGH__SHIFT 0x4 +#define DAGB3_WRCLI4__URG_LOW__SHIFT 0x8 +#define DAGB3_WRCLI4__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB3_WRCLI4__MAX_BW__SHIFT 0xd +#define DAGB3_WRCLI4__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB3_WRCLI4__MIN_BW__SHIFT 0x16 +#define DAGB3_WRCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB3_WRCLI4__MAX_OSD__SHIFT 0x1a +#define DAGB3_WRCLI4__VIRT_CHAN_MASK 0x00000007L +#define DAGB3_WRCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB3_WRCLI4__URG_HIGH_MASK 0x000000F0L +#define DAGB3_WRCLI4__URG_LOW_MASK 0x00000F00L +#define DAGB3_WRCLI4__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB3_WRCLI4__MAX_BW_MASK 0x001FE000L +#define DAGB3_WRCLI4__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB3_WRCLI4__MIN_BW_MASK 0x01C00000L +#define DAGB3_WRCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB3_WRCLI4__MAX_OSD_MASK 0xFC000000L +//DAGB3_WRCLI5 +#define DAGB3_WRCLI5__VIRT_CHAN__SHIFT 0x0 +#define DAGB3_WRCLI5__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB3_WRCLI5__URG_HIGH__SHIFT 0x4 +#define DAGB3_WRCLI5__URG_LOW__SHIFT 0x8 +#define DAGB3_WRCLI5__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB3_WRCLI5__MAX_BW__SHIFT 0xd +#define DAGB3_WRCLI5__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB3_WRCLI5__MIN_BW__SHIFT 0x16 +#define DAGB3_WRCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB3_WRCLI5__MAX_OSD__SHIFT 0x1a +#define DAGB3_WRCLI5__VIRT_CHAN_MASK 0x00000007L +#define DAGB3_WRCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB3_WRCLI5__URG_HIGH_MASK 0x000000F0L +#define DAGB3_WRCLI5__URG_LOW_MASK 0x00000F00L +#define DAGB3_WRCLI5__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB3_WRCLI5__MAX_BW_MASK 0x001FE000L +#define DAGB3_WRCLI5__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB3_WRCLI5__MIN_BW_MASK 0x01C00000L +#define DAGB3_WRCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB3_WRCLI5__MAX_OSD_MASK 0xFC000000L +//DAGB3_WRCLI6 +#define DAGB3_WRCLI6__VIRT_CHAN__SHIFT 0x0 +#define DAGB3_WRCLI6__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB3_WRCLI6__URG_HIGH__SHIFT 0x4 +#define DAGB3_WRCLI6__URG_LOW__SHIFT 0x8 +#define DAGB3_WRCLI6__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB3_WRCLI6__MAX_BW__SHIFT 0xd +#define DAGB3_WRCLI6__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB3_WRCLI6__MIN_BW__SHIFT 0x16 +#define DAGB3_WRCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB3_WRCLI6__MAX_OSD__SHIFT 0x1a +#define DAGB3_WRCLI6__VIRT_CHAN_MASK 0x00000007L +#define DAGB3_WRCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB3_WRCLI6__URG_HIGH_MASK 0x000000F0L +#define DAGB3_WRCLI6__URG_LOW_MASK 0x00000F00L +#define DAGB3_WRCLI6__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB3_WRCLI6__MAX_BW_MASK 0x001FE000L +#define DAGB3_WRCLI6__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB3_WRCLI6__MIN_BW_MASK 0x01C00000L +#define DAGB3_WRCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB3_WRCLI6__MAX_OSD_MASK 0xFC000000L +//DAGB3_WRCLI7 +#define DAGB3_WRCLI7__VIRT_CHAN__SHIFT 0x0 +#define DAGB3_WRCLI7__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB3_WRCLI7__URG_HIGH__SHIFT 0x4 +#define DAGB3_WRCLI7__URG_LOW__SHIFT 0x8 +#define DAGB3_WRCLI7__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB3_WRCLI7__MAX_BW__SHIFT 0xd +#define DAGB3_WRCLI7__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB3_WRCLI7__MIN_BW__SHIFT 0x16 +#define DAGB3_WRCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB3_WRCLI7__MAX_OSD__SHIFT 0x1a +#define DAGB3_WRCLI7__VIRT_CHAN_MASK 0x00000007L +#define DAGB3_WRCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB3_WRCLI7__URG_HIGH_MASK 0x000000F0L +#define DAGB3_WRCLI7__URG_LOW_MASK 0x00000F00L +#define 
DAGB3_WRCLI7__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB3_WRCLI7__MAX_BW_MASK 0x001FE000L +#define DAGB3_WRCLI7__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB3_WRCLI7__MIN_BW_MASK 0x01C00000L +#define DAGB3_WRCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB3_WRCLI7__MAX_OSD_MASK 0xFC000000L +//DAGB3_WRCLI8 +#define DAGB3_WRCLI8__VIRT_CHAN__SHIFT 0x0 +#define DAGB3_WRCLI8__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB3_WRCLI8__URG_HIGH__SHIFT 0x4 +#define DAGB3_WRCLI8__URG_LOW__SHIFT 0x8 +#define DAGB3_WRCLI8__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB3_WRCLI8__MAX_BW__SHIFT 0xd +#define DAGB3_WRCLI8__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB3_WRCLI8__MIN_BW__SHIFT 0x16 +#define DAGB3_WRCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB3_WRCLI8__MAX_OSD__SHIFT 0x1a +#define DAGB3_WRCLI8__VIRT_CHAN_MASK 0x00000007L +#define DAGB3_WRCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB3_WRCLI8__URG_HIGH_MASK 0x000000F0L +#define DAGB3_WRCLI8__URG_LOW_MASK 0x00000F00L +#define DAGB3_WRCLI8__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB3_WRCLI8__MAX_BW_MASK 0x001FE000L +#define DAGB3_WRCLI8__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB3_WRCLI8__MIN_BW_MASK 0x01C00000L +#define DAGB3_WRCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB3_WRCLI8__MAX_OSD_MASK 0xFC000000L +//DAGB3_WRCLI9 +#define DAGB3_WRCLI9__VIRT_CHAN__SHIFT 0x0 +#define DAGB3_WRCLI9__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB3_WRCLI9__URG_HIGH__SHIFT 0x4 +#define DAGB3_WRCLI9__URG_LOW__SHIFT 0x8 +#define DAGB3_WRCLI9__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB3_WRCLI9__MAX_BW__SHIFT 0xd +#define DAGB3_WRCLI9__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB3_WRCLI9__MIN_BW__SHIFT 0x16 +#define DAGB3_WRCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB3_WRCLI9__MAX_OSD__SHIFT 0x1a +#define DAGB3_WRCLI9__VIRT_CHAN_MASK 0x00000007L +#define DAGB3_WRCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB3_WRCLI9__URG_HIGH_MASK 0x000000F0L +#define DAGB3_WRCLI9__URG_LOW_MASK 0x00000F00L +#define DAGB3_WRCLI9__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB3_WRCLI9__MAX_BW_MASK 0x001FE000L +#define DAGB3_WRCLI9__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB3_WRCLI9__MIN_BW_MASK 0x01C00000L +#define DAGB3_WRCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB3_WRCLI9__MAX_OSD_MASK 0xFC000000L +//DAGB3_WRCLI10 +#define DAGB3_WRCLI10__VIRT_CHAN__SHIFT 0x0 +#define DAGB3_WRCLI10__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB3_WRCLI10__URG_HIGH__SHIFT 0x4 +#define DAGB3_WRCLI10__URG_LOW__SHIFT 0x8 +#define DAGB3_WRCLI10__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB3_WRCLI10__MAX_BW__SHIFT 0xd +#define DAGB3_WRCLI10__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB3_WRCLI10__MIN_BW__SHIFT 0x16 +#define DAGB3_WRCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB3_WRCLI10__MAX_OSD__SHIFT 0x1a +#define DAGB3_WRCLI10__VIRT_CHAN_MASK 0x00000007L +#define DAGB3_WRCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB3_WRCLI10__URG_HIGH_MASK 0x000000F0L +#define DAGB3_WRCLI10__URG_LOW_MASK 0x00000F00L +#define DAGB3_WRCLI10__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB3_WRCLI10__MAX_BW_MASK 0x001FE000L +#define DAGB3_WRCLI10__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB3_WRCLI10__MIN_BW_MASK 0x01C00000L +#define DAGB3_WRCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB3_WRCLI10__MAX_OSD_MASK 0xFC000000L +//DAGB3_WRCLI11 +#define DAGB3_WRCLI11__VIRT_CHAN__SHIFT 0x0 +#define DAGB3_WRCLI11__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB3_WRCLI11__URG_HIGH__SHIFT 0x4 +#define DAGB3_WRCLI11__URG_LOW__SHIFT 0x8 +#define DAGB3_WRCLI11__MAX_BW_ENABLE__SHIFT 0xc +#define 
DAGB3_WRCLI11__MAX_BW__SHIFT 0xd +#define DAGB3_WRCLI11__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB3_WRCLI11__MIN_BW__SHIFT 0x16 +#define DAGB3_WRCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB3_WRCLI11__MAX_OSD__SHIFT 0x1a +#define DAGB3_WRCLI11__VIRT_CHAN_MASK 0x00000007L +#define DAGB3_WRCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB3_WRCLI11__URG_HIGH_MASK 0x000000F0L +#define DAGB3_WRCLI11__URG_LOW_MASK 0x00000F00L +#define DAGB3_WRCLI11__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB3_WRCLI11__MAX_BW_MASK 0x001FE000L +#define DAGB3_WRCLI11__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB3_WRCLI11__MIN_BW_MASK 0x01C00000L +#define DAGB3_WRCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB3_WRCLI11__MAX_OSD_MASK 0xFC000000L +//DAGB3_WRCLI12 +#define DAGB3_WRCLI12__VIRT_CHAN__SHIFT 0x0 +#define DAGB3_WRCLI12__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB3_WRCLI12__URG_HIGH__SHIFT 0x4 +#define DAGB3_WRCLI12__URG_LOW__SHIFT 0x8 +#define DAGB3_WRCLI12__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB3_WRCLI12__MAX_BW__SHIFT 0xd +#define DAGB3_WRCLI12__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB3_WRCLI12__MIN_BW__SHIFT 0x16 +#define DAGB3_WRCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB3_WRCLI12__MAX_OSD__SHIFT 0x1a +#define DAGB3_WRCLI12__VIRT_CHAN_MASK 0x00000007L +#define DAGB3_WRCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB3_WRCLI12__URG_HIGH_MASK 0x000000F0L +#define DAGB3_WRCLI12__URG_LOW_MASK 0x00000F00L +#define DAGB3_WRCLI12__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB3_WRCLI12__MAX_BW_MASK 0x001FE000L +#define DAGB3_WRCLI12__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB3_WRCLI12__MIN_BW_MASK 0x01C00000L +#define DAGB3_WRCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB3_WRCLI12__MAX_OSD_MASK 0xFC000000L +//DAGB3_WRCLI13 +#define DAGB3_WRCLI13__VIRT_CHAN__SHIFT 0x0 +#define DAGB3_WRCLI13__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB3_WRCLI13__URG_HIGH__SHIFT 0x4 +#define DAGB3_WRCLI13__URG_LOW__SHIFT 0x8 +#define DAGB3_WRCLI13__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB3_WRCLI13__MAX_BW__SHIFT 0xd +#define DAGB3_WRCLI13__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB3_WRCLI13__MIN_BW__SHIFT 0x16 +#define DAGB3_WRCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB3_WRCLI13__MAX_OSD__SHIFT 0x1a +#define DAGB3_WRCLI13__VIRT_CHAN_MASK 0x00000007L +#define DAGB3_WRCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB3_WRCLI13__URG_HIGH_MASK 0x000000F0L +#define DAGB3_WRCLI13__URG_LOW_MASK 0x00000F00L +#define DAGB3_WRCLI13__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB3_WRCLI13__MAX_BW_MASK 0x001FE000L +#define DAGB3_WRCLI13__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB3_WRCLI13__MIN_BW_MASK 0x01C00000L +#define DAGB3_WRCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB3_WRCLI13__MAX_OSD_MASK 0xFC000000L +//DAGB3_WRCLI14 +#define DAGB3_WRCLI14__VIRT_CHAN__SHIFT 0x0 +#define DAGB3_WRCLI14__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB3_WRCLI14__URG_HIGH__SHIFT 0x4 +#define DAGB3_WRCLI14__URG_LOW__SHIFT 0x8 +#define DAGB3_WRCLI14__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB3_WRCLI14__MAX_BW__SHIFT 0xd +#define DAGB3_WRCLI14__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB3_WRCLI14__MIN_BW__SHIFT 0x16 +#define DAGB3_WRCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB3_WRCLI14__MAX_OSD__SHIFT 0x1a +#define DAGB3_WRCLI14__VIRT_CHAN_MASK 0x00000007L +#define DAGB3_WRCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB3_WRCLI14__URG_HIGH_MASK 0x000000F0L +#define DAGB3_WRCLI14__URG_LOW_MASK 0x00000F00L +#define DAGB3_WRCLI14__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB3_WRCLI14__MAX_BW_MASK 
0x001FE000L +#define DAGB3_WRCLI14__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB3_WRCLI14__MIN_BW_MASK 0x01C00000L +#define DAGB3_WRCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB3_WRCLI14__MAX_OSD_MASK 0xFC000000L +//DAGB3_WRCLI15 +#define DAGB3_WRCLI15__VIRT_CHAN__SHIFT 0x0 +#define DAGB3_WRCLI15__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB3_WRCLI15__URG_HIGH__SHIFT 0x4 +#define DAGB3_WRCLI15__URG_LOW__SHIFT 0x8 +#define DAGB3_WRCLI15__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB3_WRCLI15__MAX_BW__SHIFT 0xd +#define DAGB3_WRCLI15__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB3_WRCLI15__MIN_BW__SHIFT 0x16 +#define DAGB3_WRCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB3_WRCLI15__MAX_OSD__SHIFT 0x1a +#define DAGB3_WRCLI15__VIRT_CHAN_MASK 0x00000007L +#define DAGB3_WRCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB3_WRCLI15__URG_HIGH_MASK 0x000000F0L +#define DAGB3_WRCLI15__URG_LOW_MASK 0x00000F00L +#define DAGB3_WRCLI15__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB3_WRCLI15__MAX_BW_MASK 0x001FE000L +#define DAGB3_WRCLI15__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB3_WRCLI15__MIN_BW_MASK 0x01C00000L +#define DAGB3_WRCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB3_WRCLI15__MAX_OSD_MASK 0xFC000000L +//DAGB3_WR_CNTL +#define DAGB3_WR_CNTL__SCLK_FREQ__SHIFT 0x0 +#define DAGB3_WR_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4 +#define DAGB3_WR_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa +#define DAGB3_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10 +#define DAGB3_WR_CNTL__IO_LEVEL__SHIFT 0x11 +#define DAGB3_WR_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14 +#define DAGB3_WR_CNTL__SHARE_VC_NUM__SHIFT 0x17 +#define DAGB3_WR_CNTL__SCLK_FREQ_MASK 0x0000000FL +#define DAGB3_WR_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L +#define DAGB3_WR_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L +#define DAGB3_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L +#define DAGB3_WR_CNTL__IO_LEVEL_MASK 0x000E0000L +#define DAGB3_WR_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L +#define DAGB3_WR_CNTL__SHARE_VC_NUM_MASK 0x03800000L +//DAGB3_WR_GMI_CNTL +#define DAGB3_WR_GMI_CNTL__EA_CREDIT__SHIFT 0x0 +#define DAGB3_WR_GMI_CNTL__LEVEL__SHIFT 0x6 +#define DAGB3_WR_GMI_CNTL__MAX_BURST__SHIFT 0x9 +#define DAGB3_WR_GMI_CNTL__LAZY_TIMER__SHIFT 0xd +#define DAGB3_WR_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL +#define DAGB3_WR_GMI_CNTL__LEVEL_MASK 0x000001C0L +#define DAGB3_WR_GMI_CNTL__MAX_BURST_MASK 0x00001E00L +#define DAGB3_WR_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L +//DAGB3_WR_ADDR_DAGB +#define DAGB3_WR_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0 +#define DAGB3_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3 +#define DAGB3_WR_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6 +#define DAGB3_WR_ADDR_DAGB__WHOAMI__SHIFT 0x7 +#define DAGB3_WR_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L +#define DAGB3_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L +#define DAGB3_WR_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L +#define DAGB3_WR_ADDR_DAGB__WHOAMI_MASK 0x00001F80L +//DAGB3_WR_OUTPUT_DAGB_MAX_BURST +#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0 +#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4 +#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8 +#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc +#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10 +#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14 +#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18 +#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c +#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL +#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L +#define 
DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L +#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L +#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L +#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L +#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L +#define DAGB3_WR_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L +//DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER +#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0 +#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4 +#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8 +#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc +#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10 +#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14 +#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18 +#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c +#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL +#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L +#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L +#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L +#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L +#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L +#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L +#define DAGB3_WR_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L +//DAGB3_WR_CGTT_CLK_CTRL +#define DAGB3_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB3_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB3_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB3_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB3_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB3_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB3_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB3_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB3_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB3_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB3_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB3_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB3_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB3_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB3_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB3_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB3_L1TLB_WR_CGTT_CLK_CTRL +#define DAGB3_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB3_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB3_L1TLB_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB3_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB3_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB3_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB3_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB3_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB3_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB3_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB3_L1TLB_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB3_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB3_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB3_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB3_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB3_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 
0x80000000L +//DAGB3_ATCVM_WR_CGTT_CLK_CTRL +#define DAGB3_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB3_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB3_ATCVM_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB3_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB3_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB3_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB3_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB3_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB3_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB3_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB3_ATCVM_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB3_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB3_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB3_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB3_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB3_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB3_WR_ADDR_DAGB_MAX_BURST0 +#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0 +#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4 +#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8 +#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc +#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10 +#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14 +#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18 +#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c +#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL +#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L +#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L +#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L +#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L +#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L +#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L +#define DAGB3_WR_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L +//DAGB3_WR_ADDR_DAGB_LAZY_TIMER0 +#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0 +#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4 +#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8 +#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc +#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10 +#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14 +#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18 +#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c +#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL +#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L +#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L +#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L +#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L +#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L +#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L +#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L +//DAGB3_WR_ADDR_DAGB_MAX_BURST1 +#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0 +#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4 +#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8 +#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc +#define 
DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10 +#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14 +#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18 +#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c +#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL +#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L +#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L +#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L +#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L +#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L +#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L +#define DAGB3_WR_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L +//DAGB3_WR_ADDR_DAGB_LAZY_TIMER1 +#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0 +#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4 +#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8 +#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc +#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10 +#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14 +#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18 +#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c +#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL +#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L +#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L +#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L +#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L +#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L +#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L +#define DAGB3_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L +//DAGB3_WR_DATA_DAGB +#define DAGB3_WR_DATA_DAGB__DAGB_ENABLE__SHIFT 0x0 +#define DAGB3_WR_DATA_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3 +#define DAGB3_WR_DATA_DAGB__DISABLE_SELF_INIT__SHIFT 0x6 +#define DAGB3_WR_DATA_DAGB__WHOAMI__SHIFT 0x7 +#define DAGB3_WR_DATA_DAGB__DAGB_ENABLE_MASK 0x00000007L +#define DAGB3_WR_DATA_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L +#define DAGB3_WR_DATA_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L +#define DAGB3_WR_DATA_DAGB__WHOAMI_MASK 0x00001F80L +//DAGB3_WR_DATA_DAGB_MAX_BURST0 +#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0 +#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4 +#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8 +#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc +#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10 +#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14 +#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18 +#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c +#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL +#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L +#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L +#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L +#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L +#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L +#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L +#define DAGB3_WR_DATA_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L +//DAGB3_WR_DATA_DAGB_LAZY_TIMER0 +#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0 +#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4 +#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8 +#define 
DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc +#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10 +#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14 +#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18 +#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c +#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL +#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L +#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L +#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L +#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L +#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L +#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L +#define DAGB3_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L +//DAGB3_WR_DATA_DAGB_MAX_BURST1 +#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0 +#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4 +#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8 +#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc +#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10 +#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14 +#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18 +#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c +#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL +#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L +#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L +#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L +#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L +#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L +#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L +#define DAGB3_WR_DATA_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L +//DAGB3_WR_DATA_DAGB_LAZY_TIMER1 +#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0 +#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4 +#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8 +#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc +#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10 +#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14 +#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18 +#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c +#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL +#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L +#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L +#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L +#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L +#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L +#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L +#define DAGB3_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L +//DAGB3_WR_VC0_CNTL +#define DAGB3_WR_VC0_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB3_WR_VC0_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB3_WR_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB3_WR_VC0_CNTL__MAX_BW__SHIFT 0xc +#define DAGB3_WR_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB3_WR_VC0_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB3_WR_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB3_WR_VC0_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB3_WR_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB3_WR_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB3_WR_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define 
DAGB3_WR_VC0_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB3_WR_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB3_WR_VC0_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB3_WR_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB3_WR_VC0_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB3_WR_VC1_CNTL +#define DAGB3_WR_VC1_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB3_WR_VC1_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB3_WR_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB3_WR_VC1_CNTL__MAX_BW__SHIFT 0xc +#define DAGB3_WR_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB3_WR_VC1_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB3_WR_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB3_WR_VC1_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB3_WR_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB3_WR_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB3_WR_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB3_WR_VC1_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB3_WR_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB3_WR_VC1_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB3_WR_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB3_WR_VC1_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB3_WR_VC2_CNTL +#define DAGB3_WR_VC2_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB3_WR_VC2_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB3_WR_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB3_WR_VC2_CNTL__MAX_BW__SHIFT 0xc +#define DAGB3_WR_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB3_WR_VC2_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB3_WR_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB3_WR_VC2_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB3_WR_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB3_WR_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB3_WR_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB3_WR_VC2_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB3_WR_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB3_WR_VC2_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB3_WR_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB3_WR_VC2_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB3_WR_VC3_CNTL +#define DAGB3_WR_VC3_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB3_WR_VC3_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB3_WR_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB3_WR_VC3_CNTL__MAX_BW__SHIFT 0xc +#define DAGB3_WR_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB3_WR_VC3_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB3_WR_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB3_WR_VC3_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB3_WR_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB3_WR_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB3_WR_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB3_WR_VC3_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB3_WR_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB3_WR_VC3_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB3_WR_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB3_WR_VC3_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB3_WR_VC4_CNTL +#define DAGB3_WR_VC4_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB3_WR_VC4_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB3_WR_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB3_WR_VC4_CNTL__MAX_BW__SHIFT 0xc +#define DAGB3_WR_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB3_WR_VC4_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB3_WR_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB3_WR_VC4_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB3_WR_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB3_WR_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB3_WR_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB3_WR_VC4_CNTL__MAX_BW_MASK 
0x000FF000L +#define DAGB3_WR_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB3_WR_VC4_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB3_WR_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB3_WR_VC4_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB3_WR_VC5_CNTL +#define DAGB3_WR_VC5_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB3_WR_VC5_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB3_WR_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB3_WR_VC5_CNTL__MAX_BW__SHIFT 0xc +#define DAGB3_WR_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB3_WR_VC5_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB3_WR_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB3_WR_VC5_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB3_WR_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB3_WR_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB3_WR_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB3_WR_VC5_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB3_WR_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB3_WR_VC5_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB3_WR_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB3_WR_VC5_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB3_WR_VC6_CNTL +#define DAGB3_WR_VC6_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB3_WR_VC6_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB3_WR_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB3_WR_VC6_CNTL__MAX_BW__SHIFT 0xc +#define DAGB3_WR_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB3_WR_VC6_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB3_WR_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB3_WR_VC6_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB3_WR_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB3_WR_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB3_WR_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB3_WR_VC6_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB3_WR_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB3_WR_VC6_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB3_WR_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB3_WR_VC6_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB3_WR_VC7_CNTL +#define DAGB3_WR_VC7_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB3_WR_VC7_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB3_WR_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB3_WR_VC7_CNTL__MAX_BW__SHIFT 0xc +#define DAGB3_WR_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB3_WR_VC7_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB3_WR_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB3_WR_VC7_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB3_WR_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB3_WR_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB3_WR_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB3_WR_VC7_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB3_WR_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB3_WR_VC7_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB3_WR_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB3_WR_VC7_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB3_WR_CNTL_MISC +#define DAGB3_WR_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0 +#define DAGB3_WR_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6 +#define DAGB3_WR_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd +#define DAGB3_WR_CNTL_MISC__STOR_CC_LEGACY_MODE__SHIFT 0x13 +#define DAGB3_WR_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14 +#define DAGB3_WR_CNTL_MISC__UTCL2_CID__SHIFT 0x15 +#define DAGB3_WR_CNTL_MISC__RDRET_FIFO_CREDITS__SHIFT 0x1a +#define DAGB3_WR_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL +#define DAGB3_WR_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L +#define DAGB3_WR_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L +#define DAGB3_WR_CNTL_MISC__STOR_CC_LEGACY_MODE_MASK 0x00080000L +#define 
DAGB3_WR_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L +#define DAGB3_WR_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L +#define DAGB3_WR_CNTL_MISC__RDRET_FIFO_CREDITS_MASK 0xFC000000L +//DAGB3_WR_TLB_CREDIT +#define DAGB3_WR_TLB_CREDIT__TLB0__SHIFT 0x0 +#define DAGB3_WR_TLB_CREDIT__TLB1__SHIFT 0x5 +#define DAGB3_WR_TLB_CREDIT__TLB2__SHIFT 0xa +#define DAGB3_WR_TLB_CREDIT__TLB3__SHIFT 0xf +#define DAGB3_WR_TLB_CREDIT__TLB4__SHIFT 0x14 +#define DAGB3_WR_TLB_CREDIT__TLB5__SHIFT 0x19 +#define DAGB3_WR_TLB_CREDIT__TLB0_MASK 0x0000001FL +#define DAGB3_WR_TLB_CREDIT__TLB1_MASK 0x000003E0L +#define DAGB3_WR_TLB_CREDIT__TLB2_MASK 0x00007C00L +#define DAGB3_WR_TLB_CREDIT__TLB3_MASK 0x000F8000L +#define DAGB3_WR_TLB_CREDIT__TLB4_MASK 0x01F00000L +#define DAGB3_WR_TLB_CREDIT__TLB5_MASK 0x3E000000L +//DAGB3_WR_DATA_CREDIT +#define DAGB3_WR_DATA_CREDIT__DLOCK_VC_CREDITS__SHIFT 0x0 +#define DAGB3_WR_DATA_CREDIT__LARGE_BURST_CREDITS__SHIFT 0x8 +#define DAGB3_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS__SHIFT 0x10 +#define DAGB3_WR_DATA_CREDIT__SMALL_BURST_CREDITS__SHIFT 0x18 +#define DAGB3_WR_DATA_CREDIT__DLOCK_VC_CREDITS_MASK 0x000000FFL +#define DAGB3_WR_DATA_CREDIT__LARGE_BURST_CREDITS_MASK 0x0000FF00L +#define DAGB3_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS_MASK 0x00FF0000L +#define DAGB3_WR_DATA_CREDIT__SMALL_BURST_CREDITS_MASK 0xFF000000L +//DAGB3_WR_MISC_CREDIT +#define DAGB3_WR_MISC_CREDIT__ATOMIC_CREDIT__SHIFT 0x0 +#define DAGB3_WR_MISC_CREDIT__DLOCK_VC_NUM__SHIFT 0x6 +#define DAGB3_WR_MISC_CREDIT__OSD_CREDIT__SHIFT 0x9 +#define DAGB3_WR_MISC_CREDIT__OSD_DLOCK_CREDIT__SHIFT 0x10 +#define DAGB3_WR_MISC_CREDIT__ATOMIC_CREDIT_MASK 0x0000003FL +#define DAGB3_WR_MISC_CREDIT__DLOCK_VC_NUM_MASK 0x000001C0L +#define DAGB3_WR_MISC_CREDIT__OSD_CREDIT_MASK 0x0000FE00L +#define DAGB3_WR_MISC_CREDIT__OSD_DLOCK_CREDIT_MASK 0x007F0000L +//DAGB3_WRCLI_ASK_PENDING +#define DAGB3_WRCLI_ASK_PENDING__BUSY__SHIFT 0x0 +#define DAGB3_WRCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB3_WRCLI_GO_PENDING +#define DAGB3_WRCLI_GO_PENDING__BUSY__SHIFT 0x0 +#define DAGB3_WRCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB3_WRCLI_GBLSEND_PENDING +#define DAGB3_WRCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0 +#define DAGB3_WRCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB3_WRCLI_TLB_PENDING +#define DAGB3_WRCLI_TLB_PENDING__BUSY__SHIFT 0x0 +#define DAGB3_WRCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB3_WRCLI_OARB_PENDING +#define DAGB3_WRCLI_OARB_PENDING__BUSY__SHIFT 0x0 +#define DAGB3_WRCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB3_WRCLI_OSD_PENDING +#define DAGB3_WRCLI_OSD_PENDING__BUSY__SHIFT 0x0 +#define DAGB3_WRCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB3_WRCLI_DBUS_ASK_PENDING +#define DAGB3_WRCLI_DBUS_ASK_PENDING__BUSY__SHIFT 0x0 +#define DAGB3_WRCLI_DBUS_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB3_WRCLI_DBUS_GO_PENDING +#define DAGB3_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0 +#define DAGB3_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB3_DAGB_DLY +#define DAGB3_DAGB_DLY__DLY__SHIFT 0x0 +#define DAGB3_DAGB_DLY__CLI__SHIFT 0x8 +#define DAGB3_DAGB_DLY__POS__SHIFT 0x10 +#define DAGB3_DAGB_DLY__DLY_MASK 0x000000FFL +#define DAGB3_DAGB_DLY__CLI_MASK 0x0000FF00L +#define DAGB3_DAGB_DLY__POS_MASK 0x000F0000L +//DAGB3_CNTL_MISC +#define DAGB3_CNTL_MISC__EA_VC0_REMAP__SHIFT 0x0 +#define DAGB3_CNTL_MISC__EA_VC1_REMAP__SHIFT 0x3 +#define DAGB3_CNTL_MISC__EA_VC2_REMAP__SHIFT 0x6 +#define DAGB3_CNTL_MISC__EA_VC3_REMAP__SHIFT 0x9 +#define DAGB3_CNTL_MISC__EA_VC4_REMAP__SHIFT 0xc +#define DAGB3_CNTL_MISC__EA_VC5_REMAP__SHIFT 0xf +#define 
DAGB3_CNTL_MISC__EA_VC6_REMAP__SHIFT 0x12 +#define DAGB3_CNTL_MISC__EA_VC7_REMAP__SHIFT 0x15 +#define DAGB3_CNTL_MISC__BW_INIT_CYCLE__SHIFT 0x18 +#define DAGB3_CNTL_MISC__BW_RW_GAP_CYCLE__SHIFT 0x1e +#define DAGB3_CNTL_MISC__EA_VC0_REMAP_MASK 0x00000007L +#define DAGB3_CNTL_MISC__EA_VC1_REMAP_MASK 0x00000038L +#define DAGB3_CNTL_MISC__EA_VC2_REMAP_MASK 0x000001C0L +#define DAGB3_CNTL_MISC__EA_VC3_REMAP_MASK 0x00000E00L +#define DAGB3_CNTL_MISC__EA_VC4_REMAP_MASK 0x00007000L +#define DAGB3_CNTL_MISC__EA_VC5_REMAP_MASK 0x00038000L +#define DAGB3_CNTL_MISC__EA_VC6_REMAP_MASK 0x001C0000L +#define DAGB3_CNTL_MISC__EA_VC7_REMAP_MASK 0x00E00000L +#define DAGB3_CNTL_MISC__BW_INIT_CYCLE_MASK 0x3F000000L +#define DAGB3_CNTL_MISC__BW_RW_GAP_CYCLE_MASK 0xC0000000L +//DAGB3_CNTL_MISC2 +#define DAGB3_CNTL_MISC2__URG_BOOST_ENABLE__SHIFT 0x0 +#define DAGB3_CNTL_MISC2__URG_HALT_ENABLE__SHIFT 0x1 +#define DAGB3_CNTL_MISC2__DISABLE_WRREQ_CG__SHIFT 0x2 +#define DAGB3_CNTL_MISC2__DISABLE_WRRET_CG__SHIFT 0x3 +#define DAGB3_CNTL_MISC2__DISABLE_RDREQ_CG__SHIFT 0x4 +#define DAGB3_CNTL_MISC2__DISABLE_RDRET_CG__SHIFT 0x5 +#define DAGB3_CNTL_MISC2__DISABLE_TLBWR_CG__SHIFT 0x6 +#define DAGB3_CNTL_MISC2__DISABLE_TLBRD_CG__SHIFT 0x7 +#define DAGB3_CNTL_MISC2__DISABLE_EAWRREQ_BUSY__SHIFT 0x8 +#define DAGB3_CNTL_MISC2__DISABLE_EARDREQ_BUSY__SHIFT 0x9 +#define DAGB3_CNTL_MISC2__SWAP_CTL__SHIFT 0xa +#define DAGB3_CNTL_MISC2__RDRET_FIFO_PERF__SHIFT 0xb +#define DAGB3_CNTL_MISC2__RDRET_FIFO_DLOCK_CREDITS__SHIFT 0x11 +#define DAGB3_CNTL_MISC2__URG_BOOST_ENABLE_MASK 0x00000001L +#define DAGB3_CNTL_MISC2__URG_HALT_ENABLE_MASK 0x00000002L +#define DAGB3_CNTL_MISC2__DISABLE_WRREQ_CG_MASK 0x00000004L +#define DAGB3_CNTL_MISC2__DISABLE_WRRET_CG_MASK 0x00000008L +#define DAGB3_CNTL_MISC2__DISABLE_RDREQ_CG_MASK 0x00000010L +#define DAGB3_CNTL_MISC2__DISABLE_RDRET_CG_MASK 0x00000020L +#define DAGB3_CNTL_MISC2__DISABLE_TLBWR_CG_MASK 0x00000040L +#define DAGB3_CNTL_MISC2__DISABLE_TLBRD_CG_MASK 0x00000080L +#define DAGB3_CNTL_MISC2__DISABLE_EAWRREQ_BUSY_MASK 0x00000100L +#define DAGB3_CNTL_MISC2__DISABLE_EARDREQ_BUSY_MASK 0x00000200L +#define DAGB3_CNTL_MISC2__SWAP_CTL_MASK 0x00000400L +#define DAGB3_CNTL_MISC2__RDRET_FIFO_PERF_MASK 0x00000800L +#define DAGB3_CNTL_MISC2__RDRET_FIFO_DLOCK_CREDITS_MASK 0x007E0000L +//DAGB3_FIFO_EMPTY +#define DAGB3_FIFO_EMPTY__EMPTY__SHIFT 0x0 +#define DAGB3_FIFO_EMPTY__EMPTY_MASK 0x00FFFFFFL +//DAGB3_FIFO_FULL +#define DAGB3_FIFO_FULL__FULL__SHIFT 0x0 +#define DAGB3_FIFO_FULL__FULL_MASK 0x007FFFFFL +//DAGB3_WR_CREDITS_FULL +#define DAGB3_WR_CREDITS_FULL__FULL__SHIFT 0x0 +#define DAGB3_WR_CREDITS_FULL__FULL_MASK 0x1FFFFFFFL +//DAGB3_RD_CREDITS_FULL +#define DAGB3_RD_CREDITS_FULL__FULL__SHIFT 0x0 +#define DAGB3_RD_CREDITS_FULL__FULL_MASK 0x0003FFFFL +//DAGB3_PERFCOUNTER_LO +#define DAGB3_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0 +#define DAGB3_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL +//DAGB3_PERFCOUNTER_HI +#define DAGB3_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0 +#define DAGB3_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10 +#define DAGB3_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL +#define DAGB3_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L +//DAGB3_PERFCOUNTER0_CFG +#define DAGB3_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0 +#define DAGB3_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8 +#define DAGB3_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18 +#define DAGB3_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c +#define DAGB3_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d +#define DAGB3_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL +#define 
DAGB3_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define DAGB3_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L +#define DAGB3_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L +#define DAGB3_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L +//DAGB3_PERFCOUNTER1_CFG +#define DAGB3_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0 +#define DAGB3_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8 +#define DAGB3_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18 +#define DAGB3_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c +#define DAGB3_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d +#define DAGB3_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL +#define DAGB3_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define DAGB3_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L +#define DAGB3_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L +#define DAGB3_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L +//DAGB3_PERFCOUNTER2_CFG +#define DAGB3_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0 +#define DAGB3_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8 +#define DAGB3_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18 +#define DAGB3_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c +#define DAGB3_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d +#define DAGB3_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL +#define DAGB3_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define DAGB3_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L +#define DAGB3_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L +#define DAGB3_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L +//DAGB3_PERFCOUNTER_RSLT_CNTL +#define DAGB3_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0 +#define DAGB3_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8 +#define DAGB3_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10 +#define DAGB3_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18 +#define DAGB3_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19 +#define DAGB3_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a +#define DAGB3_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL +#define DAGB3_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L +#define DAGB3_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L +#define DAGB3_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L +#define DAGB3_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L +#define DAGB3_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L +//DAGB3_RESERVE0 +#define DAGB3_RESERVE0__RESERVE__SHIFT 0x0 +#define DAGB3_RESERVE0__RESERVE_MASK 0xFFFFFFFFL +//DAGB3_RESERVE1 +#define DAGB3_RESERVE1__RESERVE__SHIFT 0x0 +#define DAGB3_RESERVE1__RESERVE_MASK 0xFFFFFFFFL +//DAGB3_RESERVE2 +#define DAGB3_RESERVE2__RESERVE__SHIFT 0x0 +#define DAGB3_RESERVE2__RESERVE_MASK 0xFFFFFFFFL +//DAGB3_RESERVE3 +#define DAGB3_RESERVE3__RESERVE__SHIFT 0x0 +#define DAGB3_RESERVE3__RESERVE_MASK 0xFFFFFFFFL +//DAGB3_RESERVE4 +#define DAGB3_RESERVE4__RESERVE__SHIFT 0x0 +#define DAGB3_RESERVE4__RESERVE_MASK 0xFFFFFFFFL +//DAGB3_RESERVE5 +#define DAGB3_RESERVE5__RESERVE__SHIFT 0x0 +#define DAGB3_RESERVE5__RESERVE_MASK 0xFFFFFFFFL +//DAGB3_RESERVE6 +#define DAGB3_RESERVE6__RESERVE__SHIFT 0x0 +#define DAGB3_RESERVE6__RESERVE_MASK 0xFFFFFFFFL +//DAGB3_RESERVE7 +#define DAGB3_RESERVE7__RESERVE__SHIFT 0x0 +#define DAGB3_RESERVE7__RESERVE_MASK 0xFFFFFFFFL +//DAGB3_RESERVE8 +#define DAGB3_RESERVE8__RESERVE__SHIFT 0x0 +#define DAGB3_RESERVE8__RESERVE_MASK 0xFFFFFFFFL +//DAGB3_RESERVE9 +#define DAGB3_RESERVE9__RESERVE__SHIFT 0x0 +#define DAGB3_RESERVE9__RESERVE_MASK 0xFFFFFFFFL +//DAGB3_RESERVE10 +#define DAGB3_RESERVE10__RESERVE__SHIFT 0x0 +#define DAGB3_RESERVE10__RESERVE_MASK 0xFFFFFFFFL +//DAGB3_RESERVE11 +#define DAGB3_RESERVE11__RESERVE__SHIFT 0x0 +#define 
DAGB3_RESERVE11__RESERVE_MASK 0xFFFFFFFFL +//DAGB3_RESERVE12 +#define DAGB3_RESERVE12__RESERVE__SHIFT 0x0 +#define DAGB3_RESERVE12__RESERVE_MASK 0xFFFFFFFFL +//DAGB3_RESERVE13 +#define DAGB3_RESERVE13__RESERVE__SHIFT 0x0 +#define DAGB3_RESERVE13__RESERVE_MASK 0xFFFFFFFFL + + +// addressBlock: mmhub_dagb_dagbdec4 +//DAGB4_RDCLI0 +#define DAGB4_RDCLI0__VIRT_CHAN__SHIFT 0x0 +#define DAGB4_RDCLI0__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB4_RDCLI0__URG_HIGH__SHIFT 0x4 +#define DAGB4_RDCLI0__URG_LOW__SHIFT 0x8 +#define DAGB4_RDCLI0__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB4_RDCLI0__MAX_BW__SHIFT 0xd +#define DAGB4_RDCLI0__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB4_RDCLI0__MIN_BW__SHIFT 0x16 +#define DAGB4_RDCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB4_RDCLI0__MAX_OSD__SHIFT 0x1a +#define DAGB4_RDCLI0__VIRT_CHAN_MASK 0x00000007L +#define DAGB4_RDCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB4_RDCLI0__URG_HIGH_MASK 0x000000F0L +#define DAGB4_RDCLI0__URG_LOW_MASK 0x00000F00L +#define DAGB4_RDCLI0__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB4_RDCLI0__MAX_BW_MASK 0x001FE000L +#define DAGB4_RDCLI0__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB4_RDCLI0__MIN_BW_MASK 0x01C00000L +#define DAGB4_RDCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB4_RDCLI0__MAX_OSD_MASK 0xFC000000L +//DAGB4_RDCLI1 +#define DAGB4_RDCLI1__VIRT_CHAN__SHIFT 0x0 +#define DAGB4_RDCLI1__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB4_RDCLI1__URG_HIGH__SHIFT 0x4 +#define DAGB4_RDCLI1__URG_LOW__SHIFT 0x8 +#define DAGB4_RDCLI1__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB4_RDCLI1__MAX_BW__SHIFT 0xd +#define DAGB4_RDCLI1__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB4_RDCLI1__MIN_BW__SHIFT 0x16 +#define DAGB4_RDCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB4_RDCLI1__MAX_OSD__SHIFT 0x1a +#define DAGB4_RDCLI1__VIRT_CHAN_MASK 0x00000007L +#define DAGB4_RDCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB4_RDCLI1__URG_HIGH_MASK 0x000000F0L +#define DAGB4_RDCLI1__URG_LOW_MASK 0x00000F00L +#define DAGB4_RDCLI1__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB4_RDCLI1__MAX_BW_MASK 0x001FE000L +#define DAGB4_RDCLI1__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB4_RDCLI1__MIN_BW_MASK 0x01C00000L +#define DAGB4_RDCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB4_RDCLI1__MAX_OSD_MASK 0xFC000000L +//DAGB4_RDCLI2 +#define DAGB4_RDCLI2__VIRT_CHAN__SHIFT 0x0 +#define DAGB4_RDCLI2__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB4_RDCLI2__URG_HIGH__SHIFT 0x4 +#define DAGB4_RDCLI2__URG_LOW__SHIFT 0x8 +#define DAGB4_RDCLI2__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB4_RDCLI2__MAX_BW__SHIFT 0xd +#define DAGB4_RDCLI2__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB4_RDCLI2__MIN_BW__SHIFT 0x16 +#define DAGB4_RDCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB4_RDCLI2__MAX_OSD__SHIFT 0x1a +#define DAGB4_RDCLI2__VIRT_CHAN_MASK 0x00000007L +#define DAGB4_RDCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB4_RDCLI2__URG_HIGH_MASK 0x000000F0L +#define DAGB4_RDCLI2__URG_LOW_MASK 0x00000F00L +#define DAGB4_RDCLI2__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB4_RDCLI2__MAX_BW_MASK 0x001FE000L +#define DAGB4_RDCLI2__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB4_RDCLI2__MIN_BW_MASK 0x01C00000L +#define DAGB4_RDCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB4_RDCLI2__MAX_OSD_MASK 0xFC000000L +//DAGB4_RDCLI3 +#define DAGB4_RDCLI3__VIRT_CHAN__SHIFT 0x0 +#define DAGB4_RDCLI3__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB4_RDCLI3__URG_HIGH__SHIFT 0x4 +#define DAGB4_RDCLI3__URG_LOW__SHIFT 0x8 +#define DAGB4_RDCLI3__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB4_RDCLI3__MAX_BW__SHIFT 
0xd +#define DAGB4_RDCLI3__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB4_RDCLI3__MIN_BW__SHIFT 0x16 +#define DAGB4_RDCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB4_RDCLI3__MAX_OSD__SHIFT 0x1a +#define DAGB4_RDCLI3__VIRT_CHAN_MASK 0x00000007L +#define DAGB4_RDCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB4_RDCLI3__URG_HIGH_MASK 0x000000F0L +#define DAGB4_RDCLI3__URG_LOW_MASK 0x00000F00L +#define DAGB4_RDCLI3__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB4_RDCLI3__MAX_BW_MASK 0x001FE000L +#define DAGB4_RDCLI3__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB4_RDCLI3__MIN_BW_MASK 0x01C00000L +#define DAGB4_RDCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB4_RDCLI3__MAX_OSD_MASK 0xFC000000L +//DAGB4_RDCLI4 +#define DAGB4_RDCLI4__VIRT_CHAN__SHIFT 0x0 +#define DAGB4_RDCLI4__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB4_RDCLI4__URG_HIGH__SHIFT 0x4 +#define DAGB4_RDCLI4__URG_LOW__SHIFT 0x8 +#define DAGB4_RDCLI4__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB4_RDCLI4__MAX_BW__SHIFT 0xd +#define DAGB4_RDCLI4__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB4_RDCLI4__MIN_BW__SHIFT 0x16 +#define DAGB4_RDCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB4_RDCLI4__MAX_OSD__SHIFT 0x1a +#define DAGB4_RDCLI4__VIRT_CHAN_MASK 0x00000007L +#define DAGB4_RDCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB4_RDCLI4__URG_HIGH_MASK 0x000000F0L +#define DAGB4_RDCLI4__URG_LOW_MASK 0x00000F00L +#define DAGB4_RDCLI4__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB4_RDCLI4__MAX_BW_MASK 0x001FE000L +#define DAGB4_RDCLI4__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB4_RDCLI4__MIN_BW_MASK 0x01C00000L +#define DAGB4_RDCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB4_RDCLI4__MAX_OSD_MASK 0xFC000000L +//DAGB4_RDCLI5 +#define DAGB4_RDCLI5__VIRT_CHAN__SHIFT 0x0 +#define DAGB4_RDCLI5__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB4_RDCLI5__URG_HIGH__SHIFT 0x4 +#define DAGB4_RDCLI5__URG_LOW__SHIFT 0x8 +#define DAGB4_RDCLI5__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB4_RDCLI5__MAX_BW__SHIFT 0xd +#define DAGB4_RDCLI5__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB4_RDCLI5__MIN_BW__SHIFT 0x16 +#define DAGB4_RDCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB4_RDCLI5__MAX_OSD__SHIFT 0x1a +#define DAGB4_RDCLI5__VIRT_CHAN_MASK 0x00000007L +#define DAGB4_RDCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB4_RDCLI5__URG_HIGH_MASK 0x000000F0L +#define DAGB4_RDCLI5__URG_LOW_MASK 0x00000F00L +#define DAGB4_RDCLI5__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB4_RDCLI5__MAX_BW_MASK 0x001FE000L +#define DAGB4_RDCLI5__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB4_RDCLI5__MIN_BW_MASK 0x01C00000L +#define DAGB4_RDCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB4_RDCLI5__MAX_OSD_MASK 0xFC000000L +//DAGB4_RDCLI6 +#define DAGB4_RDCLI6__VIRT_CHAN__SHIFT 0x0 +#define DAGB4_RDCLI6__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB4_RDCLI6__URG_HIGH__SHIFT 0x4 +#define DAGB4_RDCLI6__URG_LOW__SHIFT 0x8 +#define DAGB4_RDCLI6__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB4_RDCLI6__MAX_BW__SHIFT 0xd +#define DAGB4_RDCLI6__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB4_RDCLI6__MIN_BW__SHIFT 0x16 +#define DAGB4_RDCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB4_RDCLI6__MAX_OSD__SHIFT 0x1a +#define DAGB4_RDCLI6__VIRT_CHAN_MASK 0x00000007L +#define DAGB4_RDCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB4_RDCLI6__URG_HIGH_MASK 0x000000F0L +#define DAGB4_RDCLI6__URG_LOW_MASK 0x00000F00L +#define DAGB4_RDCLI6__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB4_RDCLI6__MAX_BW_MASK 0x001FE000L +#define DAGB4_RDCLI6__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB4_RDCLI6__MIN_BW_MASK 
0x01C00000L +#define DAGB4_RDCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB4_RDCLI6__MAX_OSD_MASK 0xFC000000L +//DAGB4_RDCLI7 +#define DAGB4_RDCLI7__VIRT_CHAN__SHIFT 0x0 +#define DAGB4_RDCLI7__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB4_RDCLI7__URG_HIGH__SHIFT 0x4 +#define DAGB4_RDCLI7__URG_LOW__SHIFT 0x8 +#define DAGB4_RDCLI7__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB4_RDCLI7__MAX_BW__SHIFT 0xd +#define DAGB4_RDCLI7__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB4_RDCLI7__MIN_BW__SHIFT 0x16 +#define DAGB4_RDCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB4_RDCLI7__MAX_OSD__SHIFT 0x1a +#define DAGB4_RDCLI7__VIRT_CHAN_MASK 0x00000007L +#define DAGB4_RDCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB4_RDCLI7__URG_HIGH_MASK 0x000000F0L +#define DAGB4_RDCLI7__URG_LOW_MASK 0x00000F00L +#define DAGB4_RDCLI7__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB4_RDCLI7__MAX_BW_MASK 0x001FE000L +#define DAGB4_RDCLI7__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB4_RDCLI7__MIN_BW_MASK 0x01C00000L +#define DAGB4_RDCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB4_RDCLI7__MAX_OSD_MASK 0xFC000000L +//DAGB4_RDCLI8 +#define DAGB4_RDCLI8__VIRT_CHAN__SHIFT 0x0 +#define DAGB4_RDCLI8__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB4_RDCLI8__URG_HIGH__SHIFT 0x4 +#define DAGB4_RDCLI8__URG_LOW__SHIFT 0x8 +#define DAGB4_RDCLI8__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB4_RDCLI8__MAX_BW__SHIFT 0xd +#define DAGB4_RDCLI8__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB4_RDCLI8__MIN_BW__SHIFT 0x16 +#define DAGB4_RDCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB4_RDCLI8__MAX_OSD__SHIFT 0x1a +#define DAGB4_RDCLI8__VIRT_CHAN_MASK 0x00000007L +#define DAGB4_RDCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB4_RDCLI8__URG_HIGH_MASK 0x000000F0L +#define DAGB4_RDCLI8__URG_LOW_MASK 0x00000F00L +#define DAGB4_RDCLI8__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB4_RDCLI8__MAX_BW_MASK 0x001FE000L +#define DAGB4_RDCLI8__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB4_RDCLI8__MIN_BW_MASK 0x01C00000L +#define DAGB4_RDCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB4_RDCLI8__MAX_OSD_MASK 0xFC000000L +//DAGB4_RDCLI9 +#define DAGB4_RDCLI9__VIRT_CHAN__SHIFT 0x0 +#define DAGB4_RDCLI9__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB4_RDCLI9__URG_HIGH__SHIFT 0x4 +#define DAGB4_RDCLI9__URG_LOW__SHIFT 0x8 +#define DAGB4_RDCLI9__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB4_RDCLI9__MAX_BW__SHIFT 0xd +#define DAGB4_RDCLI9__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB4_RDCLI9__MIN_BW__SHIFT 0x16 +#define DAGB4_RDCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB4_RDCLI9__MAX_OSD__SHIFT 0x1a +#define DAGB4_RDCLI9__VIRT_CHAN_MASK 0x00000007L +#define DAGB4_RDCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB4_RDCLI9__URG_HIGH_MASK 0x000000F0L +#define DAGB4_RDCLI9__URG_LOW_MASK 0x00000F00L +#define DAGB4_RDCLI9__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB4_RDCLI9__MAX_BW_MASK 0x001FE000L +#define DAGB4_RDCLI9__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB4_RDCLI9__MIN_BW_MASK 0x01C00000L +#define DAGB4_RDCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB4_RDCLI9__MAX_OSD_MASK 0xFC000000L +//DAGB4_RDCLI10 +#define DAGB4_RDCLI10__VIRT_CHAN__SHIFT 0x0 +#define DAGB4_RDCLI10__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB4_RDCLI10__URG_HIGH__SHIFT 0x4 +#define DAGB4_RDCLI10__URG_LOW__SHIFT 0x8 +#define DAGB4_RDCLI10__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB4_RDCLI10__MAX_BW__SHIFT 0xd +#define DAGB4_RDCLI10__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB4_RDCLI10__MIN_BW__SHIFT 0x16 +#define DAGB4_RDCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define 
DAGB4_RDCLI10__MAX_OSD__SHIFT 0x1a +#define DAGB4_RDCLI10__VIRT_CHAN_MASK 0x00000007L +#define DAGB4_RDCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB4_RDCLI10__URG_HIGH_MASK 0x000000F0L +#define DAGB4_RDCLI10__URG_LOW_MASK 0x00000F00L +#define DAGB4_RDCLI10__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB4_RDCLI10__MAX_BW_MASK 0x001FE000L +#define DAGB4_RDCLI10__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB4_RDCLI10__MIN_BW_MASK 0x01C00000L +#define DAGB4_RDCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB4_RDCLI10__MAX_OSD_MASK 0xFC000000L +//DAGB4_RDCLI11 +#define DAGB4_RDCLI11__VIRT_CHAN__SHIFT 0x0 +#define DAGB4_RDCLI11__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB4_RDCLI11__URG_HIGH__SHIFT 0x4 +#define DAGB4_RDCLI11__URG_LOW__SHIFT 0x8 +#define DAGB4_RDCLI11__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB4_RDCLI11__MAX_BW__SHIFT 0xd +#define DAGB4_RDCLI11__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB4_RDCLI11__MIN_BW__SHIFT 0x16 +#define DAGB4_RDCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB4_RDCLI11__MAX_OSD__SHIFT 0x1a +#define DAGB4_RDCLI11__VIRT_CHAN_MASK 0x00000007L +#define DAGB4_RDCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB4_RDCLI11__URG_HIGH_MASK 0x000000F0L +#define DAGB4_RDCLI11__URG_LOW_MASK 0x00000F00L +#define DAGB4_RDCLI11__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB4_RDCLI11__MAX_BW_MASK 0x001FE000L +#define DAGB4_RDCLI11__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB4_RDCLI11__MIN_BW_MASK 0x01C00000L +#define DAGB4_RDCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB4_RDCLI11__MAX_OSD_MASK 0xFC000000L +//DAGB4_RDCLI12 +#define DAGB4_RDCLI12__VIRT_CHAN__SHIFT 0x0 +#define DAGB4_RDCLI12__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB4_RDCLI12__URG_HIGH__SHIFT 0x4 +#define DAGB4_RDCLI12__URG_LOW__SHIFT 0x8 +#define DAGB4_RDCLI12__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB4_RDCLI12__MAX_BW__SHIFT 0xd +#define DAGB4_RDCLI12__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB4_RDCLI12__MIN_BW__SHIFT 0x16 +#define DAGB4_RDCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB4_RDCLI12__MAX_OSD__SHIFT 0x1a +#define DAGB4_RDCLI12__VIRT_CHAN_MASK 0x00000007L +#define DAGB4_RDCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB4_RDCLI12__URG_HIGH_MASK 0x000000F0L +#define DAGB4_RDCLI12__URG_LOW_MASK 0x00000F00L +#define DAGB4_RDCLI12__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB4_RDCLI12__MAX_BW_MASK 0x001FE000L +#define DAGB4_RDCLI12__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB4_RDCLI12__MIN_BW_MASK 0x01C00000L +#define DAGB4_RDCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB4_RDCLI12__MAX_OSD_MASK 0xFC000000L +//DAGB4_RDCLI13 +#define DAGB4_RDCLI13__VIRT_CHAN__SHIFT 0x0 +#define DAGB4_RDCLI13__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB4_RDCLI13__URG_HIGH__SHIFT 0x4 +#define DAGB4_RDCLI13__URG_LOW__SHIFT 0x8 +#define DAGB4_RDCLI13__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB4_RDCLI13__MAX_BW__SHIFT 0xd +#define DAGB4_RDCLI13__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB4_RDCLI13__MIN_BW__SHIFT 0x16 +#define DAGB4_RDCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB4_RDCLI13__MAX_OSD__SHIFT 0x1a +#define DAGB4_RDCLI13__VIRT_CHAN_MASK 0x00000007L +#define DAGB4_RDCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB4_RDCLI13__URG_HIGH_MASK 0x000000F0L +#define DAGB4_RDCLI13__URG_LOW_MASK 0x00000F00L +#define DAGB4_RDCLI13__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB4_RDCLI13__MAX_BW_MASK 0x001FE000L +#define DAGB4_RDCLI13__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB4_RDCLI13__MIN_BW_MASK 0x01C00000L +#define DAGB4_RDCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define 
DAGB4_RDCLI13__MAX_OSD_MASK 0xFC000000L +//DAGB4_RDCLI14 +#define DAGB4_RDCLI14__VIRT_CHAN__SHIFT 0x0 +#define DAGB4_RDCLI14__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB4_RDCLI14__URG_HIGH__SHIFT 0x4 +#define DAGB4_RDCLI14__URG_LOW__SHIFT 0x8 +#define DAGB4_RDCLI14__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB4_RDCLI14__MAX_BW__SHIFT 0xd +#define DAGB4_RDCLI14__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB4_RDCLI14__MIN_BW__SHIFT 0x16 +#define DAGB4_RDCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB4_RDCLI14__MAX_OSD__SHIFT 0x1a +#define DAGB4_RDCLI14__VIRT_CHAN_MASK 0x00000007L +#define DAGB4_RDCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB4_RDCLI14__URG_HIGH_MASK 0x000000F0L +#define DAGB4_RDCLI14__URG_LOW_MASK 0x00000F00L +#define DAGB4_RDCLI14__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB4_RDCLI14__MAX_BW_MASK 0x001FE000L +#define DAGB4_RDCLI14__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB4_RDCLI14__MIN_BW_MASK 0x01C00000L +#define DAGB4_RDCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB4_RDCLI14__MAX_OSD_MASK 0xFC000000L +//DAGB4_RDCLI15 +#define DAGB4_RDCLI15__VIRT_CHAN__SHIFT 0x0 +#define DAGB4_RDCLI15__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB4_RDCLI15__URG_HIGH__SHIFT 0x4 +#define DAGB4_RDCLI15__URG_LOW__SHIFT 0x8 +#define DAGB4_RDCLI15__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB4_RDCLI15__MAX_BW__SHIFT 0xd +#define DAGB4_RDCLI15__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB4_RDCLI15__MIN_BW__SHIFT 0x16 +#define DAGB4_RDCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB4_RDCLI15__MAX_OSD__SHIFT 0x1a +#define DAGB4_RDCLI15__VIRT_CHAN_MASK 0x00000007L +#define DAGB4_RDCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB4_RDCLI15__URG_HIGH_MASK 0x000000F0L +#define DAGB4_RDCLI15__URG_LOW_MASK 0x00000F00L +#define DAGB4_RDCLI15__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB4_RDCLI15__MAX_BW_MASK 0x001FE000L +#define DAGB4_RDCLI15__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB4_RDCLI15__MIN_BW_MASK 0x01C00000L +#define DAGB4_RDCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB4_RDCLI15__MAX_OSD_MASK 0xFC000000L +//DAGB4_RD_CNTL +#define DAGB4_RD_CNTL__SCLK_FREQ__SHIFT 0x0 +#define DAGB4_RD_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4 +#define DAGB4_RD_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa +#define DAGB4_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10 +#define DAGB4_RD_CNTL__IO_LEVEL__SHIFT 0x11 +#define DAGB4_RD_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14 +#define DAGB4_RD_CNTL__SHARE_VC_NUM__SHIFT 0x17 +#define DAGB4_RD_CNTL__SCLK_FREQ_MASK 0x0000000FL +#define DAGB4_RD_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L +#define DAGB4_RD_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L +#define DAGB4_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L +#define DAGB4_RD_CNTL__IO_LEVEL_MASK 0x000E0000L +#define DAGB4_RD_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L +#define DAGB4_RD_CNTL__SHARE_VC_NUM_MASK 0x03800000L +//DAGB4_RD_GMI_CNTL +#define DAGB4_RD_GMI_CNTL__EA_CREDIT__SHIFT 0x0 +#define DAGB4_RD_GMI_CNTL__LEVEL__SHIFT 0x6 +#define DAGB4_RD_GMI_CNTL__MAX_BURST__SHIFT 0x9 +#define DAGB4_RD_GMI_CNTL__LAZY_TIMER__SHIFT 0xd +#define DAGB4_RD_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL +#define DAGB4_RD_GMI_CNTL__LEVEL_MASK 0x000001C0L +#define DAGB4_RD_GMI_CNTL__MAX_BURST_MASK 0x00001E00L +#define DAGB4_RD_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L +//DAGB4_RD_ADDR_DAGB +#define DAGB4_RD_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0 +#define DAGB4_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3 +#define DAGB4_RD_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6 +#define DAGB4_RD_ADDR_DAGB__WHOAMI__SHIFT 0x7 +#define DAGB4_RD_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L 
+#define DAGB4_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L +#define DAGB4_RD_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L +#define DAGB4_RD_ADDR_DAGB__WHOAMI_MASK 0x00001F80L +//DAGB4_RD_OUTPUT_DAGB_MAX_BURST +#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0 +#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4 +#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8 +#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc +#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10 +#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14 +#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18 +#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c +#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL +#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L +#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L +#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L +#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L +#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L +#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L +#define DAGB4_RD_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L +//DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER +#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0 +#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4 +#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8 +#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc +#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10 +#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14 +#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18 +#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c +#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL +#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L +#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L +#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L +#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L +#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L +#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L +#define DAGB4_RD_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L +//DAGB4_RD_CGTT_CLK_CTRL +#define DAGB4_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB4_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB4_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB4_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB4_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB4_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB4_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB4_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB4_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB4_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB4_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB4_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB4_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB4_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB4_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB4_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB4_L1TLB_RD_CGTT_CLK_CTRL +#define DAGB4_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB4_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB4_L1TLB_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB4_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define 
DAGB4_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB4_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB4_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB4_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB4_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB4_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB4_L1TLB_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB4_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB4_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB4_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB4_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB4_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB4_ATCVM_RD_CGTT_CLK_CTRL +#define DAGB4_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB4_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB4_ATCVM_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB4_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB4_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB4_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB4_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB4_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB4_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB4_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB4_ATCVM_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB4_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB4_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB4_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB4_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB4_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB4_RD_ADDR_DAGB_MAX_BURST0 +#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0 +#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4 +#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8 +#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc +#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10 +#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14 +#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18 +#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c +#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL +#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L +#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L +#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L +#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L +#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L +#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L +#define DAGB4_RD_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L +//DAGB4_RD_ADDR_DAGB_LAZY_TIMER0 +#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0 +#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4 +#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8 +#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc +#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10 +#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14 +#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18 +#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c +#define 
DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL +#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L +#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L +#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L +#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L +#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L +#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L +#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L +//DAGB4_RD_ADDR_DAGB_MAX_BURST1 +#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0 +#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4 +#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8 +#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc +#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10 +#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14 +#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18 +#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c +#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL +#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L +#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L +#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L +#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L +#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L +#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L +#define DAGB4_RD_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L +//DAGB4_RD_ADDR_DAGB_LAZY_TIMER1 +#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0 +#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4 +#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8 +#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc +#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10 +#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14 +#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18 +#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c +#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL +#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L +#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L +#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L +#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L +#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L +#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L +#define DAGB4_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L +//DAGB4_RD_VC0_CNTL +#define DAGB4_RD_VC0_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB4_RD_VC0_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB4_RD_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB4_RD_VC0_CNTL__MAX_BW__SHIFT 0xc +#define DAGB4_RD_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB4_RD_VC0_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB4_RD_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB4_RD_VC0_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB4_RD_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB4_RD_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB4_RD_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB4_RD_VC0_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB4_RD_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB4_RD_VC0_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB4_RD_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB4_RD_VC0_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB4_RD_VC1_CNTL +#define 
DAGB4_RD_VC1_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB4_RD_VC1_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB4_RD_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB4_RD_VC1_CNTL__MAX_BW__SHIFT 0xc +#define DAGB4_RD_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB4_RD_VC1_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB4_RD_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB4_RD_VC1_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB4_RD_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB4_RD_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB4_RD_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB4_RD_VC1_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB4_RD_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB4_RD_VC1_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB4_RD_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB4_RD_VC1_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB4_RD_VC2_CNTL +#define DAGB4_RD_VC2_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB4_RD_VC2_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB4_RD_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB4_RD_VC2_CNTL__MAX_BW__SHIFT 0xc +#define DAGB4_RD_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB4_RD_VC2_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB4_RD_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB4_RD_VC2_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB4_RD_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB4_RD_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB4_RD_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB4_RD_VC2_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB4_RD_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB4_RD_VC2_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB4_RD_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB4_RD_VC2_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB4_RD_VC3_CNTL +#define DAGB4_RD_VC3_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB4_RD_VC3_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB4_RD_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB4_RD_VC3_CNTL__MAX_BW__SHIFT 0xc +#define DAGB4_RD_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB4_RD_VC3_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB4_RD_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB4_RD_VC3_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB4_RD_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB4_RD_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB4_RD_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB4_RD_VC3_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB4_RD_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB4_RD_VC3_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB4_RD_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB4_RD_VC3_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB4_RD_VC4_CNTL +#define DAGB4_RD_VC4_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB4_RD_VC4_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB4_RD_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB4_RD_VC4_CNTL__MAX_BW__SHIFT 0xc +#define DAGB4_RD_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB4_RD_VC4_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB4_RD_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB4_RD_VC4_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB4_RD_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB4_RD_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB4_RD_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB4_RD_VC4_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB4_RD_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB4_RD_VC4_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB4_RD_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB4_RD_VC4_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB4_RD_VC5_CNTL +#define DAGB4_RD_VC5_CNTL__STOR_CREDIT__SHIFT 
0x0 +#define DAGB4_RD_VC5_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB4_RD_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB4_RD_VC5_CNTL__MAX_BW__SHIFT 0xc +#define DAGB4_RD_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB4_RD_VC5_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB4_RD_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB4_RD_VC5_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB4_RD_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB4_RD_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB4_RD_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB4_RD_VC5_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB4_RD_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB4_RD_VC5_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB4_RD_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB4_RD_VC5_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB4_RD_VC6_CNTL +#define DAGB4_RD_VC6_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB4_RD_VC6_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB4_RD_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB4_RD_VC6_CNTL__MAX_BW__SHIFT 0xc +#define DAGB4_RD_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB4_RD_VC6_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB4_RD_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB4_RD_VC6_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB4_RD_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB4_RD_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB4_RD_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB4_RD_VC6_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB4_RD_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB4_RD_VC6_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB4_RD_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB4_RD_VC6_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB4_RD_VC7_CNTL +#define DAGB4_RD_VC7_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB4_RD_VC7_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB4_RD_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB4_RD_VC7_CNTL__MAX_BW__SHIFT 0xc +#define DAGB4_RD_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB4_RD_VC7_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB4_RD_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB4_RD_VC7_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB4_RD_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB4_RD_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB4_RD_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB4_RD_VC7_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB4_RD_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB4_RD_VC7_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB4_RD_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB4_RD_VC7_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB4_RD_CNTL_MISC +#define DAGB4_RD_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0 +#define DAGB4_RD_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6 +#define DAGB4_RD_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd +#define DAGB4_RD_CNTL_MISC__STOR_CC_LEGACY_MODE__SHIFT 0x13 +#define DAGB4_RD_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14 +#define DAGB4_RD_CNTL_MISC__UTCL2_CID__SHIFT 0x15 +#define DAGB4_RD_CNTL_MISC__RDRET_FIFO_CREDITS__SHIFT 0x1a +#define DAGB4_RD_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL +#define DAGB4_RD_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L +#define DAGB4_RD_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L +#define DAGB4_RD_CNTL_MISC__STOR_CC_LEGACY_MODE_MASK 0x00080000L +#define DAGB4_RD_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L +#define DAGB4_RD_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L +#define DAGB4_RD_CNTL_MISC__RDRET_FIFO_CREDITS_MASK 0xFC000000L +//DAGB4_RD_TLB_CREDIT +#define DAGB4_RD_TLB_CREDIT__TLB0__SHIFT 0x0 +#define DAGB4_RD_TLB_CREDIT__TLB1__SHIFT 0x5 +#define 
DAGB4_RD_TLB_CREDIT__TLB2__SHIFT 0xa +#define DAGB4_RD_TLB_CREDIT__TLB3__SHIFT 0xf +#define DAGB4_RD_TLB_CREDIT__TLB4__SHIFT 0x14 +#define DAGB4_RD_TLB_CREDIT__TLB5__SHIFT 0x19 +#define DAGB4_RD_TLB_CREDIT__TLB0_MASK 0x0000001FL +#define DAGB4_RD_TLB_CREDIT__TLB1_MASK 0x000003E0L +#define DAGB4_RD_TLB_CREDIT__TLB2_MASK 0x00007C00L +#define DAGB4_RD_TLB_CREDIT__TLB3_MASK 0x000F8000L +#define DAGB4_RD_TLB_CREDIT__TLB4_MASK 0x01F00000L +#define DAGB4_RD_TLB_CREDIT__TLB5_MASK 0x3E000000L +//DAGB4_RDCLI_ASK_PENDING +#define DAGB4_RDCLI_ASK_PENDING__BUSY__SHIFT 0x0 +#define DAGB4_RDCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB4_RDCLI_GO_PENDING +#define DAGB4_RDCLI_GO_PENDING__BUSY__SHIFT 0x0 +#define DAGB4_RDCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB4_RDCLI_GBLSEND_PENDING +#define DAGB4_RDCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0 +#define DAGB4_RDCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB4_RDCLI_TLB_PENDING +#define DAGB4_RDCLI_TLB_PENDING__BUSY__SHIFT 0x0 +#define DAGB4_RDCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB4_RDCLI_OARB_PENDING +#define DAGB4_RDCLI_OARB_PENDING__BUSY__SHIFT 0x0 +#define DAGB4_RDCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB4_RDCLI_OSD_PENDING +#define DAGB4_RDCLI_OSD_PENDING__BUSY__SHIFT 0x0 +#define DAGB4_RDCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB4_WRCLI0 +#define DAGB4_WRCLI0__VIRT_CHAN__SHIFT 0x0 +#define DAGB4_WRCLI0__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB4_WRCLI0__URG_HIGH__SHIFT 0x4 +#define DAGB4_WRCLI0__URG_LOW__SHIFT 0x8 +#define DAGB4_WRCLI0__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB4_WRCLI0__MAX_BW__SHIFT 0xd +#define DAGB4_WRCLI0__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB4_WRCLI0__MIN_BW__SHIFT 0x16 +#define DAGB4_WRCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB4_WRCLI0__MAX_OSD__SHIFT 0x1a +#define DAGB4_WRCLI0__VIRT_CHAN_MASK 0x00000007L +#define DAGB4_WRCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB4_WRCLI0__URG_HIGH_MASK 0x000000F0L +#define DAGB4_WRCLI0__URG_LOW_MASK 0x00000F00L +#define DAGB4_WRCLI0__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB4_WRCLI0__MAX_BW_MASK 0x001FE000L +#define DAGB4_WRCLI0__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB4_WRCLI0__MIN_BW_MASK 0x01C00000L +#define DAGB4_WRCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB4_WRCLI0__MAX_OSD_MASK 0xFC000000L +//DAGB4_WRCLI1 +#define DAGB4_WRCLI1__VIRT_CHAN__SHIFT 0x0 +#define DAGB4_WRCLI1__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB4_WRCLI1__URG_HIGH__SHIFT 0x4 +#define DAGB4_WRCLI1__URG_LOW__SHIFT 0x8 +#define DAGB4_WRCLI1__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB4_WRCLI1__MAX_BW__SHIFT 0xd +#define DAGB4_WRCLI1__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB4_WRCLI1__MIN_BW__SHIFT 0x16 +#define DAGB4_WRCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB4_WRCLI1__MAX_OSD__SHIFT 0x1a +#define DAGB4_WRCLI1__VIRT_CHAN_MASK 0x00000007L +#define DAGB4_WRCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB4_WRCLI1__URG_HIGH_MASK 0x000000F0L +#define DAGB4_WRCLI1__URG_LOW_MASK 0x00000F00L +#define DAGB4_WRCLI1__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB4_WRCLI1__MAX_BW_MASK 0x001FE000L +#define DAGB4_WRCLI1__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB4_WRCLI1__MIN_BW_MASK 0x01C00000L +#define DAGB4_WRCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB4_WRCLI1__MAX_OSD_MASK 0xFC000000L +//DAGB4_WRCLI2 +#define DAGB4_WRCLI2__VIRT_CHAN__SHIFT 0x0 +#define DAGB4_WRCLI2__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB4_WRCLI2__URG_HIGH__SHIFT 0x4 +#define DAGB4_WRCLI2__URG_LOW__SHIFT 0x8 +#define DAGB4_WRCLI2__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB4_WRCLI2__MAX_BW__SHIFT 
0xd +#define DAGB4_WRCLI2__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB4_WRCLI2__MIN_BW__SHIFT 0x16 +#define DAGB4_WRCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB4_WRCLI2__MAX_OSD__SHIFT 0x1a +#define DAGB4_WRCLI2__VIRT_CHAN_MASK 0x00000007L +#define DAGB4_WRCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB4_WRCLI2__URG_HIGH_MASK 0x000000F0L +#define DAGB4_WRCLI2__URG_LOW_MASK 0x00000F00L +#define DAGB4_WRCLI2__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB4_WRCLI2__MAX_BW_MASK 0x001FE000L +#define DAGB4_WRCLI2__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB4_WRCLI2__MIN_BW_MASK 0x01C00000L +#define DAGB4_WRCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB4_WRCLI2__MAX_OSD_MASK 0xFC000000L +//DAGB4_WRCLI3 +#define DAGB4_WRCLI3__VIRT_CHAN__SHIFT 0x0 +#define DAGB4_WRCLI3__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB4_WRCLI3__URG_HIGH__SHIFT 0x4 +#define DAGB4_WRCLI3__URG_LOW__SHIFT 0x8 +#define DAGB4_WRCLI3__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB4_WRCLI3__MAX_BW__SHIFT 0xd +#define DAGB4_WRCLI3__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB4_WRCLI3__MIN_BW__SHIFT 0x16 +#define DAGB4_WRCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB4_WRCLI3__MAX_OSD__SHIFT 0x1a +#define DAGB4_WRCLI3__VIRT_CHAN_MASK 0x00000007L +#define DAGB4_WRCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB4_WRCLI3__URG_HIGH_MASK 0x000000F0L +#define DAGB4_WRCLI3__URG_LOW_MASK 0x00000F00L +#define DAGB4_WRCLI3__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB4_WRCLI3__MAX_BW_MASK 0x001FE000L +#define DAGB4_WRCLI3__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB4_WRCLI3__MIN_BW_MASK 0x01C00000L +#define DAGB4_WRCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB4_WRCLI3__MAX_OSD_MASK 0xFC000000L +//DAGB4_WRCLI4 +#define DAGB4_WRCLI4__VIRT_CHAN__SHIFT 0x0 +#define DAGB4_WRCLI4__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB4_WRCLI4__URG_HIGH__SHIFT 0x4 +#define DAGB4_WRCLI4__URG_LOW__SHIFT 0x8 +#define DAGB4_WRCLI4__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB4_WRCLI4__MAX_BW__SHIFT 0xd +#define DAGB4_WRCLI4__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB4_WRCLI4__MIN_BW__SHIFT 0x16 +#define DAGB4_WRCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB4_WRCLI4__MAX_OSD__SHIFT 0x1a +#define DAGB4_WRCLI4__VIRT_CHAN_MASK 0x00000007L +#define DAGB4_WRCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB4_WRCLI4__URG_HIGH_MASK 0x000000F0L +#define DAGB4_WRCLI4__URG_LOW_MASK 0x00000F00L +#define DAGB4_WRCLI4__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB4_WRCLI4__MAX_BW_MASK 0x001FE000L +#define DAGB4_WRCLI4__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB4_WRCLI4__MIN_BW_MASK 0x01C00000L +#define DAGB4_WRCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB4_WRCLI4__MAX_OSD_MASK 0xFC000000L +//DAGB4_WRCLI5 +#define DAGB4_WRCLI5__VIRT_CHAN__SHIFT 0x0 +#define DAGB4_WRCLI5__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB4_WRCLI5__URG_HIGH__SHIFT 0x4 +#define DAGB4_WRCLI5__URG_LOW__SHIFT 0x8 +#define DAGB4_WRCLI5__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB4_WRCLI5__MAX_BW__SHIFT 0xd +#define DAGB4_WRCLI5__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB4_WRCLI5__MIN_BW__SHIFT 0x16 +#define DAGB4_WRCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB4_WRCLI5__MAX_OSD__SHIFT 0x1a +#define DAGB4_WRCLI5__VIRT_CHAN_MASK 0x00000007L +#define DAGB4_WRCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB4_WRCLI5__URG_HIGH_MASK 0x000000F0L +#define DAGB4_WRCLI5__URG_LOW_MASK 0x00000F00L +#define DAGB4_WRCLI5__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB4_WRCLI5__MAX_BW_MASK 0x001FE000L +#define DAGB4_WRCLI5__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB4_WRCLI5__MIN_BW_MASK 
0x01C00000L +#define DAGB4_WRCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB4_WRCLI5__MAX_OSD_MASK 0xFC000000L +//DAGB4_WRCLI6 +#define DAGB4_WRCLI6__VIRT_CHAN__SHIFT 0x0 +#define DAGB4_WRCLI6__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB4_WRCLI6__URG_HIGH__SHIFT 0x4 +#define DAGB4_WRCLI6__URG_LOW__SHIFT 0x8 +#define DAGB4_WRCLI6__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB4_WRCLI6__MAX_BW__SHIFT 0xd +#define DAGB4_WRCLI6__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB4_WRCLI6__MIN_BW__SHIFT 0x16 +#define DAGB4_WRCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB4_WRCLI6__MAX_OSD__SHIFT 0x1a +#define DAGB4_WRCLI6__VIRT_CHAN_MASK 0x00000007L +#define DAGB4_WRCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB4_WRCLI6__URG_HIGH_MASK 0x000000F0L +#define DAGB4_WRCLI6__URG_LOW_MASK 0x00000F00L +#define DAGB4_WRCLI6__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB4_WRCLI6__MAX_BW_MASK 0x001FE000L +#define DAGB4_WRCLI6__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB4_WRCLI6__MIN_BW_MASK 0x01C00000L +#define DAGB4_WRCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB4_WRCLI6__MAX_OSD_MASK 0xFC000000L +//DAGB4_WRCLI7 +#define DAGB4_WRCLI7__VIRT_CHAN__SHIFT 0x0 +#define DAGB4_WRCLI7__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB4_WRCLI7__URG_HIGH__SHIFT 0x4 +#define DAGB4_WRCLI7__URG_LOW__SHIFT 0x8 +#define DAGB4_WRCLI7__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB4_WRCLI7__MAX_BW__SHIFT 0xd +#define DAGB4_WRCLI7__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB4_WRCLI7__MIN_BW__SHIFT 0x16 +#define DAGB4_WRCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB4_WRCLI7__MAX_OSD__SHIFT 0x1a +#define DAGB4_WRCLI7__VIRT_CHAN_MASK 0x00000007L +#define DAGB4_WRCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB4_WRCLI7__URG_HIGH_MASK 0x000000F0L +#define DAGB4_WRCLI7__URG_LOW_MASK 0x00000F00L +#define DAGB4_WRCLI7__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB4_WRCLI7__MAX_BW_MASK 0x001FE000L +#define DAGB4_WRCLI7__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB4_WRCLI7__MIN_BW_MASK 0x01C00000L +#define DAGB4_WRCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB4_WRCLI7__MAX_OSD_MASK 0xFC000000L +//DAGB4_WRCLI8 +#define DAGB4_WRCLI8__VIRT_CHAN__SHIFT 0x0 +#define DAGB4_WRCLI8__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB4_WRCLI8__URG_HIGH__SHIFT 0x4 +#define DAGB4_WRCLI8__URG_LOW__SHIFT 0x8 +#define DAGB4_WRCLI8__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB4_WRCLI8__MAX_BW__SHIFT 0xd +#define DAGB4_WRCLI8__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB4_WRCLI8__MIN_BW__SHIFT 0x16 +#define DAGB4_WRCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB4_WRCLI8__MAX_OSD__SHIFT 0x1a +#define DAGB4_WRCLI8__VIRT_CHAN_MASK 0x00000007L +#define DAGB4_WRCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB4_WRCLI8__URG_HIGH_MASK 0x000000F0L +#define DAGB4_WRCLI8__URG_LOW_MASK 0x00000F00L +#define DAGB4_WRCLI8__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB4_WRCLI8__MAX_BW_MASK 0x001FE000L +#define DAGB4_WRCLI8__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB4_WRCLI8__MIN_BW_MASK 0x01C00000L +#define DAGB4_WRCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB4_WRCLI8__MAX_OSD_MASK 0xFC000000L +//DAGB4_WRCLI9 +#define DAGB4_WRCLI9__VIRT_CHAN__SHIFT 0x0 +#define DAGB4_WRCLI9__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB4_WRCLI9__URG_HIGH__SHIFT 0x4 +#define DAGB4_WRCLI9__URG_LOW__SHIFT 0x8 +#define DAGB4_WRCLI9__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB4_WRCLI9__MAX_BW__SHIFT 0xd +#define DAGB4_WRCLI9__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB4_WRCLI9__MIN_BW__SHIFT 0x16 +#define DAGB4_WRCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB4_WRCLI9__MAX_OSD__SHIFT 0x1a 
+#define DAGB4_WRCLI9__VIRT_CHAN_MASK 0x00000007L +#define DAGB4_WRCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB4_WRCLI9__URG_HIGH_MASK 0x000000F0L +#define DAGB4_WRCLI9__URG_LOW_MASK 0x00000F00L +#define DAGB4_WRCLI9__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB4_WRCLI9__MAX_BW_MASK 0x001FE000L +#define DAGB4_WRCLI9__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB4_WRCLI9__MIN_BW_MASK 0x01C00000L +#define DAGB4_WRCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB4_WRCLI9__MAX_OSD_MASK 0xFC000000L +//DAGB4_WRCLI10 +#define DAGB4_WRCLI10__VIRT_CHAN__SHIFT 0x0 +#define DAGB4_WRCLI10__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB4_WRCLI10__URG_HIGH__SHIFT 0x4 +#define DAGB4_WRCLI10__URG_LOW__SHIFT 0x8 +#define DAGB4_WRCLI10__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB4_WRCLI10__MAX_BW__SHIFT 0xd +#define DAGB4_WRCLI10__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB4_WRCLI10__MIN_BW__SHIFT 0x16 +#define DAGB4_WRCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB4_WRCLI10__MAX_OSD__SHIFT 0x1a +#define DAGB4_WRCLI10__VIRT_CHAN_MASK 0x00000007L +#define DAGB4_WRCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB4_WRCLI10__URG_HIGH_MASK 0x000000F0L +#define DAGB4_WRCLI10__URG_LOW_MASK 0x00000F00L +#define DAGB4_WRCLI10__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB4_WRCLI10__MAX_BW_MASK 0x001FE000L +#define DAGB4_WRCLI10__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB4_WRCLI10__MIN_BW_MASK 0x01C00000L +#define DAGB4_WRCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB4_WRCLI10__MAX_OSD_MASK 0xFC000000L +//DAGB4_WRCLI11 +#define DAGB4_WRCLI11__VIRT_CHAN__SHIFT 0x0 +#define DAGB4_WRCLI11__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB4_WRCLI11__URG_HIGH__SHIFT 0x4 +#define DAGB4_WRCLI11__URG_LOW__SHIFT 0x8 +#define DAGB4_WRCLI11__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB4_WRCLI11__MAX_BW__SHIFT 0xd +#define DAGB4_WRCLI11__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB4_WRCLI11__MIN_BW__SHIFT 0x16 +#define DAGB4_WRCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB4_WRCLI11__MAX_OSD__SHIFT 0x1a +#define DAGB4_WRCLI11__VIRT_CHAN_MASK 0x00000007L +#define DAGB4_WRCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB4_WRCLI11__URG_HIGH_MASK 0x000000F0L +#define DAGB4_WRCLI11__URG_LOW_MASK 0x00000F00L +#define DAGB4_WRCLI11__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB4_WRCLI11__MAX_BW_MASK 0x001FE000L +#define DAGB4_WRCLI11__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB4_WRCLI11__MIN_BW_MASK 0x01C00000L +#define DAGB4_WRCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB4_WRCLI11__MAX_OSD_MASK 0xFC000000L +//DAGB4_WRCLI12 +#define DAGB4_WRCLI12__VIRT_CHAN__SHIFT 0x0 +#define DAGB4_WRCLI12__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB4_WRCLI12__URG_HIGH__SHIFT 0x4 +#define DAGB4_WRCLI12__URG_LOW__SHIFT 0x8 +#define DAGB4_WRCLI12__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB4_WRCLI12__MAX_BW__SHIFT 0xd +#define DAGB4_WRCLI12__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB4_WRCLI12__MIN_BW__SHIFT 0x16 +#define DAGB4_WRCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB4_WRCLI12__MAX_OSD__SHIFT 0x1a +#define DAGB4_WRCLI12__VIRT_CHAN_MASK 0x00000007L +#define DAGB4_WRCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB4_WRCLI12__URG_HIGH_MASK 0x000000F0L +#define DAGB4_WRCLI12__URG_LOW_MASK 0x00000F00L +#define DAGB4_WRCLI12__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB4_WRCLI12__MAX_BW_MASK 0x001FE000L +#define DAGB4_WRCLI12__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB4_WRCLI12__MIN_BW_MASK 0x01C00000L +#define DAGB4_WRCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB4_WRCLI12__MAX_OSD_MASK 0xFC000000L +//DAGB4_WRCLI13 
+#define DAGB4_WRCLI13__VIRT_CHAN__SHIFT 0x0 +#define DAGB4_WRCLI13__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB4_WRCLI13__URG_HIGH__SHIFT 0x4 +#define DAGB4_WRCLI13__URG_LOW__SHIFT 0x8 +#define DAGB4_WRCLI13__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB4_WRCLI13__MAX_BW__SHIFT 0xd +#define DAGB4_WRCLI13__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB4_WRCLI13__MIN_BW__SHIFT 0x16 +#define DAGB4_WRCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB4_WRCLI13__MAX_OSD__SHIFT 0x1a +#define DAGB4_WRCLI13__VIRT_CHAN_MASK 0x00000007L +#define DAGB4_WRCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB4_WRCLI13__URG_HIGH_MASK 0x000000F0L +#define DAGB4_WRCLI13__URG_LOW_MASK 0x00000F00L +#define DAGB4_WRCLI13__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB4_WRCLI13__MAX_BW_MASK 0x001FE000L +#define DAGB4_WRCLI13__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB4_WRCLI13__MIN_BW_MASK 0x01C00000L +#define DAGB4_WRCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB4_WRCLI13__MAX_OSD_MASK 0xFC000000L +//DAGB4_WRCLI14 +#define DAGB4_WRCLI14__VIRT_CHAN__SHIFT 0x0 +#define DAGB4_WRCLI14__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB4_WRCLI14__URG_HIGH__SHIFT 0x4 +#define DAGB4_WRCLI14__URG_LOW__SHIFT 0x8 +#define DAGB4_WRCLI14__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB4_WRCLI14__MAX_BW__SHIFT 0xd +#define DAGB4_WRCLI14__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB4_WRCLI14__MIN_BW__SHIFT 0x16 +#define DAGB4_WRCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB4_WRCLI14__MAX_OSD__SHIFT 0x1a +#define DAGB4_WRCLI14__VIRT_CHAN_MASK 0x00000007L +#define DAGB4_WRCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB4_WRCLI14__URG_HIGH_MASK 0x000000F0L +#define DAGB4_WRCLI14__URG_LOW_MASK 0x00000F00L +#define DAGB4_WRCLI14__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB4_WRCLI14__MAX_BW_MASK 0x001FE000L +#define DAGB4_WRCLI14__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB4_WRCLI14__MIN_BW_MASK 0x01C00000L +#define DAGB4_WRCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB4_WRCLI14__MAX_OSD_MASK 0xFC000000L +//DAGB4_WRCLI15 +#define DAGB4_WRCLI15__VIRT_CHAN__SHIFT 0x0 +#define DAGB4_WRCLI15__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB4_WRCLI15__URG_HIGH__SHIFT 0x4 +#define DAGB4_WRCLI15__URG_LOW__SHIFT 0x8 +#define DAGB4_WRCLI15__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB4_WRCLI15__MAX_BW__SHIFT 0xd +#define DAGB4_WRCLI15__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB4_WRCLI15__MIN_BW__SHIFT 0x16 +#define DAGB4_WRCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB4_WRCLI15__MAX_OSD__SHIFT 0x1a +#define DAGB4_WRCLI15__VIRT_CHAN_MASK 0x00000007L +#define DAGB4_WRCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB4_WRCLI15__URG_HIGH_MASK 0x000000F0L +#define DAGB4_WRCLI15__URG_LOW_MASK 0x00000F00L +#define DAGB4_WRCLI15__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB4_WRCLI15__MAX_BW_MASK 0x001FE000L +#define DAGB4_WRCLI15__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB4_WRCLI15__MIN_BW_MASK 0x01C00000L +#define DAGB4_WRCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB4_WRCLI15__MAX_OSD_MASK 0xFC000000L +//DAGB4_WR_CNTL +#define DAGB4_WR_CNTL__SCLK_FREQ__SHIFT 0x0 +#define DAGB4_WR_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4 +#define DAGB4_WR_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa +#define DAGB4_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10 +#define DAGB4_WR_CNTL__IO_LEVEL__SHIFT 0x11 +#define DAGB4_WR_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14 +#define DAGB4_WR_CNTL__SHARE_VC_NUM__SHIFT 0x17 +#define DAGB4_WR_CNTL__SCLK_FREQ_MASK 0x0000000FL +#define DAGB4_WR_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L +#define DAGB4_WR_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L 
+#define DAGB4_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L +#define DAGB4_WR_CNTL__IO_LEVEL_MASK 0x000E0000L +#define DAGB4_WR_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L +#define DAGB4_WR_CNTL__SHARE_VC_NUM_MASK 0x03800000L +//DAGB4_WR_GMI_CNTL +#define DAGB4_WR_GMI_CNTL__EA_CREDIT__SHIFT 0x0 +#define DAGB4_WR_GMI_CNTL__LEVEL__SHIFT 0x6 +#define DAGB4_WR_GMI_CNTL__MAX_BURST__SHIFT 0x9 +#define DAGB4_WR_GMI_CNTL__LAZY_TIMER__SHIFT 0xd +#define DAGB4_WR_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL +#define DAGB4_WR_GMI_CNTL__LEVEL_MASK 0x000001C0L +#define DAGB4_WR_GMI_CNTL__MAX_BURST_MASK 0x00001E00L +#define DAGB4_WR_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L +//DAGB4_WR_ADDR_DAGB +#define DAGB4_WR_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0 +#define DAGB4_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3 +#define DAGB4_WR_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6 +#define DAGB4_WR_ADDR_DAGB__WHOAMI__SHIFT 0x7 +#define DAGB4_WR_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L +#define DAGB4_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L +#define DAGB4_WR_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L +#define DAGB4_WR_ADDR_DAGB__WHOAMI_MASK 0x00001F80L +//DAGB4_WR_OUTPUT_DAGB_MAX_BURST +#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0 +#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4 +#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8 +#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc +#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10 +#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14 +#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18 +#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c +#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL +#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L +#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L +#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L +#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L +#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L +#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L +#define DAGB4_WR_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L +//DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER +#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0 +#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4 +#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8 +#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc +#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10 +#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14 +#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18 +#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c +#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL +#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L +#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L +#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L +#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L +#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L +#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L +#define DAGB4_WR_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L +//DAGB4_WR_CGTT_CLK_CTRL +#define DAGB4_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB4_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB4_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB4_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB4_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB4_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define 
DAGB4_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB4_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB4_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB4_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB4_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB4_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB4_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB4_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB4_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB4_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB4_L1TLB_WR_CGTT_CLK_CTRL +#define DAGB4_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB4_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB4_L1TLB_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB4_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB4_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB4_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB4_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB4_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB4_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB4_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB4_L1TLB_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB4_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB4_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB4_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB4_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB4_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB4_ATCVM_WR_CGTT_CLK_CTRL +#define DAGB4_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB4_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB4_ATCVM_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB4_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB4_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB4_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB4_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB4_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB4_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB4_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB4_ATCVM_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB4_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB4_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB4_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB4_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB4_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB4_WR_ADDR_DAGB_MAX_BURST0 +#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0 +#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4 +#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8 +#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc +#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10 +#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14 +#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18 +#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c +#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL +#define 
DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L +#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L +#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L +#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L +#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L +#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L +#define DAGB4_WR_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L +//DAGB4_WR_ADDR_DAGB_LAZY_TIMER0 +#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0 +#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4 +#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8 +#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc +#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10 +#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14 +#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18 +#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c +#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL +#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L +#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L +#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L +#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L +#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L +#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L +#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L +//DAGB4_WR_ADDR_DAGB_MAX_BURST1 +#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0 +#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4 +#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8 +#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc +#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10 +#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14 +#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18 +#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c +#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL +#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L +#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L +#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L +#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L +#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L +#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L +#define DAGB4_WR_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L +//DAGB4_WR_ADDR_DAGB_LAZY_TIMER1 +#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0 +#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4 +#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8 +#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc +#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10 +#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14 +#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18 +#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c +#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL +#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L +#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L +#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L +#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L +#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L +#define DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L +#define 
DAGB4_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L +//DAGB4_WR_DATA_DAGB +#define DAGB4_WR_DATA_DAGB__DAGB_ENABLE__SHIFT 0x0 +#define DAGB4_WR_DATA_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3 +#define DAGB4_WR_DATA_DAGB__DISABLE_SELF_INIT__SHIFT 0x6 +#define DAGB4_WR_DATA_DAGB__WHOAMI__SHIFT 0x7 +#define DAGB4_WR_DATA_DAGB__DAGB_ENABLE_MASK 0x00000007L +#define DAGB4_WR_DATA_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L +#define DAGB4_WR_DATA_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L +#define DAGB4_WR_DATA_DAGB__WHOAMI_MASK 0x00001F80L +//DAGB4_WR_DATA_DAGB_MAX_BURST0 +#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0 +#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4 +#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8 +#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc +#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10 +#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14 +#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18 +#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c +#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL +#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L +#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L +#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L +#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L +#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L +#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L +#define DAGB4_WR_DATA_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L +//DAGB4_WR_DATA_DAGB_LAZY_TIMER0 +#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0 +#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4 +#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8 +#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc +#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10 +#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14 +#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18 +#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c +#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL +#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L +#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L +#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L +#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L +#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L +#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L +#define DAGB4_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L +//DAGB4_WR_DATA_DAGB_MAX_BURST1 +#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0 +#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4 +#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8 +#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc +#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10 +#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14 +#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18 +#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c +#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL +#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L +#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L +#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L +#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L +#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L +#define 
DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L +#define DAGB4_WR_DATA_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L +//DAGB4_WR_DATA_DAGB_LAZY_TIMER1 +#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0 +#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4 +#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8 +#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc +#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10 +#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14 +#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18 +#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c +#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL +#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L +#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L +#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L +#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L +#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L +#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L +#define DAGB4_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L +//DAGB4_WR_VC0_CNTL +#define DAGB4_WR_VC0_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB4_WR_VC0_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB4_WR_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB4_WR_VC0_CNTL__MAX_BW__SHIFT 0xc +#define DAGB4_WR_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB4_WR_VC0_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB4_WR_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB4_WR_VC0_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB4_WR_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB4_WR_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB4_WR_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB4_WR_VC0_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB4_WR_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB4_WR_VC0_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB4_WR_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB4_WR_VC0_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB4_WR_VC1_CNTL +#define DAGB4_WR_VC1_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB4_WR_VC1_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB4_WR_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB4_WR_VC1_CNTL__MAX_BW__SHIFT 0xc +#define DAGB4_WR_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB4_WR_VC1_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB4_WR_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB4_WR_VC1_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB4_WR_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB4_WR_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB4_WR_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB4_WR_VC1_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB4_WR_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB4_WR_VC1_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB4_WR_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB4_WR_VC1_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB4_WR_VC2_CNTL +#define DAGB4_WR_VC2_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB4_WR_VC2_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB4_WR_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB4_WR_VC2_CNTL__MAX_BW__SHIFT 0xc +#define DAGB4_WR_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB4_WR_VC2_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB4_WR_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB4_WR_VC2_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB4_WR_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB4_WR_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB4_WR_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define 
DAGB4_WR_VC2_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB4_WR_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB4_WR_VC2_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB4_WR_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB4_WR_VC2_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB4_WR_VC3_CNTL +#define DAGB4_WR_VC3_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB4_WR_VC3_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB4_WR_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB4_WR_VC3_CNTL__MAX_BW__SHIFT 0xc +#define DAGB4_WR_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB4_WR_VC3_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB4_WR_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB4_WR_VC3_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB4_WR_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB4_WR_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB4_WR_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB4_WR_VC3_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB4_WR_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB4_WR_VC3_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB4_WR_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB4_WR_VC3_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB4_WR_VC4_CNTL +#define DAGB4_WR_VC4_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB4_WR_VC4_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB4_WR_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB4_WR_VC4_CNTL__MAX_BW__SHIFT 0xc +#define DAGB4_WR_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB4_WR_VC4_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB4_WR_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB4_WR_VC4_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB4_WR_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB4_WR_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB4_WR_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB4_WR_VC4_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB4_WR_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB4_WR_VC4_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB4_WR_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB4_WR_VC4_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB4_WR_VC5_CNTL +#define DAGB4_WR_VC5_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB4_WR_VC5_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB4_WR_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB4_WR_VC5_CNTL__MAX_BW__SHIFT 0xc +#define DAGB4_WR_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB4_WR_VC5_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB4_WR_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB4_WR_VC5_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB4_WR_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB4_WR_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB4_WR_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB4_WR_VC5_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB4_WR_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB4_WR_VC5_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB4_WR_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB4_WR_VC5_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB4_WR_VC6_CNTL +#define DAGB4_WR_VC6_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB4_WR_VC6_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB4_WR_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB4_WR_VC6_CNTL__MAX_BW__SHIFT 0xc +#define DAGB4_WR_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB4_WR_VC6_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB4_WR_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB4_WR_VC6_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB4_WR_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB4_WR_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB4_WR_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB4_WR_VC6_CNTL__MAX_BW_MASK 
0x000FF000L +#define DAGB4_WR_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB4_WR_VC6_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB4_WR_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB4_WR_VC6_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB4_WR_VC7_CNTL +#define DAGB4_WR_VC7_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB4_WR_VC7_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB4_WR_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB4_WR_VC7_CNTL__MAX_BW__SHIFT 0xc +#define DAGB4_WR_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB4_WR_VC7_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB4_WR_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB4_WR_VC7_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB4_WR_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB4_WR_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB4_WR_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB4_WR_VC7_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB4_WR_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB4_WR_VC7_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB4_WR_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB4_WR_VC7_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB4_WR_CNTL_MISC +#define DAGB4_WR_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0 +#define DAGB4_WR_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6 +#define DAGB4_WR_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd +#define DAGB4_WR_CNTL_MISC__STOR_CC_LEGACY_MODE__SHIFT 0x13 +#define DAGB4_WR_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14 +#define DAGB4_WR_CNTL_MISC__UTCL2_CID__SHIFT 0x15 +#define DAGB4_WR_CNTL_MISC__RDRET_FIFO_CREDITS__SHIFT 0x1a +#define DAGB4_WR_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL +#define DAGB4_WR_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L +#define DAGB4_WR_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L +#define DAGB4_WR_CNTL_MISC__STOR_CC_LEGACY_MODE_MASK 0x00080000L +#define DAGB4_WR_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L +#define DAGB4_WR_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L +#define DAGB4_WR_CNTL_MISC__RDRET_FIFO_CREDITS_MASK 0xFC000000L +//DAGB4_WR_TLB_CREDIT +#define DAGB4_WR_TLB_CREDIT__TLB0__SHIFT 0x0 +#define DAGB4_WR_TLB_CREDIT__TLB1__SHIFT 0x5 +#define DAGB4_WR_TLB_CREDIT__TLB2__SHIFT 0xa +#define DAGB4_WR_TLB_CREDIT__TLB3__SHIFT 0xf +#define DAGB4_WR_TLB_CREDIT__TLB4__SHIFT 0x14 +#define DAGB4_WR_TLB_CREDIT__TLB5__SHIFT 0x19 +#define DAGB4_WR_TLB_CREDIT__TLB0_MASK 0x0000001FL +#define DAGB4_WR_TLB_CREDIT__TLB1_MASK 0x000003E0L +#define DAGB4_WR_TLB_CREDIT__TLB2_MASK 0x00007C00L +#define DAGB4_WR_TLB_CREDIT__TLB3_MASK 0x000F8000L +#define DAGB4_WR_TLB_CREDIT__TLB4_MASK 0x01F00000L +#define DAGB4_WR_TLB_CREDIT__TLB5_MASK 0x3E000000L +//DAGB4_WR_DATA_CREDIT +#define DAGB4_WR_DATA_CREDIT__DLOCK_VC_CREDITS__SHIFT 0x0 +#define DAGB4_WR_DATA_CREDIT__LARGE_BURST_CREDITS__SHIFT 0x8 +#define DAGB4_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS__SHIFT 0x10 +#define DAGB4_WR_DATA_CREDIT__SMALL_BURST_CREDITS__SHIFT 0x18 +#define DAGB4_WR_DATA_CREDIT__DLOCK_VC_CREDITS_MASK 0x000000FFL +#define DAGB4_WR_DATA_CREDIT__LARGE_BURST_CREDITS_MASK 0x0000FF00L +#define DAGB4_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS_MASK 0x00FF0000L +#define DAGB4_WR_DATA_CREDIT__SMALL_BURST_CREDITS_MASK 0xFF000000L +//DAGB4_WR_MISC_CREDIT +#define DAGB4_WR_MISC_CREDIT__ATOMIC_CREDIT__SHIFT 0x0 +#define DAGB4_WR_MISC_CREDIT__DLOCK_VC_NUM__SHIFT 0x6 +#define DAGB4_WR_MISC_CREDIT__OSD_CREDIT__SHIFT 0x9 +#define DAGB4_WR_MISC_CREDIT__OSD_DLOCK_CREDIT__SHIFT 0x10 +#define DAGB4_WR_MISC_CREDIT__ATOMIC_CREDIT_MASK 0x0000003FL +#define DAGB4_WR_MISC_CREDIT__DLOCK_VC_NUM_MASK 0x000001C0L +#define DAGB4_WR_MISC_CREDIT__OSD_CREDIT_MASK 
0x0000FE00L +#define DAGB4_WR_MISC_CREDIT__OSD_DLOCK_CREDIT_MASK 0x007F0000L +//DAGB4_WRCLI_ASK_PENDING +#define DAGB4_WRCLI_ASK_PENDING__BUSY__SHIFT 0x0 +#define DAGB4_WRCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB4_WRCLI_GO_PENDING +#define DAGB4_WRCLI_GO_PENDING__BUSY__SHIFT 0x0 +#define DAGB4_WRCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB4_WRCLI_GBLSEND_PENDING +#define DAGB4_WRCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0 +#define DAGB4_WRCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB4_WRCLI_TLB_PENDING +#define DAGB4_WRCLI_TLB_PENDING__BUSY__SHIFT 0x0 +#define DAGB4_WRCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB4_WRCLI_OARB_PENDING +#define DAGB4_WRCLI_OARB_PENDING__BUSY__SHIFT 0x0 +#define DAGB4_WRCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB4_WRCLI_OSD_PENDING +#define DAGB4_WRCLI_OSD_PENDING__BUSY__SHIFT 0x0 +#define DAGB4_WRCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB4_WRCLI_DBUS_ASK_PENDING +#define DAGB4_WRCLI_DBUS_ASK_PENDING__BUSY__SHIFT 0x0 +#define DAGB4_WRCLI_DBUS_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB4_WRCLI_DBUS_GO_PENDING +#define DAGB4_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0 +#define DAGB4_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB4_DAGB_DLY +#define DAGB4_DAGB_DLY__DLY__SHIFT 0x0 +#define DAGB4_DAGB_DLY__CLI__SHIFT 0x8 +#define DAGB4_DAGB_DLY__POS__SHIFT 0x10 +#define DAGB4_DAGB_DLY__DLY_MASK 0x000000FFL +#define DAGB4_DAGB_DLY__CLI_MASK 0x0000FF00L +#define DAGB4_DAGB_DLY__POS_MASK 0x000F0000L +//DAGB4_CNTL_MISC +#define DAGB4_CNTL_MISC__EA_VC0_REMAP__SHIFT 0x0 +#define DAGB4_CNTL_MISC__EA_VC1_REMAP__SHIFT 0x3 +#define DAGB4_CNTL_MISC__EA_VC2_REMAP__SHIFT 0x6 +#define DAGB4_CNTL_MISC__EA_VC3_REMAP__SHIFT 0x9 +#define DAGB4_CNTL_MISC__EA_VC4_REMAP__SHIFT 0xc +#define DAGB4_CNTL_MISC__EA_VC5_REMAP__SHIFT 0xf +#define DAGB4_CNTL_MISC__EA_VC6_REMAP__SHIFT 0x12 +#define DAGB4_CNTL_MISC__EA_VC7_REMAP__SHIFT 0x15 +#define DAGB4_CNTL_MISC__BW_INIT_CYCLE__SHIFT 0x18 +#define DAGB4_CNTL_MISC__BW_RW_GAP_CYCLE__SHIFT 0x1e +#define DAGB4_CNTL_MISC__EA_VC0_REMAP_MASK 0x00000007L +#define DAGB4_CNTL_MISC__EA_VC1_REMAP_MASK 0x00000038L +#define DAGB4_CNTL_MISC__EA_VC2_REMAP_MASK 0x000001C0L +#define DAGB4_CNTL_MISC__EA_VC3_REMAP_MASK 0x00000E00L +#define DAGB4_CNTL_MISC__EA_VC4_REMAP_MASK 0x00007000L +#define DAGB4_CNTL_MISC__EA_VC5_REMAP_MASK 0x00038000L +#define DAGB4_CNTL_MISC__EA_VC6_REMAP_MASK 0x001C0000L +#define DAGB4_CNTL_MISC__EA_VC7_REMAP_MASK 0x00E00000L +#define DAGB4_CNTL_MISC__BW_INIT_CYCLE_MASK 0x3F000000L +#define DAGB4_CNTL_MISC__BW_RW_GAP_CYCLE_MASK 0xC0000000L +//DAGB4_CNTL_MISC2 +#define DAGB4_CNTL_MISC2__URG_BOOST_ENABLE__SHIFT 0x0 +#define DAGB4_CNTL_MISC2__URG_HALT_ENABLE__SHIFT 0x1 +#define DAGB4_CNTL_MISC2__DISABLE_WRREQ_CG__SHIFT 0x2 +#define DAGB4_CNTL_MISC2__DISABLE_WRRET_CG__SHIFT 0x3 +#define DAGB4_CNTL_MISC2__DISABLE_RDREQ_CG__SHIFT 0x4 +#define DAGB4_CNTL_MISC2__DISABLE_RDRET_CG__SHIFT 0x5 +#define DAGB4_CNTL_MISC2__DISABLE_TLBWR_CG__SHIFT 0x6 +#define DAGB4_CNTL_MISC2__DISABLE_TLBRD_CG__SHIFT 0x7 +#define DAGB4_CNTL_MISC2__DISABLE_EAWRREQ_BUSY__SHIFT 0x8 +#define DAGB4_CNTL_MISC2__DISABLE_EARDREQ_BUSY__SHIFT 0x9 +#define DAGB4_CNTL_MISC2__SWAP_CTL__SHIFT 0xa +#define DAGB4_CNTL_MISC2__RDRET_FIFO_PERF__SHIFT 0xb +#define DAGB4_CNTL_MISC2__RDRET_FIFO_DLOCK_CREDITS__SHIFT 0x11 +#define DAGB4_CNTL_MISC2__URG_BOOST_ENABLE_MASK 0x00000001L +#define DAGB4_CNTL_MISC2__URG_HALT_ENABLE_MASK 0x00000002L +#define DAGB4_CNTL_MISC2__DISABLE_WRREQ_CG_MASK 0x00000004L +#define DAGB4_CNTL_MISC2__DISABLE_WRRET_CG_MASK 0x00000008L +#define 
DAGB4_CNTL_MISC2__DISABLE_RDREQ_CG_MASK 0x00000010L +#define DAGB4_CNTL_MISC2__DISABLE_RDRET_CG_MASK 0x00000020L +#define DAGB4_CNTL_MISC2__DISABLE_TLBWR_CG_MASK 0x00000040L +#define DAGB4_CNTL_MISC2__DISABLE_TLBRD_CG_MASK 0x00000080L +#define DAGB4_CNTL_MISC2__DISABLE_EAWRREQ_BUSY_MASK 0x00000100L +#define DAGB4_CNTL_MISC2__DISABLE_EARDREQ_BUSY_MASK 0x00000200L +#define DAGB4_CNTL_MISC2__SWAP_CTL_MASK 0x00000400L +#define DAGB4_CNTL_MISC2__RDRET_FIFO_PERF_MASK 0x00000800L +#define DAGB4_CNTL_MISC2__RDRET_FIFO_DLOCK_CREDITS_MASK 0x007E0000L +//DAGB4_FIFO_EMPTY +#define DAGB4_FIFO_EMPTY__EMPTY__SHIFT 0x0 +#define DAGB4_FIFO_EMPTY__EMPTY_MASK 0x00FFFFFFL +//DAGB4_FIFO_FULL +#define DAGB4_FIFO_FULL__FULL__SHIFT 0x0 +#define DAGB4_FIFO_FULL__FULL_MASK 0x007FFFFFL +//DAGB4_WR_CREDITS_FULL +#define DAGB4_WR_CREDITS_FULL__FULL__SHIFT 0x0 +#define DAGB4_WR_CREDITS_FULL__FULL_MASK 0x1FFFFFFFL +//DAGB4_RD_CREDITS_FULL +#define DAGB4_RD_CREDITS_FULL__FULL__SHIFT 0x0 +#define DAGB4_RD_CREDITS_FULL__FULL_MASK 0x0003FFFFL +//DAGB4_PERFCOUNTER_LO +#define DAGB4_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0 +#define DAGB4_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL +//DAGB4_PERFCOUNTER_HI +#define DAGB4_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0 +#define DAGB4_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10 +#define DAGB4_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL +#define DAGB4_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L +//DAGB4_PERFCOUNTER0_CFG +#define DAGB4_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0 +#define DAGB4_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8 +#define DAGB4_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18 +#define DAGB4_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c +#define DAGB4_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d +#define DAGB4_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL +#define DAGB4_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define DAGB4_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L +#define DAGB4_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L +#define DAGB4_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L +//DAGB4_PERFCOUNTER1_CFG +#define DAGB4_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0 +#define DAGB4_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8 +#define DAGB4_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18 +#define DAGB4_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c +#define DAGB4_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d +#define DAGB4_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL +#define DAGB4_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define DAGB4_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L +#define DAGB4_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L +#define DAGB4_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L +//DAGB4_PERFCOUNTER2_CFG +#define DAGB4_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0 +#define DAGB4_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8 +#define DAGB4_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18 +#define DAGB4_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c +#define DAGB4_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d +#define DAGB4_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL +#define DAGB4_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define DAGB4_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L +#define DAGB4_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L +#define DAGB4_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L +//DAGB4_PERFCOUNTER_RSLT_CNTL +#define DAGB4_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0 +#define DAGB4_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8 +#define DAGB4_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10 +#define DAGB4_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18 +#define DAGB4_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19 +#define 
DAGB4_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a +#define DAGB4_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL +#define DAGB4_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L +#define DAGB4_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L +#define DAGB4_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L +#define DAGB4_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L +#define DAGB4_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L +//DAGB4_RESERVE0 +#define DAGB4_RESERVE0__RESERVE__SHIFT 0x0 +#define DAGB4_RESERVE0__RESERVE_MASK 0xFFFFFFFFL +//DAGB4_RESERVE1 +#define DAGB4_RESERVE1__RESERVE__SHIFT 0x0 +#define DAGB4_RESERVE1__RESERVE_MASK 0xFFFFFFFFL +//DAGB4_RESERVE2 +#define DAGB4_RESERVE2__RESERVE__SHIFT 0x0 +#define DAGB4_RESERVE2__RESERVE_MASK 0xFFFFFFFFL +//DAGB4_RESERVE3 +#define DAGB4_RESERVE3__RESERVE__SHIFT 0x0 +#define DAGB4_RESERVE3__RESERVE_MASK 0xFFFFFFFFL +//DAGB4_RESERVE4 +#define DAGB4_RESERVE4__RESERVE__SHIFT 0x0 +#define DAGB4_RESERVE4__RESERVE_MASK 0xFFFFFFFFL +//DAGB4_RESERVE5 +#define DAGB4_RESERVE5__RESERVE__SHIFT 0x0 +#define DAGB4_RESERVE5__RESERVE_MASK 0xFFFFFFFFL +//DAGB4_RESERVE6 +#define DAGB4_RESERVE6__RESERVE__SHIFT 0x0 +#define DAGB4_RESERVE6__RESERVE_MASK 0xFFFFFFFFL +//DAGB4_RESERVE7 +#define DAGB4_RESERVE7__RESERVE__SHIFT 0x0 +#define DAGB4_RESERVE7__RESERVE_MASK 0xFFFFFFFFL +//DAGB4_RESERVE8 +#define DAGB4_RESERVE8__RESERVE__SHIFT 0x0 +#define DAGB4_RESERVE8__RESERVE_MASK 0xFFFFFFFFL +//DAGB4_RESERVE9 +#define DAGB4_RESERVE9__RESERVE__SHIFT 0x0 +#define DAGB4_RESERVE9__RESERVE_MASK 0xFFFFFFFFL +//DAGB4_RESERVE10 +#define DAGB4_RESERVE10__RESERVE__SHIFT 0x0 +#define DAGB4_RESERVE10__RESERVE_MASK 0xFFFFFFFFL +//DAGB4_RESERVE11 +#define DAGB4_RESERVE11__RESERVE__SHIFT 0x0 +#define DAGB4_RESERVE11__RESERVE_MASK 0xFFFFFFFFL +//DAGB4_RESERVE12 +#define DAGB4_RESERVE12__RESERVE__SHIFT 0x0 +#define DAGB4_RESERVE12__RESERVE_MASK 0xFFFFFFFFL +//DAGB4_RESERVE13 +#define DAGB4_RESERVE13__RESERVE__SHIFT 0x0 +#define DAGB4_RESERVE13__RESERVE_MASK 0xFFFFFFFFL + + +// addressBlock: mmhub_ea_mmeadec0 +//MMEA0_DRAM_RD_CLI2GRP_MAP0 +#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0 +#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2 +#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4 +#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6 +#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8 +#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa +#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc +#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe +#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10 +#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12 +#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14 +#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16 +#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18 +#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a +#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c +#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e +#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L +#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL +#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L +#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L +#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L +#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L +#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L +#define 
MMEA0_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L +#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L +#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L +#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L +#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L +#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L +#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L +#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L +#define MMEA0_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L +//MMEA0_DRAM_RD_CLI2GRP_MAP1 +#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0 +#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2 +#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4 +#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6 +#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8 +#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa +#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc +#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe +#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10 +#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12 +#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14 +#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16 +#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18 +#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a +#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c +#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e +#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L +#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL +#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L +#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L +#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L +#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L +#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L +#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L +#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L +#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L +#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L +#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L +#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L +#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L +#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L +#define MMEA0_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L +//MMEA0_DRAM_WR_CLI2GRP_MAP0 +#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0 +#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2 +#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4 +#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6 +#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8 +#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa +#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc +#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe +#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10 +#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12 +#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14 +#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16 +#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18 +#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a +#define 
MMEA0_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c +#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e +#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L +#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL +#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L +#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L +#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L +#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L +#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L +#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L +#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L +#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L +#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L +#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L +#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L +#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L +#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L +#define MMEA0_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L +//MMEA0_DRAM_WR_CLI2GRP_MAP1 +#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0 +#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2 +#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4 +#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6 +#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8 +#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa +#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc +#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe +#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10 +#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12 +#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14 +#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16 +#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18 +#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a +#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c +#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e +#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L +#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL +#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L +#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L +#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L +#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L +#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L +#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L +#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L +#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L +#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L +#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L +#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L +#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L +#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L +#define MMEA0_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L +//MMEA0_DRAM_RD_GRP2VC_MAP +#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0 +#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3 +#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6 +#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9 +#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L +#define 
MMEA0_DRAM_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L +#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L +#define MMEA0_DRAM_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L +//MMEA0_DRAM_WR_GRP2VC_MAP +#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0 +#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3 +#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6 +#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9 +#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L +#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L +#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L +#define MMEA0_DRAM_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L +//MMEA0_DRAM_RD_LAZY +#define MMEA0_DRAM_RD_LAZY__GROUP0_DELAY__SHIFT 0x0 +#define MMEA0_DRAM_RD_LAZY__GROUP1_DELAY__SHIFT 0x3 +#define MMEA0_DRAM_RD_LAZY__GROUP2_DELAY__SHIFT 0x6 +#define MMEA0_DRAM_RD_LAZY__GROUP3_DELAY__SHIFT 0x9 +#define MMEA0_DRAM_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc +#define MMEA0_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14 +#define MMEA0_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b +#define MMEA0_DRAM_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L +#define MMEA0_DRAM_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L +#define MMEA0_DRAM_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L +#define MMEA0_DRAM_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L +#define MMEA0_DRAM_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L +#define MMEA0_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L +#define MMEA0_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L +//MMEA0_DRAM_WR_LAZY +#define MMEA0_DRAM_WR_LAZY__GROUP0_DELAY__SHIFT 0x0 +#define MMEA0_DRAM_WR_LAZY__GROUP1_DELAY__SHIFT 0x3 +#define MMEA0_DRAM_WR_LAZY__GROUP2_DELAY__SHIFT 0x6 +#define MMEA0_DRAM_WR_LAZY__GROUP3_DELAY__SHIFT 0x9 +#define MMEA0_DRAM_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc +#define MMEA0_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14 +#define MMEA0_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b +#define MMEA0_DRAM_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L +#define MMEA0_DRAM_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L +#define MMEA0_DRAM_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L +#define MMEA0_DRAM_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L +#define MMEA0_DRAM_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L +#define MMEA0_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L +#define MMEA0_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L +//MMEA0_DRAM_RD_CAM_CNTL +#define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0 +#define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4 +#define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8 +#define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc +#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10 +#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13 +#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16 +#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19 +#define MMEA0_DRAM_RD_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c +#define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL +#define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L +#define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L +#define MMEA0_DRAM_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L +#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L +#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L +#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L +#define MMEA0_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L +#define MMEA0_DRAM_RD_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L +//MMEA0_DRAM_WR_CAM_CNTL 
+#define MMEA0_DRAM_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0 +#define MMEA0_DRAM_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4 +#define MMEA0_DRAM_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8 +#define MMEA0_DRAM_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc +#define MMEA0_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10 +#define MMEA0_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13 +#define MMEA0_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16 +#define MMEA0_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19 +#define MMEA0_DRAM_WR_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c +#define MMEA0_DRAM_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL +#define MMEA0_DRAM_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L +#define MMEA0_DRAM_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L +#define MMEA0_DRAM_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L +#define MMEA0_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L +#define MMEA0_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L +#define MMEA0_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L +#define MMEA0_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L +#define MMEA0_DRAM_WR_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L +//MMEA0_DRAM_PAGE_BURST +#define MMEA0_DRAM_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0 +#define MMEA0_DRAM_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8 +#define MMEA0_DRAM_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10 +#define MMEA0_DRAM_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18 +#define MMEA0_DRAM_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL +#define MMEA0_DRAM_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L +#define MMEA0_DRAM_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L +#define MMEA0_DRAM_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L +//MMEA0_DRAM_RD_PRI_AGE +#define MMEA0_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0 +#define MMEA0_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3 +#define MMEA0_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6 +#define MMEA0_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9 +#define MMEA0_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc +#define MMEA0_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf +#define MMEA0_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12 +#define MMEA0_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15 +#define MMEA0_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L +#define MMEA0_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L +#define MMEA0_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L +#define MMEA0_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L +#define MMEA0_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L +#define MMEA0_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L +#define MMEA0_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L +#define MMEA0_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L +//MMEA0_DRAM_WR_PRI_AGE +#define MMEA0_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0 +#define MMEA0_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3 +#define MMEA0_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6 +#define MMEA0_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9 +#define MMEA0_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc +#define MMEA0_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf +#define MMEA0_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12 +#define MMEA0_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15 +#define MMEA0_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L +#define MMEA0_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L +#define MMEA0_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L +#define MMEA0_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L +#define 
MMEA0_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L +#define MMEA0_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L +#define MMEA0_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L +#define MMEA0_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L +//MMEA0_DRAM_RD_PRI_QUEUING +#define MMEA0_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0 +#define MMEA0_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3 +#define MMEA0_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6 +#define MMEA0_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9 +#define MMEA0_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L +#define MMEA0_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L +#define MMEA0_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L +#define MMEA0_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L +//MMEA0_DRAM_WR_PRI_QUEUING +#define MMEA0_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0 +#define MMEA0_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3 +#define MMEA0_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6 +#define MMEA0_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9 +#define MMEA0_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L +#define MMEA0_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L +#define MMEA0_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L +#define MMEA0_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L +//MMEA0_DRAM_RD_PRI_FIXED +#define MMEA0_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0 +#define MMEA0_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3 +#define MMEA0_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6 +#define MMEA0_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9 +#define MMEA0_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L +#define MMEA0_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L +#define MMEA0_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L +#define MMEA0_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L +//MMEA0_DRAM_WR_PRI_FIXED +#define MMEA0_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0 +#define MMEA0_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3 +#define MMEA0_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6 +#define MMEA0_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9 +#define MMEA0_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L +#define MMEA0_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L +#define MMEA0_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L +#define MMEA0_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L +//MMEA0_DRAM_RD_PRI_URGENCY +#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0 +#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3 +#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6 +#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9 +#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc +#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd +#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe +#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf +#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L +#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L +#define 
MMEA0_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L +#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L +#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L +#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L +#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L +#define MMEA0_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L +//MMEA0_DRAM_WR_PRI_URGENCY +#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0 +#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3 +#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6 +#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9 +#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc +#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd +#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe +#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf +#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L +#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L +#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L +#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L +#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L +#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L +#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L +#define MMEA0_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L +//MMEA0_DRAM_RD_PRI_QUANT_PRI1 +#define MMEA0_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA0_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA0_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA0_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA0_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA0_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA0_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA0_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA0_DRAM_RD_PRI_QUANT_PRI2 +#define MMEA0_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA0_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA0_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA0_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA0_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA0_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA0_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA0_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA0_DRAM_RD_PRI_QUANT_PRI3 +#define MMEA0_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA0_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA0_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA0_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA0_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA0_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA0_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA0_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA0_DRAM_WR_PRI_QUANT_PRI1 +#define MMEA0_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0 +#define 
MMEA0_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA0_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA0_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA0_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA0_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA0_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA0_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA0_DRAM_WR_PRI_QUANT_PRI2 +#define MMEA0_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA0_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA0_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA0_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA0_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA0_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA0_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA0_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA0_DRAM_WR_PRI_QUANT_PRI3 +#define MMEA0_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA0_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA0_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA0_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA0_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA0_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA0_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA0_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA0_GMI_RD_CLI2GRP_MAP0 +#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0 +#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2 +#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4 +#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6 +#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8 +#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa +#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc +#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe +#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10 +#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12 +#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14 +#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16 +#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18 +#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a +#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c +#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e +#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L +#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL +#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L +#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L +#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L +#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L +#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L +#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L +#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L +#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L +#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L +#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L +#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L +#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L +#define 
MMEA0_GMI_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L +#define MMEA0_GMI_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L +//MMEA0_GMI_RD_CLI2GRP_MAP1 +#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0 +#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2 +#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4 +#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6 +#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8 +#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa +#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc +#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe +#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10 +#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12 +#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14 +#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16 +#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18 +#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a +#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c +#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e +#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L +#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL +#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L +#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L +#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L +#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L +#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L +#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L +#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L +#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L +#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L +#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L +#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L +#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L +#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L +#define MMEA0_GMI_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L +//MMEA0_GMI_WR_CLI2GRP_MAP0 +#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0 +#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2 +#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4 +#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6 +#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8 +#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa +#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc +#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe +#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10 +#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12 +#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14 +#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16 +#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18 +#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a +#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c +#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e +#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L +#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL +#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L +#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L +#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L +#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L +#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 
0x00003000L +#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L +#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L +#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L +#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L +#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L +#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L +#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L +#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L +#define MMEA0_GMI_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L +//MMEA0_GMI_WR_CLI2GRP_MAP1 +#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0 +#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2 +#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4 +#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6 +#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8 +#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa +#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc +#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe +#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10 +#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12 +#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14 +#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16 +#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18 +#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a +#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c +#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e +#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L +#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL +#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L +#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L +#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L +#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L +#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L +#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L +#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L +#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L +#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L +#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L +#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L +#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L +#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L +#define MMEA0_GMI_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L +//MMEA0_GMI_RD_GRP2VC_MAP +#define MMEA0_GMI_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0 +#define MMEA0_GMI_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3 +#define MMEA0_GMI_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6 +#define MMEA0_GMI_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9 +#define MMEA0_GMI_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L +#define MMEA0_GMI_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L +#define MMEA0_GMI_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L +#define MMEA0_GMI_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L +//MMEA0_GMI_WR_GRP2VC_MAP +#define MMEA0_GMI_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0 +#define MMEA0_GMI_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3 +#define MMEA0_GMI_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6 +#define MMEA0_GMI_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9 +#define MMEA0_GMI_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L +#define MMEA0_GMI_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L +#define MMEA0_GMI_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L +#define 
MMEA0_GMI_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L +//MMEA0_GMI_RD_LAZY +#define MMEA0_GMI_RD_LAZY__GROUP0_DELAY__SHIFT 0x0 +#define MMEA0_GMI_RD_LAZY__GROUP1_DELAY__SHIFT 0x3 +#define MMEA0_GMI_RD_LAZY__GROUP2_DELAY__SHIFT 0x6 +#define MMEA0_GMI_RD_LAZY__GROUP3_DELAY__SHIFT 0x9 +#define MMEA0_GMI_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc +#define MMEA0_GMI_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14 +#define MMEA0_GMI_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b +#define MMEA0_GMI_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L +#define MMEA0_GMI_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L +#define MMEA0_GMI_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L +#define MMEA0_GMI_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L +#define MMEA0_GMI_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L +#define MMEA0_GMI_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L +#define MMEA0_GMI_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L +//MMEA0_GMI_WR_LAZY +#define MMEA0_GMI_WR_LAZY__GROUP0_DELAY__SHIFT 0x0 +#define MMEA0_GMI_WR_LAZY__GROUP1_DELAY__SHIFT 0x3 +#define MMEA0_GMI_WR_LAZY__GROUP2_DELAY__SHIFT 0x6 +#define MMEA0_GMI_WR_LAZY__GROUP3_DELAY__SHIFT 0x9 +#define MMEA0_GMI_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc +#define MMEA0_GMI_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14 +#define MMEA0_GMI_WR_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b +#define MMEA0_GMI_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L +#define MMEA0_GMI_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L +#define MMEA0_GMI_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L +#define MMEA0_GMI_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L +#define MMEA0_GMI_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L +#define MMEA0_GMI_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L +#define MMEA0_GMI_WR_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L +//MMEA0_GMI_RD_CAM_CNTL +#define MMEA0_GMI_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0 +#define MMEA0_GMI_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4 +#define MMEA0_GMI_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8 +#define MMEA0_GMI_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc +#define MMEA0_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10 +#define MMEA0_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13 +#define MMEA0_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16 +#define MMEA0_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19 +#define MMEA0_GMI_RD_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c +#define MMEA0_GMI_RD_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d +#define MMEA0_GMI_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL +#define MMEA0_GMI_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L +#define MMEA0_GMI_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L +#define MMEA0_GMI_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L +#define MMEA0_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L +#define MMEA0_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L +#define MMEA0_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L +#define MMEA0_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L +#define MMEA0_GMI_RD_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L +#define MMEA0_GMI_RD_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L +//MMEA0_GMI_WR_CAM_CNTL +#define MMEA0_GMI_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0 +#define MMEA0_GMI_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4 +#define MMEA0_GMI_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8 +#define MMEA0_GMI_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc +#define MMEA0_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10 +#define MMEA0_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13 +#define MMEA0_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16 +#define MMEA0_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19 +#define MMEA0_GMI_WR_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c 
+#define MMEA0_GMI_WR_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d +#define MMEA0_GMI_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL +#define MMEA0_GMI_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L +#define MMEA0_GMI_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L +#define MMEA0_GMI_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L +#define MMEA0_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L +#define MMEA0_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L +#define MMEA0_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L +#define MMEA0_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L +#define MMEA0_GMI_WR_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L +#define MMEA0_GMI_WR_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L +//MMEA0_GMI_PAGE_BURST +#define MMEA0_GMI_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0 +#define MMEA0_GMI_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8 +#define MMEA0_GMI_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10 +#define MMEA0_GMI_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18 +#define MMEA0_GMI_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL +#define MMEA0_GMI_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L +#define MMEA0_GMI_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L +#define MMEA0_GMI_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L +//MMEA0_GMI_RD_PRI_AGE +#define MMEA0_GMI_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0 +#define MMEA0_GMI_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3 +#define MMEA0_GMI_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6 +#define MMEA0_GMI_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9 +#define MMEA0_GMI_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc +#define MMEA0_GMI_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf +#define MMEA0_GMI_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12 +#define MMEA0_GMI_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15 +#define MMEA0_GMI_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L +#define MMEA0_GMI_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L +#define MMEA0_GMI_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L +#define MMEA0_GMI_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L +#define MMEA0_GMI_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L +#define MMEA0_GMI_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L +#define MMEA0_GMI_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L +#define MMEA0_GMI_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L +//MMEA0_GMI_WR_PRI_AGE +#define MMEA0_GMI_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0 +#define MMEA0_GMI_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3 +#define MMEA0_GMI_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6 +#define MMEA0_GMI_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9 +#define MMEA0_GMI_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc +#define MMEA0_GMI_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf +#define MMEA0_GMI_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12 +#define MMEA0_GMI_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15 +#define MMEA0_GMI_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L +#define MMEA0_GMI_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L +#define MMEA0_GMI_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L +#define MMEA0_GMI_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L +#define MMEA0_GMI_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L +#define MMEA0_GMI_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L +#define MMEA0_GMI_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L +#define MMEA0_GMI_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L +//MMEA0_GMI_RD_PRI_QUEUING +#define MMEA0_GMI_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0 +#define MMEA0_GMI_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3 +#define 
MMEA0_GMI_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6 +#define MMEA0_GMI_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9 +#define MMEA0_GMI_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L +#define MMEA0_GMI_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L +#define MMEA0_GMI_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L +#define MMEA0_GMI_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L +//MMEA0_GMI_WR_PRI_QUEUING +#define MMEA0_GMI_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0 +#define MMEA0_GMI_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3 +#define MMEA0_GMI_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6 +#define MMEA0_GMI_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9 +#define MMEA0_GMI_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L +#define MMEA0_GMI_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L +#define MMEA0_GMI_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L +#define MMEA0_GMI_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L +//MMEA0_GMI_RD_PRI_FIXED +#define MMEA0_GMI_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0 +#define MMEA0_GMI_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3 +#define MMEA0_GMI_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6 +#define MMEA0_GMI_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9 +#define MMEA0_GMI_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L +#define MMEA0_GMI_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L +#define MMEA0_GMI_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L +#define MMEA0_GMI_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L +//MMEA0_GMI_WR_PRI_FIXED +#define MMEA0_GMI_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0 +#define MMEA0_GMI_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3 +#define MMEA0_GMI_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6 +#define MMEA0_GMI_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9 +#define MMEA0_GMI_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L +#define MMEA0_GMI_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L +#define MMEA0_GMI_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L +#define MMEA0_GMI_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L +//MMEA0_GMI_RD_PRI_URGENCY +#define MMEA0_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0 +#define MMEA0_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3 +#define MMEA0_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6 +#define MMEA0_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9 +#define MMEA0_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc +#define MMEA0_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd +#define MMEA0_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe +#define MMEA0_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf +#define MMEA0_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L +#define MMEA0_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L +#define MMEA0_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L +#define MMEA0_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L +#define MMEA0_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L +#define MMEA0_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L +#define MMEA0_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L +#define MMEA0_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L +//MMEA0_GMI_WR_PRI_URGENCY +#define 
MMEA0_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0 +#define MMEA0_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3 +#define MMEA0_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6 +#define MMEA0_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9 +#define MMEA0_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc +#define MMEA0_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd +#define MMEA0_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe +#define MMEA0_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf +#define MMEA0_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L +#define MMEA0_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L +#define MMEA0_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L +#define MMEA0_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L +#define MMEA0_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L +#define MMEA0_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L +#define MMEA0_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L +#define MMEA0_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L +//MMEA0_GMI_RD_PRI_URGENCY_MASKING +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0 +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1 +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2 +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3 +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4 +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5 +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6 +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7 +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8 +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9 +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10 +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11 +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12 +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13 +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14 +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15 +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16 +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17 +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18 +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19 +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L 
+#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L +#define MMEA0_GMI_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L +//MMEA0_GMI_WR_PRI_URGENCY_MASKING +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0 +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1 +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2 +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3 +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4 +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5 +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6 +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7 +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8 +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9 +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10 +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11 +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12 +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13 +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14 +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15 +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16 +#define 
MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17 +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18 +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19 +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L +#define MMEA0_GMI_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L +//MMEA0_GMI_RD_PRI_QUANT_PRI1 +#define MMEA0_GMI_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA0_GMI_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA0_GMI_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA0_GMI_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA0_GMI_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA0_GMI_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA0_GMI_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA0_GMI_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA0_GMI_RD_PRI_QUANT_PRI2 +#define MMEA0_GMI_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0 +#define 
MMEA0_GMI_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA0_GMI_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA0_GMI_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA0_GMI_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA0_GMI_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA0_GMI_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA0_GMI_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA0_GMI_RD_PRI_QUANT_PRI3 +#define MMEA0_GMI_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA0_GMI_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA0_GMI_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA0_GMI_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA0_GMI_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA0_GMI_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA0_GMI_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA0_GMI_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA0_GMI_WR_PRI_QUANT_PRI1 +#define MMEA0_GMI_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA0_GMI_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA0_GMI_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA0_GMI_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA0_GMI_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA0_GMI_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA0_GMI_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA0_GMI_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA0_GMI_WR_PRI_QUANT_PRI2 +#define MMEA0_GMI_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA0_GMI_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA0_GMI_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA0_GMI_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA0_GMI_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA0_GMI_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA0_GMI_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA0_GMI_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA0_GMI_WR_PRI_QUANT_PRI3 +#define MMEA0_GMI_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA0_GMI_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA0_GMI_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA0_GMI_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA0_GMI_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA0_GMI_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA0_GMI_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA0_GMI_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA0_ADDRNORM_BASE_ADDR0 +#define MMEA0_ADDRNORM_BASE_ADDR0__ADDR_RNG_VAL__SHIFT 0x0 +#define MMEA0_ADDRNORM_BASE_ADDR0__LGCY_MMIO_HOLE_EN__SHIFT 0x1 +#define MMEA0_ADDRNORM_BASE_ADDR0__INTLV_NUM_CHAN__SHIFT 0x2 +#define MMEA0_ADDRNORM_BASE_ADDR0__INTLV_NUM_DIES__SHIFT 0x6 +#define MMEA0_ADDRNORM_BASE_ADDR0__INTLV_NUM_SOCKETS__SHIFT 0x8 +#define MMEA0_ADDRNORM_BASE_ADDR0__INTLV_ADDR_SEL__SHIFT 0x9 +#define MMEA0_ADDRNORM_BASE_ADDR0__BASE_ADDR__SHIFT 0xc +#define MMEA0_ADDRNORM_BASE_ADDR0__ADDR_RNG_VAL_MASK 0x00000001L +#define MMEA0_ADDRNORM_BASE_ADDR0__LGCY_MMIO_HOLE_EN_MASK 0x00000002L +#define MMEA0_ADDRNORM_BASE_ADDR0__INTLV_NUM_CHAN_MASK 0x0000003CL +#define MMEA0_ADDRNORM_BASE_ADDR0__INTLV_NUM_DIES_MASK 0x000000C0L 
+#define MMEA0_ADDRNORM_BASE_ADDR0__INTLV_NUM_SOCKETS_MASK 0x00000100L +#define MMEA0_ADDRNORM_BASE_ADDR0__INTLV_ADDR_SEL_MASK 0x00000E00L +#define MMEA0_ADDRNORM_BASE_ADDR0__BASE_ADDR_MASK 0xFFFFF000L +//MMEA0_ADDRNORM_LIMIT_ADDR0 +#define MMEA0_ADDRNORM_LIMIT_ADDR0__DST_FABRIC_ID__SHIFT 0x0 +#define MMEA0_ADDRNORM_LIMIT_ADDR0__LIMIT_ADDR__SHIFT 0xc +#define MMEA0_ADDRNORM_LIMIT_ADDR0__DST_FABRIC_ID_MASK 0x0000001FL +#define MMEA0_ADDRNORM_LIMIT_ADDR0__LIMIT_ADDR_MASK 0xFFFFF000L +//MMEA0_ADDRNORM_BASE_ADDR1 +#define MMEA0_ADDRNORM_BASE_ADDR1__ADDR_RNG_VAL__SHIFT 0x0 +#define MMEA0_ADDRNORM_BASE_ADDR1__LGCY_MMIO_HOLE_EN__SHIFT 0x1 +#define MMEA0_ADDRNORM_BASE_ADDR1__INTLV_NUM_CHAN__SHIFT 0x2 +#define MMEA0_ADDRNORM_BASE_ADDR1__INTLV_NUM_DIES__SHIFT 0x6 +#define MMEA0_ADDRNORM_BASE_ADDR1__INTLV_NUM_SOCKETS__SHIFT 0x8 +#define MMEA0_ADDRNORM_BASE_ADDR1__INTLV_ADDR_SEL__SHIFT 0x9 +#define MMEA0_ADDRNORM_BASE_ADDR1__BASE_ADDR__SHIFT 0xc +#define MMEA0_ADDRNORM_BASE_ADDR1__ADDR_RNG_VAL_MASK 0x00000001L +#define MMEA0_ADDRNORM_BASE_ADDR1__LGCY_MMIO_HOLE_EN_MASK 0x00000002L +#define MMEA0_ADDRNORM_BASE_ADDR1__INTLV_NUM_CHAN_MASK 0x0000003CL +#define MMEA0_ADDRNORM_BASE_ADDR1__INTLV_NUM_DIES_MASK 0x000000C0L +#define MMEA0_ADDRNORM_BASE_ADDR1__INTLV_NUM_SOCKETS_MASK 0x00000100L +#define MMEA0_ADDRNORM_BASE_ADDR1__INTLV_ADDR_SEL_MASK 0x00000E00L +#define MMEA0_ADDRNORM_BASE_ADDR1__BASE_ADDR_MASK 0xFFFFF000L +//MMEA0_ADDRNORM_LIMIT_ADDR1 +#define MMEA0_ADDRNORM_LIMIT_ADDR1__DST_FABRIC_ID__SHIFT 0x0 +#define MMEA0_ADDRNORM_LIMIT_ADDR1__LIMIT_ADDR__SHIFT 0xc +#define MMEA0_ADDRNORM_LIMIT_ADDR1__DST_FABRIC_ID_MASK 0x0000001FL +#define MMEA0_ADDRNORM_LIMIT_ADDR1__LIMIT_ADDR_MASK 0xFFFFF000L +//MMEA0_ADDRNORM_OFFSET_ADDR1 +#define MMEA0_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_EN__SHIFT 0x0 +#define MMEA0_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET__SHIFT 0x14 +#define MMEA0_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_EN_MASK 0x00000001L +#define MMEA0_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_MASK 0xFFF00000L +//MMEA0_ADDRNORM_BASE_ADDR2 +#define MMEA0_ADDRNORM_BASE_ADDR2__ADDR_RNG_VAL__SHIFT 0x0 +#define MMEA0_ADDRNORM_BASE_ADDR2__LGCY_MMIO_HOLE_EN__SHIFT 0x1 +#define MMEA0_ADDRNORM_BASE_ADDR2__INTLV_NUM_CHAN__SHIFT 0x2 +#define MMEA0_ADDRNORM_BASE_ADDR2__INTLV_NUM_DIES__SHIFT 0x6 +#define MMEA0_ADDRNORM_BASE_ADDR2__INTLV_NUM_SOCKETS__SHIFT 0x8 +#define MMEA0_ADDRNORM_BASE_ADDR2__INTLV_ADDR_SEL__SHIFT 0x9 +#define MMEA0_ADDRNORM_BASE_ADDR2__BASE_ADDR__SHIFT 0xc +#define MMEA0_ADDRNORM_BASE_ADDR2__ADDR_RNG_VAL_MASK 0x00000001L +#define MMEA0_ADDRNORM_BASE_ADDR2__LGCY_MMIO_HOLE_EN_MASK 0x00000002L +#define MMEA0_ADDRNORM_BASE_ADDR2__INTLV_NUM_CHAN_MASK 0x0000003CL +#define MMEA0_ADDRNORM_BASE_ADDR2__INTLV_NUM_DIES_MASK 0x000000C0L +#define MMEA0_ADDRNORM_BASE_ADDR2__INTLV_NUM_SOCKETS_MASK 0x00000100L +#define MMEA0_ADDRNORM_BASE_ADDR2__INTLV_ADDR_SEL_MASK 0x00000E00L +#define MMEA0_ADDRNORM_BASE_ADDR2__BASE_ADDR_MASK 0xFFFFF000L +//MMEA0_ADDRNORM_LIMIT_ADDR2 +#define MMEA0_ADDRNORM_LIMIT_ADDR2__DST_FABRIC_ID__SHIFT 0x0 +#define MMEA0_ADDRNORM_LIMIT_ADDR2__LIMIT_ADDR__SHIFT 0xc +#define MMEA0_ADDRNORM_LIMIT_ADDR2__DST_FABRIC_ID_MASK 0x0000001FL +#define MMEA0_ADDRNORM_LIMIT_ADDR2__LIMIT_ADDR_MASK 0xFFFFF000L +//MMEA0_ADDRNORM_BASE_ADDR3 +#define MMEA0_ADDRNORM_BASE_ADDR3__ADDR_RNG_VAL__SHIFT 0x0 +#define MMEA0_ADDRNORM_BASE_ADDR3__LGCY_MMIO_HOLE_EN__SHIFT 0x1 +#define MMEA0_ADDRNORM_BASE_ADDR3__INTLV_NUM_CHAN__SHIFT 0x2 +#define MMEA0_ADDRNORM_BASE_ADDR3__INTLV_NUM_DIES__SHIFT 0x6 +#define 
MMEA0_ADDRNORM_BASE_ADDR3__INTLV_NUM_SOCKETS__SHIFT 0x8 +#define MMEA0_ADDRNORM_BASE_ADDR3__INTLV_ADDR_SEL__SHIFT 0x9 +#define MMEA0_ADDRNORM_BASE_ADDR3__BASE_ADDR__SHIFT 0xc +#define MMEA0_ADDRNORM_BASE_ADDR3__ADDR_RNG_VAL_MASK 0x00000001L +#define MMEA0_ADDRNORM_BASE_ADDR3__LGCY_MMIO_HOLE_EN_MASK 0x00000002L +#define MMEA0_ADDRNORM_BASE_ADDR3__INTLV_NUM_CHAN_MASK 0x0000003CL +#define MMEA0_ADDRNORM_BASE_ADDR3__INTLV_NUM_DIES_MASK 0x000000C0L +#define MMEA0_ADDRNORM_BASE_ADDR3__INTLV_NUM_SOCKETS_MASK 0x00000100L +#define MMEA0_ADDRNORM_BASE_ADDR3__INTLV_ADDR_SEL_MASK 0x00000E00L +#define MMEA0_ADDRNORM_BASE_ADDR3__BASE_ADDR_MASK 0xFFFFF000L +//MMEA0_ADDRNORM_LIMIT_ADDR3 +#define MMEA0_ADDRNORM_LIMIT_ADDR3__DST_FABRIC_ID__SHIFT 0x0 +#define MMEA0_ADDRNORM_LIMIT_ADDR3__LIMIT_ADDR__SHIFT 0xc +#define MMEA0_ADDRNORM_LIMIT_ADDR3__DST_FABRIC_ID_MASK 0x0000001FL +#define MMEA0_ADDRNORM_LIMIT_ADDR3__LIMIT_ADDR_MASK 0xFFFFF000L +//MMEA0_ADDRNORM_OFFSET_ADDR3 +#define MMEA0_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_EN__SHIFT 0x0 +#define MMEA0_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET__SHIFT 0x14 +#define MMEA0_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_EN_MASK 0x00000001L +#define MMEA0_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_MASK 0xFFF00000L +//MMEA0_ADDRNORM_BASE_ADDR4 +#define MMEA0_ADDRNORM_BASE_ADDR4__ADDR_RNG_VAL__SHIFT 0x0 +#define MMEA0_ADDRNORM_BASE_ADDR4__LGCY_MMIO_HOLE_EN__SHIFT 0x1 +#define MMEA0_ADDRNORM_BASE_ADDR4__INTLV_NUM_CHAN__SHIFT 0x2 +#define MMEA0_ADDRNORM_BASE_ADDR4__INTLV_NUM_DIES__SHIFT 0x6 +#define MMEA0_ADDRNORM_BASE_ADDR4__INTLV_NUM_SOCKETS__SHIFT 0x8 +#define MMEA0_ADDRNORM_BASE_ADDR4__INTLV_ADDR_SEL__SHIFT 0x9 +#define MMEA0_ADDRNORM_BASE_ADDR4__BASE_ADDR__SHIFT 0xc +#define MMEA0_ADDRNORM_BASE_ADDR4__ADDR_RNG_VAL_MASK 0x00000001L +#define MMEA0_ADDRNORM_BASE_ADDR4__LGCY_MMIO_HOLE_EN_MASK 0x00000002L +#define MMEA0_ADDRNORM_BASE_ADDR4__INTLV_NUM_CHAN_MASK 0x0000003CL +#define MMEA0_ADDRNORM_BASE_ADDR4__INTLV_NUM_DIES_MASK 0x000000C0L +#define MMEA0_ADDRNORM_BASE_ADDR4__INTLV_NUM_SOCKETS_MASK 0x00000100L +#define MMEA0_ADDRNORM_BASE_ADDR4__INTLV_ADDR_SEL_MASK 0x00000E00L +#define MMEA0_ADDRNORM_BASE_ADDR4__BASE_ADDR_MASK 0xFFFFF000L +//MMEA0_ADDRNORM_LIMIT_ADDR4 +#define MMEA0_ADDRNORM_LIMIT_ADDR4__DST_FABRIC_ID__SHIFT 0x0 +#define MMEA0_ADDRNORM_LIMIT_ADDR4__LIMIT_ADDR__SHIFT 0xc +#define MMEA0_ADDRNORM_LIMIT_ADDR4__DST_FABRIC_ID_MASK 0x0000001FL +#define MMEA0_ADDRNORM_LIMIT_ADDR4__LIMIT_ADDR_MASK 0xFFFFF000L +//MMEA0_ADDRNORM_BASE_ADDR5 +#define MMEA0_ADDRNORM_BASE_ADDR5__ADDR_RNG_VAL__SHIFT 0x0 +#define MMEA0_ADDRNORM_BASE_ADDR5__LGCY_MMIO_HOLE_EN__SHIFT 0x1 +#define MMEA0_ADDRNORM_BASE_ADDR5__INTLV_NUM_CHAN__SHIFT 0x2 +#define MMEA0_ADDRNORM_BASE_ADDR5__INTLV_NUM_DIES__SHIFT 0x6 +#define MMEA0_ADDRNORM_BASE_ADDR5__INTLV_NUM_SOCKETS__SHIFT 0x8 +#define MMEA0_ADDRNORM_BASE_ADDR5__INTLV_ADDR_SEL__SHIFT 0x9 +#define MMEA0_ADDRNORM_BASE_ADDR5__BASE_ADDR__SHIFT 0xc +#define MMEA0_ADDRNORM_BASE_ADDR5__ADDR_RNG_VAL_MASK 0x00000001L +#define MMEA0_ADDRNORM_BASE_ADDR5__LGCY_MMIO_HOLE_EN_MASK 0x00000002L +#define MMEA0_ADDRNORM_BASE_ADDR5__INTLV_NUM_CHAN_MASK 0x0000003CL +#define MMEA0_ADDRNORM_BASE_ADDR5__INTLV_NUM_DIES_MASK 0x000000C0L +#define MMEA0_ADDRNORM_BASE_ADDR5__INTLV_NUM_SOCKETS_MASK 0x00000100L +#define MMEA0_ADDRNORM_BASE_ADDR5__INTLV_ADDR_SEL_MASK 0x00000E00L +#define MMEA0_ADDRNORM_BASE_ADDR5__BASE_ADDR_MASK 0xFFFFF000L +//MMEA0_ADDRNORM_LIMIT_ADDR5 +#define MMEA0_ADDRNORM_LIMIT_ADDR5__DST_FABRIC_ID__SHIFT 0x0 +#define MMEA0_ADDRNORM_LIMIT_ADDR5__LIMIT_ADDR__SHIFT 0xc 
+#define MMEA0_ADDRNORM_LIMIT_ADDR5__DST_FABRIC_ID_MASK 0x0000001FL +#define MMEA0_ADDRNORM_LIMIT_ADDR5__LIMIT_ADDR_MASK 0xFFFFF000L +//MMEA0_ADDRNORM_OFFSET_ADDR5 +#define MMEA0_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_EN__SHIFT 0x0 +#define MMEA0_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET__SHIFT 0x14 +#define MMEA0_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_EN_MASK 0x00000001L +#define MMEA0_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_MASK 0xFFF00000L +//MMEA0_ADDRNORMDRAM_HOLE_CNTL +#define MMEA0_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_VALID__SHIFT 0x0 +#define MMEA0_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_OFFSET__SHIFT 0x7 +#define MMEA0_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_VALID_MASK 0x00000001L +#define MMEA0_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_OFFSET_MASK 0x0000FF80L +//MMEA0_ADDRNORMGMI_HOLE_CNTL +#define MMEA0_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_VALID__SHIFT 0x0 +#define MMEA0_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_OFFSET__SHIFT 0x7 +#define MMEA0_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_VALID_MASK 0x00000001L +#define MMEA0_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_OFFSET_MASK 0x0000FF80L +//MMEA0_ADDRNORMDRAM_NP2_CHANNEL_CFG +#define MMEA0_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE0__SHIFT 0x0 +#define MMEA0_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE1__SHIFT 0x6 +#define MMEA0_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE0_MASK 0x0000003FL +#define MMEA0_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE1_MASK 0x00000FC0L +//MMEA0_ADDRNORMGMI_NP2_CHANNEL_CFG +#define MMEA0_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE2__SHIFT 0x0 +#define MMEA0_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE3__SHIFT 0x6 +#define MMEA0_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE2_MASK 0x0000003FL +#define MMEA0_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE3_MASK 0x00000FC0L +//MMEA0_ADDRDEC_BANK_CFG +#define MMEA0_ADDRDEC_BANK_CFG__BANK_MASK_DRAM__SHIFT 0x0 +#define MMEA0_ADDRDEC_BANK_CFG__BANK_MASK_GMI__SHIFT 0x6 +#define MMEA0_ADDRDEC_BANK_CFG__BANKGROUP_SEL_DRAM__SHIFT 0xc +#define MMEA0_ADDRDEC_BANK_CFG__BANKGROUP_SEL_GMI__SHIFT 0xf +#define MMEA0_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_DRAM__SHIFT 0x12 +#define MMEA0_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_GMI__SHIFT 0x13 +#define MMEA0_ADDRDEC_BANK_CFG__BANK_MASK_DRAM_MASK 0x0000003FL +#define MMEA0_ADDRDEC_BANK_CFG__BANK_MASK_GMI_MASK 0x00000FC0L +#define MMEA0_ADDRDEC_BANK_CFG__BANKGROUP_SEL_DRAM_MASK 0x00007000L +#define MMEA0_ADDRDEC_BANK_CFG__BANKGROUP_SEL_GMI_MASK 0x00038000L +#define MMEA0_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_DRAM_MASK 0x00040000L +#define MMEA0_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_GMI_MASK 0x00080000L +//MMEA0_ADDRDEC_MISC_CFG +#define MMEA0_ADDRDEC_MISC_CFG__VCM_EN0__SHIFT 0x0 +#define MMEA0_ADDRDEC_MISC_CFG__VCM_EN1__SHIFT 0x1 +#define MMEA0_ADDRDEC_MISC_CFG__VCM_EN2__SHIFT 0x2 +#define MMEA0_ADDRDEC_MISC_CFG__PCH_MASK_DRAM__SHIFT 0x8 +#define MMEA0_ADDRDEC_MISC_CFG__PCH_MASK_GMI__SHIFT 0x9 +#define MMEA0_ADDRDEC_MISC_CFG__CH_MASK_DRAM__SHIFT 0xc +#define MMEA0_ADDRDEC_MISC_CFG__CH_MASK_GMI__SHIFT 0x11 +#define MMEA0_ADDRDEC_MISC_CFG__CS_MASK_DRAM__SHIFT 0x16 +#define MMEA0_ADDRDEC_MISC_CFG__CS_MASK_GMI__SHIFT 0x18 +#define MMEA0_ADDRDEC_MISC_CFG__RM_MASK_DRAM__SHIFT 0x1a +#define MMEA0_ADDRDEC_MISC_CFG__RM_MASK_GMI__SHIFT 0x1d +#define MMEA0_ADDRDEC_MISC_CFG__VCM_EN0_MASK 0x00000001L +#define MMEA0_ADDRDEC_MISC_CFG__VCM_EN1_MASK 0x00000002L +#define MMEA0_ADDRDEC_MISC_CFG__VCM_EN2_MASK 0x00000004L +#define MMEA0_ADDRDEC_MISC_CFG__PCH_MASK_DRAM_MASK 0x00000100L +#define MMEA0_ADDRDEC_MISC_CFG__PCH_MASK_GMI_MASK 0x00000200L +#define 
MMEA0_ADDRDEC_MISC_CFG__CH_MASK_DRAM_MASK 0x0001F000L +#define MMEA0_ADDRDEC_MISC_CFG__CH_MASK_GMI_MASK 0x003E0000L +#define MMEA0_ADDRDEC_MISC_CFG__CS_MASK_DRAM_MASK 0x00C00000L +#define MMEA0_ADDRDEC_MISC_CFG__CS_MASK_GMI_MASK 0x03000000L +#define MMEA0_ADDRDEC_MISC_CFG__RM_MASK_DRAM_MASK 0x1C000000L +#define MMEA0_ADDRDEC_MISC_CFG__RM_MASK_GMI_MASK 0xE0000000L +//MMEA0_ADDRDECDRAM_ADDR_HASH_BANK0 +#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK0__XOR_ENABLE__SHIFT 0x0 +#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK0__COL_XOR__SHIFT 0x1 +#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK0__ROW_XOR__SHIFT 0xe +#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK0__XOR_ENABLE_MASK 0x00000001L +#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK0__COL_XOR_MASK 0x00003FFEL +#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK0__ROW_XOR_MASK 0xFFFFC000L +//MMEA0_ADDRDECDRAM_ADDR_HASH_BANK1 +#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK1__XOR_ENABLE__SHIFT 0x0 +#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK1__COL_XOR__SHIFT 0x1 +#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK1__ROW_XOR__SHIFT 0xe +#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK1__XOR_ENABLE_MASK 0x00000001L +#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK1__COL_XOR_MASK 0x00003FFEL +#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK1__ROW_XOR_MASK 0xFFFFC000L +//MMEA0_ADDRDECDRAM_ADDR_HASH_BANK2 +#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK2__XOR_ENABLE__SHIFT 0x0 +#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK2__COL_XOR__SHIFT 0x1 +#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK2__ROW_XOR__SHIFT 0xe +#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK2__XOR_ENABLE_MASK 0x00000001L +#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK2__COL_XOR_MASK 0x00003FFEL +#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK2__ROW_XOR_MASK 0xFFFFC000L +//MMEA0_ADDRDECDRAM_ADDR_HASH_BANK3 +#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK3__XOR_ENABLE__SHIFT 0x0 +#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK3__COL_XOR__SHIFT 0x1 +#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK3__ROW_XOR__SHIFT 0xe +#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK3__XOR_ENABLE_MASK 0x00000001L +#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK3__COL_XOR_MASK 0x00003FFEL +#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK3__ROW_XOR_MASK 0xFFFFC000L +//MMEA0_ADDRDECDRAM_ADDR_HASH_BANK4 +#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK4__XOR_ENABLE__SHIFT 0x0 +#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK4__COL_XOR__SHIFT 0x1 +#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK4__ROW_XOR__SHIFT 0xe +#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK4__XOR_ENABLE_MASK 0x00000001L +#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK4__COL_XOR_MASK 0x00003FFEL +#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK4__ROW_XOR_MASK 0xFFFFC000L +//MMEA0_ADDRDECDRAM_ADDR_HASH_BANK5 +#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK5__XOR_ENABLE__SHIFT 0x0 +#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK5__COL_XOR__SHIFT 0x1 +#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK5__ROW_XOR__SHIFT 0xe +#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK5__XOR_ENABLE_MASK 0x00000001L +#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK5__COL_XOR_MASK 0x00003FFEL +#define MMEA0_ADDRDECDRAM_ADDR_HASH_BANK5__ROW_XOR_MASK 0xFFFFC000L +//MMEA0_ADDRDECDRAM_ADDR_HASH_PC +#define MMEA0_ADDRDECDRAM_ADDR_HASH_PC__XOR_ENABLE__SHIFT 0x0 +#define MMEA0_ADDRDECDRAM_ADDR_HASH_PC__COL_XOR__SHIFT 0x1 +#define MMEA0_ADDRDECDRAM_ADDR_HASH_PC__ROW_XOR__SHIFT 0xe +#define MMEA0_ADDRDECDRAM_ADDR_HASH_PC__XOR_ENABLE_MASK 0x00000001L +#define MMEA0_ADDRDECDRAM_ADDR_HASH_PC__COL_XOR_MASK 0x00003FFEL +#define MMEA0_ADDRDECDRAM_ADDR_HASH_PC__ROW_XOR_MASK 0xFFFFC000L +//MMEA0_ADDRDECDRAM_ADDR_HASH_PC2 +#define MMEA0_ADDRDECDRAM_ADDR_HASH_PC2__BANK_XOR__SHIFT 0x0 +#define 
MMEA0_ADDRDECDRAM_ADDR_HASH_PC2__BANK_XOR_MASK 0x0000003FL +//MMEA0_ADDRDECDRAM_ADDR_HASH_CS0 +#define MMEA0_ADDRDECDRAM_ADDR_HASH_CS0__XOR_ENABLE__SHIFT 0x0 +#define MMEA0_ADDRDECDRAM_ADDR_HASH_CS0__NA_XOR__SHIFT 0x1 +#define MMEA0_ADDRDECDRAM_ADDR_HASH_CS0__XOR_ENABLE_MASK 0x00000001L +#define MMEA0_ADDRDECDRAM_ADDR_HASH_CS0__NA_XOR_MASK 0xFFFFFFFEL +//MMEA0_ADDRDECDRAM_ADDR_HASH_CS1 +#define MMEA0_ADDRDECDRAM_ADDR_HASH_CS1__XOR_ENABLE__SHIFT 0x0 +#define MMEA0_ADDRDECDRAM_ADDR_HASH_CS1__NA_XOR__SHIFT 0x1 +#define MMEA0_ADDRDECDRAM_ADDR_HASH_CS1__XOR_ENABLE_MASK 0x00000001L +#define MMEA0_ADDRDECDRAM_ADDR_HASH_CS1__NA_XOR_MASK 0xFFFFFFFEL +//MMEA0_ADDRDECDRAM_HARVEST_ENABLE +#define MMEA0_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_EN__SHIFT 0x0 +#define MMEA0_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_VAL__SHIFT 0x1 +#define MMEA0_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_EN__SHIFT 0x2 +#define MMEA0_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_VAL__SHIFT 0x3 +#define MMEA0_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_EN__SHIFT 0x4 +#define MMEA0_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_VAL__SHIFT 0x5 +#define MMEA0_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_EN_MASK 0x00000001L +#define MMEA0_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_VAL_MASK 0x00000002L +#define MMEA0_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_EN_MASK 0x00000004L +#define MMEA0_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_VAL_MASK 0x00000008L +#define MMEA0_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_EN_MASK 0x00000010L +#define MMEA0_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_VAL_MASK 0x00000020L +//MMEA0_ADDRDECGMI_ADDR_HASH_BANK0 +#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK0__XOR_ENABLE__SHIFT 0x0 +#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK0__COL_XOR__SHIFT 0x1 +#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK0__ROW_XOR__SHIFT 0xe +#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK0__XOR_ENABLE_MASK 0x00000001L +#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK0__COL_XOR_MASK 0x00003FFEL +#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK0__ROW_XOR_MASK 0xFFFFC000L +//MMEA0_ADDRDECGMI_ADDR_HASH_BANK1 +#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK1__XOR_ENABLE__SHIFT 0x0 +#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK1__COL_XOR__SHIFT 0x1 +#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK1__ROW_XOR__SHIFT 0xe +#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK1__XOR_ENABLE_MASK 0x00000001L +#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK1__COL_XOR_MASK 0x00003FFEL +#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK1__ROW_XOR_MASK 0xFFFFC000L +//MMEA0_ADDRDECGMI_ADDR_HASH_BANK2 +#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK2__XOR_ENABLE__SHIFT 0x0 +#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK2__COL_XOR__SHIFT 0x1 +#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK2__ROW_XOR__SHIFT 0xe +#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK2__XOR_ENABLE_MASK 0x00000001L +#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK2__COL_XOR_MASK 0x00003FFEL +#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK2__ROW_XOR_MASK 0xFFFFC000L +//MMEA0_ADDRDECGMI_ADDR_HASH_BANK3 +#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK3__XOR_ENABLE__SHIFT 0x0 +#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK3__COL_XOR__SHIFT 0x1 +#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK3__ROW_XOR__SHIFT 0xe +#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK3__XOR_ENABLE_MASK 0x00000001L +#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK3__COL_XOR_MASK 0x00003FFEL +#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK3__ROW_XOR_MASK 0xFFFFC000L +//MMEA0_ADDRDECGMI_ADDR_HASH_BANK4 +#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK4__XOR_ENABLE__SHIFT 0x0 +#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK4__COL_XOR__SHIFT 0x1 +#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK4__ROW_XOR__SHIFT 0xe +#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK4__XOR_ENABLE_MASK 0x00000001L +#define 
MMEA0_ADDRDECGMI_ADDR_HASH_BANK4__COL_XOR_MASK 0x00003FFEL +#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK4__ROW_XOR_MASK 0xFFFFC000L +//MMEA0_ADDRDECGMI_ADDR_HASH_BANK5 +#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK5__XOR_ENABLE__SHIFT 0x0 +#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK5__COL_XOR__SHIFT 0x1 +#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK5__ROW_XOR__SHIFT 0xe +#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK5__XOR_ENABLE_MASK 0x00000001L +#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK5__COL_XOR_MASK 0x00003FFEL +#define MMEA0_ADDRDECGMI_ADDR_HASH_BANK5__ROW_XOR_MASK 0xFFFFC000L +//MMEA0_ADDRDECGMI_ADDR_HASH_PC +#define MMEA0_ADDRDECGMI_ADDR_HASH_PC__XOR_ENABLE__SHIFT 0x0 +#define MMEA0_ADDRDECGMI_ADDR_HASH_PC__COL_XOR__SHIFT 0x1 +#define MMEA0_ADDRDECGMI_ADDR_HASH_PC__ROW_XOR__SHIFT 0xe +#define MMEA0_ADDRDECGMI_ADDR_HASH_PC__XOR_ENABLE_MASK 0x00000001L +#define MMEA0_ADDRDECGMI_ADDR_HASH_PC__COL_XOR_MASK 0x00003FFEL +#define MMEA0_ADDRDECGMI_ADDR_HASH_PC__ROW_XOR_MASK 0xFFFFC000L +//MMEA0_ADDRDECGMI_ADDR_HASH_PC2 +#define MMEA0_ADDRDECGMI_ADDR_HASH_PC2__BANK_XOR__SHIFT 0x0 +#define MMEA0_ADDRDECGMI_ADDR_HASH_PC2__BANK_XOR_MASK 0x0000003FL +//MMEA0_ADDRDECGMI_ADDR_HASH_CS0 +#define MMEA0_ADDRDECGMI_ADDR_HASH_CS0__XOR_ENABLE__SHIFT 0x0 +#define MMEA0_ADDRDECGMI_ADDR_HASH_CS0__NA_XOR__SHIFT 0x1 +#define MMEA0_ADDRDECGMI_ADDR_HASH_CS0__XOR_ENABLE_MASK 0x00000001L +#define MMEA0_ADDRDECGMI_ADDR_HASH_CS0__NA_XOR_MASK 0xFFFFFFFEL +//MMEA0_ADDRDECGMI_ADDR_HASH_CS1 +#define MMEA0_ADDRDECGMI_ADDR_HASH_CS1__XOR_ENABLE__SHIFT 0x0 +#define MMEA0_ADDRDECGMI_ADDR_HASH_CS1__NA_XOR__SHIFT 0x1 +#define MMEA0_ADDRDECGMI_ADDR_HASH_CS1__XOR_ENABLE_MASK 0x00000001L +#define MMEA0_ADDRDECGMI_ADDR_HASH_CS1__NA_XOR_MASK 0xFFFFFFFEL +//MMEA0_ADDRDECGMI_HARVEST_ENABLE +#define MMEA0_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_EN__SHIFT 0x0 +#define MMEA0_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_VAL__SHIFT 0x1 +#define MMEA0_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_EN__SHIFT 0x2 +#define MMEA0_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_VAL__SHIFT 0x3 +#define MMEA0_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_EN__SHIFT 0x4 +#define MMEA0_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_VAL__SHIFT 0x5 +#define MMEA0_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_EN_MASK 0x00000001L +#define MMEA0_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_VAL_MASK 0x00000002L +#define MMEA0_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_EN_MASK 0x00000004L +#define MMEA0_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_VAL_MASK 0x00000008L +#define MMEA0_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_EN_MASK 0x00000010L +#define MMEA0_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_VAL_MASK 0x00000020L +//MMEA0_ADDRDEC0_BASE_ADDR_CS0 +#define MMEA0_ADDRDEC0_BASE_ADDR_CS0__CS_EN__SHIFT 0x0 +#define MMEA0_ADDRDEC0_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1 +#define MMEA0_ADDRDEC0_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L +#define MMEA0_ADDRDEC0_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA0_ADDRDEC0_BASE_ADDR_CS1 +#define MMEA0_ADDRDEC0_BASE_ADDR_CS1__CS_EN__SHIFT 0x0 +#define MMEA0_ADDRDEC0_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1 +#define MMEA0_ADDRDEC0_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L +#define MMEA0_ADDRDEC0_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA0_ADDRDEC0_BASE_ADDR_CS2 +#define MMEA0_ADDRDEC0_BASE_ADDR_CS2__CS_EN__SHIFT 0x0 +#define MMEA0_ADDRDEC0_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1 +#define MMEA0_ADDRDEC0_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L +#define MMEA0_ADDRDEC0_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA0_ADDRDEC0_BASE_ADDR_CS3 +#define MMEA0_ADDRDEC0_BASE_ADDR_CS3__CS_EN__SHIFT 0x0 +#define MMEA0_ADDRDEC0_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1 +#define 
MMEA0_ADDRDEC0_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L +#define MMEA0_ADDRDEC0_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA0_ADDRDEC0_BASE_ADDR_SECCS0 +#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0 +#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1 +#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L +#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA0_ADDRDEC0_BASE_ADDR_SECCS1 +#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0 +#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1 +#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L +#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA0_ADDRDEC0_BASE_ADDR_SECCS2 +#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0 +#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1 +#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L +#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA0_ADDRDEC0_BASE_ADDR_SECCS3 +#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0 +#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1 +#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L +#define MMEA0_ADDRDEC0_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA0_ADDRDEC0_ADDR_MASK_CS01 +#define MMEA0_ADDRDEC0_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1 +#define MMEA0_ADDRDEC0_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA0_ADDRDEC0_ADDR_MASK_CS23 +#define MMEA0_ADDRDEC0_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1 +#define MMEA0_ADDRDEC0_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA0_ADDRDEC0_ADDR_MASK_SECCS01 +#define MMEA0_ADDRDEC0_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1 +#define MMEA0_ADDRDEC0_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA0_ADDRDEC0_ADDR_MASK_SECCS23 +#define MMEA0_ADDRDEC0_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1 +#define MMEA0_ADDRDEC0_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA0_ADDRDEC0_ADDR_CFG_CS01 +#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4 +#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8 +#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc +#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10 +#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14 +#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f +#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L +#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L +#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L +#define MMEA0_ADDRDEC0_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L +//MMEA0_ADDRDEC0_ADDR_CFG_CS23 +#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4 +#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8 +#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc +#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10 +#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14 +#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f +#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L +#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L +#define 
MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L +#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L +#define MMEA0_ADDRDEC0_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L +//MMEA0_ADDRDEC0_ADDR_SEL_CS01 +#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__BANK0__SHIFT 0x0 +#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__BANK1__SHIFT 0x4 +#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__BANK2__SHIFT 0x8 +#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__BANK3__SHIFT 0xc +#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__BANK4__SHIFT 0x10 +#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18 +#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c +#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL +#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L +#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L +#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L +#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L +#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L +#define MMEA0_ADDRDEC0_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L +//MMEA0_ADDRDEC0_ADDR_SEL_CS23 +#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__BANK0__SHIFT 0x0 +#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__BANK1__SHIFT 0x4 +#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__BANK2__SHIFT 0x8 +#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__BANK3__SHIFT 0xc +#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__BANK4__SHIFT 0x10 +#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18 +#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c +#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL +#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L +#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L +#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L +#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L +#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L +#define MMEA0_ADDRDEC0_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L +//MMEA0_ADDRDEC0_ADDR_SEL2_CS01 +#define MMEA0_ADDRDEC0_ADDR_SEL2_CS01__BANK5__SHIFT 0x0 +#define MMEA0_ADDRDEC0_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL +//MMEA0_ADDRDEC0_ADDR_SEL2_CS23 +#define MMEA0_ADDRDEC0_ADDR_SEL2_CS23__BANK5__SHIFT 0x0 +#define MMEA0_ADDRDEC0_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL +//MMEA0_ADDRDEC0_COL_SEL_LO_CS01 +#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL0__SHIFT 0x0 +#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL1__SHIFT 0x4 +#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL2__SHIFT 0x8 +#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL3__SHIFT 0xc +#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL4__SHIFT 0x10 +#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL5__SHIFT 0x14 +#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL6__SHIFT 0x18 +#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL7__SHIFT 0x1c +#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL +#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L +#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L +#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L +#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L +#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L +#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L +#define MMEA0_ADDRDEC0_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L +//MMEA0_ADDRDEC0_COL_SEL_LO_CS23 +#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL0__SHIFT 0x0 +#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL1__SHIFT 0x4 +#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL2__SHIFT 0x8 +#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL3__SHIFT 0xc 
+#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL4__SHIFT 0x10 +#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL5__SHIFT 0x14 +#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL6__SHIFT 0x18 +#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL7__SHIFT 0x1c +#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL +#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L +#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L +#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L +#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L +#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L +#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L +#define MMEA0_ADDRDEC0_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L +//MMEA0_ADDRDEC0_COL_SEL_HI_CS01 +#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL8__SHIFT 0x0 +#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL9__SHIFT 0x4 +#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL10__SHIFT 0x8 +#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL11__SHIFT 0xc +#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL12__SHIFT 0x10 +#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL13__SHIFT 0x14 +#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL14__SHIFT 0x18 +#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL15__SHIFT 0x1c +#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL +#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L +#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L +#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L +#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L +#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L +#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L +#define MMEA0_ADDRDEC0_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L +//MMEA0_ADDRDEC0_COL_SEL_HI_CS23 +#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL8__SHIFT 0x0 +#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL9__SHIFT 0x4 +#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL10__SHIFT 0x8 +#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL11__SHIFT 0xc +#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL12__SHIFT 0x10 +#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL13__SHIFT 0x14 +#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL14__SHIFT 0x18 +#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL15__SHIFT 0x1c +#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL +#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L +#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L +#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L +#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L +#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L +#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L +#define MMEA0_ADDRDEC0_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L +//MMEA0_ADDRDEC0_RM_SEL_CS01 +#define MMEA0_ADDRDEC0_RM_SEL_CS01__RM0__SHIFT 0x0 +#define MMEA0_ADDRDEC0_RM_SEL_CS01__RM1__SHIFT 0x4 +#define MMEA0_ADDRDEC0_RM_SEL_CS01__RM2__SHIFT 0x8 +#define MMEA0_ADDRDEC0_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc +#define MMEA0_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA0_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA0_ADDRDEC0_RM_SEL_CS01__RM0_MASK 0x0000000FL +#define MMEA0_ADDRDEC0_RM_SEL_CS01__RM1_MASK 0x000000F0L +#define MMEA0_ADDRDEC0_RM_SEL_CS01__RM2_MASK 0x00000F00L +#define MMEA0_ADDRDEC0_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA0_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA0_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA0_ADDRDEC0_RM_SEL_CS23 
+#define MMEA0_ADDRDEC0_RM_SEL_CS23__RM0__SHIFT 0x0 +#define MMEA0_ADDRDEC0_RM_SEL_CS23__RM1__SHIFT 0x4 +#define MMEA0_ADDRDEC0_RM_SEL_CS23__RM2__SHIFT 0x8 +#define MMEA0_ADDRDEC0_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc +#define MMEA0_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA0_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA0_ADDRDEC0_RM_SEL_CS23__RM0_MASK 0x0000000FL +#define MMEA0_ADDRDEC0_RM_SEL_CS23__RM1_MASK 0x000000F0L +#define MMEA0_ADDRDEC0_RM_SEL_CS23__RM2_MASK 0x00000F00L +#define MMEA0_ADDRDEC0_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA0_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA0_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA0_ADDRDEC0_RM_SEL_SECCS01 +#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__RM0__SHIFT 0x0 +#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__RM1__SHIFT 0x4 +#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__RM2__SHIFT 0x8 +#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc +#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__RM0_MASK 0x0000000FL +#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__RM1_MASK 0x000000F0L +#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__RM2_MASK 0x00000F00L +#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA0_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA0_ADDRDEC0_RM_SEL_SECCS23 +#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__RM0__SHIFT 0x0 +#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__RM1__SHIFT 0x4 +#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__RM2__SHIFT 0x8 +#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc +#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__RM0_MASK 0x0000000FL +#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__RM1_MASK 0x000000F0L +#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__RM2_MASK 0x00000F00L +#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA0_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA0_ADDRDEC1_BASE_ADDR_CS0 +#define MMEA0_ADDRDEC1_BASE_ADDR_CS0__CS_EN__SHIFT 0x0 +#define MMEA0_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1 +#define MMEA0_ADDRDEC1_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L +#define MMEA0_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA0_ADDRDEC1_BASE_ADDR_CS1 +#define MMEA0_ADDRDEC1_BASE_ADDR_CS1__CS_EN__SHIFT 0x0 +#define MMEA0_ADDRDEC1_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1 +#define MMEA0_ADDRDEC1_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L +#define MMEA0_ADDRDEC1_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA0_ADDRDEC1_BASE_ADDR_CS2 +#define MMEA0_ADDRDEC1_BASE_ADDR_CS2__CS_EN__SHIFT 0x0 +#define MMEA0_ADDRDEC1_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1 +#define MMEA0_ADDRDEC1_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L +#define MMEA0_ADDRDEC1_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA0_ADDRDEC1_BASE_ADDR_CS3 +#define MMEA0_ADDRDEC1_BASE_ADDR_CS3__CS_EN__SHIFT 0x0 +#define MMEA0_ADDRDEC1_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1 +#define MMEA0_ADDRDEC1_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L +#define MMEA0_ADDRDEC1_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA0_ADDRDEC1_BASE_ADDR_SECCS0 +#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0 
+#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1 +#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L +#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA0_ADDRDEC1_BASE_ADDR_SECCS1 +#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0 +#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1 +#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L +#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA0_ADDRDEC1_BASE_ADDR_SECCS2 +#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0 +#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1 +#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L +#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA0_ADDRDEC1_BASE_ADDR_SECCS3 +#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0 +#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1 +#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L +#define MMEA0_ADDRDEC1_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA0_ADDRDEC1_ADDR_MASK_CS01 +#define MMEA0_ADDRDEC1_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1 +#define MMEA0_ADDRDEC1_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA0_ADDRDEC1_ADDR_MASK_CS23 +#define MMEA0_ADDRDEC1_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1 +#define MMEA0_ADDRDEC1_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA0_ADDRDEC1_ADDR_MASK_SECCS01 +#define MMEA0_ADDRDEC1_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1 +#define MMEA0_ADDRDEC1_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA0_ADDRDEC1_ADDR_MASK_SECCS23 +#define MMEA0_ADDRDEC1_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1 +#define MMEA0_ADDRDEC1_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA0_ADDRDEC1_ADDR_CFG_CS01 +#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4 +#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8 +#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc +#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10 +#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14 +#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f +#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L +#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L +#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L +#define MMEA0_ADDRDEC1_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L +//MMEA0_ADDRDEC1_ADDR_CFG_CS23 +#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4 +#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8 +#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc +#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10 +#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14 +#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f +#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L +#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L +#define MMEA0_ADDRDEC1_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L +#define 
MMEA0_ADDRDEC1_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L +//MMEA0_ADDRDEC1_ADDR_SEL_CS01 +#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__BANK0__SHIFT 0x0 +#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__BANK1__SHIFT 0x4 +#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__BANK2__SHIFT 0x8 +#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__BANK3__SHIFT 0xc +#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__BANK4__SHIFT 0x10 +#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18 +#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c +#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL +#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L +#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L +#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L +#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L +#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L +#define MMEA0_ADDRDEC1_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L +//MMEA0_ADDRDEC1_ADDR_SEL_CS23 +#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__BANK0__SHIFT 0x0 +#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__BANK1__SHIFT 0x4 +#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__BANK2__SHIFT 0x8 +#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__BANK3__SHIFT 0xc +#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__BANK4__SHIFT 0x10 +#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18 +#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c +#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL +#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L +#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L +#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L +#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L +#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L +#define MMEA0_ADDRDEC1_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L +//MMEA0_ADDRDEC1_ADDR_SEL2_CS01 +#define MMEA0_ADDRDEC1_ADDR_SEL2_CS01__BANK5__SHIFT 0x0 +#define MMEA0_ADDRDEC1_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL +//MMEA0_ADDRDEC1_ADDR_SEL2_CS23 +#define MMEA0_ADDRDEC1_ADDR_SEL2_CS23__BANK5__SHIFT 0x0 +#define MMEA0_ADDRDEC1_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL +//MMEA0_ADDRDEC1_COL_SEL_LO_CS01 +#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL0__SHIFT 0x0 +#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL1__SHIFT 0x4 +#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL2__SHIFT 0x8 +#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL3__SHIFT 0xc +#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL4__SHIFT 0x10 +#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL5__SHIFT 0x14 +#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL6__SHIFT 0x18 +#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL7__SHIFT 0x1c +#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL +#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L +#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L +#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L +#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L +#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L +#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L +#define MMEA0_ADDRDEC1_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L +//MMEA0_ADDRDEC1_COL_SEL_LO_CS23 +#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL0__SHIFT 0x0 +#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL1__SHIFT 0x4 +#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL2__SHIFT 0x8 +#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL3__SHIFT 0xc +#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL4__SHIFT 0x10 +#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL5__SHIFT 0x14 +#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL6__SHIFT 0x18 +#define 
MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL7__SHIFT 0x1c +#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL +#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L +#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L +#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L +#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L +#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L +#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L +#define MMEA0_ADDRDEC1_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L +//MMEA0_ADDRDEC1_COL_SEL_HI_CS01 +#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL8__SHIFT 0x0 +#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL9__SHIFT 0x4 +#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL10__SHIFT 0x8 +#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL11__SHIFT 0xc +#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL12__SHIFT 0x10 +#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL13__SHIFT 0x14 +#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL14__SHIFT 0x18 +#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL15__SHIFT 0x1c +#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL +#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L +#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L +#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L +#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L +#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L +#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L +#define MMEA0_ADDRDEC1_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L +//MMEA0_ADDRDEC1_COL_SEL_HI_CS23 +#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL8__SHIFT 0x0 +#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL9__SHIFT 0x4 +#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL10__SHIFT 0x8 +#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL11__SHIFT 0xc +#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL12__SHIFT 0x10 +#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL13__SHIFT 0x14 +#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL14__SHIFT 0x18 +#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL15__SHIFT 0x1c +#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL +#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L +#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L +#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L +#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L +#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L +#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L +#define MMEA0_ADDRDEC1_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L +//MMEA0_ADDRDEC1_RM_SEL_CS01 +#define MMEA0_ADDRDEC1_RM_SEL_CS01__RM0__SHIFT 0x0 +#define MMEA0_ADDRDEC1_RM_SEL_CS01__RM1__SHIFT 0x4 +#define MMEA0_ADDRDEC1_RM_SEL_CS01__RM2__SHIFT 0x8 +#define MMEA0_ADDRDEC1_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc +#define MMEA0_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA0_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA0_ADDRDEC1_RM_SEL_CS01__RM0_MASK 0x0000000FL +#define MMEA0_ADDRDEC1_RM_SEL_CS01__RM1_MASK 0x000000F0L +#define MMEA0_ADDRDEC1_RM_SEL_CS01__RM2_MASK 0x00000F00L +#define MMEA0_ADDRDEC1_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA0_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA0_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA0_ADDRDEC1_RM_SEL_CS23 +#define MMEA0_ADDRDEC1_RM_SEL_CS23__RM0__SHIFT 0x0 +#define MMEA0_ADDRDEC1_RM_SEL_CS23__RM1__SHIFT 0x4 +#define MMEA0_ADDRDEC1_RM_SEL_CS23__RM2__SHIFT 0x8 +#define 
MMEA0_ADDRDEC1_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc +#define MMEA0_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA0_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA0_ADDRDEC1_RM_SEL_CS23__RM0_MASK 0x0000000FL +#define MMEA0_ADDRDEC1_RM_SEL_CS23__RM1_MASK 0x000000F0L +#define MMEA0_ADDRDEC1_RM_SEL_CS23__RM2_MASK 0x00000F00L +#define MMEA0_ADDRDEC1_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA0_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA0_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA0_ADDRDEC1_RM_SEL_SECCS01 +#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__RM0__SHIFT 0x0 +#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__RM1__SHIFT 0x4 +#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__RM2__SHIFT 0x8 +#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc +#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__RM0_MASK 0x0000000FL +#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__RM1_MASK 0x000000F0L +#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__RM2_MASK 0x00000F00L +#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA0_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA0_ADDRDEC1_RM_SEL_SECCS23 +#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__RM0__SHIFT 0x0 +#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__RM1__SHIFT 0x4 +#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__RM2__SHIFT 0x8 +#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc +#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__RM0_MASK 0x0000000FL +#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__RM1_MASK 0x000000F0L +#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__RM2_MASK 0x00000F00L +#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA0_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA0_ADDRDEC2_BASE_ADDR_CS0 +#define MMEA0_ADDRDEC2_BASE_ADDR_CS0__CS_EN__SHIFT 0x0 +#define MMEA0_ADDRDEC2_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1 +#define MMEA0_ADDRDEC2_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L +#define MMEA0_ADDRDEC2_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA0_ADDRDEC2_BASE_ADDR_CS1 +#define MMEA0_ADDRDEC2_BASE_ADDR_CS1__CS_EN__SHIFT 0x0 +#define MMEA0_ADDRDEC2_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1 +#define MMEA0_ADDRDEC2_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L +#define MMEA0_ADDRDEC2_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA0_ADDRDEC2_BASE_ADDR_CS2 +#define MMEA0_ADDRDEC2_BASE_ADDR_CS2__CS_EN__SHIFT 0x0 +#define MMEA0_ADDRDEC2_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1 +#define MMEA0_ADDRDEC2_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L +#define MMEA0_ADDRDEC2_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA0_ADDRDEC2_BASE_ADDR_CS3 +#define MMEA0_ADDRDEC2_BASE_ADDR_CS3__CS_EN__SHIFT 0x0 +#define MMEA0_ADDRDEC2_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1 +#define MMEA0_ADDRDEC2_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L +#define MMEA0_ADDRDEC2_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA0_ADDRDEC2_BASE_ADDR_SECCS0 +#define MMEA0_ADDRDEC2_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0 +#define MMEA0_ADDRDEC2_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1 +#define MMEA0_ADDRDEC2_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L +#define 
MMEA0_ADDRDEC2_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA0_ADDRDEC2_BASE_ADDR_SECCS1 +#define MMEA0_ADDRDEC2_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0 +#define MMEA0_ADDRDEC2_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1 +#define MMEA0_ADDRDEC2_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L +#define MMEA0_ADDRDEC2_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA0_ADDRDEC2_BASE_ADDR_SECCS2 +#define MMEA0_ADDRDEC2_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0 +#define MMEA0_ADDRDEC2_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1 +#define MMEA0_ADDRDEC2_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L +#define MMEA0_ADDRDEC2_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA0_ADDRDEC2_BASE_ADDR_SECCS3 +#define MMEA0_ADDRDEC2_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0 +#define MMEA0_ADDRDEC2_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1 +#define MMEA0_ADDRDEC2_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L +#define MMEA0_ADDRDEC2_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA0_ADDRDEC2_ADDR_MASK_CS01 +#define MMEA0_ADDRDEC2_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1 +#define MMEA0_ADDRDEC2_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA0_ADDRDEC2_ADDR_MASK_CS23 +#define MMEA0_ADDRDEC2_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1 +#define MMEA0_ADDRDEC2_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA0_ADDRDEC2_ADDR_MASK_SECCS01 +#define MMEA0_ADDRDEC2_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1 +#define MMEA0_ADDRDEC2_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA0_ADDRDEC2_ADDR_MASK_SECCS23 +#define MMEA0_ADDRDEC2_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1 +#define MMEA0_ADDRDEC2_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA0_ADDRDEC2_ADDR_CFG_CS01 +#define MMEA0_ADDRDEC2_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA0_ADDRDEC2_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4 +#define MMEA0_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8 +#define MMEA0_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc +#define MMEA0_ADDRDEC2_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10 +#define MMEA0_ADDRDEC2_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14 +#define MMEA0_ADDRDEC2_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f +#define MMEA0_ADDRDEC2_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA0_ADDRDEC2_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L +#define MMEA0_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA0_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA0_ADDRDEC2_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L +#define MMEA0_ADDRDEC2_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L +#define MMEA0_ADDRDEC2_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L +//MMEA0_ADDRDEC2_ADDR_CFG_CS23 +#define MMEA0_ADDRDEC2_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA0_ADDRDEC2_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4 +#define MMEA0_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8 +#define MMEA0_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc +#define MMEA0_ADDRDEC2_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10 +#define MMEA0_ADDRDEC2_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14 +#define MMEA0_ADDRDEC2_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f +#define MMEA0_ADDRDEC2_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA0_ADDRDEC2_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L +#define MMEA0_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA0_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA0_ADDRDEC2_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L +#define MMEA0_ADDRDEC2_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L +#define MMEA0_ADDRDEC2_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L +//MMEA0_ADDRDEC2_ADDR_SEL_CS01 +#define MMEA0_ADDRDEC2_ADDR_SEL_CS01__BANK0__SHIFT 0x0 +#define 
MMEA0_ADDRDEC2_ADDR_SEL_CS01__BANK1__SHIFT 0x4 +#define MMEA0_ADDRDEC2_ADDR_SEL_CS01__BANK2__SHIFT 0x8 +#define MMEA0_ADDRDEC2_ADDR_SEL_CS01__BANK3__SHIFT 0xc +#define MMEA0_ADDRDEC2_ADDR_SEL_CS01__BANK4__SHIFT 0x10 +#define MMEA0_ADDRDEC2_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18 +#define MMEA0_ADDRDEC2_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c +#define MMEA0_ADDRDEC2_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL +#define MMEA0_ADDRDEC2_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L +#define MMEA0_ADDRDEC2_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L +#define MMEA0_ADDRDEC2_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L +#define MMEA0_ADDRDEC2_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L +#define MMEA0_ADDRDEC2_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L +#define MMEA0_ADDRDEC2_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L +//MMEA0_ADDRDEC2_ADDR_SEL_CS23 +#define MMEA0_ADDRDEC2_ADDR_SEL_CS23__BANK0__SHIFT 0x0 +#define MMEA0_ADDRDEC2_ADDR_SEL_CS23__BANK1__SHIFT 0x4 +#define MMEA0_ADDRDEC2_ADDR_SEL_CS23__BANK2__SHIFT 0x8 +#define MMEA0_ADDRDEC2_ADDR_SEL_CS23__BANK3__SHIFT 0xc +#define MMEA0_ADDRDEC2_ADDR_SEL_CS23__BANK4__SHIFT 0x10 +#define MMEA0_ADDRDEC2_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18 +#define MMEA0_ADDRDEC2_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c +#define MMEA0_ADDRDEC2_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL +#define MMEA0_ADDRDEC2_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L +#define MMEA0_ADDRDEC2_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L +#define MMEA0_ADDRDEC2_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L +#define MMEA0_ADDRDEC2_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L +#define MMEA0_ADDRDEC2_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L +#define MMEA0_ADDRDEC2_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L +//MMEA0_ADDRDEC2_ADDR_SEL2_CS01 +#define MMEA0_ADDRDEC2_ADDR_SEL2_CS01__BANK5__SHIFT 0x0 +#define MMEA0_ADDRDEC2_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL +//MMEA0_ADDRDEC2_ADDR_SEL2_CS23 +#define MMEA0_ADDRDEC2_ADDR_SEL2_CS23__BANK5__SHIFT 0x0 +#define MMEA0_ADDRDEC2_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL +//MMEA0_ADDRDEC2_COL_SEL_LO_CS01 +#define MMEA0_ADDRDEC2_COL_SEL_LO_CS01__COL0__SHIFT 0x0 +#define MMEA0_ADDRDEC2_COL_SEL_LO_CS01__COL1__SHIFT 0x4 +#define MMEA0_ADDRDEC2_COL_SEL_LO_CS01__COL2__SHIFT 0x8 +#define MMEA0_ADDRDEC2_COL_SEL_LO_CS01__COL3__SHIFT 0xc +#define MMEA0_ADDRDEC2_COL_SEL_LO_CS01__COL4__SHIFT 0x10 +#define MMEA0_ADDRDEC2_COL_SEL_LO_CS01__COL5__SHIFT 0x14 +#define MMEA0_ADDRDEC2_COL_SEL_LO_CS01__COL6__SHIFT 0x18 +#define MMEA0_ADDRDEC2_COL_SEL_LO_CS01__COL7__SHIFT 0x1c +#define MMEA0_ADDRDEC2_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL +#define MMEA0_ADDRDEC2_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L +#define MMEA0_ADDRDEC2_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L +#define MMEA0_ADDRDEC2_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L +#define MMEA0_ADDRDEC2_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L +#define MMEA0_ADDRDEC2_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L +#define MMEA0_ADDRDEC2_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L +#define MMEA0_ADDRDEC2_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L +//MMEA0_ADDRDEC2_COL_SEL_LO_CS23 +#define MMEA0_ADDRDEC2_COL_SEL_LO_CS23__COL0__SHIFT 0x0 +#define MMEA0_ADDRDEC2_COL_SEL_LO_CS23__COL1__SHIFT 0x4 +#define MMEA0_ADDRDEC2_COL_SEL_LO_CS23__COL2__SHIFT 0x8 +#define MMEA0_ADDRDEC2_COL_SEL_LO_CS23__COL3__SHIFT 0xc +#define MMEA0_ADDRDEC2_COL_SEL_LO_CS23__COL4__SHIFT 0x10 +#define MMEA0_ADDRDEC2_COL_SEL_LO_CS23__COL5__SHIFT 0x14 +#define MMEA0_ADDRDEC2_COL_SEL_LO_CS23__COL6__SHIFT 0x18 +#define MMEA0_ADDRDEC2_COL_SEL_LO_CS23__COL7__SHIFT 0x1c +#define MMEA0_ADDRDEC2_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL +#define MMEA0_ADDRDEC2_COL_SEL_LO_CS23__COL1_MASK 
0x000000F0L +#define MMEA0_ADDRDEC2_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L +#define MMEA0_ADDRDEC2_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L +#define MMEA0_ADDRDEC2_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L +#define MMEA0_ADDRDEC2_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L +#define MMEA0_ADDRDEC2_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L +#define MMEA0_ADDRDEC2_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L +//MMEA0_ADDRDEC2_COL_SEL_HI_CS01 +#define MMEA0_ADDRDEC2_COL_SEL_HI_CS01__COL8__SHIFT 0x0 +#define MMEA0_ADDRDEC2_COL_SEL_HI_CS01__COL9__SHIFT 0x4 +#define MMEA0_ADDRDEC2_COL_SEL_HI_CS01__COL10__SHIFT 0x8 +#define MMEA0_ADDRDEC2_COL_SEL_HI_CS01__COL11__SHIFT 0xc +#define MMEA0_ADDRDEC2_COL_SEL_HI_CS01__COL12__SHIFT 0x10 +#define MMEA0_ADDRDEC2_COL_SEL_HI_CS01__COL13__SHIFT 0x14 +#define MMEA0_ADDRDEC2_COL_SEL_HI_CS01__COL14__SHIFT 0x18 +#define MMEA0_ADDRDEC2_COL_SEL_HI_CS01__COL15__SHIFT 0x1c +#define MMEA0_ADDRDEC2_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL +#define MMEA0_ADDRDEC2_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L +#define MMEA0_ADDRDEC2_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L +#define MMEA0_ADDRDEC2_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L +#define MMEA0_ADDRDEC2_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L +#define MMEA0_ADDRDEC2_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L +#define MMEA0_ADDRDEC2_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L +#define MMEA0_ADDRDEC2_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L +//MMEA0_ADDRDEC2_COL_SEL_HI_CS23 +#define MMEA0_ADDRDEC2_COL_SEL_HI_CS23__COL8__SHIFT 0x0 +#define MMEA0_ADDRDEC2_COL_SEL_HI_CS23__COL9__SHIFT 0x4 +#define MMEA0_ADDRDEC2_COL_SEL_HI_CS23__COL10__SHIFT 0x8 +#define MMEA0_ADDRDEC2_COL_SEL_HI_CS23__COL11__SHIFT 0xc +#define MMEA0_ADDRDEC2_COL_SEL_HI_CS23__COL12__SHIFT 0x10 +#define MMEA0_ADDRDEC2_COL_SEL_HI_CS23__COL13__SHIFT 0x14 +#define MMEA0_ADDRDEC2_COL_SEL_HI_CS23__COL14__SHIFT 0x18 +#define MMEA0_ADDRDEC2_COL_SEL_HI_CS23__COL15__SHIFT 0x1c +#define MMEA0_ADDRDEC2_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL +#define MMEA0_ADDRDEC2_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L +#define MMEA0_ADDRDEC2_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L +#define MMEA0_ADDRDEC2_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L +#define MMEA0_ADDRDEC2_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L +#define MMEA0_ADDRDEC2_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L +#define MMEA0_ADDRDEC2_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L +#define MMEA0_ADDRDEC2_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L +//MMEA0_ADDRDEC2_RM_SEL_CS01 +#define MMEA0_ADDRDEC2_RM_SEL_CS01__RM0__SHIFT 0x0 +#define MMEA0_ADDRDEC2_RM_SEL_CS01__RM1__SHIFT 0x4 +#define MMEA0_ADDRDEC2_RM_SEL_CS01__RM2__SHIFT 0x8 +#define MMEA0_ADDRDEC2_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc +#define MMEA0_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA0_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA0_ADDRDEC2_RM_SEL_CS01__RM0_MASK 0x0000000FL +#define MMEA0_ADDRDEC2_RM_SEL_CS01__RM1_MASK 0x000000F0L +#define MMEA0_ADDRDEC2_RM_SEL_CS01__RM2_MASK 0x00000F00L +#define MMEA0_ADDRDEC2_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA0_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA0_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA0_ADDRDEC2_RM_SEL_CS23 +#define MMEA0_ADDRDEC2_RM_SEL_CS23__RM0__SHIFT 0x0 +#define MMEA0_ADDRDEC2_RM_SEL_CS23__RM1__SHIFT 0x4 +#define MMEA0_ADDRDEC2_RM_SEL_CS23__RM2__SHIFT 0x8 +#define MMEA0_ADDRDEC2_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc +#define MMEA0_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA0_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 
0x12 +#define MMEA0_ADDRDEC2_RM_SEL_CS23__RM0_MASK 0x0000000FL +#define MMEA0_ADDRDEC2_RM_SEL_CS23__RM1_MASK 0x000000F0L +#define MMEA0_ADDRDEC2_RM_SEL_CS23__RM2_MASK 0x00000F00L +#define MMEA0_ADDRDEC2_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA0_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA0_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA0_ADDRDEC2_RM_SEL_SECCS01 +#define MMEA0_ADDRDEC2_RM_SEL_SECCS01__RM0__SHIFT 0x0 +#define MMEA0_ADDRDEC2_RM_SEL_SECCS01__RM1__SHIFT 0x4 +#define MMEA0_ADDRDEC2_RM_SEL_SECCS01__RM2__SHIFT 0x8 +#define MMEA0_ADDRDEC2_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc +#define MMEA0_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA0_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA0_ADDRDEC2_RM_SEL_SECCS01__RM0_MASK 0x0000000FL +#define MMEA0_ADDRDEC2_RM_SEL_SECCS01__RM1_MASK 0x000000F0L +#define MMEA0_ADDRDEC2_RM_SEL_SECCS01__RM2_MASK 0x00000F00L +#define MMEA0_ADDRDEC2_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA0_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA0_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA0_ADDRDEC2_RM_SEL_SECCS23 +#define MMEA0_ADDRDEC2_RM_SEL_SECCS23__RM0__SHIFT 0x0 +#define MMEA0_ADDRDEC2_RM_SEL_SECCS23__RM1__SHIFT 0x4 +#define MMEA0_ADDRDEC2_RM_SEL_SECCS23__RM2__SHIFT 0x8 +#define MMEA0_ADDRDEC2_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc +#define MMEA0_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA0_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA0_ADDRDEC2_RM_SEL_SECCS23__RM0_MASK 0x0000000FL +#define MMEA0_ADDRDEC2_RM_SEL_SECCS23__RM1_MASK 0x000000F0L +#define MMEA0_ADDRDEC2_RM_SEL_SECCS23__RM2_MASK 0x00000F00L +#define MMEA0_ADDRDEC2_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA0_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA0_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA0_ADDRNORMDRAM_GLOBAL_CNTL +#define MMEA0_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K__SHIFT 0x14 +#define MMEA0_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M__SHIFT 0x15 +#define MMEA0_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G__SHIFT 0x16 +#define MMEA0_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K_MASK 0x00100000L +#define MMEA0_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M_MASK 0x00200000L +#define MMEA0_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G_MASK 0x00400000L +//MMEA0_ADDRNORMGMI_GLOBAL_CNTL +#define MMEA0_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K__SHIFT 0x14 +#define MMEA0_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M__SHIFT 0x15 +#define MMEA0_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G__SHIFT 0x16 +#define MMEA0_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K_MASK 0x00100000L +#define MMEA0_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M_MASK 0x00200000L +#define MMEA0_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G_MASK 0x00400000L +//MMEA0_IO_RD_CLI2GRP_MAP0 +#define MMEA0_IO_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0 +#define MMEA0_IO_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2 +#define MMEA0_IO_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4 +#define MMEA0_IO_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6 +#define MMEA0_IO_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8 +#define MMEA0_IO_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa +#define MMEA0_IO_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc +#define MMEA0_IO_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe +#define MMEA0_IO_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10 +#define 
MMEA0_IO_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12 +#define MMEA0_IO_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14 +#define MMEA0_IO_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16 +#define MMEA0_IO_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18 +#define MMEA0_IO_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a +#define MMEA0_IO_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c +#define MMEA0_IO_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e +#define MMEA0_IO_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L +#define MMEA0_IO_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL +#define MMEA0_IO_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L +#define MMEA0_IO_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L +#define MMEA0_IO_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L +#define MMEA0_IO_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L +#define MMEA0_IO_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L +#define MMEA0_IO_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L +#define MMEA0_IO_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L +#define MMEA0_IO_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L +#define MMEA0_IO_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L +#define MMEA0_IO_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L +#define MMEA0_IO_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L +#define MMEA0_IO_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L +#define MMEA0_IO_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L +#define MMEA0_IO_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L +//MMEA0_IO_RD_CLI2GRP_MAP1 +#define MMEA0_IO_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0 +#define MMEA0_IO_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2 +#define MMEA0_IO_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4 +#define MMEA0_IO_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6 +#define MMEA0_IO_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8 +#define MMEA0_IO_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa +#define MMEA0_IO_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc +#define MMEA0_IO_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe +#define MMEA0_IO_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10 +#define MMEA0_IO_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12 +#define MMEA0_IO_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14 +#define MMEA0_IO_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16 +#define MMEA0_IO_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18 +#define MMEA0_IO_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a +#define MMEA0_IO_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c +#define MMEA0_IO_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e +#define MMEA0_IO_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L +#define MMEA0_IO_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL +#define MMEA0_IO_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L +#define MMEA0_IO_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L +#define MMEA0_IO_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L +#define MMEA0_IO_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L +#define MMEA0_IO_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L +#define MMEA0_IO_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L +#define MMEA0_IO_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L +#define MMEA0_IO_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L +#define MMEA0_IO_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L +#define MMEA0_IO_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L +#define MMEA0_IO_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L +#define MMEA0_IO_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L +#define MMEA0_IO_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L +#define MMEA0_IO_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L +//MMEA0_IO_WR_CLI2GRP_MAP0 +#define MMEA0_IO_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0 +#define MMEA0_IO_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2 
+#define MMEA0_IO_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4 +#define MMEA0_IO_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6 +#define MMEA0_IO_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8 +#define MMEA0_IO_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa +#define MMEA0_IO_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc +#define MMEA0_IO_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe +#define MMEA0_IO_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10 +#define MMEA0_IO_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12 +#define MMEA0_IO_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14 +#define MMEA0_IO_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16 +#define MMEA0_IO_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18 +#define MMEA0_IO_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a +#define MMEA0_IO_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c +#define MMEA0_IO_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e +#define MMEA0_IO_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L +#define MMEA0_IO_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL +#define MMEA0_IO_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L +#define MMEA0_IO_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L +#define MMEA0_IO_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L +#define MMEA0_IO_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L +#define MMEA0_IO_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L +#define MMEA0_IO_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L +#define MMEA0_IO_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L +#define MMEA0_IO_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L +#define MMEA0_IO_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L +#define MMEA0_IO_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L +#define MMEA0_IO_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L +#define MMEA0_IO_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L +#define MMEA0_IO_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L +#define MMEA0_IO_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L +//MMEA0_IO_WR_CLI2GRP_MAP1 +#define MMEA0_IO_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0 +#define MMEA0_IO_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2 +#define MMEA0_IO_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4 +#define MMEA0_IO_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6 +#define MMEA0_IO_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8 +#define MMEA0_IO_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa +#define MMEA0_IO_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc +#define MMEA0_IO_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe +#define MMEA0_IO_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10 +#define MMEA0_IO_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12 +#define MMEA0_IO_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14 +#define MMEA0_IO_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16 +#define MMEA0_IO_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18 +#define MMEA0_IO_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a +#define MMEA0_IO_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c +#define MMEA0_IO_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e +#define MMEA0_IO_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L +#define MMEA0_IO_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL +#define MMEA0_IO_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L +#define MMEA0_IO_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L +#define MMEA0_IO_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L +#define MMEA0_IO_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L +#define MMEA0_IO_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L +#define MMEA0_IO_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L +#define MMEA0_IO_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L +#define MMEA0_IO_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L +#define MMEA0_IO_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L +#define MMEA0_IO_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 
0x00C00000L +#define MMEA0_IO_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L +#define MMEA0_IO_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L +#define MMEA0_IO_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L +#define MMEA0_IO_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L +//MMEA0_IO_RD_COMBINE_FLUSH +#define MMEA0_IO_RD_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0 +#define MMEA0_IO_RD_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4 +#define MMEA0_IO_RD_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8 +#define MMEA0_IO_RD_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc +#define MMEA0_IO_RD_COMBINE_FLUSH__FORWARD_COMB_ONLY__SHIFT 0x10 +#define MMEA0_IO_RD_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL +#define MMEA0_IO_RD_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L +#define MMEA0_IO_RD_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L +#define MMEA0_IO_RD_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L +#define MMEA0_IO_RD_COMBINE_FLUSH__FORWARD_COMB_ONLY_MASK 0x00010000L +//MMEA0_IO_WR_COMBINE_FLUSH +#define MMEA0_IO_WR_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0 +#define MMEA0_IO_WR_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4 +#define MMEA0_IO_WR_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8 +#define MMEA0_IO_WR_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc +#define MMEA0_IO_WR_COMBINE_FLUSH__FORWARD_COMB_ONLY__SHIFT 0x10 +#define MMEA0_IO_WR_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL +#define MMEA0_IO_WR_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L +#define MMEA0_IO_WR_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L +#define MMEA0_IO_WR_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L +#define MMEA0_IO_WR_COMBINE_FLUSH__FORWARD_COMB_ONLY_MASK 0x00010000L +//MMEA0_IO_GROUP_BURST +#define MMEA0_IO_GROUP_BURST__RD_LIMIT_LO__SHIFT 0x0 +#define MMEA0_IO_GROUP_BURST__RD_LIMIT_HI__SHIFT 0x8 +#define MMEA0_IO_GROUP_BURST__WR_LIMIT_LO__SHIFT 0x10 +#define MMEA0_IO_GROUP_BURST__WR_LIMIT_HI__SHIFT 0x18 +#define MMEA0_IO_GROUP_BURST__RD_LIMIT_LO_MASK 0x000000FFL +#define MMEA0_IO_GROUP_BURST__RD_LIMIT_HI_MASK 0x0000FF00L +#define MMEA0_IO_GROUP_BURST__WR_LIMIT_LO_MASK 0x00FF0000L +#define MMEA0_IO_GROUP_BURST__WR_LIMIT_HI_MASK 0xFF000000L +//MMEA0_IO_RD_PRI_AGE +#define MMEA0_IO_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0 +#define MMEA0_IO_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3 +#define MMEA0_IO_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6 +#define MMEA0_IO_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9 +#define MMEA0_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc +#define MMEA0_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf +#define MMEA0_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12 +#define MMEA0_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15 +#define MMEA0_IO_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L +#define MMEA0_IO_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L +#define MMEA0_IO_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L +#define MMEA0_IO_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L +#define MMEA0_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L +#define MMEA0_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L +#define MMEA0_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L +#define MMEA0_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L +//MMEA0_IO_WR_PRI_AGE +#define MMEA0_IO_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0 +#define MMEA0_IO_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3 +#define MMEA0_IO_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6 +#define MMEA0_IO_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9 +#define MMEA0_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc +#define MMEA0_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf +#define 
MMEA0_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12 +#define MMEA0_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15 +#define MMEA0_IO_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L +#define MMEA0_IO_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L +#define MMEA0_IO_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L +#define MMEA0_IO_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L +#define MMEA0_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L +#define MMEA0_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L +#define MMEA0_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L +#define MMEA0_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L +//MMEA0_IO_RD_PRI_QUEUING +#define MMEA0_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0 +#define MMEA0_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3 +#define MMEA0_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6 +#define MMEA0_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9 +#define MMEA0_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L +#define MMEA0_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L +#define MMEA0_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L +#define MMEA0_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L +//MMEA0_IO_WR_PRI_QUEUING +#define MMEA0_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0 +#define MMEA0_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3 +#define MMEA0_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6 +#define MMEA0_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9 +#define MMEA0_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L +#define MMEA0_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L +#define MMEA0_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L +#define MMEA0_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L +//MMEA0_IO_RD_PRI_FIXED +#define MMEA0_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0 +#define MMEA0_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3 +#define MMEA0_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6 +#define MMEA0_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9 +#define MMEA0_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L +#define MMEA0_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L +#define MMEA0_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L +#define MMEA0_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L +//MMEA0_IO_WR_PRI_FIXED +#define MMEA0_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0 +#define MMEA0_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3 +#define MMEA0_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6 +#define MMEA0_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9 +#define MMEA0_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L +#define MMEA0_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L +#define MMEA0_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L +#define MMEA0_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L +//MMEA0_IO_RD_PRI_URGENCY +#define MMEA0_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0 +#define MMEA0_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3 +#define MMEA0_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6 +#define MMEA0_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9 +#define MMEA0_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc +#define MMEA0_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd +#define 
MMEA0_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe +#define MMEA0_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf +#define MMEA0_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L +#define MMEA0_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L +#define MMEA0_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L +#define MMEA0_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L +#define MMEA0_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L +#define MMEA0_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L +#define MMEA0_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L +#define MMEA0_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L +//MMEA0_IO_WR_PRI_URGENCY +#define MMEA0_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0 +#define MMEA0_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3 +#define MMEA0_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6 +#define MMEA0_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9 +#define MMEA0_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc +#define MMEA0_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd +#define MMEA0_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe +#define MMEA0_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf +#define MMEA0_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L +#define MMEA0_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L +#define MMEA0_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L +#define MMEA0_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L +#define MMEA0_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L +#define MMEA0_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L +#define MMEA0_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L +#define MMEA0_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L +//MMEA0_IO_RD_PRI_URGENCY_MASKING +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0 +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1 +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2 +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3 +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4 +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5 +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6 +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7 +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8 +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9 +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10 +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11 +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12 +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13 +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14 +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15 +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16 +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17 +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18 +#define 
MMEA0_IO_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19 +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L +#define MMEA0_IO_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L +//MMEA0_IO_WR_PRI_URGENCY_MASKING +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0 +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1 +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2 +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3 +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4 +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5 +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6 +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7 +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8 +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9 +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc +#define 
MMEA0_IO_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10 +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11 +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12 +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13 +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14 +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15 +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16 +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17 +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18 +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19 +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L +#define MMEA0_IO_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L +//MMEA0_IO_RD_PRI_QUANT_PRI1 +#define MMEA0_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0 
+#define MMEA0_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA0_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA0_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA0_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA0_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA0_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA0_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA0_IO_RD_PRI_QUANT_PRI2 +#define MMEA0_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA0_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA0_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA0_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA0_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA0_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA0_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA0_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA0_IO_RD_PRI_QUANT_PRI3 +#define MMEA0_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA0_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA0_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA0_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA0_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA0_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA0_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA0_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA0_IO_WR_PRI_QUANT_PRI1 +#define MMEA0_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA0_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA0_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA0_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA0_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA0_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA0_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA0_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA0_IO_WR_PRI_QUANT_PRI2 +#define MMEA0_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA0_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA0_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA0_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA0_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA0_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA0_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA0_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA0_IO_WR_PRI_QUANT_PRI3 +#define MMEA0_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA0_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA0_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA0_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA0_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA0_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA0_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA0_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA0_SDP_ARB_DRAM +#define MMEA0_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0 +#define MMEA0_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA__SHIFT 0x8 +#define 
MMEA0_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI__SHIFT 0x10 +#define MMEA0_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI__SHIFT 0x11 +#define MMEA0_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES__SHIFT 0x12 +#define MMEA0_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES__SHIFT 0x13 +#define MMEA0_SDP_ARB_DRAM__EOB_ON_EXPIRE__SHIFT 0x14 +#define MMEA0_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15 +#define MMEA0_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL +#define MMEA0_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L +#define MMEA0_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI_MASK 0x00010000L +#define MMEA0_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI_MASK 0x00020000L +#define MMEA0_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES_MASK 0x00040000L +#define MMEA0_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES_MASK 0x00080000L +#define MMEA0_SDP_ARB_DRAM__EOB_ON_EXPIRE_MASK 0x00100000L +#define MMEA0_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L +//MMEA0_SDP_ARB_GMI +#define MMEA0_SDP_ARB_GMI__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0 +#define MMEA0_SDP_ARB_GMI__RDWR_BURST_LIMIT_DATA__SHIFT 0x8 +#define MMEA0_SDP_ARB_GMI__EARLY_SW2RD_ON_PRI__SHIFT 0x10 +#define MMEA0_SDP_ARB_GMI__EARLY_SW2WR_ON_PRI__SHIFT 0x11 +#define MMEA0_SDP_ARB_GMI__EARLY_SW2RD_ON_RES__SHIFT 0x12 +#define MMEA0_SDP_ARB_GMI__EARLY_SW2WR_ON_RES__SHIFT 0x13 +#define MMEA0_SDP_ARB_GMI__EOB_ON_EXPIRE__SHIFT 0x14 +#define MMEA0_SDP_ARB_GMI__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15 +#define MMEA0_SDP_ARB_GMI__ALLOW_CHAIN_BREAKING__SHIFT 0x16 +#define MMEA0_SDP_ARB_GMI__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL +#define MMEA0_SDP_ARB_GMI__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L +#define MMEA0_SDP_ARB_GMI__EARLY_SW2RD_ON_PRI_MASK 0x00010000L +#define MMEA0_SDP_ARB_GMI__EARLY_SW2WR_ON_PRI_MASK 0x00020000L +#define MMEA0_SDP_ARB_GMI__EARLY_SW2RD_ON_RES_MASK 0x00040000L +#define MMEA0_SDP_ARB_GMI__EARLY_SW2WR_ON_RES_MASK 0x00080000L +#define MMEA0_SDP_ARB_GMI__EOB_ON_EXPIRE_MASK 0x00100000L +#define MMEA0_SDP_ARB_GMI__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L +#define MMEA0_SDP_ARB_GMI__ALLOW_CHAIN_BREAKING_MASK 0x00400000L +//MMEA0_SDP_ARB_FINAL +#define MMEA0_SDP_ARB_FINAL__DRAM_BURST_LIMIT__SHIFT 0x0 +#define MMEA0_SDP_ARB_FINAL__GMI_BURST_LIMIT__SHIFT 0x5 +#define MMEA0_SDP_ARB_FINAL__IO_BURST_LIMIT__SHIFT 0xa +#define MMEA0_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER__SHIFT 0xf +#define MMEA0_SDP_ARB_FINAL__RDONLY_VC0__SHIFT 0x11 +#define MMEA0_SDP_ARB_FINAL__RDONLY_VC1__SHIFT 0x12 +#define MMEA0_SDP_ARB_FINAL__RDONLY_VC2__SHIFT 0x13 +#define MMEA0_SDP_ARB_FINAL__RDONLY_VC3__SHIFT 0x14 +#define MMEA0_SDP_ARB_FINAL__RDONLY_VC4__SHIFT 0x15 +#define MMEA0_SDP_ARB_FINAL__RDONLY_VC5__SHIFT 0x16 +#define MMEA0_SDP_ARB_FINAL__RDONLY_VC6__SHIFT 0x17 +#define MMEA0_SDP_ARB_FINAL__RDONLY_VC7__SHIFT 0x18 +#define MMEA0_SDP_ARB_FINAL__ERREVENT_ON_ERROR__SHIFT 0x19 +#define MMEA0_SDP_ARB_FINAL__HALTREQ_ON_ERROR__SHIFT 0x1a +#define MMEA0_SDP_ARB_FINAL__GMI_BURST_STRETCH__SHIFT 0x1b +#define MMEA0_SDP_ARB_FINAL__DRAM_BURST_LIMIT_MASK 0x0000001FL +#define MMEA0_SDP_ARB_FINAL__GMI_BURST_LIMIT_MASK 0x000003E0L +#define MMEA0_SDP_ARB_FINAL__IO_BURST_LIMIT_MASK 0x00007C00L +#define MMEA0_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER_MASK 0x00018000L +#define MMEA0_SDP_ARB_FINAL__RDONLY_VC0_MASK 0x00020000L +#define MMEA0_SDP_ARB_FINAL__RDONLY_VC1_MASK 0x00040000L +#define MMEA0_SDP_ARB_FINAL__RDONLY_VC2_MASK 0x00080000L +#define MMEA0_SDP_ARB_FINAL__RDONLY_VC3_MASK 0x00100000L +#define MMEA0_SDP_ARB_FINAL__RDONLY_VC4_MASK 0x00200000L +#define MMEA0_SDP_ARB_FINAL__RDONLY_VC5_MASK 0x00400000L +#define MMEA0_SDP_ARB_FINAL__RDONLY_VC6_MASK 0x00800000L +#define 
MMEA0_SDP_ARB_FINAL__RDONLY_VC7_MASK 0x01000000L +#define MMEA0_SDP_ARB_FINAL__ERREVENT_ON_ERROR_MASK 0x02000000L +#define MMEA0_SDP_ARB_FINAL__HALTREQ_ON_ERROR_MASK 0x04000000L +#define MMEA0_SDP_ARB_FINAL__GMI_BURST_STRETCH_MASK 0x08000000L +//MMEA0_SDP_DRAM_PRIORITY +#define MMEA0_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0 +#define MMEA0_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4 +#define MMEA0_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8 +#define MMEA0_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc +#define MMEA0_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10 +#define MMEA0_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14 +#define MMEA0_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18 +#define MMEA0_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c +#define MMEA0_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL +#define MMEA0_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L +#define MMEA0_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L +#define MMEA0_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L +#define MMEA0_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L +#define MMEA0_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L +#define MMEA0_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L +#define MMEA0_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L +//MMEA0_SDP_GMI_PRIORITY +#define MMEA0_SDP_GMI_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0 +#define MMEA0_SDP_GMI_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4 +#define MMEA0_SDP_GMI_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8 +#define MMEA0_SDP_GMI_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc +#define MMEA0_SDP_GMI_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10 +#define MMEA0_SDP_GMI_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14 +#define MMEA0_SDP_GMI_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18 +#define MMEA0_SDP_GMI_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c +#define MMEA0_SDP_GMI_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL +#define MMEA0_SDP_GMI_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L +#define MMEA0_SDP_GMI_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L +#define MMEA0_SDP_GMI_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L +#define MMEA0_SDP_GMI_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L +#define MMEA0_SDP_GMI_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L +#define MMEA0_SDP_GMI_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L +#define MMEA0_SDP_GMI_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L +//MMEA0_SDP_IO_PRIORITY +#define MMEA0_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0 +#define MMEA0_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4 +#define MMEA0_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8 +#define MMEA0_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc +#define MMEA0_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10 +#define MMEA0_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14 +#define MMEA0_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18 +#define MMEA0_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c +#define MMEA0_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL +#define MMEA0_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L +#define MMEA0_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L +#define MMEA0_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L +#define MMEA0_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L +#define MMEA0_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L +#define MMEA0_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L +#define MMEA0_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L +//MMEA0_SDP_CREDITS +#define 
MMEA0_SDP_CREDITS__TAG_LIMIT__SHIFT 0x0 +#define MMEA0_SDP_CREDITS__WR_RESP_CREDITS__SHIFT 0x8 +#define MMEA0_SDP_CREDITS__RD_RESP_CREDITS__SHIFT 0x10 +#define MMEA0_SDP_CREDITS__TAG_LIMIT_MASK 0x000000FFL +#define MMEA0_SDP_CREDITS__WR_RESP_CREDITS_MASK 0x00007F00L +#define MMEA0_SDP_CREDITS__RD_RESP_CREDITS_MASK 0x007F0000L +//MMEA0_SDP_TAG_RESERVE0 +#define MMEA0_SDP_TAG_RESERVE0__VC0__SHIFT 0x0 +#define MMEA0_SDP_TAG_RESERVE0__VC1__SHIFT 0x8 +#define MMEA0_SDP_TAG_RESERVE0__VC2__SHIFT 0x10 +#define MMEA0_SDP_TAG_RESERVE0__VC3__SHIFT 0x18 +#define MMEA0_SDP_TAG_RESERVE0__VC0_MASK 0x000000FFL +#define MMEA0_SDP_TAG_RESERVE0__VC1_MASK 0x0000FF00L +#define MMEA0_SDP_TAG_RESERVE0__VC2_MASK 0x00FF0000L +#define MMEA0_SDP_TAG_RESERVE0__VC3_MASK 0xFF000000L +//MMEA0_SDP_TAG_RESERVE1 +#define MMEA0_SDP_TAG_RESERVE1__VC4__SHIFT 0x0 +#define MMEA0_SDP_TAG_RESERVE1__VC5__SHIFT 0x8 +#define MMEA0_SDP_TAG_RESERVE1__VC6__SHIFT 0x10 +#define MMEA0_SDP_TAG_RESERVE1__VC7__SHIFT 0x18 +#define MMEA0_SDP_TAG_RESERVE1__VC4_MASK 0x000000FFL +#define MMEA0_SDP_TAG_RESERVE1__VC5_MASK 0x0000FF00L +#define MMEA0_SDP_TAG_RESERVE1__VC6_MASK 0x00FF0000L +#define MMEA0_SDP_TAG_RESERVE1__VC7_MASK 0xFF000000L +//MMEA0_SDP_VCC_RESERVE0 +#define MMEA0_SDP_VCC_RESERVE0__VC0_CREDITS__SHIFT 0x0 +#define MMEA0_SDP_VCC_RESERVE0__VC1_CREDITS__SHIFT 0x6 +#define MMEA0_SDP_VCC_RESERVE0__VC2_CREDITS__SHIFT 0xc +#define MMEA0_SDP_VCC_RESERVE0__VC3_CREDITS__SHIFT 0x12 +#define MMEA0_SDP_VCC_RESERVE0__VC4_CREDITS__SHIFT 0x18 +#define MMEA0_SDP_VCC_RESERVE0__VC0_CREDITS_MASK 0x0000003FL +#define MMEA0_SDP_VCC_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L +#define MMEA0_SDP_VCC_RESERVE0__VC2_CREDITS_MASK 0x0003F000L +#define MMEA0_SDP_VCC_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L +#define MMEA0_SDP_VCC_RESERVE0__VC4_CREDITS_MASK 0x3F000000L +//MMEA0_SDP_VCC_RESERVE1 +#define MMEA0_SDP_VCC_RESERVE1__VC5_CREDITS__SHIFT 0x0 +#define MMEA0_SDP_VCC_RESERVE1__VC6_CREDITS__SHIFT 0x6 +#define MMEA0_SDP_VCC_RESERVE1__VC7_CREDITS__SHIFT 0xc +#define MMEA0_SDP_VCC_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f +#define MMEA0_SDP_VCC_RESERVE1__VC5_CREDITS_MASK 0x0000003FL +#define MMEA0_SDP_VCC_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L +#define MMEA0_SDP_VCC_RESERVE1__VC7_CREDITS_MASK 0x0003F000L +#define MMEA0_SDP_VCC_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L +//MMEA0_SDP_VCD_RESERVE0 +#define MMEA0_SDP_VCD_RESERVE0__VC0_CREDITS__SHIFT 0x0 +#define MMEA0_SDP_VCD_RESERVE0__VC1_CREDITS__SHIFT 0x6 +#define MMEA0_SDP_VCD_RESERVE0__VC2_CREDITS__SHIFT 0xc +#define MMEA0_SDP_VCD_RESERVE0__VC3_CREDITS__SHIFT 0x12 +#define MMEA0_SDP_VCD_RESERVE0__VC4_CREDITS__SHIFT 0x18 +#define MMEA0_SDP_VCD_RESERVE0__VC0_CREDITS_MASK 0x0000003FL +#define MMEA0_SDP_VCD_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L +#define MMEA0_SDP_VCD_RESERVE0__VC2_CREDITS_MASK 0x0003F000L +#define MMEA0_SDP_VCD_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L +#define MMEA0_SDP_VCD_RESERVE0__VC4_CREDITS_MASK 0x3F000000L +//MMEA0_SDP_VCD_RESERVE1 +#define MMEA0_SDP_VCD_RESERVE1__VC5_CREDITS__SHIFT 0x0 +#define MMEA0_SDP_VCD_RESERVE1__VC6_CREDITS__SHIFT 0x6 +#define MMEA0_SDP_VCD_RESERVE1__VC7_CREDITS__SHIFT 0xc +#define MMEA0_SDP_VCD_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f +#define MMEA0_SDP_VCD_RESERVE1__VC5_CREDITS_MASK 0x0000003FL +#define MMEA0_SDP_VCD_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L +#define MMEA0_SDP_VCD_RESERVE1__VC7_CREDITS_MASK 0x0003F000L +#define MMEA0_SDP_VCD_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L +//MMEA0_SDP_REQ_CNTL +#define MMEA0_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ__SHIFT 0x0 +#define 
MMEA0_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE__SHIFT 0x1 +#define MMEA0_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC__SHIFT 0x2 +#define MMEA0_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM__SHIFT 0x3 +#define MMEA0_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI__SHIFT 0x4 +#define MMEA0_SDP_REQ_CNTL__INNER_DOMAIN_MODE__SHIFT 0x5 +#define MMEA0_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ_MASK 0x00000001L +#define MMEA0_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE_MASK 0x00000002L +#define MMEA0_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC_MASK 0x00000004L +#define MMEA0_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM_MASK 0x00000008L +#define MMEA0_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI_MASK 0x00000010L +#define MMEA0_SDP_REQ_CNTL__INNER_DOMAIN_MODE_MASK 0x00000020L +//MMEA0_MISC +#define MMEA0_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB__SHIFT 0x0 +#define MMEA0_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB__SHIFT 0x1 +#define MMEA0_MISC__RELATIVE_PRI_IN_GMI_RD_ARB__SHIFT 0x2 +#define MMEA0_MISC__RELATIVE_PRI_IN_GMI_WR_ARB__SHIFT 0x3 +#define MMEA0_MISC__RELATIVE_PRI_IN_IO_RD_ARB__SHIFT 0x4 +#define MMEA0_MISC__RELATIVE_PRI_IN_IO_WR_ARB__SHIFT 0x5 +#define MMEA0_MISC__EARLYWRRET_ENABLE_VC0__SHIFT 0x6 +#define MMEA0_MISC__EARLYWRRET_ENABLE_VC1__SHIFT 0x7 +#define MMEA0_MISC__EARLYWRRET_ENABLE_VC2__SHIFT 0x8 +#define MMEA0_MISC__EARLYWRRET_ENABLE_VC3__SHIFT 0x9 +#define MMEA0_MISC__EARLYWRRET_ENABLE_VC4__SHIFT 0xa +#define MMEA0_MISC__EARLYWRRET_ENABLE_VC5__SHIFT 0xb +#define MMEA0_MISC__EARLYWRRET_ENABLE_VC6__SHIFT 0xc +#define MMEA0_MISC__EARLYWRRET_ENABLE_VC7__SHIFT 0xd +#define MMEA0_MISC__EARLY_SDP_ORIGDATA__SHIFT 0xe +#define MMEA0_MISC__LINKMGR_DYNAMIC_MODE__SHIFT 0xf +#define MMEA0_MISC__LINKMGR_HALT_THRESHOLD__SHIFT 0x11 +#define MMEA0_MISC__LINKMGR_RECONNECT_DELAY__SHIFT 0x13 +#define MMEA0_MISC__LINKMGR_IDLE_THRESHOLD__SHIFT 0x15 +#define MMEA0_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB__SHIFT 0x1a +#define MMEA0_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB__SHIFT 0x1b +#define MMEA0_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB__SHIFT 0x1c +#define MMEA0_MISC__FAVOUR_LAST_CS_IN_GMI_ARB__SHIFT 0x1d +#define MMEA0_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB__SHIFT 0x1e +#define MMEA0_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB__SHIFT 0x1f +#define MMEA0_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB_MASK 0x00000001L +#define MMEA0_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB_MASK 0x00000002L +#define MMEA0_MISC__RELATIVE_PRI_IN_GMI_RD_ARB_MASK 0x00000004L +#define MMEA0_MISC__RELATIVE_PRI_IN_GMI_WR_ARB_MASK 0x00000008L +#define MMEA0_MISC__RELATIVE_PRI_IN_IO_RD_ARB_MASK 0x00000010L +#define MMEA0_MISC__RELATIVE_PRI_IN_IO_WR_ARB_MASK 0x00000020L +#define MMEA0_MISC__EARLYWRRET_ENABLE_VC0_MASK 0x00000040L +#define MMEA0_MISC__EARLYWRRET_ENABLE_VC1_MASK 0x00000080L +#define MMEA0_MISC__EARLYWRRET_ENABLE_VC2_MASK 0x00000100L +#define MMEA0_MISC__EARLYWRRET_ENABLE_VC3_MASK 0x00000200L +#define MMEA0_MISC__EARLYWRRET_ENABLE_VC4_MASK 0x00000400L +#define MMEA0_MISC__EARLYWRRET_ENABLE_VC5_MASK 0x00000800L +#define MMEA0_MISC__EARLYWRRET_ENABLE_VC6_MASK 0x00001000L +#define MMEA0_MISC__EARLYWRRET_ENABLE_VC7_MASK 0x00002000L +#define MMEA0_MISC__EARLY_SDP_ORIGDATA_MASK 0x00004000L +#define MMEA0_MISC__LINKMGR_DYNAMIC_MODE_MASK 0x00018000L +#define MMEA0_MISC__LINKMGR_HALT_THRESHOLD_MASK 0x00060000L +#define MMEA0_MISC__LINKMGR_RECONNECT_DELAY_MASK 0x00180000L +#define MMEA0_MISC__LINKMGR_IDLE_THRESHOLD_MASK 0x03E00000L +#define MMEA0_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB_MASK 0x04000000L +#define MMEA0_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB_MASK 0x08000000L +#define MMEA0_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB_MASK 0x10000000L 
+#define MMEA0_MISC__FAVOUR_LAST_CS_IN_GMI_ARB_MASK 0x20000000L +#define MMEA0_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB_MASK 0x40000000L +#define MMEA0_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB_MASK 0x80000000L +//MMEA0_LATENCY_SAMPLING +#define MMEA0_LATENCY_SAMPLING__SAMPLER0_DRAM__SHIFT 0x0 +#define MMEA0_LATENCY_SAMPLING__SAMPLER1_DRAM__SHIFT 0x1 +#define MMEA0_LATENCY_SAMPLING__SAMPLER0_GMI__SHIFT 0x2 +#define MMEA0_LATENCY_SAMPLING__SAMPLER1_GMI__SHIFT 0x3 +#define MMEA0_LATENCY_SAMPLING__SAMPLER0_IO__SHIFT 0x4 +#define MMEA0_LATENCY_SAMPLING__SAMPLER1_IO__SHIFT 0x5 +#define MMEA0_LATENCY_SAMPLING__SAMPLER0_READ__SHIFT 0x6 +#define MMEA0_LATENCY_SAMPLING__SAMPLER1_READ__SHIFT 0x7 +#define MMEA0_LATENCY_SAMPLING__SAMPLER0_WRITE__SHIFT 0x8 +#define MMEA0_LATENCY_SAMPLING__SAMPLER1_WRITE__SHIFT 0x9 +#define MMEA0_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET__SHIFT 0xa +#define MMEA0_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET__SHIFT 0xb +#define MMEA0_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET__SHIFT 0xc +#define MMEA0_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET__SHIFT 0xd +#define MMEA0_LATENCY_SAMPLING__SAMPLER0_VC__SHIFT 0xe +#define MMEA0_LATENCY_SAMPLING__SAMPLER1_VC__SHIFT 0x16 +#define MMEA0_LATENCY_SAMPLING__SAMPLER0_DRAM_MASK 0x00000001L +#define MMEA0_LATENCY_SAMPLING__SAMPLER1_DRAM_MASK 0x00000002L +#define MMEA0_LATENCY_SAMPLING__SAMPLER0_GMI_MASK 0x00000004L +#define MMEA0_LATENCY_SAMPLING__SAMPLER1_GMI_MASK 0x00000008L +#define MMEA0_LATENCY_SAMPLING__SAMPLER0_IO_MASK 0x00000010L +#define MMEA0_LATENCY_SAMPLING__SAMPLER1_IO_MASK 0x00000020L +#define MMEA0_LATENCY_SAMPLING__SAMPLER0_READ_MASK 0x00000040L +#define MMEA0_LATENCY_SAMPLING__SAMPLER1_READ_MASK 0x00000080L +#define MMEA0_LATENCY_SAMPLING__SAMPLER0_WRITE_MASK 0x00000100L +#define MMEA0_LATENCY_SAMPLING__SAMPLER1_WRITE_MASK 0x00000200L +#define MMEA0_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET_MASK 0x00000400L +#define MMEA0_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET_MASK 0x00000800L +#define MMEA0_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET_MASK 0x00001000L +#define MMEA0_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET_MASK 0x00002000L +#define MMEA0_LATENCY_SAMPLING__SAMPLER0_VC_MASK 0x003FC000L +#define MMEA0_LATENCY_SAMPLING__SAMPLER1_VC_MASK 0x3FC00000L +//MMEA0_PERFCOUNTER_LO +#define MMEA0_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0 +#define MMEA0_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL +//MMEA0_PERFCOUNTER_HI +#define MMEA0_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0 +#define MMEA0_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10 +#define MMEA0_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL +#define MMEA0_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L +//MMEA0_PERFCOUNTER0_CFG +#define MMEA0_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0 +#define MMEA0_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8 +#define MMEA0_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18 +#define MMEA0_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c +#define MMEA0_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d +#define MMEA0_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL +#define MMEA0_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define MMEA0_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L +#define MMEA0_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L +#define MMEA0_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L +//MMEA0_PERFCOUNTER1_CFG +#define MMEA0_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0 +#define MMEA0_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8 +#define MMEA0_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18 +#define MMEA0_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c +#define MMEA0_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d +#define MMEA0_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL 
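For readers unfamiliar with the SHIFT/MASK convention used throughout this header, the pairs above combine in the usual way: mask out the field, then OR in the new value shifted into position. The following is a minimal standalone sketch, not part of the patch and not amdgpu driver API; set_field() and build_perfcounter0_cfg() are hypothetical helper names, using only the MMEA0_PERFCOUNTER0_CFG macros defined above.

#include <stdint.h>

/* Insert 'val' into the register field described by a SHIFT/MASK pair. */
static inline uint32_t set_field(uint32_t reg, uint32_t mask,
                                 uint32_t shift, uint32_t val)
{
        return (reg & ~mask) | ((val << shift) & mask);
}

/* Compose an MMEA0_PERFCOUNTER0_CFG value: select event 0x2, clear, enable. */
static uint32_t build_perfcounter0_cfg(void)
{
        uint32_t cfg = 0;

        cfg = set_field(cfg, MMEA0_PERFCOUNTER0_CFG__PERF_SEL_MASK,
                        MMEA0_PERFCOUNTER0_CFG__PERF_SEL__SHIFT, 0x2);
        cfg = set_field(cfg, MMEA0_PERFCOUNTER0_CFG__CLEAR_MASK,
                        MMEA0_PERFCOUNTER0_CFG__CLEAR__SHIFT, 1);
        cfg = set_field(cfg, MMEA0_PERFCOUNTER0_CFG__ENABLE_MASK,
                        MMEA0_PERFCOUNTER0_CFG__ENABLE__SHIFT, 1);
        return cfg;
}

A driver would then write the composed value to the MMEA0_PERFCOUNTER0_CFG register offset with its own MMIO write helper; reading a field back is the inverse, (reg & MASK) >> SHIFT.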
+#define MMEA0_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define MMEA0_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L +#define MMEA0_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L +#define MMEA0_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L +//MMEA0_PERFCOUNTER_RSLT_CNTL +#define MMEA0_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0 +#define MMEA0_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8 +#define MMEA0_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10 +#define MMEA0_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18 +#define MMEA0_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19 +#define MMEA0_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a +#define MMEA0_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL +#define MMEA0_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L +#define MMEA0_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L +#define MMEA0_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L +#define MMEA0_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L +#define MMEA0_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L +//MMEA0_EDC_CNT +#define MMEA0_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0 +#define MMEA0_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2 +#define MMEA0_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4 +#define MMEA0_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6 +#define MMEA0_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8 +#define MMEA0_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa +#define MMEA0_EDC_CNT__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc +#define MMEA0_EDC_CNT__RRET_TAGMEM_DED_COUNT__SHIFT 0xe +#define MMEA0_EDC_CNT__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10 +#define MMEA0_EDC_CNT__WRET_TAGMEM_DED_COUNT__SHIFT 0x12 +#define MMEA0_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x14 +#define MMEA0_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x16 +#define MMEA0_EDC_CNT__IORD_CMDMEM_SED_COUNT__SHIFT 0x18 +#define MMEA0_EDC_CNT__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1a +#define MMEA0_EDC_CNT__IOWR_DATAMEM_SED_COUNT__SHIFT 0x1c +#define MMEA0_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT_MASK 0x00000003L +#define MMEA0_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL +#define MMEA0_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L +#define MMEA0_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L +#define MMEA0_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L +#define MMEA0_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L +#define MMEA0_EDC_CNT__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L +#define MMEA0_EDC_CNT__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L +#define MMEA0_EDC_CNT__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L +#define MMEA0_EDC_CNT__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L +#define MMEA0_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x00300000L +#define MMEA0_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x00C00000L +#define MMEA0_EDC_CNT__IORD_CMDMEM_SED_COUNT_MASK 0x03000000L +#define MMEA0_EDC_CNT__IOWR_CMDMEM_SED_COUNT_MASK 0x0C000000L +#define MMEA0_EDC_CNT__IOWR_DATAMEM_SED_COUNT_MASK 0x30000000L +//MMEA0_EDC_CNT2 +#define MMEA0_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0 +#define MMEA0_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2 +#define MMEA0_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4 +#define MMEA0_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6 +#define MMEA0_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8 +#define MMEA0_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa +#define MMEA0_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc +#define MMEA0_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe +#define MMEA0_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L +#define MMEA0_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 
0x0000000CL +#define MMEA0_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L +#define MMEA0_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT_MASK 0x000000C0L +#define MMEA0_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L +#define MMEA0_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L +#define MMEA0_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L +#define MMEA0_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L +//MMEA0_DSM_CNTL +#define MMEA0_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 +#define MMEA0_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 +#define MMEA0_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x3 +#define MMEA0_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5 +#define MMEA0_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x6 +#define MMEA0_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8 +#define MMEA0_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0x9 +#define MMEA0_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb +#define MMEA0_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0xc +#define MMEA0_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe +#define MMEA0_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0xf +#define MMEA0_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11 +#define MMEA0_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x12 +#define MMEA0_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14 +#define MMEA0_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x15 +#define MMEA0_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x17 +#define MMEA0_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L +#define MMEA0_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L +#define MMEA0_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L +#define MMEA0_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L +#define MMEA0_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L +#define MMEA0_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L +#define MMEA0_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L +#define MMEA0_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L +#define MMEA0_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L +#define MMEA0_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L +#define MMEA0_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L +#define MMEA0_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L +#define MMEA0_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L +#define MMEA0_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L +#define MMEA0_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00600000L +#define MMEA0_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00800000L +//MMEA0_DSM_CNTLA +#define MMEA0_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 +#define MMEA0_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 +#define MMEA0_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x3 +#define MMEA0_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5 +#define MMEA0_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x6 +#define MMEA0_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8 +#define MMEA0_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x9 +#define MMEA0_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb +#define MMEA0_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0xc +#define MMEA0_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe +#define MMEA0_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0xf +#define 
MMEA0_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11 +#define MMEA0_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x12 +#define MMEA0_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14 +#define MMEA0_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L +#define MMEA0_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L +#define MMEA0_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L +#define MMEA0_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L +#define MMEA0_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L +#define MMEA0_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L +#define MMEA0_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L +#define MMEA0_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L +#define MMEA0_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L +#define MMEA0_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L +#define MMEA0_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L +#define MMEA0_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L +#define MMEA0_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L +#define MMEA0_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L +//MMEA0_DSM_CNTL2 +#define MMEA0_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x0 +#define MMEA0_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x2 +#define MMEA0_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x3 +#define MMEA0_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x5 +#define MMEA0_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x6 +#define MMEA0_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x8 +#define MMEA0_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0x9 +#define MMEA0_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xb +#define MMEA0_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0xc +#define MMEA0_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xe +#define MMEA0_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0xf +#define MMEA0_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x11 +#define MMEA0_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x12 +#define MMEA0_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x14 +#define MMEA0_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x15 +#define MMEA0_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x17 +#define MMEA0_DSM_CNTL2__INJECT_DELAY__SHIFT 0x1a +#define MMEA0_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L +#define MMEA0_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000004L +#define MMEA0_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L +#define MMEA0_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000020L +#define MMEA0_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L +#define MMEA0_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00000100L +#define MMEA0_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L +#define MMEA0_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00000800L +#define MMEA0_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L +#define MMEA0_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00004000L +#define MMEA0_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L +#define MMEA0_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00020000L +#define MMEA0_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L +#define MMEA0_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00100000L +#define 
MMEA0_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00600000L +#define MMEA0_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00800000L +#define MMEA0_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L +//MMEA0_DSM_CNTL2A +#define MMEA0_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x0 +#define MMEA0_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x2 +#define MMEA0_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x3 +#define MMEA0_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x5 +#define MMEA0_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x6 +#define MMEA0_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x8 +#define MMEA0_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x9 +#define MMEA0_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0xb +#define MMEA0_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0xc +#define MMEA0_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0xe +#define MMEA0_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0xf +#define MMEA0_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x11 +#define MMEA0_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x12 +#define MMEA0_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x14 +#define MMEA0_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L +#define MMEA0_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000004L +#define MMEA0_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L +#define MMEA0_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000020L +#define MMEA0_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L +#define MMEA0_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000100L +#define MMEA0_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L +#define MMEA0_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000800L +#define MMEA0_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L +#define MMEA0_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00004000L +#define MMEA0_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L +#define MMEA0_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00020000L +#define MMEA0_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L +#define MMEA0_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00100000L +//MMEA0_CGTT_CLK_CTRL +#define MMEA0_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define MMEA0_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define MMEA0_CGTT_CLK_CTRL__SPARE0__SHIFT 0xc +#define MMEA0_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE__SHIFT 0x14 +#define MMEA0_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ__SHIFT 0x15 +#define MMEA0_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN__SHIFT 0x16 +#define MMEA0_CGTT_CLK_CTRL__SPARE1__SHIFT 0x17 +#define MMEA0_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define MMEA0_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE__SHIFT 0x1c +#define MMEA0_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ__SHIFT 0x1d +#define MMEA0_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN__SHIFT 0x1e +#define MMEA0_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER__SHIFT 0x1f +#define MMEA0_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define MMEA0_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define MMEA0_CGTT_CLK_CTRL__SPARE0_MASK 0x000FF000L +#define MMEA0_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE_MASK 0x00100000L +#define MMEA0_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ_MASK 0x00200000L +#define MMEA0_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN_MASK 0x00400000L +#define MMEA0_CGTT_CLK_CTRL__SPARE1_MASK 0x07800000L +#define MMEA0_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L 
+#define MMEA0_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE_MASK 0x10000000L +#define MMEA0_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ_MASK 0x20000000L +#define MMEA0_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN_MASK 0x40000000L +#define MMEA0_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER_MASK 0x80000000L +//MMEA0_EDC_MODE +#define MMEA0_EDC_MODE__COUNT_FED_OUT__SHIFT 0x10 +#define MMEA0_EDC_MODE__GATE_FUE__SHIFT 0x11 +#define MMEA0_EDC_MODE__DED_MODE__SHIFT 0x14 +#define MMEA0_EDC_MODE__PROP_FED__SHIFT 0x1d +#define MMEA0_EDC_MODE__BYPASS__SHIFT 0x1f +#define MMEA0_EDC_MODE__COUNT_FED_OUT_MASK 0x00010000L +#define MMEA0_EDC_MODE__GATE_FUE_MASK 0x00020000L +#define MMEA0_EDC_MODE__DED_MODE_MASK 0x00300000L +#define MMEA0_EDC_MODE__PROP_FED_MASK 0x20000000L +#define MMEA0_EDC_MODE__BYPASS_MASK 0x80000000L +//MMEA0_ERR_STATUS +#define MMEA0_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0 +#define MMEA0_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT 0x4 +#define MMEA0_ERR_STATUS__SDP_RDRSP_DATASTATUS__SHIFT 0x8 +#define MMEA0_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT 0xa +#define MMEA0_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xb +#define MMEA0_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xc +#define MMEA0_ERR_STATUS__FUE_FLAG__SHIFT 0xd +#define MMEA0_ERR_STATUS__SDP_RDRSP_STATUS_MASK 0x0000000FL +#define MMEA0_ERR_STATUS__SDP_WRRSP_STATUS_MASK 0x000000F0L +#define MMEA0_ERR_STATUS__SDP_RDRSP_DATASTATUS_MASK 0x00000300L +#define MMEA0_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L +#define MMEA0_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00000800L +#define MMEA0_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00001000L +#define MMEA0_ERR_STATUS__FUE_FLAG_MASK 0x00002000L +//MMEA0_MISC2 +#define MMEA0_MISC2__CSGROUP_SWAP_IN_DRAM_ARB__SHIFT 0x0 +#define MMEA0_MISC2__CSGROUP_SWAP_IN_GMI_ARB__SHIFT 0x1 +#define MMEA0_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM__SHIFT 0x2 +#define MMEA0_MISC2__CSGRP_BURST_LIMIT_DATA_GMI__SHIFT 0x7 +#define MMEA0_MISC2__IO_RDWR_PRIORITY_ENABLE__SHIFT 0xc +#define MMEA0_MISC2__RRET_SWAP_MODE__SHIFT 0xd +#define MMEA0_MISC2__CSGROUP_SWAP_IN_DRAM_ARB_MASK 0x00000001L +#define MMEA0_MISC2__CSGROUP_SWAP_IN_GMI_ARB_MASK 0x00000002L +#define MMEA0_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM_MASK 0x0000007CL +#define MMEA0_MISC2__CSGRP_BURST_LIMIT_DATA_GMI_MASK 0x00000F80L +#define MMEA0_MISC2__IO_RDWR_PRIORITY_ENABLE_MASK 0x00001000L +#define MMEA0_MISC2__RRET_SWAP_MODE_MASK 0x00002000L +//MMEA0_ADDRDEC_SELECT +#define MMEA0_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_START__SHIFT 0x0 +#define MMEA0_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_END__SHIFT 0x5 +#define MMEA0_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_START__SHIFT 0xa +#define MMEA0_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_END__SHIFT 0xf +#define MMEA0_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_START_MASK 0x0000001FL +#define MMEA0_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_END_MASK 0x000003E0L +#define MMEA0_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_START_MASK 0x00007C00L +#define MMEA0_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_END_MASK 0x000F8000L +//MMEA0_EDC_CNT3 +#define MMEA0_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT__SHIFT 0x0 +#define MMEA0_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT__SHIFT 0x2 +#define MMEA0_EDC_CNT3__IORD_CMDMEM_DED_COUNT__SHIFT 0x4 +#define MMEA0_EDC_CNT3__IOWR_CMDMEM_DED_COUNT__SHIFT 0x6 +#define MMEA0_EDC_CNT3__IOWR_DATAMEM_DED_COUNT__SHIFT 0x8 +#define MMEA0_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT__SHIFT 0xa +#define MMEA0_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT__SHIFT 0xc +#define MMEA0_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT_MASK 0x00000003L +#define MMEA0_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT_MASK 0x0000000CL +#define 
MMEA0_EDC_CNT3__IORD_CMDMEM_DED_COUNT_MASK 0x00000030L +#define MMEA0_EDC_CNT3__IOWR_CMDMEM_DED_COUNT_MASK 0x000000C0L +#define MMEA0_EDC_CNT3__IOWR_DATAMEM_DED_COUNT_MASK 0x00000300L +#define MMEA0_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT_MASK 0x00000C00L +#define MMEA0_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT_MASK 0x00003000L + + +// addressBlock: mmhub_ea_mmeadec1 +//MMEA1_DRAM_RD_CLI2GRP_MAP0 +#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0 +#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2 +#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4 +#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6 +#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8 +#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa +#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc +#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe +#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10 +#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12 +#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14 +#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16 +#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18 +#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a +#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c +#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e +#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L +#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL +#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L +#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L +#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L +#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L +#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L +#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L +#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L +#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L +#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L +#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L +#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L +#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L +#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L +#define MMEA1_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L +//MMEA1_DRAM_RD_CLI2GRP_MAP1 +#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0 +#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2 +#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4 +#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6 +#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8 +#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa +#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc +#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe +#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10 +#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12 +#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14 +#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16 +#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18 +#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a +#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c +#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e +#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L +#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL +#define 
MMEA1_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L +#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L +#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L +#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L +#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L +#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L +#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L +#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L +#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L +#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L +#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L +#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L +#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L +#define MMEA1_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L +//MMEA1_DRAM_WR_CLI2GRP_MAP0 +#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0 +#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2 +#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4 +#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6 +#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8 +#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa +#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc +#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe +#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10 +#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12 +#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14 +#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16 +#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18 +#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a +#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c +#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e +#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L +#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL +#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L +#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L +#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L +#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L +#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L +#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L +#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L +#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L +#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L +#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L +#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L +#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L +#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L +#define MMEA1_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L +//MMEA1_DRAM_WR_CLI2GRP_MAP1 +#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0 +#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2 +#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4 +#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6 +#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8 +#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa +#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc +#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe +#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10 +#define 
MMEA1_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12 +#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14 +#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16 +#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18 +#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a +#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c +#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e +#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L +#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL +#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L +#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L +#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L +#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L +#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L +#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L +#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L +#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L +#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L +#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L +#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L +#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L +#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L +#define MMEA1_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L +//MMEA1_DRAM_RD_GRP2VC_MAP +#define MMEA1_DRAM_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0 +#define MMEA1_DRAM_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3 +#define MMEA1_DRAM_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6 +#define MMEA1_DRAM_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9 +#define MMEA1_DRAM_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L +#define MMEA1_DRAM_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L +#define MMEA1_DRAM_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L +#define MMEA1_DRAM_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L +//MMEA1_DRAM_WR_GRP2VC_MAP +#define MMEA1_DRAM_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0 +#define MMEA1_DRAM_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3 +#define MMEA1_DRAM_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6 +#define MMEA1_DRAM_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9 +#define MMEA1_DRAM_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L +#define MMEA1_DRAM_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L +#define MMEA1_DRAM_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L +#define MMEA1_DRAM_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L +//MMEA1_DRAM_RD_LAZY +#define MMEA1_DRAM_RD_LAZY__GROUP0_DELAY__SHIFT 0x0 +#define MMEA1_DRAM_RD_LAZY__GROUP1_DELAY__SHIFT 0x3 +#define MMEA1_DRAM_RD_LAZY__GROUP2_DELAY__SHIFT 0x6 +#define MMEA1_DRAM_RD_LAZY__GROUP3_DELAY__SHIFT 0x9 +#define MMEA1_DRAM_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc +#define MMEA1_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14 +#define MMEA1_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b +#define MMEA1_DRAM_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L +#define MMEA1_DRAM_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L +#define MMEA1_DRAM_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L +#define MMEA1_DRAM_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L +#define MMEA1_DRAM_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L +#define MMEA1_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L +#define MMEA1_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L +//MMEA1_DRAM_WR_LAZY +#define MMEA1_DRAM_WR_LAZY__GROUP0_DELAY__SHIFT 0x0 +#define MMEA1_DRAM_WR_LAZY__GROUP1_DELAY__SHIFT 0x3 +#define MMEA1_DRAM_WR_LAZY__GROUP2_DELAY__SHIFT 0x6 +#define MMEA1_DRAM_WR_LAZY__GROUP3_DELAY__SHIFT 0x9 +#define 
MMEA1_DRAM_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc +#define MMEA1_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14 +#define MMEA1_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b +#define MMEA1_DRAM_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L +#define MMEA1_DRAM_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L +#define MMEA1_DRAM_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L +#define MMEA1_DRAM_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L +#define MMEA1_DRAM_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L +#define MMEA1_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L +#define MMEA1_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L +//MMEA1_DRAM_RD_CAM_CNTL +#define MMEA1_DRAM_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0 +#define MMEA1_DRAM_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4 +#define MMEA1_DRAM_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8 +#define MMEA1_DRAM_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc +#define MMEA1_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10 +#define MMEA1_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13 +#define MMEA1_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16 +#define MMEA1_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19 +#define MMEA1_DRAM_RD_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c +#define MMEA1_DRAM_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL +#define MMEA1_DRAM_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L +#define MMEA1_DRAM_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L +#define MMEA1_DRAM_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L +#define MMEA1_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L +#define MMEA1_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L +#define MMEA1_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L +#define MMEA1_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L +#define MMEA1_DRAM_RD_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L +//MMEA1_DRAM_WR_CAM_CNTL +#define MMEA1_DRAM_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0 +#define MMEA1_DRAM_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4 +#define MMEA1_DRAM_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8 +#define MMEA1_DRAM_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc +#define MMEA1_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10 +#define MMEA1_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13 +#define MMEA1_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16 +#define MMEA1_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19 +#define MMEA1_DRAM_WR_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c +#define MMEA1_DRAM_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL +#define MMEA1_DRAM_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L +#define MMEA1_DRAM_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L +#define MMEA1_DRAM_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L +#define MMEA1_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L +#define MMEA1_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L +#define MMEA1_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L +#define MMEA1_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L +#define MMEA1_DRAM_WR_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L +//MMEA1_DRAM_PAGE_BURST +#define MMEA1_DRAM_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0 +#define MMEA1_DRAM_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8 +#define MMEA1_DRAM_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10 +#define MMEA1_DRAM_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18 +#define MMEA1_DRAM_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL +#define MMEA1_DRAM_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L +#define MMEA1_DRAM_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L +#define MMEA1_DRAM_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L +//MMEA1_DRAM_RD_PRI_AGE +#define MMEA1_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0 +#define 
MMEA1_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3 +#define MMEA1_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6 +#define MMEA1_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9 +#define MMEA1_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc +#define MMEA1_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf +#define MMEA1_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12 +#define MMEA1_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15 +#define MMEA1_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L +#define MMEA1_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L +#define MMEA1_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L +#define MMEA1_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L +#define MMEA1_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L +#define MMEA1_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L +#define MMEA1_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L +#define MMEA1_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L +//MMEA1_DRAM_WR_PRI_AGE +#define MMEA1_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0 +#define MMEA1_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3 +#define MMEA1_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6 +#define MMEA1_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9 +#define MMEA1_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc +#define MMEA1_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf +#define MMEA1_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12 +#define MMEA1_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15 +#define MMEA1_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L +#define MMEA1_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L +#define MMEA1_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L +#define MMEA1_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L +#define MMEA1_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L +#define MMEA1_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L +#define MMEA1_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L +#define MMEA1_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L +//MMEA1_DRAM_RD_PRI_QUEUING +#define MMEA1_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0 +#define MMEA1_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3 +#define MMEA1_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6 +#define MMEA1_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9 +#define MMEA1_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L +#define MMEA1_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L +#define MMEA1_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L +#define MMEA1_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L +//MMEA1_DRAM_WR_PRI_QUEUING +#define MMEA1_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0 +#define MMEA1_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3 +#define MMEA1_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6 +#define MMEA1_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9 +#define MMEA1_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L +#define MMEA1_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L +#define MMEA1_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L +#define MMEA1_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L +//MMEA1_DRAM_RD_PRI_FIXED +#define MMEA1_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0 +#define MMEA1_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3 +#define 
MMEA1_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6 +#define MMEA1_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9 +#define MMEA1_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L +#define MMEA1_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L +#define MMEA1_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L +#define MMEA1_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L +//MMEA1_DRAM_WR_PRI_FIXED +#define MMEA1_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0 +#define MMEA1_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3 +#define MMEA1_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6 +#define MMEA1_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9 +#define MMEA1_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L +#define MMEA1_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L +#define MMEA1_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L +#define MMEA1_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L +//MMEA1_DRAM_RD_PRI_URGENCY +#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0 +#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3 +#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6 +#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9 +#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc +#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd +#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe +#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf +#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L +#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L +#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L +#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L +#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L +#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L +#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L +#define MMEA1_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L +//MMEA1_DRAM_WR_PRI_URGENCY +#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0 +#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3 +#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6 +#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9 +#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc +#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd +#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe +#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf +#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L +#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L +#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L +#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L +#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L +#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L +#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L +#define MMEA1_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L +//MMEA1_DRAM_RD_PRI_QUANT_PRI1 +#define MMEA1_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0 
+#define MMEA1_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA1_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA1_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA1_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA1_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA1_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA1_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA1_DRAM_RD_PRI_QUANT_PRI2 +#define MMEA1_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA1_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA1_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA1_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA1_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA1_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA1_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA1_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA1_DRAM_RD_PRI_QUANT_PRI3 +#define MMEA1_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA1_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA1_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA1_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA1_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA1_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA1_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA1_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA1_DRAM_WR_PRI_QUANT_PRI1 +#define MMEA1_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA1_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA1_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA1_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA1_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA1_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA1_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA1_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA1_DRAM_WR_PRI_QUANT_PRI2 +#define MMEA1_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA1_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA1_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA1_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA1_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA1_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA1_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA1_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA1_DRAM_WR_PRI_QUANT_PRI3 +#define MMEA1_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA1_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA1_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA1_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA1_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA1_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA1_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA1_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA1_GMI_RD_CLI2GRP_MAP0 +#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0 
+#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2 +#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4 +#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6 +#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8 +#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa +#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc +#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe +#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10 +#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12 +#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14 +#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16 +#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18 +#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a +#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c +#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e +#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L +#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL +#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L +#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L +#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L +#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L +#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L +#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L +#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L +#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L +#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L +#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L +#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L +#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L +#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L +#define MMEA1_GMI_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L +//MMEA1_GMI_RD_CLI2GRP_MAP1 +#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0 +#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2 +#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4 +#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6 +#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8 +#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa +#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc +#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe +#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10 +#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12 +#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14 +#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16 +#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18 +#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a +#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c +#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e +#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L +#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL +#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L +#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L +#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L +#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L +#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L +#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L +#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L +#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L +#define 
MMEA1_GMI_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L +#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L +#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L +#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L +#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L +#define MMEA1_GMI_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L +//MMEA1_GMI_WR_CLI2GRP_MAP0 +#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0 +#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2 +#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4 +#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6 +#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8 +#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa +#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc +#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe +#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10 +#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12 +#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14 +#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16 +#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18 +#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a +#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c +#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e +#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L +#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL +#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L +#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L +#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L +#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L +#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L +#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L +#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L +#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L +#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L +#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L +#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L +#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L +#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L +#define MMEA1_GMI_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L +//MMEA1_GMI_WR_CLI2GRP_MAP1 +#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0 +#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2 +#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4 +#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6 +#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8 +#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa +#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc +#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe +#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10 +#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12 +#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14 +#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16 +#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18 +#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a +#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c +#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e +#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L +#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL +#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 
0x00000030L +#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L +#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L +#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L +#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L +#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L +#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L +#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L +#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L +#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L +#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L +#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L +#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L +#define MMEA1_GMI_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L +//MMEA1_GMI_RD_GRP2VC_MAP +#define MMEA1_GMI_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0 +#define MMEA1_GMI_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3 +#define MMEA1_GMI_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6 +#define MMEA1_GMI_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9 +#define MMEA1_GMI_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L +#define MMEA1_GMI_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L +#define MMEA1_GMI_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L +#define MMEA1_GMI_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L +//MMEA1_GMI_WR_GRP2VC_MAP +#define MMEA1_GMI_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0 +#define MMEA1_GMI_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3 +#define MMEA1_GMI_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6 +#define MMEA1_GMI_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9 +#define MMEA1_GMI_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L +#define MMEA1_GMI_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L +#define MMEA1_GMI_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L +#define MMEA1_GMI_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L +//MMEA1_GMI_RD_LAZY +#define MMEA1_GMI_RD_LAZY__GROUP0_DELAY__SHIFT 0x0 +#define MMEA1_GMI_RD_LAZY__GROUP1_DELAY__SHIFT 0x3 +#define MMEA1_GMI_RD_LAZY__GROUP2_DELAY__SHIFT 0x6 +#define MMEA1_GMI_RD_LAZY__GROUP3_DELAY__SHIFT 0x9 +#define MMEA1_GMI_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc +#define MMEA1_GMI_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14 +#define MMEA1_GMI_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b +#define MMEA1_GMI_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L +#define MMEA1_GMI_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L +#define MMEA1_GMI_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L +#define MMEA1_GMI_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L +#define MMEA1_GMI_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L +#define MMEA1_GMI_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L +#define MMEA1_GMI_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L +//MMEA1_GMI_WR_LAZY +#define MMEA1_GMI_WR_LAZY__GROUP0_DELAY__SHIFT 0x0 +#define MMEA1_GMI_WR_LAZY__GROUP1_DELAY__SHIFT 0x3 +#define MMEA1_GMI_WR_LAZY__GROUP2_DELAY__SHIFT 0x6 +#define MMEA1_GMI_WR_LAZY__GROUP3_DELAY__SHIFT 0x9 +#define MMEA1_GMI_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc +#define MMEA1_GMI_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14 +#define MMEA1_GMI_WR_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b +#define MMEA1_GMI_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L +#define MMEA1_GMI_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L +#define MMEA1_GMI_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L +#define MMEA1_GMI_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L +#define MMEA1_GMI_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L +#define MMEA1_GMI_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L +#define MMEA1_GMI_WR_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L +//MMEA1_GMI_RD_CAM_CNTL +#define 
MMEA1_GMI_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0 +#define MMEA1_GMI_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4 +#define MMEA1_GMI_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8 +#define MMEA1_GMI_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc +#define MMEA1_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10 +#define MMEA1_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13 +#define MMEA1_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16 +#define MMEA1_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19 +#define MMEA1_GMI_RD_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c +#define MMEA1_GMI_RD_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d +#define MMEA1_GMI_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL +#define MMEA1_GMI_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L +#define MMEA1_GMI_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L +#define MMEA1_GMI_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L +#define MMEA1_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L +#define MMEA1_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L +#define MMEA1_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L +#define MMEA1_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L +#define MMEA1_GMI_RD_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L +#define MMEA1_GMI_RD_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L +//MMEA1_GMI_WR_CAM_CNTL +#define MMEA1_GMI_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0 +#define MMEA1_GMI_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4 +#define MMEA1_GMI_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8 +#define MMEA1_GMI_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc +#define MMEA1_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10 +#define MMEA1_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13 +#define MMEA1_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16 +#define MMEA1_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19 +#define MMEA1_GMI_WR_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c +#define MMEA1_GMI_WR_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d +#define MMEA1_GMI_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL +#define MMEA1_GMI_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L +#define MMEA1_GMI_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L +#define MMEA1_GMI_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L +#define MMEA1_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L +#define MMEA1_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L +#define MMEA1_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L +#define MMEA1_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L +#define MMEA1_GMI_WR_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L +#define MMEA1_GMI_WR_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L +//MMEA1_GMI_PAGE_BURST +#define MMEA1_GMI_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0 +#define MMEA1_GMI_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8 +#define MMEA1_GMI_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10 +#define MMEA1_GMI_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18 +#define MMEA1_GMI_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL +#define MMEA1_GMI_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L +#define MMEA1_GMI_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L +#define MMEA1_GMI_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L +//MMEA1_GMI_RD_PRI_AGE +#define MMEA1_GMI_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0 +#define MMEA1_GMI_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3 +#define MMEA1_GMI_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6 +#define MMEA1_GMI_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9 +#define MMEA1_GMI_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc +#define MMEA1_GMI_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf +#define MMEA1_GMI_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12 +#define MMEA1_GMI_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15 
+#define MMEA1_GMI_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L +#define MMEA1_GMI_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L +#define MMEA1_GMI_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L +#define MMEA1_GMI_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L +#define MMEA1_GMI_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L +#define MMEA1_GMI_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L +#define MMEA1_GMI_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L +#define MMEA1_GMI_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L +//MMEA1_GMI_WR_PRI_AGE +#define MMEA1_GMI_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0 +#define MMEA1_GMI_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3 +#define MMEA1_GMI_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6 +#define MMEA1_GMI_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9 +#define MMEA1_GMI_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc +#define MMEA1_GMI_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf +#define MMEA1_GMI_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12 +#define MMEA1_GMI_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15 +#define MMEA1_GMI_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L +#define MMEA1_GMI_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L +#define MMEA1_GMI_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L +#define MMEA1_GMI_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L +#define MMEA1_GMI_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L +#define MMEA1_GMI_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L +#define MMEA1_GMI_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L +#define MMEA1_GMI_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L +//MMEA1_GMI_RD_PRI_QUEUING +#define MMEA1_GMI_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0 +#define MMEA1_GMI_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3 +#define MMEA1_GMI_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6 +#define MMEA1_GMI_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9 +#define MMEA1_GMI_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L +#define MMEA1_GMI_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L +#define MMEA1_GMI_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L +#define MMEA1_GMI_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L +//MMEA1_GMI_WR_PRI_QUEUING +#define MMEA1_GMI_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0 +#define MMEA1_GMI_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3 +#define MMEA1_GMI_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6 +#define MMEA1_GMI_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9 +#define MMEA1_GMI_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L +#define MMEA1_GMI_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L +#define MMEA1_GMI_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L +#define MMEA1_GMI_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L +//MMEA1_GMI_RD_PRI_FIXED +#define MMEA1_GMI_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0 +#define MMEA1_GMI_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3 +#define MMEA1_GMI_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6 +#define MMEA1_GMI_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9 +#define MMEA1_GMI_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L +#define MMEA1_GMI_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L +#define MMEA1_GMI_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L +#define MMEA1_GMI_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L +//MMEA1_GMI_WR_PRI_FIXED +#define 
MMEA1_GMI_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0 +#define MMEA1_GMI_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3 +#define MMEA1_GMI_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6 +#define MMEA1_GMI_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9 +#define MMEA1_GMI_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L +#define MMEA1_GMI_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L +#define MMEA1_GMI_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L +#define MMEA1_GMI_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L +//MMEA1_GMI_RD_PRI_URGENCY +#define MMEA1_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0 +#define MMEA1_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3 +#define MMEA1_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6 +#define MMEA1_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9 +#define MMEA1_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc +#define MMEA1_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd +#define MMEA1_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe +#define MMEA1_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf +#define MMEA1_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L +#define MMEA1_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L +#define MMEA1_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L +#define MMEA1_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L +#define MMEA1_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L +#define MMEA1_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L +#define MMEA1_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L +#define MMEA1_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L +//MMEA1_GMI_WR_PRI_URGENCY +#define MMEA1_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0 +#define MMEA1_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3 +#define MMEA1_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6 +#define MMEA1_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9 +#define MMEA1_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc +#define MMEA1_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd +#define MMEA1_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe +#define MMEA1_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf +#define MMEA1_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L +#define MMEA1_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L +#define MMEA1_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L +#define MMEA1_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L +#define MMEA1_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L +#define MMEA1_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L +#define MMEA1_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L +#define MMEA1_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L +//MMEA1_GMI_RD_PRI_URGENCY_MASKING +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0 +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1 +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2 +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3 +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4 +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5 +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6 +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7 +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8 
+#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9 +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10 +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11 +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12 +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13 +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14 +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15 +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16 +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17 +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18 +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19 +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 
0x10000000L +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L +#define MMEA1_GMI_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L +//MMEA1_GMI_WR_PRI_URGENCY_MASKING +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0 +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1 +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2 +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3 +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4 +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5 +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6 +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7 +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8 +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9 +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10 +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11 +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12 +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13 +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14 +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15 +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16 +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17 +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18 +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19 +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L +#define 
MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L +#define MMEA1_GMI_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L +//MMEA1_GMI_RD_PRI_QUANT_PRI1 +#define MMEA1_GMI_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA1_GMI_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA1_GMI_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA1_GMI_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA1_GMI_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA1_GMI_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA1_GMI_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA1_GMI_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA1_GMI_RD_PRI_QUANT_PRI2 +#define MMEA1_GMI_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA1_GMI_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA1_GMI_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA1_GMI_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA1_GMI_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA1_GMI_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA1_GMI_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA1_GMI_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA1_GMI_RD_PRI_QUANT_PRI3 +#define MMEA1_GMI_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA1_GMI_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA1_GMI_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA1_GMI_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA1_GMI_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA1_GMI_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA1_GMI_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA1_GMI_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA1_GMI_WR_PRI_QUANT_PRI1 +#define MMEA1_GMI_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA1_GMI_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA1_GMI_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA1_GMI_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA1_GMI_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA1_GMI_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA1_GMI_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA1_GMI_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA1_GMI_WR_PRI_QUANT_PRI2 +#define MMEA1_GMI_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA1_GMI_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 
0x8 +#define MMEA1_GMI_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA1_GMI_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA1_GMI_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA1_GMI_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA1_GMI_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA1_GMI_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA1_GMI_WR_PRI_QUANT_PRI3 +#define MMEA1_GMI_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA1_GMI_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA1_GMI_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA1_GMI_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA1_GMI_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA1_GMI_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA1_GMI_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA1_GMI_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA1_ADDRNORM_BASE_ADDR0 +#define MMEA1_ADDRNORM_BASE_ADDR0__ADDR_RNG_VAL__SHIFT 0x0 +#define MMEA1_ADDRNORM_BASE_ADDR0__LGCY_MMIO_HOLE_EN__SHIFT 0x1 +#define MMEA1_ADDRNORM_BASE_ADDR0__INTLV_NUM_CHAN__SHIFT 0x2 +#define MMEA1_ADDRNORM_BASE_ADDR0__INTLV_NUM_DIES__SHIFT 0x6 +#define MMEA1_ADDRNORM_BASE_ADDR0__INTLV_NUM_SOCKETS__SHIFT 0x8 +#define MMEA1_ADDRNORM_BASE_ADDR0__INTLV_ADDR_SEL__SHIFT 0x9 +#define MMEA1_ADDRNORM_BASE_ADDR0__BASE_ADDR__SHIFT 0xc +#define MMEA1_ADDRNORM_BASE_ADDR0__ADDR_RNG_VAL_MASK 0x00000001L +#define MMEA1_ADDRNORM_BASE_ADDR0__LGCY_MMIO_HOLE_EN_MASK 0x00000002L +#define MMEA1_ADDRNORM_BASE_ADDR0__INTLV_NUM_CHAN_MASK 0x0000003CL +#define MMEA1_ADDRNORM_BASE_ADDR0__INTLV_NUM_DIES_MASK 0x000000C0L +#define MMEA1_ADDRNORM_BASE_ADDR0__INTLV_NUM_SOCKETS_MASK 0x00000100L +#define MMEA1_ADDRNORM_BASE_ADDR0__INTLV_ADDR_SEL_MASK 0x00000E00L +#define MMEA1_ADDRNORM_BASE_ADDR0__BASE_ADDR_MASK 0xFFFFF000L +//MMEA1_ADDRNORM_LIMIT_ADDR0 +#define MMEA1_ADDRNORM_LIMIT_ADDR0__DST_FABRIC_ID__SHIFT 0x0 +#define MMEA1_ADDRNORM_LIMIT_ADDR0__LIMIT_ADDR__SHIFT 0xc +#define MMEA1_ADDRNORM_LIMIT_ADDR0__DST_FABRIC_ID_MASK 0x0000001FL +#define MMEA1_ADDRNORM_LIMIT_ADDR0__LIMIT_ADDR_MASK 0xFFFFF000L +//MMEA1_ADDRNORM_BASE_ADDR1 +#define MMEA1_ADDRNORM_BASE_ADDR1__ADDR_RNG_VAL__SHIFT 0x0 +#define MMEA1_ADDRNORM_BASE_ADDR1__LGCY_MMIO_HOLE_EN__SHIFT 0x1 +#define MMEA1_ADDRNORM_BASE_ADDR1__INTLV_NUM_CHAN__SHIFT 0x2 +#define MMEA1_ADDRNORM_BASE_ADDR1__INTLV_NUM_DIES__SHIFT 0x6 +#define MMEA1_ADDRNORM_BASE_ADDR1__INTLV_NUM_SOCKETS__SHIFT 0x8 +#define MMEA1_ADDRNORM_BASE_ADDR1__INTLV_ADDR_SEL__SHIFT 0x9 +#define MMEA1_ADDRNORM_BASE_ADDR1__BASE_ADDR__SHIFT 0xc +#define MMEA1_ADDRNORM_BASE_ADDR1__ADDR_RNG_VAL_MASK 0x00000001L +#define MMEA1_ADDRNORM_BASE_ADDR1__LGCY_MMIO_HOLE_EN_MASK 0x00000002L +#define MMEA1_ADDRNORM_BASE_ADDR1__INTLV_NUM_CHAN_MASK 0x0000003CL +#define MMEA1_ADDRNORM_BASE_ADDR1__INTLV_NUM_DIES_MASK 0x000000C0L +#define MMEA1_ADDRNORM_BASE_ADDR1__INTLV_NUM_SOCKETS_MASK 0x00000100L +#define MMEA1_ADDRNORM_BASE_ADDR1__INTLV_ADDR_SEL_MASK 0x00000E00L +#define MMEA1_ADDRNORM_BASE_ADDR1__BASE_ADDR_MASK 0xFFFFF000L +//MMEA1_ADDRNORM_LIMIT_ADDR1 +#define MMEA1_ADDRNORM_LIMIT_ADDR1__DST_FABRIC_ID__SHIFT 0x0 +#define MMEA1_ADDRNORM_LIMIT_ADDR1__LIMIT_ADDR__SHIFT 0xc +#define MMEA1_ADDRNORM_LIMIT_ADDR1__DST_FABRIC_ID_MASK 0x0000001FL +#define MMEA1_ADDRNORM_LIMIT_ADDR1__LIMIT_ADDR_MASK 0xFFFFF000L +//MMEA1_ADDRNORM_OFFSET_ADDR1 +#define MMEA1_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_EN__SHIFT 0x0 
+#define MMEA1_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET__SHIFT 0x14 +#define MMEA1_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_EN_MASK 0x00000001L +#define MMEA1_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_MASK 0xFFF00000L +//MMEA1_ADDRNORM_BASE_ADDR2 +#define MMEA1_ADDRNORM_BASE_ADDR2__ADDR_RNG_VAL__SHIFT 0x0 +#define MMEA1_ADDRNORM_BASE_ADDR2__LGCY_MMIO_HOLE_EN__SHIFT 0x1 +#define MMEA1_ADDRNORM_BASE_ADDR2__INTLV_NUM_CHAN__SHIFT 0x2 +#define MMEA1_ADDRNORM_BASE_ADDR2__INTLV_NUM_DIES__SHIFT 0x6 +#define MMEA1_ADDRNORM_BASE_ADDR2__INTLV_NUM_SOCKETS__SHIFT 0x8 +#define MMEA1_ADDRNORM_BASE_ADDR2__INTLV_ADDR_SEL__SHIFT 0x9 +#define MMEA1_ADDRNORM_BASE_ADDR2__BASE_ADDR__SHIFT 0xc +#define MMEA1_ADDRNORM_BASE_ADDR2__ADDR_RNG_VAL_MASK 0x00000001L +#define MMEA1_ADDRNORM_BASE_ADDR2__LGCY_MMIO_HOLE_EN_MASK 0x00000002L +#define MMEA1_ADDRNORM_BASE_ADDR2__INTLV_NUM_CHAN_MASK 0x0000003CL +#define MMEA1_ADDRNORM_BASE_ADDR2__INTLV_NUM_DIES_MASK 0x000000C0L +#define MMEA1_ADDRNORM_BASE_ADDR2__INTLV_NUM_SOCKETS_MASK 0x00000100L +#define MMEA1_ADDRNORM_BASE_ADDR2__INTLV_ADDR_SEL_MASK 0x00000E00L +#define MMEA1_ADDRNORM_BASE_ADDR2__BASE_ADDR_MASK 0xFFFFF000L +//MMEA1_ADDRNORM_LIMIT_ADDR2 +#define MMEA1_ADDRNORM_LIMIT_ADDR2__DST_FABRIC_ID__SHIFT 0x0 +#define MMEA1_ADDRNORM_LIMIT_ADDR2__LIMIT_ADDR__SHIFT 0xc +#define MMEA1_ADDRNORM_LIMIT_ADDR2__DST_FABRIC_ID_MASK 0x0000001FL +#define MMEA1_ADDRNORM_LIMIT_ADDR2__LIMIT_ADDR_MASK 0xFFFFF000L +//MMEA1_ADDRNORM_BASE_ADDR3 +#define MMEA1_ADDRNORM_BASE_ADDR3__ADDR_RNG_VAL__SHIFT 0x0 +#define MMEA1_ADDRNORM_BASE_ADDR3__LGCY_MMIO_HOLE_EN__SHIFT 0x1 +#define MMEA1_ADDRNORM_BASE_ADDR3__INTLV_NUM_CHAN__SHIFT 0x2 +#define MMEA1_ADDRNORM_BASE_ADDR3__INTLV_NUM_DIES__SHIFT 0x6 +#define MMEA1_ADDRNORM_BASE_ADDR3__INTLV_NUM_SOCKETS__SHIFT 0x8 +#define MMEA1_ADDRNORM_BASE_ADDR3__INTLV_ADDR_SEL__SHIFT 0x9 +#define MMEA1_ADDRNORM_BASE_ADDR3__BASE_ADDR__SHIFT 0xc +#define MMEA1_ADDRNORM_BASE_ADDR3__ADDR_RNG_VAL_MASK 0x00000001L +#define MMEA1_ADDRNORM_BASE_ADDR3__LGCY_MMIO_HOLE_EN_MASK 0x00000002L +#define MMEA1_ADDRNORM_BASE_ADDR3__INTLV_NUM_CHAN_MASK 0x0000003CL +#define MMEA1_ADDRNORM_BASE_ADDR3__INTLV_NUM_DIES_MASK 0x000000C0L +#define MMEA1_ADDRNORM_BASE_ADDR3__INTLV_NUM_SOCKETS_MASK 0x00000100L +#define MMEA1_ADDRNORM_BASE_ADDR3__INTLV_ADDR_SEL_MASK 0x00000E00L +#define MMEA1_ADDRNORM_BASE_ADDR3__BASE_ADDR_MASK 0xFFFFF000L +//MMEA1_ADDRNORM_LIMIT_ADDR3 +#define MMEA1_ADDRNORM_LIMIT_ADDR3__DST_FABRIC_ID__SHIFT 0x0 +#define MMEA1_ADDRNORM_LIMIT_ADDR3__LIMIT_ADDR__SHIFT 0xc +#define MMEA1_ADDRNORM_LIMIT_ADDR3__DST_FABRIC_ID_MASK 0x0000001FL +#define MMEA1_ADDRNORM_LIMIT_ADDR3__LIMIT_ADDR_MASK 0xFFFFF000L +//MMEA1_ADDRNORM_OFFSET_ADDR3 +#define MMEA1_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_EN__SHIFT 0x0 +#define MMEA1_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET__SHIFT 0x14 +#define MMEA1_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_EN_MASK 0x00000001L +#define MMEA1_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_MASK 0xFFF00000L +//MMEA1_ADDRNORM_BASE_ADDR4 +#define MMEA1_ADDRNORM_BASE_ADDR4__ADDR_RNG_VAL__SHIFT 0x0 +#define MMEA1_ADDRNORM_BASE_ADDR4__LGCY_MMIO_HOLE_EN__SHIFT 0x1 +#define MMEA1_ADDRNORM_BASE_ADDR4__INTLV_NUM_CHAN__SHIFT 0x2 +#define MMEA1_ADDRNORM_BASE_ADDR4__INTLV_NUM_DIES__SHIFT 0x6 +#define MMEA1_ADDRNORM_BASE_ADDR4__INTLV_NUM_SOCKETS__SHIFT 0x8 +#define MMEA1_ADDRNORM_BASE_ADDR4__INTLV_ADDR_SEL__SHIFT 0x9 +#define MMEA1_ADDRNORM_BASE_ADDR4__BASE_ADDR__SHIFT 0xc +#define MMEA1_ADDRNORM_BASE_ADDR4__ADDR_RNG_VAL_MASK 0x00000001L +#define MMEA1_ADDRNORM_BASE_ADDR4__LGCY_MMIO_HOLE_EN_MASK 0x00000002L 
+#define MMEA1_ADDRNORM_BASE_ADDR4__INTLV_NUM_CHAN_MASK 0x0000003CL +#define MMEA1_ADDRNORM_BASE_ADDR4__INTLV_NUM_DIES_MASK 0x000000C0L +#define MMEA1_ADDRNORM_BASE_ADDR4__INTLV_NUM_SOCKETS_MASK 0x00000100L +#define MMEA1_ADDRNORM_BASE_ADDR4__INTLV_ADDR_SEL_MASK 0x00000E00L +#define MMEA1_ADDRNORM_BASE_ADDR4__BASE_ADDR_MASK 0xFFFFF000L +//MMEA1_ADDRNORM_LIMIT_ADDR4 +#define MMEA1_ADDRNORM_LIMIT_ADDR4__DST_FABRIC_ID__SHIFT 0x0 +#define MMEA1_ADDRNORM_LIMIT_ADDR4__LIMIT_ADDR__SHIFT 0xc +#define MMEA1_ADDRNORM_LIMIT_ADDR4__DST_FABRIC_ID_MASK 0x0000001FL +#define MMEA1_ADDRNORM_LIMIT_ADDR4__LIMIT_ADDR_MASK 0xFFFFF000L +//MMEA1_ADDRNORM_BASE_ADDR5 +#define MMEA1_ADDRNORM_BASE_ADDR5__ADDR_RNG_VAL__SHIFT 0x0 +#define MMEA1_ADDRNORM_BASE_ADDR5__LGCY_MMIO_HOLE_EN__SHIFT 0x1 +#define MMEA1_ADDRNORM_BASE_ADDR5__INTLV_NUM_CHAN__SHIFT 0x2 +#define MMEA1_ADDRNORM_BASE_ADDR5__INTLV_NUM_DIES__SHIFT 0x6 +#define MMEA1_ADDRNORM_BASE_ADDR5__INTLV_NUM_SOCKETS__SHIFT 0x8 +#define MMEA1_ADDRNORM_BASE_ADDR5__INTLV_ADDR_SEL__SHIFT 0x9 +#define MMEA1_ADDRNORM_BASE_ADDR5__BASE_ADDR__SHIFT 0xc +#define MMEA1_ADDRNORM_BASE_ADDR5__ADDR_RNG_VAL_MASK 0x00000001L +#define MMEA1_ADDRNORM_BASE_ADDR5__LGCY_MMIO_HOLE_EN_MASK 0x00000002L +#define MMEA1_ADDRNORM_BASE_ADDR5__INTLV_NUM_CHAN_MASK 0x0000003CL +#define MMEA1_ADDRNORM_BASE_ADDR5__INTLV_NUM_DIES_MASK 0x000000C0L +#define MMEA1_ADDRNORM_BASE_ADDR5__INTLV_NUM_SOCKETS_MASK 0x00000100L +#define MMEA1_ADDRNORM_BASE_ADDR5__INTLV_ADDR_SEL_MASK 0x00000E00L +#define MMEA1_ADDRNORM_BASE_ADDR5__BASE_ADDR_MASK 0xFFFFF000L +//MMEA1_ADDRNORM_LIMIT_ADDR5 +#define MMEA1_ADDRNORM_LIMIT_ADDR5__DST_FABRIC_ID__SHIFT 0x0 +#define MMEA1_ADDRNORM_LIMIT_ADDR5__LIMIT_ADDR__SHIFT 0xc +#define MMEA1_ADDRNORM_LIMIT_ADDR5__DST_FABRIC_ID_MASK 0x0000001FL +#define MMEA1_ADDRNORM_LIMIT_ADDR5__LIMIT_ADDR_MASK 0xFFFFF000L +//MMEA1_ADDRNORM_OFFSET_ADDR5 +#define MMEA1_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_EN__SHIFT 0x0 +#define MMEA1_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET__SHIFT 0x14 +#define MMEA1_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_EN_MASK 0x00000001L +#define MMEA1_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_MASK 0xFFF00000L +//MMEA1_ADDRNORMDRAM_HOLE_CNTL +#define MMEA1_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_VALID__SHIFT 0x0 +#define MMEA1_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_OFFSET__SHIFT 0x7 +#define MMEA1_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_VALID_MASK 0x00000001L +#define MMEA1_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_OFFSET_MASK 0x0000FF80L +//MMEA1_ADDRNORMGMI_HOLE_CNTL +#define MMEA1_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_VALID__SHIFT 0x0 +#define MMEA1_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_OFFSET__SHIFT 0x7 +#define MMEA1_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_VALID_MASK 0x00000001L +#define MMEA1_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_OFFSET_MASK 0x0000FF80L +//MMEA1_ADDRNORMDRAM_NP2_CHANNEL_CFG +#define MMEA1_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE0__SHIFT 0x0 +#define MMEA1_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE1__SHIFT 0x6 +#define MMEA1_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE0_MASK 0x0000003FL +#define MMEA1_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE1_MASK 0x00000FC0L +//MMEA1_ADDRNORMGMI_NP2_CHANNEL_CFG +#define MMEA1_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE2__SHIFT 0x0 +#define MMEA1_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE3__SHIFT 0x6 +#define MMEA1_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE2_MASK 0x0000003FL +#define MMEA1_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE3_MASK 0x00000FC0L +//MMEA1_ADDRDEC_BANK_CFG +#define MMEA1_ADDRDEC_BANK_CFG__BANK_MASK_DRAM__SHIFT 0x0 
+#define MMEA1_ADDRDEC_BANK_CFG__BANK_MASK_GMI__SHIFT 0x6 +#define MMEA1_ADDRDEC_BANK_CFG__BANKGROUP_SEL_DRAM__SHIFT 0xc +#define MMEA1_ADDRDEC_BANK_CFG__BANKGROUP_SEL_GMI__SHIFT 0xf +#define MMEA1_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_DRAM__SHIFT 0x12 +#define MMEA1_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_GMI__SHIFT 0x13 +#define MMEA1_ADDRDEC_BANK_CFG__BANK_MASK_DRAM_MASK 0x0000003FL +#define MMEA1_ADDRDEC_BANK_CFG__BANK_MASK_GMI_MASK 0x00000FC0L +#define MMEA1_ADDRDEC_BANK_CFG__BANKGROUP_SEL_DRAM_MASK 0x00007000L +#define MMEA1_ADDRDEC_BANK_CFG__BANKGROUP_SEL_GMI_MASK 0x00038000L +#define MMEA1_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_DRAM_MASK 0x00040000L +#define MMEA1_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_GMI_MASK 0x00080000L +//MMEA1_ADDRDEC_MISC_CFG +#define MMEA1_ADDRDEC_MISC_CFG__VCM_EN0__SHIFT 0x0 +#define MMEA1_ADDRDEC_MISC_CFG__VCM_EN1__SHIFT 0x1 +#define MMEA1_ADDRDEC_MISC_CFG__VCM_EN2__SHIFT 0x2 +#define MMEA1_ADDRDEC_MISC_CFG__PCH_MASK_DRAM__SHIFT 0x8 +#define MMEA1_ADDRDEC_MISC_CFG__PCH_MASK_GMI__SHIFT 0x9 +#define MMEA1_ADDRDEC_MISC_CFG__CH_MASK_DRAM__SHIFT 0xc +#define MMEA1_ADDRDEC_MISC_CFG__CH_MASK_GMI__SHIFT 0x11 +#define MMEA1_ADDRDEC_MISC_CFG__CS_MASK_DRAM__SHIFT 0x16 +#define MMEA1_ADDRDEC_MISC_CFG__CS_MASK_GMI__SHIFT 0x18 +#define MMEA1_ADDRDEC_MISC_CFG__RM_MASK_DRAM__SHIFT 0x1a +#define MMEA1_ADDRDEC_MISC_CFG__RM_MASK_GMI__SHIFT 0x1d +#define MMEA1_ADDRDEC_MISC_CFG__VCM_EN0_MASK 0x00000001L +#define MMEA1_ADDRDEC_MISC_CFG__VCM_EN1_MASK 0x00000002L +#define MMEA1_ADDRDEC_MISC_CFG__VCM_EN2_MASK 0x00000004L +#define MMEA1_ADDRDEC_MISC_CFG__PCH_MASK_DRAM_MASK 0x00000100L +#define MMEA1_ADDRDEC_MISC_CFG__PCH_MASK_GMI_MASK 0x00000200L +#define MMEA1_ADDRDEC_MISC_CFG__CH_MASK_DRAM_MASK 0x0001F000L +#define MMEA1_ADDRDEC_MISC_CFG__CH_MASK_GMI_MASK 0x003E0000L +#define MMEA1_ADDRDEC_MISC_CFG__CS_MASK_DRAM_MASK 0x00C00000L +#define MMEA1_ADDRDEC_MISC_CFG__CS_MASK_GMI_MASK 0x03000000L +#define MMEA1_ADDRDEC_MISC_CFG__RM_MASK_DRAM_MASK 0x1C000000L +#define MMEA1_ADDRDEC_MISC_CFG__RM_MASK_GMI_MASK 0xE0000000L +//MMEA1_ADDRDECDRAM_ADDR_HASH_BANK0 +#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK0__XOR_ENABLE__SHIFT 0x0 +#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK0__COL_XOR__SHIFT 0x1 +#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK0__ROW_XOR__SHIFT 0xe +#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK0__XOR_ENABLE_MASK 0x00000001L +#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK0__COL_XOR_MASK 0x00003FFEL +#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK0__ROW_XOR_MASK 0xFFFFC000L +//MMEA1_ADDRDECDRAM_ADDR_HASH_BANK1 +#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK1__XOR_ENABLE__SHIFT 0x0 +#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK1__COL_XOR__SHIFT 0x1 +#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK1__ROW_XOR__SHIFT 0xe +#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK1__XOR_ENABLE_MASK 0x00000001L +#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK1__COL_XOR_MASK 0x00003FFEL +#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK1__ROW_XOR_MASK 0xFFFFC000L +//MMEA1_ADDRDECDRAM_ADDR_HASH_BANK2 +#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK2__XOR_ENABLE__SHIFT 0x0 +#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK2__COL_XOR__SHIFT 0x1 +#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK2__ROW_XOR__SHIFT 0xe +#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK2__XOR_ENABLE_MASK 0x00000001L +#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK2__COL_XOR_MASK 0x00003FFEL +#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK2__ROW_XOR_MASK 0xFFFFC000L +//MMEA1_ADDRDECDRAM_ADDR_HASH_BANK3 +#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK3__XOR_ENABLE__SHIFT 0x0 +#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK3__COL_XOR__SHIFT 0x1 +#define 
MMEA1_ADDRDECDRAM_ADDR_HASH_BANK3__ROW_XOR__SHIFT 0xe +#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK3__XOR_ENABLE_MASK 0x00000001L +#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK3__COL_XOR_MASK 0x00003FFEL +#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK3__ROW_XOR_MASK 0xFFFFC000L +//MMEA1_ADDRDECDRAM_ADDR_HASH_BANK4 +#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK4__XOR_ENABLE__SHIFT 0x0 +#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK4__COL_XOR__SHIFT 0x1 +#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK4__ROW_XOR__SHIFT 0xe +#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK4__XOR_ENABLE_MASK 0x00000001L +#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK4__COL_XOR_MASK 0x00003FFEL +#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK4__ROW_XOR_MASK 0xFFFFC000L +//MMEA1_ADDRDECDRAM_ADDR_HASH_BANK5 +#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK5__XOR_ENABLE__SHIFT 0x0 +#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK5__COL_XOR__SHIFT 0x1 +#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK5__ROW_XOR__SHIFT 0xe +#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK5__XOR_ENABLE_MASK 0x00000001L +#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK5__COL_XOR_MASK 0x00003FFEL +#define MMEA1_ADDRDECDRAM_ADDR_HASH_BANK5__ROW_XOR_MASK 0xFFFFC000L +//MMEA1_ADDRDECDRAM_ADDR_HASH_PC +#define MMEA1_ADDRDECDRAM_ADDR_HASH_PC__XOR_ENABLE__SHIFT 0x0 +#define MMEA1_ADDRDECDRAM_ADDR_HASH_PC__COL_XOR__SHIFT 0x1 +#define MMEA1_ADDRDECDRAM_ADDR_HASH_PC__ROW_XOR__SHIFT 0xe +#define MMEA1_ADDRDECDRAM_ADDR_HASH_PC__XOR_ENABLE_MASK 0x00000001L +#define MMEA1_ADDRDECDRAM_ADDR_HASH_PC__COL_XOR_MASK 0x00003FFEL +#define MMEA1_ADDRDECDRAM_ADDR_HASH_PC__ROW_XOR_MASK 0xFFFFC000L +//MMEA1_ADDRDECDRAM_ADDR_HASH_PC2 +#define MMEA1_ADDRDECDRAM_ADDR_HASH_PC2__BANK_XOR__SHIFT 0x0 +#define MMEA1_ADDRDECDRAM_ADDR_HASH_PC2__BANK_XOR_MASK 0x0000003FL +//MMEA1_ADDRDECDRAM_ADDR_HASH_CS0 +#define MMEA1_ADDRDECDRAM_ADDR_HASH_CS0__XOR_ENABLE__SHIFT 0x0 +#define MMEA1_ADDRDECDRAM_ADDR_HASH_CS0__NA_XOR__SHIFT 0x1 +#define MMEA1_ADDRDECDRAM_ADDR_HASH_CS0__XOR_ENABLE_MASK 0x00000001L +#define MMEA1_ADDRDECDRAM_ADDR_HASH_CS0__NA_XOR_MASK 0xFFFFFFFEL +//MMEA1_ADDRDECDRAM_ADDR_HASH_CS1 +#define MMEA1_ADDRDECDRAM_ADDR_HASH_CS1__XOR_ENABLE__SHIFT 0x0 +#define MMEA1_ADDRDECDRAM_ADDR_HASH_CS1__NA_XOR__SHIFT 0x1 +#define MMEA1_ADDRDECDRAM_ADDR_HASH_CS1__XOR_ENABLE_MASK 0x00000001L +#define MMEA1_ADDRDECDRAM_ADDR_HASH_CS1__NA_XOR_MASK 0xFFFFFFFEL +//MMEA1_ADDRDECDRAM_HARVEST_ENABLE +#define MMEA1_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_EN__SHIFT 0x0 +#define MMEA1_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_VAL__SHIFT 0x1 +#define MMEA1_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_EN__SHIFT 0x2 +#define MMEA1_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_VAL__SHIFT 0x3 +#define MMEA1_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_EN__SHIFT 0x4 +#define MMEA1_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_VAL__SHIFT 0x5 +#define MMEA1_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_EN_MASK 0x00000001L +#define MMEA1_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_VAL_MASK 0x00000002L +#define MMEA1_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_EN_MASK 0x00000004L +#define MMEA1_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_VAL_MASK 0x00000008L +#define MMEA1_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_EN_MASK 0x00000010L +#define MMEA1_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_VAL_MASK 0x00000020L +//MMEA1_ADDRDECGMI_ADDR_HASH_BANK0 +#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK0__XOR_ENABLE__SHIFT 0x0 +#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK0__COL_XOR__SHIFT 0x1 +#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK0__ROW_XOR__SHIFT 0xe +#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK0__XOR_ENABLE_MASK 0x00000001L +#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK0__COL_XOR_MASK 0x00003FFEL 
+#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK0__ROW_XOR_MASK 0xFFFFC000L +//MMEA1_ADDRDECGMI_ADDR_HASH_BANK1 +#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK1__XOR_ENABLE__SHIFT 0x0 +#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK1__COL_XOR__SHIFT 0x1 +#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK1__ROW_XOR__SHIFT 0xe +#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK1__XOR_ENABLE_MASK 0x00000001L +#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK1__COL_XOR_MASK 0x00003FFEL +#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK1__ROW_XOR_MASK 0xFFFFC000L +//MMEA1_ADDRDECGMI_ADDR_HASH_BANK2 +#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK2__XOR_ENABLE__SHIFT 0x0 +#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK2__COL_XOR__SHIFT 0x1 +#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK2__ROW_XOR__SHIFT 0xe +#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK2__XOR_ENABLE_MASK 0x00000001L +#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK2__COL_XOR_MASK 0x00003FFEL +#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK2__ROW_XOR_MASK 0xFFFFC000L +//MMEA1_ADDRDECGMI_ADDR_HASH_BANK3 +#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK3__XOR_ENABLE__SHIFT 0x0 +#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK3__COL_XOR__SHIFT 0x1 +#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK3__ROW_XOR__SHIFT 0xe +#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK3__XOR_ENABLE_MASK 0x00000001L +#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK3__COL_XOR_MASK 0x00003FFEL +#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK3__ROW_XOR_MASK 0xFFFFC000L +//MMEA1_ADDRDECGMI_ADDR_HASH_BANK4 +#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK4__XOR_ENABLE__SHIFT 0x0 +#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK4__COL_XOR__SHIFT 0x1 +#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK4__ROW_XOR__SHIFT 0xe +#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK4__XOR_ENABLE_MASK 0x00000001L +#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK4__COL_XOR_MASK 0x00003FFEL +#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK4__ROW_XOR_MASK 0xFFFFC000L +//MMEA1_ADDRDECGMI_ADDR_HASH_BANK5 +#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK5__XOR_ENABLE__SHIFT 0x0 +#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK5__COL_XOR__SHIFT 0x1 +#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK5__ROW_XOR__SHIFT 0xe +#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK5__XOR_ENABLE_MASK 0x00000001L +#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK5__COL_XOR_MASK 0x00003FFEL +#define MMEA1_ADDRDECGMI_ADDR_HASH_BANK5__ROW_XOR_MASK 0xFFFFC000L +//MMEA1_ADDRDECGMI_ADDR_HASH_PC +#define MMEA1_ADDRDECGMI_ADDR_HASH_PC__XOR_ENABLE__SHIFT 0x0 +#define MMEA1_ADDRDECGMI_ADDR_HASH_PC__COL_XOR__SHIFT 0x1 +#define MMEA1_ADDRDECGMI_ADDR_HASH_PC__ROW_XOR__SHIFT 0xe +#define MMEA1_ADDRDECGMI_ADDR_HASH_PC__XOR_ENABLE_MASK 0x00000001L +#define MMEA1_ADDRDECGMI_ADDR_HASH_PC__COL_XOR_MASK 0x00003FFEL +#define MMEA1_ADDRDECGMI_ADDR_HASH_PC__ROW_XOR_MASK 0xFFFFC000L +//MMEA1_ADDRDECGMI_ADDR_HASH_PC2 +#define MMEA1_ADDRDECGMI_ADDR_HASH_PC2__BANK_XOR__SHIFT 0x0 +#define MMEA1_ADDRDECGMI_ADDR_HASH_PC2__BANK_XOR_MASK 0x0000003FL +//MMEA1_ADDRDECGMI_ADDR_HASH_CS0 +#define MMEA1_ADDRDECGMI_ADDR_HASH_CS0__XOR_ENABLE__SHIFT 0x0 +#define MMEA1_ADDRDECGMI_ADDR_HASH_CS0__NA_XOR__SHIFT 0x1 +#define MMEA1_ADDRDECGMI_ADDR_HASH_CS0__XOR_ENABLE_MASK 0x00000001L +#define MMEA1_ADDRDECGMI_ADDR_HASH_CS0__NA_XOR_MASK 0xFFFFFFFEL +//MMEA1_ADDRDECGMI_ADDR_HASH_CS1 +#define MMEA1_ADDRDECGMI_ADDR_HASH_CS1__XOR_ENABLE__SHIFT 0x0 +#define MMEA1_ADDRDECGMI_ADDR_HASH_CS1__NA_XOR__SHIFT 0x1 +#define MMEA1_ADDRDECGMI_ADDR_HASH_CS1__XOR_ENABLE_MASK 0x00000001L +#define MMEA1_ADDRDECGMI_ADDR_HASH_CS1__NA_XOR_MASK 0xFFFFFFFEL +//MMEA1_ADDRDECGMI_HARVEST_ENABLE +#define MMEA1_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_EN__SHIFT 0x0 +#define MMEA1_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_VAL__SHIFT 0x1 
+#define MMEA1_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_EN__SHIFT 0x2 +#define MMEA1_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_VAL__SHIFT 0x3 +#define MMEA1_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_EN__SHIFT 0x4 +#define MMEA1_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_VAL__SHIFT 0x5 +#define MMEA1_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_EN_MASK 0x00000001L +#define MMEA1_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_VAL_MASK 0x00000002L +#define MMEA1_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_EN_MASK 0x00000004L +#define MMEA1_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_VAL_MASK 0x00000008L +#define MMEA1_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_EN_MASK 0x00000010L +#define MMEA1_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_VAL_MASK 0x00000020L +//MMEA1_ADDRDEC0_BASE_ADDR_CS0 +#define MMEA1_ADDRDEC0_BASE_ADDR_CS0__CS_EN__SHIFT 0x0 +#define MMEA1_ADDRDEC0_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1 +#define MMEA1_ADDRDEC0_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L +#define MMEA1_ADDRDEC0_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA1_ADDRDEC0_BASE_ADDR_CS1 +#define MMEA1_ADDRDEC0_BASE_ADDR_CS1__CS_EN__SHIFT 0x0 +#define MMEA1_ADDRDEC0_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1 +#define MMEA1_ADDRDEC0_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L +#define MMEA1_ADDRDEC0_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA1_ADDRDEC0_BASE_ADDR_CS2 +#define MMEA1_ADDRDEC0_BASE_ADDR_CS2__CS_EN__SHIFT 0x0 +#define MMEA1_ADDRDEC0_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1 +#define MMEA1_ADDRDEC0_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L +#define MMEA1_ADDRDEC0_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA1_ADDRDEC0_BASE_ADDR_CS3 +#define MMEA1_ADDRDEC0_BASE_ADDR_CS3__CS_EN__SHIFT 0x0 +#define MMEA1_ADDRDEC0_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1 +#define MMEA1_ADDRDEC0_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L +#define MMEA1_ADDRDEC0_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA1_ADDRDEC0_BASE_ADDR_SECCS0 +#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0 +#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1 +#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L +#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA1_ADDRDEC0_BASE_ADDR_SECCS1 +#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0 +#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1 +#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L +#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA1_ADDRDEC0_BASE_ADDR_SECCS2 +#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0 +#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1 +#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L +#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA1_ADDRDEC0_BASE_ADDR_SECCS3 +#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0 +#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1 +#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L +#define MMEA1_ADDRDEC0_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA1_ADDRDEC0_ADDR_MASK_CS01 +#define MMEA1_ADDRDEC0_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1 +#define MMEA1_ADDRDEC0_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA1_ADDRDEC0_ADDR_MASK_CS23 +#define MMEA1_ADDRDEC0_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1 +#define MMEA1_ADDRDEC0_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA1_ADDRDEC0_ADDR_MASK_SECCS01 +#define MMEA1_ADDRDEC0_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1 +#define MMEA1_ADDRDEC0_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA1_ADDRDEC0_ADDR_MASK_SECCS23 +#define MMEA1_ADDRDEC0_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1 
+#define MMEA1_ADDRDEC0_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA1_ADDRDEC0_ADDR_CFG_CS01 +#define MMEA1_ADDRDEC0_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA1_ADDRDEC0_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4 +#define MMEA1_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8 +#define MMEA1_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc +#define MMEA1_ADDRDEC0_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10 +#define MMEA1_ADDRDEC0_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14 +#define MMEA1_ADDRDEC0_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f +#define MMEA1_ADDRDEC0_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA1_ADDRDEC0_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L +#define MMEA1_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA1_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA1_ADDRDEC0_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L +#define MMEA1_ADDRDEC0_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L +#define MMEA1_ADDRDEC0_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L +//MMEA1_ADDRDEC0_ADDR_CFG_CS23 +#define MMEA1_ADDRDEC0_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA1_ADDRDEC0_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4 +#define MMEA1_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8 +#define MMEA1_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc +#define MMEA1_ADDRDEC0_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10 +#define MMEA1_ADDRDEC0_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14 +#define MMEA1_ADDRDEC0_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f +#define MMEA1_ADDRDEC0_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA1_ADDRDEC0_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L +#define MMEA1_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA1_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA1_ADDRDEC0_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L +#define MMEA1_ADDRDEC0_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L +#define MMEA1_ADDRDEC0_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L +//MMEA1_ADDRDEC0_ADDR_SEL_CS01 +#define MMEA1_ADDRDEC0_ADDR_SEL_CS01__BANK0__SHIFT 0x0 +#define MMEA1_ADDRDEC0_ADDR_SEL_CS01__BANK1__SHIFT 0x4 +#define MMEA1_ADDRDEC0_ADDR_SEL_CS01__BANK2__SHIFT 0x8 +#define MMEA1_ADDRDEC0_ADDR_SEL_CS01__BANK3__SHIFT 0xc +#define MMEA1_ADDRDEC0_ADDR_SEL_CS01__BANK4__SHIFT 0x10 +#define MMEA1_ADDRDEC0_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18 +#define MMEA1_ADDRDEC0_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c +#define MMEA1_ADDRDEC0_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL +#define MMEA1_ADDRDEC0_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L +#define MMEA1_ADDRDEC0_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L +#define MMEA1_ADDRDEC0_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L +#define MMEA1_ADDRDEC0_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L +#define MMEA1_ADDRDEC0_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L +#define MMEA1_ADDRDEC0_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L +//MMEA1_ADDRDEC0_ADDR_SEL_CS23 +#define MMEA1_ADDRDEC0_ADDR_SEL_CS23__BANK0__SHIFT 0x0 +#define MMEA1_ADDRDEC0_ADDR_SEL_CS23__BANK1__SHIFT 0x4 +#define MMEA1_ADDRDEC0_ADDR_SEL_CS23__BANK2__SHIFT 0x8 +#define MMEA1_ADDRDEC0_ADDR_SEL_CS23__BANK3__SHIFT 0xc +#define MMEA1_ADDRDEC0_ADDR_SEL_CS23__BANK4__SHIFT 0x10 +#define MMEA1_ADDRDEC0_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18 +#define MMEA1_ADDRDEC0_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c +#define MMEA1_ADDRDEC0_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL +#define MMEA1_ADDRDEC0_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L +#define MMEA1_ADDRDEC0_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L +#define MMEA1_ADDRDEC0_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L +#define MMEA1_ADDRDEC0_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L +#define 
MMEA1_ADDRDEC0_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L +#define MMEA1_ADDRDEC0_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L +//MMEA1_ADDRDEC0_ADDR_SEL2_CS01 +#define MMEA1_ADDRDEC0_ADDR_SEL2_CS01__BANK5__SHIFT 0x0 +#define MMEA1_ADDRDEC0_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL +//MMEA1_ADDRDEC0_ADDR_SEL2_CS23 +#define MMEA1_ADDRDEC0_ADDR_SEL2_CS23__BANK5__SHIFT 0x0 +#define MMEA1_ADDRDEC0_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL +//MMEA1_ADDRDEC0_COL_SEL_LO_CS01 +#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL0__SHIFT 0x0 +#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL1__SHIFT 0x4 +#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL2__SHIFT 0x8 +#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL3__SHIFT 0xc +#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL4__SHIFT 0x10 +#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL5__SHIFT 0x14 +#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL6__SHIFT 0x18 +#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL7__SHIFT 0x1c +#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL +#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L +#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L +#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L +#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L +#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L +#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L +#define MMEA1_ADDRDEC0_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L +//MMEA1_ADDRDEC0_COL_SEL_LO_CS23 +#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL0__SHIFT 0x0 +#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL1__SHIFT 0x4 +#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL2__SHIFT 0x8 +#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL3__SHIFT 0xc +#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL4__SHIFT 0x10 +#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL5__SHIFT 0x14 +#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL6__SHIFT 0x18 +#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL7__SHIFT 0x1c +#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL +#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L +#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L +#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L +#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L +#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L +#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L +#define MMEA1_ADDRDEC0_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L +//MMEA1_ADDRDEC0_COL_SEL_HI_CS01 +#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL8__SHIFT 0x0 +#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL9__SHIFT 0x4 +#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL10__SHIFT 0x8 +#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL11__SHIFT 0xc +#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL12__SHIFT 0x10 +#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL13__SHIFT 0x14 +#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL14__SHIFT 0x18 +#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL15__SHIFT 0x1c +#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL +#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L +#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L +#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L +#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L +#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L +#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L +#define MMEA1_ADDRDEC0_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L +//MMEA1_ADDRDEC0_COL_SEL_HI_CS23 +#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL8__SHIFT 0x0 +#define 
MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL9__SHIFT 0x4 +#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL10__SHIFT 0x8 +#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL11__SHIFT 0xc +#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL12__SHIFT 0x10 +#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL13__SHIFT 0x14 +#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL14__SHIFT 0x18 +#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL15__SHIFT 0x1c +#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL +#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L +#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L +#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L +#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L +#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L +#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L +#define MMEA1_ADDRDEC0_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L +//MMEA1_ADDRDEC0_RM_SEL_CS01 +#define MMEA1_ADDRDEC0_RM_SEL_CS01__RM0__SHIFT 0x0 +#define MMEA1_ADDRDEC0_RM_SEL_CS01__RM1__SHIFT 0x4 +#define MMEA1_ADDRDEC0_RM_SEL_CS01__RM2__SHIFT 0x8 +#define MMEA1_ADDRDEC0_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc +#define MMEA1_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA1_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA1_ADDRDEC0_RM_SEL_CS01__RM0_MASK 0x0000000FL +#define MMEA1_ADDRDEC0_RM_SEL_CS01__RM1_MASK 0x000000F0L +#define MMEA1_ADDRDEC0_RM_SEL_CS01__RM2_MASK 0x00000F00L +#define MMEA1_ADDRDEC0_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA1_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA1_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA1_ADDRDEC0_RM_SEL_CS23 +#define MMEA1_ADDRDEC0_RM_SEL_CS23__RM0__SHIFT 0x0 +#define MMEA1_ADDRDEC0_RM_SEL_CS23__RM1__SHIFT 0x4 +#define MMEA1_ADDRDEC0_RM_SEL_CS23__RM2__SHIFT 0x8 +#define MMEA1_ADDRDEC0_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc +#define MMEA1_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA1_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA1_ADDRDEC0_RM_SEL_CS23__RM0_MASK 0x0000000FL +#define MMEA1_ADDRDEC0_RM_SEL_CS23__RM1_MASK 0x000000F0L +#define MMEA1_ADDRDEC0_RM_SEL_CS23__RM2_MASK 0x00000F00L +#define MMEA1_ADDRDEC0_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA1_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA1_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA1_ADDRDEC0_RM_SEL_SECCS01 +#define MMEA1_ADDRDEC0_RM_SEL_SECCS01__RM0__SHIFT 0x0 +#define MMEA1_ADDRDEC0_RM_SEL_SECCS01__RM1__SHIFT 0x4 +#define MMEA1_ADDRDEC0_RM_SEL_SECCS01__RM2__SHIFT 0x8 +#define MMEA1_ADDRDEC0_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc +#define MMEA1_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA1_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA1_ADDRDEC0_RM_SEL_SECCS01__RM0_MASK 0x0000000FL +#define MMEA1_ADDRDEC0_RM_SEL_SECCS01__RM1_MASK 0x000000F0L +#define MMEA1_ADDRDEC0_RM_SEL_SECCS01__RM2_MASK 0x00000F00L +#define MMEA1_ADDRDEC0_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA1_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA1_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA1_ADDRDEC0_RM_SEL_SECCS23 +#define MMEA1_ADDRDEC0_RM_SEL_SECCS23__RM0__SHIFT 0x0 +#define MMEA1_ADDRDEC0_RM_SEL_SECCS23__RM1__SHIFT 0x4 +#define MMEA1_ADDRDEC0_RM_SEL_SECCS23__RM2__SHIFT 0x8 +#define MMEA1_ADDRDEC0_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc +#define 
MMEA1_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA1_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA1_ADDRDEC0_RM_SEL_SECCS23__RM0_MASK 0x0000000FL +#define MMEA1_ADDRDEC0_RM_SEL_SECCS23__RM1_MASK 0x000000F0L +#define MMEA1_ADDRDEC0_RM_SEL_SECCS23__RM2_MASK 0x00000F00L +#define MMEA1_ADDRDEC0_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA1_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA1_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA1_ADDRDEC1_BASE_ADDR_CS0 +#define MMEA1_ADDRDEC1_BASE_ADDR_CS0__CS_EN__SHIFT 0x0 +#define MMEA1_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1 +#define MMEA1_ADDRDEC1_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L +#define MMEA1_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA1_ADDRDEC1_BASE_ADDR_CS1 +#define MMEA1_ADDRDEC1_BASE_ADDR_CS1__CS_EN__SHIFT 0x0 +#define MMEA1_ADDRDEC1_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1 +#define MMEA1_ADDRDEC1_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L +#define MMEA1_ADDRDEC1_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA1_ADDRDEC1_BASE_ADDR_CS2 +#define MMEA1_ADDRDEC1_BASE_ADDR_CS2__CS_EN__SHIFT 0x0 +#define MMEA1_ADDRDEC1_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1 +#define MMEA1_ADDRDEC1_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L +#define MMEA1_ADDRDEC1_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA1_ADDRDEC1_BASE_ADDR_CS3 +#define MMEA1_ADDRDEC1_BASE_ADDR_CS3__CS_EN__SHIFT 0x0 +#define MMEA1_ADDRDEC1_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1 +#define MMEA1_ADDRDEC1_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L +#define MMEA1_ADDRDEC1_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA1_ADDRDEC1_BASE_ADDR_SECCS0 +#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0 +#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1 +#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L +#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA1_ADDRDEC1_BASE_ADDR_SECCS1 +#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0 +#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1 +#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L +#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA1_ADDRDEC1_BASE_ADDR_SECCS2 +#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0 +#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1 +#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L +#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA1_ADDRDEC1_BASE_ADDR_SECCS3 +#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0 +#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1 +#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L +#define MMEA1_ADDRDEC1_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA1_ADDRDEC1_ADDR_MASK_CS01 +#define MMEA1_ADDRDEC1_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1 +#define MMEA1_ADDRDEC1_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA1_ADDRDEC1_ADDR_MASK_CS23 +#define MMEA1_ADDRDEC1_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1 +#define MMEA1_ADDRDEC1_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA1_ADDRDEC1_ADDR_MASK_SECCS01 +#define MMEA1_ADDRDEC1_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1 +#define MMEA1_ADDRDEC1_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA1_ADDRDEC1_ADDR_MASK_SECCS23 +#define MMEA1_ADDRDEC1_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1 +#define MMEA1_ADDRDEC1_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA1_ADDRDEC1_ADDR_CFG_CS01 +#define 
MMEA1_ADDRDEC1_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA1_ADDRDEC1_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4 +#define MMEA1_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8 +#define MMEA1_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc +#define MMEA1_ADDRDEC1_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10 +#define MMEA1_ADDRDEC1_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14 +#define MMEA1_ADDRDEC1_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f +#define MMEA1_ADDRDEC1_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA1_ADDRDEC1_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L +#define MMEA1_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA1_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA1_ADDRDEC1_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L +#define MMEA1_ADDRDEC1_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L +#define MMEA1_ADDRDEC1_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L +//MMEA1_ADDRDEC1_ADDR_CFG_CS23 +#define MMEA1_ADDRDEC1_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA1_ADDRDEC1_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4 +#define MMEA1_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8 +#define MMEA1_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc +#define MMEA1_ADDRDEC1_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10 +#define MMEA1_ADDRDEC1_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14 +#define MMEA1_ADDRDEC1_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f +#define MMEA1_ADDRDEC1_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA1_ADDRDEC1_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L +#define MMEA1_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA1_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA1_ADDRDEC1_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L +#define MMEA1_ADDRDEC1_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L +#define MMEA1_ADDRDEC1_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L +//MMEA1_ADDRDEC1_ADDR_SEL_CS01 +#define MMEA1_ADDRDEC1_ADDR_SEL_CS01__BANK0__SHIFT 0x0 +#define MMEA1_ADDRDEC1_ADDR_SEL_CS01__BANK1__SHIFT 0x4 +#define MMEA1_ADDRDEC1_ADDR_SEL_CS01__BANK2__SHIFT 0x8 +#define MMEA1_ADDRDEC1_ADDR_SEL_CS01__BANK3__SHIFT 0xc +#define MMEA1_ADDRDEC1_ADDR_SEL_CS01__BANK4__SHIFT 0x10 +#define MMEA1_ADDRDEC1_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18 +#define MMEA1_ADDRDEC1_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c +#define MMEA1_ADDRDEC1_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL +#define MMEA1_ADDRDEC1_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L +#define MMEA1_ADDRDEC1_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L +#define MMEA1_ADDRDEC1_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L +#define MMEA1_ADDRDEC1_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L +#define MMEA1_ADDRDEC1_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L +#define MMEA1_ADDRDEC1_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L +//MMEA1_ADDRDEC1_ADDR_SEL_CS23 +#define MMEA1_ADDRDEC1_ADDR_SEL_CS23__BANK0__SHIFT 0x0 +#define MMEA1_ADDRDEC1_ADDR_SEL_CS23__BANK1__SHIFT 0x4 +#define MMEA1_ADDRDEC1_ADDR_SEL_CS23__BANK2__SHIFT 0x8 +#define MMEA1_ADDRDEC1_ADDR_SEL_CS23__BANK3__SHIFT 0xc +#define MMEA1_ADDRDEC1_ADDR_SEL_CS23__BANK4__SHIFT 0x10 +#define MMEA1_ADDRDEC1_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18 +#define MMEA1_ADDRDEC1_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c +#define MMEA1_ADDRDEC1_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL +#define MMEA1_ADDRDEC1_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L +#define MMEA1_ADDRDEC1_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L +#define MMEA1_ADDRDEC1_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L +#define MMEA1_ADDRDEC1_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L +#define MMEA1_ADDRDEC1_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L +#define MMEA1_ADDRDEC1_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L 
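These headers follow the usual amdgpu convention of emitting a <REG>__<FIELD>__SHIFT bit offset and a <REG>__<FIELD>_MASK bit mask for every register field; driver code combines the two to extract or update a field within a 32-bit register value. The short sketch below illustrates that pattern with the MMEA1_ADDRDEC1_ADDR_CFG_CS01 NUM_COL field defined above. The helper functions and the sample register value are invented for the example and are not part of this patch; in the kernel the same operation is normally expressed through field-access helpers such as REG_GET_FIELD()/REG_SET_FIELD() applied to a value read back from the hardware, which expand to exactly this mask-and-shift arithmetic.

/*
 * Minimal, self-contained sketch of the __SHIFT/_MASK access pattern.
 * Only the two macros below are copied from the definitions above; the
 * helper names and the sample register value are invented for the
 * example and do not reflect real hardware state.
 */
#include <stdint.h>
#include <stdio.h>

#define MMEA1_ADDRDEC1_ADDR_CFG_CS01__NUM_COL__SHIFT	0x10
#define MMEA1_ADDRDEC1_ADDR_CFG_CS01__NUM_COL_MASK	0x000F0000L

/* Extract a field: mask off its bits, then shift them down to bit 0. */
static uint32_t get_num_col(uint32_t reg)
{
	return (reg & MMEA1_ADDRDEC1_ADDR_CFG_CS01__NUM_COL_MASK) >>
	       MMEA1_ADDRDEC1_ADDR_CFG_CS01__NUM_COL__SHIFT;
}

/* Update a field: clear its bits, then OR in the new value shifted up. */
static uint32_t set_num_col(uint32_t reg, uint32_t num_col)
{
	reg &= ~(uint32_t)MMEA1_ADDRDEC1_ADDR_CFG_CS01__NUM_COL_MASK;
	reg |= (num_col << MMEA1_ADDRDEC1_ADDR_CFG_CS01__NUM_COL__SHIFT) &
	       MMEA1_ADDRDEC1_ADDR_CFG_CS01__NUM_COL_MASK;
	return reg;
}

int main(void)
{
	uint32_t reg = 0x00050000;	/* pretend this was read from the register */

	printf("NUM_COL = %u\n", get_num_col(reg));	/* prints 5 */
	reg = set_num_col(reg, 0xA);
	printf("NUM_COL = %u\n", get_num_col(reg));	/* prints 10 */
	return 0;
}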
+//MMEA1_ADDRDEC1_ADDR_SEL2_CS01 +#define MMEA1_ADDRDEC1_ADDR_SEL2_CS01__BANK5__SHIFT 0x0 +#define MMEA1_ADDRDEC1_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL +//MMEA1_ADDRDEC1_ADDR_SEL2_CS23 +#define MMEA1_ADDRDEC1_ADDR_SEL2_CS23__BANK5__SHIFT 0x0 +#define MMEA1_ADDRDEC1_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL +//MMEA1_ADDRDEC1_COL_SEL_LO_CS01 +#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL0__SHIFT 0x0 +#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL1__SHIFT 0x4 +#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL2__SHIFT 0x8 +#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL3__SHIFT 0xc +#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL4__SHIFT 0x10 +#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL5__SHIFT 0x14 +#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL6__SHIFT 0x18 +#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL7__SHIFT 0x1c +#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL +#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L +#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L +#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L +#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L +#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L +#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L +#define MMEA1_ADDRDEC1_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L +//MMEA1_ADDRDEC1_COL_SEL_LO_CS23 +#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL0__SHIFT 0x0 +#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL1__SHIFT 0x4 +#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL2__SHIFT 0x8 +#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL3__SHIFT 0xc +#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL4__SHIFT 0x10 +#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL5__SHIFT 0x14 +#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL6__SHIFT 0x18 +#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL7__SHIFT 0x1c +#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL +#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L +#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L +#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L +#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L +#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L +#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L +#define MMEA1_ADDRDEC1_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L +//MMEA1_ADDRDEC1_COL_SEL_HI_CS01 +#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL8__SHIFT 0x0 +#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL9__SHIFT 0x4 +#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL10__SHIFT 0x8 +#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL11__SHIFT 0xc +#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL12__SHIFT 0x10 +#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL13__SHIFT 0x14 +#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL14__SHIFT 0x18 +#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL15__SHIFT 0x1c +#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL +#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L +#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L +#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L +#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L +#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L +#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L +#define MMEA1_ADDRDEC1_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L +//MMEA1_ADDRDEC1_COL_SEL_HI_CS23 +#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL8__SHIFT 0x0 +#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL9__SHIFT 0x4 +#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL10__SHIFT 0x8 +#define 
MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL11__SHIFT 0xc +#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL12__SHIFT 0x10 +#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL13__SHIFT 0x14 +#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL14__SHIFT 0x18 +#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL15__SHIFT 0x1c +#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL +#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L +#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L +#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L +#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L +#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L +#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L +#define MMEA1_ADDRDEC1_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L +//MMEA1_ADDRDEC1_RM_SEL_CS01 +#define MMEA1_ADDRDEC1_RM_SEL_CS01__RM0__SHIFT 0x0 +#define MMEA1_ADDRDEC1_RM_SEL_CS01__RM1__SHIFT 0x4 +#define MMEA1_ADDRDEC1_RM_SEL_CS01__RM2__SHIFT 0x8 +#define MMEA1_ADDRDEC1_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc +#define MMEA1_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA1_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA1_ADDRDEC1_RM_SEL_CS01__RM0_MASK 0x0000000FL +#define MMEA1_ADDRDEC1_RM_SEL_CS01__RM1_MASK 0x000000F0L +#define MMEA1_ADDRDEC1_RM_SEL_CS01__RM2_MASK 0x00000F00L +#define MMEA1_ADDRDEC1_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA1_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA1_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA1_ADDRDEC1_RM_SEL_CS23 +#define MMEA1_ADDRDEC1_RM_SEL_CS23__RM0__SHIFT 0x0 +#define MMEA1_ADDRDEC1_RM_SEL_CS23__RM1__SHIFT 0x4 +#define MMEA1_ADDRDEC1_RM_SEL_CS23__RM2__SHIFT 0x8 +#define MMEA1_ADDRDEC1_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc +#define MMEA1_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA1_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA1_ADDRDEC1_RM_SEL_CS23__RM0_MASK 0x0000000FL +#define MMEA1_ADDRDEC1_RM_SEL_CS23__RM1_MASK 0x000000F0L +#define MMEA1_ADDRDEC1_RM_SEL_CS23__RM2_MASK 0x00000F00L +#define MMEA1_ADDRDEC1_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA1_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA1_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA1_ADDRDEC1_RM_SEL_SECCS01 +#define MMEA1_ADDRDEC1_RM_SEL_SECCS01__RM0__SHIFT 0x0 +#define MMEA1_ADDRDEC1_RM_SEL_SECCS01__RM1__SHIFT 0x4 +#define MMEA1_ADDRDEC1_RM_SEL_SECCS01__RM2__SHIFT 0x8 +#define MMEA1_ADDRDEC1_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc +#define MMEA1_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA1_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA1_ADDRDEC1_RM_SEL_SECCS01__RM0_MASK 0x0000000FL +#define MMEA1_ADDRDEC1_RM_SEL_SECCS01__RM1_MASK 0x000000F0L +#define MMEA1_ADDRDEC1_RM_SEL_SECCS01__RM2_MASK 0x00000F00L +#define MMEA1_ADDRDEC1_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA1_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA1_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA1_ADDRDEC1_RM_SEL_SECCS23 +#define MMEA1_ADDRDEC1_RM_SEL_SECCS23__RM0__SHIFT 0x0 +#define MMEA1_ADDRDEC1_RM_SEL_SECCS23__RM1__SHIFT 0x4 +#define MMEA1_ADDRDEC1_RM_SEL_SECCS23__RM2__SHIFT 0x8 +#define MMEA1_ADDRDEC1_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc +#define MMEA1_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA1_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define 
MMEA1_ADDRDEC1_RM_SEL_SECCS23__RM0_MASK 0x0000000FL +#define MMEA1_ADDRDEC1_RM_SEL_SECCS23__RM1_MASK 0x000000F0L +#define MMEA1_ADDRDEC1_RM_SEL_SECCS23__RM2_MASK 0x00000F00L +#define MMEA1_ADDRDEC1_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA1_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA1_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA1_ADDRDEC2_BASE_ADDR_CS0 +#define MMEA1_ADDRDEC2_BASE_ADDR_CS0__CS_EN__SHIFT 0x0 +#define MMEA1_ADDRDEC2_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1 +#define MMEA1_ADDRDEC2_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L +#define MMEA1_ADDRDEC2_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA1_ADDRDEC2_BASE_ADDR_CS1 +#define MMEA1_ADDRDEC2_BASE_ADDR_CS1__CS_EN__SHIFT 0x0 +#define MMEA1_ADDRDEC2_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1 +#define MMEA1_ADDRDEC2_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L +#define MMEA1_ADDRDEC2_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA1_ADDRDEC2_BASE_ADDR_CS2 +#define MMEA1_ADDRDEC2_BASE_ADDR_CS2__CS_EN__SHIFT 0x0 +#define MMEA1_ADDRDEC2_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1 +#define MMEA1_ADDRDEC2_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L +#define MMEA1_ADDRDEC2_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA1_ADDRDEC2_BASE_ADDR_CS3 +#define MMEA1_ADDRDEC2_BASE_ADDR_CS3__CS_EN__SHIFT 0x0 +#define MMEA1_ADDRDEC2_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1 +#define MMEA1_ADDRDEC2_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L +#define MMEA1_ADDRDEC2_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA1_ADDRDEC2_BASE_ADDR_SECCS0 +#define MMEA1_ADDRDEC2_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0 +#define MMEA1_ADDRDEC2_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1 +#define MMEA1_ADDRDEC2_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L +#define MMEA1_ADDRDEC2_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA1_ADDRDEC2_BASE_ADDR_SECCS1 +#define MMEA1_ADDRDEC2_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0 +#define MMEA1_ADDRDEC2_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1 +#define MMEA1_ADDRDEC2_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L +#define MMEA1_ADDRDEC2_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA1_ADDRDEC2_BASE_ADDR_SECCS2 +#define MMEA1_ADDRDEC2_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0 +#define MMEA1_ADDRDEC2_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1 +#define MMEA1_ADDRDEC2_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L +#define MMEA1_ADDRDEC2_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA1_ADDRDEC2_BASE_ADDR_SECCS3 +#define MMEA1_ADDRDEC2_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0 +#define MMEA1_ADDRDEC2_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1 +#define MMEA1_ADDRDEC2_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L +#define MMEA1_ADDRDEC2_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA1_ADDRDEC2_ADDR_MASK_CS01 +#define MMEA1_ADDRDEC2_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1 +#define MMEA1_ADDRDEC2_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA1_ADDRDEC2_ADDR_MASK_CS23 +#define MMEA1_ADDRDEC2_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1 +#define MMEA1_ADDRDEC2_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA1_ADDRDEC2_ADDR_MASK_SECCS01 +#define MMEA1_ADDRDEC2_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1 +#define MMEA1_ADDRDEC2_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA1_ADDRDEC2_ADDR_MASK_SECCS23 +#define MMEA1_ADDRDEC2_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1 +#define MMEA1_ADDRDEC2_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA1_ADDRDEC2_ADDR_CFG_CS01 +#define MMEA1_ADDRDEC2_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA1_ADDRDEC2_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4 +#define MMEA1_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8 +#define 
MMEA1_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc +#define MMEA1_ADDRDEC2_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10 +#define MMEA1_ADDRDEC2_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14 +#define MMEA1_ADDRDEC2_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f +#define MMEA1_ADDRDEC2_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA1_ADDRDEC2_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L +#define MMEA1_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA1_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA1_ADDRDEC2_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L +#define MMEA1_ADDRDEC2_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L +#define MMEA1_ADDRDEC2_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L +//MMEA1_ADDRDEC2_ADDR_CFG_CS23 +#define MMEA1_ADDRDEC2_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA1_ADDRDEC2_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4 +#define MMEA1_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8 +#define MMEA1_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc +#define MMEA1_ADDRDEC2_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10 +#define MMEA1_ADDRDEC2_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14 +#define MMEA1_ADDRDEC2_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f +#define MMEA1_ADDRDEC2_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA1_ADDRDEC2_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L +#define MMEA1_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA1_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA1_ADDRDEC2_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L +#define MMEA1_ADDRDEC2_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L +#define MMEA1_ADDRDEC2_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L +//MMEA1_ADDRDEC2_ADDR_SEL_CS01 +#define MMEA1_ADDRDEC2_ADDR_SEL_CS01__BANK0__SHIFT 0x0 +#define MMEA1_ADDRDEC2_ADDR_SEL_CS01__BANK1__SHIFT 0x4 +#define MMEA1_ADDRDEC2_ADDR_SEL_CS01__BANK2__SHIFT 0x8 +#define MMEA1_ADDRDEC2_ADDR_SEL_CS01__BANK3__SHIFT 0xc +#define MMEA1_ADDRDEC2_ADDR_SEL_CS01__BANK4__SHIFT 0x10 +#define MMEA1_ADDRDEC2_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18 +#define MMEA1_ADDRDEC2_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c +#define MMEA1_ADDRDEC2_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL +#define MMEA1_ADDRDEC2_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L +#define MMEA1_ADDRDEC2_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L +#define MMEA1_ADDRDEC2_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L +#define MMEA1_ADDRDEC2_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L +#define MMEA1_ADDRDEC2_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L +#define MMEA1_ADDRDEC2_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L +//MMEA1_ADDRDEC2_ADDR_SEL_CS23 +#define MMEA1_ADDRDEC2_ADDR_SEL_CS23__BANK0__SHIFT 0x0 +#define MMEA1_ADDRDEC2_ADDR_SEL_CS23__BANK1__SHIFT 0x4 +#define MMEA1_ADDRDEC2_ADDR_SEL_CS23__BANK2__SHIFT 0x8 +#define MMEA1_ADDRDEC2_ADDR_SEL_CS23__BANK3__SHIFT 0xc +#define MMEA1_ADDRDEC2_ADDR_SEL_CS23__BANK4__SHIFT 0x10 +#define MMEA1_ADDRDEC2_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18 +#define MMEA1_ADDRDEC2_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c +#define MMEA1_ADDRDEC2_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL +#define MMEA1_ADDRDEC2_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L +#define MMEA1_ADDRDEC2_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L +#define MMEA1_ADDRDEC2_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L +#define MMEA1_ADDRDEC2_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L +#define MMEA1_ADDRDEC2_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L +#define MMEA1_ADDRDEC2_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L +//MMEA1_ADDRDEC2_ADDR_SEL2_CS01 +#define MMEA1_ADDRDEC2_ADDR_SEL2_CS01__BANK5__SHIFT 0x0 +#define MMEA1_ADDRDEC2_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL +//MMEA1_ADDRDEC2_ADDR_SEL2_CS23 +#define 
MMEA1_ADDRDEC2_ADDR_SEL2_CS23__BANK5__SHIFT 0x0 +#define MMEA1_ADDRDEC2_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL +//MMEA1_ADDRDEC2_COL_SEL_LO_CS01 +#define MMEA1_ADDRDEC2_COL_SEL_LO_CS01__COL0__SHIFT 0x0 +#define MMEA1_ADDRDEC2_COL_SEL_LO_CS01__COL1__SHIFT 0x4 +#define MMEA1_ADDRDEC2_COL_SEL_LO_CS01__COL2__SHIFT 0x8 +#define MMEA1_ADDRDEC2_COL_SEL_LO_CS01__COL3__SHIFT 0xc +#define MMEA1_ADDRDEC2_COL_SEL_LO_CS01__COL4__SHIFT 0x10 +#define MMEA1_ADDRDEC2_COL_SEL_LO_CS01__COL5__SHIFT 0x14 +#define MMEA1_ADDRDEC2_COL_SEL_LO_CS01__COL6__SHIFT 0x18 +#define MMEA1_ADDRDEC2_COL_SEL_LO_CS01__COL7__SHIFT 0x1c +#define MMEA1_ADDRDEC2_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL +#define MMEA1_ADDRDEC2_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L +#define MMEA1_ADDRDEC2_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L +#define MMEA1_ADDRDEC2_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L +#define MMEA1_ADDRDEC2_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L +#define MMEA1_ADDRDEC2_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L +#define MMEA1_ADDRDEC2_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L +#define MMEA1_ADDRDEC2_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L +//MMEA1_ADDRDEC2_COL_SEL_LO_CS23 +#define MMEA1_ADDRDEC2_COL_SEL_LO_CS23__COL0__SHIFT 0x0 +#define MMEA1_ADDRDEC2_COL_SEL_LO_CS23__COL1__SHIFT 0x4 +#define MMEA1_ADDRDEC2_COL_SEL_LO_CS23__COL2__SHIFT 0x8 +#define MMEA1_ADDRDEC2_COL_SEL_LO_CS23__COL3__SHIFT 0xc +#define MMEA1_ADDRDEC2_COL_SEL_LO_CS23__COL4__SHIFT 0x10 +#define MMEA1_ADDRDEC2_COL_SEL_LO_CS23__COL5__SHIFT 0x14 +#define MMEA1_ADDRDEC2_COL_SEL_LO_CS23__COL6__SHIFT 0x18 +#define MMEA1_ADDRDEC2_COL_SEL_LO_CS23__COL7__SHIFT 0x1c +#define MMEA1_ADDRDEC2_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL +#define MMEA1_ADDRDEC2_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L +#define MMEA1_ADDRDEC2_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L +#define MMEA1_ADDRDEC2_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L +#define MMEA1_ADDRDEC2_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L +#define MMEA1_ADDRDEC2_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L +#define MMEA1_ADDRDEC2_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L +#define MMEA1_ADDRDEC2_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L +//MMEA1_ADDRDEC2_COL_SEL_HI_CS01 +#define MMEA1_ADDRDEC2_COL_SEL_HI_CS01__COL8__SHIFT 0x0 +#define MMEA1_ADDRDEC2_COL_SEL_HI_CS01__COL9__SHIFT 0x4 +#define MMEA1_ADDRDEC2_COL_SEL_HI_CS01__COL10__SHIFT 0x8 +#define MMEA1_ADDRDEC2_COL_SEL_HI_CS01__COL11__SHIFT 0xc +#define MMEA1_ADDRDEC2_COL_SEL_HI_CS01__COL12__SHIFT 0x10 +#define MMEA1_ADDRDEC2_COL_SEL_HI_CS01__COL13__SHIFT 0x14 +#define MMEA1_ADDRDEC2_COL_SEL_HI_CS01__COL14__SHIFT 0x18 +#define MMEA1_ADDRDEC2_COL_SEL_HI_CS01__COL15__SHIFT 0x1c +#define MMEA1_ADDRDEC2_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL +#define MMEA1_ADDRDEC2_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L +#define MMEA1_ADDRDEC2_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L +#define MMEA1_ADDRDEC2_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L +#define MMEA1_ADDRDEC2_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L +#define MMEA1_ADDRDEC2_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L +#define MMEA1_ADDRDEC2_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L +#define MMEA1_ADDRDEC2_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L +//MMEA1_ADDRDEC2_COL_SEL_HI_CS23 +#define MMEA1_ADDRDEC2_COL_SEL_HI_CS23__COL8__SHIFT 0x0 +#define MMEA1_ADDRDEC2_COL_SEL_HI_CS23__COL9__SHIFT 0x4 +#define MMEA1_ADDRDEC2_COL_SEL_HI_CS23__COL10__SHIFT 0x8 +#define MMEA1_ADDRDEC2_COL_SEL_HI_CS23__COL11__SHIFT 0xc +#define MMEA1_ADDRDEC2_COL_SEL_HI_CS23__COL12__SHIFT 0x10 +#define MMEA1_ADDRDEC2_COL_SEL_HI_CS23__COL13__SHIFT 0x14 +#define MMEA1_ADDRDEC2_COL_SEL_HI_CS23__COL14__SHIFT 
0x18 +#define MMEA1_ADDRDEC2_COL_SEL_HI_CS23__COL15__SHIFT 0x1c +#define MMEA1_ADDRDEC2_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL +#define MMEA1_ADDRDEC2_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L +#define MMEA1_ADDRDEC2_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L +#define MMEA1_ADDRDEC2_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L +#define MMEA1_ADDRDEC2_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L +#define MMEA1_ADDRDEC2_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L +#define MMEA1_ADDRDEC2_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L +#define MMEA1_ADDRDEC2_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L +//MMEA1_ADDRDEC2_RM_SEL_CS01 +#define MMEA1_ADDRDEC2_RM_SEL_CS01__RM0__SHIFT 0x0 +#define MMEA1_ADDRDEC2_RM_SEL_CS01__RM1__SHIFT 0x4 +#define MMEA1_ADDRDEC2_RM_SEL_CS01__RM2__SHIFT 0x8 +#define MMEA1_ADDRDEC2_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc +#define MMEA1_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA1_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA1_ADDRDEC2_RM_SEL_CS01__RM0_MASK 0x0000000FL +#define MMEA1_ADDRDEC2_RM_SEL_CS01__RM1_MASK 0x000000F0L +#define MMEA1_ADDRDEC2_RM_SEL_CS01__RM2_MASK 0x00000F00L +#define MMEA1_ADDRDEC2_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA1_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA1_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA1_ADDRDEC2_RM_SEL_CS23 +#define MMEA1_ADDRDEC2_RM_SEL_CS23__RM0__SHIFT 0x0 +#define MMEA1_ADDRDEC2_RM_SEL_CS23__RM1__SHIFT 0x4 +#define MMEA1_ADDRDEC2_RM_SEL_CS23__RM2__SHIFT 0x8 +#define MMEA1_ADDRDEC2_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc +#define MMEA1_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA1_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA1_ADDRDEC2_RM_SEL_CS23__RM0_MASK 0x0000000FL +#define MMEA1_ADDRDEC2_RM_SEL_CS23__RM1_MASK 0x000000F0L +#define MMEA1_ADDRDEC2_RM_SEL_CS23__RM2_MASK 0x00000F00L +#define MMEA1_ADDRDEC2_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA1_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA1_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA1_ADDRDEC2_RM_SEL_SECCS01 +#define MMEA1_ADDRDEC2_RM_SEL_SECCS01__RM0__SHIFT 0x0 +#define MMEA1_ADDRDEC2_RM_SEL_SECCS01__RM1__SHIFT 0x4 +#define MMEA1_ADDRDEC2_RM_SEL_SECCS01__RM2__SHIFT 0x8 +#define MMEA1_ADDRDEC2_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc +#define MMEA1_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA1_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA1_ADDRDEC2_RM_SEL_SECCS01__RM0_MASK 0x0000000FL +#define MMEA1_ADDRDEC2_RM_SEL_SECCS01__RM1_MASK 0x000000F0L +#define MMEA1_ADDRDEC2_RM_SEL_SECCS01__RM2_MASK 0x00000F00L +#define MMEA1_ADDRDEC2_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA1_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA1_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA1_ADDRDEC2_RM_SEL_SECCS23 +#define MMEA1_ADDRDEC2_RM_SEL_SECCS23__RM0__SHIFT 0x0 +#define MMEA1_ADDRDEC2_RM_SEL_SECCS23__RM1__SHIFT 0x4 +#define MMEA1_ADDRDEC2_RM_SEL_SECCS23__RM2__SHIFT 0x8 +#define MMEA1_ADDRDEC2_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc +#define MMEA1_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA1_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA1_ADDRDEC2_RM_SEL_SECCS23__RM0_MASK 0x0000000FL +#define MMEA1_ADDRDEC2_RM_SEL_SECCS23__RM1_MASK 0x000000F0L +#define MMEA1_ADDRDEC2_RM_SEL_SECCS23__RM2_MASK 0x00000F00L +#define 
MMEA1_ADDRDEC2_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA1_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA1_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA1_ADDRNORMDRAM_GLOBAL_CNTL +#define MMEA1_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K__SHIFT 0x14 +#define MMEA1_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M__SHIFT 0x15 +#define MMEA1_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G__SHIFT 0x16 +#define MMEA1_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K_MASK 0x00100000L +#define MMEA1_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M_MASK 0x00200000L +#define MMEA1_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G_MASK 0x00400000L +//MMEA1_ADDRNORMGMI_GLOBAL_CNTL +#define MMEA1_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K__SHIFT 0x14 +#define MMEA1_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M__SHIFT 0x15 +#define MMEA1_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G__SHIFT 0x16 +#define MMEA1_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K_MASK 0x00100000L +#define MMEA1_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M_MASK 0x00200000L +#define MMEA1_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G_MASK 0x00400000L +//MMEA1_IO_RD_CLI2GRP_MAP0 +#define MMEA1_IO_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0 +#define MMEA1_IO_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2 +#define MMEA1_IO_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4 +#define MMEA1_IO_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6 +#define MMEA1_IO_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8 +#define MMEA1_IO_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa +#define MMEA1_IO_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc +#define MMEA1_IO_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe +#define MMEA1_IO_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10 +#define MMEA1_IO_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12 +#define MMEA1_IO_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14 +#define MMEA1_IO_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16 +#define MMEA1_IO_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18 +#define MMEA1_IO_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a +#define MMEA1_IO_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c +#define MMEA1_IO_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e +#define MMEA1_IO_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L +#define MMEA1_IO_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL +#define MMEA1_IO_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L +#define MMEA1_IO_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L +#define MMEA1_IO_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L +#define MMEA1_IO_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L +#define MMEA1_IO_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L +#define MMEA1_IO_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L +#define MMEA1_IO_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L +#define MMEA1_IO_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L +#define MMEA1_IO_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L +#define MMEA1_IO_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L +#define MMEA1_IO_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L +#define MMEA1_IO_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L +#define MMEA1_IO_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L +#define MMEA1_IO_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L +//MMEA1_IO_RD_CLI2GRP_MAP1 +#define MMEA1_IO_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0 +#define MMEA1_IO_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2 +#define MMEA1_IO_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4 +#define MMEA1_IO_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6 +#define MMEA1_IO_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8 +#define MMEA1_IO_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa 
+#define MMEA1_IO_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc +#define MMEA1_IO_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe +#define MMEA1_IO_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10 +#define MMEA1_IO_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12 +#define MMEA1_IO_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14 +#define MMEA1_IO_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16 +#define MMEA1_IO_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18 +#define MMEA1_IO_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a +#define MMEA1_IO_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c +#define MMEA1_IO_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e +#define MMEA1_IO_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L +#define MMEA1_IO_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL +#define MMEA1_IO_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L +#define MMEA1_IO_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L +#define MMEA1_IO_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L +#define MMEA1_IO_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L +#define MMEA1_IO_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L +#define MMEA1_IO_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L +#define MMEA1_IO_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L +#define MMEA1_IO_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L +#define MMEA1_IO_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L +#define MMEA1_IO_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L +#define MMEA1_IO_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L +#define MMEA1_IO_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L +#define MMEA1_IO_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L +#define MMEA1_IO_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L +//MMEA1_IO_WR_CLI2GRP_MAP0 +#define MMEA1_IO_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0 +#define MMEA1_IO_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2 +#define MMEA1_IO_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4 +#define MMEA1_IO_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6 +#define MMEA1_IO_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8 +#define MMEA1_IO_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa +#define MMEA1_IO_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc +#define MMEA1_IO_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe +#define MMEA1_IO_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10 +#define MMEA1_IO_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12 +#define MMEA1_IO_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14 +#define MMEA1_IO_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16 +#define MMEA1_IO_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18 +#define MMEA1_IO_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a +#define MMEA1_IO_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c +#define MMEA1_IO_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e +#define MMEA1_IO_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L +#define MMEA1_IO_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL +#define MMEA1_IO_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L +#define MMEA1_IO_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L +#define MMEA1_IO_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L +#define MMEA1_IO_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L +#define MMEA1_IO_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L +#define MMEA1_IO_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L +#define MMEA1_IO_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L +#define MMEA1_IO_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L +#define MMEA1_IO_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L +#define MMEA1_IO_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L +#define MMEA1_IO_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L +#define MMEA1_IO_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L +#define MMEA1_IO_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L +#define 
MMEA1_IO_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L +//MMEA1_IO_WR_CLI2GRP_MAP1 +#define MMEA1_IO_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0 +#define MMEA1_IO_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2 +#define MMEA1_IO_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4 +#define MMEA1_IO_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6 +#define MMEA1_IO_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8 +#define MMEA1_IO_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa +#define MMEA1_IO_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc +#define MMEA1_IO_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe +#define MMEA1_IO_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10 +#define MMEA1_IO_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12 +#define MMEA1_IO_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14 +#define MMEA1_IO_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16 +#define MMEA1_IO_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18 +#define MMEA1_IO_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a +#define MMEA1_IO_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c +#define MMEA1_IO_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e +#define MMEA1_IO_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L +#define MMEA1_IO_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL +#define MMEA1_IO_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L +#define MMEA1_IO_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L +#define MMEA1_IO_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L +#define MMEA1_IO_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L +#define MMEA1_IO_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L +#define MMEA1_IO_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L +#define MMEA1_IO_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L +#define MMEA1_IO_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L +#define MMEA1_IO_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L +#define MMEA1_IO_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L +#define MMEA1_IO_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L +#define MMEA1_IO_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L +#define MMEA1_IO_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L +#define MMEA1_IO_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L +//MMEA1_IO_RD_COMBINE_FLUSH +#define MMEA1_IO_RD_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0 +#define MMEA1_IO_RD_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4 +#define MMEA1_IO_RD_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8 +#define MMEA1_IO_RD_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc +#define MMEA1_IO_RD_COMBINE_FLUSH__FORWARD_COMB_ONLY__SHIFT 0x10 +#define MMEA1_IO_RD_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL +#define MMEA1_IO_RD_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L +#define MMEA1_IO_RD_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L +#define MMEA1_IO_RD_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L +#define MMEA1_IO_RD_COMBINE_FLUSH__FORWARD_COMB_ONLY_MASK 0x00010000L +//MMEA1_IO_WR_COMBINE_FLUSH +#define MMEA1_IO_WR_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0 +#define MMEA1_IO_WR_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4 +#define MMEA1_IO_WR_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8 +#define MMEA1_IO_WR_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc +#define MMEA1_IO_WR_COMBINE_FLUSH__FORWARD_COMB_ONLY__SHIFT 0x10 +#define MMEA1_IO_WR_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL +#define MMEA1_IO_WR_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L +#define MMEA1_IO_WR_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L +#define MMEA1_IO_WR_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L +#define MMEA1_IO_WR_COMBINE_FLUSH__FORWARD_COMB_ONLY_MASK 0x00010000L +//MMEA1_IO_GROUP_BURST +#define MMEA1_IO_GROUP_BURST__RD_LIMIT_LO__SHIFT 0x0 +#define MMEA1_IO_GROUP_BURST__RD_LIMIT_HI__SHIFT 0x8 +#define 
MMEA1_IO_GROUP_BURST__WR_LIMIT_LO__SHIFT 0x10 +#define MMEA1_IO_GROUP_BURST__WR_LIMIT_HI__SHIFT 0x18 +#define MMEA1_IO_GROUP_BURST__RD_LIMIT_LO_MASK 0x000000FFL +#define MMEA1_IO_GROUP_BURST__RD_LIMIT_HI_MASK 0x0000FF00L +#define MMEA1_IO_GROUP_BURST__WR_LIMIT_LO_MASK 0x00FF0000L +#define MMEA1_IO_GROUP_BURST__WR_LIMIT_HI_MASK 0xFF000000L +//MMEA1_IO_RD_PRI_AGE +#define MMEA1_IO_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0 +#define MMEA1_IO_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3 +#define MMEA1_IO_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6 +#define MMEA1_IO_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9 +#define MMEA1_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc +#define MMEA1_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf +#define MMEA1_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12 +#define MMEA1_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15 +#define MMEA1_IO_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L +#define MMEA1_IO_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L +#define MMEA1_IO_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L +#define MMEA1_IO_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L +#define MMEA1_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L +#define MMEA1_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L +#define MMEA1_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L +#define MMEA1_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L +//MMEA1_IO_WR_PRI_AGE +#define MMEA1_IO_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0 +#define MMEA1_IO_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3 +#define MMEA1_IO_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6 +#define MMEA1_IO_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9 +#define MMEA1_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc +#define MMEA1_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf +#define MMEA1_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12 +#define MMEA1_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15 +#define MMEA1_IO_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L +#define MMEA1_IO_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L +#define MMEA1_IO_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L +#define MMEA1_IO_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L +#define MMEA1_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L +#define MMEA1_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L +#define MMEA1_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L +#define MMEA1_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L +//MMEA1_IO_RD_PRI_QUEUING +#define MMEA1_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0 +#define MMEA1_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3 +#define MMEA1_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6 +#define MMEA1_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9 +#define MMEA1_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L +#define MMEA1_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L +#define MMEA1_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L +#define MMEA1_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L +//MMEA1_IO_WR_PRI_QUEUING +#define MMEA1_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0 +#define MMEA1_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3 +#define MMEA1_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6 +#define MMEA1_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9 +#define MMEA1_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L +#define MMEA1_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L 
+#define MMEA1_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L +#define MMEA1_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L +//MMEA1_IO_RD_PRI_FIXED +#define MMEA1_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0 +#define MMEA1_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3 +#define MMEA1_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6 +#define MMEA1_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9 +#define MMEA1_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L +#define MMEA1_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L +#define MMEA1_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L +#define MMEA1_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L +//MMEA1_IO_WR_PRI_FIXED +#define MMEA1_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0 +#define MMEA1_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3 +#define MMEA1_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6 +#define MMEA1_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9 +#define MMEA1_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L +#define MMEA1_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L +#define MMEA1_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L +#define MMEA1_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L +//MMEA1_IO_RD_PRI_URGENCY +#define MMEA1_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0 +#define MMEA1_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3 +#define MMEA1_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6 +#define MMEA1_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9 +#define MMEA1_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc +#define MMEA1_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd +#define MMEA1_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe +#define MMEA1_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf +#define MMEA1_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L +#define MMEA1_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L +#define MMEA1_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L +#define MMEA1_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L +#define MMEA1_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L +#define MMEA1_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L +#define MMEA1_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L +#define MMEA1_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L +//MMEA1_IO_WR_PRI_URGENCY +#define MMEA1_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0 +#define MMEA1_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3 +#define MMEA1_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6 +#define MMEA1_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9 +#define MMEA1_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc +#define MMEA1_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd +#define MMEA1_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe +#define MMEA1_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf +#define MMEA1_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L +#define MMEA1_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L +#define MMEA1_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L +#define MMEA1_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L +#define MMEA1_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L +#define MMEA1_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L +#define 
MMEA1_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L +#define MMEA1_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L +//MMEA1_IO_RD_PRI_URGENCY_MASKING +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0 +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1 +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2 +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3 +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4 +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5 +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6 +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7 +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8 +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9 +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10 +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11 +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12 +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13 +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14 +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15 +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16 +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17 +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18 +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19 +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L +#define 
MMEA1_IO_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L +#define MMEA1_IO_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L +//MMEA1_IO_WR_PRI_URGENCY_MASKING +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0 +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1 +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2 +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3 +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4 +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5 +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6 +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7 +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8 +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9 +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10 +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11 +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12 +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13 +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14 +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15 +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16 +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17 +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18 +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19 +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L +#define 
MMEA1_IO_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L +#define MMEA1_IO_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L +//MMEA1_IO_RD_PRI_QUANT_PRI1 +#define MMEA1_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA1_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA1_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA1_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA1_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA1_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA1_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA1_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA1_IO_RD_PRI_QUANT_PRI2 +#define MMEA1_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA1_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA1_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA1_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA1_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA1_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA1_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA1_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA1_IO_RD_PRI_QUANT_PRI3 +#define MMEA1_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA1_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA1_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA1_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA1_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA1_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA1_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA1_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA1_IO_WR_PRI_QUANT_PRI1 +#define MMEA1_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA1_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 
0x8 +#define MMEA1_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA1_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA1_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA1_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA1_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA1_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA1_IO_WR_PRI_QUANT_PRI2 +#define MMEA1_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA1_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA1_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA1_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA1_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA1_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA1_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA1_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA1_IO_WR_PRI_QUANT_PRI3 +#define MMEA1_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA1_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA1_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA1_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA1_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA1_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA1_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA1_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA1_SDP_ARB_DRAM +#define MMEA1_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0 +#define MMEA1_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA__SHIFT 0x8 +#define MMEA1_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI__SHIFT 0x10 +#define MMEA1_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI__SHIFT 0x11 +#define MMEA1_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES__SHIFT 0x12 +#define MMEA1_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES__SHIFT 0x13 +#define MMEA1_SDP_ARB_DRAM__EOB_ON_EXPIRE__SHIFT 0x14 +#define MMEA1_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15 +#define MMEA1_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL +#define MMEA1_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L +#define MMEA1_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI_MASK 0x00010000L +#define MMEA1_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI_MASK 0x00020000L +#define MMEA1_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES_MASK 0x00040000L +#define MMEA1_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES_MASK 0x00080000L +#define MMEA1_SDP_ARB_DRAM__EOB_ON_EXPIRE_MASK 0x00100000L +#define MMEA1_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L +//MMEA1_SDP_ARB_GMI +#define MMEA1_SDP_ARB_GMI__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0 +#define MMEA1_SDP_ARB_GMI__RDWR_BURST_LIMIT_DATA__SHIFT 0x8 +#define MMEA1_SDP_ARB_GMI__EARLY_SW2RD_ON_PRI__SHIFT 0x10 +#define MMEA1_SDP_ARB_GMI__EARLY_SW2WR_ON_PRI__SHIFT 0x11 +#define MMEA1_SDP_ARB_GMI__EARLY_SW2RD_ON_RES__SHIFT 0x12 +#define MMEA1_SDP_ARB_GMI__EARLY_SW2WR_ON_RES__SHIFT 0x13 +#define MMEA1_SDP_ARB_GMI__EOB_ON_EXPIRE__SHIFT 0x14 +#define MMEA1_SDP_ARB_GMI__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15 +#define MMEA1_SDP_ARB_GMI__ALLOW_CHAIN_BREAKING__SHIFT 0x16 +#define MMEA1_SDP_ARB_GMI__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL +#define MMEA1_SDP_ARB_GMI__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L +#define MMEA1_SDP_ARB_GMI__EARLY_SW2RD_ON_PRI_MASK 0x00010000L +#define MMEA1_SDP_ARB_GMI__EARLY_SW2WR_ON_PRI_MASK 0x00020000L +#define MMEA1_SDP_ARB_GMI__EARLY_SW2RD_ON_RES_MASK 0x00040000L +#define MMEA1_SDP_ARB_GMI__EARLY_SW2WR_ON_RES_MASK 0x00080000L 
+#define MMEA1_SDP_ARB_GMI__EOB_ON_EXPIRE_MASK 0x00100000L +#define MMEA1_SDP_ARB_GMI__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L +#define MMEA1_SDP_ARB_GMI__ALLOW_CHAIN_BREAKING_MASK 0x00400000L +//MMEA1_SDP_ARB_FINAL +#define MMEA1_SDP_ARB_FINAL__DRAM_BURST_LIMIT__SHIFT 0x0 +#define MMEA1_SDP_ARB_FINAL__GMI_BURST_LIMIT__SHIFT 0x5 +#define MMEA1_SDP_ARB_FINAL__IO_BURST_LIMIT__SHIFT 0xa +#define MMEA1_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER__SHIFT 0xf +#define MMEA1_SDP_ARB_FINAL__RDONLY_VC0__SHIFT 0x11 +#define MMEA1_SDP_ARB_FINAL__RDONLY_VC1__SHIFT 0x12 +#define MMEA1_SDP_ARB_FINAL__RDONLY_VC2__SHIFT 0x13 +#define MMEA1_SDP_ARB_FINAL__RDONLY_VC3__SHIFT 0x14 +#define MMEA1_SDP_ARB_FINAL__RDONLY_VC4__SHIFT 0x15 +#define MMEA1_SDP_ARB_FINAL__RDONLY_VC5__SHIFT 0x16 +#define MMEA1_SDP_ARB_FINAL__RDONLY_VC6__SHIFT 0x17 +#define MMEA1_SDP_ARB_FINAL__RDONLY_VC7__SHIFT 0x18 +#define MMEA1_SDP_ARB_FINAL__ERREVENT_ON_ERROR__SHIFT 0x19 +#define MMEA1_SDP_ARB_FINAL__HALTREQ_ON_ERROR__SHIFT 0x1a +#define MMEA1_SDP_ARB_FINAL__GMI_BURST_STRETCH__SHIFT 0x1b +#define MMEA1_SDP_ARB_FINAL__DRAM_BURST_LIMIT_MASK 0x0000001FL +#define MMEA1_SDP_ARB_FINAL__GMI_BURST_LIMIT_MASK 0x000003E0L +#define MMEA1_SDP_ARB_FINAL__IO_BURST_LIMIT_MASK 0x00007C00L +#define MMEA1_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER_MASK 0x00018000L +#define MMEA1_SDP_ARB_FINAL__RDONLY_VC0_MASK 0x00020000L +#define MMEA1_SDP_ARB_FINAL__RDONLY_VC1_MASK 0x00040000L +#define MMEA1_SDP_ARB_FINAL__RDONLY_VC2_MASK 0x00080000L +#define MMEA1_SDP_ARB_FINAL__RDONLY_VC3_MASK 0x00100000L +#define MMEA1_SDP_ARB_FINAL__RDONLY_VC4_MASK 0x00200000L +#define MMEA1_SDP_ARB_FINAL__RDONLY_VC5_MASK 0x00400000L +#define MMEA1_SDP_ARB_FINAL__RDONLY_VC6_MASK 0x00800000L +#define MMEA1_SDP_ARB_FINAL__RDONLY_VC7_MASK 0x01000000L +#define MMEA1_SDP_ARB_FINAL__ERREVENT_ON_ERROR_MASK 0x02000000L +#define MMEA1_SDP_ARB_FINAL__HALTREQ_ON_ERROR_MASK 0x04000000L +#define MMEA1_SDP_ARB_FINAL__GMI_BURST_STRETCH_MASK 0x08000000L +//MMEA1_SDP_DRAM_PRIORITY +#define MMEA1_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0 +#define MMEA1_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4 +#define MMEA1_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8 +#define MMEA1_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc +#define MMEA1_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10 +#define MMEA1_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14 +#define MMEA1_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18 +#define MMEA1_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c +#define MMEA1_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL +#define MMEA1_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L +#define MMEA1_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L +#define MMEA1_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L +#define MMEA1_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L +#define MMEA1_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L +#define MMEA1_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L +#define MMEA1_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L +//MMEA1_SDP_GMI_PRIORITY +#define MMEA1_SDP_GMI_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0 +#define MMEA1_SDP_GMI_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4 +#define MMEA1_SDP_GMI_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8 +#define MMEA1_SDP_GMI_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc +#define MMEA1_SDP_GMI_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10 +#define MMEA1_SDP_GMI_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14 +#define MMEA1_SDP_GMI_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18 +#define 
MMEA1_SDP_GMI_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c +#define MMEA1_SDP_GMI_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL +#define MMEA1_SDP_GMI_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L +#define MMEA1_SDP_GMI_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L +#define MMEA1_SDP_GMI_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L +#define MMEA1_SDP_GMI_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L +#define MMEA1_SDP_GMI_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L +#define MMEA1_SDP_GMI_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L +#define MMEA1_SDP_GMI_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L +//MMEA1_SDP_IO_PRIORITY +#define MMEA1_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0 +#define MMEA1_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4 +#define MMEA1_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8 +#define MMEA1_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc +#define MMEA1_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10 +#define MMEA1_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14 +#define MMEA1_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18 +#define MMEA1_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c +#define MMEA1_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL +#define MMEA1_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L +#define MMEA1_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L +#define MMEA1_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L +#define MMEA1_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L +#define MMEA1_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L +#define MMEA1_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L +#define MMEA1_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L +//MMEA1_SDP_CREDITS +#define MMEA1_SDP_CREDITS__TAG_LIMIT__SHIFT 0x0 +#define MMEA1_SDP_CREDITS__WR_RESP_CREDITS__SHIFT 0x8 +#define MMEA1_SDP_CREDITS__RD_RESP_CREDITS__SHIFT 0x10 +#define MMEA1_SDP_CREDITS__TAG_LIMIT_MASK 0x000000FFL +#define MMEA1_SDP_CREDITS__WR_RESP_CREDITS_MASK 0x00007F00L +#define MMEA1_SDP_CREDITS__RD_RESP_CREDITS_MASK 0x007F0000L +//MMEA1_SDP_TAG_RESERVE0 +#define MMEA1_SDP_TAG_RESERVE0__VC0__SHIFT 0x0 +#define MMEA1_SDP_TAG_RESERVE0__VC1__SHIFT 0x8 +#define MMEA1_SDP_TAG_RESERVE0__VC2__SHIFT 0x10 +#define MMEA1_SDP_TAG_RESERVE0__VC3__SHIFT 0x18 +#define MMEA1_SDP_TAG_RESERVE0__VC0_MASK 0x000000FFL +#define MMEA1_SDP_TAG_RESERVE0__VC1_MASK 0x0000FF00L +#define MMEA1_SDP_TAG_RESERVE0__VC2_MASK 0x00FF0000L +#define MMEA1_SDP_TAG_RESERVE0__VC3_MASK 0xFF000000L +//MMEA1_SDP_TAG_RESERVE1 +#define MMEA1_SDP_TAG_RESERVE1__VC4__SHIFT 0x0 +#define MMEA1_SDP_TAG_RESERVE1__VC5__SHIFT 0x8 +#define MMEA1_SDP_TAG_RESERVE1__VC6__SHIFT 0x10 +#define MMEA1_SDP_TAG_RESERVE1__VC7__SHIFT 0x18 +#define MMEA1_SDP_TAG_RESERVE1__VC4_MASK 0x000000FFL +#define MMEA1_SDP_TAG_RESERVE1__VC5_MASK 0x0000FF00L +#define MMEA1_SDP_TAG_RESERVE1__VC6_MASK 0x00FF0000L +#define MMEA1_SDP_TAG_RESERVE1__VC7_MASK 0xFF000000L +//MMEA1_SDP_VCC_RESERVE0 +#define MMEA1_SDP_VCC_RESERVE0__VC0_CREDITS__SHIFT 0x0 +#define MMEA1_SDP_VCC_RESERVE0__VC1_CREDITS__SHIFT 0x6 +#define MMEA1_SDP_VCC_RESERVE0__VC2_CREDITS__SHIFT 0xc +#define MMEA1_SDP_VCC_RESERVE0__VC3_CREDITS__SHIFT 0x12 +#define MMEA1_SDP_VCC_RESERVE0__VC4_CREDITS__SHIFT 0x18 +#define MMEA1_SDP_VCC_RESERVE0__VC0_CREDITS_MASK 0x0000003FL +#define MMEA1_SDP_VCC_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L +#define MMEA1_SDP_VCC_RESERVE0__VC2_CREDITS_MASK 0x0003F000L +#define MMEA1_SDP_VCC_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L +#define MMEA1_SDP_VCC_RESERVE0__VC4_CREDITS_MASK 0x3F000000L +//MMEA1_SDP_VCC_RESERVE1 
+#define MMEA1_SDP_VCC_RESERVE1__VC5_CREDITS__SHIFT 0x0 +#define MMEA1_SDP_VCC_RESERVE1__VC6_CREDITS__SHIFT 0x6 +#define MMEA1_SDP_VCC_RESERVE1__VC7_CREDITS__SHIFT 0xc +#define MMEA1_SDP_VCC_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f +#define MMEA1_SDP_VCC_RESERVE1__VC5_CREDITS_MASK 0x0000003FL +#define MMEA1_SDP_VCC_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L +#define MMEA1_SDP_VCC_RESERVE1__VC7_CREDITS_MASK 0x0003F000L +#define MMEA1_SDP_VCC_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L +//MMEA1_SDP_VCD_RESERVE0 +#define MMEA1_SDP_VCD_RESERVE0__VC0_CREDITS__SHIFT 0x0 +#define MMEA1_SDP_VCD_RESERVE0__VC1_CREDITS__SHIFT 0x6 +#define MMEA1_SDP_VCD_RESERVE0__VC2_CREDITS__SHIFT 0xc +#define MMEA1_SDP_VCD_RESERVE0__VC3_CREDITS__SHIFT 0x12 +#define MMEA1_SDP_VCD_RESERVE0__VC4_CREDITS__SHIFT 0x18 +#define MMEA1_SDP_VCD_RESERVE0__VC0_CREDITS_MASK 0x0000003FL +#define MMEA1_SDP_VCD_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L +#define MMEA1_SDP_VCD_RESERVE0__VC2_CREDITS_MASK 0x0003F000L +#define MMEA1_SDP_VCD_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L +#define MMEA1_SDP_VCD_RESERVE0__VC4_CREDITS_MASK 0x3F000000L +//MMEA1_SDP_VCD_RESERVE1 +#define MMEA1_SDP_VCD_RESERVE1__VC5_CREDITS__SHIFT 0x0 +#define MMEA1_SDP_VCD_RESERVE1__VC6_CREDITS__SHIFT 0x6 +#define MMEA1_SDP_VCD_RESERVE1__VC7_CREDITS__SHIFT 0xc +#define MMEA1_SDP_VCD_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f +#define MMEA1_SDP_VCD_RESERVE1__VC5_CREDITS_MASK 0x0000003FL +#define MMEA1_SDP_VCD_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L +#define MMEA1_SDP_VCD_RESERVE1__VC7_CREDITS_MASK 0x0003F000L +#define MMEA1_SDP_VCD_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L +//MMEA1_SDP_REQ_CNTL +#define MMEA1_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ__SHIFT 0x0 +#define MMEA1_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE__SHIFT 0x1 +#define MMEA1_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC__SHIFT 0x2 +#define MMEA1_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM__SHIFT 0x3 +#define MMEA1_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI__SHIFT 0x4 +#define MMEA1_SDP_REQ_CNTL__INNER_DOMAIN_MODE__SHIFT 0x5 +#define MMEA1_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ_MASK 0x00000001L +#define MMEA1_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE_MASK 0x00000002L +#define MMEA1_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC_MASK 0x00000004L +#define MMEA1_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM_MASK 0x00000008L +#define MMEA1_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI_MASK 0x00000010L +#define MMEA1_SDP_REQ_CNTL__INNER_DOMAIN_MODE_MASK 0x00000020L +//MMEA1_MISC +#define MMEA1_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB__SHIFT 0x0 +#define MMEA1_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB__SHIFT 0x1 +#define MMEA1_MISC__RELATIVE_PRI_IN_GMI_RD_ARB__SHIFT 0x2 +#define MMEA1_MISC__RELATIVE_PRI_IN_GMI_WR_ARB__SHIFT 0x3 +#define MMEA1_MISC__RELATIVE_PRI_IN_IO_RD_ARB__SHIFT 0x4 +#define MMEA1_MISC__RELATIVE_PRI_IN_IO_WR_ARB__SHIFT 0x5 +#define MMEA1_MISC__EARLYWRRET_ENABLE_VC0__SHIFT 0x6 +#define MMEA1_MISC__EARLYWRRET_ENABLE_VC1__SHIFT 0x7 +#define MMEA1_MISC__EARLYWRRET_ENABLE_VC2__SHIFT 0x8 +#define MMEA1_MISC__EARLYWRRET_ENABLE_VC3__SHIFT 0x9 +#define MMEA1_MISC__EARLYWRRET_ENABLE_VC4__SHIFT 0xa +#define MMEA1_MISC__EARLYWRRET_ENABLE_VC5__SHIFT 0xb +#define MMEA1_MISC__EARLYWRRET_ENABLE_VC6__SHIFT 0xc +#define MMEA1_MISC__EARLYWRRET_ENABLE_VC7__SHIFT 0xd +#define MMEA1_MISC__EARLY_SDP_ORIGDATA__SHIFT 0xe +#define MMEA1_MISC__LINKMGR_DYNAMIC_MODE__SHIFT 0xf +#define MMEA1_MISC__LINKMGR_HALT_THRESHOLD__SHIFT 0x11 +#define MMEA1_MISC__LINKMGR_RECONNECT_DELAY__SHIFT 0x13 +#define MMEA1_MISC__LINKMGR_IDLE_THRESHOLD__SHIFT 0x15 +#define 
MMEA1_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB__SHIFT 0x1a +#define MMEA1_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB__SHIFT 0x1b +#define MMEA1_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB__SHIFT 0x1c +#define MMEA1_MISC__FAVOUR_LAST_CS_IN_GMI_ARB__SHIFT 0x1d +#define MMEA1_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB__SHIFT 0x1e +#define MMEA1_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB__SHIFT 0x1f +#define MMEA1_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB_MASK 0x00000001L +#define MMEA1_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB_MASK 0x00000002L +#define MMEA1_MISC__RELATIVE_PRI_IN_GMI_RD_ARB_MASK 0x00000004L +#define MMEA1_MISC__RELATIVE_PRI_IN_GMI_WR_ARB_MASK 0x00000008L +#define MMEA1_MISC__RELATIVE_PRI_IN_IO_RD_ARB_MASK 0x00000010L +#define MMEA1_MISC__RELATIVE_PRI_IN_IO_WR_ARB_MASK 0x00000020L +#define MMEA1_MISC__EARLYWRRET_ENABLE_VC0_MASK 0x00000040L +#define MMEA1_MISC__EARLYWRRET_ENABLE_VC1_MASK 0x00000080L +#define MMEA1_MISC__EARLYWRRET_ENABLE_VC2_MASK 0x00000100L +#define MMEA1_MISC__EARLYWRRET_ENABLE_VC3_MASK 0x00000200L +#define MMEA1_MISC__EARLYWRRET_ENABLE_VC4_MASK 0x00000400L +#define MMEA1_MISC__EARLYWRRET_ENABLE_VC5_MASK 0x00000800L +#define MMEA1_MISC__EARLYWRRET_ENABLE_VC6_MASK 0x00001000L +#define MMEA1_MISC__EARLYWRRET_ENABLE_VC7_MASK 0x00002000L +#define MMEA1_MISC__EARLY_SDP_ORIGDATA_MASK 0x00004000L +#define MMEA1_MISC__LINKMGR_DYNAMIC_MODE_MASK 0x00018000L +#define MMEA1_MISC__LINKMGR_HALT_THRESHOLD_MASK 0x00060000L +#define MMEA1_MISC__LINKMGR_RECONNECT_DELAY_MASK 0x00180000L +#define MMEA1_MISC__LINKMGR_IDLE_THRESHOLD_MASK 0x03E00000L +#define MMEA1_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB_MASK 0x04000000L +#define MMEA1_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB_MASK 0x08000000L +#define MMEA1_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB_MASK 0x10000000L +#define MMEA1_MISC__FAVOUR_LAST_CS_IN_GMI_ARB_MASK 0x20000000L +#define MMEA1_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB_MASK 0x40000000L +#define MMEA1_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB_MASK 0x80000000L +//MMEA1_LATENCY_SAMPLING +#define MMEA1_LATENCY_SAMPLING__SAMPLER0_DRAM__SHIFT 0x0 +#define MMEA1_LATENCY_SAMPLING__SAMPLER1_DRAM__SHIFT 0x1 +#define MMEA1_LATENCY_SAMPLING__SAMPLER0_GMI__SHIFT 0x2 +#define MMEA1_LATENCY_SAMPLING__SAMPLER1_GMI__SHIFT 0x3 +#define MMEA1_LATENCY_SAMPLING__SAMPLER0_IO__SHIFT 0x4 +#define MMEA1_LATENCY_SAMPLING__SAMPLER1_IO__SHIFT 0x5 +#define MMEA1_LATENCY_SAMPLING__SAMPLER0_READ__SHIFT 0x6 +#define MMEA1_LATENCY_SAMPLING__SAMPLER1_READ__SHIFT 0x7 +#define MMEA1_LATENCY_SAMPLING__SAMPLER0_WRITE__SHIFT 0x8 +#define MMEA1_LATENCY_SAMPLING__SAMPLER1_WRITE__SHIFT 0x9 +#define MMEA1_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET__SHIFT 0xa +#define MMEA1_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET__SHIFT 0xb +#define MMEA1_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET__SHIFT 0xc +#define MMEA1_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET__SHIFT 0xd +#define MMEA1_LATENCY_SAMPLING__SAMPLER0_VC__SHIFT 0xe +#define MMEA1_LATENCY_SAMPLING__SAMPLER1_VC__SHIFT 0x16 +#define MMEA1_LATENCY_SAMPLING__SAMPLER0_DRAM_MASK 0x00000001L +#define MMEA1_LATENCY_SAMPLING__SAMPLER1_DRAM_MASK 0x00000002L +#define MMEA1_LATENCY_SAMPLING__SAMPLER0_GMI_MASK 0x00000004L +#define MMEA1_LATENCY_SAMPLING__SAMPLER1_GMI_MASK 0x00000008L +#define MMEA1_LATENCY_SAMPLING__SAMPLER0_IO_MASK 0x00000010L +#define MMEA1_LATENCY_SAMPLING__SAMPLER1_IO_MASK 0x00000020L +#define MMEA1_LATENCY_SAMPLING__SAMPLER0_READ_MASK 0x00000040L +#define MMEA1_LATENCY_SAMPLING__SAMPLER1_READ_MASK 0x00000080L +#define MMEA1_LATENCY_SAMPLING__SAMPLER0_WRITE_MASK 0x00000100L +#define MMEA1_LATENCY_SAMPLING__SAMPLER1_WRITE_MASK 0x00000200L +#define 
MMEA1_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET_MASK 0x00000400L +#define MMEA1_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET_MASK 0x00000800L +#define MMEA1_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET_MASK 0x00001000L +#define MMEA1_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET_MASK 0x00002000L +#define MMEA1_LATENCY_SAMPLING__SAMPLER0_VC_MASK 0x003FC000L +#define MMEA1_LATENCY_SAMPLING__SAMPLER1_VC_MASK 0x3FC00000L +//MMEA1_PERFCOUNTER_LO +#define MMEA1_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0 +#define MMEA1_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL +//MMEA1_PERFCOUNTER_HI +#define MMEA1_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0 +#define MMEA1_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10 +#define MMEA1_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL +#define MMEA1_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L +//MMEA1_PERFCOUNTER0_CFG +#define MMEA1_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0 +#define MMEA1_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8 +#define MMEA1_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18 +#define MMEA1_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c +#define MMEA1_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d +#define MMEA1_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL +#define MMEA1_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define MMEA1_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L +#define MMEA1_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L +#define MMEA1_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L +//MMEA1_PERFCOUNTER1_CFG +#define MMEA1_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0 +#define MMEA1_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8 +#define MMEA1_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18 +#define MMEA1_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c +#define MMEA1_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d +#define MMEA1_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL +#define MMEA1_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define MMEA1_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L +#define MMEA1_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L +#define MMEA1_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L +//MMEA1_PERFCOUNTER_RSLT_CNTL +#define MMEA1_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0 +#define MMEA1_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8 +#define MMEA1_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10 +#define MMEA1_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18 +#define MMEA1_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19 +#define MMEA1_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a +#define MMEA1_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL +#define MMEA1_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L +#define MMEA1_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L +#define MMEA1_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L +#define MMEA1_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L +#define MMEA1_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L +//MMEA1_EDC_CNT +#define MMEA1_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0 +#define MMEA1_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2 +#define MMEA1_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4 +#define MMEA1_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6 +#define MMEA1_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8 +#define MMEA1_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa +#define MMEA1_EDC_CNT__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc +#define MMEA1_EDC_CNT__RRET_TAGMEM_DED_COUNT__SHIFT 0xe +#define MMEA1_EDC_CNT__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10 +#define MMEA1_EDC_CNT__WRET_TAGMEM_DED_COUNT__SHIFT 0x12 +#define MMEA1_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x14 +#define MMEA1_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x16 +#define 
MMEA1_EDC_CNT__IORD_CMDMEM_SED_COUNT__SHIFT 0x18 +#define MMEA1_EDC_CNT__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1a +#define MMEA1_EDC_CNT__IOWR_DATAMEM_SED_COUNT__SHIFT 0x1c +#define MMEA1_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT_MASK 0x00000003L +#define MMEA1_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL +#define MMEA1_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L +#define MMEA1_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L +#define MMEA1_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L +#define MMEA1_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L +#define MMEA1_EDC_CNT__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L +#define MMEA1_EDC_CNT__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L +#define MMEA1_EDC_CNT__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L +#define MMEA1_EDC_CNT__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L +#define MMEA1_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x00300000L +#define MMEA1_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x00C00000L +#define MMEA1_EDC_CNT__IORD_CMDMEM_SED_COUNT_MASK 0x03000000L +#define MMEA1_EDC_CNT__IOWR_CMDMEM_SED_COUNT_MASK 0x0C000000L +#define MMEA1_EDC_CNT__IOWR_DATAMEM_SED_COUNT_MASK 0x30000000L +//MMEA1_EDC_CNT2 +#define MMEA1_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0 +#define MMEA1_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2 +#define MMEA1_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4 +#define MMEA1_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6 +#define MMEA1_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8 +#define MMEA1_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa +#define MMEA1_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc +#define MMEA1_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe +#define MMEA1_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L +#define MMEA1_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL +#define MMEA1_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L +#define MMEA1_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT_MASK 0x000000C0L +#define MMEA1_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L +#define MMEA1_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L +#define MMEA1_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L +#define MMEA1_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L +//MMEA1_DSM_CNTL +#define MMEA1_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 +#define MMEA1_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 +#define MMEA1_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x3 +#define MMEA1_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5 +#define MMEA1_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x6 +#define MMEA1_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8 +#define MMEA1_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0x9 +#define MMEA1_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb +#define MMEA1_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0xc +#define MMEA1_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe +#define MMEA1_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0xf +#define MMEA1_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11 +#define MMEA1_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x12 +#define MMEA1_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14 +#define MMEA1_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x15 +#define MMEA1_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x17 +#define MMEA1_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L +#define MMEA1_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L +#define MMEA1_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L +#define 
MMEA1_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L +#define MMEA1_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L +#define MMEA1_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L +#define MMEA1_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L +#define MMEA1_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L +#define MMEA1_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L +#define MMEA1_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L +#define MMEA1_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L +#define MMEA1_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L +#define MMEA1_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L +#define MMEA1_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L +#define MMEA1_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00600000L +#define MMEA1_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00800000L +//MMEA1_DSM_CNTLA +#define MMEA1_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 +#define MMEA1_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 +#define MMEA1_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x3 +#define MMEA1_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5 +#define MMEA1_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x6 +#define MMEA1_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8 +#define MMEA1_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x9 +#define MMEA1_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb +#define MMEA1_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0xc +#define MMEA1_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe +#define MMEA1_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0xf +#define MMEA1_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11 +#define MMEA1_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x12 +#define MMEA1_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14 +#define MMEA1_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L +#define MMEA1_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L +#define MMEA1_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L +#define MMEA1_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L +#define MMEA1_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L +#define MMEA1_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L +#define MMEA1_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L +#define MMEA1_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L +#define MMEA1_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L +#define MMEA1_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L +#define MMEA1_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L +#define MMEA1_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L +#define MMEA1_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L +#define MMEA1_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L +//MMEA1_DSM_CNTL2 +#define MMEA1_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x0 +#define MMEA1_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x2 +#define MMEA1_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x3 +#define MMEA1_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x5 +#define MMEA1_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x6 +#define MMEA1_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x8 +#define MMEA1_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0x9 
+#define MMEA1_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xb +#define MMEA1_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0xc +#define MMEA1_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xe +#define MMEA1_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0xf +#define MMEA1_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x11 +#define MMEA1_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x12 +#define MMEA1_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x14 +#define MMEA1_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x15 +#define MMEA1_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x17 +#define MMEA1_DSM_CNTL2__INJECT_DELAY__SHIFT 0x1a +#define MMEA1_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L +#define MMEA1_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000004L +#define MMEA1_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L +#define MMEA1_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000020L +#define MMEA1_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L +#define MMEA1_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00000100L +#define MMEA1_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L +#define MMEA1_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00000800L +#define MMEA1_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L +#define MMEA1_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00004000L +#define MMEA1_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L +#define MMEA1_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00020000L +#define MMEA1_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L +#define MMEA1_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00100000L +#define MMEA1_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00600000L +#define MMEA1_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00800000L +#define MMEA1_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L +//MMEA1_DSM_CNTL2A +#define MMEA1_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x0 +#define MMEA1_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x2 +#define MMEA1_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x3 +#define MMEA1_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x5 +#define MMEA1_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x6 +#define MMEA1_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x8 +#define MMEA1_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x9 +#define MMEA1_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0xb +#define MMEA1_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0xc +#define MMEA1_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0xe +#define MMEA1_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0xf +#define MMEA1_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x11 +#define MMEA1_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x12 +#define MMEA1_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x14 +#define MMEA1_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L +#define MMEA1_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000004L +#define MMEA1_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L +#define MMEA1_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000020L +#define MMEA1_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L +#define MMEA1_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000100L +#define MMEA1_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L +#define 
MMEA1_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000800L +#define MMEA1_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L +#define MMEA1_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00004000L +#define MMEA1_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L +#define MMEA1_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00020000L +#define MMEA1_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L +#define MMEA1_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00100000L +//MMEA1_CGTT_CLK_CTRL +#define MMEA1_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define MMEA1_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define MMEA1_CGTT_CLK_CTRL__SPARE0__SHIFT 0xc +#define MMEA1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE__SHIFT 0x14 +#define MMEA1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ__SHIFT 0x15 +#define MMEA1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN__SHIFT 0x16 +#define MMEA1_CGTT_CLK_CTRL__SPARE1__SHIFT 0x17 +#define MMEA1_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define MMEA1_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE__SHIFT 0x1c +#define MMEA1_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ__SHIFT 0x1d +#define MMEA1_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN__SHIFT 0x1e +#define MMEA1_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER__SHIFT 0x1f +#define MMEA1_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define MMEA1_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define MMEA1_CGTT_CLK_CTRL__SPARE0_MASK 0x000FF000L +#define MMEA1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE_MASK 0x00100000L +#define MMEA1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ_MASK 0x00200000L +#define MMEA1_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN_MASK 0x00400000L +#define MMEA1_CGTT_CLK_CTRL__SPARE1_MASK 0x07800000L +#define MMEA1_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define MMEA1_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE_MASK 0x10000000L +#define MMEA1_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ_MASK 0x20000000L +#define MMEA1_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN_MASK 0x40000000L +#define MMEA1_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER_MASK 0x80000000L +//MMEA1_EDC_MODE +#define MMEA1_EDC_MODE__COUNT_FED_OUT__SHIFT 0x10 +#define MMEA1_EDC_MODE__GATE_FUE__SHIFT 0x11 +#define MMEA1_EDC_MODE__DED_MODE__SHIFT 0x14 +#define MMEA1_EDC_MODE__PROP_FED__SHIFT 0x1d +#define MMEA1_EDC_MODE__BYPASS__SHIFT 0x1f +#define MMEA1_EDC_MODE__COUNT_FED_OUT_MASK 0x00010000L +#define MMEA1_EDC_MODE__GATE_FUE_MASK 0x00020000L +#define MMEA1_EDC_MODE__DED_MODE_MASK 0x00300000L +#define MMEA1_EDC_MODE__PROP_FED_MASK 0x20000000L +#define MMEA1_EDC_MODE__BYPASS_MASK 0x80000000L +//MMEA1_ERR_STATUS +#define MMEA1_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0 +#define MMEA1_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT 0x4 +#define MMEA1_ERR_STATUS__SDP_RDRSP_DATASTATUS__SHIFT 0x8 +#define MMEA1_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT 0xa +#define MMEA1_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xb +#define MMEA1_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xc +#define MMEA1_ERR_STATUS__FUE_FLAG__SHIFT 0xd +#define MMEA1_ERR_STATUS__SDP_RDRSP_STATUS_MASK 0x0000000FL +#define MMEA1_ERR_STATUS__SDP_WRRSP_STATUS_MASK 0x000000F0L +#define MMEA1_ERR_STATUS__SDP_RDRSP_DATASTATUS_MASK 0x00000300L +#define MMEA1_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L +#define MMEA1_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00000800L +#define MMEA1_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00001000L +#define MMEA1_ERR_STATUS__FUE_FLAG_MASK 0x00002000L +//MMEA1_MISC2 +#define MMEA1_MISC2__CSGROUP_SWAP_IN_DRAM_ARB__SHIFT 0x0 +#define MMEA1_MISC2__CSGROUP_SWAP_IN_GMI_ARB__SHIFT 0x1 +#define 
MMEA1_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM__SHIFT 0x2 +#define MMEA1_MISC2__CSGRP_BURST_LIMIT_DATA_GMI__SHIFT 0x7 +#define MMEA1_MISC2__IO_RDWR_PRIORITY_ENABLE__SHIFT 0xc +#define MMEA1_MISC2__RRET_SWAP_MODE__SHIFT 0xd +#define MMEA1_MISC2__CSGROUP_SWAP_IN_DRAM_ARB_MASK 0x00000001L +#define MMEA1_MISC2__CSGROUP_SWAP_IN_GMI_ARB_MASK 0x00000002L +#define MMEA1_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM_MASK 0x0000007CL +#define MMEA1_MISC2__CSGRP_BURST_LIMIT_DATA_GMI_MASK 0x00000F80L +#define MMEA1_MISC2__IO_RDWR_PRIORITY_ENABLE_MASK 0x00001000L +#define MMEA1_MISC2__RRET_SWAP_MODE_MASK 0x00002000L +//MMEA1_ADDRDEC_SELECT +#define MMEA1_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_START__SHIFT 0x0 +#define MMEA1_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_END__SHIFT 0x5 +#define MMEA1_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_START__SHIFT 0xa +#define MMEA1_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_END__SHIFT 0xf +#define MMEA1_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_START_MASK 0x0000001FL +#define MMEA1_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_END_MASK 0x000003E0L +#define MMEA1_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_START_MASK 0x00007C00L +#define MMEA1_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_END_MASK 0x000F8000L +//MMEA1_EDC_CNT3 +#define MMEA1_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT__SHIFT 0x0 +#define MMEA1_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT__SHIFT 0x2 +#define MMEA1_EDC_CNT3__IORD_CMDMEM_DED_COUNT__SHIFT 0x4 +#define MMEA1_EDC_CNT3__IOWR_CMDMEM_DED_COUNT__SHIFT 0x6 +#define MMEA1_EDC_CNT3__IOWR_DATAMEM_DED_COUNT__SHIFT 0x8 +#define MMEA1_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT__SHIFT 0xa +#define MMEA1_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT__SHIFT 0xc +#define MMEA1_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT_MASK 0x00000003L +#define MMEA1_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT_MASK 0x0000000CL +#define MMEA1_EDC_CNT3__IORD_CMDMEM_DED_COUNT_MASK 0x00000030L +#define MMEA1_EDC_CNT3__IOWR_CMDMEM_DED_COUNT_MASK 0x000000C0L +#define MMEA1_EDC_CNT3__IOWR_DATAMEM_DED_COUNT_MASK 0x00000300L +#define MMEA1_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT_MASK 0x00000C00L +#define MMEA1_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT_MASK 0x00003000L + + +// addressBlock: mmhub_ea_mmeadec2 +//MMEA2_DRAM_RD_CLI2GRP_MAP0 +#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0 +#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2 +#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4 +#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6 +#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8 +#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa +#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc +#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe +#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10 +#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12 +#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14 +#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16 +#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18 +#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a +#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c +#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e +#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L +#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL +#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L +#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L +#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L +#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L +#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 
0x00003000L +#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L +#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L +#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L +#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L +#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L +#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L +#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L +#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L +#define MMEA2_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L +//MMEA2_DRAM_RD_CLI2GRP_MAP1 +#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0 +#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2 +#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4 +#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6 +#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8 +#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa +#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc +#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe +#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10 +#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12 +#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14 +#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16 +#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18 +#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a +#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c +#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e +#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L +#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL +#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L +#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L +#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L +#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L +#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L +#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L +#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L +#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L +#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L +#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L +#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L +#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L +#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L +#define MMEA2_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L +//MMEA2_DRAM_WR_CLI2GRP_MAP0 +#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0 +#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2 +#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4 +#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6 +#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8 +#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa +#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc +#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe +#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10 +#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12 +#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14 +#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16 +#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18 +#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a +#define 
MMEA2_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c +#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e +#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L +#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL +#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L +#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L +#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L +#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L +#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L +#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L +#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L +#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L +#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L +#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L +#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L +#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L +#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L +#define MMEA2_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L +//MMEA2_DRAM_WR_CLI2GRP_MAP1 +#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0 +#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2 +#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4 +#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6 +#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8 +#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa +#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc +#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe +#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10 +#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12 +#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14 +#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16 +#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18 +#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a +#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c +#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e +#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L +#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL +#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L +#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L +#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L +#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L +#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L +#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L +#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L +#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L +#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L +#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L +#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L +#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L +#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L +#define MMEA2_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L +//MMEA2_DRAM_RD_GRP2VC_MAP +#define MMEA2_DRAM_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0 +#define MMEA2_DRAM_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3 +#define MMEA2_DRAM_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6 +#define MMEA2_DRAM_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9 +#define MMEA2_DRAM_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L +#define 
MMEA2_DRAM_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L +#define MMEA2_DRAM_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L +#define MMEA2_DRAM_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L +//MMEA2_DRAM_WR_GRP2VC_MAP +#define MMEA2_DRAM_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0 +#define MMEA2_DRAM_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3 +#define MMEA2_DRAM_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6 +#define MMEA2_DRAM_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9 +#define MMEA2_DRAM_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L +#define MMEA2_DRAM_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L +#define MMEA2_DRAM_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L +#define MMEA2_DRAM_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L +//MMEA2_DRAM_RD_LAZY +#define MMEA2_DRAM_RD_LAZY__GROUP0_DELAY__SHIFT 0x0 +#define MMEA2_DRAM_RD_LAZY__GROUP1_DELAY__SHIFT 0x3 +#define MMEA2_DRAM_RD_LAZY__GROUP2_DELAY__SHIFT 0x6 +#define MMEA2_DRAM_RD_LAZY__GROUP3_DELAY__SHIFT 0x9 +#define MMEA2_DRAM_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc +#define MMEA2_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14 +#define MMEA2_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b +#define MMEA2_DRAM_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L +#define MMEA2_DRAM_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L +#define MMEA2_DRAM_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L +#define MMEA2_DRAM_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L +#define MMEA2_DRAM_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L +#define MMEA2_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L +#define MMEA2_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L +//MMEA2_DRAM_WR_LAZY +#define MMEA2_DRAM_WR_LAZY__GROUP0_DELAY__SHIFT 0x0 +#define MMEA2_DRAM_WR_LAZY__GROUP1_DELAY__SHIFT 0x3 +#define MMEA2_DRAM_WR_LAZY__GROUP2_DELAY__SHIFT 0x6 +#define MMEA2_DRAM_WR_LAZY__GROUP3_DELAY__SHIFT 0x9 +#define MMEA2_DRAM_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc +#define MMEA2_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14 +#define MMEA2_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b +#define MMEA2_DRAM_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L +#define MMEA2_DRAM_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L +#define MMEA2_DRAM_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L +#define MMEA2_DRAM_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L +#define MMEA2_DRAM_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L +#define MMEA2_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L +#define MMEA2_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L +//MMEA2_DRAM_RD_CAM_CNTL +#define MMEA2_DRAM_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0 +#define MMEA2_DRAM_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4 +#define MMEA2_DRAM_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8 +#define MMEA2_DRAM_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc +#define MMEA2_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10 +#define MMEA2_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13 +#define MMEA2_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16 +#define MMEA2_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19 +#define MMEA2_DRAM_RD_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c +#define MMEA2_DRAM_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL +#define MMEA2_DRAM_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L +#define MMEA2_DRAM_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L +#define MMEA2_DRAM_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L +#define MMEA2_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L +#define MMEA2_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L +#define MMEA2_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L +#define MMEA2_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L +#define MMEA2_DRAM_RD_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L +//MMEA2_DRAM_WR_CAM_CNTL 
+#define MMEA2_DRAM_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0 +#define MMEA2_DRAM_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4 +#define MMEA2_DRAM_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8 +#define MMEA2_DRAM_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc +#define MMEA2_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10 +#define MMEA2_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13 +#define MMEA2_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16 +#define MMEA2_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19 +#define MMEA2_DRAM_WR_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c +#define MMEA2_DRAM_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL +#define MMEA2_DRAM_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L +#define MMEA2_DRAM_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L +#define MMEA2_DRAM_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L +#define MMEA2_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L +#define MMEA2_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L +#define MMEA2_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L +#define MMEA2_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L +#define MMEA2_DRAM_WR_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L +//MMEA2_DRAM_PAGE_BURST +#define MMEA2_DRAM_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0 +#define MMEA2_DRAM_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8 +#define MMEA2_DRAM_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10 +#define MMEA2_DRAM_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18 +#define MMEA2_DRAM_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL +#define MMEA2_DRAM_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L +#define MMEA2_DRAM_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L +#define MMEA2_DRAM_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L +//MMEA2_DRAM_RD_PRI_AGE +#define MMEA2_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0 +#define MMEA2_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3 +#define MMEA2_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6 +#define MMEA2_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9 +#define MMEA2_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc +#define MMEA2_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf +#define MMEA2_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12 +#define MMEA2_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15 +#define MMEA2_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L +#define MMEA2_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L +#define MMEA2_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L +#define MMEA2_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L +#define MMEA2_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L +#define MMEA2_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L +#define MMEA2_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L +#define MMEA2_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L +//MMEA2_DRAM_WR_PRI_AGE +#define MMEA2_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0 +#define MMEA2_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3 +#define MMEA2_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6 +#define MMEA2_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9 +#define MMEA2_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc +#define MMEA2_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf +#define MMEA2_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12 +#define MMEA2_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15 +#define MMEA2_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L +#define MMEA2_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L +#define MMEA2_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L +#define MMEA2_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L +#define 
MMEA2_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L +#define MMEA2_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L +#define MMEA2_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L +#define MMEA2_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L +//MMEA2_DRAM_RD_PRI_QUEUING +#define MMEA2_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0 +#define MMEA2_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3 +#define MMEA2_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6 +#define MMEA2_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9 +#define MMEA2_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L +#define MMEA2_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L +#define MMEA2_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L +#define MMEA2_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L +//MMEA2_DRAM_WR_PRI_QUEUING +#define MMEA2_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0 +#define MMEA2_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3 +#define MMEA2_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6 +#define MMEA2_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9 +#define MMEA2_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L +#define MMEA2_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L +#define MMEA2_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L +#define MMEA2_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L +//MMEA2_DRAM_RD_PRI_FIXED +#define MMEA2_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0 +#define MMEA2_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3 +#define MMEA2_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6 +#define MMEA2_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9 +#define MMEA2_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L +#define MMEA2_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L +#define MMEA2_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L +#define MMEA2_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L +//MMEA2_DRAM_WR_PRI_FIXED +#define MMEA2_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0 +#define MMEA2_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3 +#define MMEA2_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6 +#define MMEA2_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9 +#define MMEA2_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L +#define MMEA2_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L +#define MMEA2_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L +#define MMEA2_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L +//MMEA2_DRAM_RD_PRI_URGENCY +#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0 +#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3 +#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6 +#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9 +#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc +#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd +#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe +#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf +#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L +#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L +#define 
MMEA2_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L +#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L +#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L +#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L +#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L +#define MMEA2_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L +//MMEA2_DRAM_WR_PRI_URGENCY +#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0 +#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3 +#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6 +#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9 +#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc +#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd +#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe +#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf +#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L +#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L +#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L +#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L +#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L +#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L +#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L +#define MMEA2_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L +//MMEA2_DRAM_RD_PRI_QUANT_PRI1 +#define MMEA2_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA2_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA2_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA2_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA2_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA2_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA2_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA2_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA2_DRAM_RD_PRI_QUANT_PRI2 +#define MMEA2_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA2_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA2_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA2_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA2_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA2_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA2_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA2_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA2_DRAM_RD_PRI_QUANT_PRI3 +#define MMEA2_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA2_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA2_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA2_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA2_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA2_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA2_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA2_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA2_DRAM_WR_PRI_QUANT_PRI1 +#define MMEA2_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0 +#define 
MMEA2_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA2_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA2_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA2_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA2_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA2_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA2_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA2_DRAM_WR_PRI_QUANT_PRI2 +#define MMEA2_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA2_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA2_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA2_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA2_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA2_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA2_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA2_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA2_DRAM_WR_PRI_QUANT_PRI3 +#define MMEA2_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA2_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA2_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA2_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA2_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA2_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA2_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA2_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA2_GMI_RD_CLI2GRP_MAP0 +#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0 +#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2 +#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4 +#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6 +#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8 +#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa +#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc +#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe +#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10 +#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12 +#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14 +#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16 +#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18 +#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a +#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c +#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e +#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L +#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL +#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L +#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L +#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L +#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L +#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L +#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L +#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L +#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L +#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L +#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L +#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L +#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L +#define 
MMEA2_GMI_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L +#define MMEA2_GMI_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L +//MMEA2_GMI_RD_CLI2GRP_MAP1 +#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0 +#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2 +#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4 +#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6 +#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8 +#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa +#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc +#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe +#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10 +#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12 +#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14 +#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16 +#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18 +#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a +#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c +#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e +#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L +#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL +#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L +#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L +#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L +#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L +#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L +#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L +#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L +#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L +#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L +#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L +#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L +#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L +#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L +#define MMEA2_GMI_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L +//MMEA2_GMI_WR_CLI2GRP_MAP0 +#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0 +#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2 +#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4 +#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6 +#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8 +#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa +#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc +#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe +#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10 +#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12 +#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14 +#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16 +#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18 +#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a +#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c +#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e +#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L +#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL +#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L +#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L +#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L +#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L +#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 
0x00003000L +#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L +#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L +#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L +#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L +#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L +#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L +#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L +#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L +#define MMEA2_GMI_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L +//MMEA2_GMI_WR_CLI2GRP_MAP1 +#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0 +#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2 +#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4 +#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6 +#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8 +#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa +#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc +#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe +#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10 +#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12 +#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14 +#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16 +#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18 +#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a +#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c +#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e +#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L +#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL +#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L +#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L +#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L +#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L +#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L +#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L +#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L +#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L +#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L +#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L +#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L +#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L +#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L +#define MMEA2_GMI_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L +//MMEA2_GMI_RD_GRP2VC_MAP +#define MMEA2_GMI_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0 +#define MMEA2_GMI_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3 +#define MMEA2_GMI_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6 +#define MMEA2_GMI_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9 +#define MMEA2_GMI_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L +#define MMEA2_GMI_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L +#define MMEA2_GMI_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L +#define MMEA2_GMI_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L +//MMEA2_GMI_WR_GRP2VC_MAP +#define MMEA2_GMI_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0 +#define MMEA2_GMI_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3 +#define MMEA2_GMI_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6 +#define MMEA2_GMI_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9 +#define MMEA2_GMI_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L +#define MMEA2_GMI_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L +#define MMEA2_GMI_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L +#define 
MMEA2_GMI_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L +//MMEA2_GMI_RD_LAZY +#define MMEA2_GMI_RD_LAZY__GROUP0_DELAY__SHIFT 0x0 +#define MMEA2_GMI_RD_LAZY__GROUP1_DELAY__SHIFT 0x3 +#define MMEA2_GMI_RD_LAZY__GROUP2_DELAY__SHIFT 0x6 +#define MMEA2_GMI_RD_LAZY__GROUP3_DELAY__SHIFT 0x9 +#define MMEA2_GMI_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc +#define MMEA2_GMI_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14 +#define MMEA2_GMI_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b +#define MMEA2_GMI_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L +#define MMEA2_GMI_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L +#define MMEA2_GMI_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L +#define MMEA2_GMI_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L +#define MMEA2_GMI_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L +#define MMEA2_GMI_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L +#define MMEA2_GMI_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L +//MMEA2_GMI_WR_LAZY +#define MMEA2_GMI_WR_LAZY__GROUP0_DELAY__SHIFT 0x0 +#define MMEA2_GMI_WR_LAZY__GROUP1_DELAY__SHIFT 0x3 +#define MMEA2_GMI_WR_LAZY__GROUP2_DELAY__SHIFT 0x6 +#define MMEA2_GMI_WR_LAZY__GROUP3_DELAY__SHIFT 0x9 +#define MMEA2_GMI_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc +#define MMEA2_GMI_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14 +#define MMEA2_GMI_WR_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b +#define MMEA2_GMI_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L +#define MMEA2_GMI_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L +#define MMEA2_GMI_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L +#define MMEA2_GMI_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L +#define MMEA2_GMI_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L +#define MMEA2_GMI_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L +#define MMEA2_GMI_WR_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L +//MMEA2_GMI_RD_CAM_CNTL +#define MMEA2_GMI_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0 +#define MMEA2_GMI_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4 +#define MMEA2_GMI_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8 +#define MMEA2_GMI_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc +#define MMEA2_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10 +#define MMEA2_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13 +#define MMEA2_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16 +#define MMEA2_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19 +#define MMEA2_GMI_RD_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c +#define MMEA2_GMI_RD_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d +#define MMEA2_GMI_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL +#define MMEA2_GMI_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L +#define MMEA2_GMI_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L +#define MMEA2_GMI_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L +#define MMEA2_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L +#define MMEA2_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L +#define MMEA2_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L +#define MMEA2_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L +#define MMEA2_GMI_RD_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L +#define MMEA2_GMI_RD_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L +//MMEA2_GMI_WR_CAM_CNTL +#define MMEA2_GMI_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0 +#define MMEA2_GMI_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4 +#define MMEA2_GMI_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8 +#define MMEA2_GMI_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc +#define MMEA2_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10 +#define MMEA2_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13 +#define MMEA2_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16 +#define MMEA2_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19 +#define MMEA2_GMI_WR_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c 
+#define MMEA2_GMI_WR_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d +#define MMEA2_GMI_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL +#define MMEA2_GMI_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L +#define MMEA2_GMI_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L +#define MMEA2_GMI_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L +#define MMEA2_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L +#define MMEA2_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L +#define MMEA2_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L +#define MMEA2_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L +#define MMEA2_GMI_WR_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L +#define MMEA2_GMI_WR_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L +//MMEA2_GMI_PAGE_BURST +#define MMEA2_GMI_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0 +#define MMEA2_GMI_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8 +#define MMEA2_GMI_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10 +#define MMEA2_GMI_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18 +#define MMEA2_GMI_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL +#define MMEA2_GMI_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L +#define MMEA2_GMI_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L +#define MMEA2_GMI_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L +//MMEA2_GMI_RD_PRI_AGE +#define MMEA2_GMI_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0 +#define MMEA2_GMI_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3 +#define MMEA2_GMI_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6 +#define MMEA2_GMI_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9 +#define MMEA2_GMI_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc +#define MMEA2_GMI_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf +#define MMEA2_GMI_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12 +#define MMEA2_GMI_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15 +#define MMEA2_GMI_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L +#define MMEA2_GMI_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L +#define MMEA2_GMI_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L +#define MMEA2_GMI_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L +#define MMEA2_GMI_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L +#define MMEA2_GMI_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L +#define MMEA2_GMI_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L +#define MMEA2_GMI_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L +//MMEA2_GMI_WR_PRI_AGE +#define MMEA2_GMI_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0 +#define MMEA2_GMI_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3 +#define MMEA2_GMI_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6 +#define MMEA2_GMI_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9 +#define MMEA2_GMI_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc +#define MMEA2_GMI_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf +#define MMEA2_GMI_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12 +#define MMEA2_GMI_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15 +#define MMEA2_GMI_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L +#define MMEA2_GMI_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L +#define MMEA2_GMI_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L +#define MMEA2_GMI_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L +#define MMEA2_GMI_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L +#define MMEA2_GMI_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L +#define MMEA2_GMI_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L +#define MMEA2_GMI_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L +//MMEA2_GMI_RD_PRI_QUEUING +#define MMEA2_GMI_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0 +#define MMEA2_GMI_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3 +#define 
MMEA2_GMI_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6 +#define MMEA2_GMI_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9 +#define MMEA2_GMI_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L +#define MMEA2_GMI_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L +#define MMEA2_GMI_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L +#define MMEA2_GMI_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L +//MMEA2_GMI_WR_PRI_QUEUING +#define MMEA2_GMI_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0 +#define MMEA2_GMI_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3 +#define MMEA2_GMI_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6 +#define MMEA2_GMI_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9 +#define MMEA2_GMI_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L +#define MMEA2_GMI_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L +#define MMEA2_GMI_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L +#define MMEA2_GMI_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L +//MMEA2_GMI_RD_PRI_FIXED +#define MMEA2_GMI_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0 +#define MMEA2_GMI_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3 +#define MMEA2_GMI_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6 +#define MMEA2_GMI_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9 +#define MMEA2_GMI_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L +#define MMEA2_GMI_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L +#define MMEA2_GMI_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L +#define MMEA2_GMI_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L +//MMEA2_GMI_WR_PRI_FIXED +#define MMEA2_GMI_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0 +#define MMEA2_GMI_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3 +#define MMEA2_GMI_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6 +#define MMEA2_GMI_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9 +#define MMEA2_GMI_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L +#define MMEA2_GMI_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L +#define MMEA2_GMI_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L +#define MMEA2_GMI_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L +//MMEA2_GMI_RD_PRI_URGENCY +#define MMEA2_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0 +#define MMEA2_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3 +#define MMEA2_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6 +#define MMEA2_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9 +#define MMEA2_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc +#define MMEA2_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd +#define MMEA2_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe +#define MMEA2_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf +#define MMEA2_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L +#define MMEA2_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L +#define MMEA2_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L +#define MMEA2_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L +#define MMEA2_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L +#define MMEA2_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L +#define MMEA2_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L +#define MMEA2_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L +//MMEA2_GMI_WR_PRI_URGENCY +#define 
MMEA2_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0 +#define MMEA2_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3 +#define MMEA2_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6 +#define MMEA2_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9 +#define MMEA2_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc +#define MMEA2_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd +#define MMEA2_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe +#define MMEA2_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf +#define MMEA2_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L +#define MMEA2_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L +#define MMEA2_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L +#define MMEA2_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L +#define MMEA2_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L +#define MMEA2_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L +#define MMEA2_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L +#define MMEA2_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L +//MMEA2_GMI_RD_PRI_URGENCY_MASKING +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0 +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1 +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2 +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3 +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4 +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5 +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6 +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7 +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8 +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9 +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10 +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11 +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12 +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13 +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14 +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15 +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16 +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17 +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18 +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19 +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L 
+#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L +#define MMEA2_GMI_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L +//MMEA2_GMI_WR_PRI_URGENCY_MASKING +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0 +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1 +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2 +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3 +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4 +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5 +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6 +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7 +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8 +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9 +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10 +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11 +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12 +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13 +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14 +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15 +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16 +#define 
MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17 +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18 +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19 +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L +#define MMEA2_GMI_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L +//MMEA2_GMI_RD_PRI_QUANT_PRI1 +#define MMEA2_GMI_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA2_GMI_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA2_GMI_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA2_GMI_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA2_GMI_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA2_GMI_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA2_GMI_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA2_GMI_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA2_GMI_RD_PRI_QUANT_PRI2 +#define MMEA2_GMI_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0 +#define 
MMEA2_GMI_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA2_GMI_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA2_GMI_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA2_GMI_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA2_GMI_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA2_GMI_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA2_GMI_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA2_GMI_RD_PRI_QUANT_PRI3 +#define MMEA2_GMI_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA2_GMI_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA2_GMI_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA2_GMI_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA2_GMI_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA2_GMI_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA2_GMI_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA2_GMI_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA2_GMI_WR_PRI_QUANT_PRI1 +#define MMEA2_GMI_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA2_GMI_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA2_GMI_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA2_GMI_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA2_GMI_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA2_GMI_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA2_GMI_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA2_GMI_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA2_GMI_WR_PRI_QUANT_PRI2 +#define MMEA2_GMI_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA2_GMI_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA2_GMI_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA2_GMI_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA2_GMI_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA2_GMI_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA2_GMI_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA2_GMI_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA2_GMI_WR_PRI_QUANT_PRI3 +#define MMEA2_GMI_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA2_GMI_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA2_GMI_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA2_GMI_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA2_GMI_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA2_GMI_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA2_GMI_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA2_GMI_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA2_ADDRNORM_BASE_ADDR0 +#define MMEA2_ADDRNORM_BASE_ADDR0__ADDR_RNG_VAL__SHIFT 0x0 +#define MMEA2_ADDRNORM_BASE_ADDR0__LGCY_MMIO_HOLE_EN__SHIFT 0x1 +#define MMEA2_ADDRNORM_BASE_ADDR0__INTLV_NUM_CHAN__SHIFT 0x2 +#define MMEA2_ADDRNORM_BASE_ADDR0__INTLV_NUM_DIES__SHIFT 0x6 +#define MMEA2_ADDRNORM_BASE_ADDR0__INTLV_NUM_SOCKETS__SHIFT 0x8 +#define MMEA2_ADDRNORM_BASE_ADDR0__INTLV_ADDR_SEL__SHIFT 0x9 +#define MMEA2_ADDRNORM_BASE_ADDR0__BASE_ADDR__SHIFT 0xc +#define MMEA2_ADDRNORM_BASE_ADDR0__ADDR_RNG_VAL_MASK 0x00000001L +#define MMEA2_ADDRNORM_BASE_ADDR0__LGCY_MMIO_HOLE_EN_MASK 0x00000002L +#define MMEA2_ADDRNORM_BASE_ADDR0__INTLV_NUM_CHAN_MASK 0x0000003CL +#define MMEA2_ADDRNORM_BASE_ADDR0__INTLV_NUM_DIES_MASK 0x000000C0L 
+#define MMEA2_ADDRNORM_BASE_ADDR0__INTLV_NUM_SOCKETS_MASK 0x00000100L +#define MMEA2_ADDRNORM_BASE_ADDR0__INTLV_ADDR_SEL_MASK 0x00000E00L +#define MMEA2_ADDRNORM_BASE_ADDR0__BASE_ADDR_MASK 0xFFFFF000L +//MMEA2_ADDRNORM_LIMIT_ADDR0 +#define MMEA2_ADDRNORM_LIMIT_ADDR0__DST_FABRIC_ID__SHIFT 0x0 +#define MMEA2_ADDRNORM_LIMIT_ADDR0__LIMIT_ADDR__SHIFT 0xc +#define MMEA2_ADDRNORM_LIMIT_ADDR0__DST_FABRIC_ID_MASK 0x0000001FL +#define MMEA2_ADDRNORM_LIMIT_ADDR0__LIMIT_ADDR_MASK 0xFFFFF000L +//MMEA2_ADDRNORM_BASE_ADDR1 +#define MMEA2_ADDRNORM_BASE_ADDR1__ADDR_RNG_VAL__SHIFT 0x0 +#define MMEA2_ADDRNORM_BASE_ADDR1__LGCY_MMIO_HOLE_EN__SHIFT 0x1 +#define MMEA2_ADDRNORM_BASE_ADDR1__INTLV_NUM_CHAN__SHIFT 0x2 +#define MMEA2_ADDRNORM_BASE_ADDR1__INTLV_NUM_DIES__SHIFT 0x6 +#define MMEA2_ADDRNORM_BASE_ADDR1__INTLV_NUM_SOCKETS__SHIFT 0x8 +#define MMEA2_ADDRNORM_BASE_ADDR1__INTLV_ADDR_SEL__SHIFT 0x9 +#define MMEA2_ADDRNORM_BASE_ADDR1__BASE_ADDR__SHIFT 0xc +#define MMEA2_ADDRNORM_BASE_ADDR1__ADDR_RNG_VAL_MASK 0x00000001L +#define MMEA2_ADDRNORM_BASE_ADDR1__LGCY_MMIO_HOLE_EN_MASK 0x00000002L +#define MMEA2_ADDRNORM_BASE_ADDR1__INTLV_NUM_CHAN_MASK 0x0000003CL +#define MMEA2_ADDRNORM_BASE_ADDR1__INTLV_NUM_DIES_MASK 0x000000C0L +#define MMEA2_ADDRNORM_BASE_ADDR1__INTLV_NUM_SOCKETS_MASK 0x00000100L +#define MMEA2_ADDRNORM_BASE_ADDR1__INTLV_ADDR_SEL_MASK 0x00000E00L +#define MMEA2_ADDRNORM_BASE_ADDR1__BASE_ADDR_MASK 0xFFFFF000L +//MMEA2_ADDRNORM_LIMIT_ADDR1 +#define MMEA2_ADDRNORM_LIMIT_ADDR1__DST_FABRIC_ID__SHIFT 0x0 +#define MMEA2_ADDRNORM_LIMIT_ADDR1__LIMIT_ADDR__SHIFT 0xc +#define MMEA2_ADDRNORM_LIMIT_ADDR1__DST_FABRIC_ID_MASK 0x0000001FL +#define MMEA2_ADDRNORM_LIMIT_ADDR1__LIMIT_ADDR_MASK 0xFFFFF000L +//MMEA2_ADDRNORM_OFFSET_ADDR1 +#define MMEA2_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_EN__SHIFT 0x0 +#define MMEA2_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET__SHIFT 0x14 +#define MMEA2_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_EN_MASK 0x00000001L +#define MMEA2_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_MASK 0xFFF00000L +//MMEA2_ADDRNORM_BASE_ADDR2 +#define MMEA2_ADDRNORM_BASE_ADDR2__ADDR_RNG_VAL__SHIFT 0x0 +#define MMEA2_ADDRNORM_BASE_ADDR2__LGCY_MMIO_HOLE_EN__SHIFT 0x1 +#define MMEA2_ADDRNORM_BASE_ADDR2__INTLV_NUM_CHAN__SHIFT 0x2 +#define MMEA2_ADDRNORM_BASE_ADDR2__INTLV_NUM_DIES__SHIFT 0x6 +#define MMEA2_ADDRNORM_BASE_ADDR2__INTLV_NUM_SOCKETS__SHIFT 0x8 +#define MMEA2_ADDRNORM_BASE_ADDR2__INTLV_ADDR_SEL__SHIFT 0x9 +#define MMEA2_ADDRNORM_BASE_ADDR2__BASE_ADDR__SHIFT 0xc +#define MMEA2_ADDRNORM_BASE_ADDR2__ADDR_RNG_VAL_MASK 0x00000001L +#define MMEA2_ADDRNORM_BASE_ADDR2__LGCY_MMIO_HOLE_EN_MASK 0x00000002L +#define MMEA2_ADDRNORM_BASE_ADDR2__INTLV_NUM_CHAN_MASK 0x0000003CL +#define MMEA2_ADDRNORM_BASE_ADDR2__INTLV_NUM_DIES_MASK 0x000000C0L +#define MMEA2_ADDRNORM_BASE_ADDR2__INTLV_NUM_SOCKETS_MASK 0x00000100L +#define MMEA2_ADDRNORM_BASE_ADDR2__INTLV_ADDR_SEL_MASK 0x00000E00L +#define MMEA2_ADDRNORM_BASE_ADDR2__BASE_ADDR_MASK 0xFFFFF000L +//MMEA2_ADDRNORM_LIMIT_ADDR2 +#define MMEA2_ADDRNORM_LIMIT_ADDR2__DST_FABRIC_ID__SHIFT 0x0 +#define MMEA2_ADDRNORM_LIMIT_ADDR2__LIMIT_ADDR__SHIFT 0xc +#define MMEA2_ADDRNORM_LIMIT_ADDR2__DST_FABRIC_ID_MASK 0x0000001FL +#define MMEA2_ADDRNORM_LIMIT_ADDR2__LIMIT_ADDR_MASK 0xFFFFF000L +//MMEA2_ADDRNORM_BASE_ADDR3 +#define MMEA2_ADDRNORM_BASE_ADDR3__ADDR_RNG_VAL__SHIFT 0x0 +#define MMEA2_ADDRNORM_BASE_ADDR3__LGCY_MMIO_HOLE_EN__SHIFT 0x1 +#define MMEA2_ADDRNORM_BASE_ADDR3__INTLV_NUM_CHAN__SHIFT 0x2 +#define MMEA2_ADDRNORM_BASE_ADDR3__INTLV_NUM_DIES__SHIFT 0x6 +#define 
MMEA2_ADDRNORM_BASE_ADDR3__INTLV_NUM_SOCKETS__SHIFT 0x8 +#define MMEA2_ADDRNORM_BASE_ADDR3__INTLV_ADDR_SEL__SHIFT 0x9 +#define MMEA2_ADDRNORM_BASE_ADDR3__BASE_ADDR__SHIFT 0xc +#define MMEA2_ADDRNORM_BASE_ADDR3__ADDR_RNG_VAL_MASK 0x00000001L +#define MMEA2_ADDRNORM_BASE_ADDR3__LGCY_MMIO_HOLE_EN_MASK 0x00000002L +#define MMEA2_ADDRNORM_BASE_ADDR3__INTLV_NUM_CHAN_MASK 0x0000003CL +#define MMEA2_ADDRNORM_BASE_ADDR3__INTLV_NUM_DIES_MASK 0x000000C0L +#define MMEA2_ADDRNORM_BASE_ADDR3__INTLV_NUM_SOCKETS_MASK 0x00000100L +#define MMEA2_ADDRNORM_BASE_ADDR3__INTLV_ADDR_SEL_MASK 0x00000E00L +#define MMEA2_ADDRNORM_BASE_ADDR3__BASE_ADDR_MASK 0xFFFFF000L +//MMEA2_ADDRNORM_LIMIT_ADDR3 +#define MMEA2_ADDRNORM_LIMIT_ADDR3__DST_FABRIC_ID__SHIFT 0x0 +#define MMEA2_ADDRNORM_LIMIT_ADDR3__LIMIT_ADDR__SHIFT 0xc +#define MMEA2_ADDRNORM_LIMIT_ADDR3__DST_FABRIC_ID_MASK 0x0000001FL +#define MMEA2_ADDRNORM_LIMIT_ADDR3__LIMIT_ADDR_MASK 0xFFFFF000L +//MMEA2_ADDRNORM_OFFSET_ADDR3 +#define MMEA2_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_EN__SHIFT 0x0 +#define MMEA2_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET__SHIFT 0x14 +#define MMEA2_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_EN_MASK 0x00000001L +#define MMEA2_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_MASK 0xFFF00000L +//MMEA2_ADDRNORM_BASE_ADDR4 +#define MMEA2_ADDRNORM_BASE_ADDR4__ADDR_RNG_VAL__SHIFT 0x0 +#define MMEA2_ADDRNORM_BASE_ADDR4__LGCY_MMIO_HOLE_EN__SHIFT 0x1 +#define MMEA2_ADDRNORM_BASE_ADDR4__INTLV_NUM_CHAN__SHIFT 0x2 +#define MMEA2_ADDRNORM_BASE_ADDR4__INTLV_NUM_DIES__SHIFT 0x6 +#define MMEA2_ADDRNORM_BASE_ADDR4__INTLV_NUM_SOCKETS__SHIFT 0x8 +#define MMEA2_ADDRNORM_BASE_ADDR4__INTLV_ADDR_SEL__SHIFT 0x9 +#define MMEA2_ADDRNORM_BASE_ADDR4__BASE_ADDR__SHIFT 0xc +#define MMEA2_ADDRNORM_BASE_ADDR4__ADDR_RNG_VAL_MASK 0x00000001L +#define MMEA2_ADDRNORM_BASE_ADDR4__LGCY_MMIO_HOLE_EN_MASK 0x00000002L +#define MMEA2_ADDRNORM_BASE_ADDR4__INTLV_NUM_CHAN_MASK 0x0000003CL +#define MMEA2_ADDRNORM_BASE_ADDR4__INTLV_NUM_DIES_MASK 0x000000C0L +#define MMEA2_ADDRNORM_BASE_ADDR4__INTLV_NUM_SOCKETS_MASK 0x00000100L +#define MMEA2_ADDRNORM_BASE_ADDR4__INTLV_ADDR_SEL_MASK 0x00000E00L +#define MMEA2_ADDRNORM_BASE_ADDR4__BASE_ADDR_MASK 0xFFFFF000L +//MMEA2_ADDRNORM_LIMIT_ADDR4 +#define MMEA2_ADDRNORM_LIMIT_ADDR4__DST_FABRIC_ID__SHIFT 0x0 +#define MMEA2_ADDRNORM_LIMIT_ADDR4__LIMIT_ADDR__SHIFT 0xc +#define MMEA2_ADDRNORM_LIMIT_ADDR4__DST_FABRIC_ID_MASK 0x0000001FL +#define MMEA2_ADDRNORM_LIMIT_ADDR4__LIMIT_ADDR_MASK 0xFFFFF000L +//MMEA2_ADDRNORM_BASE_ADDR5 +#define MMEA2_ADDRNORM_BASE_ADDR5__ADDR_RNG_VAL__SHIFT 0x0 +#define MMEA2_ADDRNORM_BASE_ADDR5__LGCY_MMIO_HOLE_EN__SHIFT 0x1 +#define MMEA2_ADDRNORM_BASE_ADDR5__INTLV_NUM_CHAN__SHIFT 0x2 +#define MMEA2_ADDRNORM_BASE_ADDR5__INTLV_NUM_DIES__SHIFT 0x6 +#define MMEA2_ADDRNORM_BASE_ADDR5__INTLV_NUM_SOCKETS__SHIFT 0x8 +#define MMEA2_ADDRNORM_BASE_ADDR5__INTLV_ADDR_SEL__SHIFT 0x9 +#define MMEA2_ADDRNORM_BASE_ADDR5__BASE_ADDR__SHIFT 0xc +#define MMEA2_ADDRNORM_BASE_ADDR5__ADDR_RNG_VAL_MASK 0x00000001L +#define MMEA2_ADDRNORM_BASE_ADDR5__LGCY_MMIO_HOLE_EN_MASK 0x00000002L +#define MMEA2_ADDRNORM_BASE_ADDR5__INTLV_NUM_CHAN_MASK 0x0000003CL +#define MMEA2_ADDRNORM_BASE_ADDR5__INTLV_NUM_DIES_MASK 0x000000C0L +#define MMEA2_ADDRNORM_BASE_ADDR5__INTLV_NUM_SOCKETS_MASK 0x00000100L +#define MMEA2_ADDRNORM_BASE_ADDR5__INTLV_ADDR_SEL_MASK 0x00000E00L +#define MMEA2_ADDRNORM_BASE_ADDR5__BASE_ADDR_MASK 0xFFFFF000L +//MMEA2_ADDRNORM_LIMIT_ADDR5 +#define MMEA2_ADDRNORM_LIMIT_ADDR5__DST_FABRIC_ID__SHIFT 0x0 +#define MMEA2_ADDRNORM_LIMIT_ADDR5__LIMIT_ADDR__SHIFT 0xc 
+#define MMEA2_ADDRNORM_LIMIT_ADDR5__DST_FABRIC_ID_MASK 0x0000001FL +#define MMEA2_ADDRNORM_LIMIT_ADDR5__LIMIT_ADDR_MASK 0xFFFFF000L +//MMEA2_ADDRNORM_OFFSET_ADDR5 +#define MMEA2_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_EN__SHIFT 0x0 +#define MMEA2_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET__SHIFT 0x14 +#define MMEA2_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_EN_MASK 0x00000001L +#define MMEA2_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_MASK 0xFFF00000L +//MMEA2_ADDRNORMDRAM_HOLE_CNTL +#define MMEA2_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_VALID__SHIFT 0x0 +#define MMEA2_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_OFFSET__SHIFT 0x7 +#define MMEA2_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_VALID_MASK 0x00000001L +#define MMEA2_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_OFFSET_MASK 0x0000FF80L +//MMEA2_ADDRNORMGMI_HOLE_CNTL +#define MMEA2_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_VALID__SHIFT 0x0 +#define MMEA2_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_OFFSET__SHIFT 0x7 +#define MMEA2_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_VALID_MASK 0x00000001L +#define MMEA2_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_OFFSET_MASK 0x0000FF80L +//MMEA2_ADDRNORMDRAM_NP2_CHANNEL_CFG +#define MMEA2_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE0__SHIFT 0x0 +#define MMEA2_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE1__SHIFT 0x6 +#define MMEA2_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE0_MASK 0x0000003FL +#define MMEA2_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE1_MASK 0x00000FC0L +//MMEA2_ADDRNORMGMI_NP2_CHANNEL_CFG +#define MMEA2_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE2__SHIFT 0x0 +#define MMEA2_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE3__SHIFT 0x6 +#define MMEA2_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE2_MASK 0x0000003FL +#define MMEA2_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE3_MASK 0x00000FC0L +//MMEA2_ADDRDEC_BANK_CFG +#define MMEA2_ADDRDEC_BANK_CFG__BANK_MASK_DRAM__SHIFT 0x0 +#define MMEA2_ADDRDEC_BANK_CFG__BANK_MASK_GMI__SHIFT 0x6 +#define MMEA2_ADDRDEC_BANK_CFG__BANKGROUP_SEL_DRAM__SHIFT 0xc +#define MMEA2_ADDRDEC_BANK_CFG__BANKGROUP_SEL_GMI__SHIFT 0xf +#define MMEA2_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_DRAM__SHIFT 0x12 +#define MMEA2_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_GMI__SHIFT 0x13 +#define MMEA2_ADDRDEC_BANK_CFG__BANK_MASK_DRAM_MASK 0x0000003FL +#define MMEA2_ADDRDEC_BANK_CFG__BANK_MASK_GMI_MASK 0x00000FC0L +#define MMEA2_ADDRDEC_BANK_CFG__BANKGROUP_SEL_DRAM_MASK 0x00007000L +#define MMEA2_ADDRDEC_BANK_CFG__BANKGROUP_SEL_GMI_MASK 0x00038000L +#define MMEA2_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_DRAM_MASK 0x00040000L +#define MMEA2_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_GMI_MASK 0x00080000L +//MMEA2_ADDRDEC_MISC_CFG +#define MMEA2_ADDRDEC_MISC_CFG__VCM_EN0__SHIFT 0x0 +#define MMEA2_ADDRDEC_MISC_CFG__VCM_EN1__SHIFT 0x1 +#define MMEA2_ADDRDEC_MISC_CFG__VCM_EN2__SHIFT 0x2 +#define MMEA2_ADDRDEC_MISC_CFG__PCH_MASK_DRAM__SHIFT 0x8 +#define MMEA2_ADDRDEC_MISC_CFG__PCH_MASK_GMI__SHIFT 0x9 +#define MMEA2_ADDRDEC_MISC_CFG__CH_MASK_DRAM__SHIFT 0xc +#define MMEA2_ADDRDEC_MISC_CFG__CH_MASK_GMI__SHIFT 0x11 +#define MMEA2_ADDRDEC_MISC_CFG__CS_MASK_DRAM__SHIFT 0x16 +#define MMEA2_ADDRDEC_MISC_CFG__CS_MASK_GMI__SHIFT 0x18 +#define MMEA2_ADDRDEC_MISC_CFG__RM_MASK_DRAM__SHIFT 0x1a +#define MMEA2_ADDRDEC_MISC_CFG__RM_MASK_GMI__SHIFT 0x1d +#define MMEA2_ADDRDEC_MISC_CFG__VCM_EN0_MASK 0x00000001L +#define MMEA2_ADDRDEC_MISC_CFG__VCM_EN1_MASK 0x00000002L +#define MMEA2_ADDRDEC_MISC_CFG__VCM_EN2_MASK 0x00000004L +#define MMEA2_ADDRDEC_MISC_CFG__PCH_MASK_DRAM_MASK 0x00000100L +#define MMEA2_ADDRDEC_MISC_CFG__PCH_MASK_GMI_MASK 0x00000200L +#define 
MMEA2_ADDRDEC_MISC_CFG__CH_MASK_DRAM_MASK 0x0001F000L +#define MMEA2_ADDRDEC_MISC_CFG__CH_MASK_GMI_MASK 0x003E0000L +#define MMEA2_ADDRDEC_MISC_CFG__CS_MASK_DRAM_MASK 0x00C00000L +#define MMEA2_ADDRDEC_MISC_CFG__CS_MASK_GMI_MASK 0x03000000L +#define MMEA2_ADDRDEC_MISC_CFG__RM_MASK_DRAM_MASK 0x1C000000L +#define MMEA2_ADDRDEC_MISC_CFG__RM_MASK_GMI_MASK 0xE0000000L +//MMEA2_ADDRDECDRAM_ADDR_HASH_BANK0 +#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK0__XOR_ENABLE__SHIFT 0x0 +#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK0__COL_XOR__SHIFT 0x1 +#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK0__ROW_XOR__SHIFT 0xe +#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK0__XOR_ENABLE_MASK 0x00000001L +#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK0__COL_XOR_MASK 0x00003FFEL +#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK0__ROW_XOR_MASK 0xFFFFC000L +//MMEA2_ADDRDECDRAM_ADDR_HASH_BANK1 +#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK1__XOR_ENABLE__SHIFT 0x0 +#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK1__COL_XOR__SHIFT 0x1 +#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK1__ROW_XOR__SHIFT 0xe +#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK1__XOR_ENABLE_MASK 0x00000001L +#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK1__COL_XOR_MASK 0x00003FFEL +#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK1__ROW_XOR_MASK 0xFFFFC000L +//MMEA2_ADDRDECDRAM_ADDR_HASH_BANK2 +#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK2__XOR_ENABLE__SHIFT 0x0 +#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK2__COL_XOR__SHIFT 0x1 +#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK2__ROW_XOR__SHIFT 0xe +#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK2__XOR_ENABLE_MASK 0x00000001L +#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK2__COL_XOR_MASK 0x00003FFEL +#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK2__ROW_XOR_MASK 0xFFFFC000L +//MMEA2_ADDRDECDRAM_ADDR_HASH_BANK3 +#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK3__XOR_ENABLE__SHIFT 0x0 +#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK3__COL_XOR__SHIFT 0x1 +#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK3__ROW_XOR__SHIFT 0xe +#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK3__XOR_ENABLE_MASK 0x00000001L +#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK3__COL_XOR_MASK 0x00003FFEL +#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK3__ROW_XOR_MASK 0xFFFFC000L +//MMEA2_ADDRDECDRAM_ADDR_HASH_BANK4 +#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK4__XOR_ENABLE__SHIFT 0x0 +#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK4__COL_XOR__SHIFT 0x1 +#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK4__ROW_XOR__SHIFT 0xe +#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK4__XOR_ENABLE_MASK 0x00000001L +#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK4__COL_XOR_MASK 0x00003FFEL +#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK4__ROW_XOR_MASK 0xFFFFC000L +//MMEA2_ADDRDECDRAM_ADDR_HASH_BANK5 +#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK5__XOR_ENABLE__SHIFT 0x0 +#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK5__COL_XOR__SHIFT 0x1 +#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK5__ROW_XOR__SHIFT 0xe +#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK5__XOR_ENABLE_MASK 0x00000001L +#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK5__COL_XOR_MASK 0x00003FFEL +#define MMEA2_ADDRDECDRAM_ADDR_HASH_BANK5__ROW_XOR_MASK 0xFFFFC000L +//MMEA2_ADDRDECDRAM_ADDR_HASH_PC +#define MMEA2_ADDRDECDRAM_ADDR_HASH_PC__XOR_ENABLE__SHIFT 0x0 +#define MMEA2_ADDRDECDRAM_ADDR_HASH_PC__COL_XOR__SHIFT 0x1 +#define MMEA2_ADDRDECDRAM_ADDR_HASH_PC__ROW_XOR__SHIFT 0xe +#define MMEA2_ADDRDECDRAM_ADDR_HASH_PC__XOR_ENABLE_MASK 0x00000001L +#define MMEA2_ADDRDECDRAM_ADDR_HASH_PC__COL_XOR_MASK 0x00003FFEL +#define MMEA2_ADDRDECDRAM_ADDR_HASH_PC__ROW_XOR_MASK 0xFFFFC000L +//MMEA2_ADDRDECDRAM_ADDR_HASH_PC2 +#define MMEA2_ADDRDECDRAM_ADDR_HASH_PC2__BANK_XOR__SHIFT 0x0 +#define 
MMEA2_ADDRDECDRAM_ADDR_HASH_PC2__BANK_XOR_MASK 0x0000003FL +//MMEA2_ADDRDECDRAM_ADDR_HASH_CS0 +#define MMEA2_ADDRDECDRAM_ADDR_HASH_CS0__XOR_ENABLE__SHIFT 0x0 +#define MMEA2_ADDRDECDRAM_ADDR_HASH_CS0__NA_XOR__SHIFT 0x1 +#define MMEA2_ADDRDECDRAM_ADDR_HASH_CS0__XOR_ENABLE_MASK 0x00000001L +#define MMEA2_ADDRDECDRAM_ADDR_HASH_CS0__NA_XOR_MASK 0xFFFFFFFEL +//MMEA2_ADDRDECDRAM_ADDR_HASH_CS1 +#define MMEA2_ADDRDECDRAM_ADDR_HASH_CS1__XOR_ENABLE__SHIFT 0x0 +#define MMEA2_ADDRDECDRAM_ADDR_HASH_CS1__NA_XOR__SHIFT 0x1 +#define MMEA2_ADDRDECDRAM_ADDR_HASH_CS1__XOR_ENABLE_MASK 0x00000001L +#define MMEA2_ADDRDECDRAM_ADDR_HASH_CS1__NA_XOR_MASK 0xFFFFFFFEL +//MMEA2_ADDRDECDRAM_HARVEST_ENABLE +#define MMEA2_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_EN__SHIFT 0x0 +#define MMEA2_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_VAL__SHIFT 0x1 +#define MMEA2_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_EN__SHIFT 0x2 +#define MMEA2_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_VAL__SHIFT 0x3 +#define MMEA2_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_EN__SHIFT 0x4 +#define MMEA2_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_VAL__SHIFT 0x5 +#define MMEA2_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_EN_MASK 0x00000001L +#define MMEA2_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_VAL_MASK 0x00000002L +#define MMEA2_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_EN_MASK 0x00000004L +#define MMEA2_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_VAL_MASK 0x00000008L +#define MMEA2_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_EN_MASK 0x00000010L +#define MMEA2_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_VAL_MASK 0x00000020L +//MMEA2_ADDRDECGMI_ADDR_HASH_BANK0 +#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK0__XOR_ENABLE__SHIFT 0x0 +#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK0__COL_XOR__SHIFT 0x1 +#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK0__ROW_XOR__SHIFT 0xe +#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK0__XOR_ENABLE_MASK 0x00000001L +#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK0__COL_XOR_MASK 0x00003FFEL +#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK0__ROW_XOR_MASK 0xFFFFC000L +//MMEA2_ADDRDECGMI_ADDR_HASH_BANK1 +#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK1__XOR_ENABLE__SHIFT 0x0 +#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK1__COL_XOR__SHIFT 0x1 +#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK1__ROW_XOR__SHIFT 0xe +#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK1__XOR_ENABLE_MASK 0x00000001L +#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK1__COL_XOR_MASK 0x00003FFEL +#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK1__ROW_XOR_MASK 0xFFFFC000L +//MMEA2_ADDRDECGMI_ADDR_HASH_BANK2 +#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK2__XOR_ENABLE__SHIFT 0x0 +#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK2__COL_XOR__SHIFT 0x1 +#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK2__ROW_XOR__SHIFT 0xe +#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK2__XOR_ENABLE_MASK 0x00000001L +#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK2__COL_XOR_MASK 0x00003FFEL +#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK2__ROW_XOR_MASK 0xFFFFC000L +//MMEA2_ADDRDECGMI_ADDR_HASH_BANK3 +#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK3__XOR_ENABLE__SHIFT 0x0 +#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK3__COL_XOR__SHIFT 0x1 +#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK3__ROW_XOR__SHIFT 0xe +#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK3__XOR_ENABLE_MASK 0x00000001L +#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK3__COL_XOR_MASK 0x00003FFEL +#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK3__ROW_XOR_MASK 0xFFFFC000L +//MMEA2_ADDRDECGMI_ADDR_HASH_BANK4 +#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK4__XOR_ENABLE__SHIFT 0x0 +#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK4__COL_XOR__SHIFT 0x1 +#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK4__ROW_XOR__SHIFT 0xe +#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK4__XOR_ENABLE_MASK 0x00000001L +#define 
MMEA2_ADDRDECGMI_ADDR_HASH_BANK4__COL_XOR_MASK 0x00003FFEL +#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK4__ROW_XOR_MASK 0xFFFFC000L +//MMEA2_ADDRDECGMI_ADDR_HASH_BANK5 +#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK5__XOR_ENABLE__SHIFT 0x0 +#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK5__COL_XOR__SHIFT 0x1 +#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK5__ROW_XOR__SHIFT 0xe +#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK5__XOR_ENABLE_MASK 0x00000001L +#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK5__COL_XOR_MASK 0x00003FFEL +#define MMEA2_ADDRDECGMI_ADDR_HASH_BANK5__ROW_XOR_MASK 0xFFFFC000L +//MMEA2_ADDRDECGMI_ADDR_HASH_PC +#define MMEA2_ADDRDECGMI_ADDR_HASH_PC__XOR_ENABLE__SHIFT 0x0 +#define MMEA2_ADDRDECGMI_ADDR_HASH_PC__COL_XOR__SHIFT 0x1 +#define MMEA2_ADDRDECGMI_ADDR_HASH_PC__ROW_XOR__SHIFT 0xe +#define MMEA2_ADDRDECGMI_ADDR_HASH_PC__XOR_ENABLE_MASK 0x00000001L +#define MMEA2_ADDRDECGMI_ADDR_HASH_PC__COL_XOR_MASK 0x00003FFEL +#define MMEA2_ADDRDECGMI_ADDR_HASH_PC__ROW_XOR_MASK 0xFFFFC000L +//MMEA2_ADDRDECGMI_ADDR_HASH_PC2 +#define MMEA2_ADDRDECGMI_ADDR_HASH_PC2__BANK_XOR__SHIFT 0x0 +#define MMEA2_ADDRDECGMI_ADDR_HASH_PC2__BANK_XOR_MASK 0x0000003FL +//MMEA2_ADDRDECGMI_ADDR_HASH_CS0 +#define MMEA2_ADDRDECGMI_ADDR_HASH_CS0__XOR_ENABLE__SHIFT 0x0 +#define MMEA2_ADDRDECGMI_ADDR_HASH_CS0__NA_XOR__SHIFT 0x1 +#define MMEA2_ADDRDECGMI_ADDR_HASH_CS0__XOR_ENABLE_MASK 0x00000001L +#define MMEA2_ADDRDECGMI_ADDR_HASH_CS0__NA_XOR_MASK 0xFFFFFFFEL +//MMEA2_ADDRDECGMI_ADDR_HASH_CS1 +#define MMEA2_ADDRDECGMI_ADDR_HASH_CS1__XOR_ENABLE__SHIFT 0x0 +#define MMEA2_ADDRDECGMI_ADDR_HASH_CS1__NA_XOR__SHIFT 0x1 +#define MMEA2_ADDRDECGMI_ADDR_HASH_CS1__XOR_ENABLE_MASK 0x00000001L +#define MMEA2_ADDRDECGMI_ADDR_HASH_CS1__NA_XOR_MASK 0xFFFFFFFEL +//MMEA2_ADDRDECGMI_HARVEST_ENABLE +#define MMEA2_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_EN__SHIFT 0x0 +#define MMEA2_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_VAL__SHIFT 0x1 +#define MMEA2_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_EN__SHIFT 0x2 +#define MMEA2_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_VAL__SHIFT 0x3 +#define MMEA2_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_EN__SHIFT 0x4 +#define MMEA2_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_VAL__SHIFT 0x5 +#define MMEA2_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_EN_MASK 0x00000001L +#define MMEA2_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_VAL_MASK 0x00000002L +#define MMEA2_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_EN_MASK 0x00000004L +#define MMEA2_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_VAL_MASK 0x00000008L +#define MMEA2_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_EN_MASK 0x00000010L +#define MMEA2_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_VAL_MASK 0x00000020L +//MMEA2_ADDRDEC0_BASE_ADDR_CS0 +#define MMEA2_ADDRDEC0_BASE_ADDR_CS0__CS_EN__SHIFT 0x0 +#define MMEA2_ADDRDEC0_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1 +#define MMEA2_ADDRDEC0_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L +#define MMEA2_ADDRDEC0_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA2_ADDRDEC0_BASE_ADDR_CS1 +#define MMEA2_ADDRDEC0_BASE_ADDR_CS1__CS_EN__SHIFT 0x0 +#define MMEA2_ADDRDEC0_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1 +#define MMEA2_ADDRDEC0_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L +#define MMEA2_ADDRDEC0_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA2_ADDRDEC0_BASE_ADDR_CS2 +#define MMEA2_ADDRDEC0_BASE_ADDR_CS2__CS_EN__SHIFT 0x0 +#define MMEA2_ADDRDEC0_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1 +#define MMEA2_ADDRDEC0_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L +#define MMEA2_ADDRDEC0_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA2_ADDRDEC0_BASE_ADDR_CS3 +#define MMEA2_ADDRDEC0_BASE_ADDR_CS3__CS_EN__SHIFT 0x0 +#define MMEA2_ADDRDEC0_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1 +#define 
MMEA2_ADDRDEC0_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L +#define MMEA2_ADDRDEC0_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA2_ADDRDEC0_BASE_ADDR_SECCS0 +#define MMEA2_ADDRDEC0_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0 +#define MMEA2_ADDRDEC0_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1 +#define MMEA2_ADDRDEC0_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L +#define MMEA2_ADDRDEC0_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA2_ADDRDEC0_BASE_ADDR_SECCS1 +#define MMEA2_ADDRDEC0_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0 +#define MMEA2_ADDRDEC0_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1 +#define MMEA2_ADDRDEC0_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L +#define MMEA2_ADDRDEC0_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA2_ADDRDEC0_BASE_ADDR_SECCS2 +#define MMEA2_ADDRDEC0_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0 +#define MMEA2_ADDRDEC0_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1 +#define MMEA2_ADDRDEC0_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L +#define MMEA2_ADDRDEC0_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA2_ADDRDEC0_BASE_ADDR_SECCS3 +#define MMEA2_ADDRDEC0_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0 +#define MMEA2_ADDRDEC0_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1 +#define MMEA2_ADDRDEC0_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L +#define MMEA2_ADDRDEC0_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA2_ADDRDEC0_ADDR_MASK_CS01 +#define MMEA2_ADDRDEC0_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1 +#define MMEA2_ADDRDEC0_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA2_ADDRDEC0_ADDR_MASK_CS23 +#define MMEA2_ADDRDEC0_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1 +#define MMEA2_ADDRDEC0_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA2_ADDRDEC0_ADDR_MASK_SECCS01 +#define MMEA2_ADDRDEC0_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1 +#define MMEA2_ADDRDEC0_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA2_ADDRDEC0_ADDR_MASK_SECCS23 +#define MMEA2_ADDRDEC0_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1 +#define MMEA2_ADDRDEC0_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA2_ADDRDEC0_ADDR_CFG_CS01 +#define MMEA2_ADDRDEC0_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA2_ADDRDEC0_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4 +#define MMEA2_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8 +#define MMEA2_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc +#define MMEA2_ADDRDEC0_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10 +#define MMEA2_ADDRDEC0_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14 +#define MMEA2_ADDRDEC0_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f +#define MMEA2_ADDRDEC0_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA2_ADDRDEC0_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L +#define MMEA2_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA2_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA2_ADDRDEC0_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L +#define MMEA2_ADDRDEC0_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L +#define MMEA2_ADDRDEC0_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L +//MMEA2_ADDRDEC0_ADDR_CFG_CS23 +#define MMEA2_ADDRDEC0_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA2_ADDRDEC0_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4 +#define MMEA2_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8 +#define MMEA2_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc +#define MMEA2_ADDRDEC0_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10 +#define MMEA2_ADDRDEC0_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14 +#define MMEA2_ADDRDEC0_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f +#define MMEA2_ADDRDEC0_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA2_ADDRDEC0_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L +#define MMEA2_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L +#define 
MMEA2_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA2_ADDRDEC0_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L +#define MMEA2_ADDRDEC0_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L +#define MMEA2_ADDRDEC0_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L +//MMEA2_ADDRDEC0_ADDR_SEL_CS01 +#define MMEA2_ADDRDEC0_ADDR_SEL_CS01__BANK0__SHIFT 0x0 +#define MMEA2_ADDRDEC0_ADDR_SEL_CS01__BANK1__SHIFT 0x4 +#define MMEA2_ADDRDEC0_ADDR_SEL_CS01__BANK2__SHIFT 0x8 +#define MMEA2_ADDRDEC0_ADDR_SEL_CS01__BANK3__SHIFT 0xc +#define MMEA2_ADDRDEC0_ADDR_SEL_CS01__BANK4__SHIFT 0x10 +#define MMEA2_ADDRDEC0_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18 +#define MMEA2_ADDRDEC0_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c +#define MMEA2_ADDRDEC0_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL +#define MMEA2_ADDRDEC0_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L +#define MMEA2_ADDRDEC0_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L +#define MMEA2_ADDRDEC0_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L +#define MMEA2_ADDRDEC0_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L +#define MMEA2_ADDRDEC0_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L +#define MMEA2_ADDRDEC0_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L +//MMEA2_ADDRDEC0_ADDR_SEL_CS23 +#define MMEA2_ADDRDEC0_ADDR_SEL_CS23__BANK0__SHIFT 0x0 +#define MMEA2_ADDRDEC0_ADDR_SEL_CS23__BANK1__SHIFT 0x4 +#define MMEA2_ADDRDEC0_ADDR_SEL_CS23__BANK2__SHIFT 0x8 +#define MMEA2_ADDRDEC0_ADDR_SEL_CS23__BANK3__SHIFT 0xc +#define MMEA2_ADDRDEC0_ADDR_SEL_CS23__BANK4__SHIFT 0x10 +#define MMEA2_ADDRDEC0_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18 +#define MMEA2_ADDRDEC0_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c +#define MMEA2_ADDRDEC0_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL +#define MMEA2_ADDRDEC0_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L +#define MMEA2_ADDRDEC0_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L +#define MMEA2_ADDRDEC0_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L +#define MMEA2_ADDRDEC0_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L +#define MMEA2_ADDRDEC0_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L +#define MMEA2_ADDRDEC0_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L +//MMEA2_ADDRDEC0_ADDR_SEL2_CS01 +#define MMEA2_ADDRDEC0_ADDR_SEL2_CS01__BANK5__SHIFT 0x0 +#define MMEA2_ADDRDEC0_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL +//MMEA2_ADDRDEC0_ADDR_SEL2_CS23 +#define MMEA2_ADDRDEC0_ADDR_SEL2_CS23__BANK5__SHIFT 0x0 +#define MMEA2_ADDRDEC0_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL +//MMEA2_ADDRDEC0_COL_SEL_LO_CS01 +#define MMEA2_ADDRDEC0_COL_SEL_LO_CS01__COL0__SHIFT 0x0 +#define MMEA2_ADDRDEC0_COL_SEL_LO_CS01__COL1__SHIFT 0x4 +#define MMEA2_ADDRDEC0_COL_SEL_LO_CS01__COL2__SHIFT 0x8 +#define MMEA2_ADDRDEC0_COL_SEL_LO_CS01__COL3__SHIFT 0xc +#define MMEA2_ADDRDEC0_COL_SEL_LO_CS01__COL4__SHIFT 0x10 +#define MMEA2_ADDRDEC0_COL_SEL_LO_CS01__COL5__SHIFT 0x14 +#define MMEA2_ADDRDEC0_COL_SEL_LO_CS01__COL6__SHIFT 0x18 +#define MMEA2_ADDRDEC0_COL_SEL_LO_CS01__COL7__SHIFT 0x1c +#define MMEA2_ADDRDEC0_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL +#define MMEA2_ADDRDEC0_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L +#define MMEA2_ADDRDEC0_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L +#define MMEA2_ADDRDEC0_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L +#define MMEA2_ADDRDEC0_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L +#define MMEA2_ADDRDEC0_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L +#define MMEA2_ADDRDEC0_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L +#define MMEA2_ADDRDEC0_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L +//MMEA2_ADDRDEC0_COL_SEL_LO_CS23 +#define MMEA2_ADDRDEC0_COL_SEL_LO_CS23__COL0__SHIFT 0x0 +#define MMEA2_ADDRDEC0_COL_SEL_LO_CS23__COL1__SHIFT 0x4 +#define MMEA2_ADDRDEC0_COL_SEL_LO_CS23__COL2__SHIFT 0x8 +#define MMEA2_ADDRDEC0_COL_SEL_LO_CS23__COL3__SHIFT 0xc 
+#define MMEA2_ADDRDEC0_COL_SEL_LO_CS23__COL4__SHIFT 0x10 +#define MMEA2_ADDRDEC0_COL_SEL_LO_CS23__COL5__SHIFT 0x14 +#define MMEA2_ADDRDEC0_COL_SEL_LO_CS23__COL6__SHIFT 0x18 +#define MMEA2_ADDRDEC0_COL_SEL_LO_CS23__COL7__SHIFT 0x1c +#define MMEA2_ADDRDEC0_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL +#define MMEA2_ADDRDEC0_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L +#define MMEA2_ADDRDEC0_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L +#define MMEA2_ADDRDEC0_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L +#define MMEA2_ADDRDEC0_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L +#define MMEA2_ADDRDEC0_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L +#define MMEA2_ADDRDEC0_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L +#define MMEA2_ADDRDEC0_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L +//MMEA2_ADDRDEC0_COL_SEL_HI_CS01 +#define MMEA2_ADDRDEC0_COL_SEL_HI_CS01__COL8__SHIFT 0x0 +#define MMEA2_ADDRDEC0_COL_SEL_HI_CS01__COL9__SHIFT 0x4 +#define MMEA2_ADDRDEC0_COL_SEL_HI_CS01__COL10__SHIFT 0x8 +#define MMEA2_ADDRDEC0_COL_SEL_HI_CS01__COL11__SHIFT 0xc +#define MMEA2_ADDRDEC0_COL_SEL_HI_CS01__COL12__SHIFT 0x10 +#define MMEA2_ADDRDEC0_COL_SEL_HI_CS01__COL13__SHIFT 0x14 +#define MMEA2_ADDRDEC0_COL_SEL_HI_CS01__COL14__SHIFT 0x18 +#define MMEA2_ADDRDEC0_COL_SEL_HI_CS01__COL15__SHIFT 0x1c +#define MMEA2_ADDRDEC0_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL +#define MMEA2_ADDRDEC0_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L +#define MMEA2_ADDRDEC0_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L +#define MMEA2_ADDRDEC0_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L +#define MMEA2_ADDRDEC0_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L +#define MMEA2_ADDRDEC0_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L +#define MMEA2_ADDRDEC0_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L +#define MMEA2_ADDRDEC0_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L +//MMEA2_ADDRDEC0_COL_SEL_HI_CS23 +#define MMEA2_ADDRDEC0_COL_SEL_HI_CS23__COL8__SHIFT 0x0 +#define MMEA2_ADDRDEC0_COL_SEL_HI_CS23__COL9__SHIFT 0x4 +#define MMEA2_ADDRDEC0_COL_SEL_HI_CS23__COL10__SHIFT 0x8 +#define MMEA2_ADDRDEC0_COL_SEL_HI_CS23__COL11__SHIFT 0xc +#define MMEA2_ADDRDEC0_COL_SEL_HI_CS23__COL12__SHIFT 0x10 +#define MMEA2_ADDRDEC0_COL_SEL_HI_CS23__COL13__SHIFT 0x14 +#define MMEA2_ADDRDEC0_COL_SEL_HI_CS23__COL14__SHIFT 0x18 +#define MMEA2_ADDRDEC0_COL_SEL_HI_CS23__COL15__SHIFT 0x1c +#define MMEA2_ADDRDEC0_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL +#define MMEA2_ADDRDEC0_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L +#define MMEA2_ADDRDEC0_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L +#define MMEA2_ADDRDEC0_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L +#define MMEA2_ADDRDEC0_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L +#define MMEA2_ADDRDEC0_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L +#define MMEA2_ADDRDEC0_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L +#define MMEA2_ADDRDEC0_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L +//MMEA2_ADDRDEC0_RM_SEL_CS01 +#define MMEA2_ADDRDEC0_RM_SEL_CS01__RM0__SHIFT 0x0 +#define MMEA2_ADDRDEC0_RM_SEL_CS01__RM1__SHIFT 0x4 +#define MMEA2_ADDRDEC0_RM_SEL_CS01__RM2__SHIFT 0x8 +#define MMEA2_ADDRDEC0_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc +#define MMEA2_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA2_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA2_ADDRDEC0_RM_SEL_CS01__RM0_MASK 0x0000000FL +#define MMEA2_ADDRDEC0_RM_SEL_CS01__RM1_MASK 0x000000F0L +#define MMEA2_ADDRDEC0_RM_SEL_CS01__RM2_MASK 0x00000F00L +#define MMEA2_ADDRDEC0_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA2_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA2_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA2_ADDRDEC0_RM_SEL_CS23 
+#define MMEA2_ADDRDEC0_RM_SEL_CS23__RM0__SHIFT 0x0 +#define MMEA2_ADDRDEC0_RM_SEL_CS23__RM1__SHIFT 0x4 +#define MMEA2_ADDRDEC0_RM_SEL_CS23__RM2__SHIFT 0x8 +#define MMEA2_ADDRDEC0_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc +#define MMEA2_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA2_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA2_ADDRDEC0_RM_SEL_CS23__RM0_MASK 0x0000000FL +#define MMEA2_ADDRDEC0_RM_SEL_CS23__RM1_MASK 0x000000F0L +#define MMEA2_ADDRDEC0_RM_SEL_CS23__RM2_MASK 0x00000F00L +#define MMEA2_ADDRDEC0_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA2_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA2_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA2_ADDRDEC0_RM_SEL_SECCS01 +#define MMEA2_ADDRDEC0_RM_SEL_SECCS01__RM0__SHIFT 0x0 +#define MMEA2_ADDRDEC0_RM_SEL_SECCS01__RM1__SHIFT 0x4 +#define MMEA2_ADDRDEC0_RM_SEL_SECCS01__RM2__SHIFT 0x8 +#define MMEA2_ADDRDEC0_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc +#define MMEA2_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA2_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA2_ADDRDEC0_RM_SEL_SECCS01__RM0_MASK 0x0000000FL +#define MMEA2_ADDRDEC0_RM_SEL_SECCS01__RM1_MASK 0x000000F0L +#define MMEA2_ADDRDEC0_RM_SEL_SECCS01__RM2_MASK 0x00000F00L +#define MMEA2_ADDRDEC0_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA2_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA2_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA2_ADDRDEC0_RM_SEL_SECCS23 +#define MMEA2_ADDRDEC0_RM_SEL_SECCS23__RM0__SHIFT 0x0 +#define MMEA2_ADDRDEC0_RM_SEL_SECCS23__RM1__SHIFT 0x4 +#define MMEA2_ADDRDEC0_RM_SEL_SECCS23__RM2__SHIFT 0x8 +#define MMEA2_ADDRDEC0_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc +#define MMEA2_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA2_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA2_ADDRDEC0_RM_SEL_SECCS23__RM0_MASK 0x0000000FL +#define MMEA2_ADDRDEC0_RM_SEL_SECCS23__RM1_MASK 0x000000F0L +#define MMEA2_ADDRDEC0_RM_SEL_SECCS23__RM2_MASK 0x00000F00L +#define MMEA2_ADDRDEC0_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA2_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA2_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA2_ADDRDEC1_BASE_ADDR_CS0 +#define MMEA2_ADDRDEC1_BASE_ADDR_CS0__CS_EN__SHIFT 0x0 +#define MMEA2_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1 +#define MMEA2_ADDRDEC1_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L +#define MMEA2_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA2_ADDRDEC1_BASE_ADDR_CS1 +#define MMEA2_ADDRDEC1_BASE_ADDR_CS1__CS_EN__SHIFT 0x0 +#define MMEA2_ADDRDEC1_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1 +#define MMEA2_ADDRDEC1_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L +#define MMEA2_ADDRDEC1_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA2_ADDRDEC1_BASE_ADDR_CS2 +#define MMEA2_ADDRDEC1_BASE_ADDR_CS2__CS_EN__SHIFT 0x0 +#define MMEA2_ADDRDEC1_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1 +#define MMEA2_ADDRDEC1_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L +#define MMEA2_ADDRDEC1_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA2_ADDRDEC1_BASE_ADDR_CS3 +#define MMEA2_ADDRDEC1_BASE_ADDR_CS3__CS_EN__SHIFT 0x0 +#define MMEA2_ADDRDEC1_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1 +#define MMEA2_ADDRDEC1_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L +#define MMEA2_ADDRDEC1_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA2_ADDRDEC1_BASE_ADDR_SECCS0 +#define MMEA2_ADDRDEC1_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0 
+#define MMEA2_ADDRDEC1_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1 +#define MMEA2_ADDRDEC1_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L +#define MMEA2_ADDRDEC1_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA2_ADDRDEC1_BASE_ADDR_SECCS1 +#define MMEA2_ADDRDEC1_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0 +#define MMEA2_ADDRDEC1_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1 +#define MMEA2_ADDRDEC1_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L +#define MMEA2_ADDRDEC1_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA2_ADDRDEC1_BASE_ADDR_SECCS2 +#define MMEA2_ADDRDEC1_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0 +#define MMEA2_ADDRDEC1_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1 +#define MMEA2_ADDRDEC1_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L +#define MMEA2_ADDRDEC1_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA2_ADDRDEC1_BASE_ADDR_SECCS3 +#define MMEA2_ADDRDEC1_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0 +#define MMEA2_ADDRDEC1_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1 +#define MMEA2_ADDRDEC1_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L +#define MMEA2_ADDRDEC1_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA2_ADDRDEC1_ADDR_MASK_CS01 +#define MMEA2_ADDRDEC1_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1 +#define MMEA2_ADDRDEC1_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA2_ADDRDEC1_ADDR_MASK_CS23 +#define MMEA2_ADDRDEC1_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1 +#define MMEA2_ADDRDEC1_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA2_ADDRDEC1_ADDR_MASK_SECCS01 +#define MMEA2_ADDRDEC1_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1 +#define MMEA2_ADDRDEC1_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA2_ADDRDEC1_ADDR_MASK_SECCS23 +#define MMEA2_ADDRDEC1_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1 +#define MMEA2_ADDRDEC1_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA2_ADDRDEC1_ADDR_CFG_CS01 +#define MMEA2_ADDRDEC1_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA2_ADDRDEC1_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4 +#define MMEA2_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8 +#define MMEA2_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc +#define MMEA2_ADDRDEC1_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10 +#define MMEA2_ADDRDEC1_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14 +#define MMEA2_ADDRDEC1_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f +#define MMEA2_ADDRDEC1_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA2_ADDRDEC1_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L +#define MMEA2_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA2_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA2_ADDRDEC1_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L +#define MMEA2_ADDRDEC1_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L +#define MMEA2_ADDRDEC1_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L +//MMEA2_ADDRDEC1_ADDR_CFG_CS23 +#define MMEA2_ADDRDEC1_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA2_ADDRDEC1_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4 +#define MMEA2_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8 +#define MMEA2_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc +#define MMEA2_ADDRDEC1_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10 +#define MMEA2_ADDRDEC1_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14 +#define MMEA2_ADDRDEC1_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f +#define MMEA2_ADDRDEC1_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA2_ADDRDEC1_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L +#define MMEA2_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA2_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA2_ADDRDEC1_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L +#define MMEA2_ADDRDEC1_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L +#define 
MMEA2_ADDRDEC1_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L +//MMEA2_ADDRDEC1_ADDR_SEL_CS01 +#define MMEA2_ADDRDEC1_ADDR_SEL_CS01__BANK0__SHIFT 0x0 +#define MMEA2_ADDRDEC1_ADDR_SEL_CS01__BANK1__SHIFT 0x4 +#define MMEA2_ADDRDEC1_ADDR_SEL_CS01__BANK2__SHIFT 0x8 +#define MMEA2_ADDRDEC1_ADDR_SEL_CS01__BANK3__SHIFT 0xc +#define MMEA2_ADDRDEC1_ADDR_SEL_CS01__BANK4__SHIFT 0x10 +#define MMEA2_ADDRDEC1_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18 +#define MMEA2_ADDRDEC1_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c +#define MMEA2_ADDRDEC1_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL +#define MMEA2_ADDRDEC1_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L +#define MMEA2_ADDRDEC1_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L +#define MMEA2_ADDRDEC1_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L +#define MMEA2_ADDRDEC1_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L +#define MMEA2_ADDRDEC1_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L +#define MMEA2_ADDRDEC1_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L +//MMEA2_ADDRDEC1_ADDR_SEL_CS23 +#define MMEA2_ADDRDEC1_ADDR_SEL_CS23__BANK0__SHIFT 0x0 +#define MMEA2_ADDRDEC1_ADDR_SEL_CS23__BANK1__SHIFT 0x4 +#define MMEA2_ADDRDEC1_ADDR_SEL_CS23__BANK2__SHIFT 0x8 +#define MMEA2_ADDRDEC1_ADDR_SEL_CS23__BANK3__SHIFT 0xc +#define MMEA2_ADDRDEC1_ADDR_SEL_CS23__BANK4__SHIFT 0x10 +#define MMEA2_ADDRDEC1_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18 +#define MMEA2_ADDRDEC1_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c +#define MMEA2_ADDRDEC1_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL +#define MMEA2_ADDRDEC1_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L +#define MMEA2_ADDRDEC1_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L +#define MMEA2_ADDRDEC1_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L +#define MMEA2_ADDRDEC1_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L +#define MMEA2_ADDRDEC1_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L +#define MMEA2_ADDRDEC1_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L +//MMEA2_ADDRDEC1_ADDR_SEL2_CS01 +#define MMEA2_ADDRDEC1_ADDR_SEL2_CS01__BANK5__SHIFT 0x0 +#define MMEA2_ADDRDEC1_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL +//MMEA2_ADDRDEC1_ADDR_SEL2_CS23 +#define MMEA2_ADDRDEC1_ADDR_SEL2_CS23__BANK5__SHIFT 0x0 +#define MMEA2_ADDRDEC1_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL +//MMEA2_ADDRDEC1_COL_SEL_LO_CS01 +#define MMEA2_ADDRDEC1_COL_SEL_LO_CS01__COL0__SHIFT 0x0 +#define MMEA2_ADDRDEC1_COL_SEL_LO_CS01__COL1__SHIFT 0x4 +#define MMEA2_ADDRDEC1_COL_SEL_LO_CS01__COL2__SHIFT 0x8 +#define MMEA2_ADDRDEC1_COL_SEL_LO_CS01__COL3__SHIFT 0xc +#define MMEA2_ADDRDEC1_COL_SEL_LO_CS01__COL4__SHIFT 0x10 +#define MMEA2_ADDRDEC1_COL_SEL_LO_CS01__COL5__SHIFT 0x14 +#define MMEA2_ADDRDEC1_COL_SEL_LO_CS01__COL6__SHIFT 0x18 +#define MMEA2_ADDRDEC1_COL_SEL_LO_CS01__COL7__SHIFT 0x1c +#define MMEA2_ADDRDEC1_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL +#define MMEA2_ADDRDEC1_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L +#define MMEA2_ADDRDEC1_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L +#define MMEA2_ADDRDEC1_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L +#define MMEA2_ADDRDEC1_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L +#define MMEA2_ADDRDEC1_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L +#define MMEA2_ADDRDEC1_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L +#define MMEA2_ADDRDEC1_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L +//MMEA2_ADDRDEC1_COL_SEL_LO_CS23 +#define MMEA2_ADDRDEC1_COL_SEL_LO_CS23__COL0__SHIFT 0x0 +#define MMEA2_ADDRDEC1_COL_SEL_LO_CS23__COL1__SHIFT 0x4 +#define MMEA2_ADDRDEC1_COL_SEL_LO_CS23__COL2__SHIFT 0x8 +#define MMEA2_ADDRDEC1_COL_SEL_LO_CS23__COL3__SHIFT 0xc +#define MMEA2_ADDRDEC1_COL_SEL_LO_CS23__COL4__SHIFT 0x10 +#define MMEA2_ADDRDEC1_COL_SEL_LO_CS23__COL5__SHIFT 0x14 +#define MMEA2_ADDRDEC1_COL_SEL_LO_CS23__COL6__SHIFT 0x18 +#define 
MMEA2_ADDRDEC1_COL_SEL_LO_CS23__COL7__SHIFT 0x1c +#define MMEA2_ADDRDEC1_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL +#define MMEA2_ADDRDEC1_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L +#define MMEA2_ADDRDEC1_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L +#define MMEA2_ADDRDEC1_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L +#define MMEA2_ADDRDEC1_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L +#define MMEA2_ADDRDEC1_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L +#define MMEA2_ADDRDEC1_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L +#define MMEA2_ADDRDEC1_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L +//MMEA2_ADDRDEC1_COL_SEL_HI_CS01 +#define MMEA2_ADDRDEC1_COL_SEL_HI_CS01__COL8__SHIFT 0x0 +#define MMEA2_ADDRDEC1_COL_SEL_HI_CS01__COL9__SHIFT 0x4 +#define MMEA2_ADDRDEC1_COL_SEL_HI_CS01__COL10__SHIFT 0x8 +#define MMEA2_ADDRDEC1_COL_SEL_HI_CS01__COL11__SHIFT 0xc +#define MMEA2_ADDRDEC1_COL_SEL_HI_CS01__COL12__SHIFT 0x10 +#define MMEA2_ADDRDEC1_COL_SEL_HI_CS01__COL13__SHIFT 0x14 +#define MMEA2_ADDRDEC1_COL_SEL_HI_CS01__COL14__SHIFT 0x18 +#define MMEA2_ADDRDEC1_COL_SEL_HI_CS01__COL15__SHIFT 0x1c +#define MMEA2_ADDRDEC1_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL +#define MMEA2_ADDRDEC1_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L +#define MMEA2_ADDRDEC1_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L +#define MMEA2_ADDRDEC1_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L +#define MMEA2_ADDRDEC1_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L +#define MMEA2_ADDRDEC1_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L +#define MMEA2_ADDRDEC1_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L +#define MMEA2_ADDRDEC1_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L +//MMEA2_ADDRDEC1_COL_SEL_HI_CS23 +#define MMEA2_ADDRDEC1_COL_SEL_HI_CS23__COL8__SHIFT 0x0 +#define MMEA2_ADDRDEC1_COL_SEL_HI_CS23__COL9__SHIFT 0x4 +#define MMEA2_ADDRDEC1_COL_SEL_HI_CS23__COL10__SHIFT 0x8 +#define MMEA2_ADDRDEC1_COL_SEL_HI_CS23__COL11__SHIFT 0xc +#define MMEA2_ADDRDEC1_COL_SEL_HI_CS23__COL12__SHIFT 0x10 +#define MMEA2_ADDRDEC1_COL_SEL_HI_CS23__COL13__SHIFT 0x14 +#define MMEA2_ADDRDEC1_COL_SEL_HI_CS23__COL14__SHIFT 0x18 +#define MMEA2_ADDRDEC1_COL_SEL_HI_CS23__COL15__SHIFT 0x1c +#define MMEA2_ADDRDEC1_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL +#define MMEA2_ADDRDEC1_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L +#define MMEA2_ADDRDEC1_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L +#define MMEA2_ADDRDEC1_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L +#define MMEA2_ADDRDEC1_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L +#define MMEA2_ADDRDEC1_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L +#define MMEA2_ADDRDEC1_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L +#define MMEA2_ADDRDEC1_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L +//MMEA2_ADDRDEC1_RM_SEL_CS01 +#define MMEA2_ADDRDEC1_RM_SEL_CS01__RM0__SHIFT 0x0 +#define MMEA2_ADDRDEC1_RM_SEL_CS01__RM1__SHIFT 0x4 +#define MMEA2_ADDRDEC1_RM_SEL_CS01__RM2__SHIFT 0x8 +#define MMEA2_ADDRDEC1_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc +#define MMEA2_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA2_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA2_ADDRDEC1_RM_SEL_CS01__RM0_MASK 0x0000000FL +#define MMEA2_ADDRDEC1_RM_SEL_CS01__RM1_MASK 0x000000F0L +#define MMEA2_ADDRDEC1_RM_SEL_CS01__RM2_MASK 0x00000F00L +#define MMEA2_ADDRDEC1_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA2_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA2_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA2_ADDRDEC1_RM_SEL_CS23 +#define MMEA2_ADDRDEC1_RM_SEL_CS23__RM0__SHIFT 0x0 +#define MMEA2_ADDRDEC1_RM_SEL_CS23__RM1__SHIFT 0x4 +#define MMEA2_ADDRDEC1_RM_SEL_CS23__RM2__SHIFT 0x8 +#define 
MMEA2_ADDRDEC1_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc +#define MMEA2_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA2_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA2_ADDRDEC1_RM_SEL_CS23__RM0_MASK 0x0000000FL +#define MMEA2_ADDRDEC1_RM_SEL_CS23__RM1_MASK 0x000000F0L +#define MMEA2_ADDRDEC1_RM_SEL_CS23__RM2_MASK 0x00000F00L +#define MMEA2_ADDRDEC1_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA2_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA2_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA2_ADDRDEC1_RM_SEL_SECCS01 +#define MMEA2_ADDRDEC1_RM_SEL_SECCS01__RM0__SHIFT 0x0 +#define MMEA2_ADDRDEC1_RM_SEL_SECCS01__RM1__SHIFT 0x4 +#define MMEA2_ADDRDEC1_RM_SEL_SECCS01__RM2__SHIFT 0x8 +#define MMEA2_ADDRDEC1_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc +#define MMEA2_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA2_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA2_ADDRDEC1_RM_SEL_SECCS01__RM0_MASK 0x0000000FL +#define MMEA2_ADDRDEC1_RM_SEL_SECCS01__RM1_MASK 0x000000F0L +#define MMEA2_ADDRDEC1_RM_SEL_SECCS01__RM2_MASK 0x00000F00L +#define MMEA2_ADDRDEC1_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA2_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA2_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA2_ADDRDEC1_RM_SEL_SECCS23 +#define MMEA2_ADDRDEC1_RM_SEL_SECCS23__RM0__SHIFT 0x0 +#define MMEA2_ADDRDEC1_RM_SEL_SECCS23__RM1__SHIFT 0x4 +#define MMEA2_ADDRDEC1_RM_SEL_SECCS23__RM2__SHIFT 0x8 +#define MMEA2_ADDRDEC1_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc +#define MMEA2_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA2_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA2_ADDRDEC1_RM_SEL_SECCS23__RM0_MASK 0x0000000FL +#define MMEA2_ADDRDEC1_RM_SEL_SECCS23__RM1_MASK 0x000000F0L +#define MMEA2_ADDRDEC1_RM_SEL_SECCS23__RM2_MASK 0x00000F00L +#define MMEA2_ADDRDEC1_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA2_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA2_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA2_ADDRDEC2_BASE_ADDR_CS0 +#define MMEA2_ADDRDEC2_BASE_ADDR_CS0__CS_EN__SHIFT 0x0 +#define MMEA2_ADDRDEC2_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1 +#define MMEA2_ADDRDEC2_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L +#define MMEA2_ADDRDEC2_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA2_ADDRDEC2_BASE_ADDR_CS1 +#define MMEA2_ADDRDEC2_BASE_ADDR_CS1__CS_EN__SHIFT 0x0 +#define MMEA2_ADDRDEC2_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1 +#define MMEA2_ADDRDEC2_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L +#define MMEA2_ADDRDEC2_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA2_ADDRDEC2_BASE_ADDR_CS2 +#define MMEA2_ADDRDEC2_BASE_ADDR_CS2__CS_EN__SHIFT 0x0 +#define MMEA2_ADDRDEC2_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1 +#define MMEA2_ADDRDEC2_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L +#define MMEA2_ADDRDEC2_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA2_ADDRDEC2_BASE_ADDR_CS3 +#define MMEA2_ADDRDEC2_BASE_ADDR_CS3__CS_EN__SHIFT 0x0 +#define MMEA2_ADDRDEC2_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1 +#define MMEA2_ADDRDEC2_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L +#define MMEA2_ADDRDEC2_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA2_ADDRDEC2_BASE_ADDR_SECCS0 +#define MMEA2_ADDRDEC2_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0 +#define MMEA2_ADDRDEC2_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1 +#define MMEA2_ADDRDEC2_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L +#define 
MMEA2_ADDRDEC2_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA2_ADDRDEC2_BASE_ADDR_SECCS1 +#define MMEA2_ADDRDEC2_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0 +#define MMEA2_ADDRDEC2_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1 +#define MMEA2_ADDRDEC2_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L +#define MMEA2_ADDRDEC2_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA2_ADDRDEC2_BASE_ADDR_SECCS2 +#define MMEA2_ADDRDEC2_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0 +#define MMEA2_ADDRDEC2_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1 +#define MMEA2_ADDRDEC2_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L +#define MMEA2_ADDRDEC2_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA2_ADDRDEC2_BASE_ADDR_SECCS3 +#define MMEA2_ADDRDEC2_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0 +#define MMEA2_ADDRDEC2_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1 +#define MMEA2_ADDRDEC2_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L +#define MMEA2_ADDRDEC2_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA2_ADDRDEC2_ADDR_MASK_CS01 +#define MMEA2_ADDRDEC2_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1 +#define MMEA2_ADDRDEC2_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA2_ADDRDEC2_ADDR_MASK_CS23 +#define MMEA2_ADDRDEC2_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1 +#define MMEA2_ADDRDEC2_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA2_ADDRDEC2_ADDR_MASK_SECCS01 +#define MMEA2_ADDRDEC2_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1 +#define MMEA2_ADDRDEC2_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA2_ADDRDEC2_ADDR_MASK_SECCS23 +#define MMEA2_ADDRDEC2_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1 +#define MMEA2_ADDRDEC2_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA2_ADDRDEC2_ADDR_CFG_CS01 +#define MMEA2_ADDRDEC2_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA2_ADDRDEC2_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4 +#define MMEA2_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8 +#define MMEA2_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc +#define MMEA2_ADDRDEC2_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10 +#define MMEA2_ADDRDEC2_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14 +#define MMEA2_ADDRDEC2_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f +#define MMEA2_ADDRDEC2_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA2_ADDRDEC2_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L +#define MMEA2_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA2_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA2_ADDRDEC2_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L +#define MMEA2_ADDRDEC2_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L +#define MMEA2_ADDRDEC2_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L +//MMEA2_ADDRDEC2_ADDR_CFG_CS23 +#define MMEA2_ADDRDEC2_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA2_ADDRDEC2_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4 +#define MMEA2_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8 +#define MMEA2_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc +#define MMEA2_ADDRDEC2_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10 +#define MMEA2_ADDRDEC2_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14 +#define MMEA2_ADDRDEC2_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f +#define MMEA2_ADDRDEC2_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA2_ADDRDEC2_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L +#define MMEA2_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA2_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA2_ADDRDEC2_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L +#define MMEA2_ADDRDEC2_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L +#define MMEA2_ADDRDEC2_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L +//MMEA2_ADDRDEC2_ADDR_SEL_CS01 +#define MMEA2_ADDRDEC2_ADDR_SEL_CS01__BANK0__SHIFT 0x0 +#define 
MMEA2_ADDRDEC2_ADDR_SEL_CS01__BANK1__SHIFT 0x4 +#define MMEA2_ADDRDEC2_ADDR_SEL_CS01__BANK2__SHIFT 0x8 +#define MMEA2_ADDRDEC2_ADDR_SEL_CS01__BANK3__SHIFT 0xc +#define MMEA2_ADDRDEC2_ADDR_SEL_CS01__BANK4__SHIFT 0x10 +#define MMEA2_ADDRDEC2_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18 +#define MMEA2_ADDRDEC2_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c +#define MMEA2_ADDRDEC2_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL +#define MMEA2_ADDRDEC2_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L +#define MMEA2_ADDRDEC2_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L +#define MMEA2_ADDRDEC2_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L +#define MMEA2_ADDRDEC2_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L +#define MMEA2_ADDRDEC2_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L +#define MMEA2_ADDRDEC2_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L +//MMEA2_ADDRDEC2_ADDR_SEL_CS23 +#define MMEA2_ADDRDEC2_ADDR_SEL_CS23__BANK0__SHIFT 0x0 +#define MMEA2_ADDRDEC2_ADDR_SEL_CS23__BANK1__SHIFT 0x4 +#define MMEA2_ADDRDEC2_ADDR_SEL_CS23__BANK2__SHIFT 0x8 +#define MMEA2_ADDRDEC2_ADDR_SEL_CS23__BANK3__SHIFT 0xc +#define MMEA2_ADDRDEC2_ADDR_SEL_CS23__BANK4__SHIFT 0x10 +#define MMEA2_ADDRDEC2_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18 +#define MMEA2_ADDRDEC2_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c +#define MMEA2_ADDRDEC2_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL +#define MMEA2_ADDRDEC2_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L +#define MMEA2_ADDRDEC2_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L +#define MMEA2_ADDRDEC2_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L +#define MMEA2_ADDRDEC2_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L +#define MMEA2_ADDRDEC2_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L +#define MMEA2_ADDRDEC2_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L +//MMEA2_ADDRDEC2_ADDR_SEL2_CS01 +#define MMEA2_ADDRDEC2_ADDR_SEL2_CS01__BANK5__SHIFT 0x0 +#define MMEA2_ADDRDEC2_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL +//MMEA2_ADDRDEC2_ADDR_SEL2_CS23 +#define MMEA2_ADDRDEC2_ADDR_SEL2_CS23__BANK5__SHIFT 0x0 +#define MMEA2_ADDRDEC2_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL +//MMEA2_ADDRDEC2_COL_SEL_LO_CS01 +#define MMEA2_ADDRDEC2_COL_SEL_LO_CS01__COL0__SHIFT 0x0 +#define MMEA2_ADDRDEC2_COL_SEL_LO_CS01__COL1__SHIFT 0x4 +#define MMEA2_ADDRDEC2_COL_SEL_LO_CS01__COL2__SHIFT 0x8 +#define MMEA2_ADDRDEC2_COL_SEL_LO_CS01__COL3__SHIFT 0xc +#define MMEA2_ADDRDEC2_COL_SEL_LO_CS01__COL4__SHIFT 0x10 +#define MMEA2_ADDRDEC2_COL_SEL_LO_CS01__COL5__SHIFT 0x14 +#define MMEA2_ADDRDEC2_COL_SEL_LO_CS01__COL6__SHIFT 0x18 +#define MMEA2_ADDRDEC2_COL_SEL_LO_CS01__COL7__SHIFT 0x1c +#define MMEA2_ADDRDEC2_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL +#define MMEA2_ADDRDEC2_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L +#define MMEA2_ADDRDEC2_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L +#define MMEA2_ADDRDEC2_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L +#define MMEA2_ADDRDEC2_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L +#define MMEA2_ADDRDEC2_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L +#define MMEA2_ADDRDEC2_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L +#define MMEA2_ADDRDEC2_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L +//MMEA2_ADDRDEC2_COL_SEL_LO_CS23 +#define MMEA2_ADDRDEC2_COL_SEL_LO_CS23__COL0__SHIFT 0x0 +#define MMEA2_ADDRDEC2_COL_SEL_LO_CS23__COL1__SHIFT 0x4 +#define MMEA2_ADDRDEC2_COL_SEL_LO_CS23__COL2__SHIFT 0x8 +#define MMEA2_ADDRDEC2_COL_SEL_LO_CS23__COL3__SHIFT 0xc +#define MMEA2_ADDRDEC2_COL_SEL_LO_CS23__COL4__SHIFT 0x10 +#define MMEA2_ADDRDEC2_COL_SEL_LO_CS23__COL5__SHIFT 0x14 +#define MMEA2_ADDRDEC2_COL_SEL_LO_CS23__COL6__SHIFT 0x18 +#define MMEA2_ADDRDEC2_COL_SEL_LO_CS23__COL7__SHIFT 0x1c +#define MMEA2_ADDRDEC2_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL +#define MMEA2_ADDRDEC2_COL_SEL_LO_CS23__COL1_MASK 
0x000000F0L +#define MMEA2_ADDRDEC2_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L +#define MMEA2_ADDRDEC2_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L +#define MMEA2_ADDRDEC2_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L +#define MMEA2_ADDRDEC2_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L +#define MMEA2_ADDRDEC2_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L +#define MMEA2_ADDRDEC2_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L +//MMEA2_ADDRDEC2_COL_SEL_HI_CS01 +#define MMEA2_ADDRDEC2_COL_SEL_HI_CS01__COL8__SHIFT 0x0 +#define MMEA2_ADDRDEC2_COL_SEL_HI_CS01__COL9__SHIFT 0x4 +#define MMEA2_ADDRDEC2_COL_SEL_HI_CS01__COL10__SHIFT 0x8 +#define MMEA2_ADDRDEC2_COL_SEL_HI_CS01__COL11__SHIFT 0xc +#define MMEA2_ADDRDEC2_COL_SEL_HI_CS01__COL12__SHIFT 0x10 +#define MMEA2_ADDRDEC2_COL_SEL_HI_CS01__COL13__SHIFT 0x14 +#define MMEA2_ADDRDEC2_COL_SEL_HI_CS01__COL14__SHIFT 0x18 +#define MMEA2_ADDRDEC2_COL_SEL_HI_CS01__COL15__SHIFT 0x1c +#define MMEA2_ADDRDEC2_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL +#define MMEA2_ADDRDEC2_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L +#define MMEA2_ADDRDEC2_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L +#define MMEA2_ADDRDEC2_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L +#define MMEA2_ADDRDEC2_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L +#define MMEA2_ADDRDEC2_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L +#define MMEA2_ADDRDEC2_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L +#define MMEA2_ADDRDEC2_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L +//MMEA2_ADDRDEC2_COL_SEL_HI_CS23 +#define MMEA2_ADDRDEC2_COL_SEL_HI_CS23__COL8__SHIFT 0x0 +#define MMEA2_ADDRDEC2_COL_SEL_HI_CS23__COL9__SHIFT 0x4 +#define MMEA2_ADDRDEC2_COL_SEL_HI_CS23__COL10__SHIFT 0x8 +#define MMEA2_ADDRDEC2_COL_SEL_HI_CS23__COL11__SHIFT 0xc +#define MMEA2_ADDRDEC2_COL_SEL_HI_CS23__COL12__SHIFT 0x10 +#define MMEA2_ADDRDEC2_COL_SEL_HI_CS23__COL13__SHIFT 0x14 +#define MMEA2_ADDRDEC2_COL_SEL_HI_CS23__COL14__SHIFT 0x18 +#define MMEA2_ADDRDEC2_COL_SEL_HI_CS23__COL15__SHIFT 0x1c +#define MMEA2_ADDRDEC2_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL +#define MMEA2_ADDRDEC2_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L +#define MMEA2_ADDRDEC2_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L +#define MMEA2_ADDRDEC2_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L +#define MMEA2_ADDRDEC2_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L +#define MMEA2_ADDRDEC2_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L +#define MMEA2_ADDRDEC2_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L +#define MMEA2_ADDRDEC2_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L +//MMEA2_ADDRDEC2_RM_SEL_CS01 +#define MMEA2_ADDRDEC2_RM_SEL_CS01__RM0__SHIFT 0x0 +#define MMEA2_ADDRDEC2_RM_SEL_CS01__RM1__SHIFT 0x4 +#define MMEA2_ADDRDEC2_RM_SEL_CS01__RM2__SHIFT 0x8 +#define MMEA2_ADDRDEC2_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc +#define MMEA2_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA2_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA2_ADDRDEC2_RM_SEL_CS01__RM0_MASK 0x0000000FL +#define MMEA2_ADDRDEC2_RM_SEL_CS01__RM1_MASK 0x000000F0L +#define MMEA2_ADDRDEC2_RM_SEL_CS01__RM2_MASK 0x00000F00L +#define MMEA2_ADDRDEC2_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA2_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA2_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA2_ADDRDEC2_RM_SEL_CS23 +#define MMEA2_ADDRDEC2_RM_SEL_CS23__RM0__SHIFT 0x0 +#define MMEA2_ADDRDEC2_RM_SEL_CS23__RM1__SHIFT 0x4 +#define MMEA2_ADDRDEC2_RM_SEL_CS23__RM2__SHIFT 0x8 +#define MMEA2_ADDRDEC2_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc +#define MMEA2_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA2_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 
0x12 +#define MMEA2_ADDRDEC2_RM_SEL_CS23__RM0_MASK 0x0000000FL +#define MMEA2_ADDRDEC2_RM_SEL_CS23__RM1_MASK 0x000000F0L +#define MMEA2_ADDRDEC2_RM_SEL_CS23__RM2_MASK 0x00000F00L +#define MMEA2_ADDRDEC2_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA2_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA2_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA2_ADDRDEC2_RM_SEL_SECCS01 +#define MMEA2_ADDRDEC2_RM_SEL_SECCS01__RM0__SHIFT 0x0 +#define MMEA2_ADDRDEC2_RM_SEL_SECCS01__RM1__SHIFT 0x4 +#define MMEA2_ADDRDEC2_RM_SEL_SECCS01__RM2__SHIFT 0x8 +#define MMEA2_ADDRDEC2_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc +#define MMEA2_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA2_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA2_ADDRDEC2_RM_SEL_SECCS01__RM0_MASK 0x0000000FL +#define MMEA2_ADDRDEC2_RM_SEL_SECCS01__RM1_MASK 0x000000F0L +#define MMEA2_ADDRDEC2_RM_SEL_SECCS01__RM2_MASK 0x00000F00L +#define MMEA2_ADDRDEC2_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA2_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA2_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA2_ADDRDEC2_RM_SEL_SECCS23 +#define MMEA2_ADDRDEC2_RM_SEL_SECCS23__RM0__SHIFT 0x0 +#define MMEA2_ADDRDEC2_RM_SEL_SECCS23__RM1__SHIFT 0x4 +#define MMEA2_ADDRDEC2_RM_SEL_SECCS23__RM2__SHIFT 0x8 +#define MMEA2_ADDRDEC2_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc +#define MMEA2_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA2_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA2_ADDRDEC2_RM_SEL_SECCS23__RM0_MASK 0x0000000FL +#define MMEA2_ADDRDEC2_RM_SEL_SECCS23__RM1_MASK 0x000000F0L +#define MMEA2_ADDRDEC2_RM_SEL_SECCS23__RM2_MASK 0x00000F00L +#define MMEA2_ADDRDEC2_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA2_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA2_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA2_ADDRNORMDRAM_GLOBAL_CNTL +#define MMEA2_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K__SHIFT 0x14 +#define MMEA2_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M__SHIFT 0x15 +#define MMEA2_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G__SHIFT 0x16 +#define MMEA2_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K_MASK 0x00100000L +#define MMEA2_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M_MASK 0x00200000L +#define MMEA2_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G_MASK 0x00400000L +//MMEA2_ADDRNORMGMI_GLOBAL_CNTL +#define MMEA2_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K__SHIFT 0x14 +#define MMEA2_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M__SHIFT 0x15 +#define MMEA2_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G__SHIFT 0x16 +#define MMEA2_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K_MASK 0x00100000L +#define MMEA2_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M_MASK 0x00200000L +#define MMEA2_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G_MASK 0x00400000L +//MMEA2_IO_RD_CLI2GRP_MAP0 +#define MMEA2_IO_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0 +#define MMEA2_IO_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2 +#define MMEA2_IO_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4 +#define MMEA2_IO_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6 +#define MMEA2_IO_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8 +#define MMEA2_IO_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa +#define MMEA2_IO_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc +#define MMEA2_IO_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe +#define MMEA2_IO_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10 +#define 
MMEA2_IO_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12 +#define MMEA2_IO_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14 +#define MMEA2_IO_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16 +#define MMEA2_IO_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18 +#define MMEA2_IO_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a +#define MMEA2_IO_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c +#define MMEA2_IO_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e +#define MMEA2_IO_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L +#define MMEA2_IO_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL +#define MMEA2_IO_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L +#define MMEA2_IO_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L +#define MMEA2_IO_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L +#define MMEA2_IO_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L +#define MMEA2_IO_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L +#define MMEA2_IO_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L +#define MMEA2_IO_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L +#define MMEA2_IO_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L +#define MMEA2_IO_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L +#define MMEA2_IO_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L +#define MMEA2_IO_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L +#define MMEA2_IO_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L +#define MMEA2_IO_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L +#define MMEA2_IO_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L +//MMEA2_IO_RD_CLI2GRP_MAP1 +#define MMEA2_IO_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0 +#define MMEA2_IO_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2 +#define MMEA2_IO_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4 +#define MMEA2_IO_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6 +#define MMEA2_IO_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8 +#define MMEA2_IO_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa +#define MMEA2_IO_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc +#define MMEA2_IO_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe +#define MMEA2_IO_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10 +#define MMEA2_IO_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12 +#define MMEA2_IO_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14 +#define MMEA2_IO_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16 +#define MMEA2_IO_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18 +#define MMEA2_IO_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a +#define MMEA2_IO_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c +#define MMEA2_IO_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e +#define MMEA2_IO_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L +#define MMEA2_IO_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL +#define MMEA2_IO_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L +#define MMEA2_IO_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L +#define MMEA2_IO_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L +#define MMEA2_IO_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L +#define MMEA2_IO_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L +#define MMEA2_IO_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L +#define MMEA2_IO_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L +#define MMEA2_IO_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L +#define MMEA2_IO_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L +#define MMEA2_IO_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L +#define MMEA2_IO_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L +#define MMEA2_IO_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L +#define MMEA2_IO_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L +#define MMEA2_IO_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L +//MMEA2_IO_WR_CLI2GRP_MAP0 +#define MMEA2_IO_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0 +#define MMEA2_IO_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2 
+#define MMEA2_IO_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4 +#define MMEA2_IO_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6 +#define MMEA2_IO_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8 +#define MMEA2_IO_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa +#define MMEA2_IO_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc +#define MMEA2_IO_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe +#define MMEA2_IO_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10 +#define MMEA2_IO_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12 +#define MMEA2_IO_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14 +#define MMEA2_IO_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16 +#define MMEA2_IO_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18 +#define MMEA2_IO_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a +#define MMEA2_IO_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c +#define MMEA2_IO_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e +#define MMEA2_IO_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L +#define MMEA2_IO_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL +#define MMEA2_IO_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L +#define MMEA2_IO_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L +#define MMEA2_IO_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L +#define MMEA2_IO_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L +#define MMEA2_IO_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L +#define MMEA2_IO_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L +#define MMEA2_IO_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L +#define MMEA2_IO_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L +#define MMEA2_IO_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L +#define MMEA2_IO_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L +#define MMEA2_IO_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L +#define MMEA2_IO_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L +#define MMEA2_IO_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L +#define MMEA2_IO_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L +//MMEA2_IO_WR_CLI2GRP_MAP1 +#define MMEA2_IO_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0 +#define MMEA2_IO_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2 +#define MMEA2_IO_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4 +#define MMEA2_IO_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6 +#define MMEA2_IO_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8 +#define MMEA2_IO_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa +#define MMEA2_IO_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc +#define MMEA2_IO_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe +#define MMEA2_IO_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10 +#define MMEA2_IO_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12 +#define MMEA2_IO_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14 +#define MMEA2_IO_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16 +#define MMEA2_IO_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18 +#define MMEA2_IO_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a +#define MMEA2_IO_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c +#define MMEA2_IO_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e +#define MMEA2_IO_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L +#define MMEA2_IO_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL +#define MMEA2_IO_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L +#define MMEA2_IO_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L +#define MMEA2_IO_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L +#define MMEA2_IO_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L +#define MMEA2_IO_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L +#define MMEA2_IO_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L +#define MMEA2_IO_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L +#define MMEA2_IO_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L +#define MMEA2_IO_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L +#define MMEA2_IO_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 
0x00C00000L +#define MMEA2_IO_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L +#define MMEA2_IO_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L +#define MMEA2_IO_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L +#define MMEA2_IO_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L +//MMEA2_IO_RD_COMBINE_FLUSH +#define MMEA2_IO_RD_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0 +#define MMEA2_IO_RD_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4 +#define MMEA2_IO_RD_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8 +#define MMEA2_IO_RD_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc +#define MMEA2_IO_RD_COMBINE_FLUSH__FORWARD_COMB_ONLY__SHIFT 0x10 +#define MMEA2_IO_RD_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL +#define MMEA2_IO_RD_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L +#define MMEA2_IO_RD_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L +#define MMEA2_IO_RD_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L +#define MMEA2_IO_RD_COMBINE_FLUSH__FORWARD_COMB_ONLY_MASK 0x00010000L +//MMEA2_IO_WR_COMBINE_FLUSH +#define MMEA2_IO_WR_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0 +#define MMEA2_IO_WR_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4 +#define MMEA2_IO_WR_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8 +#define MMEA2_IO_WR_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc +#define MMEA2_IO_WR_COMBINE_FLUSH__FORWARD_COMB_ONLY__SHIFT 0x10 +#define MMEA2_IO_WR_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL +#define MMEA2_IO_WR_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L +#define MMEA2_IO_WR_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L +#define MMEA2_IO_WR_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L +#define MMEA2_IO_WR_COMBINE_FLUSH__FORWARD_COMB_ONLY_MASK 0x00010000L +//MMEA2_IO_GROUP_BURST +#define MMEA2_IO_GROUP_BURST__RD_LIMIT_LO__SHIFT 0x0 +#define MMEA2_IO_GROUP_BURST__RD_LIMIT_HI__SHIFT 0x8 +#define MMEA2_IO_GROUP_BURST__WR_LIMIT_LO__SHIFT 0x10 +#define MMEA2_IO_GROUP_BURST__WR_LIMIT_HI__SHIFT 0x18 +#define MMEA2_IO_GROUP_BURST__RD_LIMIT_LO_MASK 0x000000FFL +#define MMEA2_IO_GROUP_BURST__RD_LIMIT_HI_MASK 0x0000FF00L +#define MMEA2_IO_GROUP_BURST__WR_LIMIT_LO_MASK 0x00FF0000L +#define MMEA2_IO_GROUP_BURST__WR_LIMIT_HI_MASK 0xFF000000L +//MMEA2_IO_RD_PRI_AGE +#define MMEA2_IO_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0 +#define MMEA2_IO_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3 +#define MMEA2_IO_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6 +#define MMEA2_IO_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9 +#define MMEA2_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc +#define MMEA2_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf +#define MMEA2_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12 +#define MMEA2_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15 +#define MMEA2_IO_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L +#define MMEA2_IO_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L +#define MMEA2_IO_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L +#define MMEA2_IO_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L +#define MMEA2_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L +#define MMEA2_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L +#define MMEA2_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L +#define MMEA2_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L +//MMEA2_IO_WR_PRI_AGE +#define MMEA2_IO_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0 +#define MMEA2_IO_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3 +#define MMEA2_IO_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6 +#define MMEA2_IO_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9 +#define MMEA2_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc +#define MMEA2_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf +#define 
MMEA2_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12 +#define MMEA2_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15 +#define MMEA2_IO_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L +#define MMEA2_IO_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L +#define MMEA2_IO_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L +#define MMEA2_IO_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L +#define MMEA2_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L +#define MMEA2_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L +#define MMEA2_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L +#define MMEA2_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L +//MMEA2_IO_RD_PRI_QUEUING +#define MMEA2_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0 +#define MMEA2_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3 +#define MMEA2_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6 +#define MMEA2_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9 +#define MMEA2_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L +#define MMEA2_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L +#define MMEA2_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L +#define MMEA2_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L +//MMEA2_IO_WR_PRI_QUEUING +#define MMEA2_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0 +#define MMEA2_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3 +#define MMEA2_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6 +#define MMEA2_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9 +#define MMEA2_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L +#define MMEA2_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L +#define MMEA2_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L +#define MMEA2_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L +//MMEA2_IO_RD_PRI_FIXED +#define MMEA2_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0 +#define MMEA2_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3 +#define MMEA2_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6 +#define MMEA2_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9 +#define MMEA2_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L +#define MMEA2_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L +#define MMEA2_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L +#define MMEA2_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L +//MMEA2_IO_WR_PRI_FIXED +#define MMEA2_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0 +#define MMEA2_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3 +#define MMEA2_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6 +#define MMEA2_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9 +#define MMEA2_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L +#define MMEA2_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L +#define MMEA2_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L +#define MMEA2_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L +//MMEA2_IO_RD_PRI_URGENCY +#define MMEA2_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0 +#define MMEA2_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3 +#define MMEA2_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6 +#define MMEA2_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9 +#define MMEA2_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc +#define MMEA2_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd +#define 
[... the remaining MMEA2 register field definitions continue here as +#define __SHIFT/_MASK pairs: IO_RD/IO_WR_PRI_URGENCY, IO_RD/IO_WR_PRI_URGENCY_MASKING (CID0-CID31), IO_RD/IO_WR_PRI_QUANT_PRI1-PRI3, SDP_ARB_DRAM/GMI/FINAL, SDP_DRAM/GMI/IO_PRIORITY, SDP_CREDITS, SDP_TAG_RESERVE0/1, SDP_VCC_RESERVE0/1, SDP_VCD_RESERVE0/1, SDP_REQ_CNTL, MISC, LATENCY_SAMPLING, PERFCOUNTER_LO/HI, PERFCOUNTER0/1_CFG, PERFCOUNTER_RSLT_CNTL, EDC_CNT/EDC_CNT2/EDC_CNT3, DSM_CNTL/CNTLA/CNTL2/CNTL2A, CGTT_CLK_CTRL, EDC_MODE, ERR_STATUS, MISC2 and ADDRDEC_SELECT; the mmhub_ea_mmeadec3 (MMEA3) address block then repeats the same per-field layout, starting with DRAM_RD/WR_CLI2GRP_MAP0/1, DRAM_RD/WR_GRP2VC_MAP, DRAM_RD/WR_LAZY, DRAM_RD/WR_CAM_CNTL, DRAM_PAGE_BURST, DRAM_RD/WR_PRI_AGE, DRAM_RD/WR_PRI_QUEUING, DRAM_RD/WR_PRI_FIXED, DRAM_RD/WR_PRI_URGENCY and DRAM_RD_PRI_QUANT_PRI1 ...]
+#define MMEA3_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA3_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA3_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA3_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA3_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA3_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA3_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA3_DRAM_RD_PRI_QUANT_PRI2 +#define MMEA3_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA3_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA3_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA3_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA3_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA3_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA3_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA3_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA3_DRAM_RD_PRI_QUANT_PRI3 +#define MMEA3_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA3_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA3_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA3_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA3_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA3_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA3_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA3_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA3_DRAM_WR_PRI_QUANT_PRI1 +#define MMEA3_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA3_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA3_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA3_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA3_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA3_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA3_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA3_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA3_DRAM_WR_PRI_QUANT_PRI2 +#define MMEA3_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA3_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA3_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA3_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA3_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA3_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA3_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA3_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA3_DRAM_WR_PRI_QUANT_PRI3 +#define MMEA3_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA3_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA3_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA3_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA3_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA3_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA3_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA3_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA3_GMI_RD_CLI2GRP_MAP0 +#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0 
+#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2 +#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4 +#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6 +#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8 +#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa +#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc +#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe +#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10 +#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12 +#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14 +#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16 +#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18 +#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a +#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c +#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e +#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L +#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL +#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L +#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L +#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L +#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L +#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L +#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L +#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L +#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L +#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L +#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L +#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L +#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L +#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L +#define MMEA3_GMI_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L +//MMEA3_GMI_RD_CLI2GRP_MAP1 +#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0 +#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2 +#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4 +#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6 +#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8 +#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa +#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc +#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe +#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10 +#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12 +#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14 +#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16 +#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18 +#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a +#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c +#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e +#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L +#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL +#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L +#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L +#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L +#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L +#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L +#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L +#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L +#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L +#define 
MMEA3_GMI_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L +#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L +#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L +#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L +#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L +#define MMEA3_GMI_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L +//MMEA3_GMI_WR_CLI2GRP_MAP0 +#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0 +#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2 +#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4 +#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6 +#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8 +#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa +#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc +#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe +#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10 +#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12 +#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14 +#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16 +#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18 +#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a +#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c +#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e +#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L +#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL +#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L +#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L +#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L +#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L +#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L +#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L +#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L +#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L +#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L +#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L +#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L +#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L +#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L +#define MMEA3_GMI_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L +//MMEA3_GMI_WR_CLI2GRP_MAP1 +#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0 +#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2 +#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4 +#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6 +#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8 +#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa +#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc +#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe +#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10 +#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12 +#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14 +#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16 +#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18 +#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a +#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c +#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e +#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L +#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL +#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 
0x00000030L +#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L +#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L +#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L +#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L +#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L +#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L +#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L +#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L +#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L +#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L +#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L +#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L +#define MMEA3_GMI_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L +//MMEA3_GMI_RD_GRP2VC_MAP +#define MMEA3_GMI_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0 +#define MMEA3_GMI_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3 +#define MMEA3_GMI_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6 +#define MMEA3_GMI_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9 +#define MMEA3_GMI_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L +#define MMEA3_GMI_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L +#define MMEA3_GMI_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L +#define MMEA3_GMI_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L +//MMEA3_GMI_WR_GRP2VC_MAP +#define MMEA3_GMI_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0 +#define MMEA3_GMI_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3 +#define MMEA3_GMI_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6 +#define MMEA3_GMI_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9 +#define MMEA3_GMI_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L +#define MMEA3_GMI_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L +#define MMEA3_GMI_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L +#define MMEA3_GMI_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L +//MMEA3_GMI_RD_LAZY +#define MMEA3_GMI_RD_LAZY__GROUP0_DELAY__SHIFT 0x0 +#define MMEA3_GMI_RD_LAZY__GROUP1_DELAY__SHIFT 0x3 +#define MMEA3_GMI_RD_LAZY__GROUP2_DELAY__SHIFT 0x6 +#define MMEA3_GMI_RD_LAZY__GROUP3_DELAY__SHIFT 0x9 +#define MMEA3_GMI_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc +#define MMEA3_GMI_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14 +#define MMEA3_GMI_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b +#define MMEA3_GMI_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L +#define MMEA3_GMI_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L +#define MMEA3_GMI_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L +#define MMEA3_GMI_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L +#define MMEA3_GMI_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L +#define MMEA3_GMI_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L +#define MMEA3_GMI_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L +//MMEA3_GMI_WR_LAZY +#define MMEA3_GMI_WR_LAZY__GROUP0_DELAY__SHIFT 0x0 +#define MMEA3_GMI_WR_LAZY__GROUP1_DELAY__SHIFT 0x3 +#define MMEA3_GMI_WR_LAZY__GROUP2_DELAY__SHIFT 0x6 +#define MMEA3_GMI_WR_LAZY__GROUP3_DELAY__SHIFT 0x9 +#define MMEA3_GMI_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc +#define MMEA3_GMI_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14 +#define MMEA3_GMI_WR_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b +#define MMEA3_GMI_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L +#define MMEA3_GMI_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L +#define MMEA3_GMI_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L +#define MMEA3_GMI_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L +#define MMEA3_GMI_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L +#define MMEA3_GMI_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L +#define MMEA3_GMI_WR_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L +//MMEA3_GMI_RD_CAM_CNTL +#define 
MMEA3_GMI_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0 +#define MMEA3_GMI_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4 +#define MMEA3_GMI_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8 +#define MMEA3_GMI_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc +#define MMEA3_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10 +#define MMEA3_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13 +#define MMEA3_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16 +#define MMEA3_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19 +#define MMEA3_GMI_RD_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c +#define MMEA3_GMI_RD_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d +#define MMEA3_GMI_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL +#define MMEA3_GMI_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L +#define MMEA3_GMI_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L +#define MMEA3_GMI_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L +#define MMEA3_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L +#define MMEA3_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L +#define MMEA3_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L +#define MMEA3_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L +#define MMEA3_GMI_RD_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L +#define MMEA3_GMI_RD_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L +//MMEA3_GMI_WR_CAM_CNTL +#define MMEA3_GMI_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0 +#define MMEA3_GMI_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4 +#define MMEA3_GMI_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8 +#define MMEA3_GMI_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc +#define MMEA3_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10 +#define MMEA3_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13 +#define MMEA3_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16 +#define MMEA3_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19 +#define MMEA3_GMI_WR_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c +#define MMEA3_GMI_WR_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d +#define MMEA3_GMI_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL +#define MMEA3_GMI_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L +#define MMEA3_GMI_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L +#define MMEA3_GMI_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L +#define MMEA3_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L +#define MMEA3_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L +#define MMEA3_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L +#define MMEA3_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L +#define MMEA3_GMI_WR_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L +#define MMEA3_GMI_WR_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L +//MMEA3_GMI_PAGE_BURST +#define MMEA3_GMI_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0 +#define MMEA3_GMI_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8 +#define MMEA3_GMI_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10 +#define MMEA3_GMI_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18 +#define MMEA3_GMI_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL +#define MMEA3_GMI_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L +#define MMEA3_GMI_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L +#define MMEA3_GMI_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L +//MMEA3_GMI_RD_PRI_AGE +#define MMEA3_GMI_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0 +#define MMEA3_GMI_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3 +#define MMEA3_GMI_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6 +#define MMEA3_GMI_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9 +#define MMEA3_GMI_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc +#define MMEA3_GMI_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf +#define MMEA3_GMI_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12 +#define MMEA3_GMI_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15 
+#define MMEA3_GMI_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L +#define MMEA3_GMI_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L +#define MMEA3_GMI_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L +#define MMEA3_GMI_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L +#define MMEA3_GMI_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L +#define MMEA3_GMI_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L +#define MMEA3_GMI_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L +#define MMEA3_GMI_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L +//MMEA3_GMI_WR_PRI_AGE +#define MMEA3_GMI_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0 +#define MMEA3_GMI_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3 +#define MMEA3_GMI_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6 +#define MMEA3_GMI_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9 +#define MMEA3_GMI_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc +#define MMEA3_GMI_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf +#define MMEA3_GMI_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12 +#define MMEA3_GMI_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15 +#define MMEA3_GMI_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L +#define MMEA3_GMI_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L +#define MMEA3_GMI_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L +#define MMEA3_GMI_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L +#define MMEA3_GMI_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L +#define MMEA3_GMI_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L +#define MMEA3_GMI_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L +#define MMEA3_GMI_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L +//MMEA3_GMI_RD_PRI_QUEUING +#define MMEA3_GMI_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0 +#define MMEA3_GMI_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3 +#define MMEA3_GMI_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6 +#define MMEA3_GMI_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9 +#define MMEA3_GMI_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L +#define MMEA3_GMI_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L +#define MMEA3_GMI_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L +#define MMEA3_GMI_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L +//MMEA3_GMI_WR_PRI_QUEUING +#define MMEA3_GMI_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0 +#define MMEA3_GMI_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3 +#define MMEA3_GMI_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6 +#define MMEA3_GMI_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9 +#define MMEA3_GMI_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L +#define MMEA3_GMI_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L +#define MMEA3_GMI_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L +#define MMEA3_GMI_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L +//MMEA3_GMI_RD_PRI_FIXED +#define MMEA3_GMI_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0 +#define MMEA3_GMI_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3 +#define MMEA3_GMI_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6 +#define MMEA3_GMI_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9 +#define MMEA3_GMI_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L +#define MMEA3_GMI_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L +#define MMEA3_GMI_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L +#define MMEA3_GMI_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L +//MMEA3_GMI_WR_PRI_FIXED +#define 
MMEA3_GMI_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0 +#define MMEA3_GMI_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3 +#define MMEA3_GMI_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6 +#define MMEA3_GMI_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9 +#define MMEA3_GMI_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L +#define MMEA3_GMI_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L +#define MMEA3_GMI_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L +#define MMEA3_GMI_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L +//MMEA3_GMI_RD_PRI_URGENCY +#define MMEA3_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0 +#define MMEA3_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3 +#define MMEA3_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6 +#define MMEA3_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9 +#define MMEA3_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc +#define MMEA3_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd +#define MMEA3_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe +#define MMEA3_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf +#define MMEA3_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L +#define MMEA3_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L +#define MMEA3_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L +#define MMEA3_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L +#define MMEA3_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L +#define MMEA3_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L +#define MMEA3_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L +#define MMEA3_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L +//MMEA3_GMI_WR_PRI_URGENCY +#define MMEA3_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0 +#define MMEA3_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3 +#define MMEA3_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6 +#define MMEA3_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9 +#define MMEA3_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc +#define MMEA3_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd +#define MMEA3_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe +#define MMEA3_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf +#define MMEA3_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L +#define MMEA3_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L +#define MMEA3_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L +#define MMEA3_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L +#define MMEA3_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L +#define MMEA3_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L +#define MMEA3_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L +#define MMEA3_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L +//MMEA3_GMI_RD_PRI_URGENCY_MASKING +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0 +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1 +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2 +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3 +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4 +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5 +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6 +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7 +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8 
+#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9 +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10 +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11 +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12 +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13 +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14 +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15 +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16 +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17 +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18 +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19 +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 
0x10000000L +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L +#define MMEA3_GMI_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L +//MMEA3_GMI_WR_PRI_URGENCY_MASKING +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0 +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1 +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2 +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3 +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4 +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5 +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6 +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7 +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8 +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9 +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10 +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11 +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12 +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13 +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14 +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15 +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16 +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17 +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18 +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19 +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L +#define 
MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L +#define MMEA3_GMI_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L +//MMEA3_GMI_RD_PRI_QUANT_PRI1 +#define MMEA3_GMI_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA3_GMI_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA3_GMI_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA3_GMI_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA3_GMI_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA3_GMI_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA3_GMI_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA3_GMI_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA3_GMI_RD_PRI_QUANT_PRI2 +#define MMEA3_GMI_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA3_GMI_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA3_GMI_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA3_GMI_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA3_GMI_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA3_GMI_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA3_GMI_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA3_GMI_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA3_GMI_RD_PRI_QUANT_PRI3 +#define MMEA3_GMI_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA3_GMI_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA3_GMI_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA3_GMI_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA3_GMI_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA3_GMI_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA3_GMI_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA3_GMI_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA3_GMI_WR_PRI_QUANT_PRI1 +#define MMEA3_GMI_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA3_GMI_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA3_GMI_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA3_GMI_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA3_GMI_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA3_GMI_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA3_GMI_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA3_GMI_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA3_GMI_WR_PRI_QUANT_PRI2 +#define MMEA3_GMI_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA3_GMI_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 
0x8 +#define MMEA3_GMI_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA3_GMI_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA3_GMI_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA3_GMI_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA3_GMI_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA3_GMI_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA3_GMI_WR_PRI_QUANT_PRI3 +#define MMEA3_GMI_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA3_GMI_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA3_GMI_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA3_GMI_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA3_GMI_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA3_GMI_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA3_GMI_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA3_GMI_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA3_ADDRNORM_BASE_ADDR0 +#define MMEA3_ADDRNORM_BASE_ADDR0__ADDR_RNG_VAL__SHIFT 0x0 +#define MMEA3_ADDRNORM_BASE_ADDR0__LGCY_MMIO_HOLE_EN__SHIFT 0x1 +#define MMEA3_ADDRNORM_BASE_ADDR0__INTLV_NUM_CHAN__SHIFT 0x2 +#define MMEA3_ADDRNORM_BASE_ADDR0__INTLV_NUM_DIES__SHIFT 0x6 +#define MMEA3_ADDRNORM_BASE_ADDR0__INTLV_NUM_SOCKETS__SHIFT 0x8 +#define MMEA3_ADDRNORM_BASE_ADDR0__INTLV_ADDR_SEL__SHIFT 0x9 +#define MMEA3_ADDRNORM_BASE_ADDR0__BASE_ADDR__SHIFT 0xc +#define MMEA3_ADDRNORM_BASE_ADDR0__ADDR_RNG_VAL_MASK 0x00000001L +#define MMEA3_ADDRNORM_BASE_ADDR0__LGCY_MMIO_HOLE_EN_MASK 0x00000002L +#define MMEA3_ADDRNORM_BASE_ADDR0__INTLV_NUM_CHAN_MASK 0x0000003CL +#define MMEA3_ADDRNORM_BASE_ADDR0__INTLV_NUM_DIES_MASK 0x000000C0L +#define MMEA3_ADDRNORM_BASE_ADDR0__INTLV_NUM_SOCKETS_MASK 0x00000100L +#define MMEA3_ADDRNORM_BASE_ADDR0__INTLV_ADDR_SEL_MASK 0x00000E00L +#define MMEA3_ADDRNORM_BASE_ADDR0__BASE_ADDR_MASK 0xFFFFF000L +//MMEA3_ADDRNORM_LIMIT_ADDR0 +#define MMEA3_ADDRNORM_LIMIT_ADDR0__DST_FABRIC_ID__SHIFT 0x0 +#define MMEA3_ADDRNORM_LIMIT_ADDR0__LIMIT_ADDR__SHIFT 0xc +#define MMEA3_ADDRNORM_LIMIT_ADDR0__DST_FABRIC_ID_MASK 0x0000001FL +#define MMEA3_ADDRNORM_LIMIT_ADDR0__LIMIT_ADDR_MASK 0xFFFFF000L +//MMEA3_ADDRNORM_BASE_ADDR1 +#define MMEA3_ADDRNORM_BASE_ADDR1__ADDR_RNG_VAL__SHIFT 0x0 +#define MMEA3_ADDRNORM_BASE_ADDR1__LGCY_MMIO_HOLE_EN__SHIFT 0x1 +#define MMEA3_ADDRNORM_BASE_ADDR1__INTLV_NUM_CHAN__SHIFT 0x2 +#define MMEA3_ADDRNORM_BASE_ADDR1__INTLV_NUM_DIES__SHIFT 0x6 +#define MMEA3_ADDRNORM_BASE_ADDR1__INTLV_NUM_SOCKETS__SHIFT 0x8 +#define MMEA3_ADDRNORM_BASE_ADDR1__INTLV_ADDR_SEL__SHIFT 0x9 +#define MMEA3_ADDRNORM_BASE_ADDR1__BASE_ADDR__SHIFT 0xc +#define MMEA3_ADDRNORM_BASE_ADDR1__ADDR_RNG_VAL_MASK 0x00000001L +#define MMEA3_ADDRNORM_BASE_ADDR1__LGCY_MMIO_HOLE_EN_MASK 0x00000002L +#define MMEA3_ADDRNORM_BASE_ADDR1__INTLV_NUM_CHAN_MASK 0x0000003CL +#define MMEA3_ADDRNORM_BASE_ADDR1__INTLV_NUM_DIES_MASK 0x000000C0L +#define MMEA3_ADDRNORM_BASE_ADDR1__INTLV_NUM_SOCKETS_MASK 0x00000100L +#define MMEA3_ADDRNORM_BASE_ADDR1__INTLV_ADDR_SEL_MASK 0x00000E00L +#define MMEA3_ADDRNORM_BASE_ADDR1__BASE_ADDR_MASK 0xFFFFF000L +//MMEA3_ADDRNORM_LIMIT_ADDR1 +#define MMEA3_ADDRNORM_LIMIT_ADDR1__DST_FABRIC_ID__SHIFT 0x0 +#define MMEA3_ADDRNORM_LIMIT_ADDR1__LIMIT_ADDR__SHIFT 0xc +#define MMEA3_ADDRNORM_LIMIT_ADDR1__DST_FABRIC_ID_MASK 0x0000001FL +#define MMEA3_ADDRNORM_LIMIT_ADDR1__LIMIT_ADDR_MASK 0xFFFFF000L +//MMEA3_ADDRNORM_OFFSET_ADDR1 +#define MMEA3_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_EN__SHIFT 0x0 
+#define MMEA3_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET__SHIFT 0x14 +#define MMEA3_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_EN_MASK 0x00000001L +#define MMEA3_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_MASK 0xFFF00000L +//MMEA3_ADDRNORM_BASE_ADDR2 +#define MMEA3_ADDRNORM_BASE_ADDR2__ADDR_RNG_VAL__SHIFT 0x0 +#define MMEA3_ADDRNORM_BASE_ADDR2__LGCY_MMIO_HOLE_EN__SHIFT 0x1 +#define MMEA3_ADDRNORM_BASE_ADDR2__INTLV_NUM_CHAN__SHIFT 0x2 +#define MMEA3_ADDRNORM_BASE_ADDR2__INTLV_NUM_DIES__SHIFT 0x6 +#define MMEA3_ADDRNORM_BASE_ADDR2__INTLV_NUM_SOCKETS__SHIFT 0x8 +#define MMEA3_ADDRNORM_BASE_ADDR2__INTLV_ADDR_SEL__SHIFT 0x9 +#define MMEA3_ADDRNORM_BASE_ADDR2__BASE_ADDR__SHIFT 0xc +#define MMEA3_ADDRNORM_BASE_ADDR2__ADDR_RNG_VAL_MASK 0x00000001L +#define MMEA3_ADDRNORM_BASE_ADDR2__LGCY_MMIO_HOLE_EN_MASK 0x00000002L +#define MMEA3_ADDRNORM_BASE_ADDR2__INTLV_NUM_CHAN_MASK 0x0000003CL +#define MMEA3_ADDRNORM_BASE_ADDR2__INTLV_NUM_DIES_MASK 0x000000C0L +#define MMEA3_ADDRNORM_BASE_ADDR2__INTLV_NUM_SOCKETS_MASK 0x00000100L +#define MMEA3_ADDRNORM_BASE_ADDR2__INTLV_ADDR_SEL_MASK 0x00000E00L +#define MMEA3_ADDRNORM_BASE_ADDR2__BASE_ADDR_MASK 0xFFFFF000L +//MMEA3_ADDRNORM_LIMIT_ADDR2 +#define MMEA3_ADDRNORM_LIMIT_ADDR2__DST_FABRIC_ID__SHIFT 0x0 +#define MMEA3_ADDRNORM_LIMIT_ADDR2__LIMIT_ADDR__SHIFT 0xc +#define MMEA3_ADDRNORM_LIMIT_ADDR2__DST_FABRIC_ID_MASK 0x0000001FL +#define MMEA3_ADDRNORM_LIMIT_ADDR2__LIMIT_ADDR_MASK 0xFFFFF000L +//MMEA3_ADDRNORM_BASE_ADDR3 +#define MMEA3_ADDRNORM_BASE_ADDR3__ADDR_RNG_VAL__SHIFT 0x0 +#define MMEA3_ADDRNORM_BASE_ADDR3__LGCY_MMIO_HOLE_EN__SHIFT 0x1 +#define MMEA3_ADDRNORM_BASE_ADDR3__INTLV_NUM_CHAN__SHIFT 0x2 +#define MMEA3_ADDRNORM_BASE_ADDR3__INTLV_NUM_DIES__SHIFT 0x6 +#define MMEA3_ADDRNORM_BASE_ADDR3__INTLV_NUM_SOCKETS__SHIFT 0x8 +#define MMEA3_ADDRNORM_BASE_ADDR3__INTLV_ADDR_SEL__SHIFT 0x9 +#define MMEA3_ADDRNORM_BASE_ADDR3__BASE_ADDR__SHIFT 0xc +#define MMEA3_ADDRNORM_BASE_ADDR3__ADDR_RNG_VAL_MASK 0x00000001L +#define MMEA3_ADDRNORM_BASE_ADDR3__LGCY_MMIO_HOLE_EN_MASK 0x00000002L +#define MMEA3_ADDRNORM_BASE_ADDR3__INTLV_NUM_CHAN_MASK 0x0000003CL +#define MMEA3_ADDRNORM_BASE_ADDR3__INTLV_NUM_DIES_MASK 0x000000C0L +#define MMEA3_ADDRNORM_BASE_ADDR3__INTLV_NUM_SOCKETS_MASK 0x00000100L +#define MMEA3_ADDRNORM_BASE_ADDR3__INTLV_ADDR_SEL_MASK 0x00000E00L +#define MMEA3_ADDRNORM_BASE_ADDR3__BASE_ADDR_MASK 0xFFFFF000L +//MMEA3_ADDRNORM_LIMIT_ADDR3 +#define MMEA3_ADDRNORM_LIMIT_ADDR3__DST_FABRIC_ID__SHIFT 0x0 +#define MMEA3_ADDRNORM_LIMIT_ADDR3__LIMIT_ADDR__SHIFT 0xc +#define MMEA3_ADDRNORM_LIMIT_ADDR3__DST_FABRIC_ID_MASK 0x0000001FL +#define MMEA3_ADDRNORM_LIMIT_ADDR3__LIMIT_ADDR_MASK 0xFFFFF000L +//MMEA3_ADDRNORM_OFFSET_ADDR3 +#define MMEA3_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_EN__SHIFT 0x0 +#define MMEA3_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET__SHIFT 0x14 +#define MMEA3_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_EN_MASK 0x00000001L +#define MMEA3_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_MASK 0xFFF00000L +//MMEA3_ADDRNORM_BASE_ADDR4 +#define MMEA3_ADDRNORM_BASE_ADDR4__ADDR_RNG_VAL__SHIFT 0x0 +#define MMEA3_ADDRNORM_BASE_ADDR4__LGCY_MMIO_HOLE_EN__SHIFT 0x1 +#define MMEA3_ADDRNORM_BASE_ADDR4__INTLV_NUM_CHAN__SHIFT 0x2 +#define MMEA3_ADDRNORM_BASE_ADDR4__INTLV_NUM_DIES__SHIFT 0x6 +#define MMEA3_ADDRNORM_BASE_ADDR4__INTLV_NUM_SOCKETS__SHIFT 0x8 +#define MMEA3_ADDRNORM_BASE_ADDR4__INTLV_ADDR_SEL__SHIFT 0x9 +#define MMEA3_ADDRNORM_BASE_ADDR4__BASE_ADDR__SHIFT 0xc +#define MMEA3_ADDRNORM_BASE_ADDR4__ADDR_RNG_VAL_MASK 0x00000001L +#define MMEA3_ADDRNORM_BASE_ADDR4__LGCY_MMIO_HOLE_EN_MASK 0x00000002L 
+#define MMEA3_ADDRNORM_BASE_ADDR4__INTLV_NUM_CHAN_MASK 0x0000003CL +#define MMEA3_ADDRNORM_BASE_ADDR4__INTLV_NUM_DIES_MASK 0x000000C0L +#define MMEA3_ADDRNORM_BASE_ADDR4__INTLV_NUM_SOCKETS_MASK 0x00000100L +#define MMEA3_ADDRNORM_BASE_ADDR4__INTLV_ADDR_SEL_MASK 0x00000E00L +#define MMEA3_ADDRNORM_BASE_ADDR4__BASE_ADDR_MASK 0xFFFFF000L +//MMEA3_ADDRNORM_LIMIT_ADDR4 +#define MMEA3_ADDRNORM_LIMIT_ADDR4__DST_FABRIC_ID__SHIFT 0x0 +#define MMEA3_ADDRNORM_LIMIT_ADDR4__LIMIT_ADDR__SHIFT 0xc +#define MMEA3_ADDRNORM_LIMIT_ADDR4__DST_FABRIC_ID_MASK 0x0000001FL +#define MMEA3_ADDRNORM_LIMIT_ADDR4__LIMIT_ADDR_MASK 0xFFFFF000L +//MMEA3_ADDRNORM_BASE_ADDR5 +#define MMEA3_ADDRNORM_BASE_ADDR5__ADDR_RNG_VAL__SHIFT 0x0 +#define MMEA3_ADDRNORM_BASE_ADDR5__LGCY_MMIO_HOLE_EN__SHIFT 0x1 +#define MMEA3_ADDRNORM_BASE_ADDR5__INTLV_NUM_CHAN__SHIFT 0x2 +#define MMEA3_ADDRNORM_BASE_ADDR5__INTLV_NUM_DIES__SHIFT 0x6 +#define MMEA3_ADDRNORM_BASE_ADDR5__INTLV_NUM_SOCKETS__SHIFT 0x8 +#define MMEA3_ADDRNORM_BASE_ADDR5__INTLV_ADDR_SEL__SHIFT 0x9 +#define MMEA3_ADDRNORM_BASE_ADDR5__BASE_ADDR__SHIFT 0xc +#define MMEA3_ADDRNORM_BASE_ADDR5__ADDR_RNG_VAL_MASK 0x00000001L +#define MMEA3_ADDRNORM_BASE_ADDR5__LGCY_MMIO_HOLE_EN_MASK 0x00000002L +#define MMEA3_ADDRNORM_BASE_ADDR5__INTLV_NUM_CHAN_MASK 0x0000003CL +#define MMEA3_ADDRNORM_BASE_ADDR5__INTLV_NUM_DIES_MASK 0x000000C0L +#define MMEA3_ADDRNORM_BASE_ADDR5__INTLV_NUM_SOCKETS_MASK 0x00000100L +#define MMEA3_ADDRNORM_BASE_ADDR5__INTLV_ADDR_SEL_MASK 0x00000E00L +#define MMEA3_ADDRNORM_BASE_ADDR5__BASE_ADDR_MASK 0xFFFFF000L +//MMEA3_ADDRNORM_LIMIT_ADDR5 +#define MMEA3_ADDRNORM_LIMIT_ADDR5__DST_FABRIC_ID__SHIFT 0x0 +#define MMEA3_ADDRNORM_LIMIT_ADDR5__LIMIT_ADDR__SHIFT 0xc +#define MMEA3_ADDRNORM_LIMIT_ADDR5__DST_FABRIC_ID_MASK 0x0000001FL +#define MMEA3_ADDRNORM_LIMIT_ADDR5__LIMIT_ADDR_MASK 0xFFFFF000L +//MMEA3_ADDRNORM_OFFSET_ADDR5 +#define MMEA3_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_EN__SHIFT 0x0 +#define MMEA3_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET__SHIFT 0x14 +#define MMEA3_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_EN_MASK 0x00000001L +#define MMEA3_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_MASK 0xFFF00000L +//MMEA3_ADDRNORMDRAM_HOLE_CNTL +#define MMEA3_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_VALID__SHIFT 0x0 +#define MMEA3_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_OFFSET__SHIFT 0x7 +#define MMEA3_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_VALID_MASK 0x00000001L +#define MMEA3_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_OFFSET_MASK 0x0000FF80L +//MMEA3_ADDRNORMGMI_HOLE_CNTL +#define MMEA3_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_VALID__SHIFT 0x0 +#define MMEA3_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_OFFSET__SHIFT 0x7 +#define MMEA3_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_VALID_MASK 0x00000001L +#define MMEA3_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_OFFSET_MASK 0x0000FF80L +//MMEA3_ADDRNORMDRAM_NP2_CHANNEL_CFG +#define MMEA3_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE0__SHIFT 0x0 +#define MMEA3_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE1__SHIFT 0x6 +#define MMEA3_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE0_MASK 0x0000003FL +#define MMEA3_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE1_MASK 0x00000FC0L +//MMEA3_ADDRNORMGMI_NP2_CHANNEL_CFG +#define MMEA3_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE2__SHIFT 0x0 +#define MMEA3_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE3__SHIFT 0x6 +#define MMEA3_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE2_MASK 0x0000003FL +#define MMEA3_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE3_MASK 0x00000FC0L +//MMEA3_ADDRDEC_BANK_CFG +#define MMEA3_ADDRDEC_BANK_CFG__BANK_MASK_DRAM__SHIFT 0x0 
+#define MMEA3_ADDRDEC_BANK_CFG__BANK_MASK_GMI__SHIFT 0x6 +#define MMEA3_ADDRDEC_BANK_CFG__BANKGROUP_SEL_DRAM__SHIFT 0xc +#define MMEA3_ADDRDEC_BANK_CFG__BANKGROUP_SEL_GMI__SHIFT 0xf +#define MMEA3_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_DRAM__SHIFT 0x12 +#define MMEA3_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_GMI__SHIFT 0x13 +#define MMEA3_ADDRDEC_BANK_CFG__BANK_MASK_DRAM_MASK 0x0000003FL +#define MMEA3_ADDRDEC_BANK_CFG__BANK_MASK_GMI_MASK 0x00000FC0L +#define MMEA3_ADDRDEC_BANK_CFG__BANKGROUP_SEL_DRAM_MASK 0x00007000L +#define MMEA3_ADDRDEC_BANK_CFG__BANKGROUP_SEL_GMI_MASK 0x00038000L +#define MMEA3_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_DRAM_MASK 0x00040000L +#define MMEA3_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_GMI_MASK 0x00080000L +//MMEA3_ADDRDEC_MISC_CFG +#define MMEA3_ADDRDEC_MISC_CFG__VCM_EN0__SHIFT 0x0 +#define MMEA3_ADDRDEC_MISC_CFG__VCM_EN1__SHIFT 0x1 +#define MMEA3_ADDRDEC_MISC_CFG__VCM_EN2__SHIFT 0x2 +#define MMEA3_ADDRDEC_MISC_CFG__PCH_MASK_DRAM__SHIFT 0x8 +#define MMEA3_ADDRDEC_MISC_CFG__PCH_MASK_GMI__SHIFT 0x9 +#define MMEA3_ADDRDEC_MISC_CFG__CH_MASK_DRAM__SHIFT 0xc +#define MMEA3_ADDRDEC_MISC_CFG__CH_MASK_GMI__SHIFT 0x11 +#define MMEA3_ADDRDEC_MISC_CFG__CS_MASK_DRAM__SHIFT 0x16 +#define MMEA3_ADDRDEC_MISC_CFG__CS_MASK_GMI__SHIFT 0x18 +#define MMEA3_ADDRDEC_MISC_CFG__RM_MASK_DRAM__SHIFT 0x1a +#define MMEA3_ADDRDEC_MISC_CFG__RM_MASK_GMI__SHIFT 0x1d +#define MMEA3_ADDRDEC_MISC_CFG__VCM_EN0_MASK 0x00000001L +#define MMEA3_ADDRDEC_MISC_CFG__VCM_EN1_MASK 0x00000002L +#define MMEA3_ADDRDEC_MISC_CFG__VCM_EN2_MASK 0x00000004L +#define MMEA3_ADDRDEC_MISC_CFG__PCH_MASK_DRAM_MASK 0x00000100L +#define MMEA3_ADDRDEC_MISC_CFG__PCH_MASK_GMI_MASK 0x00000200L +#define MMEA3_ADDRDEC_MISC_CFG__CH_MASK_DRAM_MASK 0x0001F000L +#define MMEA3_ADDRDEC_MISC_CFG__CH_MASK_GMI_MASK 0x003E0000L +#define MMEA3_ADDRDEC_MISC_CFG__CS_MASK_DRAM_MASK 0x00C00000L +#define MMEA3_ADDRDEC_MISC_CFG__CS_MASK_GMI_MASK 0x03000000L +#define MMEA3_ADDRDEC_MISC_CFG__RM_MASK_DRAM_MASK 0x1C000000L +#define MMEA3_ADDRDEC_MISC_CFG__RM_MASK_GMI_MASK 0xE0000000L +//MMEA3_ADDRDECDRAM_ADDR_HASH_BANK0 +#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK0__XOR_ENABLE__SHIFT 0x0 +#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK0__COL_XOR__SHIFT 0x1 +#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK0__ROW_XOR__SHIFT 0xe +#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK0__XOR_ENABLE_MASK 0x00000001L +#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK0__COL_XOR_MASK 0x00003FFEL +#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK0__ROW_XOR_MASK 0xFFFFC000L +//MMEA3_ADDRDECDRAM_ADDR_HASH_BANK1 +#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK1__XOR_ENABLE__SHIFT 0x0 +#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK1__COL_XOR__SHIFT 0x1 +#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK1__ROW_XOR__SHIFT 0xe +#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK1__XOR_ENABLE_MASK 0x00000001L +#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK1__COL_XOR_MASK 0x00003FFEL +#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK1__ROW_XOR_MASK 0xFFFFC000L +//MMEA3_ADDRDECDRAM_ADDR_HASH_BANK2 +#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK2__XOR_ENABLE__SHIFT 0x0 +#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK2__COL_XOR__SHIFT 0x1 +#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK2__ROW_XOR__SHIFT 0xe +#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK2__XOR_ENABLE_MASK 0x00000001L +#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK2__COL_XOR_MASK 0x00003FFEL +#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK2__ROW_XOR_MASK 0xFFFFC000L +//MMEA3_ADDRDECDRAM_ADDR_HASH_BANK3 +#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK3__XOR_ENABLE__SHIFT 0x0 +#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK3__COL_XOR__SHIFT 0x1 +#define 
MMEA3_ADDRDECDRAM_ADDR_HASH_BANK3__ROW_XOR__SHIFT 0xe +#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK3__XOR_ENABLE_MASK 0x00000001L +#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK3__COL_XOR_MASK 0x00003FFEL +#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK3__ROW_XOR_MASK 0xFFFFC000L +//MMEA3_ADDRDECDRAM_ADDR_HASH_BANK4 +#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK4__XOR_ENABLE__SHIFT 0x0 +#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK4__COL_XOR__SHIFT 0x1 +#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK4__ROW_XOR__SHIFT 0xe +#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK4__XOR_ENABLE_MASK 0x00000001L +#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK4__COL_XOR_MASK 0x00003FFEL +#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK4__ROW_XOR_MASK 0xFFFFC000L +//MMEA3_ADDRDECDRAM_ADDR_HASH_BANK5 +#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK5__XOR_ENABLE__SHIFT 0x0 +#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK5__COL_XOR__SHIFT 0x1 +#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK5__ROW_XOR__SHIFT 0xe +#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK5__XOR_ENABLE_MASK 0x00000001L +#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK5__COL_XOR_MASK 0x00003FFEL +#define MMEA3_ADDRDECDRAM_ADDR_HASH_BANK5__ROW_XOR_MASK 0xFFFFC000L +//MMEA3_ADDRDECDRAM_ADDR_HASH_PC +#define MMEA3_ADDRDECDRAM_ADDR_HASH_PC__XOR_ENABLE__SHIFT 0x0 +#define MMEA3_ADDRDECDRAM_ADDR_HASH_PC__COL_XOR__SHIFT 0x1 +#define MMEA3_ADDRDECDRAM_ADDR_HASH_PC__ROW_XOR__SHIFT 0xe +#define MMEA3_ADDRDECDRAM_ADDR_HASH_PC__XOR_ENABLE_MASK 0x00000001L +#define MMEA3_ADDRDECDRAM_ADDR_HASH_PC__COL_XOR_MASK 0x00003FFEL +#define MMEA3_ADDRDECDRAM_ADDR_HASH_PC__ROW_XOR_MASK 0xFFFFC000L +//MMEA3_ADDRDECDRAM_ADDR_HASH_PC2 +#define MMEA3_ADDRDECDRAM_ADDR_HASH_PC2__BANK_XOR__SHIFT 0x0 +#define MMEA3_ADDRDECDRAM_ADDR_HASH_PC2__BANK_XOR_MASK 0x0000003FL +//MMEA3_ADDRDECDRAM_ADDR_HASH_CS0 +#define MMEA3_ADDRDECDRAM_ADDR_HASH_CS0__XOR_ENABLE__SHIFT 0x0 +#define MMEA3_ADDRDECDRAM_ADDR_HASH_CS0__NA_XOR__SHIFT 0x1 +#define MMEA3_ADDRDECDRAM_ADDR_HASH_CS0__XOR_ENABLE_MASK 0x00000001L +#define MMEA3_ADDRDECDRAM_ADDR_HASH_CS0__NA_XOR_MASK 0xFFFFFFFEL +//MMEA3_ADDRDECDRAM_ADDR_HASH_CS1 +#define MMEA3_ADDRDECDRAM_ADDR_HASH_CS1__XOR_ENABLE__SHIFT 0x0 +#define MMEA3_ADDRDECDRAM_ADDR_HASH_CS1__NA_XOR__SHIFT 0x1 +#define MMEA3_ADDRDECDRAM_ADDR_HASH_CS1__XOR_ENABLE_MASK 0x00000001L +#define MMEA3_ADDRDECDRAM_ADDR_HASH_CS1__NA_XOR_MASK 0xFFFFFFFEL +//MMEA3_ADDRDECDRAM_HARVEST_ENABLE +#define MMEA3_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_EN__SHIFT 0x0 +#define MMEA3_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_VAL__SHIFT 0x1 +#define MMEA3_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_EN__SHIFT 0x2 +#define MMEA3_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_VAL__SHIFT 0x3 +#define MMEA3_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_EN__SHIFT 0x4 +#define MMEA3_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_VAL__SHIFT 0x5 +#define MMEA3_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_EN_MASK 0x00000001L +#define MMEA3_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_VAL_MASK 0x00000002L +#define MMEA3_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_EN_MASK 0x00000004L +#define MMEA3_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_VAL_MASK 0x00000008L +#define MMEA3_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_EN_MASK 0x00000010L +#define MMEA3_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_VAL_MASK 0x00000020L +//MMEA3_ADDRDECGMI_ADDR_HASH_BANK0 +#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK0__XOR_ENABLE__SHIFT 0x0 +#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK0__COL_XOR__SHIFT 0x1 +#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK0__ROW_XOR__SHIFT 0xe +#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK0__XOR_ENABLE_MASK 0x00000001L +#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK0__COL_XOR_MASK 0x00003FFEL 
+#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK0__ROW_XOR_MASK 0xFFFFC000L +//MMEA3_ADDRDECGMI_ADDR_HASH_BANK1 +#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK1__XOR_ENABLE__SHIFT 0x0 +#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK1__COL_XOR__SHIFT 0x1 +#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK1__ROW_XOR__SHIFT 0xe +#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK1__XOR_ENABLE_MASK 0x00000001L +#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK1__COL_XOR_MASK 0x00003FFEL +#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK1__ROW_XOR_MASK 0xFFFFC000L +//MMEA3_ADDRDECGMI_ADDR_HASH_BANK2 +#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK2__XOR_ENABLE__SHIFT 0x0 +#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK2__COL_XOR__SHIFT 0x1 +#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK2__ROW_XOR__SHIFT 0xe +#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK2__XOR_ENABLE_MASK 0x00000001L +#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK2__COL_XOR_MASK 0x00003FFEL +#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK2__ROW_XOR_MASK 0xFFFFC000L +//MMEA3_ADDRDECGMI_ADDR_HASH_BANK3 +#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK3__XOR_ENABLE__SHIFT 0x0 +#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK3__COL_XOR__SHIFT 0x1 +#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK3__ROW_XOR__SHIFT 0xe +#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK3__XOR_ENABLE_MASK 0x00000001L +#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK3__COL_XOR_MASK 0x00003FFEL +#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK3__ROW_XOR_MASK 0xFFFFC000L +//MMEA3_ADDRDECGMI_ADDR_HASH_BANK4 +#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK4__XOR_ENABLE__SHIFT 0x0 +#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK4__COL_XOR__SHIFT 0x1 +#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK4__ROW_XOR__SHIFT 0xe +#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK4__XOR_ENABLE_MASK 0x00000001L +#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK4__COL_XOR_MASK 0x00003FFEL +#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK4__ROW_XOR_MASK 0xFFFFC000L +//MMEA3_ADDRDECGMI_ADDR_HASH_BANK5 +#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK5__XOR_ENABLE__SHIFT 0x0 +#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK5__COL_XOR__SHIFT 0x1 +#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK5__ROW_XOR__SHIFT 0xe +#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK5__XOR_ENABLE_MASK 0x00000001L +#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK5__COL_XOR_MASK 0x00003FFEL +#define MMEA3_ADDRDECGMI_ADDR_HASH_BANK5__ROW_XOR_MASK 0xFFFFC000L +//MMEA3_ADDRDECGMI_ADDR_HASH_PC +#define MMEA3_ADDRDECGMI_ADDR_HASH_PC__XOR_ENABLE__SHIFT 0x0 +#define MMEA3_ADDRDECGMI_ADDR_HASH_PC__COL_XOR__SHIFT 0x1 +#define MMEA3_ADDRDECGMI_ADDR_HASH_PC__ROW_XOR__SHIFT 0xe +#define MMEA3_ADDRDECGMI_ADDR_HASH_PC__XOR_ENABLE_MASK 0x00000001L +#define MMEA3_ADDRDECGMI_ADDR_HASH_PC__COL_XOR_MASK 0x00003FFEL +#define MMEA3_ADDRDECGMI_ADDR_HASH_PC__ROW_XOR_MASK 0xFFFFC000L +//MMEA3_ADDRDECGMI_ADDR_HASH_PC2 +#define MMEA3_ADDRDECGMI_ADDR_HASH_PC2__BANK_XOR__SHIFT 0x0 +#define MMEA3_ADDRDECGMI_ADDR_HASH_PC2__BANK_XOR_MASK 0x0000003FL +//MMEA3_ADDRDECGMI_ADDR_HASH_CS0 +#define MMEA3_ADDRDECGMI_ADDR_HASH_CS0__XOR_ENABLE__SHIFT 0x0 +#define MMEA3_ADDRDECGMI_ADDR_HASH_CS0__NA_XOR__SHIFT 0x1 +#define MMEA3_ADDRDECGMI_ADDR_HASH_CS0__XOR_ENABLE_MASK 0x00000001L +#define MMEA3_ADDRDECGMI_ADDR_HASH_CS0__NA_XOR_MASK 0xFFFFFFFEL +//MMEA3_ADDRDECGMI_ADDR_HASH_CS1 +#define MMEA3_ADDRDECGMI_ADDR_HASH_CS1__XOR_ENABLE__SHIFT 0x0 +#define MMEA3_ADDRDECGMI_ADDR_HASH_CS1__NA_XOR__SHIFT 0x1 +#define MMEA3_ADDRDECGMI_ADDR_HASH_CS1__XOR_ENABLE_MASK 0x00000001L +#define MMEA3_ADDRDECGMI_ADDR_HASH_CS1__NA_XOR_MASK 0xFFFFFFFEL +//MMEA3_ADDRDECGMI_HARVEST_ENABLE +#define MMEA3_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_EN__SHIFT 0x0 +#define MMEA3_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_VAL__SHIFT 0x1 
+#define MMEA3_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_EN__SHIFT 0x2 +#define MMEA3_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_VAL__SHIFT 0x3 +#define MMEA3_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_EN__SHIFT 0x4 +#define MMEA3_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_VAL__SHIFT 0x5 +#define MMEA3_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_EN_MASK 0x00000001L +#define MMEA3_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_VAL_MASK 0x00000002L +#define MMEA3_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_EN_MASK 0x00000004L +#define MMEA3_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_VAL_MASK 0x00000008L +#define MMEA3_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_EN_MASK 0x00000010L +#define MMEA3_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_VAL_MASK 0x00000020L +//MMEA3_ADDRDEC0_BASE_ADDR_CS0 +#define MMEA3_ADDRDEC0_BASE_ADDR_CS0__CS_EN__SHIFT 0x0 +#define MMEA3_ADDRDEC0_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1 +#define MMEA3_ADDRDEC0_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L +#define MMEA3_ADDRDEC0_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA3_ADDRDEC0_BASE_ADDR_CS1 +#define MMEA3_ADDRDEC0_BASE_ADDR_CS1__CS_EN__SHIFT 0x0 +#define MMEA3_ADDRDEC0_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1 +#define MMEA3_ADDRDEC0_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L +#define MMEA3_ADDRDEC0_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA3_ADDRDEC0_BASE_ADDR_CS2 +#define MMEA3_ADDRDEC0_BASE_ADDR_CS2__CS_EN__SHIFT 0x0 +#define MMEA3_ADDRDEC0_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1 +#define MMEA3_ADDRDEC0_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L +#define MMEA3_ADDRDEC0_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA3_ADDRDEC0_BASE_ADDR_CS3 +#define MMEA3_ADDRDEC0_BASE_ADDR_CS3__CS_EN__SHIFT 0x0 +#define MMEA3_ADDRDEC0_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1 +#define MMEA3_ADDRDEC0_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L +#define MMEA3_ADDRDEC0_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA3_ADDRDEC0_BASE_ADDR_SECCS0 +#define MMEA3_ADDRDEC0_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0 +#define MMEA3_ADDRDEC0_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1 +#define MMEA3_ADDRDEC0_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L +#define MMEA3_ADDRDEC0_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA3_ADDRDEC0_BASE_ADDR_SECCS1 +#define MMEA3_ADDRDEC0_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0 +#define MMEA3_ADDRDEC0_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1 +#define MMEA3_ADDRDEC0_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L +#define MMEA3_ADDRDEC0_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA3_ADDRDEC0_BASE_ADDR_SECCS2 +#define MMEA3_ADDRDEC0_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0 +#define MMEA3_ADDRDEC0_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1 +#define MMEA3_ADDRDEC0_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L +#define MMEA3_ADDRDEC0_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA3_ADDRDEC0_BASE_ADDR_SECCS3 +#define MMEA3_ADDRDEC0_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0 +#define MMEA3_ADDRDEC0_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1 +#define MMEA3_ADDRDEC0_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L +#define MMEA3_ADDRDEC0_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA3_ADDRDEC0_ADDR_MASK_CS01 +#define MMEA3_ADDRDEC0_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1 +#define MMEA3_ADDRDEC0_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA3_ADDRDEC0_ADDR_MASK_CS23 +#define MMEA3_ADDRDEC0_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1 +#define MMEA3_ADDRDEC0_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA3_ADDRDEC0_ADDR_MASK_SECCS01 +#define MMEA3_ADDRDEC0_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1 +#define MMEA3_ADDRDEC0_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA3_ADDRDEC0_ADDR_MASK_SECCS23 +#define MMEA3_ADDRDEC0_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1 
+#define MMEA3_ADDRDEC0_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA3_ADDRDEC0_ADDR_CFG_CS01 +#define MMEA3_ADDRDEC0_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA3_ADDRDEC0_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4 +#define MMEA3_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8 +#define MMEA3_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc +#define MMEA3_ADDRDEC0_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10 +#define MMEA3_ADDRDEC0_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14 +#define MMEA3_ADDRDEC0_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f +#define MMEA3_ADDRDEC0_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA3_ADDRDEC0_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L +#define MMEA3_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA3_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA3_ADDRDEC0_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L +#define MMEA3_ADDRDEC0_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L +#define MMEA3_ADDRDEC0_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L +//MMEA3_ADDRDEC0_ADDR_CFG_CS23 +#define MMEA3_ADDRDEC0_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA3_ADDRDEC0_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4 +#define MMEA3_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8 +#define MMEA3_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc +#define MMEA3_ADDRDEC0_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10 +#define MMEA3_ADDRDEC0_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14 +#define MMEA3_ADDRDEC0_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f +#define MMEA3_ADDRDEC0_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA3_ADDRDEC0_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L +#define MMEA3_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA3_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA3_ADDRDEC0_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L +#define MMEA3_ADDRDEC0_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L +#define MMEA3_ADDRDEC0_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L +//MMEA3_ADDRDEC0_ADDR_SEL_CS01 +#define MMEA3_ADDRDEC0_ADDR_SEL_CS01__BANK0__SHIFT 0x0 +#define MMEA3_ADDRDEC0_ADDR_SEL_CS01__BANK1__SHIFT 0x4 +#define MMEA3_ADDRDEC0_ADDR_SEL_CS01__BANK2__SHIFT 0x8 +#define MMEA3_ADDRDEC0_ADDR_SEL_CS01__BANK3__SHIFT 0xc +#define MMEA3_ADDRDEC0_ADDR_SEL_CS01__BANK4__SHIFT 0x10 +#define MMEA3_ADDRDEC0_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18 +#define MMEA3_ADDRDEC0_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c +#define MMEA3_ADDRDEC0_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL +#define MMEA3_ADDRDEC0_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L +#define MMEA3_ADDRDEC0_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L +#define MMEA3_ADDRDEC0_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L +#define MMEA3_ADDRDEC0_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L +#define MMEA3_ADDRDEC0_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L +#define MMEA3_ADDRDEC0_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L +//MMEA3_ADDRDEC0_ADDR_SEL_CS23 +#define MMEA3_ADDRDEC0_ADDR_SEL_CS23__BANK0__SHIFT 0x0 +#define MMEA3_ADDRDEC0_ADDR_SEL_CS23__BANK1__SHIFT 0x4 +#define MMEA3_ADDRDEC0_ADDR_SEL_CS23__BANK2__SHIFT 0x8 +#define MMEA3_ADDRDEC0_ADDR_SEL_CS23__BANK3__SHIFT 0xc +#define MMEA3_ADDRDEC0_ADDR_SEL_CS23__BANK4__SHIFT 0x10 +#define MMEA3_ADDRDEC0_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18 +#define MMEA3_ADDRDEC0_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c +#define MMEA3_ADDRDEC0_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL +#define MMEA3_ADDRDEC0_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L +#define MMEA3_ADDRDEC0_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L +#define MMEA3_ADDRDEC0_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L +#define MMEA3_ADDRDEC0_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L +#define 
MMEA3_ADDRDEC0_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L +#define MMEA3_ADDRDEC0_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L +//MMEA3_ADDRDEC0_ADDR_SEL2_CS01 +#define MMEA3_ADDRDEC0_ADDR_SEL2_CS01__BANK5__SHIFT 0x0 +#define MMEA3_ADDRDEC0_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL +//MMEA3_ADDRDEC0_ADDR_SEL2_CS23 +#define MMEA3_ADDRDEC0_ADDR_SEL2_CS23__BANK5__SHIFT 0x0 +#define MMEA3_ADDRDEC0_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL +//MMEA3_ADDRDEC0_COL_SEL_LO_CS01 +#define MMEA3_ADDRDEC0_COL_SEL_LO_CS01__COL0__SHIFT 0x0 +#define MMEA3_ADDRDEC0_COL_SEL_LO_CS01__COL1__SHIFT 0x4 +#define MMEA3_ADDRDEC0_COL_SEL_LO_CS01__COL2__SHIFT 0x8 +#define MMEA3_ADDRDEC0_COL_SEL_LO_CS01__COL3__SHIFT 0xc +#define MMEA3_ADDRDEC0_COL_SEL_LO_CS01__COL4__SHIFT 0x10 +#define MMEA3_ADDRDEC0_COL_SEL_LO_CS01__COL5__SHIFT 0x14 +#define MMEA3_ADDRDEC0_COL_SEL_LO_CS01__COL6__SHIFT 0x18 +#define MMEA3_ADDRDEC0_COL_SEL_LO_CS01__COL7__SHIFT 0x1c +#define MMEA3_ADDRDEC0_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL +#define MMEA3_ADDRDEC0_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L +#define MMEA3_ADDRDEC0_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L +#define MMEA3_ADDRDEC0_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L +#define MMEA3_ADDRDEC0_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L +#define MMEA3_ADDRDEC0_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L +#define MMEA3_ADDRDEC0_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L +#define MMEA3_ADDRDEC0_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L +//MMEA3_ADDRDEC0_COL_SEL_LO_CS23 +#define MMEA3_ADDRDEC0_COL_SEL_LO_CS23__COL0__SHIFT 0x0 +#define MMEA3_ADDRDEC0_COL_SEL_LO_CS23__COL1__SHIFT 0x4 +#define MMEA3_ADDRDEC0_COL_SEL_LO_CS23__COL2__SHIFT 0x8 +#define MMEA3_ADDRDEC0_COL_SEL_LO_CS23__COL3__SHIFT 0xc +#define MMEA3_ADDRDEC0_COL_SEL_LO_CS23__COL4__SHIFT 0x10 +#define MMEA3_ADDRDEC0_COL_SEL_LO_CS23__COL5__SHIFT 0x14 +#define MMEA3_ADDRDEC0_COL_SEL_LO_CS23__COL6__SHIFT 0x18 +#define MMEA3_ADDRDEC0_COL_SEL_LO_CS23__COL7__SHIFT 0x1c +#define MMEA3_ADDRDEC0_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL +#define MMEA3_ADDRDEC0_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L +#define MMEA3_ADDRDEC0_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L +#define MMEA3_ADDRDEC0_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L +#define MMEA3_ADDRDEC0_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L +#define MMEA3_ADDRDEC0_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L +#define MMEA3_ADDRDEC0_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L +#define MMEA3_ADDRDEC0_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L +//MMEA3_ADDRDEC0_COL_SEL_HI_CS01 +#define MMEA3_ADDRDEC0_COL_SEL_HI_CS01__COL8__SHIFT 0x0 +#define MMEA3_ADDRDEC0_COL_SEL_HI_CS01__COL9__SHIFT 0x4 +#define MMEA3_ADDRDEC0_COL_SEL_HI_CS01__COL10__SHIFT 0x8 +#define MMEA3_ADDRDEC0_COL_SEL_HI_CS01__COL11__SHIFT 0xc +#define MMEA3_ADDRDEC0_COL_SEL_HI_CS01__COL12__SHIFT 0x10 +#define MMEA3_ADDRDEC0_COL_SEL_HI_CS01__COL13__SHIFT 0x14 +#define MMEA3_ADDRDEC0_COL_SEL_HI_CS01__COL14__SHIFT 0x18 +#define MMEA3_ADDRDEC0_COL_SEL_HI_CS01__COL15__SHIFT 0x1c +#define MMEA3_ADDRDEC0_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL +#define MMEA3_ADDRDEC0_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L +#define MMEA3_ADDRDEC0_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L +#define MMEA3_ADDRDEC0_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L +#define MMEA3_ADDRDEC0_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L +#define MMEA3_ADDRDEC0_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L +#define MMEA3_ADDRDEC0_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L +#define MMEA3_ADDRDEC0_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L +//MMEA3_ADDRDEC0_COL_SEL_HI_CS23 +#define MMEA3_ADDRDEC0_COL_SEL_HI_CS23__COL8__SHIFT 0x0 +#define 
MMEA3_ADDRDEC0_COL_SEL_HI_CS23__COL9__SHIFT 0x4 +#define MMEA3_ADDRDEC0_COL_SEL_HI_CS23__COL10__SHIFT 0x8 +#define MMEA3_ADDRDEC0_COL_SEL_HI_CS23__COL11__SHIFT 0xc +#define MMEA3_ADDRDEC0_COL_SEL_HI_CS23__COL12__SHIFT 0x10 +#define MMEA3_ADDRDEC0_COL_SEL_HI_CS23__COL13__SHIFT 0x14 +#define MMEA3_ADDRDEC0_COL_SEL_HI_CS23__COL14__SHIFT 0x18 +#define MMEA3_ADDRDEC0_COL_SEL_HI_CS23__COL15__SHIFT 0x1c +#define MMEA3_ADDRDEC0_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL +#define MMEA3_ADDRDEC0_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L +#define MMEA3_ADDRDEC0_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L +#define MMEA3_ADDRDEC0_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L +#define MMEA3_ADDRDEC0_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L +#define MMEA3_ADDRDEC0_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L +#define MMEA3_ADDRDEC0_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L +#define MMEA3_ADDRDEC0_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L +//MMEA3_ADDRDEC0_RM_SEL_CS01 +#define MMEA3_ADDRDEC0_RM_SEL_CS01__RM0__SHIFT 0x0 +#define MMEA3_ADDRDEC0_RM_SEL_CS01__RM1__SHIFT 0x4 +#define MMEA3_ADDRDEC0_RM_SEL_CS01__RM2__SHIFT 0x8 +#define MMEA3_ADDRDEC0_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc +#define MMEA3_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA3_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA3_ADDRDEC0_RM_SEL_CS01__RM0_MASK 0x0000000FL +#define MMEA3_ADDRDEC0_RM_SEL_CS01__RM1_MASK 0x000000F0L +#define MMEA3_ADDRDEC0_RM_SEL_CS01__RM2_MASK 0x00000F00L +#define MMEA3_ADDRDEC0_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA3_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA3_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA3_ADDRDEC0_RM_SEL_CS23 +#define MMEA3_ADDRDEC0_RM_SEL_CS23__RM0__SHIFT 0x0 +#define MMEA3_ADDRDEC0_RM_SEL_CS23__RM1__SHIFT 0x4 +#define MMEA3_ADDRDEC0_RM_SEL_CS23__RM2__SHIFT 0x8 +#define MMEA3_ADDRDEC0_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc +#define MMEA3_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA3_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA3_ADDRDEC0_RM_SEL_CS23__RM0_MASK 0x0000000FL +#define MMEA3_ADDRDEC0_RM_SEL_CS23__RM1_MASK 0x000000F0L +#define MMEA3_ADDRDEC0_RM_SEL_CS23__RM2_MASK 0x00000F00L +#define MMEA3_ADDRDEC0_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA3_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA3_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA3_ADDRDEC0_RM_SEL_SECCS01 +#define MMEA3_ADDRDEC0_RM_SEL_SECCS01__RM0__SHIFT 0x0 +#define MMEA3_ADDRDEC0_RM_SEL_SECCS01__RM1__SHIFT 0x4 +#define MMEA3_ADDRDEC0_RM_SEL_SECCS01__RM2__SHIFT 0x8 +#define MMEA3_ADDRDEC0_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc +#define MMEA3_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA3_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA3_ADDRDEC0_RM_SEL_SECCS01__RM0_MASK 0x0000000FL +#define MMEA3_ADDRDEC0_RM_SEL_SECCS01__RM1_MASK 0x000000F0L +#define MMEA3_ADDRDEC0_RM_SEL_SECCS01__RM2_MASK 0x00000F00L +#define MMEA3_ADDRDEC0_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA3_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA3_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA3_ADDRDEC0_RM_SEL_SECCS23 +#define MMEA3_ADDRDEC0_RM_SEL_SECCS23__RM0__SHIFT 0x0 +#define MMEA3_ADDRDEC0_RM_SEL_SECCS23__RM1__SHIFT 0x4 +#define MMEA3_ADDRDEC0_RM_SEL_SECCS23__RM2__SHIFT 0x8 +#define MMEA3_ADDRDEC0_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc +#define 
MMEA3_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA3_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA3_ADDRDEC0_RM_SEL_SECCS23__RM0_MASK 0x0000000FL +#define MMEA3_ADDRDEC0_RM_SEL_SECCS23__RM1_MASK 0x000000F0L +#define MMEA3_ADDRDEC0_RM_SEL_SECCS23__RM2_MASK 0x00000F00L +#define MMEA3_ADDRDEC0_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA3_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA3_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA3_ADDRDEC1_BASE_ADDR_CS0 +#define MMEA3_ADDRDEC1_BASE_ADDR_CS0__CS_EN__SHIFT 0x0 +#define MMEA3_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1 +#define MMEA3_ADDRDEC1_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L +#define MMEA3_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA3_ADDRDEC1_BASE_ADDR_CS1 +#define MMEA3_ADDRDEC1_BASE_ADDR_CS1__CS_EN__SHIFT 0x0 +#define MMEA3_ADDRDEC1_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1 +#define MMEA3_ADDRDEC1_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L +#define MMEA3_ADDRDEC1_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA3_ADDRDEC1_BASE_ADDR_CS2 +#define MMEA3_ADDRDEC1_BASE_ADDR_CS2__CS_EN__SHIFT 0x0 +#define MMEA3_ADDRDEC1_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1 +#define MMEA3_ADDRDEC1_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L +#define MMEA3_ADDRDEC1_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA3_ADDRDEC1_BASE_ADDR_CS3 +#define MMEA3_ADDRDEC1_BASE_ADDR_CS3__CS_EN__SHIFT 0x0 +#define MMEA3_ADDRDEC1_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1 +#define MMEA3_ADDRDEC1_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L +#define MMEA3_ADDRDEC1_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA3_ADDRDEC1_BASE_ADDR_SECCS0 +#define MMEA3_ADDRDEC1_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0 +#define MMEA3_ADDRDEC1_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1 +#define MMEA3_ADDRDEC1_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L +#define MMEA3_ADDRDEC1_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA3_ADDRDEC1_BASE_ADDR_SECCS1 +#define MMEA3_ADDRDEC1_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0 +#define MMEA3_ADDRDEC1_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1 +#define MMEA3_ADDRDEC1_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L +#define MMEA3_ADDRDEC1_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA3_ADDRDEC1_BASE_ADDR_SECCS2 +#define MMEA3_ADDRDEC1_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0 +#define MMEA3_ADDRDEC1_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1 +#define MMEA3_ADDRDEC1_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L +#define MMEA3_ADDRDEC1_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA3_ADDRDEC1_BASE_ADDR_SECCS3 +#define MMEA3_ADDRDEC1_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0 +#define MMEA3_ADDRDEC1_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1 +#define MMEA3_ADDRDEC1_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L +#define MMEA3_ADDRDEC1_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA3_ADDRDEC1_ADDR_MASK_CS01 +#define MMEA3_ADDRDEC1_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1 +#define MMEA3_ADDRDEC1_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA3_ADDRDEC1_ADDR_MASK_CS23 +#define MMEA3_ADDRDEC1_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1 +#define MMEA3_ADDRDEC1_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA3_ADDRDEC1_ADDR_MASK_SECCS01 +#define MMEA3_ADDRDEC1_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1 +#define MMEA3_ADDRDEC1_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA3_ADDRDEC1_ADDR_MASK_SECCS23 +#define MMEA3_ADDRDEC1_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1 +#define MMEA3_ADDRDEC1_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA3_ADDRDEC1_ADDR_CFG_CS01 +#define 
MMEA3_ADDRDEC1_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA3_ADDRDEC1_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4 +#define MMEA3_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8 +#define MMEA3_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc +#define MMEA3_ADDRDEC1_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10 +#define MMEA3_ADDRDEC1_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14 +#define MMEA3_ADDRDEC1_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f +#define MMEA3_ADDRDEC1_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA3_ADDRDEC1_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L +#define MMEA3_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA3_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA3_ADDRDEC1_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L +#define MMEA3_ADDRDEC1_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L +#define MMEA3_ADDRDEC1_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L +//MMEA3_ADDRDEC1_ADDR_CFG_CS23 +#define MMEA3_ADDRDEC1_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA3_ADDRDEC1_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4 +#define MMEA3_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8 +#define MMEA3_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc +#define MMEA3_ADDRDEC1_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10 +#define MMEA3_ADDRDEC1_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14 +#define MMEA3_ADDRDEC1_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f +#define MMEA3_ADDRDEC1_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA3_ADDRDEC1_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L +#define MMEA3_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA3_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA3_ADDRDEC1_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L +#define MMEA3_ADDRDEC1_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L +#define MMEA3_ADDRDEC1_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L +//MMEA3_ADDRDEC1_ADDR_SEL_CS01 +#define MMEA3_ADDRDEC1_ADDR_SEL_CS01__BANK0__SHIFT 0x0 +#define MMEA3_ADDRDEC1_ADDR_SEL_CS01__BANK1__SHIFT 0x4 +#define MMEA3_ADDRDEC1_ADDR_SEL_CS01__BANK2__SHIFT 0x8 +#define MMEA3_ADDRDEC1_ADDR_SEL_CS01__BANK3__SHIFT 0xc +#define MMEA3_ADDRDEC1_ADDR_SEL_CS01__BANK4__SHIFT 0x10 +#define MMEA3_ADDRDEC1_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18 +#define MMEA3_ADDRDEC1_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c +#define MMEA3_ADDRDEC1_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL +#define MMEA3_ADDRDEC1_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L +#define MMEA3_ADDRDEC1_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L +#define MMEA3_ADDRDEC1_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L +#define MMEA3_ADDRDEC1_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L +#define MMEA3_ADDRDEC1_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L +#define MMEA3_ADDRDEC1_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L +//MMEA3_ADDRDEC1_ADDR_SEL_CS23 +#define MMEA3_ADDRDEC1_ADDR_SEL_CS23__BANK0__SHIFT 0x0 +#define MMEA3_ADDRDEC1_ADDR_SEL_CS23__BANK1__SHIFT 0x4 +#define MMEA3_ADDRDEC1_ADDR_SEL_CS23__BANK2__SHIFT 0x8 +#define MMEA3_ADDRDEC1_ADDR_SEL_CS23__BANK3__SHIFT 0xc +#define MMEA3_ADDRDEC1_ADDR_SEL_CS23__BANK4__SHIFT 0x10 +#define MMEA3_ADDRDEC1_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18 +#define MMEA3_ADDRDEC1_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c +#define MMEA3_ADDRDEC1_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL +#define MMEA3_ADDRDEC1_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L +#define MMEA3_ADDRDEC1_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L +#define MMEA3_ADDRDEC1_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L +#define MMEA3_ADDRDEC1_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L +#define MMEA3_ADDRDEC1_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L +#define MMEA3_ADDRDEC1_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L 
+//MMEA3_ADDRDEC1_ADDR_SEL2_CS01 +#define MMEA3_ADDRDEC1_ADDR_SEL2_CS01__BANK5__SHIFT 0x0 +#define MMEA3_ADDRDEC1_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL +//MMEA3_ADDRDEC1_ADDR_SEL2_CS23 +#define MMEA3_ADDRDEC1_ADDR_SEL2_CS23__BANK5__SHIFT 0x0 +#define MMEA3_ADDRDEC1_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL +//MMEA3_ADDRDEC1_COL_SEL_LO_CS01 +#define MMEA3_ADDRDEC1_COL_SEL_LO_CS01__COL0__SHIFT 0x0 +#define MMEA3_ADDRDEC1_COL_SEL_LO_CS01__COL1__SHIFT 0x4 +#define MMEA3_ADDRDEC1_COL_SEL_LO_CS01__COL2__SHIFT 0x8 +#define MMEA3_ADDRDEC1_COL_SEL_LO_CS01__COL3__SHIFT 0xc +#define MMEA3_ADDRDEC1_COL_SEL_LO_CS01__COL4__SHIFT 0x10 +#define MMEA3_ADDRDEC1_COL_SEL_LO_CS01__COL5__SHIFT 0x14 +#define MMEA3_ADDRDEC1_COL_SEL_LO_CS01__COL6__SHIFT 0x18 +#define MMEA3_ADDRDEC1_COL_SEL_LO_CS01__COL7__SHIFT 0x1c +#define MMEA3_ADDRDEC1_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL +#define MMEA3_ADDRDEC1_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L +#define MMEA3_ADDRDEC1_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L +#define MMEA3_ADDRDEC1_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L +#define MMEA3_ADDRDEC1_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L +#define MMEA3_ADDRDEC1_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L +#define MMEA3_ADDRDEC1_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L +#define MMEA3_ADDRDEC1_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L +//MMEA3_ADDRDEC1_COL_SEL_LO_CS23 +#define MMEA3_ADDRDEC1_COL_SEL_LO_CS23__COL0__SHIFT 0x0 +#define MMEA3_ADDRDEC1_COL_SEL_LO_CS23__COL1__SHIFT 0x4 +#define MMEA3_ADDRDEC1_COL_SEL_LO_CS23__COL2__SHIFT 0x8 +#define MMEA3_ADDRDEC1_COL_SEL_LO_CS23__COL3__SHIFT 0xc +#define MMEA3_ADDRDEC1_COL_SEL_LO_CS23__COL4__SHIFT 0x10 +#define MMEA3_ADDRDEC1_COL_SEL_LO_CS23__COL5__SHIFT 0x14 +#define MMEA3_ADDRDEC1_COL_SEL_LO_CS23__COL6__SHIFT 0x18 +#define MMEA3_ADDRDEC1_COL_SEL_LO_CS23__COL7__SHIFT 0x1c +#define MMEA3_ADDRDEC1_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL +#define MMEA3_ADDRDEC1_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L +#define MMEA3_ADDRDEC1_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L +#define MMEA3_ADDRDEC1_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L +#define MMEA3_ADDRDEC1_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L +#define MMEA3_ADDRDEC1_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L +#define MMEA3_ADDRDEC1_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L +#define MMEA3_ADDRDEC1_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L +//MMEA3_ADDRDEC1_COL_SEL_HI_CS01 +#define MMEA3_ADDRDEC1_COL_SEL_HI_CS01__COL8__SHIFT 0x0 +#define MMEA3_ADDRDEC1_COL_SEL_HI_CS01__COL9__SHIFT 0x4 +#define MMEA3_ADDRDEC1_COL_SEL_HI_CS01__COL10__SHIFT 0x8 +#define MMEA3_ADDRDEC1_COL_SEL_HI_CS01__COL11__SHIFT 0xc +#define MMEA3_ADDRDEC1_COL_SEL_HI_CS01__COL12__SHIFT 0x10 +#define MMEA3_ADDRDEC1_COL_SEL_HI_CS01__COL13__SHIFT 0x14 +#define MMEA3_ADDRDEC1_COL_SEL_HI_CS01__COL14__SHIFT 0x18 +#define MMEA3_ADDRDEC1_COL_SEL_HI_CS01__COL15__SHIFT 0x1c +#define MMEA3_ADDRDEC1_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL +#define MMEA3_ADDRDEC1_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L +#define MMEA3_ADDRDEC1_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L +#define MMEA3_ADDRDEC1_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L +#define MMEA3_ADDRDEC1_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L +#define MMEA3_ADDRDEC1_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L +#define MMEA3_ADDRDEC1_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L +#define MMEA3_ADDRDEC1_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L +//MMEA3_ADDRDEC1_COL_SEL_HI_CS23 +#define MMEA3_ADDRDEC1_COL_SEL_HI_CS23__COL8__SHIFT 0x0 +#define MMEA3_ADDRDEC1_COL_SEL_HI_CS23__COL9__SHIFT 0x4 +#define MMEA3_ADDRDEC1_COL_SEL_HI_CS23__COL10__SHIFT 0x8 +#define 
MMEA3_ADDRDEC1_COL_SEL_HI_CS23__COL11__SHIFT 0xc +#define MMEA3_ADDRDEC1_COL_SEL_HI_CS23__COL12__SHIFT 0x10 +#define MMEA3_ADDRDEC1_COL_SEL_HI_CS23__COL13__SHIFT 0x14 +#define MMEA3_ADDRDEC1_COL_SEL_HI_CS23__COL14__SHIFT 0x18 +#define MMEA3_ADDRDEC1_COL_SEL_HI_CS23__COL15__SHIFT 0x1c +#define MMEA3_ADDRDEC1_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL +#define MMEA3_ADDRDEC1_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L +#define MMEA3_ADDRDEC1_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L +#define MMEA3_ADDRDEC1_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L +#define MMEA3_ADDRDEC1_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L +#define MMEA3_ADDRDEC1_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L +#define MMEA3_ADDRDEC1_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L +#define MMEA3_ADDRDEC1_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L +//MMEA3_ADDRDEC1_RM_SEL_CS01 +#define MMEA3_ADDRDEC1_RM_SEL_CS01__RM0__SHIFT 0x0 +#define MMEA3_ADDRDEC1_RM_SEL_CS01__RM1__SHIFT 0x4 +#define MMEA3_ADDRDEC1_RM_SEL_CS01__RM2__SHIFT 0x8 +#define MMEA3_ADDRDEC1_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc +#define MMEA3_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA3_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA3_ADDRDEC1_RM_SEL_CS01__RM0_MASK 0x0000000FL +#define MMEA3_ADDRDEC1_RM_SEL_CS01__RM1_MASK 0x000000F0L +#define MMEA3_ADDRDEC1_RM_SEL_CS01__RM2_MASK 0x00000F00L +#define MMEA3_ADDRDEC1_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA3_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA3_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA3_ADDRDEC1_RM_SEL_CS23 +#define MMEA3_ADDRDEC1_RM_SEL_CS23__RM0__SHIFT 0x0 +#define MMEA3_ADDRDEC1_RM_SEL_CS23__RM1__SHIFT 0x4 +#define MMEA3_ADDRDEC1_RM_SEL_CS23__RM2__SHIFT 0x8 +#define MMEA3_ADDRDEC1_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc +#define MMEA3_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA3_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA3_ADDRDEC1_RM_SEL_CS23__RM0_MASK 0x0000000FL +#define MMEA3_ADDRDEC1_RM_SEL_CS23__RM1_MASK 0x000000F0L +#define MMEA3_ADDRDEC1_RM_SEL_CS23__RM2_MASK 0x00000F00L +#define MMEA3_ADDRDEC1_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA3_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA3_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA3_ADDRDEC1_RM_SEL_SECCS01 +#define MMEA3_ADDRDEC1_RM_SEL_SECCS01__RM0__SHIFT 0x0 +#define MMEA3_ADDRDEC1_RM_SEL_SECCS01__RM1__SHIFT 0x4 +#define MMEA3_ADDRDEC1_RM_SEL_SECCS01__RM2__SHIFT 0x8 +#define MMEA3_ADDRDEC1_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc +#define MMEA3_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA3_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA3_ADDRDEC1_RM_SEL_SECCS01__RM0_MASK 0x0000000FL +#define MMEA3_ADDRDEC1_RM_SEL_SECCS01__RM1_MASK 0x000000F0L +#define MMEA3_ADDRDEC1_RM_SEL_SECCS01__RM2_MASK 0x00000F00L +#define MMEA3_ADDRDEC1_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA3_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA3_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA3_ADDRDEC1_RM_SEL_SECCS23 +#define MMEA3_ADDRDEC1_RM_SEL_SECCS23__RM0__SHIFT 0x0 +#define MMEA3_ADDRDEC1_RM_SEL_SECCS23__RM1__SHIFT 0x4 +#define MMEA3_ADDRDEC1_RM_SEL_SECCS23__RM2__SHIFT 0x8 +#define MMEA3_ADDRDEC1_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc +#define MMEA3_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA3_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define 
MMEA3_ADDRDEC1_RM_SEL_SECCS23__RM0_MASK 0x0000000FL +#define MMEA3_ADDRDEC1_RM_SEL_SECCS23__RM1_MASK 0x000000F0L +#define MMEA3_ADDRDEC1_RM_SEL_SECCS23__RM2_MASK 0x00000F00L +#define MMEA3_ADDRDEC1_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA3_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA3_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA3_ADDRDEC2_BASE_ADDR_CS0 +#define MMEA3_ADDRDEC2_BASE_ADDR_CS0__CS_EN__SHIFT 0x0 +#define MMEA3_ADDRDEC2_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1 +#define MMEA3_ADDRDEC2_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L +#define MMEA3_ADDRDEC2_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA3_ADDRDEC2_BASE_ADDR_CS1 +#define MMEA3_ADDRDEC2_BASE_ADDR_CS1__CS_EN__SHIFT 0x0 +#define MMEA3_ADDRDEC2_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1 +#define MMEA3_ADDRDEC2_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L +#define MMEA3_ADDRDEC2_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA3_ADDRDEC2_BASE_ADDR_CS2 +#define MMEA3_ADDRDEC2_BASE_ADDR_CS2__CS_EN__SHIFT 0x0 +#define MMEA3_ADDRDEC2_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1 +#define MMEA3_ADDRDEC2_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L +#define MMEA3_ADDRDEC2_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA3_ADDRDEC2_BASE_ADDR_CS3 +#define MMEA3_ADDRDEC2_BASE_ADDR_CS3__CS_EN__SHIFT 0x0 +#define MMEA3_ADDRDEC2_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1 +#define MMEA3_ADDRDEC2_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L +#define MMEA3_ADDRDEC2_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA3_ADDRDEC2_BASE_ADDR_SECCS0 +#define MMEA3_ADDRDEC2_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0 +#define MMEA3_ADDRDEC2_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1 +#define MMEA3_ADDRDEC2_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L +#define MMEA3_ADDRDEC2_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA3_ADDRDEC2_BASE_ADDR_SECCS1 +#define MMEA3_ADDRDEC2_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0 +#define MMEA3_ADDRDEC2_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1 +#define MMEA3_ADDRDEC2_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L +#define MMEA3_ADDRDEC2_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA3_ADDRDEC2_BASE_ADDR_SECCS2 +#define MMEA3_ADDRDEC2_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0 +#define MMEA3_ADDRDEC2_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1 +#define MMEA3_ADDRDEC2_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L +#define MMEA3_ADDRDEC2_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA3_ADDRDEC2_BASE_ADDR_SECCS3 +#define MMEA3_ADDRDEC2_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0 +#define MMEA3_ADDRDEC2_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1 +#define MMEA3_ADDRDEC2_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L +#define MMEA3_ADDRDEC2_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA3_ADDRDEC2_ADDR_MASK_CS01 +#define MMEA3_ADDRDEC2_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1 +#define MMEA3_ADDRDEC2_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA3_ADDRDEC2_ADDR_MASK_CS23 +#define MMEA3_ADDRDEC2_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1 +#define MMEA3_ADDRDEC2_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA3_ADDRDEC2_ADDR_MASK_SECCS01 +#define MMEA3_ADDRDEC2_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1 +#define MMEA3_ADDRDEC2_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA3_ADDRDEC2_ADDR_MASK_SECCS23 +#define MMEA3_ADDRDEC2_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1 +#define MMEA3_ADDRDEC2_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA3_ADDRDEC2_ADDR_CFG_CS01 +#define MMEA3_ADDRDEC2_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA3_ADDRDEC2_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4 +#define MMEA3_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8 +#define 
MMEA3_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc +#define MMEA3_ADDRDEC2_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10 +#define MMEA3_ADDRDEC2_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14 +#define MMEA3_ADDRDEC2_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f +#define MMEA3_ADDRDEC2_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA3_ADDRDEC2_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L +#define MMEA3_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA3_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA3_ADDRDEC2_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L +#define MMEA3_ADDRDEC2_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L +#define MMEA3_ADDRDEC2_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L +//MMEA3_ADDRDEC2_ADDR_CFG_CS23 +#define MMEA3_ADDRDEC2_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA3_ADDRDEC2_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4 +#define MMEA3_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8 +#define MMEA3_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc +#define MMEA3_ADDRDEC2_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10 +#define MMEA3_ADDRDEC2_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14 +#define MMEA3_ADDRDEC2_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f +#define MMEA3_ADDRDEC2_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA3_ADDRDEC2_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L +#define MMEA3_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA3_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA3_ADDRDEC2_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L +#define MMEA3_ADDRDEC2_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L +#define MMEA3_ADDRDEC2_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L +//MMEA3_ADDRDEC2_ADDR_SEL_CS01 +#define MMEA3_ADDRDEC2_ADDR_SEL_CS01__BANK0__SHIFT 0x0 +#define MMEA3_ADDRDEC2_ADDR_SEL_CS01__BANK1__SHIFT 0x4 +#define MMEA3_ADDRDEC2_ADDR_SEL_CS01__BANK2__SHIFT 0x8 +#define MMEA3_ADDRDEC2_ADDR_SEL_CS01__BANK3__SHIFT 0xc +#define MMEA3_ADDRDEC2_ADDR_SEL_CS01__BANK4__SHIFT 0x10 +#define MMEA3_ADDRDEC2_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18 +#define MMEA3_ADDRDEC2_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c +#define MMEA3_ADDRDEC2_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL +#define MMEA3_ADDRDEC2_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L +#define MMEA3_ADDRDEC2_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L +#define MMEA3_ADDRDEC2_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L +#define MMEA3_ADDRDEC2_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L +#define MMEA3_ADDRDEC2_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L +#define MMEA3_ADDRDEC2_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L +//MMEA3_ADDRDEC2_ADDR_SEL_CS23 +#define MMEA3_ADDRDEC2_ADDR_SEL_CS23__BANK0__SHIFT 0x0 +#define MMEA3_ADDRDEC2_ADDR_SEL_CS23__BANK1__SHIFT 0x4 +#define MMEA3_ADDRDEC2_ADDR_SEL_CS23__BANK2__SHIFT 0x8 +#define MMEA3_ADDRDEC2_ADDR_SEL_CS23__BANK3__SHIFT 0xc +#define MMEA3_ADDRDEC2_ADDR_SEL_CS23__BANK4__SHIFT 0x10 +#define MMEA3_ADDRDEC2_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18 +#define MMEA3_ADDRDEC2_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c +#define MMEA3_ADDRDEC2_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL +#define MMEA3_ADDRDEC2_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L +#define MMEA3_ADDRDEC2_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L +#define MMEA3_ADDRDEC2_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L +#define MMEA3_ADDRDEC2_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L +#define MMEA3_ADDRDEC2_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L +#define MMEA3_ADDRDEC2_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L +//MMEA3_ADDRDEC2_ADDR_SEL2_CS01 +#define MMEA3_ADDRDEC2_ADDR_SEL2_CS01__BANK5__SHIFT 0x0 +#define MMEA3_ADDRDEC2_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL +//MMEA3_ADDRDEC2_ADDR_SEL2_CS23 +#define 
MMEA3_ADDRDEC2_ADDR_SEL2_CS23__BANK5__SHIFT 0x0 +#define MMEA3_ADDRDEC2_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL +//MMEA3_ADDRDEC2_COL_SEL_LO_CS01 +#define MMEA3_ADDRDEC2_COL_SEL_LO_CS01__COL0__SHIFT 0x0 +#define MMEA3_ADDRDEC2_COL_SEL_LO_CS01__COL1__SHIFT 0x4 +#define MMEA3_ADDRDEC2_COL_SEL_LO_CS01__COL2__SHIFT 0x8 +#define MMEA3_ADDRDEC2_COL_SEL_LO_CS01__COL3__SHIFT 0xc +#define MMEA3_ADDRDEC2_COL_SEL_LO_CS01__COL4__SHIFT 0x10 +#define MMEA3_ADDRDEC2_COL_SEL_LO_CS01__COL5__SHIFT 0x14 +#define MMEA3_ADDRDEC2_COL_SEL_LO_CS01__COL6__SHIFT 0x18 +#define MMEA3_ADDRDEC2_COL_SEL_LO_CS01__COL7__SHIFT 0x1c +#define MMEA3_ADDRDEC2_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL +#define MMEA3_ADDRDEC2_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L +#define MMEA3_ADDRDEC2_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L +#define MMEA3_ADDRDEC2_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L +#define MMEA3_ADDRDEC2_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L +#define MMEA3_ADDRDEC2_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L +#define MMEA3_ADDRDEC2_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L +#define MMEA3_ADDRDEC2_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L +//MMEA3_ADDRDEC2_COL_SEL_LO_CS23 +#define MMEA3_ADDRDEC2_COL_SEL_LO_CS23__COL0__SHIFT 0x0 +#define MMEA3_ADDRDEC2_COL_SEL_LO_CS23__COL1__SHIFT 0x4 +#define MMEA3_ADDRDEC2_COL_SEL_LO_CS23__COL2__SHIFT 0x8 +#define MMEA3_ADDRDEC2_COL_SEL_LO_CS23__COL3__SHIFT 0xc +#define MMEA3_ADDRDEC2_COL_SEL_LO_CS23__COL4__SHIFT 0x10 +#define MMEA3_ADDRDEC2_COL_SEL_LO_CS23__COL5__SHIFT 0x14 +#define MMEA3_ADDRDEC2_COL_SEL_LO_CS23__COL6__SHIFT 0x18 +#define MMEA3_ADDRDEC2_COL_SEL_LO_CS23__COL7__SHIFT 0x1c +#define MMEA3_ADDRDEC2_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL +#define MMEA3_ADDRDEC2_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L +#define MMEA3_ADDRDEC2_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L +#define MMEA3_ADDRDEC2_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L +#define MMEA3_ADDRDEC2_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L +#define MMEA3_ADDRDEC2_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L +#define MMEA3_ADDRDEC2_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L +#define MMEA3_ADDRDEC2_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L +//MMEA3_ADDRDEC2_COL_SEL_HI_CS01 +#define MMEA3_ADDRDEC2_COL_SEL_HI_CS01__COL8__SHIFT 0x0 +#define MMEA3_ADDRDEC2_COL_SEL_HI_CS01__COL9__SHIFT 0x4 +#define MMEA3_ADDRDEC2_COL_SEL_HI_CS01__COL10__SHIFT 0x8 +#define MMEA3_ADDRDEC2_COL_SEL_HI_CS01__COL11__SHIFT 0xc +#define MMEA3_ADDRDEC2_COL_SEL_HI_CS01__COL12__SHIFT 0x10 +#define MMEA3_ADDRDEC2_COL_SEL_HI_CS01__COL13__SHIFT 0x14 +#define MMEA3_ADDRDEC2_COL_SEL_HI_CS01__COL14__SHIFT 0x18 +#define MMEA3_ADDRDEC2_COL_SEL_HI_CS01__COL15__SHIFT 0x1c +#define MMEA3_ADDRDEC2_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL +#define MMEA3_ADDRDEC2_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L +#define MMEA3_ADDRDEC2_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L +#define MMEA3_ADDRDEC2_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L +#define MMEA3_ADDRDEC2_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L +#define MMEA3_ADDRDEC2_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L +#define MMEA3_ADDRDEC2_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L +#define MMEA3_ADDRDEC2_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L +//MMEA3_ADDRDEC2_COL_SEL_HI_CS23 +#define MMEA3_ADDRDEC2_COL_SEL_HI_CS23__COL8__SHIFT 0x0 +#define MMEA3_ADDRDEC2_COL_SEL_HI_CS23__COL9__SHIFT 0x4 +#define MMEA3_ADDRDEC2_COL_SEL_HI_CS23__COL10__SHIFT 0x8 +#define MMEA3_ADDRDEC2_COL_SEL_HI_CS23__COL11__SHIFT 0xc +#define MMEA3_ADDRDEC2_COL_SEL_HI_CS23__COL12__SHIFT 0x10 +#define MMEA3_ADDRDEC2_COL_SEL_HI_CS23__COL13__SHIFT 0x14 +#define MMEA3_ADDRDEC2_COL_SEL_HI_CS23__COL14__SHIFT 
0x18 +#define MMEA3_ADDRDEC2_COL_SEL_HI_CS23__COL15__SHIFT 0x1c +#define MMEA3_ADDRDEC2_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL +#define MMEA3_ADDRDEC2_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L +#define MMEA3_ADDRDEC2_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L +#define MMEA3_ADDRDEC2_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L +#define MMEA3_ADDRDEC2_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L +#define MMEA3_ADDRDEC2_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L +#define MMEA3_ADDRDEC2_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L +#define MMEA3_ADDRDEC2_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L +//MMEA3_ADDRDEC2_RM_SEL_CS01 +#define MMEA3_ADDRDEC2_RM_SEL_CS01__RM0__SHIFT 0x0 +#define MMEA3_ADDRDEC2_RM_SEL_CS01__RM1__SHIFT 0x4 +#define MMEA3_ADDRDEC2_RM_SEL_CS01__RM2__SHIFT 0x8 +#define MMEA3_ADDRDEC2_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc +#define MMEA3_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA3_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA3_ADDRDEC2_RM_SEL_CS01__RM0_MASK 0x0000000FL +#define MMEA3_ADDRDEC2_RM_SEL_CS01__RM1_MASK 0x000000F0L +#define MMEA3_ADDRDEC2_RM_SEL_CS01__RM2_MASK 0x00000F00L +#define MMEA3_ADDRDEC2_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA3_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA3_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA3_ADDRDEC2_RM_SEL_CS23 +#define MMEA3_ADDRDEC2_RM_SEL_CS23__RM0__SHIFT 0x0 +#define MMEA3_ADDRDEC2_RM_SEL_CS23__RM1__SHIFT 0x4 +#define MMEA3_ADDRDEC2_RM_SEL_CS23__RM2__SHIFT 0x8 +#define MMEA3_ADDRDEC2_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc +#define MMEA3_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA3_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA3_ADDRDEC2_RM_SEL_CS23__RM0_MASK 0x0000000FL +#define MMEA3_ADDRDEC2_RM_SEL_CS23__RM1_MASK 0x000000F0L +#define MMEA3_ADDRDEC2_RM_SEL_CS23__RM2_MASK 0x00000F00L +#define MMEA3_ADDRDEC2_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA3_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA3_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA3_ADDRDEC2_RM_SEL_SECCS01 +#define MMEA3_ADDRDEC2_RM_SEL_SECCS01__RM0__SHIFT 0x0 +#define MMEA3_ADDRDEC2_RM_SEL_SECCS01__RM1__SHIFT 0x4 +#define MMEA3_ADDRDEC2_RM_SEL_SECCS01__RM2__SHIFT 0x8 +#define MMEA3_ADDRDEC2_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc +#define MMEA3_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA3_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA3_ADDRDEC2_RM_SEL_SECCS01__RM0_MASK 0x0000000FL +#define MMEA3_ADDRDEC2_RM_SEL_SECCS01__RM1_MASK 0x000000F0L +#define MMEA3_ADDRDEC2_RM_SEL_SECCS01__RM2_MASK 0x00000F00L +#define MMEA3_ADDRDEC2_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA3_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA3_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA3_ADDRDEC2_RM_SEL_SECCS23 +#define MMEA3_ADDRDEC2_RM_SEL_SECCS23__RM0__SHIFT 0x0 +#define MMEA3_ADDRDEC2_RM_SEL_SECCS23__RM1__SHIFT 0x4 +#define MMEA3_ADDRDEC2_RM_SEL_SECCS23__RM2__SHIFT 0x8 +#define MMEA3_ADDRDEC2_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc +#define MMEA3_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA3_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA3_ADDRDEC2_RM_SEL_SECCS23__RM0_MASK 0x0000000FL +#define MMEA3_ADDRDEC2_RM_SEL_SECCS23__RM1_MASK 0x000000F0L +#define MMEA3_ADDRDEC2_RM_SEL_SECCS23__RM2_MASK 0x00000F00L +#define 
MMEA3_ADDRDEC2_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA3_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA3_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA3_ADDRNORMDRAM_GLOBAL_CNTL +#define MMEA3_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K__SHIFT 0x14 +#define MMEA3_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M__SHIFT 0x15 +#define MMEA3_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G__SHIFT 0x16 +#define MMEA3_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K_MASK 0x00100000L +#define MMEA3_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M_MASK 0x00200000L +#define MMEA3_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G_MASK 0x00400000L +//MMEA3_ADDRNORMGMI_GLOBAL_CNTL +#define MMEA3_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K__SHIFT 0x14 +#define MMEA3_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M__SHIFT 0x15 +#define MMEA3_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G__SHIFT 0x16 +#define MMEA3_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K_MASK 0x00100000L +#define MMEA3_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M_MASK 0x00200000L +#define MMEA3_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G_MASK 0x00400000L +//MMEA3_IO_RD_CLI2GRP_MAP0 +#define MMEA3_IO_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0 +#define MMEA3_IO_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2 +#define MMEA3_IO_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4 +#define MMEA3_IO_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6 +#define MMEA3_IO_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8 +#define MMEA3_IO_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa +#define MMEA3_IO_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc +#define MMEA3_IO_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe +#define MMEA3_IO_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10 +#define MMEA3_IO_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12 +#define MMEA3_IO_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14 +#define MMEA3_IO_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16 +#define MMEA3_IO_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18 +#define MMEA3_IO_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a +#define MMEA3_IO_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c +#define MMEA3_IO_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e +#define MMEA3_IO_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L +#define MMEA3_IO_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL +#define MMEA3_IO_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L +#define MMEA3_IO_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L +#define MMEA3_IO_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L +#define MMEA3_IO_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L +#define MMEA3_IO_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L +#define MMEA3_IO_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L +#define MMEA3_IO_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L +#define MMEA3_IO_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L +#define MMEA3_IO_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L +#define MMEA3_IO_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L +#define MMEA3_IO_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L +#define MMEA3_IO_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L +#define MMEA3_IO_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L +#define MMEA3_IO_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L +//MMEA3_IO_RD_CLI2GRP_MAP1 +#define MMEA3_IO_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0 +#define MMEA3_IO_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2 +#define MMEA3_IO_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4 +#define MMEA3_IO_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6 +#define MMEA3_IO_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8 +#define MMEA3_IO_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa 
+#define MMEA3_IO_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc +#define MMEA3_IO_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe +#define MMEA3_IO_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10 +#define MMEA3_IO_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12 +#define MMEA3_IO_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14 +#define MMEA3_IO_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16 +#define MMEA3_IO_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18 +#define MMEA3_IO_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a +#define MMEA3_IO_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c +#define MMEA3_IO_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e +#define MMEA3_IO_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L +#define MMEA3_IO_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL +#define MMEA3_IO_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L +#define MMEA3_IO_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L +#define MMEA3_IO_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L +#define MMEA3_IO_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L +#define MMEA3_IO_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L +#define MMEA3_IO_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L +#define MMEA3_IO_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L +#define MMEA3_IO_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L +#define MMEA3_IO_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L +#define MMEA3_IO_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L +#define MMEA3_IO_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L +#define MMEA3_IO_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L +#define MMEA3_IO_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L +#define MMEA3_IO_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L +//MMEA3_IO_WR_CLI2GRP_MAP0 +#define MMEA3_IO_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0 +#define MMEA3_IO_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2 +#define MMEA3_IO_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4 +#define MMEA3_IO_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6 +#define MMEA3_IO_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8 +#define MMEA3_IO_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa +#define MMEA3_IO_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc +#define MMEA3_IO_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe +#define MMEA3_IO_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10 +#define MMEA3_IO_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12 +#define MMEA3_IO_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14 +#define MMEA3_IO_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16 +#define MMEA3_IO_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18 +#define MMEA3_IO_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a +#define MMEA3_IO_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c +#define MMEA3_IO_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e +#define MMEA3_IO_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L +#define MMEA3_IO_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL +#define MMEA3_IO_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L +#define MMEA3_IO_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L +#define MMEA3_IO_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L +#define MMEA3_IO_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L +#define MMEA3_IO_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L +#define MMEA3_IO_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L +#define MMEA3_IO_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L +#define MMEA3_IO_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L +#define MMEA3_IO_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L +#define MMEA3_IO_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L +#define MMEA3_IO_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L +#define MMEA3_IO_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L +#define MMEA3_IO_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L +#define 
MMEA3_IO_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L +//MMEA3_IO_WR_CLI2GRP_MAP1 +#define MMEA3_IO_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0 +#define MMEA3_IO_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2 +#define MMEA3_IO_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4 +#define MMEA3_IO_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6 +#define MMEA3_IO_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8 +#define MMEA3_IO_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa +#define MMEA3_IO_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc +#define MMEA3_IO_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe +#define MMEA3_IO_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10 +#define MMEA3_IO_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12 +#define MMEA3_IO_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14 +#define MMEA3_IO_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16 +#define MMEA3_IO_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18 +#define MMEA3_IO_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a +#define MMEA3_IO_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c +#define MMEA3_IO_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e +#define MMEA3_IO_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L +#define MMEA3_IO_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL +#define MMEA3_IO_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L +#define MMEA3_IO_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L +#define MMEA3_IO_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L +#define MMEA3_IO_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L +#define MMEA3_IO_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L +#define MMEA3_IO_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L +#define MMEA3_IO_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L +#define MMEA3_IO_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L +#define MMEA3_IO_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L +#define MMEA3_IO_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L +#define MMEA3_IO_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L +#define MMEA3_IO_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L +#define MMEA3_IO_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L +#define MMEA3_IO_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L +//MMEA3_IO_RD_COMBINE_FLUSH +#define MMEA3_IO_RD_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0 +#define MMEA3_IO_RD_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4 +#define MMEA3_IO_RD_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8 +#define MMEA3_IO_RD_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc +#define MMEA3_IO_RD_COMBINE_FLUSH__FORWARD_COMB_ONLY__SHIFT 0x10 +#define MMEA3_IO_RD_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL +#define MMEA3_IO_RD_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L +#define MMEA3_IO_RD_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L +#define MMEA3_IO_RD_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L +#define MMEA3_IO_RD_COMBINE_FLUSH__FORWARD_COMB_ONLY_MASK 0x00010000L +//MMEA3_IO_WR_COMBINE_FLUSH +#define MMEA3_IO_WR_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0 +#define MMEA3_IO_WR_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4 +#define MMEA3_IO_WR_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8 +#define MMEA3_IO_WR_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc +#define MMEA3_IO_WR_COMBINE_FLUSH__FORWARD_COMB_ONLY__SHIFT 0x10 +#define MMEA3_IO_WR_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL +#define MMEA3_IO_WR_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L +#define MMEA3_IO_WR_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L +#define MMEA3_IO_WR_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L +#define MMEA3_IO_WR_COMBINE_FLUSH__FORWARD_COMB_ONLY_MASK 0x00010000L +//MMEA3_IO_GROUP_BURST +#define MMEA3_IO_GROUP_BURST__RD_LIMIT_LO__SHIFT 0x0 +#define MMEA3_IO_GROUP_BURST__RD_LIMIT_HI__SHIFT 0x8 +#define 
MMEA3_IO_GROUP_BURST__WR_LIMIT_LO__SHIFT 0x10 +#define MMEA3_IO_GROUP_BURST__WR_LIMIT_HI__SHIFT 0x18 +#define MMEA3_IO_GROUP_BURST__RD_LIMIT_LO_MASK 0x000000FFL +#define MMEA3_IO_GROUP_BURST__RD_LIMIT_HI_MASK 0x0000FF00L +#define MMEA3_IO_GROUP_BURST__WR_LIMIT_LO_MASK 0x00FF0000L +#define MMEA3_IO_GROUP_BURST__WR_LIMIT_HI_MASK 0xFF000000L +//MMEA3_IO_RD_PRI_AGE +#define MMEA3_IO_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0 +#define MMEA3_IO_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3 +#define MMEA3_IO_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6 +#define MMEA3_IO_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9 +#define MMEA3_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc +#define MMEA3_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf +#define MMEA3_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12 +#define MMEA3_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15 +#define MMEA3_IO_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L +#define MMEA3_IO_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L +#define MMEA3_IO_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L +#define MMEA3_IO_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L +#define MMEA3_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L +#define MMEA3_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L +#define MMEA3_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L +#define MMEA3_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L +//MMEA3_IO_WR_PRI_AGE +#define MMEA3_IO_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0 +#define MMEA3_IO_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3 +#define MMEA3_IO_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6 +#define MMEA3_IO_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9 +#define MMEA3_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc +#define MMEA3_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf +#define MMEA3_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12 +#define MMEA3_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15 +#define MMEA3_IO_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L +#define MMEA3_IO_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L +#define MMEA3_IO_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L +#define MMEA3_IO_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L +#define MMEA3_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L +#define MMEA3_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L +#define MMEA3_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L +#define MMEA3_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L +//MMEA3_IO_RD_PRI_QUEUING +#define MMEA3_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0 +#define MMEA3_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3 +#define MMEA3_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6 +#define MMEA3_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9 +#define MMEA3_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L +#define MMEA3_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L +#define MMEA3_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L +#define MMEA3_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L +//MMEA3_IO_WR_PRI_QUEUING +#define MMEA3_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0 +#define MMEA3_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3 +#define MMEA3_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6 +#define MMEA3_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9 +#define MMEA3_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L +#define MMEA3_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L 
+#define MMEA3_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L +#define MMEA3_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L +//MMEA3_IO_RD_PRI_FIXED +#define MMEA3_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0 +#define MMEA3_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3 +#define MMEA3_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6 +#define MMEA3_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9 +#define MMEA3_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L +#define MMEA3_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L +#define MMEA3_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L +#define MMEA3_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L +//MMEA3_IO_WR_PRI_FIXED +#define MMEA3_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0 +#define MMEA3_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3 +#define MMEA3_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6 +#define MMEA3_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9 +#define MMEA3_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L +#define MMEA3_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L +#define MMEA3_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L +#define MMEA3_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L +//MMEA3_IO_RD_PRI_URGENCY +#define MMEA3_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0 +#define MMEA3_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3 +#define MMEA3_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6 +#define MMEA3_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9 +#define MMEA3_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc +#define MMEA3_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd +#define MMEA3_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe +#define MMEA3_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf +#define MMEA3_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L +#define MMEA3_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L +#define MMEA3_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L +#define MMEA3_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L +#define MMEA3_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L +#define MMEA3_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L +#define MMEA3_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L +#define MMEA3_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L +//MMEA3_IO_WR_PRI_URGENCY +#define MMEA3_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0 +#define MMEA3_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3 +#define MMEA3_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6 +#define MMEA3_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9 +#define MMEA3_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc +#define MMEA3_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd +#define MMEA3_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe +#define MMEA3_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf +#define MMEA3_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L +#define MMEA3_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L +#define MMEA3_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L +#define MMEA3_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L +#define MMEA3_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L +#define MMEA3_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L +#define 
MMEA3_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L +#define MMEA3_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L +//MMEA3_IO_RD_PRI_URGENCY_MASKING +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0 +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1 +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2 +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3 +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4 +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5 +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6 +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7 +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8 +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9 +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10 +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11 +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12 +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13 +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14 +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15 +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16 +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17 +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18 +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19 +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L +#define 
MMEA3_IO_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L +#define MMEA3_IO_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L +//MMEA3_IO_WR_PRI_URGENCY_MASKING +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0 +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1 +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2 +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3 +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4 +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5 +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6 +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7 +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8 +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9 +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10 +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11 +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12 +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13 +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14 +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15 +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16 +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17 +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18 +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19 +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L +#define 
MMEA3_IO_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L +#define MMEA3_IO_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L +//MMEA3_IO_RD_PRI_QUANT_PRI1 +#define MMEA3_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA3_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA3_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA3_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA3_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA3_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA3_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA3_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA3_IO_RD_PRI_QUANT_PRI2 +#define MMEA3_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA3_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA3_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA3_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA3_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA3_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA3_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA3_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA3_IO_RD_PRI_QUANT_PRI3 +#define MMEA3_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA3_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA3_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA3_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA3_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA3_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA3_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA3_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA3_IO_WR_PRI_QUANT_PRI1 +#define MMEA3_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA3_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 
0x8 +#define MMEA3_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA3_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA3_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA3_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA3_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA3_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA3_IO_WR_PRI_QUANT_PRI2 +#define MMEA3_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA3_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA3_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA3_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA3_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA3_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA3_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA3_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA3_IO_WR_PRI_QUANT_PRI3 +#define MMEA3_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA3_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA3_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA3_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA3_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA3_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA3_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA3_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA3_SDP_ARB_DRAM +#define MMEA3_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0 +#define MMEA3_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA__SHIFT 0x8 +#define MMEA3_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI__SHIFT 0x10 +#define MMEA3_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI__SHIFT 0x11 +#define MMEA3_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES__SHIFT 0x12 +#define MMEA3_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES__SHIFT 0x13 +#define MMEA3_SDP_ARB_DRAM__EOB_ON_EXPIRE__SHIFT 0x14 +#define MMEA3_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15 +#define MMEA3_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL +#define MMEA3_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L +#define MMEA3_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI_MASK 0x00010000L +#define MMEA3_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI_MASK 0x00020000L +#define MMEA3_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES_MASK 0x00040000L +#define MMEA3_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES_MASK 0x00080000L +#define MMEA3_SDP_ARB_DRAM__EOB_ON_EXPIRE_MASK 0x00100000L +#define MMEA3_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L +//MMEA3_SDP_ARB_GMI +#define MMEA3_SDP_ARB_GMI__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0 +#define MMEA3_SDP_ARB_GMI__RDWR_BURST_LIMIT_DATA__SHIFT 0x8 +#define MMEA3_SDP_ARB_GMI__EARLY_SW2RD_ON_PRI__SHIFT 0x10 +#define MMEA3_SDP_ARB_GMI__EARLY_SW2WR_ON_PRI__SHIFT 0x11 +#define MMEA3_SDP_ARB_GMI__EARLY_SW2RD_ON_RES__SHIFT 0x12 +#define MMEA3_SDP_ARB_GMI__EARLY_SW2WR_ON_RES__SHIFT 0x13 +#define MMEA3_SDP_ARB_GMI__EOB_ON_EXPIRE__SHIFT 0x14 +#define MMEA3_SDP_ARB_GMI__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15 +#define MMEA3_SDP_ARB_GMI__ALLOW_CHAIN_BREAKING__SHIFT 0x16 +#define MMEA3_SDP_ARB_GMI__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL +#define MMEA3_SDP_ARB_GMI__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L +#define MMEA3_SDP_ARB_GMI__EARLY_SW2RD_ON_PRI_MASK 0x00010000L +#define MMEA3_SDP_ARB_GMI__EARLY_SW2WR_ON_PRI_MASK 0x00020000L +#define MMEA3_SDP_ARB_GMI__EARLY_SW2RD_ON_RES_MASK 0x00040000L +#define MMEA3_SDP_ARB_GMI__EARLY_SW2WR_ON_RES_MASK 0x00080000L 
+#define MMEA3_SDP_ARB_GMI__EOB_ON_EXPIRE_MASK 0x00100000L +#define MMEA3_SDP_ARB_GMI__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L +#define MMEA3_SDP_ARB_GMI__ALLOW_CHAIN_BREAKING_MASK 0x00400000L +//MMEA3_SDP_ARB_FINAL +#define MMEA3_SDP_ARB_FINAL__DRAM_BURST_LIMIT__SHIFT 0x0 +#define MMEA3_SDP_ARB_FINAL__GMI_BURST_LIMIT__SHIFT 0x5 +#define MMEA3_SDP_ARB_FINAL__IO_BURST_LIMIT__SHIFT 0xa +#define MMEA3_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER__SHIFT 0xf +#define MMEA3_SDP_ARB_FINAL__RDONLY_VC0__SHIFT 0x11 +#define MMEA3_SDP_ARB_FINAL__RDONLY_VC1__SHIFT 0x12 +#define MMEA3_SDP_ARB_FINAL__RDONLY_VC2__SHIFT 0x13 +#define MMEA3_SDP_ARB_FINAL__RDONLY_VC3__SHIFT 0x14 +#define MMEA3_SDP_ARB_FINAL__RDONLY_VC4__SHIFT 0x15 +#define MMEA3_SDP_ARB_FINAL__RDONLY_VC5__SHIFT 0x16 +#define MMEA3_SDP_ARB_FINAL__RDONLY_VC6__SHIFT 0x17 +#define MMEA3_SDP_ARB_FINAL__RDONLY_VC7__SHIFT 0x18 +#define MMEA3_SDP_ARB_FINAL__ERREVENT_ON_ERROR__SHIFT 0x19 +#define MMEA3_SDP_ARB_FINAL__HALTREQ_ON_ERROR__SHIFT 0x1a +#define MMEA3_SDP_ARB_FINAL__GMI_BURST_STRETCH__SHIFT 0x1b +#define MMEA3_SDP_ARB_FINAL__DRAM_BURST_LIMIT_MASK 0x0000001FL +#define MMEA3_SDP_ARB_FINAL__GMI_BURST_LIMIT_MASK 0x000003E0L +#define MMEA3_SDP_ARB_FINAL__IO_BURST_LIMIT_MASK 0x00007C00L +#define MMEA3_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER_MASK 0x00018000L +#define MMEA3_SDP_ARB_FINAL__RDONLY_VC0_MASK 0x00020000L +#define MMEA3_SDP_ARB_FINAL__RDONLY_VC1_MASK 0x00040000L +#define MMEA3_SDP_ARB_FINAL__RDONLY_VC2_MASK 0x00080000L +#define MMEA3_SDP_ARB_FINAL__RDONLY_VC3_MASK 0x00100000L +#define MMEA3_SDP_ARB_FINAL__RDONLY_VC4_MASK 0x00200000L +#define MMEA3_SDP_ARB_FINAL__RDONLY_VC5_MASK 0x00400000L +#define MMEA3_SDP_ARB_FINAL__RDONLY_VC6_MASK 0x00800000L +#define MMEA3_SDP_ARB_FINAL__RDONLY_VC7_MASK 0x01000000L +#define MMEA3_SDP_ARB_FINAL__ERREVENT_ON_ERROR_MASK 0x02000000L +#define MMEA3_SDP_ARB_FINAL__HALTREQ_ON_ERROR_MASK 0x04000000L +#define MMEA3_SDP_ARB_FINAL__GMI_BURST_STRETCH_MASK 0x08000000L +//MMEA3_SDP_DRAM_PRIORITY +#define MMEA3_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0 +#define MMEA3_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4 +#define MMEA3_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8 +#define MMEA3_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc +#define MMEA3_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10 +#define MMEA3_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14 +#define MMEA3_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18 +#define MMEA3_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c +#define MMEA3_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL +#define MMEA3_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L +#define MMEA3_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L +#define MMEA3_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L +#define MMEA3_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L +#define MMEA3_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L +#define MMEA3_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L +#define MMEA3_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L +//MMEA3_SDP_GMI_PRIORITY +#define MMEA3_SDP_GMI_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0 +#define MMEA3_SDP_GMI_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4 +#define MMEA3_SDP_GMI_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8 +#define MMEA3_SDP_GMI_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc +#define MMEA3_SDP_GMI_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10 +#define MMEA3_SDP_GMI_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14 +#define MMEA3_SDP_GMI_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18 +#define 
MMEA3_SDP_GMI_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c +#define MMEA3_SDP_GMI_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL +#define MMEA3_SDP_GMI_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L +#define MMEA3_SDP_GMI_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L +#define MMEA3_SDP_GMI_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L +#define MMEA3_SDP_GMI_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L +#define MMEA3_SDP_GMI_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L +#define MMEA3_SDP_GMI_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L +#define MMEA3_SDP_GMI_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L +//MMEA3_SDP_IO_PRIORITY +#define MMEA3_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0 +#define MMEA3_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4 +#define MMEA3_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8 +#define MMEA3_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc +#define MMEA3_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10 +#define MMEA3_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14 +#define MMEA3_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18 +#define MMEA3_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c +#define MMEA3_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL +#define MMEA3_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L +#define MMEA3_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L +#define MMEA3_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L +#define MMEA3_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L +#define MMEA3_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L +#define MMEA3_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L +#define MMEA3_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L +//MMEA3_SDP_CREDITS +#define MMEA3_SDP_CREDITS__TAG_LIMIT__SHIFT 0x0 +#define MMEA3_SDP_CREDITS__WR_RESP_CREDITS__SHIFT 0x8 +#define MMEA3_SDP_CREDITS__RD_RESP_CREDITS__SHIFT 0x10 +#define MMEA3_SDP_CREDITS__TAG_LIMIT_MASK 0x000000FFL +#define MMEA3_SDP_CREDITS__WR_RESP_CREDITS_MASK 0x00007F00L +#define MMEA3_SDP_CREDITS__RD_RESP_CREDITS_MASK 0x007F0000L +//MMEA3_SDP_TAG_RESERVE0 +#define MMEA3_SDP_TAG_RESERVE0__VC0__SHIFT 0x0 +#define MMEA3_SDP_TAG_RESERVE0__VC1__SHIFT 0x8 +#define MMEA3_SDP_TAG_RESERVE0__VC2__SHIFT 0x10 +#define MMEA3_SDP_TAG_RESERVE0__VC3__SHIFT 0x18 +#define MMEA3_SDP_TAG_RESERVE0__VC0_MASK 0x000000FFL +#define MMEA3_SDP_TAG_RESERVE0__VC1_MASK 0x0000FF00L +#define MMEA3_SDP_TAG_RESERVE0__VC2_MASK 0x00FF0000L +#define MMEA3_SDP_TAG_RESERVE0__VC3_MASK 0xFF000000L +//MMEA3_SDP_TAG_RESERVE1 +#define MMEA3_SDP_TAG_RESERVE1__VC4__SHIFT 0x0 +#define MMEA3_SDP_TAG_RESERVE1__VC5__SHIFT 0x8 +#define MMEA3_SDP_TAG_RESERVE1__VC6__SHIFT 0x10 +#define MMEA3_SDP_TAG_RESERVE1__VC7__SHIFT 0x18 +#define MMEA3_SDP_TAG_RESERVE1__VC4_MASK 0x000000FFL +#define MMEA3_SDP_TAG_RESERVE1__VC5_MASK 0x0000FF00L +#define MMEA3_SDP_TAG_RESERVE1__VC6_MASK 0x00FF0000L +#define MMEA3_SDP_TAG_RESERVE1__VC7_MASK 0xFF000000L +//MMEA3_SDP_VCC_RESERVE0 +#define MMEA3_SDP_VCC_RESERVE0__VC0_CREDITS__SHIFT 0x0 +#define MMEA3_SDP_VCC_RESERVE0__VC1_CREDITS__SHIFT 0x6 +#define MMEA3_SDP_VCC_RESERVE0__VC2_CREDITS__SHIFT 0xc +#define MMEA3_SDP_VCC_RESERVE0__VC3_CREDITS__SHIFT 0x12 +#define MMEA3_SDP_VCC_RESERVE0__VC4_CREDITS__SHIFT 0x18 +#define MMEA3_SDP_VCC_RESERVE0__VC0_CREDITS_MASK 0x0000003FL +#define MMEA3_SDP_VCC_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L +#define MMEA3_SDP_VCC_RESERVE0__VC2_CREDITS_MASK 0x0003F000L +#define MMEA3_SDP_VCC_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L +#define MMEA3_SDP_VCC_RESERVE0__VC4_CREDITS_MASK 0x3F000000L +//MMEA3_SDP_VCC_RESERVE1 
+#define MMEA3_SDP_VCC_RESERVE1__VC5_CREDITS__SHIFT 0x0 +#define MMEA3_SDP_VCC_RESERVE1__VC6_CREDITS__SHIFT 0x6 +#define MMEA3_SDP_VCC_RESERVE1__VC7_CREDITS__SHIFT 0xc +#define MMEA3_SDP_VCC_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f +#define MMEA3_SDP_VCC_RESERVE1__VC5_CREDITS_MASK 0x0000003FL +#define MMEA3_SDP_VCC_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L +#define MMEA3_SDP_VCC_RESERVE1__VC7_CREDITS_MASK 0x0003F000L +#define MMEA3_SDP_VCC_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L +//MMEA3_SDP_VCD_RESERVE0 +#define MMEA3_SDP_VCD_RESERVE0__VC0_CREDITS__SHIFT 0x0 +#define MMEA3_SDP_VCD_RESERVE0__VC1_CREDITS__SHIFT 0x6 +#define MMEA3_SDP_VCD_RESERVE0__VC2_CREDITS__SHIFT 0xc +#define MMEA3_SDP_VCD_RESERVE0__VC3_CREDITS__SHIFT 0x12 +#define MMEA3_SDP_VCD_RESERVE0__VC4_CREDITS__SHIFT 0x18 +#define MMEA3_SDP_VCD_RESERVE0__VC0_CREDITS_MASK 0x0000003FL +#define MMEA3_SDP_VCD_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L +#define MMEA3_SDP_VCD_RESERVE0__VC2_CREDITS_MASK 0x0003F000L +#define MMEA3_SDP_VCD_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L +#define MMEA3_SDP_VCD_RESERVE0__VC4_CREDITS_MASK 0x3F000000L +//MMEA3_SDP_VCD_RESERVE1 +#define MMEA3_SDP_VCD_RESERVE1__VC5_CREDITS__SHIFT 0x0 +#define MMEA3_SDP_VCD_RESERVE1__VC6_CREDITS__SHIFT 0x6 +#define MMEA3_SDP_VCD_RESERVE1__VC7_CREDITS__SHIFT 0xc +#define MMEA3_SDP_VCD_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f +#define MMEA3_SDP_VCD_RESERVE1__VC5_CREDITS_MASK 0x0000003FL +#define MMEA3_SDP_VCD_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L +#define MMEA3_SDP_VCD_RESERVE1__VC7_CREDITS_MASK 0x0003F000L +#define MMEA3_SDP_VCD_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L +//MMEA3_SDP_REQ_CNTL +#define MMEA3_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ__SHIFT 0x0 +#define MMEA3_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE__SHIFT 0x1 +#define MMEA3_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC__SHIFT 0x2 +#define MMEA3_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM__SHIFT 0x3 +#define MMEA3_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI__SHIFT 0x4 +#define MMEA3_SDP_REQ_CNTL__INNER_DOMAIN_MODE__SHIFT 0x5 +#define MMEA3_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ_MASK 0x00000001L +#define MMEA3_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE_MASK 0x00000002L +#define MMEA3_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC_MASK 0x00000004L +#define MMEA3_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM_MASK 0x00000008L +#define MMEA3_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI_MASK 0x00000010L +#define MMEA3_SDP_REQ_CNTL__INNER_DOMAIN_MODE_MASK 0x00000020L +//MMEA3_MISC +#define MMEA3_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB__SHIFT 0x0 +#define MMEA3_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB__SHIFT 0x1 +#define MMEA3_MISC__RELATIVE_PRI_IN_GMI_RD_ARB__SHIFT 0x2 +#define MMEA3_MISC__RELATIVE_PRI_IN_GMI_WR_ARB__SHIFT 0x3 +#define MMEA3_MISC__RELATIVE_PRI_IN_IO_RD_ARB__SHIFT 0x4 +#define MMEA3_MISC__RELATIVE_PRI_IN_IO_WR_ARB__SHIFT 0x5 +#define MMEA3_MISC__EARLYWRRET_ENABLE_VC0__SHIFT 0x6 +#define MMEA3_MISC__EARLYWRRET_ENABLE_VC1__SHIFT 0x7 +#define MMEA3_MISC__EARLYWRRET_ENABLE_VC2__SHIFT 0x8 +#define MMEA3_MISC__EARLYWRRET_ENABLE_VC3__SHIFT 0x9 +#define MMEA3_MISC__EARLYWRRET_ENABLE_VC4__SHIFT 0xa +#define MMEA3_MISC__EARLYWRRET_ENABLE_VC5__SHIFT 0xb +#define MMEA3_MISC__EARLYWRRET_ENABLE_VC6__SHIFT 0xc +#define MMEA3_MISC__EARLYWRRET_ENABLE_VC7__SHIFT 0xd +#define MMEA3_MISC__EARLY_SDP_ORIGDATA__SHIFT 0xe +#define MMEA3_MISC__LINKMGR_DYNAMIC_MODE__SHIFT 0xf +#define MMEA3_MISC__LINKMGR_HALT_THRESHOLD__SHIFT 0x11 +#define MMEA3_MISC__LINKMGR_RECONNECT_DELAY__SHIFT 0x13 +#define MMEA3_MISC__LINKMGR_IDLE_THRESHOLD__SHIFT 0x15 +#define 
MMEA3_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB__SHIFT 0x1a +#define MMEA3_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB__SHIFT 0x1b +#define MMEA3_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB__SHIFT 0x1c +#define MMEA3_MISC__FAVOUR_LAST_CS_IN_GMI_ARB__SHIFT 0x1d +#define MMEA3_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB__SHIFT 0x1e +#define MMEA3_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB__SHIFT 0x1f +#define MMEA3_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB_MASK 0x00000001L +#define MMEA3_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB_MASK 0x00000002L +#define MMEA3_MISC__RELATIVE_PRI_IN_GMI_RD_ARB_MASK 0x00000004L +#define MMEA3_MISC__RELATIVE_PRI_IN_GMI_WR_ARB_MASK 0x00000008L +#define MMEA3_MISC__RELATIVE_PRI_IN_IO_RD_ARB_MASK 0x00000010L +#define MMEA3_MISC__RELATIVE_PRI_IN_IO_WR_ARB_MASK 0x00000020L +#define MMEA3_MISC__EARLYWRRET_ENABLE_VC0_MASK 0x00000040L +#define MMEA3_MISC__EARLYWRRET_ENABLE_VC1_MASK 0x00000080L +#define MMEA3_MISC__EARLYWRRET_ENABLE_VC2_MASK 0x00000100L +#define MMEA3_MISC__EARLYWRRET_ENABLE_VC3_MASK 0x00000200L +#define MMEA3_MISC__EARLYWRRET_ENABLE_VC4_MASK 0x00000400L +#define MMEA3_MISC__EARLYWRRET_ENABLE_VC5_MASK 0x00000800L +#define MMEA3_MISC__EARLYWRRET_ENABLE_VC6_MASK 0x00001000L +#define MMEA3_MISC__EARLYWRRET_ENABLE_VC7_MASK 0x00002000L +#define MMEA3_MISC__EARLY_SDP_ORIGDATA_MASK 0x00004000L +#define MMEA3_MISC__LINKMGR_DYNAMIC_MODE_MASK 0x00018000L +#define MMEA3_MISC__LINKMGR_HALT_THRESHOLD_MASK 0x00060000L +#define MMEA3_MISC__LINKMGR_RECONNECT_DELAY_MASK 0x00180000L +#define MMEA3_MISC__LINKMGR_IDLE_THRESHOLD_MASK 0x03E00000L +#define MMEA3_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB_MASK 0x04000000L +#define MMEA3_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB_MASK 0x08000000L +#define MMEA3_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB_MASK 0x10000000L +#define MMEA3_MISC__FAVOUR_LAST_CS_IN_GMI_ARB_MASK 0x20000000L +#define MMEA3_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB_MASK 0x40000000L +#define MMEA3_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB_MASK 0x80000000L +//MMEA3_LATENCY_SAMPLING +#define MMEA3_LATENCY_SAMPLING__SAMPLER0_DRAM__SHIFT 0x0 +#define MMEA3_LATENCY_SAMPLING__SAMPLER1_DRAM__SHIFT 0x1 +#define MMEA3_LATENCY_SAMPLING__SAMPLER0_GMI__SHIFT 0x2 +#define MMEA3_LATENCY_SAMPLING__SAMPLER1_GMI__SHIFT 0x3 +#define MMEA3_LATENCY_SAMPLING__SAMPLER0_IO__SHIFT 0x4 +#define MMEA3_LATENCY_SAMPLING__SAMPLER1_IO__SHIFT 0x5 +#define MMEA3_LATENCY_SAMPLING__SAMPLER0_READ__SHIFT 0x6 +#define MMEA3_LATENCY_SAMPLING__SAMPLER1_READ__SHIFT 0x7 +#define MMEA3_LATENCY_SAMPLING__SAMPLER0_WRITE__SHIFT 0x8 +#define MMEA3_LATENCY_SAMPLING__SAMPLER1_WRITE__SHIFT 0x9 +#define MMEA3_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET__SHIFT 0xa +#define MMEA3_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET__SHIFT 0xb +#define MMEA3_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET__SHIFT 0xc +#define MMEA3_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET__SHIFT 0xd +#define MMEA3_LATENCY_SAMPLING__SAMPLER0_VC__SHIFT 0xe +#define MMEA3_LATENCY_SAMPLING__SAMPLER1_VC__SHIFT 0x16 +#define MMEA3_LATENCY_SAMPLING__SAMPLER0_DRAM_MASK 0x00000001L +#define MMEA3_LATENCY_SAMPLING__SAMPLER1_DRAM_MASK 0x00000002L +#define MMEA3_LATENCY_SAMPLING__SAMPLER0_GMI_MASK 0x00000004L +#define MMEA3_LATENCY_SAMPLING__SAMPLER1_GMI_MASK 0x00000008L +#define MMEA3_LATENCY_SAMPLING__SAMPLER0_IO_MASK 0x00000010L +#define MMEA3_LATENCY_SAMPLING__SAMPLER1_IO_MASK 0x00000020L +#define MMEA3_LATENCY_SAMPLING__SAMPLER0_READ_MASK 0x00000040L +#define MMEA3_LATENCY_SAMPLING__SAMPLER1_READ_MASK 0x00000080L +#define MMEA3_LATENCY_SAMPLING__SAMPLER0_WRITE_MASK 0x00000100L +#define MMEA3_LATENCY_SAMPLING__SAMPLER1_WRITE_MASK 0x00000200L +#define 
MMEA3_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET_MASK 0x00000400L +#define MMEA3_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET_MASK 0x00000800L +#define MMEA3_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET_MASK 0x00001000L +#define MMEA3_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET_MASK 0x00002000L +#define MMEA3_LATENCY_SAMPLING__SAMPLER0_VC_MASK 0x003FC000L +#define MMEA3_LATENCY_SAMPLING__SAMPLER1_VC_MASK 0x3FC00000L +//MMEA3_PERFCOUNTER_LO +#define MMEA3_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0 +#define MMEA3_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL +//MMEA3_PERFCOUNTER_HI +#define MMEA3_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0 +#define MMEA3_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10 +#define MMEA3_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL +#define MMEA3_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L +//MMEA3_PERFCOUNTER0_CFG +#define MMEA3_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0 +#define MMEA3_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8 +#define MMEA3_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18 +#define MMEA3_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c +#define MMEA3_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d +#define MMEA3_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL +#define MMEA3_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define MMEA3_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L +#define MMEA3_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L +#define MMEA3_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L +//MMEA3_PERFCOUNTER1_CFG +#define MMEA3_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0 +#define MMEA3_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8 +#define MMEA3_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18 +#define MMEA3_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c +#define MMEA3_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d +#define MMEA3_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL +#define MMEA3_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define MMEA3_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L +#define MMEA3_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L +#define MMEA3_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L +//MMEA3_PERFCOUNTER_RSLT_CNTL +#define MMEA3_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0 +#define MMEA3_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8 +#define MMEA3_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10 +#define MMEA3_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18 +#define MMEA3_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19 +#define MMEA3_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a +#define MMEA3_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL +#define MMEA3_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L +#define MMEA3_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L +#define MMEA3_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L +#define MMEA3_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L +#define MMEA3_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L +//MMEA3_EDC_CNT +#define MMEA3_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0 +#define MMEA3_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2 +#define MMEA3_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4 +#define MMEA3_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6 +#define MMEA3_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8 +#define MMEA3_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa +#define MMEA3_EDC_CNT__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc +#define MMEA3_EDC_CNT__RRET_TAGMEM_DED_COUNT__SHIFT 0xe +#define MMEA3_EDC_CNT__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10 +#define MMEA3_EDC_CNT__WRET_TAGMEM_DED_COUNT__SHIFT 0x12 +#define MMEA3_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x14 +#define MMEA3_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x16 +#define 
MMEA3_EDC_CNT__IORD_CMDMEM_SED_COUNT__SHIFT 0x18 +#define MMEA3_EDC_CNT__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1a +#define MMEA3_EDC_CNT__IOWR_DATAMEM_SED_COUNT__SHIFT 0x1c +#define MMEA3_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT_MASK 0x00000003L +#define MMEA3_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL +#define MMEA3_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L +#define MMEA3_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L +#define MMEA3_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L +#define MMEA3_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L +#define MMEA3_EDC_CNT__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L +#define MMEA3_EDC_CNT__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L +#define MMEA3_EDC_CNT__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L +#define MMEA3_EDC_CNT__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L +#define MMEA3_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x00300000L +#define MMEA3_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x00C00000L +#define MMEA3_EDC_CNT__IORD_CMDMEM_SED_COUNT_MASK 0x03000000L +#define MMEA3_EDC_CNT__IOWR_CMDMEM_SED_COUNT_MASK 0x0C000000L +#define MMEA3_EDC_CNT__IOWR_DATAMEM_SED_COUNT_MASK 0x30000000L +//MMEA3_EDC_CNT2 +#define MMEA3_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0 +#define MMEA3_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2 +#define MMEA3_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4 +#define MMEA3_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6 +#define MMEA3_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8 +#define MMEA3_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa +#define MMEA3_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc +#define MMEA3_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe +#define MMEA3_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L +#define MMEA3_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL +#define MMEA3_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L +#define MMEA3_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT_MASK 0x000000C0L +#define MMEA3_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L +#define MMEA3_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L +#define MMEA3_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L +#define MMEA3_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L +//MMEA3_DSM_CNTL +#define MMEA3_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 +#define MMEA3_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 +#define MMEA3_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x3 +#define MMEA3_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5 +#define MMEA3_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x6 +#define MMEA3_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8 +#define MMEA3_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0x9 +#define MMEA3_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb +#define MMEA3_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0xc +#define MMEA3_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe +#define MMEA3_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0xf +#define MMEA3_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11 +#define MMEA3_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x12 +#define MMEA3_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14 +#define MMEA3_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x15 +#define MMEA3_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x17 +#define MMEA3_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L +#define MMEA3_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L +#define MMEA3_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L +#define 
MMEA3_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L +#define MMEA3_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L +#define MMEA3_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L +#define MMEA3_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L +#define MMEA3_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L +#define MMEA3_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L +#define MMEA3_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L +#define MMEA3_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L +#define MMEA3_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L +#define MMEA3_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L +#define MMEA3_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L +#define MMEA3_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00600000L +#define MMEA3_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00800000L +//MMEA3_DSM_CNTLA +#define MMEA3_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 +#define MMEA3_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 +#define MMEA3_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x3 +#define MMEA3_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5 +#define MMEA3_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x6 +#define MMEA3_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8 +#define MMEA3_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x9 +#define MMEA3_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb +#define MMEA3_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0xc +#define MMEA3_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe +#define MMEA3_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0xf +#define MMEA3_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11 +#define MMEA3_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x12 +#define MMEA3_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14 +#define MMEA3_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L +#define MMEA3_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L +#define MMEA3_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L +#define MMEA3_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L +#define MMEA3_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L +#define MMEA3_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L +#define MMEA3_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L +#define MMEA3_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L +#define MMEA3_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L +#define MMEA3_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L +#define MMEA3_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L +#define MMEA3_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L +#define MMEA3_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L +#define MMEA3_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L +//MMEA3_DSM_CNTL2 +#define MMEA3_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x0 +#define MMEA3_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x2 +#define MMEA3_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x3 +#define MMEA3_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x5 +#define MMEA3_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x6 +#define MMEA3_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x8 +#define MMEA3_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0x9 
+#define MMEA3_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xb +#define MMEA3_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0xc +#define MMEA3_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xe +#define MMEA3_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0xf +#define MMEA3_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x11 +#define MMEA3_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x12 +#define MMEA3_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x14 +#define MMEA3_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x15 +#define MMEA3_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x17 +#define MMEA3_DSM_CNTL2__INJECT_DELAY__SHIFT 0x1a +#define MMEA3_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L +#define MMEA3_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000004L +#define MMEA3_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L +#define MMEA3_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000020L +#define MMEA3_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L +#define MMEA3_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00000100L +#define MMEA3_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L +#define MMEA3_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00000800L +#define MMEA3_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L +#define MMEA3_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00004000L +#define MMEA3_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L +#define MMEA3_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00020000L +#define MMEA3_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L +#define MMEA3_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00100000L +#define MMEA3_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00600000L +#define MMEA3_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00800000L +#define MMEA3_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L +//MMEA3_DSM_CNTL2A +#define MMEA3_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x0 +#define MMEA3_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x2 +#define MMEA3_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x3 +#define MMEA3_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x5 +#define MMEA3_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x6 +#define MMEA3_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x8 +#define MMEA3_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x9 +#define MMEA3_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0xb +#define MMEA3_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0xc +#define MMEA3_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0xe +#define MMEA3_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0xf +#define MMEA3_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x11 +#define MMEA3_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x12 +#define MMEA3_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x14 +#define MMEA3_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L +#define MMEA3_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000004L +#define MMEA3_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L +#define MMEA3_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000020L +#define MMEA3_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L +#define MMEA3_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000100L +#define MMEA3_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L +#define 
MMEA3_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000800L +#define MMEA3_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L +#define MMEA3_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00004000L +#define MMEA3_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L +#define MMEA3_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00020000L +#define MMEA3_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L +#define MMEA3_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00100000L +//MMEA3_CGTT_CLK_CTRL +#define MMEA3_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define MMEA3_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define MMEA3_CGTT_CLK_CTRL__SPARE0__SHIFT 0xc +#define MMEA3_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE__SHIFT 0x14 +#define MMEA3_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ__SHIFT 0x15 +#define MMEA3_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN__SHIFT 0x16 +#define MMEA3_CGTT_CLK_CTRL__SPARE1__SHIFT 0x17 +#define MMEA3_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define MMEA3_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE__SHIFT 0x1c +#define MMEA3_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ__SHIFT 0x1d +#define MMEA3_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN__SHIFT 0x1e +#define MMEA3_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER__SHIFT 0x1f +#define MMEA3_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define MMEA3_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define MMEA3_CGTT_CLK_CTRL__SPARE0_MASK 0x000FF000L +#define MMEA3_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE_MASK 0x00100000L +#define MMEA3_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ_MASK 0x00200000L +#define MMEA3_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN_MASK 0x00400000L +#define MMEA3_CGTT_CLK_CTRL__SPARE1_MASK 0x07800000L +#define MMEA3_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define MMEA3_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE_MASK 0x10000000L +#define MMEA3_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ_MASK 0x20000000L +#define MMEA3_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN_MASK 0x40000000L +#define MMEA3_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER_MASK 0x80000000L +//MMEA3_EDC_MODE +#define MMEA3_EDC_MODE__COUNT_FED_OUT__SHIFT 0x10 +#define MMEA3_EDC_MODE__GATE_FUE__SHIFT 0x11 +#define MMEA3_EDC_MODE__DED_MODE__SHIFT 0x14 +#define MMEA3_EDC_MODE__PROP_FED__SHIFT 0x1d +#define MMEA3_EDC_MODE__BYPASS__SHIFT 0x1f +#define MMEA3_EDC_MODE__COUNT_FED_OUT_MASK 0x00010000L +#define MMEA3_EDC_MODE__GATE_FUE_MASK 0x00020000L +#define MMEA3_EDC_MODE__DED_MODE_MASK 0x00300000L +#define MMEA3_EDC_MODE__PROP_FED_MASK 0x20000000L +#define MMEA3_EDC_MODE__BYPASS_MASK 0x80000000L +//MMEA3_ERR_STATUS +#define MMEA3_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0 +#define MMEA3_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT 0x4 +#define MMEA3_ERR_STATUS__SDP_RDRSP_DATASTATUS__SHIFT 0x8 +#define MMEA3_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT 0xa +#define MMEA3_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xb +#define MMEA3_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xc +#define MMEA3_ERR_STATUS__FUE_FLAG__SHIFT 0xd +#define MMEA3_ERR_STATUS__SDP_RDRSP_STATUS_MASK 0x0000000FL +#define MMEA3_ERR_STATUS__SDP_WRRSP_STATUS_MASK 0x000000F0L +#define MMEA3_ERR_STATUS__SDP_RDRSP_DATASTATUS_MASK 0x00000300L +#define MMEA3_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L +#define MMEA3_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00000800L +#define MMEA3_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00001000L +#define MMEA3_ERR_STATUS__FUE_FLAG_MASK 0x00002000L +//MMEA3_MISC2 +#define MMEA3_MISC2__CSGROUP_SWAP_IN_DRAM_ARB__SHIFT 0x0 +#define MMEA3_MISC2__CSGROUP_SWAP_IN_GMI_ARB__SHIFT 0x1 +#define 
MMEA3_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM__SHIFT 0x2 +#define MMEA3_MISC2__CSGRP_BURST_LIMIT_DATA_GMI__SHIFT 0x7 +#define MMEA3_MISC2__IO_RDWR_PRIORITY_ENABLE__SHIFT 0xc +#define MMEA3_MISC2__RRET_SWAP_MODE__SHIFT 0xd +#define MMEA3_MISC2__CSGROUP_SWAP_IN_DRAM_ARB_MASK 0x00000001L +#define MMEA3_MISC2__CSGROUP_SWAP_IN_GMI_ARB_MASK 0x00000002L +#define MMEA3_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM_MASK 0x0000007CL +#define MMEA3_MISC2__CSGRP_BURST_LIMIT_DATA_GMI_MASK 0x00000F80L +#define MMEA3_MISC2__IO_RDWR_PRIORITY_ENABLE_MASK 0x00001000L +#define MMEA3_MISC2__RRET_SWAP_MODE_MASK 0x00002000L +//MMEA3_ADDRDEC_SELECT +#define MMEA3_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_START__SHIFT 0x0 +#define MMEA3_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_END__SHIFT 0x5 +#define MMEA3_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_START__SHIFT 0xa +#define MMEA3_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_END__SHIFT 0xf +#define MMEA3_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_START_MASK 0x0000001FL +#define MMEA3_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_END_MASK 0x000003E0L +#define MMEA3_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_START_MASK 0x00007C00L +#define MMEA3_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_END_MASK 0x000F8000L +//MMEA3_EDC_CNT3 +#define MMEA3_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT__SHIFT 0x0 +#define MMEA3_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT__SHIFT 0x2 +#define MMEA3_EDC_CNT3__IORD_CMDMEM_DED_COUNT__SHIFT 0x4 +#define MMEA3_EDC_CNT3__IOWR_CMDMEM_DED_COUNT__SHIFT 0x6 +#define MMEA3_EDC_CNT3__IOWR_DATAMEM_DED_COUNT__SHIFT 0x8 +#define MMEA3_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT__SHIFT 0xa +#define MMEA3_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT__SHIFT 0xc +#define MMEA3_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT_MASK 0x00000003L +#define MMEA3_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT_MASK 0x0000000CL +#define MMEA3_EDC_CNT3__IORD_CMDMEM_DED_COUNT_MASK 0x00000030L +#define MMEA3_EDC_CNT3__IOWR_CMDMEM_DED_COUNT_MASK 0x000000C0L +#define MMEA3_EDC_CNT3__IOWR_DATAMEM_DED_COUNT_MASK 0x00000300L +#define MMEA3_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT_MASK 0x00000C00L +#define MMEA3_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT_MASK 0x00003000L + + +// addressBlock: mmhub_ea_mmeadec4 +//MMEA4_DRAM_RD_CLI2GRP_MAP0 +#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0 +#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2 +#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4 +#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6 +#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8 +#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa +#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc +#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe +#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10 +#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12 +#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14 +#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16 +#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18 +#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a +#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c +#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e +#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L +#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL +#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L +#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L +#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L +#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L +#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 
0x00003000L +#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L +#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L +#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L +#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L +#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L +#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L +#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L +#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L +#define MMEA4_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L +//MMEA4_DRAM_RD_CLI2GRP_MAP1 +#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0 +#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2 +#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4 +#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6 +#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8 +#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa +#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc +#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe +#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10 +#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12 +#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14 +#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16 +#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18 +#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a +#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c +#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e +#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L +#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL +#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L +#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L +#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L +#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L +#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L +#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L +#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L +#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L +#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L +#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L +#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L +#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L +#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L +#define MMEA4_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L +//MMEA4_DRAM_WR_CLI2GRP_MAP0 +#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0 +#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2 +#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4 +#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6 +#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8 +#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa +#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc +#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe +#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10 +#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12 +#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14 +#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16 +#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18 +#define MMEA4_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a +#define 
[diff body continues: auto-generated MMEA4 register field definitions (per-field SHIFT and MASK macros) covering the DRAM and GMI RD/WR CLI2GRP_MAP0/1 and GRP2VC_MAP registers, RD/WR_LAZY timers, RD/WR_CAM_CNTL, PAGE_BURST, the RD/WR priority AGE, QUEUING, FIXED, URGENCY, URGENCY_MASKING and QUANT_PRI1-3 registers, the ADDRNORM BASE/LIMIT/OFFSET_ADDR0-5, HOLE_CNTL and NP2_CHANNEL_CFG registers, and the ADDRDEC BANK_CFG, MISC_CFG, ADDR_HASH_BANK0-5/PC/PC2/CS0-1, HARVEST_ENABLE and ADDRDEC0_BASE_ADDR_CS0-3 registers]
MMEA4_ADDRDEC0_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L +#define MMEA4_ADDRDEC0_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA4_ADDRDEC0_BASE_ADDR_SECCS0 +#define MMEA4_ADDRDEC0_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0 +#define MMEA4_ADDRDEC0_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1 +#define MMEA4_ADDRDEC0_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L +#define MMEA4_ADDRDEC0_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA4_ADDRDEC0_BASE_ADDR_SECCS1 +#define MMEA4_ADDRDEC0_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0 +#define MMEA4_ADDRDEC0_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1 +#define MMEA4_ADDRDEC0_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L +#define MMEA4_ADDRDEC0_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA4_ADDRDEC0_BASE_ADDR_SECCS2 +#define MMEA4_ADDRDEC0_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0 +#define MMEA4_ADDRDEC0_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1 +#define MMEA4_ADDRDEC0_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L +#define MMEA4_ADDRDEC0_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA4_ADDRDEC0_BASE_ADDR_SECCS3 +#define MMEA4_ADDRDEC0_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0 +#define MMEA4_ADDRDEC0_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1 +#define MMEA4_ADDRDEC0_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L +#define MMEA4_ADDRDEC0_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA4_ADDRDEC0_ADDR_MASK_CS01 +#define MMEA4_ADDRDEC0_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1 +#define MMEA4_ADDRDEC0_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA4_ADDRDEC0_ADDR_MASK_CS23 +#define MMEA4_ADDRDEC0_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1 +#define MMEA4_ADDRDEC0_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA4_ADDRDEC0_ADDR_MASK_SECCS01 +#define MMEA4_ADDRDEC0_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1 +#define MMEA4_ADDRDEC0_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA4_ADDRDEC0_ADDR_MASK_SECCS23 +#define MMEA4_ADDRDEC0_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1 +#define MMEA4_ADDRDEC0_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA4_ADDRDEC0_ADDR_CFG_CS01 +#define MMEA4_ADDRDEC0_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA4_ADDRDEC0_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4 +#define MMEA4_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8 +#define MMEA4_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc +#define MMEA4_ADDRDEC0_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10 +#define MMEA4_ADDRDEC0_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14 +#define MMEA4_ADDRDEC0_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f +#define MMEA4_ADDRDEC0_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA4_ADDRDEC0_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L +#define MMEA4_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA4_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA4_ADDRDEC0_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L +#define MMEA4_ADDRDEC0_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L +#define MMEA4_ADDRDEC0_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L +//MMEA4_ADDRDEC0_ADDR_CFG_CS23 +#define MMEA4_ADDRDEC0_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA4_ADDRDEC0_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4 +#define MMEA4_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8 +#define MMEA4_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc +#define MMEA4_ADDRDEC0_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10 +#define MMEA4_ADDRDEC0_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14 +#define MMEA4_ADDRDEC0_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f +#define MMEA4_ADDRDEC0_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA4_ADDRDEC0_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L +#define MMEA4_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L +#define 
MMEA4_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA4_ADDRDEC0_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L +#define MMEA4_ADDRDEC0_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L +#define MMEA4_ADDRDEC0_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L +//MMEA4_ADDRDEC0_ADDR_SEL_CS01 +#define MMEA4_ADDRDEC0_ADDR_SEL_CS01__BANK0__SHIFT 0x0 +#define MMEA4_ADDRDEC0_ADDR_SEL_CS01__BANK1__SHIFT 0x4 +#define MMEA4_ADDRDEC0_ADDR_SEL_CS01__BANK2__SHIFT 0x8 +#define MMEA4_ADDRDEC0_ADDR_SEL_CS01__BANK3__SHIFT 0xc +#define MMEA4_ADDRDEC0_ADDR_SEL_CS01__BANK4__SHIFT 0x10 +#define MMEA4_ADDRDEC0_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18 +#define MMEA4_ADDRDEC0_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c +#define MMEA4_ADDRDEC0_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL +#define MMEA4_ADDRDEC0_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L +#define MMEA4_ADDRDEC0_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L +#define MMEA4_ADDRDEC0_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L +#define MMEA4_ADDRDEC0_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L +#define MMEA4_ADDRDEC0_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L +#define MMEA4_ADDRDEC0_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L +//MMEA4_ADDRDEC0_ADDR_SEL_CS23 +#define MMEA4_ADDRDEC0_ADDR_SEL_CS23__BANK0__SHIFT 0x0 +#define MMEA4_ADDRDEC0_ADDR_SEL_CS23__BANK1__SHIFT 0x4 +#define MMEA4_ADDRDEC0_ADDR_SEL_CS23__BANK2__SHIFT 0x8 +#define MMEA4_ADDRDEC0_ADDR_SEL_CS23__BANK3__SHIFT 0xc +#define MMEA4_ADDRDEC0_ADDR_SEL_CS23__BANK4__SHIFT 0x10 +#define MMEA4_ADDRDEC0_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18 +#define MMEA4_ADDRDEC0_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c +#define MMEA4_ADDRDEC0_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL +#define MMEA4_ADDRDEC0_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L +#define MMEA4_ADDRDEC0_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L +#define MMEA4_ADDRDEC0_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L +#define MMEA4_ADDRDEC0_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L +#define MMEA4_ADDRDEC0_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L +#define MMEA4_ADDRDEC0_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L +//MMEA4_ADDRDEC0_ADDR_SEL2_CS01 +#define MMEA4_ADDRDEC0_ADDR_SEL2_CS01__BANK5__SHIFT 0x0 +#define MMEA4_ADDRDEC0_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL +//MMEA4_ADDRDEC0_ADDR_SEL2_CS23 +#define MMEA4_ADDRDEC0_ADDR_SEL2_CS23__BANK5__SHIFT 0x0 +#define MMEA4_ADDRDEC0_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL +//MMEA4_ADDRDEC0_COL_SEL_LO_CS01 +#define MMEA4_ADDRDEC0_COL_SEL_LO_CS01__COL0__SHIFT 0x0 +#define MMEA4_ADDRDEC0_COL_SEL_LO_CS01__COL1__SHIFT 0x4 +#define MMEA4_ADDRDEC0_COL_SEL_LO_CS01__COL2__SHIFT 0x8 +#define MMEA4_ADDRDEC0_COL_SEL_LO_CS01__COL3__SHIFT 0xc +#define MMEA4_ADDRDEC0_COL_SEL_LO_CS01__COL4__SHIFT 0x10 +#define MMEA4_ADDRDEC0_COL_SEL_LO_CS01__COL5__SHIFT 0x14 +#define MMEA4_ADDRDEC0_COL_SEL_LO_CS01__COL6__SHIFT 0x18 +#define MMEA4_ADDRDEC0_COL_SEL_LO_CS01__COL7__SHIFT 0x1c +#define MMEA4_ADDRDEC0_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL +#define MMEA4_ADDRDEC0_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L +#define MMEA4_ADDRDEC0_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L +#define MMEA4_ADDRDEC0_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L +#define MMEA4_ADDRDEC0_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L +#define MMEA4_ADDRDEC0_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L +#define MMEA4_ADDRDEC0_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L +#define MMEA4_ADDRDEC0_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L +//MMEA4_ADDRDEC0_COL_SEL_LO_CS23 +#define MMEA4_ADDRDEC0_COL_SEL_LO_CS23__COL0__SHIFT 0x0 +#define MMEA4_ADDRDEC0_COL_SEL_LO_CS23__COL1__SHIFT 0x4 +#define MMEA4_ADDRDEC0_COL_SEL_LO_CS23__COL2__SHIFT 0x8 +#define MMEA4_ADDRDEC0_COL_SEL_LO_CS23__COL3__SHIFT 0xc 
+#define MMEA4_ADDRDEC0_COL_SEL_LO_CS23__COL4__SHIFT 0x10 +#define MMEA4_ADDRDEC0_COL_SEL_LO_CS23__COL5__SHIFT 0x14 +#define MMEA4_ADDRDEC0_COL_SEL_LO_CS23__COL6__SHIFT 0x18 +#define MMEA4_ADDRDEC0_COL_SEL_LO_CS23__COL7__SHIFT 0x1c +#define MMEA4_ADDRDEC0_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL +#define MMEA4_ADDRDEC0_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L +#define MMEA4_ADDRDEC0_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L +#define MMEA4_ADDRDEC0_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L +#define MMEA4_ADDRDEC0_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L +#define MMEA4_ADDRDEC0_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L +#define MMEA4_ADDRDEC0_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L +#define MMEA4_ADDRDEC0_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L +//MMEA4_ADDRDEC0_COL_SEL_HI_CS01 +#define MMEA4_ADDRDEC0_COL_SEL_HI_CS01__COL8__SHIFT 0x0 +#define MMEA4_ADDRDEC0_COL_SEL_HI_CS01__COL9__SHIFT 0x4 +#define MMEA4_ADDRDEC0_COL_SEL_HI_CS01__COL10__SHIFT 0x8 +#define MMEA4_ADDRDEC0_COL_SEL_HI_CS01__COL11__SHIFT 0xc +#define MMEA4_ADDRDEC0_COL_SEL_HI_CS01__COL12__SHIFT 0x10 +#define MMEA4_ADDRDEC0_COL_SEL_HI_CS01__COL13__SHIFT 0x14 +#define MMEA4_ADDRDEC0_COL_SEL_HI_CS01__COL14__SHIFT 0x18 +#define MMEA4_ADDRDEC0_COL_SEL_HI_CS01__COL15__SHIFT 0x1c +#define MMEA4_ADDRDEC0_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL +#define MMEA4_ADDRDEC0_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L +#define MMEA4_ADDRDEC0_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L +#define MMEA4_ADDRDEC0_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L +#define MMEA4_ADDRDEC0_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L +#define MMEA4_ADDRDEC0_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L +#define MMEA4_ADDRDEC0_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L +#define MMEA4_ADDRDEC0_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L +//MMEA4_ADDRDEC0_COL_SEL_HI_CS23 +#define MMEA4_ADDRDEC0_COL_SEL_HI_CS23__COL8__SHIFT 0x0 +#define MMEA4_ADDRDEC0_COL_SEL_HI_CS23__COL9__SHIFT 0x4 +#define MMEA4_ADDRDEC0_COL_SEL_HI_CS23__COL10__SHIFT 0x8 +#define MMEA4_ADDRDEC0_COL_SEL_HI_CS23__COL11__SHIFT 0xc +#define MMEA4_ADDRDEC0_COL_SEL_HI_CS23__COL12__SHIFT 0x10 +#define MMEA4_ADDRDEC0_COL_SEL_HI_CS23__COL13__SHIFT 0x14 +#define MMEA4_ADDRDEC0_COL_SEL_HI_CS23__COL14__SHIFT 0x18 +#define MMEA4_ADDRDEC0_COL_SEL_HI_CS23__COL15__SHIFT 0x1c +#define MMEA4_ADDRDEC0_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL +#define MMEA4_ADDRDEC0_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L +#define MMEA4_ADDRDEC0_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L +#define MMEA4_ADDRDEC0_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L +#define MMEA4_ADDRDEC0_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L +#define MMEA4_ADDRDEC0_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L +#define MMEA4_ADDRDEC0_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L +#define MMEA4_ADDRDEC0_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L +//MMEA4_ADDRDEC0_RM_SEL_CS01 +#define MMEA4_ADDRDEC0_RM_SEL_CS01__RM0__SHIFT 0x0 +#define MMEA4_ADDRDEC0_RM_SEL_CS01__RM1__SHIFT 0x4 +#define MMEA4_ADDRDEC0_RM_SEL_CS01__RM2__SHIFT 0x8 +#define MMEA4_ADDRDEC0_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc +#define MMEA4_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA4_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA4_ADDRDEC0_RM_SEL_CS01__RM0_MASK 0x0000000FL +#define MMEA4_ADDRDEC0_RM_SEL_CS01__RM1_MASK 0x000000F0L +#define MMEA4_ADDRDEC0_RM_SEL_CS01__RM2_MASK 0x00000F00L +#define MMEA4_ADDRDEC0_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA4_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA4_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA4_ADDRDEC0_RM_SEL_CS23 
+#define MMEA4_ADDRDEC0_RM_SEL_CS23__RM0__SHIFT 0x0 +#define MMEA4_ADDRDEC0_RM_SEL_CS23__RM1__SHIFT 0x4 +#define MMEA4_ADDRDEC0_RM_SEL_CS23__RM2__SHIFT 0x8 +#define MMEA4_ADDRDEC0_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc +#define MMEA4_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA4_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA4_ADDRDEC0_RM_SEL_CS23__RM0_MASK 0x0000000FL +#define MMEA4_ADDRDEC0_RM_SEL_CS23__RM1_MASK 0x000000F0L +#define MMEA4_ADDRDEC0_RM_SEL_CS23__RM2_MASK 0x00000F00L +#define MMEA4_ADDRDEC0_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA4_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA4_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA4_ADDRDEC0_RM_SEL_SECCS01 +#define MMEA4_ADDRDEC0_RM_SEL_SECCS01__RM0__SHIFT 0x0 +#define MMEA4_ADDRDEC0_RM_SEL_SECCS01__RM1__SHIFT 0x4 +#define MMEA4_ADDRDEC0_RM_SEL_SECCS01__RM2__SHIFT 0x8 +#define MMEA4_ADDRDEC0_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc +#define MMEA4_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA4_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA4_ADDRDEC0_RM_SEL_SECCS01__RM0_MASK 0x0000000FL +#define MMEA4_ADDRDEC0_RM_SEL_SECCS01__RM1_MASK 0x000000F0L +#define MMEA4_ADDRDEC0_RM_SEL_SECCS01__RM2_MASK 0x00000F00L +#define MMEA4_ADDRDEC0_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA4_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA4_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA4_ADDRDEC0_RM_SEL_SECCS23 +#define MMEA4_ADDRDEC0_RM_SEL_SECCS23__RM0__SHIFT 0x0 +#define MMEA4_ADDRDEC0_RM_SEL_SECCS23__RM1__SHIFT 0x4 +#define MMEA4_ADDRDEC0_RM_SEL_SECCS23__RM2__SHIFT 0x8 +#define MMEA4_ADDRDEC0_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc +#define MMEA4_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA4_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA4_ADDRDEC0_RM_SEL_SECCS23__RM0_MASK 0x0000000FL +#define MMEA4_ADDRDEC0_RM_SEL_SECCS23__RM1_MASK 0x000000F0L +#define MMEA4_ADDRDEC0_RM_SEL_SECCS23__RM2_MASK 0x00000F00L +#define MMEA4_ADDRDEC0_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA4_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA4_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA4_ADDRDEC1_BASE_ADDR_CS0 +#define MMEA4_ADDRDEC1_BASE_ADDR_CS0__CS_EN__SHIFT 0x0 +#define MMEA4_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1 +#define MMEA4_ADDRDEC1_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L +#define MMEA4_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA4_ADDRDEC1_BASE_ADDR_CS1 +#define MMEA4_ADDRDEC1_BASE_ADDR_CS1__CS_EN__SHIFT 0x0 +#define MMEA4_ADDRDEC1_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1 +#define MMEA4_ADDRDEC1_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L +#define MMEA4_ADDRDEC1_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA4_ADDRDEC1_BASE_ADDR_CS2 +#define MMEA4_ADDRDEC1_BASE_ADDR_CS2__CS_EN__SHIFT 0x0 +#define MMEA4_ADDRDEC1_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1 +#define MMEA4_ADDRDEC1_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L +#define MMEA4_ADDRDEC1_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA4_ADDRDEC1_BASE_ADDR_CS3 +#define MMEA4_ADDRDEC1_BASE_ADDR_CS3__CS_EN__SHIFT 0x0 +#define MMEA4_ADDRDEC1_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1 +#define MMEA4_ADDRDEC1_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L +#define MMEA4_ADDRDEC1_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA4_ADDRDEC1_BASE_ADDR_SECCS0 +#define MMEA4_ADDRDEC1_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0 
+#define MMEA4_ADDRDEC1_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1 +#define MMEA4_ADDRDEC1_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L +#define MMEA4_ADDRDEC1_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA4_ADDRDEC1_BASE_ADDR_SECCS1 +#define MMEA4_ADDRDEC1_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0 +#define MMEA4_ADDRDEC1_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1 +#define MMEA4_ADDRDEC1_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L +#define MMEA4_ADDRDEC1_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA4_ADDRDEC1_BASE_ADDR_SECCS2 +#define MMEA4_ADDRDEC1_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0 +#define MMEA4_ADDRDEC1_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1 +#define MMEA4_ADDRDEC1_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L +#define MMEA4_ADDRDEC1_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA4_ADDRDEC1_BASE_ADDR_SECCS3 +#define MMEA4_ADDRDEC1_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0 +#define MMEA4_ADDRDEC1_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1 +#define MMEA4_ADDRDEC1_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L +#define MMEA4_ADDRDEC1_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA4_ADDRDEC1_ADDR_MASK_CS01 +#define MMEA4_ADDRDEC1_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1 +#define MMEA4_ADDRDEC1_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA4_ADDRDEC1_ADDR_MASK_CS23 +#define MMEA4_ADDRDEC1_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1 +#define MMEA4_ADDRDEC1_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA4_ADDRDEC1_ADDR_MASK_SECCS01 +#define MMEA4_ADDRDEC1_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1 +#define MMEA4_ADDRDEC1_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA4_ADDRDEC1_ADDR_MASK_SECCS23 +#define MMEA4_ADDRDEC1_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1 +#define MMEA4_ADDRDEC1_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA4_ADDRDEC1_ADDR_CFG_CS01 +#define MMEA4_ADDRDEC1_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA4_ADDRDEC1_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4 +#define MMEA4_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8 +#define MMEA4_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc +#define MMEA4_ADDRDEC1_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10 +#define MMEA4_ADDRDEC1_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14 +#define MMEA4_ADDRDEC1_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f +#define MMEA4_ADDRDEC1_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA4_ADDRDEC1_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L +#define MMEA4_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA4_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA4_ADDRDEC1_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L +#define MMEA4_ADDRDEC1_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L +#define MMEA4_ADDRDEC1_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L +//MMEA4_ADDRDEC1_ADDR_CFG_CS23 +#define MMEA4_ADDRDEC1_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA4_ADDRDEC1_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4 +#define MMEA4_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8 +#define MMEA4_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc +#define MMEA4_ADDRDEC1_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10 +#define MMEA4_ADDRDEC1_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14 +#define MMEA4_ADDRDEC1_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f +#define MMEA4_ADDRDEC1_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA4_ADDRDEC1_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L +#define MMEA4_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA4_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA4_ADDRDEC1_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L +#define MMEA4_ADDRDEC1_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L +#define 
MMEA4_ADDRDEC1_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L +//MMEA4_ADDRDEC1_ADDR_SEL_CS01 +#define MMEA4_ADDRDEC1_ADDR_SEL_CS01__BANK0__SHIFT 0x0 +#define MMEA4_ADDRDEC1_ADDR_SEL_CS01__BANK1__SHIFT 0x4 +#define MMEA4_ADDRDEC1_ADDR_SEL_CS01__BANK2__SHIFT 0x8 +#define MMEA4_ADDRDEC1_ADDR_SEL_CS01__BANK3__SHIFT 0xc +#define MMEA4_ADDRDEC1_ADDR_SEL_CS01__BANK4__SHIFT 0x10 +#define MMEA4_ADDRDEC1_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18 +#define MMEA4_ADDRDEC1_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c +#define MMEA4_ADDRDEC1_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL +#define MMEA4_ADDRDEC1_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L +#define MMEA4_ADDRDEC1_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L +#define MMEA4_ADDRDEC1_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L +#define MMEA4_ADDRDEC1_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L +#define MMEA4_ADDRDEC1_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L +#define MMEA4_ADDRDEC1_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L +//MMEA4_ADDRDEC1_ADDR_SEL_CS23 +#define MMEA4_ADDRDEC1_ADDR_SEL_CS23__BANK0__SHIFT 0x0 +#define MMEA4_ADDRDEC1_ADDR_SEL_CS23__BANK1__SHIFT 0x4 +#define MMEA4_ADDRDEC1_ADDR_SEL_CS23__BANK2__SHIFT 0x8 +#define MMEA4_ADDRDEC1_ADDR_SEL_CS23__BANK3__SHIFT 0xc +#define MMEA4_ADDRDEC1_ADDR_SEL_CS23__BANK4__SHIFT 0x10 +#define MMEA4_ADDRDEC1_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18 +#define MMEA4_ADDRDEC1_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c +#define MMEA4_ADDRDEC1_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL +#define MMEA4_ADDRDEC1_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L +#define MMEA4_ADDRDEC1_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L +#define MMEA4_ADDRDEC1_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L +#define MMEA4_ADDRDEC1_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L +#define MMEA4_ADDRDEC1_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L +#define MMEA4_ADDRDEC1_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L +//MMEA4_ADDRDEC1_ADDR_SEL2_CS01 +#define MMEA4_ADDRDEC1_ADDR_SEL2_CS01__BANK5__SHIFT 0x0 +#define MMEA4_ADDRDEC1_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL +//MMEA4_ADDRDEC1_ADDR_SEL2_CS23 +#define MMEA4_ADDRDEC1_ADDR_SEL2_CS23__BANK5__SHIFT 0x0 +#define MMEA4_ADDRDEC1_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL +//MMEA4_ADDRDEC1_COL_SEL_LO_CS01 +#define MMEA4_ADDRDEC1_COL_SEL_LO_CS01__COL0__SHIFT 0x0 +#define MMEA4_ADDRDEC1_COL_SEL_LO_CS01__COL1__SHIFT 0x4 +#define MMEA4_ADDRDEC1_COL_SEL_LO_CS01__COL2__SHIFT 0x8 +#define MMEA4_ADDRDEC1_COL_SEL_LO_CS01__COL3__SHIFT 0xc +#define MMEA4_ADDRDEC1_COL_SEL_LO_CS01__COL4__SHIFT 0x10 +#define MMEA4_ADDRDEC1_COL_SEL_LO_CS01__COL5__SHIFT 0x14 +#define MMEA4_ADDRDEC1_COL_SEL_LO_CS01__COL6__SHIFT 0x18 +#define MMEA4_ADDRDEC1_COL_SEL_LO_CS01__COL7__SHIFT 0x1c +#define MMEA4_ADDRDEC1_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL +#define MMEA4_ADDRDEC1_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L +#define MMEA4_ADDRDEC1_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L +#define MMEA4_ADDRDEC1_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L +#define MMEA4_ADDRDEC1_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L +#define MMEA4_ADDRDEC1_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L +#define MMEA4_ADDRDEC1_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L +#define MMEA4_ADDRDEC1_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L +//MMEA4_ADDRDEC1_COL_SEL_LO_CS23 +#define MMEA4_ADDRDEC1_COL_SEL_LO_CS23__COL0__SHIFT 0x0 +#define MMEA4_ADDRDEC1_COL_SEL_LO_CS23__COL1__SHIFT 0x4 +#define MMEA4_ADDRDEC1_COL_SEL_LO_CS23__COL2__SHIFT 0x8 +#define MMEA4_ADDRDEC1_COL_SEL_LO_CS23__COL3__SHIFT 0xc +#define MMEA4_ADDRDEC1_COL_SEL_LO_CS23__COL4__SHIFT 0x10 +#define MMEA4_ADDRDEC1_COL_SEL_LO_CS23__COL5__SHIFT 0x14 +#define MMEA4_ADDRDEC1_COL_SEL_LO_CS23__COL6__SHIFT 0x18 +#define 
MMEA4_ADDRDEC1_COL_SEL_LO_CS23__COL7__SHIFT 0x1c +#define MMEA4_ADDRDEC1_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL +#define MMEA4_ADDRDEC1_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L +#define MMEA4_ADDRDEC1_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L +#define MMEA4_ADDRDEC1_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L +#define MMEA4_ADDRDEC1_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L +#define MMEA4_ADDRDEC1_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L +#define MMEA4_ADDRDEC1_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L +#define MMEA4_ADDRDEC1_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L +//MMEA4_ADDRDEC1_COL_SEL_HI_CS01 +#define MMEA4_ADDRDEC1_COL_SEL_HI_CS01__COL8__SHIFT 0x0 +#define MMEA4_ADDRDEC1_COL_SEL_HI_CS01__COL9__SHIFT 0x4 +#define MMEA4_ADDRDEC1_COL_SEL_HI_CS01__COL10__SHIFT 0x8 +#define MMEA4_ADDRDEC1_COL_SEL_HI_CS01__COL11__SHIFT 0xc +#define MMEA4_ADDRDEC1_COL_SEL_HI_CS01__COL12__SHIFT 0x10 +#define MMEA4_ADDRDEC1_COL_SEL_HI_CS01__COL13__SHIFT 0x14 +#define MMEA4_ADDRDEC1_COL_SEL_HI_CS01__COL14__SHIFT 0x18 +#define MMEA4_ADDRDEC1_COL_SEL_HI_CS01__COL15__SHIFT 0x1c +#define MMEA4_ADDRDEC1_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL +#define MMEA4_ADDRDEC1_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L +#define MMEA4_ADDRDEC1_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L +#define MMEA4_ADDRDEC1_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L +#define MMEA4_ADDRDEC1_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L +#define MMEA4_ADDRDEC1_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L +#define MMEA4_ADDRDEC1_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L +#define MMEA4_ADDRDEC1_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L +//MMEA4_ADDRDEC1_COL_SEL_HI_CS23 +#define MMEA4_ADDRDEC1_COL_SEL_HI_CS23__COL8__SHIFT 0x0 +#define MMEA4_ADDRDEC1_COL_SEL_HI_CS23__COL9__SHIFT 0x4 +#define MMEA4_ADDRDEC1_COL_SEL_HI_CS23__COL10__SHIFT 0x8 +#define MMEA4_ADDRDEC1_COL_SEL_HI_CS23__COL11__SHIFT 0xc +#define MMEA4_ADDRDEC1_COL_SEL_HI_CS23__COL12__SHIFT 0x10 +#define MMEA4_ADDRDEC1_COL_SEL_HI_CS23__COL13__SHIFT 0x14 +#define MMEA4_ADDRDEC1_COL_SEL_HI_CS23__COL14__SHIFT 0x18 +#define MMEA4_ADDRDEC1_COL_SEL_HI_CS23__COL15__SHIFT 0x1c +#define MMEA4_ADDRDEC1_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL +#define MMEA4_ADDRDEC1_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L +#define MMEA4_ADDRDEC1_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L +#define MMEA4_ADDRDEC1_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L +#define MMEA4_ADDRDEC1_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L +#define MMEA4_ADDRDEC1_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L +#define MMEA4_ADDRDEC1_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L +#define MMEA4_ADDRDEC1_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L +//MMEA4_ADDRDEC1_RM_SEL_CS01 +#define MMEA4_ADDRDEC1_RM_SEL_CS01__RM0__SHIFT 0x0 +#define MMEA4_ADDRDEC1_RM_SEL_CS01__RM1__SHIFT 0x4 +#define MMEA4_ADDRDEC1_RM_SEL_CS01__RM2__SHIFT 0x8 +#define MMEA4_ADDRDEC1_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc +#define MMEA4_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA4_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA4_ADDRDEC1_RM_SEL_CS01__RM0_MASK 0x0000000FL +#define MMEA4_ADDRDEC1_RM_SEL_CS01__RM1_MASK 0x000000F0L +#define MMEA4_ADDRDEC1_RM_SEL_CS01__RM2_MASK 0x00000F00L +#define MMEA4_ADDRDEC1_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA4_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA4_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA4_ADDRDEC1_RM_SEL_CS23 +#define MMEA4_ADDRDEC1_RM_SEL_CS23__RM0__SHIFT 0x0 +#define MMEA4_ADDRDEC1_RM_SEL_CS23__RM1__SHIFT 0x4 +#define MMEA4_ADDRDEC1_RM_SEL_CS23__RM2__SHIFT 0x8 +#define 
MMEA4_ADDRDEC1_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc +#define MMEA4_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA4_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA4_ADDRDEC1_RM_SEL_CS23__RM0_MASK 0x0000000FL +#define MMEA4_ADDRDEC1_RM_SEL_CS23__RM1_MASK 0x000000F0L +#define MMEA4_ADDRDEC1_RM_SEL_CS23__RM2_MASK 0x00000F00L +#define MMEA4_ADDRDEC1_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA4_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA4_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA4_ADDRDEC1_RM_SEL_SECCS01 +#define MMEA4_ADDRDEC1_RM_SEL_SECCS01__RM0__SHIFT 0x0 +#define MMEA4_ADDRDEC1_RM_SEL_SECCS01__RM1__SHIFT 0x4 +#define MMEA4_ADDRDEC1_RM_SEL_SECCS01__RM2__SHIFT 0x8 +#define MMEA4_ADDRDEC1_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc +#define MMEA4_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA4_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA4_ADDRDEC1_RM_SEL_SECCS01__RM0_MASK 0x0000000FL +#define MMEA4_ADDRDEC1_RM_SEL_SECCS01__RM1_MASK 0x000000F0L +#define MMEA4_ADDRDEC1_RM_SEL_SECCS01__RM2_MASK 0x00000F00L +#define MMEA4_ADDRDEC1_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA4_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA4_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA4_ADDRDEC1_RM_SEL_SECCS23 +#define MMEA4_ADDRDEC1_RM_SEL_SECCS23__RM0__SHIFT 0x0 +#define MMEA4_ADDRDEC1_RM_SEL_SECCS23__RM1__SHIFT 0x4 +#define MMEA4_ADDRDEC1_RM_SEL_SECCS23__RM2__SHIFT 0x8 +#define MMEA4_ADDRDEC1_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc +#define MMEA4_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA4_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA4_ADDRDEC1_RM_SEL_SECCS23__RM0_MASK 0x0000000FL +#define MMEA4_ADDRDEC1_RM_SEL_SECCS23__RM1_MASK 0x000000F0L +#define MMEA4_ADDRDEC1_RM_SEL_SECCS23__RM2_MASK 0x00000F00L +#define MMEA4_ADDRDEC1_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA4_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA4_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA4_ADDRDEC2_BASE_ADDR_CS0 +#define MMEA4_ADDRDEC2_BASE_ADDR_CS0__CS_EN__SHIFT 0x0 +#define MMEA4_ADDRDEC2_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1 +#define MMEA4_ADDRDEC2_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L +#define MMEA4_ADDRDEC2_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA4_ADDRDEC2_BASE_ADDR_CS1 +#define MMEA4_ADDRDEC2_BASE_ADDR_CS1__CS_EN__SHIFT 0x0 +#define MMEA4_ADDRDEC2_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1 +#define MMEA4_ADDRDEC2_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L +#define MMEA4_ADDRDEC2_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA4_ADDRDEC2_BASE_ADDR_CS2 +#define MMEA4_ADDRDEC2_BASE_ADDR_CS2__CS_EN__SHIFT 0x0 +#define MMEA4_ADDRDEC2_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1 +#define MMEA4_ADDRDEC2_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L +#define MMEA4_ADDRDEC2_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA4_ADDRDEC2_BASE_ADDR_CS3 +#define MMEA4_ADDRDEC2_BASE_ADDR_CS3__CS_EN__SHIFT 0x0 +#define MMEA4_ADDRDEC2_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1 +#define MMEA4_ADDRDEC2_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L +#define MMEA4_ADDRDEC2_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA4_ADDRDEC2_BASE_ADDR_SECCS0 +#define MMEA4_ADDRDEC2_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0 +#define MMEA4_ADDRDEC2_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1 +#define MMEA4_ADDRDEC2_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L +#define 
MMEA4_ADDRDEC2_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA4_ADDRDEC2_BASE_ADDR_SECCS1 +#define MMEA4_ADDRDEC2_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0 +#define MMEA4_ADDRDEC2_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1 +#define MMEA4_ADDRDEC2_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L +#define MMEA4_ADDRDEC2_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA4_ADDRDEC2_BASE_ADDR_SECCS2 +#define MMEA4_ADDRDEC2_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0 +#define MMEA4_ADDRDEC2_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1 +#define MMEA4_ADDRDEC2_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L +#define MMEA4_ADDRDEC2_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA4_ADDRDEC2_BASE_ADDR_SECCS3 +#define MMEA4_ADDRDEC2_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0 +#define MMEA4_ADDRDEC2_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1 +#define MMEA4_ADDRDEC2_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L +#define MMEA4_ADDRDEC2_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA4_ADDRDEC2_ADDR_MASK_CS01 +#define MMEA4_ADDRDEC2_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1 +#define MMEA4_ADDRDEC2_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA4_ADDRDEC2_ADDR_MASK_CS23 +#define MMEA4_ADDRDEC2_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1 +#define MMEA4_ADDRDEC2_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA4_ADDRDEC2_ADDR_MASK_SECCS01 +#define MMEA4_ADDRDEC2_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1 +#define MMEA4_ADDRDEC2_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA4_ADDRDEC2_ADDR_MASK_SECCS23 +#define MMEA4_ADDRDEC2_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1 +#define MMEA4_ADDRDEC2_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA4_ADDRDEC2_ADDR_CFG_CS01 +#define MMEA4_ADDRDEC2_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA4_ADDRDEC2_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4 +#define MMEA4_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8 +#define MMEA4_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc +#define MMEA4_ADDRDEC2_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10 +#define MMEA4_ADDRDEC2_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14 +#define MMEA4_ADDRDEC2_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f +#define MMEA4_ADDRDEC2_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA4_ADDRDEC2_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L +#define MMEA4_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA4_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA4_ADDRDEC2_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L +#define MMEA4_ADDRDEC2_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L +#define MMEA4_ADDRDEC2_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L +//MMEA4_ADDRDEC2_ADDR_CFG_CS23 +#define MMEA4_ADDRDEC2_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA4_ADDRDEC2_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4 +#define MMEA4_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8 +#define MMEA4_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc +#define MMEA4_ADDRDEC2_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10 +#define MMEA4_ADDRDEC2_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14 +#define MMEA4_ADDRDEC2_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f +#define MMEA4_ADDRDEC2_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA4_ADDRDEC2_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L +#define MMEA4_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA4_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA4_ADDRDEC2_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L +#define MMEA4_ADDRDEC2_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L +#define MMEA4_ADDRDEC2_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L +//MMEA4_ADDRDEC2_ADDR_SEL_CS01 +#define MMEA4_ADDRDEC2_ADDR_SEL_CS01__BANK0__SHIFT 0x0 +#define 
MMEA4_ADDRDEC2_ADDR_SEL_CS01__BANK1__SHIFT 0x4 +#define MMEA4_ADDRDEC2_ADDR_SEL_CS01__BANK2__SHIFT 0x8 +#define MMEA4_ADDRDEC2_ADDR_SEL_CS01__BANK3__SHIFT 0xc +#define MMEA4_ADDRDEC2_ADDR_SEL_CS01__BANK4__SHIFT 0x10 +#define MMEA4_ADDRDEC2_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18 +#define MMEA4_ADDRDEC2_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c +#define MMEA4_ADDRDEC2_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL +#define MMEA4_ADDRDEC2_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L +#define MMEA4_ADDRDEC2_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L +#define MMEA4_ADDRDEC2_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L +#define MMEA4_ADDRDEC2_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L +#define MMEA4_ADDRDEC2_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L +#define MMEA4_ADDRDEC2_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L +//MMEA4_ADDRDEC2_ADDR_SEL_CS23 +#define MMEA4_ADDRDEC2_ADDR_SEL_CS23__BANK0__SHIFT 0x0 +#define MMEA4_ADDRDEC2_ADDR_SEL_CS23__BANK1__SHIFT 0x4 +#define MMEA4_ADDRDEC2_ADDR_SEL_CS23__BANK2__SHIFT 0x8 +#define MMEA4_ADDRDEC2_ADDR_SEL_CS23__BANK3__SHIFT 0xc +#define MMEA4_ADDRDEC2_ADDR_SEL_CS23__BANK4__SHIFT 0x10 +#define MMEA4_ADDRDEC2_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18 +#define MMEA4_ADDRDEC2_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c +#define MMEA4_ADDRDEC2_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL +#define MMEA4_ADDRDEC2_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L +#define MMEA4_ADDRDEC2_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L +#define MMEA4_ADDRDEC2_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L +#define MMEA4_ADDRDEC2_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L +#define MMEA4_ADDRDEC2_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L +#define MMEA4_ADDRDEC2_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L +//MMEA4_ADDRDEC2_ADDR_SEL2_CS01 +#define MMEA4_ADDRDEC2_ADDR_SEL2_CS01__BANK5__SHIFT 0x0 +#define MMEA4_ADDRDEC2_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL +//MMEA4_ADDRDEC2_ADDR_SEL2_CS23 +#define MMEA4_ADDRDEC2_ADDR_SEL2_CS23__BANK5__SHIFT 0x0 +#define MMEA4_ADDRDEC2_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL +//MMEA4_ADDRDEC2_COL_SEL_LO_CS01 +#define MMEA4_ADDRDEC2_COL_SEL_LO_CS01__COL0__SHIFT 0x0 +#define MMEA4_ADDRDEC2_COL_SEL_LO_CS01__COL1__SHIFT 0x4 +#define MMEA4_ADDRDEC2_COL_SEL_LO_CS01__COL2__SHIFT 0x8 +#define MMEA4_ADDRDEC2_COL_SEL_LO_CS01__COL3__SHIFT 0xc +#define MMEA4_ADDRDEC2_COL_SEL_LO_CS01__COL4__SHIFT 0x10 +#define MMEA4_ADDRDEC2_COL_SEL_LO_CS01__COL5__SHIFT 0x14 +#define MMEA4_ADDRDEC2_COL_SEL_LO_CS01__COL6__SHIFT 0x18 +#define MMEA4_ADDRDEC2_COL_SEL_LO_CS01__COL7__SHIFT 0x1c +#define MMEA4_ADDRDEC2_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL +#define MMEA4_ADDRDEC2_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L +#define MMEA4_ADDRDEC2_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L +#define MMEA4_ADDRDEC2_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L +#define MMEA4_ADDRDEC2_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L +#define MMEA4_ADDRDEC2_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L +#define MMEA4_ADDRDEC2_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L +#define MMEA4_ADDRDEC2_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L +//MMEA4_ADDRDEC2_COL_SEL_LO_CS23 +#define MMEA4_ADDRDEC2_COL_SEL_LO_CS23__COL0__SHIFT 0x0 +#define MMEA4_ADDRDEC2_COL_SEL_LO_CS23__COL1__SHIFT 0x4 +#define MMEA4_ADDRDEC2_COL_SEL_LO_CS23__COL2__SHIFT 0x8 +#define MMEA4_ADDRDEC2_COL_SEL_LO_CS23__COL3__SHIFT 0xc +#define MMEA4_ADDRDEC2_COL_SEL_LO_CS23__COL4__SHIFT 0x10 +#define MMEA4_ADDRDEC2_COL_SEL_LO_CS23__COL5__SHIFT 0x14 +#define MMEA4_ADDRDEC2_COL_SEL_LO_CS23__COL6__SHIFT 0x18 +#define MMEA4_ADDRDEC2_COL_SEL_LO_CS23__COL7__SHIFT 0x1c +#define MMEA4_ADDRDEC2_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL +#define MMEA4_ADDRDEC2_COL_SEL_LO_CS23__COL1_MASK 
0x000000F0L +#define MMEA4_ADDRDEC2_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L +#define MMEA4_ADDRDEC2_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L +#define MMEA4_ADDRDEC2_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L +#define MMEA4_ADDRDEC2_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L +#define MMEA4_ADDRDEC2_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L +#define MMEA4_ADDRDEC2_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L +//MMEA4_ADDRDEC2_COL_SEL_HI_CS01 +#define MMEA4_ADDRDEC2_COL_SEL_HI_CS01__COL8__SHIFT 0x0 +#define MMEA4_ADDRDEC2_COL_SEL_HI_CS01__COL9__SHIFT 0x4 +#define MMEA4_ADDRDEC2_COL_SEL_HI_CS01__COL10__SHIFT 0x8 +#define MMEA4_ADDRDEC2_COL_SEL_HI_CS01__COL11__SHIFT 0xc +#define MMEA4_ADDRDEC2_COL_SEL_HI_CS01__COL12__SHIFT 0x10 +#define MMEA4_ADDRDEC2_COL_SEL_HI_CS01__COL13__SHIFT 0x14 +#define MMEA4_ADDRDEC2_COL_SEL_HI_CS01__COL14__SHIFT 0x18 +#define MMEA4_ADDRDEC2_COL_SEL_HI_CS01__COL15__SHIFT 0x1c +#define MMEA4_ADDRDEC2_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL +#define MMEA4_ADDRDEC2_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L +#define MMEA4_ADDRDEC2_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L +#define MMEA4_ADDRDEC2_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L +#define MMEA4_ADDRDEC2_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L +#define MMEA4_ADDRDEC2_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L +#define MMEA4_ADDRDEC2_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L +#define MMEA4_ADDRDEC2_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L +//MMEA4_ADDRDEC2_COL_SEL_HI_CS23 +#define MMEA4_ADDRDEC2_COL_SEL_HI_CS23__COL8__SHIFT 0x0 +#define MMEA4_ADDRDEC2_COL_SEL_HI_CS23__COL9__SHIFT 0x4 +#define MMEA4_ADDRDEC2_COL_SEL_HI_CS23__COL10__SHIFT 0x8 +#define MMEA4_ADDRDEC2_COL_SEL_HI_CS23__COL11__SHIFT 0xc +#define MMEA4_ADDRDEC2_COL_SEL_HI_CS23__COL12__SHIFT 0x10 +#define MMEA4_ADDRDEC2_COL_SEL_HI_CS23__COL13__SHIFT 0x14 +#define MMEA4_ADDRDEC2_COL_SEL_HI_CS23__COL14__SHIFT 0x18 +#define MMEA4_ADDRDEC2_COL_SEL_HI_CS23__COL15__SHIFT 0x1c +#define MMEA4_ADDRDEC2_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL +#define MMEA4_ADDRDEC2_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L +#define MMEA4_ADDRDEC2_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L +#define MMEA4_ADDRDEC2_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L +#define MMEA4_ADDRDEC2_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L +#define MMEA4_ADDRDEC2_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L +#define MMEA4_ADDRDEC2_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L +#define MMEA4_ADDRDEC2_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L +//MMEA4_ADDRDEC2_RM_SEL_CS01 +#define MMEA4_ADDRDEC2_RM_SEL_CS01__RM0__SHIFT 0x0 +#define MMEA4_ADDRDEC2_RM_SEL_CS01__RM1__SHIFT 0x4 +#define MMEA4_ADDRDEC2_RM_SEL_CS01__RM2__SHIFT 0x8 +#define MMEA4_ADDRDEC2_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc +#define MMEA4_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA4_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA4_ADDRDEC2_RM_SEL_CS01__RM0_MASK 0x0000000FL +#define MMEA4_ADDRDEC2_RM_SEL_CS01__RM1_MASK 0x000000F0L +#define MMEA4_ADDRDEC2_RM_SEL_CS01__RM2_MASK 0x00000F00L +#define MMEA4_ADDRDEC2_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA4_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA4_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA4_ADDRDEC2_RM_SEL_CS23 +#define MMEA4_ADDRDEC2_RM_SEL_CS23__RM0__SHIFT 0x0 +#define MMEA4_ADDRDEC2_RM_SEL_CS23__RM1__SHIFT 0x4 +#define MMEA4_ADDRDEC2_RM_SEL_CS23__RM2__SHIFT 0x8 +#define MMEA4_ADDRDEC2_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc +#define MMEA4_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA4_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 
0x12 +#define MMEA4_ADDRDEC2_RM_SEL_CS23__RM0_MASK 0x0000000FL +#define MMEA4_ADDRDEC2_RM_SEL_CS23__RM1_MASK 0x000000F0L +#define MMEA4_ADDRDEC2_RM_SEL_CS23__RM2_MASK 0x00000F00L +#define MMEA4_ADDRDEC2_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA4_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA4_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA4_ADDRDEC2_RM_SEL_SECCS01 +#define MMEA4_ADDRDEC2_RM_SEL_SECCS01__RM0__SHIFT 0x0 +#define MMEA4_ADDRDEC2_RM_SEL_SECCS01__RM1__SHIFT 0x4 +#define MMEA4_ADDRDEC2_RM_SEL_SECCS01__RM2__SHIFT 0x8 +#define MMEA4_ADDRDEC2_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc +#define MMEA4_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA4_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA4_ADDRDEC2_RM_SEL_SECCS01__RM0_MASK 0x0000000FL +#define MMEA4_ADDRDEC2_RM_SEL_SECCS01__RM1_MASK 0x000000F0L +#define MMEA4_ADDRDEC2_RM_SEL_SECCS01__RM2_MASK 0x00000F00L +#define MMEA4_ADDRDEC2_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA4_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA4_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA4_ADDRDEC2_RM_SEL_SECCS23 +#define MMEA4_ADDRDEC2_RM_SEL_SECCS23__RM0__SHIFT 0x0 +#define MMEA4_ADDRDEC2_RM_SEL_SECCS23__RM1__SHIFT 0x4 +#define MMEA4_ADDRDEC2_RM_SEL_SECCS23__RM2__SHIFT 0x8 +#define MMEA4_ADDRDEC2_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc +#define MMEA4_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA4_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA4_ADDRDEC2_RM_SEL_SECCS23__RM0_MASK 0x0000000FL +#define MMEA4_ADDRDEC2_RM_SEL_SECCS23__RM1_MASK 0x000000F0L +#define MMEA4_ADDRDEC2_RM_SEL_SECCS23__RM2_MASK 0x00000F00L +#define MMEA4_ADDRDEC2_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA4_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA4_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA4_ADDRNORMDRAM_GLOBAL_CNTL +#define MMEA4_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K__SHIFT 0x14 +#define MMEA4_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M__SHIFT 0x15 +#define MMEA4_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G__SHIFT 0x16 +#define MMEA4_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K_MASK 0x00100000L +#define MMEA4_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M_MASK 0x00200000L +#define MMEA4_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G_MASK 0x00400000L +//MMEA4_ADDRNORMGMI_GLOBAL_CNTL +#define MMEA4_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K__SHIFT 0x14 +#define MMEA4_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M__SHIFT 0x15 +#define MMEA4_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G__SHIFT 0x16 +#define MMEA4_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K_MASK 0x00100000L +#define MMEA4_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M_MASK 0x00200000L +#define MMEA4_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G_MASK 0x00400000L +//MMEA4_IO_RD_CLI2GRP_MAP0 +#define MMEA4_IO_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0 +#define MMEA4_IO_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2 +#define MMEA4_IO_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4 +#define MMEA4_IO_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6 +#define MMEA4_IO_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8 +#define MMEA4_IO_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa +#define MMEA4_IO_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc +#define MMEA4_IO_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe +#define MMEA4_IO_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10 +#define 
MMEA4_IO_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12 +#define MMEA4_IO_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14 +#define MMEA4_IO_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16 +#define MMEA4_IO_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18 +#define MMEA4_IO_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a +#define MMEA4_IO_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c +#define MMEA4_IO_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e +#define MMEA4_IO_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L +#define MMEA4_IO_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL +#define MMEA4_IO_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L +#define MMEA4_IO_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L +#define MMEA4_IO_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L +#define MMEA4_IO_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L +#define MMEA4_IO_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L +#define MMEA4_IO_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L +#define MMEA4_IO_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L +#define MMEA4_IO_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L +#define MMEA4_IO_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L +#define MMEA4_IO_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L +#define MMEA4_IO_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L +#define MMEA4_IO_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L +#define MMEA4_IO_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L +#define MMEA4_IO_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L +//MMEA4_IO_RD_CLI2GRP_MAP1 +#define MMEA4_IO_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0 +#define MMEA4_IO_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2 +#define MMEA4_IO_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4 +#define MMEA4_IO_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6 +#define MMEA4_IO_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8 +#define MMEA4_IO_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa +#define MMEA4_IO_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc +#define MMEA4_IO_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe +#define MMEA4_IO_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10 +#define MMEA4_IO_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12 +#define MMEA4_IO_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14 +#define MMEA4_IO_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16 +#define MMEA4_IO_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18 +#define MMEA4_IO_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a +#define MMEA4_IO_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c +#define MMEA4_IO_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e +#define MMEA4_IO_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L +#define MMEA4_IO_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL +#define MMEA4_IO_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L +#define MMEA4_IO_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L +#define MMEA4_IO_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L +#define MMEA4_IO_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L +#define MMEA4_IO_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L +#define MMEA4_IO_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L +#define MMEA4_IO_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L +#define MMEA4_IO_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L +#define MMEA4_IO_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L +#define MMEA4_IO_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L +#define MMEA4_IO_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L +#define MMEA4_IO_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L +#define MMEA4_IO_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L +#define MMEA4_IO_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L +//MMEA4_IO_WR_CLI2GRP_MAP0 +#define MMEA4_IO_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0 +#define MMEA4_IO_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2 
+#define MMEA4_IO_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4 +#define MMEA4_IO_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6 +#define MMEA4_IO_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8 +#define MMEA4_IO_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa +#define MMEA4_IO_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc +#define MMEA4_IO_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe +#define MMEA4_IO_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10 +#define MMEA4_IO_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12 +#define MMEA4_IO_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14 +#define MMEA4_IO_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16 +#define MMEA4_IO_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18 +#define MMEA4_IO_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a +#define MMEA4_IO_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c +#define MMEA4_IO_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e +#define MMEA4_IO_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L +#define MMEA4_IO_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL +#define MMEA4_IO_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L +#define MMEA4_IO_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L +#define MMEA4_IO_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L +#define MMEA4_IO_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L +#define MMEA4_IO_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L +#define MMEA4_IO_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L +#define MMEA4_IO_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L +#define MMEA4_IO_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L +#define MMEA4_IO_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L +#define MMEA4_IO_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L +#define MMEA4_IO_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L +#define MMEA4_IO_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L +#define MMEA4_IO_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L +#define MMEA4_IO_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L +//MMEA4_IO_WR_CLI2GRP_MAP1 +#define MMEA4_IO_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0 +#define MMEA4_IO_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2 +#define MMEA4_IO_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4 +#define MMEA4_IO_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6 +#define MMEA4_IO_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8 +#define MMEA4_IO_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa +#define MMEA4_IO_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc +#define MMEA4_IO_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe +#define MMEA4_IO_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10 +#define MMEA4_IO_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12 +#define MMEA4_IO_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14 +#define MMEA4_IO_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16 +#define MMEA4_IO_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18 +#define MMEA4_IO_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a +#define MMEA4_IO_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c +#define MMEA4_IO_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e +#define MMEA4_IO_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L +#define MMEA4_IO_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL +#define MMEA4_IO_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L +#define MMEA4_IO_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L +#define MMEA4_IO_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L +#define MMEA4_IO_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L +#define MMEA4_IO_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L +#define MMEA4_IO_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L +#define MMEA4_IO_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L +#define MMEA4_IO_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L +#define MMEA4_IO_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L +#define MMEA4_IO_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 
0x00C00000L +#define MMEA4_IO_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L +#define MMEA4_IO_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L +#define MMEA4_IO_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L +#define MMEA4_IO_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L +//MMEA4_IO_RD_COMBINE_FLUSH +#define MMEA4_IO_RD_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0 +#define MMEA4_IO_RD_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4 +#define MMEA4_IO_RD_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8 +#define MMEA4_IO_RD_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc +#define MMEA4_IO_RD_COMBINE_FLUSH__FORWARD_COMB_ONLY__SHIFT 0x10 +#define MMEA4_IO_RD_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL +#define MMEA4_IO_RD_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L +#define MMEA4_IO_RD_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L +#define MMEA4_IO_RD_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L +#define MMEA4_IO_RD_COMBINE_FLUSH__FORWARD_COMB_ONLY_MASK 0x00010000L +//MMEA4_IO_WR_COMBINE_FLUSH +#define MMEA4_IO_WR_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0 +#define MMEA4_IO_WR_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4 +#define MMEA4_IO_WR_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8 +#define MMEA4_IO_WR_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc +#define MMEA4_IO_WR_COMBINE_FLUSH__FORWARD_COMB_ONLY__SHIFT 0x10 +#define MMEA4_IO_WR_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL +#define MMEA4_IO_WR_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L +#define MMEA4_IO_WR_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L +#define MMEA4_IO_WR_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L +#define MMEA4_IO_WR_COMBINE_FLUSH__FORWARD_COMB_ONLY_MASK 0x00010000L +//MMEA4_IO_GROUP_BURST +#define MMEA4_IO_GROUP_BURST__RD_LIMIT_LO__SHIFT 0x0 +#define MMEA4_IO_GROUP_BURST__RD_LIMIT_HI__SHIFT 0x8 +#define MMEA4_IO_GROUP_BURST__WR_LIMIT_LO__SHIFT 0x10 +#define MMEA4_IO_GROUP_BURST__WR_LIMIT_HI__SHIFT 0x18 +#define MMEA4_IO_GROUP_BURST__RD_LIMIT_LO_MASK 0x000000FFL +#define MMEA4_IO_GROUP_BURST__RD_LIMIT_HI_MASK 0x0000FF00L +#define MMEA4_IO_GROUP_BURST__WR_LIMIT_LO_MASK 0x00FF0000L +#define MMEA4_IO_GROUP_BURST__WR_LIMIT_HI_MASK 0xFF000000L +//MMEA4_IO_RD_PRI_AGE +#define MMEA4_IO_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0 +#define MMEA4_IO_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3 +#define MMEA4_IO_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6 +#define MMEA4_IO_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9 +#define MMEA4_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc +#define MMEA4_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf +#define MMEA4_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12 +#define MMEA4_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15 +#define MMEA4_IO_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L +#define MMEA4_IO_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L +#define MMEA4_IO_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L +#define MMEA4_IO_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L +#define MMEA4_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L +#define MMEA4_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L +#define MMEA4_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L +#define MMEA4_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L +//MMEA4_IO_WR_PRI_AGE +#define MMEA4_IO_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0 +#define MMEA4_IO_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3 +#define MMEA4_IO_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6 +#define MMEA4_IO_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9 +#define MMEA4_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc +#define MMEA4_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf +#define 
MMEA4_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12 +#define MMEA4_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15 +#define MMEA4_IO_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L +#define MMEA4_IO_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L +#define MMEA4_IO_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L +#define MMEA4_IO_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L +#define MMEA4_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L +#define MMEA4_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L +#define MMEA4_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L +#define MMEA4_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L +//MMEA4_IO_RD_PRI_QUEUING +#define MMEA4_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0 +#define MMEA4_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3 +#define MMEA4_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6 +#define MMEA4_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9 +#define MMEA4_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L +#define MMEA4_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L +#define MMEA4_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L +#define MMEA4_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L +//MMEA4_IO_WR_PRI_QUEUING +#define MMEA4_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0 +#define MMEA4_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3 +#define MMEA4_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6 +#define MMEA4_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9 +#define MMEA4_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L +#define MMEA4_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L +#define MMEA4_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L +#define MMEA4_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L +//MMEA4_IO_RD_PRI_FIXED +#define MMEA4_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0 +#define MMEA4_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3 +#define MMEA4_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6 +#define MMEA4_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9 +#define MMEA4_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L +#define MMEA4_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L +#define MMEA4_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L +#define MMEA4_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L +//MMEA4_IO_WR_PRI_FIXED +#define MMEA4_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0 +#define MMEA4_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3 +#define MMEA4_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6 +#define MMEA4_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9 +#define MMEA4_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L +#define MMEA4_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L +#define MMEA4_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L +#define MMEA4_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L +//MMEA4_IO_RD_PRI_URGENCY +#define MMEA4_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0 +#define MMEA4_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3 +#define MMEA4_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6 +#define MMEA4_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9 +#define MMEA4_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc +#define MMEA4_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd +#define 
MMEA4_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe +#define MMEA4_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf +#define MMEA4_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L +#define MMEA4_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L +#define MMEA4_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L +#define MMEA4_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L +#define MMEA4_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L +#define MMEA4_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L +#define MMEA4_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L +#define MMEA4_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L +//MMEA4_IO_WR_PRI_URGENCY +#define MMEA4_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0 +#define MMEA4_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3 +#define MMEA4_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6 +#define MMEA4_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9 +#define MMEA4_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc +#define MMEA4_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd +#define MMEA4_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe +#define MMEA4_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf +#define MMEA4_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L +#define MMEA4_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L +#define MMEA4_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L +#define MMEA4_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L +#define MMEA4_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L +#define MMEA4_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L +#define MMEA4_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L +#define MMEA4_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L +//MMEA4_IO_RD_PRI_URGENCY_MASKING +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0 +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1 +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2 +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3 +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4 +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5 +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6 +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7 +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8 +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9 +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10 +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11 +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12 +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13 +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14 +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15 +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16 +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17 +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18 +#define 
MMEA4_IO_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19 +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L +#define MMEA4_IO_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L +//MMEA4_IO_WR_PRI_URGENCY_MASKING +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0 +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1 +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2 +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3 +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4 +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5 +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6 +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7 +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8 +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9 +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc +#define 
MMEA4_IO_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10 +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11 +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12 +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13 +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14 +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15 +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16 +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17 +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18 +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19 +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L +#define MMEA4_IO_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L +//MMEA4_IO_RD_PRI_QUANT_PRI1 +#define MMEA4_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0 
+#define MMEA4_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA4_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA4_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA4_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA4_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA4_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA4_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA4_IO_RD_PRI_QUANT_PRI2 +#define MMEA4_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA4_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA4_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA4_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA4_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA4_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA4_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA4_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA4_IO_RD_PRI_QUANT_PRI3 +#define MMEA4_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA4_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA4_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA4_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA4_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA4_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA4_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA4_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA4_IO_WR_PRI_QUANT_PRI1 +#define MMEA4_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA4_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA4_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA4_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA4_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA4_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA4_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA4_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA4_IO_WR_PRI_QUANT_PRI2 +#define MMEA4_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA4_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA4_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA4_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA4_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA4_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA4_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA4_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA4_IO_WR_PRI_QUANT_PRI3 +#define MMEA4_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA4_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA4_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA4_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA4_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA4_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA4_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA4_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA4_SDP_ARB_DRAM +#define MMEA4_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0 +#define MMEA4_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA__SHIFT 0x8 +#define 
MMEA4_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI__SHIFT 0x10 +#define MMEA4_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI__SHIFT 0x11 +#define MMEA4_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES__SHIFT 0x12 +#define MMEA4_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES__SHIFT 0x13 +#define MMEA4_SDP_ARB_DRAM__EOB_ON_EXPIRE__SHIFT 0x14 +#define MMEA4_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15 +#define MMEA4_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL +#define MMEA4_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L +#define MMEA4_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI_MASK 0x00010000L +#define MMEA4_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI_MASK 0x00020000L +#define MMEA4_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES_MASK 0x00040000L +#define MMEA4_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES_MASK 0x00080000L +#define MMEA4_SDP_ARB_DRAM__EOB_ON_EXPIRE_MASK 0x00100000L +#define MMEA4_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L +//MMEA4_SDP_ARB_GMI +#define MMEA4_SDP_ARB_GMI__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0 +#define MMEA4_SDP_ARB_GMI__RDWR_BURST_LIMIT_DATA__SHIFT 0x8 +#define MMEA4_SDP_ARB_GMI__EARLY_SW2RD_ON_PRI__SHIFT 0x10 +#define MMEA4_SDP_ARB_GMI__EARLY_SW2WR_ON_PRI__SHIFT 0x11 +#define MMEA4_SDP_ARB_GMI__EARLY_SW2RD_ON_RES__SHIFT 0x12 +#define MMEA4_SDP_ARB_GMI__EARLY_SW2WR_ON_RES__SHIFT 0x13 +#define MMEA4_SDP_ARB_GMI__EOB_ON_EXPIRE__SHIFT 0x14 +#define MMEA4_SDP_ARB_GMI__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15 +#define MMEA4_SDP_ARB_GMI__ALLOW_CHAIN_BREAKING__SHIFT 0x16 +#define MMEA4_SDP_ARB_GMI__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL +#define MMEA4_SDP_ARB_GMI__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L +#define MMEA4_SDP_ARB_GMI__EARLY_SW2RD_ON_PRI_MASK 0x00010000L +#define MMEA4_SDP_ARB_GMI__EARLY_SW2WR_ON_PRI_MASK 0x00020000L +#define MMEA4_SDP_ARB_GMI__EARLY_SW2RD_ON_RES_MASK 0x00040000L +#define MMEA4_SDP_ARB_GMI__EARLY_SW2WR_ON_RES_MASK 0x00080000L +#define MMEA4_SDP_ARB_GMI__EOB_ON_EXPIRE_MASK 0x00100000L +#define MMEA4_SDP_ARB_GMI__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L +#define MMEA4_SDP_ARB_GMI__ALLOW_CHAIN_BREAKING_MASK 0x00400000L +//MMEA4_SDP_ARB_FINAL +#define MMEA4_SDP_ARB_FINAL__DRAM_BURST_LIMIT__SHIFT 0x0 +#define MMEA4_SDP_ARB_FINAL__GMI_BURST_LIMIT__SHIFT 0x5 +#define MMEA4_SDP_ARB_FINAL__IO_BURST_LIMIT__SHIFT 0xa +#define MMEA4_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER__SHIFT 0xf +#define MMEA4_SDP_ARB_FINAL__RDONLY_VC0__SHIFT 0x11 +#define MMEA4_SDP_ARB_FINAL__RDONLY_VC1__SHIFT 0x12 +#define MMEA4_SDP_ARB_FINAL__RDONLY_VC2__SHIFT 0x13 +#define MMEA4_SDP_ARB_FINAL__RDONLY_VC3__SHIFT 0x14 +#define MMEA4_SDP_ARB_FINAL__RDONLY_VC4__SHIFT 0x15 +#define MMEA4_SDP_ARB_FINAL__RDONLY_VC5__SHIFT 0x16 +#define MMEA4_SDP_ARB_FINAL__RDONLY_VC6__SHIFT 0x17 +#define MMEA4_SDP_ARB_FINAL__RDONLY_VC7__SHIFT 0x18 +#define MMEA4_SDP_ARB_FINAL__ERREVENT_ON_ERROR__SHIFT 0x19 +#define MMEA4_SDP_ARB_FINAL__HALTREQ_ON_ERROR__SHIFT 0x1a +#define MMEA4_SDP_ARB_FINAL__GMI_BURST_STRETCH__SHIFT 0x1b +#define MMEA4_SDP_ARB_FINAL__DRAM_BURST_LIMIT_MASK 0x0000001FL +#define MMEA4_SDP_ARB_FINAL__GMI_BURST_LIMIT_MASK 0x000003E0L +#define MMEA4_SDP_ARB_FINAL__IO_BURST_LIMIT_MASK 0x00007C00L +#define MMEA4_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER_MASK 0x00018000L +#define MMEA4_SDP_ARB_FINAL__RDONLY_VC0_MASK 0x00020000L +#define MMEA4_SDP_ARB_FINAL__RDONLY_VC1_MASK 0x00040000L +#define MMEA4_SDP_ARB_FINAL__RDONLY_VC2_MASK 0x00080000L +#define MMEA4_SDP_ARB_FINAL__RDONLY_VC3_MASK 0x00100000L +#define MMEA4_SDP_ARB_FINAL__RDONLY_VC4_MASK 0x00200000L +#define MMEA4_SDP_ARB_FINAL__RDONLY_VC5_MASK 0x00400000L +#define MMEA4_SDP_ARB_FINAL__RDONLY_VC6_MASK 0x00800000L +#define 
MMEA4_SDP_ARB_FINAL__RDONLY_VC7_MASK 0x01000000L +#define MMEA4_SDP_ARB_FINAL__ERREVENT_ON_ERROR_MASK 0x02000000L +#define MMEA4_SDP_ARB_FINAL__HALTREQ_ON_ERROR_MASK 0x04000000L +#define MMEA4_SDP_ARB_FINAL__GMI_BURST_STRETCH_MASK 0x08000000L +//MMEA4_SDP_DRAM_PRIORITY +#define MMEA4_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0 +#define MMEA4_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4 +#define MMEA4_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8 +#define MMEA4_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc +#define MMEA4_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10 +#define MMEA4_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14 +#define MMEA4_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18 +#define MMEA4_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c +#define MMEA4_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL +#define MMEA4_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L +#define MMEA4_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L +#define MMEA4_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L +#define MMEA4_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L +#define MMEA4_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L +#define MMEA4_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L +#define MMEA4_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L +//MMEA4_SDP_GMI_PRIORITY +#define MMEA4_SDP_GMI_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0 +#define MMEA4_SDP_GMI_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4 +#define MMEA4_SDP_GMI_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8 +#define MMEA4_SDP_GMI_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc +#define MMEA4_SDP_GMI_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10 +#define MMEA4_SDP_GMI_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14 +#define MMEA4_SDP_GMI_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18 +#define MMEA4_SDP_GMI_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c +#define MMEA4_SDP_GMI_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL +#define MMEA4_SDP_GMI_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L +#define MMEA4_SDP_GMI_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L +#define MMEA4_SDP_GMI_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L +#define MMEA4_SDP_GMI_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L +#define MMEA4_SDP_GMI_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L +#define MMEA4_SDP_GMI_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L +#define MMEA4_SDP_GMI_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L +//MMEA4_SDP_IO_PRIORITY +#define MMEA4_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0 +#define MMEA4_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4 +#define MMEA4_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8 +#define MMEA4_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc +#define MMEA4_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10 +#define MMEA4_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14 +#define MMEA4_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18 +#define MMEA4_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c +#define MMEA4_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL +#define MMEA4_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L +#define MMEA4_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L +#define MMEA4_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L +#define MMEA4_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L +#define MMEA4_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L +#define MMEA4_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L +#define MMEA4_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L +//MMEA4_SDP_CREDITS +#define 
MMEA4_SDP_CREDITS__TAG_LIMIT__SHIFT 0x0 +#define MMEA4_SDP_CREDITS__WR_RESP_CREDITS__SHIFT 0x8 +#define MMEA4_SDP_CREDITS__RD_RESP_CREDITS__SHIFT 0x10 +#define MMEA4_SDP_CREDITS__TAG_LIMIT_MASK 0x000000FFL +#define MMEA4_SDP_CREDITS__WR_RESP_CREDITS_MASK 0x00007F00L +#define MMEA4_SDP_CREDITS__RD_RESP_CREDITS_MASK 0x007F0000L +//MMEA4_SDP_TAG_RESERVE0 +#define MMEA4_SDP_TAG_RESERVE0__VC0__SHIFT 0x0 +#define MMEA4_SDP_TAG_RESERVE0__VC1__SHIFT 0x8 +#define MMEA4_SDP_TAG_RESERVE0__VC2__SHIFT 0x10 +#define MMEA4_SDP_TAG_RESERVE0__VC3__SHIFT 0x18 +#define MMEA4_SDP_TAG_RESERVE0__VC0_MASK 0x000000FFL +#define MMEA4_SDP_TAG_RESERVE0__VC1_MASK 0x0000FF00L +#define MMEA4_SDP_TAG_RESERVE0__VC2_MASK 0x00FF0000L +#define MMEA4_SDP_TAG_RESERVE0__VC3_MASK 0xFF000000L +//MMEA4_SDP_TAG_RESERVE1 +#define MMEA4_SDP_TAG_RESERVE1__VC4__SHIFT 0x0 +#define MMEA4_SDP_TAG_RESERVE1__VC5__SHIFT 0x8 +#define MMEA4_SDP_TAG_RESERVE1__VC6__SHIFT 0x10 +#define MMEA4_SDP_TAG_RESERVE1__VC7__SHIFT 0x18 +#define MMEA4_SDP_TAG_RESERVE1__VC4_MASK 0x000000FFL +#define MMEA4_SDP_TAG_RESERVE1__VC5_MASK 0x0000FF00L +#define MMEA4_SDP_TAG_RESERVE1__VC6_MASK 0x00FF0000L +#define MMEA4_SDP_TAG_RESERVE1__VC7_MASK 0xFF000000L +//MMEA4_SDP_VCC_RESERVE0 +#define MMEA4_SDP_VCC_RESERVE0__VC0_CREDITS__SHIFT 0x0 +#define MMEA4_SDP_VCC_RESERVE0__VC1_CREDITS__SHIFT 0x6 +#define MMEA4_SDP_VCC_RESERVE0__VC2_CREDITS__SHIFT 0xc +#define MMEA4_SDP_VCC_RESERVE0__VC3_CREDITS__SHIFT 0x12 +#define MMEA4_SDP_VCC_RESERVE0__VC4_CREDITS__SHIFT 0x18 +#define MMEA4_SDP_VCC_RESERVE0__VC0_CREDITS_MASK 0x0000003FL +#define MMEA4_SDP_VCC_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L +#define MMEA4_SDP_VCC_RESERVE0__VC2_CREDITS_MASK 0x0003F000L +#define MMEA4_SDP_VCC_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L +#define MMEA4_SDP_VCC_RESERVE0__VC4_CREDITS_MASK 0x3F000000L +//MMEA4_SDP_VCC_RESERVE1 +#define MMEA4_SDP_VCC_RESERVE1__VC5_CREDITS__SHIFT 0x0 +#define MMEA4_SDP_VCC_RESERVE1__VC6_CREDITS__SHIFT 0x6 +#define MMEA4_SDP_VCC_RESERVE1__VC7_CREDITS__SHIFT 0xc +#define MMEA4_SDP_VCC_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f +#define MMEA4_SDP_VCC_RESERVE1__VC5_CREDITS_MASK 0x0000003FL +#define MMEA4_SDP_VCC_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L +#define MMEA4_SDP_VCC_RESERVE1__VC7_CREDITS_MASK 0x0003F000L +#define MMEA4_SDP_VCC_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L +//MMEA4_SDP_VCD_RESERVE0 +#define MMEA4_SDP_VCD_RESERVE0__VC0_CREDITS__SHIFT 0x0 +#define MMEA4_SDP_VCD_RESERVE0__VC1_CREDITS__SHIFT 0x6 +#define MMEA4_SDP_VCD_RESERVE0__VC2_CREDITS__SHIFT 0xc +#define MMEA4_SDP_VCD_RESERVE0__VC3_CREDITS__SHIFT 0x12 +#define MMEA4_SDP_VCD_RESERVE0__VC4_CREDITS__SHIFT 0x18 +#define MMEA4_SDP_VCD_RESERVE0__VC0_CREDITS_MASK 0x0000003FL +#define MMEA4_SDP_VCD_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L +#define MMEA4_SDP_VCD_RESERVE0__VC2_CREDITS_MASK 0x0003F000L +#define MMEA4_SDP_VCD_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L +#define MMEA4_SDP_VCD_RESERVE0__VC4_CREDITS_MASK 0x3F000000L +//MMEA4_SDP_VCD_RESERVE1 +#define MMEA4_SDP_VCD_RESERVE1__VC5_CREDITS__SHIFT 0x0 +#define MMEA4_SDP_VCD_RESERVE1__VC6_CREDITS__SHIFT 0x6 +#define MMEA4_SDP_VCD_RESERVE1__VC7_CREDITS__SHIFT 0xc +#define MMEA4_SDP_VCD_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f +#define MMEA4_SDP_VCD_RESERVE1__VC5_CREDITS_MASK 0x0000003FL +#define MMEA4_SDP_VCD_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L +#define MMEA4_SDP_VCD_RESERVE1__VC7_CREDITS_MASK 0x0003F000L +#define MMEA4_SDP_VCD_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L +//MMEA4_SDP_REQ_CNTL +#define MMEA4_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ__SHIFT 0x0 +#define 
MMEA4_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE__SHIFT 0x1 +#define MMEA4_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC__SHIFT 0x2 +#define MMEA4_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM__SHIFT 0x3 +#define MMEA4_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI__SHIFT 0x4 +#define MMEA4_SDP_REQ_CNTL__INNER_DOMAIN_MODE__SHIFT 0x5 +#define MMEA4_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ_MASK 0x00000001L +#define MMEA4_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE_MASK 0x00000002L +#define MMEA4_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC_MASK 0x00000004L +#define MMEA4_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM_MASK 0x00000008L +#define MMEA4_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI_MASK 0x00000010L +#define MMEA4_SDP_REQ_CNTL__INNER_DOMAIN_MODE_MASK 0x00000020L +//MMEA4_MISC +#define MMEA4_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB__SHIFT 0x0 +#define MMEA4_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB__SHIFT 0x1 +#define MMEA4_MISC__RELATIVE_PRI_IN_GMI_RD_ARB__SHIFT 0x2 +#define MMEA4_MISC__RELATIVE_PRI_IN_GMI_WR_ARB__SHIFT 0x3 +#define MMEA4_MISC__RELATIVE_PRI_IN_IO_RD_ARB__SHIFT 0x4 +#define MMEA4_MISC__RELATIVE_PRI_IN_IO_WR_ARB__SHIFT 0x5 +#define MMEA4_MISC__EARLYWRRET_ENABLE_VC0__SHIFT 0x6 +#define MMEA4_MISC__EARLYWRRET_ENABLE_VC1__SHIFT 0x7 +#define MMEA4_MISC__EARLYWRRET_ENABLE_VC2__SHIFT 0x8 +#define MMEA4_MISC__EARLYWRRET_ENABLE_VC3__SHIFT 0x9 +#define MMEA4_MISC__EARLYWRRET_ENABLE_VC4__SHIFT 0xa +#define MMEA4_MISC__EARLYWRRET_ENABLE_VC5__SHIFT 0xb +#define MMEA4_MISC__EARLYWRRET_ENABLE_VC6__SHIFT 0xc +#define MMEA4_MISC__EARLYWRRET_ENABLE_VC7__SHIFT 0xd +#define MMEA4_MISC__EARLY_SDP_ORIGDATA__SHIFT 0xe +#define MMEA4_MISC__LINKMGR_DYNAMIC_MODE__SHIFT 0xf +#define MMEA4_MISC__LINKMGR_HALT_THRESHOLD__SHIFT 0x11 +#define MMEA4_MISC__LINKMGR_RECONNECT_DELAY__SHIFT 0x13 +#define MMEA4_MISC__LINKMGR_IDLE_THRESHOLD__SHIFT 0x15 +#define MMEA4_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB__SHIFT 0x1a +#define MMEA4_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB__SHIFT 0x1b +#define MMEA4_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB__SHIFT 0x1c +#define MMEA4_MISC__FAVOUR_LAST_CS_IN_GMI_ARB__SHIFT 0x1d +#define MMEA4_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB__SHIFT 0x1e +#define MMEA4_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB__SHIFT 0x1f +#define MMEA4_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB_MASK 0x00000001L +#define MMEA4_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB_MASK 0x00000002L +#define MMEA4_MISC__RELATIVE_PRI_IN_GMI_RD_ARB_MASK 0x00000004L +#define MMEA4_MISC__RELATIVE_PRI_IN_GMI_WR_ARB_MASK 0x00000008L +#define MMEA4_MISC__RELATIVE_PRI_IN_IO_RD_ARB_MASK 0x00000010L +#define MMEA4_MISC__RELATIVE_PRI_IN_IO_WR_ARB_MASK 0x00000020L +#define MMEA4_MISC__EARLYWRRET_ENABLE_VC0_MASK 0x00000040L +#define MMEA4_MISC__EARLYWRRET_ENABLE_VC1_MASK 0x00000080L +#define MMEA4_MISC__EARLYWRRET_ENABLE_VC2_MASK 0x00000100L +#define MMEA4_MISC__EARLYWRRET_ENABLE_VC3_MASK 0x00000200L +#define MMEA4_MISC__EARLYWRRET_ENABLE_VC4_MASK 0x00000400L +#define MMEA4_MISC__EARLYWRRET_ENABLE_VC5_MASK 0x00000800L +#define MMEA4_MISC__EARLYWRRET_ENABLE_VC6_MASK 0x00001000L +#define MMEA4_MISC__EARLYWRRET_ENABLE_VC7_MASK 0x00002000L +#define MMEA4_MISC__EARLY_SDP_ORIGDATA_MASK 0x00004000L +#define MMEA4_MISC__LINKMGR_DYNAMIC_MODE_MASK 0x00018000L +#define MMEA4_MISC__LINKMGR_HALT_THRESHOLD_MASK 0x00060000L +#define MMEA4_MISC__LINKMGR_RECONNECT_DELAY_MASK 0x00180000L +#define MMEA4_MISC__LINKMGR_IDLE_THRESHOLD_MASK 0x03E00000L +#define MMEA4_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB_MASK 0x04000000L +#define MMEA4_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB_MASK 0x08000000L +#define MMEA4_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB_MASK 0x10000000L 
+#define MMEA4_MISC__FAVOUR_LAST_CS_IN_GMI_ARB_MASK 0x20000000L +#define MMEA4_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB_MASK 0x40000000L +#define MMEA4_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB_MASK 0x80000000L +//MMEA4_LATENCY_SAMPLING +#define MMEA4_LATENCY_SAMPLING__SAMPLER0_DRAM__SHIFT 0x0 +#define MMEA4_LATENCY_SAMPLING__SAMPLER1_DRAM__SHIFT 0x1 +#define MMEA4_LATENCY_SAMPLING__SAMPLER0_GMI__SHIFT 0x2 +#define MMEA4_LATENCY_SAMPLING__SAMPLER1_GMI__SHIFT 0x3 +#define MMEA4_LATENCY_SAMPLING__SAMPLER0_IO__SHIFT 0x4 +#define MMEA4_LATENCY_SAMPLING__SAMPLER1_IO__SHIFT 0x5 +#define MMEA4_LATENCY_SAMPLING__SAMPLER0_READ__SHIFT 0x6 +#define MMEA4_LATENCY_SAMPLING__SAMPLER1_READ__SHIFT 0x7 +#define MMEA4_LATENCY_SAMPLING__SAMPLER0_WRITE__SHIFT 0x8 +#define MMEA4_LATENCY_SAMPLING__SAMPLER1_WRITE__SHIFT 0x9 +#define MMEA4_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET__SHIFT 0xa +#define MMEA4_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET__SHIFT 0xb +#define MMEA4_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET__SHIFT 0xc +#define MMEA4_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET__SHIFT 0xd +#define MMEA4_LATENCY_SAMPLING__SAMPLER0_VC__SHIFT 0xe +#define MMEA4_LATENCY_SAMPLING__SAMPLER1_VC__SHIFT 0x16 +#define MMEA4_LATENCY_SAMPLING__SAMPLER0_DRAM_MASK 0x00000001L +#define MMEA4_LATENCY_SAMPLING__SAMPLER1_DRAM_MASK 0x00000002L +#define MMEA4_LATENCY_SAMPLING__SAMPLER0_GMI_MASK 0x00000004L +#define MMEA4_LATENCY_SAMPLING__SAMPLER1_GMI_MASK 0x00000008L +#define MMEA4_LATENCY_SAMPLING__SAMPLER0_IO_MASK 0x00000010L +#define MMEA4_LATENCY_SAMPLING__SAMPLER1_IO_MASK 0x00000020L +#define MMEA4_LATENCY_SAMPLING__SAMPLER0_READ_MASK 0x00000040L +#define MMEA4_LATENCY_SAMPLING__SAMPLER1_READ_MASK 0x00000080L +#define MMEA4_LATENCY_SAMPLING__SAMPLER0_WRITE_MASK 0x00000100L +#define MMEA4_LATENCY_SAMPLING__SAMPLER1_WRITE_MASK 0x00000200L +#define MMEA4_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET_MASK 0x00000400L +#define MMEA4_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET_MASK 0x00000800L +#define MMEA4_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET_MASK 0x00001000L +#define MMEA4_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET_MASK 0x00002000L +#define MMEA4_LATENCY_SAMPLING__SAMPLER0_VC_MASK 0x003FC000L +#define MMEA4_LATENCY_SAMPLING__SAMPLER1_VC_MASK 0x3FC00000L +//MMEA4_PERFCOUNTER_LO +#define MMEA4_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0 +#define MMEA4_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL +//MMEA4_PERFCOUNTER_HI +#define MMEA4_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0 +#define MMEA4_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10 +#define MMEA4_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL +#define MMEA4_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L +//MMEA4_PERFCOUNTER0_CFG +#define MMEA4_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0 +#define MMEA4_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8 +#define MMEA4_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18 +#define MMEA4_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c +#define MMEA4_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d +#define MMEA4_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL +#define MMEA4_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define MMEA4_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L +#define MMEA4_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L +#define MMEA4_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L +//MMEA4_PERFCOUNTER1_CFG +#define MMEA4_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0 +#define MMEA4_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8 +#define MMEA4_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18 +#define MMEA4_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c +#define MMEA4_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d +#define MMEA4_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL 
+#define MMEA4_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define MMEA4_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L +#define MMEA4_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L +#define MMEA4_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L +//MMEA4_PERFCOUNTER_RSLT_CNTL +#define MMEA4_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0 +#define MMEA4_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8 +#define MMEA4_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10 +#define MMEA4_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18 +#define MMEA4_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19 +#define MMEA4_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a +#define MMEA4_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL +#define MMEA4_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L +#define MMEA4_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L +#define MMEA4_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L +#define MMEA4_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L +#define MMEA4_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L +//MMEA4_EDC_CNT +#define MMEA4_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0 +#define MMEA4_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2 +#define MMEA4_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4 +#define MMEA4_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6 +#define MMEA4_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8 +#define MMEA4_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa +#define MMEA4_EDC_CNT__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc +#define MMEA4_EDC_CNT__RRET_TAGMEM_DED_COUNT__SHIFT 0xe +#define MMEA4_EDC_CNT__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10 +#define MMEA4_EDC_CNT__WRET_TAGMEM_DED_COUNT__SHIFT 0x12 +#define MMEA4_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x14 +#define MMEA4_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x16 +#define MMEA4_EDC_CNT__IORD_CMDMEM_SED_COUNT__SHIFT 0x18 +#define MMEA4_EDC_CNT__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1a +#define MMEA4_EDC_CNT__IOWR_DATAMEM_SED_COUNT__SHIFT 0x1c +#define MMEA4_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT_MASK 0x00000003L +#define MMEA4_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL +#define MMEA4_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L +#define MMEA4_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L +#define MMEA4_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L +#define MMEA4_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L +#define MMEA4_EDC_CNT__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L +#define MMEA4_EDC_CNT__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L +#define MMEA4_EDC_CNT__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L +#define MMEA4_EDC_CNT__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L +#define MMEA4_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x00300000L +#define MMEA4_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x00C00000L +#define MMEA4_EDC_CNT__IORD_CMDMEM_SED_COUNT_MASK 0x03000000L +#define MMEA4_EDC_CNT__IOWR_CMDMEM_SED_COUNT_MASK 0x0C000000L +#define MMEA4_EDC_CNT__IOWR_DATAMEM_SED_COUNT_MASK 0x30000000L +//MMEA4_EDC_CNT2 +#define MMEA4_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0 +#define MMEA4_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2 +#define MMEA4_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4 +#define MMEA4_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6 +#define MMEA4_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8 +#define MMEA4_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa +#define MMEA4_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc +#define MMEA4_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe +#define MMEA4_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L +#define MMEA4_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 
0x0000000CL +#define MMEA4_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L +#define MMEA4_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT_MASK 0x000000C0L +#define MMEA4_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L +#define MMEA4_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L +#define MMEA4_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L +#define MMEA4_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L +//MMEA4_DSM_CNTL +#define MMEA4_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 +#define MMEA4_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 +#define MMEA4_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x3 +#define MMEA4_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5 +#define MMEA4_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x6 +#define MMEA4_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8 +#define MMEA4_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0x9 +#define MMEA4_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb +#define MMEA4_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0xc +#define MMEA4_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe +#define MMEA4_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0xf +#define MMEA4_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11 +#define MMEA4_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x12 +#define MMEA4_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14 +#define MMEA4_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x15 +#define MMEA4_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x17 +#define MMEA4_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L +#define MMEA4_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L +#define MMEA4_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L +#define MMEA4_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L +#define MMEA4_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L +#define MMEA4_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L +#define MMEA4_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L +#define MMEA4_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L +#define MMEA4_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L +#define MMEA4_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L +#define MMEA4_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L +#define MMEA4_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L +#define MMEA4_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L +#define MMEA4_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L +#define MMEA4_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00600000L +#define MMEA4_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00800000L +//MMEA4_DSM_CNTLA +#define MMEA4_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 +#define MMEA4_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 +#define MMEA4_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x3 +#define MMEA4_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5 +#define MMEA4_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x6 +#define MMEA4_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8 +#define MMEA4_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x9 +#define MMEA4_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb +#define MMEA4_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0xc +#define MMEA4_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe +#define MMEA4_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0xf +#define 
MMEA4_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11 +#define MMEA4_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x12 +#define MMEA4_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14 +#define MMEA4_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L +#define MMEA4_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L +#define MMEA4_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L +#define MMEA4_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L +#define MMEA4_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L +#define MMEA4_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L +#define MMEA4_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L +#define MMEA4_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L +#define MMEA4_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L +#define MMEA4_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L +#define MMEA4_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L +#define MMEA4_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L +#define MMEA4_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L +#define MMEA4_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L +//MMEA4_DSM_CNTL2 +#define MMEA4_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x0 +#define MMEA4_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x2 +#define MMEA4_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x3 +#define MMEA4_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x5 +#define MMEA4_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x6 +#define MMEA4_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x8 +#define MMEA4_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0x9 +#define MMEA4_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xb +#define MMEA4_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0xc +#define MMEA4_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xe +#define MMEA4_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0xf +#define MMEA4_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x11 +#define MMEA4_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x12 +#define MMEA4_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x14 +#define MMEA4_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x15 +#define MMEA4_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x17 +#define MMEA4_DSM_CNTL2__INJECT_DELAY__SHIFT 0x1a +#define MMEA4_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L +#define MMEA4_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000004L +#define MMEA4_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L +#define MMEA4_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000020L +#define MMEA4_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L +#define MMEA4_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00000100L +#define MMEA4_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L +#define MMEA4_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00000800L +#define MMEA4_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L +#define MMEA4_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00004000L +#define MMEA4_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L +#define MMEA4_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00020000L +#define MMEA4_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L +#define MMEA4_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00100000L +#define 
MMEA4_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00600000L +#define MMEA4_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00800000L +#define MMEA4_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L +//MMEA4_DSM_CNTL2A +#define MMEA4_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x0 +#define MMEA4_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x2 +#define MMEA4_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x3 +#define MMEA4_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x5 +#define MMEA4_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x6 +#define MMEA4_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x8 +#define MMEA4_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x9 +#define MMEA4_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0xb +#define MMEA4_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0xc +#define MMEA4_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0xe +#define MMEA4_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0xf +#define MMEA4_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x11 +#define MMEA4_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x12 +#define MMEA4_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x14 +#define MMEA4_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L +#define MMEA4_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000004L +#define MMEA4_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L +#define MMEA4_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000020L +#define MMEA4_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L +#define MMEA4_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000100L +#define MMEA4_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L +#define MMEA4_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000800L +#define MMEA4_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L +#define MMEA4_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00004000L +#define MMEA4_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L +#define MMEA4_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00020000L +#define MMEA4_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L +#define MMEA4_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00100000L +//MMEA4_CGTT_CLK_CTRL +#define MMEA4_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define MMEA4_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define MMEA4_CGTT_CLK_CTRL__SPARE0__SHIFT 0xc +#define MMEA4_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE__SHIFT 0x14 +#define MMEA4_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ__SHIFT 0x15 +#define MMEA4_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN__SHIFT 0x16 +#define MMEA4_CGTT_CLK_CTRL__SPARE1__SHIFT 0x17 +#define MMEA4_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define MMEA4_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE__SHIFT 0x1c +#define MMEA4_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ__SHIFT 0x1d +#define MMEA4_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN__SHIFT 0x1e +#define MMEA4_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER__SHIFT 0x1f +#define MMEA4_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define MMEA4_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define MMEA4_CGTT_CLK_CTRL__SPARE0_MASK 0x000FF000L +#define MMEA4_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE_MASK 0x00100000L +#define MMEA4_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ_MASK 0x00200000L +#define MMEA4_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN_MASK 0x00400000L +#define MMEA4_CGTT_CLK_CTRL__SPARE1_MASK 0x07800000L +#define MMEA4_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L 
+#define MMEA4_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE_MASK 0x10000000L +#define MMEA4_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ_MASK 0x20000000L +#define MMEA4_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN_MASK 0x40000000L +#define MMEA4_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER_MASK 0x80000000L +//MMEA4_EDC_MODE +#define MMEA4_EDC_MODE__COUNT_FED_OUT__SHIFT 0x10 +#define MMEA4_EDC_MODE__GATE_FUE__SHIFT 0x11 +#define MMEA4_EDC_MODE__DED_MODE__SHIFT 0x14 +#define MMEA4_EDC_MODE__PROP_FED__SHIFT 0x1d +#define MMEA4_EDC_MODE__BYPASS__SHIFT 0x1f +#define MMEA4_EDC_MODE__COUNT_FED_OUT_MASK 0x00010000L +#define MMEA4_EDC_MODE__GATE_FUE_MASK 0x00020000L +#define MMEA4_EDC_MODE__DED_MODE_MASK 0x00300000L +#define MMEA4_EDC_MODE__PROP_FED_MASK 0x20000000L +#define MMEA4_EDC_MODE__BYPASS_MASK 0x80000000L +//MMEA4_ERR_STATUS +#define MMEA4_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0 +#define MMEA4_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT 0x4 +#define MMEA4_ERR_STATUS__SDP_RDRSP_DATASTATUS__SHIFT 0x8 +#define MMEA4_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT 0xa +#define MMEA4_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xb +#define MMEA4_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xc +#define MMEA4_ERR_STATUS__FUE_FLAG__SHIFT 0xd +#define MMEA4_ERR_STATUS__SDP_RDRSP_STATUS_MASK 0x0000000FL +#define MMEA4_ERR_STATUS__SDP_WRRSP_STATUS_MASK 0x000000F0L +#define MMEA4_ERR_STATUS__SDP_RDRSP_DATASTATUS_MASK 0x00000300L +#define MMEA4_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L +#define MMEA4_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00000800L +#define MMEA4_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00001000L +#define MMEA4_ERR_STATUS__FUE_FLAG_MASK 0x00002000L +//MMEA4_MISC2 +#define MMEA4_MISC2__CSGROUP_SWAP_IN_DRAM_ARB__SHIFT 0x0 +#define MMEA4_MISC2__CSGROUP_SWAP_IN_GMI_ARB__SHIFT 0x1 +#define MMEA4_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM__SHIFT 0x2 +#define MMEA4_MISC2__CSGRP_BURST_LIMIT_DATA_GMI__SHIFT 0x7 +#define MMEA4_MISC2__IO_RDWR_PRIORITY_ENABLE__SHIFT 0xc +#define MMEA4_MISC2__RRET_SWAP_MODE__SHIFT 0xd +#define MMEA4_MISC2__CSGROUP_SWAP_IN_DRAM_ARB_MASK 0x00000001L +#define MMEA4_MISC2__CSGROUP_SWAP_IN_GMI_ARB_MASK 0x00000002L +#define MMEA4_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM_MASK 0x0000007CL +#define MMEA4_MISC2__CSGRP_BURST_LIMIT_DATA_GMI_MASK 0x00000F80L +#define MMEA4_MISC2__IO_RDWR_PRIORITY_ENABLE_MASK 0x00001000L +#define MMEA4_MISC2__RRET_SWAP_MODE_MASK 0x00002000L +//MMEA4_ADDRDEC_SELECT +#define MMEA4_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_START__SHIFT 0x0 +#define MMEA4_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_END__SHIFT 0x5 +#define MMEA4_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_START__SHIFT 0xa +#define MMEA4_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_END__SHIFT 0xf +#define MMEA4_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_START_MASK 0x0000001FL +#define MMEA4_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_END_MASK 0x000003E0L +#define MMEA4_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_START_MASK 0x00007C00L +#define MMEA4_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_END_MASK 0x000F8000L +//MMEA4_EDC_CNT3 +#define MMEA4_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT__SHIFT 0x0 +#define MMEA4_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT__SHIFT 0x2 +#define MMEA4_EDC_CNT3__IORD_CMDMEM_DED_COUNT__SHIFT 0x4 +#define MMEA4_EDC_CNT3__IOWR_CMDMEM_DED_COUNT__SHIFT 0x6 +#define MMEA4_EDC_CNT3__IOWR_DATAMEM_DED_COUNT__SHIFT 0x8 +#define MMEA4_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT__SHIFT 0xa +#define MMEA4_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT__SHIFT 0xc +#define MMEA4_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT_MASK 0x00000003L +#define MMEA4_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT_MASK 0x0000000CL +#define 
MMEA4_EDC_CNT3__IORD_CMDMEM_DED_COUNT_MASK 0x00000030L +#define MMEA4_EDC_CNT3__IOWR_CMDMEM_DED_COUNT_MASK 0x000000C0L +#define MMEA4_EDC_CNT3__IOWR_DATAMEM_DED_COUNT_MASK 0x00000300L +#define MMEA4_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT_MASK 0x00000C00L +#define MMEA4_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT_MASK 0x00003000L + + +// addressBlock: mmhub_pctldec0 +//PCTL0_CTRL +#define PCTL0_CTRL__PG_ENABLE__SHIFT 0x0 +#define PCTL0_CTRL__ALLOW_DEEP_SLEEP_MODE__SHIFT 0x1 +#define PCTL0_CTRL__STCTRL_RSMU_IDLE_THRESHOLD__SHIFT 0x4 +#define PCTL0_CTRL__STCTRL_DAGB_IDLE_THRESHOLD__SHIFT 0xb +#define PCTL0_CTRL__STCTRL_IGNORE_PROTECTION_FAULT__SHIFT 0x10 +#define PCTL0_CTRL__OVR_EA0_SDP_PARTACK__SHIFT 0x11 +#define PCTL0_CTRL__OVR_EA1_SDP_PARTACK__SHIFT 0x12 +#define PCTL0_CTRL__OVR_EA2_SDP_PARTACK__SHIFT 0x13 +#define PCTL0_CTRL__OVR_EA3_SDP_PARTACK__SHIFT 0x14 +#define PCTL0_CTRL__OVR_EA4_SDP_PARTACK__SHIFT 0x15 +#define PCTL0_CTRL__OVR_EA0_SDP_FULLACK__SHIFT 0x16 +#define PCTL0_CTRL__OVR_EA1_SDP_FULLACK__SHIFT 0x17 +#define PCTL0_CTRL__OVR_EA2_SDP_FULLACK__SHIFT 0x18 +#define PCTL0_CTRL__OVR_EA3_SDP_FULLACK__SHIFT 0x19 +#define PCTL0_CTRL__OVR_EA4_SDP_FULLACK__SHIFT 0x1a +#define PCTL0_CTRL__PGFSM_CMD_STATUS__SHIFT 0x1b +#define PCTL0_CTRL__PG_ENABLE_MASK 0x00000001L +#define PCTL0_CTRL__ALLOW_DEEP_SLEEP_MODE_MASK 0x0000000EL +#define PCTL0_CTRL__STCTRL_RSMU_IDLE_THRESHOLD_MASK 0x000007F0L +#define PCTL0_CTRL__STCTRL_DAGB_IDLE_THRESHOLD_MASK 0x0000F800L +#define PCTL0_CTRL__STCTRL_IGNORE_PROTECTION_FAULT_MASK 0x00010000L +#define PCTL0_CTRL__OVR_EA0_SDP_PARTACK_MASK 0x00020000L +#define PCTL0_CTRL__OVR_EA1_SDP_PARTACK_MASK 0x00040000L +#define PCTL0_CTRL__OVR_EA2_SDP_PARTACK_MASK 0x00080000L +#define PCTL0_CTRL__OVR_EA3_SDP_PARTACK_MASK 0x00100000L +#define PCTL0_CTRL__OVR_EA4_SDP_PARTACK_MASK 0x00200000L +#define PCTL0_CTRL__OVR_EA0_SDP_FULLACK_MASK 0x00400000L +#define PCTL0_CTRL__OVR_EA1_SDP_FULLACK_MASK 0x00800000L +#define PCTL0_CTRL__OVR_EA2_SDP_FULLACK_MASK 0x01000000L +#define PCTL0_CTRL__OVR_EA3_SDP_FULLACK_MASK 0x02000000L +#define PCTL0_CTRL__OVR_EA4_SDP_FULLACK_MASK 0x04000000L +#define PCTL0_CTRL__PGFSM_CMD_STATUS_MASK 0x18000000L +//PCTL0_MMHUB_DEEPSLEEP_IB +#define PCTL0_MMHUB_DEEPSLEEP_IB__DS0__SHIFT 0x0 +#define PCTL0_MMHUB_DEEPSLEEP_IB__DS1__SHIFT 0x1 +#define PCTL0_MMHUB_DEEPSLEEP_IB__DS2__SHIFT 0x2 +#define PCTL0_MMHUB_DEEPSLEEP_IB__DS3__SHIFT 0x3 +#define PCTL0_MMHUB_DEEPSLEEP_IB__DS4__SHIFT 0x4 +#define PCTL0_MMHUB_DEEPSLEEP_IB__DS5__SHIFT 0x5 +#define PCTL0_MMHUB_DEEPSLEEP_IB__DS6__SHIFT 0x6 +#define PCTL0_MMHUB_DEEPSLEEP_IB__DS7__SHIFT 0x7 +#define PCTL0_MMHUB_DEEPSLEEP_IB__DS8__SHIFT 0x8 +#define PCTL0_MMHUB_DEEPSLEEP_IB__DS9__SHIFT 0x9 +#define PCTL0_MMHUB_DEEPSLEEP_IB__DS10__SHIFT 0xa +#define PCTL0_MMHUB_DEEPSLEEP_IB__DS11__SHIFT 0xb +#define PCTL0_MMHUB_DEEPSLEEP_IB__DS12__SHIFT 0xc +#define PCTL0_MMHUB_DEEPSLEEP_IB__DS13__SHIFT 0xd +#define PCTL0_MMHUB_DEEPSLEEP_IB__DS14__SHIFT 0xe +#define PCTL0_MMHUB_DEEPSLEEP_IB__DS15__SHIFT 0xf +#define PCTL0_MMHUB_DEEPSLEEP_IB__DS16__SHIFT 0x10 +#define PCTL0_MMHUB_DEEPSLEEP_IB__SETCLEAR__SHIFT 0x1f +#define PCTL0_MMHUB_DEEPSLEEP_IB__DS0_MASK 0x00000001L +#define PCTL0_MMHUB_DEEPSLEEP_IB__DS1_MASK 0x00000002L +#define PCTL0_MMHUB_DEEPSLEEP_IB__DS2_MASK 0x00000004L +#define PCTL0_MMHUB_DEEPSLEEP_IB__DS3_MASK 0x00000008L +#define PCTL0_MMHUB_DEEPSLEEP_IB__DS4_MASK 0x00000010L +#define PCTL0_MMHUB_DEEPSLEEP_IB__DS5_MASK 0x00000020L +#define PCTL0_MMHUB_DEEPSLEEP_IB__DS6_MASK 0x00000040L +#define PCTL0_MMHUB_DEEPSLEEP_IB__DS7_MASK 
0x00000080L +#define PCTL0_MMHUB_DEEPSLEEP_IB__DS8_MASK 0x00000100L +#define PCTL0_MMHUB_DEEPSLEEP_IB__DS9_MASK 0x00000200L +#define PCTL0_MMHUB_DEEPSLEEP_IB__DS10_MASK 0x00000400L +#define PCTL0_MMHUB_DEEPSLEEP_IB__DS11_MASK 0x00000800L +#define PCTL0_MMHUB_DEEPSLEEP_IB__DS12_MASK 0x00001000L +#define PCTL0_MMHUB_DEEPSLEEP_IB__DS13_MASK 0x00002000L +#define PCTL0_MMHUB_DEEPSLEEP_IB__DS14_MASK 0x00004000L +#define PCTL0_MMHUB_DEEPSLEEP_IB__DS15_MASK 0x00008000L +#define PCTL0_MMHUB_DEEPSLEEP_IB__DS16_MASK 0x00010000L +#define PCTL0_MMHUB_DEEPSLEEP_IB__SETCLEAR_MASK 0x80000000L +//PCTL0_MMHUB_DEEPSLEEP_OVERRIDE +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS0__SHIFT 0x0 +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS1__SHIFT 0x1 +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS2__SHIFT 0x2 +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS3__SHIFT 0x3 +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS4__SHIFT 0x4 +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS5__SHIFT 0x5 +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS6__SHIFT 0x6 +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS7__SHIFT 0x7 +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS8__SHIFT 0x8 +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS9__SHIFT 0x9 +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS10__SHIFT 0xa +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS11__SHIFT 0xb +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS12__SHIFT 0xc +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS13__SHIFT 0xd +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS14__SHIFT 0xe +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS15__SHIFT 0xf +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS16__SHIFT 0x10 +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS_ATHUB__SHIFT 0x11 +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS0_MASK 0x00000001L +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS1_MASK 0x00000002L +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS2_MASK 0x00000004L +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS3_MASK 0x00000008L +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS4_MASK 0x00000010L +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS5_MASK 0x00000020L +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS6_MASK 0x00000040L +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS7_MASK 0x00000080L +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS8_MASK 0x00000100L +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS9_MASK 0x00000200L +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS10_MASK 0x00000400L +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS11_MASK 0x00000800L +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS12_MASK 0x00001000L +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS13_MASK 0x00002000L +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS14_MASK 0x00004000L +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS15_MASK 0x00008000L +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS16_MASK 0x00010000L +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE__DS_ATHUB_MASK 0x00020000L +//PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS0__SHIFT 0x0 +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS1__SHIFT 0x1 +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS2__SHIFT 0x2 +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS3__SHIFT 0x3 +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS4__SHIFT 0x4 +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS5__SHIFT 0x5 +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS6__SHIFT 0x6 +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS7__SHIFT 0x7 +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS8__SHIFT 0x8 +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS9__SHIFT 0x9 +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS10__SHIFT 0xa +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS11__SHIFT 0xb +#define 
PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS12__SHIFT 0xc +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS13__SHIFT 0xd +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS14__SHIFT 0xe +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS15__SHIFT 0xf +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS16__SHIFT 0x10 +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS0_MASK 0x00000001L +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS1_MASK 0x00000002L +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS2_MASK 0x00000004L +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS3_MASK 0x00000008L +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS4_MASK 0x00000010L +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS5_MASK 0x00000020L +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS6_MASK 0x00000040L +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS7_MASK 0x00000080L +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS8_MASK 0x00000100L +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS9_MASK 0x00000200L +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS10_MASK 0x00000400L +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS11_MASK 0x00000800L +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS12_MASK 0x00001000L +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS13_MASK 0x00002000L +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS14_MASK 0x00004000L +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS15_MASK 0x00008000L +#define PCTL0_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS16_MASK 0x00010000L +//PCTL0_PG_IGNORE_DEEPSLEEP +#define PCTL0_PG_IGNORE_DEEPSLEEP__DS0__SHIFT 0x0 +#define PCTL0_PG_IGNORE_DEEPSLEEP__DS1__SHIFT 0x1 +#define PCTL0_PG_IGNORE_DEEPSLEEP__DS2__SHIFT 0x2 +#define PCTL0_PG_IGNORE_DEEPSLEEP__DS3__SHIFT 0x3 +#define PCTL0_PG_IGNORE_DEEPSLEEP__DS4__SHIFT 0x4 +#define PCTL0_PG_IGNORE_DEEPSLEEP__DS5__SHIFT 0x5 +#define PCTL0_PG_IGNORE_DEEPSLEEP__DS6__SHIFT 0x6 +#define PCTL0_PG_IGNORE_DEEPSLEEP__DS7__SHIFT 0x7 +#define PCTL0_PG_IGNORE_DEEPSLEEP__DS8__SHIFT 0x8 +#define PCTL0_PG_IGNORE_DEEPSLEEP__DS9__SHIFT 0x9 +#define PCTL0_PG_IGNORE_DEEPSLEEP__DS10__SHIFT 0xa +#define PCTL0_PG_IGNORE_DEEPSLEEP__DS11__SHIFT 0xb +#define PCTL0_PG_IGNORE_DEEPSLEEP__DS12__SHIFT 0xc +#define PCTL0_PG_IGNORE_DEEPSLEEP__DS13__SHIFT 0xd +#define PCTL0_PG_IGNORE_DEEPSLEEP__DS14__SHIFT 0xe +#define PCTL0_PG_IGNORE_DEEPSLEEP__DS15__SHIFT 0xf +#define PCTL0_PG_IGNORE_DEEPSLEEP__DS16__SHIFT 0x10 +#define PCTL0_PG_IGNORE_DEEPSLEEP__DS_ATHUB__SHIFT 0x11 +#define PCTL0_PG_IGNORE_DEEPSLEEP__ALLIPS__SHIFT 0x12 +#define PCTL0_PG_IGNORE_DEEPSLEEP__DS0_MASK 0x00000001L +#define PCTL0_PG_IGNORE_DEEPSLEEP__DS1_MASK 0x00000002L +#define PCTL0_PG_IGNORE_DEEPSLEEP__DS2_MASK 0x00000004L +#define PCTL0_PG_IGNORE_DEEPSLEEP__DS3_MASK 0x00000008L +#define PCTL0_PG_IGNORE_DEEPSLEEP__DS4_MASK 0x00000010L +#define PCTL0_PG_IGNORE_DEEPSLEEP__DS5_MASK 0x00000020L +#define PCTL0_PG_IGNORE_DEEPSLEEP__DS6_MASK 0x00000040L +#define PCTL0_PG_IGNORE_DEEPSLEEP__DS7_MASK 0x00000080L +#define PCTL0_PG_IGNORE_DEEPSLEEP__DS8_MASK 0x00000100L +#define PCTL0_PG_IGNORE_DEEPSLEEP__DS9_MASK 0x00000200L +#define PCTL0_PG_IGNORE_DEEPSLEEP__DS10_MASK 0x00000400L +#define PCTL0_PG_IGNORE_DEEPSLEEP__DS11_MASK 0x00000800L +#define PCTL0_PG_IGNORE_DEEPSLEEP__DS12_MASK 0x00001000L +#define PCTL0_PG_IGNORE_DEEPSLEEP__DS13_MASK 0x00002000L +#define PCTL0_PG_IGNORE_DEEPSLEEP__DS14_MASK 0x00004000L +#define PCTL0_PG_IGNORE_DEEPSLEEP__DS15_MASK 0x00008000L +#define PCTL0_PG_IGNORE_DEEPSLEEP__DS16_MASK 0x00010000L +#define PCTL0_PG_IGNORE_DEEPSLEEP__DS_ATHUB_MASK 0x00020000L +#define PCTL0_PG_IGNORE_DEEPSLEEP__ALLIPS_MASK 0x00040000L +//PCTL0_PG_IGNORE_DEEPSLEEP_IB 
+#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS0__SHIFT 0x0 +#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS1__SHIFT 0x1 +#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS2__SHIFT 0x2 +#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS3__SHIFT 0x3 +#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS4__SHIFT 0x4 +#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS5__SHIFT 0x5 +#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS6__SHIFT 0x6 +#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS7__SHIFT 0x7 +#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS8__SHIFT 0x8 +#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS9__SHIFT 0x9 +#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS10__SHIFT 0xa +#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS11__SHIFT 0xb +#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS12__SHIFT 0xc +#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS13__SHIFT 0xd +#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS14__SHIFT 0xe +#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS15__SHIFT 0xf +#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS16__SHIFT 0x10 +#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__ALLIPS__SHIFT 0x11 +#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS0_MASK 0x00000001L +#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS1_MASK 0x00000002L +#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS2_MASK 0x00000004L +#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS3_MASK 0x00000008L +#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS4_MASK 0x00000010L +#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS5_MASK 0x00000020L +#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS6_MASK 0x00000040L +#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS7_MASK 0x00000080L +#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS8_MASK 0x00000100L +#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS9_MASK 0x00000200L +#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS10_MASK 0x00000400L +#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS11_MASK 0x00000800L +#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS12_MASK 0x00001000L +#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS13_MASK 0x00002000L +#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS14_MASK 0x00004000L +#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS15_MASK 0x00008000L +#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__DS16_MASK 0x00010000L +#define PCTL0_PG_IGNORE_DEEPSLEEP_IB__ALLIPS_MASK 0x00020000L +//PCTL0_SLICE0_CFG_DAGB_BUSY +#define PCTL0_SLICE0_CFG_DAGB_BUSY__DB_LNCFG__SHIFT 0x0 +#define PCTL0_SLICE0_CFG_DAGB_BUSY__DB_LNCFG_MASK 0xFFFFFFFFL +//PCTL0_SLICE0_CFG_DS_ALLOW +#define PCTL0_SLICE0_CFG_DS_ALLOW__DS0__SHIFT 0x0 +#define PCTL0_SLICE0_CFG_DS_ALLOW__DS1__SHIFT 0x1 +#define PCTL0_SLICE0_CFG_DS_ALLOW__DS2__SHIFT 0x2 +#define PCTL0_SLICE0_CFG_DS_ALLOW__DS3__SHIFT 0x3 +#define PCTL0_SLICE0_CFG_DS_ALLOW__DS4__SHIFT 0x4 +#define PCTL0_SLICE0_CFG_DS_ALLOW__DS5__SHIFT 0x5 +#define PCTL0_SLICE0_CFG_DS_ALLOW__DS6__SHIFT 0x6 +#define PCTL0_SLICE0_CFG_DS_ALLOW__DS7__SHIFT 0x7 +#define PCTL0_SLICE0_CFG_DS_ALLOW__DS8__SHIFT 0x8 +#define PCTL0_SLICE0_CFG_DS_ALLOW__DS9__SHIFT 0x9 +#define PCTL0_SLICE0_CFG_DS_ALLOW__DS10__SHIFT 0xa +#define PCTL0_SLICE0_CFG_DS_ALLOW__DS11__SHIFT 0xb +#define PCTL0_SLICE0_CFG_DS_ALLOW__DS12__SHIFT 0xc +#define PCTL0_SLICE0_CFG_DS_ALLOW__DS13__SHIFT 0xd +#define PCTL0_SLICE0_CFG_DS_ALLOW__DS14__SHIFT 0xe +#define PCTL0_SLICE0_CFG_DS_ALLOW__DS15__SHIFT 0xf +#define PCTL0_SLICE0_CFG_DS_ALLOW__DS16__SHIFT 0x10 +#define PCTL0_SLICE0_CFG_DS_ALLOW__DS0_MASK 0x00000001L +#define PCTL0_SLICE0_CFG_DS_ALLOW__DS1_MASK 0x00000002L +#define PCTL0_SLICE0_CFG_DS_ALLOW__DS2_MASK 0x00000004L +#define PCTL0_SLICE0_CFG_DS_ALLOW__DS3_MASK 0x00000008L +#define PCTL0_SLICE0_CFG_DS_ALLOW__DS4_MASK 0x00000010L +#define PCTL0_SLICE0_CFG_DS_ALLOW__DS5_MASK 0x00000020L +#define PCTL0_SLICE0_CFG_DS_ALLOW__DS6_MASK 0x00000040L +#define 
PCTL0_SLICE0_CFG_DS_ALLOW__DS7_MASK 0x00000080L +#define PCTL0_SLICE0_CFG_DS_ALLOW__DS8_MASK 0x00000100L +#define PCTL0_SLICE0_CFG_DS_ALLOW__DS9_MASK 0x00000200L +#define PCTL0_SLICE0_CFG_DS_ALLOW__DS10_MASK 0x00000400L +#define PCTL0_SLICE0_CFG_DS_ALLOW__DS11_MASK 0x00000800L +#define PCTL0_SLICE0_CFG_DS_ALLOW__DS12_MASK 0x00001000L +#define PCTL0_SLICE0_CFG_DS_ALLOW__DS13_MASK 0x00002000L +#define PCTL0_SLICE0_CFG_DS_ALLOW__DS14_MASK 0x00004000L +#define PCTL0_SLICE0_CFG_DS_ALLOW__DS15_MASK 0x00008000L +#define PCTL0_SLICE0_CFG_DS_ALLOW__DS16_MASK 0x00010000L +//PCTL0_SLICE0_CFG_DS_ALLOW_IB +#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS0__SHIFT 0x0 +#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS1__SHIFT 0x1 +#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS2__SHIFT 0x2 +#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS3__SHIFT 0x3 +#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS4__SHIFT 0x4 +#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS5__SHIFT 0x5 +#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS6__SHIFT 0x6 +#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS7__SHIFT 0x7 +#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS8__SHIFT 0x8 +#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS9__SHIFT 0x9 +#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS10__SHIFT 0xa +#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS11__SHIFT 0xb +#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS12__SHIFT 0xc +#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS13__SHIFT 0xd +#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS14__SHIFT 0xe +#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS15__SHIFT 0xf +#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS16__SHIFT 0x10 +#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS0_MASK 0x00000001L +#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS1_MASK 0x00000002L +#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS2_MASK 0x00000004L +#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS3_MASK 0x00000008L +#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS4_MASK 0x00000010L +#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS5_MASK 0x00000020L +#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS6_MASK 0x00000040L +#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS7_MASK 0x00000080L +#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS8_MASK 0x00000100L +#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS9_MASK 0x00000200L +#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS10_MASK 0x00000400L +#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS11_MASK 0x00000800L +#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS12_MASK 0x00001000L +#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS13_MASK 0x00002000L +#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS14_MASK 0x00004000L +#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS15_MASK 0x00008000L +#define PCTL0_SLICE0_CFG_DS_ALLOW_IB__DS16_MASK 0x00010000L +//PCTL0_SLICE1_CFG_DAGB_BUSY +#define PCTL0_SLICE1_CFG_DAGB_BUSY__DB_LNCFG__SHIFT 0x0 +#define PCTL0_SLICE1_CFG_DAGB_BUSY__DB_LNCFG_MASK 0xFFFFFFFFL +//PCTL0_SLICE1_CFG_DS_ALLOW +#define PCTL0_SLICE1_CFG_DS_ALLOW__DS0__SHIFT 0x0 +#define PCTL0_SLICE1_CFG_DS_ALLOW__DS1__SHIFT 0x1 +#define PCTL0_SLICE1_CFG_DS_ALLOW__DS2__SHIFT 0x2 +#define PCTL0_SLICE1_CFG_DS_ALLOW__DS3__SHIFT 0x3 +#define PCTL0_SLICE1_CFG_DS_ALLOW__DS4__SHIFT 0x4 +#define PCTL0_SLICE1_CFG_DS_ALLOW__DS5__SHIFT 0x5 +#define PCTL0_SLICE1_CFG_DS_ALLOW__DS6__SHIFT 0x6 +#define PCTL0_SLICE1_CFG_DS_ALLOW__DS7__SHIFT 0x7 +#define PCTL0_SLICE1_CFG_DS_ALLOW__DS8__SHIFT 0x8 +#define PCTL0_SLICE1_CFG_DS_ALLOW__DS9__SHIFT 0x9 +#define PCTL0_SLICE1_CFG_DS_ALLOW__DS10__SHIFT 0xa +#define PCTL0_SLICE1_CFG_DS_ALLOW__DS11__SHIFT 0xb +#define PCTL0_SLICE1_CFG_DS_ALLOW__DS12__SHIFT 0xc +#define PCTL0_SLICE1_CFG_DS_ALLOW__DS13__SHIFT 0xd +#define PCTL0_SLICE1_CFG_DS_ALLOW__DS14__SHIFT 0xe +#define PCTL0_SLICE1_CFG_DS_ALLOW__DS15__SHIFT 0xf 
+#define PCTL0_SLICE1_CFG_DS_ALLOW__DS16__SHIFT 0x10 +#define PCTL0_SLICE1_CFG_DS_ALLOW__DS0_MASK 0x00000001L +#define PCTL0_SLICE1_CFG_DS_ALLOW__DS1_MASK 0x00000002L +#define PCTL0_SLICE1_CFG_DS_ALLOW__DS2_MASK 0x00000004L +#define PCTL0_SLICE1_CFG_DS_ALLOW__DS3_MASK 0x00000008L +#define PCTL0_SLICE1_CFG_DS_ALLOW__DS4_MASK 0x00000010L +#define PCTL0_SLICE1_CFG_DS_ALLOW__DS5_MASK 0x00000020L +#define PCTL0_SLICE1_CFG_DS_ALLOW__DS6_MASK 0x00000040L +#define PCTL0_SLICE1_CFG_DS_ALLOW__DS7_MASK 0x00000080L +#define PCTL0_SLICE1_CFG_DS_ALLOW__DS8_MASK 0x00000100L +#define PCTL0_SLICE1_CFG_DS_ALLOW__DS9_MASK 0x00000200L +#define PCTL0_SLICE1_CFG_DS_ALLOW__DS10_MASK 0x00000400L +#define PCTL0_SLICE1_CFG_DS_ALLOW__DS11_MASK 0x00000800L +#define PCTL0_SLICE1_CFG_DS_ALLOW__DS12_MASK 0x00001000L +#define PCTL0_SLICE1_CFG_DS_ALLOW__DS13_MASK 0x00002000L +#define PCTL0_SLICE1_CFG_DS_ALLOW__DS14_MASK 0x00004000L +#define PCTL0_SLICE1_CFG_DS_ALLOW__DS15_MASK 0x00008000L +#define PCTL0_SLICE1_CFG_DS_ALLOW__DS16_MASK 0x00010000L +//PCTL0_SLICE1_CFG_DS_ALLOW_IB +#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS0__SHIFT 0x0 +#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS1__SHIFT 0x1 +#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS2__SHIFT 0x2 +#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS3__SHIFT 0x3 +#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS4__SHIFT 0x4 +#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS5__SHIFT 0x5 +#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS6__SHIFT 0x6 +#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS7__SHIFT 0x7 +#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS8__SHIFT 0x8 +#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS9__SHIFT 0x9 +#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS10__SHIFT 0xa +#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS11__SHIFT 0xb +#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS12__SHIFT 0xc +#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS13__SHIFT 0xd +#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS14__SHIFT 0xe +#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS15__SHIFT 0xf +#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS16__SHIFT 0x10 +#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS0_MASK 0x00000001L +#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS1_MASK 0x00000002L +#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS2_MASK 0x00000004L +#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS3_MASK 0x00000008L +#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS4_MASK 0x00000010L +#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS5_MASK 0x00000020L +#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS6_MASK 0x00000040L +#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS7_MASK 0x00000080L +#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS8_MASK 0x00000100L +#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS9_MASK 0x00000200L +#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS10_MASK 0x00000400L +#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS11_MASK 0x00000800L +#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS12_MASK 0x00001000L +#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS13_MASK 0x00002000L +#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS14_MASK 0x00004000L +#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS15_MASK 0x00008000L +#define PCTL0_SLICE1_CFG_DS_ALLOW_IB__DS16_MASK 0x00010000L +//PCTL0_SLICE2_CFG_DAGB_BUSY +#define PCTL0_SLICE2_CFG_DAGB_BUSY__DB_LNCFG__SHIFT 0x0 +#define PCTL0_SLICE2_CFG_DAGB_BUSY__DB_LNCFG_MASK 0xFFFFFFFFL +//PCTL0_SLICE2_CFG_DS_ALLOW +#define PCTL0_SLICE2_CFG_DS_ALLOW__DS0__SHIFT 0x0 +#define PCTL0_SLICE2_CFG_DS_ALLOW__DS1__SHIFT 0x1 +#define PCTL0_SLICE2_CFG_DS_ALLOW__DS2__SHIFT 0x2 +#define PCTL0_SLICE2_CFG_DS_ALLOW__DS3__SHIFT 0x3 +#define PCTL0_SLICE2_CFG_DS_ALLOW__DS4__SHIFT 0x4 +#define PCTL0_SLICE2_CFG_DS_ALLOW__DS5__SHIFT 0x5 +#define PCTL0_SLICE2_CFG_DS_ALLOW__DS6__SHIFT 0x6 
+#define PCTL0_SLICE2_CFG_DS_ALLOW__DS7__SHIFT 0x7 +#define PCTL0_SLICE2_CFG_DS_ALLOW__DS8__SHIFT 0x8 +#define PCTL0_SLICE2_CFG_DS_ALLOW__DS9__SHIFT 0x9 +#define PCTL0_SLICE2_CFG_DS_ALLOW__DS10__SHIFT 0xa +#define PCTL0_SLICE2_CFG_DS_ALLOW__DS11__SHIFT 0xb +#define PCTL0_SLICE2_CFG_DS_ALLOW__DS12__SHIFT 0xc +#define PCTL0_SLICE2_CFG_DS_ALLOW__DS13__SHIFT 0xd +#define PCTL0_SLICE2_CFG_DS_ALLOW__DS14__SHIFT 0xe +#define PCTL0_SLICE2_CFG_DS_ALLOW__DS15__SHIFT 0xf +#define PCTL0_SLICE2_CFG_DS_ALLOW__DS16__SHIFT 0x10 +#define PCTL0_SLICE2_CFG_DS_ALLOW__DS0_MASK 0x00000001L +#define PCTL0_SLICE2_CFG_DS_ALLOW__DS1_MASK 0x00000002L +#define PCTL0_SLICE2_CFG_DS_ALLOW__DS2_MASK 0x00000004L +#define PCTL0_SLICE2_CFG_DS_ALLOW__DS3_MASK 0x00000008L +#define PCTL0_SLICE2_CFG_DS_ALLOW__DS4_MASK 0x00000010L +#define PCTL0_SLICE2_CFG_DS_ALLOW__DS5_MASK 0x00000020L +#define PCTL0_SLICE2_CFG_DS_ALLOW__DS6_MASK 0x00000040L +#define PCTL0_SLICE2_CFG_DS_ALLOW__DS7_MASK 0x00000080L +#define PCTL0_SLICE2_CFG_DS_ALLOW__DS8_MASK 0x00000100L +#define PCTL0_SLICE2_CFG_DS_ALLOW__DS9_MASK 0x00000200L +#define PCTL0_SLICE2_CFG_DS_ALLOW__DS10_MASK 0x00000400L +#define PCTL0_SLICE2_CFG_DS_ALLOW__DS11_MASK 0x00000800L +#define PCTL0_SLICE2_CFG_DS_ALLOW__DS12_MASK 0x00001000L +#define PCTL0_SLICE2_CFG_DS_ALLOW__DS13_MASK 0x00002000L +#define PCTL0_SLICE2_CFG_DS_ALLOW__DS14_MASK 0x00004000L +#define PCTL0_SLICE2_CFG_DS_ALLOW__DS15_MASK 0x00008000L +#define PCTL0_SLICE2_CFG_DS_ALLOW__DS16_MASK 0x00010000L +//PCTL0_SLICE2_CFG_DS_ALLOW_IB +#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS0__SHIFT 0x0 +#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS1__SHIFT 0x1 +#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS2__SHIFT 0x2 +#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS3__SHIFT 0x3 +#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS4__SHIFT 0x4 +#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS5__SHIFT 0x5 +#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS6__SHIFT 0x6 +#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS7__SHIFT 0x7 +#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS8__SHIFT 0x8 +#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS9__SHIFT 0x9 +#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS10__SHIFT 0xa +#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS11__SHIFT 0xb +#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS12__SHIFT 0xc +#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS13__SHIFT 0xd +#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS14__SHIFT 0xe +#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS15__SHIFT 0xf +#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS16__SHIFT 0x10 +#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS0_MASK 0x00000001L +#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS1_MASK 0x00000002L +#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS2_MASK 0x00000004L +#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS3_MASK 0x00000008L +#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS4_MASK 0x00000010L +#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS5_MASK 0x00000020L +#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS6_MASK 0x00000040L +#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS7_MASK 0x00000080L +#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS8_MASK 0x00000100L +#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS9_MASK 0x00000200L +#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS10_MASK 0x00000400L +#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS11_MASK 0x00000800L +#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS12_MASK 0x00001000L +#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS13_MASK 0x00002000L +#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS14_MASK 0x00004000L +#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS15_MASK 0x00008000L +#define PCTL0_SLICE2_CFG_DS_ALLOW_IB__DS16_MASK 0x00010000L +//PCTL0_SLICE3_CFG_DAGB_BUSY +#define 
PCTL0_SLICE3_CFG_DAGB_BUSY__DB_LNCFG__SHIFT 0x0 +#define PCTL0_SLICE3_CFG_DAGB_BUSY__DB_LNCFG_MASK 0xFFFFFFFFL +//PCTL0_SLICE3_CFG_DS_ALLOW +#define PCTL0_SLICE3_CFG_DS_ALLOW__DS0__SHIFT 0x0 +#define PCTL0_SLICE3_CFG_DS_ALLOW__DS1__SHIFT 0x1 +#define PCTL0_SLICE3_CFG_DS_ALLOW__DS2__SHIFT 0x2 +#define PCTL0_SLICE3_CFG_DS_ALLOW__DS3__SHIFT 0x3 +#define PCTL0_SLICE3_CFG_DS_ALLOW__DS4__SHIFT 0x4 +#define PCTL0_SLICE3_CFG_DS_ALLOW__DS5__SHIFT 0x5 +#define PCTL0_SLICE3_CFG_DS_ALLOW__DS6__SHIFT 0x6 +#define PCTL0_SLICE3_CFG_DS_ALLOW__DS7__SHIFT 0x7 +#define PCTL0_SLICE3_CFG_DS_ALLOW__DS8__SHIFT 0x8 +#define PCTL0_SLICE3_CFG_DS_ALLOW__DS9__SHIFT 0x9 +#define PCTL0_SLICE3_CFG_DS_ALLOW__DS10__SHIFT 0xa +#define PCTL0_SLICE3_CFG_DS_ALLOW__DS11__SHIFT 0xb +#define PCTL0_SLICE3_CFG_DS_ALLOW__DS12__SHIFT 0xc +#define PCTL0_SLICE3_CFG_DS_ALLOW__DS13__SHIFT 0xd +#define PCTL0_SLICE3_CFG_DS_ALLOW__DS14__SHIFT 0xe +#define PCTL0_SLICE3_CFG_DS_ALLOW__DS15__SHIFT 0xf +#define PCTL0_SLICE3_CFG_DS_ALLOW__DS16__SHIFT 0x10 +#define PCTL0_SLICE3_CFG_DS_ALLOW__DS0_MASK 0x00000001L +#define PCTL0_SLICE3_CFG_DS_ALLOW__DS1_MASK 0x00000002L +#define PCTL0_SLICE3_CFG_DS_ALLOW__DS2_MASK 0x00000004L +#define PCTL0_SLICE3_CFG_DS_ALLOW__DS3_MASK 0x00000008L +#define PCTL0_SLICE3_CFG_DS_ALLOW__DS4_MASK 0x00000010L +#define PCTL0_SLICE3_CFG_DS_ALLOW__DS5_MASK 0x00000020L +#define PCTL0_SLICE3_CFG_DS_ALLOW__DS6_MASK 0x00000040L +#define PCTL0_SLICE3_CFG_DS_ALLOW__DS7_MASK 0x00000080L +#define PCTL0_SLICE3_CFG_DS_ALLOW__DS8_MASK 0x00000100L +#define PCTL0_SLICE3_CFG_DS_ALLOW__DS9_MASK 0x00000200L +#define PCTL0_SLICE3_CFG_DS_ALLOW__DS10_MASK 0x00000400L +#define PCTL0_SLICE3_CFG_DS_ALLOW__DS11_MASK 0x00000800L +#define PCTL0_SLICE3_CFG_DS_ALLOW__DS12_MASK 0x00001000L +#define PCTL0_SLICE3_CFG_DS_ALLOW__DS13_MASK 0x00002000L +#define PCTL0_SLICE3_CFG_DS_ALLOW__DS14_MASK 0x00004000L +#define PCTL0_SLICE3_CFG_DS_ALLOW__DS15_MASK 0x00008000L +#define PCTL0_SLICE3_CFG_DS_ALLOW__DS16_MASK 0x00010000L +//PCTL0_SLICE3_CFG_DS_ALLOW_IB +#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS0__SHIFT 0x0 +#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS1__SHIFT 0x1 +#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS2__SHIFT 0x2 +#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS3__SHIFT 0x3 +#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS4__SHIFT 0x4 +#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS5__SHIFT 0x5 +#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS6__SHIFT 0x6 +#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS7__SHIFT 0x7 +#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS8__SHIFT 0x8 +#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS9__SHIFT 0x9 +#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS10__SHIFT 0xa +#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS11__SHIFT 0xb +#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS12__SHIFT 0xc +#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS13__SHIFT 0xd +#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS14__SHIFT 0xe +#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS15__SHIFT 0xf +#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS16__SHIFT 0x10 +#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS0_MASK 0x00000001L +#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS1_MASK 0x00000002L +#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS2_MASK 0x00000004L +#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS3_MASK 0x00000008L +#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS4_MASK 0x00000010L +#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS5_MASK 0x00000020L +#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS6_MASK 0x00000040L +#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS7_MASK 0x00000080L +#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS8_MASK 0x00000100L +#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS9_MASK 0x00000200L 
+#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS10_MASK 0x00000400L +#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS11_MASK 0x00000800L +#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS12_MASK 0x00001000L +#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS13_MASK 0x00002000L +#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS14_MASK 0x00004000L +#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS15_MASK 0x00008000L +#define PCTL0_SLICE3_CFG_DS_ALLOW_IB__DS16_MASK 0x00010000L +//PCTL0_SLICE4_CFG_DAGB_BUSY +#define PCTL0_SLICE4_CFG_DAGB_BUSY__DB_LNCFG__SHIFT 0x0 +#define PCTL0_SLICE4_CFG_DAGB_BUSY__DB_LNCFG_MASK 0xFFFFFFFFL +//PCTL0_SLICE4_CFG_DS_ALLOW +#define PCTL0_SLICE4_CFG_DS_ALLOW__DS0__SHIFT 0x0 +#define PCTL0_SLICE4_CFG_DS_ALLOW__DS1__SHIFT 0x1 +#define PCTL0_SLICE4_CFG_DS_ALLOW__DS2__SHIFT 0x2 +#define PCTL0_SLICE4_CFG_DS_ALLOW__DS3__SHIFT 0x3 +#define PCTL0_SLICE4_CFG_DS_ALLOW__DS4__SHIFT 0x4 +#define PCTL0_SLICE4_CFG_DS_ALLOW__DS5__SHIFT 0x5 +#define PCTL0_SLICE4_CFG_DS_ALLOW__DS6__SHIFT 0x6 +#define PCTL0_SLICE4_CFG_DS_ALLOW__DS7__SHIFT 0x7 +#define PCTL0_SLICE4_CFG_DS_ALLOW__DS8__SHIFT 0x8 +#define PCTL0_SLICE4_CFG_DS_ALLOW__DS9__SHIFT 0x9 +#define PCTL0_SLICE4_CFG_DS_ALLOW__DS10__SHIFT 0xa +#define PCTL0_SLICE4_CFG_DS_ALLOW__DS11__SHIFT 0xb +#define PCTL0_SLICE4_CFG_DS_ALLOW__DS12__SHIFT 0xc +#define PCTL0_SLICE4_CFG_DS_ALLOW__DS13__SHIFT 0xd +#define PCTL0_SLICE4_CFG_DS_ALLOW__DS14__SHIFT 0xe +#define PCTL0_SLICE4_CFG_DS_ALLOW__DS15__SHIFT 0xf +#define PCTL0_SLICE4_CFG_DS_ALLOW__DS16__SHIFT 0x10 +#define PCTL0_SLICE4_CFG_DS_ALLOW__DS0_MASK 0x00000001L +#define PCTL0_SLICE4_CFG_DS_ALLOW__DS1_MASK 0x00000002L +#define PCTL0_SLICE4_CFG_DS_ALLOW__DS2_MASK 0x00000004L +#define PCTL0_SLICE4_CFG_DS_ALLOW__DS3_MASK 0x00000008L +#define PCTL0_SLICE4_CFG_DS_ALLOW__DS4_MASK 0x00000010L +#define PCTL0_SLICE4_CFG_DS_ALLOW__DS5_MASK 0x00000020L +#define PCTL0_SLICE4_CFG_DS_ALLOW__DS6_MASK 0x00000040L +#define PCTL0_SLICE4_CFG_DS_ALLOW__DS7_MASK 0x00000080L +#define PCTL0_SLICE4_CFG_DS_ALLOW__DS8_MASK 0x00000100L +#define PCTL0_SLICE4_CFG_DS_ALLOW__DS9_MASK 0x00000200L +#define PCTL0_SLICE4_CFG_DS_ALLOW__DS10_MASK 0x00000400L +#define PCTL0_SLICE4_CFG_DS_ALLOW__DS11_MASK 0x00000800L +#define PCTL0_SLICE4_CFG_DS_ALLOW__DS12_MASK 0x00001000L +#define PCTL0_SLICE4_CFG_DS_ALLOW__DS13_MASK 0x00002000L +#define PCTL0_SLICE4_CFG_DS_ALLOW__DS14_MASK 0x00004000L +#define PCTL0_SLICE4_CFG_DS_ALLOW__DS15_MASK 0x00008000L +#define PCTL0_SLICE4_CFG_DS_ALLOW__DS16_MASK 0x00010000L +//PCTL0_SLICE4_CFG_DS_ALLOW_IB +#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS0__SHIFT 0x0 +#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS1__SHIFT 0x1 +#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS2__SHIFT 0x2 +#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS3__SHIFT 0x3 +#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS4__SHIFT 0x4 +#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS5__SHIFT 0x5 +#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS6__SHIFT 0x6 +#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS7__SHIFT 0x7 +#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS8__SHIFT 0x8 +#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS9__SHIFT 0x9 +#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS10__SHIFT 0xa +#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS11__SHIFT 0xb +#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS12__SHIFT 0xc +#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS13__SHIFT 0xd +#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS14__SHIFT 0xe +#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS15__SHIFT 0xf +#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS16__SHIFT 0x10 +#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS0_MASK 0x00000001L +#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS1_MASK 0x00000002L +#define 
PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS2_MASK 0x00000004L +#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS3_MASK 0x00000008L +#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS4_MASK 0x00000010L +#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS5_MASK 0x00000020L +#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS6_MASK 0x00000040L +#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS7_MASK 0x00000080L +#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS8_MASK 0x00000100L +#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS9_MASK 0x00000200L +#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS10_MASK 0x00000400L +#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS11_MASK 0x00000800L +#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS12_MASK 0x00001000L +#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS13_MASK 0x00002000L +#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS14_MASK 0x00004000L +#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS15_MASK 0x00008000L +#define PCTL0_SLICE4_CFG_DS_ALLOW_IB__DS16_MASK 0x00010000L +//PCTL0_UTCL2_MISC +#define PCTL0_UTCL2_MISC__CRITICAL_REGS_LOCK__SHIFT 0xb +#define PCTL0_UTCL2_MISC__TILE_IDLE_THRESHOLD__SHIFT 0xc +#define PCTL0_UTCL2_MISC__RENG_MEM_LS_ENABLE__SHIFT 0xf +#define PCTL0_UTCL2_MISC__STCTRL_FORCE_PGFSM_CMD_DONE__SHIFT 0x10 +#define PCTL0_UTCL2_MISC__RENG_EXECUTE_ON_REG_UPDATE__SHIFT 0x11 +#define PCTL0_UTCL2_MISC__RD_TIMER_ENABLE__SHIFT 0x12 +#define PCTL0_UTCL2_MISC__CRITICAL_REGS_LOCK_MASK 0x00000800L +#define PCTL0_UTCL2_MISC__TILE_IDLE_THRESHOLD_MASK 0x00007000L +#define PCTL0_UTCL2_MISC__RENG_MEM_LS_ENABLE_MASK 0x00008000L +#define PCTL0_UTCL2_MISC__STCTRL_FORCE_PGFSM_CMD_DONE_MASK 0x00010000L +#define PCTL0_UTCL2_MISC__RENG_EXECUTE_ON_REG_UPDATE_MASK 0x00020000L +#define PCTL0_UTCL2_MISC__RD_TIMER_ENABLE_MASK 0x00040000L +//PCTL0_SLICE0_MISC +#define PCTL0_SLICE0_MISC__CRITICAL_REGS_LOCK__SHIFT 0xa +#define PCTL0_SLICE0_MISC__TILE_IDLE_THRESHOLD__SHIFT 0xb +#define PCTL0_SLICE0_MISC__RENG_MEM_LS_ENABLE__SHIFT 0xe +#define PCTL0_SLICE0_MISC__STCTRL_FORCE_PGFSM_CMD_DONE__SHIFT 0xf +#define PCTL0_SLICE0_MISC__DEEPSLEEP_DISCSDP__SHIFT 0x10 +#define PCTL0_SLICE0_MISC__RENG_EXECUTE_ON_REG_UPDATE__SHIFT 0x11 +#define PCTL0_SLICE0_MISC__RD_TIMER_ENABLE__SHIFT 0x12 +#define PCTL0_SLICE0_MISC__CRITICAL_REGS_LOCK_MASK 0x00000400L +#define PCTL0_SLICE0_MISC__TILE_IDLE_THRESHOLD_MASK 0x00003800L +#define PCTL0_SLICE0_MISC__RENG_MEM_LS_ENABLE_MASK 0x00004000L +#define PCTL0_SLICE0_MISC__STCTRL_FORCE_PGFSM_CMD_DONE_MASK 0x00008000L +#define PCTL0_SLICE0_MISC__DEEPSLEEP_DISCSDP_MASK 0x00010000L +#define PCTL0_SLICE0_MISC__RENG_EXECUTE_ON_REG_UPDATE_MASK 0x00020000L +#define PCTL0_SLICE0_MISC__RD_TIMER_ENABLE_MASK 0x00040000L +//PCTL0_SLICE1_MISC +#define PCTL0_SLICE1_MISC__CRITICAL_REGS_LOCK__SHIFT 0xa +#define PCTL0_SLICE1_MISC__TILE_IDLE_THRESHOLD__SHIFT 0xb +#define PCTL0_SLICE1_MISC__RENG_MEM_LS_ENABLE__SHIFT 0xe +#define PCTL0_SLICE1_MISC__STCTRL_FORCE_PGFSM_CMD_DONE__SHIFT 0xf +#define PCTL0_SLICE1_MISC__DEEPSLEEP_DISCSDP__SHIFT 0x10 +#define PCTL0_SLICE1_MISC__RENG_EXECUTE_ON_REG_UPDATE__SHIFT 0x11 +#define PCTL0_SLICE1_MISC__RD_TIMER_ENABLE__SHIFT 0x12 +#define PCTL0_SLICE1_MISC__CRITICAL_REGS_LOCK_MASK 0x00000400L +#define PCTL0_SLICE1_MISC__TILE_IDLE_THRESHOLD_MASK 0x00003800L +#define PCTL0_SLICE1_MISC__RENG_MEM_LS_ENABLE_MASK 0x00004000L +#define PCTL0_SLICE1_MISC__STCTRL_FORCE_PGFSM_CMD_DONE_MASK 0x00008000L +#define PCTL0_SLICE1_MISC__DEEPSLEEP_DISCSDP_MASK 0x00010000L +#define PCTL0_SLICE1_MISC__RENG_EXECUTE_ON_REG_UPDATE_MASK 0x00020000L +#define PCTL0_SLICE1_MISC__RD_TIMER_ENABLE_MASK 0x00040000L +//PCTL0_SLICE2_MISC +#define 
PCTL0_SLICE2_MISC__CRITICAL_REGS_LOCK__SHIFT 0xa +#define PCTL0_SLICE2_MISC__TILE_IDLE_THRESHOLD__SHIFT 0xb +#define PCTL0_SLICE2_MISC__RENG_MEM_LS_ENABLE__SHIFT 0xe +#define PCTL0_SLICE2_MISC__STCTRL_FORCE_PGFSM_CMD_DONE__SHIFT 0xf +#define PCTL0_SLICE2_MISC__DEEPSLEEP_DISCSDP__SHIFT 0x10 +#define PCTL0_SLICE2_MISC__RENG_EXECUTE_ON_REG_UPDATE__SHIFT 0x11 +#define PCTL0_SLICE2_MISC__RD_TIMER_ENABLE__SHIFT 0x12 +#define PCTL0_SLICE2_MISC__CRITICAL_REGS_LOCK_MASK 0x00000400L +#define PCTL0_SLICE2_MISC__TILE_IDLE_THRESHOLD_MASK 0x00003800L +#define PCTL0_SLICE2_MISC__RENG_MEM_LS_ENABLE_MASK 0x00004000L +#define PCTL0_SLICE2_MISC__STCTRL_FORCE_PGFSM_CMD_DONE_MASK 0x00008000L +#define PCTL0_SLICE2_MISC__DEEPSLEEP_DISCSDP_MASK 0x00010000L +#define PCTL0_SLICE2_MISC__RENG_EXECUTE_ON_REG_UPDATE_MASK 0x00020000L +#define PCTL0_SLICE2_MISC__RD_TIMER_ENABLE_MASK 0x00040000L +//PCTL0_SLICE3_MISC +#define PCTL0_SLICE3_MISC__CRITICAL_REGS_LOCK__SHIFT 0xa +#define PCTL0_SLICE3_MISC__TILE_IDLE_THRESHOLD__SHIFT 0xb +#define PCTL0_SLICE3_MISC__RENG_MEM_LS_ENABLE__SHIFT 0xe +#define PCTL0_SLICE3_MISC__STCTRL_FORCE_PGFSM_CMD_DONE__SHIFT 0xf +#define PCTL0_SLICE3_MISC__DEEPSLEEP_DISCSDP__SHIFT 0x10 +#define PCTL0_SLICE3_MISC__RENG_EXECUTE_ON_REG_UPDATE__SHIFT 0x11 +#define PCTL0_SLICE3_MISC__RD_TIMER_ENABLE__SHIFT 0x12 +#define PCTL0_SLICE3_MISC__CRITICAL_REGS_LOCK_MASK 0x00000400L +#define PCTL0_SLICE3_MISC__TILE_IDLE_THRESHOLD_MASK 0x00003800L +#define PCTL0_SLICE3_MISC__RENG_MEM_LS_ENABLE_MASK 0x00004000L +#define PCTL0_SLICE3_MISC__STCTRL_FORCE_PGFSM_CMD_DONE_MASK 0x00008000L +#define PCTL0_SLICE3_MISC__DEEPSLEEP_DISCSDP_MASK 0x00010000L +#define PCTL0_SLICE3_MISC__RENG_EXECUTE_ON_REG_UPDATE_MASK 0x00020000L +#define PCTL0_SLICE3_MISC__RD_TIMER_ENABLE_MASK 0x00040000L +//PCTL0_SLICE4_MISC +#define PCTL0_SLICE4_MISC__CRITICAL_REGS_LOCK__SHIFT 0xa +#define PCTL0_SLICE4_MISC__TILE_IDLE_THRESHOLD__SHIFT 0xb +#define PCTL0_SLICE4_MISC__RENG_MEM_LS_ENABLE__SHIFT 0xe +#define PCTL0_SLICE4_MISC__STCTRL_FORCE_PGFSM_CMD_DONE__SHIFT 0xf +#define PCTL0_SLICE4_MISC__DEEPSLEEP_DISCSDP__SHIFT 0x10 +#define PCTL0_SLICE4_MISC__RENG_EXECUTE_ON_REG_UPDATE__SHIFT 0x11 +#define PCTL0_SLICE4_MISC__RD_TIMER_ENABLE__SHIFT 0x12 +#define PCTL0_SLICE4_MISC__CRITICAL_REGS_LOCK_MASK 0x00000400L +#define PCTL0_SLICE4_MISC__TILE_IDLE_THRESHOLD_MASK 0x00003800L +#define PCTL0_SLICE4_MISC__RENG_MEM_LS_ENABLE_MASK 0x00004000L +#define PCTL0_SLICE4_MISC__STCTRL_FORCE_PGFSM_CMD_DONE_MASK 0x00008000L +#define PCTL0_SLICE4_MISC__DEEPSLEEP_DISCSDP_MASK 0x00010000L +#define PCTL0_SLICE4_MISC__RENG_EXECUTE_ON_REG_UPDATE_MASK 0x00020000L +#define PCTL0_SLICE4_MISC__RD_TIMER_ENABLE_MASK 0x00040000L +//PCTL0_UTCL2_RENG_EXECUTE +#define PCTL0_UTCL2_RENG_EXECUTE__RENG_EXECUTE_NOW__SHIFT 0x0 +#define PCTL0_UTCL2_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE__SHIFT 0x1 +#define PCTL0_UTCL2_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR__SHIFT 0x2 +#define PCTL0_UTCL2_RENG_EXECUTE__RENG_EXECUTE_END_PTR__SHIFT 0xd +#define PCTL0_UTCL2_RENG_EXECUTE__RENG_EXECUTE_NOW_MASK 0x00000001L +#define PCTL0_UTCL2_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE_MASK 0x00000002L +#define PCTL0_UTCL2_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR_MASK 0x00001FFCL +#define PCTL0_UTCL2_RENG_EXECUTE__RENG_EXECUTE_END_PTR_MASK 0x00FFE000L +//PCTL0_SLICE0_RENG_EXECUTE +#define PCTL0_SLICE0_RENG_EXECUTE__RENG_EXECUTE_NOW__SHIFT 0x0 +#define PCTL0_SLICE0_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE__SHIFT 0x1 +#define PCTL0_SLICE0_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR__SHIFT 0x2 +#define 
PCTL0_SLICE0_RENG_EXECUTE__RENG_EXECUTE_END_PTR__SHIFT 0xc +#define PCTL0_SLICE0_RENG_EXECUTE__RENG_EXECUTE_NOW_MASK 0x00000001L +#define PCTL0_SLICE0_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE_MASK 0x00000002L +#define PCTL0_SLICE0_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR_MASK 0x00000FFCL +#define PCTL0_SLICE0_RENG_EXECUTE__RENG_EXECUTE_END_PTR_MASK 0x003FF000L +//PCTL0_SLICE1_RENG_EXECUTE +#define PCTL0_SLICE1_RENG_EXECUTE__RENG_EXECUTE_NOW__SHIFT 0x0 +#define PCTL0_SLICE1_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE__SHIFT 0x1 +#define PCTL0_SLICE1_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR__SHIFT 0x2 +#define PCTL0_SLICE1_RENG_EXECUTE__RENG_EXECUTE_END_PTR__SHIFT 0xc +#define PCTL0_SLICE1_RENG_EXECUTE__RENG_EXECUTE_NOW_MASK 0x00000001L +#define PCTL0_SLICE1_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE_MASK 0x00000002L +#define PCTL0_SLICE1_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR_MASK 0x00000FFCL +#define PCTL0_SLICE1_RENG_EXECUTE__RENG_EXECUTE_END_PTR_MASK 0x003FF000L +//PCTL0_SLICE2_RENG_EXECUTE +#define PCTL0_SLICE2_RENG_EXECUTE__RENG_EXECUTE_NOW__SHIFT 0x0 +#define PCTL0_SLICE2_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE__SHIFT 0x1 +#define PCTL0_SLICE2_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR__SHIFT 0x2 +#define PCTL0_SLICE2_RENG_EXECUTE__RENG_EXECUTE_END_PTR__SHIFT 0xc +#define PCTL0_SLICE2_RENG_EXECUTE__RENG_EXECUTE_NOW_MASK 0x00000001L +#define PCTL0_SLICE2_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE_MASK 0x00000002L +#define PCTL0_SLICE2_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR_MASK 0x00000FFCL +#define PCTL0_SLICE2_RENG_EXECUTE__RENG_EXECUTE_END_PTR_MASK 0x003FF000L +//PCTL0_SLICE3_RENG_EXECUTE +#define PCTL0_SLICE3_RENG_EXECUTE__RENG_EXECUTE_NOW__SHIFT 0x0 +#define PCTL0_SLICE3_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE__SHIFT 0x1 +#define PCTL0_SLICE3_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR__SHIFT 0x2 +#define PCTL0_SLICE3_RENG_EXECUTE__RENG_EXECUTE_END_PTR__SHIFT 0xc +#define PCTL0_SLICE3_RENG_EXECUTE__RENG_EXECUTE_NOW_MASK 0x00000001L +#define PCTL0_SLICE3_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE_MASK 0x00000002L +#define PCTL0_SLICE3_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR_MASK 0x00000FFCL +#define PCTL0_SLICE3_RENG_EXECUTE__RENG_EXECUTE_END_PTR_MASK 0x003FF000L +//PCTL0_SLICE4_RENG_EXECUTE +#define PCTL0_SLICE4_RENG_EXECUTE__RENG_EXECUTE_NOW__SHIFT 0x0 +#define PCTL0_SLICE4_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE__SHIFT 0x1 +#define PCTL0_SLICE4_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR__SHIFT 0x2 +#define PCTL0_SLICE4_RENG_EXECUTE__RENG_EXECUTE_END_PTR__SHIFT 0xc +#define PCTL0_SLICE4_RENG_EXECUTE__RENG_EXECUTE_NOW_MASK 0x00000001L +#define PCTL0_SLICE4_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE_MASK 0x00000002L +#define PCTL0_SLICE4_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR_MASK 0x00000FFCL +#define PCTL0_SLICE4_RENG_EXECUTE__RENG_EXECUTE_END_PTR_MASK 0x003FF000L +//PCTL0_UTCL2_RENG_RAM_INDEX +#define PCTL0_UTCL2_RENG_RAM_INDEX__RENG_RAM_INDEX__SHIFT 0x0 +#define PCTL0_UTCL2_RENG_RAM_INDEX__RENG_RAM_INDEX_MASK 0x000007FFL +//PCTL0_UTCL2_RENG_RAM_DATA +#define PCTL0_UTCL2_RENG_RAM_DATA__RENG_RAM_DATA__SHIFT 0x0 +#define PCTL0_UTCL2_RENG_RAM_DATA__RENG_RAM_DATA_MASK 0xFFFFFFFFL +//PCTL0_SLICE0_RENG_RAM_INDEX +#define PCTL0_SLICE0_RENG_RAM_INDEX__RENG_RAM_INDEX__SHIFT 0x0 +#define PCTL0_SLICE0_RENG_RAM_INDEX__RENG_RAM_INDEX_MASK 0x000003FFL +//PCTL0_SLICE0_RENG_RAM_DATA +#define PCTL0_SLICE0_RENG_RAM_DATA__RENG_RAM_DATA__SHIFT 0x0 +#define PCTL0_SLICE0_RENG_RAM_DATA__RENG_RAM_DATA_MASK 0xFFFFFFFFL +//PCTL0_SLICE1_RENG_RAM_INDEX +#define PCTL0_SLICE1_RENG_RAM_INDEX__RENG_RAM_INDEX__SHIFT 0x0 +#define 
PCTL0_SLICE1_RENG_RAM_INDEX__RENG_RAM_INDEX_MASK 0x000003FFL +//PCTL0_SLICE1_RENG_RAM_DATA +#define PCTL0_SLICE1_RENG_RAM_DATA__RENG_RAM_DATA__SHIFT 0x0 +#define PCTL0_SLICE1_RENG_RAM_DATA__RENG_RAM_DATA_MASK 0xFFFFFFFFL +//PCTL0_SLICE2_RENG_RAM_INDEX +#define PCTL0_SLICE2_RENG_RAM_INDEX__RENG_RAM_INDEX__SHIFT 0x0 +#define PCTL0_SLICE2_RENG_RAM_INDEX__RENG_RAM_INDEX_MASK 0x000003FFL +//PCTL0_SLICE2_RENG_RAM_DATA +#define PCTL0_SLICE2_RENG_RAM_DATA__RENG_RAM_DATA__SHIFT 0x0 +#define PCTL0_SLICE2_RENG_RAM_DATA__RENG_RAM_DATA_MASK 0xFFFFFFFFL +//PCTL0_SLICE3_RENG_RAM_INDEX +#define PCTL0_SLICE3_RENG_RAM_INDEX__RENG_RAM_INDEX__SHIFT 0x0 +#define PCTL0_SLICE3_RENG_RAM_INDEX__RENG_RAM_INDEX_MASK 0x000003FFL +//PCTL0_SLICE3_RENG_RAM_DATA +#define PCTL0_SLICE3_RENG_RAM_DATA__RENG_RAM_DATA__SHIFT 0x0 +#define PCTL0_SLICE3_RENG_RAM_DATA__RENG_RAM_DATA_MASK 0xFFFFFFFFL +//PCTL0_SLICE4_RENG_RAM_INDEX +#define PCTL0_SLICE4_RENG_RAM_INDEX__RENG_RAM_INDEX__SHIFT 0x0 +#define PCTL0_SLICE4_RENG_RAM_INDEX__RENG_RAM_INDEX_MASK 0x000003FFL +//PCTL0_SLICE4_RENG_RAM_DATA +#define PCTL0_SLICE4_RENG_RAM_DATA__RENG_RAM_DATA__SHIFT 0x0 +#define PCTL0_SLICE4_RENG_RAM_DATA__RENG_RAM_DATA_MASK 0xFFFFFFFFL +//PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE0 +#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE1 +#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE2 +#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE3 +#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE4 +#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL0_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET0 +#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0 +#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10 +#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL 
+#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L +//PCTL0_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET1 +#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0 +#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10 +#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL +#define PCTL0_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L +//PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE0 +#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE1 +#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE2 +#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE3 +#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE4 +#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL0_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET0 +#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0 +#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10 +#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL +#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L +//PCTL0_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET1 +#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0 +#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10 +#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL +#define PCTL0_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L +//PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE0 +#define 
PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE1 +#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE2 +#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE3 +#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE4 +#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL0_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET0 +#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0 +#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10 +#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL +#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L +//PCTL0_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET1 +#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0 +#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10 +#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL +#define PCTL0_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L +//PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE0 +#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE1 +#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define 
PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE2 +#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE3 +#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE4 +#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL0_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET0 +#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0 +#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10 +#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL +#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L +//PCTL0_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET1 +#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0 +#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10 +#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL +#define PCTL0_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L +//PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE0 +#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE1 +#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE2 +#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE3 +#define 
PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE4 +#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL0_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET0 +#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0 +#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10 +#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL +#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L +//PCTL0_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET1 +#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0 +#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10 +#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL +#define PCTL0_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L +//PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE0 +#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE1 +#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE2 +#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE3 +#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE4 +#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define 
PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL0_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET0 +#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0 +#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10 +#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL +#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L +//PCTL0_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET1 +#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0 +#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10 +#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL +#define PCTL0_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L + + +// addressBlock: mmhub_l1tlb_vml1dec +//VML1_0_MC_VM_MX_L1_TLB0_STATUS +#define VML1_0_MC_VM_MX_L1_TLB0_STATUS__BUSY__SHIFT 0x0 +#define VML1_0_MC_VM_MX_L1_TLB0_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1 +#define VML1_0_MC_VM_MX_L1_TLB0_STATUS__BUSY_MASK 0x00000001L +#define VML1_0_MC_VM_MX_L1_TLB0_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L +//VML1_0_MC_VM_MX_L1_TLB1_STATUS +#define VML1_0_MC_VM_MX_L1_TLB1_STATUS__BUSY__SHIFT 0x0 +#define VML1_0_MC_VM_MX_L1_TLB1_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1 +#define VML1_0_MC_VM_MX_L1_TLB1_STATUS__BUSY_MASK 0x00000001L +#define VML1_0_MC_VM_MX_L1_TLB1_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L +//VML1_0_MC_VM_MX_L1_TLB2_STATUS +#define VML1_0_MC_VM_MX_L1_TLB2_STATUS__BUSY__SHIFT 0x0 +#define VML1_0_MC_VM_MX_L1_TLB2_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1 +#define VML1_0_MC_VM_MX_L1_TLB2_STATUS__BUSY_MASK 0x00000001L +#define VML1_0_MC_VM_MX_L1_TLB2_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L +//VML1_0_MC_VM_MX_L1_TLB3_STATUS +#define VML1_0_MC_VM_MX_L1_TLB3_STATUS__BUSY__SHIFT 0x0 +#define VML1_0_MC_VM_MX_L1_TLB3_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1 +#define VML1_0_MC_VM_MX_L1_TLB3_STATUS__BUSY_MASK 0x00000001L +#define VML1_0_MC_VM_MX_L1_TLB3_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L +//VML1_0_MC_VM_MX_L1_TLB4_STATUS +#define VML1_0_MC_VM_MX_L1_TLB4_STATUS__BUSY__SHIFT 0x0 +#define VML1_0_MC_VM_MX_L1_TLB4_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1 +#define VML1_0_MC_VM_MX_L1_TLB4_STATUS__BUSY_MASK 0x00000001L +#define VML1_0_MC_VM_MX_L1_TLB4_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L +//VML1_0_MC_VM_MX_L1_TLB5_STATUS +#define VML1_0_MC_VM_MX_L1_TLB5_STATUS__BUSY__SHIFT 0x0 +#define VML1_0_MC_VM_MX_L1_TLB5_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1 +#define VML1_0_MC_VM_MX_L1_TLB5_STATUS__BUSY_MASK 0x00000001L +#define VML1_0_MC_VM_MX_L1_TLB5_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L +//VML1_0_MC_VM_MX_L1_TLB6_STATUS +#define VML1_0_MC_VM_MX_L1_TLB6_STATUS__BUSY__SHIFT 0x0 +#define VML1_0_MC_VM_MX_L1_TLB6_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1 +#define VML1_0_MC_VM_MX_L1_TLB6_STATUS__BUSY_MASK 0x00000001L +#define VML1_0_MC_VM_MX_L1_TLB6_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L +//VML1_0_MC_VM_MX_L1_TLB7_STATUS +#define VML1_0_MC_VM_MX_L1_TLB7_STATUS__BUSY__SHIFT 0x0 +#define VML1_0_MC_VM_MX_L1_TLB7_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1 +#define VML1_0_MC_VM_MX_L1_TLB7_STATUS__BUSY_MASK 0x00000001L +#define VML1_0_MC_VM_MX_L1_TLB7_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L + + +// addressBlock: mmhub_l1tlb_vml1pldec 
+//VML1PL0_MC_VM_MX_L1_PERFCOUNTER0_CFG +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0 +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8 +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18 +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L +//VML1PL0_MC_VM_MX_L1_PERFCOUNTER1_CFG +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0 +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8 +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18 +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L +//VML1PL0_MC_VM_MX_L1_PERFCOUNTER2_CFG +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0 +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8 +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18 +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L +//VML1PL0_MC_VM_MX_L1_PERFCOUNTER3_CFG +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER3_CFG__PERF_SEL__SHIFT 0x0 +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER3_CFG__PERF_SEL_END__SHIFT 0x8 +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER3_CFG__PERF_MODE__SHIFT 0x18 +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER3_CFG__ENABLE__SHIFT 0x1c +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER3_CFG__CLEAR__SHIFT 0x1d +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER3_CFG__PERF_SEL_MASK 0x000000FFL +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER3_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER3_CFG__PERF_MODE_MASK 0x0F000000L +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER3_CFG__ENABLE_MASK 0x10000000L +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER3_CFG__CLEAR_MASK 0x20000000L +//VML1PL0_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0 +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8 +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10 +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18 +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19 +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a +#define 
VML1PL0_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L +#define VML1PL0_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L + + +// addressBlock: mmhub_l1tlb_vml1prdec +//VML1PR0_MC_VM_MX_L1_PERFCOUNTER_LO +#define VML1PR0_MC_VM_MX_L1_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0 +#define VML1PR0_MC_VM_MX_L1_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL +//VML1PR0_MC_VM_MX_L1_PERFCOUNTER_HI +#define VML1PR0_MC_VM_MX_L1_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0 +#define VML1PR0_MC_VM_MX_L1_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10 +#define VML1PR0_MC_VM_MX_L1_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL +#define VML1PR0_MC_VM_MX_L1_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L + + +// addressBlock: mmhub_utcl2_atcl2dec +//ATCL2_0_ATC_L2_CNTL +#define ATCL2_0_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READ_REQUESTS__SHIFT 0x0 +#define ATCL2_0_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITE_REQUESTS__SHIFT 0x3 +#define ATCL2_0_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD__SHIFT 0x6 +#define ATCL2_0_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD__SHIFT 0x7 +#define ATCL2_0_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_READ_REQUESTS__SHIFT 0x8 +#define ATCL2_0_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_WRITE_REQUESTS__SHIFT 0xb +#define ATCL2_0_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD__SHIFT 0xe +#define ATCL2_0_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD__SHIFT 0xf +#define ATCL2_0_ATC_L2_CNTL__CACHE_INVALIDATE_MODE__SHIFT 0x10 +#define ATCL2_0_ATC_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY__SHIFT 0x13 +#define ATCL2_0_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READ_REQUESTS_MASK 0x00000003L +#define ATCL2_0_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITE_REQUESTS_MASK 0x00000018L +#define ATCL2_0_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD_MASK 0x00000040L +#define ATCL2_0_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD_MASK 0x00000080L +#define ATCL2_0_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_READ_REQUESTS_MASK 0x00000300L +#define ATCL2_0_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_WRITE_REQUESTS_MASK 0x00001800L +#define ATCL2_0_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD_MASK 0x00004000L +#define ATCL2_0_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD_MASK 0x00008000L +#define ATCL2_0_ATC_L2_CNTL__CACHE_INVALIDATE_MODE_MASK 0x00070000L +#define ATCL2_0_ATC_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY_MASK 0x00080000L +//ATCL2_0_ATC_L2_CNTL2 +#define ATCL2_0_ATC_L2_CNTL2__BANK_SELECT__SHIFT 0x0 +#define ATCL2_0_ATC_L2_CNTL2__L2_CACHE_UPDATE_MODE__SHIFT 0x6 +#define ATCL2_0_ATC_L2_CNTL2__ENABLE_L2_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0x8 +#define ATCL2_0_ATC_L2_CNTL2__L2_CACHE_SWAP_TAG_INDEX_LSBS__SHIFT 0x9 +#define ATCL2_0_ATC_L2_CNTL2__L2_CACHE_VMID_MODE__SHIFT 0xc +#define ATCL2_0_ATC_L2_CNTL2__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE__SHIFT 0xf +#define ATCL2_0_ATC_L2_CNTL2__L2_BIGK_FRAGMENT_SIZE__SHIFT 0x15 +#define ATCL2_0_ATC_L2_CNTL2__L2_4K_BIGK_SWAP_ENABLE__SHIFT 0x1b +#define ATCL2_0_ATC_L2_CNTL2__BANK_SELECT_MASK 0x0000003FL +#define ATCL2_0_ATC_L2_CNTL2__L2_CACHE_UPDATE_MODE_MASK 0x000000C0L +#define 
ATCL2_0_ATC_L2_CNTL2__ENABLE_L2_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000100L +#define ATCL2_0_ATC_L2_CNTL2__L2_CACHE_SWAP_TAG_INDEX_LSBS_MASK 0x00000E00L +#define ATCL2_0_ATC_L2_CNTL2__L2_CACHE_VMID_MODE_MASK 0x00007000L +#define ATCL2_0_ATC_L2_CNTL2__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE_MASK 0x001F8000L +#define ATCL2_0_ATC_L2_CNTL2__L2_BIGK_FRAGMENT_SIZE_MASK 0x07E00000L +#define ATCL2_0_ATC_L2_CNTL2__L2_4K_BIGK_SWAP_ENABLE_MASK 0x08000000L +//ATCL2_0_ATC_L2_CACHE_DATA0 +#define ATCL2_0_ATC_L2_CACHE_DATA0__DATA_REGISTER_VALID__SHIFT 0x0 +#define ATCL2_0_ATC_L2_CACHE_DATA0__CACHE_ENTRY_VALID__SHIFT 0x1 +#define ATCL2_0_ATC_L2_CACHE_DATA0__CACHED_ATTRIBUTES__SHIFT 0x2 +#define ATCL2_0_ATC_L2_CACHE_DATA0__VIRTUAL_PAGE_ADDRESS_HIGH__SHIFT 0x17 +#define ATCL2_0_ATC_L2_CACHE_DATA0__DATA_REGISTER_VALID_MASK 0x00000001L +#define ATCL2_0_ATC_L2_CACHE_DATA0__CACHE_ENTRY_VALID_MASK 0x00000002L +#define ATCL2_0_ATC_L2_CACHE_DATA0__CACHED_ATTRIBUTES_MASK 0x007FFFFCL +#define ATCL2_0_ATC_L2_CACHE_DATA0__VIRTUAL_PAGE_ADDRESS_HIGH_MASK 0x07800000L +//ATCL2_0_ATC_L2_CACHE_DATA1 +#define ATCL2_0_ATC_L2_CACHE_DATA1__VIRTUAL_PAGE_ADDRESS_LOW__SHIFT 0x0 +#define ATCL2_0_ATC_L2_CACHE_DATA1__VIRTUAL_PAGE_ADDRESS_LOW_MASK 0xFFFFFFFFL +//ATCL2_0_ATC_L2_CACHE_DATA2 +#define ATCL2_0_ATC_L2_CACHE_DATA2__PHYSICAL_PAGE_ADDRESS__SHIFT 0x0 +#define ATCL2_0_ATC_L2_CACHE_DATA2__PHYSICAL_PAGE_ADDRESS_MASK 0xFFFFFFFFL +//ATCL2_0_ATC_L2_CNTL3 +#define ATCL2_0_ATC_L2_CNTL3__DELAY_SEND_INVALIDATION_REQUEST__SHIFT 0x0 +#define ATCL2_0_ATC_L2_CNTL3__ATS_REQUEST_CREDIT_MINUS1__SHIFT 0x3 +#define ATCL2_0_ATC_L2_CNTL3__COMPCLKREQ_OFF_HYSTERESIS__SHIFT 0x9 +#define ATCL2_0_ATC_L2_CNTL3__DELAY_SEND_INVALIDATION_REQUEST_MASK 0x00000007L +#define ATCL2_0_ATC_L2_CNTL3__ATS_REQUEST_CREDIT_MINUS1_MASK 0x000001F8L +#define ATCL2_0_ATC_L2_CNTL3__COMPCLKREQ_OFF_HYSTERESIS_MASK 0x00000E00L +//ATCL2_0_ATC_L2_STATUS +#define ATCL2_0_ATC_L2_STATUS__BUSY__SHIFT 0x0 +#define ATCL2_0_ATC_L2_STATUS__PARITY_ERROR_INFO__SHIFT 0x1 +#define ATCL2_0_ATC_L2_STATUS__BUSY_MASK 0x00000001L +#define ATCL2_0_ATC_L2_STATUS__PARITY_ERROR_INFO_MASK 0x7FFFFFFEL +//ATCL2_0_ATC_L2_STATUS2 +#define ATCL2_0_ATC_L2_STATUS2__IFIFO_NON_FATAL_PARITY_ERROR_INFO__SHIFT 0x0 +#define ATCL2_0_ATC_L2_STATUS2__IFIFO_FATAL_PARITY_ERROR_INFO__SHIFT 0x8 +#define ATCL2_0_ATC_L2_STATUS2__IFIFO_NON_FATAL_PARITY_ERROR_INFO_MASK 0x000000FFL +#define ATCL2_0_ATC_L2_STATUS2__IFIFO_FATAL_PARITY_ERROR_INFO_MASK 0x0000FF00L +//ATCL2_0_ATC_L2_STATUS3 +#define ATCL2_0_ATC_L2_STATUS3__BUSY__SHIFT 0x0 +#define ATCL2_0_ATC_L2_STATUS3__PARITY_ERROR_INFO__SHIFT 0x1 +#define ATCL2_0_ATC_L2_STATUS3__BUSY_MASK 0x00000001L +#define ATCL2_0_ATC_L2_STATUS3__PARITY_ERROR_INFO_MASK 0x7FFFFFFEL +//ATCL2_0_ATC_L2_MISC_CG +#define ATCL2_0_ATC_L2_MISC_CG__OFFDLY__SHIFT 0x6 +#define ATCL2_0_ATC_L2_MISC_CG__ENABLE__SHIFT 0x12 +#define ATCL2_0_ATC_L2_MISC_CG__MEM_LS_ENABLE__SHIFT 0x13 +#define ATCL2_0_ATC_L2_MISC_CG__OFFDLY_MASK 0x00000FC0L +#define ATCL2_0_ATC_L2_MISC_CG__ENABLE_MASK 0x00040000L +#define ATCL2_0_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK 0x00080000L +//ATCL2_0_ATC_L2_MEM_POWER_LS +#define ATCL2_0_ATC_L2_MEM_POWER_LS__LS_SETUP__SHIFT 0x0 +#define ATCL2_0_ATC_L2_MEM_POWER_LS__LS_HOLD__SHIFT 0x6 +#define ATCL2_0_ATC_L2_MEM_POWER_LS__LS_SETUP_MASK 0x0000003FL +#define ATCL2_0_ATC_L2_MEM_POWER_LS__LS_HOLD_MASK 0x00000FC0L +//ATCL2_0_ATC_L2_CGTT_CLK_CTRL +#define ATCL2_0_ATC_L2_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define ATCL2_0_ATC_L2_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define 
ATCL2_0_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf +#define ATCL2_0_ATC_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x10 +#define ATCL2_0_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x18 +#define ATCL2_0_ATC_L2_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define ATCL2_0_ATC_L2_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define ATCL2_0_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L +#define ATCL2_0_ATC_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L +#define ATCL2_0_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L +//ATCL2_0_ATC_L2_CACHE_4K_DSM_INDEX +#define ATCL2_0_ATC_L2_CACHE_4K_DSM_INDEX__INDEX__SHIFT 0x0 +#define ATCL2_0_ATC_L2_CACHE_4K_DSM_INDEX__INDEX_MASK 0x000000FFL +//ATCL2_0_ATC_L2_CACHE_2M_DSM_INDEX +#define ATCL2_0_ATC_L2_CACHE_2M_DSM_INDEX__INDEX__SHIFT 0x0 +#define ATCL2_0_ATC_L2_CACHE_2M_DSM_INDEX__INDEX_MASK 0x000000FFL +//ATCL2_0_ATC_L2_CACHE_4K_DSM_CNTL +#define ATCL2_0_ATC_L2_CACHE_4K_DSM_CNTL__INJECT_DELAY__SHIFT 0x0 +#define ATCL2_0_ATC_L2_CACHE_4K_DSM_CNTL__DSM_IRRITATOR_DATA__SHIFT 0x6 +#define ATCL2_0_ATC_L2_CACHE_4K_DSM_CNTL__ENABLE_SINGLE_WRITE__SHIFT 0x8 +#define ATCL2_0_ATC_L2_CACHE_4K_DSM_CNTL__ENABLE_ERROR_INJECT__SHIFT 0x9 +#define ATCL2_0_ATC_L2_CACHE_4K_DSM_CNTL__SELECT_INJECT_DELAY__SHIFT 0xb +#define ATCL2_0_ATC_L2_CACHE_4K_DSM_CNTL__WRITE_COUNTERS__SHIFT 0xc +#define ATCL2_0_ATC_L2_CACHE_4K_DSM_CNTL__SEC_COUNT__SHIFT 0xd +#define ATCL2_0_ATC_L2_CACHE_4K_DSM_CNTL__DED_COUNT__SHIFT 0xf +#define ATCL2_0_ATC_L2_CACHE_4K_DSM_CNTL__TEST_FUE__SHIFT 0x11 +#define ATCL2_0_ATC_L2_CACHE_4K_DSM_CNTL__INJECT_DELAY_MASK 0x0000003FL +#define ATCL2_0_ATC_L2_CACHE_4K_DSM_CNTL__DSM_IRRITATOR_DATA_MASK 0x000000C0L +#define ATCL2_0_ATC_L2_CACHE_4K_DSM_CNTL__ENABLE_SINGLE_WRITE_MASK 0x00000100L +#define ATCL2_0_ATC_L2_CACHE_4K_DSM_CNTL__ENABLE_ERROR_INJECT_MASK 0x00000600L +#define ATCL2_0_ATC_L2_CACHE_4K_DSM_CNTL__SELECT_INJECT_DELAY_MASK 0x00000800L +#define ATCL2_0_ATC_L2_CACHE_4K_DSM_CNTL__WRITE_COUNTERS_MASK 0x00001000L +#define ATCL2_0_ATC_L2_CACHE_4K_DSM_CNTL__SEC_COUNT_MASK 0x00006000L +#define ATCL2_0_ATC_L2_CACHE_4K_DSM_CNTL__DED_COUNT_MASK 0x00018000L +#define ATCL2_0_ATC_L2_CACHE_4K_DSM_CNTL__TEST_FUE_MASK 0x00020000L +//ATCL2_0_ATC_L2_CACHE_2M_DSM_CNTL +#define ATCL2_0_ATC_L2_CACHE_2M_DSM_CNTL__INJECT_DELAY__SHIFT 0x0 +#define ATCL2_0_ATC_L2_CACHE_2M_DSM_CNTL__DSM_IRRITATOR_DATA__SHIFT 0x6 +#define ATCL2_0_ATC_L2_CACHE_2M_DSM_CNTL__ENABLE_SINGLE_WRITE__SHIFT 0x8 +#define ATCL2_0_ATC_L2_CACHE_2M_DSM_CNTL__ENABLE_ERROR_INJECT__SHIFT 0x9 +#define ATCL2_0_ATC_L2_CACHE_2M_DSM_CNTL__SELECT_INJECT_DELAY__SHIFT 0xb +#define ATCL2_0_ATC_L2_CACHE_2M_DSM_CNTL__WRITE_COUNTERS__SHIFT 0xc +#define ATCL2_0_ATC_L2_CACHE_2M_DSM_CNTL__SEC_COUNT__SHIFT 0xd +#define ATCL2_0_ATC_L2_CACHE_2M_DSM_CNTL__DED_COUNT__SHIFT 0xf +#define ATCL2_0_ATC_L2_CACHE_2M_DSM_CNTL__TEST_FUE__SHIFT 0x11 +#define ATCL2_0_ATC_L2_CACHE_2M_DSM_CNTL__INJECT_DELAY_MASK 0x0000003FL +#define ATCL2_0_ATC_L2_CACHE_2M_DSM_CNTL__DSM_IRRITATOR_DATA_MASK 0x000000C0L +#define ATCL2_0_ATC_L2_CACHE_2M_DSM_CNTL__ENABLE_SINGLE_WRITE_MASK 0x00000100L +#define ATCL2_0_ATC_L2_CACHE_2M_DSM_CNTL__ENABLE_ERROR_INJECT_MASK 0x00000600L +#define ATCL2_0_ATC_L2_CACHE_2M_DSM_CNTL__SELECT_INJECT_DELAY_MASK 0x00000800L +#define ATCL2_0_ATC_L2_CACHE_2M_DSM_CNTL__WRITE_COUNTERS_MASK 0x00001000L +#define ATCL2_0_ATC_L2_CACHE_2M_DSM_CNTL__SEC_COUNT_MASK 0x00006000L +#define ATCL2_0_ATC_L2_CACHE_2M_DSM_CNTL__DED_COUNT_MASK 0x00018000L +#define ATCL2_0_ATC_L2_CACHE_2M_DSM_CNTL__TEST_FUE_MASK 0x00020000L 
+//ATCL2_0_ATC_L2_CNTL4 +#define ATCL2_0_ATC_L2_CNTL4__MM_NONRT_IFIFO_ACTIVE_TRANSACTION_LIMIT__SHIFT 0x0 +#define ATCL2_0_ATC_L2_CNTL4__MM_SOFTRT_IFIFO_ACTIVE_TRANSACTION_LIMIT__SHIFT 0xa +#define ATCL2_0_ATC_L2_CNTL4__MM_NONRT_IFIFO_ACTIVE_TRANSACTION_LIMIT_MASK 0x000003FFL +#define ATCL2_0_ATC_L2_CNTL4__MM_SOFTRT_IFIFO_ACTIVE_TRANSACTION_LIMIT_MASK 0x000FFC00L +//ATCL2_0_ATC_L2_MM_GROUP_RT_CLASSES +#define ATCL2_0_ATC_L2_MM_GROUP_RT_CLASSES__GROUP_RT_CLASS__SHIFT 0x0 +#define ATCL2_0_ATC_L2_MM_GROUP_RT_CLASSES__GROUP_RT_CLASS_MASK 0xFFFFFFFFL + + +// addressBlock: mmhub_utcl2_vml2pfdec +//VML2PF0_VM_L2_CNTL +#define VML2PF0_VM_L2_CNTL__ENABLE_L2_CACHE__SHIFT 0x0 +#define VML2PF0_VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING__SHIFT 0x1 +#define VML2PF0_VM_L2_CNTL__L2_CACHE_PTE_ENDIAN_SWAP_MODE__SHIFT 0x2 +#define VML2PF0_VM_L2_CNTL__L2_CACHE_PDE_ENDIAN_SWAP_MODE__SHIFT 0x4 +#define VML2PF0_VM_L2_CNTL__L2_PDE0_CACHE_TAG_GENERATION_MODE__SHIFT 0x8 +#define VML2PF0_VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0x9 +#define VML2PF0_VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0xa +#define VML2PF0_VM_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY__SHIFT 0xb +#define VML2PF0_VM_L2_CNTL__L2_PDE0_CACHE_SPLIT_MODE__SHIFT 0xc +#define VML2PF0_VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT 0xf +#define VML2PF0_VM_L2_CNTL__PDE_FAULT_CLASSIFICATION__SHIFT 0x12 +#define VML2PF0_VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT 0x13 +#define VML2PF0_VM_L2_CNTL__IDENTITY_MODE_FRAGMENT_SIZE__SHIFT 0x15 +#define VML2PF0_VM_L2_CNTL__L2_PTE_CACHE_ADDR_MODE__SHIFT 0x1a +#define VML2PF0_VM_L2_CNTL__ENABLE_L2_CACHE_MASK 0x00000001L +#define VML2PF0_VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING_MASK 0x00000002L +#define VML2PF0_VM_L2_CNTL__L2_CACHE_PTE_ENDIAN_SWAP_MODE_MASK 0x0000000CL +#define VML2PF0_VM_L2_CNTL__L2_CACHE_PDE_ENDIAN_SWAP_MODE_MASK 0x00000030L +#define VML2PF0_VM_L2_CNTL__L2_PDE0_CACHE_TAG_GENERATION_MODE_MASK 0x00000100L +#define VML2PF0_VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000200L +#define VML2PF0_VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000400L +#define VML2PF0_VM_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY_MASK 0x00000800L +#define VML2PF0_VM_L2_CNTL__L2_PDE0_CACHE_SPLIT_MODE_MASK 0x00007000L +#define VML2PF0_VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE_MASK 0x00038000L +#define VML2PF0_VM_L2_CNTL__PDE_FAULT_CLASSIFICATION_MASK 0x00040000L +#define VML2PF0_VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE_MASK 0x00180000L +#define VML2PF0_VM_L2_CNTL__IDENTITY_MODE_FRAGMENT_SIZE_MASK 0x03E00000L +#define VML2PF0_VM_L2_CNTL__L2_PTE_CACHE_ADDR_MODE_MASK 0x0C000000L +//VML2PF0_VM_L2_CNTL2 +#define VML2PF0_VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS__SHIFT 0x0 +#define VML2PF0_VM_L2_CNTL2__INVALIDATE_L2_CACHE__SHIFT 0x1 +#define VML2PF0_VM_L2_CNTL2__DISABLE_INVALIDATE_PER_DOMAIN__SHIFT 0x15 +#define VML2PF0_VM_L2_CNTL2__DISABLE_BIGK_CACHE_OPTIMIZATION__SHIFT 0x16 +#define VML2PF0_VM_L2_CNTL2__L2_PTE_CACHE_VMID_MODE__SHIFT 0x17 +#define VML2PF0_VM_L2_CNTL2__INVALIDATE_CACHE_MODE__SHIFT 0x1a +#define VML2PF0_VM_L2_CNTL2__PDE_CACHE_EFFECTIVE_SIZE__SHIFT 0x1c +#define VML2PF0_VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS_MASK 0x00000001L +#define VML2PF0_VM_L2_CNTL2__INVALIDATE_L2_CACHE_MASK 0x00000002L +#define VML2PF0_VM_L2_CNTL2__DISABLE_INVALIDATE_PER_DOMAIN_MASK 0x00200000L +#define VML2PF0_VM_L2_CNTL2__DISABLE_BIGK_CACHE_OPTIMIZATION_MASK 0x00400000L +#define VML2PF0_VM_L2_CNTL2__L2_PTE_CACHE_VMID_MODE_MASK 0x03800000L +#define 
VML2PF0_VM_L2_CNTL2__INVALIDATE_CACHE_MODE_MASK 0x0C000000L +#define VML2PF0_VM_L2_CNTL2__PDE_CACHE_EFFECTIVE_SIZE_MASK 0x70000000L +//VML2PF0_VM_L2_CNTL3 +#define VML2PF0_VM_L2_CNTL3__BANK_SELECT__SHIFT 0x0 +#define VML2PF0_VM_L2_CNTL3__L2_CACHE_UPDATE_MODE__SHIFT 0x6 +#define VML2PF0_VM_L2_CNTL3__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE__SHIFT 0x8 +#define VML2PF0_VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0xf +#define VML2PF0_VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY__SHIFT 0x14 +#define VML2PF0_VM_L2_CNTL3__L2_CACHE_4K_EFFECTIVE_SIZE__SHIFT 0x15 +#define VML2PF0_VM_L2_CNTL3__L2_CACHE_BIGK_EFFECTIVE_SIZE__SHIFT 0x18 +#define VML2PF0_VM_L2_CNTL3__L2_CACHE_4K_FORCE_MISS__SHIFT 0x1c +#define VML2PF0_VM_L2_CNTL3__L2_CACHE_BIGK_FORCE_MISS__SHIFT 0x1d +#define VML2PF0_VM_L2_CNTL3__PDE_CACHE_FORCE_MISS__SHIFT 0x1e +#define VML2PF0_VM_L2_CNTL3__L2_CACHE_4K_ASSOCIATIVITY__SHIFT 0x1f +#define VML2PF0_VM_L2_CNTL3__BANK_SELECT_MASK 0x0000003FL +#define VML2PF0_VM_L2_CNTL3__L2_CACHE_UPDATE_MODE_MASK 0x000000C0L +#define VML2PF0_VM_L2_CNTL3__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE_MASK 0x00001F00L +#define VML2PF0_VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000F8000L +#define VML2PF0_VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK 0x00100000L +#define VML2PF0_VM_L2_CNTL3__L2_CACHE_4K_EFFECTIVE_SIZE_MASK 0x00E00000L +#define VML2PF0_VM_L2_CNTL3__L2_CACHE_BIGK_EFFECTIVE_SIZE_MASK 0x0F000000L +#define VML2PF0_VM_L2_CNTL3__L2_CACHE_4K_FORCE_MISS_MASK 0x10000000L +#define VML2PF0_VM_L2_CNTL3__L2_CACHE_BIGK_FORCE_MISS_MASK 0x20000000L +#define VML2PF0_VM_L2_CNTL3__PDE_CACHE_FORCE_MISS_MASK 0x40000000L +#define VML2PF0_VM_L2_CNTL3__L2_CACHE_4K_ASSOCIATIVITY_MASK 0x80000000L +//VML2PF0_VM_L2_STATUS +#define VML2PF0_VM_L2_STATUS__L2_BUSY__SHIFT 0x0 +#define VML2PF0_VM_L2_STATUS__CONTEXT_DOMAIN_BUSY__SHIFT 0x1 +#define VML2PF0_VM_L2_STATUS__FOUND_4K_PTE_CACHE_PARITY_ERRORS__SHIFT 0x11 +#define VML2PF0_VM_L2_STATUS__FOUND_BIGK_PTE_CACHE_PARITY_ERRORS__SHIFT 0x12 +#define VML2PF0_VM_L2_STATUS__FOUND_PDE0_CACHE_PARITY_ERRORS__SHIFT 0x13 +#define VML2PF0_VM_L2_STATUS__FOUND_PDE1_CACHE_PARITY_ERRORS__SHIFT 0x14 +#define VML2PF0_VM_L2_STATUS__FOUND_PDE2_CACHE_PARITY_ERRORS__SHIFT 0x15 +#define VML2PF0_VM_L2_STATUS__L2_BUSY_MASK 0x00000001L +#define VML2PF0_VM_L2_STATUS__CONTEXT_DOMAIN_BUSY_MASK 0x0001FFFEL +#define VML2PF0_VM_L2_STATUS__FOUND_4K_PTE_CACHE_PARITY_ERRORS_MASK 0x00020000L +#define VML2PF0_VM_L2_STATUS__FOUND_BIGK_PTE_CACHE_PARITY_ERRORS_MASK 0x00040000L +#define VML2PF0_VM_L2_STATUS__FOUND_PDE0_CACHE_PARITY_ERRORS_MASK 0x00080000L +#define VML2PF0_VM_L2_STATUS__FOUND_PDE1_CACHE_PARITY_ERRORS_MASK 0x00100000L +#define VML2PF0_VM_L2_STATUS__FOUND_PDE2_CACHE_PARITY_ERRORS_MASK 0x00200000L +//VML2PF0_VM_DUMMY_PAGE_FAULT_CNTL +#define VML2PF0_VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_FAULT_ENABLE__SHIFT 0x0 +#define VML2PF0_VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_ADDRESS_LOGICAL__SHIFT 0x1 +#define VML2PF0_VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_COMPARE_MSBS__SHIFT 0x2 +#define VML2PF0_VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_FAULT_ENABLE_MASK 0x00000001L +#define VML2PF0_VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_ADDRESS_LOGICAL_MASK 0x00000002L +#define VML2PF0_VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_COMPARE_MSBS_MASK 0x000000FCL +//VML2PF0_VM_DUMMY_PAGE_FAULT_ADDR_LO32 +#define VML2PF0_VM_DUMMY_PAGE_FAULT_ADDR_LO32__DUMMY_PAGE_ADDR_LO32__SHIFT 0x0 +#define VML2PF0_VM_DUMMY_PAGE_FAULT_ADDR_LO32__DUMMY_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL +//VML2PF0_VM_DUMMY_PAGE_FAULT_ADDR_HI32 +#define 
VML2PF0_VM_DUMMY_PAGE_FAULT_ADDR_HI32__DUMMY_PAGE_ADDR_HI4__SHIFT 0x0 +#define VML2PF0_VM_DUMMY_PAGE_FAULT_ADDR_HI32__DUMMY_PAGE_ADDR_HI4_MASK 0x0000000FL +//VML2PF0_VM_L2_PROTECTION_FAULT_CNTL +#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x0 +#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__ALLOW_SUBSEQUENT_PROTECTION_FAULT_STATUS_ADDR_UPDATES__SHIFT 0x1 +#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x2 +#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x3 +#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__PDE1_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x4 +#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__PDE2_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x5 +#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x6 +#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__NACK_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x7 +#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x8 +#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x9 +#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa +#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xb +#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc +#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0xd +#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__OTHER_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0x1d +#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_NO_RETRY_FAULT__SHIFT 0x1e +#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_RETRY_FAULT__SHIFT 0x1f +#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00000001L +#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__ALLOW_SUBSEQUENT_PROTECTION_FAULT_STATUS_ADDR_UPDATES_MASK 0x00000002L +#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000004L +#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000008L +#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__PDE1_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000010L +#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__PDE2_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000020L +#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000040L +#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__NACK_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000080L +#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000100L +#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000200L +#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L +#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000800L +#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L +#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0x1FFFE000L +#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__OTHER_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0x20000000L +#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_NO_RETRY_FAULT_MASK 0x40000000L +#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_RETRY_FAULT_MASK 0x80000000L 
+//VML2PF0_VM_L2_PROTECTION_FAULT_CNTL2 +#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL2__CLIENT_ID_PRT_FAULT_INTERRUPT__SHIFT 0x0 +#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL2__OTHER_CLIENT_ID_PRT_FAULT_INTERRUPT__SHIFT 0x10 +#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE__SHIFT 0x11 +#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY__SHIFT 0x12 +#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL2__ENABLE_RETRY_FAULT_INTERRUPT__SHIFT 0x13 +#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL2__CLIENT_ID_PRT_FAULT_INTERRUPT_MASK 0x0000FFFFL +#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL2__OTHER_CLIENT_ID_PRT_FAULT_INTERRUPT_MASK 0x00010000L +#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE_MASK 0x00020000L +#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY_MASK 0x00040000L +#define VML2PF0_VM_L2_PROTECTION_FAULT_CNTL2__ENABLE_RETRY_FAULT_INTERRUPT_MASK 0x00080000L +//VML2PF0_VM_L2_PROTECTION_FAULT_MM_CNTL3 +#define VML2PF0_VM_L2_PROTECTION_FAULT_MM_CNTL3__VML1_READ_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0x0 +#define VML2PF0_VM_L2_PROTECTION_FAULT_MM_CNTL3__VML1_READ_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0xFFFFFFFFL +//VML2PF0_VM_L2_PROTECTION_FAULT_MM_CNTL4 +#define VML2PF0_VM_L2_PROTECTION_FAULT_MM_CNTL4__VML1_WRITE_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0x0 +#define VML2PF0_VM_L2_PROTECTION_FAULT_MM_CNTL4__VML1_WRITE_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0xFFFFFFFFL +//VML2PF0_VM_L2_PROTECTION_FAULT_STATUS +#define VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__MORE_FAULTS__SHIFT 0x0 +#define VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__WALKER_ERROR__SHIFT 0x1 +#define VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__PERMISSION_FAULTS__SHIFT 0x4 +#define VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__MAPPING_ERROR__SHIFT 0x8 +#define VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__CID__SHIFT 0x9 +#define VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__RW__SHIFT 0x12 +#define VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__ATOMIC__SHIFT 0x13 +#define VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__VMID__SHIFT 0x14 +#define VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__VF__SHIFT 0x18 +#define VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__VFID__SHIFT 0x19 +#define VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__MORE_FAULTS_MASK 0x00000001L +#define VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__WALKER_ERROR_MASK 0x0000000EL +#define VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__PERMISSION_FAULTS_MASK 0x000000F0L +#define VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__MAPPING_ERROR_MASK 0x00000100L +#define VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__CID_MASK 0x0003FE00L +#define VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__RW_MASK 0x00040000L +#define VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__ATOMIC_MASK 0x00080000L +#define VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__VMID_MASK 0x00F00000L +#define VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__VF_MASK 0x01000000L +#define VML2PF0_VM_L2_PROTECTION_FAULT_STATUS__VFID_MASK 0x1E000000L +//VML2PF0_VM_L2_PROTECTION_FAULT_ADDR_LO32 +#define VML2PF0_VM_L2_PROTECTION_FAULT_ADDR_LO32__LOGICAL_PAGE_ADDR_LO32__SHIFT 0x0 +#define VML2PF0_VM_L2_PROTECTION_FAULT_ADDR_LO32__LOGICAL_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL +//VML2PF0_VM_L2_PROTECTION_FAULT_ADDR_HI32 +#define VML2PF0_VM_L2_PROTECTION_FAULT_ADDR_HI32__LOGICAL_PAGE_ADDR_HI4__SHIFT 0x0 +#define VML2PF0_VM_L2_PROTECTION_FAULT_ADDR_HI32__LOGICAL_PAGE_ADDR_HI4_MASK 0x0000000FL +//VML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32 +#define VML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32__PHYSICAL_PAGE_ADDR_LO32__SHIFT 0x0 +#define 
VML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32__PHYSICAL_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL +//VML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32 +#define VML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32__PHYSICAL_PAGE_ADDR_HI4__SHIFT 0x0 +#define VML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32__PHYSICAL_PAGE_ADDR_HI4_MASK 0x0000000FL +//VML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32 +#define VML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32 +#define VML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32 +#define VML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32 +#define VML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2PF0_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32 +#define VML2PF0_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32__PHYSICAL_PAGE_OFFSET_LO32__SHIFT 0x0 +#define VML2PF0_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32__PHYSICAL_PAGE_OFFSET_LO32_MASK 0xFFFFFFFFL +//VML2PF0_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32 +#define VML2PF0_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32__PHYSICAL_PAGE_OFFSET_HI4__SHIFT 0x0 +#define VML2PF0_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32__PHYSICAL_PAGE_OFFSET_HI4_MASK 0x0000000FL +//VML2PF0_VM_L2_CNTL4 +#define VML2PF0_VM_L2_CNTL4__L2_CACHE_4K_PARTITION_COUNT__SHIFT 0x0 +#define VML2PF0_VM_L2_CNTL4__VMC_TAP_PDE_REQUEST_PHYSICAL__SHIFT 0x6 +#define VML2PF0_VM_L2_CNTL4__VMC_TAP_PTE_REQUEST_PHYSICAL__SHIFT 0x7 +#define VML2PF0_VM_L2_CNTL4__MM_NONRT_IFIFO_ACTIVE_TRANSACTION_LIMIT__SHIFT 0x8 +#define VML2PF0_VM_L2_CNTL4__MM_SOFTRT_IFIFO_ACTIVE_TRANSACTION_LIMIT__SHIFT 0x12 +#define VML2PF0_VM_L2_CNTL4__BPM_CGCGLS_OVERRIDE__SHIFT 0x1c +#define VML2PF0_VM_L2_CNTL4__L2_CACHE_4K_PARTITION_COUNT_MASK 0x0000003FL +#define VML2PF0_VM_L2_CNTL4__VMC_TAP_PDE_REQUEST_PHYSICAL_MASK 0x00000040L +#define VML2PF0_VM_L2_CNTL4__VMC_TAP_PTE_REQUEST_PHYSICAL_MASK 0x00000080L +#define VML2PF0_VM_L2_CNTL4__MM_NONRT_IFIFO_ACTIVE_TRANSACTION_LIMIT_MASK 0x0003FF00L +#define VML2PF0_VM_L2_CNTL4__MM_SOFTRT_IFIFO_ACTIVE_TRANSACTION_LIMIT_MASK 0x0FFC0000L +#define VML2PF0_VM_L2_CNTL4__BPM_CGCGLS_OVERRIDE_MASK 0x10000000L +//VML2PF0_VM_L2_MM_GROUP_RT_CLASSES +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_0_RT_CLASS__SHIFT 0x0 +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_1_RT_CLASS__SHIFT 0x1 +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_2_RT_CLASS__SHIFT 0x2 +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_3_RT_CLASS__SHIFT 0x3 +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_4_RT_CLASS__SHIFT 0x4 +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_5_RT_CLASS__SHIFT 0x5 +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_6_RT_CLASS__SHIFT 0x6 +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_7_RT_CLASS__SHIFT 0x7 +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_8_RT_CLASS__SHIFT 0x8 +#define 
VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_9_RT_CLASS__SHIFT 0x9 +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_10_RT_CLASS__SHIFT 0xa +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_11_RT_CLASS__SHIFT 0xb +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_12_RT_CLASS__SHIFT 0xc +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_13_RT_CLASS__SHIFT 0xd +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_14_RT_CLASS__SHIFT 0xe +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_15_RT_CLASS__SHIFT 0xf +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_16_RT_CLASS__SHIFT 0x10 +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_17_RT_CLASS__SHIFT 0x11 +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_18_RT_CLASS__SHIFT 0x12 +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_19_RT_CLASS__SHIFT 0x13 +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_20_RT_CLASS__SHIFT 0x14 +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_21_RT_CLASS__SHIFT 0x15 +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_22_RT_CLASS__SHIFT 0x16 +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_23_RT_CLASS__SHIFT 0x17 +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_24_RT_CLASS__SHIFT 0x18 +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_25_RT_CLASS__SHIFT 0x19 +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_26_RT_CLASS__SHIFT 0x1a +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_27_RT_CLASS__SHIFT 0x1b +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_28_RT_CLASS__SHIFT 0x1c +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_29_RT_CLASS__SHIFT 0x1d +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_30_RT_CLASS__SHIFT 0x1e +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_31_RT_CLASS__SHIFT 0x1f +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_0_RT_CLASS_MASK 0x00000001L +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_1_RT_CLASS_MASK 0x00000002L +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_2_RT_CLASS_MASK 0x00000004L +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_3_RT_CLASS_MASK 0x00000008L +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_4_RT_CLASS_MASK 0x00000010L +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_5_RT_CLASS_MASK 0x00000020L +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_6_RT_CLASS_MASK 0x00000040L +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_7_RT_CLASS_MASK 0x00000080L +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_8_RT_CLASS_MASK 0x00000100L +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_9_RT_CLASS_MASK 0x00000200L +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_10_RT_CLASS_MASK 0x00000400L +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_11_RT_CLASS_MASK 0x00000800L +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_12_RT_CLASS_MASK 0x00001000L +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_13_RT_CLASS_MASK 0x00002000L +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_14_RT_CLASS_MASK 0x00004000L +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_15_RT_CLASS_MASK 0x00008000L +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_16_RT_CLASS_MASK 0x00010000L +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_17_RT_CLASS_MASK 0x00020000L +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_18_RT_CLASS_MASK 0x00040000L +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_19_RT_CLASS_MASK 0x00080000L +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_20_RT_CLASS_MASK 0x00100000L +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_21_RT_CLASS_MASK 0x00200000L +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_22_RT_CLASS_MASK 0x00400000L +#define 
VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_23_RT_CLASS_MASK 0x00800000L +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_24_RT_CLASS_MASK 0x01000000L +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_25_RT_CLASS_MASK 0x02000000L +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_26_RT_CLASS_MASK 0x04000000L +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_27_RT_CLASS_MASK 0x08000000L +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_28_RT_CLASS_MASK 0x10000000L +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_29_RT_CLASS_MASK 0x20000000L +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_30_RT_CLASS_MASK 0x40000000L +#define VML2PF0_VM_L2_MM_GROUP_RT_CLASSES__GROUP_31_RT_CLASS_MASK 0x80000000L +//VML2PF0_VM_L2_BANK_SELECT_RESERVED_CID +#define VML2PF0_VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_READ_CLIENT_ID__SHIFT 0x0 +#define VML2PF0_VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_WRITE_CLIENT_ID__SHIFT 0xa +#define VML2PF0_VM_L2_BANK_SELECT_RESERVED_CID__ENABLE__SHIFT 0x14 +#define VML2PF0_VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_INVALIDATION_MODE__SHIFT 0x18 +#define VML2PF0_VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_PRIVATE_INVALIDATION__SHIFT 0x19 +#define VML2PF0_VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_READ_CLIENT_ID_MASK 0x000001FFL +#define VML2PF0_VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_WRITE_CLIENT_ID_MASK 0x0007FC00L +#define VML2PF0_VM_L2_BANK_SELECT_RESERVED_CID__ENABLE_MASK 0x00100000L +#define VML2PF0_VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_INVALIDATION_MODE_MASK 0x01000000L +#define VML2PF0_VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_PRIVATE_INVALIDATION_MASK 0x02000000L +//VML2PF0_VM_L2_BANK_SELECT_RESERVED_CID2 +#define VML2PF0_VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_READ_CLIENT_ID__SHIFT 0x0 +#define VML2PF0_VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_WRITE_CLIENT_ID__SHIFT 0xa +#define VML2PF0_VM_L2_BANK_SELECT_RESERVED_CID2__ENABLE__SHIFT 0x14 +#define VML2PF0_VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_INVALIDATION_MODE__SHIFT 0x18 +#define VML2PF0_VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_PRIVATE_INVALIDATION__SHIFT 0x19 +#define VML2PF0_VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_READ_CLIENT_ID_MASK 0x000001FFL +#define VML2PF0_VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_WRITE_CLIENT_ID_MASK 0x0007FC00L +#define VML2PF0_VM_L2_BANK_SELECT_RESERVED_CID2__ENABLE_MASK 0x00100000L +#define VML2PF0_VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_INVALIDATION_MODE_MASK 0x01000000L +#define VML2PF0_VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_PRIVATE_INVALIDATION_MASK 0x02000000L +//VML2PF0_VM_L2_CACHE_PARITY_CNTL +#define VML2PF0_VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_4K_PTE_CACHES__SHIFT 0x0 +#define VML2PF0_VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_BIGK_PTE_CACHES__SHIFT 0x1 +#define VML2PF0_VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_PDE_CACHES__SHIFT 0x2 +#define VML2PF0_VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_4K_PTE_CACHE__SHIFT 0x3 +#define VML2PF0_VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_BIGK_PTE_CACHE__SHIFT 0x4 +#define VML2PF0_VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_PDE_CACHE__SHIFT 0x5 +#define VML2PF0_VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_BANK__SHIFT 0x6 +#define VML2PF0_VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_NUMBER__SHIFT 0x9 +#define VML2PF0_VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_ASSOC__SHIFT 0xc +#define VML2PF0_VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_4K_PTE_CACHES_MASK 0x00000001L +#define VML2PF0_VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_BIGK_PTE_CACHES_MASK 0x00000002L 
+#define VML2PF0_VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_PDE_CACHES_MASK 0x00000004L +#define VML2PF0_VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_4K_PTE_CACHE_MASK 0x00000008L +#define VML2PF0_VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_BIGK_PTE_CACHE_MASK 0x00000010L +#define VML2PF0_VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_PDE_CACHE_MASK 0x00000020L +#define VML2PF0_VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_BANK_MASK 0x000001C0L +#define VML2PF0_VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_NUMBER_MASK 0x00000E00L +#define VML2PF0_VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_ASSOC_MASK 0x0000F000L +//VML2PF0_VM_L2_CGTT_CLK_CTRL +#define VML2PF0_VM_L2_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define VML2PF0_VM_L2_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define VML2PF0_VM_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf +#define VML2PF0_VM_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x10 +#define VML2PF0_VM_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x18 +#define VML2PF0_VM_L2_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define VML2PF0_VM_L2_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define VML2PF0_VM_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L +#define VML2PF0_VM_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L +#define VML2PF0_VM_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L + + +// addressBlock: mmhub_utcl2_vml2vcdec +//VML2VC0_VM_CONTEXT0_CNTL +#define VML2VC0_VM_CONTEXT0_CNTL__ENABLE_CONTEXT__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1 +#define VML2VC0_VM_CONTEXT0_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3 +#define VML2VC0_VM_CONTEXT0_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7 +#define VML2VC0_VM_CONTEXT0_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8 +#define VML2VC0_VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9 +#define VML2VC0_VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa +#define VML2VC0_VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb +#define VML2VC0_VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc +#define VML2VC0_VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd +#define VML2VC0_VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe +#define VML2VC0_VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf +#define VML2VC0_VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10 +#define VML2VC0_VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11 +#define VML2VC0_VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12 +#define VML2VC0_VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13 +#define VML2VC0_VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14 +#define VML2VC0_VM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15 +#define VML2VC0_VM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16 +#define VML2VC0_VM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK 0x00000001L +#define VML2VC0_VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L +#define VML2VC0_VM_CONTEXT0_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L +#define VML2VC0_VM_CONTEXT0_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L +#define VML2VC0_VM_CONTEXT0_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L +#define VML2VC0_VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L +#define VML2VC0_VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L +#define 
VML2VC0_VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L +#define VML2VC0_VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L +#define VML2VC0_VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L +#define VML2VC0_VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L +#define VML2VC0_VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L +#define VML2VC0_VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L +#define VML2VC0_VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L +#define VML2VC0_VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L +#define VML2VC0_VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L +#define VML2VC0_VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L +#define VML2VC0_VM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L +#define VML2VC0_VM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L +//VML2VC0_VM_CONTEXT1_CNTL +#define VML2VC0_VM_CONTEXT1_CNTL__ENABLE_CONTEXT__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1 +#define VML2VC0_VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3 +#define VML2VC0_VM_CONTEXT1_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7 +#define VML2VC0_VM_CONTEXT1_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8 +#define VML2VC0_VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9 +#define VML2VC0_VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa +#define VML2VC0_VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb +#define VML2VC0_VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc +#define VML2VC0_VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd +#define VML2VC0_VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe +#define VML2VC0_VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf +#define VML2VC0_VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10 +#define VML2VC0_VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11 +#define VML2VC0_VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12 +#define VML2VC0_VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13 +#define VML2VC0_VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14 +#define VML2VC0_VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15 +#define VML2VC0_VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16 +#define VML2VC0_VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK 0x00000001L +#define VML2VC0_VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L +#define VML2VC0_VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L +#define VML2VC0_VM_CONTEXT1_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L +#define VML2VC0_VM_CONTEXT1_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L +#define VML2VC0_VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L +#define VML2VC0_VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L +#define VML2VC0_VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L +#define VML2VC0_VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L +#define VML2VC0_VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L +#define 
VML2VC0_VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L +#define VML2VC0_VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L +#define VML2VC0_VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L +#define VML2VC0_VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L +#define VML2VC0_VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L +#define VML2VC0_VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L +#define VML2VC0_VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L +#define VML2VC0_VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L +#define VML2VC0_VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L +//VML2VC0_VM_CONTEXT2_CNTL +#define VML2VC0_VM_CONTEXT2_CNTL__ENABLE_CONTEXT__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT2_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1 +#define VML2VC0_VM_CONTEXT2_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3 +#define VML2VC0_VM_CONTEXT2_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7 +#define VML2VC0_VM_CONTEXT2_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8 +#define VML2VC0_VM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9 +#define VML2VC0_VM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa +#define VML2VC0_VM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb +#define VML2VC0_VM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc +#define VML2VC0_VM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd +#define VML2VC0_VM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe +#define VML2VC0_VM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf +#define VML2VC0_VM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10 +#define VML2VC0_VM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11 +#define VML2VC0_VM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12 +#define VML2VC0_VM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13 +#define VML2VC0_VM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14 +#define VML2VC0_VM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15 +#define VML2VC0_VM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16 +#define VML2VC0_VM_CONTEXT2_CNTL__ENABLE_CONTEXT_MASK 0x00000001L +#define VML2VC0_VM_CONTEXT2_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L +#define VML2VC0_VM_CONTEXT2_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L +#define VML2VC0_VM_CONTEXT2_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L +#define VML2VC0_VM_CONTEXT2_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L +#define VML2VC0_VM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L +#define VML2VC0_VM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L +#define VML2VC0_VM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L +#define VML2VC0_VM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L +#define VML2VC0_VM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L +#define VML2VC0_VM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L +#define VML2VC0_VM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L +#define VML2VC0_VM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L +#define 
VML2VC0_VM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L +#define VML2VC0_VM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L +#define VML2VC0_VM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L +#define VML2VC0_VM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L +#define VML2VC0_VM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L +#define VML2VC0_VM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L +//VML2VC0_VM_CONTEXT3_CNTL +#define VML2VC0_VM_CONTEXT3_CNTL__ENABLE_CONTEXT__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT3_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1 +#define VML2VC0_VM_CONTEXT3_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3 +#define VML2VC0_VM_CONTEXT3_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7 +#define VML2VC0_VM_CONTEXT3_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8 +#define VML2VC0_VM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9 +#define VML2VC0_VM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa +#define VML2VC0_VM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb +#define VML2VC0_VM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc +#define VML2VC0_VM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd +#define VML2VC0_VM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe +#define VML2VC0_VM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf +#define VML2VC0_VM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10 +#define VML2VC0_VM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11 +#define VML2VC0_VM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12 +#define VML2VC0_VM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13 +#define VML2VC0_VM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14 +#define VML2VC0_VM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15 +#define VML2VC0_VM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16 +#define VML2VC0_VM_CONTEXT3_CNTL__ENABLE_CONTEXT_MASK 0x00000001L +#define VML2VC0_VM_CONTEXT3_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L +#define VML2VC0_VM_CONTEXT3_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L +#define VML2VC0_VM_CONTEXT3_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L +#define VML2VC0_VM_CONTEXT3_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L +#define VML2VC0_VM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L +#define VML2VC0_VM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L +#define VML2VC0_VM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L +#define VML2VC0_VM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L +#define VML2VC0_VM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L +#define VML2VC0_VM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L +#define VML2VC0_VM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L +#define VML2VC0_VM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L +#define VML2VC0_VM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L +#define VML2VC0_VM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L +#define VML2VC0_VM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L +#define 
VML2VC0_VM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L +#define VML2VC0_VM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L +#define VML2VC0_VM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L +//VML2VC0_VM_CONTEXT4_CNTL +#define VML2VC0_VM_CONTEXT4_CNTL__ENABLE_CONTEXT__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT4_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1 +#define VML2VC0_VM_CONTEXT4_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3 +#define VML2VC0_VM_CONTEXT4_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7 +#define VML2VC0_VM_CONTEXT4_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8 +#define VML2VC0_VM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9 +#define VML2VC0_VM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa +#define VML2VC0_VM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb +#define VML2VC0_VM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc +#define VML2VC0_VM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd +#define VML2VC0_VM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe +#define VML2VC0_VM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf +#define VML2VC0_VM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10 +#define VML2VC0_VM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11 +#define VML2VC0_VM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12 +#define VML2VC0_VM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13 +#define VML2VC0_VM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14 +#define VML2VC0_VM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15 +#define VML2VC0_VM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16 +#define VML2VC0_VM_CONTEXT4_CNTL__ENABLE_CONTEXT_MASK 0x00000001L +#define VML2VC0_VM_CONTEXT4_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L +#define VML2VC0_VM_CONTEXT4_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L +#define VML2VC0_VM_CONTEXT4_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L +#define VML2VC0_VM_CONTEXT4_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L +#define VML2VC0_VM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L +#define VML2VC0_VM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L +#define VML2VC0_VM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L +#define VML2VC0_VM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L +#define VML2VC0_VM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L +#define VML2VC0_VM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L +#define VML2VC0_VM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L +#define VML2VC0_VM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L +#define VML2VC0_VM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L +#define VML2VC0_VM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L +#define VML2VC0_VM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L +#define VML2VC0_VM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L +#define VML2VC0_VM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L +#define VML2VC0_VM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L +//VML2VC0_VM_CONTEXT5_CNTL +#define 
VML2VC0_VM_CONTEXT5_CNTL__ENABLE_CONTEXT__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT5_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1 +#define VML2VC0_VM_CONTEXT5_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3 +#define VML2VC0_VM_CONTEXT5_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7 +#define VML2VC0_VM_CONTEXT5_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8 +#define VML2VC0_VM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9 +#define VML2VC0_VM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa +#define VML2VC0_VM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb +#define VML2VC0_VM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc +#define VML2VC0_VM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd +#define VML2VC0_VM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe +#define VML2VC0_VM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf +#define VML2VC0_VM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10 +#define VML2VC0_VM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11 +#define VML2VC0_VM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12 +#define VML2VC0_VM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13 +#define VML2VC0_VM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14 +#define VML2VC0_VM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15 +#define VML2VC0_VM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16 +#define VML2VC0_VM_CONTEXT5_CNTL__ENABLE_CONTEXT_MASK 0x00000001L +#define VML2VC0_VM_CONTEXT5_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L +#define VML2VC0_VM_CONTEXT5_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L +#define VML2VC0_VM_CONTEXT5_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L +#define VML2VC0_VM_CONTEXT5_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L +#define VML2VC0_VM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L +#define VML2VC0_VM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L +#define VML2VC0_VM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L +#define VML2VC0_VM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L +#define VML2VC0_VM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L +#define VML2VC0_VM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L +#define VML2VC0_VM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L +#define VML2VC0_VM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L +#define VML2VC0_VM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L +#define VML2VC0_VM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L +#define VML2VC0_VM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L +#define VML2VC0_VM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L +#define VML2VC0_VM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L +#define VML2VC0_VM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L +//VML2VC0_VM_CONTEXT6_CNTL +#define VML2VC0_VM_CONTEXT6_CNTL__ENABLE_CONTEXT__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT6_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1 +#define VML2VC0_VM_CONTEXT6_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3 +#define VML2VC0_VM_CONTEXT6_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7 +#define 
VML2VC0_VM_CONTEXT6_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8 +#define VML2VC0_VM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9 +#define VML2VC0_VM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa +#define VML2VC0_VM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb +#define VML2VC0_VM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc +#define VML2VC0_VM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd +#define VML2VC0_VM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe +#define VML2VC0_VM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf +#define VML2VC0_VM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10 +#define VML2VC0_VM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11 +#define VML2VC0_VM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12 +#define VML2VC0_VM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13 +#define VML2VC0_VM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14 +#define VML2VC0_VM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15 +#define VML2VC0_VM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16 +#define VML2VC0_VM_CONTEXT6_CNTL__ENABLE_CONTEXT_MASK 0x00000001L +#define VML2VC0_VM_CONTEXT6_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L +#define VML2VC0_VM_CONTEXT6_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L +#define VML2VC0_VM_CONTEXT6_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L +#define VML2VC0_VM_CONTEXT6_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L +#define VML2VC0_VM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L +#define VML2VC0_VM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L +#define VML2VC0_VM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L +#define VML2VC0_VM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L +#define VML2VC0_VM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L +#define VML2VC0_VM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L +#define VML2VC0_VM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L +#define VML2VC0_VM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L +#define VML2VC0_VM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L +#define VML2VC0_VM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L +#define VML2VC0_VM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L +#define VML2VC0_VM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L +#define VML2VC0_VM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L +#define VML2VC0_VM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L +//VML2VC0_VM_CONTEXT7_CNTL +#define VML2VC0_VM_CONTEXT7_CNTL__ENABLE_CONTEXT__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT7_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1 +#define VML2VC0_VM_CONTEXT7_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3 +#define VML2VC0_VM_CONTEXT7_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7 +#define VML2VC0_VM_CONTEXT7_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8 +#define VML2VC0_VM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9 +#define VML2VC0_VM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa +#define VML2VC0_VM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb 
+#define VML2VC0_VM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc +#define VML2VC0_VM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd +#define VML2VC0_VM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe +#define VML2VC0_VM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf +#define VML2VC0_VM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10 +#define VML2VC0_VM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11 +#define VML2VC0_VM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12 +#define VML2VC0_VM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13 +#define VML2VC0_VM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14 +#define VML2VC0_VM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15 +#define VML2VC0_VM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16 +#define VML2VC0_VM_CONTEXT7_CNTL__ENABLE_CONTEXT_MASK 0x00000001L +#define VML2VC0_VM_CONTEXT7_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L +#define VML2VC0_VM_CONTEXT7_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L +#define VML2VC0_VM_CONTEXT7_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L +#define VML2VC0_VM_CONTEXT7_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L +#define VML2VC0_VM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L +#define VML2VC0_VM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L +#define VML2VC0_VM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L +#define VML2VC0_VM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L +#define VML2VC0_VM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L +#define VML2VC0_VM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L +#define VML2VC0_VM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L +#define VML2VC0_VM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L +#define VML2VC0_VM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L +#define VML2VC0_VM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L +#define VML2VC0_VM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L +#define VML2VC0_VM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L +#define VML2VC0_VM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L +#define VML2VC0_VM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L +//VML2VC0_VM_CONTEXT8_CNTL +#define VML2VC0_VM_CONTEXT8_CNTL__ENABLE_CONTEXT__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT8_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1 +#define VML2VC0_VM_CONTEXT8_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3 +#define VML2VC0_VM_CONTEXT8_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7 +#define VML2VC0_VM_CONTEXT8_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8 +#define VML2VC0_VM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9 +#define VML2VC0_VM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa +#define VML2VC0_VM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb +#define VML2VC0_VM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc +#define VML2VC0_VM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd +#define VML2VC0_VM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe +#define 
VML2VC0_VM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf +#define VML2VC0_VM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10 +#define VML2VC0_VM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11 +#define VML2VC0_VM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12 +#define VML2VC0_VM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13 +#define VML2VC0_VM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14 +#define VML2VC0_VM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15 +#define VML2VC0_VM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16 +#define VML2VC0_VM_CONTEXT8_CNTL__ENABLE_CONTEXT_MASK 0x00000001L +#define VML2VC0_VM_CONTEXT8_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L +#define VML2VC0_VM_CONTEXT8_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L +#define VML2VC0_VM_CONTEXT8_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L +#define VML2VC0_VM_CONTEXT8_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L +#define VML2VC0_VM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L +#define VML2VC0_VM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L +#define VML2VC0_VM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L +#define VML2VC0_VM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L +#define VML2VC0_VM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L +#define VML2VC0_VM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L +#define VML2VC0_VM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L +#define VML2VC0_VM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L +#define VML2VC0_VM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L +#define VML2VC0_VM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L +#define VML2VC0_VM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L +#define VML2VC0_VM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L +#define VML2VC0_VM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L +#define VML2VC0_VM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L +//VML2VC0_VM_CONTEXT9_CNTL +#define VML2VC0_VM_CONTEXT9_CNTL__ENABLE_CONTEXT__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT9_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1 +#define VML2VC0_VM_CONTEXT9_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3 +#define VML2VC0_VM_CONTEXT9_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7 +#define VML2VC0_VM_CONTEXT9_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8 +#define VML2VC0_VM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9 +#define VML2VC0_VM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa +#define VML2VC0_VM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb +#define VML2VC0_VM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc +#define VML2VC0_VM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd +#define VML2VC0_VM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe +#define VML2VC0_VM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf +#define VML2VC0_VM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10 +#define VML2VC0_VM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11 +#define 
VML2VC0_VM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12 +#define VML2VC0_VM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13 +#define VML2VC0_VM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14 +#define VML2VC0_VM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15 +#define VML2VC0_VM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16 +#define VML2VC0_VM_CONTEXT9_CNTL__ENABLE_CONTEXT_MASK 0x00000001L +#define VML2VC0_VM_CONTEXT9_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L +#define VML2VC0_VM_CONTEXT9_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L +#define VML2VC0_VM_CONTEXT9_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L +#define VML2VC0_VM_CONTEXT9_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L +#define VML2VC0_VM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L +#define VML2VC0_VM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L +#define VML2VC0_VM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L +#define VML2VC0_VM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L +#define VML2VC0_VM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L +#define VML2VC0_VM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L +#define VML2VC0_VM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L +#define VML2VC0_VM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L +#define VML2VC0_VM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L +#define VML2VC0_VM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L +#define VML2VC0_VM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L +#define VML2VC0_VM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L +#define VML2VC0_VM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L +#define VML2VC0_VM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L +//VML2VC0_VM_CONTEXT10_CNTL +#define VML2VC0_VM_CONTEXT10_CNTL__ENABLE_CONTEXT__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT10_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1 +#define VML2VC0_VM_CONTEXT10_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3 +#define VML2VC0_VM_CONTEXT10_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7 +#define VML2VC0_VM_CONTEXT10_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8 +#define VML2VC0_VM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9 +#define VML2VC0_VM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa +#define VML2VC0_VM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb +#define VML2VC0_VM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc +#define VML2VC0_VM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd +#define VML2VC0_VM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe +#define VML2VC0_VM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf +#define VML2VC0_VM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10 +#define VML2VC0_VM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11 +#define VML2VC0_VM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12 +#define VML2VC0_VM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13 +#define VML2VC0_VM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14 +#define 
VML2VC0_VM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15 +#define VML2VC0_VM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16 +#define VML2VC0_VM_CONTEXT10_CNTL__ENABLE_CONTEXT_MASK 0x00000001L +#define VML2VC0_VM_CONTEXT10_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L +#define VML2VC0_VM_CONTEXT10_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L +#define VML2VC0_VM_CONTEXT10_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L +#define VML2VC0_VM_CONTEXT10_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L +#define VML2VC0_VM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L +#define VML2VC0_VM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L +#define VML2VC0_VM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L +#define VML2VC0_VM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L +#define VML2VC0_VM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L +#define VML2VC0_VM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L +#define VML2VC0_VM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L +#define VML2VC0_VM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L +#define VML2VC0_VM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L +#define VML2VC0_VM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L +#define VML2VC0_VM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L +#define VML2VC0_VM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L +#define VML2VC0_VM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L +#define VML2VC0_VM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L +//VML2VC0_VM_CONTEXT11_CNTL +#define VML2VC0_VM_CONTEXT11_CNTL__ENABLE_CONTEXT__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT11_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1 +#define VML2VC0_VM_CONTEXT11_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3 +#define VML2VC0_VM_CONTEXT11_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7 +#define VML2VC0_VM_CONTEXT11_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8 +#define VML2VC0_VM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9 +#define VML2VC0_VM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa +#define VML2VC0_VM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb +#define VML2VC0_VM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc +#define VML2VC0_VM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd +#define VML2VC0_VM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe +#define VML2VC0_VM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf +#define VML2VC0_VM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10 +#define VML2VC0_VM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11 +#define VML2VC0_VM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12 +#define VML2VC0_VM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13 +#define VML2VC0_VM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14 +#define VML2VC0_VM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15 +#define VML2VC0_VM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16 +#define VML2VC0_VM_CONTEXT11_CNTL__ENABLE_CONTEXT_MASK 0x00000001L +#define 
VML2VC0_VM_CONTEXT11_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L +#define VML2VC0_VM_CONTEXT11_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L +#define VML2VC0_VM_CONTEXT11_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L +#define VML2VC0_VM_CONTEXT11_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L +#define VML2VC0_VM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L +#define VML2VC0_VM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L +#define VML2VC0_VM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L +#define VML2VC0_VM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L +#define VML2VC0_VM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L +#define VML2VC0_VM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L +#define VML2VC0_VM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L +#define VML2VC0_VM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L +#define VML2VC0_VM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L +#define VML2VC0_VM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L +#define VML2VC0_VM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L +#define VML2VC0_VM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L +#define VML2VC0_VM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L +#define VML2VC0_VM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L +//VML2VC0_VM_CONTEXT12_CNTL +#define VML2VC0_VM_CONTEXT12_CNTL__ENABLE_CONTEXT__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT12_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1 +#define VML2VC0_VM_CONTEXT12_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3 +#define VML2VC0_VM_CONTEXT12_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7 +#define VML2VC0_VM_CONTEXT12_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8 +#define VML2VC0_VM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9 +#define VML2VC0_VM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa +#define VML2VC0_VM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb +#define VML2VC0_VM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc +#define VML2VC0_VM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd +#define VML2VC0_VM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe +#define VML2VC0_VM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf +#define VML2VC0_VM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10 +#define VML2VC0_VM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11 +#define VML2VC0_VM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12 +#define VML2VC0_VM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13 +#define VML2VC0_VM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14 +#define VML2VC0_VM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15 +#define VML2VC0_VM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16 +#define VML2VC0_VM_CONTEXT12_CNTL__ENABLE_CONTEXT_MASK 0x00000001L +#define VML2VC0_VM_CONTEXT12_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L +#define VML2VC0_VM_CONTEXT12_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L +#define VML2VC0_VM_CONTEXT12_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L +#define 
VML2VC0_VM_CONTEXT12_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L +#define VML2VC0_VM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L +#define VML2VC0_VM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L +#define VML2VC0_VM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L +#define VML2VC0_VM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L +#define VML2VC0_VM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L +#define VML2VC0_VM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L +#define VML2VC0_VM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L +#define VML2VC0_VM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L +#define VML2VC0_VM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L +#define VML2VC0_VM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L +#define VML2VC0_VM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L +#define VML2VC0_VM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L +#define VML2VC0_VM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L +#define VML2VC0_VM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L +//VML2VC0_VM_CONTEXT13_CNTL +#define VML2VC0_VM_CONTEXT13_CNTL__ENABLE_CONTEXT__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT13_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1 +#define VML2VC0_VM_CONTEXT13_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3 +#define VML2VC0_VM_CONTEXT13_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7 +#define VML2VC0_VM_CONTEXT13_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8 +#define VML2VC0_VM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9 +#define VML2VC0_VM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa +#define VML2VC0_VM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb +#define VML2VC0_VM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc +#define VML2VC0_VM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd +#define VML2VC0_VM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe +#define VML2VC0_VM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf +#define VML2VC0_VM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10 +#define VML2VC0_VM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11 +#define VML2VC0_VM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12 +#define VML2VC0_VM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13 +#define VML2VC0_VM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14 +#define VML2VC0_VM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15 +#define VML2VC0_VM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16 +#define VML2VC0_VM_CONTEXT13_CNTL__ENABLE_CONTEXT_MASK 0x00000001L +#define VML2VC0_VM_CONTEXT13_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L +#define VML2VC0_VM_CONTEXT13_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L +#define VML2VC0_VM_CONTEXT13_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L +#define VML2VC0_VM_CONTEXT13_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L +#define VML2VC0_VM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L +#define VML2VC0_VM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L +#define 
VML2VC0_VM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L +#define VML2VC0_VM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L +#define VML2VC0_VM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L +#define VML2VC0_VM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L +#define VML2VC0_VM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L +#define VML2VC0_VM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L +#define VML2VC0_VM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L +#define VML2VC0_VM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L +#define VML2VC0_VM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L +#define VML2VC0_VM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L +#define VML2VC0_VM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L +#define VML2VC0_VM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L +//VML2VC0_VM_CONTEXT14_CNTL +#define VML2VC0_VM_CONTEXT14_CNTL__ENABLE_CONTEXT__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT14_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1 +#define VML2VC0_VM_CONTEXT14_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3 +#define VML2VC0_VM_CONTEXT14_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7 +#define VML2VC0_VM_CONTEXT14_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8 +#define VML2VC0_VM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9 +#define VML2VC0_VM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa +#define VML2VC0_VM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb +#define VML2VC0_VM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc +#define VML2VC0_VM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd +#define VML2VC0_VM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe +#define VML2VC0_VM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf +#define VML2VC0_VM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10 +#define VML2VC0_VM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11 +#define VML2VC0_VM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12 +#define VML2VC0_VM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13 +#define VML2VC0_VM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14 +#define VML2VC0_VM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15 +#define VML2VC0_VM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16 +#define VML2VC0_VM_CONTEXT14_CNTL__ENABLE_CONTEXT_MASK 0x00000001L +#define VML2VC0_VM_CONTEXT14_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L +#define VML2VC0_VM_CONTEXT14_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L +#define VML2VC0_VM_CONTEXT14_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L +#define VML2VC0_VM_CONTEXT14_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L +#define VML2VC0_VM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L +#define VML2VC0_VM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L +#define VML2VC0_VM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L +#define VML2VC0_VM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L +#define VML2VC0_VM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L 
+#define VML2VC0_VM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L +#define VML2VC0_VM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L +#define VML2VC0_VM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L +#define VML2VC0_VM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L +#define VML2VC0_VM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L +#define VML2VC0_VM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L +#define VML2VC0_VM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L +#define VML2VC0_VM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L +#define VML2VC0_VM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L +//VML2VC0_VM_CONTEXT15_CNTL +#define VML2VC0_VM_CONTEXT15_CNTL__ENABLE_CONTEXT__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT15_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1 +#define VML2VC0_VM_CONTEXT15_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3 +#define VML2VC0_VM_CONTEXT15_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7 +#define VML2VC0_VM_CONTEXT15_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8 +#define VML2VC0_VM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9 +#define VML2VC0_VM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa +#define VML2VC0_VM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb +#define VML2VC0_VM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc +#define VML2VC0_VM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd +#define VML2VC0_VM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe +#define VML2VC0_VM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf +#define VML2VC0_VM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10 +#define VML2VC0_VM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11 +#define VML2VC0_VM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12 +#define VML2VC0_VM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13 +#define VML2VC0_VM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14 +#define VML2VC0_VM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15 +#define VML2VC0_VM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16 +#define VML2VC0_VM_CONTEXT15_CNTL__ENABLE_CONTEXT_MASK 0x00000001L +#define VML2VC0_VM_CONTEXT15_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L +#define VML2VC0_VM_CONTEXT15_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L +#define VML2VC0_VM_CONTEXT15_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L +#define VML2VC0_VM_CONTEXT15_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L +#define VML2VC0_VM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L +#define VML2VC0_VM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L +#define VML2VC0_VM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L +#define VML2VC0_VM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L +#define VML2VC0_VM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L +#define VML2VC0_VM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L +#define VML2VC0_VM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L +#define VML2VC0_VM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L +#define 
VML2VC0_VM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L +#define VML2VC0_VM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L +#define VML2VC0_VM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L +#define VML2VC0_VM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L +#define VML2VC0_VM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L +#define VML2VC0_VM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L +//VML2VC0_VM_CONTEXTS_DISABLE +#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_0__SHIFT 0x0 +#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_1__SHIFT 0x1 +#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_2__SHIFT 0x2 +#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_3__SHIFT 0x3 +#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_4__SHIFT 0x4 +#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_5__SHIFT 0x5 +#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_6__SHIFT 0x6 +#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_7__SHIFT 0x7 +#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_8__SHIFT 0x8 +#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_9__SHIFT 0x9 +#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_10__SHIFT 0xa +#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_11__SHIFT 0xb +#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_12__SHIFT 0xc +#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_13__SHIFT 0xd +#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_14__SHIFT 0xe +#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_15__SHIFT 0xf +#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_0_MASK 0x00000001L +#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_1_MASK 0x00000002L +#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_2_MASK 0x00000004L +#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_3_MASK 0x00000008L +#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_4_MASK 0x00000010L +#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_5_MASK 0x00000020L +#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_6_MASK 0x00000040L +#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_7_MASK 0x00000080L +#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_8_MASK 0x00000100L +#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_9_MASK 0x00000200L +#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_10_MASK 0x00000400L +#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_11_MASK 0x00000800L +#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_12_MASK 0x00001000L +#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_13_MASK 0x00002000L +#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_14_MASK 0x00004000L +#define VML2VC0_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_15_MASK 0x00008000L +//VML2VC0_VM_INVALIDATE_ENG0_SEM +#define VML2VC0_VM_INVALIDATE_ENG0_SEM__SEMAPHORE__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG0_SEM__SEMAPHORE_MASK 0x00000001L +//VML2VC0_VM_INVALIDATE_ENG1_SEM +#define VML2VC0_VM_INVALIDATE_ENG1_SEM__SEMAPHORE__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG1_SEM__SEMAPHORE_MASK 0x00000001L +//VML2VC0_VM_INVALIDATE_ENG2_SEM +#define VML2VC0_VM_INVALIDATE_ENG2_SEM__SEMAPHORE__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG2_SEM__SEMAPHORE_MASK 0x00000001L +//VML2VC0_VM_INVALIDATE_ENG3_SEM +#define VML2VC0_VM_INVALIDATE_ENG3_SEM__SEMAPHORE__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG3_SEM__SEMAPHORE_MASK 0x00000001L +//VML2VC0_VM_INVALIDATE_ENG4_SEM +#define 
VML2VC0_VM_INVALIDATE_ENG4_SEM__SEMAPHORE__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG4_SEM__SEMAPHORE_MASK 0x00000001L +//VML2VC0_VM_INVALIDATE_ENG5_SEM +#define VML2VC0_VM_INVALIDATE_ENG5_SEM__SEMAPHORE__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG5_SEM__SEMAPHORE_MASK 0x00000001L +//VML2VC0_VM_INVALIDATE_ENG6_SEM +#define VML2VC0_VM_INVALIDATE_ENG6_SEM__SEMAPHORE__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG6_SEM__SEMAPHORE_MASK 0x00000001L +//VML2VC0_VM_INVALIDATE_ENG7_SEM +#define VML2VC0_VM_INVALIDATE_ENG7_SEM__SEMAPHORE__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG7_SEM__SEMAPHORE_MASK 0x00000001L +//VML2VC0_VM_INVALIDATE_ENG8_SEM +#define VML2VC0_VM_INVALIDATE_ENG8_SEM__SEMAPHORE__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG8_SEM__SEMAPHORE_MASK 0x00000001L +//VML2VC0_VM_INVALIDATE_ENG9_SEM +#define VML2VC0_VM_INVALIDATE_ENG9_SEM__SEMAPHORE__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG9_SEM__SEMAPHORE_MASK 0x00000001L +//VML2VC0_VM_INVALIDATE_ENG10_SEM +#define VML2VC0_VM_INVALIDATE_ENG10_SEM__SEMAPHORE__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG10_SEM__SEMAPHORE_MASK 0x00000001L +//VML2VC0_VM_INVALIDATE_ENG11_SEM +#define VML2VC0_VM_INVALIDATE_ENG11_SEM__SEMAPHORE__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG11_SEM__SEMAPHORE_MASK 0x00000001L +//VML2VC0_VM_INVALIDATE_ENG12_SEM +#define VML2VC0_VM_INVALIDATE_ENG12_SEM__SEMAPHORE__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG12_SEM__SEMAPHORE_MASK 0x00000001L +//VML2VC0_VM_INVALIDATE_ENG13_SEM +#define VML2VC0_VM_INVALIDATE_ENG13_SEM__SEMAPHORE__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG13_SEM__SEMAPHORE_MASK 0x00000001L +//VML2VC0_VM_INVALIDATE_ENG14_SEM +#define VML2VC0_VM_INVALIDATE_ENG14_SEM__SEMAPHORE__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG14_SEM__SEMAPHORE_MASK 0x00000001L +//VML2VC0_VM_INVALIDATE_ENG15_SEM +#define VML2VC0_VM_INVALIDATE_ENG15_SEM__SEMAPHORE__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG15_SEM__SEMAPHORE_MASK 0x00000001L +//VML2VC0_VM_INVALIDATE_ENG16_SEM +#define VML2VC0_VM_INVALIDATE_ENG16_SEM__SEMAPHORE__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG16_SEM__SEMAPHORE_MASK 0x00000001L +//VML2VC0_VM_INVALIDATE_ENG17_SEM +#define VML2VC0_VM_INVALIDATE_ENG17_SEM__SEMAPHORE__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG17_SEM__SEMAPHORE_MASK 0x00000001L +//VML2VC0_VM_INVALIDATE_ENG0_REQ +#define VML2VC0_VM_INVALIDATE_ENG0_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG0_REQ__FLUSH_TYPE__SHIFT 0x10 +#define VML2VC0_VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PTES__SHIFT 0x12 +#define VML2VC0_VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13 +#define VML2VC0_VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14 +#define VML2VC0_VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15 +#define VML2VC0_VM_INVALIDATE_ENG0_REQ__INVALIDATE_L1_PTES__SHIFT 0x16 +#define VML2VC0_VM_INVALIDATE_ENG0_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17 +#define VML2VC0_VM_INVALIDATE_ENG0_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL +#define VML2VC0_VM_INVALIDATE_ENG0_REQ__FLUSH_TYPE_MASK 0x00030000L +#define VML2VC0_VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L +#define VML2VC0_VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L +#define VML2VC0_VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L +#define VML2VC0_VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L +#define VML2VC0_VM_INVALIDATE_ENG0_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L +#define VML2VC0_VM_INVALIDATE_ENG0_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L 
+//VML2VC0_VM_INVALIDATE_ENG1_REQ +#define VML2VC0_VM_INVALIDATE_ENG1_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG1_REQ__FLUSH_TYPE__SHIFT 0x10 +#define VML2VC0_VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PTES__SHIFT 0x12 +#define VML2VC0_VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13 +#define VML2VC0_VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14 +#define VML2VC0_VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15 +#define VML2VC0_VM_INVALIDATE_ENG1_REQ__INVALIDATE_L1_PTES__SHIFT 0x16 +#define VML2VC0_VM_INVALIDATE_ENG1_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17 +#define VML2VC0_VM_INVALIDATE_ENG1_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL +#define VML2VC0_VM_INVALIDATE_ENG1_REQ__FLUSH_TYPE_MASK 0x00030000L +#define VML2VC0_VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L +#define VML2VC0_VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L +#define VML2VC0_VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L +#define VML2VC0_VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L +#define VML2VC0_VM_INVALIDATE_ENG1_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L +#define VML2VC0_VM_INVALIDATE_ENG1_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L +//VML2VC0_VM_INVALIDATE_ENG2_REQ +#define VML2VC0_VM_INVALIDATE_ENG2_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG2_REQ__FLUSH_TYPE__SHIFT 0x10 +#define VML2VC0_VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PTES__SHIFT 0x12 +#define VML2VC0_VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13 +#define VML2VC0_VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14 +#define VML2VC0_VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15 +#define VML2VC0_VM_INVALIDATE_ENG2_REQ__INVALIDATE_L1_PTES__SHIFT 0x16 +#define VML2VC0_VM_INVALIDATE_ENG2_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17 +#define VML2VC0_VM_INVALIDATE_ENG2_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL +#define VML2VC0_VM_INVALIDATE_ENG2_REQ__FLUSH_TYPE_MASK 0x00030000L +#define VML2VC0_VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L +#define VML2VC0_VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L +#define VML2VC0_VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L +#define VML2VC0_VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L +#define VML2VC0_VM_INVALIDATE_ENG2_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L +#define VML2VC0_VM_INVALIDATE_ENG2_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L +//VML2VC0_VM_INVALIDATE_ENG3_REQ +#define VML2VC0_VM_INVALIDATE_ENG3_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG3_REQ__FLUSH_TYPE__SHIFT 0x10 +#define VML2VC0_VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PTES__SHIFT 0x12 +#define VML2VC0_VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13 +#define VML2VC0_VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14 +#define VML2VC0_VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15 +#define VML2VC0_VM_INVALIDATE_ENG3_REQ__INVALIDATE_L1_PTES__SHIFT 0x16 +#define VML2VC0_VM_INVALIDATE_ENG3_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17 +#define VML2VC0_VM_INVALIDATE_ENG3_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL +#define VML2VC0_VM_INVALIDATE_ENG3_REQ__FLUSH_TYPE_MASK 0x00030000L +#define VML2VC0_VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L +#define VML2VC0_VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L +#define VML2VC0_VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L +#define 
VML2VC0_VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L +#define VML2VC0_VM_INVALIDATE_ENG3_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L +#define VML2VC0_VM_INVALIDATE_ENG3_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L +//VML2VC0_VM_INVALIDATE_ENG4_REQ +#define VML2VC0_VM_INVALIDATE_ENG4_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG4_REQ__FLUSH_TYPE__SHIFT 0x10 +#define VML2VC0_VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PTES__SHIFT 0x12 +#define VML2VC0_VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13 +#define VML2VC0_VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14 +#define VML2VC0_VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15 +#define VML2VC0_VM_INVALIDATE_ENG4_REQ__INVALIDATE_L1_PTES__SHIFT 0x16 +#define VML2VC0_VM_INVALIDATE_ENG4_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17 +#define VML2VC0_VM_INVALIDATE_ENG4_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL +#define VML2VC0_VM_INVALIDATE_ENG4_REQ__FLUSH_TYPE_MASK 0x00030000L +#define VML2VC0_VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L +#define VML2VC0_VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L +#define VML2VC0_VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L +#define VML2VC0_VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L +#define VML2VC0_VM_INVALIDATE_ENG4_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L +#define VML2VC0_VM_INVALIDATE_ENG4_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L +//VML2VC0_VM_INVALIDATE_ENG5_REQ +#define VML2VC0_VM_INVALIDATE_ENG5_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG5_REQ__FLUSH_TYPE__SHIFT 0x10 +#define VML2VC0_VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PTES__SHIFT 0x12 +#define VML2VC0_VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13 +#define VML2VC0_VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14 +#define VML2VC0_VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15 +#define VML2VC0_VM_INVALIDATE_ENG5_REQ__INVALIDATE_L1_PTES__SHIFT 0x16 +#define VML2VC0_VM_INVALIDATE_ENG5_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17 +#define VML2VC0_VM_INVALIDATE_ENG5_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL +#define VML2VC0_VM_INVALIDATE_ENG5_REQ__FLUSH_TYPE_MASK 0x00030000L +#define VML2VC0_VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L +#define VML2VC0_VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L +#define VML2VC0_VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L +#define VML2VC0_VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L +#define VML2VC0_VM_INVALIDATE_ENG5_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L +#define VML2VC0_VM_INVALIDATE_ENG5_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L +//VML2VC0_VM_INVALIDATE_ENG6_REQ +#define VML2VC0_VM_INVALIDATE_ENG6_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG6_REQ__FLUSH_TYPE__SHIFT 0x10 +#define VML2VC0_VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PTES__SHIFT 0x12 +#define VML2VC0_VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13 +#define VML2VC0_VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14 +#define VML2VC0_VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15 +#define VML2VC0_VM_INVALIDATE_ENG6_REQ__INVALIDATE_L1_PTES__SHIFT 0x16 +#define VML2VC0_VM_INVALIDATE_ENG6_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17 +#define VML2VC0_VM_INVALIDATE_ENG6_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL +#define VML2VC0_VM_INVALIDATE_ENG6_REQ__FLUSH_TYPE_MASK 0x00030000L +#define 
VML2VC0_VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L +#define VML2VC0_VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L +#define VML2VC0_VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L +#define VML2VC0_VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L +#define VML2VC0_VM_INVALIDATE_ENG6_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L +#define VML2VC0_VM_INVALIDATE_ENG6_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L +//VML2VC0_VM_INVALIDATE_ENG7_REQ +#define VML2VC0_VM_INVALIDATE_ENG7_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG7_REQ__FLUSH_TYPE__SHIFT 0x10 +#define VML2VC0_VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PTES__SHIFT 0x12 +#define VML2VC0_VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13 +#define VML2VC0_VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14 +#define VML2VC0_VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15 +#define VML2VC0_VM_INVALIDATE_ENG7_REQ__INVALIDATE_L1_PTES__SHIFT 0x16 +#define VML2VC0_VM_INVALIDATE_ENG7_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17 +#define VML2VC0_VM_INVALIDATE_ENG7_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL +#define VML2VC0_VM_INVALIDATE_ENG7_REQ__FLUSH_TYPE_MASK 0x00030000L +#define VML2VC0_VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L +#define VML2VC0_VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L +#define VML2VC0_VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L +#define VML2VC0_VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L +#define VML2VC0_VM_INVALIDATE_ENG7_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L +#define VML2VC0_VM_INVALIDATE_ENG7_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L +//VML2VC0_VM_INVALIDATE_ENG8_REQ +#define VML2VC0_VM_INVALIDATE_ENG8_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG8_REQ__FLUSH_TYPE__SHIFT 0x10 +#define VML2VC0_VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PTES__SHIFT 0x12 +#define VML2VC0_VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13 +#define VML2VC0_VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14 +#define VML2VC0_VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15 +#define VML2VC0_VM_INVALIDATE_ENG8_REQ__INVALIDATE_L1_PTES__SHIFT 0x16 +#define VML2VC0_VM_INVALIDATE_ENG8_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17 +#define VML2VC0_VM_INVALIDATE_ENG8_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL +#define VML2VC0_VM_INVALIDATE_ENG8_REQ__FLUSH_TYPE_MASK 0x00030000L +#define VML2VC0_VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L +#define VML2VC0_VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L +#define VML2VC0_VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L +#define VML2VC0_VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L +#define VML2VC0_VM_INVALIDATE_ENG8_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L +#define VML2VC0_VM_INVALIDATE_ENG8_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L +//VML2VC0_VM_INVALIDATE_ENG9_REQ +#define VML2VC0_VM_INVALIDATE_ENG9_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG9_REQ__FLUSH_TYPE__SHIFT 0x10 +#define VML2VC0_VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PTES__SHIFT 0x12 +#define VML2VC0_VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13 +#define VML2VC0_VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14 +#define VML2VC0_VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15 +#define VML2VC0_VM_INVALIDATE_ENG9_REQ__INVALIDATE_L1_PTES__SHIFT 0x16 +#define 
VML2VC0_VM_INVALIDATE_ENG9_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17 +#define VML2VC0_VM_INVALIDATE_ENG9_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL +#define VML2VC0_VM_INVALIDATE_ENG9_REQ__FLUSH_TYPE_MASK 0x00030000L +#define VML2VC0_VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L +#define VML2VC0_VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L +#define VML2VC0_VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L +#define VML2VC0_VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L +#define VML2VC0_VM_INVALIDATE_ENG9_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L +#define VML2VC0_VM_INVALIDATE_ENG9_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L +//VML2VC0_VM_INVALIDATE_ENG10_REQ +#define VML2VC0_VM_INVALIDATE_ENG10_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG10_REQ__FLUSH_TYPE__SHIFT 0x10 +#define VML2VC0_VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PTES__SHIFT 0x12 +#define VML2VC0_VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13 +#define VML2VC0_VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14 +#define VML2VC0_VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15 +#define VML2VC0_VM_INVALIDATE_ENG10_REQ__INVALIDATE_L1_PTES__SHIFT 0x16 +#define VML2VC0_VM_INVALIDATE_ENG10_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17 +#define VML2VC0_VM_INVALIDATE_ENG10_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL +#define VML2VC0_VM_INVALIDATE_ENG10_REQ__FLUSH_TYPE_MASK 0x00030000L +#define VML2VC0_VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L +#define VML2VC0_VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L +#define VML2VC0_VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L +#define VML2VC0_VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L +#define VML2VC0_VM_INVALIDATE_ENG10_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L +#define VML2VC0_VM_INVALIDATE_ENG10_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L +//VML2VC0_VM_INVALIDATE_ENG11_REQ +#define VML2VC0_VM_INVALIDATE_ENG11_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG11_REQ__FLUSH_TYPE__SHIFT 0x10 +#define VML2VC0_VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PTES__SHIFT 0x12 +#define VML2VC0_VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13 +#define VML2VC0_VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14 +#define VML2VC0_VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15 +#define VML2VC0_VM_INVALIDATE_ENG11_REQ__INVALIDATE_L1_PTES__SHIFT 0x16 +#define VML2VC0_VM_INVALIDATE_ENG11_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17 +#define VML2VC0_VM_INVALIDATE_ENG11_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL +#define VML2VC0_VM_INVALIDATE_ENG11_REQ__FLUSH_TYPE_MASK 0x00030000L +#define VML2VC0_VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L +#define VML2VC0_VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L +#define VML2VC0_VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L +#define VML2VC0_VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L +#define VML2VC0_VM_INVALIDATE_ENG11_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L +#define VML2VC0_VM_INVALIDATE_ENG11_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L +//VML2VC0_VM_INVALIDATE_ENG12_REQ +#define VML2VC0_VM_INVALIDATE_ENG12_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG12_REQ__FLUSH_TYPE__SHIFT 0x10 +#define VML2VC0_VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PTES__SHIFT 0x12 +#define 
VML2VC0_VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13 +#define VML2VC0_VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14 +#define VML2VC0_VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15 +#define VML2VC0_VM_INVALIDATE_ENG12_REQ__INVALIDATE_L1_PTES__SHIFT 0x16 +#define VML2VC0_VM_INVALIDATE_ENG12_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17 +#define VML2VC0_VM_INVALIDATE_ENG12_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL +#define VML2VC0_VM_INVALIDATE_ENG12_REQ__FLUSH_TYPE_MASK 0x00030000L +#define VML2VC0_VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L +#define VML2VC0_VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L +#define VML2VC0_VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L +#define VML2VC0_VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L +#define VML2VC0_VM_INVALIDATE_ENG12_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L +#define VML2VC0_VM_INVALIDATE_ENG12_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L +//VML2VC0_VM_INVALIDATE_ENG13_REQ +#define VML2VC0_VM_INVALIDATE_ENG13_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG13_REQ__FLUSH_TYPE__SHIFT 0x10 +#define VML2VC0_VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PTES__SHIFT 0x12 +#define VML2VC0_VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13 +#define VML2VC0_VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14 +#define VML2VC0_VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15 +#define VML2VC0_VM_INVALIDATE_ENG13_REQ__INVALIDATE_L1_PTES__SHIFT 0x16 +#define VML2VC0_VM_INVALIDATE_ENG13_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17 +#define VML2VC0_VM_INVALIDATE_ENG13_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL +#define VML2VC0_VM_INVALIDATE_ENG13_REQ__FLUSH_TYPE_MASK 0x00030000L +#define VML2VC0_VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L +#define VML2VC0_VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L +#define VML2VC0_VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L +#define VML2VC0_VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L +#define VML2VC0_VM_INVALIDATE_ENG13_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L +#define VML2VC0_VM_INVALIDATE_ENG13_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L +//VML2VC0_VM_INVALIDATE_ENG14_REQ +#define VML2VC0_VM_INVALIDATE_ENG14_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG14_REQ__FLUSH_TYPE__SHIFT 0x10 +#define VML2VC0_VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PTES__SHIFT 0x12 +#define VML2VC0_VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13 +#define VML2VC0_VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14 +#define VML2VC0_VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15 +#define VML2VC0_VM_INVALIDATE_ENG14_REQ__INVALIDATE_L1_PTES__SHIFT 0x16 +#define VML2VC0_VM_INVALIDATE_ENG14_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17 +#define VML2VC0_VM_INVALIDATE_ENG14_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL +#define VML2VC0_VM_INVALIDATE_ENG14_REQ__FLUSH_TYPE_MASK 0x00030000L +#define VML2VC0_VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L +#define VML2VC0_VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L +#define VML2VC0_VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L +#define VML2VC0_VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L +#define VML2VC0_VM_INVALIDATE_ENG14_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L +#define VML2VC0_VM_INVALIDATE_ENG14_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L 
+//VML2VC0_VM_INVALIDATE_ENG15_REQ +#define VML2VC0_VM_INVALIDATE_ENG15_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG15_REQ__FLUSH_TYPE__SHIFT 0x10 +#define VML2VC0_VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PTES__SHIFT 0x12 +#define VML2VC0_VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13 +#define VML2VC0_VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14 +#define VML2VC0_VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15 +#define VML2VC0_VM_INVALIDATE_ENG15_REQ__INVALIDATE_L1_PTES__SHIFT 0x16 +#define VML2VC0_VM_INVALIDATE_ENG15_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17 +#define VML2VC0_VM_INVALIDATE_ENG15_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL +#define VML2VC0_VM_INVALIDATE_ENG15_REQ__FLUSH_TYPE_MASK 0x00030000L +#define VML2VC0_VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L +#define VML2VC0_VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L +#define VML2VC0_VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L +#define VML2VC0_VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L +#define VML2VC0_VM_INVALIDATE_ENG15_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L +#define VML2VC0_VM_INVALIDATE_ENG15_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L +//VML2VC0_VM_INVALIDATE_ENG16_REQ +#define VML2VC0_VM_INVALIDATE_ENG16_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG16_REQ__FLUSH_TYPE__SHIFT 0x10 +#define VML2VC0_VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PTES__SHIFT 0x12 +#define VML2VC0_VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13 +#define VML2VC0_VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14 +#define VML2VC0_VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15 +#define VML2VC0_VM_INVALIDATE_ENG16_REQ__INVALIDATE_L1_PTES__SHIFT 0x16 +#define VML2VC0_VM_INVALIDATE_ENG16_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17 +#define VML2VC0_VM_INVALIDATE_ENG16_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL +#define VML2VC0_VM_INVALIDATE_ENG16_REQ__FLUSH_TYPE_MASK 0x00030000L +#define VML2VC0_VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L +#define VML2VC0_VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L +#define VML2VC0_VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L +#define VML2VC0_VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L +#define VML2VC0_VM_INVALIDATE_ENG16_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L +#define VML2VC0_VM_INVALIDATE_ENG16_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L +//VML2VC0_VM_INVALIDATE_ENG17_REQ +#define VML2VC0_VM_INVALIDATE_ENG17_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG17_REQ__FLUSH_TYPE__SHIFT 0x10 +#define VML2VC0_VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PTES__SHIFT 0x12 +#define VML2VC0_VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13 +#define VML2VC0_VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14 +#define VML2VC0_VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15 +#define VML2VC0_VM_INVALIDATE_ENG17_REQ__INVALIDATE_L1_PTES__SHIFT 0x16 +#define VML2VC0_VM_INVALIDATE_ENG17_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17 +#define VML2VC0_VM_INVALIDATE_ENG17_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL +#define VML2VC0_VM_INVALIDATE_ENG17_REQ__FLUSH_TYPE_MASK 0x00030000L +#define VML2VC0_VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L +#define VML2VC0_VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L +#define VML2VC0_VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L 
+#define VML2VC0_VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L +#define VML2VC0_VM_INVALIDATE_ENG17_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L +#define VML2VC0_VM_INVALIDATE_ENG17_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L +//VML2VC0_VM_INVALIDATE_ENG0_ACK +#define VML2VC0_VM_INVALIDATE_ENG0_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG0_ACK__SEMAPHORE__SHIFT 0x10 +#define VML2VC0_VM_INVALIDATE_ENG0_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL +#define VML2VC0_VM_INVALIDATE_ENG0_ACK__SEMAPHORE_MASK 0x00010000L +//VML2VC0_VM_INVALIDATE_ENG1_ACK +#define VML2VC0_VM_INVALIDATE_ENG1_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG1_ACK__SEMAPHORE__SHIFT 0x10 +#define VML2VC0_VM_INVALIDATE_ENG1_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL +#define VML2VC0_VM_INVALIDATE_ENG1_ACK__SEMAPHORE_MASK 0x00010000L +//VML2VC0_VM_INVALIDATE_ENG2_ACK +#define VML2VC0_VM_INVALIDATE_ENG2_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG2_ACK__SEMAPHORE__SHIFT 0x10 +#define VML2VC0_VM_INVALIDATE_ENG2_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL +#define VML2VC0_VM_INVALIDATE_ENG2_ACK__SEMAPHORE_MASK 0x00010000L +//VML2VC0_VM_INVALIDATE_ENG3_ACK +#define VML2VC0_VM_INVALIDATE_ENG3_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG3_ACK__SEMAPHORE__SHIFT 0x10 +#define VML2VC0_VM_INVALIDATE_ENG3_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL +#define VML2VC0_VM_INVALIDATE_ENG3_ACK__SEMAPHORE_MASK 0x00010000L +//VML2VC0_VM_INVALIDATE_ENG4_ACK +#define VML2VC0_VM_INVALIDATE_ENG4_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG4_ACK__SEMAPHORE__SHIFT 0x10 +#define VML2VC0_VM_INVALIDATE_ENG4_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL +#define VML2VC0_VM_INVALIDATE_ENG4_ACK__SEMAPHORE_MASK 0x00010000L +//VML2VC0_VM_INVALIDATE_ENG5_ACK +#define VML2VC0_VM_INVALIDATE_ENG5_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG5_ACK__SEMAPHORE__SHIFT 0x10 +#define VML2VC0_VM_INVALIDATE_ENG5_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL +#define VML2VC0_VM_INVALIDATE_ENG5_ACK__SEMAPHORE_MASK 0x00010000L +//VML2VC0_VM_INVALIDATE_ENG6_ACK +#define VML2VC0_VM_INVALIDATE_ENG6_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG6_ACK__SEMAPHORE__SHIFT 0x10 +#define VML2VC0_VM_INVALIDATE_ENG6_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL +#define VML2VC0_VM_INVALIDATE_ENG6_ACK__SEMAPHORE_MASK 0x00010000L +//VML2VC0_VM_INVALIDATE_ENG7_ACK +#define VML2VC0_VM_INVALIDATE_ENG7_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG7_ACK__SEMAPHORE__SHIFT 0x10 +#define VML2VC0_VM_INVALIDATE_ENG7_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL +#define VML2VC0_VM_INVALIDATE_ENG7_ACK__SEMAPHORE_MASK 0x00010000L +//VML2VC0_VM_INVALIDATE_ENG8_ACK +#define VML2VC0_VM_INVALIDATE_ENG8_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG8_ACK__SEMAPHORE__SHIFT 0x10 +#define VML2VC0_VM_INVALIDATE_ENG8_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL +#define VML2VC0_VM_INVALIDATE_ENG8_ACK__SEMAPHORE_MASK 0x00010000L +//VML2VC0_VM_INVALIDATE_ENG9_ACK +#define VML2VC0_VM_INVALIDATE_ENG9_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG9_ACK__SEMAPHORE__SHIFT 0x10 +#define VML2VC0_VM_INVALIDATE_ENG9_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL +#define VML2VC0_VM_INVALIDATE_ENG9_ACK__SEMAPHORE_MASK 0x00010000L +//VML2VC0_VM_INVALIDATE_ENG10_ACK +#define 
VML2VC0_VM_INVALIDATE_ENG10_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG10_ACK__SEMAPHORE__SHIFT 0x10 +#define VML2VC0_VM_INVALIDATE_ENG10_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL +#define VML2VC0_VM_INVALIDATE_ENG10_ACK__SEMAPHORE_MASK 0x00010000L +//VML2VC0_VM_INVALIDATE_ENG11_ACK +#define VML2VC0_VM_INVALIDATE_ENG11_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG11_ACK__SEMAPHORE__SHIFT 0x10 +#define VML2VC0_VM_INVALIDATE_ENG11_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL +#define VML2VC0_VM_INVALIDATE_ENG11_ACK__SEMAPHORE_MASK 0x00010000L +//VML2VC0_VM_INVALIDATE_ENG12_ACK +#define VML2VC0_VM_INVALIDATE_ENG12_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG12_ACK__SEMAPHORE__SHIFT 0x10 +#define VML2VC0_VM_INVALIDATE_ENG12_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL +#define VML2VC0_VM_INVALIDATE_ENG12_ACK__SEMAPHORE_MASK 0x00010000L +//VML2VC0_VM_INVALIDATE_ENG13_ACK +#define VML2VC0_VM_INVALIDATE_ENG13_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG13_ACK__SEMAPHORE__SHIFT 0x10 +#define VML2VC0_VM_INVALIDATE_ENG13_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL +#define VML2VC0_VM_INVALIDATE_ENG13_ACK__SEMAPHORE_MASK 0x00010000L +//VML2VC0_VM_INVALIDATE_ENG14_ACK +#define VML2VC0_VM_INVALIDATE_ENG14_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG14_ACK__SEMAPHORE__SHIFT 0x10 +#define VML2VC0_VM_INVALIDATE_ENG14_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL +#define VML2VC0_VM_INVALIDATE_ENG14_ACK__SEMAPHORE_MASK 0x00010000L +//VML2VC0_VM_INVALIDATE_ENG15_ACK +#define VML2VC0_VM_INVALIDATE_ENG15_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG15_ACK__SEMAPHORE__SHIFT 0x10 +#define VML2VC0_VM_INVALIDATE_ENG15_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL +#define VML2VC0_VM_INVALIDATE_ENG15_ACK__SEMAPHORE_MASK 0x00010000L +//VML2VC0_VM_INVALIDATE_ENG16_ACK +#define VML2VC0_VM_INVALIDATE_ENG16_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG16_ACK__SEMAPHORE__SHIFT 0x10 +#define VML2VC0_VM_INVALIDATE_ENG16_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL +#define VML2VC0_VM_INVALIDATE_ENG16_ACK__SEMAPHORE_MASK 0x00010000L +//VML2VC0_VM_INVALIDATE_ENG17_ACK +#define VML2VC0_VM_INVALIDATE_ENG17_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG17_ACK__SEMAPHORE__SHIFT 0x10 +#define VML2VC0_VM_INVALIDATE_ENG17_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL +#define VML2VC0_VM_INVALIDATE_ENG17_ACK__SEMAPHORE_MASK 0x00010000L +//VML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_LO32 +#define VML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1 +#define VML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L +#define VML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL +//VML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_HI32 +#define VML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL +//VML2VC0_VM_INVALIDATE_ENG1_ADDR_RANGE_LO32 +#define VML2VC0_VM_INVALIDATE_ENG1_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG1_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1 +#define VML2VC0_VM_INVALIDATE_ENG1_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L +#define VML2VC0_VM_INVALIDATE_ENG1_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 
0xFFFFFFFEL +//VML2VC0_VM_INVALIDATE_ENG1_ADDR_RANGE_HI32 +#define VML2VC0_VM_INVALIDATE_ENG1_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG1_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL +//VML2VC0_VM_INVALIDATE_ENG2_ADDR_RANGE_LO32 +#define VML2VC0_VM_INVALIDATE_ENG2_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG2_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1 +#define VML2VC0_VM_INVALIDATE_ENG2_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L +#define VML2VC0_VM_INVALIDATE_ENG2_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL +//VML2VC0_VM_INVALIDATE_ENG2_ADDR_RANGE_HI32 +#define VML2VC0_VM_INVALIDATE_ENG2_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG2_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL +//VML2VC0_VM_INVALIDATE_ENG3_ADDR_RANGE_LO32 +#define VML2VC0_VM_INVALIDATE_ENG3_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG3_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1 +#define VML2VC0_VM_INVALIDATE_ENG3_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L +#define VML2VC0_VM_INVALIDATE_ENG3_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL +//VML2VC0_VM_INVALIDATE_ENG3_ADDR_RANGE_HI32 +#define VML2VC0_VM_INVALIDATE_ENG3_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG3_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL +//VML2VC0_VM_INVALIDATE_ENG4_ADDR_RANGE_LO32 +#define VML2VC0_VM_INVALIDATE_ENG4_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG4_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1 +#define VML2VC0_VM_INVALIDATE_ENG4_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L +#define VML2VC0_VM_INVALIDATE_ENG4_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL +//VML2VC0_VM_INVALIDATE_ENG4_ADDR_RANGE_HI32 +#define VML2VC0_VM_INVALIDATE_ENG4_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG4_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL +//VML2VC0_VM_INVALIDATE_ENG5_ADDR_RANGE_LO32 +#define VML2VC0_VM_INVALIDATE_ENG5_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG5_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1 +#define VML2VC0_VM_INVALIDATE_ENG5_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L +#define VML2VC0_VM_INVALIDATE_ENG5_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL +//VML2VC0_VM_INVALIDATE_ENG5_ADDR_RANGE_HI32 +#define VML2VC0_VM_INVALIDATE_ENG5_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG5_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL +//VML2VC0_VM_INVALIDATE_ENG6_ADDR_RANGE_LO32 +#define VML2VC0_VM_INVALIDATE_ENG6_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG6_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1 +#define VML2VC0_VM_INVALIDATE_ENG6_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L +#define VML2VC0_VM_INVALIDATE_ENG6_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL +//VML2VC0_VM_INVALIDATE_ENG6_ADDR_RANGE_HI32 +#define VML2VC0_VM_INVALIDATE_ENG6_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG6_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL +//VML2VC0_VM_INVALIDATE_ENG7_ADDR_RANGE_LO32 +#define VML2VC0_VM_INVALIDATE_ENG7_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG7_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1 +#define VML2VC0_VM_INVALIDATE_ENG7_ADDR_RANGE_LO32__S_BIT_MASK 
0x00000001L +#define VML2VC0_VM_INVALIDATE_ENG7_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL +//VML2VC0_VM_INVALIDATE_ENG7_ADDR_RANGE_HI32 +#define VML2VC0_VM_INVALIDATE_ENG7_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG7_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL +//VML2VC0_VM_INVALIDATE_ENG8_ADDR_RANGE_LO32 +#define VML2VC0_VM_INVALIDATE_ENG8_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG8_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1 +#define VML2VC0_VM_INVALIDATE_ENG8_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L +#define VML2VC0_VM_INVALIDATE_ENG8_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL +//VML2VC0_VM_INVALIDATE_ENG8_ADDR_RANGE_HI32 +#define VML2VC0_VM_INVALIDATE_ENG8_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG8_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL +//VML2VC0_VM_INVALIDATE_ENG9_ADDR_RANGE_LO32 +#define VML2VC0_VM_INVALIDATE_ENG9_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG9_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1 +#define VML2VC0_VM_INVALIDATE_ENG9_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L +#define VML2VC0_VM_INVALIDATE_ENG9_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL +//VML2VC0_VM_INVALIDATE_ENG9_ADDR_RANGE_HI32 +#define VML2VC0_VM_INVALIDATE_ENG9_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG9_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL +//VML2VC0_VM_INVALIDATE_ENG10_ADDR_RANGE_LO32 +#define VML2VC0_VM_INVALIDATE_ENG10_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG10_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1 +#define VML2VC0_VM_INVALIDATE_ENG10_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L +#define VML2VC0_VM_INVALIDATE_ENG10_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL +//VML2VC0_VM_INVALIDATE_ENG10_ADDR_RANGE_HI32 +#define VML2VC0_VM_INVALIDATE_ENG10_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG10_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL +//VML2VC0_VM_INVALIDATE_ENG11_ADDR_RANGE_LO32 +#define VML2VC0_VM_INVALIDATE_ENG11_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG11_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1 +#define VML2VC0_VM_INVALIDATE_ENG11_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L +#define VML2VC0_VM_INVALIDATE_ENG11_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL +//VML2VC0_VM_INVALIDATE_ENG11_ADDR_RANGE_HI32 +#define VML2VC0_VM_INVALIDATE_ENG11_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG11_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL +//VML2VC0_VM_INVALIDATE_ENG12_ADDR_RANGE_LO32 +#define VML2VC0_VM_INVALIDATE_ENG12_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG12_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1 +#define VML2VC0_VM_INVALIDATE_ENG12_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L +#define VML2VC0_VM_INVALIDATE_ENG12_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL +//VML2VC0_VM_INVALIDATE_ENG12_ADDR_RANGE_HI32 +#define VML2VC0_VM_INVALIDATE_ENG12_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG12_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL +//VML2VC0_VM_INVALIDATE_ENG13_ADDR_RANGE_LO32 +#define VML2VC0_VM_INVALIDATE_ENG13_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0 +#define 
VML2VC0_VM_INVALIDATE_ENG13_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1 +#define VML2VC0_VM_INVALIDATE_ENG13_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L +#define VML2VC0_VM_INVALIDATE_ENG13_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL +//VML2VC0_VM_INVALIDATE_ENG13_ADDR_RANGE_HI32 +#define VML2VC0_VM_INVALIDATE_ENG13_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG13_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL +//VML2VC0_VM_INVALIDATE_ENG14_ADDR_RANGE_LO32 +#define VML2VC0_VM_INVALIDATE_ENG14_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG14_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1 +#define VML2VC0_VM_INVALIDATE_ENG14_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L +#define VML2VC0_VM_INVALIDATE_ENG14_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL +//VML2VC0_VM_INVALIDATE_ENG14_ADDR_RANGE_HI32 +#define VML2VC0_VM_INVALIDATE_ENG14_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG14_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL +//VML2VC0_VM_INVALIDATE_ENG15_ADDR_RANGE_LO32 +#define VML2VC0_VM_INVALIDATE_ENG15_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG15_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1 +#define VML2VC0_VM_INVALIDATE_ENG15_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L +#define VML2VC0_VM_INVALIDATE_ENG15_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL +//VML2VC0_VM_INVALIDATE_ENG15_ADDR_RANGE_HI32 +#define VML2VC0_VM_INVALIDATE_ENG15_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG15_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL +//VML2VC0_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32 +#define VML2VC0_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1 +#define VML2VC0_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L +#define VML2VC0_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL +//VML2VC0_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32 +#define VML2VC0_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL +//VML2VC0_VM_INVALIDATE_ENG17_ADDR_RANGE_LO32 +#define VML2VC0_VM_INVALIDATE_ENG17_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG17_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1 +#define VML2VC0_VM_INVALIDATE_ENG17_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L +#define VML2VC0_VM_INVALIDATE_ENG17_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL +//VML2VC0_VM_INVALIDATE_ENG17_ADDR_RANGE_HI32 +#define VML2VC0_VM_INVALIDATE_ENG17_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0 +#define VML2VC0_VM_INVALIDATE_ENG17_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL +//VML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32 +#define VML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32 +#define VML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 +#define 
VML2VC0_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32 +#define VML2VC0_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32 +#define VML2VC0_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32 +#define VML2VC0_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32 +#define VML2VC0_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32 +#define VML2VC0_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32 +#define VML2VC0_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32 +#define VML2VC0_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32 +#define VML2VC0_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32 +#define VML2VC0_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32 +#define VML2VC0_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32 +#define VML2VC0_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32 +#define VML2VC0_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32 +#define VML2VC0_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32 +#define VML2VC0_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 
0xFFFFFFFFL +//VML2VC0_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32 +#define VML2VC0_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32 +#define VML2VC0_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32 +#define VML2VC0_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32 +#define VML2VC0_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32 +#define VML2VC0_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32 +#define VML2VC0_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32 +#define VML2VC0_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32 +#define VML2VC0_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32 +#define VML2VC0_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32 +#define VML2VC0_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32 +#define VML2VC0_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32 +#define VML2VC0_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32 +#define VML2VC0_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32 +#define VML2VC0_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32 +#define 
VML2VC0_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32 +#define VML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32 +#define VML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC0_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32 +#define VML2VC0_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32 +#define VML2VC0_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC0_VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32 +#define VML2VC0_VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32 +#define VML2VC0_VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC0_VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32 +#define VML2VC0_VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32 +#define VML2VC0_VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC0_VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32 +#define VML2VC0_VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32 +#define VML2VC0_VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC0_VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32 +#define VML2VC0_VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32 +#define VML2VC0_VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC0_VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32 +#define VML2VC0_VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32 +#define VML2VC0_VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 
0x0000000FL +//VML2VC0_VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32 +#define VML2VC0_VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32 +#define VML2VC0_VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC0_VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32 +#define VML2VC0_VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32 +#define VML2VC0_VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC0_VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32 +#define VML2VC0_VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32 +#define VML2VC0_VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC0_VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32 +#define VML2VC0_VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32 +#define VML2VC0_VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC0_VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32 +#define VML2VC0_VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32 +#define VML2VC0_VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC0_VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32 +#define VML2VC0_VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32 +#define VML2VC0_VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC0_VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32 +#define VML2VC0_VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32 +#define VML2VC0_VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC0_VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32 +#define VML2VC0_VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 
+#define VML2VC0_VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32 +#define VML2VC0_VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC0_VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32 +#define VML2VC0_VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32 +#define VML2VC0_VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC0_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32 +#define VML2VC0_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32 +#define VML2VC0_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC0_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32 +#define VML2VC0_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32 +#define VML2VC0_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC0_VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32 +#define VML2VC0_VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32 +#define VML2VC0_VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC0_VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32 +#define VML2VC0_VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32 +#define VML2VC0_VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC0_VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32 +#define VML2VC0_VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32 +#define VML2VC0_VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC0_VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32 +#define VML2VC0_VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32 +#define VML2VC0_VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 
+#define VML2VC0_VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC0_VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32 +#define VML2VC0_VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32 +#define VML2VC0_VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC0_VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32 +#define VML2VC0_VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32 +#define VML2VC0_VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC0_VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32 +#define VML2VC0_VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32 +#define VML2VC0_VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC0_VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32 +#define VML2VC0_VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32 +#define VML2VC0_VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC0_VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32 +#define VML2VC0_VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32 +#define VML2VC0_VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC0_VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32 +#define VML2VC0_VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32 +#define VML2VC0_VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC0_VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32 +#define VML2VC0_VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32 +#define VML2VC0_VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC0_VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32 +#define VML2VC0_VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define 
VML2VC0_VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32 +#define VML2VC0_VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC0_VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32 +#define VML2VC0_VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32 +#define VML2VC0_VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC0_VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32 +#define VML2VC0_VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC0_VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32 +#define VML2VC0_VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC0_VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL + + +// addressBlock: mmhub_utcl2_vmsharedpfdec +//VMSHAREDPF0_MC_VM_NB_MMIOBASE +#define VMSHAREDPF0_MC_VM_NB_MMIOBASE__MMIOBASE__SHIFT 0x0 +#define VMSHAREDPF0_MC_VM_NB_MMIOBASE__MMIOBASE_MASK 0xFFFFFFFFL +//VMSHAREDPF0_MC_VM_NB_MMIOLIMIT +#define VMSHAREDPF0_MC_VM_NB_MMIOLIMIT__MMIOLIMIT__SHIFT 0x0 +#define VMSHAREDPF0_MC_VM_NB_MMIOLIMIT__MMIOLIMIT_MASK 0xFFFFFFFFL +//VMSHAREDPF0_MC_VM_NB_PCI_CTRL +#define VMSHAREDPF0_MC_VM_NB_PCI_CTRL__MMIOENABLE__SHIFT 0x17 +#define VMSHAREDPF0_MC_VM_NB_PCI_CTRL__MMIOENABLE_MASK 0x00800000L +//VMSHAREDPF0_MC_VM_NB_PCI_ARB +#define VMSHAREDPF0_MC_VM_NB_PCI_ARB__VGA_HOLE__SHIFT 0x3 +#define VMSHAREDPF0_MC_VM_NB_PCI_ARB__VGA_HOLE_MASK 0x00000008L +//VMSHAREDPF0_MC_VM_NB_TOP_OF_DRAM_SLOT1 +#define VMSHAREDPF0_MC_VM_NB_TOP_OF_DRAM_SLOT1__TOP_OF_DRAM__SHIFT 0x17 +#define VMSHAREDPF0_MC_VM_NB_TOP_OF_DRAM_SLOT1__TOP_OF_DRAM_MASK 0xFF800000L +//VMSHAREDPF0_MC_VM_NB_LOWER_TOP_OF_DRAM2 +#define VMSHAREDPF0_MC_VM_NB_LOWER_TOP_OF_DRAM2__ENABLE__SHIFT 0x0 +#define VMSHAREDPF0_MC_VM_NB_LOWER_TOP_OF_DRAM2__LOWER_TOM2__SHIFT 0x17 +#define VMSHAREDPF0_MC_VM_NB_LOWER_TOP_OF_DRAM2__ENABLE_MASK 0x00000001L +#define VMSHAREDPF0_MC_VM_NB_LOWER_TOP_OF_DRAM2__LOWER_TOM2_MASK 0xFF800000L +//VMSHAREDPF0_MC_VM_NB_UPPER_TOP_OF_DRAM2 +#define VMSHAREDPF0_MC_VM_NB_UPPER_TOP_OF_DRAM2__UPPER_TOM2__SHIFT 0x0 +#define VMSHAREDPF0_MC_VM_NB_UPPER_TOP_OF_DRAM2__UPPER_TOM2_MASK 0x00000FFFL +//VMSHAREDPF0_MC_VM_FB_OFFSET +#define VMSHAREDPF0_MC_VM_FB_OFFSET__FB_OFFSET__SHIFT 0x0 +#define VMSHAREDPF0_MC_VM_FB_OFFSET__FB_OFFSET_MASK 0x00FFFFFFL +//VMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB +#define VMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB__PHYSICAL_PAGE_NUMBER_LSB__SHIFT 0x0 +#define VMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB__PHYSICAL_PAGE_NUMBER_LSB_MASK 0xFFFFFFFFL +//VMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB +#define VMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB__PHYSICAL_PAGE_NUMBER_MSB__SHIFT 0x0 +#define VMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB__PHYSICAL_PAGE_NUMBER_MSB_MASK 0x0000000FL +//VMSHAREDPF0_MC_VM_STEERING +#define VMSHAREDPF0_MC_VM_STEERING__DEFAULT_STEERING__SHIFT 0x0 +#define VMSHAREDPF0_MC_VM_STEERING__DEFAULT_STEERING_MASK 0x00000003L 
+//VMSHAREDPF0_MC_SHARED_VIRT_RESET_REQ +#define VMSHAREDPF0_MC_SHARED_VIRT_RESET_REQ__VF__SHIFT 0x0 +#define VMSHAREDPF0_MC_SHARED_VIRT_RESET_REQ__PF__SHIFT 0x1f +#define VMSHAREDPF0_MC_SHARED_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL +#define VMSHAREDPF0_MC_SHARED_VIRT_RESET_REQ__PF_MASK 0x80000000L +//VMSHAREDPF0_MC_MEM_POWER_LS +#define VMSHAREDPF0_MC_MEM_POWER_LS__LS_SETUP__SHIFT 0x0 +#define VMSHAREDPF0_MC_MEM_POWER_LS__LS_HOLD__SHIFT 0x6 +#define VMSHAREDPF0_MC_MEM_POWER_LS__LS_SETUP_MASK 0x0000003FL +#define VMSHAREDPF0_MC_MEM_POWER_LS__LS_HOLD_MASK 0x00000FC0L +//VMSHAREDPF0_MC_VM_CACHEABLE_DRAM_ADDRESS_START +#define VMSHAREDPF0_MC_VM_CACHEABLE_DRAM_ADDRESS_START__ADDRESS__SHIFT 0x0 +#define VMSHAREDPF0_MC_VM_CACHEABLE_DRAM_ADDRESS_START__ADDRESS_MASK 0x000FFFFFL +//VMSHAREDPF0_MC_VM_CACHEABLE_DRAM_ADDRESS_END +#define VMSHAREDPF0_MC_VM_CACHEABLE_DRAM_ADDRESS_END__ADDRESS__SHIFT 0x0 +#define VMSHAREDPF0_MC_VM_CACHEABLE_DRAM_ADDRESS_END__ADDRESS_MASK 0x000FFFFFL +//VMSHAREDPF0_MC_VM_APT_CNTL +#define VMSHAREDPF0_MC_VM_APT_CNTL__FORCE_MTYPE_UC__SHIFT 0x0 +#define VMSHAREDPF0_MC_VM_APT_CNTL__DIRECT_SYSTEM_EN__SHIFT 0x1 +#define VMSHAREDPF0_MC_VM_APT_CNTL__FORCE_MTYPE_UC_MASK 0x00000001L +#define VMSHAREDPF0_MC_VM_APT_CNTL__DIRECT_SYSTEM_EN_MASK 0x00000002L +//VMSHAREDPF0_MC_VM_LOCAL_HBM_ADDRESS_START +#define VMSHAREDPF0_MC_VM_LOCAL_HBM_ADDRESS_START__ADDRESS__SHIFT 0x0 +#define VMSHAREDPF0_MC_VM_LOCAL_HBM_ADDRESS_START__ADDRESS_MASK 0x000FFFFFL +//VMSHAREDPF0_MC_VM_LOCAL_HBM_ADDRESS_END +#define VMSHAREDPF0_MC_VM_LOCAL_HBM_ADDRESS_END__ADDRESS__SHIFT 0x0 +#define VMSHAREDPF0_MC_VM_LOCAL_HBM_ADDRESS_END__ADDRESS_MASK 0x000FFFFFL +//VMSHAREDPF0_MC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL +#define VMSHAREDPF0_MC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL__LOCK__SHIFT 0x0 +#define VMSHAREDPF0_MC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL__LOCK_MASK 0x00000001L +//VMSHAREDPF0_MC_VM_XGMI_LFB_CNTL +#define VMSHAREDPF0_MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION__SHIFT 0x0 +#define VMSHAREDPF0_MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION__SHIFT 0x4 +#define VMSHAREDPF0_MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION_MASK 0x0000000FL +#define VMSHAREDPF0_MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION_MASK 0x000000F0L +//VMSHAREDPF0_MC_VM_XGMI_LFB_SIZE +#define VMSHAREDPF0_MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE__SHIFT 0x0 +#define VMSHAREDPF0_MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE_MASK 0x0001FFFFL +//VMSHAREDPF0_MC_VM_CACHEABLE_DRAM_CNTL +#define VMSHAREDPF0_MC_VM_CACHEABLE_DRAM_CNTL__ENABLE_CACHEABLE_DRAM_ADDRESS_APERTURE__SHIFT 0x0 +#define VMSHAREDPF0_MC_VM_CACHEABLE_DRAM_CNTL__ENABLE_CACHEABLE_DRAM_ADDRESS_APERTURE_MASK 0x00000001L + + +// addressBlock: mmhub_utcl2_vmsharedvcdec +//VMSHAREDVC0_MC_VM_FB_LOCATION_BASE +#define VMSHAREDVC0_MC_VM_FB_LOCATION_BASE__FB_BASE__SHIFT 0x0 +#define VMSHAREDVC0_MC_VM_FB_LOCATION_BASE__FB_BASE_MASK 0x00FFFFFFL +//VMSHAREDVC0_MC_VM_FB_LOCATION_TOP +#define VMSHAREDVC0_MC_VM_FB_LOCATION_TOP__FB_TOP__SHIFT 0x0 +#define VMSHAREDVC0_MC_VM_FB_LOCATION_TOP__FB_TOP_MASK 0x00FFFFFFL +//VMSHAREDVC0_MC_VM_AGP_TOP +#define VMSHAREDVC0_MC_VM_AGP_TOP__AGP_TOP__SHIFT 0x0 +#define VMSHAREDVC0_MC_VM_AGP_TOP__AGP_TOP_MASK 0x00FFFFFFL +//VMSHAREDVC0_MC_VM_AGP_BOT +#define VMSHAREDVC0_MC_VM_AGP_BOT__AGP_BOT__SHIFT 0x0 +#define VMSHAREDVC0_MC_VM_AGP_BOT__AGP_BOT_MASK 0x00FFFFFFL +//VMSHAREDVC0_MC_VM_AGP_BASE +#define VMSHAREDVC0_MC_VM_AGP_BASE__AGP_BASE__SHIFT 0x0 +#define VMSHAREDVC0_MC_VM_AGP_BASE__AGP_BASE_MASK 0x00FFFFFFL +//VMSHAREDVC0_MC_VM_SYSTEM_APERTURE_LOW_ADDR +#define VMSHAREDVC0_MC_VM_SYSTEM_APERTURE_LOW_ADDR__LOGICAL_ADDR__SHIFT 0x0 +#define 
VMSHAREDVC0_MC_VM_SYSTEM_APERTURE_LOW_ADDR__LOGICAL_ADDR_MASK 0x3FFFFFFFL +//VMSHAREDVC0_MC_VM_SYSTEM_APERTURE_HIGH_ADDR +#define VMSHAREDVC0_MC_VM_SYSTEM_APERTURE_HIGH_ADDR__LOGICAL_ADDR__SHIFT 0x0 +#define VMSHAREDVC0_MC_VM_SYSTEM_APERTURE_HIGH_ADDR__LOGICAL_ADDR_MASK 0x3FFFFFFFL +//VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL +#define VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB__SHIFT 0x0 +#define VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE__SHIFT 0x3 +#define VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT 0x5 +#define VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL__SHIFT 0x6 +#define VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL__ECO_BITS__SHIFT 0x7 +#define VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL__MTYPE__SHIFT 0xb +#define VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL__ATC_EN__SHIFT 0xd +#define VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK 0x00000001L +#define VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK 0x00000018L +#define VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS_MASK 0x00000020L +#define VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK 0x00000040L +#define VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL__ECO_BITS_MASK 0x00000780L +#define VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL__MTYPE_MASK 0x00001800L +#define VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL__ATC_EN_MASK 0x00002000L + + +// addressBlock: mmhub_utcl2_vmsharedhvdec +//VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF0 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_SIZE__SHIFT 0x0 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_OFFSET__SHIFT 0x10 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_SIZE_MASK 0x0000FFFFL +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_OFFSET_MASK 0xFFFF0000L +//VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF1 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF1__VF_FB_SIZE__SHIFT 0x0 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF1__VF_FB_OFFSET__SHIFT 0x10 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF1__VF_FB_SIZE_MASK 0x0000FFFFL +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF1__VF_FB_OFFSET_MASK 0xFFFF0000L +//VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF2 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF2__VF_FB_SIZE__SHIFT 0x0 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF2__VF_FB_OFFSET__SHIFT 0x10 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF2__VF_FB_SIZE_MASK 0x0000FFFFL +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF2__VF_FB_OFFSET_MASK 0xFFFF0000L +//VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF3 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF3__VF_FB_SIZE__SHIFT 0x0 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF3__VF_FB_OFFSET__SHIFT 0x10 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF3__VF_FB_SIZE_MASK 0x0000FFFFL +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF3__VF_FB_OFFSET_MASK 0xFFFF0000L +//VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF4 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF4__VF_FB_SIZE__SHIFT 0x0 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF4__VF_FB_OFFSET__SHIFT 0x10 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF4__VF_FB_SIZE_MASK 0x0000FFFFL +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF4__VF_FB_OFFSET_MASK 0xFFFF0000L +//VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF5 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF5__VF_FB_SIZE__SHIFT 0x0 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF5__VF_FB_OFFSET__SHIFT 0x10 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF5__VF_FB_SIZE_MASK 0x0000FFFFL +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF5__VF_FB_OFFSET_MASK 0xFFFF0000L +//VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF6 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF6__VF_FB_SIZE__SHIFT 0x0 +#define 
VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF6__VF_FB_OFFSET__SHIFT 0x10 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF6__VF_FB_SIZE_MASK 0x0000FFFFL +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF6__VF_FB_OFFSET_MASK 0xFFFF0000L +//VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF7 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF7__VF_FB_SIZE__SHIFT 0x0 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF7__VF_FB_OFFSET__SHIFT 0x10 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF7__VF_FB_SIZE_MASK 0x0000FFFFL +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF7__VF_FB_OFFSET_MASK 0xFFFF0000L +//VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF8 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF8__VF_FB_SIZE__SHIFT 0x0 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF8__VF_FB_OFFSET__SHIFT 0x10 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF8__VF_FB_SIZE_MASK 0x0000FFFFL +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF8__VF_FB_OFFSET_MASK 0xFFFF0000L +//VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF9 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF9__VF_FB_SIZE__SHIFT 0x0 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF9__VF_FB_OFFSET__SHIFT 0x10 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF9__VF_FB_SIZE_MASK 0x0000FFFFL +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF9__VF_FB_OFFSET_MASK 0xFFFF0000L +//VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF10 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF10__VF_FB_SIZE__SHIFT 0x0 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF10__VF_FB_OFFSET__SHIFT 0x10 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF10__VF_FB_SIZE_MASK 0x0000FFFFL +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF10__VF_FB_OFFSET_MASK 0xFFFF0000L +//VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF11 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF11__VF_FB_SIZE__SHIFT 0x0 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF11__VF_FB_OFFSET__SHIFT 0x10 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF11__VF_FB_SIZE_MASK 0x0000FFFFL +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF11__VF_FB_OFFSET_MASK 0xFFFF0000L +//VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF12 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF12__VF_FB_SIZE__SHIFT 0x0 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF12__VF_FB_OFFSET__SHIFT 0x10 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF12__VF_FB_SIZE_MASK 0x0000FFFFL +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF12__VF_FB_OFFSET_MASK 0xFFFF0000L +//VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF13 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF13__VF_FB_SIZE__SHIFT 0x0 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF13__VF_FB_OFFSET__SHIFT 0x10 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF13__VF_FB_SIZE_MASK 0x0000FFFFL +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF13__VF_FB_OFFSET_MASK 0xFFFF0000L +//VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF14 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF14__VF_FB_SIZE__SHIFT 0x0 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF14__VF_FB_OFFSET__SHIFT 0x10 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF14__VF_FB_SIZE_MASK 0x0000FFFFL +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF14__VF_FB_OFFSET_MASK 0xFFFF0000L +//VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF15 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF15__VF_FB_SIZE__SHIFT 0x0 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF15__VF_FB_OFFSET__SHIFT 0x10 +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF15__VF_FB_SIZE_MASK 0x0000FFFFL +#define VMSHAREDHV0_MC_VM_FB_SIZE_OFFSET_VF15__VF_FB_OFFSET_MASK 0xFFFF0000L +//VMSHAREDHV0_VM_IOMMU_MMIO_CNTRL_1 +#define VMSHAREDHV0_VM_IOMMU_MMIO_CNTRL_1__MARC_EN__SHIFT 0x8 +#define VMSHAREDHV0_VM_IOMMU_MMIO_CNTRL_1__MARC_EN_MASK 0x00000100L +//VMSHAREDHV0_MC_VM_MARC_BASE_LO_0 +#define VMSHAREDHV0_MC_VM_MARC_BASE_LO_0__MARC_BASE_LO_0__SHIFT 0xc +#define 
VMSHAREDHV0_MC_VM_MARC_BASE_LO_0__MARC_BASE_LO_0_MASK 0xFFFFF000L +//VMSHAREDHV0_MC_VM_MARC_BASE_LO_1 +#define VMSHAREDHV0_MC_VM_MARC_BASE_LO_1__MARC_BASE_LO_1__SHIFT 0xc +#define VMSHAREDHV0_MC_VM_MARC_BASE_LO_1__MARC_BASE_LO_1_MASK 0xFFFFF000L +//VMSHAREDHV0_MC_VM_MARC_BASE_LO_2 +#define VMSHAREDHV0_MC_VM_MARC_BASE_LO_2__MARC_BASE_LO_2__SHIFT 0xc +#define VMSHAREDHV0_MC_VM_MARC_BASE_LO_2__MARC_BASE_LO_2_MASK 0xFFFFF000L +//VMSHAREDHV0_MC_VM_MARC_BASE_LO_3 +#define VMSHAREDHV0_MC_VM_MARC_BASE_LO_3__MARC_BASE_LO_3__SHIFT 0xc +#define VMSHAREDHV0_MC_VM_MARC_BASE_LO_3__MARC_BASE_LO_3_MASK 0xFFFFF000L +//VMSHAREDHV0_MC_VM_MARC_BASE_HI_0 +#define VMSHAREDHV0_MC_VM_MARC_BASE_HI_0__MARC_BASE_HI_0__SHIFT 0x0 +#define VMSHAREDHV0_MC_VM_MARC_BASE_HI_0__MARC_BASE_HI_0_MASK 0x000FFFFFL +//VMSHAREDHV0_MC_VM_MARC_BASE_HI_1 +#define VMSHAREDHV0_MC_VM_MARC_BASE_HI_1__MARC_BASE_HI_1__SHIFT 0x0 +#define VMSHAREDHV0_MC_VM_MARC_BASE_HI_1__MARC_BASE_HI_1_MASK 0x000FFFFFL +//VMSHAREDHV0_MC_VM_MARC_BASE_HI_2 +#define VMSHAREDHV0_MC_VM_MARC_BASE_HI_2__MARC_BASE_HI_2__SHIFT 0x0 +#define VMSHAREDHV0_MC_VM_MARC_BASE_HI_2__MARC_BASE_HI_2_MASK 0x000FFFFFL +//VMSHAREDHV0_MC_VM_MARC_BASE_HI_3 +#define VMSHAREDHV0_MC_VM_MARC_BASE_HI_3__MARC_BASE_HI_3__SHIFT 0x0 +#define VMSHAREDHV0_MC_VM_MARC_BASE_HI_3__MARC_BASE_HI_3_MASK 0x000FFFFFL +//VMSHAREDHV0_MC_VM_MARC_RELOC_LO_0 +#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_0__MARC_ENABLE_0__SHIFT 0x0 +#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_0__MARC_READONLY_0__SHIFT 0x1 +#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_0__MARC_RELOC_LO_0__SHIFT 0xc +#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_0__MARC_ENABLE_0_MASK 0x00000001L +#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_0__MARC_READONLY_0_MASK 0x00000002L +#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_0__MARC_RELOC_LO_0_MASK 0xFFFFF000L +//VMSHAREDHV0_MC_VM_MARC_RELOC_LO_1 +#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_1__MARC_ENABLE_1__SHIFT 0x0 +#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_1__MARC_READONLY_1__SHIFT 0x1 +#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_1__MARC_RELOC_LO_1__SHIFT 0xc +#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_1__MARC_ENABLE_1_MASK 0x00000001L +#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_1__MARC_READONLY_1_MASK 0x00000002L +#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_1__MARC_RELOC_LO_1_MASK 0xFFFFF000L +//VMSHAREDHV0_MC_VM_MARC_RELOC_LO_2 +#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_2__MARC_ENABLE_2__SHIFT 0x0 +#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_2__MARC_READONLY_2__SHIFT 0x1 +#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_2__MARC_RELOC_LO_2__SHIFT 0xc +#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_2__MARC_ENABLE_2_MASK 0x00000001L +#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_2__MARC_READONLY_2_MASK 0x00000002L +#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_2__MARC_RELOC_LO_2_MASK 0xFFFFF000L +//VMSHAREDHV0_MC_VM_MARC_RELOC_LO_3 +#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_3__MARC_ENABLE_3__SHIFT 0x0 +#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_3__MARC_READONLY_3__SHIFT 0x1 +#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_3__MARC_RELOC_LO_3__SHIFT 0xc +#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_3__MARC_ENABLE_3_MASK 0x00000001L +#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_3__MARC_READONLY_3_MASK 0x00000002L +#define VMSHAREDHV0_MC_VM_MARC_RELOC_LO_3__MARC_RELOC_LO_3_MASK 0xFFFFF000L +//VMSHAREDHV0_MC_VM_MARC_RELOC_HI_0 +#define VMSHAREDHV0_MC_VM_MARC_RELOC_HI_0__MARC_RELOC_HI_0__SHIFT 0x0 +#define VMSHAREDHV0_MC_VM_MARC_RELOC_HI_0__MARC_RELOC_HI_0_MASK 0x000FFFFFL +//VMSHAREDHV0_MC_VM_MARC_RELOC_HI_1 +#define VMSHAREDHV0_MC_VM_MARC_RELOC_HI_1__MARC_RELOC_HI_1__SHIFT 0x0 +#define 
VMSHAREDHV0_MC_VM_MARC_RELOC_HI_1__MARC_RELOC_HI_1_MASK 0x000FFFFFL +//VMSHAREDHV0_MC_VM_MARC_RELOC_HI_2 +#define VMSHAREDHV0_MC_VM_MARC_RELOC_HI_2__MARC_RELOC_HI_2__SHIFT 0x0 +#define VMSHAREDHV0_MC_VM_MARC_RELOC_HI_2__MARC_RELOC_HI_2_MASK 0x000FFFFFL +//VMSHAREDHV0_MC_VM_MARC_RELOC_HI_3 +#define VMSHAREDHV0_MC_VM_MARC_RELOC_HI_3__MARC_RELOC_HI_3__SHIFT 0x0 +#define VMSHAREDHV0_MC_VM_MARC_RELOC_HI_3__MARC_RELOC_HI_3_MASK 0x000FFFFFL +//VMSHAREDHV0_MC_VM_MARC_LEN_LO_0 +#define VMSHAREDHV0_MC_VM_MARC_LEN_LO_0__MARC_LEN_LO_0__SHIFT 0xc +#define VMSHAREDHV0_MC_VM_MARC_LEN_LO_0__MARC_LEN_LO_0_MASK 0xFFFFF000L +//VMSHAREDHV0_MC_VM_MARC_LEN_LO_1 +#define VMSHAREDHV0_MC_VM_MARC_LEN_LO_1__MARC_LEN_LO_1__SHIFT 0xc +#define VMSHAREDHV0_MC_VM_MARC_LEN_LO_1__MARC_LEN_LO_1_MASK 0xFFFFF000L +//VMSHAREDHV0_MC_VM_MARC_LEN_LO_2 +#define VMSHAREDHV0_MC_VM_MARC_LEN_LO_2__MARC_LEN_LO_2__SHIFT 0xc +#define VMSHAREDHV0_MC_VM_MARC_LEN_LO_2__MARC_LEN_LO_2_MASK 0xFFFFF000L +//VMSHAREDHV0_MC_VM_MARC_LEN_LO_3 +#define VMSHAREDHV0_MC_VM_MARC_LEN_LO_3__MARC_LEN_LO_3__SHIFT 0xc +#define VMSHAREDHV0_MC_VM_MARC_LEN_LO_3__MARC_LEN_LO_3_MASK 0xFFFFF000L +//VMSHAREDHV0_MC_VM_MARC_LEN_HI_0 +#define VMSHAREDHV0_MC_VM_MARC_LEN_HI_0__MARC_LEN_HI_0__SHIFT 0x0 +#define VMSHAREDHV0_MC_VM_MARC_LEN_HI_0__MARC_LEN_HI_0_MASK 0x000FFFFFL +//VMSHAREDHV0_MC_VM_MARC_LEN_HI_1 +#define VMSHAREDHV0_MC_VM_MARC_LEN_HI_1__MARC_LEN_HI_1__SHIFT 0x0 +#define VMSHAREDHV0_MC_VM_MARC_LEN_HI_1__MARC_LEN_HI_1_MASK 0x000FFFFFL +//VMSHAREDHV0_MC_VM_MARC_LEN_HI_2 +#define VMSHAREDHV0_MC_VM_MARC_LEN_HI_2__MARC_LEN_HI_2__SHIFT 0x0 +#define VMSHAREDHV0_MC_VM_MARC_LEN_HI_2__MARC_LEN_HI_2_MASK 0x000FFFFFL +//VMSHAREDHV0_MC_VM_MARC_LEN_HI_3 +#define VMSHAREDHV0_MC_VM_MARC_LEN_HI_3__MARC_LEN_HI_3__SHIFT 0x0 +#define VMSHAREDHV0_MC_VM_MARC_LEN_HI_3__MARC_LEN_HI_3_MASK 0x000FFFFFL +//VMSHAREDHV0_VM_IOMMU_CONTROL_REGISTER +#define VMSHAREDHV0_VM_IOMMU_CONTROL_REGISTER__IOMMUEN__SHIFT 0x0 +#define VMSHAREDHV0_VM_IOMMU_CONTROL_REGISTER__IOMMUEN_MASK 0x00000001L +//VMSHAREDHV0_VM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER +#define VMSHAREDHV0_VM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER__PERFOPTEN__SHIFT 0xd +#define VMSHAREDHV0_VM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER__PERFOPTEN_MASK 0x00002000L +//VMSHAREDHV0_VM_PCIE_ATS_CNTL +#define VMSHAREDHV0_VM_PCIE_ATS_CNTL__STU__SHIFT 0x10 +#define VMSHAREDHV0_VM_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0x1f +#define VMSHAREDHV0_VM_PCIE_ATS_CNTL__STU_MASK 0x001F0000L +#define VMSHAREDHV0_VM_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x80000000L +//VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_0 +#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_0__ATC_ENABLE__SHIFT 0x1f +#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_0__ATC_ENABLE_MASK 0x80000000L +//VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_1 +#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_1__ATC_ENABLE__SHIFT 0x1f +#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_1__ATC_ENABLE_MASK 0x80000000L +//VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_2 +#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_2__ATC_ENABLE__SHIFT 0x1f +#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_2__ATC_ENABLE_MASK 0x80000000L +//VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_3 +#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_3__ATC_ENABLE__SHIFT 0x1f +#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_3__ATC_ENABLE_MASK 0x80000000L +//VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_4 +#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_4__ATC_ENABLE__SHIFT 0x1f +#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_4__ATC_ENABLE_MASK 0x80000000L +//VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_5 +#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_5__ATC_ENABLE__SHIFT 0x1f +#define 
VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_5__ATC_ENABLE_MASK 0x80000000L +//VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_6 +#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_6__ATC_ENABLE__SHIFT 0x1f +#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_6__ATC_ENABLE_MASK 0x80000000L +//VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_7 +#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_7__ATC_ENABLE__SHIFT 0x1f +#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_7__ATC_ENABLE_MASK 0x80000000L +//VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_8 +#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_8__ATC_ENABLE__SHIFT 0x1f +#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_8__ATC_ENABLE_MASK 0x80000000L +//VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_9 +#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_9__ATC_ENABLE__SHIFT 0x1f +#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_9__ATC_ENABLE_MASK 0x80000000L +//VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_10 +#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_10__ATC_ENABLE__SHIFT 0x1f +#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_10__ATC_ENABLE_MASK 0x80000000L +//VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_11 +#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_11__ATC_ENABLE__SHIFT 0x1f +#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_11__ATC_ENABLE_MASK 0x80000000L +//VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_12 +#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_12__ATC_ENABLE__SHIFT 0x1f +#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_12__ATC_ENABLE_MASK 0x80000000L +//VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_13 +#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_13__ATC_ENABLE__SHIFT 0x1f +#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_13__ATC_ENABLE_MASK 0x80000000L +//VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_14 +#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_14__ATC_ENABLE__SHIFT 0x1f +#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_14__ATC_ENABLE_MASK 0x80000000L +//VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_15 +#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_15__ATC_ENABLE__SHIFT 0x1f +#define VMSHAREDHV0_VM_PCIE_ATS_CNTL_VF_15__ATC_ENABLE_MASK 0x80000000L +//VMSHAREDHV0_UTCL2_CGTT_CLK_CTRL +#define VMSHAREDHV0_UTCL2_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define VMSHAREDHV0_UTCL2_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define VMSHAREDHV0_UTCL2_CGTT_CLK_CTRL__SOFT_OVERRIDE_EXTRA__SHIFT 0xc +#define VMSHAREDHV0_UTCL2_CGTT_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf +#define VMSHAREDHV0_UTCL2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x10 +#define VMSHAREDHV0_UTCL2_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x18 +#define VMSHAREDHV0_UTCL2_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define VMSHAREDHV0_UTCL2_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define VMSHAREDHV0_UTCL2_CGTT_CLK_CTRL__SOFT_OVERRIDE_EXTRA_MASK 0x00007000L +#define VMSHAREDHV0_UTCL2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L +#define VMSHAREDHV0_UTCL2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L +#define VMSHAREDHV0_UTCL2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L +//VMSHAREDHV0_MC_SHARED_ACTIVE_FCN_ID +#define VMSHAREDHV0_MC_SHARED_ACTIVE_FCN_ID__VFID__SHIFT 0x0 +#define VMSHAREDHV0_MC_SHARED_ACTIVE_FCN_ID__VF__SHIFT 0x1f +#define VMSHAREDHV0_MC_SHARED_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL +#define VMSHAREDHV0_MC_SHARED_ACTIVE_FCN_ID__VF_MASK 0x80000000L +//VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE +#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF0__SHIFT 0x0 +#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF1__SHIFT 0x1 +#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF2__SHIFT 0x2 +#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF3__SHIFT 0x3 +#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF4__SHIFT 0x4 +#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF5__SHIFT 0x5 +#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF6__SHIFT 0x6 
+#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF7__SHIFT 0x7 +#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF8__SHIFT 0x8 +#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF9__SHIFT 0x9 +#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF10__SHIFT 0xa +#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF11__SHIFT 0xb +#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF12__SHIFT 0xc +#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF13__SHIFT 0xd +#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF14__SHIFT 0xe +#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF15__SHIFT 0xf +#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_PF__SHIFT 0x1f +#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF0_MASK 0x00000001L +#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF1_MASK 0x00000002L +#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF2_MASK 0x00000004L +#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF3_MASK 0x00000008L +#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF4_MASK 0x00000010L +#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF5_MASK 0x00000020L +#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF6_MASK 0x00000040L +#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF7_MASK 0x00000080L +#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF8_MASK 0x00000100L +#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF9_MASK 0x00000200L +#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF10_MASK 0x00000400L +#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF11_MASK 0x00000800L +#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF12_MASK 0x00001000L +#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF13_MASK 0x00002000L +#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF14_MASK 0x00004000L +#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF15_MASK 0x00008000L +#define VMSHAREDHV0_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_PF_MASK 0x80000000L + + +// addressBlock: mmhub_utcl2_atcl2pfcntrdec +//ATCL2PFCNTR0_ATC_L2_PERFCOUNTER_LO +#define ATCL2PFCNTR0_ATC_L2_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0 +#define ATCL2PFCNTR0_ATC_L2_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL +//ATCL2PFCNTR0_ATC_L2_PERFCOUNTER_HI +#define ATCL2PFCNTR0_ATC_L2_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0 +#define ATCL2PFCNTR0_ATC_L2_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10 +#define ATCL2PFCNTR0_ATC_L2_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL +#define ATCL2PFCNTR0_ATC_L2_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L + + +// addressBlock: mmhub_utcl2_atcl2pfcntldec +//ATCL2PFCNTL0_ATC_L2_PERFCOUNTER0_CFG +#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0 +#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8 +#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18 +#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c +#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d +#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL +#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L +#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L +#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L +//ATCL2PFCNTL0_ATC_L2_PERFCOUNTER1_CFG +#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0 +#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8 +#define 
ATCL2PFCNTL0_ATC_L2_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18 +#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c +#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d +#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL +#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L +#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L +#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L +//ATCL2PFCNTL0_ATC_L2_PERFCOUNTER_RSLT_CNTL +#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0 +#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8 +#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10 +#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18 +#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19 +#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a +#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL +#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L +#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L +#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L +#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L +#define ATCL2PFCNTL0_ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L + + +// addressBlock: mmhub_utcl2_vml2pldec +//VML2PL0_MC_VM_L2_PERFCOUNTER0_CFG +#define VML2PL0_MC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0 +#define VML2PL0_MC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8 +#define VML2PL0_MC_VM_L2_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18 +#define VML2PL0_MC_VM_L2_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c +#define VML2PL0_MC_VM_L2_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d +#define VML2PL0_MC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL +#define VML2PL0_MC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define VML2PL0_MC_VM_L2_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L +#define VML2PL0_MC_VM_L2_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L +#define VML2PL0_MC_VM_L2_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L +//VML2PL0_MC_VM_L2_PERFCOUNTER1_CFG +#define VML2PL0_MC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0 +#define VML2PL0_MC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8 +#define VML2PL0_MC_VM_L2_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18 +#define VML2PL0_MC_VM_L2_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c +#define VML2PL0_MC_VM_L2_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d +#define VML2PL0_MC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL +#define VML2PL0_MC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define VML2PL0_MC_VM_L2_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L +#define VML2PL0_MC_VM_L2_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L +#define VML2PL0_MC_VM_L2_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L +//VML2PL0_MC_VM_L2_PERFCOUNTER2_CFG +#define VML2PL0_MC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0 +#define VML2PL0_MC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8 +#define VML2PL0_MC_VM_L2_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18 +#define VML2PL0_MC_VM_L2_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c +#define VML2PL0_MC_VM_L2_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d +#define VML2PL0_MC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL +#define VML2PL0_MC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define 
VML2PL0_MC_VM_L2_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L +#define VML2PL0_MC_VM_L2_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L +#define VML2PL0_MC_VM_L2_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L +//VML2PL0_MC_VM_L2_PERFCOUNTER3_CFG +#define VML2PL0_MC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL__SHIFT 0x0 +#define VML2PL0_MC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL_END__SHIFT 0x8 +#define VML2PL0_MC_VM_L2_PERFCOUNTER3_CFG__PERF_MODE__SHIFT 0x18 +#define VML2PL0_MC_VM_L2_PERFCOUNTER3_CFG__ENABLE__SHIFT 0x1c +#define VML2PL0_MC_VM_L2_PERFCOUNTER3_CFG__CLEAR__SHIFT 0x1d +#define VML2PL0_MC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL_MASK 0x000000FFL +#define VML2PL0_MC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define VML2PL0_MC_VM_L2_PERFCOUNTER3_CFG__PERF_MODE_MASK 0x0F000000L +#define VML2PL0_MC_VM_L2_PERFCOUNTER3_CFG__ENABLE_MASK 0x10000000L +#define VML2PL0_MC_VM_L2_PERFCOUNTER3_CFG__CLEAR_MASK 0x20000000L +//VML2PL0_MC_VM_L2_PERFCOUNTER4_CFG +#define VML2PL0_MC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL__SHIFT 0x0 +#define VML2PL0_MC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL_END__SHIFT 0x8 +#define VML2PL0_MC_VM_L2_PERFCOUNTER4_CFG__PERF_MODE__SHIFT 0x18 +#define VML2PL0_MC_VM_L2_PERFCOUNTER4_CFG__ENABLE__SHIFT 0x1c +#define VML2PL0_MC_VM_L2_PERFCOUNTER4_CFG__CLEAR__SHIFT 0x1d +#define VML2PL0_MC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL_MASK 0x000000FFL +#define VML2PL0_MC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define VML2PL0_MC_VM_L2_PERFCOUNTER4_CFG__PERF_MODE_MASK 0x0F000000L +#define VML2PL0_MC_VM_L2_PERFCOUNTER4_CFG__ENABLE_MASK 0x10000000L +#define VML2PL0_MC_VM_L2_PERFCOUNTER4_CFG__CLEAR_MASK 0x20000000L +//VML2PL0_MC_VM_L2_PERFCOUNTER5_CFG +#define VML2PL0_MC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL__SHIFT 0x0 +#define VML2PL0_MC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL_END__SHIFT 0x8 +#define VML2PL0_MC_VM_L2_PERFCOUNTER5_CFG__PERF_MODE__SHIFT 0x18 +#define VML2PL0_MC_VM_L2_PERFCOUNTER5_CFG__ENABLE__SHIFT 0x1c +#define VML2PL0_MC_VM_L2_PERFCOUNTER5_CFG__CLEAR__SHIFT 0x1d +#define VML2PL0_MC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL_MASK 0x000000FFL +#define VML2PL0_MC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define VML2PL0_MC_VM_L2_PERFCOUNTER5_CFG__PERF_MODE_MASK 0x0F000000L +#define VML2PL0_MC_VM_L2_PERFCOUNTER5_CFG__ENABLE_MASK 0x10000000L +#define VML2PL0_MC_VM_L2_PERFCOUNTER5_CFG__CLEAR_MASK 0x20000000L +//VML2PL0_MC_VM_L2_PERFCOUNTER6_CFG +#define VML2PL0_MC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL__SHIFT 0x0 +#define VML2PL0_MC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL_END__SHIFT 0x8 +#define VML2PL0_MC_VM_L2_PERFCOUNTER6_CFG__PERF_MODE__SHIFT 0x18 +#define VML2PL0_MC_VM_L2_PERFCOUNTER6_CFG__ENABLE__SHIFT 0x1c +#define VML2PL0_MC_VM_L2_PERFCOUNTER6_CFG__CLEAR__SHIFT 0x1d +#define VML2PL0_MC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL_MASK 0x000000FFL +#define VML2PL0_MC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define VML2PL0_MC_VM_L2_PERFCOUNTER6_CFG__PERF_MODE_MASK 0x0F000000L +#define VML2PL0_MC_VM_L2_PERFCOUNTER6_CFG__ENABLE_MASK 0x10000000L +#define VML2PL0_MC_VM_L2_PERFCOUNTER6_CFG__CLEAR_MASK 0x20000000L +//VML2PL0_MC_VM_L2_PERFCOUNTER7_CFG +#define VML2PL0_MC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL__SHIFT 0x0 +#define VML2PL0_MC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL_END__SHIFT 0x8 +#define VML2PL0_MC_VM_L2_PERFCOUNTER7_CFG__PERF_MODE__SHIFT 0x18 +#define VML2PL0_MC_VM_L2_PERFCOUNTER7_CFG__ENABLE__SHIFT 0x1c +#define VML2PL0_MC_VM_L2_PERFCOUNTER7_CFG__CLEAR__SHIFT 0x1d +#define VML2PL0_MC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL_MASK 0x000000FFL +#define VML2PL0_MC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define 
VML2PL0_MC_VM_L2_PERFCOUNTER7_CFG__PERF_MODE_MASK 0x0F000000L +#define VML2PL0_MC_VM_L2_PERFCOUNTER7_CFG__ENABLE_MASK 0x10000000L +#define VML2PL0_MC_VM_L2_PERFCOUNTER7_CFG__CLEAR_MASK 0x20000000L +//VML2PL0_MC_VM_L2_PERFCOUNTER_RSLT_CNTL +#define VML2PL0_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0 +#define VML2PL0_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8 +#define VML2PL0_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10 +#define VML2PL0_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18 +#define VML2PL0_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19 +#define VML2PL0_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a +#define VML2PL0_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL +#define VML2PL0_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L +#define VML2PL0_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L +#define VML2PL0_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L +#define VML2PL0_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L +#define VML2PL0_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L + + +// addressBlock: mmhub_utcl2_vml2prdec +//VML2PR0_MC_VM_L2_PERFCOUNTER_LO +#define VML2PR0_MC_VM_L2_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0 +#define VML2PR0_MC_VM_L2_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL +//VML2PR0_MC_VM_L2_PERFCOUNTER_HI +#define VML2PR0_MC_VM_L2_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0 +#define VML2PR0_MC_VM_L2_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10 +#define VML2PR0_MC_VM_L2_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL +#define VML2PR0_MC_VM_L2_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L + + +// addressBlock: mmhub_dagb_dagbdec5 +//DAGB5_RDCLI0 +#define DAGB5_RDCLI0__VIRT_CHAN__SHIFT 0x0 +#define DAGB5_RDCLI0__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB5_RDCLI0__URG_HIGH__SHIFT 0x4 +#define DAGB5_RDCLI0__URG_LOW__SHIFT 0x8 +#define DAGB5_RDCLI0__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB5_RDCLI0__MAX_BW__SHIFT 0xd +#define DAGB5_RDCLI0__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB5_RDCLI0__MIN_BW__SHIFT 0x16 +#define DAGB5_RDCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB5_RDCLI0__MAX_OSD__SHIFT 0x1a +#define DAGB5_RDCLI0__VIRT_CHAN_MASK 0x00000007L +#define DAGB5_RDCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB5_RDCLI0__URG_HIGH_MASK 0x000000F0L +#define DAGB5_RDCLI0__URG_LOW_MASK 0x00000F00L +#define DAGB5_RDCLI0__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB5_RDCLI0__MAX_BW_MASK 0x001FE000L +#define DAGB5_RDCLI0__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB5_RDCLI0__MIN_BW_MASK 0x01C00000L +#define DAGB5_RDCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB5_RDCLI0__MAX_OSD_MASK 0xFC000000L +//DAGB5_RDCLI1 +#define DAGB5_RDCLI1__VIRT_CHAN__SHIFT 0x0 +#define DAGB5_RDCLI1__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB5_RDCLI1__URG_HIGH__SHIFT 0x4 +#define DAGB5_RDCLI1__URG_LOW__SHIFT 0x8 +#define DAGB5_RDCLI1__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB5_RDCLI1__MAX_BW__SHIFT 0xd +#define DAGB5_RDCLI1__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB5_RDCLI1__MIN_BW__SHIFT 0x16 +#define DAGB5_RDCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB5_RDCLI1__MAX_OSD__SHIFT 0x1a +#define DAGB5_RDCLI1__VIRT_CHAN_MASK 0x00000007L +#define DAGB5_RDCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB5_RDCLI1__URG_HIGH_MASK 0x000000F0L +#define DAGB5_RDCLI1__URG_LOW_MASK 0x00000F00L +#define DAGB5_RDCLI1__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB5_RDCLI1__MAX_BW_MASK 0x001FE000L +#define DAGB5_RDCLI1__MIN_BW_ENABLE_MASK 
0x00200000L +#define DAGB5_RDCLI1__MIN_BW_MASK 0x01C00000L +#define DAGB5_RDCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB5_RDCLI1__MAX_OSD_MASK 0xFC000000L +//DAGB5_RDCLI2 +#define DAGB5_RDCLI2__VIRT_CHAN__SHIFT 0x0 +#define DAGB5_RDCLI2__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB5_RDCLI2__URG_HIGH__SHIFT 0x4 +#define DAGB5_RDCLI2__URG_LOW__SHIFT 0x8 +#define DAGB5_RDCLI2__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB5_RDCLI2__MAX_BW__SHIFT 0xd +#define DAGB5_RDCLI2__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB5_RDCLI2__MIN_BW__SHIFT 0x16 +#define DAGB5_RDCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB5_RDCLI2__MAX_OSD__SHIFT 0x1a +#define DAGB5_RDCLI2__VIRT_CHAN_MASK 0x00000007L +#define DAGB5_RDCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB5_RDCLI2__URG_HIGH_MASK 0x000000F0L +#define DAGB5_RDCLI2__URG_LOW_MASK 0x00000F00L +#define DAGB5_RDCLI2__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB5_RDCLI2__MAX_BW_MASK 0x001FE000L +#define DAGB5_RDCLI2__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB5_RDCLI2__MIN_BW_MASK 0x01C00000L +#define DAGB5_RDCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB5_RDCLI2__MAX_OSD_MASK 0xFC000000L +//DAGB5_RDCLI3 +#define DAGB5_RDCLI3__VIRT_CHAN__SHIFT 0x0 +#define DAGB5_RDCLI3__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB5_RDCLI3__URG_HIGH__SHIFT 0x4 +#define DAGB5_RDCLI3__URG_LOW__SHIFT 0x8 +#define DAGB5_RDCLI3__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB5_RDCLI3__MAX_BW__SHIFT 0xd +#define DAGB5_RDCLI3__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB5_RDCLI3__MIN_BW__SHIFT 0x16 +#define DAGB5_RDCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB5_RDCLI3__MAX_OSD__SHIFT 0x1a +#define DAGB5_RDCLI3__VIRT_CHAN_MASK 0x00000007L +#define DAGB5_RDCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB5_RDCLI3__URG_HIGH_MASK 0x000000F0L +#define DAGB5_RDCLI3__URG_LOW_MASK 0x00000F00L +#define DAGB5_RDCLI3__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB5_RDCLI3__MAX_BW_MASK 0x001FE000L +#define DAGB5_RDCLI3__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB5_RDCLI3__MIN_BW_MASK 0x01C00000L +#define DAGB5_RDCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB5_RDCLI3__MAX_OSD_MASK 0xFC000000L +//DAGB5_RDCLI4 +#define DAGB5_RDCLI4__VIRT_CHAN__SHIFT 0x0 +#define DAGB5_RDCLI4__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB5_RDCLI4__URG_HIGH__SHIFT 0x4 +#define DAGB5_RDCLI4__URG_LOW__SHIFT 0x8 +#define DAGB5_RDCLI4__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB5_RDCLI4__MAX_BW__SHIFT 0xd +#define DAGB5_RDCLI4__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB5_RDCLI4__MIN_BW__SHIFT 0x16 +#define DAGB5_RDCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB5_RDCLI4__MAX_OSD__SHIFT 0x1a +#define DAGB5_RDCLI4__VIRT_CHAN_MASK 0x00000007L +#define DAGB5_RDCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB5_RDCLI4__URG_HIGH_MASK 0x000000F0L +#define DAGB5_RDCLI4__URG_LOW_MASK 0x00000F00L +#define DAGB5_RDCLI4__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB5_RDCLI4__MAX_BW_MASK 0x001FE000L +#define DAGB5_RDCLI4__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB5_RDCLI4__MIN_BW_MASK 0x01C00000L +#define DAGB5_RDCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB5_RDCLI4__MAX_OSD_MASK 0xFC000000L +//DAGB5_RDCLI5 +#define DAGB5_RDCLI5__VIRT_CHAN__SHIFT 0x0 +#define DAGB5_RDCLI5__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB5_RDCLI5__URG_HIGH__SHIFT 0x4 +#define DAGB5_RDCLI5__URG_LOW__SHIFT 0x8 +#define DAGB5_RDCLI5__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB5_RDCLI5__MAX_BW__SHIFT 0xd +#define DAGB5_RDCLI5__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB5_RDCLI5__MIN_BW__SHIFT 0x16 +#define DAGB5_RDCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19 
+#define DAGB5_RDCLI5__MAX_OSD__SHIFT 0x1a +#define DAGB5_RDCLI5__VIRT_CHAN_MASK 0x00000007L +#define DAGB5_RDCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB5_RDCLI5__URG_HIGH_MASK 0x000000F0L +#define DAGB5_RDCLI5__URG_LOW_MASK 0x00000F00L +#define DAGB5_RDCLI5__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB5_RDCLI5__MAX_BW_MASK 0x001FE000L +#define DAGB5_RDCLI5__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB5_RDCLI5__MIN_BW_MASK 0x01C00000L +#define DAGB5_RDCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB5_RDCLI5__MAX_OSD_MASK 0xFC000000L +//DAGB5_RDCLI6 +#define DAGB5_RDCLI6__VIRT_CHAN__SHIFT 0x0 +#define DAGB5_RDCLI6__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB5_RDCLI6__URG_HIGH__SHIFT 0x4 +#define DAGB5_RDCLI6__URG_LOW__SHIFT 0x8 +#define DAGB5_RDCLI6__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB5_RDCLI6__MAX_BW__SHIFT 0xd +#define DAGB5_RDCLI6__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB5_RDCLI6__MIN_BW__SHIFT 0x16 +#define DAGB5_RDCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB5_RDCLI6__MAX_OSD__SHIFT 0x1a +#define DAGB5_RDCLI6__VIRT_CHAN_MASK 0x00000007L +#define DAGB5_RDCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB5_RDCLI6__URG_HIGH_MASK 0x000000F0L +#define DAGB5_RDCLI6__URG_LOW_MASK 0x00000F00L +#define DAGB5_RDCLI6__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB5_RDCLI6__MAX_BW_MASK 0x001FE000L +#define DAGB5_RDCLI6__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB5_RDCLI6__MIN_BW_MASK 0x01C00000L +#define DAGB5_RDCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB5_RDCLI6__MAX_OSD_MASK 0xFC000000L +//DAGB5_RDCLI7 +#define DAGB5_RDCLI7__VIRT_CHAN__SHIFT 0x0 +#define DAGB5_RDCLI7__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB5_RDCLI7__URG_HIGH__SHIFT 0x4 +#define DAGB5_RDCLI7__URG_LOW__SHIFT 0x8 +#define DAGB5_RDCLI7__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB5_RDCLI7__MAX_BW__SHIFT 0xd +#define DAGB5_RDCLI7__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB5_RDCLI7__MIN_BW__SHIFT 0x16 +#define DAGB5_RDCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB5_RDCLI7__MAX_OSD__SHIFT 0x1a +#define DAGB5_RDCLI7__VIRT_CHAN_MASK 0x00000007L +#define DAGB5_RDCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB5_RDCLI7__URG_HIGH_MASK 0x000000F0L +#define DAGB5_RDCLI7__URG_LOW_MASK 0x00000F00L +#define DAGB5_RDCLI7__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB5_RDCLI7__MAX_BW_MASK 0x001FE000L +#define DAGB5_RDCLI7__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB5_RDCLI7__MIN_BW_MASK 0x01C00000L +#define DAGB5_RDCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB5_RDCLI7__MAX_OSD_MASK 0xFC000000L +//DAGB5_RDCLI8 +#define DAGB5_RDCLI8__VIRT_CHAN__SHIFT 0x0 +#define DAGB5_RDCLI8__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB5_RDCLI8__URG_HIGH__SHIFT 0x4 +#define DAGB5_RDCLI8__URG_LOW__SHIFT 0x8 +#define DAGB5_RDCLI8__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB5_RDCLI8__MAX_BW__SHIFT 0xd +#define DAGB5_RDCLI8__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB5_RDCLI8__MIN_BW__SHIFT 0x16 +#define DAGB5_RDCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB5_RDCLI8__MAX_OSD__SHIFT 0x1a +#define DAGB5_RDCLI8__VIRT_CHAN_MASK 0x00000007L +#define DAGB5_RDCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB5_RDCLI8__URG_HIGH_MASK 0x000000F0L +#define DAGB5_RDCLI8__URG_LOW_MASK 0x00000F00L +#define DAGB5_RDCLI8__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB5_RDCLI8__MAX_BW_MASK 0x001FE000L +#define DAGB5_RDCLI8__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB5_RDCLI8__MIN_BW_MASK 0x01C00000L +#define DAGB5_RDCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB5_RDCLI8__MAX_OSD_MASK 0xFC000000L +//DAGB5_RDCLI9 +#define 
DAGB5_RDCLI9__VIRT_CHAN__SHIFT 0x0 +#define DAGB5_RDCLI9__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB5_RDCLI9__URG_HIGH__SHIFT 0x4 +#define DAGB5_RDCLI9__URG_LOW__SHIFT 0x8 +#define DAGB5_RDCLI9__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB5_RDCLI9__MAX_BW__SHIFT 0xd +#define DAGB5_RDCLI9__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB5_RDCLI9__MIN_BW__SHIFT 0x16 +#define DAGB5_RDCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB5_RDCLI9__MAX_OSD__SHIFT 0x1a +#define DAGB5_RDCLI9__VIRT_CHAN_MASK 0x00000007L +#define DAGB5_RDCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB5_RDCLI9__URG_HIGH_MASK 0x000000F0L +#define DAGB5_RDCLI9__URG_LOW_MASK 0x00000F00L +#define DAGB5_RDCLI9__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB5_RDCLI9__MAX_BW_MASK 0x001FE000L +#define DAGB5_RDCLI9__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB5_RDCLI9__MIN_BW_MASK 0x01C00000L +#define DAGB5_RDCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB5_RDCLI9__MAX_OSD_MASK 0xFC000000L +//DAGB5_RDCLI10 +#define DAGB5_RDCLI10__VIRT_CHAN__SHIFT 0x0 +#define DAGB5_RDCLI10__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB5_RDCLI10__URG_HIGH__SHIFT 0x4 +#define DAGB5_RDCLI10__URG_LOW__SHIFT 0x8 +#define DAGB5_RDCLI10__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB5_RDCLI10__MAX_BW__SHIFT 0xd +#define DAGB5_RDCLI10__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB5_RDCLI10__MIN_BW__SHIFT 0x16 +#define DAGB5_RDCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB5_RDCLI10__MAX_OSD__SHIFT 0x1a +#define DAGB5_RDCLI10__VIRT_CHAN_MASK 0x00000007L +#define DAGB5_RDCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB5_RDCLI10__URG_HIGH_MASK 0x000000F0L +#define DAGB5_RDCLI10__URG_LOW_MASK 0x00000F00L +#define DAGB5_RDCLI10__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB5_RDCLI10__MAX_BW_MASK 0x001FE000L +#define DAGB5_RDCLI10__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB5_RDCLI10__MIN_BW_MASK 0x01C00000L +#define DAGB5_RDCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB5_RDCLI10__MAX_OSD_MASK 0xFC000000L +//DAGB5_RDCLI11 +#define DAGB5_RDCLI11__VIRT_CHAN__SHIFT 0x0 +#define DAGB5_RDCLI11__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB5_RDCLI11__URG_HIGH__SHIFT 0x4 +#define DAGB5_RDCLI11__URG_LOW__SHIFT 0x8 +#define DAGB5_RDCLI11__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB5_RDCLI11__MAX_BW__SHIFT 0xd +#define DAGB5_RDCLI11__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB5_RDCLI11__MIN_BW__SHIFT 0x16 +#define DAGB5_RDCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB5_RDCLI11__MAX_OSD__SHIFT 0x1a +#define DAGB5_RDCLI11__VIRT_CHAN_MASK 0x00000007L +#define DAGB5_RDCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB5_RDCLI11__URG_HIGH_MASK 0x000000F0L +#define DAGB5_RDCLI11__URG_LOW_MASK 0x00000F00L +#define DAGB5_RDCLI11__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB5_RDCLI11__MAX_BW_MASK 0x001FE000L +#define DAGB5_RDCLI11__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB5_RDCLI11__MIN_BW_MASK 0x01C00000L +#define DAGB5_RDCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB5_RDCLI11__MAX_OSD_MASK 0xFC000000L +//DAGB5_RDCLI12 +#define DAGB5_RDCLI12__VIRT_CHAN__SHIFT 0x0 +#define DAGB5_RDCLI12__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB5_RDCLI12__URG_HIGH__SHIFT 0x4 +#define DAGB5_RDCLI12__URG_LOW__SHIFT 0x8 +#define DAGB5_RDCLI12__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB5_RDCLI12__MAX_BW__SHIFT 0xd +#define DAGB5_RDCLI12__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB5_RDCLI12__MIN_BW__SHIFT 0x16 +#define DAGB5_RDCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB5_RDCLI12__MAX_OSD__SHIFT 0x1a +#define DAGB5_RDCLI12__VIRT_CHAN_MASK 0x00000007L +#define 
DAGB5_RDCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB5_RDCLI12__URG_HIGH_MASK 0x000000F0L +#define DAGB5_RDCLI12__URG_LOW_MASK 0x00000F00L +#define DAGB5_RDCLI12__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB5_RDCLI12__MAX_BW_MASK 0x001FE000L +#define DAGB5_RDCLI12__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB5_RDCLI12__MIN_BW_MASK 0x01C00000L +#define DAGB5_RDCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB5_RDCLI12__MAX_OSD_MASK 0xFC000000L +//DAGB5_RDCLI13 +#define DAGB5_RDCLI13__VIRT_CHAN__SHIFT 0x0 +#define DAGB5_RDCLI13__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB5_RDCLI13__URG_HIGH__SHIFT 0x4 +#define DAGB5_RDCLI13__URG_LOW__SHIFT 0x8 +#define DAGB5_RDCLI13__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB5_RDCLI13__MAX_BW__SHIFT 0xd +#define DAGB5_RDCLI13__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB5_RDCLI13__MIN_BW__SHIFT 0x16 +#define DAGB5_RDCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB5_RDCLI13__MAX_OSD__SHIFT 0x1a +#define DAGB5_RDCLI13__VIRT_CHAN_MASK 0x00000007L +#define DAGB5_RDCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB5_RDCLI13__URG_HIGH_MASK 0x000000F0L +#define DAGB5_RDCLI13__URG_LOW_MASK 0x00000F00L +#define DAGB5_RDCLI13__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB5_RDCLI13__MAX_BW_MASK 0x001FE000L +#define DAGB5_RDCLI13__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB5_RDCLI13__MIN_BW_MASK 0x01C00000L +#define DAGB5_RDCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB5_RDCLI13__MAX_OSD_MASK 0xFC000000L +//DAGB5_RDCLI14 +#define DAGB5_RDCLI14__VIRT_CHAN__SHIFT 0x0 +#define DAGB5_RDCLI14__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB5_RDCLI14__URG_HIGH__SHIFT 0x4 +#define DAGB5_RDCLI14__URG_LOW__SHIFT 0x8 +#define DAGB5_RDCLI14__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB5_RDCLI14__MAX_BW__SHIFT 0xd +#define DAGB5_RDCLI14__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB5_RDCLI14__MIN_BW__SHIFT 0x16 +#define DAGB5_RDCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB5_RDCLI14__MAX_OSD__SHIFT 0x1a +#define DAGB5_RDCLI14__VIRT_CHAN_MASK 0x00000007L +#define DAGB5_RDCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB5_RDCLI14__URG_HIGH_MASK 0x000000F0L +#define DAGB5_RDCLI14__URG_LOW_MASK 0x00000F00L +#define DAGB5_RDCLI14__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB5_RDCLI14__MAX_BW_MASK 0x001FE000L +#define DAGB5_RDCLI14__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB5_RDCLI14__MIN_BW_MASK 0x01C00000L +#define DAGB5_RDCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB5_RDCLI14__MAX_OSD_MASK 0xFC000000L +//DAGB5_RDCLI15 +#define DAGB5_RDCLI15__VIRT_CHAN__SHIFT 0x0 +#define DAGB5_RDCLI15__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB5_RDCLI15__URG_HIGH__SHIFT 0x4 +#define DAGB5_RDCLI15__URG_LOW__SHIFT 0x8 +#define DAGB5_RDCLI15__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB5_RDCLI15__MAX_BW__SHIFT 0xd +#define DAGB5_RDCLI15__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB5_RDCLI15__MIN_BW__SHIFT 0x16 +#define DAGB5_RDCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB5_RDCLI15__MAX_OSD__SHIFT 0x1a +#define DAGB5_RDCLI15__VIRT_CHAN_MASK 0x00000007L +#define DAGB5_RDCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB5_RDCLI15__URG_HIGH_MASK 0x000000F0L +#define DAGB5_RDCLI15__URG_LOW_MASK 0x00000F00L +#define DAGB5_RDCLI15__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB5_RDCLI15__MAX_BW_MASK 0x001FE000L +#define DAGB5_RDCLI15__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB5_RDCLI15__MIN_BW_MASK 0x01C00000L +#define DAGB5_RDCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB5_RDCLI15__MAX_OSD_MASK 0xFC000000L +//DAGB5_RD_CNTL +#define DAGB5_RD_CNTL__SCLK_FREQ__SHIFT 0x0 +#define 
DAGB5_RD_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4 +#define DAGB5_RD_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa +#define DAGB5_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10 +#define DAGB5_RD_CNTL__IO_LEVEL__SHIFT 0x11 +#define DAGB5_RD_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14 +#define DAGB5_RD_CNTL__SHARE_VC_NUM__SHIFT 0x17 +#define DAGB5_RD_CNTL__SCLK_FREQ_MASK 0x0000000FL +#define DAGB5_RD_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L +#define DAGB5_RD_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L +#define DAGB5_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L +#define DAGB5_RD_CNTL__IO_LEVEL_MASK 0x000E0000L +#define DAGB5_RD_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L +#define DAGB5_RD_CNTL__SHARE_VC_NUM_MASK 0x03800000L +//DAGB5_RD_GMI_CNTL +#define DAGB5_RD_GMI_CNTL__EA_CREDIT__SHIFT 0x0 +#define DAGB5_RD_GMI_CNTL__LEVEL__SHIFT 0x6 +#define DAGB5_RD_GMI_CNTL__MAX_BURST__SHIFT 0x9 +#define DAGB5_RD_GMI_CNTL__LAZY_TIMER__SHIFT 0xd +#define DAGB5_RD_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL +#define DAGB5_RD_GMI_CNTL__LEVEL_MASK 0x000001C0L +#define DAGB5_RD_GMI_CNTL__MAX_BURST_MASK 0x00001E00L +#define DAGB5_RD_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L +//DAGB5_RD_ADDR_DAGB +#define DAGB5_RD_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0 +#define DAGB5_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3 +#define DAGB5_RD_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6 +#define DAGB5_RD_ADDR_DAGB__WHOAMI__SHIFT 0x7 +#define DAGB5_RD_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L +#define DAGB5_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L +#define DAGB5_RD_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L +#define DAGB5_RD_ADDR_DAGB__WHOAMI_MASK 0x00001F80L +//DAGB5_RD_OUTPUT_DAGB_MAX_BURST +#define DAGB5_RD_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0 +#define DAGB5_RD_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4 +#define DAGB5_RD_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8 +#define DAGB5_RD_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc +#define DAGB5_RD_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10 +#define DAGB5_RD_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14 +#define DAGB5_RD_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18 +#define DAGB5_RD_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c +#define DAGB5_RD_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL +#define DAGB5_RD_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L +#define DAGB5_RD_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L +#define DAGB5_RD_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L +#define DAGB5_RD_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L +#define DAGB5_RD_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L +#define DAGB5_RD_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L +#define DAGB5_RD_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L +//DAGB5_RD_OUTPUT_DAGB_LAZY_TIMER +#define DAGB5_RD_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0 +#define DAGB5_RD_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4 +#define DAGB5_RD_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8 +#define DAGB5_RD_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc +#define DAGB5_RD_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10 +#define DAGB5_RD_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14 +#define DAGB5_RD_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18 +#define DAGB5_RD_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c +#define DAGB5_RD_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL +#define DAGB5_RD_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L +#define DAGB5_RD_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L +#define DAGB5_RD_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L +#define DAGB5_RD_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L +#define DAGB5_RD_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L +#define DAGB5_RD_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L +#define 
DAGB5_RD_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L +//DAGB5_RD_CGTT_CLK_CTRL +#define DAGB5_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB5_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB5_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB5_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB5_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB5_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB5_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB5_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB5_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB5_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB5_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB5_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB5_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB5_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB5_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB5_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB5_L1TLB_RD_CGTT_CLK_CTRL +#define DAGB5_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB5_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB5_L1TLB_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB5_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB5_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB5_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB5_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB5_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB5_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB5_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB5_L1TLB_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB5_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB5_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB5_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB5_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB5_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB5_ATCVM_RD_CGTT_CLK_CTRL +#define DAGB5_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB5_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB5_ATCVM_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB5_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB5_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB5_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB5_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB5_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB5_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB5_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB5_ATCVM_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB5_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB5_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB5_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB5_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB5_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB5_RD_ADDR_DAGB_MAX_BURST0 +#define DAGB5_RD_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0 +#define DAGB5_RD_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4 
+#define DAGB5_RD_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8 +#define DAGB5_RD_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc +#define DAGB5_RD_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10 +#define DAGB5_RD_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14 +#define DAGB5_RD_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18 +#define DAGB5_RD_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c +#define DAGB5_RD_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL +#define DAGB5_RD_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L +#define DAGB5_RD_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L +#define DAGB5_RD_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L +#define DAGB5_RD_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L +#define DAGB5_RD_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L +#define DAGB5_RD_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L +#define DAGB5_RD_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L +//DAGB5_RD_ADDR_DAGB_LAZY_TIMER0 +#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0 +#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4 +#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8 +#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc +#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10 +#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14 +#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18 +#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c +#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL +#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L +#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L +#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L +#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L +#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L +#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L +#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L +//DAGB5_RD_ADDR_DAGB_MAX_BURST1 +#define DAGB5_RD_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0 +#define DAGB5_RD_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4 +#define DAGB5_RD_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8 +#define DAGB5_RD_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc +#define DAGB5_RD_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10 +#define DAGB5_RD_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14 +#define DAGB5_RD_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18 +#define DAGB5_RD_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c +#define DAGB5_RD_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL +#define DAGB5_RD_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L +#define DAGB5_RD_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L +#define DAGB5_RD_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L +#define DAGB5_RD_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L +#define DAGB5_RD_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L +#define DAGB5_RD_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L +#define DAGB5_RD_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L +//DAGB5_RD_ADDR_DAGB_LAZY_TIMER1 +#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0 +#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4 +#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8 +#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc +#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10 +#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14 +#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18 +#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c +#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL 
+#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L +#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L +#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L +#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L +#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L +#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L +#define DAGB5_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L +//DAGB5_RD_VC0_CNTL +#define DAGB5_RD_VC0_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB5_RD_VC0_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB5_RD_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB5_RD_VC0_CNTL__MAX_BW__SHIFT 0xc +#define DAGB5_RD_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB5_RD_VC0_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB5_RD_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB5_RD_VC0_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB5_RD_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB5_RD_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB5_RD_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB5_RD_VC0_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB5_RD_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB5_RD_VC0_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB5_RD_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB5_RD_VC0_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB5_RD_VC1_CNTL +#define DAGB5_RD_VC1_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB5_RD_VC1_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB5_RD_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB5_RD_VC1_CNTL__MAX_BW__SHIFT 0xc +#define DAGB5_RD_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB5_RD_VC1_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB5_RD_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB5_RD_VC1_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB5_RD_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB5_RD_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB5_RD_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB5_RD_VC1_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB5_RD_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB5_RD_VC1_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB5_RD_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB5_RD_VC1_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB5_RD_VC2_CNTL +#define DAGB5_RD_VC2_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB5_RD_VC2_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB5_RD_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB5_RD_VC2_CNTL__MAX_BW__SHIFT 0xc +#define DAGB5_RD_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB5_RD_VC2_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB5_RD_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB5_RD_VC2_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB5_RD_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB5_RD_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB5_RD_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB5_RD_VC2_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB5_RD_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB5_RD_VC2_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB5_RD_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB5_RD_VC2_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB5_RD_VC3_CNTL +#define DAGB5_RD_VC3_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB5_RD_VC3_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB5_RD_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB5_RD_VC3_CNTL__MAX_BW__SHIFT 0xc +#define DAGB5_RD_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB5_RD_VC3_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB5_RD_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB5_RD_VC3_CNTL__MAX_OSD__SHIFT 0x19 +#define 
DAGB5_RD_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB5_RD_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB5_RD_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB5_RD_VC3_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB5_RD_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB5_RD_VC3_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB5_RD_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB5_RD_VC3_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB5_RD_VC4_CNTL +#define DAGB5_RD_VC4_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB5_RD_VC4_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB5_RD_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB5_RD_VC4_CNTL__MAX_BW__SHIFT 0xc +#define DAGB5_RD_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB5_RD_VC4_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB5_RD_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB5_RD_VC4_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB5_RD_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB5_RD_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB5_RD_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB5_RD_VC4_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB5_RD_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB5_RD_VC4_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB5_RD_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB5_RD_VC4_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB5_RD_VC5_CNTL +#define DAGB5_RD_VC5_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB5_RD_VC5_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB5_RD_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB5_RD_VC5_CNTL__MAX_BW__SHIFT 0xc +#define DAGB5_RD_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB5_RD_VC5_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB5_RD_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB5_RD_VC5_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB5_RD_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB5_RD_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB5_RD_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB5_RD_VC5_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB5_RD_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB5_RD_VC5_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB5_RD_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB5_RD_VC5_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB5_RD_VC6_CNTL +#define DAGB5_RD_VC6_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB5_RD_VC6_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB5_RD_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB5_RD_VC6_CNTL__MAX_BW__SHIFT 0xc +#define DAGB5_RD_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB5_RD_VC6_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB5_RD_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB5_RD_VC6_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB5_RD_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB5_RD_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB5_RD_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB5_RD_VC6_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB5_RD_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB5_RD_VC6_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB5_RD_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB5_RD_VC6_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB5_RD_VC7_CNTL +#define DAGB5_RD_VC7_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB5_RD_VC7_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB5_RD_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB5_RD_VC7_CNTL__MAX_BW__SHIFT 0xc +#define DAGB5_RD_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB5_RD_VC7_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB5_RD_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB5_RD_VC7_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB5_RD_VC7_CNTL__STOR_CREDIT_MASK 
0x0000001FL +#define DAGB5_RD_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB5_RD_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB5_RD_VC7_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB5_RD_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB5_RD_VC7_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB5_RD_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB5_RD_VC7_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB5_RD_CNTL_MISC +#define DAGB5_RD_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0 +#define DAGB5_RD_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6 +#define DAGB5_RD_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd +#define DAGB5_RD_CNTL_MISC__STOR_CC_LEGACY_MODE__SHIFT 0x13 +#define DAGB5_RD_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14 +#define DAGB5_RD_CNTL_MISC__UTCL2_CID__SHIFT 0x15 +#define DAGB5_RD_CNTL_MISC__RDRET_FIFO_CREDITS__SHIFT 0x1a +#define DAGB5_RD_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL +#define DAGB5_RD_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L +#define DAGB5_RD_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L +#define DAGB5_RD_CNTL_MISC__STOR_CC_LEGACY_MODE_MASK 0x00080000L +#define DAGB5_RD_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L +#define DAGB5_RD_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L +#define DAGB5_RD_CNTL_MISC__RDRET_FIFO_CREDITS_MASK 0xFC000000L +//DAGB5_RD_TLB_CREDIT +#define DAGB5_RD_TLB_CREDIT__TLB0__SHIFT 0x0 +#define DAGB5_RD_TLB_CREDIT__TLB1__SHIFT 0x5 +#define DAGB5_RD_TLB_CREDIT__TLB2__SHIFT 0xa +#define DAGB5_RD_TLB_CREDIT__TLB3__SHIFT 0xf +#define DAGB5_RD_TLB_CREDIT__TLB4__SHIFT 0x14 +#define DAGB5_RD_TLB_CREDIT__TLB5__SHIFT 0x19 +#define DAGB5_RD_TLB_CREDIT__TLB0_MASK 0x0000001FL +#define DAGB5_RD_TLB_CREDIT__TLB1_MASK 0x000003E0L +#define DAGB5_RD_TLB_CREDIT__TLB2_MASK 0x00007C00L +#define DAGB5_RD_TLB_CREDIT__TLB3_MASK 0x000F8000L +#define DAGB5_RD_TLB_CREDIT__TLB4_MASK 0x01F00000L +#define DAGB5_RD_TLB_CREDIT__TLB5_MASK 0x3E000000L +//DAGB5_RDCLI_ASK_PENDING +#define DAGB5_RDCLI_ASK_PENDING__BUSY__SHIFT 0x0 +#define DAGB5_RDCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB5_RDCLI_GO_PENDING +#define DAGB5_RDCLI_GO_PENDING__BUSY__SHIFT 0x0 +#define DAGB5_RDCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB5_RDCLI_GBLSEND_PENDING +#define DAGB5_RDCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0 +#define DAGB5_RDCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB5_RDCLI_TLB_PENDING +#define DAGB5_RDCLI_TLB_PENDING__BUSY__SHIFT 0x0 +#define DAGB5_RDCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB5_RDCLI_OARB_PENDING +#define DAGB5_RDCLI_OARB_PENDING__BUSY__SHIFT 0x0 +#define DAGB5_RDCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB5_RDCLI_OSD_PENDING +#define DAGB5_RDCLI_OSD_PENDING__BUSY__SHIFT 0x0 +#define DAGB5_RDCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB5_WRCLI0 +#define DAGB5_WRCLI0__VIRT_CHAN__SHIFT 0x0 +#define DAGB5_WRCLI0__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB5_WRCLI0__URG_HIGH__SHIFT 0x4 +#define DAGB5_WRCLI0__URG_LOW__SHIFT 0x8 +#define DAGB5_WRCLI0__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB5_WRCLI0__MAX_BW__SHIFT 0xd +#define DAGB5_WRCLI0__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB5_WRCLI0__MIN_BW__SHIFT 0x16 +#define DAGB5_WRCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB5_WRCLI0__MAX_OSD__SHIFT 0x1a +#define DAGB5_WRCLI0__VIRT_CHAN_MASK 0x00000007L +#define DAGB5_WRCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB5_WRCLI0__URG_HIGH_MASK 0x000000F0L +#define DAGB5_WRCLI0__URG_LOW_MASK 0x00000F00L +#define DAGB5_WRCLI0__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB5_WRCLI0__MAX_BW_MASK 0x001FE000L +#define DAGB5_WRCLI0__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB5_WRCLI0__MIN_BW_MASK 
0x01C00000L +#define DAGB5_WRCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB5_WRCLI0__MAX_OSD_MASK 0xFC000000L +//DAGB5_WRCLI1 +#define DAGB5_WRCLI1__VIRT_CHAN__SHIFT 0x0 +#define DAGB5_WRCLI1__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB5_WRCLI1__URG_HIGH__SHIFT 0x4 +#define DAGB5_WRCLI1__URG_LOW__SHIFT 0x8 +#define DAGB5_WRCLI1__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB5_WRCLI1__MAX_BW__SHIFT 0xd +#define DAGB5_WRCLI1__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB5_WRCLI1__MIN_BW__SHIFT 0x16 +#define DAGB5_WRCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB5_WRCLI1__MAX_OSD__SHIFT 0x1a +#define DAGB5_WRCLI1__VIRT_CHAN_MASK 0x00000007L +#define DAGB5_WRCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB5_WRCLI1__URG_HIGH_MASK 0x000000F0L +#define DAGB5_WRCLI1__URG_LOW_MASK 0x00000F00L +#define DAGB5_WRCLI1__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB5_WRCLI1__MAX_BW_MASK 0x001FE000L +#define DAGB5_WRCLI1__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB5_WRCLI1__MIN_BW_MASK 0x01C00000L +#define DAGB5_WRCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB5_WRCLI1__MAX_OSD_MASK 0xFC000000L +//DAGB5_WRCLI2 +#define DAGB5_WRCLI2__VIRT_CHAN__SHIFT 0x0 +#define DAGB5_WRCLI2__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB5_WRCLI2__URG_HIGH__SHIFT 0x4 +#define DAGB5_WRCLI2__URG_LOW__SHIFT 0x8 +#define DAGB5_WRCLI2__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB5_WRCLI2__MAX_BW__SHIFT 0xd +#define DAGB5_WRCLI2__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB5_WRCLI2__MIN_BW__SHIFT 0x16 +#define DAGB5_WRCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB5_WRCLI2__MAX_OSD__SHIFT 0x1a +#define DAGB5_WRCLI2__VIRT_CHAN_MASK 0x00000007L +#define DAGB5_WRCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB5_WRCLI2__URG_HIGH_MASK 0x000000F0L +#define DAGB5_WRCLI2__URG_LOW_MASK 0x00000F00L +#define DAGB5_WRCLI2__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB5_WRCLI2__MAX_BW_MASK 0x001FE000L +#define DAGB5_WRCLI2__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB5_WRCLI2__MIN_BW_MASK 0x01C00000L +#define DAGB5_WRCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB5_WRCLI2__MAX_OSD_MASK 0xFC000000L +//DAGB5_WRCLI3 +#define DAGB5_WRCLI3__VIRT_CHAN__SHIFT 0x0 +#define DAGB5_WRCLI3__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB5_WRCLI3__URG_HIGH__SHIFT 0x4 +#define DAGB5_WRCLI3__URG_LOW__SHIFT 0x8 +#define DAGB5_WRCLI3__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB5_WRCLI3__MAX_BW__SHIFT 0xd +#define DAGB5_WRCLI3__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB5_WRCLI3__MIN_BW__SHIFT 0x16 +#define DAGB5_WRCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB5_WRCLI3__MAX_OSD__SHIFT 0x1a +#define DAGB5_WRCLI3__VIRT_CHAN_MASK 0x00000007L +#define DAGB5_WRCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB5_WRCLI3__URG_HIGH_MASK 0x000000F0L +#define DAGB5_WRCLI3__URG_LOW_MASK 0x00000F00L +#define DAGB5_WRCLI3__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB5_WRCLI3__MAX_BW_MASK 0x001FE000L +#define DAGB5_WRCLI3__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB5_WRCLI3__MIN_BW_MASK 0x01C00000L +#define DAGB5_WRCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB5_WRCLI3__MAX_OSD_MASK 0xFC000000L +//DAGB5_WRCLI4 +#define DAGB5_WRCLI4__VIRT_CHAN__SHIFT 0x0 +#define DAGB5_WRCLI4__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB5_WRCLI4__URG_HIGH__SHIFT 0x4 +#define DAGB5_WRCLI4__URG_LOW__SHIFT 0x8 +#define DAGB5_WRCLI4__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB5_WRCLI4__MAX_BW__SHIFT 0xd +#define DAGB5_WRCLI4__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB5_WRCLI4__MIN_BW__SHIFT 0x16 +#define DAGB5_WRCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB5_WRCLI4__MAX_OSD__SHIFT 0x1a 
+#define DAGB5_WRCLI4__VIRT_CHAN_MASK 0x00000007L +#define DAGB5_WRCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB5_WRCLI4__URG_HIGH_MASK 0x000000F0L +#define DAGB5_WRCLI4__URG_LOW_MASK 0x00000F00L +#define DAGB5_WRCLI4__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB5_WRCLI4__MAX_BW_MASK 0x001FE000L +#define DAGB5_WRCLI4__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB5_WRCLI4__MIN_BW_MASK 0x01C00000L +#define DAGB5_WRCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB5_WRCLI4__MAX_OSD_MASK 0xFC000000L +//DAGB5_WRCLI5 +#define DAGB5_WRCLI5__VIRT_CHAN__SHIFT 0x0 +#define DAGB5_WRCLI5__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB5_WRCLI5__URG_HIGH__SHIFT 0x4 +#define DAGB5_WRCLI5__URG_LOW__SHIFT 0x8 +#define DAGB5_WRCLI5__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB5_WRCLI5__MAX_BW__SHIFT 0xd +#define DAGB5_WRCLI5__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB5_WRCLI5__MIN_BW__SHIFT 0x16 +#define DAGB5_WRCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB5_WRCLI5__MAX_OSD__SHIFT 0x1a +#define DAGB5_WRCLI5__VIRT_CHAN_MASK 0x00000007L +#define DAGB5_WRCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB5_WRCLI5__URG_HIGH_MASK 0x000000F0L +#define DAGB5_WRCLI5__URG_LOW_MASK 0x00000F00L +#define DAGB5_WRCLI5__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB5_WRCLI5__MAX_BW_MASK 0x001FE000L +#define DAGB5_WRCLI5__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB5_WRCLI5__MIN_BW_MASK 0x01C00000L +#define DAGB5_WRCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB5_WRCLI5__MAX_OSD_MASK 0xFC000000L +//DAGB5_WRCLI6 +#define DAGB5_WRCLI6__VIRT_CHAN__SHIFT 0x0 +#define DAGB5_WRCLI6__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB5_WRCLI6__URG_HIGH__SHIFT 0x4 +#define DAGB5_WRCLI6__URG_LOW__SHIFT 0x8 +#define DAGB5_WRCLI6__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB5_WRCLI6__MAX_BW__SHIFT 0xd +#define DAGB5_WRCLI6__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB5_WRCLI6__MIN_BW__SHIFT 0x16 +#define DAGB5_WRCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB5_WRCLI6__MAX_OSD__SHIFT 0x1a +#define DAGB5_WRCLI6__VIRT_CHAN_MASK 0x00000007L +#define DAGB5_WRCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB5_WRCLI6__URG_HIGH_MASK 0x000000F0L +#define DAGB5_WRCLI6__URG_LOW_MASK 0x00000F00L +#define DAGB5_WRCLI6__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB5_WRCLI6__MAX_BW_MASK 0x001FE000L +#define DAGB5_WRCLI6__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB5_WRCLI6__MIN_BW_MASK 0x01C00000L +#define DAGB5_WRCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB5_WRCLI6__MAX_OSD_MASK 0xFC000000L +//DAGB5_WRCLI7 +#define DAGB5_WRCLI7__VIRT_CHAN__SHIFT 0x0 +#define DAGB5_WRCLI7__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB5_WRCLI7__URG_HIGH__SHIFT 0x4 +#define DAGB5_WRCLI7__URG_LOW__SHIFT 0x8 +#define DAGB5_WRCLI7__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB5_WRCLI7__MAX_BW__SHIFT 0xd +#define DAGB5_WRCLI7__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB5_WRCLI7__MIN_BW__SHIFT 0x16 +#define DAGB5_WRCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB5_WRCLI7__MAX_OSD__SHIFT 0x1a +#define DAGB5_WRCLI7__VIRT_CHAN_MASK 0x00000007L +#define DAGB5_WRCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB5_WRCLI7__URG_HIGH_MASK 0x000000F0L +#define DAGB5_WRCLI7__URG_LOW_MASK 0x00000F00L +#define DAGB5_WRCLI7__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB5_WRCLI7__MAX_BW_MASK 0x001FE000L +#define DAGB5_WRCLI7__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB5_WRCLI7__MIN_BW_MASK 0x01C00000L +#define DAGB5_WRCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB5_WRCLI7__MAX_OSD_MASK 0xFC000000L +//DAGB5_WRCLI8 +#define DAGB5_WRCLI8__VIRT_CHAN__SHIFT 0x0 +#define 
DAGB5_WRCLI8__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB5_WRCLI8__URG_HIGH__SHIFT 0x4 +#define DAGB5_WRCLI8__URG_LOW__SHIFT 0x8 +#define DAGB5_WRCLI8__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB5_WRCLI8__MAX_BW__SHIFT 0xd +#define DAGB5_WRCLI8__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB5_WRCLI8__MIN_BW__SHIFT 0x16 +#define DAGB5_WRCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB5_WRCLI8__MAX_OSD__SHIFT 0x1a +#define DAGB5_WRCLI8__VIRT_CHAN_MASK 0x00000007L +#define DAGB5_WRCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB5_WRCLI8__URG_HIGH_MASK 0x000000F0L +#define DAGB5_WRCLI8__URG_LOW_MASK 0x00000F00L +#define DAGB5_WRCLI8__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB5_WRCLI8__MAX_BW_MASK 0x001FE000L +#define DAGB5_WRCLI8__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB5_WRCLI8__MIN_BW_MASK 0x01C00000L +#define DAGB5_WRCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB5_WRCLI8__MAX_OSD_MASK 0xFC000000L +//DAGB5_WRCLI9 +#define DAGB5_WRCLI9__VIRT_CHAN__SHIFT 0x0 +#define DAGB5_WRCLI9__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB5_WRCLI9__URG_HIGH__SHIFT 0x4 +#define DAGB5_WRCLI9__URG_LOW__SHIFT 0x8 +#define DAGB5_WRCLI9__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB5_WRCLI9__MAX_BW__SHIFT 0xd +#define DAGB5_WRCLI9__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB5_WRCLI9__MIN_BW__SHIFT 0x16 +#define DAGB5_WRCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB5_WRCLI9__MAX_OSD__SHIFT 0x1a +#define DAGB5_WRCLI9__VIRT_CHAN_MASK 0x00000007L +#define DAGB5_WRCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB5_WRCLI9__URG_HIGH_MASK 0x000000F0L +#define DAGB5_WRCLI9__URG_LOW_MASK 0x00000F00L +#define DAGB5_WRCLI9__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB5_WRCLI9__MAX_BW_MASK 0x001FE000L +#define DAGB5_WRCLI9__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB5_WRCLI9__MIN_BW_MASK 0x01C00000L +#define DAGB5_WRCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB5_WRCLI9__MAX_OSD_MASK 0xFC000000L +//DAGB5_WRCLI10 +#define DAGB5_WRCLI10__VIRT_CHAN__SHIFT 0x0 +#define DAGB5_WRCLI10__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB5_WRCLI10__URG_HIGH__SHIFT 0x4 +#define DAGB5_WRCLI10__URG_LOW__SHIFT 0x8 +#define DAGB5_WRCLI10__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB5_WRCLI10__MAX_BW__SHIFT 0xd +#define DAGB5_WRCLI10__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB5_WRCLI10__MIN_BW__SHIFT 0x16 +#define DAGB5_WRCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB5_WRCLI10__MAX_OSD__SHIFT 0x1a +#define DAGB5_WRCLI10__VIRT_CHAN_MASK 0x00000007L +#define DAGB5_WRCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB5_WRCLI10__URG_HIGH_MASK 0x000000F0L +#define DAGB5_WRCLI10__URG_LOW_MASK 0x00000F00L +#define DAGB5_WRCLI10__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB5_WRCLI10__MAX_BW_MASK 0x001FE000L +#define DAGB5_WRCLI10__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB5_WRCLI10__MIN_BW_MASK 0x01C00000L +#define DAGB5_WRCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB5_WRCLI10__MAX_OSD_MASK 0xFC000000L +//DAGB5_WRCLI11 +#define DAGB5_WRCLI11__VIRT_CHAN__SHIFT 0x0 +#define DAGB5_WRCLI11__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB5_WRCLI11__URG_HIGH__SHIFT 0x4 +#define DAGB5_WRCLI11__URG_LOW__SHIFT 0x8 +#define DAGB5_WRCLI11__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB5_WRCLI11__MAX_BW__SHIFT 0xd +#define DAGB5_WRCLI11__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB5_WRCLI11__MIN_BW__SHIFT 0x16 +#define DAGB5_WRCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB5_WRCLI11__MAX_OSD__SHIFT 0x1a +#define DAGB5_WRCLI11__VIRT_CHAN_MASK 0x00000007L +#define DAGB5_WRCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB5_WRCLI11__URG_HIGH_MASK 0x000000F0L 
+#define DAGB5_WRCLI11__URG_LOW_MASK 0x00000F00L +#define DAGB5_WRCLI11__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB5_WRCLI11__MAX_BW_MASK 0x001FE000L +#define DAGB5_WRCLI11__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB5_WRCLI11__MIN_BW_MASK 0x01C00000L +#define DAGB5_WRCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB5_WRCLI11__MAX_OSD_MASK 0xFC000000L +//DAGB5_WRCLI12 +#define DAGB5_WRCLI12__VIRT_CHAN__SHIFT 0x0 +#define DAGB5_WRCLI12__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB5_WRCLI12__URG_HIGH__SHIFT 0x4 +#define DAGB5_WRCLI12__URG_LOW__SHIFT 0x8 +#define DAGB5_WRCLI12__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB5_WRCLI12__MAX_BW__SHIFT 0xd +#define DAGB5_WRCLI12__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB5_WRCLI12__MIN_BW__SHIFT 0x16 +#define DAGB5_WRCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB5_WRCLI12__MAX_OSD__SHIFT 0x1a +#define DAGB5_WRCLI12__VIRT_CHAN_MASK 0x00000007L +#define DAGB5_WRCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB5_WRCLI12__URG_HIGH_MASK 0x000000F0L +#define DAGB5_WRCLI12__URG_LOW_MASK 0x00000F00L +#define DAGB5_WRCLI12__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB5_WRCLI12__MAX_BW_MASK 0x001FE000L +#define DAGB5_WRCLI12__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB5_WRCLI12__MIN_BW_MASK 0x01C00000L +#define DAGB5_WRCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB5_WRCLI12__MAX_OSD_MASK 0xFC000000L +//DAGB5_WRCLI13 +#define DAGB5_WRCLI13__VIRT_CHAN__SHIFT 0x0 +#define DAGB5_WRCLI13__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB5_WRCLI13__URG_HIGH__SHIFT 0x4 +#define DAGB5_WRCLI13__URG_LOW__SHIFT 0x8 +#define DAGB5_WRCLI13__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB5_WRCLI13__MAX_BW__SHIFT 0xd +#define DAGB5_WRCLI13__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB5_WRCLI13__MIN_BW__SHIFT 0x16 +#define DAGB5_WRCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB5_WRCLI13__MAX_OSD__SHIFT 0x1a +#define DAGB5_WRCLI13__VIRT_CHAN_MASK 0x00000007L +#define DAGB5_WRCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB5_WRCLI13__URG_HIGH_MASK 0x000000F0L +#define DAGB5_WRCLI13__URG_LOW_MASK 0x00000F00L +#define DAGB5_WRCLI13__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB5_WRCLI13__MAX_BW_MASK 0x001FE000L +#define DAGB5_WRCLI13__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB5_WRCLI13__MIN_BW_MASK 0x01C00000L +#define DAGB5_WRCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB5_WRCLI13__MAX_OSD_MASK 0xFC000000L +//DAGB5_WRCLI14 +#define DAGB5_WRCLI14__VIRT_CHAN__SHIFT 0x0 +#define DAGB5_WRCLI14__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB5_WRCLI14__URG_HIGH__SHIFT 0x4 +#define DAGB5_WRCLI14__URG_LOW__SHIFT 0x8 +#define DAGB5_WRCLI14__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB5_WRCLI14__MAX_BW__SHIFT 0xd +#define DAGB5_WRCLI14__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB5_WRCLI14__MIN_BW__SHIFT 0x16 +#define DAGB5_WRCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB5_WRCLI14__MAX_OSD__SHIFT 0x1a +#define DAGB5_WRCLI14__VIRT_CHAN_MASK 0x00000007L +#define DAGB5_WRCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB5_WRCLI14__URG_HIGH_MASK 0x000000F0L +#define DAGB5_WRCLI14__URG_LOW_MASK 0x00000F00L +#define DAGB5_WRCLI14__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB5_WRCLI14__MAX_BW_MASK 0x001FE000L +#define DAGB5_WRCLI14__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB5_WRCLI14__MIN_BW_MASK 0x01C00000L +#define DAGB5_WRCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB5_WRCLI14__MAX_OSD_MASK 0xFC000000L +//DAGB5_WRCLI15 +#define DAGB5_WRCLI15__VIRT_CHAN__SHIFT 0x0 +#define DAGB5_WRCLI15__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB5_WRCLI15__URG_HIGH__SHIFT 0x4 +#define 
DAGB5_WRCLI15__URG_LOW__SHIFT 0x8 +#define DAGB5_WRCLI15__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB5_WRCLI15__MAX_BW__SHIFT 0xd +#define DAGB5_WRCLI15__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB5_WRCLI15__MIN_BW__SHIFT 0x16 +#define DAGB5_WRCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB5_WRCLI15__MAX_OSD__SHIFT 0x1a +#define DAGB5_WRCLI15__VIRT_CHAN_MASK 0x00000007L +#define DAGB5_WRCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB5_WRCLI15__URG_HIGH_MASK 0x000000F0L +#define DAGB5_WRCLI15__URG_LOW_MASK 0x00000F00L +#define DAGB5_WRCLI15__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB5_WRCLI15__MAX_BW_MASK 0x001FE000L +#define DAGB5_WRCLI15__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB5_WRCLI15__MIN_BW_MASK 0x01C00000L +#define DAGB5_WRCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB5_WRCLI15__MAX_OSD_MASK 0xFC000000L +//DAGB5_WR_CNTL +#define DAGB5_WR_CNTL__SCLK_FREQ__SHIFT 0x0 +#define DAGB5_WR_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4 +#define DAGB5_WR_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa +#define DAGB5_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10 +#define DAGB5_WR_CNTL__IO_LEVEL__SHIFT 0x11 +#define DAGB5_WR_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14 +#define DAGB5_WR_CNTL__SHARE_VC_NUM__SHIFT 0x17 +#define DAGB5_WR_CNTL__SCLK_FREQ_MASK 0x0000000FL +#define DAGB5_WR_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L +#define DAGB5_WR_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L +#define DAGB5_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L +#define DAGB5_WR_CNTL__IO_LEVEL_MASK 0x000E0000L +#define DAGB5_WR_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L +#define DAGB5_WR_CNTL__SHARE_VC_NUM_MASK 0x03800000L +//DAGB5_WR_GMI_CNTL +#define DAGB5_WR_GMI_CNTL__EA_CREDIT__SHIFT 0x0 +#define DAGB5_WR_GMI_CNTL__LEVEL__SHIFT 0x6 +#define DAGB5_WR_GMI_CNTL__MAX_BURST__SHIFT 0x9 +#define DAGB5_WR_GMI_CNTL__LAZY_TIMER__SHIFT 0xd +#define DAGB5_WR_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL +#define DAGB5_WR_GMI_CNTL__LEVEL_MASK 0x000001C0L +#define DAGB5_WR_GMI_CNTL__MAX_BURST_MASK 0x00001E00L +#define DAGB5_WR_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L +//DAGB5_WR_ADDR_DAGB +#define DAGB5_WR_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0 +#define DAGB5_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3 +#define DAGB5_WR_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6 +#define DAGB5_WR_ADDR_DAGB__WHOAMI__SHIFT 0x7 +#define DAGB5_WR_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L +#define DAGB5_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L +#define DAGB5_WR_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L +#define DAGB5_WR_ADDR_DAGB__WHOAMI_MASK 0x00001F80L +//DAGB5_WR_OUTPUT_DAGB_MAX_BURST +#define DAGB5_WR_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0 +#define DAGB5_WR_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4 +#define DAGB5_WR_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8 +#define DAGB5_WR_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc +#define DAGB5_WR_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10 +#define DAGB5_WR_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14 +#define DAGB5_WR_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18 +#define DAGB5_WR_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c +#define DAGB5_WR_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL +#define DAGB5_WR_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L +#define DAGB5_WR_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L +#define DAGB5_WR_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L +#define DAGB5_WR_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L +#define DAGB5_WR_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L +#define DAGB5_WR_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L +#define DAGB5_WR_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L +//DAGB5_WR_OUTPUT_DAGB_LAZY_TIMER +#define 
DAGB5_WR_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0 +#define DAGB5_WR_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4 +#define DAGB5_WR_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8 +#define DAGB5_WR_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc +#define DAGB5_WR_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10 +#define DAGB5_WR_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14 +#define DAGB5_WR_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18 +#define DAGB5_WR_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c +#define DAGB5_WR_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL +#define DAGB5_WR_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L +#define DAGB5_WR_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L +#define DAGB5_WR_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L +#define DAGB5_WR_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L +#define DAGB5_WR_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L +#define DAGB5_WR_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L +#define DAGB5_WR_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L +//DAGB5_WR_CGTT_CLK_CTRL +#define DAGB5_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB5_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB5_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB5_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB5_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB5_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB5_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB5_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB5_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB5_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB5_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB5_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB5_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB5_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB5_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB5_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB5_L1TLB_WR_CGTT_CLK_CTRL +#define DAGB5_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB5_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB5_L1TLB_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB5_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB5_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB5_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB5_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB5_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB5_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB5_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB5_L1TLB_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB5_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB5_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB5_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB5_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB5_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB5_ATCVM_WR_CGTT_CLK_CTRL +#define DAGB5_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB5_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB5_ATCVM_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB5_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB5_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define 
DAGB5_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB5_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB5_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB5_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB5_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB5_ATCVM_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB5_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB5_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB5_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB5_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB5_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB5_WR_ADDR_DAGB_MAX_BURST0 +#define DAGB5_WR_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0 +#define DAGB5_WR_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4 +#define DAGB5_WR_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8 +#define DAGB5_WR_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc +#define DAGB5_WR_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10 +#define DAGB5_WR_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14 +#define DAGB5_WR_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18 +#define DAGB5_WR_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c +#define DAGB5_WR_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL +#define DAGB5_WR_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L +#define DAGB5_WR_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L +#define DAGB5_WR_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L +#define DAGB5_WR_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L +#define DAGB5_WR_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L +#define DAGB5_WR_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L +#define DAGB5_WR_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L +//DAGB5_WR_ADDR_DAGB_LAZY_TIMER0 +#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0 +#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4 +#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8 +#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc +#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10 +#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14 +#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18 +#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c +#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL +#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L +#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L +#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L +#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L +#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L +#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L +#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L +//DAGB5_WR_ADDR_DAGB_MAX_BURST1 +#define DAGB5_WR_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0 +#define DAGB5_WR_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4 +#define DAGB5_WR_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8 +#define DAGB5_WR_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc +#define DAGB5_WR_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10 +#define DAGB5_WR_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14 +#define DAGB5_WR_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18 +#define DAGB5_WR_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c +#define DAGB5_WR_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL +#define DAGB5_WR_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L +#define DAGB5_WR_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 
0x00000F00L +#define DAGB5_WR_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L +#define DAGB5_WR_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L +#define DAGB5_WR_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L +#define DAGB5_WR_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L +#define DAGB5_WR_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L +//DAGB5_WR_ADDR_DAGB_LAZY_TIMER1 +#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0 +#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4 +#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8 +#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc +#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10 +#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14 +#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18 +#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c +#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL +#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L +#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L +#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L +#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L +#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L +#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L +#define DAGB5_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L +//DAGB5_WR_DATA_DAGB +#define DAGB5_WR_DATA_DAGB__DAGB_ENABLE__SHIFT 0x0 +#define DAGB5_WR_DATA_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3 +#define DAGB5_WR_DATA_DAGB__DISABLE_SELF_INIT__SHIFT 0x6 +#define DAGB5_WR_DATA_DAGB__WHOAMI__SHIFT 0x7 +#define DAGB5_WR_DATA_DAGB__DAGB_ENABLE_MASK 0x00000007L +#define DAGB5_WR_DATA_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L +#define DAGB5_WR_DATA_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L +#define DAGB5_WR_DATA_DAGB__WHOAMI_MASK 0x00001F80L +//DAGB5_WR_DATA_DAGB_MAX_BURST0 +#define DAGB5_WR_DATA_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0 +#define DAGB5_WR_DATA_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4 +#define DAGB5_WR_DATA_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8 +#define DAGB5_WR_DATA_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc +#define DAGB5_WR_DATA_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10 +#define DAGB5_WR_DATA_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14 +#define DAGB5_WR_DATA_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18 +#define DAGB5_WR_DATA_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c +#define DAGB5_WR_DATA_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL +#define DAGB5_WR_DATA_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L +#define DAGB5_WR_DATA_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L +#define DAGB5_WR_DATA_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L +#define DAGB5_WR_DATA_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L +#define DAGB5_WR_DATA_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L +#define DAGB5_WR_DATA_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L +#define DAGB5_WR_DATA_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L +//DAGB5_WR_DATA_DAGB_LAZY_TIMER0 +#define DAGB5_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0 +#define DAGB5_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4 +#define DAGB5_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8 +#define DAGB5_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc +#define DAGB5_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10 +#define DAGB5_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14 +#define DAGB5_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18 +#define DAGB5_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c +#define DAGB5_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL +#define DAGB5_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L 
+#define DAGB5_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L +#define DAGB5_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L +#define DAGB5_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L +#define DAGB5_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L +#define DAGB5_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L +#define DAGB5_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L +//DAGB5_WR_DATA_DAGB_MAX_BURST1 +#define DAGB5_WR_DATA_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0 +#define DAGB5_WR_DATA_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4 +#define DAGB5_WR_DATA_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8 +#define DAGB5_WR_DATA_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc +#define DAGB5_WR_DATA_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10 +#define DAGB5_WR_DATA_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14 +#define DAGB5_WR_DATA_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18 +#define DAGB5_WR_DATA_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c +#define DAGB5_WR_DATA_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL +#define DAGB5_WR_DATA_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L +#define DAGB5_WR_DATA_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L +#define DAGB5_WR_DATA_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L +#define DAGB5_WR_DATA_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L +#define DAGB5_WR_DATA_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L +#define DAGB5_WR_DATA_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L +#define DAGB5_WR_DATA_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L +//DAGB5_WR_DATA_DAGB_LAZY_TIMER1 +#define DAGB5_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0 +#define DAGB5_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4 +#define DAGB5_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8 +#define DAGB5_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc +#define DAGB5_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10 +#define DAGB5_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14 +#define DAGB5_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18 +#define DAGB5_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c +#define DAGB5_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL +#define DAGB5_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L +#define DAGB5_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L +#define DAGB5_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L +#define DAGB5_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L +#define DAGB5_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L +#define DAGB5_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L +#define DAGB5_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L +//DAGB5_WR_VC0_CNTL +#define DAGB5_WR_VC0_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB5_WR_VC0_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB5_WR_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB5_WR_VC0_CNTL__MAX_BW__SHIFT 0xc +#define DAGB5_WR_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB5_WR_VC0_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB5_WR_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB5_WR_VC0_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB5_WR_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB5_WR_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB5_WR_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB5_WR_VC0_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB5_WR_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB5_WR_VC0_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB5_WR_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB5_WR_VC0_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB5_WR_VC1_CNTL +#define DAGB5_WR_VC1_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB5_WR_VC1_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB5_WR_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb 
+#define DAGB5_WR_VC1_CNTL__MAX_BW__SHIFT 0xc +#define DAGB5_WR_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB5_WR_VC1_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB5_WR_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB5_WR_VC1_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB5_WR_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB5_WR_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB5_WR_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB5_WR_VC1_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB5_WR_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB5_WR_VC1_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB5_WR_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB5_WR_VC1_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB5_WR_VC2_CNTL +#define DAGB5_WR_VC2_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB5_WR_VC2_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB5_WR_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB5_WR_VC2_CNTL__MAX_BW__SHIFT 0xc +#define DAGB5_WR_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB5_WR_VC2_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB5_WR_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB5_WR_VC2_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB5_WR_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB5_WR_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB5_WR_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB5_WR_VC2_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB5_WR_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB5_WR_VC2_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB5_WR_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB5_WR_VC2_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB5_WR_VC3_CNTL +#define DAGB5_WR_VC3_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB5_WR_VC3_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB5_WR_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB5_WR_VC3_CNTL__MAX_BW__SHIFT 0xc +#define DAGB5_WR_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB5_WR_VC3_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB5_WR_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB5_WR_VC3_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB5_WR_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB5_WR_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB5_WR_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB5_WR_VC3_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB5_WR_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB5_WR_VC3_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB5_WR_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB5_WR_VC3_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB5_WR_VC4_CNTL +#define DAGB5_WR_VC4_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB5_WR_VC4_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB5_WR_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB5_WR_VC4_CNTL__MAX_BW__SHIFT 0xc +#define DAGB5_WR_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB5_WR_VC4_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB5_WR_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB5_WR_VC4_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB5_WR_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB5_WR_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB5_WR_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB5_WR_VC4_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB5_WR_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB5_WR_VC4_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB5_WR_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB5_WR_VC4_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB5_WR_VC5_CNTL +#define DAGB5_WR_VC5_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB5_WR_VC5_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB5_WR_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define 
DAGB5_WR_VC5_CNTL__MAX_BW__SHIFT 0xc +#define DAGB5_WR_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB5_WR_VC5_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB5_WR_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB5_WR_VC5_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB5_WR_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB5_WR_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB5_WR_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB5_WR_VC5_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB5_WR_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB5_WR_VC5_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB5_WR_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB5_WR_VC5_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB5_WR_VC6_CNTL +#define DAGB5_WR_VC6_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB5_WR_VC6_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB5_WR_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB5_WR_VC6_CNTL__MAX_BW__SHIFT 0xc +#define DAGB5_WR_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB5_WR_VC6_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB5_WR_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB5_WR_VC6_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB5_WR_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB5_WR_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB5_WR_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB5_WR_VC6_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB5_WR_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB5_WR_VC6_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB5_WR_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB5_WR_VC6_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB5_WR_VC7_CNTL +#define DAGB5_WR_VC7_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB5_WR_VC7_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB5_WR_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB5_WR_VC7_CNTL__MAX_BW__SHIFT 0xc +#define DAGB5_WR_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB5_WR_VC7_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB5_WR_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB5_WR_VC7_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB5_WR_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB5_WR_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB5_WR_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB5_WR_VC7_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB5_WR_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB5_WR_VC7_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB5_WR_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB5_WR_VC7_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB5_WR_CNTL_MISC +#define DAGB5_WR_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0 +#define DAGB5_WR_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6 +#define DAGB5_WR_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd +#define DAGB5_WR_CNTL_MISC__STOR_CC_LEGACY_MODE__SHIFT 0x13 +#define DAGB5_WR_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14 +#define DAGB5_WR_CNTL_MISC__UTCL2_CID__SHIFT 0x15 +#define DAGB5_WR_CNTL_MISC__RDRET_FIFO_CREDITS__SHIFT 0x1a +#define DAGB5_WR_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL +#define DAGB5_WR_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L +#define DAGB5_WR_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L +#define DAGB5_WR_CNTL_MISC__STOR_CC_LEGACY_MODE_MASK 0x00080000L +#define DAGB5_WR_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L +#define DAGB5_WR_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L +#define DAGB5_WR_CNTL_MISC__RDRET_FIFO_CREDITS_MASK 0xFC000000L +//DAGB5_WR_TLB_CREDIT +#define DAGB5_WR_TLB_CREDIT__TLB0__SHIFT 0x0 +#define DAGB5_WR_TLB_CREDIT__TLB1__SHIFT 0x5 +#define DAGB5_WR_TLB_CREDIT__TLB2__SHIFT 0xa +#define DAGB5_WR_TLB_CREDIT__TLB3__SHIFT 0xf +#define DAGB5_WR_TLB_CREDIT__TLB4__SHIFT 
0x14 +#define DAGB5_WR_TLB_CREDIT__TLB5__SHIFT 0x19 +#define DAGB5_WR_TLB_CREDIT__TLB0_MASK 0x0000001FL +#define DAGB5_WR_TLB_CREDIT__TLB1_MASK 0x000003E0L +#define DAGB5_WR_TLB_CREDIT__TLB2_MASK 0x00007C00L +#define DAGB5_WR_TLB_CREDIT__TLB3_MASK 0x000F8000L +#define DAGB5_WR_TLB_CREDIT__TLB4_MASK 0x01F00000L +#define DAGB5_WR_TLB_CREDIT__TLB5_MASK 0x3E000000L +//DAGB5_WR_DATA_CREDIT +#define DAGB5_WR_DATA_CREDIT__DLOCK_VC_CREDITS__SHIFT 0x0 +#define DAGB5_WR_DATA_CREDIT__LARGE_BURST_CREDITS__SHIFT 0x8 +#define DAGB5_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS__SHIFT 0x10 +#define DAGB5_WR_DATA_CREDIT__SMALL_BURST_CREDITS__SHIFT 0x18 +#define DAGB5_WR_DATA_CREDIT__DLOCK_VC_CREDITS_MASK 0x000000FFL +#define DAGB5_WR_DATA_CREDIT__LARGE_BURST_CREDITS_MASK 0x0000FF00L +#define DAGB5_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS_MASK 0x00FF0000L +#define DAGB5_WR_DATA_CREDIT__SMALL_BURST_CREDITS_MASK 0xFF000000L +//DAGB5_WR_MISC_CREDIT +#define DAGB5_WR_MISC_CREDIT__ATOMIC_CREDIT__SHIFT 0x0 +#define DAGB5_WR_MISC_CREDIT__DLOCK_VC_NUM__SHIFT 0x6 +#define DAGB5_WR_MISC_CREDIT__OSD_CREDIT__SHIFT 0x9 +#define DAGB5_WR_MISC_CREDIT__OSD_DLOCK_CREDIT__SHIFT 0x10 +#define DAGB5_WR_MISC_CREDIT__ATOMIC_CREDIT_MASK 0x0000003FL +#define DAGB5_WR_MISC_CREDIT__DLOCK_VC_NUM_MASK 0x000001C0L +#define DAGB5_WR_MISC_CREDIT__OSD_CREDIT_MASK 0x0000FE00L +#define DAGB5_WR_MISC_CREDIT__OSD_DLOCK_CREDIT_MASK 0x007F0000L +//DAGB5_WRCLI_ASK_PENDING +#define DAGB5_WRCLI_ASK_PENDING__BUSY__SHIFT 0x0 +#define DAGB5_WRCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB5_WRCLI_GO_PENDING +#define DAGB5_WRCLI_GO_PENDING__BUSY__SHIFT 0x0 +#define DAGB5_WRCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB5_WRCLI_GBLSEND_PENDING +#define DAGB5_WRCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0 +#define DAGB5_WRCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB5_WRCLI_TLB_PENDING +#define DAGB5_WRCLI_TLB_PENDING__BUSY__SHIFT 0x0 +#define DAGB5_WRCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB5_WRCLI_OARB_PENDING +#define DAGB5_WRCLI_OARB_PENDING__BUSY__SHIFT 0x0 +#define DAGB5_WRCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB5_WRCLI_OSD_PENDING +#define DAGB5_WRCLI_OSD_PENDING__BUSY__SHIFT 0x0 +#define DAGB5_WRCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB5_WRCLI_DBUS_ASK_PENDING +#define DAGB5_WRCLI_DBUS_ASK_PENDING__BUSY__SHIFT 0x0 +#define DAGB5_WRCLI_DBUS_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB5_WRCLI_DBUS_GO_PENDING +#define DAGB5_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0 +#define DAGB5_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB5_DAGB_DLY +#define DAGB5_DAGB_DLY__DLY__SHIFT 0x0 +#define DAGB5_DAGB_DLY__CLI__SHIFT 0x8 +#define DAGB5_DAGB_DLY__POS__SHIFT 0x10 +#define DAGB5_DAGB_DLY__DLY_MASK 0x000000FFL +#define DAGB5_DAGB_DLY__CLI_MASK 0x0000FF00L +#define DAGB5_DAGB_DLY__POS_MASK 0x000F0000L +//DAGB5_CNTL_MISC +#define DAGB5_CNTL_MISC__EA_VC0_REMAP__SHIFT 0x0 +#define DAGB5_CNTL_MISC__EA_VC1_REMAP__SHIFT 0x3 +#define DAGB5_CNTL_MISC__EA_VC2_REMAP__SHIFT 0x6 +#define DAGB5_CNTL_MISC__EA_VC3_REMAP__SHIFT 0x9 +#define DAGB5_CNTL_MISC__EA_VC4_REMAP__SHIFT 0xc +#define DAGB5_CNTL_MISC__EA_VC5_REMAP__SHIFT 0xf +#define DAGB5_CNTL_MISC__EA_VC6_REMAP__SHIFT 0x12 +#define DAGB5_CNTL_MISC__EA_VC7_REMAP__SHIFT 0x15 +#define DAGB5_CNTL_MISC__BW_INIT_CYCLE__SHIFT 0x18 +#define DAGB5_CNTL_MISC__BW_RW_GAP_CYCLE__SHIFT 0x1e +#define DAGB5_CNTL_MISC__EA_VC0_REMAP_MASK 0x00000007L +#define DAGB5_CNTL_MISC__EA_VC1_REMAP_MASK 0x00000038L +#define DAGB5_CNTL_MISC__EA_VC2_REMAP_MASK 0x000001C0L +#define DAGB5_CNTL_MISC__EA_VC3_REMAP_MASK 0x00000E00L +#define 
DAGB5_CNTL_MISC__EA_VC4_REMAP_MASK 0x00007000L +#define DAGB5_CNTL_MISC__EA_VC5_REMAP_MASK 0x00038000L +#define DAGB5_CNTL_MISC__EA_VC6_REMAP_MASK 0x001C0000L +#define DAGB5_CNTL_MISC__EA_VC7_REMAP_MASK 0x00E00000L +#define DAGB5_CNTL_MISC__BW_INIT_CYCLE_MASK 0x3F000000L +#define DAGB5_CNTL_MISC__BW_RW_GAP_CYCLE_MASK 0xC0000000L +//DAGB5_CNTL_MISC2 +#define DAGB5_CNTL_MISC2__URG_BOOST_ENABLE__SHIFT 0x0 +#define DAGB5_CNTL_MISC2__URG_HALT_ENABLE__SHIFT 0x1 +#define DAGB5_CNTL_MISC2__DISABLE_WRREQ_CG__SHIFT 0x2 +#define DAGB5_CNTL_MISC2__DISABLE_WRRET_CG__SHIFT 0x3 +#define DAGB5_CNTL_MISC2__DISABLE_RDREQ_CG__SHIFT 0x4 +#define DAGB5_CNTL_MISC2__DISABLE_RDRET_CG__SHIFT 0x5 +#define DAGB5_CNTL_MISC2__DISABLE_TLBWR_CG__SHIFT 0x6 +#define DAGB5_CNTL_MISC2__DISABLE_TLBRD_CG__SHIFT 0x7 +#define DAGB5_CNTL_MISC2__DISABLE_EAWRREQ_BUSY__SHIFT 0x8 +#define DAGB5_CNTL_MISC2__DISABLE_EARDREQ_BUSY__SHIFT 0x9 +#define DAGB5_CNTL_MISC2__SWAP_CTL__SHIFT 0xa +#define DAGB5_CNTL_MISC2__RDRET_FIFO_PERF__SHIFT 0xb +#define DAGB5_CNTL_MISC2__RDRET_FIFO_DLOCK_CREDITS__SHIFT 0x11 +#define DAGB5_CNTL_MISC2__URG_BOOST_ENABLE_MASK 0x00000001L +#define DAGB5_CNTL_MISC2__URG_HALT_ENABLE_MASK 0x00000002L +#define DAGB5_CNTL_MISC2__DISABLE_WRREQ_CG_MASK 0x00000004L +#define DAGB5_CNTL_MISC2__DISABLE_WRRET_CG_MASK 0x00000008L +#define DAGB5_CNTL_MISC2__DISABLE_RDREQ_CG_MASK 0x00000010L +#define DAGB5_CNTL_MISC2__DISABLE_RDRET_CG_MASK 0x00000020L +#define DAGB5_CNTL_MISC2__DISABLE_TLBWR_CG_MASK 0x00000040L +#define DAGB5_CNTL_MISC2__DISABLE_TLBRD_CG_MASK 0x00000080L +#define DAGB5_CNTL_MISC2__DISABLE_EAWRREQ_BUSY_MASK 0x00000100L +#define DAGB5_CNTL_MISC2__DISABLE_EARDREQ_BUSY_MASK 0x00000200L +#define DAGB5_CNTL_MISC2__SWAP_CTL_MASK 0x00000400L +#define DAGB5_CNTL_MISC2__RDRET_FIFO_PERF_MASK 0x00000800L +#define DAGB5_CNTL_MISC2__RDRET_FIFO_DLOCK_CREDITS_MASK 0x007E0000L +//DAGB5_FIFO_EMPTY +#define DAGB5_FIFO_EMPTY__EMPTY__SHIFT 0x0 +#define DAGB5_FIFO_EMPTY__EMPTY_MASK 0x00FFFFFFL +//DAGB5_FIFO_FULL +#define DAGB5_FIFO_FULL__FULL__SHIFT 0x0 +#define DAGB5_FIFO_FULL__FULL_MASK 0x007FFFFFL +//DAGB5_WR_CREDITS_FULL +#define DAGB5_WR_CREDITS_FULL__FULL__SHIFT 0x0 +#define DAGB5_WR_CREDITS_FULL__FULL_MASK 0x1FFFFFFFL +//DAGB5_RD_CREDITS_FULL +#define DAGB5_RD_CREDITS_FULL__FULL__SHIFT 0x0 +#define DAGB5_RD_CREDITS_FULL__FULL_MASK 0x0003FFFFL +//DAGB5_PERFCOUNTER_LO +#define DAGB5_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0 +#define DAGB5_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL +//DAGB5_PERFCOUNTER_HI +#define DAGB5_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0 +#define DAGB5_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10 +#define DAGB5_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL +#define DAGB5_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L +//DAGB5_PERFCOUNTER0_CFG +#define DAGB5_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0 +#define DAGB5_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8 +#define DAGB5_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18 +#define DAGB5_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c +#define DAGB5_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d +#define DAGB5_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL +#define DAGB5_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define DAGB5_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L +#define DAGB5_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L +#define DAGB5_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L +//DAGB5_PERFCOUNTER1_CFG +#define DAGB5_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0 +#define DAGB5_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8 +#define DAGB5_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18 +#define 
DAGB5_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c +#define DAGB5_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d +#define DAGB5_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL +#define DAGB5_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define DAGB5_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L +#define DAGB5_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L +#define DAGB5_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L +//DAGB5_PERFCOUNTER2_CFG +#define DAGB5_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0 +#define DAGB5_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8 +#define DAGB5_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18 +#define DAGB5_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c +#define DAGB5_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d +#define DAGB5_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL +#define DAGB5_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define DAGB5_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L +#define DAGB5_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L +#define DAGB5_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L +//DAGB5_PERFCOUNTER_RSLT_CNTL +#define DAGB5_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0 +#define DAGB5_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8 +#define DAGB5_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10 +#define DAGB5_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18 +#define DAGB5_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19 +#define DAGB5_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a +#define DAGB5_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL +#define DAGB5_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L +#define DAGB5_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L +#define DAGB5_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L +#define DAGB5_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L +#define DAGB5_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L +//DAGB5_RESERVE0 +#define DAGB5_RESERVE0__RESERVE__SHIFT 0x0 +#define DAGB5_RESERVE0__RESERVE_MASK 0xFFFFFFFFL +//DAGB5_RESERVE1 +#define DAGB5_RESERVE1__RESERVE__SHIFT 0x0 +#define DAGB5_RESERVE1__RESERVE_MASK 0xFFFFFFFFL +//DAGB5_RESERVE2 +#define DAGB5_RESERVE2__RESERVE__SHIFT 0x0 +#define DAGB5_RESERVE2__RESERVE_MASK 0xFFFFFFFFL +//DAGB5_RESERVE3 +#define DAGB5_RESERVE3__RESERVE__SHIFT 0x0 +#define DAGB5_RESERVE3__RESERVE_MASK 0xFFFFFFFFL +//DAGB5_RESERVE4 +#define DAGB5_RESERVE4__RESERVE__SHIFT 0x0 +#define DAGB5_RESERVE4__RESERVE_MASK 0xFFFFFFFFL +//DAGB5_RESERVE5 +#define DAGB5_RESERVE5__RESERVE__SHIFT 0x0 +#define DAGB5_RESERVE5__RESERVE_MASK 0xFFFFFFFFL +//DAGB5_RESERVE6 +#define DAGB5_RESERVE6__RESERVE__SHIFT 0x0 +#define DAGB5_RESERVE6__RESERVE_MASK 0xFFFFFFFFL +//DAGB5_RESERVE7 +#define DAGB5_RESERVE7__RESERVE__SHIFT 0x0 +#define DAGB5_RESERVE7__RESERVE_MASK 0xFFFFFFFFL +//DAGB5_RESERVE8 +#define DAGB5_RESERVE8__RESERVE__SHIFT 0x0 +#define DAGB5_RESERVE8__RESERVE_MASK 0xFFFFFFFFL +//DAGB5_RESERVE9 +#define DAGB5_RESERVE9__RESERVE__SHIFT 0x0 +#define DAGB5_RESERVE9__RESERVE_MASK 0xFFFFFFFFL +//DAGB5_RESERVE10 +#define DAGB5_RESERVE10__RESERVE__SHIFT 0x0 +#define DAGB5_RESERVE10__RESERVE_MASK 0xFFFFFFFFL +//DAGB5_RESERVE11 +#define DAGB5_RESERVE11__RESERVE__SHIFT 0x0 +#define DAGB5_RESERVE11__RESERVE_MASK 0xFFFFFFFFL +//DAGB5_RESERVE12 +#define DAGB5_RESERVE12__RESERVE__SHIFT 0x0 +#define DAGB5_RESERVE12__RESERVE_MASK 0xFFFFFFFFL +//DAGB5_RESERVE13 +#define DAGB5_RESERVE13__RESERVE__SHIFT 0x0 +#define DAGB5_RESERVE13__RESERVE_MASK 0xFFFFFFFFL + + +// addressBlock: mmhub_dagb_dagbdec6 +//DAGB6_RDCLI0 +#define DAGB6_RDCLI0__VIRT_CHAN__SHIFT 0x0 +#define DAGB6_RDCLI0__CHECK_TLB_CREDIT__SHIFT 0x3 
+#define DAGB6_RDCLI0__URG_HIGH__SHIFT 0x4 +#define DAGB6_RDCLI0__URG_LOW__SHIFT 0x8 +#define DAGB6_RDCLI0__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB6_RDCLI0__MAX_BW__SHIFT 0xd +#define DAGB6_RDCLI0__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB6_RDCLI0__MIN_BW__SHIFT 0x16 +#define DAGB6_RDCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB6_RDCLI0__MAX_OSD__SHIFT 0x1a +#define DAGB6_RDCLI0__VIRT_CHAN_MASK 0x00000007L +#define DAGB6_RDCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB6_RDCLI0__URG_HIGH_MASK 0x000000F0L +#define DAGB6_RDCLI0__URG_LOW_MASK 0x00000F00L +#define DAGB6_RDCLI0__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB6_RDCLI0__MAX_BW_MASK 0x001FE000L +#define DAGB6_RDCLI0__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB6_RDCLI0__MIN_BW_MASK 0x01C00000L +#define DAGB6_RDCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB6_RDCLI0__MAX_OSD_MASK 0xFC000000L +//DAGB6_RDCLI1 +#define DAGB6_RDCLI1__VIRT_CHAN__SHIFT 0x0 +#define DAGB6_RDCLI1__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB6_RDCLI1__URG_HIGH__SHIFT 0x4 +#define DAGB6_RDCLI1__URG_LOW__SHIFT 0x8 +#define DAGB6_RDCLI1__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB6_RDCLI1__MAX_BW__SHIFT 0xd +#define DAGB6_RDCLI1__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB6_RDCLI1__MIN_BW__SHIFT 0x16 +#define DAGB6_RDCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB6_RDCLI1__MAX_OSD__SHIFT 0x1a +#define DAGB6_RDCLI1__VIRT_CHAN_MASK 0x00000007L +#define DAGB6_RDCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB6_RDCLI1__URG_HIGH_MASK 0x000000F0L +#define DAGB6_RDCLI1__URG_LOW_MASK 0x00000F00L +#define DAGB6_RDCLI1__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB6_RDCLI1__MAX_BW_MASK 0x001FE000L +#define DAGB6_RDCLI1__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB6_RDCLI1__MIN_BW_MASK 0x01C00000L +#define DAGB6_RDCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB6_RDCLI1__MAX_OSD_MASK 0xFC000000L +//DAGB6_RDCLI2 +#define DAGB6_RDCLI2__VIRT_CHAN__SHIFT 0x0 +#define DAGB6_RDCLI2__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB6_RDCLI2__URG_HIGH__SHIFT 0x4 +#define DAGB6_RDCLI2__URG_LOW__SHIFT 0x8 +#define DAGB6_RDCLI2__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB6_RDCLI2__MAX_BW__SHIFT 0xd +#define DAGB6_RDCLI2__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB6_RDCLI2__MIN_BW__SHIFT 0x16 +#define DAGB6_RDCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB6_RDCLI2__MAX_OSD__SHIFT 0x1a +#define DAGB6_RDCLI2__VIRT_CHAN_MASK 0x00000007L +#define DAGB6_RDCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB6_RDCLI2__URG_HIGH_MASK 0x000000F0L +#define DAGB6_RDCLI2__URG_LOW_MASK 0x00000F00L +#define DAGB6_RDCLI2__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB6_RDCLI2__MAX_BW_MASK 0x001FE000L +#define DAGB6_RDCLI2__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB6_RDCLI2__MIN_BW_MASK 0x01C00000L +#define DAGB6_RDCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB6_RDCLI2__MAX_OSD_MASK 0xFC000000L +//DAGB6_RDCLI3 +#define DAGB6_RDCLI3__VIRT_CHAN__SHIFT 0x0 +#define DAGB6_RDCLI3__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB6_RDCLI3__URG_HIGH__SHIFT 0x4 +#define DAGB6_RDCLI3__URG_LOW__SHIFT 0x8 +#define DAGB6_RDCLI3__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB6_RDCLI3__MAX_BW__SHIFT 0xd +#define DAGB6_RDCLI3__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB6_RDCLI3__MIN_BW__SHIFT 0x16 +#define DAGB6_RDCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB6_RDCLI3__MAX_OSD__SHIFT 0x1a +#define DAGB6_RDCLI3__VIRT_CHAN_MASK 0x00000007L +#define DAGB6_RDCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB6_RDCLI3__URG_HIGH_MASK 0x000000F0L +#define DAGB6_RDCLI3__URG_LOW_MASK 0x00000F00L +#define 
DAGB6_RDCLI3__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB6_RDCLI3__MAX_BW_MASK 0x001FE000L +#define DAGB6_RDCLI3__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB6_RDCLI3__MIN_BW_MASK 0x01C00000L +#define DAGB6_RDCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB6_RDCLI3__MAX_OSD_MASK 0xFC000000L +//DAGB6_RDCLI4 +#define DAGB6_RDCLI4__VIRT_CHAN__SHIFT 0x0 +#define DAGB6_RDCLI4__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB6_RDCLI4__URG_HIGH__SHIFT 0x4 +#define DAGB6_RDCLI4__URG_LOW__SHIFT 0x8 +#define DAGB6_RDCLI4__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB6_RDCLI4__MAX_BW__SHIFT 0xd +#define DAGB6_RDCLI4__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB6_RDCLI4__MIN_BW__SHIFT 0x16 +#define DAGB6_RDCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB6_RDCLI4__MAX_OSD__SHIFT 0x1a +#define DAGB6_RDCLI4__VIRT_CHAN_MASK 0x00000007L +#define DAGB6_RDCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB6_RDCLI4__URG_HIGH_MASK 0x000000F0L +#define DAGB6_RDCLI4__URG_LOW_MASK 0x00000F00L +#define DAGB6_RDCLI4__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB6_RDCLI4__MAX_BW_MASK 0x001FE000L +#define DAGB6_RDCLI4__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB6_RDCLI4__MIN_BW_MASK 0x01C00000L +#define DAGB6_RDCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB6_RDCLI4__MAX_OSD_MASK 0xFC000000L +//DAGB6_RDCLI5 +#define DAGB6_RDCLI5__VIRT_CHAN__SHIFT 0x0 +#define DAGB6_RDCLI5__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB6_RDCLI5__URG_HIGH__SHIFT 0x4 +#define DAGB6_RDCLI5__URG_LOW__SHIFT 0x8 +#define DAGB6_RDCLI5__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB6_RDCLI5__MAX_BW__SHIFT 0xd +#define DAGB6_RDCLI5__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB6_RDCLI5__MIN_BW__SHIFT 0x16 +#define DAGB6_RDCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB6_RDCLI5__MAX_OSD__SHIFT 0x1a +#define DAGB6_RDCLI5__VIRT_CHAN_MASK 0x00000007L +#define DAGB6_RDCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB6_RDCLI5__URG_HIGH_MASK 0x000000F0L +#define DAGB6_RDCLI5__URG_LOW_MASK 0x00000F00L +#define DAGB6_RDCLI5__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB6_RDCLI5__MAX_BW_MASK 0x001FE000L +#define DAGB6_RDCLI5__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB6_RDCLI5__MIN_BW_MASK 0x01C00000L +#define DAGB6_RDCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB6_RDCLI5__MAX_OSD_MASK 0xFC000000L +//DAGB6_RDCLI6 +#define DAGB6_RDCLI6__VIRT_CHAN__SHIFT 0x0 +#define DAGB6_RDCLI6__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB6_RDCLI6__URG_HIGH__SHIFT 0x4 +#define DAGB6_RDCLI6__URG_LOW__SHIFT 0x8 +#define DAGB6_RDCLI6__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB6_RDCLI6__MAX_BW__SHIFT 0xd +#define DAGB6_RDCLI6__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB6_RDCLI6__MIN_BW__SHIFT 0x16 +#define DAGB6_RDCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB6_RDCLI6__MAX_OSD__SHIFT 0x1a +#define DAGB6_RDCLI6__VIRT_CHAN_MASK 0x00000007L +#define DAGB6_RDCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB6_RDCLI6__URG_HIGH_MASK 0x000000F0L +#define DAGB6_RDCLI6__URG_LOW_MASK 0x00000F00L +#define DAGB6_RDCLI6__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB6_RDCLI6__MAX_BW_MASK 0x001FE000L +#define DAGB6_RDCLI6__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB6_RDCLI6__MIN_BW_MASK 0x01C00000L +#define DAGB6_RDCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB6_RDCLI6__MAX_OSD_MASK 0xFC000000L +//DAGB6_RDCLI7 +#define DAGB6_RDCLI7__VIRT_CHAN__SHIFT 0x0 +#define DAGB6_RDCLI7__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB6_RDCLI7__URG_HIGH__SHIFT 0x4 +#define DAGB6_RDCLI7__URG_LOW__SHIFT 0x8 +#define DAGB6_RDCLI7__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB6_RDCLI7__MAX_BW__SHIFT 0xd +#define 
DAGB6_RDCLI7__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB6_RDCLI7__MIN_BW__SHIFT 0x16 +#define DAGB6_RDCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB6_RDCLI7__MAX_OSD__SHIFT 0x1a +#define DAGB6_RDCLI7__VIRT_CHAN_MASK 0x00000007L +#define DAGB6_RDCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB6_RDCLI7__URG_HIGH_MASK 0x000000F0L +#define DAGB6_RDCLI7__URG_LOW_MASK 0x00000F00L +#define DAGB6_RDCLI7__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB6_RDCLI7__MAX_BW_MASK 0x001FE000L +#define DAGB6_RDCLI7__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB6_RDCLI7__MIN_BW_MASK 0x01C00000L +#define DAGB6_RDCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB6_RDCLI7__MAX_OSD_MASK 0xFC000000L +//DAGB6_RDCLI8 +#define DAGB6_RDCLI8__VIRT_CHAN__SHIFT 0x0 +#define DAGB6_RDCLI8__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB6_RDCLI8__URG_HIGH__SHIFT 0x4 +#define DAGB6_RDCLI8__URG_LOW__SHIFT 0x8 +#define DAGB6_RDCLI8__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB6_RDCLI8__MAX_BW__SHIFT 0xd +#define DAGB6_RDCLI8__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB6_RDCLI8__MIN_BW__SHIFT 0x16 +#define DAGB6_RDCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB6_RDCLI8__MAX_OSD__SHIFT 0x1a +#define DAGB6_RDCLI8__VIRT_CHAN_MASK 0x00000007L +#define DAGB6_RDCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB6_RDCLI8__URG_HIGH_MASK 0x000000F0L +#define DAGB6_RDCLI8__URG_LOW_MASK 0x00000F00L +#define DAGB6_RDCLI8__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB6_RDCLI8__MAX_BW_MASK 0x001FE000L +#define DAGB6_RDCLI8__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB6_RDCLI8__MIN_BW_MASK 0x01C00000L +#define DAGB6_RDCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB6_RDCLI8__MAX_OSD_MASK 0xFC000000L +//DAGB6_RDCLI9 +#define DAGB6_RDCLI9__VIRT_CHAN__SHIFT 0x0 +#define DAGB6_RDCLI9__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB6_RDCLI9__URG_HIGH__SHIFT 0x4 +#define DAGB6_RDCLI9__URG_LOW__SHIFT 0x8 +#define DAGB6_RDCLI9__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB6_RDCLI9__MAX_BW__SHIFT 0xd +#define DAGB6_RDCLI9__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB6_RDCLI9__MIN_BW__SHIFT 0x16 +#define DAGB6_RDCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB6_RDCLI9__MAX_OSD__SHIFT 0x1a +#define DAGB6_RDCLI9__VIRT_CHAN_MASK 0x00000007L +#define DAGB6_RDCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB6_RDCLI9__URG_HIGH_MASK 0x000000F0L +#define DAGB6_RDCLI9__URG_LOW_MASK 0x00000F00L +#define DAGB6_RDCLI9__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB6_RDCLI9__MAX_BW_MASK 0x001FE000L +#define DAGB6_RDCLI9__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB6_RDCLI9__MIN_BW_MASK 0x01C00000L +#define DAGB6_RDCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB6_RDCLI9__MAX_OSD_MASK 0xFC000000L +//DAGB6_RDCLI10 +#define DAGB6_RDCLI10__VIRT_CHAN__SHIFT 0x0 +#define DAGB6_RDCLI10__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB6_RDCLI10__URG_HIGH__SHIFT 0x4 +#define DAGB6_RDCLI10__URG_LOW__SHIFT 0x8 +#define DAGB6_RDCLI10__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB6_RDCLI10__MAX_BW__SHIFT 0xd +#define DAGB6_RDCLI10__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB6_RDCLI10__MIN_BW__SHIFT 0x16 +#define DAGB6_RDCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB6_RDCLI10__MAX_OSD__SHIFT 0x1a +#define DAGB6_RDCLI10__VIRT_CHAN_MASK 0x00000007L +#define DAGB6_RDCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB6_RDCLI10__URG_HIGH_MASK 0x000000F0L +#define DAGB6_RDCLI10__URG_LOW_MASK 0x00000F00L +#define DAGB6_RDCLI10__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB6_RDCLI10__MAX_BW_MASK 0x001FE000L +#define DAGB6_RDCLI10__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB6_RDCLI10__MIN_BW_MASK 
0x01C00000L +#define DAGB6_RDCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB6_RDCLI10__MAX_OSD_MASK 0xFC000000L +//DAGB6_RDCLI11 +#define DAGB6_RDCLI11__VIRT_CHAN__SHIFT 0x0 +#define DAGB6_RDCLI11__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB6_RDCLI11__URG_HIGH__SHIFT 0x4 +#define DAGB6_RDCLI11__URG_LOW__SHIFT 0x8 +#define DAGB6_RDCLI11__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB6_RDCLI11__MAX_BW__SHIFT 0xd +#define DAGB6_RDCLI11__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB6_RDCLI11__MIN_BW__SHIFT 0x16 +#define DAGB6_RDCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB6_RDCLI11__MAX_OSD__SHIFT 0x1a +#define DAGB6_RDCLI11__VIRT_CHAN_MASK 0x00000007L +#define DAGB6_RDCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB6_RDCLI11__URG_HIGH_MASK 0x000000F0L +#define DAGB6_RDCLI11__URG_LOW_MASK 0x00000F00L +#define DAGB6_RDCLI11__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB6_RDCLI11__MAX_BW_MASK 0x001FE000L +#define DAGB6_RDCLI11__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB6_RDCLI11__MIN_BW_MASK 0x01C00000L +#define DAGB6_RDCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB6_RDCLI11__MAX_OSD_MASK 0xFC000000L +//DAGB6_RDCLI12 +#define DAGB6_RDCLI12__VIRT_CHAN__SHIFT 0x0 +#define DAGB6_RDCLI12__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB6_RDCLI12__URG_HIGH__SHIFT 0x4 +#define DAGB6_RDCLI12__URG_LOW__SHIFT 0x8 +#define DAGB6_RDCLI12__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB6_RDCLI12__MAX_BW__SHIFT 0xd +#define DAGB6_RDCLI12__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB6_RDCLI12__MIN_BW__SHIFT 0x16 +#define DAGB6_RDCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB6_RDCLI12__MAX_OSD__SHIFT 0x1a +#define DAGB6_RDCLI12__VIRT_CHAN_MASK 0x00000007L +#define DAGB6_RDCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB6_RDCLI12__URG_HIGH_MASK 0x000000F0L +#define DAGB6_RDCLI12__URG_LOW_MASK 0x00000F00L +#define DAGB6_RDCLI12__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB6_RDCLI12__MAX_BW_MASK 0x001FE000L +#define DAGB6_RDCLI12__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB6_RDCLI12__MIN_BW_MASK 0x01C00000L +#define DAGB6_RDCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB6_RDCLI12__MAX_OSD_MASK 0xFC000000L +//DAGB6_RDCLI13 +#define DAGB6_RDCLI13__VIRT_CHAN__SHIFT 0x0 +#define DAGB6_RDCLI13__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB6_RDCLI13__URG_HIGH__SHIFT 0x4 +#define DAGB6_RDCLI13__URG_LOW__SHIFT 0x8 +#define DAGB6_RDCLI13__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB6_RDCLI13__MAX_BW__SHIFT 0xd +#define DAGB6_RDCLI13__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB6_RDCLI13__MIN_BW__SHIFT 0x16 +#define DAGB6_RDCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB6_RDCLI13__MAX_OSD__SHIFT 0x1a +#define DAGB6_RDCLI13__VIRT_CHAN_MASK 0x00000007L +#define DAGB6_RDCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB6_RDCLI13__URG_HIGH_MASK 0x000000F0L +#define DAGB6_RDCLI13__URG_LOW_MASK 0x00000F00L +#define DAGB6_RDCLI13__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB6_RDCLI13__MAX_BW_MASK 0x001FE000L +#define DAGB6_RDCLI13__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB6_RDCLI13__MIN_BW_MASK 0x01C00000L +#define DAGB6_RDCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB6_RDCLI13__MAX_OSD_MASK 0xFC000000L +//DAGB6_RDCLI14 +#define DAGB6_RDCLI14__VIRT_CHAN__SHIFT 0x0 +#define DAGB6_RDCLI14__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB6_RDCLI14__URG_HIGH__SHIFT 0x4 +#define DAGB6_RDCLI14__URG_LOW__SHIFT 0x8 +#define DAGB6_RDCLI14__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB6_RDCLI14__MAX_BW__SHIFT 0xd +#define DAGB6_RDCLI14__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB6_RDCLI14__MIN_BW__SHIFT 0x16 +#define 
DAGB6_RDCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB6_RDCLI14__MAX_OSD__SHIFT 0x1a +#define DAGB6_RDCLI14__VIRT_CHAN_MASK 0x00000007L +#define DAGB6_RDCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB6_RDCLI14__URG_HIGH_MASK 0x000000F0L +#define DAGB6_RDCLI14__URG_LOW_MASK 0x00000F00L +#define DAGB6_RDCLI14__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB6_RDCLI14__MAX_BW_MASK 0x001FE000L +#define DAGB6_RDCLI14__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB6_RDCLI14__MIN_BW_MASK 0x01C00000L +#define DAGB6_RDCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB6_RDCLI14__MAX_OSD_MASK 0xFC000000L +//DAGB6_RDCLI15 +#define DAGB6_RDCLI15__VIRT_CHAN__SHIFT 0x0 +#define DAGB6_RDCLI15__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB6_RDCLI15__URG_HIGH__SHIFT 0x4 +#define DAGB6_RDCLI15__URG_LOW__SHIFT 0x8 +#define DAGB6_RDCLI15__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB6_RDCLI15__MAX_BW__SHIFT 0xd +#define DAGB6_RDCLI15__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB6_RDCLI15__MIN_BW__SHIFT 0x16 +#define DAGB6_RDCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB6_RDCLI15__MAX_OSD__SHIFT 0x1a +#define DAGB6_RDCLI15__VIRT_CHAN_MASK 0x00000007L +#define DAGB6_RDCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB6_RDCLI15__URG_HIGH_MASK 0x000000F0L +#define DAGB6_RDCLI15__URG_LOW_MASK 0x00000F00L +#define DAGB6_RDCLI15__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB6_RDCLI15__MAX_BW_MASK 0x001FE000L +#define DAGB6_RDCLI15__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB6_RDCLI15__MIN_BW_MASK 0x01C00000L +#define DAGB6_RDCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB6_RDCLI15__MAX_OSD_MASK 0xFC000000L +//DAGB6_RD_CNTL +#define DAGB6_RD_CNTL__SCLK_FREQ__SHIFT 0x0 +#define DAGB6_RD_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4 +#define DAGB6_RD_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa +#define DAGB6_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10 +#define DAGB6_RD_CNTL__IO_LEVEL__SHIFT 0x11 +#define DAGB6_RD_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14 +#define DAGB6_RD_CNTL__SHARE_VC_NUM__SHIFT 0x17 +#define DAGB6_RD_CNTL__SCLK_FREQ_MASK 0x0000000FL +#define DAGB6_RD_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L +#define DAGB6_RD_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L +#define DAGB6_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L +#define DAGB6_RD_CNTL__IO_LEVEL_MASK 0x000E0000L +#define DAGB6_RD_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L +#define DAGB6_RD_CNTL__SHARE_VC_NUM_MASK 0x03800000L +//DAGB6_RD_GMI_CNTL +#define DAGB6_RD_GMI_CNTL__EA_CREDIT__SHIFT 0x0 +#define DAGB6_RD_GMI_CNTL__LEVEL__SHIFT 0x6 +#define DAGB6_RD_GMI_CNTL__MAX_BURST__SHIFT 0x9 +#define DAGB6_RD_GMI_CNTL__LAZY_TIMER__SHIFT 0xd +#define DAGB6_RD_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL +#define DAGB6_RD_GMI_CNTL__LEVEL_MASK 0x000001C0L +#define DAGB6_RD_GMI_CNTL__MAX_BURST_MASK 0x00001E00L +#define DAGB6_RD_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L +//DAGB6_RD_ADDR_DAGB +#define DAGB6_RD_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0 +#define DAGB6_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3 +#define DAGB6_RD_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6 +#define DAGB6_RD_ADDR_DAGB__WHOAMI__SHIFT 0x7 +#define DAGB6_RD_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L +#define DAGB6_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L +#define DAGB6_RD_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L +#define DAGB6_RD_ADDR_DAGB__WHOAMI_MASK 0x00001F80L +//DAGB6_RD_OUTPUT_DAGB_MAX_BURST +#define DAGB6_RD_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0 +#define DAGB6_RD_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4 +#define DAGB6_RD_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8 +#define 
DAGB6_RD_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc +#define DAGB6_RD_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10 +#define DAGB6_RD_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14 +#define DAGB6_RD_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18 +#define DAGB6_RD_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c +#define DAGB6_RD_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL +#define DAGB6_RD_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L +#define DAGB6_RD_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L +#define DAGB6_RD_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L +#define DAGB6_RD_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L +#define DAGB6_RD_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L +#define DAGB6_RD_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L +#define DAGB6_RD_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L +//DAGB6_RD_OUTPUT_DAGB_LAZY_TIMER +#define DAGB6_RD_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0 +#define DAGB6_RD_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4 +#define DAGB6_RD_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8 +#define DAGB6_RD_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc +#define DAGB6_RD_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10 +#define DAGB6_RD_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14 +#define DAGB6_RD_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18 +#define DAGB6_RD_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c +#define DAGB6_RD_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL +#define DAGB6_RD_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L +#define DAGB6_RD_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L +#define DAGB6_RD_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L +#define DAGB6_RD_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L +#define DAGB6_RD_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L +#define DAGB6_RD_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L +#define DAGB6_RD_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L +//DAGB6_RD_CGTT_CLK_CTRL +#define DAGB6_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB6_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB6_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB6_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB6_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB6_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB6_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB6_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB6_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB6_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB6_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB6_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB6_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB6_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB6_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB6_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB6_L1TLB_RD_CGTT_CLK_CTRL +#define DAGB6_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB6_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB6_L1TLB_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB6_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB6_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB6_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB6_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB6_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB6_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB6_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define 
DAGB6_L1TLB_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB6_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB6_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB6_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB6_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB6_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB6_ATCVM_RD_CGTT_CLK_CTRL +#define DAGB6_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB6_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB6_ATCVM_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB6_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB6_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB6_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB6_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB6_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB6_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB6_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB6_ATCVM_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB6_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB6_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB6_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB6_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB6_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB6_RD_ADDR_DAGB_MAX_BURST0 +#define DAGB6_RD_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0 +#define DAGB6_RD_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4 +#define DAGB6_RD_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8 +#define DAGB6_RD_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc +#define DAGB6_RD_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10 +#define DAGB6_RD_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14 +#define DAGB6_RD_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18 +#define DAGB6_RD_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c +#define DAGB6_RD_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL +#define DAGB6_RD_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L +#define DAGB6_RD_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L +#define DAGB6_RD_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L +#define DAGB6_RD_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L +#define DAGB6_RD_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L +#define DAGB6_RD_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L +#define DAGB6_RD_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L +//DAGB6_RD_ADDR_DAGB_LAZY_TIMER0 +#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0 +#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4 +#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8 +#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc +#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10 +#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14 +#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18 +#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c +#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL +#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L +#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L +#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L +#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L +#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L +#define 
DAGB6_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L +#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L +//DAGB6_RD_ADDR_DAGB_MAX_BURST1 +#define DAGB6_RD_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0 +#define DAGB6_RD_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4 +#define DAGB6_RD_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8 +#define DAGB6_RD_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc +#define DAGB6_RD_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10 +#define DAGB6_RD_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14 +#define DAGB6_RD_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18 +#define DAGB6_RD_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c +#define DAGB6_RD_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL +#define DAGB6_RD_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L +#define DAGB6_RD_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L +#define DAGB6_RD_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L +#define DAGB6_RD_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L +#define DAGB6_RD_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L +#define DAGB6_RD_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L +#define DAGB6_RD_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L +//DAGB6_RD_ADDR_DAGB_LAZY_TIMER1 +#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0 +#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4 +#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8 +#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc +#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10 +#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14 +#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18 +#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c +#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL +#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L +#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L +#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L +#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L +#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L +#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L +#define DAGB6_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L +//DAGB6_RD_VC0_CNTL +#define DAGB6_RD_VC0_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB6_RD_VC0_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB6_RD_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB6_RD_VC0_CNTL__MAX_BW__SHIFT 0xc +#define DAGB6_RD_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB6_RD_VC0_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB6_RD_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB6_RD_VC0_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB6_RD_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB6_RD_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB6_RD_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB6_RD_VC0_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB6_RD_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB6_RD_VC0_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB6_RD_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB6_RD_VC0_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB6_RD_VC1_CNTL +#define DAGB6_RD_VC1_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB6_RD_VC1_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB6_RD_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB6_RD_VC1_CNTL__MAX_BW__SHIFT 0xc +#define DAGB6_RD_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB6_RD_VC1_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB6_RD_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB6_RD_VC1_CNTL__MAX_OSD__SHIFT 0x19 +#define 
DAGB6_RD_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB6_RD_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB6_RD_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB6_RD_VC1_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB6_RD_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB6_RD_VC1_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB6_RD_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB6_RD_VC1_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB6_RD_VC2_CNTL +#define DAGB6_RD_VC2_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB6_RD_VC2_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB6_RD_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB6_RD_VC2_CNTL__MAX_BW__SHIFT 0xc +#define DAGB6_RD_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB6_RD_VC2_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB6_RD_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB6_RD_VC2_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB6_RD_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB6_RD_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB6_RD_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB6_RD_VC2_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB6_RD_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB6_RD_VC2_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB6_RD_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB6_RD_VC2_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB6_RD_VC3_CNTL +#define DAGB6_RD_VC3_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB6_RD_VC3_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB6_RD_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB6_RD_VC3_CNTL__MAX_BW__SHIFT 0xc +#define DAGB6_RD_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB6_RD_VC3_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB6_RD_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB6_RD_VC3_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB6_RD_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB6_RD_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB6_RD_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB6_RD_VC3_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB6_RD_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB6_RD_VC3_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB6_RD_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB6_RD_VC3_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB6_RD_VC4_CNTL +#define DAGB6_RD_VC4_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB6_RD_VC4_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB6_RD_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB6_RD_VC4_CNTL__MAX_BW__SHIFT 0xc +#define DAGB6_RD_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB6_RD_VC4_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB6_RD_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB6_RD_VC4_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB6_RD_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB6_RD_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB6_RD_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB6_RD_VC4_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB6_RD_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB6_RD_VC4_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB6_RD_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB6_RD_VC4_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB6_RD_VC5_CNTL +#define DAGB6_RD_VC5_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB6_RD_VC5_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB6_RD_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB6_RD_VC5_CNTL__MAX_BW__SHIFT 0xc +#define DAGB6_RD_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB6_RD_VC5_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB6_RD_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB6_RD_VC5_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB6_RD_VC5_CNTL__STOR_CREDIT_MASK 
0x0000001FL +#define DAGB6_RD_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB6_RD_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB6_RD_VC5_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB6_RD_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB6_RD_VC5_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB6_RD_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB6_RD_VC5_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB6_RD_VC6_CNTL +#define DAGB6_RD_VC6_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB6_RD_VC6_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB6_RD_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB6_RD_VC6_CNTL__MAX_BW__SHIFT 0xc +#define DAGB6_RD_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB6_RD_VC6_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB6_RD_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB6_RD_VC6_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB6_RD_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB6_RD_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB6_RD_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB6_RD_VC6_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB6_RD_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB6_RD_VC6_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB6_RD_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB6_RD_VC6_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB6_RD_VC7_CNTL +#define DAGB6_RD_VC7_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB6_RD_VC7_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB6_RD_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB6_RD_VC7_CNTL__MAX_BW__SHIFT 0xc +#define DAGB6_RD_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB6_RD_VC7_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB6_RD_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB6_RD_VC7_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB6_RD_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB6_RD_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB6_RD_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB6_RD_VC7_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB6_RD_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB6_RD_VC7_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB6_RD_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB6_RD_VC7_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB6_RD_CNTL_MISC +#define DAGB6_RD_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0 +#define DAGB6_RD_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6 +#define DAGB6_RD_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd +#define DAGB6_RD_CNTL_MISC__STOR_CC_LEGACY_MODE__SHIFT 0x13 +#define DAGB6_RD_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14 +#define DAGB6_RD_CNTL_MISC__UTCL2_CID__SHIFT 0x15 +#define DAGB6_RD_CNTL_MISC__RDRET_FIFO_CREDITS__SHIFT 0x1a +#define DAGB6_RD_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL +#define DAGB6_RD_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L +#define DAGB6_RD_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L +#define DAGB6_RD_CNTL_MISC__STOR_CC_LEGACY_MODE_MASK 0x00080000L +#define DAGB6_RD_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L +#define DAGB6_RD_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L +#define DAGB6_RD_CNTL_MISC__RDRET_FIFO_CREDITS_MASK 0xFC000000L +//DAGB6_RD_TLB_CREDIT +#define DAGB6_RD_TLB_CREDIT__TLB0__SHIFT 0x0 +#define DAGB6_RD_TLB_CREDIT__TLB1__SHIFT 0x5 +#define DAGB6_RD_TLB_CREDIT__TLB2__SHIFT 0xa +#define DAGB6_RD_TLB_CREDIT__TLB3__SHIFT 0xf +#define DAGB6_RD_TLB_CREDIT__TLB4__SHIFT 0x14 +#define DAGB6_RD_TLB_CREDIT__TLB5__SHIFT 0x19 +#define DAGB6_RD_TLB_CREDIT__TLB0_MASK 0x0000001FL +#define DAGB6_RD_TLB_CREDIT__TLB1_MASK 0x000003E0L +#define DAGB6_RD_TLB_CREDIT__TLB2_MASK 0x00007C00L +#define DAGB6_RD_TLB_CREDIT__TLB3_MASK 0x000F8000L +#define 
DAGB6_RD_TLB_CREDIT__TLB4_MASK 0x01F00000L +#define DAGB6_RD_TLB_CREDIT__TLB5_MASK 0x3E000000L +//DAGB6_RDCLI_ASK_PENDING +#define DAGB6_RDCLI_ASK_PENDING__BUSY__SHIFT 0x0 +#define DAGB6_RDCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB6_RDCLI_GO_PENDING +#define DAGB6_RDCLI_GO_PENDING__BUSY__SHIFT 0x0 +#define DAGB6_RDCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB6_RDCLI_GBLSEND_PENDING +#define DAGB6_RDCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0 +#define DAGB6_RDCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB6_RDCLI_TLB_PENDING +#define DAGB6_RDCLI_TLB_PENDING__BUSY__SHIFT 0x0 +#define DAGB6_RDCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB6_RDCLI_OARB_PENDING +#define DAGB6_RDCLI_OARB_PENDING__BUSY__SHIFT 0x0 +#define DAGB6_RDCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB6_RDCLI_OSD_PENDING +#define DAGB6_RDCLI_OSD_PENDING__BUSY__SHIFT 0x0 +#define DAGB6_RDCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB6_WRCLI0 +#define DAGB6_WRCLI0__VIRT_CHAN__SHIFT 0x0 +#define DAGB6_WRCLI0__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB6_WRCLI0__URG_HIGH__SHIFT 0x4 +#define DAGB6_WRCLI0__URG_LOW__SHIFT 0x8 +#define DAGB6_WRCLI0__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB6_WRCLI0__MAX_BW__SHIFT 0xd +#define DAGB6_WRCLI0__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB6_WRCLI0__MIN_BW__SHIFT 0x16 +#define DAGB6_WRCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB6_WRCLI0__MAX_OSD__SHIFT 0x1a +#define DAGB6_WRCLI0__VIRT_CHAN_MASK 0x00000007L +#define DAGB6_WRCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB6_WRCLI0__URG_HIGH_MASK 0x000000F0L +#define DAGB6_WRCLI0__URG_LOW_MASK 0x00000F00L +#define DAGB6_WRCLI0__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB6_WRCLI0__MAX_BW_MASK 0x001FE000L +#define DAGB6_WRCLI0__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB6_WRCLI0__MIN_BW_MASK 0x01C00000L +#define DAGB6_WRCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB6_WRCLI0__MAX_OSD_MASK 0xFC000000L +//DAGB6_WRCLI1 +#define DAGB6_WRCLI1__VIRT_CHAN__SHIFT 0x0 +#define DAGB6_WRCLI1__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB6_WRCLI1__URG_HIGH__SHIFT 0x4 +#define DAGB6_WRCLI1__URG_LOW__SHIFT 0x8 +#define DAGB6_WRCLI1__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB6_WRCLI1__MAX_BW__SHIFT 0xd +#define DAGB6_WRCLI1__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB6_WRCLI1__MIN_BW__SHIFT 0x16 +#define DAGB6_WRCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB6_WRCLI1__MAX_OSD__SHIFT 0x1a +#define DAGB6_WRCLI1__VIRT_CHAN_MASK 0x00000007L +#define DAGB6_WRCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB6_WRCLI1__URG_HIGH_MASK 0x000000F0L +#define DAGB6_WRCLI1__URG_LOW_MASK 0x00000F00L +#define DAGB6_WRCLI1__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB6_WRCLI1__MAX_BW_MASK 0x001FE000L +#define DAGB6_WRCLI1__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB6_WRCLI1__MIN_BW_MASK 0x01C00000L +#define DAGB6_WRCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB6_WRCLI1__MAX_OSD_MASK 0xFC000000L +//DAGB6_WRCLI2 +#define DAGB6_WRCLI2__VIRT_CHAN__SHIFT 0x0 +#define DAGB6_WRCLI2__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB6_WRCLI2__URG_HIGH__SHIFT 0x4 +#define DAGB6_WRCLI2__URG_LOW__SHIFT 0x8 +#define DAGB6_WRCLI2__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB6_WRCLI2__MAX_BW__SHIFT 0xd +#define DAGB6_WRCLI2__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB6_WRCLI2__MIN_BW__SHIFT 0x16 +#define DAGB6_WRCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB6_WRCLI2__MAX_OSD__SHIFT 0x1a +#define DAGB6_WRCLI2__VIRT_CHAN_MASK 0x00000007L +#define DAGB6_WRCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB6_WRCLI2__URG_HIGH_MASK 0x000000F0L +#define DAGB6_WRCLI2__URG_LOW_MASK 0x00000F00L 
+#define DAGB6_WRCLI2__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB6_WRCLI2__MAX_BW_MASK 0x001FE000L +#define DAGB6_WRCLI2__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB6_WRCLI2__MIN_BW_MASK 0x01C00000L +#define DAGB6_WRCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB6_WRCLI2__MAX_OSD_MASK 0xFC000000L +//DAGB6_WRCLI3 +#define DAGB6_WRCLI3__VIRT_CHAN__SHIFT 0x0 +#define DAGB6_WRCLI3__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB6_WRCLI3__URG_HIGH__SHIFT 0x4 +#define DAGB6_WRCLI3__URG_LOW__SHIFT 0x8 +#define DAGB6_WRCLI3__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB6_WRCLI3__MAX_BW__SHIFT 0xd +#define DAGB6_WRCLI3__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB6_WRCLI3__MIN_BW__SHIFT 0x16 +#define DAGB6_WRCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB6_WRCLI3__MAX_OSD__SHIFT 0x1a +#define DAGB6_WRCLI3__VIRT_CHAN_MASK 0x00000007L +#define DAGB6_WRCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB6_WRCLI3__URG_HIGH_MASK 0x000000F0L +#define DAGB6_WRCLI3__URG_LOW_MASK 0x00000F00L +#define DAGB6_WRCLI3__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB6_WRCLI3__MAX_BW_MASK 0x001FE000L +#define DAGB6_WRCLI3__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB6_WRCLI3__MIN_BW_MASK 0x01C00000L +#define DAGB6_WRCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB6_WRCLI3__MAX_OSD_MASK 0xFC000000L +//DAGB6_WRCLI4 +#define DAGB6_WRCLI4__VIRT_CHAN__SHIFT 0x0 +#define DAGB6_WRCLI4__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB6_WRCLI4__URG_HIGH__SHIFT 0x4 +#define DAGB6_WRCLI4__URG_LOW__SHIFT 0x8 +#define DAGB6_WRCLI4__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB6_WRCLI4__MAX_BW__SHIFT 0xd +#define DAGB6_WRCLI4__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB6_WRCLI4__MIN_BW__SHIFT 0x16 +#define DAGB6_WRCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB6_WRCLI4__MAX_OSD__SHIFT 0x1a +#define DAGB6_WRCLI4__VIRT_CHAN_MASK 0x00000007L +#define DAGB6_WRCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB6_WRCLI4__URG_HIGH_MASK 0x000000F0L +#define DAGB6_WRCLI4__URG_LOW_MASK 0x00000F00L +#define DAGB6_WRCLI4__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB6_WRCLI4__MAX_BW_MASK 0x001FE000L +#define DAGB6_WRCLI4__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB6_WRCLI4__MIN_BW_MASK 0x01C00000L +#define DAGB6_WRCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB6_WRCLI4__MAX_OSD_MASK 0xFC000000L +//DAGB6_WRCLI5 +#define DAGB6_WRCLI5__VIRT_CHAN__SHIFT 0x0 +#define DAGB6_WRCLI5__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB6_WRCLI5__URG_HIGH__SHIFT 0x4 +#define DAGB6_WRCLI5__URG_LOW__SHIFT 0x8 +#define DAGB6_WRCLI5__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB6_WRCLI5__MAX_BW__SHIFT 0xd +#define DAGB6_WRCLI5__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB6_WRCLI5__MIN_BW__SHIFT 0x16 +#define DAGB6_WRCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB6_WRCLI5__MAX_OSD__SHIFT 0x1a +#define DAGB6_WRCLI5__VIRT_CHAN_MASK 0x00000007L +#define DAGB6_WRCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB6_WRCLI5__URG_HIGH_MASK 0x000000F0L +#define DAGB6_WRCLI5__URG_LOW_MASK 0x00000F00L +#define DAGB6_WRCLI5__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB6_WRCLI5__MAX_BW_MASK 0x001FE000L +#define DAGB6_WRCLI5__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB6_WRCLI5__MIN_BW_MASK 0x01C00000L +#define DAGB6_WRCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB6_WRCLI5__MAX_OSD_MASK 0xFC000000L +//DAGB6_WRCLI6 +#define DAGB6_WRCLI6__VIRT_CHAN__SHIFT 0x0 +#define DAGB6_WRCLI6__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB6_WRCLI6__URG_HIGH__SHIFT 0x4 +#define DAGB6_WRCLI6__URG_LOW__SHIFT 0x8 +#define DAGB6_WRCLI6__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB6_WRCLI6__MAX_BW__SHIFT 0xd 
+#define DAGB6_WRCLI6__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB6_WRCLI6__MIN_BW__SHIFT 0x16 +#define DAGB6_WRCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB6_WRCLI6__MAX_OSD__SHIFT 0x1a +#define DAGB6_WRCLI6__VIRT_CHAN_MASK 0x00000007L +#define DAGB6_WRCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB6_WRCLI6__URG_HIGH_MASK 0x000000F0L +#define DAGB6_WRCLI6__URG_LOW_MASK 0x00000F00L +#define DAGB6_WRCLI6__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB6_WRCLI6__MAX_BW_MASK 0x001FE000L +#define DAGB6_WRCLI6__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB6_WRCLI6__MIN_BW_MASK 0x01C00000L +#define DAGB6_WRCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB6_WRCLI6__MAX_OSD_MASK 0xFC000000L +//DAGB6_WRCLI7 +#define DAGB6_WRCLI7__VIRT_CHAN__SHIFT 0x0 +#define DAGB6_WRCLI7__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB6_WRCLI7__URG_HIGH__SHIFT 0x4 +#define DAGB6_WRCLI7__URG_LOW__SHIFT 0x8 +#define DAGB6_WRCLI7__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB6_WRCLI7__MAX_BW__SHIFT 0xd +#define DAGB6_WRCLI7__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB6_WRCLI7__MIN_BW__SHIFT 0x16 +#define DAGB6_WRCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB6_WRCLI7__MAX_OSD__SHIFT 0x1a +#define DAGB6_WRCLI7__VIRT_CHAN_MASK 0x00000007L +#define DAGB6_WRCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB6_WRCLI7__URG_HIGH_MASK 0x000000F0L +#define DAGB6_WRCLI7__URG_LOW_MASK 0x00000F00L +#define DAGB6_WRCLI7__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB6_WRCLI7__MAX_BW_MASK 0x001FE000L +#define DAGB6_WRCLI7__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB6_WRCLI7__MIN_BW_MASK 0x01C00000L +#define DAGB6_WRCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB6_WRCLI7__MAX_OSD_MASK 0xFC000000L +//DAGB6_WRCLI8 +#define DAGB6_WRCLI8__VIRT_CHAN__SHIFT 0x0 +#define DAGB6_WRCLI8__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB6_WRCLI8__URG_HIGH__SHIFT 0x4 +#define DAGB6_WRCLI8__URG_LOW__SHIFT 0x8 +#define DAGB6_WRCLI8__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB6_WRCLI8__MAX_BW__SHIFT 0xd +#define DAGB6_WRCLI8__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB6_WRCLI8__MIN_BW__SHIFT 0x16 +#define DAGB6_WRCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB6_WRCLI8__MAX_OSD__SHIFT 0x1a +#define DAGB6_WRCLI8__VIRT_CHAN_MASK 0x00000007L +#define DAGB6_WRCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB6_WRCLI8__URG_HIGH_MASK 0x000000F0L +#define DAGB6_WRCLI8__URG_LOW_MASK 0x00000F00L +#define DAGB6_WRCLI8__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB6_WRCLI8__MAX_BW_MASK 0x001FE000L +#define DAGB6_WRCLI8__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB6_WRCLI8__MIN_BW_MASK 0x01C00000L +#define DAGB6_WRCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB6_WRCLI8__MAX_OSD_MASK 0xFC000000L +//DAGB6_WRCLI9 +#define DAGB6_WRCLI9__VIRT_CHAN__SHIFT 0x0 +#define DAGB6_WRCLI9__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB6_WRCLI9__URG_HIGH__SHIFT 0x4 +#define DAGB6_WRCLI9__URG_LOW__SHIFT 0x8 +#define DAGB6_WRCLI9__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB6_WRCLI9__MAX_BW__SHIFT 0xd +#define DAGB6_WRCLI9__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB6_WRCLI9__MIN_BW__SHIFT 0x16 +#define DAGB6_WRCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB6_WRCLI9__MAX_OSD__SHIFT 0x1a +#define DAGB6_WRCLI9__VIRT_CHAN_MASK 0x00000007L +#define DAGB6_WRCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB6_WRCLI9__URG_HIGH_MASK 0x000000F0L +#define DAGB6_WRCLI9__URG_LOW_MASK 0x00000F00L +#define DAGB6_WRCLI9__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB6_WRCLI9__MAX_BW_MASK 0x001FE000L +#define DAGB6_WRCLI9__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB6_WRCLI9__MIN_BW_MASK 0x01C00000L 
+#define DAGB6_WRCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB6_WRCLI9__MAX_OSD_MASK 0xFC000000L +//DAGB6_WRCLI10 +#define DAGB6_WRCLI10__VIRT_CHAN__SHIFT 0x0 +#define DAGB6_WRCLI10__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB6_WRCLI10__URG_HIGH__SHIFT 0x4 +#define DAGB6_WRCLI10__URG_LOW__SHIFT 0x8 +#define DAGB6_WRCLI10__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB6_WRCLI10__MAX_BW__SHIFT 0xd +#define DAGB6_WRCLI10__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB6_WRCLI10__MIN_BW__SHIFT 0x16 +#define DAGB6_WRCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB6_WRCLI10__MAX_OSD__SHIFT 0x1a +#define DAGB6_WRCLI10__VIRT_CHAN_MASK 0x00000007L +#define DAGB6_WRCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB6_WRCLI10__URG_HIGH_MASK 0x000000F0L +#define DAGB6_WRCLI10__URG_LOW_MASK 0x00000F00L +#define DAGB6_WRCLI10__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB6_WRCLI10__MAX_BW_MASK 0x001FE000L +#define DAGB6_WRCLI10__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB6_WRCLI10__MIN_BW_MASK 0x01C00000L +#define DAGB6_WRCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB6_WRCLI10__MAX_OSD_MASK 0xFC000000L +//DAGB6_WRCLI11 +#define DAGB6_WRCLI11__VIRT_CHAN__SHIFT 0x0 +#define DAGB6_WRCLI11__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB6_WRCLI11__URG_HIGH__SHIFT 0x4 +#define DAGB6_WRCLI11__URG_LOW__SHIFT 0x8 +#define DAGB6_WRCLI11__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB6_WRCLI11__MAX_BW__SHIFT 0xd +#define DAGB6_WRCLI11__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB6_WRCLI11__MIN_BW__SHIFT 0x16 +#define DAGB6_WRCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB6_WRCLI11__MAX_OSD__SHIFT 0x1a +#define DAGB6_WRCLI11__VIRT_CHAN_MASK 0x00000007L +#define DAGB6_WRCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB6_WRCLI11__URG_HIGH_MASK 0x000000F0L +#define DAGB6_WRCLI11__URG_LOW_MASK 0x00000F00L +#define DAGB6_WRCLI11__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB6_WRCLI11__MAX_BW_MASK 0x001FE000L +#define DAGB6_WRCLI11__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB6_WRCLI11__MIN_BW_MASK 0x01C00000L +#define DAGB6_WRCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB6_WRCLI11__MAX_OSD_MASK 0xFC000000L +//DAGB6_WRCLI12 +#define DAGB6_WRCLI12__VIRT_CHAN__SHIFT 0x0 +#define DAGB6_WRCLI12__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB6_WRCLI12__URG_HIGH__SHIFT 0x4 +#define DAGB6_WRCLI12__URG_LOW__SHIFT 0x8 +#define DAGB6_WRCLI12__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB6_WRCLI12__MAX_BW__SHIFT 0xd +#define DAGB6_WRCLI12__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB6_WRCLI12__MIN_BW__SHIFT 0x16 +#define DAGB6_WRCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB6_WRCLI12__MAX_OSD__SHIFT 0x1a +#define DAGB6_WRCLI12__VIRT_CHAN_MASK 0x00000007L +#define DAGB6_WRCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB6_WRCLI12__URG_HIGH_MASK 0x000000F0L +#define DAGB6_WRCLI12__URG_LOW_MASK 0x00000F00L +#define DAGB6_WRCLI12__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB6_WRCLI12__MAX_BW_MASK 0x001FE000L +#define DAGB6_WRCLI12__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB6_WRCLI12__MIN_BW_MASK 0x01C00000L +#define DAGB6_WRCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB6_WRCLI12__MAX_OSD_MASK 0xFC000000L +//DAGB6_WRCLI13 +#define DAGB6_WRCLI13__VIRT_CHAN__SHIFT 0x0 +#define DAGB6_WRCLI13__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB6_WRCLI13__URG_HIGH__SHIFT 0x4 +#define DAGB6_WRCLI13__URG_LOW__SHIFT 0x8 +#define DAGB6_WRCLI13__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB6_WRCLI13__MAX_BW__SHIFT 0xd +#define DAGB6_WRCLI13__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB6_WRCLI13__MIN_BW__SHIFT 0x16 +#define 
DAGB6_WRCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB6_WRCLI13__MAX_OSD__SHIFT 0x1a +#define DAGB6_WRCLI13__VIRT_CHAN_MASK 0x00000007L +#define DAGB6_WRCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB6_WRCLI13__URG_HIGH_MASK 0x000000F0L +#define DAGB6_WRCLI13__URG_LOW_MASK 0x00000F00L +#define DAGB6_WRCLI13__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB6_WRCLI13__MAX_BW_MASK 0x001FE000L +#define DAGB6_WRCLI13__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB6_WRCLI13__MIN_BW_MASK 0x01C00000L +#define DAGB6_WRCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB6_WRCLI13__MAX_OSD_MASK 0xFC000000L +//DAGB6_WRCLI14 +#define DAGB6_WRCLI14__VIRT_CHAN__SHIFT 0x0 +#define DAGB6_WRCLI14__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB6_WRCLI14__URG_HIGH__SHIFT 0x4 +#define DAGB6_WRCLI14__URG_LOW__SHIFT 0x8 +#define DAGB6_WRCLI14__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB6_WRCLI14__MAX_BW__SHIFT 0xd +#define DAGB6_WRCLI14__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB6_WRCLI14__MIN_BW__SHIFT 0x16 +#define DAGB6_WRCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB6_WRCLI14__MAX_OSD__SHIFT 0x1a +#define DAGB6_WRCLI14__VIRT_CHAN_MASK 0x00000007L +#define DAGB6_WRCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB6_WRCLI14__URG_HIGH_MASK 0x000000F0L +#define DAGB6_WRCLI14__URG_LOW_MASK 0x00000F00L +#define DAGB6_WRCLI14__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB6_WRCLI14__MAX_BW_MASK 0x001FE000L +#define DAGB6_WRCLI14__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB6_WRCLI14__MIN_BW_MASK 0x01C00000L +#define DAGB6_WRCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB6_WRCLI14__MAX_OSD_MASK 0xFC000000L +//DAGB6_WRCLI15 +#define DAGB6_WRCLI15__VIRT_CHAN__SHIFT 0x0 +#define DAGB6_WRCLI15__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB6_WRCLI15__URG_HIGH__SHIFT 0x4 +#define DAGB6_WRCLI15__URG_LOW__SHIFT 0x8 +#define DAGB6_WRCLI15__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB6_WRCLI15__MAX_BW__SHIFT 0xd +#define DAGB6_WRCLI15__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB6_WRCLI15__MIN_BW__SHIFT 0x16 +#define DAGB6_WRCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB6_WRCLI15__MAX_OSD__SHIFT 0x1a +#define DAGB6_WRCLI15__VIRT_CHAN_MASK 0x00000007L +#define DAGB6_WRCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB6_WRCLI15__URG_HIGH_MASK 0x000000F0L +#define DAGB6_WRCLI15__URG_LOW_MASK 0x00000F00L +#define DAGB6_WRCLI15__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB6_WRCLI15__MAX_BW_MASK 0x001FE000L +#define DAGB6_WRCLI15__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB6_WRCLI15__MIN_BW_MASK 0x01C00000L +#define DAGB6_WRCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB6_WRCLI15__MAX_OSD_MASK 0xFC000000L +//DAGB6_WR_CNTL +#define DAGB6_WR_CNTL__SCLK_FREQ__SHIFT 0x0 +#define DAGB6_WR_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4 +#define DAGB6_WR_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa +#define DAGB6_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10 +#define DAGB6_WR_CNTL__IO_LEVEL__SHIFT 0x11 +#define DAGB6_WR_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14 +#define DAGB6_WR_CNTL__SHARE_VC_NUM__SHIFT 0x17 +#define DAGB6_WR_CNTL__SCLK_FREQ_MASK 0x0000000FL +#define DAGB6_WR_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L +#define DAGB6_WR_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L +#define DAGB6_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L +#define DAGB6_WR_CNTL__IO_LEVEL_MASK 0x000E0000L +#define DAGB6_WR_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L +#define DAGB6_WR_CNTL__SHARE_VC_NUM_MASK 0x03800000L +//DAGB6_WR_GMI_CNTL +#define DAGB6_WR_GMI_CNTL__EA_CREDIT__SHIFT 0x0 +#define DAGB6_WR_GMI_CNTL__LEVEL__SHIFT 0x6 +#define 
DAGB6_WR_GMI_CNTL__MAX_BURST__SHIFT 0x9 +#define DAGB6_WR_GMI_CNTL__LAZY_TIMER__SHIFT 0xd +#define DAGB6_WR_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL +#define DAGB6_WR_GMI_CNTL__LEVEL_MASK 0x000001C0L +#define DAGB6_WR_GMI_CNTL__MAX_BURST_MASK 0x00001E00L +#define DAGB6_WR_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L +//DAGB6_WR_ADDR_DAGB +#define DAGB6_WR_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0 +#define DAGB6_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3 +#define DAGB6_WR_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6 +#define DAGB6_WR_ADDR_DAGB__WHOAMI__SHIFT 0x7 +#define DAGB6_WR_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L +#define DAGB6_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L +#define DAGB6_WR_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L +#define DAGB6_WR_ADDR_DAGB__WHOAMI_MASK 0x00001F80L +//DAGB6_WR_OUTPUT_DAGB_MAX_BURST +#define DAGB6_WR_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0 +#define DAGB6_WR_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4 +#define DAGB6_WR_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8 +#define DAGB6_WR_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc +#define DAGB6_WR_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10 +#define DAGB6_WR_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14 +#define DAGB6_WR_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18 +#define DAGB6_WR_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c +#define DAGB6_WR_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL +#define DAGB6_WR_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L +#define DAGB6_WR_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L +#define DAGB6_WR_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L +#define DAGB6_WR_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L +#define DAGB6_WR_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L +#define DAGB6_WR_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L +#define DAGB6_WR_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L +//DAGB6_WR_OUTPUT_DAGB_LAZY_TIMER +#define DAGB6_WR_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0 +#define DAGB6_WR_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4 +#define DAGB6_WR_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8 +#define DAGB6_WR_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc +#define DAGB6_WR_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10 +#define DAGB6_WR_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14 +#define DAGB6_WR_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18 +#define DAGB6_WR_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c +#define DAGB6_WR_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL +#define DAGB6_WR_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L +#define DAGB6_WR_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L +#define DAGB6_WR_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L +#define DAGB6_WR_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L +#define DAGB6_WR_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L +#define DAGB6_WR_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L +#define DAGB6_WR_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L +//DAGB6_WR_CGTT_CLK_CTRL +#define DAGB6_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB6_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB6_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB6_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB6_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB6_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB6_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB6_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB6_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB6_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB6_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB6_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define 
DAGB6_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB6_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB6_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB6_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB6_L1TLB_WR_CGTT_CLK_CTRL +#define DAGB6_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB6_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB6_L1TLB_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB6_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB6_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB6_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB6_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB6_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB6_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB6_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB6_L1TLB_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB6_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB6_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB6_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB6_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB6_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB6_ATCVM_WR_CGTT_CLK_CTRL +#define DAGB6_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB6_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB6_ATCVM_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB6_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB6_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB6_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB6_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB6_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB6_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB6_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB6_ATCVM_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB6_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB6_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB6_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB6_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB6_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB6_WR_ADDR_DAGB_MAX_BURST0 +#define DAGB6_WR_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0 +#define DAGB6_WR_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4 +#define DAGB6_WR_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8 +#define DAGB6_WR_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc +#define DAGB6_WR_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10 +#define DAGB6_WR_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14 +#define DAGB6_WR_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18 +#define DAGB6_WR_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c +#define DAGB6_WR_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL +#define DAGB6_WR_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L +#define DAGB6_WR_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L +#define DAGB6_WR_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L +#define DAGB6_WR_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L +#define DAGB6_WR_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L +#define DAGB6_WR_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L +#define 
DAGB6_WR_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L +//DAGB6_WR_ADDR_DAGB_LAZY_TIMER0 +#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0 +#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4 +#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8 +#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc +#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10 +#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14 +#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18 +#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c +#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL +#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L +#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L +#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L +#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L +#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L +#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L +#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L +//DAGB6_WR_ADDR_DAGB_MAX_BURST1 +#define DAGB6_WR_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0 +#define DAGB6_WR_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4 +#define DAGB6_WR_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8 +#define DAGB6_WR_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc +#define DAGB6_WR_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10 +#define DAGB6_WR_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14 +#define DAGB6_WR_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18 +#define DAGB6_WR_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c +#define DAGB6_WR_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL +#define DAGB6_WR_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L +#define DAGB6_WR_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L +#define DAGB6_WR_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L +#define DAGB6_WR_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L +#define DAGB6_WR_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L +#define DAGB6_WR_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L +#define DAGB6_WR_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L +//DAGB6_WR_ADDR_DAGB_LAZY_TIMER1 +#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0 +#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4 +#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8 +#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc +#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10 +#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14 +#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18 +#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c +#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL +#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L +#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L +#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L +#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L +#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L +#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L +#define DAGB6_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L +//DAGB6_WR_DATA_DAGB +#define DAGB6_WR_DATA_DAGB__DAGB_ENABLE__SHIFT 0x0 +#define DAGB6_WR_DATA_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3 +#define DAGB6_WR_DATA_DAGB__DISABLE_SELF_INIT__SHIFT 0x6 +#define DAGB6_WR_DATA_DAGB__WHOAMI__SHIFT 0x7 +#define DAGB6_WR_DATA_DAGB__DAGB_ENABLE_MASK 0x00000007L +#define DAGB6_WR_DATA_DAGB__ENABLE_JUMP_AHEAD_MASK 
0x00000038L +#define DAGB6_WR_DATA_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L +#define DAGB6_WR_DATA_DAGB__WHOAMI_MASK 0x00001F80L +//DAGB6_WR_DATA_DAGB_MAX_BURST0 +#define DAGB6_WR_DATA_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0 +#define DAGB6_WR_DATA_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4 +#define DAGB6_WR_DATA_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8 +#define DAGB6_WR_DATA_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc +#define DAGB6_WR_DATA_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10 +#define DAGB6_WR_DATA_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14 +#define DAGB6_WR_DATA_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18 +#define DAGB6_WR_DATA_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c +#define DAGB6_WR_DATA_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL +#define DAGB6_WR_DATA_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L +#define DAGB6_WR_DATA_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L +#define DAGB6_WR_DATA_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L +#define DAGB6_WR_DATA_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L +#define DAGB6_WR_DATA_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L +#define DAGB6_WR_DATA_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L +#define DAGB6_WR_DATA_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L +//DAGB6_WR_DATA_DAGB_LAZY_TIMER0 +#define DAGB6_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0 +#define DAGB6_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4 +#define DAGB6_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8 +#define DAGB6_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc +#define DAGB6_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10 +#define DAGB6_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14 +#define DAGB6_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18 +#define DAGB6_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c +#define DAGB6_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL +#define DAGB6_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L +#define DAGB6_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L +#define DAGB6_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L +#define DAGB6_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L +#define DAGB6_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L +#define DAGB6_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L +#define DAGB6_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L +//DAGB6_WR_DATA_DAGB_MAX_BURST1 +#define DAGB6_WR_DATA_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0 +#define DAGB6_WR_DATA_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4 +#define DAGB6_WR_DATA_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8 +#define DAGB6_WR_DATA_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc +#define DAGB6_WR_DATA_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10 +#define DAGB6_WR_DATA_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14 +#define DAGB6_WR_DATA_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18 +#define DAGB6_WR_DATA_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c +#define DAGB6_WR_DATA_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL +#define DAGB6_WR_DATA_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L +#define DAGB6_WR_DATA_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L +#define DAGB6_WR_DATA_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L +#define DAGB6_WR_DATA_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L +#define DAGB6_WR_DATA_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L +#define DAGB6_WR_DATA_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L +#define DAGB6_WR_DATA_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L +//DAGB6_WR_DATA_DAGB_LAZY_TIMER1 +#define DAGB6_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0 +#define DAGB6_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4 +#define DAGB6_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8 +#define DAGB6_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc +#define 
DAGB6_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10 +#define DAGB6_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14 +#define DAGB6_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18 +#define DAGB6_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c +#define DAGB6_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL +#define DAGB6_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L +#define DAGB6_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L +#define DAGB6_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L +#define DAGB6_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L +#define DAGB6_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L +#define DAGB6_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L +#define DAGB6_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L +//DAGB6_WR_VC0_CNTL +#define DAGB6_WR_VC0_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB6_WR_VC0_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB6_WR_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB6_WR_VC0_CNTL__MAX_BW__SHIFT 0xc +#define DAGB6_WR_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB6_WR_VC0_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB6_WR_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB6_WR_VC0_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB6_WR_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB6_WR_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB6_WR_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB6_WR_VC0_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB6_WR_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB6_WR_VC0_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB6_WR_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB6_WR_VC0_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB6_WR_VC1_CNTL +#define DAGB6_WR_VC1_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB6_WR_VC1_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB6_WR_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB6_WR_VC1_CNTL__MAX_BW__SHIFT 0xc +#define DAGB6_WR_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB6_WR_VC1_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB6_WR_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB6_WR_VC1_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB6_WR_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB6_WR_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB6_WR_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB6_WR_VC1_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB6_WR_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB6_WR_VC1_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB6_WR_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB6_WR_VC1_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB6_WR_VC2_CNTL +#define DAGB6_WR_VC2_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB6_WR_VC2_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB6_WR_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB6_WR_VC2_CNTL__MAX_BW__SHIFT 0xc +#define DAGB6_WR_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB6_WR_VC2_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB6_WR_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB6_WR_VC2_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB6_WR_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB6_WR_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB6_WR_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB6_WR_VC2_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB6_WR_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB6_WR_VC2_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB6_WR_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB6_WR_VC2_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB6_WR_VC3_CNTL +#define DAGB6_WR_VC3_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB6_WR_VC3_CNTL__EA_CREDIT__SHIFT 0x5 +#define 
DAGB6_WR_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB6_WR_VC3_CNTL__MAX_BW__SHIFT 0xc +#define DAGB6_WR_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB6_WR_VC3_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB6_WR_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB6_WR_VC3_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB6_WR_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB6_WR_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB6_WR_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB6_WR_VC3_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB6_WR_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB6_WR_VC3_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB6_WR_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB6_WR_VC3_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB6_WR_VC4_CNTL +#define DAGB6_WR_VC4_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB6_WR_VC4_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB6_WR_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB6_WR_VC4_CNTL__MAX_BW__SHIFT 0xc +#define DAGB6_WR_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB6_WR_VC4_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB6_WR_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB6_WR_VC4_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB6_WR_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB6_WR_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB6_WR_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB6_WR_VC4_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB6_WR_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB6_WR_VC4_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB6_WR_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB6_WR_VC4_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB6_WR_VC5_CNTL +#define DAGB6_WR_VC5_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB6_WR_VC5_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB6_WR_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB6_WR_VC5_CNTL__MAX_BW__SHIFT 0xc +#define DAGB6_WR_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB6_WR_VC5_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB6_WR_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB6_WR_VC5_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB6_WR_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB6_WR_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB6_WR_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB6_WR_VC5_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB6_WR_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB6_WR_VC5_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB6_WR_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB6_WR_VC5_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB6_WR_VC6_CNTL +#define DAGB6_WR_VC6_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB6_WR_VC6_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB6_WR_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB6_WR_VC6_CNTL__MAX_BW__SHIFT 0xc +#define DAGB6_WR_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB6_WR_VC6_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB6_WR_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB6_WR_VC6_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB6_WR_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB6_WR_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB6_WR_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB6_WR_VC6_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB6_WR_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB6_WR_VC6_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB6_WR_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB6_WR_VC6_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB6_WR_VC7_CNTL +#define DAGB6_WR_VC7_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB6_WR_VC7_CNTL__EA_CREDIT__SHIFT 0x5 +#define 
DAGB6_WR_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB6_WR_VC7_CNTL__MAX_BW__SHIFT 0xc +#define DAGB6_WR_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB6_WR_VC7_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB6_WR_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB6_WR_VC7_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB6_WR_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB6_WR_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB6_WR_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB6_WR_VC7_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB6_WR_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB6_WR_VC7_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB6_WR_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB6_WR_VC7_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB6_WR_CNTL_MISC +#define DAGB6_WR_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0 +#define DAGB6_WR_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6 +#define DAGB6_WR_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd +#define DAGB6_WR_CNTL_MISC__STOR_CC_LEGACY_MODE__SHIFT 0x13 +#define DAGB6_WR_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14 +#define DAGB6_WR_CNTL_MISC__UTCL2_CID__SHIFT 0x15 +#define DAGB6_WR_CNTL_MISC__RDRET_FIFO_CREDITS__SHIFT 0x1a +#define DAGB6_WR_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL +#define DAGB6_WR_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L +#define DAGB6_WR_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L +#define DAGB6_WR_CNTL_MISC__STOR_CC_LEGACY_MODE_MASK 0x00080000L +#define DAGB6_WR_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L +#define DAGB6_WR_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L +#define DAGB6_WR_CNTL_MISC__RDRET_FIFO_CREDITS_MASK 0xFC000000L +//DAGB6_WR_TLB_CREDIT +#define DAGB6_WR_TLB_CREDIT__TLB0__SHIFT 0x0 +#define DAGB6_WR_TLB_CREDIT__TLB1__SHIFT 0x5 +#define DAGB6_WR_TLB_CREDIT__TLB2__SHIFT 0xa +#define DAGB6_WR_TLB_CREDIT__TLB3__SHIFT 0xf +#define DAGB6_WR_TLB_CREDIT__TLB4__SHIFT 0x14 +#define DAGB6_WR_TLB_CREDIT__TLB5__SHIFT 0x19 +#define DAGB6_WR_TLB_CREDIT__TLB0_MASK 0x0000001FL +#define DAGB6_WR_TLB_CREDIT__TLB1_MASK 0x000003E0L +#define DAGB6_WR_TLB_CREDIT__TLB2_MASK 0x00007C00L +#define DAGB6_WR_TLB_CREDIT__TLB3_MASK 0x000F8000L +#define DAGB6_WR_TLB_CREDIT__TLB4_MASK 0x01F00000L +#define DAGB6_WR_TLB_CREDIT__TLB5_MASK 0x3E000000L +//DAGB6_WR_DATA_CREDIT +#define DAGB6_WR_DATA_CREDIT__DLOCK_VC_CREDITS__SHIFT 0x0 +#define DAGB6_WR_DATA_CREDIT__LARGE_BURST_CREDITS__SHIFT 0x8 +#define DAGB6_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS__SHIFT 0x10 +#define DAGB6_WR_DATA_CREDIT__SMALL_BURST_CREDITS__SHIFT 0x18 +#define DAGB6_WR_DATA_CREDIT__DLOCK_VC_CREDITS_MASK 0x000000FFL +#define DAGB6_WR_DATA_CREDIT__LARGE_BURST_CREDITS_MASK 0x0000FF00L +#define DAGB6_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS_MASK 0x00FF0000L +#define DAGB6_WR_DATA_CREDIT__SMALL_BURST_CREDITS_MASK 0xFF000000L +//DAGB6_WR_MISC_CREDIT +#define DAGB6_WR_MISC_CREDIT__ATOMIC_CREDIT__SHIFT 0x0 +#define DAGB6_WR_MISC_CREDIT__DLOCK_VC_NUM__SHIFT 0x6 +#define DAGB6_WR_MISC_CREDIT__OSD_CREDIT__SHIFT 0x9 +#define DAGB6_WR_MISC_CREDIT__OSD_DLOCK_CREDIT__SHIFT 0x10 +#define DAGB6_WR_MISC_CREDIT__ATOMIC_CREDIT_MASK 0x0000003FL +#define DAGB6_WR_MISC_CREDIT__DLOCK_VC_NUM_MASK 0x000001C0L +#define DAGB6_WR_MISC_CREDIT__OSD_CREDIT_MASK 0x0000FE00L +#define DAGB6_WR_MISC_CREDIT__OSD_DLOCK_CREDIT_MASK 0x007F0000L +//DAGB6_WRCLI_ASK_PENDING +#define DAGB6_WRCLI_ASK_PENDING__BUSY__SHIFT 0x0 +#define DAGB6_WRCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB6_WRCLI_GO_PENDING +#define DAGB6_WRCLI_GO_PENDING__BUSY__SHIFT 0x0 +#define DAGB6_WRCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB6_WRCLI_GBLSEND_PENDING +#define 
DAGB6_WRCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0 +#define DAGB6_WRCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB6_WRCLI_TLB_PENDING +#define DAGB6_WRCLI_TLB_PENDING__BUSY__SHIFT 0x0 +#define DAGB6_WRCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB6_WRCLI_OARB_PENDING +#define DAGB6_WRCLI_OARB_PENDING__BUSY__SHIFT 0x0 +#define DAGB6_WRCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB6_WRCLI_OSD_PENDING +#define DAGB6_WRCLI_OSD_PENDING__BUSY__SHIFT 0x0 +#define DAGB6_WRCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB6_WRCLI_DBUS_ASK_PENDING +#define DAGB6_WRCLI_DBUS_ASK_PENDING__BUSY__SHIFT 0x0 +#define DAGB6_WRCLI_DBUS_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB6_WRCLI_DBUS_GO_PENDING +#define DAGB6_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0 +#define DAGB6_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB6_DAGB_DLY +#define DAGB6_DAGB_DLY__DLY__SHIFT 0x0 +#define DAGB6_DAGB_DLY__CLI__SHIFT 0x8 +#define DAGB6_DAGB_DLY__POS__SHIFT 0x10 +#define DAGB6_DAGB_DLY__DLY_MASK 0x000000FFL +#define DAGB6_DAGB_DLY__CLI_MASK 0x0000FF00L +#define DAGB6_DAGB_DLY__POS_MASK 0x000F0000L +//DAGB6_CNTL_MISC +#define DAGB6_CNTL_MISC__EA_VC0_REMAP__SHIFT 0x0 +#define DAGB6_CNTL_MISC__EA_VC1_REMAP__SHIFT 0x3 +#define DAGB6_CNTL_MISC__EA_VC2_REMAP__SHIFT 0x6 +#define DAGB6_CNTL_MISC__EA_VC3_REMAP__SHIFT 0x9 +#define DAGB6_CNTL_MISC__EA_VC4_REMAP__SHIFT 0xc +#define DAGB6_CNTL_MISC__EA_VC5_REMAP__SHIFT 0xf +#define DAGB6_CNTL_MISC__EA_VC6_REMAP__SHIFT 0x12 +#define DAGB6_CNTL_MISC__EA_VC7_REMAP__SHIFT 0x15 +#define DAGB6_CNTL_MISC__BW_INIT_CYCLE__SHIFT 0x18 +#define DAGB6_CNTL_MISC__BW_RW_GAP_CYCLE__SHIFT 0x1e +#define DAGB6_CNTL_MISC__EA_VC0_REMAP_MASK 0x00000007L +#define DAGB6_CNTL_MISC__EA_VC1_REMAP_MASK 0x00000038L +#define DAGB6_CNTL_MISC__EA_VC2_REMAP_MASK 0x000001C0L +#define DAGB6_CNTL_MISC__EA_VC3_REMAP_MASK 0x00000E00L +#define DAGB6_CNTL_MISC__EA_VC4_REMAP_MASK 0x00007000L +#define DAGB6_CNTL_MISC__EA_VC5_REMAP_MASK 0x00038000L +#define DAGB6_CNTL_MISC__EA_VC6_REMAP_MASK 0x001C0000L +#define DAGB6_CNTL_MISC__EA_VC7_REMAP_MASK 0x00E00000L +#define DAGB6_CNTL_MISC__BW_INIT_CYCLE_MASK 0x3F000000L +#define DAGB6_CNTL_MISC__BW_RW_GAP_CYCLE_MASK 0xC0000000L +//DAGB6_CNTL_MISC2 +#define DAGB6_CNTL_MISC2__URG_BOOST_ENABLE__SHIFT 0x0 +#define DAGB6_CNTL_MISC2__URG_HALT_ENABLE__SHIFT 0x1 +#define DAGB6_CNTL_MISC2__DISABLE_WRREQ_CG__SHIFT 0x2 +#define DAGB6_CNTL_MISC2__DISABLE_WRRET_CG__SHIFT 0x3 +#define DAGB6_CNTL_MISC2__DISABLE_RDREQ_CG__SHIFT 0x4 +#define DAGB6_CNTL_MISC2__DISABLE_RDRET_CG__SHIFT 0x5 +#define DAGB6_CNTL_MISC2__DISABLE_TLBWR_CG__SHIFT 0x6 +#define DAGB6_CNTL_MISC2__DISABLE_TLBRD_CG__SHIFT 0x7 +#define DAGB6_CNTL_MISC2__DISABLE_EAWRREQ_BUSY__SHIFT 0x8 +#define DAGB6_CNTL_MISC2__DISABLE_EARDREQ_BUSY__SHIFT 0x9 +#define DAGB6_CNTL_MISC2__SWAP_CTL__SHIFT 0xa +#define DAGB6_CNTL_MISC2__RDRET_FIFO_PERF__SHIFT 0xb +#define DAGB6_CNTL_MISC2__RDRET_FIFO_DLOCK_CREDITS__SHIFT 0x11 +#define DAGB6_CNTL_MISC2__URG_BOOST_ENABLE_MASK 0x00000001L +#define DAGB6_CNTL_MISC2__URG_HALT_ENABLE_MASK 0x00000002L +#define DAGB6_CNTL_MISC2__DISABLE_WRREQ_CG_MASK 0x00000004L +#define DAGB6_CNTL_MISC2__DISABLE_WRRET_CG_MASK 0x00000008L +#define DAGB6_CNTL_MISC2__DISABLE_RDREQ_CG_MASK 0x00000010L +#define DAGB6_CNTL_MISC2__DISABLE_RDRET_CG_MASK 0x00000020L +#define DAGB6_CNTL_MISC2__DISABLE_TLBWR_CG_MASK 0x00000040L +#define DAGB6_CNTL_MISC2__DISABLE_TLBRD_CG_MASK 0x00000080L +#define DAGB6_CNTL_MISC2__DISABLE_EAWRREQ_BUSY_MASK 0x00000100L +#define DAGB6_CNTL_MISC2__DISABLE_EARDREQ_BUSY_MASK 0x00000200L +#define 
DAGB6_CNTL_MISC2__SWAP_CTL_MASK 0x00000400L +#define DAGB6_CNTL_MISC2__RDRET_FIFO_PERF_MASK 0x00000800L +#define DAGB6_CNTL_MISC2__RDRET_FIFO_DLOCK_CREDITS_MASK 0x007E0000L +//DAGB6_FIFO_EMPTY +#define DAGB6_FIFO_EMPTY__EMPTY__SHIFT 0x0 +#define DAGB6_FIFO_EMPTY__EMPTY_MASK 0x00FFFFFFL +//DAGB6_FIFO_FULL +#define DAGB6_FIFO_FULL__FULL__SHIFT 0x0 +#define DAGB6_FIFO_FULL__FULL_MASK 0x007FFFFFL +//DAGB6_WR_CREDITS_FULL +#define DAGB6_WR_CREDITS_FULL__FULL__SHIFT 0x0 +#define DAGB6_WR_CREDITS_FULL__FULL_MASK 0x1FFFFFFFL +//DAGB6_RD_CREDITS_FULL +#define DAGB6_RD_CREDITS_FULL__FULL__SHIFT 0x0 +#define DAGB6_RD_CREDITS_FULL__FULL_MASK 0x0003FFFFL +//DAGB6_PERFCOUNTER_LO +#define DAGB6_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0 +#define DAGB6_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL +//DAGB6_PERFCOUNTER_HI +#define DAGB6_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0 +#define DAGB6_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10 +#define DAGB6_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL +#define DAGB6_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L +//DAGB6_PERFCOUNTER0_CFG +#define DAGB6_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0 +#define DAGB6_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8 +#define DAGB6_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18 +#define DAGB6_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c +#define DAGB6_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d +#define DAGB6_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL +#define DAGB6_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define DAGB6_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L +#define DAGB6_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L +#define DAGB6_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L +//DAGB6_PERFCOUNTER1_CFG +#define DAGB6_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0 +#define DAGB6_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8 +#define DAGB6_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18 +#define DAGB6_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c +#define DAGB6_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d +#define DAGB6_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL +#define DAGB6_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define DAGB6_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L +#define DAGB6_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L +#define DAGB6_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L +//DAGB6_PERFCOUNTER2_CFG +#define DAGB6_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0 +#define DAGB6_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8 +#define DAGB6_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18 +#define DAGB6_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c +#define DAGB6_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d +#define DAGB6_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL +#define DAGB6_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define DAGB6_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L +#define DAGB6_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L +#define DAGB6_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L +//DAGB6_PERFCOUNTER_RSLT_CNTL +#define DAGB6_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0 +#define DAGB6_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8 +#define DAGB6_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10 +#define DAGB6_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18 +#define DAGB6_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19 +#define DAGB6_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a +#define DAGB6_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL +#define DAGB6_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L +#define DAGB6_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L +#define DAGB6_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L +#define 
DAGB6_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L +#define DAGB6_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L +//DAGB6_RESERVE0 +#define DAGB6_RESERVE0__RESERVE__SHIFT 0x0 +#define DAGB6_RESERVE0__RESERVE_MASK 0xFFFFFFFFL +//DAGB6_RESERVE1 +#define DAGB6_RESERVE1__RESERVE__SHIFT 0x0 +#define DAGB6_RESERVE1__RESERVE_MASK 0xFFFFFFFFL +//DAGB6_RESERVE2 +#define DAGB6_RESERVE2__RESERVE__SHIFT 0x0 +#define DAGB6_RESERVE2__RESERVE_MASK 0xFFFFFFFFL +//DAGB6_RESERVE3 +#define DAGB6_RESERVE3__RESERVE__SHIFT 0x0 +#define DAGB6_RESERVE3__RESERVE_MASK 0xFFFFFFFFL +//DAGB6_RESERVE4 +#define DAGB6_RESERVE4__RESERVE__SHIFT 0x0 +#define DAGB6_RESERVE4__RESERVE_MASK 0xFFFFFFFFL +//DAGB6_RESERVE5 +#define DAGB6_RESERVE5__RESERVE__SHIFT 0x0 +#define DAGB6_RESERVE5__RESERVE_MASK 0xFFFFFFFFL +//DAGB6_RESERVE6 +#define DAGB6_RESERVE6__RESERVE__SHIFT 0x0 +#define DAGB6_RESERVE6__RESERVE_MASK 0xFFFFFFFFL +//DAGB6_RESERVE7 +#define DAGB6_RESERVE7__RESERVE__SHIFT 0x0 +#define DAGB6_RESERVE7__RESERVE_MASK 0xFFFFFFFFL +//DAGB6_RESERVE8 +#define DAGB6_RESERVE8__RESERVE__SHIFT 0x0 +#define DAGB6_RESERVE8__RESERVE_MASK 0xFFFFFFFFL +//DAGB6_RESERVE9 +#define DAGB6_RESERVE9__RESERVE__SHIFT 0x0 +#define DAGB6_RESERVE9__RESERVE_MASK 0xFFFFFFFFL +//DAGB6_RESERVE10 +#define DAGB6_RESERVE10__RESERVE__SHIFT 0x0 +#define DAGB6_RESERVE10__RESERVE_MASK 0xFFFFFFFFL +//DAGB6_RESERVE11 +#define DAGB6_RESERVE11__RESERVE__SHIFT 0x0 +#define DAGB6_RESERVE11__RESERVE_MASK 0xFFFFFFFFL +//DAGB6_RESERVE12 +#define DAGB6_RESERVE12__RESERVE__SHIFT 0x0 +#define DAGB6_RESERVE12__RESERVE_MASK 0xFFFFFFFFL +//DAGB6_RESERVE13 +#define DAGB6_RESERVE13__RESERVE__SHIFT 0x0 +#define DAGB6_RESERVE13__RESERVE_MASK 0xFFFFFFFFL + + +// addressBlock: mmhub_dagb_dagbdec7 +//DAGB7_RDCLI0 +#define DAGB7_RDCLI0__VIRT_CHAN__SHIFT 0x0 +#define DAGB7_RDCLI0__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB7_RDCLI0__URG_HIGH__SHIFT 0x4 +#define DAGB7_RDCLI0__URG_LOW__SHIFT 0x8 +#define DAGB7_RDCLI0__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB7_RDCLI0__MAX_BW__SHIFT 0xd +#define DAGB7_RDCLI0__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB7_RDCLI0__MIN_BW__SHIFT 0x16 +#define DAGB7_RDCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB7_RDCLI0__MAX_OSD__SHIFT 0x1a +#define DAGB7_RDCLI0__VIRT_CHAN_MASK 0x00000007L +#define DAGB7_RDCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB7_RDCLI0__URG_HIGH_MASK 0x000000F0L +#define DAGB7_RDCLI0__URG_LOW_MASK 0x00000F00L +#define DAGB7_RDCLI0__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB7_RDCLI0__MAX_BW_MASK 0x001FE000L +#define DAGB7_RDCLI0__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB7_RDCLI0__MIN_BW_MASK 0x01C00000L +#define DAGB7_RDCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB7_RDCLI0__MAX_OSD_MASK 0xFC000000L +//DAGB7_RDCLI1 +#define DAGB7_RDCLI1__VIRT_CHAN__SHIFT 0x0 +#define DAGB7_RDCLI1__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB7_RDCLI1__URG_HIGH__SHIFT 0x4 +#define DAGB7_RDCLI1__URG_LOW__SHIFT 0x8 +#define DAGB7_RDCLI1__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB7_RDCLI1__MAX_BW__SHIFT 0xd +#define DAGB7_RDCLI1__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB7_RDCLI1__MIN_BW__SHIFT 0x16 +#define DAGB7_RDCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB7_RDCLI1__MAX_OSD__SHIFT 0x1a +#define DAGB7_RDCLI1__VIRT_CHAN_MASK 0x00000007L +#define DAGB7_RDCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB7_RDCLI1__URG_HIGH_MASK 0x000000F0L +#define DAGB7_RDCLI1__URG_LOW_MASK 0x00000F00L +#define DAGB7_RDCLI1__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB7_RDCLI1__MAX_BW_MASK 0x001FE000L +#define 
DAGB7_RDCLI1__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB7_RDCLI1__MIN_BW_MASK 0x01C00000L +#define DAGB7_RDCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB7_RDCLI1__MAX_OSD_MASK 0xFC000000L +//DAGB7_RDCLI2 +#define DAGB7_RDCLI2__VIRT_CHAN__SHIFT 0x0 +#define DAGB7_RDCLI2__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB7_RDCLI2__URG_HIGH__SHIFT 0x4 +#define DAGB7_RDCLI2__URG_LOW__SHIFT 0x8 +#define DAGB7_RDCLI2__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB7_RDCLI2__MAX_BW__SHIFT 0xd +#define DAGB7_RDCLI2__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB7_RDCLI2__MIN_BW__SHIFT 0x16 +#define DAGB7_RDCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB7_RDCLI2__MAX_OSD__SHIFT 0x1a +#define DAGB7_RDCLI2__VIRT_CHAN_MASK 0x00000007L +#define DAGB7_RDCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB7_RDCLI2__URG_HIGH_MASK 0x000000F0L +#define DAGB7_RDCLI2__URG_LOW_MASK 0x00000F00L +#define DAGB7_RDCLI2__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB7_RDCLI2__MAX_BW_MASK 0x001FE000L +#define DAGB7_RDCLI2__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB7_RDCLI2__MIN_BW_MASK 0x01C00000L +#define DAGB7_RDCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB7_RDCLI2__MAX_OSD_MASK 0xFC000000L +//DAGB7_RDCLI3 +#define DAGB7_RDCLI3__VIRT_CHAN__SHIFT 0x0 +#define DAGB7_RDCLI3__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB7_RDCLI3__URG_HIGH__SHIFT 0x4 +#define DAGB7_RDCLI3__URG_LOW__SHIFT 0x8 +#define DAGB7_RDCLI3__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB7_RDCLI3__MAX_BW__SHIFT 0xd +#define DAGB7_RDCLI3__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB7_RDCLI3__MIN_BW__SHIFT 0x16 +#define DAGB7_RDCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB7_RDCLI3__MAX_OSD__SHIFT 0x1a +#define DAGB7_RDCLI3__VIRT_CHAN_MASK 0x00000007L +#define DAGB7_RDCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB7_RDCLI3__URG_HIGH_MASK 0x000000F0L +#define DAGB7_RDCLI3__URG_LOW_MASK 0x00000F00L +#define DAGB7_RDCLI3__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB7_RDCLI3__MAX_BW_MASK 0x001FE000L +#define DAGB7_RDCLI3__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB7_RDCLI3__MIN_BW_MASK 0x01C00000L +#define DAGB7_RDCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB7_RDCLI3__MAX_OSD_MASK 0xFC000000L +//DAGB7_RDCLI4 +#define DAGB7_RDCLI4__VIRT_CHAN__SHIFT 0x0 +#define DAGB7_RDCLI4__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB7_RDCLI4__URG_HIGH__SHIFT 0x4 +#define DAGB7_RDCLI4__URG_LOW__SHIFT 0x8 +#define DAGB7_RDCLI4__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB7_RDCLI4__MAX_BW__SHIFT 0xd +#define DAGB7_RDCLI4__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB7_RDCLI4__MIN_BW__SHIFT 0x16 +#define DAGB7_RDCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB7_RDCLI4__MAX_OSD__SHIFT 0x1a +#define DAGB7_RDCLI4__VIRT_CHAN_MASK 0x00000007L +#define DAGB7_RDCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB7_RDCLI4__URG_HIGH_MASK 0x000000F0L +#define DAGB7_RDCLI4__URG_LOW_MASK 0x00000F00L +#define DAGB7_RDCLI4__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB7_RDCLI4__MAX_BW_MASK 0x001FE000L +#define DAGB7_RDCLI4__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB7_RDCLI4__MIN_BW_MASK 0x01C00000L +#define DAGB7_RDCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB7_RDCLI4__MAX_OSD_MASK 0xFC000000L +//DAGB7_RDCLI5 +#define DAGB7_RDCLI5__VIRT_CHAN__SHIFT 0x0 +#define DAGB7_RDCLI5__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB7_RDCLI5__URG_HIGH__SHIFT 0x4 +#define DAGB7_RDCLI5__URG_LOW__SHIFT 0x8 +#define DAGB7_RDCLI5__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB7_RDCLI5__MAX_BW__SHIFT 0xd +#define DAGB7_RDCLI5__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB7_RDCLI5__MIN_BW__SHIFT 0x16 +#define 
DAGB7_RDCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB7_RDCLI5__MAX_OSD__SHIFT 0x1a +#define DAGB7_RDCLI5__VIRT_CHAN_MASK 0x00000007L +#define DAGB7_RDCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB7_RDCLI5__URG_HIGH_MASK 0x000000F0L +#define DAGB7_RDCLI5__URG_LOW_MASK 0x00000F00L +#define DAGB7_RDCLI5__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB7_RDCLI5__MAX_BW_MASK 0x001FE000L +#define DAGB7_RDCLI5__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB7_RDCLI5__MIN_BW_MASK 0x01C00000L +#define DAGB7_RDCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB7_RDCLI5__MAX_OSD_MASK 0xFC000000L +//DAGB7_RDCLI6 +#define DAGB7_RDCLI6__VIRT_CHAN__SHIFT 0x0 +#define DAGB7_RDCLI6__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB7_RDCLI6__URG_HIGH__SHIFT 0x4 +#define DAGB7_RDCLI6__URG_LOW__SHIFT 0x8 +#define DAGB7_RDCLI6__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB7_RDCLI6__MAX_BW__SHIFT 0xd +#define DAGB7_RDCLI6__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB7_RDCLI6__MIN_BW__SHIFT 0x16 +#define DAGB7_RDCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB7_RDCLI6__MAX_OSD__SHIFT 0x1a +#define DAGB7_RDCLI6__VIRT_CHAN_MASK 0x00000007L +#define DAGB7_RDCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB7_RDCLI6__URG_HIGH_MASK 0x000000F0L +#define DAGB7_RDCLI6__URG_LOW_MASK 0x00000F00L +#define DAGB7_RDCLI6__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB7_RDCLI6__MAX_BW_MASK 0x001FE000L +#define DAGB7_RDCLI6__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB7_RDCLI6__MIN_BW_MASK 0x01C00000L +#define DAGB7_RDCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB7_RDCLI6__MAX_OSD_MASK 0xFC000000L +//DAGB7_RDCLI7 +#define DAGB7_RDCLI7__VIRT_CHAN__SHIFT 0x0 +#define DAGB7_RDCLI7__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB7_RDCLI7__URG_HIGH__SHIFT 0x4 +#define DAGB7_RDCLI7__URG_LOW__SHIFT 0x8 +#define DAGB7_RDCLI7__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB7_RDCLI7__MAX_BW__SHIFT 0xd +#define DAGB7_RDCLI7__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB7_RDCLI7__MIN_BW__SHIFT 0x16 +#define DAGB7_RDCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB7_RDCLI7__MAX_OSD__SHIFT 0x1a +#define DAGB7_RDCLI7__VIRT_CHAN_MASK 0x00000007L +#define DAGB7_RDCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB7_RDCLI7__URG_HIGH_MASK 0x000000F0L +#define DAGB7_RDCLI7__URG_LOW_MASK 0x00000F00L +#define DAGB7_RDCLI7__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB7_RDCLI7__MAX_BW_MASK 0x001FE000L +#define DAGB7_RDCLI7__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB7_RDCLI7__MIN_BW_MASK 0x01C00000L +#define DAGB7_RDCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB7_RDCLI7__MAX_OSD_MASK 0xFC000000L +//DAGB7_RDCLI8 +#define DAGB7_RDCLI8__VIRT_CHAN__SHIFT 0x0 +#define DAGB7_RDCLI8__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB7_RDCLI8__URG_HIGH__SHIFT 0x4 +#define DAGB7_RDCLI8__URG_LOW__SHIFT 0x8 +#define DAGB7_RDCLI8__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB7_RDCLI8__MAX_BW__SHIFT 0xd +#define DAGB7_RDCLI8__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB7_RDCLI8__MIN_BW__SHIFT 0x16 +#define DAGB7_RDCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB7_RDCLI8__MAX_OSD__SHIFT 0x1a +#define DAGB7_RDCLI8__VIRT_CHAN_MASK 0x00000007L +#define DAGB7_RDCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB7_RDCLI8__URG_HIGH_MASK 0x000000F0L +#define DAGB7_RDCLI8__URG_LOW_MASK 0x00000F00L +#define DAGB7_RDCLI8__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB7_RDCLI8__MAX_BW_MASK 0x001FE000L +#define DAGB7_RDCLI8__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB7_RDCLI8__MIN_BW_MASK 0x01C00000L +#define DAGB7_RDCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB7_RDCLI8__MAX_OSD_MASK 
0xFC000000L +//DAGB7_RDCLI9 +#define DAGB7_RDCLI9__VIRT_CHAN__SHIFT 0x0 +#define DAGB7_RDCLI9__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB7_RDCLI9__URG_HIGH__SHIFT 0x4 +#define DAGB7_RDCLI9__URG_LOW__SHIFT 0x8 +#define DAGB7_RDCLI9__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB7_RDCLI9__MAX_BW__SHIFT 0xd +#define DAGB7_RDCLI9__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB7_RDCLI9__MIN_BW__SHIFT 0x16 +#define DAGB7_RDCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB7_RDCLI9__MAX_OSD__SHIFT 0x1a +#define DAGB7_RDCLI9__VIRT_CHAN_MASK 0x00000007L +#define DAGB7_RDCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB7_RDCLI9__URG_HIGH_MASK 0x000000F0L +#define DAGB7_RDCLI9__URG_LOW_MASK 0x00000F00L +#define DAGB7_RDCLI9__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB7_RDCLI9__MAX_BW_MASK 0x001FE000L +#define DAGB7_RDCLI9__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB7_RDCLI9__MIN_BW_MASK 0x01C00000L +#define DAGB7_RDCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB7_RDCLI9__MAX_OSD_MASK 0xFC000000L +//DAGB7_RDCLI10 +#define DAGB7_RDCLI10__VIRT_CHAN__SHIFT 0x0 +#define DAGB7_RDCLI10__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB7_RDCLI10__URG_HIGH__SHIFT 0x4 +#define DAGB7_RDCLI10__URG_LOW__SHIFT 0x8 +#define DAGB7_RDCLI10__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB7_RDCLI10__MAX_BW__SHIFT 0xd +#define DAGB7_RDCLI10__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB7_RDCLI10__MIN_BW__SHIFT 0x16 +#define DAGB7_RDCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB7_RDCLI10__MAX_OSD__SHIFT 0x1a +#define DAGB7_RDCLI10__VIRT_CHAN_MASK 0x00000007L +#define DAGB7_RDCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB7_RDCLI10__URG_HIGH_MASK 0x000000F0L +#define DAGB7_RDCLI10__URG_LOW_MASK 0x00000F00L +#define DAGB7_RDCLI10__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB7_RDCLI10__MAX_BW_MASK 0x001FE000L +#define DAGB7_RDCLI10__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB7_RDCLI10__MIN_BW_MASK 0x01C00000L +#define DAGB7_RDCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB7_RDCLI10__MAX_OSD_MASK 0xFC000000L +//DAGB7_RDCLI11 +#define DAGB7_RDCLI11__VIRT_CHAN__SHIFT 0x0 +#define DAGB7_RDCLI11__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB7_RDCLI11__URG_HIGH__SHIFT 0x4 +#define DAGB7_RDCLI11__URG_LOW__SHIFT 0x8 +#define DAGB7_RDCLI11__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB7_RDCLI11__MAX_BW__SHIFT 0xd +#define DAGB7_RDCLI11__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB7_RDCLI11__MIN_BW__SHIFT 0x16 +#define DAGB7_RDCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB7_RDCLI11__MAX_OSD__SHIFT 0x1a +#define DAGB7_RDCLI11__VIRT_CHAN_MASK 0x00000007L +#define DAGB7_RDCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB7_RDCLI11__URG_HIGH_MASK 0x000000F0L +#define DAGB7_RDCLI11__URG_LOW_MASK 0x00000F00L +#define DAGB7_RDCLI11__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB7_RDCLI11__MAX_BW_MASK 0x001FE000L +#define DAGB7_RDCLI11__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB7_RDCLI11__MIN_BW_MASK 0x01C00000L +#define DAGB7_RDCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB7_RDCLI11__MAX_OSD_MASK 0xFC000000L +//DAGB7_RDCLI12 +#define DAGB7_RDCLI12__VIRT_CHAN__SHIFT 0x0 +#define DAGB7_RDCLI12__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB7_RDCLI12__URG_HIGH__SHIFT 0x4 +#define DAGB7_RDCLI12__URG_LOW__SHIFT 0x8 +#define DAGB7_RDCLI12__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB7_RDCLI12__MAX_BW__SHIFT 0xd +#define DAGB7_RDCLI12__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB7_RDCLI12__MIN_BW__SHIFT 0x16 +#define DAGB7_RDCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB7_RDCLI12__MAX_OSD__SHIFT 0x1a +#define DAGB7_RDCLI12__VIRT_CHAN_MASK 0x00000007L 
+#define DAGB7_RDCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB7_RDCLI12__URG_HIGH_MASK 0x000000F0L +#define DAGB7_RDCLI12__URG_LOW_MASK 0x00000F00L +#define DAGB7_RDCLI12__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB7_RDCLI12__MAX_BW_MASK 0x001FE000L +#define DAGB7_RDCLI12__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB7_RDCLI12__MIN_BW_MASK 0x01C00000L +#define DAGB7_RDCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB7_RDCLI12__MAX_OSD_MASK 0xFC000000L +//DAGB7_RDCLI13 +#define DAGB7_RDCLI13__VIRT_CHAN__SHIFT 0x0 +#define DAGB7_RDCLI13__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB7_RDCLI13__URG_HIGH__SHIFT 0x4 +#define DAGB7_RDCLI13__URG_LOW__SHIFT 0x8 +#define DAGB7_RDCLI13__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB7_RDCLI13__MAX_BW__SHIFT 0xd +#define DAGB7_RDCLI13__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB7_RDCLI13__MIN_BW__SHIFT 0x16 +#define DAGB7_RDCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB7_RDCLI13__MAX_OSD__SHIFT 0x1a +#define DAGB7_RDCLI13__VIRT_CHAN_MASK 0x00000007L +#define DAGB7_RDCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB7_RDCLI13__URG_HIGH_MASK 0x000000F0L +#define DAGB7_RDCLI13__URG_LOW_MASK 0x00000F00L +#define DAGB7_RDCLI13__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB7_RDCLI13__MAX_BW_MASK 0x001FE000L +#define DAGB7_RDCLI13__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB7_RDCLI13__MIN_BW_MASK 0x01C00000L +#define DAGB7_RDCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB7_RDCLI13__MAX_OSD_MASK 0xFC000000L +//DAGB7_RDCLI14 +#define DAGB7_RDCLI14__VIRT_CHAN__SHIFT 0x0 +#define DAGB7_RDCLI14__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB7_RDCLI14__URG_HIGH__SHIFT 0x4 +#define DAGB7_RDCLI14__URG_LOW__SHIFT 0x8 +#define DAGB7_RDCLI14__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB7_RDCLI14__MAX_BW__SHIFT 0xd +#define DAGB7_RDCLI14__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB7_RDCLI14__MIN_BW__SHIFT 0x16 +#define DAGB7_RDCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB7_RDCLI14__MAX_OSD__SHIFT 0x1a +#define DAGB7_RDCLI14__VIRT_CHAN_MASK 0x00000007L +#define DAGB7_RDCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB7_RDCLI14__URG_HIGH_MASK 0x000000F0L +#define DAGB7_RDCLI14__URG_LOW_MASK 0x00000F00L +#define DAGB7_RDCLI14__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB7_RDCLI14__MAX_BW_MASK 0x001FE000L +#define DAGB7_RDCLI14__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB7_RDCLI14__MIN_BW_MASK 0x01C00000L +#define DAGB7_RDCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB7_RDCLI14__MAX_OSD_MASK 0xFC000000L +//DAGB7_RDCLI15 +#define DAGB7_RDCLI15__VIRT_CHAN__SHIFT 0x0 +#define DAGB7_RDCLI15__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB7_RDCLI15__URG_HIGH__SHIFT 0x4 +#define DAGB7_RDCLI15__URG_LOW__SHIFT 0x8 +#define DAGB7_RDCLI15__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB7_RDCLI15__MAX_BW__SHIFT 0xd +#define DAGB7_RDCLI15__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB7_RDCLI15__MIN_BW__SHIFT 0x16 +#define DAGB7_RDCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB7_RDCLI15__MAX_OSD__SHIFT 0x1a +#define DAGB7_RDCLI15__VIRT_CHAN_MASK 0x00000007L +#define DAGB7_RDCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB7_RDCLI15__URG_HIGH_MASK 0x000000F0L +#define DAGB7_RDCLI15__URG_LOW_MASK 0x00000F00L +#define DAGB7_RDCLI15__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB7_RDCLI15__MAX_BW_MASK 0x001FE000L +#define DAGB7_RDCLI15__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB7_RDCLI15__MIN_BW_MASK 0x01C00000L +#define DAGB7_RDCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB7_RDCLI15__MAX_OSD_MASK 0xFC000000L +//DAGB7_RD_CNTL +#define DAGB7_RD_CNTL__SCLK_FREQ__SHIFT 0x0 
+#define DAGB7_RD_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4 +#define DAGB7_RD_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa +#define DAGB7_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10 +#define DAGB7_RD_CNTL__IO_LEVEL__SHIFT 0x11 +#define DAGB7_RD_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14 +#define DAGB7_RD_CNTL__SHARE_VC_NUM__SHIFT 0x17 +#define DAGB7_RD_CNTL__SCLK_FREQ_MASK 0x0000000FL +#define DAGB7_RD_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L +#define DAGB7_RD_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L +#define DAGB7_RD_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L +#define DAGB7_RD_CNTL__IO_LEVEL_MASK 0x000E0000L +#define DAGB7_RD_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L +#define DAGB7_RD_CNTL__SHARE_VC_NUM_MASK 0x03800000L +//DAGB7_RD_GMI_CNTL +#define DAGB7_RD_GMI_CNTL__EA_CREDIT__SHIFT 0x0 +#define DAGB7_RD_GMI_CNTL__LEVEL__SHIFT 0x6 +#define DAGB7_RD_GMI_CNTL__MAX_BURST__SHIFT 0x9 +#define DAGB7_RD_GMI_CNTL__LAZY_TIMER__SHIFT 0xd +#define DAGB7_RD_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL +#define DAGB7_RD_GMI_CNTL__LEVEL_MASK 0x000001C0L +#define DAGB7_RD_GMI_CNTL__MAX_BURST_MASK 0x00001E00L +#define DAGB7_RD_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L +//DAGB7_RD_ADDR_DAGB +#define DAGB7_RD_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0 +#define DAGB7_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3 +#define DAGB7_RD_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6 +#define DAGB7_RD_ADDR_DAGB__WHOAMI__SHIFT 0x7 +#define DAGB7_RD_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L +#define DAGB7_RD_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L +#define DAGB7_RD_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L +#define DAGB7_RD_ADDR_DAGB__WHOAMI_MASK 0x00001F80L +//DAGB7_RD_OUTPUT_DAGB_MAX_BURST +#define DAGB7_RD_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0 +#define DAGB7_RD_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4 +#define DAGB7_RD_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8 +#define DAGB7_RD_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc +#define DAGB7_RD_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10 +#define DAGB7_RD_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14 +#define DAGB7_RD_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18 +#define DAGB7_RD_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c +#define DAGB7_RD_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL +#define DAGB7_RD_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L +#define DAGB7_RD_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L +#define DAGB7_RD_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L +#define DAGB7_RD_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L +#define DAGB7_RD_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L +#define DAGB7_RD_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L +#define DAGB7_RD_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L +//DAGB7_RD_OUTPUT_DAGB_LAZY_TIMER +#define DAGB7_RD_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0 +#define DAGB7_RD_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4 +#define DAGB7_RD_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8 +#define DAGB7_RD_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc +#define DAGB7_RD_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10 +#define DAGB7_RD_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14 +#define DAGB7_RD_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18 +#define DAGB7_RD_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c +#define DAGB7_RD_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL +#define DAGB7_RD_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L +#define DAGB7_RD_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L +#define DAGB7_RD_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L +#define DAGB7_RD_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L +#define DAGB7_RD_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L +#define DAGB7_RD_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L +#define 
DAGB7_RD_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L +//DAGB7_RD_CGTT_CLK_CTRL +#define DAGB7_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB7_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB7_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB7_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB7_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB7_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB7_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB7_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB7_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB7_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB7_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB7_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB7_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB7_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB7_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB7_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB7_L1TLB_RD_CGTT_CLK_CTRL +#define DAGB7_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB7_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB7_L1TLB_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB7_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB7_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB7_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB7_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB7_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB7_L1TLB_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB7_L1TLB_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB7_L1TLB_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB7_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB7_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB7_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB7_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB7_L1TLB_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB7_ATCVM_RD_CGTT_CLK_CTRL +#define DAGB7_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB7_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB7_ATCVM_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB7_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB7_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB7_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB7_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB7_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB7_ATCVM_RD_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB7_ATCVM_RD_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB7_ATCVM_RD_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB7_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB7_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB7_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB7_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB7_ATCVM_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB7_RD_ADDR_DAGB_MAX_BURST0 +#define DAGB7_RD_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0 +#define DAGB7_RD_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4 
+#define DAGB7_RD_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8 +#define DAGB7_RD_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc +#define DAGB7_RD_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10 +#define DAGB7_RD_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14 +#define DAGB7_RD_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18 +#define DAGB7_RD_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c +#define DAGB7_RD_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL +#define DAGB7_RD_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L +#define DAGB7_RD_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L +#define DAGB7_RD_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L +#define DAGB7_RD_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L +#define DAGB7_RD_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L +#define DAGB7_RD_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L +#define DAGB7_RD_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L +//DAGB7_RD_ADDR_DAGB_LAZY_TIMER0 +#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0 +#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4 +#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8 +#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc +#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10 +#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14 +#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18 +#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c +#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL +#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L +#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L +#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L +#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L +#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L +#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L +#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L +//DAGB7_RD_ADDR_DAGB_MAX_BURST1 +#define DAGB7_RD_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0 +#define DAGB7_RD_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4 +#define DAGB7_RD_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8 +#define DAGB7_RD_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc +#define DAGB7_RD_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10 +#define DAGB7_RD_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14 +#define DAGB7_RD_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18 +#define DAGB7_RD_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c +#define DAGB7_RD_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL +#define DAGB7_RD_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L +#define DAGB7_RD_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L +#define DAGB7_RD_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L +#define DAGB7_RD_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L +#define DAGB7_RD_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L +#define DAGB7_RD_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L +#define DAGB7_RD_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L +//DAGB7_RD_ADDR_DAGB_LAZY_TIMER1 +#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0 +#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4 +#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8 +#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc +#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10 +#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14 +#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18 +#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c +#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL 
+#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L +#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L +#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L +#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L +#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L +#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L +#define DAGB7_RD_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L +//DAGB7_RD_VC0_CNTL +#define DAGB7_RD_VC0_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB7_RD_VC0_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB7_RD_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB7_RD_VC0_CNTL__MAX_BW__SHIFT 0xc +#define DAGB7_RD_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB7_RD_VC0_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB7_RD_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB7_RD_VC0_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB7_RD_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB7_RD_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB7_RD_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB7_RD_VC0_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB7_RD_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB7_RD_VC0_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB7_RD_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB7_RD_VC0_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB7_RD_VC1_CNTL +#define DAGB7_RD_VC1_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB7_RD_VC1_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB7_RD_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB7_RD_VC1_CNTL__MAX_BW__SHIFT 0xc +#define DAGB7_RD_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB7_RD_VC1_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB7_RD_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB7_RD_VC1_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB7_RD_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB7_RD_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB7_RD_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB7_RD_VC1_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB7_RD_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB7_RD_VC1_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB7_RD_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB7_RD_VC1_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB7_RD_VC2_CNTL +#define DAGB7_RD_VC2_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB7_RD_VC2_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB7_RD_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB7_RD_VC2_CNTL__MAX_BW__SHIFT 0xc +#define DAGB7_RD_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB7_RD_VC2_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB7_RD_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB7_RD_VC2_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB7_RD_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB7_RD_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB7_RD_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB7_RD_VC2_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB7_RD_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB7_RD_VC2_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB7_RD_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB7_RD_VC2_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB7_RD_VC3_CNTL +#define DAGB7_RD_VC3_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB7_RD_VC3_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB7_RD_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB7_RD_VC3_CNTL__MAX_BW__SHIFT 0xc +#define DAGB7_RD_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB7_RD_VC3_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB7_RD_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB7_RD_VC3_CNTL__MAX_OSD__SHIFT 0x19 +#define 
DAGB7_RD_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB7_RD_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB7_RD_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB7_RD_VC3_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB7_RD_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB7_RD_VC3_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB7_RD_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB7_RD_VC3_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB7_RD_VC4_CNTL +#define DAGB7_RD_VC4_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB7_RD_VC4_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB7_RD_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB7_RD_VC4_CNTL__MAX_BW__SHIFT 0xc +#define DAGB7_RD_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB7_RD_VC4_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB7_RD_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB7_RD_VC4_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB7_RD_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB7_RD_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB7_RD_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB7_RD_VC4_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB7_RD_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB7_RD_VC4_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB7_RD_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB7_RD_VC4_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB7_RD_VC5_CNTL +#define DAGB7_RD_VC5_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB7_RD_VC5_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB7_RD_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB7_RD_VC5_CNTL__MAX_BW__SHIFT 0xc +#define DAGB7_RD_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB7_RD_VC5_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB7_RD_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB7_RD_VC5_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB7_RD_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB7_RD_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB7_RD_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB7_RD_VC5_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB7_RD_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB7_RD_VC5_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB7_RD_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB7_RD_VC5_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB7_RD_VC6_CNTL +#define DAGB7_RD_VC6_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB7_RD_VC6_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB7_RD_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB7_RD_VC6_CNTL__MAX_BW__SHIFT 0xc +#define DAGB7_RD_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB7_RD_VC6_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB7_RD_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB7_RD_VC6_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB7_RD_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB7_RD_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB7_RD_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB7_RD_VC6_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB7_RD_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB7_RD_VC6_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB7_RD_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB7_RD_VC6_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB7_RD_VC7_CNTL +#define DAGB7_RD_VC7_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB7_RD_VC7_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB7_RD_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB7_RD_VC7_CNTL__MAX_BW__SHIFT 0xc +#define DAGB7_RD_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB7_RD_VC7_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB7_RD_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB7_RD_VC7_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB7_RD_VC7_CNTL__STOR_CREDIT_MASK 
0x0000001FL +#define DAGB7_RD_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB7_RD_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB7_RD_VC7_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB7_RD_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB7_RD_VC7_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB7_RD_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB7_RD_VC7_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB7_RD_CNTL_MISC +#define DAGB7_RD_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0 +#define DAGB7_RD_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6 +#define DAGB7_RD_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd +#define DAGB7_RD_CNTL_MISC__STOR_CC_LEGACY_MODE__SHIFT 0x13 +#define DAGB7_RD_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14 +#define DAGB7_RD_CNTL_MISC__UTCL2_CID__SHIFT 0x15 +#define DAGB7_RD_CNTL_MISC__RDRET_FIFO_CREDITS__SHIFT 0x1a +#define DAGB7_RD_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL +#define DAGB7_RD_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L +#define DAGB7_RD_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L +#define DAGB7_RD_CNTL_MISC__STOR_CC_LEGACY_MODE_MASK 0x00080000L +#define DAGB7_RD_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L +#define DAGB7_RD_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L +#define DAGB7_RD_CNTL_MISC__RDRET_FIFO_CREDITS_MASK 0xFC000000L +//DAGB7_RD_TLB_CREDIT +#define DAGB7_RD_TLB_CREDIT__TLB0__SHIFT 0x0 +#define DAGB7_RD_TLB_CREDIT__TLB1__SHIFT 0x5 +#define DAGB7_RD_TLB_CREDIT__TLB2__SHIFT 0xa +#define DAGB7_RD_TLB_CREDIT__TLB3__SHIFT 0xf +#define DAGB7_RD_TLB_CREDIT__TLB4__SHIFT 0x14 +#define DAGB7_RD_TLB_CREDIT__TLB5__SHIFT 0x19 +#define DAGB7_RD_TLB_CREDIT__TLB0_MASK 0x0000001FL +#define DAGB7_RD_TLB_CREDIT__TLB1_MASK 0x000003E0L +#define DAGB7_RD_TLB_CREDIT__TLB2_MASK 0x00007C00L +#define DAGB7_RD_TLB_CREDIT__TLB3_MASK 0x000F8000L +#define DAGB7_RD_TLB_CREDIT__TLB4_MASK 0x01F00000L +#define DAGB7_RD_TLB_CREDIT__TLB5_MASK 0x3E000000L +//DAGB7_RDCLI_ASK_PENDING +#define DAGB7_RDCLI_ASK_PENDING__BUSY__SHIFT 0x0 +#define DAGB7_RDCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB7_RDCLI_GO_PENDING +#define DAGB7_RDCLI_GO_PENDING__BUSY__SHIFT 0x0 +#define DAGB7_RDCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB7_RDCLI_GBLSEND_PENDING +#define DAGB7_RDCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0 +#define DAGB7_RDCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB7_RDCLI_TLB_PENDING +#define DAGB7_RDCLI_TLB_PENDING__BUSY__SHIFT 0x0 +#define DAGB7_RDCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB7_RDCLI_OARB_PENDING +#define DAGB7_RDCLI_OARB_PENDING__BUSY__SHIFT 0x0 +#define DAGB7_RDCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB7_RDCLI_OSD_PENDING +#define DAGB7_RDCLI_OSD_PENDING__BUSY__SHIFT 0x0 +#define DAGB7_RDCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB7_WRCLI0 +#define DAGB7_WRCLI0__VIRT_CHAN__SHIFT 0x0 +#define DAGB7_WRCLI0__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB7_WRCLI0__URG_HIGH__SHIFT 0x4 +#define DAGB7_WRCLI0__URG_LOW__SHIFT 0x8 +#define DAGB7_WRCLI0__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB7_WRCLI0__MAX_BW__SHIFT 0xd +#define DAGB7_WRCLI0__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB7_WRCLI0__MIN_BW__SHIFT 0x16 +#define DAGB7_WRCLI0__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB7_WRCLI0__MAX_OSD__SHIFT 0x1a +#define DAGB7_WRCLI0__VIRT_CHAN_MASK 0x00000007L +#define DAGB7_WRCLI0__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB7_WRCLI0__URG_HIGH_MASK 0x000000F0L +#define DAGB7_WRCLI0__URG_LOW_MASK 0x00000F00L +#define DAGB7_WRCLI0__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB7_WRCLI0__MAX_BW_MASK 0x001FE000L +#define DAGB7_WRCLI0__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB7_WRCLI0__MIN_BW_MASK 
0x01C00000L +#define DAGB7_WRCLI0__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB7_WRCLI0__MAX_OSD_MASK 0xFC000000L +//DAGB7_WRCLI1 +#define DAGB7_WRCLI1__VIRT_CHAN__SHIFT 0x0 +#define DAGB7_WRCLI1__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB7_WRCLI1__URG_HIGH__SHIFT 0x4 +#define DAGB7_WRCLI1__URG_LOW__SHIFT 0x8 +#define DAGB7_WRCLI1__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB7_WRCLI1__MAX_BW__SHIFT 0xd +#define DAGB7_WRCLI1__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB7_WRCLI1__MIN_BW__SHIFT 0x16 +#define DAGB7_WRCLI1__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB7_WRCLI1__MAX_OSD__SHIFT 0x1a +#define DAGB7_WRCLI1__VIRT_CHAN_MASK 0x00000007L +#define DAGB7_WRCLI1__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB7_WRCLI1__URG_HIGH_MASK 0x000000F0L +#define DAGB7_WRCLI1__URG_LOW_MASK 0x00000F00L +#define DAGB7_WRCLI1__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB7_WRCLI1__MAX_BW_MASK 0x001FE000L +#define DAGB7_WRCLI1__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB7_WRCLI1__MIN_BW_MASK 0x01C00000L +#define DAGB7_WRCLI1__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB7_WRCLI1__MAX_OSD_MASK 0xFC000000L +//DAGB7_WRCLI2 +#define DAGB7_WRCLI2__VIRT_CHAN__SHIFT 0x0 +#define DAGB7_WRCLI2__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB7_WRCLI2__URG_HIGH__SHIFT 0x4 +#define DAGB7_WRCLI2__URG_LOW__SHIFT 0x8 +#define DAGB7_WRCLI2__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB7_WRCLI2__MAX_BW__SHIFT 0xd +#define DAGB7_WRCLI2__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB7_WRCLI2__MIN_BW__SHIFT 0x16 +#define DAGB7_WRCLI2__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB7_WRCLI2__MAX_OSD__SHIFT 0x1a +#define DAGB7_WRCLI2__VIRT_CHAN_MASK 0x00000007L +#define DAGB7_WRCLI2__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB7_WRCLI2__URG_HIGH_MASK 0x000000F0L +#define DAGB7_WRCLI2__URG_LOW_MASK 0x00000F00L +#define DAGB7_WRCLI2__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB7_WRCLI2__MAX_BW_MASK 0x001FE000L +#define DAGB7_WRCLI2__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB7_WRCLI2__MIN_BW_MASK 0x01C00000L +#define DAGB7_WRCLI2__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB7_WRCLI2__MAX_OSD_MASK 0xFC000000L +//DAGB7_WRCLI3 +#define DAGB7_WRCLI3__VIRT_CHAN__SHIFT 0x0 +#define DAGB7_WRCLI3__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB7_WRCLI3__URG_HIGH__SHIFT 0x4 +#define DAGB7_WRCLI3__URG_LOW__SHIFT 0x8 +#define DAGB7_WRCLI3__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB7_WRCLI3__MAX_BW__SHIFT 0xd +#define DAGB7_WRCLI3__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB7_WRCLI3__MIN_BW__SHIFT 0x16 +#define DAGB7_WRCLI3__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB7_WRCLI3__MAX_OSD__SHIFT 0x1a +#define DAGB7_WRCLI3__VIRT_CHAN_MASK 0x00000007L +#define DAGB7_WRCLI3__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB7_WRCLI3__URG_HIGH_MASK 0x000000F0L +#define DAGB7_WRCLI3__URG_LOW_MASK 0x00000F00L +#define DAGB7_WRCLI3__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB7_WRCLI3__MAX_BW_MASK 0x001FE000L +#define DAGB7_WRCLI3__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB7_WRCLI3__MIN_BW_MASK 0x01C00000L +#define DAGB7_WRCLI3__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB7_WRCLI3__MAX_OSD_MASK 0xFC000000L +//DAGB7_WRCLI4 +#define DAGB7_WRCLI4__VIRT_CHAN__SHIFT 0x0 +#define DAGB7_WRCLI4__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB7_WRCLI4__URG_HIGH__SHIFT 0x4 +#define DAGB7_WRCLI4__URG_LOW__SHIFT 0x8 +#define DAGB7_WRCLI4__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB7_WRCLI4__MAX_BW__SHIFT 0xd +#define DAGB7_WRCLI4__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB7_WRCLI4__MIN_BW__SHIFT 0x16 +#define DAGB7_WRCLI4__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB7_WRCLI4__MAX_OSD__SHIFT 0x1a 
+#define DAGB7_WRCLI4__VIRT_CHAN_MASK 0x00000007L +#define DAGB7_WRCLI4__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB7_WRCLI4__URG_HIGH_MASK 0x000000F0L +#define DAGB7_WRCLI4__URG_LOW_MASK 0x00000F00L +#define DAGB7_WRCLI4__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB7_WRCLI4__MAX_BW_MASK 0x001FE000L +#define DAGB7_WRCLI4__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB7_WRCLI4__MIN_BW_MASK 0x01C00000L +#define DAGB7_WRCLI4__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB7_WRCLI4__MAX_OSD_MASK 0xFC000000L +//DAGB7_WRCLI5 +#define DAGB7_WRCLI5__VIRT_CHAN__SHIFT 0x0 +#define DAGB7_WRCLI5__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB7_WRCLI5__URG_HIGH__SHIFT 0x4 +#define DAGB7_WRCLI5__URG_LOW__SHIFT 0x8 +#define DAGB7_WRCLI5__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB7_WRCLI5__MAX_BW__SHIFT 0xd +#define DAGB7_WRCLI5__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB7_WRCLI5__MIN_BW__SHIFT 0x16 +#define DAGB7_WRCLI5__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB7_WRCLI5__MAX_OSD__SHIFT 0x1a +#define DAGB7_WRCLI5__VIRT_CHAN_MASK 0x00000007L +#define DAGB7_WRCLI5__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB7_WRCLI5__URG_HIGH_MASK 0x000000F0L +#define DAGB7_WRCLI5__URG_LOW_MASK 0x00000F00L +#define DAGB7_WRCLI5__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB7_WRCLI5__MAX_BW_MASK 0x001FE000L +#define DAGB7_WRCLI5__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB7_WRCLI5__MIN_BW_MASK 0x01C00000L +#define DAGB7_WRCLI5__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB7_WRCLI5__MAX_OSD_MASK 0xFC000000L +//DAGB7_WRCLI6 +#define DAGB7_WRCLI6__VIRT_CHAN__SHIFT 0x0 +#define DAGB7_WRCLI6__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB7_WRCLI6__URG_HIGH__SHIFT 0x4 +#define DAGB7_WRCLI6__URG_LOW__SHIFT 0x8 +#define DAGB7_WRCLI6__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB7_WRCLI6__MAX_BW__SHIFT 0xd +#define DAGB7_WRCLI6__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB7_WRCLI6__MIN_BW__SHIFT 0x16 +#define DAGB7_WRCLI6__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB7_WRCLI6__MAX_OSD__SHIFT 0x1a +#define DAGB7_WRCLI6__VIRT_CHAN_MASK 0x00000007L +#define DAGB7_WRCLI6__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB7_WRCLI6__URG_HIGH_MASK 0x000000F0L +#define DAGB7_WRCLI6__URG_LOW_MASK 0x00000F00L +#define DAGB7_WRCLI6__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB7_WRCLI6__MAX_BW_MASK 0x001FE000L +#define DAGB7_WRCLI6__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB7_WRCLI6__MIN_BW_MASK 0x01C00000L +#define DAGB7_WRCLI6__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB7_WRCLI6__MAX_OSD_MASK 0xFC000000L +//DAGB7_WRCLI7 +#define DAGB7_WRCLI7__VIRT_CHAN__SHIFT 0x0 +#define DAGB7_WRCLI7__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB7_WRCLI7__URG_HIGH__SHIFT 0x4 +#define DAGB7_WRCLI7__URG_LOW__SHIFT 0x8 +#define DAGB7_WRCLI7__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB7_WRCLI7__MAX_BW__SHIFT 0xd +#define DAGB7_WRCLI7__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB7_WRCLI7__MIN_BW__SHIFT 0x16 +#define DAGB7_WRCLI7__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB7_WRCLI7__MAX_OSD__SHIFT 0x1a +#define DAGB7_WRCLI7__VIRT_CHAN_MASK 0x00000007L +#define DAGB7_WRCLI7__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB7_WRCLI7__URG_HIGH_MASK 0x000000F0L +#define DAGB7_WRCLI7__URG_LOW_MASK 0x00000F00L +#define DAGB7_WRCLI7__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB7_WRCLI7__MAX_BW_MASK 0x001FE000L +#define DAGB7_WRCLI7__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB7_WRCLI7__MIN_BW_MASK 0x01C00000L +#define DAGB7_WRCLI7__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB7_WRCLI7__MAX_OSD_MASK 0xFC000000L +//DAGB7_WRCLI8 +#define DAGB7_WRCLI8__VIRT_CHAN__SHIFT 0x0 +#define 
DAGB7_WRCLI8__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB7_WRCLI8__URG_HIGH__SHIFT 0x4 +#define DAGB7_WRCLI8__URG_LOW__SHIFT 0x8 +#define DAGB7_WRCLI8__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB7_WRCLI8__MAX_BW__SHIFT 0xd +#define DAGB7_WRCLI8__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB7_WRCLI8__MIN_BW__SHIFT 0x16 +#define DAGB7_WRCLI8__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB7_WRCLI8__MAX_OSD__SHIFT 0x1a +#define DAGB7_WRCLI8__VIRT_CHAN_MASK 0x00000007L +#define DAGB7_WRCLI8__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB7_WRCLI8__URG_HIGH_MASK 0x000000F0L +#define DAGB7_WRCLI8__URG_LOW_MASK 0x00000F00L +#define DAGB7_WRCLI8__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB7_WRCLI8__MAX_BW_MASK 0x001FE000L +#define DAGB7_WRCLI8__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB7_WRCLI8__MIN_BW_MASK 0x01C00000L +#define DAGB7_WRCLI8__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB7_WRCLI8__MAX_OSD_MASK 0xFC000000L +//DAGB7_WRCLI9 +#define DAGB7_WRCLI9__VIRT_CHAN__SHIFT 0x0 +#define DAGB7_WRCLI9__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB7_WRCLI9__URG_HIGH__SHIFT 0x4 +#define DAGB7_WRCLI9__URG_LOW__SHIFT 0x8 +#define DAGB7_WRCLI9__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB7_WRCLI9__MAX_BW__SHIFT 0xd +#define DAGB7_WRCLI9__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB7_WRCLI9__MIN_BW__SHIFT 0x16 +#define DAGB7_WRCLI9__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB7_WRCLI9__MAX_OSD__SHIFT 0x1a +#define DAGB7_WRCLI9__VIRT_CHAN_MASK 0x00000007L +#define DAGB7_WRCLI9__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB7_WRCLI9__URG_HIGH_MASK 0x000000F0L +#define DAGB7_WRCLI9__URG_LOW_MASK 0x00000F00L +#define DAGB7_WRCLI9__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB7_WRCLI9__MAX_BW_MASK 0x001FE000L +#define DAGB7_WRCLI9__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB7_WRCLI9__MIN_BW_MASK 0x01C00000L +#define DAGB7_WRCLI9__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB7_WRCLI9__MAX_OSD_MASK 0xFC000000L +//DAGB7_WRCLI10 +#define DAGB7_WRCLI10__VIRT_CHAN__SHIFT 0x0 +#define DAGB7_WRCLI10__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB7_WRCLI10__URG_HIGH__SHIFT 0x4 +#define DAGB7_WRCLI10__URG_LOW__SHIFT 0x8 +#define DAGB7_WRCLI10__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB7_WRCLI10__MAX_BW__SHIFT 0xd +#define DAGB7_WRCLI10__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB7_WRCLI10__MIN_BW__SHIFT 0x16 +#define DAGB7_WRCLI10__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB7_WRCLI10__MAX_OSD__SHIFT 0x1a +#define DAGB7_WRCLI10__VIRT_CHAN_MASK 0x00000007L +#define DAGB7_WRCLI10__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB7_WRCLI10__URG_HIGH_MASK 0x000000F0L +#define DAGB7_WRCLI10__URG_LOW_MASK 0x00000F00L +#define DAGB7_WRCLI10__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB7_WRCLI10__MAX_BW_MASK 0x001FE000L +#define DAGB7_WRCLI10__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB7_WRCLI10__MIN_BW_MASK 0x01C00000L +#define DAGB7_WRCLI10__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB7_WRCLI10__MAX_OSD_MASK 0xFC000000L +//DAGB7_WRCLI11 +#define DAGB7_WRCLI11__VIRT_CHAN__SHIFT 0x0 +#define DAGB7_WRCLI11__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB7_WRCLI11__URG_HIGH__SHIFT 0x4 +#define DAGB7_WRCLI11__URG_LOW__SHIFT 0x8 +#define DAGB7_WRCLI11__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB7_WRCLI11__MAX_BW__SHIFT 0xd +#define DAGB7_WRCLI11__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB7_WRCLI11__MIN_BW__SHIFT 0x16 +#define DAGB7_WRCLI11__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB7_WRCLI11__MAX_OSD__SHIFT 0x1a +#define DAGB7_WRCLI11__VIRT_CHAN_MASK 0x00000007L +#define DAGB7_WRCLI11__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB7_WRCLI11__URG_HIGH_MASK 0x000000F0L 
+#define DAGB7_WRCLI11__URG_LOW_MASK 0x00000F00L +#define DAGB7_WRCLI11__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB7_WRCLI11__MAX_BW_MASK 0x001FE000L +#define DAGB7_WRCLI11__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB7_WRCLI11__MIN_BW_MASK 0x01C00000L +#define DAGB7_WRCLI11__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB7_WRCLI11__MAX_OSD_MASK 0xFC000000L +//DAGB7_WRCLI12 +#define DAGB7_WRCLI12__VIRT_CHAN__SHIFT 0x0 +#define DAGB7_WRCLI12__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB7_WRCLI12__URG_HIGH__SHIFT 0x4 +#define DAGB7_WRCLI12__URG_LOW__SHIFT 0x8 +#define DAGB7_WRCLI12__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB7_WRCLI12__MAX_BW__SHIFT 0xd +#define DAGB7_WRCLI12__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB7_WRCLI12__MIN_BW__SHIFT 0x16 +#define DAGB7_WRCLI12__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB7_WRCLI12__MAX_OSD__SHIFT 0x1a +#define DAGB7_WRCLI12__VIRT_CHAN_MASK 0x00000007L +#define DAGB7_WRCLI12__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB7_WRCLI12__URG_HIGH_MASK 0x000000F0L +#define DAGB7_WRCLI12__URG_LOW_MASK 0x00000F00L +#define DAGB7_WRCLI12__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB7_WRCLI12__MAX_BW_MASK 0x001FE000L +#define DAGB7_WRCLI12__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB7_WRCLI12__MIN_BW_MASK 0x01C00000L +#define DAGB7_WRCLI12__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB7_WRCLI12__MAX_OSD_MASK 0xFC000000L +//DAGB7_WRCLI13 +#define DAGB7_WRCLI13__VIRT_CHAN__SHIFT 0x0 +#define DAGB7_WRCLI13__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB7_WRCLI13__URG_HIGH__SHIFT 0x4 +#define DAGB7_WRCLI13__URG_LOW__SHIFT 0x8 +#define DAGB7_WRCLI13__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB7_WRCLI13__MAX_BW__SHIFT 0xd +#define DAGB7_WRCLI13__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB7_WRCLI13__MIN_BW__SHIFT 0x16 +#define DAGB7_WRCLI13__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB7_WRCLI13__MAX_OSD__SHIFT 0x1a +#define DAGB7_WRCLI13__VIRT_CHAN_MASK 0x00000007L +#define DAGB7_WRCLI13__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB7_WRCLI13__URG_HIGH_MASK 0x000000F0L +#define DAGB7_WRCLI13__URG_LOW_MASK 0x00000F00L +#define DAGB7_WRCLI13__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB7_WRCLI13__MAX_BW_MASK 0x001FE000L +#define DAGB7_WRCLI13__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB7_WRCLI13__MIN_BW_MASK 0x01C00000L +#define DAGB7_WRCLI13__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB7_WRCLI13__MAX_OSD_MASK 0xFC000000L +//DAGB7_WRCLI14 +#define DAGB7_WRCLI14__VIRT_CHAN__SHIFT 0x0 +#define DAGB7_WRCLI14__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB7_WRCLI14__URG_HIGH__SHIFT 0x4 +#define DAGB7_WRCLI14__URG_LOW__SHIFT 0x8 +#define DAGB7_WRCLI14__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB7_WRCLI14__MAX_BW__SHIFT 0xd +#define DAGB7_WRCLI14__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB7_WRCLI14__MIN_BW__SHIFT 0x16 +#define DAGB7_WRCLI14__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB7_WRCLI14__MAX_OSD__SHIFT 0x1a +#define DAGB7_WRCLI14__VIRT_CHAN_MASK 0x00000007L +#define DAGB7_WRCLI14__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB7_WRCLI14__URG_HIGH_MASK 0x000000F0L +#define DAGB7_WRCLI14__URG_LOW_MASK 0x00000F00L +#define DAGB7_WRCLI14__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB7_WRCLI14__MAX_BW_MASK 0x001FE000L +#define DAGB7_WRCLI14__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB7_WRCLI14__MIN_BW_MASK 0x01C00000L +#define DAGB7_WRCLI14__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB7_WRCLI14__MAX_OSD_MASK 0xFC000000L +//DAGB7_WRCLI15 +#define DAGB7_WRCLI15__VIRT_CHAN__SHIFT 0x0 +#define DAGB7_WRCLI15__CHECK_TLB_CREDIT__SHIFT 0x3 +#define DAGB7_WRCLI15__URG_HIGH__SHIFT 0x4 +#define 
DAGB7_WRCLI15__URG_LOW__SHIFT 0x8 +#define DAGB7_WRCLI15__MAX_BW_ENABLE__SHIFT 0xc +#define DAGB7_WRCLI15__MAX_BW__SHIFT 0xd +#define DAGB7_WRCLI15__MIN_BW_ENABLE__SHIFT 0x15 +#define DAGB7_WRCLI15__MIN_BW__SHIFT 0x16 +#define DAGB7_WRCLI15__OSD_LIMITER_ENABLE__SHIFT 0x19 +#define DAGB7_WRCLI15__MAX_OSD__SHIFT 0x1a +#define DAGB7_WRCLI15__VIRT_CHAN_MASK 0x00000007L +#define DAGB7_WRCLI15__CHECK_TLB_CREDIT_MASK 0x00000008L +#define DAGB7_WRCLI15__URG_HIGH_MASK 0x000000F0L +#define DAGB7_WRCLI15__URG_LOW_MASK 0x00000F00L +#define DAGB7_WRCLI15__MAX_BW_ENABLE_MASK 0x00001000L +#define DAGB7_WRCLI15__MAX_BW_MASK 0x001FE000L +#define DAGB7_WRCLI15__MIN_BW_ENABLE_MASK 0x00200000L +#define DAGB7_WRCLI15__MIN_BW_MASK 0x01C00000L +#define DAGB7_WRCLI15__OSD_LIMITER_ENABLE_MASK 0x02000000L +#define DAGB7_WRCLI15__MAX_OSD_MASK 0xFC000000L +//DAGB7_WR_CNTL +#define DAGB7_WR_CNTL__SCLK_FREQ__SHIFT 0x0 +#define DAGB7_WR_CNTL__CLI_MAX_BW_WINDOW__SHIFT 0x4 +#define DAGB7_WR_CNTL__VC_MAX_BW_WINDOW__SHIFT 0xa +#define DAGB7_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE__SHIFT 0x10 +#define DAGB7_WR_CNTL__IO_LEVEL__SHIFT 0x11 +#define DAGB7_WR_CNTL__IO_LEVEL_COMPLY_VC__SHIFT 0x14 +#define DAGB7_WR_CNTL__SHARE_VC_NUM__SHIFT 0x17 +#define DAGB7_WR_CNTL__SCLK_FREQ_MASK 0x0000000FL +#define DAGB7_WR_CNTL__CLI_MAX_BW_WINDOW_MASK 0x000003F0L +#define DAGB7_WR_CNTL__VC_MAX_BW_WINDOW_MASK 0x0000FC00L +#define DAGB7_WR_CNTL__IO_LEVEL_OVERRIDE_ENABLE_MASK 0x00010000L +#define DAGB7_WR_CNTL__IO_LEVEL_MASK 0x000E0000L +#define DAGB7_WR_CNTL__IO_LEVEL_COMPLY_VC_MASK 0x00700000L +#define DAGB7_WR_CNTL__SHARE_VC_NUM_MASK 0x03800000L +//DAGB7_WR_GMI_CNTL +#define DAGB7_WR_GMI_CNTL__EA_CREDIT__SHIFT 0x0 +#define DAGB7_WR_GMI_CNTL__LEVEL__SHIFT 0x6 +#define DAGB7_WR_GMI_CNTL__MAX_BURST__SHIFT 0x9 +#define DAGB7_WR_GMI_CNTL__LAZY_TIMER__SHIFT 0xd +#define DAGB7_WR_GMI_CNTL__EA_CREDIT_MASK 0x0000003FL +#define DAGB7_WR_GMI_CNTL__LEVEL_MASK 0x000001C0L +#define DAGB7_WR_GMI_CNTL__MAX_BURST_MASK 0x00001E00L +#define DAGB7_WR_GMI_CNTL__LAZY_TIMER_MASK 0x0001E000L +//DAGB7_WR_ADDR_DAGB +#define DAGB7_WR_ADDR_DAGB__DAGB_ENABLE__SHIFT 0x0 +#define DAGB7_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3 +#define DAGB7_WR_ADDR_DAGB__DISABLE_SELF_INIT__SHIFT 0x6 +#define DAGB7_WR_ADDR_DAGB__WHOAMI__SHIFT 0x7 +#define DAGB7_WR_ADDR_DAGB__DAGB_ENABLE_MASK 0x00000007L +#define DAGB7_WR_ADDR_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L +#define DAGB7_WR_ADDR_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L +#define DAGB7_WR_ADDR_DAGB__WHOAMI_MASK 0x00001F80L +//DAGB7_WR_OUTPUT_DAGB_MAX_BURST +#define DAGB7_WR_OUTPUT_DAGB_MAX_BURST__VC0__SHIFT 0x0 +#define DAGB7_WR_OUTPUT_DAGB_MAX_BURST__VC1__SHIFT 0x4 +#define DAGB7_WR_OUTPUT_DAGB_MAX_BURST__VC2__SHIFT 0x8 +#define DAGB7_WR_OUTPUT_DAGB_MAX_BURST__VC3__SHIFT 0xc +#define DAGB7_WR_OUTPUT_DAGB_MAX_BURST__VC4__SHIFT 0x10 +#define DAGB7_WR_OUTPUT_DAGB_MAX_BURST__VC5__SHIFT 0x14 +#define DAGB7_WR_OUTPUT_DAGB_MAX_BURST__VC6__SHIFT 0x18 +#define DAGB7_WR_OUTPUT_DAGB_MAX_BURST__VC7__SHIFT 0x1c +#define DAGB7_WR_OUTPUT_DAGB_MAX_BURST__VC0_MASK 0x0000000FL +#define DAGB7_WR_OUTPUT_DAGB_MAX_BURST__VC1_MASK 0x000000F0L +#define DAGB7_WR_OUTPUT_DAGB_MAX_BURST__VC2_MASK 0x00000F00L +#define DAGB7_WR_OUTPUT_DAGB_MAX_BURST__VC3_MASK 0x0000F000L +#define DAGB7_WR_OUTPUT_DAGB_MAX_BURST__VC4_MASK 0x000F0000L +#define DAGB7_WR_OUTPUT_DAGB_MAX_BURST__VC5_MASK 0x00F00000L +#define DAGB7_WR_OUTPUT_DAGB_MAX_BURST__VC6_MASK 0x0F000000L +#define DAGB7_WR_OUTPUT_DAGB_MAX_BURST__VC7_MASK 0xF0000000L +//DAGB7_WR_OUTPUT_DAGB_LAZY_TIMER +#define 
DAGB7_WR_OUTPUT_DAGB_LAZY_TIMER__VC0__SHIFT 0x0 +#define DAGB7_WR_OUTPUT_DAGB_LAZY_TIMER__VC1__SHIFT 0x4 +#define DAGB7_WR_OUTPUT_DAGB_LAZY_TIMER__VC2__SHIFT 0x8 +#define DAGB7_WR_OUTPUT_DAGB_LAZY_TIMER__VC3__SHIFT 0xc +#define DAGB7_WR_OUTPUT_DAGB_LAZY_TIMER__VC4__SHIFT 0x10 +#define DAGB7_WR_OUTPUT_DAGB_LAZY_TIMER__VC5__SHIFT 0x14 +#define DAGB7_WR_OUTPUT_DAGB_LAZY_TIMER__VC6__SHIFT 0x18 +#define DAGB7_WR_OUTPUT_DAGB_LAZY_TIMER__VC7__SHIFT 0x1c +#define DAGB7_WR_OUTPUT_DAGB_LAZY_TIMER__VC0_MASK 0x0000000FL +#define DAGB7_WR_OUTPUT_DAGB_LAZY_TIMER__VC1_MASK 0x000000F0L +#define DAGB7_WR_OUTPUT_DAGB_LAZY_TIMER__VC2_MASK 0x00000F00L +#define DAGB7_WR_OUTPUT_DAGB_LAZY_TIMER__VC3_MASK 0x0000F000L +#define DAGB7_WR_OUTPUT_DAGB_LAZY_TIMER__VC4_MASK 0x000F0000L +#define DAGB7_WR_OUTPUT_DAGB_LAZY_TIMER__VC5_MASK 0x00F00000L +#define DAGB7_WR_OUTPUT_DAGB_LAZY_TIMER__VC6_MASK 0x0F000000L +#define DAGB7_WR_OUTPUT_DAGB_LAZY_TIMER__VC7_MASK 0xF0000000L +//DAGB7_WR_CGTT_CLK_CTRL +#define DAGB7_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB7_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB7_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB7_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB7_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB7_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB7_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB7_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB7_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB7_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB7_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB7_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB7_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB7_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB7_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB7_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB7_L1TLB_WR_CGTT_CLK_CTRL +#define DAGB7_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB7_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB7_L1TLB_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB7_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB7_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define DAGB7_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB7_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB7_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB7_L1TLB_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB7_L1TLB_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB7_L1TLB_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB7_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB7_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB7_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB7_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB7_L1TLB_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB7_ATCVM_WR_CGTT_CLK_CTRL +#define DAGB7_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define DAGB7_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define DAGB7_ATCVM_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x16 +#define DAGB7_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define DAGB7_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE__SHIFT 0x1c +#define 
DAGB7_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ__SHIFT 0x1d +#define DAGB7_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN__SHIFT 0x1e +#define DAGB7_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER__SHIFT 0x1f +#define DAGB7_ATCVM_WR_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define DAGB7_ATCVM_WR_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define DAGB7_ATCVM_WR_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00400000L +#define DAGB7_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define DAGB7_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK 0x10000000L +#define DAGB7_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK 0x20000000L +#define DAGB7_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK 0x40000000L +#define DAGB7_ATCVM_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK 0x80000000L +//DAGB7_WR_ADDR_DAGB_MAX_BURST0 +#define DAGB7_WR_ADDR_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0 +#define DAGB7_WR_ADDR_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4 +#define DAGB7_WR_ADDR_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8 +#define DAGB7_WR_ADDR_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc +#define DAGB7_WR_ADDR_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10 +#define DAGB7_WR_ADDR_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14 +#define DAGB7_WR_ADDR_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18 +#define DAGB7_WR_ADDR_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c +#define DAGB7_WR_ADDR_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL +#define DAGB7_WR_ADDR_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L +#define DAGB7_WR_ADDR_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L +#define DAGB7_WR_ADDR_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L +#define DAGB7_WR_ADDR_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L +#define DAGB7_WR_ADDR_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L +#define DAGB7_WR_ADDR_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L +#define DAGB7_WR_ADDR_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L +//DAGB7_WR_ADDR_DAGB_LAZY_TIMER0 +#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0 +#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4 +#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8 +#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc +#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10 +#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14 +#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18 +#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c +#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL +#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L +#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L +#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L +#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L +#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L +#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L +#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L +//DAGB7_WR_ADDR_DAGB_MAX_BURST1 +#define DAGB7_WR_ADDR_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0 +#define DAGB7_WR_ADDR_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4 +#define DAGB7_WR_ADDR_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8 +#define DAGB7_WR_ADDR_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc +#define DAGB7_WR_ADDR_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10 +#define DAGB7_WR_ADDR_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14 +#define DAGB7_WR_ADDR_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18 +#define DAGB7_WR_ADDR_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c +#define DAGB7_WR_ADDR_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL +#define DAGB7_WR_ADDR_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L +#define DAGB7_WR_ADDR_DAGB_MAX_BURST1__CLIENT10_MASK 
0x00000F00L +#define DAGB7_WR_ADDR_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L +#define DAGB7_WR_ADDR_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L +#define DAGB7_WR_ADDR_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L +#define DAGB7_WR_ADDR_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L +#define DAGB7_WR_ADDR_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L +//DAGB7_WR_ADDR_DAGB_LAZY_TIMER1 +#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0 +#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4 +#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8 +#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc +#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10 +#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14 +#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18 +#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c +#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL +#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L +#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L +#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L +#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L +#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L +#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L +#define DAGB7_WR_ADDR_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L +//DAGB7_WR_DATA_DAGB +#define DAGB7_WR_DATA_DAGB__DAGB_ENABLE__SHIFT 0x0 +#define DAGB7_WR_DATA_DAGB__ENABLE_JUMP_AHEAD__SHIFT 0x3 +#define DAGB7_WR_DATA_DAGB__DISABLE_SELF_INIT__SHIFT 0x6 +#define DAGB7_WR_DATA_DAGB__WHOAMI__SHIFT 0x7 +#define DAGB7_WR_DATA_DAGB__DAGB_ENABLE_MASK 0x00000007L +#define DAGB7_WR_DATA_DAGB__ENABLE_JUMP_AHEAD_MASK 0x00000038L +#define DAGB7_WR_DATA_DAGB__DISABLE_SELF_INIT_MASK 0x00000040L +#define DAGB7_WR_DATA_DAGB__WHOAMI_MASK 0x00001F80L +//DAGB7_WR_DATA_DAGB_MAX_BURST0 +#define DAGB7_WR_DATA_DAGB_MAX_BURST0__CLIENT0__SHIFT 0x0 +#define DAGB7_WR_DATA_DAGB_MAX_BURST0__CLIENT1__SHIFT 0x4 +#define DAGB7_WR_DATA_DAGB_MAX_BURST0__CLIENT2__SHIFT 0x8 +#define DAGB7_WR_DATA_DAGB_MAX_BURST0__CLIENT3__SHIFT 0xc +#define DAGB7_WR_DATA_DAGB_MAX_BURST0__CLIENT4__SHIFT 0x10 +#define DAGB7_WR_DATA_DAGB_MAX_BURST0__CLIENT5__SHIFT 0x14 +#define DAGB7_WR_DATA_DAGB_MAX_BURST0__CLIENT6__SHIFT 0x18 +#define DAGB7_WR_DATA_DAGB_MAX_BURST0__CLIENT7__SHIFT 0x1c +#define DAGB7_WR_DATA_DAGB_MAX_BURST0__CLIENT0_MASK 0x0000000FL +#define DAGB7_WR_DATA_DAGB_MAX_BURST0__CLIENT1_MASK 0x000000F0L +#define DAGB7_WR_DATA_DAGB_MAX_BURST0__CLIENT2_MASK 0x00000F00L +#define DAGB7_WR_DATA_DAGB_MAX_BURST0__CLIENT3_MASK 0x0000F000L +#define DAGB7_WR_DATA_DAGB_MAX_BURST0__CLIENT4_MASK 0x000F0000L +#define DAGB7_WR_DATA_DAGB_MAX_BURST0__CLIENT5_MASK 0x00F00000L +#define DAGB7_WR_DATA_DAGB_MAX_BURST0__CLIENT6_MASK 0x0F000000L +#define DAGB7_WR_DATA_DAGB_MAX_BURST0__CLIENT7_MASK 0xF0000000L +//DAGB7_WR_DATA_DAGB_LAZY_TIMER0 +#define DAGB7_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0__SHIFT 0x0 +#define DAGB7_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1__SHIFT 0x4 +#define DAGB7_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2__SHIFT 0x8 +#define DAGB7_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3__SHIFT 0xc +#define DAGB7_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4__SHIFT 0x10 +#define DAGB7_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5__SHIFT 0x14 +#define DAGB7_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6__SHIFT 0x18 +#define DAGB7_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7__SHIFT 0x1c +#define DAGB7_WR_DATA_DAGB_LAZY_TIMER0__CLIENT0_MASK 0x0000000FL +#define DAGB7_WR_DATA_DAGB_LAZY_TIMER0__CLIENT1_MASK 0x000000F0L 
+#define DAGB7_WR_DATA_DAGB_LAZY_TIMER0__CLIENT2_MASK 0x00000F00L +#define DAGB7_WR_DATA_DAGB_LAZY_TIMER0__CLIENT3_MASK 0x0000F000L +#define DAGB7_WR_DATA_DAGB_LAZY_TIMER0__CLIENT4_MASK 0x000F0000L +#define DAGB7_WR_DATA_DAGB_LAZY_TIMER0__CLIENT5_MASK 0x00F00000L +#define DAGB7_WR_DATA_DAGB_LAZY_TIMER0__CLIENT6_MASK 0x0F000000L +#define DAGB7_WR_DATA_DAGB_LAZY_TIMER0__CLIENT7_MASK 0xF0000000L +//DAGB7_WR_DATA_DAGB_MAX_BURST1 +#define DAGB7_WR_DATA_DAGB_MAX_BURST1__CLIENT8__SHIFT 0x0 +#define DAGB7_WR_DATA_DAGB_MAX_BURST1__CLIENT9__SHIFT 0x4 +#define DAGB7_WR_DATA_DAGB_MAX_BURST1__CLIENT10__SHIFT 0x8 +#define DAGB7_WR_DATA_DAGB_MAX_BURST1__CLIENT11__SHIFT 0xc +#define DAGB7_WR_DATA_DAGB_MAX_BURST1__CLIENT12__SHIFT 0x10 +#define DAGB7_WR_DATA_DAGB_MAX_BURST1__CLIENT13__SHIFT 0x14 +#define DAGB7_WR_DATA_DAGB_MAX_BURST1__CLIENT14__SHIFT 0x18 +#define DAGB7_WR_DATA_DAGB_MAX_BURST1__CLIENT15__SHIFT 0x1c +#define DAGB7_WR_DATA_DAGB_MAX_BURST1__CLIENT8_MASK 0x0000000FL +#define DAGB7_WR_DATA_DAGB_MAX_BURST1__CLIENT9_MASK 0x000000F0L +#define DAGB7_WR_DATA_DAGB_MAX_BURST1__CLIENT10_MASK 0x00000F00L +#define DAGB7_WR_DATA_DAGB_MAX_BURST1__CLIENT11_MASK 0x0000F000L +#define DAGB7_WR_DATA_DAGB_MAX_BURST1__CLIENT12_MASK 0x000F0000L +#define DAGB7_WR_DATA_DAGB_MAX_BURST1__CLIENT13_MASK 0x00F00000L +#define DAGB7_WR_DATA_DAGB_MAX_BURST1__CLIENT14_MASK 0x0F000000L +#define DAGB7_WR_DATA_DAGB_MAX_BURST1__CLIENT15_MASK 0xF0000000L +//DAGB7_WR_DATA_DAGB_LAZY_TIMER1 +#define DAGB7_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8__SHIFT 0x0 +#define DAGB7_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9__SHIFT 0x4 +#define DAGB7_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10__SHIFT 0x8 +#define DAGB7_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11__SHIFT 0xc +#define DAGB7_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12__SHIFT 0x10 +#define DAGB7_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13__SHIFT 0x14 +#define DAGB7_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14__SHIFT 0x18 +#define DAGB7_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15__SHIFT 0x1c +#define DAGB7_WR_DATA_DAGB_LAZY_TIMER1__CLIENT8_MASK 0x0000000FL +#define DAGB7_WR_DATA_DAGB_LAZY_TIMER1__CLIENT9_MASK 0x000000F0L +#define DAGB7_WR_DATA_DAGB_LAZY_TIMER1__CLIENT10_MASK 0x00000F00L +#define DAGB7_WR_DATA_DAGB_LAZY_TIMER1__CLIENT11_MASK 0x0000F000L +#define DAGB7_WR_DATA_DAGB_LAZY_TIMER1__CLIENT12_MASK 0x000F0000L +#define DAGB7_WR_DATA_DAGB_LAZY_TIMER1__CLIENT13_MASK 0x00F00000L +#define DAGB7_WR_DATA_DAGB_LAZY_TIMER1__CLIENT14_MASK 0x0F000000L +#define DAGB7_WR_DATA_DAGB_LAZY_TIMER1__CLIENT15_MASK 0xF0000000L +//DAGB7_WR_VC0_CNTL +#define DAGB7_WR_VC0_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB7_WR_VC0_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB7_WR_VC0_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB7_WR_VC0_CNTL__MAX_BW__SHIFT 0xc +#define DAGB7_WR_VC0_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB7_WR_VC0_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB7_WR_VC0_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB7_WR_VC0_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB7_WR_VC0_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB7_WR_VC0_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB7_WR_VC0_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB7_WR_VC0_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB7_WR_VC0_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB7_WR_VC0_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB7_WR_VC0_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB7_WR_VC0_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB7_WR_VC1_CNTL +#define DAGB7_WR_VC1_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB7_WR_VC1_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB7_WR_VC1_CNTL__MAX_BW_ENABLE__SHIFT 0xb 
+#define DAGB7_WR_VC1_CNTL__MAX_BW__SHIFT 0xc +#define DAGB7_WR_VC1_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB7_WR_VC1_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB7_WR_VC1_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB7_WR_VC1_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB7_WR_VC1_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB7_WR_VC1_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB7_WR_VC1_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB7_WR_VC1_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB7_WR_VC1_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB7_WR_VC1_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB7_WR_VC1_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB7_WR_VC1_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB7_WR_VC2_CNTL +#define DAGB7_WR_VC2_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB7_WR_VC2_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB7_WR_VC2_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB7_WR_VC2_CNTL__MAX_BW__SHIFT 0xc +#define DAGB7_WR_VC2_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB7_WR_VC2_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB7_WR_VC2_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB7_WR_VC2_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB7_WR_VC2_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB7_WR_VC2_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB7_WR_VC2_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB7_WR_VC2_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB7_WR_VC2_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB7_WR_VC2_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB7_WR_VC2_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB7_WR_VC2_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB7_WR_VC3_CNTL +#define DAGB7_WR_VC3_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB7_WR_VC3_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB7_WR_VC3_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB7_WR_VC3_CNTL__MAX_BW__SHIFT 0xc +#define DAGB7_WR_VC3_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB7_WR_VC3_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB7_WR_VC3_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB7_WR_VC3_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB7_WR_VC3_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB7_WR_VC3_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB7_WR_VC3_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB7_WR_VC3_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB7_WR_VC3_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB7_WR_VC3_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB7_WR_VC3_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB7_WR_VC3_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB7_WR_VC4_CNTL +#define DAGB7_WR_VC4_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB7_WR_VC4_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB7_WR_VC4_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB7_WR_VC4_CNTL__MAX_BW__SHIFT 0xc +#define DAGB7_WR_VC4_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB7_WR_VC4_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB7_WR_VC4_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB7_WR_VC4_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB7_WR_VC4_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB7_WR_VC4_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB7_WR_VC4_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB7_WR_VC4_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB7_WR_VC4_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB7_WR_VC4_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB7_WR_VC4_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB7_WR_VC4_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB7_WR_VC5_CNTL +#define DAGB7_WR_VC5_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB7_WR_VC5_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB7_WR_VC5_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define 
DAGB7_WR_VC5_CNTL__MAX_BW__SHIFT 0xc +#define DAGB7_WR_VC5_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB7_WR_VC5_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB7_WR_VC5_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB7_WR_VC5_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB7_WR_VC5_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB7_WR_VC5_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB7_WR_VC5_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB7_WR_VC5_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB7_WR_VC5_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB7_WR_VC5_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB7_WR_VC5_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB7_WR_VC5_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB7_WR_VC6_CNTL +#define DAGB7_WR_VC6_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB7_WR_VC6_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB7_WR_VC6_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB7_WR_VC6_CNTL__MAX_BW__SHIFT 0xc +#define DAGB7_WR_VC6_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB7_WR_VC6_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB7_WR_VC6_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB7_WR_VC6_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB7_WR_VC6_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB7_WR_VC6_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB7_WR_VC6_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB7_WR_VC6_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB7_WR_VC6_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB7_WR_VC6_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB7_WR_VC6_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB7_WR_VC6_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB7_WR_VC7_CNTL +#define DAGB7_WR_VC7_CNTL__STOR_CREDIT__SHIFT 0x0 +#define DAGB7_WR_VC7_CNTL__EA_CREDIT__SHIFT 0x5 +#define DAGB7_WR_VC7_CNTL__MAX_BW_ENABLE__SHIFT 0xb +#define DAGB7_WR_VC7_CNTL__MAX_BW__SHIFT 0xc +#define DAGB7_WR_VC7_CNTL__MIN_BW_ENABLE__SHIFT 0x14 +#define DAGB7_WR_VC7_CNTL__MIN_BW__SHIFT 0x15 +#define DAGB7_WR_VC7_CNTL__OSD_LIMITER_ENABLE__SHIFT 0x18 +#define DAGB7_WR_VC7_CNTL__MAX_OSD__SHIFT 0x19 +#define DAGB7_WR_VC7_CNTL__STOR_CREDIT_MASK 0x0000001FL +#define DAGB7_WR_VC7_CNTL__EA_CREDIT_MASK 0x000007E0L +#define DAGB7_WR_VC7_CNTL__MAX_BW_ENABLE_MASK 0x00000800L +#define DAGB7_WR_VC7_CNTL__MAX_BW_MASK 0x000FF000L +#define DAGB7_WR_VC7_CNTL__MIN_BW_ENABLE_MASK 0x00100000L +#define DAGB7_WR_VC7_CNTL__MIN_BW_MASK 0x00E00000L +#define DAGB7_WR_VC7_CNTL__OSD_LIMITER_ENABLE_MASK 0x01000000L +#define DAGB7_WR_VC7_CNTL__MAX_OSD_MASK 0xFE000000L +//DAGB7_WR_CNTL_MISC +#define DAGB7_WR_CNTL_MISC__STOR_POOL_CREDIT__SHIFT 0x0 +#define DAGB7_WR_CNTL_MISC__EA_POOL_CREDIT__SHIFT 0x6 +#define DAGB7_WR_CNTL_MISC__IO_EA_CREDIT__SHIFT 0xd +#define DAGB7_WR_CNTL_MISC__STOR_CC_LEGACY_MODE__SHIFT 0x13 +#define DAGB7_WR_CNTL_MISC__EA_CC_LEGACY_MODE__SHIFT 0x14 +#define DAGB7_WR_CNTL_MISC__UTCL2_CID__SHIFT 0x15 +#define DAGB7_WR_CNTL_MISC__RDRET_FIFO_CREDITS__SHIFT 0x1a +#define DAGB7_WR_CNTL_MISC__STOR_POOL_CREDIT_MASK 0x0000003FL +#define DAGB7_WR_CNTL_MISC__EA_POOL_CREDIT_MASK 0x00001FC0L +#define DAGB7_WR_CNTL_MISC__IO_EA_CREDIT_MASK 0x0007E000L +#define DAGB7_WR_CNTL_MISC__STOR_CC_LEGACY_MODE_MASK 0x00080000L +#define DAGB7_WR_CNTL_MISC__EA_CC_LEGACY_MODE_MASK 0x00100000L +#define DAGB7_WR_CNTL_MISC__UTCL2_CID_MASK 0x03E00000L +#define DAGB7_WR_CNTL_MISC__RDRET_FIFO_CREDITS_MASK 0xFC000000L +//DAGB7_WR_TLB_CREDIT +#define DAGB7_WR_TLB_CREDIT__TLB0__SHIFT 0x0 +#define DAGB7_WR_TLB_CREDIT__TLB1__SHIFT 0x5 +#define DAGB7_WR_TLB_CREDIT__TLB2__SHIFT 0xa +#define DAGB7_WR_TLB_CREDIT__TLB3__SHIFT 0xf +#define DAGB7_WR_TLB_CREDIT__TLB4__SHIFT 
0x14 +#define DAGB7_WR_TLB_CREDIT__TLB5__SHIFT 0x19 +#define DAGB7_WR_TLB_CREDIT__TLB0_MASK 0x0000001FL +#define DAGB7_WR_TLB_CREDIT__TLB1_MASK 0x000003E0L +#define DAGB7_WR_TLB_CREDIT__TLB2_MASK 0x00007C00L +#define DAGB7_WR_TLB_CREDIT__TLB3_MASK 0x000F8000L +#define DAGB7_WR_TLB_CREDIT__TLB4_MASK 0x01F00000L +#define DAGB7_WR_TLB_CREDIT__TLB5_MASK 0x3E000000L +//DAGB7_WR_DATA_CREDIT +#define DAGB7_WR_DATA_CREDIT__DLOCK_VC_CREDITS__SHIFT 0x0 +#define DAGB7_WR_DATA_CREDIT__LARGE_BURST_CREDITS__SHIFT 0x8 +#define DAGB7_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS__SHIFT 0x10 +#define DAGB7_WR_DATA_CREDIT__SMALL_BURST_CREDITS__SHIFT 0x18 +#define DAGB7_WR_DATA_CREDIT__DLOCK_VC_CREDITS_MASK 0x000000FFL +#define DAGB7_WR_DATA_CREDIT__LARGE_BURST_CREDITS_MASK 0x0000FF00L +#define DAGB7_WR_DATA_CREDIT__MIDDLE_BURST_CREDITS_MASK 0x00FF0000L +#define DAGB7_WR_DATA_CREDIT__SMALL_BURST_CREDITS_MASK 0xFF000000L +//DAGB7_WR_MISC_CREDIT +#define DAGB7_WR_MISC_CREDIT__ATOMIC_CREDIT__SHIFT 0x0 +#define DAGB7_WR_MISC_CREDIT__DLOCK_VC_NUM__SHIFT 0x6 +#define DAGB7_WR_MISC_CREDIT__OSD_CREDIT__SHIFT 0x9 +#define DAGB7_WR_MISC_CREDIT__OSD_DLOCK_CREDIT__SHIFT 0x10 +#define DAGB7_WR_MISC_CREDIT__ATOMIC_CREDIT_MASK 0x0000003FL +#define DAGB7_WR_MISC_CREDIT__DLOCK_VC_NUM_MASK 0x000001C0L +#define DAGB7_WR_MISC_CREDIT__OSD_CREDIT_MASK 0x0000FE00L +#define DAGB7_WR_MISC_CREDIT__OSD_DLOCK_CREDIT_MASK 0x007F0000L +//DAGB7_WRCLI_ASK_PENDING +#define DAGB7_WRCLI_ASK_PENDING__BUSY__SHIFT 0x0 +#define DAGB7_WRCLI_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB7_WRCLI_GO_PENDING +#define DAGB7_WRCLI_GO_PENDING__BUSY__SHIFT 0x0 +#define DAGB7_WRCLI_GO_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB7_WRCLI_GBLSEND_PENDING +#define DAGB7_WRCLI_GBLSEND_PENDING__BUSY__SHIFT 0x0 +#define DAGB7_WRCLI_GBLSEND_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB7_WRCLI_TLB_PENDING +#define DAGB7_WRCLI_TLB_PENDING__BUSY__SHIFT 0x0 +#define DAGB7_WRCLI_TLB_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB7_WRCLI_OARB_PENDING +#define DAGB7_WRCLI_OARB_PENDING__BUSY__SHIFT 0x0 +#define DAGB7_WRCLI_OARB_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB7_WRCLI_OSD_PENDING +#define DAGB7_WRCLI_OSD_PENDING__BUSY__SHIFT 0x0 +#define DAGB7_WRCLI_OSD_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB7_WRCLI_DBUS_ASK_PENDING +#define DAGB7_WRCLI_DBUS_ASK_PENDING__BUSY__SHIFT 0x0 +#define DAGB7_WRCLI_DBUS_ASK_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB7_WRCLI_DBUS_GO_PENDING +#define DAGB7_WRCLI_DBUS_GO_PENDING__BUSY__SHIFT 0x0 +#define DAGB7_WRCLI_DBUS_GO_PENDING__BUSY_MASK 0xFFFFFFFFL +//DAGB7_DAGB_DLY +#define DAGB7_DAGB_DLY__DLY__SHIFT 0x0 +#define DAGB7_DAGB_DLY__CLI__SHIFT 0x8 +#define DAGB7_DAGB_DLY__POS__SHIFT 0x10 +#define DAGB7_DAGB_DLY__DLY_MASK 0x000000FFL +#define DAGB7_DAGB_DLY__CLI_MASK 0x0000FF00L +#define DAGB7_DAGB_DLY__POS_MASK 0x000F0000L +//DAGB7_CNTL_MISC +#define DAGB7_CNTL_MISC__EA_VC0_REMAP__SHIFT 0x0 +#define DAGB7_CNTL_MISC__EA_VC1_REMAP__SHIFT 0x3 +#define DAGB7_CNTL_MISC__EA_VC2_REMAP__SHIFT 0x6 +#define DAGB7_CNTL_MISC__EA_VC3_REMAP__SHIFT 0x9 +#define DAGB7_CNTL_MISC__EA_VC4_REMAP__SHIFT 0xc +#define DAGB7_CNTL_MISC__EA_VC5_REMAP__SHIFT 0xf +#define DAGB7_CNTL_MISC__EA_VC6_REMAP__SHIFT 0x12 +#define DAGB7_CNTL_MISC__EA_VC7_REMAP__SHIFT 0x15 +#define DAGB7_CNTL_MISC__BW_INIT_CYCLE__SHIFT 0x18 +#define DAGB7_CNTL_MISC__BW_RW_GAP_CYCLE__SHIFT 0x1e +#define DAGB7_CNTL_MISC__EA_VC0_REMAP_MASK 0x00000007L +#define DAGB7_CNTL_MISC__EA_VC1_REMAP_MASK 0x00000038L +#define DAGB7_CNTL_MISC__EA_VC2_REMAP_MASK 0x000001C0L +#define DAGB7_CNTL_MISC__EA_VC3_REMAP_MASK 0x00000E00L +#define 
DAGB7_CNTL_MISC__EA_VC4_REMAP_MASK 0x00007000L +#define DAGB7_CNTL_MISC__EA_VC5_REMAP_MASK 0x00038000L +#define DAGB7_CNTL_MISC__EA_VC6_REMAP_MASK 0x001C0000L +#define DAGB7_CNTL_MISC__EA_VC7_REMAP_MASK 0x00E00000L +#define DAGB7_CNTL_MISC__BW_INIT_CYCLE_MASK 0x3F000000L +#define DAGB7_CNTL_MISC__BW_RW_GAP_CYCLE_MASK 0xC0000000L +//DAGB7_CNTL_MISC2 +#define DAGB7_CNTL_MISC2__URG_BOOST_ENABLE__SHIFT 0x0 +#define DAGB7_CNTL_MISC2__URG_HALT_ENABLE__SHIFT 0x1 +#define DAGB7_CNTL_MISC2__DISABLE_WRREQ_CG__SHIFT 0x2 +#define DAGB7_CNTL_MISC2__DISABLE_WRRET_CG__SHIFT 0x3 +#define DAGB7_CNTL_MISC2__DISABLE_RDREQ_CG__SHIFT 0x4 +#define DAGB7_CNTL_MISC2__DISABLE_RDRET_CG__SHIFT 0x5 +#define DAGB7_CNTL_MISC2__DISABLE_TLBWR_CG__SHIFT 0x6 +#define DAGB7_CNTL_MISC2__DISABLE_TLBRD_CG__SHIFT 0x7 +#define DAGB7_CNTL_MISC2__DISABLE_EAWRREQ_BUSY__SHIFT 0x8 +#define DAGB7_CNTL_MISC2__DISABLE_EARDREQ_BUSY__SHIFT 0x9 +#define DAGB7_CNTL_MISC2__SWAP_CTL__SHIFT 0xa +#define DAGB7_CNTL_MISC2__RDRET_FIFO_PERF__SHIFT 0xb +#define DAGB7_CNTL_MISC2__RDRET_FIFO_DLOCK_CREDITS__SHIFT 0x11 +#define DAGB7_CNTL_MISC2__URG_BOOST_ENABLE_MASK 0x00000001L +#define DAGB7_CNTL_MISC2__URG_HALT_ENABLE_MASK 0x00000002L +#define DAGB7_CNTL_MISC2__DISABLE_WRREQ_CG_MASK 0x00000004L +#define DAGB7_CNTL_MISC2__DISABLE_WRRET_CG_MASK 0x00000008L +#define DAGB7_CNTL_MISC2__DISABLE_RDREQ_CG_MASK 0x00000010L +#define DAGB7_CNTL_MISC2__DISABLE_RDRET_CG_MASK 0x00000020L +#define DAGB7_CNTL_MISC2__DISABLE_TLBWR_CG_MASK 0x00000040L +#define DAGB7_CNTL_MISC2__DISABLE_TLBRD_CG_MASK 0x00000080L +#define DAGB7_CNTL_MISC2__DISABLE_EAWRREQ_BUSY_MASK 0x00000100L +#define DAGB7_CNTL_MISC2__DISABLE_EARDREQ_BUSY_MASK 0x00000200L +#define DAGB7_CNTL_MISC2__SWAP_CTL_MASK 0x00000400L +#define DAGB7_CNTL_MISC2__RDRET_FIFO_PERF_MASK 0x00000800L +#define DAGB7_CNTL_MISC2__RDRET_FIFO_DLOCK_CREDITS_MASK 0x007E0000L +//DAGB7_FIFO_EMPTY +#define DAGB7_FIFO_EMPTY__EMPTY__SHIFT 0x0 +#define DAGB7_FIFO_EMPTY__EMPTY_MASK 0x00FFFFFFL +//DAGB7_FIFO_FULL +#define DAGB7_FIFO_FULL__FULL__SHIFT 0x0 +#define DAGB7_FIFO_FULL__FULL_MASK 0x007FFFFFL +//DAGB7_WR_CREDITS_FULL +#define DAGB7_WR_CREDITS_FULL__FULL__SHIFT 0x0 +#define DAGB7_WR_CREDITS_FULL__FULL_MASK 0x1FFFFFFFL +//DAGB7_RD_CREDITS_FULL +#define DAGB7_RD_CREDITS_FULL__FULL__SHIFT 0x0 +#define DAGB7_RD_CREDITS_FULL__FULL_MASK 0x0003FFFFL +//DAGB7_PERFCOUNTER_LO +#define DAGB7_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0 +#define DAGB7_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL +//DAGB7_PERFCOUNTER_HI +#define DAGB7_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0 +#define DAGB7_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10 +#define DAGB7_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL +#define DAGB7_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L +//DAGB7_PERFCOUNTER0_CFG +#define DAGB7_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0 +#define DAGB7_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8 +#define DAGB7_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18 +#define DAGB7_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c +#define DAGB7_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d +#define DAGB7_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL +#define DAGB7_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define DAGB7_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L +#define DAGB7_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L +#define DAGB7_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L +//DAGB7_PERFCOUNTER1_CFG +#define DAGB7_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0 +#define DAGB7_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8 +#define DAGB7_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18 +#define 
DAGB7_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c +#define DAGB7_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d +#define DAGB7_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL +#define DAGB7_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define DAGB7_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L +#define DAGB7_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L +#define DAGB7_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L +//DAGB7_PERFCOUNTER2_CFG +#define DAGB7_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0 +#define DAGB7_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8 +#define DAGB7_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18 +#define DAGB7_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c +#define DAGB7_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d +#define DAGB7_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL +#define DAGB7_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define DAGB7_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L +#define DAGB7_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L +#define DAGB7_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L +//DAGB7_PERFCOUNTER_RSLT_CNTL +#define DAGB7_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0 +#define DAGB7_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8 +#define DAGB7_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10 +#define DAGB7_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18 +#define DAGB7_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19 +#define DAGB7_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a +#define DAGB7_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL +#define DAGB7_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L +#define DAGB7_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L +#define DAGB7_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L +#define DAGB7_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L +#define DAGB7_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L +//DAGB7_RESERVE0 +#define DAGB7_RESERVE0__RESERVE__SHIFT 0x0 +#define DAGB7_RESERVE0__RESERVE_MASK 0xFFFFFFFFL +//DAGB7_RESERVE1 +#define DAGB7_RESERVE1__RESERVE__SHIFT 0x0 +#define DAGB7_RESERVE1__RESERVE_MASK 0xFFFFFFFFL +//DAGB7_RESERVE2 +#define DAGB7_RESERVE2__RESERVE__SHIFT 0x0 +#define DAGB7_RESERVE2__RESERVE_MASK 0xFFFFFFFFL +//DAGB7_RESERVE3 +#define DAGB7_RESERVE3__RESERVE__SHIFT 0x0 +#define DAGB7_RESERVE3__RESERVE_MASK 0xFFFFFFFFL +//DAGB7_RESERVE4 +#define DAGB7_RESERVE4__RESERVE__SHIFT 0x0 +#define DAGB7_RESERVE4__RESERVE_MASK 0xFFFFFFFFL +//DAGB7_RESERVE5 +#define DAGB7_RESERVE5__RESERVE__SHIFT 0x0 +#define DAGB7_RESERVE5__RESERVE_MASK 0xFFFFFFFFL +//DAGB7_RESERVE6 +#define DAGB7_RESERVE6__RESERVE__SHIFT 0x0 +#define DAGB7_RESERVE6__RESERVE_MASK 0xFFFFFFFFL +//DAGB7_RESERVE7 +#define DAGB7_RESERVE7__RESERVE__SHIFT 0x0 +#define DAGB7_RESERVE7__RESERVE_MASK 0xFFFFFFFFL +//DAGB7_RESERVE8 +#define DAGB7_RESERVE8__RESERVE__SHIFT 0x0 +#define DAGB7_RESERVE8__RESERVE_MASK 0xFFFFFFFFL +//DAGB7_RESERVE9 +#define DAGB7_RESERVE9__RESERVE__SHIFT 0x0 +#define DAGB7_RESERVE9__RESERVE_MASK 0xFFFFFFFFL +//DAGB7_RESERVE10 +#define DAGB7_RESERVE10__RESERVE__SHIFT 0x0 +#define DAGB7_RESERVE10__RESERVE_MASK 0xFFFFFFFFL +//DAGB7_RESERVE11 +#define DAGB7_RESERVE11__RESERVE__SHIFT 0x0 +#define DAGB7_RESERVE11__RESERVE_MASK 0xFFFFFFFFL +//DAGB7_RESERVE12 +#define DAGB7_RESERVE12__RESERVE__SHIFT 0x0 +#define DAGB7_RESERVE12__RESERVE_MASK 0xFFFFFFFFL +//DAGB7_RESERVE13 +#define DAGB7_RESERVE13__RESERVE__SHIFT 0x0 +#define DAGB7_RESERVE13__RESERVE_MASK 0xFFFFFFFFL + + +// addressBlock: mmhub_ea_mmeadec5 +//MMEA5_DRAM_RD_CLI2GRP_MAP0 +#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0 +#define 
MMEA5_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2 +#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4 +#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6 +#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8 +#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa +#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc +#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe +#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10 +#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12 +#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14 +#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16 +#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18 +#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a +#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c +#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e +#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L +#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL +#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L +#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L +#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L +#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L +#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L +#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L +#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L +#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L +#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L +#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L +#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L +#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L +#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L +#define MMEA5_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L +//MMEA5_DRAM_RD_CLI2GRP_MAP1 +#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0 +#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2 +#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4 +#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6 +#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8 +#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa +#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc +#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe +#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10 +#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12 +#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14 +#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16 +#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18 +#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a +#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c +#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e +#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L +#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL +#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L +#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L +#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L +#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L +#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L +#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L +#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L +#define 
MMEA5_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L +#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L +#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L +#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L +#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L +#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L +#define MMEA5_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L +//MMEA5_DRAM_WR_CLI2GRP_MAP0 +#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0 +#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2 +#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4 +#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6 +#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8 +#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa +#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc +#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe +#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10 +#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12 +#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14 +#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16 +#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18 +#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a +#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c +#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e +#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L +#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL +#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L +#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L +#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L +#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L +#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L +#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L +#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L +#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L +#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L +#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L +#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L +#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L +#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L +#define MMEA5_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L +//MMEA5_DRAM_WR_CLI2GRP_MAP1 +#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0 +#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2 +#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4 +#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6 +#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8 +#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa +#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc +#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe +#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10 +#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12 +#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14 +#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16 +#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18 +#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a +#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c +#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e +#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L 
+#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL +#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L +#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L +#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L +#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L +#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L +#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L +#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L +#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L +#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L +#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L +#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L +#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L +#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L +#define MMEA5_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L +//MMEA5_DRAM_RD_GRP2VC_MAP +#define MMEA5_DRAM_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0 +#define MMEA5_DRAM_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3 +#define MMEA5_DRAM_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6 +#define MMEA5_DRAM_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9 +#define MMEA5_DRAM_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L +#define MMEA5_DRAM_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L +#define MMEA5_DRAM_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L +#define MMEA5_DRAM_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L +//MMEA5_DRAM_WR_GRP2VC_MAP +#define MMEA5_DRAM_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0 +#define MMEA5_DRAM_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3 +#define MMEA5_DRAM_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6 +#define MMEA5_DRAM_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9 +#define MMEA5_DRAM_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L +#define MMEA5_DRAM_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L +#define MMEA5_DRAM_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L +#define MMEA5_DRAM_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L +//MMEA5_DRAM_RD_LAZY +#define MMEA5_DRAM_RD_LAZY__GROUP0_DELAY__SHIFT 0x0 +#define MMEA5_DRAM_RD_LAZY__GROUP1_DELAY__SHIFT 0x3 +#define MMEA5_DRAM_RD_LAZY__GROUP2_DELAY__SHIFT 0x6 +#define MMEA5_DRAM_RD_LAZY__GROUP3_DELAY__SHIFT 0x9 +#define MMEA5_DRAM_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc +#define MMEA5_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14 +#define MMEA5_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b +#define MMEA5_DRAM_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L +#define MMEA5_DRAM_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L +#define MMEA5_DRAM_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L +#define MMEA5_DRAM_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L +#define MMEA5_DRAM_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L +#define MMEA5_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L +#define MMEA5_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L +//MMEA5_DRAM_WR_LAZY +#define MMEA5_DRAM_WR_LAZY__GROUP0_DELAY__SHIFT 0x0 +#define MMEA5_DRAM_WR_LAZY__GROUP1_DELAY__SHIFT 0x3 +#define MMEA5_DRAM_WR_LAZY__GROUP2_DELAY__SHIFT 0x6 +#define MMEA5_DRAM_WR_LAZY__GROUP3_DELAY__SHIFT 0x9 +#define MMEA5_DRAM_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc +#define MMEA5_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14 +#define MMEA5_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b +#define MMEA5_DRAM_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L +#define MMEA5_DRAM_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L +#define MMEA5_DRAM_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L +#define MMEA5_DRAM_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L +#define MMEA5_DRAM_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L +#define 
MMEA5_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L +#define MMEA5_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L +//MMEA5_DRAM_RD_CAM_CNTL +#define MMEA5_DRAM_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0 +#define MMEA5_DRAM_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4 +#define MMEA5_DRAM_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8 +#define MMEA5_DRAM_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc +#define MMEA5_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10 +#define MMEA5_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13 +#define MMEA5_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16 +#define MMEA5_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19 +#define MMEA5_DRAM_RD_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c +#define MMEA5_DRAM_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL +#define MMEA5_DRAM_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L +#define MMEA5_DRAM_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L +#define MMEA5_DRAM_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L +#define MMEA5_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L +#define MMEA5_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L +#define MMEA5_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L +#define MMEA5_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L +#define MMEA5_DRAM_RD_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L +//MMEA5_DRAM_WR_CAM_CNTL +#define MMEA5_DRAM_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0 +#define MMEA5_DRAM_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4 +#define MMEA5_DRAM_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8 +#define MMEA5_DRAM_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc +#define MMEA5_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10 +#define MMEA5_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13 +#define MMEA5_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16 +#define MMEA5_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19 +#define MMEA5_DRAM_WR_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c +#define MMEA5_DRAM_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL +#define MMEA5_DRAM_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L +#define MMEA5_DRAM_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L +#define MMEA5_DRAM_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L +#define MMEA5_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L +#define MMEA5_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L +#define MMEA5_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L +#define MMEA5_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L +#define MMEA5_DRAM_WR_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L +//MMEA5_DRAM_PAGE_BURST +#define MMEA5_DRAM_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0 +#define MMEA5_DRAM_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8 +#define MMEA5_DRAM_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10 +#define MMEA5_DRAM_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18 +#define MMEA5_DRAM_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL +#define MMEA5_DRAM_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L +#define MMEA5_DRAM_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L +#define MMEA5_DRAM_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L +//MMEA5_DRAM_RD_PRI_AGE +#define MMEA5_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0 +#define MMEA5_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3 +#define MMEA5_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6 +#define MMEA5_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9 +#define MMEA5_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc +#define MMEA5_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf +#define MMEA5_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12 +#define MMEA5_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15 +#define MMEA5_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 
0x00000007L +#define MMEA5_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L +#define MMEA5_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L +#define MMEA5_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L +#define MMEA5_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L +#define MMEA5_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L +#define MMEA5_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L +#define MMEA5_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L +//MMEA5_DRAM_WR_PRI_AGE +#define MMEA5_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0 +#define MMEA5_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3 +#define MMEA5_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6 +#define MMEA5_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9 +#define MMEA5_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc +#define MMEA5_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf +#define MMEA5_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12 +#define MMEA5_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15 +#define MMEA5_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L +#define MMEA5_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L +#define MMEA5_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L +#define MMEA5_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L +#define MMEA5_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L +#define MMEA5_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L +#define MMEA5_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L +#define MMEA5_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L +//MMEA5_DRAM_RD_PRI_QUEUING +#define MMEA5_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0 +#define MMEA5_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3 +#define MMEA5_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6 +#define MMEA5_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9 +#define MMEA5_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L +#define MMEA5_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L +#define MMEA5_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L +#define MMEA5_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L +//MMEA5_DRAM_WR_PRI_QUEUING +#define MMEA5_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0 +#define MMEA5_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3 +#define MMEA5_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6 +#define MMEA5_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9 +#define MMEA5_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L +#define MMEA5_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L +#define MMEA5_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L +#define MMEA5_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L +//MMEA5_DRAM_RD_PRI_FIXED +#define MMEA5_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0 +#define MMEA5_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3 +#define MMEA5_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6 +#define MMEA5_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9 +#define MMEA5_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L +#define MMEA5_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L +#define MMEA5_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L +#define MMEA5_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L +//MMEA5_DRAM_WR_PRI_FIXED +#define 
MMEA5_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0 +#define MMEA5_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3 +#define MMEA5_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6 +#define MMEA5_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9 +#define MMEA5_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L +#define MMEA5_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L +#define MMEA5_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L +#define MMEA5_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L +//MMEA5_DRAM_RD_PRI_URGENCY +#define MMEA5_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0 +#define MMEA5_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3 +#define MMEA5_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6 +#define MMEA5_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9 +#define MMEA5_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc +#define MMEA5_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd +#define MMEA5_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe +#define MMEA5_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf +#define MMEA5_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L +#define MMEA5_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L +#define MMEA5_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L +#define MMEA5_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L +#define MMEA5_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L +#define MMEA5_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L +#define MMEA5_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L +#define MMEA5_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L +//MMEA5_DRAM_WR_PRI_URGENCY +#define MMEA5_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0 +#define MMEA5_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3 +#define MMEA5_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6 +#define MMEA5_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9 +#define MMEA5_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc +#define MMEA5_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd +#define MMEA5_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe +#define MMEA5_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf +#define MMEA5_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L +#define MMEA5_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L +#define MMEA5_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L +#define MMEA5_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L +#define MMEA5_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L +#define MMEA5_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L +#define MMEA5_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L +#define MMEA5_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L +//MMEA5_DRAM_RD_PRI_QUANT_PRI1 +#define MMEA5_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA5_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA5_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA5_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA5_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA5_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA5_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define 
MMEA5_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA5_DRAM_RD_PRI_QUANT_PRI2 +#define MMEA5_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA5_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA5_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA5_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA5_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA5_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA5_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA5_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA5_DRAM_RD_PRI_QUANT_PRI3 +#define MMEA5_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA5_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA5_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA5_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA5_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA5_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA5_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA5_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA5_DRAM_WR_PRI_QUANT_PRI1 +#define MMEA5_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA5_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA5_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA5_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA5_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA5_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA5_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA5_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA5_DRAM_WR_PRI_QUANT_PRI2 +#define MMEA5_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA5_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA5_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA5_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA5_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA5_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA5_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA5_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA5_DRAM_WR_PRI_QUANT_PRI3 +#define MMEA5_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA5_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA5_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA5_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA5_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA5_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA5_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA5_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA5_GMI_RD_CLI2GRP_MAP0 +#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0 +#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2 +#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4 +#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6 +#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8 +#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa +#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc +#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe +#define 
MMEA5_GMI_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10 +#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12 +#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14 +#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16 +#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18 +#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a +#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c +#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e +#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L +#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL +#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L +#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L +#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L +#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L +#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L +#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L +#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L +#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L +#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L +#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L +#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L +#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L +#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L +#define MMEA5_GMI_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L +//MMEA5_GMI_RD_CLI2GRP_MAP1 +#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0 +#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2 +#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4 +#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6 +#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8 +#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa +#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc +#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe +#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10 +#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12 +#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14 +#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16 +#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18 +#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a +#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c +#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e +#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L +#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL +#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L +#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L +#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L +#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L +#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L +#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L +#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L +#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L +#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L +#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L +#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L +#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L +#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L +#define MMEA5_GMI_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L +//MMEA5_GMI_WR_CLI2GRP_MAP0 
+#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0 +#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2 +#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4 +#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6 +#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8 +#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa +#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc +#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe +#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10 +#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12 +#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14 +#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16 +#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18 +#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a +#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c +#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e +#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L +#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL +#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L +#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L +#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L +#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L +#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L +#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L +#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L +#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L +#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L +#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L +#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L +#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L +#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L +#define MMEA5_GMI_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L +//MMEA5_GMI_WR_CLI2GRP_MAP1 +#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0 +#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2 +#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4 +#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6 +#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8 +#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa +#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc +#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe +#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10 +#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12 +#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14 +#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16 +#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18 +#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a +#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c +#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e +#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L +#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL +#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L +#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L +#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L +#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L +#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L +#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L +#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L +#define 
MMEA5_GMI_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L +#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L +#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L +#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L +#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L +#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L +#define MMEA5_GMI_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L +//MMEA5_GMI_RD_GRP2VC_MAP +#define MMEA5_GMI_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0 +#define MMEA5_GMI_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3 +#define MMEA5_GMI_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6 +#define MMEA5_GMI_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9 +#define MMEA5_GMI_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L +#define MMEA5_GMI_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L +#define MMEA5_GMI_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L +#define MMEA5_GMI_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L +//MMEA5_GMI_WR_GRP2VC_MAP +#define MMEA5_GMI_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0 +#define MMEA5_GMI_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3 +#define MMEA5_GMI_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6 +#define MMEA5_GMI_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9 +#define MMEA5_GMI_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L +#define MMEA5_GMI_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L +#define MMEA5_GMI_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L +#define MMEA5_GMI_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L +//MMEA5_GMI_RD_LAZY +#define MMEA5_GMI_RD_LAZY__GROUP0_DELAY__SHIFT 0x0 +#define MMEA5_GMI_RD_LAZY__GROUP1_DELAY__SHIFT 0x3 +#define MMEA5_GMI_RD_LAZY__GROUP2_DELAY__SHIFT 0x6 +#define MMEA5_GMI_RD_LAZY__GROUP3_DELAY__SHIFT 0x9 +#define MMEA5_GMI_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc +#define MMEA5_GMI_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14 +#define MMEA5_GMI_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b +#define MMEA5_GMI_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L +#define MMEA5_GMI_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L +#define MMEA5_GMI_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L +#define MMEA5_GMI_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L +#define MMEA5_GMI_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L +#define MMEA5_GMI_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L +#define MMEA5_GMI_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L +//MMEA5_GMI_WR_LAZY +#define MMEA5_GMI_WR_LAZY__GROUP0_DELAY__SHIFT 0x0 +#define MMEA5_GMI_WR_LAZY__GROUP1_DELAY__SHIFT 0x3 +#define MMEA5_GMI_WR_LAZY__GROUP2_DELAY__SHIFT 0x6 +#define MMEA5_GMI_WR_LAZY__GROUP3_DELAY__SHIFT 0x9 +#define MMEA5_GMI_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc +#define MMEA5_GMI_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14 +#define MMEA5_GMI_WR_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b +#define MMEA5_GMI_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L +#define MMEA5_GMI_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L +#define MMEA5_GMI_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L +#define MMEA5_GMI_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L +#define MMEA5_GMI_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L +#define MMEA5_GMI_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L +#define MMEA5_GMI_WR_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L +//MMEA5_GMI_RD_CAM_CNTL +#define MMEA5_GMI_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0 +#define MMEA5_GMI_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4 +#define MMEA5_GMI_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8 +#define MMEA5_GMI_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc +#define MMEA5_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10 +#define MMEA5_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13 +#define MMEA5_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16 +#define 
MMEA5_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19 +#define MMEA5_GMI_RD_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c +#define MMEA5_GMI_RD_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d +#define MMEA5_GMI_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL +#define MMEA5_GMI_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L +#define MMEA5_GMI_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L +#define MMEA5_GMI_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L +#define MMEA5_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L +#define MMEA5_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L +#define MMEA5_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L +#define MMEA5_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L +#define MMEA5_GMI_RD_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L +#define MMEA5_GMI_RD_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L +//MMEA5_GMI_WR_CAM_CNTL +#define MMEA5_GMI_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0 +#define MMEA5_GMI_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4 +#define MMEA5_GMI_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8 +#define MMEA5_GMI_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc +#define MMEA5_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10 +#define MMEA5_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13 +#define MMEA5_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16 +#define MMEA5_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19 +#define MMEA5_GMI_WR_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c +#define MMEA5_GMI_WR_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d +#define MMEA5_GMI_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL +#define MMEA5_GMI_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L +#define MMEA5_GMI_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L +#define MMEA5_GMI_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L +#define MMEA5_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L +#define MMEA5_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L +#define MMEA5_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L +#define MMEA5_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L +#define MMEA5_GMI_WR_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L +#define MMEA5_GMI_WR_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L +//MMEA5_GMI_PAGE_BURST +#define MMEA5_GMI_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0 +#define MMEA5_GMI_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8 +#define MMEA5_GMI_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10 +#define MMEA5_GMI_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18 +#define MMEA5_GMI_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL +#define MMEA5_GMI_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L +#define MMEA5_GMI_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L +#define MMEA5_GMI_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L +//MMEA5_GMI_RD_PRI_AGE +#define MMEA5_GMI_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0 +#define MMEA5_GMI_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3 +#define MMEA5_GMI_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6 +#define MMEA5_GMI_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9 +#define MMEA5_GMI_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc +#define MMEA5_GMI_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf +#define MMEA5_GMI_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12 +#define MMEA5_GMI_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15 +#define MMEA5_GMI_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L +#define MMEA5_GMI_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L +#define MMEA5_GMI_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L +#define MMEA5_GMI_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L +#define MMEA5_GMI_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L +#define MMEA5_GMI_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L +#define 
MMEA5_GMI_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L +#define MMEA5_GMI_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L +//MMEA5_GMI_WR_PRI_AGE +#define MMEA5_GMI_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0 +#define MMEA5_GMI_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3 +#define MMEA5_GMI_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6 +#define MMEA5_GMI_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9 +#define MMEA5_GMI_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc +#define MMEA5_GMI_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf +#define MMEA5_GMI_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12 +#define MMEA5_GMI_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15 +#define MMEA5_GMI_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L +#define MMEA5_GMI_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L +#define MMEA5_GMI_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L +#define MMEA5_GMI_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L +#define MMEA5_GMI_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L +#define MMEA5_GMI_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L +#define MMEA5_GMI_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L +#define MMEA5_GMI_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L +//MMEA5_GMI_RD_PRI_QUEUING +#define MMEA5_GMI_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0 +#define MMEA5_GMI_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3 +#define MMEA5_GMI_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6 +#define MMEA5_GMI_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9 +#define MMEA5_GMI_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L +#define MMEA5_GMI_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L +#define MMEA5_GMI_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L +#define MMEA5_GMI_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L +//MMEA5_GMI_WR_PRI_QUEUING +#define MMEA5_GMI_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0 +#define MMEA5_GMI_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3 +#define MMEA5_GMI_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6 +#define MMEA5_GMI_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9 +#define MMEA5_GMI_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L +#define MMEA5_GMI_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L +#define MMEA5_GMI_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L +#define MMEA5_GMI_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L +//MMEA5_GMI_RD_PRI_FIXED +#define MMEA5_GMI_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0 +#define MMEA5_GMI_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3 +#define MMEA5_GMI_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6 +#define MMEA5_GMI_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9 +#define MMEA5_GMI_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L +#define MMEA5_GMI_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L +#define MMEA5_GMI_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L +#define MMEA5_GMI_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L +//MMEA5_GMI_WR_PRI_FIXED +#define MMEA5_GMI_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0 +#define MMEA5_GMI_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3 +#define MMEA5_GMI_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6 +#define MMEA5_GMI_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9 +#define MMEA5_GMI_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L +#define MMEA5_GMI_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L +#define 
MMEA5_GMI_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L +#define MMEA5_GMI_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L +//MMEA5_GMI_RD_PRI_URGENCY +#define MMEA5_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0 +#define MMEA5_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3 +#define MMEA5_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6 +#define MMEA5_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9 +#define MMEA5_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc +#define MMEA5_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd +#define MMEA5_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe +#define MMEA5_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf +#define MMEA5_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L +#define MMEA5_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L +#define MMEA5_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L +#define MMEA5_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L +#define MMEA5_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L +#define MMEA5_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L +#define MMEA5_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L +#define MMEA5_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L +//MMEA5_GMI_WR_PRI_URGENCY +#define MMEA5_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0 +#define MMEA5_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3 +#define MMEA5_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6 +#define MMEA5_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9 +#define MMEA5_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc +#define MMEA5_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd +#define MMEA5_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe +#define MMEA5_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf +#define MMEA5_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L +#define MMEA5_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L +#define MMEA5_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L +#define MMEA5_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L +#define MMEA5_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L +#define MMEA5_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L +#define MMEA5_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L +#define MMEA5_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L +//MMEA5_GMI_RD_PRI_URGENCY_MASKING +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0 +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1 +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2 +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3 +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4 +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5 +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6 +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7 +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8 +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9 +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe +#define 
MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10 +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11 +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12 +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13 +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14 +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15 +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16 +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17 +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18 +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19 +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L +#define MMEA5_GMI_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L +//MMEA5_GMI_WR_PRI_URGENCY_MASKING +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0 +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1 +#define 
MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2 +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3 +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4 +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5 +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6 +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7 +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8 +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9 +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10 +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11 +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12 +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13 +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14 +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15 +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16 +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17 +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18 +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19 +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L +#define 
MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L +#define MMEA5_GMI_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L +//MMEA5_GMI_RD_PRI_QUANT_PRI1 +#define MMEA5_GMI_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA5_GMI_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA5_GMI_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA5_GMI_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA5_GMI_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA5_GMI_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA5_GMI_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA5_GMI_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA5_GMI_RD_PRI_QUANT_PRI2 +#define MMEA5_GMI_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA5_GMI_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA5_GMI_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA5_GMI_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA5_GMI_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA5_GMI_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA5_GMI_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA5_GMI_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA5_GMI_RD_PRI_QUANT_PRI3 +#define MMEA5_GMI_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA5_GMI_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA5_GMI_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA5_GMI_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA5_GMI_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA5_GMI_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA5_GMI_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA5_GMI_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA5_GMI_WR_PRI_QUANT_PRI1 +#define MMEA5_GMI_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA5_GMI_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA5_GMI_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA5_GMI_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA5_GMI_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA5_GMI_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA5_GMI_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA5_GMI_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA5_GMI_WR_PRI_QUANT_PRI2 +#define MMEA5_GMI_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA5_GMI_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA5_GMI_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA5_GMI_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA5_GMI_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA5_GMI_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA5_GMI_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L 
+#define MMEA5_GMI_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA5_GMI_WR_PRI_QUANT_PRI3 +#define MMEA5_GMI_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA5_GMI_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA5_GMI_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA5_GMI_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA5_GMI_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA5_GMI_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA5_GMI_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA5_GMI_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA5_ADDRNORM_BASE_ADDR0 +#define MMEA5_ADDRNORM_BASE_ADDR0__ADDR_RNG_VAL__SHIFT 0x0 +#define MMEA5_ADDRNORM_BASE_ADDR0__LGCY_MMIO_HOLE_EN__SHIFT 0x1 +#define MMEA5_ADDRNORM_BASE_ADDR0__INTLV_NUM_CHAN__SHIFT 0x2 +#define MMEA5_ADDRNORM_BASE_ADDR0__INTLV_NUM_DIES__SHIFT 0x6 +#define MMEA5_ADDRNORM_BASE_ADDR0__INTLV_NUM_SOCKETS__SHIFT 0x8 +#define MMEA5_ADDRNORM_BASE_ADDR0__INTLV_ADDR_SEL__SHIFT 0x9 +#define MMEA5_ADDRNORM_BASE_ADDR0__BASE_ADDR__SHIFT 0xc +#define MMEA5_ADDRNORM_BASE_ADDR0__ADDR_RNG_VAL_MASK 0x00000001L +#define MMEA5_ADDRNORM_BASE_ADDR0__LGCY_MMIO_HOLE_EN_MASK 0x00000002L +#define MMEA5_ADDRNORM_BASE_ADDR0__INTLV_NUM_CHAN_MASK 0x0000003CL +#define MMEA5_ADDRNORM_BASE_ADDR0__INTLV_NUM_DIES_MASK 0x000000C0L +#define MMEA5_ADDRNORM_BASE_ADDR0__INTLV_NUM_SOCKETS_MASK 0x00000100L +#define MMEA5_ADDRNORM_BASE_ADDR0__INTLV_ADDR_SEL_MASK 0x00000E00L +#define MMEA5_ADDRNORM_BASE_ADDR0__BASE_ADDR_MASK 0xFFFFF000L +//MMEA5_ADDRNORM_LIMIT_ADDR0 +#define MMEA5_ADDRNORM_LIMIT_ADDR0__DST_FABRIC_ID__SHIFT 0x0 +#define MMEA5_ADDRNORM_LIMIT_ADDR0__LIMIT_ADDR__SHIFT 0xc +#define MMEA5_ADDRNORM_LIMIT_ADDR0__DST_FABRIC_ID_MASK 0x0000001FL +#define MMEA5_ADDRNORM_LIMIT_ADDR0__LIMIT_ADDR_MASK 0xFFFFF000L +//MMEA5_ADDRNORM_BASE_ADDR1 +#define MMEA5_ADDRNORM_BASE_ADDR1__ADDR_RNG_VAL__SHIFT 0x0 +#define MMEA5_ADDRNORM_BASE_ADDR1__LGCY_MMIO_HOLE_EN__SHIFT 0x1 +#define MMEA5_ADDRNORM_BASE_ADDR1__INTLV_NUM_CHAN__SHIFT 0x2 +#define MMEA5_ADDRNORM_BASE_ADDR1__INTLV_NUM_DIES__SHIFT 0x6 +#define MMEA5_ADDRNORM_BASE_ADDR1__INTLV_NUM_SOCKETS__SHIFT 0x8 +#define MMEA5_ADDRNORM_BASE_ADDR1__INTLV_ADDR_SEL__SHIFT 0x9 +#define MMEA5_ADDRNORM_BASE_ADDR1__BASE_ADDR__SHIFT 0xc +#define MMEA5_ADDRNORM_BASE_ADDR1__ADDR_RNG_VAL_MASK 0x00000001L +#define MMEA5_ADDRNORM_BASE_ADDR1__LGCY_MMIO_HOLE_EN_MASK 0x00000002L +#define MMEA5_ADDRNORM_BASE_ADDR1__INTLV_NUM_CHAN_MASK 0x0000003CL +#define MMEA5_ADDRNORM_BASE_ADDR1__INTLV_NUM_DIES_MASK 0x000000C0L +#define MMEA5_ADDRNORM_BASE_ADDR1__INTLV_NUM_SOCKETS_MASK 0x00000100L +#define MMEA5_ADDRNORM_BASE_ADDR1__INTLV_ADDR_SEL_MASK 0x00000E00L +#define MMEA5_ADDRNORM_BASE_ADDR1__BASE_ADDR_MASK 0xFFFFF000L +//MMEA5_ADDRNORM_LIMIT_ADDR1 +#define MMEA5_ADDRNORM_LIMIT_ADDR1__DST_FABRIC_ID__SHIFT 0x0 +#define MMEA5_ADDRNORM_LIMIT_ADDR1__LIMIT_ADDR__SHIFT 0xc +#define MMEA5_ADDRNORM_LIMIT_ADDR1__DST_FABRIC_ID_MASK 0x0000001FL +#define MMEA5_ADDRNORM_LIMIT_ADDR1__LIMIT_ADDR_MASK 0xFFFFF000L +//MMEA5_ADDRNORM_OFFSET_ADDR1 +#define MMEA5_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_EN__SHIFT 0x0 +#define MMEA5_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET__SHIFT 0x14 +#define MMEA5_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_EN_MASK 0x00000001L +#define MMEA5_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_MASK 0xFFF00000L +//MMEA5_ADDRNORM_BASE_ADDR2 +#define MMEA5_ADDRNORM_BASE_ADDR2__ADDR_RNG_VAL__SHIFT 0x0 +#define MMEA5_ADDRNORM_BASE_ADDR2__LGCY_MMIO_HOLE_EN__SHIFT 
0x1 +#define MMEA5_ADDRNORM_BASE_ADDR2__INTLV_NUM_CHAN__SHIFT 0x2 +#define MMEA5_ADDRNORM_BASE_ADDR2__INTLV_NUM_DIES__SHIFT 0x6 +#define MMEA5_ADDRNORM_BASE_ADDR2__INTLV_NUM_SOCKETS__SHIFT 0x8 +#define MMEA5_ADDRNORM_BASE_ADDR2__INTLV_ADDR_SEL__SHIFT 0x9 +#define MMEA5_ADDRNORM_BASE_ADDR2__BASE_ADDR__SHIFT 0xc +#define MMEA5_ADDRNORM_BASE_ADDR2__ADDR_RNG_VAL_MASK 0x00000001L +#define MMEA5_ADDRNORM_BASE_ADDR2__LGCY_MMIO_HOLE_EN_MASK 0x00000002L +#define MMEA5_ADDRNORM_BASE_ADDR2__INTLV_NUM_CHAN_MASK 0x0000003CL +#define MMEA5_ADDRNORM_BASE_ADDR2__INTLV_NUM_DIES_MASK 0x000000C0L +#define MMEA5_ADDRNORM_BASE_ADDR2__INTLV_NUM_SOCKETS_MASK 0x00000100L +#define MMEA5_ADDRNORM_BASE_ADDR2__INTLV_ADDR_SEL_MASK 0x00000E00L +#define MMEA5_ADDRNORM_BASE_ADDR2__BASE_ADDR_MASK 0xFFFFF000L +//MMEA5_ADDRNORM_LIMIT_ADDR2 +#define MMEA5_ADDRNORM_LIMIT_ADDR2__DST_FABRIC_ID__SHIFT 0x0 +#define MMEA5_ADDRNORM_LIMIT_ADDR2__LIMIT_ADDR__SHIFT 0xc +#define MMEA5_ADDRNORM_LIMIT_ADDR2__DST_FABRIC_ID_MASK 0x0000001FL +#define MMEA5_ADDRNORM_LIMIT_ADDR2__LIMIT_ADDR_MASK 0xFFFFF000L +//MMEA5_ADDRNORM_BASE_ADDR3 +#define MMEA5_ADDRNORM_BASE_ADDR3__ADDR_RNG_VAL__SHIFT 0x0 +#define MMEA5_ADDRNORM_BASE_ADDR3__LGCY_MMIO_HOLE_EN__SHIFT 0x1 +#define MMEA5_ADDRNORM_BASE_ADDR3__INTLV_NUM_CHAN__SHIFT 0x2 +#define MMEA5_ADDRNORM_BASE_ADDR3__INTLV_NUM_DIES__SHIFT 0x6 +#define MMEA5_ADDRNORM_BASE_ADDR3__INTLV_NUM_SOCKETS__SHIFT 0x8 +#define MMEA5_ADDRNORM_BASE_ADDR3__INTLV_ADDR_SEL__SHIFT 0x9 +#define MMEA5_ADDRNORM_BASE_ADDR3__BASE_ADDR__SHIFT 0xc +#define MMEA5_ADDRNORM_BASE_ADDR3__ADDR_RNG_VAL_MASK 0x00000001L +#define MMEA5_ADDRNORM_BASE_ADDR3__LGCY_MMIO_HOLE_EN_MASK 0x00000002L +#define MMEA5_ADDRNORM_BASE_ADDR3__INTLV_NUM_CHAN_MASK 0x0000003CL +#define MMEA5_ADDRNORM_BASE_ADDR3__INTLV_NUM_DIES_MASK 0x000000C0L +#define MMEA5_ADDRNORM_BASE_ADDR3__INTLV_NUM_SOCKETS_MASK 0x00000100L +#define MMEA5_ADDRNORM_BASE_ADDR3__INTLV_ADDR_SEL_MASK 0x00000E00L +#define MMEA5_ADDRNORM_BASE_ADDR3__BASE_ADDR_MASK 0xFFFFF000L +//MMEA5_ADDRNORM_LIMIT_ADDR3 +#define MMEA5_ADDRNORM_LIMIT_ADDR3__DST_FABRIC_ID__SHIFT 0x0 +#define MMEA5_ADDRNORM_LIMIT_ADDR3__LIMIT_ADDR__SHIFT 0xc +#define MMEA5_ADDRNORM_LIMIT_ADDR3__DST_FABRIC_ID_MASK 0x0000001FL +#define MMEA5_ADDRNORM_LIMIT_ADDR3__LIMIT_ADDR_MASK 0xFFFFF000L +//MMEA5_ADDRNORM_OFFSET_ADDR3 +#define MMEA5_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_EN__SHIFT 0x0 +#define MMEA5_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET__SHIFT 0x14 +#define MMEA5_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_EN_MASK 0x00000001L +#define MMEA5_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_MASK 0xFFF00000L +//MMEA5_ADDRNORM_BASE_ADDR4 +#define MMEA5_ADDRNORM_BASE_ADDR4__ADDR_RNG_VAL__SHIFT 0x0 +#define MMEA5_ADDRNORM_BASE_ADDR4__LGCY_MMIO_HOLE_EN__SHIFT 0x1 +#define MMEA5_ADDRNORM_BASE_ADDR4__INTLV_NUM_CHAN__SHIFT 0x2 +#define MMEA5_ADDRNORM_BASE_ADDR4__INTLV_NUM_DIES__SHIFT 0x6 +#define MMEA5_ADDRNORM_BASE_ADDR4__INTLV_NUM_SOCKETS__SHIFT 0x8 +#define MMEA5_ADDRNORM_BASE_ADDR4__INTLV_ADDR_SEL__SHIFT 0x9 +#define MMEA5_ADDRNORM_BASE_ADDR4__BASE_ADDR__SHIFT 0xc +#define MMEA5_ADDRNORM_BASE_ADDR4__ADDR_RNG_VAL_MASK 0x00000001L +#define MMEA5_ADDRNORM_BASE_ADDR4__LGCY_MMIO_HOLE_EN_MASK 0x00000002L +#define MMEA5_ADDRNORM_BASE_ADDR4__INTLV_NUM_CHAN_MASK 0x0000003CL +#define MMEA5_ADDRNORM_BASE_ADDR4__INTLV_NUM_DIES_MASK 0x000000C0L +#define MMEA5_ADDRNORM_BASE_ADDR4__INTLV_NUM_SOCKETS_MASK 0x00000100L +#define MMEA5_ADDRNORM_BASE_ADDR4__INTLV_ADDR_SEL_MASK 0x00000E00L +#define MMEA5_ADDRNORM_BASE_ADDR4__BASE_ADDR_MASK 0xFFFFF000L 
+//MMEA5_ADDRNORM_LIMIT_ADDR4 +#define MMEA5_ADDRNORM_LIMIT_ADDR4__DST_FABRIC_ID__SHIFT 0x0 +#define MMEA5_ADDRNORM_LIMIT_ADDR4__LIMIT_ADDR__SHIFT 0xc +#define MMEA5_ADDRNORM_LIMIT_ADDR4__DST_FABRIC_ID_MASK 0x0000001FL +#define MMEA5_ADDRNORM_LIMIT_ADDR4__LIMIT_ADDR_MASK 0xFFFFF000L +//MMEA5_ADDRNORM_BASE_ADDR5 +#define MMEA5_ADDRNORM_BASE_ADDR5__ADDR_RNG_VAL__SHIFT 0x0 +#define MMEA5_ADDRNORM_BASE_ADDR5__LGCY_MMIO_HOLE_EN__SHIFT 0x1 +#define MMEA5_ADDRNORM_BASE_ADDR5__INTLV_NUM_CHAN__SHIFT 0x2 +#define MMEA5_ADDRNORM_BASE_ADDR5__INTLV_NUM_DIES__SHIFT 0x6 +#define MMEA5_ADDRNORM_BASE_ADDR5__INTLV_NUM_SOCKETS__SHIFT 0x8 +#define MMEA5_ADDRNORM_BASE_ADDR5__INTLV_ADDR_SEL__SHIFT 0x9 +#define MMEA5_ADDRNORM_BASE_ADDR5__BASE_ADDR__SHIFT 0xc +#define MMEA5_ADDRNORM_BASE_ADDR5__ADDR_RNG_VAL_MASK 0x00000001L +#define MMEA5_ADDRNORM_BASE_ADDR5__LGCY_MMIO_HOLE_EN_MASK 0x00000002L +#define MMEA5_ADDRNORM_BASE_ADDR5__INTLV_NUM_CHAN_MASK 0x0000003CL +#define MMEA5_ADDRNORM_BASE_ADDR5__INTLV_NUM_DIES_MASK 0x000000C0L +#define MMEA5_ADDRNORM_BASE_ADDR5__INTLV_NUM_SOCKETS_MASK 0x00000100L +#define MMEA5_ADDRNORM_BASE_ADDR5__INTLV_ADDR_SEL_MASK 0x00000E00L +#define MMEA5_ADDRNORM_BASE_ADDR5__BASE_ADDR_MASK 0xFFFFF000L +//MMEA5_ADDRNORM_LIMIT_ADDR5 +#define MMEA5_ADDRNORM_LIMIT_ADDR5__DST_FABRIC_ID__SHIFT 0x0 +#define MMEA5_ADDRNORM_LIMIT_ADDR5__LIMIT_ADDR__SHIFT 0xc +#define MMEA5_ADDRNORM_LIMIT_ADDR5__DST_FABRIC_ID_MASK 0x0000001FL +#define MMEA5_ADDRNORM_LIMIT_ADDR5__LIMIT_ADDR_MASK 0xFFFFF000L +//MMEA5_ADDRNORM_OFFSET_ADDR5 +#define MMEA5_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_EN__SHIFT 0x0 +#define MMEA5_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET__SHIFT 0x14 +#define MMEA5_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_EN_MASK 0x00000001L +#define MMEA5_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_MASK 0xFFF00000L +//MMEA5_ADDRNORMDRAM_HOLE_CNTL +#define MMEA5_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_VALID__SHIFT 0x0 +#define MMEA5_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_OFFSET__SHIFT 0x7 +#define MMEA5_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_VALID_MASK 0x00000001L +#define MMEA5_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_OFFSET_MASK 0x0000FF80L +//MMEA5_ADDRNORMGMI_HOLE_CNTL +#define MMEA5_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_VALID__SHIFT 0x0 +#define MMEA5_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_OFFSET__SHIFT 0x7 +#define MMEA5_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_VALID_MASK 0x00000001L +#define MMEA5_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_OFFSET_MASK 0x0000FF80L +//MMEA5_ADDRNORMDRAM_NP2_CHANNEL_CFG +#define MMEA5_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE0__SHIFT 0x0 +#define MMEA5_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE1__SHIFT 0x6 +#define MMEA5_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE0_MASK 0x0000003FL +#define MMEA5_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE1_MASK 0x00000FC0L +//MMEA5_ADDRNORMGMI_NP2_CHANNEL_CFG +#define MMEA5_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE2__SHIFT 0x0 +#define MMEA5_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE3__SHIFT 0x6 +#define MMEA5_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE2_MASK 0x0000003FL +#define MMEA5_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE3_MASK 0x00000FC0L +//MMEA5_ADDRDEC_BANK_CFG +#define MMEA5_ADDRDEC_BANK_CFG__BANK_MASK_DRAM__SHIFT 0x0 +#define MMEA5_ADDRDEC_BANK_CFG__BANK_MASK_GMI__SHIFT 0x6 +#define MMEA5_ADDRDEC_BANK_CFG__BANKGROUP_SEL_DRAM__SHIFT 0xc +#define MMEA5_ADDRDEC_BANK_CFG__BANKGROUP_SEL_GMI__SHIFT 0xf +#define MMEA5_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_DRAM__SHIFT 0x12 +#define MMEA5_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_GMI__SHIFT 0x13 +#define 
MMEA5_ADDRDEC_BANK_CFG__BANK_MASK_DRAM_MASK 0x0000003FL +#define MMEA5_ADDRDEC_BANK_CFG__BANK_MASK_GMI_MASK 0x00000FC0L +#define MMEA5_ADDRDEC_BANK_CFG__BANKGROUP_SEL_DRAM_MASK 0x00007000L +#define MMEA5_ADDRDEC_BANK_CFG__BANKGROUP_SEL_GMI_MASK 0x00038000L +#define MMEA5_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_DRAM_MASK 0x00040000L +#define MMEA5_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_GMI_MASK 0x00080000L +//MMEA5_ADDRDEC_MISC_CFG +#define MMEA5_ADDRDEC_MISC_CFG__VCM_EN0__SHIFT 0x0 +#define MMEA5_ADDRDEC_MISC_CFG__VCM_EN1__SHIFT 0x1 +#define MMEA5_ADDRDEC_MISC_CFG__VCM_EN2__SHIFT 0x2 +#define MMEA5_ADDRDEC_MISC_CFG__PCH_MASK_DRAM__SHIFT 0x8 +#define MMEA5_ADDRDEC_MISC_CFG__PCH_MASK_GMI__SHIFT 0x9 +#define MMEA5_ADDRDEC_MISC_CFG__CH_MASK_DRAM__SHIFT 0xc +#define MMEA5_ADDRDEC_MISC_CFG__CH_MASK_GMI__SHIFT 0x11 +#define MMEA5_ADDRDEC_MISC_CFG__CS_MASK_DRAM__SHIFT 0x16 +#define MMEA5_ADDRDEC_MISC_CFG__CS_MASK_GMI__SHIFT 0x18 +#define MMEA5_ADDRDEC_MISC_CFG__RM_MASK_DRAM__SHIFT 0x1a +#define MMEA5_ADDRDEC_MISC_CFG__RM_MASK_GMI__SHIFT 0x1d +#define MMEA5_ADDRDEC_MISC_CFG__VCM_EN0_MASK 0x00000001L +#define MMEA5_ADDRDEC_MISC_CFG__VCM_EN1_MASK 0x00000002L +#define MMEA5_ADDRDEC_MISC_CFG__VCM_EN2_MASK 0x00000004L +#define MMEA5_ADDRDEC_MISC_CFG__PCH_MASK_DRAM_MASK 0x00000100L +#define MMEA5_ADDRDEC_MISC_CFG__PCH_MASK_GMI_MASK 0x00000200L +#define MMEA5_ADDRDEC_MISC_CFG__CH_MASK_DRAM_MASK 0x0001F000L +#define MMEA5_ADDRDEC_MISC_CFG__CH_MASK_GMI_MASK 0x003E0000L +#define MMEA5_ADDRDEC_MISC_CFG__CS_MASK_DRAM_MASK 0x00C00000L +#define MMEA5_ADDRDEC_MISC_CFG__CS_MASK_GMI_MASK 0x03000000L +#define MMEA5_ADDRDEC_MISC_CFG__RM_MASK_DRAM_MASK 0x1C000000L +#define MMEA5_ADDRDEC_MISC_CFG__RM_MASK_GMI_MASK 0xE0000000L +//MMEA5_ADDRDECDRAM_ADDR_HASH_BANK0 +#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK0__XOR_ENABLE__SHIFT 0x0 +#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK0__COL_XOR__SHIFT 0x1 +#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK0__ROW_XOR__SHIFT 0xe +#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK0__XOR_ENABLE_MASK 0x00000001L +#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK0__COL_XOR_MASK 0x00003FFEL +#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK0__ROW_XOR_MASK 0xFFFFC000L +//MMEA5_ADDRDECDRAM_ADDR_HASH_BANK1 +#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK1__XOR_ENABLE__SHIFT 0x0 +#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK1__COL_XOR__SHIFT 0x1 +#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK1__ROW_XOR__SHIFT 0xe +#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK1__XOR_ENABLE_MASK 0x00000001L +#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK1__COL_XOR_MASK 0x00003FFEL +#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK1__ROW_XOR_MASK 0xFFFFC000L +//MMEA5_ADDRDECDRAM_ADDR_HASH_BANK2 +#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK2__XOR_ENABLE__SHIFT 0x0 +#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK2__COL_XOR__SHIFT 0x1 +#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK2__ROW_XOR__SHIFT 0xe +#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK2__XOR_ENABLE_MASK 0x00000001L +#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK2__COL_XOR_MASK 0x00003FFEL +#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK2__ROW_XOR_MASK 0xFFFFC000L +//MMEA5_ADDRDECDRAM_ADDR_HASH_BANK3 +#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK3__XOR_ENABLE__SHIFT 0x0 +#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK3__COL_XOR__SHIFT 0x1 +#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK3__ROW_XOR__SHIFT 0xe +#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK3__XOR_ENABLE_MASK 0x00000001L +#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK3__COL_XOR_MASK 0x00003FFEL +#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK3__ROW_XOR_MASK 0xFFFFC000L +//MMEA5_ADDRDECDRAM_ADDR_HASH_BANK4 +#define 
MMEA5_ADDRDECDRAM_ADDR_HASH_BANK4__XOR_ENABLE__SHIFT 0x0 +#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK4__COL_XOR__SHIFT 0x1 +#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK4__ROW_XOR__SHIFT 0xe +#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK4__XOR_ENABLE_MASK 0x00000001L +#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK4__COL_XOR_MASK 0x00003FFEL +#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK4__ROW_XOR_MASK 0xFFFFC000L +//MMEA5_ADDRDECDRAM_ADDR_HASH_BANK5 +#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK5__XOR_ENABLE__SHIFT 0x0 +#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK5__COL_XOR__SHIFT 0x1 +#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK5__ROW_XOR__SHIFT 0xe +#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK5__XOR_ENABLE_MASK 0x00000001L +#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK5__COL_XOR_MASK 0x00003FFEL +#define MMEA5_ADDRDECDRAM_ADDR_HASH_BANK5__ROW_XOR_MASK 0xFFFFC000L +//MMEA5_ADDRDECDRAM_ADDR_HASH_PC +#define MMEA5_ADDRDECDRAM_ADDR_HASH_PC__XOR_ENABLE__SHIFT 0x0 +#define MMEA5_ADDRDECDRAM_ADDR_HASH_PC__COL_XOR__SHIFT 0x1 +#define MMEA5_ADDRDECDRAM_ADDR_HASH_PC__ROW_XOR__SHIFT 0xe +#define MMEA5_ADDRDECDRAM_ADDR_HASH_PC__XOR_ENABLE_MASK 0x00000001L +#define MMEA5_ADDRDECDRAM_ADDR_HASH_PC__COL_XOR_MASK 0x00003FFEL +#define MMEA5_ADDRDECDRAM_ADDR_HASH_PC__ROW_XOR_MASK 0xFFFFC000L +//MMEA5_ADDRDECDRAM_ADDR_HASH_PC2 +#define MMEA5_ADDRDECDRAM_ADDR_HASH_PC2__BANK_XOR__SHIFT 0x0 +#define MMEA5_ADDRDECDRAM_ADDR_HASH_PC2__BANK_XOR_MASK 0x0000003FL +//MMEA5_ADDRDECDRAM_ADDR_HASH_CS0 +#define MMEA5_ADDRDECDRAM_ADDR_HASH_CS0__XOR_ENABLE__SHIFT 0x0 +#define MMEA5_ADDRDECDRAM_ADDR_HASH_CS0__NA_XOR__SHIFT 0x1 +#define MMEA5_ADDRDECDRAM_ADDR_HASH_CS0__XOR_ENABLE_MASK 0x00000001L +#define MMEA5_ADDRDECDRAM_ADDR_HASH_CS0__NA_XOR_MASK 0xFFFFFFFEL +//MMEA5_ADDRDECDRAM_ADDR_HASH_CS1 +#define MMEA5_ADDRDECDRAM_ADDR_HASH_CS1__XOR_ENABLE__SHIFT 0x0 +#define MMEA5_ADDRDECDRAM_ADDR_HASH_CS1__NA_XOR__SHIFT 0x1 +#define MMEA5_ADDRDECDRAM_ADDR_HASH_CS1__XOR_ENABLE_MASK 0x00000001L +#define MMEA5_ADDRDECDRAM_ADDR_HASH_CS1__NA_XOR_MASK 0xFFFFFFFEL +//MMEA5_ADDRDECDRAM_HARVEST_ENABLE +#define MMEA5_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_EN__SHIFT 0x0 +#define MMEA5_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_VAL__SHIFT 0x1 +#define MMEA5_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_EN__SHIFT 0x2 +#define MMEA5_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_VAL__SHIFT 0x3 +#define MMEA5_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_EN__SHIFT 0x4 +#define MMEA5_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_VAL__SHIFT 0x5 +#define MMEA5_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_EN_MASK 0x00000001L +#define MMEA5_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_VAL_MASK 0x00000002L +#define MMEA5_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_EN_MASK 0x00000004L +#define MMEA5_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_VAL_MASK 0x00000008L +#define MMEA5_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_EN_MASK 0x00000010L +#define MMEA5_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_VAL_MASK 0x00000020L +//MMEA5_ADDRDECGMI_ADDR_HASH_BANK0 +#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK0__XOR_ENABLE__SHIFT 0x0 +#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK0__COL_XOR__SHIFT 0x1 +#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK0__ROW_XOR__SHIFT 0xe +#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK0__XOR_ENABLE_MASK 0x00000001L +#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK0__COL_XOR_MASK 0x00003FFEL +#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK0__ROW_XOR_MASK 0xFFFFC000L +//MMEA5_ADDRDECGMI_ADDR_HASH_BANK1 +#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK1__XOR_ENABLE__SHIFT 0x0 +#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK1__COL_XOR__SHIFT 0x1 +#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK1__ROW_XOR__SHIFT 0xe +#define 
MMEA5_ADDRDECGMI_ADDR_HASH_BANK1__XOR_ENABLE_MASK 0x00000001L +#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK1__COL_XOR_MASK 0x00003FFEL +#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK1__ROW_XOR_MASK 0xFFFFC000L +//MMEA5_ADDRDECGMI_ADDR_HASH_BANK2 +#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK2__XOR_ENABLE__SHIFT 0x0 +#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK2__COL_XOR__SHIFT 0x1 +#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK2__ROW_XOR__SHIFT 0xe +#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK2__XOR_ENABLE_MASK 0x00000001L +#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK2__COL_XOR_MASK 0x00003FFEL +#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK2__ROW_XOR_MASK 0xFFFFC000L +//MMEA5_ADDRDECGMI_ADDR_HASH_BANK3 +#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK3__XOR_ENABLE__SHIFT 0x0 +#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK3__COL_XOR__SHIFT 0x1 +#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK3__ROW_XOR__SHIFT 0xe +#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK3__XOR_ENABLE_MASK 0x00000001L +#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK3__COL_XOR_MASK 0x00003FFEL +#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK3__ROW_XOR_MASK 0xFFFFC000L +//MMEA5_ADDRDECGMI_ADDR_HASH_BANK4 +#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK4__XOR_ENABLE__SHIFT 0x0 +#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK4__COL_XOR__SHIFT 0x1 +#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK4__ROW_XOR__SHIFT 0xe +#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK4__XOR_ENABLE_MASK 0x00000001L +#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK4__COL_XOR_MASK 0x00003FFEL +#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK4__ROW_XOR_MASK 0xFFFFC000L +//MMEA5_ADDRDECGMI_ADDR_HASH_BANK5 +#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK5__XOR_ENABLE__SHIFT 0x0 +#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK5__COL_XOR__SHIFT 0x1 +#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK5__ROW_XOR__SHIFT 0xe +#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK5__XOR_ENABLE_MASK 0x00000001L +#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK5__COL_XOR_MASK 0x00003FFEL +#define MMEA5_ADDRDECGMI_ADDR_HASH_BANK5__ROW_XOR_MASK 0xFFFFC000L +//MMEA5_ADDRDECGMI_ADDR_HASH_PC +#define MMEA5_ADDRDECGMI_ADDR_HASH_PC__XOR_ENABLE__SHIFT 0x0 +#define MMEA5_ADDRDECGMI_ADDR_HASH_PC__COL_XOR__SHIFT 0x1 +#define MMEA5_ADDRDECGMI_ADDR_HASH_PC__ROW_XOR__SHIFT 0xe +#define MMEA5_ADDRDECGMI_ADDR_HASH_PC__XOR_ENABLE_MASK 0x00000001L +#define MMEA5_ADDRDECGMI_ADDR_HASH_PC__COL_XOR_MASK 0x00003FFEL +#define MMEA5_ADDRDECGMI_ADDR_HASH_PC__ROW_XOR_MASK 0xFFFFC000L +//MMEA5_ADDRDECGMI_ADDR_HASH_PC2 +#define MMEA5_ADDRDECGMI_ADDR_HASH_PC2__BANK_XOR__SHIFT 0x0 +#define MMEA5_ADDRDECGMI_ADDR_HASH_PC2__BANK_XOR_MASK 0x0000003FL +//MMEA5_ADDRDECGMI_ADDR_HASH_CS0 +#define MMEA5_ADDRDECGMI_ADDR_HASH_CS0__XOR_ENABLE__SHIFT 0x0 +#define MMEA5_ADDRDECGMI_ADDR_HASH_CS0__NA_XOR__SHIFT 0x1 +#define MMEA5_ADDRDECGMI_ADDR_HASH_CS0__XOR_ENABLE_MASK 0x00000001L +#define MMEA5_ADDRDECGMI_ADDR_HASH_CS0__NA_XOR_MASK 0xFFFFFFFEL +//MMEA5_ADDRDECGMI_ADDR_HASH_CS1 +#define MMEA5_ADDRDECGMI_ADDR_HASH_CS1__XOR_ENABLE__SHIFT 0x0 +#define MMEA5_ADDRDECGMI_ADDR_HASH_CS1__NA_XOR__SHIFT 0x1 +#define MMEA5_ADDRDECGMI_ADDR_HASH_CS1__XOR_ENABLE_MASK 0x00000001L +#define MMEA5_ADDRDECGMI_ADDR_HASH_CS1__NA_XOR_MASK 0xFFFFFFFEL +//MMEA5_ADDRDECGMI_HARVEST_ENABLE +#define MMEA5_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_EN__SHIFT 0x0 +#define MMEA5_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_VAL__SHIFT 0x1 +#define MMEA5_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_EN__SHIFT 0x2 +#define MMEA5_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_VAL__SHIFT 0x3 +#define MMEA5_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_EN__SHIFT 0x4 +#define MMEA5_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_VAL__SHIFT 0x5 +#define 
MMEA5_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_EN_MASK 0x00000001L +#define MMEA5_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_VAL_MASK 0x00000002L +#define MMEA5_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_EN_MASK 0x00000004L +#define MMEA5_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_VAL_MASK 0x00000008L +#define MMEA5_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_EN_MASK 0x00000010L +#define MMEA5_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_VAL_MASK 0x00000020L +//MMEA5_ADDRDEC0_BASE_ADDR_CS0 +#define MMEA5_ADDRDEC0_BASE_ADDR_CS0__CS_EN__SHIFT 0x0 +#define MMEA5_ADDRDEC0_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1 +#define MMEA5_ADDRDEC0_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L +#define MMEA5_ADDRDEC0_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA5_ADDRDEC0_BASE_ADDR_CS1 +#define MMEA5_ADDRDEC0_BASE_ADDR_CS1__CS_EN__SHIFT 0x0 +#define MMEA5_ADDRDEC0_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1 +#define MMEA5_ADDRDEC0_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L +#define MMEA5_ADDRDEC0_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA5_ADDRDEC0_BASE_ADDR_CS2 +#define MMEA5_ADDRDEC0_BASE_ADDR_CS2__CS_EN__SHIFT 0x0 +#define MMEA5_ADDRDEC0_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1 +#define MMEA5_ADDRDEC0_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L +#define MMEA5_ADDRDEC0_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA5_ADDRDEC0_BASE_ADDR_CS3 +#define MMEA5_ADDRDEC0_BASE_ADDR_CS3__CS_EN__SHIFT 0x0 +#define MMEA5_ADDRDEC0_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1 +#define MMEA5_ADDRDEC0_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L +#define MMEA5_ADDRDEC0_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA5_ADDRDEC0_BASE_ADDR_SECCS0 +#define MMEA5_ADDRDEC0_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0 +#define MMEA5_ADDRDEC0_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1 +#define MMEA5_ADDRDEC0_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L +#define MMEA5_ADDRDEC0_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA5_ADDRDEC0_BASE_ADDR_SECCS1 +#define MMEA5_ADDRDEC0_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0 +#define MMEA5_ADDRDEC0_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1 +#define MMEA5_ADDRDEC0_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L +#define MMEA5_ADDRDEC0_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA5_ADDRDEC0_BASE_ADDR_SECCS2 +#define MMEA5_ADDRDEC0_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0 +#define MMEA5_ADDRDEC0_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1 +#define MMEA5_ADDRDEC0_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L +#define MMEA5_ADDRDEC0_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA5_ADDRDEC0_BASE_ADDR_SECCS3 +#define MMEA5_ADDRDEC0_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0 +#define MMEA5_ADDRDEC0_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1 +#define MMEA5_ADDRDEC0_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L +#define MMEA5_ADDRDEC0_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA5_ADDRDEC0_ADDR_MASK_CS01 +#define MMEA5_ADDRDEC0_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1 +#define MMEA5_ADDRDEC0_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA5_ADDRDEC0_ADDR_MASK_CS23 +#define MMEA5_ADDRDEC0_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1 +#define MMEA5_ADDRDEC0_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA5_ADDRDEC0_ADDR_MASK_SECCS01 +#define MMEA5_ADDRDEC0_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1 +#define MMEA5_ADDRDEC0_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA5_ADDRDEC0_ADDR_MASK_SECCS23 +#define MMEA5_ADDRDEC0_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1 +#define MMEA5_ADDRDEC0_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA5_ADDRDEC0_ADDR_CFG_CS01 +#define MMEA5_ADDRDEC0_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA5_ADDRDEC0_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4 +#define 
MMEA5_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8 +#define MMEA5_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc +#define MMEA5_ADDRDEC0_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10 +#define MMEA5_ADDRDEC0_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14 +#define MMEA5_ADDRDEC0_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f +#define MMEA5_ADDRDEC0_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA5_ADDRDEC0_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L +#define MMEA5_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA5_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA5_ADDRDEC0_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L +#define MMEA5_ADDRDEC0_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L +#define MMEA5_ADDRDEC0_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L +//MMEA5_ADDRDEC0_ADDR_CFG_CS23 +#define MMEA5_ADDRDEC0_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA5_ADDRDEC0_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4 +#define MMEA5_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8 +#define MMEA5_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc +#define MMEA5_ADDRDEC0_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10 +#define MMEA5_ADDRDEC0_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14 +#define MMEA5_ADDRDEC0_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f +#define MMEA5_ADDRDEC0_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA5_ADDRDEC0_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L +#define MMEA5_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA5_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA5_ADDRDEC0_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L +#define MMEA5_ADDRDEC0_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L +#define MMEA5_ADDRDEC0_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L +//MMEA5_ADDRDEC0_ADDR_SEL_CS01 +#define MMEA5_ADDRDEC0_ADDR_SEL_CS01__BANK0__SHIFT 0x0 +#define MMEA5_ADDRDEC0_ADDR_SEL_CS01__BANK1__SHIFT 0x4 +#define MMEA5_ADDRDEC0_ADDR_SEL_CS01__BANK2__SHIFT 0x8 +#define MMEA5_ADDRDEC0_ADDR_SEL_CS01__BANK3__SHIFT 0xc +#define MMEA5_ADDRDEC0_ADDR_SEL_CS01__BANK4__SHIFT 0x10 +#define MMEA5_ADDRDEC0_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18 +#define MMEA5_ADDRDEC0_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c +#define MMEA5_ADDRDEC0_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL +#define MMEA5_ADDRDEC0_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L +#define MMEA5_ADDRDEC0_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L +#define MMEA5_ADDRDEC0_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L +#define MMEA5_ADDRDEC0_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L +#define MMEA5_ADDRDEC0_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L +#define MMEA5_ADDRDEC0_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L +//MMEA5_ADDRDEC0_ADDR_SEL_CS23 +#define MMEA5_ADDRDEC0_ADDR_SEL_CS23__BANK0__SHIFT 0x0 +#define MMEA5_ADDRDEC0_ADDR_SEL_CS23__BANK1__SHIFT 0x4 +#define MMEA5_ADDRDEC0_ADDR_SEL_CS23__BANK2__SHIFT 0x8 +#define MMEA5_ADDRDEC0_ADDR_SEL_CS23__BANK3__SHIFT 0xc +#define MMEA5_ADDRDEC0_ADDR_SEL_CS23__BANK4__SHIFT 0x10 +#define MMEA5_ADDRDEC0_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18 +#define MMEA5_ADDRDEC0_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c +#define MMEA5_ADDRDEC0_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL +#define MMEA5_ADDRDEC0_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L +#define MMEA5_ADDRDEC0_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L +#define MMEA5_ADDRDEC0_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L +#define MMEA5_ADDRDEC0_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L +#define MMEA5_ADDRDEC0_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L +#define MMEA5_ADDRDEC0_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L +//MMEA5_ADDRDEC0_ADDR_SEL2_CS01 +#define MMEA5_ADDRDEC0_ADDR_SEL2_CS01__BANK5__SHIFT 0x0 +#define MMEA5_ADDRDEC0_ADDR_SEL2_CS01__BANK5_MASK 
0x0000001FL +//MMEA5_ADDRDEC0_ADDR_SEL2_CS23 +#define MMEA5_ADDRDEC0_ADDR_SEL2_CS23__BANK5__SHIFT 0x0 +#define MMEA5_ADDRDEC0_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL +//MMEA5_ADDRDEC0_COL_SEL_LO_CS01 +#define MMEA5_ADDRDEC0_COL_SEL_LO_CS01__COL0__SHIFT 0x0 +#define MMEA5_ADDRDEC0_COL_SEL_LO_CS01__COL1__SHIFT 0x4 +#define MMEA5_ADDRDEC0_COL_SEL_LO_CS01__COL2__SHIFT 0x8 +#define MMEA5_ADDRDEC0_COL_SEL_LO_CS01__COL3__SHIFT 0xc +#define MMEA5_ADDRDEC0_COL_SEL_LO_CS01__COL4__SHIFT 0x10 +#define MMEA5_ADDRDEC0_COL_SEL_LO_CS01__COL5__SHIFT 0x14 +#define MMEA5_ADDRDEC0_COL_SEL_LO_CS01__COL6__SHIFT 0x18 +#define MMEA5_ADDRDEC0_COL_SEL_LO_CS01__COL7__SHIFT 0x1c +#define MMEA5_ADDRDEC0_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL +#define MMEA5_ADDRDEC0_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L +#define MMEA5_ADDRDEC0_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L +#define MMEA5_ADDRDEC0_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L +#define MMEA5_ADDRDEC0_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L +#define MMEA5_ADDRDEC0_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L +#define MMEA5_ADDRDEC0_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L +#define MMEA5_ADDRDEC0_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L +//MMEA5_ADDRDEC0_COL_SEL_LO_CS23 +#define MMEA5_ADDRDEC0_COL_SEL_LO_CS23__COL0__SHIFT 0x0 +#define MMEA5_ADDRDEC0_COL_SEL_LO_CS23__COL1__SHIFT 0x4 +#define MMEA5_ADDRDEC0_COL_SEL_LO_CS23__COL2__SHIFT 0x8 +#define MMEA5_ADDRDEC0_COL_SEL_LO_CS23__COL3__SHIFT 0xc +#define MMEA5_ADDRDEC0_COL_SEL_LO_CS23__COL4__SHIFT 0x10 +#define MMEA5_ADDRDEC0_COL_SEL_LO_CS23__COL5__SHIFT 0x14 +#define MMEA5_ADDRDEC0_COL_SEL_LO_CS23__COL6__SHIFT 0x18 +#define MMEA5_ADDRDEC0_COL_SEL_LO_CS23__COL7__SHIFT 0x1c +#define MMEA5_ADDRDEC0_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL +#define MMEA5_ADDRDEC0_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L +#define MMEA5_ADDRDEC0_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L +#define MMEA5_ADDRDEC0_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L +#define MMEA5_ADDRDEC0_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L +#define MMEA5_ADDRDEC0_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L +#define MMEA5_ADDRDEC0_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L +#define MMEA5_ADDRDEC0_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L +//MMEA5_ADDRDEC0_COL_SEL_HI_CS01 +#define MMEA5_ADDRDEC0_COL_SEL_HI_CS01__COL8__SHIFT 0x0 +#define MMEA5_ADDRDEC0_COL_SEL_HI_CS01__COL9__SHIFT 0x4 +#define MMEA5_ADDRDEC0_COL_SEL_HI_CS01__COL10__SHIFT 0x8 +#define MMEA5_ADDRDEC0_COL_SEL_HI_CS01__COL11__SHIFT 0xc +#define MMEA5_ADDRDEC0_COL_SEL_HI_CS01__COL12__SHIFT 0x10 +#define MMEA5_ADDRDEC0_COL_SEL_HI_CS01__COL13__SHIFT 0x14 +#define MMEA5_ADDRDEC0_COL_SEL_HI_CS01__COL14__SHIFT 0x18 +#define MMEA5_ADDRDEC0_COL_SEL_HI_CS01__COL15__SHIFT 0x1c +#define MMEA5_ADDRDEC0_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL +#define MMEA5_ADDRDEC0_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L +#define MMEA5_ADDRDEC0_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L +#define MMEA5_ADDRDEC0_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L +#define MMEA5_ADDRDEC0_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L +#define MMEA5_ADDRDEC0_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L +#define MMEA5_ADDRDEC0_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L +#define MMEA5_ADDRDEC0_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L +//MMEA5_ADDRDEC0_COL_SEL_HI_CS23 +#define MMEA5_ADDRDEC0_COL_SEL_HI_CS23__COL8__SHIFT 0x0 +#define MMEA5_ADDRDEC0_COL_SEL_HI_CS23__COL9__SHIFT 0x4 +#define MMEA5_ADDRDEC0_COL_SEL_HI_CS23__COL10__SHIFT 0x8 +#define MMEA5_ADDRDEC0_COL_SEL_HI_CS23__COL11__SHIFT 0xc +#define MMEA5_ADDRDEC0_COL_SEL_HI_CS23__COL12__SHIFT 0x10 +#define MMEA5_ADDRDEC0_COL_SEL_HI_CS23__COL13__SHIFT 0x14 
+#define MMEA5_ADDRDEC0_COL_SEL_HI_CS23__COL14__SHIFT 0x18 +#define MMEA5_ADDRDEC0_COL_SEL_HI_CS23__COL15__SHIFT 0x1c +#define MMEA5_ADDRDEC0_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL +#define MMEA5_ADDRDEC0_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L +#define MMEA5_ADDRDEC0_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L +#define MMEA5_ADDRDEC0_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L +#define MMEA5_ADDRDEC0_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L +#define MMEA5_ADDRDEC0_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L +#define MMEA5_ADDRDEC0_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L +#define MMEA5_ADDRDEC0_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L +//MMEA5_ADDRDEC0_RM_SEL_CS01 +#define MMEA5_ADDRDEC0_RM_SEL_CS01__RM0__SHIFT 0x0 +#define MMEA5_ADDRDEC0_RM_SEL_CS01__RM1__SHIFT 0x4 +#define MMEA5_ADDRDEC0_RM_SEL_CS01__RM2__SHIFT 0x8 +#define MMEA5_ADDRDEC0_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc +#define MMEA5_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA5_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA5_ADDRDEC0_RM_SEL_CS01__RM0_MASK 0x0000000FL +#define MMEA5_ADDRDEC0_RM_SEL_CS01__RM1_MASK 0x000000F0L +#define MMEA5_ADDRDEC0_RM_SEL_CS01__RM2_MASK 0x00000F00L +#define MMEA5_ADDRDEC0_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA5_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA5_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA5_ADDRDEC0_RM_SEL_CS23 +#define MMEA5_ADDRDEC0_RM_SEL_CS23__RM0__SHIFT 0x0 +#define MMEA5_ADDRDEC0_RM_SEL_CS23__RM1__SHIFT 0x4 +#define MMEA5_ADDRDEC0_RM_SEL_CS23__RM2__SHIFT 0x8 +#define MMEA5_ADDRDEC0_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc +#define MMEA5_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA5_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA5_ADDRDEC0_RM_SEL_CS23__RM0_MASK 0x0000000FL +#define MMEA5_ADDRDEC0_RM_SEL_CS23__RM1_MASK 0x000000F0L +#define MMEA5_ADDRDEC0_RM_SEL_CS23__RM2_MASK 0x00000F00L +#define MMEA5_ADDRDEC0_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA5_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA5_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA5_ADDRDEC0_RM_SEL_SECCS01 +#define MMEA5_ADDRDEC0_RM_SEL_SECCS01__RM0__SHIFT 0x0 +#define MMEA5_ADDRDEC0_RM_SEL_SECCS01__RM1__SHIFT 0x4 +#define MMEA5_ADDRDEC0_RM_SEL_SECCS01__RM2__SHIFT 0x8 +#define MMEA5_ADDRDEC0_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc +#define MMEA5_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA5_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA5_ADDRDEC0_RM_SEL_SECCS01__RM0_MASK 0x0000000FL +#define MMEA5_ADDRDEC0_RM_SEL_SECCS01__RM1_MASK 0x000000F0L +#define MMEA5_ADDRDEC0_RM_SEL_SECCS01__RM2_MASK 0x00000F00L +#define MMEA5_ADDRDEC0_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA5_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA5_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA5_ADDRDEC0_RM_SEL_SECCS23 +#define MMEA5_ADDRDEC0_RM_SEL_SECCS23__RM0__SHIFT 0x0 +#define MMEA5_ADDRDEC0_RM_SEL_SECCS23__RM1__SHIFT 0x4 +#define MMEA5_ADDRDEC0_RM_SEL_SECCS23__RM2__SHIFT 0x8 +#define MMEA5_ADDRDEC0_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc +#define MMEA5_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA5_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA5_ADDRDEC0_RM_SEL_SECCS23__RM0_MASK 0x0000000FL +#define MMEA5_ADDRDEC0_RM_SEL_SECCS23__RM1_MASK 0x000000F0L +#define MMEA5_ADDRDEC0_RM_SEL_SECCS23__RM2_MASK 
0x00000F00L +#define MMEA5_ADDRDEC0_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA5_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA5_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA5_ADDRDEC1_BASE_ADDR_CS0 +#define MMEA5_ADDRDEC1_BASE_ADDR_CS0__CS_EN__SHIFT 0x0 +#define MMEA5_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1 +#define MMEA5_ADDRDEC1_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L +#define MMEA5_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA5_ADDRDEC1_BASE_ADDR_CS1 +#define MMEA5_ADDRDEC1_BASE_ADDR_CS1__CS_EN__SHIFT 0x0 +#define MMEA5_ADDRDEC1_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1 +#define MMEA5_ADDRDEC1_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L +#define MMEA5_ADDRDEC1_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA5_ADDRDEC1_BASE_ADDR_CS2 +#define MMEA5_ADDRDEC1_BASE_ADDR_CS2__CS_EN__SHIFT 0x0 +#define MMEA5_ADDRDEC1_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1 +#define MMEA5_ADDRDEC1_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L +#define MMEA5_ADDRDEC1_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA5_ADDRDEC1_BASE_ADDR_CS3 +#define MMEA5_ADDRDEC1_BASE_ADDR_CS3__CS_EN__SHIFT 0x0 +#define MMEA5_ADDRDEC1_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1 +#define MMEA5_ADDRDEC1_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L +#define MMEA5_ADDRDEC1_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA5_ADDRDEC1_BASE_ADDR_SECCS0 +#define MMEA5_ADDRDEC1_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0 +#define MMEA5_ADDRDEC1_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1 +#define MMEA5_ADDRDEC1_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L +#define MMEA5_ADDRDEC1_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA5_ADDRDEC1_BASE_ADDR_SECCS1 +#define MMEA5_ADDRDEC1_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0 +#define MMEA5_ADDRDEC1_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1 +#define MMEA5_ADDRDEC1_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L +#define MMEA5_ADDRDEC1_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA5_ADDRDEC1_BASE_ADDR_SECCS2 +#define MMEA5_ADDRDEC1_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0 +#define MMEA5_ADDRDEC1_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1 +#define MMEA5_ADDRDEC1_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L +#define MMEA5_ADDRDEC1_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA5_ADDRDEC1_BASE_ADDR_SECCS3 +#define MMEA5_ADDRDEC1_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0 +#define MMEA5_ADDRDEC1_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1 +#define MMEA5_ADDRDEC1_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L +#define MMEA5_ADDRDEC1_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA5_ADDRDEC1_ADDR_MASK_CS01 +#define MMEA5_ADDRDEC1_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1 +#define MMEA5_ADDRDEC1_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA5_ADDRDEC1_ADDR_MASK_CS23 +#define MMEA5_ADDRDEC1_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1 +#define MMEA5_ADDRDEC1_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA5_ADDRDEC1_ADDR_MASK_SECCS01 +#define MMEA5_ADDRDEC1_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1 +#define MMEA5_ADDRDEC1_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA5_ADDRDEC1_ADDR_MASK_SECCS23 +#define MMEA5_ADDRDEC1_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1 +#define MMEA5_ADDRDEC1_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA5_ADDRDEC1_ADDR_CFG_CS01 +#define MMEA5_ADDRDEC1_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA5_ADDRDEC1_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4 +#define MMEA5_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8 +#define MMEA5_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc +#define MMEA5_ADDRDEC1_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10 +#define MMEA5_ADDRDEC1_ADDR_CFG_CS01__NUM_BANKS__SHIFT 
0x14 +#define MMEA5_ADDRDEC1_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f +#define MMEA5_ADDRDEC1_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA5_ADDRDEC1_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L +#define MMEA5_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA5_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA5_ADDRDEC1_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L +#define MMEA5_ADDRDEC1_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L +#define MMEA5_ADDRDEC1_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L +//MMEA5_ADDRDEC1_ADDR_CFG_CS23 +#define MMEA5_ADDRDEC1_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA5_ADDRDEC1_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4 +#define MMEA5_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8 +#define MMEA5_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc +#define MMEA5_ADDRDEC1_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10 +#define MMEA5_ADDRDEC1_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14 +#define MMEA5_ADDRDEC1_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f +#define MMEA5_ADDRDEC1_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA5_ADDRDEC1_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L +#define MMEA5_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA5_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA5_ADDRDEC1_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L +#define MMEA5_ADDRDEC1_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L +#define MMEA5_ADDRDEC1_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L +//MMEA5_ADDRDEC1_ADDR_SEL_CS01 +#define MMEA5_ADDRDEC1_ADDR_SEL_CS01__BANK0__SHIFT 0x0 +#define MMEA5_ADDRDEC1_ADDR_SEL_CS01__BANK1__SHIFT 0x4 +#define MMEA5_ADDRDEC1_ADDR_SEL_CS01__BANK2__SHIFT 0x8 +#define MMEA5_ADDRDEC1_ADDR_SEL_CS01__BANK3__SHIFT 0xc +#define MMEA5_ADDRDEC1_ADDR_SEL_CS01__BANK4__SHIFT 0x10 +#define MMEA5_ADDRDEC1_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18 +#define MMEA5_ADDRDEC1_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c +#define MMEA5_ADDRDEC1_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL +#define MMEA5_ADDRDEC1_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L +#define MMEA5_ADDRDEC1_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L +#define MMEA5_ADDRDEC1_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L +#define MMEA5_ADDRDEC1_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L +#define MMEA5_ADDRDEC1_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L +#define MMEA5_ADDRDEC1_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L +//MMEA5_ADDRDEC1_ADDR_SEL_CS23 +#define MMEA5_ADDRDEC1_ADDR_SEL_CS23__BANK0__SHIFT 0x0 +#define MMEA5_ADDRDEC1_ADDR_SEL_CS23__BANK1__SHIFT 0x4 +#define MMEA5_ADDRDEC1_ADDR_SEL_CS23__BANK2__SHIFT 0x8 +#define MMEA5_ADDRDEC1_ADDR_SEL_CS23__BANK3__SHIFT 0xc +#define MMEA5_ADDRDEC1_ADDR_SEL_CS23__BANK4__SHIFT 0x10 +#define MMEA5_ADDRDEC1_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18 +#define MMEA5_ADDRDEC1_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c +#define MMEA5_ADDRDEC1_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL +#define MMEA5_ADDRDEC1_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L +#define MMEA5_ADDRDEC1_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L +#define MMEA5_ADDRDEC1_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L +#define MMEA5_ADDRDEC1_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L +#define MMEA5_ADDRDEC1_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L +#define MMEA5_ADDRDEC1_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L +//MMEA5_ADDRDEC1_ADDR_SEL2_CS01 +#define MMEA5_ADDRDEC1_ADDR_SEL2_CS01__BANK5__SHIFT 0x0 +#define MMEA5_ADDRDEC1_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL +//MMEA5_ADDRDEC1_ADDR_SEL2_CS23 +#define MMEA5_ADDRDEC1_ADDR_SEL2_CS23__BANK5__SHIFT 0x0 +#define MMEA5_ADDRDEC1_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL +//MMEA5_ADDRDEC1_COL_SEL_LO_CS01 +#define 
MMEA5_ADDRDEC1_COL_SEL_LO_CS01__COL0__SHIFT 0x0 +#define MMEA5_ADDRDEC1_COL_SEL_LO_CS01__COL1__SHIFT 0x4 +#define MMEA5_ADDRDEC1_COL_SEL_LO_CS01__COL2__SHIFT 0x8 +#define MMEA5_ADDRDEC1_COL_SEL_LO_CS01__COL3__SHIFT 0xc +#define MMEA5_ADDRDEC1_COL_SEL_LO_CS01__COL4__SHIFT 0x10 +#define MMEA5_ADDRDEC1_COL_SEL_LO_CS01__COL5__SHIFT 0x14 +#define MMEA5_ADDRDEC1_COL_SEL_LO_CS01__COL6__SHIFT 0x18 +#define MMEA5_ADDRDEC1_COL_SEL_LO_CS01__COL7__SHIFT 0x1c +#define MMEA5_ADDRDEC1_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL +#define MMEA5_ADDRDEC1_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L +#define MMEA5_ADDRDEC1_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L +#define MMEA5_ADDRDEC1_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L +#define MMEA5_ADDRDEC1_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L +#define MMEA5_ADDRDEC1_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L +#define MMEA5_ADDRDEC1_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L +#define MMEA5_ADDRDEC1_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L +//MMEA5_ADDRDEC1_COL_SEL_LO_CS23 +#define MMEA5_ADDRDEC1_COL_SEL_LO_CS23__COL0__SHIFT 0x0 +#define MMEA5_ADDRDEC1_COL_SEL_LO_CS23__COL1__SHIFT 0x4 +#define MMEA5_ADDRDEC1_COL_SEL_LO_CS23__COL2__SHIFT 0x8 +#define MMEA5_ADDRDEC1_COL_SEL_LO_CS23__COL3__SHIFT 0xc +#define MMEA5_ADDRDEC1_COL_SEL_LO_CS23__COL4__SHIFT 0x10 +#define MMEA5_ADDRDEC1_COL_SEL_LO_CS23__COL5__SHIFT 0x14 +#define MMEA5_ADDRDEC1_COL_SEL_LO_CS23__COL6__SHIFT 0x18 +#define MMEA5_ADDRDEC1_COL_SEL_LO_CS23__COL7__SHIFT 0x1c +#define MMEA5_ADDRDEC1_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL +#define MMEA5_ADDRDEC1_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L +#define MMEA5_ADDRDEC1_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L +#define MMEA5_ADDRDEC1_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L +#define MMEA5_ADDRDEC1_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L +#define MMEA5_ADDRDEC1_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L +#define MMEA5_ADDRDEC1_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L +#define MMEA5_ADDRDEC1_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L +//MMEA5_ADDRDEC1_COL_SEL_HI_CS01 +#define MMEA5_ADDRDEC1_COL_SEL_HI_CS01__COL8__SHIFT 0x0 +#define MMEA5_ADDRDEC1_COL_SEL_HI_CS01__COL9__SHIFT 0x4 +#define MMEA5_ADDRDEC1_COL_SEL_HI_CS01__COL10__SHIFT 0x8 +#define MMEA5_ADDRDEC1_COL_SEL_HI_CS01__COL11__SHIFT 0xc +#define MMEA5_ADDRDEC1_COL_SEL_HI_CS01__COL12__SHIFT 0x10 +#define MMEA5_ADDRDEC1_COL_SEL_HI_CS01__COL13__SHIFT 0x14 +#define MMEA5_ADDRDEC1_COL_SEL_HI_CS01__COL14__SHIFT 0x18 +#define MMEA5_ADDRDEC1_COL_SEL_HI_CS01__COL15__SHIFT 0x1c +#define MMEA5_ADDRDEC1_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL +#define MMEA5_ADDRDEC1_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L +#define MMEA5_ADDRDEC1_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L +#define MMEA5_ADDRDEC1_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L +#define MMEA5_ADDRDEC1_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L +#define MMEA5_ADDRDEC1_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L +#define MMEA5_ADDRDEC1_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L +#define MMEA5_ADDRDEC1_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L +//MMEA5_ADDRDEC1_COL_SEL_HI_CS23 +#define MMEA5_ADDRDEC1_COL_SEL_HI_CS23__COL8__SHIFT 0x0 +#define MMEA5_ADDRDEC1_COL_SEL_HI_CS23__COL9__SHIFT 0x4 +#define MMEA5_ADDRDEC1_COL_SEL_HI_CS23__COL10__SHIFT 0x8 +#define MMEA5_ADDRDEC1_COL_SEL_HI_CS23__COL11__SHIFT 0xc +#define MMEA5_ADDRDEC1_COL_SEL_HI_CS23__COL12__SHIFT 0x10 +#define MMEA5_ADDRDEC1_COL_SEL_HI_CS23__COL13__SHIFT 0x14 +#define MMEA5_ADDRDEC1_COL_SEL_HI_CS23__COL14__SHIFT 0x18 +#define MMEA5_ADDRDEC1_COL_SEL_HI_CS23__COL15__SHIFT 0x1c +#define MMEA5_ADDRDEC1_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL +#define 
MMEA5_ADDRDEC1_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L +#define MMEA5_ADDRDEC1_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L +#define MMEA5_ADDRDEC1_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L +#define MMEA5_ADDRDEC1_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L +#define MMEA5_ADDRDEC1_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L +#define MMEA5_ADDRDEC1_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L +#define MMEA5_ADDRDEC1_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L +//MMEA5_ADDRDEC1_RM_SEL_CS01 +#define MMEA5_ADDRDEC1_RM_SEL_CS01__RM0__SHIFT 0x0 +#define MMEA5_ADDRDEC1_RM_SEL_CS01__RM1__SHIFT 0x4 +#define MMEA5_ADDRDEC1_RM_SEL_CS01__RM2__SHIFT 0x8 +#define MMEA5_ADDRDEC1_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc +#define MMEA5_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA5_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA5_ADDRDEC1_RM_SEL_CS01__RM0_MASK 0x0000000FL +#define MMEA5_ADDRDEC1_RM_SEL_CS01__RM1_MASK 0x000000F0L +#define MMEA5_ADDRDEC1_RM_SEL_CS01__RM2_MASK 0x00000F00L +#define MMEA5_ADDRDEC1_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA5_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA5_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA5_ADDRDEC1_RM_SEL_CS23 +#define MMEA5_ADDRDEC1_RM_SEL_CS23__RM0__SHIFT 0x0 +#define MMEA5_ADDRDEC1_RM_SEL_CS23__RM1__SHIFT 0x4 +#define MMEA5_ADDRDEC1_RM_SEL_CS23__RM2__SHIFT 0x8 +#define MMEA5_ADDRDEC1_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc +#define MMEA5_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA5_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA5_ADDRDEC1_RM_SEL_CS23__RM0_MASK 0x0000000FL +#define MMEA5_ADDRDEC1_RM_SEL_CS23__RM1_MASK 0x000000F0L +#define MMEA5_ADDRDEC1_RM_SEL_CS23__RM2_MASK 0x00000F00L +#define MMEA5_ADDRDEC1_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA5_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA5_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA5_ADDRDEC1_RM_SEL_SECCS01 +#define MMEA5_ADDRDEC1_RM_SEL_SECCS01__RM0__SHIFT 0x0 +#define MMEA5_ADDRDEC1_RM_SEL_SECCS01__RM1__SHIFT 0x4 +#define MMEA5_ADDRDEC1_RM_SEL_SECCS01__RM2__SHIFT 0x8 +#define MMEA5_ADDRDEC1_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc +#define MMEA5_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA5_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA5_ADDRDEC1_RM_SEL_SECCS01__RM0_MASK 0x0000000FL +#define MMEA5_ADDRDEC1_RM_SEL_SECCS01__RM1_MASK 0x000000F0L +#define MMEA5_ADDRDEC1_RM_SEL_SECCS01__RM2_MASK 0x00000F00L +#define MMEA5_ADDRDEC1_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA5_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA5_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA5_ADDRDEC1_RM_SEL_SECCS23 +#define MMEA5_ADDRDEC1_RM_SEL_SECCS23__RM0__SHIFT 0x0 +#define MMEA5_ADDRDEC1_RM_SEL_SECCS23__RM1__SHIFT 0x4 +#define MMEA5_ADDRDEC1_RM_SEL_SECCS23__RM2__SHIFT 0x8 +#define MMEA5_ADDRDEC1_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc +#define MMEA5_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA5_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA5_ADDRDEC1_RM_SEL_SECCS23__RM0_MASK 0x0000000FL +#define MMEA5_ADDRDEC1_RM_SEL_SECCS23__RM1_MASK 0x000000F0L +#define MMEA5_ADDRDEC1_RM_SEL_SECCS23__RM2_MASK 0x00000F00L +#define MMEA5_ADDRDEC1_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA5_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define 
MMEA5_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA5_ADDRDEC2_BASE_ADDR_CS0 +#define MMEA5_ADDRDEC2_BASE_ADDR_CS0__CS_EN__SHIFT 0x0 +#define MMEA5_ADDRDEC2_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1 +#define MMEA5_ADDRDEC2_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L +#define MMEA5_ADDRDEC2_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA5_ADDRDEC2_BASE_ADDR_CS1 +#define MMEA5_ADDRDEC2_BASE_ADDR_CS1__CS_EN__SHIFT 0x0 +#define MMEA5_ADDRDEC2_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1 +#define MMEA5_ADDRDEC2_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L +#define MMEA5_ADDRDEC2_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA5_ADDRDEC2_BASE_ADDR_CS2 +#define MMEA5_ADDRDEC2_BASE_ADDR_CS2__CS_EN__SHIFT 0x0 +#define MMEA5_ADDRDEC2_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1 +#define MMEA5_ADDRDEC2_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L +#define MMEA5_ADDRDEC2_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA5_ADDRDEC2_BASE_ADDR_CS3 +#define MMEA5_ADDRDEC2_BASE_ADDR_CS3__CS_EN__SHIFT 0x0 +#define MMEA5_ADDRDEC2_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1 +#define MMEA5_ADDRDEC2_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L +#define MMEA5_ADDRDEC2_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA5_ADDRDEC2_BASE_ADDR_SECCS0 +#define MMEA5_ADDRDEC2_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0 +#define MMEA5_ADDRDEC2_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1 +#define MMEA5_ADDRDEC2_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L +#define MMEA5_ADDRDEC2_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA5_ADDRDEC2_BASE_ADDR_SECCS1 +#define MMEA5_ADDRDEC2_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0 +#define MMEA5_ADDRDEC2_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1 +#define MMEA5_ADDRDEC2_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L +#define MMEA5_ADDRDEC2_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA5_ADDRDEC2_BASE_ADDR_SECCS2 +#define MMEA5_ADDRDEC2_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0 +#define MMEA5_ADDRDEC2_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1 +#define MMEA5_ADDRDEC2_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L +#define MMEA5_ADDRDEC2_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA5_ADDRDEC2_BASE_ADDR_SECCS3 +#define MMEA5_ADDRDEC2_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0 +#define MMEA5_ADDRDEC2_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1 +#define MMEA5_ADDRDEC2_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L +#define MMEA5_ADDRDEC2_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA5_ADDRDEC2_ADDR_MASK_CS01 +#define MMEA5_ADDRDEC2_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1 +#define MMEA5_ADDRDEC2_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA5_ADDRDEC2_ADDR_MASK_CS23 +#define MMEA5_ADDRDEC2_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1 +#define MMEA5_ADDRDEC2_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA5_ADDRDEC2_ADDR_MASK_SECCS01 +#define MMEA5_ADDRDEC2_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1 +#define MMEA5_ADDRDEC2_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA5_ADDRDEC2_ADDR_MASK_SECCS23 +#define MMEA5_ADDRDEC2_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1 +#define MMEA5_ADDRDEC2_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA5_ADDRDEC2_ADDR_CFG_CS01 +#define MMEA5_ADDRDEC2_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA5_ADDRDEC2_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4 +#define MMEA5_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8 +#define MMEA5_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc +#define MMEA5_ADDRDEC2_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10 +#define MMEA5_ADDRDEC2_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14 +#define MMEA5_ADDRDEC2_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f +#define MMEA5_ADDRDEC2_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL +#define 
MMEA5_ADDRDEC2_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L +#define MMEA5_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA5_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA5_ADDRDEC2_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L +#define MMEA5_ADDRDEC2_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L +#define MMEA5_ADDRDEC2_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L +//MMEA5_ADDRDEC2_ADDR_CFG_CS23 +#define MMEA5_ADDRDEC2_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA5_ADDRDEC2_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4 +#define MMEA5_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8 +#define MMEA5_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc +#define MMEA5_ADDRDEC2_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10 +#define MMEA5_ADDRDEC2_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14 +#define MMEA5_ADDRDEC2_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f +#define MMEA5_ADDRDEC2_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA5_ADDRDEC2_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L +#define MMEA5_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA5_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA5_ADDRDEC2_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L +#define MMEA5_ADDRDEC2_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L +#define MMEA5_ADDRDEC2_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L +//MMEA5_ADDRDEC2_ADDR_SEL_CS01 +#define MMEA5_ADDRDEC2_ADDR_SEL_CS01__BANK0__SHIFT 0x0 +#define MMEA5_ADDRDEC2_ADDR_SEL_CS01__BANK1__SHIFT 0x4 +#define MMEA5_ADDRDEC2_ADDR_SEL_CS01__BANK2__SHIFT 0x8 +#define MMEA5_ADDRDEC2_ADDR_SEL_CS01__BANK3__SHIFT 0xc +#define MMEA5_ADDRDEC2_ADDR_SEL_CS01__BANK4__SHIFT 0x10 +#define MMEA5_ADDRDEC2_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18 +#define MMEA5_ADDRDEC2_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c +#define MMEA5_ADDRDEC2_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL +#define MMEA5_ADDRDEC2_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L +#define MMEA5_ADDRDEC2_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L +#define MMEA5_ADDRDEC2_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L +#define MMEA5_ADDRDEC2_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L +#define MMEA5_ADDRDEC2_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L +#define MMEA5_ADDRDEC2_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L +//MMEA5_ADDRDEC2_ADDR_SEL_CS23 +#define MMEA5_ADDRDEC2_ADDR_SEL_CS23__BANK0__SHIFT 0x0 +#define MMEA5_ADDRDEC2_ADDR_SEL_CS23__BANK1__SHIFT 0x4 +#define MMEA5_ADDRDEC2_ADDR_SEL_CS23__BANK2__SHIFT 0x8 +#define MMEA5_ADDRDEC2_ADDR_SEL_CS23__BANK3__SHIFT 0xc +#define MMEA5_ADDRDEC2_ADDR_SEL_CS23__BANK4__SHIFT 0x10 +#define MMEA5_ADDRDEC2_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18 +#define MMEA5_ADDRDEC2_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c +#define MMEA5_ADDRDEC2_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL +#define MMEA5_ADDRDEC2_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L +#define MMEA5_ADDRDEC2_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L +#define MMEA5_ADDRDEC2_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L +#define MMEA5_ADDRDEC2_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L +#define MMEA5_ADDRDEC2_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L +#define MMEA5_ADDRDEC2_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L +//MMEA5_ADDRDEC2_ADDR_SEL2_CS01 +#define MMEA5_ADDRDEC2_ADDR_SEL2_CS01__BANK5__SHIFT 0x0 +#define MMEA5_ADDRDEC2_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL +//MMEA5_ADDRDEC2_ADDR_SEL2_CS23 +#define MMEA5_ADDRDEC2_ADDR_SEL2_CS23__BANK5__SHIFT 0x0 +#define MMEA5_ADDRDEC2_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL +//MMEA5_ADDRDEC2_COL_SEL_LO_CS01 +#define MMEA5_ADDRDEC2_COL_SEL_LO_CS01__COL0__SHIFT 0x0 +#define MMEA5_ADDRDEC2_COL_SEL_LO_CS01__COL1__SHIFT 0x4 +#define MMEA5_ADDRDEC2_COL_SEL_LO_CS01__COL2__SHIFT 0x8 
+#define MMEA5_ADDRDEC2_COL_SEL_LO_CS01__COL3__SHIFT 0xc +#define MMEA5_ADDRDEC2_COL_SEL_LO_CS01__COL4__SHIFT 0x10 +#define MMEA5_ADDRDEC2_COL_SEL_LO_CS01__COL5__SHIFT 0x14 +#define MMEA5_ADDRDEC2_COL_SEL_LO_CS01__COL6__SHIFT 0x18 +#define MMEA5_ADDRDEC2_COL_SEL_LO_CS01__COL7__SHIFT 0x1c +#define MMEA5_ADDRDEC2_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL +#define MMEA5_ADDRDEC2_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L +#define MMEA5_ADDRDEC2_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L +#define MMEA5_ADDRDEC2_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L +#define MMEA5_ADDRDEC2_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L +#define MMEA5_ADDRDEC2_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L +#define MMEA5_ADDRDEC2_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L +#define MMEA5_ADDRDEC2_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L +//MMEA5_ADDRDEC2_COL_SEL_LO_CS23 +#define MMEA5_ADDRDEC2_COL_SEL_LO_CS23__COL0__SHIFT 0x0 +#define MMEA5_ADDRDEC2_COL_SEL_LO_CS23__COL1__SHIFT 0x4 +#define MMEA5_ADDRDEC2_COL_SEL_LO_CS23__COL2__SHIFT 0x8 +#define MMEA5_ADDRDEC2_COL_SEL_LO_CS23__COL3__SHIFT 0xc +#define MMEA5_ADDRDEC2_COL_SEL_LO_CS23__COL4__SHIFT 0x10 +#define MMEA5_ADDRDEC2_COL_SEL_LO_CS23__COL5__SHIFT 0x14 +#define MMEA5_ADDRDEC2_COL_SEL_LO_CS23__COL6__SHIFT 0x18 +#define MMEA5_ADDRDEC2_COL_SEL_LO_CS23__COL7__SHIFT 0x1c +#define MMEA5_ADDRDEC2_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL +#define MMEA5_ADDRDEC2_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L +#define MMEA5_ADDRDEC2_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L +#define MMEA5_ADDRDEC2_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L +#define MMEA5_ADDRDEC2_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L +#define MMEA5_ADDRDEC2_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L +#define MMEA5_ADDRDEC2_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L +#define MMEA5_ADDRDEC2_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L +//MMEA5_ADDRDEC2_COL_SEL_HI_CS01 +#define MMEA5_ADDRDEC2_COL_SEL_HI_CS01__COL8__SHIFT 0x0 +#define MMEA5_ADDRDEC2_COL_SEL_HI_CS01__COL9__SHIFT 0x4 +#define MMEA5_ADDRDEC2_COL_SEL_HI_CS01__COL10__SHIFT 0x8 +#define MMEA5_ADDRDEC2_COL_SEL_HI_CS01__COL11__SHIFT 0xc +#define MMEA5_ADDRDEC2_COL_SEL_HI_CS01__COL12__SHIFT 0x10 +#define MMEA5_ADDRDEC2_COL_SEL_HI_CS01__COL13__SHIFT 0x14 +#define MMEA5_ADDRDEC2_COL_SEL_HI_CS01__COL14__SHIFT 0x18 +#define MMEA5_ADDRDEC2_COL_SEL_HI_CS01__COL15__SHIFT 0x1c +#define MMEA5_ADDRDEC2_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL +#define MMEA5_ADDRDEC2_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L +#define MMEA5_ADDRDEC2_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L +#define MMEA5_ADDRDEC2_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L +#define MMEA5_ADDRDEC2_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L +#define MMEA5_ADDRDEC2_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L +#define MMEA5_ADDRDEC2_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L +#define MMEA5_ADDRDEC2_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L +//MMEA5_ADDRDEC2_COL_SEL_HI_CS23 +#define MMEA5_ADDRDEC2_COL_SEL_HI_CS23__COL8__SHIFT 0x0 +#define MMEA5_ADDRDEC2_COL_SEL_HI_CS23__COL9__SHIFT 0x4 +#define MMEA5_ADDRDEC2_COL_SEL_HI_CS23__COL10__SHIFT 0x8 +#define MMEA5_ADDRDEC2_COL_SEL_HI_CS23__COL11__SHIFT 0xc +#define MMEA5_ADDRDEC2_COL_SEL_HI_CS23__COL12__SHIFT 0x10 +#define MMEA5_ADDRDEC2_COL_SEL_HI_CS23__COL13__SHIFT 0x14 +#define MMEA5_ADDRDEC2_COL_SEL_HI_CS23__COL14__SHIFT 0x18 +#define MMEA5_ADDRDEC2_COL_SEL_HI_CS23__COL15__SHIFT 0x1c +#define MMEA5_ADDRDEC2_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL +#define MMEA5_ADDRDEC2_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L +#define MMEA5_ADDRDEC2_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L +#define MMEA5_ADDRDEC2_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L 
+#define MMEA5_ADDRDEC2_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L +#define MMEA5_ADDRDEC2_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L +#define MMEA5_ADDRDEC2_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L +#define MMEA5_ADDRDEC2_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L +//MMEA5_ADDRDEC2_RM_SEL_CS01 +#define MMEA5_ADDRDEC2_RM_SEL_CS01__RM0__SHIFT 0x0 +#define MMEA5_ADDRDEC2_RM_SEL_CS01__RM1__SHIFT 0x4 +#define MMEA5_ADDRDEC2_RM_SEL_CS01__RM2__SHIFT 0x8 +#define MMEA5_ADDRDEC2_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc +#define MMEA5_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA5_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA5_ADDRDEC2_RM_SEL_CS01__RM0_MASK 0x0000000FL +#define MMEA5_ADDRDEC2_RM_SEL_CS01__RM1_MASK 0x000000F0L +#define MMEA5_ADDRDEC2_RM_SEL_CS01__RM2_MASK 0x00000F00L +#define MMEA5_ADDRDEC2_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA5_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA5_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA5_ADDRDEC2_RM_SEL_CS23 +#define MMEA5_ADDRDEC2_RM_SEL_CS23__RM0__SHIFT 0x0 +#define MMEA5_ADDRDEC2_RM_SEL_CS23__RM1__SHIFT 0x4 +#define MMEA5_ADDRDEC2_RM_SEL_CS23__RM2__SHIFT 0x8 +#define MMEA5_ADDRDEC2_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc +#define MMEA5_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA5_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA5_ADDRDEC2_RM_SEL_CS23__RM0_MASK 0x0000000FL +#define MMEA5_ADDRDEC2_RM_SEL_CS23__RM1_MASK 0x000000F0L +#define MMEA5_ADDRDEC2_RM_SEL_CS23__RM2_MASK 0x00000F00L +#define MMEA5_ADDRDEC2_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA5_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA5_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA5_ADDRDEC2_RM_SEL_SECCS01 +#define MMEA5_ADDRDEC2_RM_SEL_SECCS01__RM0__SHIFT 0x0 +#define MMEA5_ADDRDEC2_RM_SEL_SECCS01__RM1__SHIFT 0x4 +#define MMEA5_ADDRDEC2_RM_SEL_SECCS01__RM2__SHIFT 0x8 +#define MMEA5_ADDRDEC2_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc +#define MMEA5_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA5_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA5_ADDRDEC2_RM_SEL_SECCS01__RM0_MASK 0x0000000FL +#define MMEA5_ADDRDEC2_RM_SEL_SECCS01__RM1_MASK 0x000000F0L +#define MMEA5_ADDRDEC2_RM_SEL_SECCS01__RM2_MASK 0x00000F00L +#define MMEA5_ADDRDEC2_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA5_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA5_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA5_ADDRDEC2_RM_SEL_SECCS23 +#define MMEA5_ADDRDEC2_RM_SEL_SECCS23__RM0__SHIFT 0x0 +#define MMEA5_ADDRDEC2_RM_SEL_SECCS23__RM1__SHIFT 0x4 +#define MMEA5_ADDRDEC2_RM_SEL_SECCS23__RM2__SHIFT 0x8 +#define MMEA5_ADDRDEC2_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc +#define MMEA5_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA5_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA5_ADDRDEC2_RM_SEL_SECCS23__RM0_MASK 0x0000000FL +#define MMEA5_ADDRDEC2_RM_SEL_SECCS23__RM1_MASK 0x000000F0L +#define MMEA5_ADDRDEC2_RM_SEL_SECCS23__RM2_MASK 0x00000F00L +#define MMEA5_ADDRDEC2_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA5_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA5_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA5_ADDRNORMDRAM_GLOBAL_CNTL +#define MMEA5_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K__SHIFT 0x14 +#define 
MMEA5_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M__SHIFT 0x15 +#define MMEA5_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G__SHIFT 0x16 +#define MMEA5_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K_MASK 0x00100000L +#define MMEA5_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M_MASK 0x00200000L +#define MMEA5_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G_MASK 0x00400000L +//MMEA5_ADDRNORMGMI_GLOBAL_CNTL +#define MMEA5_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K__SHIFT 0x14 +#define MMEA5_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M__SHIFT 0x15 +#define MMEA5_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G__SHIFT 0x16 +#define MMEA5_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K_MASK 0x00100000L +#define MMEA5_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M_MASK 0x00200000L +#define MMEA5_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G_MASK 0x00400000L +//MMEA5_IO_RD_CLI2GRP_MAP0 +#define MMEA5_IO_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0 +#define MMEA5_IO_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2 +#define MMEA5_IO_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4 +#define MMEA5_IO_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6 +#define MMEA5_IO_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8 +#define MMEA5_IO_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa +#define MMEA5_IO_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc +#define MMEA5_IO_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe +#define MMEA5_IO_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10 +#define MMEA5_IO_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12 +#define MMEA5_IO_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14 +#define MMEA5_IO_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16 +#define MMEA5_IO_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18 +#define MMEA5_IO_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a +#define MMEA5_IO_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c +#define MMEA5_IO_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e +#define MMEA5_IO_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L +#define MMEA5_IO_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL +#define MMEA5_IO_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L +#define MMEA5_IO_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L +#define MMEA5_IO_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L +#define MMEA5_IO_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L +#define MMEA5_IO_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L +#define MMEA5_IO_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L +#define MMEA5_IO_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L +#define MMEA5_IO_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L +#define MMEA5_IO_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L +#define MMEA5_IO_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L +#define MMEA5_IO_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L +#define MMEA5_IO_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L +#define MMEA5_IO_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L +#define MMEA5_IO_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L +//MMEA5_IO_RD_CLI2GRP_MAP1 +#define MMEA5_IO_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0 +#define MMEA5_IO_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2 +#define MMEA5_IO_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4 +#define MMEA5_IO_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6 +#define MMEA5_IO_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8 +#define MMEA5_IO_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa +#define MMEA5_IO_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc +#define MMEA5_IO_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe +#define MMEA5_IO_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10 +#define MMEA5_IO_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12 +#define MMEA5_IO_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14 +#define 
MMEA5_IO_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16 +#define MMEA5_IO_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18 +#define MMEA5_IO_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a +#define MMEA5_IO_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c +#define MMEA5_IO_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e +#define MMEA5_IO_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L +#define MMEA5_IO_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL +#define MMEA5_IO_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L +#define MMEA5_IO_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L +#define MMEA5_IO_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L +#define MMEA5_IO_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L +#define MMEA5_IO_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L +#define MMEA5_IO_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L +#define MMEA5_IO_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L +#define MMEA5_IO_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L +#define MMEA5_IO_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L +#define MMEA5_IO_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L +#define MMEA5_IO_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L +#define MMEA5_IO_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L +#define MMEA5_IO_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L +#define MMEA5_IO_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L +//MMEA5_IO_WR_CLI2GRP_MAP0 +#define MMEA5_IO_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0 +#define MMEA5_IO_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2 +#define MMEA5_IO_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4 +#define MMEA5_IO_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6 +#define MMEA5_IO_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8 +#define MMEA5_IO_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa +#define MMEA5_IO_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc +#define MMEA5_IO_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe +#define MMEA5_IO_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10 +#define MMEA5_IO_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12 +#define MMEA5_IO_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14 +#define MMEA5_IO_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16 +#define MMEA5_IO_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18 +#define MMEA5_IO_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a +#define MMEA5_IO_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c +#define MMEA5_IO_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e +#define MMEA5_IO_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L +#define MMEA5_IO_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL +#define MMEA5_IO_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L +#define MMEA5_IO_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L +#define MMEA5_IO_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L +#define MMEA5_IO_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L +#define MMEA5_IO_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L +#define MMEA5_IO_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L +#define MMEA5_IO_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L +#define MMEA5_IO_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L +#define MMEA5_IO_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L +#define MMEA5_IO_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L +#define MMEA5_IO_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L +#define MMEA5_IO_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L +#define MMEA5_IO_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L +#define MMEA5_IO_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L +//MMEA5_IO_WR_CLI2GRP_MAP1 +#define MMEA5_IO_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0 +#define MMEA5_IO_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2 +#define MMEA5_IO_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4 +#define MMEA5_IO_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6 +#define 
MMEA5_IO_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8 +#define MMEA5_IO_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa +#define MMEA5_IO_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc +#define MMEA5_IO_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe +#define MMEA5_IO_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10 +#define MMEA5_IO_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12 +#define MMEA5_IO_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14 +#define MMEA5_IO_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16 +#define MMEA5_IO_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18 +#define MMEA5_IO_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a +#define MMEA5_IO_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c +#define MMEA5_IO_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e +#define MMEA5_IO_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L +#define MMEA5_IO_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL +#define MMEA5_IO_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L +#define MMEA5_IO_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L +#define MMEA5_IO_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L +#define MMEA5_IO_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L +#define MMEA5_IO_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L +#define MMEA5_IO_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L +#define MMEA5_IO_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L +#define MMEA5_IO_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L +#define MMEA5_IO_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L +#define MMEA5_IO_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L +#define MMEA5_IO_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L +#define MMEA5_IO_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L +#define MMEA5_IO_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L +#define MMEA5_IO_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L +//MMEA5_IO_RD_COMBINE_FLUSH +#define MMEA5_IO_RD_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0 +#define MMEA5_IO_RD_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4 +#define MMEA5_IO_RD_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8 +#define MMEA5_IO_RD_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc +#define MMEA5_IO_RD_COMBINE_FLUSH__FORWARD_COMB_ONLY__SHIFT 0x10 +#define MMEA5_IO_RD_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL +#define MMEA5_IO_RD_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L +#define MMEA5_IO_RD_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L +#define MMEA5_IO_RD_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L +#define MMEA5_IO_RD_COMBINE_FLUSH__FORWARD_COMB_ONLY_MASK 0x00010000L +//MMEA5_IO_WR_COMBINE_FLUSH +#define MMEA5_IO_WR_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0 +#define MMEA5_IO_WR_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4 +#define MMEA5_IO_WR_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8 +#define MMEA5_IO_WR_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc +#define MMEA5_IO_WR_COMBINE_FLUSH__FORWARD_COMB_ONLY__SHIFT 0x10 +#define MMEA5_IO_WR_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL +#define MMEA5_IO_WR_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L +#define MMEA5_IO_WR_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L +#define MMEA5_IO_WR_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L +#define MMEA5_IO_WR_COMBINE_FLUSH__FORWARD_COMB_ONLY_MASK 0x00010000L +//MMEA5_IO_GROUP_BURST +#define MMEA5_IO_GROUP_BURST__RD_LIMIT_LO__SHIFT 0x0 +#define MMEA5_IO_GROUP_BURST__RD_LIMIT_HI__SHIFT 0x8 +#define MMEA5_IO_GROUP_BURST__WR_LIMIT_LO__SHIFT 0x10 +#define MMEA5_IO_GROUP_BURST__WR_LIMIT_HI__SHIFT 0x18 +#define MMEA5_IO_GROUP_BURST__RD_LIMIT_LO_MASK 0x000000FFL +#define MMEA5_IO_GROUP_BURST__RD_LIMIT_HI_MASK 0x0000FF00L +#define MMEA5_IO_GROUP_BURST__WR_LIMIT_LO_MASK 0x00FF0000L +#define MMEA5_IO_GROUP_BURST__WR_LIMIT_HI_MASK 0xFF000000L 
+//MMEA5_IO_RD_PRI_AGE +#define MMEA5_IO_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0 +#define MMEA5_IO_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3 +#define MMEA5_IO_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6 +#define MMEA5_IO_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9 +#define MMEA5_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc +#define MMEA5_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf +#define MMEA5_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12 +#define MMEA5_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15 +#define MMEA5_IO_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L +#define MMEA5_IO_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L +#define MMEA5_IO_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L +#define MMEA5_IO_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L +#define MMEA5_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L +#define MMEA5_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L +#define MMEA5_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L +#define MMEA5_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L +//MMEA5_IO_WR_PRI_AGE +#define MMEA5_IO_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0 +#define MMEA5_IO_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3 +#define MMEA5_IO_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6 +#define MMEA5_IO_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9 +#define MMEA5_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc +#define MMEA5_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf +#define MMEA5_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12 +#define MMEA5_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15 +#define MMEA5_IO_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L +#define MMEA5_IO_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L +#define MMEA5_IO_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L +#define MMEA5_IO_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L +#define MMEA5_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L +#define MMEA5_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L +#define MMEA5_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L +#define MMEA5_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L +//MMEA5_IO_RD_PRI_QUEUING +#define MMEA5_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0 +#define MMEA5_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3 +#define MMEA5_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6 +#define MMEA5_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9 +#define MMEA5_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L +#define MMEA5_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L +#define MMEA5_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L +#define MMEA5_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L +//MMEA5_IO_WR_PRI_QUEUING +#define MMEA5_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0 +#define MMEA5_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3 +#define MMEA5_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6 +#define MMEA5_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9 +#define MMEA5_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L +#define MMEA5_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L +#define MMEA5_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L +#define MMEA5_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L +//MMEA5_IO_RD_PRI_FIXED +#define MMEA5_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0 +#define MMEA5_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3 +#define 
MMEA5_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6 +#define MMEA5_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9 +#define MMEA5_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L +#define MMEA5_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L +#define MMEA5_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L +#define MMEA5_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L +//MMEA5_IO_WR_PRI_FIXED +#define MMEA5_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0 +#define MMEA5_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3 +#define MMEA5_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6 +#define MMEA5_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9 +#define MMEA5_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L +#define MMEA5_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L +#define MMEA5_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L +#define MMEA5_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L +//MMEA5_IO_RD_PRI_URGENCY +#define MMEA5_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0 +#define MMEA5_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3 +#define MMEA5_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6 +#define MMEA5_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9 +#define MMEA5_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc +#define MMEA5_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd +#define MMEA5_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe +#define MMEA5_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf +#define MMEA5_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L +#define MMEA5_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L +#define MMEA5_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L +#define MMEA5_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L +#define MMEA5_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L +#define MMEA5_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L +#define MMEA5_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L +#define MMEA5_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L +//MMEA5_IO_WR_PRI_URGENCY +#define MMEA5_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0 +#define MMEA5_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3 +#define MMEA5_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6 +#define MMEA5_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9 +#define MMEA5_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc +#define MMEA5_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd +#define MMEA5_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe +#define MMEA5_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf +#define MMEA5_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L +#define MMEA5_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L +#define MMEA5_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L +#define MMEA5_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L +#define MMEA5_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L +#define MMEA5_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L +#define MMEA5_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L +#define MMEA5_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L +//MMEA5_IO_RD_PRI_URGENCY_MASKING +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0 +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1 +#define 
MMEA5_IO_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2 +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3 +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4 +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5 +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6 +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7 +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8 +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9 +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10 +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11 +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12 +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13 +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14 +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15 +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16 +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17 +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18 +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19 +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L +#define 
MMEA5_IO_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L +#define MMEA5_IO_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L +//MMEA5_IO_WR_PRI_URGENCY_MASKING +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0 +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1 +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2 +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3 +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4 +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5 +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6 +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7 +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8 +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9 +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10 +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11 +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12 +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13 +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14 +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15 +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16 +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17 +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18 +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19 +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L +#define 
MMEA5_IO_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L +#define MMEA5_IO_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L +//MMEA5_IO_RD_PRI_QUANT_PRI1 +#define MMEA5_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA5_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA5_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA5_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA5_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA5_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA5_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA5_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA5_IO_RD_PRI_QUANT_PRI2 +#define MMEA5_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA5_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA5_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA5_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA5_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA5_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA5_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA5_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA5_IO_RD_PRI_QUANT_PRI3 +#define MMEA5_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA5_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA5_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA5_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA5_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA5_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA5_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA5_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA5_IO_WR_PRI_QUANT_PRI1 +#define MMEA5_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA5_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA5_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA5_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA5_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA5_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L 
+#define MMEA5_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA5_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA5_IO_WR_PRI_QUANT_PRI2 +#define MMEA5_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA5_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA5_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA5_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA5_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA5_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA5_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA5_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA5_IO_WR_PRI_QUANT_PRI3 +#define MMEA5_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA5_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA5_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA5_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA5_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA5_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA5_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA5_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA5_SDP_ARB_DRAM +#define MMEA5_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0 +#define MMEA5_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA__SHIFT 0x8 +#define MMEA5_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI__SHIFT 0x10 +#define MMEA5_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI__SHIFT 0x11 +#define MMEA5_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES__SHIFT 0x12 +#define MMEA5_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES__SHIFT 0x13 +#define MMEA5_SDP_ARB_DRAM__EOB_ON_EXPIRE__SHIFT 0x14 +#define MMEA5_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15 +#define MMEA5_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL +#define MMEA5_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L +#define MMEA5_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI_MASK 0x00010000L +#define MMEA5_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI_MASK 0x00020000L +#define MMEA5_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES_MASK 0x00040000L +#define MMEA5_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES_MASK 0x00080000L +#define MMEA5_SDP_ARB_DRAM__EOB_ON_EXPIRE_MASK 0x00100000L +#define MMEA5_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L +//MMEA5_SDP_ARB_GMI +#define MMEA5_SDP_ARB_GMI__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0 +#define MMEA5_SDP_ARB_GMI__RDWR_BURST_LIMIT_DATA__SHIFT 0x8 +#define MMEA5_SDP_ARB_GMI__EARLY_SW2RD_ON_PRI__SHIFT 0x10 +#define MMEA5_SDP_ARB_GMI__EARLY_SW2WR_ON_PRI__SHIFT 0x11 +#define MMEA5_SDP_ARB_GMI__EARLY_SW2RD_ON_RES__SHIFT 0x12 +#define MMEA5_SDP_ARB_GMI__EARLY_SW2WR_ON_RES__SHIFT 0x13 +#define MMEA5_SDP_ARB_GMI__EOB_ON_EXPIRE__SHIFT 0x14 +#define MMEA5_SDP_ARB_GMI__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15 +#define MMEA5_SDP_ARB_GMI__ALLOW_CHAIN_BREAKING__SHIFT 0x16 +#define MMEA5_SDP_ARB_GMI__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL +#define MMEA5_SDP_ARB_GMI__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L +#define MMEA5_SDP_ARB_GMI__EARLY_SW2RD_ON_PRI_MASK 0x00010000L +#define MMEA5_SDP_ARB_GMI__EARLY_SW2WR_ON_PRI_MASK 0x00020000L +#define MMEA5_SDP_ARB_GMI__EARLY_SW2RD_ON_RES_MASK 0x00040000L +#define MMEA5_SDP_ARB_GMI__EARLY_SW2WR_ON_RES_MASK 0x00080000L +#define MMEA5_SDP_ARB_GMI__EOB_ON_EXPIRE_MASK 0x00100000L +#define MMEA5_SDP_ARB_GMI__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L +#define MMEA5_SDP_ARB_GMI__ALLOW_CHAIN_BREAKING_MASK 0x00400000L +//MMEA5_SDP_ARB_FINAL +#define MMEA5_SDP_ARB_FINAL__DRAM_BURST_LIMIT__SHIFT 0x0 +#define 
MMEA5_SDP_ARB_FINAL__GMI_BURST_LIMIT__SHIFT 0x5 +#define MMEA5_SDP_ARB_FINAL__IO_BURST_LIMIT__SHIFT 0xa +#define MMEA5_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER__SHIFT 0xf +#define MMEA5_SDP_ARB_FINAL__RDONLY_VC0__SHIFT 0x11 +#define MMEA5_SDP_ARB_FINAL__RDONLY_VC1__SHIFT 0x12 +#define MMEA5_SDP_ARB_FINAL__RDONLY_VC2__SHIFT 0x13 +#define MMEA5_SDP_ARB_FINAL__RDONLY_VC3__SHIFT 0x14 +#define MMEA5_SDP_ARB_FINAL__RDONLY_VC4__SHIFT 0x15 +#define MMEA5_SDP_ARB_FINAL__RDONLY_VC5__SHIFT 0x16 +#define MMEA5_SDP_ARB_FINAL__RDONLY_VC6__SHIFT 0x17 +#define MMEA5_SDP_ARB_FINAL__RDONLY_VC7__SHIFT 0x18 +#define MMEA5_SDP_ARB_FINAL__ERREVENT_ON_ERROR__SHIFT 0x19 +#define MMEA5_SDP_ARB_FINAL__HALTREQ_ON_ERROR__SHIFT 0x1a +#define MMEA5_SDP_ARB_FINAL__GMI_BURST_STRETCH__SHIFT 0x1b +#define MMEA5_SDP_ARB_FINAL__DRAM_BURST_LIMIT_MASK 0x0000001FL +#define MMEA5_SDP_ARB_FINAL__GMI_BURST_LIMIT_MASK 0x000003E0L +#define MMEA5_SDP_ARB_FINAL__IO_BURST_LIMIT_MASK 0x00007C00L +#define MMEA5_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER_MASK 0x00018000L +#define MMEA5_SDP_ARB_FINAL__RDONLY_VC0_MASK 0x00020000L +#define MMEA5_SDP_ARB_FINAL__RDONLY_VC1_MASK 0x00040000L +#define MMEA5_SDP_ARB_FINAL__RDONLY_VC2_MASK 0x00080000L +#define MMEA5_SDP_ARB_FINAL__RDONLY_VC3_MASK 0x00100000L +#define MMEA5_SDP_ARB_FINAL__RDONLY_VC4_MASK 0x00200000L +#define MMEA5_SDP_ARB_FINAL__RDONLY_VC5_MASK 0x00400000L +#define MMEA5_SDP_ARB_FINAL__RDONLY_VC6_MASK 0x00800000L +#define MMEA5_SDP_ARB_FINAL__RDONLY_VC7_MASK 0x01000000L +#define MMEA5_SDP_ARB_FINAL__ERREVENT_ON_ERROR_MASK 0x02000000L +#define MMEA5_SDP_ARB_FINAL__HALTREQ_ON_ERROR_MASK 0x04000000L +#define MMEA5_SDP_ARB_FINAL__GMI_BURST_STRETCH_MASK 0x08000000L +//MMEA5_SDP_DRAM_PRIORITY +#define MMEA5_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0 +#define MMEA5_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4 +#define MMEA5_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8 +#define MMEA5_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc +#define MMEA5_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10 +#define MMEA5_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14 +#define MMEA5_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18 +#define MMEA5_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c +#define MMEA5_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL +#define MMEA5_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L +#define MMEA5_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L +#define MMEA5_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L +#define MMEA5_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L +#define MMEA5_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L +#define MMEA5_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L +#define MMEA5_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L +//MMEA5_SDP_GMI_PRIORITY +#define MMEA5_SDP_GMI_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0 +#define MMEA5_SDP_GMI_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4 +#define MMEA5_SDP_GMI_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8 +#define MMEA5_SDP_GMI_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc +#define MMEA5_SDP_GMI_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10 +#define MMEA5_SDP_GMI_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14 +#define MMEA5_SDP_GMI_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18 +#define MMEA5_SDP_GMI_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c +#define MMEA5_SDP_GMI_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL +#define MMEA5_SDP_GMI_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L +#define MMEA5_SDP_GMI_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L +#define 
MMEA5_SDP_GMI_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L +#define MMEA5_SDP_GMI_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L +#define MMEA5_SDP_GMI_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L +#define MMEA5_SDP_GMI_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L +#define MMEA5_SDP_GMI_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L +//MMEA5_SDP_IO_PRIORITY +#define MMEA5_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0 +#define MMEA5_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4 +#define MMEA5_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8 +#define MMEA5_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc +#define MMEA5_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10 +#define MMEA5_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14 +#define MMEA5_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18 +#define MMEA5_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c +#define MMEA5_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL +#define MMEA5_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L +#define MMEA5_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L +#define MMEA5_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L +#define MMEA5_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L +#define MMEA5_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L +#define MMEA5_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L +#define MMEA5_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L +//MMEA5_SDP_CREDITS +#define MMEA5_SDP_CREDITS__TAG_LIMIT__SHIFT 0x0 +#define MMEA5_SDP_CREDITS__WR_RESP_CREDITS__SHIFT 0x8 +#define MMEA5_SDP_CREDITS__RD_RESP_CREDITS__SHIFT 0x10 +#define MMEA5_SDP_CREDITS__TAG_LIMIT_MASK 0x000000FFL +#define MMEA5_SDP_CREDITS__WR_RESP_CREDITS_MASK 0x00007F00L +#define MMEA5_SDP_CREDITS__RD_RESP_CREDITS_MASK 0x007F0000L +//MMEA5_SDP_TAG_RESERVE0 +#define MMEA5_SDP_TAG_RESERVE0__VC0__SHIFT 0x0 +#define MMEA5_SDP_TAG_RESERVE0__VC1__SHIFT 0x8 +#define MMEA5_SDP_TAG_RESERVE0__VC2__SHIFT 0x10 +#define MMEA5_SDP_TAG_RESERVE0__VC3__SHIFT 0x18 +#define MMEA5_SDP_TAG_RESERVE0__VC0_MASK 0x000000FFL +#define MMEA5_SDP_TAG_RESERVE0__VC1_MASK 0x0000FF00L +#define MMEA5_SDP_TAG_RESERVE0__VC2_MASK 0x00FF0000L +#define MMEA5_SDP_TAG_RESERVE0__VC3_MASK 0xFF000000L +//MMEA5_SDP_TAG_RESERVE1 +#define MMEA5_SDP_TAG_RESERVE1__VC4__SHIFT 0x0 +#define MMEA5_SDP_TAG_RESERVE1__VC5__SHIFT 0x8 +#define MMEA5_SDP_TAG_RESERVE1__VC6__SHIFT 0x10 +#define MMEA5_SDP_TAG_RESERVE1__VC7__SHIFT 0x18 +#define MMEA5_SDP_TAG_RESERVE1__VC4_MASK 0x000000FFL +#define MMEA5_SDP_TAG_RESERVE1__VC5_MASK 0x0000FF00L +#define MMEA5_SDP_TAG_RESERVE1__VC6_MASK 0x00FF0000L +#define MMEA5_SDP_TAG_RESERVE1__VC7_MASK 0xFF000000L +//MMEA5_SDP_VCC_RESERVE0 +#define MMEA5_SDP_VCC_RESERVE0__VC0_CREDITS__SHIFT 0x0 +#define MMEA5_SDP_VCC_RESERVE0__VC1_CREDITS__SHIFT 0x6 +#define MMEA5_SDP_VCC_RESERVE0__VC2_CREDITS__SHIFT 0xc +#define MMEA5_SDP_VCC_RESERVE0__VC3_CREDITS__SHIFT 0x12 +#define MMEA5_SDP_VCC_RESERVE0__VC4_CREDITS__SHIFT 0x18 +#define MMEA5_SDP_VCC_RESERVE0__VC0_CREDITS_MASK 0x0000003FL +#define MMEA5_SDP_VCC_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L +#define MMEA5_SDP_VCC_RESERVE0__VC2_CREDITS_MASK 0x0003F000L +#define MMEA5_SDP_VCC_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L +#define MMEA5_SDP_VCC_RESERVE0__VC4_CREDITS_MASK 0x3F000000L +//MMEA5_SDP_VCC_RESERVE1 +#define MMEA5_SDP_VCC_RESERVE1__VC5_CREDITS__SHIFT 0x0 +#define MMEA5_SDP_VCC_RESERVE1__VC6_CREDITS__SHIFT 0x6 +#define MMEA5_SDP_VCC_RESERVE1__VC7_CREDITS__SHIFT 0xc +#define MMEA5_SDP_VCC_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f +#define 
MMEA5_SDP_VCC_RESERVE1__VC5_CREDITS_MASK 0x0000003FL +#define MMEA5_SDP_VCC_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L +#define MMEA5_SDP_VCC_RESERVE1__VC7_CREDITS_MASK 0x0003F000L +#define MMEA5_SDP_VCC_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L +//MMEA5_SDP_VCD_RESERVE0 +#define MMEA5_SDP_VCD_RESERVE0__VC0_CREDITS__SHIFT 0x0 +#define MMEA5_SDP_VCD_RESERVE0__VC1_CREDITS__SHIFT 0x6 +#define MMEA5_SDP_VCD_RESERVE0__VC2_CREDITS__SHIFT 0xc +#define MMEA5_SDP_VCD_RESERVE0__VC3_CREDITS__SHIFT 0x12 +#define MMEA5_SDP_VCD_RESERVE0__VC4_CREDITS__SHIFT 0x18 +#define MMEA5_SDP_VCD_RESERVE0__VC0_CREDITS_MASK 0x0000003FL +#define MMEA5_SDP_VCD_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L +#define MMEA5_SDP_VCD_RESERVE0__VC2_CREDITS_MASK 0x0003F000L +#define MMEA5_SDP_VCD_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L +#define MMEA5_SDP_VCD_RESERVE0__VC4_CREDITS_MASK 0x3F000000L +//MMEA5_SDP_VCD_RESERVE1 +#define MMEA5_SDP_VCD_RESERVE1__VC5_CREDITS__SHIFT 0x0 +#define MMEA5_SDP_VCD_RESERVE1__VC6_CREDITS__SHIFT 0x6 +#define MMEA5_SDP_VCD_RESERVE1__VC7_CREDITS__SHIFT 0xc +#define MMEA5_SDP_VCD_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f +#define MMEA5_SDP_VCD_RESERVE1__VC5_CREDITS_MASK 0x0000003FL +#define MMEA5_SDP_VCD_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L +#define MMEA5_SDP_VCD_RESERVE1__VC7_CREDITS_MASK 0x0003F000L +#define MMEA5_SDP_VCD_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L +//MMEA5_SDP_REQ_CNTL +#define MMEA5_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ__SHIFT 0x0 +#define MMEA5_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE__SHIFT 0x1 +#define MMEA5_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC__SHIFT 0x2 +#define MMEA5_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM__SHIFT 0x3 +#define MMEA5_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI__SHIFT 0x4 +#define MMEA5_SDP_REQ_CNTL__INNER_DOMAIN_MODE__SHIFT 0x5 +#define MMEA5_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ_MASK 0x00000001L +#define MMEA5_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE_MASK 0x00000002L +#define MMEA5_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC_MASK 0x00000004L +#define MMEA5_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM_MASK 0x00000008L +#define MMEA5_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI_MASK 0x00000010L +#define MMEA5_SDP_REQ_CNTL__INNER_DOMAIN_MODE_MASK 0x00000020L +//MMEA5_MISC +#define MMEA5_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB__SHIFT 0x0 +#define MMEA5_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB__SHIFT 0x1 +#define MMEA5_MISC__RELATIVE_PRI_IN_GMI_RD_ARB__SHIFT 0x2 +#define MMEA5_MISC__RELATIVE_PRI_IN_GMI_WR_ARB__SHIFT 0x3 +#define MMEA5_MISC__RELATIVE_PRI_IN_IO_RD_ARB__SHIFT 0x4 +#define MMEA5_MISC__RELATIVE_PRI_IN_IO_WR_ARB__SHIFT 0x5 +#define MMEA5_MISC__EARLYWRRET_ENABLE_VC0__SHIFT 0x6 +#define MMEA5_MISC__EARLYWRRET_ENABLE_VC1__SHIFT 0x7 +#define MMEA5_MISC__EARLYWRRET_ENABLE_VC2__SHIFT 0x8 +#define MMEA5_MISC__EARLYWRRET_ENABLE_VC3__SHIFT 0x9 +#define MMEA5_MISC__EARLYWRRET_ENABLE_VC4__SHIFT 0xa +#define MMEA5_MISC__EARLYWRRET_ENABLE_VC5__SHIFT 0xb +#define MMEA5_MISC__EARLYWRRET_ENABLE_VC6__SHIFT 0xc +#define MMEA5_MISC__EARLYWRRET_ENABLE_VC7__SHIFT 0xd +#define MMEA5_MISC__EARLY_SDP_ORIGDATA__SHIFT 0xe +#define MMEA5_MISC__LINKMGR_DYNAMIC_MODE__SHIFT 0xf +#define MMEA5_MISC__LINKMGR_HALT_THRESHOLD__SHIFT 0x11 +#define MMEA5_MISC__LINKMGR_RECONNECT_DELAY__SHIFT 0x13 +#define MMEA5_MISC__LINKMGR_IDLE_THRESHOLD__SHIFT 0x15 +#define MMEA5_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB__SHIFT 0x1a +#define MMEA5_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB__SHIFT 0x1b +#define MMEA5_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB__SHIFT 0x1c +#define MMEA5_MISC__FAVOUR_LAST_CS_IN_GMI_ARB__SHIFT 0x1d +#define 
MMEA5_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB__SHIFT 0x1e +#define MMEA5_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB__SHIFT 0x1f +#define MMEA5_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB_MASK 0x00000001L +#define MMEA5_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB_MASK 0x00000002L +#define MMEA5_MISC__RELATIVE_PRI_IN_GMI_RD_ARB_MASK 0x00000004L +#define MMEA5_MISC__RELATIVE_PRI_IN_GMI_WR_ARB_MASK 0x00000008L +#define MMEA5_MISC__RELATIVE_PRI_IN_IO_RD_ARB_MASK 0x00000010L +#define MMEA5_MISC__RELATIVE_PRI_IN_IO_WR_ARB_MASK 0x00000020L +#define MMEA5_MISC__EARLYWRRET_ENABLE_VC0_MASK 0x00000040L +#define MMEA5_MISC__EARLYWRRET_ENABLE_VC1_MASK 0x00000080L +#define MMEA5_MISC__EARLYWRRET_ENABLE_VC2_MASK 0x00000100L +#define MMEA5_MISC__EARLYWRRET_ENABLE_VC3_MASK 0x00000200L +#define MMEA5_MISC__EARLYWRRET_ENABLE_VC4_MASK 0x00000400L +#define MMEA5_MISC__EARLYWRRET_ENABLE_VC5_MASK 0x00000800L +#define MMEA5_MISC__EARLYWRRET_ENABLE_VC6_MASK 0x00001000L +#define MMEA5_MISC__EARLYWRRET_ENABLE_VC7_MASK 0x00002000L +#define MMEA5_MISC__EARLY_SDP_ORIGDATA_MASK 0x00004000L +#define MMEA5_MISC__LINKMGR_DYNAMIC_MODE_MASK 0x00018000L +#define MMEA5_MISC__LINKMGR_HALT_THRESHOLD_MASK 0x00060000L +#define MMEA5_MISC__LINKMGR_RECONNECT_DELAY_MASK 0x00180000L +#define MMEA5_MISC__LINKMGR_IDLE_THRESHOLD_MASK 0x03E00000L +#define MMEA5_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB_MASK 0x04000000L +#define MMEA5_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB_MASK 0x08000000L +#define MMEA5_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB_MASK 0x10000000L +#define MMEA5_MISC__FAVOUR_LAST_CS_IN_GMI_ARB_MASK 0x20000000L +#define MMEA5_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB_MASK 0x40000000L +#define MMEA5_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB_MASK 0x80000000L +//MMEA5_LATENCY_SAMPLING +#define MMEA5_LATENCY_SAMPLING__SAMPLER0_DRAM__SHIFT 0x0 +#define MMEA5_LATENCY_SAMPLING__SAMPLER1_DRAM__SHIFT 0x1 +#define MMEA5_LATENCY_SAMPLING__SAMPLER0_GMI__SHIFT 0x2 +#define MMEA5_LATENCY_SAMPLING__SAMPLER1_GMI__SHIFT 0x3 +#define MMEA5_LATENCY_SAMPLING__SAMPLER0_IO__SHIFT 0x4 +#define MMEA5_LATENCY_SAMPLING__SAMPLER1_IO__SHIFT 0x5 +#define MMEA5_LATENCY_SAMPLING__SAMPLER0_READ__SHIFT 0x6 +#define MMEA5_LATENCY_SAMPLING__SAMPLER1_READ__SHIFT 0x7 +#define MMEA5_LATENCY_SAMPLING__SAMPLER0_WRITE__SHIFT 0x8 +#define MMEA5_LATENCY_SAMPLING__SAMPLER1_WRITE__SHIFT 0x9 +#define MMEA5_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET__SHIFT 0xa +#define MMEA5_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET__SHIFT 0xb +#define MMEA5_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET__SHIFT 0xc +#define MMEA5_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET__SHIFT 0xd +#define MMEA5_LATENCY_SAMPLING__SAMPLER0_VC__SHIFT 0xe +#define MMEA5_LATENCY_SAMPLING__SAMPLER1_VC__SHIFT 0x16 +#define MMEA5_LATENCY_SAMPLING__SAMPLER0_DRAM_MASK 0x00000001L +#define MMEA5_LATENCY_SAMPLING__SAMPLER1_DRAM_MASK 0x00000002L +#define MMEA5_LATENCY_SAMPLING__SAMPLER0_GMI_MASK 0x00000004L +#define MMEA5_LATENCY_SAMPLING__SAMPLER1_GMI_MASK 0x00000008L +#define MMEA5_LATENCY_SAMPLING__SAMPLER0_IO_MASK 0x00000010L +#define MMEA5_LATENCY_SAMPLING__SAMPLER1_IO_MASK 0x00000020L +#define MMEA5_LATENCY_SAMPLING__SAMPLER0_READ_MASK 0x00000040L +#define MMEA5_LATENCY_SAMPLING__SAMPLER1_READ_MASK 0x00000080L +#define MMEA5_LATENCY_SAMPLING__SAMPLER0_WRITE_MASK 0x00000100L +#define MMEA5_LATENCY_SAMPLING__SAMPLER1_WRITE_MASK 0x00000200L +#define MMEA5_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET_MASK 0x00000400L +#define MMEA5_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET_MASK 0x00000800L +#define MMEA5_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET_MASK 0x00001000L +#define 
MMEA5_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET_MASK 0x00002000L +#define MMEA5_LATENCY_SAMPLING__SAMPLER0_VC_MASK 0x003FC000L +#define MMEA5_LATENCY_SAMPLING__SAMPLER1_VC_MASK 0x3FC00000L +//MMEA5_PERFCOUNTER_LO +#define MMEA5_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0 +#define MMEA5_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL +//MMEA5_PERFCOUNTER_HI +#define MMEA5_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0 +#define MMEA5_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10 +#define MMEA5_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL +#define MMEA5_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L +//MMEA5_PERFCOUNTER0_CFG +#define MMEA5_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0 +#define MMEA5_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8 +#define MMEA5_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18 +#define MMEA5_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c +#define MMEA5_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d +#define MMEA5_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL +#define MMEA5_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define MMEA5_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L +#define MMEA5_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L +#define MMEA5_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L +//MMEA5_PERFCOUNTER1_CFG +#define MMEA5_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0 +#define MMEA5_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8 +#define MMEA5_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18 +#define MMEA5_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c +#define MMEA5_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d +#define MMEA5_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL +#define MMEA5_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define MMEA5_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L +#define MMEA5_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L +#define MMEA5_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L +//MMEA5_PERFCOUNTER_RSLT_CNTL +#define MMEA5_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0 +#define MMEA5_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8 +#define MMEA5_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10 +#define MMEA5_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18 +#define MMEA5_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19 +#define MMEA5_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a +#define MMEA5_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL +#define MMEA5_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L +#define MMEA5_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L +#define MMEA5_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L +#define MMEA5_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L +#define MMEA5_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L +//MMEA5_EDC_CNT +#define MMEA5_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0 +#define MMEA5_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2 +#define MMEA5_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4 +#define MMEA5_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6 +#define MMEA5_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8 +#define MMEA5_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa +#define MMEA5_EDC_CNT__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc +#define MMEA5_EDC_CNT__RRET_TAGMEM_DED_COUNT__SHIFT 0xe +#define MMEA5_EDC_CNT__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10 +#define MMEA5_EDC_CNT__WRET_TAGMEM_DED_COUNT__SHIFT 0x12 +#define MMEA5_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x14 +#define MMEA5_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x16 +#define MMEA5_EDC_CNT__IORD_CMDMEM_SED_COUNT__SHIFT 0x18 +#define MMEA5_EDC_CNT__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1a +#define MMEA5_EDC_CNT__IOWR_DATAMEM_SED_COUNT__SHIFT 0x1c +#define MMEA5_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT_MASK 
0x00000003L +#define MMEA5_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL +#define MMEA5_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L +#define MMEA5_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L +#define MMEA5_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L +#define MMEA5_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L +#define MMEA5_EDC_CNT__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L +#define MMEA5_EDC_CNT__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L +#define MMEA5_EDC_CNT__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L +#define MMEA5_EDC_CNT__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L +#define MMEA5_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x00300000L +#define MMEA5_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x00C00000L +#define MMEA5_EDC_CNT__IORD_CMDMEM_SED_COUNT_MASK 0x03000000L +#define MMEA5_EDC_CNT__IOWR_CMDMEM_SED_COUNT_MASK 0x0C000000L +#define MMEA5_EDC_CNT__IOWR_DATAMEM_SED_COUNT_MASK 0x30000000L +//MMEA5_EDC_CNT2 +#define MMEA5_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0 +#define MMEA5_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2 +#define MMEA5_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4 +#define MMEA5_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6 +#define MMEA5_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8 +#define MMEA5_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa +#define MMEA5_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc +#define MMEA5_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe +#define MMEA5_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L +#define MMEA5_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL +#define MMEA5_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L +#define MMEA5_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT_MASK 0x000000C0L +#define MMEA5_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L +#define MMEA5_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L +#define MMEA5_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L +#define MMEA5_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L +//MMEA5_DSM_CNTL +#define MMEA5_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 +#define MMEA5_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 +#define MMEA5_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x3 +#define MMEA5_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5 +#define MMEA5_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x6 +#define MMEA5_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8 +#define MMEA5_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0x9 +#define MMEA5_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb +#define MMEA5_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0xc +#define MMEA5_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe +#define MMEA5_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0xf +#define MMEA5_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11 +#define MMEA5_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x12 +#define MMEA5_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14 +#define MMEA5_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x15 +#define MMEA5_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x17 +#define MMEA5_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L +#define MMEA5_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L +#define MMEA5_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L +#define MMEA5_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L +#define MMEA5_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L +#define MMEA5_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L +#define 
MMEA5_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L +#define MMEA5_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L +#define MMEA5_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L +#define MMEA5_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L +#define MMEA5_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L +#define MMEA5_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L +#define MMEA5_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L +#define MMEA5_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L +#define MMEA5_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00600000L +#define MMEA5_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00800000L +//MMEA5_DSM_CNTLA +#define MMEA5_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 +#define MMEA5_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 +#define MMEA5_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x3 +#define MMEA5_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5 +#define MMEA5_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x6 +#define MMEA5_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8 +#define MMEA5_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x9 +#define MMEA5_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb +#define MMEA5_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0xc +#define MMEA5_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe +#define MMEA5_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0xf +#define MMEA5_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11 +#define MMEA5_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x12 +#define MMEA5_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14 +#define MMEA5_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L +#define MMEA5_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L +#define MMEA5_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L +#define MMEA5_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L +#define MMEA5_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L +#define MMEA5_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L +#define MMEA5_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L +#define MMEA5_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L +#define MMEA5_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L +#define MMEA5_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L +#define MMEA5_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L +#define MMEA5_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L +#define MMEA5_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L +#define MMEA5_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L +//MMEA5_DSM_CNTL2 +#define MMEA5_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x0 +#define MMEA5_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x2 +#define MMEA5_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x3 +#define MMEA5_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x5 +#define MMEA5_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x6 +#define MMEA5_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x8 +#define MMEA5_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0x9 +#define MMEA5_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xb +#define MMEA5_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0xc +#define MMEA5_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xe +#define 
MMEA5_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0xf +#define MMEA5_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x11 +#define MMEA5_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x12 +#define MMEA5_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x14 +#define MMEA5_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x15 +#define MMEA5_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x17 +#define MMEA5_DSM_CNTL2__INJECT_DELAY__SHIFT 0x1a +#define MMEA5_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L +#define MMEA5_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000004L +#define MMEA5_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L +#define MMEA5_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000020L +#define MMEA5_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L +#define MMEA5_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00000100L +#define MMEA5_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L +#define MMEA5_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00000800L +#define MMEA5_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L +#define MMEA5_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00004000L +#define MMEA5_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L +#define MMEA5_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00020000L +#define MMEA5_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L +#define MMEA5_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00100000L +#define MMEA5_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00600000L +#define MMEA5_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00800000L +#define MMEA5_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L +//MMEA5_DSM_CNTL2A +#define MMEA5_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x0 +#define MMEA5_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x2 +#define MMEA5_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x3 +#define MMEA5_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x5 +#define MMEA5_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x6 +#define MMEA5_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x8 +#define MMEA5_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x9 +#define MMEA5_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0xb +#define MMEA5_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0xc +#define MMEA5_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0xe +#define MMEA5_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0xf +#define MMEA5_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x11 +#define MMEA5_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x12 +#define MMEA5_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x14 +#define MMEA5_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L +#define MMEA5_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000004L +#define MMEA5_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L +#define MMEA5_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000020L +#define MMEA5_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L +#define MMEA5_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000100L +#define MMEA5_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L +#define MMEA5_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000800L +#define MMEA5_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L +#define MMEA5_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00004000L +#define 
MMEA5_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L +#define MMEA5_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00020000L +#define MMEA5_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L +#define MMEA5_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00100000L +//MMEA5_CGTT_CLK_CTRL +#define MMEA5_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define MMEA5_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define MMEA5_CGTT_CLK_CTRL__SPARE0__SHIFT 0xc +#define MMEA5_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE__SHIFT 0x14 +#define MMEA5_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ__SHIFT 0x15 +#define MMEA5_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN__SHIFT 0x16 +#define MMEA5_CGTT_CLK_CTRL__SPARE1__SHIFT 0x17 +#define MMEA5_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define MMEA5_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE__SHIFT 0x1c +#define MMEA5_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ__SHIFT 0x1d +#define MMEA5_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN__SHIFT 0x1e +#define MMEA5_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER__SHIFT 0x1f +#define MMEA5_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define MMEA5_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define MMEA5_CGTT_CLK_CTRL__SPARE0_MASK 0x000FF000L +#define MMEA5_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE_MASK 0x00100000L +#define MMEA5_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ_MASK 0x00200000L +#define MMEA5_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN_MASK 0x00400000L +#define MMEA5_CGTT_CLK_CTRL__SPARE1_MASK 0x07800000L +#define MMEA5_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define MMEA5_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE_MASK 0x10000000L +#define MMEA5_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ_MASK 0x20000000L +#define MMEA5_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN_MASK 0x40000000L +#define MMEA5_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER_MASK 0x80000000L +//MMEA5_EDC_MODE +#define MMEA5_EDC_MODE__COUNT_FED_OUT__SHIFT 0x10 +#define MMEA5_EDC_MODE__GATE_FUE__SHIFT 0x11 +#define MMEA5_EDC_MODE__DED_MODE__SHIFT 0x14 +#define MMEA5_EDC_MODE__PROP_FED__SHIFT 0x1d +#define MMEA5_EDC_MODE__BYPASS__SHIFT 0x1f +#define MMEA5_EDC_MODE__COUNT_FED_OUT_MASK 0x00010000L +#define MMEA5_EDC_MODE__GATE_FUE_MASK 0x00020000L +#define MMEA5_EDC_MODE__DED_MODE_MASK 0x00300000L +#define MMEA5_EDC_MODE__PROP_FED_MASK 0x20000000L +#define MMEA5_EDC_MODE__BYPASS_MASK 0x80000000L +//MMEA5_ERR_STATUS +#define MMEA5_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0 +#define MMEA5_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT 0x4 +#define MMEA5_ERR_STATUS__SDP_RDRSP_DATASTATUS__SHIFT 0x8 +#define MMEA5_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT 0xa +#define MMEA5_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xb +#define MMEA5_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xc +#define MMEA5_ERR_STATUS__FUE_FLAG__SHIFT 0xd +#define MMEA5_ERR_STATUS__SDP_RDRSP_STATUS_MASK 0x0000000FL +#define MMEA5_ERR_STATUS__SDP_WRRSP_STATUS_MASK 0x000000F0L +#define MMEA5_ERR_STATUS__SDP_RDRSP_DATASTATUS_MASK 0x00000300L +#define MMEA5_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L +#define MMEA5_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00000800L +#define MMEA5_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00001000L +#define MMEA5_ERR_STATUS__FUE_FLAG_MASK 0x00002000L +//MMEA5_MISC2 +#define MMEA5_MISC2__CSGROUP_SWAP_IN_DRAM_ARB__SHIFT 0x0 +#define MMEA5_MISC2__CSGROUP_SWAP_IN_GMI_ARB__SHIFT 0x1 +#define MMEA5_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM__SHIFT 0x2 +#define MMEA5_MISC2__CSGRP_BURST_LIMIT_DATA_GMI__SHIFT 0x7 +#define MMEA5_MISC2__IO_RDWR_PRIORITY_ENABLE__SHIFT 0xc +#define MMEA5_MISC2__RRET_SWAP_MODE__SHIFT 0xd +#define 
MMEA5_MISC2__CSGROUP_SWAP_IN_DRAM_ARB_MASK 0x00000001L +#define MMEA5_MISC2__CSGROUP_SWAP_IN_GMI_ARB_MASK 0x00000002L +#define MMEA5_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM_MASK 0x0000007CL +#define MMEA5_MISC2__CSGRP_BURST_LIMIT_DATA_GMI_MASK 0x00000F80L +#define MMEA5_MISC2__IO_RDWR_PRIORITY_ENABLE_MASK 0x00001000L +#define MMEA5_MISC2__RRET_SWAP_MODE_MASK 0x00002000L +//MMEA5_ADDRDEC_SELECT +#define MMEA5_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_START__SHIFT 0x0 +#define MMEA5_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_END__SHIFT 0x5 +#define MMEA5_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_START__SHIFT 0xa +#define MMEA5_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_END__SHIFT 0xf +#define MMEA5_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_START_MASK 0x0000001FL +#define MMEA5_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_END_MASK 0x000003E0L +#define MMEA5_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_START_MASK 0x00007C00L +#define MMEA5_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_END_MASK 0x000F8000L +//MMEA5_EDC_CNT3 +#define MMEA5_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT__SHIFT 0x0 +#define MMEA5_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT__SHIFT 0x2 +#define MMEA5_EDC_CNT3__IORD_CMDMEM_DED_COUNT__SHIFT 0x4 +#define MMEA5_EDC_CNT3__IOWR_CMDMEM_DED_COUNT__SHIFT 0x6 +#define MMEA5_EDC_CNT3__IOWR_DATAMEM_DED_COUNT__SHIFT 0x8 +#define MMEA5_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT__SHIFT 0xa +#define MMEA5_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT__SHIFT 0xc +#define MMEA5_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT_MASK 0x00000003L +#define MMEA5_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT_MASK 0x0000000CL +#define MMEA5_EDC_CNT3__IORD_CMDMEM_DED_COUNT_MASK 0x00000030L +#define MMEA5_EDC_CNT3__IOWR_CMDMEM_DED_COUNT_MASK 0x000000C0L +#define MMEA5_EDC_CNT3__IOWR_DATAMEM_DED_COUNT_MASK 0x00000300L +#define MMEA5_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT_MASK 0x00000C00L +#define MMEA5_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT_MASK 0x00003000L + + +// addressBlock: mmhub_ea_mmeadec6 +//MMEA6_DRAM_RD_CLI2GRP_MAP0 +#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0 +#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2 +#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4 +#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6 +#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8 +#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa +#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc +#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe +#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10 +#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12 +#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14 +#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16 +#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18 +#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a +#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c +#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e +#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L +#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL +#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L +#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L +#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L +#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L +#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L +#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L +#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L +#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L +#define 
MMEA6_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L +#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L +#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L +#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L +#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L +#define MMEA6_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L +//MMEA6_DRAM_RD_CLI2GRP_MAP1 +#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0 +#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2 +#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4 +#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6 +#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8 +#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa +#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc +#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe +#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10 +#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12 +#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14 +#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16 +#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18 +#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a +#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c +#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e +#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L +#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL +#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L +#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L +#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L +#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L +#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L +#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L +#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L +#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L +#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L +#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L +#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L +#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L +#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L +#define MMEA6_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L +//MMEA6_DRAM_WR_CLI2GRP_MAP0 +#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0 +#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2 +#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4 +#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6 +#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8 +#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa +#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc +#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe +#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10 +#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12 +#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14 +#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16 +#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18 +#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a +#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c +#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e +#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L +#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 
0x0000000CL +#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L +#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L +#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L +#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L +#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L +#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L +#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L +#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L +#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L +#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L +#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L +#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L +#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L +#define MMEA6_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L +//MMEA6_DRAM_WR_CLI2GRP_MAP1 +#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0 +#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2 +#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4 +#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6 +#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8 +#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa +#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc +#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe +#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10 +#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12 +#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14 +#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16 +#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18 +#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a +#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c +#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e +#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L +#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL +#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L +#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L +#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L +#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L +#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L +#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L +#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L +#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L +#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L +#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L +#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L +#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L +#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L +#define MMEA6_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L +//MMEA6_DRAM_RD_GRP2VC_MAP +#define MMEA6_DRAM_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0 +#define MMEA6_DRAM_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3 +#define MMEA6_DRAM_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6 +#define MMEA6_DRAM_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9 +#define MMEA6_DRAM_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L +#define MMEA6_DRAM_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L +#define MMEA6_DRAM_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L +#define MMEA6_DRAM_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L +//MMEA6_DRAM_WR_GRP2VC_MAP +#define 
MMEA6_DRAM_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0 +#define MMEA6_DRAM_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3 +#define MMEA6_DRAM_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6 +#define MMEA6_DRAM_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9 +#define MMEA6_DRAM_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L +#define MMEA6_DRAM_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L +#define MMEA6_DRAM_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L +#define MMEA6_DRAM_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L +//MMEA6_DRAM_RD_LAZY +#define MMEA6_DRAM_RD_LAZY__GROUP0_DELAY__SHIFT 0x0 +#define MMEA6_DRAM_RD_LAZY__GROUP1_DELAY__SHIFT 0x3 +#define MMEA6_DRAM_RD_LAZY__GROUP2_DELAY__SHIFT 0x6 +#define MMEA6_DRAM_RD_LAZY__GROUP3_DELAY__SHIFT 0x9 +#define MMEA6_DRAM_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc +#define MMEA6_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14 +#define MMEA6_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b +#define MMEA6_DRAM_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L +#define MMEA6_DRAM_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L +#define MMEA6_DRAM_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L +#define MMEA6_DRAM_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L +#define MMEA6_DRAM_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L +#define MMEA6_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L +#define MMEA6_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L +//MMEA6_DRAM_WR_LAZY +#define MMEA6_DRAM_WR_LAZY__GROUP0_DELAY__SHIFT 0x0 +#define MMEA6_DRAM_WR_LAZY__GROUP1_DELAY__SHIFT 0x3 +#define MMEA6_DRAM_WR_LAZY__GROUP2_DELAY__SHIFT 0x6 +#define MMEA6_DRAM_WR_LAZY__GROUP3_DELAY__SHIFT 0x9 +#define MMEA6_DRAM_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc +#define MMEA6_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14 +#define MMEA6_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b +#define MMEA6_DRAM_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L +#define MMEA6_DRAM_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L +#define MMEA6_DRAM_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L +#define MMEA6_DRAM_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L +#define MMEA6_DRAM_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L +#define MMEA6_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L +#define MMEA6_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L +//MMEA6_DRAM_RD_CAM_CNTL +#define MMEA6_DRAM_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0 +#define MMEA6_DRAM_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4 +#define MMEA6_DRAM_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8 +#define MMEA6_DRAM_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc +#define MMEA6_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10 +#define MMEA6_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13 +#define MMEA6_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16 +#define MMEA6_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19 +#define MMEA6_DRAM_RD_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c +#define MMEA6_DRAM_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL +#define MMEA6_DRAM_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L +#define MMEA6_DRAM_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L +#define MMEA6_DRAM_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L +#define MMEA6_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L +#define MMEA6_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L +#define MMEA6_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L +#define MMEA6_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L +#define MMEA6_DRAM_RD_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L +//MMEA6_DRAM_WR_CAM_CNTL +#define MMEA6_DRAM_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0 +#define MMEA6_DRAM_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4 +#define MMEA6_DRAM_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8 +#define 
MMEA6_DRAM_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc +#define MMEA6_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10 +#define MMEA6_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13 +#define MMEA6_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16 +#define MMEA6_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19 +#define MMEA6_DRAM_WR_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c +#define MMEA6_DRAM_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL +#define MMEA6_DRAM_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L +#define MMEA6_DRAM_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L +#define MMEA6_DRAM_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L +#define MMEA6_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L +#define MMEA6_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L +#define MMEA6_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L +#define MMEA6_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L +#define MMEA6_DRAM_WR_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L +//MMEA6_DRAM_PAGE_BURST +#define MMEA6_DRAM_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0 +#define MMEA6_DRAM_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8 +#define MMEA6_DRAM_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10 +#define MMEA6_DRAM_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18 +#define MMEA6_DRAM_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL +#define MMEA6_DRAM_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L +#define MMEA6_DRAM_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L +#define MMEA6_DRAM_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L +//MMEA6_DRAM_RD_PRI_AGE +#define MMEA6_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0 +#define MMEA6_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3 +#define MMEA6_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6 +#define MMEA6_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9 +#define MMEA6_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc +#define MMEA6_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf +#define MMEA6_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12 +#define MMEA6_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15 +#define MMEA6_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L +#define MMEA6_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L +#define MMEA6_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L +#define MMEA6_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L +#define MMEA6_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L +#define MMEA6_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L +#define MMEA6_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L +#define MMEA6_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L +//MMEA6_DRAM_WR_PRI_AGE +#define MMEA6_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0 +#define MMEA6_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3 +#define MMEA6_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6 +#define MMEA6_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9 +#define MMEA6_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc +#define MMEA6_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf +#define MMEA6_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12 +#define MMEA6_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15 +#define MMEA6_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L +#define MMEA6_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L +#define MMEA6_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L +#define MMEA6_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L +#define MMEA6_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L +#define MMEA6_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L +#define 
MMEA6_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L +#define MMEA6_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L +//MMEA6_DRAM_RD_PRI_QUEUING +#define MMEA6_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0 +#define MMEA6_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3 +#define MMEA6_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6 +#define MMEA6_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9 +#define MMEA6_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L +#define MMEA6_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L +#define MMEA6_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L +#define MMEA6_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L +//MMEA6_DRAM_WR_PRI_QUEUING +#define MMEA6_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0 +#define MMEA6_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3 +#define MMEA6_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6 +#define MMEA6_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9 +#define MMEA6_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L +#define MMEA6_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L +#define MMEA6_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L +#define MMEA6_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L +//MMEA6_DRAM_RD_PRI_FIXED +#define MMEA6_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0 +#define MMEA6_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3 +#define MMEA6_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6 +#define MMEA6_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9 +#define MMEA6_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L +#define MMEA6_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L +#define MMEA6_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L +#define MMEA6_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L +//MMEA6_DRAM_WR_PRI_FIXED +#define MMEA6_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0 +#define MMEA6_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3 +#define MMEA6_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6 +#define MMEA6_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9 +#define MMEA6_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L +#define MMEA6_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L +#define MMEA6_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L +#define MMEA6_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L +//MMEA6_DRAM_RD_PRI_URGENCY +#define MMEA6_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0 +#define MMEA6_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3 +#define MMEA6_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6 +#define MMEA6_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9 +#define MMEA6_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc +#define MMEA6_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd +#define MMEA6_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe +#define MMEA6_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf +#define MMEA6_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L +#define MMEA6_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L +#define MMEA6_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L +#define MMEA6_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 
0x00000E00L +#define MMEA6_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L +#define MMEA6_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L +#define MMEA6_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L +#define MMEA6_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L +//MMEA6_DRAM_WR_PRI_URGENCY +#define MMEA6_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0 +#define MMEA6_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3 +#define MMEA6_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6 +#define MMEA6_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9 +#define MMEA6_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc +#define MMEA6_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd +#define MMEA6_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe +#define MMEA6_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf +#define MMEA6_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L +#define MMEA6_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L +#define MMEA6_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L +#define MMEA6_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L +#define MMEA6_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L +#define MMEA6_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L +#define MMEA6_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L +#define MMEA6_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L +//MMEA6_DRAM_RD_PRI_QUANT_PRI1 +#define MMEA6_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA6_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA6_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA6_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA6_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA6_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA6_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA6_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA6_DRAM_RD_PRI_QUANT_PRI2 +#define MMEA6_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA6_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA6_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA6_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA6_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA6_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA6_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA6_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA6_DRAM_RD_PRI_QUANT_PRI3 +#define MMEA6_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA6_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA6_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA6_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA6_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA6_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA6_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA6_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA6_DRAM_WR_PRI_QUANT_PRI1 +#define MMEA6_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA6_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA6_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10 +#define 
MMEA6_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA6_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA6_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA6_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA6_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA6_DRAM_WR_PRI_QUANT_PRI2 +#define MMEA6_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA6_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA6_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA6_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA6_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA6_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA6_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA6_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA6_DRAM_WR_PRI_QUANT_PRI3 +#define MMEA6_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA6_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA6_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA6_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA6_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA6_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA6_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA6_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA6_GMI_RD_CLI2GRP_MAP0 +#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0 +#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2 +#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4 +#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6 +#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8 +#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa +#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc +#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe +#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10 +#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12 +#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14 +#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16 +#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18 +#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a +#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c +#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e +#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L +#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL +#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L +#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L +#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L +#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L +#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L +#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L +#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L +#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L +#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L +#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L +#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L +#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L +#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L +#define MMEA6_GMI_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L 
+//MMEA6_GMI_RD_CLI2GRP_MAP1 +#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0 +#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2 +#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4 +#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6 +#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8 +#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa +#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc +#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe +#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10 +#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12 +#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14 +#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16 +#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18 +#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a +#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c +#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e +#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L +#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL +#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L +#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L +#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L +#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L +#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L +#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L +#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L +#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L +#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L +#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L +#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L +#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L +#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L +#define MMEA6_GMI_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L +//MMEA6_GMI_WR_CLI2GRP_MAP0 +#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0 +#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2 +#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4 +#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6 +#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8 +#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa +#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc +#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe +#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10 +#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12 +#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14 +#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16 +#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18 +#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a +#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c +#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e +#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L +#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL +#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L +#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L +#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L +#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L +#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L +#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L +#define 
MMEA6_GMI_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L +#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L +#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L +#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L +#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L +#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L +#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L +#define MMEA6_GMI_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L +//MMEA6_GMI_WR_CLI2GRP_MAP1 +#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0 +#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2 +#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4 +#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6 +#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8 +#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa +#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc +#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe +#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10 +#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12 +#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14 +#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16 +#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18 +#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a +#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c +#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e +#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L +#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL +#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L +#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L +#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L +#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L +#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L +#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L +#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L +#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L +#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L +#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L +#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L +#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L +#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L +#define MMEA6_GMI_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L +//MMEA6_GMI_RD_GRP2VC_MAP +#define MMEA6_GMI_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0 +#define MMEA6_GMI_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3 +#define MMEA6_GMI_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6 +#define MMEA6_GMI_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9 +#define MMEA6_GMI_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L +#define MMEA6_GMI_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L +#define MMEA6_GMI_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L +#define MMEA6_GMI_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L +//MMEA6_GMI_WR_GRP2VC_MAP +#define MMEA6_GMI_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0 +#define MMEA6_GMI_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3 +#define MMEA6_GMI_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6 +#define MMEA6_GMI_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9 +#define MMEA6_GMI_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L +#define MMEA6_GMI_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L +#define MMEA6_GMI_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L +#define MMEA6_GMI_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L +//MMEA6_GMI_RD_LAZY +#define 
MMEA6_GMI_RD_LAZY__GROUP0_DELAY__SHIFT 0x0 +#define MMEA6_GMI_RD_LAZY__GROUP1_DELAY__SHIFT 0x3 +#define MMEA6_GMI_RD_LAZY__GROUP2_DELAY__SHIFT 0x6 +#define MMEA6_GMI_RD_LAZY__GROUP3_DELAY__SHIFT 0x9 +#define MMEA6_GMI_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc +#define MMEA6_GMI_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14 +#define MMEA6_GMI_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b +#define MMEA6_GMI_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L +#define MMEA6_GMI_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L +#define MMEA6_GMI_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L +#define MMEA6_GMI_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L +#define MMEA6_GMI_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L +#define MMEA6_GMI_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L +#define MMEA6_GMI_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L +//MMEA6_GMI_WR_LAZY +#define MMEA6_GMI_WR_LAZY__GROUP0_DELAY__SHIFT 0x0 +#define MMEA6_GMI_WR_LAZY__GROUP1_DELAY__SHIFT 0x3 +#define MMEA6_GMI_WR_LAZY__GROUP2_DELAY__SHIFT 0x6 +#define MMEA6_GMI_WR_LAZY__GROUP3_DELAY__SHIFT 0x9 +#define MMEA6_GMI_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc +#define MMEA6_GMI_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14 +#define MMEA6_GMI_WR_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b +#define MMEA6_GMI_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L +#define MMEA6_GMI_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L +#define MMEA6_GMI_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L +#define MMEA6_GMI_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L +#define MMEA6_GMI_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L +#define MMEA6_GMI_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L +#define MMEA6_GMI_WR_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L +//MMEA6_GMI_RD_CAM_CNTL +#define MMEA6_GMI_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0 +#define MMEA6_GMI_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4 +#define MMEA6_GMI_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8 +#define MMEA6_GMI_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc +#define MMEA6_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10 +#define MMEA6_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13 +#define MMEA6_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16 +#define MMEA6_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19 +#define MMEA6_GMI_RD_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c +#define MMEA6_GMI_RD_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d +#define MMEA6_GMI_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL +#define MMEA6_GMI_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L +#define MMEA6_GMI_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L +#define MMEA6_GMI_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L +#define MMEA6_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L +#define MMEA6_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L +#define MMEA6_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L +#define MMEA6_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L +#define MMEA6_GMI_RD_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L +#define MMEA6_GMI_RD_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L +//MMEA6_GMI_WR_CAM_CNTL +#define MMEA6_GMI_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0 +#define MMEA6_GMI_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4 +#define MMEA6_GMI_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8 +#define MMEA6_GMI_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc +#define MMEA6_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10 +#define MMEA6_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13 +#define MMEA6_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16 +#define MMEA6_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19 +#define MMEA6_GMI_WR_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c +#define MMEA6_GMI_WR_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d +#define 
MMEA6_GMI_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL +#define MMEA6_GMI_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L +#define MMEA6_GMI_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L +#define MMEA6_GMI_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L +#define MMEA6_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L +#define MMEA6_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L +#define MMEA6_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L +#define MMEA6_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L +#define MMEA6_GMI_WR_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L +#define MMEA6_GMI_WR_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L +//MMEA6_GMI_PAGE_BURST +#define MMEA6_GMI_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0 +#define MMEA6_GMI_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8 +#define MMEA6_GMI_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10 +#define MMEA6_GMI_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18 +#define MMEA6_GMI_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL +#define MMEA6_GMI_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L +#define MMEA6_GMI_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L +#define MMEA6_GMI_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L +//MMEA6_GMI_RD_PRI_AGE +#define MMEA6_GMI_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0 +#define MMEA6_GMI_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3 +#define MMEA6_GMI_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6 +#define MMEA6_GMI_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9 +#define MMEA6_GMI_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc +#define MMEA6_GMI_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf +#define MMEA6_GMI_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12 +#define MMEA6_GMI_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15 +#define MMEA6_GMI_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L +#define MMEA6_GMI_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L +#define MMEA6_GMI_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L +#define MMEA6_GMI_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L +#define MMEA6_GMI_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L +#define MMEA6_GMI_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L +#define MMEA6_GMI_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L +#define MMEA6_GMI_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L +//MMEA6_GMI_WR_PRI_AGE +#define MMEA6_GMI_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0 +#define MMEA6_GMI_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3 +#define MMEA6_GMI_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6 +#define MMEA6_GMI_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9 +#define MMEA6_GMI_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc +#define MMEA6_GMI_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf +#define MMEA6_GMI_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12 +#define MMEA6_GMI_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15 +#define MMEA6_GMI_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L +#define MMEA6_GMI_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L +#define MMEA6_GMI_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L +#define MMEA6_GMI_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L +#define MMEA6_GMI_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L +#define MMEA6_GMI_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L +#define MMEA6_GMI_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L +#define MMEA6_GMI_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L +//MMEA6_GMI_RD_PRI_QUEUING +#define MMEA6_GMI_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0 +#define MMEA6_GMI_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3 +#define MMEA6_GMI_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6 +#define 
MMEA6_GMI_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9 +#define MMEA6_GMI_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L +#define MMEA6_GMI_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L +#define MMEA6_GMI_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L +#define MMEA6_GMI_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L +//MMEA6_GMI_WR_PRI_QUEUING +#define MMEA6_GMI_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0 +#define MMEA6_GMI_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3 +#define MMEA6_GMI_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6 +#define MMEA6_GMI_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9 +#define MMEA6_GMI_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L +#define MMEA6_GMI_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L +#define MMEA6_GMI_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L +#define MMEA6_GMI_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L +//MMEA6_GMI_RD_PRI_FIXED +#define MMEA6_GMI_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0 +#define MMEA6_GMI_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3 +#define MMEA6_GMI_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6 +#define MMEA6_GMI_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9 +#define MMEA6_GMI_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L +#define MMEA6_GMI_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L +#define MMEA6_GMI_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L +#define MMEA6_GMI_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L +//MMEA6_GMI_WR_PRI_FIXED +#define MMEA6_GMI_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0 +#define MMEA6_GMI_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3 +#define MMEA6_GMI_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6 +#define MMEA6_GMI_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9 +#define MMEA6_GMI_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L +#define MMEA6_GMI_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L +#define MMEA6_GMI_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L +#define MMEA6_GMI_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L +//MMEA6_GMI_RD_PRI_URGENCY +#define MMEA6_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0 +#define MMEA6_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3 +#define MMEA6_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6 +#define MMEA6_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9 +#define MMEA6_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc +#define MMEA6_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd +#define MMEA6_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe +#define MMEA6_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf +#define MMEA6_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L +#define MMEA6_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L +#define MMEA6_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L +#define MMEA6_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L +#define MMEA6_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L +#define MMEA6_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L +#define MMEA6_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L +#define MMEA6_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L +//MMEA6_GMI_WR_PRI_URGENCY +#define MMEA6_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0 +#define 
MMEA6_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3 +#define MMEA6_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6 +#define MMEA6_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9 +#define MMEA6_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc +#define MMEA6_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd +#define MMEA6_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe +#define MMEA6_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf +#define MMEA6_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L +#define MMEA6_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L +#define MMEA6_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L +#define MMEA6_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L +#define MMEA6_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L +#define MMEA6_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L +#define MMEA6_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L +#define MMEA6_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L +//MMEA6_GMI_RD_PRI_URGENCY_MASKING +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0 +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1 +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2 +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3 +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4 +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5 +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6 +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7 +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8 +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9 +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10 +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11 +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12 +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13 +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14 +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15 +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16 +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17 +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18 +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19 +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L 
+#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L +#define MMEA6_GMI_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L +//MMEA6_GMI_WR_PRI_URGENCY_MASKING +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0 +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1 +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2 +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3 +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4 +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5 +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6 +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7 +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8 +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9 +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10 +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11 +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12 +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13 +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14 +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15 +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16 +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17 +#define 
MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18 +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19 +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L +#define MMEA6_GMI_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L +//MMEA6_GMI_RD_PRI_QUANT_PRI1 +#define MMEA6_GMI_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA6_GMI_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA6_GMI_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA6_GMI_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA6_GMI_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA6_GMI_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA6_GMI_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA6_GMI_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA6_GMI_RD_PRI_QUANT_PRI2 +#define MMEA6_GMI_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA6_GMI_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8 +#define 
MMEA6_GMI_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA6_GMI_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA6_GMI_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA6_GMI_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA6_GMI_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA6_GMI_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA6_GMI_RD_PRI_QUANT_PRI3 +#define MMEA6_GMI_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA6_GMI_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA6_GMI_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA6_GMI_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA6_GMI_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA6_GMI_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA6_GMI_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA6_GMI_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA6_GMI_WR_PRI_QUANT_PRI1 +#define MMEA6_GMI_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA6_GMI_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA6_GMI_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA6_GMI_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA6_GMI_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA6_GMI_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA6_GMI_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA6_GMI_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA6_GMI_WR_PRI_QUANT_PRI2 +#define MMEA6_GMI_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA6_GMI_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA6_GMI_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA6_GMI_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA6_GMI_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA6_GMI_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA6_GMI_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA6_GMI_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA6_GMI_WR_PRI_QUANT_PRI3 +#define MMEA6_GMI_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA6_GMI_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA6_GMI_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA6_GMI_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA6_GMI_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA6_GMI_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA6_GMI_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA6_GMI_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA6_ADDRNORM_BASE_ADDR0 +#define MMEA6_ADDRNORM_BASE_ADDR0__ADDR_RNG_VAL__SHIFT 0x0 +#define MMEA6_ADDRNORM_BASE_ADDR0__LGCY_MMIO_HOLE_EN__SHIFT 0x1 +#define MMEA6_ADDRNORM_BASE_ADDR0__INTLV_NUM_CHAN__SHIFT 0x2 +#define MMEA6_ADDRNORM_BASE_ADDR0__INTLV_NUM_DIES__SHIFT 0x6 +#define MMEA6_ADDRNORM_BASE_ADDR0__INTLV_NUM_SOCKETS__SHIFT 0x8 +#define MMEA6_ADDRNORM_BASE_ADDR0__INTLV_ADDR_SEL__SHIFT 0x9 +#define MMEA6_ADDRNORM_BASE_ADDR0__BASE_ADDR__SHIFT 0xc +#define MMEA6_ADDRNORM_BASE_ADDR0__ADDR_RNG_VAL_MASK 0x00000001L +#define MMEA6_ADDRNORM_BASE_ADDR0__LGCY_MMIO_HOLE_EN_MASK 0x00000002L +#define MMEA6_ADDRNORM_BASE_ADDR0__INTLV_NUM_CHAN_MASK 0x0000003CL +#define MMEA6_ADDRNORM_BASE_ADDR0__INTLV_NUM_DIES_MASK 0x000000C0L +#define MMEA6_ADDRNORM_BASE_ADDR0__INTLV_NUM_SOCKETS_MASK 
0x00000100L +#define MMEA6_ADDRNORM_BASE_ADDR0__INTLV_ADDR_SEL_MASK 0x00000E00L +#define MMEA6_ADDRNORM_BASE_ADDR0__BASE_ADDR_MASK 0xFFFFF000L +//MMEA6_ADDRNORM_LIMIT_ADDR0 +#define MMEA6_ADDRNORM_LIMIT_ADDR0__DST_FABRIC_ID__SHIFT 0x0 +#define MMEA6_ADDRNORM_LIMIT_ADDR0__LIMIT_ADDR__SHIFT 0xc +#define MMEA6_ADDRNORM_LIMIT_ADDR0__DST_FABRIC_ID_MASK 0x0000001FL +#define MMEA6_ADDRNORM_LIMIT_ADDR0__LIMIT_ADDR_MASK 0xFFFFF000L +//MMEA6_ADDRNORM_BASE_ADDR1 +#define MMEA6_ADDRNORM_BASE_ADDR1__ADDR_RNG_VAL__SHIFT 0x0 +#define MMEA6_ADDRNORM_BASE_ADDR1__LGCY_MMIO_HOLE_EN__SHIFT 0x1 +#define MMEA6_ADDRNORM_BASE_ADDR1__INTLV_NUM_CHAN__SHIFT 0x2 +#define MMEA6_ADDRNORM_BASE_ADDR1__INTLV_NUM_DIES__SHIFT 0x6 +#define MMEA6_ADDRNORM_BASE_ADDR1__INTLV_NUM_SOCKETS__SHIFT 0x8 +#define MMEA6_ADDRNORM_BASE_ADDR1__INTLV_ADDR_SEL__SHIFT 0x9 +#define MMEA6_ADDRNORM_BASE_ADDR1__BASE_ADDR__SHIFT 0xc +#define MMEA6_ADDRNORM_BASE_ADDR1__ADDR_RNG_VAL_MASK 0x00000001L +#define MMEA6_ADDRNORM_BASE_ADDR1__LGCY_MMIO_HOLE_EN_MASK 0x00000002L +#define MMEA6_ADDRNORM_BASE_ADDR1__INTLV_NUM_CHAN_MASK 0x0000003CL +#define MMEA6_ADDRNORM_BASE_ADDR1__INTLV_NUM_DIES_MASK 0x000000C0L +#define MMEA6_ADDRNORM_BASE_ADDR1__INTLV_NUM_SOCKETS_MASK 0x00000100L +#define MMEA6_ADDRNORM_BASE_ADDR1__INTLV_ADDR_SEL_MASK 0x00000E00L +#define MMEA6_ADDRNORM_BASE_ADDR1__BASE_ADDR_MASK 0xFFFFF000L +//MMEA6_ADDRNORM_LIMIT_ADDR1 +#define MMEA6_ADDRNORM_LIMIT_ADDR1__DST_FABRIC_ID__SHIFT 0x0 +#define MMEA6_ADDRNORM_LIMIT_ADDR1__LIMIT_ADDR__SHIFT 0xc +#define MMEA6_ADDRNORM_LIMIT_ADDR1__DST_FABRIC_ID_MASK 0x0000001FL +#define MMEA6_ADDRNORM_LIMIT_ADDR1__LIMIT_ADDR_MASK 0xFFFFF000L +//MMEA6_ADDRNORM_OFFSET_ADDR1 +#define MMEA6_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_EN__SHIFT 0x0 +#define MMEA6_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET__SHIFT 0x14 +#define MMEA6_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_EN_MASK 0x00000001L +#define MMEA6_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_MASK 0xFFF00000L +//MMEA6_ADDRNORM_BASE_ADDR2 +#define MMEA6_ADDRNORM_BASE_ADDR2__ADDR_RNG_VAL__SHIFT 0x0 +#define MMEA6_ADDRNORM_BASE_ADDR2__LGCY_MMIO_HOLE_EN__SHIFT 0x1 +#define MMEA6_ADDRNORM_BASE_ADDR2__INTLV_NUM_CHAN__SHIFT 0x2 +#define MMEA6_ADDRNORM_BASE_ADDR2__INTLV_NUM_DIES__SHIFT 0x6 +#define MMEA6_ADDRNORM_BASE_ADDR2__INTLV_NUM_SOCKETS__SHIFT 0x8 +#define MMEA6_ADDRNORM_BASE_ADDR2__INTLV_ADDR_SEL__SHIFT 0x9 +#define MMEA6_ADDRNORM_BASE_ADDR2__BASE_ADDR__SHIFT 0xc +#define MMEA6_ADDRNORM_BASE_ADDR2__ADDR_RNG_VAL_MASK 0x00000001L +#define MMEA6_ADDRNORM_BASE_ADDR2__LGCY_MMIO_HOLE_EN_MASK 0x00000002L +#define MMEA6_ADDRNORM_BASE_ADDR2__INTLV_NUM_CHAN_MASK 0x0000003CL +#define MMEA6_ADDRNORM_BASE_ADDR2__INTLV_NUM_DIES_MASK 0x000000C0L +#define MMEA6_ADDRNORM_BASE_ADDR2__INTLV_NUM_SOCKETS_MASK 0x00000100L +#define MMEA6_ADDRNORM_BASE_ADDR2__INTLV_ADDR_SEL_MASK 0x00000E00L +#define MMEA6_ADDRNORM_BASE_ADDR2__BASE_ADDR_MASK 0xFFFFF000L +//MMEA6_ADDRNORM_LIMIT_ADDR2 +#define MMEA6_ADDRNORM_LIMIT_ADDR2__DST_FABRIC_ID__SHIFT 0x0 +#define MMEA6_ADDRNORM_LIMIT_ADDR2__LIMIT_ADDR__SHIFT 0xc +#define MMEA6_ADDRNORM_LIMIT_ADDR2__DST_FABRIC_ID_MASK 0x0000001FL +#define MMEA6_ADDRNORM_LIMIT_ADDR2__LIMIT_ADDR_MASK 0xFFFFF000L +//MMEA6_ADDRNORM_BASE_ADDR3 +#define MMEA6_ADDRNORM_BASE_ADDR3__ADDR_RNG_VAL__SHIFT 0x0 +#define MMEA6_ADDRNORM_BASE_ADDR3__LGCY_MMIO_HOLE_EN__SHIFT 0x1 +#define MMEA6_ADDRNORM_BASE_ADDR3__INTLV_NUM_CHAN__SHIFT 0x2 +#define MMEA6_ADDRNORM_BASE_ADDR3__INTLV_NUM_DIES__SHIFT 0x6 +#define MMEA6_ADDRNORM_BASE_ADDR3__INTLV_NUM_SOCKETS__SHIFT 0x8 +#define 
MMEA6_ADDRNORM_BASE_ADDR3__INTLV_ADDR_SEL__SHIFT 0x9 +#define MMEA6_ADDRNORM_BASE_ADDR3__BASE_ADDR__SHIFT 0xc +#define MMEA6_ADDRNORM_BASE_ADDR3__ADDR_RNG_VAL_MASK 0x00000001L +#define MMEA6_ADDRNORM_BASE_ADDR3__LGCY_MMIO_HOLE_EN_MASK 0x00000002L +#define MMEA6_ADDRNORM_BASE_ADDR3__INTLV_NUM_CHAN_MASK 0x0000003CL +#define MMEA6_ADDRNORM_BASE_ADDR3__INTLV_NUM_DIES_MASK 0x000000C0L +#define MMEA6_ADDRNORM_BASE_ADDR3__INTLV_NUM_SOCKETS_MASK 0x00000100L +#define MMEA6_ADDRNORM_BASE_ADDR3__INTLV_ADDR_SEL_MASK 0x00000E00L +#define MMEA6_ADDRNORM_BASE_ADDR3__BASE_ADDR_MASK 0xFFFFF000L +//MMEA6_ADDRNORM_LIMIT_ADDR3 +#define MMEA6_ADDRNORM_LIMIT_ADDR3__DST_FABRIC_ID__SHIFT 0x0 +#define MMEA6_ADDRNORM_LIMIT_ADDR3__LIMIT_ADDR__SHIFT 0xc +#define MMEA6_ADDRNORM_LIMIT_ADDR3__DST_FABRIC_ID_MASK 0x0000001FL +#define MMEA6_ADDRNORM_LIMIT_ADDR3__LIMIT_ADDR_MASK 0xFFFFF000L +//MMEA6_ADDRNORM_OFFSET_ADDR3 +#define MMEA6_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_EN__SHIFT 0x0 +#define MMEA6_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET__SHIFT 0x14 +#define MMEA6_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_EN_MASK 0x00000001L +#define MMEA6_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_MASK 0xFFF00000L +//MMEA6_ADDRNORM_BASE_ADDR4 +#define MMEA6_ADDRNORM_BASE_ADDR4__ADDR_RNG_VAL__SHIFT 0x0 +#define MMEA6_ADDRNORM_BASE_ADDR4__LGCY_MMIO_HOLE_EN__SHIFT 0x1 +#define MMEA6_ADDRNORM_BASE_ADDR4__INTLV_NUM_CHAN__SHIFT 0x2 +#define MMEA6_ADDRNORM_BASE_ADDR4__INTLV_NUM_DIES__SHIFT 0x6 +#define MMEA6_ADDRNORM_BASE_ADDR4__INTLV_NUM_SOCKETS__SHIFT 0x8 +#define MMEA6_ADDRNORM_BASE_ADDR4__INTLV_ADDR_SEL__SHIFT 0x9 +#define MMEA6_ADDRNORM_BASE_ADDR4__BASE_ADDR__SHIFT 0xc +#define MMEA6_ADDRNORM_BASE_ADDR4__ADDR_RNG_VAL_MASK 0x00000001L +#define MMEA6_ADDRNORM_BASE_ADDR4__LGCY_MMIO_HOLE_EN_MASK 0x00000002L +#define MMEA6_ADDRNORM_BASE_ADDR4__INTLV_NUM_CHAN_MASK 0x0000003CL +#define MMEA6_ADDRNORM_BASE_ADDR4__INTLV_NUM_DIES_MASK 0x000000C0L +#define MMEA6_ADDRNORM_BASE_ADDR4__INTLV_NUM_SOCKETS_MASK 0x00000100L +#define MMEA6_ADDRNORM_BASE_ADDR4__INTLV_ADDR_SEL_MASK 0x00000E00L +#define MMEA6_ADDRNORM_BASE_ADDR4__BASE_ADDR_MASK 0xFFFFF000L +//MMEA6_ADDRNORM_LIMIT_ADDR4 +#define MMEA6_ADDRNORM_LIMIT_ADDR4__DST_FABRIC_ID__SHIFT 0x0 +#define MMEA6_ADDRNORM_LIMIT_ADDR4__LIMIT_ADDR__SHIFT 0xc +#define MMEA6_ADDRNORM_LIMIT_ADDR4__DST_FABRIC_ID_MASK 0x0000001FL +#define MMEA6_ADDRNORM_LIMIT_ADDR4__LIMIT_ADDR_MASK 0xFFFFF000L +//MMEA6_ADDRNORM_BASE_ADDR5 +#define MMEA6_ADDRNORM_BASE_ADDR5__ADDR_RNG_VAL__SHIFT 0x0 +#define MMEA6_ADDRNORM_BASE_ADDR5__LGCY_MMIO_HOLE_EN__SHIFT 0x1 +#define MMEA6_ADDRNORM_BASE_ADDR5__INTLV_NUM_CHAN__SHIFT 0x2 +#define MMEA6_ADDRNORM_BASE_ADDR5__INTLV_NUM_DIES__SHIFT 0x6 +#define MMEA6_ADDRNORM_BASE_ADDR5__INTLV_NUM_SOCKETS__SHIFT 0x8 +#define MMEA6_ADDRNORM_BASE_ADDR5__INTLV_ADDR_SEL__SHIFT 0x9 +#define MMEA6_ADDRNORM_BASE_ADDR5__BASE_ADDR__SHIFT 0xc +#define MMEA6_ADDRNORM_BASE_ADDR5__ADDR_RNG_VAL_MASK 0x00000001L +#define MMEA6_ADDRNORM_BASE_ADDR5__LGCY_MMIO_HOLE_EN_MASK 0x00000002L +#define MMEA6_ADDRNORM_BASE_ADDR5__INTLV_NUM_CHAN_MASK 0x0000003CL +#define MMEA6_ADDRNORM_BASE_ADDR5__INTLV_NUM_DIES_MASK 0x000000C0L +#define MMEA6_ADDRNORM_BASE_ADDR5__INTLV_NUM_SOCKETS_MASK 0x00000100L +#define MMEA6_ADDRNORM_BASE_ADDR5__INTLV_ADDR_SEL_MASK 0x00000E00L +#define MMEA6_ADDRNORM_BASE_ADDR5__BASE_ADDR_MASK 0xFFFFF000L +//MMEA6_ADDRNORM_LIMIT_ADDR5 +#define MMEA6_ADDRNORM_LIMIT_ADDR5__DST_FABRIC_ID__SHIFT 0x0 +#define MMEA6_ADDRNORM_LIMIT_ADDR5__LIMIT_ADDR__SHIFT 0xc +#define MMEA6_ADDRNORM_LIMIT_ADDR5__DST_FABRIC_ID_MASK 0x0000001FL 
+#define MMEA6_ADDRNORM_LIMIT_ADDR5__LIMIT_ADDR_MASK 0xFFFFF000L +//MMEA6_ADDRNORM_OFFSET_ADDR5 +#define MMEA6_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_EN__SHIFT 0x0 +#define MMEA6_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET__SHIFT 0x14 +#define MMEA6_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_EN_MASK 0x00000001L +#define MMEA6_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_MASK 0xFFF00000L +//MMEA6_ADDRNORMDRAM_HOLE_CNTL +#define MMEA6_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_VALID__SHIFT 0x0 +#define MMEA6_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_OFFSET__SHIFT 0x7 +#define MMEA6_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_VALID_MASK 0x00000001L +#define MMEA6_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_OFFSET_MASK 0x0000FF80L +//MMEA6_ADDRNORMGMI_HOLE_CNTL +#define MMEA6_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_VALID__SHIFT 0x0 +#define MMEA6_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_OFFSET__SHIFT 0x7 +#define MMEA6_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_VALID_MASK 0x00000001L +#define MMEA6_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_OFFSET_MASK 0x0000FF80L +//MMEA6_ADDRNORMDRAM_NP2_CHANNEL_CFG +#define MMEA6_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE0__SHIFT 0x0 +#define MMEA6_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE1__SHIFT 0x6 +#define MMEA6_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE0_MASK 0x0000003FL +#define MMEA6_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE1_MASK 0x00000FC0L +//MMEA6_ADDRNORMGMI_NP2_CHANNEL_CFG +#define MMEA6_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE2__SHIFT 0x0 +#define MMEA6_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE3__SHIFT 0x6 +#define MMEA6_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE2_MASK 0x0000003FL +#define MMEA6_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE3_MASK 0x00000FC0L +//MMEA6_ADDRDEC_BANK_CFG +#define MMEA6_ADDRDEC_BANK_CFG__BANK_MASK_DRAM__SHIFT 0x0 +#define MMEA6_ADDRDEC_BANK_CFG__BANK_MASK_GMI__SHIFT 0x6 +#define MMEA6_ADDRDEC_BANK_CFG__BANKGROUP_SEL_DRAM__SHIFT 0xc +#define MMEA6_ADDRDEC_BANK_CFG__BANKGROUP_SEL_GMI__SHIFT 0xf +#define MMEA6_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_DRAM__SHIFT 0x12 +#define MMEA6_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_GMI__SHIFT 0x13 +#define MMEA6_ADDRDEC_BANK_CFG__BANK_MASK_DRAM_MASK 0x0000003FL +#define MMEA6_ADDRDEC_BANK_CFG__BANK_MASK_GMI_MASK 0x00000FC0L +#define MMEA6_ADDRDEC_BANK_CFG__BANKGROUP_SEL_DRAM_MASK 0x00007000L +#define MMEA6_ADDRDEC_BANK_CFG__BANKGROUP_SEL_GMI_MASK 0x00038000L +#define MMEA6_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_DRAM_MASK 0x00040000L +#define MMEA6_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_GMI_MASK 0x00080000L +//MMEA6_ADDRDEC_MISC_CFG +#define MMEA6_ADDRDEC_MISC_CFG__VCM_EN0__SHIFT 0x0 +#define MMEA6_ADDRDEC_MISC_CFG__VCM_EN1__SHIFT 0x1 +#define MMEA6_ADDRDEC_MISC_CFG__VCM_EN2__SHIFT 0x2 +#define MMEA6_ADDRDEC_MISC_CFG__PCH_MASK_DRAM__SHIFT 0x8 +#define MMEA6_ADDRDEC_MISC_CFG__PCH_MASK_GMI__SHIFT 0x9 +#define MMEA6_ADDRDEC_MISC_CFG__CH_MASK_DRAM__SHIFT 0xc +#define MMEA6_ADDRDEC_MISC_CFG__CH_MASK_GMI__SHIFT 0x11 +#define MMEA6_ADDRDEC_MISC_CFG__CS_MASK_DRAM__SHIFT 0x16 +#define MMEA6_ADDRDEC_MISC_CFG__CS_MASK_GMI__SHIFT 0x18 +#define MMEA6_ADDRDEC_MISC_CFG__RM_MASK_DRAM__SHIFT 0x1a +#define MMEA6_ADDRDEC_MISC_CFG__RM_MASK_GMI__SHIFT 0x1d +#define MMEA6_ADDRDEC_MISC_CFG__VCM_EN0_MASK 0x00000001L +#define MMEA6_ADDRDEC_MISC_CFG__VCM_EN1_MASK 0x00000002L +#define MMEA6_ADDRDEC_MISC_CFG__VCM_EN2_MASK 0x00000004L +#define MMEA6_ADDRDEC_MISC_CFG__PCH_MASK_DRAM_MASK 0x00000100L +#define MMEA6_ADDRDEC_MISC_CFG__PCH_MASK_GMI_MASK 0x00000200L +#define MMEA6_ADDRDEC_MISC_CFG__CH_MASK_DRAM_MASK 0x0001F000L +#define 
MMEA6_ADDRDEC_MISC_CFG__CH_MASK_GMI_MASK 0x003E0000L +#define MMEA6_ADDRDEC_MISC_CFG__CS_MASK_DRAM_MASK 0x00C00000L +#define MMEA6_ADDRDEC_MISC_CFG__CS_MASK_GMI_MASK 0x03000000L +#define MMEA6_ADDRDEC_MISC_CFG__RM_MASK_DRAM_MASK 0x1C000000L +#define MMEA6_ADDRDEC_MISC_CFG__RM_MASK_GMI_MASK 0xE0000000L +//MMEA6_ADDRDECDRAM_ADDR_HASH_BANK0 +#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK0__XOR_ENABLE__SHIFT 0x0 +#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK0__COL_XOR__SHIFT 0x1 +#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK0__ROW_XOR__SHIFT 0xe +#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK0__XOR_ENABLE_MASK 0x00000001L +#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK0__COL_XOR_MASK 0x00003FFEL +#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK0__ROW_XOR_MASK 0xFFFFC000L +//MMEA6_ADDRDECDRAM_ADDR_HASH_BANK1 +#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK1__XOR_ENABLE__SHIFT 0x0 +#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK1__COL_XOR__SHIFT 0x1 +#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK1__ROW_XOR__SHIFT 0xe +#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK1__XOR_ENABLE_MASK 0x00000001L +#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK1__COL_XOR_MASK 0x00003FFEL +#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK1__ROW_XOR_MASK 0xFFFFC000L +//MMEA6_ADDRDECDRAM_ADDR_HASH_BANK2 +#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK2__XOR_ENABLE__SHIFT 0x0 +#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK2__COL_XOR__SHIFT 0x1 +#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK2__ROW_XOR__SHIFT 0xe +#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK2__XOR_ENABLE_MASK 0x00000001L +#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK2__COL_XOR_MASK 0x00003FFEL +#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK2__ROW_XOR_MASK 0xFFFFC000L +//MMEA6_ADDRDECDRAM_ADDR_HASH_BANK3 +#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK3__XOR_ENABLE__SHIFT 0x0 +#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK3__COL_XOR__SHIFT 0x1 +#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK3__ROW_XOR__SHIFT 0xe +#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK3__XOR_ENABLE_MASK 0x00000001L +#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK3__COL_XOR_MASK 0x00003FFEL +#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK3__ROW_XOR_MASK 0xFFFFC000L +//MMEA6_ADDRDECDRAM_ADDR_HASH_BANK4 +#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK4__XOR_ENABLE__SHIFT 0x0 +#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK4__COL_XOR__SHIFT 0x1 +#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK4__ROW_XOR__SHIFT 0xe +#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK4__XOR_ENABLE_MASK 0x00000001L +#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK4__COL_XOR_MASK 0x00003FFEL +#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK4__ROW_XOR_MASK 0xFFFFC000L +//MMEA6_ADDRDECDRAM_ADDR_HASH_BANK5 +#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK5__XOR_ENABLE__SHIFT 0x0 +#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK5__COL_XOR__SHIFT 0x1 +#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK5__ROW_XOR__SHIFT 0xe +#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK5__XOR_ENABLE_MASK 0x00000001L +#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK5__COL_XOR_MASK 0x00003FFEL +#define MMEA6_ADDRDECDRAM_ADDR_HASH_BANK5__ROW_XOR_MASK 0xFFFFC000L +//MMEA6_ADDRDECDRAM_ADDR_HASH_PC +#define MMEA6_ADDRDECDRAM_ADDR_HASH_PC__XOR_ENABLE__SHIFT 0x0 +#define MMEA6_ADDRDECDRAM_ADDR_HASH_PC__COL_XOR__SHIFT 0x1 +#define MMEA6_ADDRDECDRAM_ADDR_HASH_PC__ROW_XOR__SHIFT 0xe +#define MMEA6_ADDRDECDRAM_ADDR_HASH_PC__XOR_ENABLE_MASK 0x00000001L +#define MMEA6_ADDRDECDRAM_ADDR_HASH_PC__COL_XOR_MASK 0x00003FFEL +#define MMEA6_ADDRDECDRAM_ADDR_HASH_PC__ROW_XOR_MASK 0xFFFFC000L +//MMEA6_ADDRDECDRAM_ADDR_HASH_PC2 +#define MMEA6_ADDRDECDRAM_ADDR_HASH_PC2__BANK_XOR__SHIFT 0x0 +#define MMEA6_ADDRDECDRAM_ADDR_HASH_PC2__BANK_XOR_MASK 0x0000003FL +//MMEA6_ADDRDECDRAM_ADDR_HASH_CS0 
+#define MMEA6_ADDRDECDRAM_ADDR_HASH_CS0__XOR_ENABLE__SHIFT 0x0 +#define MMEA6_ADDRDECDRAM_ADDR_HASH_CS0__NA_XOR__SHIFT 0x1 +#define MMEA6_ADDRDECDRAM_ADDR_HASH_CS0__XOR_ENABLE_MASK 0x00000001L +#define MMEA6_ADDRDECDRAM_ADDR_HASH_CS0__NA_XOR_MASK 0xFFFFFFFEL +//MMEA6_ADDRDECDRAM_ADDR_HASH_CS1 +#define MMEA6_ADDRDECDRAM_ADDR_HASH_CS1__XOR_ENABLE__SHIFT 0x0 +#define MMEA6_ADDRDECDRAM_ADDR_HASH_CS1__NA_XOR__SHIFT 0x1 +#define MMEA6_ADDRDECDRAM_ADDR_HASH_CS1__XOR_ENABLE_MASK 0x00000001L +#define MMEA6_ADDRDECDRAM_ADDR_HASH_CS1__NA_XOR_MASK 0xFFFFFFFEL +//MMEA6_ADDRDECDRAM_HARVEST_ENABLE +#define MMEA6_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_EN__SHIFT 0x0 +#define MMEA6_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_VAL__SHIFT 0x1 +#define MMEA6_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_EN__SHIFT 0x2 +#define MMEA6_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_VAL__SHIFT 0x3 +#define MMEA6_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_EN__SHIFT 0x4 +#define MMEA6_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_VAL__SHIFT 0x5 +#define MMEA6_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_EN_MASK 0x00000001L +#define MMEA6_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_VAL_MASK 0x00000002L +#define MMEA6_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_EN_MASK 0x00000004L +#define MMEA6_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_VAL_MASK 0x00000008L +#define MMEA6_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_EN_MASK 0x00000010L +#define MMEA6_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_VAL_MASK 0x00000020L +//MMEA6_ADDRDECGMI_ADDR_HASH_BANK0 +#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK0__XOR_ENABLE__SHIFT 0x0 +#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK0__COL_XOR__SHIFT 0x1 +#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK0__ROW_XOR__SHIFT 0xe +#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK0__XOR_ENABLE_MASK 0x00000001L +#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK0__COL_XOR_MASK 0x00003FFEL +#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK0__ROW_XOR_MASK 0xFFFFC000L +//MMEA6_ADDRDECGMI_ADDR_HASH_BANK1 +#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK1__XOR_ENABLE__SHIFT 0x0 +#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK1__COL_XOR__SHIFT 0x1 +#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK1__ROW_XOR__SHIFT 0xe +#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK1__XOR_ENABLE_MASK 0x00000001L +#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK1__COL_XOR_MASK 0x00003FFEL +#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK1__ROW_XOR_MASK 0xFFFFC000L +//MMEA6_ADDRDECGMI_ADDR_HASH_BANK2 +#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK2__XOR_ENABLE__SHIFT 0x0 +#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK2__COL_XOR__SHIFT 0x1 +#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK2__ROW_XOR__SHIFT 0xe +#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK2__XOR_ENABLE_MASK 0x00000001L +#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK2__COL_XOR_MASK 0x00003FFEL +#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK2__ROW_XOR_MASK 0xFFFFC000L +//MMEA6_ADDRDECGMI_ADDR_HASH_BANK3 +#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK3__XOR_ENABLE__SHIFT 0x0 +#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK3__COL_XOR__SHIFT 0x1 +#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK3__ROW_XOR__SHIFT 0xe +#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK3__XOR_ENABLE_MASK 0x00000001L +#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK3__COL_XOR_MASK 0x00003FFEL +#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK3__ROW_XOR_MASK 0xFFFFC000L +//MMEA6_ADDRDECGMI_ADDR_HASH_BANK4 +#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK4__XOR_ENABLE__SHIFT 0x0 +#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK4__COL_XOR__SHIFT 0x1 +#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK4__ROW_XOR__SHIFT 0xe +#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK4__XOR_ENABLE_MASK 0x00000001L +#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK4__COL_XOR_MASK 0x00003FFEL +#define 
MMEA6_ADDRDECGMI_ADDR_HASH_BANK4__ROW_XOR_MASK 0xFFFFC000L +//MMEA6_ADDRDECGMI_ADDR_HASH_BANK5 +#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK5__XOR_ENABLE__SHIFT 0x0 +#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK5__COL_XOR__SHIFT 0x1 +#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK5__ROW_XOR__SHIFT 0xe +#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK5__XOR_ENABLE_MASK 0x00000001L +#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK5__COL_XOR_MASK 0x00003FFEL +#define MMEA6_ADDRDECGMI_ADDR_HASH_BANK5__ROW_XOR_MASK 0xFFFFC000L +//MMEA6_ADDRDECGMI_ADDR_HASH_PC +#define MMEA6_ADDRDECGMI_ADDR_HASH_PC__XOR_ENABLE__SHIFT 0x0 +#define MMEA6_ADDRDECGMI_ADDR_HASH_PC__COL_XOR__SHIFT 0x1 +#define MMEA6_ADDRDECGMI_ADDR_HASH_PC__ROW_XOR__SHIFT 0xe +#define MMEA6_ADDRDECGMI_ADDR_HASH_PC__XOR_ENABLE_MASK 0x00000001L +#define MMEA6_ADDRDECGMI_ADDR_HASH_PC__COL_XOR_MASK 0x00003FFEL +#define MMEA6_ADDRDECGMI_ADDR_HASH_PC__ROW_XOR_MASK 0xFFFFC000L +//MMEA6_ADDRDECGMI_ADDR_HASH_PC2 +#define MMEA6_ADDRDECGMI_ADDR_HASH_PC2__BANK_XOR__SHIFT 0x0 +#define MMEA6_ADDRDECGMI_ADDR_HASH_PC2__BANK_XOR_MASK 0x0000003FL +//MMEA6_ADDRDECGMI_ADDR_HASH_CS0 +#define MMEA6_ADDRDECGMI_ADDR_HASH_CS0__XOR_ENABLE__SHIFT 0x0 +#define MMEA6_ADDRDECGMI_ADDR_HASH_CS0__NA_XOR__SHIFT 0x1 +#define MMEA6_ADDRDECGMI_ADDR_HASH_CS0__XOR_ENABLE_MASK 0x00000001L +#define MMEA6_ADDRDECGMI_ADDR_HASH_CS0__NA_XOR_MASK 0xFFFFFFFEL +//MMEA6_ADDRDECGMI_ADDR_HASH_CS1 +#define MMEA6_ADDRDECGMI_ADDR_HASH_CS1__XOR_ENABLE__SHIFT 0x0 +#define MMEA6_ADDRDECGMI_ADDR_HASH_CS1__NA_XOR__SHIFT 0x1 +#define MMEA6_ADDRDECGMI_ADDR_HASH_CS1__XOR_ENABLE_MASK 0x00000001L +#define MMEA6_ADDRDECGMI_ADDR_HASH_CS1__NA_XOR_MASK 0xFFFFFFFEL +//MMEA6_ADDRDECGMI_HARVEST_ENABLE +#define MMEA6_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_EN__SHIFT 0x0 +#define MMEA6_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_VAL__SHIFT 0x1 +#define MMEA6_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_EN__SHIFT 0x2 +#define MMEA6_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_VAL__SHIFT 0x3 +#define MMEA6_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_EN__SHIFT 0x4 +#define MMEA6_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_VAL__SHIFT 0x5 +#define MMEA6_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_EN_MASK 0x00000001L +#define MMEA6_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_VAL_MASK 0x00000002L +#define MMEA6_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_EN_MASK 0x00000004L +#define MMEA6_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_VAL_MASK 0x00000008L +#define MMEA6_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_EN_MASK 0x00000010L +#define MMEA6_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_VAL_MASK 0x00000020L +//MMEA6_ADDRDEC0_BASE_ADDR_CS0 +#define MMEA6_ADDRDEC0_BASE_ADDR_CS0__CS_EN__SHIFT 0x0 +#define MMEA6_ADDRDEC0_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1 +#define MMEA6_ADDRDEC0_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L +#define MMEA6_ADDRDEC0_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA6_ADDRDEC0_BASE_ADDR_CS1 +#define MMEA6_ADDRDEC0_BASE_ADDR_CS1__CS_EN__SHIFT 0x0 +#define MMEA6_ADDRDEC0_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1 +#define MMEA6_ADDRDEC0_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L +#define MMEA6_ADDRDEC0_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA6_ADDRDEC0_BASE_ADDR_CS2 +#define MMEA6_ADDRDEC0_BASE_ADDR_CS2__CS_EN__SHIFT 0x0 +#define MMEA6_ADDRDEC0_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1 +#define MMEA6_ADDRDEC0_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L +#define MMEA6_ADDRDEC0_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA6_ADDRDEC0_BASE_ADDR_CS3 +#define MMEA6_ADDRDEC0_BASE_ADDR_CS3__CS_EN__SHIFT 0x0 +#define MMEA6_ADDRDEC0_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1 +#define MMEA6_ADDRDEC0_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L +#define 
MMEA6_ADDRDEC0_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA6_ADDRDEC0_BASE_ADDR_SECCS0 +#define MMEA6_ADDRDEC0_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0 +#define MMEA6_ADDRDEC0_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1 +#define MMEA6_ADDRDEC0_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L +#define MMEA6_ADDRDEC0_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA6_ADDRDEC0_BASE_ADDR_SECCS1 +#define MMEA6_ADDRDEC0_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0 +#define MMEA6_ADDRDEC0_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1 +#define MMEA6_ADDRDEC0_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L +#define MMEA6_ADDRDEC0_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA6_ADDRDEC0_BASE_ADDR_SECCS2 +#define MMEA6_ADDRDEC0_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0 +#define MMEA6_ADDRDEC0_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1 +#define MMEA6_ADDRDEC0_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L +#define MMEA6_ADDRDEC0_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA6_ADDRDEC0_BASE_ADDR_SECCS3 +#define MMEA6_ADDRDEC0_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0 +#define MMEA6_ADDRDEC0_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1 +#define MMEA6_ADDRDEC0_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L +#define MMEA6_ADDRDEC0_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA6_ADDRDEC0_ADDR_MASK_CS01 +#define MMEA6_ADDRDEC0_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1 +#define MMEA6_ADDRDEC0_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA6_ADDRDEC0_ADDR_MASK_CS23 +#define MMEA6_ADDRDEC0_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1 +#define MMEA6_ADDRDEC0_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA6_ADDRDEC0_ADDR_MASK_SECCS01 +#define MMEA6_ADDRDEC0_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1 +#define MMEA6_ADDRDEC0_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA6_ADDRDEC0_ADDR_MASK_SECCS23 +#define MMEA6_ADDRDEC0_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1 +#define MMEA6_ADDRDEC0_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA6_ADDRDEC0_ADDR_CFG_CS01 +#define MMEA6_ADDRDEC0_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA6_ADDRDEC0_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4 +#define MMEA6_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8 +#define MMEA6_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc +#define MMEA6_ADDRDEC0_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10 +#define MMEA6_ADDRDEC0_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14 +#define MMEA6_ADDRDEC0_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f +#define MMEA6_ADDRDEC0_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA6_ADDRDEC0_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L +#define MMEA6_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA6_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA6_ADDRDEC0_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L +#define MMEA6_ADDRDEC0_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L +#define MMEA6_ADDRDEC0_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L +//MMEA6_ADDRDEC0_ADDR_CFG_CS23 +#define MMEA6_ADDRDEC0_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA6_ADDRDEC0_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4 +#define MMEA6_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8 +#define MMEA6_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc +#define MMEA6_ADDRDEC0_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10 +#define MMEA6_ADDRDEC0_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14 +#define MMEA6_ADDRDEC0_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f +#define MMEA6_ADDRDEC0_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA6_ADDRDEC0_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L +#define MMEA6_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA6_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L +#define 
MMEA6_ADDRDEC0_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L +#define MMEA6_ADDRDEC0_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L +#define MMEA6_ADDRDEC0_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L +//MMEA6_ADDRDEC0_ADDR_SEL_CS01 +#define MMEA6_ADDRDEC0_ADDR_SEL_CS01__BANK0__SHIFT 0x0 +#define MMEA6_ADDRDEC0_ADDR_SEL_CS01__BANK1__SHIFT 0x4 +#define MMEA6_ADDRDEC0_ADDR_SEL_CS01__BANK2__SHIFT 0x8 +#define MMEA6_ADDRDEC0_ADDR_SEL_CS01__BANK3__SHIFT 0xc +#define MMEA6_ADDRDEC0_ADDR_SEL_CS01__BANK4__SHIFT 0x10 +#define MMEA6_ADDRDEC0_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18 +#define MMEA6_ADDRDEC0_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c +#define MMEA6_ADDRDEC0_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL +#define MMEA6_ADDRDEC0_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L +#define MMEA6_ADDRDEC0_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L +#define MMEA6_ADDRDEC0_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L +#define MMEA6_ADDRDEC0_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L +#define MMEA6_ADDRDEC0_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L +#define MMEA6_ADDRDEC0_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L +//MMEA6_ADDRDEC0_ADDR_SEL_CS23 +#define MMEA6_ADDRDEC0_ADDR_SEL_CS23__BANK0__SHIFT 0x0 +#define MMEA6_ADDRDEC0_ADDR_SEL_CS23__BANK1__SHIFT 0x4 +#define MMEA6_ADDRDEC0_ADDR_SEL_CS23__BANK2__SHIFT 0x8 +#define MMEA6_ADDRDEC0_ADDR_SEL_CS23__BANK3__SHIFT 0xc +#define MMEA6_ADDRDEC0_ADDR_SEL_CS23__BANK4__SHIFT 0x10 +#define MMEA6_ADDRDEC0_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18 +#define MMEA6_ADDRDEC0_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c +#define MMEA6_ADDRDEC0_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL +#define MMEA6_ADDRDEC0_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L +#define MMEA6_ADDRDEC0_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L +#define MMEA6_ADDRDEC0_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L +#define MMEA6_ADDRDEC0_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L +#define MMEA6_ADDRDEC0_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L +#define MMEA6_ADDRDEC0_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L +//MMEA6_ADDRDEC0_ADDR_SEL2_CS01 +#define MMEA6_ADDRDEC0_ADDR_SEL2_CS01__BANK5__SHIFT 0x0 +#define MMEA6_ADDRDEC0_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL +//MMEA6_ADDRDEC0_ADDR_SEL2_CS23 +#define MMEA6_ADDRDEC0_ADDR_SEL2_CS23__BANK5__SHIFT 0x0 +#define MMEA6_ADDRDEC0_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL +//MMEA6_ADDRDEC0_COL_SEL_LO_CS01 +#define MMEA6_ADDRDEC0_COL_SEL_LO_CS01__COL0__SHIFT 0x0 +#define MMEA6_ADDRDEC0_COL_SEL_LO_CS01__COL1__SHIFT 0x4 +#define MMEA6_ADDRDEC0_COL_SEL_LO_CS01__COL2__SHIFT 0x8 +#define MMEA6_ADDRDEC0_COL_SEL_LO_CS01__COL3__SHIFT 0xc +#define MMEA6_ADDRDEC0_COL_SEL_LO_CS01__COL4__SHIFT 0x10 +#define MMEA6_ADDRDEC0_COL_SEL_LO_CS01__COL5__SHIFT 0x14 +#define MMEA6_ADDRDEC0_COL_SEL_LO_CS01__COL6__SHIFT 0x18 +#define MMEA6_ADDRDEC0_COL_SEL_LO_CS01__COL7__SHIFT 0x1c +#define MMEA6_ADDRDEC0_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL +#define MMEA6_ADDRDEC0_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L +#define MMEA6_ADDRDEC0_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L +#define MMEA6_ADDRDEC0_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L +#define MMEA6_ADDRDEC0_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L +#define MMEA6_ADDRDEC0_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L +#define MMEA6_ADDRDEC0_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L +#define MMEA6_ADDRDEC0_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L +//MMEA6_ADDRDEC0_COL_SEL_LO_CS23 +#define MMEA6_ADDRDEC0_COL_SEL_LO_CS23__COL0__SHIFT 0x0 +#define MMEA6_ADDRDEC0_COL_SEL_LO_CS23__COL1__SHIFT 0x4 +#define MMEA6_ADDRDEC0_COL_SEL_LO_CS23__COL2__SHIFT 0x8 +#define MMEA6_ADDRDEC0_COL_SEL_LO_CS23__COL3__SHIFT 0xc +#define MMEA6_ADDRDEC0_COL_SEL_LO_CS23__COL4__SHIFT 0x10 +#define 
MMEA6_ADDRDEC0_COL_SEL_LO_CS23__COL5__SHIFT 0x14 +#define MMEA6_ADDRDEC0_COL_SEL_LO_CS23__COL6__SHIFT 0x18 +#define MMEA6_ADDRDEC0_COL_SEL_LO_CS23__COL7__SHIFT 0x1c +#define MMEA6_ADDRDEC0_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL +#define MMEA6_ADDRDEC0_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L +#define MMEA6_ADDRDEC0_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L +#define MMEA6_ADDRDEC0_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L +#define MMEA6_ADDRDEC0_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L +#define MMEA6_ADDRDEC0_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L +#define MMEA6_ADDRDEC0_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L +#define MMEA6_ADDRDEC0_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L +//MMEA6_ADDRDEC0_COL_SEL_HI_CS01 +#define MMEA6_ADDRDEC0_COL_SEL_HI_CS01__COL8__SHIFT 0x0 +#define MMEA6_ADDRDEC0_COL_SEL_HI_CS01__COL9__SHIFT 0x4 +#define MMEA6_ADDRDEC0_COL_SEL_HI_CS01__COL10__SHIFT 0x8 +#define MMEA6_ADDRDEC0_COL_SEL_HI_CS01__COL11__SHIFT 0xc +#define MMEA6_ADDRDEC0_COL_SEL_HI_CS01__COL12__SHIFT 0x10 +#define MMEA6_ADDRDEC0_COL_SEL_HI_CS01__COL13__SHIFT 0x14 +#define MMEA6_ADDRDEC0_COL_SEL_HI_CS01__COL14__SHIFT 0x18 +#define MMEA6_ADDRDEC0_COL_SEL_HI_CS01__COL15__SHIFT 0x1c +#define MMEA6_ADDRDEC0_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL +#define MMEA6_ADDRDEC0_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L +#define MMEA6_ADDRDEC0_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L +#define MMEA6_ADDRDEC0_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L +#define MMEA6_ADDRDEC0_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L +#define MMEA6_ADDRDEC0_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L +#define MMEA6_ADDRDEC0_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L +#define MMEA6_ADDRDEC0_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L +//MMEA6_ADDRDEC0_COL_SEL_HI_CS23 +#define MMEA6_ADDRDEC0_COL_SEL_HI_CS23__COL8__SHIFT 0x0 +#define MMEA6_ADDRDEC0_COL_SEL_HI_CS23__COL9__SHIFT 0x4 +#define MMEA6_ADDRDEC0_COL_SEL_HI_CS23__COL10__SHIFT 0x8 +#define MMEA6_ADDRDEC0_COL_SEL_HI_CS23__COL11__SHIFT 0xc +#define MMEA6_ADDRDEC0_COL_SEL_HI_CS23__COL12__SHIFT 0x10 +#define MMEA6_ADDRDEC0_COL_SEL_HI_CS23__COL13__SHIFT 0x14 +#define MMEA6_ADDRDEC0_COL_SEL_HI_CS23__COL14__SHIFT 0x18 +#define MMEA6_ADDRDEC0_COL_SEL_HI_CS23__COL15__SHIFT 0x1c +#define MMEA6_ADDRDEC0_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL +#define MMEA6_ADDRDEC0_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L +#define MMEA6_ADDRDEC0_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L +#define MMEA6_ADDRDEC0_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L +#define MMEA6_ADDRDEC0_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L +#define MMEA6_ADDRDEC0_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L +#define MMEA6_ADDRDEC0_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L +#define MMEA6_ADDRDEC0_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L +//MMEA6_ADDRDEC0_RM_SEL_CS01 +#define MMEA6_ADDRDEC0_RM_SEL_CS01__RM0__SHIFT 0x0 +#define MMEA6_ADDRDEC0_RM_SEL_CS01__RM1__SHIFT 0x4 +#define MMEA6_ADDRDEC0_RM_SEL_CS01__RM2__SHIFT 0x8 +#define MMEA6_ADDRDEC0_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc +#define MMEA6_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA6_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA6_ADDRDEC0_RM_SEL_CS01__RM0_MASK 0x0000000FL +#define MMEA6_ADDRDEC0_RM_SEL_CS01__RM1_MASK 0x000000F0L +#define MMEA6_ADDRDEC0_RM_SEL_CS01__RM2_MASK 0x00000F00L +#define MMEA6_ADDRDEC0_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA6_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA6_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA6_ADDRDEC0_RM_SEL_CS23 +#define MMEA6_ADDRDEC0_RM_SEL_CS23__RM0__SHIFT 0x0 +#define 
MMEA6_ADDRDEC0_RM_SEL_CS23__RM1__SHIFT 0x4 +#define MMEA6_ADDRDEC0_RM_SEL_CS23__RM2__SHIFT 0x8 +#define MMEA6_ADDRDEC0_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc +#define MMEA6_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA6_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA6_ADDRDEC0_RM_SEL_CS23__RM0_MASK 0x0000000FL +#define MMEA6_ADDRDEC0_RM_SEL_CS23__RM1_MASK 0x000000F0L +#define MMEA6_ADDRDEC0_RM_SEL_CS23__RM2_MASK 0x00000F00L +#define MMEA6_ADDRDEC0_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA6_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA6_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA6_ADDRDEC0_RM_SEL_SECCS01 +#define MMEA6_ADDRDEC0_RM_SEL_SECCS01__RM0__SHIFT 0x0 +#define MMEA6_ADDRDEC0_RM_SEL_SECCS01__RM1__SHIFT 0x4 +#define MMEA6_ADDRDEC0_RM_SEL_SECCS01__RM2__SHIFT 0x8 +#define MMEA6_ADDRDEC0_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc +#define MMEA6_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA6_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA6_ADDRDEC0_RM_SEL_SECCS01__RM0_MASK 0x0000000FL +#define MMEA6_ADDRDEC0_RM_SEL_SECCS01__RM1_MASK 0x000000F0L +#define MMEA6_ADDRDEC0_RM_SEL_SECCS01__RM2_MASK 0x00000F00L +#define MMEA6_ADDRDEC0_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA6_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA6_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA6_ADDRDEC0_RM_SEL_SECCS23 +#define MMEA6_ADDRDEC0_RM_SEL_SECCS23__RM0__SHIFT 0x0 +#define MMEA6_ADDRDEC0_RM_SEL_SECCS23__RM1__SHIFT 0x4 +#define MMEA6_ADDRDEC0_RM_SEL_SECCS23__RM2__SHIFT 0x8 +#define MMEA6_ADDRDEC0_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc +#define MMEA6_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA6_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA6_ADDRDEC0_RM_SEL_SECCS23__RM0_MASK 0x0000000FL +#define MMEA6_ADDRDEC0_RM_SEL_SECCS23__RM1_MASK 0x000000F0L +#define MMEA6_ADDRDEC0_RM_SEL_SECCS23__RM2_MASK 0x00000F00L +#define MMEA6_ADDRDEC0_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA6_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA6_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA6_ADDRDEC1_BASE_ADDR_CS0 +#define MMEA6_ADDRDEC1_BASE_ADDR_CS0__CS_EN__SHIFT 0x0 +#define MMEA6_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1 +#define MMEA6_ADDRDEC1_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L +#define MMEA6_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA6_ADDRDEC1_BASE_ADDR_CS1 +#define MMEA6_ADDRDEC1_BASE_ADDR_CS1__CS_EN__SHIFT 0x0 +#define MMEA6_ADDRDEC1_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1 +#define MMEA6_ADDRDEC1_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L +#define MMEA6_ADDRDEC1_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA6_ADDRDEC1_BASE_ADDR_CS2 +#define MMEA6_ADDRDEC1_BASE_ADDR_CS2__CS_EN__SHIFT 0x0 +#define MMEA6_ADDRDEC1_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1 +#define MMEA6_ADDRDEC1_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L +#define MMEA6_ADDRDEC1_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA6_ADDRDEC1_BASE_ADDR_CS3 +#define MMEA6_ADDRDEC1_BASE_ADDR_CS3__CS_EN__SHIFT 0x0 +#define MMEA6_ADDRDEC1_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1 +#define MMEA6_ADDRDEC1_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L +#define MMEA6_ADDRDEC1_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA6_ADDRDEC1_BASE_ADDR_SECCS0 +#define MMEA6_ADDRDEC1_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0 +#define MMEA6_ADDRDEC1_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1 
+#define MMEA6_ADDRDEC1_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L +#define MMEA6_ADDRDEC1_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA6_ADDRDEC1_BASE_ADDR_SECCS1 +#define MMEA6_ADDRDEC1_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0 +#define MMEA6_ADDRDEC1_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1 +#define MMEA6_ADDRDEC1_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L +#define MMEA6_ADDRDEC1_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA6_ADDRDEC1_BASE_ADDR_SECCS2 +#define MMEA6_ADDRDEC1_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0 +#define MMEA6_ADDRDEC1_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1 +#define MMEA6_ADDRDEC1_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L +#define MMEA6_ADDRDEC1_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA6_ADDRDEC1_BASE_ADDR_SECCS3 +#define MMEA6_ADDRDEC1_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0 +#define MMEA6_ADDRDEC1_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1 +#define MMEA6_ADDRDEC1_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L +#define MMEA6_ADDRDEC1_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA6_ADDRDEC1_ADDR_MASK_CS01 +#define MMEA6_ADDRDEC1_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1 +#define MMEA6_ADDRDEC1_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA6_ADDRDEC1_ADDR_MASK_CS23 +#define MMEA6_ADDRDEC1_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1 +#define MMEA6_ADDRDEC1_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA6_ADDRDEC1_ADDR_MASK_SECCS01 +#define MMEA6_ADDRDEC1_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1 +#define MMEA6_ADDRDEC1_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA6_ADDRDEC1_ADDR_MASK_SECCS23 +#define MMEA6_ADDRDEC1_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1 +#define MMEA6_ADDRDEC1_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA6_ADDRDEC1_ADDR_CFG_CS01 +#define MMEA6_ADDRDEC1_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA6_ADDRDEC1_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4 +#define MMEA6_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8 +#define MMEA6_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc +#define MMEA6_ADDRDEC1_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10 +#define MMEA6_ADDRDEC1_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14 +#define MMEA6_ADDRDEC1_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f +#define MMEA6_ADDRDEC1_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA6_ADDRDEC1_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L +#define MMEA6_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA6_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA6_ADDRDEC1_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L +#define MMEA6_ADDRDEC1_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L +#define MMEA6_ADDRDEC1_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L +//MMEA6_ADDRDEC1_ADDR_CFG_CS23 +#define MMEA6_ADDRDEC1_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA6_ADDRDEC1_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4 +#define MMEA6_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8 +#define MMEA6_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc +#define MMEA6_ADDRDEC1_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10 +#define MMEA6_ADDRDEC1_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14 +#define MMEA6_ADDRDEC1_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f +#define MMEA6_ADDRDEC1_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA6_ADDRDEC1_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L +#define MMEA6_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA6_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA6_ADDRDEC1_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L +#define MMEA6_ADDRDEC1_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L +#define MMEA6_ADDRDEC1_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L +//MMEA6_ADDRDEC1_ADDR_SEL_CS01 
+#define MMEA6_ADDRDEC1_ADDR_SEL_CS01__BANK0__SHIFT 0x0 +#define MMEA6_ADDRDEC1_ADDR_SEL_CS01__BANK1__SHIFT 0x4 +#define MMEA6_ADDRDEC1_ADDR_SEL_CS01__BANK2__SHIFT 0x8 +#define MMEA6_ADDRDEC1_ADDR_SEL_CS01__BANK3__SHIFT 0xc +#define MMEA6_ADDRDEC1_ADDR_SEL_CS01__BANK4__SHIFT 0x10 +#define MMEA6_ADDRDEC1_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18 +#define MMEA6_ADDRDEC1_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c +#define MMEA6_ADDRDEC1_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL +#define MMEA6_ADDRDEC1_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L +#define MMEA6_ADDRDEC1_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L +#define MMEA6_ADDRDEC1_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L +#define MMEA6_ADDRDEC1_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L +#define MMEA6_ADDRDEC1_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L +#define MMEA6_ADDRDEC1_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L +//MMEA6_ADDRDEC1_ADDR_SEL_CS23 +#define MMEA6_ADDRDEC1_ADDR_SEL_CS23__BANK0__SHIFT 0x0 +#define MMEA6_ADDRDEC1_ADDR_SEL_CS23__BANK1__SHIFT 0x4 +#define MMEA6_ADDRDEC1_ADDR_SEL_CS23__BANK2__SHIFT 0x8 +#define MMEA6_ADDRDEC1_ADDR_SEL_CS23__BANK3__SHIFT 0xc +#define MMEA6_ADDRDEC1_ADDR_SEL_CS23__BANK4__SHIFT 0x10 +#define MMEA6_ADDRDEC1_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18 +#define MMEA6_ADDRDEC1_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c +#define MMEA6_ADDRDEC1_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL +#define MMEA6_ADDRDEC1_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L +#define MMEA6_ADDRDEC1_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L +#define MMEA6_ADDRDEC1_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L +#define MMEA6_ADDRDEC1_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L +#define MMEA6_ADDRDEC1_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L +#define MMEA6_ADDRDEC1_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L +//MMEA6_ADDRDEC1_ADDR_SEL2_CS01 +#define MMEA6_ADDRDEC1_ADDR_SEL2_CS01__BANK5__SHIFT 0x0 +#define MMEA6_ADDRDEC1_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL +//MMEA6_ADDRDEC1_ADDR_SEL2_CS23 +#define MMEA6_ADDRDEC1_ADDR_SEL2_CS23__BANK5__SHIFT 0x0 +#define MMEA6_ADDRDEC1_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL +//MMEA6_ADDRDEC1_COL_SEL_LO_CS01 +#define MMEA6_ADDRDEC1_COL_SEL_LO_CS01__COL0__SHIFT 0x0 +#define MMEA6_ADDRDEC1_COL_SEL_LO_CS01__COL1__SHIFT 0x4 +#define MMEA6_ADDRDEC1_COL_SEL_LO_CS01__COL2__SHIFT 0x8 +#define MMEA6_ADDRDEC1_COL_SEL_LO_CS01__COL3__SHIFT 0xc +#define MMEA6_ADDRDEC1_COL_SEL_LO_CS01__COL4__SHIFT 0x10 +#define MMEA6_ADDRDEC1_COL_SEL_LO_CS01__COL5__SHIFT 0x14 +#define MMEA6_ADDRDEC1_COL_SEL_LO_CS01__COL6__SHIFT 0x18 +#define MMEA6_ADDRDEC1_COL_SEL_LO_CS01__COL7__SHIFT 0x1c +#define MMEA6_ADDRDEC1_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL +#define MMEA6_ADDRDEC1_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L +#define MMEA6_ADDRDEC1_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L +#define MMEA6_ADDRDEC1_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L +#define MMEA6_ADDRDEC1_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L +#define MMEA6_ADDRDEC1_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L +#define MMEA6_ADDRDEC1_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L +#define MMEA6_ADDRDEC1_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L +//MMEA6_ADDRDEC1_COL_SEL_LO_CS23 +#define MMEA6_ADDRDEC1_COL_SEL_LO_CS23__COL0__SHIFT 0x0 +#define MMEA6_ADDRDEC1_COL_SEL_LO_CS23__COL1__SHIFT 0x4 +#define MMEA6_ADDRDEC1_COL_SEL_LO_CS23__COL2__SHIFT 0x8 +#define MMEA6_ADDRDEC1_COL_SEL_LO_CS23__COL3__SHIFT 0xc +#define MMEA6_ADDRDEC1_COL_SEL_LO_CS23__COL4__SHIFT 0x10 +#define MMEA6_ADDRDEC1_COL_SEL_LO_CS23__COL5__SHIFT 0x14 +#define MMEA6_ADDRDEC1_COL_SEL_LO_CS23__COL6__SHIFT 0x18 +#define MMEA6_ADDRDEC1_COL_SEL_LO_CS23__COL7__SHIFT 0x1c +#define MMEA6_ADDRDEC1_COL_SEL_LO_CS23__COL0_MASK 
0x0000000FL +#define MMEA6_ADDRDEC1_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L +#define MMEA6_ADDRDEC1_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L +#define MMEA6_ADDRDEC1_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L +#define MMEA6_ADDRDEC1_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L +#define MMEA6_ADDRDEC1_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L +#define MMEA6_ADDRDEC1_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L +#define MMEA6_ADDRDEC1_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L +//MMEA6_ADDRDEC1_COL_SEL_HI_CS01 +#define MMEA6_ADDRDEC1_COL_SEL_HI_CS01__COL8__SHIFT 0x0 +#define MMEA6_ADDRDEC1_COL_SEL_HI_CS01__COL9__SHIFT 0x4 +#define MMEA6_ADDRDEC1_COL_SEL_HI_CS01__COL10__SHIFT 0x8 +#define MMEA6_ADDRDEC1_COL_SEL_HI_CS01__COL11__SHIFT 0xc +#define MMEA6_ADDRDEC1_COL_SEL_HI_CS01__COL12__SHIFT 0x10 +#define MMEA6_ADDRDEC1_COL_SEL_HI_CS01__COL13__SHIFT 0x14 +#define MMEA6_ADDRDEC1_COL_SEL_HI_CS01__COL14__SHIFT 0x18 +#define MMEA6_ADDRDEC1_COL_SEL_HI_CS01__COL15__SHIFT 0x1c +#define MMEA6_ADDRDEC1_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL +#define MMEA6_ADDRDEC1_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L +#define MMEA6_ADDRDEC1_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L +#define MMEA6_ADDRDEC1_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L +#define MMEA6_ADDRDEC1_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L +#define MMEA6_ADDRDEC1_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L +#define MMEA6_ADDRDEC1_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L +#define MMEA6_ADDRDEC1_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L +//MMEA6_ADDRDEC1_COL_SEL_HI_CS23 +#define MMEA6_ADDRDEC1_COL_SEL_HI_CS23__COL8__SHIFT 0x0 +#define MMEA6_ADDRDEC1_COL_SEL_HI_CS23__COL9__SHIFT 0x4 +#define MMEA6_ADDRDEC1_COL_SEL_HI_CS23__COL10__SHIFT 0x8 +#define MMEA6_ADDRDEC1_COL_SEL_HI_CS23__COL11__SHIFT 0xc +#define MMEA6_ADDRDEC1_COL_SEL_HI_CS23__COL12__SHIFT 0x10 +#define MMEA6_ADDRDEC1_COL_SEL_HI_CS23__COL13__SHIFT 0x14 +#define MMEA6_ADDRDEC1_COL_SEL_HI_CS23__COL14__SHIFT 0x18 +#define MMEA6_ADDRDEC1_COL_SEL_HI_CS23__COL15__SHIFT 0x1c +#define MMEA6_ADDRDEC1_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL +#define MMEA6_ADDRDEC1_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L +#define MMEA6_ADDRDEC1_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L +#define MMEA6_ADDRDEC1_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L +#define MMEA6_ADDRDEC1_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L +#define MMEA6_ADDRDEC1_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L +#define MMEA6_ADDRDEC1_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L +#define MMEA6_ADDRDEC1_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L +//MMEA6_ADDRDEC1_RM_SEL_CS01 +#define MMEA6_ADDRDEC1_RM_SEL_CS01__RM0__SHIFT 0x0 +#define MMEA6_ADDRDEC1_RM_SEL_CS01__RM1__SHIFT 0x4 +#define MMEA6_ADDRDEC1_RM_SEL_CS01__RM2__SHIFT 0x8 +#define MMEA6_ADDRDEC1_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc +#define MMEA6_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA6_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA6_ADDRDEC1_RM_SEL_CS01__RM0_MASK 0x0000000FL +#define MMEA6_ADDRDEC1_RM_SEL_CS01__RM1_MASK 0x000000F0L +#define MMEA6_ADDRDEC1_RM_SEL_CS01__RM2_MASK 0x00000F00L +#define MMEA6_ADDRDEC1_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA6_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA6_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA6_ADDRDEC1_RM_SEL_CS23 +#define MMEA6_ADDRDEC1_RM_SEL_CS23__RM0__SHIFT 0x0 +#define MMEA6_ADDRDEC1_RM_SEL_CS23__RM1__SHIFT 0x4 +#define MMEA6_ADDRDEC1_RM_SEL_CS23__RM2__SHIFT 0x8 +#define MMEA6_ADDRDEC1_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc +#define MMEA6_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 
+#define MMEA6_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA6_ADDRDEC1_RM_SEL_CS23__RM0_MASK 0x0000000FL +#define MMEA6_ADDRDEC1_RM_SEL_CS23__RM1_MASK 0x000000F0L +#define MMEA6_ADDRDEC1_RM_SEL_CS23__RM2_MASK 0x00000F00L +#define MMEA6_ADDRDEC1_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA6_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA6_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA6_ADDRDEC1_RM_SEL_SECCS01 +#define MMEA6_ADDRDEC1_RM_SEL_SECCS01__RM0__SHIFT 0x0 +#define MMEA6_ADDRDEC1_RM_SEL_SECCS01__RM1__SHIFT 0x4 +#define MMEA6_ADDRDEC1_RM_SEL_SECCS01__RM2__SHIFT 0x8 +#define MMEA6_ADDRDEC1_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc +#define MMEA6_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA6_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA6_ADDRDEC1_RM_SEL_SECCS01__RM0_MASK 0x0000000FL +#define MMEA6_ADDRDEC1_RM_SEL_SECCS01__RM1_MASK 0x000000F0L +#define MMEA6_ADDRDEC1_RM_SEL_SECCS01__RM2_MASK 0x00000F00L +#define MMEA6_ADDRDEC1_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA6_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA6_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA6_ADDRDEC1_RM_SEL_SECCS23 +#define MMEA6_ADDRDEC1_RM_SEL_SECCS23__RM0__SHIFT 0x0 +#define MMEA6_ADDRDEC1_RM_SEL_SECCS23__RM1__SHIFT 0x4 +#define MMEA6_ADDRDEC1_RM_SEL_SECCS23__RM2__SHIFT 0x8 +#define MMEA6_ADDRDEC1_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc +#define MMEA6_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA6_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA6_ADDRDEC1_RM_SEL_SECCS23__RM0_MASK 0x0000000FL +#define MMEA6_ADDRDEC1_RM_SEL_SECCS23__RM1_MASK 0x000000F0L +#define MMEA6_ADDRDEC1_RM_SEL_SECCS23__RM2_MASK 0x00000F00L +#define MMEA6_ADDRDEC1_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA6_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA6_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA6_ADDRDEC2_BASE_ADDR_CS0 +#define MMEA6_ADDRDEC2_BASE_ADDR_CS0__CS_EN__SHIFT 0x0 +#define MMEA6_ADDRDEC2_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1 +#define MMEA6_ADDRDEC2_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L +#define MMEA6_ADDRDEC2_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA6_ADDRDEC2_BASE_ADDR_CS1 +#define MMEA6_ADDRDEC2_BASE_ADDR_CS1__CS_EN__SHIFT 0x0 +#define MMEA6_ADDRDEC2_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1 +#define MMEA6_ADDRDEC2_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L +#define MMEA6_ADDRDEC2_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA6_ADDRDEC2_BASE_ADDR_CS2 +#define MMEA6_ADDRDEC2_BASE_ADDR_CS2__CS_EN__SHIFT 0x0 +#define MMEA6_ADDRDEC2_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1 +#define MMEA6_ADDRDEC2_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L +#define MMEA6_ADDRDEC2_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA6_ADDRDEC2_BASE_ADDR_CS3 +#define MMEA6_ADDRDEC2_BASE_ADDR_CS3__CS_EN__SHIFT 0x0 +#define MMEA6_ADDRDEC2_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1 +#define MMEA6_ADDRDEC2_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L +#define MMEA6_ADDRDEC2_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA6_ADDRDEC2_BASE_ADDR_SECCS0 +#define MMEA6_ADDRDEC2_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0 +#define MMEA6_ADDRDEC2_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1 +#define MMEA6_ADDRDEC2_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L +#define MMEA6_ADDRDEC2_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA6_ADDRDEC2_BASE_ADDR_SECCS1 +#define MMEA6_ADDRDEC2_BASE_ADDR_SECCS1__CS_EN__SHIFT 
0x0 +#define MMEA6_ADDRDEC2_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1 +#define MMEA6_ADDRDEC2_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L +#define MMEA6_ADDRDEC2_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA6_ADDRDEC2_BASE_ADDR_SECCS2 +#define MMEA6_ADDRDEC2_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0 +#define MMEA6_ADDRDEC2_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1 +#define MMEA6_ADDRDEC2_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L +#define MMEA6_ADDRDEC2_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA6_ADDRDEC2_BASE_ADDR_SECCS3 +#define MMEA6_ADDRDEC2_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0 +#define MMEA6_ADDRDEC2_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1 +#define MMEA6_ADDRDEC2_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L +#define MMEA6_ADDRDEC2_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA6_ADDRDEC2_ADDR_MASK_CS01 +#define MMEA6_ADDRDEC2_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1 +#define MMEA6_ADDRDEC2_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA6_ADDRDEC2_ADDR_MASK_CS23 +#define MMEA6_ADDRDEC2_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1 +#define MMEA6_ADDRDEC2_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA6_ADDRDEC2_ADDR_MASK_SECCS01 +#define MMEA6_ADDRDEC2_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1 +#define MMEA6_ADDRDEC2_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA6_ADDRDEC2_ADDR_MASK_SECCS23 +#define MMEA6_ADDRDEC2_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1 +#define MMEA6_ADDRDEC2_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA6_ADDRDEC2_ADDR_CFG_CS01 +#define MMEA6_ADDRDEC2_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA6_ADDRDEC2_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4 +#define MMEA6_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8 +#define MMEA6_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc +#define MMEA6_ADDRDEC2_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10 +#define MMEA6_ADDRDEC2_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14 +#define MMEA6_ADDRDEC2_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f +#define MMEA6_ADDRDEC2_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA6_ADDRDEC2_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L +#define MMEA6_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA6_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA6_ADDRDEC2_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L +#define MMEA6_ADDRDEC2_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L +#define MMEA6_ADDRDEC2_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L +//MMEA6_ADDRDEC2_ADDR_CFG_CS23 +#define MMEA6_ADDRDEC2_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA6_ADDRDEC2_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4 +#define MMEA6_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8 +#define MMEA6_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc +#define MMEA6_ADDRDEC2_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10 +#define MMEA6_ADDRDEC2_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14 +#define MMEA6_ADDRDEC2_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f +#define MMEA6_ADDRDEC2_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA6_ADDRDEC2_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L +#define MMEA6_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA6_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA6_ADDRDEC2_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L +#define MMEA6_ADDRDEC2_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L +#define MMEA6_ADDRDEC2_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L +//MMEA6_ADDRDEC2_ADDR_SEL_CS01 +#define MMEA6_ADDRDEC2_ADDR_SEL_CS01__BANK0__SHIFT 0x0 +#define MMEA6_ADDRDEC2_ADDR_SEL_CS01__BANK1__SHIFT 0x4 +#define MMEA6_ADDRDEC2_ADDR_SEL_CS01__BANK2__SHIFT 0x8 +#define MMEA6_ADDRDEC2_ADDR_SEL_CS01__BANK3__SHIFT 0xc 
+#define MMEA6_ADDRDEC2_ADDR_SEL_CS01__BANK4__SHIFT 0x10 +#define MMEA6_ADDRDEC2_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18 +#define MMEA6_ADDRDEC2_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c +#define MMEA6_ADDRDEC2_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL +#define MMEA6_ADDRDEC2_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L +#define MMEA6_ADDRDEC2_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L +#define MMEA6_ADDRDEC2_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L +#define MMEA6_ADDRDEC2_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L +#define MMEA6_ADDRDEC2_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L +#define MMEA6_ADDRDEC2_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L +//MMEA6_ADDRDEC2_ADDR_SEL_CS23 +#define MMEA6_ADDRDEC2_ADDR_SEL_CS23__BANK0__SHIFT 0x0 +#define MMEA6_ADDRDEC2_ADDR_SEL_CS23__BANK1__SHIFT 0x4 +#define MMEA6_ADDRDEC2_ADDR_SEL_CS23__BANK2__SHIFT 0x8 +#define MMEA6_ADDRDEC2_ADDR_SEL_CS23__BANK3__SHIFT 0xc +#define MMEA6_ADDRDEC2_ADDR_SEL_CS23__BANK4__SHIFT 0x10 +#define MMEA6_ADDRDEC2_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18 +#define MMEA6_ADDRDEC2_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c +#define MMEA6_ADDRDEC2_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL +#define MMEA6_ADDRDEC2_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L +#define MMEA6_ADDRDEC2_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L +#define MMEA6_ADDRDEC2_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L +#define MMEA6_ADDRDEC2_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L +#define MMEA6_ADDRDEC2_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L +#define MMEA6_ADDRDEC2_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L +//MMEA6_ADDRDEC2_ADDR_SEL2_CS01 +#define MMEA6_ADDRDEC2_ADDR_SEL2_CS01__BANK5__SHIFT 0x0 +#define MMEA6_ADDRDEC2_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL +//MMEA6_ADDRDEC2_ADDR_SEL2_CS23 +#define MMEA6_ADDRDEC2_ADDR_SEL2_CS23__BANK5__SHIFT 0x0 +#define MMEA6_ADDRDEC2_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL +//MMEA6_ADDRDEC2_COL_SEL_LO_CS01 +#define MMEA6_ADDRDEC2_COL_SEL_LO_CS01__COL0__SHIFT 0x0 +#define MMEA6_ADDRDEC2_COL_SEL_LO_CS01__COL1__SHIFT 0x4 +#define MMEA6_ADDRDEC2_COL_SEL_LO_CS01__COL2__SHIFT 0x8 +#define MMEA6_ADDRDEC2_COL_SEL_LO_CS01__COL3__SHIFT 0xc +#define MMEA6_ADDRDEC2_COL_SEL_LO_CS01__COL4__SHIFT 0x10 +#define MMEA6_ADDRDEC2_COL_SEL_LO_CS01__COL5__SHIFT 0x14 +#define MMEA6_ADDRDEC2_COL_SEL_LO_CS01__COL6__SHIFT 0x18 +#define MMEA6_ADDRDEC2_COL_SEL_LO_CS01__COL7__SHIFT 0x1c +#define MMEA6_ADDRDEC2_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL +#define MMEA6_ADDRDEC2_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L +#define MMEA6_ADDRDEC2_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L +#define MMEA6_ADDRDEC2_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L +#define MMEA6_ADDRDEC2_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L +#define MMEA6_ADDRDEC2_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L +#define MMEA6_ADDRDEC2_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L +#define MMEA6_ADDRDEC2_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L +//MMEA6_ADDRDEC2_COL_SEL_LO_CS23 +#define MMEA6_ADDRDEC2_COL_SEL_LO_CS23__COL0__SHIFT 0x0 +#define MMEA6_ADDRDEC2_COL_SEL_LO_CS23__COL1__SHIFT 0x4 +#define MMEA6_ADDRDEC2_COL_SEL_LO_CS23__COL2__SHIFT 0x8 +#define MMEA6_ADDRDEC2_COL_SEL_LO_CS23__COL3__SHIFT 0xc +#define MMEA6_ADDRDEC2_COL_SEL_LO_CS23__COL4__SHIFT 0x10 +#define MMEA6_ADDRDEC2_COL_SEL_LO_CS23__COL5__SHIFT 0x14 +#define MMEA6_ADDRDEC2_COL_SEL_LO_CS23__COL6__SHIFT 0x18 +#define MMEA6_ADDRDEC2_COL_SEL_LO_CS23__COL7__SHIFT 0x1c +#define MMEA6_ADDRDEC2_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL +#define MMEA6_ADDRDEC2_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L +#define MMEA6_ADDRDEC2_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L +#define MMEA6_ADDRDEC2_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L +#define 
MMEA6_ADDRDEC2_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L +#define MMEA6_ADDRDEC2_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L +#define MMEA6_ADDRDEC2_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L +#define MMEA6_ADDRDEC2_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L +//MMEA6_ADDRDEC2_COL_SEL_HI_CS01 +#define MMEA6_ADDRDEC2_COL_SEL_HI_CS01__COL8__SHIFT 0x0 +#define MMEA6_ADDRDEC2_COL_SEL_HI_CS01__COL9__SHIFT 0x4 +#define MMEA6_ADDRDEC2_COL_SEL_HI_CS01__COL10__SHIFT 0x8 +#define MMEA6_ADDRDEC2_COL_SEL_HI_CS01__COL11__SHIFT 0xc +#define MMEA6_ADDRDEC2_COL_SEL_HI_CS01__COL12__SHIFT 0x10 +#define MMEA6_ADDRDEC2_COL_SEL_HI_CS01__COL13__SHIFT 0x14 +#define MMEA6_ADDRDEC2_COL_SEL_HI_CS01__COL14__SHIFT 0x18 +#define MMEA6_ADDRDEC2_COL_SEL_HI_CS01__COL15__SHIFT 0x1c +#define MMEA6_ADDRDEC2_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL +#define MMEA6_ADDRDEC2_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L +#define MMEA6_ADDRDEC2_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L +#define MMEA6_ADDRDEC2_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L +#define MMEA6_ADDRDEC2_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L +#define MMEA6_ADDRDEC2_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L +#define MMEA6_ADDRDEC2_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L +#define MMEA6_ADDRDEC2_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L +//MMEA6_ADDRDEC2_COL_SEL_HI_CS23 +#define MMEA6_ADDRDEC2_COL_SEL_HI_CS23__COL8__SHIFT 0x0 +#define MMEA6_ADDRDEC2_COL_SEL_HI_CS23__COL9__SHIFT 0x4 +#define MMEA6_ADDRDEC2_COL_SEL_HI_CS23__COL10__SHIFT 0x8 +#define MMEA6_ADDRDEC2_COL_SEL_HI_CS23__COL11__SHIFT 0xc +#define MMEA6_ADDRDEC2_COL_SEL_HI_CS23__COL12__SHIFT 0x10 +#define MMEA6_ADDRDEC2_COL_SEL_HI_CS23__COL13__SHIFT 0x14 +#define MMEA6_ADDRDEC2_COL_SEL_HI_CS23__COL14__SHIFT 0x18 +#define MMEA6_ADDRDEC2_COL_SEL_HI_CS23__COL15__SHIFT 0x1c +#define MMEA6_ADDRDEC2_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL +#define MMEA6_ADDRDEC2_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L +#define MMEA6_ADDRDEC2_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L +#define MMEA6_ADDRDEC2_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L +#define MMEA6_ADDRDEC2_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L +#define MMEA6_ADDRDEC2_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L +#define MMEA6_ADDRDEC2_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L +#define MMEA6_ADDRDEC2_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L +//MMEA6_ADDRDEC2_RM_SEL_CS01 +#define MMEA6_ADDRDEC2_RM_SEL_CS01__RM0__SHIFT 0x0 +#define MMEA6_ADDRDEC2_RM_SEL_CS01__RM1__SHIFT 0x4 +#define MMEA6_ADDRDEC2_RM_SEL_CS01__RM2__SHIFT 0x8 +#define MMEA6_ADDRDEC2_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc +#define MMEA6_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA6_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA6_ADDRDEC2_RM_SEL_CS01__RM0_MASK 0x0000000FL +#define MMEA6_ADDRDEC2_RM_SEL_CS01__RM1_MASK 0x000000F0L +#define MMEA6_ADDRDEC2_RM_SEL_CS01__RM2_MASK 0x00000F00L +#define MMEA6_ADDRDEC2_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA6_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA6_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA6_ADDRDEC2_RM_SEL_CS23 +#define MMEA6_ADDRDEC2_RM_SEL_CS23__RM0__SHIFT 0x0 +#define MMEA6_ADDRDEC2_RM_SEL_CS23__RM1__SHIFT 0x4 +#define MMEA6_ADDRDEC2_RM_SEL_CS23__RM2__SHIFT 0x8 +#define MMEA6_ADDRDEC2_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc +#define MMEA6_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA6_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA6_ADDRDEC2_RM_SEL_CS23__RM0_MASK 0x0000000FL +#define MMEA6_ADDRDEC2_RM_SEL_CS23__RM1_MASK 0x000000F0L +#define 
MMEA6_ADDRDEC2_RM_SEL_CS23__RM2_MASK 0x00000F00L +#define MMEA6_ADDRDEC2_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA6_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA6_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA6_ADDRDEC2_RM_SEL_SECCS01 +#define MMEA6_ADDRDEC2_RM_SEL_SECCS01__RM0__SHIFT 0x0 +#define MMEA6_ADDRDEC2_RM_SEL_SECCS01__RM1__SHIFT 0x4 +#define MMEA6_ADDRDEC2_RM_SEL_SECCS01__RM2__SHIFT 0x8 +#define MMEA6_ADDRDEC2_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc +#define MMEA6_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA6_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA6_ADDRDEC2_RM_SEL_SECCS01__RM0_MASK 0x0000000FL +#define MMEA6_ADDRDEC2_RM_SEL_SECCS01__RM1_MASK 0x000000F0L +#define MMEA6_ADDRDEC2_RM_SEL_SECCS01__RM2_MASK 0x00000F00L +#define MMEA6_ADDRDEC2_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA6_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA6_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA6_ADDRDEC2_RM_SEL_SECCS23 +#define MMEA6_ADDRDEC2_RM_SEL_SECCS23__RM0__SHIFT 0x0 +#define MMEA6_ADDRDEC2_RM_SEL_SECCS23__RM1__SHIFT 0x4 +#define MMEA6_ADDRDEC2_RM_SEL_SECCS23__RM2__SHIFT 0x8 +#define MMEA6_ADDRDEC2_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc +#define MMEA6_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA6_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA6_ADDRDEC2_RM_SEL_SECCS23__RM0_MASK 0x0000000FL +#define MMEA6_ADDRDEC2_RM_SEL_SECCS23__RM1_MASK 0x000000F0L +#define MMEA6_ADDRDEC2_RM_SEL_SECCS23__RM2_MASK 0x00000F00L +#define MMEA6_ADDRDEC2_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA6_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA6_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA6_ADDRNORMDRAM_GLOBAL_CNTL +#define MMEA6_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K__SHIFT 0x14 +#define MMEA6_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M__SHIFT 0x15 +#define MMEA6_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G__SHIFT 0x16 +#define MMEA6_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K_MASK 0x00100000L +#define MMEA6_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M_MASK 0x00200000L +#define MMEA6_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G_MASK 0x00400000L +//MMEA6_ADDRNORMGMI_GLOBAL_CNTL +#define MMEA6_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K__SHIFT 0x14 +#define MMEA6_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M__SHIFT 0x15 +#define MMEA6_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G__SHIFT 0x16 +#define MMEA6_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K_MASK 0x00100000L +#define MMEA6_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M_MASK 0x00200000L +#define MMEA6_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G_MASK 0x00400000L +//MMEA6_IO_RD_CLI2GRP_MAP0 +#define MMEA6_IO_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0 +#define MMEA6_IO_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2 +#define MMEA6_IO_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4 +#define MMEA6_IO_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6 +#define MMEA6_IO_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8 +#define MMEA6_IO_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa +#define MMEA6_IO_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc +#define MMEA6_IO_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe +#define MMEA6_IO_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10 +#define MMEA6_IO_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12 +#define MMEA6_IO_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14 +#define 
MMEA6_IO_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16 +#define MMEA6_IO_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18 +#define MMEA6_IO_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a +#define MMEA6_IO_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c +#define MMEA6_IO_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e +#define MMEA6_IO_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L +#define MMEA6_IO_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL +#define MMEA6_IO_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L +#define MMEA6_IO_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L +#define MMEA6_IO_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L +#define MMEA6_IO_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L +#define MMEA6_IO_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L +#define MMEA6_IO_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L +#define MMEA6_IO_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L +#define MMEA6_IO_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L +#define MMEA6_IO_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L +#define MMEA6_IO_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L +#define MMEA6_IO_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L +#define MMEA6_IO_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L +#define MMEA6_IO_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L +#define MMEA6_IO_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L +//MMEA6_IO_RD_CLI2GRP_MAP1 +#define MMEA6_IO_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0 +#define MMEA6_IO_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2 +#define MMEA6_IO_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4 +#define MMEA6_IO_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6 +#define MMEA6_IO_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8 +#define MMEA6_IO_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa +#define MMEA6_IO_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc +#define MMEA6_IO_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe +#define MMEA6_IO_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10 +#define MMEA6_IO_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12 +#define MMEA6_IO_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14 +#define MMEA6_IO_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16 +#define MMEA6_IO_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18 +#define MMEA6_IO_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a +#define MMEA6_IO_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c +#define MMEA6_IO_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e +#define MMEA6_IO_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L +#define MMEA6_IO_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL +#define MMEA6_IO_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L +#define MMEA6_IO_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L +#define MMEA6_IO_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L +#define MMEA6_IO_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L +#define MMEA6_IO_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L +#define MMEA6_IO_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L +#define MMEA6_IO_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L +#define MMEA6_IO_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L +#define MMEA6_IO_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L +#define MMEA6_IO_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L +#define MMEA6_IO_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L +#define MMEA6_IO_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L +#define MMEA6_IO_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L +#define MMEA6_IO_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L +//MMEA6_IO_WR_CLI2GRP_MAP0 +#define MMEA6_IO_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0 +#define MMEA6_IO_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2 +#define MMEA6_IO_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4 +#define MMEA6_IO_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6 +#define 
MMEA6_IO_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8 +#define MMEA6_IO_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa +#define MMEA6_IO_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc +#define MMEA6_IO_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe +#define MMEA6_IO_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10 +#define MMEA6_IO_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12 +#define MMEA6_IO_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14 +#define MMEA6_IO_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16 +#define MMEA6_IO_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18 +#define MMEA6_IO_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a +#define MMEA6_IO_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c +#define MMEA6_IO_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e +#define MMEA6_IO_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L +#define MMEA6_IO_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL +#define MMEA6_IO_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L +#define MMEA6_IO_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L +#define MMEA6_IO_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L +#define MMEA6_IO_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L +#define MMEA6_IO_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L +#define MMEA6_IO_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L +#define MMEA6_IO_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L +#define MMEA6_IO_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L +#define MMEA6_IO_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L +#define MMEA6_IO_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L +#define MMEA6_IO_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L +#define MMEA6_IO_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L +#define MMEA6_IO_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L +#define MMEA6_IO_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L +//MMEA6_IO_WR_CLI2GRP_MAP1 +#define MMEA6_IO_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0 +#define MMEA6_IO_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2 +#define MMEA6_IO_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4 +#define MMEA6_IO_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6 +#define MMEA6_IO_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8 +#define MMEA6_IO_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa +#define MMEA6_IO_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc +#define MMEA6_IO_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe +#define MMEA6_IO_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10 +#define MMEA6_IO_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12 +#define MMEA6_IO_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14 +#define MMEA6_IO_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16 +#define MMEA6_IO_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18 +#define MMEA6_IO_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a +#define MMEA6_IO_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c +#define MMEA6_IO_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e +#define MMEA6_IO_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L +#define MMEA6_IO_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL +#define MMEA6_IO_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L +#define MMEA6_IO_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L +#define MMEA6_IO_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L +#define MMEA6_IO_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L +#define MMEA6_IO_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L +#define MMEA6_IO_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L +#define MMEA6_IO_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L +#define MMEA6_IO_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L +#define MMEA6_IO_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L +#define MMEA6_IO_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L +#define MMEA6_IO_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L +#define MMEA6_IO_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 
0x0C000000L +#define MMEA6_IO_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L +#define MMEA6_IO_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L +//MMEA6_IO_RD_COMBINE_FLUSH +#define MMEA6_IO_RD_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0 +#define MMEA6_IO_RD_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4 +#define MMEA6_IO_RD_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8 +#define MMEA6_IO_RD_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc +#define MMEA6_IO_RD_COMBINE_FLUSH__FORWARD_COMB_ONLY__SHIFT 0x10 +#define MMEA6_IO_RD_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL +#define MMEA6_IO_RD_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L +#define MMEA6_IO_RD_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L +#define MMEA6_IO_RD_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L +#define MMEA6_IO_RD_COMBINE_FLUSH__FORWARD_COMB_ONLY_MASK 0x00010000L +//MMEA6_IO_WR_COMBINE_FLUSH +#define MMEA6_IO_WR_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0 +#define MMEA6_IO_WR_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4 +#define MMEA6_IO_WR_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8 +#define MMEA6_IO_WR_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc +#define MMEA6_IO_WR_COMBINE_FLUSH__FORWARD_COMB_ONLY__SHIFT 0x10 +#define MMEA6_IO_WR_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL +#define MMEA6_IO_WR_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L +#define MMEA6_IO_WR_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L +#define MMEA6_IO_WR_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L +#define MMEA6_IO_WR_COMBINE_FLUSH__FORWARD_COMB_ONLY_MASK 0x00010000L +//MMEA6_IO_GROUP_BURST +#define MMEA6_IO_GROUP_BURST__RD_LIMIT_LO__SHIFT 0x0 +#define MMEA6_IO_GROUP_BURST__RD_LIMIT_HI__SHIFT 0x8 +#define MMEA6_IO_GROUP_BURST__WR_LIMIT_LO__SHIFT 0x10 +#define MMEA6_IO_GROUP_BURST__WR_LIMIT_HI__SHIFT 0x18 +#define MMEA6_IO_GROUP_BURST__RD_LIMIT_LO_MASK 0x000000FFL +#define MMEA6_IO_GROUP_BURST__RD_LIMIT_HI_MASK 0x0000FF00L +#define MMEA6_IO_GROUP_BURST__WR_LIMIT_LO_MASK 0x00FF0000L +#define MMEA6_IO_GROUP_BURST__WR_LIMIT_HI_MASK 0xFF000000L +//MMEA6_IO_RD_PRI_AGE +#define MMEA6_IO_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0 +#define MMEA6_IO_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3 +#define MMEA6_IO_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6 +#define MMEA6_IO_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9 +#define MMEA6_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc +#define MMEA6_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf +#define MMEA6_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12 +#define MMEA6_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15 +#define MMEA6_IO_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L +#define MMEA6_IO_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L +#define MMEA6_IO_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L +#define MMEA6_IO_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L +#define MMEA6_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L +#define MMEA6_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L +#define MMEA6_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L +#define MMEA6_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L +//MMEA6_IO_WR_PRI_AGE +#define MMEA6_IO_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0 +#define MMEA6_IO_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3 +#define MMEA6_IO_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6 +#define MMEA6_IO_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9 +#define MMEA6_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc +#define MMEA6_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf +#define MMEA6_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12 +#define MMEA6_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15 +#define 
MMEA6_IO_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L +#define MMEA6_IO_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L +#define MMEA6_IO_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L +#define MMEA6_IO_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L +#define MMEA6_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L +#define MMEA6_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L +#define MMEA6_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L +#define MMEA6_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L +//MMEA6_IO_RD_PRI_QUEUING +#define MMEA6_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0 +#define MMEA6_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3 +#define MMEA6_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6 +#define MMEA6_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9 +#define MMEA6_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L +#define MMEA6_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L +#define MMEA6_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L +#define MMEA6_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L +//MMEA6_IO_WR_PRI_QUEUING +#define MMEA6_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0 +#define MMEA6_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3 +#define MMEA6_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6 +#define MMEA6_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9 +#define MMEA6_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L +#define MMEA6_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L +#define MMEA6_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L +#define MMEA6_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L +//MMEA6_IO_RD_PRI_FIXED +#define MMEA6_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0 +#define MMEA6_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3 +#define MMEA6_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6 +#define MMEA6_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9 +#define MMEA6_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L +#define MMEA6_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L +#define MMEA6_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L +#define MMEA6_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L +//MMEA6_IO_WR_PRI_FIXED +#define MMEA6_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0 +#define MMEA6_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3 +#define MMEA6_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6 +#define MMEA6_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9 +#define MMEA6_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L +#define MMEA6_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L +#define MMEA6_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L +#define MMEA6_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L +//MMEA6_IO_RD_PRI_URGENCY +#define MMEA6_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0 +#define MMEA6_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3 +#define MMEA6_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6 +#define MMEA6_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9 +#define MMEA6_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc +#define MMEA6_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd +#define MMEA6_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe +#define MMEA6_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf +#define 
MMEA6_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L +#define MMEA6_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L +#define MMEA6_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L +#define MMEA6_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L +#define MMEA6_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L +#define MMEA6_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L +#define MMEA6_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L +#define MMEA6_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L +//MMEA6_IO_WR_PRI_URGENCY +#define MMEA6_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0 +#define MMEA6_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3 +#define MMEA6_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6 +#define MMEA6_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9 +#define MMEA6_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc +#define MMEA6_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd +#define MMEA6_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe +#define MMEA6_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf +#define MMEA6_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L +#define MMEA6_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L +#define MMEA6_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L +#define MMEA6_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L +#define MMEA6_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L +#define MMEA6_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L +#define MMEA6_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L +#define MMEA6_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L +//MMEA6_IO_RD_PRI_URGENCY_MASKING +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0 +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1 +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2 +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3 +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4 +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5 +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6 +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7 +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8 +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9 +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10 +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11 +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12 +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13 +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14 +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15 +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16 +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17 +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18 +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19 +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a +#define 
MMEA6_IO_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L +#define MMEA6_IO_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L +//MMEA6_IO_WR_PRI_URGENCY_MASKING +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0 +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1 +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2 +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3 +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4 +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5 +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6 +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7 +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8 +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9 +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe +#define 
MMEA6_IO_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10 +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11 +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12 +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13 +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14 +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15 +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16 +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17 +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18 +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19 +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L +#define MMEA6_IO_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L +//MMEA6_IO_RD_PRI_QUANT_PRI1 +#define MMEA6_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA6_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA6_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10 
+#define MMEA6_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA6_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA6_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA6_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA6_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA6_IO_RD_PRI_QUANT_PRI2 +#define MMEA6_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA6_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA6_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA6_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA6_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA6_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA6_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA6_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA6_IO_RD_PRI_QUANT_PRI3 +#define MMEA6_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA6_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA6_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA6_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA6_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA6_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA6_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA6_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA6_IO_WR_PRI_QUANT_PRI1 +#define MMEA6_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA6_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA6_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA6_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA6_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA6_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA6_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA6_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA6_IO_WR_PRI_QUANT_PRI2 +#define MMEA6_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA6_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA6_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA6_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA6_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA6_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA6_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA6_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA6_IO_WR_PRI_QUANT_PRI3 +#define MMEA6_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA6_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA6_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA6_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA6_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA6_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA6_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA6_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA6_SDP_ARB_DRAM +#define MMEA6_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0 +#define MMEA6_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA__SHIFT 0x8 +#define MMEA6_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI__SHIFT 0x10 +#define MMEA6_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI__SHIFT 0x11 +#define MMEA6_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES__SHIFT 
0x12 +#define MMEA6_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES__SHIFT 0x13 +#define MMEA6_SDP_ARB_DRAM__EOB_ON_EXPIRE__SHIFT 0x14 +#define MMEA6_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15 +#define MMEA6_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL +#define MMEA6_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L +#define MMEA6_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI_MASK 0x00010000L +#define MMEA6_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI_MASK 0x00020000L +#define MMEA6_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES_MASK 0x00040000L +#define MMEA6_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES_MASK 0x00080000L +#define MMEA6_SDP_ARB_DRAM__EOB_ON_EXPIRE_MASK 0x00100000L +#define MMEA6_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L +//MMEA6_SDP_ARB_GMI +#define MMEA6_SDP_ARB_GMI__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0 +#define MMEA6_SDP_ARB_GMI__RDWR_BURST_LIMIT_DATA__SHIFT 0x8 +#define MMEA6_SDP_ARB_GMI__EARLY_SW2RD_ON_PRI__SHIFT 0x10 +#define MMEA6_SDP_ARB_GMI__EARLY_SW2WR_ON_PRI__SHIFT 0x11 +#define MMEA6_SDP_ARB_GMI__EARLY_SW2RD_ON_RES__SHIFT 0x12 +#define MMEA6_SDP_ARB_GMI__EARLY_SW2WR_ON_RES__SHIFT 0x13 +#define MMEA6_SDP_ARB_GMI__EOB_ON_EXPIRE__SHIFT 0x14 +#define MMEA6_SDP_ARB_GMI__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15 +#define MMEA6_SDP_ARB_GMI__ALLOW_CHAIN_BREAKING__SHIFT 0x16 +#define MMEA6_SDP_ARB_GMI__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL +#define MMEA6_SDP_ARB_GMI__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L +#define MMEA6_SDP_ARB_GMI__EARLY_SW2RD_ON_PRI_MASK 0x00010000L +#define MMEA6_SDP_ARB_GMI__EARLY_SW2WR_ON_PRI_MASK 0x00020000L +#define MMEA6_SDP_ARB_GMI__EARLY_SW2RD_ON_RES_MASK 0x00040000L +#define MMEA6_SDP_ARB_GMI__EARLY_SW2WR_ON_RES_MASK 0x00080000L +#define MMEA6_SDP_ARB_GMI__EOB_ON_EXPIRE_MASK 0x00100000L +#define MMEA6_SDP_ARB_GMI__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L +#define MMEA6_SDP_ARB_GMI__ALLOW_CHAIN_BREAKING_MASK 0x00400000L +//MMEA6_SDP_ARB_FINAL +#define MMEA6_SDP_ARB_FINAL__DRAM_BURST_LIMIT__SHIFT 0x0 +#define MMEA6_SDP_ARB_FINAL__GMI_BURST_LIMIT__SHIFT 0x5 +#define MMEA6_SDP_ARB_FINAL__IO_BURST_LIMIT__SHIFT 0xa +#define MMEA6_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER__SHIFT 0xf +#define MMEA6_SDP_ARB_FINAL__RDONLY_VC0__SHIFT 0x11 +#define MMEA6_SDP_ARB_FINAL__RDONLY_VC1__SHIFT 0x12 +#define MMEA6_SDP_ARB_FINAL__RDONLY_VC2__SHIFT 0x13 +#define MMEA6_SDP_ARB_FINAL__RDONLY_VC3__SHIFT 0x14 +#define MMEA6_SDP_ARB_FINAL__RDONLY_VC4__SHIFT 0x15 +#define MMEA6_SDP_ARB_FINAL__RDONLY_VC5__SHIFT 0x16 +#define MMEA6_SDP_ARB_FINAL__RDONLY_VC6__SHIFT 0x17 +#define MMEA6_SDP_ARB_FINAL__RDONLY_VC7__SHIFT 0x18 +#define MMEA6_SDP_ARB_FINAL__ERREVENT_ON_ERROR__SHIFT 0x19 +#define MMEA6_SDP_ARB_FINAL__HALTREQ_ON_ERROR__SHIFT 0x1a +#define MMEA6_SDP_ARB_FINAL__GMI_BURST_STRETCH__SHIFT 0x1b +#define MMEA6_SDP_ARB_FINAL__DRAM_BURST_LIMIT_MASK 0x0000001FL +#define MMEA6_SDP_ARB_FINAL__GMI_BURST_LIMIT_MASK 0x000003E0L +#define MMEA6_SDP_ARB_FINAL__IO_BURST_LIMIT_MASK 0x00007C00L +#define MMEA6_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER_MASK 0x00018000L +#define MMEA6_SDP_ARB_FINAL__RDONLY_VC0_MASK 0x00020000L +#define MMEA6_SDP_ARB_FINAL__RDONLY_VC1_MASK 0x00040000L +#define MMEA6_SDP_ARB_FINAL__RDONLY_VC2_MASK 0x00080000L +#define MMEA6_SDP_ARB_FINAL__RDONLY_VC3_MASK 0x00100000L +#define MMEA6_SDP_ARB_FINAL__RDONLY_VC4_MASK 0x00200000L +#define MMEA6_SDP_ARB_FINAL__RDONLY_VC5_MASK 0x00400000L +#define MMEA6_SDP_ARB_FINAL__RDONLY_VC6_MASK 0x00800000L +#define MMEA6_SDP_ARB_FINAL__RDONLY_VC7_MASK 0x01000000L +#define MMEA6_SDP_ARB_FINAL__ERREVENT_ON_ERROR_MASK 0x02000000L +#define MMEA6_SDP_ARB_FINAL__HALTREQ_ON_ERROR_MASK 
0x04000000L +#define MMEA6_SDP_ARB_FINAL__GMI_BURST_STRETCH_MASK 0x08000000L +//MMEA6_SDP_DRAM_PRIORITY +#define MMEA6_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0 +#define MMEA6_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4 +#define MMEA6_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8 +#define MMEA6_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc +#define MMEA6_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10 +#define MMEA6_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14 +#define MMEA6_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18 +#define MMEA6_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c +#define MMEA6_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL +#define MMEA6_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L +#define MMEA6_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L +#define MMEA6_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L +#define MMEA6_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L +#define MMEA6_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L +#define MMEA6_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L +#define MMEA6_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L +//MMEA6_SDP_GMI_PRIORITY +#define MMEA6_SDP_GMI_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0 +#define MMEA6_SDP_GMI_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4 +#define MMEA6_SDP_GMI_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8 +#define MMEA6_SDP_GMI_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc +#define MMEA6_SDP_GMI_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10 +#define MMEA6_SDP_GMI_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14 +#define MMEA6_SDP_GMI_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18 +#define MMEA6_SDP_GMI_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c +#define MMEA6_SDP_GMI_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL +#define MMEA6_SDP_GMI_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L +#define MMEA6_SDP_GMI_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L +#define MMEA6_SDP_GMI_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L +#define MMEA6_SDP_GMI_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L +#define MMEA6_SDP_GMI_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L +#define MMEA6_SDP_GMI_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L +#define MMEA6_SDP_GMI_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L +//MMEA6_SDP_IO_PRIORITY +#define MMEA6_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0 +#define MMEA6_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4 +#define MMEA6_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8 +#define MMEA6_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc +#define MMEA6_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10 +#define MMEA6_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14 +#define MMEA6_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18 +#define MMEA6_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c +#define MMEA6_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL +#define MMEA6_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L +#define MMEA6_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L +#define MMEA6_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L +#define MMEA6_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L +#define MMEA6_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L +#define MMEA6_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L +#define MMEA6_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L +//MMEA6_SDP_CREDITS +#define MMEA6_SDP_CREDITS__TAG_LIMIT__SHIFT 0x0 +#define MMEA6_SDP_CREDITS__WR_RESP_CREDITS__SHIFT 0x8 +#define MMEA6_SDP_CREDITS__RD_RESP_CREDITS__SHIFT 0x10 +#define 
MMEA6_SDP_CREDITS__TAG_LIMIT_MASK 0x000000FFL +#define MMEA6_SDP_CREDITS__WR_RESP_CREDITS_MASK 0x00007F00L +#define MMEA6_SDP_CREDITS__RD_RESP_CREDITS_MASK 0x007F0000L +//MMEA6_SDP_TAG_RESERVE0 +#define MMEA6_SDP_TAG_RESERVE0__VC0__SHIFT 0x0 +#define MMEA6_SDP_TAG_RESERVE0__VC1__SHIFT 0x8 +#define MMEA6_SDP_TAG_RESERVE0__VC2__SHIFT 0x10 +#define MMEA6_SDP_TAG_RESERVE0__VC3__SHIFT 0x18 +#define MMEA6_SDP_TAG_RESERVE0__VC0_MASK 0x000000FFL +#define MMEA6_SDP_TAG_RESERVE0__VC1_MASK 0x0000FF00L +#define MMEA6_SDP_TAG_RESERVE0__VC2_MASK 0x00FF0000L +#define MMEA6_SDP_TAG_RESERVE0__VC3_MASK 0xFF000000L +//MMEA6_SDP_TAG_RESERVE1 +#define MMEA6_SDP_TAG_RESERVE1__VC4__SHIFT 0x0 +#define MMEA6_SDP_TAG_RESERVE1__VC5__SHIFT 0x8 +#define MMEA6_SDP_TAG_RESERVE1__VC6__SHIFT 0x10 +#define MMEA6_SDP_TAG_RESERVE1__VC7__SHIFT 0x18 +#define MMEA6_SDP_TAG_RESERVE1__VC4_MASK 0x000000FFL +#define MMEA6_SDP_TAG_RESERVE1__VC5_MASK 0x0000FF00L +#define MMEA6_SDP_TAG_RESERVE1__VC6_MASK 0x00FF0000L +#define MMEA6_SDP_TAG_RESERVE1__VC7_MASK 0xFF000000L +//MMEA6_SDP_VCC_RESERVE0 +#define MMEA6_SDP_VCC_RESERVE0__VC0_CREDITS__SHIFT 0x0 +#define MMEA6_SDP_VCC_RESERVE0__VC1_CREDITS__SHIFT 0x6 +#define MMEA6_SDP_VCC_RESERVE0__VC2_CREDITS__SHIFT 0xc +#define MMEA6_SDP_VCC_RESERVE0__VC3_CREDITS__SHIFT 0x12 +#define MMEA6_SDP_VCC_RESERVE0__VC4_CREDITS__SHIFT 0x18 +#define MMEA6_SDP_VCC_RESERVE0__VC0_CREDITS_MASK 0x0000003FL +#define MMEA6_SDP_VCC_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L +#define MMEA6_SDP_VCC_RESERVE0__VC2_CREDITS_MASK 0x0003F000L +#define MMEA6_SDP_VCC_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L +#define MMEA6_SDP_VCC_RESERVE0__VC4_CREDITS_MASK 0x3F000000L +//MMEA6_SDP_VCC_RESERVE1 +#define MMEA6_SDP_VCC_RESERVE1__VC5_CREDITS__SHIFT 0x0 +#define MMEA6_SDP_VCC_RESERVE1__VC6_CREDITS__SHIFT 0x6 +#define MMEA6_SDP_VCC_RESERVE1__VC7_CREDITS__SHIFT 0xc +#define MMEA6_SDP_VCC_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f +#define MMEA6_SDP_VCC_RESERVE1__VC5_CREDITS_MASK 0x0000003FL +#define MMEA6_SDP_VCC_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L +#define MMEA6_SDP_VCC_RESERVE1__VC7_CREDITS_MASK 0x0003F000L +#define MMEA6_SDP_VCC_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L +//MMEA6_SDP_VCD_RESERVE0 +#define MMEA6_SDP_VCD_RESERVE0__VC0_CREDITS__SHIFT 0x0 +#define MMEA6_SDP_VCD_RESERVE0__VC1_CREDITS__SHIFT 0x6 +#define MMEA6_SDP_VCD_RESERVE0__VC2_CREDITS__SHIFT 0xc +#define MMEA6_SDP_VCD_RESERVE0__VC3_CREDITS__SHIFT 0x12 +#define MMEA6_SDP_VCD_RESERVE0__VC4_CREDITS__SHIFT 0x18 +#define MMEA6_SDP_VCD_RESERVE0__VC0_CREDITS_MASK 0x0000003FL +#define MMEA6_SDP_VCD_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L +#define MMEA6_SDP_VCD_RESERVE0__VC2_CREDITS_MASK 0x0003F000L +#define MMEA6_SDP_VCD_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L +#define MMEA6_SDP_VCD_RESERVE0__VC4_CREDITS_MASK 0x3F000000L +//MMEA6_SDP_VCD_RESERVE1 +#define MMEA6_SDP_VCD_RESERVE1__VC5_CREDITS__SHIFT 0x0 +#define MMEA6_SDP_VCD_RESERVE1__VC6_CREDITS__SHIFT 0x6 +#define MMEA6_SDP_VCD_RESERVE1__VC7_CREDITS__SHIFT 0xc +#define MMEA6_SDP_VCD_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f +#define MMEA6_SDP_VCD_RESERVE1__VC5_CREDITS_MASK 0x0000003FL +#define MMEA6_SDP_VCD_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L +#define MMEA6_SDP_VCD_RESERVE1__VC7_CREDITS_MASK 0x0003F000L +#define MMEA6_SDP_VCD_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L +//MMEA6_SDP_REQ_CNTL +#define MMEA6_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ__SHIFT 0x0 +#define MMEA6_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE__SHIFT 0x1 +#define MMEA6_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC__SHIFT 0x2 +#define 
MMEA6_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM__SHIFT 0x3 +#define MMEA6_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI__SHIFT 0x4 +#define MMEA6_SDP_REQ_CNTL__INNER_DOMAIN_MODE__SHIFT 0x5 +#define MMEA6_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ_MASK 0x00000001L +#define MMEA6_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE_MASK 0x00000002L +#define MMEA6_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC_MASK 0x00000004L +#define MMEA6_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM_MASK 0x00000008L +#define MMEA6_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI_MASK 0x00000010L +#define MMEA6_SDP_REQ_CNTL__INNER_DOMAIN_MODE_MASK 0x00000020L +//MMEA6_MISC +#define MMEA6_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB__SHIFT 0x0 +#define MMEA6_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB__SHIFT 0x1 +#define MMEA6_MISC__RELATIVE_PRI_IN_GMI_RD_ARB__SHIFT 0x2 +#define MMEA6_MISC__RELATIVE_PRI_IN_GMI_WR_ARB__SHIFT 0x3 +#define MMEA6_MISC__RELATIVE_PRI_IN_IO_RD_ARB__SHIFT 0x4 +#define MMEA6_MISC__RELATIVE_PRI_IN_IO_WR_ARB__SHIFT 0x5 +#define MMEA6_MISC__EARLYWRRET_ENABLE_VC0__SHIFT 0x6 +#define MMEA6_MISC__EARLYWRRET_ENABLE_VC1__SHIFT 0x7 +#define MMEA6_MISC__EARLYWRRET_ENABLE_VC2__SHIFT 0x8 +#define MMEA6_MISC__EARLYWRRET_ENABLE_VC3__SHIFT 0x9 +#define MMEA6_MISC__EARLYWRRET_ENABLE_VC4__SHIFT 0xa +#define MMEA6_MISC__EARLYWRRET_ENABLE_VC5__SHIFT 0xb +#define MMEA6_MISC__EARLYWRRET_ENABLE_VC6__SHIFT 0xc +#define MMEA6_MISC__EARLYWRRET_ENABLE_VC7__SHIFT 0xd +#define MMEA6_MISC__EARLY_SDP_ORIGDATA__SHIFT 0xe +#define MMEA6_MISC__LINKMGR_DYNAMIC_MODE__SHIFT 0xf +#define MMEA6_MISC__LINKMGR_HALT_THRESHOLD__SHIFT 0x11 +#define MMEA6_MISC__LINKMGR_RECONNECT_DELAY__SHIFT 0x13 +#define MMEA6_MISC__LINKMGR_IDLE_THRESHOLD__SHIFT 0x15 +#define MMEA6_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB__SHIFT 0x1a +#define MMEA6_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB__SHIFT 0x1b +#define MMEA6_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB__SHIFT 0x1c +#define MMEA6_MISC__FAVOUR_LAST_CS_IN_GMI_ARB__SHIFT 0x1d +#define MMEA6_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB__SHIFT 0x1e +#define MMEA6_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB__SHIFT 0x1f +#define MMEA6_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB_MASK 0x00000001L +#define MMEA6_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB_MASK 0x00000002L +#define MMEA6_MISC__RELATIVE_PRI_IN_GMI_RD_ARB_MASK 0x00000004L +#define MMEA6_MISC__RELATIVE_PRI_IN_GMI_WR_ARB_MASK 0x00000008L +#define MMEA6_MISC__RELATIVE_PRI_IN_IO_RD_ARB_MASK 0x00000010L +#define MMEA6_MISC__RELATIVE_PRI_IN_IO_WR_ARB_MASK 0x00000020L +#define MMEA6_MISC__EARLYWRRET_ENABLE_VC0_MASK 0x00000040L +#define MMEA6_MISC__EARLYWRRET_ENABLE_VC1_MASK 0x00000080L +#define MMEA6_MISC__EARLYWRRET_ENABLE_VC2_MASK 0x00000100L +#define MMEA6_MISC__EARLYWRRET_ENABLE_VC3_MASK 0x00000200L +#define MMEA6_MISC__EARLYWRRET_ENABLE_VC4_MASK 0x00000400L +#define MMEA6_MISC__EARLYWRRET_ENABLE_VC5_MASK 0x00000800L +#define MMEA6_MISC__EARLYWRRET_ENABLE_VC6_MASK 0x00001000L +#define MMEA6_MISC__EARLYWRRET_ENABLE_VC7_MASK 0x00002000L +#define MMEA6_MISC__EARLY_SDP_ORIGDATA_MASK 0x00004000L +#define MMEA6_MISC__LINKMGR_DYNAMIC_MODE_MASK 0x00018000L +#define MMEA6_MISC__LINKMGR_HALT_THRESHOLD_MASK 0x00060000L +#define MMEA6_MISC__LINKMGR_RECONNECT_DELAY_MASK 0x00180000L +#define MMEA6_MISC__LINKMGR_IDLE_THRESHOLD_MASK 0x03E00000L +#define MMEA6_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB_MASK 0x04000000L +#define MMEA6_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB_MASK 0x08000000L +#define MMEA6_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB_MASK 0x10000000L +#define MMEA6_MISC__FAVOUR_LAST_CS_IN_GMI_ARB_MASK 0x20000000L +#define MMEA6_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB_MASK 0x40000000L +#define 
MMEA6_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB_MASK 0x80000000L +//MMEA6_LATENCY_SAMPLING +#define MMEA6_LATENCY_SAMPLING__SAMPLER0_DRAM__SHIFT 0x0 +#define MMEA6_LATENCY_SAMPLING__SAMPLER1_DRAM__SHIFT 0x1 +#define MMEA6_LATENCY_SAMPLING__SAMPLER0_GMI__SHIFT 0x2 +#define MMEA6_LATENCY_SAMPLING__SAMPLER1_GMI__SHIFT 0x3 +#define MMEA6_LATENCY_SAMPLING__SAMPLER0_IO__SHIFT 0x4 +#define MMEA6_LATENCY_SAMPLING__SAMPLER1_IO__SHIFT 0x5 +#define MMEA6_LATENCY_SAMPLING__SAMPLER0_READ__SHIFT 0x6 +#define MMEA6_LATENCY_SAMPLING__SAMPLER1_READ__SHIFT 0x7 +#define MMEA6_LATENCY_SAMPLING__SAMPLER0_WRITE__SHIFT 0x8 +#define MMEA6_LATENCY_SAMPLING__SAMPLER1_WRITE__SHIFT 0x9 +#define MMEA6_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET__SHIFT 0xa +#define MMEA6_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET__SHIFT 0xb +#define MMEA6_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET__SHIFT 0xc +#define MMEA6_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET__SHIFT 0xd +#define MMEA6_LATENCY_SAMPLING__SAMPLER0_VC__SHIFT 0xe +#define MMEA6_LATENCY_SAMPLING__SAMPLER1_VC__SHIFT 0x16 +#define MMEA6_LATENCY_SAMPLING__SAMPLER0_DRAM_MASK 0x00000001L +#define MMEA6_LATENCY_SAMPLING__SAMPLER1_DRAM_MASK 0x00000002L +#define MMEA6_LATENCY_SAMPLING__SAMPLER0_GMI_MASK 0x00000004L +#define MMEA6_LATENCY_SAMPLING__SAMPLER1_GMI_MASK 0x00000008L +#define MMEA6_LATENCY_SAMPLING__SAMPLER0_IO_MASK 0x00000010L +#define MMEA6_LATENCY_SAMPLING__SAMPLER1_IO_MASK 0x00000020L +#define MMEA6_LATENCY_SAMPLING__SAMPLER0_READ_MASK 0x00000040L +#define MMEA6_LATENCY_SAMPLING__SAMPLER1_READ_MASK 0x00000080L +#define MMEA6_LATENCY_SAMPLING__SAMPLER0_WRITE_MASK 0x00000100L +#define MMEA6_LATENCY_SAMPLING__SAMPLER1_WRITE_MASK 0x00000200L +#define MMEA6_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET_MASK 0x00000400L +#define MMEA6_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET_MASK 0x00000800L +#define MMEA6_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET_MASK 0x00001000L +#define MMEA6_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET_MASK 0x00002000L +#define MMEA6_LATENCY_SAMPLING__SAMPLER0_VC_MASK 0x003FC000L +#define MMEA6_LATENCY_SAMPLING__SAMPLER1_VC_MASK 0x3FC00000L +//MMEA6_PERFCOUNTER_LO +#define MMEA6_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0 +#define MMEA6_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL +//MMEA6_PERFCOUNTER_HI +#define MMEA6_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0 +#define MMEA6_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10 +#define MMEA6_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL +#define MMEA6_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L +//MMEA6_PERFCOUNTER0_CFG +#define MMEA6_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0 +#define MMEA6_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8 +#define MMEA6_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18 +#define MMEA6_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c +#define MMEA6_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d +#define MMEA6_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL +#define MMEA6_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define MMEA6_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L +#define MMEA6_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L +#define MMEA6_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L +//MMEA6_PERFCOUNTER1_CFG +#define MMEA6_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0 +#define MMEA6_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8 +#define MMEA6_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18 +#define MMEA6_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c +#define MMEA6_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d +#define MMEA6_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL +#define MMEA6_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define MMEA6_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L +#define 
MMEA6_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L +#define MMEA6_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L +//MMEA6_PERFCOUNTER_RSLT_CNTL +#define MMEA6_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0 +#define MMEA6_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8 +#define MMEA6_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10 +#define MMEA6_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18 +#define MMEA6_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19 +#define MMEA6_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a +#define MMEA6_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL +#define MMEA6_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L +#define MMEA6_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L +#define MMEA6_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L +#define MMEA6_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L +#define MMEA6_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L +//MMEA6_EDC_CNT +#define MMEA6_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0 +#define MMEA6_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2 +#define MMEA6_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4 +#define MMEA6_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6 +#define MMEA6_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8 +#define MMEA6_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa +#define MMEA6_EDC_CNT__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc +#define MMEA6_EDC_CNT__RRET_TAGMEM_DED_COUNT__SHIFT 0xe +#define MMEA6_EDC_CNT__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10 +#define MMEA6_EDC_CNT__WRET_TAGMEM_DED_COUNT__SHIFT 0x12 +#define MMEA6_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x14 +#define MMEA6_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x16 +#define MMEA6_EDC_CNT__IORD_CMDMEM_SED_COUNT__SHIFT 0x18 +#define MMEA6_EDC_CNT__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1a +#define MMEA6_EDC_CNT__IOWR_DATAMEM_SED_COUNT__SHIFT 0x1c +#define MMEA6_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT_MASK 0x00000003L +#define MMEA6_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL +#define MMEA6_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L +#define MMEA6_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L +#define MMEA6_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L +#define MMEA6_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L +#define MMEA6_EDC_CNT__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L +#define MMEA6_EDC_CNT__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L +#define MMEA6_EDC_CNT__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L +#define MMEA6_EDC_CNT__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L +#define MMEA6_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x00300000L +#define MMEA6_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x00C00000L +#define MMEA6_EDC_CNT__IORD_CMDMEM_SED_COUNT_MASK 0x03000000L +#define MMEA6_EDC_CNT__IOWR_CMDMEM_SED_COUNT_MASK 0x0C000000L +#define MMEA6_EDC_CNT__IOWR_DATAMEM_SED_COUNT_MASK 0x30000000L +//MMEA6_EDC_CNT2 +#define MMEA6_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0 +#define MMEA6_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2 +#define MMEA6_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4 +#define MMEA6_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6 +#define MMEA6_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8 +#define MMEA6_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa +#define MMEA6_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc +#define MMEA6_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe +#define MMEA6_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L +#define MMEA6_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL +#define MMEA6_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L +#define MMEA6_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT_MASK 
0x000000C0L +#define MMEA6_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L +#define MMEA6_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L +#define MMEA6_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L +#define MMEA6_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L +//MMEA6_DSM_CNTL +#define MMEA6_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 +#define MMEA6_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 +#define MMEA6_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x3 +#define MMEA6_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5 +#define MMEA6_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x6 +#define MMEA6_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8 +#define MMEA6_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0x9 +#define MMEA6_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb +#define MMEA6_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0xc +#define MMEA6_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe +#define MMEA6_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0xf +#define MMEA6_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11 +#define MMEA6_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x12 +#define MMEA6_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14 +#define MMEA6_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x15 +#define MMEA6_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x17 +#define MMEA6_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L +#define MMEA6_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L +#define MMEA6_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L +#define MMEA6_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L +#define MMEA6_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L +#define MMEA6_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L +#define MMEA6_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L +#define MMEA6_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L +#define MMEA6_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L +#define MMEA6_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L +#define MMEA6_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L +#define MMEA6_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L +#define MMEA6_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L +#define MMEA6_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L +#define MMEA6_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00600000L +#define MMEA6_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00800000L +//MMEA6_DSM_CNTLA +#define MMEA6_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 +#define MMEA6_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 +#define MMEA6_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x3 +#define MMEA6_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5 +#define MMEA6_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x6 +#define MMEA6_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8 +#define MMEA6_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x9 +#define MMEA6_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb +#define MMEA6_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0xc +#define MMEA6_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe +#define MMEA6_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0xf +#define MMEA6_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11 +#define MMEA6_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x12 +#define 
MMEA6_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14 +#define MMEA6_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L +#define MMEA6_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L +#define MMEA6_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L +#define MMEA6_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L +#define MMEA6_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L +#define MMEA6_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L +#define MMEA6_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L +#define MMEA6_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L +#define MMEA6_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L +#define MMEA6_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L +#define MMEA6_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L +#define MMEA6_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L +#define MMEA6_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L +#define MMEA6_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L +//MMEA6_DSM_CNTL2 +#define MMEA6_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x0 +#define MMEA6_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x2 +#define MMEA6_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x3 +#define MMEA6_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x5 +#define MMEA6_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x6 +#define MMEA6_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x8 +#define MMEA6_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0x9 +#define MMEA6_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xb +#define MMEA6_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0xc +#define MMEA6_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xe +#define MMEA6_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0xf +#define MMEA6_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x11 +#define MMEA6_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x12 +#define MMEA6_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x14 +#define MMEA6_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x15 +#define MMEA6_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x17 +#define MMEA6_DSM_CNTL2__INJECT_DELAY__SHIFT 0x1a +#define MMEA6_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L +#define MMEA6_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000004L +#define MMEA6_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L +#define MMEA6_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000020L +#define MMEA6_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L +#define MMEA6_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00000100L +#define MMEA6_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L +#define MMEA6_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00000800L +#define MMEA6_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L +#define MMEA6_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00004000L +#define MMEA6_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L +#define MMEA6_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00020000L +#define MMEA6_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L +#define MMEA6_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00100000L +#define MMEA6_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00600000L +#define MMEA6_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00800000L 
+#define MMEA6_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L +//MMEA6_DSM_CNTL2A +#define MMEA6_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x0 +#define MMEA6_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x2 +#define MMEA6_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x3 +#define MMEA6_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x5 +#define MMEA6_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x6 +#define MMEA6_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x8 +#define MMEA6_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x9 +#define MMEA6_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0xb +#define MMEA6_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0xc +#define MMEA6_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0xe +#define MMEA6_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0xf +#define MMEA6_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x11 +#define MMEA6_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x12 +#define MMEA6_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x14 +#define MMEA6_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L +#define MMEA6_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000004L +#define MMEA6_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L +#define MMEA6_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000020L +#define MMEA6_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L +#define MMEA6_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000100L +#define MMEA6_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L +#define MMEA6_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000800L +#define MMEA6_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L +#define MMEA6_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00004000L +#define MMEA6_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L +#define MMEA6_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00020000L +#define MMEA6_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L +#define MMEA6_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00100000L +//MMEA6_CGTT_CLK_CTRL +#define MMEA6_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define MMEA6_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define MMEA6_CGTT_CLK_CTRL__SPARE0__SHIFT 0xc +#define MMEA6_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE__SHIFT 0x14 +#define MMEA6_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ__SHIFT 0x15 +#define MMEA6_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN__SHIFT 0x16 +#define MMEA6_CGTT_CLK_CTRL__SPARE1__SHIFT 0x17 +#define MMEA6_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define MMEA6_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE__SHIFT 0x1c +#define MMEA6_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ__SHIFT 0x1d +#define MMEA6_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN__SHIFT 0x1e +#define MMEA6_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER__SHIFT 0x1f +#define MMEA6_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define MMEA6_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define MMEA6_CGTT_CLK_CTRL__SPARE0_MASK 0x000FF000L +#define MMEA6_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE_MASK 0x00100000L +#define MMEA6_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ_MASK 0x00200000L +#define MMEA6_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN_MASK 0x00400000L +#define MMEA6_CGTT_CLK_CTRL__SPARE1_MASK 0x07800000L +#define MMEA6_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define MMEA6_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE_MASK 0x10000000L +#define MMEA6_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ_MASK 0x20000000L +#define 
MMEA6_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN_MASK 0x40000000L +#define MMEA6_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER_MASK 0x80000000L +//MMEA6_EDC_MODE +#define MMEA6_EDC_MODE__COUNT_FED_OUT__SHIFT 0x10 +#define MMEA6_EDC_MODE__GATE_FUE__SHIFT 0x11 +#define MMEA6_EDC_MODE__DED_MODE__SHIFT 0x14 +#define MMEA6_EDC_MODE__PROP_FED__SHIFT 0x1d +#define MMEA6_EDC_MODE__BYPASS__SHIFT 0x1f +#define MMEA6_EDC_MODE__COUNT_FED_OUT_MASK 0x00010000L +#define MMEA6_EDC_MODE__GATE_FUE_MASK 0x00020000L +#define MMEA6_EDC_MODE__DED_MODE_MASK 0x00300000L +#define MMEA6_EDC_MODE__PROP_FED_MASK 0x20000000L +#define MMEA6_EDC_MODE__BYPASS_MASK 0x80000000L +//MMEA6_ERR_STATUS +#define MMEA6_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0 +#define MMEA6_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT 0x4 +#define MMEA6_ERR_STATUS__SDP_RDRSP_DATASTATUS__SHIFT 0x8 +#define MMEA6_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT 0xa +#define MMEA6_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xb +#define MMEA6_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xc +#define MMEA6_ERR_STATUS__FUE_FLAG__SHIFT 0xd +#define MMEA6_ERR_STATUS__SDP_RDRSP_STATUS_MASK 0x0000000FL +#define MMEA6_ERR_STATUS__SDP_WRRSP_STATUS_MASK 0x000000F0L +#define MMEA6_ERR_STATUS__SDP_RDRSP_DATASTATUS_MASK 0x00000300L +#define MMEA6_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L +#define MMEA6_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00000800L +#define MMEA6_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00001000L +#define MMEA6_ERR_STATUS__FUE_FLAG_MASK 0x00002000L +//MMEA6_MISC2 +#define MMEA6_MISC2__CSGROUP_SWAP_IN_DRAM_ARB__SHIFT 0x0 +#define MMEA6_MISC2__CSGROUP_SWAP_IN_GMI_ARB__SHIFT 0x1 +#define MMEA6_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM__SHIFT 0x2 +#define MMEA6_MISC2__CSGRP_BURST_LIMIT_DATA_GMI__SHIFT 0x7 +#define MMEA6_MISC2__IO_RDWR_PRIORITY_ENABLE__SHIFT 0xc +#define MMEA6_MISC2__RRET_SWAP_MODE__SHIFT 0xd +#define MMEA6_MISC2__CSGROUP_SWAP_IN_DRAM_ARB_MASK 0x00000001L +#define MMEA6_MISC2__CSGROUP_SWAP_IN_GMI_ARB_MASK 0x00000002L +#define MMEA6_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM_MASK 0x0000007CL +#define MMEA6_MISC2__CSGRP_BURST_LIMIT_DATA_GMI_MASK 0x00000F80L +#define MMEA6_MISC2__IO_RDWR_PRIORITY_ENABLE_MASK 0x00001000L +#define MMEA6_MISC2__RRET_SWAP_MODE_MASK 0x00002000L +//MMEA6_ADDRDEC_SELECT +#define MMEA6_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_START__SHIFT 0x0 +#define MMEA6_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_END__SHIFT 0x5 +#define MMEA6_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_START__SHIFT 0xa +#define MMEA6_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_END__SHIFT 0xf +#define MMEA6_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_START_MASK 0x0000001FL +#define MMEA6_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_END_MASK 0x000003E0L +#define MMEA6_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_START_MASK 0x00007C00L +#define MMEA6_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_END_MASK 0x000F8000L +//MMEA6_EDC_CNT3 +#define MMEA6_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT__SHIFT 0x0 +#define MMEA6_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT__SHIFT 0x2 +#define MMEA6_EDC_CNT3__IORD_CMDMEM_DED_COUNT__SHIFT 0x4 +#define MMEA6_EDC_CNT3__IOWR_CMDMEM_DED_COUNT__SHIFT 0x6 +#define MMEA6_EDC_CNT3__IOWR_DATAMEM_DED_COUNT__SHIFT 0x8 +#define MMEA6_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT__SHIFT 0xa +#define MMEA6_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT__SHIFT 0xc +#define MMEA6_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT_MASK 0x00000003L +#define MMEA6_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT_MASK 0x0000000CL +#define MMEA6_EDC_CNT3__IORD_CMDMEM_DED_COUNT_MASK 0x00000030L +#define MMEA6_EDC_CNT3__IOWR_CMDMEM_DED_COUNT_MASK 0x000000C0L +#define MMEA6_EDC_CNT3__IOWR_DATAMEM_DED_COUNT_MASK 
0x00000300L +#define MMEA6_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT_MASK 0x00000C00L +#define MMEA6_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT_MASK 0x00003000L + + +// addressBlock: mmhub_ea_mmeadec7 +//MMEA7_DRAM_RD_CLI2GRP_MAP0 +#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0 +#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2 +#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4 +#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6 +#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8 +#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa +#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc +#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe +#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10 +#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12 +#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14 +#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16 +#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18 +#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a +#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c +#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e +#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L +#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL +#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L +#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L +#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L +#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L +#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L +#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L +#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L +#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L +#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L +#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L +#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L +#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L +#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L +#define MMEA7_DRAM_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L +//MMEA7_DRAM_RD_CLI2GRP_MAP1 +#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0 +#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2 +#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4 +#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6 +#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8 +#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa +#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc +#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe +#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10 +#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12 +#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14 +#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16 +#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18 +#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a +#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c +#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e +#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L +#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL +#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L +#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L +#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L +#define 
MMEA7_DRAM_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L +#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L +#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L +#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L +#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L +#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L +#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L +#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L +#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L +#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L +#define MMEA7_DRAM_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L +//MMEA7_DRAM_WR_CLI2GRP_MAP0 +#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0 +#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2 +#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4 +#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6 +#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8 +#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa +#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc +#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe +#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10 +#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12 +#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14 +#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16 +#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18 +#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a +#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c +#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e +#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L +#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL +#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L +#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L +#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L +#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L +#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L +#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L +#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L +#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L +#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L +#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L +#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L +#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L +#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L +#define MMEA7_DRAM_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L +//MMEA7_DRAM_WR_CLI2GRP_MAP1 +#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0 +#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2 +#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4 +#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6 +#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8 +#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa +#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc +#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe +#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10 +#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12 +#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14 +#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16 +#define 
MMEA7_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18 +#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a +#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c +#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e +#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L +#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL +#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L +#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L +#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L +#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L +#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L +#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L +#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L +#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L +#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L +#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L +#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L +#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L +#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L +#define MMEA7_DRAM_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L +//MMEA7_DRAM_RD_GRP2VC_MAP +#define MMEA7_DRAM_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0 +#define MMEA7_DRAM_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3 +#define MMEA7_DRAM_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6 +#define MMEA7_DRAM_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9 +#define MMEA7_DRAM_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L +#define MMEA7_DRAM_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L +#define MMEA7_DRAM_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L +#define MMEA7_DRAM_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L +//MMEA7_DRAM_WR_GRP2VC_MAP +#define MMEA7_DRAM_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0 +#define MMEA7_DRAM_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3 +#define MMEA7_DRAM_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6 +#define MMEA7_DRAM_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9 +#define MMEA7_DRAM_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L +#define MMEA7_DRAM_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L +#define MMEA7_DRAM_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L +#define MMEA7_DRAM_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L +//MMEA7_DRAM_RD_LAZY +#define MMEA7_DRAM_RD_LAZY__GROUP0_DELAY__SHIFT 0x0 +#define MMEA7_DRAM_RD_LAZY__GROUP1_DELAY__SHIFT 0x3 +#define MMEA7_DRAM_RD_LAZY__GROUP2_DELAY__SHIFT 0x6 +#define MMEA7_DRAM_RD_LAZY__GROUP3_DELAY__SHIFT 0x9 +#define MMEA7_DRAM_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc +#define MMEA7_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14 +#define MMEA7_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b +#define MMEA7_DRAM_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L +#define MMEA7_DRAM_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L +#define MMEA7_DRAM_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L +#define MMEA7_DRAM_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L +#define MMEA7_DRAM_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L +#define MMEA7_DRAM_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L +#define MMEA7_DRAM_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L +//MMEA7_DRAM_WR_LAZY +#define MMEA7_DRAM_WR_LAZY__GROUP0_DELAY__SHIFT 0x0 +#define MMEA7_DRAM_WR_LAZY__GROUP1_DELAY__SHIFT 0x3 +#define MMEA7_DRAM_WR_LAZY__GROUP2_DELAY__SHIFT 0x6 +#define MMEA7_DRAM_WR_LAZY__GROUP3_DELAY__SHIFT 0x9 +#define MMEA7_DRAM_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc +#define MMEA7_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14 +#define MMEA7_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b +#define 
MMEA7_DRAM_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L +#define MMEA7_DRAM_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L +#define MMEA7_DRAM_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L +#define MMEA7_DRAM_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L +#define MMEA7_DRAM_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L +#define MMEA7_DRAM_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L +#define MMEA7_DRAM_WR_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L +//MMEA7_DRAM_RD_CAM_CNTL +#define MMEA7_DRAM_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0 +#define MMEA7_DRAM_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4 +#define MMEA7_DRAM_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8 +#define MMEA7_DRAM_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc +#define MMEA7_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10 +#define MMEA7_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13 +#define MMEA7_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16 +#define MMEA7_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19 +#define MMEA7_DRAM_RD_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c +#define MMEA7_DRAM_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL +#define MMEA7_DRAM_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L +#define MMEA7_DRAM_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L +#define MMEA7_DRAM_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L +#define MMEA7_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L +#define MMEA7_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L +#define MMEA7_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L +#define MMEA7_DRAM_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L +#define MMEA7_DRAM_RD_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L +//MMEA7_DRAM_WR_CAM_CNTL +#define MMEA7_DRAM_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0 +#define MMEA7_DRAM_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4 +#define MMEA7_DRAM_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8 +#define MMEA7_DRAM_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc +#define MMEA7_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10 +#define MMEA7_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13 +#define MMEA7_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16 +#define MMEA7_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19 +#define MMEA7_DRAM_WR_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c +#define MMEA7_DRAM_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL +#define MMEA7_DRAM_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L +#define MMEA7_DRAM_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L +#define MMEA7_DRAM_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L +#define MMEA7_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L +#define MMEA7_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L +#define MMEA7_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L +#define MMEA7_DRAM_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L +#define MMEA7_DRAM_WR_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L +//MMEA7_DRAM_PAGE_BURST +#define MMEA7_DRAM_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0 +#define MMEA7_DRAM_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8 +#define MMEA7_DRAM_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10 +#define MMEA7_DRAM_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18 +#define MMEA7_DRAM_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL +#define MMEA7_DRAM_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L +#define MMEA7_DRAM_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L +#define MMEA7_DRAM_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L +//MMEA7_DRAM_RD_PRI_AGE +#define MMEA7_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0 +#define MMEA7_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3 +#define MMEA7_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6 +#define MMEA7_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9 +#define 
MMEA7_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc +#define MMEA7_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf +#define MMEA7_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12 +#define MMEA7_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15 +#define MMEA7_DRAM_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L +#define MMEA7_DRAM_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L +#define MMEA7_DRAM_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L +#define MMEA7_DRAM_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L +#define MMEA7_DRAM_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L +#define MMEA7_DRAM_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L +#define MMEA7_DRAM_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L +#define MMEA7_DRAM_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L +//MMEA7_DRAM_WR_PRI_AGE +#define MMEA7_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0 +#define MMEA7_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3 +#define MMEA7_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6 +#define MMEA7_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9 +#define MMEA7_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc +#define MMEA7_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf +#define MMEA7_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12 +#define MMEA7_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15 +#define MMEA7_DRAM_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L +#define MMEA7_DRAM_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L +#define MMEA7_DRAM_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L +#define MMEA7_DRAM_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L +#define MMEA7_DRAM_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L +#define MMEA7_DRAM_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L +#define MMEA7_DRAM_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L +#define MMEA7_DRAM_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L +//MMEA7_DRAM_RD_PRI_QUEUING +#define MMEA7_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0 +#define MMEA7_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3 +#define MMEA7_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6 +#define MMEA7_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9 +#define MMEA7_DRAM_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L +#define MMEA7_DRAM_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L +#define MMEA7_DRAM_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L +#define MMEA7_DRAM_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L +//MMEA7_DRAM_WR_PRI_QUEUING +#define MMEA7_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0 +#define MMEA7_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3 +#define MMEA7_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6 +#define MMEA7_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9 +#define MMEA7_DRAM_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L +#define MMEA7_DRAM_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L +#define MMEA7_DRAM_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L +#define MMEA7_DRAM_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L +//MMEA7_DRAM_RD_PRI_FIXED +#define MMEA7_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0 +#define MMEA7_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3 +#define MMEA7_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6 +#define MMEA7_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9 +#define 
MMEA7_DRAM_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L +#define MMEA7_DRAM_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L +#define MMEA7_DRAM_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L +#define MMEA7_DRAM_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L +//MMEA7_DRAM_WR_PRI_FIXED +#define MMEA7_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0 +#define MMEA7_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3 +#define MMEA7_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6 +#define MMEA7_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9 +#define MMEA7_DRAM_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L +#define MMEA7_DRAM_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L +#define MMEA7_DRAM_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L +#define MMEA7_DRAM_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L +//MMEA7_DRAM_RD_PRI_URGENCY +#define MMEA7_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0 +#define MMEA7_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3 +#define MMEA7_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6 +#define MMEA7_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9 +#define MMEA7_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc +#define MMEA7_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd +#define MMEA7_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe +#define MMEA7_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf +#define MMEA7_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L +#define MMEA7_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L +#define MMEA7_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L +#define MMEA7_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L +#define MMEA7_DRAM_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L +#define MMEA7_DRAM_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L +#define MMEA7_DRAM_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L +#define MMEA7_DRAM_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L +//MMEA7_DRAM_WR_PRI_URGENCY +#define MMEA7_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0 +#define MMEA7_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3 +#define MMEA7_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6 +#define MMEA7_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9 +#define MMEA7_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc +#define MMEA7_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd +#define MMEA7_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe +#define MMEA7_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf +#define MMEA7_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L +#define MMEA7_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L +#define MMEA7_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L +#define MMEA7_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L +#define MMEA7_DRAM_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L +#define MMEA7_DRAM_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L +#define MMEA7_DRAM_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L +#define MMEA7_DRAM_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L +//MMEA7_DRAM_RD_PRI_QUANT_PRI1 +#define MMEA7_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA7_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA7_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10 
+#define MMEA7_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA7_DRAM_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA7_DRAM_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA7_DRAM_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA7_DRAM_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA7_DRAM_RD_PRI_QUANT_PRI2 +#define MMEA7_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA7_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA7_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA7_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA7_DRAM_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA7_DRAM_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA7_DRAM_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA7_DRAM_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA7_DRAM_RD_PRI_QUANT_PRI3 +#define MMEA7_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA7_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA7_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA7_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA7_DRAM_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA7_DRAM_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA7_DRAM_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA7_DRAM_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA7_DRAM_WR_PRI_QUANT_PRI1 +#define MMEA7_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA7_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA7_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA7_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA7_DRAM_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA7_DRAM_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA7_DRAM_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA7_DRAM_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA7_DRAM_WR_PRI_QUANT_PRI2 +#define MMEA7_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA7_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA7_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA7_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA7_DRAM_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA7_DRAM_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA7_DRAM_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA7_DRAM_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA7_DRAM_WR_PRI_QUANT_PRI3 +#define MMEA7_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA7_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA7_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA7_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA7_DRAM_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA7_DRAM_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA7_DRAM_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA7_DRAM_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA7_GMI_RD_CLI2GRP_MAP0 +#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0 +#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2 +#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4 +#define 
MMEA7_GMI_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6 +#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8 +#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa +#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc +#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe +#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10 +#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12 +#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14 +#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16 +#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18 +#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a +#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c +#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e +#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L +#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL +#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L +#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L +#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L +#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L +#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L +#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L +#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L +#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L +#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L +#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L +#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L +#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L +#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L +#define MMEA7_GMI_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L +//MMEA7_GMI_RD_CLI2GRP_MAP1 +#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0 +#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2 +#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4 +#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6 +#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8 +#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa +#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc +#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe +#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10 +#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12 +#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14 +#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16 +#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18 +#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a +#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c +#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e +#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L +#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL +#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L +#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L +#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L +#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L +#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L +#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L +#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L +#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L +#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L +#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L 
+#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L +#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L +#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L +#define MMEA7_GMI_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L +//MMEA7_GMI_WR_CLI2GRP_MAP0 +#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0 +#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2 +#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4 +#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6 +#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8 +#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa +#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc +#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe +#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10 +#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12 +#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14 +#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16 +#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18 +#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a +#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c +#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e +#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L +#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL +#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L +#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L +#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L +#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L +#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L +#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L +#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L +#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L +#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L +#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L +#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L +#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L +#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L +#define MMEA7_GMI_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L +//MMEA7_GMI_WR_CLI2GRP_MAP1 +#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0 +#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2 +#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4 +#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6 +#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8 +#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa +#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc +#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe +#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10 +#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12 +#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14 +#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16 +#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18 +#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a +#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c +#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e +#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L +#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL +#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L +#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L +#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 
0x00000300L +#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L +#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L +#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L +#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L +#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L +#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L +#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L +#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L +#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L +#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L +#define MMEA7_GMI_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L +//MMEA7_GMI_RD_GRP2VC_MAP +#define MMEA7_GMI_RD_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0 +#define MMEA7_GMI_RD_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3 +#define MMEA7_GMI_RD_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6 +#define MMEA7_GMI_RD_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9 +#define MMEA7_GMI_RD_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L +#define MMEA7_GMI_RD_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L +#define MMEA7_GMI_RD_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L +#define MMEA7_GMI_RD_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L +//MMEA7_GMI_WR_GRP2VC_MAP +#define MMEA7_GMI_WR_GRP2VC_MAP__GROUP0_VC__SHIFT 0x0 +#define MMEA7_GMI_WR_GRP2VC_MAP__GROUP1_VC__SHIFT 0x3 +#define MMEA7_GMI_WR_GRP2VC_MAP__GROUP2_VC__SHIFT 0x6 +#define MMEA7_GMI_WR_GRP2VC_MAP__GROUP3_VC__SHIFT 0x9 +#define MMEA7_GMI_WR_GRP2VC_MAP__GROUP0_VC_MASK 0x00000007L +#define MMEA7_GMI_WR_GRP2VC_MAP__GROUP1_VC_MASK 0x00000038L +#define MMEA7_GMI_WR_GRP2VC_MAP__GROUP2_VC_MASK 0x000001C0L +#define MMEA7_GMI_WR_GRP2VC_MAP__GROUP3_VC_MASK 0x00000E00L +//MMEA7_GMI_RD_LAZY +#define MMEA7_GMI_RD_LAZY__GROUP0_DELAY__SHIFT 0x0 +#define MMEA7_GMI_RD_LAZY__GROUP1_DELAY__SHIFT 0x3 +#define MMEA7_GMI_RD_LAZY__GROUP2_DELAY__SHIFT 0x6 +#define MMEA7_GMI_RD_LAZY__GROUP3_DELAY__SHIFT 0x9 +#define MMEA7_GMI_RD_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc +#define MMEA7_GMI_RD_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14 +#define MMEA7_GMI_RD_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b +#define MMEA7_GMI_RD_LAZY__GROUP0_DELAY_MASK 0x00000007L +#define MMEA7_GMI_RD_LAZY__GROUP1_DELAY_MASK 0x00000038L +#define MMEA7_GMI_RD_LAZY__GROUP2_DELAY_MASK 0x000001C0L +#define MMEA7_GMI_RD_LAZY__GROUP3_DELAY_MASK 0x00000E00L +#define MMEA7_GMI_RD_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L +#define MMEA7_GMI_RD_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L +#define MMEA7_GMI_RD_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L +//MMEA7_GMI_WR_LAZY +#define MMEA7_GMI_WR_LAZY__GROUP0_DELAY__SHIFT 0x0 +#define MMEA7_GMI_WR_LAZY__GROUP1_DELAY__SHIFT 0x3 +#define MMEA7_GMI_WR_LAZY__GROUP2_DELAY__SHIFT 0x6 +#define MMEA7_GMI_WR_LAZY__GROUP3_DELAY__SHIFT 0x9 +#define MMEA7_GMI_WR_LAZY__REQ_ACCUM_THRESH__SHIFT 0xc +#define MMEA7_GMI_WR_LAZY__REQ_ACCUM_TIMEOUT__SHIFT 0x14 +#define MMEA7_GMI_WR_LAZY__REQ_ACCUM_IDLEMAX__SHIFT 0x1b +#define MMEA7_GMI_WR_LAZY__GROUP0_DELAY_MASK 0x00000007L +#define MMEA7_GMI_WR_LAZY__GROUP1_DELAY_MASK 0x00000038L +#define MMEA7_GMI_WR_LAZY__GROUP2_DELAY_MASK 0x000001C0L +#define MMEA7_GMI_WR_LAZY__GROUP3_DELAY_MASK 0x00000E00L +#define MMEA7_GMI_WR_LAZY__REQ_ACCUM_THRESH_MASK 0x0003F000L +#define MMEA7_GMI_WR_LAZY__REQ_ACCUM_TIMEOUT_MASK 0x07F00000L +#define MMEA7_GMI_WR_LAZY__REQ_ACCUM_IDLEMAX_MASK 0x78000000L +//MMEA7_GMI_RD_CAM_CNTL +#define MMEA7_GMI_RD_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0 +#define MMEA7_GMI_RD_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4 +#define MMEA7_GMI_RD_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8 +#define 
MMEA7_GMI_RD_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc +#define MMEA7_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10 +#define MMEA7_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13 +#define MMEA7_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16 +#define MMEA7_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19 +#define MMEA7_GMI_RD_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c +#define MMEA7_GMI_RD_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d +#define MMEA7_GMI_RD_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL +#define MMEA7_GMI_RD_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L +#define MMEA7_GMI_RD_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L +#define MMEA7_GMI_RD_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L +#define MMEA7_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L +#define MMEA7_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L +#define MMEA7_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L +#define MMEA7_GMI_RD_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L +#define MMEA7_GMI_RD_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L +#define MMEA7_GMI_RD_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L +//MMEA7_GMI_WR_CAM_CNTL +#define MMEA7_GMI_WR_CAM_CNTL__DEPTH_GROUP0__SHIFT 0x0 +#define MMEA7_GMI_WR_CAM_CNTL__DEPTH_GROUP1__SHIFT 0x4 +#define MMEA7_GMI_WR_CAM_CNTL__DEPTH_GROUP2__SHIFT 0x8 +#define MMEA7_GMI_WR_CAM_CNTL__DEPTH_GROUP3__SHIFT 0xc +#define MMEA7_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP0__SHIFT 0x10 +#define MMEA7_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP1__SHIFT 0x13 +#define MMEA7_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP2__SHIFT 0x16 +#define MMEA7_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP3__SHIFT 0x19 +#define MMEA7_GMI_WR_CAM_CNTL__REFILL_CHAIN__SHIFT 0x1c +#define MMEA7_GMI_WR_CAM_CNTL__PAGEBASED_CHAINING__SHIFT 0x1d +#define MMEA7_GMI_WR_CAM_CNTL__DEPTH_GROUP0_MASK 0x0000000FL +#define MMEA7_GMI_WR_CAM_CNTL__DEPTH_GROUP1_MASK 0x000000F0L +#define MMEA7_GMI_WR_CAM_CNTL__DEPTH_GROUP2_MASK 0x00000F00L +#define MMEA7_GMI_WR_CAM_CNTL__DEPTH_GROUP3_MASK 0x0000F000L +#define MMEA7_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP0_MASK 0x00070000L +#define MMEA7_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP1_MASK 0x00380000L +#define MMEA7_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP2_MASK 0x01C00000L +#define MMEA7_GMI_WR_CAM_CNTL__REORDER_LIMIT_GROUP3_MASK 0x0E000000L +#define MMEA7_GMI_WR_CAM_CNTL__REFILL_CHAIN_MASK 0x10000000L +#define MMEA7_GMI_WR_CAM_CNTL__PAGEBASED_CHAINING_MASK 0x20000000L +//MMEA7_GMI_PAGE_BURST +#define MMEA7_GMI_PAGE_BURST__RD_LIMIT_LO__SHIFT 0x0 +#define MMEA7_GMI_PAGE_BURST__RD_LIMIT_HI__SHIFT 0x8 +#define MMEA7_GMI_PAGE_BURST__WR_LIMIT_LO__SHIFT 0x10 +#define MMEA7_GMI_PAGE_BURST__WR_LIMIT_HI__SHIFT 0x18 +#define MMEA7_GMI_PAGE_BURST__RD_LIMIT_LO_MASK 0x000000FFL +#define MMEA7_GMI_PAGE_BURST__RD_LIMIT_HI_MASK 0x0000FF00L +#define MMEA7_GMI_PAGE_BURST__WR_LIMIT_LO_MASK 0x00FF0000L +#define MMEA7_GMI_PAGE_BURST__WR_LIMIT_HI_MASK 0xFF000000L +//MMEA7_GMI_RD_PRI_AGE +#define MMEA7_GMI_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0 +#define MMEA7_GMI_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3 +#define MMEA7_GMI_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6 +#define MMEA7_GMI_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9 +#define MMEA7_GMI_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc +#define MMEA7_GMI_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf +#define MMEA7_GMI_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12 +#define MMEA7_GMI_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15 +#define MMEA7_GMI_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L +#define MMEA7_GMI_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L +#define 
MMEA7_GMI_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L +#define MMEA7_GMI_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L +#define MMEA7_GMI_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L +#define MMEA7_GMI_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L +#define MMEA7_GMI_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L +#define MMEA7_GMI_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L +//MMEA7_GMI_WR_PRI_AGE +#define MMEA7_GMI_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0 +#define MMEA7_GMI_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3 +#define MMEA7_GMI_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6 +#define MMEA7_GMI_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9 +#define MMEA7_GMI_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc +#define MMEA7_GMI_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf +#define MMEA7_GMI_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12 +#define MMEA7_GMI_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15 +#define MMEA7_GMI_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L +#define MMEA7_GMI_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L +#define MMEA7_GMI_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L +#define MMEA7_GMI_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L +#define MMEA7_GMI_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L +#define MMEA7_GMI_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L +#define MMEA7_GMI_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L +#define MMEA7_GMI_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L +//MMEA7_GMI_RD_PRI_QUEUING +#define MMEA7_GMI_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0 +#define MMEA7_GMI_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3 +#define MMEA7_GMI_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6 +#define MMEA7_GMI_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9 +#define MMEA7_GMI_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L +#define MMEA7_GMI_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L +#define MMEA7_GMI_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L +#define MMEA7_GMI_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L +//MMEA7_GMI_WR_PRI_QUEUING +#define MMEA7_GMI_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0 +#define MMEA7_GMI_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3 +#define MMEA7_GMI_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6 +#define MMEA7_GMI_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9 +#define MMEA7_GMI_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L +#define MMEA7_GMI_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L +#define MMEA7_GMI_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L +#define MMEA7_GMI_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L +//MMEA7_GMI_RD_PRI_FIXED +#define MMEA7_GMI_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0 +#define MMEA7_GMI_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3 +#define MMEA7_GMI_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6 +#define MMEA7_GMI_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9 +#define MMEA7_GMI_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L +#define MMEA7_GMI_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L +#define MMEA7_GMI_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L +#define MMEA7_GMI_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L +//MMEA7_GMI_WR_PRI_FIXED +#define MMEA7_GMI_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0 +#define MMEA7_GMI_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3 +#define 
MMEA7_GMI_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6 +#define MMEA7_GMI_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9 +#define MMEA7_GMI_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L +#define MMEA7_GMI_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L +#define MMEA7_GMI_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L +#define MMEA7_GMI_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L +//MMEA7_GMI_RD_PRI_URGENCY +#define MMEA7_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0 +#define MMEA7_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3 +#define MMEA7_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6 +#define MMEA7_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9 +#define MMEA7_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc +#define MMEA7_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd +#define MMEA7_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe +#define MMEA7_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf +#define MMEA7_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L +#define MMEA7_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L +#define MMEA7_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L +#define MMEA7_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L +#define MMEA7_GMI_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L +#define MMEA7_GMI_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L +#define MMEA7_GMI_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L +#define MMEA7_GMI_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L +//MMEA7_GMI_WR_PRI_URGENCY +#define MMEA7_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0 +#define MMEA7_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3 +#define MMEA7_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6 +#define MMEA7_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9 +#define MMEA7_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc +#define MMEA7_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd +#define MMEA7_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe +#define MMEA7_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf +#define MMEA7_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L +#define MMEA7_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L +#define MMEA7_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L +#define MMEA7_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L +#define MMEA7_GMI_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L +#define MMEA7_GMI_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L +#define MMEA7_GMI_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L +#define MMEA7_GMI_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L +//MMEA7_GMI_RD_PRI_URGENCY_MASKING +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0 +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1 +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2 +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3 +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4 +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5 +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6 +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7 +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8 +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9 +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa +#define 
MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10 +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11 +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12 +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13 +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14 +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15 +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16 +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17 +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18 +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19 +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 
0x40000000L +#define MMEA7_GMI_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L +//MMEA7_GMI_WR_PRI_URGENCY_MASKING +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0 +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1 +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2 +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3 +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4 +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5 +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6 +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7 +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8 +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9 +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10 +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11 +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12 +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13 +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14 +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15 +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16 +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17 +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18 +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19 +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L +#define 
MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L +#define MMEA7_GMI_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L +//MMEA7_GMI_RD_PRI_QUANT_PRI1 +#define MMEA7_GMI_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA7_GMI_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA7_GMI_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA7_GMI_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA7_GMI_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA7_GMI_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA7_GMI_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA7_GMI_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA7_GMI_RD_PRI_QUANT_PRI2 +#define MMEA7_GMI_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA7_GMI_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA7_GMI_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA7_GMI_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA7_GMI_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA7_GMI_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA7_GMI_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA7_GMI_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA7_GMI_RD_PRI_QUANT_PRI3 +#define MMEA7_GMI_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA7_GMI_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA7_GMI_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA7_GMI_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA7_GMI_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA7_GMI_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA7_GMI_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA7_GMI_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA7_GMI_WR_PRI_QUANT_PRI1 +#define MMEA7_GMI_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA7_GMI_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA7_GMI_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA7_GMI_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA7_GMI_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA7_GMI_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA7_GMI_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA7_GMI_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA7_GMI_WR_PRI_QUANT_PRI2 +#define MMEA7_GMI_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA7_GMI_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA7_GMI_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA7_GMI_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18 
+#define MMEA7_GMI_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA7_GMI_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA7_GMI_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA7_GMI_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA7_GMI_WR_PRI_QUANT_PRI3 +#define MMEA7_GMI_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA7_GMI_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA7_GMI_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA7_GMI_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA7_GMI_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA7_GMI_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA7_GMI_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA7_GMI_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA7_ADDRNORM_BASE_ADDR0 +#define MMEA7_ADDRNORM_BASE_ADDR0__ADDR_RNG_VAL__SHIFT 0x0 +#define MMEA7_ADDRNORM_BASE_ADDR0__LGCY_MMIO_HOLE_EN__SHIFT 0x1 +#define MMEA7_ADDRNORM_BASE_ADDR0__INTLV_NUM_CHAN__SHIFT 0x2 +#define MMEA7_ADDRNORM_BASE_ADDR0__INTLV_NUM_DIES__SHIFT 0x6 +#define MMEA7_ADDRNORM_BASE_ADDR0__INTLV_NUM_SOCKETS__SHIFT 0x8 +#define MMEA7_ADDRNORM_BASE_ADDR0__INTLV_ADDR_SEL__SHIFT 0x9 +#define MMEA7_ADDRNORM_BASE_ADDR0__BASE_ADDR__SHIFT 0xc +#define MMEA7_ADDRNORM_BASE_ADDR0__ADDR_RNG_VAL_MASK 0x00000001L +#define MMEA7_ADDRNORM_BASE_ADDR0__LGCY_MMIO_HOLE_EN_MASK 0x00000002L +#define MMEA7_ADDRNORM_BASE_ADDR0__INTLV_NUM_CHAN_MASK 0x0000003CL +#define MMEA7_ADDRNORM_BASE_ADDR0__INTLV_NUM_DIES_MASK 0x000000C0L +#define MMEA7_ADDRNORM_BASE_ADDR0__INTLV_NUM_SOCKETS_MASK 0x00000100L +#define MMEA7_ADDRNORM_BASE_ADDR0__INTLV_ADDR_SEL_MASK 0x00000E00L +#define MMEA7_ADDRNORM_BASE_ADDR0__BASE_ADDR_MASK 0xFFFFF000L +//MMEA7_ADDRNORM_LIMIT_ADDR0 +#define MMEA7_ADDRNORM_LIMIT_ADDR0__DST_FABRIC_ID__SHIFT 0x0 +#define MMEA7_ADDRNORM_LIMIT_ADDR0__LIMIT_ADDR__SHIFT 0xc +#define MMEA7_ADDRNORM_LIMIT_ADDR0__DST_FABRIC_ID_MASK 0x0000001FL +#define MMEA7_ADDRNORM_LIMIT_ADDR0__LIMIT_ADDR_MASK 0xFFFFF000L +//MMEA7_ADDRNORM_BASE_ADDR1 +#define MMEA7_ADDRNORM_BASE_ADDR1__ADDR_RNG_VAL__SHIFT 0x0 +#define MMEA7_ADDRNORM_BASE_ADDR1__LGCY_MMIO_HOLE_EN__SHIFT 0x1 +#define MMEA7_ADDRNORM_BASE_ADDR1__INTLV_NUM_CHAN__SHIFT 0x2 +#define MMEA7_ADDRNORM_BASE_ADDR1__INTLV_NUM_DIES__SHIFT 0x6 +#define MMEA7_ADDRNORM_BASE_ADDR1__INTLV_NUM_SOCKETS__SHIFT 0x8 +#define MMEA7_ADDRNORM_BASE_ADDR1__INTLV_ADDR_SEL__SHIFT 0x9 +#define MMEA7_ADDRNORM_BASE_ADDR1__BASE_ADDR__SHIFT 0xc +#define MMEA7_ADDRNORM_BASE_ADDR1__ADDR_RNG_VAL_MASK 0x00000001L +#define MMEA7_ADDRNORM_BASE_ADDR1__LGCY_MMIO_HOLE_EN_MASK 0x00000002L +#define MMEA7_ADDRNORM_BASE_ADDR1__INTLV_NUM_CHAN_MASK 0x0000003CL +#define MMEA7_ADDRNORM_BASE_ADDR1__INTLV_NUM_DIES_MASK 0x000000C0L +#define MMEA7_ADDRNORM_BASE_ADDR1__INTLV_NUM_SOCKETS_MASK 0x00000100L +#define MMEA7_ADDRNORM_BASE_ADDR1__INTLV_ADDR_SEL_MASK 0x00000E00L +#define MMEA7_ADDRNORM_BASE_ADDR1__BASE_ADDR_MASK 0xFFFFF000L +//MMEA7_ADDRNORM_LIMIT_ADDR1 +#define MMEA7_ADDRNORM_LIMIT_ADDR1__DST_FABRIC_ID__SHIFT 0x0 +#define MMEA7_ADDRNORM_LIMIT_ADDR1__LIMIT_ADDR__SHIFT 0xc +#define MMEA7_ADDRNORM_LIMIT_ADDR1__DST_FABRIC_ID_MASK 0x0000001FL +#define MMEA7_ADDRNORM_LIMIT_ADDR1__LIMIT_ADDR_MASK 0xFFFFF000L +//MMEA7_ADDRNORM_OFFSET_ADDR1 +#define MMEA7_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_EN__SHIFT 0x0 +#define MMEA7_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET__SHIFT 0x14 +#define MMEA7_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_EN_MASK 0x00000001L 
+#define MMEA7_ADDRNORM_OFFSET_ADDR1__HI_ADDR_OFFSET_MASK 0xFFF00000L +//MMEA7_ADDRNORM_BASE_ADDR2 +#define MMEA7_ADDRNORM_BASE_ADDR2__ADDR_RNG_VAL__SHIFT 0x0 +#define MMEA7_ADDRNORM_BASE_ADDR2__LGCY_MMIO_HOLE_EN__SHIFT 0x1 +#define MMEA7_ADDRNORM_BASE_ADDR2__INTLV_NUM_CHAN__SHIFT 0x2 +#define MMEA7_ADDRNORM_BASE_ADDR2__INTLV_NUM_DIES__SHIFT 0x6 +#define MMEA7_ADDRNORM_BASE_ADDR2__INTLV_NUM_SOCKETS__SHIFT 0x8 +#define MMEA7_ADDRNORM_BASE_ADDR2__INTLV_ADDR_SEL__SHIFT 0x9 +#define MMEA7_ADDRNORM_BASE_ADDR2__BASE_ADDR__SHIFT 0xc +#define MMEA7_ADDRNORM_BASE_ADDR2__ADDR_RNG_VAL_MASK 0x00000001L +#define MMEA7_ADDRNORM_BASE_ADDR2__LGCY_MMIO_HOLE_EN_MASK 0x00000002L +#define MMEA7_ADDRNORM_BASE_ADDR2__INTLV_NUM_CHAN_MASK 0x0000003CL +#define MMEA7_ADDRNORM_BASE_ADDR2__INTLV_NUM_DIES_MASK 0x000000C0L +#define MMEA7_ADDRNORM_BASE_ADDR2__INTLV_NUM_SOCKETS_MASK 0x00000100L +#define MMEA7_ADDRNORM_BASE_ADDR2__INTLV_ADDR_SEL_MASK 0x00000E00L +#define MMEA7_ADDRNORM_BASE_ADDR2__BASE_ADDR_MASK 0xFFFFF000L +//MMEA7_ADDRNORM_LIMIT_ADDR2 +#define MMEA7_ADDRNORM_LIMIT_ADDR2__DST_FABRIC_ID__SHIFT 0x0 +#define MMEA7_ADDRNORM_LIMIT_ADDR2__LIMIT_ADDR__SHIFT 0xc +#define MMEA7_ADDRNORM_LIMIT_ADDR2__DST_FABRIC_ID_MASK 0x0000001FL +#define MMEA7_ADDRNORM_LIMIT_ADDR2__LIMIT_ADDR_MASK 0xFFFFF000L +//MMEA7_ADDRNORM_BASE_ADDR3 +#define MMEA7_ADDRNORM_BASE_ADDR3__ADDR_RNG_VAL__SHIFT 0x0 +#define MMEA7_ADDRNORM_BASE_ADDR3__LGCY_MMIO_HOLE_EN__SHIFT 0x1 +#define MMEA7_ADDRNORM_BASE_ADDR3__INTLV_NUM_CHAN__SHIFT 0x2 +#define MMEA7_ADDRNORM_BASE_ADDR3__INTLV_NUM_DIES__SHIFT 0x6 +#define MMEA7_ADDRNORM_BASE_ADDR3__INTLV_NUM_SOCKETS__SHIFT 0x8 +#define MMEA7_ADDRNORM_BASE_ADDR3__INTLV_ADDR_SEL__SHIFT 0x9 +#define MMEA7_ADDRNORM_BASE_ADDR3__BASE_ADDR__SHIFT 0xc +#define MMEA7_ADDRNORM_BASE_ADDR3__ADDR_RNG_VAL_MASK 0x00000001L +#define MMEA7_ADDRNORM_BASE_ADDR3__LGCY_MMIO_HOLE_EN_MASK 0x00000002L +#define MMEA7_ADDRNORM_BASE_ADDR3__INTLV_NUM_CHAN_MASK 0x0000003CL +#define MMEA7_ADDRNORM_BASE_ADDR3__INTLV_NUM_DIES_MASK 0x000000C0L +#define MMEA7_ADDRNORM_BASE_ADDR3__INTLV_NUM_SOCKETS_MASK 0x00000100L +#define MMEA7_ADDRNORM_BASE_ADDR3__INTLV_ADDR_SEL_MASK 0x00000E00L +#define MMEA7_ADDRNORM_BASE_ADDR3__BASE_ADDR_MASK 0xFFFFF000L +//MMEA7_ADDRNORM_LIMIT_ADDR3 +#define MMEA7_ADDRNORM_LIMIT_ADDR3__DST_FABRIC_ID__SHIFT 0x0 +#define MMEA7_ADDRNORM_LIMIT_ADDR3__LIMIT_ADDR__SHIFT 0xc +#define MMEA7_ADDRNORM_LIMIT_ADDR3__DST_FABRIC_ID_MASK 0x0000001FL +#define MMEA7_ADDRNORM_LIMIT_ADDR3__LIMIT_ADDR_MASK 0xFFFFF000L +//MMEA7_ADDRNORM_OFFSET_ADDR3 +#define MMEA7_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_EN__SHIFT 0x0 +#define MMEA7_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET__SHIFT 0x14 +#define MMEA7_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_EN_MASK 0x00000001L +#define MMEA7_ADDRNORM_OFFSET_ADDR3__HI_ADDR_OFFSET_MASK 0xFFF00000L +//MMEA7_ADDRNORM_BASE_ADDR4 +#define MMEA7_ADDRNORM_BASE_ADDR4__ADDR_RNG_VAL__SHIFT 0x0 +#define MMEA7_ADDRNORM_BASE_ADDR4__LGCY_MMIO_HOLE_EN__SHIFT 0x1 +#define MMEA7_ADDRNORM_BASE_ADDR4__INTLV_NUM_CHAN__SHIFT 0x2 +#define MMEA7_ADDRNORM_BASE_ADDR4__INTLV_NUM_DIES__SHIFT 0x6 +#define MMEA7_ADDRNORM_BASE_ADDR4__INTLV_NUM_SOCKETS__SHIFT 0x8 +#define MMEA7_ADDRNORM_BASE_ADDR4__INTLV_ADDR_SEL__SHIFT 0x9 +#define MMEA7_ADDRNORM_BASE_ADDR4__BASE_ADDR__SHIFT 0xc +#define MMEA7_ADDRNORM_BASE_ADDR4__ADDR_RNG_VAL_MASK 0x00000001L +#define MMEA7_ADDRNORM_BASE_ADDR4__LGCY_MMIO_HOLE_EN_MASK 0x00000002L +#define MMEA7_ADDRNORM_BASE_ADDR4__INTLV_NUM_CHAN_MASK 0x0000003CL +#define MMEA7_ADDRNORM_BASE_ADDR4__INTLV_NUM_DIES_MASK 0x000000C0L 
+#define MMEA7_ADDRNORM_BASE_ADDR4__INTLV_NUM_SOCKETS_MASK 0x00000100L +#define MMEA7_ADDRNORM_BASE_ADDR4__INTLV_ADDR_SEL_MASK 0x00000E00L +#define MMEA7_ADDRNORM_BASE_ADDR4__BASE_ADDR_MASK 0xFFFFF000L +//MMEA7_ADDRNORM_LIMIT_ADDR4 +#define MMEA7_ADDRNORM_LIMIT_ADDR4__DST_FABRIC_ID__SHIFT 0x0 +#define MMEA7_ADDRNORM_LIMIT_ADDR4__LIMIT_ADDR__SHIFT 0xc +#define MMEA7_ADDRNORM_LIMIT_ADDR4__DST_FABRIC_ID_MASK 0x0000001FL +#define MMEA7_ADDRNORM_LIMIT_ADDR4__LIMIT_ADDR_MASK 0xFFFFF000L +//MMEA7_ADDRNORM_BASE_ADDR5 +#define MMEA7_ADDRNORM_BASE_ADDR5__ADDR_RNG_VAL__SHIFT 0x0 +#define MMEA7_ADDRNORM_BASE_ADDR5__LGCY_MMIO_HOLE_EN__SHIFT 0x1 +#define MMEA7_ADDRNORM_BASE_ADDR5__INTLV_NUM_CHAN__SHIFT 0x2 +#define MMEA7_ADDRNORM_BASE_ADDR5__INTLV_NUM_DIES__SHIFT 0x6 +#define MMEA7_ADDRNORM_BASE_ADDR5__INTLV_NUM_SOCKETS__SHIFT 0x8 +#define MMEA7_ADDRNORM_BASE_ADDR5__INTLV_ADDR_SEL__SHIFT 0x9 +#define MMEA7_ADDRNORM_BASE_ADDR5__BASE_ADDR__SHIFT 0xc +#define MMEA7_ADDRNORM_BASE_ADDR5__ADDR_RNG_VAL_MASK 0x00000001L +#define MMEA7_ADDRNORM_BASE_ADDR5__LGCY_MMIO_HOLE_EN_MASK 0x00000002L +#define MMEA7_ADDRNORM_BASE_ADDR5__INTLV_NUM_CHAN_MASK 0x0000003CL +#define MMEA7_ADDRNORM_BASE_ADDR5__INTLV_NUM_DIES_MASK 0x000000C0L +#define MMEA7_ADDRNORM_BASE_ADDR5__INTLV_NUM_SOCKETS_MASK 0x00000100L +#define MMEA7_ADDRNORM_BASE_ADDR5__INTLV_ADDR_SEL_MASK 0x00000E00L +#define MMEA7_ADDRNORM_BASE_ADDR5__BASE_ADDR_MASK 0xFFFFF000L +//MMEA7_ADDRNORM_LIMIT_ADDR5 +#define MMEA7_ADDRNORM_LIMIT_ADDR5__DST_FABRIC_ID__SHIFT 0x0 +#define MMEA7_ADDRNORM_LIMIT_ADDR5__LIMIT_ADDR__SHIFT 0xc +#define MMEA7_ADDRNORM_LIMIT_ADDR5__DST_FABRIC_ID_MASK 0x0000001FL +#define MMEA7_ADDRNORM_LIMIT_ADDR5__LIMIT_ADDR_MASK 0xFFFFF000L +//MMEA7_ADDRNORM_OFFSET_ADDR5 +#define MMEA7_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_EN__SHIFT 0x0 +#define MMEA7_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET__SHIFT 0x14 +#define MMEA7_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_EN_MASK 0x00000001L +#define MMEA7_ADDRNORM_OFFSET_ADDR5__HI_ADDR_OFFSET_MASK 0xFFF00000L +//MMEA7_ADDRNORMDRAM_HOLE_CNTL +#define MMEA7_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_VALID__SHIFT 0x0 +#define MMEA7_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_OFFSET__SHIFT 0x7 +#define MMEA7_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_VALID_MASK 0x00000001L +#define MMEA7_ADDRNORMDRAM_HOLE_CNTL__DRAM_HOLE_OFFSET_MASK 0x0000FF80L +//MMEA7_ADDRNORMGMI_HOLE_CNTL +#define MMEA7_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_VALID__SHIFT 0x0 +#define MMEA7_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_OFFSET__SHIFT 0x7 +#define MMEA7_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_VALID_MASK 0x00000001L +#define MMEA7_ADDRNORMGMI_HOLE_CNTL__DRAM_HOLE_OFFSET_MASK 0x0000FF80L +//MMEA7_ADDRNORMDRAM_NP2_CHANNEL_CFG +#define MMEA7_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE0__SHIFT 0x0 +#define MMEA7_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE1__SHIFT 0x6 +#define MMEA7_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE0_MASK 0x0000003FL +#define MMEA7_ADDRNORMDRAM_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE1_MASK 0x00000FC0L +//MMEA7_ADDRNORMGMI_NP2_CHANNEL_CFG +#define MMEA7_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE2__SHIFT 0x0 +#define MMEA7_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE3__SHIFT 0x6 +#define MMEA7_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE2_MASK 0x0000003FL +#define MMEA7_ADDRNORMGMI_NP2_CHANNEL_CFG__LOG2_ADDR64K_SPACE3_MASK 0x00000FC0L +//MMEA7_ADDRDEC_BANK_CFG +#define MMEA7_ADDRDEC_BANK_CFG__BANK_MASK_DRAM__SHIFT 0x0 +#define MMEA7_ADDRDEC_BANK_CFG__BANK_MASK_GMI__SHIFT 0x6 +#define MMEA7_ADDRDEC_BANK_CFG__BANKGROUP_SEL_DRAM__SHIFT 0xc +#define 
MMEA7_ADDRDEC_BANK_CFG__BANKGROUP_SEL_GMI__SHIFT 0xf +#define MMEA7_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_DRAM__SHIFT 0x12 +#define MMEA7_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_GMI__SHIFT 0x13 +#define MMEA7_ADDRDEC_BANK_CFG__BANK_MASK_DRAM_MASK 0x0000003FL +#define MMEA7_ADDRDEC_BANK_CFG__BANK_MASK_GMI_MASK 0x00000FC0L +#define MMEA7_ADDRDEC_BANK_CFG__BANKGROUP_SEL_DRAM_MASK 0x00007000L +#define MMEA7_ADDRDEC_BANK_CFG__BANKGROUP_SEL_GMI_MASK 0x00038000L +#define MMEA7_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_DRAM_MASK 0x00040000L +#define MMEA7_ADDRDEC_BANK_CFG__BANKGROUP_INTERLEAVE_GMI_MASK 0x00080000L +//MMEA7_ADDRDEC_MISC_CFG +#define MMEA7_ADDRDEC_MISC_CFG__VCM_EN0__SHIFT 0x0 +#define MMEA7_ADDRDEC_MISC_CFG__VCM_EN1__SHIFT 0x1 +#define MMEA7_ADDRDEC_MISC_CFG__VCM_EN2__SHIFT 0x2 +#define MMEA7_ADDRDEC_MISC_CFG__PCH_MASK_DRAM__SHIFT 0x8 +#define MMEA7_ADDRDEC_MISC_CFG__PCH_MASK_GMI__SHIFT 0x9 +#define MMEA7_ADDRDEC_MISC_CFG__CH_MASK_DRAM__SHIFT 0xc +#define MMEA7_ADDRDEC_MISC_CFG__CH_MASK_GMI__SHIFT 0x11 +#define MMEA7_ADDRDEC_MISC_CFG__CS_MASK_DRAM__SHIFT 0x16 +#define MMEA7_ADDRDEC_MISC_CFG__CS_MASK_GMI__SHIFT 0x18 +#define MMEA7_ADDRDEC_MISC_CFG__RM_MASK_DRAM__SHIFT 0x1a +#define MMEA7_ADDRDEC_MISC_CFG__RM_MASK_GMI__SHIFT 0x1d +#define MMEA7_ADDRDEC_MISC_CFG__VCM_EN0_MASK 0x00000001L +#define MMEA7_ADDRDEC_MISC_CFG__VCM_EN1_MASK 0x00000002L +#define MMEA7_ADDRDEC_MISC_CFG__VCM_EN2_MASK 0x00000004L +#define MMEA7_ADDRDEC_MISC_CFG__PCH_MASK_DRAM_MASK 0x00000100L +#define MMEA7_ADDRDEC_MISC_CFG__PCH_MASK_GMI_MASK 0x00000200L +#define MMEA7_ADDRDEC_MISC_CFG__CH_MASK_DRAM_MASK 0x0001F000L +#define MMEA7_ADDRDEC_MISC_CFG__CH_MASK_GMI_MASK 0x003E0000L +#define MMEA7_ADDRDEC_MISC_CFG__CS_MASK_DRAM_MASK 0x00C00000L +#define MMEA7_ADDRDEC_MISC_CFG__CS_MASK_GMI_MASK 0x03000000L +#define MMEA7_ADDRDEC_MISC_CFG__RM_MASK_DRAM_MASK 0x1C000000L +#define MMEA7_ADDRDEC_MISC_CFG__RM_MASK_GMI_MASK 0xE0000000L +//MMEA7_ADDRDECDRAM_ADDR_HASH_BANK0 +#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK0__XOR_ENABLE__SHIFT 0x0 +#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK0__COL_XOR__SHIFT 0x1 +#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK0__ROW_XOR__SHIFT 0xe +#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK0__XOR_ENABLE_MASK 0x00000001L +#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK0__COL_XOR_MASK 0x00003FFEL +#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK0__ROW_XOR_MASK 0xFFFFC000L +//MMEA7_ADDRDECDRAM_ADDR_HASH_BANK1 +#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK1__XOR_ENABLE__SHIFT 0x0 +#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK1__COL_XOR__SHIFT 0x1 +#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK1__ROW_XOR__SHIFT 0xe +#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK1__XOR_ENABLE_MASK 0x00000001L +#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK1__COL_XOR_MASK 0x00003FFEL +#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK1__ROW_XOR_MASK 0xFFFFC000L +//MMEA7_ADDRDECDRAM_ADDR_HASH_BANK2 +#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK2__XOR_ENABLE__SHIFT 0x0 +#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK2__COL_XOR__SHIFT 0x1 +#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK2__ROW_XOR__SHIFT 0xe +#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK2__XOR_ENABLE_MASK 0x00000001L +#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK2__COL_XOR_MASK 0x00003FFEL +#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK2__ROW_XOR_MASK 0xFFFFC000L +//MMEA7_ADDRDECDRAM_ADDR_HASH_BANK3 +#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK3__XOR_ENABLE__SHIFT 0x0 +#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK3__COL_XOR__SHIFT 0x1 +#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK3__ROW_XOR__SHIFT 0xe +#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK3__XOR_ENABLE_MASK 0x00000001L +#define 
MMEA7_ADDRDECDRAM_ADDR_HASH_BANK3__COL_XOR_MASK 0x00003FFEL +#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK3__ROW_XOR_MASK 0xFFFFC000L +//MMEA7_ADDRDECDRAM_ADDR_HASH_BANK4 +#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK4__XOR_ENABLE__SHIFT 0x0 +#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK4__COL_XOR__SHIFT 0x1 +#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK4__ROW_XOR__SHIFT 0xe +#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK4__XOR_ENABLE_MASK 0x00000001L +#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK4__COL_XOR_MASK 0x00003FFEL +#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK4__ROW_XOR_MASK 0xFFFFC000L +//MMEA7_ADDRDECDRAM_ADDR_HASH_BANK5 +#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK5__XOR_ENABLE__SHIFT 0x0 +#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK5__COL_XOR__SHIFT 0x1 +#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK5__ROW_XOR__SHIFT 0xe +#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK5__XOR_ENABLE_MASK 0x00000001L +#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK5__COL_XOR_MASK 0x00003FFEL +#define MMEA7_ADDRDECDRAM_ADDR_HASH_BANK5__ROW_XOR_MASK 0xFFFFC000L +//MMEA7_ADDRDECDRAM_ADDR_HASH_PC +#define MMEA7_ADDRDECDRAM_ADDR_HASH_PC__XOR_ENABLE__SHIFT 0x0 +#define MMEA7_ADDRDECDRAM_ADDR_HASH_PC__COL_XOR__SHIFT 0x1 +#define MMEA7_ADDRDECDRAM_ADDR_HASH_PC__ROW_XOR__SHIFT 0xe +#define MMEA7_ADDRDECDRAM_ADDR_HASH_PC__XOR_ENABLE_MASK 0x00000001L +#define MMEA7_ADDRDECDRAM_ADDR_HASH_PC__COL_XOR_MASK 0x00003FFEL +#define MMEA7_ADDRDECDRAM_ADDR_HASH_PC__ROW_XOR_MASK 0xFFFFC000L +//MMEA7_ADDRDECDRAM_ADDR_HASH_PC2 +#define MMEA7_ADDRDECDRAM_ADDR_HASH_PC2__BANK_XOR__SHIFT 0x0 +#define MMEA7_ADDRDECDRAM_ADDR_HASH_PC2__BANK_XOR_MASK 0x0000003FL +//MMEA7_ADDRDECDRAM_ADDR_HASH_CS0 +#define MMEA7_ADDRDECDRAM_ADDR_HASH_CS0__XOR_ENABLE__SHIFT 0x0 +#define MMEA7_ADDRDECDRAM_ADDR_HASH_CS0__NA_XOR__SHIFT 0x1 +#define MMEA7_ADDRDECDRAM_ADDR_HASH_CS0__XOR_ENABLE_MASK 0x00000001L +#define MMEA7_ADDRDECDRAM_ADDR_HASH_CS0__NA_XOR_MASK 0xFFFFFFFEL +//MMEA7_ADDRDECDRAM_ADDR_HASH_CS1 +#define MMEA7_ADDRDECDRAM_ADDR_HASH_CS1__XOR_ENABLE__SHIFT 0x0 +#define MMEA7_ADDRDECDRAM_ADDR_HASH_CS1__NA_XOR__SHIFT 0x1 +#define MMEA7_ADDRDECDRAM_ADDR_HASH_CS1__XOR_ENABLE_MASK 0x00000001L +#define MMEA7_ADDRDECDRAM_ADDR_HASH_CS1__NA_XOR_MASK 0xFFFFFFFEL +//MMEA7_ADDRDECDRAM_HARVEST_ENABLE +#define MMEA7_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_EN__SHIFT 0x0 +#define MMEA7_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_VAL__SHIFT 0x1 +#define MMEA7_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_EN__SHIFT 0x2 +#define MMEA7_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_VAL__SHIFT 0x3 +#define MMEA7_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_EN__SHIFT 0x4 +#define MMEA7_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_VAL__SHIFT 0x5 +#define MMEA7_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_EN_MASK 0x00000001L +#define MMEA7_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B3_VAL_MASK 0x00000002L +#define MMEA7_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_EN_MASK 0x00000004L +#define MMEA7_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B4_VAL_MASK 0x00000008L +#define MMEA7_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_EN_MASK 0x00000010L +#define MMEA7_ADDRDECDRAM_HARVEST_ENABLE__FORCE_B5_VAL_MASK 0x00000020L +//MMEA7_ADDRDECGMI_ADDR_HASH_BANK0 +#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK0__XOR_ENABLE__SHIFT 0x0 +#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK0__COL_XOR__SHIFT 0x1 +#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK0__ROW_XOR__SHIFT 0xe +#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK0__XOR_ENABLE_MASK 0x00000001L +#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK0__COL_XOR_MASK 0x00003FFEL +#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK0__ROW_XOR_MASK 0xFFFFC000L +//MMEA7_ADDRDECGMI_ADDR_HASH_BANK1 +#define 
MMEA7_ADDRDECGMI_ADDR_HASH_BANK1__XOR_ENABLE__SHIFT 0x0 +#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK1__COL_XOR__SHIFT 0x1 +#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK1__ROW_XOR__SHIFT 0xe +#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK1__XOR_ENABLE_MASK 0x00000001L +#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK1__COL_XOR_MASK 0x00003FFEL +#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK1__ROW_XOR_MASK 0xFFFFC000L +//MMEA7_ADDRDECGMI_ADDR_HASH_BANK2 +#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK2__XOR_ENABLE__SHIFT 0x0 +#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK2__COL_XOR__SHIFT 0x1 +#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK2__ROW_XOR__SHIFT 0xe +#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK2__XOR_ENABLE_MASK 0x00000001L +#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK2__COL_XOR_MASK 0x00003FFEL +#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK2__ROW_XOR_MASK 0xFFFFC000L +//MMEA7_ADDRDECGMI_ADDR_HASH_BANK3 +#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK3__XOR_ENABLE__SHIFT 0x0 +#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK3__COL_XOR__SHIFT 0x1 +#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK3__ROW_XOR__SHIFT 0xe +#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK3__XOR_ENABLE_MASK 0x00000001L +#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK3__COL_XOR_MASK 0x00003FFEL +#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK3__ROW_XOR_MASK 0xFFFFC000L +//MMEA7_ADDRDECGMI_ADDR_HASH_BANK4 +#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK4__XOR_ENABLE__SHIFT 0x0 +#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK4__COL_XOR__SHIFT 0x1 +#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK4__ROW_XOR__SHIFT 0xe +#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK4__XOR_ENABLE_MASK 0x00000001L +#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK4__COL_XOR_MASK 0x00003FFEL +#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK4__ROW_XOR_MASK 0xFFFFC000L +//MMEA7_ADDRDECGMI_ADDR_HASH_BANK5 +#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK5__XOR_ENABLE__SHIFT 0x0 +#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK5__COL_XOR__SHIFT 0x1 +#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK5__ROW_XOR__SHIFT 0xe +#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK5__XOR_ENABLE_MASK 0x00000001L +#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK5__COL_XOR_MASK 0x00003FFEL +#define MMEA7_ADDRDECGMI_ADDR_HASH_BANK5__ROW_XOR_MASK 0xFFFFC000L +//MMEA7_ADDRDECGMI_ADDR_HASH_PC +#define MMEA7_ADDRDECGMI_ADDR_HASH_PC__XOR_ENABLE__SHIFT 0x0 +#define MMEA7_ADDRDECGMI_ADDR_HASH_PC__COL_XOR__SHIFT 0x1 +#define MMEA7_ADDRDECGMI_ADDR_HASH_PC__ROW_XOR__SHIFT 0xe +#define MMEA7_ADDRDECGMI_ADDR_HASH_PC__XOR_ENABLE_MASK 0x00000001L +#define MMEA7_ADDRDECGMI_ADDR_HASH_PC__COL_XOR_MASK 0x00003FFEL +#define MMEA7_ADDRDECGMI_ADDR_HASH_PC__ROW_XOR_MASK 0xFFFFC000L +//MMEA7_ADDRDECGMI_ADDR_HASH_PC2 +#define MMEA7_ADDRDECGMI_ADDR_HASH_PC2__BANK_XOR__SHIFT 0x0 +#define MMEA7_ADDRDECGMI_ADDR_HASH_PC2__BANK_XOR_MASK 0x0000003FL +//MMEA7_ADDRDECGMI_ADDR_HASH_CS0 +#define MMEA7_ADDRDECGMI_ADDR_HASH_CS0__XOR_ENABLE__SHIFT 0x0 +#define MMEA7_ADDRDECGMI_ADDR_HASH_CS0__NA_XOR__SHIFT 0x1 +#define MMEA7_ADDRDECGMI_ADDR_HASH_CS0__XOR_ENABLE_MASK 0x00000001L +#define MMEA7_ADDRDECGMI_ADDR_HASH_CS0__NA_XOR_MASK 0xFFFFFFFEL +//MMEA7_ADDRDECGMI_ADDR_HASH_CS1 +#define MMEA7_ADDRDECGMI_ADDR_HASH_CS1__XOR_ENABLE__SHIFT 0x0 +#define MMEA7_ADDRDECGMI_ADDR_HASH_CS1__NA_XOR__SHIFT 0x1 +#define MMEA7_ADDRDECGMI_ADDR_HASH_CS1__XOR_ENABLE_MASK 0x00000001L +#define MMEA7_ADDRDECGMI_ADDR_HASH_CS1__NA_XOR_MASK 0xFFFFFFFEL +//MMEA7_ADDRDECGMI_HARVEST_ENABLE +#define MMEA7_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_EN__SHIFT 0x0 +#define MMEA7_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_VAL__SHIFT 0x1 +#define MMEA7_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_EN__SHIFT 0x2 +#define 
MMEA7_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_VAL__SHIFT 0x3 +#define MMEA7_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_EN__SHIFT 0x4 +#define MMEA7_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_VAL__SHIFT 0x5 +#define MMEA7_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_EN_MASK 0x00000001L +#define MMEA7_ADDRDECGMI_HARVEST_ENABLE__FORCE_B3_VAL_MASK 0x00000002L +#define MMEA7_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_EN_MASK 0x00000004L +#define MMEA7_ADDRDECGMI_HARVEST_ENABLE__FORCE_B4_VAL_MASK 0x00000008L +#define MMEA7_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_EN_MASK 0x00000010L +#define MMEA7_ADDRDECGMI_HARVEST_ENABLE__FORCE_B5_VAL_MASK 0x00000020L +//MMEA7_ADDRDEC0_BASE_ADDR_CS0 +#define MMEA7_ADDRDEC0_BASE_ADDR_CS0__CS_EN__SHIFT 0x0 +#define MMEA7_ADDRDEC0_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1 +#define MMEA7_ADDRDEC0_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L +#define MMEA7_ADDRDEC0_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA7_ADDRDEC0_BASE_ADDR_CS1 +#define MMEA7_ADDRDEC0_BASE_ADDR_CS1__CS_EN__SHIFT 0x0 +#define MMEA7_ADDRDEC0_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1 +#define MMEA7_ADDRDEC0_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L +#define MMEA7_ADDRDEC0_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA7_ADDRDEC0_BASE_ADDR_CS2 +#define MMEA7_ADDRDEC0_BASE_ADDR_CS2__CS_EN__SHIFT 0x0 +#define MMEA7_ADDRDEC0_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1 +#define MMEA7_ADDRDEC0_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L +#define MMEA7_ADDRDEC0_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA7_ADDRDEC0_BASE_ADDR_CS3 +#define MMEA7_ADDRDEC0_BASE_ADDR_CS3__CS_EN__SHIFT 0x0 +#define MMEA7_ADDRDEC0_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1 +#define MMEA7_ADDRDEC0_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L +#define MMEA7_ADDRDEC0_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA7_ADDRDEC0_BASE_ADDR_SECCS0 +#define MMEA7_ADDRDEC0_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0 +#define MMEA7_ADDRDEC0_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1 +#define MMEA7_ADDRDEC0_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L +#define MMEA7_ADDRDEC0_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA7_ADDRDEC0_BASE_ADDR_SECCS1 +#define MMEA7_ADDRDEC0_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0 +#define MMEA7_ADDRDEC0_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1 +#define MMEA7_ADDRDEC0_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L +#define MMEA7_ADDRDEC0_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA7_ADDRDEC0_BASE_ADDR_SECCS2 +#define MMEA7_ADDRDEC0_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0 +#define MMEA7_ADDRDEC0_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1 +#define MMEA7_ADDRDEC0_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L +#define MMEA7_ADDRDEC0_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA7_ADDRDEC0_BASE_ADDR_SECCS3 +#define MMEA7_ADDRDEC0_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0 +#define MMEA7_ADDRDEC0_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1 +#define MMEA7_ADDRDEC0_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L +#define MMEA7_ADDRDEC0_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA7_ADDRDEC0_ADDR_MASK_CS01 +#define MMEA7_ADDRDEC0_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1 +#define MMEA7_ADDRDEC0_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA7_ADDRDEC0_ADDR_MASK_CS23 +#define MMEA7_ADDRDEC0_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1 +#define MMEA7_ADDRDEC0_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA7_ADDRDEC0_ADDR_MASK_SECCS01 +#define MMEA7_ADDRDEC0_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1 +#define MMEA7_ADDRDEC0_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA7_ADDRDEC0_ADDR_MASK_SECCS23 +#define MMEA7_ADDRDEC0_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1 +#define MMEA7_ADDRDEC0_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL 
+//MMEA7_ADDRDEC0_ADDR_CFG_CS01 +#define MMEA7_ADDRDEC0_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA7_ADDRDEC0_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4 +#define MMEA7_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8 +#define MMEA7_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc +#define MMEA7_ADDRDEC0_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10 +#define MMEA7_ADDRDEC0_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14 +#define MMEA7_ADDRDEC0_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f +#define MMEA7_ADDRDEC0_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA7_ADDRDEC0_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L +#define MMEA7_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA7_ADDRDEC0_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA7_ADDRDEC0_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L +#define MMEA7_ADDRDEC0_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L +#define MMEA7_ADDRDEC0_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L +//MMEA7_ADDRDEC0_ADDR_CFG_CS23 +#define MMEA7_ADDRDEC0_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA7_ADDRDEC0_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4 +#define MMEA7_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8 +#define MMEA7_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc +#define MMEA7_ADDRDEC0_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10 +#define MMEA7_ADDRDEC0_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14 +#define MMEA7_ADDRDEC0_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f +#define MMEA7_ADDRDEC0_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA7_ADDRDEC0_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L +#define MMEA7_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA7_ADDRDEC0_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA7_ADDRDEC0_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L +#define MMEA7_ADDRDEC0_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L +#define MMEA7_ADDRDEC0_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L +//MMEA7_ADDRDEC0_ADDR_SEL_CS01 +#define MMEA7_ADDRDEC0_ADDR_SEL_CS01__BANK0__SHIFT 0x0 +#define MMEA7_ADDRDEC0_ADDR_SEL_CS01__BANK1__SHIFT 0x4 +#define MMEA7_ADDRDEC0_ADDR_SEL_CS01__BANK2__SHIFT 0x8 +#define MMEA7_ADDRDEC0_ADDR_SEL_CS01__BANK3__SHIFT 0xc +#define MMEA7_ADDRDEC0_ADDR_SEL_CS01__BANK4__SHIFT 0x10 +#define MMEA7_ADDRDEC0_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18 +#define MMEA7_ADDRDEC0_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c +#define MMEA7_ADDRDEC0_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL +#define MMEA7_ADDRDEC0_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L +#define MMEA7_ADDRDEC0_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L +#define MMEA7_ADDRDEC0_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L +#define MMEA7_ADDRDEC0_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L +#define MMEA7_ADDRDEC0_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L +#define MMEA7_ADDRDEC0_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L +//MMEA7_ADDRDEC0_ADDR_SEL_CS23 +#define MMEA7_ADDRDEC0_ADDR_SEL_CS23__BANK0__SHIFT 0x0 +#define MMEA7_ADDRDEC0_ADDR_SEL_CS23__BANK1__SHIFT 0x4 +#define MMEA7_ADDRDEC0_ADDR_SEL_CS23__BANK2__SHIFT 0x8 +#define MMEA7_ADDRDEC0_ADDR_SEL_CS23__BANK3__SHIFT 0xc +#define MMEA7_ADDRDEC0_ADDR_SEL_CS23__BANK4__SHIFT 0x10 +#define MMEA7_ADDRDEC0_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18 +#define MMEA7_ADDRDEC0_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c +#define MMEA7_ADDRDEC0_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL +#define MMEA7_ADDRDEC0_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L +#define MMEA7_ADDRDEC0_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L +#define MMEA7_ADDRDEC0_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L +#define MMEA7_ADDRDEC0_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L +#define MMEA7_ADDRDEC0_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L +#define 
MMEA7_ADDRDEC0_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L +//MMEA7_ADDRDEC0_ADDR_SEL2_CS01 +#define MMEA7_ADDRDEC0_ADDR_SEL2_CS01__BANK5__SHIFT 0x0 +#define MMEA7_ADDRDEC0_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL +//MMEA7_ADDRDEC0_ADDR_SEL2_CS23 +#define MMEA7_ADDRDEC0_ADDR_SEL2_CS23__BANK5__SHIFT 0x0 +#define MMEA7_ADDRDEC0_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL +//MMEA7_ADDRDEC0_COL_SEL_LO_CS01 +#define MMEA7_ADDRDEC0_COL_SEL_LO_CS01__COL0__SHIFT 0x0 +#define MMEA7_ADDRDEC0_COL_SEL_LO_CS01__COL1__SHIFT 0x4 +#define MMEA7_ADDRDEC0_COL_SEL_LO_CS01__COL2__SHIFT 0x8 +#define MMEA7_ADDRDEC0_COL_SEL_LO_CS01__COL3__SHIFT 0xc +#define MMEA7_ADDRDEC0_COL_SEL_LO_CS01__COL4__SHIFT 0x10 +#define MMEA7_ADDRDEC0_COL_SEL_LO_CS01__COL5__SHIFT 0x14 +#define MMEA7_ADDRDEC0_COL_SEL_LO_CS01__COL6__SHIFT 0x18 +#define MMEA7_ADDRDEC0_COL_SEL_LO_CS01__COL7__SHIFT 0x1c +#define MMEA7_ADDRDEC0_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL +#define MMEA7_ADDRDEC0_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L +#define MMEA7_ADDRDEC0_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L +#define MMEA7_ADDRDEC0_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L +#define MMEA7_ADDRDEC0_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L +#define MMEA7_ADDRDEC0_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L +#define MMEA7_ADDRDEC0_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L +#define MMEA7_ADDRDEC0_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L +//MMEA7_ADDRDEC0_COL_SEL_LO_CS23 +#define MMEA7_ADDRDEC0_COL_SEL_LO_CS23__COL0__SHIFT 0x0 +#define MMEA7_ADDRDEC0_COL_SEL_LO_CS23__COL1__SHIFT 0x4 +#define MMEA7_ADDRDEC0_COL_SEL_LO_CS23__COL2__SHIFT 0x8 +#define MMEA7_ADDRDEC0_COL_SEL_LO_CS23__COL3__SHIFT 0xc +#define MMEA7_ADDRDEC0_COL_SEL_LO_CS23__COL4__SHIFT 0x10 +#define MMEA7_ADDRDEC0_COL_SEL_LO_CS23__COL5__SHIFT 0x14 +#define MMEA7_ADDRDEC0_COL_SEL_LO_CS23__COL6__SHIFT 0x18 +#define MMEA7_ADDRDEC0_COL_SEL_LO_CS23__COL7__SHIFT 0x1c +#define MMEA7_ADDRDEC0_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL +#define MMEA7_ADDRDEC0_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L +#define MMEA7_ADDRDEC0_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L +#define MMEA7_ADDRDEC0_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L +#define MMEA7_ADDRDEC0_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L +#define MMEA7_ADDRDEC0_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L +#define MMEA7_ADDRDEC0_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L +#define MMEA7_ADDRDEC0_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L +//MMEA7_ADDRDEC0_COL_SEL_HI_CS01 +#define MMEA7_ADDRDEC0_COL_SEL_HI_CS01__COL8__SHIFT 0x0 +#define MMEA7_ADDRDEC0_COL_SEL_HI_CS01__COL9__SHIFT 0x4 +#define MMEA7_ADDRDEC0_COL_SEL_HI_CS01__COL10__SHIFT 0x8 +#define MMEA7_ADDRDEC0_COL_SEL_HI_CS01__COL11__SHIFT 0xc +#define MMEA7_ADDRDEC0_COL_SEL_HI_CS01__COL12__SHIFT 0x10 +#define MMEA7_ADDRDEC0_COL_SEL_HI_CS01__COL13__SHIFT 0x14 +#define MMEA7_ADDRDEC0_COL_SEL_HI_CS01__COL14__SHIFT 0x18 +#define MMEA7_ADDRDEC0_COL_SEL_HI_CS01__COL15__SHIFT 0x1c +#define MMEA7_ADDRDEC0_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL +#define MMEA7_ADDRDEC0_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L +#define MMEA7_ADDRDEC0_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L +#define MMEA7_ADDRDEC0_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L +#define MMEA7_ADDRDEC0_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L +#define MMEA7_ADDRDEC0_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L +#define MMEA7_ADDRDEC0_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L +#define MMEA7_ADDRDEC0_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L +//MMEA7_ADDRDEC0_COL_SEL_HI_CS23 +#define MMEA7_ADDRDEC0_COL_SEL_HI_CS23__COL8__SHIFT 0x0 +#define MMEA7_ADDRDEC0_COL_SEL_HI_CS23__COL9__SHIFT 0x4 +#define 
MMEA7_ADDRDEC0_COL_SEL_HI_CS23__COL10__SHIFT 0x8 +#define MMEA7_ADDRDEC0_COL_SEL_HI_CS23__COL11__SHIFT 0xc +#define MMEA7_ADDRDEC0_COL_SEL_HI_CS23__COL12__SHIFT 0x10 +#define MMEA7_ADDRDEC0_COL_SEL_HI_CS23__COL13__SHIFT 0x14 +#define MMEA7_ADDRDEC0_COL_SEL_HI_CS23__COL14__SHIFT 0x18 +#define MMEA7_ADDRDEC0_COL_SEL_HI_CS23__COL15__SHIFT 0x1c +#define MMEA7_ADDRDEC0_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL +#define MMEA7_ADDRDEC0_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L +#define MMEA7_ADDRDEC0_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L +#define MMEA7_ADDRDEC0_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L +#define MMEA7_ADDRDEC0_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L +#define MMEA7_ADDRDEC0_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L +#define MMEA7_ADDRDEC0_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L +#define MMEA7_ADDRDEC0_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L +//MMEA7_ADDRDEC0_RM_SEL_CS01 +#define MMEA7_ADDRDEC0_RM_SEL_CS01__RM0__SHIFT 0x0 +#define MMEA7_ADDRDEC0_RM_SEL_CS01__RM1__SHIFT 0x4 +#define MMEA7_ADDRDEC0_RM_SEL_CS01__RM2__SHIFT 0x8 +#define MMEA7_ADDRDEC0_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc +#define MMEA7_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA7_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA7_ADDRDEC0_RM_SEL_CS01__RM0_MASK 0x0000000FL +#define MMEA7_ADDRDEC0_RM_SEL_CS01__RM1_MASK 0x000000F0L +#define MMEA7_ADDRDEC0_RM_SEL_CS01__RM2_MASK 0x00000F00L +#define MMEA7_ADDRDEC0_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA7_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA7_ADDRDEC0_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA7_ADDRDEC0_RM_SEL_CS23 +#define MMEA7_ADDRDEC0_RM_SEL_CS23__RM0__SHIFT 0x0 +#define MMEA7_ADDRDEC0_RM_SEL_CS23__RM1__SHIFT 0x4 +#define MMEA7_ADDRDEC0_RM_SEL_CS23__RM2__SHIFT 0x8 +#define MMEA7_ADDRDEC0_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc +#define MMEA7_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA7_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA7_ADDRDEC0_RM_SEL_CS23__RM0_MASK 0x0000000FL +#define MMEA7_ADDRDEC0_RM_SEL_CS23__RM1_MASK 0x000000F0L +#define MMEA7_ADDRDEC0_RM_SEL_CS23__RM2_MASK 0x00000F00L +#define MMEA7_ADDRDEC0_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA7_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA7_ADDRDEC0_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA7_ADDRDEC0_RM_SEL_SECCS01 +#define MMEA7_ADDRDEC0_RM_SEL_SECCS01__RM0__SHIFT 0x0 +#define MMEA7_ADDRDEC0_RM_SEL_SECCS01__RM1__SHIFT 0x4 +#define MMEA7_ADDRDEC0_RM_SEL_SECCS01__RM2__SHIFT 0x8 +#define MMEA7_ADDRDEC0_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc +#define MMEA7_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA7_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA7_ADDRDEC0_RM_SEL_SECCS01__RM0_MASK 0x0000000FL +#define MMEA7_ADDRDEC0_RM_SEL_SECCS01__RM1_MASK 0x000000F0L +#define MMEA7_ADDRDEC0_RM_SEL_SECCS01__RM2_MASK 0x00000F00L +#define MMEA7_ADDRDEC0_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA7_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA7_ADDRDEC0_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA7_ADDRDEC0_RM_SEL_SECCS23 +#define MMEA7_ADDRDEC0_RM_SEL_SECCS23__RM0__SHIFT 0x0 +#define MMEA7_ADDRDEC0_RM_SEL_SECCS23__RM1__SHIFT 0x4 +#define MMEA7_ADDRDEC0_RM_SEL_SECCS23__RM2__SHIFT 0x8 +#define MMEA7_ADDRDEC0_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc +#define MMEA7_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define 
MMEA7_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA7_ADDRDEC0_RM_SEL_SECCS23__RM0_MASK 0x0000000FL +#define MMEA7_ADDRDEC0_RM_SEL_SECCS23__RM1_MASK 0x000000F0L +#define MMEA7_ADDRDEC0_RM_SEL_SECCS23__RM2_MASK 0x00000F00L +#define MMEA7_ADDRDEC0_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA7_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA7_ADDRDEC0_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA7_ADDRDEC1_BASE_ADDR_CS0 +#define MMEA7_ADDRDEC1_BASE_ADDR_CS0__CS_EN__SHIFT 0x0 +#define MMEA7_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1 +#define MMEA7_ADDRDEC1_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L +#define MMEA7_ADDRDEC1_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA7_ADDRDEC1_BASE_ADDR_CS1 +#define MMEA7_ADDRDEC1_BASE_ADDR_CS1__CS_EN__SHIFT 0x0 +#define MMEA7_ADDRDEC1_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1 +#define MMEA7_ADDRDEC1_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L +#define MMEA7_ADDRDEC1_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA7_ADDRDEC1_BASE_ADDR_CS2 +#define MMEA7_ADDRDEC1_BASE_ADDR_CS2__CS_EN__SHIFT 0x0 +#define MMEA7_ADDRDEC1_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1 +#define MMEA7_ADDRDEC1_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L +#define MMEA7_ADDRDEC1_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA7_ADDRDEC1_BASE_ADDR_CS3 +#define MMEA7_ADDRDEC1_BASE_ADDR_CS3__CS_EN__SHIFT 0x0 +#define MMEA7_ADDRDEC1_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1 +#define MMEA7_ADDRDEC1_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L +#define MMEA7_ADDRDEC1_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA7_ADDRDEC1_BASE_ADDR_SECCS0 +#define MMEA7_ADDRDEC1_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0 +#define MMEA7_ADDRDEC1_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1 +#define MMEA7_ADDRDEC1_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L +#define MMEA7_ADDRDEC1_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA7_ADDRDEC1_BASE_ADDR_SECCS1 +#define MMEA7_ADDRDEC1_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0 +#define MMEA7_ADDRDEC1_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1 +#define MMEA7_ADDRDEC1_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L +#define MMEA7_ADDRDEC1_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA7_ADDRDEC1_BASE_ADDR_SECCS2 +#define MMEA7_ADDRDEC1_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0 +#define MMEA7_ADDRDEC1_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1 +#define MMEA7_ADDRDEC1_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L +#define MMEA7_ADDRDEC1_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA7_ADDRDEC1_BASE_ADDR_SECCS3 +#define MMEA7_ADDRDEC1_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0 +#define MMEA7_ADDRDEC1_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1 +#define MMEA7_ADDRDEC1_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L +#define MMEA7_ADDRDEC1_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA7_ADDRDEC1_ADDR_MASK_CS01 +#define MMEA7_ADDRDEC1_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1 +#define MMEA7_ADDRDEC1_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA7_ADDRDEC1_ADDR_MASK_CS23 +#define MMEA7_ADDRDEC1_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1 +#define MMEA7_ADDRDEC1_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA7_ADDRDEC1_ADDR_MASK_SECCS01 +#define MMEA7_ADDRDEC1_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1 +#define MMEA7_ADDRDEC1_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA7_ADDRDEC1_ADDR_MASK_SECCS23 +#define MMEA7_ADDRDEC1_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1 +#define MMEA7_ADDRDEC1_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA7_ADDRDEC1_ADDR_CFG_CS01 +#define MMEA7_ADDRDEC1_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA7_ADDRDEC1_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4 
+#define MMEA7_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8 +#define MMEA7_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc +#define MMEA7_ADDRDEC1_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10 +#define MMEA7_ADDRDEC1_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14 +#define MMEA7_ADDRDEC1_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f +#define MMEA7_ADDRDEC1_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA7_ADDRDEC1_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L +#define MMEA7_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA7_ADDRDEC1_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA7_ADDRDEC1_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L +#define MMEA7_ADDRDEC1_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L +#define MMEA7_ADDRDEC1_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L +//MMEA7_ADDRDEC1_ADDR_CFG_CS23 +#define MMEA7_ADDRDEC1_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA7_ADDRDEC1_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4 +#define MMEA7_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8 +#define MMEA7_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc +#define MMEA7_ADDRDEC1_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10 +#define MMEA7_ADDRDEC1_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14 +#define MMEA7_ADDRDEC1_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f +#define MMEA7_ADDRDEC1_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA7_ADDRDEC1_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L +#define MMEA7_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA7_ADDRDEC1_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA7_ADDRDEC1_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L +#define MMEA7_ADDRDEC1_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L +#define MMEA7_ADDRDEC1_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L +//MMEA7_ADDRDEC1_ADDR_SEL_CS01 +#define MMEA7_ADDRDEC1_ADDR_SEL_CS01__BANK0__SHIFT 0x0 +#define MMEA7_ADDRDEC1_ADDR_SEL_CS01__BANK1__SHIFT 0x4 +#define MMEA7_ADDRDEC1_ADDR_SEL_CS01__BANK2__SHIFT 0x8 +#define MMEA7_ADDRDEC1_ADDR_SEL_CS01__BANK3__SHIFT 0xc +#define MMEA7_ADDRDEC1_ADDR_SEL_CS01__BANK4__SHIFT 0x10 +#define MMEA7_ADDRDEC1_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18 +#define MMEA7_ADDRDEC1_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c +#define MMEA7_ADDRDEC1_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL +#define MMEA7_ADDRDEC1_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L +#define MMEA7_ADDRDEC1_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L +#define MMEA7_ADDRDEC1_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L +#define MMEA7_ADDRDEC1_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L +#define MMEA7_ADDRDEC1_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L +#define MMEA7_ADDRDEC1_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L +//MMEA7_ADDRDEC1_ADDR_SEL_CS23 +#define MMEA7_ADDRDEC1_ADDR_SEL_CS23__BANK0__SHIFT 0x0 +#define MMEA7_ADDRDEC1_ADDR_SEL_CS23__BANK1__SHIFT 0x4 +#define MMEA7_ADDRDEC1_ADDR_SEL_CS23__BANK2__SHIFT 0x8 +#define MMEA7_ADDRDEC1_ADDR_SEL_CS23__BANK3__SHIFT 0xc +#define MMEA7_ADDRDEC1_ADDR_SEL_CS23__BANK4__SHIFT 0x10 +#define MMEA7_ADDRDEC1_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18 +#define MMEA7_ADDRDEC1_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c +#define MMEA7_ADDRDEC1_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL +#define MMEA7_ADDRDEC1_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L +#define MMEA7_ADDRDEC1_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L +#define MMEA7_ADDRDEC1_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L +#define MMEA7_ADDRDEC1_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L +#define MMEA7_ADDRDEC1_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L +#define MMEA7_ADDRDEC1_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L +//MMEA7_ADDRDEC1_ADDR_SEL2_CS01 +#define MMEA7_ADDRDEC1_ADDR_SEL2_CS01__BANK5__SHIFT 0x0 +#define 
MMEA7_ADDRDEC1_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL +//MMEA7_ADDRDEC1_ADDR_SEL2_CS23 +#define MMEA7_ADDRDEC1_ADDR_SEL2_CS23__BANK5__SHIFT 0x0 +#define MMEA7_ADDRDEC1_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL +//MMEA7_ADDRDEC1_COL_SEL_LO_CS01 +#define MMEA7_ADDRDEC1_COL_SEL_LO_CS01__COL0__SHIFT 0x0 +#define MMEA7_ADDRDEC1_COL_SEL_LO_CS01__COL1__SHIFT 0x4 +#define MMEA7_ADDRDEC1_COL_SEL_LO_CS01__COL2__SHIFT 0x8 +#define MMEA7_ADDRDEC1_COL_SEL_LO_CS01__COL3__SHIFT 0xc +#define MMEA7_ADDRDEC1_COL_SEL_LO_CS01__COL4__SHIFT 0x10 +#define MMEA7_ADDRDEC1_COL_SEL_LO_CS01__COL5__SHIFT 0x14 +#define MMEA7_ADDRDEC1_COL_SEL_LO_CS01__COL6__SHIFT 0x18 +#define MMEA7_ADDRDEC1_COL_SEL_LO_CS01__COL7__SHIFT 0x1c +#define MMEA7_ADDRDEC1_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL +#define MMEA7_ADDRDEC1_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L +#define MMEA7_ADDRDEC1_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L +#define MMEA7_ADDRDEC1_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L +#define MMEA7_ADDRDEC1_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L +#define MMEA7_ADDRDEC1_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L +#define MMEA7_ADDRDEC1_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L +#define MMEA7_ADDRDEC1_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L +//MMEA7_ADDRDEC1_COL_SEL_LO_CS23 +#define MMEA7_ADDRDEC1_COL_SEL_LO_CS23__COL0__SHIFT 0x0 +#define MMEA7_ADDRDEC1_COL_SEL_LO_CS23__COL1__SHIFT 0x4 +#define MMEA7_ADDRDEC1_COL_SEL_LO_CS23__COL2__SHIFT 0x8 +#define MMEA7_ADDRDEC1_COL_SEL_LO_CS23__COL3__SHIFT 0xc +#define MMEA7_ADDRDEC1_COL_SEL_LO_CS23__COL4__SHIFT 0x10 +#define MMEA7_ADDRDEC1_COL_SEL_LO_CS23__COL5__SHIFT 0x14 +#define MMEA7_ADDRDEC1_COL_SEL_LO_CS23__COL6__SHIFT 0x18 +#define MMEA7_ADDRDEC1_COL_SEL_LO_CS23__COL7__SHIFT 0x1c +#define MMEA7_ADDRDEC1_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL +#define MMEA7_ADDRDEC1_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L +#define MMEA7_ADDRDEC1_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L +#define MMEA7_ADDRDEC1_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L +#define MMEA7_ADDRDEC1_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L +#define MMEA7_ADDRDEC1_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L +#define MMEA7_ADDRDEC1_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L +#define MMEA7_ADDRDEC1_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L +//MMEA7_ADDRDEC1_COL_SEL_HI_CS01 +#define MMEA7_ADDRDEC1_COL_SEL_HI_CS01__COL8__SHIFT 0x0 +#define MMEA7_ADDRDEC1_COL_SEL_HI_CS01__COL9__SHIFT 0x4 +#define MMEA7_ADDRDEC1_COL_SEL_HI_CS01__COL10__SHIFT 0x8 +#define MMEA7_ADDRDEC1_COL_SEL_HI_CS01__COL11__SHIFT 0xc +#define MMEA7_ADDRDEC1_COL_SEL_HI_CS01__COL12__SHIFT 0x10 +#define MMEA7_ADDRDEC1_COL_SEL_HI_CS01__COL13__SHIFT 0x14 +#define MMEA7_ADDRDEC1_COL_SEL_HI_CS01__COL14__SHIFT 0x18 +#define MMEA7_ADDRDEC1_COL_SEL_HI_CS01__COL15__SHIFT 0x1c +#define MMEA7_ADDRDEC1_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL +#define MMEA7_ADDRDEC1_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L +#define MMEA7_ADDRDEC1_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L +#define MMEA7_ADDRDEC1_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L +#define MMEA7_ADDRDEC1_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L +#define MMEA7_ADDRDEC1_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L +#define MMEA7_ADDRDEC1_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L +#define MMEA7_ADDRDEC1_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L +//MMEA7_ADDRDEC1_COL_SEL_HI_CS23 +#define MMEA7_ADDRDEC1_COL_SEL_HI_CS23__COL8__SHIFT 0x0 +#define MMEA7_ADDRDEC1_COL_SEL_HI_CS23__COL9__SHIFT 0x4 +#define MMEA7_ADDRDEC1_COL_SEL_HI_CS23__COL10__SHIFT 0x8 +#define MMEA7_ADDRDEC1_COL_SEL_HI_CS23__COL11__SHIFT 0xc +#define MMEA7_ADDRDEC1_COL_SEL_HI_CS23__COL12__SHIFT 0x10 +#define 
MMEA7_ADDRDEC1_COL_SEL_HI_CS23__COL13__SHIFT 0x14 +#define MMEA7_ADDRDEC1_COL_SEL_HI_CS23__COL14__SHIFT 0x18 +#define MMEA7_ADDRDEC1_COL_SEL_HI_CS23__COL15__SHIFT 0x1c +#define MMEA7_ADDRDEC1_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL +#define MMEA7_ADDRDEC1_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L +#define MMEA7_ADDRDEC1_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L +#define MMEA7_ADDRDEC1_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L +#define MMEA7_ADDRDEC1_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L +#define MMEA7_ADDRDEC1_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L +#define MMEA7_ADDRDEC1_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L +#define MMEA7_ADDRDEC1_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L +//MMEA7_ADDRDEC1_RM_SEL_CS01 +#define MMEA7_ADDRDEC1_RM_SEL_CS01__RM0__SHIFT 0x0 +#define MMEA7_ADDRDEC1_RM_SEL_CS01__RM1__SHIFT 0x4 +#define MMEA7_ADDRDEC1_RM_SEL_CS01__RM2__SHIFT 0x8 +#define MMEA7_ADDRDEC1_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc +#define MMEA7_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA7_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA7_ADDRDEC1_RM_SEL_CS01__RM0_MASK 0x0000000FL +#define MMEA7_ADDRDEC1_RM_SEL_CS01__RM1_MASK 0x000000F0L +#define MMEA7_ADDRDEC1_RM_SEL_CS01__RM2_MASK 0x00000F00L +#define MMEA7_ADDRDEC1_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA7_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA7_ADDRDEC1_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA7_ADDRDEC1_RM_SEL_CS23 +#define MMEA7_ADDRDEC1_RM_SEL_CS23__RM0__SHIFT 0x0 +#define MMEA7_ADDRDEC1_RM_SEL_CS23__RM1__SHIFT 0x4 +#define MMEA7_ADDRDEC1_RM_SEL_CS23__RM2__SHIFT 0x8 +#define MMEA7_ADDRDEC1_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc +#define MMEA7_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA7_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA7_ADDRDEC1_RM_SEL_CS23__RM0_MASK 0x0000000FL +#define MMEA7_ADDRDEC1_RM_SEL_CS23__RM1_MASK 0x000000F0L +#define MMEA7_ADDRDEC1_RM_SEL_CS23__RM2_MASK 0x00000F00L +#define MMEA7_ADDRDEC1_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA7_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA7_ADDRDEC1_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA7_ADDRDEC1_RM_SEL_SECCS01 +#define MMEA7_ADDRDEC1_RM_SEL_SECCS01__RM0__SHIFT 0x0 +#define MMEA7_ADDRDEC1_RM_SEL_SECCS01__RM1__SHIFT 0x4 +#define MMEA7_ADDRDEC1_RM_SEL_SECCS01__RM2__SHIFT 0x8 +#define MMEA7_ADDRDEC1_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc +#define MMEA7_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA7_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA7_ADDRDEC1_RM_SEL_SECCS01__RM0_MASK 0x0000000FL +#define MMEA7_ADDRDEC1_RM_SEL_SECCS01__RM1_MASK 0x000000F0L +#define MMEA7_ADDRDEC1_RM_SEL_SECCS01__RM2_MASK 0x00000F00L +#define MMEA7_ADDRDEC1_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA7_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA7_ADDRDEC1_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA7_ADDRDEC1_RM_SEL_SECCS23 +#define MMEA7_ADDRDEC1_RM_SEL_SECCS23__RM0__SHIFT 0x0 +#define MMEA7_ADDRDEC1_RM_SEL_SECCS23__RM1__SHIFT 0x4 +#define MMEA7_ADDRDEC1_RM_SEL_SECCS23__RM2__SHIFT 0x8 +#define MMEA7_ADDRDEC1_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc +#define MMEA7_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA7_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA7_ADDRDEC1_RM_SEL_SECCS23__RM0_MASK 0x0000000FL +#define MMEA7_ADDRDEC1_RM_SEL_SECCS23__RM1_MASK 0x000000F0L +#define 
MMEA7_ADDRDEC1_RM_SEL_SECCS23__RM2_MASK 0x00000F00L +#define MMEA7_ADDRDEC1_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA7_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA7_ADDRDEC1_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA7_ADDRDEC2_BASE_ADDR_CS0 +#define MMEA7_ADDRDEC2_BASE_ADDR_CS0__CS_EN__SHIFT 0x0 +#define MMEA7_ADDRDEC2_BASE_ADDR_CS0__BASE_ADDR__SHIFT 0x1 +#define MMEA7_ADDRDEC2_BASE_ADDR_CS0__CS_EN_MASK 0x00000001L +#define MMEA7_ADDRDEC2_BASE_ADDR_CS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA7_ADDRDEC2_BASE_ADDR_CS1 +#define MMEA7_ADDRDEC2_BASE_ADDR_CS1__CS_EN__SHIFT 0x0 +#define MMEA7_ADDRDEC2_BASE_ADDR_CS1__BASE_ADDR__SHIFT 0x1 +#define MMEA7_ADDRDEC2_BASE_ADDR_CS1__CS_EN_MASK 0x00000001L +#define MMEA7_ADDRDEC2_BASE_ADDR_CS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA7_ADDRDEC2_BASE_ADDR_CS2 +#define MMEA7_ADDRDEC2_BASE_ADDR_CS2__CS_EN__SHIFT 0x0 +#define MMEA7_ADDRDEC2_BASE_ADDR_CS2__BASE_ADDR__SHIFT 0x1 +#define MMEA7_ADDRDEC2_BASE_ADDR_CS2__CS_EN_MASK 0x00000001L +#define MMEA7_ADDRDEC2_BASE_ADDR_CS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA7_ADDRDEC2_BASE_ADDR_CS3 +#define MMEA7_ADDRDEC2_BASE_ADDR_CS3__CS_EN__SHIFT 0x0 +#define MMEA7_ADDRDEC2_BASE_ADDR_CS3__BASE_ADDR__SHIFT 0x1 +#define MMEA7_ADDRDEC2_BASE_ADDR_CS3__CS_EN_MASK 0x00000001L +#define MMEA7_ADDRDEC2_BASE_ADDR_CS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA7_ADDRDEC2_BASE_ADDR_SECCS0 +#define MMEA7_ADDRDEC2_BASE_ADDR_SECCS0__CS_EN__SHIFT 0x0 +#define MMEA7_ADDRDEC2_BASE_ADDR_SECCS0__BASE_ADDR__SHIFT 0x1 +#define MMEA7_ADDRDEC2_BASE_ADDR_SECCS0__CS_EN_MASK 0x00000001L +#define MMEA7_ADDRDEC2_BASE_ADDR_SECCS0__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA7_ADDRDEC2_BASE_ADDR_SECCS1 +#define MMEA7_ADDRDEC2_BASE_ADDR_SECCS1__CS_EN__SHIFT 0x0 +#define MMEA7_ADDRDEC2_BASE_ADDR_SECCS1__BASE_ADDR__SHIFT 0x1 +#define MMEA7_ADDRDEC2_BASE_ADDR_SECCS1__CS_EN_MASK 0x00000001L +#define MMEA7_ADDRDEC2_BASE_ADDR_SECCS1__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA7_ADDRDEC2_BASE_ADDR_SECCS2 +#define MMEA7_ADDRDEC2_BASE_ADDR_SECCS2__CS_EN__SHIFT 0x0 +#define MMEA7_ADDRDEC2_BASE_ADDR_SECCS2__BASE_ADDR__SHIFT 0x1 +#define MMEA7_ADDRDEC2_BASE_ADDR_SECCS2__CS_EN_MASK 0x00000001L +#define MMEA7_ADDRDEC2_BASE_ADDR_SECCS2__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA7_ADDRDEC2_BASE_ADDR_SECCS3 +#define MMEA7_ADDRDEC2_BASE_ADDR_SECCS3__CS_EN__SHIFT 0x0 +#define MMEA7_ADDRDEC2_BASE_ADDR_SECCS3__BASE_ADDR__SHIFT 0x1 +#define MMEA7_ADDRDEC2_BASE_ADDR_SECCS3__CS_EN_MASK 0x00000001L +#define MMEA7_ADDRDEC2_BASE_ADDR_SECCS3__BASE_ADDR_MASK 0xFFFFFFFEL +//MMEA7_ADDRDEC2_ADDR_MASK_CS01 +#define MMEA7_ADDRDEC2_ADDR_MASK_CS01__ADDR_MASK__SHIFT 0x1 +#define MMEA7_ADDRDEC2_ADDR_MASK_CS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA7_ADDRDEC2_ADDR_MASK_CS23 +#define MMEA7_ADDRDEC2_ADDR_MASK_CS23__ADDR_MASK__SHIFT 0x1 +#define MMEA7_ADDRDEC2_ADDR_MASK_CS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA7_ADDRDEC2_ADDR_MASK_SECCS01 +#define MMEA7_ADDRDEC2_ADDR_MASK_SECCS01__ADDR_MASK__SHIFT 0x1 +#define MMEA7_ADDRDEC2_ADDR_MASK_SECCS01__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA7_ADDRDEC2_ADDR_MASK_SECCS23 +#define MMEA7_ADDRDEC2_ADDR_MASK_SECCS23__ADDR_MASK__SHIFT 0x1 +#define MMEA7_ADDRDEC2_ADDR_MASK_SECCS23__ADDR_MASK_MASK 0xFFFFFFFEL +//MMEA7_ADDRDEC2_ADDR_CFG_CS01 +#define MMEA7_ADDRDEC2_ADDR_CFG_CS01__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA7_ADDRDEC2_ADDR_CFG_CS01__NUM_RM__SHIFT 0x4 +#define MMEA7_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_LO__SHIFT 0x8 +#define MMEA7_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_HI__SHIFT 0xc +#define MMEA7_ADDRDEC2_ADDR_CFG_CS01__NUM_COL__SHIFT 0x10 +#define 
MMEA7_ADDRDEC2_ADDR_CFG_CS01__NUM_BANKS__SHIFT 0x14 +#define MMEA7_ADDRDEC2_ADDR_CFG_CS01__HI_COL_EN__SHIFT 0x1f +#define MMEA7_ADDRDEC2_ADDR_CFG_CS01__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA7_ADDRDEC2_ADDR_CFG_CS01__NUM_RM_MASK 0x00000030L +#define MMEA7_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA7_ADDRDEC2_ADDR_CFG_CS01__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA7_ADDRDEC2_ADDR_CFG_CS01__NUM_COL_MASK 0x000F0000L +#define MMEA7_ADDRDEC2_ADDR_CFG_CS01__NUM_BANKS_MASK 0x00300000L +#define MMEA7_ADDRDEC2_ADDR_CFG_CS01__HI_COL_EN_MASK 0x80000000L +//MMEA7_ADDRDEC2_ADDR_CFG_CS23 +#define MMEA7_ADDRDEC2_ADDR_CFG_CS23__NUM_BANK_GROUPS__SHIFT 0x1 +#define MMEA7_ADDRDEC2_ADDR_CFG_CS23__NUM_RM__SHIFT 0x4 +#define MMEA7_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_LO__SHIFT 0x8 +#define MMEA7_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_HI__SHIFT 0xc +#define MMEA7_ADDRDEC2_ADDR_CFG_CS23__NUM_COL__SHIFT 0x10 +#define MMEA7_ADDRDEC2_ADDR_CFG_CS23__NUM_BANKS__SHIFT 0x14 +#define MMEA7_ADDRDEC2_ADDR_CFG_CS23__HI_COL_EN__SHIFT 0x1f +#define MMEA7_ADDRDEC2_ADDR_CFG_CS23__NUM_BANK_GROUPS_MASK 0x0000000EL +#define MMEA7_ADDRDEC2_ADDR_CFG_CS23__NUM_RM_MASK 0x00000030L +#define MMEA7_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_LO_MASK 0x00000F00L +#define MMEA7_ADDRDEC2_ADDR_CFG_CS23__NUM_ROW_HI_MASK 0x0000F000L +#define MMEA7_ADDRDEC2_ADDR_CFG_CS23__NUM_COL_MASK 0x000F0000L +#define MMEA7_ADDRDEC2_ADDR_CFG_CS23__NUM_BANKS_MASK 0x00300000L +#define MMEA7_ADDRDEC2_ADDR_CFG_CS23__HI_COL_EN_MASK 0x80000000L +//MMEA7_ADDRDEC2_ADDR_SEL_CS01 +#define MMEA7_ADDRDEC2_ADDR_SEL_CS01__BANK0__SHIFT 0x0 +#define MMEA7_ADDRDEC2_ADDR_SEL_CS01__BANK1__SHIFT 0x4 +#define MMEA7_ADDRDEC2_ADDR_SEL_CS01__BANK2__SHIFT 0x8 +#define MMEA7_ADDRDEC2_ADDR_SEL_CS01__BANK3__SHIFT 0xc +#define MMEA7_ADDRDEC2_ADDR_SEL_CS01__BANK4__SHIFT 0x10 +#define MMEA7_ADDRDEC2_ADDR_SEL_CS01__ROW_LO__SHIFT 0x18 +#define MMEA7_ADDRDEC2_ADDR_SEL_CS01__ROW_HI__SHIFT 0x1c +#define MMEA7_ADDRDEC2_ADDR_SEL_CS01__BANK0_MASK 0x0000000FL +#define MMEA7_ADDRDEC2_ADDR_SEL_CS01__BANK1_MASK 0x000000F0L +#define MMEA7_ADDRDEC2_ADDR_SEL_CS01__BANK2_MASK 0x00000F00L +#define MMEA7_ADDRDEC2_ADDR_SEL_CS01__BANK3_MASK 0x0000F000L +#define MMEA7_ADDRDEC2_ADDR_SEL_CS01__BANK4_MASK 0x001F0000L +#define MMEA7_ADDRDEC2_ADDR_SEL_CS01__ROW_LO_MASK 0x0F000000L +#define MMEA7_ADDRDEC2_ADDR_SEL_CS01__ROW_HI_MASK 0xF0000000L +//MMEA7_ADDRDEC2_ADDR_SEL_CS23 +#define MMEA7_ADDRDEC2_ADDR_SEL_CS23__BANK0__SHIFT 0x0 +#define MMEA7_ADDRDEC2_ADDR_SEL_CS23__BANK1__SHIFT 0x4 +#define MMEA7_ADDRDEC2_ADDR_SEL_CS23__BANK2__SHIFT 0x8 +#define MMEA7_ADDRDEC2_ADDR_SEL_CS23__BANK3__SHIFT 0xc +#define MMEA7_ADDRDEC2_ADDR_SEL_CS23__BANK4__SHIFT 0x10 +#define MMEA7_ADDRDEC2_ADDR_SEL_CS23__ROW_LO__SHIFT 0x18 +#define MMEA7_ADDRDEC2_ADDR_SEL_CS23__ROW_HI__SHIFT 0x1c +#define MMEA7_ADDRDEC2_ADDR_SEL_CS23__BANK0_MASK 0x0000000FL +#define MMEA7_ADDRDEC2_ADDR_SEL_CS23__BANK1_MASK 0x000000F0L +#define MMEA7_ADDRDEC2_ADDR_SEL_CS23__BANK2_MASK 0x00000F00L +#define MMEA7_ADDRDEC2_ADDR_SEL_CS23__BANK3_MASK 0x0000F000L +#define MMEA7_ADDRDEC2_ADDR_SEL_CS23__BANK4_MASK 0x001F0000L +#define MMEA7_ADDRDEC2_ADDR_SEL_CS23__ROW_LO_MASK 0x0F000000L +#define MMEA7_ADDRDEC2_ADDR_SEL_CS23__ROW_HI_MASK 0xF0000000L +//MMEA7_ADDRDEC2_ADDR_SEL2_CS01 +#define MMEA7_ADDRDEC2_ADDR_SEL2_CS01__BANK5__SHIFT 0x0 +#define MMEA7_ADDRDEC2_ADDR_SEL2_CS01__BANK5_MASK 0x0000001FL +//MMEA7_ADDRDEC2_ADDR_SEL2_CS23 +#define MMEA7_ADDRDEC2_ADDR_SEL2_CS23__BANK5__SHIFT 0x0 +#define MMEA7_ADDRDEC2_ADDR_SEL2_CS23__BANK5_MASK 0x0000001FL 
+//MMEA7_ADDRDEC2_COL_SEL_LO_CS01 +#define MMEA7_ADDRDEC2_COL_SEL_LO_CS01__COL0__SHIFT 0x0 +#define MMEA7_ADDRDEC2_COL_SEL_LO_CS01__COL1__SHIFT 0x4 +#define MMEA7_ADDRDEC2_COL_SEL_LO_CS01__COL2__SHIFT 0x8 +#define MMEA7_ADDRDEC2_COL_SEL_LO_CS01__COL3__SHIFT 0xc +#define MMEA7_ADDRDEC2_COL_SEL_LO_CS01__COL4__SHIFT 0x10 +#define MMEA7_ADDRDEC2_COL_SEL_LO_CS01__COL5__SHIFT 0x14 +#define MMEA7_ADDRDEC2_COL_SEL_LO_CS01__COL6__SHIFT 0x18 +#define MMEA7_ADDRDEC2_COL_SEL_LO_CS01__COL7__SHIFT 0x1c +#define MMEA7_ADDRDEC2_COL_SEL_LO_CS01__COL0_MASK 0x0000000FL +#define MMEA7_ADDRDEC2_COL_SEL_LO_CS01__COL1_MASK 0x000000F0L +#define MMEA7_ADDRDEC2_COL_SEL_LO_CS01__COL2_MASK 0x00000F00L +#define MMEA7_ADDRDEC2_COL_SEL_LO_CS01__COL3_MASK 0x0000F000L +#define MMEA7_ADDRDEC2_COL_SEL_LO_CS01__COL4_MASK 0x000F0000L +#define MMEA7_ADDRDEC2_COL_SEL_LO_CS01__COL5_MASK 0x00F00000L +#define MMEA7_ADDRDEC2_COL_SEL_LO_CS01__COL6_MASK 0x0F000000L +#define MMEA7_ADDRDEC2_COL_SEL_LO_CS01__COL7_MASK 0xF0000000L +//MMEA7_ADDRDEC2_COL_SEL_LO_CS23 +#define MMEA7_ADDRDEC2_COL_SEL_LO_CS23__COL0__SHIFT 0x0 +#define MMEA7_ADDRDEC2_COL_SEL_LO_CS23__COL1__SHIFT 0x4 +#define MMEA7_ADDRDEC2_COL_SEL_LO_CS23__COL2__SHIFT 0x8 +#define MMEA7_ADDRDEC2_COL_SEL_LO_CS23__COL3__SHIFT 0xc +#define MMEA7_ADDRDEC2_COL_SEL_LO_CS23__COL4__SHIFT 0x10 +#define MMEA7_ADDRDEC2_COL_SEL_LO_CS23__COL5__SHIFT 0x14 +#define MMEA7_ADDRDEC2_COL_SEL_LO_CS23__COL6__SHIFT 0x18 +#define MMEA7_ADDRDEC2_COL_SEL_LO_CS23__COL7__SHIFT 0x1c +#define MMEA7_ADDRDEC2_COL_SEL_LO_CS23__COL0_MASK 0x0000000FL +#define MMEA7_ADDRDEC2_COL_SEL_LO_CS23__COL1_MASK 0x000000F0L +#define MMEA7_ADDRDEC2_COL_SEL_LO_CS23__COL2_MASK 0x00000F00L +#define MMEA7_ADDRDEC2_COL_SEL_LO_CS23__COL3_MASK 0x0000F000L +#define MMEA7_ADDRDEC2_COL_SEL_LO_CS23__COL4_MASK 0x000F0000L +#define MMEA7_ADDRDEC2_COL_SEL_LO_CS23__COL5_MASK 0x00F00000L +#define MMEA7_ADDRDEC2_COL_SEL_LO_CS23__COL6_MASK 0x0F000000L +#define MMEA7_ADDRDEC2_COL_SEL_LO_CS23__COL7_MASK 0xF0000000L +//MMEA7_ADDRDEC2_COL_SEL_HI_CS01 +#define MMEA7_ADDRDEC2_COL_SEL_HI_CS01__COL8__SHIFT 0x0 +#define MMEA7_ADDRDEC2_COL_SEL_HI_CS01__COL9__SHIFT 0x4 +#define MMEA7_ADDRDEC2_COL_SEL_HI_CS01__COL10__SHIFT 0x8 +#define MMEA7_ADDRDEC2_COL_SEL_HI_CS01__COL11__SHIFT 0xc +#define MMEA7_ADDRDEC2_COL_SEL_HI_CS01__COL12__SHIFT 0x10 +#define MMEA7_ADDRDEC2_COL_SEL_HI_CS01__COL13__SHIFT 0x14 +#define MMEA7_ADDRDEC2_COL_SEL_HI_CS01__COL14__SHIFT 0x18 +#define MMEA7_ADDRDEC2_COL_SEL_HI_CS01__COL15__SHIFT 0x1c +#define MMEA7_ADDRDEC2_COL_SEL_HI_CS01__COL8_MASK 0x0000000FL +#define MMEA7_ADDRDEC2_COL_SEL_HI_CS01__COL9_MASK 0x000000F0L +#define MMEA7_ADDRDEC2_COL_SEL_HI_CS01__COL10_MASK 0x00000F00L +#define MMEA7_ADDRDEC2_COL_SEL_HI_CS01__COL11_MASK 0x0000F000L +#define MMEA7_ADDRDEC2_COL_SEL_HI_CS01__COL12_MASK 0x000F0000L +#define MMEA7_ADDRDEC2_COL_SEL_HI_CS01__COL13_MASK 0x00F00000L +#define MMEA7_ADDRDEC2_COL_SEL_HI_CS01__COL14_MASK 0x0F000000L +#define MMEA7_ADDRDEC2_COL_SEL_HI_CS01__COL15_MASK 0xF0000000L +//MMEA7_ADDRDEC2_COL_SEL_HI_CS23 +#define MMEA7_ADDRDEC2_COL_SEL_HI_CS23__COL8__SHIFT 0x0 +#define MMEA7_ADDRDEC2_COL_SEL_HI_CS23__COL9__SHIFT 0x4 +#define MMEA7_ADDRDEC2_COL_SEL_HI_CS23__COL10__SHIFT 0x8 +#define MMEA7_ADDRDEC2_COL_SEL_HI_CS23__COL11__SHIFT 0xc +#define MMEA7_ADDRDEC2_COL_SEL_HI_CS23__COL12__SHIFT 0x10 +#define MMEA7_ADDRDEC2_COL_SEL_HI_CS23__COL13__SHIFT 0x14 +#define MMEA7_ADDRDEC2_COL_SEL_HI_CS23__COL14__SHIFT 0x18 +#define MMEA7_ADDRDEC2_COL_SEL_HI_CS23__COL15__SHIFT 0x1c +#define 
MMEA7_ADDRDEC2_COL_SEL_HI_CS23__COL8_MASK 0x0000000FL +#define MMEA7_ADDRDEC2_COL_SEL_HI_CS23__COL9_MASK 0x000000F0L +#define MMEA7_ADDRDEC2_COL_SEL_HI_CS23__COL10_MASK 0x00000F00L +#define MMEA7_ADDRDEC2_COL_SEL_HI_CS23__COL11_MASK 0x0000F000L +#define MMEA7_ADDRDEC2_COL_SEL_HI_CS23__COL12_MASK 0x000F0000L +#define MMEA7_ADDRDEC2_COL_SEL_HI_CS23__COL13_MASK 0x00F00000L +#define MMEA7_ADDRDEC2_COL_SEL_HI_CS23__COL14_MASK 0x0F000000L +#define MMEA7_ADDRDEC2_COL_SEL_HI_CS23__COL15_MASK 0xF0000000L +//MMEA7_ADDRDEC2_RM_SEL_CS01 +#define MMEA7_ADDRDEC2_RM_SEL_CS01__RM0__SHIFT 0x0 +#define MMEA7_ADDRDEC2_RM_SEL_CS01__RM1__SHIFT 0x4 +#define MMEA7_ADDRDEC2_RM_SEL_CS01__RM2__SHIFT 0x8 +#define MMEA7_ADDRDEC2_RM_SEL_CS01__CHAN_BIT__SHIFT 0xc +#define MMEA7_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA7_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA7_ADDRDEC2_RM_SEL_CS01__RM0_MASK 0x0000000FL +#define MMEA7_ADDRDEC2_RM_SEL_CS01__RM1_MASK 0x000000F0L +#define MMEA7_ADDRDEC2_RM_SEL_CS01__RM2_MASK 0x00000F00L +#define MMEA7_ADDRDEC2_RM_SEL_CS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA7_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA7_ADDRDEC2_RM_SEL_CS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA7_ADDRDEC2_RM_SEL_CS23 +#define MMEA7_ADDRDEC2_RM_SEL_CS23__RM0__SHIFT 0x0 +#define MMEA7_ADDRDEC2_RM_SEL_CS23__RM1__SHIFT 0x4 +#define MMEA7_ADDRDEC2_RM_SEL_CS23__RM2__SHIFT 0x8 +#define MMEA7_ADDRDEC2_RM_SEL_CS23__CHAN_BIT__SHIFT 0xc +#define MMEA7_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA7_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA7_ADDRDEC2_RM_SEL_CS23__RM0_MASK 0x0000000FL +#define MMEA7_ADDRDEC2_RM_SEL_CS23__RM1_MASK 0x000000F0L +#define MMEA7_ADDRDEC2_RM_SEL_CS23__RM2_MASK 0x00000F00L +#define MMEA7_ADDRDEC2_RM_SEL_CS23__CHAN_BIT_MASK 0x0000F000L +#define MMEA7_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA7_ADDRDEC2_RM_SEL_CS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA7_ADDRDEC2_RM_SEL_SECCS01 +#define MMEA7_ADDRDEC2_RM_SEL_SECCS01__RM0__SHIFT 0x0 +#define MMEA7_ADDRDEC2_RM_SEL_SECCS01__RM1__SHIFT 0x4 +#define MMEA7_ADDRDEC2_RM_SEL_SECCS01__RM2__SHIFT 0x8 +#define MMEA7_ADDRDEC2_RM_SEL_SECCS01__CHAN_BIT__SHIFT 0xc +#define MMEA7_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA7_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA7_ADDRDEC2_RM_SEL_SECCS01__RM0_MASK 0x0000000FL +#define MMEA7_ADDRDEC2_RM_SEL_SECCS01__RM1_MASK 0x000000F0L +#define MMEA7_ADDRDEC2_RM_SEL_SECCS01__RM2_MASK 0x00000F00L +#define MMEA7_ADDRDEC2_RM_SEL_SECCS01__CHAN_BIT_MASK 0x0000F000L +#define MMEA7_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA7_ADDRDEC2_RM_SEL_SECCS01__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA7_ADDRDEC2_RM_SEL_SECCS23 +#define MMEA7_ADDRDEC2_RM_SEL_SECCS23__RM0__SHIFT 0x0 +#define MMEA7_ADDRDEC2_RM_SEL_SECCS23__RM1__SHIFT 0x4 +#define MMEA7_ADDRDEC2_RM_SEL_SECCS23__RM2__SHIFT 0x8 +#define MMEA7_ADDRDEC2_RM_SEL_SECCS23__CHAN_BIT__SHIFT 0xc +#define MMEA7_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN__SHIFT 0x10 +#define MMEA7_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD__SHIFT 0x12 +#define MMEA7_ADDRDEC2_RM_SEL_SECCS23__RM0_MASK 0x0000000FL +#define MMEA7_ADDRDEC2_RM_SEL_SECCS23__RM1_MASK 0x000000F0L +#define MMEA7_ADDRDEC2_RM_SEL_SECCS23__RM2_MASK 0x00000F00L +#define MMEA7_ADDRDEC2_RM_SEL_SECCS23__CHAN_BIT_MASK 0x0000F000L +#define 
MMEA7_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_EVEN_MASK 0x00030000L +#define MMEA7_ADDRDEC2_RM_SEL_SECCS23__INVERT_ROW_MSBS_ODD_MASK 0x000C0000L +//MMEA7_ADDRNORMDRAM_GLOBAL_CNTL +#define MMEA7_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K__SHIFT 0x14 +#define MMEA7_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M__SHIFT 0x15 +#define MMEA7_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G__SHIFT 0x16 +#define MMEA7_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K_MASK 0x00100000L +#define MMEA7_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M_MASK 0x00200000L +#define MMEA7_ADDRNORMDRAM_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G_MASK 0x00400000L +//MMEA7_ADDRNORMGMI_GLOBAL_CNTL +#define MMEA7_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K__SHIFT 0x14 +#define MMEA7_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M__SHIFT 0x15 +#define MMEA7_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G__SHIFT 0x16 +#define MMEA7_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_64K_MASK 0x00100000L +#define MMEA7_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_2M_MASK 0x00200000L +#define MMEA7_ADDRNORMGMI_GLOBAL_CNTL__GLB_HASH_INTLV_CTL_1G_MASK 0x00400000L +//MMEA7_IO_RD_CLI2GRP_MAP0 +#define MMEA7_IO_RD_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0 +#define MMEA7_IO_RD_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2 +#define MMEA7_IO_RD_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4 +#define MMEA7_IO_RD_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6 +#define MMEA7_IO_RD_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8 +#define MMEA7_IO_RD_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa +#define MMEA7_IO_RD_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc +#define MMEA7_IO_RD_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe +#define MMEA7_IO_RD_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10 +#define MMEA7_IO_RD_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12 +#define MMEA7_IO_RD_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14 +#define MMEA7_IO_RD_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16 +#define MMEA7_IO_RD_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18 +#define MMEA7_IO_RD_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a +#define MMEA7_IO_RD_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c +#define MMEA7_IO_RD_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e +#define MMEA7_IO_RD_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L +#define MMEA7_IO_RD_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL +#define MMEA7_IO_RD_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L +#define MMEA7_IO_RD_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L +#define MMEA7_IO_RD_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L +#define MMEA7_IO_RD_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L +#define MMEA7_IO_RD_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L +#define MMEA7_IO_RD_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L +#define MMEA7_IO_RD_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L +#define MMEA7_IO_RD_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L +#define MMEA7_IO_RD_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L +#define MMEA7_IO_RD_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L +#define MMEA7_IO_RD_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L +#define MMEA7_IO_RD_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L +#define MMEA7_IO_RD_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L +#define MMEA7_IO_RD_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L +//MMEA7_IO_RD_CLI2GRP_MAP1 +#define MMEA7_IO_RD_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0 +#define MMEA7_IO_RD_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2 +#define MMEA7_IO_RD_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4 +#define MMEA7_IO_RD_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6 +#define MMEA7_IO_RD_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8 +#define MMEA7_IO_RD_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa +#define MMEA7_IO_RD_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc +#define 
MMEA7_IO_RD_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe +#define MMEA7_IO_RD_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10 +#define MMEA7_IO_RD_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12 +#define MMEA7_IO_RD_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14 +#define MMEA7_IO_RD_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16 +#define MMEA7_IO_RD_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18 +#define MMEA7_IO_RD_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a +#define MMEA7_IO_RD_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c +#define MMEA7_IO_RD_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e +#define MMEA7_IO_RD_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L +#define MMEA7_IO_RD_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL +#define MMEA7_IO_RD_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L +#define MMEA7_IO_RD_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L +#define MMEA7_IO_RD_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L +#define MMEA7_IO_RD_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L +#define MMEA7_IO_RD_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L +#define MMEA7_IO_RD_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L +#define MMEA7_IO_RD_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L +#define MMEA7_IO_RD_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L +#define MMEA7_IO_RD_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L +#define MMEA7_IO_RD_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L +#define MMEA7_IO_RD_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L +#define MMEA7_IO_RD_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L +#define MMEA7_IO_RD_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L +#define MMEA7_IO_RD_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L +//MMEA7_IO_WR_CLI2GRP_MAP0 +#define MMEA7_IO_WR_CLI2GRP_MAP0__CID0_GROUP__SHIFT 0x0 +#define MMEA7_IO_WR_CLI2GRP_MAP0__CID1_GROUP__SHIFT 0x2 +#define MMEA7_IO_WR_CLI2GRP_MAP0__CID2_GROUP__SHIFT 0x4 +#define MMEA7_IO_WR_CLI2GRP_MAP0__CID3_GROUP__SHIFT 0x6 +#define MMEA7_IO_WR_CLI2GRP_MAP0__CID4_GROUP__SHIFT 0x8 +#define MMEA7_IO_WR_CLI2GRP_MAP0__CID5_GROUP__SHIFT 0xa +#define MMEA7_IO_WR_CLI2GRP_MAP0__CID6_GROUP__SHIFT 0xc +#define MMEA7_IO_WR_CLI2GRP_MAP0__CID7_GROUP__SHIFT 0xe +#define MMEA7_IO_WR_CLI2GRP_MAP0__CID8_GROUP__SHIFT 0x10 +#define MMEA7_IO_WR_CLI2GRP_MAP0__CID9_GROUP__SHIFT 0x12 +#define MMEA7_IO_WR_CLI2GRP_MAP0__CID10_GROUP__SHIFT 0x14 +#define MMEA7_IO_WR_CLI2GRP_MAP0__CID11_GROUP__SHIFT 0x16 +#define MMEA7_IO_WR_CLI2GRP_MAP0__CID12_GROUP__SHIFT 0x18 +#define MMEA7_IO_WR_CLI2GRP_MAP0__CID13_GROUP__SHIFT 0x1a +#define MMEA7_IO_WR_CLI2GRP_MAP0__CID14_GROUP__SHIFT 0x1c +#define MMEA7_IO_WR_CLI2GRP_MAP0__CID15_GROUP__SHIFT 0x1e +#define MMEA7_IO_WR_CLI2GRP_MAP0__CID0_GROUP_MASK 0x00000003L +#define MMEA7_IO_WR_CLI2GRP_MAP0__CID1_GROUP_MASK 0x0000000CL +#define MMEA7_IO_WR_CLI2GRP_MAP0__CID2_GROUP_MASK 0x00000030L +#define MMEA7_IO_WR_CLI2GRP_MAP0__CID3_GROUP_MASK 0x000000C0L +#define MMEA7_IO_WR_CLI2GRP_MAP0__CID4_GROUP_MASK 0x00000300L +#define MMEA7_IO_WR_CLI2GRP_MAP0__CID5_GROUP_MASK 0x00000C00L +#define MMEA7_IO_WR_CLI2GRP_MAP0__CID6_GROUP_MASK 0x00003000L +#define MMEA7_IO_WR_CLI2GRP_MAP0__CID7_GROUP_MASK 0x0000C000L +#define MMEA7_IO_WR_CLI2GRP_MAP0__CID8_GROUP_MASK 0x00030000L +#define MMEA7_IO_WR_CLI2GRP_MAP0__CID9_GROUP_MASK 0x000C0000L +#define MMEA7_IO_WR_CLI2GRP_MAP0__CID10_GROUP_MASK 0x00300000L +#define MMEA7_IO_WR_CLI2GRP_MAP0__CID11_GROUP_MASK 0x00C00000L +#define MMEA7_IO_WR_CLI2GRP_MAP0__CID12_GROUP_MASK 0x03000000L +#define MMEA7_IO_WR_CLI2GRP_MAP0__CID13_GROUP_MASK 0x0C000000L +#define MMEA7_IO_WR_CLI2GRP_MAP0__CID14_GROUP_MASK 0x30000000L +#define MMEA7_IO_WR_CLI2GRP_MAP0__CID15_GROUP_MASK 0xC0000000L +//MMEA7_IO_WR_CLI2GRP_MAP1 +#define 
MMEA7_IO_WR_CLI2GRP_MAP1__CID16_GROUP__SHIFT 0x0 +#define MMEA7_IO_WR_CLI2GRP_MAP1__CID17_GROUP__SHIFT 0x2 +#define MMEA7_IO_WR_CLI2GRP_MAP1__CID18_GROUP__SHIFT 0x4 +#define MMEA7_IO_WR_CLI2GRP_MAP1__CID19_GROUP__SHIFT 0x6 +#define MMEA7_IO_WR_CLI2GRP_MAP1__CID20_GROUP__SHIFT 0x8 +#define MMEA7_IO_WR_CLI2GRP_MAP1__CID21_GROUP__SHIFT 0xa +#define MMEA7_IO_WR_CLI2GRP_MAP1__CID22_GROUP__SHIFT 0xc +#define MMEA7_IO_WR_CLI2GRP_MAP1__CID23_GROUP__SHIFT 0xe +#define MMEA7_IO_WR_CLI2GRP_MAP1__CID24_GROUP__SHIFT 0x10 +#define MMEA7_IO_WR_CLI2GRP_MAP1__CID25_GROUP__SHIFT 0x12 +#define MMEA7_IO_WR_CLI2GRP_MAP1__CID26_GROUP__SHIFT 0x14 +#define MMEA7_IO_WR_CLI2GRP_MAP1__CID27_GROUP__SHIFT 0x16 +#define MMEA7_IO_WR_CLI2GRP_MAP1__CID28_GROUP__SHIFT 0x18 +#define MMEA7_IO_WR_CLI2GRP_MAP1__CID29_GROUP__SHIFT 0x1a +#define MMEA7_IO_WR_CLI2GRP_MAP1__CID30_GROUP__SHIFT 0x1c +#define MMEA7_IO_WR_CLI2GRP_MAP1__CID31_GROUP__SHIFT 0x1e +#define MMEA7_IO_WR_CLI2GRP_MAP1__CID16_GROUP_MASK 0x00000003L +#define MMEA7_IO_WR_CLI2GRP_MAP1__CID17_GROUP_MASK 0x0000000CL +#define MMEA7_IO_WR_CLI2GRP_MAP1__CID18_GROUP_MASK 0x00000030L +#define MMEA7_IO_WR_CLI2GRP_MAP1__CID19_GROUP_MASK 0x000000C0L +#define MMEA7_IO_WR_CLI2GRP_MAP1__CID20_GROUP_MASK 0x00000300L +#define MMEA7_IO_WR_CLI2GRP_MAP1__CID21_GROUP_MASK 0x00000C00L +#define MMEA7_IO_WR_CLI2GRP_MAP1__CID22_GROUP_MASK 0x00003000L +#define MMEA7_IO_WR_CLI2GRP_MAP1__CID23_GROUP_MASK 0x0000C000L +#define MMEA7_IO_WR_CLI2GRP_MAP1__CID24_GROUP_MASK 0x00030000L +#define MMEA7_IO_WR_CLI2GRP_MAP1__CID25_GROUP_MASK 0x000C0000L +#define MMEA7_IO_WR_CLI2GRP_MAP1__CID26_GROUP_MASK 0x00300000L +#define MMEA7_IO_WR_CLI2GRP_MAP1__CID27_GROUP_MASK 0x00C00000L +#define MMEA7_IO_WR_CLI2GRP_MAP1__CID28_GROUP_MASK 0x03000000L +#define MMEA7_IO_WR_CLI2GRP_MAP1__CID29_GROUP_MASK 0x0C000000L +#define MMEA7_IO_WR_CLI2GRP_MAP1__CID30_GROUP_MASK 0x30000000L +#define MMEA7_IO_WR_CLI2GRP_MAP1__CID31_GROUP_MASK 0xC0000000L +//MMEA7_IO_RD_COMBINE_FLUSH +#define MMEA7_IO_RD_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0 +#define MMEA7_IO_RD_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4 +#define MMEA7_IO_RD_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8 +#define MMEA7_IO_RD_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc +#define MMEA7_IO_RD_COMBINE_FLUSH__FORWARD_COMB_ONLY__SHIFT 0x10 +#define MMEA7_IO_RD_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL +#define MMEA7_IO_RD_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L +#define MMEA7_IO_RD_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L +#define MMEA7_IO_RD_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L +#define MMEA7_IO_RD_COMBINE_FLUSH__FORWARD_COMB_ONLY_MASK 0x00010000L +//MMEA7_IO_WR_COMBINE_FLUSH +#define MMEA7_IO_WR_COMBINE_FLUSH__GROUP0_TIMER__SHIFT 0x0 +#define MMEA7_IO_WR_COMBINE_FLUSH__GROUP1_TIMER__SHIFT 0x4 +#define MMEA7_IO_WR_COMBINE_FLUSH__GROUP2_TIMER__SHIFT 0x8 +#define MMEA7_IO_WR_COMBINE_FLUSH__GROUP3_TIMER__SHIFT 0xc +#define MMEA7_IO_WR_COMBINE_FLUSH__FORWARD_COMB_ONLY__SHIFT 0x10 +#define MMEA7_IO_WR_COMBINE_FLUSH__GROUP0_TIMER_MASK 0x0000000FL +#define MMEA7_IO_WR_COMBINE_FLUSH__GROUP1_TIMER_MASK 0x000000F0L +#define MMEA7_IO_WR_COMBINE_FLUSH__GROUP2_TIMER_MASK 0x00000F00L +#define MMEA7_IO_WR_COMBINE_FLUSH__GROUP3_TIMER_MASK 0x0000F000L +#define MMEA7_IO_WR_COMBINE_FLUSH__FORWARD_COMB_ONLY_MASK 0x00010000L +//MMEA7_IO_GROUP_BURST +#define MMEA7_IO_GROUP_BURST__RD_LIMIT_LO__SHIFT 0x0 +#define MMEA7_IO_GROUP_BURST__RD_LIMIT_HI__SHIFT 0x8 +#define MMEA7_IO_GROUP_BURST__WR_LIMIT_LO__SHIFT 0x10 +#define MMEA7_IO_GROUP_BURST__WR_LIMIT_HI__SHIFT 0x18 +#define 
MMEA7_IO_GROUP_BURST__RD_LIMIT_LO_MASK 0x000000FFL +#define MMEA7_IO_GROUP_BURST__RD_LIMIT_HI_MASK 0x0000FF00L +#define MMEA7_IO_GROUP_BURST__WR_LIMIT_LO_MASK 0x00FF0000L +#define MMEA7_IO_GROUP_BURST__WR_LIMIT_HI_MASK 0xFF000000L +//MMEA7_IO_RD_PRI_AGE +#define MMEA7_IO_RD_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0 +#define MMEA7_IO_RD_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3 +#define MMEA7_IO_RD_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6 +#define MMEA7_IO_RD_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9 +#define MMEA7_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc +#define MMEA7_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf +#define MMEA7_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12 +#define MMEA7_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15 +#define MMEA7_IO_RD_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L +#define MMEA7_IO_RD_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L +#define MMEA7_IO_RD_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L +#define MMEA7_IO_RD_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L +#define MMEA7_IO_RD_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L +#define MMEA7_IO_RD_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L +#define MMEA7_IO_RD_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L +#define MMEA7_IO_RD_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L +//MMEA7_IO_WR_PRI_AGE +#define MMEA7_IO_WR_PRI_AGE__GROUP0_AGING_RATE__SHIFT 0x0 +#define MMEA7_IO_WR_PRI_AGE__GROUP1_AGING_RATE__SHIFT 0x3 +#define MMEA7_IO_WR_PRI_AGE__GROUP2_AGING_RATE__SHIFT 0x6 +#define MMEA7_IO_WR_PRI_AGE__GROUP3_AGING_RATE__SHIFT 0x9 +#define MMEA7_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT__SHIFT 0xc +#define MMEA7_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT__SHIFT 0xf +#define MMEA7_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT__SHIFT 0x12 +#define MMEA7_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT__SHIFT 0x15 +#define MMEA7_IO_WR_PRI_AGE__GROUP0_AGING_RATE_MASK 0x00000007L +#define MMEA7_IO_WR_PRI_AGE__GROUP1_AGING_RATE_MASK 0x00000038L +#define MMEA7_IO_WR_PRI_AGE__GROUP2_AGING_RATE_MASK 0x000001C0L +#define MMEA7_IO_WR_PRI_AGE__GROUP3_AGING_RATE_MASK 0x00000E00L +#define MMEA7_IO_WR_PRI_AGE__GROUP0_AGE_COEFFICIENT_MASK 0x00007000L +#define MMEA7_IO_WR_PRI_AGE__GROUP1_AGE_COEFFICIENT_MASK 0x00038000L +#define MMEA7_IO_WR_PRI_AGE__GROUP2_AGE_COEFFICIENT_MASK 0x001C0000L +#define MMEA7_IO_WR_PRI_AGE__GROUP3_AGE_COEFFICIENT_MASK 0x00E00000L +//MMEA7_IO_RD_PRI_QUEUING +#define MMEA7_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0 +#define MMEA7_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3 +#define MMEA7_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6 +#define MMEA7_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9 +#define MMEA7_IO_RD_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L +#define MMEA7_IO_RD_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L +#define MMEA7_IO_RD_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L +#define MMEA7_IO_RD_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L +//MMEA7_IO_WR_PRI_QUEUING +#define MMEA7_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT__SHIFT 0x0 +#define MMEA7_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT__SHIFT 0x3 +#define MMEA7_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT__SHIFT 0x6 +#define MMEA7_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT__SHIFT 0x9 +#define MMEA7_IO_WR_PRI_QUEUING__GROUP0_QUEUING_COEFFICIENT_MASK 0x00000007L +#define MMEA7_IO_WR_PRI_QUEUING__GROUP1_QUEUING_COEFFICIENT_MASK 0x00000038L +#define MMEA7_IO_WR_PRI_QUEUING__GROUP2_QUEUING_COEFFICIENT_MASK 0x000001C0L +#define 
MMEA7_IO_WR_PRI_QUEUING__GROUP3_QUEUING_COEFFICIENT_MASK 0x00000E00L +//MMEA7_IO_RD_PRI_FIXED +#define MMEA7_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0 +#define MMEA7_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3 +#define MMEA7_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6 +#define MMEA7_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9 +#define MMEA7_IO_RD_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L +#define MMEA7_IO_RD_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L +#define MMEA7_IO_RD_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L +#define MMEA7_IO_RD_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L +//MMEA7_IO_WR_PRI_FIXED +#define MMEA7_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT__SHIFT 0x0 +#define MMEA7_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT__SHIFT 0x3 +#define MMEA7_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT__SHIFT 0x6 +#define MMEA7_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT__SHIFT 0x9 +#define MMEA7_IO_WR_PRI_FIXED__GROUP0_FIXED_COEFFICIENT_MASK 0x00000007L +#define MMEA7_IO_WR_PRI_FIXED__GROUP1_FIXED_COEFFICIENT_MASK 0x00000038L +#define MMEA7_IO_WR_PRI_FIXED__GROUP2_FIXED_COEFFICIENT_MASK 0x000001C0L +#define MMEA7_IO_WR_PRI_FIXED__GROUP3_FIXED_COEFFICIENT_MASK 0x00000E00L +//MMEA7_IO_RD_PRI_URGENCY +#define MMEA7_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0 +#define MMEA7_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3 +#define MMEA7_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6 +#define MMEA7_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9 +#define MMEA7_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc +#define MMEA7_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd +#define MMEA7_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe +#define MMEA7_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf +#define MMEA7_IO_RD_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L +#define MMEA7_IO_RD_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L +#define MMEA7_IO_RD_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L +#define MMEA7_IO_RD_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L +#define MMEA7_IO_RD_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L +#define MMEA7_IO_RD_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L +#define MMEA7_IO_RD_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L +#define MMEA7_IO_RD_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L +//MMEA7_IO_WR_PRI_URGENCY +#define MMEA7_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT__SHIFT 0x0 +#define MMEA7_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT__SHIFT 0x3 +#define MMEA7_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT__SHIFT 0x6 +#define MMEA7_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT__SHIFT 0x9 +#define MMEA7_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE__SHIFT 0xc +#define MMEA7_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE__SHIFT 0xd +#define MMEA7_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE__SHIFT 0xe +#define MMEA7_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE__SHIFT 0xf +#define MMEA7_IO_WR_PRI_URGENCY__GROUP0_URGENCY_COEFFICIENT_MASK 0x00000007L +#define MMEA7_IO_WR_PRI_URGENCY__GROUP1_URGENCY_COEFFICIENT_MASK 0x00000038L +#define MMEA7_IO_WR_PRI_URGENCY__GROUP2_URGENCY_COEFFICIENT_MASK 0x000001C0L +#define MMEA7_IO_WR_PRI_URGENCY__GROUP3_URGENCY_COEFFICIENT_MASK 0x00000E00L +#define MMEA7_IO_WR_PRI_URGENCY__GROUP0_URGENCY_MODE_MASK 0x00001000L +#define MMEA7_IO_WR_PRI_URGENCY__GROUP1_URGENCY_MODE_MASK 0x00002000L +#define MMEA7_IO_WR_PRI_URGENCY__GROUP2_URGENCY_MODE_MASK 0x00004000L +#define 
MMEA7_IO_WR_PRI_URGENCY__GROUP3_URGENCY_MODE_MASK 0x00008000L +//MMEA7_IO_RD_PRI_URGENCY_MASKING +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0 +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1 +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2 +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3 +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4 +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5 +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6 +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7 +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8 +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9 +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10 +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11 +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12 +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13 +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14 +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15 +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16 +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17 +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18 +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19 +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L +#define 
MMEA7_IO_RD_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L +#define MMEA7_IO_RD_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L +//MMEA7_IO_WR_PRI_URGENCY_MASKING +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID0_MASK__SHIFT 0x0 +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID1_MASK__SHIFT 0x1 +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID2_MASK__SHIFT 0x2 +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID3_MASK__SHIFT 0x3 +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID4_MASK__SHIFT 0x4 +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID5_MASK__SHIFT 0x5 +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID6_MASK__SHIFT 0x6 +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID7_MASK__SHIFT 0x7 +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID8_MASK__SHIFT 0x8 +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID9_MASK__SHIFT 0x9 +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID10_MASK__SHIFT 0xa +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID11_MASK__SHIFT 0xb +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID12_MASK__SHIFT 0xc +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID13_MASK__SHIFT 0xd +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID14_MASK__SHIFT 0xe +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID15_MASK__SHIFT 0xf +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID16_MASK__SHIFT 0x10 +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID17_MASK__SHIFT 0x11 +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID18_MASK__SHIFT 0x12 +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID19_MASK__SHIFT 0x13 +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID20_MASK__SHIFT 0x14 +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID21_MASK__SHIFT 0x15 +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID22_MASK__SHIFT 0x16 +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID23_MASK__SHIFT 0x17 +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID24_MASK__SHIFT 0x18 +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID25_MASK__SHIFT 0x19 +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID26_MASK__SHIFT 0x1a +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID27_MASK__SHIFT 0x1b +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID28_MASK__SHIFT 0x1c +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID29_MASK__SHIFT 0x1d +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID30_MASK__SHIFT 0x1e +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID31_MASK__SHIFT 0x1f +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID0_MASK_MASK 0x00000001L +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID1_MASK_MASK 0x00000002L +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID2_MASK_MASK 0x00000004L +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID3_MASK_MASK 0x00000008L +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID4_MASK_MASK 0x00000010L +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID5_MASK_MASK 0x00000020L +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID6_MASK_MASK 0x00000040L +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID7_MASK_MASK 0x00000080L +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID8_MASK_MASK 0x00000100L +#define 
MMEA7_IO_WR_PRI_URGENCY_MASKING__CID9_MASK_MASK 0x00000200L +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID10_MASK_MASK 0x00000400L +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID11_MASK_MASK 0x00000800L +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID12_MASK_MASK 0x00001000L +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID13_MASK_MASK 0x00002000L +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID14_MASK_MASK 0x00004000L +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID15_MASK_MASK 0x00008000L +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID16_MASK_MASK 0x00010000L +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID17_MASK_MASK 0x00020000L +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID18_MASK_MASK 0x00040000L +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID19_MASK_MASK 0x00080000L +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID20_MASK_MASK 0x00100000L +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID21_MASK_MASK 0x00200000L +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID22_MASK_MASK 0x00400000L +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID23_MASK_MASK 0x00800000L +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID24_MASK_MASK 0x01000000L +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID25_MASK_MASK 0x02000000L +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID26_MASK_MASK 0x04000000L +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID27_MASK_MASK 0x08000000L +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID28_MASK_MASK 0x10000000L +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID29_MASK_MASK 0x20000000L +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID30_MASK_MASK 0x40000000L +#define MMEA7_IO_WR_PRI_URGENCY_MASKING__CID31_MASK_MASK 0x80000000L +//MMEA7_IO_RD_PRI_QUANT_PRI1 +#define MMEA7_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA7_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA7_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA7_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA7_IO_RD_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA7_IO_RD_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA7_IO_RD_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA7_IO_RD_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA7_IO_RD_PRI_QUANT_PRI2 +#define MMEA7_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA7_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA7_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA7_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA7_IO_RD_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA7_IO_RD_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA7_IO_RD_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA7_IO_RD_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA7_IO_RD_PRI_QUANT_PRI3 +#define MMEA7_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA7_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA7_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA7_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA7_IO_RD_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA7_IO_RD_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA7_IO_RD_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA7_IO_RD_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA7_IO_WR_PRI_QUANT_PRI1 +#define MMEA7_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA7_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA7_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD__SHIFT 0x10 
+#define MMEA7_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA7_IO_WR_PRI_QUANT_PRI1__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA7_IO_WR_PRI_QUANT_PRI1__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA7_IO_WR_PRI_QUANT_PRI1__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA7_IO_WR_PRI_QUANT_PRI1__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA7_IO_WR_PRI_QUANT_PRI2 +#define MMEA7_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA7_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA7_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA7_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA7_IO_WR_PRI_QUANT_PRI2__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA7_IO_WR_PRI_QUANT_PRI2__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA7_IO_WR_PRI_QUANT_PRI2__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA7_IO_WR_PRI_QUANT_PRI2__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA7_IO_WR_PRI_QUANT_PRI3 +#define MMEA7_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD__SHIFT 0x0 +#define MMEA7_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD__SHIFT 0x8 +#define MMEA7_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD__SHIFT 0x10 +#define MMEA7_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD__SHIFT 0x18 +#define MMEA7_IO_WR_PRI_QUANT_PRI3__GROUP0_THRESHOLD_MASK 0x000000FFL +#define MMEA7_IO_WR_PRI_QUANT_PRI3__GROUP1_THRESHOLD_MASK 0x0000FF00L +#define MMEA7_IO_WR_PRI_QUANT_PRI3__GROUP2_THRESHOLD_MASK 0x00FF0000L +#define MMEA7_IO_WR_PRI_QUANT_PRI3__GROUP3_THRESHOLD_MASK 0xFF000000L +//MMEA7_SDP_ARB_DRAM +#define MMEA7_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0 +#define MMEA7_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA__SHIFT 0x8 +#define MMEA7_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI__SHIFT 0x10 +#define MMEA7_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI__SHIFT 0x11 +#define MMEA7_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES__SHIFT 0x12 +#define MMEA7_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES__SHIFT 0x13 +#define MMEA7_SDP_ARB_DRAM__EOB_ON_EXPIRE__SHIFT 0x14 +#define MMEA7_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15 +#define MMEA7_SDP_ARB_DRAM__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL +#define MMEA7_SDP_ARB_DRAM__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L +#define MMEA7_SDP_ARB_DRAM__EARLY_SW2RD_ON_PRI_MASK 0x00010000L +#define MMEA7_SDP_ARB_DRAM__EARLY_SW2WR_ON_PRI_MASK 0x00020000L +#define MMEA7_SDP_ARB_DRAM__EARLY_SW2RD_ON_RES_MASK 0x00040000L +#define MMEA7_SDP_ARB_DRAM__EARLY_SW2WR_ON_RES_MASK 0x00080000L +#define MMEA7_SDP_ARB_DRAM__EOB_ON_EXPIRE_MASK 0x00100000L +#define MMEA7_SDP_ARB_DRAM__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L +//MMEA7_SDP_ARB_GMI +#define MMEA7_SDP_ARB_GMI__RDWR_BURST_LIMIT_CYCL__SHIFT 0x0 +#define MMEA7_SDP_ARB_GMI__RDWR_BURST_LIMIT_DATA__SHIFT 0x8 +#define MMEA7_SDP_ARB_GMI__EARLY_SW2RD_ON_PRI__SHIFT 0x10 +#define MMEA7_SDP_ARB_GMI__EARLY_SW2WR_ON_PRI__SHIFT 0x11 +#define MMEA7_SDP_ARB_GMI__EARLY_SW2RD_ON_RES__SHIFT 0x12 +#define MMEA7_SDP_ARB_GMI__EARLY_SW2WR_ON_RES__SHIFT 0x13 +#define MMEA7_SDP_ARB_GMI__EOB_ON_EXPIRE__SHIFT 0x14 +#define MMEA7_SDP_ARB_GMI__DECOUPLE_RDWR_BNKSTATE__SHIFT 0x15 +#define MMEA7_SDP_ARB_GMI__ALLOW_CHAIN_BREAKING__SHIFT 0x16 +#define MMEA7_SDP_ARB_GMI__RDWR_BURST_LIMIT_CYCL_MASK 0x0000007FL +#define MMEA7_SDP_ARB_GMI__RDWR_BURST_LIMIT_DATA_MASK 0x00007F00L +#define MMEA7_SDP_ARB_GMI__EARLY_SW2RD_ON_PRI_MASK 0x00010000L +#define MMEA7_SDP_ARB_GMI__EARLY_SW2WR_ON_PRI_MASK 0x00020000L +#define MMEA7_SDP_ARB_GMI__EARLY_SW2RD_ON_RES_MASK 0x00040000L +#define MMEA7_SDP_ARB_GMI__EARLY_SW2WR_ON_RES_MASK 0x00080000L +#define MMEA7_SDP_ARB_GMI__EOB_ON_EXPIRE_MASK 0x00100000L +#define 
MMEA7_SDP_ARB_GMI__DECOUPLE_RDWR_BNKSTATE_MASK 0x00200000L +#define MMEA7_SDP_ARB_GMI__ALLOW_CHAIN_BREAKING_MASK 0x00400000L +//MMEA7_SDP_ARB_FINAL +#define MMEA7_SDP_ARB_FINAL__DRAM_BURST_LIMIT__SHIFT 0x0 +#define MMEA7_SDP_ARB_FINAL__GMI_BURST_LIMIT__SHIFT 0x5 +#define MMEA7_SDP_ARB_FINAL__IO_BURST_LIMIT__SHIFT 0xa +#define MMEA7_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER__SHIFT 0xf +#define MMEA7_SDP_ARB_FINAL__RDONLY_VC0__SHIFT 0x11 +#define MMEA7_SDP_ARB_FINAL__RDONLY_VC1__SHIFT 0x12 +#define MMEA7_SDP_ARB_FINAL__RDONLY_VC2__SHIFT 0x13 +#define MMEA7_SDP_ARB_FINAL__RDONLY_VC3__SHIFT 0x14 +#define MMEA7_SDP_ARB_FINAL__RDONLY_VC4__SHIFT 0x15 +#define MMEA7_SDP_ARB_FINAL__RDONLY_VC5__SHIFT 0x16 +#define MMEA7_SDP_ARB_FINAL__RDONLY_VC6__SHIFT 0x17 +#define MMEA7_SDP_ARB_FINAL__RDONLY_VC7__SHIFT 0x18 +#define MMEA7_SDP_ARB_FINAL__ERREVENT_ON_ERROR__SHIFT 0x19 +#define MMEA7_SDP_ARB_FINAL__HALTREQ_ON_ERROR__SHIFT 0x1a +#define MMEA7_SDP_ARB_FINAL__GMI_BURST_STRETCH__SHIFT 0x1b +#define MMEA7_SDP_ARB_FINAL__DRAM_BURST_LIMIT_MASK 0x0000001FL +#define MMEA7_SDP_ARB_FINAL__GMI_BURST_LIMIT_MASK 0x000003E0L +#define MMEA7_SDP_ARB_FINAL__IO_BURST_LIMIT_MASK 0x00007C00L +#define MMEA7_SDP_ARB_FINAL__BURST_LIMIT_MULTIPLIER_MASK 0x00018000L +#define MMEA7_SDP_ARB_FINAL__RDONLY_VC0_MASK 0x00020000L +#define MMEA7_SDP_ARB_FINAL__RDONLY_VC1_MASK 0x00040000L +#define MMEA7_SDP_ARB_FINAL__RDONLY_VC2_MASK 0x00080000L +#define MMEA7_SDP_ARB_FINAL__RDONLY_VC3_MASK 0x00100000L +#define MMEA7_SDP_ARB_FINAL__RDONLY_VC4_MASK 0x00200000L +#define MMEA7_SDP_ARB_FINAL__RDONLY_VC5_MASK 0x00400000L +#define MMEA7_SDP_ARB_FINAL__RDONLY_VC6_MASK 0x00800000L +#define MMEA7_SDP_ARB_FINAL__RDONLY_VC7_MASK 0x01000000L +#define MMEA7_SDP_ARB_FINAL__ERREVENT_ON_ERROR_MASK 0x02000000L +#define MMEA7_SDP_ARB_FINAL__HALTREQ_ON_ERROR_MASK 0x04000000L +#define MMEA7_SDP_ARB_FINAL__GMI_BURST_STRETCH_MASK 0x08000000L +//MMEA7_SDP_DRAM_PRIORITY +#define MMEA7_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0 +#define MMEA7_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4 +#define MMEA7_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8 +#define MMEA7_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc +#define MMEA7_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10 +#define MMEA7_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14 +#define MMEA7_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18 +#define MMEA7_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c +#define MMEA7_SDP_DRAM_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL +#define MMEA7_SDP_DRAM_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L +#define MMEA7_SDP_DRAM_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L +#define MMEA7_SDP_DRAM_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L +#define MMEA7_SDP_DRAM_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L +#define MMEA7_SDP_DRAM_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L +#define MMEA7_SDP_DRAM_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L +#define MMEA7_SDP_DRAM_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L +//MMEA7_SDP_GMI_PRIORITY +#define MMEA7_SDP_GMI_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0 +#define MMEA7_SDP_GMI_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4 +#define MMEA7_SDP_GMI_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8 +#define MMEA7_SDP_GMI_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc +#define MMEA7_SDP_GMI_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10 +#define MMEA7_SDP_GMI_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14 +#define MMEA7_SDP_GMI_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18 +#define MMEA7_SDP_GMI_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c +#define 
MMEA7_SDP_GMI_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL +#define MMEA7_SDP_GMI_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L +#define MMEA7_SDP_GMI_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L +#define MMEA7_SDP_GMI_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L +#define MMEA7_SDP_GMI_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L +#define MMEA7_SDP_GMI_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L +#define MMEA7_SDP_GMI_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L +#define MMEA7_SDP_GMI_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L +//MMEA7_SDP_IO_PRIORITY +#define MMEA7_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY__SHIFT 0x0 +#define MMEA7_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY__SHIFT 0x4 +#define MMEA7_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY__SHIFT 0x8 +#define MMEA7_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY__SHIFT 0xc +#define MMEA7_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY__SHIFT 0x10 +#define MMEA7_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY__SHIFT 0x14 +#define MMEA7_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY__SHIFT 0x18 +#define MMEA7_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY__SHIFT 0x1c +#define MMEA7_SDP_IO_PRIORITY__RD_GROUP0_PRIORITY_MASK 0x0000000FL +#define MMEA7_SDP_IO_PRIORITY__RD_GROUP1_PRIORITY_MASK 0x000000F0L +#define MMEA7_SDP_IO_PRIORITY__RD_GROUP2_PRIORITY_MASK 0x00000F00L +#define MMEA7_SDP_IO_PRIORITY__RD_GROUP3_PRIORITY_MASK 0x0000F000L +#define MMEA7_SDP_IO_PRIORITY__WR_GROUP0_PRIORITY_MASK 0x000F0000L +#define MMEA7_SDP_IO_PRIORITY__WR_GROUP1_PRIORITY_MASK 0x00F00000L +#define MMEA7_SDP_IO_PRIORITY__WR_GROUP2_PRIORITY_MASK 0x0F000000L +#define MMEA7_SDP_IO_PRIORITY__WR_GROUP3_PRIORITY_MASK 0xF0000000L +//MMEA7_SDP_CREDITS +#define MMEA7_SDP_CREDITS__TAG_LIMIT__SHIFT 0x0 +#define MMEA7_SDP_CREDITS__WR_RESP_CREDITS__SHIFT 0x8 +#define MMEA7_SDP_CREDITS__RD_RESP_CREDITS__SHIFT 0x10 +#define MMEA7_SDP_CREDITS__TAG_LIMIT_MASK 0x000000FFL +#define MMEA7_SDP_CREDITS__WR_RESP_CREDITS_MASK 0x00007F00L +#define MMEA7_SDP_CREDITS__RD_RESP_CREDITS_MASK 0x007F0000L +//MMEA7_SDP_TAG_RESERVE0 +#define MMEA7_SDP_TAG_RESERVE0__VC0__SHIFT 0x0 +#define MMEA7_SDP_TAG_RESERVE0__VC1__SHIFT 0x8 +#define MMEA7_SDP_TAG_RESERVE0__VC2__SHIFT 0x10 +#define MMEA7_SDP_TAG_RESERVE0__VC3__SHIFT 0x18 +#define MMEA7_SDP_TAG_RESERVE0__VC0_MASK 0x000000FFL +#define MMEA7_SDP_TAG_RESERVE0__VC1_MASK 0x0000FF00L +#define MMEA7_SDP_TAG_RESERVE0__VC2_MASK 0x00FF0000L +#define MMEA7_SDP_TAG_RESERVE0__VC3_MASK 0xFF000000L +//MMEA7_SDP_TAG_RESERVE1 +#define MMEA7_SDP_TAG_RESERVE1__VC4__SHIFT 0x0 +#define MMEA7_SDP_TAG_RESERVE1__VC5__SHIFT 0x8 +#define MMEA7_SDP_TAG_RESERVE1__VC6__SHIFT 0x10 +#define MMEA7_SDP_TAG_RESERVE1__VC7__SHIFT 0x18 +#define MMEA7_SDP_TAG_RESERVE1__VC4_MASK 0x000000FFL +#define MMEA7_SDP_TAG_RESERVE1__VC5_MASK 0x0000FF00L +#define MMEA7_SDP_TAG_RESERVE1__VC6_MASK 0x00FF0000L +#define MMEA7_SDP_TAG_RESERVE1__VC7_MASK 0xFF000000L +//MMEA7_SDP_VCC_RESERVE0 +#define MMEA7_SDP_VCC_RESERVE0__VC0_CREDITS__SHIFT 0x0 +#define MMEA7_SDP_VCC_RESERVE0__VC1_CREDITS__SHIFT 0x6 +#define MMEA7_SDP_VCC_RESERVE0__VC2_CREDITS__SHIFT 0xc +#define MMEA7_SDP_VCC_RESERVE0__VC3_CREDITS__SHIFT 0x12 +#define MMEA7_SDP_VCC_RESERVE0__VC4_CREDITS__SHIFT 0x18 +#define MMEA7_SDP_VCC_RESERVE0__VC0_CREDITS_MASK 0x0000003FL +#define MMEA7_SDP_VCC_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L +#define MMEA7_SDP_VCC_RESERVE0__VC2_CREDITS_MASK 0x0003F000L +#define MMEA7_SDP_VCC_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L +#define MMEA7_SDP_VCC_RESERVE0__VC4_CREDITS_MASK 0x3F000000L +//MMEA7_SDP_VCC_RESERVE1 +#define MMEA7_SDP_VCC_RESERVE1__VC5_CREDITS__SHIFT 0x0 +#define 
MMEA7_SDP_VCC_RESERVE1__VC6_CREDITS__SHIFT 0x6 +#define MMEA7_SDP_VCC_RESERVE1__VC7_CREDITS__SHIFT 0xc +#define MMEA7_SDP_VCC_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f +#define MMEA7_SDP_VCC_RESERVE1__VC5_CREDITS_MASK 0x0000003FL +#define MMEA7_SDP_VCC_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L +#define MMEA7_SDP_VCC_RESERVE1__VC7_CREDITS_MASK 0x0003F000L +#define MMEA7_SDP_VCC_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L +//MMEA7_SDP_VCD_RESERVE0 +#define MMEA7_SDP_VCD_RESERVE0__VC0_CREDITS__SHIFT 0x0 +#define MMEA7_SDP_VCD_RESERVE0__VC1_CREDITS__SHIFT 0x6 +#define MMEA7_SDP_VCD_RESERVE0__VC2_CREDITS__SHIFT 0xc +#define MMEA7_SDP_VCD_RESERVE0__VC3_CREDITS__SHIFT 0x12 +#define MMEA7_SDP_VCD_RESERVE0__VC4_CREDITS__SHIFT 0x18 +#define MMEA7_SDP_VCD_RESERVE0__VC0_CREDITS_MASK 0x0000003FL +#define MMEA7_SDP_VCD_RESERVE0__VC1_CREDITS_MASK 0x00000FC0L +#define MMEA7_SDP_VCD_RESERVE0__VC2_CREDITS_MASK 0x0003F000L +#define MMEA7_SDP_VCD_RESERVE0__VC3_CREDITS_MASK 0x00FC0000L +#define MMEA7_SDP_VCD_RESERVE0__VC4_CREDITS_MASK 0x3F000000L +//MMEA7_SDP_VCD_RESERVE1 +#define MMEA7_SDP_VCD_RESERVE1__VC5_CREDITS__SHIFT 0x0 +#define MMEA7_SDP_VCD_RESERVE1__VC6_CREDITS__SHIFT 0x6 +#define MMEA7_SDP_VCD_RESERVE1__VC7_CREDITS__SHIFT 0xc +#define MMEA7_SDP_VCD_RESERVE1__DISTRIBUTE_POOL__SHIFT 0x1f +#define MMEA7_SDP_VCD_RESERVE1__VC5_CREDITS_MASK 0x0000003FL +#define MMEA7_SDP_VCD_RESERVE1__VC6_CREDITS_MASK 0x00000FC0L +#define MMEA7_SDP_VCD_RESERVE1__VC7_CREDITS_MASK 0x0003F000L +#define MMEA7_SDP_VCD_RESERVE1__DISTRIBUTE_POOL_MASK 0x80000000L +//MMEA7_SDP_REQ_CNTL +#define MMEA7_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ__SHIFT 0x0 +#define MMEA7_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE__SHIFT 0x1 +#define MMEA7_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC__SHIFT 0x2 +#define MMEA7_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM__SHIFT 0x3 +#define MMEA7_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI__SHIFT 0x4 +#define MMEA7_SDP_REQ_CNTL__INNER_DOMAIN_MODE__SHIFT 0x5 +#define MMEA7_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_READ_MASK 0x00000001L +#define MMEA7_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_WRITE_MASK 0x00000002L +#define MMEA7_SDP_REQ_CNTL__REQ_PASS_PW_OVERRIDE_ATOMIC_MASK 0x00000004L +#define MMEA7_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_DRAM_MASK 0x00000008L +#define MMEA7_SDP_REQ_CNTL__REQ_CHAIN_OVERRIDE_GMI_MASK 0x00000010L +#define MMEA7_SDP_REQ_CNTL__INNER_DOMAIN_MODE_MASK 0x00000020L +//MMEA7_MISC +#define MMEA7_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB__SHIFT 0x0 +#define MMEA7_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB__SHIFT 0x1 +#define MMEA7_MISC__RELATIVE_PRI_IN_GMI_RD_ARB__SHIFT 0x2 +#define MMEA7_MISC__RELATIVE_PRI_IN_GMI_WR_ARB__SHIFT 0x3 +#define MMEA7_MISC__RELATIVE_PRI_IN_IO_RD_ARB__SHIFT 0x4 +#define MMEA7_MISC__RELATIVE_PRI_IN_IO_WR_ARB__SHIFT 0x5 +#define MMEA7_MISC__EARLYWRRET_ENABLE_VC0__SHIFT 0x6 +#define MMEA7_MISC__EARLYWRRET_ENABLE_VC1__SHIFT 0x7 +#define MMEA7_MISC__EARLYWRRET_ENABLE_VC2__SHIFT 0x8 +#define MMEA7_MISC__EARLYWRRET_ENABLE_VC3__SHIFT 0x9 +#define MMEA7_MISC__EARLYWRRET_ENABLE_VC4__SHIFT 0xa +#define MMEA7_MISC__EARLYWRRET_ENABLE_VC5__SHIFT 0xb +#define MMEA7_MISC__EARLYWRRET_ENABLE_VC6__SHIFT 0xc +#define MMEA7_MISC__EARLYWRRET_ENABLE_VC7__SHIFT 0xd +#define MMEA7_MISC__EARLY_SDP_ORIGDATA__SHIFT 0xe +#define MMEA7_MISC__LINKMGR_DYNAMIC_MODE__SHIFT 0xf +#define MMEA7_MISC__LINKMGR_HALT_THRESHOLD__SHIFT 0x11 +#define MMEA7_MISC__LINKMGR_RECONNECT_DELAY__SHIFT 0x13 +#define MMEA7_MISC__LINKMGR_IDLE_THRESHOLD__SHIFT 0x15 +#define MMEA7_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB__SHIFT 0x1a +#define 
MMEA7_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB__SHIFT 0x1b +#define MMEA7_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB__SHIFT 0x1c +#define MMEA7_MISC__FAVOUR_LAST_CS_IN_GMI_ARB__SHIFT 0x1d +#define MMEA7_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB__SHIFT 0x1e +#define MMEA7_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB__SHIFT 0x1f +#define MMEA7_MISC__RELATIVE_PRI_IN_DRAM_RD_ARB_MASK 0x00000001L +#define MMEA7_MISC__RELATIVE_PRI_IN_DRAM_WR_ARB_MASK 0x00000002L +#define MMEA7_MISC__RELATIVE_PRI_IN_GMI_RD_ARB_MASK 0x00000004L +#define MMEA7_MISC__RELATIVE_PRI_IN_GMI_WR_ARB_MASK 0x00000008L +#define MMEA7_MISC__RELATIVE_PRI_IN_IO_RD_ARB_MASK 0x00000010L +#define MMEA7_MISC__RELATIVE_PRI_IN_IO_WR_ARB_MASK 0x00000020L +#define MMEA7_MISC__EARLYWRRET_ENABLE_VC0_MASK 0x00000040L +#define MMEA7_MISC__EARLYWRRET_ENABLE_VC1_MASK 0x00000080L +#define MMEA7_MISC__EARLYWRRET_ENABLE_VC2_MASK 0x00000100L +#define MMEA7_MISC__EARLYWRRET_ENABLE_VC3_MASK 0x00000200L +#define MMEA7_MISC__EARLYWRRET_ENABLE_VC4_MASK 0x00000400L +#define MMEA7_MISC__EARLYWRRET_ENABLE_VC5_MASK 0x00000800L +#define MMEA7_MISC__EARLYWRRET_ENABLE_VC6_MASK 0x00001000L +#define MMEA7_MISC__EARLYWRRET_ENABLE_VC7_MASK 0x00002000L +#define MMEA7_MISC__EARLY_SDP_ORIGDATA_MASK 0x00004000L +#define MMEA7_MISC__LINKMGR_DYNAMIC_MODE_MASK 0x00018000L +#define MMEA7_MISC__LINKMGR_HALT_THRESHOLD_MASK 0x00060000L +#define MMEA7_MISC__LINKMGR_RECONNECT_DELAY_MASK 0x00180000L +#define MMEA7_MISC__LINKMGR_IDLE_THRESHOLD_MASK 0x03E00000L +#define MMEA7_MISC__FAVOUR_MIDCHAIN_CS_IN_DRAM_ARB_MASK 0x04000000L +#define MMEA7_MISC__FAVOUR_MIDCHAIN_CS_IN_GMI_ARB_MASK 0x08000000L +#define MMEA7_MISC__FAVOUR_LAST_CS_IN_DRAM_ARB_MASK 0x10000000L +#define MMEA7_MISC__FAVOUR_LAST_CS_IN_GMI_ARB_MASK 0x20000000L +#define MMEA7_MISC__SWITCH_CS_ON_W2R_IN_DRAM_ARB_MASK 0x40000000L +#define MMEA7_MISC__SWITCH_CS_ON_W2R_IN_GMI_ARB_MASK 0x80000000L +//MMEA7_LATENCY_SAMPLING +#define MMEA7_LATENCY_SAMPLING__SAMPLER0_DRAM__SHIFT 0x0 +#define MMEA7_LATENCY_SAMPLING__SAMPLER1_DRAM__SHIFT 0x1 +#define MMEA7_LATENCY_SAMPLING__SAMPLER0_GMI__SHIFT 0x2 +#define MMEA7_LATENCY_SAMPLING__SAMPLER1_GMI__SHIFT 0x3 +#define MMEA7_LATENCY_SAMPLING__SAMPLER0_IO__SHIFT 0x4 +#define MMEA7_LATENCY_SAMPLING__SAMPLER1_IO__SHIFT 0x5 +#define MMEA7_LATENCY_SAMPLING__SAMPLER0_READ__SHIFT 0x6 +#define MMEA7_LATENCY_SAMPLING__SAMPLER1_READ__SHIFT 0x7 +#define MMEA7_LATENCY_SAMPLING__SAMPLER0_WRITE__SHIFT 0x8 +#define MMEA7_LATENCY_SAMPLING__SAMPLER1_WRITE__SHIFT 0x9 +#define MMEA7_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET__SHIFT 0xa +#define MMEA7_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET__SHIFT 0xb +#define MMEA7_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET__SHIFT 0xc +#define MMEA7_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET__SHIFT 0xd +#define MMEA7_LATENCY_SAMPLING__SAMPLER0_VC__SHIFT 0xe +#define MMEA7_LATENCY_SAMPLING__SAMPLER1_VC__SHIFT 0x16 +#define MMEA7_LATENCY_SAMPLING__SAMPLER0_DRAM_MASK 0x00000001L +#define MMEA7_LATENCY_SAMPLING__SAMPLER1_DRAM_MASK 0x00000002L +#define MMEA7_LATENCY_SAMPLING__SAMPLER0_GMI_MASK 0x00000004L +#define MMEA7_LATENCY_SAMPLING__SAMPLER1_GMI_MASK 0x00000008L +#define MMEA7_LATENCY_SAMPLING__SAMPLER0_IO_MASK 0x00000010L +#define MMEA7_LATENCY_SAMPLING__SAMPLER1_IO_MASK 0x00000020L +#define MMEA7_LATENCY_SAMPLING__SAMPLER0_READ_MASK 0x00000040L +#define MMEA7_LATENCY_SAMPLING__SAMPLER1_READ_MASK 0x00000080L +#define MMEA7_LATENCY_SAMPLING__SAMPLER0_WRITE_MASK 0x00000100L +#define MMEA7_LATENCY_SAMPLING__SAMPLER1_WRITE_MASK 0x00000200L +#define MMEA7_LATENCY_SAMPLING__SAMPLER0_ATOMIC_RET_MASK 0x00000400L +#define 
MMEA7_LATENCY_SAMPLING__SAMPLER1_ATOMIC_RET_MASK 0x00000800L +#define MMEA7_LATENCY_SAMPLING__SAMPLER0_ATOMIC_NORET_MASK 0x00001000L +#define MMEA7_LATENCY_SAMPLING__SAMPLER1_ATOMIC_NORET_MASK 0x00002000L +#define MMEA7_LATENCY_SAMPLING__SAMPLER0_VC_MASK 0x003FC000L +#define MMEA7_LATENCY_SAMPLING__SAMPLER1_VC_MASK 0x3FC00000L +//MMEA7_PERFCOUNTER_LO +#define MMEA7_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0 +#define MMEA7_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL +//MMEA7_PERFCOUNTER_HI +#define MMEA7_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0 +#define MMEA7_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10 +#define MMEA7_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL +#define MMEA7_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L +//MMEA7_PERFCOUNTER0_CFG +#define MMEA7_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0 +#define MMEA7_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8 +#define MMEA7_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18 +#define MMEA7_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c +#define MMEA7_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d +#define MMEA7_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL +#define MMEA7_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define MMEA7_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L +#define MMEA7_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L +#define MMEA7_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L +//MMEA7_PERFCOUNTER1_CFG +#define MMEA7_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0 +#define MMEA7_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8 +#define MMEA7_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18 +#define MMEA7_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c +#define MMEA7_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d +#define MMEA7_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL +#define MMEA7_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define MMEA7_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L +#define MMEA7_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L +#define MMEA7_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L +//MMEA7_PERFCOUNTER_RSLT_CNTL +#define MMEA7_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0 +#define MMEA7_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8 +#define MMEA7_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10 +#define MMEA7_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18 +#define MMEA7_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19 +#define MMEA7_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a +#define MMEA7_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL +#define MMEA7_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L +#define MMEA7_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L +#define MMEA7_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L +#define MMEA7_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L +#define MMEA7_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L +//MMEA7_EDC_CNT +#define MMEA7_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0 +#define MMEA7_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2 +#define MMEA7_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4 +#define MMEA7_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6 +#define MMEA7_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8 +#define MMEA7_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa +#define MMEA7_EDC_CNT__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc +#define MMEA7_EDC_CNT__RRET_TAGMEM_DED_COUNT__SHIFT 0xe +#define MMEA7_EDC_CNT__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10 +#define MMEA7_EDC_CNT__WRET_TAGMEM_DED_COUNT__SHIFT 0x12 +#define MMEA7_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x14 +#define MMEA7_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x16 +#define MMEA7_EDC_CNT__IORD_CMDMEM_SED_COUNT__SHIFT 0x18 +#define 
MMEA7_EDC_CNT__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1a +#define MMEA7_EDC_CNT__IOWR_DATAMEM_SED_COUNT__SHIFT 0x1c +#define MMEA7_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT_MASK 0x00000003L +#define MMEA7_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL +#define MMEA7_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L +#define MMEA7_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L +#define MMEA7_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L +#define MMEA7_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L +#define MMEA7_EDC_CNT__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L +#define MMEA7_EDC_CNT__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L +#define MMEA7_EDC_CNT__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L +#define MMEA7_EDC_CNT__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L +#define MMEA7_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x00300000L +#define MMEA7_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x00C00000L +#define MMEA7_EDC_CNT__IORD_CMDMEM_SED_COUNT_MASK 0x03000000L +#define MMEA7_EDC_CNT__IOWR_CMDMEM_SED_COUNT_MASK 0x0C000000L +#define MMEA7_EDC_CNT__IOWR_DATAMEM_SED_COUNT_MASK 0x30000000L +//MMEA7_EDC_CNT2 +#define MMEA7_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0 +#define MMEA7_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2 +#define MMEA7_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4 +#define MMEA7_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6 +#define MMEA7_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8 +#define MMEA7_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa +#define MMEA7_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc +#define MMEA7_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe +#define MMEA7_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L +#define MMEA7_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL +#define MMEA7_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L +#define MMEA7_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT_MASK 0x000000C0L +#define MMEA7_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L +#define MMEA7_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L +#define MMEA7_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L +#define MMEA7_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L +//MMEA7_DSM_CNTL +#define MMEA7_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 +#define MMEA7_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 +#define MMEA7_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x3 +#define MMEA7_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5 +#define MMEA7_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x6 +#define MMEA7_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8 +#define MMEA7_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0x9 +#define MMEA7_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb +#define MMEA7_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA__SHIFT 0xc +#define MMEA7_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe +#define MMEA7_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0xf +#define MMEA7_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11 +#define MMEA7_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x12 +#define MMEA7_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14 +#define MMEA7_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0x15 +#define MMEA7_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0x17 +#define MMEA7_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L +#define MMEA7_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L +#define MMEA7_DSM_CNTL__DRAMWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L +#define MMEA7_DSM_CNTL__DRAMWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L +#define 
MMEA7_DSM_CNTL__DRAMWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L +#define MMEA7_DSM_CNTL__DRAMWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L +#define MMEA7_DSM_CNTL__RRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L +#define MMEA7_DSM_CNTL__RRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L +#define MMEA7_DSM_CNTL__WRET_TAGMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L +#define MMEA7_DSM_CNTL__WRET_TAGMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L +#define MMEA7_DSM_CNTL__GMIRD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L +#define MMEA7_DSM_CNTL__GMIRD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L +#define MMEA7_DSM_CNTL__GMIWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L +#define MMEA7_DSM_CNTL__GMIWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L +#define MMEA7_DSM_CNTL__GMIWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00600000L +#define MMEA7_DSM_CNTL__GMIWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00800000L +//MMEA7_DSM_CNTLA +#define MMEA7_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 +#define MMEA7_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 +#define MMEA7_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x3 +#define MMEA7_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x5 +#define MMEA7_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x6 +#define MMEA7_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x8 +#define MMEA7_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x9 +#define MMEA7_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0xb +#define MMEA7_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA__SHIFT 0xc +#define MMEA7_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE__SHIFT 0xe +#define MMEA7_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0xf +#define MMEA7_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x11 +#define MMEA7_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA__SHIFT 0x12 +#define MMEA7_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE__SHIFT 0x14 +#define MMEA7_DSM_CNTLA__DRAMRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000003L +#define MMEA7_DSM_CNTLA__DRAMRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000004L +#define MMEA7_DSM_CNTLA__DRAMWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00000018L +#define MMEA7_DSM_CNTLA__DRAMWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00000020L +#define MMEA7_DSM_CNTLA__IORD_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x000000C0L +#define MMEA7_DSM_CNTLA__IORD_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000100L +#define MMEA7_DSM_CNTLA__IOWR_CMDMEM_DSM_IRRITATOR_DATA_MASK 0x00000600L +#define MMEA7_DSM_CNTLA__IOWR_CMDMEM_ENABLE_SINGLE_WRITE_MASK 0x00000800L +#define MMEA7_DSM_CNTLA__IOWR_DATAMEM_DSM_IRRITATOR_DATA_MASK 0x00003000L +#define MMEA7_DSM_CNTLA__IOWR_DATAMEM_ENABLE_SINGLE_WRITE_MASK 0x00004000L +#define MMEA7_DSM_CNTLA__GMIRD_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x00018000L +#define MMEA7_DSM_CNTLA__GMIRD_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00020000L +#define MMEA7_DSM_CNTLA__GMIWR_PAGEMEM_DSM_IRRITATOR_DATA_MASK 0x000C0000L +#define MMEA7_DSM_CNTLA__GMIWR_PAGEMEM_ENABLE_SINGLE_WRITE_MASK 0x00100000L +//MMEA7_DSM_CNTL2 +#define MMEA7_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x0 +#define MMEA7_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x2 +#define MMEA7_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x3 +#define MMEA7_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x5 +#define MMEA7_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x6 +#define MMEA7_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x8 +#define MMEA7_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0x9 +#define MMEA7_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xb +#define 
MMEA7_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT__SHIFT 0xc +#define MMEA7_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY__SHIFT 0xe +#define MMEA7_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0xf +#define MMEA7_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x11 +#define MMEA7_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x12 +#define MMEA7_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x14 +#define MMEA7_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0x15 +#define MMEA7_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0x17 +#define MMEA7_DSM_CNTL2__INJECT_DELAY__SHIFT 0x1a +#define MMEA7_DSM_CNTL2__DRAMRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L +#define MMEA7_DSM_CNTL2__DRAMRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000004L +#define MMEA7_DSM_CNTL2__DRAMWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L +#define MMEA7_DSM_CNTL2__DRAMWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000020L +#define MMEA7_DSM_CNTL2__DRAMWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L +#define MMEA7_DSM_CNTL2__DRAMWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00000100L +#define MMEA7_DSM_CNTL2__RRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L +#define MMEA7_DSM_CNTL2__RRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00000800L +#define MMEA7_DSM_CNTL2__WRET_TAGMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L +#define MMEA7_DSM_CNTL2__WRET_TAGMEM_SELECT_INJECT_DELAY_MASK 0x00004000L +#define MMEA7_DSM_CNTL2__GMIRD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L +#define MMEA7_DSM_CNTL2__GMIRD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00020000L +#define MMEA7_DSM_CNTL2__GMIWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L +#define MMEA7_DSM_CNTL2__GMIWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00100000L +#define MMEA7_DSM_CNTL2__GMIWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00600000L +#define MMEA7_DSM_CNTL2__GMIWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00800000L +#define MMEA7_DSM_CNTL2__INJECT_DELAY_MASK 0xFC000000L +//MMEA7_DSM_CNTL2A +#define MMEA7_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x0 +#define MMEA7_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x2 +#define MMEA7_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x3 +#define MMEA7_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x5 +#define MMEA7_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x6 +#define MMEA7_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0x8 +#define MMEA7_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT__SHIFT 0x9 +#define MMEA7_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY__SHIFT 0xb +#define MMEA7_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT__SHIFT 0xc +#define MMEA7_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY__SHIFT 0xe +#define MMEA7_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0xf +#define MMEA7_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x11 +#define MMEA7_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT__SHIFT 0x12 +#define MMEA7_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY__SHIFT 0x14 +#define MMEA7_DSM_CNTL2A__DRAMRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000003L +#define MMEA7_DSM_CNTL2A__DRAMRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000004L +#define MMEA7_DSM_CNTL2A__DRAMWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00000018L +#define MMEA7_DSM_CNTL2A__DRAMWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00000020L +#define MMEA7_DSM_CNTL2A__IORD_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x000000C0L +#define MMEA7_DSM_CNTL2A__IORD_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000100L +#define MMEA7_DSM_CNTL2A__IOWR_CMDMEM_ENABLE_ERROR_INJECT_MASK 0x00000600L +#define MMEA7_DSM_CNTL2A__IOWR_CMDMEM_SELECT_INJECT_DELAY_MASK 0x00000800L +#define 
MMEA7_DSM_CNTL2A__IOWR_DATAMEM_ENABLE_ERROR_INJECT_MASK 0x00003000L +#define MMEA7_DSM_CNTL2A__IOWR_DATAMEM_SELECT_INJECT_DELAY_MASK 0x00004000L +#define MMEA7_DSM_CNTL2A__GMIRD_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x00018000L +#define MMEA7_DSM_CNTL2A__GMIRD_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00020000L +#define MMEA7_DSM_CNTL2A__GMIWR_PAGEMEM_ENABLE_ERROR_INJECT_MASK 0x000C0000L +#define MMEA7_DSM_CNTL2A__GMIWR_PAGEMEM_SELECT_INJECT_DELAY_MASK 0x00100000L +//MMEA7_CGTT_CLK_CTRL +#define MMEA7_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define MMEA7_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define MMEA7_CGTT_CLK_CTRL__SPARE0__SHIFT 0xc +#define MMEA7_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE__SHIFT 0x14 +#define MMEA7_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ__SHIFT 0x15 +#define MMEA7_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN__SHIFT 0x16 +#define MMEA7_CGTT_CLK_CTRL__SPARE1__SHIFT 0x17 +#define MMEA7_CGTT_CLK_CTRL__LS_OVERRIDE__SHIFT 0x1b +#define MMEA7_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE__SHIFT 0x1c +#define MMEA7_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ__SHIFT 0x1d +#define MMEA7_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN__SHIFT 0x1e +#define MMEA7_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER__SHIFT 0x1f +#define MMEA7_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define MMEA7_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define MMEA7_CGTT_CLK_CTRL__SPARE0_MASK 0x000FF000L +#define MMEA7_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_WRITE_MASK 0x00100000L +#define MMEA7_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_READ_MASK 0x00200000L +#define MMEA7_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_RETURN_MASK 0x00400000L +#define MMEA7_CGTT_CLK_CTRL__SPARE1_MASK 0x07800000L +#define MMEA7_CGTT_CLK_CTRL__LS_OVERRIDE_MASK 0x08000000L +#define MMEA7_CGTT_CLK_CTRL__SOFT_OVERRIDE_WRITE_MASK 0x10000000L +#define MMEA7_CGTT_CLK_CTRL__SOFT_OVERRIDE_READ_MASK 0x20000000L +#define MMEA7_CGTT_CLK_CTRL__SOFT_OVERRIDE_RETURN_MASK 0x40000000L +#define MMEA7_CGTT_CLK_CTRL__SOFT_OVERRIDE_REGISTER_MASK 0x80000000L +//MMEA7_EDC_MODE +#define MMEA7_EDC_MODE__COUNT_FED_OUT__SHIFT 0x10 +#define MMEA7_EDC_MODE__GATE_FUE__SHIFT 0x11 +#define MMEA7_EDC_MODE__DED_MODE__SHIFT 0x14 +#define MMEA7_EDC_MODE__PROP_FED__SHIFT 0x1d +#define MMEA7_EDC_MODE__BYPASS__SHIFT 0x1f +#define MMEA7_EDC_MODE__COUNT_FED_OUT_MASK 0x00010000L +#define MMEA7_EDC_MODE__GATE_FUE_MASK 0x00020000L +#define MMEA7_EDC_MODE__DED_MODE_MASK 0x00300000L +#define MMEA7_EDC_MODE__PROP_FED_MASK 0x20000000L +#define MMEA7_EDC_MODE__BYPASS_MASK 0x80000000L +//MMEA7_ERR_STATUS +#define MMEA7_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0 +#define MMEA7_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT 0x4 +#define MMEA7_ERR_STATUS__SDP_RDRSP_DATASTATUS__SHIFT 0x8 +#define MMEA7_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT 0xa +#define MMEA7_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xb +#define MMEA7_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xc +#define MMEA7_ERR_STATUS__FUE_FLAG__SHIFT 0xd +#define MMEA7_ERR_STATUS__SDP_RDRSP_STATUS_MASK 0x0000000FL +#define MMEA7_ERR_STATUS__SDP_WRRSP_STATUS_MASK 0x000000F0L +#define MMEA7_ERR_STATUS__SDP_RDRSP_DATASTATUS_MASK 0x00000300L +#define MMEA7_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L +#define MMEA7_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00000800L +#define MMEA7_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00001000L +#define MMEA7_ERR_STATUS__FUE_FLAG_MASK 0x00002000L +//MMEA7_MISC2 +#define MMEA7_MISC2__CSGROUP_SWAP_IN_DRAM_ARB__SHIFT 0x0 +#define MMEA7_MISC2__CSGROUP_SWAP_IN_GMI_ARB__SHIFT 0x1 +#define MMEA7_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM__SHIFT 0x2 +#define 
MMEA7_MISC2__CSGRP_BURST_LIMIT_DATA_GMI__SHIFT 0x7 +#define MMEA7_MISC2__IO_RDWR_PRIORITY_ENABLE__SHIFT 0xc +#define MMEA7_MISC2__RRET_SWAP_MODE__SHIFT 0xd +#define MMEA7_MISC2__CSGROUP_SWAP_IN_DRAM_ARB_MASK 0x00000001L +#define MMEA7_MISC2__CSGROUP_SWAP_IN_GMI_ARB_MASK 0x00000002L +#define MMEA7_MISC2__CSGRP_BURST_LIMIT_DATA_DRAM_MASK 0x0000007CL +#define MMEA7_MISC2__CSGRP_BURST_LIMIT_DATA_GMI_MASK 0x00000F80L +#define MMEA7_MISC2__IO_RDWR_PRIORITY_ENABLE_MASK 0x00001000L +#define MMEA7_MISC2__RRET_SWAP_MODE_MASK 0x00002000L +//MMEA7_ADDRDEC_SELECT +#define MMEA7_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_START__SHIFT 0x0 +#define MMEA7_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_END__SHIFT 0x5 +#define MMEA7_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_START__SHIFT 0xa +#define MMEA7_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_END__SHIFT 0xf +#define MMEA7_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_START_MASK 0x0000001FL +#define MMEA7_ADDRDEC_SELECT__DRAM_ADDRDEC_CHANNEL_END_MASK 0x000003E0L +#define MMEA7_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_START_MASK 0x00007C00L +#define MMEA7_ADDRDEC_SELECT__GMI_ADDRDEC_CHANNEL_END_MASK 0x000F8000L +//MMEA7_EDC_CNT3 +#define MMEA7_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT__SHIFT 0x0 +#define MMEA7_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT__SHIFT 0x2 +#define MMEA7_EDC_CNT3__IORD_CMDMEM_DED_COUNT__SHIFT 0x4 +#define MMEA7_EDC_CNT3__IOWR_CMDMEM_DED_COUNT__SHIFT 0x6 +#define MMEA7_EDC_CNT3__IOWR_DATAMEM_DED_COUNT__SHIFT 0x8 +#define MMEA7_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT__SHIFT 0xa +#define MMEA7_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT__SHIFT 0xc +#define MMEA7_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT_MASK 0x00000003L +#define MMEA7_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT_MASK 0x0000000CL +#define MMEA7_EDC_CNT3__IORD_CMDMEM_DED_COUNT_MASK 0x00000030L +#define MMEA7_EDC_CNT3__IOWR_CMDMEM_DED_COUNT_MASK 0x000000C0L +#define MMEA7_EDC_CNT3__IOWR_DATAMEM_DED_COUNT_MASK 0x00000300L +#define MMEA7_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT_MASK 0x00000C00L +#define MMEA7_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT_MASK 0x00003000L + + +// addressBlock: mmhub_pctldec1 +//PCTL1_CTRL +#define PCTL1_CTRL__PG_ENABLE__SHIFT 0x0 +#define PCTL1_CTRL__ALLOW_DEEP_SLEEP_MODE__SHIFT 0x1 +#define PCTL1_CTRL__STCTRL_RSMU_IDLE_THRESHOLD__SHIFT 0x4 +#define PCTL1_CTRL__STCTRL_DAGB_IDLE_THRESHOLD__SHIFT 0xb +#define PCTL1_CTRL__STCTRL_IGNORE_PROTECTION_FAULT__SHIFT 0x10 +#define PCTL1_CTRL__OVR_EA0_SDP_PARTACK__SHIFT 0x11 +#define PCTL1_CTRL__OVR_EA1_SDP_PARTACK__SHIFT 0x12 +#define PCTL1_CTRL__OVR_EA2_SDP_PARTACK__SHIFT 0x13 +#define PCTL1_CTRL__OVR_EA3_SDP_PARTACK__SHIFT 0x14 +#define PCTL1_CTRL__OVR_EA4_SDP_PARTACK__SHIFT 0x15 +#define PCTL1_CTRL__OVR_EA0_SDP_FULLACK__SHIFT 0x16 +#define PCTL1_CTRL__OVR_EA1_SDP_FULLACK__SHIFT 0x17 +#define PCTL1_CTRL__OVR_EA2_SDP_FULLACK__SHIFT 0x18 +#define PCTL1_CTRL__OVR_EA3_SDP_FULLACK__SHIFT 0x19 +#define PCTL1_CTRL__OVR_EA4_SDP_FULLACK__SHIFT 0x1a +#define PCTL1_CTRL__PGFSM_CMD_STATUS__SHIFT 0x1b +#define PCTL1_CTRL__PG_ENABLE_MASK 0x00000001L +#define PCTL1_CTRL__ALLOW_DEEP_SLEEP_MODE_MASK 0x0000000EL +#define PCTL1_CTRL__STCTRL_RSMU_IDLE_THRESHOLD_MASK 0x000007F0L +#define PCTL1_CTRL__STCTRL_DAGB_IDLE_THRESHOLD_MASK 0x0000F800L +#define PCTL1_CTRL__STCTRL_IGNORE_PROTECTION_FAULT_MASK 0x00010000L +#define PCTL1_CTRL__OVR_EA0_SDP_PARTACK_MASK 0x00020000L +#define PCTL1_CTRL__OVR_EA1_SDP_PARTACK_MASK 0x00040000L +#define PCTL1_CTRL__OVR_EA2_SDP_PARTACK_MASK 0x00080000L +#define PCTL1_CTRL__OVR_EA3_SDP_PARTACK_MASK 0x00100000L +#define PCTL1_CTRL__OVR_EA4_SDP_PARTACK_MASK 0x00200000L +#define 
PCTL1_CTRL__OVR_EA0_SDP_FULLACK_MASK 0x00400000L +#define PCTL1_CTRL__OVR_EA1_SDP_FULLACK_MASK 0x00800000L +#define PCTL1_CTRL__OVR_EA2_SDP_FULLACK_MASK 0x01000000L +#define PCTL1_CTRL__OVR_EA3_SDP_FULLACK_MASK 0x02000000L +#define PCTL1_CTRL__OVR_EA4_SDP_FULLACK_MASK 0x04000000L +#define PCTL1_CTRL__PGFSM_CMD_STATUS_MASK 0x18000000L +//PCTL1_MMHUB_DEEPSLEEP_IB +#define PCTL1_MMHUB_DEEPSLEEP_IB__DS0__SHIFT 0x0 +#define PCTL1_MMHUB_DEEPSLEEP_IB__DS1__SHIFT 0x1 +#define PCTL1_MMHUB_DEEPSLEEP_IB__DS2__SHIFT 0x2 +#define PCTL1_MMHUB_DEEPSLEEP_IB__DS3__SHIFT 0x3 +#define PCTL1_MMHUB_DEEPSLEEP_IB__DS4__SHIFT 0x4 +#define PCTL1_MMHUB_DEEPSLEEP_IB__DS5__SHIFT 0x5 +#define PCTL1_MMHUB_DEEPSLEEP_IB__DS6__SHIFT 0x6 +#define PCTL1_MMHUB_DEEPSLEEP_IB__DS7__SHIFT 0x7 +#define PCTL1_MMHUB_DEEPSLEEP_IB__DS8__SHIFT 0x8 +#define PCTL1_MMHUB_DEEPSLEEP_IB__DS9__SHIFT 0x9 +#define PCTL1_MMHUB_DEEPSLEEP_IB__DS10__SHIFT 0xa +#define PCTL1_MMHUB_DEEPSLEEP_IB__DS11__SHIFT 0xb +#define PCTL1_MMHUB_DEEPSLEEP_IB__DS12__SHIFT 0xc +#define PCTL1_MMHUB_DEEPSLEEP_IB__DS13__SHIFT 0xd +#define PCTL1_MMHUB_DEEPSLEEP_IB__DS14__SHIFT 0xe +#define PCTL1_MMHUB_DEEPSLEEP_IB__DS15__SHIFT 0xf +#define PCTL1_MMHUB_DEEPSLEEP_IB__DS16__SHIFT 0x10 +#define PCTL1_MMHUB_DEEPSLEEP_IB__SETCLEAR__SHIFT 0x1f +#define PCTL1_MMHUB_DEEPSLEEP_IB__DS0_MASK 0x00000001L +#define PCTL1_MMHUB_DEEPSLEEP_IB__DS1_MASK 0x00000002L +#define PCTL1_MMHUB_DEEPSLEEP_IB__DS2_MASK 0x00000004L +#define PCTL1_MMHUB_DEEPSLEEP_IB__DS3_MASK 0x00000008L +#define PCTL1_MMHUB_DEEPSLEEP_IB__DS4_MASK 0x00000010L +#define PCTL1_MMHUB_DEEPSLEEP_IB__DS5_MASK 0x00000020L +#define PCTL1_MMHUB_DEEPSLEEP_IB__DS6_MASK 0x00000040L +#define PCTL1_MMHUB_DEEPSLEEP_IB__DS7_MASK 0x00000080L +#define PCTL1_MMHUB_DEEPSLEEP_IB__DS8_MASK 0x00000100L +#define PCTL1_MMHUB_DEEPSLEEP_IB__DS9_MASK 0x00000200L +#define PCTL1_MMHUB_DEEPSLEEP_IB__DS10_MASK 0x00000400L +#define PCTL1_MMHUB_DEEPSLEEP_IB__DS11_MASK 0x00000800L +#define PCTL1_MMHUB_DEEPSLEEP_IB__DS12_MASK 0x00001000L +#define PCTL1_MMHUB_DEEPSLEEP_IB__DS13_MASK 0x00002000L +#define PCTL1_MMHUB_DEEPSLEEP_IB__DS14_MASK 0x00004000L +#define PCTL1_MMHUB_DEEPSLEEP_IB__DS15_MASK 0x00008000L +#define PCTL1_MMHUB_DEEPSLEEP_IB__DS16_MASK 0x00010000L +#define PCTL1_MMHUB_DEEPSLEEP_IB__SETCLEAR_MASK 0x80000000L +//PCTL1_MMHUB_DEEPSLEEP_OVERRIDE +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS0__SHIFT 0x0 +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS1__SHIFT 0x1 +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS2__SHIFT 0x2 +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS3__SHIFT 0x3 +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS4__SHIFT 0x4 +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS5__SHIFT 0x5 +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS6__SHIFT 0x6 +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS7__SHIFT 0x7 +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS8__SHIFT 0x8 +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS9__SHIFT 0x9 +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS10__SHIFT 0xa +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS11__SHIFT 0xb +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS12__SHIFT 0xc +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS13__SHIFT 0xd +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS14__SHIFT 0xe +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS15__SHIFT 0xf +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS16__SHIFT 0x10 +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS_ATHUB__SHIFT 0x11 +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS0_MASK 0x00000001L +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS1_MASK 0x00000002L +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS2_MASK 0x00000004L +#define 
PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS3_MASK 0x00000008L +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS4_MASK 0x00000010L +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS5_MASK 0x00000020L +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS6_MASK 0x00000040L +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS7_MASK 0x00000080L +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS8_MASK 0x00000100L +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS9_MASK 0x00000200L +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS10_MASK 0x00000400L +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS11_MASK 0x00000800L +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS12_MASK 0x00001000L +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS13_MASK 0x00002000L +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS14_MASK 0x00004000L +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS15_MASK 0x00008000L +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS16_MASK 0x00010000L +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE__DS_ATHUB_MASK 0x00020000L +//PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS0__SHIFT 0x0 +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS1__SHIFT 0x1 +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS2__SHIFT 0x2 +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS3__SHIFT 0x3 +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS4__SHIFT 0x4 +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS5__SHIFT 0x5 +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS6__SHIFT 0x6 +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS7__SHIFT 0x7 +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS8__SHIFT 0x8 +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS9__SHIFT 0x9 +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS10__SHIFT 0xa +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS11__SHIFT 0xb +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS12__SHIFT 0xc +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS13__SHIFT 0xd +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS14__SHIFT 0xe +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS15__SHIFT 0xf +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS16__SHIFT 0x10 +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS0_MASK 0x00000001L +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS1_MASK 0x00000002L +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS2_MASK 0x00000004L +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS3_MASK 0x00000008L +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS4_MASK 0x00000010L +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS5_MASK 0x00000020L +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS6_MASK 0x00000040L +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS7_MASK 0x00000080L +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS8_MASK 0x00000100L +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS9_MASK 0x00000200L +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS10_MASK 0x00000400L +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS11_MASK 0x00000800L +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS12_MASK 0x00001000L +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS13_MASK 0x00002000L +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS14_MASK 0x00004000L +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS15_MASK 0x00008000L +#define PCTL1_MMHUB_DEEPSLEEP_OVERRIDE_IB__DS16_MASK 0x00010000L +//PCTL1_PG_IGNORE_DEEPSLEEP +#define PCTL1_PG_IGNORE_DEEPSLEEP__DS0__SHIFT 0x0 +#define PCTL1_PG_IGNORE_DEEPSLEEP__DS1__SHIFT 0x1 +#define PCTL1_PG_IGNORE_DEEPSLEEP__DS2__SHIFT 0x2 +#define PCTL1_PG_IGNORE_DEEPSLEEP__DS3__SHIFT 0x3 +#define PCTL1_PG_IGNORE_DEEPSLEEP__DS4__SHIFT 0x4 +#define PCTL1_PG_IGNORE_DEEPSLEEP__DS5__SHIFT 0x5 +#define PCTL1_PG_IGNORE_DEEPSLEEP__DS6__SHIFT 0x6 +#define PCTL1_PG_IGNORE_DEEPSLEEP__DS7__SHIFT 0x7 +#define 
PCTL1_PG_IGNORE_DEEPSLEEP__DS8__SHIFT 0x8 +#define PCTL1_PG_IGNORE_DEEPSLEEP__DS9__SHIFT 0x9 +#define PCTL1_PG_IGNORE_DEEPSLEEP__DS10__SHIFT 0xa +#define PCTL1_PG_IGNORE_DEEPSLEEP__DS11__SHIFT 0xb +#define PCTL1_PG_IGNORE_DEEPSLEEP__DS12__SHIFT 0xc +#define PCTL1_PG_IGNORE_DEEPSLEEP__DS13__SHIFT 0xd +#define PCTL1_PG_IGNORE_DEEPSLEEP__DS14__SHIFT 0xe +#define PCTL1_PG_IGNORE_DEEPSLEEP__DS15__SHIFT 0xf +#define PCTL1_PG_IGNORE_DEEPSLEEP__DS16__SHIFT 0x10 +#define PCTL1_PG_IGNORE_DEEPSLEEP__DS_ATHUB__SHIFT 0x11 +#define PCTL1_PG_IGNORE_DEEPSLEEP__ALLIPS__SHIFT 0x12 +#define PCTL1_PG_IGNORE_DEEPSLEEP__DS0_MASK 0x00000001L +#define PCTL1_PG_IGNORE_DEEPSLEEP__DS1_MASK 0x00000002L +#define PCTL1_PG_IGNORE_DEEPSLEEP__DS2_MASK 0x00000004L +#define PCTL1_PG_IGNORE_DEEPSLEEP__DS3_MASK 0x00000008L +#define PCTL1_PG_IGNORE_DEEPSLEEP__DS4_MASK 0x00000010L +#define PCTL1_PG_IGNORE_DEEPSLEEP__DS5_MASK 0x00000020L +#define PCTL1_PG_IGNORE_DEEPSLEEP__DS6_MASK 0x00000040L +#define PCTL1_PG_IGNORE_DEEPSLEEP__DS7_MASK 0x00000080L +#define PCTL1_PG_IGNORE_DEEPSLEEP__DS8_MASK 0x00000100L +#define PCTL1_PG_IGNORE_DEEPSLEEP__DS9_MASK 0x00000200L +#define PCTL1_PG_IGNORE_DEEPSLEEP__DS10_MASK 0x00000400L +#define PCTL1_PG_IGNORE_DEEPSLEEP__DS11_MASK 0x00000800L +#define PCTL1_PG_IGNORE_DEEPSLEEP__DS12_MASK 0x00001000L +#define PCTL1_PG_IGNORE_DEEPSLEEP__DS13_MASK 0x00002000L +#define PCTL1_PG_IGNORE_DEEPSLEEP__DS14_MASK 0x00004000L +#define PCTL1_PG_IGNORE_DEEPSLEEP__DS15_MASK 0x00008000L +#define PCTL1_PG_IGNORE_DEEPSLEEP__DS16_MASK 0x00010000L +#define PCTL1_PG_IGNORE_DEEPSLEEP__DS_ATHUB_MASK 0x00020000L +#define PCTL1_PG_IGNORE_DEEPSLEEP__ALLIPS_MASK 0x00040000L +//PCTL1_PG_IGNORE_DEEPSLEEP_IB +#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS0__SHIFT 0x0 +#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS1__SHIFT 0x1 +#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS2__SHIFT 0x2 +#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS3__SHIFT 0x3 +#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS4__SHIFT 0x4 +#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS5__SHIFT 0x5 +#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS6__SHIFT 0x6 +#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS7__SHIFT 0x7 +#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS8__SHIFT 0x8 +#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS9__SHIFT 0x9 +#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS10__SHIFT 0xa +#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS11__SHIFT 0xb +#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS12__SHIFT 0xc +#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS13__SHIFT 0xd +#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS14__SHIFT 0xe +#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS15__SHIFT 0xf +#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS16__SHIFT 0x10 +#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__ALLIPS__SHIFT 0x11 +#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS0_MASK 0x00000001L +#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS1_MASK 0x00000002L +#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS2_MASK 0x00000004L +#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS3_MASK 0x00000008L +#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS4_MASK 0x00000010L +#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS5_MASK 0x00000020L +#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS6_MASK 0x00000040L +#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS7_MASK 0x00000080L +#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS8_MASK 0x00000100L +#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS9_MASK 0x00000200L +#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS10_MASK 0x00000400L +#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS11_MASK 0x00000800L +#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS12_MASK 0x00001000L +#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS13_MASK 0x00002000L +#define 
PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS14_MASK 0x00004000L +#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS15_MASK 0x00008000L +#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__DS16_MASK 0x00010000L +#define PCTL1_PG_IGNORE_DEEPSLEEP_IB__ALLIPS_MASK 0x00020000L +//PCTL1_SLICE0_CFG_DAGB_BUSY +#define PCTL1_SLICE0_CFG_DAGB_BUSY__DB_LNCFG__SHIFT 0x0 +#define PCTL1_SLICE0_CFG_DAGB_BUSY__DB_LNCFG_MASK 0xFFFFFFFFL +//PCTL1_SLICE0_CFG_DS_ALLOW +#define PCTL1_SLICE0_CFG_DS_ALLOW__DS0__SHIFT 0x0 +#define PCTL1_SLICE0_CFG_DS_ALLOW__DS1__SHIFT 0x1 +#define PCTL1_SLICE0_CFG_DS_ALLOW__DS2__SHIFT 0x2 +#define PCTL1_SLICE0_CFG_DS_ALLOW__DS3__SHIFT 0x3 +#define PCTL1_SLICE0_CFG_DS_ALLOW__DS4__SHIFT 0x4 +#define PCTL1_SLICE0_CFG_DS_ALLOW__DS5__SHIFT 0x5 +#define PCTL1_SLICE0_CFG_DS_ALLOW__DS6__SHIFT 0x6 +#define PCTL1_SLICE0_CFG_DS_ALLOW__DS7__SHIFT 0x7 +#define PCTL1_SLICE0_CFG_DS_ALLOW__DS8__SHIFT 0x8 +#define PCTL1_SLICE0_CFG_DS_ALLOW__DS9__SHIFT 0x9 +#define PCTL1_SLICE0_CFG_DS_ALLOW__DS10__SHIFT 0xa +#define PCTL1_SLICE0_CFG_DS_ALLOW__DS11__SHIFT 0xb +#define PCTL1_SLICE0_CFG_DS_ALLOW__DS12__SHIFT 0xc +#define PCTL1_SLICE0_CFG_DS_ALLOW__DS13__SHIFT 0xd +#define PCTL1_SLICE0_CFG_DS_ALLOW__DS14__SHIFT 0xe +#define PCTL1_SLICE0_CFG_DS_ALLOW__DS15__SHIFT 0xf +#define PCTL1_SLICE0_CFG_DS_ALLOW__DS16__SHIFT 0x10 +#define PCTL1_SLICE0_CFG_DS_ALLOW__DS0_MASK 0x00000001L +#define PCTL1_SLICE0_CFG_DS_ALLOW__DS1_MASK 0x00000002L +#define PCTL1_SLICE0_CFG_DS_ALLOW__DS2_MASK 0x00000004L +#define PCTL1_SLICE0_CFG_DS_ALLOW__DS3_MASK 0x00000008L +#define PCTL1_SLICE0_CFG_DS_ALLOW__DS4_MASK 0x00000010L +#define PCTL1_SLICE0_CFG_DS_ALLOW__DS5_MASK 0x00000020L +#define PCTL1_SLICE0_CFG_DS_ALLOW__DS6_MASK 0x00000040L +#define PCTL1_SLICE0_CFG_DS_ALLOW__DS7_MASK 0x00000080L +#define PCTL1_SLICE0_CFG_DS_ALLOW__DS8_MASK 0x00000100L +#define PCTL1_SLICE0_CFG_DS_ALLOW__DS9_MASK 0x00000200L +#define PCTL1_SLICE0_CFG_DS_ALLOW__DS10_MASK 0x00000400L +#define PCTL1_SLICE0_CFG_DS_ALLOW__DS11_MASK 0x00000800L +#define PCTL1_SLICE0_CFG_DS_ALLOW__DS12_MASK 0x00001000L +#define PCTL1_SLICE0_CFG_DS_ALLOW__DS13_MASK 0x00002000L +#define PCTL1_SLICE0_CFG_DS_ALLOW__DS14_MASK 0x00004000L +#define PCTL1_SLICE0_CFG_DS_ALLOW__DS15_MASK 0x00008000L +#define PCTL1_SLICE0_CFG_DS_ALLOW__DS16_MASK 0x00010000L +//PCTL1_SLICE0_CFG_DS_ALLOW_IB +#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS0__SHIFT 0x0 +#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS1__SHIFT 0x1 +#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS2__SHIFT 0x2 +#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS3__SHIFT 0x3 +#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS4__SHIFT 0x4 +#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS5__SHIFT 0x5 +#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS6__SHIFT 0x6 +#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS7__SHIFT 0x7 +#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS8__SHIFT 0x8 +#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS9__SHIFT 0x9 +#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS10__SHIFT 0xa +#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS11__SHIFT 0xb +#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS12__SHIFT 0xc +#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS13__SHIFT 0xd +#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS14__SHIFT 0xe +#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS15__SHIFT 0xf +#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS16__SHIFT 0x10 +#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS0_MASK 0x00000001L +#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS1_MASK 0x00000002L +#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS2_MASK 0x00000004L +#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS3_MASK 0x00000008L +#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS4_MASK 0x00000010L +#define 
PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS5_MASK 0x00000020L +#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS6_MASK 0x00000040L +#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS7_MASK 0x00000080L +#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS8_MASK 0x00000100L +#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS9_MASK 0x00000200L +#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS10_MASK 0x00000400L +#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS11_MASK 0x00000800L +#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS12_MASK 0x00001000L +#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS13_MASK 0x00002000L +#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS14_MASK 0x00004000L +#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS15_MASK 0x00008000L +#define PCTL1_SLICE0_CFG_DS_ALLOW_IB__DS16_MASK 0x00010000L +//PCTL1_SLICE1_CFG_DAGB_BUSY +#define PCTL1_SLICE1_CFG_DAGB_BUSY__DB_LNCFG__SHIFT 0x0 +#define PCTL1_SLICE1_CFG_DAGB_BUSY__DB_LNCFG_MASK 0xFFFFFFFFL +//PCTL1_SLICE1_CFG_DS_ALLOW +#define PCTL1_SLICE1_CFG_DS_ALLOW__DS0__SHIFT 0x0 +#define PCTL1_SLICE1_CFG_DS_ALLOW__DS1__SHIFT 0x1 +#define PCTL1_SLICE1_CFG_DS_ALLOW__DS2__SHIFT 0x2 +#define PCTL1_SLICE1_CFG_DS_ALLOW__DS3__SHIFT 0x3 +#define PCTL1_SLICE1_CFG_DS_ALLOW__DS4__SHIFT 0x4 +#define PCTL1_SLICE1_CFG_DS_ALLOW__DS5__SHIFT 0x5 +#define PCTL1_SLICE1_CFG_DS_ALLOW__DS6__SHIFT 0x6 +#define PCTL1_SLICE1_CFG_DS_ALLOW__DS7__SHIFT 0x7 +#define PCTL1_SLICE1_CFG_DS_ALLOW__DS8__SHIFT 0x8 +#define PCTL1_SLICE1_CFG_DS_ALLOW__DS9__SHIFT 0x9 +#define PCTL1_SLICE1_CFG_DS_ALLOW__DS10__SHIFT 0xa +#define PCTL1_SLICE1_CFG_DS_ALLOW__DS11__SHIFT 0xb +#define PCTL1_SLICE1_CFG_DS_ALLOW__DS12__SHIFT 0xc +#define PCTL1_SLICE1_CFG_DS_ALLOW__DS13__SHIFT 0xd +#define PCTL1_SLICE1_CFG_DS_ALLOW__DS14__SHIFT 0xe +#define PCTL1_SLICE1_CFG_DS_ALLOW__DS15__SHIFT 0xf +#define PCTL1_SLICE1_CFG_DS_ALLOW__DS16__SHIFT 0x10 +#define PCTL1_SLICE1_CFG_DS_ALLOW__DS0_MASK 0x00000001L +#define PCTL1_SLICE1_CFG_DS_ALLOW__DS1_MASK 0x00000002L +#define PCTL1_SLICE1_CFG_DS_ALLOW__DS2_MASK 0x00000004L +#define PCTL1_SLICE1_CFG_DS_ALLOW__DS3_MASK 0x00000008L +#define PCTL1_SLICE1_CFG_DS_ALLOW__DS4_MASK 0x00000010L +#define PCTL1_SLICE1_CFG_DS_ALLOW__DS5_MASK 0x00000020L +#define PCTL1_SLICE1_CFG_DS_ALLOW__DS6_MASK 0x00000040L +#define PCTL1_SLICE1_CFG_DS_ALLOW__DS7_MASK 0x00000080L +#define PCTL1_SLICE1_CFG_DS_ALLOW__DS8_MASK 0x00000100L +#define PCTL1_SLICE1_CFG_DS_ALLOW__DS9_MASK 0x00000200L +#define PCTL1_SLICE1_CFG_DS_ALLOW__DS10_MASK 0x00000400L +#define PCTL1_SLICE1_CFG_DS_ALLOW__DS11_MASK 0x00000800L +#define PCTL1_SLICE1_CFG_DS_ALLOW__DS12_MASK 0x00001000L +#define PCTL1_SLICE1_CFG_DS_ALLOW__DS13_MASK 0x00002000L +#define PCTL1_SLICE1_CFG_DS_ALLOW__DS14_MASK 0x00004000L +#define PCTL1_SLICE1_CFG_DS_ALLOW__DS15_MASK 0x00008000L +#define PCTL1_SLICE1_CFG_DS_ALLOW__DS16_MASK 0x00010000L +//PCTL1_SLICE1_CFG_DS_ALLOW_IB +#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS0__SHIFT 0x0 +#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS1__SHIFT 0x1 +#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS2__SHIFT 0x2 +#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS3__SHIFT 0x3 +#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS4__SHIFT 0x4 +#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS5__SHIFT 0x5 +#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS6__SHIFT 0x6 +#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS7__SHIFT 0x7 +#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS8__SHIFT 0x8 +#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS9__SHIFT 0x9 +#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS10__SHIFT 0xa +#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS11__SHIFT 0xb +#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS12__SHIFT 0xc +#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS13__SHIFT 0xd +#define 
PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS14__SHIFT 0xe +#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS15__SHIFT 0xf +#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS16__SHIFT 0x10 +#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS0_MASK 0x00000001L +#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS1_MASK 0x00000002L +#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS2_MASK 0x00000004L +#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS3_MASK 0x00000008L +#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS4_MASK 0x00000010L +#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS5_MASK 0x00000020L +#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS6_MASK 0x00000040L +#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS7_MASK 0x00000080L +#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS8_MASK 0x00000100L +#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS9_MASK 0x00000200L +#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS10_MASK 0x00000400L +#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS11_MASK 0x00000800L +#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS12_MASK 0x00001000L +#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS13_MASK 0x00002000L +#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS14_MASK 0x00004000L +#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS15_MASK 0x00008000L +#define PCTL1_SLICE1_CFG_DS_ALLOW_IB__DS16_MASK 0x00010000L +//PCTL1_SLICE2_CFG_DAGB_BUSY +#define PCTL1_SLICE2_CFG_DAGB_BUSY__DB_LNCFG__SHIFT 0x0 +#define PCTL1_SLICE2_CFG_DAGB_BUSY__DB_LNCFG_MASK 0xFFFFFFFFL +//PCTL1_SLICE2_CFG_DS_ALLOW +#define PCTL1_SLICE2_CFG_DS_ALLOW__DS0__SHIFT 0x0 +#define PCTL1_SLICE2_CFG_DS_ALLOW__DS1__SHIFT 0x1 +#define PCTL1_SLICE2_CFG_DS_ALLOW__DS2__SHIFT 0x2 +#define PCTL1_SLICE2_CFG_DS_ALLOW__DS3__SHIFT 0x3 +#define PCTL1_SLICE2_CFG_DS_ALLOW__DS4__SHIFT 0x4 +#define PCTL1_SLICE2_CFG_DS_ALLOW__DS5__SHIFT 0x5 +#define PCTL1_SLICE2_CFG_DS_ALLOW__DS6__SHIFT 0x6 +#define PCTL1_SLICE2_CFG_DS_ALLOW__DS7__SHIFT 0x7 +#define PCTL1_SLICE2_CFG_DS_ALLOW__DS8__SHIFT 0x8 +#define PCTL1_SLICE2_CFG_DS_ALLOW__DS9__SHIFT 0x9 +#define PCTL1_SLICE2_CFG_DS_ALLOW__DS10__SHIFT 0xa +#define PCTL1_SLICE2_CFG_DS_ALLOW__DS11__SHIFT 0xb +#define PCTL1_SLICE2_CFG_DS_ALLOW__DS12__SHIFT 0xc +#define PCTL1_SLICE2_CFG_DS_ALLOW__DS13__SHIFT 0xd +#define PCTL1_SLICE2_CFG_DS_ALLOW__DS14__SHIFT 0xe +#define PCTL1_SLICE2_CFG_DS_ALLOW__DS15__SHIFT 0xf +#define PCTL1_SLICE2_CFG_DS_ALLOW__DS16__SHIFT 0x10 +#define PCTL1_SLICE2_CFG_DS_ALLOW__DS0_MASK 0x00000001L +#define PCTL1_SLICE2_CFG_DS_ALLOW__DS1_MASK 0x00000002L +#define PCTL1_SLICE2_CFG_DS_ALLOW__DS2_MASK 0x00000004L +#define PCTL1_SLICE2_CFG_DS_ALLOW__DS3_MASK 0x00000008L +#define PCTL1_SLICE2_CFG_DS_ALLOW__DS4_MASK 0x00000010L +#define PCTL1_SLICE2_CFG_DS_ALLOW__DS5_MASK 0x00000020L +#define PCTL1_SLICE2_CFG_DS_ALLOW__DS6_MASK 0x00000040L +#define PCTL1_SLICE2_CFG_DS_ALLOW__DS7_MASK 0x00000080L +#define PCTL1_SLICE2_CFG_DS_ALLOW__DS8_MASK 0x00000100L +#define PCTL1_SLICE2_CFG_DS_ALLOW__DS9_MASK 0x00000200L +#define PCTL1_SLICE2_CFG_DS_ALLOW__DS10_MASK 0x00000400L +#define PCTL1_SLICE2_CFG_DS_ALLOW__DS11_MASK 0x00000800L +#define PCTL1_SLICE2_CFG_DS_ALLOW__DS12_MASK 0x00001000L +#define PCTL1_SLICE2_CFG_DS_ALLOW__DS13_MASK 0x00002000L +#define PCTL1_SLICE2_CFG_DS_ALLOW__DS14_MASK 0x00004000L +#define PCTL1_SLICE2_CFG_DS_ALLOW__DS15_MASK 0x00008000L +#define PCTL1_SLICE2_CFG_DS_ALLOW__DS16_MASK 0x00010000L +//PCTL1_SLICE2_CFG_DS_ALLOW_IB +#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS0__SHIFT 0x0 +#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS1__SHIFT 0x1 +#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS2__SHIFT 0x2 +#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS3__SHIFT 0x3 +#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS4__SHIFT 0x4 +#define 
PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS5__SHIFT 0x5 +#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS6__SHIFT 0x6 +#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS7__SHIFT 0x7 +#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS8__SHIFT 0x8 +#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS9__SHIFT 0x9 +#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS10__SHIFT 0xa +#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS11__SHIFT 0xb +#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS12__SHIFT 0xc +#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS13__SHIFT 0xd +#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS14__SHIFT 0xe +#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS15__SHIFT 0xf +#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS16__SHIFT 0x10 +#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS0_MASK 0x00000001L +#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS1_MASK 0x00000002L +#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS2_MASK 0x00000004L +#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS3_MASK 0x00000008L +#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS4_MASK 0x00000010L +#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS5_MASK 0x00000020L +#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS6_MASK 0x00000040L +#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS7_MASK 0x00000080L +#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS8_MASK 0x00000100L +#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS9_MASK 0x00000200L +#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS10_MASK 0x00000400L +#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS11_MASK 0x00000800L +#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS12_MASK 0x00001000L +#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS13_MASK 0x00002000L +#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS14_MASK 0x00004000L +#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS15_MASK 0x00008000L +#define PCTL1_SLICE2_CFG_DS_ALLOW_IB__DS16_MASK 0x00010000L +//PCTL1_SLICE3_CFG_DAGB_BUSY +#define PCTL1_SLICE3_CFG_DAGB_BUSY__DB_LNCFG__SHIFT 0x0 +#define PCTL1_SLICE3_CFG_DAGB_BUSY__DB_LNCFG_MASK 0xFFFFFFFFL +//PCTL1_SLICE3_CFG_DS_ALLOW +#define PCTL1_SLICE3_CFG_DS_ALLOW__DS0__SHIFT 0x0 +#define PCTL1_SLICE3_CFG_DS_ALLOW__DS1__SHIFT 0x1 +#define PCTL1_SLICE3_CFG_DS_ALLOW__DS2__SHIFT 0x2 +#define PCTL1_SLICE3_CFG_DS_ALLOW__DS3__SHIFT 0x3 +#define PCTL1_SLICE3_CFG_DS_ALLOW__DS4__SHIFT 0x4 +#define PCTL1_SLICE3_CFG_DS_ALLOW__DS5__SHIFT 0x5 +#define PCTL1_SLICE3_CFG_DS_ALLOW__DS6__SHIFT 0x6 +#define PCTL1_SLICE3_CFG_DS_ALLOW__DS7__SHIFT 0x7 +#define PCTL1_SLICE3_CFG_DS_ALLOW__DS8__SHIFT 0x8 +#define PCTL1_SLICE3_CFG_DS_ALLOW__DS9__SHIFT 0x9 +#define PCTL1_SLICE3_CFG_DS_ALLOW__DS10__SHIFT 0xa +#define PCTL1_SLICE3_CFG_DS_ALLOW__DS11__SHIFT 0xb +#define PCTL1_SLICE3_CFG_DS_ALLOW__DS12__SHIFT 0xc +#define PCTL1_SLICE3_CFG_DS_ALLOW__DS13__SHIFT 0xd +#define PCTL1_SLICE3_CFG_DS_ALLOW__DS14__SHIFT 0xe +#define PCTL1_SLICE3_CFG_DS_ALLOW__DS15__SHIFT 0xf +#define PCTL1_SLICE3_CFG_DS_ALLOW__DS16__SHIFT 0x10 +#define PCTL1_SLICE3_CFG_DS_ALLOW__DS0_MASK 0x00000001L +#define PCTL1_SLICE3_CFG_DS_ALLOW__DS1_MASK 0x00000002L +#define PCTL1_SLICE3_CFG_DS_ALLOW__DS2_MASK 0x00000004L +#define PCTL1_SLICE3_CFG_DS_ALLOW__DS3_MASK 0x00000008L +#define PCTL1_SLICE3_CFG_DS_ALLOW__DS4_MASK 0x00000010L +#define PCTL1_SLICE3_CFG_DS_ALLOW__DS5_MASK 0x00000020L +#define PCTL1_SLICE3_CFG_DS_ALLOW__DS6_MASK 0x00000040L +#define PCTL1_SLICE3_CFG_DS_ALLOW__DS7_MASK 0x00000080L +#define PCTL1_SLICE3_CFG_DS_ALLOW__DS8_MASK 0x00000100L +#define PCTL1_SLICE3_CFG_DS_ALLOW__DS9_MASK 0x00000200L +#define PCTL1_SLICE3_CFG_DS_ALLOW__DS10_MASK 0x00000400L +#define PCTL1_SLICE3_CFG_DS_ALLOW__DS11_MASK 0x00000800L +#define PCTL1_SLICE3_CFG_DS_ALLOW__DS12_MASK 0x00001000L +#define PCTL1_SLICE3_CFG_DS_ALLOW__DS13_MASK 0x00002000L +#define 
PCTL1_SLICE3_CFG_DS_ALLOW__DS14_MASK 0x00004000L +#define PCTL1_SLICE3_CFG_DS_ALLOW__DS15_MASK 0x00008000L +#define PCTL1_SLICE3_CFG_DS_ALLOW__DS16_MASK 0x00010000L +//PCTL1_SLICE3_CFG_DS_ALLOW_IB +#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS0__SHIFT 0x0 +#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS1__SHIFT 0x1 +#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS2__SHIFT 0x2 +#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS3__SHIFT 0x3 +#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS4__SHIFT 0x4 +#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS5__SHIFT 0x5 +#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS6__SHIFT 0x6 +#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS7__SHIFT 0x7 +#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS8__SHIFT 0x8 +#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS9__SHIFT 0x9 +#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS10__SHIFT 0xa +#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS11__SHIFT 0xb +#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS12__SHIFT 0xc +#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS13__SHIFT 0xd +#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS14__SHIFT 0xe +#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS15__SHIFT 0xf +#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS16__SHIFT 0x10 +#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS0_MASK 0x00000001L +#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS1_MASK 0x00000002L +#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS2_MASK 0x00000004L +#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS3_MASK 0x00000008L +#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS4_MASK 0x00000010L +#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS5_MASK 0x00000020L +#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS6_MASK 0x00000040L +#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS7_MASK 0x00000080L +#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS8_MASK 0x00000100L +#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS9_MASK 0x00000200L +#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS10_MASK 0x00000400L +#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS11_MASK 0x00000800L +#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS12_MASK 0x00001000L +#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS13_MASK 0x00002000L +#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS14_MASK 0x00004000L +#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS15_MASK 0x00008000L +#define PCTL1_SLICE3_CFG_DS_ALLOW_IB__DS16_MASK 0x00010000L +//PCTL1_SLICE4_CFG_DAGB_BUSY +#define PCTL1_SLICE4_CFG_DAGB_BUSY__DB_LNCFG__SHIFT 0x0 +#define PCTL1_SLICE4_CFG_DAGB_BUSY__DB_LNCFG_MASK 0xFFFFFFFFL +//PCTL1_SLICE4_CFG_DS_ALLOW +#define PCTL1_SLICE4_CFG_DS_ALLOW__DS0__SHIFT 0x0 +#define PCTL1_SLICE4_CFG_DS_ALLOW__DS1__SHIFT 0x1 +#define PCTL1_SLICE4_CFG_DS_ALLOW__DS2__SHIFT 0x2 +#define PCTL1_SLICE4_CFG_DS_ALLOW__DS3__SHIFT 0x3 +#define PCTL1_SLICE4_CFG_DS_ALLOW__DS4__SHIFT 0x4 +#define PCTL1_SLICE4_CFG_DS_ALLOW__DS5__SHIFT 0x5 +#define PCTL1_SLICE4_CFG_DS_ALLOW__DS6__SHIFT 0x6 +#define PCTL1_SLICE4_CFG_DS_ALLOW__DS7__SHIFT 0x7 +#define PCTL1_SLICE4_CFG_DS_ALLOW__DS8__SHIFT 0x8 +#define PCTL1_SLICE4_CFG_DS_ALLOW__DS9__SHIFT 0x9 +#define PCTL1_SLICE4_CFG_DS_ALLOW__DS10__SHIFT 0xa +#define PCTL1_SLICE4_CFG_DS_ALLOW__DS11__SHIFT 0xb +#define PCTL1_SLICE4_CFG_DS_ALLOW__DS12__SHIFT 0xc +#define PCTL1_SLICE4_CFG_DS_ALLOW__DS13__SHIFT 0xd +#define PCTL1_SLICE4_CFG_DS_ALLOW__DS14__SHIFT 0xe +#define PCTL1_SLICE4_CFG_DS_ALLOW__DS15__SHIFT 0xf +#define PCTL1_SLICE4_CFG_DS_ALLOW__DS16__SHIFT 0x10 +#define PCTL1_SLICE4_CFG_DS_ALLOW__DS0_MASK 0x00000001L +#define PCTL1_SLICE4_CFG_DS_ALLOW__DS1_MASK 0x00000002L +#define PCTL1_SLICE4_CFG_DS_ALLOW__DS2_MASK 0x00000004L +#define PCTL1_SLICE4_CFG_DS_ALLOW__DS3_MASK 0x00000008L +#define PCTL1_SLICE4_CFG_DS_ALLOW__DS4_MASK 0x00000010L +#define PCTL1_SLICE4_CFG_DS_ALLOW__DS5_MASK 0x00000020L +#define 
PCTL1_SLICE4_CFG_DS_ALLOW__DS6_MASK 0x00000040L +#define PCTL1_SLICE4_CFG_DS_ALLOW__DS7_MASK 0x00000080L +#define PCTL1_SLICE4_CFG_DS_ALLOW__DS8_MASK 0x00000100L +#define PCTL1_SLICE4_CFG_DS_ALLOW__DS9_MASK 0x00000200L +#define PCTL1_SLICE4_CFG_DS_ALLOW__DS10_MASK 0x00000400L +#define PCTL1_SLICE4_CFG_DS_ALLOW__DS11_MASK 0x00000800L +#define PCTL1_SLICE4_CFG_DS_ALLOW__DS12_MASK 0x00001000L +#define PCTL1_SLICE4_CFG_DS_ALLOW__DS13_MASK 0x00002000L +#define PCTL1_SLICE4_CFG_DS_ALLOW__DS14_MASK 0x00004000L +#define PCTL1_SLICE4_CFG_DS_ALLOW__DS15_MASK 0x00008000L +#define PCTL1_SLICE4_CFG_DS_ALLOW__DS16_MASK 0x00010000L +//PCTL1_SLICE4_CFG_DS_ALLOW_IB +#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS0__SHIFT 0x0 +#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS1__SHIFT 0x1 +#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS2__SHIFT 0x2 +#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS3__SHIFT 0x3 +#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS4__SHIFT 0x4 +#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS5__SHIFT 0x5 +#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS6__SHIFT 0x6 +#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS7__SHIFT 0x7 +#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS8__SHIFT 0x8 +#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS9__SHIFT 0x9 +#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS10__SHIFT 0xa +#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS11__SHIFT 0xb +#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS12__SHIFT 0xc +#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS13__SHIFT 0xd +#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS14__SHIFT 0xe +#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS15__SHIFT 0xf +#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS16__SHIFT 0x10 +#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS0_MASK 0x00000001L +#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS1_MASK 0x00000002L +#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS2_MASK 0x00000004L +#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS3_MASK 0x00000008L +#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS4_MASK 0x00000010L +#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS5_MASK 0x00000020L +#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS6_MASK 0x00000040L +#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS7_MASK 0x00000080L +#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS8_MASK 0x00000100L +#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS9_MASK 0x00000200L +#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS10_MASK 0x00000400L +#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS11_MASK 0x00000800L +#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS12_MASK 0x00001000L +#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS13_MASK 0x00002000L +#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS14_MASK 0x00004000L +#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS15_MASK 0x00008000L +#define PCTL1_SLICE4_CFG_DS_ALLOW_IB__DS16_MASK 0x00010000L +//PCTL1_UTCL2_MISC +#define PCTL1_UTCL2_MISC__CRITICAL_REGS_LOCK__SHIFT 0xb +#define PCTL1_UTCL2_MISC__TILE_IDLE_THRESHOLD__SHIFT 0xc +#define PCTL1_UTCL2_MISC__RENG_MEM_LS_ENABLE__SHIFT 0xf +#define PCTL1_UTCL2_MISC__STCTRL_FORCE_PGFSM_CMD_DONE__SHIFT 0x10 +#define PCTL1_UTCL2_MISC__RENG_EXECUTE_ON_REG_UPDATE__SHIFT 0x11 +#define PCTL1_UTCL2_MISC__RD_TIMER_ENABLE__SHIFT 0x12 +#define PCTL1_UTCL2_MISC__CRITICAL_REGS_LOCK_MASK 0x00000800L +#define PCTL1_UTCL2_MISC__TILE_IDLE_THRESHOLD_MASK 0x00007000L +#define PCTL1_UTCL2_MISC__RENG_MEM_LS_ENABLE_MASK 0x00008000L +#define PCTL1_UTCL2_MISC__STCTRL_FORCE_PGFSM_CMD_DONE_MASK 0x00010000L +#define PCTL1_UTCL2_MISC__RENG_EXECUTE_ON_REG_UPDATE_MASK 0x00020000L +#define PCTL1_UTCL2_MISC__RD_TIMER_ENABLE_MASK 0x00040000L +//PCTL1_SLICE0_MISC +#define PCTL1_SLICE0_MISC__CRITICAL_REGS_LOCK__SHIFT 0xa +#define PCTL1_SLICE0_MISC__TILE_IDLE_THRESHOLD__SHIFT 0xb +#define 
PCTL1_SLICE0_MISC__RENG_MEM_LS_ENABLE__SHIFT 0xe +#define PCTL1_SLICE0_MISC__STCTRL_FORCE_PGFSM_CMD_DONE__SHIFT 0xf +#define PCTL1_SLICE0_MISC__DEEPSLEEP_DISCSDP__SHIFT 0x10 +#define PCTL1_SLICE0_MISC__RENG_EXECUTE_ON_REG_UPDATE__SHIFT 0x11 +#define PCTL1_SLICE0_MISC__RD_TIMER_ENABLE__SHIFT 0x12 +#define PCTL1_SLICE0_MISC__CRITICAL_REGS_LOCK_MASK 0x00000400L +#define PCTL1_SLICE0_MISC__TILE_IDLE_THRESHOLD_MASK 0x00003800L +#define PCTL1_SLICE0_MISC__RENG_MEM_LS_ENABLE_MASK 0x00004000L +#define PCTL1_SLICE0_MISC__STCTRL_FORCE_PGFSM_CMD_DONE_MASK 0x00008000L +#define PCTL1_SLICE0_MISC__DEEPSLEEP_DISCSDP_MASK 0x00010000L +#define PCTL1_SLICE0_MISC__RENG_EXECUTE_ON_REG_UPDATE_MASK 0x00020000L +#define PCTL1_SLICE0_MISC__RD_TIMER_ENABLE_MASK 0x00040000L +//PCTL1_SLICE1_MISC +#define PCTL1_SLICE1_MISC__CRITICAL_REGS_LOCK__SHIFT 0xa +#define PCTL1_SLICE1_MISC__TILE_IDLE_THRESHOLD__SHIFT 0xb +#define PCTL1_SLICE1_MISC__RENG_MEM_LS_ENABLE__SHIFT 0xe +#define PCTL1_SLICE1_MISC__STCTRL_FORCE_PGFSM_CMD_DONE__SHIFT 0xf +#define PCTL1_SLICE1_MISC__DEEPSLEEP_DISCSDP__SHIFT 0x10 +#define PCTL1_SLICE1_MISC__RENG_EXECUTE_ON_REG_UPDATE__SHIFT 0x11 +#define PCTL1_SLICE1_MISC__RD_TIMER_ENABLE__SHIFT 0x12 +#define PCTL1_SLICE1_MISC__CRITICAL_REGS_LOCK_MASK 0x00000400L +#define PCTL1_SLICE1_MISC__TILE_IDLE_THRESHOLD_MASK 0x00003800L +#define PCTL1_SLICE1_MISC__RENG_MEM_LS_ENABLE_MASK 0x00004000L +#define PCTL1_SLICE1_MISC__STCTRL_FORCE_PGFSM_CMD_DONE_MASK 0x00008000L +#define PCTL1_SLICE1_MISC__DEEPSLEEP_DISCSDP_MASK 0x00010000L +#define PCTL1_SLICE1_MISC__RENG_EXECUTE_ON_REG_UPDATE_MASK 0x00020000L +#define PCTL1_SLICE1_MISC__RD_TIMER_ENABLE_MASK 0x00040000L +//PCTL1_SLICE2_MISC +#define PCTL1_SLICE2_MISC__CRITICAL_REGS_LOCK__SHIFT 0xa +#define PCTL1_SLICE2_MISC__TILE_IDLE_THRESHOLD__SHIFT 0xb +#define PCTL1_SLICE2_MISC__RENG_MEM_LS_ENABLE__SHIFT 0xe +#define PCTL1_SLICE2_MISC__STCTRL_FORCE_PGFSM_CMD_DONE__SHIFT 0xf +#define PCTL1_SLICE2_MISC__DEEPSLEEP_DISCSDP__SHIFT 0x10 +#define PCTL1_SLICE2_MISC__RENG_EXECUTE_ON_REG_UPDATE__SHIFT 0x11 +#define PCTL1_SLICE2_MISC__RD_TIMER_ENABLE__SHIFT 0x12 +#define PCTL1_SLICE2_MISC__CRITICAL_REGS_LOCK_MASK 0x00000400L +#define PCTL1_SLICE2_MISC__TILE_IDLE_THRESHOLD_MASK 0x00003800L +#define PCTL1_SLICE2_MISC__RENG_MEM_LS_ENABLE_MASK 0x00004000L +#define PCTL1_SLICE2_MISC__STCTRL_FORCE_PGFSM_CMD_DONE_MASK 0x00008000L +#define PCTL1_SLICE2_MISC__DEEPSLEEP_DISCSDP_MASK 0x00010000L +#define PCTL1_SLICE2_MISC__RENG_EXECUTE_ON_REG_UPDATE_MASK 0x00020000L +#define PCTL1_SLICE2_MISC__RD_TIMER_ENABLE_MASK 0x00040000L +//PCTL1_SLICE3_MISC +#define PCTL1_SLICE3_MISC__CRITICAL_REGS_LOCK__SHIFT 0xa +#define PCTL1_SLICE3_MISC__TILE_IDLE_THRESHOLD__SHIFT 0xb +#define PCTL1_SLICE3_MISC__RENG_MEM_LS_ENABLE__SHIFT 0xe +#define PCTL1_SLICE3_MISC__STCTRL_FORCE_PGFSM_CMD_DONE__SHIFT 0xf +#define PCTL1_SLICE3_MISC__DEEPSLEEP_DISCSDP__SHIFT 0x10 +#define PCTL1_SLICE3_MISC__RENG_EXECUTE_ON_REG_UPDATE__SHIFT 0x11 +#define PCTL1_SLICE3_MISC__RD_TIMER_ENABLE__SHIFT 0x12 +#define PCTL1_SLICE3_MISC__CRITICAL_REGS_LOCK_MASK 0x00000400L +#define PCTL1_SLICE3_MISC__TILE_IDLE_THRESHOLD_MASK 0x00003800L +#define PCTL1_SLICE3_MISC__RENG_MEM_LS_ENABLE_MASK 0x00004000L +#define PCTL1_SLICE3_MISC__STCTRL_FORCE_PGFSM_CMD_DONE_MASK 0x00008000L +#define PCTL1_SLICE3_MISC__DEEPSLEEP_DISCSDP_MASK 0x00010000L +#define PCTL1_SLICE3_MISC__RENG_EXECUTE_ON_REG_UPDATE_MASK 0x00020000L +#define PCTL1_SLICE3_MISC__RD_TIMER_ENABLE_MASK 0x00040000L +//PCTL1_SLICE4_MISC +#define PCTL1_SLICE4_MISC__CRITICAL_REGS_LOCK__SHIFT 
0xa +#define PCTL1_SLICE4_MISC__TILE_IDLE_THRESHOLD__SHIFT 0xb +#define PCTL1_SLICE4_MISC__RENG_MEM_LS_ENABLE__SHIFT 0xe +#define PCTL1_SLICE4_MISC__STCTRL_FORCE_PGFSM_CMD_DONE__SHIFT 0xf +#define PCTL1_SLICE4_MISC__DEEPSLEEP_DISCSDP__SHIFT 0x10 +#define PCTL1_SLICE4_MISC__RENG_EXECUTE_ON_REG_UPDATE__SHIFT 0x11 +#define PCTL1_SLICE4_MISC__RD_TIMER_ENABLE__SHIFT 0x12 +#define PCTL1_SLICE4_MISC__CRITICAL_REGS_LOCK_MASK 0x00000400L +#define PCTL1_SLICE4_MISC__TILE_IDLE_THRESHOLD_MASK 0x00003800L +#define PCTL1_SLICE4_MISC__RENG_MEM_LS_ENABLE_MASK 0x00004000L +#define PCTL1_SLICE4_MISC__STCTRL_FORCE_PGFSM_CMD_DONE_MASK 0x00008000L +#define PCTL1_SLICE4_MISC__DEEPSLEEP_DISCSDP_MASK 0x00010000L +#define PCTL1_SLICE4_MISC__RENG_EXECUTE_ON_REG_UPDATE_MASK 0x00020000L +#define PCTL1_SLICE4_MISC__RD_TIMER_ENABLE_MASK 0x00040000L +//PCTL1_UTCL2_RENG_EXECUTE +#define PCTL1_UTCL2_RENG_EXECUTE__RENG_EXECUTE_NOW__SHIFT 0x0 +#define PCTL1_UTCL2_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE__SHIFT 0x1 +#define PCTL1_UTCL2_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR__SHIFT 0x2 +#define PCTL1_UTCL2_RENG_EXECUTE__RENG_EXECUTE_END_PTR__SHIFT 0xd +#define PCTL1_UTCL2_RENG_EXECUTE__RENG_EXECUTE_NOW_MASK 0x00000001L +#define PCTL1_UTCL2_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE_MASK 0x00000002L +#define PCTL1_UTCL2_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR_MASK 0x00001FFCL +#define PCTL1_UTCL2_RENG_EXECUTE__RENG_EXECUTE_END_PTR_MASK 0x00FFE000L +//PCTL1_SLICE0_RENG_EXECUTE +#define PCTL1_SLICE0_RENG_EXECUTE__RENG_EXECUTE_NOW__SHIFT 0x0 +#define PCTL1_SLICE0_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE__SHIFT 0x1 +#define PCTL1_SLICE0_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR__SHIFT 0x2 +#define PCTL1_SLICE0_RENG_EXECUTE__RENG_EXECUTE_END_PTR__SHIFT 0xc +#define PCTL1_SLICE0_RENG_EXECUTE__RENG_EXECUTE_NOW_MASK 0x00000001L +#define PCTL1_SLICE0_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE_MASK 0x00000002L +#define PCTL1_SLICE0_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR_MASK 0x00000FFCL +#define PCTL1_SLICE0_RENG_EXECUTE__RENG_EXECUTE_END_PTR_MASK 0x003FF000L +//PCTL1_SLICE1_RENG_EXECUTE +#define PCTL1_SLICE1_RENG_EXECUTE__RENG_EXECUTE_NOW__SHIFT 0x0 +#define PCTL1_SLICE1_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE__SHIFT 0x1 +#define PCTL1_SLICE1_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR__SHIFT 0x2 +#define PCTL1_SLICE1_RENG_EXECUTE__RENG_EXECUTE_END_PTR__SHIFT 0xc +#define PCTL1_SLICE1_RENG_EXECUTE__RENG_EXECUTE_NOW_MASK 0x00000001L +#define PCTL1_SLICE1_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE_MASK 0x00000002L +#define PCTL1_SLICE1_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR_MASK 0x00000FFCL +#define PCTL1_SLICE1_RENG_EXECUTE__RENG_EXECUTE_END_PTR_MASK 0x003FF000L +//PCTL1_SLICE2_RENG_EXECUTE +#define PCTL1_SLICE2_RENG_EXECUTE__RENG_EXECUTE_NOW__SHIFT 0x0 +#define PCTL1_SLICE2_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE__SHIFT 0x1 +#define PCTL1_SLICE2_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR__SHIFT 0x2 +#define PCTL1_SLICE2_RENG_EXECUTE__RENG_EXECUTE_END_PTR__SHIFT 0xc +#define PCTL1_SLICE2_RENG_EXECUTE__RENG_EXECUTE_NOW_MASK 0x00000001L +#define PCTL1_SLICE2_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE_MASK 0x00000002L +#define PCTL1_SLICE2_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR_MASK 0x00000FFCL +#define PCTL1_SLICE2_RENG_EXECUTE__RENG_EXECUTE_END_PTR_MASK 0x003FF000L +//PCTL1_SLICE3_RENG_EXECUTE +#define PCTL1_SLICE3_RENG_EXECUTE__RENG_EXECUTE_NOW__SHIFT 0x0 +#define PCTL1_SLICE3_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE__SHIFT 0x1 +#define PCTL1_SLICE3_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR__SHIFT 0x2 +#define PCTL1_SLICE3_RENG_EXECUTE__RENG_EXECUTE_END_PTR__SHIFT 0xc +#define 
PCTL1_SLICE3_RENG_EXECUTE__RENG_EXECUTE_NOW_MASK 0x00000001L +#define PCTL1_SLICE3_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE_MASK 0x00000002L +#define PCTL1_SLICE3_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR_MASK 0x00000FFCL +#define PCTL1_SLICE3_RENG_EXECUTE__RENG_EXECUTE_END_PTR_MASK 0x003FF000L +//PCTL1_SLICE4_RENG_EXECUTE +#define PCTL1_SLICE4_RENG_EXECUTE__RENG_EXECUTE_NOW__SHIFT 0x0 +#define PCTL1_SLICE4_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE__SHIFT 0x1 +#define PCTL1_SLICE4_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR__SHIFT 0x2 +#define PCTL1_SLICE4_RENG_EXECUTE__RENG_EXECUTE_END_PTR__SHIFT 0xc +#define PCTL1_SLICE4_RENG_EXECUTE__RENG_EXECUTE_NOW_MASK 0x00000001L +#define PCTL1_SLICE4_RENG_EXECUTE__RENG_EXECUTE_NOW_MODE_MASK 0x00000002L +#define PCTL1_SLICE4_RENG_EXECUTE__RENG_EXECUTE_NOW_START_PTR_MASK 0x00000FFCL +#define PCTL1_SLICE4_RENG_EXECUTE__RENG_EXECUTE_END_PTR_MASK 0x003FF000L +//PCTL1_UTCL2_RENG_RAM_INDEX +#define PCTL1_UTCL2_RENG_RAM_INDEX__RENG_RAM_INDEX__SHIFT 0x0 +#define PCTL1_UTCL2_RENG_RAM_INDEX__RENG_RAM_INDEX_MASK 0x000007FFL +//PCTL1_UTCL2_RENG_RAM_DATA +#define PCTL1_UTCL2_RENG_RAM_DATA__RENG_RAM_DATA__SHIFT 0x0 +#define PCTL1_UTCL2_RENG_RAM_DATA__RENG_RAM_DATA_MASK 0xFFFFFFFFL +//PCTL1_SLICE0_RENG_RAM_INDEX +#define PCTL1_SLICE0_RENG_RAM_INDEX__RENG_RAM_INDEX__SHIFT 0x0 +#define PCTL1_SLICE0_RENG_RAM_INDEX__RENG_RAM_INDEX_MASK 0x000003FFL +//PCTL1_SLICE0_RENG_RAM_DATA +#define PCTL1_SLICE0_RENG_RAM_DATA__RENG_RAM_DATA__SHIFT 0x0 +#define PCTL1_SLICE0_RENG_RAM_DATA__RENG_RAM_DATA_MASK 0xFFFFFFFFL +//PCTL1_SLICE1_RENG_RAM_INDEX +#define PCTL1_SLICE1_RENG_RAM_INDEX__RENG_RAM_INDEX__SHIFT 0x0 +#define PCTL1_SLICE1_RENG_RAM_INDEX__RENG_RAM_INDEX_MASK 0x000003FFL +//PCTL1_SLICE1_RENG_RAM_DATA +#define PCTL1_SLICE1_RENG_RAM_DATA__RENG_RAM_DATA__SHIFT 0x0 +#define PCTL1_SLICE1_RENG_RAM_DATA__RENG_RAM_DATA_MASK 0xFFFFFFFFL +//PCTL1_SLICE2_RENG_RAM_INDEX +#define PCTL1_SLICE2_RENG_RAM_INDEX__RENG_RAM_INDEX__SHIFT 0x0 +#define PCTL1_SLICE2_RENG_RAM_INDEX__RENG_RAM_INDEX_MASK 0x000003FFL +//PCTL1_SLICE2_RENG_RAM_DATA +#define PCTL1_SLICE2_RENG_RAM_DATA__RENG_RAM_DATA__SHIFT 0x0 +#define PCTL1_SLICE2_RENG_RAM_DATA__RENG_RAM_DATA_MASK 0xFFFFFFFFL +//PCTL1_SLICE3_RENG_RAM_INDEX +#define PCTL1_SLICE3_RENG_RAM_INDEX__RENG_RAM_INDEX__SHIFT 0x0 +#define PCTL1_SLICE3_RENG_RAM_INDEX__RENG_RAM_INDEX_MASK 0x000003FFL +//PCTL1_SLICE3_RENG_RAM_DATA +#define PCTL1_SLICE3_RENG_RAM_DATA__RENG_RAM_DATA__SHIFT 0x0 +#define PCTL1_SLICE3_RENG_RAM_DATA__RENG_RAM_DATA_MASK 0xFFFFFFFFL +//PCTL1_SLICE4_RENG_RAM_INDEX +#define PCTL1_SLICE4_RENG_RAM_INDEX__RENG_RAM_INDEX__SHIFT 0x0 +#define PCTL1_SLICE4_RENG_RAM_INDEX__RENG_RAM_INDEX_MASK 0x000003FFL +//PCTL1_SLICE4_RENG_RAM_DATA +#define PCTL1_SLICE4_RENG_RAM_DATA__RENG_RAM_DATA__SHIFT 0x0 +#define PCTL1_SLICE4_RENG_RAM_DATA__RENG_RAM_DATA_MASK 0xFFFFFFFFL +//PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE0 +#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE1 +#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL 
+#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE2 +#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE3 +#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE4 +#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL1_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET0 +#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0 +#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10 +#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL +#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L +//PCTL1_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET1 +#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0 +#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10 +#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL +#define PCTL1_UTCL2_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L +//PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE0 +#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE1 +#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE2 +#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE3 +#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define 
PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE4 +#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL1_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET0 +#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0 +#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10 +#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL +#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L +//PCTL1_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET1 +#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0 +#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10 +#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL +#define PCTL1_SLICE0_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L +//PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE0 +#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE1 +#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE2 +#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE3 +#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE4 +#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define 
PCTL1_SLICE1_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL1_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET0 +#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0 +#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10 +#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL +#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L +//PCTL1_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET1 +#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0 +#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10 +#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL +#define PCTL1_SLICE1_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L +//PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE0 +#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE1 +#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE2 +#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE3 +#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE4 +#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL1_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET0 +#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0 +#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10 +#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL +#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L +//PCTL1_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET1 +#define 
PCTL1_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0 +#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10 +#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL +#define PCTL1_SLICE2_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L +//PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE0 +#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE1 +#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE2 +#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE3 +#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE4 +#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL1_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET0 +#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0 +#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10 +#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL +#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L +//PCTL1_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET1 +#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0 +#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10 +#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL +#define PCTL1_SLICE3_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L +//PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE0 +#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define 
PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE0__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE1 +#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE1__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE2 +#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE2__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE3 +#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE3__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE4 +#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE__SHIFT 0x0 +#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT__SHIFT 0x10 +#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_BASE_MASK 0x0000FFFFL +#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_RANGE4__STCTRL_REGISTER_SAVE_LIMIT_MASK 0xFFFF0000L +//PCTL1_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET0 +#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0 +#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10 +#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL +#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET0__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L +//PCTL1_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET1 +#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0__SHIFT 0x0 +#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1__SHIFT 0x10 +#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL0_MASK 0x0000FFFFL +#define PCTL1_SLICE4_STCTRL_REGISTER_SAVE_EXCL_SET1__STCTRL_REGISTER_SAVE_EXCL1_MASK 0xFFFF0000L + + +// addressBlock: mmhub_l1tlb_vml1dec:1 +//VML1_1_MC_VM_MX_L1_TLB0_STATUS +#define VML1_1_MC_VM_MX_L1_TLB0_STATUS__BUSY__SHIFT 0x0 +#define VML1_1_MC_VM_MX_L1_TLB0_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1 +#define VML1_1_MC_VM_MX_L1_TLB0_STATUS__BUSY_MASK 0x00000001L +#define VML1_1_MC_VM_MX_L1_TLB0_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L +//VML1_1_MC_VM_MX_L1_TLB1_STATUS +#define VML1_1_MC_VM_MX_L1_TLB1_STATUS__BUSY__SHIFT 0x0 +#define VML1_1_MC_VM_MX_L1_TLB1_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1 +#define VML1_1_MC_VM_MX_L1_TLB1_STATUS__BUSY_MASK 0x00000001L +#define VML1_1_MC_VM_MX_L1_TLB1_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L +//VML1_1_MC_VM_MX_L1_TLB2_STATUS +#define VML1_1_MC_VM_MX_L1_TLB2_STATUS__BUSY__SHIFT 0x0 +#define VML1_1_MC_VM_MX_L1_TLB2_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1 +#define VML1_1_MC_VM_MX_L1_TLB2_STATUS__BUSY_MASK 
0x00000001L +#define VML1_1_MC_VM_MX_L1_TLB2_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L +//VML1_1_MC_VM_MX_L1_TLB3_STATUS +#define VML1_1_MC_VM_MX_L1_TLB3_STATUS__BUSY__SHIFT 0x0 +#define VML1_1_MC_VM_MX_L1_TLB3_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1 +#define VML1_1_MC_VM_MX_L1_TLB3_STATUS__BUSY_MASK 0x00000001L +#define VML1_1_MC_VM_MX_L1_TLB3_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L +//VML1_1_MC_VM_MX_L1_TLB4_STATUS +#define VML1_1_MC_VM_MX_L1_TLB4_STATUS__BUSY__SHIFT 0x0 +#define VML1_1_MC_VM_MX_L1_TLB4_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1 +#define VML1_1_MC_VM_MX_L1_TLB4_STATUS__BUSY_MASK 0x00000001L +#define VML1_1_MC_VM_MX_L1_TLB4_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L +//VML1_1_MC_VM_MX_L1_TLB5_STATUS +#define VML1_1_MC_VM_MX_L1_TLB5_STATUS__BUSY__SHIFT 0x0 +#define VML1_1_MC_VM_MX_L1_TLB5_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1 +#define VML1_1_MC_VM_MX_L1_TLB5_STATUS__BUSY_MASK 0x00000001L +#define VML1_1_MC_VM_MX_L1_TLB5_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L +//VML1_1_MC_VM_MX_L1_TLB6_STATUS +#define VML1_1_MC_VM_MX_L1_TLB6_STATUS__BUSY__SHIFT 0x0 +#define VML1_1_MC_VM_MX_L1_TLB6_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1 +#define VML1_1_MC_VM_MX_L1_TLB6_STATUS__BUSY_MASK 0x00000001L +#define VML1_1_MC_VM_MX_L1_TLB6_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L +//VML1_1_MC_VM_MX_L1_TLB7_STATUS +#define VML1_1_MC_VM_MX_L1_TLB7_STATUS__BUSY__SHIFT 0x0 +#define VML1_1_MC_VM_MX_L1_TLB7_STATUS__FOUND_PARITY_ERRORS__SHIFT 0x1 +#define VML1_1_MC_VM_MX_L1_TLB7_STATUS__BUSY_MASK 0x00000001L +#define VML1_1_MC_VM_MX_L1_TLB7_STATUS__FOUND_PARITY_ERRORS_MASK 0x00000002L + + +// addressBlock: mmhub_l1tlb_vml1pldec:1 +//VML1PL1_MC_VM_MX_L1_PERFCOUNTER0_CFG +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0 +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8 +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18 +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L +//VML1PL1_MC_VM_MX_L1_PERFCOUNTER1_CFG +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0 +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8 +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18 +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L +//VML1PL1_MC_VM_MX_L1_PERFCOUNTER2_CFG +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0 +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8 +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18 +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d +#define 
VML1PL1_MC_VM_MX_L1_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L +//VML1PL1_MC_VM_MX_L1_PERFCOUNTER3_CFG +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER3_CFG__PERF_SEL__SHIFT 0x0 +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER3_CFG__PERF_SEL_END__SHIFT 0x8 +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER3_CFG__PERF_MODE__SHIFT 0x18 +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER3_CFG__ENABLE__SHIFT 0x1c +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER3_CFG__CLEAR__SHIFT 0x1d +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER3_CFG__PERF_SEL_MASK 0x000000FFL +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER3_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER3_CFG__PERF_MODE_MASK 0x0F000000L +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER3_CFG__ENABLE_MASK 0x10000000L +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER3_CFG__CLEAR_MASK 0x20000000L +//VML1PL1_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0 +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8 +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10 +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18 +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19 +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L +#define VML1PL1_MC_VM_MX_L1_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L + + +// addressBlock: mmhub_l1tlb_vml1prdec:1 +//VML1PR1_MC_VM_MX_L1_PERFCOUNTER_LO +#define VML1PR1_MC_VM_MX_L1_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0 +#define VML1PR1_MC_VM_MX_L1_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL +//VML1PR1_MC_VM_MX_L1_PERFCOUNTER_HI +#define VML1PR1_MC_VM_MX_L1_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0 +#define VML1PR1_MC_VM_MX_L1_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10 +#define VML1PR1_MC_VM_MX_L1_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL +#define VML1PR1_MC_VM_MX_L1_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L + + +// addressBlock: mmhub_utcl2_atcl2dec:1 +//ATCL2_1_ATC_L2_CNTL +#define ATCL2_1_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READ_REQUESTS__SHIFT 0x0 +#define ATCL2_1_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITE_REQUESTS__SHIFT 0x3 +#define ATCL2_1_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD__SHIFT 0x6 +#define ATCL2_1_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD__SHIFT 0x7 +#define ATCL2_1_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_READ_REQUESTS__SHIFT 0x8 +#define ATCL2_1_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_WRITE_REQUESTS__SHIFT 0xb +#define ATCL2_1_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD__SHIFT 0xe +#define ATCL2_1_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD__SHIFT 0xf +#define ATCL2_1_ATC_L2_CNTL__CACHE_INVALIDATE_MODE__SHIFT 0x10 +#define 
ATCL2_1_ATC_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY__SHIFT 0x13 +#define ATCL2_1_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READ_REQUESTS_MASK 0x00000003L +#define ATCL2_1_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITE_REQUESTS_MASK 0x00000018L +#define ATCL2_1_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD_MASK 0x00000040L +#define ATCL2_1_ATC_L2_CNTL__NUMBER_OF_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD_MASK 0x00000080L +#define ATCL2_1_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_READ_REQUESTS_MASK 0x00000300L +#define ATCL2_1_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_WRITE_REQUESTS_MASK 0x00001800L +#define ATCL2_1_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_READS_DEPENDS_ON_ADDR_MOD_MASK 0x00004000L +#define ATCL2_1_ATC_L2_CNTL__NUMBER_OF_HOST_TRANSLATION_WRITES_DEPENDS_ON_ADDR_MOD_MASK 0x00008000L +#define ATCL2_1_ATC_L2_CNTL__CACHE_INVALIDATE_MODE_MASK 0x00070000L +#define ATCL2_1_ATC_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY_MASK 0x00080000L +//ATCL2_1_ATC_L2_CNTL2 +#define ATCL2_1_ATC_L2_CNTL2__BANK_SELECT__SHIFT 0x0 +#define ATCL2_1_ATC_L2_CNTL2__L2_CACHE_UPDATE_MODE__SHIFT 0x6 +#define ATCL2_1_ATC_L2_CNTL2__ENABLE_L2_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0x8 +#define ATCL2_1_ATC_L2_CNTL2__L2_CACHE_SWAP_TAG_INDEX_LSBS__SHIFT 0x9 +#define ATCL2_1_ATC_L2_CNTL2__L2_CACHE_VMID_MODE__SHIFT 0xc +#define ATCL2_1_ATC_L2_CNTL2__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE__SHIFT 0xf +#define ATCL2_1_ATC_L2_CNTL2__L2_BIGK_FRAGMENT_SIZE__SHIFT 0x15 +#define ATCL2_1_ATC_L2_CNTL2__L2_4K_BIGK_SWAP_ENABLE__SHIFT 0x1b +#define ATCL2_1_ATC_L2_CNTL2__BANK_SELECT_MASK 0x0000003FL +#define ATCL2_1_ATC_L2_CNTL2__L2_CACHE_UPDATE_MODE_MASK 0x000000C0L +#define ATCL2_1_ATC_L2_CNTL2__ENABLE_L2_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000100L +#define ATCL2_1_ATC_L2_CNTL2__L2_CACHE_SWAP_TAG_INDEX_LSBS_MASK 0x00000E00L +#define ATCL2_1_ATC_L2_CNTL2__L2_CACHE_VMID_MODE_MASK 0x00007000L +#define ATCL2_1_ATC_L2_CNTL2__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE_MASK 0x001F8000L +#define ATCL2_1_ATC_L2_CNTL2__L2_BIGK_FRAGMENT_SIZE_MASK 0x07E00000L +#define ATCL2_1_ATC_L2_CNTL2__L2_4K_BIGK_SWAP_ENABLE_MASK 0x08000000L +//ATCL2_1_ATC_L2_CACHE_DATA0 +#define ATCL2_1_ATC_L2_CACHE_DATA0__DATA_REGISTER_VALID__SHIFT 0x0 +#define ATCL2_1_ATC_L2_CACHE_DATA0__CACHE_ENTRY_VALID__SHIFT 0x1 +#define ATCL2_1_ATC_L2_CACHE_DATA0__CACHED_ATTRIBUTES__SHIFT 0x2 +#define ATCL2_1_ATC_L2_CACHE_DATA0__VIRTUAL_PAGE_ADDRESS_HIGH__SHIFT 0x17 +#define ATCL2_1_ATC_L2_CACHE_DATA0__DATA_REGISTER_VALID_MASK 0x00000001L +#define ATCL2_1_ATC_L2_CACHE_DATA0__CACHE_ENTRY_VALID_MASK 0x00000002L +#define ATCL2_1_ATC_L2_CACHE_DATA0__CACHED_ATTRIBUTES_MASK 0x007FFFFCL +#define ATCL2_1_ATC_L2_CACHE_DATA0__VIRTUAL_PAGE_ADDRESS_HIGH_MASK 0x07800000L +//ATCL2_1_ATC_L2_CACHE_DATA1 +#define ATCL2_1_ATC_L2_CACHE_DATA1__VIRTUAL_PAGE_ADDRESS_LOW__SHIFT 0x0 +#define ATCL2_1_ATC_L2_CACHE_DATA1__VIRTUAL_PAGE_ADDRESS_LOW_MASK 0xFFFFFFFFL +//ATCL2_1_ATC_L2_CACHE_DATA2 +#define ATCL2_1_ATC_L2_CACHE_DATA2__PHYSICAL_PAGE_ADDRESS__SHIFT 0x0 +#define ATCL2_1_ATC_L2_CACHE_DATA2__PHYSICAL_PAGE_ADDRESS_MASK 0xFFFFFFFFL +//ATCL2_1_ATC_L2_CNTL3 +#define ATCL2_1_ATC_L2_CNTL3__DELAY_SEND_INVALIDATION_REQUEST__SHIFT 0x0 +#define ATCL2_1_ATC_L2_CNTL3__ATS_REQUEST_CREDIT_MINUS1__SHIFT 0x3 +#define ATCL2_1_ATC_L2_CNTL3__COMPCLKREQ_OFF_HYSTERESIS__SHIFT 0x9 +#define ATCL2_1_ATC_L2_CNTL3__DELAY_SEND_INVALIDATION_REQUEST_MASK 0x00000007L +#define ATCL2_1_ATC_L2_CNTL3__ATS_REQUEST_CREDIT_MINUS1_MASK 0x000001F8L +#define ATCL2_1_ATC_L2_CNTL3__COMPCLKREQ_OFF_HYSTERESIS_MASK 0x00000E00L 
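
As an aside on how these generated SHIFT/MASK pairs are consumed: below is a minimal, self-contained C sketch of the usual extract/update pattern. The two macro values are copied from the ATCL2_1_ATC_L2_CNTL3 definitions in the hunk above; the register readback value is invented purely for illustration, and in the driver itself this pattern is normally wrapped in helpers such as REG_GET_FIELD()/REG_SET_FIELD() rather than open-coded.

    #include <stdint.h>
    #include <stdio.h>

    /* Values copied from the generated header hunk above; real code would
     * include the sh_mask header instead of redefining them locally. */
    #define ATCL2_1_ATC_L2_CNTL3__ATS_REQUEST_CREDIT_MINUS1__SHIFT 0x3
    #define ATCL2_1_ATC_L2_CNTL3__ATS_REQUEST_CREDIT_MINUS1_MASK   0x000001F8L

    int main(void)
    {
    	uint32_t reg = 0x00000128;	/* hypothetical ATC_L2_CNTL3 readback */
    	uint32_t credits;

    	/* Extract the field: mask off the bits, then shift down. */
    	credits = (reg & ATCL2_1_ATC_L2_CNTL3__ATS_REQUEST_CREDIT_MINUS1_MASK) >>
    		  ATCL2_1_ATC_L2_CNTL3__ATS_REQUEST_CREDIT_MINUS1__SHIFT;

    	/* Update the field in place: clear it, then OR in the new value. */
    	reg &= ~ATCL2_1_ATC_L2_CNTL3__ATS_REQUEST_CREDIT_MINUS1_MASK;
    	reg |= (credits + 1) << ATCL2_1_ATC_L2_CNTL3__ATS_REQUEST_CREDIT_MINUS1__SHIFT;

    	printf("credits=%u reg=0x%08x\n", credits, reg);
    	return 0;
    }
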
+//ATCL2_1_ATC_L2_STATUS +#define ATCL2_1_ATC_L2_STATUS__BUSY__SHIFT 0x0 +#define ATCL2_1_ATC_L2_STATUS__PARITY_ERROR_INFO__SHIFT 0x1 +#define ATCL2_1_ATC_L2_STATUS__BUSY_MASK 0x00000001L +#define ATCL2_1_ATC_L2_STATUS__PARITY_ERROR_INFO_MASK 0x7FFFFFFEL +//ATCL2_1_ATC_L2_STATUS2 +#define ATCL2_1_ATC_L2_STATUS2__IFIFO_NON_FATAL_PARITY_ERROR_INFO__SHIFT 0x0 +#define ATCL2_1_ATC_L2_STATUS2__IFIFO_FATAL_PARITY_ERROR_INFO__SHIFT 0x8 +#define ATCL2_1_ATC_L2_STATUS2__IFIFO_NON_FATAL_PARITY_ERROR_INFO_MASK 0x000000FFL +#define ATCL2_1_ATC_L2_STATUS2__IFIFO_FATAL_PARITY_ERROR_INFO_MASK 0x0000FF00L +//ATCL2_1_ATC_L2_STATUS3 +#define ATCL2_1_ATC_L2_STATUS3__BUSY__SHIFT 0x0 +#define ATCL2_1_ATC_L2_STATUS3__PARITY_ERROR_INFO__SHIFT 0x1 +#define ATCL2_1_ATC_L2_STATUS3__BUSY_MASK 0x00000001L +#define ATCL2_1_ATC_L2_STATUS3__PARITY_ERROR_INFO_MASK 0x7FFFFFFEL +//ATCL2_1_ATC_L2_MISC_CG +#define ATCL2_1_ATC_L2_MISC_CG__OFFDLY__SHIFT 0x6 +#define ATCL2_1_ATC_L2_MISC_CG__ENABLE__SHIFT 0x12 +#define ATCL2_1_ATC_L2_MISC_CG__MEM_LS_ENABLE__SHIFT 0x13 +#define ATCL2_1_ATC_L2_MISC_CG__OFFDLY_MASK 0x00000FC0L +#define ATCL2_1_ATC_L2_MISC_CG__ENABLE_MASK 0x00040000L +#define ATCL2_1_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK 0x00080000L +//ATCL2_1_ATC_L2_MEM_POWER_LS +#define ATCL2_1_ATC_L2_MEM_POWER_LS__LS_SETUP__SHIFT 0x0 +#define ATCL2_1_ATC_L2_MEM_POWER_LS__LS_HOLD__SHIFT 0x6 +#define ATCL2_1_ATC_L2_MEM_POWER_LS__LS_SETUP_MASK 0x0000003FL +#define ATCL2_1_ATC_L2_MEM_POWER_LS__LS_HOLD_MASK 0x00000FC0L +//ATCL2_1_ATC_L2_CGTT_CLK_CTRL +#define ATCL2_1_ATC_L2_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define ATCL2_1_ATC_L2_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define ATCL2_1_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf +#define ATCL2_1_ATC_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x10 +#define ATCL2_1_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x18 +#define ATCL2_1_ATC_L2_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define ATCL2_1_ATC_L2_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define ATCL2_1_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L +#define ATCL2_1_ATC_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L +#define ATCL2_1_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L +//ATCL2_1_ATC_L2_CACHE_4K_DSM_INDEX +#define ATCL2_1_ATC_L2_CACHE_4K_DSM_INDEX__INDEX__SHIFT 0x0 +#define ATCL2_1_ATC_L2_CACHE_4K_DSM_INDEX__INDEX_MASK 0x000000FFL +//ATCL2_1_ATC_L2_CACHE_2M_DSM_INDEX +#define ATCL2_1_ATC_L2_CACHE_2M_DSM_INDEX__INDEX__SHIFT 0x0 +#define ATCL2_1_ATC_L2_CACHE_2M_DSM_INDEX__INDEX_MASK 0x000000FFL +//ATCL2_1_ATC_L2_CACHE_4K_DSM_CNTL +#define ATCL2_1_ATC_L2_CACHE_4K_DSM_CNTL__INJECT_DELAY__SHIFT 0x0 +#define ATCL2_1_ATC_L2_CACHE_4K_DSM_CNTL__DSM_IRRITATOR_DATA__SHIFT 0x6 +#define ATCL2_1_ATC_L2_CACHE_4K_DSM_CNTL__ENABLE_SINGLE_WRITE__SHIFT 0x8 +#define ATCL2_1_ATC_L2_CACHE_4K_DSM_CNTL__ENABLE_ERROR_INJECT__SHIFT 0x9 +#define ATCL2_1_ATC_L2_CACHE_4K_DSM_CNTL__SELECT_INJECT_DELAY__SHIFT 0xb +#define ATCL2_1_ATC_L2_CACHE_4K_DSM_CNTL__WRITE_COUNTERS__SHIFT 0xc +#define ATCL2_1_ATC_L2_CACHE_4K_DSM_CNTL__SEC_COUNT__SHIFT 0xd +#define ATCL2_1_ATC_L2_CACHE_4K_DSM_CNTL__DED_COUNT__SHIFT 0xf +#define ATCL2_1_ATC_L2_CACHE_4K_DSM_CNTL__TEST_FUE__SHIFT 0x11 +#define ATCL2_1_ATC_L2_CACHE_4K_DSM_CNTL__INJECT_DELAY_MASK 0x0000003FL +#define ATCL2_1_ATC_L2_CACHE_4K_DSM_CNTL__DSM_IRRITATOR_DATA_MASK 0x000000C0L +#define ATCL2_1_ATC_L2_CACHE_4K_DSM_CNTL__ENABLE_SINGLE_WRITE_MASK 0x00000100L +#define ATCL2_1_ATC_L2_CACHE_4K_DSM_CNTL__ENABLE_ERROR_INJECT_MASK 0x00000600L +#define 
ATCL2_1_ATC_L2_CACHE_4K_DSM_CNTL__SELECT_INJECT_DELAY_MASK 0x00000800L +#define ATCL2_1_ATC_L2_CACHE_4K_DSM_CNTL__WRITE_COUNTERS_MASK 0x00001000L +#define ATCL2_1_ATC_L2_CACHE_4K_DSM_CNTL__SEC_COUNT_MASK 0x00006000L +#define ATCL2_1_ATC_L2_CACHE_4K_DSM_CNTL__DED_COUNT_MASK 0x00018000L +#define ATCL2_1_ATC_L2_CACHE_4K_DSM_CNTL__TEST_FUE_MASK 0x00020000L +//ATCL2_1_ATC_L2_CACHE_2M_DSM_CNTL +#define ATCL2_1_ATC_L2_CACHE_2M_DSM_CNTL__INJECT_DELAY__SHIFT 0x0 +#define ATCL2_1_ATC_L2_CACHE_2M_DSM_CNTL__DSM_IRRITATOR_DATA__SHIFT 0x6 +#define ATCL2_1_ATC_L2_CACHE_2M_DSM_CNTL__ENABLE_SINGLE_WRITE__SHIFT 0x8 +#define ATCL2_1_ATC_L2_CACHE_2M_DSM_CNTL__ENABLE_ERROR_INJECT__SHIFT 0x9 +#define ATCL2_1_ATC_L2_CACHE_2M_DSM_CNTL__SELECT_INJECT_DELAY__SHIFT 0xb +#define ATCL2_1_ATC_L2_CACHE_2M_DSM_CNTL__WRITE_COUNTERS__SHIFT 0xc +#define ATCL2_1_ATC_L2_CACHE_2M_DSM_CNTL__SEC_COUNT__SHIFT 0xd +#define ATCL2_1_ATC_L2_CACHE_2M_DSM_CNTL__DED_COUNT__SHIFT 0xf +#define ATCL2_1_ATC_L2_CACHE_2M_DSM_CNTL__TEST_FUE__SHIFT 0x11 +#define ATCL2_1_ATC_L2_CACHE_2M_DSM_CNTL__INJECT_DELAY_MASK 0x0000003FL +#define ATCL2_1_ATC_L2_CACHE_2M_DSM_CNTL__DSM_IRRITATOR_DATA_MASK 0x000000C0L +#define ATCL2_1_ATC_L2_CACHE_2M_DSM_CNTL__ENABLE_SINGLE_WRITE_MASK 0x00000100L +#define ATCL2_1_ATC_L2_CACHE_2M_DSM_CNTL__ENABLE_ERROR_INJECT_MASK 0x00000600L +#define ATCL2_1_ATC_L2_CACHE_2M_DSM_CNTL__SELECT_INJECT_DELAY_MASK 0x00000800L +#define ATCL2_1_ATC_L2_CACHE_2M_DSM_CNTL__WRITE_COUNTERS_MASK 0x00001000L +#define ATCL2_1_ATC_L2_CACHE_2M_DSM_CNTL__SEC_COUNT_MASK 0x00006000L +#define ATCL2_1_ATC_L2_CACHE_2M_DSM_CNTL__DED_COUNT_MASK 0x00018000L +#define ATCL2_1_ATC_L2_CACHE_2M_DSM_CNTL__TEST_FUE_MASK 0x00020000L +//ATCL2_1_ATC_L2_CNTL4 +#define ATCL2_1_ATC_L2_CNTL4__MM_NONRT_IFIFO_ACTIVE_TRANSACTION_LIMIT__SHIFT 0x0 +#define ATCL2_1_ATC_L2_CNTL4__MM_SOFTRT_IFIFO_ACTIVE_TRANSACTION_LIMIT__SHIFT 0xa +#define ATCL2_1_ATC_L2_CNTL4__MM_NONRT_IFIFO_ACTIVE_TRANSACTION_LIMIT_MASK 0x000003FFL +#define ATCL2_1_ATC_L2_CNTL4__MM_SOFTRT_IFIFO_ACTIVE_TRANSACTION_LIMIT_MASK 0x000FFC00L +//ATCL2_1_ATC_L2_MM_GROUP_RT_CLASSES +#define ATCL2_1_ATC_L2_MM_GROUP_RT_CLASSES__GROUP_RT_CLASS__SHIFT 0x0 +#define ATCL2_1_ATC_L2_MM_GROUP_RT_CLASSES__GROUP_RT_CLASS_MASK 0xFFFFFFFFL + + +// addressBlock: mmhub_utcl2_vml2pfdec:1 +//VML2PF1_VM_L2_CNTL +#define VML2PF1_VM_L2_CNTL__ENABLE_L2_CACHE__SHIFT 0x0 +#define VML2PF1_VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING__SHIFT 0x1 +#define VML2PF1_VM_L2_CNTL__L2_CACHE_PTE_ENDIAN_SWAP_MODE__SHIFT 0x2 +#define VML2PF1_VM_L2_CNTL__L2_CACHE_PDE_ENDIAN_SWAP_MODE__SHIFT 0x4 +#define VML2PF1_VM_L2_CNTL__L2_PDE0_CACHE_TAG_GENERATION_MODE__SHIFT 0x8 +#define VML2PF1_VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0x9 +#define VML2PF1_VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE__SHIFT 0xa +#define VML2PF1_VM_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY__SHIFT 0xb +#define VML2PF1_VM_L2_CNTL__L2_PDE0_CACHE_SPLIT_MODE__SHIFT 0xc +#define VML2PF1_VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT 0xf +#define VML2PF1_VM_L2_CNTL__PDE_FAULT_CLASSIFICATION__SHIFT 0x12 +#define VML2PF1_VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT 0x13 +#define VML2PF1_VM_L2_CNTL__IDENTITY_MODE_FRAGMENT_SIZE__SHIFT 0x15 +#define VML2PF1_VM_L2_CNTL__L2_PTE_CACHE_ADDR_MODE__SHIFT 0x1a +#define VML2PF1_VM_L2_CNTL__ENABLE_L2_CACHE_MASK 0x00000001L +#define VML2PF1_VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING_MASK 0x00000002L +#define VML2PF1_VM_L2_CNTL__L2_CACHE_PTE_ENDIAN_SWAP_MODE_MASK 0x0000000CL +#define 
VML2PF1_VM_L2_CNTL__L2_CACHE_PDE_ENDIAN_SWAP_MODE_MASK 0x00000030L +#define VML2PF1_VM_L2_CNTL__L2_PDE0_CACHE_TAG_GENERATION_MODE_MASK 0x00000100L +#define VML2PF1_VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000200L +#define VML2PF1_VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK 0x00000400L +#define VML2PF1_VM_L2_CNTL__ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY_MASK 0x00000800L +#define VML2PF1_VM_L2_CNTL__L2_PDE0_CACHE_SPLIT_MODE_MASK 0x00007000L +#define VML2PF1_VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE_MASK 0x00038000L +#define VML2PF1_VM_L2_CNTL__PDE_FAULT_CLASSIFICATION_MASK 0x00040000L +#define VML2PF1_VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE_MASK 0x00180000L +#define VML2PF1_VM_L2_CNTL__IDENTITY_MODE_FRAGMENT_SIZE_MASK 0x03E00000L +#define VML2PF1_VM_L2_CNTL__L2_PTE_CACHE_ADDR_MODE_MASK 0x0C000000L +//VML2PF1_VM_L2_CNTL2 +#define VML2PF1_VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS__SHIFT 0x0 +#define VML2PF1_VM_L2_CNTL2__INVALIDATE_L2_CACHE__SHIFT 0x1 +#define VML2PF1_VM_L2_CNTL2__DISABLE_INVALIDATE_PER_DOMAIN__SHIFT 0x15 +#define VML2PF1_VM_L2_CNTL2__DISABLE_BIGK_CACHE_OPTIMIZATION__SHIFT 0x16 +#define VML2PF1_VM_L2_CNTL2__L2_PTE_CACHE_VMID_MODE__SHIFT 0x17 +#define VML2PF1_VM_L2_CNTL2__INVALIDATE_CACHE_MODE__SHIFT 0x1a +#define VML2PF1_VM_L2_CNTL2__PDE_CACHE_EFFECTIVE_SIZE__SHIFT 0x1c +#define VML2PF1_VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS_MASK 0x00000001L +#define VML2PF1_VM_L2_CNTL2__INVALIDATE_L2_CACHE_MASK 0x00000002L +#define VML2PF1_VM_L2_CNTL2__DISABLE_INVALIDATE_PER_DOMAIN_MASK 0x00200000L +#define VML2PF1_VM_L2_CNTL2__DISABLE_BIGK_CACHE_OPTIMIZATION_MASK 0x00400000L +#define VML2PF1_VM_L2_CNTL2__L2_PTE_CACHE_VMID_MODE_MASK 0x03800000L +#define VML2PF1_VM_L2_CNTL2__INVALIDATE_CACHE_MODE_MASK 0x0C000000L +#define VML2PF1_VM_L2_CNTL2__PDE_CACHE_EFFECTIVE_SIZE_MASK 0x70000000L +//VML2PF1_VM_L2_CNTL3 +#define VML2PF1_VM_L2_CNTL3__BANK_SELECT__SHIFT 0x0 +#define VML2PF1_VM_L2_CNTL3__L2_CACHE_UPDATE_MODE__SHIFT 0x6 +#define VML2PF1_VM_L2_CNTL3__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE__SHIFT 0x8 +#define VML2PF1_VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT 0xf +#define VML2PF1_VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY__SHIFT 0x14 +#define VML2PF1_VM_L2_CNTL3__L2_CACHE_4K_EFFECTIVE_SIZE__SHIFT 0x15 +#define VML2PF1_VM_L2_CNTL3__L2_CACHE_BIGK_EFFECTIVE_SIZE__SHIFT 0x18 +#define VML2PF1_VM_L2_CNTL3__L2_CACHE_4K_FORCE_MISS__SHIFT 0x1c +#define VML2PF1_VM_L2_CNTL3__L2_CACHE_BIGK_FORCE_MISS__SHIFT 0x1d +#define VML2PF1_VM_L2_CNTL3__PDE_CACHE_FORCE_MISS__SHIFT 0x1e +#define VML2PF1_VM_L2_CNTL3__L2_CACHE_4K_ASSOCIATIVITY__SHIFT 0x1f +#define VML2PF1_VM_L2_CNTL3__BANK_SELECT_MASK 0x0000003FL +#define VML2PF1_VM_L2_CNTL3__L2_CACHE_UPDATE_MODE_MASK 0x000000C0L +#define VML2PF1_VM_L2_CNTL3__L2_CACHE_UPDATE_WILDCARD_REFERENCE_VALUE_MASK 0x00001F00L +#define VML2PF1_VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE_MASK 0x000F8000L +#define VML2PF1_VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK 0x00100000L +#define VML2PF1_VM_L2_CNTL3__L2_CACHE_4K_EFFECTIVE_SIZE_MASK 0x00E00000L +#define VML2PF1_VM_L2_CNTL3__L2_CACHE_BIGK_EFFECTIVE_SIZE_MASK 0x0F000000L +#define VML2PF1_VM_L2_CNTL3__L2_CACHE_4K_FORCE_MISS_MASK 0x10000000L +#define VML2PF1_VM_L2_CNTL3__L2_CACHE_BIGK_FORCE_MISS_MASK 0x20000000L +#define VML2PF1_VM_L2_CNTL3__PDE_CACHE_FORCE_MISS_MASK 0x40000000L +#define VML2PF1_VM_L2_CNTL3__L2_CACHE_4K_ASSOCIATIVITY_MASK 0x80000000L +//VML2PF1_VM_L2_STATUS +#define VML2PF1_VM_L2_STATUS__L2_BUSY__SHIFT 0x0 +#define VML2PF1_VM_L2_STATUS__CONTEXT_DOMAIN_BUSY__SHIFT 0x1 +#define 
VML2PF1_VM_L2_STATUS__FOUND_4K_PTE_CACHE_PARITY_ERRORS__SHIFT 0x11 +#define VML2PF1_VM_L2_STATUS__FOUND_BIGK_PTE_CACHE_PARITY_ERRORS__SHIFT 0x12 +#define VML2PF1_VM_L2_STATUS__FOUND_PDE0_CACHE_PARITY_ERRORS__SHIFT 0x13 +#define VML2PF1_VM_L2_STATUS__FOUND_PDE1_CACHE_PARITY_ERRORS__SHIFT 0x14 +#define VML2PF1_VM_L2_STATUS__FOUND_PDE2_CACHE_PARITY_ERRORS__SHIFT 0x15 +#define VML2PF1_VM_L2_STATUS__L2_BUSY_MASK 0x00000001L +#define VML2PF1_VM_L2_STATUS__CONTEXT_DOMAIN_BUSY_MASK 0x0001FFFEL +#define VML2PF1_VM_L2_STATUS__FOUND_4K_PTE_CACHE_PARITY_ERRORS_MASK 0x00020000L +#define VML2PF1_VM_L2_STATUS__FOUND_BIGK_PTE_CACHE_PARITY_ERRORS_MASK 0x00040000L +#define VML2PF1_VM_L2_STATUS__FOUND_PDE0_CACHE_PARITY_ERRORS_MASK 0x00080000L +#define VML2PF1_VM_L2_STATUS__FOUND_PDE1_CACHE_PARITY_ERRORS_MASK 0x00100000L +#define VML2PF1_VM_L2_STATUS__FOUND_PDE2_CACHE_PARITY_ERRORS_MASK 0x00200000L +//VML2PF1_VM_DUMMY_PAGE_FAULT_CNTL +#define VML2PF1_VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_FAULT_ENABLE__SHIFT 0x0 +#define VML2PF1_VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_ADDRESS_LOGICAL__SHIFT 0x1 +#define VML2PF1_VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_COMPARE_MSBS__SHIFT 0x2 +#define VML2PF1_VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_FAULT_ENABLE_MASK 0x00000001L +#define VML2PF1_VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_ADDRESS_LOGICAL_MASK 0x00000002L +#define VML2PF1_VM_DUMMY_PAGE_FAULT_CNTL__DUMMY_PAGE_COMPARE_MSBS_MASK 0x000000FCL +//VML2PF1_VM_DUMMY_PAGE_FAULT_ADDR_LO32 +#define VML2PF1_VM_DUMMY_PAGE_FAULT_ADDR_LO32__DUMMY_PAGE_ADDR_LO32__SHIFT 0x0 +#define VML2PF1_VM_DUMMY_PAGE_FAULT_ADDR_LO32__DUMMY_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL +//VML2PF1_VM_DUMMY_PAGE_FAULT_ADDR_HI32 +#define VML2PF1_VM_DUMMY_PAGE_FAULT_ADDR_HI32__DUMMY_PAGE_ADDR_HI4__SHIFT 0x0 +#define VML2PF1_VM_DUMMY_PAGE_FAULT_ADDR_HI32__DUMMY_PAGE_ADDR_HI4_MASK 0x0000000FL +//VML2PF1_VM_L2_PROTECTION_FAULT_CNTL +#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x0 +#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__ALLOW_SUBSEQUENT_PROTECTION_FAULT_STATUS_ADDR_UPDATES__SHIFT 0x1 +#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x2 +#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x3 +#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__PDE1_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x4 +#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__PDE2_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x5 +#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x6 +#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__NACK_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x7 +#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x8 +#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x9 +#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa +#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xb +#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc +#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0xd +#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__OTHER_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0x1d +#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_NO_RETRY_FAULT__SHIFT 0x1e +#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_RETRY_FAULT__SHIFT 0x1f +#define 
VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00000001L +#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__ALLOW_SUBSEQUENT_PROTECTION_FAULT_STATUS_ADDR_UPDATES_MASK 0x00000002L +#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000004L +#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000008L +#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__PDE1_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000010L +#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__PDE2_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000020L +#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000040L +#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__NACK_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000080L +#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000100L +#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000200L +#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L +#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000800L +#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L +#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0x1FFFE000L +#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__OTHER_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0x20000000L +#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_NO_RETRY_FAULT_MASK 0x40000000L +#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL__CRASH_ON_RETRY_FAULT_MASK 0x80000000L +//VML2PF1_VM_L2_PROTECTION_FAULT_CNTL2 +#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL2__CLIENT_ID_PRT_FAULT_INTERRUPT__SHIFT 0x0 +#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL2__OTHER_CLIENT_ID_PRT_FAULT_INTERRUPT__SHIFT 0x10 +#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE__SHIFT 0x11 +#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY__SHIFT 0x12 +#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL2__ENABLE_RETRY_FAULT_INTERRUPT__SHIFT 0x13 +#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL2__CLIENT_ID_PRT_FAULT_INTERRUPT_MASK 0x0000FFFFL +#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL2__OTHER_CLIENT_ID_PRT_FAULT_INTERRUPT_MASK 0x00010000L +#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE_MASK 0x00020000L +#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL2__ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY_MASK 0x00040000L +#define VML2PF1_VM_L2_PROTECTION_FAULT_CNTL2__ENABLE_RETRY_FAULT_INTERRUPT_MASK 0x00080000L +//VML2PF1_VM_L2_PROTECTION_FAULT_MM_CNTL3 +#define VML2PF1_VM_L2_PROTECTION_FAULT_MM_CNTL3__VML1_READ_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0x0 +#define VML2PF1_VM_L2_PROTECTION_FAULT_MM_CNTL3__VML1_READ_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0xFFFFFFFFL +//VML2PF1_VM_L2_PROTECTION_FAULT_MM_CNTL4 +#define VML2PF1_VM_L2_PROTECTION_FAULT_MM_CNTL4__VML1_WRITE_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT__SHIFT 0x0 +#define VML2PF1_VM_L2_PROTECTION_FAULT_MM_CNTL4__VML1_WRITE_CLIENT_ID_NO_RETRY_FAULT_INTERRUPT_MASK 0xFFFFFFFFL +//VML2PF1_VM_L2_PROTECTION_FAULT_STATUS +#define VML2PF1_VM_L2_PROTECTION_FAULT_STATUS__MORE_FAULTS__SHIFT 0x0 +#define VML2PF1_VM_L2_PROTECTION_FAULT_STATUS__WALKER_ERROR__SHIFT 0x1 +#define VML2PF1_VM_L2_PROTECTION_FAULT_STATUS__PERMISSION_FAULTS__SHIFT 0x4 +#define VML2PF1_VM_L2_PROTECTION_FAULT_STATUS__MAPPING_ERROR__SHIFT 0x8 +#define 
VML2PF1_VM_L2_PROTECTION_FAULT_STATUS__CID__SHIFT 0x9 +#define VML2PF1_VM_L2_PROTECTION_FAULT_STATUS__RW__SHIFT 0x12 +#define VML2PF1_VM_L2_PROTECTION_FAULT_STATUS__ATOMIC__SHIFT 0x13 +#define VML2PF1_VM_L2_PROTECTION_FAULT_STATUS__VMID__SHIFT 0x14 +#define VML2PF1_VM_L2_PROTECTION_FAULT_STATUS__VF__SHIFT 0x18 +#define VML2PF1_VM_L2_PROTECTION_FAULT_STATUS__VFID__SHIFT 0x19 +#define VML2PF1_VM_L2_PROTECTION_FAULT_STATUS__MORE_FAULTS_MASK 0x00000001L +#define VML2PF1_VM_L2_PROTECTION_FAULT_STATUS__WALKER_ERROR_MASK 0x0000000EL +#define VML2PF1_VM_L2_PROTECTION_FAULT_STATUS__PERMISSION_FAULTS_MASK 0x000000F0L +#define VML2PF1_VM_L2_PROTECTION_FAULT_STATUS__MAPPING_ERROR_MASK 0x00000100L +#define VML2PF1_VM_L2_PROTECTION_FAULT_STATUS__CID_MASK 0x0003FE00L +#define VML2PF1_VM_L2_PROTECTION_FAULT_STATUS__RW_MASK 0x00040000L +#define VML2PF1_VM_L2_PROTECTION_FAULT_STATUS__ATOMIC_MASK 0x00080000L +#define VML2PF1_VM_L2_PROTECTION_FAULT_STATUS__VMID_MASK 0x00F00000L +#define VML2PF1_VM_L2_PROTECTION_FAULT_STATUS__VF_MASK 0x01000000L +#define VML2PF1_VM_L2_PROTECTION_FAULT_STATUS__VFID_MASK 0x1E000000L +//VML2PF1_VM_L2_PROTECTION_FAULT_ADDR_LO32 +#define VML2PF1_VM_L2_PROTECTION_FAULT_ADDR_LO32__LOGICAL_PAGE_ADDR_LO32__SHIFT 0x0 +#define VML2PF1_VM_L2_PROTECTION_FAULT_ADDR_LO32__LOGICAL_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL +//VML2PF1_VM_L2_PROTECTION_FAULT_ADDR_HI32 +#define VML2PF1_VM_L2_PROTECTION_FAULT_ADDR_HI32__LOGICAL_PAGE_ADDR_HI4__SHIFT 0x0 +#define VML2PF1_VM_L2_PROTECTION_FAULT_ADDR_HI32__LOGICAL_PAGE_ADDR_HI4_MASK 0x0000000FL +//VML2PF1_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32 +#define VML2PF1_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32__PHYSICAL_PAGE_ADDR_LO32__SHIFT 0x0 +#define VML2PF1_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32__PHYSICAL_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL +//VML2PF1_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32 +#define VML2PF1_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32__PHYSICAL_PAGE_ADDR_HI4__SHIFT 0x0 +#define VML2PF1_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32__PHYSICAL_PAGE_ADDR_HI4_MASK 0x0000000FL +//VML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32 +#define VML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32 +#define VML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32 +#define VML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32 +#define VML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2PF1_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2PF1_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32 +#define VML2PF1_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32__PHYSICAL_PAGE_OFFSET_LO32__SHIFT 0x0 +#define VML2PF1_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32__PHYSICAL_PAGE_OFFSET_LO32_MASK 0xFFFFFFFFL +//VML2PF1_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32 +#define VML2PF1_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32__PHYSICAL_PAGE_OFFSET_HI4__SHIFT 0x0 +#define 
VML2PF1_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32__PHYSICAL_PAGE_OFFSET_HI4_MASK 0x0000000FL +//VML2PF1_VM_L2_CNTL4 +#define VML2PF1_VM_L2_CNTL4__L2_CACHE_4K_PARTITION_COUNT__SHIFT 0x0 +#define VML2PF1_VM_L2_CNTL4__VMC_TAP_PDE_REQUEST_PHYSICAL__SHIFT 0x6 +#define VML2PF1_VM_L2_CNTL4__VMC_TAP_PTE_REQUEST_PHYSICAL__SHIFT 0x7 +#define VML2PF1_VM_L2_CNTL4__MM_NONRT_IFIFO_ACTIVE_TRANSACTION_LIMIT__SHIFT 0x8 +#define VML2PF1_VM_L2_CNTL4__MM_SOFTRT_IFIFO_ACTIVE_TRANSACTION_LIMIT__SHIFT 0x12 +#define VML2PF1_VM_L2_CNTL4__BPM_CGCGLS_OVERRIDE__SHIFT 0x1c +#define VML2PF1_VM_L2_CNTL4__L2_CACHE_4K_PARTITION_COUNT_MASK 0x0000003FL +#define VML2PF1_VM_L2_CNTL4__VMC_TAP_PDE_REQUEST_PHYSICAL_MASK 0x00000040L +#define VML2PF1_VM_L2_CNTL4__VMC_TAP_PTE_REQUEST_PHYSICAL_MASK 0x00000080L +#define VML2PF1_VM_L2_CNTL4__MM_NONRT_IFIFO_ACTIVE_TRANSACTION_LIMIT_MASK 0x0003FF00L +#define VML2PF1_VM_L2_CNTL4__MM_SOFTRT_IFIFO_ACTIVE_TRANSACTION_LIMIT_MASK 0x0FFC0000L +#define VML2PF1_VM_L2_CNTL4__BPM_CGCGLS_OVERRIDE_MASK 0x10000000L +//VML2PF1_VM_L2_MM_GROUP_RT_CLASSES +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_0_RT_CLASS__SHIFT 0x0 +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_1_RT_CLASS__SHIFT 0x1 +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_2_RT_CLASS__SHIFT 0x2 +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_3_RT_CLASS__SHIFT 0x3 +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_4_RT_CLASS__SHIFT 0x4 +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_5_RT_CLASS__SHIFT 0x5 +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_6_RT_CLASS__SHIFT 0x6 +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_7_RT_CLASS__SHIFT 0x7 +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_8_RT_CLASS__SHIFT 0x8 +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_9_RT_CLASS__SHIFT 0x9 +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_10_RT_CLASS__SHIFT 0xa +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_11_RT_CLASS__SHIFT 0xb +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_12_RT_CLASS__SHIFT 0xc +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_13_RT_CLASS__SHIFT 0xd +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_14_RT_CLASS__SHIFT 0xe +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_15_RT_CLASS__SHIFT 0xf +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_16_RT_CLASS__SHIFT 0x10 +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_17_RT_CLASS__SHIFT 0x11 +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_18_RT_CLASS__SHIFT 0x12 +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_19_RT_CLASS__SHIFT 0x13 +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_20_RT_CLASS__SHIFT 0x14 +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_21_RT_CLASS__SHIFT 0x15 +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_22_RT_CLASS__SHIFT 0x16 +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_23_RT_CLASS__SHIFT 0x17 +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_24_RT_CLASS__SHIFT 0x18 +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_25_RT_CLASS__SHIFT 0x19 +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_26_RT_CLASS__SHIFT 0x1a +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_27_RT_CLASS__SHIFT 0x1b +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_28_RT_CLASS__SHIFT 0x1c +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_29_RT_CLASS__SHIFT 0x1d +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_30_RT_CLASS__SHIFT 0x1e +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_31_RT_CLASS__SHIFT 0x1f +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_0_RT_CLASS_MASK 0x00000001L +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_1_RT_CLASS_MASK 
0x00000002L +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_2_RT_CLASS_MASK 0x00000004L +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_3_RT_CLASS_MASK 0x00000008L +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_4_RT_CLASS_MASK 0x00000010L +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_5_RT_CLASS_MASK 0x00000020L +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_6_RT_CLASS_MASK 0x00000040L +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_7_RT_CLASS_MASK 0x00000080L +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_8_RT_CLASS_MASK 0x00000100L +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_9_RT_CLASS_MASK 0x00000200L +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_10_RT_CLASS_MASK 0x00000400L +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_11_RT_CLASS_MASK 0x00000800L +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_12_RT_CLASS_MASK 0x00001000L +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_13_RT_CLASS_MASK 0x00002000L +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_14_RT_CLASS_MASK 0x00004000L +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_15_RT_CLASS_MASK 0x00008000L +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_16_RT_CLASS_MASK 0x00010000L +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_17_RT_CLASS_MASK 0x00020000L +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_18_RT_CLASS_MASK 0x00040000L +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_19_RT_CLASS_MASK 0x00080000L +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_20_RT_CLASS_MASK 0x00100000L +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_21_RT_CLASS_MASK 0x00200000L +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_22_RT_CLASS_MASK 0x00400000L +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_23_RT_CLASS_MASK 0x00800000L +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_24_RT_CLASS_MASK 0x01000000L +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_25_RT_CLASS_MASK 0x02000000L +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_26_RT_CLASS_MASK 0x04000000L +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_27_RT_CLASS_MASK 0x08000000L +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_28_RT_CLASS_MASK 0x10000000L +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_29_RT_CLASS_MASK 0x20000000L +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_30_RT_CLASS_MASK 0x40000000L +#define VML2PF1_VM_L2_MM_GROUP_RT_CLASSES__GROUP_31_RT_CLASS_MASK 0x80000000L +//VML2PF1_VM_L2_BANK_SELECT_RESERVED_CID +#define VML2PF1_VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_READ_CLIENT_ID__SHIFT 0x0 +#define VML2PF1_VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_WRITE_CLIENT_ID__SHIFT 0xa +#define VML2PF1_VM_L2_BANK_SELECT_RESERVED_CID__ENABLE__SHIFT 0x14 +#define VML2PF1_VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_INVALIDATION_MODE__SHIFT 0x18 +#define VML2PF1_VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_PRIVATE_INVALIDATION__SHIFT 0x19 +#define VML2PF1_VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_READ_CLIENT_ID_MASK 0x000001FFL +#define VML2PF1_VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_WRITE_CLIENT_ID_MASK 0x0007FC00L +#define VML2PF1_VM_L2_BANK_SELECT_RESERVED_CID__ENABLE_MASK 0x00100000L +#define VML2PF1_VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_INVALIDATION_MODE_MASK 0x01000000L +#define VML2PF1_VM_L2_BANK_SELECT_RESERVED_CID__RESERVED_CACHE_PRIVATE_INVALIDATION_MASK 0x02000000L +//VML2PF1_VM_L2_BANK_SELECT_RESERVED_CID2 +#define VML2PF1_VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_READ_CLIENT_ID__SHIFT 0x0 +#define VML2PF1_VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_WRITE_CLIENT_ID__SHIFT 0xa +#define 
VML2PF1_VM_L2_BANK_SELECT_RESERVED_CID2__ENABLE__SHIFT 0x14 +#define VML2PF1_VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_INVALIDATION_MODE__SHIFT 0x18 +#define VML2PF1_VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_PRIVATE_INVALIDATION__SHIFT 0x19 +#define VML2PF1_VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_READ_CLIENT_ID_MASK 0x000001FFL +#define VML2PF1_VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_WRITE_CLIENT_ID_MASK 0x0007FC00L +#define VML2PF1_VM_L2_BANK_SELECT_RESERVED_CID2__ENABLE_MASK 0x00100000L +#define VML2PF1_VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_INVALIDATION_MODE_MASK 0x01000000L +#define VML2PF1_VM_L2_BANK_SELECT_RESERVED_CID2__RESERVED_CACHE_PRIVATE_INVALIDATION_MASK 0x02000000L +//VML2PF1_VM_L2_CACHE_PARITY_CNTL +#define VML2PF1_VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_4K_PTE_CACHES__SHIFT 0x0 +#define VML2PF1_VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_BIGK_PTE_CACHES__SHIFT 0x1 +#define VML2PF1_VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_PDE_CACHES__SHIFT 0x2 +#define VML2PF1_VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_4K_PTE_CACHE__SHIFT 0x3 +#define VML2PF1_VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_BIGK_PTE_CACHE__SHIFT 0x4 +#define VML2PF1_VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_PDE_CACHE__SHIFT 0x5 +#define VML2PF1_VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_BANK__SHIFT 0x6 +#define VML2PF1_VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_NUMBER__SHIFT 0x9 +#define VML2PF1_VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_ASSOC__SHIFT 0xc +#define VML2PF1_VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_4K_PTE_CACHES_MASK 0x00000001L +#define VML2PF1_VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_BIGK_PTE_CACHES_MASK 0x00000002L +#define VML2PF1_VM_L2_CACHE_PARITY_CNTL__ENABLE_PARITY_CHECKS_IN_PDE_CACHES_MASK 0x00000004L +#define VML2PF1_VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_4K_PTE_CACHE_MASK 0x00000008L +#define VML2PF1_VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_BIGK_PTE_CACHE_MASK 0x00000010L +#define VML2PF1_VM_L2_CACHE_PARITY_CNTL__FORCE_PARITY_MISMATCH_IN_PDE_CACHE_MASK 0x00000020L +#define VML2PF1_VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_BANK_MASK 0x000001C0L +#define VML2PF1_VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_NUMBER_MASK 0x00000E00L +#define VML2PF1_VM_L2_CACHE_PARITY_CNTL__FORCE_CACHE_ASSOC_MASK 0x0000F000L +//VML2PF1_VM_L2_CGTT_CLK_CTRL +#define VML2PF1_VM_L2_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define VML2PF1_VM_L2_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define VML2PF1_VM_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf +#define VML2PF1_VM_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x10 +#define VML2PF1_VM_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x18 +#define VML2PF1_VM_L2_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define VML2PF1_VM_L2_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define VML2PF1_VM_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L +#define VML2PF1_VM_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L +#define VML2PF1_VM_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L + + +// addressBlock: mmhub_utcl2_vml2vcdec:1 +//VML2VC1_VM_CONTEXT0_CNTL +#define VML2VC1_VM_CONTEXT0_CNTL__ENABLE_CONTEXT__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1 +#define VML2VC1_VM_CONTEXT0_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3 +#define VML2VC1_VM_CONTEXT0_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7 +#define VML2VC1_VM_CONTEXT0_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8 +#define VML2VC1_VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9 +#define 
VML2VC1_VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa +#define VML2VC1_VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb +#define VML2VC1_VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc +#define VML2VC1_VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd +#define VML2VC1_VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe +#define VML2VC1_VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf +#define VML2VC1_VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10 +#define VML2VC1_VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11 +#define VML2VC1_VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12 +#define VML2VC1_VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13 +#define VML2VC1_VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14 +#define VML2VC1_VM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15 +#define VML2VC1_VM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16 +#define VML2VC1_VM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK 0x00000001L +#define VML2VC1_VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L +#define VML2VC1_VM_CONTEXT0_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L +#define VML2VC1_VM_CONTEXT0_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L +#define VML2VC1_VM_CONTEXT0_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L +#define VML2VC1_VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L +#define VML2VC1_VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L +#define VML2VC1_VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L +#define VML2VC1_VM_CONTEXT0_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L +#define VML2VC1_VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L +#define VML2VC1_VM_CONTEXT0_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L +#define VML2VC1_VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L +#define VML2VC1_VM_CONTEXT0_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L +#define VML2VC1_VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L +#define VML2VC1_VM_CONTEXT0_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L +#define VML2VC1_VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L +#define VML2VC1_VM_CONTEXT0_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L +#define VML2VC1_VM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L +#define VML2VC1_VM_CONTEXT0_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L +//VML2VC1_VM_CONTEXT1_CNTL +#define VML2VC1_VM_CONTEXT1_CNTL__ENABLE_CONTEXT__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1 +#define VML2VC1_VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3 +#define VML2VC1_VM_CONTEXT1_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7 +#define VML2VC1_VM_CONTEXT1_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8 +#define VML2VC1_VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9 +#define VML2VC1_VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa +#define VML2VC1_VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb +#define VML2VC1_VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc +#define 
VML2VC1_VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd +#define VML2VC1_VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe +#define VML2VC1_VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf +#define VML2VC1_VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10 +#define VML2VC1_VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11 +#define VML2VC1_VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12 +#define VML2VC1_VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13 +#define VML2VC1_VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14 +#define VML2VC1_VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15 +#define VML2VC1_VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16 +#define VML2VC1_VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK 0x00000001L +#define VML2VC1_VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L +#define VML2VC1_VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L +#define VML2VC1_VM_CONTEXT1_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L +#define VML2VC1_VM_CONTEXT1_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L +#define VML2VC1_VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L +#define VML2VC1_VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L +#define VML2VC1_VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L +#define VML2VC1_VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L +#define VML2VC1_VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L +#define VML2VC1_VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L +#define VML2VC1_VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L +#define VML2VC1_VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L +#define VML2VC1_VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L +#define VML2VC1_VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L +#define VML2VC1_VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L +#define VML2VC1_VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L +#define VML2VC1_VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L +#define VML2VC1_VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L +//VML2VC1_VM_CONTEXT2_CNTL +#define VML2VC1_VM_CONTEXT2_CNTL__ENABLE_CONTEXT__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT2_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1 +#define VML2VC1_VM_CONTEXT2_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3 +#define VML2VC1_VM_CONTEXT2_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7 +#define VML2VC1_VM_CONTEXT2_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8 +#define VML2VC1_VM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9 +#define VML2VC1_VM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa +#define VML2VC1_VM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb +#define VML2VC1_VM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc +#define VML2VC1_VM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd +#define VML2VC1_VM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe +#define VML2VC1_VM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf +#define 
VML2VC1_VM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10 +#define VML2VC1_VM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11 +#define VML2VC1_VM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12 +#define VML2VC1_VM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13 +#define VML2VC1_VM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14 +#define VML2VC1_VM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15 +#define VML2VC1_VM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16 +#define VML2VC1_VM_CONTEXT2_CNTL__ENABLE_CONTEXT_MASK 0x00000001L +#define VML2VC1_VM_CONTEXT2_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L +#define VML2VC1_VM_CONTEXT2_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L +#define VML2VC1_VM_CONTEXT2_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L +#define VML2VC1_VM_CONTEXT2_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L +#define VML2VC1_VM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L +#define VML2VC1_VM_CONTEXT2_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L +#define VML2VC1_VM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L +#define VML2VC1_VM_CONTEXT2_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L +#define VML2VC1_VM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L +#define VML2VC1_VM_CONTEXT2_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L +#define VML2VC1_VM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L +#define VML2VC1_VM_CONTEXT2_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L +#define VML2VC1_VM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L +#define VML2VC1_VM_CONTEXT2_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L +#define VML2VC1_VM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L +#define VML2VC1_VM_CONTEXT2_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L +#define VML2VC1_VM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L +#define VML2VC1_VM_CONTEXT2_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L +//VML2VC1_VM_CONTEXT3_CNTL +#define VML2VC1_VM_CONTEXT3_CNTL__ENABLE_CONTEXT__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT3_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1 +#define VML2VC1_VM_CONTEXT3_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3 +#define VML2VC1_VM_CONTEXT3_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7 +#define VML2VC1_VM_CONTEXT3_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8 +#define VML2VC1_VM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9 +#define VML2VC1_VM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa +#define VML2VC1_VM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb +#define VML2VC1_VM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc +#define VML2VC1_VM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd +#define VML2VC1_VM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe +#define VML2VC1_VM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf +#define VML2VC1_VM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10 +#define VML2VC1_VM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11 +#define VML2VC1_VM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12 +#define 
VML2VC1_VM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13 +#define VML2VC1_VM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14 +#define VML2VC1_VM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15 +#define VML2VC1_VM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16 +#define VML2VC1_VM_CONTEXT3_CNTL__ENABLE_CONTEXT_MASK 0x00000001L +#define VML2VC1_VM_CONTEXT3_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L +#define VML2VC1_VM_CONTEXT3_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L +#define VML2VC1_VM_CONTEXT3_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L +#define VML2VC1_VM_CONTEXT3_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L +#define VML2VC1_VM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L +#define VML2VC1_VM_CONTEXT3_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L +#define VML2VC1_VM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L +#define VML2VC1_VM_CONTEXT3_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L +#define VML2VC1_VM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L +#define VML2VC1_VM_CONTEXT3_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L +#define VML2VC1_VM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L +#define VML2VC1_VM_CONTEXT3_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L +#define VML2VC1_VM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L +#define VML2VC1_VM_CONTEXT3_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L +#define VML2VC1_VM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L +#define VML2VC1_VM_CONTEXT3_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L +#define VML2VC1_VM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L +#define VML2VC1_VM_CONTEXT3_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L +//VML2VC1_VM_CONTEXT4_CNTL +#define VML2VC1_VM_CONTEXT4_CNTL__ENABLE_CONTEXT__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT4_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1 +#define VML2VC1_VM_CONTEXT4_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3 +#define VML2VC1_VM_CONTEXT4_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7 +#define VML2VC1_VM_CONTEXT4_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8 +#define VML2VC1_VM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9 +#define VML2VC1_VM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa +#define VML2VC1_VM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb +#define VML2VC1_VM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc +#define VML2VC1_VM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd +#define VML2VC1_VM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe +#define VML2VC1_VM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf +#define VML2VC1_VM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10 +#define VML2VC1_VM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11 +#define VML2VC1_VM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12 +#define VML2VC1_VM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13 +#define VML2VC1_VM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14 +#define VML2VC1_VM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15 +#define 
VML2VC1_VM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16 +#define VML2VC1_VM_CONTEXT4_CNTL__ENABLE_CONTEXT_MASK 0x00000001L +#define VML2VC1_VM_CONTEXT4_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L +#define VML2VC1_VM_CONTEXT4_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L +#define VML2VC1_VM_CONTEXT4_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L +#define VML2VC1_VM_CONTEXT4_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L +#define VML2VC1_VM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L +#define VML2VC1_VM_CONTEXT4_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L +#define VML2VC1_VM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L +#define VML2VC1_VM_CONTEXT4_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L +#define VML2VC1_VM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L +#define VML2VC1_VM_CONTEXT4_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L +#define VML2VC1_VM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L +#define VML2VC1_VM_CONTEXT4_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L +#define VML2VC1_VM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L +#define VML2VC1_VM_CONTEXT4_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L +#define VML2VC1_VM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L +#define VML2VC1_VM_CONTEXT4_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L +#define VML2VC1_VM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L +#define VML2VC1_VM_CONTEXT4_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L +//VML2VC1_VM_CONTEXT5_CNTL +#define VML2VC1_VM_CONTEXT5_CNTL__ENABLE_CONTEXT__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT5_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1 +#define VML2VC1_VM_CONTEXT5_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3 +#define VML2VC1_VM_CONTEXT5_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7 +#define VML2VC1_VM_CONTEXT5_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8 +#define VML2VC1_VM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9 +#define VML2VC1_VM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa +#define VML2VC1_VM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb +#define VML2VC1_VM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc +#define VML2VC1_VM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd +#define VML2VC1_VM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe +#define VML2VC1_VM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf +#define VML2VC1_VM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10 +#define VML2VC1_VM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11 +#define VML2VC1_VM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12 +#define VML2VC1_VM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13 +#define VML2VC1_VM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14 +#define VML2VC1_VM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15 +#define VML2VC1_VM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16 +#define VML2VC1_VM_CONTEXT5_CNTL__ENABLE_CONTEXT_MASK 0x00000001L +#define VML2VC1_VM_CONTEXT5_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L +#define VML2VC1_VM_CONTEXT5_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L +#define 
VML2VC1_VM_CONTEXT5_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L +#define VML2VC1_VM_CONTEXT5_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L +#define VML2VC1_VM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L +#define VML2VC1_VM_CONTEXT5_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L +#define VML2VC1_VM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L +#define VML2VC1_VM_CONTEXT5_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L +#define VML2VC1_VM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L +#define VML2VC1_VM_CONTEXT5_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L +#define VML2VC1_VM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L +#define VML2VC1_VM_CONTEXT5_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L +#define VML2VC1_VM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L +#define VML2VC1_VM_CONTEXT5_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L +#define VML2VC1_VM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L +#define VML2VC1_VM_CONTEXT5_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L +#define VML2VC1_VM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L +#define VML2VC1_VM_CONTEXT5_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L +//VML2VC1_VM_CONTEXT6_CNTL +#define VML2VC1_VM_CONTEXT6_CNTL__ENABLE_CONTEXT__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT6_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1 +#define VML2VC1_VM_CONTEXT6_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3 +#define VML2VC1_VM_CONTEXT6_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7 +#define VML2VC1_VM_CONTEXT6_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8 +#define VML2VC1_VM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9 +#define VML2VC1_VM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa +#define VML2VC1_VM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb +#define VML2VC1_VM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc +#define VML2VC1_VM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd +#define VML2VC1_VM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe +#define VML2VC1_VM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf +#define VML2VC1_VM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10 +#define VML2VC1_VM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11 +#define VML2VC1_VM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12 +#define VML2VC1_VM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13 +#define VML2VC1_VM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14 +#define VML2VC1_VM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15 +#define VML2VC1_VM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16 +#define VML2VC1_VM_CONTEXT6_CNTL__ENABLE_CONTEXT_MASK 0x00000001L +#define VML2VC1_VM_CONTEXT6_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L +#define VML2VC1_VM_CONTEXT6_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L +#define VML2VC1_VM_CONTEXT6_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L +#define VML2VC1_VM_CONTEXT6_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L +#define VML2VC1_VM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L +#define VML2VC1_VM_CONTEXT6_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 
0x00000400L +#define VML2VC1_VM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L +#define VML2VC1_VM_CONTEXT6_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L +#define VML2VC1_VM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L +#define VML2VC1_VM_CONTEXT6_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L +#define VML2VC1_VM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L +#define VML2VC1_VM_CONTEXT6_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L +#define VML2VC1_VM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L +#define VML2VC1_VM_CONTEXT6_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L +#define VML2VC1_VM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L +#define VML2VC1_VM_CONTEXT6_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L +#define VML2VC1_VM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L +#define VML2VC1_VM_CONTEXT6_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L +//VML2VC1_VM_CONTEXT7_CNTL +#define VML2VC1_VM_CONTEXT7_CNTL__ENABLE_CONTEXT__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT7_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1 +#define VML2VC1_VM_CONTEXT7_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3 +#define VML2VC1_VM_CONTEXT7_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7 +#define VML2VC1_VM_CONTEXT7_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8 +#define VML2VC1_VM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9 +#define VML2VC1_VM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa +#define VML2VC1_VM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb +#define VML2VC1_VM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc +#define VML2VC1_VM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd +#define VML2VC1_VM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe +#define VML2VC1_VM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf +#define VML2VC1_VM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10 +#define VML2VC1_VM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11 +#define VML2VC1_VM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12 +#define VML2VC1_VM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13 +#define VML2VC1_VM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14 +#define VML2VC1_VM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15 +#define VML2VC1_VM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16 +#define VML2VC1_VM_CONTEXT7_CNTL__ENABLE_CONTEXT_MASK 0x00000001L +#define VML2VC1_VM_CONTEXT7_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L +#define VML2VC1_VM_CONTEXT7_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L +#define VML2VC1_VM_CONTEXT7_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L +#define VML2VC1_VM_CONTEXT7_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L +#define VML2VC1_VM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L +#define VML2VC1_VM_CONTEXT7_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L +#define VML2VC1_VM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L +#define VML2VC1_VM_CONTEXT7_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L +#define VML2VC1_VM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L +#define 
VML2VC1_VM_CONTEXT7_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L +#define VML2VC1_VM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L +#define VML2VC1_VM_CONTEXT7_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L +#define VML2VC1_VM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L +#define VML2VC1_VM_CONTEXT7_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L +#define VML2VC1_VM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L +#define VML2VC1_VM_CONTEXT7_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L +#define VML2VC1_VM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L +#define VML2VC1_VM_CONTEXT7_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L +//VML2VC1_VM_CONTEXT8_CNTL +#define VML2VC1_VM_CONTEXT8_CNTL__ENABLE_CONTEXT__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT8_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1 +#define VML2VC1_VM_CONTEXT8_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3 +#define VML2VC1_VM_CONTEXT8_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7 +#define VML2VC1_VM_CONTEXT8_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8 +#define VML2VC1_VM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9 +#define VML2VC1_VM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa +#define VML2VC1_VM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb +#define VML2VC1_VM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc +#define VML2VC1_VM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd +#define VML2VC1_VM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe +#define VML2VC1_VM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf +#define VML2VC1_VM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10 +#define VML2VC1_VM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11 +#define VML2VC1_VM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12 +#define VML2VC1_VM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13 +#define VML2VC1_VM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14 +#define VML2VC1_VM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15 +#define VML2VC1_VM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16 +#define VML2VC1_VM_CONTEXT8_CNTL__ENABLE_CONTEXT_MASK 0x00000001L +#define VML2VC1_VM_CONTEXT8_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L +#define VML2VC1_VM_CONTEXT8_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L +#define VML2VC1_VM_CONTEXT8_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L +#define VML2VC1_VM_CONTEXT8_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L +#define VML2VC1_VM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L +#define VML2VC1_VM_CONTEXT8_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L +#define VML2VC1_VM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L +#define VML2VC1_VM_CONTEXT8_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L +#define VML2VC1_VM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L +#define VML2VC1_VM_CONTEXT8_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L +#define VML2VC1_VM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L +#define VML2VC1_VM_CONTEXT8_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L +#define 
VML2VC1_VM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L +#define VML2VC1_VM_CONTEXT8_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L +#define VML2VC1_VM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L +#define VML2VC1_VM_CONTEXT8_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L +#define VML2VC1_VM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L +#define VML2VC1_VM_CONTEXT8_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L +//VML2VC1_VM_CONTEXT9_CNTL +#define VML2VC1_VM_CONTEXT9_CNTL__ENABLE_CONTEXT__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT9_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1 +#define VML2VC1_VM_CONTEXT9_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3 +#define VML2VC1_VM_CONTEXT9_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7 +#define VML2VC1_VM_CONTEXT9_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8 +#define VML2VC1_VM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9 +#define VML2VC1_VM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa +#define VML2VC1_VM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb +#define VML2VC1_VM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc +#define VML2VC1_VM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd +#define VML2VC1_VM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe +#define VML2VC1_VM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf +#define VML2VC1_VM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10 +#define VML2VC1_VM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11 +#define VML2VC1_VM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12 +#define VML2VC1_VM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13 +#define VML2VC1_VM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14 +#define VML2VC1_VM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15 +#define VML2VC1_VM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16 +#define VML2VC1_VM_CONTEXT9_CNTL__ENABLE_CONTEXT_MASK 0x00000001L +#define VML2VC1_VM_CONTEXT9_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L +#define VML2VC1_VM_CONTEXT9_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L +#define VML2VC1_VM_CONTEXT9_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L +#define VML2VC1_VM_CONTEXT9_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L +#define VML2VC1_VM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L +#define VML2VC1_VM_CONTEXT9_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L +#define VML2VC1_VM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L +#define VML2VC1_VM_CONTEXT9_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L +#define VML2VC1_VM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L +#define VML2VC1_VM_CONTEXT9_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L +#define VML2VC1_VM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L +#define VML2VC1_VM_CONTEXT9_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L +#define VML2VC1_VM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L +#define VML2VC1_VM_CONTEXT9_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L +#define VML2VC1_VM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L +#define 
VML2VC1_VM_CONTEXT9_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L +#define VML2VC1_VM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L +#define VML2VC1_VM_CONTEXT9_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L +//VML2VC1_VM_CONTEXT10_CNTL +#define VML2VC1_VM_CONTEXT10_CNTL__ENABLE_CONTEXT__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT10_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1 +#define VML2VC1_VM_CONTEXT10_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3 +#define VML2VC1_VM_CONTEXT10_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7 +#define VML2VC1_VM_CONTEXT10_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8 +#define VML2VC1_VM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9 +#define VML2VC1_VM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa +#define VML2VC1_VM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb +#define VML2VC1_VM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc +#define VML2VC1_VM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd +#define VML2VC1_VM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe +#define VML2VC1_VM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf +#define VML2VC1_VM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10 +#define VML2VC1_VM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11 +#define VML2VC1_VM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12 +#define VML2VC1_VM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13 +#define VML2VC1_VM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14 +#define VML2VC1_VM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15 +#define VML2VC1_VM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16 +#define VML2VC1_VM_CONTEXT10_CNTL__ENABLE_CONTEXT_MASK 0x00000001L +#define VML2VC1_VM_CONTEXT10_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L +#define VML2VC1_VM_CONTEXT10_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L +#define VML2VC1_VM_CONTEXT10_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L +#define VML2VC1_VM_CONTEXT10_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L +#define VML2VC1_VM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L +#define VML2VC1_VM_CONTEXT10_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L +#define VML2VC1_VM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L +#define VML2VC1_VM_CONTEXT10_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L +#define VML2VC1_VM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L +#define VML2VC1_VM_CONTEXT10_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L +#define VML2VC1_VM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L +#define VML2VC1_VM_CONTEXT10_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L +#define VML2VC1_VM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L +#define VML2VC1_VM_CONTEXT10_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L +#define VML2VC1_VM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L +#define VML2VC1_VM_CONTEXT10_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L +#define VML2VC1_VM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L +#define VML2VC1_VM_CONTEXT10_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L 
+//VML2VC1_VM_CONTEXT11_CNTL +#define VML2VC1_VM_CONTEXT11_CNTL__ENABLE_CONTEXT__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT11_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1 +#define VML2VC1_VM_CONTEXT11_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3 +#define VML2VC1_VM_CONTEXT11_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7 +#define VML2VC1_VM_CONTEXT11_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8 +#define VML2VC1_VM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9 +#define VML2VC1_VM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa +#define VML2VC1_VM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb +#define VML2VC1_VM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc +#define VML2VC1_VM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd +#define VML2VC1_VM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe +#define VML2VC1_VM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf +#define VML2VC1_VM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10 +#define VML2VC1_VM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11 +#define VML2VC1_VM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12 +#define VML2VC1_VM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13 +#define VML2VC1_VM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14 +#define VML2VC1_VM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15 +#define VML2VC1_VM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16 +#define VML2VC1_VM_CONTEXT11_CNTL__ENABLE_CONTEXT_MASK 0x00000001L +#define VML2VC1_VM_CONTEXT11_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L +#define VML2VC1_VM_CONTEXT11_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L +#define VML2VC1_VM_CONTEXT11_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L +#define VML2VC1_VM_CONTEXT11_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L +#define VML2VC1_VM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L +#define VML2VC1_VM_CONTEXT11_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L +#define VML2VC1_VM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L +#define VML2VC1_VM_CONTEXT11_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L +#define VML2VC1_VM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L +#define VML2VC1_VM_CONTEXT11_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L +#define VML2VC1_VM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L +#define VML2VC1_VM_CONTEXT11_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L +#define VML2VC1_VM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L +#define VML2VC1_VM_CONTEXT11_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L +#define VML2VC1_VM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L +#define VML2VC1_VM_CONTEXT11_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L +#define VML2VC1_VM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L +#define VML2VC1_VM_CONTEXT11_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L +//VML2VC1_VM_CONTEXT12_CNTL +#define VML2VC1_VM_CONTEXT12_CNTL__ENABLE_CONTEXT__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT12_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1 +#define VML2VC1_VM_CONTEXT12_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3 +#define 
VML2VC1_VM_CONTEXT12_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7 +#define VML2VC1_VM_CONTEXT12_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8 +#define VML2VC1_VM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9 +#define VML2VC1_VM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa +#define VML2VC1_VM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb +#define VML2VC1_VM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc +#define VML2VC1_VM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd +#define VML2VC1_VM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe +#define VML2VC1_VM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf +#define VML2VC1_VM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10 +#define VML2VC1_VM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11 +#define VML2VC1_VM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12 +#define VML2VC1_VM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13 +#define VML2VC1_VM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14 +#define VML2VC1_VM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15 +#define VML2VC1_VM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16 +#define VML2VC1_VM_CONTEXT12_CNTL__ENABLE_CONTEXT_MASK 0x00000001L +#define VML2VC1_VM_CONTEXT12_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L +#define VML2VC1_VM_CONTEXT12_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L +#define VML2VC1_VM_CONTEXT12_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L +#define VML2VC1_VM_CONTEXT12_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L +#define VML2VC1_VM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L +#define VML2VC1_VM_CONTEXT12_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L +#define VML2VC1_VM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L +#define VML2VC1_VM_CONTEXT12_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L +#define VML2VC1_VM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L +#define VML2VC1_VM_CONTEXT12_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L +#define VML2VC1_VM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L +#define VML2VC1_VM_CONTEXT12_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L +#define VML2VC1_VM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L +#define VML2VC1_VM_CONTEXT12_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L +#define VML2VC1_VM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L +#define VML2VC1_VM_CONTEXT12_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L +#define VML2VC1_VM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L +#define VML2VC1_VM_CONTEXT12_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L +//VML2VC1_VM_CONTEXT13_CNTL +#define VML2VC1_VM_CONTEXT13_CNTL__ENABLE_CONTEXT__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT13_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1 +#define VML2VC1_VM_CONTEXT13_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3 +#define VML2VC1_VM_CONTEXT13_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7 +#define VML2VC1_VM_CONTEXT13_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8 +#define VML2VC1_VM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9 +#define 
VML2VC1_VM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa +#define VML2VC1_VM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb +#define VML2VC1_VM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc +#define VML2VC1_VM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd +#define VML2VC1_VM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe +#define VML2VC1_VM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf +#define VML2VC1_VM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10 +#define VML2VC1_VM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11 +#define VML2VC1_VM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12 +#define VML2VC1_VM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13 +#define VML2VC1_VM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14 +#define VML2VC1_VM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15 +#define VML2VC1_VM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16 +#define VML2VC1_VM_CONTEXT13_CNTL__ENABLE_CONTEXT_MASK 0x00000001L +#define VML2VC1_VM_CONTEXT13_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L +#define VML2VC1_VM_CONTEXT13_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L +#define VML2VC1_VM_CONTEXT13_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L +#define VML2VC1_VM_CONTEXT13_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L +#define VML2VC1_VM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L +#define VML2VC1_VM_CONTEXT13_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L +#define VML2VC1_VM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L +#define VML2VC1_VM_CONTEXT13_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L +#define VML2VC1_VM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L +#define VML2VC1_VM_CONTEXT13_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L +#define VML2VC1_VM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L +#define VML2VC1_VM_CONTEXT13_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L +#define VML2VC1_VM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L +#define VML2VC1_VM_CONTEXT13_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L +#define VML2VC1_VM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L +#define VML2VC1_VM_CONTEXT13_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L +#define VML2VC1_VM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L +#define VML2VC1_VM_CONTEXT13_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L +//VML2VC1_VM_CONTEXT14_CNTL +#define VML2VC1_VM_CONTEXT14_CNTL__ENABLE_CONTEXT__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT14_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1 +#define VML2VC1_VM_CONTEXT14_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3 +#define VML2VC1_VM_CONTEXT14_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7 +#define VML2VC1_VM_CONTEXT14_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8 +#define VML2VC1_VM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9 +#define VML2VC1_VM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa +#define VML2VC1_VM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb +#define VML2VC1_VM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc +#define 
VML2VC1_VM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd +#define VML2VC1_VM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe +#define VML2VC1_VM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf +#define VML2VC1_VM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10 +#define VML2VC1_VM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11 +#define VML2VC1_VM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12 +#define VML2VC1_VM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13 +#define VML2VC1_VM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14 +#define VML2VC1_VM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15 +#define VML2VC1_VM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16 +#define VML2VC1_VM_CONTEXT14_CNTL__ENABLE_CONTEXT_MASK 0x00000001L +#define VML2VC1_VM_CONTEXT14_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L +#define VML2VC1_VM_CONTEXT14_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L +#define VML2VC1_VM_CONTEXT14_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L +#define VML2VC1_VM_CONTEXT14_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L +#define VML2VC1_VM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L +#define VML2VC1_VM_CONTEXT14_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L +#define VML2VC1_VM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L +#define VML2VC1_VM_CONTEXT14_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L +#define VML2VC1_VM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L +#define VML2VC1_VM_CONTEXT14_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L +#define VML2VC1_VM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L +#define VML2VC1_VM_CONTEXT14_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L +#define VML2VC1_VM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L +#define VML2VC1_VM_CONTEXT14_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L +#define VML2VC1_VM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L +#define VML2VC1_VM_CONTEXT14_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L +#define VML2VC1_VM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L +#define VML2VC1_VM_CONTEXT14_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L +//VML2VC1_VM_CONTEXT15_CNTL +#define VML2VC1_VM_CONTEXT15_CNTL__ENABLE_CONTEXT__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT15_CNTL__PAGE_TABLE_DEPTH__SHIFT 0x1 +#define VML2VC1_VM_CONTEXT15_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT 0x3 +#define VML2VC1_VM_CONTEXT15_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT__SHIFT 0x7 +#define VML2VC1_VM_CONTEXT15_CNTL__RETRY_OTHER_FAULT__SHIFT 0x8 +#define VML2VC1_VM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x9 +#define VML2VC1_VM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xa +#define VML2VC1_VM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xb +#define VML2VC1_VM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xc +#define VML2VC1_VM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xd +#define VML2VC1_VM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0xe +#define VML2VC1_VM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0xf +#define 
VML2VC1_VM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x10 +#define VML2VC1_VM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x11 +#define VML2VC1_VM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x12 +#define VML2VC1_VM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x13 +#define VML2VC1_VM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x14 +#define VML2VC1_VM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT__SHIFT 0x15 +#define VML2VC1_VM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT__SHIFT 0x16 +#define VML2VC1_VM_CONTEXT15_CNTL__ENABLE_CONTEXT_MASK 0x00000001L +#define VML2VC1_VM_CONTEXT15_CNTL__PAGE_TABLE_DEPTH_MASK 0x00000006L +#define VML2VC1_VM_CONTEXT15_CNTL__PAGE_TABLE_BLOCK_SIZE_MASK 0x00000078L +#define VML2VC1_VM_CONTEXT15_CNTL__RETRY_PERMISSION_OR_INVALID_PAGE_FAULT_MASK 0x00000080L +#define VML2VC1_VM_CONTEXT15_CNTL__RETRY_OTHER_FAULT_MASK 0x00000100L +#define VML2VC1_VM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000200L +#define VML2VC1_VM_CONTEXT15_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00000400L +#define VML2VC1_VM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00000800L +#define VML2VC1_VM_CONTEXT15_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00001000L +#define VML2VC1_VM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00002000L +#define VML2VC1_VM_CONTEXT15_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00004000L +#define VML2VC1_VM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00008000L +#define VML2VC1_VM_CONTEXT15_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00010000L +#define VML2VC1_VM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00020000L +#define VML2VC1_VM_CONTEXT15_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00040000L +#define VML2VC1_VM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00080000L +#define VML2VC1_VM_CONTEXT15_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00100000L +#define VML2VC1_VM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK 0x00200000L +#define VML2VC1_VM_CONTEXT15_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK 0x00400000L +//VML2VC1_VM_CONTEXTS_DISABLE +#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_0__SHIFT 0x0 +#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_1__SHIFT 0x1 +#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_2__SHIFT 0x2 +#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_3__SHIFT 0x3 +#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_4__SHIFT 0x4 +#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_5__SHIFT 0x5 +#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_6__SHIFT 0x6 +#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_7__SHIFT 0x7 +#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_8__SHIFT 0x8 +#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_9__SHIFT 0x9 +#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_10__SHIFT 0xa +#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_11__SHIFT 0xb +#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_12__SHIFT 0xc +#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_13__SHIFT 0xd +#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_14__SHIFT 0xe +#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_15__SHIFT 0xf +#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_0_MASK 0x00000001L +#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_1_MASK 0x00000002L +#define 
VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_2_MASK 0x00000004L +#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_3_MASK 0x00000008L +#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_4_MASK 0x00000010L +#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_5_MASK 0x00000020L +#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_6_MASK 0x00000040L +#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_7_MASK 0x00000080L +#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_8_MASK 0x00000100L +#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_9_MASK 0x00000200L +#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_10_MASK 0x00000400L +#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_11_MASK 0x00000800L +#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_12_MASK 0x00001000L +#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_13_MASK 0x00002000L +#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_14_MASK 0x00004000L +#define VML2VC1_VM_CONTEXTS_DISABLE__DISABLE_CONTEXT_15_MASK 0x00008000L +//VML2VC1_VM_INVALIDATE_ENG0_SEM +#define VML2VC1_VM_INVALIDATE_ENG0_SEM__SEMAPHORE__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG0_SEM__SEMAPHORE_MASK 0x00000001L +//VML2VC1_VM_INVALIDATE_ENG1_SEM +#define VML2VC1_VM_INVALIDATE_ENG1_SEM__SEMAPHORE__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG1_SEM__SEMAPHORE_MASK 0x00000001L +//VML2VC1_VM_INVALIDATE_ENG2_SEM +#define VML2VC1_VM_INVALIDATE_ENG2_SEM__SEMAPHORE__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG2_SEM__SEMAPHORE_MASK 0x00000001L +//VML2VC1_VM_INVALIDATE_ENG3_SEM +#define VML2VC1_VM_INVALIDATE_ENG3_SEM__SEMAPHORE__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG3_SEM__SEMAPHORE_MASK 0x00000001L +//VML2VC1_VM_INVALIDATE_ENG4_SEM +#define VML2VC1_VM_INVALIDATE_ENG4_SEM__SEMAPHORE__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG4_SEM__SEMAPHORE_MASK 0x00000001L +//VML2VC1_VM_INVALIDATE_ENG5_SEM +#define VML2VC1_VM_INVALIDATE_ENG5_SEM__SEMAPHORE__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG5_SEM__SEMAPHORE_MASK 0x00000001L +//VML2VC1_VM_INVALIDATE_ENG6_SEM +#define VML2VC1_VM_INVALIDATE_ENG6_SEM__SEMAPHORE__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG6_SEM__SEMAPHORE_MASK 0x00000001L +//VML2VC1_VM_INVALIDATE_ENG7_SEM +#define VML2VC1_VM_INVALIDATE_ENG7_SEM__SEMAPHORE__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG7_SEM__SEMAPHORE_MASK 0x00000001L +//VML2VC1_VM_INVALIDATE_ENG8_SEM +#define VML2VC1_VM_INVALIDATE_ENG8_SEM__SEMAPHORE__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG8_SEM__SEMAPHORE_MASK 0x00000001L +//VML2VC1_VM_INVALIDATE_ENG9_SEM +#define VML2VC1_VM_INVALIDATE_ENG9_SEM__SEMAPHORE__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG9_SEM__SEMAPHORE_MASK 0x00000001L +//VML2VC1_VM_INVALIDATE_ENG10_SEM +#define VML2VC1_VM_INVALIDATE_ENG10_SEM__SEMAPHORE__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG10_SEM__SEMAPHORE_MASK 0x00000001L +//VML2VC1_VM_INVALIDATE_ENG11_SEM +#define VML2VC1_VM_INVALIDATE_ENG11_SEM__SEMAPHORE__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG11_SEM__SEMAPHORE_MASK 0x00000001L +//VML2VC1_VM_INVALIDATE_ENG12_SEM +#define VML2VC1_VM_INVALIDATE_ENG12_SEM__SEMAPHORE__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG12_SEM__SEMAPHORE_MASK 0x00000001L +//VML2VC1_VM_INVALIDATE_ENG13_SEM +#define VML2VC1_VM_INVALIDATE_ENG13_SEM__SEMAPHORE__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG13_SEM__SEMAPHORE_MASK 0x00000001L +//VML2VC1_VM_INVALIDATE_ENG14_SEM +#define VML2VC1_VM_INVALIDATE_ENG14_SEM__SEMAPHORE__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG14_SEM__SEMAPHORE_MASK 0x00000001L +//VML2VC1_VM_INVALIDATE_ENG15_SEM +#define 
VML2VC1_VM_INVALIDATE_ENG15_SEM__SEMAPHORE__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG15_SEM__SEMAPHORE_MASK 0x00000001L +//VML2VC1_VM_INVALIDATE_ENG16_SEM +#define VML2VC1_VM_INVALIDATE_ENG16_SEM__SEMAPHORE__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG16_SEM__SEMAPHORE_MASK 0x00000001L +//VML2VC1_VM_INVALIDATE_ENG17_SEM +#define VML2VC1_VM_INVALIDATE_ENG17_SEM__SEMAPHORE__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG17_SEM__SEMAPHORE_MASK 0x00000001L +//VML2VC1_VM_INVALIDATE_ENG0_REQ +#define VML2VC1_VM_INVALIDATE_ENG0_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG0_REQ__FLUSH_TYPE__SHIFT 0x10 +#define VML2VC1_VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PTES__SHIFT 0x12 +#define VML2VC1_VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13 +#define VML2VC1_VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14 +#define VML2VC1_VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15 +#define VML2VC1_VM_INVALIDATE_ENG0_REQ__INVALIDATE_L1_PTES__SHIFT 0x16 +#define VML2VC1_VM_INVALIDATE_ENG0_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17 +#define VML2VC1_VM_INVALIDATE_ENG0_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL +#define VML2VC1_VM_INVALIDATE_ENG0_REQ__FLUSH_TYPE_MASK 0x00030000L +#define VML2VC1_VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L +#define VML2VC1_VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L +#define VML2VC1_VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L +#define VML2VC1_VM_INVALIDATE_ENG0_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L +#define VML2VC1_VM_INVALIDATE_ENG0_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L +#define VML2VC1_VM_INVALIDATE_ENG0_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L +//VML2VC1_VM_INVALIDATE_ENG1_REQ +#define VML2VC1_VM_INVALIDATE_ENG1_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG1_REQ__FLUSH_TYPE__SHIFT 0x10 +#define VML2VC1_VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PTES__SHIFT 0x12 +#define VML2VC1_VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13 +#define VML2VC1_VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14 +#define VML2VC1_VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15 +#define VML2VC1_VM_INVALIDATE_ENG1_REQ__INVALIDATE_L1_PTES__SHIFT 0x16 +#define VML2VC1_VM_INVALIDATE_ENG1_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17 +#define VML2VC1_VM_INVALIDATE_ENG1_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL +#define VML2VC1_VM_INVALIDATE_ENG1_REQ__FLUSH_TYPE_MASK 0x00030000L +#define VML2VC1_VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L +#define VML2VC1_VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L +#define VML2VC1_VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L +#define VML2VC1_VM_INVALIDATE_ENG1_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L +#define VML2VC1_VM_INVALIDATE_ENG1_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L +#define VML2VC1_VM_INVALIDATE_ENG1_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L +//VML2VC1_VM_INVALIDATE_ENG2_REQ +#define VML2VC1_VM_INVALIDATE_ENG2_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG2_REQ__FLUSH_TYPE__SHIFT 0x10 +#define VML2VC1_VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PTES__SHIFT 0x12 +#define VML2VC1_VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13 +#define VML2VC1_VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14 +#define VML2VC1_VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15 +#define VML2VC1_VM_INVALIDATE_ENG2_REQ__INVALIDATE_L1_PTES__SHIFT 0x16 +#define 
VML2VC1_VM_INVALIDATE_ENG2_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17 +#define VML2VC1_VM_INVALIDATE_ENG2_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL +#define VML2VC1_VM_INVALIDATE_ENG2_REQ__FLUSH_TYPE_MASK 0x00030000L +#define VML2VC1_VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L +#define VML2VC1_VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L +#define VML2VC1_VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L +#define VML2VC1_VM_INVALIDATE_ENG2_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L +#define VML2VC1_VM_INVALIDATE_ENG2_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L +#define VML2VC1_VM_INVALIDATE_ENG2_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L +//VML2VC1_VM_INVALIDATE_ENG3_REQ +#define VML2VC1_VM_INVALIDATE_ENG3_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG3_REQ__FLUSH_TYPE__SHIFT 0x10 +#define VML2VC1_VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PTES__SHIFT 0x12 +#define VML2VC1_VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13 +#define VML2VC1_VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14 +#define VML2VC1_VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15 +#define VML2VC1_VM_INVALIDATE_ENG3_REQ__INVALIDATE_L1_PTES__SHIFT 0x16 +#define VML2VC1_VM_INVALIDATE_ENG3_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17 +#define VML2VC1_VM_INVALIDATE_ENG3_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL +#define VML2VC1_VM_INVALIDATE_ENG3_REQ__FLUSH_TYPE_MASK 0x00030000L +#define VML2VC1_VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L +#define VML2VC1_VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L +#define VML2VC1_VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L +#define VML2VC1_VM_INVALIDATE_ENG3_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L +#define VML2VC1_VM_INVALIDATE_ENG3_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L +#define VML2VC1_VM_INVALIDATE_ENG3_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L +//VML2VC1_VM_INVALIDATE_ENG4_REQ +#define VML2VC1_VM_INVALIDATE_ENG4_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG4_REQ__FLUSH_TYPE__SHIFT 0x10 +#define VML2VC1_VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PTES__SHIFT 0x12 +#define VML2VC1_VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13 +#define VML2VC1_VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14 +#define VML2VC1_VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15 +#define VML2VC1_VM_INVALIDATE_ENG4_REQ__INVALIDATE_L1_PTES__SHIFT 0x16 +#define VML2VC1_VM_INVALIDATE_ENG4_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17 +#define VML2VC1_VM_INVALIDATE_ENG4_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL +#define VML2VC1_VM_INVALIDATE_ENG4_REQ__FLUSH_TYPE_MASK 0x00030000L +#define VML2VC1_VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L +#define VML2VC1_VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L +#define VML2VC1_VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L +#define VML2VC1_VM_INVALIDATE_ENG4_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L +#define VML2VC1_VM_INVALIDATE_ENG4_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L +#define VML2VC1_VM_INVALIDATE_ENG4_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L +//VML2VC1_VM_INVALIDATE_ENG5_REQ +#define VML2VC1_VM_INVALIDATE_ENG5_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG5_REQ__FLUSH_TYPE__SHIFT 0x10 +#define VML2VC1_VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PTES__SHIFT 0x12 +#define VML2VC1_VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13 +#define 
VML2VC1_VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14 +#define VML2VC1_VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15 +#define VML2VC1_VM_INVALIDATE_ENG5_REQ__INVALIDATE_L1_PTES__SHIFT 0x16 +#define VML2VC1_VM_INVALIDATE_ENG5_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17 +#define VML2VC1_VM_INVALIDATE_ENG5_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL +#define VML2VC1_VM_INVALIDATE_ENG5_REQ__FLUSH_TYPE_MASK 0x00030000L +#define VML2VC1_VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L +#define VML2VC1_VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L +#define VML2VC1_VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L +#define VML2VC1_VM_INVALIDATE_ENG5_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L +#define VML2VC1_VM_INVALIDATE_ENG5_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L +#define VML2VC1_VM_INVALIDATE_ENG5_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L +//VML2VC1_VM_INVALIDATE_ENG6_REQ +#define VML2VC1_VM_INVALIDATE_ENG6_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG6_REQ__FLUSH_TYPE__SHIFT 0x10 +#define VML2VC1_VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PTES__SHIFT 0x12 +#define VML2VC1_VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13 +#define VML2VC1_VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14 +#define VML2VC1_VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15 +#define VML2VC1_VM_INVALIDATE_ENG6_REQ__INVALIDATE_L1_PTES__SHIFT 0x16 +#define VML2VC1_VM_INVALIDATE_ENG6_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17 +#define VML2VC1_VM_INVALIDATE_ENG6_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL +#define VML2VC1_VM_INVALIDATE_ENG6_REQ__FLUSH_TYPE_MASK 0x00030000L +#define VML2VC1_VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L +#define VML2VC1_VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L +#define VML2VC1_VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L +#define VML2VC1_VM_INVALIDATE_ENG6_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L +#define VML2VC1_VM_INVALIDATE_ENG6_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L +#define VML2VC1_VM_INVALIDATE_ENG6_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L +//VML2VC1_VM_INVALIDATE_ENG7_REQ +#define VML2VC1_VM_INVALIDATE_ENG7_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG7_REQ__FLUSH_TYPE__SHIFT 0x10 +#define VML2VC1_VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PTES__SHIFT 0x12 +#define VML2VC1_VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13 +#define VML2VC1_VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14 +#define VML2VC1_VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15 +#define VML2VC1_VM_INVALIDATE_ENG7_REQ__INVALIDATE_L1_PTES__SHIFT 0x16 +#define VML2VC1_VM_INVALIDATE_ENG7_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17 +#define VML2VC1_VM_INVALIDATE_ENG7_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL +#define VML2VC1_VM_INVALIDATE_ENG7_REQ__FLUSH_TYPE_MASK 0x00030000L +#define VML2VC1_VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L +#define VML2VC1_VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L +#define VML2VC1_VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L +#define VML2VC1_VM_INVALIDATE_ENG7_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L +#define VML2VC1_VM_INVALIDATE_ENG7_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L +#define VML2VC1_VM_INVALIDATE_ENG7_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L +//VML2VC1_VM_INVALIDATE_ENG8_REQ +#define VML2VC1_VM_INVALIDATE_ENG8_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0 +#define 
VML2VC1_VM_INVALIDATE_ENG8_REQ__FLUSH_TYPE__SHIFT 0x10 +#define VML2VC1_VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PTES__SHIFT 0x12 +#define VML2VC1_VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13 +#define VML2VC1_VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14 +#define VML2VC1_VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15 +#define VML2VC1_VM_INVALIDATE_ENG8_REQ__INVALIDATE_L1_PTES__SHIFT 0x16 +#define VML2VC1_VM_INVALIDATE_ENG8_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17 +#define VML2VC1_VM_INVALIDATE_ENG8_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL +#define VML2VC1_VM_INVALIDATE_ENG8_REQ__FLUSH_TYPE_MASK 0x00030000L +#define VML2VC1_VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L +#define VML2VC1_VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L +#define VML2VC1_VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L +#define VML2VC1_VM_INVALIDATE_ENG8_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L +#define VML2VC1_VM_INVALIDATE_ENG8_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L +#define VML2VC1_VM_INVALIDATE_ENG8_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L +//VML2VC1_VM_INVALIDATE_ENG9_REQ +#define VML2VC1_VM_INVALIDATE_ENG9_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG9_REQ__FLUSH_TYPE__SHIFT 0x10 +#define VML2VC1_VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PTES__SHIFT 0x12 +#define VML2VC1_VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13 +#define VML2VC1_VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14 +#define VML2VC1_VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15 +#define VML2VC1_VM_INVALIDATE_ENG9_REQ__INVALIDATE_L1_PTES__SHIFT 0x16 +#define VML2VC1_VM_INVALIDATE_ENG9_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17 +#define VML2VC1_VM_INVALIDATE_ENG9_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL +#define VML2VC1_VM_INVALIDATE_ENG9_REQ__FLUSH_TYPE_MASK 0x00030000L +#define VML2VC1_VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L +#define VML2VC1_VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L +#define VML2VC1_VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L +#define VML2VC1_VM_INVALIDATE_ENG9_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L +#define VML2VC1_VM_INVALIDATE_ENG9_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L +#define VML2VC1_VM_INVALIDATE_ENG9_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L +//VML2VC1_VM_INVALIDATE_ENG10_REQ +#define VML2VC1_VM_INVALIDATE_ENG10_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG10_REQ__FLUSH_TYPE__SHIFT 0x10 +#define VML2VC1_VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PTES__SHIFT 0x12 +#define VML2VC1_VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13 +#define VML2VC1_VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14 +#define VML2VC1_VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15 +#define VML2VC1_VM_INVALIDATE_ENG10_REQ__INVALIDATE_L1_PTES__SHIFT 0x16 +#define VML2VC1_VM_INVALIDATE_ENG10_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17 +#define VML2VC1_VM_INVALIDATE_ENG10_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL +#define VML2VC1_VM_INVALIDATE_ENG10_REQ__FLUSH_TYPE_MASK 0x00030000L +#define VML2VC1_VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L +#define VML2VC1_VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L +#define VML2VC1_VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L +#define VML2VC1_VM_INVALIDATE_ENG10_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L +#define VML2VC1_VM_INVALIDATE_ENG10_REQ__INVALIDATE_L1_PTES_MASK 
0x00400000L +#define VML2VC1_VM_INVALIDATE_ENG10_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L +//VML2VC1_VM_INVALIDATE_ENG11_REQ +#define VML2VC1_VM_INVALIDATE_ENG11_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG11_REQ__FLUSH_TYPE__SHIFT 0x10 +#define VML2VC1_VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PTES__SHIFT 0x12 +#define VML2VC1_VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13 +#define VML2VC1_VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14 +#define VML2VC1_VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15 +#define VML2VC1_VM_INVALIDATE_ENG11_REQ__INVALIDATE_L1_PTES__SHIFT 0x16 +#define VML2VC1_VM_INVALIDATE_ENG11_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17 +#define VML2VC1_VM_INVALIDATE_ENG11_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL +#define VML2VC1_VM_INVALIDATE_ENG11_REQ__FLUSH_TYPE_MASK 0x00030000L +#define VML2VC1_VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L +#define VML2VC1_VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L +#define VML2VC1_VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L +#define VML2VC1_VM_INVALIDATE_ENG11_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L +#define VML2VC1_VM_INVALIDATE_ENG11_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L +#define VML2VC1_VM_INVALIDATE_ENG11_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L +//VML2VC1_VM_INVALIDATE_ENG12_REQ +#define VML2VC1_VM_INVALIDATE_ENG12_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG12_REQ__FLUSH_TYPE__SHIFT 0x10 +#define VML2VC1_VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PTES__SHIFT 0x12 +#define VML2VC1_VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13 +#define VML2VC1_VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14 +#define VML2VC1_VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15 +#define VML2VC1_VM_INVALIDATE_ENG12_REQ__INVALIDATE_L1_PTES__SHIFT 0x16 +#define VML2VC1_VM_INVALIDATE_ENG12_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17 +#define VML2VC1_VM_INVALIDATE_ENG12_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL +#define VML2VC1_VM_INVALIDATE_ENG12_REQ__FLUSH_TYPE_MASK 0x00030000L +#define VML2VC1_VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L +#define VML2VC1_VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L +#define VML2VC1_VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L +#define VML2VC1_VM_INVALIDATE_ENG12_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L +#define VML2VC1_VM_INVALIDATE_ENG12_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L +#define VML2VC1_VM_INVALIDATE_ENG12_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L +//VML2VC1_VM_INVALIDATE_ENG13_REQ +#define VML2VC1_VM_INVALIDATE_ENG13_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG13_REQ__FLUSH_TYPE__SHIFT 0x10 +#define VML2VC1_VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PTES__SHIFT 0x12 +#define VML2VC1_VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13 +#define VML2VC1_VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14 +#define VML2VC1_VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15 +#define VML2VC1_VM_INVALIDATE_ENG13_REQ__INVALIDATE_L1_PTES__SHIFT 0x16 +#define VML2VC1_VM_INVALIDATE_ENG13_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17 +#define VML2VC1_VM_INVALIDATE_ENG13_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL +#define VML2VC1_VM_INVALIDATE_ENG13_REQ__FLUSH_TYPE_MASK 0x00030000L +#define VML2VC1_VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L +#define 
VML2VC1_VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L +#define VML2VC1_VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L +#define VML2VC1_VM_INVALIDATE_ENG13_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L +#define VML2VC1_VM_INVALIDATE_ENG13_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L +#define VML2VC1_VM_INVALIDATE_ENG13_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L +//VML2VC1_VM_INVALIDATE_ENG14_REQ +#define VML2VC1_VM_INVALIDATE_ENG14_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG14_REQ__FLUSH_TYPE__SHIFT 0x10 +#define VML2VC1_VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PTES__SHIFT 0x12 +#define VML2VC1_VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13 +#define VML2VC1_VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14 +#define VML2VC1_VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15 +#define VML2VC1_VM_INVALIDATE_ENG14_REQ__INVALIDATE_L1_PTES__SHIFT 0x16 +#define VML2VC1_VM_INVALIDATE_ENG14_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17 +#define VML2VC1_VM_INVALIDATE_ENG14_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL +#define VML2VC1_VM_INVALIDATE_ENG14_REQ__FLUSH_TYPE_MASK 0x00030000L +#define VML2VC1_VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L +#define VML2VC1_VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L +#define VML2VC1_VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L +#define VML2VC1_VM_INVALIDATE_ENG14_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L +#define VML2VC1_VM_INVALIDATE_ENG14_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L +#define VML2VC1_VM_INVALIDATE_ENG14_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L +//VML2VC1_VM_INVALIDATE_ENG15_REQ +#define VML2VC1_VM_INVALIDATE_ENG15_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG15_REQ__FLUSH_TYPE__SHIFT 0x10 +#define VML2VC1_VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PTES__SHIFT 0x12 +#define VML2VC1_VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13 +#define VML2VC1_VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14 +#define VML2VC1_VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15 +#define VML2VC1_VM_INVALIDATE_ENG15_REQ__INVALIDATE_L1_PTES__SHIFT 0x16 +#define VML2VC1_VM_INVALIDATE_ENG15_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17 +#define VML2VC1_VM_INVALIDATE_ENG15_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL +#define VML2VC1_VM_INVALIDATE_ENG15_REQ__FLUSH_TYPE_MASK 0x00030000L +#define VML2VC1_VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L +#define VML2VC1_VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L +#define VML2VC1_VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L +#define VML2VC1_VM_INVALIDATE_ENG15_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L +#define VML2VC1_VM_INVALIDATE_ENG15_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L +#define VML2VC1_VM_INVALIDATE_ENG15_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L +//VML2VC1_VM_INVALIDATE_ENG16_REQ +#define VML2VC1_VM_INVALIDATE_ENG16_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG16_REQ__FLUSH_TYPE__SHIFT 0x10 +#define VML2VC1_VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PTES__SHIFT 0x12 +#define VML2VC1_VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13 +#define VML2VC1_VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14 +#define VML2VC1_VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15 +#define VML2VC1_VM_INVALIDATE_ENG16_REQ__INVALIDATE_L1_PTES__SHIFT 0x16 +#define 
VML2VC1_VM_INVALIDATE_ENG16_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17 +#define VML2VC1_VM_INVALIDATE_ENG16_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL +#define VML2VC1_VM_INVALIDATE_ENG16_REQ__FLUSH_TYPE_MASK 0x00030000L +#define VML2VC1_VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L +#define VML2VC1_VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L +#define VML2VC1_VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L +#define VML2VC1_VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L +#define VML2VC1_VM_INVALIDATE_ENG16_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L +#define VML2VC1_VM_INVALIDATE_ENG16_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L +//VML2VC1_VM_INVALIDATE_ENG17_REQ +#define VML2VC1_VM_INVALIDATE_ENG17_REQ__PER_VMID_INVALIDATE_REQ__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG17_REQ__FLUSH_TYPE__SHIFT 0x10 +#define VML2VC1_VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PTES__SHIFT 0x12 +#define VML2VC1_VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE0__SHIFT 0x13 +#define VML2VC1_VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE1__SHIFT 0x14 +#define VML2VC1_VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE2__SHIFT 0x15 +#define VML2VC1_VM_INVALIDATE_ENG17_REQ__INVALIDATE_L1_PTES__SHIFT 0x16 +#define VML2VC1_VM_INVALIDATE_ENG17_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR__SHIFT 0x17 +#define VML2VC1_VM_INVALIDATE_ENG17_REQ__PER_VMID_INVALIDATE_REQ_MASK 0x0000FFFFL +#define VML2VC1_VM_INVALIDATE_ENG17_REQ__FLUSH_TYPE_MASK 0x00030000L +#define VML2VC1_VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PTES_MASK 0x00040000L +#define VML2VC1_VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE0_MASK 0x00080000L +#define VML2VC1_VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE1_MASK 0x00100000L +#define VML2VC1_VM_INVALIDATE_ENG17_REQ__INVALIDATE_L2_PDE2_MASK 0x00200000L +#define VML2VC1_VM_INVALIDATE_ENG17_REQ__INVALIDATE_L1_PTES_MASK 0x00400000L +#define VML2VC1_VM_INVALIDATE_ENG17_REQ__CLEAR_PROTECTION_FAULT_STATUS_ADDR_MASK 0x00800000L +//VML2VC1_VM_INVALIDATE_ENG0_ACK +#define VML2VC1_VM_INVALIDATE_ENG0_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG0_ACK__SEMAPHORE__SHIFT 0x10 +#define VML2VC1_VM_INVALIDATE_ENG0_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL +#define VML2VC1_VM_INVALIDATE_ENG0_ACK__SEMAPHORE_MASK 0x00010000L +//VML2VC1_VM_INVALIDATE_ENG1_ACK +#define VML2VC1_VM_INVALIDATE_ENG1_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG1_ACK__SEMAPHORE__SHIFT 0x10 +#define VML2VC1_VM_INVALIDATE_ENG1_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL +#define VML2VC1_VM_INVALIDATE_ENG1_ACK__SEMAPHORE_MASK 0x00010000L +//VML2VC1_VM_INVALIDATE_ENG2_ACK +#define VML2VC1_VM_INVALIDATE_ENG2_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG2_ACK__SEMAPHORE__SHIFT 0x10 +#define VML2VC1_VM_INVALIDATE_ENG2_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL +#define VML2VC1_VM_INVALIDATE_ENG2_ACK__SEMAPHORE_MASK 0x00010000L +//VML2VC1_VM_INVALIDATE_ENG3_ACK +#define VML2VC1_VM_INVALIDATE_ENG3_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG3_ACK__SEMAPHORE__SHIFT 0x10 +#define VML2VC1_VM_INVALIDATE_ENG3_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL +#define VML2VC1_VM_INVALIDATE_ENG3_ACK__SEMAPHORE_MASK 0x00010000L +//VML2VC1_VM_INVALIDATE_ENG4_ACK +#define VML2VC1_VM_INVALIDATE_ENG4_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG4_ACK__SEMAPHORE__SHIFT 0x10 +#define VML2VC1_VM_INVALIDATE_ENG4_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL +#define 
VML2VC1_VM_INVALIDATE_ENG4_ACK__SEMAPHORE_MASK 0x00010000L +//VML2VC1_VM_INVALIDATE_ENG5_ACK +#define VML2VC1_VM_INVALIDATE_ENG5_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG5_ACK__SEMAPHORE__SHIFT 0x10 +#define VML2VC1_VM_INVALIDATE_ENG5_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL +#define VML2VC1_VM_INVALIDATE_ENG5_ACK__SEMAPHORE_MASK 0x00010000L +//VML2VC1_VM_INVALIDATE_ENG6_ACK +#define VML2VC1_VM_INVALIDATE_ENG6_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG6_ACK__SEMAPHORE__SHIFT 0x10 +#define VML2VC1_VM_INVALIDATE_ENG6_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL +#define VML2VC1_VM_INVALIDATE_ENG6_ACK__SEMAPHORE_MASK 0x00010000L +//VML2VC1_VM_INVALIDATE_ENG7_ACK +#define VML2VC1_VM_INVALIDATE_ENG7_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG7_ACK__SEMAPHORE__SHIFT 0x10 +#define VML2VC1_VM_INVALIDATE_ENG7_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL +#define VML2VC1_VM_INVALIDATE_ENG7_ACK__SEMAPHORE_MASK 0x00010000L +//VML2VC1_VM_INVALIDATE_ENG8_ACK +#define VML2VC1_VM_INVALIDATE_ENG8_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG8_ACK__SEMAPHORE__SHIFT 0x10 +#define VML2VC1_VM_INVALIDATE_ENG8_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL +#define VML2VC1_VM_INVALIDATE_ENG8_ACK__SEMAPHORE_MASK 0x00010000L +//VML2VC1_VM_INVALIDATE_ENG9_ACK +#define VML2VC1_VM_INVALIDATE_ENG9_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG9_ACK__SEMAPHORE__SHIFT 0x10 +#define VML2VC1_VM_INVALIDATE_ENG9_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL +#define VML2VC1_VM_INVALIDATE_ENG9_ACK__SEMAPHORE_MASK 0x00010000L +//VML2VC1_VM_INVALIDATE_ENG10_ACK +#define VML2VC1_VM_INVALIDATE_ENG10_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG10_ACK__SEMAPHORE__SHIFT 0x10 +#define VML2VC1_VM_INVALIDATE_ENG10_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL +#define VML2VC1_VM_INVALIDATE_ENG10_ACK__SEMAPHORE_MASK 0x00010000L +//VML2VC1_VM_INVALIDATE_ENG11_ACK +#define VML2VC1_VM_INVALIDATE_ENG11_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG11_ACK__SEMAPHORE__SHIFT 0x10 +#define VML2VC1_VM_INVALIDATE_ENG11_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL +#define VML2VC1_VM_INVALIDATE_ENG11_ACK__SEMAPHORE_MASK 0x00010000L +//VML2VC1_VM_INVALIDATE_ENG12_ACK +#define VML2VC1_VM_INVALIDATE_ENG12_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG12_ACK__SEMAPHORE__SHIFT 0x10 +#define VML2VC1_VM_INVALIDATE_ENG12_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL +#define VML2VC1_VM_INVALIDATE_ENG12_ACK__SEMAPHORE_MASK 0x00010000L +//VML2VC1_VM_INVALIDATE_ENG13_ACK +#define VML2VC1_VM_INVALIDATE_ENG13_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG13_ACK__SEMAPHORE__SHIFT 0x10 +#define VML2VC1_VM_INVALIDATE_ENG13_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL +#define VML2VC1_VM_INVALIDATE_ENG13_ACK__SEMAPHORE_MASK 0x00010000L +//VML2VC1_VM_INVALIDATE_ENG14_ACK +#define VML2VC1_VM_INVALIDATE_ENG14_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG14_ACK__SEMAPHORE__SHIFT 0x10 +#define VML2VC1_VM_INVALIDATE_ENG14_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL +#define VML2VC1_VM_INVALIDATE_ENG14_ACK__SEMAPHORE_MASK 0x00010000L +//VML2VC1_VM_INVALIDATE_ENG15_ACK +#define VML2VC1_VM_INVALIDATE_ENG15_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG15_ACK__SEMAPHORE__SHIFT 0x10 +#define 
VML2VC1_VM_INVALIDATE_ENG15_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL +#define VML2VC1_VM_INVALIDATE_ENG15_ACK__SEMAPHORE_MASK 0x00010000L +//VML2VC1_VM_INVALIDATE_ENG16_ACK +#define VML2VC1_VM_INVALIDATE_ENG16_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG16_ACK__SEMAPHORE__SHIFT 0x10 +#define VML2VC1_VM_INVALIDATE_ENG16_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL +#define VML2VC1_VM_INVALIDATE_ENG16_ACK__SEMAPHORE_MASK 0x00010000L +//VML2VC1_VM_INVALIDATE_ENG17_ACK +#define VML2VC1_VM_INVALIDATE_ENG17_ACK__PER_VMID_INVALIDATE_ACK__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG17_ACK__SEMAPHORE__SHIFT 0x10 +#define VML2VC1_VM_INVALIDATE_ENG17_ACK__PER_VMID_INVALIDATE_ACK_MASK 0x0000FFFFL +#define VML2VC1_VM_INVALIDATE_ENG17_ACK__SEMAPHORE_MASK 0x00010000L +//VML2VC1_VM_INVALIDATE_ENG0_ADDR_RANGE_LO32 +#define VML2VC1_VM_INVALIDATE_ENG0_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG0_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1 +#define VML2VC1_VM_INVALIDATE_ENG0_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L +#define VML2VC1_VM_INVALIDATE_ENG0_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL +//VML2VC1_VM_INVALIDATE_ENG0_ADDR_RANGE_HI32 +#define VML2VC1_VM_INVALIDATE_ENG0_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG0_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL +//VML2VC1_VM_INVALIDATE_ENG1_ADDR_RANGE_LO32 +#define VML2VC1_VM_INVALIDATE_ENG1_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG1_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1 +#define VML2VC1_VM_INVALIDATE_ENG1_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L +#define VML2VC1_VM_INVALIDATE_ENG1_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL +//VML2VC1_VM_INVALIDATE_ENG1_ADDR_RANGE_HI32 +#define VML2VC1_VM_INVALIDATE_ENG1_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG1_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL +//VML2VC1_VM_INVALIDATE_ENG2_ADDR_RANGE_LO32 +#define VML2VC1_VM_INVALIDATE_ENG2_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG2_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1 +#define VML2VC1_VM_INVALIDATE_ENG2_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L +#define VML2VC1_VM_INVALIDATE_ENG2_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL +//VML2VC1_VM_INVALIDATE_ENG2_ADDR_RANGE_HI32 +#define VML2VC1_VM_INVALIDATE_ENG2_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG2_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL +//VML2VC1_VM_INVALIDATE_ENG3_ADDR_RANGE_LO32 +#define VML2VC1_VM_INVALIDATE_ENG3_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG3_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1 +#define VML2VC1_VM_INVALIDATE_ENG3_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L +#define VML2VC1_VM_INVALIDATE_ENG3_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL +//VML2VC1_VM_INVALIDATE_ENG3_ADDR_RANGE_HI32 +#define VML2VC1_VM_INVALIDATE_ENG3_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG3_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL +//VML2VC1_VM_INVALIDATE_ENG4_ADDR_RANGE_LO32 +#define VML2VC1_VM_INVALIDATE_ENG4_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG4_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1 +#define VML2VC1_VM_INVALIDATE_ENG4_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L +#define 
VML2VC1_VM_INVALIDATE_ENG4_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL +//VML2VC1_VM_INVALIDATE_ENG4_ADDR_RANGE_HI32 +#define VML2VC1_VM_INVALIDATE_ENG4_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG4_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL +//VML2VC1_VM_INVALIDATE_ENG5_ADDR_RANGE_LO32 +#define VML2VC1_VM_INVALIDATE_ENG5_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG5_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1 +#define VML2VC1_VM_INVALIDATE_ENG5_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L +#define VML2VC1_VM_INVALIDATE_ENG5_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL +//VML2VC1_VM_INVALIDATE_ENG5_ADDR_RANGE_HI32 +#define VML2VC1_VM_INVALIDATE_ENG5_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG5_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL +//VML2VC1_VM_INVALIDATE_ENG6_ADDR_RANGE_LO32 +#define VML2VC1_VM_INVALIDATE_ENG6_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG6_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1 +#define VML2VC1_VM_INVALIDATE_ENG6_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L +#define VML2VC1_VM_INVALIDATE_ENG6_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL +//VML2VC1_VM_INVALIDATE_ENG6_ADDR_RANGE_HI32 +#define VML2VC1_VM_INVALIDATE_ENG6_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG6_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL +//VML2VC1_VM_INVALIDATE_ENG7_ADDR_RANGE_LO32 +#define VML2VC1_VM_INVALIDATE_ENG7_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG7_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1 +#define VML2VC1_VM_INVALIDATE_ENG7_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L +#define VML2VC1_VM_INVALIDATE_ENG7_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL +//VML2VC1_VM_INVALIDATE_ENG7_ADDR_RANGE_HI32 +#define VML2VC1_VM_INVALIDATE_ENG7_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG7_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL +//VML2VC1_VM_INVALIDATE_ENG8_ADDR_RANGE_LO32 +#define VML2VC1_VM_INVALIDATE_ENG8_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG8_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1 +#define VML2VC1_VM_INVALIDATE_ENG8_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L +#define VML2VC1_VM_INVALIDATE_ENG8_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL +//VML2VC1_VM_INVALIDATE_ENG8_ADDR_RANGE_HI32 +#define VML2VC1_VM_INVALIDATE_ENG8_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG8_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL +//VML2VC1_VM_INVALIDATE_ENG9_ADDR_RANGE_LO32 +#define VML2VC1_VM_INVALIDATE_ENG9_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG9_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1 +#define VML2VC1_VM_INVALIDATE_ENG9_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L +#define VML2VC1_VM_INVALIDATE_ENG9_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL +//VML2VC1_VM_INVALIDATE_ENG9_ADDR_RANGE_HI32 +#define VML2VC1_VM_INVALIDATE_ENG9_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG9_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL +//VML2VC1_VM_INVALIDATE_ENG10_ADDR_RANGE_LO32 +#define VML2VC1_VM_INVALIDATE_ENG10_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0 +#define 
VML2VC1_VM_INVALIDATE_ENG10_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1 +#define VML2VC1_VM_INVALIDATE_ENG10_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L +#define VML2VC1_VM_INVALIDATE_ENG10_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL +//VML2VC1_VM_INVALIDATE_ENG10_ADDR_RANGE_HI32 +#define VML2VC1_VM_INVALIDATE_ENG10_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG10_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL +//VML2VC1_VM_INVALIDATE_ENG11_ADDR_RANGE_LO32 +#define VML2VC1_VM_INVALIDATE_ENG11_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG11_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1 +#define VML2VC1_VM_INVALIDATE_ENG11_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L +#define VML2VC1_VM_INVALIDATE_ENG11_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL +//VML2VC1_VM_INVALIDATE_ENG11_ADDR_RANGE_HI32 +#define VML2VC1_VM_INVALIDATE_ENG11_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG11_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL +//VML2VC1_VM_INVALIDATE_ENG12_ADDR_RANGE_LO32 +#define VML2VC1_VM_INVALIDATE_ENG12_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG12_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1 +#define VML2VC1_VM_INVALIDATE_ENG12_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L +#define VML2VC1_VM_INVALIDATE_ENG12_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL +//VML2VC1_VM_INVALIDATE_ENG12_ADDR_RANGE_HI32 +#define VML2VC1_VM_INVALIDATE_ENG12_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG12_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL +//VML2VC1_VM_INVALIDATE_ENG13_ADDR_RANGE_LO32 +#define VML2VC1_VM_INVALIDATE_ENG13_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG13_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1 +#define VML2VC1_VM_INVALIDATE_ENG13_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L +#define VML2VC1_VM_INVALIDATE_ENG13_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL +//VML2VC1_VM_INVALIDATE_ENG13_ADDR_RANGE_HI32 +#define VML2VC1_VM_INVALIDATE_ENG13_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG13_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL +//VML2VC1_VM_INVALIDATE_ENG14_ADDR_RANGE_LO32 +#define VML2VC1_VM_INVALIDATE_ENG14_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG14_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1 +#define VML2VC1_VM_INVALIDATE_ENG14_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L +#define VML2VC1_VM_INVALIDATE_ENG14_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL +//VML2VC1_VM_INVALIDATE_ENG14_ADDR_RANGE_HI32 +#define VML2VC1_VM_INVALIDATE_ENG14_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG14_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL +//VML2VC1_VM_INVALIDATE_ENG15_ADDR_RANGE_LO32 +#define VML2VC1_VM_INVALIDATE_ENG15_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG15_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1 +#define VML2VC1_VM_INVALIDATE_ENG15_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L +#define VML2VC1_VM_INVALIDATE_ENG15_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL +//VML2VC1_VM_INVALIDATE_ENG15_ADDR_RANGE_HI32 +#define VML2VC1_VM_INVALIDATE_ENG15_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0 +#define 
VML2VC1_VM_INVALIDATE_ENG15_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL +//VML2VC1_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32 +#define VML2VC1_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1 +#define VML2VC1_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L +#define VML2VC1_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL +//VML2VC1_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32 +#define VML2VC1_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL +//VML2VC1_VM_INVALIDATE_ENG17_ADDR_RANGE_LO32 +#define VML2VC1_VM_INVALIDATE_ENG17_ADDR_RANGE_LO32__S_BIT__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG17_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31__SHIFT 0x1 +#define VML2VC1_VM_INVALIDATE_ENG17_ADDR_RANGE_LO32__S_BIT_MASK 0x00000001L +#define VML2VC1_VM_INVALIDATE_ENG17_ADDR_RANGE_LO32__LOGI_PAGE_ADDR_RANGE_LO31_MASK 0xFFFFFFFEL +//VML2VC1_VM_INVALIDATE_ENG17_ADDR_RANGE_HI32 +#define VML2VC1_VM_INVALIDATE_ENG17_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5__SHIFT 0x0 +#define VML2VC1_VM_INVALIDATE_ENG17_ADDR_RANGE_HI32__LOGI_PAGE_ADDR_RANGE_HI5_MASK 0x0000001FL +//VML2VC1_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32 +#define VML2VC1_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32 +#define VML2VC1_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 +#define VML2VC1_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32 +#define VML2VC1_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32 +#define VML2VC1_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32 +#define VML2VC1_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT2_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32 +#define VML2VC1_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32 +#define VML2VC1_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT3_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32 +#define VML2VC1_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32 +#define 
VML2VC1_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT4_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32 +#define VML2VC1_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32 +#define VML2VC1_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT5_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32 +#define VML2VC1_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32 +#define VML2VC1_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT6_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32 +#define VML2VC1_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32 +#define VML2VC1_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT7_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32 +#define VML2VC1_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32 +#define VML2VC1_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT8_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32 +#define VML2VC1_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32 +#define VML2VC1_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT9_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32 +#define VML2VC1_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32 +#define VML2VC1_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT10_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32 +#define VML2VC1_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32 +#define VML2VC1_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0 +#define 
VML2VC1_VM_CONTEXT11_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32 +#define VML2VC1_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32 +#define VML2VC1_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT12_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32 +#define VML2VC1_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32 +#define VML2VC1_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT13_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32 +#define VML2VC1_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32 +#define VML2VC1_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT14_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32 +#define VML2VC1_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_LO32__PAGE_DIRECTORY_ENTRY_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32 +#define VML2VC1_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT15_PAGE_TABLE_BASE_ADDR_HI32__PAGE_DIRECTORY_ENTRY_HI32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32 +#define VML2VC1_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32 +#define VML2VC1_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC1_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32 +#define VML2VC1_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32 +#define VML2VC1_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC1_VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32 +#define VML2VC1_VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT2_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32 +#define VML2VC1_VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT2_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC1_VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32 +#define 
VML2VC1_VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT3_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32 +#define VML2VC1_VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT3_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC1_VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32 +#define VML2VC1_VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT4_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32 +#define VML2VC1_VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT4_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC1_VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32 +#define VML2VC1_VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT5_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32 +#define VML2VC1_VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT5_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC1_VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32 +#define VML2VC1_VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT6_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32 +#define VML2VC1_VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT6_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC1_VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32 +#define VML2VC1_VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT7_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32 +#define VML2VC1_VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT7_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC1_VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32 +#define VML2VC1_VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT8_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32 +#define VML2VC1_VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT8_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC1_VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32 +#define VML2VC1_VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT9_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32 +#define VML2VC1_VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT9_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC1_VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32 +#define VML2VC1_VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT10_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 
0xFFFFFFFFL +//VML2VC1_VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32 +#define VML2VC1_VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT10_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC1_VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32 +#define VML2VC1_VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT11_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32 +#define VML2VC1_VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT11_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC1_VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32 +#define VML2VC1_VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT12_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32 +#define VML2VC1_VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT12_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC1_VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32 +#define VML2VC1_VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT13_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32 +#define VML2VC1_VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT13_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC1_VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32 +#define VML2VC1_VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT14_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32 +#define VML2VC1_VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT14_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC1_VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32 +#define VML2VC1_VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT15_PAGE_TABLE_START_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32 +#define VML2VC1_VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT15_PAGE_TABLE_START_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC1_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32 +#define VML2VC1_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32 +#define VML2VC1_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC1_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32 +#define VML2VC1_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32 +#define VML2VC1_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define 
VML2VC1_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC1_VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32 +#define VML2VC1_VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT2_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32 +#define VML2VC1_VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT2_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC1_VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32 +#define VML2VC1_VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT3_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32 +#define VML2VC1_VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT3_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC1_VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32 +#define VML2VC1_VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT4_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32 +#define VML2VC1_VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT4_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC1_VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32 +#define VML2VC1_VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT5_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32 +#define VML2VC1_VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT5_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC1_VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32 +#define VML2VC1_VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT6_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32 +#define VML2VC1_VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT6_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC1_VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32 +#define VML2VC1_VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT7_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32 +#define VML2VC1_VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT7_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC1_VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32 +#define VML2VC1_VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT8_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32 +#define VML2VC1_VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT8_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC1_VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32 +#define VML2VC1_VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define 
VML2VC1_VM_CONTEXT9_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32 +#define VML2VC1_VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT9_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC1_VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32 +#define VML2VC1_VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT10_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32 +#define VML2VC1_VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT10_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC1_VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32 +#define VML2VC1_VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT11_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32 +#define VML2VC1_VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT11_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC1_VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32 +#define VML2VC1_VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT12_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32 +#define VML2VC1_VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT12_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC1_VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32 +#define VML2VC1_VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT13_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32 +#define VML2VC1_VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT13_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC1_VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32 +#define VML2VC1_VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT14_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32 +#define VML2VC1_VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT14_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL +//VML2VC1_VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32 +#define VML2VC1_VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT15_PAGE_TABLE_END_ADDR_LO32__LOGICAL_PAGE_NUMBER_LO32_MASK 0xFFFFFFFFL +//VML2VC1_VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32 +#define VML2VC1_VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4__SHIFT 0x0 +#define VML2VC1_VM_CONTEXT15_PAGE_TABLE_END_ADDR_HI32__LOGICAL_PAGE_NUMBER_HI4_MASK 0x0000000FL + + +// addressBlock: mmhub_utcl2_vmsharedpfdec:1 +//VMSHAREDPF1_MC_VM_NB_MMIOBASE +#define VMSHAREDPF1_MC_VM_NB_MMIOBASE__MMIOBASE__SHIFT 0x0 +#define VMSHAREDPF1_MC_VM_NB_MMIOBASE__MMIOBASE_MASK 0xFFFFFFFFL +//VMSHAREDPF1_MC_VM_NB_MMIOLIMIT +#define VMSHAREDPF1_MC_VM_NB_MMIOLIMIT__MMIOLIMIT__SHIFT 0x0 +#define VMSHAREDPF1_MC_VM_NB_MMIOLIMIT__MMIOLIMIT_MASK 0xFFFFFFFFL 
+//VMSHAREDPF1_MC_VM_NB_PCI_CTRL +#define VMSHAREDPF1_MC_VM_NB_PCI_CTRL__MMIOENABLE__SHIFT 0x17 +#define VMSHAREDPF1_MC_VM_NB_PCI_CTRL__MMIOENABLE_MASK 0x00800000L +//VMSHAREDPF1_MC_VM_NB_PCI_ARB +#define VMSHAREDPF1_MC_VM_NB_PCI_ARB__VGA_HOLE__SHIFT 0x3 +#define VMSHAREDPF1_MC_VM_NB_PCI_ARB__VGA_HOLE_MASK 0x00000008L +//VMSHAREDPF1_MC_VM_NB_TOP_OF_DRAM_SLOT1 +#define VMSHAREDPF1_MC_VM_NB_TOP_OF_DRAM_SLOT1__TOP_OF_DRAM__SHIFT 0x17 +#define VMSHAREDPF1_MC_VM_NB_TOP_OF_DRAM_SLOT1__TOP_OF_DRAM_MASK 0xFF800000L +//VMSHAREDPF1_MC_VM_NB_LOWER_TOP_OF_DRAM2 +#define VMSHAREDPF1_MC_VM_NB_LOWER_TOP_OF_DRAM2__ENABLE__SHIFT 0x0 +#define VMSHAREDPF1_MC_VM_NB_LOWER_TOP_OF_DRAM2__LOWER_TOM2__SHIFT 0x17 +#define VMSHAREDPF1_MC_VM_NB_LOWER_TOP_OF_DRAM2__ENABLE_MASK 0x00000001L +#define VMSHAREDPF1_MC_VM_NB_LOWER_TOP_OF_DRAM2__LOWER_TOM2_MASK 0xFF800000L +//VMSHAREDPF1_MC_VM_NB_UPPER_TOP_OF_DRAM2 +#define VMSHAREDPF1_MC_VM_NB_UPPER_TOP_OF_DRAM2__UPPER_TOM2__SHIFT 0x0 +#define VMSHAREDPF1_MC_VM_NB_UPPER_TOP_OF_DRAM2__UPPER_TOM2_MASK 0x00000FFFL +//VMSHAREDPF1_MC_VM_FB_OFFSET +#define VMSHAREDPF1_MC_VM_FB_OFFSET__FB_OFFSET__SHIFT 0x0 +#define VMSHAREDPF1_MC_VM_FB_OFFSET__FB_OFFSET_MASK 0x00FFFFFFL +//VMSHAREDPF1_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB +#define VMSHAREDPF1_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB__PHYSICAL_PAGE_NUMBER_LSB__SHIFT 0x0 +#define VMSHAREDPF1_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB__PHYSICAL_PAGE_NUMBER_LSB_MASK 0xFFFFFFFFL +//VMSHAREDPF1_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB +#define VMSHAREDPF1_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB__PHYSICAL_PAGE_NUMBER_MSB__SHIFT 0x0 +#define VMSHAREDPF1_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB__PHYSICAL_PAGE_NUMBER_MSB_MASK 0x0000000FL +//VMSHAREDPF1_MC_VM_STEERING +#define VMSHAREDPF1_MC_VM_STEERING__DEFAULT_STEERING__SHIFT 0x0 +#define VMSHAREDPF1_MC_VM_STEERING__DEFAULT_STEERING_MASK 0x00000003L +//VMSHAREDPF1_MC_SHARED_VIRT_RESET_REQ +#define VMSHAREDPF1_MC_SHARED_VIRT_RESET_REQ__VF__SHIFT 0x0 +#define VMSHAREDPF1_MC_SHARED_VIRT_RESET_REQ__PF__SHIFT 0x1f +#define VMSHAREDPF1_MC_SHARED_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL +#define VMSHAREDPF1_MC_SHARED_VIRT_RESET_REQ__PF_MASK 0x80000000L +//VMSHAREDPF1_MC_MEM_POWER_LS +#define VMSHAREDPF1_MC_MEM_POWER_LS__LS_SETUP__SHIFT 0x0 +#define VMSHAREDPF1_MC_MEM_POWER_LS__LS_HOLD__SHIFT 0x6 +#define VMSHAREDPF1_MC_MEM_POWER_LS__LS_SETUP_MASK 0x0000003FL +#define VMSHAREDPF1_MC_MEM_POWER_LS__LS_HOLD_MASK 0x00000FC0L +//VMSHAREDPF1_MC_VM_CACHEABLE_DRAM_ADDRESS_START +#define VMSHAREDPF1_MC_VM_CACHEABLE_DRAM_ADDRESS_START__ADDRESS__SHIFT 0x0 +#define VMSHAREDPF1_MC_VM_CACHEABLE_DRAM_ADDRESS_START__ADDRESS_MASK 0x000FFFFFL +//VMSHAREDPF1_MC_VM_CACHEABLE_DRAM_ADDRESS_END +#define VMSHAREDPF1_MC_VM_CACHEABLE_DRAM_ADDRESS_END__ADDRESS__SHIFT 0x0 +#define VMSHAREDPF1_MC_VM_CACHEABLE_DRAM_ADDRESS_END__ADDRESS_MASK 0x000FFFFFL +//VMSHAREDPF1_MC_VM_APT_CNTL +#define VMSHAREDPF1_MC_VM_APT_CNTL__FORCE_MTYPE_UC__SHIFT 0x0 +#define VMSHAREDPF1_MC_VM_APT_CNTL__DIRECT_SYSTEM_EN__SHIFT 0x1 +#define VMSHAREDPF1_MC_VM_APT_CNTL__FORCE_MTYPE_UC_MASK 0x00000001L +#define VMSHAREDPF1_MC_VM_APT_CNTL__DIRECT_SYSTEM_EN_MASK 0x00000002L +//VMSHAREDPF1_MC_VM_LOCAL_HBM_ADDRESS_START +#define VMSHAREDPF1_MC_VM_LOCAL_HBM_ADDRESS_START__ADDRESS__SHIFT 0x0 +#define VMSHAREDPF1_MC_VM_LOCAL_HBM_ADDRESS_START__ADDRESS_MASK 0x000FFFFFL +//VMSHAREDPF1_MC_VM_LOCAL_HBM_ADDRESS_END +#define VMSHAREDPF1_MC_VM_LOCAL_HBM_ADDRESS_END__ADDRESS__SHIFT 0x0 +#define VMSHAREDPF1_MC_VM_LOCAL_HBM_ADDRESS_END__ADDRESS_MASK 0x000FFFFFL 
+//VMSHAREDPF1_MC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL +#define VMSHAREDPF1_MC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL__LOCK__SHIFT 0x0 +#define VMSHAREDPF1_MC_VM_LOCAL_HBM_ADDRESS_LOCK_CNTL__LOCK_MASK 0x00000001L +//VMSHAREDPF1_MC_VM_XGMI_LFB_CNTL +#define VMSHAREDPF1_MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION__SHIFT 0x0 +#define VMSHAREDPF1_MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION__SHIFT 0x4 +#define VMSHAREDPF1_MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION_MASK 0x0000000FL +#define VMSHAREDPF1_MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION_MASK 0x000000F0L +//VMSHAREDPF1_MC_VM_XGMI_LFB_SIZE +#define VMSHAREDPF1_MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE__SHIFT 0x0 +#define VMSHAREDPF1_MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE_MASK 0x0001FFFFL +//VMSHAREDPF1_MC_VM_CACHEABLE_DRAM_CNTL +#define VMSHAREDPF1_MC_VM_CACHEABLE_DRAM_CNTL__ENABLE_CACHEABLE_DRAM_ADDRESS_APERTURE__SHIFT 0x0 +#define VMSHAREDPF1_MC_VM_CACHEABLE_DRAM_CNTL__ENABLE_CACHEABLE_DRAM_ADDRESS_APERTURE_MASK 0x00000001L + + +// addressBlock: mmhub_utcl2_vmsharedvcdec:1 +//VMSHAREDVC1_MC_VM_FB_LOCATION_BASE +#define VMSHAREDVC1_MC_VM_FB_LOCATION_BASE__FB_BASE__SHIFT 0x0 +#define VMSHAREDVC1_MC_VM_FB_LOCATION_BASE__FB_BASE_MASK 0x00FFFFFFL +//VMSHAREDVC1_MC_VM_FB_LOCATION_TOP +#define VMSHAREDVC1_MC_VM_FB_LOCATION_TOP__FB_TOP__SHIFT 0x0 +#define VMSHAREDVC1_MC_VM_FB_LOCATION_TOP__FB_TOP_MASK 0x00FFFFFFL +//VMSHAREDVC1_MC_VM_AGP_TOP +#define VMSHAREDVC1_MC_VM_AGP_TOP__AGP_TOP__SHIFT 0x0 +#define VMSHAREDVC1_MC_VM_AGP_TOP__AGP_TOP_MASK 0x00FFFFFFL +//VMSHAREDVC1_MC_VM_AGP_BOT +#define VMSHAREDVC1_MC_VM_AGP_BOT__AGP_BOT__SHIFT 0x0 +#define VMSHAREDVC1_MC_VM_AGP_BOT__AGP_BOT_MASK 0x00FFFFFFL +//VMSHAREDVC1_MC_VM_AGP_BASE +#define VMSHAREDVC1_MC_VM_AGP_BASE__AGP_BASE__SHIFT 0x0 +#define VMSHAREDVC1_MC_VM_AGP_BASE__AGP_BASE_MASK 0x00FFFFFFL +//VMSHAREDVC1_MC_VM_SYSTEM_APERTURE_LOW_ADDR +#define VMSHAREDVC1_MC_VM_SYSTEM_APERTURE_LOW_ADDR__LOGICAL_ADDR__SHIFT 0x0 +#define VMSHAREDVC1_MC_VM_SYSTEM_APERTURE_LOW_ADDR__LOGICAL_ADDR_MASK 0x3FFFFFFFL +//VMSHAREDVC1_MC_VM_SYSTEM_APERTURE_HIGH_ADDR +#define VMSHAREDVC1_MC_VM_SYSTEM_APERTURE_HIGH_ADDR__LOGICAL_ADDR__SHIFT 0x0 +#define VMSHAREDVC1_MC_VM_SYSTEM_APERTURE_HIGH_ADDR__LOGICAL_ADDR_MASK 0x3FFFFFFFL +//VMSHAREDVC1_MC_VM_MX_L1_TLB_CNTL +#define VMSHAREDVC1_MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB__SHIFT 0x0 +#define VMSHAREDVC1_MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE__SHIFT 0x3 +#define VMSHAREDVC1_MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT 0x5 +#define VMSHAREDVC1_MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL__SHIFT 0x6 +#define VMSHAREDVC1_MC_VM_MX_L1_TLB_CNTL__ECO_BITS__SHIFT 0x7 +#define VMSHAREDVC1_MC_VM_MX_L1_TLB_CNTL__MTYPE__SHIFT 0xb +#define VMSHAREDVC1_MC_VM_MX_L1_TLB_CNTL__ATC_EN__SHIFT 0xd +#define VMSHAREDVC1_MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK 0x00000001L +#define VMSHAREDVC1_MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK 0x00000018L +#define VMSHAREDVC1_MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS_MASK 0x00000020L +#define VMSHAREDVC1_MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK 0x00000040L +#define VMSHAREDVC1_MC_VM_MX_L1_TLB_CNTL__ECO_BITS_MASK 0x00000780L +#define VMSHAREDVC1_MC_VM_MX_L1_TLB_CNTL__MTYPE_MASK 0x00001800L +#define VMSHAREDVC1_MC_VM_MX_L1_TLB_CNTL__ATC_EN_MASK 0x00002000L + + +// addressBlock: mmhub_utcl2_vmsharedhvdec:1 +//VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF0 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_SIZE__SHIFT 0x0 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_OFFSET__SHIFT 0x10 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_SIZE_MASK 0x0000FFFFL +#define 
VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF0__VF_FB_OFFSET_MASK 0xFFFF0000L +//VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF1 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF1__VF_FB_SIZE__SHIFT 0x0 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF1__VF_FB_OFFSET__SHIFT 0x10 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF1__VF_FB_SIZE_MASK 0x0000FFFFL +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF1__VF_FB_OFFSET_MASK 0xFFFF0000L +//VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF2 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF2__VF_FB_SIZE__SHIFT 0x0 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF2__VF_FB_OFFSET__SHIFT 0x10 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF2__VF_FB_SIZE_MASK 0x0000FFFFL +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF2__VF_FB_OFFSET_MASK 0xFFFF0000L +//VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF3 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF3__VF_FB_SIZE__SHIFT 0x0 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF3__VF_FB_OFFSET__SHIFT 0x10 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF3__VF_FB_SIZE_MASK 0x0000FFFFL +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF3__VF_FB_OFFSET_MASK 0xFFFF0000L +//VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF4 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF4__VF_FB_SIZE__SHIFT 0x0 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF4__VF_FB_OFFSET__SHIFT 0x10 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF4__VF_FB_SIZE_MASK 0x0000FFFFL +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF4__VF_FB_OFFSET_MASK 0xFFFF0000L +//VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF5 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF5__VF_FB_SIZE__SHIFT 0x0 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF5__VF_FB_OFFSET__SHIFT 0x10 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF5__VF_FB_SIZE_MASK 0x0000FFFFL +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF5__VF_FB_OFFSET_MASK 0xFFFF0000L +//VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF6 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF6__VF_FB_SIZE__SHIFT 0x0 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF6__VF_FB_OFFSET__SHIFT 0x10 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF6__VF_FB_SIZE_MASK 0x0000FFFFL +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF6__VF_FB_OFFSET_MASK 0xFFFF0000L +//VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF7 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF7__VF_FB_SIZE__SHIFT 0x0 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF7__VF_FB_OFFSET__SHIFT 0x10 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF7__VF_FB_SIZE_MASK 0x0000FFFFL +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF7__VF_FB_OFFSET_MASK 0xFFFF0000L +//VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF8 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF8__VF_FB_SIZE__SHIFT 0x0 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF8__VF_FB_OFFSET__SHIFT 0x10 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF8__VF_FB_SIZE_MASK 0x0000FFFFL +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF8__VF_FB_OFFSET_MASK 0xFFFF0000L +//VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF9 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF9__VF_FB_SIZE__SHIFT 0x0 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF9__VF_FB_OFFSET__SHIFT 0x10 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF9__VF_FB_SIZE_MASK 0x0000FFFFL +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF9__VF_FB_OFFSET_MASK 0xFFFF0000L +//VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF10 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF10__VF_FB_SIZE__SHIFT 0x0 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF10__VF_FB_OFFSET__SHIFT 0x10 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF10__VF_FB_SIZE_MASK 0x0000FFFFL +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF10__VF_FB_OFFSET_MASK 0xFFFF0000L +//VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF11 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF11__VF_FB_SIZE__SHIFT 0x0 +#define 
VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF11__VF_FB_OFFSET__SHIFT 0x10 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF11__VF_FB_SIZE_MASK 0x0000FFFFL +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF11__VF_FB_OFFSET_MASK 0xFFFF0000L +//VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF12 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF12__VF_FB_SIZE__SHIFT 0x0 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF12__VF_FB_OFFSET__SHIFT 0x10 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF12__VF_FB_SIZE_MASK 0x0000FFFFL +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF12__VF_FB_OFFSET_MASK 0xFFFF0000L +//VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF13 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF13__VF_FB_SIZE__SHIFT 0x0 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF13__VF_FB_OFFSET__SHIFT 0x10 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF13__VF_FB_SIZE_MASK 0x0000FFFFL +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF13__VF_FB_OFFSET_MASK 0xFFFF0000L +//VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF14 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF14__VF_FB_SIZE__SHIFT 0x0 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF14__VF_FB_OFFSET__SHIFT 0x10 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF14__VF_FB_SIZE_MASK 0x0000FFFFL +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF14__VF_FB_OFFSET_MASK 0xFFFF0000L +//VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF15 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF15__VF_FB_SIZE__SHIFT 0x0 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF15__VF_FB_OFFSET__SHIFT 0x10 +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF15__VF_FB_SIZE_MASK 0x0000FFFFL +#define VMSHAREDHV1_MC_VM_FB_SIZE_OFFSET_VF15__VF_FB_OFFSET_MASK 0xFFFF0000L +//VMSHAREDHV1_VM_IOMMU_MMIO_CNTRL_1 +#define VMSHAREDHV1_VM_IOMMU_MMIO_CNTRL_1__MARC_EN__SHIFT 0x8 +#define VMSHAREDHV1_VM_IOMMU_MMIO_CNTRL_1__MARC_EN_MASK 0x00000100L +//VMSHAREDHV1_MC_VM_MARC_BASE_LO_0 +#define VMSHAREDHV1_MC_VM_MARC_BASE_LO_0__MARC_BASE_LO_0__SHIFT 0xc +#define VMSHAREDHV1_MC_VM_MARC_BASE_LO_0__MARC_BASE_LO_0_MASK 0xFFFFF000L +//VMSHAREDHV1_MC_VM_MARC_BASE_LO_1 +#define VMSHAREDHV1_MC_VM_MARC_BASE_LO_1__MARC_BASE_LO_1__SHIFT 0xc +#define VMSHAREDHV1_MC_VM_MARC_BASE_LO_1__MARC_BASE_LO_1_MASK 0xFFFFF000L +//VMSHAREDHV1_MC_VM_MARC_BASE_LO_2 +#define VMSHAREDHV1_MC_VM_MARC_BASE_LO_2__MARC_BASE_LO_2__SHIFT 0xc +#define VMSHAREDHV1_MC_VM_MARC_BASE_LO_2__MARC_BASE_LO_2_MASK 0xFFFFF000L +//VMSHAREDHV1_MC_VM_MARC_BASE_LO_3 +#define VMSHAREDHV1_MC_VM_MARC_BASE_LO_3__MARC_BASE_LO_3__SHIFT 0xc +#define VMSHAREDHV1_MC_VM_MARC_BASE_LO_3__MARC_BASE_LO_3_MASK 0xFFFFF000L +//VMSHAREDHV1_MC_VM_MARC_BASE_HI_0 +#define VMSHAREDHV1_MC_VM_MARC_BASE_HI_0__MARC_BASE_HI_0__SHIFT 0x0 +#define VMSHAREDHV1_MC_VM_MARC_BASE_HI_0__MARC_BASE_HI_0_MASK 0x000FFFFFL +//VMSHAREDHV1_MC_VM_MARC_BASE_HI_1 +#define VMSHAREDHV1_MC_VM_MARC_BASE_HI_1__MARC_BASE_HI_1__SHIFT 0x0 +#define VMSHAREDHV1_MC_VM_MARC_BASE_HI_1__MARC_BASE_HI_1_MASK 0x000FFFFFL +//VMSHAREDHV1_MC_VM_MARC_BASE_HI_2 +#define VMSHAREDHV1_MC_VM_MARC_BASE_HI_2__MARC_BASE_HI_2__SHIFT 0x0 +#define VMSHAREDHV1_MC_VM_MARC_BASE_HI_2__MARC_BASE_HI_2_MASK 0x000FFFFFL +//VMSHAREDHV1_MC_VM_MARC_BASE_HI_3 +#define VMSHAREDHV1_MC_VM_MARC_BASE_HI_3__MARC_BASE_HI_3__SHIFT 0x0 +#define VMSHAREDHV1_MC_VM_MARC_BASE_HI_3__MARC_BASE_HI_3_MASK 0x000FFFFFL +//VMSHAREDHV1_MC_VM_MARC_RELOC_LO_0 +#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_0__MARC_ENABLE_0__SHIFT 0x0 +#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_0__MARC_READONLY_0__SHIFT 0x1 +#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_0__MARC_RELOC_LO_0__SHIFT 0xc +#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_0__MARC_ENABLE_0_MASK 0x00000001L +#define 
VMSHAREDHV1_MC_VM_MARC_RELOC_LO_0__MARC_READONLY_0_MASK 0x00000002L +#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_0__MARC_RELOC_LO_0_MASK 0xFFFFF000L +//VMSHAREDHV1_MC_VM_MARC_RELOC_LO_1 +#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_1__MARC_ENABLE_1__SHIFT 0x0 +#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_1__MARC_READONLY_1__SHIFT 0x1 +#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_1__MARC_RELOC_LO_1__SHIFT 0xc +#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_1__MARC_ENABLE_1_MASK 0x00000001L +#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_1__MARC_READONLY_1_MASK 0x00000002L +#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_1__MARC_RELOC_LO_1_MASK 0xFFFFF000L +//VMSHAREDHV1_MC_VM_MARC_RELOC_LO_2 +#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_2__MARC_ENABLE_2__SHIFT 0x0 +#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_2__MARC_READONLY_2__SHIFT 0x1 +#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_2__MARC_RELOC_LO_2__SHIFT 0xc +#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_2__MARC_ENABLE_2_MASK 0x00000001L +#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_2__MARC_READONLY_2_MASK 0x00000002L +#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_2__MARC_RELOC_LO_2_MASK 0xFFFFF000L +//VMSHAREDHV1_MC_VM_MARC_RELOC_LO_3 +#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_3__MARC_ENABLE_3__SHIFT 0x0 +#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_3__MARC_READONLY_3__SHIFT 0x1 +#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_3__MARC_RELOC_LO_3__SHIFT 0xc +#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_3__MARC_ENABLE_3_MASK 0x00000001L +#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_3__MARC_READONLY_3_MASK 0x00000002L +#define VMSHAREDHV1_MC_VM_MARC_RELOC_LO_3__MARC_RELOC_LO_3_MASK 0xFFFFF000L +//VMSHAREDHV1_MC_VM_MARC_RELOC_HI_0 +#define VMSHAREDHV1_MC_VM_MARC_RELOC_HI_0__MARC_RELOC_HI_0__SHIFT 0x0 +#define VMSHAREDHV1_MC_VM_MARC_RELOC_HI_0__MARC_RELOC_HI_0_MASK 0x000FFFFFL +//VMSHAREDHV1_MC_VM_MARC_RELOC_HI_1 +#define VMSHAREDHV1_MC_VM_MARC_RELOC_HI_1__MARC_RELOC_HI_1__SHIFT 0x0 +#define VMSHAREDHV1_MC_VM_MARC_RELOC_HI_1__MARC_RELOC_HI_1_MASK 0x000FFFFFL +//VMSHAREDHV1_MC_VM_MARC_RELOC_HI_2 +#define VMSHAREDHV1_MC_VM_MARC_RELOC_HI_2__MARC_RELOC_HI_2__SHIFT 0x0 +#define VMSHAREDHV1_MC_VM_MARC_RELOC_HI_2__MARC_RELOC_HI_2_MASK 0x000FFFFFL +//VMSHAREDHV1_MC_VM_MARC_RELOC_HI_3 +#define VMSHAREDHV1_MC_VM_MARC_RELOC_HI_3__MARC_RELOC_HI_3__SHIFT 0x0 +#define VMSHAREDHV1_MC_VM_MARC_RELOC_HI_3__MARC_RELOC_HI_3_MASK 0x000FFFFFL +//VMSHAREDHV1_MC_VM_MARC_LEN_LO_0 +#define VMSHAREDHV1_MC_VM_MARC_LEN_LO_0__MARC_LEN_LO_0__SHIFT 0xc +#define VMSHAREDHV1_MC_VM_MARC_LEN_LO_0__MARC_LEN_LO_0_MASK 0xFFFFF000L +//VMSHAREDHV1_MC_VM_MARC_LEN_LO_1 +#define VMSHAREDHV1_MC_VM_MARC_LEN_LO_1__MARC_LEN_LO_1__SHIFT 0xc +#define VMSHAREDHV1_MC_VM_MARC_LEN_LO_1__MARC_LEN_LO_1_MASK 0xFFFFF000L +//VMSHAREDHV1_MC_VM_MARC_LEN_LO_2 +#define VMSHAREDHV1_MC_VM_MARC_LEN_LO_2__MARC_LEN_LO_2__SHIFT 0xc +#define VMSHAREDHV1_MC_VM_MARC_LEN_LO_2__MARC_LEN_LO_2_MASK 0xFFFFF000L +//VMSHAREDHV1_MC_VM_MARC_LEN_LO_3 +#define VMSHAREDHV1_MC_VM_MARC_LEN_LO_3__MARC_LEN_LO_3__SHIFT 0xc +#define VMSHAREDHV1_MC_VM_MARC_LEN_LO_3__MARC_LEN_LO_3_MASK 0xFFFFF000L +//VMSHAREDHV1_MC_VM_MARC_LEN_HI_0 +#define VMSHAREDHV1_MC_VM_MARC_LEN_HI_0__MARC_LEN_HI_0__SHIFT 0x0 +#define VMSHAREDHV1_MC_VM_MARC_LEN_HI_0__MARC_LEN_HI_0_MASK 0x000FFFFFL +//VMSHAREDHV1_MC_VM_MARC_LEN_HI_1 +#define VMSHAREDHV1_MC_VM_MARC_LEN_HI_1__MARC_LEN_HI_1__SHIFT 0x0 +#define VMSHAREDHV1_MC_VM_MARC_LEN_HI_1__MARC_LEN_HI_1_MASK 0x000FFFFFL +//VMSHAREDHV1_MC_VM_MARC_LEN_HI_2 +#define VMSHAREDHV1_MC_VM_MARC_LEN_HI_2__MARC_LEN_HI_2__SHIFT 0x0 +#define VMSHAREDHV1_MC_VM_MARC_LEN_HI_2__MARC_LEN_HI_2_MASK 0x000FFFFFL 
+//VMSHAREDHV1_MC_VM_MARC_LEN_HI_3 +#define VMSHAREDHV1_MC_VM_MARC_LEN_HI_3__MARC_LEN_HI_3__SHIFT 0x0 +#define VMSHAREDHV1_MC_VM_MARC_LEN_HI_3__MARC_LEN_HI_3_MASK 0x000FFFFFL +//VMSHAREDHV1_VM_IOMMU_CONTROL_REGISTER +#define VMSHAREDHV1_VM_IOMMU_CONTROL_REGISTER__IOMMUEN__SHIFT 0x0 +#define VMSHAREDHV1_VM_IOMMU_CONTROL_REGISTER__IOMMUEN_MASK 0x00000001L +//VMSHAREDHV1_VM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER +#define VMSHAREDHV1_VM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER__PERFOPTEN__SHIFT 0xd +#define VMSHAREDHV1_VM_IOMMU_PERFORMANCE_OPTIMIZATION_CONTROL_REGISTER__PERFOPTEN_MASK 0x00002000L +//VMSHAREDHV1_VM_PCIE_ATS_CNTL +#define VMSHAREDHV1_VM_PCIE_ATS_CNTL__STU__SHIFT 0x10 +#define VMSHAREDHV1_VM_PCIE_ATS_CNTL__ATC_ENABLE__SHIFT 0x1f +#define VMSHAREDHV1_VM_PCIE_ATS_CNTL__STU_MASK 0x001F0000L +#define VMSHAREDHV1_VM_PCIE_ATS_CNTL__ATC_ENABLE_MASK 0x80000000L +//VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_0 +#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_0__ATC_ENABLE__SHIFT 0x1f +#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_0__ATC_ENABLE_MASK 0x80000000L +//VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_1 +#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_1__ATC_ENABLE__SHIFT 0x1f +#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_1__ATC_ENABLE_MASK 0x80000000L +//VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_2 +#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_2__ATC_ENABLE__SHIFT 0x1f +#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_2__ATC_ENABLE_MASK 0x80000000L +//VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_3 +#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_3__ATC_ENABLE__SHIFT 0x1f +#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_3__ATC_ENABLE_MASK 0x80000000L +//VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_4 +#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_4__ATC_ENABLE__SHIFT 0x1f +#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_4__ATC_ENABLE_MASK 0x80000000L +//VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_5 +#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_5__ATC_ENABLE__SHIFT 0x1f +#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_5__ATC_ENABLE_MASK 0x80000000L +//VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_6 +#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_6__ATC_ENABLE__SHIFT 0x1f +#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_6__ATC_ENABLE_MASK 0x80000000L +//VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_7 +#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_7__ATC_ENABLE__SHIFT 0x1f +#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_7__ATC_ENABLE_MASK 0x80000000L +//VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_8 +#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_8__ATC_ENABLE__SHIFT 0x1f +#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_8__ATC_ENABLE_MASK 0x80000000L +//VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_9 +#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_9__ATC_ENABLE__SHIFT 0x1f +#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_9__ATC_ENABLE_MASK 0x80000000L +//VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_10 +#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_10__ATC_ENABLE__SHIFT 0x1f +#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_10__ATC_ENABLE_MASK 0x80000000L +//VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_11 +#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_11__ATC_ENABLE__SHIFT 0x1f +#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_11__ATC_ENABLE_MASK 0x80000000L +//VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_12 +#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_12__ATC_ENABLE__SHIFT 0x1f +#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_12__ATC_ENABLE_MASK 0x80000000L +//VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_13 +#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_13__ATC_ENABLE__SHIFT 0x1f +#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_13__ATC_ENABLE_MASK 0x80000000L +//VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_14 +#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_14__ATC_ENABLE__SHIFT 0x1f +#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_14__ATC_ENABLE_MASK 0x80000000L 
+//VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_15 +#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_15__ATC_ENABLE__SHIFT 0x1f +#define VMSHAREDHV1_VM_PCIE_ATS_CNTL_VF_15__ATC_ENABLE_MASK 0x80000000L +//VMSHAREDHV1_UTCL2_CGTT_CLK_CTRL +#define VMSHAREDHV1_UTCL2_CGTT_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define VMSHAREDHV1_UTCL2_CGTT_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define VMSHAREDHV1_UTCL2_CGTT_CLK_CTRL__SOFT_OVERRIDE_EXTRA__SHIFT 0xc +#define VMSHAREDHV1_UTCL2_CGTT_CLK_CTRL__MGLS_OVERRIDE__SHIFT 0xf +#define VMSHAREDHV1_UTCL2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE__SHIFT 0x10 +#define VMSHAREDHV1_UTCL2_CGTT_CLK_CTRL__SOFT_OVERRIDE__SHIFT 0x18 +#define VMSHAREDHV1_UTCL2_CGTT_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define VMSHAREDHV1_UTCL2_CGTT_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define VMSHAREDHV1_UTCL2_CGTT_CLK_CTRL__SOFT_OVERRIDE_EXTRA_MASK 0x00007000L +#define VMSHAREDHV1_UTCL2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L +#define VMSHAREDHV1_UTCL2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L +#define VMSHAREDHV1_UTCL2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L +//VMSHAREDHV1_MC_SHARED_ACTIVE_FCN_ID +#define VMSHAREDHV1_MC_SHARED_ACTIVE_FCN_ID__VFID__SHIFT 0x0 +#define VMSHAREDHV1_MC_SHARED_ACTIVE_FCN_ID__VF__SHIFT 0x1f +#define VMSHAREDHV1_MC_SHARED_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL +#define VMSHAREDHV1_MC_SHARED_ACTIVE_FCN_ID__VF_MASK 0x80000000L +//VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE +#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF0__SHIFT 0x0 +#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF1__SHIFT 0x1 +#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF2__SHIFT 0x2 +#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF3__SHIFT 0x3 +#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF4__SHIFT 0x4 +#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF5__SHIFT 0x5 +#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF6__SHIFT 0x6 +#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF7__SHIFT 0x7 +#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF8__SHIFT 0x8 +#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF9__SHIFT 0x9 +#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF10__SHIFT 0xa +#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF11__SHIFT 0xb +#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF12__SHIFT 0xc +#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF13__SHIFT 0xd +#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF14__SHIFT 0xe +#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF15__SHIFT 0xf +#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_PF__SHIFT 0x1f +#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF0_MASK 0x00000001L +#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF1_MASK 0x00000002L +#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF2_MASK 0x00000004L +#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF3_MASK 0x00000008L +#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF4_MASK 0x00000010L +#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF5_MASK 0x00000020L +#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF6_MASK 0x00000040L +#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF7_MASK 0x00000080L +#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF8_MASK 0x00000100L +#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF9_MASK 0x00000200L +#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF10_MASK 0x00000400L +#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF11_MASK 0x00000800L +#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF12_MASK 
0x00001000L +#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF13_MASK 0x00002000L +#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF14_MASK 0x00004000L +#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_VF15_MASK 0x00008000L +#define VMSHAREDHV1_MC_VM_XGMI_GPUIOV_ENABLE__ENABLE_PF_MASK 0x80000000L + + +// addressBlock: mmhub_utcl2_atcl2pfcntrdec:1 +//ATCL2PFCNTR1_ATC_L2_PERFCOUNTER_LO +#define ATCL2PFCNTR1_ATC_L2_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0 +#define ATCL2PFCNTR1_ATC_L2_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL +//ATCL2PFCNTR1_ATC_L2_PERFCOUNTER_HI +#define ATCL2PFCNTR1_ATC_L2_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0 +#define ATCL2PFCNTR1_ATC_L2_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10 +#define ATCL2PFCNTR1_ATC_L2_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL +#define ATCL2PFCNTR1_ATC_L2_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L + + +// addressBlock: mmhub_utcl2_atcl2pfcntldec:1 +//ATCL2PFCNTL1_ATC_L2_PERFCOUNTER0_CFG +#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0 +#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8 +#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18 +#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c +#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d +#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL +#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L +#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L +#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L +//ATCL2PFCNTL1_ATC_L2_PERFCOUNTER1_CFG +#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0 +#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8 +#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18 +#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c +#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d +#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL +#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L +#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L +#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L +//ATCL2PFCNTL1_ATC_L2_PERFCOUNTER_RSLT_CNTL +#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0 +#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8 +#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10 +#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18 +#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19 +#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a +#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL +#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L +#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L +#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L +#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L +#define ATCL2PFCNTL1_ATC_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L + + +// addressBlock: mmhub_utcl2_vml2pldec:1 +//VML2PL1_MC_VM_L2_PERFCOUNTER0_CFG +#define 
VML2PL1_MC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0 +#define VML2PL1_MC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8 +#define VML2PL1_MC_VM_L2_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18 +#define VML2PL1_MC_VM_L2_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c +#define VML2PL1_MC_VM_L2_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d +#define VML2PL1_MC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL +#define VML2PL1_MC_VM_L2_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define VML2PL1_MC_VM_L2_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L +#define VML2PL1_MC_VM_L2_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L +#define VML2PL1_MC_VM_L2_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L +//VML2PL1_MC_VM_L2_PERFCOUNTER1_CFG +#define VML2PL1_MC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0 +#define VML2PL1_MC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8 +#define VML2PL1_MC_VM_L2_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18 +#define VML2PL1_MC_VM_L2_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c +#define VML2PL1_MC_VM_L2_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d +#define VML2PL1_MC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL +#define VML2PL1_MC_VM_L2_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define VML2PL1_MC_VM_L2_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L +#define VML2PL1_MC_VM_L2_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L +#define VML2PL1_MC_VM_L2_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L +//VML2PL1_MC_VM_L2_PERFCOUNTER2_CFG +#define VML2PL1_MC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0 +#define VML2PL1_MC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8 +#define VML2PL1_MC_VM_L2_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18 +#define VML2PL1_MC_VM_L2_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c +#define VML2PL1_MC_VM_L2_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d +#define VML2PL1_MC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL +#define VML2PL1_MC_VM_L2_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define VML2PL1_MC_VM_L2_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L +#define VML2PL1_MC_VM_L2_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L +#define VML2PL1_MC_VM_L2_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L +//VML2PL1_MC_VM_L2_PERFCOUNTER3_CFG +#define VML2PL1_MC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL__SHIFT 0x0 +#define VML2PL1_MC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL_END__SHIFT 0x8 +#define VML2PL1_MC_VM_L2_PERFCOUNTER3_CFG__PERF_MODE__SHIFT 0x18 +#define VML2PL1_MC_VM_L2_PERFCOUNTER3_CFG__ENABLE__SHIFT 0x1c +#define VML2PL1_MC_VM_L2_PERFCOUNTER3_CFG__CLEAR__SHIFT 0x1d +#define VML2PL1_MC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL_MASK 0x000000FFL +#define VML2PL1_MC_VM_L2_PERFCOUNTER3_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define VML2PL1_MC_VM_L2_PERFCOUNTER3_CFG__PERF_MODE_MASK 0x0F000000L +#define VML2PL1_MC_VM_L2_PERFCOUNTER3_CFG__ENABLE_MASK 0x10000000L +#define VML2PL1_MC_VM_L2_PERFCOUNTER3_CFG__CLEAR_MASK 0x20000000L +//VML2PL1_MC_VM_L2_PERFCOUNTER4_CFG +#define VML2PL1_MC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL__SHIFT 0x0 +#define VML2PL1_MC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL_END__SHIFT 0x8 +#define VML2PL1_MC_VM_L2_PERFCOUNTER4_CFG__PERF_MODE__SHIFT 0x18 +#define VML2PL1_MC_VM_L2_PERFCOUNTER4_CFG__ENABLE__SHIFT 0x1c +#define VML2PL1_MC_VM_L2_PERFCOUNTER4_CFG__CLEAR__SHIFT 0x1d +#define VML2PL1_MC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL_MASK 0x000000FFL +#define VML2PL1_MC_VM_L2_PERFCOUNTER4_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define VML2PL1_MC_VM_L2_PERFCOUNTER4_CFG__PERF_MODE_MASK 0x0F000000L +#define VML2PL1_MC_VM_L2_PERFCOUNTER4_CFG__ENABLE_MASK 0x10000000L +#define VML2PL1_MC_VM_L2_PERFCOUNTER4_CFG__CLEAR_MASK 0x20000000L +//VML2PL1_MC_VM_L2_PERFCOUNTER5_CFG +#define 
VML2PL1_MC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL__SHIFT 0x0 +#define VML2PL1_MC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL_END__SHIFT 0x8 +#define VML2PL1_MC_VM_L2_PERFCOUNTER5_CFG__PERF_MODE__SHIFT 0x18 +#define VML2PL1_MC_VM_L2_PERFCOUNTER5_CFG__ENABLE__SHIFT 0x1c +#define VML2PL1_MC_VM_L2_PERFCOUNTER5_CFG__CLEAR__SHIFT 0x1d +#define VML2PL1_MC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL_MASK 0x000000FFL +#define VML2PL1_MC_VM_L2_PERFCOUNTER5_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define VML2PL1_MC_VM_L2_PERFCOUNTER5_CFG__PERF_MODE_MASK 0x0F000000L +#define VML2PL1_MC_VM_L2_PERFCOUNTER5_CFG__ENABLE_MASK 0x10000000L +#define VML2PL1_MC_VM_L2_PERFCOUNTER5_CFG__CLEAR_MASK 0x20000000L +//VML2PL1_MC_VM_L2_PERFCOUNTER6_CFG +#define VML2PL1_MC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL__SHIFT 0x0 +#define VML2PL1_MC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL_END__SHIFT 0x8 +#define VML2PL1_MC_VM_L2_PERFCOUNTER6_CFG__PERF_MODE__SHIFT 0x18 +#define VML2PL1_MC_VM_L2_PERFCOUNTER6_CFG__ENABLE__SHIFT 0x1c +#define VML2PL1_MC_VM_L2_PERFCOUNTER6_CFG__CLEAR__SHIFT 0x1d +#define VML2PL1_MC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL_MASK 0x000000FFL +#define VML2PL1_MC_VM_L2_PERFCOUNTER6_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define VML2PL1_MC_VM_L2_PERFCOUNTER6_CFG__PERF_MODE_MASK 0x0F000000L +#define VML2PL1_MC_VM_L2_PERFCOUNTER6_CFG__ENABLE_MASK 0x10000000L +#define VML2PL1_MC_VM_L2_PERFCOUNTER6_CFG__CLEAR_MASK 0x20000000L +//VML2PL1_MC_VM_L2_PERFCOUNTER7_CFG +#define VML2PL1_MC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL__SHIFT 0x0 +#define VML2PL1_MC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL_END__SHIFT 0x8 +#define VML2PL1_MC_VM_L2_PERFCOUNTER7_CFG__PERF_MODE__SHIFT 0x18 +#define VML2PL1_MC_VM_L2_PERFCOUNTER7_CFG__ENABLE__SHIFT 0x1c +#define VML2PL1_MC_VM_L2_PERFCOUNTER7_CFG__CLEAR__SHIFT 0x1d +#define VML2PL1_MC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL_MASK 0x000000FFL +#define VML2PL1_MC_VM_L2_PERFCOUNTER7_CFG__PERF_SEL_END_MASK 0x0000FF00L +#define VML2PL1_MC_VM_L2_PERFCOUNTER7_CFG__PERF_MODE_MASK 0x0F000000L +#define VML2PL1_MC_VM_L2_PERFCOUNTER7_CFG__ENABLE_MASK 0x10000000L +#define VML2PL1_MC_VM_L2_PERFCOUNTER7_CFG__CLEAR_MASK 0x20000000L +//VML2PL1_MC_VM_L2_PERFCOUNTER_RSLT_CNTL +#define VML2PL1_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0 +#define VML2PL1_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8 +#define VML2PL1_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10 +#define VML2PL1_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18 +#define VML2PL1_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19 +#define VML2PL1_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a +#define VML2PL1_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL +#define VML2PL1_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L +#define VML2PL1_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L +#define VML2PL1_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L +#define VML2PL1_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L +#define VML2PL1_MC_VM_L2_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L + + +// addressBlock: mmhub_utcl2_vml2prdec:1 +//VML2PR1_MC_VM_L2_PERFCOUNTER_LO +#define VML2PR1_MC_VM_L2_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0 +#define VML2PR1_MC_VM_L2_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL +//VML2PR1_MC_VM_L2_PERFCOUNTER_HI +#define VML2PR1_MC_VM_L2_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0 +#define VML2PR1_MC_VM_L2_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10 +#define VML2PR1_MC_VM_L2_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL +#define 
VML2PR1_MC_VM_L2_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L + +#endif -- cgit v1.2.3 From 4f727ecefefbd180de10e25b3e74c03dce3f1e75 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Wed, 29 Aug 2018 16:50:20 +0800 Subject: drm/amdgpu: add sdma 4.2.2 header files for Arcturus SDMA is the system DMA block. Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- .../include/asic_reg/sdma0/sdma0_4_2_2_offset.h | 1051 +++++++ .../include/asic_reg/sdma0/sdma0_4_2_2_sh_mask.h | 3002 ++++++++++++++++++++ .../include/asic_reg/sdma1/sdma1_4_2_2_offset.h | 1043 +++++++ .../include/asic_reg/sdma1/sdma1_4_2_2_sh_mask.h | 2956 +++++++++++++++++++ .../include/asic_reg/sdma2/sdma2_4_2_2_offset.h | 1043 +++++++ .../include/asic_reg/sdma2/sdma2_4_2_2_sh_mask.h | 2956 +++++++++++++++++++ .../include/asic_reg/sdma3/sdma3_4_2_2_offset.h | 1043 +++++++ .../include/asic_reg/sdma3/sdma3_4_2_2_sh_mask.h | 2956 +++++++++++++++++++ .../include/asic_reg/sdma4/sdma4_4_2_2_offset.h | 1043 +++++++ .../include/asic_reg/sdma4/sdma4_4_2_2_sh_mask.h | 2956 +++++++++++++++++++ .../include/asic_reg/sdma5/sdma5_4_2_2_offset.h | 1043 +++++++ .../include/asic_reg/sdma5/sdma5_4_2_2_sh_mask.h | 2956 +++++++++++++++++++ .../include/asic_reg/sdma6/sdma6_4_2_2_offset.h | 1043 +++++++ .../include/asic_reg/sdma6/sdma6_4_2_2_sh_mask.h | 2956 +++++++++++++++++++ .../include/asic_reg/sdma7/sdma7_4_2_2_offset.h | 1043 +++++++ .../include/asic_reg/sdma7/sdma7_4_2_2_sh_mask.h | 2956 +++++++++++++++++++ 16 files changed, 32046 insertions(+) create mode 100644 drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_2_2_offset.h create mode 100644 drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_2_2_sh_mask.h create mode 100644 drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_2_2_offset.h create mode 100644 drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_2_2_sh_mask.h create mode 100644 drivers/gpu/drm/amd/include/asic_reg/sdma2/sdma2_4_2_2_offset.h create mode 100644 drivers/gpu/drm/amd/include/asic_reg/sdma2/sdma2_4_2_2_sh_mask.h create mode 100644 drivers/gpu/drm/amd/include/asic_reg/sdma3/sdma3_4_2_2_offset.h create mode 100644 drivers/gpu/drm/amd/include/asic_reg/sdma3/sdma3_4_2_2_sh_mask.h create mode 100644 drivers/gpu/drm/amd/include/asic_reg/sdma4/sdma4_4_2_2_offset.h create mode 100644 drivers/gpu/drm/amd/include/asic_reg/sdma4/sdma4_4_2_2_sh_mask.h create mode 100644 drivers/gpu/drm/amd/include/asic_reg/sdma5/sdma5_4_2_2_offset.h create mode 100644 drivers/gpu/drm/amd/include/asic_reg/sdma5/sdma5_4_2_2_sh_mask.h create mode 100644 drivers/gpu/drm/amd/include/asic_reg/sdma6/sdma6_4_2_2_offset.h create mode 100644 drivers/gpu/drm/amd/include/asic_reg/sdma6/sdma6_4_2_2_sh_mask.h create mode 100644 drivers/gpu/drm/amd/include/asic_reg/sdma7/sdma7_4_2_2_offset.h create mode 100644 drivers/gpu/drm/amd/include/asic_reg/sdma7/sdma7_4_2_2_sh_mask.h (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_2_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_2_2_offset.h new file mode 100644 index 000000000000..ff5df90071e6 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_2_2_offset.h @@ -0,0 +1,1051 @@ +/* + * Copyright (C) 2018 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef _sdma0_4_2_2_OFFSET_HEADER +#define _sdma0_4_2_2_OFFSET_HEADER + + + +// addressBlock: sdma0_sdma0dec +// base address: 0x4980 +#define mmSDMA0_UCODE_ADDR 0x0000 +#define mmSDMA0_UCODE_ADDR_BASE_IDX 0 +#define mmSDMA0_UCODE_DATA 0x0001 +#define mmSDMA0_UCODE_DATA_BASE_IDX 0 +#define mmSDMA0_VM_CNTL 0x0004 +#define mmSDMA0_VM_CNTL_BASE_IDX 0 +#define mmSDMA0_VM_CTX_LO 0x0005 +#define mmSDMA0_VM_CTX_LO_BASE_IDX 0 +#define mmSDMA0_VM_CTX_HI 0x0006 +#define mmSDMA0_VM_CTX_HI_BASE_IDX 0 +#define mmSDMA0_ACTIVE_FCN_ID 0x0007 +#define mmSDMA0_ACTIVE_FCN_ID_BASE_IDX 0 +#define mmSDMA0_VM_CTX_CNTL 0x0008 +#define mmSDMA0_VM_CTX_CNTL_BASE_IDX 0 +#define mmSDMA0_VIRT_RESET_REQ 0x0009 +#define mmSDMA0_VIRT_RESET_REQ_BASE_IDX 0 +#define mmSDMA0_VF_ENABLE 0x000a +#define mmSDMA0_VF_ENABLE_BASE_IDX 0 +#define mmSDMA0_CONTEXT_REG_TYPE0 0x000b +#define mmSDMA0_CONTEXT_REG_TYPE0_BASE_IDX 0 +#define mmSDMA0_CONTEXT_REG_TYPE1 0x000c +#define mmSDMA0_CONTEXT_REG_TYPE1_BASE_IDX 0 +#define mmSDMA0_CONTEXT_REG_TYPE2 0x000d +#define mmSDMA0_CONTEXT_REG_TYPE2_BASE_IDX 0 +#define mmSDMA0_CONTEXT_REG_TYPE3 0x000e +#define mmSDMA0_CONTEXT_REG_TYPE3_BASE_IDX 0 +#define mmSDMA0_PUB_REG_TYPE0 0x000f +#define mmSDMA0_PUB_REG_TYPE0_BASE_IDX 0 +#define mmSDMA0_PUB_REG_TYPE1 0x0010 +#define mmSDMA0_PUB_REG_TYPE1_BASE_IDX 0 +#define mmSDMA0_PUB_REG_TYPE2 0x0011 +#define mmSDMA0_PUB_REG_TYPE2_BASE_IDX 0 +#define mmSDMA0_PUB_REG_TYPE3 0x0012 +#define mmSDMA0_PUB_REG_TYPE3_BASE_IDX 0 +#define mmSDMA0_MMHUB_CNTL 0x0013 +#define mmSDMA0_MMHUB_CNTL_BASE_IDX 0 +#define mmSDMA0_CONTEXT_GROUP_BOUNDARY 0x0019 +#define mmSDMA0_CONTEXT_GROUP_BOUNDARY_BASE_IDX 0 +#define mmSDMA0_POWER_CNTL 0x001a +#define mmSDMA0_POWER_CNTL_BASE_IDX 0 +#define mmSDMA0_CLK_CTRL 0x001b +#define mmSDMA0_CLK_CTRL_BASE_IDX 0 +#define mmSDMA0_CNTL 0x001c +#define mmSDMA0_CNTL_BASE_IDX 0 +#define mmSDMA0_CHICKEN_BITS 0x001d +#define mmSDMA0_CHICKEN_BITS_BASE_IDX 0 +#define mmSDMA0_GB_ADDR_CONFIG 0x001e +#define mmSDMA0_GB_ADDR_CONFIG_BASE_IDX 0 +#define mmSDMA0_GB_ADDR_CONFIG_READ 0x001f +#define mmSDMA0_GB_ADDR_CONFIG_READ_BASE_IDX 0 +#define mmSDMA0_RB_RPTR_FETCH_HI 0x0020 +#define mmSDMA0_RB_RPTR_FETCH_HI_BASE_IDX 0 +#define mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL 0x0021 +#define mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL_BASE_IDX 0 +#define mmSDMA0_RB_RPTR_FETCH 0x0022 +#define mmSDMA0_RB_RPTR_FETCH_BASE_IDX 0 +#define mmSDMA0_IB_OFFSET_FETCH 0x0023 +#define mmSDMA0_IB_OFFSET_FETCH_BASE_IDX 0 +#define 
mmSDMA0_PROGRAM 0x0024 +#define mmSDMA0_PROGRAM_BASE_IDX 0 +#define mmSDMA0_STATUS_REG 0x0025 +#define mmSDMA0_STATUS_REG_BASE_IDX 0 +#define mmSDMA0_STATUS1_REG 0x0026 +#define mmSDMA0_STATUS1_REG_BASE_IDX 0 +#define mmSDMA0_RD_BURST_CNTL 0x0027 +#define mmSDMA0_RD_BURST_CNTL_BASE_IDX 0 +#define mmSDMA0_HBM_PAGE_CONFIG 0x0028 +#define mmSDMA0_HBM_PAGE_CONFIG_BASE_IDX 0 +#define mmSDMA0_UCODE_CHECKSUM 0x0029 +#define mmSDMA0_UCODE_CHECKSUM_BASE_IDX 0 +#define mmSDMA0_F32_CNTL 0x002a +#define mmSDMA0_F32_CNTL_BASE_IDX 0 +#define mmSDMA0_FREEZE 0x002b +#define mmSDMA0_FREEZE_BASE_IDX 0 +#define mmSDMA0_PHASE0_QUANTUM 0x002c +#define mmSDMA0_PHASE0_QUANTUM_BASE_IDX 0 +#define mmSDMA0_PHASE1_QUANTUM 0x002d +#define mmSDMA0_PHASE1_QUANTUM_BASE_IDX 0 +#define mmSDMA_POWER_GATING 0x002e +#define mmSDMA_POWER_GATING_BASE_IDX 0 +#define mmSDMA_PGFSM_CONFIG 0x002f +#define mmSDMA_PGFSM_CONFIG_BASE_IDX 0 +#define mmSDMA_PGFSM_WRITE 0x0030 +#define mmSDMA_PGFSM_WRITE_BASE_IDX 0 +#define mmSDMA_PGFSM_READ 0x0031 +#define mmSDMA_PGFSM_READ_BASE_IDX 0 +#define mmSDMA0_EDC_CONFIG 0x0032 +#define mmSDMA0_EDC_CONFIG_BASE_IDX 0 +#define mmSDMA0_BA_THRESHOLD 0x0033 +#define mmSDMA0_BA_THRESHOLD_BASE_IDX 0 +#define mmSDMA0_ID 0x0034 +#define mmSDMA0_ID_BASE_IDX 0 +#define mmSDMA0_VERSION 0x0035 +#define mmSDMA0_VERSION_BASE_IDX 0 +#define mmSDMA0_EDC_COUNTER 0x0036 +#define mmSDMA0_EDC_COUNTER_BASE_IDX 0 +#define mmSDMA0_EDC_COUNTER_CLEAR 0x0037 +#define mmSDMA0_EDC_COUNTER_CLEAR_BASE_IDX 0 +#define mmSDMA0_STATUS2_REG 0x0038 +#define mmSDMA0_STATUS2_REG_BASE_IDX 0 +#define mmSDMA0_ATOMIC_CNTL 0x0039 +#define mmSDMA0_ATOMIC_CNTL_BASE_IDX 0 +#define mmSDMA0_ATOMIC_PREOP_LO 0x003a +#define mmSDMA0_ATOMIC_PREOP_LO_BASE_IDX 0 +#define mmSDMA0_ATOMIC_PREOP_HI 0x003b +#define mmSDMA0_ATOMIC_PREOP_HI_BASE_IDX 0 +#define mmSDMA0_UTCL1_CNTL 0x003c +#define mmSDMA0_UTCL1_CNTL_BASE_IDX 0 +#define mmSDMA0_UTCL1_WATERMK 0x003d +#define mmSDMA0_UTCL1_WATERMK_BASE_IDX 0 +#define mmSDMA0_UTCL1_RD_STATUS 0x003e +#define mmSDMA0_UTCL1_RD_STATUS_BASE_IDX 0 +#define mmSDMA0_UTCL1_WR_STATUS 0x003f +#define mmSDMA0_UTCL1_WR_STATUS_BASE_IDX 0 +#define mmSDMA0_UTCL1_INV0 0x0040 +#define mmSDMA0_UTCL1_INV0_BASE_IDX 0 +#define mmSDMA0_UTCL1_INV1 0x0041 +#define mmSDMA0_UTCL1_INV1_BASE_IDX 0 +#define mmSDMA0_UTCL1_INV2 0x0042 +#define mmSDMA0_UTCL1_INV2_BASE_IDX 0 +#define mmSDMA0_UTCL1_RD_XNACK0 0x0043 +#define mmSDMA0_UTCL1_RD_XNACK0_BASE_IDX 0 +#define mmSDMA0_UTCL1_RD_XNACK1 0x0044 +#define mmSDMA0_UTCL1_RD_XNACK1_BASE_IDX 0 +#define mmSDMA0_UTCL1_WR_XNACK0 0x0045 +#define mmSDMA0_UTCL1_WR_XNACK0_BASE_IDX 0 +#define mmSDMA0_UTCL1_WR_XNACK1 0x0046 +#define mmSDMA0_UTCL1_WR_XNACK1_BASE_IDX 0 +#define mmSDMA0_UTCL1_TIMEOUT 0x0047 +#define mmSDMA0_UTCL1_TIMEOUT_BASE_IDX 0 +#define mmSDMA0_UTCL1_PAGE 0x0048 +#define mmSDMA0_UTCL1_PAGE_BASE_IDX 0 +#define mmSDMA0_POWER_CNTL_IDLE 0x0049 +#define mmSDMA0_POWER_CNTL_IDLE_BASE_IDX 0 +#define mmSDMA0_RELAX_ORDERING_LUT 0x004a +#define mmSDMA0_RELAX_ORDERING_LUT_BASE_IDX 0 +#define mmSDMA0_CHICKEN_BITS_2 0x004b +#define mmSDMA0_CHICKEN_BITS_2_BASE_IDX 0 +#define mmSDMA0_STATUS3_REG 0x004c +#define mmSDMA0_STATUS3_REG_BASE_IDX 0 +#define mmSDMA0_PHYSICAL_ADDR_LO 0x004d +#define mmSDMA0_PHYSICAL_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_PHYSICAL_ADDR_HI 0x004e +#define mmSDMA0_PHYSICAL_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_PHASE2_QUANTUM 0x004f +#define mmSDMA0_PHASE2_QUANTUM_BASE_IDX 0 +#define mmSDMA0_ERROR_LOG 0x0050 +#define mmSDMA0_ERROR_LOG_BASE_IDX 0 +#define mmSDMA0_PUB_DUMMY_REG0 0x0051 +#define 
mmSDMA0_PUB_DUMMY_REG0_BASE_IDX 0 +#define mmSDMA0_PUB_DUMMY_REG1 0x0052 +#define mmSDMA0_PUB_DUMMY_REG1_BASE_IDX 0 +#define mmSDMA0_PUB_DUMMY_REG2 0x0053 +#define mmSDMA0_PUB_DUMMY_REG2_BASE_IDX 0 +#define mmSDMA0_PUB_DUMMY_REG3 0x0054 +#define mmSDMA0_PUB_DUMMY_REG3_BASE_IDX 0 +#define mmSDMA0_F32_COUNTER 0x0055 +#define mmSDMA0_F32_COUNTER_BASE_IDX 0 +#define mmSDMA0_UNBREAKABLE 0x0056 +#define mmSDMA0_UNBREAKABLE_BASE_IDX 0 +#define mmSDMA0_PERFMON_CNTL 0x0057 +#define mmSDMA0_PERFMON_CNTL_BASE_IDX 0 +#define mmSDMA0_PERFCOUNTER0_RESULT 0x0058 +#define mmSDMA0_PERFCOUNTER0_RESULT_BASE_IDX 0 +#define mmSDMA0_PERFCOUNTER1_RESULT 0x0059 +#define mmSDMA0_PERFCOUNTER1_RESULT_BASE_IDX 0 +#define mmSDMA0_PERFCOUNTER_TAG_DELAY_RANGE 0x005a +#define mmSDMA0_PERFCOUNTER_TAG_DELAY_RANGE_BASE_IDX 0 +#define mmSDMA0_CRD_CNTL 0x005b +#define mmSDMA0_CRD_CNTL_BASE_IDX 0 +#define mmSDMA0_GPU_IOV_VIOLATION_LOG 0x005d +#define mmSDMA0_GPU_IOV_VIOLATION_LOG_BASE_IDX 0 +#define mmSDMA0_ULV_CNTL 0x005e +#define mmSDMA0_ULV_CNTL_BASE_IDX 0 +#define mmSDMA0_EA_DBIT_ADDR_DATA 0x0060 +#define mmSDMA0_EA_DBIT_ADDR_DATA_BASE_IDX 0 +#define mmSDMA0_EA_DBIT_ADDR_INDEX 0x0061 +#define mmSDMA0_EA_DBIT_ADDR_INDEX_BASE_IDX 0 +#define mmSDMA0_GPU_IOV_VIOLATION_LOG2 0x0062 +#define mmSDMA0_GPU_IOV_VIOLATION_LOG2_BASE_IDX 0 +#define mmSDMA0_GFX_RB_CNTL 0x0080 +#define mmSDMA0_GFX_RB_CNTL_BASE_IDX 0 +#define mmSDMA0_GFX_RB_BASE 0x0081 +#define mmSDMA0_GFX_RB_BASE_BASE_IDX 0 +#define mmSDMA0_GFX_RB_BASE_HI 0x0082 +#define mmSDMA0_GFX_RB_BASE_HI_BASE_IDX 0 +#define mmSDMA0_GFX_RB_RPTR 0x0083 +#define mmSDMA0_GFX_RB_RPTR_BASE_IDX 0 +#define mmSDMA0_GFX_RB_RPTR_HI 0x0084 +#define mmSDMA0_GFX_RB_RPTR_HI_BASE_IDX 0 +#define mmSDMA0_GFX_RB_WPTR 0x0085 +#define mmSDMA0_GFX_RB_WPTR_BASE_IDX 0 +#define mmSDMA0_GFX_RB_WPTR_HI 0x0086 +#define mmSDMA0_GFX_RB_WPTR_HI_BASE_IDX 0 +#define mmSDMA0_GFX_RB_WPTR_POLL_CNTL 0x0087 +#define mmSDMA0_GFX_RB_WPTR_POLL_CNTL_BASE_IDX 0 +#define mmSDMA0_GFX_RB_RPTR_ADDR_HI 0x0088 +#define mmSDMA0_GFX_RB_RPTR_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_GFX_RB_RPTR_ADDR_LO 0x0089 +#define mmSDMA0_GFX_RB_RPTR_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_GFX_IB_CNTL 0x008a +#define mmSDMA0_GFX_IB_CNTL_BASE_IDX 0 +#define mmSDMA0_GFX_IB_RPTR 0x008b +#define mmSDMA0_GFX_IB_RPTR_BASE_IDX 0 +#define mmSDMA0_GFX_IB_OFFSET 0x008c +#define mmSDMA0_GFX_IB_OFFSET_BASE_IDX 0 +#define mmSDMA0_GFX_IB_BASE_LO 0x008d +#define mmSDMA0_GFX_IB_BASE_LO_BASE_IDX 0 +#define mmSDMA0_GFX_IB_BASE_HI 0x008e +#define mmSDMA0_GFX_IB_BASE_HI_BASE_IDX 0 +#define mmSDMA0_GFX_IB_SIZE 0x008f +#define mmSDMA0_GFX_IB_SIZE_BASE_IDX 0 +#define mmSDMA0_GFX_SKIP_CNTL 0x0090 +#define mmSDMA0_GFX_SKIP_CNTL_BASE_IDX 0 +#define mmSDMA0_GFX_CONTEXT_STATUS 0x0091 +#define mmSDMA0_GFX_CONTEXT_STATUS_BASE_IDX 0 +#define mmSDMA0_GFX_DOORBELL 0x0092 +#define mmSDMA0_GFX_DOORBELL_BASE_IDX 0 +#define mmSDMA0_GFX_CONTEXT_CNTL 0x0093 +#define mmSDMA0_GFX_CONTEXT_CNTL_BASE_IDX 0 +#define mmSDMA0_GFX_STATUS 0x00a8 +#define mmSDMA0_GFX_STATUS_BASE_IDX 0 +#define mmSDMA0_GFX_DOORBELL_LOG 0x00a9 +#define mmSDMA0_GFX_DOORBELL_LOG_BASE_IDX 0 +#define mmSDMA0_GFX_WATERMARK 0x00aa +#define mmSDMA0_GFX_WATERMARK_BASE_IDX 0 +#define mmSDMA0_GFX_DOORBELL_OFFSET 0x00ab +#define mmSDMA0_GFX_DOORBELL_OFFSET_BASE_IDX 0 +#define mmSDMA0_GFX_CSA_ADDR_LO 0x00ac +#define mmSDMA0_GFX_CSA_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_GFX_CSA_ADDR_HI 0x00ad +#define mmSDMA0_GFX_CSA_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_GFX_IB_SUB_REMAIN 0x00af +#define mmSDMA0_GFX_IB_SUB_REMAIN_BASE_IDX 0 +#define 
mmSDMA0_GFX_PREEMPT 0x00b0 +#define mmSDMA0_GFX_PREEMPT_BASE_IDX 0 +#define mmSDMA0_GFX_DUMMY_REG 0x00b1 +#define mmSDMA0_GFX_DUMMY_REG_BASE_IDX 0 +#define mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI 0x00b2 +#define mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO 0x00b3 +#define mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_GFX_RB_AQL_CNTL 0x00b4 +#define mmSDMA0_GFX_RB_AQL_CNTL_BASE_IDX 0 +#define mmSDMA0_GFX_MINOR_PTR_UPDATE 0x00b5 +#define mmSDMA0_GFX_MINOR_PTR_UPDATE_BASE_IDX 0 +#define mmSDMA0_GFX_MIDCMD_DATA0 0x00c0 +#define mmSDMA0_GFX_MIDCMD_DATA0_BASE_IDX 0 +#define mmSDMA0_GFX_MIDCMD_DATA1 0x00c1 +#define mmSDMA0_GFX_MIDCMD_DATA1_BASE_IDX 0 +#define mmSDMA0_GFX_MIDCMD_DATA2 0x00c2 +#define mmSDMA0_GFX_MIDCMD_DATA2_BASE_IDX 0 +#define mmSDMA0_GFX_MIDCMD_DATA3 0x00c3 +#define mmSDMA0_GFX_MIDCMD_DATA3_BASE_IDX 0 +#define mmSDMA0_GFX_MIDCMD_DATA4 0x00c4 +#define mmSDMA0_GFX_MIDCMD_DATA4_BASE_IDX 0 +#define mmSDMA0_GFX_MIDCMD_DATA5 0x00c5 +#define mmSDMA0_GFX_MIDCMD_DATA5_BASE_IDX 0 +#define mmSDMA0_GFX_MIDCMD_DATA6 0x00c6 +#define mmSDMA0_GFX_MIDCMD_DATA6_BASE_IDX 0 +#define mmSDMA0_GFX_MIDCMD_DATA7 0x00c7 +#define mmSDMA0_GFX_MIDCMD_DATA7_BASE_IDX 0 +#define mmSDMA0_GFX_MIDCMD_DATA8 0x00c8 +#define mmSDMA0_GFX_MIDCMD_DATA8_BASE_IDX 0 +#define mmSDMA0_GFX_MIDCMD_CNTL 0x00c9 +#define mmSDMA0_GFX_MIDCMD_CNTL_BASE_IDX 0 +#define mmSDMA0_PAGE_RB_CNTL 0x00d8 +#define mmSDMA0_PAGE_RB_CNTL_BASE_IDX 0 +#define mmSDMA0_PAGE_RB_BASE 0x00d9 +#define mmSDMA0_PAGE_RB_BASE_BASE_IDX 0 +#define mmSDMA0_PAGE_RB_BASE_HI 0x00da +#define mmSDMA0_PAGE_RB_BASE_HI_BASE_IDX 0 +#define mmSDMA0_PAGE_RB_RPTR 0x00db +#define mmSDMA0_PAGE_RB_RPTR_BASE_IDX 0 +#define mmSDMA0_PAGE_RB_RPTR_HI 0x00dc +#define mmSDMA0_PAGE_RB_RPTR_HI_BASE_IDX 0 +#define mmSDMA0_PAGE_RB_WPTR 0x00dd +#define mmSDMA0_PAGE_RB_WPTR_BASE_IDX 0 +#define mmSDMA0_PAGE_RB_WPTR_HI 0x00de +#define mmSDMA0_PAGE_RB_WPTR_HI_BASE_IDX 0 +#define mmSDMA0_PAGE_RB_WPTR_POLL_CNTL 0x00df +#define mmSDMA0_PAGE_RB_WPTR_POLL_CNTL_BASE_IDX 0 +#define mmSDMA0_PAGE_RB_RPTR_ADDR_HI 0x00e0 +#define mmSDMA0_PAGE_RB_RPTR_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_PAGE_RB_RPTR_ADDR_LO 0x00e1 +#define mmSDMA0_PAGE_RB_RPTR_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_PAGE_IB_CNTL 0x00e2 +#define mmSDMA0_PAGE_IB_CNTL_BASE_IDX 0 +#define mmSDMA0_PAGE_IB_RPTR 0x00e3 +#define mmSDMA0_PAGE_IB_RPTR_BASE_IDX 0 +#define mmSDMA0_PAGE_IB_OFFSET 0x00e4 +#define mmSDMA0_PAGE_IB_OFFSET_BASE_IDX 0 +#define mmSDMA0_PAGE_IB_BASE_LO 0x00e5 +#define mmSDMA0_PAGE_IB_BASE_LO_BASE_IDX 0 +#define mmSDMA0_PAGE_IB_BASE_HI 0x00e6 +#define mmSDMA0_PAGE_IB_BASE_HI_BASE_IDX 0 +#define mmSDMA0_PAGE_IB_SIZE 0x00e7 +#define mmSDMA0_PAGE_IB_SIZE_BASE_IDX 0 +#define mmSDMA0_PAGE_SKIP_CNTL 0x00e8 +#define mmSDMA0_PAGE_SKIP_CNTL_BASE_IDX 0 +#define mmSDMA0_PAGE_CONTEXT_STATUS 0x00e9 +#define mmSDMA0_PAGE_CONTEXT_STATUS_BASE_IDX 0 +#define mmSDMA0_PAGE_DOORBELL 0x00ea +#define mmSDMA0_PAGE_DOORBELL_BASE_IDX 0 +#define mmSDMA0_PAGE_STATUS 0x0100 +#define mmSDMA0_PAGE_STATUS_BASE_IDX 0 +#define mmSDMA0_PAGE_DOORBELL_LOG 0x0101 +#define mmSDMA0_PAGE_DOORBELL_LOG_BASE_IDX 0 +#define mmSDMA0_PAGE_WATERMARK 0x0102 +#define mmSDMA0_PAGE_WATERMARK_BASE_IDX 0 +#define mmSDMA0_PAGE_DOORBELL_OFFSET 0x0103 +#define mmSDMA0_PAGE_DOORBELL_OFFSET_BASE_IDX 0 +#define mmSDMA0_PAGE_CSA_ADDR_LO 0x0104 +#define mmSDMA0_PAGE_CSA_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_PAGE_CSA_ADDR_HI 0x0105 +#define mmSDMA0_PAGE_CSA_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_PAGE_IB_SUB_REMAIN 0x0107 +#define 
mmSDMA0_PAGE_IB_SUB_REMAIN_BASE_IDX 0 +#define mmSDMA0_PAGE_PREEMPT 0x0108 +#define mmSDMA0_PAGE_PREEMPT_BASE_IDX 0 +#define mmSDMA0_PAGE_DUMMY_REG 0x0109 +#define mmSDMA0_PAGE_DUMMY_REG_BASE_IDX 0 +#define mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_HI 0x010a +#define mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_LO 0x010b +#define mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_PAGE_RB_AQL_CNTL 0x010c +#define mmSDMA0_PAGE_RB_AQL_CNTL_BASE_IDX 0 +#define mmSDMA0_PAGE_MINOR_PTR_UPDATE 0x010d +#define mmSDMA0_PAGE_MINOR_PTR_UPDATE_BASE_IDX 0 +#define mmSDMA0_PAGE_MIDCMD_DATA0 0x0118 +#define mmSDMA0_PAGE_MIDCMD_DATA0_BASE_IDX 0 +#define mmSDMA0_PAGE_MIDCMD_DATA1 0x0119 +#define mmSDMA0_PAGE_MIDCMD_DATA1_BASE_IDX 0 +#define mmSDMA0_PAGE_MIDCMD_DATA2 0x011a +#define mmSDMA0_PAGE_MIDCMD_DATA2_BASE_IDX 0 +#define mmSDMA0_PAGE_MIDCMD_DATA3 0x011b +#define mmSDMA0_PAGE_MIDCMD_DATA3_BASE_IDX 0 +#define mmSDMA0_PAGE_MIDCMD_DATA4 0x011c +#define mmSDMA0_PAGE_MIDCMD_DATA4_BASE_IDX 0 +#define mmSDMA0_PAGE_MIDCMD_DATA5 0x011d +#define mmSDMA0_PAGE_MIDCMD_DATA5_BASE_IDX 0 +#define mmSDMA0_PAGE_MIDCMD_DATA6 0x011e +#define mmSDMA0_PAGE_MIDCMD_DATA6_BASE_IDX 0 +#define mmSDMA0_PAGE_MIDCMD_DATA7 0x011f +#define mmSDMA0_PAGE_MIDCMD_DATA7_BASE_IDX 0 +#define mmSDMA0_PAGE_MIDCMD_DATA8 0x0120 +#define mmSDMA0_PAGE_MIDCMD_DATA8_BASE_IDX 0 +#define mmSDMA0_PAGE_MIDCMD_CNTL 0x0121 +#define mmSDMA0_PAGE_MIDCMD_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC0_RB_CNTL 0x0130 +#define mmSDMA0_RLC0_RB_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC0_RB_BASE 0x0131 +#define mmSDMA0_RLC0_RB_BASE_BASE_IDX 0 +#define mmSDMA0_RLC0_RB_BASE_HI 0x0132 +#define mmSDMA0_RLC0_RB_BASE_HI_BASE_IDX 0 +#define mmSDMA0_RLC0_RB_RPTR 0x0133 +#define mmSDMA0_RLC0_RB_RPTR_BASE_IDX 0 +#define mmSDMA0_RLC0_RB_RPTR_HI 0x0134 +#define mmSDMA0_RLC0_RB_RPTR_HI_BASE_IDX 0 +#define mmSDMA0_RLC0_RB_WPTR 0x0135 +#define mmSDMA0_RLC0_RB_WPTR_BASE_IDX 0 +#define mmSDMA0_RLC0_RB_WPTR_HI 0x0136 +#define mmSDMA0_RLC0_RB_WPTR_HI_BASE_IDX 0 +#define mmSDMA0_RLC0_RB_WPTR_POLL_CNTL 0x0137 +#define mmSDMA0_RLC0_RB_WPTR_POLL_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC0_RB_RPTR_ADDR_HI 0x0138 +#define mmSDMA0_RLC0_RB_RPTR_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC0_RB_RPTR_ADDR_LO 0x0139 +#define mmSDMA0_RLC0_RB_RPTR_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC0_IB_CNTL 0x013a +#define mmSDMA0_RLC0_IB_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC0_IB_RPTR 0x013b +#define mmSDMA0_RLC0_IB_RPTR_BASE_IDX 0 +#define mmSDMA0_RLC0_IB_OFFSET 0x013c +#define mmSDMA0_RLC0_IB_OFFSET_BASE_IDX 0 +#define mmSDMA0_RLC0_IB_BASE_LO 0x013d +#define mmSDMA0_RLC0_IB_BASE_LO_BASE_IDX 0 +#define mmSDMA0_RLC0_IB_BASE_HI 0x013e +#define mmSDMA0_RLC0_IB_BASE_HI_BASE_IDX 0 +#define mmSDMA0_RLC0_IB_SIZE 0x013f +#define mmSDMA0_RLC0_IB_SIZE_BASE_IDX 0 +#define mmSDMA0_RLC0_SKIP_CNTL 0x0140 +#define mmSDMA0_RLC0_SKIP_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC0_CONTEXT_STATUS 0x0141 +#define mmSDMA0_RLC0_CONTEXT_STATUS_BASE_IDX 0 +#define mmSDMA0_RLC0_DOORBELL 0x0142 +#define mmSDMA0_RLC0_DOORBELL_BASE_IDX 0 +#define mmSDMA0_RLC0_STATUS 0x0158 +#define mmSDMA0_RLC0_STATUS_BASE_IDX 0 +#define mmSDMA0_RLC0_DOORBELL_LOG 0x0159 +#define mmSDMA0_RLC0_DOORBELL_LOG_BASE_IDX 0 +#define mmSDMA0_RLC0_WATERMARK 0x015a +#define mmSDMA0_RLC0_WATERMARK_BASE_IDX 0 +#define mmSDMA0_RLC0_DOORBELL_OFFSET 0x015b +#define mmSDMA0_RLC0_DOORBELL_OFFSET_BASE_IDX 0 +#define mmSDMA0_RLC0_CSA_ADDR_LO 0x015c +#define mmSDMA0_RLC0_CSA_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC0_CSA_ADDR_HI 0x015d +#define 
mmSDMA0_RLC0_CSA_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC0_IB_SUB_REMAIN 0x015f +#define mmSDMA0_RLC0_IB_SUB_REMAIN_BASE_IDX 0 +#define mmSDMA0_RLC0_PREEMPT 0x0160 +#define mmSDMA0_RLC0_PREEMPT_BASE_IDX 0 +#define mmSDMA0_RLC0_DUMMY_REG 0x0161 +#define mmSDMA0_RLC0_DUMMY_REG_BASE_IDX 0 +#define mmSDMA0_RLC0_RB_WPTR_POLL_ADDR_HI 0x0162 +#define mmSDMA0_RLC0_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC0_RB_WPTR_POLL_ADDR_LO 0x0163 +#define mmSDMA0_RLC0_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC0_RB_AQL_CNTL 0x0164 +#define mmSDMA0_RLC0_RB_AQL_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC0_MINOR_PTR_UPDATE 0x0165 +#define mmSDMA0_RLC0_MINOR_PTR_UPDATE_BASE_IDX 0 +#define mmSDMA0_RLC0_MIDCMD_DATA0 0x0170 +#define mmSDMA0_RLC0_MIDCMD_DATA0_BASE_IDX 0 +#define mmSDMA0_RLC0_MIDCMD_DATA1 0x0171 +#define mmSDMA0_RLC0_MIDCMD_DATA1_BASE_IDX 0 +#define mmSDMA0_RLC0_MIDCMD_DATA2 0x0172 +#define mmSDMA0_RLC0_MIDCMD_DATA2_BASE_IDX 0 +#define mmSDMA0_RLC0_MIDCMD_DATA3 0x0173 +#define mmSDMA0_RLC0_MIDCMD_DATA3_BASE_IDX 0 +#define mmSDMA0_RLC0_MIDCMD_DATA4 0x0174 +#define mmSDMA0_RLC0_MIDCMD_DATA4_BASE_IDX 0 +#define mmSDMA0_RLC0_MIDCMD_DATA5 0x0175 +#define mmSDMA0_RLC0_MIDCMD_DATA5_BASE_IDX 0 +#define mmSDMA0_RLC0_MIDCMD_DATA6 0x0176 +#define mmSDMA0_RLC0_MIDCMD_DATA6_BASE_IDX 0 +#define mmSDMA0_RLC0_MIDCMD_DATA7 0x0177 +#define mmSDMA0_RLC0_MIDCMD_DATA7_BASE_IDX 0 +#define mmSDMA0_RLC0_MIDCMD_DATA8 0x0178 +#define mmSDMA0_RLC0_MIDCMD_DATA8_BASE_IDX 0 +#define mmSDMA0_RLC0_MIDCMD_CNTL 0x0179 +#define mmSDMA0_RLC0_MIDCMD_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC1_RB_CNTL 0x0188 +#define mmSDMA0_RLC1_RB_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC1_RB_BASE 0x0189 +#define mmSDMA0_RLC1_RB_BASE_BASE_IDX 0 +#define mmSDMA0_RLC1_RB_BASE_HI 0x018a +#define mmSDMA0_RLC1_RB_BASE_HI_BASE_IDX 0 +#define mmSDMA0_RLC1_RB_RPTR 0x018b +#define mmSDMA0_RLC1_RB_RPTR_BASE_IDX 0 +#define mmSDMA0_RLC1_RB_RPTR_HI 0x018c +#define mmSDMA0_RLC1_RB_RPTR_HI_BASE_IDX 0 +#define mmSDMA0_RLC1_RB_WPTR 0x018d +#define mmSDMA0_RLC1_RB_WPTR_BASE_IDX 0 +#define mmSDMA0_RLC1_RB_WPTR_HI 0x018e +#define mmSDMA0_RLC1_RB_WPTR_HI_BASE_IDX 0 +#define mmSDMA0_RLC1_RB_WPTR_POLL_CNTL 0x018f +#define mmSDMA0_RLC1_RB_WPTR_POLL_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC1_RB_RPTR_ADDR_HI 0x0190 +#define mmSDMA0_RLC1_RB_RPTR_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC1_RB_RPTR_ADDR_LO 0x0191 +#define mmSDMA0_RLC1_RB_RPTR_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC1_IB_CNTL 0x0192 +#define mmSDMA0_RLC1_IB_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC1_IB_RPTR 0x0193 +#define mmSDMA0_RLC1_IB_RPTR_BASE_IDX 0 +#define mmSDMA0_RLC1_IB_OFFSET 0x0194 +#define mmSDMA0_RLC1_IB_OFFSET_BASE_IDX 0 +#define mmSDMA0_RLC1_IB_BASE_LO 0x0195 +#define mmSDMA0_RLC1_IB_BASE_LO_BASE_IDX 0 +#define mmSDMA0_RLC1_IB_BASE_HI 0x0196 +#define mmSDMA0_RLC1_IB_BASE_HI_BASE_IDX 0 +#define mmSDMA0_RLC1_IB_SIZE 0x0197 +#define mmSDMA0_RLC1_IB_SIZE_BASE_IDX 0 +#define mmSDMA0_RLC1_SKIP_CNTL 0x0198 +#define mmSDMA0_RLC1_SKIP_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC1_CONTEXT_STATUS 0x0199 +#define mmSDMA0_RLC1_CONTEXT_STATUS_BASE_IDX 0 +#define mmSDMA0_RLC1_DOORBELL 0x019a +#define mmSDMA0_RLC1_DOORBELL_BASE_IDX 0 +#define mmSDMA0_RLC1_STATUS 0x01b0 +#define mmSDMA0_RLC1_STATUS_BASE_IDX 0 +#define mmSDMA0_RLC1_DOORBELL_LOG 0x01b1 +#define mmSDMA0_RLC1_DOORBELL_LOG_BASE_IDX 0 +#define mmSDMA0_RLC1_WATERMARK 0x01b2 +#define mmSDMA0_RLC1_WATERMARK_BASE_IDX 0 +#define mmSDMA0_RLC1_DOORBELL_OFFSET 0x01b3 +#define mmSDMA0_RLC1_DOORBELL_OFFSET_BASE_IDX 0 +#define mmSDMA0_RLC1_CSA_ADDR_LO 0x01b4 +#define 
mmSDMA0_RLC1_CSA_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC1_CSA_ADDR_HI 0x01b5 +#define mmSDMA0_RLC1_CSA_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC1_IB_SUB_REMAIN 0x01b7 +#define mmSDMA0_RLC1_IB_SUB_REMAIN_BASE_IDX 0 +#define mmSDMA0_RLC1_PREEMPT 0x01b8 +#define mmSDMA0_RLC1_PREEMPT_BASE_IDX 0 +#define mmSDMA0_RLC1_DUMMY_REG 0x01b9 +#define mmSDMA0_RLC1_DUMMY_REG_BASE_IDX 0 +#define mmSDMA0_RLC1_RB_WPTR_POLL_ADDR_HI 0x01ba +#define mmSDMA0_RLC1_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC1_RB_WPTR_POLL_ADDR_LO 0x01bb +#define mmSDMA0_RLC1_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC1_RB_AQL_CNTL 0x01bc +#define mmSDMA0_RLC1_RB_AQL_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC1_MINOR_PTR_UPDATE 0x01bd +#define mmSDMA0_RLC1_MINOR_PTR_UPDATE_BASE_IDX 0 +#define mmSDMA0_RLC1_MIDCMD_DATA0 0x01c8 +#define mmSDMA0_RLC1_MIDCMD_DATA0_BASE_IDX 0 +#define mmSDMA0_RLC1_MIDCMD_DATA1 0x01c9 +#define mmSDMA0_RLC1_MIDCMD_DATA1_BASE_IDX 0 +#define mmSDMA0_RLC1_MIDCMD_DATA2 0x01ca +#define mmSDMA0_RLC1_MIDCMD_DATA2_BASE_IDX 0 +#define mmSDMA0_RLC1_MIDCMD_DATA3 0x01cb +#define mmSDMA0_RLC1_MIDCMD_DATA3_BASE_IDX 0 +#define mmSDMA0_RLC1_MIDCMD_DATA4 0x01cc +#define mmSDMA0_RLC1_MIDCMD_DATA4_BASE_IDX 0 +#define mmSDMA0_RLC1_MIDCMD_DATA5 0x01cd +#define mmSDMA0_RLC1_MIDCMD_DATA5_BASE_IDX 0 +#define mmSDMA0_RLC1_MIDCMD_DATA6 0x01ce +#define mmSDMA0_RLC1_MIDCMD_DATA6_BASE_IDX 0 +#define mmSDMA0_RLC1_MIDCMD_DATA7 0x01cf +#define mmSDMA0_RLC1_MIDCMD_DATA7_BASE_IDX 0 +#define mmSDMA0_RLC1_MIDCMD_DATA8 0x01d0 +#define mmSDMA0_RLC1_MIDCMD_DATA8_BASE_IDX 0 +#define mmSDMA0_RLC1_MIDCMD_CNTL 0x01d1 +#define mmSDMA0_RLC1_MIDCMD_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC2_RB_CNTL 0x01e0 +#define mmSDMA0_RLC2_RB_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC2_RB_BASE 0x01e1 +#define mmSDMA0_RLC2_RB_BASE_BASE_IDX 0 +#define mmSDMA0_RLC2_RB_BASE_HI 0x01e2 +#define mmSDMA0_RLC2_RB_BASE_HI_BASE_IDX 0 +#define mmSDMA0_RLC2_RB_RPTR 0x01e3 +#define mmSDMA0_RLC2_RB_RPTR_BASE_IDX 0 +#define mmSDMA0_RLC2_RB_RPTR_HI 0x01e4 +#define mmSDMA0_RLC2_RB_RPTR_HI_BASE_IDX 0 +#define mmSDMA0_RLC2_RB_WPTR 0x01e5 +#define mmSDMA0_RLC2_RB_WPTR_BASE_IDX 0 +#define mmSDMA0_RLC2_RB_WPTR_HI 0x01e6 +#define mmSDMA0_RLC2_RB_WPTR_HI_BASE_IDX 0 +#define mmSDMA0_RLC2_RB_WPTR_POLL_CNTL 0x01e7 +#define mmSDMA0_RLC2_RB_WPTR_POLL_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC2_RB_RPTR_ADDR_HI 0x01e8 +#define mmSDMA0_RLC2_RB_RPTR_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC2_RB_RPTR_ADDR_LO 0x01e9 +#define mmSDMA0_RLC2_RB_RPTR_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC2_IB_CNTL 0x01ea +#define mmSDMA0_RLC2_IB_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC2_IB_RPTR 0x01eb +#define mmSDMA0_RLC2_IB_RPTR_BASE_IDX 0 +#define mmSDMA0_RLC2_IB_OFFSET 0x01ec +#define mmSDMA0_RLC2_IB_OFFSET_BASE_IDX 0 +#define mmSDMA0_RLC2_IB_BASE_LO 0x01ed +#define mmSDMA0_RLC2_IB_BASE_LO_BASE_IDX 0 +#define mmSDMA0_RLC2_IB_BASE_HI 0x01ee +#define mmSDMA0_RLC2_IB_BASE_HI_BASE_IDX 0 +#define mmSDMA0_RLC2_IB_SIZE 0x01ef +#define mmSDMA0_RLC2_IB_SIZE_BASE_IDX 0 +#define mmSDMA0_RLC2_SKIP_CNTL 0x01f0 +#define mmSDMA0_RLC2_SKIP_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC2_CONTEXT_STATUS 0x01f1 +#define mmSDMA0_RLC2_CONTEXT_STATUS_BASE_IDX 0 +#define mmSDMA0_RLC2_DOORBELL 0x01f2 +#define mmSDMA0_RLC2_DOORBELL_BASE_IDX 0 +#define mmSDMA0_RLC2_STATUS 0x0208 +#define mmSDMA0_RLC2_STATUS_BASE_IDX 0 +#define mmSDMA0_RLC2_DOORBELL_LOG 0x0209 +#define mmSDMA0_RLC2_DOORBELL_LOG_BASE_IDX 0 +#define mmSDMA0_RLC2_WATERMARK 0x020a +#define mmSDMA0_RLC2_WATERMARK_BASE_IDX 0 +#define mmSDMA0_RLC2_DOORBELL_OFFSET 0x020b +#define 
mmSDMA0_RLC2_DOORBELL_OFFSET_BASE_IDX 0 +#define mmSDMA0_RLC2_CSA_ADDR_LO 0x020c +#define mmSDMA0_RLC2_CSA_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC2_CSA_ADDR_HI 0x020d +#define mmSDMA0_RLC2_CSA_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC2_IB_SUB_REMAIN 0x020f +#define mmSDMA0_RLC2_IB_SUB_REMAIN_BASE_IDX 0 +#define mmSDMA0_RLC2_PREEMPT 0x0210 +#define mmSDMA0_RLC2_PREEMPT_BASE_IDX 0 +#define mmSDMA0_RLC2_DUMMY_REG 0x0211 +#define mmSDMA0_RLC2_DUMMY_REG_BASE_IDX 0 +#define mmSDMA0_RLC2_RB_WPTR_POLL_ADDR_HI 0x0212 +#define mmSDMA0_RLC2_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC2_RB_WPTR_POLL_ADDR_LO 0x0213 +#define mmSDMA0_RLC2_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC2_RB_AQL_CNTL 0x0214 +#define mmSDMA0_RLC2_RB_AQL_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC2_MINOR_PTR_UPDATE 0x0215 +#define mmSDMA0_RLC2_MINOR_PTR_UPDATE_BASE_IDX 0 +#define mmSDMA0_RLC2_MIDCMD_DATA0 0x0220 +#define mmSDMA0_RLC2_MIDCMD_DATA0_BASE_IDX 0 +#define mmSDMA0_RLC2_MIDCMD_DATA1 0x0221 +#define mmSDMA0_RLC2_MIDCMD_DATA1_BASE_IDX 0 +#define mmSDMA0_RLC2_MIDCMD_DATA2 0x0222 +#define mmSDMA0_RLC2_MIDCMD_DATA2_BASE_IDX 0 +#define mmSDMA0_RLC2_MIDCMD_DATA3 0x0223 +#define mmSDMA0_RLC2_MIDCMD_DATA3_BASE_IDX 0 +#define mmSDMA0_RLC2_MIDCMD_DATA4 0x0224 +#define mmSDMA0_RLC2_MIDCMD_DATA4_BASE_IDX 0 +#define mmSDMA0_RLC2_MIDCMD_DATA5 0x0225 +#define mmSDMA0_RLC2_MIDCMD_DATA5_BASE_IDX 0 +#define mmSDMA0_RLC2_MIDCMD_DATA6 0x0226 +#define mmSDMA0_RLC2_MIDCMD_DATA6_BASE_IDX 0 +#define mmSDMA0_RLC2_MIDCMD_DATA7 0x0227 +#define mmSDMA0_RLC2_MIDCMD_DATA7_BASE_IDX 0 +#define mmSDMA0_RLC2_MIDCMD_DATA8 0x0228 +#define mmSDMA0_RLC2_MIDCMD_DATA8_BASE_IDX 0 +#define mmSDMA0_RLC2_MIDCMD_CNTL 0x0229 +#define mmSDMA0_RLC2_MIDCMD_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC3_RB_CNTL 0x0238 +#define mmSDMA0_RLC3_RB_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC3_RB_BASE 0x0239 +#define mmSDMA0_RLC3_RB_BASE_BASE_IDX 0 +#define mmSDMA0_RLC3_RB_BASE_HI 0x023a +#define mmSDMA0_RLC3_RB_BASE_HI_BASE_IDX 0 +#define mmSDMA0_RLC3_RB_RPTR 0x023b +#define mmSDMA0_RLC3_RB_RPTR_BASE_IDX 0 +#define mmSDMA0_RLC3_RB_RPTR_HI 0x023c +#define mmSDMA0_RLC3_RB_RPTR_HI_BASE_IDX 0 +#define mmSDMA0_RLC3_RB_WPTR 0x023d +#define mmSDMA0_RLC3_RB_WPTR_BASE_IDX 0 +#define mmSDMA0_RLC3_RB_WPTR_HI 0x023e +#define mmSDMA0_RLC3_RB_WPTR_HI_BASE_IDX 0 +#define mmSDMA0_RLC3_RB_WPTR_POLL_CNTL 0x023f +#define mmSDMA0_RLC3_RB_WPTR_POLL_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC3_RB_RPTR_ADDR_HI 0x0240 +#define mmSDMA0_RLC3_RB_RPTR_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC3_RB_RPTR_ADDR_LO 0x0241 +#define mmSDMA0_RLC3_RB_RPTR_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC3_IB_CNTL 0x0242 +#define mmSDMA0_RLC3_IB_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC3_IB_RPTR 0x0243 +#define mmSDMA0_RLC3_IB_RPTR_BASE_IDX 0 +#define mmSDMA0_RLC3_IB_OFFSET 0x0244 +#define mmSDMA0_RLC3_IB_OFFSET_BASE_IDX 0 +#define mmSDMA0_RLC3_IB_BASE_LO 0x0245 +#define mmSDMA0_RLC3_IB_BASE_LO_BASE_IDX 0 +#define mmSDMA0_RLC3_IB_BASE_HI 0x0246 +#define mmSDMA0_RLC3_IB_BASE_HI_BASE_IDX 0 +#define mmSDMA0_RLC3_IB_SIZE 0x0247 +#define mmSDMA0_RLC3_IB_SIZE_BASE_IDX 0 +#define mmSDMA0_RLC3_SKIP_CNTL 0x0248 +#define mmSDMA0_RLC3_SKIP_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC3_CONTEXT_STATUS 0x0249 +#define mmSDMA0_RLC3_CONTEXT_STATUS_BASE_IDX 0 +#define mmSDMA0_RLC3_DOORBELL 0x024a +#define mmSDMA0_RLC3_DOORBELL_BASE_IDX 0 +#define mmSDMA0_RLC3_STATUS 0x0260 +#define mmSDMA0_RLC3_STATUS_BASE_IDX 0 +#define mmSDMA0_RLC3_DOORBELL_LOG 0x0261 +#define mmSDMA0_RLC3_DOORBELL_LOG_BASE_IDX 0 +#define mmSDMA0_RLC3_WATERMARK 0x0262 +#define 
mmSDMA0_RLC3_WATERMARK_BASE_IDX 0 +#define mmSDMA0_RLC3_DOORBELL_OFFSET 0x0263 +#define mmSDMA0_RLC3_DOORBELL_OFFSET_BASE_IDX 0 +#define mmSDMA0_RLC3_CSA_ADDR_LO 0x0264 +#define mmSDMA0_RLC3_CSA_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC3_CSA_ADDR_HI 0x0265 +#define mmSDMA0_RLC3_CSA_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC3_IB_SUB_REMAIN 0x0267 +#define mmSDMA0_RLC3_IB_SUB_REMAIN_BASE_IDX 0 +#define mmSDMA0_RLC3_PREEMPT 0x0268 +#define mmSDMA0_RLC3_PREEMPT_BASE_IDX 0 +#define mmSDMA0_RLC3_DUMMY_REG 0x0269 +#define mmSDMA0_RLC3_DUMMY_REG_BASE_IDX 0 +#define mmSDMA0_RLC3_RB_WPTR_POLL_ADDR_HI 0x026a +#define mmSDMA0_RLC3_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC3_RB_WPTR_POLL_ADDR_LO 0x026b +#define mmSDMA0_RLC3_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC3_RB_AQL_CNTL 0x026c +#define mmSDMA0_RLC3_RB_AQL_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC3_MINOR_PTR_UPDATE 0x026d +#define mmSDMA0_RLC3_MINOR_PTR_UPDATE_BASE_IDX 0 +#define mmSDMA0_RLC3_MIDCMD_DATA0 0x0278 +#define mmSDMA0_RLC3_MIDCMD_DATA0_BASE_IDX 0 +#define mmSDMA0_RLC3_MIDCMD_DATA1 0x0279 +#define mmSDMA0_RLC3_MIDCMD_DATA1_BASE_IDX 0 +#define mmSDMA0_RLC3_MIDCMD_DATA2 0x027a +#define mmSDMA0_RLC3_MIDCMD_DATA2_BASE_IDX 0 +#define mmSDMA0_RLC3_MIDCMD_DATA3 0x027b +#define mmSDMA0_RLC3_MIDCMD_DATA3_BASE_IDX 0 +#define mmSDMA0_RLC3_MIDCMD_DATA4 0x027c +#define mmSDMA0_RLC3_MIDCMD_DATA4_BASE_IDX 0 +#define mmSDMA0_RLC3_MIDCMD_DATA5 0x027d +#define mmSDMA0_RLC3_MIDCMD_DATA5_BASE_IDX 0 +#define mmSDMA0_RLC3_MIDCMD_DATA6 0x027e +#define mmSDMA0_RLC3_MIDCMD_DATA6_BASE_IDX 0 +#define mmSDMA0_RLC3_MIDCMD_DATA7 0x027f +#define mmSDMA0_RLC3_MIDCMD_DATA7_BASE_IDX 0 +#define mmSDMA0_RLC3_MIDCMD_DATA8 0x0280 +#define mmSDMA0_RLC3_MIDCMD_DATA8_BASE_IDX 0 +#define mmSDMA0_RLC3_MIDCMD_CNTL 0x0281 +#define mmSDMA0_RLC3_MIDCMD_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC4_RB_CNTL 0x0290 +#define mmSDMA0_RLC4_RB_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC4_RB_BASE 0x0291 +#define mmSDMA0_RLC4_RB_BASE_BASE_IDX 0 +#define mmSDMA0_RLC4_RB_BASE_HI 0x0292 +#define mmSDMA0_RLC4_RB_BASE_HI_BASE_IDX 0 +#define mmSDMA0_RLC4_RB_RPTR 0x0293 +#define mmSDMA0_RLC4_RB_RPTR_BASE_IDX 0 +#define mmSDMA0_RLC4_RB_RPTR_HI 0x0294 +#define mmSDMA0_RLC4_RB_RPTR_HI_BASE_IDX 0 +#define mmSDMA0_RLC4_RB_WPTR 0x0295 +#define mmSDMA0_RLC4_RB_WPTR_BASE_IDX 0 +#define mmSDMA0_RLC4_RB_WPTR_HI 0x0296 +#define mmSDMA0_RLC4_RB_WPTR_HI_BASE_IDX 0 +#define mmSDMA0_RLC4_RB_WPTR_POLL_CNTL 0x0297 +#define mmSDMA0_RLC4_RB_WPTR_POLL_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC4_RB_RPTR_ADDR_HI 0x0298 +#define mmSDMA0_RLC4_RB_RPTR_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC4_RB_RPTR_ADDR_LO 0x0299 +#define mmSDMA0_RLC4_RB_RPTR_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC4_IB_CNTL 0x029a +#define mmSDMA0_RLC4_IB_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC4_IB_RPTR 0x029b +#define mmSDMA0_RLC4_IB_RPTR_BASE_IDX 0 +#define mmSDMA0_RLC4_IB_OFFSET 0x029c +#define mmSDMA0_RLC4_IB_OFFSET_BASE_IDX 0 +#define mmSDMA0_RLC4_IB_BASE_LO 0x029d +#define mmSDMA0_RLC4_IB_BASE_LO_BASE_IDX 0 +#define mmSDMA0_RLC4_IB_BASE_HI 0x029e +#define mmSDMA0_RLC4_IB_BASE_HI_BASE_IDX 0 +#define mmSDMA0_RLC4_IB_SIZE 0x029f +#define mmSDMA0_RLC4_IB_SIZE_BASE_IDX 0 +#define mmSDMA0_RLC4_SKIP_CNTL 0x02a0 +#define mmSDMA0_RLC4_SKIP_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC4_CONTEXT_STATUS 0x02a1 +#define mmSDMA0_RLC4_CONTEXT_STATUS_BASE_IDX 0 +#define mmSDMA0_RLC4_DOORBELL 0x02a2 +#define mmSDMA0_RLC4_DOORBELL_BASE_IDX 0 +#define mmSDMA0_RLC4_STATUS 0x02b8 +#define mmSDMA0_RLC4_STATUS_BASE_IDX 0 +#define mmSDMA0_RLC4_DOORBELL_LOG 0x02b9 +#define 
mmSDMA0_RLC4_DOORBELL_LOG_BASE_IDX 0 +#define mmSDMA0_RLC4_WATERMARK 0x02ba +#define mmSDMA0_RLC4_WATERMARK_BASE_IDX 0 +#define mmSDMA0_RLC4_DOORBELL_OFFSET 0x02bb +#define mmSDMA0_RLC4_DOORBELL_OFFSET_BASE_IDX 0 +#define mmSDMA0_RLC4_CSA_ADDR_LO 0x02bc +#define mmSDMA0_RLC4_CSA_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC4_CSA_ADDR_HI 0x02bd +#define mmSDMA0_RLC4_CSA_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC4_IB_SUB_REMAIN 0x02bf +#define mmSDMA0_RLC4_IB_SUB_REMAIN_BASE_IDX 0 +#define mmSDMA0_RLC4_PREEMPT 0x02c0 +#define mmSDMA0_RLC4_PREEMPT_BASE_IDX 0 +#define mmSDMA0_RLC4_DUMMY_REG 0x02c1 +#define mmSDMA0_RLC4_DUMMY_REG_BASE_IDX 0 +#define mmSDMA0_RLC4_RB_WPTR_POLL_ADDR_HI 0x02c2 +#define mmSDMA0_RLC4_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC4_RB_WPTR_POLL_ADDR_LO 0x02c3 +#define mmSDMA0_RLC4_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC4_RB_AQL_CNTL 0x02c4 +#define mmSDMA0_RLC4_RB_AQL_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC4_MINOR_PTR_UPDATE 0x02c5 +#define mmSDMA0_RLC4_MINOR_PTR_UPDATE_BASE_IDX 0 +#define mmSDMA0_RLC4_MIDCMD_DATA0 0x02d0 +#define mmSDMA0_RLC4_MIDCMD_DATA0_BASE_IDX 0 +#define mmSDMA0_RLC4_MIDCMD_DATA1 0x02d1 +#define mmSDMA0_RLC4_MIDCMD_DATA1_BASE_IDX 0 +#define mmSDMA0_RLC4_MIDCMD_DATA2 0x02d2 +#define mmSDMA0_RLC4_MIDCMD_DATA2_BASE_IDX 0 +#define mmSDMA0_RLC4_MIDCMD_DATA3 0x02d3 +#define mmSDMA0_RLC4_MIDCMD_DATA3_BASE_IDX 0 +#define mmSDMA0_RLC4_MIDCMD_DATA4 0x02d4 +#define mmSDMA0_RLC4_MIDCMD_DATA4_BASE_IDX 0 +#define mmSDMA0_RLC4_MIDCMD_DATA5 0x02d5 +#define mmSDMA0_RLC4_MIDCMD_DATA5_BASE_IDX 0 +#define mmSDMA0_RLC4_MIDCMD_DATA6 0x02d6 +#define mmSDMA0_RLC4_MIDCMD_DATA6_BASE_IDX 0 +#define mmSDMA0_RLC4_MIDCMD_DATA7 0x02d7 +#define mmSDMA0_RLC4_MIDCMD_DATA7_BASE_IDX 0 +#define mmSDMA0_RLC4_MIDCMD_DATA8 0x02d8 +#define mmSDMA0_RLC4_MIDCMD_DATA8_BASE_IDX 0 +#define mmSDMA0_RLC4_MIDCMD_CNTL 0x02d9 +#define mmSDMA0_RLC4_MIDCMD_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC5_RB_CNTL 0x02e8 +#define mmSDMA0_RLC5_RB_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC5_RB_BASE 0x02e9 +#define mmSDMA0_RLC5_RB_BASE_BASE_IDX 0 +#define mmSDMA0_RLC5_RB_BASE_HI 0x02ea +#define mmSDMA0_RLC5_RB_BASE_HI_BASE_IDX 0 +#define mmSDMA0_RLC5_RB_RPTR 0x02eb +#define mmSDMA0_RLC5_RB_RPTR_BASE_IDX 0 +#define mmSDMA0_RLC5_RB_RPTR_HI 0x02ec +#define mmSDMA0_RLC5_RB_RPTR_HI_BASE_IDX 0 +#define mmSDMA0_RLC5_RB_WPTR 0x02ed +#define mmSDMA0_RLC5_RB_WPTR_BASE_IDX 0 +#define mmSDMA0_RLC5_RB_WPTR_HI 0x02ee +#define mmSDMA0_RLC5_RB_WPTR_HI_BASE_IDX 0 +#define mmSDMA0_RLC5_RB_WPTR_POLL_CNTL 0x02ef +#define mmSDMA0_RLC5_RB_WPTR_POLL_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC5_RB_RPTR_ADDR_HI 0x02f0 +#define mmSDMA0_RLC5_RB_RPTR_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC5_RB_RPTR_ADDR_LO 0x02f1 +#define mmSDMA0_RLC5_RB_RPTR_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC5_IB_CNTL 0x02f2 +#define mmSDMA0_RLC5_IB_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC5_IB_RPTR 0x02f3 +#define mmSDMA0_RLC5_IB_RPTR_BASE_IDX 0 +#define mmSDMA0_RLC5_IB_OFFSET 0x02f4 +#define mmSDMA0_RLC5_IB_OFFSET_BASE_IDX 0 +#define mmSDMA0_RLC5_IB_BASE_LO 0x02f5 +#define mmSDMA0_RLC5_IB_BASE_LO_BASE_IDX 0 +#define mmSDMA0_RLC5_IB_BASE_HI 0x02f6 +#define mmSDMA0_RLC5_IB_BASE_HI_BASE_IDX 0 +#define mmSDMA0_RLC5_IB_SIZE 0x02f7 +#define mmSDMA0_RLC5_IB_SIZE_BASE_IDX 0 +#define mmSDMA0_RLC5_SKIP_CNTL 0x02f8 +#define mmSDMA0_RLC5_SKIP_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC5_CONTEXT_STATUS 0x02f9 +#define mmSDMA0_RLC5_CONTEXT_STATUS_BASE_IDX 0 +#define mmSDMA0_RLC5_DOORBELL 0x02fa +#define mmSDMA0_RLC5_DOORBELL_BASE_IDX 0 +#define mmSDMA0_RLC5_STATUS 0x0310 +#define 
mmSDMA0_RLC5_STATUS_BASE_IDX 0 +#define mmSDMA0_RLC5_DOORBELL_LOG 0x0311 +#define mmSDMA0_RLC5_DOORBELL_LOG_BASE_IDX 0 +#define mmSDMA0_RLC5_WATERMARK 0x0312 +#define mmSDMA0_RLC5_WATERMARK_BASE_IDX 0 +#define mmSDMA0_RLC5_DOORBELL_OFFSET 0x0313 +#define mmSDMA0_RLC5_DOORBELL_OFFSET_BASE_IDX 0 +#define mmSDMA0_RLC5_CSA_ADDR_LO 0x0314 +#define mmSDMA0_RLC5_CSA_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC5_CSA_ADDR_HI 0x0315 +#define mmSDMA0_RLC5_CSA_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC5_IB_SUB_REMAIN 0x0317 +#define mmSDMA0_RLC5_IB_SUB_REMAIN_BASE_IDX 0 +#define mmSDMA0_RLC5_PREEMPT 0x0318 +#define mmSDMA0_RLC5_PREEMPT_BASE_IDX 0 +#define mmSDMA0_RLC5_DUMMY_REG 0x0319 +#define mmSDMA0_RLC5_DUMMY_REG_BASE_IDX 0 +#define mmSDMA0_RLC5_RB_WPTR_POLL_ADDR_HI 0x031a +#define mmSDMA0_RLC5_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC5_RB_WPTR_POLL_ADDR_LO 0x031b +#define mmSDMA0_RLC5_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC5_RB_AQL_CNTL 0x031c +#define mmSDMA0_RLC5_RB_AQL_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC5_MINOR_PTR_UPDATE 0x031d +#define mmSDMA0_RLC5_MINOR_PTR_UPDATE_BASE_IDX 0 +#define mmSDMA0_RLC5_MIDCMD_DATA0 0x0328 +#define mmSDMA0_RLC5_MIDCMD_DATA0_BASE_IDX 0 +#define mmSDMA0_RLC5_MIDCMD_DATA1 0x0329 +#define mmSDMA0_RLC5_MIDCMD_DATA1_BASE_IDX 0 +#define mmSDMA0_RLC5_MIDCMD_DATA2 0x032a +#define mmSDMA0_RLC5_MIDCMD_DATA2_BASE_IDX 0 +#define mmSDMA0_RLC5_MIDCMD_DATA3 0x032b +#define mmSDMA0_RLC5_MIDCMD_DATA3_BASE_IDX 0 +#define mmSDMA0_RLC5_MIDCMD_DATA4 0x032c +#define mmSDMA0_RLC5_MIDCMD_DATA4_BASE_IDX 0 +#define mmSDMA0_RLC5_MIDCMD_DATA5 0x032d +#define mmSDMA0_RLC5_MIDCMD_DATA5_BASE_IDX 0 +#define mmSDMA0_RLC5_MIDCMD_DATA6 0x032e +#define mmSDMA0_RLC5_MIDCMD_DATA6_BASE_IDX 0 +#define mmSDMA0_RLC5_MIDCMD_DATA7 0x032f +#define mmSDMA0_RLC5_MIDCMD_DATA7_BASE_IDX 0 +#define mmSDMA0_RLC5_MIDCMD_DATA8 0x0330 +#define mmSDMA0_RLC5_MIDCMD_DATA8_BASE_IDX 0 +#define mmSDMA0_RLC5_MIDCMD_CNTL 0x0331 +#define mmSDMA0_RLC5_MIDCMD_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC6_RB_CNTL 0x0340 +#define mmSDMA0_RLC6_RB_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC6_RB_BASE 0x0341 +#define mmSDMA0_RLC6_RB_BASE_BASE_IDX 0 +#define mmSDMA0_RLC6_RB_BASE_HI 0x0342 +#define mmSDMA0_RLC6_RB_BASE_HI_BASE_IDX 0 +#define mmSDMA0_RLC6_RB_RPTR 0x0343 +#define mmSDMA0_RLC6_RB_RPTR_BASE_IDX 0 +#define mmSDMA0_RLC6_RB_RPTR_HI 0x0344 +#define mmSDMA0_RLC6_RB_RPTR_HI_BASE_IDX 0 +#define mmSDMA0_RLC6_RB_WPTR 0x0345 +#define mmSDMA0_RLC6_RB_WPTR_BASE_IDX 0 +#define mmSDMA0_RLC6_RB_WPTR_HI 0x0346 +#define mmSDMA0_RLC6_RB_WPTR_HI_BASE_IDX 0 +#define mmSDMA0_RLC6_RB_WPTR_POLL_CNTL 0x0347 +#define mmSDMA0_RLC6_RB_WPTR_POLL_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC6_RB_RPTR_ADDR_HI 0x0348 +#define mmSDMA0_RLC6_RB_RPTR_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC6_RB_RPTR_ADDR_LO 0x0349 +#define mmSDMA0_RLC6_RB_RPTR_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC6_IB_CNTL 0x034a +#define mmSDMA0_RLC6_IB_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC6_IB_RPTR 0x034b +#define mmSDMA0_RLC6_IB_RPTR_BASE_IDX 0 +#define mmSDMA0_RLC6_IB_OFFSET 0x034c +#define mmSDMA0_RLC6_IB_OFFSET_BASE_IDX 0 +#define mmSDMA0_RLC6_IB_BASE_LO 0x034d +#define mmSDMA0_RLC6_IB_BASE_LO_BASE_IDX 0 +#define mmSDMA0_RLC6_IB_BASE_HI 0x034e +#define mmSDMA0_RLC6_IB_BASE_HI_BASE_IDX 0 +#define mmSDMA0_RLC6_IB_SIZE 0x034f +#define mmSDMA0_RLC6_IB_SIZE_BASE_IDX 0 +#define mmSDMA0_RLC6_SKIP_CNTL 0x0350 +#define mmSDMA0_RLC6_SKIP_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC6_CONTEXT_STATUS 0x0351 +#define mmSDMA0_RLC6_CONTEXT_STATUS_BASE_IDX 0 +#define mmSDMA0_RLC6_DOORBELL 0x0352 +#define 
mmSDMA0_RLC6_DOORBELL_BASE_IDX 0 +#define mmSDMA0_RLC6_STATUS 0x0368 +#define mmSDMA0_RLC6_STATUS_BASE_IDX 0 +#define mmSDMA0_RLC6_DOORBELL_LOG 0x0369 +#define mmSDMA0_RLC6_DOORBELL_LOG_BASE_IDX 0 +#define mmSDMA0_RLC6_WATERMARK 0x036a +#define mmSDMA0_RLC6_WATERMARK_BASE_IDX 0 +#define mmSDMA0_RLC6_DOORBELL_OFFSET 0x036b +#define mmSDMA0_RLC6_DOORBELL_OFFSET_BASE_IDX 0 +#define mmSDMA0_RLC6_CSA_ADDR_LO 0x036c +#define mmSDMA0_RLC6_CSA_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC6_CSA_ADDR_HI 0x036d +#define mmSDMA0_RLC6_CSA_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC6_IB_SUB_REMAIN 0x036f +#define mmSDMA0_RLC6_IB_SUB_REMAIN_BASE_IDX 0 +#define mmSDMA0_RLC6_PREEMPT 0x0370 +#define mmSDMA0_RLC6_PREEMPT_BASE_IDX 0 +#define mmSDMA0_RLC6_DUMMY_REG 0x0371 +#define mmSDMA0_RLC6_DUMMY_REG_BASE_IDX 0 +#define mmSDMA0_RLC6_RB_WPTR_POLL_ADDR_HI 0x0372 +#define mmSDMA0_RLC6_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC6_RB_WPTR_POLL_ADDR_LO 0x0373 +#define mmSDMA0_RLC6_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC6_RB_AQL_CNTL 0x0374 +#define mmSDMA0_RLC6_RB_AQL_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC6_MINOR_PTR_UPDATE 0x0375 +#define mmSDMA0_RLC6_MINOR_PTR_UPDATE_BASE_IDX 0 +#define mmSDMA0_RLC6_MIDCMD_DATA0 0x0380 +#define mmSDMA0_RLC6_MIDCMD_DATA0_BASE_IDX 0 +#define mmSDMA0_RLC6_MIDCMD_DATA1 0x0381 +#define mmSDMA0_RLC6_MIDCMD_DATA1_BASE_IDX 0 +#define mmSDMA0_RLC6_MIDCMD_DATA2 0x0382 +#define mmSDMA0_RLC6_MIDCMD_DATA2_BASE_IDX 0 +#define mmSDMA0_RLC6_MIDCMD_DATA3 0x0383 +#define mmSDMA0_RLC6_MIDCMD_DATA3_BASE_IDX 0 +#define mmSDMA0_RLC6_MIDCMD_DATA4 0x0384 +#define mmSDMA0_RLC6_MIDCMD_DATA4_BASE_IDX 0 +#define mmSDMA0_RLC6_MIDCMD_DATA5 0x0385 +#define mmSDMA0_RLC6_MIDCMD_DATA5_BASE_IDX 0 +#define mmSDMA0_RLC6_MIDCMD_DATA6 0x0386 +#define mmSDMA0_RLC6_MIDCMD_DATA6_BASE_IDX 0 +#define mmSDMA0_RLC6_MIDCMD_DATA7 0x0387 +#define mmSDMA0_RLC6_MIDCMD_DATA7_BASE_IDX 0 +#define mmSDMA0_RLC6_MIDCMD_DATA8 0x0388 +#define mmSDMA0_RLC6_MIDCMD_DATA8_BASE_IDX 0 +#define mmSDMA0_RLC6_MIDCMD_CNTL 0x0389 +#define mmSDMA0_RLC6_MIDCMD_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC7_RB_CNTL 0x0398 +#define mmSDMA0_RLC7_RB_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC7_RB_BASE 0x0399 +#define mmSDMA0_RLC7_RB_BASE_BASE_IDX 0 +#define mmSDMA0_RLC7_RB_BASE_HI 0x039a +#define mmSDMA0_RLC7_RB_BASE_HI_BASE_IDX 0 +#define mmSDMA0_RLC7_RB_RPTR 0x039b +#define mmSDMA0_RLC7_RB_RPTR_BASE_IDX 0 +#define mmSDMA0_RLC7_RB_RPTR_HI 0x039c +#define mmSDMA0_RLC7_RB_RPTR_HI_BASE_IDX 0 +#define mmSDMA0_RLC7_RB_WPTR 0x039d +#define mmSDMA0_RLC7_RB_WPTR_BASE_IDX 0 +#define mmSDMA0_RLC7_RB_WPTR_HI 0x039e +#define mmSDMA0_RLC7_RB_WPTR_HI_BASE_IDX 0 +#define mmSDMA0_RLC7_RB_WPTR_POLL_CNTL 0x039f +#define mmSDMA0_RLC7_RB_WPTR_POLL_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC7_RB_RPTR_ADDR_HI 0x03a0 +#define mmSDMA0_RLC7_RB_RPTR_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC7_RB_RPTR_ADDR_LO 0x03a1 +#define mmSDMA0_RLC7_RB_RPTR_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC7_IB_CNTL 0x03a2 +#define mmSDMA0_RLC7_IB_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC7_IB_RPTR 0x03a3 +#define mmSDMA0_RLC7_IB_RPTR_BASE_IDX 0 +#define mmSDMA0_RLC7_IB_OFFSET 0x03a4 +#define mmSDMA0_RLC7_IB_OFFSET_BASE_IDX 0 +#define mmSDMA0_RLC7_IB_BASE_LO 0x03a5 +#define mmSDMA0_RLC7_IB_BASE_LO_BASE_IDX 0 +#define mmSDMA0_RLC7_IB_BASE_HI 0x03a6 +#define mmSDMA0_RLC7_IB_BASE_HI_BASE_IDX 0 +#define mmSDMA0_RLC7_IB_SIZE 0x03a7 +#define mmSDMA0_RLC7_IB_SIZE_BASE_IDX 0 +#define mmSDMA0_RLC7_SKIP_CNTL 0x03a8 +#define mmSDMA0_RLC7_SKIP_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC7_CONTEXT_STATUS 0x03a9 +#define 
mmSDMA0_RLC7_CONTEXT_STATUS_BASE_IDX 0 +#define mmSDMA0_RLC7_DOORBELL 0x03aa +#define mmSDMA0_RLC7_DOORBELL_BASE_IDX 0 +#define mmSDMA0_RLC7_STATUS 0x03c0 +#define mmSDMA0_RLC7_STATUS_BASE_IDX 0 +#define mmSDMA0_RLC7_DOORBELL_LOG 0x03c1 +#define mmSDMA0_RLC7_DOORBELL_LOG_BASE_IDX 0 +#define mmSDMA0_RLC7_WATERMARK 0x03c2 +#define mmSDMA0_RLC7_WATERMARK_BASE_IDX 0 +#define mmSDMA0_RLC7_DOORBELL_OFFSET 0x03c3 +#define mmSDMA0_RLC7_DOORBELL_OFFSET_BASE_IDX 0 +#define mmSDMA0_RLC7_CSA_ADDR_LO 0x03c4 +#define mmSDMA0_RLC7_CSA_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC7_CSA_ADDR_HI 0x03c5 +#define mmSDMA0_RLC7_CSA_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC7_IB_SUB_REMAIN 0x03c7 +#define mmSDMA0_RLC7_IB_SUB_REMAIN_BASE_IDX 0 +#define mmSDMA0_RLC7_PREEMPT 0x03c8 +#define mmSDMA0_RLC7_PREEMPT_BASE_IDX 0 +#define mmSDMA0_RLC7_DUMMY_REG 0x03c9 +#define mmSDMA0_RLC7_DUMMY_REG_BASE_IDX 0 +#define mmSDMA0_RLC7_RB_WPTR_POLL_ADDR_HI 0x03ca +#define mmSDMA0_RLC7_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0 +#define mmSDMA0_RLC7_RB_WPTR_POLL_ADDR_LO 0x03cb +#define mmSDMA0_RLC7_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0 +#define mmSDMA0_RLC7_RB_AQL_CNTL 0x03cc +#define mmSDMA0_RLC7_RB_AQL_CNTL_BASE_IDX 0 +#define mmSDMA0_RLC7_MINOR_PTR_UPDATE 0x03cd +#define mmSDMA0_RLC7_MINOR_PTR_UPDATE_BASE_IDX 0 +#define mmSDMA0_RLC7_MIDCMD_DATA0 0x03d8 +#define mmSDMA0_RLC7_MIDCMD_DATA0_BASE_IDX 0 +#define mmSDMA0_RLC7_MIDCMD_DATA1 0x03d9 +#define mmSDMA0_RLC7_MIDCMD_DATA1_BASE_IDX 0 +#define mmSDMA0_RLC7_MIDCMD_DATA2 0x03da +#define mmSDMA0_RLC7_MIDCMD_DATA2_BASE_IDX 0 +#define mmSDMA0_RLC7_MIDCMD_DATA3 0x03db +#define mmSDMA0_RLC7_MIDCMD_DATA3_BASE_IDX 0 +#define mmSDMA0_RLC7_MIDCMD_DATA4 0x03dc +#define mmSDMA0_RLC7_MIDCMD_DATA4_BASE_IDX 0 +#define mmSDMA0_RLC7_MIDCMD_DATA5 0x03dd +#define mmSDMA0_RLC7_MIDCMD_DATA5_BASE_IDX 0 +#define mmSDMA0_RLC7_MIDCMD_DATA6 0x03de +#define mmSDMA0_RLC7_MIDCMD_DATA6_BASE_IDX 0 +#define mmSDMA0_RLC7_MIDCMD_DATA7 0x03df +#define mmSDMA0_RLC7_MIDCMD_DATA7_BASE_IDX 0 +#define mmSDMA0_RLC7_MIDCMD_DATA8 0x03e0 +#define mmSDMA0_RLC7_MIDCMD_DATA8_BASE_IDX 0 +#define mmSDMA0_RLC7_MIDCMD_CNTL 0x03e1 +#define mmSDMA0_RLC7_MIDCMD_CNTL_BASE_IDX 0 + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_2_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_2_2_sh_mask.h new file mode 100644 index 000000000000..9feb67b09b63 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_2_2_sh_mask.h @@ -0,0 +1,3002 @@ +/* + * Copyright (C) 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef _sdma0_4_2_2_SH_MASK_HEADER +#define _sdma0_4_2_2_SH_MASK_HEADER + + +// addressBlock: sdma0_sdma0dec +//SDMA0_UCODE_ADDR +#define SDMA0_UCODE_ADDR__VALUE__SHIFT 0x0 +#define SDMA0_UCODE_ADDR__VALUE_MASK 0x00001FFFL +//SDMA0_UCODE_DATA +#define SDMA0_UCODE_DATA__VALUE__SHIFT 0x0 +#define SDMA0_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL +//SDMA0_VM_CNTL +#define SDMA0_VM_CNTL__CMD__SHIFT 0x0 +#define SDMA0_VM_CNTL__CMD_MASK 0x0000000FL +//SDMA0_VM_CTX_LO +#define SDMA0_VM_CTX_LO__ADDR__SHIFT 0x2 +#define SDMA0_VM_CTX_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_VM_CTX_HI +#define SDMA0_VM_CTX_HI__ADDR__SHIFT 0x0 +#define SDMA0_VM_CTX_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_ACTIVE_FCN_ID +#define SDMA0_ACTIVE_FCN_ID__VFID__SHIFT 0x0 +#define SDMA0_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4 +#define SDMA0_ACTIVE_FCN_ID__VF__SHIFT 0x1f +#define SDMA0_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL +#define SDMA0_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L +#define SDMA0_ACTIVE_FCN_ID__VF_MASK 0x80000000L +//SDMA0_VM_CTX_CNTL +#define SDMA0_VM_CTX_CNTL__PRIV__SHIFT 0x0 +#define SDMA0_VM_CTX_CNTL__VMID__SHIFT 0x4 +#define SDMA0_VM_CTX_CNTL__PRIV_MASK 0x00000001L +#define SDMA0_VM_CTX_CNTL__VMID_MASK 0x000000F0L +//SDMA0_VIRT_RESET_REQ +#define SDMA0_VIRT_RESET_REQ__VF__SHIFT 0x0 +#define SDMA0_VIRT_RESET_REQ__PF__SHIFT 0x1f +#define SDMA0_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL +#define SDMA0_VIRT_RESET_REQ__PF_MASK 0x80000000L +//SDMA0_VF_ENABLE +#define SDMA0_VF_ENABLE__VF_ENABLE__SHIFT 0x0 +#define SDMA0_VF_ENABLE__VF_ENABLE_MASK 0x00000001L +//SDMA0_CONTEXT_REG_TYPE0 +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_CNTL__SHIFT 0x0 +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_BASE__SHIFT 0x1 +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_BASE_HI__SHIFT 0x2 +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR__SHIFT 0x3 +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_HI__SHIFT 0x4 +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR__SHIFT 0x5 +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR_HI__SHIFT 0x6 +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR_POLL_CNTL__SHIFT 0x7 +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_ADDR_HI__SHIFT 0x8 +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_ADDR_LO__SHIFT 0x9 +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_CNTL__SHIFT 0xa +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_RPTR__SHIFT 0xb +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_OFFSET__SHIFT 0xc +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_BASE_LO__SHIFT 0xd +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_BASE_HI__SHIFT 0xe +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_SIZE__SHIFT 0xf +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_SKIP_CNTL__SHIFT 0x10 +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_CONTEXT_STATUS__SHIFT 0x11 +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_DOORBELL__SHIFT 0x12 +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_CONTEXT_CNTL__SHIFT 0x13 +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_CNTL_MASK 0x00000001L +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_BASE_MASK 0x00000002L +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_BASE_HI_MASK 0x00000004L +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_MASK 0x00000008L +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_HI_MASK 0x00000010L +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR_MASK 
0x00000020L +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR_HI_MASK 0x00000040L +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_WPTR_POLL_CNTL_MASK 0x00000080L +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_ADDR_HI_MASK 0x00000100L +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_RB_RPTR_ADDR_LO_MASK 0x00000200L +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_CNTL_MASK 0x00000400L +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_RPTR_MASK 0x00000800L +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_OFFSET_MASK 0x00001000L +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_BASE_LO_MASK 0x00002000L +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_BASE_HI_MASK 0x00004000L +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_IB_SIZE_MASK 0x00008000L +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_SKIP_CNTL_MASK 0x00010000L +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_CONTEXT_STATUS_MASK 0x00020000L +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_DOORBELL_MASK 0x00040000L +#define SDMA0_CONTEXT_REG_TYPE0__SDMA0_GFX_CONTEXT_CNTL_MASK 0x00080000L +//SDMA0_CONTEXT_REG_TYPE1 +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_STATUS__SHIFT 0x8 +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DOORBELL_LOG__SHIFT 0x9 +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_WATERMARK__SHIFT 0xa +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DOORBELL_OFFSET__SHIFT 0xb +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_CSA_ADDR_LO__SHIFT 0xc +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_CSA_ADDR_HI__SHIFT 0xd +#define SDMA0_CONTEXT_REG_TYPE1__VOID_REG2__SHIFT 0xe +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_IB_SUB_REMAIN__SHIFT 0xf +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_PREEMPT__SHIFT 0x10 +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DUMMY_REG__SHIFT 0x11 +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_WPTR_POLL_ADDR_HI__SHIFT 0x12 +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_WPTR_POLL_ADDR_LO__SHIFT 0x13 +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_AQL_CNTL__SHIFT 0x14 +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_MINOR_PTR_UPDATE__SHIFT 0x15 +#define SDMA0_CONTEXT_REG_TYPE1__RESERVED__SHIFT 0x16 +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_STATUS_MASK 0x00000100L +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DOORBELL_LOG_MASK 0x00000200L +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_WATERMARK_MASK 0x00000400L +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DOORBELL_OFFSET_MASK 0x00000800L +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_CSA_ADDR_LO_MASK 0x00001000L +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_CSA_ADDR_HI_MASK 0x00002000L +#define SDMA0_CONTEXT_REG_TYPE1__VOID_REG2_MASK 0x00004000L +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_IB_SUB_REMAIN_MASK 0x00008000L +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_PREEMPT_MASK 0x00010000L +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_DUMMY_REG_MASK 0x00020000L +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_WPTR_POLL_ADDR_HI_MASK 0x00040000L +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_WPTR_POLL_ADDR_LO_MASK 0x00080000L +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_RB_AQL_CNTL_MASK 0x00100000L +#define SDMA0_CONTEXT_REG_TYPE1__SDMA0_GFX_MINOR_PTR_UPDATE_MASK 0x00200000L +#define SDMA0_CONTEXT_REG_TYPE1__RESERVED_MASK 0xFFC00000L +//SDMA0_CONTEXT_REG_TYPE2 +#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA0__SHIFT 0x0 +#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA1__SHIFT 0x1 +#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA2__SHIFT 0x2 +#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA3__SHIFT 0x3 +#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA4__SHIFT 0x4 +#define 
SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA5__SHIFT 0x5 +#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA6__SHIFT 0x6 +#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA7__SHIFT 0x7 +#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA8__SHIFT 0x8 +#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_CNTL__SHIFT 0x9 +#define SDMA0_CONTEXT_REG_TYPE2__RESERVED__SHIFT 0xa +#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA0_MASK 0x00000001L +#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA1_MASK 0x00000002L +#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA2_MASK 0x00000004L +#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA3_MASK 0x00000008L +#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA4_MASK 0x00000010L +#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA5_MASK 0x00000020L +#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA6_MASK 0x00000040L +#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA7_MASK 0x00000080L +#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_DATA8_MASK 0x00000100L +#define SDMA0_CONTEXT_REG_TYPE2__SDMA0_GFX_MIDCMD_CNTL_MASK 0x00000200L +#define SDMA0_CONTEXT_REG_TYPE2__RESERVED_MASK 0xFFFFFC00L +//SDMA0_CONTEXT_REG_TYPE3 +#define SDMA0_CONTEXT_REG_TYPE3__RESERVED__SHIFT 0x0 +#define SDMA0_CONTEXT_REG_TYPE3__RESERVED_MASK 0xFFFFFFFFL +//SDMA0_PUB_REG_TYPE0 +#define SDMA0_PUB_REG_TYPE0__SDMA0_UCODE_ADDR__SHIFT 0x0 +#define SDMA0_PUB_REG_TYPE0__SDMA0_UCODE_DATA__SHIFT 0x1 +#define SDMA0_PUB_REG_TYPE0__RESERVED3__SHIFT 0x3 +#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CNTL__SHIFT 0x4 +#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_LO__SHIFT 0x5 +#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_HI__SHIFT 0x6 +#define SDMA0_PUB_REG_TYPE0__SDMA0_ACTIVE_FCN_ID__SHIFT 0x7 +#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_CNTL__SHIFT 0x8 +#define SDMA0_PUB_REG_TYPE0__SDMA0_VIRT_RESET_REQ__SHIFT 0x9 +#define SDMA0_PUB_REG_TYPE0__RESERVED10__SHIFT 0xa +#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE0__SHIFT 0xb +#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE1__SHIFT 0xc +#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE2__SHIFT 0xd +#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE3__SHIFT 0xe +#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE0__SHIFT 0xf +#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE1__SHIFT 0x10 +#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE2__SHIFT 0x11 +#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE3__SHIFT 0x12 +#define SDMA0_PUB_REG_TYPE0__SDMA0_MMHUB_CNTL__SHIFT 0x13 +#define SDMA0_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY__SHIFT 0x15 +#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_GROUP_BOUNDARY__SHIFT 0x19 +#define SDMA0_PUB_REG_TYPE0__SDMA0_POWER_CNTL__SHIFT 0x1a +#define SDMA0_PUB_REG_TYPE0__SDMA0_CLK_CTRL__SHIFT 0x1b +#define SDMA0_PUB_REG_TYPE0__SDMA0_CNTL__SHIFT 0x1c +#define SDMA0_PUB_REG_TYPE0__SDMA0_CHICKEN_BITS__SHIFT 0x1d +#define SDMA0_PUB_REG_TYPE0__SDMA0_GB_ADDR_CONFIG__SHIFT 0x1e +#define SDMA0_PUB_REG_TYPE0__SDMA0_GB_ADDR_CONFIG_READ__SHIFT 0x1f +#define SDMA0_PUB_REG_TYPE0__SDMA0_UCODE_ADDR_MASK 0x00000001L +#define SDMA0_PUB_REG_TYPE0__SDMA0_UCODE_DATA_MASK 0x00000002L +#define SDMA0_PUB_REG_TYPE0__RESERVED3_MASK 0x00000008L +#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CNTL_MASK 0x00000010L +#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_LO_MASK 0x00000020L +#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_HI_MASK 0x00000040L +#define SDMA0_PUB_REG_TYPE0__SDMA0_ACTIVE_FCN_ID_MASK 0x00000080L +#define SDMA0_PUB_REG_TYPE0__SDMA0_VM_CTX_CNTL_MASK 0x00000100L +#define SDMA0_PUB_REG_TYPE0__SDMA0_VIRT_RESET_REQ_MASK 
0x00000200L +#define SDMA0_PUB_REG_TYPE0__RESERVED10_MASK 0x00000400L +#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE0_MASK 0x00000800L +#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE1_MASK 0x00001000L +#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE2_MASK 0x00002000L +#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_REG_TYPE3_MASK 0x00004000L +#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE0_MASK 0x00008000L +#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE1_MASK 0x00010000L +#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE2_MASK 0x00020000L +#define SDMA0_PUB_REG_TYPE0__SDMA0_PUB_REG_TYPE3_MASK 0x00040000L +#define SDMA0_PUB_REG_TYPE0__SDMA0_MMHUB_CNTL_MASK 0x00080000L +#define SDMA0_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY_MASK 0x01E00000L +#define SDMA0_PUB_REG_TYPE0__SDMA0_CONTEXT_GROUP_BOUNDARY_MASK 0x02000000L +#define SDMA0_PUB_REG_TYPE0__SDMA0_POWER_CNTL_MASK 0x04000000L +#define SDMA0_PUB_REG_TYPE0__SDMA0_CLK_CTRL_MASK 0x08000000L +#define SDMA0_PUB_REG_TYPE0__SDMA0_CNTL_MASK 0x10000000L +#define SDMA0_PUB_REG_TYPE0__SDMA0_CHICKEN_BITS_MASK 0x20000000L +#define SDMA0_PUB_REG_TYPE0__SDMA0_GB_ADDR_CONFIG_MASK 0x40000000L +#define SDMA0_PUB_REG_TYPE0__SDMA0_GB_ADDR_CONFIG_READ_MASK 0x80000000L +//SDMA0_PUB_REG_TYPE1 +#define SDMA0_PUB_REG_TYPE1__SDMA0_RB_RPTR_FETCH_HI__SHIFT 0x0 +#define SDMA0_PUB_REG_TYPE1__SDMA0_SEM_WAIT_FAIL_TIMER_CNTL__SHIFT 0x1 +#define SDMA0_PUB_REG_TYPE1__SDMA0_RB_RPTR_FETCH__SHIFT 0x2 +#define SDMA0_PUB_REG_TYPE1__SDMA0_IB_OFFSET_FETCH__SHIFT 0x3 +#define SDMA0_PUB_REG_TYPE1__SDMA0_PROGRAM__SHIFT 0x4 +#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS_REG__SHIFT 0x5 +#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS1_REG__SHIFT 0x6 +#define SDMA0_PUB_REG_TYPE1__SDMA0_RD_BURST_CNTL__SHIFT 0x7 +#define SDMA0_PUB_REG_TYPE1__SDMA0_HBM_PAGE_CONFIG__SHIFT 0x8 +#define SDMA0_PUB_REG_TYPE1__SDMA0_UCODE_CHECKSUM__SHIFT 0x9 +#define SDMA0_PUB_REG_TYPE1__SDMA0_F32_CNTL__SHIFT 0xa +#define SDMA0_PUB_REG_TYPE1__SDMA0_FREEZE__SHIFT 0xb +#define SDMA0_PUB_REG_TYPE1__SDMA0_PHASE0_QUANTUM__SHIFT 0xc +#define SDMA0_PUB_REG_TYPE1__SDMA0_PHASE1_QUANTUM__SHIFT 0xd +#define SDMA0_PUB_REG_TYPE1__SDMA_POWER_GATING__SHIFT 0xe +#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG__SHIFT 0xf +#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_WRITE__SHIFT 0x10 +#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_READ__SHIFT 0x11 +#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_CONFIG__SHIFT 0x12 +#define SDMA0_PUB_REG_TYPE1__SDMA0_BA_THRESHOLD__SHIFT 0x13 +#define SDMA0_PUB_REG_TYPE1__SDMA0_ID__SHIFT 0x14 +#define SDMA0_PUB_REG_TYPE1__SDMA0_VERSION__SHIFT 0x15 +#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_COUNTER__SHIFT 0x16 +#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_COUNTER_CLEAR__SHIFT 0x17 +#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS2_REG__SHIFT 0x18 +#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_CNTL__SHIFT 0x19 +#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_PREOP_LO__SHIFT 0x1a +#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_PREOP_HI__SHIFT 0x1b +#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_CNTL__SHIFT 0x1c +#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_WATERMK__SHIFT 0x1d +#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_RD_STATUS__SHIFT 0x1e +#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_WR_STATUS__SHIFT 0x1f +#define SDMA0_PUB_REG_TYPE1__SDMA0_RB_RPTR_FETCH_HI_MASK 0x00000001L +#define SDMA0_PUB_REG_TYPE1__SDMA0_SEM_WAIT_FAIL_TIMER_CNTL_MASK 0x00000002L +#define SDMA0_PUB_REG_TYPE1__SDMA0_RB_RPTR_FETCH_MASK 0x00000004L +#define SDMA0_PUB_REG_TYPE1__SDMA0_IB_OFFSET_FETCH_MASK 0x00000008L +#define SDMA0_PUB_REG_TYPE1__SDMA0_PROGRAM_MASK 0x00000010L +#define 
SDMA0_PUB_REG_TYPE1__SDMA0_STATUS_REG_MASK 0x00000020L +#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS1_REG_MASK 0x00000040L +#define SDMA0_PUB_REG_TYPE1__SDMA0_RD_BURST_CNTL_MASK 0x00000080L +#define SDMA0_PUB_REG_TYPE1__SDMA0_HBM_PAGE_CONFIG_MASK 0x00000100L +#define SDMA0_PUB_REG_TYPE1__SDMA0_UCODE_CHECKSUM_MASK 0x00000200L +#define SDMA0_PUB_REG_TYPE1__SDMA0_F32_CNTL_MASK 0x00000400L +#define SDMA0_PUB_REG_TYPE1__SDMA0_FREEZE_MASK 0x00000800L +#define SDMA0_PUB_REG_TYPE1__SDMA0_PHASE0_QUANTUM_MASK 0x00001000L +#define SDMA0_PUB_REG_TYPE1__SDMA0_PHASE1_QUANTUM_MASK 0x00002000L +#define SDMA0_PUB_REG_TYPE1__SDMA_POWER_GATING_MASK 0x00004000L +#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG_MASK 0x00008000L +#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_WRITE_MASK 0x00010000L +#define SDMA0_PUB_REG_TYPE1__SDMA_PGFSM_READ_MASK 0x00020000L +#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_CONFIG_MASK 0x00040000L +#define SDMA0_PUB_REG_TYPE1__SDMA0_BA_THRESHOLD_MASK 0x00080000L +#define SDMA0_PUB_REG_TYPE1__SDMA0_ID_MASK 0x00100000L +#define SDMA0_PUB_REG_TYPE1__SDMA0_VERSION_MASK 0x00200000L +#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_COUNTER_MASK 0x00400000L +#define SDMA0_PUB_REG_TYPE1__SDMA0_EDC_COUNTER_CLEAR_MASK 0x00800000L +#define SDMA0_PUB_REG_TYPE1__SDMA0_STATUS2_REG_MASK 0x01000000L +#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_CNTL_MASK 0x02000000L +#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_PREOP_LO_MASK 0x04000000L +#define SDMA0_PUB_REG_TYPE1__SDMA0_ATOMIC_PREOP_HI_MASK 0x08000000L +#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_CNTL_MASK 0x10000000L +#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_WATERMK_MASK 0x20000000L +#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_RD_STATUS_MASK 0x40000000L +#define SDMA0_PUB_REG_TYPE1__SDMA0_UTCL1_WR_STATUS_MASK 0x80000000L +//SDMA0_PUB_REG_TYPE2 +#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV0__SHIFT 0x0 +#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV1__SHIFT 0x1 +#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV2__SHIFT 0x2 +#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_XNACK0__SHIFT 0x3 +#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_XNACK1__SHIFT 0x4 +#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_XNACK0__SHIFT 0x5 +#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_XNACK1__SHIFT 0x6 +#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_TIMEOUT__SHIFT 0x7 +#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_PAGE__SHIFT 0x8 +#define SDMA0_PUB_REG_TYPE2__SDMA0_POWER_CNTL_IDLE__SHIFT 0x9 +#define SDMA0_PUB_REG_TYPE2__SDMA0_RELAX_ORDERING_LUT__SHIFT 0xa +#define SDMA0_PUB_REG_TYPE2__SDMA0_CHICKEN_BITS_2__SHIFT 0xb +#define SDMA0_PUB_REG_TYPE2__SDMA0_STATUS3_REG__SHIFT 0xc +#define SDMA0_PUB_REG_TYPE2__SDMA0_PHYSICAL_ADDR_LO__SHIFT 0xd +#define SDMA0_PUB_REG_TYPE2__SDMA0_PHYSICAL_ADDR_HI__SHIFT 0xe +#define SDMA0_PUB_REG_TYPE2__SDMA0_PHASE2_QUANTUM__SHIFT 0xf +#define SDMA0_PUB_REG_TYPE2__SDMA0_ERROR_LOG__SHIFT 0x10 +#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG0__SHIFT 0x11 +#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG1__SHIFT 0x12 +#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG2__SHIFT 0x13 +#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG3__SHIFT 0x14 +#define SDMA0_PUB_REG_TYPE2__SDMA0_F32_COUNTER__SHIFT 0x15 +#define SDMA0_PUB_REG_TYPE2__SDMA0_UNBREAKABLE__SHIFT 0x16 +#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFMON_CNTL__SHIFT 0x17 +#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER0_RESULT__SHIFT 0x18 +#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER1_RESULT__SHIFT 0x19 +#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__SHIFT 0x1a +#define SDMA0_PUB_REG_TYPE2__SDMA0_CRD_CNTL__SHIFT 0x1b +#define 
SDMA0_PUB_REG_TYPE2__RESERVED28__SHIFT 0x1c +#define SDMA0_PUB_REG_TYPE2__SDMA0_GPU_IOV_VIOLATION_LOG__SHIFT 0x1d +#define SDMA0_PUB_REG_TYPE2__SDMA0_ULV_CNTL__SHIFT 0x1e +#define SDMA0_PUB_REG_TYPE2__RESERVED__SHIFT 0x1f +#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV0_MASK 0x00000001L +#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV1_MASK 0x00000002L +#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_INV2_MASK 0x00000004L +#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_XNACK0_MASK 0x00000008L +#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_RD_XNACK1_MASK 0x00000010L +#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_XNACK0_MASK 0x00000020L +#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_WR_XNACK1_MASK 0x00000040L +#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_TIMEOUT_MASK 0x00000080L +#define SDMA0_PUB_REG_TYPE2__SDMA0_UTCL1_PAGE_MASK 0x00000100L +#define SDMA0_PUB_REG_TYPE2__SDMA0_POWER_CNTL_IDLE_MASK 0x00000200L +#define SDMA0_PUB_REG_TYPE2__SDMA0_RELAX_ORDERING_LUT_MASK 0x00000400L +#define SDMA0_PUB_REG_TYPE2__SDMA0_CHICKEN_BITS_2_MASK 0x00000800L +#define SDMA0_PUB_REG_TYPE2__SDMA0_STATUS3_REG_MASK 0x00001000L +#define SDMA0_PUB_REG_TYPE2__SDMA0_PHYSICAL_ADDR_LO_MASK 0x00002000L +#define SDMA0_PUB_REG_TYPE2__SDMA0_PHYSICAL_ADDR_HI_MASK 0x00004000L +#define SDMA0_PUB_REG_TYPE2__SDMA0_PHASE2_QUANTUM_MASK 0x00008000L +#define SDMA0_PUB_REG_TYPE2__SDMA0_ERROR_LOG_MASK 0x00010000L +#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG0_MASK 0x00020000L +#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG1_MASK 0x00040000L +#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG2_MASK 0x00080000L +#define SDMA0_PUB_REG_TYPE2__SDMA0_PUB_DUMMY_REG3_MASK 0x00100000L +#define SDMA0_PUB_REG_TYPE2__SDMA0_F32_COUNTER_MASK 0x00200000L +#define SDMA0_PUB_REG_TYPE2__SDMA0_UNBREAKABLE_MASK 0x00400000L +#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFMON_CNTL_MASK 0x00800000L +#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER0_RESULT_MASK 0x01000000L +#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER1_RESULT_MASK 0x02000000L +#define SDMA0_PUB_REG_TYPE2__SDMA0_PERFCOUNTER_TAG_DELAY_RANGE_MASK 0x04000000L +#define SDMA0_PUB_REG_TYPE2__SDMA0_CRD_CNTL_MASK 0x08000000L +#define SDMA0_PUB_REG_TYPE2__RESERVED28_MASK 0x10000000L +#define SDMA0_PUB_REG_TYPE2__SDMA0_GPU_IOV_VIOLATION_LOG_MASK 0x20000000L +#define SDMA0_PUB_REG_TYPE2__SDMA0_ULV_CNTL_MASK 0x40000000L +#define SDMA0_PUB_REG_TYPE2__RESERVED_MASK 0x80000000L +//SDMA0_PUB_REG_TYPE3 +#define SDMA0_PUB_REG_TYPE3__SDMA0_EA_DBIT_ADDR_DATA__SHIFT 0x0 +#define SDMA0_PUB_REG_TYPE3__SDMA0_EA_DBIT_ADDR_INDEX__SHIFT 0x1 +#define SDMA0_PUB_REG_TYPE3__SDMA0_GPU_IOV_VIOLATION_LOG2__SHIFT 0x2 +#define SDMA0_PUB_REG_TYPE3__RESERVED__SHIFT 0x3 +#define SDMA0_PUB_REG_TYPE3__SDMA0_EA_DBIT_ADDR_DATA_MASK 0x00000001L +#define SDMA0_PUB_REG_TYPE3__SDMA0_EA_DBIT_ADDR_INDEX_MASK 0x00000002L +#define SDMA0_PUB_REG_TYPE3__SDMA0_GPU_IOV_VIOLATION_LOG2_MASK 0x00000004L +#define SDMA0_PUB_REG_TYPE3__RESERVED_MASK 0xFFFFFFF8L +//SDMA0_MMHUB_CNTL +#define SDMA0_MMHUB_CNTL__UNIT_ID__SHIFT 0x0 +#define SDMA0_MMHUB_CNTL__UNIT_ID_MASK 0x0000003FL +//SDMA0_CONTEXT_GROUP_BOUNDARY +#define SDMA0_CONTEXT_GROUP_BOUNDARY__RESERVED__SHIFT 0x0 +#define SDMA0_CONTEXT_GROUP_BOUNDARY__RESERVED_MASK 0xFFFFFFFFL +//SDMA0_POWER_CNTL +#define SDMA0_POWER_CNTL__PG_CNTL_ENABLE__SHIFT 0x0 +#define SDMA0_POWER_CNTL__EXT_PG_POWER_ON_REQ__SHIFT 0x1 +#define SDMA0_POWER_CNTL__EXT_PG_POWER_OFF_REQ__SHIFT 0x2 +#define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME__SHIFT 0x3 +#define SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE__SHIFT 0x8 +#define 
SDMA0_POWER_CNTL__MEM_POWER_LS_EN__SHIFT 0x9 +#define SDMA0_POWER_CNTL__MEM_POWER_DS_EN__SHIFT 0xa +#define SDMA0_POWER_CNTL__MEM_POWER_SD_EN__SHIFT 0xb +#define SDMA0_POWER_CNTL__MEM_POWER_DELAY__SHIFT 0xc +#define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME__SHIFT 0x1a +#define SDMA0_POWER_CNTL__PG_CNTL_ENABLE_MASK 0x00000001L +#define SDMA0_POWER_CNTL__EXT_PG_POWER_ON_REQ_MASK 0x00000002L +#define SDMA0_POWER_CNTL__EXT_PG_POWER_OFF_REQ_MASK 0x00000004L +#define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK 0x000000F8L +#define SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK 0x00000100L +#define SDMA0_POWER_CNTL__MEM_POWER_LS_EN_MASK 0x00000200L +#define SDMA0_POWER_CNTL__MEM_POWER_DS_EN_MASK 0x00000400L +#define SDMA0_POWER_CNTL__MEM_POWER_SD_EN_MASK 0x00000800L +#define SDMA0_POWER_CNTL__MEM_POWER_DELAY_MASK 0x003FF000L +#define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L +//SDMA0_CLK_CTRL +#define SDMA0_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define SDMA0_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define SDMA0_CLK_CTRL__RESERVED__SHIFT 0xc +#define SDMA0_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18 +#define SDMA0_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19 +#define SDMA0_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a +#define SDMA0_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b +#define SDMA0_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c +#define SDMA0_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d +#define SDMA0_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e +#define SDMA0_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f +#define SDMA0_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define SDMA0_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define SDMA0_CLK_CTRL__RESERVED_MASK 0x00FFF000L +#define SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L +#define SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L +#define SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L +#define SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L +#define SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L +#define SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L +#define SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L +#define SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L +//SDMA0_CNTL +#define SDMA0_CNTL__TRAP_ENABLE__SHIFT 0x0 +#define SDMA0_CNTL__UTC_L1_ENABLE__SHIFT 0x1 +#define SDMA0_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x2 +#define SDMA0_CNTL__DATA_SWAP_ENABLE__SHIFT 0x3 +#define SDMA0_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x4 +#define SDMA0_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x5 +#define SDMA0_CNTL__MIDCMD_WORLDSWITCH_ENABLE__SHIFT 0x11 +#define SDMA0_CNTL__AUTO_CTXSW_ENABLE__SHIFT 0x12 +#define SDMA0_CNTL__CTXEMPTY_INT_ENABLE__SHIFT 0x1c +#define SDMA0_CNTL__FROZEN_INT_ENABLE__SHIFT 0x1d +#define SDMA0_CNTL__IB_PREEMPT_INT_ENABLE__SHIFT 0x1e +#define SDMA0_CNTL__TRAP_ENABLE_MASK 0x00000001L +#define SDMA0_CNTL__UTC_L1_ENABLE_MASK 0x00000002L +#define SDMA0_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L +#define SDMA0_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L +#define SDMA0_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L +#define SDMA0_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00000020L +#define SDMA0_CNTL__MIDCMD_WORLDSWITCH_ENABLE_MASK 0x00020000L +#define SDMA0_CNTL__AUTO_CTXSW_ENABLE_MASK 0x00040000L +#define SDMA0_CNTL__CTXEMPTY_INT_ENABLE_MASK 0x10000000L +#define SDMA0_CNTL__FROZEN_INT_ENABLE_MASK 0x20000000L +#define SDMA0_CNTL__IB_PREEMPT_INT_ENABLE_MASK 0x40000000L +//SDMA0_CHICKEN_BITS +#define SDMA0_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE__SHIFT 0x0 +#define SDMA0_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE__SHIFT 0x1 +#define SDMA0_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE__SHIFT 0x2 +#define 
SDMA0_CHICKEN_BITS__WRITE_BURST_LENGTH__SHIFT 0x8 +#define SDMA0_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE__SHIFT 0xa +#define SDMA0_CHICKEN_BITS__COPY_OVERLAP_ENABLE__SHIFT 0x10 +#define SDMA0_CHICKEN_BITS__RAW_CHECK_ENABLE__SHIFT 0x11 +#define SDMA0_CHICKEN_BITS__SRBM_POLL_RETRYING__SHIFT 0x14 +#define SDMA0_CHICKEN_BITS__CG_STATUS_OUTPUT__SHIFT 0x17 +#define SDMA0_CHICKEN_BITS__TIME_BASED_QOS__SHIFT 0x19 +#define SDMA0_CHICKEN_BITS__CE_AFIFO_WATERMARK__SHIFT 0x1a +#define SDMA0_CHICKEN_BITS__CE_DFIFO_WATERMARK__SHIFT 0x1c +#define SDMA0_CHICKEN_BITS__CE_LFIFO_WATERMARK__SHIFT 0x1e +#define SDMA0_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE_MASK 0x00000001L +#define SDMA0_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE_MASK 0x00000002L +#define SDMA0_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE_MASK 0x00000004L +#define SDMA0_CHICKEN_BITS__WRITE_BURST_LENGTH_MASK 0x00000300L +#define SDMA0_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE_MASK 0x00001C00L +#define SDMA0_CHICKEN_BITS__COPY_OVERLAP_ENABLE_MASK 0x00010000L +#define SDMA0_CHICKEN_BITS__RAW_CHECK_ENABLE_MASK 0x00020000L +#define SDMA0_CHICKEN_BITS__SRBM_POLL_RETRYING_MASK 0x00100000L +#define SDMA0_CHICKEN_BITS__CG_STATUS_OUTPUT_MASK 0x00800000L +#define SDMA0_CHICKEN_BITS__TIME_BASED_QOS_MASK 0x02000000L +#define SDMA0_CHICKEN_BITS__CE_AFIFO_WATERMARK_MASK 0x0C000000L +#define SDMA0_CHICKEN_BITS__CE_DFIFO_WATERMARK_MASK 0x30000000L +#define SDMA0_CHICKEN_BITS__CE_LFIFO_WATERMARK_MASK 0xC0000000L +//SDMA0_GB_ADDR_CONFIG +#define SDMA0_GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0 +#define SDMA0_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3 +#define SDMA0_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x8 +#define SDMA0_GB_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc +#define SDMA0_GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13 +#define SDMA0_GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L +#define SDMA0_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L +#define SDMA0_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L +#define SDMA0_GB_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L +#define SDMA0_GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L +//SDMA0_GB_ADDR_CONFIG_READ +#define SDMA0_GB_ADDR_CONFIG_READ__NUM_PIPES__SHIFT 0x0 +#define SDMA0_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE__SHIFT 0x3 +#define SDMA0_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE__SHIFT 0x8 +#define SDMA0_GB_ADDR_CONFIG_READ__NUM_BANKS__SHIFT 0xc +#define SDMA0_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES__SHIFT 0x13 +#define SDMA0_GB_ADDR_CONFIG_READ__NUM_PIPES_MASK 0x00000007L +#define SDMA0_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L +#define SDMA0_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE_MASK 0x00000700L +#define SDMA0_GB_ADDR_CONFIG_READ__NUM_BANKS_MASK 0x00007000L +#define SDMA0_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES_MASK 0x00180000L +//SDMA0_RB_RPTR_FETCH_HI +#define SDMA0_RB_RPTR_FETCH_HI__OFFSET__SHIFT 0x0 +#define SDMA0_RB_RPTR_FETCH_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_SEM_WAIT_FAIL_TIMER_CNTL +#define SDMA0_SEM_WAIT_FAIL_TIMER_CNTL__TIMER__SHIFT 0x0 +#define SDMA0_SEM_WAIT_FAIL_TIMER_CNTL__TIMER_MASK 0xFFFFFFFFL +//SDMA0_RB_RPTR_FETCH +#define SDMA0_RB_RPTR_FETCH__OFFSET__SHIFT 0x2 +#define SDMA0_RB_RPTR_FETCH__OFFSET_MASK 0xFFFFFFFCL +//SDMA0_IB_OFFSET_FETCH +#define SDMA0_IB_OFFSET_FETCH__OFFSET__SHIFT 0x2 +#define SDMA0_IB_OFFSET_FETCH__OFFSET_MASK 0x003FFFFCL +//SDMA0_PROGRAM +#define SDMA0_PROGRAM__STREAM__SHIFT 0x0 +#define SDMA0_PROGRAM__STREAM_MASK 0xFFFFFFFFL +//SDMA0_STATUS_REG +#define SDMA0_STATUS_REG__IDLE__SHIFT 0x0 +#define SDMA0_STATUS_REG__REG_IDLE__SHIFT 0x1 
+#define SDMA0_STATUS_REG__RB_EMPTY__SHIFT 0x2 +#define SDMA0_STATUS_REG__RB_FULL__SHIFT 0x3 +#define SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT 0x4 +#define SDMA0_STATUS_REG__RB_CMD_FULL__SHIFT 0x5 +#define SDMA0_STATUS_REG__IB_CMD_IDLE__SHIFT 0x6 +#define SDMA0_STATUS_REG__IB_CMD_FULL__SHIFT 0x7 +#define SDMA0_STATUS_REG__BLOCK_IDLE__SHIFT 0x8 +#define SDMA0_STATUS_REG__INSIDE_IB__SHIFT 0x9 +#define SDMA0_STATUS_REG__EX_IDLE__SHIFT 0xa +#define SDMA0_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE__SHIFT 0xb +#define SDMA0_STATUS_REG__PACKET_READY__SHIFT 0xc +#define SDMA0_STATUS_REG__MC_WR_IDLE__SHIFT 0xd +#define SDMA0_STATUS_REG__SRBM_IDLE__SHIFT 0xe +#define SDMA0_STATUS_REG__CONTEXT_EMPTY__SHIFT 0xf +#define SDMA0_STATUS_REG__DELTA_RPTR_FULL__SHIFT 0x10 +#define SDMA0_STATUS_REG__RB_MC_RREQ_IDLE__SHIFT 0x11 +#define SDMA0_STATUS_REG__IB_MC_RREQ_IDLE__SHIFT 0x12 +#define SDMA0_STATUS_REG__MC_RD_IDLE__SHIFT 0x13 +#define SDMA0_STATUS_REG__DELTA_RPTR_EMPTY__SHIFT 0x14 +#define SDMA0_STATUS_REG__MC_RD_RET_STALL__SHIFT 0x15 +#define SDMA0_STATUS_REG__MC_RD_NO_POLL_IDLE__SHIFT 0x16 +#define SDMA0_STATUS_REG__PREV_CMD_IDLE__SHIFT 0x19 +#define SDMA0_STATUS_REG__SEM_IDLE__SHIFT 0x1a +#define SDMA0_STATUS_REG__SEM_REQ_STALL__SHIFT 0x1b +#define SDMA0_STATUS_REG__SEM_RESP_STATE__SHIFT 0x1c +#define SDMA0_STATUS_REG__INT_IDLE__SHIFT 0x1e +#define SDMA0_STATUS_REG__INT_REQ_STALL__SHIFT 0x1f +#define SDMA0_STATUS_REG__IDLE_MASK 0x00000001L +#define SDMA0_STATUS_REG__REG_IDLE_MASK 0x00000002L +#define SDMA0_STATUS_REG__RB_EMPTY_MASK 0x00000004L +#define SDMA0_STATUS_REG__RB_FULL_MASK 0x00000008L +#define SDMA0_STATUS_REG__RB_CMD_IDLE_MASK 0x00000010L +#define SDMA0_STATUS_REG__RB_CMD_FULL_MASK 0x00000020L +#define SDMA0_STATUS_REG__IB_CMD_IDLE_MASK 0x00000040L +#define SDMA0_STATUS_REG__IB_CMD_FULL_MASK 0x00000080L +#define SDMA0_STATUS_REG__BLOCK_IDLE_MASK 0x00000100L +#define SDMA0_STATUS_REG__INSIDE_IB_MASK 0x00000200L +#define SDMA0_STATUS_REG__EX_IDLE_MASK 0x00000400L +#define SDMA0_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE_MASK 0x00000800L +#define SDMA0_STATUS_REG__PACKET_READY_MASK 0x00001000L +#define SDMA0_STATUS_REG__MC_WR_IDLE_MASK 0x00002000L +#define SDMA0_STATUS_REG__SRBM_IDLE_MASK 0x00004000L +#define SDMA0_STATUS_REG__CONTEXT_EMPTY_MASK 0x00008000L +#define SDMA0_STATUS_REG__DELTA_RPTR_FULL_MASK 0x00010000L +#define SDMA0_STATUS_REG__RB_MC_RREQ_IDLE_MASK 0x00020000L +#define SDMA0_STATUS_REG__IB_MC_RREQ_IDLE_MASK 0x00040000L +#define SDMA0_STATUS_REG__MC_RD_IDLE_MASK 0x00080000L +#define SDMA0_STATUS_REG__DELTA_RPTR_EMPTY_MASK 0x00100000L +#define SDMA0_STATUS_REG__MC_RD_RET_STALL_MASK 0x00200000L +#define SDMA0_STATUS_REG__MC_RD_NO_POLL_IDLE_MASK 0x00400000L +#define SDMA0_STATUS_REG__PREV_CMD_IDLE_MASK 0x02000000L +#define SDMA0_STATUS_REG__SEM_IDLE_MASK 0x04000000L +#define SDMA0_STATUS_REG__SEM_REQ_STALL_MASK 0x08000000L +#define SDMA0_STATUS_REG__SEM_RESP_STATE_MASK 0x30000000L +#define SDMA0_STATUS_REG__INT_IDLE_MASK 0x40000000L +#define SDMA0_STATUS_REG__INT_REQ_STALL_MASK 0x80000000L +//SDMA0_STATUS1_REG +#define SDMA0_STATUS1_REG__CE_WREQ_IDLE__SHIFT 0x0 +#define SDMA0_STATUS1_REG__CE_WR_IDLE__SHIFT 0x1 +#define SDMA0_STATUS1_REG__CE_SPLIT_IDLE__SHIFT 0x2 +#define SDMA0_STATUS1_REG__CE_RREQ_IDLE__SHIFT 0x3 +#define SDMA0_STATUS1_REG__CE_OUT_IDLE__SHIFT 0x4 +#define SDMA0_STATUS1_REG__CE_IN_IDLE__SHIFT 0x5 +#define SDMA0_STATUS1_REG__CE_DST_IDLE__SHIFT 0x6 +#define SDMA0_STATUS1_REG__CE_CMD_IDLE__SHIFT 0x9 +#define SDMA0_STATUS1_REG__CE_AFIFO_FULL__SHIFT 0xa +#define 
SDMA0_STATUS1_REG__CE_INFO_FULL__SHIFT 0xd +#define SDMA0_STATUS1_REG__CE_INFO1_FULL__SHIFT 0xe +#define SDMA0_STATUS1_REG__EX_START__SHIFT 0xf +#define SDMA0_STATUS1_REG__CE_RD_STALL__SHIFT 0x11 +#define SDMA0_STATUS1_REG__CE_WR_STALL__SHIFT 0x12 +#define SDMA0_STATUS1_REG__CE_WREQ_IDLE_MASK 0x00000001L +#define SDMA0_STATUS1_REG__CE_WR_IDLE_MASK 0x00000002L +#define SDMA0_STATUS1_REG__CE_SPLIT_IDLE_MASK 0x00000004L +#define SDMA0_STATUS1_REG__CE_RREQ_IDLE_MASK 0x00000008L +#define SDMA0_STATUS1_REG__CE_OUT_IDLE_MASK 0x00000010L +#define SDMA0_STATUS1_REG__CE_IN_IDLE_MASK 0x00000020L +#define SDMA0_STATUS1_REG__CE_DST_IDLE_MASK 0x00000040L +#define SDMA0_STATUS1_REG__CE_CMD_IDLE_MASK 0x00000200L +#define SDMA0_STATUS1_REG__CE_AFIFO_FULL_MASK 0x00000400L +#define SDMA0_STATUS1_REG__CE_INFO_FULL_MASK 0x00002000L +#define SDMA0_STATUS1_REG__CE_INFO1_FULL_MASK 0x00004000L +#define SDMA0_STATUS1_REG__EX_START_MASK 0x00008000L +#define SDMA0_STATUS1_REG__CE_RD_STALL_MASK 0x00020000L +#define SDMA0_STATUS1_REG__CE_WR_STALL_MASK 0x00040000L +//SDMA0_RD_BURST_CNTL +#define SDMA0_RD_BURST_CNTL__RD_BURST__SHIFT 0x0 +#define SDMA0_RD_BURST_CNTL__CMD_BUFFER_RD_BURST__SHIFT 0x2 +#define SDMA0_RD_BURST_CNTL__RD_BURST_MASK 0x00000003L +#define SDMA0_RD_BURST_CNTL__CMD_BUFFER_RD_BURST_MASK 0x0000000CL +//SDMA0_HBM_PAGE_CONFIG +#define SDMA0_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT__SHIFT 0x0 +#define SDMA0_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT_MASK 0x00000003L +//SDMA0_UCODE_CHECKSUM +#define SDMA0_UCODE_CHECKSUM__DATA__SHIFT 0x0 +#define SDMA0_UCODE_CHECKSUM__DATA_MASK 0xFFFFFFFFL +//SDMA0_F32_CNTL +#define SDMA0_F32_CNTL__HALT__SHIFT 0x0 +#define SDMA0_F32_CNTL__STEP__SHIFT 0x1 +#define SDMA0_F32_CNTL__HALT_MASK 0x00000001L +#define SDMA0_F32_CNTL__STEP_MASK 0x00000002L +//SDMA0_FREEZE +#define SDMA0_FREEZE__PREEMPT__SHIFT 0x0 +#define SDMA0_FREEZE__FREEZE__SHIFT 0x4 +#define SDMA0_FREEZE__FROZEN__SHIFT 0x5 +#define SDMA0_FREEZE__F32_FREEZE__SHIFT 0x6 +#define SDMA0_FREEZE__PREEMPT_MASK 0x00000001L +#define SDMA0_FREEZE__FREEZE_MASK 0x00000010L +#define SDMA0_FREEZE__FROZEN_MASK 0x00000020L +#define SDMA0_FREEZE__F32_FREEZE_MASK 0x00000040L +//SDMA0_PHASE0_QUANTUM +#define SDMA0_PHASE0_QUANTUM__UNIT__SHIFT 0x0 +#define SDMA0_PHASE0_QUANTUM__VALUE__SHIFT 0x8 +#define SDMA0_PHASE0_QUANTUM__PREFER__SHIFT 0x1e +#define SDMA0_PHASE0_QUANTUM__UNIT_MASK 0x0000000FL +#define SDMA0_PHASE0_QUANTUM__VALUE_MASK 0x00FFFF00L +#define SDMA0_PHASE0_QUANTUM__PREFER_MASK 0x40000000L +//SDMA0_PHASE1_QUANTUM +#define SDMA0_PHASE1_QUANTUM__UNIT__SHIFT 0x0 +#define SDMA0_PHASE1_QUANTUM__VALUE__SHIFT 0x8 +#define SDMA0_PHASE1_QUANTUM__PREFER__SHIFT 0x1e +#define SDMA0_PHASE1_QUANTUM__UNIT_MASK 0x0000000FL +#define SDMA0_PHASE1_QUANTUM__VALUE_MASK 0x00FFFF00L +#define SDMA0_PHASE1_QUANTUM__PREFER_MASK 0x40000000L +//SDMA_POWER_GATING +#define SDMA_POWER_GATING__SDMA0_POWER_OFF_CONDITION__SHIFT 0x0 +#define SDMA_POWER_GATING__SDMA0_POWER_ON_CONDITION__SHIFT 0x1 +#define SDMA_POWER_GATING__SDMA0_POWER_OFF_REQ__SHIFT 0x2 +#define SDMA_POWER_GATING__SDMA0_POWER_ON_REQ__SHIFT 0x3 +#define SDMA_POWER_GATING__PG_CNTL_STATUS__SHIFT 0x4 +#define SDMA_POWER_GATING__SDMA0_POWER_OFF_CONDITION_MASK 0x00000001L +#define SDMA_POWER_GATING__SDMA0_POWER_ON_CONDITION_MASK 0x00000002L +#define SDMA_POWER_GATING__SDMA0_POWER_OFF_REQ_MASK 0x00000004L +#define SDMA_POWER_GATING__SDMA0_POWER_ON_REQ_MASK 0x00000008L +#define SDMA_POWER_GATING__PG_CNTL_STATUS_MASK 0x00000030L +//SDMA_PGFSM_CONFIG +#define SDMA_PGFSM_CONFIG__FSM_ADDR__SHIFT 0x0 +#define 
SDMA_PGFSM_CONFIG__POWER_DOWN__SHIFT 0x8 +#define SDMA_PGFSM_CONFIG__POWER_UP__SHIFT 0x9 +#define SDMA_PGFSM_CONFIG__P1_SELECT__SHIFT 0xa +#define SDMA_PGFSM_CONFIG__P2_SELECT__SHIFT 0xb +#define SDMA_PGFSM_CONFIG__WRITE__SHIFT 0xc +#define SDMA_PGFSM_CONFIG__READ__SHIFT 0xd +#define SDMA_PGFSM_CONFIG__SRBM_OVERRIDE__SHIFT 0x1b +#define SDMA_PGFSM_CONFIG__REG_ADDR__SHIFT 0x1c +#define SDMA_PGFSM_CONFIG__FSM_ADDR_MASK 0x000000FFL +#define SDMA_PGFSM_CONFIG__POWER_DOWN_MASK 0x00000100L +#define SDMA_PGFSM_CONFIG__POWER_UP_MASK 0x00000200L +#define SDMA_PGFSM_CONFIG__P1_SELECT_MASK 0x00000400L +#define SDMA_PGFSM_CONFIG__P2_SELECT_MASK 0x00000800L +#define SDMA_PGFSM_CONFIG__WRITE_MASK 0x00001000L +#define SDMA_PGFSM_CONFIG__READ_MASK 0x00002000L +#define SDMA_PGFSM_CONFIG__SRBM_OVERRIDE_MASK 0x08000000L +#define SDMA_PGFSM_CONFIG__REG_ADDR_MASK 0xF0000000L +//SDMA_PGFSM_WRITE +#define SDMA_PGFSM_WRITE__VALUE__SHIFT 0x0 +#define SDMA_PGFSM_WRITE__VALUE_MASK 0xFFFFFFFFL +//SDMA_PGFSM_READ +#define SDMA_PGFSM_READ__VALUE__SHIFT 0x0 +#define SDMA_PGFSM_READ__VALUE_MASK 0x00FFFFFFL +//SDMA0_EDC_CONFIG +#define SDMA0_EDC_CONFIG__DIS_EDC__SHIFT 0x1 +#define SDMA0_EDC_CONFIG__ECC_INT_ENABLE__SHIFT 0x2 +#define SDMA0_EDC_CONFIG__DIS_EDC_MASK 0x00000002L +#define SDMA0_EDC_CONFIG__ECC_INT_ENABLE_MASK 0x00000004L +//SDMA0_BA_THRESHOLD +#define SDMA0_BA_THRESHOLD__READ_THRES__SHIFT 0x0 +#define SDMA0_BA_THRESHOLD__WRITE_THRES__SHIFT 0x10 +#define SDMA0_BA_THRESHOLD__READ_THRES_MASK 0x000003FFL +#define SDMA0_BA_THRESHOLD__WRITE_THRES_MASK 0x03FF0000L +//SDMA0_ID +#define SDMA0_ID__DEVICE_ID__SHIFT 0x0 +#define SDMA0_ID__DEVICE_ID_MASK 0x000000FFL +//SDMA0_VERSION +#define SDMA0_VERSION__MINVER__SHIFT 0x0 +#define SDMA0_VERSION__MAJVER__SHIFT 0x8 +#define SDMA0_VERSION__REV__SHIFT 0x10 +#define SDMA0_VERSION__MINVER_MASK 0x0000007FL +#define SDMA0_VERSION__MAJVER_MASK 0x00007F00L +#define SDMA0_VERSION__REV_MASK 0x003F0000L +//SDMA0_EDC_COUNTER +#define SDMA0_EDC_COUNTER__SDMA_UCODE_BUF_SED__SHIFT 0x0 +#define SDMA0_EDC_COUNTER__SDMA_RB_CMD_BUF_SED__SHIFT 0x2 +#define SDMA0_EDC_COUNTER__SDMA_IB_CMD_BUF_SED__SHIFT 0x3 +#define SDMA0_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED__SHIFT 0x4 +#define SDMA0_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED__SHIFT 0x5 +#define SDMA0_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED__SHIFT 0x6 +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED__SHIFT 0x7 +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED__SHIFT 0x8 +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED__SHIFT 0x9 +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED__SHIFT 0xa +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED__SHIFT 0xb +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED__SHIFT 0xc +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED__SHIFT 0xd +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED__SHIFT 0xe +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED__SHIFT 0xf +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED__SHIFT 0x10 +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED__SHIFT 0x11 +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED__SHIFT 0x12 +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED__SHIFT 0x13 +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED__SHIFT 0x14 +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED__SHIFT 0x15 +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED__SHIFT 0x16 +#define SDMA0_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED__SHIFT 0x17 +#define SDMA0_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED__SHIFT 0x18 +#define SDMA0_EDC_COUNTER__SDMA_UCODE_BUF_SED_MASK 
0x00000001L +#define SDMA0_EDC_COUNTER__SDMA_RB_CMD_BUF_SED_MASK 0x00000004L +#define SDMA0_EDC_COUNTER__SDMA_IB_CMD_BUF_SED_MASK 0x00000008L +#define SDMA0_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED_MASK 0x00000010L +#define SDMA0_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED_MASK 0x00000020L +#define SDMA0_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED_MASK 0x00000040L +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED_MASK 0x00000080L +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED_MASK 0x00000100L +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED_MASK 0x00000200L +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED_MASK 0x00000400L +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED_MASK 0x00000800L +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED_MASK 0x00001000L +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED_MASK 0x00002000L +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED_MASK 0x00004000L +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED_MASK 0x00008000L +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED_MASK 0x00010000L +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED_MASK 0x00020000L +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED_MASK 0x00040000L +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED_MASK 0x00080000L +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED_MASK 0x00100000L +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED_MASK 0x00200000L +#define SDMA0_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED_MASK 0x00400000L +#define SDMA0_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED_MASK 0x00800000L +#define SDMA0_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED_MASK 0x01000000L +//SDMA0_EDC_COUNTER_CLEAR +#define SDMA0_EDC_COUNTER_CLEAR__DUMMY__SHIFT 0x0 +#define SDMA0_EDC_COUNTER_CLEAR__DUMMY_MASK 0x00000001L +//SDMA0_STATUS2_REG +#define SDMA0_STATUS2_REG__ID__SHIFT 0x0 +#define SDMA0_STATUS2_REG__F32_INSTR_PTR__SHIFT 0x3 +#define SDMA0_STATUS2_REG__CMD_OP__SHIFT 0x10 +#define SDMA0_STATUS2_REG__ID_MASK 0x00000007L +#define SDMA0_STATUS2_REG__F32_INSTR_PTR_MASK 0x0000FFF8L +#define SDMA0_STATUS2_REG__CMD_OP_MASK 0xFFFF0000L +//SDMA0_ATOMIC_CNTL +#define SDMA0_ATOMIC_CNTL__LOOP_TIMER__SHIFT 0x0 +#define SDMA0_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE__SHIFT 0x1f +#define SDMA0_ATOMIC_CNTL__LOOP_TIMER_MASK 0x7FFFFFFFL +#define SDMA0_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE_MASK 0x80000000L +//SDMA0_ATOMIC_PREOP_LO +#define SDMA0_ATOMIC_PREOP_LO__DATA__SHIFT 0x0 +#define SDMA0_ATOMIC_PREOP_LO__DATA_MASK 0xFFFFFFFFL +//SDMA0_ATOMIC_PREOP_HI +#define SDMA0_ATOMIC_PREOP_HI__DATA__SHIFT 0x0 +#define SDMA0_ATOMIC_PREOP_HI__DATA_MASK 0xFFFFFFFFL +//SDMA0_UTCL1_CNTL +#define SDMA0_UTCL1_CNTL__REDO_ENABLE__SHIFT 0x0 +#define SDMA0_UTCL1_CNTL__REDO_DELAY__SHIFT 0x1 +#define SDMA0_UTCL1_CNTL__REDO_WATERMK__SHIFT 0xb +#define SDMA0_UTCL1_CNTL__INVACK_DELAY__SHIFT 0xe +#define SDMA0_UTCL1_CNTL__REQL2_CREDIT__SHIFT 0x18 +#define SDMA0_UTCL1_CNTL__VADDR_WATERMK__SHIFT 0x1d +#define SDMA0_UTCL1_CNTL__REDO_ENABLE_MASK 0x00000001L +#define SDMA0_UTCL1_CNTL__REDO_DELAY_MASK 0x000007FEL +#define SDMA0_UTCL1_CNTL__REDO_WATERMK_MASK 0x00003800L +#define SDMA0_UTCL1_CNTL__INVACK_DELAY_MASK 0x00FFC000L +#define SDMA0_UTCL1_CNTL__REQL2_CREDIT_MASK 0x1F000000L +#define SDMA0_UTCL1_CNTL__VADDR_WATERMK_MASK 0xE0000000L +//SDMA0_UTCL1_WATERMK +#define SDMA0_UTCL1_WATERMK__REQMC_WATERMK__SHIFT 0x0 +#define SDMA0_UTCL1_WATERMK__REQPG_WATERMK__SHIFT 0x9 +#define SDMA0_UTCL1_WATERMK__INVREQ_WATERMK__SHIFT 0x11 +#define SDMA0_UTCL1_WATERMK__XNACK_WATERMK__SHIFT 0x19 +#define SDMA0_UTCL1_WATERMK__REQMC_WATERMK_MASK 0x000001FFL 
+#define SDMA0_UTCL1_WATERMK__REQPG_WATERMK_MASK 0x0001FE00L +#define SDMA0_UTCL1_WATERMK__INVREQ_WATERMK_MASK 0x01FE0000L +#define SDMA0_UTCL1_WATERMK__XNACK_WATERMK_MASK 0xFE000000L +//SDMA0_UTCL1_RD_STATUS +#define SDMA0_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0 +#define SDMA0_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1 +#define SDMA0_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2 +#define SDMA0_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3 +#define SDMA0_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4 +#define SDMA0_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5 +#define SDMA0_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6 +#define SDMA0_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7 +#define SDMA0_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8 +#define SDMA0_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9 +#define SDMA0_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa +#define SDMA0_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb +#define SDMA0_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc +#define SDMA0_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd +#define SDMA0_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe +#define SDMA0_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf +#define SDMA0_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10 +#define SDMA0_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11 +#define SDMA0_UTCL1_RD_STATUS__PAGE_FAULT__SHIFT 0x12 +#define SDMA0_UTCL1_RD_STATUS__PAGE_NULL__SHIFT 0x13 +#define SDMA0_UTCL1_RD_STATUS__REQL2_IDLE__SHIFT 0x14 +#define SDMA0_UTCL1_RD_STATUS__CE_L1_STALL__SHIFT 0x15 +#define SDMA0_UTCL1_RD_STATUS__NEXT_RD_VECTOR__SHIFT 0x16 +#define SDMA0_UTCL1_RD_STATUS__MERGE_STATE__SHIFT 0x1a +#define SDMA0_UTCL1_RD_STATUS__ADDR_RD_RTR__SHIFT 0x1d +#define SDMA0_UTCL1_RD_STATUS__WPTR_POLLING__SHIFT 0x1e +#define SDMA0_UTCL1_RD_STATUS__INVREQ_SIZE__SHIFT 0x1f +#define SDMA0_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L +#define SDMA0_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L +#define SDMA0_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L +#define SDMA0_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L +#define SDMA0_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L +#define SDMA0_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L +#define SDMA0_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L +#define SDMA0_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L +#define SDMA0_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L +#define SDMA0_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L +#define SDMA0_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L +#define SDMA0_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L +#define SDMA0_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L +#define SDMA0_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L +#define SDMA0_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L +#define SDMA0_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L +#define SDMA0_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L +#define SDMA0_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L +#define SDMA0_UTCL1_RD_STATUS__PAGE_FAULT_MASK 0x00040000L +#define SDMA0_UTCL1_RD_STATUS__PAGE_NULL_MASK 0x00080000L +#define SDMA0_UTCL1_RD_STATUS__REQL2_IDLE_MASK 0x00100000L +#define SDMA0_UTCL1_RD_STATUS__CE_L1_STALL_MASK 0x00200000L +#define SDMA0_UTCL1_RD_STATUS__NEXT_RD_VECTOR_MASK 0x03C00000L +#define SDMA0_UTCL1_RD_STATUS__MERGE_STATE_MASK 0x1C000000L +#define 
SDMA0_UTCL1_RD_STATUS__ADDR_RD_RTR_MASK 0x20000000L +#define SDMA0_UTCL1_RD_STATUS__WPTR_POLLING_MASK 0x40000000L +#define SDMA0_UTCL1_RD_STATUS__INVREQ_SIZE_MASK 0x80000000L +//SDMA0_UTCL1_WR_STATUS +#define SDMA0_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0 +#define SDMA0_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1 +#define SDMA0_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2 +#define SDMA0_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3 +#define SDMA0_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4 +#define SDMA0_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5 +#define SDMA0_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6 +#define SDMA0_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7 +#define SDMA0_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8 +#define SDMA0_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9 +#define SDMA0_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa +#define SDMA0_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb +#define SDMA0_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc +#define SDMA0_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd +#define SDMA0_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe +#define SDMA0_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf +#define SDMA0_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10 +#define SDMA0_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11 +#define SDMA0_UTCL1_WR_STATUS__PAGE_FAULT__SHIFT 0x12 +#define SDMA0_UTCL1_WR_STATUS__PAGE_NULL__SHIFT 0x13 +#define SDMA0_UTCL1_WR_STATUS__REQL2_IDLE__SHIFT 0x14 +#define SDMA0_UTCL1_WR_STATUS__F32_WR_RTR__SHIFT 0x15 +#define SDMA0_UTCL1_WR_STATUS__NEXT_WR_VECTOR__SHIFT 0x16 +#define SDMA0_UTCL1_WR_STATUS__MERGE_STATE__SHIFT 0x19 +#define SDMA0_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY__SHIFT 0x1c +#define SDMA0_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL__SHIFT 0x1d +#define SDMA0_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY__SHIFT 0x1e +#define SDMA0_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL__SHIFT 0x1f +#define SDMA0_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L +#define SDMA0_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L +#define SDMA0_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L +#define SDMA0_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L +#define SDMA0_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L +#define SDMA0_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L +#define SDMA0_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L +#define SDMA0_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L +#define SDMA0_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L +#define SDMA0_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L +#define SDMA0_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L +#define SDMA0_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L +#define SDMA0_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L +#define SDMA0_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L +#define SDMA0_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L +#define SDMA0_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L +#define SDMA0_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L +#define SDMA0_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L +#define SDMA0_UTCL1_WR_STATUS__PAGE_FAULT_MASK 0x00040000L +#define SDMA0_UTCL1_WR_STATUS__PAGE_NULL_MASK 0x00080000L +#define SDMA0_UTCL1_WR_STATUS__REQL2_IDLE_MASK 0x00100000L +#define SDMA0_UTCL1_WR_STATUS__F32_WR_RTR_MASK 0x00200000L +#define SDMA0_UTCL1_WR_STATUS__NEXT_WR_VECTOR_MASK 0x01C00000L +#define 
SDMA0_UTCL1_WR_STATUS__MERGE_STATE_MASK 0x0E000000L +#define SDMA0_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY_MASK 0x10000000L +#define SDMA0_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL_MASK 0x20000000L +#define SDMA0_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY_MASK 0x40000000L +#define SDMA0_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL_MASK 0x80000000L +//SDMA0_UTCL1_INV0 +#define SDMA0_UTCL1_INV0__INV_MIDDLE__SHIFT 0x0 +#define SDMA0_UTCL1_INV0__RD_TIMEOUT__SHIFT 0x1 +#define SDMA0_UTCL1_INV0__WR_TIMEOUT__SHIFT 0x2 +#define SDMA0_UTCL1_INV0__RD_IN_INVADR__SHIFT 0x3 +#define SDMA0_UTCL1_INV0__WR_IN_INVADR__SHIFT 0x4 +#define SDMA0_UTCL1_INV0__PAGE_NULL_SW__SHIFT 0x5 +#define SDMA0_UTCL1_INV0__XNACK_IS_INVADR__SHIFT 0x6 +#define SDMA0_UTCL1_INV0__INVREQ_ENABLE__SHIFT 0x7 +#define SDMA0_UTCL1_INV0__NACK_TIMEOUT_SW__SHIFT 0x8 +#define SDMA0_UTCL1_INV0__NFLUSH_INV_IDLE__SHIFT 0x9 +#define SDMA0_UTCL1_INV0__FLUSH_INV_IDLE__SHIFT 0xa +#define SDMA0_UTCL1_INV0__INV_FLUSHTYPE__SHIFT 0xb +#define SDMA0_UTCL1_INV0__INV_VMID_VEC__SHIFT 0xc +#define SDMA0_UTCL1_INV0__INV_ADDR_HI__SHIFT 0x1c +#define SDMA0_UTCL1_INV0__INV_MIDDLE_MASK 0x00000001L +#define SDMA0_UTCL1_INV0__RD_TIMEOUT_MASK 0x00000002L +#define SDMA0_UTCL1_INV0__WR_TIMEOUT_MASK 0x00000004L +#define SDMA0_UTCL1_INV0__RD_IN_INVADR_MASK 0x00000008L +#define SDMA0_UTCL1_INV0__WR_IN_INVADR_MASK 0x00000010L +#define SDMA0_UTCL1_INV0__PAGE_NULL_SW_MASK 0x00000020L +#define SDMA0_UTCL1_INV0__XNACK_IS_INVADR_MASK 0x00000040L +#define SDMA0_UTCL1_INV0__INVREQ_ENABLE_MASK 0x00000080L +#define SDMA0_UTCL1_INV0__NACK_TIMEOUT_SW_MASK 0x00000100L +#define SDMA0_UTCL1_INV0__NFLUSH_INV_IDLE_MASK 0x00000200L +#define SDMA0_UTCL1_INV0__FLUSH_INV_IDLE_MASK 0x00000400L +#define SDMA0_UTCL1_INV0__INV_FLUSHTYPE_MASK 0x00000800L +#define SDMA0_UTCL1_INV0__INV_VMID_VEC_MASK 0x0FFFF000L +#define SDMA0_UTCL1_INV0__INV_ADDR_HI_MASK 0xF0000000L +//SDMA0_UTCL1_INV1 +#define SDMA0_UTCL1_INV1__INV_ADDR_LO__SHIFT 0x0 +#define SDMA0_UTCL1_INV1__INV_ADDR_LO_MASK 0xFFFFFFFFL +//SDMA0_UTCL1_INV2 +#define SDMA0_UTCL1_INV2__INV_NFLUSH_VMID_VEC__SHIFT 0x0 +#define SDMA0_UTCL1_INV2__INV_NFLUSH_VMID_VEC_MASK 0xFFFFFFFFL +//SDMA0_UTCL1_RD_XNACK0 +#define SDMA0_UTCL1_RD_XNACK0__XNACK_ADDR_LO__SHIFT 0x0 +#define SDMA0_UTCL1_RD_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL +//SDMA0_UTCL1_RD_XNACK1 +#define SDMA0_UTCL1_RD_XNACK1__XNACK_ADDR_HI__SHIFT 0x0 +#define SDMA0_UTCL1_RD_XNACK1__XNACK_VMID__SHIFT 0x4 +#define SDMA0_UTCL1_RD_XNACK1__XNACK_VECTOR__SHIFT 0x8 +#define SDMA0_UTCL1_RD_XNACK1__IS_XNACK__SHIFT 0x1a +#define SDMA0_UTCL1_RD_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL +#define SDMA0_UTCL1_RD_XNACK1__XNACK_VMID_MASK 0x000000F0L +#define SDMA0_UTCL1_RD_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L +#define SDMA0_UTCL1_RD_XNACK1__IS_XNACK_MASK 0x0C000000L +//SDMA0_UTCL1_WR_XNACK0 +#define SDMA0_UTCL1_WR_XNACK0__XNACK_ADDR_LO__SHIFT 0x0 +#define SDMA0_UTCL1_WR_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL +//SDMA0_UTCL1_WR_XNACK1 +#define SDMA0_UTCL1_WR_XNACK1__XNACK_ADDR_HI__SHIFT 0x0 +#define SDMA0_UTCL1_WR_XNACK1__XNACK_VMID__SHIFT 0x4 +#define SDMA0_UTCL1_WR_XNACK1__XNACK_VECTOR__SHIFT 0x8 +#define SDMA0_UTCL1_WR_XNACK1__IS_XNACK__SHIFT 0x1a +#define SDMA0_UTCL1_WR_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL +#define SDMA0_UTCL1_WR_XNACK1__XNACK_VMID_MASK 0x000000F0L +#define SDMA0_UTCL1_WR_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L +#define SDMA0_UTCL1_WR_XNACK1__IS_XNACK_MASK 0x0C000000L +//SDMA0_UTCL1_TIMEOUT +#define SDMA0_UTCL1_TIMEOUT__RD_XNACK_LIMIT__SHIFT 0x0 +#define SDMA0_UTCL1_TIMEOUT__WR_XNACK_LIMIT__SHIFT 0x10 
+#define SDMA0_UTCL1_TIMEOUT__RD_XNACK_LIMIT_MASK 0x0000FFFFL +#define SDMA0_UTCL1_TIMEOUT__WR_XNACK_LIMIT_MASK 0xFFFF0000L +//SDMA0_UTCL1_PAGE +#define SDMA0_UTCL1_PAGE__VM_HOLE__SHIFT 0x0 +#define SDMA0_UTCL1_PAGE__REQ_TYPE__SHIFT 0x1 +#define SDMA0_UTCL1_PAGE__USE_MTYPE__SHIFT 0x6 +#define SDMA0_UTCL1_PAGE__USE_PT_SNOOP__SHIFT 0x9 +#define SDMA0_UTCL1_PAGE__VM_HOLE_MASK 0x00000001L +#define SDMA0_UTCL1_PAGE__REQ_TYPE_MASK 0x0000001EL +#define SDMA0_UTCL1_PAGE__USE_MTYPE_MASK 0x000001C0L +#define SDMA0_UTCL1_PAGE__USE_PT_SNOOP_MASK 0x00000200L +//SDMA0_POWER_CNTL_IDLE +#define SDMA0_POWER_CNTL_IDLE__DELAY0__SHIFT 0x0 +#define SDMA0_POWER_CNTL_IDLE__DELAY1__SHIFT 0x10 +#define SDMA0_POWER_CNTL_IDLE__DELAY2__SHIFT 0x18 +#define SDMA0_POWER_CNTL_IDLE__DELAY0_MASK 0x0000FFFFL +#define SDMA0_POWER_CNTL_IDLE__DELAY1_MASK 0x00FF0000L +#define SDMA0_POWER_CNTL_IDLE__DELAY2_MASK 0xFF000000L +//SDMA0_RELAX_ORDERING_LUT +#define SDMA0_RELAX_ORDERING_LUT__RESERVED0__SHIFT 0x0 +#define SDMA0_RELAX_ORDERING_LUT__COPY__SHIFT 0x1 +#define SDMA0_RELAX_ORDERING_LUT__WRITE__SHIFT 0x2 +#define SDMA0_RELAX_ORDERING_LUT__RESERVED3__SHIFT 0x3 +#define SDMA0_RELAX_ORDERING_LUT__RESERVED4__SHIFT 0x4 +#define SDMA0_RELAX_ORDERING_LUT__FENCE__SHIFT 0x5 +#define SDMA0_RELAX_ORDERING_LUT__RESERVED76__SHIFT 0x6 +#define SDMA0_RELAX_ORDERING_LUT__POLL_MEM__SHIFT 0x8 +#define SDMA0_RELAX_ORDERING_LUT__COND_EXE__SHIFT 0x9 +#define SDMA0_RELAX_ORDERING_LUT__ATOMIC__SHIFT 0xa +#define SDMA0_RELAX_ORDERING_LUT__CONST_FILL__SHIFT 0xb +#define SDMA0_RELAX_ORDERING_LUT__PTEPDE__SHIFT 0xc +#define SDMA0_RELAX_ORDERING_LUT__TIMESTAMP__SHIFT 0xd +#define SDMA0_RELAX_ORDERING_LUT__RESERVED__SHIFT 0xe +#define SDMA0_RELAX_ORDERING_LUT__WORLD_SWITCH__SHIFT 0x1b +#define SDMA0_RELAX_ORDERING_LUT__RPTR_WRB__SHIFT 0x1c +#define SDMA0_RELAX_ORDERING_LUT__WPTR_POLL__SHIFT 0x1d +#define SDMA0_RELAX_ORDERING_LUT__IB_FETCH__SHIFT 0x1e +#define SDMA0_RELAX_ORDERING_LUT__RB_FETCH__SHIFT 0x1f +#define SDMA0_RELAX_ORDERING_LUT__RESERVED0_MASK 0x00000001L +#define SDMA0_RELAX_ORDERING_LUT__COPY_MASK 0x00000002L +#define SDMA0_RELAX_ORDERING_LUT__WRITE_MASK 0x00000004L +#define SDMA0_RELAX_ORDERING_LUT__RESERVED3_MASK 0x00000008L +#define SDMA0_RELAX_ORDERING_LUT__RESERVED4_MASK 0x00000010L +#define SDMA0_RELAX_ORDERING_LUT__FENCE_MASK 0x00000020L +#define SDMA0_RELAX_ORDERING_LUT__RESERVED76_MASK 0x000000C0L +#define SDMA0_RELAX_ORDERING_LUT__POLL_MEM_MASK 0x00000100L +#define SDMA0_RELAX_ORDERING_LUT__COND_EXE_MASK 0x00000200L +#define SDMA0_RELAX_ORDERING_LUT__ATOMIC_MASK 0x00000400L +#define SDMA0_RELAX_ORDERING_LUT__CONST_FILL_MASK 0x00000800L +#define SDMA0_RELAX_ORDERING_LUT__PTEPDE_MASK 0x00001000L +#define SDMA0_RELAX_ORDERING_LUT__TIMESTAMP_MASK 0x00002000L +#define SDMA0_RELAX_ORDERING_LUT__RESERVED_MASK 0x07FFC000L +#define SDMA0_RELAX_ORDERING_LUT__WORLD_SWITCH_MASK 0x08000000L +#define SDMA0_RELAX_ORDERING_LUT__RPTR_WRB_MASK 0x10000000L +#define SDMA0_RELAX_ORDERING_LUT__WPTR_POLL_MASK 0x20000000L +#define SDMA0_RELAX_ORDERING_LUT__IB_FETCH_MASK 0x40000000L +#define SDMA0_RELAX_ORDERING_LUT__RB_FETCH_MASK 0x80000000L +//SDMA0_CHICKEN_BITS_2 +#define SDMA0_CHICKEN_BITS_2__F32_CMD_PROC_DELAY__SHIFT 0x0 +#define SDMA0_CHICKEN_BITS_2__F32_CMD_PROC_DELAY_MASK 0x0000000FL +//SDMA0_STATUS3_REG +#define SDMA0_STATUS3_REG__CMD_OP_STATUS__SHIFT 0x0 +#define SDMA0_STATUS3_REG__PREV_VM_CMD__SHIFT 0x10 +#define SDMA0_STATUS3_REG__EXCEPTION_IDLE__SHIFT 0x14 +#define SDMA0_STATUS3_REG__QUEUE_ID_MATCH__SHIFT 0x15 +#define 
SDMA0_STATUS3_REG__INT_QUEUE_ID__SHIFT 0x16 +#define SDMA0_STATUS3_REG__CMD_OP_STATUS_MASK 0x0000FFFFL +#define SDMA0_STATUS3_REG__PREV_VM_CMD_MASK 0x000F0000L +#define SDMA0_STATUS3_REG__EXCEPTION_IDLE_MASK 0x00100000L +#define SDMA0_STATUS3_REG__QUEUE_ID_MATCH_MASK 0x00200000L +#define SDMA0_STATUS3_REG__INT_QUEUE_ID_MASK 0x03C00000L +//SDMA0_PHYSICAL_ADDR_LO +#define SDMA0_PHYSICAL_ADDR_LO__D_VALID__SHIFT 0x0 +#define SDMA0_PHYSICAL_ADDR_LO__DIRTY__SHIFT 0x1 +#define SDMA0_PHYSICAL_ADDR_LO__PHY_VALID__SHIFT 0x2 +#define SDMA0_PHYSICAL_ADDR_LO__ADDR__SHIFT 0xc +#define SDMA0_PHYSICAL_ADDR_LO__D_VALID_MASK 0x00000001L +#define SDMA0_PHYSICAL_ADDR_LO__DIRTY_MASK 0x00000002L +#define SDMA0_PHYSICAL_ADDR_LO__PHY_VALID_MASK 0x00000004L +#define SDMA0_PHYSICAL_ADDR_LO__ADDR_MASK 0xFFFFF000L +//SDMA0_PHYSICAL_ADDR_HI +#define SDMA0_PHYSICAL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_PHYSICAL_ADDR_HI__ADDR_MASK 0x0000FFFFL +//SDMA0_PHASE2_QUANTUM +#define SDMA0_PHASE2_QUANTUM__UNIT__SHIFT 0x0 +#define SDMA0_PHASE2_QUANTUM__VALUE__SHIFT 0x8 +#define SDMA0_PHASE2_QUANTUM__PREFER__SHIFT 0x1e +#define SDMA0_PHASE2_QUANTUM__UNIT_MASK 0x0000000FL +#define SDMA0_PHASE2_QUANTUM__VALUE_MASK 0x00FFFF00L +#define SDMA0_PHASE2_QUANTUM__PREFER_MASK 0x40000000L +//SDMA0_ERROR_LOG +#define SDMA0_ERROR_LOG__OVERRIDE__SHIFT 0x0 +#define SDMA0_ERROR_LOG__STATUS__SHIFT 0x10 +#define SDMA0_ERROR_LOG__OVERRIDE_MASK 0x0000FFFFL +#define SDMA0_ERROR_LOG__STATUS_MASK 0xFFFF0000L +//SDMA0_PUB_DUMMY_REG0 +#define SDMA0_PUB_DUMMY_REG0__VALUE__SHIFT 0x0 +#define SDMA0_PUB_DUMMY_REG0__VALUE_MASK 0xFFFFFFFFL +//SDMA0_PUB_DUMMY_REG1 +#define SDMA0_PUB_DUMMY_REG1__VALUE__SHIFT 0x0 +#define SDMA0_PUB_DUMMY_REG1__VALUE_MASK 0xFFFFFFFFL +//SDMA0_PUB_DUMMY_REG2 +#define SDMA0_PUB_DUMMY_REG2__VALUE__SHIFT 0x0 +#define SDMA0_PUB_DUMMY_REG2__VALUE_MASK 0xFFFFFFFFL +//SDMA0_PUB_DUMMY_REG3 +#define SDMA0_PUB_DUMMY_REG3__VALUE__SHIFT 0x0 +#define SDMA0_PUB_DUMMY_REG3__VALUE_MASK 0xFFFFFFFFL +//SDMA0_F32_COUNTER +#define SDMA0_F32_COUNTER__VALUE__SHIFT 0x0 +#define SDMA0_F32_COUNTER__VALUE_MASK 0xFFFFFFFFL +//SDMA0_UNBREAKABLE +#define SDMA0_UNBREAKABLE__VALUE__SHIFT 0x0 +#define SDMA0_UNBREAKABLE__VALUE_MASK 0x00000001L +//SDMA0_PERFMON_CNTL +#define SDMA0_PERFMON_CNTL__PERF_ENABLE0__SHIFT 0x0 +#define SDMA0_PERFMON_CNTL__PERF_CLEAR0__SHIFT 0x1 +#define SDMA0_PERFMON_CNTL__PERF_SEL0__SHIFT 0x2 +#define SDMA0_PERFMON_CNTL__PERF_ENABLE1__SHIFT 0xa +#define SDMA0_PERFMON_CNTL__PERF_CLEAR1__SHIFT 0xb +#define SDMA0_PERFMON_CNTL__PERF_SEL1__SHIFT 0xc +#define SDMA0_PERFMON_CNTL__PERF_ENABLE0_MASK 0x00000001L +#define SDMA0_PERFMON_CNTL__PERF_CLEAR0_MASK 0x00000002L +#define SDMA0_PERFMON_CNTL__PERF_SEL0_MASK 0x000003FCL +#define SDMA0_PERFMON_CNTL__PERF_ENABLE1_MASK 0x00000400L +#define SDMA0_PERFMON_CNTL__PERF_CLEAR1_MASK 0x00000800L +#define SDMA0_PERFMON_CNTL__PERF_SEL1_MASK 0x000FF000L +//SDMA0_PERFCOUNTER0_RESULT +#define SDMA0_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT 0x0 +#define SDMA0_PERFCOUNTER0_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL +//SDMA0_PERFCOUNTER1_RESULT +#define SDMA0_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT 0x0 +#define SDMA0_PERFCOUNTER1_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL +//SDMA0_PERFCOUNTER_TAG_DELAY_RANGE +#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW__SHIFT 0x0 +#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH__SHIFT 0xe +#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW__SHIFT 0x1c +#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW_MASK 0x00003FFFL +#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH_MASK 
0x0FFFC000L +#define SDMA0_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW_MASK 0x10000000L +//SDMA0_CRD_CNTL +#define SDMA0_CRD_CNTL__MC_WRREQ_CREDIT__SHIFT 0x7 +#define SDMA0_CRD_CNTL__MC_RDREQ_CREDIT__SHIFT 0xd +#define SDMA0_CRD_CNTL__MC_WRREQ_CREDIT_MASK 0x00001F80L +#define SDMA0_CRD_CNTL__MC_RDREQ_CREDIT_MASK 0x0007E000L +//SDMA0_GPU_IOV_VIOLATION_LOG +#define SDMA0_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0 +#define SDMA0_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1 +#define SDMA0_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2 +#define SDMA0_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION__SHIFT 0x14 +#define SDMA0_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x15 +#define SDMA0_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x16 +#define SDMA0_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L +#define SDMA0_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L +#define SDMA0_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x000FFFFCL +#define SDMA0_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION_MASK 0x00100000L +#define SDMA0_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00200000L +#define SDMA0_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x03C00000L +//SDMA0_ULV_CNTL +#define SDMA0_ULV_CNTL__HYSTERESIS__SHIFT 0x0 +#define SDMA0_ULV_CNTL__ENTER_ULV_INT_CLR__SHIFT 0x1b +#define SDMA0_ULV_CNTL__EXIT_ULV_INT_CLR__SHIFT 0x1c +#define SDMA0_ULV_CNTL__ENTER_ULV_INT__SHIFT 0x1d +#define SDMA0_ULV_CNTL__EXIT_ULV_INT__SHIFT 0x1e +#define SDMA0_ULV_CNTL__ULV_STATUS__SHIFT 0x1f +#define SDMA0_ULV_CNTL__HYSTERESIS_MASK 0x0000001FL +#define SDMA0_ULV_CNTL__ENTER_ULV_INT_CLR_MASK 0x08000000L +#define SDMA0_ULV_CNTL__EXIT_ULV_INT_CLR_MASK 0x10000000L +#define SDMA0_ULV_CNTL__ENTER_ULV_INT_MASK 0x20000000L +#define SDMA0_ULV_CNTL__EXIT_ULV_INT_MASK 0x40000000L +#define SDMA0_ULV_CNTL__ULV_STATUS_MASK 0x80000000L +//SDMA0_EA_DBIT_ADDR_DATA +#define SDMA0_EA_DBIT_ADDR_DATA__VALUE__SHIFT 0x0 +#define SDMA0_EA_DBIT_ADDR_DATA__VALUE_MASK 0xFFFFFFFFL +//SDMA0_EA_DBIT_ADDR_INDEX +#define SDMA0_EA_DBIT_ADDR_INDEX__VALUE__SHIFT 0x0 +#define SDMA0_EA_DBIT_ADDR_INDEX__VALUE_MASK 0x00000007L +//SDMA0_GPU_IOV_VIOLATION_LOG2 +#define SDMA0_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID__SHIFT 0x0 +#define SDMA0_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID_MASK 0x000000FFL +//SDMA0_GFX_RB_CNTL +#define SDMA0_GFX_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA0_GFX_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA0_GFX_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA0_GFX_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA0_GFX_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA0_GFX_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA0_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA0_GFX_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA0_GFX_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA0_GFX_RB_BASE +#define SDMA0_GFX_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA0_GFX_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA0_GFX_RB_BASE_HI +#define SDMA0_GFX_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA0_GFX_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA0_GFX_RB_RPTR +#define SDMA0_GFX_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA0_GFX_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_GFX_RB_RPTR_HI +#define 
SDMA0_GFX_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA0_GFX_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_GFX_RB_WPTR +#define SDMA0_GFX_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA0_GFX_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_GFX_RB_WPTR_HI +#define SDMA0_GFX_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA0_GFX_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_GFX_RB_WPTR_POLL_CNTL +#define SDMA0_GFX_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA0_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA0_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA0_GFX_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA0_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA0_GFX_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA0_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA0_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA0_GFX_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA0_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA0_GFX_RB_RPTR_ADDR_HI +#define SDMA0_GFX_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_GFX_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_GFX_RB_RPTR_ADDR_LO +#define SDMA0_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA0_GFX_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA0_GFX_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_GFX_IB_CNTL +#define SDMA0_GFX_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA0_GFX_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA0_GFX_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA0_GFX_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA0_GFX_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA0_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA0_GFX_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA0_GFX_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA0_GFX_IB_RPTR +#define SDMA0_GFX_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA0_GFX_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA0_GFX_IB_OFFSET +#define SDMA0_GFX_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA0_GFX_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA0_GFX_IB_BASE_LO +#define SDMA0_GFX_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA0_GFX_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA0_GFX_IB_BASE_HI +#define SDMA0_GFX_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA0_GFX_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_GFX_IB_SIZE +#define SDMA0_GFX_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA0_GFX_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA0_GFX_SKIP_CNTL +#define SDMA0_GFX_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA0_GFX_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA0_GFX_CONTEXT_STATUS +#define SDMA0_GFX_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA0_GFX_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA0_GFX_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA0_GFX_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA0_GFX_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA0_GFX_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA0_GFX_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA0_GFX_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA0_GFX_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA0_GFX_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA0_GFX_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA0_GFX_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA0_GFX_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA0_GFX_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA0_GFX_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define 
SDMA0_GFX_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA0_GFX_DOORBELL +#define SDMA0_GFX_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA0_GFX_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA0_GFX_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA0_GFX_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA0_GFX_CONTEXT_CNTL +#define SDMA0_GFX_CONTEXT_CNTL__RESUME_CTX__SHIFT 0x10 +#define SDMA0_GFX_CONTEXT_CNTL__RESUME_CTX_MASK 0x00010000L +//SDMA0_GFX_STATUS +#define SDMA0_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA0_GFX_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA0_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA0_GFX_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA0_GFX_DOORBELL_LOG +#define SDMA0_GFX_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA0_GFX_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA0_GFX_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA0_GFX_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA0_GFX_WATERMARK +#define SDMA0_GFX_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA0_GFX_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA0_GFX_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA0_GFX_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA0_GFX_DOORBELL_OFFSET +#define SDMA0_GFX_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA0_GFX_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA0_GFX_CSA_ADDR_LO +#define SDMA0_GFX_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_GFX_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_GFX_CSA_ADDR_HI +#define SDMA0_GFX_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_GFX_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_GFX_IB_SUB_REMAIN +#define SDMA0_GFX_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA0_GFX_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA0_GFX_PREEMPT +#define SDMA0_GFX_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA0_GFX_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA0_GFX_DUMMY_REG +#define SDMA0_GFX_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA0_GFX_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA0_GFX_RB_WPTR_POLL_ADDR_HI +#define SDMA0_GFX_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_GFX_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_GFX_RB_WPTR_POLL_ADDR_LO +#define SDMA0_GFX_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_GFX_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_GFX_RB_AQL_CNTL +#define SDMA0_GFX_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA0_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA0_GFX_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA0_GFX_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA0_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA0_GFX_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA0_GFX_MINOR_PTR_UPDATE +#define SDMA0_GFX_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA0_GFX_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA0_GFX_MIDCMD_DATA0 +#define SDMA0_GFX_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA0_GFX_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA0_GFX_MIDCMD_DATA1 +#define SDMA0_GFX_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA0_GFX_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA0_GFX_MIDCMD_DATA2 +#define SDMA0_GFX_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA0_GFX_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA0_GFX_MIDCMD_DATA3 +#define SDMA0_GFX_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA0_GFX_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA0_GFX_MIDCMD_DATA4 +#define SDMA0_GFX_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA0_GFX_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA0_GFX_MIDCMD_DATA5 +#define SDMA0_GFX_MIDCMD_DATA5__DATA5__SHIFT 0x0 
+#define SDMA0_GFX_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA0_GFX_MIDCMD_DATA6 +#define SDMA0_GFX_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA0_GFX_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA0_GFX_MIDCMD_DATA7 +#define SDMA0_GFX_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA0_GFX_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA0_GFX_MIDCMD_DATA8 +#define SDMA0_GFX_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA0_GFX_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA0_GFX_MIDCMD_CNTL +#define SDMA0_GFX_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA0_GFX_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA0_GFX_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA0_GFX_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA0_GFX_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA0_GFX_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA0_GFX_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA0_GFX_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA0_PAGE_RB_CNTL +#define SDMA0_PAGE_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA0_PAGE_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA0_PAGE_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA0_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA0_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA0_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA0_PAGE_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA0_PAGE_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA0_PAGE_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA0_PAGE_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA0_PAGE_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA0_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA0_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA0_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA0_PAGE_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA0_PAGE_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA0_PAGE_RB_BASE +#define SDMA0_PAGE_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA0_PAGE_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA0_PAGE_RB_BASE_HI +#define SDMA0_PAGE_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA0_PAGE_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA0_PAGE_RB_RPTR +#define SDMA0_PAGE_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA0_PAGE_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_PAGE_RB_RPTR_HI +#define SDMA0_PAGE_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA0_PAGE_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_PAGE_RB_WPTR +#define SDMA0_PAGE_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA0_PAGE_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_PAGE_RB_WPTR_HI +#define SDMA0_PAGE_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA0_PAGE_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_PAGE_RB_WPTR_POLL_CNTL +#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA0_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA0_PAGE_RB_RPTR_ADDR_HI +#define SDMA0_PAGE_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_PAGE_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_PAGE_RB_RPTR_ADDR_LO +#define SDMA0_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 
0x0 +#define SDMA0_PAGE_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA0_PAGE_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_PAGE_IB_CNTL +#define SDMA0_PAGE_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA0_PAGE_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA0_PAGE_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA0_PAGE_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA0_PAGE_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA0_PAGE_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA0_PAGE_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA0_PAGE_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA0_PAGE_IB_RPTR +#define SDMA0_PAGE_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA0_PAGE_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA0_PAGE_IB_OFFSET +#define SDMA0_PAGE_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA0_PAGE_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA0_PAGE_IB_BASE_LO +#define SDMA0_PAGE_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA0_PAGE_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA0_PAGE_IB_BASE_HI +#define SDMA0_PAGE_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA0_PAGE_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_PAGE_IB_SIZE +#define SDMA0_PAGE_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA0_PAGE_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA0_PAGE_SKIP_CNTL +#define SDMA0_PAGE_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA0_PAGE_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA0_PAGE_CONTEXT_STATUS +#define SDMA0_PAGE_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA0_PAGE_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA0_PAGE_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA0_PAGE_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA0_PAGE_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA0_PAGE_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA0_PAGE_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA0_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA0_PAGE_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA0_PAGE_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA0_PAGE_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA0_PAGE_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA0_PAGE_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA0_PAGE_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA0_PAGE_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA0_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA0_PAGE_DOORBELL +#define SDMA0_PAGE_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA0_PAGE_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA0_PAGE_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA0_PAGE_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA0_PAGE_STATUS +#define SDMA0_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA0_PAGE_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA0_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA0_PAGE_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA0_PAGE_DOORBELL_LOG +#define SDMA0_PAGE_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA0_PAGE_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA0_PAGE_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA0_PAGE_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA0_PAGE_WATERMARK +#define SDMA0_PAGE_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA0_PAGE_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA0_PAGE_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA0_PAGE_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA0_PAGE_DOORBELL_OFFSET +#define SDMA0_PAGE_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA0_PAGE_DOORBELL_OFFSET__OFFSET_MASK 
0x0FFFFFFCL +//SDMA0_PAGE_CSA_ADDR_LO +#define SDMA0_PAGE_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_PAGE_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_PAGE_CSA_ADDR_HI +#define SDMA0_PAGE_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_PAGE_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_PAGE_IB_SUB_REMAIN +#define SDMA0_PAGE_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA0_PAGE_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA0_PAGE_PREEMPT +#define SDMA0_PAGE_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA0_PAGE_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA0_PAGE_DUMMY_REG +#define SDMA0_PAGE_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA0_PAGE_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA0_PAGE_RB_WPTR_POLL_ADDR_HI +#define SDMA0_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_PAGE_RB_WPTR_POLL_ADDR_LO +#define SDMA0_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_PAGE_RB_AQL_CNTL +#define SDMA0_PAGE_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA0_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA0_PAGE_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA0_PAGE_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA0_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA0_PAGE_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA0_PAGE_MINOR_PTR_UPDATE +#define SDMA0_PAGE_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA0_PAGE_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA0_PAGE_MIDCMD_DATA0 +#define SDMA0_PAGE_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA0_PAGE_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA0_PAGE_MIDCMD_DATA1 +#define SDMA0_PAGE_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA0_PAGE_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA0_PAGE_MIDCMD_DATA2 +#define SDMA0_PAGE_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA0_PAGE_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA0_PAGE_MIDCMD_DATA3 +#define SDMA0_PAGE_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA0_PAGE_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA0_PAGE_MIDCMD_DATA4 +#define SDMA0_PAGE_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA0_PAGE_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA0_PAGE_MIDCMD_DATA5 +#define SDMA0_PAGE_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA0_PAGE_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA0_PAGE_MIDCMD_DATA6 +#define SDMA0_PAGE_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA0_PAGE_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA0_PAGE_MIDCMD_DATA7 +#define SDMA0_PAGE_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA0_PAGE_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA0_PAGE_MIDCMD_DATA8 +#define SDMA0_PAGE_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA0_PAGE_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA0_PAGE_MIDCMD_CNTL +#define SDMA0_PAGE_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA0_PAGE_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA0_PAGE_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA0_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA0_PAGE_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA0_PAGE_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA0_PAGE_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA0_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA0_RLC0_RB_CNTL +#define SDMA0_RLC0_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA0_RLC0_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define 
SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA0_RLC0_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA0_RLC0_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA0_RLC0_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA0_RLC0_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA0_RLC0_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA0_RLC0_RB_BASE +#define SDMA0_RLC0_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA0_RLC0_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC0_RB_BASE_HI +#define SDMA0_RLC0_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC0_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA0_RLC0_RB_RPTR +#define SDMA0_RLC0_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA0_RLC0_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC0_RB_RPTR_HI +#define SDMA0_RLC0_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA0_RLC0_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC0_RB_WPTR +#define SDMA0_RLC0_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA0_RLC0_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC0_RB_WPTR_HI +#define SDMA0_RLC0_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA0_RLC0_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC0_RB_WPTR_POLL_CNTL +#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA0_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA0_RLC0_RB_RPTR_ADDR_HI +#define SDMA0_RLC0_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC0_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC0_RB_RPTR_ADDR_LO +#define SDMA0_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA0_RLC0_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA0_RLC0_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC0_IB_CNTL +#define SDMA0_RLC0_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA0_RLC0_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA0_RLC0_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA0_RLC0_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA0_RLC0_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA0_RLC0_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA0_RLC0_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA0_RLC0_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA0_RLC0_IB_RPTR +#define SDMA0_RLC0_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA0_RLC0_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA0_RLC0_IB_OFFSET +#define SDMA0_RLC0_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA0_RLC0_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA0_RLC0_IB_BASE_LO +#define SDMA0_RLC0_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA0_RLC0_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA0_RLC0_IB_BASE_HI +#define SDMA0_RLC0_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC0_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC0_IB_SIZE +#define SDMA0_RLC0_IB_SIZE__SIZE__SHIFT 0x0 +#define 
SDMA0_RLC0_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA0_RLC0_SKIP_CNTL +#define SDMA0_RLC0_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA0_RLC0_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA0_RLC0_CONTEXT_STATUS +#define SDMA0_RLC0_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA0_RLC0_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA0_RLC0_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA0_RLC0_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA0_RLC0_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA0_RLC0_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA0_RLC0_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA0_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA0_RLC0_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA0_RLC0_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA0_RLC0_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA0_RLC0_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA0_RLC0_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA0_RLC0_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA0_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA0_RLC0_DOORBELL +#define SDMA0_RLC0_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA0_RLC0_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA0_RLC0_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA0_RLC0_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA0_RLC0_STATUS +#define SDMA0_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA0_RLC0_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA0_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA0_RLC0_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA0_RLC0_DOORBELL_LOG +#define SDMA0_RLC0_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA0_RLC0_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA0_RLC0_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA0_RLC0_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA0_RLC0_WATERMARK +#define SDMA0_RLC0_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA0_RLC0_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA0_RLC0_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA0_RLC0_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA0_RLC0_DOORBELL_OFFSET +#define SDMA0_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA0_RLC0_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA0_RLC0_CSA_ADDR_LO +#define SDMA0_RLC0_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC0_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC0_CSA_ADDR_HI +#define SDMA0_RLC0_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC0_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC0_IB_SUB_REMAIN +#define SDMA0_RLC0_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA0_RLC0_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA0_RLC0_PREEMPT +#define SDMA0_RLC0_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA0_RLC0_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA0_RLC0_DUMMY_REG +#define SDMA0_RLC0_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA0_RLC0_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA0_RLC0_RB_WPTR_POLL_ADDR_HI +#define SDMA0_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC0_RB_WPTR_POLL_ADDR_LO +#define SDMA0_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC0_RB_AQL_CNTL +#define SDMA0_RLC0_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA0_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA0_RLC0_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA0_RLC0_RB_AQL_CNTL__AQL_ENABLE_MASK 
0x00000001L +#define SDMA0_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA0_RLC0_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA0_RLC0_MINOR_PTR_UPDATE +#define SDMA0_RLC0_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA0_RLC0_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA0_RLC0_MIDCMD_DATA0 +#define SDMA0_RLC0_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA0_RLC0_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA0_RLC0_MIDCMD_DATA1 +#define SDMA0_RLC0_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA0_RLC0_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA0_RLC0_MIDCMD_DATA2 +#define SDMA0_RLC0_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA0_RLC0_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA0_RLC0_MIDCMD_DATA3 +#define SDMA0_RLC0_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA0_RLC0_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA0_RLC0_MIDCMD_DATA4 +#define SDMA0_RLC0_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA0_RLC0_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA0_RLC0_MIDCMD_DATA5 +#define SDMA0_RLC0_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA0_RLC0_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA0_RLC0_MIDCMD_DATA6 +#define SDMA0_RLC0_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA0_RLC0_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA0_RLC0_MIDCMD_DATA7 +#define SDMA0_RLC0_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA0_RLC0_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA0_RLC0_MIDCMD_DATA8 +#define SDMA0_RLC0_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA0_RLC0_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA0_RLC0_MIDCMD_CNTL +#define SDMA0_RLC0_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA0_RLC0_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA0_RLC0_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA0_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA0_RLC0_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA0_RLC0_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA0_RLC0_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA0_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA0_RLC1_RB_CNTL +#define SDMA0_RLC1_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA0_RLC1_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA0_RLC1_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA0_RLC1_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA0_RLC1_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA0_RLC1_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA0_RLC1_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA0_RLC1_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA0_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA0_RLC1_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA0_RLC1_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA0_RLC1_RB_BASE +#define SDMA0_RLC1_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA0_RLC1_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC1_RB_BASE_HI +#define SDMA0_RLC1_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC1_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA0_RLC1_RB_RPTR +#define SDMA0_RLC1_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA0_RLC1_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC1_RB_RPTR_HI +#define SDMA0_RLC1_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA0_RLC1_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC1_RB_WPTR +#define SDMA0_RLC1_RB_WPTR__OFFSET__SHIFT 0x0 +#define 
SDMA0_RLC1_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC1_RB_WPTR_HI +#define SDMA0_RLC1_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA0_RLC1_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC1_RB_WPTR_POLL_CNTL +#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA0_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA0_RLC1_RB_RPTR_ADDR_HI +#define SDMA0_RLC1_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC1_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC1_RB_RPTR_ADDR_LO +#define SDMA0_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA0_RLC1_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA0_RLC1_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC1_IB_CNTL +#define SDMA0_RLC1_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA0_RLC1_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA0_RLC1_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA0_RLC1_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA0_RLC1_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA0_RLC1_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA0_RLC1_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA0_RLC1_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA0_RLC1_IB_RPTR +#define SDMA0_RLC1_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA0_RLC1_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA0_RLC1_IB_OFFSET +#define SDMA0_RLC1_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA0_RLC1_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA0_RLC1_IB_BASE_LO +#define SDMA0_RLC1_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA0_RLC1_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA0_RLC1_IB_BASE_HI +#define SDMA0_RLC1_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC1_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC1_IB_SIZE +#define SDMA0_RLC1_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA0_RLC1_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA0_RLC1_SKIP_CNTL +#define SDMA0_RLC1_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA0_RLC1_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA0_RLC1_CONTEXT_STATUS +#define SDMA0_RLC1_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA0_RLC1_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA0_RLC1_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA0_RLC1_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA0_RLC1_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA0_RLC1_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA0_RLC1_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA0_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA0_RLC1_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA0_RLC1_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA0_RLC1_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA0_RLC1_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA0_RLC1_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA0_RLC1_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA0_RLC1_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA0_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA0_RLC1_DOORBELL +#define 
SDMA0_RLC1_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA0_RLC1_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA0_RLC1_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA0_RLC1_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA0_RLC1_STATUS +#define SDMA0_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA0_RLC1_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA0_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA0_RLC1_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA0_RLC1_DOORBELL_LOG +#define SDMA0_RLC1_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA0_RLC1_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA0_RLC1_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA0_RLC1_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA0_RLC1_WATERMARK +#define SDMA0_RLC1_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA0_RLC1_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA0_RLC1_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA0_RLC1_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA0_RLC1_DOORBELL_OFFSET +#define SDMA0_RLC1_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA0_RLC1_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA0_RLC1_CSA_ADDR_LO +#define SDMA0_RLC1_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC1_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC1_CSA_ADDR_HI +#define SDMA0_RLC1_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC1_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC1_IB_SUB_REMAIN +#define SDMA0_RLC1_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA0_RLC1_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA0_RLC1_PREEMPT +#define SDMA0_RLC1_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA0_RLC1_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA0_RLC1_DUMMY_REG +#define SDMA0_RLC1_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA0_RLC1_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA0_RLC1_RB_WPTR_POLL_ADDR_HI +#define SDMA0_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC1_RB_WPTR_POLL_ADDR_LO +#define SDMA0_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC1_RB_AQL_CNTL +#define SDMA0_RLC1_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA0_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA0_RLC1_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA0_RLC1_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA0_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA0_RLC1_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA0_RLC1_MINOR_PTR_UPDATE +#define SDMA0_RLC1_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA0_RLC1_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA0_RLC1_MIDCMD_DATA0 +#define SDMA0_RLC1_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA0_RLC1_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA0_RLC1_MIDCMD_DATA1 +#define SDMA0_RLC1_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA0_RLC1_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA0_RLC1_MIDCMD_DATA2 +#define SDMA0_RLC1_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA0_RLC1_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA0_RLC1_MIDCMD_DATA3 +#define SDMA0_RLC1_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA0_RLC1_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA0_RLC1_MIDCMD_DATA4 +#define SDMA0_RLC1_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA0_RLC1_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA0_RLC1_MIDCMD_DATA5 +#define SDMA0_RLC1_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA0_RLC1_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA0_RLC1_MIDCMD_DATA6 +#define SDMA0_RLC1_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define 
SDMA0_RLC1_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA0_RLC1_MIDCMD_DATA7 +#define SDMA0_RLC1_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA0_RLC1_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA0_RLC1_MIDCMD_DATA8 +#define SDMA0_RLC1_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA0_RLC1_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA0_RLC1_MIDCMD_CNTL +#define SDMA0_RLC1_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA0_RLC1_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA0_RLC1_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA0_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA0_RLC1_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA0_RLC1_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA0_RLC1_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA0_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA0_RLC2_RB_CNTL +#define SDMA0_RLC2_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA0_RLC2_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA0_RLC2_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA0_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA0_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA0_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA0_RLC2_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA0_RLC2_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA0_RLC2_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA0_RLC2_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA0_RLC2_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA0_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA0_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA0_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA0_RLC2_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA0_RLC2_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA0_RLC2_RB_BASE +#define SDMA0_RLC2_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA0_RLC2_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC2_RB_BASE_HI +#define SDMA0_RLC2_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC2_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA0_RLC2_RB_RPTR +#define SDMA0_RLC2_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA0_RLC2_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC2_RB_RPTR_HI +#define SDMA0_RLC2_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA0_RLC2_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC2_RB_WPTR +#define SDMA0_RLC2_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA0_RLC2_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC2_RB_WPTR_HI +#define SDMA0_RLC2_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA0_RLC2_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC2_RB_WPTR_POLL_CNTL +#define SDMA0_RLC2_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA0_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA0_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA0_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA0_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA0_RLC2_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA0_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA0_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA0_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA0_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA0_RLC2_RB_RPTR_ADDR_HI +#define SDMA0_RLC2_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC2_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC2_RB_RPTR_ADDR_LO +#define SDMA0_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA0_RLC2_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L 
+#define SDMA0_RLC2_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC2_IB_CNTL +#define SDMA0_RLC2_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA0_RLC2_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA0_RLC2_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA0_RLC2_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA0_RLC2_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA0_RLC2_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA0_RLC2_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA0_RLC2_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA0_RLC2_IB_RPTR +#define SDMA0_RLC2_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA0_RLC2_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA0_RLC2_IB_OFFSET +#define SDMA0_RLC2_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA0_RLC2_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA0_RLC2_IB_BASE_LO +#define SDMA0_RLC2_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA0_RLC2_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA0_RLC2_IB_BASE_HI +#define SDMA0_RLC2_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC2_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC2_IB_SIZE +#define SDMA0_RLC2_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA0_RLC2_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA0_RLC2_SKIP_CNTL +#define SDMA0_RLC2_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA0_RLC2_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA0_RLC2_CONTEXT_STATUS +#define SDMA0_RLC2_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA0_RLC2_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA0_RLC2_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA0_RLC2_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA0_RLC2_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA0_RLC2_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA0_RLC2_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA0_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA0_RLC2_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA0_RLC2_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA0_RLC2_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA0_RLC2_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA0_RLC2_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA0_RLC2_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA0_RLC2_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA0_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA0_RLC2_DOORBELL +#define SDMA0_RLC2_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA0_RLC2_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA0_RLC2_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA0_RLC2_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA0_RLC2_STATUS +#define SDMA0_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA0_RLC2_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA0_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA0_RLC2_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA0_RLC2_DOORBELL_LOG +#define SDMA0_RLC2_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA0_RLC2_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA0_RLC2_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA0_RLC2_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA0_RLC2_WATERMARK +#define SDMA0_RLC2_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA0_RLC2_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA0_RLC2_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA0_RLC2_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA0_RLC2_DOORBELL_OFFSET +#define SDMA0_RLC2_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA0_RLC2_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA0_RLC2_CSA_ADDR_LO +#define SDMA0_RLC2_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define 
SDMA0_RLC2_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC2_CSA_ADDR_HI +#define SDMA0_RLC2_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC2_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC2_IB_SUB_REMAIN +#define SDMA0_RLC2_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA0_RLC2_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA0_RLC2_PREEMPT +#define SDMA0_RLC2_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA0_RLC2_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA0_RLC2_DUMMY_REG +#define SDMA0_RLC2_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA0_RLC2_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA0_RLC2_RB_WPTR_POLL_ADDR_HI +#define SDMA0_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC2_RB_WPTR_POLL_ADDR_LO +#define SDMA0_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC2_RB_AQL_CNTL +#define SDMA0_RLC2_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA0_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA0_RLC2_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA0_RLC2_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA0_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA0_RLC2_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA0_RLC2_MINOR_PTR_UPDATE +#define SDMA0_RLC2_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA0_RLC2_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA0_RLC2_MIDCMD_DATA0 +#define SDMA0_RLC2_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA0_RLC2_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA0_RLC2_MIDCMD_DATA1 +#define SDMA0_RLC2_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA0_RLC2_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA0_RLC2_MIDCMD_DATA2 +#define SDMA0_RLC2_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA0_RLC2_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA0_RLC2_MIDCMD_DATA3 +#define SDMA0_RLC2_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA0_RLC2_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA0_RLC2_MIDCMD_DATA4 +#define SDMA0_RLC2_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA0_RLC2_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA0_RLC2_MIDCMD_DATA5 +#define SDMA0_RLC2_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA0_RLC2_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA0_RLC2_MIDCMD_DATA6 +#define SDMA0_RLC2_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA0_RLC2_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA0_RLC2_MIDCMD_DATA7 +#define SDMA0_RLC2_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA0_RLC2_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA0_RLC2_MIDCMD_DATA8 +#define SDMA0_RLC2_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA0_RLC2_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA0_RLC2_MIDCMD_CNTL +#define SDMA0_RLC2_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA0_RLC2_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA0_RLC2_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA0_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA0_RLC2_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA0_RLC2_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA0_RLC2_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA0_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA0_RLC3_RB_CNTL +#define SDMA0_RLC3_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA0_RLC3_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA0_RLC3_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA0_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA0_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA0_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA0_RLC3_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define 
SDMA0_RLC3_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA0_RLC3_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA0_RLC3_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA0_RLC3_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA0_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA0_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA0_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA0_RLC3_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA0_RLC3_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA0_RLC3_RB_BASE +#define SDMA0_RLC3_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA0_RLC3_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC3_RB_BASE_HI +#define SDMA0_RLC3_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC3_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA0_RLC3_RB_RPTR +#define SDMA0_RLC3_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA0_RLC3_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC3_RB_RPTR_HI +#define SDMA0_RLC3_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA0_RLC3_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC3_RB_WPTR +#define SDMA0_RLC3_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA0_RLC3_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC3_RB_WPTR_HI +#define SDMA0_RLC3_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA0_RLC3_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC3_RB_WPTR_POLL_CNTL +#define SDMA0_RLC3_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA0_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA0_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA0_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA0_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA0_RLC3_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA0_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA0_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA0_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA0_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA0_RLC3_RB_RPTR_ADDR_HI +#define SDMA0_RLC3_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC3_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC3_RB_RPTR_ADDR_LO +#define SDMA0_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA0_RLC3_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA0_RLC3_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC3_IB_CNTL +#define SDMA0_RLC3_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA0_RLC3_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA0_RLC3_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA0_RLC3_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA0_RLC3_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA0_RLC3_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA0_RLC3_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA0_RLC3_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA0_RLC3_IB_RPTR +#define SDMA0_RLC3_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA0_RLC3_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA0_RLC3_IB_OFFSET +#define SDMA0_RLC3_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA0_RLC3_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA0_RLC3_IB_BASE_LO +#define SDMA0_RLC3_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA0_RLC3_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA0_RLC3_IB_BASE_HI +#define SDMA0_RLC3_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC3_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC3_IB_SIZE +#define SDMA0_RLC3_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA0_RLC3_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA0_RLC3_SKIP_CNTL +#define SDMA0_RLC3_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define 
SDMA0_RLC3_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA0_RLC3_CONTEXT_STATUS +#define SDMA0_RLC3_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA0_RLC3_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA0_RLC3_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA0_RLC3_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA0_RLC3_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA0_RLC3_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA0_RLC3_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA0_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA0_RLC3_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA0_RLC3_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA0_RLC3_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA0_RLC3_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA0_RLC3_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA0_RLC3_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA0_RLC3_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA0_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA0_RLC3_DOORBELL +#define SDMA0_RLC3_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA0_RLC3_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA0_RLC3_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA0_RLC3_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA0_RLC3_STATUS +#define SDMA0_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA0_RLC3_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA0_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA0_RLC3_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA0_RLC3_DOORBELL_LOG +#define SDMA0_RLC3_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA0_RLC3_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA0_RLC3_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA0_RLC3_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA0_RLC3_WATERMARK +#define SDMA0_RLC3_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA0_RLC3_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA0_RLC3_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA0_RLC3_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA0_RLC3_DOORBELL_OFFSET +#define SDMA0_RLC3_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA0_RLC3_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA0_RLC3_CSA_ADDR_LO +#define SDMA0_RLC3_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC3_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC3_CSA_ADDR_HI +#define SDMA0_RLC3_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC3_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC3_IB_SUB_REMAIN +#define SDMA0_RLC3_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA0_RLC3_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA0_RLC3_PREEMPT +#define SDMA0_RLC3_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA0_RLC3_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA0_RLC3_DUMMY_REG +#define SDMA0_RLC3_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA0_RLC3_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA0_RLC3_RB_WPTR_POLL_ADDR_HI +#define SDMA0_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC3_RB_WPTR_POLL_ADDR_LO +#define SDMA0_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC3_RB_AQL_CNTL +#define SDMA0_RLC3_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA0_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA0_RLC3_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA0_RLC3_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA0_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA0_RLC3_RB_AQL_CNTL__PACKET_STEP_MASK 
0x0000FF00L +//SDMA0_RLC3_MINOR_PTR_UPDATE +#define SDMA0_RLC3_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA0_RLC3_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA0_RLC3_MIDCMD_DATA0 +#define SDMA0_RLC3_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA0_RLC3_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA0_RLC3_MIDCMD_DATA1 +#define SDMA0_RLC3_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA0_RLC3_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA0_RLC3_MIDCMD_DATA2 +#define SDMA0_RLC3_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA0_RLC3_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA0_RLC3_MIDCMD_DATA3 +#define SDMA0_RLC3_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA0_RLC3_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA0_RLC3_MIDCMD_DATA4 +#define SDMA0_RLC3_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA0_RLC3_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA0_RLC3_MIDCMD_DATA5 +#define SDMA0_RLC3_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA0_RLC3_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA0_RLC3_MIDCMD_DATA6 +#define SDMA0_RLC3_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA0_RLC3_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA0_RLC3_MIDCMD_DATA7 +#define SDMA0_RLC3_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA0_RLC3_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA0_RLC3_MIDCMD_DATA8 +#define SDMA0_RLC3_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA0_RLC3_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA0_RLC3_MIDCMD_CNTL +#define SDMA0_RLC3_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA0_RLC3_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA0_RLC3_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA0_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA0_RLC3_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA0_RLC3_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA0_RLC3_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA0_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA0_RLC4_RB_CNTL +#define SDMA0_RLC4_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA0_RLC4_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA0_RLC4_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA0_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA0_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA0_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA0_RLC4_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA0_RLC4_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA0_RLC4_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA0_RLC4_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA0_RLC4_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA0_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA0_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA0_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA0_RLC4_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA0_RLC4_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA0_RLC4_RB_BASE +#define SDMA0_RLC4_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA0_RLC4_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC4_RB_BASE_HI +#define SDMA0_RLC4_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC4_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA0_RLC4_RB_RPTR +#define SDMA0_RLC4_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA0_RLC4_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC4_RB_RPTR_HI +#define SDMA0_RLC4_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA0_RLC4_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC4_RB_WPTR +#define SDMA0_RLC4_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA0_RLC4_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC4_RB_WPTR_HI +#define SDMA0_RLC4_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define 
SDMA0_RLC4_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC4_RB_WPTR_POLL_CNTL +#define SDMA0_RLC4_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA0_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA0_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA0_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA0_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA0_RLC4_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA0_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA0_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA0_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA0_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA0_RLC4_RB_RPTR_ADDR_HI +#define SDMA0_RLC4_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC4_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC4_RB_RPTR_ADDR_LO +#define SDMA0_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA0_RLC4_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA0_RLC4_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC4_IB_CNTL +#define SDMA0_RLC4_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA0_RLC4_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA0_RLC4_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA0_RLC4_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA0_RLC4_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA0_RLC4_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA0_RLC4_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA0_RLC4_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA0_RLC4_IB_RPTR +#define SDMA0_RLC4_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA0_RLC4_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA0_RLC4_IB_OFFSET +#define SDMA0_RLC4_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA0_RLC4_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA0_RLC4_IB_BASE_LO +#define SDMA0_RLC4_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA0_RLC4_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA0_RLC4_IB_BASE_HI +#define SDMA0_RLC4_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC4_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC4_IB_SIZE +#define SDMA0_RLC4_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA0_RLC4_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA0_RLC4_SKIP_CNTL +#define SDMA0_RLC4_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA0_RLC4_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA0_RLC4_CONTEXT_STATUS +#define SDMA0_RLC4_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA0_RLC4_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA0_RLC4_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA0_RLC4_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA0_RLC4_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA0_RLC4_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA0_RLC4_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA0_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA0_RLC4_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA0_RLC4_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA0_RLC4_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA0_RLC4_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA0_RLC4_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA0_RLC4_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA0_RLC4_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA0_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA0_RLC4_DOORBELL +#define SDMA0_RLC4_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA0_RLC4_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA0_RLC4_DOORBELL__ENABLE_MASK 0x10000000L +#define 
SDMA0_RLC4_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA0_RLC4_STATUS +#define SDMA0_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA0_RLC4_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA0_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA0_RLC4_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA0_RLC4_DOORBELL_LOG +#define SDMA0_RLC4_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA0_RLC4_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA0_RLC4_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA0_RLC4_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA0_RLC4_WATERMARK +#define SDMA0_RLC4_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA0_RLC4_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA0_RLC4_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA0_RLC4_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA0_RLC4_DOORBELL_OFFSET +#define SDMA0_RLC4_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA0_RLC4_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA0_RLC4_CSA_ADDR_LO +#define SDMA0_RLC4_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC4_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC4_CSA_ADDR_HI +#define SDMA0_RLC4_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC4_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC4_IB_SUB_REMAIN +#define SDMA0_RLC4_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA0_RLC4_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA0_RLC4_PREEMPT +#define SDMA0_RLC4_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA0_RLC4_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA0_RLC4_DUMMY_REG +#define SDMA0_RLC4_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA0_RLC4_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA0_RLC4_RB_WPTR_POLL_ADDR_HI +#define SDMA0_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC4_RB_WPTR_POLL_ADDR_LO +#define SDMA0_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC4_RB_AQL_CNTL +#define SDMA0_RLC4_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA0_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA0_RLC4_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA0_RLC4_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA0_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA0_RLC4_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA0_RLC4_MINOR_PTR_UPDATE +#define SDMA0_RLC4_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA0_RLC4_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA0_RLC4_MIDCMD_DATA0 +#define SDMA0_RLC4_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA0_RLC4_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA0_RLC4_MIDCMD_DATA1 +#define SDMA0_RLC4_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA0_RLC4_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA0_RLC4_MIDCMD_DATA2 +#define SDMA0_RLC4_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA0_RLC4_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA0_RLC4_MIDCMD_DATA3 +#define SDMA0_RLC4_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA0_RLC4_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA0_RLC4_MIDCMD_DATA4 +#define SDMA0_RLC4_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA0_RLC4_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA0_RLC4_MIDCMD_DATA5 +#define SDMA0_RLC4_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA0_RLC4_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA0_RLC4_MIDCMD_DATA6 +#define SDMA0_RLC4_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA0_RLC4_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA0_RLC4_MIDCMD_DATA7 +#define SDMA0_RLC4_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA0_RLC4_MIDCMD_DATA7__DATA7_MASK 
0xFFFFFFFFL +//SDMA0_RLC4_MIDCMD_DATA8 +#define SDMA0_RLC4_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA0_RLC4_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA0_RLC4_MIDCMD_CNTL +#define SDMA0_RLC4_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA0_RLC4_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA0_RLC4_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA0_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA0_RLC4_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA0_RLC4_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA0_RLC4_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA0_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA0_RLC5_RB_CNTL +#define SDMA0_RLC5_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA0_RLC5_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA0_RLC5_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA0_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA0_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA0_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA0_RLC5_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA0_RLC5_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA0_RLC5_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA0_RLC5_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA0_RLC5_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA0_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA0_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA0_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA0_RLC5_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA0_RLC5_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA0_RLC5_RB_BASE +#define SDMA0_RLC5_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA0_RLC5_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC5_RB_BASE_HI +#define SDMA0_RLC5_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC5_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA0_RLC5_RB_RPTR +#define SDMA0_RLC5_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA0_RLC5_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC5_RB_RPTR_HI +#define SDMA0_RLC5_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA0_RLC5_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC5_RB_WPTR +#define SDMA0_RLC5_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA0_RLC5_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC5_RB_WPTR_HI +#define SDMA0_RLC5_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA0_RLC5_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC5_RB_WPTR_POLL_CNTL +#define SDMA0_RLC5_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA0_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA0_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA0_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA0_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA0_RLC5_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA0_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA0_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA0_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA0_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA0_RLC5_RB_RPTR_ADDR_HI +#define SDMA0_RLC5_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC5_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC5_RB_RPTR_ADDR_LO +#define SDMA0_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA0_RLC5_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA0_RLC5_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC5_IB_CNTL +#define SDMA0_RLC5_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define 
SDMA0_RLC5_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA0_RLC5_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA0_RLC5_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA0_RLC5_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA0_RLC5_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA0_RLC5_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA0_RLC5_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA0_RLC5_IB_RPTR +#define SDMA0_RLC5_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA0_RLC5_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA0_RLC5_IB_OFFSET +#define SDMA0_RLC5_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA0_RLC5_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA0_RLC5_IB_BASE_LO +#define SDMA0_RLC5_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA0_RLC5_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA0_RLC5_IB_BASE_HI +#define SDMA0_RLC5_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC5_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC5_IB_SIZE +#define SDMA0_RLC5_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA0_RLC5_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA0_RLC5_SKIP_CNTL +#define SDMA0_RLC5_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA0_RLC5_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA0_RLC5_CONTEXT_STATUS +#define SDMA0_RLC5_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA0_RLC5_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA0_RLC5_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA0_RLC5_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA0_RLC5_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA0_RLC5_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA0_RLC5_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA0_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA0_RLC5_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA0_RLC5_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA0_RLC5_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA0_RLC5_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA0_RLC5_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA0_RLC5_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA0_RLC5_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA0_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA0_RLC5_DOORBELL +#define SDMA0_RLC5_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA0_RLC5_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA0_RLC5_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA0_RLC5_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA0_RLC5_STATUS +#define SDMA0_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA0_RLC5_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA0_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA0_RLC5_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA0_RLC5_DOORBELL_LOG +#define SDMA0_RLC5_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA0_RLC5_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA0_RLC5_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA0_RLC5_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA0_RLC5_WATERMARK +#define SDMA0_RLC5_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA0_RLC5_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA0_RLC5_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA0_RLC5_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA0_RLC5_DOORBELL_OFFSET +#define SDMA0_RLC5_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA0_RLC5_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA0_RLC5_CSA_ADDR_LO +#define SDMA0_RLC5_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC5_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC5_CSA_ADDR_HI +#define SDMA0_RLC5_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC5_CSA_ADDR_HI__ADDR_MASK 
0xFFFFFFFFL +//SDMA0_RLC5_IB_SUB_REMAIN +#define SDMA0_RLC5_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA0_RLC5_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA0_RLC5_PREEMPT +#define SDMA0_RLC5_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA0_RLC5_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA0_RLC5_DUMMY_REG +#define SDMA0_RLC5_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA0_RLC5_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA0_RLC5_RB_WPTR_POLL_ADDR_HI +#define SDMA0_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC5_RB_WPTR_POLL_ADDR_LO +#define SDMA0_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC5_RB_AQL_CNTL +#define SDMA0_RLC5_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA0_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA0_RLC5_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA0_RLC5_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA0_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA0_RLC5_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA0_RLC5_MINOR_PTR_UPDATE +#define SDMA0_RLC5_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA0_RLC5_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA0_RLC5_MIDCMD_DATA0 +#define SDMA0_RLC5_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA0_RLC5_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA0_RLC5_MIDCMD_DATA1 +#define SDMA0_RLC5_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA0_RLC5_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA0_RLC5_MIDCMD_DATA2 +#define SDMA0_RLC5_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA0_RLC5_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA0_RLC5_MIDCMD_DATA3 +#define SDMA0_RLC5_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA0_RLC5_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA0_RLC5_MIDCMD_DATA4 +#define SDMA0_RLC5_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA0_RLC5_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA0_RLC5_MIDCMD_DATA5 +#define SDMA0_RLC5_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA0_RLC5_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA0_RLC5_MIDCMD_DATA6 +#define SDMA0_RLC5_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA0_RLC5_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA0_RLC5_MIDCMD_DATA7 +#define SDMA0_RLC5_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA0_RLC5_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA0_RLC5_MIDCMD_DATA8 +#define SDMA0_RLC5_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA0_RLC5_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA0_RLC5_MIDCMD_CNTL +#define SDMA0_RLC5_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA0_RLC5_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA0_RLC5_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA0_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA0_RLC5_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA0_RLC5_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA0_RLC5_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA0_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA0_RLC6_RB_CNTL +#define SDMA0_RLC6_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA0_RLC6_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA0_RLC6_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA0_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA0_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA0_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA0_RLC6_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA0_RLC6_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA0_RLC6_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA0_RLC6_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define 
SDMA0_RLC6_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA0_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA0_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA0_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA0_RLC6_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA0_RLC6_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA0_RLC6_RB_BASE +#define SDMA0_RLC6_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA0_RLC6_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC6_RB_BASE_HI +#define SDMA0_RLC6_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC6_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA0_RLC6_RB_RPTR +#define SDMA0_RLC6_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA0_RLC6_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC6_RB_RPTR_HI +#define SDMA0_RLC6_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA0_RLC6_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC6_RB_WPTR +#define SDMA0_RLC6_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA0_RLC6_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC6_RB_WPTR_HI +#define SDMA0_RLC6_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA0_RLC6_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC6_RB_WPTR_POLL_CNTL +#define SDMA0_RLC6_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA0_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA0_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA0_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA0_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA0_RLC6_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA0_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA0_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA0_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA0_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA0_RLC6_RB_RPTR_ADDR_HI +#define SDMA0_RLC6_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC6_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC6_RB_RPTR_ADDR_LO +#define SDMA0_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA0_RLC6_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA0_RLC6_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC6_IB_CNTL +#define SDMA0_RLC6_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA0_RLC6_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA0_RLC6_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA0_RLC6_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA0_RLC6_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA0_RLC6_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA0_RLC6_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA0_RLC6_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA0_RLC6_IB_RPTR +#define SDMA0_RLC6_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA0_RLC6_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA0_RLC6_IB_OFFSET +#define SDMA0_RLC6_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA0_RLC6_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA0_RLC6_IB_BASE_LO +#define SDMA0_RLC6_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA0_RLC6_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA0_RLC6_IB_BASE_HI +#define SDMA0_RLC6_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC6_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC6_IB_SIZE +#define SDMA0_RLC6_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA0_RLC6_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA0_RLC6_SKIP_CNTL +#define SDMA0_RLC6_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA0_RLC6_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA0_RLC6_CONTEXT_STATUS +#define SDMA0_RLC6_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define 
SDMA0_RLC6_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA0_RLC6_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA0_RLC6_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA0_RLC6_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA0_RLC6_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA0_RLC6_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA0_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA0_RLC6_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA0_RLC6_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA0_RLC6_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA0_RLC6_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA0_RLC6_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA0_RLC6_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA0_RLC6_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA0_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA0_RLC6_DOORBELL +#define SDMA0_RLC6_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA0_RLC6_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA0_RLC6_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA0_RLC6_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA0_RLC6_STATUS +#define SDMA0_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA0_RLC6_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA0_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA0_RLC6_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA0_RLC6_DOORBELL_LOG +#define SDMA0_RLC6_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA0_RLC6_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA0_RLC6_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA0_RLC6_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA0_RLC6_WATERMARK +#define SDMA0_RLC6_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA0_RLC6_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA0_RLC6_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA0_RLC6_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA0_RLC6_DOORBELL_OFFSET +#define SDMA0_RLC6_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA0_RLC6_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA0_RLC6_CSA_ADDR_LO +#define SDMA0_RLC6_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC6_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC6_CSA_ADDR_HI +#define SDMA0_RLC6_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC6_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC6_IB_SUB_REMAIN +#define SDMA0_RLC6_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA0_RLC6_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA0_RLC6_PREEMPT +#define SDMA0_RLC6_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA0_RLC6_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA0_RLC6_DUMMY_REG +#define SDMA0_RLC6_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA0_RLC6_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA0_RLC6_RB_WPTR_POLL_ADDR_HI +#define SDMA0_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC6_RB_WPTR_POLL_ADDR_LO +#define SDMA0_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC6_RB_AQL_CNTL +#define SDMA0_RLC6_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA0_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA0_RLC6_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA0_RLC6_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA0_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA0_RLC6_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA0_RLC6_MINOR_PTR_UPDATE +#define SDMA0_RLC6_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA0_RLC6_MINOR_PTR_UPDATE__ENABLE_MASK 
0x00000001L +//SDMA0_RLC6_MIDCMD_DATA0 +#define SDMA0_RLC6_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA0_RLC6_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA0_RLC6_MIDCMD_DATA1 +#define SDMA0_RLC6_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA0_RLC6_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA0_RLC6_MIDCMD_DATA2 +#define SDMA0_RLC6_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA0_RLC6_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA0_RLC6_MIDCMD_DATA3 +#define SDMA0_RLC6_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA0_RLC6_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA0_RLC6_MIDCMD_DATA4 +#define SDMA0_RLC6_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA0_RLC6_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA0_RLC6_MIDCMD_DATA5 +#define SDMA0_RLC6_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA0_RLC6_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA0_RLC6_MIDCMD_DATA6 +#define SDMA0_RLC6_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA0_RLC6_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA0_RLC6_MIDCMD_DATA7 +#define SDMA0_RLC6_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA0_RLC6_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA0_RLC6_MIDCMD_DATA8 +#define SDMA0_RLC6_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA0_RLC6_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA0_RLC6_MIDCMD_CNTL +#define SDMA0_RLC6_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA0_RLC6_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA0_RLC6_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA0_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA0_RLC6_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA0_RLC6_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA0_RLC6_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA0_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA0_RLC7_RB_CNTL +#define SDMA0_RLC7_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA0_RLC7_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA0_RLC7_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA0_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA0_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA0_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA0_RLC7_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA0_RLC7_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA0_RLC7_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA0_RLC7_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA0_RLC7_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA0_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA0_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA0_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA0_RLC7_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA0_RLC7_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA0_RLC7_RB_BASE +#define SDMA0_RLC7_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA0_RLC7_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC7_RB_BASE_HI +#define SDMA0_RLC7_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC7_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA0_RLC7_RB_RPTR +#define SDMA0_RLC7_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA0_RLC7_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC7_RB_RPTR_HI +#define SDMA0_RLC7_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA0_RLC7_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC7_RB_WPTR +#define SDMA0_RLC7_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA0_RLC7_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC7_RB_WPTR_HI +#define SDMA0_RLC7_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA0_RLC7_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA0_RLC7_RB_WPTR_POLL_CNTL +#define SDMA0_RLC7_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define 
SDMA0_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA0_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA0_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA0_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA0_RLC7_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA0_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA0_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA0_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA0_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA0_RLC7_RB_RPTR_ADDR_HI +#define SDMA0_RLC7_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC7_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC7_RB_RPTR_ADDR_LO +#define SDMA0_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA0_RLC7_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA0_RLC7_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC7_IB_CNTL +#define SDMA0_RLC7_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA0_RLC7_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA0_RLC7_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA0_RLC7_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA0_RLC7_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA0_RLC7_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA0_RLC7_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA0_RLC7_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA0_RLC7_IB_RPTR +#define SDMA0_RLC7_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA0_RLC7_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA0_RLC7_IB_OFFSET +#define SDMA0_RLC7_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA0_RLC7_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA0_RLC7_IB_BASE_LO +#define SDMA0_RLC7_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA0_RLC7_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA0_RLC7_IB_BASE_HI +#define SDMA0_RLC7_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC7_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC7_IB_SIZE +#define SDMA0_RLC7_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA0_RLC7_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA0_RLC7_SKIP_CNTL +#define SDMA0_RLC7_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA0_RLC7_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA0_RLC7_CONTEXT_STATUS +#define SDMA0_RLC7_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA0_RLC7_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA0_RLC7_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA0_RLC7_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA0_RLC7_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA0_RLC7_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA0_RLC7_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA0_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA0_RLC7_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA0_RLC7_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA0_RLC7_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA0_RLC7_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA0_RLC7_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA0_RLC7_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA0_RLC7_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA0_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA0_RLC7_DOORBELL +#define SDMA0_RLC7_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA0_RLC7_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA0_RLC7_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA0_RLC7_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA0_RLC7_STATUS +#define SDMA0_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define 
SDMA0_RLC7_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA0_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA0_RLC7_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA0_RLC7_DOORBELL_LOG +#define SDMA0_RLC7_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA0_RLC7_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA0_RLC7_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA0_RLC7_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA0_RLC7_WATERMARK +#define SDMA0_RLC7_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA0_RLC7_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA0_RLC7_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA0_RLC7_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA0_RLC7_DOORBELL_OFFSET +#define SDMA0_RLC7_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA0_RLC7_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA0_RLC7_CSA_ADDR_LO +#define SDMA0_RLC7_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC7_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC7_CSA_ADDR_HI +#define SDMA0_RLC7_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC7_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC7_IB_SUB_REMAIN +#define SDMA0_RLC7_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA0_RLC7_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA0_RLC7_PREEMPT +#define SDMA0_RLC7_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA0_RLC7_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA0_RLC7_DUMMY_REG +#define SDMA0_RLC7_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA0_RLC7_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA0_RLC7_RB_WPTR_POLL_ADDR_HI +#define SDMA0_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA0_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA0_RLC7_RB_WPTR_POLL_ADDR_LO +#define SDMA0_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA0_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA0_RLC7_RB_AQL_CNTL +#define SDMA0_RLC7_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA0_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA0_RLC7_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA0_RLC7_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA0_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA0_RLC7_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA0_RLC7_MINOR_PTR_UPDATE +#define SDMA0_RLC7_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA0_RLC7_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA0_RLC7_MIDCMD_DATA0 +#define SDMA0_RLC7_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA0_RLC7_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA0_RLC7_MIDCMD_DATA1 +#define SDMA0_RLC7_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA0_RLC7_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA0_RLC7_MIDCMD_DATA2 +#define SDMA0_RLC7_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA0_RLC7_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA0_RLC7_MIDCMD_DATA3 +#define SDMA0_RLC7_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA0_RLC7_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA0_RLC7_MIDCMD_DATA4 +#define SDMA0_RLC7_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA0_RLC7_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA0_RLC7_MIDCMD_DATA5 +#define SDMA0_RLC7_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA0_RLC7_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA0_RLC7_MIDCMD_DATA6 +#define SDMA0_RLC7_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA0_RLC7_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA0_RLC7_MIDCMD_DATA7 +#define SDMA0_RLC7_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA0_RLC7_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA0_RLC7_MIDCMD_DATA8 +#define SDMA0_RLC7_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA0_RLC7_MIDCMD_DATA8__DATA8_MASK 
0xFFFFFFFFL
+//SDMA0_RLC7_MIDCMD_CNTL
+#define SDMA0_RLC7_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA0_RLC7_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA0_RLC7_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA0_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA0_RLC7_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA0_RLC7_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA0_RLC7_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA0_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_2_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_2_2_offset.h
new file mode 100644
index 000000000000..681233a55a1d
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_2_2_offset.h
@@ -0,0 +1,1043 @@
+/*
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ +#ifndef _sdma1_4_2_2_OFFSET_HEADER +#define _sdma1_4_2_2_OFFSET_HEADER + + + +// addressBlock: sdma1_sdma1dec +// base address: 0x6180 +#define mmSDMA1_UCODE_ADDR 0x0000 +#define mmSDMA1_UCODE_ADDR_BASE_IDX 0 +#define mmSDMA1_UCODE_DATA 0x0001 +#define mmSDMA1_UCODE_DATA_BASE_IDX 0 +#define mmSDMA1_VM_CNTL 0x0004 +#define mmSDMA1_VM_CNTL_BASE_IDX 0 +#define mmSDMA1_VM_CTX_LO 0x0005 +#define mmSDMA1_VM_CTX_LO_BASE_IDX 0 +#define mmSDMA1_VM_CTX_HI 0x0006 +#define mmSDMA1_VM_CTX_HI_BASE_IDX 0 +#define mmSDMA1_ACTIVE_FCN_ID 0x0007 +#define mmSDMA1_ACTIVE_FCN_ID_BASE_IDX 0 +#define mmSDMA1_VM_CTX_CNTL 0x0008 +#define mmSDMA1_VM_CTX_CNTL_BASE_IDX 0 +#define mmSDMA1_VIRT_RESET_REQ 0x0009 +#define mmSDMA1_VIRT_RESET_REQ_BASE_IDX 0 +#define mmSDMA1_VF_ENABLE 0x000a +#define mmSDMA1_VF_ENABLE_BASE_IDX 0 +#define mmSDMA1_CONTEXT_REG_TYPE0 0x000b +#define mmSDMA1_CONTEXT_REG_TYPE0_BASE_IDX 0 +#define mmSDMA1_CONTEXT_REG_TYPE1 0x000c +#define mmSDMA1_CONTEXT_REG_TYPE1_BASE_IDX 0 +#define mmSDMA1_CONTEXT_REG_TYPE2 0x000d +#define mmSDMA1_CONTEXT_REG_TYPE2_BASE_IDX 0 +#define mmSDMA1_CONTEXT_REG_TYPE3 0x000e +#define mmSDMA1_CONTEXT_REG_TYPE3_BASE_IDX 0 +#define mmSDMA1_PUB_REG_TYPE0 0x000f +#define mmSDMA1_PUB_REG_TYPE0_BASE_IDX 0 +#define mmSDMA1_PUB_REG_TYPE1 0x0010 +#define mmSDMA1_PUB_REG_TYPE1_BASE_IDX 0 +#define mmSDMA1_PUB_REG_TYPE2 0x0011 +#define mmSDMA1_PUB_REG_TYPE2_BASE_IDX 0 +#define mmSDMA1_PUB_REG_TYPE3 0x0012 +#define mmSDMA1_PUB_REG_TYPE3_BASE_IDX 0 +#define mmSDMA1_MMHUB_CNTL 0x0013 +#define mmSDMA1_MMHUB_CNTL_BASE_IDX 0 +#define mmSDMA1_CONTEXT_GROUP_BOUNDARY 0x0019 +#define mmSDMA1_CONTEXT_GROUP_BOUNDARY_BASE_IDX 0 +#define mmSDMA1_POWER_CNTL 0x001a +#define mmSDMA1_POWER_CNTL_BASE_IDX 0 +#define mmSDMA1_CLK_CTRL 0x001b +#define mmSDMA1_CLK_CTRL_BASE_IDX 0 +#define mmSDMA1_CNTL 0x001c +#define mmSDMA1_CNTL_BASE_IDX 0 +#define mmSDMA1_CHICKEN_BITS 0x001d +#define mmSDMA1_CHICKEN_BITS_BASE_IDX 0 +#define mmSDMA1_GB_ADDR_CONFIG 0x001e +#define mmSDMA1_GB_ADDR_CONFIG_BASE_IDX 0 +#define mmSDMA1_GB_ADDR_CONFIG_READ 0x001f +#define mmSDMA1_GB_ADDR_CONFIG_READ_BASE_IDX 0 +#define mmSDMA1_RB_RPTR_FETCH_HI 0x0020 +#define mmSDMA1_RB_RPTR_FETCH_HI_BASE_IDX 0 +#define mmSDMA1_SEM_WAIT_FAIL_TIMER_CNTL 0x0021 +#define mmSDMA1_SEM_WAIT_FAIL_TIMER_CNTL_BASE_IDX 0 +#define mmSDMA1_RB_RPTR_FETCH 0x0022 +#define mmSDMA1_RB_RPTR_FETCH_BASE_IDX 0 +#define mmSDMA1_IB_OFFSET_FETCH 0x0023 +#define mmSDMA1_IB_OFFSET_FETCH_BASE_IDX 0 +#define mmSDMA1_PROGRAM 0x0024 +#define mmSDMA1_PROGRAM_BASE_IDX 0 +#define mmSDMA1_STATUS_REG 0x0025 +#define mmSDMA1_STATUS_REG_BASE_IDX 0 +#define mmSDMA1_STATUS1_REG 0x0026 +#define mmSDMA1_STATUS1_REG_BASE_IDX 0 +#define mmSDMA1_RD_BURST_CNTL 0x0027 +#define mmSDMA1_RD_BURST_CNTL_BASE_IDX 0 +#define mmSDMA1_HBM_PAGE_CONFIG 0x0028 +#define mmSDMA1_HBM_PAGE_CONFIG_BASE_IDX 0 +#define mmSDMA1_UCODE_CHECKSUM 0x0029 +#define mmSDMA1_UCODE_CHECKSUM_BASE_IDX 0 +#define mmSDMA1_F32_CNTL 0x002a +#define mmSDMA1_F32_CNTL_BASE_IDX 0 +#define mmSDMA1_FREEZE 0x002b +#define mmSDMA1_FREEZE_BASE_IDX 0 +#define mmSDMA1_PHASE0_QUANTUM 0x002c +#define mmSDMA1_PHASE0_QUANTUM_BASE_IDX 0 +#define mmSDMA1_PHASE1_QUANTUM 0x002d +#define mmSDMA1_PHASE1_QUANTUM_BASE_IDX 0 +#define mmSDMA1_EDC_CONFIG 0x0032 +#define mmSDMA1_EDC_CONFIG_BASE_IDX 0 +#define mmSDMA1_BA_THRESHOLD 0x0033 +#define mmSDMA1_BA_THRESHOLD_BASE_IDX 0 +#define mmSDMA1_ID 0x0034 +#define mmSDMA1_ID_BASE_IDX 0 +#define mmSDMA1_VERSION 0x0035 +#define mmSDMA1_VERSION_BASE_IDX 0 +#define mmSDMA1_EDC_COUNTER 0x0036 +#define 
mmSDMA1_EDC_COUNTER_BASE_IDX 0 +#define mmSDMA1_EDC_COUNTER_CLEAR 0x0037 +#define mmSDMA1_EDC_COUNTER_CLEAR_BASE_IDX 0 +#define mmSDMA1_STATUS2_REG 0x0038 +#define mmSDMA1_STATUS2_REG_BASE_IDX 0 +#define mmSDMA1_ATOMIC_CNTL 0x0039 +#define mmSDMA1_ATOMIC_CNTL_BASE_IDX 0 +#define mmSDMA1_ATOMIC_PREOP_LO 0x003a +#define mmSDMA1_ATOMIC_PREOP_LO_BASE_IDX 0 +#define mmSDMA1_ATOMIC_PREOP_HI 0x003b +#define mmSDMA1_ATOMIC_PREOP_HI_BASE_IDX 0 +#define mmSDMA1_UTCL1_CNTL 0x003c +#define mmSDMA1_UTCL1_CNTL_BASE_IDX 0 +#define mmSDMA1_UTCL1_WATERMK 0x003d +#define mmSDMA1_UTCL1_WATERMK_BASE_IDX 0 +#define mmSDMA1_UTCL1_RD_STATUS 0x003e +#define mmSDMA1_UTCL1_RD_STATUS_BASE_IDX 0 +#define mmSDMA1_UTCL1_WR_STATUS 0x003f +#define mmSDMA1_UTCL1_WR_STATUS_BASE_IDX 0 +#define mmSDMA1_UTCL1_INV0 0x0040 +#define mmSDMA1_UTCL1_INV0_BASE_IDX 0 +#define mmSDMA1_UTCL1_INV1 0x0041 +#define mmSDMA1_UTCL1_INV1_BASE_IDX 0 +#define mmSDMA1_UTCL1_INV2 0x0042 +#define mmSDMA1_UTCL1_INV2_BASE_IDX 0 +#define mmSDMA1_UTCL1_RD_XNACK0 0x0043 +#define mmSDMA1_UTCL1_RD_XNACK0_BASE_IDX 0 +#define mmSDMA1_UTCL1_RD_XNACK1 0x0044 +#define mmSDMA1_UTCL1_RD_XNACK1_BASE_IDX 0 +#define mmSDMA1_UTCL1_WR_XNACK0 0x0045 +#define mmSDMA1_UTCL1_WR_XNACK0_BASE_IDX 0 +#define mmSDMA1_UTCL1_WR_XNACK1 0x0046 +#define mmSDMA1_UTCL1_WR_XNACK1_BASE_IDX 0 +#define mmSDMA1_UTCL1_TIMEOUT 0x0047 +#define mmSDMA1_UTCL1_TIMEOUT_BASE_IDX 0 +#define mmSDMA1_UTCL1_PAGE 0x0048 +#define mmSDMA1_UTCL1_PAGE_BASE_IDX 0 +#define mmSDMA1_POWER_CNTL_IDLE 0x0049 +#define mmSDMA1_POWER_CNTL_IDLE_BASE_IDX 0 +#define mmSDMA1_RELAX_ORDERING_LUT 0x004a +#define mmSDMA1_RELAX_ORDERING_LUT_BASE_IDX 0 +#define mmSDMA1_CHICKEN_BITS_2 0x004b +#define mmSDMA1_CHICKEN_BITS_2_BASE_IDX 0 +#define mmSDMA1_STATUS3_REG 0x004c +#define mmSDMA1_STATUS3_REG_BASE_IDX 0 +#define mmSDMA1_PHYSICAL_ADDR_LO 0x004d +#define mmSDMA1_PHYSICAL_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_PHYSICAL_ADDR_HI 0x004e +#define mmSDMA1_PHYSICAL_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_PHASE2_QUANTUM 0x004f +#define mmSDMA1_PHASE2_QUANTUM_BASE_IDX 0 +#define mmSDMA1_ERROR_LOG 0x0050 +#define mmSDMA1_ERROR_LOG_BASE_IDX 0 +#define mmSDMA1_PUB_DUMMY_REG0 0x0051 +#define mmSDMA1_PUB_DUMMY_REG0_BASE_IDX 0 +#define mmSDMA1_PUB_DUMMY_REG1 0x0052 +#define mmSDMA1_PUB_DUMMY_REG1_BASE_IDX 0 +#define mmSDMA1_PUB_DUMMY_REG2 0x0053 +#define mmSDMA1_PUB_DUMMY_REG2_BASE_IDX 0 +#define mmSDMA1_PUB_DUMMY_REG3 0x0054 +#define mmSDMA1_PUB_DUMMY_REG3_BASE_IDX 0 +#define mmSDMA1_F32_COUNTER 0x0055 +#define mmSDMA1_F32_COUNTER_BASE_IDX 0 +#define mmSDMA1_UNBREAKABLE 0x0056 +#define mmSDMA1_UNBREAKABLE_BASE_IDX 0 +#define mmSDMA1_PERFMON_CNTL 0x0057 +#define mmSDMA1_PERFMON_CNTL_BASE_IDX 0 +#define mmSDMA1_PERFCOUNTER0_RESULT 0x0058 +#define mmSDMA1_PERFCOUNTER0_RESULT_BASE_IDX 0 +#define mmSDMA1_PERFCOUNTER1_RESULT 0x0059 +#define mmSDMA1_PERFCOUNTER1_RESULT_BASE_IDX 0 +#define mmSDMA1_PERFCOUNTER_TAG_DELAY_RANGE 0x005a +#define mmSDMA1_PERFCOUNTER_TAG_DELAY_RANGE_BASE_IDX 0 +#define mmSDMA1_CRD_CNTL 0x005b +#define mmSDMA1_CRD_CNTL_BASE_IDX 0 +#define mmSDMA1_GPU_IOV_VIOLATION_LOG 0x005d +#define mmSDMA1_GPU_IOV_VIOLATION_LOG_BASE_IDX 0 +#define mmSDMA1_ULV_CNTL 0x005e +#define mmSDMA1_ULV_CNTL_BASE_IDX 0 +#define mmSDMA1_EA_DBIT_ADDR_DATA 0x0060 +#define mmSDMA1_EA_DBIT_ADDR_DATA_BASE_IDX 0 +#define mmSDMA1_EA_DBIT_ADDR_INDEX 0x0061 +#define mmSDMA1_EA_DBIT_ADDR_INDEX_BASE_IDX 0 +#define mmSDMA1_GPU_IOV_VIOLATION_LOG2 0x0062 +#define mmSDMA1_GPU_IOV_VIOLATION_LOG2_BASE_IDX 0 +#define mmSDMA1_GFX_RB_CNTL 0x0080 +#define 
mmSDMA1_GFX_RB_CNTL_BASE_IDX 0 +#define mmSDMA1_GFX_RB_BASE 0x0081 +#define mmSDMA1_GFX_RB_BASE_BASE_IDX 0 +#define mmSDMA1_GFX_RB_BASE_HI 0x0082 +#define mmSDMA1_GFX_RB_BASE_HI_BASE_IDX 0 +#define mmSDMA1_GFX_RB_RPTR 0x0083 +#define mmSDMA1_GFX_RB_RPTR_BASE_IDX 0 +#define mmSDMA1_GFX_RB_RPTR_HI 0x0084 +#define mmSDMA1_GFX_RB_RPTR_HI_BASE_IDX 0 +#define mmSDMA1_GFX_RB_WPTR 0x0085 +#define mmSDMA1_GFX_RB_WPTR_BASE_IDX 0 +#define mmSDMA1_GFX_RB_WPTR_HI 0x0086 +#define mmSDMA1_GFX_RB_WPTR_HI_BASE_IDX 0 +#define mmSDMA1_GFX_RB_WPTR_POLL_CNTL 0x0087 +#define mmSDMA1_GFX_RB_WPTR_POLL_CNTL_BASE_IDX 0 +#define mmSDMA1_GFX_RB_RPTR_ADDR_HI 0x0088 +#define mmSDMA1_GFX_RB_RPTR_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_GFX_RB_RPTR_ADDR_LO 0x0089 +#define mmSDMA1_GFX_RB_RPTR_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_GFX_IB_CNTL 0x008a +#define mmSDMA1_GFX_IB_CNTL_BASE_IDX 0 +#define mmSDMA1_GFX_IB_RPTR 0x008b +#define mmSDMA1_GFX_IB_RPTR_BASE_IDX 0 +#define mmSDMA1_GFX_IB_OFFSET 0x008c +#define mmSDMA1_GFX_IB_OFFSET_BASE_IDX 0 +#define mmSDMA1_GFX_IB_BASE_LO 0x008d +#define mmSDMA1_GFX_IB_BASE_LO_BASE_IDX 0 +#define mmSDMA1_GFX_IB_BASE_HI 0x008e +#define mmSDMA1_GFX_IB_BASE_HI_BASE_IDX 0 +#define mmSDMA1_GFX_IB_SIZE 0x008f +#define mmSDMA1_GFX_IB_SIZE_BASE_IDX 0 +#define mmSDMA1_GFX_SKIP_CNTL 0x0090 +#define mmSDMA1_GFX_SKIP_CNTL_BASE_IDX 0 +#define mmSDMA1_GFX_CONTEXT_STATUS 0x0091 +#define mmSDMA1_GFX_CONTEXT_STATUS_BASE_IDX 0 +#define mmSDMA1_GFX_DOORBELL 0x0092 +#define mmSDMA1_GFX_DOORBELL_BASE_IDX 0 +#define mmSDMA1_GFX_CONTEXT_CNTL 0x0093 +#define mmSDMA1_GFX_CONTEXT_CNTL_BASE_IDX 0 +#define mmSDMA1_GFX_STATUS 0x00a8 +#define mmSDMA1_GFX_STATUS_BASE_IDX 0 +#define mmSDMA1_GFX_DOORBELL_LOG 0x00a9 +#define mmSDMA1_GFX_DOORBELL_LOG_BASE_IDX 0 +#define mmSDMA1_GFX_WATERMARK 0x00aa +#define mmSDMA1_GFX_WATERMARK_BASE_IDX 0 +#define mmSDMA1_GFX_DOORBELL_OFFSET 0x00ab +#define mmSDMA1_GFX_DOORBELL_OFFSET_BASE_IDX 0 +#define mmSDMA1_GFX_CSA_ADDR_LO 0x00ac +#define mmSDMA1_GFX_CSA_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_GFX_CSA_ADDR_HI 0x00ad +#define mmSDMA1_GFX_CSA_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_GFX_IB_SUB_REMAIN 0x00af +#define mmSDMA1_GFX_IB_SUB_REMAIN_BASE_IDX 0 +#define mmSDMA1_GFX_PREEMPT 0x00b0 +#define mmSDMA1_GFX_PREEMPT_BASE_IDX 0 +#define mmSDMA1_GFX_DUMMY_REG 0x00b1 +#define mmSDMA1_GFX_DUMMY_REG_BASE_IDX 0 +#define mmSDMA1_GFX_RB_WPTR_POLL_ADDR_HI 0x00b2 +#define mmSDMA1_GFX_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_GFX_RB_WPTR_POLL_ADDR_LO 0x00b3 +#define mmSDMA1_GFX_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_GFX_RB_AQL_CNTL 0x00b4 +#define mmSDMA1_GFX_RB_AQL_CNTL_BASE_IDX 0 +#define mmSDMA1_GFX_MINOR_PTR_UPDATE 0x00b5 +#define mmSDMA1_GFX_MINOR_PTR_UPDATE_BASE_IDX 0 +#define mmSDMA1_GFX_MIDCMD_DATA0 0x00c0 +#define mmSDMA1_GFX_MIDCMD_DATA0_BASE_IDX 0 +#define mmSDMA1_GFX_MIDCMD_DATA1 0x00c1 +#define mmSDMA1_GFX_MIDCMD_DATA1_BASE_IDX 0 +#define mmSDMA1_GFX_MIDCMD_DATA2 0x00c2 +#define mmSDMA1_GFX_MIDCMD_DATA2_BASE_IDX 0 +#define mmSDMA1_GFX_MIDCMD_DATA3 0x00c3 +#define mmSDMA1_GFX_MIDCMD_DATA3_BASE_IDX 0 +#define mmSDMA1_GFX_MIDCMD_DATA4 0x00c4 +#define mmSDMA1_GFX_MIDCMD_DATA4_BASE_IDX 0 +#define mmSDMA1_GFX_MIDCMD_DATA5 0x00c5 +#define mmSDMA1_GFX_MIDCMD_DATA5_BASE_IDX 0 +#define mmSDMA1_GFX_MIDCMD_DATA6 0x00c6 +#define mmSDMA1_GFX_MIDCMD_DATA6_BASE_IDX 0 +#define mmSDMA1_GFX_MIDCMD_DATA7 0x00c7 +#define mmSDMA1_GFX_MIDCMD_DATA7_BASE_IDX 0 +#define mmSDMA1_GFX_MIDCMD_DATA8 0x00c8 +#define mmSDMA1_GFX_MIDCMD_DATA8_BASE_IDX 0 +#define mmSDMA1_GFX_MIDCMD_CNTL 0x00c9 +#define 
mmSDMA1_GFX_MIDCMD_CNTL_BASE_IDX 0 +#define mmSDMA1_PAGE_RB_CNTL 0x00d8 +#define mmSDMA1_PAGE_RB_CNTL_BASE_IDX 0 +#define mmSDMA1_PAGE_RB_BASE 0x00d9 +#define mmSDMA1_PAGE_RB_BASE_BASE_IDX 0 +#define mmSDMA1_PAGE_RB_BASE_HI 0x00da +#define mmSDMA1_PAGE_RB_BASE_HI_BASE_IDX 0 +#define mmSDMA1_PAGE_RB_RPTR 0x00db +#define mmSDMA1_PAGE_RB_RPTR_BASE_IDX 0 +#define mmSDMA1_PAGE_RB_RPTR_HI 0x00dc +#define mmSDMA1_PAGE_RB_RPTR_HI_BASE_IDX 0 +#define mmSDMA1_PAGE_RB_WPTR 0x00dd +#define mmSDMA1_PAGE_RB_WPTR_BASE_IDX 0 +#define mmSDMA1_PAGE_RB_WPTR_HI 0x00de +#define mmSDMA1_PAGE_RB_WPTR_HI_BASE_IDX 0 +#define mmSDMA1_PAGE_RB_WPTR_POLL_CNTL 0x00df +#define mmSDMA1_PAGE_RB_WPTR_POLL_CNTL_BASE_IDX 0 +#define mmSDMA1_PAGE_RB_RPTR_ADDR_HI 0x00e0 +#define mmSDMA1_PAGE_RB_RPTR_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_PAGE_RB_RPTR_ADDR_LO 0x00e1 +#define mmSDMA1_PAGE_RB_RPTR_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_PAGE_IB_CNTL 0x00e2 +#define mmSDMA1_PAGE_IB_CNTL_BASE_IDX 0 +#define mmSDMA1_PAGE_IB_RPTR 0x00e3 +#define mmSDMA1_PAGE_IB_RPTR_BASE_IDX 0 +#define mmSDMA1_PAGE_IB_OFFSET 0x00e4 +#define mmSDMA1_PAGE_IB_OFFSET_BASE_IDX 0 +#define mmSDMA1_PAGE_IB_BASE_LO 0x00e5 +#define mmSDMA1_PAGE_IB_BASE_LO_BASE_IDX 0 +#define mmSDMA1_PAGE_IB_BASE_HI 0x00e6 +#define mmSDMA1_PAGE_IB_BASE_HI_BASE_IDX 0 +#define mmSDMA1_PAGE_IB_SIZE 0x00e7 +#define mmSDMA1_PAGE_IB_SIZE_BASE_IDX 0 +#define mmSDMA1_PAGE_SKIP_CNTL 0x00e8 +#define mmSDMA1_PAGE_SKIP_CNTL_BASE_IDX 0 +#define mmSDMA1_PAGE_CONTEXT_STATUS 0x00e9 +#define mmSDMA1_PAGE_CONTEXT_STATUS_BASE_IDX 0 +#define mmSDMA1_PAGE_DOORBELL 0x00ea +#define mmSDMA1_PAGE_DOORBELL_BASE_IDX 0 +#define mmSDMA1_PAGE_STATUS 0x0100 +#define mmSDMA1_PAGE_STATUS_BASE_IDX 0 +#define mmSDMA1_PAGE_DOORBELL_LOG 0x0101 +#define mmSDMA1_PAGE_DOORBELL_LOG_BASE_IDX 0 +#define mmSDMA1_PAGE_WATERMARK 0x0102 +#define mmSDMA1_PAGE_WATERMARK_BASE_IDX 0 +#define mmSDMA1_PAGE_DOORBELL_OFFSET 0x0103 +#define mmSDMA1_PAGE_DOORBELL_OFFSET_BASE_IDX 0 +#define mmSDMA1_PAGE_CSA_ADDR_LO 0x0104 +#define mmSDMA1_PAGE_CSA_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_PAGE_CSA_ADDR_HI 0x0105 +#define mmSDMA1_PAGE_CSA_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_PAGE_IB_SUB_REMAIN 0x0107 +#define mmSDMA1_PAGE_IB_SUB_REMAIN_BASE_IDX 0 +#define mmSDMA1_PAGE_PREEMPT 0x0108 +#define mmSDMA1_PAGE_PREEMPT_BASE_IDX 0 +#define mmSDMA1_PAGE_DUMMY_REG 0x0109 +#define mmSDMA1_PAGE_DUMMY_REG_BASE_IDX 0 +#define mmSDMA1_PAGE_RB_WPTR_POLL_ADDR_HI 0x010a +#define mmSDMA1_PAGE_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_PAGE_RB_WPTR_POLL_ADDR_LO 0x010b +#define mmSDMA1_PAGE_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_PAGE_RB_AQL_CNTL 0x010c +#define mmSDMA1_PAGE_RB_AQL_CNTL_BASE_IDX 0 +#define mmSDMA1_PAGE_MINOR_PTR_UPDATE 0x010d +#define mmSDMA1_PAGE_MINOR_PTR_UPDATE_BASE_IDX 0 +#define mmSDMA1_PAGE_MIDCMD_DATA0 0x0118 +#define mmSDMA1_PAGE_MIDCMD_DATA0_BASE_IDX 0 +#define mmSDMA1_PAGE_MIDCMD_DATA1 0x0119 +#define mmSDMA1_PAGE_MIDCMD_DATA1_BASE_IDX 0 +#define mmSDMA1_PAGE_MIDCMD_DATA2 0x011a +#define mmSDMA1_PAGE_MIDCMD_DATA2_BASE_IDX 0 +#define mmSDMA1_PAGE_MIDCMD_DATA3 0x011b +#define mmSDMA1_PAGE_MIDCMD_DATA3_BASE_IDX 0 +#define mmSDMA1_PAGE_MIDCMD_DATA4 0x011c +#define mmSDMA1_PAGE_MIDCMD_DATA4_BASE_IDX 0 +#define mmSDMA1_PAGE_MIDCMD_DATA5 0x011d +#define mmSDMA1_PAGE_MIDCMD_DATA5_BASE_IDX 0 +#define mmSDMA1_PAGE_MIDCMD_DATA6 0x011e +#define mmSDMA1_PAGE_MIDCMD_DATA6_BASE_IDX 0 +#define mmSDMA1_PAGE_MIDCMD_DATA7 0x011f +#define mmSDMA1_PAGE_MIDCMD_DATA7_BASE_IDX 0 +#define mmSDMA1_PAGE_MIDCMD_DATA8 0x0120 +#define 
mmSDMA1_PAGE_MIDCMD_DATA8_BASE_IDX 0 +#define mmSDMA1_PAGE_MIDCMD_CNTL 0x0121 +#define mmSDMA1_PAGE_MIDCMD_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC0_RB_CNTL 0x0130 +#define mmSDMA1_RLC0_RB_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC0_RB_BASE 0x0131 +#define mmSDMA1_RLC0_RB_BASE_BASE_IDX 0 +#define mmSDMA1_RLC0_RB_BASE_HI 0x0132 +#define mmSDMA1_RLC0_RB_BASE_HI_BASE_IDX 0 +#define mmSDMA1_RLC0_RB_RPTR 0x0133 +#define mmSDMA1_RLC0_RB_RPTR_BASE_IDX 0 +#define mmSDMA1_RLC0_RB_RPTR_HI 0x0134 +#define mmSDMA1_RLC0_RB_RPTR_HI_BASE_IDX 0 +#define mmSDMA1_RLC0_RB_WPTR 0x0135 +#define mmSDMA1_RLC0_RB_WPTR_BASE_IDX 0 +#define mmSDMA1_RLC0_RB_WPTR_HI 0x0136 +#define mmSDMA1_RLC0_RB_WPTR_HI_BASE_IDX 0 +#define mmSDMA1_RLC0_RB_WPTR_POLL_CNTL 0x0137 +#define mmSDMA1_RLC0_RB_WPTR_POLL_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC0_RB_RPTR_ADDR_HI 0x0138 +#define mmSDMA1_RLC0_RB_RPTR_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC0_RB_RPTR_ADDR_LO 0x0139 +#define mmSDMA1_RLC0_RB_RPTR_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC0_IB_CNTL 0x013a +#define mmSDMA1_RLC0_IB_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC0_IB_RPTR 0x013b +#define mmSDMA1_RLC0_IB_RPTR_BASE_IDX 0 +#define mmSDMA1_RLC0_IB_OFFSET 0x013c +#define mmSDMA1_RLC0_IB_OFFSET_BASE_IDX 0 +#define mmSDMA1_RLC0_IB_BASE_LO 0x013d +#define mmSDMA1_RLC0_IB_BASE_LO_BASE_IDX 0 +#define mmSDMA1_RLC0_IB_BASE_HI 0x013e +#define mmSDMA1_RLC0_IB_BASE_HI_BASE_IDX 0 +#define mmSDMA1_RLC0_IB_SIZE 0x013f +#define mmSDMA1_RLC0_IB_SIZE_BASE_IDX 0 +#define mmSDMA1_RLC0_SKIP_CNTL 0x0140 +#define mmSDMA1_RLC0_SKIP_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC0_CONTEXT_STATUS 0x0141 +#define mmSDMA1_RLC0_CONTEXT_STATUS_BASE_IDX 0 +#define mmSDMA1_RLC0_DOORBELL 0x0142 +#define mmSDMA1_RLC0_DOORBELL_BASE_IDX 0 +#define mmSDMA1_RLC0_STATUS 0x0158 +#define mmSDMA1_RLC0_STATUS_BASE_IDX 0 +#define mmSDMA1_RLC0_DOORBELL_LOG 0x0159 +#define mmSDMA1_RLC0_DOORBELL_LOG_BASE_IDX 0 +#define mmSDMA1_RLC0_WATERMARK 0x015a +#define mmSDMA1_RLC0_WATERMARK_BASE_IDX 0 +#define mmSDMA1_RLC0_DOORBELL_OFFSET 0x015b +#define mmSDMA1_RLC0_DOORBELL_OFFSET_BASE_IDX 0 +#define mmSDMA1_RLC0_CSA_ADDR_LO 0x015c +#define mmSDMA1_RLC0_CSA_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC0_CSA_ADDR_HI 0x015d +#define mmSDMA1_RLC0_CSA_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC0_IB_SUB_REMAIN 0x015f +#define mmSDMA1_RLC0_IB_SUB_REMAIN_BASE_IDX 0 +#define mmSDMA1_RLC0_PREEMPT 0x0160 +#define mmSDMA1_RLC0_PREEMPT_BASE_IDX 0 +#define mmSDMA1_RLC0_DUMMY_REG 0x0161 +#define mmSDMA1_RLC0_DUMMY_REG_BASE_IDX 0 +#define mmSDMA1_RLC0_RB_WPTR_POLL_ADDR_HI 0x0162 +#define mmSDMA1_RLC0_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC0_RB_WPTR_POLL_ADDR_LO 0x0163 +#define mmSDMA1_RLC0_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC0_RB_AQL_CNTL 0x0164 +#define mmSDMA1_RLC0_RB_AQL_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC0_MINOR_PTR_UPDATE 0x0165 +#define mmSDMA1_RLC0_MINOR_PTR_UPDATE_BASE_IDX 0 +#define mmSDMA1_RLC0_MIDCMD_DATA0 0x0170 +#define mmSDMA1_RLC0_MIDCMD_DATA0_BASE_IDX 0 +#define mmSDMA1_RLC0_MIDCMD_DATA1 0x0171 +#define mmSDMA1_RLC0_MIDCMD_DATA1_BASE_IDX 0 +#define mmSDMA1_RLC0_MIDCMD_DATA2 0x0172 +#define mmSDMA1_RLC0_MIDCMD_DATA2_BASE_IDX 0 +#define mmSDMA1_RLC0_MIDCMD_DATA3 0x0173 +#define mmSDMA1_RLC0_MIDCMD_DATA3_BASE_IDX 0 +#define mmSDMA1_RLC0_MIDCMD_DATA4 0x0174 +#define mmSDMA1_RLC0_MIDCMD_DATA4_BASE_IDX 0 +#define mmSDMA1_RLC0_MIDCMD_DATA5 0x0175 +#define mmSDMA1_RLC0_MIDCMD_DATA5_BASE_IDX 0 +#define mmSDMA1_RLC0_MIDCMD_DATA6 0x0176 +#define mmSDMA1_RLC0_MIDCMD_DATA6_BASE_IDX 0 +#define mmSDMA1_RLC0_MIDCMD_DATA7 0x0177 +#define 
mmSDMA1_RLC0_MIDCMD_DATA7_BASE_IDX 0 +#define mmSDMA1_RLC0_MIDCMD_DATA8 0x0178 +#define mmSDMA1_RLC0_MIDCMD_DATA8_BASE_IDX 0 +#define mmSDMA1_RLC0_MIDCMD_CNTL 0x0179 +#define mmSDMA1_RLC0_MIDCMD_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC1_RB_CNTL 0x0188 +#define mmSDMA1_RLC1_RB_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC1_RB_BASE 0x0189 +#define mmSDMA1_RLC1_RB_BASE_BASE_IDX 0 +#define mmSDMA1_RLC1_RB_BASE_HI 0x018a +#define mmSDMA1_RLC1_RB_BASE_HI_BASE_IDX 0 +#define mmSDMA1_RLC1_RB_RPTR 0x018b +#define mmSDMA1_RLC1_RB_RPTR_BASE_IDX 0 +#define mmSDMA1_RLC1_RB_RPTR_HI 0x018c +#define mmSDMA1_RLC1_RB_RPTR_HI_BASE_IDX 0 +#define mmSDMA1_RLC1_RB_WPTR 0x018d +#define mmSDMA1_RLC1_RB_WPTR_BASE_IDX 0 +#define mmSDMA1_RLC1_RB_WPTR_HI 0x018e +#define mmSDMA1_RLC1_RB_WPTR_HI_BASE_IDX 0 +#define mmSDMA1_RLC1_RB_WPTR_POLL_CNTL 0x018f +#define mmSDMA1_RLC1_RB_WPTR_POLL_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC1_RB_RPTR_ADDR_HI 0x0190 +#define mmSDMA1_RLC1_RB_RPTR_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC1_RB_RPTR_ADDR_LO 0x0191 +#define mmSDMA1_RLC1_RB_RPTR_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC1_IB_CNTL 0x0192 +#define mmSDMA1_RLC1_IB_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC1_IB_RPTR 0x0193 +#define mmSDMA1_RLC1_IB_RPTR_BASE_IDX 0 +#define mmSDMA1_RLC1_IB_OFFSET 0x0194 +#define mmSDMA1_RLC1_IB_OFFSET_BASE_IDX 0 +#define mmSDMA1_RLC1_IB_BASE_LO 0x0195 +#define mmSDMA1_RLC1_IB_BASE_LO_BASE_IDX 0 +#define mmSDMA1_RLC1_IB_BASE_HI 0x0196 +#define mmSDMA1_RLC1_IB_BASE_HI_BASE_IDX 0 +#define mmSDMA1_RLC1_IB_SIZE 0x0197 +#define mmSDMA1_RLC1_IB_SIZE_BASE_IDX 0 +#define mmSDMA1_RLC1_SKIP_CNTL 0x0198 +#define mmSDMA1_RLC1_SKIP_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC1_CONTEXT_STATUS 0x0199 +#define mmSDMA1_RLC1_CONTEXT_STATUS_BASE_IDX 0 +#define mmSDMA1_RLC1_DOORBELL 0x019a +#define mmSDMA1_RLC1_DOORBELL_BASE_IDX 0 +#define mmSDMA1_RLC1_STATUS 0x01b0 +#define mmSDMA1_RLC1_STATUS_BASE_IDX 0 +#define mmSDMA1_RLC1_DOORBELL_LOG 0x01b1 +#define mmSDMA1_RLC1_DOORBELL_LOG_BASE_IDX 0 +#define mmSDMA1_RLC1_WATERMARK 0x01b2 +#define mmSDMA1_RLC1_WATERMARK_BASE_IDX 0 +#define mmSDMA1_RLC1_DOORBELL_OFFSET 0x01b3 +#define mmSDMA1_RLC1_DOORBELL_OFFSET_BASE_IDX 0 +#define mmSDMA1_RLC1_CSA_ADDR_LO 0x01b4 +#define mmSDMA1_RLC1_CSA_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC1_CSA_ADDR_HI 0x01b5 +#define mmSDMA1_RLC1_CSA_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC1_IB_SUB_REMAIN 0x01b7 +#define mmSDMA1_RLC1_IB_SUB_REMAIN_BASE_IDX 0 +#define mmSDMA1_RLC1_PREEMPT 0x01b8 +#define mmSDMA1_RLC1_PREEMPT_BASE_IDX 0 +#define mmSDMA1_RLC1_DUMMY_REG 0x01b9 +#define mmSDMA1_RLC1_DUMMY_REG_BASE_IDX 0 +#define mmSDMA1_RLC1_RB_WPTR_POLL_ADDR_HI 0x01ba +#define mmSDMA1_RLC1_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC1_RB_WPTR_POLL_ADDR_LO 0x01bb +#define mmSDMA1_RLC1_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC1_RB_AQL_CNTL 0x01bc +#define mmSDMA1_RLC1_RB_AQL_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC1_MINOR_PTR_UPDATE 0x01bd +#define mmSDMA1_RLC1_MINOR_PTR_UPDATE_BASE_IDX 0 +#define mmSDMA1_RLC1_MIDCMD_DATA0 0x01c8 +#define mmSDMA1_RLC1_MIDCMD_DATA0_BASE_IDX 0 +#define mmSDMA1_RLC1_MIDCMD_DATA1 0x01c9 +#define mmSDMA1_RLC1_MIDCMD_DATA1_BASE_IDX 0 +#define mmSDMA1_RLC1_MIDCMD_DATA2 0x01ca +#define mmSDMA1_RLC1_MIDCMD_DATA2_BASE_IDX 0 +#define mmSDMA1_RLC1_MIDCMD_DATA3 0x01cb +#define mmSDMA1_RLC1_MIDCMD_DATA3_BASE_IDX 0 +#define mmSDMA1_RLC1_MIDCMD_DATA4 0x01cc +#define mmSDMA1_RLC1_MIDCMD_DATA4_BASE_IDX 0 +#define mmSDMA1_RLC1_MIDCMD_DATA5 0x01cd +#define mmSDMA1_RLC1_MIDCMD_DATA5_BASE_IDX 0 +#define mmSDMA1_RLC1_MIDCMD_DATA6 0x01ce +#define 
mmSDMA1_RLC1_MIDCMD_DATA6_BASE_IDX 0 +#define mmSDMA1_RLC1_MIDCMD_DATA7 0x01cf +#define mmSDMA1_RLC1_MIDCMD_DATA7_BASE_IDX 0 +#define mmSDMA1_RLC1_MIDCMD_DATA8 0x01d0 +#define mmSDMA1_RLC1_MIDCMD_DATA8_BASE_IDX 0 +#define mmSDMA1_RLC1_MIDCMD_CNTL 0x01d1 +#define mmSDMA1_RLC1_MIDCMD_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC2_RB_CNTL 0x01e0 +#define mmSDMA1_RLC2_RB_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC2_RB_BASE 0x01e1 +#define mmSDMA1_RLC2_RB_BASE_BASE_IDX 0 +#define mmSDMA1_RLC2_RB_BASE_HI 0x01e2 +#define mmSDMA1_RLC2_RB_BASE_HI_BASE_IDX 0 +#define mmSDMA1_RLC2_RB_RPTR 0x01e3 +#define mmSDMA1_RLC2_RB_RPTR_BASE_IDX 0 +#define mmSDMA1_RLC2_RB_RPTR_HI 0x01e4 +#define mmSDMA1_RLC2_RB_RPTR_HI_BASE_IDX 0 +#define mmSDMA1_RLC2_RB_WPTR 0x01e5 +#define mmSDMA1_RLC2_RB_WPTR_BASE_IDX 0 +#define mmSDMA1_RLC2_RB_WPTR_HI 0x01e6 +#define mmSDMA1_RLC2_RB_WPTR_HI_BASE_IDX 0 +#define mmSDMA1_RLC2_RB_WPTR_POLL_CNTL 0x01e7 +#define mmSDMA1_RLC2_RB_WPTR_POLL_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC2_RB_RPTR_ADDR_HI 0x01e8 +#define mmSDMA1_RLC2_RB_RPTR_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC2_RB_RPTR_ADDR_LO 0x01e9 +#define mmSDMA1_RLC2_RB_RPTR_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC2_IB_CNTL 0x01ea +#define mmSDMA1_RLC2_IB_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC2_IB_RPTR 0x01eb +#define mmSDMA1_RLC2_IB_RPTR_BASE_IDX 0 +#define mmSDMA1_RLC2_IB_OFFSET 0x01ec +#define mmSDMA1_RLC2_IB_OFFSET_BASE_IDX 0 +#define mmSDMA1_RLC2_IB_BASE_LO 0x01ed +#define mmSDMA1_RLC2_IB_BASE_LO_BASE_IDX 0 +#define mmSDMA1_RLC2_IB_BASE_HI 0x01ee +#define mmSDMA1_RLC2_IB_BASE_HI_BASE_IDX 0 +#define mmSDMA1_RLC2_IB_SIZE 0x01ef +#define mmSDMA1_RLC2_IB_SIZE_BASE_IDX 0 +#define mmSDMA1_RLC2_SKIP_CNTL 0x01f0 +#define mmSDMA1_RLC2_SKIP_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC2_CONTEXT_STATUS 0x01f1 +#define mmSDMA1_RLC2_CONTEXT_STATUS_BASE_IDX 0 +#define mmSDMA1_RLC2_DOORBELL 0x01f2 +#define mmSDMA1_RLC2_DOORBELL_BASE_IDX 0 +#define mmSDMA1_RLC2_STATUS 0x0208 +#define mmSDMA1_RLC2_STATUS_BASE_IDX 0 +#define mmSDMA1_RLC2_DOORBELL_LOG 0x0209 +#define mmSDMA1_RLC2_DOORBELL_LOG_BASE_IDX 0 +#define mmSDMA1_RLC2_WATERMARK 0x020a +#define mmSDMA1_RLC2_WATERMARK_BASE_IDX 0 +#define mmSDMA1_RLC2_DOORBELL_OFFSET 0x020b +#define mmSDMA1_RLC2_DOORBELL_OFFSET_BASE_IDX 0 +#define mmSDMA1_RLC2_CSA_ADDR_LO 0x020c +#define mmSDMA1_RLC2_CSA_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC2_CSA_ADDR_HI 0x020d +#define mmSDMA1_RLC2_CSA_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC2_IB_SUB_REMAIN 0x020f +#define mmSDMA1_RLC2_IB_SUB_REMAIN_BASE_IDX 0 +#define mmSDMA1_RLC2_PREEMPT 0x0210 +#define mmSDMA1_RLC2_PREEMPT_BASE_IDX 0 +#define mmSDMA1_RLC2_DUMMY_REG 0x0211 +#define mmSDMA1_RLC2_DUMMY_REG_BASE_IDX 0 +#define mmSDMA1_RLC2_RB_WPTR_POLL_ADDR_HI 0x0212 +#define mmSDMA1_RLC2_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC2_RB_WPTR_POLL_ADDR_LO 0x0213 +#define mmSDMA1_RLC2_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC2_RB_AQL_CNTL 0x0214 +#define mmSDMA1_RLC2_RB_AQL_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC2_MINOR_PTR_UPDATE 0x0215 +#define mmSDMA1_RLC2_MINOR_PTR_UPDATE_BASE_IDX 0 +#define mmSDMA1_RLC2_MIDCMD_DATA0 0x0220 +#define mmSDMA1_RLC2_MIDCMD_DATA0_BASE_IDX 0 +#define mmSDMA1_RLC2_MIDCMD_DATA1 0x0221 +#define mmSDMA1_RLC2_MIDCMD_DATA1_BASE_IDX 0 +#define mmSDMA1_RLC2_MIDCMD_DATA2 0x0222 +#define mmSDMA1_RLC2_MIDCMD_DATA2_BASE_IDX 0 +#define mmSDMA1_RLC2_MIDCMD_DATA3 0x0223 +#define mmSDMA1_RLC2_MIDCMD_DATA3_BASE_IDX 0 +#define mmSDMA1_RLC2_MIDCMD_DATA4 0x0224 +#define mmSDMA1_RLC2_MIDCMD_DATA4_BASE_IDX 0 +#define mmSDMA1_RLC2_MIDCMD_DATA5 0x0225 +#define 
mmSDMA1_RLC2_MIDCMD_DATA5_BASE_IDX 0 +#define mmSDMA1_RLC2_MIDCMD_DATA6 0x0226 +#define mmSDMA1_RLC2_MIDCMD_DATA6_BASE_IDX 0 +#define mmSDMA1_RLC2_MIDCMD_DATA7 0x0227 +#define mmSDMA1_RLC2_MIDCMD_DATA7_BASE_IDX 0 +#define mmSDMA1_RLC2_MIDCMD_DATA8 0x0228 +#define mmSDMA1_RLC2_MIDCMD_DATA8_BASE_IDX 0 +#define mmSDMA1_RLC2_MIDCMD_CNTL 0x0229 +#define mmSDMA1_RLC2_MIDCMD_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC3_RB_CNTL 0x0238 +#define mmSDMA1_RLC3_RB_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC3_RB_BASE 0x0239 +#define mmSDMA1_RLC3_RB_BASE_BASE_IDX 0 +#define mmSDMA1_RLC3_RB_BASE_HI 0x023a +#define mmSDMA1_RLC3_RB_BASE_HI_BASE_IDX 0 +#define mmSDMA1_RLC3_RB_RPTR 0x023b +#define mmSDMA1_RLC3_RB_RPTR_BASE_IDX 0 +#define mmSDMA1_RLC3_RB_RPTR_HI 0x023c +#define mmSDMA1_RLC3_RB_RPTR_HI_BASE_IDX 0 +#define mmSDMA1_RLC3_RB_WPTR 0x023d +#define mmSDMA1_RLC3_RB_WPTR_BASE_IDX 0 +#define mmSDMA1_RLC3_RB_WPTR_HI 0x023e +#define mmSDMA1_RLC3_RB_WPTR_HI_BASE_IDX 0 +#define mmSDMA1_RLC3_RB_WPTR_POLL_CNTL 0x023f +#define mmSDMA1_RLC3_RB_WPTR_POLL_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC3_RB_RPTR_ADDR_HI 0x0240 +#define mmSDMA1_RLC3_RB_RPTR_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC3_RB_RPTR_ADDR_LO 0x0241 +#define mmSDMA1_RLC3_RB_RPTR_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC3_IB_CNTL 0x0242 +#define mmSDMA1_RLC3_IB_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC3_IB_RPTR 0x0243 +#define mmSDMA1_RLC3_IB_RPTR_BASE_IDX 0 +#define mmSDMA1_RLC3_IB_OFFSET 0x0244 +#define mmSDMA1_RLC3_IB_OFFSET_BASE_IDX 0 +#define mmSDMA1_RLC3_IB_BASE_LO 0x0245 +#define mmSDMA1_RLC3_IB_BASE_LO_BASE_IDX 0 +#define mmSDMA1_RLC3_IB_BASE_HI 0x0246 +#define mmSDMA1_RLC3_IB_BASE_HI_BASE_IDX 0 +#define mmSDMA1_RLC3_IB_SIZE 0x0247 +#define mmSDMA1_RLC3_IB_SIZE_BASE_IDX 0 +#define mmSDMA1_RLC3_SKIP_CNTL 0x0248 +#define mmSDMA1_RLC3_SKIP_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC3_CONTEXT_STATUS 0x0249 +#define mmSDMA1_RLC3_CONTEXT_STATUS_BASE_IDX 0 +#define mmSDMA1_RLC3_DOORBELL 0x024a +#define mmSDMA1_RLC3_DOORBELL_BASE_IDX 0 +#define mmSDMA1_RLC3_STATUS 0x0260 +#define mmSDMA1_RLC3_STATUS_BASE_IDX 0 +#define mmSDMA1_RLC3_DOORBELL_LOG 0x0261 +#define mmSDMA1_RLC3_DOORBELL_LOG_BASE_IDX 0 +#define mmSDMA1_RLC3_WATERMARK 0x0262 +#define mmSDMA1_RLC3_WATERMARK_BASE_IDX 0 +#define mmSDMA1_RLC3_DOORBELL_OFFSET 0x0263 +#define mmSDMA1_RLC3_DOORBELL_OFFSET_BASE_IDX 0 +#define mmSDMA1_RLC3_CSA_ADDR_LO 0x0264 +#define mmSDMA1_RLC3_CSA_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC3_CSA_ADDR_HI 0x0265 +#define mmSDMA1_RLC3_CSA_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC3_IB_SUB_REMAIN 0x0267 +#define mmSDMA1_RLC3_IB_SUB_REMAIN_BASE_IDX 0 +#define mmSDMA1_RLC3_PREEMPT 0x0268 +#define mmSDMA1_RLC3_PREEMPT_BASE_IDX 0 +#define mmSDMA1_RLC3_DUMMY_REG 0x0269 +#define mmSDMA1_RLC3_DUMMY_REG_BASE_IDX 0 +#define mmSDMA1_RLC3_RB_WPTR_POLL_ADDR_HI 0x026a +#define mmSDMA1_RLC3_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC3_RB_WPTR_POLL_ADDR_LO 0x026b +#define mmSDMA1_RLC3_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC3_RB_AQL_CNTL 0x026c +#define mmSDMA1_RLC3_RB_AQL_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC3_MINOR_PTR_UPDATE 0x026d +#define mmSDMA1_RLC3_MINOR_PTR_UPDATE_BASE_IDX 0 +#define mmSDMA1_RLC3_MIDCMD_DATA0 0x0278 +#define mmSDMA1_RLC3_MIDCMD_DATA0_BASE_IDX 0 +#define mmSDMA1_RLC3_MIDCMD_DATA1 0x0279 +#define mmSDMA1_RLC3_MIDCMD_DATA1_BASE_IDX 0 +#define mmSDMA1_RLC3_MIDCMD_DATA2 0x027a +#define mmSDMA1_RLC3_MIDCMD_DATA2_BASE_IDX 0 +#define mmSDMA1_RLC3_MIDCMD_DATA3 0x027b +#define mmSDMA1_RLC3_MIDCMD_DATA3_BASE_IDX 0 +#define mmSDMA1_RLC3_MIDCMD_DATA4 0x027c +#define 
mmSDMA1_RLC3_MIDCMD_DATA4_BASE_IDX 0 +#define mmSDMA1_RLC3_MIDCMD_DATA5 0x027d +#define mmSDMA1_RLC3_MIDCMD_DATA5_BASE_IDX 0 +#define mmSDMA1_RLC3_MIDCMD_DATA6 0x027e +#define mmSDMA1_RLC3_MIDCMD_DATA6_BASE_IDX 0 +#define mmSDMA1_RLC3_MIDCMD_DATA7 0x027f +#define mmSDMA1_RLC3_MIDCMD_DATA7_BASE_IDX 0 +#define mmSDMA1_RLC3_MIDCMD_DATA8 0x0280 +#define mmSDMA1_RLC3_MIDCMD_DATA8_BASE_IDX 0 +#define mmSDMA1_RLC3_MIDCMD_CNTL 0x0281 +#define mmSDMA1_RLC3_MIDCMD_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC4_RB_CNTL 0x0290 +#define mmSDMA1_RLC4_RB_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC4_RB_BASE 0x0291 +#define mmSDMA1_RLC4_RB_BASE_BASE_IDX 0 +#define mmSDMA1_RLC4_RB_BASE_HI 0x0292 +#define mmSDMA1_RLC4_RB_BASE_HI_BASE_IDX 0 +#define mmSDMA1_RLC4_RB_RPTR 0x0293 +#define mmSDMA1_RLC4_RB_RPTR_BASE_IDX 0 +#define mmSDMA1_RLC4_RB_RPTR_HI 0x0294 +#define mmSDMA1_RLC4_RB_RPTR_HI_BASE_IDX 0 +#define mmSDMA1_RLC4_RB_WPTR 0x0295 +#define mmSDMA1_RLC4_RB_WPTR_BASE_IDX 0 +#define mmSDMA1_RLC4_RB_WPTR_HI 0x0296 +#define mmSDMA1_RLC4_RB_WPTR_HI_BASE_IDX 0 +#define mmSDMA1_RLC4_RB_WPTR_POLL_CNTL 0x0297 +#define mmSDMA1_RLC4_RB_WPTR_POLL_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC4_RB_RPTR_ADDR_HI 0x0298 +#define mmSDMA1_RLC4_RB_RPTR_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC4_RB_RPTR_ADDR_LO 0x0299 +#define mmSDMA1_RLC4_RB_RPTR_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC4_IB_CNTL 0x029a +#define mmSDMA1_RLC4_IB_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC4_IB_RPTR 0x029b +#define mmSDMA1_RLC4_IB_RPTR_BASE_IDX 0 +#define mmSDMA1_RLC4_IB_OFFSET 0x029c +#define mmSDMA1_RLC4_IB_OFFSET_BASE_IDX 0 +#define mmSDMA1_RLC4_IB_BASE_LO 0x029d +#define mmSDMA1_RLC4_IB_BASE_LO_BASE_IDX 0 +#define mmSDMA1_RLC4_IB_BASE_HI 0x029e +#define mmSDMA1_RLC4_IB_BASE_HI_BASE_IDX 0 +#define mmSDMA1_RLC4_IB_SIZE 0x029f +#define mmSDMA1_RLC4_IB_SIZE_BASE_IDX 0 +#define mmSDMA1_RLC4_SKIP_CNTL 0x02a0 +#define mmSDMA1_RLC4_SKIP_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC4_CONTEXT_STATUS 0x02a1 +#define mmSDMA1_RLC4_CONTEXT_STATUS_BASE_IDX 0 +#define mmSDMA1_RLC4_DOORBELL 0x02a2 +#define mmSDMA1_RLC4_DOORBELL_BASE_IDX 0 +#define mmSDMA1_RLC4_STATUS 0x02b8 +#define mmSDMA1_RLC4_STATUS_BASE_IDX 0 +#define mmSDMA1_RLC4_DOORBELL_LOG 0x02b9 +#define mmSDMA1_RLC4_DOORBELL_LOG_BASE_IDX 0 +#define mmSDMA1_RLC4_WATERMARK 0x02ba +#define mmSDMA1_RLC4_WATERMARK_BASE_IDX 0 +#define mmSDMA1_RLC4_DOORBELL_OFFSET 0x02bb +#define mmSDMA1_RLC4_DOORBELL_OFFSET_BASE_IDX 0 +#define mmSDMA1_RLC4_CSA_ADDR_LO 0x02bc +#define mmSDMA1_RLC4_CSA_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC4_CSA_ADDR_HI 0x02bd +#define mmSDMA1_RLC4_CSA_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC4_IB_SUB_REMAIN 0x02bf +#define mmSDMA1_RLC4_IB_SUB_REMAIN_BASE_IDX 0 +#define mmSDMA1_RLC4_PREEMPT 0x02c0 +#define mmSDMA1_RLC4_PREEMPT_BASE_IDX 0 +#define mmSDMA1_RLC4_DUMMY_REG 0x02c1 +#define mmSDMA1_RLC4_DUMMY_REG_BASE_IDX 0 +#define mmSDMA1_RLC4_RB_WPTR_POLL_ADDR_HI 0x02c2 +#define mmSDMA1_RLC4_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC4_RB_WPTR_POLL_ADDR_LO 0x02c3 +#define mmSDMA1_RLC4_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC4_RB_AQL_CNTL 0x02c4 +#define mmSDMA1_RLC4_RB_AQL_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC4_MINOR_PTR_UPDATE 0x02c5 +#define mmSDMA1_RLC4_MINOR_PTR_UPDATE_BASE_IDX 0 +#define mmSDMA1_RLC4_MIDCMD_DATA0 0x02d0 +#define mmSDMA1_RLC4_MIDCMD_DATA0_BASE_IDX 0 +#define mmSDMA1_RLC4_MIDCMD_DATA1 0x02d1 +#define mmSDMA1_RLC4_MIDCMD_DATA1_BASE_IDX 0 +#define mmSDMA1_RLC4_MIDCMD_DATA2 0x02d2 +#define mmSDMA1_RLC4_MIDCMD_DATA2_BASE_IDX 0 +#define mmSDMA1_RLC4_MIDCMD_DATA3 0x02d3 +#define 
mmSDMA1_RLC4_MIDCMD_DATA3_BASE_IDX 0 +#define mmSDMA1_RLC4_MIDCMD_DATA4 0x02d4 +#define mmSDMA1_RLC4_MIDCMD_DATA4_BASE_IDX 0 +#define mmSDMA1_RLC4_MIDCMD_DATA5 0x02d5 +#define mmSDMA1_RLC4_MIDCMD_DATA5_BASE_IDX 0 +#define mmSDMA1_RLC4_MIDCMD_DATA6 0x02d6 +#define mmSDMA1_RLC4_MIDCMD_DATA6_BASE_IDX 0 +#define mmSDMA1_RLC4_MIDCMD_DATA7 0x02d7 +#define mmSDMA1_RLC4_MIDCMD_DATA7_BASE_IDX 0 +#define mmSDMA1_RLC4_MIDCMD_DATA8 0x02d8 +#define mmSDMA1_RLC4_MIDCMD_DATA8_BASE_IDX 0 +#define mmSDMA1_RLC4_MIDCMD_CNTL 0x02d9 +#define mmSDMA1_RLC4_MIDCMD_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC5_RB_CNTL 0x02e8 +#define mmSDMA1_RLC5_RB_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC5_RB_BASE 0x02e9 +#define mmSDMA1_RLC5_RB_BASE_BASE_IDX 0 +#define mmSDMA1_RLC5_RB_BASE_HI 0x02ea +#define mmSDMA1_RLC5_RB_BASE_HI_BASE_IDX 0 +#define mmSDMA1_RLC5_RB_RPTR 0x02eb +#define mmSDMA1_RLC5_RB_RPTR_BASE_IDX 0 +#define mmSDMA1_RLC5_RB_RPTR_HI 0x02ec +#define mmSDMA1_RLC5_RB_RPTR_HI_BASE_IDX 0 +#define mmSDMA1_RLC5_RB_WPTR 0x02ed +#define mmSDMA1_RLC5_RB_WPTR_BASE_IDX 0 +#define mmSDMA1_RLC5_RB_WPTR_HI 0x02ee +#define mmSDMA1_RLC5_RB_WPTR_HI_BASE_IDX 0 +#define mmSDMA1_RLC5_RB_WPTR_POLL_CNTL 0x02ef +#define mmSDMA1_RLC5_RB_WPTR_POLL_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC5_RB_RPTR_ADDR_HI 0x02f0 +#define mmSDMA1_RLC5_RB_RPTR_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC5_RB_RPTR_ADDR_LO 0x02f1 +#define mmSDMA1_RLC5_RB_RPTR_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC5_IB_CNTL 0x02f2 +#define mmSDMA1_RLC5_IB_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC5_IB_RPTR 0x02f3 +#define mmSDMA1_RLC5_IB_RPTR_BASE_IDX 0 +#define mmSDMA1_RLC5_IB_OFFSET 0x02f4 +#define mmSDMA1_RLC5_IB_OFFSET_BASE_IDX 0 +#define mmSDMA1_RLC5_IB_BASE_LO 0x02f5 +#define mmSDMA1_RLC5_IB_BASE_LO_BASE_IDX 0 +#define mmSDMA1_RLC5_IB_BASE_HI 0x02f6 +#define mmSDMA1_RLC5_IB_BASE_HI_BASE_IDX 0 +#define mmSDMA1_RLC5_IB_SIZE 0x02f7 +#define mmSDMA1_RLC5_IB_SIZE_BASE_IDX 0 +#define mmSDMA1_RLC5_SKIP_CNTL 0x02f8 +#define mmSDMA1_RLC5_SKIP_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC5_CONTEXT_STATUS 0x02f9 +#define mmSDMA1_RLC5_CONTEXT_STATUS_BASE_IDX 0 +#define mmSDMA1_RLC5_DOORBELL 0x02fa +#define mmSDMA1_RLC5_DOORBELL_BASE_IDX 0 +#define mmSDMA1_RLC5_STATUS 0x0310 +#define mmSDMA1_RLC5_STATUS_BASE_IDX 0 +#define mmSDMA1_RLC5_DOORBELL_LOG 0x0311 +#define mmSDMA1_RLC5_DOORBELL_LOG_BASE_IDX 0 +#define mmSDMA1_RLC5_WATERMARK 0x0312 +#define mmSDMA1_RLC5_WATERMARK_BASE_IDX 0 +#define mmSDMA1_RLC5_DOORBELL_OFFSET 0x0313 +#define mmSDMA1_RLC5_DOORBELL_OFFSET_BASE_IDX 0 +#define mmSDMA1_RLC5_CSA_ADDR_LO 0x0314 +#define mmSDMA1_RLC5_CSA_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC5_CSA_ADDR_HI 0x0315 +#define mmSDMA1_RLC5_CSA_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC5_IB_SUB_REMAIN 0x0317 +#define mmSDMA1_RLC5_IB_SUB_REMAIN_BASE_IDX 0 +#define mmSDMA1_RLC5_PREEMPT 0x0318 +#define mmSDMA1_RLC5_PREEMPT_BASE_IDX 0 +#define mmSDMA1_RLC5_DUMMY_REG 0x0319 +#define mmSDMA1_RLC5_DUMMY_REG_BASE_IDX 0 +#define mmSDMA1_RLC5_RB_WPTR_POLL_ADDR_HI 0x031a +#define mmSDMA1_RLC5_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC5_RB_WPTR_POLL_ADDR_LO 0x031b +#define mmSDMA1_RLC5_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC5_RB_AQL_CNTL 0x031c +#define mmSDMA1_RLC5_RB_AQL_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC5_MINOR_PTR_UPDATE 0x031d +#define mmSDMA1_RLC5_MINOR_PTR_UPDATE_BASE_IDX 0 +#define mmSDMA1_RLC5_MIDCMD_DATA0 0x0328 +#define mmSDMA1_RLC5_MIDCMD_DATA0_BASE_IDX 0 +#define mmSDMA1_RLC5_MIDCMD_DATA1 0x0329 +#define mmSDMA1_RLC5_MIDCMD_DATA1_BASE_IDX 0 +#define mmSDMA1_RLC5_MIDCMD_DATA2 0x032a +#define 
mmSDMA1_RLC5_MIDCMD_DATA2_BASE_IDX 0 +#define mmSDMA1_RLC5_MIDCMD_DATA3 0x032b +#define mmSDMA1_RLC5_MIDCMD_DATA3_BASE_IDX 0 +#define mmSDMA1_RLC5_MIDCMD_DATA4 0x032c +#define mmSDMA1_RLC5_MIDCMD_DATA4_BASE_IDX 0 +#define mmSDMA1_RLC5_MIDCMD_DATA5 0x032d +#define mmSDMA1_RLC5_MIDCMD_DATA5_BASE_IDX 0 +#define mmSDMA1_RLC5_MIDCMD_DATA6 0x032e +#define mmSDMA1_RLC5_MIDCMD_DATA6_BASE_IDX 0 +#define mmSDMA1_RLC5_MIDCMD_DATA7 0x032f +#define mmSDMA1_RLC5_MIDCMD_DATA7_BASE_IDX 0 +#define mmSDMA1_RLC5_MIDCMD_DATA8 0x0330 +#define mmSDMA1_RLC5_MIDCMD_DATA8_BASE_IDX 0 +#define mmSDMA1_RLC5_MIDCMD_CNTL 0x0331 +#define mmSDMA1_RLC5_MIDCMD_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC6_RB_CNTL 0x0340 +#define mmSDMA1_RLC6_RB_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC6_RB_BASE 0x0341 +#define mmSDMA1_RLC6_RB_BASE_BASE_IDX 0 +#define mmSDMA1_RLC6_RB_BASE_HI 0x0342 +#define mmSDMA1_RLC6_RB_BASE_HI_BASE_IDX 0 +#define mmSDMA1_RLC6_RB_RPTR 0x0343 +#define mmSDMA1_RLC6_RB_RPTR_BASE_IDX 0 +#define mmSDMA1_RLC6_RB_RPTR_HI 0x0344 +#define mmSDMA1_RLC6_RB_RPTR_HI_BASE_IDX 0 +#define mmSDMA1_RLC6_RB_WPTR 0x0345 +#define mmSDMA1_RLC6_RB_WPTR_BASE_IDX 0 +#define mmSDMA1_RLC6_RB_WPTR_HI 0x0346 +#define mmSDMA1_RLC6_RB_WPTR_HI_BASE_IDX 0 +#define mmSDMA1_RLC6_RB_WPTR_POLL_CNTL 0x0347 +#define mmSDMA1_RLC6_RB_WPTR_POLL_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC6_RB_RPTR_ADDR_HI 0x0348 +#define mmSDMA1_RLC6_RB_RPTR_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC6_RB_RPTR_ADDR_LO 0x0349 +#define mmSDMA1_RLC6_RB_RPTR_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC6_IB_CNTL 0x034a +#define mmSDMA1_RLC6_IB_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC6_IB_RPTR 0x034b +#define mmSDMA1_RLC6_IB_RPTR_BASE_IDX 0 +#define mmSDMA1_RLC6_IB_OFFSET 0x034c +#define mmSDMA1_RLC6_IB_OFFSET_BASE_IDX 0 +#define mmSDMA1_RLC6_IB_BASE_LO 0x034d +#define mmSDMA1_RLC6_IB_BASE_LO_BASE_IDX 0 +#define mmSDMA1_RLC6_IB_BASE_HI 0x034e +#define mmSDMA1_RLC6_IB_BASE_HI_BASE_IDX 0 +#define mmSDMA1_RLC6_IB_SIZE 0x034f +#define mmSDMA1_RLC6_IB_SIZE_BASE_IDX 0 +#define mmSDMA1_RLC6_SKIP_CNTL 0x0350 +#define mmSDMA1_RLC6_SKIP_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC6_CONTEXT_STATUS 0x0351 +#define mmSDMA1_RLC6_CONTEXT_STATUS_BASE_IDX 0 +#define mmSDMA1_RLC6_DOORBELL 0x0352 +#define mmSDMA1_RLC6_DOORBELL_BASE_IDX 0 +#define mmSDMA1_RLC6_STATUS 0x0368 +#define mmSDMA1_RLC6_STATUS_BASE_IDX 0 +#define mmSDMA1_RLC6_DOORBELL_LOG 0x0369 +#define mmSDMA1_RLC6_DOORBELL_LOG_BASE_IDX 0 +#define mmSDMA1_RLC6_WATERMARK 0x036a +#define mmSDMA1_RLC6_WATERMARK_BASE_IDX 0 +#define mmSDMA1_RLC6_DOORBELL_OFFSET 0x036b +#define mmSDMA1_RLC6_DOORBELL_OFFSET_BASE_IDX 0 +#define mmSDMA1_RLC6_CSA_ADDR_LO 0x036c +#define mmSDMA1_RLC6_CSA_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC6_CSA_ADDR_HI 0x036d +#define mmSDMA1_RLC6_CSA_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC6_IB_SUB_REMAIN 0x036f +#define mmSDMA1_RLC6_IB_SUB_REMAIN_BASE_IDX 0 +#define mmSDMA1_RLC6_PREEMPT 0x0370 +#define mmSDMA1_RLC6_PREEMPT_BASE_IDX 0 +#define mmSDMA1_RLC6_DUMMY_REG 0x0371 +#define mmSDMA1_RLC6_DUMMY_REG_BASE_IDX 0 +#define mmSDMA1_RLC6_RB_WPTR_POLL_ADDR_HI 0x0372 +#define mmSDMA1_RLC6_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC6_RB_WPTR_POLL_ADDR_LO 0x0373 +#define mmSDMA1_RLC6_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC6_RB_AQL_CNTL 0x0374 +#define mmSDMA1_RLC6_RB_AQL_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC6_MINOR_PTR_UPDATE 0x0375 +#define mmSDMA1_RLC6_MINOR_PTR_UPDATE_BASE_IDX 0 +#define mmSDMA1_RLC6_MIDCMD_DATA0 0x0380 +#define mmSDMA1_RLC6_MIDCMD_DATA0_BASE_IDX 0 +#define mmSDMA1_RLC6_MIDCMD_DATA1 0x0381 +#define 
mmSDMA1_RLC6_MIDCMD_DATA1_BASE_IDX 0 +#define mmSDMA1_RLC6_MIDCMD_DATA2 0x0382 +#define mmSDMA1_RLC6_MIDCMD_DATA2_BASE_IDX 0 +#define mmSDMA1_RLC6_MIDCMD_DATA3 0x0383 +#define mmSDMA1_RLC6_MIDCMD_DATA3_BASE_IDX 0 +#define mmSDMA1_RLC6_MIDCMD_DATA4 0x0384 +#define mmSDMA1_RLC6_MIDCMD_DATA4_BASE_IDX 0 +#define mmSDMA1_RLC6_MIDCMD_DATA5 0x0385 +#define mmSDMA1_RLC6_MIDCMD_DATA5_BASE_IDX 0 +#define mmSDMA1_RLC6_MIDCMD_DATA6 0x0386 +#define mmSDMA1_RLC6_MIDCMD_DATA6_BASE_IDX 0 +#define mmSDMA1_RLC6_MIDCMD_DATA7 0x0387 +#define mmSDMA1_RLC6_MIDCMD_DATA7_BASE_IDX 0 +#define mmSDMA1_RLC6_MIDCMD_DATA8 0x0388 +#define mmSDMA1_RLC6_MIDCMD_DATA8_BASE_IDX 0 +#define mmSDMA1_RLC6_MIDCMD_CNTL 0x0389 +#define mmSDMA1_RLC6_MIDCMD_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC7_RB_CNTL 0x0398 +#define mmSDMA1_RLC7_RB_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC7_RB_BASE 0x0399 +#define mmSDMA1_RLC7_RB_BASE_BASE_IDX 0 +#define mmSDMA1_RLC7_RB_BASE_HI 0x039a +#define mmSDMA1_RLC7_RB_BASE_HI_BASE_IDX 0 +#define mmSDMA1_RLC7_RB_RPTR 0x039b +#define mmSDMA1_RLC7_RB_RPTR_BASE_IDX 0 +#define mmSDMA1_RLC7_RB_RPTR_HI 0x039c +#define mmSDMA1_RLC7_RB_RPTR_HI_BASE_IDX 0 +#define mmSDMA1_RLC7_RB_WPTR 0x039d +#define mmSDMA1_RLC7_RB_WPTR_BASE_IDX 0 +#define mmSDMA1_RLC7_RB_WPTR_HI 0x039e +#define mmSDMA1_RLC7_RB_WPTR_HI_BASE_IDX 0 +#define mmSDMA1_RLC7_RB_WPTR_POLL_CNTL 0x039f +#define mmSDMA1_RLC7_RB_WPTR_POLL_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC7_RB_RPTR_ADDR_HI 0x03a0 +#define mmSDMA1_RLC7_RB_RPTR_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC7_RB_RPTR_ADDR_LO 0x03a1 +#define mmSDMA1_RLC7_RB_RPTR_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC7_IB_CNTL 0x03a2 +#define mmSDMA1_RLC7_IB_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC7_IB_RPTR 0x03a3 +#define mmSDMA1_RLC7_IB_RPTR_BASE_IDX 0 +#define mmSDMA1_RLC7_IB_OFFSET 0x03a4 +#define mmSDMA1_RLC7_IB_OFFSET_BASE_IDX 0 +#define mmSDMA1_RLC7_IB_BASE_LO 0x03a5 +#define mmSDMA1_RLC7_IB_BASE_LO_BASE_IDX 0 +#define mmSDMA1_RLC7_IB_BASE_HI 0x03a6 +#define mmSDMA1_RLC7_IB_BASE_HI_BASE_IDX 0 +#define mmSDMA1_RLC7_IB_SIZE 0x03a7 +#define mmSDMA1_RLC7_IB_SIZE_BASE_IDX 0 +#define mmSDMA1_RLC7_SKIP_CNTL 0x03a8 +#define mmSDMA1_RLC7_SKIP_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC7_CONTEXT_STATUS 0x03a9 +#define mmSDMA1_RLC7_CONTEXT_STATUS_BASE_IDX 0 +#define mmSDMA1_RLC7_DOORBELL 0x03aa +#define mmSDMA1_RLC7_DOORBELL_BASE_IDX 0 +#define mmSDMA1_RLC7_STATUS 0x03c0 +#define mmSDMA1_RLC7_STATUS_BASE_IDX 0 +#define mmSDMA1_RLC7_DOORBELL_LOG 0x03c1 +#define mmSDMA1_RLC7_DOORBELL_LOG_BASE_IDX 0 +#define mmSDMA1_RLC7_WATERMARK 0x03c2 +#define mmSDMA1_RLC7_WATERMARK_BASE_IDX 0 +#define mmSDMA1_RLC7_DOORBELL_OFFSET 0x03c3 +#define mmSDMA1_RLC7_DOORBELL_OFFSET_BASE_IDX 0 +#define mmSDMA1_RLC7_CSA_ADDR_LO 0x03c4 +#define mmSDMA1_RLC7_CSA_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC7_CSA_ADDR_HI 0x03c5 +#define mmSDMA1_RLC7_CSA_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC7_IB_SUB_REMAIN 0x03c7 +#define mmSDMA1_RLC7_IB_SUB_REMAIN_BASE_IDX 0 +#define mmSDMA1_RLC7_PREEMPT 0x03c8 +#define mmSDMA1_RLC7_PREEMPT_BASE_IDX 0 +#define mmSDMA1_RLC7_DUMMY_REG 0x03c9 +#define mmSDMA1_RLC7_DUMMY_REG_BASE_IDX 0 +#define mmSDMA1_RLC7_RB_WPTR_POLL_ADDR_HI 0x03ca +#define mmSDMA1_RLC7_RB_WPTR_POLL_ADDR_HI_BASE_IDX 0 +#define mmSDMA1_RLC7_RB_WPTR_POLL_ADDR_LO 0x03cb +#define mmSDMA1_RLC7_RB_WPTR_POLL_ADDR_LO_BASE_IDX 0 +#define mmSDMA1_RLC7_RB_AQL_CNTL 0x03cc +#define mmSDMA1_RLC7_RB_AQL_CNTL_BASE_IDX 0 +#define mmSDMA1_RLC7_MINOR_PTR_UPDATE 0x03cd +#define mmSDMA1_RLC7_MINOR_PTR_UPDATE_BASE_IDX 0 +#define mmSDMA1_RLC7_MIDCMD_DATA0 0x03d8 +#define 
mmSDMA1_RLC7_MIDCMD_DATA0_BASE_IDX 0 +#define mmSDMA1_RLC7_MIDCMD_DATA1 0x03d9 +#define mmSDMA1_RLC7_MIDCMD_DATA1_BASE_IDX 0 +#define mmSDMA1_RLC7_MIDCMD_DATA2 0x03da +#define mmSDMA1_RLC7_MIDCMD_DATA2_BASE_IDX 0 +#define mmSDMA1_RLC7_MIDCMD_DATA3 0x03db +#define mmSDMA1_RLC7_MIDCMD_DATA3_BASE_IDX 0 +#define mmSDMA1_RLC7_MIDCMD_DATA4 0x03dc +#define mmSDMA1_RLC7_MIDCMD_DATA4_BASE_IDX 0 +#define mmSDMA1_RLC7_MIDCMD_DATA5 0x03dd +#define mmSDMA1_RLC7_MIDCMD_DATA5_BASE_IDX 0 +#define mmSDMA1_RLC7_MIDCMD_DATA6 0x03de +#define mmSDMA1_RLC7_MIDCMD_DATA6_BASE_IDX 0 +#define mmSDMA1_RLC7_MIDCMD_DATA7 0x03df +#define mmSDMA1_RLC7_MIDCMD_DATA7_BASE_IDX 0 +#define mmSDMA1_RLC7_MIDCMD_DATA8 0x03e0 +#define mmSDMA1_RLC7_MIDCMD_DATA8_BASE_IDX 0 +#define mmSDMA1_RLC7_MIDCMD_CNTL 0x03e1 +#define mmSDMA1_RLC7_MIDCMD_CNTL_BASE_IDX 0 + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_2_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_2_2_sh_mask.h new file mode 100644 index 000000000000..ac2468e6bc46 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_2_2_sh_mask.h @@ -0,0 +1,2956 @@ +/* + * Copyright (C) 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef _sdma1_4_2_2_SH_MASK_HEADER +#define _sdma1_4_2_2_SH_MASK_HEADER + + +// addressBlock: sdma1_sdma1dec +//SDMA1_UCODE_ADDR +#define SDMA1_UCODE_ADDR__VALUE__SHIFT 0x0 +#define SDMA1_UCODE_ADDR__VALUE_MASK 0x00001FFFL +//SDMA1_UCODE_DATA +#define SDMA1_UCODE_DATA__VALUE__SHIFT 0x0 +#define SDMA1_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL +//SDMA1_VM_CNTL +#define SDMA1_VM_CNTL__CMD__SHIFT 0x0 +#define SDMA1_VM_CNTL__CMD_MASK 0x0000000FL +//SDMA1_VM_CTX_LO +#define SDMA1_VM_CTX_LO__ADDR__SHIFT 0x2 +#define SDMA1_VM_CTX_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_VM_CTX_HI +#define SDMA1_VM_CTX_HI__ADDR__SHIFT 0x0 +#define SDMA1_VM_CTX_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_ACTIVE_FCN_ID +#define SDMA1_ACTIVE_FCN_ID__VFID__SHIFT 0x0 +#define SDMA1_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4 +#define SDMA1_ACTIVE_FCN_ID__VF__SHIFT 0x1f +#define SDMA1_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL +#define SDMA1_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L +#define SDMA1_ACTIVE_FCN_ID__VF_MASK 0x80000000L +//SDMA1_VM_CTX_CNTL +#define SDMA1_VM_CTX_CNTL__PRIV__SHIFT 0x0 +#define SDMA1_VM_CTX_CNTL__VMID__SHIFT 0x4 +#define SDMA1_VM_CTX_CNTL__PRIV_MASK 0x00000001L +#define SDMA1_VM_CTX_CNTL__VMID_MASK 0x000000F0L +//SDMA1_VIRT_RESET_REQ +#define SDMA1_VIRT_RESET_REQ__VF__SHIFT 0x0 +#define SDMA1_VIRT_RESET_REQ__PF__SHIFT 0x1f +#define SDMA1_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL +#define SDMA1_VIRT_RESET_REQ__PF_MASK 0x80000000L +//SDMA1_VF_ENABLE +#define SDMA1_VF_ENABLE__VF_ENABLE__SHIFT 0x0 +#define SDMA1_VF_ENABLE__VF_ENABLE_MASK 0x00000001L +//SDMA1_CONTEXT_REG_TYPE0 +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_CNTL__SHIFT 0x0 +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_BASE__SHIFT 0x1 +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_BASE_HI__SHIFT 0x2 +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR__SHIFT 0x3 +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_HI__SHIFT 0x4 +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR__SHIFT 0x5 +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR_HI__SHIFT 0x6 +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR_POLL_CNTL__SHIFT 0x7 +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_ADDR_HI__SHIFT 0x8 +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_ADDR_LO__SHIFT 0x9 +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_CNTL__SHIFT 0xa +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_RPTR__SHIFT 0xb +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_OFFSET__SHIFT 0xc +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_BASE_LO__SHIFT 0xd +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_BASE_HI__SHIFT 0xe +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_SIZE__SHIFT 0xf +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_SKIP_CNTL__SHIFT 0x10 +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_CONTEXT_STATUS__SHIFT 0x11 +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_DOORBELL__SHIFT 0x12 +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_CONTEXT_CNTL__SHIFT 0x13 +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_CNTL_MASK 0x00000001L +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_BASE_MASK 0x00000002L +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_BASE_HI_MASK 0x00000004L +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_MASK 0x00000008L +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_HI_MASK 0x00000010L +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR_MASK 0x00000020L +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR_HI_MASK 0x00000040L +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_WPTR_POLL_CNTL_MASK 0x00000080L +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_ADDR_HI_MASK 0x00000100L +#define 
SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_RB_RPTR_ADDR_LO_MASK 0x00000200L +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_CNTL_MASK 0x00000400L +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_RPTR_MASK 0x00000800L +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_OFFSET_MASK 0x00001000L +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_BASE_LO_MASK 0x00002000L +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_BASE_HI_MASK 0x00004000L +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_IB_SIZE_MASK 0x00008000L +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_SKIP_CNTL_MASK 0x00010000L +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_CONTEXT_STATUS_MASK 0x00020000L +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_DOORBELL_MASK 0x00040000L +#define SDMA1_CONTEXT_REG_TYPE0__SDMA1_GFX_CONTEXT_CNTL_MASK 0x00080000L +//SDMA1_CONTEXT_REG_TYPE1 +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_STATUS__SHIFT 0x8 +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DOORBELL_LOG__SHIFT 0x9 +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_WATERMARK__SHIFT 0xa +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DOORBELL_OFFSET__SHIFT 0xb +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_CSA_ADDR_LO__SHIFT 0xc +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_CSA_ADDR_HI__SHIFT 0xd +#define SDMA1_CONTEXT_REG_TYPE1__VOID_REG2__SHIFT 0xe +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_IB_SUB_REMAIN__SHIFT 0xf +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_PREEMPT__SHIFT 0x10 +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DUMMY_REG__SHIFT 0x11 +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_WPTR_POLL_ADDR_HI__SHIFT 0x12 +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_WPTR_POLL_ADDR_LO__SHIFT 0x13 +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_AQL_CNTL__SHIFT 0x14 +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_MINOR_PTR_UPDATE__SHIFT 0x15 +#define SDMA1_CONTEXT_REG_TYPE1__RESERVED__SHIFT 0x16 +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_STATUS_MASK 0x00000100L +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DOORBELL_LOG_MASK 0x00000200L +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_WATERMARK_MASK 0x00000400L +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DOORBELL_OFFSET_MASK 0x00000800L +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_CSA_ADDR_LO_MASK 0x00001000L +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_CSA_ADDR_HI_MASK 0x00002000L +#define SDMA1_CONTEXT_REG_TYPE1__VOID_REG2_MASK 0x00004000L +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_IB_SUB_REMAIN_MASK 0x00008000L +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_PREEMPT_MASK 0x00010000L +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_DUMMY_REG_MASK 0x00020000L +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_WPTR_POLL_ADDR_HI_MASK 0x00040000L +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_WPTR_POLL_ADDR_LO_MASK 0x00080000L +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_RB_AQL_CNTL_MASK 0x00100000L +#define SDMA1_CONTEXT_REG_TYPE1__SDMA1_GFX_MINOR_PTR_UPDATE_MASK 0x00200000L +#define SDMA1_CONTEXT_REG_TYPE1__RESERVED_MASK 0xFFC00000L +//SDMA1_CONTEXT_REG_TYPE2 +#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA0__SHIFT 0x0 +#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA1__SHIFT 0x1 +#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA2__SHIFT 0x2 +#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA3__SHIFT 0x3 +#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA4__SHIFT 0x4 +#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA5__SHIFT 0x5 +#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA6__SHIFT 0x6 +#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA7__SHIFT 0x7 +#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA8__SHIFT 0x8 +#define 
SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_CNTL__SHIFT 0x9 +#define SDMA1_CONTEXT_REG_TYPE2__RESERVED__SHIFT 0xa +#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA0_MASK 0x00000001L +#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA1_MASK 0x00000002L +#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA2_MASK 0x00000004L +#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA3_MASK 0x00000008L +#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA4_MASK 0x00000010L +#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA5_MASK 0x00000020L +#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA6_MASK 0x00000040L +#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA7_MASK 0x00000080L +#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_DATA8_MASK 0x00000100L +#define SDMA1_CONTEXT_REG_TYPE2__SDMA1_GFX_MIDCMD_CNTL_MASK 0x00000200L +#define SDMA1_CONTEXT_REG_TYPE2__RESERVED_MASK 0xFFFFFC00L +//SDMA1_CONTEXT_REG_TYPE3 +#define SDMA1_CONTEXT_REG_TYPE3__RESERVED__SHIFT 0x0 +#define SDMA1_CONTEXT_REG_TYPE3__RESERVED_MASK 0xFFFFFFFFL +//SDMA1_PUB_REG_TYPE0 +#define SDMA1_PUB_REG_TYPE0__SDMA1_UCODE_ADDR__SHIFT 0x0 +#define SDMA1_PUB_REG_TYPE0__SDMA1_UCODE_DATA__SHIFT 0x1 +#define SDMA1_PUB_REG_TYPE0__RESERVED3__SHIFT 0x3 +#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CNTL__SHIFT 0x4 +#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_LO__SHIFT 0x5 +#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_HI__SHIFT 0x6 +#define SDMA1_PUB_REG_TYPE0__SDMA1_ACTIVE_FCN_ID__SHIFT 0x7 +#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_CNTL__SHIFT 0x8 +#define SDMA1_PUB_REG_TYPE0__SDMA1_VIRT_RESET_REQ__SHIFT 0x9 +#define SDMA1_PUB_REG_TYPE0__RESERVED10__SHIFT 0xa +#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE0__SHIFT 0xb +#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE1__SHIFT 0xc +#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE2__SHIFT 0xd +#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE3__SHIFT 0xe +#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE0__SHIFT 0xf +#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE1__SHIFT 0x10 +#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE2__SHIFT 0x11 +#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE3__SHIFT 0x12 +#define SDMA1_PUB_REG_TYPE0__SDMA1_MMHUB_CNTL__SHIFT 0x13 +#define SDMA1_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY__SHIFT 0x15 +#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_GROUP_BOUNDARY__SHIFT 0x19 +#define SDMA1_PUB_REG_TYPE0__SDMA1_POWER_CNTL__SHIFT 0x1a +#define SDMA1_PUB_REG_TYPE0__SDMA1_CLK_CTRL__SHIFT 0x1b +#define SDMA1_PUB_REG_TYPE0__SDMA1_CNTL__SHIFT 0x1c +#define SDMA1_PUB_REG_TYPE0__SDMA1_CHICKEN_BITS__SHIFT 0x1d +#define SDMA1_PUB_REG_TYPE0__SDMA1_GB_ADDR_CONFIG__SHIFT 0x1e +#define SDMA1_PUB_REG_TYPE0__SDMA1_GB_ADDR_CONFIG_READ__SHIFT 0x1f +#define SDMA1_PUB_REG_TYPE0__SDMA1_UCODE_ADDR_MASK 0x00000001L +#define SDMA1_PUB_REG_TYPE0__SDMA1_UCODE_DATA_MASK 0x00000002L +#define SDMA1_PUB_REG_TYPE0__RESERVED3_MASK 0x00000008L +#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CNTL_MASK 0x00000010L +#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_LO_MASK 0x00000020L +#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_HI_MASK 0x00000040L +#define SDMA1_PUB_REG_TYPE0__SDMA1_ACTIVE_FCN_ID_MASK 0x00000080L +#define SDMA1_PUB_REG_TYPE0__SDMA1_VM_CTX_CNTL_MASK 0x00000100L +#define SDMA1_PUB_REG_TYPE0__SDMA1_VIRT_RESET_REQ_MASK 0x00000200L +#define SDMA1_PUB_REG_TYPE0__RESERVED10_MASK 0x00000400L +#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE0_MASK 0x00000800L +#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE1_MASK 0x00001000L +#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE2_MASK 
0x00002000L +#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_REG_TYPE3_MASK 0x00004000L +#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE0_MASK 0x00008000L +#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE1_MASK 0x00010000L +#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE2_MASK 0x00020000L +#define SDMA1_PUB_REG_TYPE0__SDMA1_PUB_REG_TYPE3_MASK 0x00040000L +#define SDMA1_PUB_REG_TYPE0__SDMA1_MMHUB_CNTL_MASK 0x00080000L +#define SDMA1_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY_MASK 0x01E00000L +#define SDMA1_PUB_REG_TYPE0__SDMA1_CONTEXT_GROUP_BOUNDARY_MASK 0x02000000L +#define SDMA1_PUB_REG_TYPE0__SDMA1_POWER_CNTL_MASK 0x04000000L +#define SDMA1_PUB_REG_TYPE0__SDMA1_CLK_CTRL_MASK 0x08000000L +#define SDMA1_PUB_REG_TYPE0__SDMA1_CNTL_MASK 0x10000000L +#define SDMA1_PUB_REG_TYPE0__SDMA1_CHICKEN_BITS_MASK 0x20000000L +#define SDMA1_PUB_REG_TYPE0__SDMA1_GB_ADDR_CONFIG_MASK 0x40000000L +#define SDMA1_PUB_REG_TYPE0__SDMA1_GB_ADDR_CONFIG_READ_MASK 0x80000000L +//SDMA1_PUB_REG_TYPE1 +#define SDMA1_PUB_REG_TYPE1__SDMA1_RB_RPTR_FETCH_HI__SHIFT 0x0 +#define SDMA1_PUB_REG_TYPE1__SDMA1_SEM_WAIT_FAIL_TIMER_CNTL__SHIFT 0x1 +#define SDMA1_PUB_REG_TYPE1__SDMA1_RB_RPTR_FETCH__SHIFT 0x2 +#define SDMA1_PUB_REG_TYPE1__SDMA1_IB_OFFSET_FETCH__SHIFT 0x3 +#define SDMA1_PUB_REG_TYPE1__SDMA1_PROGRAM__SHIFT 0x4 +#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS_REG__SHIFT 0x5 +#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS1_REG__SHIFT 0x6 +#define SDMA1_PUB_REG_TYPE1__SDMA1_RD_BURST_CNTL__SHIFT 0x7 +#define SDMA1_PUB_REG_TYPE1__SDMA1_HBM_PAGE_CONFIG__SHIFT 0x8 +#define SDMA1_PUB_REG_TYPE1__SDMA1_UCODE_CHECKSUM__SHIFT 0x9 +#define SDMA1_PUB_REG_TYPE1__SDMA1_F32_CNTL__SHIFT 0xa +#define SDMA1_PUB_REG_TYPE1__SDMA1_FREEZE__SHIFT 0xb +#define SDMA1_PUB_REG_TYPE1__SDMA1_PHASE0_QUANTUM__SHIFT 0xc +#define SDMA1_PUB_REG_TYPE1__SDMA1_PHASE1_QUANTUM__SHIFT 0xd +#define SDMA1_PUB_REG_TYPE1__SDMA_POWER_GATING__SHIFT 0xe +#define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG__SHIFT 0xf +#define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_WRITE__SHIFT 0x10 +#define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_READ__SHIFT 0x11 +#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_CONFIG__SHIFT 0x12 +#define SDMA1_PUB_REG_TYPE1__SDMA1_BA_THRESHOLD__SHIFT 0x13 +#define SDMA1_PUB_REG_TYPE1__SDMA1_ID__SHIFT 0x14 +#define SDMA1_PUB_REG_TYPE1__SDMA1_VERSION__SHIFT 0x15 +#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_COUNTER__SHIFT 0x16 +#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_COUNTER_CLEAR__SHIFT 0x17 +#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS2_REG__SHIFT 0x18 +#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_CNTL__SHIFT 0x19 +#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_PREOP_LO__SHIFT 0x1a +#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_PREOP_HI__SHIFT 0x1b +#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_CNTL__SHIFT 0x1c +#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_WATERMK__SHIFT 0x1d +#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_RD_STATUS__SHIFT 0x1e +#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_WR_STATUS__SHIFT 0x1f +#define SDMA1_PUB_REG_TYPE1__SDMA1_RB_RPTR_FETCH_HI_MASK 0x00000001L +#define SDMA1_PUB_REG_TYPE1__SDMA1_SEM_WAIT_FAIL_TIMER_CNTL_MASK 0x00000002L +#define SDMA1_PUB_REG_TYPE1__SDMA1_RB_RPTR_FETCH_MASK 0x00000004L +#define SDMA1_PUB_REG_TYPE1__SDMA1_IB_OFFSET_FETCH_MASK 0x00000008L +#define SDMA1_PUB_REG_TYPE1__SDMA1_PROGRAM_MASK 0x00000010L +#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS_REG_MASK 0x00000020L +#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS1_REG_MASK 0x00000040L +#define SDMA1_PUB_REG_TYPE1__SDMA1_RD_BURST_CNTL_MASK 0x00000080L +#define SDMA1_PUB_REG_TYPE1__SDMA1_HBM_PAGE_CONFIG_MASK 0x00000100L +#define 
SDMA1_PUB_REG_TYPE1__SDMA1_UCODE_CHECKSUM_MASK 0x00000200L +#define SDMA1_PUB_REG_TYPE1__SDMA1_F32_CNTL_MASK 0x00000400L +#define SDMA1_PUB_REG_TYPE1__SDMA1_FREEZE_MASK 0x00000800L +#define SDMA1_PUB_REG_TYPE1__SDMA1_PHASE0_QUANTUM_MASK 0x00001000L +#define SDMA1_PUB_REG_TYPE1__SDMA1_PHASE1_QUANTUM_MASK 0x00002000L +#define SDMA1_PUB_REG_TYPE1__SDMA_POWER_GATING_MASK 0x00004000L +#define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG_MASK 0x00008000L +#define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_WRITE_MASK 0x00010000L +#define SDMA1_PUB_REG_TYPE1__SDMA_PGFSM_READ_MASK 0x00020000L +#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_CONFIG_MASK 0x00040000L +#define SDMA1_PUB_REG_TYPE1__SDMA1_BA_THRESHOLD_MASK 0x00080000L +#define SDMA1_PUB_REG_TYPE1__SDMA1_ID_MASK 0x00100000L +#define SDMA1_PUB_REG_TYPE1__SDMA1_VERSION_MASK 0x00200000L +#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_COUNTER_MASK 0x00400000L +#define SDMA1_PUB_REG_TYPE1__SDMA1_EDC_COUNTER_CLEAR_MASK 0x00800000L +#define SDMA1_PUB_REG_TYPE1__SDMA1_STATUS2_REG_MASK 0x01000000L +#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_CNTL_MASK 0x02000000L +#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_PREOP_LO_MASK 0x04000000L +#define SDMA1_PUB_REG_TYPE1__SDMA1_ATOMIC_PREOP_HI_MASK 0x08000000L +#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_CNTL_MASK 0x10000000L +#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_WATERMK_MASK 0x20000000L +#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_RD_STATUS_MASK 0x40000000L +#define SDMA1_PUB_REG_TYPE1__SDMA1_UTCL1_WR_STATUS_MASK 0x80000000L +//SDMA1_PUB_REG_TYPE2 +#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV0__SHIFT 0x0 +#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV1__SHIFT 0x1 +#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV2__SHIFT 0x2 +#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_XNACK0__SHIFT 0x3 +#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_XNACK1__SHIFT 0x4 +#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_XNACK0__SHIFT 0x5 +#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_XNACK1__SHIFT 0x6 +#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_TIMEOUT__SHIFT 0x7 +#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_PAGE__SHIFT 0x8 +#define SDMA1_PUB_REG_TYPE2__SDMA1_POWER_CNTL_IDLE__SHIFT 0x9 +#define SDMA1_PUB_REG_TYPE2__SDMA1_RELAX_ORDERING_LUT__SHIFT 0xa +#define SDMA1_PUB_REG_TYPE2__SDMA1_CHICKEN_BITS_2__SHIFT 0xb +#define SDMA1_PUB_REG_TYPE2__SDMA1_STATUS3_REG__SHIFT 0xc +#define SDMA1_PUB_REG_TYPE2__SDMA1_PHYSICAL_ADDR_LO__SHIFT 0xd +#define SDMA1_PUB_REG_TYPE2__SDMA1_PHYSICAL_ADDR_HI__SHIFT 0xe +#define SDMA1_PUB_REG_TYPE2__SDMA1_PHASE2_QUANTUM__SHIFT 0xf +#define SDMA1_PUB_REG_TYPE2__SDMA1_ERROR_LOG__SHIFT 0x10 +#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG0__SHIFT 0x11 +#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG1__SHIFT 0x12 +#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG2__SHIFT 0x13 +#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG3__SHIFT 0x14 +#define SDMA1_PUB_REG_TYPE2__SDMA1_F32_COUNTER__SHIFT 0x15 +#define SDMA1_PUB_REG_TYPE2__SDMA1_UNBREAKABLE__SHIFT 0x16 +#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFMON_CNTL__SHIFT 0x17 +#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER0_RESULT__SHIFT 0x18 +#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER1_RESULT__SHIFT 0x19 +#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__SHIFT 0x1a +#define SDMA1_PUB_REG_TYPE2__SDMA1_CRD_CNTL__SHIFT 0x1b +#define SDMA1_PUB_REG_TYPE2__RESERVED28__SHIFT 0x1c +#define SDMA1_PUB_REG_TYPE2__SDMA1_GPU_IOV_VIOLATION_LOG__SHIFT 0x1d +#define SDMA1_PUB_REG_TYPE2__SDMA1_ULV_CNTL__SHIFT 0x1e +#define SDMA1_PUB_REG_TYPE2__RESERVED__SHIFT 0x1f +#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV0_MASK 
0x00000001L +#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV1_MASK 0x00000002L +#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_INV2_MASK 0x00000004L +#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_XNACK0_MASK 0x00000008L +#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_RD_XNACK1_MASK 0x00000010L +#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_XNACK0_MASK 0x00000020L +#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_WR_XNACK1_MASK 0x00000040L +#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_TIMEOUT_MASK 0x00000080L +#define SDMA1_PUB_REG_TYPE2__SDMA1_UTCL1_PAGE_MASK 0x00000100L +#define SDMA1_PUB_REG_TYPE2__SDMA1_POWER_CNTL_IDLE_MASK 0x00000200L +#define SDMA1_PUB_REG_TYPE2__SDMA1_RELAX_ORDERING_LUT_MASK 0x00000400L +#define SDMA1_PUB_REG_TYPE2__SDMA1_CHICKEN_BITS_2_MASK 0x00000800L +#define SDMA1_PUB_REG_TYPE2__SDMA1_STATUS3_REG_MASK 0x00001000L +#define SDMA1_PUB_REG_TYPE2__SDMA1_PHYSICAL_ADDR_LO_MASK 0x00002000L +#define SDMA1_PUB_REG_TYPE2__SDMA1_PHYSICAL_ADDR_HI_MASK 0x00004000L +#define SDMA1_PUB_REG_TYPE2__SDMA1_PHASE2_QUANTUM_MASK 0x00008000L +#define SDMA1_PUB_REG_TYPE2__SDMA1_ERROR_LOG_MASK 0x00010000L +#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG0_MASK 0x00020000L +#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG1_MASK 0x00040000L +#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG2_MASK 0x00080000L +#define SDMA1_PUB_REG_TYPE2__SDMA1_PUB_DUMMY_REG3_MASK 0x00100000L +#define SDMA1_PUB_REG_TYPE2__SDMA1_F32_COUNTER_MASK 0x00200000L +#define SDMA1_PUB_REG_TYPE2__SDMA1_UNBREAKABLE_MASK 0x00400000L +#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFMON_CNTL_MASK 0x00800000L +#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER0_RESULT_MASK 0x01000000L +#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER1_RESULT_MASK 0x02000000L +#define SDMA1_PUB_REG_TYPE2__SDMA1_PERFCOUNTER_TAG_DELAY_RANGE_MASK 0x04000000L +#define SDMA1_PUB_REG_TYPE2__SDMA1_CRD_CNTL_MASK 0x08000000L +#define SDMA1_PUB_REG_TYPE2__RESERVED28_MASK 0x10000000L +#define SDMA1_PUB_REG_TYPE2__SDMA1_GPU_IOV_VIOLATION_LOG_MASK 0x20000000L +#define SDMA1_PUB_REG_TYPE2__SDMA1_ULV_CNTL_MASK 0x40000000L +#define SDMA1_PUB_REG_TYPE2__RESERVED_MASK 0x80000000L +//SDMA1_PUB_REG_TYPE3 +#define SDMA1_PUB_REG_TYPE3__SDMA1_EA_DBIT_ADDR_DATA__SHIFT 0x0 +#define SDMA1_PUB_REG_TYPE3__SDMA1_EA_DBIT_ADDR_INDEX__SHIFT 0x1 +#define SDMA1_PUB_REG_TYPE3__SDMA1_GPU_IOV_VIOLATION_LOG2__SHIFT 0x2 +#define SDMA1_PUB_REG_TYPE3__RESERVED__SHIFT 0x3 +#define SDMA1_PUB_REG_TYPE3__SDMA1_EA_DBIT_ADDR_DATA_MASK 0x00000001L +#define SDMA1_PUB_REG_TYPE3__SDMA1_EA_DBIT_ADDR_INDEX_MASK 0x00000002L +#define SDMA1_PUB_REG_TYPE3__SDMA1_GPU_IOV_VIOLATION_LOG2_MASK 0x00000004L +#define SDMA1_PUB_REG_TYPE3__RESERVED_MASK 0xFFFFFFF8L +//SDMA1_MMHUB_CNTL +#define SDMA1_MMHUB_CNTL__UNIT_ID__SHIFT 0x0 +#define SDMA1_MMHUB_CNTL__UNIT_ID_MASK 0x0000003FL +//SDMA1_CONTEXT_GROUP_BOUNDARY +#define SDMA1_CONTEXT_GROUP_BOUNDARY__RESERVED__SHIFT 0x0 +#define SDMA1_CONTEXT_GROUP_BOUNDARY__RESERVED_MASK 0xFFFFFFFFL +//SDMA1_POWER_CNTL +#define SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE__SHIFT 0x8 +#define SDMA1_POWER_CNTL__MEM_POWER_LS_EN__SHIFT 0x9 +#define SDMA1_POWER_CNTL__MEM_POWER_DS_EN__SHIFT 0xa +#define SDMA1_POWER_CNTL__MEM_POWER_SD_EN__SHIFT 0xb +#define SDMA1_POWER_CNTL__MEM_POWER_DELAY__SHIFT 0xc +#define SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK 0x00000100L +#define SDMA1_POWER_CNTL__MEM_POWER_LS_EN_MASK 0x00000200L +#define SDMA1_POWER_CNTL__MEM_POWER_DS_EN_MASK 0x00000400L +#define SDMA1_POWER_CNTL__MEM_POWER_SD_EN_MASK 0x00000800L +#define SDMA1_POWER_CNTL__MEM_POWER_DELAY_MASK 0x003FF000L +//SDMA1_CLK_CTRL +#define 
SDMA1_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define SDMA1_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define SDMA1_CLK_CTRL__RESERVED__SHIFT 0xc +#define SDMA1_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18 +#define SDMA1_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19 +#define SDMA1_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a +#define SDMA1_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b +#define SDMA1_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c +#define SDMA1_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d +#define SDMA1_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e +#define SDMA1_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f +#define SDMA1_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define SDMA1_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define SDMA1_CLK_CTRL__RESERVED_MASK 0x00FFF000L +#define SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L +#define SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L +#define SDMA1_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L +#define SDMA1_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L +#define SDMA1_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L +#define SDMA1_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L +#define SDMA1_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L +#define SDMA1_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L +//SDMA1_CNTL +#define SDMA1_CNTL__TRAP_ENABLE__SHIFT 0x0 +#define SDMA1_CNTL__UTC_L1_ENABLE__SHIFT 0x1 +#define SDMA1_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x2 +#define SDMA1_CNTL__DATA_SWAP_ENABLE__SHIFT 0x3 +#define SDMA1_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x4 +#define SDMA1_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x5 +#define SDMA1_CNTL__MIDCMD_WORLDSWITCH_ENABLE__SHIFT 0x11 +#define SDMA1_CNTL__AUTO_CTXSW_ENABLE__SHIFT 0x12 +#define SDMA1_CNTL__CTXEMPTY_INT_ENABLE__SHIFT 0x1c +#define SDMA1_CNTL__FROZEN_INT_ENABLE__SHIFT 0x1d +#define SDMA1_CNTL__IB_PREEMPT_INT_ENABLE__SHIFT 0x1e +#define SDMA1_CNTL__TRAP_ENABLE_MASK 0x00000001L +#define SDMA1_CNTL__UTC_L1_ENABLE_MASK 0x00000002L +#define SDMA1_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L +#define SDMA1_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L +#define SDMA1_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L +#define SDMA1_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00000020L +#define SDMA1_CNTL__MIDCMD_WORLDSWITCH_ENABLE_MASK 0x00020000L +#define SDMA1_CNTL__AUTO_CTXSW_ENABLE_MASK 0x00040000L +#define SDMA1_CNTL__CTXEMPTY_INT_ENABLE_MASK 0x10000000L +#define SDMA1_CNTL__FROZEN_INT_ENABLE_MASK 0x20000000L +#define SDMA1_CNTL__IB_PREEMPT_INT_ENABLE_MASK 0x40000000L +//SDMA1_CHICKEN_BITS +#define SDMA1_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE__SHIFT 0x0 +#define SDMA1_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE__SHIFT 0x1 +#define SDMA1_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE__SHIFT 0x2 +#define SDMA1_CHICKEN_BITS__WRITE_BURST_LENGTH__SHIFT 0x8 +#define SDMA1_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE__SHIFT 0xa +#define SDMA1_CHICKEN_BITS__COPY_OVERLAP_ENABLE__SHIFT 0x10 +#define SDMA1_CHICKEN_BITS__RAW_CHECK_ENABLE__SHIFT 0x11 +#define SDMA1_CHICKEN_BITS__SRBM_POLL_RETRYING__SHIFT 0x14 +#define SDMA1_CHICKEN_BITS__CG_STATUS_OUTPUT__SHIFT 0x17 +#define SDMA1_CHICKEN_BITS__TIME_BASED_QOS__SHIFT 0x19 +#define SDMA1_CHICKEN_BITS__CE_AFIFO_WATERMARK__SHIFT 0x1a +#define SDMA1_CHICKEN_BITS__CE_DFIFO_WATERMARK__SHIFT 0x1c +#define SDMA1_CHICKEN_BITS__CE_LFIFO_WATERMARK__SHIFT 0x1e +#define SDMA1_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE_MASK 0x00000001L +#define SDMA1_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE_MASK 0x00000002L +#define SDMA1_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE_MASK 0x00000004L +#define SDMA1_CHICKEN_BITS__WRITE_BURST_LENGTH_MASK 0x00000300L +#define SDMA1_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE_MASK 0x00001C00L 
+#define SDMA1_CHICKEN_BITS__COPY_OVERLAP_ENABLE_MASK 0x00010000L +#define SDMA1_CHICKEN_BITS__RAW_CHECK_ENABLE_MASK 0x00020000L +#define SDMA1_CHICKEN_BITS__SRBM_POLL_RETRYING_MASK 0x00100000L +#define SDMA1_CHICKEN_BITS__CG_STATUS_OUTPUT_MASK 0x00800000L +#define SDMA1_CHICKEN_BITS__TIME_BASED_QOS_MASK 0x02000000L +#define SDMA1_CHICKEN_BITS__CE_AFIFO_WATERMARK_MASK 0x0C000000L +#define SDMA1_CHICKEN_BITS__CE_DFIFO_WATERMARK_MASK 0x30000000L +#define SDMA1_CHICKEN_BITS__CE_LFIFO_WATERMARK_MASK 0xC0000000L +//SDMA1_GB_ADDR_CONFIG +#define SDMA1_GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0 +#define SDMA1_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3 +#define SDMA1_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x8 +#define SDMA1_GB_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc +#define SDMA1_GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13 +#define SDMA1_GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L +#define SDMA1_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L +#define SDMA1_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L +#define SDMA1_GB_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L +#define SDMA1_GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L +//SDMA1_GB_ADDR_CONFIG_READ +#define SDMA1_GB_ADDR_CONFIG_READ__NUM_PIPES__SHIFT 0x0 +#define SDMA1_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE__SHIFT 0x3 +#define SDMA1_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE__SHIFT 0x8 +#define SDMA1_GB_ADDR_CONFIG_READ__NUM_BANKS__SHIFT 0xc +#define SDMA1_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES__SHIFT 0x13 +#define SDMA1_GB_ADDR_CONFIG_READ__NUM_PIPES_MASK 0x00000007L +#define SDMA1_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L +#define SDMA1_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE_MASK 0x00000700L +#define SDMA1_GB_ADDR_CONFIG_READ__NUM_BANKS_MASK 0x00007000L +#define SDMA1_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES_MASK 0x00180000L +//SDMA1_RB_RPTR_FETCH_HI +#define SDMA1_RB_RPTR_FETCH_HI__OFFSET__SHIFT 0x0 +#define SDMA1_RB_RPTR_FETCH_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_SEM_WAIT_FAIL_TIMER_CNTL +#define SDMA1_SEM_WAIT_FAIL_TIMER_CNTL__TIMER__SHIFT 0x0 +#define SDMA1_SEM_WAIT_FAIL_TIMER_CNTL__TIMER_MASK 0xFFFFFFFFL +//SDMA1_RB_RPTR_FETCH +#define SDMA1_RB_RPTR_FETCH__OFFSET__SHIFT 0x2 +#define SDMA1_RB_RPTR_FETCH__OFFSET_MASK 0xFFFFFFFCL +//SDMA1_IB_OFFSET_FETCH +#define SDMA1_IB_OFFSET_FETCH__OFFSET__SHIFT 0x2 +#define SDMA1_IB_OFFSET_FETCH__OFFSET_MASK 0x003FFFFCL +//SDMA1_PROGRAM +#define SDMA1_PROGRAM__STREAM__SHIFT 0x0 +#define SDMA1_PROGRAM__STREAM_MASK 0xFFFFFFFFL +//SDMA1_STATUS_REG +#define SDMA1_STATUS_REG__IDLE__SHIFT 0x0 +#define SDMA1_STATUS_REG__REG_IDLE__SHIFT 0x1 +#define SDMA1_STATUS_REG__RB_EMPTY__SHIFT 0x2 +#define SDMA1_STATUS_REG__RB_FULL__SHIFT 0x3 +#define SDMA1_STATUS_REG__RB_CMD_IDLE__SHIFT 0x4 +#define SDMA1_STATUS_REG__RB_CMD_FULL__SHIFT 0x5 +#define SDMA1_STATUS_REG__IB_CMD_IDLE__SHIFT 0x6 +#define SDMA1_STATUS_REG__IB_CMD_FULL__SHIFT 0x7 +#define SDMA1_STATUS_REG__BLOCK_IDLE__SHIFT 0x8 +#define SDMA1_STATUS_REG__INSIDE_IB__SHIFT 0x9 +#define SDMA1_STATUS_REG__EX_IDLE__SHIFT 0xa +#define SDMA1_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE__SHIFT 0xb +#define SDMA1_STATUS_REG__PACKET_READY__SHIFT 0xc +#define SDMA1_STATUS_REG__MC_WR_IDLE__SHIFT 0xd +#define SDMA1_STATUS_REG__SRBM_IDLE__SHIFT 0xe +#define SDMA1_STATUS_REG__CONTEXT_EMPTY__SHIFT 0xf +#define SDMA1_STATUS_REG__DELTA_RPTR_FULL__SHIFT 0x10 +#define SDMA1_STATUS_REG__RB_MC_RREQ_IDLE__SHIFT 0x11 +#define SDMA1_STATUS_REG__IB_MC_RREQ_IDLE__SHIFT 0x12 +#define SDMA1_STATUS_REG__MC_RD_IDLE__SHIFT 0x13 +#define 
SDMA1_STATUS_REG__DELTA_RPTR_EMPTY__SHIFT 0x14 +#define SDMA1_STATUS_REG__MC_RD_RET_STALL__SHIFT 0x15 +#define SDMA1_STATUS_REG__MC_RD_NO_POLL_IDLE__SHIFT 0x16 +#define SDMA1_STATUS_REG__PREV_CMD_IDLE__SHIFT 0x19 +#define SDMA1_STATUS_REG__SEM_IDLE__SHIFT 0x1a +#define SDMA1_STATUS_REG__SEM_REQ_STALL__SHIFT 0x1b +#define SDMA1_STATUS_REG__SEM_RESP_STATE__SHIFT 0x1c +#define SDMA1_STATUS_REG__INT_IDLE__SHIFT 0x1e +#define SDMA1_STATUS_REG__INT_REQ_STALL__SHIFT 0x1f +#define SDMA1_STATUS_REG__IDLE_MASK 0x00000001L +#define SDMA1_STATUS_REG__REG_IDLE_MASK 0x00000002L +#define SDMA1_STATUS_REG__RB_EMPTY_MASK 0x00000004L +#define SDMA1_STATUS_REG__RB_FULL_MASK 0x00000008L +#define SDMA1_STATUS_REG__RB_CMD_IDLE_MASK 0x00000010L +#define SDMA1_STATUS_REG__RB_CMD_FULL_MASK 0x00000020L +#define SDMA1_STATUS_REG__IB_CMD_IDLE_MASK 0x00000040L +#define SDMA1_STATUS_REG__IB_CMD_FULL_MASK 0x00000080L +#define SDMA1_STATUS_REG__BLOCK_IDLE_MASK 0x00000100L +#define SDMA1_STATUS_REG__INSIDE_IB_MASK 0x00000200L +#define SDMA1_STATUS_REG__EX_IDLE_MASK 0x00000400L +#define SDMA1_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE_MASK 0x00000800L +#define SDMA1_STATUS_REG__PACKET_READY_MASK 0x00001000L +#define SDMA1_STATUS_REG__MC_WR_IDLE_MASK 0x00002000L +#define SDMA1_STATUS_REG__SRBM_IDLE_MASK 0x00004000L +#define SDMA1_STATUS_REG__CONTEXT_EMPTY_MASK 0x00008000L +#define SDMA1_STATUS_REG__DELTA_RPTR_FULL_MASK 0x00010000L +#define SDMA1_STATUS_REG__RB_MC_RREQ_IDLE_MASK 0x00020000L +#define SDMA1_STATUS_REG__IB_MC_RREQ_IDLE_MASK 0x00040000L +#define SDMA1_STATUS_REG__MC_RD_IDLE_MASK 0x00080000L +#define SDMA1_STATUS_REG__DELTA_RPTR_EMPTY_MASK 0x00100000L +#define SDMA1_STATUS_REG__MC_RD_RET_STALL_MASK 0x00200000L +#define SDMA1_STATUS_REG__MC_RD_NO_POLL_IDLE_MASK 0x00400000L +#define SDMA1_STATUS_REG__PREV_CMD_IDLE_MASK 0x02000000L +#define SDMA1_STATUS_REG__SEM_IDLE_MASK 0x04000000L +#define SDMA1_STATUS_REG__SEM_REQ_STALL_MASK 0x08000000L +#define SDMA1_STATUS_REG__SEM_RESP_STATE_MASK 0x30000000L +#define SDMA1_STATUS_REG__INT_IDLE_MASK 0x40000000L +#define SDMA1_STATUS_REG__INT_REQ_STALL_MASK 0x80000000L +//SDMA1_STATUS1_REG +#define SDMA1_STATUS1_REG__CE_WREQ_IDLE__SHIFT 0x0 +#define SDMA1_STATUS1_REG__CE_WR_IDLE__SHIFT 0x1 +#define SDMA1_STATUS1_REG__CE_SPLIT_IDLE__SHIFT 0x2 +#define SDMA1_STATUS1_REG__CE_RREQ_IDLE__SHIFT 0x3 +#define SDMA1_STATUS1_REG__CE_OUT_IDLE__SHIFT 0x4 +#define SDMA1_STATUS1_REG__CE_IN_IDLE__SHIFT 0x5 +#define SDMA1_STATUS1_REG__CE_DST_IDLE__SHIFT 0x6 +#define SDMA1_STATUS1_REG__CE_CMD_IDLE__SHIFT 0x9 +#define SDMA1_STATUS1_REG__CE_AFIFO_FULL__SHIFT 0xa +#define SDMA1_STATUS1_REG__CE_INFO_FULL__SHIFT 0xd +#define SDMA1_STATUS1_REG__CE_INFO1_FULL__SHIFT 0xe +#define SDMA1_STATUS1_REG__EX_START__SHIFT 0xf +#define SDMA1_STATUS1_REG__CE_RD_STALL__SHIFT 0x11 +#define SDMA1_STATUS1_REG__CE_WR_STALL__SHIFT 0x12 +#define SDMA1_STATUS1_REG__CE_WREQ_IDLE_MASK 0x00000001L +#define SDMA1_STATUS1_REG__CE_WR_IDLE_MASK 0x00000002L +#define SDMA1_STATUS1_REG__CE_SPLIT_IDLE_MASK 0x00000004L +#define SDMA1_STATUS1_REG__CE_RREQ_IDLE_MASK 0x00000008L +#define SDMA1_STATUS1_REG__CE_OUT_IDLE_MASK 0x00000010L +#define SDMA1_STATUS1_REG__CE_IN_IDLE_MASK 0x00000020L +#define SDMA1_STATUS1_REG__CE_DST_IDLE_MASK 0x00000040L +#define SDMA1_STATUS1_REG__CE_CMD_IDLE_MASK 0x00000200L +#define SDMA1_STATUS1_REG__CE_AFIFO_FULL_MASK 0x00000400L +#define SDMA1_STATUS1_REG__CE_INFO_FULL_MASK 0x00002000L +#define SDMA1_STATUS1_REG__CE_INFO1_FULL_MASK 0x00004000L +#define SDMA1_STATUS1_REG__EX_START_MASK 0x00008000L +#define 
SDMA1_STATUS1_REG__CE_RD_STALL_MASK 0x00020000L +#define SDMA1_STATUS1_REG__CE_WR_STALL_MASK 0x00040000L +//SDMA1_RD_BURST_CNTL +#define SDMA1_RD_BURST_CNTL__RD_BURST__SHIFT 0x0 +#define SDMA1_RD_BURST_CNTL__CMD_BUFFER_RD_BURST__SHIFT 0x2 +#define SDMA1_RD_BURST_CNTL__RD_BURST_MASK 0x00000003L +#define SDMA1_RD_BURST_CNTL__CMD_BUFFER_RD_BURST_MASK 0x0000000CL +//SDMA1_HBM_PAGE_CONFIG +#define SDMA1_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT__SHIFT 0x0 +#define SDMA1_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT_MASK 0x00000001L +//SDMA1_UCODE_CHECKSUM +#define SDMA1_UCODE_CHECKSUM__DATA__SHIFT 0x0 +#define SDMA1_UCODE_CHECKSUM__DATA_MASK 0xFFFFFFFFL +//SDMA1_F32_CNTL +#define SDMA1_F32_CNTL__HALT__SHIFT 0x0 +#define SDMA1_F32_CNTL__STEP__SHIFT 0x1 +#define SDMA1_F32_CNTL__HALT_MASK 0x00000001L +#define SDMA1_F32_CNTL__STEP_MASK 0x00000002L +//SDMA1_FREEZE +#define SDMA1_FREEZE__PREEMPT__SHIFT 0x0 +#define SDMA1_FREEZE__FREEZE__SHIFT 0x4 +#define SDMA1_FREEZE__FROZEN__SHIFT 0x5 +#define SDMA1_FREEZE__F32_FREEZE__SHIFT 0x6 +#define SDMA1_FREEZE__PREEMPT_MASK 0x00000001L +#define SDMA1_FREEZE__FREEZE_MASK 0x00000010L +#define SDMA1_FREEZE__FROZEN_MASK 0x00000020L +#define SDMA1_FREEZE__F32_FREEZE_MASK 0x00000040L +//SDMA1_PHASE0_QUANTUM +#define SDMA1_PHASE0_QUANTUM__UNIT__SHIFT 0x0 +#define SDMA1_PHASE0_QUANTUM__VALUE__SHIFT 0x8 +#define SDMA1_PHASE0_QUANTUM__PREFER__SHIFT 0x1e +#define SDMA1_PHASE0_QUANTUM__UNIT_MASK 0x0000000FL +#define SDMA1_PHASE0_QUANTUM__VALUE_MASK 0x00FFFF00L +#define SDMA1_PHASE0_QUANTUM__PREFER_MASK 0x40000000L +//SDMA1_PHASE1_QUANTUM +#define SDMA1_PHASE1_QUANTUM__UNIT__SHIFT 0x0 +#define SDMA1_PHASE1_QUANTUM__VALUE__SHIFT 0x8 +#define SDMA1_PHASE1_QUANTUM__PREFER__SHIFT 0x1e +#define SDMA1_PHASE1_QUANTUM__UNIT_MASK 0x0000000FL +#define SDMA1_PHASE1_QUANTUM__VALUE_MASK 0x00FFFF00L +#define SDMA1_PHASE1_QUANTUM__PREFER_MASK 0x40000000L +//SDMA1_EDC_CONFIG +#define SDMA1_EDC_CONFIG__DIS_EDC__SHIFT 0x1 +#define SDMA1_EDC_CONFIG__ECC_INT_ENABLE__SHIFT 0x2 +#define SDMA1_EDC_CONFIG__DIS_EDC_MASK 0x00000002L +#define SDMA1_EDC_CONFIG__ECC_INT_ENABLE_MASK 0x00000004L +//SDMA1_BA_THRESHOLD +#define SDMA1_BA_THRESHOLD__READ_THRES__SHIFT 0x0 +#define SDMA1_BA_THRESHOLD__WRITE_THRES__SHIFT 0x10 +#define SDMA1_BA_THRESHOLD__READ_THRES_MASK 0x000003FFL +#define SDMA1_BA_THRESHOLD__WRITE_THRES_MASK 0x03FF0000L +//SDMA1_ID +#define SDMA1_ID__DEVICE_ID__SHIFT 0x0 +#define SDMA1_ID__DEVICE_ID_MASK 0x000000FFL +//SDMA1_VERSION +#define SDMA1_VERSION__MINVER__SHIFT 0x0 +#define SDMA1_VERSION__MAJVER__SHIFT 0x8 +#define SDMA1_VERSION__REV__SHIFT 0x10 +#define SDMA1_VERSION__MINVER_MASK 0x0000007FL +#define SDMA1_VERSION__MAJVER_MASK 0x00007F00L +#define SDMA1_VERSION__REV_MASK 0x003F0000L +//SDMA1_EDC_COUNTER +#define SDMA1_EDC_COUNTER__SDMA_UCODE_BUF_SED__SHIFT 0x0 +#define SDMA1_EDC_COUNTER__SDMA_RB_CMD_BUF_SED__SHIFT 0x2 +#define SDMA1_EDC_COUNTER__SDMA_IB_CMD_BUF_SED__SHIFT 0x3 +#define SDMA1_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED__SHIFT 0x4 +#define SDMA1_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED__SHIFT 0x5 +#define SDMA1_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED__SHIFT 0x6 +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED__SHIFT 0x7 +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED__SHIFT 0x8 +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED__SHIFT 0x9 +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED__SHIFT 0xa +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED__SHIFT 0xb +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED__SHIFT 0xc +#define 
SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED__SHIFT 0xd +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED__SHIFT 0xe +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED__SHIFT 0xf +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED__SHIFT 0x10 +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED__SHIFT 0x11 +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED__SHIFT 0x12 +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED__SHIFT 0x13 +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED__SHIFT 0x14 +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED__SHIFT 0x15 +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED__SHIFT 0x16 +#define SDMA1_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED__SHIFT 0x17 +#define SDMA1_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED__SHIFT 0x18 +#define SDMA1_EDC_COUNTER__SDMA_UCODE_BUF_SED_MASK 0x00000001L +#define SDMA1_EDC_COUNTER__SDMA_RB_CMD_BUF_SED_MASK 0x00000004L +#define SDMA1_EDC_COUNTER__SDMA_IB_CMD_BUF_SED_MASK 0x00000008L +#define SDMA1_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED_MASK 0x00000010L +#define SDMA1_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED_MASK 0x00000020L +#define SDMA1_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED_MASK 0x00000040L +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED_MASK 0x00000080L +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED_MASK 0x00000100L +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED_MASK 0x00000200L +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED_MASK 0x00000400L +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED_MASK 0x00000800L +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED_MASK 0x00001000L +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED_MASK 0x00002000L +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED_MASK 0x00004000L +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED_MASK 0x00008000L +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED_MASK 0x00010000L +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED_MASK 0x00020000L +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED_MASK 0x00040000L +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED_MASK 0x00080000L +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED_MASK 0x00100000L +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED_MASK 0x00200000L +#define SDMA1_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED_MASK 0x00400000L +#define SDMA1_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED_MASK 0x00800000L +#define SDMA1_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED_MASK 0x01000000L +//SDMA1_EDC_COUNTER_CLEAR +#define SDMA1_EDC_COUNTER_CLEAR__DUMMY__SHIFT 0x0 +#define SDMA1_EDC_COUNTER_CLEAR__DUMMY_MASK 0x00000001L +//SDMA1_STATUS2_REG +#define SDMA1_STATUS2_REG__ID__SHIFT 0x0 +#define SDMA1_STATUS2_REG__F32_INSTR_PTR__SHIFT 0x3 +#define SDMA1_STATUS2_REG__CMD_OP__SHIFT 0x10 +#define SDMA1_STATUS2_REG__ID_MASK 0x00000007L +#define SDMA1_STATUS2_REG__F32_INSTR_PTR_MASK 0x0000FFF8L +#define SDMA1_STATUS2_REG__CMD_OP_MASK 0xFFFF0000L +//SDMA1_ATOMIC_CNTL +#define SDMA1_ATOMIC_CNTL__LOOP_TIMER__SHIFT 0x0 +#define SDMA1_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE__SHIFT 0x1f +#define SDMA1_ATOMIC_CNTL__LOOP_TIMER_MASK 0x7FFFFFFFL +#define SDMA1_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE_MASK 0x80000000L +//SDMA1_ATOMIC_PREOP_LO +#define SDMA1_ATOMIC_PREOP_LO__DATA__SHIFT 0x0 +#define SDMA1_ATOMIC_PREOP_LO__DATA_MASK 0xFFFFFFFFL +//SDMA1_ATOMIC_PREOP_HI +#define SDMA1_ATOMIC_PREOP_HI__DATA__SHIFT 0x0 +#define SDMA1_ATOMIC_PREOP_HI__DATA_MASK 0xFFFFFFFFL +//SDMA1_UTCL1_CNTL +#define SDMA1_UTCL1_CNTL__REDO_ENABLE__SHIFT 0x0 +#define SDMA1_UTCL1_CNTL__REDO_DELAY__SHIFT 0x1 +#define 
SDMA1_UTCL1_CNTL__REDO_WATERMK__SHIFT 0xb +#define SDMA1_UTCL1_CNTL__INVACK_DELAY__SHIFT 0xe +#define SDMA1_UTCL1_CNTL__REQL2_CREDIT__SHIFT 0x18 +#define SDMA1_UTCL1_CNTL__VADDR_WATERMK__SHIFT 0x1d +#define SDMA1_UTCL1_CNTL__REDO_ENABLE_MASK 0x00000001L +#define SDMA1_UTCL1_CNTL__REDO_DELAY_MASK 0x000007FEL +#define SDMA1_UTCL1_CNTL__REDO_WATERMK_MASK 0x00003800L +#define SDMA1_UTCL1_CNTL__INVACK_DELAY_MASK 0x00FFC000L +#define SDMA1_UTCL1_CNTL__REQL2_CREDIT_MASK 0x1F000000L +#define SDMA1_UTCL1_CNTL__VADDR_WATERMK_MASK 0xE0000000L +//SDMA1_UTCL1_WATERMK +#define SDMA1_UTCL1_WATERMK__REQMC_WATERMK__SHIFT 0x0 +#define SDMA1_UTCL1_WATERMK__REQPG_WATERMK__SHIFT 0x9 +#define SDMA1_UTCL1_WATERMK__INVREQ_WATERMK__SHIFT 0x11 +#define SDMA1_UTCL1_WATERMK__XNACK_WATERMK__SHIFT 0x19 +#define SDMA1_UTCL1_WATERMK__REQMC_WATERMK_MASK 0x000001FFL +#define SDMA1_UTCL1_WATERMK__REQPG_WATERMK_MASK 0x0001FE00L +#define SDMA1_UTCL1_WATERMK__INVREQ_WATERMK_MASK 0x01FE0000L +#define SDMA1_UTCL1_WATERMK__XNACK_WATERMK_MASK 0xFE000000L +//SDMA1_UTCL1_RD_STATUS +#define SDMA1_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0 +#define SDMA1_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1 +#define SDMA1_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2 +#define SDMA1_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3 +#define SDMA1_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4 +#define SDMA1_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5 +#define SDMA1_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6 +#define SDMA1_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7 +#define SDMA1_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8 +#define SDMA1_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9 +#define SDMA1_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa +#define SDMA1_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb +#define SDMA1_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc +#define SDMA1_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd +#define SDMA1_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe +#define SDMA1_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf +#define SDMA1_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10 +#define SDMA1_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11 +#define SDMA1_UTCL1_RD_STATUS__PAGE_FAULT__SHIFT 0x12 +#define SDMA1_UTCL1_RD_STATUS__PAGE_NULL__SHIFT 0x13 +#define SDMA1_UTCL1_RD_STATUS__REQL2_IDLE__SHIFT 0x14 +#define SDMA1_UTCL1_RD_STATUS__CE_L1_STALL__SHIFT 0x15 +#define SDMA1_UTCL1_RD_STATUS__NEXT_RD_VECTOR__SHIFT 0x16 +#define SDMA1_UTCL1_RD_STATUS__MERGE_STATE__SHIFT 0x1a +#define SDMA1_UTCL1_RD_STATUS__ADDR_RD_RTR__SHIFT 0x1d +#define SDMA1_UTCL1_RD_STATUS__WPTR_POLLING__SHIFT 0x1e +#define SDMA1_UTCL1_RD_STATUS__INVREQ_SIZE__SHIFT 0x1f +#define SDMA1_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L +#define SDMA1_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L +#define SDMA1_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L +#define SDMA1_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L +#define SDMA1_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L +#define SDMA1_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L +#define SDMA1_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L +#define SDMA1_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L +#define SDMA1_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L +#define SDMA1_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L +#define SDMA1_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L +#define 
SDMA1_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L +#define SDMA1_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L +#define SDMA1_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L +#define SDMA1_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L +#define SDMA1_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L +#define SDMA1_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L +#define SDMA1_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L +#define SDMA1_UTCL1_RD_STATUS__PAGE_FAULT_MASK 0x00040000L +#define SDMA1_UTCL1_RD_STATUS__PAGE_NULL_MASK 0x00080000L +#define SDMA1_UTCL1_RD_STATUS__REQL2_IDLE_MASK 0x00100000L +#define SDMA1_UTCL1_RD_STATUS__CE_L1_STALL_MASK 0x00200000L +#define SDMA1_UTCL1_RD_STATUS__NEXT_RD_VECTOR_MASK 0x03C00000L +#define SDMA1_UTCL1_RD_STATUS__MERGE_STATE_MASK 0x1C000000L +#define SDMA1_UTCL1_RD_STATUS__ADDR_RD_RTR_MASK 0x20000000L +#define SDMA1_UTCL1_RD_STATUS__WPTR_POLLING_MASK 0x40000000L +#define SDMA1_UTCL1_RD_STATUS__INVREQ_SIZE_MASK 0x80000000L +//SDMA1_UTCL1_WR_STATUS +#define SDMA1_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0 +#define SDMA1_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1 +#define SDMA1_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2 +#define SDMA1_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3 +#define SDMA1_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4 +#define SDMA1_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5 +#define SDMA1_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6 +#define SDMA1_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7 +#define SDMA1_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8 +#define SDMA1_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9 +#define SDMA1_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa +#define SDMA1_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb +#define SDMA1_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc +#define SDMA1_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd +#define SDMA1_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe +#define SDMA1_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf +#define SDMA1_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10 +#define SDMA1_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11 +#define SDMA1_UTCL1_WR_STATUS__PAGE_FAULT__SHIFT 0x12 +#define SDMA1_UTCL1_WR_STATUS__PAGE_NULL__SHIFT 0x13 +#define SDMA1_UTCL1_WR_STATUS__REQL2_IDLE__SHIFT 0x14 +#define SDMA1_UTCL1_WR_STATUS__F32_WR_RTR__SHIFT 0x15 +#define SDMA1_UTCL1_WR_STATUS__NEXT_WR_VECTOR__SHIFT 0x16 +#define SDMA1_UTCL1_WR_STATUS__MERGE_STATE__SHIFT 0x19 +#define SDMA1_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY__SHIFT 0x1c +#define SDMA1_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL__SHIFT 0x1d +#define SDMA1_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY__SHIFT 0x1e +#define SDMA1_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL__SHIFT 0x1f +#define SDMA1_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L +#define SDMA1_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L +#define SDMA1_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L +#define SDMA1_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L +#define SDMA1_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L +#define SDMA1_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L +#define SDMA1_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L +#define SDMA1_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L +#define SDMA1_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L +#define SDMA1_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L +#define 
SDMA1_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L +#define SDMA1_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L +#define SDMA1_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L +#define SDMA1_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L +#define SDMA1_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L +#define SDMA1_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L +#define SDMA1_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L +#define SDMA1_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L +#define SDMA1_UTCL1_WR_STATUS__PAGE_FAULT_MASK 0x00040000L +#define SDMA1_UTCL1_WR_STATUS__PAGE_NULL_MASK 0x00080000L +#define SDMA1_UTCL1_WR_STATUS__REQL2_IDLE_MASK 0x00100000L +#define SDMA1_UTCL1_WR_STATUS__F32_WR_RTR_MASK 0x00200000L +#define SDMA1_UTCL1_WR_STATUS__NEXT_WR_VECTOR_MASK 0x01C00000L +#define SDMA1_UTCL1_WR_STATUS__MERGE_STATE_MASK 0x0E000000L +#define SDMA1_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY_MASK 0x10000000L +#define SDMA1_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL_MASK 0x20000000L +#define SDMA1_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY_MASK 0x40000000L +#define SDMA1_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL_MASK 0x80000000L +//SDMA1_UTCL1_INV0 +#define SDMA1_UTCL1_INV0__INV_MIDDLE__SHIFT 0x0 +#define SDMA1_UTCL1_INV0__RD_TIMEOUT__SHIFT 0x1 +#define SDMA1_UTCL1_INV0__WR_TIMEOUT__SHIFT 0x2 +#define SDMA1_UTCL1_INV0__RD_IN_INVADR__SHIFT 0x3 +#define SDMA1_UTCL1_INV0__WR_IN_INVADR__SHIFT 0x4 +#define SDMA1_UTCL1_INV0__PAGE_NULL_SW__SHIFT 0x5 +#define SDMA1_UTCL1_INV0__XNACK_IS_INVADR__SHIFT 0x6 +#define SDMA1_UTCL1_INV0__INVREQ_ENABLE__SHIFT 0x7 +#define SDMA1_UTCL1_INV0__NACK_TIMEOUT_SW__SHIFT 0x8 +#define SDMA1_UTCL1_INV0__NFLUSH_INV_IDLE__SHIFT 0x9 +#define SDMA1_UTCL1_INV0__FLUSH_INV_IDLE__SHIFT 0xa +#define SDMA1_UTCL1_INV0__INV_FLUSHTYPE__SHIFT 0xb +#define SDMA1_UTCL1_INV0__INV_VMID_VEC__SHIFT 0xc +#define SDMA1_UTCL1_INV0__INV_ADDR_HI__SHIFT 0x1c +#define SDMA1_UTCL1_INV0__INV_MIDDLE_MASK 0x00000001L +#define SDMA1_UTCL1_INV0__RD_TIMEOUT_MASK 0x00000002L +#define SDMA1_UTCL1_INV0__WR_TIMEOUT_MASK 0x00000004L +#define SDMA1_UTCL1_INV0__RD_IN_INVADR_MASK 0x00000008L +#define SDMA1_UTCL1_INV0__WR_IN_INVADR_MASK 0x00000010L +#define SDMA1_UTCL1_INV0__PAGE_NULL_SW_MASK 0x00000020L +#define SDMA1_UTCL1_INV0__XNACK_IS_INVADR_MASK 0x00000040L +#define SDMA1_UTCL1_INV0__INVREQ_ENABLE_MASK 0x00000080L +#define SDMA1_UTCL1_INV0__NACK_TIMEOUT_SW_MASK 0x00000100L +#define SDMA1_UTCL1_INV0__NFLUSH_INV_IDLE_MASK 0x00000200L +#define SDMA1_UTCL1_INV0__FLUSH_INV_IDLE_MASK 0x00000400L +#define SDMA1_UTCL1_INV0__INV_FLUSHTYPE_MASK 0x00000800L +#define SDMA1_UTCL1_INV0__INV_VMID_VEC_MASK 0x0FFFF000L +#define SDMA1_UTCL1_INV0__INV_ADDR_HI_MASK 0xF0000000L +//SDMA1_UTCL1_INV1 +#define SDMA1_UTCL1_INV1__INV_ADDR_LO__SHIFT 0x0 +#define SDMA1_UTCL1_INV1__INV_ADDR_LO_MASK 0xFFFFFFFFL +//SDMA1_UTCL1_INV2 +#define SDMA1_UTCL1_INV2__INV_NFLUSH_VMID_VEC__SHIFT 0x0 +#define SDMA1_UTCL1_INV2__INV_NFLUSH_VMID_VEC_MASK 0xFFFFFFFFL +//SDMA1_UTCL1_RD_XNACK0 +#define SDMA1_UTCL1_RD_XNACK0__XNACK_ADDR_LO__SHIFT 0x0 +#define SDMA1_UTCL1_RD_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL +//SDMA1_UTCL1_RD_XNACK1 +#define SDMA1_UTCL1_RD_XNACK1__XNACK_ADDR_HI__SHIFT 0x0 +#define SDMA1_UTCL1_RD_XNACK1__XNACK_VMID__SHIFT 0x4 +#define SDMA1_UTCL1_RD_XNACK1__XNACK_VECTOR__SHIFT 0x8 +#define SDMA1_UTCL1_RD_XNACK1__IS_XNACK__SHIFT 0x1a +#define SDMA1_UTCL1_RD_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL +#define SDMA1_UTCL1_RD_XNACK1__XNACK_VMID_MASK 0x000000F0L +#define 
SDMA1_UTCL1_RD_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L +#define SDMA1_UTCL1_RD_XNACK1__IS_XNACK_MASK 0x0C000000L +//SDMA1_UTCL1_WR_XNACK0 +#define SDMA1_UTCL1_WR_XNACK0__XNACK_ADDR_LO__SHIFT 0x0 +#define SDMA1_UTCL1_WR_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL +//SDMA1_UTCL1_WR_XNACK1 +#define SDMA1_UTCL1_WR_XNACK1__XNACK_ADDR_HI__SHIFT 0x0 +#define SDMA1_UTCL1_WR_XNACK1__XNACK_VMID__SHIFT 0x4 +#define SDMA1_UTCL1_WR_XNACK1__XNACK_VECTOR__SHIFT 0x8 +#define SDMA1_UTCL1_WR_XNACK1__IS_XNACK__SHIFT 0x1a +#define SDMA1_UTCL1_WR_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL +#define SDMA1_UTCL1_WR_XNACK1__XNACK_VMID_MASK 0x000000F0L +#define SDMA1_UTCL1_WR_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L +#define SDMA1_UTCL1_WR_XNACK1__IS_XNACK_MASK 0x0C000000L +//SDMA1_UTCL1_TIMEOUT +#define SDMA1_UTCL1_TIMEOUT__RD_XNACK_LIMIT__SHIFT 0x0 +#define SDMA1_UTCL1_TIMEOUT__WR_XNACK_LIMIT__SHIFT 0x10 +#define SDMA1_UTCL1_TIMEOUT__RD_XNACK_LIMIT_MASK 0x0000FFFFL +#define SDMA1_UTCL1_TIMEOUT__WR_XNACK_LIMIT_MASK 0xFFFF0000L +//SDMA1_UTCL1_PAGE +#define SDMA1_UTCL1_PAGE__VM_HOLE__SHIFT 0x0 +#define SDMA1_UTCL1_PAGE__REQ_TYPE__SHIFT 0x1 +#define SDMA1_UTCL1_PAGE__USE_MTYPE__SHIFT 0x6 +#define SDMA1_UTCL1_PAGE__USE_PT_SNOOP__SHIFT 0x9 +#define SDMA1_UTCL1_PAGE__VM_HOLE_MASK 0x00000001L +#define SDMA1_UTCL1_PAGE__REQ_TYPE_MASK 0x0000001EL +#define SDMA1_UTCL1_PAGE__USE_MTYPE_MASK 0x000001C0L +#define SDMA1_UTCL1_PAGE__USE_PT_SNOOP_MASK 0x00000200L +//SDMA1_POWER_CNTL_IDLE +#define SDMA1_POWER_CNTL_IDLE__DELAY0__SHIFT 0x0 +#define SDMA1_POWER_CNTL_IDLE__DELAY1__SHIFT 0x10 +#define SDMA1_POWER_CNTL_IDLE__DELAY2__SHIFT 0x18 +#define SDMA1_POWER_CNTL_IDLE__DELAY0_MASK 0x0000FFFFL +#define SDMA1_POWER_CNTL_IDLE__DELAY1_MASK 0x00FF0000L +#define SDMA1_POWER_CNTL_IDLE__DELAY2_MASK 0xFF000000L +//SDMA1_RELAX_ORDERING_LUT +#define SDMA1_RELAX_ORDERING_LUT__RESERVED0__SHIFT 0x0 +#define SDMA1_RELAX_ORDERING_LUT__COPY__SHIFT 0x1 +#define SDMA1_RELAX_ORDERING_LUT__WRITE__SHIFT 0x2 +#define SDMA1_RELAX_ORDERING_LUT__RESERVED3__SHIFT 0x3 +#define SDMA1_RELAX_ORDERING_LUT__RESERVED4__SHIFT 0x4 +#define SDMA1_RELAX_ORDERING_LUT__FENCE__SHIFT 0x5 +#define SDMA1_RELAX_ORDERING_LUT__RESERVED76__SHIFT 0x6 +#define SDMA1_RELAX_ORDERING_LUT__POLL_MEM__SHIFT 0x8 +#define SDMA1_RELAX_ORDERING_LUT__COND_EXE__SHIFT 0x9 +#define SDMA1_RELAX_ORDERING_LUT__ATOMIC__SHIFT 0xa +#define SDMA1_RELAX_ORDERING_LUT__CONST_FILL__SHIFT 0xb +#define SDMA1_RELAX_ORDERING_LUT__PTEPDE__SHIFT 0xc +#define SDMA1_RELAX_ORDERING_LUT__TIMESTAMP__SHIFT 0xd +#define SDMA1_RELAX_ORDERING_LUT__RESERVED__SHIFT 0xe +#define SDMA1_RELAX_ORDERING_LUT__WORLD_SWITCH__SHIFT 0x1b +#define SDMA1_RELAX_ORDERING_LUT__RPTR_WRB__SHIFT 0x1c +#define SDMA1_RELAX_ORDERING_LUT__WPTR_POLL__SHIFT 0x1d +#define SDMA1_RELAX_ORDERING_LUT__IB_FETCH__SHIFT 0x1e +#define SDMA1_RELAX_ORDERING_LUT__RB_FETCH__SHIFT 0x1f +#define SDMA1_RELAX_ORDERING_LUT__RESERVED0_MASK 0x00000001L +#define SDMA1_RELAX_ORDERING_LUT__COPY_MASK 0x00000002L +#define SDMA1_RELAX_ORDERING_LUT__WRITE_MASK 0x00000004L +#define SDMA1_RELAX_ORDERING_LUT__RESERVED3_MASK 0x00000008L +#define SDMA1_RELAX_ORDERING_LUT__RESERVED4_MASK 0x00000010L +#define SDMA1_RELAX_ORDERING_LUT__FENCE_MASK 0x00000020L +#define SDMA1_RELAX_ORDERING_LUT__RESERVED76_MASK 0x000000C0L +#define SDMA1_RELAX_ORDERING_LUT__POLL_MEM_MASK 0x00000100L +#define SDMA1_RELAX_ORDERING_LUT__COND_EXE_MASK 0x00000200L +#define SDMA1_RELAX_ORDERING_LUT__ATOMIC_MASK 0x00000400L +#define SDMA1_RELAX_ORDERING_LUT__CONST_FILL_MASK 0x00000800L +#define 
SDMA1_RELAX_ORDERING_LUT__PTEPDE_MASK 0x00001000L +#define SDMA1_RELAX_ORDERING_LUT__TIMESTAMP_MASK 0x00002000L +#define SDMA1_RELAX_ORDERING_LUT__RESERVED_MASK 0x07FFC000L +#define SDMA1_RELAX_ORDERING_LUT__WORLD_SWITCH_MASK 0x08000000L +#define SDMA1_RELAX_ORDERING_LUT__RPTR_WRB_MASK 0x10000000L +#define SDMA1_RELAX_ORDERING_LUT__WPTR_POLL_MASK 0x20000000L +#define SDMA1_RELAX_ORDERING_LUT__IB_FETCH_MASK 0x40000000L +#define SDMA1_RELAX_ORDERING_LUT__RB_FETCH_MASK 0x80000000L +//SDMA1_CHICKEN_BITS_2 +#define SDMA1_CHICKEN_BITS_2__F32_CMD_PROC_DELAY__SHIFT 0x0 +#define SDMA1_CHICKEN_BITS_2__F32_CMD_PROC_DELAY_MASK 0x0000000FL +//SDMA1_STATUS3_REG +#define SDMA1_STATUS3_REG__CMD_OP_STATUS__SHIFT 0x0 +#define SDMA1_STATUS3_REG__PREV_VM_CMD__SHIFT 0x10 +#define SDMA1_STATUS3_REG__EXCEPTION_IDLE__SHIFT 0x14 +#define SDMA1_STATUS3_REG__QUEUE_ID_MATCH__SHIFT 0x15 +#define SDMA1_STATUS3_REG__INT_QUEUE_ID__SHIFT 0x16 +#define SDMA1_STATUS3_REG__CMD_OP_STATUS_MASK 0x0000FFFFL +#define SDMA1_STATUS3_REG__PREV_VM_CMD_MASK 0x000F0000L +#define SDMA1_STATUS3_REG__EXCEPTION_IDLE_MASK 0x00100000L +#define SDMA1_STATUS3_REG__QUEUE_ID_MATCH_MASK 0x00200000L +#define SDMA1_STATUS3_REG__INT_QUEUE_ID_MASK 0x03C00000L +//SDMA1_PHYSICAL_ADDR_LO +#define SDMA1_PHYSICAL_ADDR_LO__D_VALID__SHIFT 0x0 +#define SDMA1_PHYSICAL_ADDR_LO__DIRTY__SHIFT 0x1 +#define SDMA1_PHYSICAL_ADDR_LO__PHY_VALID__SHIFT 0x2 +#define SDMA1_PHYSICAL_ADDR_LO__ADDR__SHIFT 0xc +#define SDMA1_PHYSICAL_ADDR_LO__D_VALID_MASK 0x00000001L +#define SDMA1_PHYSICAL_ADDR_LO__DIRTY_MASK 0x00000002L +#define SDMA1_PHYSICAL_ADDR_LO__PHY_VALID_MASK 0x00000004L +#define SDMA1_PHYSICAL_ADDR_LO__ADDR_MASK 0xFFFFF000L +//SDMA1_PHYSICAL_ADDR_HI +#define SDMA1_PHYSICAL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_PHYSICAL_ADDR_HI__ADDR_MASK 0x0000FFFFL +//SDMA1_PHASE2_QUANTUM +#define SDMA1_PHASE2_QUANTUM__UNIT__SHIFT 0x0 +#define SDMA1_PHASE2_QUANTUM__VALUE__SHIFT 0x8 +#define SDMA1_PHASE2_QUANTUM__PREFER__SHIFT 0x1e +#define SDMA1_PHASE2_QUANTUM__UNIT_MASK 0x0000000FL +#define SDMA1_PHASE2_QUANTUM__VALUE_MASK 0x00FFFF00L +#define SDMA1_PHASE2_QUANTUM__PREFER_MASK 0x40000000L +//SDMA1_ERROR_LOG +#define SDMA1_ERROR_LOG__OVERRIDE__SHIFT 0x0 +#define SDMA1_ERROR_LOG__STATUS__SHIFT 0x10 +#define SDMA1_ERROR_LOG__OVERRIDE_MASK 0x0000FFFFL +#define SDMA1_ERROR_LOG__STATUS_MASK 0xFFFF0000L +//SDMA1_PUB_DUMMY_REG0 +#define SDMA1_PUB_DUMMY_REG0__VALUE__SHIFT 0x0 +#define SDMA1_PUB_DUMMY_REG0__VALUE_MASK 0xFFFFFFFFL +//SDMA1_PUB_DUMMY_REG1 +#define SDMA1_PUB_DUMMY_REG1__VALUE__SHIFT 0x0 +#define SDMA1_PUB_DUMMY_REG1__VALUE_MASK 0xFFFFFFFFL +//SDMA1_PUB_DUMMY_REG2 +#define SDMA1_PUB_DUMMY_REG2__VALUE__SHIFT 0x0 +#define SDMA1_PUB_DUMMY_REG2__VALUE_MASK 0xFFFFFFFFL +//SDMA1_PUB_DUMMY_REG3 +#define SDMA1_PUB_DUMMY_REG3__VALUE__SHIFT 0x0 +#define SDMA1_PUB_DUMMY_REG3__VALUE_MASK 0xFFFFFFFFL +//SDMA1_F32_COUNTER +#define SDMA1_F32_COUNTER__VALUE__SHIFT 0x0 +#define SDMA1_F32_COUNTER__VALUE_MASK 0xFFFFFFFFL +//SDMA1_UNBREAKABLE +#define SDMA1_UNBREAKABLE__VALUE__SHIFT 0x0 +#define SDMA1_UNBREAKABLE__VALUE_MASK 0x00000001L +//SDMA1_PERFMON_CNTL +#define SDMA1_PERFMON_CNTL__PERF_ENABLE0__SHIFT 0x0 +#define SDMA1_PERFMON_CNTL__PERF_CLEAR0__SHIFT 0x1 +#define SDMA1_PERFMON_CNTL__PERF_SEL0__SHIFT 0x2 +#define SDMA1_PERFMON_CNTL__PERF_ENABLE1__SHIFT 0xa +#define SDMA1_PERFMON_CNTL__PERF_CLEAR1__SHIFT 0xb +#define SDMA1_PERFMON_CNTL__PERF_SEL1__SHIFT 0xc +#define SDMA1_PERFMON_CNTL__PERF_ENABLE0_MASK 0x00000001L +#define SDMA1_PERFMON_CNTL__PERF_CLEAR0_MASK 0x00000002L +#define 
SDMA1_PERFMON_CNTL__PERF_SEL0_MASK 0x000003FCL +#define SDMA1_PERFMON_CNTL__PERF_ENABLE1_MASK 0x00000400L +#define SDMA1_PERFMON_CNTL__PERF_CLEAR1_MASK 0x00000800L +#define SDMA1_PERFMON_CNTL__PERF_SEL1_MASK 0x000FF000L +//SDMA1_PERFCOUNTER0_RESULT +#define SDMA1_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT 0x0 +#define SDMA1_PERFCOUNTER0_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL +//SDMA1_PERFCOUNTER1_RESULT +#define SDMA1_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT 0x0 +#define SDMA1_PERFCOUNTER1_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL +//SDMA1_PERFCOUNTER_TAG_DELAY_RANGE +#define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW__SHIFT 0x0 +#define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH__SHIFT 0xe +#define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW__SHIFT 0x1c +#define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW_MASK 0x00003FFFL +#define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH_MASK 0x0FFFC000L +#define SDMA1_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW_MASK 0x10000000L +//SDMA1_CRD_CNTL +#define SDMA1_CRD_CNTL__MC_WRREQ_CREDIT__SHIFT 0x7 +#define SDMA1_CRD_CNTL__MC_RDREQ_CREDIT__SHIFT 0xd +#define SDMA1_CRD_CNTL__MC_WRREQ_CREDIT_MASK 0x00001F80L +#define SDMA1_CRD_CNTL__MC_RDREQ_CREDIT_MASK 0x0007E000L +//SDMA1_GPU_IOV_VIOLATION_LOG +#define SDMA1_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0 +#define SDMA1_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1 +#define SDMA1_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2 +#define SDMA1_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION__SHIFT 0x14 +#define SDMA1_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x15 +#define SDMA1_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x16 +#define SDMA1_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L +#define SDMA1_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L +#define SDMA1_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x000FFFFCL +#define SDMA1_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION_MASK 0x00100000L +#define SDMA1_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00200000L +#define SDMA1_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x03C00000L +//SDMA1_ULV_CNTL +#define SDMA1_ULV_CNTL__HYSTERESIS__SHIFT 0x0 +#define SDMA1_ULV_CNTL__ENTER_ULV_INT_CLR__SHIFT 0x1b +#define SDMA1_ULV_CNTL__EXIT_ULV_INT_CLR__SHIFT 0x1c +#define SDMA1_ULV_CNTL__ENTER_ULV_INT__SHIFT 0x1d +#define SDMA1_ULV_CNTL__EXIT_ULV_INT__SHIFT 0x1e +#define SDMA1_ULV_CNTL__ULV_STATUS__SHIFT 0x1f +#define SDMA1_ULV_CNTL__HYSTERESIS_MASK 0x0000001FL +#define SDMA1_ULV_CNTL__ENTER_ULV_INT_CLR_MASK 0x08000000L +#define SDMA1_ULV_CNTL__EXIT_ULV_INT_CLR_MASK 0x10000000L +#define SDMA1_ULV_CNTL__ENTER_ULV_INT_MASK 0x20000000L +#define SDMA1_ULV_CNTL__EXIT_ULV_INT_MASK 0x40000000L +#define SDMA1_ULV_CNTL__ULV_STATUS_MASK 0x80000000L +//SDMA1_EA_DBIT_ADDR_DATA +#define SDMA1_EA_DBIT_ADDR_DATA__VALUE__SHIFT 0x0 +#define SDMA1_EA_DBIT_ADDR_DATA__VALUE_MASK 0xFFFFFFFFL +//SDMA1_EA_DBIT_ADDR_INDEX +#define SDMA1_EA_DBIT_ADDR_INDEX__VALUE__SHIFT 0x0 +#define SDMA1_EA_DBIT_ADDR_INDEX__VALUE_MASK 0x00000007L +//SDMA1_GPU_IOV_VIOLATION_LOG2 +#define SDMA1_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID__SHIFT 0x0 +#define SDMA1_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID_MASK 0x000000FFL +//SDMA1_GFX_RB_CNTL +#define SDMA1_GFX_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA1_GFX_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA1_GFX_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA1_GFX_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define 
SDMA1_GFX_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA1_GFX_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA1_GFX_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA1_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA1_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA1_GFX_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA1_GFX_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA1_GFX_RB_BASE +#define SDMA1_GFX_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA1_GFX_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA1_GFX_RB_BASE_HI +#define SDMA1_GFX_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA1_GFX_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA1_GFX_RB_RPTR +#define SDMA1_GFX_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA1_GFX_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_GFX_RB_RPTR_HI +#define SDMA1_GFX_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA1_GFX_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_GFX_RB_WPTR +#define SDMA1_GFX_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA1_GFX_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_GFX_RB_WPTR_HI +#define SDMA1_GFX_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA1_GFX_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_GFX_RB_WPTR_POLL_CNTL +#define SDMA1_GFX_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA1_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA1_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA1_GFX_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA1_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA1_GFX_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA1_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA1_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA1_GFX_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA1_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA1_GFX_RB_RPTR_ADDR_HI +#define SDMA1_GFX_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_GFX_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_GFX_RB_RPTR_ADDR_LO +#define SDMA1_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA1_GFX_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA1_GFX_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_GFX_IB_CNTL +#define SDMA1_GFX_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA1_GFX_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA1_GFX_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA1_GFX_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA1_GFX_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA1_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA1_GFX_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA1_GFX_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA1_GFX_IB_RPTR +#define SDMA1_GFX_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA1_GFX_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA1_GFX_IB_OFFSET +#define SDMA1_GFX_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA1_GFX_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA1_GFX_IB_BASE_LO +#define SDMA1_GFX_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA1_GFX_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA1_GFX_IB_BASE_HI +#define SDMA1_GFX_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA1_GFX_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_GFX_IB_SIZE +#define SDMA1_GFX_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA1_GFX_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA1_GFX_SKIP_CNTL +#define SDMA1_GFX_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA1_GFX_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL 
+//SDMA1_GFX_CONTEXT_STATUS +#define SDMA1_GFX_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA1_GFX_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA1_GFX_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA1_GFX_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA1_GFX_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA1_GFX_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA1_GFX_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA1_GFX_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA1_GFX_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA1_GFX_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA1_GFX_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA1_GFX_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA1_GFX_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA1_GFX_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA1_GFX_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA1_GFX_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA1_GFX_DOORBELL +#define SDMA1_GFX_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA1_GFX_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA1_GFX_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA1_GFX_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA1_GFX_CONTEXT_CNTL +#define SDMA1_GFX_CONTEXT_CNTL__RESUME_CTX__SHIFT 0x10 +#define SDMA1_GFX_CONTEXT_CNTL__RESUME_CTX_MASK 0x00010000L +//SDMA1_GFX_STATUS +#define SDMA1_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA1_GFX_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA1_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA1_GFX_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA1_GFX_DOORBELL_LOG +#define SDMA1_GFX_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA1_GFX_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA1_GFX_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA1_GFX_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA1_GFX_WATERMARK +#define SDMA1_GFX_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA1_GFX_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA1_GFX_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA1_GFX_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA1_GFX_DOORBELL_OFFSET +#define SDMA1_GFX_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA1_GFX_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA1_GFX_CSA_ADDR_LO +#define SDMA1_GFX_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_GFX_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_GFX_CSA_ADDR_HI +#define SDMA1_GFX_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_GFX_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_GFX_IB_SUB_REMAIN +#define SDMA1_GFX_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA1_GFX_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA1_GFX_PREEMPT +#define SDMA1_GFX_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA1_GFX_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA1_GFX_DUMMY_REG +#define SDMA1_GFX_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA1_GFX_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA1_GFX_RB_WPTR_POLL_ADDR_HI +#define SDMA1_GFX_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_GFX_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_GFX_RB_WPTR_POLL_ADDR_LO +#define SDMA1_GFX_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_GFX_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_GFX_RB_AQL_CNTL +#define SDMA1_GFX_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA1_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA1_GFX_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA1_GFX_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA1_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define 
SDMA1_GFX_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA1_GFX_MINOR_PTR_UPDATE +#define SDMA1_GFX_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA1_GFX_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA1_GFX_MIDCMD_DATA0 +#define SDMA1_GFX_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA1_GFX_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA1_GFX_MIDCMD_DATA1 +#define SDMA1_GFX_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA1_GFX_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA1_GFX_MIDCMD_DATA2 +#define SDMA1_GFX_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA1_GFX_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA1_GFX_MIDCMD_DATA3 +#define SDMA1_GFX_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA1_GFX_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA1_GFX_MIDCMD_DATA4 +#define SDMA1_GFX_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA1_GFX_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA1_GFX_MIDCMD_DATA5 +#define SDMA1_GFX_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA1_GFX_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA1_GFX_MIDCMD_DATA6 +#define SDMA1_GFX_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA1_GFX_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA1_GFX_MIDCMD_DATA7 +#define SDMA1_GFX_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA1_GFX_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA1_GFX_MIDCMD_DATA8 +#define SDMA1_GFX_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA1_GFX_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA1_GFX_MIDCMD_CNTL +#define SDMA1_GFX_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA1_GFX_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA1_GFX_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA1_GFX_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA1_GFX_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA1_GFX_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA1_GFX_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA1_GFX_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA1_PAGE_RB_CNTL +#define SDMA1_PAGE_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA1_PAGE_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA1_PAGE_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA1_PAGE_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA1_PAGE_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA1_PAGE_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA1_PAGE_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA1_PAGE_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA1_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA1_PAGE_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA1_PAGE_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA1_PAGE_RB_BASE +#define SDMA1_PAGE_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA1_PAGE_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA1_PAGE_RB_BASE_HI +#define SDMA1_PAGE_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA1_PAGE_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA1_PAGE_RB_RPTR +#define SDMA1_PAGE_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA1_PAGE_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_PAGE_RB_RPTR_HI +#define SDMA1_PAGE_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA1_PAGE_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_PAGE_RB_WPTR +#define SDMA1_PAGE_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA1_PAGE_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_PAGE_RB_WPTR_HI +#define SDMA1_PAGE_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define 
SDMA1_PAGE_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_PAGE_RB_WPTR_POLL_CNTL +#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA1_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA1_PAGE_RB_RPTR_ADDR_HI +#define SDMA1_PAGE_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_PAGE_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_PAGE_RB_RPTR_ADDR_LO +#define SDMA1_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA1_PAGE_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA1_PAGE_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_PAGE_IB_CNTL +#define SDMA1_PAGE_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA1_PAGE_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA1_PAGE_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA1_PAGE_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA1_PAGE_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA1_PAGE_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA1_PAGE_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA1_PAGE_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA1_PAGE_IB_RPTR +#define SDMA1_PAGE_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA1_PAGE_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA1_PAGE_IB_OFFSET +#define SDMA1_PAGE_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA1_PAGE_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA1_PAGE_IB_BASE_LO +#define SDMA1_PAGE_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA1_PAGE_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA1_PAGE_IB_BASE_HI +#define SDMA1_PAGE_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA1_PAGE_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_PAGE_IB_SIZE +#define SDMA1_PAGE_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA1_PAGE_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA1_PAGE_SKIP_CNTL +#define SDMA1_PAGE_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA1_PAGE_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA1_PAGE_CONTEXT_STATUS +#define SDMA1_PAGE_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA1_PAGE_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA1_PAGE_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA1_PAGE_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA1_PAGE_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA1_PAGE_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA1_PAGE_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA1_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA1_PAGE_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA1_PAGE_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA1_PAGE_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA1_PAGE_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA1_PAGE_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA1_PAGE_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA1_PAGE_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA1_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA1_PAGE_DOORBELL +#define SDMA1_PAGE_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA1_PAGE_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA1_PAGE_DOORBELL__ENABLE_MASK 0x10000000L +#define 
SDMA1_PAGE_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA1_PAGE_STATUS +#define SDMA1_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA1_PAGE_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA1_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA1_PAGE_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA1_PAGE_DOORBELL_LOG +#define SDMA1_PAGE_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA1_PAGE_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA1_PAGE_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA1_PAGE_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA1_PAGE_WATERMARK +#define SDMA1_PAGE_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA1_PAGE_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA1_PAGE_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA1_PAGE_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA1_PAGE_DOORBELL_OFFSET +#define SDMA1_PAGE_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA1_PAGE_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA1_PAGE_CSA_ADDR_LO +#define SDMA1_PAGE_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_PAGE_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_PAGE_CSA_ADDR_HI +#define SDMA1_PAGE_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_PAGE_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_PAGE_IB_SUB_REMAIN +#define SDMA1_PAGE_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA1_PAGE_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA1_PAGE_PREEMPT +#define SDMA1_PAGE_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA1_PAGE_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA1_PAGE_DUMMY_REG +#define SDMA1_PAGE_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA1_PAGE_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA1_PAGE_RB_WPTR_POLL_ADDR_HI +#define SDMA1_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_PAGE_RB_WPTR_POLL_ADDR_LO +#define SDMA1_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_PAGE_RB_AQL_CNTL +#define SDMA1_PAGE_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA1_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA1_PAGE_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA1_PAGE_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA1_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA1_PAGE_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA1_PAGE_MINOR_PTR_UPDATE +#define SDMA1_PAGE_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA1_PAGE_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA1_PAGE_MIDCMD_DATA0 +#define SDMA1_PAGE_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA1_PAGE_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA1_PAGE_MIDCMD_DATA1 +#define SDMA1_PAGE_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA1_PAGE_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA1_PAGE_MIDCMD_DATA2 +#define SDMA1_PAGE_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA1_PAGE_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA1_PAGE_MIDCMD_DATA3 +#define SDMA1_PAGE_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA1_PAGE_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA1_PAGE_MIDCMD_DATA4 +#define SDMA1_PAGE_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA1_PAGE_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA1_PAGE_MIDCMD_DATA5 +#define SDMA1_PAGE_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA1_PAGE_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA1_PAGE_MIDCMD_DATA6 +#define SDMA1_PAGE_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA1_PAGE_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA1_PAGE_MIDCMD_DATA7 +#define SDMA1_PAGE_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA1_PAGE_MIDCMD_DATA7__DATA7_MASK 
0xFFFFFFFFL +//SDMA1_PAGE_MIDCMD_DATA8 +#define SDMA1_PAGE_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA1_PAGE_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA1_PAGE_MIDCMD_CNTL +#define SDMA1_PAGE_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA1_PAGE_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA1_PAGE_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA1_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA1_PAGE_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA1_PAGE_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA1_PAGE_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA1_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA1_RLC0_RB_CNTL +#define SDMA1_RLC0_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA1_RLC0_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA1_RLC0_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA1_RLC0_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA1_RLC0_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA1_RLC0_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA1_RLC0_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA1_RLC0_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA1_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA1_RLC0_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA1_RLC0_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA1_RLC0_RB_BASE +#define SDMA1_RLC0_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA1_RLC0_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC0_RB_BASE_HI +#define SDMA1_RLC0_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC0_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA1_RLC0_RB_RPTR +#define SDMA1_RLC0_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA1_RLC0_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC0_RB_RPTR_HI +#define SDMA1_RLC0_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA1_RLC0_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC0_RB_WPTR +#define SDMA1_RLC0_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA1_RLC0_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC0_RB_WPTR_HI +#define SDMA1_RLC0_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA1_RLC0_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC0_RB_WPTR_POLL_CNTL +#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA1_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA1_RLC0_RB_RPTR_ADDR_HI +#define SDMA1_RLC0_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC0_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC0_RB_RPTR_ADDR_LO +#define SDMA1_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA1_RLC0_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA1_RLC0_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC0_IB_CNTL +#define SDMA1_RLC0_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define 
SDMA1_RLC0_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA1_RLC0_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA1_RLC0_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA1_RLC0_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA1_RLC0_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA1_RLC0_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA1_RLC0_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA1_RLC0_IB_RPTR +#define SDMA1_RLC0_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA1_RLC0_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA1_RLC0_IB_OFFSET +#define SDMA1_RLC0_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA1_RLC0_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA1_RLC0_IB_BASE_LO +#define SDMA1_RLC0_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA1_RLC0_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA1_RLC0_IB_BASE_HI +#define SDMA1_RLC0_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC0_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC0_IB_SIZE +#define SDMA1_RLC0_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA1_RLC0_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA1_RLC0_SKIP_CNTL +#define SDMA1_RLC0_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA1_RLC0_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA1_RLC0_CONTEXT_STATUS +#define SDMA1_RLC0_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA1_RLC0_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA1_RLC0_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA1_RLC0_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA1_RLC0_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA1_RLC0_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA1_RLC0_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA1_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA1_RLC0_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA1_RLC0_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA1_RLC0_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA1_RLC0_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA1_RLC0_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA1_RLC0_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA1_RLC0_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA1_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA1_RLC0_DOORBELL +#define SDMA1_RLC0_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA1_RLC0_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA1_RLC0_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA1_RLC0_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA1_RLC0_STATUS +#define SDMA1_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA1_RLC0_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA1_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA1_RLC0_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA1_RLC0_DOORBELL_LOG +#define SDMA1_RLC0_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA1_RLC0_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA1_RLC0_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA1_RLC0_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA1_RLC0_WATERMARK +#define SDMA1_RLC0_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA1_RLC0_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA1_RLC0_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA1_RLC0_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA1_RLC0_DOORBELL_OFFSET +#define SDMA1_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA1_RLC0_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA1_RLC0_CSA_ADDR_LO +#define SDMA1_RLC0_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC0_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC0_CSA_ADDR_HI +#define SDMA1_RLC0_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC0_CSA_ADDR_HI__ADDR_MASK 
0xFFFFFFFFL +//SDMA1_RLC0_IB_SUB_REMAIN +#define SDMA1_RLC0_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA1_RLC0_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA1_RLC0_PREEMPT +#define SDMA1_RLC0_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA1_RLC0_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA1_RLC0_DUMMY_REG +#define SDMA1_RLC0_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA1_RLC0_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA1_RLC0_RB_WPTR_POLL_ADDR_HI +#define SDMA1_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC0_RB_WPTR_POLL_ADDR_LO +#define SDMA1_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC0_RB_AQL_CNTL +#define SDMA1_RLC0_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA1_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA1_RLC0_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA1_RLC0_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA1_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA1_RLC0_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA1_RLC0_MINOR_PTR_UPDATE +#define SDMA1_RLC0_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA1_RLC0_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA1_RLC0_MIDCMD_DATA0 +#define SDMA1_RLC0_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA1_RLC0_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA1_RLC0_MIDCMD_DATA1 +#define SDMA1_RLC0_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA1_RLC0_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA1_RLC0_MIDCMD_DATA2 +#define SDMA1_RLC0_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA1_RLC0_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA1_RLC0_MIDCMD_DATA3 +#define SDMA1_RLC0_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA1_RLC0_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA1_RLC0_MIDCMD_DATA4 +#define SDMA1_RLC0_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA1_RLC0_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA1_RLC0_MIDCMD_DATA5 +#define SDMA1_RLC0_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA1_RLC0_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA1_RLC0_MIDCMD_DATA6 +#define SDMA1_RLC0_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA1_RLC0_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA1_RLC0_MIDCMD_DATA7 +#define SDMA1_RLC0_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA1_RLC0_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA1_RLC0_MIDCMD_DATA8 +#define SDMA1_RLC0_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA1_RLC0_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA1_RLC0_MIDCMD_CNTL +#define SDMA1_RLC0_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA1_RLC0_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA1_RLC0_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA1_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA1_RLC0_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA1_RLC0_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA1_RLC0_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA1_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA1_RLC1_RB_CNTL +#define SDMA1_RLC1_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA1_RLC1_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA1_RLC1_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA1_RLC1_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA1_RLC1_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA1_RLC1_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA1_RLC1_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define 
SDMA1_RLC1_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA1_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA1_RLC1_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA1_RLC1_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA1_RLC1_RB_BASE +#define SDMA1_RLC1_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA1_RLC1_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC1_RB_BASE_HI +#define SDMA1_RLC1_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC1_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA1_RLC1_RB_RPTR +#define SDMA1_RLC1_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA1_RLC1_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC1_RB_RPTR_HI +#define SDMA1_RLC1_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA1_RLC1_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC1_RB_WPTR +#define SDMA1_RLC1_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA1_RLC1_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC1_RB_WPTR_HI +#define SDMA1_RLC1_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA1_RLC1_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC1_RB_WPTR_POLL_CNTL +#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA1_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA1_RLC1_RB_RPTR_ADDR_HI +#define SDMA1_RLC1_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC1_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC1_RB_RPTR_ADDR_LO +#define SDMA1_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA1_RLC1_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA1_RLC1_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC1_IB_CNTL +#define SDMA1_RLC1_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA1_RLC1_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA1_RLC1_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA1_RLC1_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA1_RLC1_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA1_RLC1_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA1_RLC1_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA1_RLC1_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA1_RLC1_IB_RPTR +#define SDMA1_RLC1_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA1_RLC1_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA1_RLC1_IB_OFFSET +#define SDMA1_RLC1_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA1_RLC1_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA1_RLC1_IB_BASE_LO +#define SDMA1_RLC1_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA1_RLC1_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA1_RLC1_IB_BASE_HI +#define SDMA1_RLC1_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC1_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC1_IB_SIZE +#define SDMA1_RLC1_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA1_RLC1_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA1_RLC1_SKIP_CNTL +#define SDMA1_RLC1_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA1_RLC1_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA1_RLC1_CONTEXT_STATUS +#define SDMA1_RLC1_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define 
SDMA1_RLC1_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA1_RLC1_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA1_RLC1_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA1_RLC1_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA1_RLC1_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA1_RLC1_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA1_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA1_RLC1_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA1_RLC1_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA1_RLC1_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA1_RLC1_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA1_RLC1_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA1_RLC1_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA1_RLC1_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA1_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA1_RLC1_DOORBELL +#define SDMA1_RLC1_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA1_RLC1_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA1_RLC1_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA1_RLC1_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA1_RLC1_STATUS +#define SDMA1_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA1_RLC1_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA1_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA1_RLC1_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA1_RLC1_DOORBELL_LOG +#define SDMA1_RLC1_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA1_RLC1_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA1_RLC1_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA1_RLC1_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA1_RLC1_WATERMARK +#define SDMA1_RLC1_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA1_RLC1_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA1_RLC1_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA1_RLC1_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA1_RLC1_DOORBELL_OFFSET +#define SDMA1_RLC1_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA1_RLC1_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA1_RLC1_CSA_ADDR_LO +#define SDMA1_RLC1_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC1_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC1_CSA_ADDR_HI +#define SDMA1_RLC1_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC1_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC1_IB_SUB_REMAIN +#define SDMA1_RLC1_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA1_RLC1_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA1_RLC1_PREEMPT +#define SDMA1_RLC1_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA1_RLC1_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA1_RLC1_DUMMY_REG +#define SDMA1_RLC1_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA1_RLC1_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA1_RLC1_RB_WPTR_POLL_ADDR_HI +#define SDMA1_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC1_RB_WPTR_POLL_ADDR_LO +#define SDMA1_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC1_RB_AQL_CNTL +#define SDMA1_RLC1_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA1_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA1_RLC1_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA1_RLC1_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA1_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA1_RLC1_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA1_RLC1_MINOR_PTR_UPDATE +#define SDMA1_RLC1_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA1_RLC1_MINOR_PTR_UPDATE__ENABLE_MASK 
0x00000001L +//SDMA1_RLC1_MIDCMD_DATA0 +#define SDMA1_RLC1_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA1_RLC1_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA1_RLC1_MIDCMD_DATA1 +#define SDMA1_RLC1_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA1_RLC1_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA1_RLC1_MIDCMD_DATA2 +#define SDMA1_RLC1_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA1_RLC1_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA1_RLC1_MIDCMD_DATA3 +#define SDMA1_RLC1_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA1_RLC1_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA1_RLC1_MIDCMD_DATA4 +#define SDMA1_RLC1_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA1_RLC1_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA1_RLC1_MIDCMD_DATA5 +#define SDMA1_RLC1_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA1_RLC1_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA1_RLC1_MIDCMD_DATA6 +#define SDMA1_RLC1_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA1_RLC1_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA1_RLC1_MIDCMD_DATA7 +#define SDMA1_RLC1_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA1_RLC1_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA1_RLC1_MIDCMD_DATA8 +#define SDMA1_RLC1_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA1_RLC1_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA1_RLC1_MIDCMD_CNTL +#define SDMA1_RLC1_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA1_RLC1_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA1_RLC1_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA1_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA1_RLC1_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA1_RLC1_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA1_RLC1_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA1_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA1_RLC2_RB_CNTL +#define SDMA1_RLC2_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA1_RLC2_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA1_RLC2_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA1_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA1_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA1_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA1_RLC2_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA1_RLC2_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA1_RLC2_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA1_RLC2_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA1_RLC2_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA1_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA1_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA1_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA1_RLC2_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA1_RLC2_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA1_RLC2_RB_BASE +#define SDMA1_RLC2_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA1_RLC2_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC2_RB_BASE_HI +#define SDMA1_RLC2_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC2_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA1_RLC2_RB_RPTR +#define SDMA1_RLC2_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA1_RLC2_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC2_RB_RPTR_HI +#define SDMA1_RLC2_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA1_RLC2_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC2_RB_WPTR +#define SDMA1_RLC2_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA1_RLC2_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC2_RB_WPTR_HI +#define SDMA1_RLC2_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA1_RLC2_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC2_RB_WPTR_POLL_CNTL +#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define 
SDMA1_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA1_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA1_RLC2_RB_RPTR_ADDR_HI +#define SDMA1_RLC2_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC2_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC2_RB_RPTR_ADDR_LO +#define SDMA1_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA1_RLC2_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA1_RLC2_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC2_IB_CNTL +#define SDMA1_RLC2_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA1_RLC2_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA1_RLC2_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA1_RLC2_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA1_RLC2_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA1_RLC2_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA1_RLC2_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA1_RLC2_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA1_RLC2_IB_RPTR +#define SDMA1_RLC2_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA1_RLC2_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA1_RLC2_IB_OFFSET +#define SDMA1_RLC2_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA1_RLC2_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA1_RLC2_IB_BASE_LO +#define SDMA1_RLC2_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA1_RLC2_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA1_RLC2_IB_BASE_HI +#define SDMA1_RLC2_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC2_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC2_IB_SIZE +#define SDMA1_RLC2_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA1_RLC2_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA1_RLC2_SKIP_CNTL +#define SDMA1_RLC2_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA1_RLC2_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA1_RLC2_CONTEXT_STATUS +#define SDMA1_RLC2_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA1_RLC2_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA1_RLC2_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA1_RLC2_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA1_RLC2_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA1_RLC2_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA1_RLC2_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA1_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA1_RLC2_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA1_RLC2_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA1_RLC2_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA1_RLC2_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA1_RLC2_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA1_RLC2_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA1_RLC2_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA1_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA1_RLC2_DOORBELL +#define SDMA1_RLC2_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA1_RLC2_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA1_RLC2_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA1_RLC2_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA1_RLC2_STATUS +#define SDMA1_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define 
SDMA1_RLC2_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA1_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA1_RLC2_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA1_RLC2_DOORBELL_LOG +#define SDMA1_RLC2_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA1_RLC2_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA1_RLC2_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA1_RLC2_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA1_RLC2_WATERMARK +#define SDMA1_RLC2_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA1_RLC2_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA1_RLC2_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA1_RLC2_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA1_RLC2_DOORBELL_OFFSET +#define SDMA1_RLC2_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA1_RLC2_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA1_RLC2_CSA_ADDR_LO +#define SDMA1_RLC2_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC2_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC2_CSA_ADDR_HI +#define SDMA1_RLC2_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC2_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC2_IB_SUB_REMAIN +#define SDMA1_RLC2_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA1_RLC2_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA1_RLC2_PREEMPT +#define SDMA1_RLC2_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA1_RLC2_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA1_RLC2_DUMMY_REG +#define SDMA1_RLC2_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA1_RLC2_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA1_RLC2_RB_WPTR_POLL_ADDR_HI +#define SDMA1_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC2_RB_WPTR_POLL_ADDR_LO +#define SDMA1_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC2_RB_AQL_CNTL +#define SDMA1_RLC2_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA1_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA1_RLC2_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA1_RLC2_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA1_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA1_RLC2_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA1_RLC2_MINOR_PTR_UPDATE +#define SDMA1_RLC2_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA1_RLC2_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA1_RLC2_MIDCMD_DATA0 +#define SDMA1_RLC2_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA1_RLC2_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA1_RLC2_MIDCMD_DATA1 +#define SDMA1_RLC2_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA1_RLC2_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA1_RLC2_MIDCMD_DATA2 +#define SDMA1_RLC2_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA1_RLC2_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA1_RLC2_MIDCMD_DATA3 +#define SDMA1_RLC2_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA1_RLC2_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA1_RLC2_MIDCMD_DATA4 +#define SDMA1_RLC2_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA1_RLC2_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA1_RLC2_MIDCMD_DATA5 +#define SDMA1_RLC2_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA1_RLC2_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA1_RLC2_MIDCMD_DATA6 +#define SDMA1_RLC2_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA1_RLC2_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA1_RLC2_MIDCMD_DATA7 +#define SDMA1_RLC2_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA1_RLC2_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA1_RLC2_MIDCMD_DATA8 +#define SDMA1_RLC2_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA1_RLC2_MIDCMD_DATA8__DATA8_MASK 
0xFFFFFFFFL +//SDMA1_RLC2_MIDCMD_CNTL +#define SDMA1_RLC2_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA1_RLC2_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA1_RLC2_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA1_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA1_RLC2_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA1_RLC2_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA1_RLC2_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA1_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA1_RLC3_RB_CNTL +#define SDMA1_RLC3_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA1_RLC3_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA1_RLC3_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA1_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA1_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA1_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA1_RLC3_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA1_RLC3_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA1_RLC3_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA1_RLC3_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA1_RLC3_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA1_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA1_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA1_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA1_RLC3_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA1_RLC3_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA1_RLC3_RB_BASE +#define SDMA1_RLC3_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA1_RLC3_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC3_RB_BASE_HI +#define SDMA1_RLC3_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC3_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA1_RLC3_RB_RPTR +#define SDMA1_RLC3_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA1_RLC3_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC3_RB_RPTR_HI +#define SDMA1_RLC3_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA1_RLC3_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC3_RB_WPTR +#define SDMA1_RLC3_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA1_RLC3_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC3_RB_WPTR_HI +#define SDMA1_RLC3_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA1_RLC3_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC3_RB_WPTR_POLL_CNTL +#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA1_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA1_RLC3_RB_RPTR_ADDR_HI +#define SDMA1_RLC3_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC3_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC3_RB_RPTR_ADDR_LO +#define SDMA1_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA1_RLC3_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA1_RLC3_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC3_IB_CNTL +#define SDMA1_RLC3_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA1_RLC3_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA1_RLC3_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA1_RLC3_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define 
SDMA1_RLC3_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA1_RLC3_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA1_RLC3_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA1_RLC3_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA1_RLC3_IB_RPTR +#define SDMA1_RLC3_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA1_RLC3_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA1_RLC3_IB_OFFSET +#define SDMA1_RLC3_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA1_RLC3_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA1_RLC3_IB_BASE_LO +#define SDMA1_RLC3_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA1_RLC3_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA1_RLC3_IB_BASE_HI +#define SDMA1_RLC3_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC3_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC3_IB_SIZE +#define SDMA1_RLC3_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA1_RLC3_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA1_RLC3_SKIP_CNTL +#define SDMA1_RLC3_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA1_RLC3_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA1_RLC3_CONTEXT_STATUS +#define SDMA1_RLC3_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA1_RLC3_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA1_RLC3_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA1_RLC3_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA1_RLC3_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA1_RLC3_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA1_RLC3_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA1_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA1_RLC3_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA1_RLC3_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA1_RLC3_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA1_RLC3_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA1_RLC3_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA1_RLC3_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA1_RLC3_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA1_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA1_RLC3_DOORBELL +#define SDMA1_RLC3_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA1_RLC3_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA1_RLC3_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA1_RLC3_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA1_RLC3_STATUS +#define SDMA1_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA1_RLC3_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA1_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA1_RLC3_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA1_RLC3_DOORBELL_LOG +#define SDMA1_RLC3_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA1_RLC3_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA1_RLC3_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA1_RLC3_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA1_RLC3_WATERMARK +#define SDMA1_RLC3_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA1_RLC3_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA1_RLC3_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA1_RLC3_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA1_RLC3_DOORBELL_OFFSET +#define SDMA1_RLC3_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA1_RLC3_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA1_RLC3_CSA_ADDR_LO +#define SDMA1_RLC3_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC3_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC3_CSA_ADDR_HI +#define SDMA1_RLC3_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC3_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC3_IB_SUB_REMAIN +#define SDMA1_RLC3_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA1_RLC3_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL 
+//SDMA1_RLC3_PREEMPT +#define SDMA1_RLC3_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA1_RLC3_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA1_RLC3_DUMMY_REG +#define SDMA1_RLC3_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA1_RLC3_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA1_RLC3_RB_WPTR_POLL_ADDR_HI +#define SDMA1_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC3_RB_WPTR_POLL_ADDR_LO +#define SDMA1_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC3_RB_AQL_CNTL +#define SDMA1_RLC3_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA1_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA1_RLC3_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA1_RLC3_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA1_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA1_RLC3_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA1_RLC3_MINOR_PTR_UPDATE +#define SDMA1_RLC3_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA1_RLC3_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA1_RLC3_MIDCMD_DATA0 +#define SDMA1_RLC3_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA1_RLC3_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA1_RLC3_MIDCMD_DATA1 +#define SDMA1_RLC3_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA1_RLC3_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA1_RLC3_MIDCMD_DATA2 +#define SDMA1_RLC3_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA1_RLC3_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA1_RLC3_MIDCMD_DATA3 +#define SDMA1_RLC3_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA1_RLC3_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA1_RLC3_MIDCMD_DATA4 +#define SDMA1_RLC3_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA1_RLC3_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA1_RLC3_MIDCMD_DATA5 +#define SDMA1_RLC3_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA1_RLC3_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA1_RLC3_MIDCMD_DATA6 +#define SDMA1_RLC3_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA1_RLC3_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA1_RLC3_MIDCMD_DATA7 +#define SDMA1_RLC3_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA1_RLC3_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA1_RLC3_MIDCMD_DATA8 +#define SDMA1_RLC3_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA1_RLC3_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA1_RLC3_MIDCMD_CNTL +#define SDMA1_RLC3_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA1_RLC3_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA1_RLC3_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA1_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA1_RLC3_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA1_RLC3_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA1_RLC3_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA1_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA1_RLC4_RB_CNTL +#define SDMA1_RLC4_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA1_RLC4_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA1_RLC4_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA1_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA1_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA1_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA1_RLC4_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA1_RLC4_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA1_RLC4_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA1_RLC4_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA1_RLC4_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA1_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define 
SDMA1_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA1_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA1_RLC4_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA1_RLC4_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA1_RLC4_RB_BASE +#define SDMA1_RLC4_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA1_RLC4_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC4_RB_BASE_HI +#define SDMA1_RLC4_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC4_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA1_RLC4_RB_RPTR +#define SDMA1_RLC4_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA1_RLC4_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC4_RB_RPTR_HI +#define SDMA1_RLC4_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA1_RLC4_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC4_RB_WPTR +#define SDMA1_RLC4_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA1_RLC4_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC4_RB_WPTR_HI +#define SDMA1_RLC4_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA1_RLC4_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC4_RB_WPTR_POLL_CNTL +#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA1_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA1_RLC4_RB_RPTR_ADDR_HI +#define SDMA1_RLC4_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC4_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC4_RB_RPTR_ADDR_LO +#define SDMA1_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA1_RLC4_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA1_RLC4_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC4_IB_CNTL +#define SDMA1_RLC4_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA1_RLC4_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA1_RLC4_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA1_RLC4_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA1_RLC4_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA1_RLC4_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA1_RLC4_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA1_RLC4_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA1_RLC4_IB_RPTR +#define SDMA1_RLC4_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA1_RLC4_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA1_RLC4_IB_OFFSET +#define SDMA1_RLC4_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA1_RLC4_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA1_RLC4_IB_BASE_LO +#define SDMA1_RLC4_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA1_RLC4_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA1_RLC4_IB_BASE_HI +#define SDMA1_RLC4_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC4_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC4_IB_SIZE +#define SDMA1_RLC4_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA1_RLC4_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA1_RLC4_SKIP_CNTL +#define SDMA1_RLC4_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA1_RLC4_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA1_RLC4_CONTEXT_STATUS +#define SDMA1_RLC4_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA1_RLC4_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA1_RLC4_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define 
SDMA1_RLC4_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA1_RLC4_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA1_RLC4_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA1_RLC4_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA1_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA1_RLC4_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA1_RLC4_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA1_RLC4_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA1_RLC4_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA1_RLC4_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA1_RLC4_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA1_RLC4_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA1_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA1_RLC4_DOORBELL +#define SDMA1_RLC4_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA1_RLC4_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA1_RLC4_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA1_RLC4_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA1_RLC4_STATUS +#define SDMA1_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA1_RLC4_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA1_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA1_RLC4_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA1_RLC4_DOORBELL_LOG +#define SDMA1_RLC4_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA1_RLC4_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA1_RLC4_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA1_RLC4_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA1_RLC4_WATERMARK +#define SDMA1_RLC4_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA1_RLC4_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA1_RLC4_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA1_RLC4_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA1_RLC4_DOORBELL_OFFSET +#define SDMA1_RLC4_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA1_RLC4_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA1_RLC4_CSA_ADDR_LO +#define SDMA1_RLC4_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC4_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC4_CSA_ADDR_HI +#define SDMA1_RLC4_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC4_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC4_IB_SUB_REMAIN +#define SDMA1_RLC4_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA1_RLC4_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA1_RLC4_PREEMPT +#define SDMA1_RLC4_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA1_RLC4_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA1_RLC4_DUMMY_REG +#define SDMA1_RLC4_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA1_RLC4_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA1_RLC4_RB_WPTR_POLL_ADDR_HI +#define SDMA1_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC4_RB_WPTR_POLL_ADDR_LO +#define SDMA1_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC4_RB_AQL_CNTL +#define SDMA1_RLC4_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA1_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA1_RLC4_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA1_RLC4_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA1_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA1_RLC4_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA1_RLC4_MINOR_PTR_UPDATE +#define SDMA1_RLC4_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA1_RLC4_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA1_RLC4_MIDCMD_DATA0 +#define SDMA1_RLC4_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define 
SDMA1_RLC4_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA1_RLC4_MIDCMD_DATA1 +#define SDMA1_RLC4_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA1_RLC4_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA1_RLC4_MIDCMD_DATA2 +#define SDMA1_RLC4_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA1_RLC4_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA1_RLC4_MIDCMD_DATA3 +#define SDMA1_RLC4_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA1_RLC4_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA1_RLC4_MIDCMD_DATA4 +#define SDMA1_RLC4_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA1_RLC4_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA1_RLC4_MIDCMD_DATA5 +#define SDMA1_RLC4_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA1_RLC4_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA1_RLC4_MIDCMD_DATA6 +#define SDMA1_RLC4_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA1_RLC4_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA1_RLC4_MIDCMD_DATA7 +#define SDMA1_RLC4_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA1_RLC4_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA1_RLC4_MIDCMD_DATA8 +#define SDMA1_RLC4_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA1_RLC4_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA1_RLC4_MIDCMD_CNTL +#define SDMA1_RLC4_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA1_RLC4_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA1_RLC4_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA1_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA1_RLC4_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA1_RLC4_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA1_RLC4_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA1_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA1_RLC5_RB_CNTL +#define SDMA1_RLC5_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA1_RLC5_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA1_RLC5_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA1_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA1_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA1_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA1_RLC5_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA1_RLC5_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA1_RLC5_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA1_RLC5_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA1_RLC5_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA1_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA1_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA1_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA1_RLC5_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA1_RLC5_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA1_RLC5_RB_BASE +#define SDMA1_RLC5_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA1_RLC5_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC5_RB_BASE_HI +#define SDMA1_RLC5_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC5_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA1_RLC5_RB_RPTR +#define SDMA1_RLC5_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA1_RLC5_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC5_RB_RPTR_HI +#define SDMA1_RLC5_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA1_RLC5_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC5_RB_WPTR +#define SDMA1_RLC5_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA1_RLC5_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC5_RB_WPTR_HI +#define SDMA1_RLC5_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA1_RLC5_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC5_RB_WPTR_POLL_CNTL +#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define 
SDMA1_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA1_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA1_RLC5_RB_RPTR_ADDR_HI +#define SDMA1_RLC5_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC5_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC5_RB_RPTR_ADDR_LO +#define SDMA1_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA1_RLC5_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA1_RLC5_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC5_IB_CNTL +#define SDMA1_RLC5_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA1_RLC5_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA1_RLC5_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA1_RLC5_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA1_RLC5_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA1_RLC5_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA1_RLC5_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA1_RLC5_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA1_RLC5_IB_RPTR +#define SDMA1_RLC5_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA1_RLC5_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA1_RLC5_IB_OFFSET +#define SDMA1_RLC5_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA1_RLC5_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA1_RLC5_IB_BASE_LO +#define SDMA1_RLC5_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA1_RLC5_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA1_RLC5_IB_BASE_HI +#define SDMA1_RLC5_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC5_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC5_IB_SIZE +#define SDMA1_RLC5_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA1_RLC5_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA1_RLC5_SKIP_CNTL +#define SDMA1_RLC5_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA1_RLC5_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA1_RLC5_CONTEXT_STATUS +#define SDMA1_RLC5_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA1_RLC5_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA1_RLC5_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA1_RLC5_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA1_RLC5_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA1_RLC5_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA1_RLC5_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA1_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA1_RLC5_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA1_RLC5_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA1_RLC5_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA1_RLC5_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA1_RLC5_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA1_RLC5_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA1_RLC5_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA1_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA1_RLC5_DOORBELL +#define SDMA1_RLC5_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA1_RLC5_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA1_RLC5_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA1_RLC5_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA1_RLC5_STATUS +#define SDMA1_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA1_RLC5_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA1_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define 
SDMA1_RLC5_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA1_RLC5_DOORBELL_LOG +#define SDMA1_RLC5_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA1_RLC5_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA1_RLC5_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA1_RLC5_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA1_RLC5_WATERMARK +#define SDMA1_RLC5_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA1_RLC5_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA1_RLC5_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA1_RLC5_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA1_RLC5_DOORBELL_OFFSET +#define SDMA1_RLC5_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA1_RLC5_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA1_RLC5_CSA_ADDR_LO +#define SDMA1_RLC5_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC5_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC5_CSA_ADDR_HI +#define SDMA1_RLC5_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC5_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC5_IB_SUB_REMAIN +#define SDMA1_RLC5_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA1_RLC5_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA1_RLC5_PREEMPT +#define SDMA1_RLC5_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA1_RLC5_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA1_RLC5_DUMMY_REG +#define SDMA1_RLC5_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA1_RLC5_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA1_RLC5_RB_WPTR_POLL_ADDR_HI +#define SDMA1_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC5_RB_WPTR_POLL_ADDR_LO +#define SDMA1_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC5_RB_AQL_CNTL +#define SDMA1_RLC5_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA1_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA1_RLC5_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA1_RLC5_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA1_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA1_RLC5_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA1_RLC5_MINOR_PTR_UPDATE +#define SDMA1_RLC5_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA1_RLC5_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA1_RLC5_MIDCMD_DATA0 +#define SDMA1_RLC5_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA1_RLC5_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA1_RLC5_MIDCMD_DATA1 +#define SDMA1_RLC5_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA1_RLC5_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA1_RLC5_MIDCMD_DATA2 +#define SDMA1_RLC5_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA1_RLC5_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA1_RLC5_MIDCMD_DATA3 +#define SDMA1_RLC5_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA1_RLC5_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA1_RLC5_MIDCMD_DATA4 +#define SDMA1_RLC5_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA1_RLC5_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA1_RLC5_MIDCMD_DATA5 +#define SDMA1_RLC5_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA1_RLC5_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA1_RLC5_MIDCMD_DATA6 +#define SDMA1_RLC5_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA1_RLC5_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA1_RLC5_MIDCMD_DATA7 +#define SDMA1_RLC5_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA1_RLC5_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA1_RLC5_MIDCMD_DATA8 +#define SDMA1_RLC5_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA1_RLC5_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA1_RLC5_MIDCMD_CNTL +#define SDMA1_RLC5_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define 
SDMA1_RLC5_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA1_RLC5_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA1_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA1_RLC5_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA1_RLC5_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA1_RLC5_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA1_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA1_RLC6_RB_CNTL +#define SDMA1_RLC6_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA1_RLC6_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA1_RLC6_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA1_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA1_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA1_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA1_RLC6_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA1_RLC6_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA1_RLC6_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA1_RLC6_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA1_RLC6_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA1_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA1_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA1_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA1_RLC6_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA1_RLC6_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA1_RLC6_RB_BASE +#define SDMA1_RLC6_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA1_RLC6_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC6_RB_BASE_HI +#define SDMA1_RLC6_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC6_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA1_RLC6_RB_RPTR +#define SDMA1_RLC6_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA1_RLC6_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC6_RB_RPTR_HI +#define SDMA1_RLC6_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA1_RLC6_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC6_RB_WPTR +#define SDMA1_RLC6_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA1_RLC6_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC6_RB_WPTR_HI +#define SDMA1_RLC6_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA1_RLC6_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC6_RB_WPTR_POLL_CNTL +#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA1_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA1_RLC6_RB_RPTR_ADDR_HI +#define SDMA1_RLC6_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC6_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC6_RB_RPTR_ADDR_LO +#define SDMA1_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA1_RLC6_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA1_RLC6_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC6_IB_CNTL +#define SDMA1_RLC6_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA1_RLC6_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA1_RLC6_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA1_RLC6_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA1_RLC6_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA1_RLC6_IB_CNTL__IB_SWAP_ENABLE_MASK 
0x00000010L +#define SDMA1_RLC6_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA1_RLC6_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA1_RLC6_IB_RPTR +#define SDMA1_RLC6_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA1_RLC6_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA1_RLC6_IB_OFFSET +#define SDMA1_RLC6_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA1_RLC6_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA1_RLC6_IB_BASE_LO +#define SDMA1_RLC6_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA1_RLC6_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA1_RLC6_IB_BASE_HI +#define SDMA1_RLC6_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC6_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC6_IB_SIZE +#define SDMA1_RLC6_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA1_RLC6_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA1_RLC6_SKIP_CNTL +#define SDMA1_RLC6_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA1_RLC6_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA1_RLC6_CONTEXT_STATUS +#define SDMA1_RLC6_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA1_RLC6_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA1_RLC6_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA1_RLC6_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA1_RLC6_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA1_RLC6_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA1_RLC6_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA1_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA1_RLC6_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA1_RLC6_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA1_RLC6_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA1_RLC6_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA1_RLC6_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA1_RLC6_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA1_RLC6_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA1_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA1_RLC6_DOORBELL +#define SDMA1_RLC6_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA1_RLC6_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA1_RLC6_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA1_RLC6_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA1_RLC6_STATUS +#define SDMA1_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA1_RLC6_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA1_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA1_RLC6_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA1_RLC6_DOORBELL_LOG +#define SDMA1_RLC6_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA1_RLC6_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA1_RLC6_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA1_RLC6_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA1_RLC6_WATERMARK +#define SDMA1_RLC6_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA1_RLC6_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA1_RLC6_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA1_RLC6_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA1_RLC6_DOORBELL_OFFSET +#define SDMA1_RLC6_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA1_RLC6_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA1_RLC6_CSA_ADDR_LO +#define SDMA1_RLC6_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC6_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC6_CSA_ADDR_HI +#define SDMA1_RLC6_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC6_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC6_IB_SUB_REMAIN +#define SDMA1_RLC6_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA1_RLC6_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA1_RLC6_PREEMPT +#define SDMA1_RLC6_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define 
SDMA1_RLC6_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA1_RLC6_DUMMY_REG +#define SDMA1_RLC6_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA1_RLC6_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA1_RLC6_RB_WPTR_POLL_ADDR_HI +#define SDMA1_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC6_RB_WPTR_POLL_ADDR_LO +#define SDMA1_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC6_RB_AQL_CNTL +#define SDMA1_RLC6_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA1_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA1_RLC6_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA1_RLC6_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA1_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA1_RLC6_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA1_RLC6_MINOR_PTR_UPDATE +#define SDMA1_RLC6_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA1_RLC6_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA1_RLC6_MIDCMD_DATA0 +#define SDMA1_RLC6_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA1_RLC6_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA1_RLC6_MIDCMD_DATA1 +#define SDMA1_RLC6_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA1_RLC6_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA1_RLC6_MIDCMD_DATA2 +#define SDMA1_RLC6_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA1_RLC6_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA1_RLC6_MIDCMD_DATA3 +#define SDMA1_RLC6_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA1_RLC6_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA1_RLC6_MIDCMD_DATA4 +#define SDMA1_RLC6_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA1_RLC6_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA1_RLC6_MIDCMD_DATA5 +#define SDMA1_RLC6_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA1_RLC6_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA1_RLC6_MIDCMD_DATA6 +#define SDMA1_RLC6_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA1_RLC6_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA1_RLC6_MIDCMD_DATA7 +#define SDMA1_RLC6_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA1_RLC6_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA1_RLC6_MIDCMD_DATA8 +#define SDMA1_RLC6_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA1_RLC6_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA1_RLC6_MIDCMD_CNTL +#define SDMA1_RLC6_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA1_RLC6_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA1_RLC6_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA1_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA1_RLC6_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA1_RLC6_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA1_RLC6_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA1_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA1_RLC7_RB_CNTL +#define SDMA1_RLC7_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA1_RLC7_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA1_RLC7_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA1_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA1_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA1_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA1_RLC7_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA1_RLC7_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA1_RLC7_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA1_RLC7_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA1_RLC7_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA1_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA1_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define 
SDMA1_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA1_RLC7_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA1_RLC7_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA1_RLC7_RB_BASE +#define SDMA1_RLC7_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA1_RLC7_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC7_RB_BASE_HI +#define SDMA1_RLC7_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC7_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA1_RLC7_RB_RPTR +#define SDMA1_RLC7_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA1_RLC7_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC7_RB_RPTR_HI +#define SDMA1_RLC7_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA1_RLC7_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC7_RB_WPTR +#define SDMA1_RLC7_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA1_RLC7_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC7_RB_WPTR_HI +#define SDMA1_RLC7_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA1_RLC7_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA1_RLC7_RB_WPTR_POLL_CNTL +#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA1_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA1_RLC7_RB_RPTR_ADDR_HI +#define SDMA1_RLC7_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC7_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC7_RB_RPTR_ADDR_LO +#define SDMA1_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA1_RLC7_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA1_RLC7_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC7_IB_CNTL +#define SDMA1_RLC7_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA1_RLC7_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA1_RLC7_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA1_RLC7_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA1_RLC7_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA1_RLC7_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA1_RLC7_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA1_RLC7_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA1_RLC7_IB_RPTR +#define SDMA1_RLC7_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA1_RLC7_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA1_RLC7_IB_OFFSET +#define SDMA1_RLC7_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA1_RLC7_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA1_RLC7_IB_BASE_LO +#define SDMA1_RLC7_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA1_RLC7_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA1_RLC7_IB_BASE_HI +#define SDMA1_RLC7_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC7_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC7_IB_SIZE +#define SDMA1_RLC7_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA1_RLC7_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA1_RLC7_SKIP_CNTL +#define SDMA1_RLC7_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA1_RLC7_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA1_RLC7_CONTEXT_STATUS +#define SDMA1_RLC7_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA1_RLC7_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA1_RLC7_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA1_RLC7_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA1_RLC7_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 
+#define SDMA1_RLC7_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA1_RLC7_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA1_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA1_RLC7_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA1_RLC7_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA1_RLC7_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA1_RLC7_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA1_RLC7_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA1_RLC7_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA1_RLC7_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA1_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA1_RLC7_DOORBELL +#define SDMA1_RLC7_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA1_RLC7_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA1_RLC7_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA1_RLC7_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA1_RLC7_STATUS +#define SDMA1_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA1_RLC7_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA1_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA1_RLC7_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA1_RLC7_DOORBELL_LOG +#define SDMA1_RLC7_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA1_RLC7_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA1_RLC7_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA1_RLC7_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA1_RLC7_WATERMARK +#define SDMA1_RLC7_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA1_RLC7_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA1_RLC7_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA1_RLC7_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA1_RLC7_DOORBELL_OFFSET +#define SDMA1_RLC7_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA1_RLC7_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA1_RLC7_CSA_ADDR_LO +#define SDMA1_RLC7_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC7_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC7_CSA_ADDR_HI +#define SDMA1_RLC7_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC7_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC7_IB_SUB_REMAIN +#define SDMA1_RLC7_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA1_RLC7_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA1_RLC7_PREEMPT +#define SDMA1_RLC7_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA1_RLC7_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA1_RLC7_DUMMY_REG +#define SDMA1_RLC7_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA1_RLC7_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA1_RLC7_RB_WPTR_POLL_ADDR_HI +#define SDMA1_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA1_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA1_RLC7_RB_WPTR_POLL_ADDR_LO +#define SDMA1_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA1_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA1_RLC7_RB_AQL_CNTL +#define SDMA1_RLC7_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA1_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA1_RLC7_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA1_RLC7_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA1_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA1_RLC7_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA1_RLC7_MINOR_PTR_UPDATE +#define SDMA1_RLC7_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA1_RLC7_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA1_RLC7_MIDCMD_DATA0 +#define SDMA1_RLC7_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA1_RLC7_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA1_RLC7_MIDCMD_DATA1 +#define 
SDMA1_RLC7_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA1_RLC7_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA1_RLC7_MIDCMD_DATA2 +#define SDMA1_RLC7_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA1_RLC7_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA1_RLC7_MIDCMD_DATA3 +#define SDMA1_RLC7_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA1_RLC7_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA1_RLC7_MIDCMD_DATA4 +#define SDMA1_RLC7_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA1_RLC7_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA1_RLC7_MIDCMD_DATA5 +#define SDMA1_RLC7_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA1_RLC7_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA1_RLC7_MIDCMD_DATA6 +#define SDMA1_RLC7_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA1_RLC7_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA1_RLC7_MIDCMD_DATA7 +#define SDMA1_RLC7_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA1_RLC7_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA1_RLC7_MIDCMD_DATA8 +#define SDMA1_RLC7_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA1_RLC7_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA1_RLC7_MIDCMD_CNTL +#define SDMA1_RLC7_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA1_RLC7_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA1_RLC7_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA1_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA1_RLC7_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA1_RLC7_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA1_RLC7_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA1_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma2/sdma2_4_2_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/sdma2/sdma2_4_2_2_offset.h new file mode 100644 index 000000000000..6aa0813915c2 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/sdma2/sdma2_4_2_2_offset.h @@ -0,0 +1,1043 @@ +/* + * Copyright (C) 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef _sdma2_4_2_2_OFFSET_HEADER +#define _sdma2_4_2_2_OFFSET_HEADER + + + +// addressBlock: sdma2_sdma2dec +// base address: 0x78000 +#define mmSDMA2_UCODE_ADDR 0x0000 +#define mmSDMA2_UCODE_ADDR_BASE_IDX 1 +#define mmSDMA2_UCODE_DATA 0x0001 +#define mmSDMA2_UCODE_DATA_BASE_IDX 1 +#define mmSDMA2_VM_CNTL 0x0004 +#define mmSDMA2_VM_CNTL_BASE_IDX 1 +#define mmSDMA2_VM_CTX_LO 0x0005 +#define mmSDMA2_VM_CTX_LO_BASE_IDX 1 +#define mmSDMA2_VM_CTX_HI 0x0006 +#define mmSDMA2_VM_CTX_HI_BASE_IDX 1 +#define mmSDMA2_ACTIVE_FCN_ID 0x0007 +#define mmSDMA2_ACTIVE_FCN_ID_BASE_IDX 1 +#define mmSDMA2_VM_CTX_CNTL 0x0008 +#define mmSDMA2_VM_CTX_CNTL_BASE_IDX 1 +#define mmSDMA2_VIRT_RESET_REQ 0x0009 +#define mmSDMA2_VIRT_RESET_REQ_BASE_IDX 1 +#define mmSDMA2_VF_ENABLE 0x000a +#define mmSDMA2_VF_ENABLE_BASE_IDX 1 +#define mmSDMA2_CONTEXT_REG_TYPE0 0x000b +#define mmSDMA2_CONTEXT_REG_TYPE0_BASE_IDX 1 +#define mmSDMA2_CONTEXT_REG_TYPE1 0x000c +#define mmSDMA2_CONTEXT_REG_TYPE1_BASE_IDX 1 +#define mmSDMA2_CONTEXT_REG_TYPE2 0x000d +#define mmSDMA2_CONTEXT_REG_TYPE2_BASE_IDX 1 +#define mmSDMA2_CONTEXT_REG_TYPE3 0x000e +#define mmSDMA2_CONTEXT_REG_TYPE3_BASE_IDX 1 +#define mmSDMA2_PUB_REG_TYPE0 0x000f +#define mmSDMA2_PUB_REG_TYPE0_BASE_IDX 1 +#define mmSDMA2_PUB_REG_TYPE1 0x0010 +#define mmSDMA2_PUB_REG_TYPE1_BASE_IDX 1 +#define mmSDMA2_PUB_REG_TYPE2 0x0011 +#define mmSDMA2_PUB_REG_TYPE2_BASE_IDX 1 +#define mmSDMA2_PUB_REG_TYPE3 0x0012 +#define mmSDMA2_PUB_REG_TYPE3_BASE_IDX 1 +#define mmSDMA2_MMHUB_CNTL 0x0013 +#define mmSDMA2_MMHUB_CNTL_BASE_IDX 1 +#define mmSDMA2_CONTEXT_GROUP_BOUNDARY 0x0019 +#define mmSDMA2_CONTEXT_GROUP_BOUNDARY_BASE_IDX 1 +#define mmSDMA2_POWER_CNTL 0x001a +#define mmSDMA2_POWER_CNTL_BASE_IDX 1 +#define mmSDMA2_CLK_CTRL 0x001b +#define mmSDMA2_CLK_CTRL_BASE_IDX 1 +#define mmSDMA2_CNTL 0x001c +#define mmSDMA2_CNTL_BASE_IDX 1 +#define mmSDMA2_CHICKEN_BITS 0x001d +#define mmSDMA2_CHICKEN_BITS_BASE_IDX 1 +#define mmSDMA2_GB_ADDR_CONFIG 0x001e +#define mmSDMA2_GB_ADDR_CONFIG_BASE_IDX 1 +#define mmSDMA2_GB_ADDR_CONFIG_READ 0x001f +#define mmSDMA2_GB_ADDR_CONFIG_READ_BASE_IDX 1 +#define mmSDMA2_RB_RPTR_FETCH_HI 0x0020 +#define mmSDMA2_RB_RPTR_FETCH_HI_BASE_IDX 1 +#define mmSDMA2_SEM_WAIT_FAIL_TIMER_CNTL 0x0021 +#define mmSDMA2_SEM_WAIT_FAIL_TIMER_CNTL_BASE_IDX 1 +#define mmSDMA2_RB_RPTR_FETCH 0x0022 +#define mmSDMA2_RB_RPTR_FETCH_BASE_IDX 1 +#define mmSDMA2_IB_OFFSET_FETCH 0x0023 +#define mmSDMA2_IB_OFFSET_FETCH_BASE_IDX 1 +#define mmSDMA2_PROGRAM 0x0024 +#define mmSDMA2_PROGRAM_BASE_IDX 1 +#define mmSDMA2_STATUS_REG 0x0025 +#define mmSDMA2_STATUS_REG_BASE_IDX 1 +#define mmSDMA2_STATUS1_REG 0x0026 +#define mmSDMA2_STATUS1_REG_BASE_IDX 1 +#define mmSDMA2_RD_BURST_CNTL 0x0027 +#define mmSDMA2_RD_BURST_CNTL_BASE_IDX 1 +#define mmSDMA2_HBM_PAGE_CONFIG 0x0028 +#define mmSDMA2_HBM_PAGE_CONFIG_BASE_IDX 1 +#define mmSDMA2_UCODE_CHECKSUM 0x0029 +#define mmSDMA2_UCODE_CHECKSUM_BASE_IDX 1 +#define mmSDMA2_F32_CNTL 0x002a +#define mmSDMA2_F32_CNTL_BASE_IDX 1 +#define mmSDMA2_FREEZE 0x002b +#define mmSDMA2_FREEZE_BASE_IDX 1 +#define mmSDMA2_PHASE0_QUANTUM 0x002c +#define mmSDMA2_PHASE0_QUANTUM_BASE_IDX 1 +#define mmSDMA2_PHASE1_QUANTUM 0x002d +#define mmSDMA2_PHASE1_QUANTUM_BASE_IDX 1 +#define mmSDMA2_EDC_CONFIG 0x0032 +#define mmSDMA2_EDC_CONFIG_BASE_IDX 1 +#define mmSDMA2_BA_THRESHOLD 0x0033 +#define mmSDMA2_BA_THRESHOLD_BASE_IDX 1 +#define mmSDMA2_ID 0x0034 +#define mmSDMA2_ID_BASE_IDX 1 +#define mmSDMA2_VERSION 0x0035 +#define mmSDMA2_VERSION_BASE_IDX 1 +#define mmSDMA2_EDC_COUNTER 0x0036 +#define 
mmSDMA2_EDC_COUNTER_BASE_IDX 1 +#define mmSDMA2_EDC_COUNTER_CLEAR 0x0037 +#define mmSDMA2_EDC_COUNTER_CLEAR_BASE_IDX 1 +#define mmSDMA2_STATUS2_REG 0x0038 +#define mmSDMA2_STATUS2_REG_BASE_IDX 1 +#define mmSDMA2_ATOMIC_CNTL 0x0039 +#define mmSDMA2_ATOMIC_CNTL_BASE_IDX 1 +#define mmSDMA2_ATOMIC_PREOP_LO 0x003a +#define mmSDMA2_ATOMIC_PREOP_LO_BASE_IDX 1 +#define mmSDMA2_ATOMIC_PREOP_HI 0x003b +#define mmSDMA2_ATOMIC_PREOP_HI_BASE_IDX 1 +#define mmSDMA2_UTCL1_CNTL 0x003c +#define mmSDMA2_UTCL1_CNTL_BASE_IDX 1 +#define mmSDMA2_UTCL1_WATERMK 0x003d +#define mmSDMA2_UTCL1_WATERMK_BASE_IDX 1 +#define mmSDMA2_UTCL1_RD_STATUS 0x003e +#define mmSDMA2_UTCL1_RD_STATUS_BASE_IDX 1 +#define mmSDMA2_UTCL1_WR_STATUS 0x003f +#define mmSDMA2_UTCL1_WR_STATUS_BASE_IDX 1 +#define mmSDMA2_UTCL1_INV0 0x0040 +#define mmSDMA2_UTCL1_INV0_BASE_IDX 1 +#define mmSDMA2_UTCL1_INV1 0x0041 +#define mmSDMA2_UTCL1_INV1_BASE_IDX 1 +#define mmSDMA2_UTCL1_INV2 0x0042 +#define mmSDMA2_UTCL1_INV2_BASE_IDX 1 +#define mmSDMA2_UTCL1_RD_XNACK0 0x0043 +#define mmSDMA2_UTCL1_RD_XNACK0_BASE_IDX 1 +#define mmSDMA2_UTCL1_RD_XNACK1 0x0044 +#define mmSDMA2_UTCL1_RD_XNACK1_BASE_IDX 1 +#define mmSDMA2_UTCL1_WR_XNACK0 0x0045 +#define mmSDMA2_UTCL1_WR_XNACK0_BASE_IDX 1 +#define mmSDMA2_UTCL1_WR_XNACK1 0x0046 +#define mmSDMA2_UTCL1_WR_XNACK1_BASE_IDX 1 +#define mmSDMA2_UTCL1_TIMEOUT 0x0047 +#define mmSDMA2_UTCL1_TIMEOUT_BASE_IDX 1 +#define mmSDMA2_UTCL1_PAGE 0x0048 +#define mmSDMA2_UTCL1_PAGE_BASE_IDX 1 +#define mmSDMA2_POWER_CNTL_IDLE 0x0049 +#define mmSDMA2_POWER_CNTL_IDLE_BASE_IDX 1 +#define mmSDMA2_RELAX_ORDERING_LUT 0x004a +#define mmSDMA2_RELAX_ORDERING_LUT_BASE_IDX 1 +#define mmSDMA2_CHICKEN_BITS_2 0x004b +#define mmSDMA2_CHICKEN_BITS_2_BASE_IDX 1 +#define mmSDMA2_STATUS3_REG 0x004c +#define mmSDMA2_STATUS3_REG_BASE_IDX 1 +#define mmSDMA2_PHYSICAL_ADDR_LO 0x004d +#define mmSDMA2_PHYSICAL_ADDR_LO_BASE_IDX 1 +#define mmSDMA2_PHYSICAL_ADDR_HI 0x004e +#define mmSDMA2_PHYSICAL_ADDR_HI_BASE_IDX 1 +#define mmSDMA2_PHASE2_QUANTUM 0x004f +#define mmSDMA2_PHASE2_QUANTUM_BASE_IDX 1 +#define mmSDMA2_ERROR_LOG 0x0050 +#define mmSDMA2_ERROR_LOG_BASE_IDX 1 +#define mmSDMA2_PUB_DUMMY_REG0 0x0051 +#define mmSDMA2_PUB_DUMMY_REG0_BASE_IDX 1 +#define mmSDMA2_PUB_DUMMY_REG1 0x0052 +#define mmSDMA2_PUB_DUMMY_REG1_BASE_IDX 1 +#define mmSDMA2_PUB_DUMMY_REG2 0x0053 +#define mmSDMA2_PUB_DUMMY_REG2_BASE_IDX 1 +#define mmSDMA2_PUB_DUMMY_REG3 0x0054 +#define mmSDMA2_PUB_DUMMY_REG3_BASE_IDX 1 +#define mmSDMA2_F32_COUNTER 0x0055 +#define mmSDMA2_F32_COUNTER_BASE_IDX 1 +#define mmSDMA2_UNBREAKABLE 0x0056 +#define mmSDMA2_UNBREAKABLE_BASE_IDX 1 +#define mmSDMA2_PERFMON_CNTL 0x0057 +#define mmSDMA2_PERFMON_CNTL_BASE_IDX 1 +#define mmSDMA2_PERFCOUNTER0_RESULT 0x0058 +#define mmSDMA2_PERFCOUNTER0_RESULT_BASE_IDX 1 +#define mmSDMA2_PERFCOUNTER1_RESULT 0x0059 +#define mmSDMA2_PERFCOUNTER1_RESULT_BASE_IDX 1 +#define mmSDMA2_PERFCOUNTER_TAG_DELAY_RANGE 0x005a +#define mmSDMA2_PERFCOUNTER_TAG_DELAY_RANGE_BASE_IDX 1 +#define mmSDMA2_CRD_CNTL 0x005b +#define mmSDMA2_CRD_CNTL_BASE_IDX 1 +#define mmSDMA2_GPU_IOV_VIOLATION_LOG 0x005d +#define mmSDMA2_GPU_IOV_VIOLATION_LOG_BASE_IDX 1 +#define mmSDMA2_ULV_CNTL 0x005e +#define mmSDMA2_ULV_CNTL_BASE_IDX 1 +#define mmSDMA2_EA_DBIT_ADDR_DATA 0x0060 +#define mmSDMA2_EA_DBIT_ADDR_DATA_BASE_IDX 1 +#define mmSDMA2_EA_DBIT_ADDR_INDEX 0x0061 +#define mmSDMA2_EA_DBIT_ADDR_INDEX_BASE_IDX 1 +#define mmSDMA2_GPU_IOV_VIOLATION_LOG2 0x0062 +#define mmSDMA2_GPU_IOV_VIOLATION_LOG2_BASE_IDX 1 +#define mmSDMA2_GFX_RB_CNTL 0x0080 +#define 
mmSDMA2_GFX_RB_CNTL_BASE_IDX 1 +#define mmSDMA2_GFX_RB_BASE 0x0081 +#define mmSDMA2_GFX_RB_BASE_BASE_IDX 1 +#define mmSDMA2_GFX_RB_BASE_HI 0x0082 +#define mmSDMA2_GFX_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA2_GFX_RB_RPTR 0x0083 +#define mmSDMA2_GFX_RB_RPTR_BASE_IDX 1 +#define mmSDMA2_GFX_RB_RPTR_HI 0x0084 +#define mmSDMA2_GFX_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA2_GFX_RB_WPTR 0x0085 +#define mmSDMA2_GFX_RB_WPTR_BASE_IDX 1 +#define mmSDMA2_GFX_RB_WPTR_HI 0x0086 +#define mmSDMA2_GFX_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA2_GFX_RB_WPTR_POLL_CNTL 0x0087 +#define mmSDMA2_GFX_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA2_GFX_RB_RPTR_ADDR_HI 0x0088 +#define mmSDMA2_GFX_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA2_GFX_RB_RPTR_ADDR_LO 0x0089 +#define mmSDMA2_GFX_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA2_GFX_IB_CNTL 0x008a +#define mmSDMA2_GFX_IB_CNTL_BASE_IDX 1 +#define mmSDMA2_GFX_IB_RPTR 0x008b +#define mmSDMA2_GFX_IB_RPTR_BASE_IDX 1 +#define mmSDMA2_GFX_IB_OFFSET 0x008c +#define mmSDMA2_GFX_IB_OFFSET_BASE_IDX 1 +#define mmSDMA2_GFX_IB_BASE_LO 0x008d +#define mmSDMA2_GFX_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA2_GFX_IB_BASE_HI 0x008e +#define mmSDMA2_GFX_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA2_GFX_IB_SIZE 0x008f +#define mmSDMA2_GFX_IB_SIZE_BASE_IDX 1 +#define mmSDMA2_GFX_SKIP_CNTL 0x0090 +#define mmSDMA2_GFX_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA2_GFX_CONTEXT_STATUS 0x0091 +#define mmSDMA2_GFX_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA2_GFX_DOORBELL 0x0092 +#define mmSDMA2_GFX_DOORBELL_BASE_IDX 1 +#define mmSDMA2_GFX_CONTEXT_CNTL 0x0093 +#define mmSDMA2_GFX_CONTEXT_CNTL_BASE_IDX 1 +#define mmSDMA2_GFX_STATUS 0x00a8 +#define mmSDMA2_GFX_STATUS_BASE_IDX 1 +#define mmSDMA2_GFX_DOORBELL_LOG 0x00a9 +#define mmSDMA2_GFX_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA2_GFX_WATERMARK 0x00aa +#define mmSDMA2_GFX_WATERMARK_BASE_IDX 1 +#define mmSDMA2_GFX_DOORBELL_OFFSET 0x00ab +#define mmSDMA2_GFX_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA2_GFX_CSA_ADDR_LO 0x00ac +#define mmSDMA2_GFX_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA2_GFX_CSA_ADDR_HI 0x00ad +#define mmSDMA2_GFX_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA2_GFX_IB_SUB_REMAIN 0x00af +#define mmSDMA2_GFX_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA2_GFX_PREEMPT 0x00b0 +#define mmSDMA2_GFX_PREEMPT_BASE_IDX 1 +#define mmSDMA2_GFX_DUMMY_REG 0x00b1 +#define mmSDMA2_GFX_DUMMY_REG_BASE_IDX 1 +#define mmSDMA2_GFX_RB_WPTR_POLL_ADDR_HI 0x00b2 +#define mmSDMA2_GFX_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA2_GFX_RB_WPTR_POLL_ADDR_LO 0x00b3 +#define mmSDMA2_GFX_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA2_GFX_RB_AQL_CNTL 0x00b4 +#define mmSDMA2_GFX_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA2_GFX_MINOR_PTR_UPDATE 0x00b5 +#define mmSDMA2_GFX_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA2_GFX_MIDCMD_DATA0 0x00c0 +#define mmSDMA2_GFX_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA2_GFX_MIDCMD_DATA1 0x00c1 +#define mmSDMA2_GFX_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA2_GFX_MIDCMD_DATA2 0x00c2 +#define mmSDMA2_GFX_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA2_GFX_MIDCMD_DATA3 0x00c3 +#define mmSDMA2_GFX_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA2_GFX_MIDCMD_DATA4 0x00c4 +#define mmSDMA2_GFX_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA2_GFX_MIDCMD_DATA5 0x00c5 +#define mmSDMA2_GFX_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA2_GFX_MIDCMD_DATA6 0x00c6 +#define mmSDMA2_GFX_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA2_GFX_MIDCMD_DATA7 0x00c7 +#define mmSDMA2_GFX_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA2_GFX_MIDCMD_DATA8 0x00c8 +#define mmSDMA2_GFX_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA2_GFX_MIDCMD_CNTL 0x00c9 +#define 
mmSDMA2_GFX_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA2_PAGE_RB_CNTL 0x00d8 +#define mmSDMA2_PAGE_RB_CNTL_BASE_IDX 1 +#define mmSDMA2_PAGE_RB_BASE 0x00d9 +#define mmSDMA2_PAGE_RB_BASE_BASE_IDX 1 +#define mmSDMA2_PAGE_RB_BASE_HI 0x00da +#define mmSDMA2_PAGE_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA2_PAGE_RB_RPTR 0x00db +#define mmSDMA2_PAGE_RB_RPTR_BASE_IDX 1 +#define mmSDMA2_PAGE_RB_RPTR_HI 0x00dc +#define mmSDMA2_PAGE_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA2_PAGE_RB_WPTR 0x00dd +#define mmSDMA2_PAGE_RB_WPTR_BASE_IDX 1 +#define mmSDMA2_PAGE_RB_WPTR_HI 0x00de +#define mmSDMA2_PAGE_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA2_PAGE_RB_WPTR_POLL_CNTL 0x00df +#define mmSDMA2_PAGE_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA2_PAGE_RB_RPTR_ADDR_HI 0x00e0 +#define mmSDMA2_PAGE_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA2_PAGE_RB_RPTR_ADDR_LO 0x00e1 +#define mmSDMA2_PAGE_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA2_PAGE_IB_CNTL 0x00e2 +#define mmSDMA2_PAGE_IB_CNTL_BASE_IDX 1 +#define mmSDMA2_PAGE_IB_RPTR 0x00e3 +#define mmSDMA2_PAGE_IB_RPTR_BASE_IDX 1 +#define mmSDMA2_PAGE_IB_OFFSET 0x00e4 +#define mmSDMA2_PAGE_IB_OFFSET_BASE_IDX 1 +#define mmSDMA2_PAGE_IB_BASE_LO 0x00e5 +#define mmSDMA2_PAGE_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA2_PAGE_IB_BASE_HI 0x00e6 +#define mmSDMA2_PAGE_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA2_PAGE_IB_SIZE 0x00e7 +#define mmSDMA2_PAGE_IB_SIZE_BASE_IDX 1 +#define mmSDMA2_PAGE_SKIP_CNTL 0x00e8 +#define mmSDMA2_PAGE_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA2_PAGE_CONTEXT_STATUS 0x00e9 +#define mmSDMA2_PAGE_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA2_PAGE_DOORBELL 0x00ea +#define mmSDMA2_PAGE_DOORBELL_BASE_IDX 1 +#define mmSDMA2_PAGE_STATUS 0x0100 +#define mmSDMA2_PAGE_STATUS_BASE_IDX 1 +#define mmSDMA2_PAGE_DOORBELL_LOG 0x0101 +#define mmSDMA2_PAGE_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA2_PAGE_WATERMARK 0x0102 +#define mmSDMA2_PAGE_WATERMARK_BASE_IDX 1 +#define mmSDMA2_PAGE_DOORBELL_OFFSET 0x0103 +#define mmSDMA2_PAGE_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA2_PAGE_CSA_ADDR_LO 0x0104 +#define mmSDMA2_PAGE_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA2_PAGE_CSA_ADDR_HI 0x0105 +#define mmSDMA2_PAGE_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA2_PAGE_IB_SUB_REMAIN 0x0107 +#define mmSDMA2_PAGE_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA2_PAGE_PREEMPT 0x0108 +#define mmSDMA2_PAGE_PREEMPT_BASE_IDX 1 +#define mmSDMA2_PAGE_DUMMY_REG 0x0109 +#define mmSDMA2_PAGE_DUMMY_REG_BASE_IDX 1 +#define mmSDMA2_PAGE_RB_WPTR_POLL_ADDR_HI 0x010a +#define mmSDMA2_PAGE_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA2_PAGE_RB_WPTR_POLL_ADDR_LO 0x010b +#define mmSDMA2_PAGE_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA2_PAGE_RB_AQL_CNTL 0x010c +#define mmSDMA2_PAGE_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA2_PAGE_MINOR_PTR_UPDATE 0x010d +#define mmSDMA2_PAGE_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA2_PAGE_MIDCMD_DATA0 0x0118 +#define mmSDMA2_PAGE_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA2_PAGE_MIDCMD_DATA1 0x0119 +#define mmSDMA2_PAGE_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA2_PAGE_MIDCMD_DATA2 0x011a +#define mmSDMA2_PAGE_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA2_PAGE_MIDCMD_DATA3 0x011b +#define mmSDMA2_PAGE_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA2_PAGE_MIDCMD_DATA4 0x011c +#define mmSDMA2_PAGE_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA2_PAGE_MIDCMD_DATA5 0x011d +#define mmSDMA2_PAGE_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA2_PAGE_MIDCMD_DATA6 0x011e +#define mmSDMA2_PAGE_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA2_PAGE_MIDCMD_DATA7 0x011f +#define mmSDMA2_PAGE_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA2_PAGE_MIDCMD_DATA8 0x0120 +#define 
mmSDMA2_PAGE_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA2_PAGE_MIDCMD_CNTL 0x0121 +#define mmSDMA2_PAGE_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC0_RB_CNTL 0x0130 +#define mmSDMA2_RLC0_RB_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC0_RB_BASE 0x0131 +#define mmSDMA2_RLC0_RB_BASE_BASE_IDX 1 +#define mmSDMA2_RLC0_RB_BASE_HI 0x0132 +#define mmSDMA2_RLC0_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA2_RLC0_RB_RPTR 0x0133 +#define mmSDMA2_RLC0_RB_RPTR_BASE_IDX 1 +#define mmSDMA2_RLC0_RB_RPTR_HI 0x0134 +#define mmSDMA2_RLC0_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA2_RLC0_RB_WPTR 0x0135 +#define mmSDMA2_RLC0_RB_WPTR_BASE_IDX 1 +#define mmSDMA2_RLC0_RB_WPTR_HI 0x0136 +#define mmSDMA2_RLC0_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA2_RLC0_RB_WPTR_POLL_CNTL 0x0137 +#define mmSDMA2_RLC0_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC0_RB_RPTR_ADDR_HI 0x0138 +#define mmSDMA2_RLC0_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA2_RLC0_RB_RPTR_ADDR_LO 0x0139 +#define mmSDMA2_RLC0_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA2_RLC0_IB_CNTL 0x013a +#define mmSDMA2_RLC0_IB_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC0_IB_RPTR 0x013b +#define mmSDMA2_RLC0_IB_RPTR_BASE_IDX 1 +#define mmSDMA2_RLC0_IB_OFFSET 0x013c +#define mmSDMA2_RLC0_IB_OFFSET_BASE_IDX 1 +#define mmSDMA2_RLC0_IB_BASE_LO 0x013d +#define mmSDMA2_RLC0_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA2_RLC0_IB_BASE_HI 0x013e +#define mmSDMA2_RLC0_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA2_RLC0_IB_SIZE 0x013f +#define mmSDMA2_RLC0_IB_SIZE_BASE_IDX 1 +#define mmSDMA2_RLC0_SKIP_CNTL 0x0140 +#define mmSDMA2_RLC0_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC0_CONTEXT_STATUS 0x0141 +#define mmSDMA2_RLC0_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA2_RLC0_DOORBELL 0x0142 +#define mmSDMA2_RLC0_DOORBELL_BASE_IDX 1 +#define mmSDMA2_RLC0_STATUS 0x0158 +#define mmSDMA2_RLC0_STATUS_BASE_IDX 1 +#define mmSDMA2_RLC0_DOORBELL_LOG 0x0159 +#define mmSDMA2_RLC0_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA2_RLC0_WATERMARK 0x015a +#define mmSDMA2_RLC0_WATERMARK_BASE_IDX 1 +#define mmSDMA2_RLC0_DOORBELL_OFFSET 0x015b +#define mmSDMA2_RLC0_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA2_RLC0_CSA_ADDR_LO 0x015c +#define mmSDMA2_RLC0_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA2_RLC0_CSA_ADDR_HI 0x015d +#define mmSDMA2_RLC0_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA2_RLC0_IB_SUB_REMAIN 0x015f +#define mmSDMA2_RLC0_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA2_RLC0_PREEMPT 0x0160 +#define mmSDMA2_RLC0_PREEMPT_BASE_IDX 1 +#define mmSDMA2_RLC0_DUMMY_REG 0x0161 +#define mmSDMA2_RLC0_DUMMY_REG_BASE_IDX 1 +#define mmSDMA2_RLC0_RB_WPTR_POLL_ADDR_HI 0x0162 +#define mmSDMA2_RLC0_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA2_RLC0_RB_WPTR_POLL_ADDR_LO 0x0163 +#define mmSDMA2_RLC0_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA2_RLC0_RB_AQL_CNTL 0x0164 +#define mmSDMA2_RLC0_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC0_MINOR_PTR_UPDATE 0x0165 +#define mmSDMA2_RLC0_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA2_RLC0_MIDCMD_DATA0 0x0170 +#define mmSDMA2_RLC0_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA2_RLC0_MIDCMD_DATA1 0x0171 +#define mmSDMA2_RLC0_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA2_RLC0_MIDCMD_DATA2 0x0172 +#define mmSDMA2_RLC0_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA2_RLC0_MIDCMD_DATA3 0x0173 +#define mmSDMA2_RLC0_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA2_RLC0_MIDCMD_DATA4 0x0174 +#define mmSDMA2_RLC0_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA2_RLC0_MIDCMD_DATA5 0x0175 +#define mmSDMA2_RLC0_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA2_RLC0_MIDCMD_DATA6 0x0176 +#define mmSDMA2_RLC0_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA2_RLC0_MIDCMD_DATA7 0x0177 +#define 
mmSDMA2_RLC0_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA2_RLC0_MIDCMD_DATA8 0x0178 +#define mmSDMA2_RLC0_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA2_RLC0_MIDCMD_CNTL 0x0179 +#define mmSDMA2_RLC0_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC1_RB_CNTL 0x0188 +#define mmSDMA2_RLC1_RB_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC1_RB_BASE 0x0189 +#define mmSDMA2_RLC1_RB_BASE_BASE_IDX 1 +#define mmSDMA2_RLC1_RB_BASE_HI 0x018a +#define mmSDMA2_RLC1_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA2_RLC1_RB_RPTR 0x018b +#define mmSDMA2_RLC1_RB_RPTR_BASE_IDX 1 +#define mmSDMA2_RLC1_RB_RPTR_HI 0x018c +#define mmSDMA2_RLC1_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA2_RLC1_RB_WPTR 0x018d +#define mmSDMA2_RLC1_RB_WPTR_BASE_IDX 1 +#define mmSDMA2_RLC1_RB_WPTR_HI 0x018e +#define mmSDMA2_RLC1_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA2_RLC1_RB_WPTR_POLL_CNTL 0x018f +#define mmSDMA2_RLC1_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC1_RB_RPTR_ADDR_HI 0x0190 +#define mmSDMA2_RLC1_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA2_RLC1_RB_RPTR_ADDR_LO 0x0191 +#define mmSDMA2_RLC1_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA2_RLC1_IB_CNTL 0x0192 +#define mmSDMA2_RLC1_IB_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC1_IB_RPTR 0x0193 +#define mmSDMA2_RLC1_IB_RPTR_BASE_IDX 1 +#define mmSDMA2_RLC1_IB_OFFSET 0x0194 +#define mmSDMA2_RLC1_IB_OFFSET_BASE_IDX 1 +#define mmSDMA2_RLC1_IB_BASE_LO 0x0195 +#define mmSDMA2_RLC1_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA2_RLC1_IB_BASE_HI 0x0196 +#define mmSDMA2_RLC1_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA2_RLC1_IB_SIZE 0x0197 +#define mmSDMA2_RLC1_IB_SIZE_BASE_IDX 1 +#define mmSDMA2_RLC1_SKIP_CNTL 0x0198 +#define mmSDMA2_RLC1_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC1_CONTEXT_STATUS 0x0199 +#define mmSDMA2_RLC1_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA2_RLC1_DOORBELL 0x019a +#define mmSDMA2_RLC1_DOORBELL_BASE_IDX 1 +#define mmSDMA2_RLC1_STATUS 0x01b0 +#define mmSDMA2_RLC1_STATUS_BASE_IDX 1 +#define mmSDMA2_RLC1_DOORBELL_LOG 0x01b1 +#define mmSDMA2_RLC1_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA2_RLC1_WATERMARK 0x01b2 +#define mmSDMA2_RLC1_WATERMARK_BASE_IDX 1 +#define mmSDMA2_RLC1_DOORBELL_OFFSET 0x01b3 +#define mmSDMA2_RLC1_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA2_RLC1_CSA_ADDR_LO 0x01b4 +#define mmSDMA2_RLC1_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA2_RLC1_CSA_ADDR_HI 0x01b5 +#define mmSDMA2_RLC1_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA2_RLC1_IB_SUB_REMAIN 0x01b7 +#define mmSDMA2_RLC1_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA2_RLC1_PREEMPT 0x01b8 +#define mmSDMA2_RLC1_PREEMPT_BASE_IDX 1 +#define mmSDMA2_RLC1_DUMMY_REG 0x01b9 +#define mmSDMA2_RLC1_DUMMY_REG_BASE_IDX 1 +#define mmSDMA2_RLC1_RB_WPTR_POLL_ADDR_HI 0x01ba +#define mmSDMA2_RLC1_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA2_RLC1_RB_WPTR_POLL_ADDR_LO 0x01bb +#define mmSDMA2_RLC1_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA2_RLC1_RB_AQL_CNTL 0x01bc +#define mmSDMA2_RLC1_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC1_MINOR_PTR_UPDATE 0x01bd +#define mmSDMA2_RLC1_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA2_RLC1_MIDCMD_DATA0 0x01c8 +#define mmSDMA2_RLC1_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA2_RLC1_MIDCMD_DATA1 0x01c9 +#define mmSDMA2_RLC1_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA2_RLC1_MIDCMD_DATA2 0x01ca +#define mmSDMA2_RLC1_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA2_RLC1_MIDCMD_DATA3 0x01cb +#define mmSDMA2_RLC1_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA2_RLC1_MIDCMD_DATA4 0x01cc +#define mmSDMA2_RLC1_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA2_RLC1_MIDCMD_DATA5 0x01cd +#define mmSDMA2_RLC1_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA2_RLC1_MIDCMD_DATA6 0x01ce +#define 
mmSDMA2_RLC1_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA2_RLC1_MIDCMD_DATA7 0x01cf +#define mmSDMA2_RLC1_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA2_RLC1_MIDCMD_DATA8 0x01d0 +#define mmSDMA2_RLC1_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA2_RLC1_MIDCMD_CNTL 0x01d1 +#define mmSDMA2_RLC1_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC2_RB_CNTL 0x01e0 +#define mmSDMA2_RLC2_RB_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC2_RB_BASE 0x01e1 +#define mmSDMA2_RLC2_RB_BASE_BASE_IDX 1 +#define mmSDMA2_RLC2_RB_BASE_HI 0x01e2 +#define mmSDMA2_RLC2_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA2_RLC2_RB_RPTR 0x01e3 +#define mmSDMA2_RLC2_RB_RPTR_BASE_IDX 1 +#define mmSDMA2_RLC2_RB_RPTR_HI 0x01e4 +#define mmSDMA2_RLC2_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA2_RLC2_RB_WPTR 0x01e5 +#define mmSDMA2_RLC2_RB_WPTR_BASE_IDX 1 +#define mmSDMA2_RLC2_RB_WPTR_HI 0x01e6 +#define mmSDMA2_RLC2_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA2_RLC2_RB_WPTR_POLL_CNTL 0x01e7 +#define mmSDMA2_RLC2_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC2_RB_RPTR_ADDR_HI 0x01e8 +#define mmSDMA2_RLC2_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA2_RLC2_RB_RPTR_ADDR_LO 0x01e9 +#define mmSDMA2_RLC2_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA2_RLC2_IB_CNTL 0x01ea +#define mmSDMA2_RLC2_IB_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC2_IB_RPTR 0x01eb +#define mmSDMA2_RLC2_IB_RPTR_BASE_IDX 1 +#define mmSDMA2_RLC2_IB_OFFSET 0x01ec +#define mmSDMA2_RLC2_IB_OFFSET_BASE_IDX 1 +#define mmSDMA2_RLC2_IB_BASE_LO 0x01ed +#define mmSDMA2_RLC2_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA2_RLC2_IB_BASE_HI 0x01ee +#define mmSDMA2_RLC2_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA2_RLC2_IB_SIZE 0x01ef +#define mmSDMA2_RLC2_IB_SIZE_BASE_IDX 1 +#define mmSDMA2_RLC2_SKIP_CNTL 0x01f0 +#define mmSDMA2_RLC2_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC2_CONTEXT_STATUS 0x01f1 +#define mmSDMA2_RLC2_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA2_RLC2_DOORBELL 0x01f2 +#define mmSDMA2_RLC2_DOORBELL_BASE_IDX 1 +#define mmSDMA2_RLC2_STATUS 0x0208 +#define mmSDMA2_RLC2_STATUS_BASE_IDX 1 +#define mmSDMA2_RLC2_DOORBELL_LOG 0x0209 +#define mmSDMA2_RLC2_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA2_RLC2_WATERMARK 0x020a +#define mmSDMA2_RLC2_WATERMARK_BASE_IDX 1 +#define mmSDMA2_RLC2_DOORBELL_OFFSET 0x020b +#define mmSDMA2_RLC2_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA2_RLC2_CSA_ADDR_LO 0x020c +#define mmSDMA2_RLC2_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA2_RLC2_CSA_ADDR_HI 0x020d +#define mmSDMA2_RLC2_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA2_RLC2_IB_SUB_REMAIN 0x020f +#define mmSDMA2_RLC2_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA2_RLC2_PREEMPT 0x0210 +#define mmSDMA2_RLC2_PREEMPT_BASE_IDX 1 +#define mmSDMA2_RLC2_DUMMY_REG 0x0211 +#define mmSDMA2_RLC2_DUMMY_REG_BASE_IDX 1 +#define mmSDMA2_RLC2_RB_WPTR_POLL_ADDR_HI 0x0212 +#define mmSDMA2_RLC2_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA2_RLC2_RB_WPTR_POLL_ADDR_LO 0x0213 +#define mmSDMA2_RLC2_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA2_RLC2_RB_AQL_CNTL 0x0214 +#define mmSDMA2_RLC2_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC2_MINOR_PTR_UPDATE 0x0215 +#define mmSDMA2_RLC2_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA2_RLC2_MIDCMD_DATA0 0x0220 +#define mmSDMA2_RLC2_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA2_RLC2_MIDCMD_DATA1 0x0221 +#define mmSDMA2_RLC2_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA2_RLC2_MIDCMD_DATA2 0x0222 +#define mmSDMA2_RLC2_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA2_RLC2_MIDCMD_DATA3 0x0223 +#define mmSDMA2_RLC2_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA2_RLC2_MIDCMD_DATA4 0x0224 +#define mmSDMA2_RLC2_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA2_RLC2_MIDCMD_DATA5 0x0225 +#define 
mmSDMA2_RLC2_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA2_RLC2_MIDCMD_DATA6 0x0226 +#define mmSDMA2_RLC2_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA2_RLC2_MIDCMD_DATA7 0x0227 +#define mmSDMA2_RLC2_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA2_RLC2_MIDCMD_DATA8 0x0228 +#define mmSDMA2_RLC2_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA2_RLC2_MIDCMD_CNTL 0x0229 +#define mmSDMA2_RLC2_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC3_RB_CNTL 0x0238 +#define mmSDMA2_RLC3_RB_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC3_RB_BASE 0x0239 +#define mmSDMA2_RLC3_RB_BASE_BASE_IDX 1 +#define mmSDMA2_RLC3_RB_BASE_HI 0x023a +#define mmSDMA2_RLC3_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA2_RLC3_RB_RPTR 0x023b +#define mmSDMA2_RLC3_RB_RPTR_BASE_IDX 1 +#define mmSDMA2_RLC3_RB_RPTR_HI 0x023c +#define mmSDMA2_RLC3_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA2_RLC3_RB_WPTR 0x023d +#define mmSDMA2_RLC3_RB_WPTR_BASE_IDX 1 +#define mmSDMA2_RLC3_RB_WPTR_HI 0x023e +#define mmSDMA2_RLC3_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA2_RLC3_RB_WPTR_POLL_CNTL 0x023f +#define mmSDMA2_RLC3_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC3_RB_RPTR_ADDR_HI 0x0240 +#define mmSDMA2_RLC3_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA2_RLC3_RB_RPTR_ADDR_LO 0x0241 +#define mmSDMA2_RLC3_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA2_RLC3_IB_CNTL 0x0242 +#define mmSDMA2_RLC3_IB_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC3_IB_RPTR 0x0243 +#define mmSDMA2_RLC3_IB_RPTR_BASE_IDX 1 +#define mmSDMA2_RLC3_IB_OFFSET 0x0244 +#define mmSDMA2_RLC3_IB_OFFSET_BASE_IDX 1 +#define mmSDMA2_RLC3_IB_BASE_LO 0x0245 +#define mmSDMA2_RLC3_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA2_RLC3_IB_BASE_HI 0x0246 +#define mmSDMA2_RLC3_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA2_RLC3_IB_SIZE 0x0247 +#define mmSDMA2_RLC3_IB_SIZE_BASE_IDX 1 +#define mmSDMA2_RLC3_SKIP_CNTL 0x0248 +#define mmSDMA2_RLC3_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC3_CONTEXT_STATUS 0x0249 +#define mmSDMA2_RLC3_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA2_RLC3_DOORBELL 0x024a +#define mmSDMA2_RLC3_DOORBELL_BASE_IDX 1 +#define mmSDMA2_RLC3_STATUS 0x0260 +#define mmSDMA2_RLC3_STATUS_BASE_IDX 1 +#define mmSDMA2_RLC3_DOORBELL_LOG 0x0261 +#define mmSDMA2_RLC3_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA2_RLC3_WATERMARK 0x0262 +#define mmSDMA2_RLC3_WATERMARK_BASE_IDX 1 +#define mmSDMA2_RLC3_DOORBELL_OFFSET 0x0263 +#define mmSDMA2_RLC3_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA2_RLC3_CSA_ADDR_LO 0x0264 +#define mmSDMA2_RLC3_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA2_RLC3_CSA_ADDR_HI 0x0265 +#define mmSDMA2_RLC3_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA2_RLC3_IB_SUB_REMAIN 0x0267 +#define mmSDMA2_RLC3_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA2_RLC3_PREEMPT 0x0268 +#define mmSDMA2_RLC3_PREEMPT_BASE_IDX 1 +#define mmSDMA2_RLC3_DUMMY_REG 0x0269 +#define mmSDMA2_RLC3_DUMMY_REG_BASE_IDX 1 +#define mmSDMA2_RLC3_RB_WPTR_POLL_ADDR_HI 0x026a +#define mmSDMA2_RLC3_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA2_RLC3_RB_WPTR_POLL_ADDR_LO 0x026b +#define mmSDMA2_RLC3_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA2_RLC3_RB_AQL_CNTL 0x026c +#define mmSDMA2_RLC3_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC3_MINOR_PTR_UPDATE 0x026d +#define mmSDMA2_RLC3_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA2_RLC3_MIDCMD_DATA0 0x0278 +#define mmSDMA2_RLC3_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA2_RLC3_MIDCMD_DATA1 0x0279 +#define mmSDMA2_RLC3_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA2_RLC3_MIDCMD_DATA2 0x027a +#define mmSDMA2_RLC3_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA2_RLC3_MIDCMD_DATA3 0x027b +#define mmSDMA2_RLC3_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA2_RLC3_MIDCMD_DATA4 0x027c +#define 
mmSDMA2_RLC3_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA2_RLC3_MIDCMD_DATA5 0x027d +#define mmSDMA2_RLC3_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA2_RLC3_MIDCMD_DATA6 0x027e +#define mmSDMA2_RLC3_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA2_RLC3_MIDCMD_DATA7 0x027f +#define mmSDMA2_RLC3_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA2_RLC3_MIDCMD_DATA8 0x0280 +#define mmSDMA2_RLC3_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA2_RLC3_MIDCMD_CNTL 0x0281 +#define mmSDMA2_RLC3_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC4_RB_CNTL 0x0290 +#define mmSDMA2_RLC4_RB_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC4_RB_BASE 0x0291 +#define mmSDMA2_RLC4_RB_BASE_BASE_IDX 1 +#define mmSDMA2_RLC4_RB_BASE_HI 0x0292 +#define mmSDMA2_RLC4_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA2_RLC4_RB_RPTR 0x0293 +#define mmSDMA2_RLC4_RB_RPTR_BASE_IDX 1 +#define mmSDMA2_RLC4_RB_RPTR_HI 0x0294 +#define mmSDMA2_RLC4_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA2_RLC4_RB_WPTR 0x0295 +#define mmSDMA2_RLC4_RB_WPTR_BASE_IDX 1 +#define mmSDMA2_RLC4_RB_WPTR_HI 0x0296 +#define mmSDMA2_RLC4_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA2_RLC4_RB_WPTR_POLL_CNTL 0x0297 +#define mmSDMA2_RLC4_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC4_RB_RPTR_ADDR_HI 0x0298 +#define mmSDMA2_RLC4_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA2_RLC4_RB_RPTR_ADDR_LO 0x0299 +#define mmSDMA2_RLC4_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA2_RLC4_IB_CNTL 0x029a +#define mmSDMA2_RLC4_IB_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC4_IB_RPTR 0x029b +#define mmSDMA2_RLC4_IB_RPTR_BASE_IDX 1 +#define mmSDMA2_RLC4_IB_OFFSET 0x029c +#define mmSDMA2_RLC4_IB_OFFSET_BASE_IDX 1 +#define mmSDMA2_RLC4_IB_BASE_LO 0x029d +#define mmSDMA2_RLC4_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA2_RLC4_IB_BASE_HI 0x029e +#define mmSDMA2_RLC4_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA2_RLC4_IB_SIZE 0x029f +#define mmSDMA2_RLC4_IB_SIZE_BASE_IDX 1 +#define mmSDMA2_RLC4_SKIP_CNTL 0x02a0 +#define mmSDMA2_RLC4_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC4_CONTEXT_STATUS 0x02a1 +#define mmSDMA2_RLC4_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA2_RLC4_DOORBELL 0x02a2 +#define mmSDMA2_RLC4_DOORBELL_BASE_IDX 1 +#define mmSDMA2_RLC4_STATUS 0x02b8 +#define mmSDMA2_RLC4_STATUS_BASE_IDX 1 +#define mmSDMA2_RLC4_DOORBELL_LOG 0x02b9 +#define mmSDMA2_RLC4_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA2_RLC4_WATERMARK 0x02ba +#define mmSDMA2_RLC4_WATERMARK_BASE_IDX 1 +#define mmSDMA2_RLC4_DOORBELL_OFFSET 0x02bb +#define mmSDMA2_RLC4_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA2_RLC4_CSA_ADDR_LO 0x02bc +#define mmSDMA2_RLC4_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA2_RLC4_CSA_ADDR_HI 0x02bd +#define mmSDMA2_RLC4_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA2_RLC4_IB_SUB_REMAIN 0x02bf +#define mmSDMA2_RLC4_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA2_RLC4_PREEMPT 0x02c0 +#define mmSDMA2_RLC4_PREEMPT_BASE_IDX 1 +#define mmSDMA2_RLC4_DUMMY_REG 0x02c1 +#define mmSDMA2_RLC4_DUMMY_REG_BASE_IDX 1 +#define mmSDMA2_RLC4_RB_WPTR_POLL_ADDR_HI 0x02c2 +#define mmSDMA2_RLC4_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA2_RLC4_RB_WPTR_POLL_ADDR_LO 0x02c3 +#define mmSDMA2_RLC4_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA2_RLC4_RB_AQL_CNTL 0x02c4 +#define mmSDMA2_RLC4_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC4_MINOR_PTR_UPDATE 0x02c5 +#define mmSDMA2_RLC4_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA2_RLC4_MIDCMD_DATA0 0x02d0 +#define mmSDMA2_RLC4_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA2_RLC4_MIDCMD_DATA1 0x02d1 +#define mmSDMA2_RLC4_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA2_RLC4_MIDCMD_DATA2 0x02d2 +#define mmSDMA2_RLC4_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA2_RLC4_MIDCMD_DATA3 0x02d3 +#define 
mmSDMA2_RLC4_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA2_RLC4_MIDCMD_DATA4 0x02d4 +#define mmSDMA2_RLC4_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA2_RLC4_MIDCMD_DATA5 0x02d5 +#define mmSDMA2_RLC4_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA2_RLC4_MIDCMD_DATA6 0x02d6 +#define mmSDMA2_RLC4_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA2_RLC4_MIDCMD_DATA7 0x02d7 +#define mmSDMA2_RLC4_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA2_RLC4_MIDCMD_DATA8 0x02d8 +#define mmSDMA2_RLC4_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA2_RLC4_MIDCMD_CNTL 0x02d9 +#define mmSDMA2_RLC4_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC5_RB_CNTL 0x02e8 +#define mmSDMA2_RLC5_RB_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC5_RB_BASE 0x02e9 +#define mmSDMA2_RLC5_RB_BASE_BASE_IDX 1 +#define mmSDMA2_RLC5_RB_BASE_HI 0x02ea +#define mmSDMA2_RLC5_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA2_RLC5_RB_RPTR 0x02eb +#define mmSDMA2_RLC5_RB_RPTR_BASE_IDX 1 +#define mmSDMA2_RLC5_RB_RPTR_HI 0x02ec +#define mmSDMA2_RLC5_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA2_RLC5_RB_WPTR 0x02ed +#define mmSDMA2_RLC5_RB_WPTR_BASE_IDX 1 +#define mmSDMA2_RLC5_RB_WPTR_HI 0x02ee +#define mmSDMA2_RLC5_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA2_RLC5_RB_WPTR_POLL_CNTL 0x02ef +#define mmSDMA2_RLC5_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC5_RB_RPTR_ADDR_HI 0x02f0 +#define mmSDMA2_RLC5_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA2_RLC5_RB_RPTR_ADDR_LO 0x02f1 +#define mmSDMA2_RLC5_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA2_RLC5_IB_CNTL 0x02f2 +#define mmSDMA2_RLC5_IB_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC5_IB_RPTR 0x02f3 +#define mmSDMA2_RLC5_IB_RPTR_BASE_IDX 1 +#define mmSDMA2_RLC5_IB_OFFSET 0x02f4 +#define mmSDMA2_RLC5_IB_OFFSET_BASE_IDX 1 +#define mmSDMA2_RLC5_IB_BASE_LO 0x02f5 +#define mmSDMA2_RLC5_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA2_RLC5_IB_BASE_HI 0x02f6 +#define mmSDMA2_RLC5_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA2_RLC5_IB_SIZE 0x02f7 +#define mmSDMA2_RLC5_IB_SIZE_BASE_IDX 1 +#define mmSDMA2_RLC5_SKIP_CNTL 0x02f8 +#define mmSDMA2_RLC5_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC5_CONTEXT_STATUS 0x02f9 +#define mmSDMA2_RLC5_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA2_RLC5_DOORBELL 0x02fa +#define mmSDMA2_RLC5_DOORBELL_BASE_IDX 1 +#define mmSDMA2_RLC5_STATUS 0x0310 +#define mmSDMA2_RLC5_STATUS_BASE_IDX 1 +#define mmSDMA2_RLC5_DOORBELL_LOG 0x0311 +#define mmSDMA2_RLC5_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA2_RLC5_WATERMARK 0x0312 +#define mmSDMA2_RLC5_WATERMARK_BASE_IDX 1 +#define mmSDMA2_RLC5_DOORBELL_OFFSET 0x0313 +#define mmSDMA2_RLC5_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA2_RLC5_CSA_ADDR_LO 0x0314 +#define mmSDMA2_RLC5_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA2_RLC5_CSA_ADDR_HI 0x0315 +#define mmSDMA2_RLC5_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA2_RLC5_IB_SUB_REMAIN 0x0317 +#define mmSDMA2_RLC5_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA2_RLC5_PREEMPT 0x0318 +#define mmSDMA2_RLC5_PREEMPT_BASE_IDX 1 +#define mmSDMA2_RLC5_DUMMY_REG 0x0319 +#define mmSDMA2_RLC5_DUMMY_REG_BASE_IDX 1 +#define mmSDMA2_RLC5_RB_WPTR_POLL_ADDR_HI 0x031a +#define mmSDMA2_RLC5_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA2_RLC5_RB_WPTR_POLL_ADDR_LO 0x031b +#define mmSDMA2_RLC5_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA2_RLC5_RB_AQL_CNTL 0x031c +#define mmSDMA2_RLC5_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC5_MINOR_PTR_UPDATE 0x031d +#define mmSDMA2_RLC5_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA2_RLC5_MIDCMD_DATA0 0x0328 +#define mmSDMA2_RLC5_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA2_RLC5_MIDCMD_DATA1 0x0329 +#define mmSDMA2_RLC5_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA2_RLC5_MIDCMD_DATA2 0x032a +#define 
mmSDMA2_RLC5_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA2_RLC5_MIDCMD_DATA3 0x032b +#define mmSDMA2_RLC5_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA2_RLC5_MIDCMD_DATA4 0x032c +#define mmSDMA2_RLC5_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA2_RLC5_MIDCMD_DATA5 0x032d +#define mmSDMA2_RLC5_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA2_RLC5_MIDCMD_DATA6 0x032e +#define mmSDMA2_RLC5_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA2_RLC5_MIDCMD_DATA7 0x032f +#define mmSDMA2_RLC5_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA2_RLC5_MIDCMD_DATA8 0x0330 +#define mmSDMA2_RLC5_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA2_RLC5_MIDCMD_CNTL 0x0331 +#define mmSDMA2_RLC5_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC6_RB_CNTL 0x0340 +#define mmSDMA2_RLC6_RB_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC6_RB_BASE 0x0341 +#define mmSDMA2_RLC6_RB_BASE_BASE_IDX 1 +#define mmSDMA2_RLC6_RB_BASE_HI 0x0342 +#define mmSDMA2_RLC6_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA2_RLC6_RB_RPTR 0x0343 +#define mmSDMA2_RLC6_RB_RPTR_BASE_IDX 1 +#define mmSDMA2_RLC6_RB_RPTR_HI 0x0344 +#define mmSDMA2_RLC6_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA2_RLC6_RB_WPTR 0x0345 +#define mmSDMA2_RLC6_RB_WPTR_BASE_IDX 1 +#define mmSDMA2_RLC6_RB_WPTR_HI 0x0346 +#define mmSDMA2_RLC6_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA2_RLC6_RB_WPTR_POLL_CNTL 0x0347 +#define mmSDMA2_RLC6_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC6_RB_RPTR_ADDR_HI 0x0348 +#define mmSDMA2_RLC6_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA2_RLC6_RB_RPTR_ADDR_LO 0x0349 +#define mmSDMA2_RLC6_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA2_RLC6_IB_CNTL 0x034a +#define mmSDMA2_RLC6_IB_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC6_IB_RPTR 0x034b +#define mmSDMA2_RLC6_IB_RPTR_BASE_IDX 1 +#define mmSDMA2_RLC6_IB_OFFSET 0x034c +#define mmSDMA2_RLC6_IB_OFFSET_BASE_IDX 1 +#define mmSDMA2_RLC6_IB_BASE_LO 0x034d +#define mmSDMA2_RLC6_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA2_RLC6_IB_BASE_HI 0x034e +#define mmSDMA2_RLC6_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA2_RLC6_IB_SIZE 0x034f +#define mmSDMA2_RLC6_IB_SIZE_BASE_IDX 1 +#define mmSDMA2_RLC6_SKIP_CNTL 0x0350 +#define mmSDMA2_RLC6_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC6_CONTEXT_STATUS 0x0351 +#define mmSDMA2_RLC6_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA2_RLC6_DOORBELL 0x0352 +#define mmSDMA2_RLC6_DOORBELL_BASE_IDX 1 +#define mmSDMA2_RLC6_STATUS 0x0368 +#define mmSDMA2_RLC6_STATUS_BASE_IDX 1 +#define mmSDMA2_RLC6_DOORBELL_LOG 0x0369 +#define mmSDMA2_RLC6_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA2_RLC6_WATERMARK 0x036a +#define mmSDMA2_RLC6_WATERMARK_BASE_IDX 1 +#define mmSDMA2_RLC6_DOORBELL_OFFSET 0x036b +#define mmSDMA2_RLC6_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA2_RLC6_CSA_ADDR_LO 0x036c +#define mmSDMA2_RLC6_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA2_RLC6_CSA_ADDR_HI 0x036d +#define mmSDMA2_RLC6_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA2_RLC6_IB_SUB_REMAIN 0x036f +#define mmSDMA2_RLC6_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA2_RLC6_PREEMPT 0x0370 +#define mmSDMA2_RLC6_PREEMPT_BASE_IDX 1 +#define mmSDMA2_RLC6_DUMMY_REG 0x0371 +#define mmSDMA2_RLC6_DUMMY_REG_BASE_IDX 1 +#define mmSDMA2_RLC6_RB_WPTR_POLL_ADDR_HI 0x0372 +#define mmSDMA2_RLC6_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA2_RLC6_RB_WPTR_POLL_ADDR_LO 0x0373 +#define mmSDMA2_RLC6_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA2_RLC6_RB_AQL_CNTL 0x0374 +#define mmSDMA2_RLC6_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC6_MINOR_PTR_UPDATE 0x0375 +#define mmSDMA2_RLC6_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA2_RLC6_MIDCMD_DATA0 0x0380 +#define mmSDMA2_RLC6_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA2_RLC6_MIDCMD_DATA1 0x0381 +#define 
mmSDMA2_RLC6_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA2_RLC6_MIDCMD_DATA2 0x0382 +#define mmSDMA2_RLC6_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA2_RLC6_MIDCMD_DATA3 0x0383 +#define mmSDMA2_RLC6_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA2_RLC6_MIDCMD_DATA4 0x0384 +#define mmSDMA2_RLC6_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA2_RLC6_MIDCMD_DATA5 0x0385 +#define mmSDMA2_RLC6_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA2_RLC6_MIDCMD_DATA6 0x0386 +#define mmSDMA2_RLC6_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA2_RLC6_MIDCMD_DATA7 0x0387 +#define mmSDMA2_RLC6_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA2_RLC6_MIDCMD_DATA8 0x0388 +#define mmSDMA2_RLC6_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA2_RLC6_MIDCMD_CNTL 0x0389 +#define mmSDMA2_RLC6_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC7_RB_CNTL 0x0398 +#define mmSDMA2_RLC7_RB_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC7_RB_BASE 0x0399 +#define mmSDMA2_RLC7_RB_BASE_BASE_IDX 1 +#define mmSDMA2_RLC7_RB_BASE_HI 0x039a +#define mmSDMA2_RLC7_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA2_RLC7_RB_RPTR 0x039b +#define mmSDMA2_RLC7_RB_RPTR_BASE_IDX 1 +#define mmSDMA2_RLC7_RB_RPTR_HI 0x039c +#define mmSDMA2_RLC7_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA2_RLC7_RB_WPTR 0x039d +#define mmSDMA2_RLC7_RB_WPTR_BASE_IDX 1 +#define mmSDMA2_RLC7_RB_WPTR_HI 0x039e +#define mmSDMA2_RLC7_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA2_RLC7_RB_WPTR_POLL_CNTL 0x039f +#define mmSDMA2_RLC7_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC7_RB_RPTR_ADDR_HI 0x03a0 +#define mmSDMA2_RLC7_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA2_RLC7_RB_RPTR_ADDR_LO 0x03a1 +#define mmSDMA2_RLC7_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA2_RLC7_IB_CNTL 0x03a2 +#define mmSDMA2_RLC7_IB_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC7_IB_RPTR 0x03a3 +#define mmSDMA2_RLC7_IB_RPTR_BASE_IDX 1 +#define mmSDMA2_RLC7_IB_OFFSET 0x03a4 +#define mmSDMA2_RLC7_IB_OFFSET_BASE_IDX 1 +#define mmSDMA2_RLC7_IB_BASE_LO 0x03a5 +#define mmSDMA2_RLC7_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA2_RLC7_IB_BASE_HI 0x03a6 +#define mmSDMA2_RLC7_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA2_RLC7_IB_SIZE 0x03a7 +#define mmSDMA2_RLC7_IB_SIZE_BASE_IDX 1 +#define mmSDMA2_RLC7_SKIP_CNTL 0x03a8 +#define mmSDMA2_RLC7_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC7_CONTEXT_STATUS 0x03a9 +#define mmSDMA2_RLC7_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA2_RLC7_DOORBELL 0x03aa +#define mmSDMA2_RLC7_DOORBELL_BASE_IDX 1 +#define mmSDMA2_RLC7_STATUS 0x03c0 +#define mmSDMA2_RLC7_STATUS_BASE_IDX 1 +#define mmSDMA2_RLC7_DOORBELL_LOG 0x03c1 +#define mmSDMA2_RLC7_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA2_RLC7_WATERMARK 0x03c2 +#define mmSDMA2_RLC7_WATERMARK_BASE_IDX 1 +#define mmSDMA2_RLC7_DOORBELL_OFFSET 0x03c3 +#define mmSDMA2_RLC7_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA2_RLC7_CSA_ADDR_LO 0x03c4 +#define mmSDMA2_RLC7_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA2_RLC7_CSA_ADDR_HI 0x03c5 +#define mmSDMA2_RLC7_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA2_RLC7_IB_SUB_REMAIN 0x03c7 +#define mmSDMA2_RLC7_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA2_RLC7_PREEMPT 0x03c8 +#define mmSDMA2_RLC7_PREEMPT_BASE_IDX 1 +#define mmSDMA2_RLC7_DUMMY_REG 0x03c9 +#define mmSDMA2_RLC7_DUMMY_REG_BASE_IDX 1 +#define mmSDMA2_RLC7_RB_WPTR_POLL_ADDR_HI 0x03ca +#define mmSDMA2_RLC7_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA2_RLC7_RB_WPTR_POLL_ADDR_LO 0x03cb +#define mmSDMA2_RLC7_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA2_RLC7_RB_AQL_CNTL 0x03cc +#define mmSDMA2_RLC7_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA2_RLC7_MINOR_PTR_UPDATE 0x03cd +#define mmSDMA2_RLC7_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA2_RLC7_MIDCMD_DATA0 0x03d8 +#define 
mmSDMA2_RLC7_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA2_RLC7_MIDCMD_DATA1 0x03d9 +#define mmSDMA2_RLC7_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA2_RLC7_MIDCMD_DATA2 0x03da +#define mmSDMA2_RLC7_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA2_RLC7_MIDCMD_DATA3 0x03db +#define mmSDMA2_RLC7_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA2_RLC7_MIDCMD_DATA4 0x03dc +#define mmSDMA2_RLC7_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA2_RLC7_MIDCMD_DATA5 0x03dd +#define mmSDMA2_RLC7_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA2_RLC7_MIDCMD_DATA6 0x03de +#define mmSDMA2_RLC7_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA2_RLC7_MIDCMD_DATA7 0x03df +#define mmSDMA2_RLC7_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA2_RLC7_MIDCMD_DATA8 0x03e0 +#define mmSDMA2_RLC7_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA2_RLC7_MIDCMD_CNTL 0x03e1 +#define mmSDMA2_RLC7_MIDCMD_CNTL_BASE_IDX 1 + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma2/sdma2_4_2_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/sdma2/sdma2_4_2_2_sh_mask.h new file mode 100644 index 000000000000..be10d5d3347e --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/sdma2/sdma2_4_2_2_sh_mask.h @@ -0,0 +1,2956 @@ +/* + * Copyright (C) 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef _sdma2_4_2_2_SH_MASK_HEADER +#define _sdma2_4_2_2_SH_MASK_HEADER + + +// addressBlock: sdma2_sdma2dec +//SDMA2_UCODE_ADDR +#define SDMA2_UCODE_ADDR__VALUE__SHIFT 0x0 +#define SDMA2_UCODE_ADDR__VALUE_MASK 0x00001FFFL +//SDMA2_UCODE_DATA +#define SDMA2_UCODE_DATA__VALUE__SHIFT 0x0 +#define SDMA2_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL +//SDMA2_VM_CNTL +#define SDMA2_VM_CNTL__CMD__SHIFT 0x0 +#define SDMA2_VM_CNTL__CMD_MASK 0x0000000FL +//SDMA2_VM_CTX_LO +#define SDMA2_VM_CTX_LO__ADDR__SHIFT 0x2 +#define SDMA2_VM_CTX_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA2_VM_CTX_HI +#define SDMA2_VM_CTX_HI__ADDR__SHIFT 0x0 +#define SDMA2_VM_CTX_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA2_ACTIVE_FCN_ID +#define SDMA2_ACTIVE_FCN_ID__VFID__SHIFT 0x0 +#define SDMA2_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4 +#define SDMA2_ACTIVE_FCN_ID__VF__SHIFT 0x1f +#define SDMA2_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL +#define SDMA2_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L +#define SDMA2_ACTIVE_FCN_ID__VF_MASK 0x80000000L +//SDMA2_VM_CTX_CNTL +#define SDMA2_VM_CTX_CNTL__PRIV__SHIFT 0x0 +#define SDMA2_VM_CTX_CNTL__VMID__SHIFT 0x4 +#define SDMA2_VM_CTX_CNTL__PRIV_MASK 0x00000001L +#define SDMA2_VM_CTX_CNTL__VMID_MASK 0x000000F0L +//SDMA2_VIRT_RESET_REQ +#define SDMA2_VIRT_RESET_REQ__VF__SHIFT 0x0 +#define SDMA2_VIRT_RESET_REQ__PF__SHIFT 0x1f +#define SDMA2_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL +#define SDMA2_VIRT_RESET_REQ__PF_MASK 0x80000000L +//SDMA2_VF_ENABLE +#define SDMA2_VF_ENABLE__VF_ENABLE__SHIFT 0x0 +#define SDMA2_VF_ENABLE__VF_ENABLE_MASK 0x00000001L +//SDMA2_CONTEXT_REG_TYPE0 +#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_RB_CNTL__SHIFT 0x0 +#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_RB_BASE__SHIFT 0x1 +#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_RB_BASE_HI__SHIFT 0x2 +#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_RB_RPTR__SHIFT 0x3 +#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_RB_RPTR_HI__SHIFT 0x4 +#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_RB_WPTR__SHIFT 0x5 +#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_RB_WPTR_HI__SHIFT 0x6 +#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_RB_WPTR_POLL_CNTL__SHIFT 0x7 +#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_RB_RPTR_ADDR_HI__SHIFT 0x8 +#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_RB_RPTR_ADDR_LO__SHIFT 0x9 +#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_IB_CNTL__SHIFT 0xa +#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_IB_RPTR__SHIFT 0xb +#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_IB_OFFSET__SHIFT 0xc +#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_IB_BASE_LO__SHIFT 0xd +#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_IB_BASE_HI__SHIFT 0xe +#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_IB_SIZE__SHIFT 0xf +#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_SKIP_CNTL__SHIFT 0x10 +#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_CONTEXT_STATUS__SHIFT 0x11 +#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_DOORBELL__SHIFT 0x12 +#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_CONTEXT_CNTL__SHIFT 0x13 +#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_RB_CNTL_MASK 0x00000001L +#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_RB_BASE_MASK 0x00000002L +#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_RB_BASE_HI_MASK 0x00000004L +#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_RB_RPTR_MASK 0x00000008L +#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_RB_RPTR_HI_MASK 0x00000010L +#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_RB_WPTR_MASK 0x00000020L +#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_RB_WPTR_HI_MASK 0x00000040L +#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_RB_WPTR_POLL_CNTL_MASK 0x00000080L +#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_RB_RPTR_ADDR_HI_MASK 0x00000100L +#define 
SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_RB_RPTR_ADDR_LO_MASK 0x00000200L +#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_IB_CNTL_MASK 0x00000400L +#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_IB_RPTR_MASK 0x00000800L +#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_IB_OFFSET_MASK 0x00001000L +#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_IB_BASE_LO_MASK 0x00002000L +#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_IB_BASE_HI_MASK 0x00004000L +#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_IB_SIZE_MASK 0x00008000L +#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_SKIP_CNTL_MASK 0x00010000L +#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_CONTEXT_STATUS_MASK 0x00020000L +#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_DOORBELL_MASK 0x00040000L +#define SDMA2_CONTEXT_REG_TYPE0__SDMA2_GFX_CONTEXT_CNTL_MASK 0x00080000L +//SDMA2_CONTEXT_REG_TYPE1 +#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_STATUS__SHIFT 0x8 +#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_DOORBELL_LOG__SHIFT 0x9 +#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_WATERMARK__SHIFT 0xa +#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_DOORBELL_OFFSET__SHIFT 0xb +#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_CSA_ADDR_LO__SHIFT 0xc +#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_CSA_ADDR_HI__SHIFT 0xd +#define SDMA2_CONTEXT_REG_TYPE1__VOID_REG2__SHIFT 0xe +#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_IB_SUB_REMAIN__SHIFT 0xf +#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_PREEMPT__SHIFT 0x10 +#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_DUMMY_REG__SHIFT 0x11 +#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_RB_WPTR_POLL_ADDR_HI__SHIFT 0x12 +#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_RB_WPTR_POLL_ADDR_LO__SHIFT 0x13 +#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_RB_AQL_CNTL__SHIFT 0x14 +#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_MINOR_PTR_UPDATE__SHIFT 0x15 +#define SDMA2_CONTEXT_REG_TYPE1__RESERVED__SHIFT 0x16 +#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_STATUS_MASK 0x00000100L +#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_DOORBELL_LOG_MASK 0x00000200L +#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_WATERMARK_MASK 0x00000400L +#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_DOORBELL_OFFSET_MASK 0x00000800L +#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_CSA_ADDR_LO_MASK 0x00001000L +#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_CSA_ADDR_HI_MASK 0x00002000L +#define SDMA2_CONTEXT_REG_TYPE1__VOID_REG2_MASK 0x00004000L +#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_IB_SUB_REMAIN_MASK 0x00008000L +#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_PREEMPT_MASK 0x00010000L +#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_DUMMY_REG_MASK 0x00020000L +#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_RB_WPTR_POLL_ADDR_HI_MASK 0x00040000L +#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_RB_WPTR_POLL_ADDR_LO_MASK 0x00080000L +#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_RB_AQL_CNTL_MASK 0x00100000L +#define SDMA2_CONTEXT_REG_TYPE1__SDMA2_GFX_MINOR_PTR_UPDATE_MASK 0x00200000L +#define SDMA2_CONTEXT_REG_TYPE1__RESERVED_MASK 0xFFC00000L +//SDMA2_CONTEXT_REG_TYPE2 +#define SDMA2_CONTEXT_REG_TYPE2__SDMA2_GFX_MIDCMD_DATA0__SHIFT 0x0 +#define SDMA2_CONTEXT_REG_TYPE2__SDMA2_GFX_MIDCMD_DATA1__SHIFT 0x1 +#define SDMA2_CONTEXT_REG_TYPE2__SDMA2_GFX_MIDCMD_DATA2__SHIFT 0x2 +#define SDMA2_CONTEXT_REG_TYPE2__SDMA2_GFX_MIDCMD_DATA3__SHIFT 0x3 +#define SDMA2_CONTEXT_REG_TYPE2__SDMA2_GFX_MIDCMD_DATA4__SHIFT 0x4 +#define SDMA2_CONTEXT_REG_TYPE2__SDMA2_GFX_MIDCMD_DATA5__SHIFT 0x5 +#define SDMA2_CONTEXT_REG_TYPE2__SDMA2_GFX_MIDCMD_DATA6__SHIFT 0x6 +#define SDMA2_CONTEXT_REG_TYPE2__SDMA2_GFX_MIDCMD_DATA7__SHIFT 0x7 +#define SDMA2_CONTEXT_REG_TYPE2__SDMA2_GFX_MIDCMD_DATA8__SHIFT 0x8 +#define 
SDMA2_CONTEXT_REG_TYPE2__SDMA2_GFX_MIDCMD_CNTL__SHIFT 0x9 +#define SDMA2_CONTEXT_REG_TYPE2__RESERVED__SHIFT 0xa +#define SDMA2_CONTEXT_REG_TYPE2__SDMA2_GFX_MIDCMD_DATA0_MASK 0x00000001L +#define SDMA2_CONTEXT_REG_TYPE2__SDMA2_GFX_MIDCMD_DATA1_MASK 0x00000002L +#define SDMA2_CONTEXT_REG_TYPE2__SDMA2_GFX_MIDCMD_DATA2_MASK 0x00000004L +#define SDMA2_CONTEXT_REG_TYPE2__SDMA2_GFX_MIDCMD_DATA3_MASK 0x00000008L +#define SDMA2_CONTEXT_REG_TYPE2__SDMA2_GFX_MIDCMD_DATA4_MASK 0x00000010L +#define SDMA2_CONTEXT_REG_TYPE2__SDMA2_GFX_MIDCMD_DATA5_MASK 0x00000020L +#define SDMA2_CONTEXT_REG_TYPE2__SDMA2_GFX_MIDCMD_DATA6_MASK 0x00000040L +#define SDMA2_CONTEXT_REG_TYPE2__SDMA2_GFX_MIDCMD_DATA7_MASK 0x00000080L +#define SDMA2_CONTEXT_REG_TYPE2__SDMA2_GFX_MIDCMD_DATA8_MASK 0x00000100L +#define SDMA2_CONTEXT_REG_TYPE2__SDMA2_GFX_MIDCMD_CNTL_MASK 0x00000200L +#define SDMA2_CONTEXT_REG_TYPE2__RESERVED_MASK 0xFFFFFC00L +//SDMA2_CONTEXT_REG_TYPE3 +#define SDMA2_CONTEXT_REG_TYPE3__RESERVED__SHIFT 0x0 +#define SDMA2_CONTEXT_REG_TYPE3__RESERVED_MASK 0xFFFFFFFFL +//SDMA2_PUB_REG_TYPE0 +#define SDMA2_PUB_REG_TYPE0__SDMA2_UCODE_ADDR__SHIFT 0x0 +#define SDMA2_PUB_REG_TYPE0__SDMA2_UCODE_DATA__SHIFT 0x1 +#define SDMA2_PUB_REG_TYPE0__RESERVED3__SHIFT 0x3 +#define SDMA2_PUB_REG_TYPE0__SDMA2_VM_CNTL__SHIFT 0x4 +#define SDMA2_PUB_REG_TYPE0__SDMA2_VM_CTX_LO__SHIFT 0x5 +#define SDMA2_PUB_REG_TYPE0__SDMA2_VM_CTX_HI__SHIFT 0x6 +#define SDMA2_PUB_REG_TYPE0__SDMA2_ACTIVE_FCN_ID__SHIFT 0x7 +#define SDMA2_PUB_REG_TYPE0__SDMA2_VM_CTX_CNTL__SHIFT 0x8 +#define SDMA2_PUB_REG_TYPE0__SDMA2_VIRT_RESET_REQ__SHIFT 0x9 +#define SDMA2_PUB_REG_TYPE0__RESERVED10__SHIFT 0xa +#define SDMA2_PUB_REG_TYPE0__SDMA2_CONTEXT_REG_TYPE0__SHIFT 0xb +#define SDMA2_PUB_REG_TYPE0__SDMA2_CONTEXT_REG_TYPE1__SHIFT 0xc +#define SDMA2_PUB_REG_TYPE0__SDMA2_CONTEXT_REG_TYPE2__SHIFT 0xd +#define SDMA2_PUB_REG_TYPE0__SDMA2_CONTEXT_REG_TYPE3__SHIFT 0xe +#define SDMA2_PUB_REG_TYPE0__SDMA2_PUB_REG_TYPE0__SHIFT 0xf +#define SDMA2_PUB_REG_TYPE0__SDMA2_PUB_REG_TYPE1__SHIFT 0x10 +#define SDMA2_PUB_REG_TYPE0__SDMA2_PUB_REG_TYPE2__SHIFT 0x11 +#define SDMA2_PUB_REG_TYPE0__SDMA2_PUB_REG_TYPE3__SHIFT 0x12 +#define SDMA2_PUB_REG_TYPE0__SDMA2_MMHUB_CNTL__SHIFT 0x13 +#define SDMA2_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY__SHIFT 0x15 +#define SDMA2_PUB_REG_TYPE0__SDMA2_CONTEXT_GROUP_BOUNDARY__SHIFT 0x19 +#define SDMA2_PUB_REG_TYPE0__SDMA2_POWER_CNTL__SHIFT 0x1a +#define SDMA2_PUB_REG_TYPE0__SDMA2_CLK_CTRL__SHIFT 0x1b +#define SDMA2_PUB_REG_TYPE0__SDMA2_CNTL__SHIFT 0x1c +#define SDMA2_PUB_REG_TYPE0__SDMA2_CHICKEN_BITS__SHIFT 0x1d +#define SDMA2_PUB_REG_TYPE0__SDMA2_GB_ADDR_CONFIG__SHIFT 0x1e +#define SDMA2_PUB_REG_TYPE0__SDMA2_GB_ADDR_CONFIG_READ__SHIFT 0x1f +#define SDMA2_PUB_REG_TYPE0__SDMA2_UCODE_ADDR_MASK 0x00000001L +#define SDMA2_PUB_REG_TYPE0__SDMA2_UCODE_DATA_MASK 0x00000002L +#define SDMA2_PUB_REG_TYPE0__RESERVED3_MASK 0x00000008L +#define SDMA2_PUB_REG_TYPE0__SDMA2_VM_CNTL_MASK 0x00000010L +#define SDMA2_PUB_REG_TYPE0__SDMA2_VM_CTX_LO_MASK 0x00000020L +#define SDMA2_PUB_REG_TYPE0__SDMA2_VM_CTX_HI_MASK 0x00000040L +#define SDMA2_PUB_REG_TYPE0__SDMA2_ACTIVE_FCN_ID_MASK 0x00000080L +#define SDMA2_PUB_REG_TYPE0__SDMA2_VM_CTX_CNTL_MASK 0x00000100L +#define SDMA2_PUB_REG_TYPE0__SDMA2_VIRT_RESET_REQ_MASK 0x00000200L +#define SDMA2_PUB_REG_TYPE0__RESERVED10_MASK 0x00000400L +#define SDMA2_PUB_REG_TYPE0__SDMA2_CONTEXT_REG_TYPE0_MASK 0x00000800L +#define SDMA2_PUB_REG_TYPE0__SDMA2_CONTEXT_REG_TYPE1_MASK 0x00001000L +#define SDMA2_PUB_REG_TYPE0__SDMA2_CONTEXT_REG_TYPE2_MASK 
0x00002000L +#define SDMA2_PUB_REG_TYPE0__SDMA2_CONTEXT_REG_TYPE3_MASK 0x00004000L +#define SDMA2_PUB_REG_TYPE0__SDMA2_PUB_REG_TYPE0_MASK 0x00008000L +#define SDMA2_PUB_REG_TYPE0__SDMA2_PUB_REG_TYPE1_MASK 0x00010000L +#define SDMA2_PUB_REG_TYPE0__SDMA2_PUB_REG_TYPE2_MASK 0x00020000L +#define SDMA2_PUB_REG_TYPE0__SDMA2_PUB_REG_TYPE3_MASK 0x00040000L +#define SDMA2_PUB_REG_TYPE0__SDMA2_MMHUB_CNTL_MASK 0x00080000L +#define SDMA2_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY_MASK 0x01E00000L +#define SDMA2_PUB_REG_TYPE0__SDMA2_CONTEXT_GROUP_BOUNDARY_MASK 0x02000000L +#define SDMA2_PUB_REG_TYPE0__SDMA2_POWER_CNTL_MASK 0x04000000L +#define SDMA2_PUB_REG_TYPE0__SDMA2_CLK_CTRL_MASK 0x08000000L +#define SDMA2_PUB_REG_TYPE0__SDMA2_CNTL_MASK 0x10000000L +#define SDMA2_PUB_REG_TYPE0__SDMA2_CHICKEN_BITS_MASK 0x20000000L +#define SDMA2_PUB_REG_TYPE0__SDMA2_GB_ADDR_CONFIG_MASK 0x40000000L +#define SDMA2_PUB_REG_TYPE0__SDMA2_GB_ADDR_CONFIG_READ_MASK 0x80000000L +//SDMA2_PUB_REG_TYPE1 +#define SDMA2_PUB_REG_TYPE1__SDMA2_RB_RPTR_FETCH_HI__SHIFT 0x0 +#define SDMA2_PUB_REG_TYPE1__SDMA2_SEM_WAIT_FAIL_TIMER_CNTL__SHIFT 0x1 +#define SDMA2_PUB_REG_TYPE1__SDMA2_RB_RPTR_FETCH__SHIFT 0x2 +#define SDMA2_PUB_REG_TYPE1__SDMA2_IB_OFFSET_FETCH__SHIFT 0x3 +#define SDMA2_PUB_REG_TYPE1__SDMA2_PROGRAM__SHIFT 0x4 +#define SDMA2_PUB_REG_TYPE1__SDMA2_STATUS_REG__SHIFT 0x5 +#define SDMA2_PUB_REG_TYPE1__SDMA2_STATUS1_REG__SHIFT 0x6 +#define SDMA2_PUB_REG_TYPE1__SDMA2_RD_BURST_CNTL__SHIFT 0x7 +#define SDMA2_PUB_REG_TYPE1__SDMA2_HBM_PAGE_CONFIG__SHIFT 0x8 +#define SDMA2_PUB_REG_TYPE1__SDMA2_UCODE_CHECKSUM__SHIFT 0x9 +#define SDMA2_PUB_REG_TYPE1__SDMA2_F32_CNTL__SHIFT 0xa +#define SDMA2_PUB_REG_TYPE1__SDMA2_FREEZE__SHIFT 0xb +#define SDMA2_PUB_REG_TYPE1__SDMA2_PHASE0_QUANTUM__SHIFT 0xc +#define SDMA2_PUB_REG_TYPE1__SDMA2_PHASE1_QUANTUM__SHIFT 0xd +#define SDMA2_PUB_REG_TYPE1__SDMA_POWER_GATING__SHIFT 0xe +#define SDMA2_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG__SHIFT 0xf +#define SDMA2_PUB_REG_TYPE1__SDMA_PGFSM_WRITE__SHIFT 0x10 +#define SDMA2_PUB_REG_TYPE1__SDMA_PGFSM_READ__SHIFT 0x11 +#define SDMA2_PUB_REG_TYPE1__SDMA2_EDC_CONFIG__SHIFT 0x12 +#define SDMA2_PUB_REG_TYPE1__SDMA2_BA_THRESHOLD__SHIFT 0x13 +#define SDMA2_PUB_REG_TYPE1__SDMA2_ID__SHIFT 0x14 +#define SDMA2_PUB_REG_TYPE1__SDMA2_VERSION__SHIFT 0x15 +#define SDMA2_PUB_REG_TYPE1__SDMA2_EDC_COUNTER__SHIFT 0x16 +#define SDMA2_PUB_REG_TYPE1__SDMA2_EDC_COUNTER_CLEAR__SHIFT 0x17 +#define SDMA2_PUB_REG_TYPE1__SDMA2_STATUS2_REG__SHIFT 0x18 +#define SDMA2_PUB_REG_TYPE1__SDMA2_ATOMIC_CNTL__SHIFT 0x19 +#define SDMA2_PUB_REG_TYPE1__SDMA2_ATOMIC_PREOP_LO__SHIFT 0x1a +#define SDMA2_PUB_REG_TYPE1__SDMA2_ATOMIC_PREOP_HI__SHIFT 0x1b +#define SDMA2_PUB_REG_TYPE1__SDMA2_UTCL1_CNTL__SHIFT 0x1c +#define SDMA2_PUB_REG_TYPE1__SDMA2_UTCL1_WATERMK__SHIFT 0x1d +#define SDMA2_PUB_REG_TYPE1__SDMA2_UTCL1_RD_STATUS__SHIFT 0x1e +#define SDMA2_PUB_REG_TYPE1__SDMA2_UTCL1_WR_STATUS__SHIFT 0x1f +#define SDMA2_PUB_REG_TYPE1__SDMA2_RB_RPTR_FETCH_HI_MASK 0x00000001L +#define SDMA2_PUB_REG_TYPE1__SDMA2_SEM_WAIT_FAIL_TIMER_CNTL_MASK 0x00000002L +#define SDMA2_PUB_REG_TYPE1__SDMA2_RB_RPTR_FETCH_MASK 0x00000004L +#define SDMA2_PUB_REG_TYPE1__SDMA2_IB_OFFSET_FETCH_MASK 0x00000008L +#define SDMA2_PUB_REG_TYPE1__SDMA2_PROGRAM_MASK 0x00000010L +#define SDMA2_PUB_REG_TYPE1__SDMA2_STATUS_REG_MASK 0x00000020L +#define SDMA2_PUB_REG_TYPE1__SDMA2_STATUS1_REG_MASK 0x00000040L +#define SDMA2_PUB_REG_TYPE1__SDMA2_RD_BURST_CNTL_MASK 0x00000080L +#define SDMA2_PUB_REG_TYPE1__SDMA2_HBM_PAGE_CONFIG_MASK 0x00000100L +#define 
SDMA2_PUB_REG_TYPE1__SDMA2_UCODE_CHECKSUM_MASK 0x00000200L +#define SDMA2_PUB_REG_TYPE1__SDMA2_F32_CNTL_MASK 0x00000400L +#define SDMA2_PUB_REG_TYPE1__SDMA2_FREEZE_MASK 0x00000800L +#define SDMA2_PUB_REG_TYPE1__SDMA2_PHASE0_QUANTUM_MASK 0x00001000L +#define SDMA2_PUB_REG_TYPE1__SDMA2_PHASE1_QUANTUM_MASK 0x00002000L +#define SDMA2_PUB_REG_TYPE1__SDMA_POWER_GATING_MASK 0x00004000L +#define SDMA2_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG_MASK 0x00008000L +#define SDMA2_PUB_REG_TYPE1__SDMA_PGFSM_WRITE_MASK 0x00010000L +#define SDMA2_PUB_REG_TYPE1__SDMA_PGFSM_READ_MASK 0x00020000L +#define SDMA2_PUB_REG_TYPE1__SDMA2_EDC_CONFIG_MASK 0x00040000L +#define SDMA2_PUB_REG_TYPE1__SDMA2_BA_THRESHOLD_MASK 0x00080000L +#define SDMA2_PUB_REG_TYPE1__SDMA2_ID_MASK 0x00100000L +#define SDMA2_PUB_REG_TYPE1__SDMA2_VERSION_MASK 0x00200000L +#define SDMA2_PUB_REG_TYPE1__SDMA2_EDC_COUNTER_MASK 0x00400000L +#define SDMA2_PUB_REG_TYPE1__SDMA2_EDC_COUNTER_CLEAR_MASK 0x00800000L +#define SDMA2_PUB_REG_TYPE1__SDMA2_STATUS2_REG_MASK 0x01000000L +#define SDMA2_PUB_REG_TYPE1__SDMA2_ATOMIC_CNTL_MASK 0x02000000L +#define SDMA2_PUB_REG_TYPE1__SDMA2_ATOMIC_PREOP_LO_MASK 0x04000000L +#define SDMA2_PUB_REG_TYPE1__SDMA2_ATOMIC_PREOP_HI_MASK 0x08000000L +#define SDMA2_PUB_REG_TYPE1__SDMA2_UTCL1_CNTL_MASK 0x10000000L +#define SDMA2_PUB_REG_TYPE1__SDMA2_UTCL1_WATERMK_MASK 0x20000000L +#define SDMA2_PUB_REG_TYPE1__SDMA2_UTCL1_RD_STATUS_MASK 0x40000000L +#define SDMA2_PUB_REG_TYPE1__SDMA2_UTCL1_WR_STATUS_MASK 0x80000000L +//SDMA2_PUB_REG_TYPE2 +#define SDMA2_PUB_REG_TYPE2__SDMA2_UTCL1_INV0__SHIFT 0x0 +#define SDMA2_PUB_REG_TYPE2__SDMA2_UTCL1_INV1__SHIFT 0x1 +#define SDMA2_PUB_REG_TYPE2__SDMA2_UTCL1_INV2__SHIFT 0x2 +#define SDMA2_PUB_REG_TYPE2__SDMA2_UTCL1_RD_XNACK0__SHIFT 0x3 +#define SDMA2_PUB_REG_TYPE2__SDMA2_UTCL1_RD_XNACK1__SHIFT 0x4 +#define SDMA2_PUB_REG_TYPE2__SDMA2_UTCL1_WR_XNACK0__SHIFT 0x5 +#define SDMA2_PUB_REG_TYPE2__SDMA2_UTCL1_WR_XNACK1__SHIFT 0x6 +#define SDMA2_PUB_REG_TYPE2__SDMA2_UTCL1_TIMEOUT__SHIFT 0x7 +#define SDMA2_PUB_REG_TYPE2__SDMA2_UTCL1_PAGE__SHIFT 0x8 +#define SDMA2_PUB_REG_TYPE2__SDMA2_POWER_CNTL_IDLE__SHIFT 0x9 +#define SDMA2_PUB_REG_TYPE2__SDMA2_RELAX_ORDERING_LUT__SHIFT 0xa +#define SDMA2_PUB_REG_TYPE2__SDMA2_CHICKEN_BITS_2__SHIFT 0xb +#define SDMA2_PUB_REG_TYPE2__SDMA2_STATUS3_REG__SHIFT 0xc +#define SDMA2_PUB_REG_TYPE2__SDMA2_PHYSICAL_ADDR_LO__SHIFT 0xd +#define SDMA2_PUB_REG_TYPE2__SDMA2_PHYSICAL_ADDR_HI__SHIFT 0xe +#define SDMA2_PUB_REG_TYPE2__SDMA2_PHASE2_QUANTUM__SHIFT 0xf +#define SDMA2_PUB_REG_TYPE2__SDMA2_ERROR_LOG__SHIFT 0x10 +#define SDMA2_PUB_REG_TYPE2__SDMA2_PUB_DUMMY_REG0__SHIFT 0x11 +#define SDMA2_PUB_REG_TYPE2__SDMA2_PUB_DUMMY_REG1__SHIFT 0x12 +#define SDMA2_PUB_REG_TYPE2__SDMA2_PUB_DUMMY_REG2__SHIFT 0x13 +#define SDMA2_PUB_REG_TYPE2__SDMA2_PUB_DUMMY_REG3__SHIFT 0x14 +#define SDMA2_PUB_REG_TYPE2__SDMA2_F32_COUNTER__SHIFT 0x15 +#define SDMA2_PUB_REG_TYPE2__SDMA2_UNBREAKABLE__SHIFT 0x16 +#define SDMA2_PUB_REG_TYPE2__SDMA2_PERFMON_CNTL__SHIFT 0x17 +#define SDMA2_PUB_REG_TYPE2__SDMA2_PERFCOUNTER0_RESULT__SHIFT 0x18 +#define SDMA2_PUB_REG_TYPE2__SDMA2_PERFCOUNTER1_RESULT__SHIFT 0x19 +#define SDMA2_PUB_REG_TYPE2__SDMA2_PERFCOUNTER_TAG_DELAY_RANGE__SHIFT 0x1a +#define SDMA2_PUB_REG_TYPE2__SDMA2_CRD_CNTL__SHIFT 0x1b +#define SDMA2_PUB_REG_TYPE2__RESERVED28__SHIFT 0x1c +#define SDMA2_PUB_REG_TYPE2__SDMA2_GPU_IOV_VIOLATION_LOG__SHIFT 0x1d +#define SDMA2_PUB_REG_TYPE2__SDMA2_ULV_CNTL__SHIFT 0x1e +#define SDMA2_PUB_REG_TYPE2__RESERVED__SHIFT 0x1f +#define SDMA2_PUB_REG_TYPE2__SDMA2_UTCL1_INV0_MASK 
0x00000001L +#define SDMA2_PUB_REG_TYPE2__SDMA2_UTCL1_INV1_MASK 0x00000002L +#define SDMA2_PUB_REG_TYPE2__SDMA2_UTCL1_INV2_MASK 0x00000004L +#define SDMA2_PUB_REG_TYPE2__SDMA2_UTCL1_RD_XNACK0_MASK 0x00000008L +#define SDMA2_PUB_REG_TYPE2__SDMA2_UTCL1_RD_XNACK1_MASK 0x00000010L +#define SDMA2_PUB_REG_TYPE2__SDMA2_UTCL1_WR_XNACK0_MASK 0x00000020L +#define SDMA2_PUB_REG_TYPE2__SDMA2_UTCL1_WR_XNACK1_MASK 0x00000040L +#define SDMA2_PUB_REG_TYPE2__SDMA2_UTCL1_TIMEOUT_MASK 0x00000080L +#define SDMA2_PUB_REG_TYPE2__SDMA2_UTCL1_PAGE_MASK 0x00000100L +#define SDMA2_PUB_REG_TYPE2__SDMA2_POWER_CNTL_IDLE_MASK 0x00000200L +#define SDMA2_PUB_REG_TYPE2__SDMA2_RELAX_ORDERING_LUT_MASK 0x00000400L +#define SDMA2_PUB_REG_TYPE2__SDMA2_CHICKEN_BITS_2_MASK 0x00000800L +#define SDMA2_PUB_REG_TYPE2__SDMA2_STATUS3_REG_MASK 0x00001000L +#define SDMA2_PUB_REG_TYPE2__SDMA2_PHYSICAL_ADDR_LO_MASK 0x00002000L +#define SDMA2_PUB_REG_TYPE2__SDMA2_PHYSICAL_ADDR_HI_MASK 0x00004000L +#define SDMA2_PUB_REG_TYPE2__SDMA2_PHASE2_QUANTUM_MASK 0x00008000L +#define SDMA2_PUB_REG_TYPE2__SDMA2_ERROR_LOG_MASK 0x00010000L +#define SDMA2_PUB_REG_TYPE2__SDMA2_PUB_DUMMY_REG0_MASK 0x00020000L +#define SDMA2_PUB_REG_TYPE2__SDMA2_PUB_DUMMY_REG1_MASK 0x00040000L +#define SDMA2_PUB_REG_TYPE2__SDMA2_PUB_DUMMY_REG2_MASK 0x00080000L +#define SDMA2_PUB_REG_TYPE2__SDMA2_PUB_DUMMY_REG3_MASK 0x00100000L +#define SDMA2_PUB_REG_TYPE2__SDMA2_F32_COUNTER_MASK 0x00200000L +#define SDMA2_PUB_REG_TYPE2__SDMA2_UNBREAKABLE_MASK 0x00400000L +#define SDMA2_PUB_REG_TYPE2__SDMA2_PERFMON_CNTL_MASK 0x00800000L +#define SDMA2_PUB_REG_TYPE2__SDMA2_PERFCOUNTER0_RESULT_MASK 0x01000000L +#define SDMA2_PUB_REG_TYPE2__SDMA2_PERFCOUNTER1_RESULT_MASK 0x02000000L +#define SDMA2_PUB_REG_TYPE2__SDMA2_PERFCOUNTER_TAG_DELAY_RANGE_MASK 0x04000000L +#define SDMA2_PUB_REG_TYPE2__SDMA2_CRD_CNTL_MASK 0x08000000L +#define SDMA2_PUB_REG_TYPE2__RESERVED28_MASK 0x10000000L +#define SDMA2_PUB_REG_TYPE2__SDMA2_GPU_IOV_VIOLATION_LOG_MASK 0x20000000L +#define SDMA2_PUB_REG_TYPE2__SDMA2_ULV_CNTL_MASK 0x40000000L +#define SDMA2_PUB_REG_TYPE2__RESERVED_MASK 0x80000000L +//SDMA2_PUB_REG_TYPE3 +#define SDMA2_PUB_REG_TYPE3__SDMA2_EA_DBIT_ADDR_DATA__SHIFT 0x0 +#define SDMA2_PUB_REG_TYPE3__SDMA2_EA_DBIT_ADDR_INDEX__SHIFT 0x1 +#define SDMA2_PUB_REG_TYPE3__SDMA2_GPU_IOV_VIOLATION_LOG2__SHIFT 0x2 +#define SDMA2_PUB_REG_TYPE3__RESERVED__SHIFT 0x3 +#define SDMA2_PUB_REG_TYPE3__SDMA2_EA_DBIT_ADDR_DATA_MASK 0x00000001L +#define SDMA2_PUB_REG_TYPE3__SDMA2_EA_DBIT_ADDR_INDEX_MASK 0x00000002L +#define SDMA2_PUB_REG_TYPE3__SDMA2_GPU_IOV_VIOLATION_LOG2_MASK 0x00000004L +#define SDMA2_PUB_REG_TYPE3__RESERVED_MASK 0xFFFFFFF8L +//SDMA2_MMHUB_CNTL +#define SDMA2_MMHUB_CNTL__UNIT_ID__SHIFT 0x0 +#define SDMA2_MMHUB_CNTL__UNIT_ID_MASK 0x0000003FL +//SDMA2_CONTEXT_GROUP_BOUNDARY +#define SDMA2_CONTEXT_GROUP_BOUNDARY__RESERVED__SHIFT 0x0 +#define SDMA2_CONTEXT_GROUP_BOUNDARY__RESERVED_MASK 0xFFFFFFFFL +//SDMA2_POWER_CNTL +#define SDMA2_POWER_CNTL__MEM_POWER_OVERRIDE__SHIFT 0x8 +#define SDMA2_POWER_CNTL__MEM_POWER_LS_EN__SHIFT 0x9 +#define SDMA2_POWER_CNTL__MEM_POWER_DS_EN__SHIFT 0xa +#define SDMA2_POWER_CNTL__MEM_POWER_SD_EN__SHIFT 0xb +#define SDMA2_POWER_CNTL__MEM_POWER_DELAY__SHIFT 0xc +#define SDMA2_POWER_CNTL__MEM_POWER_OVERRIDE_MASK 0x00000100L +#define SDMA2_POWER_CNTL__MEM_POWER_LS_EN_MASK 0x00000200L +#define SDMA2_POWER_CNTL__MEM_POWER_DS_EN_MASK 0x00000400L +#define SDMA2_POWER_CNTL__MEM_POWER_SD_EN_MASK 0x00000800L +#define SDMA2_POWER_CNTL__MEM_POWER_DELAY_MASK 0x003FF000L +//SDMA2_CLK_CTRL +#define 
SDMA2_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define SDMA2_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define SDMA2_CLK_CTRL__RESERVED__SHIFT 0xc +#define SDMA2_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18 +#define SDMA2_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19 +#define SDMA2_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a +#define SDMA2_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b +#define SDMA2_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c +#define SDMA2_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d +#define SDMA2_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e +#define SDMA2_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f +#define SDMA2_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define SDMA2_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define SDMA2_CLK_CTRL__RESERVED_MASK 0x00FFF000L +#define SDMA2_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L +#define SDMA2_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L +#define SDMA2_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L +#define SDMA2_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L +#define SDMA2_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L +#define SDMA2_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L +#define SDMA2_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L +#define SDMA2_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L +//SDMA2_CNTL +#define SDMA2_CNTL__TRAP_ENABLE__SHIFT 0x0 +#define SDMA2_CNTL__UTC_L1_ENABLE__SHIFT 0x1 +#define SDMA2_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x2 +#define SDMA2_CNTL__DATA_SWAP_ENABLE__SHIFT 0x3 +#define SDMA2_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x4 +#define SDMA2_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x5 +#define SDMA2_CNTL__MIDCMD_WORLDSWITCH_ENABLE__SHIFT 0x11 +#define SDMA2_CNTL__AUTO_CTXSW_ENABLE__SHIFT 0x12 +#define SDMA2_CNTL__CTXEMPTY_INT_ENABLE__SHIFT 0x1c +#define SDMA2_CNTL__FROZEN_INT_ENABLE__SHIFT 0x1d +#define SDMA2_CNTL__IB_PREEMPT_INT_ENABLE__SHIFT 0x1e +#define SDMA2_CNTL__TRAP_ENABLE_MASK 0x00000001L +#define SDMA2_CNTL__UTC_L1_ENABLE_MASK 0x00000002L +#define SDMA2_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L +#define SDMA2_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L +#define SDMA2_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L +#define SDMA2_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00000020L +#define SDMA2_CNTL__MIDCMD_WORLDSWITCH_ENABLE_MASK 0x00020000L +#define SDMA2_CNTL__AUTO_CTXSW_ENABLE_MASK 0x00040000L +#define SDMA2_CNTL__CTXEMPTY_INT_ENABLE_MASK 0x10000000L +#define SDMA2_CNTL__FROZEN_INT_ENABLE_MASK 0x20000000L +#define SDMA2_CNTL__IB_PREEMPT_INT_ENABLE_MASK 0x40000000L +//SDMA2_CHICKEN_BITS +#define SDMA2_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE__SHIFT 0x0 +#define SDMA2_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE__SHIFT 0x1 +#define SDMA2_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE__SHIFT 0x2 +#define SDMA2_CHICKEN_BITS__WRITE_BURST_LENGTH__SHIFT 0x8 +#define SDMA2_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE__SHIFT 0xa +#define SDMA2_CHICKEN_BITS__COPY_OVERLAP_ENABLE__SHIFT 0x10 +#define SDMA2_CHICKEN_BITS__RAW_CHECK_ENABLE__SHIFT 0x11 +#define SDMA2_CHICKEN_BITS__SRBM_POLL_RETRYING__SHIFT 0x14 +#define SDMA2_CHICKEN_BITS__CG_STATUS_OUTPUT__SHIFT 0x17 +#define SDMA2_CHICKEN_BITS__TIME_BASED_QOS__SHIFT 0x19 +#define SDMA2_CHICKEN_BITS__CE_AFIFO_WATERMARK__SHIFT 0x1a +#define SDMA2_CHICKEN_BITS__CE_DFIFO_WATERMARK__SHIFT 0x1c +#define SDMA2_CHICKEN_BITS__CE_LFIFO_WATERMARK__SHIFT 0x1e +#define SDMA2_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE_MASK 0x00000001L +#define SDMA2_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE_MASK 0x00000002L +#define SDMA2_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE_MASK 0x00000004L +#define SDMA2_CHICKEN_BITS__WRITE_BURST_LENGTH_MASK 0x00000300L +#define SDMA2_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE_MASK 0x00001C00L 
+#define SDMA2_CHICKEN_BITS__COPY_OVERLAP_ENABLE_MASK 0x00010000L +#define SDMA2_CHICKEN_BITS__RAW_CHECK_ENABLE_MASK 0x00020000L +#define SDMA2_CHICKEN_BITS__SRBM_POLL_RETRYING_MASK 0x00100000L +#define SDMA2_CHICKEN_BITS__CG_STATUS_OUTPUT_MASK 0x00800000L +#define SDMA2_CHICKEN_BITS__TIME_BASED_QOS_MASK 0x02000000L +#define SDMA2_CHICKEN_BITS__CE_AFIFO_WATERMARK_MASK 0x0C000000L +#define SDMA2_CHICKEN_BITS__CE_DFIFO_WATERMARK_MASK 0x30000000L +#define SDMA2_CHICKEN_BITS__CE_LFIFO_WATERMARK_MASK 0xC0000000L +//SDMA2_GB_ADDR_CONFIG +#define SDMA2_GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0 +#define SDMA2_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3 +#define SDMA2_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x8 +#define SDMA2_GB_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc +#define SDMA2_GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13 +#define SDMA2_GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L +#define SDMA2_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L +#define SDMA2_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L +#define SDMA2_GB_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L +#define SDMA2_GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L +//SDMA2_GB_ADDR_CONFIG_READ +#define SDMA2_GB_ADDR_CONFIG_READ__NUM_PIPES__SHIFT 0x0 +#define SDMA2_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE__SHIFT 0x3 +#define SDMA2_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE__SHIFT 0x8 +#define SDMA2_GB_ADDR_CONFIG_READ__NUM_BANKS__SHIFT 0xc +#define SDMA2_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES__SHIFT 0x13 +#define SDMA2_GB_ADDR_CONFIG_READ__NUM_PIPES_MASK 0x00000007L +#define SDMA2_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L +#define SDMA2_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE_MASK 0x00000700L +#define SDMA2_GB_ADDR_CONFIG_READ__NUM_BANKS_MASK 0x00007000L +#define SDMA2_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES_MASK 0x00180000L +//SDMA2_RB_RPTR_FETCH_HI +#define SDMA2_RB_RPTR_FETCH_HI__OFFSET__SHIFT 0x0 +#define SDMA2_RB_RPTR_FETCH_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA2_SEM_WAIT_FAIL_TIMER_CNTL +#define SDMA2_SEM_WAIT_FAIL_TIMER_CNTL__TIMER__SHIFT 0x0 +#define SDMA2_SEM_WAIT_FAIL_TIMER_CNTL__TIMER_MASK 0xFFFFFFFFL +//SDMA2_RB_RPTR_FETCH +#define SDMA2_RB_RPTR_FETCH__OFFSET__SHIFT 0x2 +#define SDMA2_RB_RPTR_FETCH__OFFSET_MASK 0xFFFFFFFCL +//SDMA2_IB_OFFSET_FETCH +#define SDMA2_IB_OFFSET_FETCH__OFFSET__SHIFT 0x2 +#define SDMA2_IB_OFFSET_FETCH__OFFSET_MASK 0x003FFFFCL +//SDMA2_PROGRAM +#define SDMA2_PROGRAM__STREAM__SHIFT 0x0 +#define SDMA2_PROGRAM__STREAM_MASK 0xFFFFFFFFL +//SDMA2_STATUS_REG +#define SDMA2_STATUS_REG__IDLE__SHIFT 0x0 +#define SDMA2_STATUS_REG__REG_IDLE__SHIFT 0x1 +#define SDMA2_STATUS_REG__RB_EMPTY__SHIFT 0x2 +#define SDMA2_STATUS_REG__RB_FULL__SHIFT 0x3 +#define SDMA2_STATUS_REG__RB_CMD_IDLE__SHIFT 0x4 +#define SDMA2_STATUS_REG__RB_CMD_FULL__SHIFT 0x5 +#define SDMA2_STATUS_REG__IB_CMD_IDLE__SHIFT 0x6 +#define SDMA2_STATUS_REG__IB_CMD_FULL__SHIFT 0x7 +#define SDMA2_STATUS_REG__BLOCK_IDLE__SHIFT 0x8 +#define SDMA2_STATUS_REG__INSIDE_IB__SHIFT 0x9 +#define SDMA2_STATUS_REG__EX_IDLE__SHIFT 0xa +#define SDMA2_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE__SHIFT 0xb +#define SDMA2_STATUS_REG__PACKET_READY__SHIFT 0xc +#define SDMA2_STATUS_REG__MC_WR_IDLE__SHIFT 0xd +#define SDMA2_STATUS_REG__SRBM_IDLE__SHIFT 0xe +#define SDMA2_STATUS_REG__CONTEXT_EMPTY__SHIFT 0xf +#define SDMA2_STATUS_REG__DELTA_RPTR_FULL__SHIFT 0x10 +#define SDMA2_STATUS_REG__RB_MC_RREQ_IDLE__SHIFT 0x11 +#define SDMA2_STATUS_REG__IB_MC_RREQ_IDLE__SHIFT 0x12 +#define SDMA2_STATUS_REG__MC_RD_IDLE__SHIFT 0x13 +#define 
SDMA2_STATUS_REG__DELTA_RPTR_EMPTY__SHIFT 0x14 +#define SDMA2_STATUS_REG__MC_RD_RET_STALL__SHIFT 0x15 +#define SDMA2_STATUS_REG__MC_RD_NO_POLL_IDLE__SHIFT 0x16 +#define SDMA2_STATUS_REG__PREV_CMD_IDLE__SHIFT 0x19 +#define SDMA2_STATUS_REG__SEM_IDLE__SHIFT 0x1a +#define SDMA2_STATUS_REG__SEM_REQ_STALL__SHIFT 0x1b +#define SDMA2_STATUS_REG__SEM_RESP_STATE__SHIFT 0x1c +#define SDMA2_STATUS_REG__INT_IDLE__SHIFT 0x1e +#define SDMA2_STATUS_REG__INT_REQ_STALL__SHIFT 0x1f +#define SDMA2_STATUS_REG__IDLE_MASK 0x00000001L +#define SDMA2_STATUS_REG__REG_IDLE_MASK 0x00000002L +#define SDMA2_STATUS_REG__RB_EMPTY_MASK 0x00000004L +#define SDMA2_STATUS_REG__RB_FULL_MASK 0x00000008L +#define SDMA2_STATUS_REG__RB_CMD_IDLE_MASK 0x00000010L +#define SDMA2_STATUS_REG__RB_CMD_FULL_MASK 0x00000020L +#define SDMA2_STATUS_REG__IB_CMD_IDLE_MASK 0x00000040L +#define SDMA2_STATUS_REG__IB_CMD_FULL_MASK 0x00000080L +#define SDMA2_STATUS_REG__BLOCK_IDLE_MASK 0x00000100L +#define SDMA2_STATUS_REG__INSIDE_IB_MASK 0x00000200L +#define SDMA2_STATUS_REG__EX_IDLE_MASK 0x00000400L +#define SDMA2_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE_MASK 0x00000800L +#define SDMA2_STATUS_REG__PACKET_READY_MASK 0x00001000L +#define SDMA2_STATUS_REG__MC_WR_IDLE_MASK 0x00002000L +#define SDMA2_STATUS_REG__SRBM_IDLE_MASK 0x00004000L +#define SDMA2_STATUS_REG__CONTEXT_EMPTY_MASK 0x00008000L +#define SDMA2_STATUS_REG__DELTA_RPTR_FULL_MASK 0x00010000L +#define SDMA2_STATUS_REG__RB_MC_RREQ_IDLE_MASK 0x00020000L +#define SDMA2_STATUS_REG__IB_MC_RREQ_IDLE_MASK 0x00040000L +#define SDMA2_STATUS_REG__MC_RD_IDLE_MASK 0x00080000L +#define SDMA2_STATUS_REG__DELTA_RPTR_EMPTY_MASK 0x00100000L +#define SDMA2_STATUS_REG__MC_RD_RET_STALL_MASK 0x00200000L +#define SDMA2_STATUS_REG__MC_RD_NO_POLL_IDLE_MASK 0x00400000L +#define SDMA2_STATUS_REG__PREV_CMD_IDLE_MASK 0x02000000L +#define SDMA2_STATUS_REG__SEM_IDLE_MASK 0x04000000L +#define SDMA2_STATUS_REG__SEM_REQ_STALL_MASK 0x08000000L +#define SDMA2_STATUS_REG__SEM_RESP_STATE_MASK 0x30000000L +#define SDMA2_STATUS_REG__INT_IDLE_MASK 0x40000000L +#define SDMA2_STATUS_REG__INT_REQ_STALL_MASK 0x80000000L +//SDMA2_STATUS1_REG +#define SDMA2_STATUS1_REG__CE_WREQ_IDLE__SHIFT 0x0 +#define SDMA2_STATUS1_REG__CE_WR_IDLE__SHIFT 0x1 +#define SDMA2_STATUS1_REG__CE_SPLIT_IDLE__SHIFT 0x2 +#define SDMA2_STATUS1_REG__CE_RREQ_IDLE__SHIFT 0x3 +#define SDMA2_STATUS1_REG__CE_OUT_IDLE__SHIFT 0x4 +#define SDMA2_STATUS1_REG__CE_IN_IDLE__SHIFT 0x5 +#define SDMA2_STATUS1_REG__CE_DST_IDLE__SHIFT 0x6 +#define SDMA2_STATUS1_REG__CE_CMD_IDLE__SHIFT 0x9 +#define SDMA2_STATUS1_REG__CE_AFIFO_FULL__SHIFT 0xa +#define SDMA2_STATUS1_REG__CE_INFO_FULL__SHIFT 0xd +#define SDMA2_STATUS1_REG__CE_INFO1_FULL__SHIFT 0xe +#define SDMA2_STATUS1_REG__EX_START__SHIFT 0xf +#define SDMA2_STATUS1_REG__CE_RD_STALL__SHIFT 0x11 +#define SDMA2_STATUS1_REG__CE_WR_STALL__SHIFT 0x12 +#define SDMA2_STATUS1_REG__CE_WREQ_IDLE_MASK 0x00000001L +#define SDMA2_STATUS1_REG__CE_WR_IDLE_MASK 0x00000002L +#define SDMA2_STATUS1_REG__CE_SPLIT_IDLE_MASK 0x00000004L +#define SDMA2_STATUS1_REG__CE_RREQ_IDLE_MASK 0x00000008L +#define SDMA2_STATUS1_REG__CE_OUT_IDLE_MASK 0x00000010L +#define SDMA2_STATUS1_REG__CE_IN_IDLE_MASK 0x00000020L +#define SDMA2_STATUS1_REG__CE_DST_IDLE_MASK 0x00000040L +#define SDMA2_STATUS1_REG__CE_CMD_IDLE_MASK 0x00000200L +#define SDMA2_STATUS1_REG__CE_AFIFO_FULL_MASK 0x00000400L +#define SDMA2_STATUS1_REG__CE_INFO_FULL_MASK 0x00002000L +#define SDMA2_STATUS1_REG__CE_INFO1_FULL_MASK 0x00004000L +#define SDMA2_STATUS1_REG__EX_START_MASK 0x00008000L +#define 
SDMA2_STATUS1_REG__CE_RD_STALL_MASK 0x00020000L +#define SDMA2_STATUS1_REG__CE_WR_STALL_MASK 0x00040000L +//SDMA2_RD_BURST_CNTL +#define SDMA2_RD_BURST_CNTL__RD_BURST__SHIFT 0x0 +#define SDMA2_RD_BURST_CNTL__CMD_BUFFER_RD_BURST__SHIFT 0x2 +#define SDMA2_RD_BURST_CNTL__RD_BURST_MASK 0x00000003L +#define SDMA2_RD_BURST_CNTL__CMD_BUFFER_RD_BURST_MASK 0x0000000CL +//SDMA2_HBM_PAGE_CONFIG +#define SDMA2_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT__SHIFT 0x0 +#define SDMA2_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT_MASK 0x00000001L +//SDMA2_UCODE_CHECKSUM +#define SDMA2_UCODE_CHECKSUM__DATA__SHIFT 0x0 +#define SDMA2_UCODE_CHECKSUM__DATA_MASK 0xFFFFFFFFL +//SDMA2_F32_CNTL +#define SDMA2_F32_CNTL__HALT__SHIFT 0x0 +#define SDMA2_F32_CNTL__STEP__SHIFT 0x1 +#define SDMA2_F32_CNTL__HALT_MASK 0x00000001L +#define SDMA2_F32_CNTL__STEP_MASK 0x00000002L +//SDMA2_FREEZE +#define SDMA2_FREEZE__PREEMPT__SHIFT 0x0 +#define SDMA2_FREEZE__FREEZE__SHIFT 0x4 +#define SDMA2_FREEZE__FROZEN__SHIFT 0x5 +#define SDMA2_FREEZE__F32_FREEZE__SHIFT 0x6 +#define SDMA2_FREEZE__PREEMPT_MASK 0x00000001L +#define SDMA2_FREEZE__FREEZE_MASK 0x00000010L +#define SDMA2_FREEZE__FROZEN_MASK 0x00000020L +#define SDMA2_FREEZE__F32_FREEZE_MASK 0x00000040L +//SDMA2_PHASE0_QUANTUM +#define SDMA2_PHASE0_QUANTUM__UNIT__SHIFT 0x0 +#define SDMA2_PHASE0_QUANTUM__VALUE__SHIFT 0x8 +#define SDMA2_PHASE0_QUANTUM__PREFER__SHIFT 0x1e +#define SDMA2_PHASE0_QUANTUM__UNIT_MASK 0x0000000FL +#define SDMA2_PHASE0_QUANTUM__VALUE_MASK 0x00FFFF00L +#define SDMA2_PHASE0_QUANTUM__PREFER_MASK 0x40000000L +//SDMA2_PHASE1_QUANTUM +#define SDMA2_PHASE1_QUANTUM__UNIT__SHIFT 0x0 +#define SDMA2_PHASE1_QUANTUM__VALUE__SHIFT 0x8 +#define SDMA2_PHASE1_QUANTUM__PREFER__SHIFT 0x1e +#define SDMA2_PHASE1_QUANTUM__UNIT_MASK 0x0000000FL +#define SDMA2_PHASE1_QUANTUM__VALUE_MASK 0x00FFFF00L +#define SDMA2_PHASE1_QUANTUM__PREFER_MASK 0x40000000L +//SDMA2_EDC_CONFIG +#define SDMA2_EDC_CONFIG__DIS_EDC__SHIFT 0x1 +#define SDMA2_EDC_CONFIG__ECC_INT_ENABLE__SHIFT 0x2 +#define SDMA2_EDC_CONFIG__DIS_EDC_MASK 0x00000002L +#define SDMA2_EDC_CONFIG__ECC_INT_ENABLE_MASK 0x00000004L +//SDMA2_BA_THRESHOLD +#define SDMA2_BA_THRESHOLD__READ_THRES__SHIFT 0x0 +#define SDMA2_BA_THRESHOLD__WRITE_THRES__SHIFT 0x10 +#define SDMA2_BA_THRESHOLD__READ_THRES_MASK 0x000003FFL +#define SDMA2_BA_THRESHOLD__WRITE_THRES_MASK 0x03FF0000L +//SDMA2_ID +#define SDMA2_ID__DEVICE_ID__SHIFT 0x0 +#define SDMA2_ID__DEVICE_ID_MASK 0x000000FFL +//SDMA2_VERSION +#define SDMA2_VERSION__MINVER__SHIFT 0x0 +#define SDMA2_VERSION__MAJVER__SHIFT 0x8 +#define SDMA2_VERSION__REV__SHIFT 0x10 +#define SDMA2_VERSION__MINVER_MASK 0x0000007FL +#define SDMA2_VERSION__MAJVER_MASK 0x00007F00L +#define SDMA2_VERSION__REV_MASK 0x003F0000L +//SDMA2_EDC_COUNTER +#define SDMA2_EDC_COUNTER__SDMA_UCODE_BUF_SED__SHIFT 0x0 +#define SDMA2_EDC_COUNTER__SDMA_RB_CMD_BUF_SED__SHIFT 0x2 +#define SDMA2_EDC_COUNTER__SDMA_IB_CMD_BUF_SED__SHIFT 0x3 +#define SDMA2_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED__SHIFT 0x4 +#define SDMA2_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED__SHIFT 0x5 +#define SDMA2_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED__SHIFT 0x6 +#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED__SHIFT 0x7 +#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED__SHIFT 0x8 +#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED__SHIFT 0x9 +#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED__SHIFT 0xa +#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED__SHIFT 0xb +#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED__SHIFT 0xc +#define 
SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED__SHIFT 0xd +#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED__SHIFT 0xe +#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED__SHIFT 0xf +#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED__SHIFT 0x10 +#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED__SHIFT 0x11 +#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED__SHIFT 0x12 +#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED__SHIFT 0x13 +#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED__SHIFT 0x14 +#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED__SHIFT 0x15 +#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED__SHIFT 0x16 +#define SDMA2_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED__SHIFT 0x17 +#define SDMA2_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED__SHIFT 0x18 +#define SDMA2_EDC_COUNTER__SDMA_UCODE_BUF_SED_MASK 0x00000001L +#define SDMA2_EDC_COUNTER__SDMA_RB_CMD_BUF_SED_MASK 0x00000004L +#define SDMA2_EDC_COUNTER__SDMA_IB_CMD_BUF_SED_MASK 0x00000008L +#define SDMA2_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED_MASK 0x00000010L +#define SDMA2_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED_MASK 0x00000020L +#define SDMA2_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED_MASK 0x00000040L +#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED_MASK 0x00000080L +#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED_MASK 0x00000100L +#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED_MASK 0x00000200L +#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED_MASK 0x00000400L +#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED_MASK 0x00000800L +#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED_MASK 0x00001000L +#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED_MASK 0x00002000L +#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED_MASK 0x00004000L +#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED_MASK 0x00008000L +#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED_MASK 0x00010000L +#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED_MASK 0x00020000L +#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED_MASK 0x00040000L +#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED_MASK 0x00080000L +#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED_MASK 0x00100000L +#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED_MASK 0x00200000L +#define SDMA2_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED_MASK 0x00400000L +#define SDMA2_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED_MASK 0x00800000L +#define SDMA2_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED_MASK 0x01000000L +//SDMA2_EDC_COUNTER_CLEAR +#define SDMA2_EDC_COUNTER_CLEAR__DUMMY__SHIFT 0x0 +#define SDMA2_EDC_COUNTER_CLEAR__DUMMY_MASK 0x00000001L +//SDMA2_STATUS2_REG +#define SDMA2_STATUS2_REG__ID__SHIFT 0x0 +#define SDMA2_STATUS2_REG__F32_INSTR_PTR__SHIFT 0x3 +#define SDMA2_STATUS2_REG__CMD_OP__SHIFT 0x10 +#define SDMA2_STATUS2_REG__ID_MASK 0x00000007L +#define SDMA2_STATUS2_REG__F32_INSTR_PTR_MASK 0x0000FFF8L +#define SDMA2_STATUS2_REG__CMD_OP_MASK 0xFFFF0000L +//SDMA2_ATOMIC_CNTL +#define SDMA2_ATOMIC_CNTL__LOOP_TIMER__SHIFT 0x0 +#define SDMA2_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE__SHIFT 0x1f +#define SDMA2_ATOMIC_CNTL__LOOP_TIMER_MASK 0x7FFFFFFFL +#define SDMA2_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE_MASK 0x80000000L +//SDMA2_ATOMIC_PREOP_LO +#define SDMA2_ATOMIC_PREOP_LO__DATA__SHIFT 0x0 +#define SDMA2_ATOMIC_PREOP_LO__DATA_MASK 0xFFFFFFFFL +//SDMA2_ATOMIC_PREOP_HI +#define SDMA2_ATOMIC_PREOP_HI__DATA__SHIFT 0x0 +#define SDMA2_ATOMIC_PREOP_HI__DATA_MASK 0xFFFFFFFFL +//SDMA2_UTCL1_CNTL +#define SDMA2_UTCL1_CNTL__REDO_ENABLE__SHIFT 0x0 +#define SDMA2_UTCL1_CNTL__REDO_DELAY__SHIFT 0x1 +#define 
SDMA2_UTCL1_CNTL__REDO_WATERMK__SHIFT 0xb +#define SDMA2_UTCL1_CNTL__INVACK_DELAY__SHIFT 0xe +#define SDMA2_UTCL1_CNTL__REQL2_CREDIT__SHIFT 0x18 +#define SDMA2_UTCL1_CNTL__VADDR_WATERMK__SHIFT 0x1d +#define SDMA2_UTCL1_CNTL__REDO_ENABLE_MASK 0x00000001L +#define SDMA2_UTCL1_CNTL__REDO_DELAY_MASK 0x000007FEL +#define SDMA2_UTCL1_CNTL__REDO_WATERMK_MASK 0x00003800L +#define SDMA2_UTCL1_CNTL__INVACK_DELAY_MASK 0x00FFC000L +#define SDMA2_UTCL1_CNTL__REQL2_CREDIT_MASK 0x1F000000L +#define SDMA2_UTCL1_CNTL__VADDR_WATERMK_MASK 0xE0000000L +//SDMA2_UTCL1_WATERMK +#define SDMA2_UTCL1_WATERMK__REQMC_WATERMK__SHIFT 0x0 +#define SDMA2_UTCL1_WATERMK__REQPG_WATERMK__SHIFT 0x9 +#define SDMA2_UTCL1_WATERMK__INVREQ_WATERMK__SHIFT 0x11 +#define SDMA2_UTCL1_WATERMK__XNACK_WATERMK__SHIFT 0x19 +#define SDMA2_UTCL1_WATERMK__REQMC_WATERMK_MASK 0x000001FFL +#define SDMA2_UTCL1_WATERMK__REQPG_WATERMK_MASK 0x0001FE00L +#define SDMA2_UTCL1_WATERMK__INVREQ_WATERMK_MASK 0x01FE0000L +#define SDMA2_UTCL1_WATERMK__XNACK_WATERMK_MASK 0xFE000000L +//SDMA2_UTCL1_RD_STATUS +#define SDMA2_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0 +#define SDMA2_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1 +#define SDMA2_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2 +#define SDMA2_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3 +#define SDMA2_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4 +#define SDMA2_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5 +#define SDMA2_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6 +#define SDMA2_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7 +#define SDMA2_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8 +#define SDMA2_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9 +#define SDMA2_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa +#define SDMA2_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb +#define SDMA2_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc +#define SDMA2_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd +#define SDMA2_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe +#define SDMA2_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf +#define SDMA2_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10 +#define SDMA2_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11 +#define SDMA2_UTCL1_RD_STATUS__PAGE_FAULT__SHIFT 0x12 +#define SDMA2_UTCL1_RD_STATUS__PAGE_NULL__SHIFT 0x13 +#define SDMA2_UTCL1_RD_STATUS__REQL2_IDLE__SHIFT 0x14 +#define SDMA2_UTCL1_RD_STATUS__CE_L1_STALL__SHIFT 0x15 +#define SDMA2_UTCL1_RD_STATUS__NEXT_RD_VECTOR__SHIFT 0x16 +#define SDMA2_UTCL1_RD_STATUS__MERGE_STATE__SHIFT 0x1a +#define SDMA2_UTCL1_RD_STATUS__ADDR_RD_RTR__SHIFT 0x1d +#define SDMA2_UTCL1_RD_STATUS__WPTR_POLLING__SHIFT 0x1e +#define SDMA2_UTCL1_RD_STATUS__INVREQ_SIZE__SHIFT 0x1f +#define SDMA2_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L +#define SDMA2_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L +#define SDMA2_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L +#define SDMA2_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L +#define SDMA2_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L +#define SDMA2_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L +#define SDMA2_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L +#define SDMA2_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L +#define SDMA2_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L +#define SDMA2_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L +#define SDMA2_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L +#define 
SDMA2_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L +#define SDMA2_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L +#define SDMA2_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L +#define SDMA2_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L +#define SDMA2_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L +#define SDMA2_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L +#define SDMA2_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L +#define SDMA2_UTCL1_RD_STATUS__PAGE_FAULT_MASK 0x00040000L +#define SDMA2_UTCL1_RD_STATUS__PAGE_NULL_MASK 0x00080000L +#define SDMA2_UTCL1_RD_STATUS__REQL2_IDLE_MASK 0x00100000L +#define SDMA2_UTCL1_RD_STATUS__CE_L1_STALL_MASK 0x00200000L +#define SDMA2_UTCL1_RD_STATUS__NEXT_RD_VECTOR_MASK 0x03C00000L +#define SDMA2_UTCL1_RD_STATUS__MERGE_STATE_MASK 0x1C000000L +#define SDMA2_UTCL1_RD_STATUS__ADDR_RD_RTR_MASK 0x20000000L +#define SDMA2_UTCL1_RD_STATUS__WPTR_POLLING_MASK 0x40000000L +#define SDMA2_UTCL1_RD_STATUS__INVREQ_SIZE_MASK 0x80000000L +//SDMA2_UTCL1_WR_STATUS +#define SDMA2_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0 +#define SDMA2_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1 +#define SDMA2_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2 +#define SDMA2_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3 +#define SDMA2_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4 +#define SDMA2_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5 +#define SDMA2_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6 +#define SDMA2_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7 +#define SDMA2_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8 +#define SDMA2_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9 +#define SDMA2_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa +#define SDMA2_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb +#define SDMA2_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc +#define SDMA2_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd +#define SDMA2_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe +#define SDMA2_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf +#define SDMA2_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10 +#define SDMA2_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11 +#define SDMA2_UTCL1_WR_STATUS__PAGE_FAULT__SHIFT 0x12 +#define SDMA2_UTCL1_WR_STATUS__PAGE_NULL__SHIFT 0x13 +#define SDMA2_UTCL1_WR_STATUS__REQL2_IDLE__SHIFT 0x14 +#define SDMA2_UTCL1_WR_STATUS__F32_WR_RTR__SHIFT 0x15 +#define SDMA2_UTCL1_WR_STATUS__NEXT_WR_VECTOR__SHIFT 0x16 +#define SDMA2_UTCL1_WR_STATUS__MERGE_STATE__SHIFT 0x19 +#define SDMA2_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY__SHIFT 0x1c +#define SDMA2_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL__SHIFT 0x1d +#define SDMA2_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY__SHIFT 0x1e +#define SDMA2_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL__SHIFT 0x1f +#define SDMA2_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L +#define SDMA2_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L +#define SDMA2_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L +#define SDMA2_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L +#define SDMA2_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L +#define SDMA2_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L +#define SDMA2_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L +#define SDMA2_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L +#define SDMA2_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L +#define SDMA2_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L +#define 
SDMA2_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L +#define SDMA2_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L +#define SDMA2_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L +#define SDMA2_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L +#define SDMA2_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L +#define SDMA2_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L +#define SDMA2_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L +#define SDMA2_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L +#define SDMA2_UTCL1_WR_STATUS__PAGE_FAULT_MASK 0x00040000L +#define SDMA2_UTCL1_WR_STATUS__PAGE_NULL_MASK 0x00080000L +#define SDMA2_UTCL1_WR_STATUS__REQL2_IDLE_MASK 0x00100000L +#define SDMA2_UTCL1_WR_STATUS__F32_WR_RTR_MASK 0x00200000L +#define SDMA2_UTCL1_WR_STATUS__NEXT_WR_VECTOR_MASK 0x01C00000L +#define SDMA2_UTCL1_WR_STATUS__MERGE_STATE_MASK 0x0E000000L +#define SDMA2_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY_MASK 0x10000000L +#define SDMA2_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL_MASK 0x20000000L +#define SDMA2_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY_MASK 0x40000000L +#define SDMA2_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL_MASK 0x80000000L +//SDMA2_UTCL1_INV0 +#define SDMA2_UTCL1_INV0__INV_MIDDLE__SHIFT 0x0 +#define SDMA2_UTCL1_INV0__RD_TIMEOUT__SHIFT 0x1 +#define SDMA2_UTCL1_INV0__WR_TIMEOUT__SHIFT 0x2 +#define SDMA2_UTCL1_INV0__RD_IN_INVADR__SHIFT 0x3 +#define SDMA2_UTCL1_INV0__WR_IN_INVADR__SHIFT 0x4 +#define SDMA2_UTCL1_INV0__PAGE_NULL_SW__SHIFT 0x5 +#define SDMA2_UTCL1_INV0__XNACK_IS_INVADR__SHIFT 0x6 +#define SDMA2_UTCL1_INV0__INVREQ_ENABLE__SHIFT 0x7 +#define SDMA2_UTCL1_INV0__NACK_TIMEOUT_SW__SHIFT 0x8 +#define SDMA2_UTCL1_INV0__NFLUSH_INV_IDLE__SHIFT 0x9 +#define SDMA2_UTCL1_INV0__FLUSH_INV_IDLE__SHIFT 0xa +#define SDMA2_UTCL1_INV0__INV_FLUSHTYPE__SHIFT 0xb +#define SDMA2_UTCL1_INV0__INV_VMID_VEC__SHIFT 0xc +#define SDMA2_UTCL1_INV0__INV_ADDR_HI__SHIFT 0x1c +#define SDMA2_UTCL1_INV0__INV_MIDDLE_MASK 0x00000001L +#define SDMA2_UTCL1_INV0__RD_TIMEOUT_MASK 0x00000002L +#define SDMA2_UTCL1_INV0__WR_TIMEOUT_MASK 0x00000004L +#define SDMA2_UTCL1_INV0__RD_IN_INVADR_MASK 0x00000008L +#define SDMA2_UTCL1_INV0__WR_IN_INVADR_MASK 0x00000010L +#define SDMA2_UTCL1_INV0__PAGE_NULL_SW_MASK 0x00000020L +#define SDMA2_UTCL1_INV0__XNACK_IS_INVADR_MASK 0x00000040L +#define SDMA2_UTCL1_INV0__INVREQ_ENABLE_MASK 0x00000080L +#define SDMA2_UTCL1_INV0__NACK_TIMEOUT_SW_MASK 0x00000100L +#define SDMA2_UTCL1_INV0__NFLUSH_INV_IDLE_MASK 0x00000200L +#define SDMA2_UTCL1_INV0__FLUSH_INV_IDLE_MASK 0x00000400L +#define SDMA2_UTCL1_INV0__INV_FLUSHTYPE_MASK 0x00000800L +#define SDMA2_UTCL1_INV0__INV_VMID_VEC_MASK 0x0FFFF000L +#define SDMA2_UTCL1_INV0__INV_ADDR_HI_MASK 0xF0000000L +//SDMA2_UTCL1_INV1 +#define SDMA2_UTCL1_INV1__INV_ADDR_LO__SHIFT 0x0 +#define SDMA2_UTCL1_INV1__INV_ADDR_LO_MASK 0xFFFFFFFFL +//SDMA2_UTCL1_INV2 +#define SDMA2_UTCL1_INV2__INV_NFLUSH_VMID_VEC__SHIFT 0x0 +#define SDMA2_UTCL1_INV2__INV_NFLUSH_VMID_VEC_MASK 0xFFFFFFFFL +//SDMA2_UTCL1_RD_XNACK0 +#define SDMA2_UTCL1_RD_XNACK0__XNACK_ADDR_LO__SHIFT 0x0 +#define SDMA2_UTCL1_RD_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL +//SDMA2_UTCL1_RD_XNACK1 +#define SDMA2_UTCL1_RD_XNACK1__XNACK_ADDR_HI__SHIFT 0x0 +#define SDMA2_UTCL1_RD_XNACK1__XNACK_VMID__SHIFT 0x4 +#define SDMA2_UTCL1_RD_XNACK1__XNACK_VECTOR__SHIFT 0x8 +#define SDMA2_UTCL1_RD_XNACK1__IS_XNACK__SHIFT 0x1a +#define SDMA2_UTCL1_RD_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL +#define SDMA2_UTCL1_RD_XNACK1__XNACK_VMID_MASK 0x000000F0L +#define 
SDMA2_UTCL1_RD_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L +#define SDMA2_UTCL1_RD_XNACK1__IS_XNACK_MASK 0x0C000000L +//SDMA2_UTCL1_WR_XNACK0 +#define SDMA2_UTCL1_WR_XNACK0__XNACK_ADDR_LO__SHIFT 0x0 +#define SDMA2_UTCL1_WR_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL +//SDMA2_UTCL1_WR_XNACK1 +#define SDMA2_UTCL1_WR_XNACK1__XNACK_ADDR_HI__SHIFT 0x0 +#define SDMA2_UTCL1_WR_XNACK1__XNACK_VMID__SHIFT 0x4 +#define SDMA2_UTCL1_WR_XNACK1__XNACK_VECTOR__SHIFT 0x8 +#define SDMA2_UTCL1_WR_XNACK1__IS_XNACK__SHIFT 0x1a +#define SDMA2_UTCL1_WR_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL +#define SDMA2_UTCL1_WR_XNACK1__XNACK_VMID_MASK 0x000000F0L +#define SDMA2_UTCL1_WR_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L +#define SDMA2_UTCL1_WR_XNACK1__IS_XNACK_MASK 0x0C000000L +//SDMA2_UTCL1_TIMEOUT +#define SDMA2_UTCL1_TIMEOUT__RD_XNACK_LIMIT__SHIFT 0x0 +#define SDMA2_UTCL1_TIMEOUT__WR_XNACK_LIMIT__SHIFT 0x10 +#define SDMA2_UTCL1_TIMEOUT__RD_XNACK_LIMIT_MASK 0x0000FFFFL +#define SDMA2_UTCL1_TIMEOUT__WR_XNACK_LIMIT_MASK 0xFFFF0000L +//SDMA2_UTCL1_PAGE +#define SDMA2_UTCL1_PAGE__VM_HOLE__SHIFT 0x0 +#define SDMA2_UTCL1_PAGE__REQ_TYPE__SHIFT 0x1 +#define SDMA2_UTCL1_PAGE__USE_MTYPE__SHIFT 0x6 +#define SDMA2_UTCL1_PAGE__USE_PT_SNOOP__SHIFT 0x9 +#define SDMA2_UTCL1_PAGE__VM_HOLE_MASK 0x00000001L +#define SDMA2_UTCL1_PAGE__REQ_TYPE_MASK 0x0000001EL +#define SDMA2_UTCL1_PAGE__USE_MTYPE_MASK 0x000001C0L +#define SDMA2_UTCL1_PAGE__USE_PT_SNOOP_MASK 0x00000200L +//SDMA2_POWER_CNTL_IDLE +#define SDMA2_POWER_CNTL_IDLE__DELAY0__SHIFT 0x0 +#define SDMA2_POWER_CNTL_IDLE__DELAY1__SHIFT 0x10 +#define SDMA2_POWER_CNTL_IDLE__DELAY2__SHIFT 0x18 +#define SDMA2_POWER_CNTL_IDLE__DELAY0_MASK 0x0000FFFFL +#define SDMA2_POWER_CNTL_IDLE__DELAY1_MASK 0x00FF0000L +#define SDMA2_POWER_CNTL_IDLE__DELAY2_MASK 0xFF000000L +//SDMA2_RELAX_ORDERING_LUT +#define SDMA2_RELAX_ORDERING_LUT__RESERVED0__SHIFT 0x0 +#define SDMA2_RELAX_ORDERING_LUT__COPY__SHIFT 0x1 +#define SDMA2_RELAX_ORDERING_LUT__WRITE__SHIFT 0x2 +#define SDMA2_RELAX_ORDERING_LUT__RESERVED3__SHIFT 0x3 +#define SDMA2_RELAX_ORDERING_LUT__RESERVED4__SHIFT 0x4 +#define SDMA2_RELAX_ORDERING_LUT__FENCE__SHIFT 0x5 +#define SDMA2_RELAX_ORDERING_LUT__RESERVED76__SHIFT 0x6 +#define SDMA2_RELAX_ORDERING_LUT__POLL_MEM__SHIFT 0x8 +#define SDMA2_RELAX_ORDERING_LUT__COND_EXE__SHIFT 0x9 +#define SDMA2_RELAX_ORDERING_LUT__ATOMIC__SHIFT 0xa +#define SDMA2_RELAX_ORDERING_LUT__CONST_FILL__SHIFT 0xb +#define SDMA2_RELAX_ORDERING_LUT__PTEPDE__SHIFT 0xc +#define SDMA2_RELAX_ORDERING_LUT__TIMESTAMP__SHIFT 0xd +#define SDMA2_RELAX_ORDERING_LUT__RESERVED__SHIFT 0xe +#define SDMA2_RELAX_ORDERING_LUT__WORLD_SWITCH__SHIFT 0x1b +#define SDMA2_RELAX_ORDERING_LUT__RPTR_WRB__SHIFT 0x1c +#define SDMA2_RELAX_ORDERING_LUT__WPTR_POLL__SHIFT 0x1d +#define SDMA2_RELAX_ORDERING_LUT__IB_FETCH__SHIFT 0x1e +#define SDMA2_RELAX_ORDERING_LUT__RB_FETCH__SHIFT 0x1f +#define SDMA2_RELAX_ORDERING_LUT__RESERVED0_MASK 0x00000001L +#define SDMA2_RELAX_ORDERING_LUT__COPY_MASK 0x00000002L +#define SDMA2_RELAX_ORDERING_LUT__WRITE_MASK 0x00000004L +#define SDMA2_RELAX_ORDERING_LUT__RESERVED3_MASK 0x00000008L +#define SDMA2_RELAX_ORDERING_LUT__RESERVED4_MASK 0x00000010L +#define SDMA2_RELAX_ORDERING_LUT__FENCE_MASK 0x00000020L +#define SDMA2_RELAX_ORDERING_LUT__RESERVED76_MASK 0x000000C0L +#define SDMA2_RELAX_ORDERING_LUT__POLL_MEM_MASK 0x00000100L +#define SDMA2_RELAX_ORDERING_LUT__COND_EXE_MASK 0x00000200L +#define SDMA2_RELAX_ORDERING_LUT__ATOMIC_MASK 0x00000400L +#define SDMA2_RELAX_ORDERING_LUT__CONST_FILL_MASK 0x00000800L +#define 
SDMA2_RELAX_ORDERING_LUT__PTEPDE_MASK 0x00001000L +#define SDMA2_RELAX_ORDERING_LUT__TIMESTAMP_MASK 0x00002000L +#define SDMA2_RELAX_ORDERING_LUT__RESERVED_MASK 0x07FFC000L +#define SDMA2_RELAX_ORDERING_LUT__WORLD_SWITCH_MASK 0x08000000L +#define SDMA2_RELAX_ORDERING_LUT__RPTR_WRB_MASK 0x10000000L +#define SDMA2_RELAX_ORDERING_LUT__WPTR_POLL_MASK 0x20000000L +#define SDMA2_RELAX_ORDERING_LUT__IB_FETCH_MASK 0x40000000L +#define SDMA2_RELAX_ORDERING_LUT__RB_FETCH_MASK 0x80000000L +//SDMA2_CHICKEN_BITS_2 +#define SDMA2_CHICKEN_BITS_2__F32_CMD_PROC_DELAY__SHIFT 0x0 +#define SDMA2_CHICKEN_BITS_2__F32_CMD_PROC_DELAY_MASK 0x0000000FL +//SDMA2_STATUS3_REG +#define SDMA2_STATUS3_REG__CMD_OP_STATUS__SHIFT 0x0 +#define SDMA2_STATUS3_REG__PREV_VM_CMD__SHIFT 0x10 +#define SDMA2_STATUS3_REG__EXCEPTION_IDLE__SHIFT 0x14 +#define SDMA2_STATUS3_REG__QUEUE_ID_MATCH__SHIFT 0x15 +#define SDMA2_STATUS3_REG__INT_QUEUE_ID__SHIFT 0x16 +#define SDMA2_STATUS3_REG__CMD_OP_STATUS_MASK 0x0000FFFFL +#define SDMA2_STATUS3_REG__PREV_VM_CMD_MASK 0x000F0000L +#define SDMA2_STATUS3_REG__EXCEPTION_IDLE_MASK 0x00100000L +#define SDMA2_STATUS3_REG__QUEUE_ID_MATCH_MASK 0x00200000L +#define SDMA2_STATUS3_REG__INT_QUEUE_ID_MASK 0x03C00000L +//SDMA2_PHYSICAL_ADDR_LO +#define SDMA2_PHYSICAL_ADDR_LO__D_VALID__SHIFT 0x0 +#define SDMA2_PHYSICAL_ADDR_LO__DIRTY__SHIFT 0x1 +#define SDMA2_PHYSICAL_ADDR_LO__PHY_VALID__SHIFT 0x2 +#define SDMA2_PHYSICAL_ADDR_LO__ADDR__SHIFT 0xc +#define SDMA2_PHYSICAL_ADDR_LO__D_VALID_MASK 0x00000001L +#define SDMA2_PHYSICAL_ADDR_LO__DIRTY_MASK 0x00000002L +#define SDMA2_PHYSICAL_ADDR_LO__PHY_VALID_MASK 0x00000004L +#define SDMA2_PHYSICAL_ADDR_LO__ADDR_MASK 0xFFFFF000L +//SDMA2_PHYSICAL_ADDR_HI +#define SDMA2_PHYSICAL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA2_PHYSICAL_ADDR_HI__ADDR_MASK 0x0000FFFFL +//SDMA2_PHASE2_QUANTUM +#define SDMA2_PHASE2_QUANTUM__UNIT__SHIFT 0x0 +#define SDMA2_PHASE2_QUANTUM__VALUE__SHIFT 0x8 +#define SDMA2_PHASE2_QUANTUM__PREFER__SHIFT 0x1e +#define SDMA2_PHASE2_QUANTUM__UNIT_MASK 0x0000000FL +#define SDMA2_PHASE2_QUANTUM__VALUE_MASK 0x00FFFF00L +#define SDMA2_PHASE2_QUANTUM__PREFER_MASK 0x40000000L +//SDMA2_ERROR_LOG +#define SDMA2_ERROR_LOG__OVERRIDE__SHIFT 0x0 +#define SDMA2_ERROR_LOG__STATUS__SHIFT 0x10 +#define SDMA2_ERROR_LOG__OVERRIDE_MASK 0x0000FFFFL +#define SDMA2_ERROR_LOG__STATUS_MASK 0xFFFF0000L +//SDMA2_PUB_DUMMY_REG0 +#define SDMA2_PUB_DUMMY_REG0__VALUE__SHIFT 0x0 +#define SDMA2_PUB_DUMMY_REG0__VALUE_MASK 0xFFFFFFFFL +//SDMA2_PUB_DUMMY_REG1 +#define SDMA2_PUB_DUMMY_REG1__VALUE__SHIFT 0x0 +#define SDMA2_PUB_DUMMY_REG1__VALUE_MASK 0xFFFFFFFFL +//SDMA2_PUB_DUMMY_REG2 +#define SDMA2_PUB_DUMMY_REG2__VALUE__SHIFT 0x0 +#define SDMA2_PUB_DUMMY_REG2__VALUE_MASK 0xFFFFFFFFL +//SDMA2_PUB_DUMMY_REG3 +#define SDMA2_PUB_DUMMY_REG3__VALUE__SHIFT 0x0 +#define SDMA2_PUB_DUMMY_REG3__VALUE_MASK 0xFFFFFFFFL +//SDMA2_F32_COUNTER +#define SDMA2_F32_COUNTER__VALUE__SHIFT 0x0 +#define SDMA2_F32_COUNTER__VALUE_MASK 0xFFFFFFFFL +//SDMA2_UNBREAKABLE +#define SDMA2_UNBREAKABLE__VALUE__SHIFT 0x0 +#define SDMA2_UNBREAKABLE__VALUE_MASK 0x00000001L +//SDMA2_PERFMON_CNTL +#define SDMA2_PERFMON_CNTL__PERF_ENABLE0__SHIFT 0x0 +#define SDMA2_PERFMON_CNTL__PERF_CLEAR0__SHIFT 0x1 +#define SDMA2_PERFMON_CNTL__PERF_SEL0__SHIFT 0x2 +#define SDMA2_PERFMON_CNTL__PERF_ENABLE1__SHIFT 0xa +#define SDMA2_PERFMON_CNTL__PERF_CLEAR1__SHIFT 0xb +#define SDMA2_PERFMON_CNTL__PERF_SEL1__SHIFT 0xc +#define SDMA2_PERFMON_CNTL__PERF_ENABLE0_MASK 0x00000001L +#define SDMA2_PERFMON_CNTL__PERF_CLEAR0_MASK 0x00000002L +#define 
SDMA2_PERFMON_CNTL__PERF_SEL0_MASK 0x000003FCL +#define SDMA2_PERFMON_CNTL__PERF_ENABLE1_MASK 0x00000400L +#define SDMA2_PERFMON_CNTL__PERF_CLEAR1_MASK 0x00000800L +#define SDMA2_PERFMON_CNTL__PERF_SEL1_MASK 0x000FF000L +//SDMA2_PERFCOUNTER0_RESULT +#define SDMA2_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT 0x0 +#define SDMA2_PERFCOUNTER0_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL +//SDMA2_PERFCOUNTER1_RESULT +#define SDMA2_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT 0x0 +#define SDMA2_PERFCOUNTER1_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL +//SDMA2_PERFCOUNTER_TAG_DELAY_RANGE +#define SDMA2_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW__SHIFT 0x0 +#define SDMA2_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH__SHIFT 0xe +#define SDMA2_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW__SHIFT 0x1c +#define SDMA2_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW_MASK 0x00003FFFL +#define SDMA2_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH_MASK 0x0FFFC000L +#define SDMA2_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW_MASK 0x10000000L +//SDMA2_CRD_CNTL +#define SDMA2_CRD_CNTL__MC_WRREQ_CREDIT__SHIFT 0x7 +#define SDMA2_CRD_CNTL__MC_RDREQ_CREDIT__SHIFT 0xd +#define SDMA2_CRD_CNTL__MC_WRREQ_CREDIT_MASK 0x00001F80L +#define SDMA2_CRD_CNTL__MC_RDREQ_CREDIT_MASK 0x0007E000L +//SDMA2_GPU_IOV_VIOLATION_LOG +#define SDMA2_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0 +#define SDMA2_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1 +#define SDMA2_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2 +#define SDMA2_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION__SHIFT 0x14 +#define SDMA2_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x15 +#define SDMA2_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x16 +#define SDMA2_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L +#define SDMA2_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L +#define SDMA2_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x000FFFFCL +#define SDMA2_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION_MASK 0x00100000L +#define SDMA2_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00200000L +#define SDMA2_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x03C00000L +//SDMA2_ULV_CNTL +#define SDMA2_ULV_CNTL__HYSTERESIS__SHIFT 0x0 +#define SDMA2_ULV_CNTL__ENTER_ULV_INT_CLR__SHIFT 0x1b +#define SDMA2_ULV_CNTL__EXIT_ULV_INT_CLR__SHIFT 0x1c +#define SDMA2_ULV_CNTL__ENTER_ULV_INT__SHIFT 0x1d +#define SDMA2_ULV_CNTL__EXIT_ULV_INT__SHIFT 0x1e +#define SDMA2_ULV_CNTL__ULV_STATUS__SHIFT 0x1f +#define SDMA2_ULV_CNTL__HYSTERESIS_MASK 0x0000001FL +#define SDMA2_ULV_CNTL__ENTER_ULV_INT_CLR_MASK 0x08000000L +#define SDMA2_ULV_CNTL__EXIT_ULV_INT_CLR_MASK 0x10000000L +#define SDMA2_ULV_CNTL__ENTER_ULV_INT_MASK 0x20000000L +#define SDMA2_ULV_CNTL__EXIT_ULV_INT_MASK 0x40000000L +#define SDMA2_ULV_CNTL__ULV_STATUS_MASK 0x80000000L +//SDMA2_EA_DBIT_ADDR_DATA +#define SDMA2_EA_DBIT_ADDR_DATA__VALUE__SHIFT 0x0 +#define SDMA2_EA_DBIT_ADDR_DATA__VALUE_MASK 0xFFFFFFFFL +//SDMA2_EA_DBIT_ADDR_INDEX +#define SDMA2_EA_DBIT_ADDR_INDEX__VALUE__SHIFT 0x0 +#define SDMA2_EA_DBIT_ADDR_INDEX__VALUE_MASK 0x00000007L +//SDMA2_GPU_IOV_VIOLATION_LOG2 +#define SDMA2_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID__SHIFT 0x0 +#define SDMA2_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID_MASK 0x000000FFL +//SDMA2_GFX_RB_CNTL +#define SDMA2_GFX_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA2_GFX_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA2_GFX_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA2_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA2_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA2_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA2_GFX_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define 
SDMA2_GFX_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA2_GFX_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA2_GFX_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA2_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA2_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA2_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA2_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA2_GFX_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA2_GFX_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA2_GFX_RB_BASE +#define SDMA2_GFX_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA2_GFX_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA2_GFX_RB_BASE_HI +#define SDMA2_GFX_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA2_GFX_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA2_GFX_RB_RPTR +#define SDMA2_GFX_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA2_GFX_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA2_GFX_RB_RPTR_HI +#define SDMA2_GFX_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA2_GFX_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA2_GFX_RB_WPTR +#define SDMA2_GFX_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA2_GFX_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA2_GFX_RB_WPTR_HI +#define SDMA2_GFX_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA2_GFX_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA2_GFX_RB_WPTR_POLL_CNTL +#define SDMA2_GFX_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA2_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA2_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA2_GFX_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA2_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA2_GFX_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA2_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA2_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA2_GFX_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA2_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA2_GFX_RB_RPTR_ADDR_HI +#define SDMA2_GFX_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA2_GFX_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA2_GFX_RB_RPTR_ADDR_LO +#define SDMA2_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA2_GFX_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA2_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA2_GFX_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA2_GFX_IB_CNTL +#define SDMA2_GFX_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA2_GFX_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA2_GFX_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA2_GFX_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA2_GFX_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA2_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA2_GFX_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA2_GFX_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA2_GFX_IB_RPTR +#define SDMA2_GFX_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA2_GFX_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA2_GFX_IB_OFFSET +#define SDMA2_GFX_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA2_GFX_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA2_GFX_IB_BASE_LO +#define SDMA2_GFX_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA2_GFX_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA2_GFX_IB_BASE_HI +#define SDMA2_GFX_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA2_GFX_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA2_GFX_IB_SIZE +#define SDMA2_GFX_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA2_GFX_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA2_GFX_SKIP_CNTL +#define SDMA2_GFX_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA2_GFX_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL 
+//SDMA2_GFX_CONTEXT_STATUS +#define SDMA2_GFX_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA2_GFX_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA2_GFX_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA2_GFX_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA2_GFX_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA2_GFX_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA2_GFX_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA2_GFX_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA2_GFX_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA2_GFX_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA2_GFX_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA2_GFX_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA2_GFX_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA2_GFX_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA2_GFX_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA2_GFX_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA2_GFX_DOORBELL +#define SDMA2_GFX_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA2_GFX_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA2_GFX_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA2_GFX_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA2_GFX_CONTEXT_CNTL +#define SDMA2_GFX_CONTEXT_CNTL__RESUME_CTX__SHIFT 0x10 +#define SDMA2_GFX_CONTEXT_CNTL__RESUME_CTX_MASK 0x00010000L +//SDMA2_GFX_STATUS +#define SDMA2_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA2_GFX_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA2_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA2_GFX_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA2_GFX_DOORBELL_LOG +#define SDMA2_GFX_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA2_GFX_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA2_GFX_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA2_GFX_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA2_GFX_WATERMARK +#define SDMA2_GFX_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA2_GFX_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA2_GFX_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA2_GFX_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA2_GFX_DOORBELL_OFFSET +#define SDMA2_GFX_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA2_GFX_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA2_GFX_CSA_ADDR_LO +#define SDMA2_GFX_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA2_GFX_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA2_GFX_CSA_ADDR_HI +#define SDMA2_GFX_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA2_GFX_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA2_GFX_IB_SUB_REMAIN +#define SDMA2_GFX_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA2_GFX_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA2_GFX_PREEMPT +#define SDMA2_GFX_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA2_GFX_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA2_GFX_DUMMY_REG +#define SDMA2_GFX_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA2_GFX_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA2_GFX_RB_WPTR_POLL_ADDR_HI +#define SDMA2_GFX_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA2_GFX_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA2_GFX_RB_WPTR_POLL_ADDR_LO +#define SDMA2_GFX_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA2_GFX_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA2_GFX_RB_AQL_CNTL +#define SDMA2_GFX_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA2_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA2_GFX_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA2_GFX_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA2_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define 
SDMA2_GFX_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA2_GFX_MINOR_PTR_UPDATE +#define SDMA2_GFX_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA2_GFX_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA2_GFX_MIDCMD_DATA0 +#define SDMA2_GFX_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA2_GFX_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA2_GFX_MIDCMD_DATA1 +#define SDMA2_GFX_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA2_GFX_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA2_GFX_MIDCMD_DATA2 +#define SDMA2_GFX_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA2_GFX_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA2_GFX_MIDCMD_DATA3 +#define SDMA2_GFX_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA2_GFX_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA2_GFX_MIDCMD_DATA4 +#define SDMA2_GFX_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA2_GFX_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA2_GFX_MIDCMD_DATA5 +#define SDMA2_GFX_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA2_GFX_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA2_GFX_MIDCMD_DATA6 +#define SDMA2_GFX_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA2_GFX_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA2_GFX_MIDCMD_DATA7 +#define SDMA2_GFX_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA2_GFX_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA2_GFX_MIDCMD_DATA8 +#define SDMA2_GFX_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA2_GFX_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA2_GFX_MIDCMD_CNTL +#define SDMA2_GFX_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA2_GFX_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA2_GFX_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA2_GFX_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA2_GFX_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA2_GFX_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA2_GFX_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA2_GFX_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA2_PAGE_RB_CNTL +#define SDMA2_PAGE_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA2_PAGE_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA2_PAGE_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA2_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA2_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA2_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA2_PAGE_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA2_PAGE_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA2_PAGE_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA2_PAGE_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA2_PAGE_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA2_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA2_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA2_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA2_PAGE_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA2_PAGE_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA2_PAGE_RB_BASE +#define SDMA2_PAGE_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA2_PAGE_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA2_PAGE_RB_BASE_HI +#define SDMA2_PAGE_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA2_PAGE_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA2_PAGE_RB_RPTR +#define SDMA2_PAGE_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA2_PAGE_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA2_PAGE_RB_RPTR_HI +#define SDMA2_PAGE_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA2_PAGE_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA2_PAGE_RB_WPTR +#define SDMA2_PAGE_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA2_PAGE_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA2_PAGE_RB_WPTR_HI +#define SDMA2_PAGE_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define 
SDMA2_PAGE_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA2_PAGE_RB_WPTR_POLL_CNTL +#define SDMA2_PAGE_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA2_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA2_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA2_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA2_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA2_PAGE_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA2_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA2_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA2_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA2_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA2_PAGE_RB_RPTR_ADDR_HI +#define SDMA2_PAGE_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA2_PAGE_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA2_PAGE_RB_RPTR_ADDR_LO +#define SDMA2_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA2_PAGE_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA2_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA2_PAGE_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA2_PAGE_IB_CNTL +#define SDMA2_PAGE_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA2_PAGE_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA2_PAGE_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA2_PAGE_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA2_PAGE_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA2_PAGE_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA2_PAGE_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA2_PAGE_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA2_PAGE_IB_RPTR +#define SDMA2_PAGE_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA2_PAGE_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA2_PAGE_IB_OFFSET +#define SDMA2_PAGE_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA2_PAGE_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA2_PAGE_IB_BASE_LO +#define SDMA2_PAGE_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA2_PAGE_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA2_PAGE_IB_BASE_HI +#define SDMA2_PAGE_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA2_PAGE_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA2_PAGE_IB_SIZE +#define SDMA2_PAGE_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA2_PAGE_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA2_PAGE_SKIP_CNTL +#define SDMA2_PAGE_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA2_PAGE_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA2_PAGE_CONTEXT_STATUS +#define SDMA2_PAGE_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA2_PAGE_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA2_PAGE_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA2_PAGE_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA2_PAGE_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA2_PAGE_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA2_PAGE_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA2_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA2_PAGE_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA2_PAGE_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA2_PAGE_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA2_PAGE_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA2_PAGE_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA2_PAGE_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA2_PAGE_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA2_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA2_PAGE_DOORBELL +#define SDMA2_PAGE_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA2_PAGE_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA2_PAGE_DOORBELL__ENABLE_MASK 0x10000000L +#define 
SDMA2_PAGE_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA2_PAGE_STATUS +#define SDMA2_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA2_PAGE_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA2_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA2_PAGE_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA2_PAGE_DOORBELL_LOG +#define SDMA2_PAGE_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA2_PAGE_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA2_PAGE_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA2_PAGE_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA2_PAGE_WATERMARK +#define SDMA2_PAGE_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA2_PAGE_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA2_PAGE_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA2_PAGE_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA2_PAGE_DOORBELL_OFFSET +#define SDMA2_PAGE_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA2_PAGE_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA2_PAGE_CSA_ADDR_LO +#define SDMA2_PAGE_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA2_PAGE_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA2_PAGE_CSA_ADDR_HI +#define SDMA2_PAGE_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA2_PAGE_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA2_PAGE_IB_SUB_REMAIN +#define SDMA2_PAGE_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA2_PAGE_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA2_PAGE_PREEMPT +#define SDMA2_PAGE_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA2_PAGE_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA2_PAGE_DUMMY_REG +#define SDMA2_PAGE_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA2_PAGE_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA2_PAGE_RB_WPTR_POLL_ADDR_HI +#define SDMA2_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA2_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA2_PAGE_RB_WPTR_POLL_ADDR_LO +#define SDMA2_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA2_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA2_PAGE_RB_AQL_CNTL +#define SDMA2_PAGE_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA2_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA2_PAGE_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA2_PAGE_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA2_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA2_PAGE_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA2_PAGE_MINOR_PTR_UPDATE +#define SDMA2_PAGE_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA2_PAGE_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA2_PAGE_MIDCMD_DATA0 +#define SDMA2_PAGE_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA2_PAGE_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA2_PAGE_MIDCMD_DATA1 +#define SDMA2_PAGE_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA2_PAGE_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA2_PAGE_MIDCMD_DATA2 +#define SDMA2_PAGE_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA2_PAGE_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA2_PAGE_MIDCMD_DATA3 +#define SDMA2_PAGE_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA2_PAGE_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA2_PAGE_MIDCMD_DATA4 +#define SDMA2_PAGE_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA2_PAGE_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA2_PAGE_MIDCMD_DATA5 +#define SDMA2_PAGE_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA2_PAGE_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA2_PAGE_MIDCMD_DATA6 +#define SDMA2_PAGE_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA2_PAGE_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA2_PAGE_MIDCMD_DATA7 +#define SDMA2_PAGE_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA2_PAGE_MIDCMD_DATA7__DATA7_MASK 
0xFFFFFFFFL +//SDMA2_PAGE_MIDCMD_DATA8 +#define SDMA2_PAGE_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA2_PAGE_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA2_PAGE_MIDCMD_CNTL +#define SDMA2_PAGE_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA2_PAGE_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA2_PAGE_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA2_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA2_PAGE_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA2_PAGE_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA2_PAGE_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA2_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA2_RLC0_RB_CNTL +#define SDMA2_RLC0_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA2_RLC0_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA2_RLC0_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA2_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA2_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA2_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA2_RLC0_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA2_RLC0_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA2_RLC0_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA2_RLC0_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA2_RLC0_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA2_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA2_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA2_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA2_RLC0_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA2_RLC0_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA2_RLC0_RB_BASE +#define SDMA2_RLC0_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA2_RLC0_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA2_RLC0_RB_BASE_HI +#define SDMA2_RLC0_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA2_RLC0_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA2_RLC0_RB_RPTR +#define SDMA2_RLC0_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA2_RLC0_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA2_RLC0_RB_RPTR_HI +#define SDMA2_RLC0_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA2_RLC0_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA2_RLC0_RB_WPTR +#define SDMA2_RLC0_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA2_RLC0_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA2_RLC0_RB_WPTR_HI +#define SDMA2_RLC0_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA2_RLC0_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA2_RLC0_RB_WPTR_POLL_CNTL +#define SDMA2_RLC0_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA2_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA2_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA2_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA2_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA2_RLC0_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA2_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA2_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA2_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA2_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA2_RLC0_RB_RPTR_ADDR_HI +#define SDMA2_RLC0_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA2_RLC0_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA2_RLC0_RB_RPTR_ADDR_LO +#define SDMA2_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA2_RLC0_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA2_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA2_RLC0_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA2_RLC0_IB_CNTL +#define SDMA2_RLC0_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define 
SDMA2_RLC0_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA2_RLC0_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA2_RLC0_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA2_RLC0_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA2_RLC0_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA2_RLC0_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA2_RLC0_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA2_RLC0_IB_RPTR +#define SDMA2_RLC0_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA2_RLC0_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA2_RLC0_IB_OFFSET +#define SDMA2_RLC0_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA2_RLC0_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA2_RLC0_IB_BASE_LO +#define SDMA2_RLC0_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA2_RLC0_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA2_RLC0_IB_BASE_HI +#define SDMA2_RLC0_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA2_RLC0_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA2_RLC0_IB_SIZE +#define SDMA2_RLC0_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA2_RLC0_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA2_RLC0_SKIP_CNTL +#define SDMA2_RLC0_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA2_RLC0_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA2_RLC0_CONTEXT_STATUS +#define SDMA2_RLC0_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA2_RLC0_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA2_RLC0_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA2_RLC0_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA2_RLC0_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA2_RLC0_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA2_RLC0_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA2_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA2_RLC0_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA2_RLC0_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA2_RLC0_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA2_RLC0_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA2_RLC0_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA2_RLC0_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA2_RLC0_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA2_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA2_RLC0_DOORBELL +#define SDMA2_RLC0_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA2_RLC0_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA2_RLC0_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA2_RLC0_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA2_RLC0_STATUS +#define SDMA2_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA2_RLC0_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA2_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA2_RLC0_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA2_RLC0_DOORBELL_LOG +#define SDMA2_RLC0_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA2_RLC0_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA2_RLC0_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA2_RLC0_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA2_RLC0_WATERMARK +#define SDMA2_RLC0_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA2_RLC0_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA2_RLC0_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA2_RLC0_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA2_RLC0_DOORBELL_OFFSET +#define SDMA2_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA2_RLC0_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA2_RLC0_CSA_ADDR_LO +#define SDMA2_RLC0_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA2_RLC0_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA2_RLC0_CSA_ADDR_HI +#define SDMA2_RLC0_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA2_RLC0_CSA_ADDR_HI__ADDR_MASK 
0xFFFFFFFFL +//SDMA2_RLC0_IB_SUB_REMAIN +#define SDMA2_RLC0_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA2_RLC0_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA2_RLC0_PREEMPT +#define SDMA2_RLC0_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA2_RLC0_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA2_RLC0_DUMMY_REG +#define SDMA2_RLC0_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA2_RLC0_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA2_RLC0_RB_WPTR_POLL_ADDR_HI +#define SDMA2_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA2_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA2_RLC0_RB_WPTR_POLL_ADDR_LO +#define SDMA2_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA2_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA2_RLC0_RB_AQL_CNTL +#define SDMA2_RLC0_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA2_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA2_RLC0_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA2_RLC0_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA2_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA2_RLC0_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA2_RLC0_MINOR_PTR_UPDATE +#define SDMA2_RLC0_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA2_RLC0_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA2_RLC0_MIDCMD_DATA0 +#define SDMA2_RLC0_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA2_RLC0_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA2_RLC0_MIDCMD_DATA1 +#define SDMA2_RLC0_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA2_RLC0_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA2_RLC0_MIDCMD_DATA2 +#define SDMA2_RLC0_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA2_RLC0_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA2_RLC0_MIDCMD_DATA3 +#define SDMA2_RLC0_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA2_RLC0_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA2_RLC0_MIDCMD_DATA4 +#define SDMA2_RLC0_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA2_RLC0_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA2_RLC0_MIDCMD_DATA5 +#define SDMA2_RLC0_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA2_RLC0_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA2_RLC0_MIDCMD_DATA6 +#define SDMA2_RLC0_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA2_RLC0_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA2_RLC0_MIDCMD_DATA7 +#define SDMA2_RLC0_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA2_RLC0_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA2_RLC0_MIDCMD_DATA8 +#define SDMA2_RLC0_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA2_RLC0_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA2_RLC0_MIDCMD_CNTL +#define SDMA2_RLC0_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA2_RLC0_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA2_RLC0_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA2_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA2_RLC0_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA2_RLC0_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA2_RLC0_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA2_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA2_RLC1_RB_CNTL +#define SDMA2_RLC1_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA2_RLC1_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA2_RLC1_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA2_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA2_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA2_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA2_RLC1_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA2_RLC1_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA2_RLC1_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA2_RLC1_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define 
SDMA2_RLC1_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA2_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA2_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA2_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA2_RLC1_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA2_RLC1_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA2_RLC1_RB_BASE +#define SDMA2_RLC1_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA2_RLC1_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA2_RLC1_RB_BASE_HI +#define SDMA2_RLC1_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA2_RLC1_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA2_RLC1_RB_RPTR +#define SDMA2_RLC1_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA2_RLC1_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA2_RLC1_RB_RPTR_HI +#define SDMA2_RLC1_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA2_RLC1_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA2_RLC1_RB_WPTR +#define SDMA2_RLC1_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA2_RLC1_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA2_RLC1_RB_WPTR_HI +#define SDMA2_RLC1_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA2_RLC1_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA2_RLC1_RB_WPTR_POLL_CNTL +#define SDMA2_RLC1_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA2_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA2_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA2_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA2_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA2_RLC1_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA2_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA2_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA2_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA2_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA2_RLC1_RB_RPTR_ADDR_HI +#define SDMA2_RLC1_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA2_RLC1_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA2_RLC1_RB_RPTR_ADDR_LO +#define SDMA2_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA2_RLC1_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA2_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA2_RLC1_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA2_RLC1_IB_CNTL +#define SDMA2_RLC1_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA2_RLC1_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA2_RLC1_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA2_RLC1_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA2_RLC1_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA2_RLC1_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA2_RLC1_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA2_RLC1_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA2_RLC1_IB_RPTR +#define SDMA2_RLC1_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA2_RLC1_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA2_RLC1_IB_OFFSET +#define SDMA2_RLC1_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA2_RLC1_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA2_RLC1_IB_BASE_LO +#define SDMA2_RLC1_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA2_RLC1_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA2_RLC1_IB_BASE_HI +#define SDMA2_RLC1_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA2_RLC1_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA2_RLC1_IB_SIZE +#define SDMA2_RLC1_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA2_RLC1_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA2_RLC1_SKIP_CNTL +#define SDMA2_RLC1_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA2_RLC1_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA2_RLC1_CONTEXT_STATUS +#define SDMA2_RLC1_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define 
SDMA2_RLC1_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA2_RLC1_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA2_RLC1_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA2_RLC1_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA2_RLC1_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA2_RLC1_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA2_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA2_RLC1_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA2_RLC1_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA2_RLC1_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA2_RLC1_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA2_RLC1_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA2_RLC1_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA2_RLC1_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA2_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA2_RLC1_DOORBELL +#define SDMA2_RLC1_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA2_RLC1_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA2_RLC1_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA2_RLC1_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA2_RLC1_STATUS +#define SDMA2_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA2_RLC1_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA2_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA2_RLC1_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA2_RLC1_DOORBELL_LOG +#define SDMA2_RLC1_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA2_RLC1_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA2_RLC1_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA2_RLC1_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA2_RLC1_WATERMARK +#define SDMA2_RLC1_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA2_RLC1_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA2_RLC1_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA2_RLC1_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA2_RLC1_DOORBELL_OFFSET +#define SDMA2_RLC1_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA2_RLC1_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA2_RLC1_CSA_ADDR_LO +#define SDMA2_RLC1_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA2_RLC1_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA2_RLC1_CSA_ADDR_HI +#define SDMA2_RLC1_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA2_RLC1_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA2_RLC1_IB_SUB_REMAIN +#define SDMA2_RLC1_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA2_RLC1_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA2_RLC1_PREEMPT +#define SDMA2_RLC1_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA2_RLC1_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA2_RLC1_DUMMY_REG +#define SDMA2_RLC1_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA2_RLC1_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA2_RLC1_RB_WPTR_POLL_ADDR_HI +#define SDMA2_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA2_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA2_RLC1_RB_WPTR_POLL_ADDR_LO +#define SDMA2_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA2_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA2_RLC1_RB_AQL_CNTL +#define SDMA2_RLC1_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA2_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA2_RLC1_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA2_RLC1_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA2_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA2_RLC1_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA2_RLC1_MINOR_PTR_UPDATE +#define SDMA2_RLC1_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA2_RLC1_MINOR_PTR_UPDATE__ENABLE_MASK 
0x00000001L +//SDMA2_RLC1_MIDCMD_DATA0 +#define SDMA2_RLC1_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA2_RLC1_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA2_RLC1_MIDCMD_DATA1 +#define SDMA2_RLC1_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA2_RLC1_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA2_RLC1_MIDCMD_DATA2 +#define SDMA2_RLC1_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA2_RLC1_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA2_RLC1_MIDCMD_DATA3 +#define SDMA2_RLC1_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA2_RLC1_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA2_RLC1_MIDCMD_DATA4 +#define SDMA2_RLC1_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA2_RLC1_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA2_RLC1_MIDCMD_DATA5 +#define SDMA2_RLC1_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA2_RLC1_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA2_RLC1_MIDCMD_DATA6 +#define SDMA2_RLC1_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA2_RLC1_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA2_RLC1_MIDCMD_DATA7 +#define SDMA2_RLC1_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA2_RLC1_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA2_RLC1_MIDCMD_DATA8 +#define SDMA2_RLC1_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA2_RLC1_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA2_RLC1_MIDCMD_CNTL +#define SDMA2_RLC1_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA2_RLC1_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA2_RLC1_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA2_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA2_RLC1_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA2_RLC1_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA2_RLC1_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA2_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA2_RLC2_RB_CNTL +#define SDMA2_RLC2_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA2_RLC2_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA2_RLC2_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA2_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA2_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA2_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA2_RLC2_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA2_RLC2_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA2_RLC2_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA2_RLC2_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA2_RLC2_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA2_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA2_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA2_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA2_RLC2_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA2_RLC2_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA2_RLC2_RB_BASE +#define SDMA2_RLC2_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA2_RLC2_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA2_RLC2_RB_BASE_HI +#define SDMA2_RLC2_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA2_RLC2_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA2_RLC2_RB_RPTR +#define SDMA2_RLC2_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA2_RLC2_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA2_RLC2_RB_RPTR_HI +#define SDMA2_RLC2_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA2_RLC2_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA2_RLC2_RB_WPTR +#define SDMA2_RLC2_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA2_RLC2_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA2_RLC2_RB_WPTR_HI +#define SDMA2_RLC2_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA2_RLC2_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA2_RLC2_RB_WPTR_POLL_CNTL +#define SDMA2_RLC2_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define 
SDMA2_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA2_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA2_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA2_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA2_RLC2_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA2_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA2_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA2_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA2_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA2_RLC2_RB_RPTR_ADDR_HI +#define SDMA2_RLC2_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA2_RLC2_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA2_RLC2_RB_RPTR_ADDR_LO +#define SDMA2_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA2_RLC2_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA2_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA2_RLC2_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA2_RLC2_IB_CNTL +#define SDMA2_RLC2_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA2_RLC2_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA2_RLC2_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA2_RLC2_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA2_RLC2_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA2_RLC2_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA2_RLC2_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA2_RLC2_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA2_RLC2_IB_RPTR +#define SDMA2_RLC2_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA2_RLC2_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA2_RLC2_IB_OFFSET +#define SDMA2_RLC2_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA2_RLC2_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA2_RLC2_IB_BASE_LO +#define SDMA2_RLC2_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA2_RLC2_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA2_RLC2_IB_BASE_HI +#define SDMA2_RLC2_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA2_RLC2_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA2_RLC2_IB_SIZE +#define SDMA2_RLC2_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA2_RLC2_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA2_RLC2_SKIP_CNTL +#define SDMA2_RLC2_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA2_RLC2_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA2_RLC2_CONTEXT_STATUS +#define SDMA2_RLC2_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA2_RLC2_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA2_RLC2_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA2_RLC2_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA2_RLC2_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA2_RLC2_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA2_RLC2_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA2_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA2_RLC2_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA2_RLC2_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA2_RLC2_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA2_RLC2_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA2_RLC2_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA2_RLC2_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA2_RLC2_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA2_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA2_RLC2_DOORBELL +#define SDMA2_RLC2_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA2_RLC2_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA2_RLC2_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA2_RLC2_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA2_RLC2_STATUS +#define SDMA2_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define 
SDMA2_RLC2_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA2_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA2_RLC2_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA2_RLC2_DOORBELL_LOG +#define SDMA2_RLC2_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA2_RLC2_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA2_RLC2_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA2_RLC2_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA2_RLC2_WATERMARK +#define SDMA2_RLC2_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA2_RLC2_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA2_RLC2_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA2_RLC2_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA2_RLC2_DOORBELL_OFFSET +#define SDMA2_RLC2_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA2_RLC2_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA2_RLC2_CSA_ADDR_LO +#define SDMA2_RLC2_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA2_RLC2_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA2_RLC2_CSA_ADDR_HI +#define SDMA2_RLC2_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA2_RLC2_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA2_RLC2_IB_SUB_REMAIN +#define SDMA2_RLC2_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA2_RLC2_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA2_RLC2_PREEMPT +#define SDMA2_RLC2_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA2_RLC2_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA2_RLC2_DUMMY_REG +#define SDMA2_RLC2_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA2_RLC2_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA2_RLC2_RB_WPTR_POLL_ADDR_HI +#define SDMA2_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA2_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA2_RLC2_RB_WPTR_POLL_ADDR_LO +#define SDMA2_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA2_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA2_RLC2_RB_AQL_CNTL +#define SDMA2_RLC2_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA2_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA2_RLC2_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA2_RLC2_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA2_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA2_RLC2_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA2_RLC2_MINOR_PTR_UPDATE +#define SDMA2_RLC2_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA2_RLC2_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA2_RLC2_MIDCMD_DATA0 +#define SDMA2_RLC2_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA2_RLC2_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA2_RLC2_MIDCMD_DATA1 +#define SDMA2_RLC2_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA2_RLC2_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA2_RLC2_MIDCMD_DATA2 +#define SDMA2_RLC2_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA2_RLC2_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA2_RLC2_MIDCMD_DATA3 +#define SDMA2_RLC2_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA2_RLC2_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA2_RLC2_MIDCMD_DATA4 +#define SDMA2_RLC2_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA2_RLC2_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA2_RLC2_MIDCMD_DATA5 +#define SDMA2_RLC2_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA2_RLC2_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA2_RLC2_MIDCMD_DATA6 +#define SDMA2_RLC2_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA2_RLC2_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA2_RLC2_MIDCMD_DATA7 +#define SDMA2_RLC2_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA2_RLC2_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA2_RLC2_MIDCMD_DATA8 +#define SDMA2_RLC2_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA2_RLC2_MIDCMD_DATA8__DATA8_MASK 
0xFFFFFFFFL +//SDMA2_RLC2_MIDCMD_CNTL +#define SDMA2_RLC2_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA2_RLC2_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA2_RLC2_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA2_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA2_RLC2_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA2_RLC2_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA2_RLC2_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA2_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA2_RLC3_RB_CNTL +#define SDMA2_RLC3_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA2_RLC3_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA2_RLC3_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA2_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA2_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA2_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA2_RLC3_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA2_RLC3_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA2_RLC3_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA2_RLC3_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA2_RLC3_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA2_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA2_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA2_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA2_RLC3_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA2_RLC3_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA2_RLC3_RB_BASE +#define SDMA2_RLC3_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA2_RLC3_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA2_RLC3_RB_BASE_HI +#define SDMA2_RLC3_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA2_RLC3_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA2_RLC3_RB_RPTR +#define SDMA2_RLC3_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA2_RLC3_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA2_RLC3_RB_RPTR_HI +#define SDMA2_RLC3_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA2_RLC3_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA2_RLC3_RB_WPTR +#define SDMA2_RLC3_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA2_RLC3_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA2_RLC3_RB_WPTR_HI +#define SDMA2_RLC3_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA2_RLC3_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA2_RLC3_RB_WPTR_POLL_CNTL +#define SDMA2_RLC3_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA2_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA2_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA2_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA2_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA2_RLC3_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA2_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA2_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA2_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA2_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA2_RLC3_RB_RPTR_ADDR_HI +#define SDMA2_RLC3_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA2_RLC3_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA2_RLC3_RB_RPTR_ADDR_LO +#define SDMA2_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA2_RLC3_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA2_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA2_RLC3_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA2_RLC3_IB_CNTL +#define SDMA2_RLC3_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA2_RLC3_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA2_RLC3_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA2_RLC3_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define 
SDMA2_RLC3_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA2_RLC3_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA2_RLC3_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA2_RLC3_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA2_RLC3_IB_RPTR +#define SDMA2_RLC3_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA2_RLC3_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA2_RLC3_IB_OFFSET +#define SDMA2_RLC3_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA2_RLC3_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA2_RLC3_IB_BASE_LO +#define SDMA2_RLC3_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA2_RLC3_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA2_RLC3_IB_BASE_HI +#define SDMA2_RLC3_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA2_RLC3_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA2_RLC3_IB_SIZE +#define SDMA2_RLC3_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA2_RLC3_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA2_RLC3_SKIP_CNTL +#define SDMA2_RLC3_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA2_RLC3_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA2_RLC3_CONTEXT_STATUS +#define SDMA2_RLC3_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA2_RLC3_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA2_RLC3_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA2_RLC3_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA2_RLC3_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA2_RLC3_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA2_RLC3_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA2_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA2_RLC3_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA2_RLC3_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA2_RLC3_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA2_RLC3_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA2_RLC3_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA2_RLC3_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA2_RLC3_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA2_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA2_RLC3_DOORBELL +#define SDMA2_RLC3_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA2_RLC3_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA2_RLC3_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA2_RLC3_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA2_RLC3_STATUS +#define SDMA2_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA2_RLC3_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA2_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA2_RLC3_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA2_RLC3_DOORBELL_LOG +#define SDMA2_RLC3_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA2_RLC3_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA2_RLC3_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA2_RLC3_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA2_RLC3_WATERMARK +#define SDMA2_RLC3_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA2_RLC3_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA2_RLC3_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA2_RLC3_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA2_RLC3_DOORBELL_OFFSET +#define SDMA2_RLC3_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA2_RLC3_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA2_RLC3_CSA_ADDR_LO +#define SDMA2_RLC3_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA2_RLC3_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA2_RLC3_CSA_ADDR_HI +#define SDMA2_RLC3_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA2_RLC3_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA2_RLC3_IB_SUB_REMAIN +#define SDMA2_RLC3_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA2_RLC3_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL 
+//SDMA2_RLC3_PREEMPT +#define SDMA2_RLC3_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA2_RLC3_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA2_RLC3_DUMMY_REG +#define SDMA2_RLC3_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA2_RLC3_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA2_RLC3_RB_WPTR_POLL_ADDR_HI +#define SDMA2_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA2_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA2_RLC3_RB_WPTR_POLL_ADDR_LO +#define SDMA2_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA2_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA2_RLC3_RB_AQL_CNTL +#define SDMA2_RLC3_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA2_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA2_RLC3_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA2_RLC3_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA2_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA2_RLC3_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA2_RLC3_MINOR_PTR_UPDATE +#define SDMA2_RLC3_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA2_RLC3_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA2_RLC3_MIDCMD_DATA0 +#define SDMA2_RLC3_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA2_RLC3_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA2_RLC3_MIDCMD_DATA1 +#define SDMA2_RLC3_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA2_RLC3_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA2_RLC3_MIDCMD_DATA2 +#define SDMA2_RLC3_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA2_RLC3_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA2_RLC3_MIDCMD_DATA3 +#define SDMA2_RLC3_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA2_RLC3_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA2_RLC3_MIDCMD_DATA4 +#define SDMA2_RLC3_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA2_RLC3_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA2_RLC3_MIDCMD_DATA5 +#define SDMA2_RLC3_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA2_RLC3_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA2_RLC3_MIDCMD_DATA6 +#define SDMA2_RLC3_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA2_RLC3_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA2_RLC3_MIDCMD_DATA7 +#define SDMA2_RLC3_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA2_RLC3_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA2_RLC3_MIDCMD_DATA8 +#define SDMA2_RLC3_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA2_RLC3_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA2_RLC3_MIDCMD_CNTL +#define SDMA2_RLC3_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA2_RLC3_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA2_RLC3_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA2_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA2_RLC3_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA2_RLC3_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA2_RLC3_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA2_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA2_RLC4_RB_CNTL +#define SDMA2_RLC4_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA2_RLC4_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA2_RLC4_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA2_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA2_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA2_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA2_RLC4_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA2_RLC4_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA2_RLC4_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA2_RLC4_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA2_RLC4_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA2_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define 
SDMA2_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA2_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA2_RLC4_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA2_RLC4_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA2_RLC4_RB_BASE +#define SDMA2_RLC4_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA2_RLC4_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA2_RLC4_RB_BASE_HI +#define SDMA2_RLC4_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA2_RLC4_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA2_RLC4_RB_RPTR +#define SDMA2_RLC4_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA2_RLC4_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA2_RLC4_RB_RPTR_HI +#define SDMA2_RLC4_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA2_RLC4_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA2_RLC4_RB_WPTR +#define SDMA2_RLC4_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA2_RLC4_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA2_RLC4_RB_WPTR_HI +#define SDMA2_RLC4_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA2_RLC4_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA2_RLC4_RB_WPTR_POLL_CNTL +#define SDMA2_RLC4_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA2_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA2_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA2_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA2_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA2_RLC4_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA2_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA2_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA2_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA2_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA2_RLC4_RB_RPTR_ADDR_HI +#define SDMA2_RLC4_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA2_RLC4_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA2_RLC4_RB_RPTR_ADDR_LO +#define SDMA2_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA2_RLC4_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA2_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA2_RLC4_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA2_RLC4_IB_CNTL +#define SDMA2_RLC4_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA2_RLC4_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA2_RLC4_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA2_RLC4_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA2_RLC4_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA2_RLC4_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA2_RLC4_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA2_RLC4_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA2_RLC4_IB_RPTR +#define SDMA2_RLC4_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA2_RLC4_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA2_RLC4_IB_OFFSET +#define SDMA2_RLC4_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA2_RLC4_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA2_RLC4_IB_BASE_LO +#define SDMA2_RLC4_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA2_RLC4_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA2_RLC4_IB_BASE_HI +#define SDMA2_RLC4_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA2_RLC4_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA2_RLC4_IB_SIZE +#define SDMA2_RLC4_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA2_RLC4_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA2_RLC4_SKIP_CNTL +#define SDMA2_RLC4_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA2_RLC4_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA2_RLC4_CONTEXT_STATUS +#define SDMA2_RLC4_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA2_RLC4_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA2_RLC4_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define 
SDMA2_RLC4_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA2_RLC4_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA2_RLC4_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA2_RLC4_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA2_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA2_RLC4_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA2_RLC4_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA2_RLC4_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA2_RLC4_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA2_RLC4_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA2_RLC4_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA2_RLC4_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA2_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA2_RLC4_DOORBELL +#define SDMA2_RLC4_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA2_RLC4_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA2_RLC4_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA2_RLC4_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA2_RLC4_STATUS +#define SDMA2_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA2_RLC4_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA2_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA2_RLC4_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA2_RLC4_DOORBELL_LOG +#define SDMA2_RLC4_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA2_RLC4_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA2_RLC4_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA2_RLC4_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA2_RLC4_WATERMARK +#define SDMA2_RLC4_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA2_RLC4_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA2_RLC4_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA2_RLC4_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA2_RLC4_DOORBELL_OFFSET +#define SDMA2_RLC4_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA2_RLC4_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA2_RLC4_CSA_ADDR_LO +#define SDMA2_RLC4_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA2_RLC4_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA2_RLC4_CSA_ADDR_HI +#define SDMA2_RLC4_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA2_RLC4_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA2_RLC4_IB_SUB_REMAIN +#define SDMA2_RLC4_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA2_RLC4_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA2_RLC4_PREEMPT +#define SDMA2_RLC4_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA2_RLC4_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA2_RLC4_DUMMY_REG +#define SDMA2_RLC4_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA2_RLC4_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA2_RLC4_RB_WPTR_POLL_ADDR_HI +#define SDMA2_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA2_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA2_RLC4_RB_WPTR_POLL_ADDR_LO +#define SDMA2_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA2_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA2_RLC4_RB_AQL_CNTL +#define SDMA2_RLC4_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA2_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA2_RLC4_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA2_RLC4_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA2_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA2_RLC4_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA2_RLC4_MINOR_PTR_UPDATE +#define SDMA2_RLC4_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA2_RLC4_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA2_RLC4_MIDCMD_DATA0 +#define SDMA2_RLC4_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define 
SDMA2_RLC4_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA2_RLC4_MIDCMD_DATA1 +#define SDMA2_RLC4_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA2_RLC4_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA2_RLC4_MIDCMD_DATA2 +#define SDMA2_RLC4_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA2_RLC4_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA2_RLC4_MIDCMD_DATA3 +#define SDMA2_RLC4_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA2_RLC4_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA2_RLC4_MIDCMD_DATA4 +#define SDMA2_RLC4_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA2_RLC4_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA2_RLC4_MIDCMD_DATA5 +#define SDMA2_RLC4_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA2_RLC4_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA2_RLC4_MIDCMD_DATA6 +#define SDMA2_RLC4_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA2_RLC4_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA2_RLC4_MIDCMD_DATA7 +#define SDMA2_RLC4_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA2_RLC4_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA2_RLC4_MIDCMD_DATA8 +#define SDMA2_RLC4_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA2_RLC4_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA2_RLC4_MIDCMD_CNTL +#define SDMA2_RLC4_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA2_RLC4_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA2_RLC4_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA2_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA2_RLC4_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA2_RLC4_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA2_RLC4_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA2_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA2_RLC5_RB_CNTL +#define SDMA2_RLC5_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA2_RLC5_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA2_RLC5_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA2_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA2_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA2_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA2_RLC5_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA2_RLC5_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA2_RLC5_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA2_RLC5_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA2_RLC5_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA2_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA2_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA2_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA2_RLC5_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA2_RLC5_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA2_RLC5_RB_BASE +#define SDMA2_RLC5_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA2_RLC5_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA2_RLC5_RB_BASE_HI +#define SDMA2_RLC5_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA2_RLC5_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA2_RLC5_RB_RPTR +#define SDMA2_RLC5_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA2_RLC5_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA2_RLC5_RB_RPTR_HI +#define SDMA2_RLC5_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA2_RLC5_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA2_RLC5_RB_WPTR +#define SDMA2_RLC5_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA2_RLC5_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA2_RLC5_RB_WPTR_HI +#define SDMA2_RLC5_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA2_RLC5_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA2_RLC5_RB_WPTR_POLL_CNTL +#define SDMA2_RLC5_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA2_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA2_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define 
SDMA2_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA2_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA2_RLC5_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA2_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA2_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA2_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA2_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA2_RLC5_RB_RPTR_ADDR_HI +#define SDMA2_RLC5_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA2_RLC5_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA2_RLC5_RB_RPTR_ADDR_LO +#define SDMA2_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA2_RLC5_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA2_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA2_RLC5_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA2_RLC5_IB_CNTL +#define SDMA2_RLC5_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA2_RLC5_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA2_RLC5_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA2_RLC5_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA2_RLC5_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA2_RLC5_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA2_RLC5_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA2_RLC5_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA2_RLC5_IB_RPTR +#define SDMA2_RLC5_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA2_RLC5_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA2_RLC5_IB_OFFSET +#define SDMA2_RLC5_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA2_RLC5_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA2_RLC5_IB_BASE_LO +#define SDMA2_RLC5_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA2_RLC5_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA2_RLC5_IB_BASE_HI +#define SDMA2_RLC5_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA2_RLC5_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA2_RLC5_IB_SIZE +#define SDMA2_RLC5_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA2_RLC5_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA2_RLC5_SKIP_CNTL +#define SDMA2_RLC5_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA2_RLC5_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA2_RLC5_CONTEXT_STATUS +#define SDMA2_RLC5_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA2_RLC5_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA2_RLC5_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA2_RLC5_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA2_RLC5_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA2_RLC5_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA2_RLC5_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA2_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA2_RLC5_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA2_RLC5_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA2_RLC5_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA2_RLC5_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA2_RLC5_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA2_RLC5_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA2_RLC5_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA2_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA2_RLC5_DOORBELL +#define SDMA2_RLC5_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA2_RLC5_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA2_RLC5_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA2_RLC5_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA2_RLC5_STATUS +#define SDMA2_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA2_RLC5_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA2_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define 
SDMA2_RLC5_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA2_RLC5_DOORBELL_LOG +#define SDMA2_RLC5_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA2_RLC5_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA2_RLC5_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA2_RLC5_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA2_RLC5_WATERMARK +#define SDMA2_RLC5_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA2_RLC5_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA2_RLC5_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA2_RLC5_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA2_RLC5_DOORBELL_OFFSET +#define SDMA2_RLC5_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA2_RLC5_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA2_RLC5_CSA_ADDR_LO +#define SDMA2_RLC5_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA2_RLC5_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA2_RLC5_CSA_ADDR_HI +#define SDMA2_RLC5_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA2_RLC5_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA2_RLC5_IB_SUB_REMAIN +#define SDMA2_RLC5_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA2_RLC5_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA2_RLC5_PREEMPT +#define SDMA2_RLC5_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA2_RLC5_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA2_RLC5_DUMMY_REG +#define SDMA2_RLC5_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA2_RLC5_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA2_RLC5_RB_WPTR_POLL_ADDR_HI +#define SDMA2_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA2_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA2_RLC5_RB_WPTR_POLL_ADDR_LO +#define SDMA2_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA2_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA2_RLC5_RB_AQL_CNTL +#define SDMA2_RLC5_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA2_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA2_RLC5_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA2_RLC5_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA2_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA2_RLC5_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA2_RLC5_MINOR_PTR_UPDATE +#define SDMA2_RLC5_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA2_RLC5_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA2_RLC5_MIDCMD_DATA0 +#define SDMA2_RLC5_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA2_RLC5_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA2_RLC5_MIDCMD_DATA1 +#define SDMA2_RLC5_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA2_RLC5_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA2_RLC5_MIDCMD_DATA2 +#define SDMA2_RLC5_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA2_RLC5_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA2_RLC5_MIDCMD_DATA3 +#define SDMA2_RLC5_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA2_RLC5_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA2_RLC5_MIDCMD_DATA4 +#define SDMA2_RLC5_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA2_RLC5_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA2_RLC5_MIDCMD_DATA5 +#define SDMA2_RLC5_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA2_RLC5_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA2_RLC5_MIDCMD_DATA6 +#define SDMA2_RLC5_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA2_RLC5_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA2_RLC5_MIDCMD_DATA7 +#define SDMA2_RLC5_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA2_RLC5_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA2_RLC5_MIDCMD_DATA8 +#define SDMA2_RLC5_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA2_RLC5_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA2_RLC5_MIDCMD_CNTL +#define SDMA2_RLC5_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define 
SDMA2_RLC5_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA2_RLC5_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA2_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA2_RLC5_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA2_RLC5_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA2_RLC5_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA2_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA2_RLC6_RB_CNTL +#define SDMA2_RLC6_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA2_RLC6_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA2_RLC6_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA2_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA2_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA2_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA2_RLC6_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA2_RLC6_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA2_RLC6_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA2_RLC6_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA2_RLC6_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA2_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA2_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA2_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA2_RLC6_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA2_RLC6_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA2_RLC6_RB_BASE +#define SDMA2_RLC6_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA2_RLC6_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA2_RLC6_RB_BASE_HI +#define SDMA2_RLC6_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA2_RLC6_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA2_RLC6_RB_RPTR +#define SDMA2_RLC6_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA2_RLC6_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA2_RLC6_RB_RPTR_HI +#define SDMA2_RLC6_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA2_RLC6_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA2_RLC6_RB_WPTR +#define SDMA2_RLC6_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA2_RLC6_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA2_RLC6_RB_WPTR_HI +#define SDMA2_RLC6_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA2_RLC6_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA2_RLC6_RB_WPTR_POLL_CNTL +#define SDMA2_RLC6_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA2_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA2_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA2_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA2_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA2_RLC6_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA2_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA2_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA2_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA2_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA2_RLC6_RB_RPTR_ADDR_HI +#define SDMA2_RLC6_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA2_RLC6_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA2_RLC6_RB_RPTR_ADDR_LO +#define SDMA2_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA2_RLC6_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA2_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA2_RLC6_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA2_RLC6_IB_CNTL +#define SDMA2_RLC6_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA2_RLC6_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA2_RLC6_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA2_RLC6_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA2_RLC6_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA2_RLC6_IB_CNTL__IB_SWAP_ENABLE_MASK 
0x00000010L +#define SDMA2_RLC6_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA2_RLC6_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA2_RLC6_IB_RPTR +#define SDMA2_RLC6_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA2_RLC6_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA2_RLC6_IB_OFFSET +#define SDMA2_RLC6_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA2_RLC6_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA2_RLC6_IB_BASE_LO +#define SDMA2_RLC6_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA2_RLC6_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA2_RLC6_IB_BASE_HI +#define SDMA2_RLC6_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA2_RLC6_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA2_RLC6_IB_SIZE +#define SDMA2_RLC6_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA2_RLC6_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA2_RLC6_SKIP_CNTL +#define SDMA2_RLC6_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA2_RLC6_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA2_RLC6_CONTEXT_STATUS +#define SDMA2_RLC6_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA2_RLC6_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA2_RLC6_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA2_RLC6_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA2_RLC6_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA2_RLC6_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA2_RLC6_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA2_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA2_RLC6_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA2_RLC6_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA2_RLC6_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA2_RLC6_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA2_RLC6_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA2_RLC6_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA2_RLC6_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA2_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA2_RLC6_DOORBELL +#define SDMA2_RLC6_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA2_RLC6_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA2_RLC6_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA2_RLC6_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA2_RLC6_STATUS +#define SDMA2_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA2_RLC6_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA2_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA2_RLC6_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA2_RLC6_DOORBELL_LOG +#define SDMA2_RLC6_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA2_RLC6_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA2_RLC6_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA2_RLC6_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA2_RLC6_WATERMARK +#define SDMA2_RLC6_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA2_RLC6_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA2_RLC6_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA2_RLC6_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA2_RLC6_DOORBELL_OFFSET +#define SDMA2_RLC6_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA2_RLC6_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA2_RLC6_CSA_ADDR_LO +#define SDMA2_RLC6_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA2_RLC6_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA2_RLC6_CSA_ADDR_HI +#define SDMA2_RLC6_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA2_RLC6_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA2_RLC6_IB_SUB_REMAIN +#define SDMA2_RLC6_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA2_RLC6_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA2_RLC6_PREEMPT +#define SDMA2_RLC6_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define 
SDMA2_RLC6_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA2_RLC6_DUMMY_REG +#define SDMA2_RLC6_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA2_RLC6_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA2_RLC6_RB_WPTR_POLL_ADDR_HI +#define SDMA2_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA2_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA2_RLC6_RB_WPTR_POLL_ADDR_LO +#define SDMA2_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA2_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA2_RLC6_RB_AQL_CNTL +#define SDMA2_RLC6_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA2_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA2_RLC6_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA2_RLC6_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA2_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA2_RLC6_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA2_RLC6_MINOR_PTR_UPDATE +#define SDMA2_RLC6_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA2_RLC6_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA2_RLC6_MIDCMD_DATA0 +#define SDMA2_RLC6_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA2_RLC6_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA2_RLC6_MIDCMD_DATA1 +#define SDMA2_RLC6_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA2_RLC6_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA2_RLC6_MIDCMD_DATA2 +#define SDMA2_RLC6_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA2_RLC6_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA2_RLC6_MIDCMD_DATA3 +#define SDMA2_RLC6_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA2_RLC6_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA2_RLC6_MIDCMD_DATA4 +#define SDMA2_RLC6_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA2_RLC6_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA2_RLC6_MIDCMD_DATA5 +#define SDMA2_RLC6_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA2_RLC6_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA2_RLC6_MIDCMD_DATA6 +#define SDMA2_RLC6_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA2_RLC6_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA2_RLC6_MIDCMD_DATA7 +#define SDMA2_RLC6_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA2_RLC6_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA2_RLC6_MIDCMD_DATA8 +#define SDMA2_RLC6_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA2_RLC6_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA2_RLC6_MIDCMD_CNTL +#define SDMA2_RLC6_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA2_RLC6_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA2_RLC6_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA2_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA2_RLC6_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA2_RLC6_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA2_RLC6_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA2_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA2_RLC7_RB_CNTL +#define SDMA2_RLC7_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA2_RLC7_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA2_RLC7_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA2_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA2_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA2_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA2_RLC7_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA2_RLC7_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA2_RLC7_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA2_RLC7_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA2_RLC7_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA2_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA2_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define 
SDMA2_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA2_RLC7_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA2_RLC7_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA2_RLC7_RB_BASE +#define SDMA2_RLC7_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA2_RLC7_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA2_RLC7_RB_BASE_HI +#define SDMA2_RLC7_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA2_RLC7_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA2_RLC7_RB_RPTR +#define SDMA2_RLC7_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA2_RLC7_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA2_RLC7_RB_RPTR_HI +#define SDMA2_RLC7_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA2_RLC7_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA2_RLC7_RB_WPTR +#define SDMA2_RLC7_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA2_RLC7_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA2_RLC7_RB_WPTR_HI +#define SDMA2_RLC7_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA2_RLC7_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA2_RLC7_RB_WPTR_POLL_CNTL +#define SDMA2_RLC7_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA2_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA2_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA2_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA2_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA2_RLC7_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA2_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA2_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA2_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA2_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA2_RLC7_RB_RPTR_ADDR_HI +#define SDMA2_RLC7_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA2_RLC7_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA2_RLC7_RB_RPTR_ADDR_LO +#define SDMA2_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA2_RLC7_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA2_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA2_RLC7_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA2_RLC7_IB_CNTL +#define SDMA2_RLC7_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA2_RLC7_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA2_RLC7_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA2_RLC7_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA2_RLC7_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA2_RLC7_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA2_RLC7_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA2_RLC7_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA2_RLC7_IB_RPTR +#define SDMA2_RLC7_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA2_RLC7_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA2_RLC7_IB_OFFSET +#define SDMA2_RLC7_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA2_RLC7_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA2_RLC7_IB_BASE_LO +#define SDMA2_RLC7_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA2_RLC7_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA2_RLC7_IB_BASE_HI +#define SDMA2_RLC7_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA2_RLC7_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA2_RLC7_IB_SIZE +#define SDMA2_RLC7_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA2_RLC7_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA2_RLC7_SKIP_CNTL +#define SDMA2_RLC7_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA2_RLC7_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA2_RLC7_CONTEXT_STATUS +#define SDMA2_RLC7_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA2_RLC7_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA2_RLC7_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA2_RLC7_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA2_RLC7_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 
+#define SDMA2_RLC7_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA2_RLC7_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA2_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA2_RLC7_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA2_RLC7_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA2_RLC7_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA2_RLC7_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA2_RLC7_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA2_RLC7_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA2_RLC7_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA2_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA2_RLC7_DOORBELL +#define SDMA2_RLC7_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA2_RLC7_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA2_RLC7_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA2_RLC7_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA2_RLC7_STATUS +#define SDMA2_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA2_RLC7_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA2_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA2_RLC7_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA2_RLC7_DOORBELL_LOG +#define SDMA2_RLC7_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA2_RLC7_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA2_RLC7_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA2_RLC7_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA2_RLC7_WATERMARK +#define SDMA2_RLC7_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA2_RLC7_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA2_RLC7_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA2_RLC7_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA2_RLC7_DOORBELL_OFFSET +#define SDMA2_RLC7_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA2_RLC7_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA2_RLC7_CSA_ADDR_LO +#define SDMA2_RLC7_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA2_RLC7_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA2_RLC7_CSA_ADDR_HI +#define SDMA2_RLC7_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA2_RLC7_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA2_RLC7_IB_SUB_REMAIN +#define SDMA2_RLC7_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA2_RLC7_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA2_RLC7_PREEMPT +#define SDMA2_RLC7_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA2_RLC7_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA2_RLC7_DUMMY_REG +#define SDMA2_RLC7_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA2_RLC7_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA2_RLC7_RB_WPTR_POLL_ADDR_HI +#define SDMA2_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA2_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA2_RLC7_RB_WPTR_POLL_ADDR_LO +#define SDMA2_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA2_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA2_RLC7_RB_AQL_CNTL +#define SDMA2_RLC7_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA2_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA2_RLC7_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA2_RLC7_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA2_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA2_RLC7_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA2_RLC7_MINOR_PTR_UPDATE +#define SDMA2_RLC7_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA2_RLC7_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA2_RLC7_MIDCMD_DATA0 +#define SDMA2_RLC7_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA2_RLC7_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA2_RLC7_MIDCMD_DATA1 +#define 
SDMA2_RLC7_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA2_RLC7_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA2_RLC7_MIDCMD_DATA2 +#define SDMA2_RLC7_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA2_RLC7_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA2_RLC7_MIDCMD_DATA3 +#define SDMA2_RLC7_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA2_RLC7_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA2_RLC7_MIDCMD_DATA4 +#define SDMA2_RLC7_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA2_RLC7_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA2_RLC7_MIDCMD_DATA5 +#define SDMA2_RLC7_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA2_RLC7_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA2_RLC7_MIDCMD_DATA6 +#define SDMA2_RLC7_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA2_RLC7_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA2_RLC7_MIDCMD_DATA7 +#define SDMA2_RLC7_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA2_RLC7_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA2_RLC7_MIDCMD_DATA8 +#define SDMA2_RLC7_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA2_RLC7_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA2_RLC7_MIDCMD_CNTL +#define SDMA2_RLC7_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA2_RLC7_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA2_RLC7_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA2_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA2_RLC7_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA2_RLC7_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA2_RLC7_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA2_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma3/sdma3_4_2_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/sdma3/sdma3_4_2_2_offset.h new file mode 100644 index 000000000000..09e8302715cb --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/sdma3/sdma3_4_2_2_offset.h @@ -0,0 +1,1043 @@ +/* + * Copyright (C) 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef _sdma3_4_2_2_OFFSET_HEADER +#define _sdma3_4_2_2_OFFSET_HEADER + + + +// addressBlock: sdma3_sdma3dec +// base address: 0x79000 +#define mmSDMA3_UCODE_ADDR 0x0000 +#define mmSDMA3_UCODE_ADDR_BASE_IDX 1 +#define mmSDMA3_UCODE_DATA 0x0001 +#define mmSDMA3_UCODE_DATA_BASE_IDX 1 +#define mmSDMA3_VM_CNTL 0x0004 +#define mmSDMA3_VM_CNTL_BASE_IDX 1 +#define mmSDMA3_VM_CTX_LO 0x0005 +#define mmSDMA3_VM_CTX_LO_BASE_IDX 1 +#define mmSDMA3_VM_CTX_HI 0x0006 +#define mmSDMA3_VM_CTX_HI_BASE_IDX 1 +#define mmSDMA3_ACTIVE_FCN_ID 0x0007 +#define mmSDMA3_ACTIVE_FCN_ID_BASE_IDX 1 +#define mmSDMA3_VM_CTX_CNTL 0x0008 +#define mmSDMA3_VM_CTX_CNTL_BASE_IDX 1 +#define mmSDMA3_VIRT_RESET_REQ 0x0009 +#define mmSDMA3_VIRT_RESET_REQ_BASE_IDX 1 +#define mmSDMA3_VF_ENABLE 0x000a +#define mmSDMA3_VF_ENABLE_BASE_IDX 1 +#define mmSDMA3_CONTEXT_REG_TYPE0 0x000b +#define mmSDMA3_CONTEXT_REG_TYPE0_BASE_IDX 1 +#define mmSDMA3_CONTEXT_REG_TYPE1 0x000c +#define mmSDMA3_CONTEXT_REG_TYPE1_BASE_IDX 1 +#define mmSDMA3_CONTEXT_REG_TYPE2 0x000d +#define mmSDMA3_CONTEXT_REG_TYPE2_BASE_IDX 1 +#define mmSDMA3_CONTEXT_REG_TYPE3 0x000e +#define mmSDMA3_CONTEXT_REG_TYPE3_BASE_IDX 1 +#define mmSDMA3_PUB_REG_TYPE0 0x000f +#define mmSDMA3_PUB_REG_TYPE0_BASE_IDX 1 +#define mmSDMA3_PUB_REG_TYPE1 0x0010 +#define mmSDMA3_PUB_REG_TYPE1_BASE_IDX 1 +#define mmSDMA3_PUB_REG_TYPE2 0x0011 +#define mmSDMA3_PUB_REG_TYPE2_BASE_IDX 1 +#define mmSDMA3_PUB_REG_TYPE3 0x0012 +#define mmSDMA3_PUB_REG_TYPE3_BASE_IDX 1 +#define mmSDMA3_MMHUB_CNTL 0x0013 +#define mmSDMA3_MMHUB_CNTL_BASE_IDX 1 +#define mmSDMA3_CONTEXT_GROUP_BOUNDARY 0x0019 +#define mmSDMA3_CONTEXT_GROUP_BOUNDARY_BASE_IDX 1 +#define mmSDMA3_POWER_CNTL 0x001a +#define mmSDMA3_POWER_CNTL_BASE_IDX 1 +#define mmSDMA3_CLK_CTRL 0x001b +#define mmSDMA3_CLK_CTRL_BASE_IDX 1 +#define mmSDMA3_CNTL 0x001c +#define mmSDMA3_CNTL_BASE_IDX 1 +#define mmSDMA3_CHICKEN_BITS 0x001d +#define mmSDMA3_CHICKEN_BITS_BASE_IDX 1 +#define mmSDMA3_GB_ADDR_CONFIG 0x001e +#define mmSDMA3_GB_ADDR_CONFIG_BASE_IDX 1 +#define mmSDMA3_GB_ADDR_CONFIG_READ 0x001f +#define mmSDMA3_GB_ADDR_CONFIG_READ_BASE_IDX 1 +#define mmSDMA3_RB_RPTR_FETCH_HI 0x0020 +#define mmSDMA3_RB_RPTR_FETCH_HI_BASE_IDX 1 +#define mmSDMA3_SEM_WAIT_FAIL_TIMER_CNTL 0x0021 +#define mmSDMA3_SEM_WAIT_FAIL_TIMER_CNTL_BASE_IDX 1 +#define mmSDMA3_RB_RPTR_FETCH 0x0022 +#define mmSDMA3_RB_RPTR_FETCH_BASE_IDX 1 +#define mmSDMA3_IB_OFFSET_FETCH 0x0023 +#define mmSDMA3_IB_OFFSET_FETCH_BASE_IDX 1 +#define mmSDMA3_PROGRAM 0x0024 +#define mmSDMA3_PROGRAM_BASE_IDX 1 +#define mmSDMA3_STATUS_REG 0x0025 +#define mmSDMA3_STATUS_REG_BASE_IDX 1 +#define mmSDMA3_STATUS1_REG 0x0026 +#define mmSDMA3_STATUS1_REG_BASE_IDX 1 +#define mmSDMA3_RD_BURST_CNTL 0x0027 +#define mmSDMA3_RD_BURST_CNTL_BASE_IDX 1 +#define mmSDMA3_HBM_PAGE_CONFIG 0x0028 +#define mmSDMA3_HBM_PAGE_CONFIG_BASE_IDX 1 +#define mmSDMA3_UCODE_CHECKSUM 0x0029 +#define mmSDMA3_UCODE_CHECKSUM_BASE_IDX 1 +#define mmSDMA3_F32_CNTL 0x002a +#define mmSDMA3_F32_CNTL_BASE_IDX 1 +#define mmSDMA3_FREEZE 0x002b +#define mmSDMA3_FREEZE_BASE_IDX 1 +#define mmSDMA3_PHASE0_QUANTUM 0x002c +#define mmSDMA3_PHASE0_QUANTUM_BASE_IDX 1 +#define mmSDMA3_PHASE1_QUANTUM 0x002d +#define mmSDMA3_PHASE1_QUANTUM_BASE_IDX 1 +#define mmSDMA3_EDC_CONFIG 0x0032 +#define mmSDMA3_EDC_CONFIG_BASE_IDX 1 +#define mmSDMA3_BA_THRESHOLD 0x0033 +#define mmSDMA3_BA_THRESHOLD_BASE_IDX 1 +#define mmSDMA3_ID 0x0034 +#define mmSDMA3_ID_BASE_IDX 1 +#define mmSDMA3_VERSION 0x0035 +#define mmSDMA3_VERSION_BASE_IDX 1 +#define mmSDMA3_EDC_COUNTER 0x0036 +#define 
mmSDMA3_EDC_COUNTER_BASE_IDX 1 +#define mmSDMA3_EDC_COUNTER_CLEAR 0x0037 +#define mmSDMA3_EDC_COUNTER_CLEAR_BASE_IDX 1 +#define mmSDMA3_STATUS2_REG 0x0038 +#define mmSDMA3_STATUS2_REG_BASE_IDX 1 +#define mmSDMA3_ATOMIC_CNTL 0x0039 +#define mmSDMA3_ATOMIC_CNTL_BASE_IDX 1 +#define mmSDMA3_ATOMIC_PREOP_LO 0x003a +#define mmSDMA3_ATOMIC_PREOP_LO_BASE_IDX 1 +#define mmSDMA3_ATOMIC_PREOP_HI 0x003b +#define mmSDMA3_ATOMIC_PREOP_HI_BASE_IDX 1 +#define mmSDMA3_UTCL1_CNTL 0x003c +#define mmSDMA3_UTCL1_CNTL_BASE_IDX 1 +#define mmSDMA3_UTCL1_WATERMK 0x003d +#define mmSDMA3_UTCL1_WATERMK_BASE_IDX 1 +#define mmSDMA3_UTCL1_RD_STATUS 0x003e +#define mmSDMA3_UTCL1_RD_STATUS_BASE_IDX 1 +#define mmSDMA3_UTCL1_WR_STATUS 0x003f +#define mmSDMA3_UTCL1_WR_STATUS_BASE_IDX 1 +#define mmSDMA3_UTCL1_INV0 0x0040 +#define mmSDMA3_UTCL1_INV0_BASE_IDX 1 +#define mmSDMA3_UTCL1_INV1 0x0041 +#define mmSDMA3_UTCL1_INV1_BASE_IDX 1 +#define mmSDMA3_UTCL1_INV2 0x0042 +#define mmSDMA3_UTCL1_INV2_BASE_IDX 1 +#define mmSDMA3_UTCL1_RD_XNACK0 0x0043 +#define mmSDMA3_UTCL1_RD_XNACK0_BASE_IDX 1 +#define mmSDMA3_UTCL1_RD_XNACK1 0x0044 +#define mmSDMA3_UTCL1_RD_XNACK1_BASE_IDX 1 +#define mmSDMA3_UTCL1_WR_XNACK0 0x0045 +#define mmSDMA3_UTCL1_WR_XNACK0_BASE_IDX 1 +#define mmSDMA3_UTCL1_WR_XNACK1 0x0046 +#define mmSDMA3_UTCL1_WR_XNACK1_BASE_IDX 1 +#define mmSDMA3_UTCL1_TIMEOUT 0x0047 +#define mmSDMA3_UTCL1_TIMEOUT_BASE_IDX 1 +#define mmSDMA3_UTCL1_PAGE 0x0048 +#define mmSDMA3_UTCL1_PAGE_BASE_IDX 1 +#define mmSDMA3_POWER_CNTL_IDLE 0x0049 +#define mmSDMA3_POWER_CNTL_IDLE_BASE_IDX 1 +#define mmSDMA3_RELAX_ORDERING_LUT 0x004a +#define mmSDMA3_RELAX_ORDERING_LUT_BASE_IDX 1 +#define mmSDMA3_CHICKEN_BITS_2 0x004b +#define mmSDMA3_CHICKEN_BITS_2_BASE_IDX 1 +#define mmSDMA3_STATUS3_REG 0x004c +#define mmSDMA3_STATUS3_REG_BASE_IDX 1 +#define mmSDMA3_PHYSICAL_ADDR_LO 0x004d +#define mmSDMA3_PHYSICAL_ADDR_LO_BASE_IDX 1 +#define mmSDMA3_PHYSICAL_ADDR_HI 0x004e +#define mmSDMA3_PHYSICAL_ADDR_HI_BASE_IDX 1 +#define mmSDMA3_PHASE2_QUANTUM 0x004f +#define mmSDMA3_PHASE2_QUANTUM_BASE_IDX 1 +#define mmSDMA3_ERROR_LOG 0x0050 +#define mmSDMA3_ERROR_LOG_BASE_IDX 1 +#define mmSDMA3_PUB_DUMMY_REG0 0x0051 +#define mmSDMA3_PUB_DUMMY_REG0_BASE_IDX 1 +#define mmSDMA3_PUB_DUMMY_REG1 0x0052 +#define mmSDMA3_PUB_DUMMY_REG1_BASE_IDX 1 +#define mmSDMA3_PUB_DUMMY_REG2 0x0053 +#define mmSDMA3_PUB_DUMMY_REG2_BASE_IDX 1 +#define mmSDMA3_PUB_DUMMY_REG3 0x0054 +#define mmSDMA3_PUB_DUMMY_REG3_BASE_IDX 1 +#define mmSDMA3_F32_COUNTER 0x0055 +#define mmSDMA3_F32_COUNTER_BASE_IDX 1 +#define mmSDMA3_UNBREAKABLE 0x0056 +#define mmSDMA3_UNBREAKABLE_BASE_IDX 1 +#define mmSDMA3_PERFMON_CNTL 0x0057 +#define mmSDMA3_PERFMON_CNTL_BASE_IDX 1 +#define mmSDMA3_PERFCOUNTER0_RESULT 0x0058 +#define mmSDMA3_PERFCOUNTER0_RESULT_BASE_IDX 1 +#define mmSDMA3_PERFCOUNTER1_RESULT 0x0059 +#define mmSDMA3_PERFCOUNTER1_RESULT_BASE_IDX 1 +#define mmSDMA3_PERFCOUNTER_TAG_DELAY_RANGE 0x005a +#define mmSDMA3_PERFCOUNTER_TAG_DELAY_RANGE_BASE_IDX 1 +#define mmSDMA3_CRD_CNTL 0x005b +#define mmSDMA3_CRD_CNTL_BASE_IDX 1 +#define mmSDMA3_GPU_IOV_VIOLATION_LOG 0x005d +#define mmSDMA3_GPU_IOV_VIOLATION_LOG_BASE_IDX 1 +#define mmSDMA3_ULV_CNTL 0x005e +#define mmSDMA3_ULV_CNTL_BASE_IDX 1 +#define mmSDMA3_EA_DBIT_ADDR_DATA 0x0060 +#define mmSDMA3_EA_DBIT_ADDR_DATA_BASE_IDX 1 +#define mmSDMA3_EA_DBIT_ADDR_INDEX 0x0061 +#define mmSDMA3_EA_DBIT_ADDR_INDEX_BASE_IDX 1 +#define mmSDMA3_GPU_IOV_VIOLATION_LOG2 0x0062 +#define mmSDMA3_GPU_IOV_VIOLATION_LOG2_BASE_IDX 1 +#define mmSDMA3_GFX_RB_CNTL 0x0080 +#define 
mmSDMA3_GFX_RB_CNTL_BASE_IDX 1 +#define mmSDMA3_GFX_RB_BASE 0x0081 +#define mmSDMA3_GFX_RB_BASE_BASE_IDX 1 +#define mmSDMA3_GFX_RB_BASE_HI 0x0082 +#define mmSDMA3_GFX_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA3_GFX_RB_RPTR 0x0083 +#define mmSDMA3_GFX_RB_RPTR_BASE_IDX 1 +#define mmSDMA3_GFX_RB_RPTR_HI 0x0084 +#define mmSDMA3_GFX_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA3_GFX_RB_WPTR 0x0085 +#define mmSDMA3_GFX_RB_WPTR_BASE_IDX 1 +#define mmSDMA3_GFX_RB_WPTR_HI 0x0086 +#define mmSDMA3_GFX_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA3_GFX_RB_WPTR_POLL_CNTL 0x0087 +#define mmSDMA3_GFX_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA3_GFX_RB_RPTR_ADDR_HI 0x0088 +#define mmSDMA3_GFX_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA3_GFX_RB_RPTR_ADDR_LO 0x0089 +#define mmSDMA3_GFX_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA3_GFX_IB_CNTL 0x008a +#define mmSDMA3_GFX_IB_CNTL_BASE_IDX 1 +#define mmSDMA3_GFX_IB_RPTR 0x008b +#define mmSDMA3_GFX_IB_RPTR_BASE_IDX 1 +#define mmSDMA3_GFX_IB_OFFSET 0x008c +#define mmSDMA3_GFX_IB_OFFSET_BASE_IDX 1 +#define mmSDMA3_GFX_IB_BASE_LO 0x008d +#define mmSDMA3_GFX_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA3_GFX_IB_BASE_HI 0x008e +#define mmSDMA3_GFX_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA3_GFX_IB_SIZE 0x008f +#define mmSDMA3_GFX_IB_SIZE_BASE_IDX 1 +#define mmSDMA3_GFX_SKIP_CNTL 0x0090 +#define mmSDMA3_GFX_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA3_GFX_CONTEXT_STATUS 0x0091 +#define mmSDMA3_GFX_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA3_GFX_DOORBELL 0x0092 +#define mmSDMA3_GFX_DOORBELL_BASE_IDX 1 +#define mmSDMA3_GFX_CONTEXT_CNTL 0x0093 +#define mmSDMA3_GFX_CONTEXT_CNTL_BASE_IDX 1 +#define mmSDMA3_GFX_STATUS 0x00a8 +#define mmSDMA3_GFX_STATUS_BASE_IDX 1 +#define mmSDMA3_GFX_DOORBELL_LOG 0x00a9 +#define mmSDMA3_GFX_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA3_GFX_WATERMARK 0x00aa +#define mmSDMA3_GFX_WATERMARK_BASE_IDX 1 +#define mmSDMA3_GFX_DOORBELL_OFFSET 0x00ab +#define mmSDMA3_GFX_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA3_GFX_CSA_ADDR_LO 0x00ac +#define mmSDMA3_GFX_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA3_GFX_CSA_ADDR_HI 0x00ad +#define mmSDMA3_GFX_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA3_GFX_IB_SUB_REMAIN 0x00af +#define mmSDMA3_GFX_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA3_GFX_PREEMPT 0x00b0 +#define mmSDMA3_GFX_PREEMPT_BASE_IDX 1 +#define mmSDMA3_GFX_DUMMY_REG 0x00b1 +#define mmSDMA3_GFX_DUMMY_REG_BASE_IDX 1 +#define mmSDMA3_GFX_RB_WPTR_POLL_ADDR_HI 0x00b2 +#define mmSDMA3_GFX_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA3_GFX_RB_WPTR_POLL_ADDR_LO 0x00b3 +#define mmSDMA3_GFX_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA3_GFX_RB_AQL_CNTL 0x00b4 +#define mmSDMA3_GFX_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA3_GFX_MINOR_PTR_UPDATE 0x00b5 +#define mmSDMA3_GFX_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA3_GFX_MIDCMD_DATA0 0x00c0 +#define mmSDMA3_GFX_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA3_GFX_MIDCMD_DATA1 0x00c1 +#define mmSDMA3_GFX_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA3_GFX_MIDCMD_DATA2 0x00c2 +#define mmSDMA3_GFX_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA3_GFX_MIDCMD_DATA3 0x00c3 +#define mmSDMA3_GFX_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA3_GFX_MIDCMD_DATA4 0x00c4 +#define mmSDMA3_GFX_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA3_GFX_MIDCMD_DATA5 0x00c5 +#define mmSDMA3_GFX_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA3_GFX_MIDCMD_DATA6 0x00c6 +#define mmSDMA3_GFX_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA3_GFX_MIDCMD_DATA7 0x00c7 +#define mmSDMA3_GFX_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA3_GFX_MIDCMD_DATA8 0x00c8 +#define mmSDMA3_GFX_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA3_GFX_MIDCMD_CNTL 0x00c9 +#define 
mmSDMA3_GFX_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA3_PAGE_RB_CNTL 0x00d8 +#define mmSDMA3_PAGE_RB_CNTL_BASE_IDX 1 +#define mmSDMA3_PAGE_RB_BASE 0x00d9 +#define mmSDMA3_PAGE_RB_BASE_BASE_IDX 1 +#define mmSDMA3_PAGE_RB_BASE_HI 0x00da +#define mmSDMA3_PAGE_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA3_PAGE_RB_RPTR 0x00db +#define mmSDMA3_PAGE_RB_RPTR_BASE_IDX 1 +#define mmSDMA3_PAGE_RB_RPTR_HI 0x00dc +#define mmSDMA3_PAGE_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA3_PAGE_RB_WPTR 0x00dd +#define mmSDMA3_PAGE_RB_WPTR_BASE_IDX 1 +#define mmSDMA3_PAGE_RB_WPTR_HI 0x00de +#define mmSDMA3_PAGE_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA3_PAGE_RB_WPTR_POLL_CNTL 0x00df +#define mmSDMA3_PAGE_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA3_PAGE_RB_RPTR_ADDR_HI 0x00e0 +#define mmSDMA3_PAGE_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA3_PAGE_RB_RPTR_ADDR_LO 0x00e1 +#define mmSDMA3_PAGE_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA3_PAGE_IB_CNTL 0x00e2 +#define mmSDMA3_PAGE_IB_CNTL_BASE_IDX 1 +#define mmSDMA3_PAGE_IB_RPTR 0x00e3 +#define mmSDMA3_PAGE_IB_RPTR_BASE_IDX 1 +#define mmSDMA3_PAGE_IB_OFFSET 0x00e4 +#define mmSDMA3_PAGE_IB_OFFSET_BASE_IDX 1 +#define mmSDMA3_PAGE_IB_BASE_LO 0x00e5 +#define mmSDMA3_PAGE_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA3_PAGE_IB_BASE_HI 0x00e6 +#define mmSDMA3_PAGE_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA3_PAGE_IB_SIZE 0x00e7 +#define mmSDMA3_PAGE_IB_SIZE_BASE_IDX 1 +#define mmSDMA3_PAGE_SKIP_CNTL 0x00e8 +#define mmSDMA3_PAGE_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA3_PAGE_CONTEXT_STATUS 0x00e9 +#define mmSDMA3_PAGE_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA3_PAGE_DOORBELL 0x00ea +#define mmSDMA3_PAGE_DOORBELL_BASE_IDX 1 +#define mmSDMA3_PAGE_STATUS 0x0100 +#define mmSDMA3_PAGE_STATUS_BASE_IDX 1 +#define mmSDMA3_PAGE_DOORBELL_LOG 0x0101 +#define mmSDMA3_PAGE_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA3_PAGE_WATERMARK 0x0102 +#define mmSDMA3_PAGE_WATERMARK_BASE_IDX 1 +#define mmSDMA3_PAGE_DOORBELL_OFFSET 0x0103 +#define mmSDMA3_PAGE_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA3_PAGE_CSA_ADDR_LO 0x0104 +#define mmSDMA3_PAGE_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA3_PAGE_CSA_ADDR_HI 0x0105 +#define mmSDMA3_PAGE_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA3_PAGE_IB_SUB_REMAIN 0x0107 +#define mmSDMA3_PAGE_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA3_PAGE_PREEMPT 0x0108 +#define mmSDMA3_PAGE_PREEMPT_BASE_IDX 1 +#define mmSDMA3_PAGE_DUMMY_REG 0x0109 +#define mmSDMA3_PAGE_DUMMY_REG_BASE_IDX 1 +#define mmSDMA3_PAGE_RB_WPTR_POLL_ADDR_HI 0x010a +#define mmSDMA3_PAGE_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA3_PAGE_RB_WPTR_POLL_ADDR_LO 0x010b +#define mmSDMA3_PAGE_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA3_PAGE_RB_AQL_CNTL 0x010c +#define mmSDMA3_PAGE_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA3_PAGE_MINOR_PTR_UPDATE 0x010d +#define mmSDMA3_PAGE_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA3_PAGE_MIDCMD_DATA0 0x0118 +#define mmSDMA3_PAGE_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA3_PAGE_MIDCMD_DATA1 0x0119 +#define mmSDMA3_PAGE_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA3_PAGE_MIDCMD_DATA2 0x011a +#define mmSDMA3_PAGE_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA3_PAGE_MIDCMD_DATA3 0x011b +#define mmSDMA3_PAGE_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA3_PAGE_MIDCMD_DATA4 0x011c +#define mmSDMA3_PAGE_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA3_PAGE_MIDCMD_DATA5 0x011d +#define mmSDMA3_PAGE_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA3_PAGE_MIDCMD_DATA6 0x011e +#define mmSDMA3_PAGE_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA3_PAGE_MIDCMD_DATA7 0x011f +#define mmSDMA3_PAGE_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA3_PAGE_MIDCMD_DATA8 0x0120 +#define 
mmSDMA3_PAGE_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA3_PAGE_MIDCMD_CNTL 0x0121 +#define mmSDMA3_PAGE_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC0_RB_CNTL 0x0130 +#define mmSDMA3_RLC0_RB_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC0_RB_BASE 0x0131 +#define mmSDMA3_RLC0_RB_BASE_BASE_IDX 1 +#define mmSDMA3_RLC0_RB_BASE_HI 0x0132 +#define mmSDMA3_RLC0_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA3_RLC0_RB_RPTR 0x0133 +#define mmSDMA3_RLC0_RB_RPTR_BASE_IDX 1 +#define mmSDMA3_RLC0_RB_RPTR_HI 0x0134 +#define mmSDMA3_RLC0_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA3_RLC0_RB_WPTR 0x0135 +#define mmSDMA3_RLC0_RB_WPTR_BASE_IDX 1 +#define mmSDMA3_RLC0_RB_WPTR_HI 0x0136 +#define mmSDMA3_RLC0_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA3_RLC0_RB_WPTR_POLL_CNTL 0x0137 +#define mmSDMA3_RLC0_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC0_RB_RPTR_ADDR_HI 0x0138 +#define mmSDMA3_RLC0_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA3_RLC0_RB_RPTR_ADDR_LO 0x0139 +#define mmSDMA3_RLC0_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA3_RLC0_IB_CNTL 0x013a +#define mmSDMA3_RLC0_IB_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC0_IB_RPTR 0x013b +#define mmSDMA3_RLC0_IB_RPTR_BASE_IDX 1 +#define mmSDMA3_RLC0_IB_OFFSET 0x013c +#define mmSDMA3_RLC0_IB_OFFSET_BASE_IDX 1 +#define mmSDMA3_RLC0_IB_BASE_LO 0x013d +#define mmSDMA3_RLC0_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA3_RLC0_IB_BASE_HI 0x013e +#define mmSDMA3_RLC0_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA3_RLC0_IB_SIZE 0x013f +#define mmSDMA3_RLC0_IB_SIZE_BASE_IDX 1 +#define mmSDMA3_RLC0_SKIP_CNTL 0x0140 +#define mmSDMA3_RLC0_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC0_CONTEXT_STATUS 0x0141 +#define mmSDMA3_RLC0_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA3_RLC0_DOORBELL 0x0142 +#define mmSDMA3_RLC0_DOORBELL_BASE_IDX 1 +#define mmSDMA3_RLC0_STATUS 0x0158 +#define mmSDMA3_RLC0_STATUS_BASE_IDX 1 +#define mmSDMA3_RLC0_DOORBELL_LOG 0x0159 +#define mmSDMA3_RLC0_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA3_RLC0_WATERMARK 0x015a +#define mmSDMA3_RLC0_WATERMARK_BASE_IDX 1 +#define mmSDMA3_RLC0_DOORBELL_OFFSET 0x015b +#define mmSDMA3_RLC0_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA3_RLC0_CSA_ADDR_LO 0x015c +#define mmSDMA3_RLC0_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA3_RLC0_CSA_ADDR_HI 0x015d +#define mmSDMA3_RLC0_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA3_RLC0_IB_SUB_REMAIN 0x015f +#define mmSDMA3_RLC0_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA3_RLC0_PREEMPT 0x0160 +#define mmSDMA3_RLC0_PREEMPT_BASE_IDX 1 +#define mmSDMA3_RLC0_DUMMY_REG 0x0161 +#define mmSDMA3_RLC0_DUMMY_REG_BASE_IDX 1 +#define mmSDMA3_RLC0_RB_WPTR_POLL_ADDR_HI 0x0162 +#define mmSDMA3_RLC0_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA3_RLC0_RB_WPTR_POLL_ADDR_LO 0x0163 +#define mmSDMA3_RLC0_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA3_RLC0_RB_AQL_CNTL 0x0164 +#define mmSDMA3_RLC0_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC0_MINOR_PTR_UPDATE 0x0165 +#define mmSDMA3_RLC0_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA3_RLC0_MIDCMD_DATA0 0x0170 +#define mmSDMA3_RLC0_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA3_RLC0_MIDCMD_DATA1 0x0171 +#define mmSDMA3_RLC0_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA3_RLC0_MIDCMD_DATA2 0x0172 +#define mmSDMA3_RLC0_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA3_RLC0_MIDCMD_DATA3 0x0173 +#define mmSDMA3_RLC0_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA3_RLC0_MIDCMD_DATA4 0x0174 +#define mmSDMA3_RLC0_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA3_RLC0_MIDCMD_DATA5 0x0175 +#define mmSDMA3_RLC0_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA3_RLC0_MIDCMD_DATA6 0x0176 +#define mmSDMA3_RLC0_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA3_RLC0_MIDCMD_DATA7 0x0177 +#define 
mmSDMA3_RLC0_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA3_RLC0_MIDCMD_DATA8 0x0178 +#define mmSDMA3_RLC0_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA3_RLC0_MIDCMD_CNTL 0x0179 +#define mmSDMA3_RLC0_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC1_RB_CNTL 0x0188 +#define mmSDMA3_RLC1_RB_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC1_RB_BASE 0x0189 +#define mmSDMA3_RLC1_RB_BASE_BASE_IDX 1 +#define mmSDMA3_RLC1_RB_BASE_HI 0x018a +#define mmSDMA3_RLC1_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA3_RLC1_RB_RPTR 0x018b +#define mmSDMA3_RLC1_RB_RPTR_BASE_IDX 1 +#define mmSDMA3_RLC1_RB_RPTR_HI 0x018c +#define mmSDMA3_RLC1_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA3_RLC1_RB_WPTR 0x018d +#define mmSDMA3_RLC1_RB_WPTR_BASE_IDX 1 +#define mmSDMA3_RLC1_RB_WPTR_HI 0x018e +#define mmSDMA3_RLC1_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA3_RLC1_RB_WPTR_POLL_CNTL 0x018f +#define mmSDMA3_RLC1_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC1_RB_RPTR_ADDR_HI 0x0190 +#define mmSDMA3_RLC1_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA3_RLC1_RB_RPTR_ADDR_LO 0x0191 +#define mmSDMA3_RLC1_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA3_RLC1_IB_CNTL 0x0192 +#define mmSDMA3_RLC1_IB_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC1_IB_RPTR 0x0193 +#define mmSDMA3_RLC1_IB_RPTR_BASE_IDX 1 +#define mmSDMA3_RLC1_IB_OFFSET 0x0194 +#define mmSDMA3_RLC1_IB_OFFSET_BASE_IDX 1 +#define mmSDMA3_RLC1_IB_BASE_LO 0x0195 +#define mmSDMA3_RLC1_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA3_RLC1_IB_BASE_HI 0x0196 +#define mmSDMA3_RLC1_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA3_RLC1_IB_SIZE 0x0197 +#define mmSDMA3_RLC1_IB_SIZE_BASE_IDX 1 +#define mmSDMA3_RLC1_SKIP_CNTL 0x0198 +#define mmSDMA3_RLC1_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC1_CONTEXT_STATUS 0x0199 +#define mmSDMA3_RLC1_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA3_RLC1_DOORBELL 0x019a +#define mmSDMA3_RLC1_DOORBELL_BASE_IDX 1 +#define mmSDMA3_RLC1_STATUS 0x01b0 +#define mmSDMA3_RLC1_STATUS_BASE_IDX 1 +#define mmSDMA3_RLC1_DOORBELL_LOG 0x01b1 +#define mmSDMA3_RLC1_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA3_RLC1_WATERMARK 0x01b2 +#define mmSDMA3_RLC1_WATERMARK_BASE_IDX 1 +#define mmSDMA3_RLC1_DOORBELL_OFFSET 0x01b3 +#define mmSDMA3_RLC1_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA3_RLC1_CSA_ADDR_LO 0x01b4 +#define mmSDMA3_RLC1_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA3_RLC1_CSA_ADDR_HI 0x01b5 +#define mmSDMA3_RLC1_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA3_RLC1_IB_SUB_REMAIN 0x01b7 +#define mmSDMA3_RLC1_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA3_RLC1_PREEMPT 0x01b8 +#define mmSDMA3_RLC1_PREEMPT_BASE_IDX 1 +#define mmSDMA3_RLC1_DUMMY_REG 0x01b9 +#define mmSDMA3_RLC1_DUMMY_REG_BASE_IDX 1 +#define mmSDMA3_RLC1_RB_WPTR_POLL_ADDR_HI 0x01ba +#define mmSDMA3_RLC1_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA3_RLC1_RB_WPTR_POLL_ADDR_LO 0x01bb +#define mmSDMA3_RLC1_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA3_RLC1_RB_AQL_CNTL 0x01bc +#define mmSDMA3_RLC1_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC1_MINOR_PTR_UPDATE 0x01bd +#define mmSDMA3_RLC1_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA3_RLC1_MIDCMD_DATA0 0x01c8 +#define mmSDMA3_RLC1_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA3_RLC1_MIDCMD_DATA1 0x01c9 +#define mmSDMA3_RLC1_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA3_RLC1_MIDCMD_DATA2 0x01ca +#define mmSDMA3_RLC1_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA3_RLC1_MIDCMD_DATA3 0x01cb +#define mmSDMA3_RLC1_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA3_RLC1_MIDCMD_DATA4 0x01cc +#define mmSDMA3_RLC1_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA3_RLC1_MIDCMD_DATA5 0x01cd +#define mmSDMA3_RLC1_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA3_RLC1_MIDCMD_DATA6 0x01ce +#define 
mmSDMA3_RLC1_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA3_RLC1_MIDCMD_DATA7 0x01cf +#define mmSDMA3_RLC1_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA3_RLC1_MIDCMD_DATA8 0x01d0 +#define mmSDMA3_RLC1_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA3_RLC1_MIDCMD_CNTL 0x01d1 +#define mmSDMA3_RLC1_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC2_RB_CNTL 0x01e0 +#define mmSDMA3_RLC2_RB_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC2_RB_BASE 0x01e1 +#define mmSDMA3_RLC2_RB_BASE_BASE_IDX 1 +#define mmSDMA3_RLC2_RB_BASE_HI 0x01e2 +#define mmSDMA3_RLC2_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA3_RLC2_RB_RPTR 0x01e3 +#define mmSDMA3_RLC2_RB_RPTR_BASE_IDX 1 +#define mmSDMA3_RLC2_RB_RPTR_HI 0x01e4 +#define mmSDMA3_RLC2_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA3_RLC2_RB_WPTR 0x01e5 +#define mmSDMA3_RLC2_RB_WPTR_BASE_IDX 1 +#define mmSDMA3_RLC2_RB_WPTR_HI 0x01e6 +#define mmSDMA3_RLC2_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA3_RLC2_RB_WPTR_POLL_CNTL 0x01e7 +#define mmSDMA3_RLC2_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC2_RB_RPTR_ADDR_HI 0x01e8 +#define mmSDMA3_RLC2_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA3_RLC2_RB_RPTR_ADDR_LO 0x01e9 +#define mmSDMA3_RLC2_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA3_RLC2_IB_CNTL 0x01ea +#define mmSDMA3_RLC2_IB_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC2_IB_RPTR 0x01eb +#define mmSDMA3_RLC2_IB_RPTR_BASE_IDX 1 +#define mmSDMA3_RLC2_IB_OFFSET 0x01ec +#define mmSDMA3_RLC2_IB_OFFSET_BASE_IDX 1 +#define mmSDMA3_RLC2_IB_BASE_LO 0x01ed +#define mmSDMA3_RLC2_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA3_RLC2_IB_BASE_HI 0x01ee +#define mmSDMA3_RLC2_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA3_RLC2_IB_SIZE 0x01ef +#define mmSDMA3_RLC2_IB_SIZE_BASE_IDX 1 +#define mmSDMA3_RLC2_SKIP_CNTL 0x01f0 +#define mmSDMA3_RLC2_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC2_CONTEXT_STATUS 0x01f1 +#define mmSDMA3_RLC2_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA3_RLC2_DOORBELL 0x01f2 +#define mmSDMA3_RLC2_DOORBELL_BASE_IDX 1 +#define mmSDMA3_RLC2_STATUS 0x0208 +#define mmSDMA3_RLC2_STATUS_BASE_IDX 1 +#define mmSDMA3_RLC2_DOORBELL_LOG 0x0209 +#define mmSDMA3_RLC2_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA3_RLC2_WATERMARK 0x020a +#define mmSDMA3_RLC2_WATERMARK_BASE_IDX 1 +#define mmSDMA3_RLC2_DOORBELL_OFFSET 0x020b +#define mmSDMA3_RLC2_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA3_RLC2_CSA_ADDR_LO 0x020c +#define mmSDMA3_RLC2_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA3_RLC2_CSA_ADDR_HI 0x020d +#define mmSDMA3_RLC2_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA3_RLC2_IB_SUB_REMAIN 0x020f +#define mmSDMA3_RLC2_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA3_RLC2_PREEMPT 0x0210 +#define mmSDMA3_RLC2_PREEMPT_BASE_IDX 1 +#define mmSDMA3_RLC2_DUMMY_REG 0x0211 +#define mmSDMA3_RLC2_DUMMY_REG_BASE_IDX 1 +#define mmSDMA3_RLC2_RB_WPTR_POLL_ADDR_HI 0x0212 +#define mmSDMA3_RLC2_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA3_RLC2_RB_WPTR_POLL_ADDR_LO 0x0213 +#define mmSDMA3_RLC2_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA3_RLC2_RB_AQL_CNTL 0x0214 +#define mmSDMA3_RLC2_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC2_MINOR_PTR_UPDATE 0x0215 +#define mmSDMA3_RLC2_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA3_RLC2_MIDCMD_DATA0 0x0220 +#define mmSDMA3_RLC2_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA3_RLC2_MIDCMD_DATA1 0x0221 +#define mmSDMA3_RLC2_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA3_RLC2_MIDCMD_DATA2 0x0222 +#define mmSDMA3_RLC2_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA3_RLC2_MIDCMD_DATA3 0x0223 +#define mmSDMA3_RLC2_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA3_RLC2_MIDCMD_DATA4 0x0224 +#define mmSDMA3_RLC2_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA3_RLC2_MIDCMD_DATA5 0x0225 +#define 
mmSDMA3_RLC2_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA3_RLC2_MIDCMD_DATA6 0x0226 +#define mmSDMA3_RLC2_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA3_RLC2_MIDCMD_DATA7 0x0227 +#define mmSDMA3_RLC2_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA3_RLC2_MIDCMD_DATA8 0x0228 +#define mmSDMA3_RLC2_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA3_RLC2_MIDCMD_CNTL 0x0229 +#define mmSDMA3_RLC2_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC3_RB_CNTL 0x0238 +#define mmSDMA3_RLC3_RB_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC3_RB_BASE 0x0239 +#define mmSDMA3_RLC3_RB_BASE_BASE_IDX 1 +#define mmSDMA3_RLC3_RB_BASE_HI 0x023a +#define mmSDMA3_RLC3_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA3_RLC3_RB_RPTR 0x023b +#define mmSDMA3_RLC3_RB_RPTR_BASE_IDX 1 +#define mmSDMA3_RLC3_RB_RPTR_HI 0x023c +#define mmSDMA3_RLC3_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA3_RLC3_RB_WPTR 0x023d +#define mmSDMA3_RLC3_RB_WPTR_BASE_IDX 1 +#define mmSDMA3_RLC3_RB_WPTR_HI 0x023e +#define mmSDMA3_RLC3_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA3_RLC3_RB_WPTR_POLL_CNTL 0x023f +#define mmSDMA3_RLC3_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC3_RB_RPTR_ADDR_HI 0x0240 +#define mmSDMA3_RLC3_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA3_RLC3_RB_RPTR_ADDR_LO 0x0241 +#define mmSDMA3_RLC3_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA3_RLC3_IB_CNTL 0x0242 +#define mmSDMA3_RLC3_IB_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC3_IB_RPTR 0x0243 +#define mmSDMA3_RLC3_IB_RPTR_BASE_IDX 1 +#define mmSDMA3_RLC3_IB_OFFSET 0x0244 +#define mmSDMA3_RLC3_IB_OFFSET_BASE_IDX 1 +#define mmSDMA3_RLC3_IB_BASE_LO 0x0245 +#define mmSDMA3_RLC3_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA3_RLC3_IB_BASE_HI 0x0246 +#define mmSDMA3_RLC3_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA3_RLC3_IB_SIZE 0x0247 +#define mmSDMA3_RLC3_IB_SIZE_BASE_IDX 1 +#define mmSDMA3_RLC3_SKIP_CNTL 0x0248 +#define mmSDMA3_RLC3_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC3_CONTEXT_STATUS 0x0249 +#define mmSDMA3_RLC3_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA3_RLC3_DOORBELL 0x024a +#define mmSDMA3_RLC3_DOORBELL_BASE_IDX 1 +#define mmSDMA3_RLC3_STATUS 0x0260 +#define mmSDMA3_RLC3_STATUS_BASE_IDX 1 +#define mmSDMA3_RLC3_DOORBELL_LOG 0x0261 +#define mmSDMA3_RLC3_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA3_RLC3_WATERMARK 0x0262 +#define mmSDMA3_RLC3_WATERMARK_BASE_IDX 1 +#define mmSDMA3_RLC3_DOORBELL_OFFSET 0x0263 +#define mmSDMA3_RLC3_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA3_RLC3_CSA_ADDR_LO 0x0264 +#define mmSDMA3_RLC3_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA3_RLC3_CSA_ADDR_HI 0x0265 +#define mmSDMA3_RLC3_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA3_RLC3_IB_SUB_REMAIN 0x0267 +#define mmSDMA3_RLC3_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA3_RLC3_PREEMPT 0x0268 +#define mmSDMA3_RLC3_PREEMPT_BASE_IDX 1 +#define mmSDMA3_RLC3_DUMMY_REG 0x0269 +#define mmSDMA3_RLC3_DUMMY_REG_BASE_IDX 1 +#define mmSDMA3_RLC3_RB_WPTR_POLL_ADDR_HI 0x026a +#define mmSDMA3_RLC3_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA3_RLC3_RB_WPTR_POLL_ADDR_LO 0x026b +#define mmSDMA3_RLC3_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA3_RLC3_RB_AQL_CNTL 0x026c +#define mmSDMA3_RLC3_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC3_MINOR_PTR_UPDATE 0x026d +#define mmSDMA3_RLC3_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA3_RLC3_MIDCMD_DATA0 0x0278 +#define mmSDMA3_RLC3_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA3_RLC3_MIDCMD_DATA1 0x0279 +#define mmSDMA3_RLC3_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA3_RLC3_MIDCMD_DATA2 0x027a +#define mmSDMA3_RLC3_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA3_RLC3_MIDCMD_DATA3 0x027b +#define mmSDMA3_RLC3_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA3_RLC3_MIDCMD_DATA4 0x027c +#define 
mmSDMA3_RLC3_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA3_RLC3_MIDCMD_DATA5 0x027d +#define mmSDMA3_RLC3_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA3_RLC3_MIDCMD_DATA6 0x027e +#define mmSDMA3_RLC3_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA3_RLC3_MIDCMD_DATA7 0x027f +#define mmSDMA3_RLC3_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA3_RLC3_MIDCMD_DATA8 0x0280 +#define mmSDMA3_RLC3_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA3_RLC3_MIDCMD_CNTL 0x0281 +#define mmSDMA3_RLC3_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC4_RB_CNTL 0x0290 +#define mmSDMA3_RLC4_RB_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC4_RB_BASE 0x0291 +#define mmSDMA3_RLC4_RB_BASE_BASE_IDX 1 +#define mmSDMA3_RLC4_RB_BASE_HI 0x0292 +#define mmSDMA3_RLC4_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA3_RLC4_RB_RPTR 0x0293 +#define mmSDMA3_RLC4_RB_RPTR_BASE_IDX 1 +#define mmSDMA3_RLC4_RB_RPTR_HI 0x0294 +#define mmSDMA3_RLC4_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA3_RLC4_RB_WPTR 0x0295 +#define mmSDMA3_RLC4_RB_WPTR_BASE_IDX 1 +#define mmSDMA3_RLC4_RB_WPTR_HI 0x0296 +#define mmSDMA3_RLC4_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA3_RLC4_RB_WPTR_POLL_CNTL 0x0297 +#define mmSDMA3_RLC4_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC4_RB_RPTR_ADDR_HI 0x0298 +#define mmSDMA3_RLC4_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA3_RLC4_RB_RPTR_ADDR_LO 0x0299 +#define mmSDMA3_RLC4_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA3_RLC4_IB_CNTL 0x029a +#define mmSDMA3_RLC4_IB_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC4_IB_RPTR 0x029b +#define mmSDMA3_RLC4_IB_RPTR_BASE_IDX 1 +#define mmSDMA3_RLC4_IB_OFFSET 0x029c +#define mmSDMA3_RLC4_IB_OFFSET_BASE_IDX 1 +#define mmSDMA3_RLC4_IB_BASE_LO 0x029d +#define mmSDMA3_RLC4_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA3_RLC4_IB_BASE_HI 0x029e +#define mmSDMA3_RLC4_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA3_RLC4_IB_SIZE 0x029f +#define mmSDMA3_RLC4_IB_SIZE_BASE_IDX 1 +#define mmSDMA3_RLC4_SKIP_CNTL 0x02a0 +#define mmSDMA3_RLC4_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC4_CONTEXT_STATUS 0x02a1 +#define mmSDMA3_RLC4_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA3_RLC4_DOORBELL 0x02a2 +#define mmSDMA3_RLC4_DOORBELL_BASE_IDX 1 +#define mmSDMA3_RLC4_STATUS 0x02b8 +#define mmSDMA3_RLC4_STATUS_BASE_IDX 1 +#define mmSDMA3_RLC4_DOORBELL_LOG 0x02b9 +#define mmSDMA3_RLC4_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA3_RLC4_WATERMARK 0x02ba +#define mmSDMA3_RLC4_WATERMARK_BASE_IDX 1 +#define mmSDMA3_RLC4_DOORBELL_OFFSET 0x02bb +#define mmSDMA3_RLC4_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA3_RLC4_CSA_ADDR_LO 0x02bc +#define mmSDMA3_RLC4_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA3_RLC4_CSA_ADDR_HI 0x02bd +#define mmSDMA3_RLC4_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA3_RLC4_IB_SUB_REMAIN 0x02bf +#define mmSDMA3_RLC4_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA3_RLC4_PREEMPT 0x02c0 +#define mmSDMA3_RLC4_PREEMPT_BASE_IDX 1 +#define mmSDMA3_RLC4_DUMMY_REG 0x02c1 +#define mmSDMA3_RLC4_DUMMY_REG_BASE_IDX 1 +#define mmSDMA3_RLC4_RB_WPTR_POLL_ADDR_HI 0x02c2 +#define mmSDMA3_RLC4_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA3_RLC4_RB_WPTR_POLL_ADDR_LO 0x02c3 +#define mmSDMA3_RLC4_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA3_RLC4_RB_AQL_CNTL 0x02c4 +#define mmSDMA3_RLC4_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC4_MINOR_PTR_UPDATE 0x02c5 +#define mmSDMA3_RLC4_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA3_RLC4_MIDCMD_DATA0 0x02d0 +#define mmSDMA3_RLC4_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA3_RLC4_MIDCMD_DATA1 0x02d1 +#define mmSDMA3_RLC4_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA3_RLC4_MIDCMD_DATA2 0x02d2 +#define mmSDMA3_RLC4_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA3_RLC4_MIDCMD_DATA3 0x02d3 +#define 
mmSDMA3_RLC4_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA3_RLC4_MIDCMD_DATA4 0x02d4 +#define mmSDMA3_RLC4_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA3_RLC4_MIDCMD_DATA5 0x02d5 +#define mmSDMA3_RLC4_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA3_RLC4_MIDCMD_DATA6 0x02d6 +#define mmSDMA3_RLC4_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA3_RLC4_MIDCMD_DATA7 0x02d7 +#define mmSDMA3_RLC4_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA3_RLC4_MIDCMD_DATA8 0x02d8 +#define mmSDMA3_RLC4_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA3_RLC4_MIDCMD_CNTL 0x02d9 +#define mmSDMA3_RLC4_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC5_RB_CNTL 0x02e8 +#define mmSDMA3_RLC5_RB_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC5_RB_BASE 0x02e9 +#define mmSDMA3_RLC5_RB_BASE_BASE_IDX 1 +#define mmSDMA3_RLC5_RB_BASE_HI 0x02ea +#define mmSDMA3_RLC5_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA3_RLC5_RB_RPTR 0x02eb +#define mmSDMA3_RLC5_RB_RPTR_BASE_IDX 1 +#define mmSDMA3_RLC5_RB_RPTR_HI 0x02ec +#define mmSDMA3_RLC5_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA3_RLC5_RB_WPTR 0x02ed +#define mmSDMA3_RLC5_RB_WPTR_BASE_IDX 1 +#define mmSDMA3_RLC5_RB_WPTR_HI 0x02ee +#define mmSDMA3_RLC5_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA3_RLC5_RB_WPTR_POLL_CNTL 0x02ef +#define mmSDMA3_RLC5_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC5_RB_RPTR_ADDR_HI 0x02f0 +#define mmSDMA3_RLC5_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA3_RLC5_RB_RPTR_ADDR_LO 0x02f1 +#define mmSDMA3_RLC5_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA3_RLC5_IB_CNTL 0x02f2 +#define mmSDMA3_RLC5_IB_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC5_IB_RPTR 0x02f3 +#define mmSDMA3_RLC5_IB_RPTR_BASE_IDX 1 +#define mmSDMA3_RLC5_IB_OFFSET 0x02f4 +#define mmSDMA3_RLC5_IB_OFFSET_BASE_IDX 1 +#define mmSDMA3_RLC5_IB_BASE_LO 0x02f5 +#define mmSDMA3_RLC5_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA3_RLC5_IB_BASE_HI 0x02f6 +#define mmSDMA3_RLC5_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA3_RLC5_IB_SIZE 0x02f7 +#define mmSDMA3_RLC5_IB_SIZE_BASE_IDX 1 +#define mmSDMA3_RLC5_SKIP_CNTL 0x02f8 +#define mmSDMA3_RLC5_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC5_CONTEXT_STATUS 0x02f9 +#define mmSDMA3_RLC5_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA3_RLC5_DOORBELL 0x02fa +#define mmSDMA3_RLC5_DOORBELL_BASE_IDX 1 +#define mmSDMA3_RLC5_STATUS 0x0310 +#define mmSDMA3_RLC5_STATUS_BASE_IDX 1 +#define mmSDMA3_RLC5_DOORBELL_LOG 0x0311 +#define mmSDMA3_RLC5_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA3_RLC5_WATERMARK 0x0312 +#define mmSDMA3_RLC5_WATERMARK_BASE_IDX 1 +#define mmSDMA3_RLC5_DOORBELL_OFFSET 0x0313 +#define mmSDMA3_RLC5_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA3_RLC5_CSA_ADDR_LO 0x0314 +#define mmSDMA3_RLC5_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA3_RLC5_CSA_ADDR_HI 0x0315 +#define mmSDMA3_RLC5_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA3_RLC5_IB_SUB_REMAIN 0x0317 +#define mmSDMA3_RLC5_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA3_RLC5_PREEMPT 0x0318 +#define mmSDMA3_RLC5_PREEMPT_BASE_IDX 1 +#define mmSDMA3_RLC5_DUMMY_REG 0x0319 +#define mmSDMA3_RLC5_DUMMY_REG_BASE_IDX 1 +#define mmSDMA3_RLC5_RB_WPTR_POLL_ADDR_HI 0x031a +#define mmSDMA3_RLC5_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA3_RLC5_RB_WPTR_POLL_ADDR_LO 0x031b +#define mmSDMA3_RLC5_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA3_RLC5_RB_AQL_CNTL 0x031c +#define mmSDMA3_RLC5_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC5_MINOR_PTR_UPDATE 0x031d +#define mmSDMA3_RLC5_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA3_RLC5_MIDCMD_DATA0 0x0328 +#define mmSDMA3_RLC5_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA3_RLC5_MIDCMD_DATA1 0x0329 +#define mmSDMA3_RLC5_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA3_RLC5_MIDCMD_DATA2 0x032a +#define 
mmSDMA3_RLC5_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA3_RLC5_MIDCMD_DATA3 0x032b +#define mmSDMA3_RLC5_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA3_RLC5_MIDCMD_DATA4 0x032c +#define mmSDMA3_RLC5_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA3_RLC5_MIDCMD_DATA5 0x032d +#define mmSDMA3_RLC5_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA3_RLC5_MIDCMD_DATA6 0x032e +#define mmSDMA3_RLC5_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA3_RLC5_MIDCMD_DATA7 0x032f +#define mmSDMA3_RLC5_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA3_RLC5_MIDCMD_DATA8 0x0330 +#define mmSDMA3_RLC5_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA3_RLC5_MIDCMD_CNTL 0x0331 +#define mmSDMA3_RLC5_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC6_RB_CNTL 0x0340 +#define mmSDMA3_RLC6_RB_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC6_RB_BASE 0x0341 +#define mmSDMA3_RLC6_RB_BASE_BASE_IDX 1 +#define mmSDMA3_RLC6_RB_BASE_HI 0x0342 +#define mmSDMA3_RLC6_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA3_RLC6_RB_RPTR 0x0343 +#define mmSDMA3_RLC6_RB_RPTR_BASE_IDX 1 +#define mmSDMA3_RLC6_RB_RPTR_HI 0x0344 +#define mmSDMA3_RLC6_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA3_RLC6_RB_WPTR 0x0345 +#define mmSDMA3_RLC6_RB_WPTR_BASE_IDX 1 +#define mmSDMA3_RLC6_RB_WPTR_HI 0x0346 +#define mmSDMA3_RLC6_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA3_RLC6_RB_WPTR_POLL_CNTL 0x0347 +#define mmSDMA3_RLC6_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC6_RB_RPTR_ADDR_HI 0x0348 +#define mmSDMA3_RLC6_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA3_RLC6_RB_RPTR_ADDR_LO 0x0349 +#define mmSDMA3_RLC6_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA3_RLC6_IB_CNTL 0x034a +#define mmSDMA3_RLC6_IB_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC6_IB_RPTR 0x034b +#define mmSDMA3_RLC6_IB_RPTR_BASE_IDX 1 +#define mmSDMA3_RLC6_IB_OFFSET 0x034c +#define mmSDMA3_RLC6_IB_OFFSET_BASE_IDX 1 +#define mmSDMA3_RLC6_IB_BASE_LO 0x034d +#define mmSDMA3_RLC6_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA3_RLC6_IB_BASE_HI 0x034e +#define mmSDMA3_RLC6_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA3_RLC6_IB_SIZE 0x034f +#define mmSDMA3_RLC6_IB_SIZE_BASE_IDX 1 +#define mmSDMA3_RLC6_SKIP_CNTL 0x0350 +#define mmSDMA3_RLC6_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC6_CONTEXT_STATUS 0x0351 +#define mmSDMA3_RLC6_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA3_RLC6_DOORBELL 0x0352 +#define mmSDMA3_RLC6_DOORBELL_BASE_IDX 1 +#define mmSDMA3_RLC6_STATUS 0x0368 +#define mmSDMA3_RLC6_STATUS_BASE_IDX 1 +#define mmSDMA3_RLC6_DOORBELL_LOG 0x0369 +#define mmSDMA3_RLC6_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA3_RLC6_WATERMARK 0x036a +#define mmSDMA3_RLC6_WATERMARK_BASE_IDX 1 +#define mmSDMA3_RLC6_DOORBELL_OFFSET 0x036b +#define mmSDMA3_RLC6_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA3_RLC6_CSA_ADDR_LO 0x036c +#define mmSDMA3_RLC6_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA3_RLC6_CSA_ADDR_HI 0x036d +#define mmSDMA3_RLC6_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA3_RLC6_IB_SUB_REMAIN 0x036f +#define mmSDMA3_RLC6_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA3_RLC6_PREEMPT 0x0370 +#define mmSDMA3_RLC6_PREEMPT_BASE_IDX 1 +#define mmSDMA3_RLC6_DUMMY_REG 0x0371 +#define mmSDMA3_RLC6_DUMMY_REG_BASE_IDX 1 +#define mmSDMA3_RLC6_RB_WPTR_POLL_ADDR_HI 0x0372 +#define mmSDMA3_RLC6_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA3_RLC6_RB_WPTR_POLL_ADDR_LO 0x0373 +#define mmSDMA3_RLC6_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA3_RLC6_RB_AQL_CNTL 0x0374 +#define mmSDMA3_RLC6_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC6_MINOR_PTR_UPDATE 0x0375 +#define mmSDMA3_RLC6_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA3_RLC6_MIDCMD_DATA0 0x0380 +#define mmSDMA3_RLC6_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA3_RLC6_MIDCMD_DATA1 0x0381 +#define 
mmSDMA3_RLC6_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA3_RLC6_MIDCMD_DATA2 0x0382 +#define mmSDMA3_RLC6_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA3_RLC6_MIDCMD_DATA3 0x0383 +#define mmSDMA3_RLC6_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA3_RLC6_MIDCMD_DATA4 0x0384 +#define mmSDMA3_RLC6_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA3_RLC6_MIDCMD_DATA5 0x0385 +#define mmSDMA3_RLC6_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA3_RLC6_MIDCMD_DATA6 0x0386 +#define mmSDMA3_RLC6_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA3_RLC6_MIDCMD_DATA7 0x0387 +#define mmSDMA3_RLC6_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA3_RLC6_MIDCMD_DATA8 0x0388 +#define mmSDMA3_RLC6_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA3_RLC6_MIDCMD_CNTL 0x0389 +#define mmSDMA3_RLC6_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC7_RB_CNTL 0x0398 +#define mmSDMA3_RLC7_RB_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC7_RB_BASE 0x0399 +#define mmSDMA3_RLC7_RB_BASE_BASE_IDX 1 +#define mmSDMA3_RLC7_RB_BASE_HI 0x039a +#define mmSDMA3_RLC7_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA3_RLC7_RB_RPTR 0x039b +#define mmSDMA3_RLC7_RB_RPTR_BASE_IDX 1 +#define mmSDMA3_RLC7_RB_RPTR_HI 0x039c +#define mmSDMA3_RLC7_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA3_RLC7_RB_WPTR 0x039d +#define mmSDMA3_RLC7_RB_WPTR_BASE_IDX 1 +#define mmSDMA3_RLC7_RB_WPTR_HI 0x039e +#define mmSDMA3_RLC7_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA3_RLC7_RB_WPTR_POLL_CNTL 0x039f +#define mmSDMA3_RLC7_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC7_RB_RPTR_ADDR_HI 0x03a0 +#define mmSDMA3_RLC7_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA3_RLC7_RB_RPTR_ADDR_LO 0x03a1 +#define mmSDMA3_RLC7_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA3_RLC7_IB_CNTL 0x03a2 +#define mmSDMA3_RLC7_IB_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC7_IB_RPTR 0x03a3 +#define mmSDMA3_RLC7_IB_RPTR_BASE_IDX 1 +#define mmSDMA3_RLC7_IB_OFFSET 0x03a4 +#define mmSDMA3_RLC7_IB_OFFSET_BASE_IDX 1 +#define mmSDMA3_RLC7_IB_BASE_LO 0x03a5 +#define mmSDMA3_RLC7_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA3_RLC7_IB_BASE_HI 0x03a6 +#define mmSDMA3_RLC7_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA3_RLC7_IB_SIZE 0x03a7 +#define mmSDMA3_RLC7_IB_SIZE_BASE_IDX 1 +#define mmSDMA3_RLC7_SKIP_CNTL 0x03a8 +#define mmSDMA3_RLC7_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC7_CONTEXT_STATUS 0x03a9 +#define mmSDMA3_RLC7_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA3_RLC7_DOORBELL 0x03aa +#define mmSDMA3_RLC7_DOORBELL_BASE_IDX 1 +#define mmSDMA3_RLC7_STATUS 0x03c0 +#define mmSDMA3_RLC7_STATUS_BASE_IDX 1 +#define mmSDMA3_RLC7_DOORBELL_LOG 0x03c1 +#define mmSDMA3_RLC7_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA3_RLC7_WATERMARK 0x03c2 +#define mmSDMA3_RLC7_WATERMARK_BASE_IDX 1 +#define mmSDMA3_RLC7_DOORBELL_OFFSET 0x03c3 +#define mmSDMA3_RLC7_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA3_RLC7_CSA_ADDR_LO 0x03c4 +#define mmSDMA3_RLC7_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA3_RLC7_CSA_ADDR_HI 0x03c5 +#define mmSDMA3_RLC7_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA3_RLC7_IB_SUB_REMAIN 0x03c7 +#define mmSDMA3_RLC7_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA3_RLC7_PREEMPT 0x03c8 +#define mmSDMA3_RLC7_PREEMPT_BASE_IDX 1 +#define mmSDMA3_RLC7_DUMMY_REG 0x03c9 +#define mmSDMA3_RLC7_DUMMY_REG_BASE_IDX 1 +#define mmSDMA3_RLC7_RB_WPTR_POLL_ADDR_HI 0x03ca +#define mmSDMA3_RLC7_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA3_RLC7_RB_WPTR_POLL_ADDR_LO 0x03cb +#define mmSDMA3_RLC7_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA3_RLC7_RB_AQL_CNTL 0x03cc +#define mmSDMA3_RLC7_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA3_RLC7_MINOR_PTR_UPDATE 0x03cd +#define mmSDMA3_RLC7_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA3_RLC7_MIDCMD_DATA0 0x03d8 +#define 
mmSDMA3_RLC7_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA3_RLC7_MIDCMD_DATA1 0x03d9 +#define mmSDMA3_RLC7_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA3_RLC7_MIDCMD_DATA2 0x03da +#define mmSDMA3_RLC7_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA3_RLC7_MIDCMD_DATA3 0x03db +#define mmSDMA3_RLC7_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA3_RLC7_MIDCMD_DATA4 0x03dc +#define mmSDMA3_RLC7_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA3_RLC7_MIDCMD_DATA5 0x03dd +#define mmSDMA3_RLC7_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA3_RLC7_MIDCMD_DATA6 0x03de +#define mmSDMA3_RLC7_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA3_RLC7_MIDCMD_DATA7 0x03df +#define mmSDMA3_RLC7_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA3_RLC7_MIDCMD_DATA8 0x03e0 +#define mmSDMA3_RLC7_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA3_RLC7_MIDCMD_CNTL 0x03e1 +#define mmSDMA3_RLC7_MIDCMD_CNTL_BASE_IDX 1 + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma3/sdma3_4_2_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/sdma3/sdma3_4_2_2_sh_mask.h new file mode 100644 index 000000000000..6f2d5ad00488 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/sdma3/sdma3_4_2_2_sh_mask.h @@ -0,0 +1,2956 @@ +/* + * Copyright (C) 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef _sdma3_4_2_2_SH_MASK_HEADER +#define _sdma3_4_2_2_SH_MASK_HEADER + + +// addressBlock: sdma3_sdma3dec +//SDMA3_UCODE_ADDR +#define SDMA3_UCODE_ADDR__VALUE__SHIFT 0x0 +#define SDMA3_UCODE_ADDR__VALUE_MASK 0x00001FFFL +//SDMA3_UCODE_DATA +#define SDMA3_UCODE_DATA__VALUE__SHIFT 0x0 +#define SDMA3_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL +//SDMA3_VM_CNTL +#define SDMA3_VM_CNTL__CMD__SHIFT 0x0 +#define SDMA3_VM_CNTL__CMD_MASK 0x0000000FL +//SDMA3_VM_CTX_LO +#define SDMA3_VM_CTX_LO__ADDR__SHIFT 0x2 +#define SDMA3_VM_CTX_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA3_VM_CTX_HI +#define SDMA3_VM_CTX_HI__ADDR__SHIFT 0x0 +#define SDMA3_VM_CTX_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA3_ACTIVE_FCN_ID +#define SDMA3_ACTIVE_FCN_ID__VFID__SHIFT 0x0 +#define SDMA3_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4 +#define SDMA3_ACTIVE_FCN_ID__VF__SHIFT 0x1f +#define SDMA3_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL +#define SDMA3_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L +#define SDMA3_ACTIVE_FCN_ID__VF_MASK 0x80000000L +//SDMA3_VM_CTX_CNTL +#define SDMA3_VM_CTX_CNTL__PRIV__SHIFT 0x0 +#define SDMA3_VM_CTX_CNTL__VMID__SHIFT 0x4 +#define SDMA3_VM_CTX_CNTL__PRIV_MASK 0x00000001L +#define SDMA3_VM_CTX_CNTL__VMID_MASK 0x000000F0L +//SDMA3_VIRT_RESET_REQ +#define SDMA3_VIRT_RESET_REQ__VF__SHIFT 0x0 +#define SDMA3_VIRT_RESET_REQ__PF__SHIFT 0x1f +#define SDMA3_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL +#define SDMA3_VIRT_RESET_REQ__PF_MASK 0x80000000L +//SDMA3_VF_ENABLE +#define SDMA3_VF_ENABLE__VF_ENABLE__SHIFT 0x0 +#define SDMA3_VF_ENABLE__VF_ENABLE_MASK 0x00000001L +//SDMA3_CONTEXT_REG_TYPE0 +#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_RB_CNTL__SHIFT 0x0 +#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_RB_BASE__SHIFT 0x1 +#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_RB_BASE_HI__SHIFT 0x2 +#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_RB_RPTR__SHIFT 0x3 +#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_RB_RPTR_HI__SHIFT 0x4 +#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_RB_WPTR__SHIFT 0x5 +#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_RB_WPTR_HI__SHIFT 0x6 +#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_RB_WPTR_POLL_CNTL__SHIFT 0x7 +#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_RB_RPTR_ADDR_HI__SHIFT 0x8 +#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_RB_RPTR_ADDR_LO__SHIFT 0x9 +#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_IB_CNTL__SHIFT 0xa +#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_IB_RPTR__SHIFT 0xb +#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_IB_OFFSET__SHIFT 0xc +#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_IB_BASE_LO__SHIFT 0xd +#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_IB_BASE_HI__SHIFT 0xe +#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_IB_SIZE__SHIFT 0xf +#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_SKIP_CNTL__SHIFT 0x10 +#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_CONTEXT_STATUS__SHIFT 0x11 +#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_DOORBELL__SHIFT 0x12 +#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_CONTEXT_CNTL__SHIFT 0x13 +#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_RB_CNTL_MASK 0x00000001L +#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_RB_BASE_MASK 0x00000002L +#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_RB_BASE_HI_MASK 0x00000004L +#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_RB_RPTR_MASK 0x00000008L +#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_RB_RPTR_HI_MASK 0x00000010L +#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_RB_WPTR_MASK 0x00000020L +#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_RB_WPTR_HI_MASK 0x00000040L +#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_RB_WPTR_POLL_CNTL_MASK 0x00000080L +#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_RB_RPTR_ADDR_HI_MASK 0x00000100L +#define 
SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_RB_RPTR_ADDR_LO_MASK 0x00000200L +#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_IB_CNTL_MASK 0x00000400L +#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_IB_RPTR_MASK 0x00000800L +#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_IB_OFFSET_MASK 0x00001000L +#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_IB_BASE_LO_MASK 0x00002000L +#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_IB_BASE_HI_MASK 0x00004000L +#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_IB_SIZE_MASK 0x00008000L +#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_SKIP_CNTL_MASK 0x00010000L +#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_CONTEXT_STATUS_MASK 0x00020000L +#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_DOORBELL_MASK 0x00040000L +#define SDMA3_CONTEXT_REG_TYPE0__SDMA3_GFX_CONTEXT_CNTL_MASK 0x00080000L +//SDMA3_CONTEXT_REG_TYPE1 +#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_STATUS__SHIFT 0x8 +#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_DOORBELL_LOG__SHIFT 0x9 +#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_WATERMARK__SHIFT 0xa +#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_DOORBELL_OFFSET__SHIFT 0xb +#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_CSA_ADDR_LO__SHIFT 0xc +#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_CSA_ADDR_HI__SHIFT 0xd +#define SDMA3_CONTEXT_REG_TYPE1__VOID_REG2__SHIFT 0xe +#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_IB_SUB_REMAIN__SHIFT 0xf +#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_PREEMPT__SHIFT 0x10 +#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_DUMMY_REG__SHIFT 0x11 +#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_RB_WPTR_POLL_ADDR_HI__SHIFT 0x12 +#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_RB_WPTR_POLL_ADDR_LO__SHIFT 0x13 +#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_RB_AQL_CNTL__SHIFT 0x14 +#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_MINOR_PTR_UPDATE__SHIFT 0x15 +#define SDMA3_CONTEXT_REG_TYPE1__RESERVED__SHIFT 0x16 +#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_STATUS_MASK 0x00000100L +#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_DOORBELL_LOG_MASK 0x00000200L +#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_WATERMARK_MASK 0x00000400L +#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_DOORBELL_OFFSET_MASK 0x00000800L +#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_CSA_ADDR_LO_MASK 0x00001000L +#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_CSA_ADDR_HI_MASK 0x00002000L +#define SDMA3_CONTEXT_REG_TYPE1__VOID_REG2_MASK 0x00004000L +#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_IB_SUB_REMAIN_MASK 0x00008000L +#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_PREEMPT_MASK 0x00010000L +#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_DUMMY_REG_MASK 0x00020000L +#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_RB_WPTR_POLL_ADDR_HI_MASK 0x00040000L +#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_RB_WPTR_POLL_ADDR_LO_MASK 0x00080000L +#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_RB_AQL_CNTL_MASK 0x00100000L +#define SDMA3_CONTEXT_REG_TYPE1__SDMA3_GFX_MINOR_PTR_UPDATE_MASK 0x00200000L +#define SDMA3_CONTEXT_REG_TYPE1__RESERVED_MASK 0xFFC00000L +//SDMA3_CONTEXT_REG_TYPE2 +#define SDMA3_CONTEXT_REG_TYPE2__SDMA3_GFX_MIDCMD_DATA0__SHIFT 0x0 +#define SDMA3_CONTEXT_REG_TYPE2__SDMA3_GFX_MIDCMD_DATA1__SHIFT 0x1 +#define SDMA3_CONTEXT_REG_TYPE2__SDMA3_GFX_MIDCMD_DATA2__SHIFT 0x2 +#define SDMA3_CONTEXT_REG_TYPE2__SDMA3_GFX_MIDCMD_DATA3__SHIFT 0x3 +#define SDMA3_CONTEXT_REG_TYPE2__SDMA3_GFX_MIDCMD_DATA4__SHIFT 0x4 +#define SDMA3_CONTEXT_REG_TYPE2__SDMA3_GFX_MIDCMD_DATA5__SHIFT 0x5 +#define SDMA3_CONTEXT_REG_TYPE2__SDMA3_GFX_MIDCMD_DATA6__SHIFT 0x6 +#define SDMA3_CONTEXT_REG_TYPE2__SDMA3_GFX_MIDCMD_DATA7__SHIFT 0x7 +#define SDMA3_CONTEXT_REG_TYPE2__SDMA3_GFX_MIDCMD_DATA8__SHIFT 0x8 +#define 
SDMA3_CONTEXT_REG_TYPE2__SDMA3_GFX_MIDCMD_CNTL__SHIFT 0x9 +#define SDMA3_CONTEXT_REG_TYPE2__RESERVED__SHIFT 0xa +#define SDMA3_CONTEXT_REG_TYPE2__SDMA3_GFX_MIDCMD_DATA0_MASK 0x00000001L +#define SDMA3_CONTEXT_REG_TYPE2__SDMA3_GFX_MIDCMD_DATA1_MASK 0x00000002L +#define SDMA3_CONTEXT_REG_TYPE2__SDMA3_GFX_MIDCMD_DATA2_MASK 0x00000004L +#define SDMA3_CONTEXT_REG_TYPE2__SDMA3_GFX_MIDCMD_DATA3_MASK 0x00000008L +#define SDMA3_CONTEXT_REG_TYPE2__SDMA3_GFX_MIDCMD_DATA4_MASK 0x00000010L +#define SDMA3_CONTEXT_REG_TYPE2__SDMA3_GFX_MIDCMD_DATA5_MASK 0x00000020L +#define SDMA3_CONTEXT_REG_TYPE2__SDMA3_GFX_MIDCMD_DATA6_MASK 0x00000040L +#define SDMA3_CONTEXT_REG_TYPE2__SDMA3_GFX_MIDCMD_DATA7_MASK 0x00000080L +#define SDMA3_CONTEXT_REG_TYPE2__SDMA3_GFX_MIDCMD_DATA8_MASK 0x00000100L +#define SDMA3_CONTEXT_REG_TYPE2__SDMA3_GFX_MIDCMD_CNTL_MASK 0x00000200L +#define SDMA3_CONTEXT_REG_TYPE2__RESERVED_MASK 0xFFFFFC00L +//SDMA3_CONTEXT_REG_TYPE3 +#define SDMA3_CONTEXT_REG_TYPE3__RESERVED__SHIFT 0x0 +#define SDMA3_CONTEXT_REG_TYPE3__RESERVED_MASK 0xFFFFFFFFL +//SDMA3_PUB_REG_TYPE0 +#define SDMA3_PUB_REG_TYPE0__SDMA3_UCODE_ADDR__SHIFT 0x0 +#define SDMA3_PUB_REG_TYPE0__SDMA3_UCODE_DATA__SHIFT 0x1 +#define SDMA3_PUB_REG_TYPE0__RESERVED3__SHIFT 0x3 +#define SDMA3_PUB_REG_TYPE0__SDMA3_VM_CNTL__SHIFT 0x4 +#define SDMA3_PUB_REG_TYPE0__SDMA3_VM_CTX_LO__SHIFT 0x5 +#define SDMA3_PUB_REG_TYPE0__SDMA3_VM_CTX_HI__SHIFT 0x6 +#define SDMA3_PUB_REG_TYPE0__SDMA3_ACTIVE_FCN_ID__SHIFT 0x7 +#define SDMA3_PUB_REG_TYPE0__SDMA3_VM_CTX_CNTL__SHIFT 0x8 +#define SDMA3_PUB_REG_TYPE0__SDMA3_VIRT_RESET_REQ__SHIFT 0x9 +#define SDMA3_PUB_REG_TYPE0__RESERVED10__SHIFT 0xa +#define SDMA3_PUB_REG_TYPE0__SDMA3_CONTEXT_REG_TYPE0__SHIFT 0xb +#define SDMA3_PUB_REG_TYPE0__SDMA3_CONTEXT_REG_TYPE1__SHIFT 0xc +#define SDMA3_PUB_REG_TYPE0__SDMA3_CONTEXT_REG_TYPE2__SHIFT 0xd +#define SDMA3_PUB_REG_TYPE0__SDMA3_CONTEXT_REG_TYPE3__SHIFT 0xe +#define SDMA3_PUB_REG_TYPE0__SDMA3_PUB_REG_TYPE0__SHIFT 0xf +#define SDMA3_PUB_REG_TYPE0__SDMA3_PUB_REG_TYPE1__SHIFT 0x10 +#define SDMA3_PUB_REG_TYPE0__SDMA3_PUB_REG_TYPE2__SHIFT 0x11 +#define SDMA3_PUB_REG_TYPE0__SDMA3_PUB_REG_TYPE3__SHIFT 0x12 +#define SDMA3_PUB_REG_TYPE0__SDMA3_MMHUB_CNTL__SHIFT 0x13 +#define SDMA3_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY__SHIFT 0x15 +#define SDMA3_PUB_REG_TYPE0__SDMA3_CONTEXT_GROUP_BOUNDARY__SHIFT 0x19 +#define SDMA3_PUB_REG_TYPE0__SDMA3_POWER_CNTL__SHIFT 0x1a +#define SDMA3_PUB_REG_TYPE0__SDMA3_CLK_CTRL__SHIFT 0x1b +#define SDMA3_PUB_REG_TYPE0__SDMA3_CNTL__SHIFT 0x1c +#define SDMA3_PUB_REG_TYPE0__SDMA3_CHICKEN_BITS__SHIFT 0x1d +#define SDMA3_PUB_REG_TYPE0__SDMA3_GB_ADDR_CONFIG__SHIFT 0x1e +#define SDMA3_PUB_REG_TYPE0__SDMA3_GB_ADDR_CONFIG_READ__SHIFT 0x1f +#define SDMA3_PUB_REG_TYPE0__SDMA3_UCODE_ADDR_MASK 0x00000001L +#define SDMA3_PUB_REG_TYPE0__SDMA3_UCODE_DATA_MASK 0x00000002L +#define SDMA3_PUB_REG_TYPE0__RESERVED3_MASK 0x00000008L +#define SDMA3_PUB_REG_TYPE0__SDMA3_VM_CNTL_MASK 0x00000010L +#define SDMA3_PUB_REG_TYPE0__SDMA3_VM_CTX_LO_MASK 0x00000020L +#define SDMA3_PUB_REG_TYPE0__SDMA3_VM_CTX_HI_MASK 0x00000040L +#define SDMA3_PUB_REG_TYPE0__SDMA3_ACTIVE_FCN_ID_MASK 0x00000080L +#define SDMA3_PUB_REG_TYPE0__SDMA3_VM_CTX_CNTL_MASK 0x00000100L +#define SDMA3_PUB_REG_TYPE0__SDMA3_VIRT_RESET_REQ_MASK 0x00000200L +#define SDMA3_PUB_REG_TYPE0__RESERVED10_MASK 0x00000400L +#define SDMA3_PUB_REG_TYPE0__SDMA3_CONTEXT_REG_TYPE0_MASK 0x00000800L +#define SDMA3_PUB_REG_TYPE0__SDMA3_CONTEXT_REG_TYPE1_MASK 0x00001000L +#define SDMA3_PUB_REG_TYPE0__SDMA3_CONTEXT_REG_TYPE2_MASK 
0x00002000L +#define SDMA3_PUB_REG_TYPE0__SDMA3_CONTEXT_REG_TYPE3_MASK 0x00004000L +#define SDMA3_PUB_REG_TYPE0__SDMA3_PUB_REG_TYPE0_MASK 0x00008000L +#define SDMA3_PUB_REG_TYPE0__SDMA3_PUB_REG_TYPE1_MASK 0x00010000L +#define SDMA3_PUB_REG_TYPE0__SDMA3_PUB_REG_TYPE2_MASK 0x00020000L +#define SDMA3_PUB_REG_TYPE0__SDMA3_PUB_REG_TYPE3_MASK 0x00040000L +#define SDMA3_PUB_REG_TYPE0__SDMA3_MMHUB_CNTL_MASK 0x00080000L +#define SDMA3_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY_MASK 0x01E00000L +#define SDMA3_PUB_REG_TYPE0__SDMA3_CONTEXT_GROUP_BOUNDARY_MASK 0x02000000L +#define SDMA3_PUB_REG_TYPE0__SDMA3_POWER_CNTL_MASK 0x04000000L +#define SDMA3_PUB_REG_TYPE0__SDMA3_CLK_CTRL_MASK 0x08000000L +#define SDMA3_PUB_REG_TYPE0__SDMA3_CNTL_MASK 0x10000000L +#define SDMA3_PUB_REG_TYPE0__SDMA3_CHICKEN_BITS_MASK 0x20000000L +#define SDMA3_PUB_REG_TYPE0__SDMA3_GB_ADDR_CONFIG_MASK 0x40000000L +#define SDMA3_PUB_REG_TYPE0__SDMA3_GB_ADDR_CONFIG_READ_MASK 0x80000000L +//SDMA3_PUB_REG_TYPE1 +#define SDMA3_PUB_REG_TYPE1__SDMA3_RB_RPTR_FETCH_HI__SHIFT 0x0 +#define SDMA3_PUB_REG_TYPE1__SDMA3_SEM_WAIT_FAIL_TIMER_CNTL__SHIFT 0x1 +#define SDMA3_PUB_REG_TYPE1__SDMA3_RB_RPTR_FETCH__SHIFT 0x2 +#define SDMA3_PUB_REG_TYPE1__SDMA3_IB_OFFSET_FETCH__SHIFT 0x3 +#define SDMA3_PUB_REG_TYPE1__SDMA3_PROGRAM__SHIFT 0x4 +#define SDMA3_PUB_REG_TYPE1__SDMA3_STATUS_REG__SHIFT 0x5 +#define SDMA3_PUB_REG_TYPE1__SDMA3_STATUS1_REG__SHIFT 0x6 +#define SDMA3_PUB_REG_TYPE1__SDMA3_RD_BURST_CNTL__SHIFT 0x7 +#define SDMA3_PUB_REG_TYPE1__SDMA3_HBM_PAGE_CONFIG__SHIFT 0x8 +#define SDMA3_PUB_REG_TYPE1__SDMA3_UCODE_CHECKSUM__SHIFT 0x9 +#define SDMA3_PUB_REG_TYPE1__SDMA3_F32_CNTL__SHIFT 0xa +#define SDMA3_PUB_REG_TYPE1__SDMA3_FREEZE__SHIFT 0xb +#define SDMA3_PUB_REG_TYPE1__SDMA3_PHASE0_QUANTUM__SHIFT 0xc +#define SDMA3_PUB_REG_TYPE1__SDMA3_PHASE1_QUANTUM__SHIFT 0xd +#define SDMA3_PUB_REG_TYPE1__SDMA_POWER_GATING__SHIFT 0xe +#define SDMA3_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG__SHIFT 0xf +#define SDMA3_PUB_REG_TYPE1__SDMA_PGFSM_WRITE__SHIFT 0x10 +#define SDMA3_PUB_REG_TYPE1__SDMA_PGFSM_READ__SHIFT 0x11 +#define SDMA3_PUB_REG_TYPE1__SDMA3_EDC_CONFIG__SHIFT 0x12 +#define SDMA3_PUB_REG_TYPE1__SDMA3_BA_THRESHOLD__SHIFT 0x13 +#define SDMA3_PUB_REG_TYPE1__SDMA3_ID__SHIFT 0x14 +#define SDMA3_PUB_REG_TYPE1__SDMA3_VERSION__SHIFT 0x15 +#define SDMA3_PUB_REG_TYPE1__SDMA3_EDC_COUNTER__SHIFT 0x16 +#define SDMA3_PUB_REG_TYPE1__SDMA3_EDC_COUNTER_CLEAR__SHIFT 0x17 +#define SDMA3_PUB_REG_TYPE1__SDMA3_STATUS2_REG__SHIFT 0x18 +#define SDMA3_PUB_REG_TYPE1__SDMA3_ATOMIC_CNTL__SHIFT 0x19 +#define SDMA3_PUB_REG_TYPE1__SDMA3_ATOMIC_PREOP_LO__SHIFT 0x1a +#define SDMA3_PUB_REG_TYPE1__SDMA3_ATOMIC_PREOP_HI__SHIFT 0x1b +#define SDMA3_PUB_REG_TYPE1__SDMA3_UTCL1_CNTL__SHIFT 0x1c +#define SDMA3_PUB_REG_TYPE1__SDMA3_UTCL1_WATERMK__SHIFT 0x1d +#define SDMA3_PUB_REG_TYPE1__SDMA3_UTCL1_RD_STATUS__SHIFT 0x1e +#define SDMA3_PUB_REG_TYPE1__SDMA3_UTCL1_WR_STATUS__SHIFT 0x1f +#define SDMA3_PUB_REG_TYPE1__SDMA3_RB_RPTR_FETCH_HI_MASK 0x00000001L +#define SDMA3_PUB_REG_TYPE1__SDMA3_SEM_WAIT_FAIL_TIMER_CNTL_MASK 0x00000002L +#define SDMA3_PUB_REG_TYPE1__SDMA3_RB_RPTR_FETCH_MASK 0x00000004L +#define SDMA3_PUB_REG_TYPE1__SDMA3_IB_OFFSET_FETCH_MASK 0x00000008L +#define SDMA3_PUB_REG_TYPE1__SDMA3_PROGRAM_MASK 0x00000010L +#define SDMA3_PUB_REG_TYPE1__SDMA3_STATUS_REG_MASK 0x00000020L +#define SDMA3_PUB_REG_TYPE1__SDMA3_STATUS1_REG_MASK 0x00000040L +#define SDMA3_PUB_REG_TYPE1__SDMA3_RD_BURST_CNTL_MASK 0x00000080L +#define SDMA3_PUB_REG_TYPE1__SDMA3_HBM_PAGE_CONFIG_MASK 0x00000100L +#define 
SDMA3_PUB_REG_TYPE1__SDMA3_UCODE_CHECKSUM_MASK 0x00000200L +#define SDMA3_PUB_REG_TYPE1__SDMA3_F32_CNTL_MASK 0x00000400L +#define SDMA3_PUB_REG_TYPE1__SDMA3_FREEZE_MASK 0x00000800L +#define SDMA3_PUB_REG_TYPE1__SDMA3_PHASE0_QUANTUM_MASK 0x00001000L +#define SDMA3_PUB_REG_TYPE1__SDMA3_PHASE1_QUANTUM_MASK 0x00002000L +#define SDMA3_PUB_REG_TYPE1__SDMA_POWER_GATING_MASK 0x00004000L +#define SDMA3_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG_MASK 0x00008000L +#define SDMA3_PUB_REG_TYPE1__SDMA_PGFSM_WRITE_MASK 0x00010000L +#define SDMA3_PUB_REG_TYPE1__SDMA_PGFSM_READ_MASK 0x00020000L +#define SDMA3_PUB_REG_TYPE1__SDMA3_EDC_CONFIG_MASK 0x00040000L +#define SDMA3_PUB_REG_TYPE1__SDMA3_BA_THRESHOLD_MASK 0x00080000L +#define SDMA3_PUB_REG_TYPE1__SDMA3_ID_MASK 0x00100000L +#define SDMA3_PUB_REG_TYPE1__SDMA3_VERSION_MASK 0x00200000L +#define SDMA3_PUB_REG_TYPE1__SDMA3_EDC_COUNTER_MASK 0x00400000L +#define SDMA3_PUB_REG_TYPE1__SDMA3_EDC_COUNTER_CLEAR_MASK 0x00800000L +#define SDMA3_PUB_REG_TYPE1__SDMA3_STATUS2_REG_MASK 0x01000000L +#define SDMA3_PUB_REG_TYPE1__SDMA3_ATOMIC_CNTL_MASK 0x02000000L +#define SDMA3_PUB_REG_TYPE1__SDMA3_ATOMIC_PREOP_LO_MASK 0x04000000L +#define SDMA3_PUB_REG_TYPE1__SDMA3_ATOMIC_PREOP_HI_MASK 0x08000000L +#define SDMA3_PUB_REG_TYPE1__SDMA3_UTCL1_CNTL_MASK 0x10000000L +#define SDMA3_PUB_REG_TYPE1__SDMA3_UTCL1_WATERMK_MASK 0x20000000L +#define SDMA3_PUB_REG_TYPE1__SDMA3_UTCL1_RD_STATUS_MASK 0x40000000L +#define SDMA3_PUB_REG_TYPE1__SDMA3_UTCL1_WR_STATUS_MASK 0x80000000L +//SDMA3_PUB_REG_TYPE2 +#define SDMA3_PUB_REG_TYPE2__SDMA3_UTCL1_INV0__SHIFT 0x0 +#define SDMA3_PUB_REG_TYPE2__SDMA3_UTCL1_INV1__SHIFT 0x1 +#define SDMA3_PUB_REG_TYPE2__SDMA3_UTCL1_INV2__SHIFT 0x2 +#define SDMA3_PUB_REG_TYPE2__SDMA3_UTCL1_RD_XNACK0__SHIFT 0x3 +#define SDMA3_PUB_REG_TYPE2__SDMA3_UTCL1_RD_XNACK1__SHIFT 0x4 +#define SDMA3_PUB_REG_TYPE2__SDMA3_UTCL1_WR_XNACK0__SHIFT 0x5 +#define SDMA3_PUB_REG_TYPE2__SDMA3_UTCL1_WR_XNACK1__SHIFT 0x6 +#define SDMA3_PUB_REG_TYPE2__SDMA3_UTCL1_TIMEOUT__SHIFT 0x7 +#define SDMA3_PUB_REG_TYPE2__SDMA3_UTCL1_PAGE__SHIFT 0x8 +#define SDMA3_PUB_REG_TYPE2__SDMA3_POWER_CNTL_IDLE__SHIFT 0x9 +#define SDMA3_PUB_REG_TYPE2__SDMA3_RELAX_ORDERING_LUT__SHIFT 0xa +#define SDMA3_PUB_REG_TYPE2__SDMA3_CHICKEN_BITS_2__SHIFT 0xb +#define SDMA3_PUB_REG_TYPE2__SDMA3_STATUS3_REG__SHIFT 0xc +#define SDMA3_PUB_REG_TYPE2__SDMA3_PHYSICAL_ADDR_LO__SHIFT 0xd +#define SDMA3_PUB_REG_TYPE2__SDMA3_PHYSICAL_ADDR_HI__SHIFT 0xe +#define SDMA3_PUB_REG_TYPE2__SDMA3_PHASE2_QUANTUM__SHIFT 0xf +#define SDMA3_PUB_REG_TYPE2__SDMA3_ERROR_LOG__SHIFT 0x10 +#define SDMA3_PUB_REG_TYPE2__SDMA3_PUB_DUMMY_REG0__SHIFT 0x11 +#define SDMA3_PUB_REG_TYPE2__SDMA3_PUB_DUMMY_REG1__SHIFT 0x12 +#define SDMA3_PUB_REG_TYPE2__SDMA3_PUB_DUMMY_REG2__SHIFT 0x13 +#define SDMA3_PUB_REG_TYPE2__SDMA3_PUB_DUMMY_REG3__SHIFT 0x14 +#define SDMA3_PUB_REG_TYPE2__SDMA3_F32_COUNTER__SHIFT 0x15 +#define SDMA3_PUB_REG_TYPE2__SDMA3_UNBREAKABLE__SHIFT 0x16 +#define SDMA3_PUB_REG_TYPE2__SDMA3_PERFMON_CNTL__SHIFT 0x17 +#define SDMA3_PUB_REG_TYPE2__SDMA3_PERFCOUNTER0_RESULT__SHIFT 0x18 +#define SDMA3_PUB_REG_TYPE2__SDMA3_PERFCOUNTER1_RESULT__SHIFT 0x19 +#define SDMA3_PUB_REG_TYPE2__SDMA3_PERFCOUNTER_TAG_DELAY_RANGE__SHIFT 0x1a +#define SDMA3_PUB_REG_TYPE2__SDMA3_CRD_CNTL__SHIFT 0x1b +#define SDMA3_PUB_REG_TYPE2__RESERVED28__SHIFT 0x1c +#define SDMA3_PUB_REG_TYPE2__SDMA3_GPU_IOV_VIOLATION_LOG__SHIFT 0x1d +#define SDMA3_PUB_REG_TYPE2__SDMA3_ULV_CNTL__SHIFT 0x1e +#define SDMA3_PUB_REG_TYPE2__RESERVED__SHIFT 0x1f +#define SDMA3_PUB_REG_TYPE2__SDMA3_UTCL1_INV0_MASK 
0x00000001L +#define SDMA3_PUB_REG_TYPE2__SDMA3_UTCL1_INV1_MASK 0x00000002L +#define SDMA3_PUB_REG_TYPE2__SDMA3_UTCL1_INV2_MASK 0x00000004L +#define SDMA3_PUB_REG_TYPE2__SDMA3_UTCL1_RD_XNACK0_MASK 0x00000008L +#define SDMA3_PUB_REG_TYPE2__SDMA3_UTCL1_RD_XNACK1_MASK 0x00000010L +#define SDMA3_PUB_REG_TYPE2__SDMA3_UTCL1_WR_XNACK0_MASK 0x00000020L +#define SDMA3_PUB_REG_TYPE2__SDMA3_UTCL1_WR_XNACK1_MASK 0x00000040L +#define SDMA3_PUB_REG_TYPE2__SDMA3_UTCL1_TIMEOUT_MASK 0x00000080L +#define SDMA3_PUB_REG_TYPE2__SDMA3_UTCL1_PAGE_MASK 0x00000100L +#define SDMA3_PUB_REG_TYPE2__SDMA3_POWER_CNTL_IDLE_MASK 0x00000200L +#define SDMA3_PUB_REG_TYPE2__SDMA3_RELAX_ORDERING_LUT_MASK 0x00000400L +#define SDMA3_PUB_REG_TYPE2__SDMA3_CHICKEN_BITS_2_MASK 0x00000800L +#define SDMA3_PUB_REG_TYPE2__SDMA3_STATUS3_REG_MASK 0x00001000L +#define SDMA3_PUB_REG_TYPE2__SDMA3_PHYSICAL_ADDR_LO_MASK 0x00002000L +#define SDMA3_PUB_REG_TYPE2__SDMA3_PHYSICAL_ADDR_HI_MASK 0x00004000L +#define SDMA3_PUB_REG_TYPE2__SDMA3_PHASE2_QUANTUM_MASK 0x00008000L +#define SDMA3_PUB_REG_TYPE2__SDMA3_ERROR_LOG_MASK 0x00010000L +#define SDMA3_PUB_REG_TYPE2__SDMA3_PUB_DUMMY_REG0_MASK 0x00020000L +#define SDMA3_PUB_REG_TYPE2__SDMA3_PUB_DUMMY_REG1_MASK 0x00040000L +#define SDMA3_PUB_REG_TYPE2__SDMA3_PUB_DUMMY_REG2_MASK 0x00080000L +#define SDMA3_PUB_REG_TYPE2__SDMA3_PUB_DUMMY_REG3_MASK 0x00100000L +#define SDMA3_PUB_REG_TYPE2__SDMA3_F32_COUNTER_MASK 0x00200000L +#define SDMA3_PUB_REG_TYPE2__SDMA3_UNBREAKABLE_MASK 0x00400000L +#define SDMA3_PUB_REG_TYPE2__SDMA3_PERFMON_CNTL_MASK 0x00800000L +#define SDMA3_PUB_REG_TYPE2__SDMA3_PERFCOUNTER0_RESULT_MASK 0x01000000L +#define SDMA3_PUB_REG_TYPE2__SDMA3_PERFCOUNTER1_RESULT_MASK 0x02000000L +#define SDMA3_PUB_REG_TYPE2__SDMA3_PERFCOUNTER_TAG_DELAY_RANGE_MASK 0x04000000L +#define SDMA3_PUB_REG_TYPE2__SDMA3_CRD_CNTL_MASK 0x08000000L +#define SDMA3_PUB_REG_TYPE2__RESERVED28_MASK 0x10000000L +#define SDMA3_PUB_REG_TYPE2__SDMA3_GPU_IOV_VIOLATION_LOG_MASK 0x20000000L +#define SDMA3_PUB_REG_TYPE2__SDMA3_ULV_CNTL_MASK 0x40000000L +#define SDMA3_PUB_REG_TYPE2__RESERVED_MASK 0x80000000L +//SDMA3_PUB_REG_TYPE3 +#define SDMA3_PUB_REG_TYPE3__SDMA3_EA_DBIT_ADDR_DATA__SHIFT 0x0 +#define SDMA3_PUB_REG_TYPE3__SDMA3_EA_DBIT_ADDR_INDEX__SHIFT 0x1 +#define SDMA3_PUB_REG_TYPE3__SDMA3_GPU_IOV_VIOLATION_LOG2__SHIFT 0x2 +#define SDMA3_PUB_REG_TYPE3__RESERVED__SHIFT 0x3 +#define SDMA3_PUB_REG_TYPE3__SDMA3_EA_DBIT_ADDR_DATA_MASK 0x00000001L +#define SDMA3_PUB_REG_TYPE3__SDMA3_EA_DBIT_ADDR_INDEX_MASK 0x00000002L +#define SDMA3_PUB_REG_TYPE3__SDMA3_GPU_IOV_VIOLATION_LOG2_MASK 0x00000004L +#define SDMA3_PUB_REG_TYPE3__RESERVED_MASK 0xFFFFFFF8L +//SDMA3_MMHUB_CNTL +#define SDMA3_MMHUB_CNTL__UNIT_ID__SHIFT 0x0 +#define SDMA3_MMHUB_CNTL__UNIT_ID_MASK 0x0000003FL +//SDMA3_CONTEXT_GROUP_BOUNDARY +#define SDMA3_CONTEXT_GROUP_BOUNDARY__RESERVED__SHIFT 0x0 +#define SDMA3_CONTEXT_GROUP_BOUNDARY__RESERVED_MASK 0xFFFFFFFFL +//SDMA3_POWER_CNTL +#define SDMA3_POWER_CNTL__MEM_POWER_OVERRIDE__SHIFT 0x8 +#define SDMA3_POWER_CNTL__MEM_POWER_LS_EN__SHIFT 0x9 +#define SDMA3_POWER_CNTL__MEM_POWER_DS_EN__SHIFT 0xa +#define SDMA3_POWER_CNTL__MEM_POWER_SD_EN__SHIFT 0xb +#define SDMA3_POWER_CNTL__MEM_POWER_DELAY__SHIFT 0xc +#define SDMA3_POWER_CNTL__MEM_POWER_OVERRIDE_MASK 0x00000100L +#define SDMA3_POWER_CNTL__MEM_POWER_LS_EN_MASK 0x00000200L +#define SDMA3_POWER_CNTL__MEM_POWER_DS_EN_MASK 0x00000400L +#define SDMA3_POWER_CNTL__MEM_POWER_SD_EN_MASK 0x00000800L +#define SDMA3_POWER_CNTL__MEM_POWER_DELAY_MASK 0x003FF000L +//SDMA3_CLK_CTRL +#define 
SDMA3_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define SDMA3_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define SDMA3_CLK_CTRL__RESERVED__SHIFT 0xc +#define SDMA3_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18 +#define SDMA3_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19 +#define SDMA3_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a +#define SDMA3_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b +#define SDMA3_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c +#define SDMA3_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d +#define SDMA3_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e +#define SDMA3_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f +#define SDMA3_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define SDMA3_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define SDMA3_CLK_CTRL__RESERVED_MASK 0x00FFF000L +#define SDMA3_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L +#define SDMA3_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L +#define SDMA3_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L +#define SDMA3_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L +#define SDMA3_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L +#define SDMA3_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L +#define SDMA3_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L +#define SDMA3_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L +//SDMA3_CNTL +#define SDMA3_CNTL__TRAP_ENABLE__SHIFT 0x0 +#define SDMA3_CNTL__UTC_L1_ENABLE__SHIFT 0x1 +#define SDMA3_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x2 +#define SDMA3_CNTL__DATA_SWAP_ENABLE__SHIFT 0x3 +#define SDMA3_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x4 +#define SDMA3_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x5 +#define SDMA3_CNTL__MIDCMD_WORLDSWITCH_ENABLE__SHIFT 0x11 +#define SDMA3_CNTL__AUTO_CTXSW_ENABLE__SHIFT 0x12 +#define SDMA3_CNTL__CTXEMPTY_INT_ENABLE__SHIFT 0x1c +#define SDMA3_CNTL__FROZEN_INT_ENABLE__SHIFT 0x1d +#define SDMA3_CNTL__IB_PREEMPT_INT_ENABLE__SHIFT 0x1e +#define SDMA3_CNTL__TRAP_ENABLE_MASK 0x00000001L +#define SDMA3_CNTL__UTC_L1_ENABLE_MASK 0x00000002L +#define SDMA3_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L +#define SDMA3_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L +#define SDMA3_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L +#define SDMA3_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00000020L +#define SDMA3_CNTL__MIDCMD_WORLDSWITCH_ENABLE_MASK 0x00020000L +#define SDMA3_CNTL__AUTO_CTXSW_ENABLE_MASK 0x00040000L +#define SDMA3_CNTL__CTXEMPTY_INT_ENABLE_MASK 0x10000000L +#define SDMA3_CNTL__FROZEN_INT_ENABLE_MASK 0x20000000L +#define SDMA3_CNTL__IB_PREEMPT_INT_ENABLE_MASK 0x40000000L +//SDMA3_CHICKEN_BITS +#define SDMA3_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE__SHIFT 0x0 +#define SDMA3_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE__SHIFT 0x1 +#define SDMA3_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE__SHIFT 0x2 +#define SDMA3_CHICKEN_BITS__WRITE_BURST_LENGTH__SHIFT 0x8 +#define SDMA3_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE__SHIFT 0xa +#define SDMA3_CHICKEN_BITS__COPY_OVERLAP_ENABLE__SHIFT 0x10 +#define SDMA3_CHICKEN_BITS__RAW_CHECK_ENABLE__SHIFT 0x11 +#define SDMA3_CHICKEN_BITS__SRBM_POLL_RETRYING__SHIFT 0x14 +#define SDMA3_CHICKEN_BITS__CG_STATUS_OUTPUT__SHIFT 0x17 +#define SDMA3_CHICKEN_BITS__TIME_BASED_QOS__SHIFT 0x19 +#define SDMA3_CHICKEN_BITS__CE_AFIFO_WATERMARK__SHIFT 0x1a +#define SDMA3_CHICKEN_BITS__CE_DFIFO_WATERMARK__SHIFT 0x1c +#define SDMA3_CHICKEN_BITS__CE_LFIFO_WATERMARK__SHIFT 0x1e +#define SDMA3_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE_MASK 0x00000001L +#define SDMA3_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE_MASK 0x00000002L +#define SDMA3_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE_MASK 0x00000004L +#define SDMA3_CHICKEN_BITS__WRITE_BURST_LENGTH_MASK 0x00000300L +#define SDMA3_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE_MASK 0x00001C00L 
+#define SDMA3_CHICKEN_BITS__COPY_OVERLAP_ENABLE_MASK 0x00010000L +#define SDMA3_CHICKEN_BITS__RAW_CHECK_ENABLE_MASK 0x00020000L +#define SDMA3_CHICKEN_BITS__SRBM_POLL_RETRYING_MASK 0x00100000L +#define SDMA3_CHICKEN_BITS__CG_STATUS_OUTPUT_MASK 0x00800000L +#define SDMA3_CHICKEN_BITS__TIME_BASED_QOS_MASK 0x02000000L +#define SDMA3_CHICKEN_BITS__CE_AFIFO_WATERMARK_MASK 0x0C000000L +#define SDMA3_CHICKEN_BITS__CE_DFIFO_WATERMARK_MASK 0x30000000L +#define SDMA3_CHICKEN_BITS__CE_LFIFO_WATERMARK_MASK 0xC0000000L +//SDMA3_GB_ADDR_CONFIG +#define SDMA3_GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0 +#define SDMA3_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3 +#define SDMA3_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x8 +#define SDMA3_GB_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc +#define SDMA3_GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13 +#define SDMA3_GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L +#define SDMA3_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L +#define SDMA3_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L +#define SDMA3_GB_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L +#define SDMA3_GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L +//SDMA3_GB_ADDR_CONFIG_READ +#define SDMA3_GB_ADDR_CONFIG_READ__NUM_PIPES__SHIFT 0x0 +#define SDMA3_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE__SHIFT 0x3 +#define SDMA3_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE__SHIFT 0x8 +#define SDMA3_GB_ADDR_CONFIG_READ__NUM_BANKS__SHIFT 0xc +#define SDMA3_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES__SHIFT 0x13 +#define SDMA3_GB_ADDR_CONFIG_READ__NUM_PIPES_MASK 0x00000007L +#define SDMA3_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L +#define SDMA3_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE_MASK 0x00000700L +#define SDMA3_GB_ADDR_CONFIG_READ__NUM_BANKS_MASK 0x00007000L +#define SDMA3_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES_MASK 0x00180000L +//SDMA3_RB_RPTR_FETCH_HI +#define SDMA3_RB_RPTR_FETCH_HI__OFFSET__SHIFT 0x0 +#define SDMA3_RB_RPTR_FETCH_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA3_SEM_WAIT_FAIL_TIMER_CNTL +#define SDMA3_SEM_WAIT_FAIL_TIMER_CNTL__TIMER__SHIFT 0x0 +#define SDMA3_SEM_WAIT_FAIL_TIMER_CNTL__TIMER_MASK 0xFFFFFFFFL +//SDMA3_RB_RPTR_FETCH +#define SDMA3_RB_RPTR_FETCH__OFFSET__SHIFT 0x2 +#define SDMA3_RB_RPTR_FETCH__OFFSET_MASK 0xFFFFFFFCL +//SDMA3_IB_OFFSET_FETCH +#define SDMA3_IB_OFFSET_FETCH__OFFSET__SHIFT 0x2 +#define SDMA3_IB_OFFSET_FETCH__OFFSET_MASK 0x003FFFFCL +//SDMA3_PROGRAM +#define SDMA3_PROGRAM__STREAM__SHIFT 0x0 +#define SDMA3_PROGRAM__STREAM_MASK 0xFFFFFFFFL +//SDMA3_STATUS_REG +#define SDMA3_STATUS_REG__IDLE__SHIFT 0x0 +#define SDMA3_STATUS_REG__REG_IDLE__SHIFT 0x1 +#define SDMA3_STATUS_REG__RB_EMPTY__SHIFT 0x2 +#define SDMA3_STATUS_REG__RB_FULL__SHIFT 0x3 +#define SDMA3_STATUS_REG__RB_CMD_IDLE__SHIFT 0x4 +#define SDMA3_STATUS_REG__RB_CMD_FULL__SHIFT 0x5 +#define SDMA3_STATUS_REG__IB_CMD_IDLE__SHIFT 0x6 +#define SDMA3_STATUS_REG__IB_CMD_FULL__SHIFT 0x7 +#define SDMA3_STATUS_REG__BLOCK_IDLE__SHIFT 0x8 +#define SDMA3_STATUS_REG__INSIDE_IB__SHIFT 0x9 +#define SDMA3_STATUS_REG__EX_IDLE__SHIFT 0xa +#define SDMA3_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE__SHIFT 0xb +#define SDMA3_STATUS_REG__PACKET_READY__SHIFT 0xc +#define SDMA3_STATUS_REG__MC_WR_IDLE__SHIFT 0xd +#define SDMA3_STATUS_REG__SRBM_IDLE__SHIFT 0xe +#define SDMA3_STATUS_REG__CONTEXT_EMPTY__SHIFT 0xf +#define SDMA3_STATUS_REG__DELTA_RPTR_FULL__SHIFT 0x10 +#define SDMA3_STATUS_REG__RB_MC_RREQ_IDLE__SHIFT 0x11 +#define SDMA3_STATUS_REG__IB_MC_RREQ_IDLE__SHIFT 0x12 +#define SDMA3_STATUS_REG__MC_RD_IDLE__SHIFT 0x13 +#define 
SDMA3_STATUS_REG__DELTA_RPTR_EMPTY__SHIFT 0x14 +#define SDMA3_STATUS_REG__MC_RD_RET_STALL__SHIFT 0x15 +#define SDMA3_STATUS_REG__MC_RD_NO_POLL_IDLE__SHIFT 0x16 +#define SDMA3_STATUS_REG__PREV_CMD_IDLE__SHIFT 0x19 +#define SDMA3_STATUS_REG__SEM_IDLE__SHIFT 0x1a +#define SDMA3_STATUS_REG__SEM_REQ_STALL__SHIFT 0x1b +#define SDMA3_STATUS_REG__SEM_RESP_STATE__SHIFT 0x1c +#define SDMA3_STATUS_REG__INT_IDLE__SHIFT 0x1e +#define SDMA3_STATUS_REG__INT_REQ_STALL__SHIFT 0x1f +#define SDMA3_STATUS_REG__IDLE_MASK 0x00000001L +#define SDMA3_STATUS_REG__REG_IDLE_MASK 0x00000002L +#define SDMA3_STATUS_REG__RB_EMPTY_MASK 0x00000004L +#define SDMA3_STATUS_REG__RB_FULL_MASK 0x00000008L +#define SDMA3_STATUS_REG__RB_CMD_IDLE_MASK 0x00000010L +#define SDMA3_STATUS_REG__RB_CMD_FULL_MASK 0x00000020L +#define SDMA3_STATUS_REG__IB_CMD_IDLE_MASK 0x00000040L +#define SDMA3_STATUS_REG__IB_CMD_FULL_MASK 0x00000080L +#define SDMA3_STATUS_REG__BLOCK_IDLE_MASK 0x00000100L +#define SDMA3_STATUS_REG__INSIDE_IB_MASK 0x00000200L +#define SDMA3_STATUS_REG__EX_IDLE_MASK 0x00000400L +#define SDMA3_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE_MASK 0x00000800L +#define SDMA3_STATUS_REG__PACKET_READY_MASK 0x00001000L +#define SDMA3_STATUS_REG__MC_WR_IDLE_MASK 0x00002000L +#define SDMA3_STATUS_REG__SRBM_IDLE_MASK 0x00004000L +#define SDMA3_STATUS_REG__CONTEXT_EMPTY_MASK 0x00008000L +#define SDMA3_STATUS_REG__DELTA_RPTR_FULL_MASK 0x00010000L +#define SDMA3_STATUS_REG__RB_MC_RREQ_IDLE_MASK 0x00020000L +#define SDMA3_STATUS_REG__IB_MC_RREQ_IDLE_MASK 0x00040000L +#define SDMA3_STATUS_REG__MC_RD_IDLE_MASK 0x00080000L +#define SDMA3_STATUS_REG__DELTA_RPTR_EMPTY_MASK 0x00100000L +#define SDMA3_STATUS_REG__MC_RD_RET_STALL_MASK 0x00200000L +#define SDMA3_STATUS_REG__MC_RD_NO_POLL_IDLE_MASK 0x00400000L +#define SDMA3_STATUS_REG__PREV_CMD_IDLE_MASK 0x02000000L +#define SDMA3_STATUS_REG__SEM_IDLE_MASK 0x04000000L +#define SDMA3_STATUS_REG__SEM_REQ_STALL_MASK 0x08000000L +#define SDMA3_STATUS_REG__SEM_RESP_STATE_MASK 0x30000000L +#define SDMA3_STATUS_REG__INT_IDLE_MASK 0x40000000L +#define SDMA3_STATUS_REG__INT_REQ_STALL_MASK 0x80000000L +//SDMA3_STATUS1_REG +#define SDMA3_STATUS1_REG__CE_WREQ_IDLE__SHIFT 0x0 +#define SDMA3_STATUS1_REG__CE_WR_IDLE__SHIFT 0x1 +#define SDMA3_STATUS1_REG__CE_SPLIT_IDLE__SHIFT 0x2 +#define SDMA3_STATUS1_REG__CE_RREQ_IDLE__SHIFT 0x3 +#define SDMA3_STATUS1_REG__CE_OUT_IDLE__SHIFT 0x4 +#define SDMA3_STATUS1_REG__CE_IN_IDLE__SHIFT 0x5 +#define SDMA3_STATUS1_REG__CE_DST_IDLE__SHIFT 0x6 +#define SDMA3_STATUS1_REG__CE_CMD_IDLE__SHIFT 0x9 +#define SDMA3_STATUS1_REG__CE_AFIFO_FULL__SHIFT 0xa +#define SDMA3_STATUS1_REG__CE_INFO_FULL__SHIFT 0xd +#define SDMA3_STATUS1_REG__CE_INFO1_FULL__SHIFT 0xe +#define SDMA3_STATUS1_REG__EX_START__SHIFT 0xf +#define SDMA3_STATUS1_REG__CE_RD_STALL__SHIFT 0x11 +#define SDMA3_STATUS1_REG__CE_WR_STALL__SHIFT 0x12 +#define SDMA3_STATUS1_REG__CE_WREQ_IDLE_MASK 0x00000001L +#define SDMA3_STATUS1_REG__CE_WR_IDLE_MASK 0x00000002L +#define SDMA3_STATUS1_REG__CE_SPLIT_IDLE_MASK 0x00000004L +#define SDMA3_STATUS1_REG__CE_RREQ_IDLE_MASK 0x00000008L +#define SDMA3_STATUS1_REG__CE_OUT_IDLE_MASK 0x00000010L +#define SDMA3_STATUS1_REG__CE_IN_IDLE_MASK 0x00000020L +#define SDMA3_STATUS1_REG__CE_DST_IDLE_MASK 0x00000040L +#define SDMA3_STATUS1_REG__CE_CMD_IDLE_MASK 0x00000200L +#define SDMA3_STATUS1_REG__CE_AFIFO_FULL_MASK 0x00000400L +#define SDMA3_STATUS1_REG__CE_INFO_FULL_MASK 0x00002000L +#define SDMA3_STATUS1_REG__CE_INFO1_FULL_MASK 0x00004000L +#define SDMA3_STATUS1_REG__EX_START_MASK 0x00008000L +#define 
SDMA3_STATUS1_REG__CE_RD_STALL_MASK 0x00020000L +#define SDMA3_STATUS1_REG__CE_WR_STALL_MASK 0x00040000L +//SDMA3_RD_BURST_CNTL +#define SDMA3_RD_BURST_CNTL__RD_BURST__SHIFT 0x0 +#define SDMA3_RD_BURST_CNTL__CMD_BUFFER_RD_BURST__SHIFT 0x2 +#define SDMA3_RD_BURST_CNTL__RD_BURST_MASK 0x00000003L +#define SDMA3_RD_BURST_CNTL__CMD_BUFFER_RD_BURST_MASK 0x0000000CL +//SDMA3_HBM_PAGE_CONFIG +#define SDMA3_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT__SHIFT 0x0 +#define SDMA3_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT_MASK 0x00000001L +//SDMA3_UCODE_CHECKSUM +#define SDMA3_UCODE_CHECKSUM__DATA__SHIFT 0x0 +#define SDMA3_UCODE_CHECKSUM__DATA_MASK 0xFFFFFFFFL +//SDMA3_F32_CNTL +#define SDMA3_F32_CNTL__HALT__SHIFT 0x0 +#define SDMA3_F32_CNTL__STEP__SHIFT 0x1 +#define SDMA3_F32_CNTL__HALT_MASK 0x00000001L +#define SDMA3_F32_CNTL__STEP_MASK 0x00000002L +//SDMA3_FREEZE +#define SDMA3_FREEZE__PREEMPT__SHIFT 0x0 +#define SDMA3_FREEZE__FREEZE__SHIFT 0x4 +#define SDMA3_FREEZE__FROZEN__SHIFT 0x5 +#define SDMA3_FREEZE__F32_FREEZE__SHIFT 0x6 +#define SDMA3_FREEZE__PREEMPT_MASK 0x00000001L +#define SDMA3_FREEZE__FREEZE_MASK 0x00000010L +#define SDMA3_FREEZE__FROZEN_MASK 0x00000020L +#define SDMA3_FREEZE__F32_FREEZE_MASK 0x00000040L +//SDMA3_PHASE0_QUANTUM +#define SDMA3_PHASE0_QUANTUM__UNIT__SHIFT 0x0 +#define SDMA3_PHASE0_QUANTUM__VALUE__SHIFT 0x8 +#define SDMA3_PHASE0_QUANTUM__PREFER__SHIFT 0x1e +#define SDMA3_PHASE0_QUANTUM__UNIT_MASK 0x0000000FL +#define SDMA3_PHASE0_QUANTUM__VALUE_MASK 0x00FFFF00L +#define SDMA3_PHASE0_QUANTUM__PREFER_MASK 0x40000000L +//SDMA3_PHASE1_QUANTUM +#define SDMA3_PHASE1_QUANTUM__UNIT__SHIFT 0x0 +#define SDMA3_PHASE1_QUANTUM__VALUE__SHIFT 0x8 +#define SDMA3_PHASE1_QUANTUM__PREFER__SHIFT 0x1e +#define SDMA3_PHASE1_QUANTUM__UNIT_MASK 0x0000000FL +#define SDMA3_PHASE1_QUANTUM__VALUE_MASK 0x00FFFF00L +#define SDMA3_PHASE1_QUANTUM__PREFER_MASK 0x40000000L +//SDMA3_EDC_CONFIG +#define SDMA3_EDC_CONFIG__DIS_EDC__SHIFT 0x1 +#define SDMA3_EDC_CONFIG__ECC_INT_ENABLE__SHIFT 0x2 +#define SDMA3_EDC_CONFIG__DIS_EDC_MASK 0x00000002L +#define SDMA3_EDC_CONFIG__ECC_INT_ENABLE_MASK 0x00000004L +//SDMA3_BA_THRESHOLD +#define SDMA3_BA_THRESHOLD__READ_THRES__SHIFT 0x0 +#define SDMA3_BA_THRESHOLD__WRITE_THRES__SHIFT 0x10 +#define SDMA3_BA_THRESHOLD__READ_THRES_MASK 0x000003FFL +#define SDMA3_BA_THRESHOLD__WRITE_THRES_MASK 0x03FF0000L +//SDMA3_ID +#define SDMA3_ID__DEVICE_ID__SHIFT 0x0 +#define SDMA3_ID__DEVICE_ID_MASK 0x000000FFL +//SDMA3_VERSION +#define SDMA3_VERSION__MINVER__SHIFT 0x0 +#define SDMA3_VERSION__MAJVER__SHIFT 0x8 +#define SDMA3_VERSION__REV__SHIFT 0x10 +#define SDMA3_VERSION__MINVER_MASK 0x0000007FL +#define SDMA3_VERSION__MAJVER_MASK 0x00007F00L +#define SDMA3_VERSION__REV_MASK 0x003F0000L +//SDMA3_EDC_COUNTER +#define SDMA3_EDC_COUNTER__SDMA_UCODE_BUF_SED__SHIFT 0x0 +#define SDMA3_EDC_COUNTER__SDMA_RB_CMD_BUF_SED__SHIFT 0x2 +#define SDMA3_EDC_COUNTER__SDMA_IB_CMD_BUF_SED__SHIFT 0x3 +#define SDMA3_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED__SHIFT 0x4 +#define SDMA3_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED__SHIFT 0x5 +#define SDMA3_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED__SHIFT 0x6 +#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED__SHIFT 0x7 +#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED__SHIFT 0x8 +#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED__SHIFT 0x9 +#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED__SHIFT 0xa +#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED__SHIFT 0xb +#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED__SHIFT 0xc +#define 
SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED__SHIFT 0xd +#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED__SHIFT 0xe +#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED__SHIFT 0xf +#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED__SHIFT 0x10 +#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED__SHIFT 0x11 +#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED__SHIFT 0x12 +#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED__SHIFT 0x13 +#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED__SHIFT 0x14 +#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED__SHIFT 0x15 +#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED__SHIFT 0x16 +#define SDMA3_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED__SHIFT 0x17 +#define SDMA3_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED__SHIFT 0x18 +#define SDMA3_EDC_COUNTER__SDMA_UCODE_BUF_SED_MASK 0x00000001L +#define SDMA3_EDC_COUNTER__SDMA_RB_CMD_BUF_SED_MASK 0x00000004L +#define SDMA3_EDC_COUNTER__SDMA_IB_CMD_BUF_SED_MASK 0x00000008L +#define SDMA3_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED_MASK 0x00000010L +#define SDMA3_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED_MASK 0x00000020L +#define SDMA3_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED_MASK 0x00000040L +#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED_MASK 0x00000080L +#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED_MASK 0x00000100L +#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED_MASK 0x00000200L +#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED_MASK 0x00000400L +#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED_MASK 0x00000800L +#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED_MASK 0x00001000L +#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED_MASK 0x00002000L +#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED_MASK 0x00004000L +#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED_MASK 0x00008000L +#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED_MASK 0x00010000L +#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED_MASK 0x00020000L +#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED_MASK 0x00040000L +#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED_MASK 0x00080000L +#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED_MASK 0x00100000L +#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED_MASK 0x00200000L +#define SDMA3_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED_MASK 0x00400000L +#define SDMA3_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED_MASK 0x00800000L +#define SDMA3_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED_MASK 0x01000000L +//SDMA3_EDC_COUNTER_CLEAR +#define SDMA3_EDC_COUNTER_CLEAR__DUMMY__SHIFT 0x0 +#define SDMA3_EDC_COUNTER_CLEAR__DUMMY_MASK 0x00000001L +//SDMA3_STATUS2_REG +#define SDMA3_STATUS2_REG__ID__SHIFT 0x0 +#define SDMA3_STATUS2_REG__F32_INSTR_PTR__SHIFT 0x3 +#define SDMA3_STATUS2_REG__CMD_OP__SHIFT 0x10 +#define SDMA3_STATUS2_REG__ID_MASK 0x00000007L +#define SDMA3_STATUS2_REG__F32_INSTR_PTR_MASK 0x0000FFF8L +#define SDMA3_STATUS2_REG__CMD_OP_MASK 0xFFFF0000L +//SDMA3_ATOMIC_CNTL +#define SDMA3_ATOMIC_CNTL__LOOP_TIMER__SHIFT 0x0 +#define SDMA3_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE__SHIFT 0x1f +#define SDMA3_ATOMIC_CNTL__LOOP_TIMER_MASK 0x7FFFFFFFL +#define SDMA3_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE_MASK 0x80000000L +//SDMA3_ATOMIC_PREOP_LO +#define SDMA3_ATOMIC_PREOP_LO__DATA__SHIFT 0x0 +#define SDMA3_ATOMIC_PREOP_LO__DATA_MASK 0xFFFFFFFFL +//SDMA3_ATOMIC_PREOP_HI +#define SDMA3_ATOMIC_PREOP_HI__DATA__SHIFT 0x0 +#define SDMA3_ATOMIC_PREOP_HI__DATA_MASK 0xFFFFFFFFL +//SDMA3_UTCL1_CNTL +#define SDMA3_UTCL1_CNTL__REDO_ENABLE__SHIFT 0x0 +#define SDMA3_UTCL1_CNTL__REDO_DELAY__SHIFT 0x1 +#define 
SDMA3_UTCL1_CNTL__REDO_WATERMK__SHIFT 0xb +#define SDMA3_UTCL1_CNTL__INVACK_DELAY__SHIFT 0xe +#define SDMA3_UTCL1_CNTL__REQL2_CREDIT__SHIFT 0x18 +#define SDMA3_UTCL1_CNTL__VADDR_WATERMK__SHIFT 0x1d +#define SDMA3_UTCL1_CNTL__REDO_ENABLE_MASK 0x00000001L +#define SDMA3_UTCL1_CNTL__REDO_DELAY_MASK 0x000007FEL +#define SDMA3_UTCL1_CNTL__REDO_WATERMK_MASK 0x00003800L +#define SDMA3_UTCL1_CNTL__INVACK_DELAY_MASK 0x00FFC000L +#define SDMA3_UTCL1_CNTL__REQL2_CREDIT_MASK 0x1F000000L +#define SDMA3_UTCL1_CNTL__VADDR_WATERMK_MASK 0xE0000000L +//SDMA3_UTCL1_WATERMK +#define SDMA3_UTCL1_WATERMK__REQMC_WATERMK__SHIFT 0x0 +#define SDMA3_UTCL1_WATERMK__REQPG_WATERMK__SHIFT 0x9 +#define SDMA3_UTCL1_WATERMK__INVREQ_WATERMK__SHIFT 0x11 +#define SDMA3_UTCL1_WATERMK__XNACK_WATERMK__SHIFT 0x19 +#define SDMA3_UTCL1_WATERMK__REQMC_WATERMK_MASK 0x000001FFL +#define SDMA3_UTCL1_WATERMK__REQPG_WATERMK_MASK 0x0001FE00L +#define SDMA3_UTCL1_WATERMK__INVREQ_WATERMK_MASK 0x01FE0000L +#define SDMA3_UTCL1_WATERMK__XNACK_WATERMK_MASK 0xFE000000L +//SDMA3_UTCL1_RD_STATUS +#define SDMA3_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0 +#define SDMA3_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1 +#define SDMA3_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2 +#define SDMA3_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3 +#define SDMA3_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4 +#define SDMA3_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5 +#define SDMA3_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6 +#define SDMA3_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7 +#define SDMA3_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8 +#define SDMA3_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9 +#define SDMA3_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa +#define SDMA3_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb +#define SDMA3_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc +#define SDMA3_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd +#define SDMA3_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe +#define SDMA3_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf +#define SDMA3_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10 +#define SDMA3_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11 +#define SDMA3_UTCL1_RD_STATUS__PAGE_FAULT__SHIFT 0x12 +#define SDMA3_UTCL1_RD_STATUS__PAGE_NULL__SHIFT 0x13 +#define SDMA3_UTCL1_RD_STATUS__REQL2_IDLE__SHIFT 0x14 +#define SDMA3_UTCL1_RD_STATUS__CE_L1_STALL__SHIFT 0x15 +#define SDMA3_UTCL1_RD_STATUS__NEXT_RD_VECTOR__SHIFT 0x16 +#define SDMA3_UTCL1_RD_STATUS__MERGE_STATE__SHIFT 0x1a +#define SDMA3_UTCL1_RD_STATUS__ADDR_RD_RTR__SHIFT 0x1d +#define SDMA3_UTCL1_RD_STATUS__WPTR_POLLING__SHIFT 0x1e +#define SDMA3_UTCL1_RD_STATUS__INVREQ_SIZE__SHIFT 0x1f +#define SDMA3_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L +#define SDMA3_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L +#define SDMA3_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L +#define SDMA3_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L +#define SDMA3_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L +#define SDMA3_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L +#define SDMA3_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L +#define SDMA3_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L +#define SDMA3_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L +#define SDMA3_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L +#define SDMA3_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L +#define 
SDMA3_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L +#define SDMA3_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L +#define SDMA3_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L +#define SDMA3_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L +#define SDMA3_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L +#define SDMA3_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L +#define SDMA3_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L +#define SDMA3_UTCL1_RD_STATUS__PAGE_FAULT_MASK 0x00040000L +#define SDMA3_UTCL1_RD_STATUS__PAGE_NULL_MASK 0x00080000L +#define SDMA3_UTCL1_RD_STATUS__REQL2_IDLE_MASK 0x00100000L +#define SDMA3_UTCL1_RD_STATUS__CE_L1_STALL_MASK 0x00200000L +#define SDMA3_UTCL1_RD_STATUS__NEXT_RD_VECTOR_MASK 0x03C00000L +#define SDMA3_UTCL1_RD_STATUS__MERGE_STATE_MASK 0x1C000000L +#define SDMA3_UTCL1_RD_STATUS__ADDR_RD_RTR_MASK 0x20000000L +#define SDMA3_UTCL1_RD_STATUS__WPTR_POLLING_MASK 0x40000000L +#define SDMA3_UTCL1_RD_STATUS__INVREQ_SIZE_MASK 0x80000000L +//SDMA3_UTCL1_WR_STATUS +#define SDMA3_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0 +#define SDMA3_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1 +#define SDMA3_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2 +#define SDMA3_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3 +#define SDMA3_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4 +#define SDMA3_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5 +#define SDMA3_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6 +#define SDMA3_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7 +#define SDMA3_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8 +#define SDMA3_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9 +#define SDMA3_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa +#define SDMA3_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb +#define SDMA3_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc +#define SDMA3_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd +#define SDMA3_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe +#define SDMA3_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf +#define SDMA3_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10 +#define SDMA3_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11 +#define SDMA3_UTCL1_WR_STATUS__PAGE_FAULT__SHIFT 0x12 +#define SDMA3_UTCL1_WR_STATUS__PAGE_NULL__SHIFT 0x13 +#define SDMA3_UTCL1_WR_STATUS__REQL2_IDLE__SHIFT 0x14 +#define SDMA3_UTCL1_WR_STATUS__F32_WR_RTR__SHIFT 0x15 +#define SDMA3_UTCL1_WR_STATUS__NEXT_WR_VECTOR__SHIFT 0x16 +#define SDMA3_UTCL1_WR_STATUS__MERGE_STATE__SHIFT 0x19 +#define SDMA3_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY__SHIFT 0x1c +#define SDMA3_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL__SHIFT 0x1d +#define SDMA3_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY__SHIFT 0x1e +#define SDMA3_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL__SHIFT 0x1f +#define SDMA3_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L +#define SDMA3_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L +#define SDMA3_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L +#define SDMA3_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L +#define SDMA3_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L +#define SDMA3_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L +#define SDMA3_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L +#define SDMA3_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L +#define SDMA3_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L +#define SDMA3_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L +#define 
SDMA3_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L +#define SDMA3_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L +#define SDMA3_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L +#define SDMA3_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L +#define SDMA3_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L +#define SDMA3_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L +#define SDMA3_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L +#define SDMA3_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L +#define SDMA3_UTCL1_WR_STATUS__PAGE_FAULT_MASK 0x00040000L +#define SDMA3_UTCL1_WR_STATUS__PAGE_NULL_MASK 0x00080000L +#define SDMA3_UTCL1_WR_STATUS__REQL2_IDLE_MASK 0x00100000L +#define SDMA3_UTCL1_WR_STATUS__F32_WR_RTR_MASK 0x00200000L +#define SDMA3_UTCL1_WR_STATUS__NEXT_WR_VECTOR_MASK 0x01C00000L +#define SDMA3_UTCL1_WR_STATUS__MERGE_STATE_MASK 0x0E000000L +#define SDMA3_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY_MASK 0x10000000L +#define SDMA3_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL_MASK 0x20000000L +#define SDMA3_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY_MASK 0x40000000L +#define SDMA3_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL_MASK 0x80000000L +//SDMA3_UTCL1_INV0 +#define SDMA3_UTCL1_INV0__INV_MIDDLE__SHIFT 0x0 +#define SDMA3_UTCL1_INV0__RD_TIMEOUT__SHIFT 0x1 +#define SDMA3_UTCL1_INV0__WR_TIMEOUT__SHIFT 0x2 +#define SDMA3_UTCL1_INV0__RD_IN_INVADR__SHIFT 0x3 +#define SDMA3_UTCL1_INV0__WR_IN_INVADR__SHIFT 0x4 +#define SDMA3_UTCL1_INV0__PAGE_NULL_SW__SHIFT 0x5 +#define SDMA3_UTCL1_INV0__XNACK_IS_INVADR__SHIFT 0x6 +#define SDMA3_UTCL1_INV0__INVREQ_ENABLE__SHIFT 0x7 +#define SDMA3_UTCL1_INV0__NACK_TIMEOUT_SW__SHIFT 0x8 +#define SDMA3_UTCL1_INV0__NFLUSH_INV_IDLE__SHIFT 0x9 +#define SDMA3_UTCL1_INV0__FLUSH_INV_IDLE__SHIFT 0xa +#define SDMA3_UTCL1_INV0__INV_FLUSHTYPE__SHIFT 0xb +#define SDMA3_UTCL1_INV0__INV_VMID_VEC__SHIFT 0xc +#define SDMA3_UTCL1_INV0__INV_ADDR_HI__SHIFT 0x1c +#define SDMA3_UTCL1_INV0__INV_MIDDLE_MASK 0x00000001L +#define SDMA3_UTCL1_INV0__RD_TIMEOUT_MASK 0x00000002L +#define SDMA3_UTCL1_INV0__WR_TIMEOUT_MASK 0x00000004L +#define SDMA3_UTCL1_INV0__RD_IN_INVADR_MASK 0x00000008L +#define SDMA3_UTCL1_INV0__WR_IN_INVADR_MASK 0x00000010L +#define SDMA3_UTCL1_INV0__PAGE_NULL_SW_MASK 0x00000020L +#define SDMA3_UTCL1_INV0__XNACK_IS_INVADR_MASK 0x00000040L +#define SDMA3_UTCL1_INV0__INVREQ_ENABLE_MASK 0x00000080L +#define SDMA3_UTCL1_INV0__NACK_TIMEOUT_SW_MASK 0x00000100L +#define SDMA3_UTCL1_INV0__NFLUSH_INV_IDLE_MASK 0x00000200L +#define SDMA3_UTCL1_INV0__FLUSH_INV_IDLE_MASK 0x00000400L +#define SDMA3_UTCL1_INV0__INV_FLUSHTYPE_MASK 0x00000800L +#define SDMA3_UTCL1_INV0__INV_VMID_VEC_MASK 0x0FFFF000L +#define SDMA3_UTCL1_INV0__INV_ADDR_HI_MASK 0xF0000000L +//SDMA3_UTCL1_INV1 +#define SDMA3_UTCL1_INV1__INV_ADDR_LO__SHIFT 0x0 +#define SDMA3_UTCL1_INV1__INV_ADDR_LO_MASK 0xFFFFFFFFL +//SDMA3_UTCL1_INV2 +#define SDMA3_UTCL1_INV2__INV_NFLUSH_VMID_VEC__SHIFT 0x0 +#define SDMA3_UTCL1_INV2__INV_NFLUSH_VMID_VEC_MASK 0xFFFFFFFFL +//SDMA3_UTCL1_RD_XNACK0 +#define SDMA3_UTCL1_RD_XNACK0__XNACK_ADDR_LO__SHIFT 0x0 +#define SDMA3_UTCL1_RD_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL +//SDMA3_UTCL1_RD_XNACK1 +#define SDMA3_UTCL1_RD_XNACK1__XNACK_ADDR_HI__SHIFT 0x0 +#define SDMA3_UTCL1_RD_XNACK1__XNACK_VMID__SHIFT 0x4 +#define SDMA3_UTCL1_RD_XNACK1__XNACK_VECTOR__SHIFT 0x8 +#define SDMA3_UTCL1_RD_XNACK1__IS_XNACK__SHIFT 0x1a +#define SDMA3_UTCL1_RD_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL +#define SDMA3_UTCL1_RD_XNACK1__XNACK_VMID_MASK 0x000000F0L +#define 
SDMA3_UTCL1_RD_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L +#define SDMA3_UTCL1_RD_XNACK1__IS_XNACK_MASK 0x0C000000L +//SDMA3_UTCL1_WR_XNACK0 +#define SDMA3_UTCL1_WR_XNACK0__XNACK_ADDR_LO__SHIFT 0x0 +#define SDMA3_UTCL1_WR_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL +//SDMA3_UTCL1_WR_XNACK1 +#define SDMA3_UTCL1_WR_XNACK1__XNACK_ADDR_HI__SHIFT 0x0 +#define SDMA3_UTCL1_WR_XNACK1__XNACK_VMID__SHIFT 0x4 +#define SDMA3_UTCL1_WR_XNACK1__XNACK_VECTOR__SHIFT 0x8 +#define SDMA3_UTCL1_WR_XNACK1__IS_XNACK__SHIFT 0x1a +#define SDMA3_UTCL1_WR_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL +#define SDMA3_UTCL1_WR_XNACK1__XNACK_VMID_MASK 0x000000F0L +#define SDMA3_UTCL1_WR_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L +#define SDMA3_UTCL1_WR_XNACK1__IS_XNACK_MASK 0x0C000000L +//SDMA3_UTCL1_TIMEOUT +#define SDMA3_UTCL1_TIMEOUT__RD_XNACK_LIMIT__SHIFT 0x0 +#define SDMA3_UTCL1_TIMEOUT__WR_XNACK_LIMIT__SHIFT 0x10 +#define SDMA3_UTCL1_TIMEOUT__RD_XNACK_LIMIT_MASK 0x0000FFFFL +#define SDMA3_UTCL1_TIMEOUT__WR_XNACK_LIMIT_MASK 0xFFFF0000L +//SDMA3_UTCL1_PAGE +#define SDMA3_UTCL1_PAGE__VM_HOLE__SHIFT 0x0 +#define SDMA3_UTCL1_PAGE__REQ_TYPE__SHIFT 0x1 +#define SDMA3_UTCL1_PAGE__USE_MTYPE__SHIFT 0x6 +#define SDMA3_UTCL1_PAGE__USE_PT_SNOOP__SHIFT 0x9 +#define SDMA3_UTCL1_PAGE__VM_HOLE_MASK 0x00000001L +#define SDMA3_UTCL1_PAGE__REQ_TYPE_MASK 0x0000001EL +#define SDMA3_UTCL1_PAGE__USE_MTYPE_MASK 0x000001C0L +#define SDMA3_UTCL1_PAGE__USE_PT_SNOOP_MASK 0x00000200L +//SDMA3_POWER_CNTL_IDLE +#define SDMA3_POWER_CNTL_IDLE__DELAY0__SHIFT 0x0 +#define SDMA3_POWER_CNTL_IDLE__DELAY1__SHIFT 0x10 +#define SDMA3_POWER_CNTL_IDLE__DELAY2__SHIFT 0x18 +#define SDMA3_POWER_CNTL_IDLE__DELAY0_MASK 0x0000FFFFL +#define SDMA3_POWER_CNTL_IDLE__DELAY1_MASK 0x00FF0000L +#define SDMA3_POWER_CNTL_IDLE__DELAY2_MASK 0xFF000000L +//SDMA3_RELAX_ORDERING_LUT +#define SDMA3_RELAX_ORDERING_LUT__RESERVED0__SHIFT 0x0 +#define SDMA3_RELAX_ORDERING_LUT__COPY__SHIFT 0x1 +#define SDMA3_RELAX_ORDERING_LUT__WRITE__SHIFT 0x2 +#define SDMA3_RELAX_ORDERING_LUT__RESERVED3__SHIFT 0x3 +#define SDMA3_RELAX_ORDERING_LUT__RESERVED4__SHIFT 0x4 +#define SDMA3_RELAX_ORDERING_LUT__FENCE__SHIFT 0x5 +#define SDMA3_RELAX_ORDERING_LUT__RESERVED76__SHIFT 0x6 +#define SDMA3_RELAX_ORDERING_LUT__POLL_MEM__SHIFT 0x8 +#define SDMA3_RELAX_ORDERING_LUT__COND_EXE__SHIFT 0x9 +#define SDMA3_RELAX_ORDERING_LUT__ATOMIC__SHIFT 0xa +#define SDMA3_RELAX_ORDERING_LUT__CONST_FILL__SHIFT 0xb +#define SDMA3_RELAX_ORDERING_LUT__PTEPDE__SHIFT 0xc +#define SDMA3_RELAX_ORDERING_LUT__TIMESTAMP__SHIFT 0xd +#define SDMA3_RELAX_ORDERING_LUT__RESERVED__SHIFT 0xe +#define SDMA3_RELAX_ORDERING_LUT__WORLD_SWITCH__SHIFT 0x1b +#define SDMA3_RELAX_ORDERING_LUT__RPTR_WRB__SHIFT 0x1c +#define SDMA3_RELAX_ORDERING_LUT__WPTR_POLL__SHIFT 0x1d +#define SDMA3_RELAX_ORDERING_LUT__IB_FETCH__SHIFT 0x1e +#define SDMA3_RELAX_ORDERING_LUT__RB_FETCH__SHIFT 0x1f +#define SDMA3_RELAX_ORDERING_LUT__RESERVED0_MASK 0x00000001L +#define SDMA3_RELAX_ORDERING_LUT__COPY_MASK 0x00000002L +#define SDMA3_RELAX_ORDERING_LUT__WRITE_MASK 0x00000004L +#define SDMA3_RELAX_ORDERING_LUT__RESERVED3_MASK 0x00000008L +#define SDMA3_RELAX_ORDERING_LUT__RESERVED4_MASK 0x00000010L +#define SDMA3_RELAX_ORDERING_LUT__FENCE_MASK 0x00000020L +#define SDMA3_RELAX_ORDERING_LUT__RESERVED76_MASK 0x000000C0L +#define SDMA3_RELAX_ORDERING_LUT__POLL_MEM_MASK 0x00000100L +#define SDMA3_RELAX_ORDERING_LUT__COND_EXE_MASK 0x00000200L +#define SDMA3_RELAX_ORDERING_LUT__ATOMIC_MASK 0x00000400L +#define SDMA3_RELAX_ORDERING_LUT__CONST_FILL_MASK 0x00000800L +#define 
SDMA3_RELAX_ORDERING_LUT__PTEPDE_MASK 0x00001000L +#define SDMA3_RELAX_ORDERING_LUT__TIMESTAMP_MASK 0x00002000L +#define SDMA3_RELAX_ORDERING_LUT__RESERVED_MASK 0x07FFC000L +#define SDMA3_RELAX_ORDERING_LUT__WORLD_SWITCH_MASK 0x08000000L +#define SDMA3_RELAX_ORDERING_LUT__RPTR_WRB_MASK 0x10000000L +#define SDMA3_RELAX_ORDERING_LUT__WPTR_POLL_MASK 0x20000000L +#define SDMA3_RELAX_ORDERING_LUT__IB_FETCH_MASK 0x40000000L +#define SDMA3_RELAX_ORDERING_LUT__RB_FETCH_MASK 0x80000000L +//SDMA3_CHICKEN_BITS_2 +#define SDMA3_CHICKEN_BITS_2__F32_CMD_PROC_DELAY__SHIFT 0x0 +#define SDMA3_CHICKEN_BITS_2__F32_CMD_PROC_DELAY_MASK 0x0000000FL +//SDMA3_STATUS3_REG +#define SDMA3_STATUS3_REG__CMD_OP_STATUS__SHIFT 0x0 +#define SDMA3_STATUS3_REG__PREV_VM_CMD__SHIFT 0x10 +#define SDMA3_STATUS3_REG__EXCEPTION_IDLE__SHIFT 0x14 +#define SDMA3_STATUS3_REG__QUEUE_ID_MATCH__SHIFT 0x15 +#define SDMA3_STATUS3_REG__INT_QUEUE_ID__SHIFT 0x16 +#define SDMA3_STATUS3_REG__CMD_OP_STATUS_MASK 0x0000FFFFL +#define SDMA3_STATUS3_REG__PREV_VM_CMD_MASK 0x000F0000L +#define SDMA3_STATUS3_REG__EXCEPTION_IDLE_MASK 0x00100000L +#define SDMA3_STATUS3_REG__QUEUE_ID_MATCH_MASK 0x00200000L +#define SDMA3_STATUS3_REG__INT_QUEUE_ID_MASK 0x03C00000L +//SDMA3_PHYSICAL_ADDR_LO +#define SDMA3_PHYSICAL_ADDR_LO__D_VALID__SHIFT 0x0 +#define SDMA3_PHYSICAL_ADDR_LO__DIRTY__SHIFT 0x1 +#define SDMA3_PHYSICAL_ADDR_LO__PHY_VALID__SHIFT 0x2 +#define SDMA3_PHYSICAL_ADDR_LO__ADDR__SHIFT 0xc +#define SDMA3_PHYSICAL_ADDR_LO__D_VALID_MASK 0x00000001L +#define SDMA3_PHYSICAL_ADDR_LO__DIRTY_MASK 0x00000002L +#define SDMA3_PHYSICAL_ADDR_LO__PHY_VALID_MASK 0x00000004L +#define SDMA3_PHYSICAL_ADDR_LO__ADDR_MASK 0xFFFFF000L +//SDMA3_PHYSICAL_ADDR_HI +#define SDMA3_PHYSICAL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA3_PHYSICAL_ADDR_HI__ADDR_MASK 0x0000FFFFL +//SDMA3_PHASE2_QUANTUM +#define SDMA3_PHASE2_QUANTUM__UNIT__SHIFT 0x0 +#define SDMA3_PHASE2_QUANTUM__VALUE__SHIFT 0x8 +#define SDMA3_PHASE2_QUANTUM__PREFER__SHIFT 0x1e +#define SDMA3_PHASE2_QUANTUM__UNIT_MASK 0x0000000FL +#define SDMA3_PHASE2_QUANTUM__VALUE_MASK 0x00FFFF00L +#define SDMA3_PHASE2_QUANTUM__PREFER_MASK 0x40000000L +//SDMA3_ERROR_LOG +#define SDMA3_ERROR_LOG__OVERRIDE__SHIFT 0x0 +#define SDMA3_ERROR_LOG__STATUS__SHIFT 0x10 +#define SDMA3_ERROR_LOG__OVERRIDE_MASK 0x0000FFFFL +#define SDMA3_ERROR_LOG__STATUS_MASK 0xFFFF0000L +//SDMA3_PUB_DUMMY_REG0 +#define SDMA3_PUB_DUMMY_REG0__VALUE__SHIFT 0x0 +#define SDMA3_PUB_DUMMY_REG0__VALUE_MASK 0xFFFFFFFFL +//SDMA3_PUB_DUMMY_REG1 +#define SDMA3_PUB_DUMMY_REG1__VALUE__SHIFT 0x0 +#define SDMA3_PUB_DUMMY_REG1__VALUE_MASK 0xFFFFFFFFL +//SDMA3_PUB_DUMMY_REG2 +#define SDMA3_PUB_DUMMY_REG2__VALUE__SHIFT 0x0 +#define SDMA3_PUB_DUMMY_REG2__VALUE_MASK 0xFFFFFFFFL +//SDMA3_PUB_DUMMY_REG3 +#define SDMA3_PUB_DUMMY_REG3__VALUE__SHIFT 0x0 +#define SDMA3_PUB_DUMMY_REG3__VALUE_MASK 0xFFFFFFFFL +//SDMA3_F32_COUNTER +#define SDMA3_F32_COUNTER__VALUE__SHIFT 0x0 +#define SDMA3_F32_COUNTER__VALUE_MASK 0xFFFFFFFFL +//SDMA3_UNBREAKABLE +#define SDMA3_UNBREAKABLE__VALUE__SHIFT 0x0 +#define SDMA3_UNBREAKABLE__VALUE_MASK 0x00000001L +//SDMA3_PERFMON_CNTL +#define SDMA3_PERFMON_CNTL__PERF_ENABLE0__SHIFT 0x0 +#define SDMA3_PERFMON_CNTL__PERF_CLEAR0__SHIFT 0x1 +#define SDMA3_PERFMON_CNTL__PERF_SEL0__SHIFT 0x2 +#define SDMA3_PERFMON_CNTL__PERF_ENABLE1__SHIFT 0xa +#define SDMA3_PERFMON_CNTL__PERF_CLEAR1__SHIFT 0xb +#define SDMA3_PERFMON_CNTL__PERF_SEL1__SHIFT 0xc +#define SDMA3_PERFMON_CNTL__PERF_ENABLE0_MASK 0x00000001L +#define SDMA3_PERFMON_CNTL__PERF_CLEAR0_MASK 0x00000002L +#define 
SDMA3_PERFMON_CNTL__PERF_SEL0_MASK 0x000003FCL +#define SDMA3_PERFMON_CNTL__PERF_ENABLE1_MASK 0x00000400L +#define SDMA3_PERFMON_CNTL__PERF_CLEAR1_MASK 0x00000800L +#define SDMA3_PERFMON_CNTL__PERF_SEL1_MASK 0x000FF000L +//SDMA3_PERFCOUNTER0_RESULT +#define SDMA3_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT 0x0 +#define SDMA3_PERFCOUNTER0_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL +//SDMA3_PERFCOUNTER1_RESULT +#define SDMA3_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT 0x0 +#define SDMA3_PERFCOUNTER1_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL +//SDMA3_PERFCOUNTER_TAG_DELAY_RANGE +#define SDMA3_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW__SHIFT 0x0 +#define SDMA3_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH__SHIFT 0xe +#define SDMA3_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW__SHIFT 0x1c +#define SDMA3_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW_MASK 0x00003FFFL +#define SDMA3_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH_MASK 0x0FFFC000L +#define SDMA3_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW_MASK 0x10000000L +//SDMA3_CRD_CNTL +#define SDMA3_CRD_CNTL__MC_WRREQ_CREDIT__SHIFT 0x7 +#define SDMA3_CRD_CNTL__MC_RDREQ_CREDIT__SHIFT 0xd +#define SDMA3_CRD_CNTL__MC_WRREQ_CREDIT_MASK 0x00001F80L +#define SDMA3_CRD_CNTL__MC_RDREQ_CREDIT_MASK 0x0007E000L +//SDMA3_GPU_IOV_VIOLATION_LOG +#define SDMA3_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0 +#define SDMA3_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1 +#define SDMA3_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2 +#define SDMA3_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION__SHIFT 0x14 +#define SDMA3_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x15 +#define SDMA3_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x16 +#define SDMA3_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L +#define SDMA3_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L +#define SDMA3_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x000FFFFCL +#define SDMA3_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION_MASK 0x00100000L +#define SDMA3_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00200000L +#define SDMA3_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x03C00000L +//SDMA3_ULV_CNTL +#define SDMA3_ULV_CNTL__HYSTERESIS__SHIFT 0x0 +#define SDMA3_ULV_CNTL__ENTER_ULV_INT_CLR__SHIFT 0x1b +#define SDMA3_ULV_CNTL__EXIT_ULV_INT_CLR__SHIFT 0x1c +#define SDMA3_ULV_CNTL__ENTER_ULV_INT__SHIFT 0x1d +#define SDMA3_ULV_CNTL__EXIT_ULV_INT__SHIFT 0x1e +#define SDMA3_ULV_CNTL__ULV_STATUS__SHIFT 0x1f +#define SDMA3_ULV_CNTL__HYSTERESIS_MASK 0x0000001FL +#define SDMA3_ULV_CNTL__ENTER_ULV_INT_CLR_MASK 0x08000000L +#define SDMA3_ULV_CNTL__EXIT_ULV_INT_CLR_MASK 0x10000000L +#define SDMA3_ULV_CNTL__ENTER_ULV_INT_MASK 0x20000000L +#define SDMA3_ULV_CNTL__EXIT_ULV_INT_MASK 0x40000000L +#define SDMA3_ULV_CNTL__ULV_STATUS_MASK 0x80000000L +//SDMA3_EA_DBIT_ADDR_DATA +#define SDMA3_EA_DBIT_ADDR_DATA__VALUE__SHIFT 0x0 +#define SDMA3_EA_DBIT_ADDR_DATA__VALUE_MASK 0xFFFFFFFFL +//SDMA3_EA_DBIT_ADDR_INDEX +#define SDMA3_EA_DBIT_ADDR_INDEX__VALUE__SHIFT 0x0 +#define SDMA3_EA_DBIT_ADDR_INDEX__VALUE_MASK 0x00000007L +//SDMA3_GPU_IOV_VIOLATION_LOG2 +#define SDMA3_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID__SHIFT 0x0 +#define SDMA3_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID_MASK 0x000000FFL +//SDMA3_GFX_RB_CNTL +#define SDMA3_GFX_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA3_GFX_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA3_GFX_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA3_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA3_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA3_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA3_GFX_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define 
SDMA3_GFX_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA3_GFX_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA3_GFX_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA3_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA3_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA3_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA3_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA3_GFX_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA3_GFX_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA3_GFX_RB_BASE +#define SDMA3_GFX_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA3_GFX_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA3_GFX_RB_BASE_HI +#define SDMA3_GFX_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA3_GFX_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA3_GFX_RB_RPTR +#define SDMA3_GFX_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA3_GFX_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA3_GFX_RB_RPTR_HI +#define SDMA3_GFX_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA3_GFX_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA3_GFX_RB_WPTR +#define SDMA3_GFX_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA3_GFX_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA3_GFX_RB_WPTR_HI +#define SDMA3_GFX_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA3_GFX_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA3_GFX_RB_WPTR_POLL_CNTL +#define SDMA3_GFX_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA3_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA3_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA3_GFX_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA3_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA3_GFX_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA3_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA3_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA3_GFX_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA3_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA3_GFX_RB_RPTR_ADDR_HI +#define SDMA3_GFX_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA3_GFX_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA3_GFX_RB_RPTR_ADDR_LO +#define SDMA3_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA3_GFX_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA3_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA3_GFX_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA3_GFX_IB_CNTL +#define SDMA3_GFX_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA3_GFX_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA3_GFX_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA3_GFX_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA3_GFX_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA3_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA3_GFX_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA3_GFX_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA3_GFX_IB_RPTR +#define SDMA3_GFX_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA3_GFX_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA3_GFX_IB_OFFSET +#define SDMA3_GFX_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA3_GFX_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA3_GFX_IB_BASE_LO +#define SDMA3_GFX_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA3_GFX_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA3_GFX_IB_BASE_HI +#define SDMA3_GFX_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA3_GFX_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA3_GFX_IB_SIZE +#define SDMA3_GFX_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA3_GFX_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA3_GFX_SKIP_CNTL +#define SDMA3_GFX_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA3_GFX_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL 
+//SDMA3_GFX_CONTEXT_STATUS +#define SDMA3_GFX_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA3_GFX_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA3_GFX_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA3_GFX_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA3_GFX_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA3_GFX_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA3_GFX_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA3_GFX_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA3_GFX_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA3_GFX_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA3_GFX_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA3_GFX_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA3_GFX_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA3_GFX_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA3_GFX_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA3_GFX_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA3_GFX_DOORBELL +#define SDMA3_GFX_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA3_GFX_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA3_GFX_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA3_GFX_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA3_GFX_CONTEXT_CNTL +#define SDMA3_GFX_CONTEXT_CNTL__RESUME_CTX__SHIFT 0x10 +#define SDMA3_GFX_CONTEXT_CNTL__RESUME_CTX_MASK 0x00010000L +//SDMA3_GFX_STATUS +#define SDMA3_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA3_GFX_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA3_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA3_GFX_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA3_GFX_DOORBELL_LOG +#define SDMA3_GFX_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA3_GFX_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA3_GFX_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA3_GFX_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA3_GFX_WATERMARK +#define SDMA3_GFX_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA3_GFX_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA3_GFX_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA3_GFX_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA3_GFX_DOORBELL_OFFSET +#define SDMA3_GFX_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA3_GFX_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA3_GFX_CSA_ADDR_LO +#define SDMA3_GFX_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA3_GFX_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA3_GFX_CSA_ADDR_HI +#define SDMA3_GFX_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA3_GFX_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA3_GFX_IB_SUB_REMAIN +#define SDMA3_GFX_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA3_GFX_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA3_GFX_PREEMPT +#define SDMA3_GFX_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA3_GFX_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA3_GFX_DUMMY_REG +#define SDMA3_GFX_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA3_GFX_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA3_GFX_RB_WPTR_POLL_ADDR_HI +#define SDMA3_GFX_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA3_GFX_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA3_GFX_RB_WPTR_POLL_ADDR_LO +#define SDMA3_GFX_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA3_GFX_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA3_GFX_RB_AQL_CNTL +#define SDMA3_GFX_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA3_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA3_GFX_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA3_GFX_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA3_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define 
SDMA3_GFX_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA3_GFX_MINOR_PTR_UPDATE +#define SDMA3_GFX_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA3_GFX_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA3_GFX_MIDCMD_DATA0 +#define SDMA3_GFX_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA3_GFX_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA3_GFX_MIDCMD_DATA1 +#define SDMA3_GFX_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA3_GFX_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA3_GFX_MIDCMD_DATA2 +#define SDMA3_GFX_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA3_GFX_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA3_GFX_MIDCMD_DATA3 +#define SDMA3_GFX_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA3_GFX_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA3_GFX_MIDCMD_DATA4 +#define SDMA3_GFX_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA3_GFX_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA3_GFX_MIDCMD_DATA5 +#define SDMA3_GFX_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA3_GFX_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA3_GFX_MIDCMD_DATA6 +#define SDMA3_GFX_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA3_GFX_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA3_GFX_MIDCMD_DATA7 +#define SDMA3_GFX_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA3_GFX_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA3_GFX_MIDCMD_DATA8 +#define SDMA3_GFX_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA3_GFX_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA3_GFX_MIDCMD_CNTL +#define SDMA3_GFX_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA3_GFX_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA3_GFX_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA3_GFX_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA3_GFX_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA3_GFX_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA3_GFX_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA3_GFX_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA3_PAGE_RB_CNTL +#define SDMA3_PAGE_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA3_PAGE_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA3_PAGE_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA3_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA3_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA3_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA3_PAGE_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA3_PAGE_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA3_PAGE_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA3_PAGE_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA3_PAGE_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA3_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA3_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA3_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA3_PAGE_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA3_PAGE_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA3_PAGE_RB_BASE +#define SDMA3_PAGE_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA3_PAGE_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA3_PAGE_RB_BASE_HI +#define SDMA3_PAGE_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA3_PAGE_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA3_PAGE_RB_RPTR +#define SDMA3_PAGE_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA3_PAGE_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA3_PAGE_RB_RPTR_HI +#define SDMA3_PAGE_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA3_PAGE_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA3_PAGE_RB_WPTR +#define SDMA3_PAGE_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA3_PAGE_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA3_PAGE_RB_WPTR_HI +#define SDMA3_PAGE_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define 
SDMA3_PAGE_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA3_PAGE_RB_WPTR_POLL_CNTL +#define SDMA3_PAGE_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA3_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA3_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA3_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA3_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA3_PAGE_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA3_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA3_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA3_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA3_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA3_PAGE_RB_RPTR_ADDR_HI +#define SDMA3_PAGE_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA3_PAGE_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA3_PAGE_RB_RPTR_ADDR_LO +#define SDMA3_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA3_PAGE_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA3_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA3_PAGE_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA3_PAGE_IB_CNTL +#define SDMA3_PAGE_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA3_PAGE_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA3_PAGE_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA3_PAGE_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA3_PAGE_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA3_PAGE_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA3_PAGE_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA3_PAGE_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA3_PAGE_IB_RPTR +#define SDMA3_PAGE_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA3_PAGE_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA3_PAGE_IB_OFFSET +#define SDMA3_PAGE_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA3_PAGE_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA3_PAGE_IB_BASE_LO +#define SDMA3_PAGE_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA3_PAGE_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA3_PAGE_IB_BASE_HI +#define SDMA3_PAGE_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA3_PAGE_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA3_PAGE_IB_SIZE +#define SDMA3_PAGE_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA3_PAGE_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA3_PAGE_SKIP_CNTL +#define SDMA3_PAGE_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA3_PAGE_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA3_PAGE_CONTEXT_STATUS +#define SDMA3_PAGE_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA3_PAGE_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA3_PAGE_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA3_PAGE_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA3_PAGE_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA3_PAGE_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA3_PAGE_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA3_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA3_PAGE_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA3_PAGE_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA3_PAGE_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA3_PAGE_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA3_PAGE_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA3_PAGE_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA3_PAGE_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA3_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA3_PAGE_DOORBELL +#define SDMA3_PAGE_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA3_PAGE_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA3_PAGE_DOORBELL__ENABLE_MASK 0x10000000L +#define 
SDMA3_PAGE_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA3_PAGE_STATUS +#define SDMA3_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA3_PAGE_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA3_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA3_PAGE_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA3_PAGE_DOORBELL_LOG +#define SDMA3_PAGE_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA3_PAGE_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA3_PAGE_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA3_PAGE_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA3_PAGE_WATERMARK +#define SDMA3_PAGE_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA3_PAGE_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA3_PAGE_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA3_PAGE_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA3_PAGE_DOORBELL_OFFSET +#define SDMA3_PAGE_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA3_PAGE_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA3_PAGE_CSA_ADDR_LO +#define SDMA3_PAGE_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA3_PAGE_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA3_PAGE_CSA_ADDR_HI +#define SDMA3_PAGE_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA3_PAGE_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA3_PAGE_IB_SUB_REMAIN +#define SDMA3_PAGE_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA3_PAGE_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA3_PAGE_PREEMPT +#define SDMA3_PAGE_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA3_PAGE_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA3_PAGE_DUMMY_REG +#define SDMA3_PAGE_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA3_PAGE_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA3_PAGE_RB_WPTR_POLL_ADDR_HI +#define SDMA3_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA3_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA3_PAGE_RB_WPTR_POLL_ADDR_LO +#define SDMA3_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA3_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA3_PAGE_RB_AQL_CNTL +#define SDMA3_PAGE_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA3_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA3_PAGE_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA3_PAGE_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA3_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA3_PAGE_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA3_PAGE_MINOR_PTR_UPDATE +#define SDMA3_PAGE_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA3_PAGE_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA3_PAGE_MIDCMD_DATA0 +#define SDMA3_PAGE_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA3_PAGE_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA3_PAGE_MIDCMD_DATA1 +#define SDMA3_PAGE_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA3_PAGE_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA3_PAGE_MIDCMD_DATA2 +#define SDMA3_PAGE_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA3_PAGE_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA3_PAGE_MIDCMD_DATA3 +#define SDMA3_PAGE_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA3_PAGE_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA3_PAGE_MIDCMD_DATA4 +#define SDMA3_PAGE_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA3_PAGE_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA3_PAGE_MIDCMD_DATA5 +#define SDMA3_PAGE_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA3_PAGE_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA3_PAGE_MIDCMD_DATA6 +#define SDMA3_PAGE_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA3_PAGE_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA3_PAGE_MIDCMD_DATA7 +#define SDMA3_PAGE_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA3_PAGE_MIDCMD_DATA7__DATA7_MASK 
0xFFFFFFFFL +//SDMA3_PAGE_MIDCMD_DATA8 +#define SDMA3_PAGE_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA3_PAGE_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA3_PAGE_MIDCMD_CNTL +#define SDMA3_PAGE_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA3_PAGE_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA3_PAGE_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA3_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA3_PAGE_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA3_PAGE_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA3_PAGE_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA3_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA3_RLC0_RB_CNTL +#define SDMA3_RLC0_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA3_RLC0_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA3_RLC0_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA3_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA3_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA3_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA3_RLC0_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA3_RLC0_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA3_RLC0_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA3_RLC0_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA3_RLC0_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA3_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA3_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA3_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA3_RLC0_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA3_RLC0_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA3_RLC0_RB_BASE +#define SDMA3_RLC0_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA3_RLC0_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA3_RLC0_RB_BASE_HI +#define SDMA3_RLC0_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA3_RLC0_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA3_RLC0_RB_RPTR +#define SDMA3_RLC0_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA3_RLC0_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA3_RLC0_RB_RPTR_HI +#define SDMA3_RLC0_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA3_RLC0_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA3_RLC0_RB_WPTR +#define SDMA3_RLC0_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA3_RLC0_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA3_RLC0_RB_WPTR_HI +#define SDMA3_RLC0_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA3_RLC0_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA3_RLC0_RB_WPTR_POLL_CNTL +#define SDMA3_RLC0_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA3_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA3_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA3_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA3_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA3_RLC0_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA3_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA3_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA3_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA3_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA3_RLC0_RB_RPTR_ADDR_HI +#define SDMA3_RLC0_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA3_RLC0_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA3_RLC0_RB_RPTR_ADDR_LO +#define SDMA3_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA3_RLC0_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA3_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA3_RLC0_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA3_RLC0_IB_CNTL +#define SDMA3_RLC0_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define 
SDMA3_RLC0_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA3_RLC0_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA3_RLC0_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA3_RLC0_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA3_RLC0_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA3_RLC0_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA3_RLC0_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA3_RLC0_IB_RPTR +#define SDMA3_RLC0_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA3_RLC0_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA3_RLC0_IB_OFFSET +#define SDMA3_RLC0_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA3_RLC0_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA3_RLC0_IB_BASE_LO +#define SDMA3_RLC0_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA3_RLC0_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA3_RLC0_IB_BASE_HI +#define SDMA3_RLC0_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA3_RLC0_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA3_RLC0_IB_SIZE +#define SDMA3_RLC0_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA3_RLC0_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA3_RLC0_SKIP_CNTL +#define SDMA3_RLC0_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA3_RLC0_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA3_RLC0_CONTEXT_STATUS +#define SDMA3_RLC0_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA3_RLC0_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA3_RLC0_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA3_RLC0_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA3_RLC0_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA3_RLC0_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA3_RLC0_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA3_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA3_RLC0_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA3_RLC0_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA3_RLC0_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA3_RLC0_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA3_RLC0_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA3_RLC0_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA3_RLC0_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA3_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA3_RLC0_DOORBELL +#define SDMA3_RLC0_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA3_RLC0_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA3_RLC0_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA3_RLC0_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA3_RLC0_STATUS +#define SDMA3_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA3_RLC0_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA3_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA3_RLC0_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA3_RLC0_DOORBELL_LOG +#define SDMA3_RLC0_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA3_RLC0_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA3_RLC0_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA3_RLC0_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA3_RLC0_WATERMARK +#define SDMA3_RLC0_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA3_RLC0_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA3_RLC0_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA3_RLC0_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA3_RLC0_DOORBELL_OFFSET +#define SDMA3_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA3_RLC0_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA3_RLC0_CSA_ADDR_LO +#define SDMA3_RLC0_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA3_RLC0_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA3_RLC0_CSA_ADDR_HI +#define SDMA3_RLC0_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA3_RLC0_CSA_ADDR_HI__ADDR_MASK 
0xFFFFFFFFL +//SDMA3_RLC0_IB_SUB_REMAIN +#define SDMA3_RLC0_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA3_RLC0_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA3_RLC0_PREEMPT +#define SDMA3_RLC0_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA3_RLC0_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA3_RLC0_DUMMY_REG +#define SDMA3_RLC0_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA3_RLC0_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA3_RLC0_RB_WPTR_POLL_ADDR_HI +#define SDMA3_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA3_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA3_RLC0_RB_WPTR_POLL_ADDR_LO +#define SDMA3_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA3_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA3_RLC0_RB_AQL_CNTL +#define SDMA3_RLC0_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA3_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA3_RLC0_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA3_RLC0_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA3_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA3_RLC0_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA3_RLC0_MINOR_PTR_UPDATE +#define SDMA3_RLC0_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA3_RLC0_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA3_RLC0_MIDCMD_DATA0 +#define SDMA3_RLC0_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA3_RLC0_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA3_RLC0_MIDCMD_DATA1 +#define SDMA3_RLC0_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA3_RLC0_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA3_RLC0_MIDCMD_DATA2 +#define SDMA3_RLC0_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA3_RLC0_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA3_RLC0_MIDCMD_DATA3 +#define SDMA3_RLC0_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA3_RLC0_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA3_RLC0_MIDCMD_DATA4 +#define SDMA3_RLC0_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA3_RLC0_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA3_RLC0_MIDCMD_DATA5 +#define SDMA3_RLC0_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA3_RLC0_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA3_RLC0_MIDCMD_DATA6 +#define SDMA3_RLC0_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA3_RLC0_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA3_RLC0_MIDCMD_DATA7 +#define SDMA3_RLC0_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA3_RLC0_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA3_RLC0_MIDCMD_DATA8 +#define SDMA3_RLC0_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA3_RLC0_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA3_RLC0_MIDCMD_CNTL +#define SDMA3_RLC0_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA3_RLC0_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA3_RLC0_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA3_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA3_RLC0_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA3_RLC0_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA3_RLC0_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA3_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA3_RLC1_RB_CNTL +#define SDMA3_RLC1_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA3_RLC1_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA3_RLC1_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA3_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA3_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA3_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA3_RLC1_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA3_RLC1_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA3_RLC1_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA3_RLC1_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define 
SDMA3_RLC1_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA3_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA3_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA3_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA3_RLC1_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA3_RLC1_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA3_RLC1_RB_BASE +#define SDMA3_RLC1_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA3_RLC1_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA3_RLC1_RB_BASE_HI +#define SDMA3_RLC1_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA3_RLC1_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA3_RLC1_RB_RPTR +#define SDMA3_RLC1_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA3_RLC1_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA3_RLC1_RB_RPTR_HI +#define SDMA3_RLC1_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA3_RLC1_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA3_RLC1_RB_WPTR +#define SDMA3_RLC1_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA3_RLC1_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA3_RLC1_RB_WPTR_HI +#define SDMA3_RLC1_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA3_RLC1_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA3_RLC1_RB_WPTR_POLL_CNTL +#define SDMA3_RLC1_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA3_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA3_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA3_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA3_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA3_RLC1_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA3_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA3_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA3_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA3_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA3_RLC1_RB_RPTR_ADDR_HI +#define SDMA3_RLC1_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA3_RLC1_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA3_RLC1_RB_RPTR_ADDR_LO +#define SDMA3_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA3_RLC1_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA3_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA3_RLC1_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA3_RLC1_IB_CNTL +#define SDMA3_RLC1_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA3_RLC1_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA3_RLC1_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA3_RLC1_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA3_RLC1_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA3_RLC1_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA3_RLC1_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA3_RLC1_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA3_RLC1_IB_RPTR +#define SDMA3_RLC1_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA3_RLC1_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA3_RLC1_IB_OFFSET +#define SDMA3_RLC1_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA3_RLC1_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA3_RLC1_IB_BASE_LO +#define SDMA3_RLC1_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA3_RLC1_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA3_RLC1_IB_BASE_HI +#define SDMA3_RLC1_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA3_RLC1_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA3_RLC1_IB_SIZE +#define SDMA3_RLC1_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA3_RLC1_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA3_RLC1_SKIP_CNTL +#define SDMA3_RLC1_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA3_RLC1_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA3_RLC1_CONTEXT_STATUS +#define SDMA3_RLC1_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define 
SDMA3_RLC1_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA3_RLC1_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA3_RLC1_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA3_RLC1_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA3_RLC1_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA3_RLC1_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA3_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA3_RLC1_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA3_RLC1_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA3_RLC1_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA3_RLC1_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA3_RLC1_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA3_RLC1_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA3_RLC1_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA3_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA3_RLC1_DOORBELL +#define SDMA3_RLC1_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA3_RLC1_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA3_RLC1_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA3_RLC1_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA3_RLC1_STATUS +#define SDMA3_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA3_RLC1_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA3_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA3_RLC1_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA3_RLC1_DOORBELL_LOG +#define SDMA3_RLC1_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA3_RLC1_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA3_RLC1_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA3_RLC1_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA3_RLC1_WATERMARK +#define SDMA3_RLC1_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA3_RLC1_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA3_RLC1_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA3_RLC1_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA3_RLC1_DOORBELL_OFFSET +#define SDMA3_RLC1_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA3_RLC1_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA3_RLC1_CSA_ADDR_LO +#define SDMA3_RLC1_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA3_RLC1_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA3_RLC1_CSA_ADDR_HI +#define SDMA3_RLC1_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA3_RLC1_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA3_RLC1_IB_SUB_REMAIN +#define SDMA3_RLC1_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA3_RLC1_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA3_RLC1_PREEMPT +#define SDMA3_RLC1_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA3_RLC1_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA3_RLC1_DUMMY_REG +#define SDMA3_RLC1_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA3_RLC1_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA3_RLC1_RB_WPTR_POLL_ADDR_HI +#define SDMA3_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA3_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA3_RLC1_RB_WPTR_POLL_ADDR_LO +#define SDMA3_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA3_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA3_RLC1_RB_AQL_CNTL +#define SDMA3_RLC1_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA3_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA3_RLC1_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA3_RLC1_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA3_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA3_RLC1_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA3_RLC1_MINOR_PTR_UPDATE +#define SDMA3_RLC1_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA3_RLC1_MINOR_PTR_UPDATE__ENABLE_MASK 
0x00000001L +//SDMA3_RLC1_MIDCMD_DATA0 +#define SDMA3_RLC1_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA3_RLC1_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA3_RLC1_MIDCMD_DATA1 +#define SDMA3_RLC1_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA3_RLC1_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA3_RLC1_MIDCMD_DATA2 +#define SDMA3_RLC1_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA3_RLC1_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA3_RLC1_MIDCMD_DATA3 +#define SDMA3_RLC1_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA3_RLC1_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA3_RLC1_MIDCMD_DATA4 +#define SDMA3_RLC1_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA3_RLC1_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA3_RLC1_MIDCMD_DATA5 +#define SDMA3_RLC1_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA3_RLC1_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA3_RLC1_MIDCMD_DATA6 +#define SDMA3_RLC1_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA3_RLC1_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA3_RLC1_MIDCMD_DATA7 +#define SDMA3_RLC1_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA3_RLC1_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA3_RLC1_MIDCMD_DATA8 +#define SDMA3_RLC1_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA3_RLC1_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA3_RLC1_MIDCMD_CNTL +#define SDMA3_RLC1_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA3_RLC1_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA3_RLC1_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA3_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA3_RLC1_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA3_RLC1_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA3_RLC1_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA3_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA3_RLC2_RB_CNTL +#define SDMA3_RLC2_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA3_RLC2_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA3_RLC2_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA3_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA3_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA3_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA3_RLC2_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA3_RLC2_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA3_RLC2_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA3_RLC2_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA3_RLC2_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA3_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA3_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA3_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA3_RLC2_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA3_RLC2_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA3_RLC2_RB_BASE +#define SDMA3_RLC2_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA3_RLC2_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA3_RLC2_RB_BASE_HI +#define SDMA3_RLC2_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA3_RLC2_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA3_RLC2_RB_RPTR +#define SDMA3_RLC2_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA3_RLC2_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA3_RLC2_RB_RPTR_HI +#define SDMA3_RLC2_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA3_RLC2_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA3_RLC2_RB_WPTR +#define SDMA3_RLC2_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA3_RLC2_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA3_RLC2_RB_WPTR_HI +#define SDMA3_RLC2_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA3_RLC2_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA3_RLC2_RB_WPTR_POLL_CNTL +#define SDMA3_RLC2_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define 
SDMA3_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA3_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA3_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA3_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA3_RLC2_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA3_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA3_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA3_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA3_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA3_RLC2_RB_RPTR_ADDR_HI +#define SDMA3_RLC2_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA3_RLC2_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA3_RLC2_RB_RPTR_ADDR_LO +#define SDMA3_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA3_RLC2_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA3_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA3_RLC2_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA3_RLC2_IB_CNTL +#define SDMA3_RLC2_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA3_RLC2_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA3_RLC2_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA3_RLC2_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA3_RLC2_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA3_RLC2_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA3_RLC2_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA3_RLC2_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA3_RLC2_IB_RPTR +#define SDMA3_RLC2_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA3_RLC2_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA3_RLC2_IB_OFFSET +#define SDMA3_RLC2_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA3_RLC2_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA3_RLC2_IB_BASE_LO +#define SDMA3_RLC2_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA3_RLC2_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA3_RLC2_IB_BASE_HI +#define SDMA3_RLC2_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA3_RLC2_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA3_RLC2_IB_SIZE +#define SDMA3_RLC2_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA3_RLC2_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA3_RLC2_SKIP_CNTL +#define SDMA3_RLC2_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA3_RLC2_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA3_RLC2_CONTEXT_STATUS +#define SDMA3_RLC2_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA3_RLC2_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA3_RLC2_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA3_RLC2_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA3_RLC2_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA3_RLC2_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA3_RLC2_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA3_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA3_RLC2_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA3_RLC2_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA3_RLC2_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA3_RLC2_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA3_RLC2_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA3_RLC2_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA3_RLC2_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA3_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA3_RLC2_DOORBELL +#define SDMA3_RLC2_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA3_RLC2_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA3_RLC2_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA3_RLC2_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA3_RLC2_STATUS +#define SDMA3_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define 
SDMA3_RLC2_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA3_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA3_RLC2_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA3_RLC2_DOORBELL_LOG +#define SDMA3_RLC2_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA3_RLC2_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA3_RLC2_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA3_RLC2_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA3_RLC2_WATERMARK +#define SDMA3_RLC2_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA3_RLC2_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA3_RLC2_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA3_RLC2_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA3_RLC2_DOORBELL_OFFSET +#define SDMA3_RLC2_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA3_RLC2_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA3_RLC2_CSA_ADDR_LO +#define SDMA3_RLC2_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA3_RLC2_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA3_RLC2_CSA_ADDR_HI +#define SDMA3_RLC2_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA3_RLC2_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA3_RLC2_IB_SUB_REMAIN +#define SDMA3_RLC2_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA3_RLC2_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA3_RLC2_PREEMPT +#define SDMA3_RLC2_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA3_RLC2_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA3_RLC2_DUMMY_REG +#define SDMA3_RLC2_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA3_RLC2_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA3_RLC2_RB_WPTR_POLL_ADDR_HI +#define SDMA3_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA3_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA3_RLC2_RB_WPTR_POLL_ADDR_LO +#define SDMA3_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA3_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA3_RLC2_RB_AQL_CNTL +#define SDMA3_RLC2_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA3_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA3_RLC2_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA3_RLC2_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA3_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA3_RLC2_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA3_RLC2_MINOR_PTR_UPDATE +#define SDMA3_RLC2_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA3_RLC2_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA3_RLC2_MIDCMD_DATA0 +#define SDMA3_RLC2_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA3_RLC2_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA3_RLC2_MIDCMD_DATA1 +#define SDMA3_RLC2_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA3_RLC2_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA3_RLC2_MIDCMD_DATA2 +#define SDMA3_RLC2_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA3_RLC2_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA3_RLC2_MIDCMD_DATA3 +#define SDMA3_RLC2_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA3_RLC2_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA3_RLC2_MIDCMD_DATA4 +#define SDMA3_RLC2_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA3_RLC2_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA3_RLC2_MIDCMD_DATA5 +#define SDMA3_RLC2_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA3_RLC2_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA3_RLC2_MIDCMD_DATA6 +#define SDMA3_RLC2_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA3_RLC2_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA3_RLC2_MIDCMD_DATA7 +#define SDMA3_RLC2_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA3_RLC2_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA3_RLC2_MIDCMD_DATA8 +#define SDMA3_RLC2_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA3_RLC2_MIDCMD_DATA8__DATA8_MASK 
0xFFFFFFFFL +//SDMA3_RLC2_MIDCMD_CNTL +#define SDMA3_RLC2_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA3_RLC2_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA3_RLC2_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA3_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA3_RLC2_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA3_RLC2_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA3_RLC2_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA3_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA3_RLC3_RB_CNTL +#define SDMA3_RLC3_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA3_RLC3_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA3_RLC3_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA3_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA3_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA3_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA3_RLC3_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA3_RLC3_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA3_RLC3_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA3_RLC3_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA3_RLC3_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA3_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA3_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA3_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA3_RLC3_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA3_RLC3_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA3_RLC3_RB_BASE +#define SDMA3_RLC3_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA3_RLC3_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA3_RLC3_RB_BASE_HI +#define SDMA3_RLC3_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA3_RLC3_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA3_RLC3_RB_RPTR +#define SDMA3_RLC3_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA3_RLC3_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA3_RLC3_RB_RPTR_HI +#define SDMA3_RLC3_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA3_RLC3_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA3_RLC3_RB_WPTR +#define SDMA3_RLC3_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA3_RLC3_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA3_RLC3_RB_WPTR_HI +#define SDMA3_RLC3_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA3_RLC3_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA3_RLC3_RB_WPTR_POLL_CNTL +#define SDMA3_RLC3_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA3_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA3_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA3_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA3_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA3_RLC3_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA3_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA3_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA3_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA3_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA3_RLC3_RB_RPTR_ADDR_HI +#define SDMA3_RLC3_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA3_RLC3_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA3_RLC3_RB_RPTR_ADDR_LO +#define SDMA3_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA3_RLC3_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA3_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA3_RLC3_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA3_RLC3_IB_CNTL +#define SDMA3_RLC3_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA3_RLC3_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA3_RLC3_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA3_RLC3_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define 
SDMA3_RLC3_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA3_RLC3_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA3_RLC3_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA3_RLC3_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA3_RLC3_IB_RPTR +#define SDMA3_RLC3_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA3_RLC3_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA3_RLC3_IB_OFFSET +#define SDMA3_RLC3_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA3_RLC3_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA3_RLC3_IB_BASE_LO +#define SDMA3_RLC3_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA3_RLC3_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA3_RLC3_IB_BASE_HI +#define SDMA3_RLC3_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA3_RLC3_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA3_RLC3_IB_SIZE +#define SDMA3_RLC3_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA3_RLC3_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA3_RLC3_SKIP_CNTL +#define SDMA3_RLC3_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA3_RLC3_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA3_RLC3_CONTEXT_STATUS +#define SDMA3_RLC3_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA3_RLC3_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA3_RLC3_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA3_RLC3_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA3_RLC3_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA3_RLC3_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA3_RLC3_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA3_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA3_RLC3_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA3_RLC3_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA3_RLC3_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA3_RLC3_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA3_RLC3_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA3_RLC3_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA3_RLC3_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA3_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA3_RLC3_DOORBELL +#define SDMA3_RLC3_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA3_RLC3_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA3_RLC3_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA3_RLC3_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA3_RLC3_STATUS +#define SDMA3_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA3_RLC3_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA3_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA3_RLC3_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA3_RLC3_DOORBELL_LOG +#define SDMA3_RLC3_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA3_RLC3_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA3_RLC3_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA3_RLC3_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA3_RLC3_WATERMARK +#define SDMA3_RLC3_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA3_RLC3_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA3_RLC3_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA3_RLC3_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA3_RLC3_DOORBELL_OFFSET +#define SDMA3_RLC3_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA3_RLC3_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA3_RLC3_CSA_ADDR_LO +#define SDMA3_RLC3_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA3_RLC3_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA3_RLC3_CSA_ADDR_HI +#define SDMA3_RLC3_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA3_RLC3_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA3_RLC3_IB_SUB_REMAIN +#define SDMA3_RLC3_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA3_RLC3_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL 
+//SDMA3_RLC3_PREEMPT +#define SDMA3_RLC3_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA3_RLC3_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA3_RLC3_DUMMY_REG +#define SDMA3_RLC3_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA3_RLC3_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA3_RLC3_RB_WPTR_POLL_ADDR_HI +#define SDMA3_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA3_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA3_RLC3_RB_WPTR_POLL_ADDR_LO +#define SDMA3_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA3_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA3_RLC3_RB_AQL_CNTL +#define SDMA3_RLC3_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA3_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA3_RLC3_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA3_RLC3_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA3_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA3_RLC3_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA3_RLC3_MINOR_PTR_UPDATE +#define SDMA3_RLC3_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA3_RLC3_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA3_RLC3_MIDCMD_DATA0 +#define SDMA3_RLC3_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA3_RLC3_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA3_RLC3_MIDCMD_DATA1 +#define SDMA3_RLC3_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA3_RLC3_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA3_RLC3_MIDCMD_DATA2 +#define SDMA3_RLC3_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA3_RLC3_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA3_RLC3_MIDCMD_DATA3 +#define SDMA3_RLC3_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA3_RLC3_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA3_RLC3_MIDCMD_DATA4 +#define SDMA3_RLC3_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA3_RLC3_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA3_RLC3_MIDCMD_DATA5 +#define SDMA3_RLC3_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA3_RLC3_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA3_RLC3_MIDCMD_DATA6 +#define SDMA3_RLC3_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA3_RLC3_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA3_RLC3_MIDCMD_DATA7 +#define SDMA3_RLC3_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA3_RLC3_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA3_RLC3_MIDCMD_DATA8 +#define SDMA3_RLC3_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA3_RLC3_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA3_RLC3_MIDCMD_CNTL +#define SDMA3_RLC3_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA3_RLC3_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA3_RLC3_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA3_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA3_RLC3_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA3_RLC3_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA3_RLC3_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA3_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA3_RLC4_RB_CNTL +#define SDMA3_RLC4_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA3_RLC4_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA3_RLC4_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA3_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA3_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA3_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA3_RLC4_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA3_RLC4_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA3_RLC4_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA3_RLC4_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA3_RLC4_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA3_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define 
SDMA3_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA3_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA3_RLC4_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA3_RLC4_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA3_RLC4_RB_BASE +#define SDMA3_RLC4_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA3_RLC4_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA3_RLC4_RB_BASE_HI +#define SDMA3_RLC4_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA3_RLC4_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA3_RLC4_RB_RPTR +#define SDMA3_RLC4_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA3_RLC4_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA3_RLC4_RB_RPTR_HI +#define SDMA3_RLC4_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA3_RLC4_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA3_RLC4_RB_WPTR +#define SDMA3_RLC4_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA3_RLC4_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA3_RLC4_RB_WPTR_HI +#define SDMA3_RLC4_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA3_RLC4_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA3_RLC4_RB_WPTR_POLL_CNTL +#define SDMA3_RLC4_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA3_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA3_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA3_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA3_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA3_RLC4_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA3_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA3_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA3_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA3_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA3_RLC4_RB_RPTR_ADDR_HI +#define SDMA3_RLC4_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA3_RLC4_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA3_RLC4_RB_RPTR_ADDR_LO +#define SDMA3_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA3_RLC4_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA3_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA3_RLC4_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA3_RLC4_IB_CNTL +#define SDMA3_RLC4_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA3_RLC4_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA3_RLC4_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA3_RLC4_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA3_RLC4_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA3_RLC4_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA3_RLC4_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA3_RLC4_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA3_RLC4_IB_RPTR +#define SDMA3_RLC4_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA3_RLC4_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA3_RLC4_IB_OFFSET +#define SDMA3_RLC4_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA3_RLC4_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA3_RLC4_IB_BASE_LO +#define SDMA3_RLC4_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA3_RLC4_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA3_RLC4_IB_BASE_HI +#define SDMA3_RLC4_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA3_RLC4_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA3_RLC4_IB_SIZE +#define SDMA3_RLC4_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA3_RLC4_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA3_RLC4_SKIP_CNTL +#define SDMA3_RLC4_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA3_RLC4_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA3_RLC4_CONTEXT_STATUS +#define SDMA3_RLC4_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA3_RLC4_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA3_RLC4_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define 
SDMA3_RLC4_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA3_RLC4_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA3_RLC4_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA3_RLC4_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA3_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA3_RLC4_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA3_RLC4_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA3_RLC4_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA3_RLC4_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA3_RLC4_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA3_RLC4_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA3_RLC4_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA3_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA3_RLC4_DOORBELL +#define SDMA3_RLC4_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA3_RLC4_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA3_RLC4_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA3_RLC4_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA3_RLC4_STATUS +#define SDMA3_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA3_RLC4_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA3_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA3_RLC4_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA3_RLC4_DOORBELL_LOG +#define SDMA3_RLC4_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA3_RLC4_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA3_RLC4_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA3_RLC4_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA3_RLC4_WATERMARK +#define SDMA3_RLC4_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA3_RLC4_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA3_RLC4_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA3_RLC4_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA3_RLC4_DOORBELL_OFFSET +#define SDMA3_RLC4_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA3_RLC4_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA3_RLC4_CSA_ADDR_LO +#define SDMA3_RLC4_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA3_RLC4_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA3_RLC4_CSA_ADDR_HI +#define SDMA3_RLC4_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA3_RLC4_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA3_RLC4_IB_SUB_REMAIN +#define SDMA3_RLC4_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA3_RLC4_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA3_RLC4_PREEMPT +#define SDMA3_RLC4_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA3_RLC4_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA3_RLC4_DUMMY_REG +#define SDMA3_RLC4_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA3_RLC4_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA3_RLC4_RB_WPTR_POLL_ADDR_HI +#define SDMA3_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA3_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA3_RLC4_RB_WPTR_POLL_ADDR_LO +#define SDMA3_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA3_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA3_RLC4_RB_AQL_CNTL +#define SDMA3_RLC4_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA3_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA3_RLC4_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA3_RLC4_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA3_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA3_RLC4_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA3_RLC4_MINOR_PTR_UPDATE +#define SDMA3_RLC4_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA3_RLC4_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA3_RLC4_MIDCMD_DATA0 +#define SDMA3_RLC4_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define 
SDMA3_RLC4_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA3_RLC4_MIDCMD_DATA1 +#define SDMA3_RLC4_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA3_RLC4_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA3_RLC4_MIDCMD_DATA2 +#define SDMA3_RLC4_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA3_RLC4_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA3_RLC4_MIDCMD_DATA3 +#define SDMA3_RLC4_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA3_RLC4_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA3_RLC4_MIDCMD_DATA4 +#define SDMA3_RLC4_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA3_RLC4_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA3_RLC4_MIDCMD_DATA5 +#define SDMA3_RLC4_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA3_RLC4_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA3_RLC4_MIDCMD_DATA6 +#define SDMA3_RLC4_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA3_RLC4_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA3_RLC4_MIDCMD_DATA7 +#define SDMA3_RLC4_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA3_RLC4_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA3_RLC4_MIDCMD_DATA8 +#define SDMA3_RLC4_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA3_RLC4_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA3_RLC4_MIDCMD_CNTL +#define SDMA3_RLC4_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA3_RLC4_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA3_RLC4_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA3_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA3_RLC4_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA3_RLC4_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA3_RLC4_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA3_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA3_RLC5_RB_CNTL +#define SDMA3_RLC5_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA3_RLC5_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA3_RLC5_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA3_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA3_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA3_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA3_RLC5_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA3_RLC5_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA3_RLC5_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA3_RLC5_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA3_RLC5_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA3_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA3_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA3_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA3_RLC5_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA3_RLC5_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA3_RLC5_RB_BASE +#define SDMA3_RLC5_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA3_RLC5_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA3_RLC5_RB_BASE_HI +#define SDMA3_RLC5_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA3_RLC5_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA3_RLC5_RB_RPTR +#define SDMA3_RLC5_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA3_RLC5_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA3_RLC5_RB_RPTR_HI +#define SDMA3_RLC5_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA3_RLC5_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA3_RLC5_RB_WPTR +#define SDMA3_RLC5_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA3_RLC5_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA3_RLC5_RB_WPTR_HI +#define SDMA3_RLC5_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA3_RLC5_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA3_RLC5_RB_WPTR_POLL_CNTL +#define SDMA3_RLC5_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA3_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA3_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define 
SDMA3_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA3_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA3_RLC5_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA3_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA3_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA3_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA3_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA3_RLC5_RB_RPTR_ADDR_HI +#define SDMA3_RLC5_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA3_RLC5_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA3_RLC5_RB_RPTR_ADDR_LO +#define SDMA3_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA3_RLC5_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA3_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA3_RLC5_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA3_RLC5_IB_CNTL +#define SDMA3_RLC5_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA3_RLC5_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA3_RLC5_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA3_RLC5_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA3_RLC5_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA3_RLC5_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA3_RLC5_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA3_RLC5_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA3_RLC5_IB_RPTR +#define SDMA3_RLC5_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA3_RLC5_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA3_RLC5_IB_OFFSET +#define SDMA3_RLC5_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA3_RLC5_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA3_RLC5_IB_BASE_LO +#define SDMA3_RLC5_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA3_RLC5_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA3_RLC5_IB_BASE_HI +#define SDMA3_RLC5_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA3_RLC5_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA3_RLC5_IB_SIZE +#define SDMA3_RLC5_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA3_RLC5_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA3_RLC5_SKIP_CNTL +#define SDMA3_RLC5_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA3_RLC5_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA3_RLC5_CONTEXT_STATUS +#define SDMA3_RLC5_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA3_RLC5_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA3_RLC5_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA3_RLC5_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA3_RLC5_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA3_RLC5_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA3_RLC5_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA3_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA3_RLC5_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA3_RLC5_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA3_RLC5_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA3_RLC5_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA3_RLC5_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA3_RLC5_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA3_RLC5_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA3_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA3_RLC5_DOORBELL +#define SDMA3_RLC5_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA3_RLC5_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA3_RLC5_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA3_RLC5_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA3_RLC5_STATUS +#define SDMA3_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA3_RLC5_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA3_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define 
SDMA3_RLC5_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA3_RLC5_DOORBELL_LOG +#define SDMA3_RLC5_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA3_RLC5_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA3_RLC5_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA3_RLC5_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA3_RLC5_WATERMARK +#define SDMA3_RLC5_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA3_RLC5_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA3_RLC5_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA3_RLC5_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA3_RLC5_DOORBELL_OFFSET +#define SDMA3_RLC5_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA3_RLC5_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA3_RLC5_CSA_ADDR_LO +#define SDMA3_RLC5_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA3_RLC5_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA3_RLC5_CSA_ADDR_HI +#define SDMA3_RLC5_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA3_RLC5_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA3_RLC5_IB_SUB_REMAIN +#define SDMA3_RLC5_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA3_RLC5_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA3_RLC5_PREEMPT +#define SDMA3_RLC5_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA3_RLC5_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA3_RLC5_DUMMY_REG +#define SDMA3_RLC5_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA3_RLC5_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA3_RLC5_RB_WPTR_POLL_ADDR_HI +#define SDMA3_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA3_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA3_RLC5_RB_WPTR_POLL_ADDR_LO +#define SDMA3_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA3_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA3_RLC5_RB_AQL_CNTL +#define SDMA3_RLC5_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA3_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA3_RLC5_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA3_RLC5_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA3_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA3_RLC5_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA3_RLC5_MINOR_PTR_UPDATE +#define SDMA3_RLC5_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA3_RLC5_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA3_RLC5_MIDCMD_DATA0 +#define SDMA3_RLC5_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA3_RLC5_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA3_RLC5_MIDCMD_DATA1 +#define SDMA3_RLC5_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA3_RLC5_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA3_RLC5_MIDCMD_DATA2 +#define SDMA3_RLC5_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA3_RLC5_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA3_RLC5_MIDCMD_DATA3 +#define SDMA3_RLC5_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA3_RLC5_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA3_RLC5_MIDCMD_DATA4 +#define SDMA3_RLC5_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA3_RLC5_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA3_RLC5_MIDCMD_DATA5 +#define SDMA3_RLC5_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA3_RLC5_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA3_RLC5_MIDCMD_DATA6 +#define SDMA3_RLC5_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA3_RLC5_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA3_RLC5_MIDCMD_DATA7 +#define SDMA3_RLC5_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA3_RLC5_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA3_RLC5_MIDCMD_DATA8 +#define SDMA3_RLC5_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA3_RLC5_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA3_RLC5_MIDCMD_CNTL +#define SDMA3_RLC5_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define 
SDMA3_RLC5_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA3_RLC5_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA3_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA3_RLC5_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA3_RLC5_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA3_RLC5_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA3_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA3_RLC6_RB_CNTL +#define SDMA3_RLC6_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA3_RLC6_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA3_RLC6_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA3_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA3_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA3_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA3_RLC6_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA3_RLC6_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA3_RLC6_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA3_RLC6_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA3_RLC6_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA3_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA3_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA3_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA3_RLC6_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA3_RLC6_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA3_RLC6_RB_BASE +#define SDMA3_RLC6_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA3_RLC6_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA3_RLC6_RB_BASE_HI +#define SDMA3_RLC6_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA3_RLC6_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA3_RLC6_RB_RPTR +#define SDMA3_RLC6_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA3_RLC6_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA3_RLC6_RB_RPTR_HI +#define SDMA3_RLC6_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA3_RLC6_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA3_RLC6_RB_WPTR +#define SDMA3_RLC6_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA3_RLC6_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA3_RLC6_RB_WPTR_HI +#define SDMA3_RLC6_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA3_RLC6_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA3_RLC6_RB_WPTR_POLL_CNTL +#define SDMA3_RLC6_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA3_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA3_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA3_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA3_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA3_RLC6_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA3_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA3_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA3_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA3_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA3_RLC6_RB_RPTR_ADDR_HI +#define SDMA3_RLC6_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA3_RLC6_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA3_RLC6_RB_RPTR_ADDR_LO +#define SDMA3_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA3_RLC6_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA3_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA3_RLC6_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA3_RLC6_IB_CNTL +#define SDMA3_RLC6_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA3_RLC6_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA3_RLC6_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA3_RLC6_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA3_RLC6_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA3_RLC6_IB_CNTL__IB_SWAP_ENABLE_MASK 
0x00000010L +#define SDMA3_RLC6_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA3_RLC6_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA3_RLC6_IB_RPTR +#define SDMA3_RLC6_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA3_RLC6_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA3_RLC6_IB_OFFSET +#define SDMA3_RLC6_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA3_RLC6_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA3_RLC6_IB_BASE_LO +#define SDMA3_RLC6_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA3_RLC6_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA3_RLC6_IB_BASE_HI +#define SDMA3_RLC6_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA3_RLC6_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA3_RLC6_IB_SIZE +#define SDMA3_RLC6_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA3_RLC6_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA3_RLC6_SKIP_CNTL +#define SDMA3_RLC6_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA3_RLC6_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA3_RLC6_CONTEXT_STATUS +#define SDMA3_RLC6_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA3_RLC6_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA3_RLC6_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA3_RLC6_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA3_RLC6_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA3_RLC6_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA3_RLC6_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA3_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA3_RLC6_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA3_RLC6_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA3_RLC6_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA3_RLC6_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA3_RLC6_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA3_RLC6_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA3_RLC6_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA3_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA3_RLC6_DOORBELL +#define SDMA3_RLC6_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA3_RLC6_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA3_RLC6_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA3_RLC6_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA3_RLC6_STATUS +#define SDMA3_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA3_RLC6_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA3_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA3_RLC6_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA3_RLC6_DOORBELL_LOG +#define SDMA3_RLC6_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA3_RLC6_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA3_RLC6_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA3_RLC6_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA3_RLC6_WATERMARK +#define SDMA3_RLC6_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA3_RLC6_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA3_RLC6_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA3_RLC6_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA3_RLC6_DOORBELL_OFFSET +#define SDMA3_RLC6_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA3_RLC6_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA3_RLC6_CSA_ADDR_LO +#define SDMA3_RLC6_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA3_RLC6_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA3_RLC6_CSA_ADDR_HI +#define SDMA3_RLC6_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA3_RLC6_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA3_RLC6_IB_SUB_REMAIN +#define SDMA3_RLC6_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA3_RLC6_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA3_RLC6_PREEMPT +#define SDMA3_RLC6_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define 
SDMA3_RLC6_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA3_RLC6_DUMMY_REG +#define SDMA3_RLC6_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA3_RLC6_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA3_RLC6_RB_WPTR_POLL_ADDR_HI +#define SDMA3_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA3_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA3_RLC6_RB_WPTR_POLL_ADDR_LO +#define SDMA3_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA3_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA3_RLC6_RB_AQL_CNTL +#define SDMA3_RLC6_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA3_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA3_RLC6_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA3_RLC6_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA3_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA3_RLC6_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA3_RLC6_MINOR_PTR_UPDATE +#define SDMA3_RLC6_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA3_RLC6_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA3_RLC6_MIDCMD_DATA0 +#define SDMA3_RLC6_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA3_RLC6_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA3_RLC6_MIDCMD_DATA1 +#define SDMA3_RLC6_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA3_RLC6_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA3_RLC6_MIDCMD_DATA2 +#define SDMA3_RLC6_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA3_RLC6_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA3_RLC6_MIDCMD_DATA3 +#define SDMA3_RLC6_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA3_RLC6_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA3_RLC6_MIDCMD_DATA4 +#define SDMA3_RLC6_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA3_RLC6_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA3_RLC6_MIDCMD_DATA5 +#define SDMA3_RLC6_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA3_RLC6_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA3_RLC6_MIDCMD_DATA6 +#define SDMA3_RLC6_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA3_RLC6_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA3_RLC6_MIDCMD_DATA7 +#define SDMA3_RLC6_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA3_RLC6_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA3_RLC6_MIDCMD_DATA8 +#define SDMA3_RLC6_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA3_RLC6_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA3_RLC6_MIDCMD_CNTL +#define SDMA3_RLC6_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA3_RLC6_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA3_RLC6_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA3_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA3_RLC6_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA3_RLC6_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA3_RLC6_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA3_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA3_RLC7_RB_CNTL +#define SDMA3_RLC7_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA3_RLC7_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA3_RLC7_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA3_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA3_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA3_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA3_RLC7_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA3_RLC7_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA3_RLC7_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA3_RLC7_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA3_RLC7_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA3_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA3_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define 
SDMA3_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA3_RLC7_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA3_RLC7_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA3_RLC7_RB_BASE +#define SDMA3_RLC7_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA3_RLC7_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA3_RLC7_RB_BASE_HI +#define SDMA3_RLC7_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA3_RLC7_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA3_RLC7_RB_RPTR +#define SDMA3_RLC7_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA3_RLC7_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA3_RLC7_RB_RPTR_HI +#define SDMA3_RLC7_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA3_RLC7_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA3_RLC7_RB_WPTR +#define SDMA3_RLC7_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA3_RLC7_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA3_RLC7_RB_WPTR_HI +#define SDMA3_RLC7_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA3_RLC7_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA3_RLC7_RB_WPTR_POLL_CNTL +#define SDMA3_RLC7_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA3_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA3_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA3_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA3_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA3_RLC7_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA3_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA3_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA3_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA3_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA3_RLC7_RB_RPTR_ADDR_HI +#define SDMA3_RLC7_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA3_RLC7_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA3_RLC7_RB_RPTR_ADDR_LO +#define SDMA3_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA3_RLC7_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA3_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA3_RLC7_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA3_RLC7_IB_CNTL +#define SDMA3_RLC7_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA3_RLC7_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA3_RLC7_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA3_RLC7_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA3_RLC7_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA3_RLC7_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA3_RLC7_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA3_RLC7_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA3_RLC7_IB_RPTR +#define SDMA3_RLC7_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA3_RLC7_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA3_RLC7_IB_OFFSET +#define SDMA3_RLC7_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA3_RLC7_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA3_RLC7_IB_BASE_LO +#define SDMA3_RLC7_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA3_RLC7_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA3_RLC7_IB_BASE_HI +#define SDMA3_RLC7_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA3_RLC7_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA3_RLC7_IB_SIZE +#define SDMA3_RLC7_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA3_RLC7_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA3_RLC7_SKIP_CNTL +#define SDMA3_RLC7_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA3_RLC7_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA3_RLC7_CONTEXT_STATUS +#define SDMA3_RLC7_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA3_RLC7_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA3_RLC7_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA3_RLC7_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA3_RLC7_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 
+#define SDMA3_RLC7_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA3_RLC7_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA3_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA3_RLC7_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA3_RLC7_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA3_RLC7_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA3_RLC7_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA3_RLC7_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA3_RLC7_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA3_RLC7_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA3_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA3_RLC7_DOORBELL +#define SDMA3_RLC7_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA3_RLC7_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA3_RLC7_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA3_RLC7_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA3_RLC7_STATUS +#define SDMA3_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA3_RLC7_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA3_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA3_RLC7_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA3_RLC7_DOORBELL_LOG +#define SDMA3_RLC7_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA3_RLC7_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA3_RLC7_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA3_RLC7_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA3_RLC7_WATERMARK +#define SDMA3_RLC7_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA3_RLC7_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA3_RLC7_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA3_RLC7_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA3_RLC7_DOORBELL_OFFSET +#define SDMA3_RLC7_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA3_RLC7_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA3_RLC7_CSA_ADDR_LO +#define SDMA3_RLC7_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA3_RLC7_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA3_RLC7_CSA_ADDR_HI +#define SDMA3_RLC7_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA3_RLC7_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA3_RLC7_IB_SUB_REMAIN +#define SDMA3_RLC7_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA3_RLC7_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA3_RLC7_PREEMPT +#define SDMA3_RLC7_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA3_RLC7_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA3_RLC7_DUMMY_REG +#define SDMA3_RLC7_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA3_RLC7_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA3_RLC7_RB_WPTR_POLL_ADDR_HI +#define SDMA3_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA3_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA3_RLC7_RB_WPTR_POLL_ADDR_LO +#define SDMA3_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA3_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA3_RLC7_RB_AQL_CNTL +#define SDMA3_RLC7_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA3_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA3_RLC7_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA3_RLC7_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA3_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA3_RLC7_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA3_RLC7_MINOR_PTR_UPDATE +#define SDMA3_RLC7_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA3_RLC7_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA3_RLC7_MIDCMD_DATA0 +#define SDMA3_RLC7_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA3_RLC7_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA3_RLC7_MIDCMD_DATA1 +#define 
SDMA3_RLC7_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA3_RLC7_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA3_RLC7_MIDCMD_DATA2 +#define SDMA3_RLC7_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA3_RLC7_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA3_RLC7_MIDCMD_DATA3 +#define SDMA3_RLC7_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA3_RLC7_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA3_RLC7_MIDCMD_DATA4 +#define SDMA3_RLC7_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA3_RLC7_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA3_RLC7_MIDCMD_DATA5 +#define SDMA3_RLC7_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA3_RLC7_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA3_RLC7_MIDCMD_DATA6 +#define SDMA3_RLC7_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA3_RLC7_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA3_RLC7_MIDCMD_DATA7 +#define SDMA3_RLC7_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA3_RLC7_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA3_RLC7_MIDCMD_DATA8 +#define SDMA3_RLC7_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA3_RLC7_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA3_RLC7_MIDCMD_CNTL +#define SDMA3_RLC7_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA3_RLC7_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA3_RLC7_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA3_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA3_RLC7_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA3_RLC7_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA3_RLC7_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA3_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma4/sdma4_4_2_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/sdma4/sdma4_4_2_2_offset.h new file mode 100644 index 000000000000..755ffa5781de --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/sdma4/sdma4_4_2_2_offset.h @@ -0,0 +1,1043 @@ +/* + * Copyright (C) 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef _sdma4_4_2_2_OFFSET_HEADER +#define _sdma4_4_2_2_OFFSET_HEADER + + + +// addressBlock: sdma4_sdma4dec +// base address: 0x7a000 +#define mmSDMA4_UCODE_ADDR 0x0000 +#define mmSDMA4_UCODE_ADDR_BASE_IDX 1 +#define mmSDMA4_UCODE_DATA 0x0001 +#define mmSDMA4_UCODE_DATA_BASE_IDX 1 +#define mmSDMA4_VM_CNTL 0x0004 +#define mmSDMA4_VM_CNTL_BASE_IDX 1 +#define mmSDMA4_VM_CTX_LO 0x0005 +#define mmSDMA4_VM_CTX_LO_BASE_IDX 1 +#define mmSDMA4_VM_CTX_HI 0x0006 +#define mmSDMA4_VM_CTX_HI_BASE_IDX 1 +#define mmSDMA4_ACTIVE_FCN_ID 0x0007 +#define mmSDMA4_ACTIVE_FCN_ID_BASE_IDX 1 +#define mmSDMA4_VM_CTX_CNTL 0x0008 +#define mmSDMA4_VM_CTX_CNTL_BASE_IDX 1 +#define mmSDMA4_VIRT_RESET_REQ 0x0009 +#define mmSDMA4_VIRT_RESET_REQ_BASE_IDX 1 +#define mmSDMA4_VF_ENABLE 0x000a +#define mmSDMA4_VF_ENABLE_BASE_IDX 1 +#define mmSDMA4_CONTEXT_REG_TYPE0 0x000b +#define mmSDMA4_CONTEXT_REG_TYPE0_BASE_IDX 1 +#define mmSDMA4_CONTEXT_REG_TYPE1 0x000c +#define mmSDMA4_CONTEXT_REG_TYPE1_BASE_IDX 1 +#define mmSDMA4_CONTEXT_REG_TYPE2 0x000d +#define mmSDMA4_CONTEXT_REG_TYPE2_BASE_IDX 1 +#define mmSDMA4_CONTEXT_REG_TYPE3 0x000e +#define mmSDMA4_CONTEXT_REG_TYPE3_BASE_IDX 1 +#define mmSDMA4_PUB_REG_TYPE0 0x000f +#define mmSDMA4_PUB_REG_TYPE0_BASE_IDX 1 +#define mmSDMA4_PUB_REG_TYPE1 0x0010 +#define mmSDMA4_PUB_REG_TYPE1_BASE_IDX 1 +#define mmSDMA4_PUB_REG_TYPE2 0x0011 +#define mmSDMA4_PUB_REG_TYPE2_BASE_IDX 1 +#define mmSDMA4_PUB_REG_TYPE3 0x0012 +#define mmSDMA4_PUB_REG_TYPE3_BASE_IDX 1 +#define mmSDMA4_MMHUB_CNTL 0x0013 +#define mmSDMA4_MMHUB_CNTL_BASE_IDX 1 +#define mmSDMA4_CONTEXT_GROUP_BOUNDARY 0x0019 +#define mmSDMA4_CONTEXT_GROUP_BOUNDARY_BASE_IDX 1 +#define mmSDMA4_POWER_CNTL 0x001a +#define mmSDMA4_POWER_CNTL_BASE_IDX 1 +#define mmSDMA4_CLK_CTRL 0x001b +#define mmSDMA4_CLK_CTRL_BASE_IDX 1 +#define mmSDMA4_CNTL 0x001c +#define mmSDMA4_CNTL_BASE_IDX 1 +#define mmSDMA4_CHICKEN_BITS 0x001d +#define mmSDMA4_CHICKEN_BITS_BASE_IDX 1 +#define mmSDMA4_GB_ADDR_CONFIG 0x001e +#define mmSDMA4_GB_ADDR_CONFIG_BASE_IDX 1 +#define mmSDMA4_GB_ADDR_CONFIG_READ 0x001f +#define mmSDMA4_GB_ADDR_CONFIG_READ_BASE_IDX 1 +#define mmSDMA4_RB_RPTR_FETCH_HI 0x0020 +#define mmSDMA4_RB_RPTR_FETCH_HI_BASE_IDX 1 +#define mmSDMA4_SEM_WAIT_FAIL_TIMER_CNTL 0x0021 +#define mmSDMA4_SEM_WAIT_FAIL_TIMER_CNTL_BASE_IDX 1 +#define mmSDMA4_RB_RPTR_FETCH 0x0022 +#define mmSDMA4_RB_RPTR_FETCH_BASE_IDX 1 +#define mmSDMA4_IB_OFFSET_FETCH 0x0023 +#define mmSDMA4_IB_OFFSET_FETCH_BASE_IDX 1 +#define mmSDMA4_PROGRAM 0x0024 +#define mmSDMA4_PROGRAM_BASE_IDX 1 +#define mmSDMA4_STATUS_REG 0x0025 +#define mmSDMA4_STATUS_REG_BASE_IDX 1 +#define mmSDMA4_STATUS1_REG 0x0026 +#define mmSDMA4_STATUS1_REG_BASE_IDX 1 +#define mmSDMA4_RD_BURST_CNTL 0x0027 +#define mmSDMA4_RD_BURST_CNTL_BASE_IDX 1 +#define mmSDMA4_HBM_PAGE_CONFIG 0x0028 +#define mmSDMA4_HBM_PAGE_CONFIG_BASE_IDX 1 +#define mmSDMA4_UCODE_CHECKSUM 0x0029 +#define mmSDMA4_UCODE_CHECKSUM_BASE_IDX 1 +#define mmSDMA4_F32_CNTL 0x002a +#define mmSDMA4_F32_CNTL_BASE_IDX 1 +#define mmSDMA4_FREEZE 0x002b +#define mmSDMA4_FREEZE_BASE_IDX 1 +#define mmSDMA4_PHASE0_QUANTUM 0x002c +#define mmSDMA4_PHASE0_QUANTUM_BASE_IDX 1 +#define mmSDMA4_PHASE1_QUANTUM 0x002d +#define mmSDMA4_PHASE1_QUANTUM_BASE_IDX 1 +#define mmSDMA4_EDC_CONFIG 0x0032 +#define mmSDMA4_EDC_CONFIG_BASE_IDX 1 +#define mmSDMA4_BA_THRESHOLD 0x0033 +#define mmSDMA4_BA_THRESHOLD_BASE_IDX 1 +#define mmSDMA4_ID 0x0034 +#define mmSDMA4_ID_BASE_IDX 1 +#define mmSDMA4_VERSION 0x0035 +#define mmSDMA4_VERSION_BASE_IDX 1 +#define mmSDMA4_EDC_COUNTER 0x0036 +#define 
mmSDMA4_EDC_COUNTER_BASE_IDX 1 +#define mmSDMA4_EDC_COUNTER_CLEAR 0x0037 +#define mmSDMA4_EDC_COUNTER_CLEAR_BASE_IDX 1 +#define mmSDMA4_STATUS2_REG 0x0038 +#define mmSDMA4_STATUS2_REG_BASE_IDX 1 +#define mmSDMA4_ATOMIC_CNTL 0x0039 +#define mmSDMA4_ATOMIC_CNTL_BASE_IDX 1 +#define mmSDMA4_ATOMIC_PREOP_LO 0x003a +#define mmSDMA4_ATOMIC_PREOP_LO_BASE_IDX 1 +#define mmSDMA4_ATOMIC_PREOP_HI 0x003b +#define mmSDMA4_ATOMIC_PREOP_HI_BASE_IDX 1 +#define mmSDMA4_UTCL1_CNTL 0x003c +#define mmSDMA4_UTCL1_CNTL_BASE_IDX 1 +#define mmSDMA4_UTCL1_WATERMK 0x003d +#define mmSDMA4_UTCL1_WATERMK_BASE_IDX 1 +#define mmSDMA4_UTCL1_RD_STATUS 0x003e +#define mmSDMA4_UTCL1_RD_STATUS_BASE_IDX 1 +#define mmSDMA4_UTCL1_WR_STATUS 0x003f +#define mmSDMA4_UTCL1_WR_STATUS_BASE_IDX 1 +#define mmSDMA4_UTCL1_INV0 0x0040 +#define mmSDMA4_UTCL1_INV0_BASE_IDX 1 +#define mmSDMA4_UTCL1_INV1 0x0041 +#define mmSDMA4_UTCL1_INV1_BASE_IDX 1 +#define mmSDMA4_UTCL1_INV2 0x0042 +#define mmSDMA4_UTCL1_INV2_BASE_IDX 1 +#define mmSDMA4_UTCL1_RD_XNACK0 0x0043 +#define mmSDMA4_UTCL1_RD_XNACK0_BASE_IDX 1 +#define mmSDMA4_UTCL1_RD_XNACK1 0x0044 +#define mmSDMA4_UTCL1_RD_XNACK1_BASE_IDX 1 +#define mmSDMA4_UTCL1_WR_XNACK0 0x0045 +#define mmSDMA4_UTCL1_WR_XNACK0_BASE_IDX 1 +#define mmSDMA4_UTCL1_WR_XNACK1 0x0046 +#define mmSDMA4_UTCL1_WR_XNACK1_BASE_IDX 1 +#define mmSDMA4_UTCL1_TIMEOUT 0x0047 +#define mmSDMA4_UTCL1_TIMEOUT_BASE_IDX 1 +#define mmSDMA4_UTCL1_PAGE 0x0048 +#define mmSDMA4_UTCL1_PAGE_BASE_IDX 1 +#define mmSDMA4_POWER_CNTL_IDLE 0x0049 +#define mmSDMA4_POWER_CNTL_IDLE_BASE_IDX 1 +#define mmSDMA4_RELAX_ORDERING_LUT 0x004a +#define mmSDMA4_RELAX_ORDERING_LUT_BASE_IDX 1 +#define mmSDMA4_CHICKEN_BITS_2 0x004b +#define mmSDMA4_CHICKEN_BITS_2_BASE_IDX 1 +#define mmSDMA4_STATUS3_REG 0x004c +#define mmSDMA4_STATUS3_REG_BASE_IDX 1 +#define mmSDMA4_PHYSICAL_ADDR_LO 0x004d +#define mmSDMA4_PHYSICAL_ADDR_LO_BASE_IDX 1 +#define mmSDMA4_PHYSICAL_ADDR_HI 0x004e +#define mmSDMA4_PHYSICAL_ADDR_HI_BASE_IDX 1 +#define mmSDMA4_PHASE2_QUANTUM 0x004f +#define mmSDMA4_PHASE2_QUANTUM_BASE_IDX 1 +#define mmSDMA4_ERROR_LOG 0x0050 +#define mmSDMA4_ERROR_LOG_BASE_IDX 1 +#define mmSDMA4_PUB_DUMMY_REG0 0x0051 +#define mmSDMA4_PUB_DUMMY_REG0_BASE_IDX 1 +#define mmSDMA4_PUB_DUMMY_REG1 0x0052 +#define mmSDMA4_PUB_DUMMY_REG1_BASE_IDX 1 +#define mmSDMA4_PUB_DUMMY_REG2 0x0053 +#define mmSDMA4_PUB_DUMMY_REG2_BASE_IDX 1 +#define mmSDMA4_PUB_DUMMY_REG3 0x0054 +#define mmSDMA4_PUB_DUMMY_REG3_BASE_IDX 1 +#define mmSDMA4_F32_COUNTER 0x0055 +#define mmSDMA4_F32_COUNTER_BASE_IDX 1 +#define mmSDMA4_UNBREAKABLE 0x0056 +#define mmSDMA4_UNBREAKABLE_BASE_IDX 1 +#define mmSDMA4_PERFMON_CNTL 0x0057 +#define mmSDMA4_PERFMON_CNTL_BASE_IDX 1 +#define mmSDMA4_PERFCOUNTER0_RESULT 0x0058 +#define mmSDMA4_PERFCOUNTER0_RESULT_BASE_IDX 1 +#define mmSDMA4_PERFCOUNTER1_RESULT 0x0059 +#define mmSDMA4_PERFCOUNTER1_RESULT_BASE_IDX 1 +#define mmSDMA4_PERFCOUNTER_TAG_DELAY_RANGE 0x005a +#define mmSDMA4_PERFCOUNTER_TAG_DELAY_RANGE_BASE_IDX 1 +#define mmSDMA4_CRD_CNTL 0x005b +#define mmSDMA4_CRD_CNTL_BASE_IDX 1 +#define mmSDMA4_GPU_IOV_VIOLATION_LOG 0x005d +#define mmSDMA4_GPU_IOV_VIOLATION_LOG_BASE_IDX 1 +#define mmSDMA4_ULV_CNTL 0x005e +#define mmSDMA4_ULV_CNTL_BASE_IDX 1 +#define mmSDMA4_EA_DBIT_ADDR_DATA 0x0060 +#define mmSDMA4_EA_DBIT_ADDR_DATA_BASE_IDX 1 +#define mmSDMA4_EA_DBIT_ADDR_INDEX 0x0061 +#define mmSDMA4_EA_DBIT_ADDR_INDEX_BASE_IDX 1 +#define mmSDMA4_GPU_IOV_VIOLATION_LOG2 0x0062 +#define mmSDMA4_GPU_IOV_VIOLATION_LOG2_BASE_IDX 1 +#define mmSDMA4_GFX_RB_CNTL 0x0080 +#define 
mmSDMA4_GFX_RB_CNTL_BASE_IDX 1 +#define mmSDMA4_GFX_RB_BASE 0x0081 +#define mmSDMA4_GFX_RB_BASE_BASE_IDX 1 +#define mmSDMA4_GFX_RB_BASE_HI 0x0082 +#define mmSDMA4_GFX_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA4_GFX_RB_RPTR 0x0083 +#define mmSDMA4_GFX_RB_RPTR_BASE_IDX 1 +#define mmSDMA4_GFX_RB_RPTR_HI 0x0084 +#define mmSDMA4_GFX_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA4_GFX_RB_WPTR 0x0085 +#define mmSDMA4_GFX_RB_WPTR_BASE_IDX 1 +#define mmSDMA4_GFX_RB_WPTR_HI 0x0086 +#define mmSDMA4_GFX_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA4_GFX_RB_WPTR_POLL_CNTL 0x0087 +#define mmSDMA4_GFX_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA4_GFX_RB_RPTR_ADDR_HI 0x0088 +#define mmSDMA4_GFX_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA4_GFX_RB_RPTR_ADDR_LO 0x0089 +#define mmSDMA4_GFX_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA4_GFX_IB_CNTL 0x008a +#define mmSDMA4_GFX_IB_CNTL_BASE_IDX 1 +#define mmSDMA4_GFX_IB_RPTR 0x008b +#define mmSDMA4_GFX_IB_RPTR_BASE_IDX 1 +#define mmSDMA4_GFX_IB_OFFSET 0x008c +#define mmSDMA4_GFX_IB_OFFSET_BASE_IDX 1 +#define mmSDMA4_GFX_IB_BASE_LO 0x008d +#define mmSDMA4_GFX_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA4_GFX_IB_BASE_HI 0x008e +#define mmSDMA4_GFX_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA4_GFX_IB_SIZE 0x008f +#define mmSDMA4_GFX_IB_SIZE_BASE_IDX 1 +#define mmSDMA4_GFX_SKIP_CNTL 0x0090 +#define mmSDMA4_GFX_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA4_GFX_CONTEXT_STATUS 0x0091 +#define mmSDMA4_GFX_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA4_GFX_DOORBELL 0x0092 +#define mmSDMA4_GFX_DOORBELL_BASE_IDX 1 +#define mmSDMA4_GFX_CONTEXT_CNTL 0x0093 +#define mmSDMA4_GFX_CONTEXT_CNTL_BASE_IDX 1 +#define mmSDMA4_GFX_STATUS 0x00a8 +#define mmSDMA4_GFX_STATUS_BASE_IDX 1 +#define mmSDMA4_GFX_DOORBELL_LOG 0x00a9 +#define mmSDMA4_GFX_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA4_GFX_WATERMARK 0x00aa +#define mmSDMA4_GFX_WATERMARK_BASE_IDX 1 +#define mmSDMA4_GFX_DOORBELL_OFFSET 0x00ab +#define mmSDMA4_GFX_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA4_GFX_CSA_ADDR_LO 0x00ac +#define mmSDMA4_GFX_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA4_GFX_CSA_ADDR_HI 0x00ad +#define mmSDMA4_GFX_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA4_GFX_IB_SUB_REMAIN 0x00af +#define mmSDMA4_GFX_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA4_GFX_PREEMPT 0x00b0 +#define mmSDMA4_GFX_PREEMPT_BASE_IDX 1 +#define mmSDMA4_GFX_DUMMY_REG 0x00b1 +#define mmSDMA4_GFX_DUMMY_REG_BASE_IDX 1 +#define mmSDMA4_GFX_RB_WPTR_POLL_ADDR_HI 0x00b2 +#define mmSDMA4_GFX_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA4_GFX_RB_WPTR_POLL_ADDR_LO 0x00b3 +#define mmSDMA4_GFX_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA4_GFX_RB_AQL_CNTL 0x00b4 +#define mmSDMA4_GFX_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA4_GFX_MINOR_PTR_UPDATE 0x00b5 +#define mmSDMA4_GFX_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA4_GFX_MIDCMD_DATA0 0x00c0 +#define mmSDMA4_GFX_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA4_GFX_MIDCMD_DATA1 0x00c1 +#define mmSDMA4_GFX_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA4_GFX_MIDCMD_DATA2 0x00c2 +#define mmSDMA4_GFX_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA4_GFX_MIDCMD_DATA3 0x00c3 +#define mmSDMA4_GFX_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA4_GFX_MIDCMD_DATA4 0x00c4 +#define mmSDMA4_GFX_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA4_GFX_MIDCMD_DATA5 0x00c5 +#define mmSDMA4_GFX_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA4_GFX_MIDCMD_DATA6 0x00c6 +#define mmSDMA4_GFX_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA4_GFX_MIDCMD_DATA7 0x00c7 +#define mmSDMA4_GFX_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA4_GFX_MIDCMD_DATA8 0x00c8 +#define mmSDMA4_GFX_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA4_GFX_MIDCMD_CNTL 0x00c9 +#define 
mmSDMA4_GFX_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA4_PAGE_RB_CNTL 0x00d8 +#define mmSDMA4_PAGE_RB_CNTL_BASE_IDX 1 +#define mmSDMA4_PAGE_RB_BASE 0x00d9 +#define mmSDMA4_PAGE_RB_BASE_BASE_IDX 1 +#define mmSDMA4_PAGE_RB_BASE_HI 0x00da +#define mmSDMA4_PAGE_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA4_PAGE_RB_RPTR 0x00db +#define mmSDMA4_PAGE_RB_RPTR_BASE_IDX 1 +#define mmSDMA4_PAGE_RB_RPTR_HI 0x00dc +#define mmSDMA4_PAGE_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA4_PAGE_RB_WPTR 0x00dd +#define mmSDMA4_PAGE_RB_WPTR_BASE_IDX 1 +#define mmSDMA4_PAGE_RB_WPTR_HI 0x00de +#define mmSDMA4_PAGE_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA4_PAGE_RB_WPTR_POLL_CNTL 0x00df +#define mmSDMA4_PAGE_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA4_PAGE_RB_RPTR_ADDR_HI 0x00e0 +#define mmSDMA4_PAGE_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA4_PAGE_RB_RPTR_ADDR_LO 0x00e1 +#define mmSDMA4_PAGE_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA4_PAGE_IB_CNTL 0x00e2 +#define mmSDMA4_PAGE_IB_CNTL_BASE_IDX 1 +#define mmSDMA4_PAGE_IB_RPTR 0x00e3 +#define mmSDMA4_PAGE_IB_RPTR_BASE_IDX 1 +#define mmSDMA4_PAGE_IB_OFFSET 0x00e4 +#define mmSDMA4_PAGE_IB_OFFSET_BASE_IDX 1 +#define mmSDMA4_PAGE_IB_BASE_LO 0x00e5 +#define mmSDMA4_PAGE_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA4_PAGE_IB_BASE_HI 0x00e6 +#define mmSDMA4_PAGE_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA4_PAGE_IB_SIZE 0x00e7 +#define mmSDMA4_PAGE_IB_SIZE_BASE_IDX 1 +#define mmSDMA4_PAGE_SKIP_CNTL 0x00e8 +#define mmSDMA4_PAGE_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA4_PAGE_CONTEXT_STATUS 0x00e9 +#define mmSDMA4_PAGE_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA4_PAGE_DOORBELL 0x00ea +#define mmSDMA4_PAGE_DOORBELL_BASE_IDX 1 +#define mmSDMA4_PAGE_STATUS 0x0100 +#define mmSDMA4_PAGE_STATUS_BASE_IDX 1 +#define mmSDMA4_PAGE_DOORBELL_LOG 0x0101 +#define mmSDMA4_PAGE_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA4_PAGE_WATERMARK 0x0102 +#define mmSDMA4_PAGE_WATERMARK_BASE_IDX 1 +#define mmSDMA4_PAGE_DOORBELL_OFFSET 0x0103 +#define mmSDMA4_PAGE_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA4_PAGE_CSA_ADDR_LO 0x0104 +#define mmSDMA4_PAGE_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA4_PAGE_CSA_ADDR_HI 0x0105 +#define mmSDMA4_PAGE_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA4_PAGE_IB_SUB_REMAIN 0x0107 +#define mmSDMA4_PAGE_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA4_PAGE_PREEMPT 0x0108 +#define mmSDMA4_PAGE_PREEMPT_BASE_IDX 1 +#define mmSDMA4_PAGE_DUMMY_REG 0x0109 +#define mmSDMA4_PAGE_DUMMY_REG_BASE_IDX 1 +#define mmSDMA4_PAGE_RB_WPTR_POLL_ADDR_HI 0x010a +#define mmSDMA4_PAGE_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA4_PAGE_RB_WPTR_POLL_ADDR_LO 0x010b +#define mmSDMA4_PAGE_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA4_PAGE_RB_AQL_CNTL 0x010c +#define mmSDMA4_PAGE_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA4_PAGE_MINOR_PTR_UPDATE 0x010d +#define mmSDMA4_PAGE_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA4_PAGE_MIDCMD_DATA0 0x0118 +#define mmSDMA4_PAGE_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA4_PAGE_MIDCMD_DATA1 0x0119 +#define mmSDMA4_PAGE_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA4_PAGE_MIDCMD_DATA2 0x011a +#define mmSDMA4_PAGE_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA4_PAGE_MIDCMD_DATA3 0x011b +#define mmSDMA4_PAGE_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA4_PAGE_MIDCMD_DATA4 0x011c +#define mmSDMA4_PAGE_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA4_PAGE_MIDCMD_DATA5 0x011d +#define mmSDMA4_PAGE_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA4_PAGE_MIDCMD_DATA6 0x011e +#define mmSDMA4_PAGE_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA4_PAGE_MIDCMD_DATA7 0x011f +#define mmSDMA4_PAGE_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA4_PAGE_MIDCMD_DATA8 0x0120 +#define 
mmSDMA4_PAGE_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA4_PAGE_MIDCMD_CNTL 0x0121 +#define mmSDMA4_PAGE_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC0_RB_CNTL 0x0130 +#define mmSDMA4_RLC0_RB_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC0_RB_BASE 0x0131 +#define mmSDMA4_RLC0_RB_BASE_BASE_IDX 1 +#define mmSDMA4_RLC0_RB_BASE_HI 0x0132 +#define mmSDMA4_RLC0_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA4_RLC0_RB_RPTR 0x0133 +#define mmSDMA4_RLC0_RB_RPTR_BASE_IDX 1 +#define mmSDMA4_RLC0_RB_RPTR_HI 0x0134 +#define mmSDMA4_RLC0_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA4_RLC0_RB_WPTR 0x0135 +#define mmSDMA4_RLC0_RB_WPTR_BASE_IDX 1 +#define mmSDMA4_RLC0_RB_WPTR_HI 0x0136 +#define mmSDMA4_RLC0_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA4_RLC0_RB_WPTR_POLL_CNTL 0x0137 +#define mmSDMA4_RLC0_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC0_RB_RPTR_ADDR_HI 0x0138 +#define mmSDMA4_RLC0_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA4_RLC0_RB_RPTR_ADDR_LO 0x0139 +#define mmSDMA4_RLC0_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA4_RLC0_IB_CNTL 0x013a +#define mmSDMA4_RLC0_IB_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC0_IB_RPTR 0x013b +#define mmSDMA4_RLC0_IB_RPTR_BASE_IDX 1 +#define mmSDMA4_RLC0_IB_OFFSET 0x013c +#define mmSDMA4_RLC0_IB_OFFSET_BASE_IDX 1 +#define mmSDMA4_RLC0_IB_BASE_LO 0x013d +#define mmSDMA4_RLC0_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA4_RLC0_IB_BASE_HI 0x013e +#define mmSDMA4_RLC0_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA4_RLC0_IB_SIZE 0x013f +#define mmSDMA4_RLC0_IB_SIZE_BASE_IDX 1 +#define mmSDMA4_RLC0_SKIP_CNTL 0x0140 +#define mmSDMA4_RLC0_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC0_CONTEXT_STATUS 0x0141 +#define mmSDMA4_RLC0_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA4_RLC0_DOORBELL 0x0142 +#define mmSDMA4_RLC0_DOORBELL_BASE_IDX 1 +#define mmSDMA4_RLC0_STATUS 0x0158 +#define mmSDMA4_RLC0_STATUS_BASE_IDX 1 +#define mmSDMA4_RLC0_DOORBELL_LOG 0x0159 +#define mmSDMA4_RLC0_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA4_RLC0_WATERMARK 0x015a +#define mmSDMA4_RLC0_WATERMARK_BASE_IDX 1 +#define mmSDMA4_RLC0_DOORBELL_OFFSET 0x015b +#define mmSDMA4_RLC0_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA4_RLC0_CSA_ADDR_LO 0x015c +#define mmSDMA4_RLC0_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA4_RLC0_CSA_ADDR_HI 0x015d +#define mmSDMA4_RLC0_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA4_RLC0_IB_SUB_REMAIN 0x015f +#define mmSDMA4_RLC0_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA4_RLC0_PREEMPT 0x0160 +#define mmSDMA4_RLC0_PREEMPT_BASE_IDX 1 +#define mmSDMA4_RLC0_DUMMY_REG 0x0161 +#define mmSDMA4_RLC0_DUMMY_REG_BASE_IDX 1 +#define mmSDMA4_RLC0_RB_WPTR_POLL_ADDR_HI 0x0162 +#define mmSDMA4_RLC0_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA4_RLC0_RB_WPTR_POLL_ADDR_LO 0x0163 +#define mmSDMA4_RLC0_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA4_RLC0_RB_AQL_CNTL 0x0164 +#define mmSDMA4_RLC0_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC0_MINOR_PTR_UPDATE 0x0165 +#define mmSDMA4_RLC0_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA4_RLC0_MIDCMD_DATA0 0x0170 +#define mmSDMA4_RLC0_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA4_RLC0_MIDCMD_DATA1 0x0171 +#define mmSDMA4_RLC0_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA4_RLC0_MIDCMD_DATA2 0x0172 +#define mmSDMA4_RLC0_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA4_RLC0_MIDCMD_DATA3 0x0173 +#define mmSDMA4_RLC0_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA4_RLC0_MIDCMD_DATA4 0x0174 +#define mmSDMA4_RLC0_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA4_RLC0_MIDCMD_DATA5 0x0175 +#define mmSDMA4_RLC0_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA4_RLC0_MIDCMD_DATA6 0x0176 +#define mmSDMA4_RLC0_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA4_RLC0_MIDCMD_DATA7 0x0177 +#define 
mmSDMA4_RLC0_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA4_RLC0_MIDCMD_DATA8 0x0178 +#define mmSDMA4_RLC0_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA4_RLC0_MIDCMD_CNTL 0x0179 +#define mmSDMA4_RLC0_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC1_RB_CNTL 0x0188 +#define mmSDMA4_RLC1_RB_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC1_RB_BASE 0x0189 +#define mmSDMA4_RLC1_RB_BASE_BASE_IDX 1 +#define mmSDMA4_RLC1_RB_BASE_HI 0x018a +#define mmSDMA4_RLC1_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA4_RLC1_RB_RPTR 0x018b +#define mmSDMA4_RLC1_RB_RPTR_BASE_IDX 1 +#define mmSDMA4_RLC1_RB_RPTR_HI 0x018c +#define mmSDMA4_RLC1_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA4_RLC1_RB_WPTR 0x018d +#define mmSDMA4_RLC1_RB_WPTR_BASE_IDX 1 +#define mmSDMA4_RLC1_RB_WPTR_HI 0x018e +#define mmSDMA4_RLC1_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA4_RLC1_RB_WPTR_POLL_CNTL 0x018f +#define mmSDMA4_RLC1_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC1_RB_RPTR_ADDR_HI 0x0190 +#define mmSDMA4_RLC1_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA4_RLC1_RB_RPTR_ADDR_LO 0x0191 +#define mmSDMA4_RLC1_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA4_RLC1_IB_CNTL 0x0192 +#define mmSDMA4_RLC1_IB_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC1_IB_RPTR 0x0193 +#define mmSDMA4_RLC1_IB_RPTR_BASE_IDX 1 +#define mmSDMA4_RLC1_IB_OFFSET 0x0194 +#define mmSDMA4_RLC1_IB_OFFSET_BASE_IDX 1 +#define mmSDMA4_RLC1_IB_BASE_LO 0x0195 +#define mmSDMA4_RLC1_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA4_RLC1_IB_BASE_HI 0x0196 +#define mmSDMA4_RLC1_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA4_RLC1_IB_SIZE 0x0197 +#define mmSDMA4_RLC1_IB_SIZE_BASE_IDX 1 +#define mmSDMA4_RLC1_SKIP_CNTL 0x0198 +#define mmSDMA4_RLC1_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC1_CONTEXT_STATUS 0x0199 +#define mmSDMA4_RLC1_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA4_RLC1_DOORBELL 0x019a +#define mmSDMA4_RLC1_DOORBELL_BASE_IDX 1 +#define mmSDMA4_RLC1_STATUS 0x01b0 +#define mmSDMA4_RLC1_STATUS_BASE_IDX 1 +#define mmSDMA4_RLC1_DOORBELL_LOG 0x01b1 +#define mmSDMA4_RLC1_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA4_RLC1_WATERMARK 0x01b2 +#define mmSDMA4_RLC1_WATERMARK_BASE_IDX 1 +#define mmSDMA4_RLC1_DOORBELL_OFFSET 0x01b3 +#define mmSDMA4_RLC1_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA4_RLC1_CSA_ADDR_LO 0x01b4 +#define mmSDMA4_RLC1_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA4_RLC1_CSA_ADDR_HI 0x01b5 +#define mmSDMA4_RLC1_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA4_RLC1_IB_SUB_REMAIN 0x01b7 +#define mmSDMA4_RLC1_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA4_RLC1_PREEMPT 0x01b8 +#define mmSDMA4_RLC1_PREEMPT_BASE_IDX 1 +#define mmSDMA4_RLC1_DUMMY_REG 0x01b9 +#define mmSDMA4_RLC1_DUMMY_REG_BASE_IDX 1 +#define mmSDMA4_RLC1_RB_WPTR_POLL_ADDR_HI 0x01ba +#define mmSDMA4_RLC1_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA4_RLC1_RB_WPTR_POLL_ADDR_LO 0x01bb +#define mmSDMA4_RLC1_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA4_RLC1_RB_AQL_CNTL 0x01bc +#define mmSDMA4_RLC1_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC1_MINOR_PTR_UPDATE 0x01bd +#define mmSDMA4_RLC1_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA4_RLC1_MIDCMD_DATA0 0x01c8 +#define mmSDMA4_RLC1_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA4_RLC1_MIDCMD_DATA1 0x01c9 +#define mmSDMA4_RLC1_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA4_RLC1_MIDCMD_DATA2 0x01ca +#define mmSDMA4_RLC1_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA4_RLC1_MIDCMD_DATA3 0x01cb +#define mmSDMA4_RLC1_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA4_RLC1_MIDCMD_DATA4 0x01cc +#define mmSDMA4_RLC1_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA4_RLC1_MIDCMD_DATA5 0x01cd +#define mmSDMA4_RLC1_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA4_RLC1_MIDCMD_DATA6 0x01ce +#define 
mmSDMA4_RLC1_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA4_RLC1_MIDCMD_DATA7 0x01cf +#define mmSDMA4_RLC1_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA4_RLC1_MIDCMD_DATA8 0x01d0 +#define mmSDMA4_RLC1_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA4_RLC1_MIDCMD_CNTL 0x01d1 +#define mmSDMA4_RLC1_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC2_RB_CNTL 0x01e0 +#define mmSDMA4_RLC2_RB_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC2_RB_BASE 0x01e1 +#define mmSDMA4_RLC2_RB_BASE_BASE_IDX 1 +#define mmSDMA4_RLC2_RB_BASE_HI 0x01e2 +#define mmSDMA4_RLC2_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA4_RLC2_RB_RPTR 0x01e3 +#define mmSDMA4_RLC2_RB_RPTR_BASE_IDX 1 +#define mmSDMA4_RLC2_RB_RPTR_HI 0x01e4 +#define mmSDMA4_RLC2_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA4_RLC2_RB_WPTR 0x01e5 +#define mmSDMA4_RLC2_RB_WPTR_BASE_IDX 1 +#define mmSDMA4_RLC2_RB_WPTR_HI 0x01e6 +#define mmSDMA4_RLC2_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA4_RLC2_RB_WPTR_POLL_CNTL 0x01e7 +#define mmSDMA4_RLC2_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC2_RB_RPTR_ADDR_HI 0x01e8 +#define mmSDMA4_RLC2_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA4_RLC2_RB_RPTR_ADDR_LO 0x01e9 +#define mmSDMA4_RLC2_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA4_RLC2_IB_CNTL 0x01ea +#define mmSDMA4_RLC2_IB_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC2_IB_RPTR 0x01eb +#define mmSDMA4_RLC2_IB_RPTR_BASE_IDX 1 +#define mmSDMA4_RLC2_IB_OFFSET 0x01ec +#define mmSDMA4_RLC2_IB_OFFSET_BASE_IDX 1 +#define mmSDMA4_RLC2_IB_BASE_LO 0x01ed +#define mmSDMA4_RLC2_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA4_RLC2_IB_BASE_HI 0x01ee +#define mmSDMA4_RLC2_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA4_RLC2_IB_SIZE 0x01ef +#define mmSDMA4_RLC2_IB_SIZE_BASE_IDX 1 +#define mmSDMA4_RLC2_SKIP_CNTL 0x01f0 +#define mmSDMA4_RLC2_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC2_CONTEXT_STATUS 0x01f1 +#define mmSDMA4_RLC2_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA4_RLC2_DOORBELL 0x01f2 +#define mmSDMA4_RLC2_DOORBELL_BASE_IDX 1 +#define mmSDMA4_RLC2_STATUS 0x0208 +#define mmSDMA4_RLC2_STATUS_BASE_IDX 1 +#define mmSDMA4_RLC2_DOORBELL_LOG 0x0209 +#define mmSDMA4_RLC2_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA4_RLC2_WATERMARK 0x020a +#define mmSDMA4_RLC2_WATERMARK_BASE_IDX 1 +#define mmSDMA4_RLC2_DOORBELL_OFFSET 0x020b +#define mmSDMA4_RLC2_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA4_RLC2_CSA_ADDR_LO 0x020c +#define mmSDMA4_RLC2_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA4_RLC2_CSA_ADDR_HI 0x020d +#define mmSDMA4_RLC2_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA4_RLC2_IB_SUB_REMAIN 0x020f +#define mmSDMA4_RLC2_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA4_RLC2_PREEMPT 0x0210 +#define mmSDMA4_RLC2_PREEMPT_BASE_IDX 1 +#define mmSDMA4_RLC2_DUMMY_REG 0x0211 +#define mmSDMA4_RLC2_DUMMY_REG_BASE_IDX 1 +#define mmSDMA4_RLC2_RB_WPTR_POLL_ADDR_HI 0x0212 +#define mmSDMA4_RLC2_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA4_RLC2_RB_WPTR_POLL_ADDR_LO 0x0213 +#define mmSDMA4_RLC2_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA4_RLC2_RB_AQL_CNTL 0x0214 +#define mmSDMA4_RLC2_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC2_MINOR_PTR_UPDATE 0x0215 +#define mmSDMA4_RLC2_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA4_RLC2_MIDCMD_DATA0 0x0220 +#define mmSDMA4_RLC2_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA4_RLC2_MIDCMD_DATA1 0x0221 +#define mmSDMA4_RLC2_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA4_RLC2_MIDCMD_DATA2 0x0222 +#define mmSDMA4_RLC2_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA4_RLC2_MIDCMD_DATA3 0x0223 +#define mmSDMA4_RLC2_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA4_RLC2_MIDCMD_DATA4 0x0224 +#define mmSDMA4_RLC2_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA4_RLC2_MIDCMD_DATA5 0x0225 +#define 
mmSDMA4_RLC2_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA4_RLC2_MIDCMD_DATA6 0x0226 +#define mmSDMA4_RLC2_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA4_RLC2_MIDCMD_DATA7 0x0227 +#define mmSDMA4_RLC2_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA4_RLC2_MIDCMD_DATA8 0x0228 +#define mmSDMA4_RLC2_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA4_RLC2_MIDCMD_CNTL 0x0229 +#define mmSDMA4_RLC2_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC3_RB_CNTL 0x0238 +#define mmSDMA4_RLC3_RB_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC3_RB_BASE 0x0239 +#define mmSDMA4_RLC3_RB_BASE_BASE_IDX 1 +#define mmSDMA4_RLC3_RB_BASE_HI 0x023a +#define mmSDMA4_RLC3_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA4_RLC3_RB_RPTR 0x023b +#define mmSDMA4_RLC3_RB_RPTR_BASE_IDX 1 +#define mmSDMA4_RLC3_RB_RPTR_HI 0x023c +#define mmSDMA4_RLC3_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA4_RLC3_RB_WPTR 0x023d +#define mmSDMA4_RLC3_RB_WPTR_BASE_IDX 1 +#define mmSDMA4_RLC3_RB_WPTR_HI 0x023e +#define mmSDMA4_RLC3_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA4_RLC3_RB_WPTR_POLL_CNTL 0x023f +#define mmSDMA4_RLC3_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC3_RB_RPTR_ADDR_HI 0x0240 +#define mmSDMA4_RLC3_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA4_RLC3_RB_RPTR_ADDR_LO 0x0241 +#define mmSDMA4_RLC3_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA4_RLC3_IB_CNTL 0x0242 +#define mmSDMA4_RLC3_IB_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC3_IB_RPTR 0x0243 +#define mmSDMA4_RLC3_IB_RPTR_BASE_IDX 1 +#define mmSDMA4_RLC3_IB_OFFSET 0x0244 +#define mmSDMA4_RLC3_IB_OFFSET_BASE_IDX 1 +#define mmSDMA4_RLC3_IB_BASE_LO 0x0245 +#define mmSDMA4_RLC3_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA4_RLC3_IB_BASE_HI 0x0246 +#define mmSDMA4_RLC3_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA4_RLC3_IB_SIZE 0x0247 +#define mmSDMA4_RLC3_IB_SIZE_BASE_IDX 1 +#define mmSDMA4_RLC3_SKIP_CNTL 0x0248 +#define mmSDMA4_RLC3_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC3_CONTEXT_STATUS 0x0249 +#define mmSDMA4_RLC3_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA4_RLC3_DOORBELL 0x024a +#define mmSDMA4_RLC3_DOORBELL_BASE_IDX 1 +#define mmSDMA4_RLC3_STATUS 0x0260 +#define mmSDMA4_RLC3_STATUS_BASE_IDX 1 +#define mmSDMA4_RLC3_DOORBELL_LOG 0x0261 +#define mmSDMA4_RLC3_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA4_RLC3_WATERMARK 0x0262 +#define mmSDMA4_RLC3_WATERMARK_BASE_IDX 1 +#define mmSDMA4_RLC3_DOORBELL_OFFSET 0x0263 +#define mmSDMA4_RLC3_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA4_RLC3_CSA_ADDR_LO 0x0264 +#define mmSDMA4_RLC3_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA4_RLC3_CSA_ADDR_HI 0x0265 +#define mmSDMA4_RLC3_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA4_RLC3_IB_SUB_REMAIN 0x0267 +#define mmSDMA4_RLC3_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA4_RLC3_PREEMPT 0x0268 +#define mmSDMA4_RLC3_PREEMPT_BASE_IDX 1 +#define mmSDMA4_RLC3_DUMMY_REG 0x0269 +#define mmSDMA4_RLC3_DUMMY_REG_BASE_IDX 1 +#define mmSDMA4_RLC3_RB_WPTR_POLL_ADDR_HI 0x026a +#define mmSDMA4_RLC3_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA4_RLC3_RB_WPTR_POLL_ADDR_LO 0x026b +#define mmSDMA4_RLC3_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA4_RLC3_RB_AQL_CNTL 0x026c +#define mmSDMA4_RLC3_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC3_MINOR_PTR_UPDATE 0x026d +#define mmSDMA4_RLC3_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA4_RLC3_MIDCMD_DATA0 0x0278 +#define mmSDMA4_RLC3_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA4_RLC3_MIDCMD_DATA1 0x0279 +#define mmSDMA4_RLC3_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA4_RLC3_MIDCMD_DATA2 0x027a +#define mmSDMA4_RLC3_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA4_RLC3_MIDCMD_DATA3 0x027b +#define mmSDMA4_RLC3_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA4_RLC3_MIDCMD_DATA4 0x027c +#define 
mmSDMA4_RLC3_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA4_RLC3_MIDCMD_DATA5 0x027d +#define mmSDMA4_RLC3_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA4_RLC3_MIDCMD_DATA6 0x027e +#define mmSDMA4_RLC3_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA4_RLC3_MIDCMD_DATA7 0x027f +#define mmSDMA4_RLC3_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA4_RLC3_MIDCMD_DATA8 0x0280 +#define mmSDMA4_RLC3_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA4_RLC3_MIDCMD_CNTL 0x0281 +#define mmSDMA4_RLC3_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC4_RB_CNTL 0x0290 +#define mmSDMA4_RLC4_RB_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC4_RB_BASE 0x0291 +#define mmSDMA4_RLC4_RB_BASE_BASE_IDX 1 +#define mmSDMA4_RLC4_RB_BASE_HI 0x0292 +#define mmSDMA4_RLC4_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA4_RLC4_RB_RPTR 0x0293 +#define mmSDMA4_RLC4_RB_RPTR_BASE_IDX 1 +#define mmSDMA4_RLC4_RB_RPTR_HI 0x0294 +#define mmSDMA4_RLC4_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA4_RLC4_RB_WPTR 0x0295 +#define mmSDMA4_RLC4_RB_WPTR_BASE_IDX 1 +#define mmSDMA4_RLC4_RB_WPTR_HI 0x0296 +#define mmSDMA4_RLC4_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA4_RLC4_RB_WPTR_POLL_CNTL 0x0297 +#define mmSDMA4_RLC4_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC4_RB_RPTR_ADDR_HI 0x0298 +#define mmSDMA4_RLC4_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA4_RLC4_RB_RPTR_ADDR_LO 0x0299 +#define mmSDMA4_RLC4_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA4_RLC4_IB_CNTL 0x029a +#define mmSDMA4_RLC4_IB_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC4_IB_RPTR 0x029b +#define mmSDMA4_RLC4_IB_RPTR_BASE_IDX 1 +#define mmSDMA4_RLC4_IB_OFFSET 0x029c +#define mmSDMA4_RLC4_IB_OFFSET_BASE_IDX 1 +#define mmSDMA4_RLC4_IB_BASE_LO 0x029d +#define mmSDMA4_RLC4_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA4_RLC4_IB_BASE_HI 0x029e +#define mmSDMA4_RLC4_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA4_RLC4_IB_SIZE 0x029f +#define mmSDMA4_RLC4_IB_SIZE_BASE_IDX 1 +#define mmSDMA4_RLC4_SKIP_CNTL 0x02a0 +#define mmSDMA4_RLC4_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC4_CONTEXT_STATUS 0x02a1 +#define mmSDMA4_RLC4_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA4_RLC4_DOORBELL 0x02a2 +#define mmSDMA4_RLC4_DOORBELL_BASE_IDX 1 +#define mmSDMA4_RLC4_STATUS 0x02b8 +#define mmSDMA4_RLC4_STATUS_BASE_IDX 1 +#define mmSDMA4_RLC4_DOORBELL_LOG 0x02b9 +#define mmSDMA4_RLC4_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA4_RLC4_WATERMARK 0x02ba +#define mmSDMA4_RLC4_WATERMARK_BASE_IDX 1 +#define mmSDMA4_RLC4_DOORBELL_OFFSET 0x02bb +#define mmSDMA4_RLC4_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA4_RLC4_CSA_ADDR_LO 0x02bc +#define mmSDMA4_RLC4_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA4_RLC4_CSA_ADDR_HI 0x02bd +#define mmSDMA4_RLC4_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA4_RLC4_IB_SUB_REMAIN 0x02bf +#define mmSDMA4_RLC4_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA4_RLC4_PREEMPT 0x02c0 +#define mmSDMA4_RLC4_PREEMPT_BASE_IDX 1 +#define mmSDMA4_RLC4_DUMMY_REG 0x02c1 +#define mmSDMA4_RLC4_DUMMY_REG_BASE_IDX 1 +#define mmSDMA4_RLC4_RB_WPTR_POLL_ADDR_HI 0x02c2 +#define mmSDMA4_RLC4_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA4_RLC4_RB_WPTR_POLL_ADDR_LO 0x02c3 +#define mmSDMA4_RLC4_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA4_RLC4_RB_AQL_CNTL 0x02c4 +#define mmSDMA4_RLC4_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC4_MINOR_PTR_UPDATE 0x02c5 +#define mmSDMA4_RLC4_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA4_RLC4_MIDCMD_DATA0 0x02d0 +#define mmSDMA4_RLC4_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA4_RLC4_MIDCMD_DATA1 0x02d1 +#define mmSDMA4_RLC4_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA4_RLC4_MIDCMD_DATA2 0x02d2 +#define mmSDMA4_RLC4_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA4_RLC4_MIDCMD_DATA3 0x02d3 +#define 
mmSDMA4_RLC4_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA4_RLC4_MIDCMD_DATA4 0x02d4 +#define mmSDMA4_RLC4_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA4_RLC4_MIDCMD_DATA5 0x02d5 +#define mmSDMA4_RLC4_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA4_RLC4_MIDCMD_DATA6 0x02d6 +#define mmSDMA4_RLC4_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA4_RLC4_MIDCMD_DATA7 0x02d7 +#define mmSDMA4_RLC4_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA4_RLC4_MIDCMD_DATA8 0x02d8 +#define mmSDMA4_RLC4_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA4_RLC4_MIDCMD_CNTL 0x02d9 +#define mmSDMA4_RLC4_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC5_RB_CNTL 0x02e8 +#define mmSDMA4_RLC5_RB_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC5_RB_BASE 0x02e9 +#define mmSDMA4_RLC5_RB_BASE_BASE_IDX 1 +#define mmSDMA4_RLC5_RB_BASE_HI 0x02ea +#define mmSDMA4_RLC5_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA4_RLC5_RB_RPTR 0x02eb +#define mmSDMA4_RLC5_RB_RPTR_BASE_IDX 1 +#define mmSDMA4_RLC5_RB_RPTR_HI 0x02ec +#define mmSDMA4_RLC5_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA4_RLC5_RB_WPTR 0x02ed +#define mmSDMA4_RLC5_RB_WPTR_BASE_IDX 1 +#define mmSDMA4_RLC5_RB_WPTR_HI 0x02ee +#define mmSDMA4_RLC5_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA4_RLC5_RB_WPTR_POLL_CNTL 0x02ef +#define mmSDMA4_RLC5_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC5_RB_RPTR_ADDR_HI 0x02f0 +#define mmSDMA4_RLC5_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA4_RLC5_RB_RPTR_ADDR_LO 0x02f1 +#define mmSDMA4_RLC5_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA4_RLC5_IB_CNTL 0x02f2 +#define mmSDMA4_RLC5_IB_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC5_IB_RPTR 0x02f3 +#define mmSDMA4_RLC5_IB_RPTR_BASE_IDX 1 +#define mmSDMA4_RLC5_IB_OFFSET 0x02f4 +#define mmSDMA4_RLC5_IB_OFFSET_BASE_IDX 1 +#define mmSDMA4_RLC5_IB_BASE_LO 0x02f5 +#define mmSDMA4_RLC5_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA4_RLC5_IB_BASE_HI 0x02f6 +#define mmSDMA4_RLC5_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA4_RLC5_IB_SIZE 0x02f7 +#define mmSDMA4_RLC5_IB_SIZE_BASE_IDX 1 +#define mmSDMA4_RLC5_SKIP_CNTL 0x02f8 +#define mmSDMA4_RLC5_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC5_CONTEXT_STATUS 0x02f9 +#define mmSDMA4_RLC5_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA4_RLC5_DOORBELL 0x02fa +#define mmSDMA4_RLC5_DOORBELL_BASE_IDX 1 +#define mmSDMA4_RLC5_STATUS 0x0310 +#define mmSDMA4_RLC5_STATUS_BASE_IDX 1 +#define mmSDMA4_RLC5_DOORBELL_LOG 0x0311 +#define mmSDMA4_RLC5_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA4_RLC5_WATERMARK 0x0312 +#define mmSDMA4_RLC5_WATERMARK_BASE_IDX 1 +#define mmSDMA4_RLC5_DOORBELL_OFFSET 0x0313 +#define mmSDMA4_RLC5_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA4_RLC5_CSA_ADDR_LO 0x0314 +#define mmSDMA4_RLC5_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA4_RLC5_CSA_ADDR_HI 0x0315 +#define mmSDMA4_RLC5_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA4_RLC5_IB_SUB_REMAIN 0x0317 +#define mmSDMA4_RLC5_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA4_RLC5_PREEMPT 0x0318 +#define mmSDMA4_RLC5_PREEMPT_BASE_IDX 1 +#define mmSDMA4_RLC5_DUMMY_REG 0x0319 +#define mmSDMA4_RLC5_DUMMY_REG_BASE_IDX 1 +#define mmSDMA4_RLC5_RB_WPTR_POLL_ADDR_HI 0x031a +#define mmSDMA4_RLC5_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA4_RLC5_RB_WPTR_POLL_ADDR_LO 0x031b +#define mmSDMA4_RLC5_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA4_RLC5_RB_AQL_CNTL 0x031c +#define mmSDMA4_RLC5_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC5_MINOR_PTR_UPDATE 0x031d +#define mmSDMA4_RLC5_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA4_RLC5_MIDCMD_DATA0 0x0328 +#define mmSDMA4_RLC5_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA4_RLC5_MIDCMD_DATA1 0x0329 +#define mmSDMA4_RLC5_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA4_RLC5_MIDCMD_DATA2 0x032a +#define 
mmSDMA4_RLC5_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA4_RLC5_MIDCMD_DATA3 0x032b +#define mmSDMA4_RLC5_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA4_RLC5_MIDCMD_DATA4 0x032c +#define mmSDMA4_RLC5_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA4_RLC5_MIDCMD_DATA5 0x032d +#define mmSDMA4_RLC5_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA4_RLC5_MIDCMD_DATA6 0x032e +#define mmSDMA4_RLC5_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA4_RLC5_MIDCMD_DATA7 0x032f +#define mmSDMA4_RLC5_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA4_RLC5_MIDCMD_DATA8 0x0330 +#define mmSDMA4_RLC5_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA4_RLC5_MIDCMD_CNTL 0x0331 +#define mmSDMA4_RLC5_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC6_RB_CNTL 0x0340 +#define mmSDMA4_RLC6_RB_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC6_RB_BASE 0x0341 +#define mmSDMA4_RLC6_RB_BASE_BASE_IDX 1 +#define mmSDMA4_RLC6_RB_BASE_HI 0x0342 +#define mmSDMA4_RLC6_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA4_RLC6_RB_RPTR 0x0343 +#define mmSDMA4_RLC6_RB_RPTR_BASE_IDX 1 +#define mmSDMA4_RLC6_RB_RPTR_HI 0x0344 +#define mmSDMA4_RLC6_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA4_RLC6_RB_WPTR 0x0345 +#define mmSDMA4_RLC6_RB_WPTR_BASE_IDX 1 +#define mmSDMA4_RLC6_RB_WPTR_HI 0x0346 +#define mmSDMA4_RLC6_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA4_RLC6_RB_WPTR_POLL_CNTL 0x0347 +#define mmSDMA4_RLC6_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC6_RB_RPTR_ADDR_HI 0x0348 +#define mmSDMA4_RLC6_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA4_RLC6_RB_RPTR_ADDR_LO 0x0349 +#define mmSDMA4_RLC6_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA4_RLC6_IB_CNTL 0x034a +#define mmSDMA4_RLC6_IB_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC6_IB_RPTR 0x034b +#define mmSDMA4_RLC6_IB_RPTR_BASE_IDX 1 +#define mmSDMA4_RLC6_IB_OFFSET 0x034c +#define mmSDMA4_RLC6_IB_OFFSET_BASE_IDX 1 +#define mmSDMA4_RLC6_IB_BASE_LO 0x034d +#define mmSDMA4_RLC6_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA4_RLC6_IB_BASE_HI 0x034e +#define mmSDMA4_RLC6_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA4_RLC6_IB_SIZE 0x034f +#define mmSDMA4_RLC6_IB_SIZE_BASE_IDX 1 +#define mmSDMA4_RLC6_SKIP_CNTL 0x0350 +#define mmSDMA4_RLC6_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC6_CONTEXT_STATUS 0x0351 +#define mmSDMA4_RLC6_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA4_RLC6_DOORBELL 0x0352 +#define mmSDMA4_RLC6_DOORBELL_BASE_IDX 1 +#define mmSDMA4_RLC6_STATUS 0x0368 +#define mmSDMA4_RLC6_STATUS_BASE_IDX 1 +#define mmSDMA4_RLC6_DOORBELL_LOG 0x0369 +#define mmSDMA4_RLC6_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA4_RLC6_WATERMARK 0x036a +#define mmSDMA4_RLC6_WATERMARK_BASE_IDX 1 +#define mmSDMA4_RLC6_DOORBELL_OFFSET 0x036b +#define mmSDMA4_RLC6_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA4_RLC6_CSA_ADDR_LO 0x036c +#define mmSDMA4_RLC6_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA4_RLC6_CSA_ADDR_HI 0x036d +#define mmSDMA4_RLC6_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA4_RLC6_IB_SUB_REMAIN 0x036f +#define mmSDMA4_RLC6_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA4_RLC6_PREEMPT 0x0370 +#define mmSDMA4_RLC6_PREEMPT_BASE_IDX 1 +#define mmSDMA4_RLC6_DUMMY_REG 0x0371 +#define mmSDMA4_RLC6_DUMMY_REG_BASE_IDX 1 +#define mmSDMA4_RLC6_RB_WPTR_POLL_ADDR_HI 0x0372 +#define mmSDMA4_RLC6_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA4_RLC6_RB_WPTR_POLL_ADDR_LO 0x0373 +#define mmSDMA4_RLC6_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA4_RLC6_RB_AQL_CNTL 0x0374 +#define mmSDMA4_RLC6_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC6_MINOR_PTR_UPDATE 0x0375 +#define mmSDMA4_RLC6_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA4_RLC6_MIDCMD_DATA0 0x0380 +#define mmSDMA4_RLC6_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA4_RLC6_MIDCMD_DATA1 0x0381 +#define 
mmSDMA4_RLC6_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA4_RLC6_MIDCMD_DATA2 0x0382 +#define mmSDMA4_RLC6_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA4_RLC6_MIDCMD_DATA3 0x0383 +#define mmSDMA4_RLC6_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA4_RLC6_MIDCMD_DATA4 0x0384 +#define mmSDMA4_RLC6_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA4_RLC6_MIDCMD_DATA5 0x0385 +#define mmSDMA4_RLC6_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA4_RLC6_MIDCMD_DATA6 0x0386 +#define mmSDMA4_RLC6_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA4_RLC6_MIDCMD_DATA7 0x0387 +#define mmSDMA4_RLC6_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA4_RLC6_MIDCMD_DATA8 0x0388 +#define mmSDMA4_RLC6_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA4_RLC6_MIDCMD_CNTL 0x0389 +#define mmSDMA4_RLC6_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC7_RB_CNTL 0x0398 +#define mmSDMA4_RLC7_RB_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC7_RB_BASE 0x0399 +#define mmSDMA4_RLC7_RB_BASE_BASE_IDX 1 +#define mmSDMA4_RLC7_RB_BASE_HI 0x039a +#define mmSDMA4_RLC7_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA4_RLC7_RB_RPTR 0x039b +#define mmSDMA4_RLC7_RB_RPTR_BASE_IDX 1 +#define mmSDMA4_RLC7_RB_RPTR_HI 0x039c +#define mmSDMA4_RLC7_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA4_RLC7_RB_WPTR 0x039d +#define mmSDMA4_RLC7_RB_WPTR_BASE_IDX 1 +#define mmSDMA4_RLC7_RB_WPTR_HI 0x039e +#define mmSDMA4_RLC7_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA4_RLC7_RB_WPTR_POLL_CNTL 0x039f +#define mmSDMA4_RLC7_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC7_RB_RPTR_ADDR_HI 0x03a0 +#define mmSDMA4_RLC7_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA4_RLC7_RB_RPTR_ADDR_LO 0x03a1 +#define mmSDMA4_RLC7_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA4_RLC7_IB_CNTL 0x03a2 +#define mmSDMA4_RLC7_IB_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC7_IB_RPTR 0x03a3 +#define mmSDMA4_RLC7_IB_RPTR_BASE_IDX 1 +#define mmSDMA4_RLC7_IB_OFFSET 0x03a4 +#define mmSDMA4_RLC7_IB_OFFSET_BASE_IDX 1 +#define mmSDMA4_RLC7_IB_BASE_LO 0x03a5 +#define mmSDMA4_RLC7_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA4_RLC7_IB_BASE_HI 0x03a6 +#define mmSDMA4_RLC7_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA4_RLC7_IB_SIZE 0x03a7 +#define mmSDMA4_RLC7_IB_SIZE_BASE_IDX 1 +#define mmSDMA4_RLC7_SKIP_CNTL 0x03a8 +#define mmSDMA4_RLC7_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC7_CONTEXT_STATUS 0x03a9 +#define mmSDMA4_RLC7_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA4_RLC7_DOORBELL 0x03aa +#define mmSDMA4_RLC7_DOORBELL_BASE_IDX 1 +#define mmSDMA4_RLC7_STATUS 0x03c0 +#define mmSDMA4_RLC7_STATUS_BASE_IDX 1 +#define mmSDMA4_RLC7_DOORBELL_LOG 0x03c1 +#define mmSDMA4_RLC7_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA4_RLC7_WATERMARK 0x03c2 +#define mmSDMA4_RLC7_WATERMARK_BASE_IDX 1 +#define mmSDMA4_RLC7_DOORBELL_OFFSET 0x03c3 +#define mmSDMA4_RLC7_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA4_RLC7_CSA_ADDR_LO 0x03c4 +#define mmSDMA4_RLC7_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA4_RLC7_CSA_ADDR_HI 0x03c5 +#define mmSDMA4_RLC7_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA4_RLC7_IB_SUB_REMAIN 0x03c7 +#define mmSDMA4_RLC7_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA4_RLC7_PREEMPT 0x03c8 +#define mmSDMA4_RLC7_PREEMPT_BASE_IDX 1 +#define mmSDMA4_RLC7_DUMMY_REG 0x03c9 +#define mmSDMA4_RLC7_DUMMY_REG_BASE_IDX 1 +#define mmSDMA4_RLC7_RB_WPTR_POLL_ADDR_HI 0x03ca +#define mmSDMA4_RLC7_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA4_RLC7_RB_WPTR_POLL_ADDR_LO 0x03cb +#define mmSDMA4_RLC7_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA4_RLC7_RB_AQL_CNTL 0x03cc +#define mmSDMA4_RLC7_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA4_RLC7_MINOR_PTR_UPDATE 0x03cd +#define mmSDMA4_RLC7_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA4_RLC7_MIDCMD_DATA0 0x03d8 +#define 
mmSDMA4_RLC7_MIDCMD_DATA0_BASE_IDX 1
+#define mmSDMA4_RLC7_MIDCMD_DATA1 0x03d9
+#define mmSDMA4_RLC7_MIDCMD_DATA1_BASE_IDX 1
+#define mmSDMA4_RLC7_MIDCMD_DATA2 0x03da
+#define mmSDMA4_RLC7_MIDCMD_DATA2_BASE_IDX 1
+#define mmSDMA4_RLC7_MIDCMD_DATA3 0x03db
+#define mmSDMA4_RLC7_MIDCMD_DATA3_BASE_IDX 1
+#define mmSDMA4_RLC7_MIDCMD_DATA4 0x03dc
+#define mmSDMA4_RLC7_MIDCMD_DATA4_BASE_IDX 1
+#define mmSDMA4_RLC7_MIDCMD_DATA5 0x03dd
+#define mmSDMA4_RLC7_MIDCMD_DATA5_BASE_IDX 1
+#define mmSDMA4_RLC7_MIDCMD_DATA6 0x03de
+#define mmSDMA4_RLC7_MIDCMD_DATA6_BASE_IDX 1
+#define mmSDMA4_RLC7_MIDCMD_DATA7 0x03df
+#define mmSDMA4_RLC7_MIDCMD_DATA7_BASE_IDX 1
+#define mmSDMA4_RLC7_MIDCMD_DATA8 0x03e0
+#define mmSDMA4_RLC7_MIDCMD_DATA8_BASE_IDX 1
+#define mmSDMA4_RLC7_MIDCMD_CNTL 0x03e1
+#define mmSDMA4_RLC7_MIDCMD_CNTL_BASE_IDX 1
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma4/sdma4_4_2_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/sdma4/sdma4_4_2_2_sh_mask.h
new file mode 100644
index 000000000000..2cc510913214
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/sdma4/sdma4_4_2_2_sh_mask.h
@@ -0,0 +1,2956 @@
+/*
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ +#ifndef _sdma4_4_2_2_SH_MASK_HEADER +#define _sdma4_4_2_2_SH_MASK_HEADER + + +// addressBlock: sdma4_sdma4dec +//SDMA4_UCODE_ADDR +#define SDMA4_UCODE_ADDR__VALUE__SHIFT 0x0 +#define SDMA4_UCODE_ADDR__VALUE_MASK 0x00001FFFL +//SDMA4_UCODE_DATA +#define SDMA4_UCODE_DATA__VALUE__SHIFT 0x0 +#define SDMA4_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL +//SDMA4_VM_CNTL +#define SDMA4_VM_CNTL__CMD__SHIFT 0x0 +#define SDMA4_VM_CNTL__CMD_MASK 0x0000000FL +//SDMA4_VM_CTX_LO +#define SDMA4_VM_CTX_LO__ADDR__SHIFT 0x2 +#define SDMA4_VM_CTX_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA4_VM_CTX_HI +#define SDMA4_VM_CTX_HI__ADDR__SHIFT 0x0 +#define SDMA4_VM_CTX_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA4_ACTIVE_FCN_ID +#define SDMA4_ACTIVE_FCN_ID__VFID__SHIFT 0x0 +#define SDMA4_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4 +#define SDMA4_ACTIVE_FCN_ID__VF__SHIFT 0x1f +#define SDMA4_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL +#define SDMA4_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L +#define SDMA4_ACTIVE_FCN_ID__VF_MASK 0x80000000L +//SDMA4_VM_CTX_CNTL +#define SDMA4_VM_CTX_CNTL__PRIV__SHIFT 0x0 +#define SDMA4_VM_CTX_CNTL__VMID__SHIFT 0x4 +#define SDMA4_VM_CTX_CNTL__PRIV_MASK 0x00000001L +#define SDMA4_VM_CTX_CNTL__VMID_MASK 0x000000F0L +//SDMA4_VIRT_RESET_REQ +#define SDMA4_VIRT_RESET_REQ__VF__SHIFT 0x0 +#define SDMA4_VIRT_RESET_REQ__PF__SHIFT 0x1f +#define SDMA4_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL +#define SDMA4_VIRT_RESET_REQ__PF_MASK 0x80000000L +//SDMA4_VF_ENABLE +#define SDMA4_VF_ENABLE__VF_ENABLE__SHIFT 0x0 +#define SDMA4_VF_ENABLE__VF_ENABLE_MASK 0x00000001L +//SDMA4_CONTEXT_REG_TYPE0 +#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_RB_CNTL__SHIFT 0x0 +#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_RB_BASE__SHIFT 0x1 +#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_RB_BASE_HI__SHIFT 0x2 +#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_RB_RPTR__SHIFT 0x3 +#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_RB_RPTR_HI__SHIFT 0x4 +#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_RB_WPTR__SHIFT 0x5 +#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_RB_WPTR_HI__SHIFT 0x6 +#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_RB_WPTR_POLL_CNTL__SHIFT 0x7 +#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_RB_RPTR_ADDR_HI__SHIFT 0x8 +#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_RB_RPTR_ADDR_LO__SHIFT 0x9 +#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_IB_CNTL__SHIFT 0xa +#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_IB_RPTR__SHIFT 0xb +#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_IB_OFFSET__SHIFT 0xc +#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_IB_BASE_LO__SHIFT 0xd +#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_IB_BASE_HI__SHIFT 0xe +#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_IB_SIZE__SHIFT 0xf +#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_SKIP_CNTL__SHIFT 0x10 +#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_CONTEXT_STATUS__SHIFT 0x11 +#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_DOORBELL__SHIFT 0x12 +#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_CONTEXT_CNTL__SHIFT 0x13 +#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_RB_CNTL_MASK 0x00000001L +#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_RB_BASE_MASK 0x00000002L +#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_RB_BASE_HI_MASK 0x00000004L +#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_RB_RPTR_MASK 0x00000008L +#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_RB_RPTR_HI_MASK 0x00000010L +#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_RB_WPTR_MASK 0x00000020L +#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_RB_WPTR_HI_MASK 0x00000040L +#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_RB_WPTR_POLL_CNTL_MASK 0x00000080L +#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_RB_RPTR_ADDR_HI_MASK 0x00000100L +#define 
SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_RB_RPTR_ADDR_LO_MASK 0x00000200L +#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_IB_CNTL_MASK 0x00000400L +#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_IB_RPTR_MASK 0x00000800L +#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_IB_OFFSET_MASK 0x00001000L +#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_IB_BASE_LO_MASK 0x00002000L +#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_IB_BASE_HI_MASK 0x00004000L +#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_IB_SIZE_MASK 0x00008000L +#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_SKIP_CNTL_MASK 0x00010000L +#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_CONTEXT_STATUS_MASK 0x00020000L +#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_DOORBELL_MASK 0x00040000L +#define SDMA4_CONTEXT_REG_TYPE0__SDMA4_GFX_CONTEXT_CNTL_MASK 0x00080000L +//SDMA4_CONTEXT_REG_TYPE1 +#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_STATUS__SHIFT 0x8 +#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_DOORBELL_LOG__SHIFT 0x9 +#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_WATERMARK__SHIFT 0xa +#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_DOORBELL_OFFSET__SHIFT 0xb +#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_CSA_ADDR_LO__SHIFT 0xc +#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_CSA_ADDR_HI__SHIFT 0xd +#define SDMA4_CONTEXT_REG_TYPE1__VOID_REG2__SHIFT 0xe +#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_IB_SUB_REMAIN__SHIFT 0xf +#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_PREEMPT__SHIFT 0x10 +#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_DUMMY_REG__SHIFT 0x11 +#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_RB_WPTR_POLL_ADDR_HI__SHIFT 0x12 +#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_RB_WPTR_POLL_ADDR_LO__SHIFT 0x13 +#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_RB_AQL_CNTL__SHIFT 0x14 +#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_MINOR_PTR_UPDATE__SHIFT 0x15 +#define SDMA4_CONTEXT_REG_TYPE1__RESERVED__SHIFT 0x16 +#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_STATUS_MASK 0x00000100L +#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_DOORBELL_LOG_MASK 0x00000200L +#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_WATERMARK_MASK 0x00000400L +#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_DOORBELL_OFFSET_MASK 0x00000800L +#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_CSA_ADDR_LO_MASK 0x00001000L +#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_CSA_ADDR_HI_MASK 0x00002000L +#define SDMA4_CONTEXT_REG_TYPE1__VOID_REG2_MASK 0x00004000L +#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_IB_SUB_REMAIN_MASK 0x00008000L +#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_PREEMPT_MASK 0x00010000L +#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_DUMMY_REG_MASK 0x00020000L +#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_RB_WPTR_POLL_ADDR_HI_MASK 0x00040000L +#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_RB_WPTR_POLL_ADDR_LO_MASK 0x00080000L +#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_RB_AQL_CNTL_MASK 0x00100000L +#define SDMA4_CONTEXT_REG_TYPE1__SDMA4_GFX_MINOR_PTR_UPDATE_MASK 0x00200000L +#define SDMA4_CONTEXT_REG_TYPE1__RESERVED_MASK 0xFFC00000L +//SDMA4_CONTEXT_REG_TYPE2 +#define SDMA4_CONTEXT_REG_TYPE2__SDMA4_GFX_MIDCMD_DATA0__SHIFT 0x0 +#define SDMA4_CONTEXT_REG_TYPE2__SDMA4_GFX_MIDCMD_DATA1__SHIFT 0x1 +#define SDMA4_CONTEXT_REG_TYPE2__SDMA4_GFX_MIDCMD_DATA2__SHIFT 0x2 +#define SDMA4_CONTEXT_REG_TYPE2__SDMA4_GFX_MIDCMD_DATA3__SHIFT 0x3 +#define SDMA4_CONTEXT_REG_TYPE2__SDMA4_GFX_MIDCMD_DATA4__SHIFT 0x4 +#define SDMA4_CONTEXT_REG_TYPE2__SDMA4_GFX_MIDCMD_DATA5__SHIFT 0x5 +#define SDMA4_CONTEXT_REG_TYPE2__SDMA4_GFX_MIDCMD_DATA6__SHIFT 0x6 +#define SDMA4_CONTEXT_REG_TYPE2__SDMA4_GFX_MIDCMD_DATA7__SHIFT 0x7 +#define SDMA4_CONTEXT_REG_TYPE2__SDMA4_GFX_MIDCMD_DATA8__SHIFT 0x8 +#define 
SDMA4_CONTEXT_REG_TYPE2__SDMA4_GFX_MIDCMD_CNTL__SHIFT 0x9 +#define SDMA4_CONTEXT_REG_TYPE2__RESERVED__SHIFT 0xa +#define SDMA4_CONTEXT_REG_TYPE2__SDMA4_GFX_MIDCMD_DATA0_MASK 0x00000001L +#define SDMA4_CONTEXT_REG_TYPE2__SDMA4_GFX_MIDCMD_DATA1_MASK 0x00000002L +#define SDMA4_CONTEXT_REG_TYPE2__SDMA4_GFX_MIDCMD_DATA2_MASK 0x00000004L +#define SDMA4_CONTEXT_REG_TYPE2__SDMA4_GFX_MIDCMD_DATA3_MASK 0x00000008L +#define SDMA4_CONTEXT_REG_TYPE2__SDMA4_GFX_MIDCMD_DATA4_MASK 0x00000010L +#define SDMA4_CONTEXT_REG_TYPE2__SDMA4_GFX_MIDCMD_DATA5_MASK 0x00000020L +#define SDMA4_CONTEXT_REG_TYPE2__SDMA4_GFX_MIDCMD_DATA6_MASK 0x00000040L +#define SDMA4_CONTEXT_REG_TYPE2__SDMA4_GFX_MIDCMD_DATA7_MASK 0x00000080L +#define SDMA4_CONTEXT_REG_TYPE2__SDMA4_GFX_MIDCMD_DATA8_MASK 0x00000100L +#define SDMA4_CONTEXT_REG_TYPE2__SDMA4_GFX_MIDCMD_CNTL_MASK 0x00000200L +#define SDMA4_CONTEXT_REG_TYPE2__RESERVED_MASK 0xFFFFFC00L +//SDMA4_CONTEXT_REG_TYPE3 +#define SDMA4_CONTEXT_REG_TYPE3__RESERVED__SHIFT 0x0 +#define SDMA4_CONTEXT_REG_TYPE3__RESERVED_MASK 0xFFFFFFFFL +//SDMA4_PUB_REG_TYPE0 +#define SDMA4_PUB_REG_TYPE0__SDMA4_UCODE_ADDR__SHIFT 0x0 +#define SDMA4_PUB_REG_TYPE0__SDMA4_UCODE_DATA__SHIFT 0x1 +#define SDMA4_PUB_REG_TYPE0__RESERVED3__SHIFT 0x3 +#define SDMA4_PUB_REG_TYPE0__SDMA4_VM_CNTL__SHIFT 0x4 +#define SDMA4_PUB_REG_TYPE0__SDMA4_VM_CTX_LO__SHIFT 0x5 +#define SDMA4_PUB_REG_TYPE0__SDMA4_VM_CTX_HI__SHIFT 0x6 +#define SDMA4_PUB_REG_TYPE0__SDMA4_ACTIVE_FCN_ID__SHIFT 0x7 +#define SDMA4_PUB_REG_TYPE0__SDMA4_VM_CTX_CNTL__SHIFT 0x8 +#define SDMA4_PUB_REG_TYPE0__SDMA4_VIRT_RESET_REQ__SHIFT 0x9 +#define SDMA4_PUB_REG_TYPE0__RESERVED10__SHIFT 0xa +#define SDMA4_PUB_REG_TYPE0__SDMA4_CONTEXT_REG_TYPE0__SHIFT 0xb +#define SDMA4_PUB_REG_TYPE0__SDMA4_CONTEXT_REG_TYPE1__SHIFT 0xc +#define SDMA4_PUB_REG_TYPE0__SDMA4_CONTEXT_REG_TYPE2__SHIFT 0xd +#define SDMA4_PUB_REG_TYPE0__SDMA4_CONTEXT_REG_TYPE3__SHIFT 0xe +#define SDMA4_PUB_REG_TYPE0__SDMA4_PUB_REG_TYPE0__SHIFT 0xf +#define SDMA4_PUB_REG_TYPE0__SDMA4_PUB_REG_TYPE1__SHIFT 0x10 +#define SDMA4_PUB_REG_TYPE0__SDMA4_PUB_REG_TYPE2__SHIFT 0x11 +#define SDMA4_PUB_REG_TYPE0__SDMA4_PUB_REG_TYPE3__SHIFT 0x12 +#define SDMA4_PUB_REG_TYPE0__SDMA4_MMHUB_CNTL__SHIFT 0x13 +#define SDMA4_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY__SHIFT 0x15 +#define SDMA4_PUB_REG_TYPE0__SDMA4_CONTEXT_GROUP_BOUNDARY__SHIFT 0x19 +#define SDMA4_PUB_REG_TYPE0__SDMA4_POWER_CNTL__SHIFT 0x1a +#define SDMA4_PUB_REG_TYPE0__SDMA4_CLK_CTRL__SHIFT 0x1b +#define SDMA4_PUB_REG_TYPE0__SDMA4_CNTL__SHIFT 0x1c +#define SDMA4_PUB_REG_TYPE0__SDMA4_CHICKEN_BITS__SHIFT 0x1d +#define SDMA4_PUB_REG_TYPE0__SDMA4_GB_ADDR_CONFIG__SHIFT 0x1e +#define SDMA4_PUB_REG_TYPE0__SDMA4_GB_ADDR_CONFIG_READ__SHIFT 0x1f +#define SDMA4_PUB_REG_TYPE0__SDMA4_UCODE_ADDR_MASK 0x00000001L +#define SDMA4_PUB_REG_TYPE0__SDMA4_UCODE_DATA_MASK 0x00000002L +#define SDMA4_PUB_REG_TYPE0__RESERVED3_MASK 0x00000008L +#define SDMA4_PUB_REG_TYPE0__SDMA4_VM_CNTL_MASK 0x00000010L +#define SDMA4_PUB_REG_TYPE0__SDMA4_VM_CTX_LO_MASK 0x00000020L +#define SDMA4_PUB_REG_TYPE0__SDMA4_VM_CTX_HI_MASK 0x00000040L +#define SDMA4_PUB_REG_TYPE0__SDMA4_ACTIVE_FCN_ID_MASK 0x00000080L +#define SDMA4_PUB_REG_TYPE0__SDMA4_VM_CTX_CNTL_MASK 0x00000100L +#define SDMA4_PUB_REG_TYPE0__SDMA4_VIRT_RESET_REQ_MASK 0x00000200L +#define SDMA4_PUB_REG_TYPE0__RESERVED10_MASK 0x00000400L +#define SDMA4_PUB_REG_TYPE0__SDMA4_CONTEXT_REG_TYPE0_MASK 0x00000800L +#define SDMA4_PUB_REG_TYPE0__SDMA4_CONTEXT_REG_TYPE1_MASK 0x00001000L +#define SDMA4_PUB_REG_TYPE0__SDMA4_CONTEXT_REG_TYPE2_MASK 
0x00002000L +#define SDMA4_PUB_REG_TYPE0__SDMA4_CONTEXT_REG_TYPE3_MASK 0x00004000L +#define SDMA4_PUB_REG_TYPE0__SDMA4_PUB_REG_TYPE0_MASK 0x00008000L +#define SDMA4_PUB_REG_TYPE0__SDMA4_PUB_REG_TYPE1_MASK 0x00010000L +#define SDMA4_PUB_REG_TYPE0__SDMA4_PUB_REG_TYPE2_MASK 0x00020000L +#define SDMA4_PUB_REG_TYPE0__SDMA4_PUB_REG_TYPE3_MASK 0x00040000L +#define SDMA4_PUB_REG_TYPE0__SDMA4_MMHUB_CNTL_MASK 0x00080000L +#define SDMA4_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY_MASK 0x01E00000L +#define SDMA4_PUB_REG_TYPE0__SDMA4_CONTEXT_GROUP_BOUNDARY_MASK 0x02000000L +#define SDMA4_PUB_REG_TYPE0__SDMA4_POWER_CNTL_MASK 0x04000000L +#define SDMA4_PUB_REG_TYPE0__SDMA4_CLK_CTRL_MASK 0x08000000L +#define SDMA4_PUB_REG_TYPE0__SDMA4_CNTL_MASK 0x10000000L +#define SDMA4_PUB_REG_TYPE0__SDMA4_CHICKEN_BITS_MASK 0x20000000L +#define SDMA4_PUB_REG_TYPE0__SDMA4_GB_ADDR_CONFIG_MASK 0x40000000L +#define SDMA4_PUB_REG_TYPE0__SDMA4_GB_ADDR_CONFIG_READ_MASK 0x80000000L +//SDMA4_PUB_REG_TYPE1 +#define SDMA4_PUB_REG_TYPE1__SDMA4_RB_RPTR_FETCH_HI__SHIFT 0x0 +#define SDMA4_PUB_REG_TYPE1__SDMA4_SEM_WAIT_FAIL_TIMER_CNTL__SHIFT 0x1 +#define SDMA4_PUB_REG_TYPE1__SDMA4_RB_RPTR_FETCH__SHIFT 0x2 +#define SDMA4_PUB_REG_TYPE1__SDMA4_IB_OFFSET_FETCH__SHIFT 0x3 +#define SDMA4_PUB_REG_TYPE1__SDMA4_PROGRAM__SHIFT 0x4 +#define SDMA4_PUB_REG_TYPE1__SDMA4_STATUS_REG__SHIFT 0x5 +#define SDMA4_PUB_REG_TYPE1__SDMA4_STATUS1_REG__SHIFT 0x6 +#define SDMA4_PUB_REG_TYPE1__SDMA4_RD_BURST_CNTL__SHIFT 0x7 +#define SDMA4_PUB_REG_TYPE1__SDMA4_HBM_PAGE_CONFIG__SHIFT 0x8 +#define SDMA4_PUB_REG_TYPE1__SDMA4_UCODE_CHECKSUM__SHIFT 0x9 +#define SDMA4_PUB_REG_TYPE1__SDMA4_F32_CNTL__SHIFT 0xa +#define SDMA4_PUB_REG_TYPE1__SDMA4_FREEZE__SHIFT 0xb +#define SDMA4_PUB_REG_TYPE1__SDMA4_PHASE0_QUANTUM__SHIFT 0xc +#define SDMA4_PUB_REG_TYPE1__SDMA4_PHASE1_QUANTUM__SHIFT 0xd +#define SDMA4_PUB_REG_TYPE1__SDMA_POWER_GATING__SHIFT 0xe +#define SDMA4_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG__SHIFT 0xf +#define SDMA4_PUB_REG_TYPE1__SDMA_PGFSM_WRITE__SHIFT 0x10 +#define SDMA4_PUB_REG_TYPE1__SDMA_PGFSM_READ__SHIFT 0x11 +#define SDMA4_PUB_REG_TYPE1__SDMA4_EDC_CONFIG__SHIFT 0x12 +#define SDMA4_PUB_REG_TYPE1__SDMA4_BA_THRESHOLD__SHIFT 0x13 +#define SDMA4_PUB_REG_TYPE1__SDMA4_ID__SHIFT 0x14 +#define SDMA4_PUB_REG_TYPE1__SDMA4_VERSION__SHIFT 0x15 +#define SDMA4_PUB_REG_TYPE1__SDMA4_EDC_COUNTER__SHIFT 0x16 +#define SDMA4_PUB_REG_TYPE1__SDMA4_EDC_COUNTER_CLEAR__SHIFT 0x17 +#define SDMA4_PUB_REG_TYPE1__SDMA4_STATUS2_REG__SHIFT 0x18 +#define SDMA4_PUB_REG_TYPE1__SDMA4_ATOMIC_CNTL__SHIFT 0x19 +#define SDMA4_PUB_REG_TYPE1__SDMA4_ATOMIC_PREOP_LO__SHIFT 0x1a +#define SDMA4_PUB_REG_TYPE1__SDMA4_ATOMIC_PREOP_HI__SHIFT 0x1b +#define SDMA4_PUB_REG_TYPE1__SDMA4_UTCL1_CNTL__SHIFT 0x1c +#define SDMA4_PUB_REG_TYPE1__SDMA4_UTCL1_WATERMK__SHIFT 0x1d +#define SDMA4_PUB_REG_TYPE1__SDMA4_UTCL1_RD_STATUS__SHIFT 0x1e +#define SDMA4_PUB_REG_TYPE1__SDMA4_UTCL1_WR_STATUS__SHIFT 0x1f +#define SDMA4_PUB_REG_TYPE1__SDMA4_RB_RPTR_FETCH_HI_MASK 0x00000001L +#define SDMA4_PUB_REG_TYPE1__SDMA4_SEM_WAIT_FAIL_TIMER_CNTL_MASK 0x00000002L +#define SDMA4_PUB_REG_TYPE1__SDMA4_RB_RPTR_FETCH_MASK 0x00000004L +#define SDMA4_PUB_REG_TYPE1__SDMA4_IB_OFFSET_FETCH_MASK 0x00000008L +#define SDMA4_PUB_REG_TYPE1__SDMA4_PROGRAM_MASK 0x00000010L +#define SDMA4_PUB_REG_TYPE1__SDMA4_STATUS_REG_MASK 0x00000020L +#define SDMA4_PUB_REG_TYPE1__SDMA4_STATUS1_REG_MASK 0x00000040L +#define SDMA4_PUB_REG_TYPE1__SDMA4_RD_BURST_CNTL_MASK 0x00000080L +#define SDMA4_PUB_REG_TYPE1__SDMA4_HBM_PAGE_CONFIG_MASK 0x00000100L +#define 
SDMA4_PUB_REG_TYPE1__SDMA4_UCODE_CHECKSUM_MASK 0x00000200L +#define SDMA4_PUB_REG_TYPE1__SDMA4_F32_CNTL_MASK 0x00000400L +#define SDMA4_PUB_REG_TYPE1__SDMA4_FREEZE_MASK 0x00000800L +#define SDMA4_PUB_REG_TYPE1__SDMA4_PHASE0_QUANTUM_MASK 0x00001000L +#define SDMA4_PUB_REG_TYPE1__SDMA4_PHASE1_QUANTUM_MASK 0x00002000L +#define SDMA4_PUB_REG_TYPE1__SDMA_POWER_GATING_MASK 0x00004000L +#define SDMA4_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG_MASK 0x00008000L +#define SDMA4_PUB_REG_TYPE1__SDMA_PGFSM_WRITE_MASK 0x00010000L +#define SDMA4_PUB_REG_TYPE1__SDMA_PGFSM_READ_MASK 0x00020000L +#define SDMA4_PUB_REG_TYPE1__SDMA4_EDC_CONFIG_MASK 0x00040000L +#define SDMA4_PUB_REG_TYPE1__SDMA4_BA_THRESHOLD_MASK 0x00080000L +#define SDMA4_PUB_REG_TYPE1__SDMA4_ID_MASK 0x00100000L +#define SDMA4_PUB_REG_TYPE1__SDMA4_VERSION_MASK 0x00200000L +#define SDMA4_PUB_REG_TYPE1__SDMA4_EDC_COUNTER_MASK 0x00400000L +#define SDMA4_PUB_REG_TYPE1__SDMA4_EDC_COUNTER_CLEAR_MASK 0x00800000L +#define SDMA4_PUB_REG_TYPE1__SDMA4_STATUS2_REG_MASK 0x01000000L +#define SDMA4_PUB_REG_TYPE1__SDMA4_ATOMIC_CNTL_MASK 0x02000000L +#define SDMA4_PUB_REG_TYPE1__SDMA4_ATOMIC_PREOP_LO_MASK 0x04000000L +#define SDMA4_PUB_REG_TYPE1__SDMA4_ATOMIC_PREOP_HI_MASK 0x08000000L +#define SDMA4_PUB_REG_TYPE1__SDMA4_UTCL1_CNTL_MASK 0x10000000L +#define SDMA4_PUB_REG_TYPE1__SDMA4_UTCL1_WATERMK_MASK 0x20000000L +#define SDMA4_PUB_REG_TYPE1__SDMA4_UTCL1_RD_STATUS_MASK 0x40000000L +#define SDMA4_PUB_REG_TYPE1__SDMA4_UTCL1_WR_STATUS_MASK 0x80000000L +//SDMA4_PUB_REG_TYPE2 +#define SDMA4_PUB_REG_TYPE2__SDMA4_UTCL1_INV0__SHIFT 0x0 +#define SDMA4_PUB_REG_TYPE2__SDMA4_UTCL1_INV1__SHIFT 0x1 +#define SDMA4_PUB_REG_TYPE2__SDMA4_UTCL1_INV2__SHIFT 0x2 +#define SDMA4_PUB_REG_TYPE2__SDMA4_UTCL1_RD_XNACK0__SHIFT 0x3 +#define SDMA4_PUB_REG_TYPE2__SDMA4_UTCL1_RD_XNACK1__SHIFT 0x4 +#define SDMA4_PUB_REG_TYPE2__SDMA4_UTCL1_WR_XNACK0__SHIFT 0x5 +#define SDMA4_PUB_REG_TYPE2__SDMA4_UTCL1_WR_XNACK1__SHIFT 0x6 +#define SDMA4_PUB_REG_TYPE2__SDMA4_UTCL1_TIMEOUT__SHIFT 0x7 +#define SDMA4_PUB_REG_TYPE2__SDMA4_UTCL1_PAGE__SHIFT 0x8 +#define SDMA4_PUB_REG_TYPE2__SDMA4_POWER_CNTL_IDLE__SHIFT 0x9 +#define SDMA4_PUB_REG_TYPE2__SDMA4_RELAX_ORDERING_LUT__SHIFT 0xa +#define SDMA4_PUB_REG_TYPE2__SDMA4_CHICKEN_BITS_2__SHIFT 0xb +#define SDMA4_PUB_REG_TYPE2__SDMA4_STATUS3_REG__SHIFT 0xc +#define SDMA4_PUB_REG_TYPE2__SDMA4_PHYSICAL_ADDR_LO__SHIFT 0xd +#define SDMA4_PUB_REG_TYPE2__SDMA4_PHYSICAL_ADDR_HI__SHIFT 0xe +#define SDMA4_PUB_REG_TYPE2__SDMA4_PHASE2_QUANTUM__SHIFT 0xf +#define SDMA4_PUB_REG_TYPE2__SDMA4_ERROR_LOG__SHIFT 0x10 +#define SDMA4_PUB_REG_TYPE2__SDMA4_PUB_DUMMY_REG0__SHIFT 0x11 +#define SDMA4_PUB_REG_TYPE2__SDMA4_PUB_DUMMY_REG1__SHIFT 0x12 +#define SDMA4_PUB_REG_TYPE2__SDMA4_PUB_DUMMY_REG2__SHIFT 0x13 +#define SDMA4_PUB_REG_TYPE2__SDMA4_PUB_DUMMY_REG3__SHIFT 0x14 +#define SDMA4_PUB_REG_TYPE2__SDMA4_F32_COUNTER__SHIFT 0x15 +#define SDMA4_PUB_REG_TYPE2__SDMA4_UNBREAKABLE__SHIFT 0x16 +#define SDMA4_PUB_REG_TYPE2__SDMA4_PERFMON_CNTL__SHIFT 0x17 +#define SDMA4_PUB_REG_TYPE2__SDMA4_PERFCOUNTER0_RESULT__SHIFT 0x18 +#define SDMA4_PUB_REG_TYPE2__SDMA4_PERFCOUNTER1_RESULT__SHIFT 0x19 +#define SDMA4_PUB_REG_TYPE2__SDMA4_PERFCOUNTER_TAG_DELAY_RANGE__SHIFT 0x1a +#define SDMA4_PUB_REG_TYPE2__SDMA4_CRD_CNTL__SHIFT 0x1b +#define SDMA4_PUB_REG_TYPE2__RESERVED28__SHIFT 0x1c +#define SDMA4_PUB_REG_TYPE2__SDMA4_GPU_IOV_VIOLATION_LOG__SHIFT 0x1d +#define SDMA4_PUB_REG_TYPE2__SDMA4_ULV_CNTL__SHIFT 0x1e +#define SDMA4_PUB_REG_TYPE2__RESERVED__SHIFT 0x1f +#define SDMA4_PUB_REG_TYPE2__SDMA4_UTCL1_INV0_MASK 
0x00000001L +#define SDMA4_PUB_REG_TYPE2__SDMA4_UTCL1_INV1_MASK 0x00000002L +#define SDMA4_PUB_REG_TYPE2__SDMA4_UTCL1_INV2_MASK 0x00000004L +#define SDMA4_PUB_REG_TYPE2__SDMA4_UTCL1_RD_XNACK0_MASK 0x00000008L +#define SDMA4_PUB_REG_TYPE2__SDMA4_UTCL1_RD_XNACK1_MASK 0x00000010L +#define SDMA4_PUB_REG_TYPE2__SDMA4_UTCL1_WR_XNACK0_MASK 0x00000020L +#define SDMA4_PUB_REG_TYPE2__SDMA4_UTCL1_WR_XNACK1_MASK 0x00000040L +#define SDMA4_PUB_REG_TYPE2__SDMA4_UTCL1_TIMEOUT_MASK 0x00000080L +#define SDMA4_PUB_REG_TYPE2__SDMA4_UTCL1_PAGE_MASK 0x00000100L +#define SDMA4_PUB_REG_TYPE2__SDMA4_POWER_CNTL_IDLE_MASK 0x00000200L +#define SDMA4_PUB_REG_TYPE2__SDMA4_RELAX_ORDERING_LUT_MASK 0x00000400L +#define SDMA4_PUB_REG_TYPE2__SDMA4_CHICKEN_BITS_2_MASK 0x00000800L +#define SDMA4_PUB_REG_TYPE2__SDMA4_STATUS3_REG_MASK 0x00001000L +#define SDMA4_PUB_REG_TYPE2__SDMA4_PHYSICAL_ADDR_LO_MASK 0x00002000L +#define SDMA4_PUB_REG_TYPE2__SDMA4_PHYSICAL_ADDR_HI_MASK 0x00004000L +#define SDMA4_PUB_REG_TYPE2__SDMA4_PHASE2_QUANTUM_MASK 0x00008000L +#define SDMA4_PUB_REG_TYPE2__SDMA4_ERROR_LOG_MASK 0x00010000L +#define SDMA4_PUB_REG_TYPE2__SDMA4_PUB_DUMMY_REG0_MASK 0x00020000L +#define SDMA4_PUB_REG_TYPE2__SDMA4_PUB_DUMMY_REG1_MASK 0x00040000L +#define SDMA4_PUB_REG_TYPE2__SDMA4_PUB_DUMMY_REG2_MASK 0x00080000L +#define SDMA4_PUB_REG_TYPE2__SDMA4_PUB_DUMMY_REG3_MASK 0x00100000L +#define SDMA4_PUB_REG_TYPE2__SDMA4_F32_COUNTER_MASK 0x00200000L +#define SDMA4_PUB_REG_TYPE2__SDMA4_UNBREAKABLE_MASK 0x00400000L +#define SDMA4_PUB_REG_TYPE2__SDMA4_PERFMON_CNTL_MASK 0x00800000L +#define SDMA4_PUB_REG_TYPE2__SDMA4_PERFCOUNTER0_RESULT_MASK 0x01000000L +#define SDMA4_PUB_REG_TYPE2__SDMA4_PERFCOUNTER1_RESULT_MASK 0x02000000L +#define SDMA4_PUB_REG_TYPE2__SDMA4_PERFCOUNTER_TAG_DELAY_RANGE_MASK 0x04000000L +#define SDMA4_PUB_REG_TYPE2__SDMA4_CRD_CNTL_MASK 0x08000000L +#define SDMA4_PUB_REG_TYPE2__RESERVED28_MASK 0x10000000L +#define SDMA4_PUB_REG_TYPE2__SDMA4_GPU_IOV_VIOLATION_LOG_MASK 0x20000000L +#define SDMA4_PUB_REG_TYPE2__SDMA4_ULV_CNTL_MASK 0x40000000L +#define SDMA4_PUB_REG_TYPE2__RESERVED_MASK 0x80000000L +//SDMA4_PUB_REG_TYPE3 +#define SDMA4_PUB_REG_TYPE3__SDMA4_EA_DBIT_ADDR_DATA__SHIFT 0x0 +#define SDMA4_PUB_REG_TYPE3__SDMA4_EA_DBIT_ADDR_INDEX__SHIFT 0x1 +#define SDMA4_PUB_REG_TYPE3__SDMA4_GPU_IOV_VIOLATION_LOG2__SHIFT 0x2 +#define SDMA4_PUB_REG_TYPE3__RESERVED__SHIFT 0x3 +#define SDMA4_PUB_REG_TYPE3__SDMA4_EA_DBIT_ADDR_DATA_MASK 0x00000001L +#define SDMA4_PUB_REG_TYPE3__SDMA4_EA_DBIT_ADDR_INDEX_MASK 0x00000002L +#define SDMA4_PUB_REG_TYPE3__SDMA4_GPU_IOV_VIOLATION_LOG2_MASK 0x00000004L +#define SDMA4_PUB_REG_TYPE3__RESERVED_MASK 0xFFFFFFF8L +//SDMA4_MMHUB_CNTL +#define SDMA4_MMHUB_CNTL__UNIT_ID__SHIFT 0x0 +#define SDMA4_MMHUB_CNTL__UNIT_ID_MASK 0x0000003FL +//SDMA4_CONTEXT_GROUP_BOUNDARY +#define SDMA4_CONTEXT_GROUP_BOUNDARY__RESERVED__SHIFT 0x0 +#define SDMA4_CONTEXT_GROUP_BOUNDARY__RESERVED_MASK 0xFFFFFFFFL +//SDMA4_POWER_CNTL +#define SDMA4_POWER_CNTL__MEM_POWER_OVERRIDE__SHIFT 0x8 +#define SDMA4_POWER_CNTL__MEM_POWER_LS_EN__SHIFT 0x9 +#define SDMA4_POWER_CNTL__MEM_POWER_DS_EN__SHIFT 0xa +#define SDMA4_POWER_CNTL__MEM_POWER_SD_EN__SHIFT 0xb +#define SDMA4_POWER_CNTL__MEM_POWER_DELAY__SHIFT 0xc +#define SDMA4_POWER_CNTL__MEM_POWER_OVERRIDE_MASK 0x00000100L +#define SDMA4_POWER_CNTL__MEM_POWER_LS_EN_MASK 0x00000200L +#define SDMA4_POWER_CNTL__MEM_POWER_DS_EN_MASK 0x00000400L +#define SDMA4_POWER_CNTL__MEM_POWER_SD_EN_MASK 0x00000800L +#define SDMA4_POWER_CNTL__MEM_POWER_DELAY_MASK 0x003FF000L +//SDMA4_CLK_CTRL +#define 
SDMA4_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define SDMA4_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define SDMA4_CLK_CTRL__RESERVED__SHIFT 0xc +#define SDMA4_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18 +#define SDMA4_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19 +#define SDMA4_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a +#define SDMA4_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b +#define SDMA4_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c +#define SDMA4_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d +#define SDMA4_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e +#define SDMA4_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f +#define SDMA4_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define SDMA4_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define SDMA4_CLK_CTRL__RESERVED_MASK 0x00FFF000L +#define SDMA4_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L +#define SDMA4_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L +#define SDMA4_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L +#define SDMA4_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L +#define SDMA4_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L +#define SDMA4_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L +#define SDMA4_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L +#define SDMA4_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L +//SDMA4_CNTL +#define SDMA4_CNTL__TRAP_ENABLE__SHIFT 0x0 +#define SDMA4_CNTL__UTC_L1_ENABLE__SHIFT 0x1 +#define SDMA4_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x2 +#define SDMA4_CNTL__DATA_SWAP_ENABLE__SHIFT 0x3 +#define SDMA4_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x4 +#define SDMA4_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x5 +#define SDMA4_CNTL__MIDCMD_WORLDSWITCH_ENABLE__SHIFT 0x11 +#define SDMA4_CNTL__AUTO_CTXSW_ENABLE__SHIFT 0x12 +#define SDMA4_CNTL__CTXEMPTY_INT_ENABLE__SHIFT 0x1c +#define SDMA4_CNTL__FROZEN_INT_ENABLE__SHIFT 0x1d +#define SDMA4_CNTL__IB_PREEMPT_INT_ENABLE__SHIFT 0x1e +#define SDMA4_CNTL__TRAP_ENABLE_MASK 0x00000001L +#define SDMA4_CNTL__UTC_L1_ENABLE_MASK 0x00000002L +#define SDMA4_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L +#define SDMA4_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L +#define SDMA4_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L +#define SDMA4_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00000020L +#define SDMA4_CNTL__MIDCMD_WORLDSWITCH_ENABLE_MASK 0x00020000L +#define SDMA4_CNTL__AUTO_CTXSW_ENABLE_MASK 0x00040000L +#define SDMA4_CNTL__CTXEMPTY_INT_ENABLE_MASK 0x10000000L +#define SDMA4_CNTL__FROZEN_INT_ENABLE_MASK 0x20000000L +#define SDMA4_CNTL__IB_PREEMPT_INT_ENABLE_MASK 0x40000000L +//SDMA4_CHICKEN_BITS +#define SDMA4_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE__SHIFT 0x0 +#define SDMA4_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE__SHIFT 0x1 +#define SDMA4_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE__SHIFT 0x2 +#define SDMA4_CHICKEN_BITS__WRITE_BURST_LENGTH__SHIFT 0x8 +#define SDMA4_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE__SHIFT 0xa +#define SDMA4_CHICKEN_BITS__COPY_OVERLAP_ENABLE__SHIFT 0x10 +#define SDMA4_CHICKEN_BITS__RAW_CHECK_ENABLE__SHIFT 0x11 +#define SDMA4_CHICKEN_BITS__SRBM_POLL_RETRYING__SHIFT 0x14 +#define SDMA4_CHICKEN_BITS__CG_STATUS_OUTPUT__SHIFT 0x17 +#define SDMA4_CHICKEN_BITS__TIME_BASED_QOS__SHIFT 0x19 +#define SDMA4_CHICKEN_BITS__CE_AFIFO_WATERMARK__SHIFT 0x1a +#define SDMA4_CHICKEN_BITS__CE_DFIFO_WATERMARK__SHIFT 0x1c +#define SDMA4_CHICKEN_BITS__CE_LFIFO_WATERMARK__SHIFT 0x1e +#define SDMA4_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE_MASK 0x00000001L +#define SDMA4_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE_MASK 0x00000002L +#define SDMA4_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE_MASK 0x00000004L +#define SDMA4_CHICKEN_BITS__WRITE_BURST_LENGTH_MASK 0x00000300L +#define SDMA4_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE_MASK 0x00001C00L 
+#define SDMA4_CHICKEN_BITS__COPY_OVERLAP_ENABLE_MASK 0x00010000L +#define SDMA4_CHICKEN_BITS__RAW_CHECK_ENABLE_MASK 0x00020000L +#define SDMA4_CHICKEN_BITS__SRBM_POLL_RETRYING_MASK 0x00100000L +#define SDMA4_CHICKEN_BITS__CG_STATUS_OUTPUT_MASK 0x00800000L +#define SDMA4_CHICKEN_BITS__TIME_BASED_QOS_MASK 0x02000000L +#define SDMA4_CHICKEN_BITS__CE_AFIFO_WATERMARK_MASK 0x0C000000L +#define SDMA4_CHICKEN_BITS__CE_DFIFO_WATERMARK_MASK 0x30000000L +#define SDMA4_CHICKEN_BITS__CE_LFIFO_WATERMARK_MASK 0xC0000000L +//SDMA4_GB_ADDR_CONFIG +#define SDMA4_GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0 +#define SDMA4_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3 +#define SDMA4_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x8 +#define SDMA4_GB_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc +#define SDMA4_GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13 +#define SDMA4_GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L +#define SDMA4_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L +#define SDMA4_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L +#define SDMA4_GB_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L +#define SDMA4_GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L +//SDMA4_GB_ADDR_CONFIG_READ +#define SDMA4_GB_ADDR_CONFIG_READ__NUM_PIPES__SHIFT 0x0 +#define SDMA4_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE__SHIFT 0x3 +#define SDMA4_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE__SHIFT 0x8 +#define SDMA4_GB_ADDR_CONFIG_READ__NUM_BANKS__SHIFT 0xc +#define SDMA4_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES__SHIFT 0x13 +#define SDMA4_GB_ADDR_CONFIG_READ__NUM_PIPES_MASK 0x00000007L +#define SDMA4_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L +#define SDMA4_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE_MASK 0x00000700L +#define SDMA4_GB_ADDR_CONFIG_READ__NUM_BANKS_MASK 0x00007000L +#define SDMA4_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES_MASK 0x00180000L +//SDMA4_RB_RPTR_FETCH_HI +#define SDMA4_RB_RPTR_FETCH_HI__OFFSET__SHIFT 0x0 +#define SDMA4_RB_RPTR_FETCH_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA4_SEM_WAIT_FAIL_TIMER_CNTL +#define SDMA4_SEM_WAIT_FAIL_TIMER_CNTL__TIMER__SHIFT 0x0 +#define SDMA4_SEM_WAIT_FAIL_TIMER_CNTL__TIMER_MASK 0xFFFFFFFFL +//SDMA4_RB_RPTR_FETCH +#define SDMA4_RB_RPTR_FETCH__OFFSET__SHIFT 0x2 +#define SDMA4_RB_RPTR_FETCH__OFFSET_MASK 0xFFFFFFFCL +//SDMA4_IB_OFFSET_FETCH +#define SDMA4_IB_OFFSET_FETCH__OFFSET__SHIFT 0x2 +#define SDMA4_IB_OFFSET_FETCH__OFFSET_MASK 0x003FFFFCL +//SDMA4_PROGRAM +#define SDMA4_PROGRAM__STREAM__SHIFT 0x0 +#define SDMA4_PROGRAM__STREAM_MASK 0xFFFFFFFFL +//SDMA4_STATUS_REG +#define SDMA4_STATUS_REG__IDLE__SHIFT 0x0 +#define SDMA4_STATUS_REG__REG_IDLE__SHIFT 0x1 +#define SDMA4_STATUS_REG__RB_EMPTY__SHIFT 0x2 +#define SDMA4_STATUS_REG__RB_FULL__SHIFT 0x3 +#define SDMA4_STATUS_REG__RB_CMD_IDLE__SHIFT 0x4 +#define SDMA4_STATUS_REG__RB_CMD_FULL__SHIFT 0x5 +#define SDMA4_STATUS_REG__IB_CMD_IDLE__SHIFT 0x6 +#define SDMA4_STATUS_REG__IB_CMD_FULL__SHIFT 0x7 +#define SDMA4_STATUS_REG__BLOCK_IDLE__SHIFT 0x8 +#define SDMA4_STATUS_REG__INSIDE_IB__SHIFT 0x9 +#define SDMA4_STATUS_REG__EX_IDLE__SHIFT 0xa +#define SDMA4_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE__SHIFT 0xb +#define SDMA4_STATUS_REG__PACKET_READY__SHIFT 0xc +#define SDMA4_STATUS_REG__MC_WR_IDLE__SHIFT 0xd +#define SDMA4_STATUS_REG__SRBM_IDLE__SHIFT 0xe +#define SDMA4_STATUS_REG__CONTEXT_EMPTY__SHIFT 0xf +#define SDMA4_STATUS_REG__DELTA_RPTR_FULL__SHIFT 0x10 +#define SDMA4_STATUS_REG__RB_MC_RREQ_IDLE__SHIFT 0x11 +#define SDMA4_STATUS_REG__IB_MC_RREQ_IDLE__SHIFT 0x12 +#define SDMA4_STATUS_REG__MC_RD_IDLE__SHIFT 0x13 +#define 
SDMA4_STATUS_REG__DELTA_RPTR_EMPTY__SHIFT 0x14 +#define SDMA4_STATUS_REG__MC_RD_RET_STALL__SHIFT 0x15 +#define SDMA4_STATUS_REG__MC_RD_NO_POLL_IDLE__SHIFT 0x16 +#define SDMA4_STATUS_REG__PREV_CMD_IDLE__SHIFT 0x19 +#define SDMA4_STATUS_REG__SEM_IDLE__SHIFT 0x1a +#define SDMA4_STATUS_REG__SEM_REQ_STALL__SHIFT 0x1b +#define SDMA4_STATUS_REG__SEM_RESP_STATE__SHIFT 0x1c +#define SDMA4_STATUS_REG__INT_IDLE__SHIFT 0x1e +#define SDMA4_STATUS_REG__INT_REQ_STALL__SHIFT 0x1f +#define SDMA4_STATUS_REG__IDLE_MASK 0x00000001L +#define SDMA4_STATUS_REG__REG_IDLE_MASK 0x00000002L +#define SDMA4_STATUS_REG__RB_EMPTY_MASK 0x00000004L +#define SDMA4_STATUS_REG__RB_FULL_MASK 0x00000008L +#define SDMA4_STATUS_REG__RB_CMD_IDLE_MASK 0x00000010L +#define SDMA4_STATUS_REG__RB_CMD_FULL_MASK 0x00000020L +#define SDMA4_STATUS_REG__IB_CMD_IDLE_MASK 0x00000040L +#define SDMA4_STATUS_REG__IB_CMD_FULL_MASK 0x00000080L +#define SDMA4_STATUS_REG__BLOCK_IDLE_MASK 0x00000100L +#define SDMA4_STATUS_REG__INSIDE_IB_MASK 0x00000200L +#define SDMA4_STATUS_REG__EX_IDLE_MASK 0x00000400L +#define SDMA4_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE_MASK 0x00000800L +#define SDMA4_STATUS_REG__PACKET_READY_MASK 0x00001000L +#define SDMA4_STATUS_REG__MC_WR_IDLE_MASK 0x00002000L +#define SDMA4_STATUS_REG__SRBM_IDLE_MASK 0x00004000L +#define SDMA4_STATUS_REG__CONTEXT_EMPTY_MASK 0x00008000L +#define SDMA4_STATUS_REG__DELTA_RPTR_FULL_MASK 0x00010000L +#define SDMA4_STATUS_REG__RB_MC_RREQ_IDLE_MASK 0x00020000L +#define SDMA4_STATUS_REG__IB_MC_RREQ_IDLE_MASK 0x00040000L +#define SDMA4_STATUS_REG__MC_RD_IDLE_MASK 0x00080000L +#define SDMA4_STATUS_REG__DELTA_RPTR_EMPTY_MASK 0x00100000L +#define SDMA4_STATUS_REG__MC_RD_RET_STALL_MASK 0x00200000L +#define SDMA4_STATUS_REG__MC_RD_NO_POLL_IDLE_MASK 0x00400000L +#define SDMA4_STATUS_REG__PREV_CMD_IDLE_MASK 0x02000000L +#define SDMA4_STATUS_REG__SEM_IDLE_MASK 0x04000000L +#define SDMA4_STATUS_REG__SEM_REQ_STALL_MASK 0x08000000L +#define SDMA4_STATUS_REG__SEM_RESP_STATE_MASK 0x30000000L +#define SDMA4_STATUS_REG__INT_IDLE_MASK 0x40000000L +#define SDMA4_STATUS_REG__INT_REQ_STALL_MASK 0x80000000L +//SDMA4_STATUS1_REG +#define SDMA4_STATUS1_REG__CE_WREQ_IDLE__SHIFT 0x0 +#define SDMA4_STATUS1_REG__CE_WR_IDLE__SHIFT 0x1 +#define SDMA4_STATUS1_REG__CE_SPLIT_IDLE__SHIFT 0x2 +#define SDMA4_STATUS1_REG__CE_RREQ_IDLE__SHIFT 0x3 +#define SDMA4_STATUS1_REG__CE_OUT_IDLE__SHIFT 0x4 +#define SDMA4_STATUS1_REG__CE_IN_IDLE__SHIFT 0x5 +#define SDMA4_STATUS1_REG__CE_DST_IDLE__SHIFT 0x6 +#define SDMA4_STATUS1_REG__CE_CMD_IDLE__SHIFT 0x9 +#define SDMA4_STATUS1_REG__CE_AFIFO_FULL__SHIFT 0xa +#define SDMA4_STATUS1_REG__CE_INFO_FULL__SHIFT 0xd +#define SDMA4_STATUS1_REG__CE_INFO1_FULL__SHIFT 0xe +#define SDMA4_STATUS1_REG__EX_START__SHIFT 0xf +#define SDMA4_STATUS1_REG__CE_RD_STALL__SHIFT 0x11 +#define SDMA4_STATUS1_REG__CE_WR_STALL__SHIFT 0x12 +#define SDMA4_STATUS1_REG__CE_WREQ_IDLE_MASK 0x00000001L +#define SDMA4_STATUS1_REG__CE_WR_IDLE_MASK 0x00000002L +#define SDMA4_STATUS1_REG__CE_SPLIT_IDLE_MASK 0x00000004L +#define SDMA4_STATUS1_REG__CE_RREQ_IDLE_MASK 0x00000008L +#define SDMA4_STATUS1_REG__CE_OUT_IDLE_MASK 0x00000010L +#define SDMA4_STATUS1_REG__CE_IN_IDLE_MASK 0x00000020L +#define SDMA4_STATUS1_REG__CE_DST_IDLE_MASK 0x00000040L +#define SDMA4_STATUS1_REG__CE_CMD_IDLE_MASK 0x00000200L +#define SDMA4_STATUS1_REG__CE_AFIFO_FULL_MASK 0x00000400L +#define SDMA4_STATUS1_REG__CE_INFO_FULL_MASK 0x00002000L +#define SDMA4_STATUS1_REG__CE_INFO1_FULL_MASK 0x00004000L +#define SDMA4_STATUS1_REG__EX_START_MASK 0x00008000L +#define 
SDMA4_STATUS1_REG__CE_RD_STALL_MASK 0x00020000L +#define SDMA4_STATUS1_REG__CE_WR_STALL_MASK 0x00040000L +//SDMA4_RD_BURST_CNTL +#define SDMA4_RD_BURST_CNTL__RD_BURST__SHIFT 0x0 +#define SDMA4_RD_BURST_CNTL__CMD_BUFFER_RD_BURST__SHIFT 0x2 +#define SDMA4_RD_BURST_CNTL__RD_BURST_MASK 0x00000003L +#define SDMA4_RD_BURST_CNTL__CMD_BUFFER_RD_BURST_MASK 0x0000000CL +//SDMA4_HBM_PAGE_CONFIG +#define SDMA4_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT__SHIFT 0x0 +#define SDMA4_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT_MASK 0x00000001L +//SDMA4_UCODE_CHECKSUM +#define SDMA4_UCODE_CHECKSUM__DATA__SHIFT 0x0 +#define SDMA4_UCODE_CHECKSUM__DATA_MASK 0xFFFFFFFFL +//SDMA4_F32_CNTL +#define SDMA4_F32_CNTL__HALT__SHIFT 0x0 +#define SDMA4_F32_CNTL__STEP__SHIFT 0x1 +#define SDMA4_F32_CNTL__HALT_MASK 0x00000001L +#define SDMA4_F32_CNTL__STEP_MASK 0x00000002L +//SDMA4_FREEZE +#define SDMA4_FREEZE__PREEMPT__SHIFT 0x0 +#define SDMA4_FREEZE__FREEZE__SHIFT 0x4 +#define SDMA4_FREEZE__FROZEN__SHIFT 0x5 +#define SDMA4_FREEZE__F32_FREEZE__SHIFT 0x6 +#define SDMA4_FREEZE__PREEMPT_MASK 0x00000001L +#define SDMA4_FREEZE__FREEZE_MASK 0x00000010L +#define SDMA4_FREEZE__FROZEN_MASK 0x00000020L +#define SDMA4_FREEZE__F32_FREEZE_MASK 0x00000040L +//SDMA4_PHASE0_QUANTUM +#define SDMA4_PHASE0_QUANTUM__UNIT__SHIFT 0x0 +#define SDMA4_PHASE0_QUANTUM__VALUE__SHIFT 0x8 +#define SDMA4_PHASE0_QUANTUM__PREFER__SHIFT 0x1e +#define SDMA4_PHASE0_QUANTUM__UNIT_MASK 0x0000000FL +#define SDMA4_PHASE0_QUANTUM__VALUE_MASK 0x00FFFF00L +#define SDMA4_PHASE0_QUANTUM__PREFER_MASK 0x40000000L +//SDMA4_PHASE1_QUANTUM +#define SDMA4_PHASE1_QUANTUM__UNIT__SHIFT 0x0 +#define SDMA4_PHASE1_QUANTUM__VALUE__SHIFT 0x8 +#define SDMA4_PHASE1_QUANTUM__PREFER__SHIFT 0x1e +#define SDMA4_PHASE1_QUANTUM__UNIT_MASK 0x0000000FL +#define SDMA4_PHASE1_QUANTUM__VALUE_MASK 0x00FFFF00L +#define SDMA4_PHASE1_QUANTUM__PREFER_MASK 0x40000000L +//SDMA4_EDC_CONFIG +#define SDMA4_EDC_CONFIG__DIS_EDC__SHIFT 0x1 +#define SDMA4_EDC_CONFIG__ECC_INT_ENABLE__SHIFT 0x2 +#define SDMA4_EDC_CONFIG__DIS_EDC_MASK 0x00000002L +#define SDMA4_EDC_CONFIG__ECC_INT_ENABLE_MASK 0x00000004L +//SDMA4_BA_THRESHOLD +#define SDMA4_BA_THRESHOLD__READ_THRES__SHIFT 0x0 +#define SDMA4_BA_THRESHOLD__WRITE_THRES__SHIFT 0x10 +#define SDMA4_BA_THRESHOLD__READ_THRES_MASK 0x000003FFL +#define SDMA4_BA_THRESHOLD__WRITE_THRES_MASK 0x03FF0000L +//SDMA4_ID +#define SDMA4_ID__DEVICE_ID__SHIFT 0x0 +#define SDMA4_ID__DEVICE_ID_MASK 0x000000FFL +//SDMA4_VERSION +#define SDMA4_VERSION__MINVER__SHIFT 0x0 +#define SDMA4_VERSION__MAJVER__SHIFT 0x8 +#define SDMA4_VERSION__REV__SHIFT 0x10 +#define SDMA4_VERSION__MINVER_MASK 0x0000007FL +#define SDMA4_VERSION__MAJVER_MASK 0x00007F00L +#define SDMA4_VERSION__REV_MASK 0x003F0000L +//SDMA4_EDC_COUNTER +#define SDMA4_EDC_COUNTER__SDMA_UCODE_BUF_SED__SHIFT 0x0 +#define SDMA4_EDC_COUNTER__SDMA_RB_CMD_BUF_SED__SHIFT 0x2 +#define SDMA4_EDC_COUNTER__SDMA_IB_CMD_BUF_SED__SHIFT 0x3 +#define SDMA4_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED__SHIFT 0x4 +#define SDMA4_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED__SHIFT 0x5 +#define SDMA4_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED__SHIFT 0x6 +#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED__SHIFT 0x7 +#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED__SHIFT 0x8 +#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED__SHIFT 0x9 +#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED__SHIFT 0xa +#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED__SHIFT 0xb +#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED__SHIFT 0xc +#define 
SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED__SHIFT 0xd +#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED__SHIFT 0xe +#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED__SHIFT 0xf +#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED__SHIFT 0x10 +#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED__SHIFT 0x11 +#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED__SHIFT 0x12 +#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED__SHIFT 0x13 +#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED__SHIFT 0x14 +#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED__SHIFT 0x15 +#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED__SHIFT 0x16 +#define SDMA4_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED__SHIFT 0x17 +#define SDMA4_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED__SHIFT 0x18 +#define SDMA4_EDC_COUNTER__SDMA_UCODE_BUF_SED_MASK 0x00000001L +#define SDMA4_EDC_COUNTER__SDMA_RB_CMD_BUF_SED_MASK 0x00000004L +#define SDMA4_EDC_COUNTER__SDMA_IB_CMD_BUF_SED_MASK 0x00000008L +#define SDMA4_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED_MASK 0x00000010L +#define SDMA4_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED_MASK 0x00000020L +#define SDMA4_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED_MASK 0x00000040L +#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED_MASK 0x00000080L +#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED_MASK 0x00000100L +#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED_MASK 0x00000200L +#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED_MASK 0x00000400L +#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED_MASK 0x00000800L +#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED_MASK 0x00001000L +#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED_MASK 0x00002000L +#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED_MASK 0x00004000L +#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED_MASK 0x00008000L +#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED_MASK 0x00010000L +#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED_MASK 0x00020000L +#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED_MASK 0x00040000L +#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED_MASK 0x00080000L +#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED_MASK 0x00100000L +#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED_MASK 0x00200000L +#define SDMA4_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED_MASK 0x00400000L +#define SDMA4_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED_MASK 0x00800000L +#define SDMA4_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED_MASK 0x01000000L +//SDMA4_EDC_COUNTER_CLEAR +#define SDMA4_EDC_COUNTER_CLEAR__DUMMY__SHIFT 0x0 +#define SDMA4_EDC_COUNTER_CLEAR__DUMMY_MASK 0x00000001L +//SDMA4_STATUS2_REG +#define SDMA4_STATUS2_REG__ID__SHIFT 0x0 +#define SDMA4_STATUS2_REG__F32_INSTR_PTR__SHIFT 0x3 +#define SDMA4_STATUS2_REG__CMD_OP__SHIFT 0x10 +#define SDMA4_STATUS2_REG__ID_MASK 0x00000007L +#define SDMA4_STATUS2_REG__F32_INSTR_PTR_MASK 0x0000FFF8L +#define SDMA4_STATUS2_REG__CMD_OP_MASK 0xFFFF0000L +//SDMA4_ATOMIC_CNTL +#define SDMA4_ATOMIC_CNTL__LOOP_TIMER__SHIFT 0x0 +#define SDMA4_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE__SHIFT 0x1f +#define SDMA4_ATOMIC_CNTL__LOOP_TIMER_MASK 0x7FFFFFFFL +#define SDMA4_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE_MASK 0x80000000L +//SDMA4_ATOMIC_PREOP_LO +#define SDMA4_ATOMIC_PREOP_LO__DATA__SHIFT 0x0 +#define SDMA4_ATOMIC_PREOP_LO__DATA_MASK 0xFFFFFFFFL +//SDMA4_ATOMIC_PREOP_HI +#define SDMA4_ATOMIC_PREOP_HI__DATA__SHIFT 0x0 +#define SDMA4_ATOMIC_PREOP_HI__DATA_MASK 0xFFFFFFFFL +//SDMA4_UTCL1_CNTL +#define SDMA4_UTCL1_CNTL__REDO_ENABLE__SHIFT 0x0 +#define SDMA4_UTCL1_CNTL__REDO_DELAY__SHIFT 0x1 +#define 
SDMA4_UTCL1_CNTL__REDO_WATERMK__SHIFT 0xb +#define SDMA4_UTCL1_CNTL__INVACK_DELAY__SHIFT 0xe +#define SDMA4_UTCL1_CNTL__REQL2_CREDIT__SHIFT 0x18 +#define SDMA4_UTCL1_CNTL__VADDR_WATERMK__SHIFT 0x1d +#define SDMA4_UTCL1_CNTL__REDO_ENABLE_MASK 0x00000001L +#define SDMA4_UTCL1_CNTL__REDO_DELAY_MASK 0x000007FEL +#define SDMA4_UTCL1_CNTL__REDO_WATERMK_MASK 0x00003800L +#define SDMA4_UTCL1_CNTL__INVACK_DELAY_MASK 0x00FFC000L +#define SDMA4_UTCL1_CNTL__REQL2_CREDIT_MASK 0x1F000000L +#define SDMA4_UTCL1_CNTL__VADDR_WATERMK_MASK 0xE0000000L +//SDMA4_UTCL1_WATERMK +#define SDMA4_UTCL1_WATERMK__REQMC_WATERMK__SHIFT 0x0 +#define SDMA4_UTCL1_WATERMK__REQPG_WATERMK__SHIFT 0x9 +#define SDMA4_UTCL1_WATERMK__INVREQ_WATERMK__SHIFT 0x11 +#define SDMA4_UTCL1_WATERMK__XNACK_WATERMK__SHIFT 0x19 +#define SDMA4_UTCL1_WATERMK__REQMC_WATERMK_MASK 0x000001FFL +#define SDMA4_UTCL1_WATERMK__REQPG_WATERMK_MASK 0x0001FE00L +#define SDMA4_UTCL1_WATERMK__INVREQ_WATERMK_MASK 0x01FE0000L +#define SDMA4_UTCL1_WATERMK__XNACK_WATERMK_MASK 0xFE000000L +//SDMA4_UTCL1_RD_STATUS +#define SDMA4_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0 +#define SDMA4_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1 +#define SDMA4_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2 +#define SDMA4_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3 +#define SDMA4_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4 +#define SDMA4_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5 +#define SDMA4_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6 +#define SDMA4_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7 +#define SDMA4_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8 +#define SDMA4_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9 +#define SDMA4_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa +#define SDMA4_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb +#define SDMA4_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc +#define SDMA4_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd +#define SDMA4_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe +#define SDMA4_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf +#define SDMA4_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10 +#define SDMA4_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11 +#define SDMA4_UTCL1_RD_STATUS__PAGE_FAULT__SHIFT 0x12 +#define SDMA4_UTCL1_RD_STATUS__PAGE_NULL__SHIFT 0x13 +#define SDMA4_UTCL1_RD_STATUS__REQL2_IDLE__SHIFT 0x14 +#define SDMA4_UTCL1_RD_STATUS__CE_L1_STALL__SHIFT 0x15 +#define SDMA4_UTCL1_RD_STATUS__NEXT_RD_VECTOR__SHIFT 0x16 +#define SDMA4_UTCL1_RD_STATUS__MERGE_STATE__SHIFT 0x1a +#define SDMA4_UTCL1_RD_STATUS__ADDR_RD_RTR__SHIFT 0x1d +#define SDMA4_UTCL1_RD_STATUS__WPTR_POLLING__SHIFT 0x1e +#define SDMA4_UTCL1_RD_STATUS__INVREQ_SIZE__SHIFT 0x1f +#define SDMA4_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L +#define SDMA4_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L +#define SDMA4_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L +#define SDMA4_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L +#define SDMA4_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L +#define SDMA4_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L +#define SDMA4_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L +#define SDMA4_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L +#define SDMA4_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L +#define SDMA4_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L +#define SDMA4_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L +#define 
SDMA4_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L +#define SDMA4_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L +#define SDMA4_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L +#define SDMA4_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L +#define SDMA4_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L +#define SDMA4_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L +#define SDMA4_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L +#define SDMA4_UTCL1_RD_STATUS__PAGE_FAULT_MASK 0x00040000L +#define SDMA4_UTCL1_RD_STATUS__PAGE_NULL_MASK 0x00080000L +#define SDMA4_UTCL1_RD_STATUS__REQL2_IDLE_MASK 0x00100000L +#define SDMA4_UTCL1_RD_STATUS__CE_L1_STALL_MASK 0x00200000L +#define SDMA4_UTCL1_RD_STATUS__NEXT_RD_VECTOR_MASK 0x03C00000L +#define SDMA4_UTCL1_RD_STATUS__MERGE_STATE_MASK 0x1C000000L +#define SDMA4_UTCL1_RD_STATUS__ADDR_RD_RTR_MASK 0x20000000L +#define SDMA4_UTCL1_RD_STATUS__WPTR_POLLING_MASK 0x40000000L +#define SDMA4_UTCL1_RD_STATUS__INVREQ_SIZE_MASK 0x80000000L +//SDMA4_UTCL1_WR_STATUS +#define SDMA4_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0 +#define SDMA4_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1 +#define SDMA4_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2 +#define SDMA4_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3 +#define SDMA4_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4 +#define SDMA4_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5 +#define SDMA4_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6 +#define SDMA4_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7 +#define SDMA4_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8 +#define SDMA4_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9 +#define SDMA4_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa +#define SDMA4_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb +#define SDMA4_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc +#define SDMA4_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd +#define SDMA4_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe +#define SDMA4_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf +#define SDMA4_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10 +#define SDMA4_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11 +#define SDMA4_UTCL1_WR_STATUS__PAGE_FAULT__SHIFT 0x12 +#define SDMA4_UTCL1_WR_STATUS__PAGE_NULL__SHIFT 0x13 +#define SDMA4_UTCL1_WR_STATUS__REQL2_IDLE__SHIFT 0x14 +#define SDMA4_UTCL1_WR_STATUS__F32_WR_RTR__SHIFT 0x15 +#define SDMA4_UTCL1_WR_STATUS__NEXT_WR_VECTOR__SHIFT 0x16 +#define SDMA4_UTCL1_WR_STATUS__MERGE_STATE__SHIFT 0x19 +#define SDMA4_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY__SHIFT 0x1c +#define SDMA4_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL__SHIFT 0x1d +#define SDMA4_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY__SHIFT 0x1e +#define SDMA4_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL__SHIFT 0x1f +#define SDMA4_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L +#define SDMA4_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L +#define SDMA4_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L +#define SDMA4_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L +#define SDMA4_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L +#define SDMA4_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L +#define SDMA4_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L +#define SDMA4_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L +#define SDMA4_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L +#define SDMA4_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L +#define 
SDMA4_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L +#define SDMA4_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L +#define SDMA4_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L +#define SDMA4_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L +#define SDMA4_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L +#define SDMA4_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L +#define SDMA4_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L +#define SDMA4_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L +#define SDMA4_UTCL1_WR_STATUS__PAGE_FAULT_MASK 0x00040000L +#define SDMA4_UTCL1_WR_STATUS__PAGE_NULL_MASK 0x00080000L +#define SDMA4_UTCL1_WR_STATUS__REQL2_IDLE_MASK 0x00100000L +#define SDMA4_UTCL1_WR_STATUS__F32_WR_RTR_MASK 0x00200000L +#define SDMA4_UTCL1_WR_STATUS__NEXT_WR_VECTOR_MASK 0x01C00000L +#define SDMA4_UTCL1_WR_STATUS__MERGE_STATE_MASK 0x0E000000L +#define SDMA4_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY_MASK 0x10000000L +#define SDMA4_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL_MASK 0x20000000L +#define SDMA4_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY_MASK 0x40000000L +#define SDMA4_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL_MASK 0x80000000L +//SDMA4_UTCL1_INV0 +#define SDMA4_UTCL1_INV0__INV_MIDDLE__SHIFT 0x0 +#define SDMA4_UTCL1_INV0__RD_TIMEOUT__SHIFT 0x1 +#define SDMA4_UTCL1_INV0__WR_TIMEOUT__SHIFT 0x2 +#define SDMA4_UTCL1_INV0__RD_IN_INVADR__SHIFT 0x3 +#define SDMA4_UTCL1_INV0__WR_IN_INVADR__SHIFT 0x4 +#define SDMA4_UTCL1_INV0__PAGE_NULL_SW__SHIFT 0x5 +#define SDMA4_UTCL1_INV0__XNACK_IS_INVADR__SHIFT 0x6 +#define SDMA4_UTCL1_INV0__INVREQ_ENABLE__SHIFT 0x7 +#define SDMA4_UTCL1_INV0__NACK_TIMEOUT_SW__SHIFT 0x8 +#define SDMA4_UTCL1_INV0__NFLUSH_INV_IDLE__SHIFT 0x9 +#define SDMA4_UTCL1_INV0__FLUSH_INV_IDLE__SHIFT 0xa +#define SDMA4_UTCL1_INV0__INV_FLUSHTYPE__SHIFT 0xb +#define SDMA4_UTCL1_INV0__INV_VMID_VEC__SHIFT 0xc +#define SDMA4_UTCL1_INV0__INV_ADDR_HI__SHIFT 0x1c +#define SDMA4_UTCL1_INV0__INV_MIDDLE_MASK 0x00000001L +#define SDMA4_UTCL1_INV0__RD_TIMEOUT_MASK 0x00000002L +#define SDMA4_UTCL1_INV0__WR_TIMEOUT_MASK 0x00000004L +#define SDMA4_UTCL1_INV0__RD_IN_INVADR_MASK 0x00000008L +#define SDMA4_UTCL1_INV0__WR_IN_INVADR_MASK 0x00000010L +#define SDMA4_UTCL1_INV0__PAGE_NULL_SW_MASK 0x00000020L +#define SDMA4_UTCL1_INV0__XNACK_IS_INVADR_MASK 0x00000040L +#define SDMA4_UTCL1_INV0__INVREQ_ENABLE_MASK 0x00000080L +#define SDMA4_UTCL1_INV0__NACK_TIMEOUT_SW_MASK 0x00000100L +#define SDMA4_UTCL1_INV0__NFLUSH_INV_IDLE_MASK 0x00000200L +#define SDMA4_UTCL1_INV0__FLUSH_INV_IDLE_MASK 0x00000400L +#define SDMA4_UTCL1_INV0__INV_FLUSHTYPE_MASK 0x00000800L +#define SDMA4_UTCL1_INV0__INV_VMID_VEC_MASK 0x0FFFF000L +#define SDMA4_UTCL1_INV0__INV_ADDR_HI_MASK 0xF0000000L +//SDMA4_UTCL1_INV1 +#define SDMA4_UTCL1_INV1__INV_ADDR_LO__SHIFT 0x0 +#define SDMA4_UTCL1_INV1__INV_ADDR_LO_MASK 0xFFFFFFFFL +//SDMA4_UTCL1_INV2 +#define SDMA4_UTCL1_INV2__INV_NFLUSH_VMID_VEC__SHIFT 0x0 +#define SDMA4_UTCL1_INV2__INV_NFLUSH_VMID_VEC_MASK 0xFFFFFFFFL +//SDMA4_UTCL1_RD_XNACK0 +#define SDMA4_UTCL1_RD_XNACK0__XNACK_ADDR_LO__SHIFT 0x0 +#define SDMA4_UTCL1_RD_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL +//SDMA4_UTCL1_RD_XNACK1 +#define SDMA4_UTCL1_RD_XNACK1__XNACK_ADDR_HI__SHIFT 0x0 +#define SDMA4_UTCL1_RD_XNACK1__XNACK_VMID__SHIFT 0x4 +#define SDMA4_UTCL1_RD_XNACK1__XNACK_VECTOR__SHIFT 0x8 +#define SDMA4_UTCL1_RD_XNACK1__IS_XNACK__SHIFT 0x1a +#define SDMA4_UTCL1_RD_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL +#define SDMA4_UTCL1_RD_XNACK1__XNACK_VMID_MASK 0x000000F0L +#define 
SDMA4_UTCL1_RD_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L +#define SDMA4_UTCL1_RD_XNACK1__IS_XNACK_MASK 0x0C000000L +//SDMA4_UTCL1_WR_XNACK0 +#define SDMA4_UTCL1_WR_XNACK0__XNACK_ADDR_LO__SHIFT 0x0 +#define SDMA4_UTCL1_WR_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL +//SDMA4_UTCL1_WR_XNACK1 +#define SDMA4_UTCL1_WR_XNACK1__XNACK_ADDR_HI__SHIFT 0x0 +#define SDMA4_UTCL1_WR_XNACK1__XNACK_VMID__SHIFT 0x4 +#define SDMA4_UTCL1_WR_XNACK1__XNACK_VECTOR__SHIFT 0x8 +#define SDMA4_UTCL1_WR_XNACK1__IS_XNACK__SHIFT 0x1a +#define SDMA4_UTCL1_WR_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL +#define SDMA4_UTCL1_WR_XNACK1__XNACK_VMID_MASK 0x000000F0L +#define SDMA4_UTCL1_WR_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L +#define SDMA4_UTCL1_WR_XNACK1__IS_XNACK_MASK 0x0C000000L +//SDMA4_UTCL1_TIMEOUT +#define SDMA4_UTCL1_TIMEOUT__RD_XNACK_LIMIT__SHIFT 0x0 +#define SDMA4_UTCL1_TIMEOUT__WR_XNACK_LIMIT__SHIFT 0x10 +#define SDMA4_UTCL1_TIMEOUT__RD_XNACK_LIMIT_MASK 0x0000FFFFL +#define SDMA4_UTCL1_TIMEOUT__WR_XNACK_LIMIT_MASK 0xFFFF0000L +//SDMA4_UTCL1_PAGE +#define SDMA4_UTCL1_PAGE__VM_HOLE__SHIFT 0x0 +#define SDMA4_UTCL1_PAGE__REQ_TYPE__SHIFT 0x1 +#define SDMA4_UTCL1_PAGE__USE_MTYPE__SHIFT 0x6 +#define SDMA4_UTCL1_PAGE__USE_PT_SNOOP__SHIFT 0x9 +#define SDMA4_UTCL1_PAGE__VM_HOLE_MASK 0x00000001L +#define SDMA4_UTCL1_PAGE__REQ_TYPE_MASK 0x0000001EL +#define SDMA4_UTCL1_PAGE__USE_MTYPE_MASK 0x000001C0L +#define SDMA4_UTCL1_PAGE__USE_PT_SNOOP_MASK 0x00000200L +//SDMA4_POWER_CNTL_IDLE +#define SDMA4_POWER_CNTL_IDLE__DELAY0__SHIFT 0x0 +#define SDMA4_POWER_CNTL_IDLE__DELAY1__SHIFT 0x10 +#define SDMA4_POWER_CNTL_IDLE__DELAY2__SHIFT 0x18 +#define SDMA4_POWER_CNTL_IDLE__DELAY0_MASK 0x0000FFFFL +#define SDMA4_POWER_CNTL_IDLE__DELAY1_MASK 0x00FF0000L +#define SDMA4_POWER_CNTL_IDLE__DELAY2_MASK 0xFF000000L +//SDMA4_RELAX_ORDERING_LUT +#define SDMA4_RELAX_ORDERING_LUT__RESERVED0__SHIFT 0x0 +#define SDMA4_RELAX_ORDERING_LUT__COPY__SHIFT 0x1 +#define SDMA4_RELAX_ORDERING_LUT__WRITE__SHIFT 0x2 +#define SDMA4_RELAX_ORDERING_LUT__RESERVED3__SHIFT 0x3 +#define SDMA4_RELAX_ORDERING_LUT__RESERVED4__SHIFT 0x4 +#define SDMA4_RELAX_ORDERING_LUT__FENCE__SHIFT 0x5 +#define SDMA4_RELAX_ORDERING_LUT__RESERVED76__SHIFT 0x6 +#define SDMA4_RELAX_ORDERING_LUT__POLL_MEM__SHIFT 0x8 +#define SDMA4_RELAX_ORDERING_LUT__COND_EXE__SHIFT 0x9 +#define SDMA4_RELAX_ORDERING_LUT__ATOMIC__SHIFT 0xa +#define SDMA4_RELAX_ORDERING_LUT__CONST_FILL__SHIFT 0xb +#define SDMA4_RELAX_ORDERING_LUT__PTEPDE__SHIFT 0xc +#define SDMA4_RELAX_ORDERING_LUT__TIMESTAMP__SHIFT 0xd +#define SDMA4_RELAX_ORDERING_LUT__RESERVED__SHIFT 0xe +#define SDMA4_RELAX_ORDERING_LUT__WORLD_SWITCH__SHIFT 0x1b +#define SDMA4_RELAX_ORDERING_LUT__RPTR_WRB__SHIFT 0x1c +#define SDMA4_RELAX_ORDERING_LUT__WPTR_POLL__SHIFT 0x1d +#define SDMA4_RELAX_ORDERING_LUT__IB_FETCH__SHIFT 0x1e +#define SDMA4_RELAX_ORDERING_LUT__RB_FETCH__SHIFT 0x1f +#define SDMA4_RELAX_ORDERING_LUT__RESERVED0_MASK 0x00000001L +#define SDMA4_RELAX_ORDERING_LUT__COPY_MASK 0x00000002L +#define SDMA4_RELAX_ORDERING_LUT__WRITE_MASK 0x00000004L +#define SDMA4_RELAX_ORDERING_LUT__RESERVED3_MASK 0x00000008L +#define SDMA4_RELAX_ORDERING_LUT__RESERVED4_MASK 0x00000010L +#define SDMA4_RELAX_ORDERING_LUT__FENCE_MASK 0x00000020L +#define SDMA4_RELAX_ORDERING_LUT__RESERVED76_MASK 0x000000C0L +#define SDMA4_RELAX_ORDERING_LUT__POLL_MEM_MASK 0x00000100L +#define SDMA4_RELAX_ORDERING_LUT__COND_EXE_MASK 0x00000200L +#define SDMA4_RELAX_ORDERING_LUT__ATOMIC_MASK 0x00000400L +#define SDMA4_RELAX_ORDERING_LUT__CONST_FILL_MASK 0x00000800L +#define 
SDMA4_RELAX_ORDERING_LUT__PTEPDE_MASK 0x00001000L +#define SDMA4_RELAX_ORDERING_LUT__TIMESTAMP_MASK 0x00002000L +#define SDMA4_RELAX_ORDERING_LUT__RESERVED_MASK 0x07FFC000L +#define SDMA4_RELAX_ORDERING_LUT__WORLD_SWITCH_MASK 0x08000000L +#define SDMA4_RELAX_ORDERING_LUT__RPTR_WRB_MASK 0x10000000L +#define SDMA4_RELAX_ORDERING_LUT__WPTR_POLL_MASK 0x20000000L +#define SDMA4_RELAX_ORDERING_LUT__IB_FETCH_MASK 0x40000000L +#define SDMA4_RELAX_ORDERING_LUT__RB_FETCH_MASK 0x80000000L +//SDMA4_CHICKEN_BITS_2 +#define SDMA4_CHICKEN_BITS_2__F32_CMD_PROC_DELAY__SHIFT 0x0 +#define SDMA4_CHICKEN_BITS_2__F32_CMD_PROC_DELAY_MASK 0x0000000FL +//SDMA4_STATUS3_REG +#define SDMA4_STATUS3_REG__CMD_OP_STATUS__SHIFT 0x0 +#define SDMA4_STATUS3_REG__PREV_VM_CMD__SHIFT 0x10 +#define SDMA4_STATUS3_REG__EXCEPTION_IDLE__SHIFT 0x14 +#define SDMA4_STATUS3_REG__QUEUE_ID_MATCH__SHIFT 0x15 +#define SDMA4_STATUS3_REG__INT_QUEUE_ID__SHIFT 0x16 +#define SDMA4_STATUS3_REG__CMD_OP_STATUS_MASK 0x0000FFFFL +#define SDMA4_STATUS3_REG__PREV_VM_CMD_MASK 0x000F0000L +#define SDMA4_STATUS3_REG__EXCEPTION_IDLE_MASK 0x00100000L +#define SDMA4_STATUS3_REG__QUEUE_ID_MATCH_MASK 0x00200000L +#define SDMA4_STATUS3_REG__INT_QUEUE_ID_MASK 0x03C00000L +//SDMA4_PHYSICAL_ADDR_LO +#define SDMA4_PHYSICAL_ADDR_LO__D_VALID__SHIFT 0x0 +#define SDMA4_PHYSICAL_ADDR_LO__DIRTY__SHIFT 0x1 +#define SDMA4_PHYSICAL_ADDR_LO__PHY_VALID__SHIFT 0x2 +#define SDMA4_PHYSICAL_ADDR_LO__ADDR__SHIFT 0xc +#define SDMA4_PHYSICAL_ADDR_LO__D_VALID_MASK 0x00000001L +#define SDMA4_PHYSICAL_ADDR_LO__DIRTY_MASK 0x00000002L +#define SDMA4_PHYSICAL_ADDR_LO__PHY_VALID_MASK 0x00000004L +#define SDMA4_PHYSICAL_ADDR_LO__ADDR_MASK 0xFFFFF000L +//SDMA4_PHYSICAL_ADDR_HI +#define SDMA4_PHYSICAL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA4_PHYSICAL_ADDR_HI__ADDR_MASK 0x0000FFFFL +//SDMA4_PHASE2_QUANTUM +#define SDMA4_PHASE2_QUANTUM__UNIT__SHIFT 0x0 +#define SDMA4_PHASE2_QUANTUM__VALUE__SHIFT 0x8 +#define SDMA4_PHASE2_QUANTUM__PREFER__SHIFT 0x1e +#define SDMA4_PHASE2_QUANTUM__UNIT_MASK 0x0000000FL +#define SDMA4_PHASE2_QUANTUM__VALUE_MASK 0x00FFFF00L +#define SDMA4_PHASE2_QUANTUM__PREFER_MASK 0x40000000L +//SDMA4_ERROR_LOG +#define SDMA4_ERROR_LOG__OVERRIDE__SHIFT 0x0 +#define SDMA4_ERROR_LOG__STATUS__SHIFT 0x10 +#define SDMA4_ERROR_LOG__OVERRIDE_MASK 0x0000FFFFL +#define SDMA4_ERROR_LOG__STATUS_MASK 0xFFFF0000L +//SDMA4_PUB_DUMMY_REG0 +#define SDMA4_PUB_DUMMY_REG0__VALUE__SHIFT 0x0 +#define SDMA4_PUB_DUMMY_REG0__VALUE_MASK 0xFFFFFFFFL +//SDMA4_PUB_DUMMY_REG1 +#define SDMA4_PUB_DUMMY_REG1__VALUE__SHIFT 0x0 +#define SDMA4_PUB_DUMMY_REG1__VALUE_MASK 0xFFFFFFFFL +//SDMA4_PUB_DUMMY_REG2 +#define SDMA4_PUB_DUMMY_REG2__VALUE__SHIFT 0x0 +#define SDMA4_PUB_DUMMY_REG2__VALUE_MASK 0xFFFFFFFFL +//SDMA4_PUB_DUMMY_REG3 +#define SDMA4_PUB_DUMMY_REG3__VALUE__SHIFT 0x0 +#define SDMA4_PUB_DUMMY_REG3__VALUE_MASK 0xFFFFFFFFL +//SDMA4_F32_COUNTER +#define SDMA4_F32_COUNTER__VALUE__SHIFT 0x0 +#define SDMA4_F32_COUNTER__VALUE_MASK 0xFFFFFFFFL +//SDMA4_UNBREAKABLE +#define SDMA4_UNBREAKABLE__VALUE__SHIFT 0x0 +#define SDMA4_UNBREAKABLE__VALUE_MASK 0x00000001L +//SDMA4_PERFMON_CNTL +#define SDMA4_PERFMON_CNTL__PERF_ENABLE0__SHIFT 0x0 +#define SDMA4_PERFMON_CNTL__PERF_CLEAR0__SHIFT 0x1 +#define SDMA4_PERFMON_CNTL__PERF_SEL0__SHIFT 0x2 +#define SDMA4_PERFMON_CNTL__PERF_ENABLE1__SHIFT 0xa +#define SDMA4_PERFMON_CNTL__PERF_CLEAR1__SHIFT 0xb +#define SDMA4_PERFMON_CNTL__PERF_SEL1__SHIFT 0xc +#define SDMA4_PERFMON_CNTL__PERF_ENABLE0_MASK 0x00000001L +#define SDMA4_PERFMON_CNTL__PERF_CLEAR0_MASK 0x00000002L +#define 
SDMA4_PERFMON_CNTL__PERF_SEL0_MASK 0x000003FCL +#define SDMA4_PERFMON_CNTL__PERF_ENABLE1_MASK 0x00000400L +#define SDMA4_PERFMON_CNTL__PERF_CLEAR1_MASK 0x00000800L +#define SDMA4_PERFMON_CNTL__PERF_SEL1_MASK 0x000FF000L +//SDMA4_PERFCOUNTER0_RESULT +#define SDMA4_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT 0x0 +#define SDMA4_PERFCOUNTER0_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL +//SDMA4_PERFCOUNTER1_RESULT +#define SDMA4_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT 0x0 +#define SDMA4_PERFCOUNTER1_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL +//SDMA4_PERFCOUNTER_TAG_DELAY_RANGE +#define SDMA4_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW__SHIFT 0x0 +#define SDMA4_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH__SHIFT 0xe +#define SDMA4_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW__SHIFT 0x1c +#define SDMA4_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW_MASK 0x00003FFFL +#define SDMA4_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH_MASK 0x0FFFC000L +#define SDMA4_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW_MASK 0x10000000L +//SDMA4_CRD_CNTL +#define SDMA4_CRD_CNTL__MC_WRREQ_CREDIT__SHIFT 0x7 +#define SDMA4_CRD_CNTL__MC_RDREQ_CREDIT__SHIFT 0xd +#define SDMA4_CRD_CNTL__MC_WRREQ_CREDIT_MASK 0x00001F80L +#define SDMA4_CRD_CNTL__MC_RDREQ_CREDIT_MASK 0x0007E000L +//SDMA4_GPU_IOV_VIOLATION_LOG +#define SDMA4_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0 +#define SDMA4_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1 +#define SDMA4_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2 +#define SDMA4_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION__SHIFT 0x14 +#define SDMA4_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x15 +#define SDMA4_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x16 +#define SDMA4_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L +#define SDMA4_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L +#define SDMA4_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x000FFFFCL +#define SDMA4_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION_MASK 0x00100000L +#define SDMA4_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00200000L +#define SDMA4_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x03C00000L +//SDMA4_ULV_CNTL +#define SDMA4_ULV_CNTL__HYSTERESIS__SHIFT 0x0 +#define SDMA4_ULV_CNTL__ENTER_ULV_INT_CLR__SHIFT 0x1b +#define SDMA4_ULV_CNTL__EXIT_ULV_INT_CLR__SHIFT 0x1c +#define SDMA4_ULV_CNTL__ENTER_ULV_INT__SHIFT 0x1d +#define SDMA4_ULV_CNTL__EXIT_ULV_INT__SHIFT 0x1e +#define SDMA4_ULV_CNTL__ULV_STATUS__SHIFT 0x1f +#define SDMA4_ULV_CNTL__HYSTERESIS_MASK 0x0000001FL +#define SDMA4_ULV_CNTL__ENTER_ULV_INT_CLR_MASK 0x08000000L +#define SDMA4_ULV_CNTL__EXIT_ULV_INT_CLR_MASK 0x10000000L +#define SDMA4_ULV_CNTL__ENTER_ULV_INT_MASK 0x20000000L +#define SDMA4_ULV_CNTL__EXIT_ULV_INT_MASK 0x40000000L +#define SDMA4_ULV_CNTL__ULV_STATUS_MASK 0x80000000L +//SDMA4_EA_DBIT_ADDR_DATA +#define SDMA4_EA_DBIT_ADDR_DATA__VALUE__SHIFT 0x0 +#define SDMA4_EA_DBIT_ADDR_DATA__VALUE_MASK 0xFFFFFFFFL +//SDMA4_EA_DBIT_ADDR_INDEX +#define SDMA4_EA_DBIT_ADDR_INDEX__VALUE__SHIFT 0x0 +#define SDMA4_EA_DBIT_ADDR_INDEX__VALUE_MASK 0x00000007L +//SDMA4_GPU_IOV_VIOLATION_LOG2 +#define SDMA4_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID__SHIFT 0x0 +#define SDMA4_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID_MASK 0x000000FFL +//SDMA4_GFX_RB_CNTL +#define SDMA4_GFX_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA4_GFX_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA4_GFX_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA4_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA4_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA4_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA4_GFX_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define 
SDMA4_GFX_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA4_GFX_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA4_GFX_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA4_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA4_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA4_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA4_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA4_GFX_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA4_GFX_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA4_GFX_RB_BASE +#define SDMA4_GFX_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA4_GFX_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA4_GFX_RB_BASE_HI +#define SDMA4_GFX_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA4_GFX_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA4_GFX_RB_RPTR +#define SDMA4_GFX_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA4_GFX_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA4_GFX_RB_RPTR_HI +#define SDMA4_GFX_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA4_GFX_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA4_GFX_RB_WPTR +#define SDMA4_GFX_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA4_GFX_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA4_GFX_RB_WPTR_HI +#define SDMA4_GFX_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA4_GFX_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA4_GFX_RB_WPTR_POLL_CNTL +#define SDMA4_GFX_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA4_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA4_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA4_GFX_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA4_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA4_GFX_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA4_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA4_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA4_GFX_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA4_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA4_GFX_RB_RPTR_ADDR_HI +#define SDMA4_GFX_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA4_GFX_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA4_GFX_RB_RPTR_ADDR_LO +#define SDMA4_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA4_GFX_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA4_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA4_GFX_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA4_GFX_IB_CNTL +#define SDMA4_GFX_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA4_GFX_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA4_GFX_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA4_GFX_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA4_GFX_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA4_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA4_GFX_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA4_GFX_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA4_GFX_IB_RPTR +#define SDMA4_GFX_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA4_GFX_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA4_GFX_IB_OFFSET +#define SDMA4_GFX_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA4_GFX_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA4_GFX_IB_BASE_LO +#define SDMA4_GFX_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA4_GFX_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA4_GFX_IB_BASE_HI +#define SDMA4_GFX_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA4_GFX_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA4_GFX_IB_SIZE +#define SDMA4_GFX_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA4_GFX_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA4_GFX_SKIP_CNTL +#define SDMA4_GFX_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA4_GFX_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL 
+//SDMA4_GFX_CONTEXT_STATUS +#define SDMA4_GFX_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA4_GFX_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA4_GFX_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA4_GFX_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA4_GFX_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA4_GFX_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA4_GFX_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA4_GFX_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA4_GFX_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA4_GFX_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA4_GFX_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA4_GFX_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA4_GFX_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA4_GFX_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA4_GFX_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA4_GFX_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA4_GFX_DOORBELL +#define SDMA4_GFX_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA4_GFX_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA4_GFX_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA4_GFX_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA4_GFX_CONTEXT_CNTL +#define SDMA4_GFX_CONTEXT_CNTL__RESUME_CTX__SHIFT 0x10 +#define SDMA4_GFX_CONTEXT_CNTL__RESUME_CTX_MASK 0x00010000L +//SDMA4_GFX_STATUS +#define SDMA4_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA4_GFX_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA4_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA4_GFX_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA4_GFX_DOORBELL_LOG +#define SDMA4_GFX_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA4_GFX_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA4_GFX_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA4_GFX_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA4_GFX_WATERMARK +#define SDMA4_GFX_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA4_GFX_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA4_GFX_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA4_GFX_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA4_GFX_DOORBELL_OFFSET +#define SDMA4_GFX_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA4_GFX_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA4_GFX_CSA_ADDR_LO +#define SDMA4_GFX_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA4_GFX_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA4_GFX_CSA_ADDR_HI +#define SDMA4_GFX_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA4_GFX_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA4_GFX_IB_SUB_REMAIN +#define SDMA4_GFX_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA4_GFX_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA4_GFX_PREEMPT +#define SDMA4_GFX_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA4_GFX_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA4_GFX_DUMMY_REG +#define SDMA4_GFX_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA4_GFX_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA4_GFX_RB_WPTR_POLL_ADDR_HI +#define SDMA4_GFX_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA4_GFX_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA4_GFX_RB_WPTR_POLL_ADDR_LO +#define SDMA4_GFX_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA4_GFX_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA4_GFX_RB_AQL_CNTL +#define SDMA4_GFX_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA4_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA4_GFX_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA4_GFX_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA4_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define 
SDMA4_GFX_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA4_GFX_MINOR_PTR_UPDATE +#define SDMA4_GFX_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA4_GFX_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA4_GFX_MIDCMD_DATA0 +#define SDMA4_GFX_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA4_GFX_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA4_GFX_MIDCMD_DATA1 +#define SDMA4_GFX_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA4_GFX_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA4_GFX_MIDCMD_DATA2 +#define SDMA4_GFX_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA4_GFX_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA4_GFX_MIDCMD_DATA3 +#define SDMA4_GFX_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA4_GFX_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA4_GFX_MIDCMD_DATA4 +#define SDMA4_GFX_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA4_GFX_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA4_GFX_MIDCMD_DATA5 +#define SDMA4_GFX_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA4_GFX_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA4_GFX_MIDCMD_DATA6 +#define SDMA4_GFX_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA4_GFX_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA4_GFX_MIDCMD_DATA7 +#define SDMA4_GFX_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA4_GFX_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA4_GFX_MIDCMD_DATA8 +#define SDMA4_GFX_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA4_GFX_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA4_GFX_MIDCMD_CNTL +#define SDMA4_GFX_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA4_GFX_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA4_GFX_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA4_GFX_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA4_GFX_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA4_GFX_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA4_GFX_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA4_GFX_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA4_PAGE_RB_CNTL +#define SDMA4_PAGE_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA4_PAGE_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA4_PAGE_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA4_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA4_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA4_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA4_PAGE_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA4_PAGE_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA4_PAGE_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA4_PAGE_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA4_PAGE_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA4_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA4_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA4_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA4_PAGE_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA4_PAGE_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA4_PAGE_RB_BASE +#define SDMA4_PAGE_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA4_PAGE_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA4_PAGE_RB_BASE_HI +#define SDMA4_PAGE_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA4_PAGE_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA4_PAGE_RB_RPTR +#define SDMA4_PAGE_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA4_PAGE_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA4_PAGE_RB_RPTR_HI +#define SDMA4_PAGE_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA4_PAGE_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA4_PAGE_RB_WPTR +#define SDMA4_PAGE_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA4_PAGE_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA4_PAGE_RB_WPTR_HI +#define SDMA4_PAGE_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define 
SDMA4_PAGE_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA4_PAGE_RB_WPTR_POLL_CNTL +#define SDMA4_PAGE_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA4_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA4_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA4_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA4_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA4_PAGE_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA4_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA4_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA4_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA4_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA4_PAGE_RB_RPTR_ADDR_HI +#define SDMA4_PAGE_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA4_PAGE_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA4_PAGE_RB_RPTR_ADDR_LO +#define SDMA4_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA4_PAGE_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA4_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA4_PAGE_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA4_PAGE_IB_CNTL +#define SDMA4_PAGE_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA4_PAGE_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA4_PAGE_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA4_PAGE_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA4_PAGE_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA4_PAGE_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA4_PAGE_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA4_PAGE_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA4_PAGE_IB_RPTR +#define SDMA4_PAGE_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA4_PAGE_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA4_PAGE_IB_OFFSET +#define SDMA4_PAGE_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA4_PAGE_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA4_PAGE_IB_BASE_LO +#define SDMA4_PAGE_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA4_PAGE_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA4_PAGE_IB_BASE_HI +#define SDMA4_PAGE_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA4_PAGE_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA4_PAGE_IB_SIZE +#define SDMA4_PAGE_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA4_PAGE_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA4_PAGE_SKIP_CNTL +#define SDMA4_PAGE_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA4_PAGE_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA4_PAGE_CONTEXT_STATUS +#define SDMA4_PAGE_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA4_PAGE_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA4_PAGE_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA4_PAGE_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA4_PAGE_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA4_PAGE_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA4_PAGE_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA4_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA4_PAGE_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA4_PAGE_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA4_PAGE_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA4_PAGE_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA4_PAGE_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA4_PAGE_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA4_PAGE_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA4_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA4_PAGE_DOORBELL +#define SDMA4_PAGE_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA4_PAGE_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA4_PAGE_DOORBELL__ENABLE_MASK 0x10000000L +#define 
SDMA4_PAGE_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA4_PAGE_STATUS +#define SDMA4_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA4_PAGE_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA4_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA4_PAGE_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA4_PAGE_DOORBELL_LOG +#define SDMA4_PAGE_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA4_PAGE_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA4_PAGE_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA4_PAGE_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA4_PAGE_WATERMARK +#define SDMA4_PAGE_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA4_PAGE_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA4_PAGE_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA4_PAGE_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA4_PAGE_DOORBELL_OFFSET +#define SDMA4_PAGE_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA4_PAGE_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA4_PAGE_CSA_ADDR_LO +#define SDMA4_PAGE_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA4_PAGE_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA4_PAGE_CSA_ADDR_HI +#define SDMA4_PAGE_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA4_PAGE_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA4_PAGE_IB_SUB_REMAIN +#define SDMA4_PAGE_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA4_PAGE_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA4_PAGE_PREEMPT +#define SDMA4_PAGE_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA4_PAGE_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA4_PAGE_DUMMY_REG +#define SDMA4_PAGE_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA4_PAGE_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA4_PAGE_RB_WPTR_POLL_ADDR_HI +#define SDMA4_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA4_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA4_PAGE_RB_WPTR_POLL_ADDR_LO +#define SDMA4_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA4_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA4_PAGE_RB_AQL_CNTL +#define SDMA4_PAGE_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA4_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA4_PAGE_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA4_PAGE_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA4_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA4_PAGE_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA4_PAGE_MINOR_PTR_UPDATE +#define SDMA4_PAGE_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA4_PAGE_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA4_PAGE_MIDCMD_DATA0 +#define SDMA4_PAGE_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA4_PAGE_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA4_PAGE_MIDCMD_DATA1 +#define SDMA4_PAGE_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA4_PAGE_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA4_PAGE_MIDCMD_DATA2 +#define SDMA4_PAGE_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA4_PAGE_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA4_PAGE_MIDCMD_DATA3 +#define SDMA4_PAGE_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA4_PAGE_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA4_PAGE_MIDCMD_DATA4 +#define SDMA4_PAGE_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA4_PAGE_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA4_PAGE_MIDCMD_DATA5 +#define SDMA4_PAGE_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA4_PAGE_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA4_PAGE_MIDCMD_DATA6 +#define SDMA4_PAGE_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA4_PAGE_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA4_PAGE_MIDCMD_DATA7 +#define SDMA4_PAGE_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA4_PAGE_MIDCMD_DATA7__DATA7_MASK 
0xFFFFFFFFL +//SDMA4_PAGE_MIDCMD_DATA8 +#define SDMA4_PAGE_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA4_PAGE_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA4_PAGE_MIDCMD_CNTL +#define SDMA4_PAGE_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA4_PAGE_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA4_PAGE_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA4_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA4_PAGE_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA4_PAGE_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA4_PAGE_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA4_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA4_RLC0_RB_CNTL +#define SDMA4_RLC0_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA4_RLC0_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA4_RLC0_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA4_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA4_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA4_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA4_RLC0_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA4_RLC0_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA4_RLC0_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA4_RLC0_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA4_RLC0_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA4_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA4_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA4_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA4_RLC0_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA4_RLC0_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA4_RLC0_RB_BASE +#define SDMA4_RLC0_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA4_RLC0_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA4_RLC0_RB_BASE_HI +#define SDMA4_RLC0_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA4_RLC0_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA4_RLC0_RB_RPTR +#define SDMA4_RLC0_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA4_RLC0_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA4_RLC0_RB_RPTR_HI +#define SDMA4_RLC0_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA4_RLC0_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA4_RLC0_RB_WPTR +#define SDMA4_RLC0_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA4_RLC0_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA4_RLC0_RB_WPTR_HI +#define SDMA4_RLC0_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA4_RLC0_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA4_RLC0_RB_WPTR_POLL_CNTL +#define SDMA4_RLC0_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA4_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA4_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA4_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA4_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA4_RLC0_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA4_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA4_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA4_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA4_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA4_RLC0_RB_RPTR_ADDR_HI +#define SDMA4_RLC0_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA4_RLC0_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA4_RLC0_RB_RPTR_ADDR_LO +#define SDMA4_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA4_RLC0_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA4_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA4_RLC0_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA4_RLC0_IB_CNTL +#define SDMA4_RLC0_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define 
SDMA4_RLC0_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA4_RLC0_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA4_RLC0_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA4_RLC0_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA4_RLC0_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA4_RLC0_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA4_RLC0_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA4_RLC0_IB_RPTR +#define SDMA4_RLC0_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA4_RLC0_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA4_RLC0_IB_OFFSET +#define SDMA4_RLC0_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA4_RLC0_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA4_RLC0_IB_BASE_LO +#define SDMA4_RLC0_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA4_RLC0_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA4_RLC0_IB_BASE_HI +#define SDMA4_RLC0_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA4_RLC0_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA4_RLC0_IB_SIZE +#define SDMA4_RLC0_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA4_RLC0_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA4_RLC0_SKIP_CNTL +#define SDMA4_RLC0_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA4_RLC0_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA4_RLC0_CONTEXT_STATUS +#define SDMA4_RLC0_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA4_RLC0_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA4_RLC0_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA4_RLC0_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA4_RLC0_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA4_RLC0_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA4_RLC0_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA4_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA4_RLC0_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA4_RLC0_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA4_RLC0_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA4_RLC0_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA4_RLC0_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA4_RLC0_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA4_RLC0_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA4_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA4_RLC0_DOORBELL +#define SDMA4_RLC0_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA4_RLC0_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA4_RLC0_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA4_RLC0_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA4_RLC0_STATUS +#define SDMA4_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA4_RLC0_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA4_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA4_RLC0_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA4_RLC0_DOORBELL_LOG +#define SDMA4_RLC0_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA4_RLC0_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA4_RLC0_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA4_RLC0_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA4_RLC0_WATERMARK +#define SDMA4_RLC0_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA4_RLC0_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA4_RLC0_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA4_RLC0_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA4_RLC0_DOORBELL_OFFSET +#define SDMA4_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA4_RLC0_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA4_RLC0_CSA_ADDR_LO +#define SDMA4_RLC0_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA4_RLC0_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA4_RLC0_CSA_ADDR_HI +#define SDMA4_RLC0_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA4_RLC0_CSA_ADDR_HI__ADDR_MASK 
0xFFFFFFFFL +//SDMA4_RLC0_IB_SUB_REMAIN +#define SDMA4_RLC0_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA4_RLC0_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA4_RLC0_PREEMPT +#define SDMA4_RLC0_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA4_RLC0_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA4_RLC0_DUMMY_REG +#define SDMA4_RLC0_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA4_RLC0_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA4_RLC0_RB_WPTR_POLL_ADDR_HI +#define SDMA4_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA4_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA4_RLC0_RB_WPTR_POLL_ADDR_LO +#define SDMA4_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA4_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA4_RLC0_RB_AQL_CNTL +#define SDMA4_RLC0_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA4_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA4_RLC0_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA4_RLC0_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA4_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA4_RLC0_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA4_RLC0_MINOR_PTR_UPDATE +#define SDMA4_RLC0_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA4_RLC0_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA4_RLC0_MIDCMD_DATA0 +#define SDMA4_RLC0_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA4_RLC0_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA4_RLC0_MIDCMD_DATA1 +#define SDMA4_RLC0_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA4_RLC0_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA4_RLC0_MIDCMD_DATA2 +#define SDMA4_RLC0_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA4_RLC0_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA4_RLC0_MIDCMD_DATA3 +#define SDMA4_RLC0_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA4_RLC0_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA4_RLC0_MIDCMD_DATA4 +#define SDMA4_RLC0_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA4_RLC0_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA4_RLC0_MIDCMD_DATA5 +#define SDMA4_RLC0_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA4_RLC0_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA4_RLC0_MIDCMD_DATA6 +#define SDMA4_RLC0_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA4_RLC0_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA4_RLC0_MIDCMD_DATA7 +#define SDMA4_RLC0_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA4_RLC0_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA4_RLC0_MIDCMD_DATA8 +#define SDMA4_RLC0_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA4_RLC0_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA4_RLC0_MIDCMD_CNTL +#define SDMA4_RLC0_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA4_RLC0_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA4_RLC0_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA4_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA4_RLC0_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA4_RLC0_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA4_RLC0_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA4_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA4_RLC1_RB_CNTL +#define SDMA4_RLC1_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA4_RLC1_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA4_RLC1_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA4_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA4_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA4_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA4_RLC1_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA4_RLC1_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA4_RLC1_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA4_RLC1_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define 
SDMA4_RLC1_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA4_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA4_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA4_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA4_RLC1_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA4_RLC1_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA4_RLC1_RB_BASE +#define SDMA4_RLC1_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA4_RLC1_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA4_RLC1_RB_BASE_HI +#define SDMA4_RLC1_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA4_RLC1_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA4_RLC1_RB_RPTR +#define SDMA4_RLC1_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA4_RLC1_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA4_RLC1_RB_RPTR_HI +#define SDMA4_RLC1_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA4_RLC1_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA4_RLC1_RB_WPTR +#define SDMA4_RLC1_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA4_RLC1_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA4_RLC1_RB_WPTR_HI +#define SDMA4_RLC1_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA4_RLC1_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA4_RLC1_RB_WPTR_POLL_CNTL +#define SDMA4_RLC1_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA4_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA4_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA4_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA4_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA4_RLC1_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA4_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA4_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA4_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA4_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA4_RLC1_RB_RPTR_ADDR_HI +#define SDMA4_RLC1_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA4_RLC1_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA4_RLC1_RB_RPTR_ADDR_LO +#define SDMA4_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA4_RLC1_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA4_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA4_RLC1_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA4_RLC1_IB_CNTL +#define SDMA4_RLC1_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA4_RLC1_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA4_RLC1_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA4_RLC1_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA4_RLC1_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA4_RLC1_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA4_RLC1_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA4_RLC1_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA4_RLC1_IB_RPTR +#define SDMA4_RLC1_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA4_RLC1_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA4_RLC1_IB_OFFSET +#define SDMA4_RLC1_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA4_RLC1_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA4_RLC1_IB_BASE_LO +#define SDMA4_RLC1_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA4_RLC1_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA4_RLC1_IB_BASE_HI +#define SDMA4_RLC1_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA4_RLC1_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA4_RLC1_IB_SIZE +#define SDMA4_RLC1_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA4_RLC1_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA4_RLC1_SKIP_CNTL +#define SDMA4_RLC1_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA4_RLC1_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA4_RLC1_CONTEXT_STATUS +#define SDMA4_RLC1_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define 
SDMA4_RLC1_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA4_RLC1_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA4_RLC1_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA4_RLC1_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA4_RLC1_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA4_RLC1_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA4_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA4_RLC1_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA4_RLC1_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA4_RLC1_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA4_RLC1_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA4_RLC1_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA4_RLC1_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA4_RLC1_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA4_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA4_RLC1_DOORBELL +#define SDMA4_RLC1_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA4_RLC1_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA4_RLC1_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA4_RLC1_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA4_RLC1_STATUS +#define SDMA4_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA4_RLC1_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA4_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA4_RLC1_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA4_RLC1_DOORBELL_LOG +#define SDMA4_RLC1_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA4_RLC1_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA4_RLC1_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA4_RLC1_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA4_RLC1_WATERMARK +#define SDMA4_RLC1_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA4_RLC1_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA4_RLC1_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA4_RLC1_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA4_RLC1_DOORBELL_OFFSET +#define SDMA4_RLC1_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA4_RLC1_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA4_RLC1_CSA_ADDR_LO +#define SDMA4_RLC1_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA4_RLC1_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA4_RLC1_CSA_ADDR_HI +#define SDMA4_RLC1_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA4_RLC1_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA4_RLC1_IB_SUB_REMAIN +#define SDMA4_RLC1_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA4_RLC1_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA4_RLC1_PREEMPT +#define SDMA4_RLC1_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA4_RLC1_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA4_RLC1_DUMMY_REG +#define SDMA4_RLC1_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA4_RLC1_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA4_RLC1_RB_WPTR_POLL_ADDR_HI +#define SDMA4_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA4_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA4_RLC1_RB_WPTR_POLL_ADDR_LO +#define SDMA4_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA4_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA4_RLC1_RB_AQL_CNTL +#define SDMA4_RLC1_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA4_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA4_RLC1_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA4_RLC1_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA4_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA4_RLC1_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA4_RLC1_MINOR_PTR_UPDATE +#define SDMA4_RLC1_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA4_RLC1_MINOR_PTR_UPDATE__ENABLE_MASK 
0x00000001L +//SDMA4_RLC1_MIDCMD_DATA0 +#define SDMA4_RLC1_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA4_RLC1_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA4_RLC1_MIDCMD_DATA1 +#define SDMA4_RLC1_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA4_RLC1_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA4_RLC1_MIDCMD_DATA2 +#define SDMA4_RLC1_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA4_RLC1_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA4_RLC1_MIDCMD_DATA3 +#define SDMA4_RLC1_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA4_RLC1_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA4_RLC1_MIDCMD_DATA4 +#define SDMA4_RLC1_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA4_RLC1_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA4_RLC1_MIDCMD_DATA5 +#define SDMA4_RLC1_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA4_RLC1_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA4_RLC1_MIDCMD_DATA6 +#define SDMA4_RLC1_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA4_RLC1_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA4_RLC1_MIDCMD_DATA7 +#define SDMA4_RLC1_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA4_RLC1_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA4_RLC1_MIDCMD_DATA8 +#define SDMA4_RLC1_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA4_RLC1_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA4_RLC1_MIDCMD_CNTL +#define SDMA4_RLC1_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA4_RLC1_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA4_RLC1_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA4_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA4_RLC1_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA4_RLC1_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA4_RLC1_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA4_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA4_RLC2_RB_CNTL +#define SDMA4_RLC2_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA4_RLC2_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA4_RLC2_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA4_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA4_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA4_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA4_RLC2_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA4_RLC2_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA4_RLC2_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA4_RLC2_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA4_RLC2_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA4_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA4_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA4_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA4_RLC2_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA4_RLC2_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA4_RLC2_RB_BASE +#define SDMA4_RLC2_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA4_RLC2_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA4_RLC2_RB_BASE_HI +#define SDMA4_RLC2_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA4_RLC2_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA4_RLC2_RB_RPTR +#define SDMA4_RLC2_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA4_RLC2_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA4_RLC2_RB_RPTR_HI +#define SDMA4_RLC2_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA4_RLC2_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA4_RLC2_RB_WPTR +#define SDMA4_RLC2_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA4_RLC2_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA4_RLC2_RB_WPTR_HI +#define SDMA4_RLC2_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA4_RLC2_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA4_RLC2_RB_WPTR_POLL_CNTL +#define SDMA4_RLC2_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define 
SDMA4_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA4_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA4_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA4_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA4_RLC2_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA4_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA4_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA4_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA4_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA4_RLC2_RB_RPTR_ADDR_HI +#define SDMA4_RLC2_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA4_RLC2_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA4_RLC2_RB_RPTR_ADDR_LO +#define SDMA4_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA4_RLC2_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA4_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA4_RLC2_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA4_RLC2_IB_CNTL +#define SDMA4_RLC2_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA4_RLC2_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA4_RLC2_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA4_RLC2_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA4_RLC2_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA4_RLC2_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA4_RLC2_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA4_RLC2_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA4_RLC2_IB_RPTR +#define SDMA4_RLC2_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA4_RLC2_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA4_RLC2_IB_OFFSET +#define SDMA4_RLC2_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA4_RLC2_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA4_RLC2_IB_BASE_LO +#define SDMA4_RLC2_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA4_RLC2_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA4_RLC2_IB_BASE_HI +#define SDMA4_RLC2_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA4_RLC2_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA4_RLC2_IB_SIZE +#define SDMA4_RLC2_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA4_RLC2_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA4_RLC2_SKIP_CNTL +#define SDMA4_RLC2_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA4_RLC2_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA4_RLC2_CONTEXT_STATUS +#define SDMA4_RLC2_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA4_RLC2_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA4_RLC2_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA4_RLC2_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA4_RLC2_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA4_RLC2_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA4_RLC2_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA4_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA4_RLC2_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA4_RLC2_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA4_RLC2_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA4_RLC2_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA4_RLC2_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA4_RLC2_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA4_RLC2_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA4_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA4_RLC2_DOORBELL +#define SDMA4_RLC2_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA4_RLC2_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA4_RLC2_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA4_RLC2_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA4_RLC2_STATUS +#define SDMA4_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define 
SDMA4_RLC2_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA4_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA4_RLC2_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA4_RLC2_DOORBELL_LOG +#define SDMA4_RLC2_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA4_RLC2_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA4_RLC2_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA4_RLC2_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA4_RLC2_WATERMARK +#define SDMA4_RLC2_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA4_RLC2_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA4_RLC2_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA4_RLC2_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA4_RLC2_DOORBELL_OFFSET +#define SDMA4_RLC2_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA4_RLC2_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA4_RLC2_CSA_ADDR_LO +#define SDMA4_RLC2_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA4_RLC2_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA4_RLC2_CSA_ADDR_HI +#define SDMA4_RLC2_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA4_RLC2_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA4_RLC2_IB_SUB_REMAIN +#define SDMA4_RLC2_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA4_RLC2_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA4_RLC2_PREEMPT +#define SDMA4_RLC2_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA4_RLC2_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA4_RLC2_DUMMY_REG +#define SDMA4_RLC2_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA4_RLC2_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA4_RLC2_RB_WPTR_POLL_ADDR_HI +#define SDMA4_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA4_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA4_RLC2_RB_WPTR_POLL_ADDR_LO +#define SDMA4_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA4_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA4_RLC2_RB_AQL_CNTL +#define SDMA4_RLC2_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA4_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA4_RLC2_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA4_RLC2_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA4_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA4_RLC2_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA4_RLC2_MINOR_PTR_UPDATE +#define SDMA4_RLC2_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA4_RLC2_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA4_RLC2_MIDCMD_DATA0 +#define SDMA4_RLC2_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA4_RLC2_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA4_RLC2_MIDCMD_DATA1 +#define SDMA4_RLC2_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA4_RLC2_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA4_RLC2_MIDCMD_DATA2 +#define SDMA4_RLC2_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA4_RLC2_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA4_RLC2_MIDCMD_DATA3 +#define SDMA4_RLC2_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA4_RLC2_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA4_RLC2_MIDCMD_DATA4 +#define SDMA4_RLC2_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA4_RLC2_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA4_RLC2_MIDCMD_DATA5 +#define SDMA4_RLC2_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA4_RLC2_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA4_RLC2_MIDCMD_DATA6 +#define SDMA4_RLC2_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA4_RLC2_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA4_RLC2_MIDCMD_DATA7 +#define SDMA4_RLC2_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA4_RLC2_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA4_RLC2_MIDCMD_DATA8 +#define SDMA4_RLC2_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA4_RLC2_MIDCMD_DATA8__DATA8_MASK 
0xFFFFFFFFL +//SDMA4_RLC2_MIDCMD_CNTL +#define SDMA4_RLC2_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA4_RLC2_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA4_RLC2_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA4_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA4_RLC2_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA4_RLC2_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA4_RLC2_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA4_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA4_RLC3_RB_CNTL +#define SDMA4_RLC3_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA4_RLC3_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA4_RLC3_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA4_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA4_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA4_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA4_RLC3_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA4_RLC3_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA4_RLC3_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA4_RLC3_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA4_RLC3_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA4_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA4_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA4_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA4_RLC3_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA4_RLC3_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA4_RLC3_RB_BASE +#define SDMA4_RLC3_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA4_RLC3_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA4_RLC3_RB_BASE_HI +#define SDMA4_RLC3_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA4_RLC3_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA4_RLC3_RB_RPTR +#define SDMA4_RLC3_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA4_RLC3_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA4_RLC3_RB_RPTR_HI +#define SDMA4_RLC3_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA4_RLC3_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA4_RLC3_RB_WPTR +#define SDMA4_RLC3_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA4_RLC3_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA4_RLC3_RB_WPTR_HI +#define SDMA4_RLC3_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA4_RLC3_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA4_RLC3_RB_WPTR_POLL_CNTL +#define SDMA4_RLC3_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA4_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA4_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA4_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA4_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA4_RLC3_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA4_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA4_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA4_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA4_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA4_RLC3_RB_RPTR_ADDR_HI +#define SDMA4_RLC3_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA4_RLC3_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA4_RLC3_RB_RPTR_ADDR_LO +#define SDMA4_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA4_RLC3_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA4_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA4_RLC3_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA4_RLC3_IB_CNTL +#define SDMA4_RLC3_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA4_RLC3_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA4_RLC3_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA4_RLC3_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define 
SDMA4_RLC3_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA4_RLC3_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA4_RLC3_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA4_RLC3_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA4_RLC3_IB_RPTR +#define SDMA4_RLC3_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA4_RLC3_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA4_RLC3_IB_OFFSET +#define SDMA4_RLC3_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA4_RLC3_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA4_RLC3_IB_BASE_LO +#define SDMA4_RLC3_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA4_RLC3_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA4_RLC3_IB_BASE_HI +#define SDMA4_RLC3_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA4_RLC3_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA4_RLC3_IB_SIZE +#define SDMA4_RLC3_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA4_RLC3_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA4_RLC3_SKIP_CNTL +#define SDMA4_RLC3_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA4_RLC3_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA4_RLC3_CONTEXT_STATUS +#define SDMA4_RLC3_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA4_RLC3_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA4_RLC3_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA4_RLC3_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA4_RLC3_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA4_RLC3_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA4_RLC3_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA4_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA4_RLC3_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA4_RLC3_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA4_RLC3_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA4_RLC3_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA4_RLC3_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA4_RLC3_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA4_RLC3_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA4_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA4_RLC3_DOORBELL +#define SDMA4_RLC3_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA4_RLC3_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA4_RLC3_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA4_RLC3_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA4_RLC3_STATUS +#define SDMA4_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA4_RLC3_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA4_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA4_RLC3_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA4_RLC3_DOORBELL_LOG +#define SDMA4_RLC3_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA4_RLC3_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA4_RLC3_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA4_RLC3_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA4_RLC3_WATERMARK +#define SDMA4_RLC3_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA4_RLC3_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA4_RLC3_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA4_RLC3_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA4_RLC3_DOORBELL_OFFSET +#define SDMA4_RLC3_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA4_RLC3_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA4_RLC3_CSA_ADDR_LO +#define SDMA4_RLC3_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA4_RLC3_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA4_RLC3_CSA_ADDR_HI +#define SDMA4_RLC3_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA4_RLC3_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA4_RLC3_IB_SUB_REMAIN +#define SDMA4_RLC3_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA4_RLC3_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL 
+//SDMA4_RLC3_PREEMPT +#define SDMA4_RLC3_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA4_RLC3_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA4_RLC3_DUMMY_REG +#define SDMA4_RLC3_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA4_RLC3_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA4_RLC3_RB_WPTR_POLL_ADDR_HI +#define SDMA4_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA4_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA4_RLC3_RB_WPTR_POLL_ADDR_LO +#define SDMA4_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA4_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA4_RLC3_RB_AQL_CNTL +#define SDMA4_RLC3_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA4_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA4_RLC3_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA4_RLC3_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA4_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA4_RLC3_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA4_RLC3_MINOR_PTR_UPDATE +#define SDMA4_RLC3_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA4_RLC3_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA4_RLC3_MIDCMD_DATA0 +#define SDMA4_RLC3_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA4_RLC3_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA4_RLC3_MIDCMD_DATA1 +#define SDMA4_RLC3_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA4_RLC3_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA4_RLC3_MIDCMD_DATA2 +#define SDMA4_RLC3_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA4_RLC3_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA4_RLC3_MIDCMD_DATA3 +#define SDMA4_RLC3_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA4_RLC3_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA4_RLC3_MIDCMD_DATA4 +#define SDMA4_RLC3_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA4_RLC3_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA4_RLC3_MIDCMD_DATA5 +#define SDMA4_RLC3_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA4_RLC3_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA4_RLC3_MIDCMD_DATA6 +#define SDMA4_RLC3_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA4_RLC3_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA4_RLC3_MIDCMD_DATA7 +#define SDMA4_RLC3_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA4_RLC3_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA4_RLC3_MIDCMD_DATA8 +#define SDMA4_RLC3_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA4_RLC3_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA4_RLC3_MIDCMD_CNTL +#define SDMA4_RLC3_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA4_RLC3_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA4_RLC3_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA4_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA4_RLC3_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA4_RLC3_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA4_RLC3_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA4_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA4_RLC4_RB_CNTL +#define SDMA4_RLC4_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA4_RLC4_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA4_RLC4_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA4_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA4_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA4_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA4_RLC4_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA4_RLC4_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA4_RLC4_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA4_RLC4_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA4_RLC4_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA4_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define 
SDMA4_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA4_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA4_RLC4_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA4_RLC4_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA4_RLC4_RB_BASE +#define SDMA4_RLC4_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA4_RLC4_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA4_RLC4_RB_BASE_HI +#define SDMA4_RLC4_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA4_RLC4_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA4_RLC4_RB_RPTR +#define SDMA4_RLC4_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA4_RLC4_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA4_RLC4_RB_RPTR_HI +#define SDMA4_RLC4_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA4_RLC4_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA4_RLC4_RB_WPTR +#define SDMA4_RLC4_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA4_RLC4_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA4_RLC4_RB_WPTR_HI +#define SDMA4_RLC4_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA4_RLC4_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA4_RLC4_RB_WPTR_POLL_CNTL +#define SDMA4_RLC4_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA4_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA4_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA4_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA4_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA4_RLC4_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA4_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA4_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA4_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA4_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA4_RLC4_RB_RPTR_ADDR_HI +#define SDMA4_RLC4_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA4_RLC4_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA4_RLC4_RB_RPTR_ADDR_LO +#define SDMA4_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA4_RLC4_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA4_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA4_RLC4_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA4_RLC4_IB_CNTL +#define SDMA4_RLC4_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA4_RLC4_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA4_RLC4_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA4_RLC4_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA4_RLC4_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA4_RLC4_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA4_RLC4_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA4_RLC4_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA4_RLC4_IB_RPTR +#define SDMA4_RLC4_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA4_RLC4_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA4_RLC4_IB_OFFSET +#define SDMA4_RLC4_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA4_RLC4_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA4_RLC4_IB_BASE_LO +#define SDMA4_RLC4_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA4_RLC4_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA4_RLC4_IB_BASE_HI +#define SDMA4_RLC4_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA4_RLC4_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA4_RLC4_IB_SIZE +#define SDMA4_RLC4_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA4_RLC4_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA4_RLC4_SKIP_CNTL +#define SDMA4_RLC4_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA4_RLC4_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA4_RLC4_CONTEXT_STATUS +#define SDMA4_RLC4_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA4_RLC4_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA4_RLC4_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define 
SDMA4_RLC4_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA4_RLC4_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA4_RLC4_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA4_RLC4_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA4_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA4_RLC4_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA4_RLC4_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA4_RLC4_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA4_RLC4_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA4_RLC4_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA4_RLC4_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA4_RLC4_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA4_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA4_RLC4_DOORBELL +#define SDMA4_RLC4_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA4_RLC4_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA4_RLC4_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA4_RLC4_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA4_RLC4_STATUS +#define SDMA4_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA4_RLC4_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA4_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA4_RLC4_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA4_RLC4_DOORBELL_LOG +#define SDMA4_RLC4_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA4_RLC4_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA4_RLC4_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA4_RLC4_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA4_RLC4_WATERMARK +#define SDMA4_RLC4_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA4_RLC4_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA4_RLC4_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA4_RLC4_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA4_RLC4_DOORBELL_OFFSET +#define SDMA4_RLC4_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA4_RLC4_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA4_RLC4_CSA_ADDR_LO +#define SDMA4_RLC4_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA4_RLC4_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA4_RLC4_CSA_ADDR_HI +#define SDMA4_RLC4_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA4_RLC4_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA4_RLC4_IB_SUB_REMAIN +#define SDMA4_RLC4_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA4_RLC4_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA4_RLC4_PREEMPT +#define SDMA4_RLC4_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA4_RLC4_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA4_RLC4_DUMMY_REG +#define SDMA4_RLC4_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA4_RLC4_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA4_RLC4_RB_WPTR_POLL_ADDR_HI +#define SDMA4_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA4_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA4_RLC4_RB_WPTR_POLL_ADDR_LO +#define SDMA4_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA4_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA4_RLC4_RB_AQL_CNTL +#define SDMA4_RLC4_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA4_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA4_RLC4_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA4_RLC4_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA4_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA4_RLC4_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA4_RLC4_MINOR_PTR_UPDATE +#define SDMA4_RLC4_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA4_RLC4_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA4_RLC4_MIDCMD_DATA0 +#define SDMA4_RLC4_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define 
SDMA4_RLC4_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA4_RLC4_MIDCMD_DATA1 +#define SDMA4_RLC4_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA4_RLC4_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA4_RLC4_MIDCMD_DATA2 +#define SDMA4_RLC4_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA4_RLC4_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA4_RLC4_MIDCMD_DATA3 +#define SDMA4_RLC4_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA4_RLC4_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA4_RLC4_MIDCMD_DATA4 +#define SDMA4_RLC4_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA4_RLC4_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA4_RLC4_MIDCMD_DATA5 +#define SDMA4_RLC4_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA4_RLC4_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA4_RLC4_MIDCMD_DATA6 +#define SDMA4_RLC4_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA4_RLC4_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA4_RLC4_MIDCMD_DATA7 +#define SDMA4_RLC4_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA4_RLC4_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA4_RLC4_MIDCMD_DATA8 +#define SDMA4_RLC4_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA4_RLC4_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA4_RLC4_MIDCMD_CNTL +#define SDMA4_RLC4_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA4_RLC4_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA4_RLC4_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA4_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA4_RLC4_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA4_RLC4_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA4_RLC4_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA4_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA4_RLC5_RB_CNTL +#define SDMA4_RLC5_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA4_RLC5_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA4_RLC5_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA4_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA4_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA4_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA4_RLC5_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA4_RLC5_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA4_RLC5_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA4_RLC5_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA4_RLC5_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA4_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA4_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA4_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA4_RLC5_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA4_RLC5_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA4_RLC5_RB_BASE +#define SDMA4_RLC5_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA4_RLC5_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA4_RLC5_RB_BASE_HI +#define SDMA4_RLC5_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA4_RLC5_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA4_RLC5_RB_RPTR +#define SDMA4_RLC5_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA4_RLC5_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA4_RLC5_RB_RPTR_HI +#define SDMA4_RLC5_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA4_RLC5_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA4_RLC5_RB_WPTR +#define SDMA4_RLC5_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA4_RLC5_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA4_RLC5_RB_WPTR_HI +#define SDMA4_RLC5_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA4_RLC5_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA4_RLC5_RB_WPTR_POLL_CNTL +#define SDMA4_RLC5_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA4_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA4_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define 
SDMA4_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA4_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA4_RLC5_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA4_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA4_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA4_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA4_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA4_RLC5_RB_RPTR_ADDR_HI +#define SDMA4_RLC5_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA4_RLC5_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA4_RLC5_RB_RPTR_ADDR_LO +#define SDMA4_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA4_RLC5_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA4_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA4_RLC5_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA4_RLC5_IB_CNTL +#define SDMA4_RLC5_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA4_RLC5_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA4_RLC5_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA4_RLC5_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA4_RLC5_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA4_RLC5_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA4_RLC5_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA4_RLC5_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA4_RLC5_IB_RPTR +#define SDMA4_RLC5_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA4_RLC5_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA4_RLC5_IB_OFFSET +#define SDMA4_RLC5_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA4_RLC5_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA4_RLC5_IB_BASE_LO +#define SDMA4_RLC5_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA4_RLC5_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA4_RLC5_IB_BASE_HI +#define SDMA4_RLC5_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA4_RLC5_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA4_RLC5_IB_SIZE +#define SDMA4_RLC5_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA4_RLC5_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA4_RLC5_SKIP_CNTL +#define SDMA4_RLC5_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA4_RLC5_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA4_RLC5_CONTEXT_STATUS +#define SDMA4_RLC5_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA4_RLC5_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA4_RLC5_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA4_RLC5_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA4_RLC5_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA4_RLC5_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA4_RLC5_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA4_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA4_RLC5_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA4_RLC5_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA4_RLC5_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA4_RLC5_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA4_RLC5_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA4_RLC5_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA4_RLC5_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA4_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA4_RLC5_DOORBELL +#define SDMA4_RLC5_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA4_RLC5_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA4_RLC5_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA4_RLC5_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA4_RLC5_STATUS +#define SDMA4_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA4_RLC5_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA4_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define 
SDMA4_RLC5_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA4_RLC5_DOORBELL_LOG +#define SDMA4_RLC5_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA4_RLC5_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA4_RLC5_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA4_RLC5_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA4_RLC5_WATERMARK +#define SDMA4_RLC5_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA4_RLC5_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA4_RLC5_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA4_RLC5_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA4_RLC5_DOORBELL_OFFSET +#define SDMA4_RLC5_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA4_RLC5_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA4_RLC5_CSA_ADDR_LO +#define SDMA4_RLC5_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA4_RLC5_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA4_RLC5_CSA_ADDR_HI +#define SDMA4_RLC5_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA4_RLC5_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA4_RLC5_IB_SUB_REMAIN +#define SDMA4_RLC5_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA4_RLC5_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA4_RLC5_PREEMPT +#define SDMA4_RLC5_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA4_RLC5_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA4_RLC5_DUMMY_REG +#define SDMA4_RLC5_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA4_RLC5_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA4_RLC5_RB_WPTR_POLL_ADDR_HI +#define SDMA4_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA4_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA4_RLC5_RB_WPTR_POLL_ADDR_LO +#define SDMA4_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA4_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA4_RLC5_RB_AQL_CNTL +#define SDMA4_RLC5_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA4_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA4_RLC5_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA4_RLC5_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA4_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA4_RLC5_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA4_RLC5_MINOR_PTR_UPDATE +#define SDMA4_RLC5_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA4_RLC5_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA4_RLC5_MIDCMD_DATA0 +#define SDMA4_RLC5_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA4_RLC5_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA4_RLC5_MIDCMD_DATA1 +#define SDMA4_RLC5_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA4_RLC5_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA4_RLC5_MIDCMD_DATA2 +#define SDMA4_RLC5_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA4_RLC5_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA4_RLC5_MIDCMD_DATA3 +#define SDMA4_RLC5_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA4_RLC5_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA4_RLC5_MIDCMD_DATA4 +#define SDMA4_RLC5_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA4_RLC5_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA4_RLC5_MIDCMD_DATA5 +#define SDMA4_RLC5_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA4_RLC5_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA4_RLC5_MIDCMD_DATA6 +#define SDMA4_RLC5_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA4_RLC5_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA4_RLC5_MIDCMD_DATA7 +#define SDMA4_RLC5_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA4_RLC5_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA4_RLC5_MIDCMD_DATA8 +#define SDMA4_RLC5_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA4_RLC5_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA4_RLC5_MIDCMD_CNTL +#define SDMA4_RLC5_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define 
SDMA4_RLC5_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA4_RLC5_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA4_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA4_RLC5_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA4_RLC5_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA4_RLC5_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA4_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA4_RLC6_RB_CNTL +#define SDMA4_RLC6_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA4_RLC6_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA4_RLC6_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA4_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA4_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA4_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA4_RLC6_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA4_RLC6_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA4_RLC6_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA4_RLC6_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA4_RLC6_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA4_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA4_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA4_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA4_RLC6_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA4_RLC6_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA4_RLC6_RB_BASE +#define SDMA4_RLC6_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA4_RLC6_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA4_RLC6_RB_BASE_HI +#define SDMA4_RLC6_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA4_RLC6_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA4_RLC6_RB_RPTR +#define SDMA4_RLC6_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA4_RLC6_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA4_RLC6_RB_RPTR_HI +#define SDMA4_RLC6_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA4_RLC6_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA4_RLC6_RB_WPTR +#define SDMA4_RLC6_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA4_RLC6_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA4_RLC6_RB_WPTR_HI +#define SDMA4_RLC6_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA4_RLC6_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA4_RLC6_RB_WPTR_POLL_CNTL +#define SDMA4_RLC6_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA4_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA4_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA4_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA4_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA4_RLC6_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA4_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA4_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA4_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA4_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA4_RLC6_RB_RPTR_ADDR_HI +#define SDMA4_RLC6_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA4_RLC6_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA4_RLC6_RB_RPTR_ADDR_LO +#define SDMA4_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA4_RLC6_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA4_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA4_RLC6_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA4_RLC6_IB_CNTL +#define SDMA4_RLC6_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA4_RLC6_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA4_RLC6_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA4_RLC6_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA4_RLC6_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA4_RLC6_IB_CNTL__IB_SWAP_ENABLE_MASK 
0x00000010L +#define SDMA4_RLC6_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA4_RLC6_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA4_RLC6_IB_RPTR +#define SDMA4_RLC6_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA4_RLC6_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA4_RLC6_IB_OFFSET +#define SDMA4_RLC6_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA4_RLC6_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA4_RLC6_IB_BASE_LO +#define SDMA4_RLC6_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA4_RLC6_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA4_RLC6_IB_BASE_HI +#define SDMA4_RLC6_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA4_RLC6_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA4_RLC6_IB_SIZE +#define SDMA4_RLC6_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA4_RLC6_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA4_RLC6_SKIP_CNTL +#define SDMA4_RLC6_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA4_RLC6_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA4_RLC6_CONTEXT_STATUS +#define SDMA4_RLC6_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA4_RLC6_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA4_RLC6_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA4_RLC6_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA4_RLC6_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA4_RLC6_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA4_RLC6_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA4_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA4_RLC6_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA4_RLC6_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA4_RLC6_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA4_RLC6_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA4_RLC6_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA4_RLC6_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA4_RLC6_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA4_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA4_RLC6_DOORBELL +#define SDMA4_RLC6_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA4_RLC6_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA4_RLC6_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA4_RLC6_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA4_RLC6_STATUS +#define SDMA4_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA4_RLC6_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA4_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA4_RLC6_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA4_RLC6_DOORBELL_LOG +#define SDMA4_RLC6_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA4_RLC6_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA4_RLC6_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA4_RLC6_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA4_RLC6_WATERMARK +#define SDMA4_RLC6_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA4_RLC6_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA4_RLC6_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA4_RLC6_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA4_RLC6_DOORBELL_OFFSET +#define SDMA4_RLC6_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA4_RLC6_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA4_RLC6_CSA_ADDR_LO +#define SDMA4_RLC6_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA4_RLC6_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA4_RLC6_CSA_ADDR_HI +#define SDMA4_RLC6_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA4_RLC6_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA4_RLC6_IB_SUB_REMAIN +#define SDMA4_RLC6_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA4_RLC6_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA4_RLC6_PREEMPT +#define SDMA4_RLC6_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define 
SDMA4_RLC6_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA4_RLC6_DUMMY_REG +#define SDMA4_RLC6_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA4_RLC6_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA4_RLC6_RB_WPTR_POLL_ADDR_HI +#define SDMA4_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA4_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA4_RLC6_RB_WPTR_POLL_ADDR_LO +#define SDMA4_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA4_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA4_RLC6_RB_AQL_CNTL +#define SDMA4_RLC6_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA4_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA4_RLC6_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA4_RLC6_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA4_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA4_RLC6_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA4_RLC6_MINOR_PTR_UPDATE +#define SDMA4_RLC6_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA4_RLC6_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA4_RLC6_MIDCMD_DATA0 +#define SDMA4_RLC6_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA4_RLC6_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA4_RLC6_MIDCMD_DATA1 +#define SDMA4_RLC6_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA4_RLC6_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA4_RLC6_MIDCMD_DATA2 +#define SDMA4_RLC6_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA4_RLC6_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA4_RLC6_MIDCMD_DATA3 +#define SDMA4_RLC6_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA4_RLC6_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA4_RLC6_MIDCMD_DATA4 +#define SDMA4_RLC6_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA4_RLC6_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA4_RLC6_MIDCMD_DATA5 +#define SDMA4_RLC6_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA4_RLC6_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA4_RLC6_MIDCMD_DATA6 +#define SDMA4_RLC6_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA4_RLC6_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA4_RLC6_MIDCMD_DATA7 +#define SDMA4_RLC6_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA4_RLC6_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA4_RLC6_MIDCMD_DATA8 +#define SDMA4_RLC6_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA4_RLC6_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA4_RLC6_MIDCMD_CNTL +#define SDMA4_RLC6_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA4_RLC6_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA4_RLC6_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA4_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA4_RLC6_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA4_RLC6_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA4_RLC6_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA4_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA4_RLC7_RB_CNTL +#define SDMA4_RLC7_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA4_RLC7_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA4_RLC7_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA4_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA4_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA4_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA4_RLC7_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA4_RLC7_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA4_RLC7_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA4_RLC7_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA4_RLC7_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA4_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA4_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define 
SDMA4_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA4_RLC7_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA4_RLC7_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA4_RLC7_RB_BASE +#define SDMA4_RLC7_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA4_RLC7_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA4_RLC7_RB_BASE_HI +#define SDMA4_RLC7_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA4_RLC7_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA4_RLC7_RB_RPTR +#define SDMA4_RLC7_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA4_RLC7_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA4_RLC7_RB_RPTR_HI +#define SDMA4_RLC7_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA4_RLC7_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA4_RLC7_RB_WPTR +#define SDMA4_RLC7_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA4_RLC7_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA4_RLC7_RB_WPTR_HI +#define SDMA4_RLC7_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA4_RLC7_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA4_RLC7_RB_WPTR_POLL_CNTL +#define SDMA4_RLC7_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA4_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA4_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA4_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA4_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA4_RLC7_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA4_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA4_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA4_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA4_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA4_RLC7_RB_RPTR_ADDR_HI +#define SDMA4_RLC7_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA4_RLC7_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA4_RLC7_RB_RPTR_ADDR_LO +#define SDMA4_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA4_RLC7_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA4_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA4_RLC7_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA4_RLC7_IB_CNTL +#define SDMA4_RLC7_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA4_RLC7_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA4_RLC7_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA4_RLC7_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA4_RLC7_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA4_RLC7_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA4_RLC7_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA4_RLC7_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA4_RLC7_IB_RPTR +#define SDMA4_RLC7_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA4_RLC7_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA4_RLC7_IB_OFFSET +#define SDMA4_RLC7_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA4_RLC7_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA4_RLC7_IB_BASE_LO +#define SDMA4_RLC7_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA4_RLC7_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA4_RLC7_IB_BASE_HI +#define SDMA4_RLC7_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA4_RLC7_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA4_RLC7_IB_SIZE +#define SDMA4_RLC7_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA4_RLC7_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA4_RLC7_SKIP_CNTL +#define SDMA4_RLC7_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA4_RLC7_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA4_RLC7_CONTEXT_STATUS +#define SDMA4_RLC7_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA4_RLC7_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA4_RLC7_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA4_RLC7_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA4_RLC7_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 
+#define SDMA4_RLC7_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA4_RLC7_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA4_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA4_RLC7_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA4_RLC7_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA4_RLC7_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA4_RLC7_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA4_RLC7_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA4_RLC7_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA4_RLC7_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA4_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA4_RLC7_DOORBELL +#define SDMA4_RLC7_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA4_RLC7_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA4_RLC7_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA4_RLC7_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA4_RLC7_STATUS +#define SDMA4_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA4_RLC7_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA4_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA4_RLC7_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA4_RLC7_DOORBELL_LOG +#define SDMA4_RLC7_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA4_RLC7_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA4_RLC7_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA4_RLC7_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA4_RLC7_WATERMARK +#define SDMA4_RLC7_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA4_RLC7_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA4_RLC7_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA4_RLC7_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA4_RLC7_DOORBELL_OFFSET +#define SDMA4_RLC7_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA4_RLC7_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA4_RLC7_CSA_ADDR_LO +#define SDMA4_RLC7_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA4_RLC7_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA4_RLC7_CSA_ADDR_HI +#define SDMA4_RLC7_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA4_RLC7_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA4_RLC7_IB_SUB_REMAIN +#define SDMA4_RLC7_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA4_RLC7_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA4_RLC7_PREEMPT +#define SDMA4_RLC7_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA4_RLC7_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA4_RLC7_DUMMY_REG +#define SDMA4_RLC7_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA4_RLC7_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA4_RLC7_RB_WPTR_POLL_ADDR_HI +#define SDMA4_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA4_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA4_RLC7_RB_WPTR_POLL_ADDR_LO +#define SDMA4_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA4_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA4_RLC7_RB_AQL_CNTL +#define SDMA4_RLC7_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA4_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA4_RLC7_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA4_RLC7_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA4_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA4_RLC7_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA4_RLC7_MINOR_PTR_UPDATE +#define SDMA4_RLC7_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA4_RLC7_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA4_RLC7_MIDCMD_DATA0 +#define SDMA4_RLC7_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA4_RLC7_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA4_RLC7_MIDCMD_DATA1 +#define 
SDMA4_RLC7_MIDCMD_DATA1__DATA1__SHIFT 0x0
+#define SDMA4_RLC7_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL
+//SDMA4_RLC7_MIDCMD_DATA2
+#define SDMA4_RLC7_MIDCMD_DATA2__DATA2__SHIFT 0x0
+#define SDMA4_RLC7_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL
+//SDMA4_RLC7_MIDCMD_DATA3
+#define SDMA4_RLC7_MIDCMD_DATA3__DATA3__SHIFT 0x0
+#define SDMA4_RLC7_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL
+//SDMA4_RLC7_MIDCMD_DATA4
+#define SDMA4_RLC7_MIDCMD_DATA4__DATA4__SHIFT 0x0
+#define SDMA4_RLC7_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL
+//SDMA4_RLC7_MIDCMD_DATA5
+#define SDMA4_RLC7_MIDCMD_DATA5__DATA5__SHIFT 0x0
+#define SDMA4_RLC7_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL
+//SDMA4_RLC7_MIDCMD_DATA6
+#define SDMA4_RLC7_MIDCMD_DATA6__DATA6__SHIFT 0x0
+#define SDMA4_RLC7_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL
+//SDMA4_RLC7_MIDCMD_DATA7
+#define SDMA4_RLC7_MIDCMD_DATA7__DATA7__SHIFT 0x0
+#define SDMA4_RLC7_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL
+//SDMA4_RLC7_MIDCMD_DATA8
+#define SDMA4_RLC7_MIDCMD_DATA8__DATA8__SHIFT 0x0
+#define SDMA4_RLC7_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL
+//SDMA4_RLC7_MIDCMD_CNTL
+#define SDMA4_RLC7_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0
+#define SDMA4_RLC7_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1
+#define SDMA4_RLC7_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4
+#define SDMA4_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8
+#define SDMA4_RLC7_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L
+#define SDMA4_RLC7_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L
+#define SDMA4_RLC7_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L
+#define SDMA4_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma5/sdma5_4_2_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/sdma5/sdma5_4_2_2_offset.h
new file mode 100644
index 000000000000..ecb51b9f90b0
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/sdma5/sdma5_4_2_2_offset.h
@@ -0,0 +1,1043 @@
+/*
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ +#ifndef _sdma5_4_2_2_OFFSET_HEADER +#define _sdma5_4_2_2_OFFSET_HEADER + + + +// addressBlock: sdma5_sdma5dec +// base address: 0x7b000 +#define mmSDMA5_UCODE_ADDR 0x0000 +#define mmSDMA5_UCODE_ADDR_BASE_IDX 1 +#define mmSDMA5_UCODE_DATA 0x0001 +#define mmSDMA5_UCODE_DATA_BASE_IDX 1 +#define mmSDMA5_VM_CNTL 0x0004 +#define mmSDMA5_VM_CNTL_BASE_IDX 1 +#define mmSDMA5_VM_CTX_LO 0x0005 +#define mmSDMA5_VM_CTX_LO_BASE_IDX 1 +#define mmSDMA5_VM_CTX_HI 0x0006 +#define mmSDMA5_VM_CTX_HI_BASE_IDX 1 +#define mmSDMA5_ACTIVE_FCN_ID 0x0007 +#define mmSDMA5_ACTIVE_FCN_ID_BASE_IDX 1 +#define mmSDMA5_VM_CTX_CNTL 0x0008 +#define mmSDMA5_VM_CTX_CNTL_BASE_IDX 1 +#define mmSDMA5_VIRT_RESET_REQ 0x0009 +#define mmSDMA5_VIRT_RESET_REQ_BASE_IDX 1 +#define mmSDMA5_VF_ENABLE 0x000a +#define mmSDMA5_VF_ENABLE_BASE_IDX 1 +#define mmSDMA5_CONTEXT_REG_TYPE0 0x000b +#define mmSDMA5_CONTEXT_REG_TYPE0_BASE_IDX 1 +#define mmSDMA5_CONTEXT_REG_TYPE1 0x000c +#define mmSDMA5_CONTEXT_REG_TYPE1_BASE_IDX 1 +#define mmSDMA5_CONTEXT_REG_TYPE2 0x000d +#define mmSDMA5_CONTEXT_REG_TYPE2_BASE_IDX 1 +#define mmSDMA5_CONTEXT_REG_TYPE3 0x000e +#define mmSDMA5_CONTEXT_REG_TYPE3_BASE_IDX 1 +#define mmSDMA5_PUB_REG_TYPE0 0x000f +#define mmSDMA5_PUB_REG_TYPE0_BASE_IDX 1 +#define mmSDMA5_PUB_REG_TYPE1 0x0010 +#define mmSDMA5_PUB_REG_TYPE1_BASE_IDX 1 +#define mmSDMA5_PUB_REG_TYPE2 0x0011 +#define mmSDMA5_PUB_REG_TYPE2_BASE_IDX 1 +#define mmSDMA5_PUB_REG_TYPE3 0x0012 +#define mmSDMA5_PUB_REG_TYPE3_BASE_IDX 1 +#define mmSDMA5_MMHUB_CNTL 0x0013 +#define mmSDMA5_MMHUB_CNTL_BASE_IDX 1 +#define mmSDMA5_CONTEXT_GROUP_BOUNDARY 0x0019 +#define mmSDMA5_CONTEXT_GROUP_BOUNDARY_BASE_IDX 1 +#define mmSDMA5_POWER_CNTL 0x001a +#define mmSDMA5_POWER_CNTL_BASE_IDX 1 +#define mmSDMA5_CLK_CTRL 0x001b +#define mmSDMA5_CLK_CTRL_BASE_IDX 1 +#define mmSDMA5_CNTL 0x001c +#define mmSDMA5_CNTL_BASE_IDX 1 +#define mmSDMA5_CHICKEN_BITS 0x001d +#define mmSDMA5_CHICKEN_BITS_BASE_IDX 1 +#define mmSDMA5_GB_ADDR_CONFIG 0x001e +#define mmSDMA5_GB_ADDR_CONFIG_BASE_IDX 1 +#define mmSDMA5_GB_ADDR_CONFIG_READ 0x001f +#define mmSDMA5_GB_ADDR_CONFIG_READ_BASE_IDX 1 +#define mmSDMA5_RB_RPTR_FETCH_HI 0x0020 +#define mmSDMA5_RB_RPTR_FETCH_HI_BASE_IDX 1 +#define mmSDMA5_SEM_WAIT_FAIL_TIMER_CNTL 0x0021 +#define mmSDMA5_SEM_WAIT_FAIL_TIMER_CNTL_BASE_IDX 1 +#define mmSDMA5_RB_RPTR_FETCH 0x0022 +#define mmSDMA5_RB_RPTR_FETCH_BASE_IDX 1 +#define mmSDMA5_IB_OFFSET_FETCH 0x0023 +#define mmSDMA5_IB_OFFSET_FETCH_BASE_IDX 1 +#define mmSDMA5_PROGRAM 0x0024 +#define mmSDMA5_PROGRAM_BASE_IDX 1 +#define mmSDMA5_STATUS_REG 0x0025 +#define mmSDMA5_STATUS_REG_BASE_IDX 1 +#define mmSDMA5_STATUS1_REG 0x0026 +#define mmSDMA5_STATUS1_REG_BASE_IDX 1 +#define mmSDMA5_RD_BURST_CNTL 0x0027 +#define mmSDMA5_RD_BURST_CNTL_BASE_IDX 1 +#define mmSDMA5_HBM_PAGE_CONFIG 0x0028 +#define mmSDMA5_HBM_PAGE_CONFIG_BASE_IDX 1 +#define mmSDMA5_UCODE_CHECKSUM 0x0029 +#define mmSDMA5_UCODE_CHECKSUM_BASE_IDX 1 +#define mmSDMA5_F32_CNTL 0x002a +#define mmSDMA5_F32_CNTL_BASE_IDX 1 +#define mmSDMA5_FREEZE 0x002b +#define mmSDMA5_FREEZE_BASE_IDX 1 +#define mmSDMA5_PHASE0_QUANTUM 0x002c +#define mmSDMA5_PHASE0_QUANTUM_BASE_IDX 1 +#define mmSDMA5_PHASE1_QUANTUM 0x002d +#define mmSDMA5_PHASE1_QUANTUM_BASE_IDX 1 +#define mmSDMA5_EDC_CONFIG 0x0032 +#define mmSDMA5_EDC_CONFIG_BASE_IDX 1 +#define mmSDMA5_BA_THRESHOLD 0x0033 +#define mmSDMA5_BA_THRESHOLD_BASE_IDX 1 +#define mmSDMA5_ID 0x0034 +#define mmSDMA5_ID_BASE_IDX 1 +#define mmSDMA5_VERSION 0x0035 +#define mmSDMA5_VERSION_BASE_IDX 1 +#define mmSDMA5_EDC_COUNTER 0x0036 +#define 
mmSDMA5_EDC_COUNTER_BASE_IDX 1 +#define mmSDMA5_EDC_COUNTER_CLEAR 0x0037 +#define mmSDMA5_EDC_COUNTER_CLEAR_BASE_IDX 1 +#define mmSDMA5_STATUS2_REG 0x0038 +#define mmSDMA5_STATUS2_REG_BASE_IDX 1 +#define mmSDMA5_ATOMIC_CNTL 0x0039 +#define mmSDMA5_ATOMIC_CNTL_BASE_IDX 1 +#define mmSDMA5_ATOMIC_PREOP_LO 0x003a +#define mmSDMA5_ATOMIC_PREOP_LO_BASE_IDX 1 +#define mmSDMA5_ATOMIC_PREOP_HI 0x003b +#define mmSDMA5_ATOMIC_PREOP_HI_BASE_IDX 1 +#define mmSDMA5_UTCL1_CNTL 0x003c +#define mmSDMA5_UTCL1_CNTL_BASE_IDX 1 +#define mmSDMA5_UTCL1_WATERMK 0x003d +#define mmSDMA5_UTCL1_WATERMK_BASE_IDX 1 +#define mmSDMA5_UTCL1_RD_STATUS 0x003e +#define mmSDMA5_UTCL1_RD_STATUS_BASE_IDX 1 +#define mmSDMA5_UTCL1_WR_STATUS 0x003f +#define mmSDMA5_UTCL1_WR_STATUS_BASE_IDX 1 +#define mmSDMA5_UTCL1_INV0 0x0040 +#define mmSDMA5_UTCL1_INV0_BASE_IDX 1 +#define mmSDMA5_UTCL1_INV1 0x0041 +#define mmSDMA5_UTCL1_INV1_BASE_IDX 1 +#define mmSDMA5_UTCL1_INV2 0x0042 +#define mmSDMA5_UTCL1_INV2_BASE_IDX 1 +#define mmSDMA5_UTCL1_RD_XNACK0 0x0043 +#define mmSDMA5_UTCL1_RD_XNACK0_BASE_IDX 1 +#define mmSDMA5_UTCL1_RD_XNACK1 0x0044 +#define mmSDMA5_UTCL1_RD_XNACK1_BASE_IDX 1 +#define mmSDMA5_UTCL1_WR_XNACK0 0x0045 +#define mmSDMA5_UTCL1_WR_XNACK0_BASE_IDX 1 +#define mmSDMA5_UTCL1_WR_XNACK1 0x0046 +#define mmSDMA5_UTCL1_WR_XNACK1_BASE_IDX 1 +#define mmSDMA5_UTCL1_TIMEOUT 0x0047 +#define mmSDMA5_UTCL1_TIMEOUT_BASE_IDX 1 +#define mmSDMA5_UTCL1_PAGE 0x0048 +#define mmSDMA5_UTCL1_PAGE_BASE_IDX 1 +#define mmSDMA5_POWER_CNTL_IDLE 0x0049 +#define mmSDMA5_POWER_CNTL_IDLE_BASE_IDX 1 +#define mmSDMA5_RELAX_ORDERING_LUT 0x004a +#define mmSDMA5_RELAX_ORDERING_LUT_BASE_IDX 1 +#define mmSDMA5_CHICKEN_BITS_2 0x004b +#define mmSDMA5_CHICKEN_BITS_2_BASE_IDX 1 +#define mmSDMA5_STATUS3_REG 0x004c +#define mmSDMA5_STATUS3_REG_BASE_IDX 1 +#define mmSDMA5_PHYSICAL_ADDR_LO 0x004d +#define mmSDMA5_PHYSICAL_ADDR_LO_BASE_IDX 1 +#define mmSDMA5_PHYSICAL_ADDR_HI 0x004e +#define mmSDMA5_PHYSICAL_ADDR_HI_BASE_IDX 1 +#define mmSDMA5_PHASE2_QUANTUM 0x004f +#define mmSDMA5_PHASE2_QUANTUM_BASE_IDX 1 +#define mmSDMA5_ERROR_LOG 0x0050 +#define mmSDMA5_ERROR_LOG_BASE_IDX 1 +#define mmSDMA5_PUB_DUMMY_REG0 0x0051 +#define mmSDMA5_PUB_DUMMY_REG0_BASE_IDX 1 +#define mmSDMA5_PUB_DUMMY_REG1 0x0052 +#define mmSDMA5_PUB_DUMMY_REG1_BASE_IDX 1 +#define mmSDMA5_PUB_DUMMY_REG2 0x0053 +#define mmSDMA5_PUB_DUMMY_REG2_BASE_IDX 1 +#define mmSDMA5_PUB_DUMMY_REG3 0x0054 +#define mmSDMA5_PUB_DUMMY_REG3_BASE_IDX 1 +#define mmSDMA5_F32_COUNTER 0x0055 +#define mmSDMA5_F32_COUNTER_BASE_IDX 1 +#define mmSDMA5_UNBREAKABLE 0x0056 +#define mmSDMA5_UNBREAKABLE_BASE_IDX 1 +#define mmSDMA5_PERFMON_CNTL 0x0057 +#define mmSDMA5_PERFMON_CNTL_BASE_IDX 1 +#define mmSDMA5_PERFCOUNTER0_RESULT 0x0058 +#define mmSDMA5_PERFCOUNTER0_RESULT_BASE_IDX 1 +#define mmSDMA5_PERFCOUNTER1_RESULT 0x0059 +#define mmSDMA5_PERFCOUNTER1_RESULT_BASE_IDX 1 +#define mmSDMA5_PERFCOUNTER_TAG_DELAY_RANGE 0x005a +#define mmSDMA5_PERFCOUNTER_TAG_DELAY_RANGE_BASE_IDX 1 +#define mmSDMA5_CRD_CNTL 0x005b +#define mmSDMA5_CRD_CNTL_BASE_IDX 1 +#define mmSDMA5_GPU_IOV_VIOLATION_LOG 0x005d +#define mmSDMA5_GPU_IOV_VIOLATION_LOG_BASE_IDX 1 +#define mmSDMA5_ULV_CNTL 0x005e +#define mmSDMA5_ULV_CNTL_BASE_IDX 1 +#define mmSDMA5_EA_DBIT_ADDR_DATA 0x0060 +#define mmSDMA5_EA_DBIT_ADDR_DATA_BASE_IDX 1 +#define mmSDMA5_EA_DBIT_ADDR_INDEX 0x0061 +#define mmSDMA5_EA_DBIT_ADDR_INDEX_BASE_IDX 1 +#define mmSDMA5_GPU_IOV_VIOLATION_LOG2 0x0062 +#define mmSDMA5_GPU_IOV_VIOLATION_LOG2_BASE_IDX 1 +#define mmSDMA5_GFX_RB_CNTL 0x0080 +#define 
mmSDMA5_GFX_RB_CNTL_BASE_IDX 1 +#define mmSDMA5_GFX_RB_BASE 0x0081 +#define mmSDMA5_GFX_RB_BASE_BASE_IDX 1 +#define mmSDMA5_GFX_RB_BASE_HI 0x0082 +#define mmSDMA5_GFX_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA5_GFX_RB_RPTR 0x0083 +#define mmSDMA5_GFX_RB_RPTR_BASE_IDX 1 +#define mmSDMA5_GFX_RB_RPTR_HI 0x0084 +#define mmSDMA5_GFX_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA5_GFX_RB_WPTR 0x0085 +#define mmSDMA5_GFX_RB_WPTR_BASE_IDX 1 +#define mmSDMA5_GFX_RB_WPTR_HI 0x0086 +#define mmSDMA5_GFX_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA5_GFX_RB_WPTR_POLL_CNTL 0x0087 +#define mmSDMA5_GFX_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA5_GFX_RB_RPTR_ADDR_HI 0x0088 +#define mmSDMA5_GFX_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA5_GFX_RB_RPTR_ADDR_LO 0x0089 +#define mmSDMA5_GFX_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA5_GFX_IB_CNTL 0x008a +#define mmSDMA5_GFX_IB_CNTL_BASE_IDX 1 +#define mmSDMA5_GFX_IB_RPTR 0x008b +#define mmSDMA5_GFX_IB_RPTR_BASE_IDX 1 +#define mmSDMA5_GFX_IB_OFFSET 0x008c +#define mmSDMA5_GFX_IB_OFFSET_BASE_IDX 1 +#define mmSDMA5_GFX_IB_BASE_LO 0x008d +#define mmSDMA5_GFX_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA5_GFX_IB_BASE_HI 0x008e +#define mmSDMA5_GFX_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA5_GFX_IB_SIZE 0x008f +#define mmSDMA5_GFX_IB_SIZE_BASE_IDX 1 +#define mmSDMA5_GFX_SKIP_CNTL 0x0090 +#define mmSDMA5_GFX_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA5_GFX_CONTEXT_STATUS 0x0091 +#define mmSDMA5_GFX_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA5_GFX_DOORBELL 0x0092 +#define mmSDMA5_GFX_DOORBELL_BASE_IDX 1 +#define mmSDMA5_GFX_CONTEXT_CNTL 0x0093 +#define mmSDMA5_GFX_CONTEXT_CNTL_BASE_IDX 1 +#define mmSDMA5_GFX_STATUS 0x00a8 +#define mmSDMA5_GFX_STATUS_BASE_IDX 1 +#define mmSDMA5_GFX_DOORBELL_LOG 0x00a9 +#define mmSDMA5_GFX_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA5_GFX_WATERMARK 0x00aa +#define mmSDMA5_GFX_WATERMARK_BASE_IDX 1 +#define mmSDMA5_GFX_DOORBELL_OFFSET 0x00ab +#define mmSDMA5_GFX_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA5_GFX_CSA_ADDR_LO 0x00ac +#define mmSDMA5_GFX_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA5_GFX_CSA_ADDR_HI 0x00ad +#define mmSDMA5_GFX_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA5_GFX_IB_SUB_REMAIN 0x00af +#define mmSDMA5_GFX_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA5_GFX_PREEMPT 0x00b0 +#define mmSDMA5_GFX_PREEMPT_BASE_IDX 1 +#define mmSDMA5_GFX_DUMMY_REG 0x00b1 +#define mmSDMA5_GFX_DUMMY_REG_BASE_IDX 1 +#define mmSDMA5_GFX_RB_WPTR_POLL_ADDR_HI 0x00b2 +#define mmSDMA5_GFX_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA5_GFX_RB_WPTR_POLL_ADDR_LO 0x00b3 +#define mmSDMA5_GFX_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA5_GFX_RB_AQL_CNTL 0x00b4 +#define mmSDMA5_GFX_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA5_GFX_MINOR_PTR_UPDATE 0x00b5 +#define mmSDMA5_GFX_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA5_GFX_MIDCMD_DATA0 0x00c0 +#define mmSDMA5_GFX_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA5_GFX_MIDCMD_DATA1 0x00c1 +#define mmSDMA5_GFX_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA5_GFX_MIDCMD_DATA2 0x00c2 +#define mmSDMA5_GFX_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA5_GFX_MIDCMD_DATA3 0x00c3 +#define mmSDMA5_GFX_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA5_GFX_MIDCMD_DATA4 0x00c4 +#define mmSDMA5_GFX_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA5_GFX_MIDCMD_DATA5 0x00c5 +#define mmSDMA5_GFX_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA5_GFX_MIDCMD_DATA6 0x00c6 +#define mmSDMA5_GFX_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA5_GFX_MIDCMD_DATA7 0x00c7 +#define mmSDMA5_GFX_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA5_GFX_MIDCMD_DATA8 0x00c8 +#define mmSDMA5_GFX_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA5_GFX_MIDCMD_CNTL 0x00c9 +#define 
mmSDMA5_GFX_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA5_PAGE_RB_CNTL 0x00d8 +#define mmSDMA5_PAGE_RB_CNTL_BASE_IDX 1 +#define mmSDMA5_PAGE_RB_BASE 0x00d9 +#define mmSDMA5_PAGE_RB_BASE_BASE_IDX 1 +#define mmSDMA5_PAGE_RB_BASE_HI 0x00da +#define mmSDMA5_PAGE_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA5_PAGE_RB_RPTR 0x00db +#define mmSDMA5_PAGE_RB_RPTR_BASE_IDX 1 +#define mmSDMA5_PAGE_RB_RPTR_HI 0x00dc +#define mmSDMA5_PAGE_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA5_PAGE_RB_WPTR 0x00dd +#define mmSDMA5_PAGE_RB_WPTR_BASE_IDX 1 +#define mmSDMA5_PAGE_RB_WPTR_HI 0x00de +#define mmSDMA5_PAGE_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA5_PAGE_RB_WPTR_POLL_CNTL 0x00df +#define mmSDMA5_PAGE_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA5_PAGE_RB_RPTR_ADDR_HI 0x00e0 +#define mmSDMA5_PAGE_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA5_PAGE_RB_RPTR_ADDR_LO 0x00e1 +#define mmSDMA5_PAGE_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA5_PAGE_IB_CNTL 0x00e2 +#define mmSDMA5_PAGE_IB_CNTL_BASE_IDX 1 +#define mmSDMA5_PAGE_IB_RPTR 0x00e3 +#define mmSDMA5_PAGE_IB_RPTR_BASE_IDX 1 +#define mmSDMA5_PAGE_IB_OFFSET 0x00e4 +#define mmSDMA5_PAGE_IB_OFFSET_BASE_IDX 1 +#define mmSDMA5_PAGE_IB_BASE_LO 0x00e5 +#define mmSDMA5_PAGE_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA5_PAGE_IB_BASE_HI 0x00e6 +#define mmSDMA5_PAGE_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA5_PAGE_IB_SIZE 0x00e7 +#define mmSDMA5_PAGE_IB_SIZE_BASE_IDX 1 +#define mmSDMA5_PAGE_SKIP_CNTL 0x00e8 +#define mmSDMA5_PAGE_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA5_PAGE_CONTEXT_STATUS 0x00e9 +#define mmSDMA5_PAGE_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA5_PAGE_DOORBELL 0x00ea +#define mmSDMA5_PAGE_DOORBELL_BASE_IDX 1 +#define mmSDMA5_PAGE_STATUS 0x0100 +#define mmSDMA5_PAGE_STATUS_BASE_IDX 1 +#define mmSDMA5_PAGE_DOORBELL_LOG 0x0101 +#define mmSDMA5_PAGE_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA5_PAGE_WATERMARK 0x0102 +#define mmSDMA5_PAGE_WATERMARK_BASE_IDX 1 +#define mmSDMA5_PAGE_DOORBELL_OFFSET 0x0103 +#define mmSDMA5_PAGE_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA5_PAGE_CSA_ADDR_LO 0x0104 +#define mmSDMA5_PAGE_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA5_PAGE_CSA_ADDR_HI 0x0105 +#define mmSDMA5_PAGE_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA5_PAGE_IB_SUB_REMAIN 0x0107 +#define mmSDMA5_PAGE_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA5_PAGE_PREEMPT 0x0108 +#define mmSDMA5_PAGE_PREEMPT_BASE_IDX 1 +#define mmSDMA5_PAGE_DUMMY_REG 0x0109 +#define mmSDMA5_PAGE_DUMMY_REG_BASE_IDX 1 +#define mmSDMA5_PAGE_RB_WPTR_POLL_ADDR_HI 0x010a +#define mmSDMA5_PAGE_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA5_PAGE_RB_WPTR_POLL_ADDR_LO 0x010b +#define mmSDMA5_PAGE_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA5_PAGE_RB_AQL_CNTL 0x010c +#define mmSDMA5_PAGE_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA5_PAGE_MINOR_PTR_UPDATE 0x010d +#define mmSDMA5_PAGE_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA5_PAGE_MIDCMD_DATA0 0x0118 +#define mmSDMA5_PAGE_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA5_PAGE_MIDCMD_DATA1 0x0119 +#define mmSDMA5_PAGE_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA5_PAGE_MIDCMD_DATA2 0x011a +#define mmSDMA5_PAGE_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA5_PAGE_MIDCMD_DATA3 0x011b +#define mmSDMA5_PAGE_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA5_PAGE_MIDCMD_DATA4 0x011c +#define mmSDMA5_PAGE_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA5_PAGE_MIDCMD_DATA5 0x011d +#define mmSDMA5_PAGE_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA5_PAGE_MIDCMD_DATA6 0x011e +#define mmSDMA5_PAGE_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA5_PAGE_MIDCMD_DATA7 0x011f +#define mmSDMA5_PAGE_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA5_PAGE_MIDCMD_DATA8 0x0120 +#define 
mmSDMA5_PAGE_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA5_PAGE_MIDCMD_CNTL 0x0121 +#define mmSDMA5_PAGE_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC0_RB_CNTL 0x0130 +#define mmSDMA5_RLC0_RB_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC0_RB_BASE 0x0131 +#define mmSDMA5_RLC0_RB_BASE_BASE_IDX 1 +#define mmSDMA5_RLC0_RB_BASE_HI 0x0132 +#define mmSDMA5_RLC0_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA5_RLC0_RB_RPTR 0x0133 +#define mmSDMA5_RLC0_RB_RPTR_BASE_IDX 1 +#define mmSDMA5_RLC0_RB_RPTR_HI 0x0134 +#define mmSDMA5_RLC0_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA5_RLC0_RB_WPTR 0x0135 +#define mmSDMA5_RLC0_RB_WPTR_BASE_IDX 1 +#define mmSDMA5_RLC0_RB_WPTR_HI 0x0136 +#define mmSDMA5_RLC0_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA5_RLC0_RB_WPTR_POLL_CNTL 0x0137 +#define mmSDMA5_RLC0_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC0_RB_RPTR_ADDR_HI 0x0138 +#define mmSDMA5_RLC0_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA5_RLC0_RB_RPTR_ADDR_LO 0x0139 +#define mmSDMA5_RLC0_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA5_RLC0_IB_CNTL 0x013a +#define mmSDMA5_RLC0_IB_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC0_IB_RPTR 0x013b +#define mmSDMA5_RLC0_IB_RPTR_BASE_IDX 1 +#define mmSDMA5_RLC0_IB_OFFSET 0x013c +#define mmSDMA5_RLC0_IB_OFFSET_BASE_IDX 1 +#define mmSDMA5_RLC0_IB_BASE_LO 0x013d +#define mmSDMA5_RLC0_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA5_RLC0_IB_BASE_HI 0x013e +#define mmSDMA5_RLC0_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA5_RLC0_IB_SIZE 0x013f +#define mmSDMA5_RLC0_IB_SIZE_BASE_IDX 1 +#define mmSDMA5_RLC0_SKIP_CNTL 0x0140 +#define mmSDMA5_RLC0_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC0_CONTEXT_STATUS 0x0141 +#define mmSDMA5_RLC0_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA5_RLC0_DOORBELL 0x0142 +#define mmSDMA5_RLC0_DOORBELL_BASE_IDX 1 +#define mmSDMA5_RLC0_STATUS 0x0158 +#define mmSDMA5_RLC0_STATUS_BASE_IDX 1 +#define mmSDMA5_RLC0_DOORBELL_LOG 0x0159 +#define mmSDMA5_RLC0_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA5_RLC0_WATERMARK 0x015a +#define mmSDMA5_RLC0_WATERMARK_BASE_IDX 1 +#define mmSDMA5_RLC0_DOORBELL_OFFSET 0x015b +#define mmSDMA5_RLC0_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA5_RLC0_CSA_ADDR_LO 0x015c +#define mmSDMA5_RLC0_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA5_RLC0_CSA_ADDR_HI 0x015d +#define mmSDMA5_RLC0_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA5_RLC0_IB_SUB_REMAIN 0x015f +#define mmSDMA5_RLC0_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA5_RLC0_PREEMPT 0x0160 +#define mmSDMA5_RLC0_PREEMPT_BASE_IDX 1 +#define mmSDMA5_RLC0_DUMMY_REG 0x0161 +#define mmSDMA5_RLC0_DUMMY_REG_BASE_IDX 1 +#define mmSDMA5_RLC0_RB_WPTR_POLL_ADDR_HI 0x0162 +#define mmSDMA5_RLC0_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA5_RLC0_RB_WPTR_POLL_ADDR_LO 0x0163 +#define mmSDMA5_RLC0_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA5_RLC0_RB_AQL_CNTL 0x0164 +#define mmSDMA5_RLC0_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC0_MINOR_PTR_UPDATE 0x0165 +#define mmSDMA5_RLC0_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA5_RLC0_MIDCMD_DATA0 0x0170 +#define mmSDMA5_RLC0_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA5_RLC0_MIDCMD_DATA1 0x0171 +#define mmSDMA5_RLC0_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA5_RLC0_MIDCMD_DATA2 0x0172 +#define mmSDMA5_RLC0_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA5_RLC0_MIDCMD_DATA3 0x0173 +#define mmSDMA5_RLC0_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA5_RLC0_MIDCMD_DATA4 0x0174 +#define mmSDMA5_RLC0_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA5_RLC0_MIDCMD_DATA5 0x0175 +#define mmSDMA5_RLC0_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA5_RLC0_MIDCMD_DATA6 0x0176 +#define mmSDMA5_RLC0_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA5_RLC0_MIDCMD_DATA7 0x0177 +#define 
mmSDMA5_RLC0_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA5_RLC0_MIDCMD_DATA8 0x0178 +#define mmSDMA5_RLC0_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA5_RLC0_MIDCMD_CNTL 0x0179 +#define mmSDMA5_RLC0_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC1_RB_CNTL 0x0188 +#define mmSDMA5_RLC1_RB_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC1_RB_BASE 0x0189 +#define mmSDMA5_RLC1_RB_BASE_BASE_IDX 1 +#define mmSDMA5_RLC1_RB_BASE_HI 0x018a +#define mmSDMA5_RLC1_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA5_RLC1_RB_RPTR 0x018b +#define mmSDMA5_RLC1_RB_RPTR_BASE_IDX 1 +#define mmSDMA5_RLC1_RB_RPTR_HI 0x018c +#define mmSDMA5_RLC1_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA5_RLC1_RB_WPTR 0x018d +#define mmSDMA5_RLC1_RB_WPTR_BASE_IDX 1 +#define mmSDMA5_RLC1_RB_WPTR_HI 0x018e +#define mmSDMA5_RLC1_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA5_RLC1_RB_WPTR_POLL_CNTL 0x018f +#define mmSDMA5_RLC1_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC1_RB_RPTR_ADDR_HI 0x0190 +#define mmSDMA5_RLC1_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA5_RLC1_RB_RPTR_ADDR_LO 0x0191 +#define mmSDMA5_RLC1_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA5_RLC1_IB_CNTL 0x0192 +#define mmSDMA5_RLC1_IB_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC1_IB_RPTR 0x0193 +#define mmSDMA5_RLC1_IB_RPTR_BASE_IDX 1 +#define mmSDMA5_RLC1_IB_OFFSET 0x0194 +#define mmSDMA5_RLC1_IB_OFFSET_BASE_IDX 1 +#define mmSDMA5_RLC1_IB_BASE_LO 0x0195 +#define mmSDMA5_RLC1_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA5_RLC1_IB_BASE_HI 0x0196 +#define mmSDMA5_RLC1_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA5_RLC1_IB_SIZE 0x0197 +#define mmSDMA5_RLC1_IB_SIZE_BASE_IDX 1 +#define mmSDMA5_RLC1_SKIP_CNTL 0x0198 +#define mmSDMA5_RLC1_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC1_CONTEXT_STATUS 0x0199 +#define mmSDMA5_RLC1_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA5_RLC1_DOORBELL 0x019a +#define mmSDMA5_RLC1_DOORBELL_BASE_IDX 1 +#define mmSDMA5_RLC1_STATUS 0x01b0 +#define mmSDMA5_RLC1_STATUS_BASE_IDX 1 +#define mmSDMA5_RLC1_DOORBELL_LOG 0x01b1 +#define mmSDMA5_RLC1_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA5_RLC1_WATERMARK 0x01b2 +#define mmSDMA5_RLC1_WATERMARK_BASE_IDX 1 +#define mmSDMA5_RLC1_DOORBELL_OFFSET 0x01b3 +#define mmSDMA5_RLC1_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA5_RLC1_CSA_ADDR_LO 0x01b4 +#define mmSDMA5_RLC1_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA5_RLC1_CSA_ADDR_HI 0x01b5 +#define mmSDMA5_RLC1_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA5_RLC1_IB_SUB_REMAIN 0x01b7 +#define mmSDMA5_RLC1_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA5_RLC1_PREEMPT 0x01b8 +#define mmSDMA5_RLC1_PREEMPT_BASE_IDX 1 +#define mmSDMA5_RLC1_DUMMY_REG 0x01b9 +#define mmSDMA5_RLC1_DUMMY_REG_BASE_IDX 1 +#define mmSDMA5_RLC1_RB_WPTR_POLL_ADDR_HI 0x01ba +#define mmSDMA5_RLC1_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA5_RLC1_RB_WPTR_POLL_ADDR_LO 0x01bb +#define mmSDMA5_RLC1_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA5_RLC1_RB_AQL_CNTL 0x01bc +#define mmSDMA5_RLC1_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC1_MINOR_PTR_UPDATE 0x01bd +#define mmSDMA5_RLC1_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA5_RLC1_MIDCMD_DATA0 0x01c8 +#define mmSDMA5_RLC1_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA5_RLC1_MIDCMD_DATA1 0x01c9 +#define mmSDMA5_RLC1_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA5_RLC1_MIDCMD_DATA2 0x01ca +#define mmSDMA5_RLC1_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA5_RLC1_MIDCMD_DATA3 0x01cb +#define mmSDMA5_RLC1_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA5_RLC1_MIDCMD_DATA4 0x01cc +#define mmSDMA5_RLC1_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA5_RLC1_MIDCMD_DATA5 0x01cd +#define mmSDMA5_RLC1_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA5_RLC1_MIDCMD_DATA6 0x01ce +#define 
mmSDMA5_RLC1_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA5_RLC1_MIDCMD_DATA7 0x01cf +#define mmSDMA5_RLC1_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA5_RLC1_MIDCMD_DATA8 0x01d0 +#define mmSDMA5_RLC1_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA5_RLC1_MIDCMD_CNTL 0x01d1 +#define mmSDMA5_RLC1_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC2_RB_CNTL 0x01e0 +#define mmSDMA5_RLC2_RB_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC2_RB_BASE 0x01e1 +#define mmSDMA5_RLC2_RB_BASE_BASE_IDX 1 +#define mmSDMA5_RLC2_RB_BASE_HI 0x01e2 +#define mmSDMA5_RLC2_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA5_RLC2_RB_RPTR 0x01e3 +#define mmSDMA5_RLC2_RB_RPTR_BASE_IDX 1 +#define mmSDMA5_RLC2_RB_RPTR_HI 0x01e4 +#define mmSDMA5_RLC2_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA5_RLC2_RB_WPTR 0x01e5 +#define mmSDMA5_RLC2_RB_WPTR_BASE_IDX 1 +#define mmSDMA5_RLC2_RB_WPTR_HI 0x01e6 +#define mmSDMA5_RLC2_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA5_RLC2_RB_WPTR_POLL_CNTL 0x01e7 +#define mmSDMA5_RLC2_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC2_RB_RPTR_ADDR_HI 0x01e8 +#define mmSDMA5_RLC2_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA5_RLC2_RB_RPTR_ADDR_LO 0x01e9 +#define mmSDMA5_RLC2_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA5_RLC2_IB_CNTL 0x01ea +#define mmSDMA5_RLC2_IB_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC2_IB_RPTR 0x01eb +#define mmSDMA5_RLC2_IB_RPTR_BASE_IDX 1 +#define mmSDMA5_RLC2_IB_OFFSET 0x01ec +#define mmSDMA5_RLC2_IB_OFFSET_BASE_IDX 1 +#define mmSDMA5_RLC2_IB_BASE_LO 0x01ed +#define mmSDMA5_RLC2_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA5_RLC2_IB_BASE_HI 0x01ee +#define mmSDMA5_RLC2_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA5_RLC2_IB_SIZE 0x01ef +#define mmSDMA5_RLC2_IB_SIZE_BASE_IDX 1 +#define mmSDMA5_RLC2_SKIP_CNTL 0x01f0 +#define mmSDMA5_RLC2_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC2_CONTEXT_STATUS 0x01f1 +#define mmSDMA5_RLC2_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA5_RLC2_DOORBELL 0x01f2 +#define mmSDMA5_RLC2_DOORBELL_BASE_IDX 1 +#define mmSDMA5_RLC2_STATUS 0x0208 +#define mmSDMA5_RLC2_STATUS_BASE_IDX 1 +#define mmSDMA5_RLC2_DOORBELL_LOG 0x0209 +#define mmSDMA5_RLC2_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA5_RLC2_WATERMARK 0x020a +#define mmSDMA5_RLC2_WATERMARK_BASE_IDX 1 +#define mmSDMA5_RLC2_DOORBELL_OFFSET 0x020b +#define mmSDMA5_RLC2_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA5_RLC2_CSA_ADDR_LO 0x020c +#define mmSDMA5_RLC2_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA5_RLC2_CSA_ADDR_HI 0x020d +#define mmSDMA5_RLC2_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA5_RLC2_IB_SUB_REMAIN 0x020f +#define mmSDMA5_RLC2_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA5_RLC2_PREEMPT 0x0210 +#define mmSDMA5_RLC2_PREEMPT_BASE_IDX 1 +#define mmSDMA5_RLC2_DUMMY_REG 0x0211 +#define mmSDMA5_RLC2_DUMMY_REG_BASE_IDX 1 +#define mmSDMA5_RLC2_RB_WPTR_POLL_ADDR_HI 0x0212 +#define mmSDMA5_RLC2_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA5_RLC2_RB_WPTR_POLL_ADDR_LO 0x0213 +#define mmSDMA5_RLC2_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA5_RLC2_RB_AQL_CNTL 0x0214 +#define mmSDMA5_RLC2_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC2_MINOR_PTR_UPDATE 0x0215 +#define mmSDMA5_RLC2_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA5_RLC2_MIDCMD_DATA0 0x0220 +#define mmSDMA5_RLC2_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA5_RLC2_MIDCMD_DATA1 0x0221 +#define mmSDMA5_RLC2_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA5_RLC2_MIDCMD_DATA2 0x0222 +#define mmSDMA5_RLC2_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA5_RLC2_MIDCMD_DATA3 0x0223 +#define mmSDMA5_RLC2_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA5_RLC2_MIDCMD_DATA4 0x0224 +#define mmSDMA5_RLC2_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA5_RLC2_MIDCMD_DATA5 0x0225 +#define 
mmSDMA5_RLC2_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA5_RLC2_MIDCMD_DATA6 0x0226 +#define mmSDMA5_RLC2_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA5_RLC2_MIDCMD_DATA7 0x0227 +#define mmSDMA5_RLC2_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA5_RLC2_MIDCMD_DATA8 0x0228 +#define mmSDMA5_RLC2_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA5_RLC2_MIDCMD_CNTL 0x0229 +#define mmSDMA5_RLC2_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC3_RB_CNTL 0x0238 +#define mmSDMA5_RLC3_RB_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC3_RB_BASE 0x0239 +#define mmSDMA5_RLC3_RB_BASE_BASE_IDX 1 +#define mmSDMA5_RLC3_RB_BASE_HI 0x023a +#define mmSDMA5_RLC3_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA5_RLC3_RB_RPTR 0x023b +#define mmSDMA5_RLC3_RB_RPTR_BASE_IDX 1 +#define mmSDMA5_RLC3_RB_RPTR_HI 0x023c +#define mmSDMA5_RLC3_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA5_RLC3_RB_WPTR 0x023d +#define mmSDMA5_RLC3_RB_WPTR_BASE_IDX 1 +#define mmSDMA5_RLC3_RB_WPTR_HI 0x023e +#define mmSDMA5_RLC3_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA5_RLC3_RB_WPTR_POLL_CNTL 0x023f +#define mmSDMA5_RLC3_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC3_RB_RPTR_ADDR_HI 0x0240 +#define mmSDMA5_RLC3_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA5_RLC3_RB_RPTR_ADDR_LO 0x0241 +#define mmSDMA5_RLC3_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA5_RLC3_IB_CNTL 0x0242 +#define mmSDMA5_RLC3_IB_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC3_IB_RPTR 0x0243 +#define mmSDMA5_RLC3_IB_RPTR_BASE_IDX 1 +#define mmSDMA5_RLC3_IB_OFFSET 0x0244 +#define mmSDMA5_RLC3_IB_OFFSET_BASE_IDX 1 +#define mmSDMA5_RLC3_IB_BASE_LO 0x0245 +#define mmSDMA5_RLC3_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA5_RLC3_IB_BASE_HI 0x0246 +#define mmSDMA5_RLC3_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA5_RLC3_IB_SIZE 0x0247 +#define mmSDMA5_RLC3_IB_SIZE_BASE_IDX 1 +#define mmSDMA5_RLC3_SKIP_CNTL 0x0248 +#define mmSDMA5_RLC3_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC3_CONTEXT_STATUS 0x0249 +#define mmSDMA5_RLC3_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA5_RLC3_DOORBELL 0x024a +#define mmSDMA5_RLC3_DOORBELL_BASE_IDX 1 +#define mmSDMA5_RLC3_STATUS 0x0260 +#define mmSDMA5_RLC3_STATUS_BASE_IDX 1 +#define mmSDMA5_RLC3_DOORBELL_LOG 0x0261 +#define mmSDMA5_RLC3_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA5_RLC3_WATERMARK 0x0262 +#define mmSDMA5_RLC3_WATERMARK_BASE_IDX 1 +#define mmSDMA5_RLC3_DOORBELL_OFFSET 0x0263 +#define mmSDMA5_RLC3_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA5_RLC3_CSA_ADDR_LO 0x0264 +#define mmSDMA5_RLC3_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA5_RLC3_CSA_ADDR_HI 0x0265 +#define mmSDMA5_RLC3_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA5_RLC3_IB_SUB_REMAIN 0x0267 +#define mmSDMA5_RLC3_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA5_RLC3_PREEMPT 0x0268 +#define mmSDMA5_RLC3_PREEMPT_BASE_IDX 1 +#define mmSDMA5_RLC3_DUMMY_REG 0x0269 +#define mmSDMA5_RLC3_DUMMY_REG_BASE_IDX 1 +#define mmSDMA5_RLC3_RB_WPTR_POLL_ADDR_HI 0x026a +#define mmSDMA5_RLC3_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA5_RLC3_RB_WPTR_POLL_ADDR_LO 0x026b +#define mmSDMA5_RLC3_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA5_RLC3_RB_AQL_CNTL 0x026c +#define mmSDMA5_RLC3_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC3_MINOR_PTR_UPDATE 0x026d +#define mmSDMA5_RLC3_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA5_RLC3_MIDCMD_DATA0 0x0278 +#define mmSDMA5_RLC3_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA5_RLC3_MIDCMD_DATA1 0x0279 +#define mmSDMA5_RLC3_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA5_RLC3_MIDCMD_DATA2 0x027a +#define mmSDMA5_RLC3_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA5_RLC3_MIDCMD_DATA3 0x027b +#define mmSDMA5_RLC3_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA5_RLC3_MIDCMD_DATA4 0x027c +#define 
mmSDMA5_RLC3_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA5_RLC3_MIDCMD_DATA5 0x027d +#define mmSDMA5_RLC3_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA5_RLC3_MIDCMD_DATA6 0x027e +#define mmSDMA5_RLC3_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA5_RLC3_MIDCMD_DATA7 0x027f +#define mmSDMA5_RLC3_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA5_RLC3_MIDCMD_DATA8 0x0280 +#define mmSDMA5_RLC3_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA5_RLC3_MIDCMD_CNTL 0x0281 +#define mmSDMA5_RLC3_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC4_RB_CNTL 0x0290 +#define mmSDMA5_RLC4_RB_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC4_RB_BASE 0x0291 +#define mmSDMA5_RLC4_RB_BASE_BASE_IDX 1 +#define mmSDMA5_RLC4_RB_BASE_HI 0x0292 +#define mmSDMA5_RLC4_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA5_RLC4_RB_RPTR 0x0293 +#define mmSDMA5_RLC4_RB_RPTR_BASE_IDX 1 +#define mmSDMA5_RLC4_RB_RPTR_HI 0x0294 +#define mmSDMA5_RLC4_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA5_RLC4_RB_WPTR 0x0295 +#define mmSDMA5_RLC4_RB_WPTR_BASE_IDX 1 +#define mmSDMA5_RLC4_RB_WPTR_HI 0x0296 +#define mmSDMA5_RLC4_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA5_RLC4_RB_WPTR_POLL_CNTL 0x0297 +#define mmSDMA5_RLC4_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC4_RB_RPTR_ADDR_HI 0x0298 +#define mmSDMA5_RLC4_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA5_RLC4_RB_RPTR_ADDR_LO 0x0299 +#define mmSDMA5_RLC4_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA5_RLC4_IB_CNTL 0x029a +#define mmSDMA5_RLC4_IB_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC4_IB_RPTR 0x029b +#define mmSDMA5_RLC4_IB_RPTR_BASE_IDX 1 +#define mmSDMA5_RLC4_IB_OFFSET 0x029c +#define mmSDMA5_RLC4_IB_OFFSET_BASE_IDX 1 +#define mmSDMA5_RLC4_IB_BASE_LO 0x029d +#define mmSDMA5_RLC4_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA5_RLC4_IB_BASE_HI 0x029e +#define mmSDMA5_RLC4_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA5_RLC4_IB_SIZE 0x029f +#define mmSDMA5_RLC4_IB_SIZE_BASE_IDX 1 +#define mmSDMA5_RLC4_SKIP_CNTL 0x02a0 +#define mmSDMA5_RLC4_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC4_CONTEXT_STATUS 0x02a1 +#define mmSDMA5_RLC4_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA5_RLC4_DOORBELL 0x02a2 +#define mmSDMA5_RLC4_DOORBELL_BASE_IDX 1 +#define mmSDMA5_RLC4_STATUS 0x02b8 +#define mmSDMA5_RLC4_STATUS_BASE_IDX 1 +#define mmSDMA5_RLC4_DOORBELL_LOG 0x02b9 +#define mmSDMA5_RLC4_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA5_RLC4_WATERMARK 0x02ba +#define mmSDMA5_RLC4_WATERMARK_BASE_IDX 1 +#define mmSDMA5_RLC4_DOORBELL_OFFSET 0x02bb +#define mmSDMA5_RLC4_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA5_RLC4_CSA_ADDR_LO 0x02bc +#define mmSDMA5_RLC4_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA5_RLC4_CSA_ADDR_HI 0x02bd +#define mmSDMA5_RLC4_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA5_RLC4_IB_SUB_REMAIN 0x02bf +#define mmSDMA5_RLC4_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA5_RLC4_PREEMPT 0x02c0 +#define mmSDMA5_RLC4_PREEMPT_BASE_IDX 1 +#define mmSDMA5_RLC4_DUMMY_REG 0x02c1 +#define mmSDMA5_RLC4_DUMMY_REG_BASE_IDX 1 +#define mmSDMA5_RLC4_RB_WPTR_POLL_ADDR_HI 0x02c2 +#define mmSDMA5_RLC4_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA5_RLC4_RB_WPTR_POLL_ADDR_LO 0x02c3 +#define mmSDMA5_RLC4_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA5_RLC4_RB_AQL_CNTL 0x02c4 +#define mmSDMA5_RLC4_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC4_MINOR_PTR_UPDATE 0x02c5 +#define mmSDMA5_RLC4_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA5_RLC4_MIDCMD_DATA0 0x02d0 +#define mmSDMA5_RLC4_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA5_RLC4_MIDCMD_DATA1 0x02d1 +#define mmSDMA5_RLC4_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA5_RLC4_MIDCMD_DATA2 0x02d2 +#define mmSDMA5_RLC4_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA5_RLC4_MIDCMD_DATA3 0x02d3 +#define 
mmSDMA5_RLC4_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA5_RLC4_MIDCMD_DATA4 0x02d4 +#define mmSDMA5_RLC4_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA5_RLC4_MIDCMD_DATA5 0x02d5 +#define mmSDMA5_RLC4_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA5_RLC4_MIDCMD_DATA6 0x02d6 +#define mmSDMA5_RLC4_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA5_RLC4_MIDCMD_DATA7 0x02d7 +#define mmSDMA5_RLC4_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA5_RLC4_MIDCMD_DATA8 0x02d8 +#define mmSDMA5_RLC4_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA5_RLC4_MIDCMD_CNTL 0x02d9 +#define mmSDMA5_RLC4_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC5_RB_CNTL 0x02e8 +#define mmSDMA5_RLC5_RB_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC5_RB_BASE 0x02e9 +#define mmSDMA5_RLC5_RB_BASE_BASE_IDX 1 +#define mmSDMA5_RLC5_RB_BASE_HI 0x02ea +#define mmSDMA5_RLC5_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA5_RLC5_RB_RPTR 0x02eb +#define mmSDMA5_RLC5_RB_RPTR_BASE_IDX 1 +#define mmSDMA5_RLC5_RB_RPTR_HI 0x02ec +#define mmSDMA5_RLC5_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA5_RLC5_RB_WPTR 0x02ed +#define mmSDMA5_RLC5_RB_WPTR_BASE_IDX 1 +#define mmSDMA5_RLC5_RB_WPTR_HI 0x02ee +#define mmSDMA5_RLC5_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA5_RLC5_RB_WPTR_POLL_CNTL 0x02ef +#define mmSDMA5_RLC5_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC5_RB_RPTR_ADDR_HI 0x02f0 +#define mmSDMA5_RLC5_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA5_RLC5_RB_RPTR_ADDR_LO 0x02f1 +#define mmSDMA5_RLC5_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA5_RLC5_IB_CNTL 0x02f2 +#define mmSDMA5_RLC5_IB_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC5_IB_RPTR 0x02f3 +#define mmSDMA5_RLC5_IB_RPTR_BASE_IDX 1 +#define mmSDMA5_RLC5_IB_OFFSET 0x02f4 +#define mmSDMA5_RLC5_IB_OFFSET_BASE_IDX 1 +#define mmSDMA5_RLC5_IB_BASE_LO 0x02f5 +#define mmSDMA5_RLC5_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA5_RLC5_IB_BASE_HI 0x02f6 +#define mmSDMA5_RLC5_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA5_RLC5_IB_SIZE 0x02f7 +#define mmSDMA5_RLC5_IB_SIZE_BASE_IDX 1 +#define mmSDMA5_RLC5_SKIP_CNTL 0x02f8 +#define mmSDMA5_RLC5_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC5_CONTEXT_STATUS 0x02f9 +#define mmSDMA5_RLC5_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA5_RLC5_DOORBELL 0x02fa +#define mmSDMA5_RLC5_DOORBELL_BASE_IDX 1 +#define mmSDMA5_RLC5_STATUS 0x0310 +#define mmSDMA5_RLC5_STATUS_BASE_IDX 1 +#define mmSDMA5_RLC5_DOORBELL_LOG 0x0311 +#define mmSDMA5_RLC5_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA5_RLC5_WATERMARK 0x0312 +#define mmSDMA5_RLC5_WATERMARK_BASE_IDX 1 +#define mmSDMA5_RLC5_DOORBELL_OFFSET 0x0313 +#define mmSDMA5_RLC5_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA5_RLC5_CSA_ADDR_LO 0x0314 +#define mmSDMA5_RLC5_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA5_RLC5_CSA_ADDR_HI 0x0315 +#define mmSDMA5_RLC5_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA5_RLC5_IB_SUB_REMAIN 0x0317 +#define mmSDMA5_RLC5_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA5_RLC5_PREEMPT 0x0318 +#define mmSDMA5_RLC5_PREEMPT_BASE_IDX 1 +#define mmSDMA5_RLC5_DUMMY_REG 0x0319 +#define mmSDMA5_RLC5_DUMMY_REG_BASE_IDX 1 +#define mmSDMA5_RLC5_RB_WPTR_POLL_ADDR_HI 0x031a +#define mmSDMA5_RLC5_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA5_RLC5_RB_WPTR_POLL_ADDR_LO 0x031b +#define mmSDMA5_RLC5_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA5_RLC5_RB_AQL_CNTL 0x031c +#define mmSDMA5_RLC5_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC5_MINOR_PTR_UPDATE 0x031d +#define mmSDMA5_RLC5_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA5_RLC5_MIDCMD_DATA0 0x0328 +#define mmSDMA5_RLC5_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA5_RLC5_MIDCMD_DATA1 0x0329 +#define mmSDMA5_RLC5_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA5_RLC5_MIDCMD_DATA2 0x032a +#define 
mmSDMA5_RLC5_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA5_RLC5_MIDCMD_DATA3 0x032b +#define mmSDMA5_RLC5_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA5_RLC5_MIDCMD_DATA4 0x032c +#define mmSDMA5_RLC5_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA5_RLC5_MIDCMD_DATA5 0x032d +#define mmSDMA5_RLC5_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA5_RLC5_MIDCMD_DATA6 0x032e +#define mmSDMA5_RLC5_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA5_RLC5_MIDCMD_DATA7 0x032f +#define mmSDMA5_RLC5_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA5_RLC5_MIDCMD_DATA8 0x0330 +#define mmSDMA5_RLC5_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA5_RLC5_MIDCMD_CNTL 0x0331 +#define mmSDMA5_RLC5_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC6_RB_CNTL 0x0340 +#define mmSDMA5_RLC6_RB_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC6_RB_BASE 0x0341 +#define mmSDMA5_RLC6_RB_BASE_BASE_IDX 1 +#define mmSDMA5_RLC6_RB_BASE_HI 0x0342 +#define mmSDMA5_RLC6_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA5_RLC6_RB_RPTR 0x0343 +#define mmSDMA5_RLC6_RB_RPTR_BASE_IDX 1 +#define mmSDMA5_RLC6_RB_RPTR_HI 0x0344 +#define mmSDMA5_RLC6_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA5_RLC6_RB_WPTR 0x0345 +#define mmSDMA5_RLC6_RB_WPTR_BASE_IDX 1 +#define mmSDMA5_RLC6_RB_WPTR_HI 0x0346 +#define mmSDMA5_RLC6_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA5_RLC6_RB_WPTR_POLL_CNTL 0x0347 +#define mmSDMA5_RLC6_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC6_RB_RPTR_ADDR_HI 0x0348 +#define mmSDMA5_RLC6_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA5_RLC6_RB_RPTR_ADDR_LO 0x0349 +#define mmSDMA5_RLC6_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA5_RLC6_IB_CNTL 0x034a +#define mmSDMA5_RLC6_IB_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC6_IB_RPTR 0x034b +#define mmSDMA5_RLC6_IB_RPTR_BASE_IDX 1 +#define mmSDMA5_RLC6_IB_OFFSET 0x034c +#define mmSDMA5_RLC6_IB_OFFSET_BASE_IDX 1 +#define mmSDMA5_RLC6_IB_BASE_LO 0x034d +#define mmSDMA5_RLC6_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA5_RLC6_IB_BASE_HI 0x034e +#define mmSDMA5_RLC6_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA5_RLC6_IB_SIZE 0x034f +#define mmSDMA5_RLC6_IB_SIZE_BASE_IDX 1 +#define mmSDMA5_RLC6_SKIP_CNTL 0x0350 +#define mmSDMA5_RLC6_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC6_CONTEXT_STATUS 0x0351 +#define mmSDMA5_RLC6_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA5_RLC6_DOORBELL 0x0352 +#define mmSDMA5_RLC6_DOORBELL_BASE_IDX 1 +#define mmSDMA5_RLC6_STATUS 0x0368 +#define mmSDMA5_RLC6_STATUS_BASE_IDX 1 +#define mmSDMA5_RLC6_DOORBELL_LOG 0x0369 +#define mmSDMA5_RLC6_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA5_RLC6_WATERMARK 0x036a +#define mmSDMA5_RLC6_WATERMARK_BASE_IDX 1 +#define mmSDMA5_RLC6_DOORBELL_OFFSET 0x036b +#define mmSDMA5_RLC6_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA5_RLC6_CSA_ADDR_LO 0x036c +#define mmSDMA5_RLC6_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA5_RLC6_CSA_ADDR_HI 0x036d +#define mmSDMA5_RLC6_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA5_RLC6_IB_SUB_REMAIN 0x036f +#define mmSDMA5_RLC6_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA5_RLC6_PREEMPT 0x0370 +#define mmSDMA5_RLC6_PREEMPT_BASE_IDX 1 +#define mmSDMA5_RLC6_DUMMY_REG 0x0371 +#define mmSDMA5_RLC6_DUMMY_REG_BASE_IDX 1 +#define mmSDMA5_RLC6_RB_WPTR_POLL_ADDR_HI 0x0372 +#define mmSDMA5_RLC6_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA5_RLC6_RB_WPTR_POLL_ADDR_LO 0x0373 +#define mmSDMA5_RLC6_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA5_RLC6_RB_AQL_CNTL 0x0374 +#define mmSDMA5_RLC6_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC6_MINOR_PTR_UPDATE 0x0375 +#define mmSDMA5_RLC6_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA5_RLC6_MIDCMD_DATA0 0x0380 +#define mmSDMA5_RLC6_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA5_RLC6_MIDCMD_DATA1 0x0381 +#define 
mmSDMA5_RLC6_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA5_RLC6_MIDCMD_DATA2 0x0382 +#define mmSDMA5_RLC6_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA5_RLC6_MIDCMD_DATA3 0x0383 +#define mmSDMA5_RLC6_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA5_RLC6_MIDCMD_DATA4 0x0384 +#define mmSDMA5_RLC6_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA5_RLC6_MIDCMD_DATA5 0x0385 +#define mmSDMA5_RLC6_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA5_RLC6_MIDCMD_DATA6 0x0386 +#define mmSDMA5_RLC6_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA5_RLC6_MIDCMD_DATA7 0x0387 +#define mmSDMA5_RLC6_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA5_RLC6_MIDCMD_DATA8 0x0388 +#define mmSDMA5_RLC6_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA5_RLC6_MIDCMD_CNTL 0x0389 +#define mmSDMA5_RLC6_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC7_RB_CNTL 0x0398 +#define mmSDMA5_RLC7_RB_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC7_RB_BASE 0x0399 +#define mmSDMA5_RLC7_RB_BASE_BASE_IDX 1 +#define mmSDMA5_RLC7_RB_BASE_HI 0x039a +#define mmSDMA5_RLC7_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA5_RLC7_RB_RPTR 0x039b +#define mmSDMA5_RLC7_RB_RPTR_BASE_IDX 1 +#define mmSDMA5_RLC7_RB_RPTR_HI 0x039c +#define mmSDMA5_RLC7_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA5_RLC7_RB_WPTR 0x039d +#define mmSDMA5_RLC7_RB_WPTR_BASE_IDX 1 +#define mmSDMA5_RLC7_RB_WPTR_HI 0x039e +#define mmSDMA5_RLC7_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA5_RLC7_RB_WPTR_POLL_CNTL 0x039f +#define mmSDMA5_RLC7_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC7_RB_RPTR_ADDR_HI 0x03a0 +#define mmSDMA5_RLC7_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA5_RLC7_RB_RPTR_ADDR_LO 0x03a1 +#define mmSDMA5_RLC7_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA5_RLC7_IB_CNTL 0x03a2 +#define mmSDMA5_RLC7_IB_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC7_IB_RPTR 0x03a3 +#define mmSDMA5_RLC7_IB_RPTR_BASE_IDX 1 +#define mmSDMA5_RLC7_IB_OFFSET 0x03a4 +#define mmSDMA5_RLC7_IB_OFFSET_BASE_IDX 1 +#define mmSDMA5_RLC7_IB_BASE_LO 0x03a5 +#define mmSDMA5_RLC7_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA5_RLC7_IB_BASE_HI 0x03a6 +#define mmSDMA5_RLC7_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA5_RLC7_IB_SIZE 0x03a7 +#define mmSDMA5_RLC7_IB_SIZE_BASE_IDX 1 +#define mmSDMA5_RLC7_SKIP_CNTL 0x03a8 +#define mmSDMA5_RLC7_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC7_CONTEXT_STATUS 0x03a9 +#define mmSDMA5_RLC7_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA5_RLC7_DOORBELL 0x03aa +#define mmSDMA5_RLC7_DOORBELL_BASE_IDX 1 +#define mmSDMA5_RLC7_STATUS 0x03c0 +#define mmSDMA5_RLC7_STATUS_BASE_IDX 1 +#define mmSDMA5_RLC7_DOORBELL_LOG 0x03c1 +#define mmSDMA5_RLC7_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA5_RLC7_WATERMARK 0x03c2 +#define mmSDMA5_RLC7_WATERMARK_BASE_IDX 1 +#define mmSDMA5_RLC7_DOORBELL_OFFSET 0x03c3 +#define mmSDMA5_RLC7_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA5_RLC7_CSA_ADDR_LO 0x03c4 +#define mmSDMA5_RLC7_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA5_RLC7_CSA_ADDR_HI 0x03c5 +#define mmSDMA5_RLC7_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA5_RLC7_IB_SUB_REMAIN 0x03c7 +#define mmSDMA5_RLC7_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA5_RLC7_PREEMPT 0x03c8 +#define mmSDMA5_RLC7_PREEMPT_BASE_IDX 1 +#define mmSDMA5_RLC7_DUMMY_REG 0x03c9 +#define mmSDMA5_RLC7_DUMMY_REG_BASE_IDX 1 +#define mmSDMA5_RLC7_RB_WPTR_POLL_ADDR_HI 0x03ca +#define mmSDMA5_RLC7_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA5_RLC7_RB_WPTR_POLL_ADDR_LO 0x03cb +#define mmSDMA5_RLC7_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA5_RLC7_RB_AQL_CNTL 0x03cc +#define mmSDMA5_RLC7_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA5_RLC7_MINOR_PTR_UPDATE 0x03cd +#define mmSDMA5_RLC7_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA5_RLC7_MIDCMD_DATA0 0x03d8 +#define 
mmSDMA5_RLC7_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA5_RLC7_MIDCMD_DATA1 0x03d9 +#define mmSDMA5_RLC7_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA5_RLC7_MIDCMD_DATA2 0x03da +#define mmSDMA5_RLC7_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA5_RLC7_MIDCMD_DATA3 0x03db +#define mmSDMA5_RLC7_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA5_RLC7_MIDCMD_DATA4 0x03dc +#define mmSDMA5_RLC7_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA5_RLC7_MIDCMD_DATA5 0x03dd +#define mmSDMA5_RLC7_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA5_RLC7_MIDCMD_DATA6 0x03de +#define mmSDMA5_RLC7_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA5_RLC7_MIDCMD_DATA7 0x03df +#define mmSDMA5_RLC7_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA5_RLC7_MIDCMD_DATA8 0x03e0 +#define mmSDMA5_RLC7_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA5_RLC7_MIDCMD_CNTL 0x03e1 +#define mmSDMA5_RLC7_MIDCMD_CNTL_BASE_IDX 1 + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma5/sdma5_4_2_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/sdma5/sdma5_4_2_2_sh_mask.h new file mode 100644 index 000000000000..e99856b92386 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/sdma5/sdma5_4_2_2_sh_mask.h @@ -0,0 +1,2956 @@ +/* + * Copyright (C) 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef _sdma5_4_2_2_SH_MASK_HEADER +#define _sdma5_4_2_2_SH_MASK_HEADER + + +// addressBlock: sdma5_sdma5dec +//SDMA5_UCODE_ADDR +#define SDMA5_UCODE_ADDR__VALUE__SHIFT 0x0 +#define SDMA5_UCODE_ADDR__VALUE_MASK 0x00001FFFL +//SDMA5_UCODE_DATA +#define SDMA5_UCODE_DATA__VALUE__SHIFT 0x0 +#define SDMA5_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL +//SDMA5_VM_CNTL +#define SDMA5_VM_CNTL__CMD__SHIFT 0x0 +#define SDMA5_VM_CNTL__CMD_MASK 0x0000000FL +//SDMA5_VM_CTX_LO +#define SDMA5_VM_CTX_LO__ADDR__SHIFT 0x2 +#define SDMA5_VM_CTX_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA5_VM_CTX_HI +#define SDMA5_VM_CTX_HI__ADDR__SHIFT 0x0 +#define SDMA5_VM_CTX_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA5_ACTIVE_FCN_ID +#define SDMA5_ACTIVE_FCN_ID__VFID__SHIFT 0x0 +#define SDMA5_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4 +#define SDMA5_ACTIVE_FCN_ID__VF__SHIFT 0x1f +#define SDMA5_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL +#define SDMA5_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L +#define SDMA5_ACTIVE_FCN_ID__VF_MASK 0x80000000L +//SDMA5_VM_CTX_CNTL +#define SDMA5_VM_CTX_CNTL__PRIV__SHIFT 0x0 +#define SDMA5_VM_CTX_CNTL__VMID__SHIFT 0x4 +#define SDMA5_VM_CTX_CNTL__PRIV_MASK 0x00000001L +#define SDMA5_VM_CTX_CNTL__VMID_MASK 0x000000F0L +//SDMA5_VIRT_RESET_REQ +#define SDMA5_VIRT_RESET_REQ__VF__SHIFT 0x0 +#define SDMA5_VIRT_RESET_REQ__PF__SHIFT 0x1f +#define SDMA5_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL +#define SDMA5_VIRT_RESET_REQ__PF_MASK 0x80000000L +//SDMA5_VF_ENABLE +#define SDMA5_VF_ENABLE__VF_ENABLE__SHIFT 0x0 +#define SDMA5_VF_ENABLE__VF_ENABLE_MASK 0x00000001L +//SDMA5_CONTEXT_REG_TYPE0 +#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_RB_CNTL__SHIFT 0x0 +#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_RB_BASE__SHIFT 0x1 +#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_RB_BASE_HI__SHIFT 0x2 +#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_RB_RPTR__SHIFT 0x3 +#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_RB_RPTR_HI__SHIFT 0x4 +#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_RB_WPTR__SHIFT 0x5 +#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_RB_WPTR_HI__SHIFT 0x6 +#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_RB_WPTR_POLL_CNTL__SHIFT 0x7 +#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_RB_RPTR_ADDR_HI__SHIFT 0x8 +#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_RB_RPTR_ADDR_LO__SHIFT 0x9 +#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_IB_CNTL__SHIFT 0xa +#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_IB_RPTR__SHIFT 0xb +#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_IB_OFFSET__SHIFT 0xc +#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_IB_BASE_LO__SHIFT 0xd +#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_IB_BASE_HI__SHIFT 0xe +#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_IB_SIZE__SHIFT 0xf +#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_SKIP_CNTL__SHIFT 0x10 +#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_CONTEXT_STATUS__SHIFT 0x11 +#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_DOORBELL__SHIFT 0x12 +#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_CONTEXT_CNTL__SHIFT 0x13 +#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_RB_CNTL_MASK 0x00000001L +#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_RB_BASE_MASK 0x00000002L +#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_RB_BASE_HI_MASK 0x00000004L +#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_RB_RPTR_MASK 0x00000008L +#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_RB_RPTR_HI_MASK 0x00000010L +#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_RB_WPTR_MASK 0x00000020L +#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_RB_WPTR_HI_MASK 0x00000040L +#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_RB_WPTR_POLL_CNTL_MASK 0x00000080L +#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_RB_RPTR_ADDR_HI_MASK 0x00000100L +#define 
SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_RB_RPTR_ADDR_LO_MASK 0x00000200L +#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_IB_CNTL_MASK 0x00000400L +#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_IB_RPTR_MASK 0x00000800L +#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_IB_OFFSET_MASK 0x00001000L +#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_IB_BASE_LO_MASK 0x00002000L +#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_IB_BASE_HI_MASK 0x00004000L +#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_IB_SIZE_MASK 0x00008000L +#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_SKIP_CNTL_MASK 0x00010000L +#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_CONTEXT_STATUS_MASK 0x00020000L +#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_DOORBELL_MASK 0x00040000L +#define SDMA5_CONTEXT_REG_TYPE0__SDMA5_GFX_CONTEXT_CNTL_MASK 0x00080000L +//SDMA5_CONTEXT_REG_TYPE1 +#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_STATUS__SHIFT 0x8 +#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_DOORBELL_LOG__SHIFT 0x9 +#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_WATERMARK__SHIFT 0xa +#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_DOORBELL_OFFSET__SHIFT 0xb +#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_CSA_ADDR_LO__SHIFT 0xc +#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_CSA_ADDR_HI__SHIFT 0xd +#define SDMA5_CONTEXT_REG_TYPE1__VOID_REG2__SHIFT 0xe +#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_IB_SUB_REMAIN__SHIFT 0xf +#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_PREEMPT__SHIFT 0x10 +#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_DUMMY_REG__SHIFT 0x11 +#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_RB_WPTR_POLL_ADDR_HI__SHIFT 0x12 +#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_RB_WPTR_POLL_ADDR_LO__SHIFT 0x13 +#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_RB_AQL_CNTL__SHIFT 0x14 +#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_MINOR_PTR_UPDATE__SHIFT 0x15 +#define SDMA5_CONTEXT_REG_TYPE1__RESERVED__SHIFT 0x16 +#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_STATUS_MASK 0x00000100L +#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_DOORBELL_LOG_MASK 0x00000200L +#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_WATERMARK_MASK 0x00000400L +#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_DOORBELL_OFFSET_MASK 0x00000800L +#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_CSA_ADDR_LO_MASK 0x00001000L +#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_CSA_ADDR_HI_MASK 0x00002000L +#define SDMA5_CONTEXT_REG_TYPE1__VOID_REG2_MASK 0x00004000L +#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_IB_SUB_REMAIN_MASK 0x00008000L +#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_PREEMPT_MASK 0x00010000L +#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_DUMMY_REG_MASK 0x00020000L +#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_RB_WPTR_POLL_ADDR_HI_MASK 0x00040000L +#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_RB_WPTR_POLL_ADDR_LO_MASK 0x00080000L +#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_RB_AQL_CNTL_MASK 0x00100000L +#define SDMA5_CONTEXT_REG_TYPE1__SDMA5_GFX_MINOR_PTR_UPDATE_MASK 0x00200000L +#define SDMA5_CONTEXT_REG_TYPE1__RESERVED_MASK 0xFFC00000L +//SDMA5_CONTEXT_REG_TYPE2 +#define SDMA5_CONTEXT_REG_TYPE2__SDMA5_GFX_MIDCMD_DATA0__SHIFT 0x0 +#define SDMA5_CONTEXT_REG_TYPE2__SDMA5_GFX_MIDCMD_DATA1__SHIFT 0x1 +#define SDMA5_CONTEXT_REG_TYPE2__SDMA5_GFX_MIDCMD_DATA2__SHIFT 0x2 +#define SDMA5_CONTEXT_REG_TYPE2__SDMA5_GFX_MIDCMD_DATA3__SHIFT 0x3 +#define SDMA5_CONTEXT_REG_TYPE2__SDMA5_GFX_MIDCMD_DATA4__SHIFT 0x4 +#define SDMA5_CONTEXT_REG_TYPE2__SDMA5_GFX_MIDCMD_DATA5__SHIFT 0x5 +#define SDMA5_CONTEXT_REG_TYPE2__SDMA5_GFX_MIDCMD_DATA6__SHIFT 0x6 +#define SDMA5_CONTEXT_REG_TYPE2__SDMA5_GFX_MIDCMD_DATA7__SHIFT 0x7 +#define SDMA5_CONTEXT_REG_TYPE2__SDMA5_GFX_MIDCMD_DATA8__SHIFT 0x8 +#define 
SDMA5_CONTEXT_REG_TYPE2__SDMA5_GFX_MIDCMD_CNTL__SHIFT 0x9 +#define SDMA5_CONTEXT_REG_TYPE2__RESERVED__SHIFT 0xa +#define SDMA5_CONTEXT_REG_TYPE2__SDMA5_GFX_MIDCMD_DATA0_MASK 0x00000001L +#define SDMA5_CONTEXT_REG_TYPE2__SDMA5_GFX_MIDCMD_DATA1_MASK 0x00000002L +#define SDMA5_CONTEXT_REG_TYPE2__SDMA5_GFX_MIDCMD_DATA2_MASK 0x00000004L +#define SDMA5_CONTEXT_REG_TYPE2__SDMA5_GFX_MIDCMD_DATA3_MASK 0x00000008L +#define SDMA5_CONTEXT_REG_TYPE2__SDMA5_GFX_MIDCMD_DATA4_MASK 0x00000010L +#define SDMA5_CONTEXT_REG_TYPE2__SDMA5_GFX_MIDCMD_DATA5_MASK 0x00000020L +#define SDMA5_CONTEXT_REG_TYPE2__SDMA5_GFX_MIDCMD_DATA6_MASK 0x00000040L +#define SDMA5_CONTEXT_REG_TYPE2__SDMA5_GFX_MIDCMD_DATA7_MASK 0x00000080L +#define SDMA5_CONTEXT_REG_TYPE2__SDMA5_GFX_MIDCMD_DATA8_MASK 0x00000100L +#define SDMA5_CONTEXT_REG_TYPE2__SDMA5_GFX_MIDCMD_CNTL_MASK 0x00000200L +#define SDMA5_CONTEXT_REG_TYPE2__RESERVED_MASK 0xFFFFFC00L +//SDMA5_CONTEXT_REG_TYPE3 +#define SDMA5_CONTEXT_REG_TYPE3__RESERVED__SHIFT 0x0 +#define SDMA5_CONTEXT_REG_TYPE3__RESERVED_MASK 0xFFFFFFFFL +//SDMA5_PUB_REG_TYPE0 +#define SDMA5_PUB_REG_TYPE0__SDMA5_UCODE_ADDR__SHIFT 0x0 +#define SDMA5_PUB_REG_TYPE0__SDMA5_UCODE_DATA__SHIFT 0x1 +#define SDMA5_PUB_REG_TYPE0__RESERVED3__SHIFT 0x3 +#define SDMA5_PUB_REG_TYPE0__SDMA5_VM_CNTL__SHIFT 0x4 +#define SDMA5_PUB_REG_TYPE0__SDMA5_VM_CTX_LO__SHIFT 0x5 +#define SDMA5_PUB_REG_TYPE0__SDMA5_VM_CTX_HI__SHIFT 0x6 +#define SDMA5_PUB_REG_TYPE0__SDMA5_ACTIVE_FCN_ID__SHIFT 0x7 +#define SDMA5_PUB_REG_TYPE0__SDMA5_VM_CTX_CNTL__SHIFT 0x8 +#define SDMA5_PUB_REG_TYPE0__SDMA5_VIRT_RESET_REQ__SHIFT 0x9 +#define SDMA5_PUB_REG_TYPE0__RESERVED10__SHIFT 0xa +#define SDMA5_PUB_REG_TYPE0__SDMA5_CONTEXT_REG_TYPE0__SHIFT 0xb +#define SDMA5_PUB_REG_TYPE0__SDMA5_CONTEXT_REG_TYPE1__SHIFT 0xc +#define SDMA5_PUB_REG_TYPE0__SDMA5_CONTEXT_REG_TYPE2__SHIFT 0xd +#define SDMA5_PUB_REG_TYPE0__SDMA5_CONTEXT_REG_TYPE3__SHIFT 0xe +#define SDMA5_PUB_REG_TYPE0__SDMA5_PUB_REG_TYPE0__SHIFT 0xf +#define SDMA5_PUB_REG_TYPE0__SDMA5_PUB_REG_TYPE1__SHIFT 0x10 +#define SDMA5_PUB_REG_TYPE0__SDMA5_PUB_REG_TYPE2__SHIFT 0x11 +#define SDMA5_PUB_REG_TYPE0__SDMA5_PUB_REG_TYPE3__SHIFT 0x12 +#define SDMA5_PUB_REG_TYPE0__SDMA5_MMHUB_CNTL__SHIFT 0x13 +#define SDMA5_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY__SHIFT 0x15 +#define SDMA5_PUB_REG_TYPE0__SDMA5_CONTEXT_GROUP_BOUNDARY__SHIFT 0x19 +#define SDMA5_PUB_REG_TYPE0__SDMA5_POWER_CNTL__SHIFT 0x1a +#define SDMA5_PUB_REG_TYPE0__SDMA5_CLK_CTRL__SHIFT 0x1b +#define SDMA5_PUB_REG_TYPE0__SDMA5_CNTL__SHIFT 0x1c +#define SDMA5_PUB_REG_TYPE0__SDMA5_CHICKEN_BITS__SHIFT 0x1d +#define SDMA5_PUB_REG_TYPE0__SDMA5_GB_ADDR_CONFIG__SHIFT 0x1e +#define SDMA5_PUB_REG_TYPE0__SDMA5_GB_ADDR_CONFIG_READ__SHIFT 0x1f +#define SDMA5_PUB_REG_TYPE0__SDMA5_UCODE_ADDR_MASK 0x00000001L +#define SDMA5_PUB_REG_TYPE0__SDMA5_UCODE_DATA_MASK 0x00000002L +#define SDMA5_PUB_REG_TYPE0__RESERVED3_MASK 0x00000008L +#define SDMA5_PUB_REG_TYPE0__SDMA5_VM_CNTL_MASK 0x00000010L +#define SDMA5_PUB_REG_TYPE0__SDMA5_VM_CTX_LO_MASK 0x00000020L +#define SDMA5_PUB_REG_TYPE0__SDMA5_VM_CTX_HI_MASK 0x00000040L +#define SDMA5_PUB_REG_TYPE0__SDMA5_ACTIVE_FCN_ID_MASK 0x00000080L +#define SDMA5_PUB_REG_TYPE0__SDMA5_VM_CTX_CNTL_MASK 0x00000100L +#define SDMA5_PUB_REG_TYPE0__SDMA5_VIRT_RESET_REQ_MASK 0x00000200L +#define SDMA5_PUB_REG_TYPE0__RESERVED10_MASK 0x00000400L +#define SDMA5_PUB_REG_TYPE0__SDMA5_CONTEXT_REG_TYPE0_MASK 0x00000800L +#define SDMA5_PUB_REG_TYPE0__SDMA5_CONTEXT_REG_TYPE1_MASK 0x00001000L +#define SDMA5_PUB_REG_TYPE0__SDMA5_CONTEXT_REG_TYPE2_MASK 
0x00002000L +#define SDMA5_PUB_REG_TYPE0__SDMA5_CONTEXT_REG_TYPE3_MASK 0x00004000L +#define SDMA5_PUB_REG_TYPE0__SDMA5_PUB_REG_TYPE0_MASK 0x00008000L +#define SDMA5_PUB_REG_TYPE0__SDMA5_PUB_REG_TYPE1_MASK 0x00010000L +#define SDMA5_PUB_REG_TYPE0__SDMA5_PUB_REG_TYPE2_MASK 0x00020000L +#define SDMA5_PUB_REG_TYPE0__SDMA5_PUB_REG_TYPE3_MASK 0x00040000L +#define SDMA5_PUB_REG_TYPE0__SDMA5_MMHUB_CNTL_MASK 0x00080000L +#define SDMA5_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY_MASK 0x01E00000L +#define SDMA5_PUB_REG_TYPE0__SDMA5_CONTEXT_GROUP_BOUNDARY_MASK 0x02000000L +#define SDMA5_PUB_REG_TYPE0__SDMA5_POWER_CNTL_MASK 0x04000000L +#define SDMA5_PUB_REG_TYPE0__SDMA5_CLK_CTRL_MASK 0x08000000L +#define SDMA5_PUB_REG_TYPE0__SDMA5_CNTL_MASK 0x10000000L +#define SDMA5_PUB_REG_TYPE0__SDMA5_CHICKEN_BITS_MASK 0x20000000L +#define SDMA5_PUB_REG_TYPE0__SDMA5_GB_ADDR_CONFIG_MASK 0x40000000L +#define SDMA5_PUB_REG_TYPE0__SDMA5_GB_ADDR_CONFIG_READ_MASK 0x80000000L +//SDMA5_PUB_REG_TYPE1 +#define SDMA5_PUB_REG_TYPE1__SDMA5_RB_RPTR_FETCH_HI__SHIFT 0x0 +#define SDMA5_PUB_REG_TYPE1__SDMA5_SEM_WAIT_FAIL_TIMER_CNTL__SHIFT 0x1 +#define SDMA5_PUB_REG_TYPE1__SDMA5_RB_RPTR_FETCH__SHIFT 0x2 +#define SDMA5_PUB_REG_TYPE1__SDMA5_IB_OFFSET_FETCH__SHIFT 0x3 +#define SDMA5_PUB_REG_TYPE1__SDMA5_PROGRAM__SHIFT 0x4 +#define SDMA5_PUB_REG_TYPE1__SDMA5_STATUS_REG__SHIFT 0x5 +#define SDMA5_PUB_REG_TYPE1__SDMA5_STATUS1_REG__SHIFT 0x6 +#define SDMA5_PUB_REG_TYPE1__SDMA5_RD_BURST_CNTL__SHIFT 0x7 +#define SDMA5_PUB_REG_TYPE1__SDMA5_HBM_PAGE_CONFIG__SHIFT 0x8 +#define SDMA5_PUB_REG_TYPE1__SDMA5_UCODE_CHECKSUM__SHIFT 0x9 +#define SDMA5_PUB_REG_TYPE1__SDMA5_F32_CNTL__SHIFT 0xa +#define SDMA5_PUB_REG_TYPE1__SDMA5_FREEZE__SHIFT 0xb +#define SDMA5_PUB_REG_TYPE1__SDMA5_PHASE0_QUANTUM__SHIFT 0xc +#define SDMA5_PUB_REG_TYPE1__SDMA5_PHASE1_QUANTUM__SHIFT 0xd +#define SDMA5_PUB_REG_TYPE1__SDMA_POWER_GATING__SHIFT 0xe +#define SDMA5_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG__SHIFT 0xf +#define SDMA5_PUB_REG_TYPE1__SDMA_PGFSM_WRITE__SHIFT 0x10 +#define SDMA5_PUB_REG_TYPE1__SDMA_PGFSM_READ__SHIFT 0x11 +#define SDMA5_PUB_REG_TYPE1__SDMA5_EDC_CONFIG__SHIFT 0x12 +#define SDMA5_PUB_REG_TYPE1__SDMA5_BA_THRESHOLD__SHIFT 0x13 +#define SDMA5_PUB_REG_TYPE1__SDMA5_ID__SHIFT 0x14 +#define SDMA5_PUB_REG_TYPE1__SDMA5_VERSION__SHIFT 0x15 +#define SDMA5_PUB_REG_TYPE1__SDMA5_EDC_COUNTER__SHIFT 0x16 +#define SDMA5_PUB_REG_TYPE1__SDMA5_EDC_COUNTER_CLEAR__SHIFT 0x17 +#define SDMA5_PUB_REG_TYPE1__SDMA5_STATUS2_REG__SHIFT 0x18 +#define SDMA5_PUB_REG_TYPE1__SDMA5_ATOMIC_CNTL__SHIFT 0x19 +#define SDMA5_PUB_REG_TYPE1__SDMA5_ATOMIC_PREOP_LO__SHIFT 0x1a +#define SDMA5_PUB_REG_TYPE1__SDMA5_ATOMIC_PREOP_HI__SHIFT 0x1b +#define SDMA5_PUB_REG_TYPE1__SDMA5_UTCL1_CNTL__SHIFT 0x1c +#define SDMA5_PUB_REG_TYPE1__SDMA5_UTCL1_WATERMK__SHIFT 0x1d +#define SDMA5_PUB_REG_TYPE1__SDMA5_UTCL1_RD_STATUS__SHIFT 0x1e +#define SDMA5_PUB_REG_TYPE1__SDMA5_UTCL1_WR_STATUS__SHIFT 0x1f +#define SDMA5_PUB_REG_TYPE1__SDMA5_RB_RPTR_FETCH_HI_MASK 0x00000001L +#define SDMA5_PUB_REG_TYPE1__SDMA5_SEM_WAIT_FAIL_TIMER_CNTL_MASK 0x00000002L +#define SDMA5_PUB_REG_TYPE1__SDMA5_RB_RPTR_FETCH_MASK 0x00000004L +#define SDMA5_PUB_REG_TYPE1__SDMA5_IB_OFFSET_FETCH_MASK 0x00000008L +#define SDMA5_PUB_REG_TYPE1__SDMA5_PROGRAM_MASK 0x00000010L +#define SDMA5_PUB_REG_TYPE1__SDMA5_STATUS_REG_MASK 0x00000020L +#define SDMA5_PUB_REG_TYPE1__SDMA5_STATUS1_REG_MASK 0x00000040L +#define SDMA5_PUB_REG_TYPE1__SDMA5_RD_BURST_CNTL_MASK 0x00000080L +#define SDMA5_PUB_REG_TYPE1__SDMA5_HBM_PAGE_CONFIG_MASK 0x00000100L +#define 
SDMA5_PUB_REG_TYPE1__SDMA5_UCODE_CHECKSUM_MASK 0x00000200L +#define SDMA5_PUB_REG_TYPE1__SDMA5_F32_CNTL_MASK 0x00000400L +#define SDMA5_PUB_REG_TYPE1__SDMA5_FREEZE_MASK 0x00000800L +#define SDMA5_PUB_REG_TYPE1__SDMA5_PHASE0_QUANTUM_MASK 0x00001000L +#define SDMA5_PUB_REG_TYPE1__SDMA5_PHASE1_QUANTUM_MASK 0x00002000L +#define SDMA5_PUB_REG_TYPE1__SDMA_POWER_GATING_MASK 0x00004000L +#define SDMA5_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG_MASK 0x00008000L +#define SDMA5_PUB_REG_TYPE1__SDMA_PGFSM_WRITE_MASK 0x00010000L +#define SDMA5_PUB_REG_TYPE1__SDMA_PGFSM_READ_MASK 0x00020000L +#define SDMA5_PUB_REG_TYPE1__SDMA5_EDC_CONFIG_MASK 0x00040000L +#define SDMA5_PUB_REG_TYPE1__SDMA5_BA_THRESHOLD_MASK 0x00080000L +#define SDMA5_PUB_REG_TYPE1__SDMA5_ID_MASK 0x00100000L +#define SDMA5_PUB_REG_TYPE1__SDMA5_VERSION_MASK 0x00200000L +#define SDMA5_PUB_REG_TYPE1__SDMA5_EDC_COUNTER_MASK 0x00400000L +#define SDMA5_PUB_REG_TYPE1__SDMA5_EDC_COUNTER_CLEAR_MASK 0x00800000L +#define SDMA5_PUB_REG_TYPE1__SDMA5_STATUS2_REG_MASK 0x01000000L +#define SDMA5_PUB_REG_TYPE1__SDMA5_ATOMIC_CNTL_MASK 0x02000000L +#define SDMA5_PUB_REG_TYPE1__SDMA5_ATOMIC_PREOP_LO_MASK 0x04000000L +#define SDMA5_PUB_REG_TYPE1__SDMA5_ATOMIC_PREOP_HI_MASK 0x08000000L +#define SDMA5_PUB_REG_TYPE1__SDMA5_UTCL1_CNTL_MASK 0x10000000L +#define SDMA5_PUB_REG_TYPE1__SDMA5_UTCL1_WATERMK_MASK 0x20000000L +#define SDMA5_PUB_REG_TYPE1__SDMA5_UTCL1_RD_STATUS_MASK 0x40000000L +#define SDMA5_PUB_REG_TYPE1__SDMA5_UTCL1_WR_STATUS_MASK 0x80000000L +//SDMA5_PUB_REG_TYPE2 +#define SDMA5_PUB_REG_TYPE2__SDMA5_UTCL1_INV0__SHIFT 0x0 +#define SDMA5_PUB_REG_TYPE2__SDMA5_UTCL1_INV1__SHIFT 0x1 +#define SDMA5_PUB_REG_TYPE2__SDMA5_UTCL1_INV2__SHIFT 0x2 +#define SDMA5_PUB_REG_TYPE2__SDMA5_UTCL1_RD_XNACK0__SHIFT 0x3 +#define SDMA5_PUB_REG_TYPE2__SDMA5_UTCL1_RD_XNACK1__SHIFT 0x4 +#define SDMA5_PUB_REG_TYPE2__SDMA5_UTCL1_WR_XNACK0__SHIFT 0x5 +#define SDMA5_PUB_REG_TYPE2__SDMA5_UTCL1_WR_XNACK1__SHIFT 0x6 +#define SDMA5_PUB_REG_TYPE2__SDMA5_UTCL1_TIMEOUT__SHIFT 0x7 +#define SDMA5_PUB_REG_TYPE2__SDMA5_UTCL1_PAGE__SHIFT 0x8 +#define SDMA5_PUB_REG_TYPE2__SDMA5_POWER_CNTL_IDLE__SHIFT 0x9 +#define SDMA5_PUB_REG_TYPE2__SDMA5_RELAX_ORDERING_LUT__SHIFT 0xa +#define SDMA5_PUB_REG_TYPE2__SDMA5_CHICKEN_BITS_2__SHIFT 0xb +#define SDMA5_PUB_REG_TYPE2__SDMA5_STATUS3_REG__SHIFT 0xc +#define SDMA5_PUB_REG_TYPE2__SDMA5_PHYSICAL_ADDR_LO__SHIFT 0xd +#define SDMA5_PUB_REG_TYPE2__SDMA5_PHYSICAL_ADDR_HI__SHIFT 0xe +#define SDMA5_PUB_REG_TYPE2__SDMA5_PHASE2_QUANTUM__SHIFT 0xf +#define SDMA5_PUB_REG_TYPE2__SDMA5_ERROR_LOG__SHIFT 0x10 +#define SDMA5_PUB_REG_TYPE2__SDMA5_PUB_DUMMY_REG0__SHIFT 0x11 +#define SDMA5_PUB_REG_TYPE2__SDMA5_PUB_DUMMY_REG1__SHIFT 0x12 +#define SDMA5_PUB_REG_TYPE2__SDMA5_PUB_DUMMY_REG2__SHIFT 0x13 +#define SDMA5_PUB_REG_TYPE2__SDMA5_PUB_DUMMY_REG3__SHIFT 0x14 +#define SDMA5_PUB_REG_TYPE2__SDMA5_F32_COUNTER__SHIFT 0x15 +#define SDMA5_PUB_REG_TYPE2__SDMA5_UNBREAKABLE__SHIFT 0x16 +#define SDMA5_PUB_REG_TYPE2__SDMA5_PERFMON_CNTL__SHIFT 0x17 +#define SDMA5_PUB_REG_TYPE2__SDMA5_PERFCOUNTER0_RESULT__SHIFT 0x18 +#define SDMA5_PUB_REG_TYPE2__SDMA5_PERFCOUNTER1_RESULT__SHIFT 0x19 +#define SDMA5_PUB_REG_TYPE2__SDMA5_PERFCOUNTER_TAG_DELAY_RANGE__SHIFT 0x1a +#define SDMA5_PUB_REG_TYPE2__SDMA5_CRD_CNTL__SHIFT 0x1b +#define SDMA5_PUB_REG_TYPE2__RESERVED28__SHIFT 0x1c +#define SDMA5_PUB_REG_TYPE2__SDMA5_GPU_IOV_VIOLATION_LOG__SHIFT 0x1d +#define SDMA5_PUB_REG_TYPE2__SDMA5_ULV_CNTL__SHIFT 0x1e +#define SDMA5_PUB_REG_TYPE2__RESERVED__SHIFT 0x1f +#define SDMA5_PUB_REG_TYPE2__SDMA5_UTCL1_INV0_MASK 
0x00000001L +#define SDMA5_PUB_REG_TYPE2__SDMA5_UTCL1_INV1_MASK 0x00000002L +#define SDMA5_PUB_REG_TYPE2__SDMA5_UTCL1_INV2_MASK 0x00000004L +#define SDMA5_PUB_REG_TYPE2__SDMA5_UTCL1_RD_XNACK0_MASK 0x00000008L +#define SDMA5_PUB_REG_TYPE2__SDMA5_UTCL1_RD_XNACK1_MASK 0x00000010L +#define SDMA5_PUB_REG_TYPE2__SDMA5_UTCL1_WR_XNACK0_MASK 0x00000020L +#define SDMA5_PUB_REG_TYPE2__SDMA5_UTCL1_WR_XNACK1_MASK 0x00000040L +#define SDMA5_PUB_REG_TYPE2__SDMA5_UTCL1_TIMEOUT_MASK 0x00000080L +#define SDMA5_PUB_REG_TYPE2__SDMA5_UTCL1_PAGE_MASK 0x00000100L +#define SDMA5_PUB_REG_TYPE2__SDMA5_POWER_CNTL_IDLE_MASK 0x00000200L +#define SDMA5_PUB_REG_TYPE2__SDMA5_RELAX_ORDERING_LUT_MASK 0x00000400L +#define SDMA5_PUB_REG_TYPE2__SDMA5_CHICKEN_BITS_2_MASK 0x00000800L +#define SDMA5_PUB_REG_TYPE2__SDMA5_STATUS3_REG_MASK 0x00001000L +#define SDMA5_PUB_REG_TYPE2__SDMA5_PHYSICAL_ADDR_LO_MASK 0x00002000L +#define SDMA5_PUB_REG_TYPE2__SDMA5_PHYSICAL_ADDR_HI_MASK 0x00004000L +#define SDMA5_PUB_REG_TYPE2__SDMA5_PHASE2_QUANTUM_MASK 0x00008000L +#define SDMA5_PUB_REG_TYPE2__SDMA5_ERROR_LOG_MASK 0x00010000L +#define SDMA5_PUB_REG_TYPE2__SDMA5_PUB_DUMMY_REG0_MASK 0x00020000L +#define SDMA5_PUB_REG_TYPE2__SDMA5_PUB_DUMMY_REG1_MASK 0x00040000L +#define SDMA5_PUB_REG_TYPE2__SDMA5_PUB_DUMMY_REG2_MASK 0x00080000L +#define SDMA5_PUB_REG_TYPE2__SDMA5_PUB_DUMMY_REG3_MASK 0x00100000L +#define SDMA5_PUB_REG_TYPE2__SDMA5_F32_COUNTER_MASK 0x00200000L +#define SDMA5_PUB_REG_TYPE2__SDMA5_UNBREAKABLE_MASK 0x00400000L +#define SDMA5_PUB_REG_TYPE2__SDMA5_PERFMON_CNTL_MASK 0x00800000L +#define SDMA5_PUB_REG_TYPE2__SDMA5_PERFCOUNTER0_RESULT_MASK 0x01000000L +#define SDMA5_PUB_REG_TYPE2__SDMA5_PERFCOUNTER1_RESULT_MASK 0x02000000L +#define SDMA5_PUB_REG_TYPE2__SDMA5_PERFCOUNTER_TAG_DELAY_RANGE_MASK 0x04000000L +#define SDMA5_PUB_REG_TYPE2__SDMA5_CRD_CNTL_MASK 0x08000000L +#define SDMA5_PUB_REG_TYPE2__RESERVED28_MASK 0x10000000L +#define SDMA5_PUB_REG_TYPE2__SDMA5_GPU_IOV_VIOLATION_LOG_MASK 0x20000000L +#define SDMA5_PUB_REG_TYPE2__SDMA5_ULV_CNTL_MASK 0x40000000L +#define SDMA5_PUB_REG_TYPE2__RESERVED_MASK 0x80000000L +//SDMA5_PUB_REG_TYPE3 +#define SDMA5_PUB_REG_TYPE3__SDMA5_EA_DBIT_ADDR_DATA__SHIFT 0x0 +#define SDMA5_PUB_REG_TYPE3__SDMA5_EA_DBIT_ADDR_INDEX__SHIFT 0x1 +#define SDMA5_PUB_REG_TYPE3__SDMA5_GPU_IOV_VIOLATION_LOG2__SHIFT 0x2 +#define SDMA5_PUB_REG_TYPE3__RESERVED__SHIFT 0x3 +#define SDMA5_PUB_REG_TYPE3__SDMA5_EA_DBIT_ADDR_DATA_MASK 0x00000001L +#define SDMA5_PUB_REG_TYPE3__SDMA5_EA_DBIT_ADDR_INDEX_MASK 0x00000002L +#define SDMA5_PUB_REG_TYPE3__SDMA5_GPU_IOV_VIOLATION_LOG2_MASK 0x00000004L +#define SDMA5_PUB_REG_TYPE3__RESERVED_MASK 0xFFFFFFF8L +//SDMA5_MMHUB_CNTL +#define SDMA5_MMHUB_CNTL__UNIT_ID__SHIFT 0x0 +#define SDMA5_MMHUB_CNTL__UNIT_ID_MASK 0x0000003FL +//SDMA5_CONTEXT_GROUP_BOUNDARY +#define SDMA5_CONTEXT_GROUP_BOUNDARY__RESERVED__SHIFT 0x0 +#define SDMA5_CONTEXT_GROUP_BOUNDARY__RESERVED_MASK 0xFFFFFFFFL +//SDMA5_POWER_CNTL +#define SDMA5_POWER_CNTL__MEM_POWER_OVERRIDE__SHIFT 0x8 +#define SDMA5_POWER_CNTL__MEM_POWER_LS_EN__SHIFT 0x9 +#define SDMA5_POWER_CNTL__MEM_POWER_DS_EN__SHIFT 0xa +#define SDMA5_POWER_CNTL__MEM_POWER_SD_EN__SHIFT 0xb +#define SDMA5_POWER_CNTL__MEM_POWER_DELAY__SHIFT 0xc +#define SDMA5_POWER_CNTL__MEM_POWER_OVERRIDE_MASK 0x00000100L +#define SDMA5_POWER_CNTL__MEM_POWER_LS_EN_MASK 0x00000200L +#define SDMA5_POWER_CNTL__MEM_POWER_DS_EN_MASK 0x00000400L +#define SDMA5_POWER_CNTL__MEM_POWER_SD_EN_MASK 0x00000800L +#define SDMA5_POWER_CNTL__MEM_POWER_DELAY_MASK 0x003FF000L +//SDMA5_CLK_CTRL +#define 
SDMA5_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define SDMA5_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define SDMA5_CLK_CTRL__RESERVED__SHIFT 0xc +#define SDMA5_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18 +#define SDMA5_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19 +#define SDMA5_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a +#define SDMA5_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b +#define SDMA5_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c +#define SDMA5_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d +#define SDMA5_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e +#define SDMA5_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f +#define SDMA5_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define SDMA5_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define SDMA5_CLK_CTRL__RESERVED_MASK 0x00FFF000L +#define SDMA5_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L +#define SDMA5_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L +#define SDMA5_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L +#define SDMA5_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L +#define SDMA5_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L +#define SDMA5_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L +#define SDMA5_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L +#define SDMA5_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L +//SDMA5_CNTL +#define SDMA5_CNTL__TRAP_ENABLE__SHIFT 0x0 +#define SDMA5_CNTL__UTC_L1_ENABLE__SHIFT 0x1 +#define SDMA5_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x2 +#define SDMA5_CNTL__DATA_SWAP_ENABLE__SHIFT 0x3 +#define SDMA5_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x4 +#define SDMA5_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x5 +#define SDMA5_CNTL__MIDCMD_WORLDSWITCH_ENABLE__SHIFT 0x11 +#define SDMA5_CNTL__AUTO_CTXSW_ENABLE__SHIFT 0x12 +#define SDMA5_CNTL__CTXEMPTY_INT_ENABLE__SHIFT 0x1c +#define SDMA5_CNTL__FROZEN_INT_ENABLE__SHIFT 0x1d +#define SDMA5_CNTL__IB_PREEMPT_INT_ENABLE__SHIFT 0x1e +#define SDMA5_CNTL__TRAP_ENABLE_MASK 0x00000001L +#define SDMA5_CNTL__UTC_L1_ENABLE_MASK 0x00000002L +#define SDMA5_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L +#define SDMA5_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L +#define SDMA5_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L +#define SDMA5_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00000020L +#define SDMA5_CNTL__MIDCMD_WORLDSWITCH_ENABLE_MASK 0x00020000L +#define SDMA5_CNTL__AUTO_CTXSW_ENABLE_MASK 0x00040000L +#define SDMA5_CNTL__CTXEMPTY_INT_ENABLE_MASK 0x10000000L +#define SDMA5_CNTL__FROZEN_INT_ENABLE_MASK 0x20000000L +#define SDMA5_CNTL__IB_PREEMPT_INT_ENABLE_MASK 0x40000000L +//SDMA5_CHICKEN_BITS +#define SDMA5_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE__SHIFT 0x0 +#define SDMA5_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE__SHIFT 0x1 +#define SDMA5_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE__SHIFT 0x2 +#define SDMA5_CHICKEN_BITS__WRITE_BURST_LENGTH__SHIFT 0x8 +#define SDMA5_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE__SHIFT 0xa +#define SDMA5_CHICKEN_BITS__COPY_OVERLAP_ENABLE__SHIFT 0x10 +#define SDMA5_CHICKEN_BITS__RAW_CHECK_ENABLE__SHIFT 0x11 +#define SDMA5_CHICKEN_BITS__SRBM_POLL_RETRYING__SHIFT 0x14 +#define SDMA5_CHICKEN_BITS__CG_STATUS_OUTPUT__SHIFT 0x17 +#define SDMA5_CHICKEN_BITS__TIME_BASED_QOS__SHIFT 0x19 +#define SDMA5_CHICKEN_BITS__CE_AFIFO_WATERMARK__SHIFT 0x1a +#define SDMA5_CHICKEN_BITS__CE_DFIFO_WATERMARK__SHIFT 0x1c +#define SDMA5_CHICKEN_BITS__CE_LFIFO_WATERMARK__SHIFT 0x1e +#define SDMA5_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE_MASK 0x00000001L +#define SDMA5_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE_MASK 0x00000002L +#define SDMA5_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE_MASK 0x00000004L +#define SDMA5_CHICKEN_BITS__WRITE_BURST_LENGTH_MASK 0x00000300L +#define SDMA5_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE_MASK 0x00001C00L 
+#define SDMA5_CHICKEN_BITS__COPY_OVERLAP_ENABLE_MASK 0x00010000L +#define SDMA5_CHICKEN_BITS__RAW_CHECK_ENABLE_MASK 0x00020000L +#define SDMA5_CHICKEN_BITS__SRBM_POLL_RETRYING_MASK 0x00100000L +#define SDMA5_CHICKEN_BITS__CG_STATUS_OUTPUT_MASK 0x00800000L +#define SDMA5_CHICKEN_BITS__TIME_BASED_QOS_MASK 0x02000000L +#define SDMA5_CHICKEN_BITS__CE_AFIFO_WATERMARK_MASK 0x0C000000L +#define SDMA5_CHICKEN_BITS__CE_DFIFO_WATERMARK_MASK 0x30000000L +#define SDMA5_CHICKEN_BITS__CE_LFIFO_WATERMARK_MASK 0xC0000000L +//SDMA5_GB_ADDR_CONFIG +#define SDMA5_GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0 +#define SDMA5_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3 +#define SDMA5_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x8 +#define SDMA5_GB_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc +#define SDMA5_GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13 +#define SDMA5_GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L +#define SDMA5_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L +#define SDMA5_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L +#define SDMA5_GB_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L +#define SDMA5_GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L +//SDMA5_GB_ADDR_CONFIG_READ +#define SDMA5_GB_ADDR_CONFIG_READ__NUM_PIPES__SHIFT 0x0 +#define SDMA5_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE__SHIFT 0x3 +#define SDMA5_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE__SHIFT 0x8 +#define SDMA5_GB_ADDR_CONFIG_READ__NUM_BANKS__SHIFT 0xc +#define SDMA5_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES__SHIFT 0x13 +#define SDMA5_GB_ADDR_CONFIG_READ__NUM_PIPES_MASK 0x00000007L +#define SDMA5_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L +#define SDMA5_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE_MASK 0x00000700L +#define SDMA5_GB_ADDR_CONFIG_READ__NUM_BANKS_MASK 0x00007000L +#define SDMA5_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES_MASK 0x00180000L +//SDMA5_RB_RPTR_FETCH_HI +#define SDMA5_RB_RPTR_FETCH_HI__OFFSET__SHIFT 0x0 +#define SDMA5_RB_RPTR_FETCH_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA5_SEM_WAIT_FAIL_TIMER_CNTL +#define SDMA5_SEM_WAIT_FAIL_TIMER_CNTL__TIMER__SHIFT 0x0 +#define SDMA5_SEM_WAIT_FAIL_TIMER_CNTL__TIMER_MASK 0xFFFFFFFFL +//SDMA5_RB_RPTR_FETCH +#define SDMA5_RB_RPTR_FETCH__OFFSET__SHIFT 0x2 +#define SDMA5_RB_RPTR_FETCH__OFFSET_MASK 0xFFFFFFFCL +//SDMA5_IB_OFFSET_FETCH +#define SDMA5_IB_OFFSET_FETCH__OFFSET__SHIFT 0x2 +#define SDMA5_IB_OFFSET_FETCH__OFFSET_MASK 0x003FFFFCL +//SDMA5_PROGRAM +#define SDMA5_PROGRAM__STREAM__SHIFT 0x0 +#define SDMA5_PROGRAM__STREAM_MASK 0xFFFFFFFFL +//SDMA5_STATUS_REG +#define SDMA5_STATUS_REG__IDLE__SHIFT 0x0 +#define SDMA5_STATUS_REG__REG_IDLE__SHIFT 0x1 +#define SDMA5_STATUS_REG__RB_EMPTY__SHIFT 0x2 +#define SDMA5_STATUS_REG__RB_FULL__SHIFT 0x3 +#define SDMA5_STATUS_REG__RB_CMD_IDLE__SHIFT 0x4 +#define SDMA5_STATUS_REG__RB_CMD_FULL__SHIFT 0x5 +#define SDMA5_STATUS_REG__IB_CMD_IDLE__SHIFT 0x6 +#define SDMA5_STATUS_REG__IB_CMD_FULL__SHIFT 0x7 +#define SDMA5_STATUS_REG__BLOCK_IDLE__SHIFT 0x8 +#define SDMA5_STATUS_REG__INSIDE_IB__SHIFT 0x9 +#define SDMA5_STATUS_REG__EX_IDLE__SHIFT 0xa +#define SDMA5_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE__SHIFT 0xb +#define SDMA5_STATUS_REG__PACKET_READY__SHIFT 0xc +#define SDMA5_STATUS_REG__MC_WR_IDLE__SHIFT 0xd +#define SDMA5_STATUS_REG__SRBM_IDLE__SHIFT 0xe +#define SDMA5_STATUS_REG__CONTEXT_EMPTY__SHIFT 0xf +#define SDMA5_STATUS_REG__DELTA_RPTR_FULL__SHIFT 0x10 +#define SDMA5_STATUS_REG__RB_MC_RREQ_IDLE__SHIFT 0x11 +#define SDMA5_STATUS_REG__IB_MC_RREQ_IDLE__SHIFT 0x12 +#define SDMA5_STATUS_REG__MC_RD_IDLE__SHIFT 0x13 +#define 
SDMA5_STATUS_REG__DELTA_RPTR_EMPTY__SHIFT 0x14 +#define SDMA5_STATUS_REG__MC_RD_RET_STALL__SHIFT 0x15 +#define SDMA5_STATUS_REG__MC_RD_NO_POLL_IDLE__SHIFT 0x16 +#define SDMA5_STATUS_REG__PREV_CMD_IDLE__SHIFT 0x19 +#define SDMA5_STATUS_REG__SEM_IDLE__SHIFT 0x1a +#define SDMA5_STATUS_REG__SEM_REQ_STALL__SHIFT 0x1b +#define SDMA5_STATUS_REG__SEM_RESP_STATE__SHIFT 0x1c +#define SDMA5_STATUS_REG__INT_IDLE__SHIFT 0x1e +#define SDMA5_STATUS_REG__INT_REQ_STALL__SHIFT 0x1f +#define SDMA5_STATUS_REG__IDLE_MASK 0x00000001L +#define SDMA5_STATUS_REG__REG_IDLE_MASK 0x00000002L +#define SDMA5_STATUS_REG__RB_EMPTY_MASK 0x00000004L +#define SDMA5_STATUS_REG__RB_FULL_MASK 0x00000008L +#define SDMA5_STATUS_REG__RB_CMD_IDLE_MASK 0x00000010L +#define SDMA5_STATUS_REG__RB_CMD_FULL_MASK 0x00000020L +#define SDMA5_STATUS_REG__IB_CMD_IDLE_MASK 0x00000040L +#define SDMA5_STATUS_REG__IB_CMD_FULL_MASK 0x00000080L +#define SDMA5_STATUS_REG__BLOCK_IDLE_MASK 0x00000100L +#define SDMA5_STATUS_REG__INSIDE_IB_MASK 0x00000200L +#define SDMA5_STATUS_REG__EX_IDLE_MASK 0x00000400L +#define SDMA5_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE_MASK 0x00000800L +#define SDMA5_STATUS_REG__PACKET_READY_MASK 0x00001000L +#define SDMA5_STATUS_REG__MC_WR_IDLE_MASK 0x00002000L +#define SDMA5_STATUS_REG__SRBM_IDLE_MASK 0x00004000L +#define SDMA5_STATUS_REG__CONTEXT_EMPTY_MASK 0x00008000L +#define SDMA5_STATUS_REG__DELTA_RPTR_FULL_MASK 0x00010000L +#define SDMA5_STATUS_REG__RB_MC_RREQ_IDLE_MASK 0x00020000L +#define SDMA5_STATUS_REG__IB_MC_RREQ_IDLE_MASK 0x00040000L +#define SDMA5_STATUS_REG__MC_RD_IDLE_MASK 0x00080000L +#define SDMA5_STATUS_REG__DELTA_RPTR_EMPTY_MASK 0x00100000L +#define SDMA5_STATUS_REG__MC_RD_RET_STALL_MASK 0x00200000L +#define SDMA5_STATUS_REG__MC_RD_NO_POLL_IDLE_MASK 0x00400000L +#define SDMA5_STATUS_REG__PREV_CMD_IDLE_MASK 0x02000000L +#define SDMA5_STATUS_REG__SEM_IDLE_MASK 0x04000000L +#define SDMA5_STATUS_REG__SEM_REQ_STALL_MASK 0x08000000L +#define SDMA5_STATUS_REG__SEM_RESP_STATE_MASK 0x30000000L +#define SDMA5_STATUS_REG__INT_IDLE_MASK 0x40000000L +#define SDMA5_STATUS_REG__INT_REQ_STALL_MASK 0x80000000L +//SDMA5_STATUS1_REG +#define SDMA5_STATUS1_REG__CE_WREQ_IDLE__SHIFT 0x0 +#define SDMA5_STATUS1_REG__CE_WR_IDLE__SHIFT 0x1 +#define SDMA5_STATUS1_REG__CE_SPLIT_IDLE__SHIFT 0x2 +#define SDMA5_STATUS1_REG__CE_RREQ_IDLE__SHIFT 0x3 +#define SDMA5_STATUS1_REG__CE_OUT_IDLE__SHIFT 0x4 +#define SDMA5_STATUS1_REG__CE_IN_IDLE__SHIFT 0x5 +#define SDMA5_STATUS1_REG__CE_DST_IDLE__SHIFT 0x6 +#define SDMA5_STATUS1_REG__CE_CMD_IDLE__SHIFT 0x9 +#define SDMA5_STATUS1_REG__CE_AFIFO_FULL__SHIFT 0xa +#define SDMA5_STATUS1_REG__CE_INFO_FULL__SHIFT 0xd +#define SDMA5_STATUS1_REG__CE_INFO1_FULL__SHIFT 0xe +#define SDMA5_STATUS1_REG__EX_START__SHIFT 0xf +#define SDMA5_STATUS1_REG__CE_RD_STALL__SHIFT 0x11 +#define SDMA5_STATUS1_REG__CE_WR_STALL__SHIFT 0x12 +#define SDMA5_STATUS1_REG__CE_WREQ_IDLE_MASK 0x00000001L +#define SDMA5_STATUS1_REG__CE_WR_IDLE_MASK 0x00000002L +#define SDMA5_STATUS1_REG__CE_SPLIT_IDLE_MASK 0x00000004L +#define SDMA5_STATUS1_REG__CE_RREQ_IDLE_MASK 0x00000008L +#define SDMA5_STATUS1_REG__CE_OUT_IDLE_MASK 0x00000010L +#define SDMA5_STATUS1_REG__CE_IN_IDLE_MASK 0x00000020L +#define SDMA5_STATUS1_REG__CE_DST_IDLE_MASK 0x00000040L +#define SDMA5_STATUS1_REG__CE_CMD_IDLE_MASK 0x00000200L +#define SDMA5_STATUS1_REG__CE_AFIFO_FULL_MASK 0x00000400L +#define SDMA5_STATUS1_REG__CE_INFO_FULL_MASK 0x00002000L +#define SDMA5_STATUS1_REG__CE_INFO1_FULL_MASK 0x00004000L +#define SDMA5_STATUS1_REG__EX_START_MASK 0x00008000L +#define 
SDMA5_STATUS1_REG__CE_RD_STALL_MASK 0x00020000L +#define SDMA5_STATUS1_REG__CE_WR_STALL_MASK 0x00040000L +//SDMA5_RD_BURST_CNTL +#define SDMA5_RD_BURST_CNTL__RD_BURST__SHIFT 0x0 +#define SDMA5_RD_BURST_CNTL__CMD_BUFFER_RD_BURST__SHIFT 0x2 +#define SDMA5_RD_BURST_CNTL__RD_BURST_MASK 0x00000003L +#define SDMA5_RD_BURST_CNTL__CMD_BUFFER_RD_BURST_MASK 0x0000000CL +//SDMA5_HBM_PAGE_CONFIG +#define SDMA5_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT__SHIFT 0x0 +#define SDMA5_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT_MASK 0x00000001L +//SDMA5_UCODE_CHECKSUM +#define SDMA5_UCODE_CHECKSUM__DATA__SHIFT 0x0 +#define SDMA5_UCODE_CHECKSUM__DATA_MASK 0xFFFFFFFFL +//SDMA5_F32_CNTL +#define SDMA5_F32_CNTL__HALT__SHIFT 0x0 +#define SDMA5_F32_CNTL__STEP__SHIFT 0x1 +#define SDMA5_F32_CNTL__HALT_MASK 0x00000001L +#define SDMA5_F32_CNTL__STEP_MASK 0x00000002L +//SDMA5_FREEZE +#define SDMA5_FREEZE__PREEMPT__SHIFT 0x0 +#define SDMA5_FREEZE__FREEZE__SHIFT 0x4 +#define SDMA5_FREEZE__FROZEN__SHIFT 0x5 +#define SDMA5_FREEZE__F32_FREEZE__SHIFT 0x6 +#define SDMA5_FREEZE__PREEMPT_MASK 0x00000001L +#define SDMA5_FREEZE__FREEZE_MASK 0x00000010L +#define SDMA5_FREEZE__FROZEN_MASK 0x00000020L +#define SDMA5_FREEZE__F32_FREEZE_MASK 0x00000040L +//SDMA5_PHASE0_QUANTUM +#define SDMA5_PHASE0_QUANTUM__UNIT__SHIFT 0x0 +#define SDMA5_PHASE0_QUANTUM__VALUE__SHIFT 0x8 +#define SDMA5_PHASE0_QUANTUM__PREFER__SHIFT 0x1e +#define SDMA5_PHASE0_QUANTUM__UNIT_MASK 0x0000000FL +#define SDMA5_PHASE0_QUANTUM__VALUE_MASK 0x00FFFF00L +#define SDMA5_PHASE0_QUANTUM__PREFER_MASK 0x40000000L +//SDMA5_PHASE1_QUANTUM +#define SDMA5_PHASE1_QUANTUM__UNIT__SHIFT 0x0 +#define SDMA5_PHASE1_QUANTUM__VALUE__SHIFT 0x8 +#define SDMA5_PHASE1_QUANTUM__PREFER__SHIFT 0x1e +#define SDMA5_PHASE1_QUANTUM__UNIT_MASK 0x0000000FL +#define SDMA5_PHASE1_QUANTUM__VALUE_MASK 0x00FFFF00L +#define SDMA5_PHASE1_QUANTUM__PREFER_MASK 0x40000000L +//SDMA5_EDC_CONFIG +#define SDMA5_EDC_CONFIG__DIS_EDC__SHIFT 0x1 +#define SDMA5_EDC_CONFIG__ECC_INT_ENABLE__SHIFT 0x2 +#define SDMA5_EDC_CONFIG__DIS_EDC_MASK 0x00000002L +#define SDMA5_EDC_CONFIG__ECC_INT_ENABLE_MASK 0x00000004L +//SDMA5_BA_THRESHOLD +#define SDMA5_BA_THRESHOLD__READ_THRES__SHIFT 0x0 +#define SDMA5_BA_THRESHOLD__WRITE_THRES__SHIFT 0x10 +#define SDMA5_BA_THRESHOLD__READ_THRES_MASK 0x000003FFL +#define SDMA5_BA_THRESHOLD__WRITE_THRES_MASK 0x03FF0000L +//SDMA5_ID +#define SDMA5_ID__DEVICE_ID__SHIFT 0x0 +#define SDMA5_ID__DEVICE_ID_MASK 0x000000FFL +//SDMA5_VERSION +#define SDMA5_VERSION__MINVER__SHIFT 0x0 +#define SDMA5_VERSION__MAJVER__SHIFT 0x8 +#define SDMA5_VERSION__REV__SHIFT 0x10 +#define SDMA5_VERSION__MINVER_MASK 0x0000007FL +#define SDMA5_VERSION__MAJVER_MASK 0x00007F00L +#define SDMA5_VERSION__REV_MASK 0x003F0000L +//SDMA5_EDC_COUNTER +#define SDMA5_EDC_COUNTER__SDMA_UCODE_BUF_SED__SHIFT 0x0 +#define SDMA5_EDC_COUNTER__SDMA_RB_CMD_BUF_SED__SHIFT 0x2 +#define SDMA5_EDC_COUNTER__SDMA_IB_CMD_BUF_SED__SHIFT 0x3 +#define SDMA5_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED__SHIFT 0x4 +#define SDMA5_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED__SHIFT 0x5 +#define SDMA5_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED__SHIFT 0x6 +#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED__SHIFT 0x7 +#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED__SHIFT 0x8 +#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED__SHIFT 0x9 +#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED__SHIFT 0xa +#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED__SHIFT 0xb +#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED__SHIFT 0xc +#define 
SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED__SHIFT 0xd +#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED__SHIFT 0xe +#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED__SHIFT 0xf +#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED__SHIFT 0x10 +#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED__SHIFT 0x11 +#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED__SHIFT 0x12 +#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED__SHIFT 0x13 +#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED__SHIFT 0x14 +#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED__SHIFT 0x15 +#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED__SHIFT 0x16 +#define SDMA5_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED__SHIFT 0x17 +#define SDMA5_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED__SHIFT 0x18 +#define SDMA5_EDC_COUNTER__SDMA_UCODE_BUF_SED_MASK 0x00000001L +#define SDMA5_EDC_COUNTER__SDMA_RB_CMD_BUF_SED_MASK 0x00000004L +#define SDMA5_EDC_COUNTER__SDMA_IB_CMD_BUF_SED_MASK 0x00000008L +#define SDMA5_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED_MASK 0x00000010L +#define SDMA5_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED_MASK 0x00000020L +#define SDMA5_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED_MASK 0x00000040L +#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED_MASK 0x00000080L +#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED_MASK 0x00000100L +#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED_MASK 0x00000200L +#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED_MASK 0x00000400L +#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED_MASK 0x00000800L +#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED_MASK 0x00001000L +#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED_MASK 0x00002000L +#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED_MASK 0x00004000L +#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED_MASK 0x00008000L +#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED_MASK 0x00010000L +#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED_MASK 0x00020000L +#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED_MASK 0x00040000L +#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED_MASK 0x00080000L +#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED_MASK 0x00100000L +#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED_MASK 0x00200000L +#define SDMA5_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED_MASK 0x00400000L +#define SDMA5_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED_MASK 0x00800000L +#define SDMA5_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED_MASK 0x01000000L +//SDMA5_EDC_COUNTER_CLEAR +#define SDMA5_EDC_COUNTER_CLEAR__DUMMY__SHIFT 0x0 +#define SDMA5_EDC_COUNTER_CLEAR__DUMMY_MASK 0x00000001L +//SDMA5_STATUS2_REG +#define SDMA5_STATUS2_REG__ID__SHIFT 0x0 +#define SDMA5_STATUS2_REG__F32_INSTR_PTR__SHIFT 0x3 +#define SDMA5_STATUS2_REG__CMD_OP__SHIFT 0x10 +#define SDMA5_STATUS2_REG__ID_MASK 0x00000007L +#define SDMA5_STATUS2_REG__F32_INSTR_PTR_MASK 0x0000FFF8L +#define SDMA5_STATUS2_REG__CMD_OP_MASK 0xFFFF0000L +//SDMA5_ATOMIC_CNTL +#define SDMA5_ATOMIC_CNTL__LOOP_TIMER__SHIFT 0x0 +#define SDMA5_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE__SHIFT 0x1f +#define SDMA5_ATOMIC_CNTL__LOOP_TIMER_MASK 0x7FFFFFFFL +#define SDMA5_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE_MASK 0x80000000L +//SDMA5_ATOMIC_PREOP_LO +#define SDMA5_ATOMIC_PREOP_LO__DATA__SHIFT 0x0 +#define SDMA5_ATOMIC_PREOP_LO__DATA_MASK 0xFFFFFFFFL +//SDMA5_ATOMIC_PREOP_HI +#define SDMA5_ATOMIC_PREOP_HI__DATA__SHIFT 0x0 +#define SDMA5_ATOMIC_PREOP_HI__DATA_MASK 0xFFFFFFFFL +//SDMA5_UTCL1_CNTL +#define SDMA5_UTCL1_CNTL__REDO_ENABLE__SHIFT 0x0 +#define SDMA5_UTCL1_CNTL__REDO_DELAY__SHIFT 0x1 +#define 
SDMA5_UTCL1_CNTL__REDO_WATERMK__SHIFT 0xb +#define SDMA5_UTCL1_CNTL__INVACK_DELAY__SHIFT 0xe +#define SDMA5_UTCL1_CNTL__REQL2_CREDIT__SHIFT 0x18 +#define SDMA5_UTCL1_CNTL__VADDR_WATERMK__SHIFT 0x1d +#define SDMA5_UTCL1_CNTL__REDO_ENABLE_MASK 0x00000001L +#define SDMA5_UTCL1_CNTL__REDO_DELAY_MASK 0x000007FEL +#define SDMA5_UTCL1_CNTL__REDO_WATERMK_MASK 0x00003800L +#define SDMA5_UTCL1_CNTL__INVACK_DELAY_MASK 0x00FFC000L +#define SDMA5_UTCL1_CNTL__REQL2_CREDIT_MASK 0x1F000000L +#define SDMA5_UTCL1_CNTL__VADDR_WATERMK_MASK 0xE0000000L +//SDMA5_UTCL1_WATERMK +#define SDMA5_UTCL1_WATERMK__REQMC_WATERMK__SHIFT 0x0 +#define SDMA5_UTCL1_WATERMK__REQPG_WATERMK__SHIFT 0x9 +#define SDMA5_UTCL1_WATERMK__INVREQ_WATERMK__SHIFT 0x11 +#define SDMA5_UTCL1_WATERMK__XNACK_WATERMK__SHIFT 0x19 +#define SDMA5_UTCL1_WATERMK__REQMC_WATERMK_MASK 0x000001FFL +#define SDMA5_UTCL1_WATERMK__REQPG_WATERMK_MASK 0x0001FE00L +#define SDMA5_UTCL1_WATERMK__INVREQ_WATERMK_MASK 0x01FE0000L +#define SDMA5_UTCL1_WATERMK__XNACK_WATERMK_MASK 0xFE000000L +//SDMA5_UTCL1_RD_STATUS +#define SDMA5_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0 +#define SDMA5_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1 +#define SDMA5_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2 +#define SDMA5_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3 +#define SDMA5_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4 +#define SDMA5_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5 +#define SDMA5_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6 +#define SDMA5_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7 +#define SDMA5_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8 +#define SDMA5_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9 +#define SDMA5_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa +#define SDMA5_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb +#define SDMA5_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc +#define SDMA5_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd +#define SDMA5_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe +#define SDMA5_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf +#define SDMA5_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10 +#define SDMA5_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11 +#define SDMA5_UTCL1_RD_STATUS__PAGE_FAULT__SHIFT 0x12 +#define SDMA5_UTCL1_RD_STATUS__PAGE_NULL__SHIFT 0x13 +#define SDMA5_UTCL1_RD_STATUS__REQL2_IDLE__SHIFT 0x14 +#define SDMA5_UTCL1_RD_STATUS__CE_L1_STALL__SHIFT 0x15 +#define SDMA5_UTCL1_RD_STATUS__NEXT_RD_VECTOR__SHIFT 0x16 +#define SDMA5_UTCL1_RD_STATUS__MERGE_STATE__SHIFT 0x1a +#define SDMA5_UTCL1_RD_STATUS__ADDR_RD_RTR__SHIFT 0x1d +#define SDMA5_UTCL1_RD_STATUS__WPTR_POLLING__SHIFT 0x1e +#define SDMA5_UTCL1_RD_STATUS__INVREQ_SIZE__SHIFT 0x1f +#define SDMA5_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L +#define SDMA5_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L +#define SDMA5_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L +#define SDMA5_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L +#define SDMA5_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L +#define SDMA5_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L +#define SDMA5_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L +#define SDMA5_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L +#define SDMA5_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L +#define SDMA5_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L +#define SDMA5_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L +#define 
SDMA5_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L +#define SDMA5_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L +#define SDMA5_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L +#define SDMA5_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L +#define SDMA5_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L +#define SDMA5_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L +#define SDMA5_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L +#define SDMA5_UTCL1_RD_STATUS__PAGE_FAULT_MASK 0x00040000L +#define SDMA5_UTCL1_RD_STATUS__PAGE_NULL_MASK 0x00080000L +#define SDMA5_UTCL1_RD_STATUS__REQL2_IDLE_MASK 0x00100000L +#define SDMA5_UTCL1_RD_STATUS__CE_L1_STALL_MASK 0x00200000L +#define SDMA5_UTCL1_RD_STATUS__NEXT_RD_VECTOR_MASK 0x03C00000L +#define SDMA5_UTCL1_RD_STATUS__MERGE_STATE_MASK 0x1C000000L +#define SDMA5_UTCL1_RD_STATUS__ADDR_RD_RTR_MASK 0x20000000L +#define SDMA5_UTCL1_RD_STATUS__WPTR_POLLING_MASK 0x40000000L +#define SDMA5_UTCL1_RD_STATUS__INVREQ_SIZE_MASK 0x80000000L +//SDMA5_UTCL1_WR_STATUS +#define SDMA5_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0 +#define SDMA5_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1 +#define SDMA5_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2 +#define SDMA5_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3 +#define SDMA5_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4 +#define SDMA5_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5 +#define SDMA5_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6 +#define SDMA5_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7 +#define SDMA5_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8 +#define SDMA5_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9 +#define SDMA5_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa +#define SDMA5_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb +#define SDMA5_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc +#define SDMA5_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd +#define SDMA5_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe +#define SDMA5_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf +#define SDMA5_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10 +#define SDMA5_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11 +#define SDMA5_UTCL1_WR_STATUS__PAGE_FAULT__SHIFT 0x12 +#define SDMA5_UTCL1_WR_STATUS__PAGE_NULL__SHIFT 0x13 +#define SDMA5_UTCL1_WR_STATUS__REQL2_IDLE__SHIFT 0x14 +#define SDMA5_UTCL1_WR_STATUS__F32_WR_RTR__SHIFT 0x15 +#define SDMA5_UTCL1_WR_STATUS__NEXT_WR_VECTOR__SHIFT 0x16 +#define SDMA5_UTCL1_WR_STATUS__MERGE_STATE__SHIFT 0x19 +#define SDMA5_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY__SHIFT 0x1c +#define SDMA5_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL__SHIFT 0x1d +#define SDMA5_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY__SHIFT 0x1e +#define SDMA5_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL__SHIFT 0x1f +#define SDMA5_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L +#define SDMA5_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L +#define SDMA5_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L +#define SDMA5_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L +#define SDMA5_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L +#define SDMA5_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L +#define SDMA5_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L +#define SDMA5_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L +#define SDMA5_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L +#define SDMA5_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L +#define 
SDMA5_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L +#define SDMA5_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L +#define SDMA5_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L +#define SDMA5_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L +#define SDMA5_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L +#define SDMA5_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L +#define SDMA5_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L +#define SDMA5_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L +#define SDMA5_UTCL1_WR_STATUS__PAGE_FAULT_MASK 0x00040000L +#define SDMA5_UTCL1_WR_STATUS__PAGE_NULL_MASK 0x00080000L +#define SDMA5_UTCL1_WR_STATUS__REQL2_IDLE_MASK 0x00100000L +#define SDMA5_UTCL1_WR_STATUS__F32_WR_RTR_MASK 0x00200000L +#define SDMA5_UTCL1_WR_STATUS__NEXT_WR_VECTOR_MASK 0x01C00000L +#define SDMA5_UTCL1_WR_STATUS__MERGE_STATE_MASK 0x0E000000L +#define SDMA5_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY_MASK 0x10000000L +#define SDMA5_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL_MASK 0x20000000L +#define SDMA5_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY_MASK 0x40000000L +#define SDMA5_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL_MASK 0x80000000L +//SDMA5_UTCL1_INV0 +#define SDMA5_UTCL1_INV0__INV_MIDDLE__SHIFT 0x0 +#define SDMA5_UTCL1_INV0__RD_TIMEOUT__SHIFT 0x1 +#define SDMA5_UTCL1_INV0__WR_TIMEOUT__SHIFT 0x2 +#define SDMA5_UTCL1_INV0__RD_IN_INVADR__SHIFT 0x3 +#define SDMA5_UTCL1_INV0__WR_IN_INVADR__SHIFT 0x4 +#define SDMA5_UTCL1_INV0__PAGE_NULL_SW__SHIFT 0x5 +#define SDMA5_UTCL1_INV0__XNACK_IS_INVADR__SHIFT 0x6 +#define SDMA5_UTCL1_INV0__INVREQ_ENABLE__SHIFT 0x7 +#define SDMA5_UTCL1_INV0__NACK_TIMEOUT_SW__SHIFT 0x8 +#define SDMA5_UTCL1_INV0__NFLUSH_INV_IDLE__SHIFT 0x9 +#define SDMA5_UTCL1_INV0__FLUSH_INV_IDLE__SHIFT 0xa +#define SDMA5_UTCL1_INV0__INV_FLUSHTYPE__SHIFT 0xb +#define SDMA5_UTCL1_INV0__INV_VMID_VEC__SHIFT 0xc +#define SDMA5_UTCL1_INV0__INV_ADDR_HI__SHIFT 0x1c +#define SDMA5_UTCL1_INV0__INV_MIDDLE_MASK 0x00000001L +#define SDMA5_UTCL1_INV0__RD_TIMEOUT_MASK 0x00000002L +#define SDMA5_UTCL1_INV0__WR_TIMEOUT_MASK 0x00000004L +#define SDMA5_UTCL1_INV0__RD_IN_INVADR_MASK 0x00000008L +#define SDMA5_UTCL1_INV0__WR_IN_INVADR_MASK 0x00000010L +#define SDMA5_UTCL1_INV0__PAGE_NULL_SW_MASK 0x00000020L +#define SDMA5_UTCL1_INV0__XNACK_IS_INVADR_MASK 0x00000040L +#define SDMA5_UTCL1_INV0__INVREQ_ENABLE_MASK 0x00000080L +#define SDMA5_UTCL1_INV0__NACK_TIMEOUT_SW_MASK 0x00000100L +#define SDMA5_UTCL1_INV0__NFLUSH_INV_IDLE_MASK 0x00000200L +#define SDMA5_UTCL1_INV0__FLUSH_INV_IDLE_MASK 0x00000400L +#define SDMA5_UTCL1_INV0__INV_FLUSHTYPE_MASK 0x00000800L +#define SDMA5_UTCL1_INV0__INV_VMID_VEC_MASK 0x0FFFF000L +#define SDMA5_UTCL1_INV0__INV_ADDR_HI_MASK 0xF0000000L +//SDMA5_UTCL1_INV1 +#define SDMA5_UTCL1_INV1__INV_ADDR_LO__SHIFT 0x0 +#define SDMA5_UTCL1_INV1__INV_ADDR_LO_MASK 0xFFFFFFFFL +//SDMA5_UTCL1_INV2 +#define SDMA5_UTCL1_INV2__INV_NFLUSH_VMID_VEC__SHIFT 0x0 +#define SDMA5_UTCL1_INV2__INV_NFLUSH_VMID_VEC_MASK 0xFFFFFFFFL +//SDMA5_UTCL1_RD_XNACK0 +#define SDMA5_UTCL1_RD_XNACK0__XNACK_ADDR_LO__SHIFT 0x0 +#define SDMA5_UTCL1_RD_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL +//SDMA5_UTCL1_RD_XNACK1 +#define SDMA5_UTCL1_RD_XNACK1__XNACK_ADDR_HI__SHIFT 0x0 +#define SDMA5_UTCL1_RD_XNACK1__XNACK_VMID__SHIFT 0x4 +#define SDMA5_UTCL1_RD_XNACK1__XNACK_VECTOR__SHIFT 0x8 +#define SDMA5_UTCL1_RD_XNACK1__IS_XNACK__SHIFT 0x1a +#define SDMA5_UTCL1_RD_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL +#define SDMA5_UTCL1_RD_XNACK1__XNACK_VMID_MASK 0x000000F0L +#define 
SDMA5_UTCL1_RD_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L +#define SDMA5_UTCL1_RD_XNACK1__IS_XNACK_MASK 0x0C000000L +//SDMA5_UTCL1_WR_XNACK0 +#define SDMA5_UTCL1_WR_XNACK0__XNACK_ADDR_LO__SHIFT 0x0 +#define SDMA5_UTCL1_WR_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL +//SDMA5_UTCL1_WR_XNACK1 +#define SDMA5_UTCL1_WR_XNACK1__XNACK_ADDR_HI__SHIFT 0x0 +#define SDMA5_UTCL1_WR_XNACK1__XNACK_VMID__SHIFT 0x4 +#define SDMA5_UTCL1_WR_XNACK1__XNACK_VECTOR__SHIFT 0x8 +#define SDMA5_UTCL1_WR_XNACK1__IS_XNACK__SHIFT 0x1a +#define SDMA5_UTCL1_WR_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL +#define SDMA5_UTCL1_WR_XNACK1__XNACK_VMID_MASK 0x000000F0L +#define SDMA5_UTCL1_WR_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L +#define SDMA5_UTCL1_WR_XNACK1__IS_XNACK_MASK 0x0C000000L +//SDMA5_UTCL1_TIMEOUT +#define SDMA5_UTCL1_TIMEOUT__RD_XNACK_LIMIT__SHIFT 0x0 +#define SDMA5_UTCL1_TIMEOUT__WR_XNACK_LIMIT__SHIFT 0x10 +#define SDMA5_UTCL1_TIMEOUT__RD_XNACK_LIMIT_MASK 0x0000FFFFL +#define SDMA5_UTCL1_TIMEOUT__WR_XNACK_LIMIT_MASK 0xFFFF0000L +//SDMA5_UTCL1_PAGE +#define SDMA5_UTCL1_PAGE__VM_HOLE__SHIFT 0x0 +#define SDMA5_UTCL1_PAGE__REQ_TYPE__SHIFT 0x1 +#define SDMA5_UTCL1_PAGE__USE_MTYPE__SHIFT 0x6 +#define SDMA5_UTCL1_PAGE__USE_PT_SNOOP__SHIFT 0x9 +#define SDMA5_UTCL1_PAGE__VM_HOLE_MASK 0x00000001L +#define SDMA5_UTCL1_PAGE__REQ_TYPE_MASK 0x0000001EL +#define SDMA5_UTCL1_PAGE__USE_MTYPE_MASK 0x000001C0L +#define SDMA5_UTCL1_PAGE__USE_PT_SNOOP_MASK 0x00000200L +//SDMA5_POWER_CNTL_IDLE +#define SDMA5_POWER_CNTL_IDLE__DELAY0__SHIFT 0x0 +#define SDMA5_POWER_CNTL_IDLE__DELAY1__SHIFT 0x10 +#define SDMA5_POWER_CNTL_IDLE__DELAY2__SHIFT 0x18 +#define SDMA5_POWER_CNTL_IDLE__DELAY0_MASK 0x0000FFFFL +#define SDMA5_POWER_CNTL_IDLE__DELAY1_MASK 0x00FF0000L +#define SDMA5_POWER_CNTL_IDLE__DELAY2_MASK 0xFF000000L +//SDMA5_RELAX_ORDERING_LUT +#define SDMA5_RELAX_ORDERING_LUT__RESERVED0__SHIFT 0x0 +#define SDMA5_RELAX_ORDERING_LUT__COPY__SHIFT 0x1 +#define SDMA5_RELAX_ORDERING_LUT__WRITE__SHIFT 0x2 +#define SDMA5_RELAX_ORDERING_LUT__RESERVED3__SHIFT 0x3 +#define SDMA5_RELAX_ORDERING_LUT__RESERVED4__SHIFT 0x4 +#define SDMA5_RELAX_ORDERING_LUT__FENCE__SHIFT 0x5 +#define SDMA5_RELAX_ORDERING_LUT__RESERVED76__SHIFT 0x6 +#define SDMA5_RELAX_ORDERING_LUT__POLL_MEM__SHIFT 0x8 +#define SDMA5_RELAX_ORDERING_LUT__COND_EXE__SHIFT 0x9 +#define SDMA5_RELAX_ORDERING_LUT__ATOMIC__SHIFT 0xa +#define SDMA5_RELAX_ORDERING_LUT__CONST_FILL__SHIFT 0xb +#define SDMA5_RELAX_ORDERING_LUT__PTEPDE__SHIFT 0xc +#define SDMA5_RELAX_ORDERING_LUT__TIMESTAMP__SHIFT 0xd +#define SDMA5_RELAX_ORDERING_LUT__RESERVED__SHIFT 0xe +#define SDMA5_RELAX_ORDERING_LUT__WORLD_SWITCH__SHIFT 0x1b +#define SDMA5_RELAX_ORDERING_LUT__RPTR_WRB__SHIFT 0x1c +#define SDMA5_RELAX_ORDERING_LUT__WPTR_POLL__SHIFT 0x1d +#define SDMA5_RELAX_ORDERING_LUT__IB_FETCH__SHIFT 0x1e +#define SDMA5_RELAX_ORDERING_LUT__RB_FETCH__SHIFT 0x1f +#define SDMA5_RELAX_ORDERING_LUT__RESERVED0_MASK 0x00000001L +#define SDMA5_RELAX_ORDERING_LUT__COPY_MASK 0x00000002L +#define SDMA5_RELAX_ORDERING_LUT__WRITE_MASK 0x00000004L +#define SDMA5_RELAX_ORDERING_LUT__RESERVED3_MASK 0x00000008L +#define SDMA5_RELAX_ORDERING_LUT__RESERVED4_MASK 0x00000010L +#define SDMA5_RELAX_ORDERING_LUT__FENCE_MASK 0x00000020L +#define SDMA5_RELAX_ORDERING_LUT__RESERVED76_MASK 0x000000C0L +#define SDMA5_RELAX_ORDERING_LUT__POLL_MEM_MASK 0x00000100L +#define SDMA5_RELAX_ORDERING_LUT__COND_EXE_MASK 0x00000200L +#define SDMA5_RELAX_ORDERING_LUT__ATOMIC_MASK 0x00000400L +#define SDMA5_RELAX_ORDERING_LUT__CONST_FILL_MASK 0x00000800L +#define 
SDMA5_RELAX_ORDERING_LUT__PTEPDE_MASK 0x00001000L +#define SDMA5_RELAX_ORDERING_LUT__TIMESTAMP_MASK 0x00002000L +#define SDMA5_RELAX_ORDERING_LUT__RESERVED_MASK 0x07FFC000L +#define SDMA5_RELAX_ORDERING_LUT__WORLD_SWITCH_MASK 0x08000000L +#define SDMA5_RELAX_ORDERING_LUT__RPTR_WRB_MASK 0x10000000L +#define SDMA5_RELAX_ORDERING_LUT__WPTR_POLL_MASK 0x20000000L +#define SDMA5_RELAX_ORDERING_LUT__IB_FETCH_MASK 0x40000000L +#define SDMA5_RELAX_ORDERING_LUT__RB_FETCH_MASK 0x80000000L +//SDMA5_CHICKEN_BITS_2 +#define SDMA5_CHICKEN_BITS_2__F32_CMD_PROC_DELAY__SHIFT 0x0 +#define SDMA5_CHICKEN_BITS_2__F32_CMD_PROC_DELAY_MASK 0x0000000FL +//SDMA5_STATUS3_REG +#define SDMA5_STATUS3_REG__CMD_OP_STATUS__SHIFT 0x0 +#define SDMA5_STATUS3_REG__PREV_VM_CMD__SHIFT 0x10 +#define SDMA5_STATUS3_REG__EXCEPTION_IDLE__SHIFT 0x14 +#define SDMA5_STATUS3_REG__QUEUE_ID_MATCH__SHIFT 0x15 +#define SDMA5_STATUS3_REG__INT_QUEUE_ID__SHIFT 0x16 +#define SDMA5_STATUS3_REG__CMD_OP_STATUS_MASK 0x0000FFFFL +#define SDMA5_STATUS3_REG__PREV_VM_CMD_MASK 0x000F0000L +#define SDMA5_STATUS3_REG__EXCEPTION_IDLE_MASK 0x00100000L +#define SDMA5_STATUS3_REG__QUEUE_ID_MATCH_MASK 0x00200000L +#define SDMA5_STATUS3_REG__INT_QUEUE_ID_MASK 0x03C00000L +//SDMA5_PHYSICAL_ADDR_LO +#define SDMA5_PHYSICAL_ADDR_LO__D_VALID__SHIFT 0x0 +#define SDMA5_PHYSICAL_ADDR_LO__DIRTY__SHIFT 0x1 +#define SDMA5_PHYSICAL_ADDR_LO__PHY_VALID__SHIFT 0x2 +#define SDMA5_PHYSICAL_ADDR_LO__ADDR__SHIFT 0xc +#define SDMA5_PHYSICAL_ADDR_LO__D_VALID_MASK 0x00000001L +#define SDMA5_PHYSICAL_ADDR_LO__DIRTY_MASK 0x00000002L +#define SDMA5_PHYSICAL_ADDR_LO__PHY_VALID_MASK 0x00000004L +#define SDMA5_PHYSICAL_ADDR_LO__ADDR_MASK 0xFFFFF000L +//SDMA5_PHYSICAL_ADDR_HI +#define SDMA5_PHYSICAL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA5_PHYSICAL_ADDR_HI__ADDR_MASK 0x0000FFFFL +//SDMA5_PHASE2_QUANTUM +#define SDMA5_PHASE2_QUANTUM__UNIT__SHIFT 0x0 +#define SDMA5_PHASE2_QUANTUM__VALUE__SHIFT 0x8 +#define SDMA5_PHASE2_QUANTUM__PREFER__SHIFT 0x1e +#define SDMA5_PHASE2_QUANTUM__UNIT_MASK 0x0000000FL +#define SDMA5_PHASE2_QUANTUM__VALUE_MASK 0x00FFFF00L +#define SDMA5_PHASE2_QUANTUM__PREFER_MASK 0x40000000L +//SDMA5_ERROR_LOG +#define SDMA5_ERROR_LOG__OVERRIDE__SHIFT 0x0 +#define SDMA5_ERROR_LOG__STATUS__SHIFT 0x10 +#define SDMA5_ERROR_LOG__OVERRIDE_MASK 0x0000FFFFL +#define SDMA5_ERROR_LOG__STATUS_MASK 0xFFFF0000L +//SDMA5_PUB_DUMMY_REG0 +#define SDMA5_PUB_DUMMY_REG0__VALUE__SHIFT 0x0 +#define SDMA5_PUB_DUMMY_REG0__VALUE_MASK 0xFFFFFFFFL +//SDMA5_PUB_DUMMY_REG1 +#define SDMA5_PUB_DUMMY_REG1__VALUE__SHIFT 0x0 +#define SDMA5_PUB_DUMMY_REG1__VALUE_MASK 0xFFFFFFFFL +//SDMA5_PUB_DUMMY_REG2 +#define SDMA5_PUB_DUMMY_REG2__VALUE__SHIFT 0x0 +#define SDMA5_PUB_DUMMY_REG2__VALUE_MASK 0xFFFFFFFFL +//SDMA5_PUB_DUMMY_REG3 +#define SDMA5_PUB_DUMMY_REG3__VALUE__SHIFT 0x0 +#define SDMA5_PUB_DUMMY_REG3__VALUE_MASK 0xFFFFFFFFL +//SDMA5_F32_COUNTER +#define SDMA5_F32_COUNTER__VALUE__SHIFT 0x0 +#define SDMA5_F32_COUNTER__VALUE_MASK 0xFFFFFFFFL +//SDMA5_UNBREAKABLE +#define SDMA5_UNBREAKABLE__VALUE__SHIFT 0x0 +#define SDMA5_UNBREAKABLE__VALUE_MASK 0x00000001L +//SDMA5_PERFMON_CNTL +#define SDMA5_PERFMON_CNTL__PERF_ENABLE0__SHIFT 0x0 +#define SDMA5_PERFMON_CNTL__PERF_CLEAR0__SHIFT 0x1 +#define SDMA5_PERFMON_CNTL__PERF_SEL0__SHIFT 0x2 +#define SDMA5_PERFMON_CNTL__PERF_ENABLE1__SHIFT 0xa +#define SDMA5_PERFMON_CNTL__PERF_CLEAR1__SHIFT 0xb +#define SDMA5_PERFMON_CNTL__PERF_SEL1__SHIFT 0xc +#define SDMA5_PERFMON_CNTL__PERF_ENABLE0_MASK 0x00000001L +#define SDMA5_PERFMON_CNTL__PERF_CLEAR0_MASK 0x00000002L +#define 
SDMA5_PERFMON_CNTL__PERF_SEL0_MASK 0x000003FCL +#define SDMA5_PERFMON_CNTL__PERF_ENABLE1_MASK 0x00000400L +#define SDMA5_PERFMON_CNTL__PERF_CLEAR1_MASK 0x00000800L +#define SDMA5_PERFMON_CNTL__PERF_SEL1_MASK 0x000FF000L +//SDMA5_PERFCOUNTER0_RESULT +#define SDMA5_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT 0x0 +#define SDMA5_PERFCOUNTER0_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL +//SDMA5_PERFCOUNTER1_RESULT +#define SDMA5_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT 0x0 +#define SDMA5_PERFCOUNTER1_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL +//SDMA5_PERFCOUNTER_TAG_DELAY_RANGE +#define SDMA5_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW__SHIFT 0x0 +#define SDMA5_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH__SHIFT 0xe +#define SDMA5_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW__SHIFT 0x1c +#define SDMA5_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW_MASK 0x00003FFFL +#define SDMA5_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH_MASK 0x0FFFC000L +#define SDMA5_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW_MASK 0x10000000L +//SDMA5_CRD_CNTL +#define SDMA5_CRD_CNTL__MC_WRREQ_CREDIT__SHIFT 0x7 +#define SDMA5_CRD_CNTL__MC_RDREQ_CREDIT__SHIFT 0xd +#define SDMA5_CRD_CNTL__MC_WRREQ_CREDIT_MASK 0x00001F80L +#define SDMA5_CRD_CNTL__MC_RDREQ_CREDIT_MASK 0x0007E000L +//SDMA5_GPU_IOV_VIOLATION_LOG +#define SDMA5_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0 +#define SDMA5_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1 +#define SDMA5_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2 +#define SDMA5_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION__SHIFT 0x14 +#define SDMA5_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x15 +#define SDMA5_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x16 +#define SDMA5_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L +#define SDMA5_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L +#define SDMA5_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x000FFFFCL +#define SDMA5_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION_MASK 0x00100000L +#define SDMA5_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00200000L +#define SDMA5_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x03C00000L +//SDMA5_ULV_CNTL +#define SDMA5_ULV_CNTL__HYSTERESIS__SHIFT 0x0 +#define SDMA5_ULV_CNTL__ENTER_ULV_INT_CLR__SHIFT 0x1b +#define SDMA5_ULV_CNTL__EXIT_ULV_INT_CLR__SHIFT 0x1c +#define SDMA5_ULV_CNTL__ENTER_ULV_INT__SHIFT 0x1d +#define SDMA5_ULV_CNTL__EXIT_ULV_INT__SHIFT 0x1e +#define SDMA5_ULV_CNTL__ULV_STATUS__SHIFT 0x1f +#define SDMA5_ULV_CNTL__HYSTERESIS_MASK 0x0000001FL +#define SDMA5_ULV_CNTL__ENTER_ULV_INT_CLR_MASK 0x08000000L +#define SDMA5_ULV_CNTL__EXIT_ULV_INT_CLR_MASK 0x10000000L +#define SDMA5_ULV_CNTL__ENTER_ULV_INT_MASK 0x20000000L +#define SDMA5_ULV_CNTL__EXIT_ULV_INT_MASK 0x40000000L +#define SDMA5_ULV_CNTL__ULV_STATUS_MASK 0x80000000L +//SDMA5_EA_DBIT_ADDR_DATA +#define SDMA5_EA_DBIT_ADDR_DATA__VALUE__SHIFT 0x0 +#define SDMA5_EA_DBIT_ADDR_DATA__VALUE_MASK 0xFFFFFFFFL +//SDMA5_EA_DBIT_ADDR_INDEX +#define SDMA5_EA_DBIT_ADDR_INDEX__VALUE__SHIFT 0x0 +#define SDMA5_EA_DBIT_ADDR_INDEX__VALUE_MASK 0x00000007L +//SDMA5_GPU_IOV_VIOLATION_LOG2 +#define SDMA5_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID__SHIFT 0x0 +#define SDMA5_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID_MASK 0x000000FFL +//SDMA5_GFX_RB_CNTL +#define SDMA5_GFX_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA5_GFX_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA5_GFX_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA5_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA5_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA5_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA5_GFX_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define 
SDMA5_GFX_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA5_GFX_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA5_GFX_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA5_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA5_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA5_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA5_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA5_GFX_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA5_GFX_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA5_GFX_RB_BASE +#define SDMA5_GFX_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA5_GFX_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA5_GFX_RB_BASE_HI +#define SDMA5_GFX_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA5_GFX_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA5_GFX_RB_RPTR +#define SDMA5_GFX_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA5_GFX_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA5_GFX_RB_RPTR_HI +#define SDMA5_GFX_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA5_GFX_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA5_GFX_RB_WPTR +#define SDMA5_GFX_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA5_GFX_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA5_GFX_RB_WPTR_HI +#define SDMA5_GFX_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA5_GFX_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA5_GFX_RB_WPTR_POLL_CNTL +#define SDMA5_GFX_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA5_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA5_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA5_GFX_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA5_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA5_GFX_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA5_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA5_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA5_GFX_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA5_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA5_GFX_RB_RPTR_ADDR_HI +#define SDMA5_GFX_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA5_GFX_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA5_GFX_RB_RPTR_ADDR_LO +#define SDMA5_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA5_GFX_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA5_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA5_GFX_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA5_GFX_IB_CNTL +#define SDMA5_GFX_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA5_GFX_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA5_GFX_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA5_GFX_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA5_GFX_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA5_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA5_GFX_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA5_GFX_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA5_GFX_IB_RPTR +#define SDMA5_GFX_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA5_GFX_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA5_GFX_IB_OFFSET +#define SDMA5_GFX_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA5_GFX_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA5_GFX_IB_BASE_LO +#define SDMA5_GFX_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA5_GFX_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA5_GFX_IB_BASE_HI +#define SDMA5_GFX_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA5_GFX_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA5_GFX_IB_SIZE +#define SDMA5_GFX_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA5_GFX_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA5_GFX_SKIP_CNTL +#define SDMA5_GFX_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA5_GFX_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL 
+//SDMA5_GFX_CONTEXT_STATUS +#define SDMA5_GFX_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA5_GFX_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA5_GFX_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA5_GFX_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA5_GFX_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA5_GFX_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA5_GFX_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA5_GFX_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA5_GFX_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA5_GFX_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA5_GFX_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA5_GFX_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA5_GFX_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA5_GFX_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA5_GFX_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA5_GFX_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA5_GFX_DOORBELL +#define SDMA5_GFX_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA5_GFX_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA5_GFX_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA5_GFX_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA5_GFX_CONTEXT_CNTL +#define SDMA5_GFX_CONTEXT_CNTL__RESUME_CTX__SHIFT 0x10 +#define SDMA5_GFX_CONTEXT_CNTL__RESUME_CTX_MASK 0x00010000L +//SDMA5_GFX_STATUS +#define SDMA5_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA5_GFX_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA5_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA5_GFX_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA5_GFX_DOORBELL_LOG +#define SDMA5_GFX_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA5_GFX_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA5_GFX_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA5_GFX_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA5_GFX_WATERMARK +#define SDMA5_GFX_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA5_GFX_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA5_GFX_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA5_GFX_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA5_GFX_DOORBELL_OFFSET +#define SDMA5_GFX_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA5_GFX_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA5_GFX_CSA_ADDR_LO +#define SDMA5_GFX_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA5_GFX_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA5_GFX_CSA_ADDR_HI +#define SDMA5_GFX_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA5_GFX_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA5_GFX_IB_SUB_REMAIN +#define SDMA5_GFX_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA5_GFX_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA5_GFX_PREEMPT +#define SDMA5_GFX_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA5_GFX_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA5_GFX_DUMMY_REG +#define SDMA5_GFX_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA5_GFX_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA5_GFX_RB_WPTR_POLL_ADDR_HI +#define SDMA5_GFX_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA5_GFX_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA5_GFX_RB_WPTR_POLL_ADDR_LO +#define SDMA5_GFX_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA5_GFX_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA5_GFX_RB_AQL_CNTL +#define SDMA5_GFX_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA5_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA5_GFX_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA5_GFX_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA5_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define 
SDMA5_GFX_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA5_GFX_MINOR_PTR_UPDATE +#define SDMA5_GFX_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA5_GFX_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA5_GFX_MIDCMD_DATA0 +#define SDMA5_GFX_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA5_GFX_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA5_GFX_MIDCMD_DATA1 +#define SDMA5_GFX_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA5_GFX_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA5_GFX_MIDCMD_DATA2 +#define SDMA5_GFX_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA5_GFX_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA5_GFX_MIDCMD_DATA3 +#define SDMA5_GFX_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA5_GFX_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA5_GFX_MIDCMD_DATA4 +#define SDMA5_GFX_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA5_GFX_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA5_GFX_MIDCMD_DATA5 +#define SDMA5_GFX_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA5_GFX_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA5_GFX_MIDCMD_DATA6 +#define SDMA5_GFX_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA5_GFX_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA5_GFX_MIDCMD_DATA7 +#define SDMA5_GFX_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA5_GFX_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA5_GFX_MIDCMD_DATA8 +#define SDMA5_GFX_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA5_GFX_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA5_GFX_MIDCMD_CNTL +#define SDMA5_GFX_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA5_GFX_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA5_GFX_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA5_GFX_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA5_GFX_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA5_GFX_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA5_GFX_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA5_GFX_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA5_PAGE_RB_CNTL +#define SDMA5_PAGE_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA5_PAGE_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA5_PAGE_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA5_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA5_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA5_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA5_PAGE_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA5_PAGE_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA5_PAGE_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA5_PAGE_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA5_PAGE_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA5_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA5_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA5_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA5_PAGE_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA5_PAGE_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA5_PAGE_RB_BASE +#define SDMA5_PAGE_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA5_PAGE_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA5_PAGE_RB_BASE_HI +#define SDMA5_PAGE_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA5_PAGE_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA5_PAGE_RB_RPTR +#define SDMA5_PAGE_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA5_PAGE_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA5_PAGE_RB_RPTR_HI +#define SDMA5_PAGE_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA5_PAGE_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA5_PAGE_RB_WPTR +#define SDMA5_PAGE_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA5_PAGE_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA5_PAGE_RB_WPTR_HI +#define SDMA5_PAGE_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define 
SDMA5_PAGE_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA5_PAGE_RB_WPTR_POLL_CNTL +#define SDMA5_PAGE_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA5_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA5_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA5_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA5_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA5_PAGE_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA5_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA5_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA5_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA5_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA5_PAGE_RB_RPTR_ADDR_HI +#define SDMA5_PAGE_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA5_PAGE_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA5_PAGE_RB_RPTR_ADDR_LO +#define SDMA5_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA5_PAGE_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA5_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA5_PAGE_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA5_PAGE_IB_CNTL +#define SDMA5_PAGE_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA5_PAGE_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA5_PAGE_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA5_PAGE_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA5_PAGE_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA5_PAGE_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA5_PAGE_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA5_PAGE_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA5_PAGE_IB_RPTR +#define SDMA5_PAGE_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA5_PAGE_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA5_PAGE_IB_OFFSET +#define SDMA5_PAGE_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA5_PAGE_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA5_PAGE_IB_BASE_LO +#define SDMA5_PAGE_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA5_PAGE_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA5_PAGE_IB_BASE_HI +#define SDMA5_PAGE_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA5_PAGE_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA5_PAGE_IB_SIZE +#define SDMA5_PAGE_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA5_PAGE_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA5_PAGE_SKIP_CNTL +#define SDMA5_PAGE_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA5_PAGE_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA5_PAGE_CONTEXT_STATUS +#define SDMA5_PAGE_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA5_PAGE_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA5_PAGE_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA5_PAGE_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA5_PAGE_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA5_PAGE_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA5_PAGE_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA5_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA5_PAGE_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA5_PAGE_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA5_PAGE_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA5_PAGE_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA5_PAGE_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA5_PAGE_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA5_PAGE_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA5_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA5_PAGE_DOORBELL +#define SDMA5_PAGE_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA5_PAGE_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA5_PAGE_DOORBELL__ENABLE_MASK 0x10000000L +#define 
SDMA5_PAGE_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA5_PAGE_STATUS +#define SDMA5_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA5_PAGE_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA5_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA5_PAGE_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA5_PAGE_DOORBELL_LOG +#define SDMA5_PAGE_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA5_PAGE_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA5_PAGE_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA5_PAGE_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA5_PAGE_WATERMARK +#define SDMA5_PAGE_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA5_PAGE_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA5_PAGE_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA5_PAGE_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA5_PAGE_DOORBELL_OFFSET +#define SDMA5_PAGE_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA5_PAGE_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA5_PAGE_CSA_ADDR_LO +#define SDMA5_PAGE_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA5_PAGE_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA5_PAGE_CSA_ADDR_HI +#define SDMA5_PAGE_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA5_PAGE_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA5_PAGE_IB_SUB_REMAIN +#define SDMA5_PAGE_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA5_PAGE_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA5_PAGE_PREEMPT +#define SDMA5_PAGE_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA5_PAGE_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA5_PAGE_DUMMY_REG +#define SDMA5_PAGE_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA5_PAGE_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA5_PAGE_RB_WPTR_POLL_ADDR_HI +#define SDMA5_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA5_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA5_PAGE_RB_WPTR_POLL_ADDR_LO +#define SDMA5_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA5_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA5_PAGE_RB_AQL_CNTL +#define SDMA5_PAGE_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA5_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA5_PAGE_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA5_PAGE_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA5_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA5_PAGE_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA5_PAGE_MINOR_PTR_UPDATE +#define SDMA5_PAGE_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA5_PAGE_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA5_PAGE_MIDCMD_DATA0 +#define SDMA5_PAGE_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA5_PAGE_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA5_PAGE_MIDCMD_DATA1 +#define SDMA5_PAGE_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA5_PAGE_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA5_PAGE_MIDCMD_DATA2 +#define SDMA5_PAGE_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA5_PAGE_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA5_PAGE_MIDCMD_DATA3 +#define SDMA5_PAGE_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA5_PAGE_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA5_PAGE_MIDCMD_DATA4 +#define SDMA5_PAGE_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA5_PAGE_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA5_PAGE_MIDCMD_DATA5 +#define SDMA5_PAGE_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA5_PAGE_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA5_PAGE_MIDCMD_DATA6 +#define SDMA5_PAGE_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA5_PAGE_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA5_PAGE_MIDCMD_DATA7 +#define SDMA5_PAGE_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA5_PAGE_MIDCMD_DATA7__DATA7_MASK 
0xFFFFFFFFL +//SDMA5_PAGE_MIDCMD_DATA8 +#define SDMA5_PAGE_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA5_PAGE_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA5_PAGE_MIDCMD_CNTL +#define SDMA5_PAGE_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA5_PAGE_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA5_PAGE_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA5_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA5_PAGE_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA5_PAGE_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA5_PAGE_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA5_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA5_RLC0_RB_CNTL +#define SDMA5_RLC0_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA5_RLC0_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA5_RLC0_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA5_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA5_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA5_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA5_RLC0_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA5_RLC0_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA5_RLC0_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA5_RLC0_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA5_RLC0_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA5_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA5_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA5_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA5_RLC0_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA5_RLC0_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA5_RLC0_RB_BASE +#define SDMA5_RLC0_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA5_RLC0_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA5_RLC0_RB_BASE_HI +#define SDMA5_RLC0_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA5_RLC0_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA5_RLC0_RB_RPTR +#define SDMA5_RLC0_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA5_RLC0_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA5_RLC0_RB_RPTR_HI +#define SDMA5_RLC0_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA5_RLC0_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA5_RLC0_RB_WPTR +#define SDMA5_RLC0_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA5_RLC0_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA5_RLC0_RB_WPTR_HI +#define SDMA5_RLC0_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA5_RLC0_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA5_RLC0_RB_WPTR_POLL_CNTL +#define SDMA5_RLC0_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA5_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA5_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA5_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA5_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA5_RLC0_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA5_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA5_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA5_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA5_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA5_RLC0_RB_RPTR_ADDR_HI +#define SDMA5_RLC0_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA5_RLC0_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA5_RLC0_RB_RPTR_ADDR_LO +#define SDMA5_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA5_RLC0_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA5_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA5_RLC0_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA5_RLC0_IB_CNTL +#define SDMA5_RLC0_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define 
SDMA5_RLC0_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA5_RLC0_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA5_RLC0_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA5_RLC0_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA5_RLC0_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA5_RLC0_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA5_RLC0_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA5_RLC0_IB_RPTR +#define SDMA5_RLC0_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA5_RLC0_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA5_RLC0_IB_OFFSET +#define SDMA5_RLC0_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA5_RLC0_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA5_RLC0_IB_BASE_LO +#define SDMA5_RLC0_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA5_RLC0_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA5_RLC0_IB_BASE_HI +#define SDMA5_RLC0_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA5_RLC0_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA5_RLC0_IB_SIZE +#define SDMA5_RLC0_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA5_RLC0_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA5_RLC0_SKIP_CNTL +#define SDMA5_RLC0_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA5_RLC0_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA5_RLC0_CONTEXT_STATUS +#define SDMA5_RLC0_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA5_RLC0_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA5_RLC0_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA5_RLC0_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA5_RLC0_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA5_RLC0_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA5_RLC0_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA5_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA5_RLC0_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA5_RLC0_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA5_RLC0_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA5_RLC0_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA5_RLC0_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA5_RLC0_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA5_RLC0_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA5_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA5_RLC0_DOORBELL +#define SDMA5_RLC0_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA5_RLC0_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA5_RLC0_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA5_RLC0_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA5_RLC0_STATUS +#define SDMA5_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA5_RLC0_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA5_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA5_RLC0_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA5_RLC0_DOORBELL_LOG +#define SDMA5_RLC0_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA5_RLC0_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA5_RLC0_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA5_RLC0_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA5_RLC0_WATERMARK +#define SDMA5_RLC0_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA5_RLC0_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA5_RLC0_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA5_RLC0_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA5_RLC0_DOORBELL_OFFSET +#define SDMA5_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA5_RLC0_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA5_RLC0_CSA_ADDR_LO +#define SDMA5_RLC0_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA5_RLC0_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA5_RLC0_CSA_ADDR_HI +#define SDMA5_RLC0_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA5_RLC0_CSA_ADDR_HI__ADDR_MASK 
0xFFFFFFFFL +//SDMA5_RLC0_IB_SUB_REMAIN +#define SDMA5_RLC0_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA5_RLC0_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA5_RLC0_PREEMPT +#define SDMA5_RLC0_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA5_RLC0_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA5_RLC0_DUMMY_REG +#define SDMA5_RLC0_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA5_RLC0_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA5_RLC0_RB_WPTR_POLL_ADDR_HI +#define SDMA5_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA5_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA5_RLC0_RB_WPTR_POLL_ADDR_LO +#define SDMA5_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA5_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA5_RLC0_RB_AQL_CNTL +#define SDMA5_RLC0_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA5_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA5_RLC0_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA5_RLC0_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA5_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA5_RLC0_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA5_RLC0_MINOR_PTR_UPDATE +#define SDMA5_RLC0_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA5_RLC0_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA5_RLC0_MIDCMD_DATA0 +#define SDMA5_RLC0_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA5_RLC0_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA5_RLC0_MIDCMD_DATA1 +#define SDMA5_RLC0_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA5_RLC0_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA5_RLC0_MIDCMD_DATA2 +#define SDMA5_RLC0_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA5_RLC0_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA5_RLC0_MIDCMD_DATA3 +#define SDMA5_RLC0_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA5_RLC0_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA5_RLC0_MIDCMD_DATA4 +#define SDMA5_RLC0_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA5_RLC0_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA5_RLC0_MIDCMD_DATA5 +#define SDMA5_RLC0_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA5_RLC0_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA5_RLC0_MIDCMD_DATA6 +#define SDMA5_RLC0_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA5_RLC0_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA5_RLC0_MIDCMD_DATA7 +#define SDMA5_RLC0_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA5_RLC0_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA5_RLC0_MIDCMD_DATA8 +#define SDMA5_RLC0_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA5_RLC0_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA5_RLC0_MIDCMD_CNTL +#define SDMA5_RLC0_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA5_RLC0_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA5_RLC0_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA5_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA5_RLC0_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA5_RLC0_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA5_RLC0_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA5_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA5_RLC1_RB_CNTL +#define SDMA5_RLC1_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA5_RLC1_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA5_RLC1_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA5_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA5_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA5_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA5_RLC1_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA5_RLC1_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA5_RLC1_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA5_RLC1_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define 
SDMA5_RLC1_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA5_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA5_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA5_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA5_RLC1_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA5_RLC1_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA5_RLC1_RB_BASE +#define SDMA5_RLC1_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA5_RLC1_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA5_RLC1_RB_BASE_HI +#define SDMA5_RLC1_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA5_RLC1_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA5_RLC1_RB_RPTR +#define SDMA5_RLC1_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA5_RLC1_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA5_RLC1_RB_RPTR_HI +#define SDMA5_RLC1_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA5_RLC1_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA5_RLC1_RB_WPTR +#define SDMA5_RLC1_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA5_RLC1_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA5_RLC1_RB_WPTR_HI +#define SDMA5_RLC1_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA5_RLC1_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA5_RLC1_RB_WPTR_POLL_CNTL +#define SDMA5_RLC1_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA5_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA5_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA5_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA5_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA5_RLC1_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA5_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA5_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA5_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA5_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA5_RLC1_RB_RPTR_ADDR_HI +#define SDMA5_RLC1_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA5_RLC1_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA5_RLC1_RB_RPTR_ADDR_LO +#define SDMA5_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA5_RLC1_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA5_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA5_RLC1_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA5_RLC1_IB_CNTL +#define SDMA5_RLC1_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA5_RLC1_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA5_RLC1_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA5_RLC1_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA5_RLC1_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA5_RLC1_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA5_RLC1_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA5_RLC1_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA5_RLC1_IB_RPTR +#define SDMA5_RLC1_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA5_RLC1_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA5_RLC1_IB_OFFSET +#define SDMA5_RLC1_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA5_RLC1_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA5_RLC1_IB_BASE_LO +#define SDMA5_RLC1_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA5_RLC1_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA5_RLC1_IB_BASE_HI +#define SDMA5_RLC1_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA5_RLC1_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA5_RLC1_IB_SIZE +#define SDMA5_RLC1_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA5_RLC1_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA5_RLC1_SKIP_CNTL +#define SDMA5_RLC1_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA5_RLC1_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA5_RLC1_CONTEXT_STATUS +#define SDMA5_RLC1_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define 
SDMA5_RLC1_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA5_RLC1_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA5_RLC1_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA5_RLC1_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA5_RLC1_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA5_RLC1_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA5_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA5_RLC1_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA5_RLC1_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA5_RLC1_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA5_RLC1_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA5_RLC1_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA5_RLC1_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA5_RLC1_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA5_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA5_RLC1_DOORBELL +#define SDMA5_RLC1_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA5_RLC1_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA5_RLC1_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA5_RLC1_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA5_RLC1_STATUS +#define SDMA5_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA5_RLC1_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA5_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA5_RLC1_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA5_RLC1_DOORBELL_LOG +#define SDMA5_RLC1_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA5_RLC1_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA5_RLC1_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA5_RLC1_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA5_RLC1_WATERMARK +#define SDMA5_RLC1_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA5_RLC1_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA5_RLC1_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA5_RLC1_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA5_RLC1_DOORBELL_OFFSET +#define SDMA5_RLC1_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA5_RLC1_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA5_RLC1_CSA_ADDR_LO +#define SDMA5_RLC1_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA5_RLC1_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA5_RLC1_CSA_ADDR_HI +#define SDMA5_RLC1_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA5_RLC1_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA5_RLC1_IB_SUB_REMAIN +#define SDMA5_RLC1_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA5_RLC1_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA5_RLC1_PREEMPT +#define SDMA5_RLC1_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA5_RLC1_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA5_RLC1_DUMMY_REG +#define SDMA5_RLC1_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA5_RLC1_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA5_RLC1_RB_WPTR_POLL_ADDR_HI +#define SDMA5_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA5_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA5_RLC1_RB_WPTR_POLL_ADDR_LO +#define SDMA5_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA5_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA5_RLC1_RB_AQL_CNTL +#define SDMA5_RLC1_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA5_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA5_RLC1_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA5_RLC1_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA5_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA5_RLC1_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA5_RLC1_MINOR_PTR_UPDATE +#define SDMA5_RLC1_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA5_RLC1_MINOR_PTR_UPDATE__ENABLE_MASK 
0x00000001L +//SDMA5_RLC1_MIDCMD_DATA0 +#define SDMA5_RLC1_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA5_RLC1_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA5_RLC1_MIDCMD_DATA1 +#define SDMA5_RLC1_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA5_RLC1_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA5_RLC1_MIDCMD_DATA2 +#define SDMA5_RLC1_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA5_RLC1_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA5_RLC1_MIDCMD_DATA3 +#define SDMA5_RLC1_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA5_RLC1_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA5_RLC1_MIDCMD_DATA4 +#define SDMA5_RLC1_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA5_RLC1_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA5_RLC1_MIDCMD_DATA5 +#define SDMA5_RLC1_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA5_RLC1_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA5_RLC1_MIDCMD_DATA6 +#define SDMA5_RLC1_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA5_RLC1_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA5_RLC1_MIDCMD_DATA7 +#define SDMA5_RLC1_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA5_RLC1_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA5_RLC1_MIDCMD_DATA8 +#define SDMA5_RLC1_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA5_RLC1_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA5_RLC1_MIDCMD_CNTL +#define SDMA5_RLC1_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA5_RLC1_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA5_RLC1_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA5_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA5_RLC1_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA5_RLC1_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA5_RLC1_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA5_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA5_RLC2_RB_CNTL +#define SDMA5_RLC2_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA5_RLC2_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA5_RLC2_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA5_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA5_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA5_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA5_RLC2_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA5_RLC2_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA5_RLC2_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA5_RLC2_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA5_RLC2_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA5_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA5_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA5_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA5_RLC2_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA5_RLC2_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA5_RLC2_RB_BASE +#define SDMA5_RLC2_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA5_RLC2_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA5_RLC2_RB_BASE_HI +#define SDMA5_RLC2_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA5_RLC2_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA5_RLC2_RB_RPTR +#define SDMA5_RLC2_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA5_RLC2_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA5_RLC2_RB_RPTR_HI +#define SDMA5_RLC2_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA5_RLC2_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA5_RLC2_RB_WPTR +#define SDMA5_RLC2_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA5_RLC2_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA5_RLC2_RB_WPTR_HI +#define SDMA5_RLC2_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA5_RLC2_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA5_RLC2_RB_WPTR_POLL_CNTL +#define SDMA5_RLC2_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define 
SDMA5_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA5_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA5_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA5_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA5_RLC2_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA5_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA5_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA5_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA5_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA5_RLC2_RB_RPTR_ADDR_HI +#define SDMA5_RLC2_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA5_RLC2_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA5_RLC2_RB_RPTR_ADDR_LO +#define SDMA5_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA5_RLC2_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA5_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA5_RLC2_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA5_RLC2_IB_CNTL +#define SDMA5_RLC2_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA5_RLC2_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA5_RLC2_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA5_RLC2_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA5_RLC2_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA5_RLC2_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA5_RLC2_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA5_RLC2_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA5_RLC2_IB_RPTR +#define SDMA5_RLC2_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA5_RLC2_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA5_RLC2_IB_OFFSET +#define SDMA5_RLC2_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA5_RLC2_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA5_RLC2_IB_BASE_LO +#define SDMA5_RLC2_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA5_RLC2_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA5_RLC2_IB_BASE_HI +#define SDMA5_RLC2_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA5_RLC2_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA5_RLC2_IB_SIZE +#define SDMA5_RLC2_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA5_RLC2_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA5_RLC2_SKIP_CNTL +#define SDMA5_RLC2_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA5_RLC2_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA5_RLC2_CONTEXT_STATUS +#define SDMA5_RLC2_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA5_RLC2_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA5_RLC2_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA5_RLC2_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA5_RLC2_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA5_RLC2_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA5_RLC2_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA5_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA5_RLC2_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA5_RLC2_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA5_RLC2_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA5_RLC2_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA5_RLC2_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA5_RLC2_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA5_RLC2_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA5_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA5_RLC2_DOORBELL +#define SDMA5_RLC2_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA5_RLC2_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA5_RLC2_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA5_RLC2_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA5_RLC2_STATUS +#define SDMA5_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define 
SDMA5_RLC2_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA5_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA5_RLC2_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA5_RLC2_DOORBELL_LOG +#define SDMA5_RLC2_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA5_RLC2_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA5_RLC2_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA5_RLC2_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA5_RLC2_WATERMARK +#define SDMA5_RLC2_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA5_RLC2_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA5_RLC2_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA5_RLC2_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA5_RLC2_DOORBELL_OFFSET +#define SDMA5_RLC2_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA5_RLC2_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA5_RLC2_CSA_ADDR_LO +#define SDMA5_RLC2_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA5_RLC2_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA5_RLC2_CSA_ADDR_HI +#define SDMA5_RLC2_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA5_RLC2_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA5_RLC2_IB_SUB_REMAIN +#define SDMA5_RLC2_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA5_RLC2_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA5_RLC2_PREEMPT +#define SDMA5_RLC2_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA5_RLC2_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA5_RLC2_DUMMY_REG +#define SDMA5_RLC2_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA5_RLC2_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA5_RLC2_RB_WPTR_POLL_ADDR_HI +#define SDMA5_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA5_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA5_RLC2_RB_WPTR_POLL_ADDR_LO +#define SDMA5_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA5_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA5_RLC2_RB_AQL_CNTL +#define SDMA5_RLC2_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA5_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA5_RLC2_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA5_RLC2_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA5_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA5_RLC2_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA5_RLC2_MINOR_PTR_UPDATE +#define SDMA5_RLC2_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA5_RLC2_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA5_RLC2_MIDCMD_DATA0 +#define SDMA5_RLC2_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA5_RLC2_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA5_RLC2_MIDCMD_DATA1 +#define SDMA5_RLC2_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA5_RLC2_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA5_RLC2_MIDCMD_DATA2 +#define SDMA5_RLC2_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA5_RLC2_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA5_RLC2_MIDCMD_DATA3 +#define SDMA5_RLC2_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA5_RLC2_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA5_RLC2_MIDCMD_DATA4 +#define SDMA5_RLC2_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA5_RLC2_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA5_RLC2_MIDCMD_DATA5 +#define SDMA5_RLC2_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA5_RLC2_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA5_RLC2_MIDCMD_DATA6 +#define SDMA5_RLC2_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA5_RLC2_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA5_RLC2_MIDCMD_DATA7 +#define SDMA5_RLC2_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA5_RLC2_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA5_RLC2_MIDCMD_DATA8 +#define SDMA5_RLC2_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA5_RLC2_MIDCMD_DATA8__DATA8_MASK 
0xFFFFFFFFL +//SDMA5_RLC2_MIDCMD_CNTL +#define SDMA5_RLC2_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA5_RLC2_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA5_RLC2_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA5_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA5_RLC2_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA5_RLC2_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA5_RLC2_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA5_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA5_RLC3_RB_CNTL +#define SDMA5_RLC3_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA5_RLC3_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA5_RLC3_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA5_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA5_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA5_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA5_RLC3_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA5_RLC3_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA5_RLC3_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA5_RLC3_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA5_RLC3_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA5_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA5_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA5_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA5_RLC3_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA5_RLC3_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA5_RLC3_RB_BASE +#define SDMA5_RLC3_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA5_RLC3_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA5_RLC3_RB_BASE_HI +#define SDMA5_RLC3_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA5_RLC3_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA5_RLC3_RB_RPTR +#define SDMA5_RLC3_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA5_RLC3_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA5_RLC3_RB_RPTR_HI +#define SDMA5_RLC3_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA5_RLC3_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA5_RLC3_RB_WPTR +#define SDMA5_RLC3_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA5_RLC3_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA5_RLC3_RB_WPTR_HI +#define SDMA5_RLC3_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA5_RLC3_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA5_RLC3_RB_WPTR_POLL_CNTL +#define SDMA5_RLC3_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA5_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA5_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA5_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA5_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA5_RLC3_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA5_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA5_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA5_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA5_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA5_RLC3_RB_RPTR_ADDR_HI +#define SDMA5_RLC3_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA5_RLC3_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA5_RLC3_RB_RPTR_ADDR_LO +#define SDMA5_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA5_RLC3_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA5_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA5_RLC3_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA5_RLC3_IB_CNTL +#define SDMA5_RLC3_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA5_RLC3_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA5_RLC3_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA5_RLC3_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define 
SDMA5_RLC3_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA5_RLC3_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA5_RLC3_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA5_RLC3_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA5_RLC3_IB_RPTR +#define SDMA5_RLC3_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA5_RLC3_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA5_RLC3_IB_OFFSET +#define SDMA5_RLC3_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA5_RLC3_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA5_RLC3_IB_BASE_LO +#define SDMA5_RLC3_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA5_RLC3_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA5_RLC3_IB_BASE_HI +#define SDMA5_RLC3_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA5_RLC3_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA5_RLC3_IB_SIZE +#define SDMA5_RLC3_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA5_RLC3_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA5_RLC3_SKIP_CNTL +#define SDMA5_RLC3_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA5_RLC3_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA5_RLC3_CONTEXT_STATUS +#define SDMA5_RLC3_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA5_RLC3_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA5_RLC3_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA5_RLC3_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA5_RLC3_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA5_RLC3_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA5_RLC3_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA5_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA5_RLC3_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA5_RLC3_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA5_RLC3_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA5_RLC3_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA5_RLC3_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA5_RLC3_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA5_RLC3_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA5_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA5_RLC3_DOORBELL +#define SDMA5_RLC3_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA5_RLC3_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA5_RLC3_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA5_RLC3_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA5_RLC3_STATUS +#define SDMA5_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA5_RLC3_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA5_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA5_RLC3_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA5_RLC3_DOORBELL_LOG +#define SDMA5_RLC3_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA5_RLC3_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA5_RLC3_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA5_RLC3_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA5_RLC3_WATERMARK +#define SDMA5_RLC3_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA5_RLC3_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA5_RLC3_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA5_RLC3_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA5_RLC3_DOORBELL_OFFSET +#define SDMA5_RLC3_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA5_RLC3_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA5_RLC3_CSA_ADDR_LO +#define SDMA5_RLC3_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA5_RLC3_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA5_RLC3_CSA_ADDR_HI +#define SDMA5_RLC3_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA5_RLC3_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA5_RLC3_IB_SUB_REMAIN +#define SDMA5_RLC3_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA5_RLC3_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL 
+//SDMA5_RLC3_PREEMPT +#define SDMA5_RLC3_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA5_RLC3_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA5_RLC3_DUMMY_REG +#define SDMA5_RLC3_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA5_RLC3_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA5_RLC3_RB_WPTR_POLL_ADDR_HI +#define SDMA5_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA5_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA5_RLC3_RB_WPTR_POLL_ADDR_LO +#define SDMA5_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA5_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA5_RLC3_RB_AQL_CNTL +#define SDMA5_RLC3_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA5_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA5_RLC3_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA5_RLC3_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA5_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA5_RLC3_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA5_RLC3_MINOR_PTR_UPDATE +#define SDMA5_RLC3_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA5_RLC3_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA5_RLC3_MIDCMD_DATA0 +#define SDMA5_RLC3_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA5_RLC3_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA5_RLC3_MIDCMD_DATA1 +#define SDMA5_RLC3_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA5_RLC3_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA5_RLC3_MIDCMD_DATA2 +#define SDMA5_RLC3_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA5_RLC3_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA5_RLC3_MIDCMD_DATA3 +#define SDMA5_RLC3_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA5_RLC3_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA5_RLC3_MIDCMD_DATA4 +#define SDMA5_RLC3_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA5_RLC3_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA5_RLC3_MIDCMD_DATA5 +#define SDMA5_RLC3_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA5_RLC3_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA5_RLC3_MIDCMD_DATA6 +#define SDMA5_RLC3_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA5_RLC3_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA5_RLC3_MIDCMD_DATA7 +#define SDMA5_RLC3_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA5_RLC3_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA5_RLC3_MIDCMD_DATA8 +#define SDMA5_RLC3_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA5_RLC3_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA5_RLC3_MIDCMD_CNTL +#define SDMA5_RLC3_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA5_RLC3_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA5_RLC3_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA5_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA5_RLC3_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA5_RLC3_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA5_RLC3_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA5_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA5_RLC4_RB_CNTL +#define SDMA5_RLC4_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA5_RLC4_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA5_RLC4_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA5_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA5_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA5_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA5_RLC4_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA5_RLC4_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA5_RLC4_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA5_RLC4_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA5_RLC4_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA5_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define 
SDMA5_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA5_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA5_RLC4_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA5_RLC4_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA5_RLC4_RB_BASE +#define SDMA5_RLC4_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA5_RLC4_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA5_RLC4_RB_BASE_HI +#define SDMA5_RLC4_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA5_RLC4_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA5_RLC4_RB_RPTR +#define SDMA5_RLC4_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA5_RLC4_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA5_RLC4_RB_RPTR_HI +#define SDMA5_RLC4_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA5_RLC4_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA5_RLC4_RB_WPTR +#define SDMA5_RLC4_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA5_RLC4_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA5_RLC4_RB_WPTR_HI +#define SDMA5_RLC4_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA5_RLC4_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA5_RLC4_RB_WPTR_POLL_CNTL +#define SDMA5_RLC4_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA5_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA5_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA5_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA5_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA5_RLC4_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA5_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA5_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA5_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA5_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA5_RLC4_RB_RPTR_ADDR_HI +#define SDMA5_RLC4_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA5_RLC4_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA5_RLC4_RB_RPTR_ADDR_LO +#define SDMA5_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA5_RLC4_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA5_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA5_RLC4_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA5_RLC4_IB_CNTL +#define SDMA5_RLC4_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA5_RLC4_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA5_RLC4_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA5_RLC4_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA5_RLC4_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA5_RLC4_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA5_RLC4_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA5_RLC4_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA5_RLC4_IB_RPTR +#define SDMA5_RLC4_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA5_RLC4_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA5_RLC4_IB_OFFSET +#define SDMA5_RLC4_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA5_RLC4_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA5_RLC4_IB_BASE_LO +#define SDMA5_RLC4_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA5_RLC4_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA5_RLC4_IB_BASE_HI +#define SDMA5_RLC4_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA5_RLC4_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA5_RLC4_IB_SIZE +#define SDMA5_RLC4_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA5_RLC4_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA5_RLC4_SKIP_CNTL +#define SDMA5_RLC4_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA5_RLC4_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA5_RLC4_CONTEXT_STATUS +#define SDMA5_RLC4_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA5_RLC4_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA5_RLC4_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define 
SDMA5_RLC4_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA5_RLC4_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA5_RLC4_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA5_RLC4_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA5_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA5_RLC4_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA5_RLC4_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA5_RLC4_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA5_RLC4_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA5_RLC4_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA5_RLC4_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA5_RLC4_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA5_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA5_RLC4_DOORBELL +#define SDMA5_RLC4_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA5_RLC4_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA5_RLC4_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA5_RLC4_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA5_RLC4_STATUS +#define SDMA5_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA5_RLC4_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA5_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA5_RLC4_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA5_RLC4_DOORBELL_LOG +#define SDMA5_RLC4_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA5_RLC4_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA5_RLC4_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA5_RLC4_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA5_RLC4_WATERMARK +#define SDMA5_RLC4_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA5_RLC4_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA5_RLC4_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA5_RLC4_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA5_RLC4_DOORBELL_OFFSET +#define SDMA5_RLC4_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA5_RLC4_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA5_RLC4_CSA_ADDR_LO +#define SDMA5_RLC4_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA5_RLC4_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA5_RLC4_CSA_ADDR_HI +#define SDMA5_RLC4_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA5_RLC4_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA5_RLC4_IB_SUB_REMAIN +#define SDMA5_RLC4_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA5_RLC4_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA5_RLC4_PREEMPT +#define SDMA5_RLC4_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA5_RLC4_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA5_RLC4_DUMMY_REG +#define SDMA5_RLC4_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA5_RLC4_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA5_RLC4_RB_WPTR_POLL_ADDR_HI +#define SDMA5_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA5_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA5_RLC4_RB_WPTR_POLL_ADDR_LO +#define SDMA5_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA5_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA5_RLC4_RB_AQL_CNTL +#define SDMA5_RLC4_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA5_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA5_RLC4_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA5_RLC4_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA5_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA5_RLC4_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA5_RLC4_MINOR_PTR_UPDATE +#define SDMA5_RLC4_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA5_RLC4_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA5_RLC4_MIDCMD_DATA0 +#define SDMA5_RLC4_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define 
SDMA5_RLC4_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA5_RLC4_MIDCMD_DATA1 +#define SDMA5_RLC4_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA5_RLC4_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA5_RLC4_MIDCMD_DATA2 +#define SDMA5_RLC4_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA5_RLC4_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA5_RLC4_MIDCMD_DATA3 +#define SDMA5_RLC4_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA5_RLC4_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA5_RLC4_MIDCMD_DATA4 +#define SDMA5_RLC4_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA5_RLC4_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA5_RLC4_MIDCMD_DATA5 +#define SDMA5_RLC4_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA5_RLC4_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA5_RLC4_MIDCMD_DATA6 +#define SDMA5_RLC4_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA5_RLC4_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA5_RLC4_MIDCMD_DATA7 +#define SDMA5_RLC4_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA5_RLC4_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA5_RLC4_MIDCMD_DATA8 +#define SDMA5_RLC4_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA5_RLC4_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA5_RLC4_MIDCMD_CNTL +#define SDMA5_RLC4_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA5_RLC4_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA5_RLC4_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA5_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA5_RLC4_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA5_RLC4_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA5_RLC4_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA5_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA5_RLC5_RB_CNTL +#define SDMA5_RLC5_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA5_RLC5_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA5_RLC5_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA5_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA5_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA5_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA5_RLC5_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA5_RLC5_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA5_RLC5_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA5_RLC5_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA5_RLC5_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA5_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA5_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA5_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA5_RLC5_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA5_RLC5_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA5_RLC5_RB_BASE +#define SDMA5_RLC5_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA5_RLC5_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA5_RLC5_RB_BASE_HI +#define SDMA5_RLC5_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA5_RLC5_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA5_RLC5_RB_RPTR +#define SDMA5_RLC5_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA5_RLC5_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA5_RLC5_RB_RPTR_HI +#define SDMA5_RLC5_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA5_RLC5_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA5_RLC5_RB_WPTR +#define SDMA5_RLC5_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA5_RLC5_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA5_RLC5_RB_WPTR_HI +#define SDMA5_RLC5_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA5_RLC5_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA5_RLC5_RB_WPTR_POLL_CNTL +#define SDMA5_RLC5_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA5_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA5_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define 
SDMA5_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA5_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA5_RLC5_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA5_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA5_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA5_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA5_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA5_RLC5_RB_RPTR_ADDR_HI +#define SDMA5_RLC5_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA5_RLC5_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA5_RLC5_RB_RPTR_ADDR_LO +#define SDMA5_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA5_RLC5_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA5_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA5_RLC5_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA5_RLC5_IB_CNTL +#define SDMA5_RLC5_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA5_RLC5_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA5_RLC5_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA5_RLC5_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA5_RLC5_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA5_RLC5_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA5_RLC5_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA5_RLC5_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA5_RLC5_IB_RPTR +#define SDMA5_RLC5_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA5_RLC5_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA5_RLC5_IB_OFFSET +#define SDMA5_RLC5_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA5_RLC5_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA5_RLC5_IB_BASE_LO +#define SDMA5_RLC5_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA5_RLC5_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA5_RLC5_IB_BASE_HI +#define SDMA5_RLC5_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA5_RLC5_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA5_RLC5_IB_SIZE +#define SDMA5_RLC5_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA5_RLC5_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA5_RLC5_SKIP_CNTL +#define SDMA5_RLC5_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA5_RLC5_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA5_RLC5_CONTEXT_STATUS +#define SDMA5_RLC5_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA5_RLC5_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA5_RLC5_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA5_RLC5_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA5_RLC5_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA5_RLC5_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA5_RLC5_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA5_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA5_RLC5_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA5_RLC5_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA5_RLC5_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA5_RLC5_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA5_RLC5_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA5_RLC5_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA5_RLC5_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA5_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA5_RLC5_DOORBELL +#define SDMA5_RLC5_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA5_RLC5_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA5_RLC5_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA5_RLC5_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA5_RLC5_STATUS +#define SDMA5_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA5_RLC5_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA5_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define 
SDMA5_RLC5_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA5_RLC5_DOORBELL_LOG +#define SDMA5_RLC5_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA5_RLC5_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA5_RLC5_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA5_RLC5_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA5_RLC5_WATERMARK +#define SDMA5_RLC5_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA5_RLC5_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA5_RLC5_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA5_RLC5_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA5_RLC5_DOORBELL_OFFSET +#define SDMA5_RLC5_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA5_RLC5_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA5_RLC5_CSA_ADDR_LO +#define SDMA5_RLC5_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA5_RLC5_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA5_RLC5_CSA_ADDR_HI +#define SDMA5_RLC5_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA5_RLC5_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA5_RLC5_IB_SUB_REMAIN +#define SDMA5_RLC5_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA5_RLC5_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA5_RLC5_PREEMPT +#define SDMA5_RLC5_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA5_RLC5_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA5_RLC5_DUMMY_REG +#define SDMA5_RLC5_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA5_RLC5_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA5_RLC5_RB_WPTR_POLL_ADDR_HI +#define SDMA5_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA5_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA5_RLC5_RB_WPTR_POLL_ADDR_LO +#define SDMA5_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA5_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA5_RLC5_RB_AQL_CNTL +#define SDMA5_RLC5_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA5_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA5_RLC5_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA5_RLC5_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA5_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA5_RLC5_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA5_RLC5_MINOR_PTR_UPDATE +#define SDMA5_RLC5_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA5_RLC5_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA5_RLC5_MIDCMD_DATA0 +#define SDMA5_RLC5_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA5_RLC5_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA5_RLC5_MIDCMD_DATA1 +#define SDMA5_RLC5_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA5_RLC5_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA5_RLC5_MIDCMD_DATA2 +#define SDMA5_RLC5_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA5_RLC5_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA5_RLC5_MIDCMD_DATA3 +#define SDMA5_RLC5_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA5_RLC5_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA5_RLC5_MIDCMD_DATA4 +#define SDMA5_RLC5_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA5_RLC5_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA5_RLC5_MIDCMD_DATA5 +#define SDMA5_RLC5_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA5_RLC5_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA5_RLC5_MIDCMD_DATA6 +#define SDMA5_RLC5_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA5_RLC5_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA5_RLC5_MIDCMD_DATA7 +#define SDMA5_RLC5_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA5_RLC5_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA5_RLC5_MIDCMD_DATA8 +#define SDMA5_RLC5_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA5_RLC5_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA5_RLC5_MIDCMD_CNTL +#define SDMA5_RLC5_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define 
SDMA5_RLC5_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA5_RLC5_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA5_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA5_RLC5_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA5_RLC5_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA5_RLC5_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA5_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA5_RLC6_RB_CNTL +#define SDMA5_RLC6_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA5_RLC6_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA5_RLC6_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA5_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA5_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA5_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA5_RLC6_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA5_RLC6_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA5_RLC6_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA5_RLC6_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA5_RLC6_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA5_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA5_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA5_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA5_RLC6_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA5_RLC6_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA5_RLC6_RB_BASE +#define SDMA5_RLC6_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA5_RLC6_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA5_RLC6_RB_BASE_HI +#define SDMA5_RLC6_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA5_RLC6_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA5_RLC6_RB_RPTR +#define SDMA5_RLC6_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA5_RLC6_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA5_RLC6_RB_RPTR_HI +#define SDMA5_RLC6_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA5_RLC6_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA5_RLC6_RB_WPTR +#define SDMA5_RLC6_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA5_RLC6_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA5_RLC6_RB_WPTR_HI +#define SDMA5_RLC6_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA5_RLC6_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA5_RLC6_RB_WPTR_POLL_CNTL +#define SDMA5_RLC6_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA5_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA5_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA5_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA5_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA5_RLC6_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA5_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA5_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA5_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA5_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA5_RLC6_RB_RPTR_ADDR_HI +#define SDMA5_RLC6_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA5_RLC6_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA5_RLC6_RB_RPTR_ADDR_LO +#define SDMA5_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA5_RLC6_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA5_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA5_RLC6_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA5_RLC6_IB_CNTL +#define SDMA5_RLC6_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA5_RLC6_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA5_RLC6_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA5_RLC6_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA5_RLC6_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA5_RLC6_IB_CNTL__IB_SWAP_ENABLE_MASK 
0x00000010L +#define SDMA5_RLC6_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA5_RLC6_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA5_RLC6_IB_RPTR +#define SDMA5_RLC6_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA5_RLC6_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA5_RLC6_IB_OFFSET +#define SDMA5_RLC6_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA5_RLC6_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA5_RLC6_IB_BASE_LO +#define SDMA5_RLC6_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA5_RLC6_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA5_RLC6_IB_BASE_HI +#define SDMA5_RLC6_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA5_RLC6_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA5_RLC6_IB_SIZE +#define SDMA5_RLC6_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA5_RLC6_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA5_RLC6_SKIP_CNTL +#define SDMA5_RLC6_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA5_RLC6_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA5_RLC6_CONTEXT_STATUS +#define SDMA5_RLC6_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA5_RLC6_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA5_RLC6_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA5_RLC6_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA5_RLC6_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA5_RLC6_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA5_RLC6_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA5_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA5_RLC6_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA5_RLC6_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA5_RLC6_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA5_RLC6_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA5_RLC6_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA5_RLC6_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA5_RLC6_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA5_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA5_RLC6_DOORBELL +#define SDMA5_RLC6_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA5_RLC6_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA5_RLC6_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA5_RLC6_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA5_RLC6_STATUS +#define SDMA5_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA5_RLC6_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA5_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA5_RLC6_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA5_RLC6_DOORBELL_LOG +#define SDMA5_RLC6_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA5_RLC6_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA5_RLC6_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA5_RLC6_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA5_RLC6_WATERMARK +#define SDMA5_RLC6_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA5_RLC6_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA5_RLC6_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA5_RLC6_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA5_RLC6_DOORBELL_OFFSET +#define SDMA5_RLC6_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA5_RLC6_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA5_RLC6_CSA_ADDR_LO +#define SDMA5_RLC6_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA5_RLC6_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA5_RLC6_CSA_ADDR_HI +#define SDMA5_RLC6_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA5_RLC6_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA5_RLC6_IB_SUB_REMAIN +#define SDMA5_RLC6_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA5_RLC6_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA5_RLC6_PREEMPT +#define SDMA5_RLC6_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define 
SDMA5_RLC6_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA5_RLC6_DUMMY_REG +#define SDMA5_RLC6_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA5_RLC6_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA5_RLC6_RB_WPTR_POLL_ADDR_HI +#define SDMA5_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA5_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA5_RLC6_RB_WPTR_POLL_ADDR_LO +#define SDMA5_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA5_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA5_RLC6_RB_AQL_CNTL +#define SDMA5_RLC6_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA5_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA5_RLC6_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA5_RLC6_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA5_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA5_RLC6_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA5_RLC6_MINOR_PTR_UPDATE +#define SDMA5_RLC6_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA5_RLC6_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA5_RLC6_MIDCMD_DATA0 +#define SDMA5_RLC6_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA5_RLC6_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA5_RLC6_MIDCMD_DATA1 +#define SDMA5_RLC6_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA5_RLC6_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA5_RLC6_MIDCMD_DATA2 +#define SDMA5_RLC6_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA5_RLC6_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA5_RLC6_MIDCMD_DATA3 +#define SDMA5_RLC6_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA5_RLC6_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA5_RLC6_MIDCMD_DATA4 +#define SDMA5_RLC6_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA5_RLC6_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA5_RLC6_MIDCMD_DATA5 +#define SDMA5_RLC6_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA5_RLC6_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA5_RLC6_MIDCMD_DATA6 +#define SDMA5_RLC6_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA5_RLC6_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA5_RLC6_MIDCMD_DATA7 +#define SDMA5_RLC6_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA5_RLC6_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA5_RLC6_MIDCMD_DATA8 +#define SDMA5_RLC6_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA5_RLC6_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA5_RLC6_MIDCMD_CNTL +#define SDMA5_RLC6_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA5_RLC6_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA5_RLC6_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA5_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA5_RLC6_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA5_RLC6_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA5_RLC6_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA5_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA5_RLC7_RB_CNTL +#define SDMA5_RLC7_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA5_RLC7_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA5_RLC7_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA5_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA5_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA5_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA5_RLC7_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA5_RLC7_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA5_RLC7_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA5_RLC7_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA5_RLC7_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA5_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA5_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define 
SDMA5_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA5_RLC7_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA5_RLC7_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA5_RLC7_RB_BASE +#define SDMA5_RLC7_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA5_RLC7_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA5_RLC7_RB_BASE_HI +#define SDMA5_RLC7_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA5_RLC7_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA5_RLC7_RB_RPTR +#define SDMA5_RLC7_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA5_RLC7_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA5_RLC7_RB_RPTR_HI +#define SDMA5_RLC7_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA5_RLC7_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA5_RLC7_RB_WPTR +#define SDMA5_RLC7_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA5_RLC7_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA5_RLC7_RB_WPTR_HI +#define SDMA5_RLC7_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA5_RLC7_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA5_RLC7_RB_WPTR_POLL_CNTL +#define SDMA5_RLC7_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA5_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA5_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA5_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA5_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA5_RLC7_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA5_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA5_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA5_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA5_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA5_RLC7_RB_RPTR_ADDR_HI +#define SDMA5_RLC7_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA5_RLC7_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA5_RLC7_RB_RPTR_ADDR_LO +#define SDMA5_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA5_RLC7_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA5_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA5_RLC7_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA5_RLC7_IB_CNTL +#define SDMA5_RLC7_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA5_RLC7_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA5_RLC7_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA5_RLC7_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA5_RLC7_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA5_RLC7_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA5_RLC7_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA5_RLC7_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA5_RLC7_IB_RPTR +#define SDMA5_RLC7_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA5_RLC7_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA5_RLC7_IB_OFFSET +#define SDMA5_RLC7_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA5_RLC7_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA5_RLC7_IB_BASE_LO +#define SDMA5_RLC7_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA5_RLC7_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA5_RLC7_IB_BASE_HI +#define SDMA5_RLC7_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA5_RLC7_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA5_RLC7_IB_SIZE +#define SDMA5_RLC7_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA5_RLC7_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA5_RLC7_SKIP_CNTL +#define SDMA5_RLC7_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA5_RLC7_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA5_RLC7_CONTEXT_STATUS +#define SDMA5_RLC7_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA5_RLC7_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA5_RLC7_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA5_RLC7_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA5_RLC7_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 
+#define SDMA5_RLC7_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA5_RLC7_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA5_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA5_RLC7_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA5_RLC7_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA5_RLC7_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA5_RLC7_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA5_RLC7_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA5_RLC7_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA5_RLC7_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA5_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA5_RLC7_DOORBELL +#define SDMA5_RLC7_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA5_RLC7_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA5_RLC7_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA5_RLC7_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA5_RLC7_STATUS +#define SDMA5_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA5_RLC7_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA5_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA5_RLC7_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA5_RLC7_DOORBELL_LOG +#define SDMA5_RLC7_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA5_RLC7_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA5_RLC7_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA5_RLC7_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA5_RLC7_WATERMARK +#define SDMA5_RLC7_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA5_RLC7_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA5_RLC7_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA5_RLC7_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA5_RLC7_DOORBELL_OFFSET +#define SDMA5_RLC7_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA5_RLC7_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA5_RLC7_CSA_ADDR_LO +#define SDMA5_RLC7_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA5_RLC7_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA5_RLC7_CSA_ADDR_HI +#define SDMA5_RLC7_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA5_RLC7_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA5_RLC7_IB_SUB_REMAIN +#define SDMA5_RLC7_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA5_RLC7_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA5_RLC7_PREEMPT +#define SDMA5_RLC7_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA5_RLC7_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA5_RLC7_DUMMY_REG +#define SDMA5_RLC7_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA5_RLC7_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA5_RLC7_RB_WPTR_POLL_ADDR_HI +#define SDMA5_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA5_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA5_RLC7_RB_WPTR_POLL_ADDR_LO +#define SDMA5_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA5_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA5_RLC7_RB_AQL_CNTL +#define SDMA5_RLC7_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA5_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA5_RLC7_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA5_RLC7_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA5_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA5_RLC7_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA5_RLC7_MINOR_PTR_UPDATE +#define SDMA5_RLC7_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA5_RLC7_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA5_RLC7_MIDCMD_DATA0 +#define SDMA5_RLC7_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA5_RLC7_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA5_RLC7_MIDCMD_DATA1 +#define 
SDMA5_RLC7_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA5_RLC7_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA5_RLC7_MIDCMD_DATA2 +#define SDMA5_RLC7_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA5_RLC7_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA5_RLC7_MIDCMD_DATA3 +#define SDMA5_RLC7_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA5_RLC7_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA5_RLC7_MIDCMD_DATA4 +#define SDMA5_RLC7_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA5_RLC7_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA5_RLC7_MIDCMD_DATA5 +#define SDMA5_RLC7_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA5_RLC7_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA5_RLC7_MIDCMD_DATA6 +#define SDMA5_RLC7_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA5_RLC7_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA5_RLC7_MIDCMD_DATA7 +#define SDMA5_RLC7_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA5_RLC7_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA5_RLC7_MIDCMD_DATA8 +#define SDMA5_RLC7_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA5_RLC7_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA5_RLC7_MIDCMD_CNTL +#define SDMA5_RLC7_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA5_RLC7_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA5_RLC7_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA5_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA5_RLC7_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA5_RLC7_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA5_RLC7_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA5_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma6/sdma6_4_2_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/sdma6/sdma6_4_2_2_offset.h new file mode 100644 index 000000000000..ae12db26362e --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/sdma6/sdma6_4_2_2_offset.h @@ -0,0 +1,1043 @@ +/* + * Copyright (C) 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef _sdma6_4_2_2_OFFSET_HEADER +#define _sdma6_4_2_2_OFFSET_HEADER + + + +// addressBlock: sdma6_sdma6dec +// base address: 0x7c000 +#define mmSDMA6_UCODE_ADDR 0x0000 +#define mmSDMA6_UCODE_ADDR_BASE_IDX 1 +#define mmSDMA6_UCODE_DATA 0x0001 +#define mmSDMA6_UCODE_DATA_BASE_IDX 1 +#define mmSDMA6_VM_CNTL 0x0004 +#define mmSDMA6_VM_CNTL_BASE_IDX 1 +#define mmSDMA6_VM_CTX_LO 0x0005 +#define mmSDMA6_VM_CTX_LO_BASE_IDX 1 +#define mmSDMA6_VM_CTX_HI 0x0006 +#define mmSDMA6_VM_CTX_HI_BASE_IDX 1 +#define mmSDMA6_ACTIVE_FCN_ID 0x0007 +#define mmSDMA6_ACTIVE_FCN_ID_BASE_IDX 1 +#define mmSDMA6_VM_CTX_CNTL 0x0008 +#define mmSDMA6_VM_CTX_CNTL_BASE_IDX 1 +#define mmSDMA6_VIRT_RESET_REQ 0x0009 +#define mmSDMA6_VIRT_RESET_REQ_BASE_IDX 1 +#define mmSDMA6_VF_ENABLE 0x000a +#define mmSDMA6_VF_ENABLE_BASE_IDX 1 +#define mmSDMA6_CONTEXT_REG_TYPE0 0x000b +#define mmSDMA6_CONTEXT_REG_TYPE0_BASE_IDX 1 +#define mmSDMA6_CONTEXT_REG_TYPE1 0x000c +#define mmSDMA6_CONTEXT_REG_TYPE1_BASE_IDX 1 +#define mmSDMA6_CONTEXT_REG_TYPE2 0x000d +#define mmSDMA6_CONTEXT_REG_TYPE2_BASE_IDX 1 +#define mmSDMA6_CONTEXT_REG_TYPE3 0x000e +#define mmSDMA6_CONTEXT_REG_TYPE3_BASE_IDX 1 +#define mmSDMA6_PUB_REG_TYPE0 0x000f +#define mmSDMA6_PUB_REG_TYPE0_BASE_IDX 1 +#define mmSDMA6_PUB_REG_TYPE1 0x0010 +#define mmSDMA6_PUB_REG_TYPE1_BASE_IDX 1 +#define mmSDMA6_PUB_REG_TYPE2 0x0011 +#define mmSDMA6_PUB_REG_TYPE2_BASE_IDX 1 +#define mmSDMA6_PUB_REG_TYPE3 0x0012 +#define mmSDMA6_PUB_REG_TYPE3_BASE_IDX 1 +#define mmSDMA6_MMHUB_CNTL 0x0013 +#define mmSDMA6_MMHUB_CNTL_BASE_IDX 1 +#define mmSDMA6_CONTEXT_GROUP_BOUNDARY 0x0019 +#define mmSDMA6_CONTEXT_GROUP_BOUNDARY_BASE_IDX 1 +#define mmSDMA6_POWER_CNTL 0x001a +#define mmSDMA6_POWER_CNTL_BASE_IDX 1 +#define mmSDMA6_CLK_CTRL 0x001b +#define mmSDMA6_CLK_CTRL_BASE_IDX 1 +#define mmSDMA6_CNTL 0x001c +#define mmSDMA6_CNTL_BASE_IDX 1 +#define mmSDMA6_CHICKEN_BITS 0x001d +#define mmSDMA6_CHICKEN_BITS_BASE_IDX 1 +#define mmSDMA6_GB_ADDR_CONFIG 0x001e +#define mmSDMA6_GB_ADDR_CONFIG_BASE_IDX 1 +#define mmSDMA6_GB_ADDR_CONFIG_READ 0x001f +#define mmSDMA6_GB_ADDR_CONFIG_READ_BASE_IDX 1 +#define mmSDMA6_RB_RPTR_FETCH_HI 0x0020 +#define mmSDMA6_RB_RPTR_FETCH_HI_BASE_IDX 1 +#define mmSDMA6_SEM_WAIT_FAIL_TIMER_CNTL 0x0021 +#define mmSDMA6_SEM_WAIT_FAIL_TIMER_CNTL_BASE_IDX 1 +#define mmSDMA6_RB_RPTR_FETCH 0x0022 +#define mmSDMA6_RB_RPTR_FETCH_BASE_IDX 1 +#define mmSDMA6_IB_OFFSET_FETCH 0x0023 +#define mmSDMA6_IB_OFFSET_FETCH_BASE_IDX 1 +#define mmSDMA6_PROGRAM 0x0024 +#define mmSDMA6_PROGRAM_BASE_IDX 1 +#define mmSDMA6_STATUS_REG 0x0025 +#define mmSDMA6_STATUS_REG_BASE_IDX 1 +#define mmSDMA6_STATUS1_REG 0x0026 +#define mmSDMA6_STATUS1_REG_BASE_IDX 1 +#define mmSDMA6_RD_BURST_CNTL 0x0027 +#define mmSDMA6_RD_BURST_CNTL_BASE_IDX 1 +#define mmSDMA6_HBM_PAGE_CONFIG 0x0028 +#define mmSDMA6_HBM_PAGE_CONFIG_BASE_IDX 1 +#define mmSDMA6_UCODE_CHECKSUM 0x0029 +#define mmSDMA6_UCODE_CHECKSUM_BASE_IDX 1 +#define mmSDMA6_F32_CNTL 0x002a +#define mmSDMA6_F32_CNTL_BASE_IDX 1 +#define mmSDMA6_FREEZE 0x002b +#define mmSDMA6_FREEZE_BASE_IDX 1 +#define mmSDMA6_PHASE0_QUANTUM 0x002c +#define mmSDMA6_PHASE0_QUANTUM_BASE_IDX 1 +#define mmSDMA6_PHASE1_QUANTUM 0x002d +#define mmSDMA6_PHASE1_QUANTUM_BASE_IDX 1 +#define mmSDMA6_EDC_CONFIG 0x0032 +#define mmSDMA6_EDC_CONFIG_BASE_IDX 1 +#define mmSDMA6_BA_THRESHOLD 0x0033 +#define mmSDMA6_BA_THRESHOLD_BASE_IDX 1 +#define mmSDMA6_ID 0x0034 +#define mmSDMA6_ID_BASE_IDX 1 +#define mmSDMA6_VERSION 0x0035 +#define mmSDMA6_VERSION_BASE_IDX 1 +#define mmSDMA6_EDC_COUNTER 0x0036 +#define 
mmSDMA6_EDC_COUNTER_BASE_IDX 1 +#define mmSDMA6_EDC_COUNTER_CLEAR 0x0037 +#define mmSDMA6_EDC_COUNTER_CLEAR_BASE_IDX 1 +#define mmSDMA6_STATUS2_REG 0x0038 +#define mmSDMA6_STATUS2_REG_BASE_IDX 1 +#define mmSDMA6_ATOMIC_CNTL 0x0039 +#define mmSDMA6_ATOMIC_CNTL_BASE_IDX 1 +#define mmSDMA6_ATOMIC_PREOP_LO 0x003a +#define mmSDMA6_ATOMIC_PREOP_LO_BASE_IDX 1 +#define mmSDMA6_ATOMIC_PREOP_HI 0x003b +#define mmSDMA6_ATOMIC_PREOP_HI_BASE_IDX 1 +#define mmSDMA6_UTCL1_CNTL 0x003c +#define mmSDMA6_UTCL1_CNTL_BASE_IDX 1 +#define mmSDMA6_UTCL1_WATERMK 0x003d +#define mmSDMA6_UTCL1_WATERMK_BASE_IDX 1 +#define mmSDMA6_UTCL1_RD_STATUS 0x003e +#define mmSDMA6_UTCL1_RD_STATUS_BASE_IDX 1 +#define mmSDMA6_UTCL1_WR_STATUS 0x003f +#define mmSDMA6_UTCL1_WR_STATUS_BASE_IDX 1 +#define mmSDMA6_UTCL1_INV0 0x0040 +#define mmSDMA6_UTCL1_INV0_BASE_IDX 1 +#define mmSDMA6_UTCL1_INV1 0x0041 +#define mmSDMA6_UTCL1_INV1_BASE_IDX 1 +#define mmSDMA6_UTCL1_INV2 0x0042 +#define mmSDMA6_UTCL1_INV2_BASE_IDX 1 +#define mmSDMA6_UTCL1_RD_XNACK0 0x0043 +#define mmSDMA6_UTCL1_RD_XNACK0_BASE_IDX 1 +#define mmSDMA6_UTCL1_RD_XNACK1 0x0044 +#define mmSDMA6_UTCL1_RD_XNACK1_BASE_IDX 1 +#define mmSDMA6_UTCL1_WR_XNACK0 0x0045 +#define mmSDMA6_UTCL1_WR_XNACK0_BASE_IDX 1 +#define mmSDMA6_UTCL1_WR_XNACK1 0x0046 +#define mmSDMA6_UTCL1_WR_XNACK1_BASE_IDX 1 +#define mmSDMA6_UTCL1_TIMEOUT 0x0047 +#define mmSDMA6_UTCL1_TIMEOUT_BASE_IDX 1 +#define mmSDMA6_UTCL1_PAGE 0x0048 +#define mmSDMA6_UTCL1_PAGE_BASE_IDX 1 +#define mmSDMA6_POWER_CNTL_IDLE 0x0049 +#define mmSDMA6_POWER_CNTL_IDLE_BASE_IDX 1 +#define mmSDMA6_RELAX_ORDERING_LUT 0x004a +#define mmSDMA6_RELAX_ORDERING_LUT_BASE_IDX 1 +#define mmSDMA6_CHICKEN_BITS_2 0x004b +#define mmSDMA6_CHICKEN_BITS_2_BASE_IDX 1 +#define mmSDMA6_STATUS3_REG 0x004c +#define mmSDMA6_STATUS3_REG_BASE_IDX 1 +#define mmSDMA6_PHYSICAL_ADDR_LO 0x004d +#define mmSDMA6_PHYSICAL_ADDR_LO_BASE_IDX 1 +#define mmSDMA6_PHYSICAL_ADDR_HI 0x004e +#define mmSDMA6_PHYSICAL_ADDR_HI_BASE_IDX 1 +#define mmSDMA6_PHASE2_QUANTUM 0x004f +#define mmSDMA6_PHASE2_QUANTUM_BASE_IDX 1 +#define mmSDMA6_ERROR_LOG 0x0050 +#define mmSDMA6_ERROR_LOG_BASE_IDX 1 +#define mmSDMA6_PUB_DUMMY_REG0 0x0051 +#define mmSDMA6_PUB_DUMMY_REG0_BASE_IDX 1 +#define mmSDMA6_PUB_DUMMY_REG1 0x0052 +#define mmSDMA6_PUB_DUMMY_REG1_BASE_IDX 1 +#define mmSDMA6_PUB_DUMMY_REG2 0x0053 +#define mmSDMA6_PUB_DUMMY_REG2_BASE_IDX 1 +#define mmSDMA6_PUB_DUMMY_REG3 0x0054 +#define mmSDMA6_PUB_DUMMY_REG3_BASE_IDX 1 +#define mmSDMA6_F32_COUNTER 0x0055 +#define mmSDMA6_F32_COUNTER_BASE_IDX 1 +#define mmSDMA6_UNBREAKABLE 0x0056 +#define mmSDMA6_UNBREAKABLE_BASE_IDX 1 +#define mmSDMA6_PERFMON_CNTL 0x0057 +#define mmSDMA6_PERFMON_CNTL_BASE_IDX 1 +#define mmSDMA6_PERFCOUNTER0_RESULT 0x0058 +#define mmSDMA6_PERFCOUNTER0_RESULT_BASE_IDX 1 +#define mmSDMA6_PERFCOUNTER1_RESULT 0x0059 +#define mmSDMA6_PERFCOUNTER1_RESULT_BASE_IDX 1 +#define mmSDMA6_PERFCOUNTER_TAG_DELAY_RANGE 0x005a +#define mmSDMA6_PERFCOUNTER_TAG_DELAY_RANGE_BASE_IDX 1 +#define mmSDMA6_CRD_CNTL 0x005b +#define mmSDMA6_CRD_CNTL_BASE_IDX 1 +#define mmSDMA6_GPU_IOV_VIOLATION_LOG 0x005d +#define mmSDMA6_GPU_IOV_VIOLATION_LOG_BASE_IDX 1 +#define mmSDMA6_ULV_CNTL 0x005e +#define mmSDMA6_ULV_CNTL_BASE_IDX 1 +#define mmSDMA6_EA_DBIT_ADDR_DATA 0x0060 +#define mmSDMA6_EA_DBIT_ADDR_DATA_BASE_IDX 1 +#define mmSDMA6_EA_DBIT_ADDR_INDEX 0x0061 +#define mmSDMA6_EA_DBIT_ADDR_INDEX_BASE_IDX 1 +#define mmSDMA6_GPU_IOV_VIOLATION_LOG2 0x0062 +#define mmSDMA6_GPU_IOV_VIOLATION_LOG2_BASE_IDX 1 +#define mmSDMA6_GFX_RB_CNTL 0x0080 +#define 
mmSDMA6_GFX_RB_CNTL_BASE_IDX 1 +#define mmSDMA6_GFX_RB_BASE 0x0081 +#define mmSDMA6_GFX_RB_BASE_BASE_IDX 1 +#define mmSDMA6_GFX_RB_BASE_HI 0x0082 +#define mmSDMA6_GFX_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA6_GFX_RB_RPTR 0x0083 +#define mmSDMA6_GFX_RB_RPTR_BASE_IDX 1 +#define mmSDMA6_GFX_RB_RPTR_HI 0x0084 +#define mmSDMA6_GFX_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA6_GFX_RB_WPTR 0x0085 +#define mmSDMA6_GFX_RB_WPTR_BASE_IDX 1 +#define mmSDMA6_GFX_RB_WPTR_HI 0x0086 +#define mmSDMA6_GFX_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA6_GFX_RB_WPTR_POLL_CNTL 0x0087 +#define mmSDMA6_GFX_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA6_GFX_RB_RPTR_ADDR_HI 0x0088 +#define mmSDMA6_GFX_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA6_GFX_RB_RPTR_ADDR_LO 0x0089 +#define mmSDMA6_GFX_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA6_GFX_IB_CNTL 0x008a +#define mmSDMA6_GFX_IB_CNTL_BASE_IDX 1 +#define mmSDMA6_GFX_IB_RPTR 0x008b +#define mmSDMA6_GFX_IB_RPTR_BASE_IDX 1 +#define mmSDMA6_GFX_IB_OFFSET 0x008c +#define mmSDMA6_GFX_IB_OFFSET_BASE_IDX 1 +#define mmSDMA6_GFX_IB_BASE_LO 0x008d +#define mmSDMA6_GFX_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA6_GFX_IB_BASE_HI 0x008e +#define mmSDMA6_GFX_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA6_GFX_IB_SIZE 0x008f +#define mmSDMA6_GFX_IB_SIZE_BASE_IDX 1 +#define mmSDMA6_GFX_SKIP_CNTL 0x0090 +#define mmSDMA6_GFX_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA6_GFX_CONTEXT_STATUS 0x0091 +#define mmSDMA6_GFX_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA6_GFX_DOORBELL 0x0092 +#define mmSDMA6_GFX_DOORBELL_BASE_IDX 1 +#define mmSDMA6_GFX_CONTEXT_CNTL 0x0093 +#define mmSDMA6_GFX_CONTEXT_CNTL_BASE_IDX 1 +#define mmSDMA6_GFX_STATUS 0x00a8 +#define mmSDMA6_GFX_STATUS_BASE_IDX 1 +#define mmSDMA6_GFX_DOORBELL_LOG 0x00a9 +#define mmSDMA6_GFX_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA6_GFX_WATERMARK 0x00aa +#define mmSDMA6_GFX_WATERMARK_BASE_IDX 1 +#define mmSDMA6_GFX_DOORBELL_OFFSET 0x00ab +#define mmSDMA6_GFX_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA6_GFX_CSA_ADDR_LO 0x00ac +#define mmSDMA6_GFX_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA6_GFX_CSA_ADDR_HI 0x00ad +#define mmSDMA6_GFX_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA6_GFX_IB_SUB_REMAIN 0x00af +#define mmSDMA6_GFX_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA6_GFX_PREEMPT 0x00b0 +#define mmSDMA6_GFX_PREEMPT_BASE_IDX 1 +#define mmSDMA6_GFX_DUMMY_REG 0x00b1 +#define mmSDMA6_GFX_DUMMY_REG_BASE_IDX 1 +#define mmSDMA6_GFX_RB_WPTR_POLL_ADDR_HI 0x00b2 +#define mmSDMA6_GFX_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA6_GFX_RB_WPTR_POLL_ADDR_LO 0x00b3 +#define mmSDMA6_GFX_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA6_GFX_RB_AQL_CNTL 0x00b4 +#define mmSDMA6_GFX_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA6_GFX_MINOR_PTR_UPDATE 0x00b5 +#define mmSDMA6_GFX_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA6_GFX_MIDCMD_DATA0 0x00c0 +#define mmSDMA6_GFX_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA6_GFX_MIDCMD_DATA1 0x00c1 +#define mmSDMA6_GFX_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA6_GFX_MIDCMD_DATA2 0x00c2 +#define mmSDMA6_GFX_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA6_GFX_MIDCMD_DATA3 0x00c3 +#define mmSDMA6_GFX_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA6_GFX_MIDCMD_DATA4 0x00c4 +#define mmSDMA6_GFX_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA6_GFX_MIDCMD_DATA5 0x00c5 +#define mmSDMA6_GFX_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA6_GFX_MIDCMD_DATA6 0x00c6 +#define mmSDMA6_GFX_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA6_GFX_MIDCMD_DATA7 0x00c7 +#define mmSDMA6_GFX_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA6_GFX_MIDCMD_DATA8 0x00c8 +#define mmSDMA6_GFX_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA6_GFX_MIDCMD_CNTL 0x00c9 +#define 
mmSDMA6_GFX_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA6_PAGE_RB_CNTL 0x00d8 +#define mmSDMA6_PAGE_RB_CNTL_BASE_IDX 1 +#define mmSDMA6_PAGE_RB_BASE 0x00d9 +#define mmSDMA6_PAGE_RB_BASE_BASE_IDX 1 +#define mmSDMA6_PAGE_RB_BASE_HI 0x00da +#define mmSDMA6_PAGE_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA6_PAGE_RB_RPTR 0x00db +#define mmSDMA6_PAGE_RB_RPTR_BASE_IDX 1 +#define mmSDMA6_PAGE_RB_RPTR_HI 0x00dc +#define mmSDMA6_PAGE_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA6_PAGE_RB_WPTR 0x00dd +#define mmSDMA6_PAGE_RB_WPTR_BASE_IDX 1 +#define mmSDMA6_PAGE_RB_WPTR_HI 0x00de +#define mmSDMA6_PAGE_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA6_PAGE_RB_WPTR_POLL_CNTL 0x00df +#define mmSDMA6_PAGE_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA6_PAGE_RB_RPTR_ADDR_HI 0x00e0 +#define mmSDMA6_PAGE_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA6_PAGE_RB_RPTR_ADDR_LO 0x00e1 +#define mmSDMA6_PAGE_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA6_PAGE_IB_CNTL 0x00e2 +#define mmSDMA6_PAGE_IB_CNTL_BASE_IDX 1 +#define mmSDMA6_PAGE_IB_RPTR 0x00e3 +#define mmSDMA6_PAGE_IB_RPTR_BASE_IDX 1 +#define mmSDMA6_PAGE_IB_OFFSET 0x00e4 +#define mmSDMA6_PAGE_IB_OFFSET_BASE_IDX 1 +#define mmSDMA6_PAGE_IB_BASE_LO 0x00e5 +#define mmSDMA6_PAGE_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA6_PAGE_IB_BASE_HI 0x00e6 +#define mmSDMA6_PAGE_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA6_PAGE_IB_SIZE 0x00e7 +#define mmSDMA6_PAGE_IB_SIZE_BASE_IDX 1 +#define mmSDMA6_PAGE_SKIP_CNTL 0x00e8 +#define mmSDMA6_PAGE_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA6_PAGE_CONTEXT_STATUS 0x00e9 +#define mmSDMA6_PAGE_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA6_PAGE_DOORBELL 0x00ea +#define mmSDMA6_PAGE_DOORBELL_BASE_IDX 1 +#define mmSDMA6_PAGE_STATUS 0x0100 +#define mmSDMA6_PAGE_STATUS_BASE_IDX 1 +#define mmSDMA6_PAGE_DOORBELL_LOG 0x0101 +#define mmSDMA6_PAGE_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA6_PAGE_WATERMARK 0x0102 +#define mmSDMA6_PAGE_WATERMARK_BASE_IDX 1 +#define mmSDMA6_PAGE_DOORBELL_OFFSET 0x0103 +#define mmSDMA6_PAGE_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA6_PAGE_CSA_ADDR_LO 0x0104 +#define mmSDMA6_PAGE_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA6_PAGE_CSA_ADDR_HI 0x0105 +#define mmSDMA6_PAGE_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA6_PAGE_IB_SUB_REMAIN 0x0107 +#define mmSDMA6_PAGE_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA6_PAGE_PREEMPT 0x0108 +#define mmSDMA6_PAGE_PREEMPT_BASE_IDX 1 +#define mmSDMA6_PAGE_DUMMY_REG 0x0109 +#define mmSDMA6_PAGE_DUMMY_REG_BASE_IDX 1 +#define mmSDMA6_PAGE_RB_WPTR_POLL_ADDR_HI 0x010a +#define mmSDMA6_PAGE_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA6_PAGE_RB_WPTR_POLL_ADDR_LO 0x010b +#define mmSDMA6_PAGE_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA6_PAGE_RB_AQL_CNTL 0x010c +#define mmSDMA6_PAGE_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA6_PAGE_MINOR_PTR_UPDATE 0x010d +#define mmSDMA6_PAGE_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA6_PAGE_MIDCMD_DATA0 0x0118 +#define mmSDMA6_PAGE_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA6_PAGE_MIDCMD_DATA1 0x0119 +#define mmSDMA6_PAGE_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA6_PAGE_MIDCMD_DATA2 0x011a +#define mmSDMA6_PAGE_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA6_PAGE_MIDCMD_DATA3 0x011b +#define mmSDMA6_PAGE_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA6_PAGE_MIDCMD_DATA4 0x011c +#define mmSDMA6_PAGE_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA6_PAGE_MIDCMD_DATA5 0x011d +#define mmSDMA6_PAGE_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA6_PAGE_MIDCMD_DATA6 0x011e +#define mmSDMA6_PAGE_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA6_PAGE_MIDCMD_DATA7 0x011f +#define mmSDMA6_PAGE_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA6_PAGE_MIDCMD_DATA8 0x0120 +#define 
mmSDMA6_PAGE_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA6_PAGE_MIDCMD_CNTL 0x0121 +#define mmSDMA6_PAGE_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC0_RB_CNTL 0x0130 +#define mmSDMA6_RLC0_RB_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC0_RB_BASE 0x0131 +#define mmSDMA6_RLC0_RB_BASE_BASE_IDX 1 +#define mmSDMA6_RLC0_RB_BASE_HI 0x0132 +#define mmSDMA6_RLC0_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA6_RLC0_RB_RPTR 0x0133 +#define mmSDMA6_RLC0_RB_RPTR_BASE_IDX 1 +#define mmSDMA6_RLC0_RB_RPTR_HI 0x0134 +#define mmSDMA6_RLC0_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA6_RLC0_RB_WPTR 0x0135 +#define mmSDMA6_RLC0_RB_WPTR_BASE_IDX 1 +#define mmSDMA6_RLC0_RB_WPTR_HI 0x0136 +#define mmSDMA6_RLC0_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA6_RLC0_RB_WPTR_POLL_CNTL 0x0137 +#define mmSDMA6_RLC0_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC0_RB_RPTR_ADDR_HI 0x0138 +#define mmSDMA6_RLC0_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA6_RLC0_RB_RPTR_ADDR_LO 0x0139 +#define mmSDMA6_RLC0_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA6_RLC0_IB_CNTL 0x013a +#define mmSDMA6_RLC0_IB_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC0_IB_RPTR 0x013b +#define mmSDMA6_RLC0_IB_RPTR_BASE_IDX 1 +#define mmSDMA6_RLC0_IB_OFFSET 0x013c +#define mmSDMA6_RLC0_IB_OFFSET_BASE_IDX 1 +#define mmSDMA6_RLC0_IB_BASE_LO 0x013d +#define mmSDMA6_RLC0_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA6_RLC0_IB_BASE_HI 0x013e +#define mmSDMA6_RLC0_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA6_RLC0_IB_SIZE 0x013f +#define mmSDMA6_RLC0_IB_SIZE_BASE_IDX 1 +#define mmSDMA6_RLC0_SKIP_CNTL 0x0140 +#define mmSDMA6_RLC0_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC0_CONTEXT_STATUS 0x0141 +#define mmSDMA6_RLC0_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA6_RLC0_DOORBELL 0x0142 +#define mmSDMA6_RLC0_DOORBELL_BASE_IDX 1 +#define mmSDMA6_RLC0_STATUS 0x0158 +#define mmSDMA6_RLC0_STATUS_BASE_IDX 1 +#define mmSDMA6_RLC0_DOORBELL_LOG 0x0159 +#define mmSDMA6_RLC0_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA6_RLC0_WATERMARK 0x015a +#define mmSDMA6_RLC0_WATERMARK_BASE_IDX 1 +#define mmSDMA6_RLC0_DOORBELL_OFFSET 0x015b +#define mmSDMA6_RLC0_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA6_RLC0_CSA_ADDR_LO 0x015c +#define mmSDMA6_RLC0_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA6_RLC0_CSA_ADDR_HI 0x015d +#define mmSDMA6_RLC0_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA6_RLC0_IB_SUB_REMAIN 0x015f +#define mmSDMA6_RLC0_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA6_RLC0_PREEMPT 0x0160 +#define mmSDMA6_RLC0_PREEMPT_BASE_IDX 1 +#define mmSDMA6_RLC0_DUMMY_REG 0x0161 +#define mmSDMA6_RLC0_DUMMY_REG_BASE_IDX 1 +#define mmSDMA6_RLC0_RB_WPTR_POLL_ADDR_HI 0x0162 +#define mmSDMA6_RLC0_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA6_RLC0_RB_WPTR_POLL_ADDR_LO 0x0163 +#define mmSDMA6_RLC0_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA6_RLC0_RB_AQL_CNTL 0x0164 +#define mmSDMA6_RLC0_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC0_MINOR_PTR_UPDATE 0x0165 +#define mmSDMA6_RLC0_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA6_RLC0_MIDCMD_DATA0 0x0170 +#define mmSDMA6_RLC0_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA6_RLC0_MIDCMD_DATA1 0x0171 +#define mmSDMA6_RLC0_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA6_RLC0_MIDCMD_DATA2 0x0172 +#define mmSDMA6_RLC0_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA6_RLC0_MIDCMD_DATA3 0x0173 +#define mmSDMA6_RLC0_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA6_RLC0_MIDCMD_DATA4 0x0174 +#define mmSDMA6_RLC0_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA6_RLC0_MIDCMD_DATA5 0x0175 +#define mmSDMA6_RLC0_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA6_RLC0_MIDCMD_DATA6 0x0176 +#define mmSDMA6_RLC0_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA6_RLC0_MIDCMD_DATA7 0x0177 +#define 
mmSDMA6_RLC0_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA6_RLC0_MIDCMD_DATA8 0x0178 +#define mmSDMA6_RLC0_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA6_RLC0_MIDCMD_CNTL 0x0179 +#define mmSDMA6_RLC0_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC1_RB_CNTL 0x0188 +#define mmSDMA6_RLC1_RB_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC1_RB_BASE 0x0189 +#define mmSDMA6_RLC1_RB_BASE_BASE_IDX 1 +#define mmSDMA6_RLC1_RB_BASE_HI 0x018a +#define mmSDMA6_RLC1_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA6_RLC1_RB_RPTR 0x018b +#define mmSDMA6_RLC1_RB_RPTR_BASE_IDX 1 +#define mmSDMA6_RLC1_RB_RPTR_HI 0x018c +#define mmSDMA6_RLC1_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA6_RLC1_RB_WPTR 0x018d +#define mmSDMA6_RLC1_RB_WPTR_BASE_IDX 1 +#define mmSDMA6_RLC1_RB_WPTR_HI 0x018e +#define mmSDMA6_RLC1_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA6_RLC1_RB_WPTR_POLL_CNTL 0x018f +#define mmSDMA6_RLC1_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC1_RB_RPTR_ADDR_HI 0x0190 +#define mmSDMA6_RLC1_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA6_RLC1_RB_RPTR_ADDR_LO 0x0191 +#define mmSDMA6_RLC1_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA6_RLC1_IB_CNTL 0x0192 +#define mmSDMA6_RLC1_IB_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC1_IB_RPTR 0x0193 +#define mmSDMA6_RLC1_IB_RPTR_BASE_IDX 1 +#define mmSDMA6_RLC1_IB_OFFSET 0x0194 +#define mmSDMA6_RLC1_IB_OFFSET_BASE_IDX 1 +#define mmSDMA6_RLC1_IB_BASE_LO 0x0195 +#define mmSDMA6_RLC1_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA6_RLC1_IB_BASE_HI 0x0196 +#define mmSDMA6_RLC1_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA6_RLC1_IB_SIZE 0x0197 +#define mmSDMA6_RLC1_IB_SIZE_BASE_IDX 1 +#define mmSDMA6_RLC1_SKIP_CNTL 0x0198 +#define mmSDMA6_RLC1_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC1_CONTEXT_STATUS 0x0199 +#define mmSDMA6_RLC1_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA6_RLC1_DOORBELL 0x019a +#define mmSDMA6_RLC1_DOORBELL_BASE_IDX 1 +#define mmSDMA6_RLC1_STATUS 0x01b0 +#define mmSDMA6_RLC1_STATUS_BASE_IDX 1 +#define mmSDMA6_RLC1_DOORBELL_LOG 0x01b1 +#define mmSDMA6_RLC1_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA6_RLC1_WATERMARK 0x01b2 +#define mmSDMA6_RLC1_WATERMARK_BASE_IDX 1 +#define mmSDMA6_RLC1_DOORBELL_OFFSET 0x01b3 +#define mmSDMA6_RLC1_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA6_RLC1_CSA_ADDR_LO 0x01b4 +#define mmSDMA6_RLC1_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA6_RLC1_CSA_ADDR_HI 0x01b5 +#define mmSDMA6_RLC1_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA6_RLC1_IB_SUB_REMAIN 0x01b7 +#define mmSDMA6_RLC1_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA6_RLC1_PREEMPT 0x01b8 +#define mmSDMA6_RLC1_PREEMPT_BASE_IDX 1 +#define mmSDMA6_RLC1_DUMMY_REG 0x01b9 +#define mmSDMA6_RLC1_DUMMY_REG_BASE_IDX 1 +#define mmSDMA6_RLC1_RB_WPTR_POLL_ADDR_HI 0x01ba +#define mmSDMA6_RLC1_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA6_RLC1_RB_WPTR_POLL_ADDR_LO 0x01bb +#define mmSDMA6_RLC1_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA6_RLC1_RB_AQL_CNTL 0x01bc +#define mmSDMA6_RLC1_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC1_MINOR_PTR_UPDATE 0x01bd +#define mmSDMA6_RLC1_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA6_RLC1_MIDCMD_DATA0 0x01c8 +#define mmSDMA6_RLC1_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA6_RLC1_MIDCMD_DATA1 0x01c9 +#define mmSDMA6_RLC1_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA6_RLC1_MIDCMD_DATA2 0x01ca +#define mmSDMA6_RLC1_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA6_RLC1_MIDCMD_DATA3 0x01cb +#define mmSDMA6_RLC1_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA6_RLC1_MIDCMD_DATA4 0x01cc +#define mmSDMA6_RLC1_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA6_RLC1_MIDCMD_DATA5 0x01cd +#define mmSDMA6_RLC1_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA6_RLC1_MIDCMD_DATA6 0x01ce +#define 
mmSDMA6_RLC1_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA6_RLC1_MIDCMD_DATA7 0x01cf +#define mmSDMA6_RLC1_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA6_RLC1_MIDCMD_DATA8 0x01d0 +#define mmSDMA6_RLC1_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA6_RLC1_MIDCMD_CNTL 0x01d1 +#define mmSDMA6_RLC1_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC2_RB_CNTL 0x01e0 +#define mmSDMA6_RLC2_RB_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC2_RB_BASE 0x01e1 +#define mmSDMA6_RLC2_RB_BASE_BASE_IDX 1 +#define mmSDMA6_RLC2_RB_BASE_HI 0x01e2 +#define mmSDMA6_RLC2_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA6_RLC2_RB_RPTR 0x01e3 +#define mmSDMA6_RLC2_RB_RPTR_BASE_IDX 1 +#define mmSDMA6_RLC2_RB_RPTR_HI 0x01e4 +#define mmSDMA6_RLC2_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA6_RLC2_RB_WPTR 0x01e5 +#define mmSDMA6_RLC2_RB_WPTR_BASE_IDX 1 +#define mmSDMA6_RLC2_RB_WPTR_HI 0x01e6 +#define mmSDMA6_RLC2_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA6_RLC2_RB_WPTR_POLL_CNTL 0x01e7 +#define mmSDMA6_RLC2_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC2_RB_RPTR_ADDR_HI 0x01e8 +#define mmSDMA6_RLC2_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA6_RLC2_RB_RPTR_ADDR_LO 0x01e9 +#define mmSDMA6_RLC2_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA6_RLC2_IB_CNTL 0x01ea +#define mmSDMA6_RLC2_IB_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC2_IB_RPTR 0x01eb +#define mmSDMA6_RLC2_IB_RPTR_BASE_IDX 1 +#define mmSDMA6_RLC2_IB_OFFSET 0x01ec +#define mmSDMA6_RLC2_IB_OFFSET_BASE_IDX 1 +#define mmSDMA6_RLC2_IB_BASE_LO 0x01ed +#define mmSDMA6_RLC2_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA6_RLC2_IB_BASE_HI 0x01ee +#define mmSDMA6_RLC2_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA6_RLC2_IB_SIZE 0x01ef +#define mmSDMA6_RLC2_IB_SIZE_BASE_IDX 1 +#define mmSDMA6_RLC2_SKIP_CNTL 0x01f0 +#define mmSDMA6_RLC2_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC2_CONTEXT_STATUS 0x01f1 +#define mmSDMA6_RLC2_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA6_RLC2_DOORBELL 0x01f2 +#define mmSDMA6_RLC2_DOORBELL_BASE_IDX 1 +#define mmSDMA6_RLC2_STATUS 0x0208 +#define mmSDMA6_RLC2_STATUS_BASE_IDX 1 +#define mmSDMA6_RLC2_DOORBELL_LOG 0x0209 +#define mmSDMA6_RLC2_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA6_RLC2_WATERMARK 0x020a +#define mmSDMA6_RLC2_WATERMARK_BASE_IDX 1 +#define mmSDMA6_RLC2_DOORBELL_OFFSET 0x020b +#define mmSDMA6_RLC2_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA6_RLC2_CSA_ADDR_LO 0x020c +#define mmSDMA6_RLC2_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA6_RLC2_CSA_ADDR_HI 0x020d +#define mmSDMA6_RLC2_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA6_RLC2_IB_SUB_REMAIN 0x020f +#define mmSDMA6_RLC2_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA6_RLC2_PREEMPT 0x0210 +#define mmSDMA6_RLC2_PREEMPT_BASE_IDX 1 +#define mmSDMA6_RLC2_DUMMY_REG 0x0211 +#define mmSDMA6_RLC2_DUMMY_REG_BASE_IDX 1 +#define mmSDMA6_RLC2_RB_WPTR_POLL_ADDR_HI 0x0212 +#define mmSDMA6_RLC2_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA6_RLC2_RB_WPTR_POLL_ADDR_LO 0x0213 +#define mmSDMA6_RLC2_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA6_RLC2_RB_AQL_CNTL 0x0214 +#define mmSDMA6_RLC2_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC2_MINOR_PTR_UPDATE 0x0215 +#define mmSDMA6_RLC2_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA6_RLC2_MIDCMD_DATA0 0x0220 +#define mmSDMA6_RLC2_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA6_RLC2_MIDCMD_DATA1 0x0221 +#define mmSDMA6_RLC2_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA6_RLC2_MIDCMD_DATA2 0x0222 +#define mmSDMA6_RLC2_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA6_RLC2_MIDCMD_DATA3 0x0223 +#define mmSDMA6_RLC2_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA6_RLC2_MIDCMD_DATA4 0x0224 +#define mmSDMA6_RLC2_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA6_RLC2_MIDCMD_DATA5 0x0225 +#define 
mmSDMA6_RLC2_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA6_RLC2_MIDCMD_DATA6 0x0226 +#define mmSDMA6_RLC2_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA6_RLC2_MIDCMD_DATA7 0x0227 +#define mmSDMA6_RLC2_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA6_RLC2_MIDCMD_DATA8 0x0228 +#define mmSDMA6_RLC2_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA6_RLC2_MIDCMD_CNTL 0x0229 +#define mmSDMA6_RLC2_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC3_RB_CNTL 0x0238 +#define mmSDMA6_RLC3_RB_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC3_RB_BASE 0x0239 +#define mmSDMA6_RLC3_RB_BASE_BASE_IDX 1 +#define mmSDMA6_RLC3_RB_BASE_HI 0x023a +#define mmSDMA6_RLC3_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA6_RLC3_RB_RPTR 0x023b +#define mmSDMA6_RLC3_RB_RPTR_BASE_IDX 1 +#define mmSDMA6_RLC3_RB_RPTR_HI 0x023c +#define mmSDMA6_RLC3_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA6_RLC3_RB_WPTR 0x023d +#define mmSDMA6_RLC3_RB_WPTR_BASE_IDX 1 +#define mmSDMA6_RLC3_RB_WPTR_HI 0x023e +#define mmSDMA6_RLC3_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA6_RLC3_RB_WPTR_POLL_CNTL 0x023f +#define mmSDMA6_RLC3_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC3_RB_RPTR_ADDR_HI 0x0240 +#define mmSDMA6_RLC3_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA6_RLC3_RB_RPTR_ADDR_LO 0x0241 +#define mmSDMA6_RLC3_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA6_RLC3_IB_CNTL 0x0242 +#define mmSDMA6_RLC3_IB_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC3_IB_RPTR 0x0243 +#define mmSDMA6_RLC3_IB_RPTR_BASE_IDX 1 +#define mmSDMA6_RLC3_IB_OFFSET 0x0244 +#define mmSDMA6_RLC3_IB_OFFSET_BASE_IDX 1 +#define mmSDMA6_RLC3_IB_BASE_LO 0x0245 +#define mmSDMA6_RLC3_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA6_RLC3_IB_BASE_HI 0x0246 +#define mmSDMA6_RLC3_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA6_RLC3_IB_SIZE 0x0247 +#define mmSDMA6_RLC3_IB_SIZE_BASE_IDX 1 +#define mmSDMA6_RLC3_SKIP_CNTL 0x0248 +#define mmSDMA6_RLC3_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC3_CONTEXT_STATUS 0x0249 +#define mmSDMA6_RLC3_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA6_RLC3_DOORBELL 0x024a +#define mmSDMA6_RLC3_DOORBELL_BASE_IDX 1 +#define mmSDMA6_RLC3_STATUS 0x0260 +#define mmSDMA6_RLC3_STATUS_BASE_IDX 1 +#define mmSDMA6_RLC3_DOORBELL_LOG 0x0261 +#define mmSDMA6_RLC3_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA6_RLC3_WATERMARK 0x0262 +#define mmSDMA6_RLC3_WATERMARK_BASE_IDX 1 +#define mmSDMA6_RLC3_DOORBELL_OFFSET 0x0263 +#define mmSDMA6_RLC3_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA6_RLC3_CSA_ADDR_LO 0x0264 +#define mmSDMA6_RLC3_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA6_RLC3_CSA_ADDR_HI 0x0265 +#define mmSDMA6_RLC3_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA6_RLC3_IB_SUB_REMAIN 0x0267 +#define mmSDMA6_RLC3_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA6_RLC3_PREEMPT 0x0268 +#define mmSDMA6_RLC3_PREEMPT_BASE_IDX 1 +#define mmSDMA6_RLC3_DUMMY_REG 0x0269 +#define mmSDMA6_RLC3_DUMMY_REG_BASE_IDX 1 +#define mmSDMA6_RLC3_RB_WPTR_POLL_ADDR_HI 0x026a +#define mmSDMA6_RLC3_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA6_RLC3_RB_WPTR_POLL_ADDR_LO 0x026b +#define mmSDMA6_RLC3_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA6_RLC3_RB_AQL_CNTL 0x026c +#define mmSDMA6_RLC3_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC3_MINOR_PTR_UPDATE 0x026d +#define mmSDMA6_RLC3_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA6_RLC3_MIDCMD_DATA0 0x0278 +#define mmSDMA6_RLC3_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA6_RLC3_MIDCMD_DATA1 0x0279 +#define mmSDMA6_RLC3_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA6_RLC3_MIDCMD_DATA2 0x027a +#define mmSDMA6_RLC3_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA6_RLC3_MIDCMD_DATA3 0x027b +#define mmSDMA6_RLC3_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA6_RLC3_MIDCMD_DATA4 0x027c +#define 
mmSDMA6_RLC3_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA6_RLC3_MIDCMD_DATA5 0x027d +#define mmSDMA6_RLC3_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA6_RLC3_MIDCMD_DATA6 0x027e +#define mmSDMA6_RLC3_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA6_RLC3_MIDCMD_DATA7 0x027f +#define mmSDMA6_RLC3_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA6_RLC3_MIDCMD_DATA8 0x0280 +#define mmSDMA6_RLC3_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA6_RLC3_MIDCMD_CNTL 0x0281 +#define mmSDMA6_RLC3_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC4_RB_CNTL 0x0290 +#define mmSDMA6_RLC4_RB_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC4_RB_BASE 0x0291 +#define mmSDMA6_RLC4_RB_BASE_BASE_IDX 1 +#define mmSDMA6_RLC4_RB_BASE_HI 0x0292 +#define mmSDMA6_RLC4_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA6_RLC4_RB_RPTR 0x0293 +#define mmSDMA6_RLC4_RB_RPTR_BASE_IDX 1 +#define mmSDMA6_RLC4_RB_RPTR_HI 0x0294 +#define mmSDMA6_RLC4_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA6_RLC4_RB_WPTR 0x0295 +#define mmSDMA6_RLC4_RB_WPTR_BASE_IDX 1 +#define mmSDMA6_RLC4_RB_WPTR_HI 0x0296 +#define mmSDMA6_RLC4_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA6_RLC4_RB_WPTR_POLL_CNTL 0x0297 +#define mmSDMA6_RLC4_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC4_RB_RPTR_ADDR_HI 0x0298 +#define mmSDMA6_RLC4_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA6_RLC4_RB_RPTR_ADDR_LO 0x0299 +#define mmSDMA6_RLC4_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA6_RLC4_IB_CNTL 0x029a +#define mmSDMA6_RLC4_IB_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC4_IB_RPTR 0x029b +#define mmSDMA6_RLC4_IB_RPTR_BASE_IDX 1 +#define mmSDMA6_RLC4_IB_OFFSET 0x029c +#define mmSDMA6_RLC4_IB_OFFSET_BASE_IDX 1 +#define mmSDMA6_RLC4_IB_BASE_LO 0x029d +#define mmSDMA6_RLC4_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA6_RLC4_IB_BASE_HI 0x029e +#define mmSDMA6_RLC4_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA6_RLC4_IB_SIZE 0x029f +#define mmSDMA6_RLC4_IB_SIZE_BASE_IDX 1 +#define mmSDMA6_RLC4_SKIP_CNTL 0x02a0 +#define mmSDMA6_RLC4_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC4_CONTEXT_STATUS 0x02a1 +#define mmSDMA6_RLC4_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA6_RLC4_DOORBELL 0x02a2 +#define mmSDMA6_RLC4_DOORBELL_BASE_IDX 1 +#define mmSDMA6_RLC4_STATUS 0x02b8 +#define mmSDMA6_RLC4_STATUS_BASE_IDX 1 +#define mmSDMA6_RLC4_DOORBELL_LOG 0x02b9 +#define mmSDMA6_RLC4_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA6_RLC4_WATERMARK 0x02ba +#define mmSDMA6_RLC4_WATERMARK_BASE_IDX 1 +#define mmSDMA6_RLC4_DOORBELL_OFFSET 0x02bb +#define mmSDMA6_RLC4_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA6_RLC4_CSA_ADDR_LO 0x02bc +#define mmSDMA6_RLC4_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA6_RLC4_CSA_ADDR_HI 0x02bd +#define mmSDMA6_RLC4_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA6_RLC4_IB_SUB_REMAIN 0x02bf +#define mmSDMA6_RLC4_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA6_RLC4_PREEMPT 0x02c0 +#define mmSDMA6_RLC4_PREEMPT_BASE_IDX 1 +#define mmSDMA6_RLC4_DUMMY_REG 0x02c1 +#define mmSDMA6_RLC4_DUMMY_REG_BASE_IDX 1 +#define mmSDMA6_RLC4_RB_WPTR_POLL_ADDR_HI 0x02c2 +#define mmSDMA6_RLC4_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA6_RLC4_RB_WPTR_POLL_ADDR_LO 0x02c3 +#define mmSDMA6_RLC4_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA6_RLC4_RB_AQL_CNTL 0x02c4 +#define mmSDMA6_RLC4_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC4_MINOR_PTR_UPDATE 0x02c5 +#define mmSDMA6_RLC4_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA6_RLC4_MIDCMD_DATA0 0x02d0 +#define mmSDMA6_RLC4_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA6_RLC4_MIDCMD_DATA1 0x02d1 +#define mmSDMA6_RLC4_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA6_RLC4_MIDCMD_DATA2 0x02d2 +#define mmSDMA6_RLC4_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA6_RLC4_MIDCMD_DATA3 0x02d3 +#define 
mmSDMA6_RLC4_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA6_RLC4_MIDCMD_DATA4 0x02d4 +#define mmSDMA6_RLC4_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA6_RLC4_MIDCMD_DATA5 0x02d5 +#define mmSDMA6_RLC4_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA6_RLC4_MIDCMD_DATA6 0x02d6 +#define mmSDMA6_RLC4_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA6_RLC4_MIDCMD_DATA7 0x02d7 +#define mmSDMA6_RLC4_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA6_RLC4_MIDCMD_DATA8 0x02d8 +#define mmSDMA6_RLC4_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA6_RLC4_MIDCMD_CNTL 0x02d9 +#define mmSDMA6_RLC4_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC5_RB_CNTL 0x02e8 +#define mmSDMA6_RLC5_RB_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC5_RB_BASE 0x02e9 +#define mmSDMA6_RLC5_RB_BASE_BASE_IDX 1 +#define mmSDMA6_RLC5_RB_BASE_HI 0x02ea +#define mmSDMA6_RLC5_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA6_RLC5_RB_RPTR 0x02eb +#define mmSDMA6_RLC5_RB_RPTR_BASE_IDX 1 +#define mmSDMA6_RLC5_RB_RPTR_HI 0x02ec +#define mmSDMA6_RLC5_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA6_RLC5_RB_WPTR 0x02ed +#define mmSDMA6_RLC5_RB_WPTR_BASE_IDX 1 +#define mmSDMA6_RLC5_RB_WPTR_HI 0x02ee +#define mmSDMA6_RLC5_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA6_RLC5_RB_WPTR_POLL_CNTL 0x02ef +#define mmSDMA6_RLC5_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC5_RB_RPTR_ADDR_HI 0x02f0 +#define mmSDMA6_RLC5_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA6_RLC5_RB_RPTR_ADDR_LO 0x02f1 +#define mmSDMA6_RLC5_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA6_RLC5_IB_CNTL 0x02f2 +#define mmSDMA6_RLC5_IB_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC5_IB_RPTR 0x02f3 +#define mmSDMA6_RLC5_IB_RPTR_BASE_IDX 1 +#define mmSDMA6_RLC5_IB_OFFSET 0x02f4 +#define mmSDMA6_RLC5_IB_OFFSET_BASE_IDX 1 +#define mmSDMA6_RLC5_IB_BASE_LO 0x02f5 +#define mmSDMA6_RLC5_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA6_RLC5_IB_BASE_HI 0x02f6 +#define mmSDMA6_RLC5_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA6_RLC5_IB_SIZE 0x02f7 +#define mmSDMA6_RLC5_IB_SIZE_BASE_IDX 1 +#define mmSDMA6_RLC5_SKIP_CNTL 0x02f8 +#define mmSDMA6_RLC5_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC5_CONTEXT_STATUS 0x02f9 +#define mmSDMA6_RLC5_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA6_RLC5_DOORBELL 0x02fa +#define mmSDMA6_RLC5_DOORBELL_BASE_IDX 1 +#define mmSDMA6_RLC5_STATUS 0x0310 +#define mmSDMA6_RLC5_STATUS_BASE_IDX 1 +#define mmSDMA6_RLC5_DOORBELL_LOG 0x0311 +#define mmSDMA6_RLC5_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA6_RLC5_WATERMARK 0x0312 +#define mmSDMA6_RLC5_WATERMARK_BASE_IDX 1 +#define mmSDMA6_RLC5_DOORBELL_OFFSET 0x0313 +#define mmSDMA6_RLC5_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA6_RLC5_CSA_ADDR_LO 0x0314 +#define mmSDMA6_RLC5_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA6_RLC5_CSA_ADDR_HI 0x0315 +#define mmSDMA6_RLC5_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA6_RLC5_IB_SUB_REMAIN 0x0317 +#define mmSDMA6_RLC5_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA6_RLC5_PREEMPT 0x0318 +#define mmSDMA6_RLC5_PREEMPT_BASE_IDX 1 +#define mmSDMA6_RLC5_DUMMY_REG 0x0319 +#define mmSDMA6_RLC5_DUMMY_REG_BASE_IDX 1 +#define mmSDMA6_RLC5_RB_WPTR_POLL_ADDR_HI 0x031a +#define mmSDMA6_RLC5_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA6_RLC5_RB_WPTR_POLL_ADDR_LO 0x031b +#define mmSDMA6_RLC5_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA6_RLC5_RB_AQL_CNTL 0x031c +#define mmSDMA6_RLC5_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC5_MINOR_PTR_UPDATE 0x031d +#define mmSDMA6_RLC5_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA6_RLC5_MIDCMD_DATA0 0x0328 +#define mmSDMA6_RLC5_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA6_RLC5_MIDCMD_DATA1 0x0329 +#define mmSDMA6_RLC5_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA6_RLC5_MIDCMD_DATA2 0x032a +#define 
mmSDMA6_RLC5_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA6_RLC5_MIDCMD_DATA3 0x032b +#define mmSDMA6_RLC5_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA6_RLC5_MIDCMD_DATA4 0x032c +#define mmSDMA6_RLC5_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA6_RLC5_MIDCMD_DATA5 0x032d +#define mmSDMA6_RLC5_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA6_RLC5_MIDCMD_DATA6 0x032e +#define mmSDMA6_RLC5_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA6_RLC5_MIDCMD_DATA7 0x032f +#define mmSDMA6_RLC5_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA6_RLC5_MIDCMD_DATA8 0x0330 +#define mmSDMA6_RLC5_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA6_RLC5_MIDCMD_CNTL 0x0331 +#define mmSDMA6_RLC5_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC6_RB_CNTL 0x0340 +#define mmSDMA6_RLC6_RB_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC6_RB_BASE 0x0341 +#define mmSDMA6_RLC6_RB_BASE_BASE_IDX 1 +#define mmSDMA6_RLC6_RB_BASE_HI 0x0342 +#define mmSDMA6_RLC6_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA6_RLC6_RB_RPTR 0x0343 +#define mmSDMA6_RLC6_RB_RPTR_BASE_IDX 1 +#define mmSDMA6_RLC6_RB_RPTR_HI 0x0344 +#define mmSDMA6_RLC6_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA6_RLC6_RB_WPTR 0x0345 +#define mmSDMA6_RLC6_RB_WPTR_BASE_IDX 1 +#define mmSDMA6_RLC6_RB_WPTR_HI 0x0346 +#define mmSDMA6_RLC6_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA6_RLC6_RB_WPTR_POLL_CNTL 0x0347 +#define mmSDMA6_RLC6_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC6_RB_RPTR_ADDR_HI 0x0348 +#define mmSDMA6_RLC6_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA6_RLC6_RB_RPTR_ADDR_LO 0x0349 +#define mmSDMA6_RLC6_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA6_RLC6_IB_CNTL 0x034a +#define mmSDMA6_RLC6_IB_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC6_IB_RPTR 0x034b +#define mmSDMA6_RLC6_IB_RPTR_BASE_IDX 1 +#define mmSDMA6_RLC6_IB_OFFSET 0x034c +#define mmSDMA6_RLC6_IB_OFFSET_BASE_IDX 1 +#define mmSDMA6_RLC6_IB_BASE_LO 0x034d +#define mmSDMA6_RLC6_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA6_RLC6_IB_BASE_HI 0x034e +#define mmSDMA6_RLC6_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA6_RLC6_IB_SIZE 0x034f +#define mmSDMA6_RLC6_IB_SIZE_BASE_IDX 1 +#define mmSDMA6_RLC6_SKIP_CNTL 0x0350 +#define mmSDMA6_RLC6_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC6_CONTEXT_STATUS 0x0351 +#define mmSDMA6_RLC6_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA6_RLC6_DOORBELL 0x0352 +#define mmSDMA6_RLC6_DOORBELL_BASE_IDX 1 +#define mmSDMA6_RLC6_STATUS 0x0368 +#define mmSDMA6_RLC6_STATUS_BASE_IDX 1 +#define mmSDMA6_RLC6_DOORBELL_LOG 0x0369 +#define mmSDMA6_RLC6_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA6_RLC6_WATERMARK 0x036a +#define mmSDMA6_RLC6_WATERMARK_BASE_IDX 1 +#define mmSDMA6_RLC6_DOORBELL_OFFSET 0x036b +#define mmSDMA6_RLC6_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA6_RLC6_CSA_ADDR_LO 0x036c +#define mmSDMA6_RLC6_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA6_RLC6_CSA_ADDR_HI 0x036d +#define mmSDMA6_RLC6_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA6_RLC6_IB_SUB_REMAIN 0x036f +#define mmSDMA6_RLC6_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA6_RLC6_PREEMPT 0x0370 +#define mmSDMA6_RLC6_PREEMPT_BASE_IDX 1 +#define mmSDMA6_RLC6_DUMMY_REG 0x0371 +#define mmSDMA6_RLC6_DUMMY_REG_BASE_IDX 1 +#define mmSDMA6_RLC6_RB_WPTR_POLL_ADDR_HI 0x0372 +#define mmSDMA6_RLC6_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA6_RLC6_RB_WPTR_POLL_ADDR_LO 0x0373 +#define mmSDMA6_RLC6_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA6_RLC6_RB_AQL_CNTL 0x0374 +#define mmSDMA6_RLC6_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC6_MINOR_PTR_UPDATE 0x0375 +#define mmSDMA6_RLC6_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA6_RLC6_MIDCMD_DATA0 0x0380 +#define mmSDMA6_RLC6_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA6_RLC6_MIDCMD_DATA1 0x0381 +#define 
mmSDMA6_RLC6_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA6_RLC6_MIDCMD_DATA2 0x0382 +#define mmSDMA6_RLC6_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA6_RLC6_MIDCMD_DATA3 0x0383 +#define mmSDMA6_RLC6_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA6_RLC6_MIDCMD_DATA4 0x0384 +#define mmSDMA6_RLC6_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA6_RLC6_MIDCMD_DATA5 0x0385 +#define mmSDMA6_RLC6_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA6_RLC6_MIDCMD_DATA6 0x0386 +#define mmSDMA6_RLC6_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA6_RLC6_MIDCMD_DATA7 0x0387 +#define mmSDMA6_RLC6_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA6_RLC6_MIDCMD_DATA8 0x0388 +#define mmSDMA6_RLC6_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA6_RLC6_MIDCMD_CNTL 0x0389 +#define mmSDMA6_RLC6_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC7_RB_CNTL 0x0398 +#define mmSDMA6_RLC7_RB_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC7_RB_BASE 0x0399 +#define mmSDMA6_RLC7_RB_BASE_BASE_IDX 1 +#define mmSDMA6_RLC7_RB_BASE_HI 0x039a +#define mmSDMA6_RLC7_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA6_RLC7_RB_RPTR 0x039b +#define mmSDMA6_RLC7_RB_RPTR_BASE_IDX 1 +#define mmSDMA6_RLC7_RB_RPTR_HI 0x039c +#define mmSDMA6_RLC7_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA6_RLC7_RB_WPTR 0x039d +#define mmSDMA6_RLC7_RB_WPTR_BASE_IDX 1 +#define mmSDMA6_RLC7_RB_WPTR_HI 0x039e +#define mmSDMA6_RLC7_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA6_RLC7_RB_WPTR_POLL_CNTL 0x039f +#define mmSDMA6_RLC7_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC7_RB_RPTR_ADDR_HI 0x03a0 +#define mmSDMA6_RLC7_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA6_RLC7_RB_RPTR_ADDR_LO 0x03a1 +#define mmSDMA6_RLC7_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA6_RLC7_IB_CNTL 0x03a2 +#define mmSDMA6_RLC7_IB_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC7_IB_RPTR 0x03a3 +#define mmSDMA6_RLC7_IB_RPTR_BASE_IDX 1 +#define mmSDMA6_RLC7_IB_OFFSET 0x03a4 +#define mmSDMA6_RLC7_IB_OFFSET_BASE_IDX 1 +#define mmSDMA6_RLC7_IB_BASE_LO 0x03a5 +#define mmSDMA6_RLC7_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA6_RLC7_IB_BASE_HI 0x03a6 +#define mmSDMA6_RLC7_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA6_RLC7_IB_SIZE 0x03a7 +#define mmSDMA6_RLC7_IB_SIZE_BASE_IDX 1 +#define mmSDMA6_RLC7_SKIP_CNTL 0x03a8 +#define mmSDMA6_RLC7_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC7_CONTEXT_STATUS 0x03a9 +#define mmSDMA6_RLC7_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA6_RLC7_DOORBELL 0x03aa +#define mmSDMA6_RLC7_DOORBELL_BASE_IDX 1 +#define mmSDMA6_RLC7_STATUS 0x03c0 +#define mmSDMA6_RLC7_STATUS_BASE_IDX 1 +#define mmSDMA6_RLC7_DOORBELL_LOG 0x03c1 +#define mmSDMA6_RLC7_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA6_RLC7_WATERMARK 0x03c2 +#define mmSDMA6_RLC7_WATERMARK_BASE_IDX 1 +#define mmSDMA6_RLC7_DOORBELL_OFFSET 0x03c3 +#define mmSDMA6_RLC7_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA6_RLC7_CSA_ADDR_LO 0x03c4 +#define mmSDMA6_RLC7_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA6_RLC7_CSA_ADDR_HI 0x03c5 +#define mmSDMA6_RLC7_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA6_RLC7_IB_SUB_REMAIN 0x03c7 +#define mmSDMA6_RLC7_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA6_RLC7_PREEMPT 0x03c8 +#define mmSDMA6_RLC7_PREEMPT_BASE_IDX 1 +#define mmSDMA6_RLC7_DUMMY_REG 0x03c9 +#define mmSDMA6_RLC7_DUMMY_REG_BASE_IDX 1 +#define mmSDMA6_RLC7_RB_WPTR_POLL_ADDR_HI 0x03ca +#define mmSDMA6_RLC7_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA6_RLC7_RB_WPTR_POLL_ADDR_LO 0x03cb +#define mmSDMA6_RLC7_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA6_RLC7_RB_AQL_CNTL 0x03cc +#define mmSDMA6_RLC7_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA6_RLC7_MINOR_PTR_UPDATE 0x03cd +#define mmSDMA6_RLC7_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA6_RLC7_MIDCMD_DATA0 0x03d8 +#define 
mmSDMA6_RLC7_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA6_RLC7_MIDCMD_DATA1 0x03d9 +#define mmSDMA6_RLC7_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA6_RLC7_MIDCMD_DATA2 0x03da +#define mmSDMA6_RLC7_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA6_RLC7_MIDCMD_DATA3 0x03db +#define mmSDMA6_RLC7_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA6_RLC7_MIDCMD_DATA4 0x03dc +#define mmSDMA6_RLC7_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA6_RLC7_MIDCMD_DATA5 0x03dd +#define mmSDMA6_RLC7_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA6_RLC7_MIDCMD_DATA6 0x03de +#define mmSDMA6_RLC7_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA6_RLC7_MIDCMD_DATA7 0x03df +#define mmSDMA6_RLC7_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA6_RLC7_MIDCMD_DATA8 0x03e0 +#define mmSDMA6_RLC7_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA6_RLC7_MIDCMD_CNTL 0x03e1 +#define mmSDMA6_RLC7_MIDCMD_CNTL_BASE_IDX 1 + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma6/sdma6_4_2_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/sdma6/sdma6_4_2_2_sh_mask.h new file mode 100644 index 000000000000..55569f5d8eae --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/sdma6/sdma6_4_2_2_sh_mask.h @@ -0,0 +1,2956 @@ +/* + * Copyright (C) 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef _sdma6_4_2_2_SH_MASK_HEADER +#define _sdma6_4_2_2_SH_MASK_HEADER + + +// addressBlock: sdma6_sdma6dec +//SDMA6_UCODE_ADDR +#define SDMA6_UCODE_ADDR__VALUE__SHIFT 0x0 +#define SDMA6_UCODE_ADDR__VALUE_MASK 0x00001FFFL +//SDMA6_UCODE_DATA +#define SDMA6_UCODE_DATA__VALUE__SHIFT 0x0 +#define SDMA6_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL +//SDMA6_VM_CNTL +#define SDMA6_VM_CNTL__CMD__SHIFT 0x0 +#define SDMA6_VM_CNTL__CMD_MASK 0x0000000FL +//SDMA6_VM_CTX_LO +#define SDMA6_VM_CTX_LO__ADDR__SHIFT 0x2 +#define SDMA6_VM_CTX_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA6_VM_CTX_HI +#define SDMA6_VM_CTX_HI__ADDR__SHIFT 0x0 +#define SDMA6_VM_CTX_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA6_ACTIVE_FCN_ID +#define SDMA6_ACTIVE_FCN_ID__VFID__SHIFT 0x0 +#define SDMA6_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4 +#define SDMA6_ACTIVE_FCN_ID__VF__SHIFT 0x1f +#define SDMA6_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL +#define SDMA6_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L +#define SDMA6_ACTIVE_FCN_ID__VF_MASK 0x80000000L +//SDMA6_VM_CTX_CNTL +#define SDMA6_VM_CTX_CNTL__PRIV__SHIFT 0x0 +#define SDMA6_VM_CTX_CNTL__VMID__SHIFT 0x4 +#define SDMA6_VM_CTX_CNTL__PRIV_MASK 0x00000001L +#define SDMA6_VM_CTX_CNTL__VMID_MASK 0x000000F0L +//SDMA6_VIRT_RESET_REQ +#define SDMA6_VIRT_RESET_REQ__VF__SHIFT 0x0 +#define SDMA6_VIRT_RESET_REQ__PF__SHIFT 0x1f +#define SDMA6_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL +#define SDMA6_VIRT_RESET_REQ__PF_MASK 0x80000000L +//SDMA6_VF_ENABLE +#define SDMA6_VF_ENABLE__VF_ENABLE__SHIFT 0x0 +#define SDMA6_VF_ENABLE__VF_ENABLE_MASK 0x00000001L +//SDMA6_CONTEXT_REG_TYPE0 +#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_RB_CNTL__SHIFT 0x0 +#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_RB_BASE__SHIFT 0x1 +#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_RB_BASE_HI__SHIFT 0x2 +#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_RB_RPTR__SHIFT 0x3 +#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_RB_RPTR_HI__SHIFT 0x4 +#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_RB_WPTR__SHIFT 0x5 +#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_RB_WPTR_HI__SHIFT 0x6 +#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_RB_WPTR_POLL_CNTL__SHIFT 0x7 +#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_RB_RPTR_ADDR_HI__SHIFT 0x8 +#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_RB_RPTR_ADDR_LO__SHIFT 0x9 +#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_IB_CNTL__SHIFT 0xa +#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_IB_RPTR__SHIFT 0xb +#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_IB_OFFSET__SHIFT 0xc +#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_IB_BASE_LO__SHIFT 0xd +#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_IB_BASE_HI__SHIFT 0xe +#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_IB_SIZE__SHIFT 0xf +#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_SKIP_CNTL__SHIFT 0x10 +#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_CONTEXT_STATUS__SHIFT 0x11 +#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_DOORBELL__SHIFT 0x12 +#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_CONTEXT_CNTL__SHIFT 0x13 +#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_RB_CNTL_MASK 0x00000001L +#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_RB_BASE_MASK 0x00000002L +#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_RB_BASE_HI_MASK 0x00000004L +#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_RB_RPTR_MASK 0x00000008L +#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_RB_RPTR_HI_MASK 0x00000010L +#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_RB_WPTR_MASK 0x00000020L +#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_RB_WPTR_HI_MASK 0x00000040L +#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_RB_WPTR_POLL_CNTL_MASK 0x00000080L +#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_RB_RPTR_ADDR_HI_MASK 0x00000100L +#define 
SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_RB_RPTR_ADDR_LO_MASK 0x00000200L +#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_IB_CNTL_MASK 0x00000400L +#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_IB_RPTR_MASK 0x00000800L +#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_IB_OFFSET_MASK 0x00001000L +#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_IB_BASE_LO_MASK 0x00002000L +#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_IB_BASE_HI_MASK 0x00004000L +#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_IB_SIZE_MASK 0x00008000L +#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_SKIP_CNTL_MASK 0x00010000L +#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_CONTEXT_STATUS_MASK 0x00020000L +#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_DOORBELL_MASK 0x00040000L +#define SDMA6_CONTEXT_REG_TYPE0__SDMA6_GFX_CONTEXT_CNTL_MASK 0x00080000L +//SDMA6_CONTEXT_REG_TYPE1 +#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_STATUS__SHIFT 0x8 +#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_DOORBELL_LOG__SHIFT 0x9 +#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_WATERMARK__SHIFT 0xa +#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_DOORBELL_OFFSET__SHIFT 0xb +#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_CSA_ADDR_LO__SHIFT 0xc +#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_CSA_ADDR_HI__SHIFT 0xd +#define SDMA6_CONTEXT_REG_TYPE1__VOID_REG2__SHIFT 0xe +#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_IB_SUB_REMAIN__SHIFT 0xf +#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_PREEMPT__SHIFT 0x10 +#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_DUMMY_REG__SHIFT 0x11 +#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_RB_WPTR_POLL_ADDR_HI__SHIFT 0x12 +#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_RB_WPTR_POLL_ADDR_LO__SHIFT 0x13 +#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_RB_AQL_CNTL__SHIFT 0x14 +#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_MINOR_PTR_UPDATE__SHIFT 0x15 +#define SDMA6_CONTEXT_REG_TYPE1__RESERVED__SHIFT 0x16 +#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_STATUS_MASK 0x00000100L +#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_DOORBELL_LOG_MASK 0x00000200L +#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_WATERMARK_MASK 0x00000400L +#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_DOORBELL_OFFSET_MASK 0x00000800L +#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_CSA_ADDR_LO_MASK 0x00001000L +#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_CSA_ADDR_HI_MASK 0x00002000L +#define SDMA6_CONTEXT_REG_TYPE1__VOID_REG2_MASK 0x00004000L +#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_IB_SUB_REMAIN_MASK 0x00008000L +#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_PREEMPT_MASK 0x00010000L +#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_DUMMY_REG_MASK 0x00020000L +#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_RB_WPTR_POLL_ADDR_HI_MASK 0x00040000L +#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_RB_WPTR_POLL_ADDR_LO_MASK 0x00080000L +#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_RB_AQL_CNTL_MASK 0x00100000L +#define SDMA6_CONTEXT_REG_TYPE1__SDMA6_GFX_MINOR_PTR_UPDATE_MASK 0x00200000L +#define SDMA6_CONTEXT_REG_TYPE1__RESERVED_MASK 0xFFC00000L +//SDMA6_CONTEXT_REG_TYPE2 +#define SDMA6_CONTEXT_REG_TYPE2__SDMA6_GFX_MIDCMD_DATA0__SHIFT 0x0 +#define SDMA6_CONTEXT_REG_TYPE2__SDMA6_GFX_MIDCMD_DATA1__SHIFT 0x1 +#define SDMA6_CONTEXT_REG_TYPE2__SDMA6_GFX_MIDCMD_DATA2__SHIFT 0x2 +#define SDMA6_CONTEXT_REG_TYPE2__SDMA6_GFX_MIDCMD_DATA3__SHIFT 0x3 +#define SDMA6_CONTEXT_REG_TYPE2__SDMA6_GFX_MIDCMD_DATA4__SHIFT 0x4 +#define SDMA6_CONTEXT_REG_TYPE2__SDMA6_GFX_MIDCMD_DATA5__SHIFT 0x5 +#define SDMA6_CONTEXT_REG_TYPE2__SDMA6_GFX_MIDCMD_DATA6__SHIFT 0x6 +#define SDMA6_CONTEXT_REG_TYPE2__SDMA6_GFX_MIDCMD_DATA7__SHIFT 0x7 +#define SDMA6_CONTEXT_REG_TYPE2__SDMA6_GFX_MIDCMD_DATA8__SHIFT 0x8 +#define 
SDMA6_CONTEXT_REG_TYPE2__SDMA6_GFX_MIDCMD_CNTL__SHIFT 0x9 +#define SDMA6_CONTEXT_REG_TYPE2__RESERVED__SHIFT 0xa +#define SDMA6_CONTEXT_REG_TYPE2__SDMA6_GFX_MIDCMD_DATA0_MASK 0x00000001L +#define SDMA6_CONTEXT_REG_TYPE2__SDMA6_GFX_MIDCMD_DATA1_MASK 0x00000002L +#define SDMA6_CONTEXT_REG_TYPE2__SDMA6_GFX_MIDCMD_DATA2_MASK 0x00000004L +#define SDMA6_CONTEXT_REG_TYPE2__SDMA6_GFX_MIDCMD_DATA3_MASK 0x00000008L +#define SDMA6_CONTEXT_REG_TYPE2__SDMA6_GFX_MIDCMD_DATA4_MASK 0x00000010L +#define SDMA6_CONTEXT_REG_TYPE2__SDMA6_GFX_MIDCMD_DATA5_MASK 0x00000020L +#define SDMA6_CONTEXT_REG_TYPE2__SDMA6_GFX_MIDCMD_DATA6_MASK 0x00000040L +#define SDMA6_CONTEXT_REG_TYPE2__SDMA6_GFX_MIDCMD_DATA7_MASK 0x00000080L +#define SDMA6_CONTEXT_REG_TYPE2__SDMA6_GFX_MIDCMD_DATA8_MASK 0x00000100L +#define SDMA6_CONTEXT_REG_TYPE2__SDMA6_GFX_MIDCMD_CNTL_MASK 0x00000200L +#define SDMA6_CONTEXT_REG_TYPE2__RESERVED_MASK 0xFFFFFC00L +//SDMA6_CONTEXT_REG_TYPE3 +#define SDMA6_CONTEXT_REG_TYPE3__RESERVED__SHIFT 0x0 +#define SDMA6_CONTEXT_REG_TYPE3__RESERVED_MASK 0xFFFFFFFFL +//SDMA6_PUB_REG_TYPE0 +#define SDMA6_PUB_REG_TYPE0__SDMA6_UCODE_ADDR__SHIFT 0x0 +#define SDMA6_PUB_REG_TYPE0__SDMA6_UCODE_DATA__SHIFT 0x1 +#define SDMA6_PUB_REG_TYPE0__RESERVED3__SHIFT 0x3 +#define SDMA6_PUB_REG_TYPE0__SDMA6_VM_CNTL__SHIFT 0x4 +#define SDMA6_PUB_REG_TYPE0__SDMA6_VM_CTX_LO__SHIFT 0x5 +#define SDMA6_PUB_REG_TYPE0__SDMA6_VM_CTX_HI__SHIFT 0x6 +#define SDMA6_PUB_REG_TYPE0__SDMA6_ACTIVE_FCN_ID__SHIFT 0x7 +#define SDMA6_PUB_REG_TYPE0__SDMA6_VM_CTX_CNTL__SHIFT 0x8 +#define SDMA6_PUB_REG_TYPE0__SDMA6_VIRT_RESET_REQ__SHIFT 0x9 +#define SDMA6_PUB_REG_TYPE0__RESERVED10__SHIFT 0xa +#define SDMA6_PUB_REG_TYPE0__SDMA6_CONTEXT_REG_TYPE0__SHIFT 0xb +#define SDMA6_PUB_REG_TYPE0__SDMA6_CONTEXT_REG_TYPE1__SHIFT 0xc +#define SDMA6_PUB_REG_TYPE0__SDMA6_CONTEXT_REG_TYPE2__SHIFT 0xd +#define SDMA6_PUB_REG_TYPE0__SDMA6_CONTEXT_REG_TYPE3__SHIFT 0xe +#define SDMA6_PUB_REG_TYPE0__SDMA6_PUB_REG_TYPE0__SHIFT 0xf +#define SDMA6_PUB_REG_TYPE0__SDMA6_PUB_REG_TYPE1__SHIFT 0x10 +#define SDMA6_PUB_REG_TYPE0__SDMA6_PUB_REG_TYPE2__SHIFT 0x11 +#define SDMA6_PUB_REG_TYPE0__SDMA6_PUB_REG_TYPE3__SHIFT 0x12 +#define SDMA6_PUB_REG_TYPE0__SDMA6_MMHUB_CNTL__SHIFT 0x13 +#define SDMA6_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY__SHIFT 0x15 +#define SDMA6_PUB_REG_TYPE0__SDMA6_CONTEXT_GROUP_BOUNDARY__SHIFT 0x19 +#define SDMA6_PUB_REG_TYPE0__SDMA6_POWER_CNTL__SHIFT 0x1a +#define SDMA6_PUB_REG_TYPE0__SDMA6_CLK_CTRL__SHIFT 0x1b +#define SDMA6_PUB_REG_TYPE0__SDMA6_CNTL__SHIFT 0x1c +#define SDMA6_PUB_REG_TYPE0__SDMA6_CHICKEN_BITS__SHIFT 0x1d +#define SDMA6_PUB_REG_TYPE0__SDMA6_GB_ADDR_CONFIG__SHIFT 0x1e +#define SDMA6_PUB_REG_TYPE0__SDMA6_GB_ADDR_CONFIG_READ__SHIFT 0x1f +#define SDMA6_PUB_REG_TYPE0__SDMA6_UCODE_ADDR_MASK 0x00000001L +#define SDMA6_PUB_REG_TYPE0__SDMA6_UCODE_DATA_MASK 0x00000002L +#define SDMA6_PUB_REG_TYPE0__RESERVED3_MASK 0x00000008L +#define SDMA6_PUB_REG_TYPE0__SDMA6_VM_CNTL_MASK 0x00000010L +#define SDMA6_PUB_REG_TYPE0__SDMA6_VM_CTX_LO_MASK 0x00000020L +#define SDMA6_PUB_REG_TYPE0__SDMA6_VM_CTX_HI_MASK 0x00000040L +#define SDMA6_PUB_REG_TYPE0__SDMA6_ACTIVE_FCN_ID_MASK 0x00000080L +#define SDMA6_PUB_REG_TYPE0__SDMA6_VM_CTX_CNTL_MASK 0x00000100L +#define SDMA6_PUB_REG_TYPE0__SDMA6_VIRT_RESET_REQ_MASK 0x00000200L +#define SDMA6_PUB_REG_TYPE0__RESERVED10_MASK 0x00000400L +#define SDMA6_PUB_REG_TYPE0__SDMA6_CONTEXT_REG_TYPE0_MASK 0x00000800L +#define SDMA6_PUB_REG_TYPE0__SDMA6_CONTEXT_REG_TYPE1_MASK 0x00001000L +#define SDMA6_PUB_REG_TYPE0__SDMA6_CONTEXT_REG_TYPE2_MASK 
0x00002000L +#define SDMA6_PUB_REG_TYPE0__SDMA6_CONTEXT_REG_TYPE3_MASK 0x00004000L +#define SDMA6_PUB_REG_TYPE0__SDMA6_PUB_REG_TYPE0_MASK 0x00008000L +#define SDMA6_PUB_REG_TYPE0__SDMA6_PUB_REG_TYPE1_MASK 0x00010000L +#define SDMA6_PUB_REG_TYPE0__SDMA6_PUB_REG_TYPE2_MASK 0x00020000L +#define SDMA6_PUB_REG_TYPE0__SDMA6_PUB_REG_TYPE3_MASK 0x00040000L +#define SDMA6_PUB_REG_TYPE0__SDMA6_MMHUB_CNTL_MASK 0x00080000L +#define SDMA6_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY_MASK 0x01E00000L +#define SDMA6_PUB_REG_TYPE0__SDMA6_CONTEXT_GROUP_BOUNDARY_MASK 0x02000000L +#define SDMA6_PUB_REG_TYPE0__SDMA6_POWER_CNTL_MASK 0x04000000L +#define SDMA6_PUB_REG_TYPE0__SDMA6_CLK_CTRL_MASK 0x08000000L +#define SDMA6_PUB_REG_TYPE0__SDMA6_CNTL_MASK 0x10000000L +#define SDMA6_PUB_REG_TYPE0__SDMA6_CHICKEN_BITS_MASK 0x20000000L +#define SDMA6_PUB_REG_TYPE0__SDMA6_GB_ADDR_CONFIG_MASK 0x40000000L +#define SDMA6_PUB_REG_TYPE0__SDMA6_GB_ADDR_CONFIG_READ_MASK 0x80000000L +//SDMA6_PUB_REG_TYPE1 +#define SDMA6_PUB_REG_TYPE1__SDMA6_RB_RPTR_FETCH_HI__SHIFT 0x0 +#define SDMA6_PUB_REG_TYPE1__SDMA6_SEM_WAIT_FAIL_TIMER_CNTL__SHIFT 0x1 +#define SDMA6_PUB_REG_TYPE1__SDMA6_RB_RPTR_FETCH__SHIFT 0x2 +#define SDMA6_PUB_REG_TYPE1__SDMA6_IB_OFFSET_FETCH__SHIFT 0x3 +#define SDMA6_PUB_REG_TYPE1__SDMA6_PROGRAM__SHIFT 0x4 +#define SDMA6_PUB_REG_TYPE1__SDMA6_STATUS_REG__SHIFT 0x5 +#define SDMA6_PUB_REG_TYPE1__SDMA6_STATUS1_REG__SHIFT 0x6 +#define SDMA6_PUB_REG_TYPE1__SDMA6_RD_BURST_CNTL__SHIFT 0x7 +#define SDMA6_PUB_REG_TYPE1__SDMA6_HBM_PAGE_CONFIG__SHIFT 0x8 +#define SDMA6_PUB_REG_TYPE1__SDMA6_UCODE_CHECKSUM__SHIFT 0x9 +#define SDMA6_PUB_REG_TYPE1__SDMA6_F32_CNTL__SHIFT 0xa +#define SDMA6_PUB_REG_TYPE1__SDMA6_FREEZE__SHIFT 0xb +#define SDMA6_PUB_REG_TYPE1__SDMA6_PHASE0_QUANTUM__SHIFT 0xc +#define SDMA6_PUB_REG_TYPE1__SDMA6_PHASE1_QUANTUM__SHIFT 0xd +#define SDMA6_PUB_REG_TYPE1__SDMA_POWER_GATING__SHIFT 0xe +#define SDMA6_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG__SHIFT 0xf +#define SDMA6_PUB_REG_TYPE1__SDMA_PGFSM_WRITE__SHIFT 0x10 +#define SDMA6_PUB_REG_TYPE1__SDMA_PGFSM_READ__SHIFT 0x11 +#define SDMA6_PUB_REG_TYPE1__SDMA6_EDC_CONFIG__SHIFT 0x12 +#define SDMA6_PUB_REG_TYPE1__SDMA6_BA_THRESHOLD__SHIFT 0x13 +#define SDMA6_PUB_REG_TYPE1__SDMA6_ID__SHIFT 0x14 +#define SDMA6_PUB_REG_TYPE1__SDMA6_VERSION__SHIFT 0x15 +#define SDMA6_PUB_REG_TYPE1__SDMA6_EDC_COUNTER__SHIFT 0x16 +#define SDMA6_PUB_REG_TYPE1__SDMA6_EDC_COUNTER_CLEAR__SHIFT 0x17 +#define SDMA6_PUB_REG_TYPE1__SDMA6_STATUS2_REG__SHIFT 0x18 +#define SDMA6_PUB_REG_TYPE1__SDMA6_ATOMIC_CNTL__SHIFT 0x19 +#define SDMA6_PUB_REG_TYPE1__SDMA6_ATOMIC_PREOP_LO__SHIFT 0x1a +#define SDMA6_PUB_REG_TYPE1__SDMA6_ATOMIC_PREOP_HI__SHIFT 0x1b +#define SDMA6_PUB_REG_TYPE1__SDMA6_UTCL1_CNTL__SHIFT 0x1c +#define SDMA6_PUB_REG_TYPE1__SDMA6_UTCL1_WATERMK__SHIFT 0x1d +#define SDMA6_PUB_REG_TYPE1__SDMA6_UTCL1_RD_STATUS__SHIFT 0x1e +#define SDMA6_PUB_REG_TYPE1__SDMA6_UTCL1_WR_STATUS__SHIFT 0x1f +#define SDMA6_PUB_REG_TYPE1__SDMA6_RB_RPTR_FETCH_HI_MASK 0x00000001L +#define SDMA6_PUB_REG_TYPE1__SDMA6_SEM_WAIT_FAIL_TIMER_CNTL_MASK 0x00000002L +#define SDMA6_PUB_REG_TYPE1__SDMA6_RB_RPTR_FETCH_MASK 0x00000004L +#define SDMA6_PUB_REG_TYPE1__SDMA6_IB_OFFSET_FETCH_MASK 0x00000008L +#define SDMA6_PUB_REG_TYPE1__SDMA6_PROGRAM_MASK 0x00000010L +#define SDMA6_PUB_REG_TYPE1__SDMA6_STATUS_REG_MASK 0x00000020L +#define SDMA6_PUB_REG_TYPE1__SDMA6_STATUS1_REG_MASK 0x00000040L +#define SDMA6_PUB_REG_TYPE1__SDMA6_RD_BURST_CNTL_MASK 0x00000080L +#define SDMA6_PUB_REG_TYPE1__SDMA6_HBM_PAGE_CONFIG_MASK 0x00000100L +#define 
SDMA6_PUB_REG_TYPE1__SDMA6_UCODE_CHECKSUM_MASK 0x00000200L +#define SDMA6_PUB_REG_TYPE1__SDMA6_F32_CNTL_MASK 0x00000400L +#define SDMA6_PUB_REG_TYPE1__SDMA6_FREEZE_MASK 0x00000800L +#define SDMA6_PUB_REG_TYPE1__SDMA6_PHASE0_QUANTUM_MASK 0x00001000L +#define SDMA6_PUB_REG_TYPE1__SDMA6_PHASE1_QUANTUM_MASK 0x00002000L +#define SDMA6_PUB_REG_TYPE1__SDMA_POWER_GATING_MASK 0x00004000L +#define SDMA6_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG_MASK 0x00008000L +#define SDMA6_PUB_REG_TYPE1__SDMA_PGFSM_WRITE_MASK 0x00010000L +#define SDMA6_PUB_REG_TYPE1__SDMA_PGFSM_READ_MASK 0x00020000L +#define SDMA6_PUB_REG_TYPE1__SDMA6_EDC_CONFIG_MASK 0x00040000L +#define SDMA6_PUB_REG_TYPE1__SDMA6_BA_THRESHOLD_MASK 0x00080000L +#define SDMA6_PUB_REG_TYPE1__SDMA6_ID_MASK 0x00100000L +#define SDMA6_PUB_REG_TYPE1__SDMA6_VERSION_MASK 0x00200000L +#define SDMA6_PUB_REG_TYPE1__SDMA6_EDC_COUNTER_MASK 0x00400000L +#define SDMA6_PUB_REG_TYPE1__SDMA6_EDC_COUNTER_CLEAR_MASK 0x00800000L +#define SDMA6_PUB_REG_TYPE1__SDMA6_STATUS2_REG_MASK 0x01000000L +#define SDMA6_PUB_REG_TYPE1__SDMA6_ATOMIC_CNTL_MASK 0x02000000L +#define SDMA6_PUB_REG_TYPE1__SDMA6_ATOMIC_PREOP_LO_MASK 0x04000000L +#define SDMA6_PUB_REG_TYPE1__SDMA6_ATOMIC_PREOP_HI_MASK 0x08000000L +#define SDMA6_PUB_REG_TYPE1__SDMA6_UTCL1_CNTL_MASK 0x10000000L +#define SDMA6_PUB_REG_TYPE1__SDMA6_UTCL1_WATERMK_MASK 0x20000000L +#define SDMA6_PUB_REG_TYPE1__SDMA6_UTCL1_RD_STATUS_MASK 0x40000000L +#define SDMA6_PUB_REG_TYPE1__SDMA6_UTCL1_WR_STATUS_MASK 0x80000000L +//SDMA6_PUB_REG_TYPE2 +#define SDMA6_PUB_REG_TYPE2__SDMA6_UTCL1_INV0__SHIFT 0x0 +#define SDMA6_PUB_REG_TYPE2__SDMA6_UTCL1_INV1__SHIFT 0x1 +#define SDMA6_PUB_REG_TYPE2__SDMA6_UTCL1_INV2__SHIFT 0x2 +#define SDMA6_PUB_REG_TYPE2__SDMA6_UTCL1_RD_XNACK0__SHIFT 0x3 +#define SDMA6_PUB_REG_TYPE2__SDMA6_UTCL1_RD_XNACK1__SHIFT 0x4 +#define SDMA6_PUB_REG_TYPE2__SDMA6_UTCL1_WR_XNACK0__SHIFT 0x5 +#define SDMA6_PUB_REG_TYPE2__SDMA6_UTCL1_WR_XNACK1__SHIFT 0x6 +#define SDMA6_PUB_REG_TYPE2__SDMA6_UTCL1_TIMEOUT__SHIFT 0x7 +#define SDMA6_PUB_REG_TYPE2__SDMA6_UTCL1_PAGE__SHIFT 0x8 +#define SDMA6_PUB_REG_TYPE2__SDMA6_POWER_CNTL_IDLE__SHIFT 0x9 +#define SDMA6_PUB_REG_TYPE2__SDMA6_RELAX_ORDERING_LUT__SHIFT 0xa +#define SDMA6_PUB_REG_TYPE2__SDMA6_CHICKEN_BITS_2__SHIFT 0xb +#define SDMA6_PUB_REG_TYPE2__SDMA6_STATUS3_REG__SHIFT 0xc +#define SDMA6_PUB_REG_TYPE2__SDMA6_PHYSICAL_ADDR_LO__SHIFT 0xd +#define SDMA6_PUB_REG_TYPE2__SDMA6_PHYSICAL_ADDR_HI__SHIFT 0xe +#define SDMA6_PUB_REG_TYPE2__SDMA6_PHASE2_QUANTUM__SHIFT 0xf +#define SDMA6_PUB_REG_TYPE2__SDMA6_ERROR_LOG__SHIFT 0x10 +#define SDMA6_PUB_REG_TYPE2__SDMA6_PUB_DUMMY_REG0__SHIFT 0x11 +#define SDMA6_PUB_REG_TYPE2__SDMA6_PUB_DUMMY_REG1__SHIFT 0x12 +#define SDMA6_PUB_REG_TYPE2__SDMA6_PUB_DUMMY_REG2__SHIFT 0x13 +#define SDMA6_PUB_REG_TYPE2__SDMA6_PUB_DUMMY_REG3__SHIFT 0x14 +#define SDMA6_PUB_REG_TYPE2__SDMA6_F32_COUNTER__SHIFT 0x15 +#define SDMA6_PUB_REG_TYPE2__SDMA6_UNBREAKABLE__SHIFT 0x16 +#define SDMA6_PUB_REG_TYPE2__SDMA6_PERFMON_CNTL__SHIFT 0x17 +#define SDMA6_PUB_REG_TYPE2__SDMA6_PERFCOUNTER0_RESULT__SHIFT 0x18 +#define SDMA6_PUB_REG_TYPE2__SDMA6_PERFCOUNTER1_RESULT__SHIFT 0x19 +#define SDMA6_PUB_REG_TYPE2__SDMA6_PERFCOUNTER_TAG_DELAY_RANGE__SHIFT 0x1a +#define SDMA6_PUB_REG_TYPE2__SDMA6_CRD_CNTL__SHIFT 0x1b +#define SDMA6_PUB_REG_TYPE2__RESERVED28__SHIFT 0x1c +#define SDMA6_PUB_REG_TYPE2__SDMA6_GPU_IOV_VIOLATION_LOG__SHIFT 0x1d +#define SDMA6_PUB_REG_TYPE2__SDMA6_ULV_CNTL__SHIFT 0x1e +#define SDMA6_PUB_REG_TYPE2__RESERVED__SHIFT 0x1f +#define SDMA6_PUB_REG_TYPE2__SDMA6_UTCL1_INV0_MASK 
0x00000001L +#define SDMA6_PUB_REG_TYPE2__SDMA6_UTCL1_INV1_MASK 0x00000002L +#define SDMA6_PUB_REG_TYPE2__SDMA6_UTCL1_INV2_MASK 0x00000004L +#define SDMA6_PUB_REG_TYPE2__SDMA6_UTCL1_RD_XNACK0_MASK 0x00000008L +#define SDMA6_PUB_REG_TYPE2__SDMA6_UTCL1_RD_XNACK1_MASK 0x00000010L +#define SDMA6_PUB_REG_TYPE2__SDMA6_UTCL1_WR_XNACK0_MASK 0x00000020L +#define SDMA6_PUB_REG_TYPE2__SDMA6_UTCL1_WR_XNACK1_MASK 0x00000040L +#define SDMA6_PUB_REG_TYPE2__SDMA6_UTCL1_TIMEOUT_MASK 0x00000080L +#define SDMA6_PUB_REG_TYPE2__SDMA6_UTCL1_PAGE_MASK 0x00000100L +#define SDMA6_PUB_REG_TYPE2__SDMA6_POWER_CNTL_IDLE_MASK 0x00000200L +#define SDMA6_PUB_REG_TYPE2__SDMA6_RELAX_ORDERING_LUT_MASK 0x00000400L +#define SDMA6_PUB_REG_TYPE2__SDMA6_CHICKEN_BITS_2_MASK 0x00000800L +#define SDMA6_PUB_REG_TYPE2__SDMA6_STATUS3_REG_MASK 0x00001000L +#define SDMA6_PUB_REG_TYPE2__SDMA6_PHYSICAL_ADDR_LO_MASK 0x00002000L +#define SDMA6_PUB_REG_TYPE2__SDMA6_PHYSICAL_ADDR_HI_MASK 0x00004000L +#define SDMA6_PUB_REG_TYPE2__SDMA6_PHASE2_QUANTUM_MASK 0x00008000L +#define SDMA6_PUB_REG_TYPE2__SDMA6_ERROR_LOG_MASK 0x00010000L +#define SDMA6_PUB_REG_TYPE2__SDMA6_PUB_DUMMY_REG0_MASK 0x00020000L +#define SDMA6_PUB_REG_TYPE2__SDMA6_PUB_DUMMY_REG1_MASK 0x00040000L +#define SDMA6_PUB_REG_TYPE2__SDMA6_PUB_DUMMY_REG2_MASK 0x00080000L +#define SDMA6_PUB_REG_TYPE2__SDMA6_PUB_DUMMY_REG3_MASK 0x00100000L +#define SDMA6_PUB_REG_TYPE2__SDMA6_F32_COUNTER_MASK 0x00200000L +#define SDMA6_PUB_REG_TYPE2__SDMA6_UNBREAKABLE_MASK 0x00400000L +#define SDMA6_PUB_REG_TYPE2__SDMA6_PERFMON_CNTL_MASK 0x00800000L +#define SDMA6_PUB_REG_TYPE2__SDMA6_PERFCOUNTER0_RESULT_MASK 0x01000000L +#define SDMA6_PUB_REG_TYPE2__SDMA6_PERFCOUNTER1_RESULT_MASK 0x02000000L +#define SDMA6_PUB_REG_TYPE2__SDMA6_PERFCOUNTER_TAG_DELAY_RANGE_MASK 0x04000000L +#define SDMA6_PUB_REG_TYPE2__SDMA6_CRD_CNTL_MASK 0x08000000L +#define SDMA6_PUB_REG_TYPE2__RESERVED28_MASK 0x10000000L +#define SDMA6_PUB_REG_TYPE2__SDMA6_GPU_IOV_VIOLATION_LOG_MASK 0x20000000L +#define SDMA6_PUB_REG_TYPE2__SDMA6_ULV_CNTL_MASK 0x40000000L +#define SDMA6_PUB_REG_TYPE2__RESERVED_MASK 0x80000000L +//SDMA6_PUB_REG_TYPE3 +#define SDMA6_PUB_REG_TYPE3__SDMA6_EA_DBIT_ADDR_DATA__SHIFT 0x0 +#define SDMA6_PUB_REG_TYPE3__SDMA6_EA_DBIT_ADDR_INDEX__SHIFT 0x1 +#define SDMA6_PUB_REG_TYPE3__SDMA6_GPU_IOV_VIOLATION_LOG2__SHIFT 0x2 +#define SDMA6_PUB_REG_TYPE3__RESERVED__SHIFT 0x3 +#define SDMA6_PUB_REG_TYPE3__SDMA6_EA_DBIT_ADDR_DATA_MASK 0x00000001L +#define SDMA6_PUB_REG_TYPE3__SDMA6_EA_DBIT_ADDR_INDEX_MASK 0x00000002L +#define SDMA6_PUB_REG_TYPE3__SDMA6_GPU_IOV_VIOLATION_LOG2_MASK 0x00000004L +#define SDMA6_PUB_REG_TYPE3__RESERVED_MASK 0xFFFFFFF8L +//SDMA6_MMHUB_CNTL +#define SDMA6_MMHUB_CNTL__UNIT_ID__SHIFT 0x0 +#define SDMA6_MMHUB_CNTL__UNIT_ID_MASK 0x0000003FL +//SDMA6_CONTEXT_GROUP_BOUNDARY +#define SDMA6_CONTEXT_GROUP_BOUNDARY__RESERVED__SHIFT 0x0 +#define SDMA6_CONTEXT_GROUP_BOUNDARY__RESERVED_MASK 0xFFFFFFFFL +//SDMA6_POWER_CNTL +#define SDMA6_POWER_CNTL__MEM_POWER_OVERRIDE__SHIFT 0x8 +#define SDMA6_POWER_CNTL__MEM_POWER_LS_EN__SHIFT 0x9 +#define SDMA6_POWER_CNTL__MEM_POWER_DS_EN__SHIFT 0xa +#define SDMA6_POWER_CNTL__MEM_POWER_SD_EN__SHIFT 0xb +#define SDMA6_POWER_CNTL__MEM_POWER_DELAY__SHIFT 0xc +#define SDMA6_POWER_CNTL__MEM_POWER_OVERRIDE_MASK 0x00000100L +#define SDMA6_POWER_CNTL__MEM_POWER_LS_EN_MASK 0x00000200L +#define SDMA6_POWER_CNTL__MEM_POWER_DS_EN_MASK 0x00000400L +#define SDMA6_POWER_CNTL__MEM_POWER_SD_EN_MASK 0x00000800L +#define SDMA6_POWER_CNTL__MEM_POWER_DELAY_MASK 0x003FF000L +//SDMA6_CLK_CTRL +#define 
SDMA6_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define SDMA6_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define SDMA6_CLK_CTRL__RESERVED__SHIFT 0xc +#define SDMA6_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18 +#define SDMA6_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19 +#define SDMA6_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a +#define SDMA6_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b +#define SDMA6_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c +#define SDMA6_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d +#define SDMA6_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e +#define SDMA6_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f +#define SDMA6_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define SDMA6_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define SDMA6_CLK_CTRL__RESERVED_MASK 0x00FFF000L +#define SDMA6_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L +#define SDMA6_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L +#define SDMA6_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L +#define SDMA6_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L +#define SDMA6_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L +#define SDMA6_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L +#define SDMA6_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L +#define SDMA6_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L +//SDMA6_CNTL +#define SDMA6_CNTL__TRAP_ENABLE__SHIFT 0x0 +#define SDMA6_CNTL__UTC_L1_ENABLE__SHIFT 0x1 +#define SDMA6_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x2 +#define SDMA6_CNTL__DATA_SWAP_ENABLE__SHIFT 0x3 +#define SDMA6_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x4 +#define SDMA6_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x5 +#define SDMA6_CNTL__MIDCMD_WORLDSWITCH_ENABLE__SHIFT 0x11 +#define SDMA6_CNTL__AUTO_CTXSW_ENABLE__SHIFT 0x12 +#define SDMA6_CNTL__CTXEMPTY_INT_ENABLE__SHIFT 0x1c +#define SDMA6_CNTL__FROZEN_INT_ENABLE__SHIFT 0x1d +#define SDMA6_CNTL__IB_PREEMPT_INT_ENABLE__SHIFT 0x1e +#define SDMA6_CNTL__TRAP_ENABLE_MASK 0x00000001L +#define SDMA6_CNTL__UTC_L1_ENABLE_MASK 0x00000002L +#define SDMA6_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L +#define SDMA6_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L +#define SDMA6_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L +#define SDMA6_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00000020L +#define SDMA6_CNTL__MIDCMD_WORLDSWITCH_ENABLE_MASK 0x00020000L +#define SDMA6_CNTL__AUTO_CTXSW_ENABLE_MASK 0x00040000L +#define SDMA6_CNTL__CTXEMPTY_INT_ENABLE_MASK 0x10000000L +#define SDMA6_CNTL__FROZEN_INT_ENABLE_MASK 0x20000000L +#define SDMA6_CNTL__IB_PREEMPT_INT_ENABLE_MASK 0x40000000L +//SDMA6_CHICKEN_BITS +#define SDMA6_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE__SHIFT 0x0 +#define SDMA6_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE__SHIFT 0x1 +#define SDMA6_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE__SHIFT 0x2 +#define SDMA6_CHICKEN_BITS__WRITE_BURST_LENGTH__SHIFT 0x8 +#define SDMA6_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE__SHIFT 0xa +#define SDMA6_CHICKEN_BITS__COPY_OVERLAP_ENABLE__SHIFT 0x10 +#define SDMA6_CHICKEN_BITS__RAW_CHECK_ENABLE__SHIFT 0x11 +#define SDMA6_CHICKEN_BITS__SRBM_POLL_RETRYING__SHIFT 0x14 +#define SDMA6_CHICKEN_BITS__CG_STATUS_OUTPUT__SHIFT 0x17 +#define SDMA6_CHICKEN_BITS__TIME_BASED_QOS__SHIFT 0x19 +#define SDMA6_CHICKEN_BITS__CE_AFIFO_WATERMARK__SHIFT 0x1a +#define SDMA6_CHICKEN_BITS__CE_DFIFO_WATERMARK__SHIFT 0x1c +#define SDMA6_CHICKEN_BITS__CE_LFIFO_WATERMARK__SHIFT 0x1e +#define SDMA6_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE_MASK 0x00000001L +#define SDMA6_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE_MASK 0x00000002L +#define SDMA6_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE_MASK 0x00000004L +#define SDMA6_CHICKEN_BITS__WRITE_BURST_LENGTH_MASK 0x00000300L +#define SDMA6_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE_MASK 0x00001C00L 
+#define SDMA6_CHICKEN_BITS__COPY_OVERLAP_ENABLE_MASK 0x00010000L +#define SDMA6_CHICKEN_BITS__RAW_CHECK_ENABLE_MASK 0x00020000L +#define SDMA6_CHICKEN_BITS__SRBM_POLL_RETRYING_MASK 0x00100000L +#define SDMA6_CHICKEN_BITS__CG_STATUS_OUTPUT_MASK 0x00800000L +#define SDMA6_CHICKEN_BITS__TIME_BASED_QOS_MASK 0x02000000L +#define SDMA6_CHICKEN_BITS__CE_AFIFO_WATERMARK_MASK 0x0C000000L +#define SDMA6_CHICKEN_BITS__CE_DFIFO_WATERMARK_MASK 0x30000000L +#define SDMA6_CHICKEN_BITS__CE_LFIFO_WATERMARK_MASK 0xC0000000L +//SDMA6_GB_ADDR_CONFIG +#define SDMA6_GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0 +#define SDMA6_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3 +#define SDMA6_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x8 +#define SDMA6_GB_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc +#define SDMA6_GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13 +#define SDMA6_GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L +#define SDMA6_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L +#define SDMA6_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L +#define SDMA6_GB_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L +#define SDMA6_GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L +//SDMA6_GB_ADDR_CONFIG_READ +#define SDMA6_GB_ADDR_CONFIG_READ__NUM_PIPES__SHIFT 0x0 +#define SDMA6_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE__SHIFT 0x3 +#define SDMA6_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE__SHIFT 0x8 +#define SDMA6_GB_ADDR_CONFIG_READ__NUM_BANKS__SHIFT 0xc +#define SDMA6_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES__SHIFT 0x13 +#define SDMA6_GB_ADDR_CONFIG_READ__NUM_PIPES_MASK 0x00000007L +#define SDMA6_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L +#define SDMA6_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE_MASK 0x00000700L +#define SDMA6_GB_ADDR_CONFIG_READ__NUM_BANKS_MASK 0x00007000L +#define SDMA6_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES_MASK 0x00180000L +//SDMA6_RB_RPTR_FETCH_HI +#define SDMA6_RB_RPTR_FETCH_HI__OFFSET__SHIFT 0x0 +#define SDMA6_RB_RPTR_FETCH_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA6_SEM_WAIT_FAIL_TIMER_CNTL +#define SDMA6_SEM_WAIT_FAIL_TIMER_CNTL__TIMER__SHIFT 0x0 +#define SDMA6_SEM_WAIT_FAIL_TIMER_CNTL__TIMER_MASK 0xFFFFFFFFL +//SDMA6_RB_RPTR_FETCH +#define SDMA6_RB_RPTR_FETCH__OFFSET__SHIFT 0x2 +#define SDMA6_RB_RPTR_FETCH__OFFSET_MASK 0xFFFFFFFCL +//SDMA6_IB_OFFSET_FETCH +#define SDMA6_IB_OFFSET_FETCH__OFFSET__SHIFT 0x2 +#define SDMA6_IB_OFFSET_FETCH__OFFSET_MASK 0x003FFFFCL +//SDMA6_PROGRAM +#define SDMA6_PROGRAM__STREAM__SHIFT 0x0 +#define SDMA6_PROGRAM__STREAM_MASK 0xFFFFFFFFL +//SDMA6_STATUS_REG +#define SDMA6_STATUS_REG__IDLE__SHIFT 0x0 +#define SDMA6_STATUS_REG__REG_IDLE__SHIFT 0x1 +#define SDMA6_STATUS_REG__RB_EMPTY__SHIFT 0x2 +#define SDMA6_STATUS_REG__RB_FULL__SHIFT 0x3 +#define SDMA6_STATUS_REG__RB_CMD_IDLE__SHIFT 0x4 +#define SDMA6_STATUS_REG__RB_CMD_FULL__SHIFT 0x5 +#define SDMA6_STATUS_REG__IB_CMD_IDLE__SHIFT 0x6 +#define SDMA6_STATUS_REG__IB_CMD_FULL__SHIFT 0x7 +#define SDMA6_STATUS_REG__BLOCK_IDLE__SHIFT 0x8 +#define SDMA6_STATUS_REG__INSIDE_IB__SHIFT 0x9 +#define SDMA6_STATUS_REG__EX_IDLE__SHIFT 0xa +#define SDMA6_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE__SHIFT 0xb +#define SDMA6_STATUS_REG__PACKET_READY__SHIFT 0xc +#define SDMA6_STATUS_REG__MC_WR_IDLE__SHIFT 0xd +#define SDMA6_STATUS_REG__SRBM_IDLE__SHIFT 0xe +#define SDMA6_STATUS_REG__CONTEXT_EMPTY__SHIFT 0xf +#define SDMA6_STATUS_REG__DELTA_RPTR_FULL__SHIFT 0x10 +#define SDMA6_STATUS_REG__RB_MC_RREQ_IDLE__SHIFT 0x11 +#define SDMA6_STATUS_REG__IB_MC_RREQ_IDLE__SHIFT 0x12 +#define SDMA6_STATUS_REG__MC_RD_IDLE__SHIFT 0x13 +#define 
SDMA6_STATUS_REG__DELTA_RPTR_EMPTY__SHIFT 0x14 +#define SDMA6_STATUS_REG__MC_RD_RET_STALL__SHIFT 0x15 +#define SDMA6_STATUS_REG__MC_RD_NO_POLL_IDLE__SHIFT 0x16 +#define SDMA6_STATUS_REG__PREV_CMD_IDLE__SHIFT 0x19 +#define SDMA6_STATUS_REG__SEM_IDLE__SHIFT 0x1a +#define SDMA6_STATUS_REG__SEM_REQ_STALL__SHIFT 0x1b +#define SDMA6_STATUS_REG__SEM_RESP_STATE__SHIFT 0x1c +#define SDMA6_STATUS_REG__INT_IDLE__SHIFT 0x1e +#define SDMA6_STATUS_REG__INT_REQ_STALL__SHIFT 0x1f +#define SDMA6_STATUS_REG__IDLE_MASK 0x00000001L +#define SDMA6_STATUS_REG__REG_IDLE_MASK 0x00000002L +#define SDMA6_STATUS_REG__RB_EMPTY_MASK 0x00000004L +#define SDMA6_STATUS_REG__RB_FULL_MASK 0x00000008L +#define SDMA6_STATUS_REG__RB_CMD_IDLE_MASK 0x00000010L +#define SDMA6_STATUS_REG__RB_CMD_FULL_MASK 0x00000020L +#define SDMA6_STATUS_REG__IB_CMD_IDLE_MASK 0x00000040L +#define SDMA6_STATUS_REG__IB_CMD_FULL_MASK 0x00000080L +#define SDMA6_STATUS_REG__BLOCK_IDLE_MASK 0x00000100L +#define SDMA6_STATUS_REG__INSIDE_IB_MASK 0x00000200L +#define SDMA6_STATUS_REG__EX_IDLE_MASK 0x00000400L +#define SDMA6_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE_MASK 0x00000800L +#define SDMA6_STATUS_REG__PACKET_READY_MASK 0x00001000L +#define SDMA6_STATUS_REG__MC_WR_IDLE_MASK 0x00002000L +#define SDMA6_STATUS_REG__SRBM_IDLE_MASK 0x00004000L +#define SDMA6_STATUS_REG__CONTEXT_EMPTY_MASK 0x00008000L +#define SDMA6_STATUS_REG__DELTA_RPTR_FULL_MASK 0x00010000L +#define SDMA6_STATUS_REG__RB_MC_RREQ_IDLE_MASK 0x00020000L +#define SDMA6_STATUS_REG__IB_MC_RREQ_IDLE_MASK 0x00040000L +#define SDMA6_STATUS_REG__MC_RD_IDLE_MASK 0x00080000L +#define SDMA6_STATUS_REG__DELTA_RPTR_EMPTY_MASK 0x00100000L +#define SDMA6_STATUS_REG__MC_RD_RET_STALL_MASK 0x00200000L +#define SDMA6_STATUS_REG__MC_RD_NO_POLL_IDLE_MASK 0x00400000L +#define SDMA6_STATUS_REG__PREV_CMD_IDLE_MASK 0x02000000L +#define SDMA6_STATUS_REG__SEM_IDLE_MASK 0x04000000L +#define SDMA6_STATUS_REG__SEM_REQ_STALL_MASK 0x08000000L +#define SDMA6_STATUS_REG__SEM_RESP_STATE_MASK 0x30000000L +#define SDMA6_STATUS_REG__INT_IDLE_MASK 0x40000000L +#define SDMA6_STATUS_REG__INT_REQ_STALL_MASK 0x80000000L +//SDMA6_STATUS1_REG +#define SDMA6_STATUS1_REG__CE_WREQ_IDLE__SHIFT 0x0 +#define SDMA6_STATUS1_REG__CE_WR_IDLE__SHIFT 0x1 +#define SDMA6_STATUS1_REG__CE_SPLIT_IDLE__SHIFT 0x2 +#define SDMA6_STATUS1_REG__CE_RREQ_IDLE__SHIFT 0x3 +#define SDMA6_STATUS1_REG__CE_OUT_IDLE__SHIFT 0x4 +#define SDMA6_STATUS1_REG__CE_IN_IDLE__SHIFT 0x5 +#define SDMA6_STATUS1_REG__CE_DST_IDLE__SHIFT 0x6 +#define SDMA6_STATUS1_REG__CE_CMD_IDLE__SHIFT 0x9 +#define SDMA6_STATUS1_REG__CE_AFIFO_FULL__SHIFT 0xa +#define SDMA6_STATUS1_REG__CE_INFO_FULL__SHIFT 0xd +#define SDMA6_STATUS1_REG__CE_INFO1_FULL__SHIFT 0xe +#define SDMA6_STATUS1_REG__EX_START__SHIFT 0xf +#define SDMA6_STATUS1_REG__CE_RD_STALL__SHIFT 0x11 +#define SDMA6_STATUS1_REG__CE_WR_STALL__SHIFT 0x12 +#define SDMA6_STATUS1_REG__CE_WREQ_IDLE_MASK 0x00000001L +#define SDMA6_STATUS1_REG__CE_WR_IDLE_MASK 0x00000002L +#define SDMA6_STATUS1_REG__CE_SPLIT_IDLE_MASK 0x00000004L +#define SDMA6_STATUS1_REG__CE_RREQ_IDLE_MASK 0x00000008L +#define SDMA6_STATUS1_REG__CE_OUT_IDLE_MASK 0x00000010L +#define SDMA6_STATUS1_REG__CE_IN_IDLE_MASK 0x00000020L +#define SDMA6_STATUS1_REG__CE_DST_IDLE_MASK 0x00000040L +#define SDMA6_STATUS1_REG__CE_CMD_IDLE_MASK 0x00000200L +#define SDMA6_STATUS1_REG__CE_AFIFO_FULL_MASK 0x00000400L +#define SDMA6_STATUS1_REG__CE_INFO_FULL_MASK 0x00002000L +#define SDMA6_STATUS1_REG__CE_INFO1_FULL_MASK 0x00004000L +#define SDMA6_STATUS1_REG__EX_START_MASK 0x00008000L +#define 
SDMA6_STATUS1_REG__CE_RD_STALL_MASK 0x00020000L +#define SDMA6_STATUS1_REG__CE_WR_STALL_MASK 0x00040000L +//SDMA6_RD_BURST_CNTL +#define SDMA6_RD_BURST_CNTL__RD_BURST__SHIFT 0x0 +#define SDMA6_RD_BURST_CNTL__CMD_BUFFER_RD_BURST__SHIFT 0x2 +#define SDMA6_RD_BURST_CNTL__RD_BURST_MASK 0x00000003L +#define SDMA6_RD_BURST_CNTL__CMD_BUFFER_RD_BURST_MASK 0x0000000CL +//SDMA6_HBM_PAGE_CONFIG +#define SDMA6_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT__SHIFT 0x0 +#define SDMA6_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT_MASK 0x00000001L +//SDMA6_UCODE_CHECKSUM +#define SDMA6_UCODE_CHECKSUM__DATA__SHIFT 0x0 +#define SDMA6_UCODE_CHECKSUM__DATA_MASK 0xFFFFFFFFL +//SDMA6_F32_CNTL +#define SDMA6_F32_CNTL__HALT__SHIFT 0x0 +#define SDMA6_F32_CNTL__STEP__SHIFT 0x1 +#define SDMA6_F32_CNTL__HALT_MASK 0x00000001L +#define SDMA6_F32_CNTL__STEP_MASK 0x00000002L +//SDMA6_FREEZE +#define SDMA6_FREEZE__PREEMPT__SHIFT 0x0 +#define SDMA6_FREEZE__FREEZE__SHIFT 0x4 +#define SDMA6_FREEZE__FROZEN__SHIFT 0x5 +#define SDMA6_FREEZE__F32_FREEZE__SHIFT 0x6 +#define SDMA6_FREEZE__PREEMPT_MASK 0x00000001L +#define SDMA6_FREEZE__FREEZE_MASK 0x00000010L +#define SDMA6_FREEZE__FROZEN_MASK 0x00000020L +#define SDMA6_FREEZE__F32_FREEZE_MASK 0x00000040L +//SDMA6_PHASE0_QUANTUM +#define SDMA6_PHASE0_QUANTUM__UNIT__SHIFT 0x0 +#define SDMA6_PHASE0_QUANTUM__VALUE__SHIFT 0x8 +#define SDMA6_PHASE0_QUANTUM__PREFER__SHIFT 0x1e +#define SDMA6_PHASE0_QUANTUM__UNIT_MASK 0x0000000FL +#define SDMA6_PHASE0_QUANTUM__VALUE_MASK 0x00FFFF00L +#define SDMA6_PHASE0_QUANTUM__PREFER_MASK 0x40000000L +//SDMA6_PHASE1_QUANTUM +#define SDMA6_PHASE1_QUANTUM__UNIT__SHIFT 0x0 +#define SDMA6_PHASE1_QUANTUM__VALUE__SHIFT 0x8 +#define SDMA6_PHASE1_QUANTUM__PREFER__SHIFT 0x1e +#define SDMA6_PHASE1_QUANTUM__UNIT_MASK 0x0000000FL +#define SDMA6_PHASE1_QUANTUM__VALUE_MASK 0x00FFFF00L +#define SDMA6_PHASE1_QUANTUM__PREFER_MASK 0x40000000L +//SDMA6_EDC_CONFIG +#define SDMA6_EDC_CONFIG__DIS_EDC__SHIFT 0x1 +#define SDMA6_EDC_CONFIG__ECC_INT_ENABLE__SHIFT 0x2 +#define SDMA6_EDC_CONFIG__DIS_EDC_MASK 0x00000002L +#define SDMA6_EDC_CONFIG__ECC_INT_ENABLE_MASK 0x00000004L +//SDMA6_BA_THRESHOLD +#define SDMA6_BA_THRESHOLD__READ_THRES__SHIFT 0x0 +#define SDMA6_BA_THRESHOLD__WRITE_THRES__SHIFT 0x10 +#define SDMA6_BA_THRESHOLD__READ_THRES_MASK 0x000003FFL +#define SDMA6_BA_THRESHOLD__WRITE_THRES_MASK 0x03FF0000L +//SDMA6_ID +#define SDMA6_ID__DEVICE_ID__SHIFT 0x0 +#define SDMA6_ID__DEVICE_ID_MASK 0x000000FFL +//SDMA6_VERSION +#define SDMA6_VERSION__MINVER__SHIFT 0x0 +#define SDMA6_VERSION__MAJVER__SHIFT 0x8 +#define SDMA6_VERSION__REV__SHIFT 0x10 +#define SDMA6_VERSION__MINVER_MASK 0x0000007FL +#define SDMA6_VERSION__MAJVER_MASK 0x00007F00L +#define SDMA6_VERSION__REV_MASK 0x003F0000L +//SDMA6_EDC_COUNTER +#define SDMA6_EDC_COUNTER__SDMA_UCODE_BUF_SED__SHIFT 0x0 +#define SDMA6_EDC_COUNTER__SDMA_RB_CMD_BUF_SED__SHIFT 0x2 +#define SDMA6_EDC_COUNTER__SDMA_IB_CMD_BUF_SED__SHIFT 0x3 +#define SDMA6_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED__SHIFT 0x4 +#define SDMA6_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED__SHIFT 0x5 +#define SDMA6_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED__SHIFT 0x6 +#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED__SHIFT 0x7 +#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED__SHIFT 0x8 +#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED__SHIFT 0x9 +#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED__SHIFT 0xa +#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED__SHIFT 0xb +#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED__SHIFT 0xc +#define 
SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED__SHIFT 0xd +#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED__SHIFT 0xe +#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED__SHIFT 0xf +#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED__SHIFT 0x10 +#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED__SHIFT 0x11 +#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED__SHIFT 0x12 +#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED__SHIFT 0x13 +#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED__SHIFT 0x14 +#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED__SHIFT 0x15 +#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED__SHIFT 0x16 +#define SDMA6_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED__SHIFT 0x17 +#define SDMA6_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED__SHIFT 0x18 +#define SDMA6_EDC_COUNTER__SDMA_UCODE_BUF_SED_MASK 0x00000001L +#define SDMA6_EDC_COUNTER__SDMA_RB_CMD_BUF_SED_MASK 0x00000004L +#define SDMA6_EDC_COUNTER__SDMA_IB_CMD_BUF_SED_MASK 0x00000008L +#define SDMA6_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED_MASK 0x00000010L +#define SDMA6_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED_MASK 0x00000020L +#define SDMA6_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED_MASK 0x00000040L +#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED_MASK 0x00000080L +#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED_MASK 0x00000100L +#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED_MASK 0x00000200L +#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED_MASK 0x00000400L +#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED_MASK 0x00000800L +#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED_MASK 0x00001000L +#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED_MASK 0x00002000L +#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED_MASK 0x00004000L +#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED_MASK 0x00008000L +#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED_MASK 0x00010000L +#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED_MASK 0x00020000L +#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED_MASK 0x00040000L +#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED_MASK 0x00080000L +#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED_MASK 0x00100000L +#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED_MASK 0x00200000L +#define SDMA6_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED_MASK 0x00400000L +#define SDMA6_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED_MASK 0x00800000L +#define SDMA6_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED_MASK 0x01000000L +//SDMA6_EDC_COUNTER_CLEAR +#define SDMA6_EDC_COUNTER_CLEAR__DUMMY__SHIFT 0x0 +#define SDMA6_EDC_COUNTER_CLEAR__DUMMY_MASK 0x00000001L +//SDMA6_STATUS2_REG +#define SDMA6_STATUS2_REG__ID__SHIFT 0x0 +#define SDMA6_STATUS2_REG__F32_INSTR_PTR__SHIFT 0x3 +#define SDMA6_STATUS2_REG__CMD_OP__SHIFT 0x10 +#define SDMA6_STATUS2_REG__ID_MASK 0x00000007L +#define SDMA6_STATUS2_REG__F32_INSTR_PTR_MASK 0x0000FFF8L +#define SDMA6_STATUS2_REG__CMD_OP_MASK 0xFFFF0000L +//SDMA6_ATOMIC_CNTL +#define SDMA6_ATOMIC_CNTL__LOOP_TIMER__SHIFT 0x0 +#define SDMA6_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE__SHIFT 0x1f +#define SDMA6_ATOMIC_CNTL__LOOP_TIMER_MASK 0x7FFFFFFFL +#define SDMA6_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE_MASK 0x80000000L +//SDMA6_ATOMIC_PREOP_LO +#define SDMA6_ATOMIC_PREOP_LO__DATA__SHIFT 0x0 +#define SDMA6_ATOMIC_PREOP_LO__DATA_MASK 0xFFFFFFFFL +//SDMA6_ATOMIC_PREOP_HI +#define SDMA6_ATOMIC_PREOP_HI__DATA__SHIFT 0x0 +#define SDMA6_ATOMIC_PREOP_HI__DATA_MASK 0xFFFFFFFFL +//SDMA6_UTCL1_CNTL +#define SDMA6_UTCL1_CNTL__REDO_ENABLE__SHIFT 0x0 +#define SDMA6_UTCL1_CNTL__REDO_DELAY__SHIFT 0x1 +#define 
SDMA6_UTCL1_CNTL__REDO_WATERMK__SHIFT 0xb +#define SDMA6_UTCL1_CNTL__INVACK_DELAY__SHIFT 0xe +#define SDMA6_UTCL1_CNTL__REQL2_CREDIT__SHIFT 0x18 +#define SDMA6_UTCL1_CNTL__VADDR_WATERMK__SHIFT 0x1d +#define SDMA6_UTCL1_CNTL__REDO_ENABLE_MASK 0x00000001L +#define SDMA6_UTCL1_CNTL__REDO_DELAY_MASK 0x000007FEL +#define SDMA6_UTCL1_CNTL__REDO_WATERMK_MASK 0x00003800L +#define SDMA6_UTCL1_CNTL__INVACK_DELAY_MASK 0x00FFC000L +#define SDMA6_UTCL1_CNTL__REQL2_CREDIT_MASK 0x1F000000L +#define SDMA6_UTCL1_CNTL__VADDR_WATERMK_MASK 0xE0000000L +//SDMA6_UTCL1_WATERMK +#define SDMA6_UTCL1_WATERMK__REQMC_WATERMK__SHIFT 0x0 +#define SDMA6_UTCL1_WATERMK__REQPG_WATERMK__SHIFT 0x9 +#define SDMA6_UTCL1_WATERMK__INVREQ_WATERMK__SHIFT 0x11 +#define SDMA6_UTCL1_WATERMK__XNACK_WATERMK__SHIFT 0x19 +#define SDMA6_UTCL1_WATERMK__REQMC_WATERMK_MASK 0x000001FFL +#define SDMA6_UTCL1_WATERMK__REQPG_WATERMK_MASK 0x0001FE00L +#define SDMA6_UTCL1_WATERMK__INVREQ_WATERMK_MASK 0x01FE0000L +#define SDMA6_UTCL1_WATERMK__XNACK_WATERMK_MASK 0xFE000000L +//SDMA6_UTCL1_RD_STATUS +#define SDMA6_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0 +#define SDMA6_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1 +#define SDMA6_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2 +#define SDMA6_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3 +#define SDMA6_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4 +#define SDMA6_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5 +#define SDMA6_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6 +#define SDMA6_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7 +#define SDMA6_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8 +#define SDMA6_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9 +#define SDMA6_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa +#define SDMA6_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb +#define SDMA6_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc +#define SDMA6_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd +#define SDMA6_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe +#define SDMA6_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf +#define SDMA6_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10 +#define SDMA6_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11 +#define SDMA6_UTCL1_RD_STATUS__PAGE_FAULT__SHIFT 0x12 +#define SDMA6_UTCL1_RD_STATUS__PAGE_NULL__SHIFT 0x13 +#define SDMA6_UTCL1_RD_STATUS__REQL2_IDLE__SHIFT 0x14 +#define SDMA6_UTCL1_RD_STATUS__CE_L1_STALL__SHIFT 0x15 +#define SDMA6_UTCL1_RD_STATUS__NEXT_RD_VECTOR__SHIFT 0x16 +#define SDMA6_UTCL1_RD_STATUS__MERGE_STATE__SHIFT 0x1a +#define SDMA6_UTCL1_RD_STATUS__ADDR_RD_RTR__SHIFT 0x1d +#define SDMA6_UTCL1_RD_STATUS__WPTR_POLLING__SHIFT 0x1e +#define SDMA6_UTCL1_RD_STATUS__INVREQ_SIZE__SHIFT 0x1f +#define SDMA6_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L +#define SDMA6_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L +#define SDMA6_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L +#define SDMA6_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L +#define SDMA6_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L +#define SDMA6_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L +#define SDMA6_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L +#define SDMA6_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L +#define SDMA6_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L +#define SDMA6_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L +#define SDMA6_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L +#define 
SDMA6_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L +#define SDMA6_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L +#define SDMA6_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L +#define SDMA6_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L +#define SDMA6_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L +#define SDMA6_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L +#define SDMA6_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L +#define SDMA6_UTCL1_RD_STATUS__PAGE_FAULT_MASK 0x00040000L +#define SDMA6_UTCL1_RD_STATUS__PAGE_NULL_MASK 0x00080000L +#define SDMA6_UTCL1_RD_STATUS__REQL2_IDLE_MASK 0x00100000L +#define SDMA6_UTCL1_RD_STATUS__CE_L1_STALL_MASK 0x00200000L +#define SDMA6_UTCL1_RD_STATUS__NEXT_RD_VECTOR_MASK 0x03C00000L +#define SDMA6_UTCL1_RD_STATUS__MERGE_STATE_MASK 0x1C000000L +#define SDMA6_UTCL1_RD_STATUS__ADDR_RD_RTR_MASK 0x20000000L +#define SDMA6_UTCL1_RD_STATUS__WPTR_POLLING_MASK 0x40000000L +#define SDMA6_UTCL1_RD_STATUS__INVREQ_SIZE_MASK 0x80000000L +//SDMA6_UTCL1_WR_STATUS +#define SDMA6_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0 +#define SDMA6_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1 +#define SDMA6_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2 +#define SDMA6_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3 +#define SDMA6_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4 +#define SDMA6_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5 +#define SDMA6_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6 +#define SDMA6_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7 +#define SDMA6_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8 +#define SDMA6_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9 +#define SDMA6_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa +#define SDMA6_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb +#define SDMA6_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc +#define SDMA6_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd +#define SDMA6_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe +#define SDMA6_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf +#define SDMA6_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10 +#define SDMA6_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11 +#define SDMA6_UTCL1_WR_STATUS__PAGE_FAULT__SHIFT 0x12 +#define SDMA6_UTCL1_WR_STATUS__PAGE_NULL__SHIFT 0x13 +#define SDMA6_UTCL1_WR_STATUS__REQL2_IDLE__SHIFT 0x14 +#define SDMA6_UTCL1_WR_STATUS__F32_WR_RTR__SHIFT 0x15 +#define SDMA6_UTCL1_WR_STATUS__NEXT_WR_VECTOR__SHIFT 0x16 +#define SDMA6_UTCL1_WR_STATUS__MERGE_STATE__SHIFT 0x19 +#define SDMA6_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY__SHIFT 0x1c +#define SDMA6_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL__SHIFT 0x1d +#define SDMA6_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY__SHIFT 0x1e +#define SDMA6_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL__SHIFT 0x1f +#define SDMA6_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L +#define SDMA6_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L +#define SDMA6_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L +#define SDMA6_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L +#define SDMA6_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L +#define SDMA6_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L +#define SDMA6_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L +#define SDMA6_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L +#define SDMA6_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L +#define SDMA6_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L +#define 
SDMA6_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L +#define SDMA6_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L +#define SDMA6_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L +#define SDMA6_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L +#define SDMA6_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L +#define SDMA6_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L +#define SDMA6_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L +#define SDMA6_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L +#define SDMA6_UTCL1_WR_STATUS__PAGE_FAULT_MASK 0x00040000L +#define SDMA6_UTCL1_WR_STATUS__PAGE_NULL_MASK 0x00080000L +#define SDMA6_UTCL1_WR_STATUS__REQL2_IDLE_MASK 0x00100000L +#define SDMA6_UTCL1_WR_STATUS__F32_WR_RTR_MASK 0x00200000L +#define SDMA6_UTCL1_WR_STATUS__NEXT_WR_VECTOR_MASK 0x01C00000L +#define SDMA6_UTCL1_WR_STATUS__MERGE_STATE_MASK 0x0E000000L +#define SDMA6_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY_MASK 0x10000000L +#define SDMA6_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL_MASK 0x20000000L +#define SDMA6_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY_MASK 0x40000000L +#define SDMA6_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL_MASK 0x80000000L +//SDMA6_UTCL1_INV0 +#define SDMA6_UTCL1_INV0__INV_MIDDLE__SHIFT 0x0 +#define SDMA6_UTCL1_INV0__RD_TIMEOUT__SHIFT 0x1 +#define SDMA6_UTCL1_INV0__WR_TIMEOUT__SHIFT 0x2 +#define SDMA6_UTCL1_INV0__RD_IN_INVADR__SHIFT 0x3 +#define SDMA6_UTCL1_INV0__WR_IN_INVADR__SHIFT 0x4 +#define SDMA6_UTCL1_INV0__PAGE_NULL_SW__SHIFT 0x5 +#define SDMA6_UTCL1_INV0__XNACK_IS_INVADR__SHIFT 0x6 +#define SDMA6_UTCL1_INV0__INVREQ_ENABLE__SHIFT 0x7 +#define SDMA6_UTCL1_INV0__NACK_TIMEOUT_SW__SHIFT 0x8 +#define SDMA6_UTCL1_INV0__NFLUSH_INV_IDLE__SHIFT 0x9 +#define SDMA6_UTCL1_INV0__FLUSH_INV_IDLE__SHIFT 0xa +#define SDMA6_UTCL1_INV0__INV_FLUSHTYPE__SHIFT 0xb +#define SDMA6_UTCL1_INV0__INV_VMID_VEC__SHIFT 0xc +#define SDMA6_UTCL1_INV0__INV_ADDR_HI__SHIFT 0x1c +#define SDMA6_UTCL1_INV0__INV_MIDDLE_MASK 0x00000001L +#define SDMA6_UTCL1_INV0__RD_TIMEOUT_MASK 0x00000002L +#define SDMA6_UTCL1_INV0__WR_TIMEOUT_MASK 0x00000004L +#define SDMA6_UTCL1_INV0__RD_IN_INVADR_MASK 0x00000008L +#define SDMA6_UTCL1_INV0__WR_IN_INVADR_MASK 0x00000010L +#define SDMA6_UTCL1_INV0__PAGE_NULL_SW_MASK 0x00000020L +#define SDMA6_UTCL1_INV0__XNACK_IS_INVADR_MASK 0x00000040L +#define SDMA6_UTCL1_INV0__INVREQ_ENABLE_MASK 0x00000080L +#define SDMA6_UTCL1_INV0__NACK_TIMEOUT_SW_MASK 0x00000100L +#define SDMA6_UTCL1_INV0__NFLUSH_INV_IDLE_MASK 0x00000200L +#define SDMA6_UTCL1_INV0__FLUSH_INV_IDLE_MASK 0x00000400L +#define SDMA6_UTCL1_INV0__INV_FLUSHTYPE_MASK 0x00000800L +#define SDMA6_UTCL1_INV0__INV_VMID_VEC_MASK 0x0FFFF000L +#define SDMA6_UTCL1_INV0__INV_ADDR_HI_MASK 0xF0000000L +//SDMA6_UTCL1_INV1 +#define SDMA6_UTCL1_INV1__INV_ADDR_LO__SHIFT 0x0 +#define SDMA6_UTCL1_INV1__INV_ADDR_LO_MASK 0xFFFFFFFFL +//SDMA6_UTCL1_INV2 +#define SDMA6_UTCL1_INV2__INV_NFLUSH_VMID_VEC__SHIFT 0x0 +#define SDMA6_UTCL1_INV2__INV_NFLUSH_VMID_VEC_MASK 0xFFFFFFFFL +//SDMA6_UTCL1_RD_XNACK0 +#define SDMA6_UTCL1_RD_XNACK0__XNACK_ADDR_LO__SHIFT 0x0 +#define SDMA6_UTCL1_RD_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL +//SDMA6_UTCL1_RD_XNACK1 +#define SDMA6_UTCL1_RD_XNACK1__XNACK_ADDR_HI__SHIFT 0x0 +#define SDMA6_UTCL1_RD_XNACK1__XNACK_VMID__SHIFT 0x4 +#define SDMA6_UTCL1_RD_XNACK1__XNACK_VECTOR__SHIFT 0x8 +#define SDMA6_UTCL1_RD_XNACK1__IS_XNACK__SHIFT 0x1a +#define SDMA6_UTCL1_RD_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL +#define SDMA6_UTCL1_RD_XNACK1__XNACK_VMID_MASK 0x000000F0L +#define 
SDMA6_UTCL1_RD_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L +#define SDMA6_UTCL1_RD_XNACK1__IS_XNACK_MASK 0x0C000000L +//SDMA6_UTCL1_WR_XNACK0 +#define SDMA6_UTCL1_WR_XNACK0__XNACK_ADDR_LO__SHIFT 0x0 +#define SDMA6_UTCL1_WR_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL +//SDMA6_UTCL1_WR_XNACK1 +#define SDMA6_UTCL1_WR_XNACK1__XNACK_ADDR_HI__SHIFT 0x0 +#define SDMA6_UTCL1_WR_XNACK1__XNACK_VMID__SHIFT 0x4 +#define SDMA6_UTCL1_WR_XNACK1__XNACK_VECTOR__SHIFT 0x8 +#define SDMA6_UTCL1_WR_XNACK1__IS_XNACK__SHIFT 0x1a +#define SDMA6_UTCL1_WR_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL +#define SDMA6_UTCL1_WR_XNACK1__XNACK_VMID_MASK 0x000000F0L +#define SDMA6_UTCL1_WR_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L +#define SDMA6_UTCL1_WR_XNACK1__IS_XNACK_MASK 0x0C000000L +//SDMA6_UTCL1_TIMEOUT +#define SDMA6_UTCL1_TIMEOUT__RD_XNACK_LIMIT__SHIFT 0x0 +#define SDMA6_UTCL1_TIMEOUT__WR_XNACK_LIMIT__SHIFT 0x10 +#define SDMA6_UTCL1_TIMEOUT__RD_XNACK_LIMIT_MASK 0x0000FFFFL +#define SDMA6_UTCL1_TIMEOUT__WR_XNACK_LIMIT_MASK 0xFFFF0000L +//SDMA6_UTCL1_PAGE +#define SDMA6_UTCL1_PAGE__VM_HOLE__SHIFT 0x0 +#define SDMA6_UTCL1_PAGE__REQ_TYPE__SHIFT 0x1 +#define SDMA6_UTCL1_PAGE__USE_MTYPE__SHIFT 0x6 +#define SDMA6_UTCL1_PAGE__USE_PT_SNOOP__SHIFT 0x9 +#define SDMA6_UTCL1_PAGE__VM_HOLE_MASK 0x00000001L +#define SDMA6_UTCL1_PAGE__REQ_TYPE_MASK 0x0000001EL +#define SDMA6_UTCL1_PAGE__USE_MTYPE_MASK 0x000001C0L +#define SDMA6_UTCL1_PAGE__USE_PT_SNOOP_MASK 0x00000200L +//SDMA6_POWER_CNTL_IDLE +#define SDMA6_POWER_CNTL_IDLE__DELAY0__SHIFT 0x0 +#define SDMA6_POWER_CNTL_IDLE__DELAY1__SHIFT 0x10 +#define SDMA6_POWER_CNTL_IDLE__DELAY2__SHIFT 0x18 +#define SDMA6_POWER_CNTL_IDLE__DELAY0_MASK 0x0000FFFFL +#define SDMA6_POWER_CNTL_IDLE__DELAY1_MASK 0x00FF0000L +#define SDMA6_POWER_CNTL_IDLE__DELAY2_MASK 0xFF000000L +//SDMA6_RELAX_ORDERING_LUT +#define SDMA6_RELAX_ORDERING_LUT__RESERVED0__SHIFT 0x0 +#define SDMA6_RELAX_ORDERING_LUT__COPY__SHIFT 0x1 +#define SDMA6_RELAX_ORDERING_LUT__WRITE__SHIFT 0x2 +#define SDMA6_RELAX_ORDERING_LUT__RESERVED3__SHIFT 0x3 +#define SDMA6_RELAX_ORDERING_LUT__RESERVED4__SHIFT 0x4 +#define SDMA6_RELAX_ORDERING_LUT__FENCE__SHIFT 0x5 +#define SDMA6_RELAX_ORDERING_LUT__RESERVED76__SHIFT 0x6 +#define SDMA6_RELAX_ORDERING_LUT__POLL_MEM__SHIFT 0x8 +#define SDMA6_RELAX_ORDERING_LUT__COND_EXE__SHIFT 0x9 +#define SDMA6_RELAX_ORDERING_LUT__ATOMIC__SHIFT 0xa +#define SDMA6_RELAX_ORDERING_LUT__CONST_FILL__SHIFT 0xb +#define SDMA6_RELAX_ORDERING_LUT__PTEPDE__SHIFT 0xc +#define SDMA6_RELAX_ORDERING_LUT__TIMESTAMP__SHIFT 0xd +#define SDMA6_RELAX_ORDERING_LUT__RESERVED__SHIFT 0xe +#define SDMA6_RELAX_ORDERING_LUT__WORLD_SWITCH__SHIFT 0x1b +#define SDMA6_RELAX_ORDERING_LUT__RPTR_WRB__SHIFT 0x1c +#define SDMA6_RELAX_ORDERING_LUT__WPTR_POLL__SHIFT 0x1d +#define SDMA6_RELAX_ORDERING_LUT__IB_FETCH__SHIFT 0x1e +#define SDMA6_RELAX_ORDERING_LUT__RB_FETCH__SHIFT 0x1f +#define SDMA6_RELAX_ORDERING_LUT__RESERVED0_MASK 0x00000001L +#define SDMA6_RELAX_ORDERING_LUT__COPY_MASK 0x00000002L +#define SDMA6_RELAX_ORDERING_LUT__WRITE_MASK 0x00000004L +#define SDMA6_RELAX_ORDERING_LUT__RESERVED3_MASK 0x00000008L +#define SDMA6_RELAX_ORDERING_LUT__RESERVED4_MASK 0x00000010L +#define SDMA6_RELAX_ORDERING_LUT__FENCE_MASK 0x00000020L +#define SDMA6_RELAX_ORDERING_LUT__RESERVED76_MASK 0x000000C0L +#define SDMA6_RELAX_ORDERING_LUT__POLL_MEM_MASK 0x00000100L +#define SDMA6_RELAX_ORDERING_LUT__COND_EXE_MASK 0x00000200L +#define SDMA6_RELAX_ORDERING_LUT__ATOMIC_MASK 0x00000400L +#define SDMA6_RELAX_ORDERING_LUT__CONST_FILL_MASK 0x00000800L +#define 
SDMA6_RELAX_ORDERING_LUT__PTEPDE_MASK 0x00001000L +#define SDMA6_RELAX_ORDERING_LUT__TIMESTAMP_MASK 0x00002000L +#define SDMA6_RELAX_ORDERING_LUT__RESERVED_MASK 0x07FFC000L +#define SDMA6_RELAX_ORDERING_LUT__WORLD_SWITCH_MASK 0x08000000L +#define SDMA6_RELAX_ORDERING_LUT__RPTR_WRB_MASK 0x10000000L +#define SDMA6_RELAX_ORDERING_LUT__WPTR_POLL_MASK 0x20000000L +#define SDMA6_RELAX_ORDERING_LUT__IB_FETCH_MASK 0x40000000L +#define SDMA6_RELAX_ORDERING_LUT__RB_FETCH_MASK 0x80000000L +//SDMA6_CHICKEN_BITS_2 +#define SDMA6_CHICKEN_BITS_2__F32_CMD_PROC_DELAY__SHIFT 0x0 +#define SDMA6_CHICKEN_BITS_2__F32_CMD_PROC_DELAY_MASK 0x0000000FL +//SDMA6_STATUS3_REG +#define SDMA6_STATUS3_REG__CMD_OP_STATUS__SHIFT 0x0 +#define SDMA6_STATUS3_REG__PREV_VM_CMD__SHIFT 0x10 +#define SDMA6_STATUS3_REG__EXCEPTION_IDLE__SHIFT 0x14 +#define SDMA6_STATUS3_REG__QUEUE_ID_MATCH__SHIFT 0x15 +#define SDMA6_STATUS3_REG__INT_QUEUE_ID__SHIFT 0x16 +#define SDMA6_STATUS3_REG__CMD_OP_STATUS_MASK 0x0000FFFFL +#define SDMA6_STATUS3_REG__PREV_VM_CMD_MASK 0x000F0000L +#define SDMA6_STATUS3_REG__EXCEPTION_IDLE_MASK 0x00100000L +#define SDMA6_STATUS3_REG__QUEUE_ID_MATCH_MASK 0x00200000L +#define SDMA6_STATUS3_REG__INT_QUEUE_ID_MASK 0x03C00000L +//SDMA6_PHYSICAL_ADDR_LO +#define SDMA6_PHYSICAL_ADDR_LO__D_VALID__SHIFT 0x0 +#define SDMA6_PHYSICAL_ADDR_LO__DIRTY__SHIFT 0x1 +#define SDMA6_PHYSICAL_ADDR_LO__PHY_VALID__SHIFT 0x2 +#define SDMA6_PHYSICAL_ADDR_LO__ADDR__SHIFT 0xc +#define SDMA6_PHYSICAL_ADDR_LO__D_VALID_MASK 0x00000001L +#define SDMA6_PHYSICAL_ADDR_LO__DIRTY_MASK 0x00000002L +#define SDMA6_PHYSICAL_ADDR_LO__PHY_VALID_MASK 0x00000004L +#define SDMA6_PHYSICAL_ADDR_LO__ADDR_MASK 0xFFFFF000L +//SDMA6_PHYSICAL_ADDR_HI +#define SDMA6_PHYSICAL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA6_PHYSICAL_ADDR_HI__ADDR_MASK 0x0000FFFFL +//SDMA6_PHASE2_QUANTUM +#define SDMA6_PHASE2_QUANTUM__UNIT__SHIFT 0x0 +#define SDMA6_PHASE2_QUANTUM__VALUE__SHIFT 0x8 +#define SDMA6_PHASE2_QUANTUM__PREFER__SHIFT 0x1e +#define SDMA6_PHASE2_QUANTUM__UNIT_MASK 0x0000000FL +#define SDMA6_PHASE2_QUANTUM__VALUE_MASK 0x00FFFF00L +#define SDMA6_PHASE2_QUANTUM__PREFER_MASK 0x40000000L +//SDMA6_ERROR_LOG +#define SDMA6_ERROR_LOG__OVERRIDE__SHIFT 0x0 +#define SDMA6_ERROR_LOG__STATUS__SHIFT 0x10 +#define SDMA6_ERROR_LOG__OVERRIDE_MASK 0x0000FFFFL +#define SDMA6_ERROR_LOG__STATUS_MASK 0xFFFF0000L +//SDMA6_PUB_DUMMY_REG0 +#define SDMA6_PUB_DUMMY_REG0__VALUE__SHIFT 0x0 +#define SDMA6_PUB_DUMMY_REG0__VALUE_MASK 0xFFFFFFFFL +//SDMA6_PUB_DUMMY_REG1 +#define SDMA6_PUB_DUMMY_REG1__VALUE__SHIFT 0x0 +#define SDMA6_PUB_DUMMY_REG1__VALUE_MASK 0xFFFFFFFFL +//SDMA6_PUB_DUMMY_REG2 +#define SDMA6_PUB_DUMMY_REG2__VALUE__SHIFT 0x0 +#define SDMA6_PUB_DUMMY_REG2__VALUE_MASK 0xFFFFFFFFL +//SDMA6_PUB_DUMMY_REG3 +#define SDMA6_PUB_DUMMY_REG3__VALUE__SHIFT 0x0 +#define SDMA6_PUB_DUMMY_REG3__VALUE_MASK 0xFFFFFFFFL +//SDMA6_F32_COUNTER +#define SDMA6_F32_COUNTER__VALUE__SHIFT 0x0 +#define SDMA6_F32_COUNTER__VALUE_MASK 0xFFFFFFFFL +//SDMA6_UNBREAKABLE +#define SDMA6_UNBREAKABLE__VALUE__SHIFT 0x0 +#define SDMA6_UNBREAKABLE__VALUE_MASK 0x00000001L +//SDMA6_PERFMON_CNTL +#define SDMA6_PERFMON_CNTL__PERF_ENABLE0__SHIFT 0x0 +#define SDMA6_PERFMON_CNTL__PERF_CLEAR0__SHIFT 0x1 +#define SDMA6_PERFMON_CNTL__PERF_SEL0__SHIFT 0x2 +#define SDMA6_PERFMON_CNTL__PERF_ENABLE1__SHIFT 0xa +#define SDMA6_PERFMON_CNTL__PERF_CLEAR1__SHIFT 0xb +#define SDMA6_PERFMON_CNTL__PERF_SEL1__SHIFT 0xc +#define SDMA6_PERFMON_CNTL__PERF_ENABLE0_MASK 0x00000001L +#define SDMA6_PERFMON_CNTL__PERF_CLEAR0_MASK 0x00000002L +#define 
SDMA6_PERFMON_CNTL__PERF_SEL0_MASK 0x000003FCL +#define SDMA6_PERFMON_CNTL__PERF_ENABLE1_MASK 0x00000400L +#define SDMA6_PERFMON_CNTL__PERF_CLEAR1_MASK 0x00000800L +#define SDMA6_PERFMON_CNTL__PERF_SEL1_MASK 0x000FF000L +//SDMA6_PERFCOUNTER0_RESULT +#define SDMA6_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT 0x0 +#define SDMA6_PERFCOUNTER0_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL +//SDMA6_PERFCOUNTER1_RESULT +#define SDMA6_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT 0x0 +#define SDMA6_PERFCOUNTER1_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL +//SDMA6_PERFCOUNTER_TAG_DELAY_RANGE +#define SDMA6_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW__SHIFT 0x0 +#define SDMA6_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH__SHIFT 0xe +#define SDMA6_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW__SHIFT 0x1c +#define SDMA6_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW_MASK 0x00003FFFL +#define SDMA6_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH_MASK 0x0FFFC000L +#define SDMA6_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW_MASK 0x10000000L +//SDMA6_CRD_CNTL +#define SDMA6_CRD_CNTL__MC_WRREQ_CREDIT__SHIFT 0x7 +#define SDMA6_CRD_CNTL__MC_RDREQ_CREDIT__SHIFT 0xd +#define SDMA6_CRD_CNTL__MC_WRREQ_CREDIT_MASK 0x00001F80L +#define SDMA6_CRD_CNTL__MC_RDREQ_CREDIT_MASK 0x0007E000L +//SDMA6_GPU_IOV_VIOLATION_LOG +#define SDMA6_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0 +#define SDMA6_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1 +#define SDMA6_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2 +#define SDMA6_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION__SHIFT 0x14 +#define SDMA6_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x15 +#define SDMA6_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x16 +#define SDMA6_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L +#define SDMA6_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L +#define SDMA6_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x000FFFFCL +#define SDMA6_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION_MASK 0x00100000L +#define SDMA6_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00200000L +#define SDMA6_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x03C00000L +//SDMA6_ULV_CNTL +#define SDMA6_ULV_CNTL__HYSTERESIS__SHIFT 0x0 +#define SDMA6_ULV_CNTL__ENTER_ULV_INT_CLR__SHIFT 0x1b +#define SDMA6_ULV_CNTL__EXIT_ULV_INT_CLR__SHIFT 0x1c +#define SDMA6_ULV_CNTL__ENTER_ULV_INT__SHIFT 0x1d +#define SDMA6_ULV_CNTL__EXIT_ULV_INT__SHIFT 0x1e +#define SDMA6_ULV_CNTL__ULV_STATUS__SHIFT 0x1f +#define SDMA6_ULV_CNTL__HYSTERESIS_MASK 0x0000001FL +#define SDMA6_ULV_CNTL__ENTER_ULV_INT_CLR_MASK 0x08000000L +#define SDMA6_ULV_CNTL__EXIT_ULV_INT_CLR_MASK 0x10000000L +#define SDMA6_ULV_CNTL__ENTER_ULV_INT_MASK 0x20000000L +#define SDMA6_ULV_CNTL__EXIT_ULV_INT_MASK 0x40000000L +#define SDMA6_ULV_CNTL__ULV_STATUS_MASK 0x80000000L +//SDMA6_EA_DBIT_ADDR_DATA +#define SDMA6_EA_DBIT_ADDR_DATA__VALUE__SHIFT 0x0 +#define SDMA6_EA_DBIT_ADDR_DATA__VALUE_MASK 0xFFFFFFFFL +//SDMA6_EA_DBIT_ADDR_INDEX +#define SDMA6_EA_DBIT_ADDR_INDEX__VALUE__SHIFT 0x0 +#define SDMA6_EA_DBIT_ADDR_INDEX__VALUE_MASK 0x00000007L +//SDMA6_GPU_IOV_VIOLATION_LOG2 +#define SDMA6_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID__SHIFT 0x0 +#define SDMA6_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID_MASK 0x000000FFL +//SDMA6_GFX_RB_CNTL +#define SDMA6_GFX_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA6_GFX_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA6_GFX_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA6_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA6_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA6_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA6_GFX_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define 
SDMA6_GFX_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA6_GFX_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA6_GFX_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA6_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA6_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA6_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA6_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA6_GFX_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA6_GFX_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA6_GFX_RB_BASE +#define SDMA6_GFX_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA6_GFX_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA6_GFX_RB_BASE_HI +#define SDMA6_GFX_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA6_GFX_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA6_GFX_RB_RPTR +#define SDMA6_GFX_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA6_GFX_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA6_GFX_RB_RPTR_HI +#define SDMA6_GFX_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA6_GFX_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA6_GFX_RB_WPTR +#define SDMA6_GFX_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA6_GFX_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA6_GFX_RB_WPTR_HI +#define SDMA6_GFX_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA6_GFX_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA6_GFX_RB_WPTR_POLL_CNTL +#define SDMA6_GFX_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA6_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA6_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA6_GFX_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA6_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA6_GFX_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA6_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA6_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA6_GFX_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA6_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA6_GFX_RB_RPTR_ADDR_HI +#define SDMA6_GFX_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA6_GFX_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA6_GFX_RB_RPTR_ADDR_LO +#define SDMA6_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA6_GFX_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA6_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA6_GFX_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA6_GFX_IB_CNTL +#define SDMA6_GFX_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA6_GFX_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA6_GFX_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA6_GFX_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA6_GFX_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA6_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA6_GFX_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA6_GFX_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA6_GFX_IB_RPTR +#define SDMA6_GFX_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA6_GFX_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA6_GFX_IB_OFFSET +#define SDMA6_GFX_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA6_GFX_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA6_GFX_IB_BASE_LO +#define SDMA6_GFX_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA6_GFX_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA6_GFX_IB_BASE_HI +#define SDMA6_GFX_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA6_GFX_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA6_GFX_IB_SIZE +#define SDMA6_GFX_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA6_GFX_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA6_GFX_SKIP_CNTL +#define SDMA6_GFX_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA6_GFX_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL 
+//SDMA6_GFX_CONTEXT_STATUS +#define SDMA6_GFX_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA6_GFX_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA6_GFX_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA6_GFX_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA6_GFX_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA6_GFX_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA6_GFX_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA6_GFX_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA6_GFX_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA6_GFX_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA6_GFX_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA6_GFX_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA6_GFX_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA6_GFX_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA6_GFX_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA6_GFX_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA6_GFX_DOORBELL +#define SDMA6_GFX_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA6_GFX_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA6_GFX_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA6_GFX_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA6_GFX_CONTEXT_CNTL +#define SDMA6_GFX_CONTEXT_CNTL__RESUME_CTX__SHIFT 0x10 +#define SDMA6_GFX_CONTEXT_CNTL__RESUME_CTX_MASK 0x00010000L +//SDMA6_GFX_STATUS +#define SDMA6_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA6_GFX_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA6_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA6_GFX_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA6_GFX_DOORBELL_LOG +#define SDMA6_GFX_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA6_GFX_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA6_GFX_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA6_GFX_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA6_GFX_WATERMARK +#define SDMA6_GFX_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA6_GFX_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA6_GFX_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA6_GFX_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA6_GFX_DOORBELL_OFFSET +#define SDMA6_GFX_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA6_GFX_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA6_GFX_CSA_ADDR_LO +#define SDMA6_GFX_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA6_GFX_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA6_GFX_CSA_ADDR_HI +#define SDMA6_GFX_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA6_GFX_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA6_GFX_IB_SUB_REMAIN +#define SDMA6_GFX_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA6_GFX_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA6_GFX_PREEMPT +#define SDMA6_GFX_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA6_GFX_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA6_GFX_DUMMY_REG +#define SDMA6_GFX_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA6_GFX_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA6_GFX_RB_WPTR_POLL_ADDR_HI +#define SDMA6_GFX_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA6_GFX_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA6_GFX_RB_WPTR_POLL_ADDR_LO +#define SDMA6_GFX_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA6_GFX_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA6_GFX_RB_AQL_CNTL +#define SDMA6_GFX_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA6_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA6_GFX_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA6_GFX_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA6_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define 
SDMA6_GFX_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA6_GFX_MINOR_PTR_UPDATE +#define SDMA6_GFX_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA6_GFX_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA6_GFX_MIDCMD_DATA0 +#define SDMA6_GFX_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA6_GFX_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA6_GFX_MIDCMD_DATA1 +#define SDMA6_GFX_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA6_GFX_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA6_GFX_MIDCMD_DATA2 +#define SDMA6_GFX_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA6_GFX_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA6_GFX_MIDCMD_DATA3 +#define SDMA6_GFX_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA6_GFX_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA6_GFX_MIDCMD_DATA4 +#define SDMA6_GFX_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA6_GFX_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA6_GFX_MIDCMD_DATA5 +#define SDMA6_GFX_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA6_GFX_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA6_GFX_MIDCMD_DATA6 +#define SDMA6_GFX_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA6_GFX_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA6_GFX_MIDCMD_DATA7 +#define SDMA6_GFX_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA6_GFX_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA6_GFX_MIDCMD_DATA8 +#define SDMA6_GFX_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA6_GFX_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA6_GFX_MIDCMD_CNTL +#define SDMA6_GFX_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA6_GFX_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA6_GFX_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA6_GFX_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA6_GFX_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA6_GFX_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA6_GFX_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA6_GFX_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA6_PAGE_RB_CNTL +#define SDMA6_PAGE_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA6_PAGE_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA6_PAGE_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA6_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA6_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA6_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA6_PAGE_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA6_PAGE_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA6_PAGE_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA6_PAGE_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA6_PAGE_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA6_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA6_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA6_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA6_PAGE_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA6_PAGE_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA6_PAGE_RB_BASE +#define SDMA6_PAGE_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA6_PAGE_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA6_PAGE_RB_BASE_HI +#define SDMA6_PAGE_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA6_PAGE_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA6_PAGE_RB_RPTR +#define SDMA6_PAGE_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA6_PAGE_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA6_PAGE_RB_RPTR_HI +#define SDMA6_PAGE_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA6_PAGE_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA6_PAGE_RB_WPTR +#define SDMA6_PAGE_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA6_PAGE_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA6_PAGE_RB_WPTR_HI +#define SDMA6_PAGE_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define 
SDMA6_PAGE_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA6_PAGE_RB_WPTR_POLL_CNTL +#define SDMA6_PAGE_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA6_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA6_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA6_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA6_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA6_PAGE_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA6_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA6_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA6_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA6_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA6_PAGE_RB_RPTR_ADDR_HI +#define SDMA6_PAGE_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA6_PAGE_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA6_PAGE_RB_RPTR_ADDR_LO +#define SDMA6_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA6_PAGE_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA6_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA6_PAGE_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA6_PAGE_IB_CNTL +#define SDMA6_PAGE_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA6_PAGE_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA6_PAGE_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA6_PAGE_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA6_PAGE_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA6_PAGE_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA6_PAGE_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA6_PAGE_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA6_PAGE_IB_RPTR +#define SDMA6_PAGE_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA6_PAGE_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA6_PAGE_IB_OFFSET +#define SDMA6_PAGE_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA6_PAGE_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA6_PAGE_IB_BASE_LO +#define SDMA6_PAGE_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA6_PAGE_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA6_PAGE_IB_BASE_HI +#define SDMA6_PAGE_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA6_PAGE_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA6_PAGE_IB_SIZE +#define SDMA6_PAGE_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA6_PAGE_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA6_PAGE_SKIP_CNTL +#define SDMA6_PAGE_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA6_PAGE_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA6_PAGE_CONTEXT_STATUS +#define SDMA6_PAGE_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA6_PAGE_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA6_PAGE_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA6_PAGE_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA6_PAGE_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA6_PAGE_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA6_PAGE_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA6_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA6_PAGE_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA6_PAGE_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA6_PAGE_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA6_PAGE_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA6_PAGE_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA6_PAGE_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA6_PAGE_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA6_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA6_PAGE_DOORBELL +#define SDMA6_PAGE_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA6_PAGE_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA6_PAGE_DOORBELL__ENABLE_MASK 0x10000000L +#define 
SDMA6_PAGE_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA6_PAGE_STATUS +#define SDMA6_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA6_PAGE_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA6_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA6_PAGE_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA6_PAGE_DOORBELL_LOG +#define SDMA6_PAGE_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA6_PAGE_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA6_PAGE_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA6_PAGE_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA6_PAGE_WATERMARK +#define SDMA6_PAGE_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA6_PAGE_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA6_PAGE_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA6_PAGE_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA6_PAGE_DOORBELL_OFFSET +#define SDMA6_PAGE_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA6_PAGE_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA6_PAGE_CSA_ADDR_LO +#define SDMA6_PAGE_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA6_PAGE_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA6_PAGE_CSA_ADDR_HI +#define SDMA6_PAGE_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA6_PAGE_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA6_PAGE_IB_SUB_REMAIN +#define SDMA6_PAGE_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA6_PAGE_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA6_PAGE_PREEMPT +#define SDMA6_PAGE_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA6_PAGE_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA6_PAGE_DUMMY_REG +#define SDMA6_PAGE_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA6_PAGE_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA6_PAGE_RB_WPTR_POLL_ADDR_HI +#define SDMA6_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA6_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA6_PAGE_RB_WPTR_POLL_ADDR_LO +#define SDMA6_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA6_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA6_PAGE_RB_AQL_CNTL +#define SDMA6_PAGE_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA6_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA6_PAGE_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA6_PAGE_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA6_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA6_PAGE_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA6_PAGE_MINOR_PTR_UPDATE +#define SDMA6_PAGE_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA6_PAGE_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA6_PAGE_MIDCMD_DATA0 +#define SDMA6_PAGE_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA6_PAGE_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA6_PAGE_MIDCMD_DATA1 +#define SDMA6_PAGE_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA6_PAGE_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA6_PAGE_MIDCMD_DATA2 +#define SDMA6_PAGE_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA6_PAGE_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA6_PAGE_MIDCMD_DATA3 +#define SDMA6_PAGE_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA6_PAGE_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA6_PAGE_MIDCMD_DATA4 +#define SDMA6_PAGE_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA6_PAGE_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA6_PAGE_MIDCMD_DATA5 +#define SDMA6_PAGE_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA6_PAGE_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA6_PAGE_MIDCMD_DATA6 +#define SDMA6_PAGE_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA6_PAGE_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA6_PAGE_MIDCMD_DATA7 +#define SDMA6_PAGE_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA6_PAGE_MIDCMD_DATA7__DATA7_MASK 
0xFFFFFFFFL +//SDMA6_PAGE_MIDCMD_DATA8 +#define SDMA6_PAGE_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA6_PAGE_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA6_PAGE_MIDCMD_CNTL +#define SDMA6_PAGE_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA6_PAGE_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA6_PAGE_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA6_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA6_PAGE_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA6_PAGE_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA6_PAGE_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA6_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA6_RLC0_RB_CNTL +#define SDMA6_RLC0_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA6_RLC0_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA6_RLC0_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA6_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA6_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA6_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA6_RLC0_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA6_RLC0_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA6_RLC0_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA6_RLC0_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA6_RLC0_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA6_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA6_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA6_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA6_RLC0_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA6_RLC0_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA6_RLC0_RB_BASE +#define SDMA6_RLC0_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA6_RLC0_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA6_RLC0_RB_BASE_HI +#define SDMA6_RLC0_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA6_RLC0_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA6_RLC0_RB_RPTR +#define SDMA6_RLC0_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA6_RLC0_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA6_RLC0_RB_RPTR_HI +#define SDMA6_RLC0_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA6_RLC0_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA6_RLC0_RB_WPTR +#define SDMA6_RLC0_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA6_RLC0_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA6_RLC0_RB_WPTR_HI +#define SDMA6_RLC0_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA6_RLC0_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA6_RLC0_RB_WPTR_POLL_CNTL +#define SDMA6_RLC0_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA6_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA6_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA6_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA6_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA6_RLC0_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA6_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA6_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA6_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA6_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA6_RLC0_RB_RPTR_ADDR_HI +#define SDMA6_RLC0_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA6_RLC0_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA6_RLC0_RB_RPTR_ADDR_LO +#define SDMA6_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA6_RLC0_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA6_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA6_RLC0_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA6_RLC0_IB_CNTL +#define SDMA6_RLC0_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define 
SDMA6_RLC0_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA6_RLC0_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA6_RLC0_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA6_RLC0_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA6_RLC0_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA6_RLC0_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA6_RLC0_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA6_RLC0_IB_RPTR +#define SDMA6_RLC0_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA6_RLC0_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA6_RLC0_IB_OFFSET +#define SDMA6_RLC0_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA6_RLC0_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA6_RLC0_IB_BASE_LO +#define SDMA6_RLC0_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA6_RLC0_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA6_RLC0_IB_BASE_HI +#define SDMA6_RLC0_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA6_RLC0_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA6_RLC0_IB_SIZE +#define SDMA6_RLC0_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA6_RLC0_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA6_RLC0_SKIP_CNTL +#define SDMA6_RLC0_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA6_RLC0_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA6_RLC0_CONTEXT_STATUS +#define SDMA6_RLC0_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA6_RLC0_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA6_RLC0_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA6_RLC0_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA6_RLC0_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA6_RLC0_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA6_RLC0_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA6_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA6_RLC0_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA6_RLC0_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA6_RLC0_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA6_RLC0_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA6_RLC0_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA6_RLC0_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA6_RLC0_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA6_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA6_RLC0_DOORBELL +#define SDMA6_RLC0_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA6_RLC0_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA6_RLC0_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA6_RLC0_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA6_RLC0_STATUS +#define SDMA6_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA6_RLC0_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA6_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA6_RLC0_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA6_RLC0_DOORBELL_LOG +#define SDMA6_RLC0_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA6_RLC0_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA6_RLC0_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA6_RLC0_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA6_RLC0_WATERMARK +#define SDMA6_RLC0_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA6_RLC0_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA6_RLC0_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA6_RLC0_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA6_RLC0_DOORBELL_OFFSET +#define SDMA6_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA6_RLC0_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA6_RLC0_CSA_ADDR_LO +#define SDMA6_RLC0_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA6_RLC0_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA6_RLC0_CSA_ADDR_HI +#define SDMA6_RLC0_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA6_RLC0_CSA_ADDR_HI__ADDR_MASK 
0xFFFFFFFFL +//SDMA6_RLC0_IB_SUB_REMAIN +#define SDMA6_RLC0_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA6_RLC0_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA6_RLC0_PREEMPT +#define SDMA6_RLC0_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA6_RLC0_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA6_RLC0_DUMMY_REG +#define SDMA6_RLC0_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA6_RLC0_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA6_RLC0_RB_WPTR_POLL_ADDR_HI +#define SDMA6_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA6_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA6_RLC0_RB_WPTR_POLL_ADDR_LO +#define SDMA6_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA6_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA6_RLC0_RB_AQL_CNTL +#define SDMA6_RLC0_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA6_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA6_RLC0_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA6_RLC0_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA6_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA6_RLC0_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA6_RLC0_MINOR_PTR_UPDATE +#define SDMA6_RLC0_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA6_RLC0_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA6_RLC0_MIDCMD_DATA0 +#define SDMA6_RLC0_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA6_RLC0_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA6_RLC0_MIDCMD_DATA1 +#define SDMA6_RLC0_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA6_RLC0_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA6_RLC0_MIDCMD_DATA2 +#define SDMA6_RLC0_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA6_RLC0_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA6_RLC0_MIDCMD_DATA3 +#define SDMA6_RLC0_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA6_RLC0_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA6_RLC0_MIDCMD_DATA4 +#define SDMA6_RLC0_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA6_RLC0_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA6_RLC0_MIDCMD_DATA5 +#define SDMA6_RLC0_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA6_RLC0_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA6_RLC0_MIDCMD_DATA6 +#define SDMA6_RLC0_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA6_RLC0_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA6_RLC0_MIDCMD_DATA7 +#define SDMA6_RLC0_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA6_RLC0_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA6_RLC0_MIDCMD_DATA8 +#define SDMA6_RLC0_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA6_RLC0_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA6_RLC0_MIDCMD_CNTL +#define SDMA6_RLC0_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA6_RLC0_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA6_RLC0_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA6_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA6_RLC0_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA6_RLC0_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA6_RLC0_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA6_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA6_RLC1_RB_CNTL +#define SDMA6_RLC1_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA6_RLC1_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA6_RLC1_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA6_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA6_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA6_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA6_RLC1_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA6_RLC1_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA6_RLC1_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA6_RLC1_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define 
SDMA6_RLC1_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA6_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA6_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA6_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA6_RLC1_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA6_RLC1_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA6_RLC1_RB_BASE +#define SDMA6_RLC1_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA6_RLC1_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA6_RLC1_RB_BASE_HI +#define SDMA6_RLC1_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA6_RLC1_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA6_RLC1_RB_RPTR +#define SDMA6_RLC1_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA6_RLC1_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA6_RLC1_RB_RPTR_HI +#define SDMA6_RLC1_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA6_RLC1_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA6_RLC1_RB_WPTR +#define SDMA6_RLC1_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA6_RLC1_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA6_RLC1_RB_WPTR_HI +#define SDMA6_RLC1_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA6_RLC1_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA6_RLC1_RB_WPTR_POLL_CNTL +#define SDMA6_RLC1_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA6_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA6_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA6_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA6_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA6_RLC1_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA6_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA6_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA6_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA6_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA6_RLC1_RB_RPTR_ADDR_HI +#define SDMA6_RLC1_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA6_RLC1_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA6_RLC1_RB_RPTR_ADDR_LO +#define SDMA6_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA6_RLC1_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA6_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA6_RLC1_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA6_RLC1_IB_CNTL +#define SDMA6_RLC1_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA6_RLC1_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA6_RLC1_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA6_RLC1_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA6_RLC1_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA6_RLC1_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA6_RLC1_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA6_RLC1_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA6_RLC1_IB_RPTR +#define SDMA6_RLC1_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA6_RLC1_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA6_RLC1_IB_OFFSET +#define SDMA6_RLC1_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA6_RLC1_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA6_RLC1_IB_BASE_LO +#define SDMA6_RLC1_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA6_RLC1_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA6_RLC1_IB_BASE_HI +#define SDMA6_RLC1_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA6_RLC1_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA6_RLC1_IB_SIZE +#define SDMA6_RLC1_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA6_RLC1_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA6_RLC1_SKIP_CNTL +#define SDMA6_RLC1_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA6_RLC1_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA6_RLC1_CONTEXT_STATUS +#define SDMA6_RLC1_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define 
SDMA6_RLC1_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA6_RLC1_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA6_RLC1_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA6_RLC1_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA6_RLC1_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA6_RLC1_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA6_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA6_RLC1_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA6_RLC1_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA6_RLC1_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA6_RLC1_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA6_RLC1_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA6_RLC1_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA6_RLC1_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA6_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA6_RLC1_DOORBELL +#define SDMA6_RLC1_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA6_RLC1_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA6_RLC1_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA6_RLC1_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA6_RLC1_STATUS +#define SDMA6_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA6_RLC1_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA6_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA6_RLC1_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA6_RLC1_DOORBELL_LOG +#define SDMA6_RLC1_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA6_RLC1_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA6_RLC1_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA6_RLC1_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA6_RLC1_WATERMARK +#define SDMA6_RLC1_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA6_RLC1_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA6_RLC1_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA6_RLC1_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA6_RLC1_DOORBELL_OFFSET +#define SDMA6_RLC1_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA6_RLC1_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA6_RLC1_CSA_ADDR_LO +#define SDMA6_RLC1_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA6_RLC1_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA6_RLC1_CSA_ADDR_HI +#define SDMA6_RLC1_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA6_RLC1_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA6_RLC1_IB_SUB_REMAIN +#define SDMA6_RLC1_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA6_RLC1_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA6_RLC1_PREEMPT +#define SDMA6_RLC1_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA6_RLC1_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA6_RLC1_DUMMY_REG +#define SDMA6_RLC1_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA6_RLC1_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA6_RLC1_RB_WPTR_POLL_ADDR_HI +#define SDMA6_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA6_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA6_RLC1_RB_WPTR_POLL_ADDR_LO +#define SDMA6_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA6_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA6_RLC1_RB_AQL_CNTL +#define SDMA6_RLC1_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA6_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA6_RLC1_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA6_RLC1_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA6_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA6_RLC1_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA6_RLC1_MINOR_PTR_UPDATE +#define SDMA6_RLC1_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA6_RLC1_MINOR_PTR_UPDATE__ENABLE_MASK 
0x00000001L +//SDMA6_RLC1_MIDCMD_DATA0 +#define SDMA6_RLC1_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA6_RLC1_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA6_RLC1_MIDCMD_DATA1 +#define SDMA6_RLC1_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA6_RLC1_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA6_RLC1_MIDCMD_DATA2 +#define SDMA6_RLC1_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA6_RLC1_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA6_RLC1_MIDCMD_DATA3 +#define SDMA6_RLC1_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA6_RLC1_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA6_RLC1_MIDCMD_DATA4 +#define SDMA6_RLC1_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA6_RLC1_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA6_RLC1_MIDCMD_DATA5 +#define SDMA6_RLC1_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA6_RLC1_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA6_RLC1_MIDCMD_DATA6 +#define SDMA6_RLC1_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA6_RLC1_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA6_RLC1_MIDCMD_DATA7 +#define SDMA6_RLC1_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA6_RLC1_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA6_RLC1_MIDCMD_DATA8 +#define SDMA6_RLC1_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA6_RLC1_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA6_RLC1_MIDCMD_CNTL +#define SDMA6_RLC1_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA6_RLC1_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA6_RLC1_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA6_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA6_RLC1_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA6_RLC1_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA6_RLC1_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA6_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA6_RLC2_RB_CNTL +#define SDMA6_RLC2_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA6_RLC2_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA6_RLC2_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA6_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA6_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA6_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA6_RLC2_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA6_RLC2_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA6_RLC2_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA6_RLC2_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA6_RLC2_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA6_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA6_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA6_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA6_RLC2_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA6_RLC2_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA6_RLC2_RB_BASE +#define SDMA6_RLC2_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA6_RLC2_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA6_RLC2_RB_BASE_HI +#define SDMA6_RLC2_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA6_RLC2_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA6_RLC2_RB_RPTR +#define SDMA6_RLC2_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA6_RLC2_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA6_RLC2_RB_RPTR_HI +#define SDMA6_RLC2_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA6_RLC2_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA6_RLC2_RB_WPTR +#define SDMA6_RLC2_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA6_RLC2_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA6_RLC2_RB_WPTR_HI +#define SDMA6_RLC2_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA6_RLC2_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA6_RLC2_RB_WPTR_POLL_CNTL +#define SDMA6_RLC2_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define 
SDMA6_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA6_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA6_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA6_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA6_RLC2_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA6_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA6_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA6_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA6_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA6_RLC2_RB_RPTR_ADDR_HI +#define SDMA6_RLC2_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA6_RLC2_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA6_RLC2_RB_RPTR_ADDR_LO +#define SDMA6_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA6_RLC2_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA6_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA6_RLC2_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA6_RLC2_IB_CNTL +#define SDMA6_RLC2_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA6_RLC2_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA6_RLC2_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA6_RLC2_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA6_RLC2_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA6_RLC2_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA6_RLC2_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA6_RLC2_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA6_RLC2_IB_RPTR +#define SDMA6_RLC2_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA6_RLC2_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA6_RLC2_IB_OFFSET +#define SDMA6_RLC2_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA6_RLC2_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA6_RLC2_IB_BASE_LO +#define SDMA6_RLC2_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA6_RLC2_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA6_RLC2_IB_BASE_HI +#define SDMA6_RLC2_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA6_RLC2_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA6_RLC2_IB_SIZE +#define SDMA6_RLC2_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA6_RLC2_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA6_RLC2_SKIP_CNTL +#define SDMA6_RLC2_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA6_RLC2_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA6_RLC2_CONTEXT_STATUS +#define SDMA6_RLC2_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA6_RLC2_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA6_RLC2_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA6_RLC2_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA6_RLC2_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA6_RLC2_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA6_RLC2_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA6_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA6_RLC2_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA6_RLC2_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA6_RLC2_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA6_RLC2_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA6_RLC2_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA6_RLC2_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA6_RLC2_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA6_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA6_RLC2_DOORBELL +#define SDMA6_RLC2_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA6_RLC2_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA6_RLC2_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA6_RLC2_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA6_RLC2_STATUS +#define SDMA6_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define 
SDMA6_RLC2_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA6_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA6_RLC2_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA6_RLC2_DOORBELL_LOG +#define SDMA6_RLC2_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA6_RLC2_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA6_RLC2_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA6_RLC2_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA6_RLC2_WATERMARK +#define SDMA6_RLC2_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA6_RLC2_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA6_RLC2_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA6_RLC2_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA6_RLC2_DOORBELL_OFFSET +#define SDMA6_RLC2_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA6_RLC2_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA6_RLC2_CSA_ADDR_LO +#define SDMA6_RLC2_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA6_RLC2_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA6_RLC2_CSA_ADDR_HI +#define SDMA6_RLC2_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA6_RLC2_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA6_RLC2_IB_SUB_REMAIN +#define SDMA6_RLC2_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA6_RLC2_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA6_RLC2_PREEMPT +#define SDMA6_RLC2_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA6_RLC2_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA6_RLC2_DUMMY_REG +#define SDMA6_RLC2_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA6_RLC2_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA6_RLC2_RB_WPTR_POLL_ADDR_HI +#define SDMA6_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA6_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA6_RLC2_RB_WPTR_POLL_ADDR_LO +#define SDMA6_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA6_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA6_RLC2_RB_AQL_CNTL +#define SDMA6_RLC2_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA6_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA6_RLC2_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA6_RLC2_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA6_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA6_RLC2_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA6_RLC2_MINOR_PTR_UPDATE +#define SDMA6_RLC2_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA6_RLC2_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA6_RLC2_MIDCMD_DATA0 +#define SDMA6_RLC2_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA6_RLC2_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA6_RLC2_MIDCMD_DATA1 +#define SDMA6_RLC2_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA6_RLC2_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA6_RLC2_MIDCMD_DATA2 +#define SDMA6_RLC2_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA6_RLC2_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA6_RLC2_MIDCMD_DATA3 +#define SDMA6_RLC2_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA6_RLC2_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA6_RLC2_MIDCMD_DATA4 +#define SDMA6_RLC2_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA6_RLC2_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA6_RLC2_MIDCMD_DATA5 +#define SDMA6_RLC2_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA6_RLC2_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA6_RLC2_MIDCMD_DATA6 +#define SDMA6_RLC2_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA6_RLC2_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA6_RLC2_MIDCMD_DATA7 +#define SDMA6_RLC2_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA6_RLC2_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA6_RLC2_MIDCMD_DATA8 +#define SDMA6_RLC2_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA6_RLC2_MIDCMD_DATA8__DATA8_MASK 
0xFFFFFFFFL +//SDMA6_RLC2_MIDCMD_CNTL +#define SDMA6_RLC2_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA6_RLC2_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA6_RLC2_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA6_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA6_RLC2_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA6_RLC2_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA6_RLC2_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA6_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA6_RLC3_RB_CNTL +#define SDMA6_RLC3_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA6_RLC3_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA6_RLC3_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA6_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA6_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA6_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA6_RLC3_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA6_RLC3_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA6_RLC3_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA6_RLC3_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA6_RLC3_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA6_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA6_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA6_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA6_RLC3_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA6_RLC3_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA6_RLC3_RB_BASE +#define SDMA6_RLC3_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA6_RLC3_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA6_RLC3_RB_BASE_HI +#define SDMA6_RLC3_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA6_RLC3_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA6_RLC3_RB_RPTR +#define SDMA6_RLC3_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA6_RLC3_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA6_RLC3_RB_RPTR_HI +#define SDMA6_RLC3_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA6_RLC3_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA6_RLC3_RB_WPTR +#define SDMA6_RLC3_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA6_RLC3_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA6_RLC3_RB_WPTR_HI +#define SDMA6_RLC3_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA6_RLC3_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA6_RLC3_RB_WPTR_POLL_CNTL +#define SDMA6_RLC3_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA6_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA6_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA6_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA6_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA6_RLC3_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA6_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA6_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA6_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA6_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA6_RLC3_RB_RPTR_ADDR_HI +#define SDMA6_RLC3_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA6_RLC3_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA6_RLC3_RB_RPTR_ADDR_LO +#define SDMA6_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA6_RLC3_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA6_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA6_RLC3_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA6_RLC3_IB_CNTL +#define SDMA6_RLC3_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA6_RLC3_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA6_RLC3_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA6_RLC3_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define 
SDMA6_RLC3_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA6_RLC3_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA6_RLC3_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA6_RLC3_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA6_RLC3_IB_RPTR +#define SDMA6_RLC3_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA6_RLC3_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA6_RLC3_IB_OFFSET +#define SDMA6_RLC3_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA6_RLC3_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA6_RLC3_IB_BASE_LO +#define SDMA6_RLC3_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA6_RLC3_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA6_RLC3_IB_BASE_HI +#define SDMA6_RLC3_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA6_RLC3_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA6_RLC3_IB_SIZE +#define SDMA6_RLC3_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA6_RLC3_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA6_RLC3_SKIP_CNTL +#define SDMA6_RLC3_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA6_RLC3_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA6_RLC3_CONTEXT_STATUS +#define SDMA6_RLC3_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA6_RLC3_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA6_RLC3_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA6_RLC3_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA6_RLC3_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA6_RLC3_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA6_RLC3_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA6_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA6_RLC3_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA6_RLC3_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA6_RLC3_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA6_RLC3_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA6_RLC3_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA6_RLC3_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA6_RLC3_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA6_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA6_RLC3_DOORBELL +#define SDMA6_RLC3_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA6_RLC3_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA6_RLC3_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA6_RLC3_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA6_RLC3_STATUS +#define SDMA6_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA6_RLC3_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA6_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA6_RLC3_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA6_RLC3_DOORBELL_LOG +#define SDMA6_RLC3_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA6_RLC3_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA6_RLC3_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA6_RLC3_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA6_RLC3_WATERMARK +#define SDMA6_RLC3_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA6_RLC3_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA6_RLC3_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA6_RLC3_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA6_RLC3_DOORBELL_OFFSET +#define SDMA6_RLC3_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA6_RLC3_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA6_RLC3_CSA_ADDR_LO +#define SDMA6_RLC3_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA6_RLC3_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA6_RLC3_CSA_ADDR_HI +#define SDMA6_RLC3_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA6_RLC3_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA6_RLC3_IB_SUB_REMAIN +#define SDMA6_RLC3_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA6_RLC3_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL 
+//SDMA6_RLC3_PREEMPT +#define SDMA6_RLC3_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA6_RLC3_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA6_RLC3_DUMMY_REG +#define SDMA6_RLC3_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA6_RLC3_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA6_RLC3_RB_WPTR_POLL_ADDR_HI +#define SDMA6_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA6_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA6_RLC3_RB_WPTR_POLL_ADDR_LO +#define SDMA6_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA6_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA6_RLC3_RB_AQL_CNTL +#define SDMA6_RLC3_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA6_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA6_RLC3_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA6_RLC3_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA6_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA6_RLC3_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA6_RLC3_MINOR_PTR_UPDATE +#define SDMA6_RLC3_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA6_RLC3_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA6_RLC3_MIDCMD_DATA0 +#define SDMA6_RLC3_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA6_RLC3_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA6_RLC3_MIDCMD_DATA1 +#define SDMA6_RLC3_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA6_RLC3_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA6_RLC3_MIDCMD_DATA2 +#define SDMA6_RLC3_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA6_RLC3_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA6_RLC3_MIDCMD_DATA3 +#define SDMA6_RLC3_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA6_RLC3_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA6_RLC3_MIDCMD_DATA4 +#define SDMA6_RLC3_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA6_RLC3_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA6_RLC3_MIDCMD_DATA5 +#define SDMA6_RLC3_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA6_RLC3_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA6_RLC3_MIDCMD_DATA6 +#define SDMA6_RLC3_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA6_RLC3_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA6_RLC3_MIDCMD_DATA7 +#define SDMA6_RLC3_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA6_RLC3_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA6_RLC3_MIDCMD_DATA8 +#define SDMA6_RLC3_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA6_RLC3_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA6_RLC3_MIDCMD_CNTL +#define SDMA6_RLC3_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA6_RLC3_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA6_RLC3_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA6_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA6_RLC3_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA6_RLC3_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA6_RLC3_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA6_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA6_RLC4_RB_CNTL +#define SDMA6_RLC4_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA6_RLC4_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA6_RLC4_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA6_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA6_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA6_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA6_RLC4_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA6_RLC4_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA6_RLC4_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA6_RLC4_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA6_RLC4_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA6_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define 
SDMA6_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA6_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA6_RLC4_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA6_RLC4_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA6_RLC4_RB_BASE +#define SDMA6_RLC4_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA6_RLC4_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA6_RLC4_RB_BASE_HI +#define SDMA6_RLC4_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA6_RLC4_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA6_RLC4_RB_RPTR +#define SDMA6_RLC4_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA6_RLC4_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA6_RLC4_RB_RPTR_HI +#define SDMA6_RLC4_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA6_RLC4_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA6_RLC4_RB_WPTR +#define SDMA6_RLC4_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA6_RLC4_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA6_RLC4_RB_WPTR_HI +#define SDMA6_RLC4_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA6_RLC4_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA6_RLC4_RB_WPTR_POLL_CNTL +#define SDMA6_RLC4_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA6_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA6_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA6_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA6_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA6_RLC4_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA6_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA6_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA6_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA6_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA6_RLC4_RB_RPTR_ADDR_HI +#define SDMA6_RLC4_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA6_RLC4_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA6_RLC4_RB_RPTR_ADDR_LO +#define SDMA6_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA6_RLC4_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA6_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA6_RLC4_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA6_RLC4_IB_CNTL +#define SDMA6_RLC4_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA6_RLC4_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA6_RLC4_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA6_RLC4_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA6_RLC4_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA6_RLC4_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA6_RLC4_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA6_RLC4_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA6_RLC4_IB_RPTR +#define SDMA6_RLC4_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA6_RLC4_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA6_RLC4_IB_OFFSET +#define SDMA6_RLC4_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA6_RLC4_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA6_RLC4_IB_BASE_LO +#define SDMA6_RLC4_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA6_RLC4_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA6_RLC4_IB_BASE_HI +#define SDMA6_RLC4_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA6_RLC4_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA6_RLC4_IB_SIZE +#define SDMA6_RLC4_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA6_RLC4_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA6_RLC4_SKIP_CNTL +#define SDMA6_RLC4_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA6_RLC4_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA6_RLC4_CONTEXT_STATUS +#define SDMA6_RLC4_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA6_RLC4_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA6_RLC4_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define 
SDMA6_RLC4_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA6_RLC4_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA6_RLC4_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA6_RLC4_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA6_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA6_RLC4_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA6_RLC4_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA6_RLC4_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA6_RLC4_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA6_RLC4_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA6_RLC4_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA6_RLC4_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA6_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA6_RLC4_DOORBELL +#define SDMA6_RLC4_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA6_RLC4_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA6_RLC4_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA6_RLC4_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA6_RLC4_STATUS +#define SDMA6_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA6_RLC4_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA6_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA6_RLC4_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA6_RLC4_DOORBELL_LOG +#define SDMA6_RLC4_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA6_RLC4_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA6_RLC4_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA6_RLC4_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA6_RLC4_WATERMARK +#define SDMA6_RLC4_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA6_RLC4_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA6_RLC4_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA6_RLC4_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA6_RLC4_DOORBELL_OFFSET +#define SDMA6_RLC4_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA6_RLC4_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA6_RLC4_CSA_ADDR_LO +#define SDMA6_RLC4_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA6_RLC4_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA6_RLC4_CSA_ADDR_HI +#define SDMA6_RLC4_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA6_RLC4_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA6_RLC4_IB_SUB_REMAIN +#define SDMA6_RLC4_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA6_RLC4_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA6_RLC4_PREEMPT +#define SDMA6_RLC4_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA6_RLC4_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA6_RLC4_DUMMY_REG +#define SDMA6_RLC4_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA6_RLC4_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA6_RLC4_RB_WPTR_POLL_ADDR_HI +#define SDMA6_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA6_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA6_RLC4_RB_WPTR_POLL_ADDR_LO +#define SDMA6_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA6_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA6_RLC4_RB_AQL_CNTL +#define SDMA6_RLC4_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA6_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA6_RLC4_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA6_RLC4_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA6_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA6_RLC4_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA6_RLC4_MINOR_PTR_UPDATE +#define SDMA6_RLC4_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA6_RLC4_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA6_RLC4_MIDCMD_DATA0 +#define SDMA6_RLC4_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define 
SDMA6_RLC4_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA6_RLC4_MIDCMD_DATA1 +#define SDMA6_RLC4_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA6_RLC4_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA6_RLC4_MIDCMD_DATA2 +#define SDMA6_RLC4_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA6_RLC4_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA6_RLC4_MIDCMD_DATA3 +#define SDMA6_RLC4_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA6_RLC4_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA6_RLC4_MIDCMD_DATA4 +#define SDMA6_RLC4_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA6_RLC4_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA6_RLC4_MIDCMD_DATA5 +#define SDMA6_RLC4_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA6_RLC4_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA6_RLC4_MIDCMD_DATA6 +#define SDMA6_RLC4_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA6_RLC4_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA6_RLC4_MIDCMD_DATA7 +#define SDMA6_RLC4_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA6_RLC4_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA6_RLC4_MIDCMD_DATA8 +#define SDMA6_RLC4_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA6_RLC4_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA6_RLC4_MIDCMD_CNTL +#define SDMA6_RLC4_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA6_RLC4_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA6_RLC4_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA6_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA6_RLC4_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA6_RLC4_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA6_RLC4_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA6_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA6_RLC5_RB_CNTL +#define SDMA6_RLC5_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA6_RLC5_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA6_RLC5_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA6_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA6_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA6_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA6_RLC5_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA6_RLC5_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA6_RLC5_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA6_RLC5_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA6_RLC5_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA6_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA6_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA6_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA6_RLC5_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA6_RLC5_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA6_RLC5_RB_BASE +#define SDMA6_RLC5_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA6_RLC5_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA6_RLC5_RB_BASE_HI +#define SDMA6_RLC5_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA6_RLC5_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA6_RLC5_RB_RPTR +#define SDMA6_RLC5_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA6_RLC5_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA6_RLC5_RB_RPTR_HI +#define SDMA6_RLC5_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA6_RLC5_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA6_RLC5_RB_WPTR +#define SDMA6_RLC5_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA6_RLC5_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA6_RLC5_RB_WPTR_HI +#define SDMA6_RLC5_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA6_RLC5_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA6_RLC5_RB_WPTR_POLL_CNTL +#define SDMA6_RLC5_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA6_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA6_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define 
SDMA6_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA6_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA6_RLC5_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA6_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA6_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA6_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA6_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA6_RLC5_RB_RPTR_ADDR_HI +#define SDMA6_RLC5_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA6_RLC5_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA6_RLC5_RB_RPTR_ADDR_LO +#define SDMA6_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA6_RLC5_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA6_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA6_RLC5_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA6_RLC5_IB_CNTL +#define SDMA6_RLC5_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA6_RLC5_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA6_RLC5_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA6_RLC5_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA6_RLC5_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA6_RLC5_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA6_RLC5_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA6_RLC5_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA6_RLC5_IB_RPTR +#define SDMA6_RLC5_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA6_RLC5_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA6_RLC5_IB_OFFSET +#define SDMA6_RLC5_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA6_RLC5_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA6_RLC5_IB_BASE_LO +#define SDMA6_RLC5_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA6_RLC5_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA6_RLC5_IB_BASE_HI +#define SDMA6_RLC5_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA6_RLC5_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA6_RLC5_IB_SIZE +#define SDMA6_RLC5_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA6_RLC5_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA6_RLC5_SKIP_CNTL +#define SDMA6_RLC5_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA6_RLC5_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA6_RLC5_CONTEXT_STATUS +#define SDMA6_RLC5_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA6_RLC5_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA6_RLC5_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA6_RLC5_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA6_RLC5_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA6_RLC5_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA6_RLC5_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA6_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA6_RLC5_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA6_RLC5_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA6_RLC5_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA6_RLC5_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA6_RLC5_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA6_RLC5_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA6_RLC5_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA6_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA6_RLC5_DOORBELL +#define SDMA6_RLC5_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA6_RLC5_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA6_RLC5_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA6_RLC5_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA6_RLC5_STATUS +#define SDMA6_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA6_RLC5_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA6_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define 
SDMA6_RLC5_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA6_RLC5_DOORBELL_LOG +#define SDMA6_RLC5_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA6_RLC5_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA6_RLC5_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA6_RLC5_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA6_RLC5_WATERMARK +#define SDMA6_RLC5_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA6_RLC5_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA6_RLC5_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA6_RLC5_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA6_RLC5_DOORBELL_OFFSET +#define SDMA6_RLC5_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA6_RLC5_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA6_RLC5_CSA_ADDR_LO +#define SDMA6_RLC5_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA6_RLC5_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA6_RLC5_CSA_ADDR_HI +#define SDMA6_RLC5_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA6_RLC5_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA6_RLC5_IB_SUB_REMAIN +#define SDMA6_RLC5_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA6_RLC5_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA6_RLC5_PREEMPT +#define SDMA6_RLC5_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA6_RLC5_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA6_RLC5_DUMMY_REG +#define SDMA6_RLC5_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA6_RLC5_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA6_RLC5_RB_WPTR_POLL_ADDR_HI +#define SDMA6_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA6_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA6_RLC5_RB_WPTR_POLL_ADDR_LO +#define SDMA6_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA6_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA6_RLC5_RB_AQL_CNTL +#define SDMA6_RLC5_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA6_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA6_RLC5_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA6_RLC5_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA6_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA6_RLC5_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA6_RLC5_MINOR_PTR_UPDATE +#define SDMA6_RLC5_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA6_RLC5_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA6_RLC5_MIDCMD_DATA0 +#define SDMA6_RLC5_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA6_RLC5_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA6_RLC5_MIDCMD_DATA1 +#define SDMA6_RLC5_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA6_RLC5_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA6_RLC5_MIDCMD_DATA2 +#define SDMA6_RLC5_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA6_RLC5_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA6_RLC5_MIDCMD_DATA3 +#define SDMA6_RLC5_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA6_RLC5_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA6_RLC5_MIDCMD_DATA4 +#define SDMA6_RLC5_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA6_RLC5_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA6_RLC5_MIDCMD_DATA5 +#define SDMA6_RLC5_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA6_RLC5_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA6_RLC5_MIDCMD_DATA6 +#define SDMA6_RLC5_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA6_RLC5_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA6_RLC5_MIDCMD_DATA7 +#define SDMA6_RLC5_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA6_RLC5_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA6_RLC5_MIDCMD_DATA8 +#define SDMA6_RLC5_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA6_RLC5_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA6_RLC5_MIDCMD_CNTL +#define SDMA6_RLC5_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define 
SDMA6_RLC5_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA6_RLC5_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA6_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA6_RLC5_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA6_RLC5_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA6_RLC5_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA6_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA6_RLC6_RB_CNTL +#define SDMA6_RLC6_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA6_RLC6_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA6_RLC6_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA6_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA6_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA6_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA6_RLC6_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA6_RLC6_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA6_RLC6_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA6_RLC6_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA6_RLC6_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA6_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA6_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA6_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA6_RLC6_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA6_RLC6_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA6_RLC6_RB_BASE +#define SDMA6_RLC6_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA6_RLC6_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA6_RLC6_RB_BASE_HI +#define SDMA6_RLC6_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA6_RLC6_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA6_RLC6_RB_RPTR +#define SDMA6_RLC6_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA6_RLC6_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA6_RLC6_RB_RPTR_HI +#define SDMA6_RLC6_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA6_RLC6_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA6_RLC6_RB_WPTR +#define SDMA6_RLC6_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA6_RLC6_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA6_RLC6_RB_WPTR_HI +#define SDMA6_RLC6_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA6_RLC6_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA6_RLC6_RB_WPTR_POLL_CNTL +#define SDMA6_RLC6_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA6_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA6_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA6_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA6_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA6_RLC6_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA6_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA6_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA6_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA6_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA6_RLC6_RB_RPTR_ADDR_HI +#define SDMA6_RLC6_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA6_RLC6_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA6_RLC6_RB_RPTR_ADDR_LO +#define SDMA6_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA6_RLC6_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA6_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA6_RLC6_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA6_RLC6_IB_CNTL +#define SDMA6_RLC6_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA6_RLC6_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA6_RLC6_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA6_RLC6_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA6_RLC6_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA6_RLC6_IB_CNTL__IB_SWAP_ENABLE_MASK 
0x00000010L +#define SDMA6_RLC6_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA6_RLC6_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA6_RLC6_IB_RPTR +#define SDMA6_RLC6_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA6_RLC6_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA6_RLC6_IB_OFFSET +#define SDMA6_RLC6_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA6_RLC6_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA6_RLC6_IB_BASE_LO +#define SDMA6_RLC6_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA6_RLC6_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA6_RLC6_IB_BASE_HI +#define SDMA6_RLC6_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA6_RLC6_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA6_RLC6_IB_SIZE +#define SDMA6_RLC6_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA6_RLC6_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA6_RLC6_SKIP_CNTL +#define SDMA6_RLC6_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA6_RLC6_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA6_RLC6_CONTEXT_STATUS +#define SDMA6_RLC6_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA6_RLC6_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA6_RLC6_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA6_RLC6_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA6_RLC6_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA6_RLC6_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA6_RLC6_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA6_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA6_RLC6_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA6_RLC6_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA6_RLC6_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA6_RLC6_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA6_RLC6_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA6_RLC6_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA6_RLC6_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA6_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA6_RLC6_DOORBELL +#define SDMA6_RLC6_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA6_RLC6_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA6_RLC6_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA6_RLC6_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA6_RLC6_STATUS +#define SDMA6_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA6_RLC6_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA6_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA6_RLC6_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA6_RLC6_DOORBELL_LOG +#define SDMA6_RLC6_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA6_RLC6_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA6_RLC6_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA6_RLC6_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA6_RLC6_WATERMARK +#define SDMA6_RLC6_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA6_RLC6_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA6_RLC6_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA6_RLC6_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA6_RLC6_DOORBELL_OFFSET +#define SDMA6_RLC6_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA6_RLC6_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA6_RLC6_CSA_ADDR_LO +#define SDMA6_RLC6_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA6_RLC6_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA6_RLC6_CSA_ADDR_HI +#define SDMA6_RLC6_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA6_RLC6_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA6_RLC6_IB_SUB_REMAIN +#define SDMA6_RLC6_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA6_RLC6_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA6_RLC6_PREEMPT +#define SDMA6_RLC6_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define 
SDMA6_RLC6_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA6_RLC6_DUMMY_REG +#define SDMA6_RLC6_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA6_RLC6_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA6_RLC6_RB_WPTR_POLL_ADDR_HI +#define SDMA6_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA6_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA6_RLC6_RB_WPTR_POLL_ADDR_LO +#define SDMA6_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA6_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA6_RLC6_RB_AQL_CNTL +#define SDMA6_RLC6_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA6_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA6_RLC6_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA6_RLC6_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA6_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA6_RLC6_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA6_RLC6_MINOR_PTR_UPDATE +#define SDMA6_RLC6_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA6_RLC6_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA6_RLC6_MIDCMD_DATA0 +#define SDMA6_RLC6_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA6_RLC6_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA6_RLC6_MIDCMD_DATA1 +#define SDMA6_RLC6_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA6_RLC6_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA6_RLC6_MIDCMD_DATA2 +#define SDMA6_RLC6_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA6_RLC6_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA6_RLC6_MIDCMD_DATA3 +#define SDMA6_RLC6_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA6_RLC6_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA6_RLC6_MIDCMD_DATA4 +#define SDMA6_RLC6_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA6_RLC6_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA6_RLC6_MIDCMD_DATA5 +#define SDMA6_RLC6_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA6_RLC6_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA6_RLC6_MIDCMD_DATA6 +#define SDMA6_RLC6_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA6_RLC6_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA6_RLC6_MIDCMD_DATA7 +#define SDMA6_RLC6_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA6_RLC6_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA6_RLC6_MIDCMD_DATA8 +#define SDMA6_RLC6_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA6_RLC6_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA6_RLC6_MIDCMD_CNTL +#define SDMA6_RLC6_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA6_RLC6_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA6_RLC6_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA6_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA6_RLC6_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA6_RLC6_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA6_RLC6_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA6_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA6_RLC7_RB_CNTL +#define SDMA6_RLC7_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA6_RLC7_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA6_RLC7_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA6_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA6_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA6_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA6_RLC7_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA6_RLC7_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA6_RLC7_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA6_RLC7_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA6_RLC7_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA6_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA6_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define 
SDMA6_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA6_RLC7_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA6_RLC7_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA6_RLC7_RB_BASE +#define SDMA6_RLC7_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA6_RLC7_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA6_RLC7_RB_BASE_HI +#define SDMA6_RLC7_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA6_RLC7_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA6_RLC7_RB_RPTR +#define SDMA6_RLC7_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA6_RLC7_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA6_RLC7_RB_RPTR_HI +#define SDMA6_RLC7_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA6_RLC7_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA6_RLC7_RB_WPTR +#define SDMA6_RLC7_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA6_RLC7_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA6_RLC7_RB_WPTR_HI +#define SDMA6_RLC7_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA6_RLC7_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA6_RLC7_RB_WPTR_POLL_CNTL +#define SDMA6_RLC7_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA6_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA6_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA6_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA6_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA6_RLC7_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA6_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA6_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA6_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA6_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA6_RLC7_RB_RPTR_ADDR_HI +#define SDMA6_RLC7_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA6_RLC7_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA6_RLC7_RB_RPTR_ADDR_LO +#define SDMA6_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA6_RLC7_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA6_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA6_RLC7_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA6_RLC7_IB_CNTL +#define SDMA6_RLC7_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA6_RLC7_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA6_RLC7_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA6_RLC7_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA6_RLC7_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA6_RLC7_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA6_RLC7_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA6_RLC7_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA6_RLC7_IB_RPTR +#define SDMA6_RLC7_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA6_RLC7_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA6_RLC7_IB_OFFSET +#define SDMA6_RLC7_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA6_RLC7_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA6_RLC7_IB_BASE_LO +#define SDMA6_RLC7_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA6_RLC7_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA6_RLC7_IB_BASE_HI +#define SDMA6_RLC7_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA6_RLC7_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA6_RLC7_IB_SIZE +#define SDMA6_RLC7_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA6_RLC7_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA6_RLC7_SKIP_CNTL +#define SDMA6_RLC7_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA6_RLC7_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA6_RLC7_CONTEXT_STATUS +#define SDMA6_RLC7_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA6_RLC7_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA6_RLC7_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA6_RLC7_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA6_RLC7_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 
+#define SDMA6_RLC7_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA6_RLC7_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA6_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA6_RLC7_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA6_RLC7_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA6_RLC7_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA6_RLC7_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA6_RLC7_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA6_RLC7_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA6_RLC7_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA6_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA6_RLC7_DOORBELL +#define SDMA6_RLC7_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA6_RLC7_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA6_RLC7_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA6_RLC7_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA6_RLC7_STATUS +#define SDMA6_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA6_RLC7_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA6_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA6_RLC7_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA6_RLC7_DOORBELL_LOG +#define SDMA6_RLC7_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA6_RLC7_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA6_RLC7_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA6_RLC7_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA6_RLC7_WATERMARK +#define SDMA6_RLC7_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA6_RLC7_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA6_RLC7_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA6_RLC7_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA6_RLC7_DOORBELL_OFFSET +#define SDMA6_RLC7_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA6_RLC7_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA6_RLC7_CSA_ADDR_LO +#define SDMA6_RLC7_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA6_RLC7_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA6_RLC7_CSA_ADDR_HI +#define SDMA6_RLC7_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA6_RLC7_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA6_RLC7_IB_SUB_REMAIN +#define SDMA6_RLC7_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA6_RLC7_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA6_RLC7_PREEMPT +#define SDMA6_RLC7_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA6_RLC7_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA6_RLC7_DUMMY_REG +#define SDMA6_RLC7_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA6_RLC7_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA6_RLC7_RB_WPTR_POLL_ADDR_HI +#define SDMA6_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA6_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA6_RLC7_RB_WPTR_POLL_ADDR_LO +#define SDMA6_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA6_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA6_RLC7_RB_AQL_CNTL +#define SDMA6_RLC7_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA6_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA6_RLC7_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA6_RLC7_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA6_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA6_RLC7_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA6_RLC7_MINOR_PTR_UPDATE +#define SDMA6_RLC7_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA6_RLC7_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA6_RLC7_MIDCMD_DATA0 +#define SDMA6_RLC7_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA6_RLC7_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA6_RLC7_MIDCMD_DATA1 +#define 
SDMA6_RLC7_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA6_RLC7_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA6_RLC7_MIDCMD_DATA2 +#define SDMA6_RLC7_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA6_RLC7_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA6_RLC7_MIDCMD_DATA3 +#define SDMA6_RLC7_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA6_RLC7_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA6_RLC7_MIDCMD_DATA4 +#define SDMA6_RLC7_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA6_RLC7_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA6_RLC7_MIDCMD_DATA5 +#define SDMA6_RLC7_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA6_RLC7_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA6_RLC7_MIDCMD_DATA6 +#define SDMA6_RLC7_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA6_RLC7_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA6_RLC7_MIDCMD_DATA7 +#define SDMA6_RLC7_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA6_RLC7_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA6_RLC7_MIDCMD_DATA8 +#define SDMA6_RLC7_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA6_RLC7_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA6_RLC7_MIDCMD_CNTL +#define SDMA6_RLC7_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA6_RLC7_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA6_RLC7_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA6_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA6_RLC7_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA6_RLC7_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA6_RLC7_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA6_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma7/sdma7_4_2_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/sdma7/sdma7_4_2_2_offset.h new file mode 100644 index 000000000000..10f387202af6 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/sdma7/sdma7_4_2_2_offset.h @@ -0,0 +1,1043 @@ +/* + * Copyright (C) 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef _sdma7_4_2_2_OFFSET_HEADER +#define _sdma7_4_2_2_OFFSET_HEADER + + + +// addressBlock: sdma7_sdma7dec +// base address: 0x7d000 +#define mmSDMA7_UCODE_ADDR 0x0000 +#define mmSDMA7_UCODE_ADDR_BASE_IDX 1 +#define mmSDMA7_UCODE_DATA 0x0001 +#define mmSDMA7_UCODE_DATA_BASE_IDX 1 +#define mmSDMA7_VM_CNTL 0x0004 +#define mmSDMA7_VM_CNTL_BASE_IDX 1 +#define mmSDMA7_VM_CTX_LO 0x0005 +#define mmSDMA7_VM_CTX_LO_BASE_IDX 1 +#define mmSDMA7_VM_CTX_HI 0x0006 +#define mmSDMA7_VM_CTX_HI_BASE_IDX 1 +#define mmSDMA7_ACTIVE_FCN_ID 0x0007 +#define mmSDMA7_ACTIVE_FCN_ID_BASE_IDX 1 +#define mmSDMA7_VM_CTX_CNTL 0x0008 +#define mmSDMA7_VM_CTX_CNTL_BASE_IDX 1 +#define mmSDMA7_VIRT_RESET_REQ 0x0009 +#define mmSDMA7_VIRT_RESET_REQ_BASE_IDX 1 +#define mmSDMA7_VF_ENABLE 0x000a +#define mmSDMA7_VF_ENABLE_BASE_IDX 1 +#define mmSDMA7_CONTEXT_REG_TYPE0 0x000b +#define mmSDMA7_CONTEXT_REG_TYPE0_BASE_IDX 1 +#define mmSDMA7_CONTEXT_REG_TYPE1 0x000c +#define mmSDMA7_CONTEXT_REG_TYPE1_BASE_IDX 1 +#define mmSDMA7_CONTEXT_REG_TYPE2 0x000d +#define mmSDMA7_CONTEXT_REG_TYPE2_BASE_IDX 1 +#define mmSDMA7_CONTEXT_REG_TYPE3 0x000e +#define mmSDMA7_CONTEXT_REG_TYPE3_BASE_IDX 1 +#define mmSDMA7_PUB_REG_TYPE0 0x000f +#define mmSDMA7_PUB_REG_TYPE0_BASE_IDX 1 +#define mmSDMA7_PUB_REG_TYPE1 0x0010 +#define mmSDMA7_PUB_REG_TYPE1_BASE_IDX 1 +#define mmSDMA7_PUB_REG_TYPE2 0x0011 +#define mmSDMA7_PUB_REG_TYPE2_BASE_IDX 1 +#define mmSDMA7_PUB_REG_TYPE3 0x0012 +#define mmSDMA7_PUB_REG_TYPE3_BASE_IDX 1 +#define mmSDMA7_MMHUB_CNTL 0x0013 +#define mmSDMA7_MMHUB_CNTL_BASE_IDX 1 +#define mmSDMA7_CONTEXT_GROUP_BOUNDARY 0x0019 +#define mmSDMA7_CONTEXT_GROUP_BOUNDARY_BASE_IDX 1 +#define mmSDMA7_POWER_CNTL 0x001a +#define mmSDMA7_POWER_CNTL_BASE_IDX 1 +#define mmSDMA7_CLK_CTRL 0x001b +#define mmSDMA7_CLK_CTRL_BASE_IDX 1 +#define mmSDMA7_CNTL 0x001c +#define mmSDMA7_CNTL_BASE_IDX 1 +#define mmSDMA7_CHICKEN_BITS 0x001d +#define mmSDMA7_CHICKEN_BITS_BASE_IDX 1 +#define mmSDMA7_GB_ADDR_CONFIG 0x001e +#define mmSDMA7_GB_ADDR_CONFIG_BASE_IDX 1 +#define mmSDMA7_GB_ADDR_CONFIG_READ 0x001f +#define mmSDMA7_GB_ADDR_CONFIG_READ_BASE_IDX 1 +#define mmSDMA7_RB_RPTR_FETCH_HI 0x0020 +#define mmSDMA7_RB_RPTR_FETCH_HI_BASE_IDX 1 +#define mmSDMA7_SEM_WAIT_FAIL_TIMER_CNTL 0x0021 +#define mmSDMA7_SEM_WAIT_FAIL_TIMER_CNTL_BASE_IDX 1 +#define mmSDMA7_RB_RPTR_FETCH 0x0022 +#define mmSDMA7_RB_RPTR_FETCH_BASE_IDX 1 +#define mmSDMA7_IB_OFFSET_FETCH 0x0023 +#define mmSDMA7_IB_OFFSET_FETCH_BASE_IDX 1 +#define mmSDMA7_PROGRAM 0x0024 +#define mmSDMA7_PROGRAM_BASE_IDX 1 +#define mmSDMA7_STATUS_REG 0x0025 +#define mmSDMA7_STATUS_REG_BASE_IDX 1 +#define mmSDMA7_STATUS1_REG 0x0026 +#define mmSDMA7_STATUS1_REG_BASE_IDX 1 +#define mmSDMA7_RD_BURST_CNTL 0x0027 +#define mmSDMA7_RD_BURST_CNTL_BASE_IDX 1 +#define mmSDMA7_HBM_PAGE_CONFIG 0x0028 +#define mmSDMA7_HBM_PAGE_CONFIG_BASE_IDX 1 +#define mmSDMA7_UCODE_CHECKSUM 0x0029 +#define mmSDMA7_UCODE_CHECKSUM_BASE_IDX 1 +#define mmSDMA7_F32_CNTL 0x002a +#define mmSDMA7_F32_CNTL_BASE_IDX 1 +#define mmSDMA7_FREEZE 0x002b +#define mmSDMA7_FREEZE_BASE_IDX 1 +#define mmSDMA7_PHASE0_QUANTUM 0x002c +#define mmSDMA7_PHASE0_QUANTUM_BASE_IDX 1 +#define mmSDMA7_PHASE1_QUANTUM 0x002d +#define mmSDMA7_PHASE1_QUANTUM_BASE_IDX 1 +#define mmSDMA7_EDC_CONFIG 0x0032 +#define mmSDMA7_EDC_CONFIG_BASE_IDX 1 +#define mmSDMA7_BA_THRESHOLD 0x0033 +#define mmSDMA7_BA_THRESHOLD_BASE_IDX 1 +#define mmSDMA7_ID 0x0034 +#define mmSDMA7_ID_BASE_IDX 1 +#define mmSDMA7_VERSION 0x0035 +#define mmSDMA7_VERSION_BASE_IDX 1 +#define mmSDMA7_EDC_COUNTER 0x0036 +#define 
mmSDMA7_EDC_COUNTER_BASE_IDX 1 +#define mmSDMA7_EDC_COUNTER_CLEAR 0x0037 +#define mmSDMA7_EDC_COUNTER_CLEAR_BASE_IDX 1 +#define mmSDMA7_STATUS2_REG 0x0038 +#define mmSDMA7_STATUS2_REG_BASE_IDX 1 +#define mmSDMA7_ATOMIC_CNTL 0x0039 +#define mmSDMA7_ATOMIC_CNTL_BASE_IDX 1 +#define mmSDMA7_ATOMIC_PREOP_LO 0x003a +#define mmSDMA7_ATOMIC_PREOP_LO_BASE_IDX 1 +#define mmSDMA7_ATOMIC_PREOP_HI 0x003b +#define mmSDMA7_ATOMIC_PREOP_HI_BASE_IDX 1 +#define mmSDMA7_UTCL1_CNTL 0x003c +#define mmSDMA7_UTCL1_CNTL_BASE_IDX 1 +#define mmSDMA7_UTCL1_WATERMK 0x003d +#define mmSDMA7_UTCL1_WATERMK_BASE_IDX 1 +#define mmSDMA7_UTCL1_RD_STATUS 0x003e +#define mmSDMA7_UTCL1_RD_STATUS_BASE_IDX 1 +#define mmSDMA7_UTCL1_WR_STATUS 0x003f +#define mmSDMA7_UTCL1_WR_STATUS_BASE_IDX 1 +#define mmSDMA7_UTCL1_INV0 0x0040 +#define mmSDMA7_UTCL1_INV0_BASE_IDX 1 +#define mmSDMA7_UTCL1_INV1 0x0041 +#define mmSDMA7_UTCL1_INV1_BASE_IDX 1 +#define mmSDMA7_UTCL1_INV2 0x0042 +#define mmSDMA7_UTCL1_INV2_BASE_IDX 1 +#define mmSDMA7_UTCL1_RD_XNACK0 0x0043 +#define mmSDMA7_UTCL1_RD_XNACK0_BASE_IDX 1 +#define mmSDMA7_UTCL1_RD_XNACK1 0x0044 +#define mmSDMA7_UTCL1_RD_XNACK1_BASE_IDX 1 +#define mmSDMA7_UTCL1_WR_XNACK0 0x0045 +#define mmSDMA7_UTCL1_WR_XNACK0_BASE_IDX 1 +#define mmSDMA7_UTCL1_WR_XNACK1 0x0046 +#define mmSDMA7_UTCL1_WR_XNACK1_BASE_IDX 1 +#define mmSDMA7_UTCL1_TIMEOUT 0x0047 +#define mmSDMA7_UTCL1_TIMEOUT_BASE_IDX 1 +#define mmSDMA7_UTCL1_PAGE 0x0048 +#define mmSDMA7_UTCL1_PAGE_BASE_IDX 1 +#define mmSDMA7_POWER_CNTL_IDLE 0x0049 +#define mmSDMA7_POWER_CNTL_IDLE_BASE_IDX 1 +#define mmSDMA7_RELAX_ORDERING_LUT 0x004a +#define mmSDMA7_RELAX_ORDERING_LUT_BASE_IDX 1 +#define mmSDMA7_CHICKEN_BITS_2 0x004b +#define mmSDMA7_CHICKEN_BITS_2_BASE_IDX 1 +#define mmSDMA7_STATUS3_REG 0x004c +#define mmSDMA7_STATUS3_REG_BASE_IDX 1 +#define mmSDMA7_PHYSICAL_ADDR_LO 0x004d +#define mmSDMA7_PHYSICAL_ADDR_LO_BASE_IDX 1 +#define mmSDMA7_PHYSICAL_ADDR_HI 0x004e +#define mmSDMA7_PHYSICAL_ADDR_HI_BASE_IDX 1 +#define mmSDMA7_PHASE2_QUANTUM 0x004f +#define mmSDMA7_PHASE2_QUANTUM_BASE_IDX 1 +#define mmSDMA7_ERROR_LOG 0x0050 +#define mmSDMA7_ERROR_LOG_BASE_IDX 1 +#define mmSDMA7_PUB_DUMMY_REG0 0x0051 +#define mmSDMA7_PUB_DUMMY_REG0_BASE_IDX 1 +#define mmSDMA7_PUB_DUMMY_REG1 0x0052 +#define mmSDMA7_PUB_DUMMY_REG1_BASE_IDX 1 +#define mmSDMA7_PUB_DUMMY_REG2 0x0053 +#define mmSDMA7_PUB_DUMMY_REG2_BASE_IDX 1 +#define mmSDMA7_PUB_DUMMY_REG3 0x0054 +#define mmSDMA7_PUB_DUMMY_REG3_BASE_IDX 1 +#define mmSDMA7_F32_COUNTER 0x0055 +#define mmSDMA7_F32_COUNTER_BASE_IDX 1 +#define mmSDMA7_UNBREAKABLE 0x0056 +#define mmSDMA7_UNBREAKABLE_BASE_IDX 1 +#define mmSDMA7_PERFMON_CNTL 0x0057 +#define mmSDMA7_PERFMON_CNTL_BASE_IDX 1 +#define mmSDMA7_PERFCOUNTER0_RESULT 0x0058 +#define mmSDMA7_PERFCOUNTER0_RESULT_BASE_IDX 1 +#define mmSDMA7_PERFCOUNTER1_RESULT 0x0059 +#define mmSDMA7_PERFCOUNTER1_RESULT_BASE_IDX 1 +#define mmSDMA7_PERFCOUNTER_TAG_DELAY_RANGE 0x005a +#define mmSDMA7_PERFCOUNTER_TAG_DELAY_RANGE_BASE_IDX 1 +#define mmSDMA7_CRD_CNTL 0x005b +#define mmSDMA7_CRD_CNTL_BASE_IDX 1 +#define mmSDMA7_GPU_IOV_VIOLATION_LOG 0x005d +#define mmSDMA7_GPU_IOV_VIOLATION_LOG_BASE_IDX 1 +#define mmSDMA7_ULV_CNTL 0x005e +#define mmSDMA7_ULV_CNTL_BASE_IDX 1 +#define mmSDMA7_EA_DBIT_ADDR_DATA 0x0060 +#define mmSDMA7_EA_DBIT_ADDR_DATA_BASE_IDX 1 +#define mmSDMA7_EA_DBIT_ADDR_INDEX 0x0061 +#define mmSDMA7_EA_DBIT_ADDR_INDEX_BASE_IDX 1 +#define mmSDMA7_GPU_IOV_VIOLATION_LOG2 0x0062 +#define mmSDMA7_GPU_IOV_VIOLATION_LOG2_BASE_IDX 1 +#define mmSDMA7_GFX_RB_CNTL 0x0080 +#define 
mmSDMA7_GFX_RB_CNTL_BASE_IDX 1 +#define mmSDMA7_GFX_RB_BASE 0x0081 +#define mmSDMA7_GFX_RB_BASE_BASE_IDX 1 +#define mmSDMA7_GFX_RB_BASE_HI 0x0082 +#define mmSDMA7_GFX_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA7_GFX_RB_RPTR 0x0083 +#define mmSDMA7_GFX_RB_RPTR_BASE_IDX 1 +#define mmSDMA7_GFX_RB_RPTR_HI 0x0084 +#define mmSDMA7_GFX_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA7_GFX_RB_WPTR 0x0085 +#define mmSDMA7_GFX_RB_WPTR_BASE_IDX 1 +#define mmSDMA7_GFX_RB_WPTR_HI 0x0086 +#define mmSDMA7_GFX_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA7_GFX_RB_WPTR_POLL_CNTL 0x0087 +#define mmSDMA7_GFX_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA7_GFX_RB_RPTR_ADDR_HI 0x0088 +#define mmSDMA7_GFX_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA7_GFX_RB_RPTR_ADDR_LO 0x0089 +#define mmSDMA7_GFX_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA7_GFX_IB_CNTL 0x008a +#define mmSDMA7_GFX_IB_CNTL_BASE_IDX 1 +#define mmSDMA7_GFX_IB_RPTR 0x008b +#define mmSDMA7_GFX_IB_RPTR_BASE_IDX 1 +#define mmSDMA7_GFX_IB_OFFSET 0x008c +#define mmSDMA7_GFX_IB_OFFSET_BASE_IDX 1 +#define mmSDMA7_GFX_IB_BASE_LO 0x008d +#define mmSDMA7_GFX_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA7_GFX_IB_BASE_HI 0x008e +#define mmSDMA7_GFX_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA7_GFX_IB_SIZE 0x008f +#define mmSDMA7_GFX_IB_SIZE_BASE_IDX 1 +#define mmSDMA7_GFX_SKIP_CNTL 0x0090 +#define mmSDMA7_GFX_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA7_GFX_CONTEXT_STATUS 0x0091 +#define mmSDMA7_GFX_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA7_GFX_DOORBELL 0x0092 +#define mmSDMA7_GFX_DOORBELL_BASE_IDX 1 +#define mmSDMA7_GFX_CONTEXT_CNTL 0x0093 +#define mmSDMA7_GFX_CONTEXT_CNTL_BASE_IDX 1 +#define mmSDMA7_GFX_STATUS 0x00a8 +#define mmSDMA7_GFX_STATUS_BASE_IDX 1 +#define mmSDMA7_GFX_DOORBELL_LOG 0x00a9 +#define mmSDMA7_GFX_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA7_GFX_WATERMARK 0x00aa +#define mmSDMA7_GFX_WATERMARK_BASE_IDX 1 +#define mmSDMA7_GFX_DOORBELL_OFFSET 0x00ab +#define mmSDMA7_GFX_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA7_GFX_CSA_ADDR_LO 0x00ac +#define mmSDMA7_GFX_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA7_GFX_CSA_ADDR_HI 0x00ad +#define mmSDMA7_GFX_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA7_GFX_IB_SUB_REMAIN 0x00af +#define mmSDMA7_GFX_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA7_GFX_PREEMPT 0x00b0 +#define mmSDMA7_GFX_PREEMPT_BASE_IDX 1 +#define mmSDMA7_GFX_DUMMY_REG 0x00b1 +#define mmSDMA7_GFX_DUMMY_REG_BASE_IDX 1 +#define mmSDMA7_GFX_RB_WPTR_POLL_ADDR_HI 0x00b2 +#define mmSDMA7_GFX_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA7_GFX_RB_WPTR_POLL_ADDR_LO 0x00b3 +#define mmSDMA7_GFX_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA7_GFX_RB_AQL_CNTL 0x00b4 +#define mmSDMA7_GFX_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA7_GFX_MINOR_PTR_UPDATE 0x00b5 +#define mmSDMA7_GFX_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA7_GFX_MIDCMD_DATA0 0x00c0 +#define mmSDMA7_GFX_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA7_GFX_MIDCMD_DATA1 0x00c1 +#define mmSDMA7_GFX_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA7_GFX_MIDCMD_DATA2 0x00c2 +#define mmSDMA7_GFX_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA7_GFX_MIDCMD_DATA3 0x00c3 +#define mmSDMA7_GFX_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA7_GFX_MIDCMD_DATA4 0x00c4 +#define mmSDMA7_GFX_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA7_GFX_MIDCMD_DATA5 0x00c5 +#define mmSDMA7_GFX_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA7_GFX_MIDCMD_DATA6 0x00c6 +#define mmSDMA7_GFX_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA7_GFX_MIDCMD_DATA7 0x00c7 +#define mmSDMA7_GFX_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA7_GFX_MIDCMD_DATA8 0x00c8 +#define mmSDMA7_GFX_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA7_GFX_MIDCMD_CNTL 0x00c9 +#define 
mmSDMA7_GFX_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA7_PAGE_RB_CNTL 0x00d8 +#define mmSDMA7_PAGE_RB_CNTL_BASE_IDX 1 +#define mmSDMA7_PAGE_RB_BASE 0x00d9 +#define mmSDMA7_PAGE_RB_BASE_BASE_IDX 1 +#define mmSDMA7_PAGE_RB_BASE_HI 0x00da +#define mmSDMA7_PAGE_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA7_PAGE_RB_RPTR 0x00db +#define mmSDMA7_PAGE_RB_RPTR_BASE_IDX 1 +#define mmSDMA7_PAGE_RB_RPTR_HI 0x00dc +#define mmSDMA7_PAGE_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA7_PAGE_RB_WPTR 0x00dd +#define mmSDMA7_PAGE_RB_WPTR_BASE_IDX 1 +#define mmSDMA7_PAGE_RB_WPTR_HI 0x00de +#define mmSDMA7_PAGE_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA7_PAGE_RB_WPTR_POLL_CNTL 0x00df +#define mmSDMA7_PAGE_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA7_PAGE_RB_RPTR_ADDR_HI 0x00e0 +#define mmSDMA7_PAGE_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA7_PAGE_RB_RPTR_ADDR_LO 0x00e1 +#define mmSDMA7_PAGE_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA7_PAGE_IB_CNTL 0x00e2 +#define mmSDMA7_PAGE_IB_CNTL_BASE_IDX 1 +#define mmSDMA7_PAGE_IB_RPTR 0x00e3 +#define mmSDMA7_PAGE_IB_RPTR_BASE_IDX 1 +#define mmSDMA7_PAGE_IB_OFFSET 0x00e4 +#define mmSDMA7_PAGE_IB_OFFSET_BASE_IDX 1 +#define mmSDMA7_PAGE_IB_BASE_LO 0x00e5 +#define mmSDMA7_PAGE_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA7_PAGE_IB_BASE_HI 0x00e6 +#define mmSDMA7_PAGE_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA7_PAGE_IB_SIZE 0x00e7 +#define mmSDMA7_PAGE_IB_SIZE_BASE_IDX 1 +#define mmSDMA7_PAGE_SKIP_CNTL 0x00e8 +#define mmSDMA7_PAGE_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA7_PAGE_CONTEXT_STATUS 0x00e9 +#define mmSDMA7_PAGE_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA7_PAGE_DOORBELL 0x00ea +#define mmSDMA7_PAGE_DOORBELL_BASE_IDX 1 +#define mmSDMA7_PAGE_STATUS 0x0100 +#define mmSDMA7_PAGE_STATUS_BASE_IDX 1 +#define mmSDMA7_PAGE_DOORBELL_LOG 0x0101 +#define mmSDMA7_PAGE_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA7_PAGE_WATERMARK 0x0102 +#define mmSDMA7_PAGE_WATERMARK_BASE_IDX 1 +#define mmSDMA7_PAGE_DOORBELL_OFFSET 0x0103 +#define mmSDMA7_PAGE_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA7_PAGE_CSA_ADDR_LO 0x0104 +#define mmSDMA7_PAGE_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA7_PAGE_CSA_ADDR_HI 0x0105 +#define mmSDMA7_PAGE_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA7_PAGE_IB_SUB_REMAIN 0x0107 +#define mmSDMA7_PAGE_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA7_PAGE_PREEMPT 0x0108 +#define mmSDMA7_PAGE_PREEMPT_BASE_IDX 1 +#define mmSDMA7_PAGE_DUMMY_REG 0x0109 +#define mmSDMA7_PAGE_DUMMY_REG_BASE_IDX 1 +#define mmSDMA7_PAGE_RB_WPTR_POLL_ADDR_HI 0x010a +#define mmSDMA7_PAGE_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA7_PAGE_RB_WPTR_POLL_ADDR_LO 0x010b +#define mmSDMA7_PAGE_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA7_PAGE_RB_AQL_CNTL 0x010c +#define mmSDMA7_PAGE_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA7_PAGE_MINOR_PTR_UPDATE 0x010d +#define mmSDMA7_PAGE_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA7_PAGE_MIDCMD_DATA0 0x0118 +#define mmSDMA7_PAGE_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA7_PAGE_MIDCMD_DATA1 0x0119 +#define mmSDMA7_PAGE_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA7_PAGE_MIDCMD_DATA2 0x011a +#define mmSDMA7_PAGE_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA7_PAGE_MIDCMD_DATA3 0x011b +#define mmSDMA7_PAGE_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA7_PAGE_MIDCMD_DATA4 0x011c +#define mmSDMA7_PAGE_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA7_PAGE_MIDCMD_DATA5 0x011d +#define mmSDMA7_PAGE_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA7_PAGE_MIDCMD_DATA6 0x011e +#define mmSDMA7_PAGE_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA7_PAGE_MIDCMD_DATA7 0x011f +#define mmSDMA7_PAGE_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA7_PAGE_MIDCMD_DATA8 0x0120 +#define 
mmSDMA7_PAGE_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA7_PAGE_MIDCMD_CNTL 0x0121 +#define mmSDMA7_PAGE_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC0_RB_CNTL 0x0130 +#define mmSDMA7_RLC0_RB_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC0_RB_BASE 0x0131 +#define mmSDMA7_RLC0_RB_BASE_BASE_IDX 1 +#define mmSDMA7_RLC0_RB_BASE_HI 0x0132 +#define mmSDMA7_RLC0_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA7_RLC0_RB_RPTR 0x0133 +#define mmSDMA7_RLC0_RB_RPTR_BASE_IDX 1 +#define mmSDMA7_RLC0_RB_RPTR_HI 0x0134 +#define mmSDMA7_RLC0_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA7_RLC0_RB_WPTR 0x0135 +#define mmSDMA7_RLC0_RB_WPTR_BASE_IDX 1 +#define mmSDMA7_RLC0_RB_WPTR_HI 0x0136 +#define mmSDMA7_RLC0_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA7_RLC0_RB_WPTR_POLL_CNTL 0x0137 +#define mmSDMA7_RLC0_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC0_RB_RPTR_ADDR_HI 0x0138 +#define mmSDMA7_RLC0_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA7_RLC0_RB_RPTR_ADDR_LO 0x0139 +#define mmSDMA7_RLC0_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA7_RLC0_IB_CNTL 0x013a +#define mmSDMA7_RLC0_IB_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC0_IB_RPTR 0x013b +#define mmSDMA7_RLC0_IB_RPTR_BASE_IDX 1 +#define mmSDMA7_RLC0_IB_OFFSET 0x013c +#define mmSDMA7_RLC0_IB_OFFSET_BASE_IDX 1 +#define mmSDMA7_RLC0_IB_BASE_LO 0x013d +#define mmSDMA7_RLC0_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA7_RLC0_IB_BASE_HI 0x013e +#define mmSDMA7_RLC0_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA7_RLC0_IB_SIZE 0x013f +#define mmSDMA7_RLC0_IB_SIZE_BASE_IDX 1 +#define mmSDMA7_RLC0_SKIP_CNTL 0x0140 +#define mmSDMA7_RLC0_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC0_CONTEXT_STATUS 0x0141 +#define mmSDMA7_RLC0_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA7_RLC0_DOORBELL 0x0142 +#define mmSDMA7_RLC0_DOORBELL_BASE_IDX 1 +#define mmSDMA7_RLC0_STATUS 0x0158 +#define mmSDMA7_RLC0_STATUS_BASE_IDX 1 +#define mmSDMA7_RLC0_DOORBELL_LOG 0x0159 +#define mmSDMA7_RLC0_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA7_RLC0_WATERMARK 0x015a +#define mmSDMA7_RLC0_WATERMARK_BASE_IDX 1 +#define mmSDMA7_RLC0_DOORBELL_OFFSET 0x015b +#define mmSDMA7_RLC0_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA7_RLC0_CSA_ADDR_LO 0x015c +#define mmSDMA7_RLC0_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA7_RLC0_CSA_ADDR_HI 0x015d +#define mmSDMA7_RLC0_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA7_RLC0_IB_SUB_REMAIN 0x015f +#define mmSDMA7_RLC0_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA7_RLC0_PREEMPT 0x0160 +#define mmSDMA7_RLC0_PREEMPT_BASE_IDX 1 +#define mmSDMA7_RLC0_DUMMY_REG 0x0161 +#define mmSDMA7_RLC0_DUMMY_REG_BASE_IDX 1 +#define mmSDMA7_RLC0_RB_WPTR_POLL_ADDR_HI 0x0162 +#define mmSDMA7_RLC0_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA7_RLC0_RB_WPTR_POLL_ADDR_LO 0x0163 +#define mmSDMA7_RLC0_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA7_RLC0_RB_AQL_CNTL 0x0164 +#define mmSDMA7_RLC0_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC0_MINOR_PTR_UPDATE 0x0165 +#define mmSDMA7_RLC0_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA7_RLC0_MIDCMD_DATA0 0x0170 +#define mmSDMA7_RLC0_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA7_RLC0_MIDCMD_DATA1 0x0171 +#define mmSDMA7_RLC0_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA7_RLC0_MIDCMD_DATA2 0x0172 +#define mmSDMA7_RLC0_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA7_RLC0_MIDCMD_DATA3 0x0173 +#define mmSDMA7_RLC0_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA7_RLC0_MIDCMD_DATA4 0x0174 +#define mmSDMA7_RLC0_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA7_RLC0_MIDCMD_DATA5 0x0175 +#define mmSDMA7_RLC0_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA7_RLC0_MIDCMD_DATA6 0x0176 +#define mmSDMA7_RLC0_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA7_RLC0_MIDCMD_DATA7 0x0177 +#define 
mmSDMA7_RLC0_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA7_RLC0_MIDCMD_DATA8 0x0178 +#define mmSDMA7_RLC0_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA7_RLC0_MIDCMD_CNTL 0x0179 +#define mmSDMA7_RLC0_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC1_RB_CNTL 0x0188 +#define mmSDMA7_RLC1_RB_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC1_RB_BASE 0x0189 +#define mmSDMA7_RLC1_RB_BASE_BASE_IDX 1 +#define mmSDMA7_RLC1_RB_BASE_HI 0x018a +#define mmSDMA7_RLC1_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA7_RLC1_RB_RPTR 0x018b +#define mmSDMA7_RLC1_RB_RPTR_BASE_IDX 1 +#define mmSDMA7_RLC1_RB_RPTR_HI 0x018c +#define mmSDMA7_RLC1_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA7_RLC1_RB_WPTR 0x018d +#define mmSDMA7_RLC1_RB_WPTR_BASE_IDX 1 +#define mmSDMA7_RLC1_RB_WPTR_HI 0x018e +#define mmSDMA7_RLC1_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA7_RLC1_RB_WPTR_POLL_CNTL 0x018f +#define mmSDMA7_RLC1_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC1_RB_RPTR_ADDR_HI 0x0190 +#define mmSDMA7_RLC1_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA7_RLC1_RB_RPTR_ADDR_LO 0x0191 +#define mmSDMA7_RLC1_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA7_RLC1_IB_CNTL 0x0192 +#define mmSDMA7_RLC1_IB_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC1_IB_RPTR 0x0193 +#define mmSDMA7_RLC1_IB_RPTR_BASE_IDX 1 +#define mmSDMA7_RLC1_IB_OFFSET 0x0194 +#define mmSDMA7_RLC1_IB_OFFSET_BASE_IDX 1 +#define mmSDMA7_RLC1_IB_BASE_LO 0x0195 +#define mmSDMA7_RLC1_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA7_RLC1_IB_BASE_HI 0x0196 +#define mmSDMA7_RLC1_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA7_RLC1_IB_SIZE 0x0197 +#define mmSDMA7_RLC1_IB_SIZE_BASE_IDX 1 +#define mmSDMA7_RLC1_SKIP_CNTL 0x0198 +#define mmSDMA7_RLC1_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC1_CONTEXT_STATUS 0x0199 +#define mmSDMA7_RLC1_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA7_RLC1_DOORBELL 0x019a +#define mmSDMA7_RLC1_DOORBELL_BASE_IDX 1 +#define mmSDMA7_RLC1_STATUS 0x01b0 +#define mmSDMA7_RLC1_STATUS_BASE_IDX 1 +#define mmSDMA7_RLC1_DOORBELL_LOG 0x01b1 +#define mmSDMA7_RLC1_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA7_RLC1_WATERMARK 0x01b2 +#define mmSDMA7_RLC1_WATERMARK_BASE_IDX 1 +#define mmSDMA7_RLC1_DOORBELL_OFFSET 0x01b3 +#define mmSDMA7_RLC1_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA7_RLC1_CSA_ADDR_LO 0x01b4 +#define mmSDMA7_RLC1_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA7_RLC1_CSA_ADDR_HI 0x01b5 +#define mmSDMA7_RLC1_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA7_RLC1_IB_SUB_REMAIN 0x01b7 +#define mmSDMA7_RLC1_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA7_RLC1_PREEMPT 0x01b8 +#define mmSDMA7_RLC1_PREEMPT_BASE_IDX 1 +#define mmSDMA7_RLC1_DUMMY_REG 0x01b9 +#define mmSDMA7_RLC1_DUMMY_REG_BASE_IDX 1 +#define mmSDMA7_RLC1_RB_WPTR_POLL_ADDR_HI 0x01ba +#define mmSDMA7_RLC1_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA7_RLC1_RB_WPTR_POLL_ADDR_LO 0x01bb +#define mmSDMA7_RLC1_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA7_RLC1_RB_AQL_CNTL 0x01bc +#define mmSDMA7_RLC1_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC1_MINOR_PTR_UPDATE 0x01bd +#define mmSDMA7_RLC1_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA7_RLC1_MIDCMD_DATA0 0x01c8 +#define mmSDMA7_RLC1_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA7_RLC1_MIDCMD_DATA1 0x01c9 +#define mmSDMA7_RLC1_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA7_RLC1_MIDCMD_DATA2 0x01ca +#define mmSDMA7_RLC1_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA7_RLC1_MIDCMD_DATA3 0x01cb +#define mmSDMA7_RLC1_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA7_RLC1_MIDCMD_DATA4 0x01cc +#define mmSDMA7_RLC1_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA7_RLC1_MIDCMD_DATA5 0x01cd +#define mmSDMA7_RLC1_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA7_RLC1_MIDCMD_DATA6 0x01ce +#define 
mmSDMA7_RLC1_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA7_RLC1_MIDCMD_DATA7 0x01cf +#define mmSDMA7_RLC1_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA7_RLC1_MIDCMD_DATA8 0x01d0 +#define mmSDMA7_RLC1_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA7_RLC1_MIDCMD_CNTL 0x01d1 +#define mmSDMA7_RLC1_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC2_RB_CNTL 0x01e0 +#define mmSDMA7_RLC2_RB_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC2_RB_BASE 0x01e1 +#define mmSDMA7_RLC2_RB_BASE_BASE_IDX 1 +#define mmSDMA7_RLC2_RB_BASE_HI 0x01e2 +#define mmSDMA7_RLC2_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA7_RLC2_RB_RPTR 0x01e3 +#define mmSDMA7_RLC2_RB_RPTR_BASE_IDX 1 +#define mmSDMA7_RLC2_RB_RPTR_HI 0x01e4 +#define mmSDMA7_RLC2_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA7_RLC2_RB_WPTR 0x01e5 +#define mmSDMA7_RLC2_RB_WPTR_BASE_IDX 1 +#define mmSDMA7_RLC2_RB_WPTR_HI 0x01e6 +#define mmSDMA7_RLC2_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA7_RLC2_RB_WPTR_POLL_CNTL 0x01e7 +#define mmSDMA7_RLC2_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC2_RB_RPTR_ADDR_HI 0x01e8 +#define mmSDMA7_RLC2_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA7_RLC2_RB_RPTR_ADDR_LO 0x01e9 +#define mmSDMA7_RLC2_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA7_RLC2_IB_CNTL 0x01ea +#define mmSDMA7_RLC2_IB_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC2_IB_RPTR 0x01eb +#define mmSDMA7_RLC2_IB_RPTR_BASE_IDX 1 +#define mmSDMA7_RLC2_IB_OFFSET 0x01ec +#define mmSDMA7_RLC2_IB_OFFSET_BASE_IDX 1 +#define mmSDMA7_RLC2_IB_BASE_LO 0x01ed +#define mmSDMA7_RLC2_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA7_RLC2_IB_BASE_HI 0x01ee +#define mmSDMA7_RLC2_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA7_RLC2_IB_SIZE 0x01ef +#define mmSDMA7_RLC2_IB_SIZE_BASE_IDX 1 +#define mmSDMA7_RLC2_SKIP_CNTL 0x01f0 +#define mmSDMA7_RLC2_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC2_CONTEXT_STATUS 0x01f1 +#define mmSDMA7_RLC2_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA7_RLC2_DOORBELL 0x01f2 +#define mmSDMA7_RLC2_DOORBELL_BASE_IDX 1 +#define mmSDMA7_RLC2_STATUS 0x0208 +#define mmSDMA7_RLC2_STATUS_BASE_IDX 1 +#define mmSDMA7_RLC2_DOORBELL_LOG 0x0209 +#define mmSDMA7_RLC2_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA7_RLC2_WATERMARK 0x020a +#define mmSDMA7_RLC2_WATERMARK_BASE_IDX 1 +#define mmSDMA7_RLC2_DOORBELL_OFFSET 0x020b +#define mmSDMA7_RLC2_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA7_RLC2_CSA_ADDR_LO 0x020c +#define mmSDMA7_RLC2_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA7_RLC2_CSA_ADDR_HI 0x020d +#define mmSDMA7_RLC2_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA7_RLC2_IB_SUB_REMAIN 0x020f +#define mmSDMA7_RLC2_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA7_RLC2_PREEMPT 0x0210 +#define mmSDMA7_RLC2_PREEMPT_BASE_IDX 1 +#define mmSDMA7_RLC2_DUMMY_REG 0x0211 +#define mmSDMA7_RLC2_DUMMY_REG_BASE_IDX 1 +#define mmSDMA7_RLC2_RB_WPTR_POLL_ADDR_HI 0x0212 +#define mmSDMA7_RLC2_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA7_RLC2_RB_WPTR_POLL_ADDR_LO 0x0213 +#define mmSDMA7_RLC2_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA7_RLC2_RB_AQL_CNTL 0x0214 +#define mmSDMA7_RLC2_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC2_MINOR_PTR_UPDATE 0x0215 +#define mmSDMA7_RLC2_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA7_RLC2_MIDCMD_DATA0 0x0220 +#define mmSDMA7_RLC2_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA7_RLC2_MIDCMD_DATA1 0x0221 +#define mmSDMA7_RLC2_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA7_RLC2_MIDCMD_DATA2 0x0222 +#define mmSDMA7_RLC2_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA7_RLC2_MIDCMD_DATA3 0x0223 +#define mmSDMA7_RLC2_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA7_RLC2_MIDCMD_DATA4 0x0224 +#define mmSDMA7_RLC2_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA7_RLC2_MIDCMD_DATA5 0x0225 +#define 
mmSDMA7_RLC2_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA7_RLC2_MIDCMD_DATA6 0x0226 +#define mmSDMA7_RLC2_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA7_RLC2_MIDCMD_DATA7 0x0227 +#define mmSDMA7_RLC2_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA7_RLC2_MIDCMD_DATA8 0x0228 +#define mmSDMA7_RLC2_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA7_RLC2_MIDCMD_CNTL 0x0229 +#define mmSDMA7_RLC2_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC3_RB_CNTL 0x0238 +#define mmSDMA7_RLC3_RB_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC3_RB_BASE 0x0239 +#define mmSDMA7_RLC3_RB_BASE_BASE_IDX 1 +#define mmSDMA7_RLC3_RB_BASE_HI 0x023a +#define mmSDMA7_RLC3_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA7_RLC3_RB_RPTR 0x023b +#define mmSDMA7_RLC3_RB_RPTR_BASE_IDX 1 +#define mmSDMA7_RLC3_RB_RPTR_HI 0x023c +#define mmSDMA7_RLC3_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA7_RLC3_RB_WPTR 0x023d +#define mmSDMA7_RLC3_RB_WPTR_BASE_IDX 1 +#define mmSDMA7_RLC3_RB_WPTR_HI 0x023e +#define mmSDMA7_RLC3_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA7_RLC3_RB_WPTR_POLL_CNTL 0x023f +#define mmSDMA7_RLC3_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC3_RB_RPTR_ADDR_HI 0x0240 +#define mmSDMA7_RLC3_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA7_RLC3_RB_RPTR_ADDR_LO 0x0241 +#define mmSDMA7_RLC3_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA7_RLC3_IB_CNTL 0x0242 +#define mmSDMA7_RLC3_IB_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC3_IB_RPTR 0x0243 +#define mmSDMA7_RLC3_IB_RPTR_BASE_IDX 1 +#define mmSDMA7_RLC3_IB_OFFSET 0x0244 +#define mmSDMA7_RLC3_IB_OFFSET_BASE_IDX 1 +#define mmSDMA7_RLC3_IB_BASE_LO 0x0245 +#define mmSDMA7_RLC3_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA7_RLC3_IB_BASE_HI 0x0246 +#define mmSDMA7_RLC3_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA7_RLC3_IB_SIZE 0x0247 +#define mmSDMA7_RLC3_IB_SIZE_BASE_IDX 1 +#define mmSDMA7_RLC3_SKIP_CNTL 0x0248 +#define mmSDMA7_RLC3_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC3_CONTEXT_STATUS 0x0249 +#define mmSDMA7_RLC3_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA7_RLC3_DOORBELL 0x024a +#define mmSDMA7_RLC3_DOORBELL_BASE_IDX 1 +#define mmSDMA7_RLC3_STATUS 0x0260 +#define mmSDMA7_RLC3_STATUS_BASE_IDX 1 +#define mmSDMA7_RLC3_DOORBELL_LOG 0x0261 +#define mmSDMA7_RLC3_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA7_RLC3_WATERMARK 0x0262 +#define mmSDMA7_RLC3_WATERMARK_BASE_IDX 1 +#define mmSDMA7_RLC3_DOORBELL_OFFSET 0x0263 +#define mmSDMA7_RLC3_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA7_RLC3_CSA_ADDR_LO 0x0264 +#define mmSDMA7_RLC3_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA7_RLC3_CSA_ADDR_HI 0x0265 +#define mmSDMA7_RLC3_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA7_RLC3_IB_SUB_REMAIN 0x0267 +#define mmSDMA7_RLC3_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA7_RLC3_PREEMPT 0x0268 +#define mmSDMA7_RLC3_PREEMPT_BASE_IDX 1 +#define mmSDMA7_RLC3_DUMMY_REG 0x0269 +#define mmSDMA7_RLC3_DUMMY_REG_BASE_IDX 1 +#define mmSDMA7_RLC3_RB_WPTR_POLL_ADDR_HI 0x026a +#define mmSDMA7_RLC3_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA7_RLC3_RB_WPTR_POLL_ADDR_LO 0x026b +#define mmSDMA7_RLC3_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA7_RLC3_RB_AQL_CNTL 0x026c +#define mmSDMA7_RLC3_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC3_MINOR_PTR_UPDATE 0x026d +#define mmSDMA7_RLC3_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA7_RLC3_MIDCMD_DATA0 0x0278 +#define mmSDMA7_RLC3_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA7_RLC3_MIDCMD_DATA1 0x0279 +#define mmSDMA7_RLC3_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA7_RLC3_MIDCMD_DATA2 0x027a +#define mmSDMA7_RLC3_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA7_RLC3_MIDCMD_DATA3 0x027b +#define mmSDMA7_RLC3_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA7_RLC3_MIDCMD_DATA4 0x027c +#define 
mmSDMA7_RLC3_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA7_RLC3_MIDCMD_DATA5 0x027d +#define mmSDMA7_RLC3_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA7_RLC3_MIDCMD_DATA6 0x027e +#define mmSDMA7_RLC3_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA7_RLC3_MIDCMD_DATA7 0x027f +#define mmSDMA7_RLC3_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA7_RLC3_MIDCMD_DATA8 0x0280 +#define mmSDMA7_RLC3_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA7_RLC3_MIDCMD_CNTL 0x0281 +#define mmSDMA7_RLC3_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC4_RB_CNTL 0x0290 +#define mmSDMA7_RLC4_RB_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC4_RB_BASE 0x0291 +#define mmSDMA7_RLC4_RB_BASE_BASE_IDX 1 +#define mmSDMA7_RLC4_RB_BASE_HI 0x0292 +#define mmSDMA7_RLC4_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA7_RLC4_RB_RPTR 0x0293 +#define mmSDMA7_RLC4_RB_RPTR_BASE_IDX 1 +#define mmSDMA7_RLC4_RB_RPTR_HI 0x0294 +#define mmSDMA7_RLC4_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA7_RLC4_RB_WPTR 0x0295 +#define mmSDMA7_RLC4_RB_WPTR_BASE_IDX 1 +#define mmSDMA7_RLC4_RB_WPTR_HI 0x0296 +#define mmSDMA7_RLC4_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA7_RLC4_RB_WPTR_POLL_CNTL 0x0297 +#define mmSDMA7_RLC4_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC4_RB_RPTR_ADDR_HI 0x0298 +#define mmSDMA7_RLC4_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA7_RLC4_RB_RPTR_ADDR_LO 0x0299 +#define mmSDMA7_RLC4_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA7_RLC4_IB_CNTL 0x029a +#define mmSDMA7_RLC4_IB_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC4_IB_RPTR 0x029b +#define mmSDMA7_RLC4_IB_RPTR_BASE_IDX 1 +#define mmSDMA7_RLC4_IB_OFFSET 0x029c +#define mmSDMA7_RLC4_IB_OFFSET_BASE_IDX 1 +#define mmSDMA7_RLC4_IB_BASE_LO 0x029d +#define mmSDMA7_RLC4_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA7_RLC4_IB_BASE_HI 0x029e +#define mmSDMA7_RLC4_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA7_RLC4_IB_SIZE 0x029f +#define mmSDMA7_RLC4_IB_SIZE_BASE_IDX 1 +#define mmSDMA7_RLC4_SKIP_CNTL 0x02a0 +#define mmSDMA7_RLC4_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC4_CONTEXT_STATUS 0x02a1 +#define mmSDMA7_RLC4_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA7_RLC4_DOORBELL 0x02a2 +#define mmSDMA7_RLC4_DOORBELL_BASE_IDX 1 +#define mmSDMA7_RLC4_STATUS 0x02b8 +#define mmSDMA7_RLC4_STATUS_BASE_IDX 1 +#define mmSDMA7_RLC4_DOORBELL_LOG 0x02b9 +#define mmSDMA7_RLC4_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA7_RLC4_WATERMARK 0x02ba +#define mmSDMA7_RLC4_WATERMARK_BASE_IDX 1 +#define mmSDMA7_RLC4_DOORBELL_OFFSET 0x02bb +#define mmSDMA7_RLC4_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA7_RLC4_CSA_ADDR_LO 0x02bc +#define mmSDMA7_RLC4_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA7_RLC4_CSA_ADDR_HI 0x02bd +#define mmSDMA7_RLC4_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA7_RLC4_IB_SUB_REMAIN 0x02bf +#define mmSDMA7_RLC4_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA7_RLC4_PREEMPT 0x02c0 +#define mmSDMA7_RLC4_PREEMPT_BASE_IDX 1 +#define mmSDMA7_RLC4_DUMMY_REG 0x02c1 +#define mmSDMA7_RLC4_DUMMY_REG_BASE_IDX 1 +#define mmSDMA7_RLC4_RB_WPTR_POLL_ADDR_HI 0x02c2 +#define mmSDMA7_RLC4_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA7_RLC4_RB_WPTR_POLL_ADDR_LO 0x02c3 +#define mmSDMA7_RLC4_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA7_RLC4_RB_AQL_CNTL 0x02c4 +#define mmSDMA7_RLC4_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC4_MINOR_PTR_UPDATE 0x02c5 +#define mmSDMA7_RLC4_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA7_RLC4_MIDCMD_DATA0 0x02d0 +#define mmSDMA7_RLC4_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA7_RLC4_MIDCMD_DATA1 0x02d1 +#define mmSDMA7_RLC4_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA7_RLC4_MIDCMD_DATA2 0x02d2 +#define mmSDMA7_RLC4_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA7_RLC4_MIDCMD_DATA3 0x02d3 +#define 
mmSDMA7_RLC4_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA7_RLC4_MIDCMD_DATA4 0x02d4 +#define mmSDMA7_RLC4_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA7_RLC4_MIDCMD_DATA5 0x02d5 +#define mmSDMA7_RLC4_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA7_RLC4_MIDCMD_DATA6 0x02d6 +#define mmSDMA7_RLC4_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA7_RLC4_MIDCMD_DATA7 0x02d7 +#define mmSDMA7_RLC4_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA7_RLC4_MIDCMD_DATA8 0x02d8 +#define mmSDMA7_RLC4_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA7_RLC4_MIDCMD_CNTL 0x02d9 +#define mmSDMA7_RLC4_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC5_RB_CNTL 0x02e8 +#define mmSDMA7_RLC5_RB_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC5_RB_BASE 0x02e9 +#define mmSDMA7_RLC5_RB_BASE_BASE_IDX 1 +#define mmSDMA7_RLC5_RB_BASE_HI 0x02ea +#define mmSDMA7_RLC5_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA7_RLC5_RB_RPTR 0x02eb +#define mmSDMA7_RLC5_RB_RPTR_BASE_IDX 1 +#define mmSDMA7_RLC5_RB_RPTR_HI 0x02ec +#define mmSDMA7_RLC5_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA7_RLC5_RB_WPTR 0x02ed +#define mmSDMA7_RLC5_RB_WPTR_BASE_IDX 1 +#define mmSDMA7_RLC5_RB_WPTR_HI 0x02ee +#define mmSDMA7_RLC5_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA7_RLC5_RB_WPTR_POLL_CNTL 0x02ef +#define mmSDMA7_RLC5_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC5_RB_RPTR_ADDR_HI 0x02f0 +#define mmSDMA7_RLC5_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA7_RLC5_RB_RPTR_ADDR_LO 0x02f1 +#define mmSDMA7_RLC5_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA7_RLC5_IB_CNTL 0x02f2 +#define mmSDMA7_RLC5_IB_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC5_IB_RPTR 0x02f3 +#define mmSDMA7_RLC5_IB_RPTR_BASE_IDX 1 +#define mmSDMA7_RLC5_IB_OFFSET 0x02f4 +#define mmSDMA7_RLC5_IB_OFFSET_BASE_IDX 1 +#define mmSDMA7_RLC5_IB_BASE_LO 0x02f5 +#define mmSDMA7_RLC5_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA7_RLC5_IB_BASE_HI 0x02f6 +#define mmSDMA7_RLC5_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA7_RLC5_IB_SIZE 0x02f7 +#define mmSDMA7_RLC5_IB_SIZE_BASE_IDX 1 +#define mmSDMA7_RLC5_SKIP_CNTL 0x02f8 +#define mmSDMA7_RLC5_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC5_CONTEXT_STATUS 0x02f9 +#define mmSDMA7_RLC5_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA7_RLC5_DOORBELL 0x02fa +#define mmSDMA7_RLC5_DOORBELL_BASE_IDX 1 +#define mmSDMA7_RLC5_STATUS 0x0310 +#define mmSDMA7_RLC5_STATUS_BASE_IDX 1 +#define mmSDMA7_RLC5_DOORBELL_LOG 0x0311 +#define mmSDMA7_RLC5_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA7_RLC5_WATERMARK 0x0312 +#define mmSDMA7_RLC5_WATERMARK_BASE_IDX 1 +#define mmSDMA7_RLC5_DOORBELL_OFFSET 0x0313 +#define mmSDMA7_RLC5_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA7_RLC5_CSA_ADDR_LO 0x0314 +#define mmSDMA7_RLC5_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA7_RLC5_CSA_ADDR_HI 0x0315 +#define mmSDMA7_RLC5_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA7_RLC5_IB_SUB_REMAIN 0x0317 +#define mmSDMA7_RLC5_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA7_RLC5_PREEMPT 0x0318 +#define mmSDMA7_RLC5_PREEMPT_BASE_IDX 1 +#define mmSDMA7_RLC5_DUMMY_REG 0x0319 +#define mmSDMA7_RLC5_DUMMY_REG_BASE_IDX 1 +#define mmSDMA7_RLC5_RB_WPTR_POLL_ADDR_HI 0x031a +#define mmSDMA7_RLC5_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA7_RLC5_RB_WPTR_POLL_ADDR_LO 0x031b +#define mmSDMA7_RLC5_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA7_RLC5_RB_AQL_CNTL 0x031c +#define mmSDMA7_RLC5_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC5_MINOR_PTR_UPDATE 0x031d +#define mmSDMA7_RLC5_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA7_RLC5_MIDCMD_DATA0 0x0328 +#define mmSDMA7_RLC5_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA7_RLC5_MIDCMD_DATA1 0x0329 +#define mmSDMA7_RLC5_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA7_RLC5_MIDCMD_DATA2 0x032a +#define 
mmSDMA7_RLC5_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA7_RLC5_MIDCMD_DATA3 0x032b +#define mmSDMA7_RLC5_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA7_RLC5_MIDCMD_DATA4 0x032c +#define mmSDMA7_RLC5_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA7_RLC5_MIDCMD_DATA5 0x032d +#define mmSDMA7_RLC5_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA7_RLC5_MIDCMD_DATA6 0x032e +#define mmSDMA7_RLC5_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA7_RLC5_MIDCMD_DATA7 0x032f +#define mmSDMA7_RLC5_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA7_RLC5_MIDCMD_DATA8 0x0330 +#define mmSDMA7_RLC5_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA7_RLC5_MIDCMD_CNTL 0x0331 +#define mmSDMA7_RLC5_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC6_RB_CNTL 0x0340 +#define mmSDMA7_RLC6_RB_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC6_RB_BASE 0x0341 +#define mmSDMA7_RLC6_RB_BASE_BASE_IDX 1 +#define mmSDMA7_RLC6_RB_BASE_HI 0x0342 +#define mmSDMA7_RLC6_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA7_RLC6_RB_RPTR 0x0343 +#define mmSDMA7_RLC6_RB_RPTR_BASE_IDX 1 +#define mmSDMA7_RLC6_RB_RPTR_HI 0x0344 +#define mmSDMA7_RLC6_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA7_RLC6_RB_WPTR 0x0345 +#define mmSDMA7_RLC6_RB_WPTR_BASE_IDX 1 +#define mmSDMA7_RLC6_RB_WPTR_HI 0x0346 +#define mmSDMA7_RLC6_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA7_RLC6_RB_WPTR_POLL_CNTL 0x0347 +#define mmSDMA7_RLC6_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC6_RB_RPTR_ADDR_HI 0x0348 +#define mmSDMA7_RLC6_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA7_RLC6_RB_RPTR_ADDR_LO 0x0349 +#define mmSDMA7_RLC6_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA7_RLC6_IB_CNTL 0x034a +#define mmSDMA7_RLC6_IB_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC6_IB_RPTR 0x034b +#define mmSDMA7_RLC6_IB_RPTR_BASE_IDX 1 +#define mmSDMA7_RLC6_IB_OFFSET 0x034c +#define mmSDMA7_RLC6_IB_OFFSET_BASE_IDX 1 +#define mmSDMA7_RLC6_IB_BASE_LO 0x034d +#define mmSDMA7_RLC6_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA7_RLC6_IB_BASE_HI 0x034e +#define mmSDMA7_RLC6_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA7_RLC6_IB_SIZE 0x034f +#define mmSDMA7_RLC6_IB_SIZE_BASE_IDX 1 +#define mmSDMA7_RLC6_SKIP_CNTL 0x0350 +#define mmSDMA7_RLC6_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC6_CONTEXT_STATUS 0x0351 +#define mmSDMA7_RLC6_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA7_RLC6_DOORBELL 0x0352 +#define mmSDMA7_RLC6_DOORBELL_BASE_IDX 1 +#define mmSDMA7_RLC6_STATUS 0x0368 +#define mmSDMA7_RLC6_STATUS_BASE_IDX 1 +#define mmSDMA7_RLC6_DOORBELL_LOG 0x0369 +#define mmSDMA7_RLC6_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA7_RLC6_WATERMARK 0x036a +#define mmSDMA7_RLC6_WATERMARK_BASE_IDX 1 +#define mmSDMA7_RLC6_DOORBELL_OFFSET 0x036b +#define mmSDMA7_RLC6_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA7_RLC6_CSA_ADDR_LO 0x036c +#define mmSDMA7_RLC6_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA7_RLC6_CSA_ADDR_HI 0x036d +#define mmSDMA7_RLC6_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA7_RLC6_IB_SUB_REMAIN 0x036f +#define mmSDMA7_RLC6_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA7_RLC6_PREEMPT 0x0370 +#define mmSDMA7_RLC6_PREEMPT_BASE_IDX 1 +#define mmSDMA7_RLC6_DUMMY_REG 0x0371 +#define mmSDMA7_RLC6_DUMMY_REG_BASE_IDX 1 +#define mmSDMA7_RLC6_RB_WPTR_POLL_ADDR_HI 0x0372 +#define mmSDMA7_RLC6_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA7_RLC6_RB_WPTR_POLL_ADDR_LO 0x0373 +#define mmSDMA7_RLC6_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA7_RLC6_RB_AQL_CNTL 0x0374 +#define mmSDMA7_RLC6_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC6_MINOR_PTR_UPDATE 0x0375 +#define mmSDMA7_RLC6_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA7_RLC6_MIDCMD_DATA0 0x0380 +#define mmSDMA7_RLC6_MIDCMD_DATA0_BASE_IDX 1 +#define mmSDMA7_RLC6_MIDCMD_DATA1 0x0381 +#define 
mmSDMA7_RLC6_MIDCMD_DATA1_BASE_IDX 1 +#define mmSDMA7_RLC6_MIDCMD_DATA2 0x0382 +#define mmSDMA7_RLC6_MIDCMD_DATA2_BASE_IDX 1 +#define mmSDMA7_RLC6_MIDCMD_DATA3 0x0383 +#define mmSDMA7_RLC6_MIDCMD_DATA3_BASE_IDX 1 +#define mmSDMA7_RLC6_MIDCMD_DATA4 0x0384 +#define mmSDMA7_RLC6_MIDCMD_DATA4_BASE_IDX 1 +#define mmSDMA7_RLC6_MIDCMD_DATA5 0x0385 +#define mmSDMA7_RLC6_MIDCMD_DATA5_BASE_IDX 1 +#define mmSDMA7_RLC6_MIDCMD_DATA6 0x0386 +#define mmSDMA7_RLC6_MIDCMD_DATA6_BASE_IDX 1 +#define mmSDMA7_RLC6_MIDCMD_DATA7 0x0387 +#define mmSDMA7_RLC6_MIDCMD_DATA7_BASE_IDX 1 +#define mmSDMA7_RLC6_MIDCMD_DATA8 0x0388 +#define mmSDMA7_RLC6_MIDCMD_DATA8_BASE_IDX 1 +#define mmSDMA7_RLC6_MIDCMD_CNTL 0x0389 +#define mmSDMA7_RLC6_MIDCMD_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC7_RB_CNTL 0x0398 +#define mmSDMA7_RLC7_RB_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC7_RB_BASE 0x0399 +#define mmSDMA7_RLC7_RB_BASE_BASE_IDX 1 +#define mmSDMA7_RLC7_RB_BASE_HI 0x039a +#define mmSDMA7_RLC7_RB_BASE_HI_BASE_IDX 1 +#define mmSDMA7_RLC7_RB_RPTR 0x039b +#define mmSDMA7_RLC7_RB_RPTR_BASE_IDX 1 +#define mmSDMA7_RLC7_RB_RPTR_HI 0x039c +#define mmSDMA7_RLC7_RB_RPTR_HI_BASE_IDX 1 +#define mmSDMA7_RLC7_RB_WPTR 0x039d +#define mmSDMA7_RLC7_RB_WPTR_BASE_IDX 1 +#define mmSDMA7_RLC7_RB_WPTR_HI 0x039e +#define mmSDMA7_RLC7_RB_WPTR_HI_BASE_IDX 1 +#define mmSDMA7_RLC7_RB_WPTR_POLL_CNTL 0x039f +#define mmSDMA7_RLC7_RB_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC7_RB_RPTR_ADDR_HI 0x03a0 +#define mmSDMA7_RLC7_RB_RPTR_ADDR_HI_BASE_IDX 1 +#define mmSDMA7_RLC7_RB_RPTR_ADDR_LO 0x03a1 +#define mmSDMA7_RLC7_RB_RPTR_ADDR_LO_BASE_IDX 1 +#define mmSDMA7_RLC7_IB_CNTL 0x03a2 +#define mmSDMA7_RLC7_IB_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC7_IB_RPTR 0x03a3 +#define mmSDMA7_RLC7_IB_RPTR_BASE_IDX 1 +#define mmSDMA7_RLC7_IB_OFFSET 0x03a4 +#define mmSDMA7_RLC7_IB_OFFSET_BASE_IDX 1 +#define mmSDMA7_RLC7_IB_BASE_LO 0x03a5 +#define mmSDMA7_RLC7_IB_BASE_LO_BASE_IDX 1 +#define mmSDMA7_RLC7_IB_BASE_HI 0x03a6 +#define mmSDMA7_RLC7_IB_BASE_HI_BASE_IDX 1 +#define mmSDMA7_RLC7_IB_SIZE 0x03a7 +#define mmSDMA7_RLC7_IB_SIZE_BASE_IDX 1 +#define mmSDMA7_RLC7_SKIP_CNTL 0x03a8 +#define mmSDMA7_RLC7_SKIP_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC7_CONTEXT_STATUS 0x03a9 +#define mmSDMA7_RLC7_CONTEXT_STATUS_BASE_IDX 1 +#define mmSDMA7_RLC7_DOORBELL 0x03aa +#define mmSDMA7_RLC7_DOORBELL_BASE_IDX 1 +#define mmSDMA7_RLC7_STATUS 0x03c0 +#define mmSDMA7_RLC7_STATUS_BASE_IDX 1 +#define mmSDMA7_RLC7_DOORBELL_LOG 0x03c1 +#define mmSDMA7_RLC7_DOORBELL_LOG_BASE_IDX 1 +#define mmSDMA7_RLC7_WATERMARK 0x03c2 +#define mmSDMA7_RLC7_WATERMARK_BASE_IDX 1 +#define mmSDMA7_RLC7_DOORBELL_OFFSET 0x03c3 +#define mmSDMA7_RLC7_DOORBELL_OFFSET_BASE_IDX 1 +#define mmSDMA7_RLC7_CSA_ADDR_LO 0x03c4 +#define mmSDMA7_RLC7_CSA_ADDR_LO_BASE_IDX 1 +#define mmSDMA7_RLC7_CSA_ADDR_HI 0x03c5 +#define mmSDMA7_RLC7_CSA_ADDR_HI_BASE_IDX 1 +#define mmSDMA7_RLC7_IB_SUB_REMAIN 0x03c7 +#define mmSDMA7_RLC7_IB_SUB_REMAIN_BASE_IDX 1 +#define mmSDMA7_RLC7_PREEMPT 0x03c8 +#define mmSDMA7_RLC7_PREEMPT_BASE_IDX 1 +#define mmSDMA7_RLC7_DUMMY_REG 0x03c9 +#define mmSDMA7_RLC7_DUMMY_REG_BASE_IDX 1 +#define mmSDMA7_RLC7_RB_WPTR_POLL_ADDR_HI 0x03ca +#define mmSDMA7_RLC7_RB_WPTR_POLL_ADDR_HI_BASE_IDX 1 +#define mmSDMA7_RLC7_RB_WPTR_POLL_ADDR_LO 0x03cb +#define mmSDMA7_RLC7_RB_WPTR_POLL_ADDR_LO_BASE_IDX 1 +#define mmSDMA7_RLC7_RB_AQL_CNTL 0x03cc +#define mmSDMA7_RLC7_RB_AQL_CNTL_BASE_IDX 1 +#define mmSDMA7_RLC7_MINOR_PTR_UPDATE 0x03cd +#define mmSDMA7_RLC7_MINOR_PTR_UPDATE_BASE_IDX 1 +#define mmSDMA7_RLC7_MIDCMD_DATA0 0x03d8 +#define 
mmSDMA7_RLC7_MIDCMD_DATA0_BASE_IDX                                                     1
+#define mmSDMA7_RLC7_MIDCMD_DATA1                                                            0x03d9
+#define mmSDMA7_RLC7_MIDCMD_DATA1_BASE_IDX                                                   1
+#define mmSDMA7_RLC7_MIDCMD_DATA2                                                            0x03da
+#define mmSDMA7_RLC7_MIDCMD_DATA2_BASE_IDX                                                   1
+#define mmSDMA7_RLC7_MIDCMD_DATA3                                                            0x03db
+#define mmSDMA7_RLC7_MIDCMD_DATA3_BASE_IDX                                                   1
+#define mmSDMA7_RLC7_MIDCMD_DATA4                                                            0x03dc
+#define mmSDMA7_RLC7_MIDCMD_DATA4_BASE_IDX                                                   1
+#define mmSDMA7_RLC7_MIDCMD_DATA5                                                            0x03dd
+#define mmSDMA7_RLC7_MIDCMD_DATA5_BASE_IDX                                                   1
+#define mmSDMA7_RLC7_MIDCMD_DATA6                                                            0x03de
+#define mmSDMA7_RLC7_MIDCMD_DATA6_BASE_IDX                                                   1
+#define mmSDMA7_RLC7_MIDCMD_DATA7                                                            0x03df
+#define mmSDMA7_RLC7_MIDCMD_DATA7_BASE_IDX                                                   1
+#define mmSDMA7_RLC7_MIDCMD_DATA8                                                            0x03e0
+#define mmSDMA7_RLC7_MIDCMD_DATA8_BASE_IDX                                                   1
+#define mmSDMA7_RLC7_MIDCMD_CNTL                                                             0x03e1
+#define mmSDMA7_RLC7_MIDCMD_CNTL_BASE_IDX                                                    1
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/sdma7/sdma7_4_2_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/sdma7/sdma7_4_2_2_sh_mask.h
new file mode 100644
index 000000000000..4b56d8c67d91
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/sdma7/sdma7_4_2_2_sh_mask.h
@@ -0,0 +1,2956 @@
+/*
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ +#ifndef _sdma7_4_2_2_SH_MASK_HEADER +#define _sdma7_4_2_2_SH_MASK_HEADER + + +// addressBlock: sdma7_sdma7dec +//SDMA7_UCODE_ADDR +#define SDMA7_UCODE_ADDR__VALUE__SHIFT 0x0 +#define SDMA7_UCODE_ADDR__VALUE_MASK 0x00001FFFL +//SDMA7_UCODE_DATA +#define SDMA7_UCODE_DATA__VALUE__SHIFT 0x0 +#define SDMA7_UCODE_DATA__VALUE_MASK 0xFFFFFFFFL +//SDMA7_VM_CNTL +#define SDMA7_VM_CNTL__CMD__SHIFT 0x0 +#define SDMA7_VM_CNTL__CMD_MASK 0x0000000FL +//SDMA7_VM_CTX_LO +#define SDMA7_VM_CTX_LO__ADDR__SHIFT 0x2 +#define SDMA7_VM_CTX_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA7_VM_CTX_HI +#define SDMA7_VM_CTX_HI__ADDR__SHIFT 0x0 +#define SDMA7_VM_CTX_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA7_ACTIVE_FCN_ID +#define SDMA7_ACTIVE_FCN_ID__VFID__SHIFT 0x0 +#define SDMA7_ACTIVE_FCN_ID__RESERVED__SHIFT 0x4 +#define SDMA7_ACTIVE_FCN_ID__VF__SHIFT 0x1f +#define SDMA7_ACTIVE_FCN_ID__VFID_MASK 0x0000000FL +#define SDMA7_ACTIVE_FCN_ID__RESERVED_MASK 0x7FFFFFF0L +#define SDMA7_ACTIVE_FCN_ID__VF_MASK 0x80000000L +//SDMA7_VM_CTX_CNTL +#define SDMA7_VM_CTX_CNTL__PRIV__SHIFT 0x0 +#define SDMA7_VM_CTX_CNTL__VMID__SHIFT 0x4 +#define SDMA7_VM_CTX_CNTL__PRIV_MASK 0x00000001L +#define SDMA7_VM_CTX_CNTL__VMID_MASK 0x000000F0L +//SDMA7_VIRT_RESET_REQ +#define SDMA7_VIRT_RESET_REQ__VF__SHIFT 0x0 +#define SDMA7_VIRT_RESET_REQ__PF__SHIFT 0x1f +#define SDMA7_VIRT_RESET_REQ__VF_MASK 0x0000FFFFL +#define SDMA7_VIRT_RESET_REQ__PF_MASK 0x80000000L +//SDMA7_VF_ENABLE +#define SDMA7_VF_ENABLE__VF_ENABLE__SHIFT 0x0 +#define SDMA7_VF_ENABLE__VF_ENABLE_MASK 0x00000001L +//SDMA7_CONTEXT_REG_TYPE0 +#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_RB_CNTL__SHIFT 0x0 +#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_RB_BASE__SHIFT 0x1 +#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_RB_BASE_HI__SHIFT 0x2 +#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_RB_RPTR__SHIFT 0x3 +#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_RB_RPTR_HI__SHIFT 0x4 +#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_RB_WPTR__SHIFT 0x5 +#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_RB_WPTR_HI__SHIFT 0x6 +#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_RB_WPTR_POLL_CNTL__SHIFT 0x7 +#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_RB_RPTR_ADDR_HI__SHIFT 0x8 +#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_RB_RPTR_ADDR_LO__SHIFT 0x9 +#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_IB_CNTL__SHIFT 0xa +#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_IB_RPTR__SHIFT 0xb +#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_IB_OFFSET__SHIFT 0xc +#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_IB_BASE_LO__SHIFT 0xd +#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_IB_BASE_HI__SHIFT 0xe +#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_IB_SIZE__SHIFT 0xf +#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_SKIP_CNTL__SHIFT 0x10 +#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_CONTEXT_STATUS__SHIFT 0x11 +#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_DOORBELL__SHIFT 0x12 +#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_CONTEXT_CNTL__SHIFT 0x13 +#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_RB_CNTL_MASK 0x00000001L +#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_RB_BASE_MASK 0x00000002L +#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_RB_BASE_HI_MASK 0x00000004L +#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_RB_RPTR_MASK 0x00000008L +#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_RB_RPTR_HI_MASK 0x00000010L +#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_RB_WPTR_MASK 0x00000020L +#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_RB_WPTR_HI_MASK 0x00000040L +#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_RB_WPTR_POLL_CNTL_MASK 0x00000080L +#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_RB_RPTR_ADDR_HI_MASK 0x00000100L +#define 
SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_RB_RPTR_ADDR_LO_MASK 0x00000200L +#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_IB_CNTL_MASK 0x00000400L +#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_IB_RPTR_MASK 0x00000800L +#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_IB_OFFSET_MASK 0x00001000L +#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_IB_BASE_LO_MASK 0x00002000L +#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_IB_BASE_HI_MASK 0x00004000L +#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_IB_SIZE_MASK 0x00008000L +#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_SKIP_CNTL_MASK 0x00010000L +#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_CONTEXT_STATUS_MASK 0x00020000L +#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_DOORBELL_MASK 0x00040000L +#define SDMA7_CONTEXT_REG_TYPE0__SDMA7_GFX_CONTEXT_CNTL_MASK 0x00080000L +//SDMA7_CONTEXT_REG_TYPE1 +#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_STATUS__SHIFT 0x8 +#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_DOORBELL_LOG__SHIFT 0x9 +#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_WATERMARK__SHIFT 0xa +#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_DOORBELL_OFFSET__SHIFT 0xb +#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_CSA_ADDR_LO__SHIFT 0xc +#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_CSA_ADDR_HI__SHIFT 0xd +#define SDMA7_CONTEXT_REG_TYPE1__VOID_REG2__SHIFT 0xe +#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_IB_SUB_REMAIN__SHIFT 0xf +#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_PREEMPT__SHIFT 0x10 +#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_DUMMY_REG__SHIFT 0x11 +#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_RB_WPTR_POLL_ADDR_HI__SHIFT 0x12 +#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_RB_WPTR_POLL_ADDR_LO__SHIFT 0x13 +#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_RB_AQL_CNTL__SHIFT 0x14 +#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_MINOR_PTR_UPDATE__SHIFT 0x15 +#define SDMA7_CONTEXT_REG_TYPE1__RESERVED__SHIFT 0x16 +#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_STATUS_MASK 0x00000100L +#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_DOORBELL_LOG_MASK 0x00000200L +#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_WATERMARK_MASK 0x00000400L +#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_DOORBELL_OFFSET_MASK 0x00000800L +#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_CSA_ADDR_LO_MASK 0x00001000L +#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_CSA_ADDR_HI_MASK 0x00002000L +#define SDMA7_CONTEXT_REG_TYPE1__VOID_REG2_MASK 0x00004000L +#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_IB_SUB_REMAIN_MASK 0x00008000L +#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_PREEMPT_MASK 0x00010000L +#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_DUMMY_REG_MASK 0x00020000L +#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_RB_WPTR_POLL_ADDR_HI_MASK 0x00040000L +#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_RB_WPTR_POLL_ADDR_LO_MASK 0x00080000L +#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_RB_AQL_CNTL_MASK 0x00100000L +#define SDMA7_CONTEXT_REG_TYPE1__SDMA7_GFX_MINOR_PTR_UPDATE_MASK 0x00200000L +#define SDMA7_CONTEXT_REG_TYPE1__RESERVED_MASK 0xFFC00000L +//SDMA7_CONTEXT_REG_TYPE2 +#define SDMA7_CONTEXT_REG_TYPE2__SDMA7_GFX_MIDCMD_DATA0__SHIFT 0x0 +#define SDMA7_CONTEXT_REG_TYPE2__SDMA7_GFX_MIDCMD_DATA1__SHIFT 0x1 +#define SDMA7_CONTEXT_REG_TYPE2__SDMA7_GFX_MIDCMD_DATA2__SHIFT 0x2 +#define SDMA7_CONTEXT_REG_TYPE2__SDMA7_GFX_MIDCMD_DATA3__SHIFT 0x3 +#define SDMA7_CONTEXT_REG_TYPE2__SDMA7_GFX_MIDCMD_DATA4__SHIFT 0x4 +#define SDMA7_CONTEXT_REG_TYPE2__SDMA7_GFX_MIDCMD_DATA5__SHIFT 0x5 +#define SDMA7_CONTEXT_REG_TYPE2__SDMA7_GFX_MIDCMD_DATA6__SHIFT 0x6 +#define SDMA7_CONTEXT_REG_TYPE2__SDMA7_GFX_MIDCMD_DATA7__SHIFT 0x7 +#define SDMA7_CONTEXT_REG_TYPE2__SDMA7_GFX_MIDCMD_DATA8__SHIFT 0x8 +#define 
SDMA7_CONTEXT_REG_TYPE2__SDMA7_GFX_MIDCMD_CNTL__SHIFT 0x9 +#define SDMA7_CONTEXT_REG_TYPE2__RESERVED__SHIFT 0xa +#define SDMA7_CONTEXT_REG_TYPE2__SDMA7_GFX_MIDCMD_DATA0_MASK 0x00000001L +#define SDMA7_CONTEXT_REG_TYPE2__SDMA7_GFX_MIDCMD_DATA1_MASK 0x00000002L +#define SDMA7_CONTEXT_REG_TYPE2__SDMA7_GFX_MIDCMD_DATA2_MASK 0x00000004L +#define SDMA7_CONTEXT_REG_TYPE2__SDMA7_GFX_MIDCMD_DATA3_MASK 0x00000008L +#define SDMA7_CONTEXT_REG_TYPE2__SDMA7_GFX_MIDCMD_DATA4_MASK 0x00000010L +#define SDMA7_CONTEXT_REG_TYPE2__SDMA7_GFX_MIDCMD_DATA5_MASK 0x00000020L +#define SDMA7_CONTEXT_REG_TYPE2__SDMA7_GFX_MIDCMD_DATA6_MASK 0x00000040L +#define SDMA7_CONTEXT_REG_TYPE2__SDMA7_GFX_MIDCMD_DATA7_MASK 0x00000080L +#define SDMA7_CONTEXT_REG_TYPE2__SDMA7_GFX_MIDCMD_DATA8_MASK 0x00000100L +#define SDMA7_CONTEXT_REG_TYPE2__SDMA7_GFX_MIDCMD_CNTL_MASK 0x00000200L +#define SDMA7_CONTEXT_REG_TYPE2__RESERVED_MASK 0xFFFFFC00L +//SDMA7_CONTEXT_REG_TYPE3 +#define SDMA7_CONTEXT_REG_TYPE3__RESERVED__SHIFT 0x0 +#define SDMA7_CONTEXT_REG_TYPE3__RESERVED_MASK 0xFFFFFFFFL +//SDMA7_PUB_REG_TYPE0 +#define SDMA7_PUB_REG_TYPE0__SDMA7_UCODE_ADDR__SHIFT 0x0 +#define SDMA7_PUB_REG_TYPE0__SDMA7_UCODE_DATA__SHIFT 0x1 +#define SDMA7_PUB_REG_TYPE0__RESERVED3__SHIFT 0x3 +#define SDMA7_PUB_REG_TYPE0__SDMA7_VM_CNTL__SHIFT 0x4 +#define SDMA7_PUB_REG_TYPE0__SDMA7_VM_CTX_LO__SHIFT 0x5 +#define SDMA7_PUB_REG_TYPE0__SDMA7_VM_CTX_HI__SHIFT 0x6 +#define SDMA7_PUB_REG_TYPE0__SDMA7_ACTIVE_FCN_ID__SHIFT 0x7 +#define SDMA7_PUB_REG_TYPE0__SDMA7_VM_CTX_CNTL__SHIFT 0x8 +#define SDMA7_PUB_REG_TYPE0__SDMA7_VIRT_RESET_REQ__SHIFT 0x9 +#define SDMA7_PUB_REG_TYPE0__RESERVED10__SHIFT 0xa +#define SDMA7_PUB_REG_TYPE0__SDMA7_CONTEXT_REG_TYPE0__SHIFT 0xb +#define SDMA7_PUB_REG_TYPE0__SDMA7_CONTEXT_REG_TYPE1__SHIFT 0xc +#define SDMA7_PUB_REG_TYPE0__SDMA7_CONTEXT_REG_TYPE2__SHIFT 0xd +#define SDMA7_PUB_REG_TYPE0__SDMA7_CONTEXT_REG_TYPE3__SHIFT 0xe +#define SDMA7_PUB_REG_TYPE0__SDMA7_PUB_REG_TYPE0__SHIFT 0xf +#define SDMA7_PUB_REG_TYPE0__SDMA7_PUB_REG_TYPE1__SHIFT 0x10 +#define SDMA7_PUB_REG_TYPE0__SDMA7_PUB_REG_TYPE2__SHIFT 0x11 +#define SDMA7_PUB_REG_TYPE0__SDMA7_PUB_REG_TYPE3__SHIFT 0x12 +#define SDMA7_PUB_REG_TYPE0__SDMA7_MMHUB_CNTL__SHIFT 0x13 +#define SDMA7_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY__SHIFT 0x15 +#define SDMA7_PUB_REG_TYPE0__SDMA7_CONTEXT_GROUP_BOUNDARY__SHIFT 0x19 +#define SDMA7_PUB_REG_TYPE0__SDMA7_POWER_CNTL__SHIFT 0x1a +#define SDMA7_PUB_REG_TYPE0__SDMA7_CLK_CTRL__SHIFT 0x1b +#define SDMA7_PUB_REG_TYPE0__SDMA7_CNTL__SHIFT 0x1c +#define SDMA7_PUB_REG_TYPE0__SDMA7_CHICKEN_BITS__SHIFT 0x1d +#define SDMA7_PUB_REG_TYPE0__SDMA7_GB_ADDR_CONFIG__SHIFT 0x1e +#define SDMA7_PUB_REG_TYPE0__SDMA7_GB_ADDR_CONFIG_READ__SHIFT 0x1f +#define SDMA7_PUB_REG_TYPE0__SDMA7_UCODE_ADDR_MASK 0x00000001L +#define SDMA7_PUB_REG_TYPE0__SDMA7_UCODE_DATA_MASK 0x00000002L +#define SDMA7_PUB_REG_TYPE0__RESERVED3_MASK 0x00000008L +#define SDMA7_PUB_REG_TYPE0__SDMA7_VM_CNTL_MASK 0x00000010L +#define SDMA7_PUB_REG_TYPE0__SDMA7_VM_CTX_LO_MASK 0x00000020L +#define SDMA7_PUB_REG_TYPE0__SDMA7_VM_CTX_HI_MASK 0x00000040L +#define SDMA7_PUB_REG_TYPE0__SDMA7_ACTIVE_FCN_ID_MASK 0x00000080L +#define SDMA7_PUB_REG_TYPE0__SDMA7_VM_CTX_CNTL_MASK 0x00000100L +#define SDMA7_PUB_REG_TYPE0__SDMA7_VIRT_RESET_REQ_MASK 0x00000200L +#define SDMA7_PUB_REG_TYPE0__RESERVED10_MASK 0x00000400L +#define SDMA7_PUB_REG_TYPE0__SDMA7_CONTEXT_REG_TYPE0_MASK 0x00000800L +#define SDMA7_PUB_REG_TYPE0__SDMA7_CONTEXT_REG_TYPE1_MASK 0x00001000L +#define SDMA7_PUB_REG_TYPE0__SDMA7_CONTEXT_REG_TYPE2_MASK 
0x00002000L +#define SDMA7_PUB_REG_TYPE0__SDMA7_CONTEXT_REG_TYPE3_MASK 0x00004000L +#define SDMA7_PUB_REG_TYPE0__SDMA7_PUB_REG_TYPE0_MASK 0x00008000L +#define SDMA7_PUB_REG_TYPE0__SDMA7_PUB_REG_TYPE1_MASK 0x00010000L +#define SDMA7_PUB_REG_TYPE0__SDMA7_PUB_REG_TYPE2_MASK 0x00020000L +#define SDMA7_PUB_REG_TYPE0__SDMA7_PUB_REG_TYPE3_MASK 0x00040000L +#define SDMA7_PUB_REG_TYPE0__SDMA7_MMHUB_CNTL_MASK 0x00080000L +#define SDMA7_PUB_REG_TYPE0__RESERVED_FOR_PSPSMU_ACCESS_ONLY_MASK 0x01E00000L +#define SDMA7_PUB_REG_TYPE0__SDMA7_CONTEXT_GROUP_BOUNDARY_MASK 0x02000000L +#define SDMA7_PUB_REG_TYPE0__SDMA7_POWER_CNTL_MASK 0x04000000L +#define SDMA7_PUB_REG_TYPE0__SDMA7_CLK_CTRL_MASK 0x08000000L +#define SDMA7_PUB_REG_TYPE0__SDMA7_CNTL_MASK 0x10000000L +#define SDMA7_PUB_REG_TYPE0__SDMA7_CHICKEN_BITS_MASK 0x20000000L +#define SDMA7_PUB_REG_TYPE0__SDMA7_GB_ADDR_CONFIG_MASK 0x40000000L +#define SDMA7_PUB_REG_TYPE0__SDMA7_GB_ADDR_CONFIG_READ_MASK 0x80000000L +//SDMA7_PUB_REG_TYPE1 +#define SDMA7_PUB_REG_TYPE1__SDMA7_RB_RPTR_FETCH_HI__SHIFT 0x0 +#define SDMA7_PUB_REG_TYPE1__SDMA7_SEM_WAIT_FAIL_TIMER_CNTL__SHIFT 0x1 +#define SDMA7_PUB_REG_TYPE1__SDMA7_RB_RPTR_FETCH__SHIFT 0x2 +#define SDMA7_PUB_REG_TYPE1__SDMA7_IB_OFFSET_FETCH__SHIFT 0x3 +#define SDMA7_PUB_REG_TYPE1__SDMA7_PROGRAM__SHIFT 0x4 +#define SDMA7_PUB_REG_TYPE1__SDMA7_STATUS_REG__SHIFT 0x5 +#define SDMA7_PUB_REG_TYPE1__SDMA7_STATUS1_REG__SHIFT 0x6 +#define SDMA7_PUB_REG_TYPE1__SDMA7_RD_BURST_CNTL__SHIFT 0x7 +#define SDMA7_PUB_REG_TYPE1__SDMA7_HBM_PAGE_CONFIG__SHIFT 0x8 +#define SDMA7_PUB_REG_TYPE1__SDMA7_UCODE_CHECKSUM__SHIFT 0x9 +#define SDMA7_PUB_REG_TYPE1__SDMA7_F32_CNTL__SHIFT 0xa +#define SDMA7_PUB_REG_TYPE1__SDMA7_FREEZE__SHIFT 0xb +#define SDMA7_PUB_REG_TYPE1__SDMA7_PHASE0_QUANTUM__SHIFT 0xc +#define SDMA7_PUB_REG_TYPE1__SDMA7_PHASE1_QUANTUM__SHIFT 0xd +#define SDMA7_PUB_REG_TYPE1__SDMA_POWER_GATING__SHIFT 0xe +#define SDMA7_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG__SHIFT 0xf +#define SDMA7_PUB_REG_TYPE1__SDMA_PGFSM_WRITE__SHIFT 0x10 +#define SDMA7_PUB_REG_TYPE1__SDMA_PGFSM_READ__SHIFT 0x11 +#define SDMA7_PUB_REG_TYPE1__SDMA7_EDC_CONFIG__SHIFT 0x12 +#define SDMA7_PUB_REG_TYPE1__SDMA7_BA_THRESHOLD__SHIFT 0x13 +#define SDMA7_PUB_REG_TYPE1__SDMA7_ID__SHIFT 0x14 +#define SDMA7_PUB_REG_TYPE1__SDMA7_VERSION__SHIFT 0x15 +#define SDMA7_PUB_REG_TYPE1__SDMA7_EDC_COUNTER__SHIFT 0x16 +#define SDMA7_PUB_REG_TYPE1__SDMA7_EDC_COUNTER_CLEAR__SHIFT 0x17 +#define SDMA7_PUB_REG_TYPE1__SDMA7_STATUS2_REG__SHIFT 0x18 +#define SDMA7_PUB_REG_TYPE1__SDMA7_ATOMIC_CNTL__SHIFT 0x19 +#define SDMA7_PUB_REG_TYPE1__SDMA7_ATOMIC_PREOP_LO__SHIFT 0x1a +#define SDMA7_PUB_REG_TYPE1__SDMA7_ATOMIC_PREOP_HI__SHIFT 0x1b +#define SDMA7_PUB_REG_TYPE1__SDMA7_UTCL1_CNTL__SHIFT 0x1c +#define SDMA7_PUB_REG_TYPE1__SDMA7_UTCL1_WATERMK__SHIFT 0x1d +#define SDMA7_PUB_REG_TYPE1__SDMA7_UTCL1_RD_STATUS__SHIFT 0x1e +#define SDMA7_PUB_REG_TYPE1__SDMA7_UTCL1_WR_STATUS__SHIFT 0x1f +#define SDMA7_PUB_REG_TYPE1__SDMA7_RB_RPTR_FETCH_HI_MASK 0x00000001L +#define SDMA7_PUB_REG_TYPE1__SDMA7_SEM_WAIT_FAIL_TIMER_CNTL_MASK 0x00000002L +#define SDMA7_PUB_REG_TYPE1__SDMA7_RB_RPTR_FETCH_MASK 0x00000004L +#define SDMA7_PUB_REG_TYPE1__SDMA7_IB_OFFSET_FETCH_MASK 0x00000008L +#define SDMA7_PUB_REG_TYPE1__SDMA7_PROGRAM_MASK 0x00000010L +#define SDMA7_PUB_REG_TYPE1__SDMA7_STATUS_REG_MASK 0x00000020L +#define SDMA7_PUB_REG_TYPE1__SDMA7_STATUS1_REG_MASK 0x00000040L +#define SDMA7_PUB_REG_TYPE1__SDMA7_RD_BURST_CNTL_MASK 0x00000080L +#define SDMA7_PUB_REG_TYPE1__SDMA7_HBM_PAGE_CONFIG_MASK 0x00000100L +#define 
SDMA7_PUB_REG_TYPE1__SDMA7_UCODE_CHECKSUM_MASK 0x00000200L +#define SDMA7_PUB_REG_TYPE1__SDMA7_F32_CNTL_MASK 0x00000400L +#define SDMA7_PUB_REG_TYPE1__SDMA7_FREEZE_MASK 0x00000800L +#define SDMA7_PUB_REG_TYPE1__SDMA7_PHASE0_QUANTUM_MASK 0x00001000L +#define SDMA7_PUB_REG_TYPE1__SDMA7_PHASE1_QUANTUM_MASK 0x00002000L +#define SDMA7_PUB_REG_TYPE1__SDMA_POWER_GATING_MASK 0x00004000L +#define SDMA7_PUB_REG_TYPE1__SDMA_PGFSM_CONFIG_MASK 0x00008000L +#define SDMA7_PUB_REG_TYPE1__SDMA_PGFSM_WRITE_MASK 0x00010000L +#define SDMA7_PUB_REG_TYPE1__SDMA_PGFSM_READ_MASK 0x00020000L +#define SDMA7_PUB_REG_TYPE1__SDMA7_EDC_CONFIG_MASK 0x00040000L +#define SDMA7_PUB_REG_TYPE1__SDMA7_BA_THRESHOLD_MASK 0x00080000L +#define SDMA7_PUB_REG_TYPE1__SDMA7_ID_MASK 0x00100000L +#define SDMA7_PUB_REG_TYPE1__SDMA7_VERSION_MASK 0x00200000L +#define SDMA7_PUB_REG_TYPE1__SDMA7_EDC_COUNTER_MASK 0x00400000L +#define SDMA7_PUB_REG_TYPE1__SDMA7_EDC_COUNTER_CLEAR_MASK 0x00800000L +#define SDMA7_PUB_REG_TYPE1__SDMA7_STATUS2_REG_MASK 0x01000000L +#define SDMA7_PUB_REG_TYPE1__SDMA7_ATOMIC_CNTL_MASK 0x02000000L +#define SDMA7_PUB_REG_TYPE1__SDMA7_ATOMIC_PREOP_LO_MASK 0x04000000L +#define SDMA7_PUB_REG_TYPE1__SDMA7_ATOMIC_PREOP_HI_MASK 0x08000000L +#define SDMA7_PUB_REG_TYPE1__SDMA7_UTCL1_CNTL_MASK 0x10000000L +#define SDMA7_PUB_REG_TYPE1__SDMA7_UTCL1_WATERMK_MASK 0x20000000L +#define SDMA7_PUB_REG_TYPE1__SDMA7_UTCL1_RD_STATUS_MASK 0x40000000L +#define SDMA7_PUB_REG_TYPE1__SDMA7_UTCL1_WR_STATUS_MASK 0x80000000L +//SDMA7_PUB_REG_TYPE2 +#define SDMA7_PUB_REG_TYPE2__SDMA7_UTCL1_INV0__SHIFT 0x0 +#define SDMA7_PUB_REG_TYPE2__SDMA7_UTCL1_INV1__SHIFT 0x1 +#define SDMA7_PUB_REG_TYPE2__SDMA7_UTCL1_INV2__SHIFT 0x2 +#define SDMA7_PUB_REG_TYPE2__SDMA7_UTCL1_RD_XNACK0__SHIFT 0x3 +#define SDMA7_PUB_REG_TYPE2__SDMA7_UTCL1_RD_XNACK1__SHIFT 0x4 +#define SDMA7_PUB_REG_TYPE2__SDMA7_UTCL1_WR_XNACK0__SHIFT 0x5 +#define SDMA7_PUB_REG_TYPE2__SDMA7_UTCL1_WR_XNACK1__SHIFT 0x6 +#define SDMA7_PUB_REG_TYPE2__SDMA7_UTCL1_TIMEOUT__SHIFT 0x7 +#define SDMA7_PUB_REG_TYPE2__SDMA7_UTCL1_PAGE__SHIFT 0x8 +#define SDMA7_PUB_REG_TYPE2__SDMA7_POWER_CNTL_IDLE__SHIFT 0x9 +#define SDMA7_PUB_REG_TYPE2__SDMA7_RELAX_ORDERING_LUT__SHIFT 0xa +#define SDMA7_PUB_REG_TYPE2__SDMA7_CHICKEN_BITS_2__SHIFT 0xb +#define SDMA7_PUB_REG_TYPE2__SDMA7_STATUS3_REG__SHIFT 0xc +#define SDMA7_PUB_REG_TYPE2__SDMA7_PHYSICAL_ADDR_LO__SHIFT 0xd +#define SDMA7_PUB_REG_TYPE2__SDMA7_PHYSICAL_ADDR_HI__SHIFT 0xe +#define SDMA7_PUB_REG_TYPE2__SDMA7_PHASE2_QUANTUM__SHIFT 0xf +#define SDMA7_PUB_REG_TYPE2__SDMA7_ERROR_LOG__SHIFT 0x10 +#define SDMA7_PUB_REG_TYPE2__SDMA7_PUB_DUMMY_REG0__SHIFT 0x11 +#define SDMA7_PUB_REG_TYPE2__SDMA7_PUB_DUMMY_REG1__SHIFT 0x12 +#define SDMA7_PUB_REG_TYPE2__SDMA7_PUB_DUMMY_REG2__SHIFT 0x13 +#define SDMA7_PUB_REG_TYPE2__SDMA7_PUB_DUMMY_REG3__SHIFT 0x14 +#define SDMA7_PUB_REG_TYPE2__SDMA7_F32_COUNTER__SHIFT 0x15 +#define SDMA7_PUB_REG_TYPE2__SDMA7_UNBREAKABLE__SHIFT 0x16 +#define SDMA7_PUB_REG_TYPE2__SDMA7_PERFMON_CNTL__SHIFT 0x17 +#define SDMA7_PUB_REG_TYPE2__SDMA7_PERFCOUNTER0_RESULT__SHIFT 0x18 +#define SDMA7_PUB_REG_TYPE2__SDMA7_PERFCOUNTER1_RESULT__SHIFT 0x19 +#define SDMA7_PUB_REG_TYPE2__SDMA7_PERFCOUNTER_TAG_DELAY_RANGE__SHIFT 0x1a +#define SDMA7_PUB_REG_TYPE2__SDMA7_CRD_CNTL__SHIFT 0x1b +#define SDMA7_PUB_REG_TYPE2__RESERVED28__SHIFT 0x1c +#define SDMA7_PUB_REG_TYPE2__SDMA7_GPU_IOV_VIOLATION_LOG__SHIFT 0x1d +#define SDMA7_PUB_REG_TYPE2__SDMA7_ULV_CNTL__SHIFT 0x1e +#define SDMA7_PUB_REG_TYPE2__RESERVED__SHIFT 0x1f +#define SDMA7_PUB_REG_TYPE2__SDMA7_UTCL1_INV0_MASK 
0x00000001L +#define SDMA7_PUB_REG_TYPE2__SDMA7_UTCL1_INV1_MASK 0x00000002L +#define SDMA7_PUB_REG_TYPE2__SDMA7_UTCL1_INV2_MASK 0x00000004L +#define SDMA7_PUB_REG_TYPE2__SDMA7_UTCL1_RD_XNACK0_MASK 0x00000008L +#define SDMA7_PUB_REG_TYPE2__SDMA7_UTCL1_RD_XNACK1_MASK 0x00000010L +#define SDMA7_PUB_REG_TYPE2__SDMA7_UTCL1_WR_XNACK0_MASK 0x00000020L +#define SDMA7_PUB_REG_TYPE2__SDMA7_UTCL1_WR_XNACK1_MASK 0x00000040L +#define SDMA7_PUB_REG_TYPE2__SDMA7_UTCL1_TIMEOUT_MASK 0x00000080L +#define SDMA7_PUB_REG_TYPE2__SDMA7_UTCL1_PAGE_MASK 0x00000100L +#define SDMA7_PUB_REG_TYPE2__SDMA7_POWER_CNTL_IDLE_MASK 0x00000200L +#define SDMA7_PUB_REG_TYPE2__SDMA7_RELAX_ORDERING_LUT_MASK 0x00000400L +#define SDMA7_PUB_REG_TYPE2__SDMA7_CHICKEN_BITS_2_MASK 0x00000800L +#define SDMA7_PUB_REG_TYPE2__SDMA7_STATUS3_REG_MASK 0x00001000L +#define SDMA7_PUB_REG_TYPE2__SDMA7_PHYSICAL_ADDR_LO_MASK 0x00002000L +#define SDMA7_PUB_REG_TYPE2__SDMA7_PHYSICAL_ADDR_HI_MASK 0x00004000L +#define SDMA7_PUB_REG_TYPE2__SDMA7_PHASE2_QUANTUM_MASK 0x00008000L +#define SDMA7_PUB_REG_TYPE2__SDMA7_ERROR_LOG_MASK 0x00010000L +#define SDMA7_PUB_REG_TYPE2__SDMA7_PUB_DUMMY_REG0_MASK 0x00020000L +#define SDMA7_PUB_REG_TYPE2__SDMA7_PUB_DUMMY_REG1_MASK 0x00040000L +#define SDMA7_PUB_REG_TYPE2__SDMA7_PUB_DUMMY_REG2_MASK 0x00080000L +#define SDMA7_PUB_REG_TYPE2__SDMA7_PUB_DUMMY_REG3_MASK 0x00100000L +#define SDMA7_PUB_REG_TYPE2__SDMA7_F32_COUNTER_MASK 0x00200000L +#define SDMA7_PUB_REG_TYPE2__SDMA7_UNBREAKABLE_MASK 0x00400000L +#define SDMA7_PUB_REG_TYPE2__SDMA7_PERFMON_CNTL_MASK 0x00800000L +#define SDMA7_PUB_REG_TYPE2__SDMA7_PERFCOUNTER0_RESULT_MASK 0x01000000L +#define SDMA7_PUB_REG_TYPE2__SDMA7_PERFCOUNTER1_RESULT_MASK 0x02000000L +#define SDMA7_PUB_REG_TYPE2__SDMA7_PERFCOUNTER_TAG_DELAY_RANGE_MASK 0x04000000L +#define SDMA7_PUB_REG_TYPE2__SDMA7_CRD_CNTL_MASK 0x08000000L +#define SDMA7_PUB_REG_TYPE2__RESERVED28_MASK 0x10000000L +#define SDMA7_PUB_REG_TYPE2__SDMA7_GPU_IOV_VIOLATION_LOG_MASK 0x20000000L +#define SDMA7_PUB_REG_TYPE2__SDMA7_ULV_CNTL_MASK 0x40000000L +#define SDMA7_PUB_REG_TYPE2__RESERVED_MASK 0x80000000L +//SDMA7_PUB_REG_TYPE3 +#define SDMA7_PUB_REG_TYPE3__SDMA7_EA_DBIT_ADDR_DATA__SHIFT 0x0 +#define SDMA7_PUB_REG_TYPE3__SDMA7_EA_DBIT_ADDR_INDEX__SHIFT 0x1 +#define SDMA7_PUB_REG_TYPE3__SDMA7_GPU_IOV_VIOLATION_LOG2__SHIFT 0x2 +#define SDMA7_PUB_REG_TYPE3__RESERVED__SHIFT 0x3 +#define SDMA7_PUB_REG_TYPE3__SDMA7_EA_DBIT_ADDR_DATA_MASK 0x00000001L +#define SDMA7_PUB_REG_TYPE3__SDMA7_EA_DBIT_ADDR_INDEX_MASK 0x00000002L +#define SDMA7_PUB_REG_TYPE3__SDMA7_GPU_IOV_VIOLATION_LOG2_MASK 0x00000004L +#define SDMA7_PUB_REG_TYPE3__RESERVED_MASK 0xFFFFFFF8L +//SDMA7_MMHUB_CNTL +#define SDMA7_MMHUB_CNTL__UNIT_ID__SHIFT 0x0 +#define SDMA7_MMHUB_CNTL__UNIT_ID_MASK 0x0000003FL +//SDMA7_CONTEXT_GROUP_BOUNDARY +#define SDMA7_CONTEXT_GROUP_BOUNDARY__RESERVED__SHIFT 0x0 +#define SDMA7_CONTEXT_GROUP_BOUNDARY__RESERVED_MASK 0xFFFFFFFFL +//SDMA7_POWER_CNTL +#define SDMA7_POWER_CNTL__MEM_POWER_OVERRIDE__SHIFT 0x8 +#define SDMA7_POWER_CNTL__MEM_POWER_LS_EN__SHIFT 0x9 +#define SDMA7_POWER_CNTL__MEM_POWER_DS_EN__SHIFT 0xa +#define SDMA7_POWER_CNTL__MEM_POWER_SD_EN__SHIFT 0xb +#define SDMA7_POWER_CNTL__MEM_POWER_DELAY__SHIFT 0xc +#define SDMA7_POWER_CNTL__MEM_POWER_OVERRIDE_MASK 0x00000100L +#define SDMA7_POWER_CNTL__MEM_POWER_LS_EN_MASK 0x00000200L +#define SDMA7_POWER_CNTL__MEM_POWER_DS_EN_MASK 0x00000400L +#define SDMA7_POWER_CNTL__MEM_POWER_SD_EN_MASK 0x00000800L +#define SDMA7_POWER_CNTL__MEM_POWER_DELAY_MASK 0x003FF000L +//SDMA7_CLK_CTRL +#define 
SDMA7_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define SDMA7_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define SDMA7_CLK_CTRL__RESERVED__SHIFT 0xc +#define SDMA7_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18 +#define SDMA7_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19 +#define SDMA7_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a +#define SDMA7_CLK_CTRL__SOFT_OVERRIDE4__SHIFT 0x1b +#define SDMA7_CLK_CTRL__SOFT_OVERRIDE3__SHIFT 0x1c +#define SDMA7_CLK_CTRL__SOFT_OVERRIDE2__SHIFT 0x1d +#define SDMA7_CLK_CTRL__SOFT_OVERRIDE1__SHIFT 0x1e +#define SDMA7_CLK_CTRL__SOFT_OVERRIDE0__SHIFT 0x1f +#define SDMA7_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define SDMA7_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define SDMA7_CLK_CTRL__RESERVED_MASK 0x00FFF000L +#define SDMA7_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L +#define SDMA7_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L +#define SDMA7_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L +#define SDMA7_CLK_CTRL__SOFT_OVERRIDE4_MASK 0x08000000L +#define SDMA7_CLK_CTRL__SOFT_OVERRIDE3_MASK 0x10000000L +#define SDMA7_CLK_CTRL__SOFT_OVERRIDE2_MASK 0x20000000L +#define SDMA7_CLK_CTRL__SOFT_OVERRIDE1_MASK 0x40000000L +#define SDMA7_CLK_CTRL__SOFT_OVERRIDE0_MASK 0x80000000L +//SDMA7_CNTL +#define SDMA7_CNTL__TRAP_ENABLE__SHIFT 0x0 +#define SDMA7_CNTL__UTC_L1_ENABLE__SHIFT 0x1 +#define SDMA7_CNTL__SEM_WAIT_INT_ENABLE__SHIFT 0x2 +#define SDMA7_CNTL__DATA_SWAP_ENABLE__SHIFT 0x3 +#define SDMA7_CNTL__FENCE_SWAP_ENABLE__SHIFT 0x4 +#define SDMA7_CNTL__MIDCMD_PREEMPT_ENABLE__SHIFT 0x5 +#define SDMA7_CNTL__MIDCMD_WORLDSWITCH_ENABLE__SHIFT 0x11 +#define SDMA7_CNTL__AUTO_CTXSW_ENABLE__SHIFT 0x12 +#define SDMA7_CNTL__CTXEMPTY_INT_ENABLE__SHIFT 0x1c +#define SDMA7_CNTL__FROZEN_INT_ENABLE__SHIFT 0x1d +#define SDMA7_CNTL__IB_PREEMPT_INT_ENABLE__SHIFT 0x1e +#define SDMA7_CNTL__TRAP_ENABLE_MASK 0x00000001L +#define SDMA7_CNTL__UTC_L1_ENABLE_MASK 0x00000002L +#define SDMA7_CNTL__SEM_WAIT_INT_ENABLE_MASK 0x00000004L +#define SDMA7_CNTL__DATA_SWAP_ENABLE_MASK 0x00000008L +#define SDMA7_CNTL__FENCE_SWAP_ENABLE_MASK 0x00000010L +#define SDMA7_CNTL__MIDCMD_PREEMPT_ENABLE_MASK 0x00000020L +#define SDMA7_CNTL__MIDCMD_WORLDSWITCH_ENABLE_MASK 0x00020000L +#define SDMA7_CNTL__AUTO_CTXSW_ENABLE_MASK 0x00040000L +#define SDMA7_CNTL__CTXEMPTY_INT_ENABLE_MASK 0x10000000L +#define SDMA7_CNTL__FROZEN_INT_ENABLE_MASK 0x20000000L +#define SDMA7_CNTL__IB_PREEMPT_INT_ENABLE_MASK 0x40000000L +//SDMA7_CHICKEN_BITS +#define SDMA7_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE__SHIFT 0x0 +#define SDMA7_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE__SHIFT 0x1 +#define SDMA7_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE__SHIFT 0x2 +#define SDMA7_CHICKEN_BITS__WRITE_BURST_LENGTH__SHIFT 0x8 +#define SDMA7_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE__SHIFT 0xa +#define SDMA7_CHICKEN_BITS__COPY_OVERLAP_ENABLE__SHIFT 0x10 +#define SDMA7_CHICKEN_BITS__RAW_CHECK_ENABLE__SHIFT 0x11 +#define SDMA7_CHICKEN_BITS__SRBM_POLL_RETRYING__SHIFT 0x14 +#define SDMA7_CHICKEN_BITS__CG_STATUS_OUTPUT__SHIFT 0x17 +#define SDMA7_CHICKEN_BITS__TIME_BASED_QOS__SHIFT 0x19 +#define SDMA7_CHICKEN_BITS__CE_AFIFO_WATERMARK__SHIFT 0x1a +#define SDMA7_CHICKEN_BITS__CE_DFIFO_WATERMARK__SHIFT 0x1c +#define SDMA7_CHICKEN_BITS__CE_LFIFO_WATERMARK__SHIFT 0x1e +#define SDMA7_CHICKEN_BITS__COPY_EFFICIENCY_ENABLE_MASK 0x00000001L +#define SDMA7_CHICKEN_BITS__STALL_ON_TRANS_FULL_ENABLE_MASK 0x00000002L +#define SDMA7_CHICKEN_BITS__STALL_ON_NO_FREE_DATA_BUFFER_ENABLE_MASK 0x00000004L +#define SDMA7_CHICKEN_BITS__WRITE_BURST_LENGTH_MASK 0x00000300L +#define SDMA7_CHICKEN_BITS__WRITE_BURST_WAIT_CYCLE_MASK 0x00001C00L 
+#define SDMA7_CHICKEN_BITS__COPY_OVERLAP_ENABLE_MASK 0x00010000L +#define SDMA7_CHICKEN_BITS__RAW_CHECK_ENABLE_MASK 0x00020000L +#define SDMA7_CHICKEN_BITS__SRBM_POLL_RETRYING_MASK 0x00100000L +#define SDMA7_CHICKEN_BITS__CG_STATUS_OUTPUT_MASK 0x00800000L +#define SDMA7_CHICKEN_BITS__TIME_BASED_QOS_MASK 0x02000000L +#define SDMA7_CHICKEN_BITS__CE_AFIFO_WATERMARK_MASK 0x0C000000L +#define SDMA7_CHICKEN_BITS__CE_DFIFO_WATERMARK_MASK 0x30000000L +#define SDMA7_CHICKEN_BITS__CE_LFIFO_WATERMARK_MASK 0xC0000000L +//SDMA7_GB_ADDR_CONFIG +#define SDMA7_GB_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0 +#define SDMA7_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3 +#define SDMA7_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE__SHIFT 0x8 +#define SDMA7_GB_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc +#define SDMA7_GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13 +#define SDMA7_GB_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L +#define SDMA7_GB_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L +#define SDMA7_GB_ADDR_CONFIG__BANK_INTERLEAVE_SIZE_MASK 0x00000700L +#define SDMA7_GB_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L +#define SDMA7_GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L +//SDMA7_GB_ADDR_CONFIG_READ +#define SDMA7_GB_ADDR_CONFIG_READ__NUM_PIPES__SHIFT 0x0 +#define SDMA7_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE__SHIFT 0x3 +#define SDMA7_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE__SHIFT 0x8 +#define SDMA7_GB_ADDR_CONFIG_READ__NUM_BANKS__SHIFT 0xc +#define SDMA7_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES__SHIFT 0x13 +#define SDMA7_GB_ADDR_CONFIG_READ__NUM_PIPES_MASK 0x00000007L +#define SDMA7_GB_ADDR_CONFIG_READ__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L +#define SDMA7_GB_ADDR_CONFIG_READ__BANK_INTERLEAVE_SIZE_MASK 0x00000700L +#define SDMA7_GB_ADDR_CONFIG_READ__NUM_BANKS_MASK 0x00007000L +#define SDMA7_GB_ADDR_CONFIG_READ__NUM_SHADER_ENGINES_MASK 0x00180000L +//SDMA7_RB_RPTR_FETCH_HI +#define SDMA7_RB_RPTR_FETCH_HI__OFFSET__SHIFT 0x0 +#define SDMA7_RB_RPTR_FETCH_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA7_SEM_WAIT_FAIL_TIMER_CNTL +#define SDMA7_SEM_WAIT_FAIL_TIMER_CNTL__TIMER__SHIFT 0x0 +#define SDMA7_SEM_WAIT_FAIL_TIMER_CNTL__TIMER_MASK 0xFFFFFFFFL +//SDMA7_RB_RPTR_FETCH +#define SDMA7_RB_RPTR_FETCH__OFFSET__SHIFT 0x2 +#define SDMA7_RB_RPTR_FETCH__OFFSET_MASK 0xFFFFFFFCL +//SDMA7_IB_OFFSET_FETCH +#define SDMA7_IB_OFFSET_FETCH__OFFSET__SHIFT 0x2 +#define SDMA7_IB_OFFSET_FETCH__OFFSET_MASK 0x003FFFFCL +//SDMA7_PROGRAM +#define SDMA7_PROGRAM__STREAM__SHIFT 0x0 +#define SDMA7_PROGRAM__STREAM_MASK 0xFFFFFFFFL +//SDMA7_STATUS_REG +#define SDMA7_STATUS_REG__IDLE__SHIFT 0x0 +#define SDMA7_STATUS_REG__REG_IDLE__SHIFT 0x1 +#define SDMA7_STATUS_REG__RB_EMPTY__SHIFT 0x2 +#define SDMA7_STATUS_REG__RB_FULL__SHIFT 0x3 +#define SDMA7_STATUS_REG__RB_CMD_IDLE__SHIFT 0x4 +#define SDMA7_STATUS_REG__RB_CMD_FULL__SHIFT 0x5 +#define SDMA7_STATUS_REG__IB_CMD_IDLE__SHIFT 0x6 +#define SDMA7_STATUS_REG__IB_CMD_FULL__SHIFT 0x7 +#define SDMA7_STATUS_REG__BLOCK_IDLE__SHIFT 0x8 +#define SDMA7_STATUS_REG__INSIDE_IB__SHIFT 0x9 +#define SDMA7_STATUS_REG__EX_IDLE__SHIFT 0xa +#define SDMA7_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE__SHIFT 0xb +#define SDMA7_STATUS_REG__PACKET_READY__SHIFT 0xc +#define SDMA7_STATUS_REG__MC_WR_IDLE__SHIFT 0xd +#define SDMA7_STATUS_REG__SRBM_IDLE__SHIFT 0xe +#define SDMA7_STATUS_REG__CONTEXT_EMPTY__SHIFT 0xf +#define SDMA7_STATUS_REG__DELTA_RPTR_FULL__SHIFT 0x10 +#define SDMA7_STATUS_REG__RB_MC_RREQ_IDLE__SHIFT 0x11 +#define SDMA7_STATUS_REG__IB_MC_RREQ_IDLE__SHIFT 0x12 +#define SDMA7_STATUS_REG__MC_RD_IDLE__SHIFT 0x13 +#define 
SDMA7_STATUS_REG__DELTA_RPTR_EMPTY__SHIFT 0x14 +#define SDMA7_STATUS_REG__MC_RD_RET_STALL__SHIFT 0x15 +#define SDMA7_STATUS_REG__MC_RD_NO_POLL_IDLE__SHIFT 0x16 +#define SDMA7_STATUS_REG__PREV_CMD_IDLE__SHIFT 0x19 +#define SDMA7_STATUS_REG__SEM_IDLE__SHIFT 0x1a +#define SDMA7_STATUS_REG__SEM_REQ_STALL__SHIFT 0x1b +#define SDMA7_STATUS_REG__SEM_RESP_STATE__SHIFT 0x1c +#define SDMA7_STATUS_REG__INT_IDLE__SHIFT 0x1e +#define SDMA7_STATUS_REG__INT_REQ_STALL__SHIFT 0x1f +#define SDMA7_STATUS_REG__IDLE_MASK 0x00000001L +#define SDMA7_STATUS_REG__REG_IDLE_MASK 0x00000002L +#define SDMA7_STATUS_REG__RB_EMPTY_MASK 0x00000004L +#define SDMA7_STATUS_REG__RB_FULL_MASK 0x00000008L +#define SDMA7_STATUS_REG__RB_CMD_IDLE_MASK 0x00000010L +#define SDMA7_STATUS_REG__RB_CMD_FULL_MASK 0x00000020L +#define SDMA7_STATUS_REG__IB_CMD_IDLE_MASK 0x00000040L +#define SDMA7_STATUS_REG__IB_CMD_FULL_MASK 0x00000080L +#define SDMA7_STATUS_REG__BLOCK_IDLE_MASK 0x00000100L +#define SDMA7_STATUS_REG__INSIDE_IB_MASK 0x00000200L +#define SDMA7_STATUS_REG__EX_IDLE_MASK 0x00000400L +#define SDMA7_STATUS_REG__EX_IDLE_POLL_TIMER_EXPIRE_MASK 0x00000800L +#define SDMA7_STATUS_REG__PACKET_READY_MASK 0x00001000L +#define SDMA7_STATUS_REG__MC_WR_IDLE_MASK 0x00002000L +#define SDMA7_STATUS_REG__SRBM_IDLE_MASK 0x00004000L +#define SDMA7_STATUS_REG__CONTEXT_EMPTY_MASK 0x00008000L +#define SDMA7_STATUS_REG__DELTA_RPTR_FULL_MASK 0x00010000L +#define SDMA7_STATUS_REG__RB_MC_RREQ_IDLE_MASK 0x00020000L +#define SDMA7_STATUS_REG__IB_MC_RREQ_IDLE_MASK 0x00040000L +#define SDMA7_STATUS_REG__MC_RD_IDLE_MASK 0x00080000L +#define SDMA7_STATUS_REG__DELTA_RPTR_EMPTY_MASK 0x00100000L +#define SDMA7_STATUS_REG__MC_RD_RET_STALL_MASK 0x00200000L +#define SDMA7_STATUS_REG__MC_RD_NO_POLL_IDLE_MASK 0x00400000L +#define SDMA7_STATUS_REG__PREV_CMD_IDLE_MASK 0x02000000L +#define SDMA7_STATUS_REG__SEM_IDLE_MASK 0x04000000L +#define SDMA7_STATUS_REG__SEM_REQ_STALL_MASK 0x08000000L +#define SDMA7_STATUS_REG__SEM_RESP_STATE_MASK 0x30000000L +#define SDMA7_STATUS_REG__INT_IDLE_MASK 0x40000000L +#define SDMA7_STATUS_REG__INT_REQ_STALL_MASK 0x80000000L +//SDMA7_STATUS1_REG +#define SDMA7_STATUS1_REG__CE_WREQ_IDLE__SHIFT 0x0 +#define SDMA7_STATUS1_REG__CE_WR_IDLE__SHIFT 0x1 +#define SDMA7_STATUS1_REG__CE_SPLIT_IDLE__SHIFT 0x2 +#define SDMA7_STATUS1_REG__CE_RREQ_IDLE__SHIFT 0x3 +#define SDMA7_STATUS1_REG__CE_OUT_IDLE__SHIFT 0x4 +#define SDMA7_STATUS1_REG__CE_IN_IDLE__SHIFT 0x5 +#define SDMA7_STATUS1_REG__CE_DST_IDLE__SHIFT 0x6 +#define SDMA7_STATUS1_REG__CE_CMD_IDLE__SHIFT 0x9 +#define SDMA7_STATUS1_REG__CE_AFIFO_FULL__SHIFT 0xa +#define SDMA7_STATUS1_REG__CE_INFO_FULL__SHIFT 0xd +#define SDMA7_STATUS1_REG__CE_INFO1_FULL__SHIFT 0xe +#define SDMA7_STATUS1_REG__EX_START__SHIFT 0xf +#define SDMA7_STATUS1_REG__CE_RD_STALL__SHIFT 0x11 +#define SDMA7_STATUS1_REG__CE_WR_STALL__SHIFT 0x12 +#define SDMA7_STATUS1_REG__CE_WREQ_IDLE_MASK 0x00000001L +#define SDMA7_STATUS1_REG__CE_WR_IDLE_MASK 0x00000002L +#define SDMA7_STATUS1_REG__CE_SPLIT_IDLE_MASK 0x00000004L +#define SDMA7_STATUS1_REG__CE_RREQ_IDLE_MASK 0x00000008L +#define SDMA7_STATUS1_REG__CE_OUT_IDLE_MASK 0x00000010L +#define SDMA7_STATUS1_REG__CE_IN_IDLE_MASK 0x00000020L +#define SDMA7_STATUS1_REG__CE_DST_IDLE_MASK 0x00000040L +#define SDMA7_STATUS1_REG__CE_CMD_IDLE_MASK 0x00000200L +#define SDMA7_STATUS1_REG__CE_AFIFO_FULL_MASK 0x00000400L +#define SDMA7_STATUS1_REG__CE_INFO_FULL_MASK 0x00002000L +#define SDMA7_STATUS1_REG__CE_INFO1_FULL_MASK 0x00004000L +#define SDMA7_STATUS1_REG__EX_START_MASK 0x00008000L +#define 
SDMA7_STATUS1_REG__CE_RD_STALL_MASK 0x00020000L +#define SDMA7_STATUS1_REG__CE_WR_STALL_MASK 0x00040000L +//SDMA7_RD_BURST_CNTL +#define SDMA7_RD_BURST_CNTL__RD_BURST__SHIFT 0x0 +#define SDMA7_RD_BURST_CNTL__CMD_BUFFER_RD_BURST__SHIFT 0x2 +#define SDMA7_RD_BURST_CNTL__RD_BURST_MASK 0x00000003L +#define SDMA7_RD_BURST_CNTL__CMD_BUFFER_RD_BURST_MASK 0x0000000CL +//SDMA7_HBM_PAGE_CONFIG +#define SDMA7_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT__SHIFT 0x0 +#define SDMA7_HBM_PAGE_CONFIG__PAGE_SIZE_EXPONENT_MASK 0x00000001L +//SDMA7_UCODE_CHECKSUM +#define SDMA7_UCODE_CHECKSUM__DATA__SHIFT 0x0 +#define SDMA7_UCODE_CHECKSUM__DATA_MASK 0xFFFFFFFFL +//SDMA7_F32_CNTL +#define SDMA7_F32_CNTL__HALT__SHIFT 0x0 +#define SDMA7_F32_CNTL__STEP__SHIFT 0x1 +#define SDMA7_F32_CNTL__HALT_MASK 0x00000001L +#define SDMA7_F32_CNTL__STEP_MASK 0x00000002L +//SDMA7_FREEZE +#define SDMA7_FREEZE__PREEMPT__SHIFT 0x0 +#define SDMA7_FREEZE__FREEZE__SHIFT 0x4 +#define SDMA7_FREEZE__FROZEN__SHIFT 0x5 +#define SDMA7_FREEZE__F32_FREEZE__SHIFT 0x6 +#define SDMA7_FREEZE__PREEMPT_MASK 0x00000001L +#define SDMA7_FREEZE__FREEZE_MASK 0x00000010L +#define SDMA7_FREEZE__FROZEN_MASK 0x00000020L +#define SDMA7_FREEZE__F32_FREEZE_MASK 0x00000040L +//SDMA7_PHASE0_QUANTUM +#define SDMA7_PHASE0_QUANTUM__UNIT__SHIFT 0x0 +#define SDMA7_PHASE0_QUANTUM__VALUE__SHIFT 0x8 +#define SDMA7_PHASE0_QUANTUM__PREFER__SHIFT 0x1e +#define SDMA7_PHASE0_QUANTUM__UNIT_MASK 0x0000000FL +#define SDMA7_PHASE0_QUANTUM__VALUE_MASK 0x00FFFF00L +#define SDMA7_PHASE0_QUANTUM__PREFER_MASK 0x40000000L +//SDMA7_PHASE1_QUANTUM +#define SDMA7_PHASE1_QUANTUM__UNIT__SHIFT 0x0 +#define SDMA7_PHASE1_QUANTUM__VALUE__SHIFT 0x8 +#define SDMA7_PHASE1_QUANTUM__PREFER__SHIFT 0x1e +#define SDMA7_PHASE1_QUANTUM__UNIT_MASK 0x0000000FL +#define SDMA7_PHASE1_QUANTUM__VALUE_MASK 0x00FFFF00L +#define SDMA7_PHASE1_QUANTUM__PREFER_MASK 0x40000000L +//SDMA7_EDC_CONFIG +#define SDMA7_EDC_CONFIG__DIS_EDC__SHIFT 0x1 +#define SDMA7_EDC_CONFIG__ECC_INT_ENABLE__SHIFT 0x2 +#define SDMA7_EDC_CONFIG__DIS_EDC_MASK 0x00000002L +#define SDMA7_EDC_CONFIG__ECC_INT_ENABLE_MASK 0x00000004L +//SDMA7_BA_THRESHOLD +#define SDMA7_BA_THRESHOLD__READ_THRES__SHIFT 0x0 +#define SDMA7_BA_THRESHOLD__WRITE_THRES__SHIFT 0x10 +#define SDMA7_BA_THRESHOLD__READ_THRES_MASK 0x000003FFL +#define SDMA7_BA_THRESHOLD__WRITE_THRES_MASK 0x03FF0000L +//SDMA7_ID +#define SDMA7_ID__DEVICE_ID__SHIFT 0x0 +#define SDMA7_ID__DEVICE_ID_MASK 0x000000FFL +//SDMA7_VERSION +#define SDMA7_VERSION__MINVER__SHIFT 0x0 +#define SDMA7_VERSION__MAJVER__SHIFT 0x8 +#define SDMA7_VERSION__REV__SHIFT 0x10 +#define SDMA7_VERSION__MINVER_MASK 0x0000007FL +#define SDMA7_VERSION__MAJVER_MASK 0x00007F00L +#define SDMA7_VERSION__REV_MASK 0x003F0000L +//SDMA7_EDC_COUNTER +#define SDMA7_EDC_COUNTER__SDMA_UCODE_BUF_SED__SHIFT 0x0 +#define SDMA7_EDC_COUNTER__SDMA_RB_CMD_BUF_SED__SHIFT 0x2 +#define SDMA7_EDC_COUNTER__SDMA_IB_CMD_BUF_SED__SHIFT 0x3 +#define SDMA7_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED__SHIFT 0x4 +#define SDMA7_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED__SHIFT 0x5 +#define SDMA7_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED__SHIFT 0x6 +#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED__SHIFT 0x7 +#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED__SHIFT 0x8 +#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED__SHIFT 0x9 +#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED__SHIFT 0xa +#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED__SHIFT 0xb +#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED__SHIFT 0xc +#define 
SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED__SHIFT 0xd +#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED__SHIFT 0xe +#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED__SHIFT 0xf +#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED__SHIFT 0x10 +#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED__SHIFT 0x11 +#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED__SHIFT 0x12 +#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED__SHIFT 0x13 +#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED__SHIFT 0x14 +#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED__SHIFT 0x15 +#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED__SHIFT 0x16 +#define SDMA7_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED__SHIFT 0x17 +#define SDMA7_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED__SHIFT 0x18 +#define SDMA7_EDC_COUNTER__SDMA_UCODE_BUF_SED_MASK 0x00000001L +#define SDMA7_EDC_COUNTER__SDMA_RB_CMD_BUF_SED_MASK 0x00000004L +#define SDMA7_EDC_COUNTER__SDMA_IB_CMD_BUF_SED_MASK 0x00000008L +#define SDMA7_EDC_COUNTER__SDMA_UTCL1_RD_FIFO_SED_MASK 0x00000010L +#define SDMA7_EDC_COUNTER__SDMA_UTCL1_RDBST_FIFO_SED_MASK 0x00000020L +#define SDMA7_EDC_COUNTER__SDMA_DATA_LUT_FIFO_SED_MASK 0x00000040L +#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF0_SED_MASK 0x00000080L +#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF1_SED_MASK 0x00000100L +#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF2_SED_MASK 0x00000200L +#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF3_SED_MASK 0x00000400L +#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF4_SED_MASK 0x00000800L +#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF5_SED_MASK 0x00001000L +#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF6_SED_MASK 0x00002000L +#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF7_SED_MASK 0x00004000L +#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF8_SED_MASK 0x00008000L +#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF9_SED_MASK 0x00010000L +#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF10_SED_MASK 0x00020000L +#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF11_SED_MASK 0x00040000L +#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF12_SED_MASK 0x00080000L +#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF13_SED_MASK 0x00100000L +#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF14_SED_MASK 0x00200000L +#define SDMA7_EDC_COUNTER__SDMA_MBANK_DATA_BUF15_SED_MASK 0x00400000L +#define SDMA7_EDC_COUNTER__SDMA_SPLIT_DAT_BUF_SED_MASK 0x00800000L +#define SDMA7_EDC_COUNTER__SDMA_MC_WR_ADDR_FIFO_SED_MASK 0x01000000L +//SDMA7_EDC_COUNTER_CLEAR +#define SDMA7_EDC_COUNTER_CLEAR__DUMMY__SHIFT 0x0 +#define SDMA7_EDC_COUNTER_CLEAR__DUMMY_MASK 0x00000001L +//SDMA7_STATUS2_REG +#define SDMA7_STATUS2_REG__ID__SHIFT 0x0 +#define SDMA7_STATUS2_REG__F32_INSTR_PTR__SHIFT 0x3 +#define SDMA7_STATUS2_REG__CMD_OP__SHIFT 0x10 +#define SDMA7_STATUS2_REG__ID_MASK 0x00000007L +#define SDMA7_STATUS2_REG__F32_INSTR_PTR_MASK 0x0000FFF8L +#define SDMA7_STATUS2_REG__CMD_OP_MASK 0xFFFF0000L +//SDMA7_ATOMIC_CNTL +#define SDMA7_ATOMIC_CNTL__LOOP_TIMER__SHIFT 0x0 +#define SDMA7_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE__SHIFT 0x1f +#define SDMA7_ATOMIC_CNTL__LOOP_TIMER_MASK 0x7FFFFFFFL +#define SDMA7_ATOMIC_CNTL__ATOMIC_RTN_INT_ENABLE_MASK 0x80000000L +//SDMA7_ATOMIC_PREOP_LO +#define SDMA7_ATOMIC_PREOP_LO__DATA__SHIFT 0x0 +#define SDMA7_ATOMIC_PREOP_LO__DATA_MASK 0xFFFFFFFFL +//SDMA7_ATOMIC_PREOP_HI +#define SDMA7_ATOMIC_PREOP_HI__DATA__SHIFT 0x0 +#define SDMA7_ATOMIC_PREOP_HI__DATA_MASK 0xFFFFFFFFL +//SDMA7_UTCL1_CNTL +#define SDMA7_UTCL1_CNTL__REDO_ENABLE__SHIFT 0x0 +#define SDMA7_UTCL1_CNTL__REDO_DELAY__SHIFT 0x1 +#define 
SDMA7_UTCL1_CNTL__REDO_WATERMK__SHIFT 0xb +#define SDMA7_UTCL1_CNTL__INVACK_DELAY__SHIFT 0xe +#define SDMA7_UTCL1_CNTL__REQL2_CREDIT__SHIFT 0x18 +#define SDMA7_UTCL1_CNTL__VADDR_WATERMK__SHIFT 0x1d +#define SDMA7_UTCL1_CNTL__REDO_ENABLE_MASK 0x00000001L +#define SDMA7_UTCL1_CNTL__REDO_DELAY_MASK 0x000007FEL +#define SDMA7_UTCL1_CNTL__REDO_WATERMK_MASK 0x00003800L +#define SDMA7_UTCL1_CNTL__INVACK_DELAY_MASK 0x00FFC000L +#define SDMA7_UTCL1_CNTL__REQL2_CREDIT_MASK 0x1F000000L +#define SDMA7_UTCL1_CNTL__VADDR_WATERMK_MASK 0xE0000000L +//SDMA7_UTCL1_WATERMK +#define SDMA7_UTCL1_WATERMK__REQMC_WATERMK__SHIFT 0x0 +#define SDMA7_UTCL1_WATERMK__REQPG_WATERMK__SHIFT 0x9 +#define SDMA7_UTCL1_WATERMK__INVREQ_WATERMK__SHIFT 0x11 +#define SDMA7_UTCL1_WATERMK__XNACK_WATERMK__SHIFT 0x19 +#define SDMA7_UTCL1_WATERMK__REQMC_WATERMK_MASK 0x000001FFL +#define SDMA7_UTCL1_WATERMK__REQPG_WATERMK_MASK 0x0001FE00L +#define SDMA7_UTCL1_WATERMK__INVREQ_WATERMK_MASK 0x01FE0000L +#define SDMA7_UTCL1_WATERMK__XNACK_WATERMK_MASK 0xFE000000L +//SDMA7_UTCL1_RD_STATUS +#define SDMA7_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0 +#define SDMA7_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1 +#define SDMA7_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2 +#define SDMA7_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3 +#define SDMA7_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4 +#define SDMA7_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5 +#define SDMA7_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6 +#define SDMA7_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7 +#define SDMA7_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8 +#define SDMA7_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9 +#define SDMA7_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa +#define SDMA7_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb +#define SDMA7_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc +#define SDMA7_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd +#define SDMA7_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe +#define SDMA7_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf +#define SDMA7_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10 +#define SDMA7_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11 +#define SDMA7_UTCL1_RD_STATUS__PAGE_FAULT__SHIFT 0x12 +#define SDMA7_UTCL1_RD_STATUS__PAGE_NULL__SHIFT 0x13 +#define SDMA7_UTCL1_RD_STATUS__REQL2_IDLE__SHIFT 0x14 +#define SDMA7_UTCL1_RD_STATUS__CE_L1_STALL__SHIFT 0x15 +#define SDMA7_UTCL1_RD_STATUS__NEXT_RD_VECTOR__SHIFT 0x16 +#define SDMA7_UTCL1_RD_STATUS__MERGE_STATE__SHIFT 0x1a +#define SDMA7_UTCL1_RD_STATUS__ADDR_RD_RTR__SHIFT 0x1d +#define SDMA7_UTCL1_RD_STATUS__WPTR_POLLING__SHIFT 0x1e +#define SDMA7_UTCL1_RD_STATUS__INVREQ_SIZE__SHIFT 0x1f +#define SDMA7_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L +#define SDMA7_UTCL1_RD_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L +#define SDMA7_UTCL1_RD_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L +#define SDMA7_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L +#define SDMA7_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L +#define SDMA7_UTCL1_RD_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L +#define SDMA7_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L +#define SDMA7_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L +#define SDMA7_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L +#define SDMA7_UTCL1_RD_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L +#define SDMA7_UTCL1_RD_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L +#define 
SDMA7_UTCL1_RD_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L +#define SDMA7_UTCL1_RD_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L +#define SDMA7_UTCL1_RD_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L +#define SDMA7_UTCL1_RD_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L +#define SDMA7_UTCL1_RD_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L +#define SDMA7_UTCL1_RD_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L +#define SDMA7_UTCL1_RD_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L +#define SDMA7_UTCL1_RD_STATUS__PAGE_FAULT_MASK 0x00040000L +#define SDMA7_UTCL1_RD_STATUS__PAGE_NULL_MASK 0x00080000L +#define SDMA7_UTCL1_RD_STATUS__REQL2_IDLE_MASK 0x00100000L +#define SDMA7_UTCL1_RD_STATUS__CE_L1_STALL_MASK 0x00200000L +#define SDMA7_UTCL1_RD_STATUS__NEXT_RD_VECTOR_MASK 0x03C00000L +#define SDMA7_UTCL1_RD_STATUS__MERGE_STATE_MASK 0x1C000000L +#define SDMA7_UTCL1_RD_STATUS__ADDR_RD_RTR_MASK 0x20000000L +#define SDMA7_UTCL1_RD_STATUS__WPTR_POLLING_MASK 0x40000000L +#define SDMA7_UTCL1_RD_STATUS__INVREQ_SIZE_MASK 0x80000000L +//SDMA7_UTCL1_WR_STATUS +#define SDMA7_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY__SHIFT 0x0 +#define SDMA7_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY__SHIFT 0x1 +#define SDMA7_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY__SHIFT 0x2 +#define SDMA7_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY__SHIFT 0x3 +#define SDMA7_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY__SHIFT 0x4 +#define SDMA7_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY__SHIFT 0x5 +#define SDMA7_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY__SHIFT 0x6 +#define SDMA7_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY__SHIFT 0x7 +#define SDMA7_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY__SHIFT 0x8 +#define SDMA7_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL__SHIFT 0x9 +#define SDMA7_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL__SHIFT 0xa +#define SDMA7_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL__SHIFT 0xb +#define SDMA7_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL__SHIFT 0xc +#define SDMA7_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL__SHIFT 0xd +#define SDMA7_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL__SHIFT 0xe +#define SDMA7_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL__SHIFT 0xf +#define SDMA7_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL__SHIFT 0x10 +#define SDMA7_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL__SHIFT 0x11 +#define SDMA7_UTCL1_WR_STATUS__PAGE_FAULT__SHIFT 0x12 +#define SDMA7_UTCL1_WR_STATUS__PAGE_NULL__SHIFT 0x13 +#define SDMA7_UTCL1_WR_STATUS__REQL2_IDLE__SHIFT 0x14 +#define SDMA7_UTCL1_WR_STATUS__F32_WR_RTR__SHIFT 0x15 +#define SDMA7_UTCL1_WR_STATUS__NEXT_WR_VECTOR__SHIFT 0x16 +#define SDMA7_UTCL1_WR_STATUS__MERGE_STATE__SHIFT 0x19 +#define SDMA7_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY__SHIFT 0x1c +#define SDMA7_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL__SHIFT 0x1d +#define SDMA7_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY__SHIFT 0x1e +#define SDMA7_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL__SHIFT 0x1f +#define SDMA7_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_EMPTY_MASK 0x00000001L +#define SDMA7_UTCL1_WR_STATUS__RQMC_REQ_FIFO_EMPTY_MASK 0x00000002L +#define SDMA7_UTCL1_WR_STATUS__RTPG_RET_BUF_EMPTY_MASK 0x00000004L +#define SDMA7_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_EMPTY_MASK 0x00000008L +#define SDMA7_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_EMPTY_MASK 0x00000010L +#define SDMA7_UTCL1_WR_STATUS__RQPG_REDO_FIFO_EMPTY_MASK 0x00000020L +#define SDMA7_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_EMPTY_MASK 0x00000040L +#define SDMA7_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_EMPTY_MASK 0x00000080L +#define SDMA7_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_EMPTY_MASK 0x00000100L +#define SDMA7_UTCL1_WR_STATUS__RQMC_RET_ADDR_FIFO_FULL_MASK 0x00000200L +#define 
SDMA7_UTCL1_WR_STATUS__RQMC_REQ_FIFO_FULL_MASK 0x00000400L +#define SDMA7_UTCL1_WR_STATUS__RTPG_RET_BUF_FULL_MASK 0x00000800L +#define SDMA7_UTCL1_WR_STATUS__RTPG_VADDR_FIFO_FULL_MASK 0x00001000L +#define SDMA7_UTCL1_WR_STATUS__RQPG_HEAD_VIRT_FIFO_FULL_MASK 0x00002000L +#define SDMA7_UTCL1_WR_STATUS__RQPG_REDO_FIFO_FULL_MASK 0x00004000L +#define SDMA7_UTCL1_WR_STATUS__RQPG_REQPAGE_FIFO_FULL_MASK 0x00008000L +#define SDMA7_UTCL1_WR_STATUS__RQPG_XNACK_FIFO_FULL_MASK 0x00010000L +#define SDMA7_UTCL1_WR_STATUS__RQPG_INVREQ_FIFO_FULL_MASK 0x00020000L +#define SDMA7_UTCL1_WR_STATUS__PAGE_FAULT_MASK 0x00040000L +#define SDMA7_UTCL1_WR_STATUS__PAGE_NULL_MASK 0x00080000L +#define SDMA7_UTCL1_WR_STATUS__REQL2_IDLE_MASK 0x00100000L +#define SDMA7_UTCL1_WR_STATUS__F32_WR_RTR_MASK 0x00200000L +#define SDMA7_UTCL1_WR_STATUS__NEXT_WR_VECTOR_MASK 0x01C00000L +#define SDMA7_UTCL1_WR_STATUS__MERGE_STATE_MASK 0x0E000000L +#define SDMA7_UTCL1_WR_STATUS__RPTR_DATA_FIFO_EMPTY_MASK 0x10000000L +#define SDMA7_UTCL1_WR_STATUS__RPTR_DATA_FIFO_FULL_MASK 0x20000000L +#define SDMA7_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_EMPTY_MASK 0x40000000L +#define SDMA7_UTCL1_WR_STATUS__WRREQ_DATA_FIFO_FULL_MASK 0x80000000L +//SDMA7_UTCL1_INV0 +#define SDMA7_UTCL1_INV0__INV_MIDDLE__SHIFT 0x0 +#define SDMA7_UTCL1_INV0__RD_TIMEOUT__SHIFT 0x1 +#define SDMA7_UTCL1_INV0__WR_TIMEOUT__SHIFT 0x2 +#define SDMA7_UTCL1_INV0__RD_IN_INVADR__SHIFT 0x3 +#define SDMA7_UTCL1_INV0__WR_IN_INVADR__SHIFT 0x4 +#define SDMA7_UTCL1_INV0__PAGE_NULL_SW__SHIFT 0x5 +#define SDMA7_UTCL1_INV0__XNACK_IS_INVADR__SHIFT 0x6 +#define SDMA7_UTCL1_INV0__INVREQ_ENABLE__SHIFT 0x7 +#define SDMA7_UTCL1_INV0__NACK_TIMEOUT_SW__SHIFT 0x8 +#define SDMA7_UTCL1_INV0__NFLUSH_INV_IDLE__SHIFT 0x9 +#define SDMA7_UTCL1_INV0__FLUSH_INV_IDLE__SHIFT 0xa +#define SDMA7_UTCL1_INV0__INV_FLUSHTYPE__SHIFT 0xb +#define SDMA7_UTCL1_INV0__INV_VMID_VEC__SHIFT 0xc +#define SDMA7_UTCL1_INV0__INV_ADDR_HI__SHIFT 0x1c +#define SDMA7_UTCL1_INV0__INV_MIDDLE_MASK 0x00000001L +#define SDMA7_UTCL1_INV0__RD_TIMEOUT_MASK 0x00000002L +#define SDMA7_UTCL1_INV0__WR_TIMEOUT_MASK 0x00000004L +#define SDMA7_UTCL1_INV0__RD_IN_INVADR_MASK 0x00000008L +#define SDMA7_UTCL1_INV0__WR_IN_INVADR_MASK 0x00000010L +#define SDMA7_UTCL1_INV0__PAGE_NULL_SW_MASK 0x00000020L +#define SDMA7_UTCL1_INV0__XNACK_IS_INVADR_MASK 0x00000040L +#define SDMA7_UTCL1_INV0__INVREQ_ENABLE_MASK 0x00000080L +#define SDMA7_UTCL1_INV0__NACK_TIMEOUT_SW_MASK 0x00000100L +#define SDMA7_UTCL1_INV0__NFLUSH_INV_IDLE_MASK 0x00000200L +#define SDMA7_UTCL1_INV0__FLUSH_INV_IDLE_MASK 0x00000400L +#define SDMA7_UTCL1_INV0__INV_FLUSHTYPE_MASK 0x00000800L +#define SDMA7_UTCL1_INV0__INV_VMID_VEC_MASK 0x0FFFF000L +#define SDMA7_UTCL1_INV0__INV_ADDR_HI_MASK 0xF0000000L +//SDMA7_UTCL1_INV1 +#define SDMA7_UTCL1_INV1__INV_ADDR_LO__SHIFT 0x0 +#define SDMA7_UTCL1_INV1__INV_ADDR_LO_MASK 0xFFFFFFFFL +//SDMA7_UTCL1_INV2 +#define SDMA7_UTCL1_INV2__INV_NFLUSH_VMID_VEC__SHIFT 0x0 +#define SDMA7_UTCL1_INV2__INV_NFLUSH_VMID_VEC_MASK 0xFFFFFFFFL +//SDMA7_UTCL1_RD_XNACK0 +#define SDMA7_UTCL1_RD_XNACK0__XNACK_ADDR_LO__SHIFT 0x0 +#define SDMA7_UTCL1_RD_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL +//SDMA7_UTCL1_RD_XNACK1 +#define SDMA7_UTCL1_RD_XNACK1__XNACK_ADDR_HI__SHIFT 0x0 +#define SDMA7_UTCL1_RD_XNACK1__XNACK_VMID__SHIFT 0x4 +#define SDMA7_UTCL1_RD_XNACK1__XNACK_VECTOR__SHIFT 0x8 +#define SDMA7_UTCL1_RD_XNACK1__IS_XNACK__SHIFT 0x1a +#define SDMA7_UTCL1_RD_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL +#define SDMA7_UTCL1_RD_XNACK1__XNACK_VMID_MASK 0x000000F0L +#define 
SDMA7_UTCL1_RD_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L +#define SDMA7_UTCL1_RD_XNACK1__IS_XNACK_MASK 0x0C000000L +//SDMA7_UTCL1_WR_XNACK0 +#define SDMA7_UTCL1_WR_XNACK0__XNACK_ADDR_LO__SHIFT 0x0 +#define SDMA7_UTCL1_WR_XNACK0__XNACK_ADDR_LO_MASK 0xFFFFFFFFL +//SDMA7_UTCL1_WR_XNACK1 +#define SDMA7_UTCL1_WR_XNACK1__XNACK_ADDR_HI__SHIFT 0x0 +#define SDMA7_UTCL1_WR_XNACK1__XNACK_VMID__SHIFT 0x4 +#define SDMA7_UTCL1_WR_XNACK1__XNACK_VECTOR__SHIFT 0x8 +#define SDMA7_UTCL1_WR_XNACK1__IS_XNACK__SHIFT 0x1a +#define SDMA7_UTCL1_WR_XNACK1__XNACK_ADDR_HI_MASK 0x0000000FL +#define SDMA7_UTCL1_WR_XNACK1__XNACK_VMID_MASK 0x000000F0L +#define SDMA7_UTCL1_WR_XNACK1__XNACK_VECTOR_MASK 0x03FFFF00L +#define SDMA7_UTCL1_WR_XNACK1__IS_XNACK_MASK 0x0C000000L +//SDMA7_UTCL1_TIMEOUT +#define SDMA7_UTCL1_TIMEOUT__RD_XNACK_LIMIT__SHIFT 0x0 +#define SDMA7_UTCL1_TIMEOUT__WR_XNACK_LIMIT__SHIFT 0x10 +#define SDMA7_UTCL1_TIMEOUT__RD_XNACK_LIMIT_MASK 0x0000FFFFL +#define SDMA7_UTCL1_TIMEOUT__WR_XNACK_LIMIT_MASK 0xFFFF0000L +//SDMA7_UTCL1_PAGE +#define SDMA7_UTCL1_PAGE__VM_HOLE__SHIFT 0x0 +#define SDMA7_UTCL1_PAGE__REQ_TYPE__SHIFT 0x1 +#define SDMA7_UTCL1_PAGE__USE_MTYPE__SHIFT 0x6 +#define SDMA7_UTCL1_PAGE__USE_PT_SNOOP__SHIFT 0x9 +#define SDMA7_UTCL1_PAGE__VM_HOLE_MASK 0x00000001L +#define SDMA7_UTCL1_PAGE__REQ_TYPE_MASK 0x0000001EL +#define SDMA7_UTCL1_PAGE__USE_MTYPE_MASK 0x000001C0L +#define SDMA7_UTCL1_PAGE__USE_PT_SNOOP_MASK 0x00000200L +//SDMA7_POWER_CNTL_IDLE +#define SDMA7_POWER_CNTL_IDLE__DELAY0__SHIFT 0x0 +#define SDMA7_POWER_CNTL_IDLE__DELAY1__SHIFT 0x10 +#define SDMA7_POWER_CNTL_IDLE__DELAY2__SHIFT 0x18 +#define SDMA7_POWER_CNTL_IDLE__DELAY0_MASK 0x0000FFFFL +#define SDMA7_POWER_CNTL_IDLE__DELAY1_MASK 0x00FF0000L +#define SDMA7_POWER_CNTL_IDLE__DELAY2_MASK 0xFF000000L +//SDMA7_RELAX_ORDERING_LUT +#define SDMA7_RELAX_ORDERING_LUT__RESERVED0__SHIFT 0x0 +#define SDMA7_RELAX_ORDERING_LUT__COPY__SHIFT 0x1 +#define SDMA7_RELAX_ORDERING_LUT__WRITE__SHIFT 0x2 +#define SDMA7_RELAX_ORDERING_LUT__RESERVED3__SHIFT 0x3 +#define SDMA7_RELAX_ORDERING_LUT__RESERVED4__SHIFT 0x4 +#define SDMA7_RELAX_ORDERING_LUT__FENCE__SHIFT 0x5 +#define SDMA7_RELAX_ORDERING_LUT__RESERVED76__SHIFT 0x6 +#define SDMA7_RELAX_ORDERING_LUT__POLL_MEM__SHIFT 0x8 +#define SDMA7_RELAX_ORDERING_LUT__COND_EXE__SHIFT 0x9 +#define SDMA7_RELAX_ORDERING_LUT__ATOMIC__SHIFT 0xa +#define SDMA7_RELAX_ORDERING_LUT__CONST_FILL__SHIFT 0xb +#define SDMA7_RELAX_ORDERING_LUT__PTEPDE__SHIFT 0xc +#define SDMA7_RELAX_ORDERING_LUT__TIMESTAMP__SHIFT 0xd +#define SDMA7_RELAX_ORDERING_LUT__RESERVED__SHIFT 0xe +#define SDMA7_RELAX_ORDERING_LUT__WORLD_SWITCH__SHIFT 0x1b +#define SDMA7_RELAX_ORDERING_LUT__RPTR_WRB__SHIFT 0x1c +#define SDMA7_RELAX_ORDERING_LUT__WPTR_POLL__SHIFT 0x1d +#define SDMA7_RELAX_ORDERING_LUT__IB_FETCH__SHIFT 0x1e +#define SDMA7_RELAX_ORDERING_LUT__RB_FETCH__SHIFT 0x1f +#define SDMA7_RELAX_ORDERING_LUT__RESERVED0_MASK 0x00000001L +#define SDMA7_RELAX_ORDERING_LUT__COPY_MASK 0x00000002L +#define SDMA7_RELAX_ORDERING_LUT__WRITE_MASK 0x00000004L +#define SDMA7_RELAX_ORDERING_LUT__RESERVED3_MASK 0x00000008L +#define SDMA7_RELAX_ORDERING_LUT__RESERVED4_MASK 0x00000010L +#define SDMA7_RELAX_ORDERING_LUT__FENCE_MASK 0x00000020L +#define SDMA7_RELAX_ORDERING_LUT__RESERVED76_MASK 0x000000C0L +#define SDMA7_RELAX_ORDERING_LUT__POLL_MEM_MASK 0x00000100L +#define SDMA7_RELAX_ORDERING_LUT__COND_EXE_MASK 0x00000200L +#define SDMA7_RELAX_ORDERING_LUT__ATOMIC_MASK 0x00000400L +#define SDMA7_RELAX_ORDERING_LUT__CONST_FILL_MASK 0x00000800L +#define 
SDMA7_RELAX_ORDERING_LUT__PTEPDE_MASK 0x00001000L +#define SDMA7_RELAX_ORDERING_LUT__TIMESTAMP_MASK 0x00002000L +#define SDMA7_RELAX_ORDERING_LUT__RESERVED_MASK 0x07FFC000L +#define SDMA7_RELAX_ORDERING_LUT__WORLD_SWITCH_MASK 0x08000000L +#define SDMA7_RELAX_ORDERING_LUT__RPTR_WRB_MASK 0x10000000L +#define SDMA7_RELAX_ORDERING_LUT__WPTR_POLL_MASK 0x20000000L +#define SDMA7_RELAX_ORDERING_LUT__IB_FETCH_MASK 0x40000000L +#define SDMA7_RELAX_ORDERING_LUT__RB_FETCH_MASK 0x80000000L +//SDMA7_CHICKEN_BITS_2 +#define SDMA7_CHICKEN_BITS_2__F32_CMD_PROC_DELAY__SHIFT 0x0 +#define SDMA7_CHICKEN_BITS_2__F32_CMD_PROC_DELAY_MASK 0x0000000FL +//SDMA7_STATUS3_REG +#define SDMA7_STATUS3_REG__CMD_OP_STATUS__SHIFT 0x0 +#define SDMA7_STATUS3_REG__PREV_VM_CMD__SHIFT 0x10 +#define SDMA7_STATUS3_REG__EXCEPTION_IDLE__SHIFT 0x14 +#define SDMA7_STATUS3_REG__QUEUE_ID_MATCH__SHIFT 0x15 +#define SDMA7_STATUS3_REG__INT_QUEUE_ID__SHIFT 0x16 +#define SDMA7_STATUS3_REG__CMD_OP_STATUS_MASK 0x0000FFFFL +#define SDMA7_STATUS3_REG__PREV_VM_CMD_MASK 0x000F0000L +#define SDMA7_STATUS3_REG__EXCEPTION_IDLE_MASK 0x00100000L +#define SDMA7_STATUS3_REG__QUEUE_ID_MATCH_MASK 0x00200000L +#define SDMA7_STATUS3_REG__INT_QUEUE_ID_MASK 0x03C00000L +//SDMA7_PHYSICAL_ADDR_LO +#define SDMA7_PHYSICAL_ADDR_LO__D_VALID__SHIFT 0x0 +#define SDMA7_PHYSICAL_ADDR_LO__DIRTY__SHIFT 0x1 +#define SDMA7_PHYSICAL_ADDR_LO__PHY_VALID__SHIFT 0x2 +#define SDMA7_PHYSICAL_ADDR_LO__ADDR__SHIFT 0xc +#define SDMA7_PHYSICAL_ADDR_LO__D_VALID_MASK 0x00000001L +#define SDMA7_PHYSICAL_ADDR_LO__DIRTY_MASK 0x00000002L +#define SDMA7_PHYSICAL_ADDR_LO__PHY_VALID_MASK 0x00000004L +#define SDMA7_PHYSICAL_ADDR_LO__ADDR_MASK 0xFFFFF000L +//SDMA7_PHYSICAL_ADDR_HI +#define SDMA7_PHYSICAL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA7_PHYSICAL_ADDR_HI__ADDR_MASK 0x0000FFFFL +//SDMA7_PHASE2_QUANTUM +#define SDMA7_PHASE2_QUANTUM__UNIT__SHIFT 0x0 +#define SDMA7_PHASE2_QUANTUM__VALUE__SHIFT 0x8 +#define SDMA7_PHASE2_QUANTUM__PREFER__SHIFT 0x1e +#define SDMA7_PHASE2_QUANTUM__UNIT_MASK 0x0000000FL +#define SDMA7_PHASE2_QUANTUM__VALUE_MASK 0x00FFFF00L +#define SDMA7_PHASE2_QUANTUM__PREFER_MASK 0x40000000L +//SDMA7_ERROR_LOG +#define SDMA7_ERROR_LOG__OVERRIDE__SHIFT 0x0 +#define SDMA7_ERROR_LOG__STATUS__SHIFT 0x10 +#define SDMA7_ERROR_LOG__OVERRIDE_MASK 0x0000FFFFL +#define SDMA7_ERROR_LOG__STATUS_MASK 0xFFFF0000L +//SDMA7_PUB_DUMMY_REG0 +#define SDMA7_PUB_DUMMY_REG0__VALUE__SHIFT 0x0 +#define SDMA7_PUB_DUMMY_REG0__VALUE_MASK 0xFFFFFFFFL +//SDMA7_PUB_DUMMY_REG1 +#define SDMA7_PUB_DUMMY_REG1__VALUE__SHIFT 0x0 +#define SDMA7_PUB_DUMMY_REG1__VALUE_MASK 0xFFFFFFFFL +//SDMA7_PUB_DUMMY_REG2 +#define SDMA7_PUB_DUMMY_REG2__VALUE__SHIFT 0x0 +#define SDMA7_PUB_DUMMY_REG2__VALUE_MASK 0xFFFFFFFFL +//SDMA7_PUB_DUMMY_REG3 +#define SDMA7_PUB_DUMMY_REG3__VALUE__SHIFT 0x0 +#define SDMA7_PUB_DUMMY_REG3__VALUE_MASK 0xFFFFFFFFL +//SDMA7_F32_COUNTER +#define SDMA7_F32_COUNTER__VALUE__SHIFT 0x0 +#define SDMA7_F32_COUNTER__VALUE_MASK 0xFFFFFFFFL +//SDMA7_UNBREAKABLE +#define SDMA7_UNBREAKABLE__VALUE__SHIFT 0x0 +#define SDMA7_UNBREAKABLE__VALUE_MASK 0x00000001L +//SDMA7_PERFMON_CNTL +#define SDMA7_PERFMON_CNTL__PERF_ENABLE0__SHIFT 0x0 +#define SDMA7_PERFMON_CNTL__PERF_CLEAR0__SHIFT 0x1 +#define SDMA7_PERFMON_CNTL__PERF_SEL0__SHIFT 0x2 +#define SDMA7_PERFMON_CNTL__PERF_ENABLE1__SHIFT 0xa +#define SDMA7_PERFMON_CNTL__PERF_CLEAR1__SHIFT 0xb +#define SDMA7_PERFMON_CNTL__PERF_SEL1__SHIFT 0xc +#define SDMA7_PERFMON_CNTL__PERF_ENABLE0_MASK 0x00000001L +#define SDMA7_PERFMON_CNTL__PERF_CLEAR0_MASK 0x00000002L +#define 
SDMA7_PERFMON_CNTL__PERF_SEL0_MASK 0x000003FCL +#define SDMA7_PERFMON_CNTL__PERF_ENABLE1_MASK 0x00000400L +#define SDMA7_PERFMON_CNTL__PERF_CLEAR1_MASK 0x00000800L +#define SDMA7_PERFMON_CNTL__PERF_SEL1_MASK 0x000FF000L +//SDMA7_PERFCOUNTER0_RESULT +#define SDMA7_PERFCOUNTER0_RESULT__PERF_COUNT__SHIFT 0x0 +#define SDMA7_PERFCOUNTER0_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL +//SDMA7_PERFCOUNTER1_RESULT +#define SDMA7_PERFCOUNTER1_RESULT__PERF_COUNT__SHIFT 0x0 +#define SDMA7_PERFCOUNTER1_RESULT__PERF_COUNT_MASK 0xFFFFFFFFL +//SDMA7_PERFCOUNTER_TAG_DELAY_RANGE +#define SDMA7_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW__SHIFT 0x0 +#define SDMA7_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH__SHIFT 0xe +#define SDMA7_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW__SHIFT 0x1c +#define SDMA7_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_LOW_MASK 0x00003FFFL +#define SDMA7_PERFCOUNTER_TAG_DELAY_RANGE__RANGE_HIGH_MASK 0x0FFFC000L +#define SDMA7_PERFCOUNTER_TAG_DELAY_RANGE__SELECT_RW_MASK 0x10000000L +//SDMA7_CRD_CNTL +#define SDMA7_CRD_CNTL__MC_WRREQ_CREDIT__SHIFT 0x7 +#define SDMA7_CRD_CNTL__MC_RDREQ_CREDIT__SHIFT 0xd +#define SDMA7_CRD_CNTL__MC_WRREQ_CREDIT_MASK 0x00001F80L +#define SDMA7_CRD_CNTL__MC_RDREQ_CREDIT_MASK 0x0007E000L +//SDMA7_GPU_IOV_VIOLATION_LOG +#define SDMA7_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS__SHIFT 0x0 +#define SDMA7_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS__SHIFT 0x1 +#define SDMA7_GPU_IOV_VIOLATION_LOG__ADDRESS__SHIFT 0x2 +#define SDMA7_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION__SHIFT 0x14 +#define SDMA7_GPU_IOV_VIOLATION_LOG__VF__SHIFT 0x15 +#define SDMA7_GPU_IOV_VIOLATION_LOG__VFID__SHIFT 0x16 +#define SDMA7_GPU_IOV_VIOLATION_LOG__VIOLATION_STATUS_MASK 0x00000001L +#define SDMA7_GPU_IOV_VIOLATION_LOG__MULTIPLE_VIOLATION_STATUS_MASK 0x00000002L +#define SDMA7_GPU_IOV_VIOLATION_LOG__ADDRESS_MASK 0x000FFFFCL +#define SDMA7_GPU_IOV_VIOLATION_LOG__WRITE_OPERATION_MASK 0x00100000L +#define SDMA7_GPU_IOV_VIOLATION_LOG__VF_MASK 0x00200000L +#define SDMA7_GPU_IOV_VIOLATION_LOG__VFID_MASK 0x03C00000L +//SDMA7_ULV_CNTL +#define SDMA7_ULV_CNTL__HYSTERESIS__SHIFT 0x0 +#define SDMA7_ULV_CNTL__ENTER_ULV_INT_CLR__SHIFT 0x1b +#define SDMA7_ULV_CNTL__EXIT_ULV_INT_CLR__SHIFT 0x1c +#define SDMA7_ULV_CNTL__ENTER_ULV_INT__SHIFT 0x1d +#define SDMA7_ULV_CNTL__EXIT_ULV_INT__SHIFT 0x1e +#define SDMA7_ULV_CNTL__ULV_STATUS__SHIFT 0x1f +#define SDMA7_ULV_CNTL__HYSTERESIS_MASK 0x0000001FL +#define SDMA7_ULV_CNTL__ENTER_ULV_INT_CLR_MASK 0x08000000L +#define SDMA7_ULV_CNTL__EXIT_ULV_INT_CLR_MASK 0x10000000L +#define SDMA7_ULV_CNTL__ENTER_ULV_INT_MASK 0x20000000L +#define SDMA7_ULV_CNTL__EXIT_ULV_INT_MASK 0x40000000L +#define SDMA7_ULV_CNTL__ULV_STATUS_MASK 0x80000000L +//SDMA7_EA_DBIT_ADDR_DATA +#define SDMA7_EA_DBIT_ADDR_DATA__VALUE__SHIFT 0x0 +#define SDMA7_EA_DBIT_ADDR_DATA__VALUE_MASK 0xFFFFFFFFL +//SDMA7_EA_DBIT_ADDR_INDEX +#define SDMA7_EA_DBIT_ADDR_INDEX__VALUE__SHIFT 0x0 +#define SDMA7_EA_DBIT_ADDR_INDEX__VALUE_MASK 0x00000007L +//SDMA7_GPU_IOV_VIOLATION_LOG2 +#define SDMA7_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID__SHIFT 0x0 +#define SDMA7_GPU_IOV_VIOLATION_LOG2__INITIATOR_ID_MASK 0x000000FFL +//SDMA7_GFX_RB_CNTL +#define SDMA7_GFX_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA7_GFX_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA7_GFX_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA7_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA7_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA7_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA7_GFX_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define 
SDMA7_GFX_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA7_GFX_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA7_GFX_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA7_GFX_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA7_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA7_GFX_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA7_GFX_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA7_GFX_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA7_GFX_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA7_GFX_RB_BASE +#define SDMA7_GFX_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA7_GFX_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA7_GFX_RB_BASE_HI +#define SDMA7_GFX_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA7_GFX_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA7_GFX_RB_RPTR +#define SDMA7_GFX_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA7_GFX_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA7_GFX_RB_RPTR_HI +#define SDMA7_GFX_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA7_GFX_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA7_GFX_RB_WPTR +#define SDMA7_GFX_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA7_GFX_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA7_GFX_RB_WPTR_HI +#define SDMA7_GFX_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA7_GFX_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA7_GFX_RB_WPTR_POLL_CNTL +#define SDMA7_GFX_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA7_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA7_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA7_GFX_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA7_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA7_GFX_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA7_GFX_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA7_GFX_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA7_GFX_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA7_GFX_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA7_GFX_RB_RPTR_ADDR_HI +#define SDMA7_GFX_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA7_GFX_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA7_GFX_RB_RPTR_ADDR_LO +#define SDMA7_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA7_GFX_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA7_GFX_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA7_GFX_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA7_GFX_IB_CNTL +#define SDMA7_GFX_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA7_GFX_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA7_GFX_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA7_GFX_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA7_GFX_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA7_GFX_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA7_GFX_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA7_GFX_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA7_GFX_IB_RPTR +#define SDMA7_GFX_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA7_GFX_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA7_GFX_IB_OFFSET +#define SDMA7_GFX_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA7_GFX_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA7_GFX_IB_BASE_LO +#define SDMA7_GFX_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA7_GFX_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA7_GFX_IB_BASE_HI +#define SDMA7_GFX_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA7_GFX_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA7_GFX_IB_SIZE +#define SDMA7_GFX_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA7_GFX_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA7_GFX_SKIP_CNTL +#define SDMA7_GFX_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA7_GFX_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL 
+//SDMA7_GFX_CONTEXT_STATUS +#define SDMA7_GFX_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA7_GFX_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA7_GFX_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA7_GFX_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA7_GFX_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA7_GFX_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA7_GFX_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA7_GFX_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA7_GFX_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA7_GFX_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA7_GFX_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA7_GFX_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA7_GFX_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA7_GFX_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA7_GFX_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA7_GFX_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA7_GFX_DOORBELL +#define SDMA7_GFX_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA7_GFX_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA7_GFX_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA7_GFX_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA7_GFX_CONTEXT_CNTL +#define SDMA7_GFX_CONTEXT_CNTL__RESUME_CTX__SHIFT 0x10 +#define SDMA7_GFX_CONTEXT_CNTL__RESUME_CTX_MASK 0x00010000L +//SDMA7_GFX_STATUS +#define SDMA7_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA7_GFX_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA7_GFX_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA7_GFX_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA7_GFX_DOORBELL_LOG +#define SDMA7_GFX_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA7_GFX_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA7_GFX_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA7_GFX_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA7_GFX_WATERMARK +#define SDMA7_GFX_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA7_GFX_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA7_GFX_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA7_GFX_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA7_GFX_DOORBELL_OFFSET +#define SDMA7_GFX_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA7_GFX_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA7_GFX_CSA_ADDR_LO +#define SDMA7_GFX_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA7_GFX_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA7_GFX_CSA_ADDR_HI +#define SDMA7_GFX_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA7_GFX_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA7_GFX_IB_SUB_REMAIN +#define SDMA7_GFX_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA7_GFX_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA7_GFX_PREEMPT +#define SDMA7_GFX_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA7_GFX_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA7_GFX_DUMMY_REG +#define SDMA7_GFX_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA7_GFX_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA7_GFX_RB_WPTR_POLL_ADDR_HI +#define SDMA7_GFX_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA7_GFX_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA7_GFX_RB_WPTR_POLL_ADDR_LO +#define SDMA7_GFX_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA7_GFX_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA7_GFX_RB_AQL_CNTL +#define SDMA7_GFX_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA7_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA7_GFX_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA7_GFX_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA7_GFX_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define 
SDMA7_GFX_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA7_GFX_MINOR_PTR_UPDATE +#define SDMA7_GFX_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA7_GFX_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA7_GFX_MIDCMD_DATA0 +#define SDMA7_GFX_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA7_GFX_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA7_GFX_MIDCMD_DATA1 +#define SDMA7_GFX_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA7_GFX_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA7_GFX_MIDCMD_DATA2 +#define SDMA7_GFX_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA7_GFX_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA7_GFX_MIDCMD_DATA3 +#define SDMA7_GFX_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA7_GFX_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA7_GFX_MIDCMD_DATA4 +#define SDMA7_GFX_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA7_GFX_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA7_GFX_MIDCMD_DATA5 +#define SDMA7_GFX_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA7_GFX_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA7_GFX_MIDCMD_DATA6 +#define SDMA7_GFX_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA7_GFX_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA7_GFX_MIDCMD_DATA7 +#define SDMA7_GFX_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA7_GFX_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA7_GFX_MIDCMD_DATA8 +#define SDMA7_GFX_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA7_GFX_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA7_GFX_MIDCMD_CNTL +#define SDMA7_GFX_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA7_GFX_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA7_GFX_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA7_GFX_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA7_GFX_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA7_GFX_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA7_GFX_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA7_GFX_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA7_PAGE_RB_CNTL +#define SDMA7_PAGE_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA7_PAGE_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA7_PAGE_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA7_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA7_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA7_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA7_PAGE_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA7_PAGE_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA7_PAGE_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA7_PAGE_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA7_PAGE_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA7_PAGE_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA7_PAGE_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA7_PAGE_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA7_PAGE_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA7_PAGE_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA7_PAGE_RB_BASE +#define SDMA7_PAGE_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA7_PAGE_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA7_PAGE_RB_BASE_HI +#define SDMA7_PAGE_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA7_PAGE_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA7_PAGE_RB_RPTR +#define SDMA7_PAGE_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA7_PAGE_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA7_PAGE_RB_RPTR_HI +#define SDMA7_PAGE_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA7_PAGE_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA7_PAGE_RB_WPTR +#define SDMA7_PAGE_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA7_PAGE_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA7_PAGE_RB_WPTR_HI +#define SDMA7_PAGE_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define 
SDMA7_PAGE_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA7_PAGE_RB_WPTR_POLL_CNTL +#define SDMA7_PAGE_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA7_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA7_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA7_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA7_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA7_PAGE_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA7_PAGE_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA7_PAGE_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA7_PAGE_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA7_PAGE_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA7_PAGE_RB_RPTR_ADDR_HI +#define SDMA7_PAGE_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA7_PAGE_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA7_PAGE_RB_RPTR_ADDR_LO +#define SDMA7_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA7_PAGE_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA7_PAGE_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA7_PAGE_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA7_PAGE_IB_CNTL +#define SDMA7_PAGE_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA7_PAGE_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA7_PAGE_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA7_PAGE_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA7_PAGE_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA7_PAGE_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA7_PAGE_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA7_PAGE_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA7_PAGE_IB_RPTR +#define SDMA7_PAGE_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA7_PAGE_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA7_PAGE_IB_OFFSET +#define SDMA7_PAGE_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA7_PAGE_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA7_PAGE_IB_BASE_LO +#define SDMA7_PAGE_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA7_PAGE_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA7_PAGE_IB_BASE_HI +#define SDMA7_PAGE_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA7_PAGE_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA7_PAGE_IB_SIZE +#define SDMA7_PAGE_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA7_PAGE_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA7_PAGE_SKIP_CNTL +#define SDMA7_PAGE_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA7_PAGE_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA7_PAGE_CONTEXT_STATUS +#define SDMA7_PAGE_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA7_PAGE_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA7_PAGE_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA7_PAGE_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA7_PAGE_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA7_PAGE_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA7_PAGE_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA7_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA7_PAGE_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA7_PAGE_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA7_PAGE_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA7_PAGE_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA7_PAGE_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA7_PAGE_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA7_PAGE_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA7_PAGE_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA7_PAGE_DOORBELL +#define SDMA7_PAGE_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA7_PAGE_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA7_PAGE_DOORBELL__ENABLE_MASK 0x10000000L +#define 
SDMA7_PAGE_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA7_PAGE_STATUS +#define SDMA7_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA7_PAGE_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA7_PAGE_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA7_PAGE_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA7_PAGE_DOORBELL_LOG +#define SDMA7_PAGE_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA7_PAGE_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA7_PAGE_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA7_PAGE_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA7_PAGE_WATERMARK +#define SDMA7_PAGE_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA7_PAGE_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA7_PAGE_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA7_PAGE_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA7_PAGE_DOORBELL_OFFSET +#define SDMA7_PAGE_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA7_PAGE_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA7_PAGE_CSA_ADDR_LO +#define SDMA7_PAGE_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA7_PAGE_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA7_PAGE_CSA_ADDR_HI +#define SDMA7_PAGE_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA7_PAGE_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA7_PAGE_IB_SUB_REMAIN +#define SDMA7_PAGE_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA7_PAGE_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA7_PAGE_PREEMPT +#define SDMA7_PAGE_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA7_PAGE_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA7_PAGE_DUMMY_REG +#define SDMA7_PAGE_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA7_PAGE_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA7_PAGE_RB_WPTR_POLL_ADDR_HI +#define SDMA7_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA7_PAGE_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA7_PAGE_RB_WPTR_POLL_ADDR_LO +#define SDMA7_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA7_PAGE_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA7_PAGE_RB_AQL_CNTL +#define SDMA7_PAGE_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA7_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA7_PAGE_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA7_PAGE_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA7_PAGE_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA7_PAGE_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA7_PAGE_MINOR_PTR_UPDATE +#define SDMA7_PAGE_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA7_PAGE_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA7_PAGE_MIDCMD_DATA0 +#define SDMA7_PAGE_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA7_PAGE_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA7_PAGE_MIDCMD_DATA1 +#define SDMA7_PAGE_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA7_PAGE_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA7_PAGE_MIDCMD_DATA2 +#define SDMA7_PAGE_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA7_PAGE_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA7_PAGE_MIDCMD_DATA3 +#define SDMA7_PAGE_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA7_PAGE_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA7_PAGE_MIDCMD_DATA4 +#define SDMA7_PAGE_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA7_PAGE_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA7_PAGE_MIDCMD_DATA5 +#define SDMA7_PAGE_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA7_PAGE_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA7_PAGE_MIDCMD_DATA6 +#define SDMA7_PAGE_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA7_PAGE_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA7_PAGE_MIDCMD_DATA7 +#define SDMA7_PAGE_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA7_PAGE_MIDCMD_DATA7__DATA7_MASK 
0xFFFFFFFFL +//SDMA7_PAGE_MIDCMD_DATA8 +#define SDMA7_PAGE_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA7_PAGE_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA7_PAGE_MIDCMD_CNTL +#define SDMA7_PAGE_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA7_PAGE_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA7_PAGE_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA7_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA7_PAGE_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA7_PAGE_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA7_PAGE_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA7_PAGE_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA7_RLC0_RB_CNTL +#define SDMA7_RLC0_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA7_RLC0_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA7_RLC0_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA7_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA7_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA7_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA7_RLC0_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA7_RLC0_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA7_RLC0_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA7_RLC0_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA7_RLC0_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA7_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA7_RLC0_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA7_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA7_RLC0_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA7_RLC0_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA7_RLC0_RB_BASE +#define SDMA7_RLC0_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA7_RLC0_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA7_RLC0_RB_BASE_HI +#define SDMA7_RLC0_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA7_RLC0_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA7_RLC0_RB_RPTR +#define SDMA7_RLC0_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA7_RLC0_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA7_RLC0_RB_RPTR_HI +#define SDMA7_RLC0_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA7_RLC0_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA7_RLC0_RB_WPTR +#define SDMA7_RLC0_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA7_RLC0_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA7_RLC0_RB_WPTR_HI +#define SDMA7_RLC0_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA7_RLC0_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA7_RLC0_RB_WPTR_POLL_CNTL +#define SDMA7_RLC0_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA7_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA7_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA7_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA7_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA7_RLC0_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA7_RLC0_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA7_RLC0_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA7_RLC0_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA7_RLC0_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA7_RLC0_RB_RPTR_ADDR_HI +#define SDMA7_RLC0_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA7_RLC0_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA7_RLC0_RB_RPTR_ADDR_LO +#define SDMA7_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA7_RLC0_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA7_RLC0_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA7_RLC0_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA7_RLC0_IB_CNTL +#define SDMA7_RLC0_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define 
SDMA7_RLC0_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA7_RLC0_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA7_RLC0_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA7_RLC0_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA7_RLC0_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA7_RLC0_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA7_RLC0_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA7_RLC0_IB_RPTR +#define SDMA7_RLC0_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA7_RLC0_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA7_RLC0_IB_OFFSET +#define SDMA7_RLC0_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA7_RLC0_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA7_RLC0_IB_BASE_LO +#define SDMA7_RLC0_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA7_RLC0_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA7_RLC0_IB_BASE_HI +#define SDMA7_RLC0_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA7_RLC0_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA7_RLC0_IB_SIZE +#define SDMA7_RLC0_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA7_RLC0_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA7_RLC0_SKIP_CNTL +#define SDMA7_RLC0_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA7_RLC0_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA7_RLC0_CONTEXT_STATUS +#define SDMA7_RLC0_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA7_RLC0_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA7_RLC0_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA7_RLC0_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA7_RLC0_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA7_RLC0_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA7_RLC0_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA7_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA7_RLC0_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA7_RLC0_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA7_RLC0_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA7_RLC0_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA7_RLC0_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA7_RLC0_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA7_RLC0_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA7_RLC0_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA7_RLC0_DOORBELL +#define SDMA7_RLC0_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA7_RLC0_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA7_RLC0_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA7_RLC0_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA7_RLC0_STATUS +#define SDMA7_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA7_RLC0_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA7_RLC0_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA7_RLC0_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA7_RLC0_DOORBELL_LOG +#define SDMA7_RLC0_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA7_RLC0_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA7_RLC0_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA7_RLC0_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA7_RLC0_WATERMARK +#define SDMA7_RLC0_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA7_RLC0_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA7_RLC0_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA7_RLC0_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA7_RLC0_DOORBELL_OFFSET +#define SDMA7_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA7_RLC0_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA7_RLC0_CSA_ADDR_LO +#define SDMA7_RLC0_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA7_RLC0_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA7_RLC0_CSA_ADDR_HI +#define SDMA7_RLC0_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA7_RLC0_CSA_ADDR_HI__ADDR_MASK 
0xFFFFFFFFL +//SDMA7_RLC0_IB_SUB_REMAIN +#define SDMA7_RLC0_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA7_RLC0_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA7_RLC0_PREEMPT +#define SDMA7_RLC0_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA7_RLC0_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA7_RLC0_DUMMY_REG +#define SDMA7_RLC0_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA7_RLC0_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA7_RLC0_RB_WPTR_POLL_ADDR_HI +#define SDMA7_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA7_RLC0_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA7_RLC0_RB_WPTR_POLL_ADDR_LO +#define SDMA7_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA7_RLC0_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA7_RLC0_RB_AQL_CNTL +#define SDMA7_RLC0_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA7_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA7_RLC0_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA7_RLC0_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA7_RLC0_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA7_RLC0_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA7_RLC0_MINOR_PTR_UPDATE +#define SDMA7_RLC0_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA7_RLC0_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA7_RLC0_MIDCMD_DATA0 +#define SDMA7_RLC0_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA7_RLC0_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA7_RLC0_MIDCMD_DATA1 +#define SDMA7_RLC0_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA7_RLC0_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA7_RLC0_MIDCMD_DATA2 +#define SDMA7_RLC0_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA7_RLC0_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA7_RLC0_MIDCMD_DATA3 +#define SDMA7_RLC0_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA7_RLC0_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA7_RLC0_MIDCMD_DATA4 +#define SDMA7_RLC0_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA7_RLC0_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA7_RLC0_MIDCMD_DATA5 +#define SDMA7_RLC0_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA7_RLC0_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA7_RLC0_MIDCMD_DATA6 +#define SDMA7_RLC0_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA7_RLC0_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA7_RLC0_MIDCMD_DATA7 +#define SDMA7_RLC0_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA7_RLC0_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA7_RLC0_MIDCMD_DATA8 +#define SDMA7_RLC0_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA7_RLC0_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA7_RLC0_MIDCMD_CNTL +#define SDMA7_RLC0_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA7_RLC0_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA7_RLC0_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA7_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA7_RLC0_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA7_RLC0_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA7_RLC0_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA7_RLC0_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA7_RLC1_RB_CNTL +#define SDMA7_RLC1_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA7_RLC1_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA7_RLC1_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA7_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA7_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA7_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA7_RLC1_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA7_RLC1_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA7_RLC1_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA7_RLC1_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define 
SDMA7_RLC1_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA7_RLC1_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA7_RLC1_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA7_RLC1_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA7_RLC1_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA7_RLC1_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA7_RLC1_RB_BASE +#define SDMA7_RLC1_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA7_RLC1_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA7_RLC1_RB_BASE_HI +#define SDMA7_RLC1_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA7_RLC1_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA7_RLC1_RB_RPTR +#define SDMA7_RLC1_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA7_RLC1_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA7_RLC1_RB_RPTR_HI +#define SDMA7_RLC1_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA7_RLC1_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA7_RLC1_RB_WPTR +#define SDMA7_RLC1_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA7_RLC1_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA7_RLC1_RB_WPTR_HI +#define SDMA7_RLC1_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA7_RLC1_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA7_RLC1_RB_WPTR_POLL_CNTL +#define SDMA7_RLC1_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA7_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA7_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA7_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA7_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA7_RLC1_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA7_RLC1_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA7_RLC1_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA7_RLC1_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA7_RLC1_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA7_RLC1_RB_RPTR_ADDR_HI +#define SDMA7_RLC1_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA7_RLC1_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA7_RLC1_RB_RPTR_ADDR_LO +#define SDMA7_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA7_RLC1_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA7_RLC1_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA7_RLC1_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA7_RLC1_IB_CNTL +#define SDMA7_RLC1_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA7_RLC1_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA7_RLC1_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA7_RLC1_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA7_RLC1_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA7_RLC1_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA7_RLC1_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA7_RLC1_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA7_RLC1_IB_RPTR +#define SDMA7_RLC1_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA7_RLC1_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA7_RLC1_IB_OFFSET +#define SDMA7_RLC1_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA7_RLC1_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA7_RLC1_IB_BASE_LO +#define SDMA7_RLC1_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA7_RLC1_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA7_RLC1_IB_BASE_HI +#define SDMA7_RLC1_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA7_RLC1_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA7_RLC1_IB_SIZE +#define SDMA7_RLC1_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA7_RLC1_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA7_RLC1_SKIP_CNTL +#define SDMA7_RLC1_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA7_RLC1_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA7_RLC1_CONTEXT_STATUS +#define SDMA7_RLC1_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define 
SDMA7_RLC1_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA7_RLC1_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA7_RLC1_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA7_RLC1_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA7_RLC1_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA7_RLC1_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA7_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA7_RLC1_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA7_RLC1_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA7_RLC1_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA7_RLC1_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA7_RLC1_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA7_RLC1_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA7_RLC1_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA7_RLC1_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA7_RLC1_DOORBELL +#define SDMA7_RLC1_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA7_RLC1_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA7_RLC1_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA7_RLC1_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA7_RLC1_STATUS +#define SDMA7_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA7_RLC1_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA7_RLC1_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA7_RLC1_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA7_RLC1_DOORBELL_LOG +#define SDMA7_RLC1_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA7_RLC1_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA7_RLC1_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA7_RLC1_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA7_RLC1_WATERMARK +#define SDMA7_RLC1_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA7_RLC1_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA7_RLC1_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA7_RLC1_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA7_RLC1_DOORBELL_OFFSET +#define SDMA7_RLC1_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA7_RLC1_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA7_RLC1_CSA_ADDR_LO +#define SDMA7_RLC1_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA7_RLC1_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA7_RLC1_CSA_ADDR_HI +#define SDMA7_RLC1_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA7_RLC1_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA7_RLC1_IB_SUB_REMAIN +#define SDMA7_RLC1_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA7_RLC1_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA7_RLC1_PREEMPT +#define SDMA7_RLC1_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA7_RLC1_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA7_RLC1_DUMMY_REG +#define SDMA7_RLC1_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA7_RLC1_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA7_RLC1_RB_WPTR_POLL_ADDR_HI +#define SDMA7_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA7_RLC1_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA7_RLC1_RB_WPTR_POLL_ADDR_LO +#define SDMA7_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA7_RLC1_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA7_RLC1_RB_AQL_CNTL +#define SDMA7_RLC1_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA7_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA7_RLC1_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA7_RLC1_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA7_RLC1_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA7_RLC1_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA7_RLC1_MINOR_PTR_UPDATE +#define SDMA7_RLC1_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA7_RLC1_MINOR_PTR_UPDATE__ENABLE_MASK 
0x00000001L +//SDMA7_RLC1_MIDCMD_DATA0 +#define SDMA7_RLC1_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA7_RLC1_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA7_RLC1_MIDCMD_DATA1 +#define SDMA7_RLC1_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA7_RLC1_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA7_RLC1_MIDCMD_DATA2 +#define SDMA7_RLC1_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA7_RLC1_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA7_RLC1_MIDCMD_DATA3 +#define SDMA7_RLC1_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA7_RLC1_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA7_RLC1_MIDCMD_DATA4 +#define SDMA7_RLC1_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA7_RLC1_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA7_RLC1_MIDCMD_DATA5 +#define SDMA7_RLC1_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA7_RLC1_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA7_RLC1_MIDCMD_DATA6 +#define SDMA7_RLC1_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA7_RLC1_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA7_RLC1_MIDCMD_DATA7 +#define SDMA7_RLC1_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA7_RLC1_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA7_RLC1_MIDCMD_DATA8 +#define SDMA7_RLC1_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA7_RLC1_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA7_RLC1_MIDCMD_CNTL +#define SDMA7_RLC1_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA7_RLC1_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA7_RLC1_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA7_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA7_RLC1_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA7_RLC1_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA7_RLC1_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA7_RLC1_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA7_RLC2_RB_CNTL +#define SDMA7_RLC2_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA7_RLC2_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA7_RLC2_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA7_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA7_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA7_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA7_RLC2_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA7_RLC2_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA7_RLC2_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA7_RLC2_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA7_RLC2_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA7_RLC2_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA7_RLC2_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA7_RLC2_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA7_RLC2_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA7_RLC2_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA7_RLC2_RB_BASE +#define SDMA7_RLC2_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA7_RLC2_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA7_RLC2_RB_BASE_HI +#define SDMA7_RLC2_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA7_RLC2_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA7_RLC2_RB_RPTR +#define SDMA7_RLC2_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA7_RLC2_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA7_RLC2_RB_RPTR_HI +#define SDMA7_RLC2_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA7_RLC2_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA7_RLC2_RB_WPTR +#define SDMA7_RLC2_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA7_RLC2_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA7_RLC2_RB_WPTR_HI +#define SDMA7_RLC2_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA7_RLC2_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA7_RLC2_RB_WPTR_POLL_CNTL +#define SDMA7_RLC2_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define 
SDMA7_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA7_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA7_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA7_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA7_RLC2_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA7_RLC2_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA7_RLC2_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA7_RLC2_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA7_RLC2_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA7_RLC2_RB_RPTR_ADDR_HI +#define SDMA7_RLC2_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA7_RLC2_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA7_RLC2_RB_RPTR_ADDR_LO +#define SDMA7_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA7_RLC2_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA7_RLC2_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA7_RLC2_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA7_RLC2_IB_CNTL +#define SDMA7_RLC2_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA7_RLC2_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA7_RLC2_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA7_RLC2_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA7_RLC2_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA7_RLC2_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA7_RLC2_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA7_RLC2_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA7_RLC2_IB_RPTR +#define SDMA7_RLC2_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA7_RLC2_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA7_RLC2_IB_OFFSET +#define SDMA7_RLC2_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA7_RLC2_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA7_RLC2_IB_BASE_LO +#define SDMA7_RLC2_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA7_RLC2_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA7_RLC2_IB_BASE_HI +#define SDMA7_RLC2_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA7_RLC2_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA7_RLC2_IB_SIZE +#define SDMA7_RLC2_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA7_RLC2_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA7_RLC2_SKIP_CNTL +#define SDMA7_RLC2_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA7_RLC2_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA7_RLC2_CONTEXT_STATUS +#define SDMA7_RLC2_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA7_RLC2_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA7_RLC2_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA7_RLC2_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA7_RLC2_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA7_RLC2_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA7_RLC2_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA7_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA7_RLC2_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA7_RLC2_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA7_RLC2_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA7_RLC2_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA7_RLC2_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA7_RLC2_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA7_RLC2_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA7_RLC2_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA7_RLC2_DOORBELL +#define SDMA7_RLC2_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA7_RLC2_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA7_RLC2_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA7_RLC2_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA7_RLC2_STATUS +#define SDMA7_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define 
SDMA7_RLC2_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA7_RLC2_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA7_RLC2_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA7_RLC2_DOORBELL_LOG +#define SDMA7_RLC2_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA7_RLC2_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA7_RLC2_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA7_RLC2_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA7_RLC2_WATERMARK +#define SDMA7_RLC2_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA7_RLC2_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA7_RLC2_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA7_RLC2_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA7_RLC2_DOORBELL_OFFSET +#define SDMA7_RLC2_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA7_RLC2_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA7_RLC2_CSA_ADDR_LO +#define SDMA7_RLC2_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA7_RLC2_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA7_RLC2_CSA_ADDR_HI +#define SDMA7_RLC2_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA7_RLC2_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA7_RLC2_IB_SUB_REMAIN +#define SDMA7_RLC2_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA7_RLC2_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA7_RLC2_PREEMPT +#define SDMA7_RLC2_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA7_RLC2_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA7_RLC2_DUMMY_REG +#define SDMA7_RLC2_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA7_RLC2_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA7_RLC2_RB_WPTR_POLL_ADDR_HI +#define SDMA7_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA7_RLC2_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA7_RLC2_RB_WPTR_POLL_ADDR_LO +#define SDMA7_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA7_RLC2_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA7_RLC2_RB_AQL_CNTL +#define SDMA7_RLC2_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA7_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA7_RLC2_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA7_RLC2_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA7_RLC2_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA7_RLC2_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA7_RLC2_MINOR_PTR_UPDATE +#define SDMA7_RLC2_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA7_RLC2_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA7_RLC2_MIDCMD_DATA0 +#define SDMA7_RLC2_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA7_RLC2_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA7_RLC2_MIDCMD_DATA1 +#define SDMA7_RLC2_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA7_RLC2_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA7_RLC2_MIDCMD_DATA2 +#define SDMA7_RLC2_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA7_RLC2_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA7_RLC2_MIDCMD_DATA3 +#define SDMA7_RLC2_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA7_RLC2_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA7_RLC2_MIDCMD_DATA4 +#define SDMA7_RLC2_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA7_RLC2_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA7_RLC2_MIDCMD_DATA5 +#define SDMA7_RLC2_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA7_RLC2_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA7_RLC2_MIDCMD_DATA6 +#define SDMA7_RLC2_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA7_RLC2_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA7_RLC2_MIDCMD_DATA7 +#define SDMA7_RLC2_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA7_RLC2_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA7_RLC2_MIDCMD_DATA8 +#define SDMA7_RLC2_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA7_RLC2_MIDCMD_DATA8__DATA8_MASK 
0xFFFFFFFFL +//SDMA7_RLC2_MIDCMD_CNTL +#define SDMA7_RLC2_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA7_RLC2_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA7_RLC2_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA7_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA7_RLC2_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA7_RLC2_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA7_RLC2_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA7_RLC2_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA7_RLC3_RB_CNTL +#define SDMA7_RLC3_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA7_RLC3_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA7_RLC3_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA7_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA7_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA7_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA7_RLC3_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA7_RLC3_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA7_RLC3_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA7_RLC3_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA7_RLC3_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA7_RLC3_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA7_RLC3_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA7_RLC3_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA7_RLC3_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA7_RLC3_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA7_RLC3_RB_BASE +#define SDMA7_RLC3_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA7_RLC3_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA7_RLC3_RB_BASE_HI +#define SDMA7_RLC3_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA7_RLC3_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA7_RLC3_RB_RPTR +#define SDMA7_RLC3_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA7_RLC3_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA7_RLC3_RB_RPTR_HI +#define SDMA7_RLC3_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA7_RLC3_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA7_RLC3_RB_WPTR +#define SDMA7_RLC3_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA7_RLC3_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA7_RLC3_RB_WPTR_HI +#define SDMA7_RLC3_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA7_RLC3_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA7_RLC3_RB_WPTR_POLL_CNTL +#define SDMA7_RLC3_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA7_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA7_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA7_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA7_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA7_RLC3_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA7_RLC3_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA7_RLC3_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA7_RLC3_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA7_RLC3_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA7_RLC3_RB_RPTR_ADDR_HI +#define SDMA7_RLC3_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA7_RLC3_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA7_RLC3_RB_RPTR_ADDR_LO +#define SDMA7_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA7_RLC3_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA7_RLC3_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA7_RLC3_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA7_RLC3_IB_CNTL +#define SDMA7_RLC3_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA7_RLC3_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA7_RLC3_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA7_RLC3_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define 
SDMA7_RLC3_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA7_RLC3_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA7_RLC3_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA7_RLC3_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA7_RLC3_IB_RPTR +#define SDMA7_RLC3_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA7_RLC3_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA7_RLC3_IB_OFFSET +#define SDMA7_RLC3_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA7_RLC3_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA7_RLC3_IB_BASE_LO +#define SDMA7_RLC3_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA7_RLC3_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA7_RLC3_IB_BASE_HI +#define SDMA7_RLC3_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA7_RLC3_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA7_RLC3_IB_SIZE +#define SDMA7_RLC3_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA7_RLC3_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA7_RLC3_SKIP_CNTL +#define SDMA7_RLC3_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA7_RLC3_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA7_RLC3_CONTEXT_STATUS +#define SDMA7_RLC3_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA7_RLC3_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA7_RLC3_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA7_RLC3_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA7_RLC3_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA7_RLC3_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA7_RLC3_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA7_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA7_RLC3_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA7_RLC3_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA7_RLC3_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA7_RLC3_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA7_RLC3_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA7_RLC3_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA7_RLC3_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA7_RLC3_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA7_RLC3_DOORBELL +#define SDMA7_RLC3_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA7_RLC3_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA7_RLC3_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA7_RLC3_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA7_RLC3_STATUS +#define SDMA7_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA7_RLC3_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA7_RLC3_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA7_RLC3_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA7_RLC3_DOORBELL_LOG +#define SDMA7_RLC3_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA7_RLC3_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA7_RLC3_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA7_RLC3_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA7_RLC3_WATERMARK +#define SDMA7_RLC3_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA7_RLC3_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA7_RLC3_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA7_RLC3_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA7_RLC3_DOORBELL_OFFSET +#define SDMA7_RLC3_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA7_RLC3_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA7_RLC3_CSA_ADDR_LO +#define SDMA7_RLC3_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA7_RLC3_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA7_RLC3_CSA_ADDR_HI +#define SDMA7_RLC3_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA7_RLC3_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA7_RLC3_IB_SUB_REMAIN +#define SDMA7_RLC3_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA7_RLC3_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL 
+//SDMA7_RLC3_PREEMPT +#define SDMA7_RLC3_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA7_RLC3_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA7_RLC3_DUMMY_REG +#define SDMA7_RLC3_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA7_RLC3_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA7_RLC3_RB_WPTR_POLL_ADDR_HI +#define SDMA7_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA7_RLC3_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA7_RLC3_RB_WPTR_POLL_ADDR_LO +#define SDMA7_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA7_RLC3_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA7_RLC3_RB_AQL_CNTL +#define SDMA7_RLC3_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA7_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA7_RLC3_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA7_RLC3_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA7_RLC3_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA7_RLC3_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA7_RLC3_MINOR_PTR_UPDATE +#define SDMA7_RLC3_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA7_RLC3_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA7_RLC3_MIDCMD_DATA0 +#define SDMA7_RLC3_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA7_RLC3_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA7_RLC3_MIDCMD_DATA1 +#define SDMA7_RLC3_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA7_RLC3_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA7_RLC3_MIDCMD_DATA2 +#define SDMA7_RLC3_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA7_RLC3_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA7_RLC3_MIDCMD_DATA3 +#define SDMA7_RLC3_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA7_RLC3_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA7_RLC3_MIDCMD_DATA4 +#define SDMA7_RLC3_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA7_RLC3_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA7_RLC3_MIDCMD_DATA5 +#define SDMA7_RLC3_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA7_RLC3_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA7_RLC3_MIDCMD_DATA6 +#define SDMA7_RLC3_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA7_RLC3_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA7_RLC3_MIDCMD_DATA7 +#define SDMA7_RLC3_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA7_RLC3_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA7_RLC3_MIDCMD_DATA8 +#define SDMA7_RLC3_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA7_RLC3_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA7_RLC3_MIDCMD_CNTL +#define SDMA7_RLC3_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA7_RLC3_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA7_RLC3_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA7_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA7_RLC3_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA7_RLC3_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA7_RLC3_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA7_RLC3_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA7_RLC4_RB_CNTL +#define SDMA7_RLC4_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA7_RLC4_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA7_RLC4_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA7_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA7_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA7_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA7_RLC4_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA7_RLC4_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA7_RLC4_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA7_RLC4_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA7_RLC4_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA7_RLC4_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define 
SDMA7_RLC4_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA7_RLC4_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA7_RLC4_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA7_RLC4_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA7_RLC4_RB_BASE +#define SDMA7_RLC4_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA7_RLC4_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA7_RLC4_RB_BASE_HI +#define SDMA7_RLC4_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA7_RLC4_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA7_RLC4_RB_RPTR +#define SDMA7_RLC4_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA7_RLC4_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA7_RLC4_RB_RPTR_HI +#define SDMA7_RLC4_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA7_RLC4_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA7_RLC4_RB_WPTR +#define SDMA7_RLC4_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA7_RLC4_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA7_RLC4_RB_WPTR_HI +#define SDMA7_RLC4_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA7_RLC4_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA7_RLC4_RB_WPTR_POLL_CNTL +#define SDMA7_RLC4_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA7_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA7_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA7_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA7_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA7_RLC4_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA7_RLC4_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA7_RLC4_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA7_RLC4_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA7_RLC4_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA7_RLC4_RB_RPTR_ADDR_HI +#define SDMA7_RLC4_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA7_RLC4_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA7_RLC4_RB_RPTR_ADDR_LO +#define SDMA7_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA7_RLC4_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA7_RLC4_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA7_RLC4_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA7_RLC4_IB_CNTL +#define SDMA7_RLC4_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA7_RLC4_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA7_RLC4_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA7_RLC4_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA7_RLC4_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA7_RLC4_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA7_RLC4_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA7_RLC4_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA7_RLC4_IB_RPTR +#define SDMA7_RLC4_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA7_RLC4_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA7_RLC4_IB_OFFSET +#define SDMA7_RLC4_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA7_RLC4_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA7_RLC4_IB_BASE_LO +#define SDMA7_RLC4_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA7_RLC4_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA7_RLC4_IB_BASE_HI +#define SDMA7_RLC4_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA7_RLC4_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA7_RLC4_IB_SIZE +#define SDMA7_RLC4_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA7_RLC4_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA7_RLC4_SKIP_CNTL +#define SDMA7_RLC4_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA7_RLC4_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA7_RLC4_CONTEXT_STATUS +#define SDMA7_RLC4_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA7_RLC4_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA7_RLC4_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define 
SDMA7_RLC4_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA7_RLC4_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA7_RLC4_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA7_RLC4_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA7_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA7_RLC4_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA7_RLC4_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA7_RLC4_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA7_RLC4_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA7_RLC4_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA7_RLC4_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA7_RLC4_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA7_RLC4_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA7_RLC4_DOORBELL +#define SDMA7_RLC4_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA7_RLC4_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA7_RLC4_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA7_RLC4_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA7_RLC4_STATUS +#define SDMA7_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA7_RLC4_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA7_RLC4_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA7_RLC4_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA7_RLC4_DOORBELL_LOG +#define SDMA7_RLC4_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA7_RLC4_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA7_RLC4_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA7_RLC4_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA7_RLC4_WATERMARK +#define SDMA7_RLC4_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA7_RLC4_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA7_RLC4_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA7_RLC4_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA7_RLC4_DOORBELL_OFFSET +#define SDMA7_RLC4_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA7_RLC4_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA7_RLC4_CSA_ADDR_LO +#define SDMA7_RLC4_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA7_RLC4_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA7_RLC4_CSA_ADDR_HI +#define SDMA7_RLC4_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA7_RLC4_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA7_RLC4_IB_SUB_REMAIN +#define SDMA7_RLC4_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA7_RLC4_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA7_RLC4_PREEMPT +#define SDMA7_RLC4_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA7_RLC4_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA7_RLC4_DUMMY_REG +#define SDMA7_RLC4_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA7_RLC4_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA7_RLC4_RB_WPTR_POLL_ADDR_HI +#define SDMA7_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA7_RLC4_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA7_RLC4_RB_WPTR_POLL_ADDR_LO +#define SDMA7_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA7_RLC4_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA7_RLC4_RB_AQL_CNTL +#define SDMA7_RLC4_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA7_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA7_RLC4_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA7_RLC4_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA7_RLC4_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA7_RLC4_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA7_RLC4_MINOR_PTR_UPDATE +#define SDMA7_RLC4_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA7_RLC4_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA7_RLC4_MIDCMD_DATA0 +#define SDMA7_RLC4_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define 
SDMA7_RLC4_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA7_RLC4_MIDCMD_DATA1 +#define SDMA7_RLC4_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA7_RLC4_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA7_RLC4_MIDCMD_DATA2 +#define SDMA7_RLC4_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA7_RLC4_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA7_RLC4_MIDCMD_DATA3 +#define SDMA7_RLC4_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA7_RLC4_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA7_RLC4_MIDCMD_DATA4 +#define SDMA7_RLC4_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA7_RLC4_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA7_RLC4_MIDCMD_DATA5 +#define SDMA7_RLC4_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA7_RLC4_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA7_RLC4_MIDCMD_DATA6 +#define SDMA7_RLC4_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA7_RLC4_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA7_RLC4_MIDCMD_DATA7 +#define SDMA7_RLC4_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA7_RLC4_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA7_RLC4_MIDCMD_DATA8 +#define SDMA7_RLC4_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA7_RLC4_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA7_RLC4_MIDCMD_CNTL +#define SDMA7_RLC4_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA7_RLC4_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA7_RLC4_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA7_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA7_RLC4_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA7_RLC4_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA7_RLC4_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA7_RLC4_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA7_RLC5_RB_CNTL +#define SDMA7_RLC5_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA7_RLC5_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA7_RLC5_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA7_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA7_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA7_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA7_RLC5_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA7_RLC5_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA7_RLC5_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA7_RLC5_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA7_RLC5_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA7_RLC5_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA7_RLC5_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA7_RLC5_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA7_RLC5_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA7_RLC5_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA7_RLC5_RB_BASE +#define SDMA7_RLC5_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA7_RLC5_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA7_RLC5_RB_BASE_HI +#define SDMA7_RLC5_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA7_RLC5_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA7_RLC5_RB_RPTR +#define SDMA7_RLC5_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA7_RLC5_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA7_RLC5_RB_RPTR_HI +#define SDMA7_RLC5_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA7_RLC5_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA7_RLC5_RB_WPTR +#define SDMA7_RLC5_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA7_RLC5_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA7_RLC5_RB_WPTR_HI +#define SDMA7_RLC5_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA7_RLC5_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA7_RLC5_RB_WPTR_POLL_CNTL +#define SDMA7_RLC5_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA7_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA7_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define 
SDMA7_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA7_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA7_RLC5_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA7_RLC5_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA7_RLC5_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA7_RLC5_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA7_RLC5_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA7_RLC5_RB_RPTR_ADDR_HI +#define SDMA7_RLC5_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA7_RLC5_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA7_RLC5_RB_RPTR_ADDR_LO +#define SDMA7_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA7_RLC5_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA7_RLC5_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA7_RLC5_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA7_RLC5_IB_CNTL +#define SDMA7_RLC5_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA7_RLC5_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA7_RLC5_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA7_RLC5_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA7_RLC5_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA7_RLC5_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA7_RLC5_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA7_RLC5_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA7_RLC5_IB_RPTR +#define SDMA7_RLC5_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA7_RLC5_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA7_RLC5_IB_OFFSET +#define SDMA7_RLC5_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA7_RLC5_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA7_RLC5_IB_BASE_LO +#define SDMA7_RLC5_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA7_RLC5_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA7_RLC5_IB_BASE_HI +#define SDMA7_RLC5_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA7_RLC5_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA7_RLC5_IB_SIZE +#define SDMA7_RLC5_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA7_RLC5_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA7_RLC5_SKIP_CNTL +#define SDMA7_RLC5_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA7_RLC5_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA7_RLC5_CONTEXT_STATUS +#define SDMA7_RLC5_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA7_RLC5_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA7_RLC5_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA7_RLC5_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA7_RLC5_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA7_RLC5_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA7_RLC5_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA7_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA7_RLC5_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA7_RLC5_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA7_RLC5_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA7_RLC5_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA7_RLC5_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA7_RLC5_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA7_RLC5_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA7_RLC5_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA7_RLC5_DOORBELL +#define SDMA7_RLC5_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA7_RLC5_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA7_RLC5_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA7_RLC5_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA7_RLC5_STATUS +#define SDMA7_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA7_RLC5_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA7_RLC5_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define 
SDMA7_RLC5_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA7_RLC5_DOORBELL_LOG +#define SDMA7_RLC5_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA7_RLC5_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA7_RLC5_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA7_RLC5_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA7_RLC5_WATERMARK +#define SDMA7_RLC5_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA7_RLC5_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA7_RLC5_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA7_RLC5_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA7_RLC5_DOORBELL_OFFSET +#define SDMA7_RLC5_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA7_RLC5_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA7_RLC5_CSA_ADDR_LO +#define SDMA7_RLC5_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA7_RLC5_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA7_RLC5_CSA_ADDR_HI +#define SDMA7_RLC5_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA7_RLC5_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA7_RLC5_IB_SUB_REMAIN +#define SDMA7_RLC5_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA7_RLC5_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA7_RLC5_PREEMPT +#define SDMA7_RLC5_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA7_RLC5_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA7_RLC5_DUMMY_REG +#define SDMA7_RLC5_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA7_RLC5_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA7_RLC5_RB_WPTR_POLL_ADDR_HI +#define SDMA7_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA7_RLC5_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA7_RLC5_RB_WPTR_POLL_ADDR_LO +#define SDMA7_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA7_RLC5_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA7_RLC5_RB_AQL_CNTL +#define SDMA7_RLC5_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA7_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA7_RLC5_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA7_RLC5_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA7_RLC5_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA7_RLC5_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA7_RLC5_MINOR_PTR_UPDATE +#define SDMA7_RLC5_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA7_RLC5_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA7_RLC5_MIDCMD_DATA0 +#define SDMA7_RLC5_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA7_RLC5_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA7_RLC5_MIDCMD_DATA1 +#define SDMA7_RLC5_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA7_RLC5_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA7_RLC5_MIDCMD_DATA2 +#define SDMA7_RLC5_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA7_RLC5_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA7_RLC5_MIDCMD_DATA3 +#define SDMA7_RLC5_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA7_RLC5_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA7_RLC5_MIDCMD_DATA4 +#define SDMA7_RLC5_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA7_RLC5_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA7_RLC5_MIDCMD_DATA5 +#define SDMA7_RLC5_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA7_RLC5_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA7_RLC5_MIDCMD_DATA6 +#define SDMA7_RLC5_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA7_RLC5_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA7_RLC5_MIDCMD_DATA7 +#define SDMA7_RLC5_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA7_RLC5_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA7_RLC5_MIDCMD_DATA8 +#define SDMA7_RLC5_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA7_RLC5_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA7_RLC5_MIDCMD_CNTL +#define SDMA7_RLC5_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define 
SDMA7_RLC5_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA7_RLC5_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA7_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA7_RLC5_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA7_RLC5_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA7_RLC5_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA7_RLC5_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA7_RLC6_RB_CNTL +#define SDMA7_RLC6_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA7_RLC6_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA7_RLC6_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA7_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA7_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA7_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA7_RLC6_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA7_RLC6_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA7_RLC6_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA7_RLC6_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA7_RLC6_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA7_RLC6_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA7_RLC6_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define SDMA7_RLC6_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA7_RLC6_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA7_RLC6_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA7_RLC6_RB_BASE +#define SDMA7_RLC6_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA7_RLC6_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA7_RLC6_RB_BASE_HI +#define SDMA7_RLC6_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA7_RLC6_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA7_RLC6_RB_RPTR +#define SDMA7_RLC6_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA7_RLC6_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA7_RLC6_RB_RPTR_HI +#define SDMA7_RLC6_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA7_RLC6_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA7_RLC6_RB_WPTR +#define SDMA7_RLC6_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA7_RLC6_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA7_RLC6_RB_WPTR_HI +#define SDMA7_RLC6_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA7_RLC6_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA7_RLC6_RB_WPTR_POLL_CNTL +#define SDMA7_RLC6_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA7_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA7_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA7_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA7_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA7_RLC6_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA7_RLC6_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA7_RLC6_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA7_RLC6_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA7_RLC6_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA7_RLC6_RB_RPTR_ADDR_HI +#define SDMA7_RLC6_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA7_RLC6_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA7_RLC6_RB_RPTR_ADDR_LO +#define SDMA7_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA7_RLC6_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA7_RLC6_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA7_RLC6_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA7_RLC6_IB_CNTL +#define SDMA7_RLC6_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA7_RLC6_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA7_RLC6_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA7_RLC6_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA7_RLC6_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA7_RLC6_IB_CNTL__IB_SWAP_ENABLE_MASK 
0x00000010L +#define SDMA7_RLC6_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA7_RLC6_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA7_RLC6_IB_RPTR +#define SDMA7_RLC6_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA7_RLC6_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA7_RLC6_IB_OFFSET +#define SDMA7_RLC6_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA7_RLC6_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA7_RLC6_IB_BASE_LO +#define SDMA7_RLC6_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA7_RLC6_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA7_RLC6_IB_BASE_HI +#define SDMA7_RLC6_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA7_RLC6_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA7_RLC6_IB_SIZE +#define SDMA7_RLC6_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA7_RLC6_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA7_RLC6_SKIP_CNTL +#define SDMA7_RLC6_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA7_RLC6_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA7_RLC6_CONTEXT_STATUS +#define SDMA7_RLC6_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA7_RLC6_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA7_RLC6_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA7_RLC6_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA7_RLC6_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 +#define SDMA7_RLC6_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA7_RLC6_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA7_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA7_RLC6_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA7_RLC6_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA7_RLC6_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA7_RLC6_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA7_RLC6_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA7_RLC6_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA7_RLC6_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA7_RLC6_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA7_RLC6_DOORBELL +#define SDMA7_RLC6_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA7_RLC6_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA7_RLC6_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA7_RLC6_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA7_RLC6_STATUS +#define SDMA7_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA7_RLC6_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA7_RLC6_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA7_RLC6_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA7_RLC6_DOORBELL_LOG +#define SDMA7_RLC6_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA7_RLC6_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA7_RLC6_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA7_RLC6_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA7_RLC6_WATERMARK +#define SDMA7_RLC6_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA7_RLC6_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA7_RLC6_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA7_RLC6_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA7_RLC6_DOORBELL_OFFSET +#define SDMA7_RLC6_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA7_RLC6_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA7_RLC6_CSA_ADDR_LO +#define SDMA7_RLC6_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA7_RLC6_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA7_RLC6_CSA_ADDR_HI +#define SDMA7_RLC6_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA7_RLC6_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA7_RLC6_IB_SUB_REMAIN +#define SDMA7_RLC6_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA7_RLC6_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA7_RLC6_PREEMPT +#define SDMA7_RLC6_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define 
SDMA7_RLC6_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA7_RLC6_DUMMY_REG +#define SDMA7_RLC6_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA7_RLC6_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA7_RLC6_RB_WPTR_POLL_ADDR_HI +#define SDMA7_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA7_RLC6_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA7_RLC6_RB_WPTR_POLL_ADDR_LO +#define SDMA7_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA7_RLC6_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA7_RLC6_RB_AQL_CNTL +#define SDMA7_RLC6_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA7_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA7_RLC6_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA7_RLC6_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA7_RLC6_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA7_RLC6_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA7_RLC6_MINOR_PTR_UPDATE +#define SDMA7_RLC6_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA7_RLC6_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA7_RLC6_MIDCMD_DATA0 +#define SDMA7_RLC6_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA7_RLC6_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA7_RLC6_MIDCMD_DATA1 +#define SDMA7_RLC6_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA7_RLC6_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA7_RLC6_MIDCMD_DATA2 +#define SDMA7_RLC6_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA7_RLC6_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA7_RLC6_MIDCMD_DATA3 +#define SDMA7_RLC6_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA7_RLC6_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA7_RLC6_MIDCMD_DATA4 +#define SDMA7_RLC6_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA7_RLC6_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA7_RLC6_MIDCMD_DATA5 +#define SDMA7_RLC6_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA7_RLC6_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA7_RLC6_MIDCMD_DATA6 +#define SDMA7_RLC6_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA7_RLC6_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA7_RLC6_MIDCMD_DATA7 +#define SDMA7_RLC6_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA7_RLC6_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA7_RLC6_MIDCMD_DATA8 +#define SDMA7_RLC6_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA7_RLC6_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA7_RLC6_MIDCMD_CNTL +#define SDMA7_RLC6_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA7_RLC6_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA7_RLC6_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA7_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA7_RLC6_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA7_RLC6_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA7_RLC6_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA7_RLC6_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L +//SDMA7_RLC7_RB_CNTL +#define SDMA7_RLC7_RB_CNTL__RB_ENABLE__SHIFT 0x0 +#define SDMA7_RLC7_RB_CNTL__RB_SIZE__SHIFT 0x1 +#define SDMA7_RLC7_RB_CNTL__RB_SWAP_ENABLE__SHIFT 0x9 +#define SDMA7_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT 0xc +#define SDMA7_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE__SHIFT 0xd +#define SDMA7_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT 0x10 +#define SDMA7_RLC7_RB_CNTL__RB_PRIV__SHIFT 0x17 +#define SDMA7_RLC7_RB_CNTL__RB_VMID__SHIFT 0x18 +#define SDMA7_RLC7_RB_CNTL__RB_ENABLE_MASK 0x00000001L +#define SDMA7_RLC7_RB_CNTL__RB_SIZE_MASK 0x0000003EL +#define SDMA7_RLC7_RB_CNTL__RB_SWAP_ENABLE_MASK 0x00000200L +#define SDMA7_RLC7_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK 0x00001000L +#define SDMA7_RLC7_RB_CNTL__RPTR_WRITEBACK_SWAP_ENABLE_MASK 0x00002000L +#define 
SDMA7_RLC7_RB_CNTL__RPTR_WRITEBACK_TIMER_MASK 0x001F0000L +#define SDMA7_RLC7_RB_CNTL__RB_PRIV_MASK 0x00800000L +#define SDMA7_RLC7_RB_CNTL__RB_VMID_MASK 0x0F000000L +//SDMA7_RLC7_RB_BASE +#define SDMA7_RLC7_RB_BASE__ADDR__SHIFT 0x0 +#define SDMA7_RLC7_RB_BASE__ADDR_MASK 0xFFFFFFFFL +//SDMA7_RLC7_RB_BASE_HI +#define SDMA7_RLC7_RB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA7_RLC7_RB_BASE_HI__ADDR_MASK 0x00FFFFFFL +//SDMA7_RLC7_RB_RPTR +#define SDMA7_RLC7_RB_RPTR__OFFSET__SHIFT 0x0 +#define SDMA7_RLC7_RB_RPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA7_RLC7_RB_RPTR_HI +#define SDMA7_RLC7_RB_RPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA7_RLC7_RB_RPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA7_RLC7_RB_WPTR +#define SDMA7_RLC7_RB_WPTR__OFFSET__SHIFT 0x0 +#define SDMA7_RLC7_RB_WPTR__OFFSET_MASK 0xFFFFFFFFL +//SDMA7_RLC7_RB_WPTR_HI +#define SDMA7_RLC7_RB_WPTR_HI__OFFSET__SHIFT 0x0 +#define SDMA7_RLC7_RB_WPTR_HI__OFFSET_MASK 0xFFFFFFFFL +//SDMA7_RLC7_RB_WPTR_POLL_CNTL +#define SDMA7_RLC7_RB_WPTR_POLL_CNTL__ENABLE__SHIFT 0x0 +#define SDMA7_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE__SHIFT 0x1 +#define SDMA7_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE__SHIFT 0x2 +#define SDMA7_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY__SHIFT 0x4 +#define SDMA7_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define SDMA7_RLC7_RB_WPTR_POLL_CNTL__ENABLE_MASK 0x00000001L +#define SDMA7_RLC7_RB_WPTR_POLL_CNTL__SWAP_ENABLE_MASK 0x00000002L +#define SDMA7_RLC7_RB_WPTR_POLL_CNTL__F32_POLL_ENABLE_MASK 0x00000004L +#define SDMA7_RLC7_RB_WPTR_POLL_CNTL__FREQUENCY_MASK 0x0000FFF0L +#define SDMA7_RLC7_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//SDMA7_RLC7_RB_RPTR_ADDR_HI +#define SDMA7_RLC7_RB_RPTR_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA7_RLC7_RB_RPTR_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA7_RLC7_RB_RPTR_ADDR_LO +#define SDMA7_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE__SHIFT 0x0 +#define SDMA7_RLC7_RB_RPTR_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA7_RLC7_RB_RPTR_ADDR_LO__RPTR_WB_IDLE_MASK 0x00000001L +#define SDMA7_RLC7_RB_RPTR_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA7_RLC7_IB_CNTL +#define SDMA7_RLC7_IB_CNTL__IB_ENABLE__SHIFT 0x0 +#define SDMA7_RLC7_IB_CNTL__IB_SWAP_ENABLE__SHIFT 0x4 +#define SDMA7_RLC7_IB_CNTL__SWITCH_INSIDE_IB__SHIFT 0x8 +#define SDMA7_RLC7_IB_CNTL__CMD_VMID__SHIFT 0x10 +#define SDMA7_RLC7_IB_CNTL__IB_ENABLE_MASK 0x00000001L +#define SDMA7_RLC7_IB_CNTL__IB_SWAP_ENABLE_MASK 0x00000010L +#define SDMA7_RLC7_IB_CNTL__SWITCH_INSIDE_IB_MASK 0x00000100L +#define SDMA7_RLC7_IB_CNTL__CMD_VMID_MASK 0x000F0000L +//SDMA7_RLC7_IB_RPTR +#define SDMA7_RLC7_IB_RPTR__OFFSET__SHIFT 0x2 +#define SDMA7_RLC7_IB_RPTR__OFFSET_MASK 0x003FFFFCL +//SDMA7_RLC7_IB_OFFSET +#define SDMA7_RLC7_IB_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA7_RLC7_IB_OFFSET__OFFSET_MASK 0x003FFFFCL +//SDMA7_RLC7_IB_BASE_LO +#define SDMA7_RLC7_IB_BASE_LO__ADDR__SHIFT 0x5 +#define SDMA7_RLC7_IB_BASE_LO__ADDR_MASK 0xFFFFFFE0L +//SDMA7_RLC7_IB_BASE_HI +#define SDMA7_RLC7_IB_BASE_HI__ADDR__SHIFT 0x0 +#define SDMA7_RLC7_IB_BASE_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA7_RLC7_IB_SIZE +#define SDMA7_RLC7_IB_SIZE__SIZE__SHIFT 0x0 +#define SDMA7_RLC7_IB_SIZE__SIZE_MASK 0x000FFFFFL +//SDMA7_RLC7_SKIP_CNTL +#define SDMA7_RLC7_SKIP_CNTL__SKIP_COUNT__SHIFT 0x0 +#define SDMA7_RLC7_SKIP_CNTL__SKIP_COUNT_MASK 0x000FFFFFL +//SDMA7_RLC7_CONTEXT_STATUS +#define SDMA7_RLC7_CONTEXT_STATUS__SELECTED__SHIFT 0x0 +#define SDMA7_RLC7_CONTEXT_STATUS__IDLE__SHIFT 0x2 +#define SDMA7_RLC7_CONTEXT_STATUS__EXPIRED__SHIFT 0x3 +#define SDMA7_RLC7_CONTEXT_STATUS__EXCEPTION__SHIFT 0x4 +#define SDMA7_RLC7_CONTEXT_STATUS__CTXSW_ABLE__SHIFT 0x7 
+#define SDMA7_RLC7_CONTEXT_STATUS__CTXSW_READY__SHIFT 0x8 +#define SDMA7_RLC7_CONTEXT_STATUS__PREEMPTED__SHIFT 0x9 +#define SDMA7_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE__SHIFT 0xa +#define SDMA7_RLC7_CONTEXT_STATUS__SELECTED_MASK 0x00000001L +#define SDMA7_RLC7_CONTEXT_STATUS__IDLE_MASK 0x00000004L +#define SDMA7_RLC7_CONTEXT_STATUS__EXPIRED_MASK 0x00000008L +#define SDMA7_RLC7_CONTEXT_STATUS__EXCEPTION_MASK 0x00000070L +#define SDMA7_RLC7_CONTEXT_STATUS__CTXSW_ABLE_MASK 0x00000080L +#define SDMA7_RLC7_CONTEXT_STATUS__CTXSW_READY_MASK 0x00000100L +#define SDMA7_RLC7_CONTEXT_STATUS__PREEMPTED_MASK 0x00000200L +#define SDMA7_RLC7_CONTEXT_STATUS__PREEMPT_DISABLE_MASK 0x00000400L +//SDMA7_RLC7_DOORBELL +#define SDMA7_RLC7_DOORBELL__ENABLE__SHIFT 0x1c +#define SDMA7_RLC7_DOORBELL__CAPTURED__SHIFT 0x1e +#define SDMA7_RLC7_DOORBELL__ENABLE_MASK 0x10000000L +#define SDMA7_RLC7_DOORBELL__CAPTURED_MASK 0x40000000L +//SDMA7_RLC7_STATUS +#define SDMA7_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT__SHIFT 0x0 +#define SDMA7_RLC7_STATUS__WPTR_UPDATE_PENDING__SHIFT 0x8 +#define SDMA7_RLC7_STATUS__WPTR_UPDATE_FAIL_COUNT_MASK 0x000000FFL +#define SDMA7_RLC7_STATUS__WPTR_UPDATE_PENDING_MASK 0x00000100L +//SDMA7_RLC7_DOORBELL_LOG +#define SDMA7_RLC7_DOORBELL_LOG__BE_ERROR__SHIFT 0x0 +#define SDMA7_RLC7_DOORBELL_LOG__DATA__SHIFT 0x2 +#define SDMA7_RLC7_DOORBELL_LOG__BE_ERROR_MASK 0x00000001L +#define SDMA7_RLC7_DOORBELL_LOG__DATA_MASK 0xFFFFFFFCL +//SDMA7_RLC7_WATERMARK +#define SDMA7_RLC7_WATERMARK__RD_OUTSTANDING__SHIFT 0x0 +#define SDMA7_RLC7_WATERMARK__WR_OUTSTANDING__SHIFT 0x10 +#define SDMA7_RLC7_WATERMARK__RD_OUTSTANDING_MASK 0x00000FFFL +#define SDMA7_RLC7_WATERMARK__WR_OUTSTANDING_MASK 0x03FF0000L +//SDMA7_RLC7_DOORBELL_OFFSET +#define SDMA7_RLC7_DOORBELL_OFFSET__OFFSET__SHIFT 0x2 +#define SDMA7_RLC7_DOORBELL_OFFSET__OFFSET_MASK 0x0FFFFFFCL +//SDMA7_RLC7_CSA_ADDR_LO +#define SDMA7_RLC7_CSA_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA7_RLC7_CSA_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA7_RLC7_CSA_ADDR_HI +#define SDMA7_RLC7_CSA_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA7_RLC7_CSA_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA7_RLC7_IB_SUB_REMAIN +#define SDMA7_RLC7_IB_SUB_REMAIN__SIZE__SHIFT 0x0 +#define SDMA7_RLC7_IB_SUB_REMAIN__SIZE_MASK 0x000FFFFFL +//SDMA7_RLC7_PREEMPT +#define SDMA7_RLC7_PREEMPT__IB_PREEMPT__SHIFT 0x0 +#define SDMA7_RLC7_PREEMPT__IB_PREEMPT_MASK 0x00000001L +//SDMA7_RLC7_DUMMY_REG +#define SDMA7_RLC7_DUMMY_REG__DUMMY__SHIFT 0x0 +#define SDMA7_RLC7_DUMMY_REG__DUMMY_MASK 0xFFFFFFFFL +//SDMA7_RLC7_RB_WPTR_POLL_ADDR_HI +#define SDMA7_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR__SHIFT 0x0 +#define SDMA7_RLC7_RB_WPTR_POLL_ADDR_HI__ADDR_MASK 0xFFFFFFFFL +//SDMA7_RLC7_RB_WPTR_POLL_ADDR_LO +#define SDMA7_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR__SHIFT 0x2 +#define SDMA7_RLC7_RB_WPTR_POLL_ADDR_LO__ADDR_MASK 0xFFFFFFFCL +//SDMA7_RLC7_RB_AQL_CNTL +#define SDMA7_RLC7_RB_AQL_CNTL__AQL_ENABLE__SHIFT 0x0 +#define SDMA7_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE__SHIFT 0x1 +#define SDMA7_RLC7_RB_AQL_CNTL__PACKET_STEP__SHIFT 0x8 +#define SDMA7_RLC7_RB_AQL_CNTL__AQL_ENABLE_MASK 0x00000001L +#define SDMA7_RLC7_RB_AQL_CNTL__AQL_PACKET_SIZE_MASK 0x000000FEL +#define SDMA7_RLC7_RB_AQL_CNTL__PACKET_STEP_MASK 0x0000FF00L +//SDMA7_RLC7_MINOR_PTR_UPDATE +#define SDMA7_RLC7_MINOR_PTR_UPDATE__ENABLE__SHIFT 0x0 +#define SDMA7_RLC7_MINOR_PTR_UPDATE__ENABLE_MASK 0x00000001L +//SDMA7_RLC7_MIDCMD_DATA0 +#define SDMA7_RLC7_MIDCMD_DATA0__DATA0__SHIFT 0x0 +#define SDMA7_RLC7_MIDCMD_DATA0__DATA0_MASK 0xFFFFFFFFL +//SDMA7_RLC7_MIDCMD_DATA1 +#define 
SDMA7_RLC7_MIDCMD_DATA1__DATA1__SHIFT 0x0 +#define SDMA7_RLC7_MIDCMD_DATA1__DATA1_MASK 0xFFFFFFFFL +//SDMA7_RLC7_MIDCMD_DATA2 +#define SDMA7_RLC7_MIDCMD_DATA2__DATA2__SHIFT 0x0 +#define SDMA7_RLC7_MIDCMD_DATA2__DATA2_MASK 0xFFFFFFFFL +//SDMA7_RLC7_MIDCMD_DATA3 +#define SDMA7_RLC7_MIDCMD_DATA3__DATA3__SHIFT 0x0 +#define SDMA7_RLC7_MIDCMD_DATA3__DATA3_MASK 0xFFFFFFFFL +//SDMA7_RLC7_MIDCMD_DATA4 +#define SDMA7_RLC7_MIDCMD_DATA4__DATA4__SHIFT 0x0 +#define SDMA7_RLC7_MIDCMD_DATA4__DATA4_MASK 0xFFFFFFFFL +//SDMA7_RLC7_MIDCMD_DATA5 +#define SDMA7_RLC7_MIDCMD_DATA5__DATA5__SHIFT 0x0 +#define SDMA7_RLC7_MIDCMD_DATA5__DATA5_MASK 0xFFFFFFFFL +//SDMA7_RLC7_MIDCMD_DATA6 +#define SDMA7_RLC7_MIDCMD_DATA6__DATA6__SHIFT 0x0 +#define SDMA7_RLC7_MIDCMD_DATA6__DATA6_MASK 0xFFFFFFFFL +//SDMA7_RLC7_MIDCMD_DATA7 +#define SDMA7_RLC7_MIDCMD_DATA7__DATA7__SHIFT 0x0 +#define SDMA7_RLC7_MIDCMD_DATA7__DATA7_MASK 0xFFFFFFFFL +//SDMA7_RLC7_MIDCMD_DATA8 +#define SDMA7_RLC7_MIDCMD_DATA8__DATA8__SHIFT 0x0 +#define SDMA7_RLC7_MIDCMD_DATA8__DATA8_MASK 0xFFFFFFFFL +//SDMA7_RLC7_MIDCMD_CNTL +#define SDMA7_RLC7_MIDCMD_CNTL__DATA_VALID__SHIFT 0x0 +#define SDMA7_RLC7_MIDCMD_CNTL__COPY_MODE__SHIFT 0x1 +#define SDMA7_RLC7_MIDCMD_CNTL__SPLIT_STATE__SHIFT 0x4 +#define SDMA7_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT__SHIFT 0x8 +#define SDMA7_RLC7_MIDCMD_CNTL__DATA_VALID_MASK 0x00000001L +#define SDMA7_RLC7_MIDCMD_CNTL__COPY_MODE_MASK 0x00000002L +#define SDMA7_RLC7_MIDCMD_CNTL__SPLIT_STATE_MASK 0x000000F0L +#define SDMA7_RLC7_MIDCMD_CNTL__ALLOW_PREEMPT_MASK 0x00000100L + +#endif -- cgit v1.2.3 From c54a60db0d722413628b8d6c4c480c7ac1238428 Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Thu, 21 Mar 2019 09:08:19 -0400 Subject: drm/amdgpu: add VCN2.5 headers VCN is the multi-media block. Signed-off-by: Leo Liu Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- .../drm/amd/include/asic_reg/vcn/vcn_2_5_offset.h | 979 ++++++ .../drm/amd/include/asic_reg/vcn/vcn_2_5_sh_mask.h | 3609 ++++++++++++++++++++ 2 files changed, 4588 insertions(+) create mode 100644 drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_offset.h create mode 100644 drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_sh_mask.h (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_offset.h new file mode 100644 index 000000000000..cf2149cc12ee --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_offset.h @@ -0,0 +1,979 @@ +/* + * Copyright (C) 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _vcn_2_5_OFFSET_HEADER +#define _vcn_2_5_OFFSET_HEADER + +// addressBlock: uvd0_mmsch_dec +// base address: 0x1e000 + + +// addressBlock: uvd0_jpegnpdec +// base address: 0x1e200 +#define mmUVD_JPEG_CNTL 0x0080 +#define mmUVD_JPEG_CNTL_BASE_IDX 0 +#define mmUVD_JPEG_RB_BASE 0x0081 +#define mmUVD_JPEG_RB_BASE_BASE_IDX 0 +#define mmUVD_JPEG_RB_WPTR 0x0082 +#define mmUVD_JPEG_RB_WPTR_BASE_IDX 0 +#define mmUVD_JPEG_RB_RPTR 0x0083 +#define mmUVD_JPEG_RB_RPTR_BASE_IDX 0 +#define mmUVD_JPEG_RB_SIZE 0x0084 +#define mmUVD_JPEG_RB_SIZE_BASE_IDX 0 +#define mmUVD_JPEG_DEC_SCRATCH0 0x0089 +#define mmUVD_JPEG_DEC_SCRATCH0_BASE_IDX 0 +#define mmUVD_JPEG_INT_EN 0x008a +#define mmUVD_JPEG_INT_EN_BASE_IDX 0 +#define mmUVD_JPEG_INT_STAT 0x008b +#define mmUVD_JPEG_INT_STAT_BASE_IDX 0 +#define mmUVD_JPEG_PITCH 0x009f +#define mmUVD_JPEG_PITCH_BASE_IDX 0 +#define mmUVD_JPEG_UV_PITCH 0x00a0 +#define mmUVD_JPEG_UV_PITCH_BASE_IDX 0 +#define mmJPEG_DEC_Y_GFX8_TILING_SURFACE 0x00a1 +#define mmJPEG_DEC_Y_GFX8_TILING_SURFACE_BASE_IDX 0 +#define mmJPEG_DEC_UV_GFX8_TILING_SURFACE 0x00a2 +#define mmJPEG_DEC_UV_GFX8_TILING_SURFACE_BASE_IDX 0 +#define mmJPEG_DEC_GFX8_ADDR_CONFIG 0x00a3 +#define mmJPEG_DEC_GFX8_ADDR_CONFIG_BASE_IDX 0 +#define mmJPEG_DEC_Y_GFX10_TILING_SURFACE 0x00a4 +#define mmJPEG_DEC_Y_GFX10_TILING_SURFACE_BASE_IDX 0 +#define mmJPEG_DEC_UV_GFX10_TILING_SURFACE 0x00a5 +#define mmJPEG_DEC_UV_GFX10_TILING_SURFACE_BASE_IDX 0 +#define mmJPEG_DEC_GFX10_ADDR_CONFIG 0x00a6 +#define mmJPEG_DEC_GFX10_ADDR_CONFIG_BASE_IDX 0 +#define mmJPEG_DEC_ADDR_MODE 0x00a7 +#define mmJPEG_DEC_ADDR_MODE_BASE_IDX 0 +#define mmUVD_JPEG_GPCOM_CMD 0x00a9 +#define mmUVD_JPEG_GPCOM_CMD_BASE_IDX 0 +#define mmUVD_JPEG_GPCOM_DATA0 0x00aa +#define mmUVD_JPEG_GPCOM_DATA0_BASE_IDX 0 +#define mmUVD_JPEG_GPCOM_DATA1 0x00ab +#define mmUVD_JPEG_GPCOM_DATA1_BASE_IDX 0 +#define mmUVD_JPEG_SCRATCH1 0x00ae +#define mmUVD_JPEG_SCRATCH1_BASE_IDX 0 +#define mmUVD_JPEG_DEC_SOFT_RST 0x00af +#define mmUVD_JPEG_DEC_SOFT_RST_BASE_IDX 0 + + +// addressBlock: uvd0_uvd_jpeg_enc_dec +// base address: 0x1e300 +#define mmUVD_JPEG_ENC_INT_EN 0x00c1 +#define mmUVD_JPEG_ENC_INT_EN_BASE_IDX 0 +#define mmUVD_JPEG_ENC_INT_STATUS 0x00c2 +#define mmUVD_JPEG_ENC_INT_STATUS_BASE_IDX 0 +#define mmUVD_JPEG_ENC_ENGINE_CNTL 0x00c5 +#define mmUVD_JPEG_ENC_ENGINE_CNTL_BASE_IDX 0 +#define mmUVD_JPEG_ENC_SCRATCH1 0x00ce +#define mmUVD_JPEG_ENC_SCRATCH1_BASE_IDX 0 + + +// addressBlock: uvd0_uvd_jpeg_enc_sclk_dec +// base address: 0x1e380 +#define mmUVD_JPEG_ENC_STATUS 0x00e5 +#define mmUVD_JPEG_ENC_STATUS_BASE_IDX 0 +#define mmUVD_JPEG_ENC_PITCH 0x00e6 +#define mmUVD_JPEG_ENC_PITCH_BASE_IDX 0 +#define mmUVD_JPEG_ENC_LUMA_BASE 0x00e7 +#define mmUVD_JPEG_ENC_LUMA_BASE_BASE_IDX 0 +#define mmUVD_JPEG_ENC_CHROMAU_BASE 0x00e8 +#define mmUVD_JPEG_ENC_CHROMAU_BASE_BASE_IDX 0 +#define mmUVD_JPEG_ENC_CHROMAV_BASE 0x00e9 +#define mmUVD_JPEG_ENC_CHROMAV_BASE_BASE_IDX 0 +#define mmJPEG_ENC_Y_GFX10_TILING_SURFACE 0x00ea +#define mmJPEG_ENC_Y_GFX10_TILING_SURFACE_BASE_IDX 0 +#define mmJPEG_ENC_UV_GFX10_TILING_SURFACE 0x00eb +#define mmJPEG_ENC_UV_GFX10_TILING_SURFACE_BASE_IDX 0 +#define mmJPEG_ENC_GFX10_ADDR_CONFIG 0x00ec +#define mmJPEG_ENC_GFX10_ADDR_CONFIG_BASE_IDX 0 +#define mmJPEG_ENC_ADDR_MODE 0x00ed +#define 
mmJPEG_ENC_ADDR_MODE_BASE_IDX 0 +#define mmUVD_JPEG_ENC_GPCOM_CMD 0x00ee +#define mmUVD_JPEG_ENC_GPCOM_CMD_BASE_IDX 0 +#define mmUVD_JPEG_ENC_GPCOM_DATA0 0x00ef +#define mmUVD_JPEG_ENC_GPCOM_DATA0_BASE_IDX 0 +#define mmUVD_JPEG_ENC_GPCOM_DATA1 0x00f0 +#define mmUVD_JPEG_ENC_GPCOM_DATA1_BASE_IDX 0 +#define mmUVD_JPEG_ENC_CGC_CNTL 0x00f5 +#define mmUVD_JPEG_ENC_CGC_CNTL_BASE_IDX 0 +#define mmUVD_JPEG_ENC_SCRATCH0 0x00f6 +#define mmUVD_JPEG_ENC_SCRATCH0_BASE_IDX 0 +#define mmUVD_JPEG_ENC_SOFT_RST 0x00f7 +#define mmUVD_JPEG_ENC_SOFT_RST_BASE_IDX 0 + + +// addressBlock: uvd0_uvd_jrbc_dec +// base address: 0x1e400 +#define mmUVD_JRBC_RB_WPTR 0x0100 +#define mmUVD_JRBC_RB_WPTR_BASE_IDX 0 +#define mmUVD_JRBC_RB_CNTL 0x0101 +#define mmUVD_JRBC_RB_CNTL_BASE_IDX 0 +#define mmUVD_JRBC_IB_SIZE 0x0102 +#define mmUVD_JRBC_IB_SIZE_BASE_IDX 0 +#define mmUVD_JRBC_URGENT_CNTL 0x0103 +#define mmUVD_JRBC_URGENT_CNTL_BASE_IDX 0 +#define mmUVD_JRBC_RB_REF_DATA 0x0104 +#define mmUVD_JRBC_RB_REF_DATA_BASE_IDX 0 +#define mmUVD_JRBC_RB_COND_RD_TIMER 0x0105 +#define mmUVD_JRBC_RB_COND_RD_TIMER_BASE_IDX 0 +#define mmUVD_JRBC_SOFT_RESET 0x0108 +#define mmUVD_JRBC_SOFT_RESET_BASE_IDX 0 +#define mmUVD_JRBC_STATUS 0x0109 +#define mmUVD_JRBC_STATUS_BASE_IDX 0 +#define mmUVD_JRBC_RB_RPTR 0x010a +#define mmUVD_JRBC_RB_RPTR_BASE_IDX 0 +#define mmUVD_JRBC_RB_BUF_STATUS 0x010b +#define mmUVD_JRBC_RB_BUF_STATUS_BASE_IDX 0 +#define mmUVD_JRBC_IB_BUF_STATUS 0x010c +#define mmUVD_JRBC_IB_BUF_STATUS_BASE_IDX 0 +#define mmUVD_JRBC_IB_SIZE_UPDATE 0x010d +#define mmUVD_JRBC_IB_SIZE_UPDATE_BASE_IDX 0 +#define mmUVD_JRBC_IB_COND_RD_TIMER 0x010e +#define mmUVD_JRBC_IB_COND_RD_TIMER_BASE_IDX 0 +#define mmUVD_JRBC_IB_REF_DATA 0x010f +#define mmUVD_JRBC_IB_REF_DATA_BASE_IDX 0 +#define mmUVD_JPEG_PREEMPT_CMD 0x0110 +#define mmUVD_JPEG_PREEMPT_CMD_BASE_IDX 0 +#define mmUVD_JPEG_PREEMPT_FENCE_DATA0 0x0111 +#define mmUVD_JPEG_PREEMPT_FENCE_DATA0_BASE_IDX 0 +#define mmUVD_JPEG_PREEMPT_FENCE_DATA1 0x0112 +#define mmUVD_JPEG_PREEMPT_FENCE_DATA1_BASE_IDX 0 +#define mmUVD_JRBC_RB_SIZE 0x0113 +#define mmUVD_JRBC_RB_SIZE_BASE_IDX 0 +#define mmUVD_JRBC_SCRATCH0 0x0114 +#define mmUVD_JRBC_SCRATCH0_BASE_IDX 0 + + +// addressBlock: uvd0_uvd_jrbc_enc_dec +// base address: 0x1e480 +#define mmUVD_JRBC_ENC_RB_WPTR 0x0120 +#define mmUVD_JRBC_ENC_RB_WPTR_BASE_IDX 0 +#define mmUVD_JRBC_ENC_RB_CNTL 0x0121 +#define mmUVD_JRBC_ENC_RB_CNTL_BASE_IDX 0 +#define mmUVD_JRBC_ENC_IB_SIZE 0x0122 +#define mmUVD_JRBC_ENC_IB_SIZE_BASE_IDX 0 +#define mmUVD_JRBC_ENC_URGENT_CNTL 0x0123 +#define mmUVD_JRBC_ENC_URGENT_CNTL_BASE_IDX 0 +#define mmUVD_JRBC_ENC_RB_REF_DATA 0x0124 +#define mmUVD_JRBC_ENC_RB_REF_DATA_BASE_IDX 0 +#define mmUVD_JRBC_ENC_RB_COND_RD_TIMER 0x0125 +#define mmUVD_JRBC_ENC_RB_COND_RD_TIMER_BASE_IDX 0 +#define mmUVD_JRBC_ENC_SOFT_RESET 0x0128 +#define mmUVD_JRBC_ENC_SOFT_RESET_BASE_IDX 0 +#define mmUVD_JRBC_ENC_STATUS 0x0129 +#define mmUVD_JRBC_ENC_STATUS_BASE_IDX 0 +#define mmUVD_JRBC_ENC_RB_RPTR 0x012a +#define mmUVD_JRBC_ENC_RB_RPTR_BASE_IDX 0 +#define mmUVD_JRBC_ENC_RB_BUF_STATUS 0x012b +#define mmUVD_JRBC_ENC_RB_BUF_STATUS_BASE_IDX 0 +#define mmUVD_JRBC_ENC_IB_BUF_STATUS 0x012c +#define mmUVD_JRBC_ENC_IB_BUF_STATUS_BASE_IDX 0 +#define mmUVD_JRBC_ENC_IB_SIZE_UPDATE 0x012d +#define mmUVD_JRBC_ENC_IB_SIZE_UPDATE_BASE_IDX 0 +#define mmUVD_JRBC_ENC_IB_COND_RD_TIMER 0x012e +#define mmUVD_JRBC_ENC_IB_COND_RD_TIMER_BASE_IDX 0 +#define mmUVD_JRBC_ENC_IB_REF_DATA 0x012f +#define mmUVD_JRBC_ENC_IB_REF_DATA_BASE_IDX 0 +#define mmUVD_JPEG_ENC_PREEMPT_CMD 0x0130 +#define 
mmUVD_JPEG_ENC_PREEMPT_CMD_BASE_IDX 0 +#define mmUVD_JPEG_ENC_PREEMPT_FENCE_DATA0 0x0131 +#define mmUVD_JPEG_ENC_PREEMPT_FENCE_DATA0_BASE_IDX 0 +#define mmUVD_JPEG_ENC_PREEMPT_FENCE_DATA1 0x0132 +#define mmUVD_JPEG_ENC_PREEMPT_FENCE_DATA1_BASE_IDX 0 +#define mmUVD_JRBC_ENC_RB_SIZE 0x0133 +#define mmUVD_JRBC_ENC_RB_SIZE_BASE_IDX 0 +#define mmUVD_JRBC_ENC_SCRATCH0 0x0134 +#define mmUVD_JRBC_ENC_SCRATCH0_BASE_IDX 0 + + +// addressBlock: uvd0_uvd_jmi_dec +// base address: 0x1e500 +#define mmUVD_JMI_CTRL 0x0145 +#define mmUVD_JMI_CTRL_BASE_IDX 0 +#define mmUVD_LMI_JRBC_CTRL 0x0146 +#define mmUVD_LMI_JRBC_CTRL_BASE_IDX 0 +#define mmUVD_LMI_JPEG_CTRL 0x0147 +#define mmUVD_LMI_JPEG_CTRL_BASE_IDX 0 +#define mmUVD_JMI_EJRBC_CTRL 0x0148 +#define mmUVD_JMI_EJRBC_CTRL_BASE_IDX 0 +#define mmUVD_LMI_EJPEG_CTRL 0x0149 +#define mmUVD_LMI_EJPEG_CTRL_BASE_IDX 0 +#define mmUVD_LMI_JRBC_IB_VMID 0x014f +#define mmUVD_LMI_JRBC_IB_VMID_BASE_IDX 0 +#define mmUVD_LMI_JRBC_RB_VMID 0x0150 +#define mmUVD_LMI_JRBC_RB_VMID_BASE_IDX 0 +#define mmUVD_LMI_JPEG_VMID 0x0151 +#define mmUVD_LMI_JPEG_VMID_BASE_IDX 0 +#define mmUVD_JMI_ENC_JRBC_IB_VMID 0x0152 +#define mmUVD_JMI_ENC_JRBC_IB_VMID_BASE_IDX 0 +#define mmUVD_JMI_ENC_JRBC_RB_VMID 0x0153 +#define mmUVD_JMI_ENC_JRBC_RB_VMID_BASE_IDX 0 +#define mmUVD_JMI_ENC_JPEG_VMID 0x0154 +#define mmUVD_JMI_ENC_JPEG_VMID_BASE_IDX 0 +#define mmUVD_JMI_PERFMON_CTRL 0x015c +#define mmUVD_JMI_PERFMON_CTRL_BASE_IDX 0 +#define mmUVD_JMI_PERFMON_COUNT_LO 0x015d +#define mmUVD_JMI_PERFMON_COUNT_LO_BASE_IDX 0 +#define mmUVD_JMI_PERFMON_COUNT_HI 0x015e +#define mmUVD_JMI_PERFMON_COUNT_HI_BASE_IDX 0 +#define mmUVD_LMI_JPEG_READ_64BIT_BAR_LOW 0x0160 +#define mmUVD_LMI_JPEG_READ_64BIT_BAR_LOW_BASE_IDX 0 +#define mmUVD_LMI_JPEG_READ_64BIT_BAR_HIGH 0x0161 +#define mmUVD_LMI_JPEG_READ_64BIT_BAR_HIGH_BASE_IDX 0 +#define mmUVD_LMI_JPEG_WRITE_64BIT_BAR_LOW 0x0162 +#define mmUVD_LMI_JPEG_WRITE_64BIT_BAR_LOW_BASE_IDX 0 +#define mmUVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH 0x0163 +#define mmUVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH_BASE_IDX 0 +#define mmUVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW 0x0164 +#define mmUVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW_BASE_IDX 0 +#define mmUVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH 0x0165 +#define mmUVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH_BASE_IDX 0 +#define mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW 0x0166 +#define mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW_BASE_IDX 0 +#define mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH 0x0167 +#define mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH_BASE_IDX 0 +#define mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW 0x0168 +#define mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_BASE_IDX 0 +#define mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH 0x0169 +#define mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_BASE_IDX 0 +#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW 0x016a +#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 0 +#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH 0x016b +#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0 +#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW 0x016c +#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_BASE_IDX 0 +#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH 0x016d +#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_BASE_IDX 0 +#define mmUVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW 0x016e +#define mmUVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 0 +#define mmUVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH 0x016f +#define mmUVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0 +#define mmUVD_LMI_JRBC_IB_MEM_RD_64BIT_BAR_LOW 0x0170 +#define mmUVD_LMI_JRBC_IB_MEM_RD_64BIT_BAR_LOW_BASE_IDX 0 +#define mmUVD_LMI_JRBC_IB_MEM_RD_64BIT_BAR_HIGH 0x0171 
+#define mmUVD_LMI_JRBC_IB_MEM_RD_64BIT_BAR_HIGH_BASE_IDX 0 +#define mmUVD_LMI_EJPEG_PREEMPT_FENCE_64BIT_BAR_LOW 0x017a +#define mmUVD_LMI_EJPEG_PREEMPT_FENCE_64BIT_BAR_LOW_BASE_IDX 0 +#define mmUVD_LMI_EJPEG_PREEMPT_FENCE_64BIT_BAR_HIGH 0x017b +#define mmUVD_LMI_EJPEG_PREEMPT_FENCE_64BIT_BAR_HIGH_BASE_IDX 0 +#define mmUVD_LMI_EJRBC_RB_64BIT_BAR_LOW 0x017c +#define mmUVD_LMI_EJRBC_RB_64BIT_BAR_LOW_BASE_IDX 0 +#define mmUVD_LMI_EJRBC_RB_64BIT_BAR_HIGH 0x017d +#define mmUVD_LMI_EJRBC_RB_64BIT_BAR_HIGH_BASE_IDX 0 +#define mmUVD_LMI_EJRBC_IB_64BIT_BAR_LOW 0x017e +#define mmUVD_LMI_EJRBC_IB_64BIT_BAR_LOW_BASE_IDX 0 +#define mmUVD_LMI_EJRBC_IB_64BIT_BAR_HIGH 0x017f +#define mmUVD_LMI_EJRBC_IB_64BIT_BAR_HIGH_BASE_IDX 0 +#define mmUVD_LMI_EJRBC_RB_MEM_WR_64BIT_BAR_LOW 0x0180 +#define mmUVD_LMI_EJRBC_RB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 0 +#define mmUVD_LMI_EJRBC_RB_MEM_WR_64BIT_BAR_HIGH 0x0181 +#define mmUVD_LMI_EJRBC_RB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0 +#define mmUVD_LMI_EJRBC_RB_MEM_RD_64BIT_BAR_LOW 0x0182 +#define mmUVD_LMI_EJRBC_RB_MEM_RD_64BIT_BAR_LOW_BASE_IDX 0 +#define mmUVD_LMI_EJRBC_RB_MEM_RD_64BIT_BAR_HIGH 0x0183 +#define mmUVD_LMI_EJRBC_RB_MEM_RD_64BIT_BAR_HIGH_BASE_IDX 0 +#define mmUVD_LMI_EJRBC_IB_MEM_WR_64BIT_BAR_LOW 0x0184 +#define mmUVD_LMI_EJRBC_IB_MEM_WR_64BIT_BAR_LOW_BASE_IDX 0 +#define mmUVD_LMI_EJRBC_IB_MEM_WR_64BIT_BAR_HIGH 0x0185 +#define mmUVD_LMI_EJRBC_IB_MEM_WR_64BIT_BAR_HIGH_BASE_IDX 0 +#define mmUVD_LMI_EJRBC_IB_MEM_RD_64BIT_BAR_LOW 0x0186 +#define mmUVD_LMI_EJRBC_IB_MEM_RD_64BIT_BAR_LOW_BASE_IDX 0 +#define mmUVD_LMI_EJRBC_IB_MEM_RD_64BIT_BAR_HIGH 0x0187 +#define mmUVD_LMI_EJRBC_IB_MEM_RD_64BIT_BAR_HIGH_BASE_IDX 0 +#define mmUVD_LMI_JPEG_PREEMPT_VMID 0x0188 +#define mmUVD_LMI_JPEG_PREEMPT_VMID_BASE_IDX 0 +#define mmUVD_LMI_ENC_JPEG_PREEMPT_VMID 0x0189 +#define mmUVD_LMI_ENC_JPEG_PREEMPT_VMID_BASE_IDX 0 +#define mmUVD_LMI_JPEG2_VMID 0x018a +#define mmUVD_LMI_JPEG2_VMID_BASE_IDX 0 +#define mmUVD_LMI_JPEG2_READ_64BIT_BAR_LOW 0x018b +#define mmUVD_LMI_JPEG2_READ_64BIT_BAR_LOW_BASE_IDX 0 +#define mmUVD_LMI_JPEG2_READ_64BIT_BAR_HIGH 0x018c +#define mmUVD_LMI_JPEG2_READ_64BIT_BAR_HIGH_BASE_IDX 0 +#define mmUVD_LMI_JPEG2_WRITE_64BIT_BAR_LOW 0x018d +#define mmUVD_LMI_JPEG2_WRITE_64BIT_BAR_LOW_BASE_IDX 0 +#define mmUVD_LMI_JPEG2_WRITE_64BIT_BAR_HIGH 0x018e +#define mmUVD_LMI_JPEG2_WRITE_64BIT_BAR_HIGH_BASE_IDX 0 +#define mmUVD_LMI_JPEG_CTRL2 0x018f +#define mmUVD_LMI_JPEG_CTRL2_BASE_IDX 0 +#define mmUVD_JMI_DEC_SWAP_CNTL 0x0190 +#define mmUVD_JMI_DEC_SWAP_CNTL_BASE_IDX 0 +#define mmUVD_JMI_ENC_SWAP_CNTL 0x0191 +#define mmUVD_JMI_ENC_SWAP_CNTL_BASE_IDX 0 +#define mmUVD_JMI_CNTL 0x0192 +#define mmUVD_JMI_CNTL_BASE_IDX 0 +#define mmUVD_JMI_HUFF_FENCE_64BIT_BAR_LOW 0x019a +#define mmUVD_JMI_HUFF_FENCE_64BIT_BAR_LOW_BASE_IDX 0 +#define mmUVD_JMI_HUFF_FENCE_64BIT_BAR_HIGH 0x019b +#define mmUVD_JMI_HUFF_FENCE_64BIT_BAR_HIGH_BASE_IDX 0 +#define mmUVD_JMI_DEC_SWAP_CNTL2 0x019c +#define mmUVD_JMI_DEC_SWAP_CNTL2_BASE_IDX 0 + + +// addressBlock: uvd0_uvd_jpeg_common_dec +// base address: 0x1e700 +#define mmJPEG_SOFT_RESET_STATUS 0x01c0 +#define mmJPEG_SOFT_RESET_STATUS_BASE_IDX 0 +#define mmJPEG_SYS_INT_EN 0x01c1 +#define mmJPEG_SYS_INT_EN_BASE_IDX 0 +#define mmJPEG_SYS_INT_STATUS 0x01c2 +#define mmJPEG_SYS_INT_STATUS_BASE_IDX 0 +#define mmJPEG_SYS_INT_ACK 0x01c3 +#define mmJPEG_SYS_INT_ACK_BASE_IDX 0 +#define mmJPEG_MASTINT_EN 0x01c8 +#define mmJPEG_MASTINT_EN_BASE_IDX 0 +#define mmJPEG_IH_CTRL 0x01c9 +#define mmJPEG_IH_CTRL_BASE_IDX 0 +#define mmJRBBM_ARB_CTRL 0x01cb +#define 
mmJRBBM_ARB_CTRL_BASE_IDX 0 + + +// addressBlock: uvd0_uvd_jpeg_common_sclk_dec +// base address: 0x1e780 +#define mmJPEG_CGC_GATE 0x01e0 +#define mmJPEG_CGC_GATE_BASE_IDX 0 +#define mmJPEG_CGC_CTRL 0x01e1 +#define mmJPEG_CGC_CTRL_BASE_IDX 0 +#define mmJPEG_CGC_STATUS 0x01e2 +#define mmJPEG_CGC_STATUS_BASE_IDX 0 +#define mmJPEG_COMN_CGC_MEM_CTRL 0x01e3 +#define mmJPEG_COMN_CGC_MEM_CTRL_BASE_IDX 0 +#define mmJPEG_DEC_CGC_MEM_CTRL 0x01e4 +#define mmJPEG_DEC_CGC_MEM_CTRL_BASE_IDX 0 +#define mmJPEG2_DEC_CGC_MEM_CTRL 0x01e5 +#define mmJPEG2_DEC_CGC_MEM_CTRL_BASE_IDX 0 +#define mmJPEG_ENC_CGC_MEM_CTRL 0x01e6 +#define mmJPEG_ENC_CGC_MEM_CTRL_BASE_IDX 0 +#define mmJPEG_SOFT_RESET2 0x01e7 +#define mmJPEG_SOFT_RESET2_BASE_IDX 0 +#define mmJPEG_PERF_BANK_CONF 0x01e8 +#define mmJPEG_PERF_BANK_CONF_BASE_IDX 0 +#define mmJPEG_PERF_BANK_EVENT_SEL 0x01e9 +#define mmJPEG_PERF_BANK_EVENT_SEL_BASE_IDX 0 +#define mmJPEG_PERF_BANK_COUNT0 0x01ea +#define mmJPEG_PERF_BANK_COUNT0_BASE_IDX 0 +#define mmJPEG_PERF_BANK_COUNT1 0x01eb +#define mmJPEG_PERF_BANK_COUNT1_BASE_IDX 0 +#define mmJPEG_PERF_BANK_COUNT2 0x01ec +#define mmJPEG_PERF_BANK_COUNT2_BASE_IDX 0 +#define mmJPEG_PERF_BANK_COUNT3 0x01ed +#define mmJPEG_PERF_BANK_COUNT3_BASE_IDX 0 + + +// addressBlock: uvd0_uvd_pg_dec +// base address: 0x1f800 +#define mmUVD_PGFSM_CONFIG 0x0000 +#define mmUVD_PGFSM_CONFIG_BASE_IDX 1 +#define mmUVD_PGFSM_STATUS 0x0001 +#define mmUVD_PGFSM_STATUS_BASE_IDX 1 +#define mmUVD_POWER_STATUS 0x0004 +#define mmUVD_POWER_STATUS_BASE_IDX 1 +#define mmUVD_PG_IND_INDEX 0x0005 +#define mmUVD_PG_IND_INDEX_BASE_IDX 1 +#define mmUVD_PG_IND_DATA 0x0006 +#define mmUVD_PG_IND_DATA_BASE_IDX 1 +#define mmCC_UVD_HARVESTING 0x0007 +#define mmCC_UVD_HARVESTING_BASE_IDX 1 +#define mmUVD_JPEG_POWER_STATUS 0x000a +#define mmUVD_JPEG_POWER_STATUS_BASE_IDX 1 +#define mmUVD_DPG_LMA_CTL 0x0011 +#define mmUVD_DPG_LMA_CTL_BASE_IDX 1 +#define mmUVD_DPG_LMA_DATA 0x0012 +#define mmUVD_DPG_LMA_DATA_BASE_IDX 1 +#define mmUVD_DPG_LMA_MASK 0x0013 +#define mmUVD_DPG_LMA_MASK_BASE_IDX 1 +#define mmUVD_DPG_PAUSE 0x0014 +#define mmUVD_DPG_PAUSE_BASE_IDX 1 +#define mmUVD_SCRATCH1 0x0015 +#define mmUVD_SCRATCH1_BASE_IDX 1 +#define mmUVD_SCRATCH2 0x0016 +#define mmUVD_SCRATCH2_BASE_IDX 1 +#define mmUVD_SCRATCH3 0x0017 +#define mmUVD_SCRATCH3_BASE_IDX 1 +#define mmUVD_SCRATCH4 0x0018 +#define mmUVD_SCRATCH4_BASE_IDX 1 +#define mmUVD_SCRATCH5 0x0019 +#define mmUVD_SCRATCH5_BASE_IDX 1 +#define mmUVD_SCRATCH6 0x001a +#define mmUVD_SCRATCH6_BASE_IDX 1 +#define mmUVD_SCRATCH7 0x001b +#define mmUVD_SCRATCH7_BASE_IDX 1 +#define mmUVD_SCRATCH8 0x001c +#define mmUVD_SCRATCH8_BASE_IDX 1 +#define mmUVD_SCRATCH9 0x001d +#define mmUVD_SCRATCH9_BASE_IDX 1 +#define mmUVD_SCRATCH10 0x001e +#define mmUVD_SCRATCH10_BASE_IDX 1 +#define mmUVD_SCRATCH11 0x001f +#define mmUVD_SCRATCH11_BASE_IDX 1 +#define mmUVD_SCRATCH12 0x0020 +#define mmUVD_SCRATCH12_BASE_IDX 1 +#define mmUVD_SCRATCH13 0x0021 +#define mmUVD_SCRATCH13_BASE_IDX 1 +#define mmUVD_SCRATCH14 0x0022 +#define mmUVD_SCRATCH14_BASE_IDX 1 +#define mmUVD_FREE_COUNTER_REG 0x0024 +#define mmUVD_FREE_COUNTER_REG_BASE_IDX 1 +#define mmUVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_LOW 0x0025 +#define mmUVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_LOW_BASE_IDX 1 +#define mmUVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_HIGH 0x0026 +#define mmUVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_HIGH_BASE_IDX 1 +#define mmUVD_DPG_VCPU_CACHE_OFFSET0 0x0027 +#define mmUVD_DPG_VCPU_CACHE_OFFSET0_BASE_IDX 1 +#define mmUVD_DPG_LMI_VCPU_CACHE_VMID 0x0028 +#define mmUVD_DPG_LMI_VCPU_CACHE_VMID_BASE_IDX 1 
+#define mmUVD_PF_STATUS 0x0039 +#define mmUVD_PF_STATUS_BASE_IDX 1 +#define mmUVD_DPG_CLK_EN_VCPU_REPORT 0x003c +#define mmUVD_DPG_CLK_EN_VCPU_REPORT_BASE_IDX 1 +#define mmUVD_GFX8_ADDR_CONFIG 0x0049 +#define mmUVD_GFX8_ADDR_CONFIG_BASE_IDX 1 +#define mmUVD_GFX10_ADDR_CONFIG 0x004a +#define mmUVD_GFX10_ADDR_CONFIG_BASE_IDX 1 +#define mmUVD_GPCNT2_CNTL 0x004b +#define mmUVD_GPCNT2_CNTL_BASE_IDX 1 +#define mmUVD_GPCNT2_TARGET_LOWER 0x004c +#define mmUVD_GPCNT2_TARGET_LOWER_BASE_IDX 1 +#define mmUVD_GPCNT2_STATUS_LOWER 0x004d +#define mmUVD_GPCNT2_STATUS_LOWER_BASE_IDX 1 +#define mmUVD_GPCNT2_TARGET_UPPER 0x004e +#define mmUVD_GPCNT2_TARGET_UPPER_BASE_IDX 1 +#define mmUVD_GPCNT2_STATUS_UPPER 0x004f +#define mmUVD_GPCNT2_STATUS_UPPER_BASE_IDX 1 +#define mmUVD_GPCNT3_CNTL 0x0050 +#define mmUVD_GPCNT3_CNTL_BASE_IDX 1 +#define mmUVD_GPCNT3_TARGET_LOWER 0x0051 +#define mmUVD_GPCNT3_TARGET_LOWER_BASE_IDX 1 +#define mmUVD_GPCNT3_STATUS_LOWER 0x0052 +#define mmUVD_GPCNT3_STATUS_LOWER_BASE_IDX 1 +#define mmUVD_GPCNT3_TARGET_UPPER 0x0053 +#define mmUVD_GPCNT3_TARGET_UPPER_BASE_IDX 1 +#define mmUVD_GPCNT3_STATUS_UPPER 0x0054 +#define mmUVD_GPCNT3_STATUS_UPPER_BASE_IDX 1 + + +// addressBlock: uvd0_uvddec +// base address: 0x1fa00 +#define mmUVD_STATUS 0x0080 +#define mmUVD_STATUS_BASE_IDX 1 +#define mmUVD_ENC_PIPE_BUSY 0x0081 +#define mmUVD_ENC_PIPE_BUSY_BASE_IDX 1 +#define mmUVD_SOFT_RESET 0x0084 +#define mmUVD_SOFT_RESET_BASE_IDX 1 +#define mmUVD_SOFT_RESET2 0x0085 +#define mmUVD_SOFT_RESET2_BASE_IDX 1 +#define mmUVD_MMSCH_SOFT_RESET 0x0086 +#define mmUVD_MMSCH_SOFT_RESET_BASE_IDX 1 +#define mmUVD_CGC_GATE 0x0088 +#define mmUVD_CGC_GATE_BASE_IDX 1 +#define mmUVD_CGC_STATUS 0x0089 +#define mmUVD_CGC_STATUS_BASE_IDX 1 +#define mmUVD_CGC_CTRL 0x008a +#define mmUVD_CGC_CTRL_BASE_IDX 1 +#define mmUVD_CGC_UDEC_STATUS 0x008b +#define mmUVD_CGC_UDEC_STATUS_BASE_IDX 1 +#define mmUVD_SUVD_CGC_GATE 0x008c +#define mmUVD_SUVD_CGC_GATE_BASE_IDX 1 +#define mmUVD_SUVD_CGC_STATUS 0x008d +#define mmUVD_SUVD_CGC_STATUS_BASE_IDX 1 +#define mmUVD_SUVD_CGC_CTRL 0x008e +#define mmUVD_SUVD_CGC_CTRL_BASE_IDX 1 +#define mmUVD_GPCOM_VCPU_CMD 0x008f +#define mmUVD_GPCOM_VCPU_CMD_BASE_IDX 1 +#define mmUVD_GPCOM_VCPU_DATA0 0x0090 +#define mmUVD_GPCOM_VCPU_DATA0_BASE_IDX 1 +#define mmUVD_GPCOM_VCPU_DATA1 0x0091 +#define mmUVD_GPCOM_VCPU_DATA1_BASE_IDX 1 +#define mmUVD_GPCOM_SYS_CMD 0x0092 +#define mmUVD_GPCOM_SYS_CMD_BASE_IDX 1 +#define mmUVD_GPCOM_SYS_DATA0 0x0093 +#define mmUVD_GPCOM_SYS_DATA0_BASE_IDX 1 +#define mmUVD_GPCOM_SYS_DATA1 0x0094 +#define mmUVD_GPCOM_SYS_DATA1_BASE_IDX 1 +#define mmUVD_VCPU_INT_EN 0x0095 +#define mmUVD_VCPU_INT_EN_BASE_IDX 1 +#define mmUVD_VCPU_INT_ACK 0x0097 +#define mmUVD_VCPU_INT_ACK_BASE_IDX 1 +#define mmUVD_VCPU_INT_ROUTE 0x0098 +#define mmUVD_VCPU_INT_ROUTE_BASE_IDX 1 +#define mmUVD_ENC_VCPU_INT_EN 0x009e +#define mmUVD_ENC_VCPU_INT_EN_BASE_IDX 1 +#define mmUVD_ENC_VCPU_INT_ACK 0x00a0 +#define mmUVD_ENC_VCPU_INT_ACK_BASE_IDX 1 +#define mmUVD_MASTINT_EN 0x00a1 +#define mmUVD_MASTINT_EN_BASE_IDX 1 +#define mmUVD_SYS_INT_EN 0x00a2 +#define mmUVD_SYS_INT_EN_BASE_IDX 1 +#define mmUVD_SYS_INT_STATUS 0x00a3 +#define mmUVD_SYS_INT_STATUS_BASE_IDX 1 +#define mmUVD_SYS_INT_ACK 0x00a4 +#define mmUVD_SYS_INT_ACK_BASE_IDX 1 +#define mmUVD_JOB_DONE 0x00a5 +#define mmUVD_JOB_DONE_BASE_IDX 1 +#define mmUVD_CBUF_ID 0x00a6 +#define mmUVD_CBUF_ID_BASE_IDX 1 +#define mmUVD_CONTEXT_ID 0x00a7 +#define mmUVD_CONTEXT_ID_BASE_IDX 1 +#define mmUVD_CONTEXT_ID2 0x00a8 +#define mmUVD_CONTEXT_ID2_BASE_IDX 1 +#define 
mmUVD_NO_OP 0x00a9 +#define mmUVD_NO_OP_BASE_IDX 1 +#define mmUVD_RB_BASE_LO 0x00aa +#define mmUVD_RB_BASE_LO_BASE_IDX 1 +#define mmUVD_RB_BASE_HI 0x00ab +#define mmUVD_RB_BASE_HI_BASE_IDX 1 +#define mmUVD_RB_SIZE 0x00ac +#define mmUVD_RB_SIZE_BASE_IDX 1 +#define mmUVD_RB_RPTR 0x00ad +#define mmUVD_RB_RPTR_BASE_IDX 1 +#define mmUVD_RB_WPTR 0x00ae +#define mmUVD_RB_WPTR_BASE_IDX 1 +#define mmUVD_RB_BASE_LO2 0x00af +#define mmUVD_RB_BASE_LO2_BASE_IDX 1 +#define mmUVD_RB_BASE_HI2 0x00b0 +#define mmUVD_RB_BASE_HI2_BASE_IDX 1 +#define mmUVD_RB_SIZE2 0x00b1 +#define mmUVD_RB_SIZE2_BASE_IDX 1 +#define mmUVD_RB_RPTR2 0x00b2 +#define mmUVD_RB_RPTR2_BASE_IDX 1 +#define mmUVD_RB_WPTR2 0x00b3 +#define mmUVD_RB_WPTR2_BASE_IDX 1 +#define mmUVD_RB_BASE_LO3 0x00b4 +#define mmUVD_RB_BASE_LO3_BASE_IDX 1 +#define mmUVD_RB_BASE_HI3 0x00b5 +#define mmUVD_RB_BASE_HI3_BASE_IDX 1 +#define mmUVD_RB_SIZE3 0x00b6 +#define mmUVD_RB_SIZE3_BASE_IDX 1 +#define mmUVD_RB_RPTR3 0x00b7 +#define mmUVD_RB_RPTR3_BASE_IDX 1 +#define mmUVD_RB_WPTR3 0x00b8 +#define mmUVD_RB_WPTR3_BASE_IDX 1 +#define mmUVD_RB_BASE_LO4 0x00b9 +#define mmUVD_RB_BASE_LO4_BASE_IDX 1 +#define mmUVD_RB_BASE_HI4 0x00ba +#define mmUVD_RB_BASE_HI4_BASE_IDX 1 +#define mmUVD_RB_SIZE4 0x00bb +#define mmUVD_RB_SIZE4_BASE_IDX 1 +#define mmUVD_RB_RPTR4 0x00bc +#define mmUVD_RB_RPTR4_BASE_IDX 1 +#define mmUVD_RB_WPTR4 0x00bd +#define mmUVD_RB_WPTR4_BASE_IDX 1 +#define mmUVD_OUT_RB_BASE_LO 0x00be +#define mmUVD_OUT_RB_BASE_LO_BASE_IDX 1 +#define mmUVD_OUT_RB_BASE_HI 0x00bf +#define mmUVD_OUT_RB_BASE_HI_BASE_IDX 1 +#define mmUVD_OUT_RB_SIZE 0x00c0 +#define mmUVD_OUT_RB_SIZE_BASE_IDX 1 +#define mmUVD_OUT_RB_RPTR 0x00c1 +#define mmUVD_OUT_RB_RPTR_BASE_IDX 1 +#define mmUVD_OUT_RB_WPTR 0x00c2 +#define mmUVD_OUT_RB_WPTR_BASE_IDX 1 +#define mmUVD_RB_ARB_CTRL 0x00c6 +#define mmUVD_RB_ARB_CTRL_BASE_IDX 1 +#define mmUVD_CTX_INDEX 0x00c7 +#define mmUVD_CTX_INDEX_BASE_IDX 1 +#define mmUVD_CTX_DATA 0x00c8 +#define mmUVD_CTX_DATA_BASE_IDX 1 +#define mmUVD_CXW_WR 0x00c9 +#define mmUVD_CXW_WR_BASE_IDX 1 +#define mmUVD_CXW_WR_INT_ID 0x00ca +#define mmUVD_CXW_WR_INT_ID_BASE_IDX 1 +#define mmUVD_CXW_WR_INT_CTX_ID 0x00cb +#define mmUVD_CXW_WR_INT_CTX_ID_BASE_IDX 1 +#define mmUVD_CXW_INT_ID 0x00cc +#define mmUVD_CXW_INT_ID_BASE_IDX 1 +#define mmUVD_TOP_CTRL 0x00cf +#define mmUVD_TOP_CTRL_BASE_IDX 1 +#define mmUVD_YBASE 0x00d0 +#define mmUVD_YBASE_BASE_IDX 1 +#define mmUVD_UVBASE 0x00d1 +#define mmUVD_UVBASE_BASE_IDX 1 +#define mmUVD_PITCH 0x00d2 +#define mmUVD_PITCH_BASE_IDX 1 +#define mmUVD_WIDTH 0x00d3 +#define mmUVD_WIDTH_BASE_IDX 1 +#define mmUVD_HEIGHT 0x00d4 +#define mmUVD_HEIGHT_BASE_IDX 1 +#define mmUVD_PICCOUNT 0x00d5 +#define mmUVD_PICCOUNT_BASE_IDX 1 +#define mmUVD_SCRATCH_NP 0x00db +#define mmUVD_SCRATCH_NP_BASE_IDX 1 +#define mmUVD_VERSION 0x00dd +#define mmUVD_VERSION_BASE_IDX 1 +#define mmUVD_GP_SCRATCH0 0x00de +#define mmUVD_GP_SCRATCH0_BASE_IDX 1 +#define mmUVD_GP_SCRATCH1 0x00df +#define mmUVD_GP_SCRATCH1_BASE_IDX 1 +#define mmUVD_GP_SCRATCH2 0x00e0 +#define mmUVD_GP_SCRATCH2_BASE_IDX 1 +#define mmUVD_GP_SCRATCH3 0x00e1 +#define mmUVD_GP_SCRATCH3_BASE_IDX 1 +#define mmUVD_GP_SCRATCH4 0x00e2 +#define mmUVD_GP_SCRATCH4_BASE_IDX 1 +#define mmUVD_GP_SCRATCH5 0x00e3 +#define mmUVD_GP_SCRATCH5_BASE_IDX 1 +#define mmUVD_GP_SCRATCH6 0x00e4 +#define mmUVD_GP_SCRATCH6_BASE_IDX 1 +#define mmUVD_GP_SCRATCH7 0x00e5 +#define mmUVD_GP_SCRATCH7_BASE_IDX 1 +#define mmUVD_GP_SCRATCH8 0x00e6 +#define mmUVD_GP_SCRATCH8_BASE_IDX 1 +#define mmUVD_GP_SCRATCH9 0x00e7 +#define 
mmUVD_GP_SCRATCH9_BASE_IDX 1 +#define mmUVD_GP_SCRATCH10 0x00e8 +#define mmUVD_GP_SCRATCH10_BASE_IDX 1 +#define mmUVD_GP_SCRATCH11 0x00e9 +#define mmUVD_GP_SCRATCH11_BASE_IDX 1 +#define mmUVD_GP_SCRATCH12 0x00ea +#define mmUVD_GP_SCRATCH12_BASE_IDX 1 +#define mmUVD_GP_SCRATCH13 0x00eb +#define mmUVD_GP_SCRATCH13_BASE_IDX 1 +#define mmUVD_GP_SCRATCH14 0x00ec +#define mmUVD_GP_SCRATCH14_BASE_IDX 1 +#define mmUVD_GP_SCRATCH15 0x00ed +#define mmUVD_GP_SCRATCH15_BASE_IDX 1 +#define mmUVD_GP_SCRATCH16 0x00ee +#define mmUVD_GP_SCRATCH16_BASE_IDX 1 +#define mmUVD_GP_SCRATCH17 0x00ef +#define mmUVD_GP_SCRATCH17_BASE_IDX 1 +#define mmUVD_GP_SCRATCH18 0x00f0 +#define mmUVD_GP_SCRATCH18_BASE_IDX 1 +#define mmUVD_GP_SCRATCH19 0x00f1 +#define mmUVD_GP_SCRATCH19_BASE_IDX 1 +#define mmUVD_GP_SCRATCH20 0x00f2 +#define mmUVD_GP_SCRATCH20_BASE_IDX 1 +#define mmUVD_GP_SCRATCH21 0x00f3 +#define mmUVD_GP_SCRATCH21_BASE_IDX 1 +#define mmUVD_GP_SCRATCH22 0x00f4 +#define mmUVD_GP_SCRATCH22_BASE_IDX 1 +#define mmUVD_GP_SCRATCH23 0x00f5 +#define mmUVD_GP_SCRATCH23_BASE_IDX 1 + + +// addressBlock: uvd0_ecpudec +// base address: 0x1fd00 +#define mmUVD_VCPU_CACHE_OFFSET0 0x0140 +#define mmUVD_VCPU_CACHE_OFFSET0_BASE_IDX 1 +#define mmUVD_VCPU_CACHE_SIZE0 0x0141 +#define mmUVD_VCPU_CACHE_SIZE0_BASE_IDX 1 +#define mmUVD_VCPU_CACHE_OFFSET1 0x0142 +#define mmUVD_VCPU_CACHE_OFFSET1_BASE_IDX 1 +#define mmUVD_VCPU_CACHE_SIZE1 0x0143 +#define mmUVD_VCPU_CACHE_SIZE1_BASE_IDX 1 +#define mmUVD_VCPU_CACHE_OFFSET2 0x0144 +#define mmUVD_VCPU_CACHE_OFFSET2_BASE_IDX 1 +#define mmUVD_VCPU_CACHE_SIZE2 0x0145 +#define mmUVD_VCPU_CACHE_SIZE2_BASE_IDX 1 +#define mmUVD_VCPU_CACHE_OFFSET3 0x0146 +#define mmUVD_VCPU_CACHE_OFFSET3_BASE_IDX 1 +#define mmUVD_VCPU_CACHE_SIZE3 0x0147 +#define mmUVD_VCPU_CACHE_SIZE3_BASE_IDX 1 +#define mmUVD_VCPU_CACHE_OFFSET4 0x0148 +#define mmUVD_VCPU_CACHE_OFFSET4_BASE_IDX 1 +#define mmUVD_VCPU_CACHE_SIZE4 0x0149 +#define mmUVD_VCPU_CACHE_SIZE4_BASE_IDX 1 +#define mmUVD_VCPU_CACHE_OFFSET5 0x014a +#define mmUVD_VCPU_CACHE_OFFSET5_BASE_IDX 1 +#define mmUVD_VCPU_CACHE_SIZE5 0x014b +#define mmUVD_VCPU_CACHE_SIZE5_BASE_IDX 1 +#define mmUVD_VCPU_CACHE_OFFSET6 0x014c +#define mmUVD_VCPU_CACHE_OFFSET6_BASE_IDX 1 +#define mmUVD_VCPU_CACHE_SIZE6 0x014d +#define mmUVD_VCPU_CACHE_SIZE6_BASE_IDX 1 +#define mmUVD_VCPU_CACHE_OFFSET7 0x014e +#define mmUVD_VCPU_CACHE_OFFSET7_BASE_IDX 1 +#define mmUVD_VCPU_CACHE_SIZE7 0x014f +#define mmUVD_VCPU_CACHE_SIZE7_BASE_IDX 1 +#define mmUVD_VCPU_CACHE_OFFSET8 0x0150 +#define mmUVD_VCPU_CACHE_OFFSET8_BASE_IDX 1 +#define mmUVD_VCPU_CACHE_SIZE8 0x0151 +#define mmUVD_VCPU_CACHE_SIZE8_BASE_IDX 1 +#define mmUVD_VCPU_NONCACHE_OFFSET0 0x0152 +#define mmUVD_VCPU_NONCACHE_OFFSET0_BASE_IDX 1 +#define mmUVD_VCPU_NONCACHE_SIZE0 0x0153 +#define mmUVD_VCPU_NONCACHE_SIZE0_BASE_IDX 1 +#define mmUVD_VCPU_NONCACHE_OFFSET1 0x0154 +#define mmUVD_VCPU_NONCACHE_OFFSET1_BASE_IDX 1 +#define mmUVD_VCPU_NONCACHE_SIZE1 0x0155 +#define mmUVD_VCPU_NONCACHE_SIZE1_BASE_IDX 1 +#define mmUVD_VCPU_CNTL 0x0156 +#define mmUVD_VCPU_CNTL_BASE_IDX 1 +#define mmUVD_VCPU_PRID 0x0157 +#define mmUVD_VCPU_PRID_BASE_IDX 1 +#define mmUVD_VCPU_TRCE 0x0158 +#define mmUVD_VCPU_TRCE_BASE_IDX 1 +#define mmUVD_VCPU_TRCE_RD 0x0159 +#define mmUVD_VCPU_TRCE_RD_BASE_IDX 1 + + +// addressBlock: uvd0_uvd_mpcdec +// base address: 0x20310 +#define mmUVD_MP_SWAP_CNTL 0x02c4 +#define mmUVD_MP_SWAP_CNTL_BASE_IDX 1 +#define mmUVD_MP_SWAP_CNTL2 0x02c5 +#define mmUVD_MP_SWAP_CNTL2_BASE_IDX 1 +#define mmUVD_MPC_LUMA_SRCH 0x02c6 +#define 
mmUVD_MPC_LUMA_SRCH_BASE_IDX 1 +#define mmUVD_MPC_LUMA_HIT 0x02c7 +#define mmUVD_MPC_LUMA_HIT_BASE_IDX 1 +#define mmUVD_MPC_LUMA_HITPEND 0x02c8 +#define mmUVD_MPC_LUMA_HITPEND_BASE_IDX 1 +#define mmUVD_MPC_CHROMA_SRCH 0x02c9 +#define mmUVD_MPC_CHROMA_SRCH_BASE_IDX 1 +#define mmUVD_MPC_CHROMA_HIT 0x02ca +#define mmUVD_MPC_CHROMA_HIT_BASE_IDX 1 +#define mmUVD_MPC_CHROMA_HITPEND 0x02cb +#define mmUVD_MPC_CHROMA_HITPEND_BASE_IDX 1 +#define mmUVD_MPC_CNTL 0x02cc +#define mmUVD_MPC_CNTL_BASE_IDX 1 +#define mmUVD_MPC_PITCH 0x02cd +#define mmUVD_MPC_PITCH_BASE_IDX 1 +#define mmUVD_MPC_SET_MUXA0 0x02ce +#define mmUVD_MPC_SET_MUXA0_BASE_IDX 1 +#define mmUVD_MPC_SET_MUXA1 0x02cf +#define mmUVD_MPC_SET_MUXA1_BASE_IDX 1 +#define mmUVD_MPC_SET_MUXB0 0x02d0 +#define mmUVD_MPC_SET_MUXB0_BASE_IDX 1 +#define mmUVD_MPC_SET_MUXB1 0x02d1 +#define mmUVD_MPC_SET_MUXB1_BASE_IDX 1 +#define mmUVD_MPC_SET_MUX 0x02d2 +#define mmUVD_MPC_SET_MUX_BASE_IDX 1 +#define mmUVD_MPC_SET_ALU 0x02d3 +#define mmUVD_MPC_SET_ALU_BASE_IDX 1 +#define mmUVD_MPC_PERF0 0x02d4 +#define mmUVD_MPC_PERF0_BASE_IDX 1 +#define mmUVD_MPC_PERF1 0x02d5 +#define mmUVD_MPC_PERF1_BASE_IDX 1 + + +// addressBlock: uvd0_uvd_rbcdec +// base address: 0x20370 +#define mmUVD_RBC_IB_SIZE 0x02dc +#define mmUVD_RBC_IB_SIZE_BASE_IDX 1 +#define mmUVD_RBC_IB_SIZE_UPDATE 0x02dd +#define mmUVD_RBC_IB_SIZE_UPDATE_BASE_IDX 1 +#define mmUVD_RBC_RB_CNTL 0x02de +#define mmUVD_RBC_RB_CNTL_BASE_IDX 1 +#define mmUVD_RBC_RB_RPTR_ADDR 0x02df +#define mmUVD_RBC_RB_RPTR_ADDR_BASE_IDX 1 +#define mmUVD_RBC_RB_RPTR 0x02e0 +#define mmUVD_RBC_RB_RPTR_BASE_IDX 1 +#define mmUVD_RBC_RB_WPTR 0x02e1 +#define mmUVD_RBC_RB_WPTR_BASE_IDX 1 +#define mmUVD_RBC_VCPU_ACCESS 0x02e2 +#define mmUVD_RBC_VCPU_ACCESS_BASE_IDX 1 +#define mmUVD_RBC_READ_REQ_URGENT_CNTL 0x02e5 +#define mmUVD_RBC_READ_REQ_URGENT_CNTL_BASE_IDX 1 +#define mmUVD_RBC_RB_WPTR_CNTL 0x02e6 +#define mmUVD_RBC_RB_WPTR_CNTL_BASE_IDX 1 +#define mmUVD_RBC_WPTR_STATUS 0x02e7 +#define mmUVD_RBC_WPTR_STATUS_BASE_IDX 1 +#define mmUVD_RBC_WPTR_POLL_CNTL 0x02e8 +#define mmUVD_RBC_WPTR_POLL_CNTL_BASE_IDX 1 +#define mmUVD_RBC_WPTR_POLL_ADDR 0x02e9 +#define mmUVD_RBC_WPTR_POLL_ADDR_BASE_IDX 1 +#define mmUVD_SEMA_CMD 0x02ea +#define mmUVD_SEMA_CMD_BASE_IDX 1 +#define mmUVD_SEMA_ADDR_LOW 0x02eb +#define mmUVD_SEMA_ADDR_LOW_BASE_IDX 1 +#define mmUVD_SEMA_ADDR_HIGH 0x02ec +#define mmUVD_SEMA_ADDR_HIGH_BASE_IDX 1 +#define mmUVD_ENGINE_CNTL 0x02ed +#define mmUVD_ENGINE_CNTL_BASE_IDX 1 +#define mmUVD_SEMA_TIMEOUT_STATUS 0x02ee +#define mmUVD_SEMA_TIMEOUT_STATUS_BASE_IDX 1 +#define mmUVD_SEMA_CNTL 0x02ef +#define mmUVD_SEMA_CNTL_BASE_IDX 1 +#define mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL 0x02f0 +#define mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL_BASE_IDX 1 +#define mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL 0x02f1 +#define mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL_BASE_IDX 1 +#define mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL 0x02f2 +#define mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL_BASE_IDX 1 +#define mmUVD_JOB_START 0x02f3 +#define mmUVD_JOB_START_BASE_IDX 1 +#define mmUVD_RBC_BUF_STATUS 0x02f4 +#define mmUVD_RBC_BUF_STATUS_BASE_IDX 1 + + +// addressBlock: uvd0_uvdgendec +// base address: 0x20470 +#define mmUVD_LCM_CGC_CNTRL 0x033f +#define mmUVD_LCM_CGC_CNTRL_BASE_IDX 1 +#define mmUVD_MIF_CURR_UV_ADDR_CONFIG 0x03a0 +#define mmUVD_MIF_CURR_UV_ADDR_CONFIG_BASE_IDX 1 +#define mmUVD_MIF_REF_UV_ADDR_CONFIG 0x03a1 +#define mmUVD_MIF_REF_UV_ADDR_CONFIG_BASE_IDX 1 +#define mmUVD_MIF_RECON1_UV_ADDR_CONFIG 0x03a2 +#define mmUVD_MIF_RECON1_UV_ADDR_CONFIG_BASE_IDX 1 +#define 
mmUVD_MIF_CURR_ADDR_CONFIG 0x03ae +#define mmUVD_MIF_CURR_ADDR_CONFIG_BASE_IDX 1 +#define mmUVD_MIF_REF_ADDR_CONFIG 0x03af +#define mmUVD_MIF_REF_ADDR_CONFIG_BASE_IDX 1 +#define mmUVD_MIF_RECON1_ADDR_CONFIG 0x03e1 +#define mmUVD_MIF_RECON1_ADDR_CONFIG_BASE_IDX 1 + + +// addressBlock: uvd0_lmi_adpdec +// base address: 0x20870 +#define mmUVD_LMI_RBC_RB_64BIT_BAR_LOW 0x0432 +#define mmUVD_LMI_RBC_RB_64BIT_BAR_LOW_BASE_IDX 1 +#define mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH 0x0433 +#define mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH_BASE_IDX 1 +#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW 0x0434 +#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_BASE_IDX 1 +#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH 0x0435 +#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_BASE_IDX 1 +#define mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW 0x0438 +#define mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW_BASE_IDX 1 +#define mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH 0x0439 +#define mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH_BASE_IDX 1 +#define mmUVD_LMI_VCPU_NC1_64BIT_BAR_LOW 0x043a +#define mmUVD_LMI_VCPU_NC1_64BIT_BAR_LOW_BASE_IDX 1 +#define mmUVD_LMI_VCPU_NC1_64BIT_BAR_HIGH 0x043b +#define mmUVD_LMI_VCPU_NC1_64BIT_BAR_HIGH_BASE_IDX 1 +#define mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW 0x043c +#define mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW_BASE_IDX 1 +#define mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH 0x043d +#define mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH_BASE_IDX 1 +#define mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW 0x0468 +#define mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW_BASE_IDX 1 +#define mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH 0x0469 +#define mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH_BASE_IDX 1 +#define mmUVD_LMI_VCPU_CACHE8_64BIT_BAR_LOW 0x046a +#define mmUVD_LMI_VCPU_CACHE8_64BIT_BAR_LOW_BASE_IDX 1 +#define mmUVD_LMI_VCPU_CACHE8_64BIT_BAR_HIGH 0x046b +#define mmUVD_LMI_VCPU_CACHE8_64BIT_BAR_HIGH_BASE_IDX 1 +#define mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW 0x046c +#define mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW_BASE_IDX 1 +#define mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH 0x046d +#define mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH_BASE_IDX 1 +#define mmUVD_LMI_VCPU_CACHE3_64BIT_BAR_LOW 0x046e +#define mmUVD_LMI_VCPU_CACHE3_64BIT_BAR_LOW_BASE_IDX 1 +#define mmUVD_LMI_VCPU_CACHE3_64BIT_BAR_HIGH 0x046f +#define mmUVD_LMI_VCPU_CACHE3_64BIT_BAR_HIGH_BASE_IDX 1 +#define mmUVD_LMI_VCPU_CACHE4_64BIT_BAR_LOW 0x0470 +#define mmUVD_LMI_VCPU_CACHE4_64BIT_BAR_LOW_BASE_IDX 1 +#define mmUVD_LMI_VCPU_CACHE4_64BIT_BAR_HIGH 0x0471 +#define mmUVD_LMI_VCPU_CACHE4_64BIT_BAR_HIGH_BASE_IDX 1 +#define mmUVD_LMI_VCPU_CACHE5_64BIT_BAR_LOW 0x0472 +#define mmUVD_LMI_VCPU_CACHE5_64BIT_BAR_LOW_BASE_IDX 1 +#define mmUVD_LMI_VCPU_CACHE5_64BIT_BAR_HIGH 0x0473 +#define mmUVD_LMI_VCPU_CACHE5_64BIT_BAR_HIGH_BASE_IDX 1 +#define mmUVD_LMI_VCPU_CACHE6_64BIT_BAR_LOW 0x0474 +#define mmUVD_LMI_VCPU_CACHE6_64BIT_BAR_LOW_BASE_IDX 1 +#define mmUVD_LMI_VCPU_CACHE6_64BIT_BAR_HIGH 0x0475 +#define mmUVD_LMI_VCPU_CACHE6_64BIT_BAR_HIGH_BASE_IDX 1 +#define mmUVD_LMI_VCPU_CACHE7_64BIT_BAR_LOW 0x0476 +#define mmUVD_LMI_VCPU_CACHE7_64BIT_BAR_LOW_BASE_IDX 1 +#define mmUVD_LMI_VCPU_CACHE7_64BIT_BAR_HIGH 0x0477 +#define mmUVD_LMI_VCPU_CACHE7_64BIT_BAR_HIGH_BASE_IDX 1 +#define mmUVD_LMI_SPH_64BIT_BAR_HIGH 0x047c +#define mmUVD_LMI_SPH_64BIT_BAR_HIGH_BASE_IDX 1 +#define mmUVD_LMI_MMSCH_NC0_64BIT_BAR_LOW 0x047d +#define mmUVD_LMI_MMSCH_NC0_64BIT_BAR_LOW_BASE_IDX 1 +#define mmUVD_LMI_MMSCH_NC0_64BIT_BAR_HIGH 0x047e +#define mmUVD_LMI_MMSCH_NC0_64BIT_BAR_HIGH_BASE_IDX 1 +#define mmUVD_LMI_MMSCH_NC1_64BIT_BAR_LOW 0x047f +#define mmUVD_LMI_MMSCH_NC1_64BIT_BAR_LOW_BASE_IDX 1 +#define mmUVD_LMI_MMSCH_NC1_64BIT_BAR_HIGH 0x0480 
+#define mmUVD_LMI_MMSCH_NC1_64BIT_BAR_HIGH_BASE_IDX 1 +#define mmUVD_LMI_MMSCH_NC2_64BIT_BAR_LOW 0x0481 +#define mmUVD_LMI_MMSCH_NC2_64BIT_BAR_LOW_BASE_IDX 1 +#define mmUVD_LMI_MMSCH_NC2_64BIT_BAR_HIGH 0x0482 +#define mmUVD_LMI_MMSCH_NC2_64BIT_BAR_HIGH_BASE_IDX 1 +#define mmUVD_LMI_MMSCH_NC3_64BIT_BAR_LOW 0x0483 +#define mmUVD_LMI_MMSCH_NC3_64BIT_BAR_LOW_BASE_IDX 1 +#define mmUVD_LMI_MMSCH_NC3_64BIT_BAR_HIGH 0x0484 +#define mmUVD_LMI_MMSCH_NC3_64BIT_BAR_HIGH_BASE_IDX 1 +#define mmUVD_LMI_MMSCH_NC4_64BIT_BAR_LOW 0x0485 +#define mmUVD_LMI_MMSCH_NC4_64BIT_BAR_LOW_BASE_IDX 1 +#define mmUVD_LMI_MMSCH_NC4_64BIT_BAR_HIGH 0x0486 +#define mmUVD_LMI_MMSCH_NC4_64BIT_BAR_HIGH_BASE_IDX 1 +#define mmUVD_LMI_MMSCH_NC5_64BIT_BAR_LOW 0x0487 +#define mmUVD_LMI_MMSCH_NC5_64BIT_BAR_LOW_BASE_IDX 1 +#define mmUVD_LMI_MMSCH_NC5_64BIT_BAR_HIGH 0x0488 +#define mmUVD_LMI_MMSCH_NC5_64BIT_BAR_HIGH_BASE_IDX 1 +#define mmUVD_LMI_MMSCH_NC6_64BIT_BAR_LOW 0x0489 +#define mmUVD_LMI_MMSCH_NC6_64BIT_BAR_LOW_BASE_IDX 1 +#define mmUVD_LMI_MMSCH_NC6_64BIT_BAR_HIGH 0x048a +#define mmUVD_LMI_MMSCH_NC6_64BIT_BAR_HIGH_BASE_IDX 1 +#define mmUVD_LMI_MMSCH_NC7_64BIT_BAR_LOW 0x048b +#define mmUVD_LMI_MMSCH_NC7_64BIT_BAR_LOW_BASE_IDX 1 +#define mmUVD_LMI_MMSCH_NC7_64BIT_BAR_HIGH 0x048c +#define mmUVD_LMI_MMSCH_NC7_64BIT_BAR_HIGH_BASE_IDX 1 +#define mmUVD_LMI_MMSCH_NC_VMID 0x048d +#define mmUVD_LMI_MMSCH_NC_VMID_BASE_IDX 1 +#define mmUVD_LMI_MMSCH_CTRL 0x048e +#define mmUVD_LMI_MMSCH_CTRL_BASE_IDX 1 +#define mmUVD_LMI_ARB_CTRL2 0x049a +#define mmUVD_LMI_ARB_CTRL2_BASE_IDX 1 +#define mmUVD_LMI_VCPU_CACHE_VMIDS_MULTI 0x049f +#define mmUVD_LMI_VCPU_CACHE_VMIDS_MULTI_BASE_IDX 1 +#define mmUVD_LMI_VCPU_NC_VMIDS_MULTI 0x04a0 +#define mmUVD_LMI_VCPU_NC_VMIDS_MULTI_BASE_IDX 1 +#define mmUVD_LMI_LAT_CTRL 0x04a1 +#define mmUVD_LMI_LAT_CTRL_BASE_IDX 1 +#define mmUVD_LMI_LAT_CNTR 0x04a2 +#define mmUVD_LMI_LAT_CNTR_BASE_IDX 1 +#define mmUVD_LMI_AVG_LAT_CNTR 0x04a3 +#define mmUVD_LMI_AVG_LAT_CNTR_BASE_IDX 1 +#define mmUVD_LMI_SPH 0x04a4 +#define mmUVD_LMI_SPH_BASE_IDX 1 +#define mmUVD_LMI_VCPU_CACHE_VMID 0x04a5 +#define mmUVD_LMI_VCPU_CACHE_VMID_BASE_IDX 1 +#define mmUVD_LMI_CTRL2 0x04a6 +#define mmUVD_LMI_CTRL2_BASE_IDX 1 +#define mmUVD_LMI_URGENT_CTRL 0x04a7 +#define mmUVD_LMI_URGENT_CTRL_BASE_IDX 1 +#define mmUVD_LMI_CTRL 0x04a8 +#define mmUVD_LMI_CTRL_BASE_IDX 1 +#define mmUVD_LMI_STATUS 0x04a9 +#define mmUVD_LMI_STATUS_BASE_IDX 1 +#define mmUVD_LMI_PERFMON_CTRL 0x04ac +#define mmUVD_LMI_PERFMON_CTRL_BASE_IDX 1 +#define mmUVD_LMI_PERFMON_COUNT_LO 0x04ad +#define mmUVD_LMI_PERFMON_COUNT_LO_BASE_IDX 1 +#define mmUVD_LMI_PERFMON_COUNT_HI 0x04ae +#define mmUVD_LMI_PERFMON_COUNT_HI_BASE_IDX 1 +#define mmUVD_LMI_RBC_RB_VMID 0x04b0 +#define mmUVD_LMI_RBC_RB_VMID_BASE_IDX 1 +#define mmUVD_LMI_RBC_IB_VMID 0x04b1 +#define mmUVD_LMI_RBC_IB_VMID_BASE_IDX 1 +#define mmUVD_LMI_MC_CREDITS 0x04b2 +#define mmUVD_LMI_MC_CREDITS_BASE_IDX 1 + + +// addressBlock: uvd0_uvdnpdec +// base address: 0x20bd0 +#define mmMDM_DMA_CMD 0x06f4 +#define mmMDM_DMA_CMD_BASE_IDX 1 +#define mmMDM_DMA_STATUS 0x06f5 +#define mmMDM_DMA_STATUS_BASE_IDX 1 +#define mmMDM_DMA_CTL 0x06f6 +#define mmMDM_DMA_CTL_BASE_IDX 1 +#define mmMDM_ENC_PIPE_BUSY 0x06f7 +#define mmMDM_ENC_PIPE_BUSY_BASE_IDX 1 +#define mmMDM_WIG_PIPE_BUSY 0x06f9 +#define mmMDM_WIG_PIPE_BUSY_BASE_IDX 1 + + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_sh_mask.h new file mode 100644 index 000000000000..c41c59c30006 --- /dev/null +++ 
b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_sh_mask.h @@ -0,0 +1,3609 @@ +/* + * Copyright (C) 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _vcn_2_5_SH_MASK_HEADER +#define _vcn_2_5_SH_MASK_HEADER + +// addressBlock: uvd0_mmsch_dec +//MMSCH_UCODE_ADDR +#define MMSCH_UCODE_ADDR__UCODE_ADDR__SHIFT 0x2 +#define MMSCH_UCODE_ADDR__UCODE_LOCK__SHIFT 0x1f +#define MMSCH_UCODE_ADDR__UCODE_ADDR_MASK 0x00003FFCL +#define MMSCH_UCODE_ADDR__UCODE_LOCK_MASK 0x80000000L +//MMSCH_UCODE_DATA +#define MMSCH_UCODE_DATA__UCODE_DATA__SHIFT 0x0 +#define MMSCH_UCODE_DATA__UCODE_DATA_MASK 0xFFFFFFFFL +//MMSCH_SRAM_ADDR +#define MMSCH_SRAM_ADDR__SRAM_ADDR__SHIFT 0x2 +#define MMSCH_SRAM_ADDR__SRAM_LOCK__SHIFT 0x1f +#define MMSCH_SRAM_ADDR__SRAM_ADDR_MASK 0x00001FFCL +#define MMSCH_SRAM_ADDR__SRAM_LOCK_MASK 0x80000000L +//MMSCH_SRAM_DATA +#define MMSCH_SRAM_DATA__SRAM_DATA__SHIFT 0x0 +#define MMSCH_SRAM_DATA__SRAM_DATA_MASK 0xFFFFFFFFL +//MMSCH_VF_SRAM_OFFSET +#define MMSCH_VF_SRAM_OFFSET__VF_SRAM_OFFSET__SHIFT 0x2 +#define MMSCH_VF_SRAM_OFFSET__VF_SRAM_NUM_DW_PER_VF__SHIFT 0x10 +#define MMSCH_VF_SRAM_OFFSET__VF_SRAM_OFFSET_MASK 0x00001FFCL +#define MMSCH_VF_SRAM_OFFSET__VF_SRAM_NUM_DW_PER_VF_MASK 0x00FF0000L +//MMSCH_DB_SRAM_OFFSET +#define MMSCH_DB_SRAM_OFFSET__DB_SRAM_OFFSET__SHIFT 0x2 +#define MMSCH_DB_SRAM_OFFSET__DB_SRAM_NUM_ENG__SHIFT 0x10 +#define MMSCH_DB_SRAM_OFFSET__DB_SRAM_NUM_RING_PER_ENG__SHIFT 0x18 +#define MMSCH_DB_SRAM_OFFSET__DB_SRAM_OFFSET_MASK 0x00001FFCL +#define MMSCH_DB_SRAM_OFFSET__DB_SRAM_NUM_ENG_MASK 0x00FF0000L +#define MMSCH_DB_SRAM_OFFSET__DB_SRAM_NUM_RING_PER_ENG_MASK 0xFF000000L +//MMSCH_CTX_SRAM_OFFSET +#define MMSCH_CTX_SRAM_OFFSET__CTX_SRAM_OFFSET__SHIFT 0x2 +#define MMSCH_CTX_SRAM_OFFSET__CTX_SRAM_SIZE__SHIFT 0x10 +#define MMSCH_CTX_SRAM_OFFSET__CTX_SRAM_OFFSET_MASK 0x00001FFCL +#define MMSCH_CTX_SRAM_OFFSET__CTX_SRAM_SIZE_MASK 0xFFFF0000L +//MMSCH_CTL +#define MMSCH_CTL__P_RUNSTALL__SHIFT 0x0 +#define MMSCH_CTL__P_RESET__SHIFT 0x1 +#define MMSCH_CTL__VFID_FIFO_EN__SHIFT 0x4 +#define MMSCH_CTL__P_LOCK__SHIFT 0x1f +#define MMSCH_CTL__P_RUNSTALL_MASK 0x00000001L +#define MMSCH_CTL__P_RESET_MASK 0x00000002L +#define MMSCH_CTL__VFID_FIFO_EN_MASK 0x00000010L +#define MMSCH_CTL__P_LOCK_MASK 0x80000000L +//MMSCH_INTR +#define MMSCH_INTR__INTR__SHIFT 0x0 +#define MMSCH_INTR__INTR_MASK 0x00001FFFL +//MMSCH_INTR_ACK +#define MMSCH_INTR_ACK__INTR__SHIFT 0x0 +#define MMSCH_INTR_ACK__INTR_MASK 0x00001FFFL 
+//MMSCH_INTR_STATUS +#define MMSCH_INTR_STATUS__INTR__SHIFT 0x0 +#define MMSCH_INTR_STATUS__INTR_MASK 0x00001FFFL +//MMSCH_VF_VMID +#define MMSCH_VF_VMID__VF_CTX_VMID__SHIFT 0x0 +#define MMSCH_VF_VMID__VF_GPCOM_VMID__SHIFT 0x5 +#define MMSCH_VF_VMID__VF_CTX_VMID_MASK 0x0000001FL +#define MMSCH_VF_VMID__VF_GPCOM_VMID_MASK 0x000003E0L +//MMSCH_VF_CTX_ADDR_LO +#define MMSCH_VF_CTX_ADDR_LO__VF_CTX_ADDR_LO__SHIFT 0x6 +#define MMSCH_VF_CTX_ADDR_LO__VF_CTX_ADDR_LO_MASK 0xFFFFFFC0L +//MMSCH_VF_CTX_ADDR_HI +#define MMSCH_VF_CTX_ADDR_HI__VF_CTX_ADDR_HI__SHIFT 0x0 +#define MMSCH_VF_CTX_ADDR_HI__VF_CTX_ADDR_HI_MASK 0xFFFFFFFFL +//MMSCH_VF_CTX_SIZE +#define MMSCH_VF_CTX_SIZE__VF_CTX_SIZE__SHIFT 0x0 +#define MMSCH_VF_CTX_SIZE__VF_CTX_SIZE_MASK 0xFFFFFFFFL +//MMSCH_VF_GPCOM_ADDR_LO +#define MMSCH_VF_GPCOM_ADDR_LO__VF_GPCOM_ADDR_LO__SHIFT 0x6 +#define MMSCH_VF_GPCOM_ADDR_LO__VF_GPCOM_ADDR_LO_MASK 0xFFFFFFC0L +//MMSCH_VF_GPCOM_ADDR_HI +#define MMSCH_VF_GPCOM_ADDR_HI__VF_GPCOM_ADDR_HI__SHIFT 0x0 +#define MMSCH_VF_GPCOM_ADDR_HI__VF_GPCOM_ADDR_HI_MASK 0xFFFFFFFFL +//MMSCH_VF_GPCOM_SIZE +#define MMSCH_VF_GPCOM_SIZE__VF_GPCOM_SIZE__SHIFT 0x0 +#define MMSCH_VF_GPCOM_SIZE__VF_GPCOM_SIZE_MASK 0xFFFFFFFFL +//MMSCH_VF_MAILBOX_HOST +#define MMSCH_VF_MAILBOX_HOST__DATA__SHIFT 0x0 +#define MMSCH_VF_MAILBOX_HOST__DATA_MASK 0xFFFFFFFFL +//MMSCH_VF_MAILBOX_RESP +#define MMSCH_VF_MAILBOX_RESP__RESP__SHIFT 0x0 +#define MMSCH_VF_MAILBOX_RESP__RESP_MASK 0xFFFFFFFFL +//MMSCH_VF_MAILBOX_0 +#define MMSCH_VF_MAILBOX_0__DATA__SHIFT 0x0 +#define MMSCH_VF_MAILBOX_0__DATA_MASK 0xFFFFFFFFL +//MMSCH_VF_MAILBOX_0_RESP +#define MMSCH_VF_MAILBOX_0_RESP__RESP__SHIFT 0x0 +#define MMSCH_VF_MAILBOX_0_RESP__RESP_MASK 0xFFFFFFFFL +//MMSCH_VF_MAILBOX_1 +#define MMSCH_VF_MAILBOX_1__DATA__SHIFT 0x0 +#define MMSCH_VF_MAILBOX_1__DATA_MASK 0xFFFFFFFFL +//MMSCH_VF_MAILBOX_1_RESP +#define MMSCH_VF_MAILBOX_1_RESP__RESP__SHIFT 0x0 +#define MMSCH_VF_MAILBOX_1_RESP__RESP_MASK 0xFFFFFFFFL +//MMSCH_CNTL +#define MMSCH_CNTL__CLK_EN__SHIFT 0x0 +#define MMSCH_CNTL__ED_ENABLE__SHIFT 0x1 +#define MMSCH_CNTL__MMSCH_IRQ_ERR__SHIFT 0x5 +#define MMSCH_CNTL__MMSCH_NACK_INTR_EN__SHIFT 0x9 +#define MMSCH_CNTL__MMSCH_DB_BUSY_INTR_EN__SHIFT 0xa +#define MMSCH_CNTL__PRB_TIMEOUT_VAL__SHIFT 0x14 +#define MMSCH_CNTL__TIMEOUT_DIS__SHIFT 0x1c +#define MMSCH_CNTL__CLK_EN_MASK 0x00000001L +#define MMSCH_CNTL__ED_ENABLE_MASK 0x00000002L +#define MMSCH_CNTL__MMSCH_IRQ_ERR_MASK 0x000001E0L +#define MMSCH_CNTL__MMSCH_NACK_INTR_EN_MASK 0x00000200L +#define MMSCH_CNTL__MMSCH_DB_BUSY_INTR_EN_MASK 0x00000400L +#define MMSCH_CNTL__PRB_TIMEOUT_VAL_MASK 0x0FF00000L +#define MMSCH_CNTL__TIMEOUT_DIS_MASK 0x10000000L +//MMSCH_NONCACHE_OFFSET0 +#define MMSCH_NONCACHE_OFFSET0__OFFSET__SHIFT 0x0 +#define MMSCH_NONCACHE_OFFSET0__OFFSET_MASK 0x0FFFFFFFL +//MMSCH_NONCACHE_SIZE0 +#define MMSCH_NONCACHE_SIZE0__SIZE__SHIFT 0x0 +#define MMSCH_NONCACHE_SIZE0__SIZE_MASK 0x00FFFFFFL +//MMSCH_NONCACHE_OFFSET1 +#define MMSCH_NONCACHE_OFFSET1__OFFSET__SHIFT 0x0 +#define MMSCH_NONCACHE_OFFSET1__OFFSET_MASK 0x0FFFFFFFL +//MMSCH_NONCACHE_SIZE1 +#define MMSCH_NONCACHE_SIZE1__SIZE__SHIFT 0x0 +#define MMSCH_NONCACHE_SIZE1__SIZE_MASK 0x00FFFFFFL +//MMSCH_PROC_STATE1 +#define MMSCH_PROC_STATE1__PC__SHIFT 0x0 +#define MMSCH_PROC_STATE1__PC_MASK 0xFFFFFFFFL +//MMSCH_LAST_MC_ADDR +#define MMSCH_LAST_MC_ADDR__MC_ADDR__SHIFT 0x0 +#define MMSCH_LAST_MC_ADDR__RW__SHIFT 0x1f +#define MMSCH_LAST_MC_ADDR__MC_ADDR_MASK 0x0FFFFFFFL +#define MMSCH_LAST_MC_ADDR__RW_MASK 0x80000000L +//MMSCH_LAST_MEM_ACCESS_HI +#define 
MMSCH_LAST_MEM_ACCESS_HI__PROC_CMD__SHIFT 0x0 +#define MMSCH_LAST_MEM_ACCESS_HI__FIFO_RPTR__SHIFT 0x8 +#define MMSCH_LAST_MEM_ACCESS_HI__FIFO_WPTR__SHIFT 0xc +#define MMSCH_LAST_MEM_ACCESS_HI__PROC_CMD_MASK 0x00000007L +#define MMSCH_LAST_MEM_ACCESS_HI__FIFO_RPTR_MASK 0x00000700L +#define MMSCH_LAST_MEM_ACCESS_HI__FIFO_WPTR_MASK 0x00007000L +//MMSCH_LAST_MEM_ACCESS_LO +#define MMSCH_LAST_MEM_ACCESS_LO__PROC_ADDR__SHIFT 0x0 +#define MMSCH_LAST_MEM_ACCESS_LO__PROC_ADDR_MASK 0xFFFFFFFFL +//MMSCH_IOV_ACTIVE_FCN_ID +#define MMSCH_IOV_ACTIVE_FCN_ID__ACTIVE_VF_ID__SHIFT 0x0 +#define MMSCH_IOV_ACTIVE_FCN_ID__ACTIVE_PF_VF__SHIFT 0x1f +#define MMSCH_IOV_ACTIVE_FCN_ID__ACTIVE_VF_ID_MASK 0x0000001FL +#define MMSCH_IOV_ACTIVE_FCN_ID__ACTIVE_PF_VF_MASK 0x80000000L +//MMSCH_SCRATCH_0 +#define MMSCH_SCRATCH_0__SCRATCH_0__SHIFT 0x0 +#define MMSCH_SCRATCH_0__SCRATCH_0_MASK 0xFFFFFFFFL +//MMSCH_SCRATCH_1 +#define MMSCH_SCRATCH_1__SCRATCH_1__SHIFT 0x0 +#define MMSCH_SCRATCH_1__SCRATCH_1_MASK 0xFFFFFFFFL +//MMSCH_GPUIOV_SCH_BLOCK_0 +#define MMSCH_GPUIOV_SCH_BLOCK_0__ID__SHIFT 0x0 +#define MMSCH_GPUIOV_SCH_BLOCK_0__VERSION__SHIFT 0x4 +#define MMSCH_GPUIOV_SCH_BLOCK_0__SIZE__SHIFT 0x8 +#define MMSCH_GPUIOV_SCH_BLOCK_0__ID_MASK 0x0000000FL +#define MMSCH_GPUIOV_SCH_BLOCK_0__VERSION_MASK 0x000000F0L +#define MMSCH_GPUIOV_SCH_BLOCK_0__SIZE_MASK 0x0000FF00L +//MMSCH_GPUIOV_CMD_CONTROL_0 +#define MMSCH_GPUIOV_CMD_CONTROL_0__CMD_TYPE__SHIFT 0x0 +#define MMSCH_GPUIOV_CMD_CONTROL_0__CMD_EXECUTE__SHIFT 0x4 +#define MMSCH_GPUIOV_CMD_CONTROL_0__CMD_EXECUTE_INTR_EN__SHIFT 0x5 +#define MMSCH_GPUIOV_CMD_CONTROL_0__VM_BUSY_INTR_EN__SHIFT 0x6 +#define MMSCH_GPUIOV_CMD_CONTROL_0__FUNCTINO_ID__SHIFT 0x8 +#define MMSCH_GPUIOV_CMD_CONTROL_0__NEXT_FUNCTINO_ID__SHIFT 0x10 +#define MMSCH_GPUIOV_CMD_CONTROL_0__CMD_TYPE_MASK 0x0000000FL +#define MMSCH_GPUIOV_CMD_CONTROL_0__CMD_EXECUTE_MASK 0x00000010L +#define MMSCH_GPUIOV_CMD_CONTROL_0__CMD_EXECUTE_INTR_EN_MASK 0x00000020L +#define MMSCH_GPUIOV_CMD_CONTROL_0__VM_BUSY_INTR_EN_MASK 0x00000040L +#define MMSCH_GPUIOV_CMD_CONTROL_0__FUNCTINO_ID_MASK 0x0000FF00L +#define MMSCH_GPUIOV_CMD_CONTROL_0__NEXT_FUNCTINO_ID_MASK 0x00FF0000L +//MMSCH_GPUIOV_CMD_STATUS_0 +#define MMSCH_GPUIOV_CMD_STATUS_0__CMD_STATUS__SHIFT 0x0 +#define MMSCH_GPUIOV_CMD_STATUS_0__CMD_STATUS_MASK 0x0000000FL +//MMSCH_GPUIOV_VM_BUSY_STATUS_0 +#define MMSCH_GPUIOV_VM_BUSY_STATUS_0__BUSY__SHIFT 0x0 +#define MMSCH_GPUIOV_VM_BUSY_STATUS_0__BUSY_MASK 0xFFFFFFFFL +//MMSCH_GPUIOV_ACTIVE_FCNS_0 +#define MMSCH_GPUIOV_ACTIVE_FCNS_0__ACTIVE_FCNS__SHIFT 0x0 +#define MMSCH_GPUIOV_ACTIVE_FCNS_0__ACTIVE_FCNS_MASK 0xFFFFFFFFL +//MMSCH_GPUIOV_ACTIVE_FCN_ID_0 +#define MMSCH_GPUIOV_ACTIVE_FCN_ID_0__ID__SHIFT 0x0 +#define MMSCH_GPUIOV_ACTIVE_FCN_ID_0__ID_STATUS__SHIFT 0x8 +#define MMSCH_GPUIOV_ACTIVE_FCN_ID_0__ID_MASK 0x000000FFL +#define MMSCH_GPUIOV_ACTIVE_FCN_ID_0__ID_STATUS_MASK 0x00000F00L +//MMSCH_GPUIOV_DW6_0 +#define MMSCH_GPUIOV_DW6_0__DATA__SHIFT 0x0 +#define MMSCH_GPUIOV_DW6_0__DATA_MASK 0xFFFFFFFFL +//MMSCH_GPUIOV_DW7_0 +#define MMSCH_GPUIOV_DW7_0__DATA__SHIFT 0x0 +#define MMSCH_GPUIOV_DW7_0__DATA_MASK 0xFFFFFFFFL +//MMSCH_GPUIOV_DW8_0 +#define MMSCH_GPUIOV_DW8_0__DATA__SHIFT 0x0 +#define MMSCH_GPUIOV_DW8_0__DATA_MASK 0xFFFFFFFFL +//MMSCH_GPUIOV_SCH_BLOCK_1 +#define MMSCH_GPUIOV_SCH_BLOCK_1__ID__SHIFT 0x0 +#define MMSCH_GPUIOV_SCH_BLOCK_1__VERSION__SHIFT 0x4 +#define MMSCH_GPUIOV_SCH_BLOCK_1__SIZE__SHIFT 0x8 +#define MMSCH_GPUIOV_SCH_BLOCK_1__ID_MASK 0x0000000FL +#define MMSCH_GPUIOV_SCH_BLOCK_1__VERSION_MASK 0x000000F0L 
+#define MMSCH_GPUIOV_SCH_BLOCK_1__SIZE_MASK 0x0000FF00L +//MMSCH_GPUIOV_CMD_CONTROL_1 +#define MMSCH_GPUIOV_CMD_CONTROL_1__CMD_TYPE__SHIFT 0x0 +#define MMSCH_GPUIOV_CMD_CONTROL_1__CMD_EXECUTE__SHIFT 0x4 +#define MMSCH_GPUIOV_CMD_CONTROL_1__CMD_EXECUTE_INTR_EN__SHIFT 0x5 +#define MMSCH_GPUIOV_CMD_CONTROL_1__VM_BUSY_INTR_EN__SHIFT 0x6 +#define MMSCH_GPUIOV_CMD_CONTROL_1__FUNCTINO_ID__SHIFT 0x8 +#define MMSCH_GPUIOV_CMD_CONTROL_1__NEXT_FUNCTINO_ID__SHIFT 0x10 +#define MMSCH_GPUIOV_CMD_CONTROL_1__CMD_TYPE_MASK 0x0000000FL +#define MMSCH_GPUIOV_CMD_CONTROL_1__CMD_EXECUTE_MASK 0x00000010L +#define MMSCH_GPUIOV_CMD_CONTROL_1__CMD_EXECUTE_INTR_EN_MASK 0x00000020L +#define MMSCH_GPUIOV_CMD_CONTROL_1__VM_BUSY_INTR_EN_MASK 0x00000040L +#define MMSCH_GPUIOV_CMD_CONTROL_1__FUNCTINO_ID_MASK 0x0000FF00L +#define MMSCH_GPUIOV_CMD_CONTROL_1__NEXT_FUNCTINO_ID_MASK 0x00FF0000L +//MMSCH_GPUIOV_CMD_STATUS_1 +#define MMSCH_GPUIOV_CMD_STATUS_1__CMD_STATUS__SHIFT 0x0 +#define MMSCH_GPUIOV_CMD_STATUS_1__CMD_STATUS_MASK 0x0000000FL +//MMSCH_GPUIOV_VM_BUSY_STATUS_1 +#define MMSCH_GPUIOV_VM_BUSY_STATUS_1__BUSY__SHIFT 0x0 +#define MMSCH_GPUIOV_VM_BUSY_STATUS_1__BUSY_MASK 0xFFFFFFFFL +//MMSCH_GPUIOV_ACTIVE_FCNS_1 +#define MMSCH_GPUIOV_ACTIVE_FCNS_1__ACTIVE_FCNS__SHIFT 0x0 +#define MMSCH_GPUIOV_ACTIVE_FCNS_1__ACTIVE_FCNS_MASK 0xFFFFFFFFL +//MMSCH_GPUIOV_ACTIVE_FCN_ID_1 +#define MMSCH_GPUIOV_ACTIVE_FCN_ID_1__ID__SHIFT 0x0 +#define MMSCH_GPUIOV_ACTIVE_FCN_ID_1__ID_STATUS__SHIFT 0x8 +#define MMSCH_GPUIOV_ACTIVE_FCN_ID_1__ID_MASK 0x000000FFL +#define MMSCH_GPUIOV_ACTIVE_FCN_ID_1__ID_STATUS_MASK 0x00000F00L +//MMSCH_GPUIOV_DW6_1 +#define MMSCH_GPUIOV_DW6_1__DATA__SHIFT 0x0 +#define MMSCH_GPUIOV_DW6_1__DATA_MASK 0xFFFFFFFFL +//MMSCH_GPUIOV_DW7_1 +#define MMSCH_GPUIOV_DW7_1__DATA__SHIFT 0x0 +#define MMSCH_GPUIOV_DW7_1__DATA_MASK 0xFFFFFFFFL +//MMSCH_GPUIOV_DW8_1 +#define MMSCH_GPUIOV_DW8_1__DATA__SHIFT 0x0 +#define MMSCH_GPUIOV_DW8_1__DATA_MASK 0xFFFFFFFFL +//MMSCH_GPUIOV_CNTXT +#define MMSCH_GPUIOV_CNTXT__CNTXT_SIZE__SHIFT 0x0 +#define MMSCH_GPUIOV_CNTXT__CNTXT_LOCATION__SHIFT 0x7 +#define MMSCH_GPUIOV_CNTXT__CNTXT_OFFSET__SHIFT 0xa +#define MMSCH_GPUIOV_CNTXT__CNTXT_SIZE_MASK 0x0000007FL +#define MMSCH_GPUIOV_CNTXT__CNTXT_LOCATION_MASK 0x00000080L +#define MMSCH_GPUIOV_CNTXT__CNTXT_OFFSET_MASK 0xFFFFFC00L +//MMSCH_SCRATCH_2 +#define MMSCH_SCRATCH_2__SCRATCH_2__SHIFT 0x0 +#define MMSCH_SCRATCH_2__SCRATCH_2_MASK 0xFFFFFFFFL +//MMSCH_SCRATCH_3 +#define MMSCH_SCRATCH_3__SCRATCH_3__SHIFT 0x0 +#define MMSCH_SCRATCH_3__SCRATCH_3_MASK 0xFFFFFFFFL +//MMSCH_SCRATCH_4 +#define MMSCH_SCRATCH_4__SCRATCH_4__SHIFT 0x0 +#define MMSCH_SCRATCH_4__SCRATCH_4_MASK 0xFFFFFFFFL +//MMSCH_SCRATCH_5 +#define MMSCH_SCRATCH_5__SCRATCH_5__SHIFT 0x0 +#define MMSCH_SCRATCH_5__SCRATCH_5_MASK 0xFFFFFFFFL +//MMSCH_SCRATCH_6 +#define MMSCH_SCRATCH_6__SCRATCH_6__SHIFT 0x0 +#define MMSCH_SCRATCH_6__SCRATCH_6_MASK 0xFFFFFFFFL +//MMSCH_SCRATCH_7 +#define MMSCH_SCRATCH_7__SCRATCH_7__SHIFT 0x0 +#define MMSCH_SCRATCH_7__SCRATCH_7_MASK 0xFFFFFFFFL +//MMSCH_VFID_FIFO_HEAD_0 +#define MMSCH_VFID_FIFO_HEAD_0__HEAD__SHIFT 0x0 +#define MMSCH_VFID_FIFO_HEAD_0__HEAD_MASK 0x0000003FL +//MMSCH_VFID_FIFO_TAIL_0 +#define MMSCH_VFID_FIFO_TAIL_0__TAIL__SHIFT 0x0 +#define MMSCH_VFID_FIFO_TAIL_0__TAIL_MASK 0x0000003FL +//MMSCH_VFID_FIFO_HEAD_1 +#define MMSCH_VFID_FIFO_HEAD_1__HEAD__SHIFT 0x0 +#define MMSCH_VFID_FIFO_HEAD_1__HEAD_MASK 0x0000003FL +//MMSCH_VFID_FIFO_TAIL_1 +#define MMSCH_VFID_FIFO_TAIL_1__TAIL__SHIFT 0x0 +#define MMSCH_VFID_FIFO_TAIL_1__TAIL_MASK 0x0000003FL 
+//MMSCH_NACK_STATUS
+#define MMSCH_NACK_STATUS__WR_NACK_STATUS__SHIFT 0x0
+#define MMSCH_NACK_STATUS__RD_NACK_STATUS__SHIFT 0x2
+#define MMSCH_NACK_STATUS__WR_NACK_STATUS_MASK 0x00000003L
+#define MMSCH_NACK_STATUS__RD_NACK_STATUS_MASK 0x0000000CL
+//MMSCH_VF_MAILBOX0_DATA
+#define MMSCH_VF_MAILBOX0_DATA__DATA__SHIFT 0x0
+#define MMSCH_VF_MAILBOX0_DATA__DATA_MASK 0xFFFFFFFFL
+//MMSCH_VF_MAILBOX1_DATA
+#define MMSCH_VF_MAILBOX1_DATA__DATA__SHIFT 0x0
+#define MMSCH_VF_MAILBOX1_DATA__DATA_MASK 0xFFFFFFFFL
+//MMSCH_GPUIOV_SCH_BLOCK_IP_0
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_0__ID__SHIFT 0x0
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_0__VERSION__SHIFT 0x4
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_0__SIZE__SHIFT 0x8
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_0__ID_MASK 0x0000000FL
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_0__VERSION_MASK 0x000000F0L
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_0__SIZE_MASK 0x0000FF00L
+//MMSCH_GPUIOV_CMD_STATUS_IP_0
+#define MMSCH_GPUIOV_CMD_STATUS_IP_0__CMD_STATUS__SHIFT 0x0
+#define MMSCH_GPUIOV_CMD_STATUS_IP_0__CMD_STATUS_MASK 0x0000000FL
+//MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_0
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_0__ID__SHIFT 0x0
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_0__ID_STATUS__SHIFT 0x8
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_0__ID_MASK 0x000000FFL
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_0__ID_STATUS_MASK 0x00000F00L
+//MMSCH_GPUIOV_SCH_BLOCK_IP_1
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_1__ID__SHIFT 0x0
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_1__VERSION__SHIFT 0x4
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_1__SIZE__SHIFT 0x8
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_1__ID_MASK 0x0000000FL
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_1__VERSION_MASK 0x000000F0L
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_1__SIZE_MASK 0x0000FF00L
+//MMSCH_GPUIOV_CMD_STATUS_IP_1
+#define MMSCH_GPUIOV_CMD_STATUS_IP_1__CMD_STATUS__SHIFT 0x0
+#define MMSCH_GPUIOV_CMD_STATUS_IP_1__CMD_STATUS_MASK 0x0000000FL
+//MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_1
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_1__ID__SHIFT 0x0
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_1__ID_STATUS__SHIFT 0x8
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_1__ID_MASK 0x000000FFL
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_1__ID_STATUS_MASK 0x00000F00L
+//MMSCH_GPUIOV_CNTXT_IP
+#define MMSCH_GPUIOV_CNTXT_IP__CNTXT_SIZE__SHIFT 0x0
+#define MMSCH_GPUIOV_CNTXT_IP__CNTXT_LOCATION__SHIFT 0x7
+#define MMSCH_GPUIOV_CNTXT_IP__CNTXT_SIZE_MASK 0x0000007FL
+#define MMSCH_GPUIOV_CNTXT_IP__CNTXT_LOCATION_MASK 0x00000080L
+//MMSCH_GPUIOV_SCH_BLOCK_2
+#define MMSCH_GPUIOV_SCH_BLOCK_2__ID__SHIFT 0x0
+#define MMSCH_GPUIOV_SCH_BLOCK_2__VERSION__SHIFT 0x4
+#define MMSCH_GPUIOV_SCH_BLOCK_2__SIZE__SHIFT 0x8
+#define MMSCH_GPUIOV_SCH_BLOCK_2__ID_MASK 0x0000000FL
+#define MMSCH_GPUIOV_SCH_BLOCK_2__VERSION_MASK 0x000000F0L
+#define MMSCH_GPUIOV_SCH_BLOCK_2__SIZE_MASK 0x0000FF00L
+//MMSCH_GPUIOV_CMD_CONTROL_2
+#define MMSCH_GPUIOV_CMD_CONTROL_2__CMD_TYPE__SHIFT 0x0
+#define MMSCH_GPUIOV_CMD_CONTROL_2__CMD_EXECUTE__SHIFT 0x4
+#define MMSCH_GPUIOV_CMD_CONTROL_2__CMD_EXECUTE_INTR_EN__SHIFT 0x5
+#define MMSCH_GPUIOV_CMD_CONTROL_2__VM_BUSY_INTR_EN__SHIFT 0x6
+#define MMSCH_GPUIOV_CMD_CONTROL_2__FUNCTINO_ID__SHIFT 0x8
+#define MMSCH_GPUIOV_CMD_CONTROL_2__NEXT_FUNCTINO_ID__SHIFT 0x10
+#define MMSCH_GPUIOV_CMD_CONTROL_2__CMD_TYPE_MASK 0x0000000FL
+#define MMSCH_GPUIOV_CMD_CONTROL_2__CMD_EXECUTE_MASK 0x00000010L
+#define MMSCH_GPUIOV_CMD_CONTROL_2__CMD_EXECUTE_INTR_EN_MASK 0x00000020L
+#define MMSCH_GPUIOV_CMD_CONTROL_2__VM_BUSY_INTR_EN_MASK 0x00000040L
+#define MMSCH_GPUIOV_CMD_CONTROL_2__FUNCTINO_ID_MASK 0x0000FF00L
+#define MMSCH_GPUIOV_CMD_CONTROL_2__NEXT_FUNCTINO_ID_MASK 0x00FF0000L
+//MMSCH_GPUIOV_CMD_STATUS_2
+#define MMSCH_GPUIOV_CMD_STATUS_2__CMD_STATUS__SHIFT 0x0
+#define MMSCH_GPUIOV_CMD_STATUS_2__CMD_STATUS_MASK 0x0000000FL
+//MMSCH_GPUIOV_VM_BUSY_STATUS_2
+#define MMSCH_GPUIOV_VM_BUSY_STATUS_2__BUSY__SHIFT 0x0
+#define MMSCH_GPUIOV_VM_BUSY_STATUS_2__BUSY_MASK 0xFFFFFFFFL
+//MMSCH_GPUIOV_ACTIVE_FCNS_2
+#define MMSCH_GPUIOV_ACTIVE_FCNS_2__ACTIVE_FCNS__SHIFT 0x0
+#define MMSCH_GPUIOV_ACTIVE_FCNS_2__ACTIVE_FCNS_MASK 0xFFFFFFFFL
+//MMSCH_GPUIOV_ACTIVE_FCN_ID_2
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_2__ID__SHIFT 0x0
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_2__ID_STATUS__SHIFT 0x8
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_2__ID_MASK 0x000000FFL
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_2__ID_STATUS_MASK 0x00000F00L
+//MMSCH_GPUIOV_DW6_2
+#define MMSCH_GPUIOV_DW6_2__DATA__SHIFT 0x0
+#define MMSCH_GPUIOV_DW6_2__DATA_MASK 0xFFFFFFFFL
+//MMSCH_GPUIOV_DW7_2
+#define MMSCH_GPUIOV_DW7_2__DATA__SHIFT 0x0
+#define MMSCH_GPUIOV_DW7_2__DATA_MASK 0xFFFFFFFFL
+//MMSCH_GPUIOV_DW8_2
+#define MMSCH_GPUIOV_DW8_2__DATA__SHIFT 0x0
+#define MMSCH_GPUIOV_DW8_2__DATA_MASK 0xFFFFFFFFL
+//MMSCH_GPUIOV_SCH_BLOCK_IP_2
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_2__ID__SHIFT 0x0
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_2__VERSION__SHIFT 0x4
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_2__SIZE__SHIFT 0x8
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_2__ID_MASK 0x0000000FL
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_2__VERSION_MASK 0x000000F0L
+#define MMSCH_GPUIOV_SCH_BLOCK_IP_2__SIZE_MASK 0x0000FF00L
+//MMSCH_GPUIOV_CMD_STATUS_IP_2
+#define MMSCH_GPUIOV_CMD_STATUS_IP_2__CMD_STATUS__SHIFT 0x0
+#define MMSCH_GPUIOV_CMD_STATUS_IP_2__CMD_STATUS_MASK 0x0000000FL
+//MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_2
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_2__ID__SHIFT 0x0
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_2__ID_STATUS__SHIFT 0x8
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_2__ID_MASK 0x000000FFL
+#define MMSCH_GPUIOV_ACTIVE_FCN_ID_IP_2__ID_STATUS_MASK 0x00000F00L
+//MMSCH_VFID_FIFO_HEAD_2
+#define MMSCH_VFID_FIFO_HEAD_2__HEAD__SHIFT 0x0
+#define MMSCH_VFID_FIFO_HEAD_2__HEAD_MASK 0x0000003FL
+//MMSCH_VFID_FIFO_TAIL_2
+#define MMSCH_VFID_FIFO_TAIL_2__TAIL__SHIFT 0x0
+#define MMSCH_VFID_FIFO_TAIL_2__TAIL_MASK 0x0000003FL
+//MMSCH_VM_BUSY_STATUS_0
+#define MMSCH_VM_BUSY_STATUS_0__BUSY__SHIFT 0x0
+#define MMSCH_VM_BUSY_STATUS_0__BUSY_MASK 0xFFFFFFFFL
+//MMSCH_VM_BUSY_STATUS_1
+#define MMSCH_VM_BUSY_STATUS_1__BUSY__SHIFT 0x0
+#define MMSCH_VM_BUSY_STATUS_1__BUSY_MASK 0xFFFFFFFFL
+//MMSCH_VM_BUSY_STATUS_2
+#define MMSCH_VM_BUSY_STATUS_2__BUSY__SHIFT 0x0
+#define MMSCH_VM_BUSY_STATUS_2__BUSY_MASK 0xFFFFFFFFL
+
+
+// addressBlock: uvd0_jpegnpdec
+//UVD_JPEG_CNTL
+#define UVD_JPEG_CNTL__REQUEST_EN__SHIFT 0x1
+#define UVD_JPEG_CNTL__ERR_RST_EN__SHIFT 0x2
+#define UVD_JPEG_CNTL__HUFF_SPEED_EN__SHIFT 0x3
+#define UVD_JPEG_CNTL__HUFF_SPEED_STATUS__SHIFT 0x4
+#define UVD_JPEG_CNTL__DBG_MUX_SEL__SHIFT 0x8
+#define UVD_JPEG_CNTL__REQUEST_EN_MASK 0x00000002L
+#define UVD_JPEG_CNTL__ERR_RST_EN_MASK 0x00000004L
+#define UVD_JPEG_CNTL__HUFF_SPEED_EN_MASK 0x00000008L
+#define UVD_JPEG_CNTL__HUFF_SPEED_STATUS_MASK 0x00000010L
+#define UVD_JPEG_CNTL__DBG_MUX_SEL_MASK 0x00007F00L
+//UVD_JPEG_RB_BASE
+#define UVD_JPEG_RB_BASE__RB_BYTE_OFF__SHIFT 0x0
+#define UVD_JPEG_RB_BASE__RB_BASE__SHIFT 0x6
+#define UVD_JPEG_RB_BASE__RB_BYTE_OFF_MASK 0x0000003FL
+#define UVD_JPEG_RB_BASE__RB_BASE_MASK 0xFFFFFFC0L
+//UVD_JPEG_RB_WPTR
+#define UVD_JPEG_RB_WPTR__RB_WPTR__SHIFT 0x4
+#define UVD_JPEG_RB_WPTR__RB_WPTR_MASK 0x3FFFFFF0L
+//UVD_JPEG_RB_RPTR
+#define UVD_JPEG_RB_RPTR__RB_RPTR__SHIFT 0x4
+#define UVD_JPEG_RB_RPTR__RB_RPTR_MASK 0x3FFFFFF0L
+//UVD_JPEG_RB_SIZE
+#define UVD_JPEG_RB_SIZE__RB_SIZE__SHIFT 0x4
+#define UVD_JPEG_RB_SIZE__RB_SIZE_MASK 0x3FFFFFF0L
+//UVD_JPEG_DEC_SCRATCH0
+#define UVD_JPEG_DEC_SCRATCH0__SCRATCH0__SHIFT 0x0
+#define UVD_JPEG_DEC_SCRATCH0__SCRATCH0_MASK 0xFFFFFFFFL
+//UVD_JPEG_INT_EN
+#define UVD_JPEG_INT_EN__OUTBUF_WPTR_INC_EN__SHIFT 0x0
+#define UVD_JPEG_INT_EN__JOB_AVAIL_EN__SHIFT 0x1
+#define UVD_JPEG_INT_EN__FENCE_VAL_EN__SHIFT 0x2
+#define UVD_JPEG_INT_EN__FIFO_OVERFLOW_ERR_EN__SHIFT 0x6
+#define UVD_JPEG_INT_EN__BLK_CNT_OUT_OF_SYNC_ERR_EN__SHIFT 0x7
+#define UVD_JPEG_INT_EN__EOI_ERR_EN__SHIFT 0x8
+#define UVD_JPEG_INT_EN__HFM_ERR_EN__SHIFT 0x9
+#define UVD_JPEG_INT_EN__RST_ERR_EN__SHIFT 0xa
+#define UVD_JPEG_INT_EN__ECS_MK_ERR_EN__SHIFT 0xb
+#define UVD_JPEG_INT_EN__TIMEOUT_ERR_EN__SHIFT 0xc
+#define UVD_JPEG_INT_EN__MARKER_ERR_EN__SHIFT 0xd
+#define UVD_JPEG_INT_EN__FMT_ERR_EN__SHIFT 0xe
+#define UVD_JPEG_INT_EN__PROFILE_ERR_EN__SHIFT 0xf
+#define UVD_JPEG_INT_EN__OUTBUF_WPTR_INC_EN_MASK 0x00000001L
+#define UVD_JPEG_INT_EN__JOB_AVAIL_EN_MASK 0x00000002L
+#define UVD_JPEG_INT_EN__FENCE_VAL_EN_MASK 0x00000004L
+#define UVD_JPEG_INT_EN__FIFO_OVERFLOW_ERR_EN_MASK 0x00000040L
+#define UVD_JPEG_INT_EN__BLK_CNT_OUT_OF_SYNC_ERR_EN_MASK 0x00000080L
+#define UVD_JPEG_INT_EN__EOI_ERR_EN_MASK 0x00000100L
+#define UVD_JPEG_INT_EN__HFM_ERR_EN_MASK 0x00000200L
+#define UVD_JPEG_INT_EN__RST_ERR_EN_MASK 0x00000400L
+#define UVD_JPEG_INT_EN__ECS_MK_ERR_EN_MASK 0x00000800L
+#define UVD_JPEG_INT_EN__TIMEOUT_ERR_EN_MASK 0x00001000L
+#define UVD_JPEG_INT_EN__MARKER_ERR_EN_MASK 0x00002000L
+#define UVD_JPEG_INT_EN__FMT_ERR_EN_MASK 0x00004000L
+#define UVD_JPEG_INT_EN__PROFILE_ERR_EN_MASK 0x00008000L
+//UVD_JPEG_INT_STAT
+#define UVD_JPEG_INT_STAT__OUTBUF_WPTR_INC_INT__SHIFT 0x0
+#define UVD_JPEG_INT_STAT__JOB_AVAIL_INT__SHIFT 0x1
+#define UVD_JPEG_INT_STAT__FENCE_VAL_INT__SHIFT 0x2
+#define UVD_JPEG_INT_STAT__FIFO_OVERFLOW_ERR_INT__SHIFT 0x6
+#define UVD_JPEG_INT_STAT__BLK_CNT_OUT_OF_SYNC_ERR_INT__SHIFT 0x7
+#define UVD_JPEG_INT_STAT__EOI_ERR_INT__SHIFT 0x8
+#define UVD_JPEG_INT_STAT__HFM_ERR_INT__SHIFT 0x9
+#define UVD_JPEG_INT_STAT__RST_ERR_INT__SHIFT 0xa
+#define UVD_JPEG_INT_STAT__ECS_MK_ERR_INT__SHIFT 0xb
+#define UVD_JPEG_INT_STAT__TIMEOUT_ERR_INT__SHIFT 0xc
+#define UVD_JPEG_INT_STAT__MARKER_ERR_INT__SHIFT 0xd
+#define UVD_JPEG_INT_STAT__FMT_ERR_INT__SHIFT 0xe
+#define UVD_JPEG_INT_STAT__PROFILE_ERR_INT__SHIFT 0xf
+#define UVD_JPEG_INT_STAT__OUTBUF_WPTR_INC_INT_MASK 0x00000001L
+#define UVD_JPEG_INT_STAT__JOB_AVAIL_INT_MASK 0x00000002L
+#define UVD_JPEG_INT_STAT__FENCE_VAL_INT_MASK 0x00000004L
+#define UVD_JPEG_INT_STAT__FIFO_OVERFLOW_ERR_INT_MASK 0x00000040L
+#define UVD_JPEG_INT_STAT__BLK_CNT_OUT_OF_SYNC_ERR_INT_MASK 0x00000080L
+#define UVD_JPEG_INT_STAT__EOI_ERR_INT_MASK 0x00000100L
+#define UVD_JPEG_INT_STAT__HFM_ERR_INT_MASK 0x00000200L
+#define UVD_JPEG_INT_STAT__RST_ERR_INT_MASK 0x00000400L
+#define UVD_JPEG_INT_STAT__ECS_MK_ERR_INT_MASK 0x00000800L
+#define UVD_JPEG_INT_STAT__TIMEOUT_ERR_INT_MASK 0x00001000L
+#define UVD_JPEG_INT_STAT__MARKER_ERR_INT_MASK 0x00002000L
+#define UVD_JPEG_INT_STAT__FMT_ERR_INT_MASK 0x00004000L
+#define UVD_JPEG_INT_STAT__PROFILE_ERR_INT_MASK 0x00008000L
+//UVD_JPEG_PITCH
+#define UVD_JPEG_PITCH__PITCH__SHIFT 0x0
+#define UVD_JPEG_PITCH__PITCH_MASK 0xFFFFFFFFL
+//UVD_JPEG_UV_PITCH
+#define UVD_JPEG_UV_PITCH__UV_PITCH__SHIFT 0x0
+#define UVD_JPEG_UV_PITCH__UV_PITCH_MASK 0xFFFFFFFFL
+//JPEG_DEC_Y_GFX8_TILING_SURFACE +#define JPEG_DEC_Y_GFX8_TILING_SURFACE__BANK_WIDTH__SHIFT 0x0 +#define JPEG_DEC_Y_GFX8_TILING_SURFACE__BANK_HEIGHT__SHIFT 0x2 +#define JPEG_DEC_Y_GFX8_TILING_SURFACE__MACRO_TILE_ASPECT__SHIFT 0x4 +#define JPEG_DEC_Y_GFX8_TILING_SURFACE__NUM_BANKS__SHIFT 0x6 +#define JPEG_DEC_Y_GFX8_TILING_SURFACE__PIPE_CONFIG__SHIFT 0x8 +#define JPEG_DEC_Y_GFX8_TILING_SURFACE__TILE_SPLIT__SHIFT 0xd +#define JPEG_DEC_Y_GFX8_TILING_SURFACE__ARRAY_MODE__SHIFT 0x10 +#define JPEG_DEC_Y_GFX8_TILING_SURFACE__BANK_WIDTH_MASK 0x00000003L +#define JPEG_DEC_Y_GFX8_TILING_SURFACE__BANK_HEIGHT_MASK 0x0000000CL +#define JPEG_DEC_Y_GFX8_TILING_SURFACE__MACRO_TILE_ASPECT_MASK 0x00000030L +#define JPEG_DEC_Y_GFX8_TILING_SURFACE__NUM_BANKS_MASK 0x000000C0L +#define JPEG_DEC_Y_GFX8_TILING_SURFACE__PIPE_CONFIG_MASK 0x00001F00L +#define JPEG_DEC_Y_GFX8_TILING_SURFACE__TILE_SPLIT_MASK 0x0000E000L +#define JPEG_DEC_Y_GFX8_TILING_SURFACE__ARRAY_MODE_MASK 0x000F0000L +//JPEG_DEC_UV_GFX8_TILING_SURFACE +#define JPEG_DEC_UV_GFX8_TILING_SURFACE__BANK_WIDTH__SHIFT 0x0 +#define JPEG_DEC_UV_GFX8_TILING_SURFACE__BANK_HEIGHT__SHIFT 0x2 +#define JPEG_DEC_UV_GFX8_TILING_SURFACE__MACRO_TILE_ASPECT__SHIFT 0x4 +#define JPEG_DEC_UV_GFX8_TILING_SURFACE__NUM_BANKS__SHIFT 0x6 +#define JPEG_DEC_UV_GFX8_TILING_SURFACE__PIPE_CONFIG__SHIFT 0x8 +#define JPEG_DEC_UV_GFX8_TILING_SURFACE__TILE_SPLIT__SHIFT 0xd +#define JPEG_DEC_UV_GFX8_TILING_SURFACE__ARRAY_MODE__SHIFT 0x10 +#define JPEG_DEC_UV_GFX8_TILING_SURFACE__BANK_WIDTH_MASK 0x00000003L +#define JPEG_DEC_UV_GFX8_TILING_SURFACE__BANK_HEIGHT_MASK 0x0000000CL +#define JPEG_DEC_UV_GFX8_TILING_SURFACE__MACRO_TILE_ASPECT_MASK 0x00000030L +#define JPEG_DEC_UV_GFX8_TILING_SURFACE__NUM_BANKS_MASK 0x000000C0L +#define JPEG_DEC_UV_GFX8_TILING_SURFACE__PIPE_CONFIG_MASK 0x00001F00L +#define JPEG_DEC_UV_GFX8_TILING_SURFACE__TILE_SPLIT_MASK 0x0000E000L +#define JPEG_DEC_UV_GFX8_TILING_SURFACE__ARRAY_MODE_MASK 0x000F0000L +//JPEG_DEC_GFX8_ADDR_CONFIG +#define JPEG_DEC_GFX8_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x4 +#define JPEG_DEC_GFX8_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000070L +//JPEG_DEC_Y_GFX10_TILING_SURFACE +#define JPEG_DEC_Y_GFX10_TILING_SURFACE__SWIZZLE_MODE__SHIFT 0x0 +#define JPEG_DEC_Y_GFX10_TILING_SURFACE__SWIZZLE_MODE_MASK 0x0000001FL +//JPEG_DEC_UV_GFX10_TILING_SURFACE +#define JPEG_DEC_UV_GFX10_TILING_SURFACE__SWIZZLE_MODE__SHIFT 0x0 +#define JPEG_DEC_UV_GFX10_TILING_SURFACE__SWIZZLE_MODE_MASK 0x0000001FL +//JPEG_DEC_GFX10_ADDR_CONFIG +#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0 +#define JPEG_DEC_GFX10_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3 +#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc +#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13 +#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L +#define JPEG_DEC_GFX10_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L +#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L +#define JPEG_DEC_GFX10_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L +//JPEG_DEC_ADDR_MODE +#define JPEG_DEC_ADDR_MODE__ADDR_MODE_Y__SHIFT 0x0 +#define JPEG_DEC_ADDR_MODE__ADDR_MODE_UV__SHIFT 0x2 +#define JPEG_DEC_ADDR_MODE__ADDR_LIB_SEL__SHIFT 0xc +#define JPEG_DEC_ADDR_MODE__ADDR_MODE_Y_MASK 0x00000003L +#define JPEG_DEC_ADDR_MODE__ADDR_MODE_UV_MASK 0x0000000CL +#define JPEG_DEC_ADDR_MODE__ADDR_LIB_SEL_MASK 0x00007000L +//UVD_JPEG_OUTPUT_XY +//UVD_JPEG_GPCOM_CMD +#define UVD_JPEG_GPCOM_CMD__CMD__SHIFT 0x1 +#define UVD_JPEG_GPCOM_CMD__CMD_MASK 0x0000000EL 
+//UVD_JPEG_GPCOM_DATA0 +#define UVD_JPEG_GPCOM_DATA0__DATA0__SHIFT 0x0 +#define UVD_JPEG_GPCOM_DATA0__DATA0_MASK 0xFFFFFFFFL +//UVD_JPEG_GPCOM_DATA1 +#define UVD_JPEG_GPCOM_DATA1__DATA1__SHIFT 0x0 +#define UVD_JPEG_GPCOM_DATA1__DATA1_MASK 0xFFFFFFFFL +//UVD_JPEG_SCRATCH1 +#define UVD_JPEG_SCRATCH1__SCRATCH1__SHIFT 0x0 +#define UVD_JPEG_SCRATCH1__SCRATCH1_MASK 0xFFFFFFFFL +//UVD_JPEG_DEC_SOFT_RST +#define UVD_JPEG_DEC_SOFT_RST__SOFT_RESET__SHIFT 0x0 +#define UVD_JPEG_DEC_SOFT_RST__RESET_STATUS__SHIFT 0x10 +#define UVD_JPEG_DEC_SOFT_RST__SOFT_RESET_MASK 0x00000001L +#define UVD_JPEG_DEC_SOFT_RST__RESET_STATUS_MASK 0x00010000L + + +// addressBlock: uvd0_uvd_jpeg_enc_dec +//UVD_JPEG_ENC_INT_EN +#define UVD_JPEG_ENC_INT_EN__HUFF_JOB_DONE_INT_EN__SHIFT 0x0 +#define UVD_JPEG_ENC_INT_EN__SCLR_JOB_DONE_INT_EN__SHIFT 0x1 +#define UVD_JPEG_ENC_INT_EN__HUFF_ERROR_INT_EN__SHIFT 0x2 +#define UVD_JPEG_ENC_INT_EN__SCLR_ERROR_INT_EN__SHIFT 0x3 +#define UVD_JPEG_ENC_INT_EN__QTBL_ERROR_INT_EN__SHIFT 0x4 +#define UVD_JPEG_ENC_INT_EN__PIC_SIZE_ERROR_INT_EN__SHIFT 0x5 +#define UVD_JPEG_ENC_INT_EN__FENCE_VAL_INT_EN__SHIFT 0x6 +#define UVD_JPEG_ENC_INT_EN__HUFF_JOB_DONE_INT_EN_MASK 0x00000001L +#define UVD_JPEG_ENC_INT_EN__SCLR_JOB_DONE_INT_EN_MASK 0x00000002L +#define UVD_JPEG_ENC_INT_EN__HUFF_ERROR_INT_EN_MASK 0x00000004L +#define UVD_JPEG_ENC_INT_EN__SCLR_ERROR_INT_EN_MASK 0x00000008L +#define UVD_JPEG_ENC_INT_EN__QTBL_ERROR_INT_EN_MASK 0x00000010L +#define UVD_JPEG_ENC_INT_EN__PIC_SIZE_ERROR_INT_EN_MASK 0x00000020L +#define UVD_JPEG_ENC_INT_EN__FENCE_VAL_INT_EN_MASK 0x00000040L +//UVD_JPEG_ENC_INT_STATUS +#define UVD_JPEG_ENC_INT_STATUS__HUFF_JOB_DONE_STATUS__SHIFT 0x0 +#define UVD_JPEG_ENC_INT_STATUS__SCLR_JOB_DONE_STATUS__SHIFT 0x1 +#define UVD_JPEG_ENC_INT_STATUS__HUFF_ERROR_STATUS__SHIFT 0x2 +#define UVD_JPEG_ENC_INT_STATUS__SCLR_ERROR_STATUS__SHIFT 0x3 +#define UVD_JPEG_ENC_INT_STATUS__QTBL_ERROR_STATUS__SHIFT 0x4 +#define UVD_JPEG_ENC_INT_STATUS__PIC_SIZE_ERROR_STATUS__SHIFT 0x5 +#define UVD_JPEG_ENC_INT_STATUS__FENCE_VAL_STATUS__SHIFT 0x6 +#define UVD_JPEG_ENC_INT_STATUS__HUFF_JOB_DONE_STATUS_MASK 0x00000001L +#define UVD_JPEG_ENC_INT_STATUS__SCLR_JOB_DONE_STATUS_MASK 0x00000002L +#define UVD_JPEG_ENC_INT_STATUS__HUFF_ERROR_STATUS_MASK 0x00000004L +#define UVD_JPEG_ENC_INT_STATUS__SCLR_ERROR_STATUS_MASK 0x00000008L +#define UVD_JPEG_ENC_INT_STATUS__QTBL_ERROR_STATUS_MASK 0x00000010L +#define UVD_JPEG_ENC_INT_STATUS__PIC_SIZE_ERROR_STATUS_MASK 0x00000020L +#define UVD_JPEG_ENC_INT_STATUS__FENCE_VAL_STATUS_MASK 0x00000040L +//UVD_JPEG_ENC_ENGINE_CNTL +#define UVD_JPEG_ENC_ENGINE_CNTL__HUFF_WR_COMB_DIS__SHIFT 0x0 +#define UVD_JPEG_ENC_ENGINE_CNTL__DISTINCT_CHROMA_QUANT_TABLES__SHIFT 0x1 +#define UVD_JPEG_ENC_ENGINE_CNTL__SCALAR_EN__SHIFT 0x2 +#define UVD_JPEG_ENC_ENGINE_CNTL__ENCODE_EN__SHIFT 0x3 +#define UVD_JPEG_ENC_ENGINE_CNTL__CMP_NEEDED__SHIFT 0x4 +#define UVD_JPEG_ENC_ENGINE_CNTL__ECS_RESTRICT_32B_EN__SHIFT 0x9 +#define UVD_JPEG_ENC_ENGINE_CNTL__HUFF_WR_COMB_DIS_MASK 0x00000001L +#define UVD_JPEG_ENC_ENGINE_CNTL__DISTINCT_CHROMA_QUANT_TABLES_MASK 0x00000002L +#define UVD_JPEG_ENC_ENGINE_CNTL__SCALAR_EN_MASK 0x00000004L +#define UVD_JPEG_ENC_ENGINE_CNTL__ENCODE_EN_MASK 0x00000008L +#define UVD_JPEG_ENC_ENGINE_CNTL__CMP_NEEDED_MASK 0x00000010L +#define UVD_JPEG_ENC_ENGINE_CNTL__ECS_RESTRICT_32B_EN_MASK 0x00000200L +//UVD_JPEG_ENC_SCRATCH1 +#define UVD_JPEG_ENC_SCRATCH1__SCRATCH1__SHIFT 0x0 +#define UVD_JPEG_ENC_SCRATCH1__SCRATCH1_MASK 0xFFFFFFFFL + + +// addressBlock: uvd0_uvd_jpeg_enc_sclk_dec 
+//UVD_JPEG_ENC_STATUS +#define UVD_JPEG_ENC_STATUS__PEL_FETCH_IDLE__SHIFT 0x0 +#define UVD_JPEG_ENC_STATUS__HUFF_CORE_IDLE__SHIFT 0x1 +#define UVD_JPEG_ENC_STATUS__FDCT_IDLE__SHIFT 0x2 +#define UVD_JPEG_ENC_STATUS__SCALAR_IDLE__SHIFT 0x3 +#define UVD_JPEG_ENC_STATUS__PEL_FETCH_IDLE_MASK 0x00000001L +#define UVD_JPEG_ENC_STATUS__HUFF_CORE_IDLE_MASK 0x00000002L +#define UVD_JPEG_ENC_STATUS__FDCT_IDLE_MASK 0x00000004L +#define UVD_JPEG_ENC_STATUS__SCALAR_IDLE_MASK 0x00000008L +//UVD_JPEG_ENC_PITCH +#define UVD_JPEG_ENC_PITCH__PITCH_Y__SHIFT 0x0 +#define UVD_JPEG_ENC_PITCH__PITCH_UV__SHIFT 0x10 +#define UVD_JPEG_ENC_PITCH__PITCH_Y_MASK 0x00000FFFL +#define UVD_JPEG_ENC_PITCH__PITCH_UV_MASK 0x0FFF0000L +//UVD_JPEG_ENC_LUMA_BASE +#define UVD_JPEG_ENC_LUMA_BASE__LUMA_BASE__SHIFT 0x0 +#define UVD_JPEG_ENC_LUMA_BASE__LUMA_BASE_MASK 0xFFFFFFFFL +//UVD_JPEG_ENC_CHROMAU_BASE +#define UVD_JPEG_ENC_CHROMAU_BASE__CHROMAU_BASE__SHIFT 0x0 +#define UVD_JPEG_ENC_CHROMAU_BASE__CHROMAU_BASE_MASK 0xFFFFFFFFL +//UVD_JPEG_ENC_CHROMAV_BASE +#define UVD_JPEG_ENC_CHROMAV_BASE__CHROMAV_BASE__SHIFT 0x0 +#define UVD_JPEG_ENC_CHROMAV_BASE__CHROMAV_BASE_MASK 0xFFFFFFFFL +//JPEG_ENC_Y_GFX10_TILING_SURFACE +#define JPEG_ENC_Y_GFX10_TILING_SURFACE__SWIZZLE_MODE__SHIFT 0x0 +#define JPEG_ENC_Y_GFX10_TILING_SURFACE__SWIZZLE_MODE_MASK 0x0000001FL +//JPEG_ENC_UV_GFX10_TILING_SURFACE +#define JPEG_ENC_UV_GFX10_TILING_SURFACE__SWIZZLE_MODE__SHIFT 0x0 +#define JPEG_ENC_UV_GFX10_TILING_SURFACE__SWIZZLE_MODE_MASK 0x0000001FL +//JPEG_ENC_GFX10_ADDR_CONFIG +#define JPEG_ENC_GFX10_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0 +#define JPEG_ENC_GFX10_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3 +#define JPEG_ENC_GFX10_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc +#define JPEG_ENC_GFX10_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13 +#define JPEG_ENC_GFX10_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L +#define JPEG_ENC_GFX10_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L +#define JPEG_ENC_GFX10_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L +#define JPEG_ENC_GFX10_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L +//JPEG_ENC_ADDR_MODE +#define JPEG_ENC_ADDR_MODE__ADDR_MODE_Y__SHIFT 0x0 +#define JPEG_ENC_ADDR_MODE__ADDR_MODE_UV__SHIFT 0x2 +#define JPEG_ENC_ADDR_MODE__ADDR_LIB_SEL__SHIFT 0xc +#define JPEG_ENC_ADDR_MODE__ADDR_MODE_Y_MASK 0x00000003L +#define JPEG_ENC_ADDR_MODE__ADDR_MODE_UV_MASK 0x0000000CL +#define JPEG_ENC_ADDR_MODE__ADDR_LIB_SEL_MASK 0x00007000L +//UVD_JPEG_ENC_GPCOM_CMD +#define UVD_JPEG_ENC_GPCOM_CMD__CMD__SHIFT 0x1 +#define UVD_JPEG_ENC_GPCOM_CMD__CMD_MASK 0x0000000EL +//UVD_JPEG_ENC_GPCOM_DATA0 +#define UVD_JPEG_ENC_GPCOM_DATA0__DATA0__SHIFT 0x0 +#define UVD_JPEG_ENC_GPCOM_DATA0__DATA0_MASK 0xFFFFFFFFL +//UVD_JPEG_ENC_GPCOM_DATA1 +#define UVD_JPEG_ENC_GPCOM_DATA1__DATA1__SHIFT 0x0 +#define UVD_JPEG_ENC_GPCOM_DATA1__DATA1_MASK 0xFFFFFFFFL +//UVD_JPEG_ENC_CGC_CNTL +#define UVD_JPEG_ENC_CGC_CNTL__CGC_EN__SHIFT 0x0 +#define UVD_JPEG_ENC_CGC_CNTL__CGC_EN_MASK 0x00000001L +//UVD_JPEG_ENC_SCRATCH0 +#define UVD_JPEG_ENC_SCRATCH0__SCRATCH0__SHIFT 0x0 +#define UVD_JPEG_ENC_SCRATCH0__SCRATCH0_MASK 0xFFFFFFFFL +//UVD_JPEG_ENC_SOFT_RST +#define UVD_JPEG_ENC_SOFT_RST__SOFT_RST__SHIFT 0x0 +#define UVD_JPEG_ENC_SOFT_RST__RESET_STATUS__SHIFT 0x10 +#define UVD_JPEG_ENC_SOFT_RST__SOFT_RST_MASK 0x00000001L +#define UVD_JPEG_ENC_SOFT_RST__RESET_STATUS_MASK 0x00010000L + + +// addressBlock: uvd0_uvd_jrbc_dec +//UVD_JRBC_RB_WPTR +#define UVD_JRBC_RB_WPTR__RB_WPTR__SHIFT 0x4 +#define UVD_JRBC_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L +//UVD_JRBC_RB_CNTL +#define 
UVD_JRBC_RB_CNTL__RB_NO_FETCH__SHIFT 0x0 +#define UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN__SHIFT 0x1 +#define UVD_JRBC_RB_CNTL__RB_PRE_WRITE_TIMER__SHIFT 0x4 +#define UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK 0x00000001L +#define UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK 0x00000002L +#define UVD_JRBC_RB_CNTL__RB_PRE_WRITE_TIMER_MASK 0x0007FFF0L +//UVD_JRBC_IB_SIZE +#define UVD_JRBC_IB_SIZE__IB_SIZE__SHIFT 0x4 +#define UVD_JRBC_IB_SIZE__IB_SIZE_MASK 0x007FFFF0L +//UVD_JRBC_URGENT_CNTL +#define UVD_JRBC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK__SHIFT 0x0 +#define UVD_JRBC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK_MASK 0x00000003L +//UVD_JRBC_RB_REF_DATA +#define UVD_JRBC_RB_REF_DATA__REF_DATA__SHIFT 0x0 +#define UVD_JRBC_RB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL +//UVD_JRBC_RB_COND_RD_TIMER +#define UVD_JRBC_RB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0 +#define UVD_JRBC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10 +#define UVD_JRBC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18 +#define UVD_JRBC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19 +#define UVD_JRBC_RB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL +#define UVD_JRBC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L +#define UVD_JRBC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L +#define UVD_JRBC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L +//UVD_JRBC_SOFT_RESET +#define UVD_JRBC_SOFT_RESET__RESET__SHIFT 0x0 +#define UVD_JRBC_SOFT_RESET__SCLK_RESET_STATUS__SHIFT 0x11 +#define UVD_JRBC_SOFT_RESET__RESET_MASK 0x00000001L +#define UVD_JRBC_SOFT_RESET__SCLK_RESET_STATUS_MASK 0x00020000L +//UVD_JRBC_STATUS +#define UVD_JRBC_STATUS__RB_JOB_DONE__SHIFT 0x0 +#define UVD_JRBC_STATUS__IB_JOB_DONE__SHIFT 0x1 +#define UVD_JRBC_STATUS__RB_ILLEGAL_CMD__SHIFT 0x2 +#define UVD_JRBC_STATUS__RB_COND_REG_RD_TIMEOUT__SHIFT 0x3 +#define UVD_JRBC_STATUS__RB_MEM_WR_TIMEOUT__SHIFT 0x4 +#define UVD_JRBC_STATUS__RB_MEM_RD_TIMEOUT__SHIFT 0x5 +#define UVD_JRBC_STATUS__IB_ILLEGAL_CMD__SHIFT 0x6 +#define UVD_JRBC_STATUS__IB_COND_REG_RD_TIMEOUT__SHIFT 0x7 +#define UVD_JRBC_STATUS__IB_MEM_WR_TIMEOUT__SHIFT 0x8 +#define UVD_JRBC_STATUS__IB_MEM_RD_TIMEOUT__SHIFT 0x9 +#define UVD_JRBC_STATUS__RB_TRAP_STATUS__SHIFT 0xa +#define UVD_JRBC_STATUS__PREEMPT_STATUS__SHIFT 0xb +#define UVD_JRBC_STATUS__IB_TRAP_STATUS__SHIFT 0xc +#define UVD_JRBC_STATUS__INT_EN__SHIFT 0x10 +#define UVD_JRBC_STATUS__INT_ACK__SHIFT 0x11 +#define UVD_JRBC_STATUS__RB_JOB_DONE_MASK 0x00000001L +#define UVD_JRBC_STATUS__IB_JOB_DONE_MASK 0x00000002L +#define UVD_JRBC_STATUS__RB_ILLEGAL_CMD_MASK 0x00000004L +#define UVD_JRBC_STATUS__RB_COND_REG_RD_TIMEOUT_MASK 0x00000008L +#define UVD_JRBC_STATUS__RB_MEM_WR_TIMEOUT_MASK 0x00000010L +#define UVD_JRBC_STATUS__RB_MEM_RD_TIMEOUT_MASK 0x00000020L +#define UVD_JRBC_STATUS__IB_ILLEGAL_CMD_MASK 0x00000040L +#define UVD_JRBC_STATUS__IB_COND_REG_RD_TIMEOUT_MASK 0x00000080L +#define UVD_JRBC_STATUS__IB_MEM_WR_TIMEOUT_MASK 0x00000100L +#define UVD_JRBC_STATUS__IB_MEM_RD_TIMEOUT_MASK 0x00000200L +#define UVD_JRBC_STATUS__RB_TRAP_STATUS_MASK 0x00000400L +#define UVD_JRBC_STATUS__PREEMPT_STATUS_MASK 0x00000800L +#define UVD_JRBC_STATUS__IB_TRAP_STATUS_MASK 0x00001000L +#define UVD_JRBC_STATUS__INT_EN_MASK 0x00010000L +#define UVD_JRBC_STATUS__INT_ACK_MASK 0x00020000L +//UVD_JRBC_RB_RPTR +#define UVD_JRBC_RB_RPTR__RB_RPTR__SHIFT 0x4 +#define UVD_JRBC_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L +//UVD_JRBC_RB_BUF_STATUS +#define UVD_JRBC_RB_BUF_STATUS__RB_BUF_VALID__SHIFT 0x0 +#define UVD_JRBC_RB_BUF_STATUS__RB_BUF_RD_ADDR__SHIFT 0x10 +#define 
UVD_JRBC_RB_BUF_STATUS__RB_BUF_WR_ADDR__SHIFT 0x18 +#define UVD_JRBC_RB_BUF_STATUS__RB_BUF_VALID_MASK 0x0000FFFFL +#define UVD_JRBC_RB_BUF_STATUS__RB_BUF_RD_ADDR_MASK 0x000F0000L +#define UVD_JRBC_RB_BUF_STATUS__RB_BUF_WR_ADDR_MASK 0x03000000L +//UVD_JRBC_IB_BUF_STATUS +#define UVD_JRBC_IB_BUF_STATUS__IB_BUF_VALID__SHIFT 0x0 +#define UVD_JRBC_IB_BUF_STATUS__IB_BUF_RD_ADDR__SHIFT 0x10 +#define UVD_JRBC_IB_BUF_STATUS__IB_BUF_WR_ADDR__SHIFT 0x18 +#define UVD_JRBC_IB_BUF_STATUS__IB_BUF_VALID_MASK 0x0000FFFFL +#define UVD_JRBC_IB_BUF_STATUS__IB_BUF_RD_ADDR_MASK 0x000F0000L +#define UVD_JRBC_IB_BUF_STATUS__IB_BUF_WR_ADDR_MASK 0x03000000L +//UVD_JRBC_IB_SIZE_UPDATE +#define UVD_JRBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE__SHIFT 0x4 +#define UVD_JRBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE_MASK 0x007FFFF0L +//UVD_JRBC_IB_COND_RD_TIMER +#define UVD_JRBC_IB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0 +#define UVD_JRBC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10 +#define UVD_JRBC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18 +#define UVD_JRBC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19 +#define UVD_JRBC_IB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL +#define UVD_JRBC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L +#define UVD_JRBC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L +#define UVD_JRBC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L +//UVD_JRBC_IB_REF_DATA +#define UVD_JRBC_IB_REF_DATA__REF_DATA__SHIFT 0x0 +#define UVD_JRBC_IB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL +//UVD_JPEG_PREEMPT_CMD +#define UVD_JPEG_PREEMPT_CMD__PREEMPT_EN__SHIFT 0x0 +#define UVD_JPEG_PREEMPT_CMD__WAIT_JPEG_JOB_DONE__SHIFT 0x1 +#define UVD_JPEG_PREEMPT_CMD__PREEMPT_FENCE_CMD__SHIFT 0x2 +#define UVD_JPEG_PREEMPT_CMD__PREEMPT_EN_MASK 0x00000001L +#define UVD_JPEG_PREEMPT_CMD__WAIT_JPEG_JOB_DONE_MASK 0x00000002L +#define UVD_JPEG_PREEMPT_CMD__PREEMPT_FENCE_CMD_MASK 0x00000004L +//UVD_JPEG_PREEMPT_FENCE_DATA0 +#define UVD_JPEG_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0__SHIFT 0x0 +#define UVD_JPEG_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0_MASK 0xFFFFFFFFL +//UVD_JPEG_PREEMPT_FENCE_DATA1 +#define UVD_JPEG_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1__SHIFT 0x0 +#define UVD_JPEG_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1_MASK 0xFFFFFFFFL +//UVD_JRBC_RB_SIZE +#define UVD_JRBC_RB_SIZE__RB_SIZE__SHIFT 0x4 +#define UVD_JRBC_RB_SIZE__RB_SIZE_MASK 0x00FFFFF0L +//UVD_JRBC_SCRATCH0 +#define UVD_JRBC_SCRATCH0__SCRATCH0__SHIFT 0x0 +#define UVD_JRBC_SCRATCH0__SCRATCH0_MASK 0xFFFFFFFFL + + +// addressBlock: uvd0_uvd_jrbc_enc_dec +//UVD_JRBC_ENC_RB_WPTR +#define UVD_JRBC_ENC_RB_WPTR__RB_WPTR__SHIFT 0x4 +#define UVD_JRBC_ENC_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L +//UVD_JRBC_ENC_RB_CNTL +#define UVD_JRBC_ENC_RB_CNTL__RB_NO_FETCH__SHIFT 0x0 +#define UVD_JRBC_ENC_RB_CNTL__RB_RPTR_WR_EN__SHIFT 0x1 +#define UVD_JRBC_ENC_RB_CNTL__RB_PRE_WRITE_TIMER__SHIFT 0x4 +#define UVD_JRBC_ENC_RB_CNTL__RB_NO_FETCH_MASK 0x00000001L +#define UVD_JRBC_ENC_RB_CNTL__RB_RPTR_WR_EN_MASK 0x00000002L +#define UVD_JRBC_ENC_RB_CNTL__RB_PRE_WRITE_TIMER_MASK 0x0007FFF0L +//UVD_JRBC_ENC_IB_SIZE +#define UVD_JRBC_ENC_IB_SIZE__IB_SIZE__SHIFT 0x4 +#define UVD_JRBC_ENC_IB_SIZE__IB_SIZE_MASK 0x007FFFF0L +//UVD_JRBC_ENC_URGENT_CNTL +#define UVD_JRBC_ENC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK__SHIFT 0x0 +#define UVD_JRBC_ENC_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK_MASK 0x00000003L +//UVD_JRBC_ENC_RB_REF_DATA +#define UVD_JRBC_ENC_RB_REF_DATA__REF_DATA__SHIFT 0x0 +#define UVD_JRBC_ENC_RB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL +//UVD_JRBC_ENC_RB_COND_RD_TIMER +#define 
UVD_JRBC_ENC_RB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0 +#define UVD_JRBC_ENC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10 +#define UVD_JRBC_ENC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18 +#define UVD_JRBC_ENC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19 +#define UVD_JRBC_ENC_RB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL +#define UVD_JRBC_ENC_RB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L +#define UVD_JRBC_ENC_RB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L +#define UVD_JRBC_ENC_RB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L +//UVD_JRBC_ENC_SOFT_RESET +#define UVD_JRBC_ENC_SOFT_RESET__RESET__SHIFT 0x0 +#define UVD_JRBC_ENC_SOFT_RESET__SCLK_RESET_STATUS__SHIFT 0x11 +#define UVD_JRBC_ENC_SOFT_RESET__RESET_MASK 0x00000001L +#define UVD_JRBC_ENC_SOFT_RESET__SCLK_RESET_STATUS_MASK 0x00020000L +//UVD_JRBC_ENC_STATUS +#define UVD_JRBC_ENC_STATUS__RB_JOB_DONE__SHIFT 0x0 +#define UVD_JRBC_ENC_STATUS__IB_JOB_DONE__SHIFT 0x1 +#define UVD_JRBC_ENC_STATUS__RB_ILLEGAL_CMD__SHIFT 0x2 +#define UVD_JRBC_ENC_STATUS__RB_COND_REG_RD_TIMEOUT__SHIFT 0x3 +#define UVD_JRBC_ENC_STATUS__RB_MEM_WR_TIMEOUT__SHIFT 0x4 +#define UVD_JRBC_ENC_STATUS__RB_MEM_RD_TIMEOUT__SHIFT 0x5 +#define UVD_JRBC_ENC_STATUS__IB_ILLEGAL_CMD__SHIFT 0x6 +#define UVD_JRBC_ENC_STATUS__IB_COND_REG_RD_TIMEOUT__SHIFT 0x7 +#define UVD_JRBC_ENC_STATUS__IB_MEM_WR_TIMEOUT__SHIFT 0x8 +#define UVD_JRBC_ENC_STATUS__IB_MEM_RD_TIMEOUT__SHIFT 0x9 +#define UVD_JRBC_ENC_STATUS__RB_TRAP_STATUS__SHIFT 0xa +#define UVD_JRBC_ENC_STATUS__PREEMPT_STATUS__SHIFT 0xb +#define UVD_JRBC_ENC_STATUS__IB_TRAP_STATUS__SHIFT 0xc +#define UVD_JRBC_ENC_STATUS__INT_EN__SHIFT 0x10 +#define UVD_JRBC_ENC_STATUS__INT_ACK__SHIFT 0x11 +#define UVD_JRBC_ENC_STATUS__RB_JOB_DONE_MASK 0x00000001L +#define UVD_JRBC_ENC_STATUS__IB_JOB_DONE_MASK 0x00000002L +#define UVD_JRBC_ENC_STATUS__RB_ILLEGAL_CMD_MASK 0x00000004L +#define UVD_JRBC_ENC_STATUS__RB_COND_REG_RD_TIMEOUT_MASK 0x00000008L +#define UVD_JRBC_ENC_STATUS__RB_MEM_WR_TIMEOUT_MASK 0x00000010L +#define UVD_JRBC_ENC_STATUS__RB_MEM_RD_TIMEOUT_MASK 0x00000020L +#define UVD_JRBC_ENC_STATUS__IB_ILLEGAL_CMD_MASK 0x00000040L +#define UVD_JRBC_ENC_STATUS__IB_COND_REG_RD_TIMEOUT_MASK 0x00000080L +#define UVD_JRBC_ENC_STATUS__IB_MEM_WR_TIMEOUT_MASK 0x00000100L +#define UVD_JRBC_ENC_STATUS__IB_MEM_RD_TIMEOUT_MASK 0x00000200L +#define UVD_JRBC_ENC_STATUS__RB_TRAP_STATUS_MASK 0x00000400L +#define UVD_JRBC_ENC_STATUS__PREEMPT_STATUS_MASK 0x00000800L +#define UVD_JRBC_ENC_STATUS__IB_TRAP_STATUS_MASK 0x00001000L +#define UVD_JRBC_ENC_STATUS__INT_EN_MASK 0x00010000L +#define UVD_JRBC_ENC_STATUS__INT_ACK_MASK 0x00020000L +//UVD_JRBC_ENC_RB_RPTR +#define UVD_JRBC_ENC_RB_RPTR__RB_RPTR__SHIFT 0x4 +#define UVD_JRBC_ENC_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L +//UVD_JRBC_ENC_RB_BUF_STATUS +#define UVD_JRBC_ENC_RB_BUF_STATUS__RB_BUF_VALID__SHIFT 0x0 +#define UVD_JRBC_ENC_RB_BUF_STATUS__RB_BUF_RD_ADDR__SHIFT 0x10 +#define UVD_JRBC_ENC_RB_BUF_STATUS__RB_BUF_WR_ADDR__SHIFT 0x18 +#define UVD_JRBC_ENC_RB_BUF_STATUS__RB_BUF_VALID_MASK 0x0000FFFFL +#define UVD_JRBC_ENC_RB_BUF_STATUS__RB_BUF_RD_ADDR_MASK 0x000F0000L +#define UVD_JRBC_ENC_RB_BUF_STATUS__RB_BUF_WR_ADDR_MASK 0x03000000L +//UVD_JRBC_ENC_IB_BUF_STATUS +#define UVD_JRBC_ENC_IB_BUF_STATUS__IB_BUF_VALID__SHIFT 0x0 +#define UVD_JRBC_ENC_IB_BUF_STATUS__IB_BUF_RD_ADDR__SHIFT 0x10 +#define UVD_JRBC_ENC_IB_BUF_STATUS__IB_BUF_WR_ADDR__SHIFT 0x18 +#define UVD_JRBC_ENC_IB_BUF_STATUS__IB_BUF_VALID_MASK 0x0000FFFFL +#define UVD_JRBC_ENC_IB_BUF_STATUS__IB_BUF_RD_ADDR_MASK 0x000F0000L 
+#define UVD_JRBC_ENC_IB_BUF_STATUS__IB_BUF_WR_ADDR_MASK 0x03000000L +//UVD_JRBC_ENC_IB_SIZE_UPDATE +#define UVD_JRBC_ENC_IB_SIZE_UPDATE__REMAIN_IB_SIZE__SHIFT 0x4 +#define UVD_JRBC_ENC_IB_SIZE_UPDATE__REMAIN_IB_SIZE_MASK 0x007FFFF0L +//UVD_JRBC_ENC_IB_COND_RD_TIMER +#define UVD_JRBC_ENC_IB_COND_RD_TIMER__RETRY_TIMER_CNT__SHIFT 0x0 +#define UVD_JRBC_ENC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT__SHIFT 0x10 +#define UVD_JRBC_ENC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN__SHIFT 0x18 +#define UVD_JRBC_ENC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN__SHIFT 0x19 +#define UVD_JRBC_ENC_IB_COND_RD_TIMER__RETRY_TIMER_CNT_MASK 0x0000FFFFL +#define UVD_JRBC_ENC_IB_COND_RD_TIMER__RETRY_INTERVAL_CNT_MASK 0x00FF0000L +#define UVD_JRBC_ENC_IB_COND_RD_TIMER__CONTINUOUS_POLL_EN_MASK 0x01000000L +#define UVD_JRBC_ENC_IB_COND_RD_TIMER__MEM_TIMEOUT_EN_MASK 0x02000000L +//UVD_JRBC_ENC_IB_REF_DATA +#define UVD_JRBC_ENC_IB_REF_DATA__REF_DATA__SHIFT 0x0 +#define UVD_JRBC_ENC_IB_REF_DATA__REF_DATA_MASK 0xFFFFFFFFL +//UVD_JPEG_ENC_PREEMPT_CMD +#define UVD_JPEG_ENC_PREEMPT_CMD__PREEMPT_EN__SHIFT 0x0 +#define UVD_JPEG_ENC_PREEMPT_CMD__WAIT_JPEG_JOB_DONE__SHIFT 0x1 +#define UVD_JPEG_ENC_PREEMPT_CMD__PREEMPT_FENCE_CMD__SHIFT 0x2 +#define UVD_JPEG_ENC_PREEMPT_CMD__PREEMPT_EN_MASK 0x00000001L +#define UVD_JPEG_ENC_PREEMPT_CMD__WAIT_JPEG_JOB_DONE_MASK 0x00000002L +#define UVD_JPEG_ENC_PREEMPT_CMD__PREEMPT_FENCE_CMD_MASK 0x00000004L +//UVD_JPEG_ENC_PREEMPT_FENCE_DATA0 +#define UVD_JPEG_ENC_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0__SHIFT 0x0 +#define UVD_JPEG_ENC_PREEMPT_FENCE_DATA0__PREEMPT_FENCE_DATA0_MASK 0xFFFFFFFFL +//UVD_JPEG_ENC_PREEMPT_FENCE_DATA1 +#define UVD_JPEG_ENC_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1__SHIFT 0x0 +#define UVD_JPEG_ENC_PREEMPT_FENCE_DATA1__PREEMPT_FENCE_DATA1_MASK 0xFFFFFFFFL +//UVD_JRBC_ENC_RB_SIZE +#define UVD_JRBC_ENC_RB_SIZE__RB_SIZE__SHIFT 0x4 +#define UVD_JRBC_ENC_RB_SIZE__RB_SIZE_MASK 0x00FFFFF0L +//UVD_JRBC_ENC_SCRATCH0 +#define UVD_JRBC_ENC_SCRATCH0__SCRATCH0__SHIFT 0x0 +#define UVD_JRBC_ENC_SCRATCH0__SCRATCH0_MASK 0xFFFFFFFFL + + +// addressBlock: uvd0_uvd_jmi_dec +//UVD_JMI_CTRL +#define UVD_JMI_CTRL__STALL_MC_ARB__SHIFT 0x0 +#define UVD_JMI_CTRL__MASK_MC_URGENT__SHIFT 0x1 +#define UVD_JMI_CTRL__ASSERT_MC_URGENT__SHIFT 0x2 +#define UVD_JMI_CTRL__MC_RD_ARB_WAIT_TIMER__SHIFT 0x8 +#define UVD_JMI_CTRL__MC_WR_ARB_WAIT_TIMER__SHIFT 0x10 +#define UVD_JMI_CTRL__CRC_RESET__SHIFT 0x18 +#define UVD_JMI_CTRL__CRC_SEL__SHIFT 0x19 +#define UVD_JMI_CTRL__STALL_MC_ARB_MASK 0x00000001L +#define UVD_JMI_CTRL__MASK_MC_URGENT_MASK 0x00000002L +#define UVD_JMI_CTRL__ASSERT_MC_URGENT_MASK 0x00000004L +#define UVD_JMI_CTRL__MC_RD_ARB_WAIT_TIMER_MASK 0x0000FF00L +#define UVD_JMI_CTRL__MC_WR_ARB_WAIT_TIMER_MASK 0x00FF0000L +#define UVD_JMI_CTRL__CRC_RESET_MASK 0x01000000L +#define UVD_JMI_CTRL__CRC_SEL_MASK 0x1E000000L +//UVD_LMI_JRBC_CTRL +#define UVD_LMI_JRBC_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0 +#define UVD_LMI_JRBC_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1 +#define UVD_LMI_JRBC_CTRL__RD_MAX_BURST__SHIFT 0x4 +#define UVD_LMI_JRBC_CTRL__WR_MAX_BURST__SHIFT 0x8 +#define UVD_LMI_JRBC_CTRL__RD_SWAP__SHIFT 0x14 +#define UVD_LMI_JRBC_CTRL__WR_SWAP__SHIFT 0x16 +#define UVD_LMI_JRBC_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L +#define UVD_LMI_JRBC_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L +#define UVD_LMI_JRBC_CTRL__RD_MAX_BURST_MASK 0x000000F0L +#define UVD_LMI_JRBC_CTRL__WR_MAX_BURST_MASK 0x00000F00L +#define UVD_LMI_JRBC_CTRL__RD_SWAP_MASK 0x00300000L +#define UVD_LMI_JRBC_CTRL__WR_SWAP_MASK 0x00C00000L +//UVD_LMI_JPEG_CTRL +#define 
UVD_LMI_JPEG_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0 +#define UVD_LMI_JPEG_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1 +#define UVD_LMI_JPEG_CTRL__RD_MAX_BURST__SHIFT 0x4 +#define UVD_LMI_JPEG_CTRL__WR_MAX_BURST__SHIFT 0x8 +#define UVD_LMI_JPEG_CTRL__RD_SWAP__SHIFT 0x14 +#define UVD_LMI_JPEG_CTRL__WR_SWAP__SHIFT 0x16 +#define UVD_LMI_JPEG_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L +#define UVD_LMI_JPEG_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L +#define UVD_LMI_JPEG_CTRL__RD_MAX_BURST_MASK 0x000000F0L +#define UVD_LMI_JPEG_CTRL__WR_MAX_BURST_MASK 0x00000F00L +#define UVD_LMI_JPEG_CTRL__RD_SWAP_MASK 0x00300000L +#define UVD_LMI_JPEG_CTRL__WR_SWAP_MASK 0x00C00000L +//UVD_JMI_EJRBC_CTRL +#define UVD_JMI_EJRBC_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0 +#define UVD_JMI_EJRBC_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1 +#define UVD_JMI_EJRBC_CTRL__RD_MAX_BURST__SHIFT 0x4 +#define UVD_JMI_EJRBC_CTRL__WR_MAX_BURST__SHIFT 0x8 +#define UVD_JMI_EJRBC_CTRL__RD_SWAP__SHIFT 0x14 +#define UVD_JMI_EJRBC_CTRL__WR_SWAP__SHIFT 0x16 +#define UVD_JMI_EJRBC_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L +#define UVD_JMI_EJRBC_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L +#define UVD_JMI_EJRBC_CTRL__RD_MAX_BURST_MASK 0x000000F0L +#define UVD_JMI_EJRBC_CTRL__WR_MAX_BURST_MASK 0x00000F00L +#define UVD_JMI_EJRBC_CTRL__RD_SWAP_MASK 0x00300000L +#define UVD_JMI_EJRBC_CTRL__WR_SWAP_MASK 0x00C00000L +//UVD_LMI_EJPEG_CTRL +#define UVD_LMI_EJPEG_CTRL__ARB_RD_WAIT_EN__SHIFT 0x0 +#define UVD_LMI_EJPEG_CTRL__ARB_WR_WAIT_EN__SHIFT 0x1 +#define UVD_LMI_EJPEG_CTRL__RD_MAX_BURST__SHIFT 0x4 +#define UVD_LMI_EJPEG_CTRL__WR_MAX_BURST__SHIFT 0x8 +#define UVD_LMI_EJPEG_CTRL__RD_SWAP__SHIFT 0x14 +#define UVD_LMI_EJPEG_CTRL__WR_SWAP__SHIFT 0x16 +#define UVD_LMI_EJPEG_CTRL__ARB_RD_WAIT_EN_MASK 0x00000001L +#define UVD_LMI_EJPEG_CTRL__ARB_WR_WAIT_EN_MASK 0x00000002L +#define UVD_LMI_EJPEG_CTRL__RD_MAX_BURST_MASK 0x000000F0L +#define UVD_LMI_EJPEG_CTRL__WR_MAX_BURST_MASK 0x00000F00L +#define UVD_LMI_EJPEG_CTRL__RD_SWAP_MASK 0x00300000L +#define UVD_LMI_EJPEG_CTRL__WR_SWAP_MASK 0x00C00000L +//UVD_LMI_JRBC_IB_VMID +#define UVD_LMI_JRBC_IB_VMID__IB_WR_VMID__SHIFT 0x0 +#define UVD_LMI_JRBC_IB_VMID__IB_RD_VMID__SHIFT 0x4 +#define UVD_LMI_JRBC_IB_VMID__MEM_RD_VMID__SHIFT 0x8 +#define UVD_LMI_JRBC_IB_VMID__IB_WR_VMID_MASK 0x0000000FL +#define UVD_LMI_JRBC_IB_VMID__IB_RD_VMID_MASK 0x000000F0L +#define UVD_LMI_JRBC_IB_VMID__MEM_RD_VMID_MASK 0x00000F00L +//UVD_LMI_JRBC_RB_VMID +#define UVD_LMI_JRBC_RB_VMID__RB_WR_VMID__SHIFT 0x0 +#define UVD_LMI_JRBC_RB_VMID__RB_RD_VMID__SHIFT 0x4 +#define UVD_LMI_JRBC_RB_VMID__MEM_RD_VMID__SHIFT 0x8 +#define UVD_LMI_JRBC_RB_VMID__RB_WR_VMID_MASK 0x0000000FL +#define UVD_LMI_JRBC_RB_VMID__RB_RD_VMID_MASK 0x000000F0L +#define UVD_LMI_JRBC_RB_VMID__MEM_RD_VMID_MASK 0x00000F00L +//UVD_LMI_JPEG_VMID +#define UVD_LMI_JPEG_VMID__JPEG_RD_VMID__SHIFT 0x0 +#define UVD_LMI_JPEG_VMID__JPEG_WR_VMID__SHIFT 0x4 +#define UVD_LMI_JPEG_VMID__ATOMIC_USER0_WR_VMID__SHIFT 0x8 +#define UVD_LMI_JPEG_VMID__JPEG_RD_VMID_MASK 0x0000000FL +#define UVD_LMI_JPEG_VMID__JPEG_WR_VMID_MASK 0x000000F0L +#define UVD_LMI_JPEG_VMID__ATOMIC_USER0_WR_VMID_MASK 0x00000F00L +//UVD_JMI_ENC_JRBC_IB_VMID +#define UVD_JMI_ENC_JRBC_IB_VMID__IB_WR_VMID__SHIFT 0x0 +#define UVD_JMI_ENC_JRBC_IB_VMID__IB_RD_VMID__SHIFT 0x4 +#define UVD_JMI_ENC_JRBC_IB_VMID__MEM_RD_VMID__SHIFT 0x8 +#define UVD_JMI_ENC_JRBC_IB_VMID__IB_WR_VMID_MASK 0x0000000FL +#define UVD_JMI_ENC_JRBC_IB_VMID__IB_RD_VMID_MASK 0x000000F0L +#define UVD_JMI_ENC_JRBC_IB_VMID__MEM_RD_VMID_MASK 0x00000F00L +//UVD_JMI_ENC_JRBC_RB_VMID +#define 
UVD_JMI_ENC_JRBC_RB_VMID__RB_WR_VMID__SHIFT 0x0 +#define UVD_JMI_ENC_JRBC_RB_VMID__RB_RD_VMID__SHIFT 0x4 +#define UVD_JMI_ENC_JRBC_RB_VMID__MEM_RD_VMID__SHIFT 0x8 +#define UVD_JMI_ENC_JRBC_RB_VMID__RB_WR_VMID_MASK 0x0000000FL +#define UVD_JMI_ENC_JRBC_RB_VMID__RB_RD_VMID_MASK 0x000000F0L +#define UVD_JMI_ENC_JRBC_RB_VMID__MEM_RD_VMID_MASK 0x00000F00L +//UVD_JMI_ENC_JPEG_VMID +#define UVD_JMI_ENC_JPEG_VMID__PEL_RD_VMID__SHIFT 0x0 +#define UVD_JMI_ENC_JPEG_VMID__BS_WR_VMID__SHIFT 0x5 +#define UVD_JMI_ENC_JPEG_VMID__SCALAR_RD_VMID__SHIFT 0xa +#define UVD_JMI_ENC_JPEG_VMID__SCALAR_WR_VMID__SHIFT 0xf +#define UVD_JMI_ENC_JPEG_VMID__HUFF_FENCE_VMID__SHIFT 0x13 +#define UVD_JMI_ENC_JPEG_VMID__ATOMIC_USER1_WR_VMID__SHIFT 0x17 +#define UVD_JMI_ENC_JPEG_VMID__PEL_RD_VMID_MASK 0x0000000FL +#define UVD_JMI_ENC_JPEG_VMID__BS_WR_VMID_MASK 0x000001E0L +#define UVD_JMI_ENC_JPEG_VMID__SCALAR_RD_VMID_MASK 0x00003C00L +#define UVD_JMI_ENC_JPEG_VMID__SCALAR_WR_VMID_MASK 0x00078000L +#define UVD_JMI_ENC_JPEG_VMID__HUFF_FENCE_VMID_MASK 0x00780000L +#define UVD_JMI_ENC_JPEG_VMID__ATOMIC_USER1_WR_VMID_MASK 0x07800000L +//UVD_JMI_PERFMON_CTRL +#define UVD_JMI_PERFMON_CTRL__PERFMON_STATE__SHIFT 0x0 +#define UVD_JMI_PERFMON_CTRL__PERFMON_SEL__SHIFT 0x8 +#define UVD_JMI_PERFMON_CTRL__PERFMON_STATE_MASK 0x00000003L +#define UVD_JMI_PERFMON_CTRL__PERFMON_SEL_MASK 0x00000F00L +//UVD_JMI_PERFMON_COUNT_LO +#define UVD_JMI_PERFMON_COUNT_LO__PERFMON_COUNT__SHIFT 0x0 +#define UVD_JMI_PERFMON_COUNT_LO__PERFMON_COUNT_MASK 0xFFFFFFFFL +//UVD_JMI_PERFMON_COUNT_HI +#define UVD_JMI_PERFMON_COUNT_HI__PERFMON_COUNT__SHIFT 0x0 +#define UVD_JMI_PERFMON_COUNT_HI__PERFMON_COUNT_MASK 0x0000FFFFL +//UVD_LMI_JPEG_READ_64BIT_BAR_LOW +#define UVD_LMI_JPEG_READ_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_JPEG_READ_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_JPEG_READ_64BIT_BAR_HIGH +#define UVD_LMI_JPEG_READ_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_JPEG_READ_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW +#define UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_JPEG_WRITE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH +#define UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_JPEG_WRITE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW +#define UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH +#define UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_JPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_JRBC_RB_64BIT_BAR_LOW +#define UVD_LMI_JRBC_RB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_JRBC_RB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_JRBC_RB_64BIT_BAR_HIGH +#define UVD_LMI_JRBC_RB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_JRBC_RB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_JRBC_IB_64BIT_BAR_LOW +#define UVD_LMI_JRBC_IB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_JRBC_IB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_JRBC_IB_64BIT_BAR_HIGH +#define UVD_LMI_JRBC_IB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_JRBC_IB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW +#define UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define 
UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH +#define UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW +#define UVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH +#define UVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW +#define UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH +#define UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_JRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_JRBC_IB_MEM_RD_64BIT_BAR_LOW +#define UVD_LMI_JRBC_IB_MEM_RD_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_JRBC_IB_MEM_RD_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_JRBC_IB_MEM_RD_64BIT_BAR_HIGH +#define UVD_LMI_JRBC_IB_MEM_RD_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_JRBC_IB_MEM_RD_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_EJPEG_PREEMPT_FENCE_64BIT_BAR_LOW +#define UVD_LMI_EJPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_EJPEG_PREEMPT_FENCE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_EJPEG_PREEMPT_FENCE_64BIT_BAR_HIGH +#define UVD_LMI_EJPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_EJPEG_PREEMPT_FENCE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_EJRBC_RB_64BIT_BAR_LOW +#define UVD_LMI_EJRBC_RB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_EJRBC_RB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_EJRBC_RB_64BIT_BAR_HIGH +#define UVD_LMI_EJRBC_RB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_EJRBC_RB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_EJRBC_IB_64BIT_BAR_LOW +#define UVD_LMI_EJRBC_IB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_EJRBC_IB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_EJRBC_IB_64BIT_BAR_HIGH +#define UVD_LMI_EJRBC_IB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_EJRBC_IB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_EJRBC_RB_MEM_WR_64BIT_BAR_LOW +#define UVD_LMI_EJRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_EJRBC_RB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_EJRBC_RB_MEM_WR_64BIT_BAR_HIGH +#define UVD_LMI_EJRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_EJRBC_RB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_EJRBC_RB_MEM_RD_64BIT_BAR_LOW +#define UVD_LMI_EJRBC_RB_MEM_RD_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_EJRBC_RB_MEM_RD_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_EJRBC_RB_MEM_RD_64BIT_BAR_HIGH +#define UVD_LMI_EJRBC_RB_MEM_RD_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_EJRBC_RB_MEM_RD_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_EJRBC_IB_MEM_WR_64BIT_BAR_LOW +#define UVD_LMI_EJRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_EJRBC_IB_MEM_WR_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_EJRBC_IB_MEM_WR_64BIT_BAR_HIGH +#define UVD_LMI_EJRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_EJRBC_IB_MEM_WR_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL 
+//UVD_LMI_EJRBC_IB_MEM_RD_64BIT_BAR_LOW +#define UVD_LMI_EJRBC_IB_MEM_RD_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_EJRBC_IB_MEM_RD_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_EJRBC_IB_MEM_RD_64BIT_BAR_HIGH +#define UVD_LMI_EJRBC_IB_MEM_RD_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_EJRBC_IB_MEM_RD_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_JPEG_PREEMPT_VMID +#define UVD_LMI_JPEG_PREEMPT_VMID__VMID__SHIFT 0x0 +#define UVD_LMI_JPEG_PREEMPT_VMID__VMID_MASK 0x0000000FL +//UVD_LMI_ENC_JPEG_PREEMPT_VMID +#define UVD_LMI_ENC_JPEG_PREEMPT_VMID__VMID__SHIFT 0x0 +#define UVD_LMI_ENC_JPEG_PREEMPT_VMID__VMID_MASK 0x0000000FL +//UVD_LMI_JPEG2_VMID +#define UVD_LMI_JPEG2_VMID__JPEG2_RD_VMID__SHIFT 0x0 +#define UVD_LMI_JPEG2_VMID__JPEG2_WR_VMID__SHIFT 0x4 +#define UVD_LMI_JPEG2_VMID__JPEG2_RD_VMID_MASK 0x0000000FL +#define UVD_LMI_JPEG2_VMID__JPEG2_WR_VMID_MASK 0x000000F0L +//UVD_LMI_JPEG2_READ_64BIT_BAR_LOW +#define UVD_LMI_JPEG2_READ_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_JPEG2_READ_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_JPEG2_READ_64BIT_BAR_HIGH +#define UVD_LMI_JPEG2_READ_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_JPEG2_READ_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_JPEG2_WRITE_64BIT_BAR_LOW +#define UVD_LMI_JPEG2_WRITE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_JPEG2_WRITE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_JPEG2_WRITE_64BIT_BAR_HIGH +#define UVD_LMI_JPEG2_WRITE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_JPEG2_WRITE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_JPEG_CTRL2 +#define UVD_LMI_JPEG_CTRL2__ARB_RD_WAIT_EN__SHIFT 0x0 +#define UVD_LMI_JPEG_CTRL2__ARB_WR_WAIT_EN__SHIFT 0x1 +#define UVD_LMI_JPEG_CTRL2__RD_MAX_BURST__SHIFT 0x4 +#define UVD_LMI_JPEG_CTRL2__WR_MAX_BURST__SHIFT 0x8 +#define UVD_LMI_JPEG_CTRL2__RD_SWAP__SHIFT 0x14 +#define UVD_LMI_JPEG_CTRL2__WR_SWAP__SHIFT 0x16 +#define UVD_LMI_JPEG_CTRL2__ARB_RD_WAIT_EN_MASK 0x00000001L +#define UVD_LMI_JPEG_CTRL2__ARB_WR_WAIT_EN_MASK 0x00000002L +#define UVD_LMI_JPEG_CTRL2__RD_MAX_BURST_MASK 0x000000F0L +#define UVD_LMI_JPEG_CTRL2__WR_MAX_BURST_MASK 0x00000F00L +#define UVD_LMI_JPEG_CTRL2__RD_SWAP_MASK 0x00300000L +#define UVD_LMI_JPEG_CTRL2__WR_SWAP_MASK 0x00C00000L +//UVD_JMI_DEC_SWAP_CNTL +#define UVD_JMI_DEC_SWAP_CNTL__RB_MC_SWAP__SHIFT 0x0 +#define UVD_JMI_DEC_SWAP_CNTL__IB_MC_SWAP__SHIFT 0x2 +#define UVD_JMI_DEC_SWAP_CNTL__RB_MEM_WR_MC_SWAP__SHIFT 0x4 +#define UVD_JMI_DEC_SWAP_CNTL__IB_MEM_WR_MC_SWAP__SHIFT 0x6 +#define UVD_JMI_DEC_SWAP_CNTL__RB_MEM_RD_MC_SWAP__SHIFT 0x8 +#define UVD_JMI_DEC_SWAP_CNTL__IB_MEM_RD_MC_SWAP__SHIFT 0xa +#define UVD_JMI_DEC_SWAP_CNTL__PREEMPT_WR_MC_SWAP__SHIFT 0xc +#define UVD_JMI_DEC_SWAP_CNTL__JPEG_RD_MC_SWAP__SHIFT 0xe +#define UVD_JMI_DEC_SWAP_CNTL__JPEG_WR_MC_SWAP__SHIFT 0x10 +#define UVD_JMI_DEC_SWAP_CNTL__RB_MC_SWAP_MASK 0x00000003L +#define UVD_JMI_DEC_SWAP_CNTL__IB_MC_SWAP_MASK 0x0000000CL +#define UVD_JMI_DEC_SWAP_CNTL__RB_MEM_WR_MC_SWAP_MASK 0x00000030L +#define UVD_JMI_DEC_SWAP_CNTL__IB_MEM_WR_MC_SWAP_MASK 0x000000C0L +#define UVD_JMI_DEC_SWAP_CNTL__RB_MEM_RD_MC_SWAP_MASK 0x00000300L +#define UVD_JMI_DEC_SWAP_CNTL__IB_MEM_RD_MC_SWAP_MASK 0x00000C00L +#define UVD_JMI_DEC_SWAP_CNTL__PREEMPT_WR_MC_SWAP_MASK 0x00003000L +#define UVD_JMI_DEC_SWAP_CNTL__JPEG_RD_MC_SWAP_MASK 0x0000C000L +#define UVD_JMI_DEC_SWAP_CNTL__JPEG_WR_MC_SWAP_MASK 0x00030000L +//UVD_JMI_ENC_SWAP_CNTL +#define UVD_JMI_ENC_SWAP_CNTL__RB_MC_SWAP__SHIFT 0x0 +#define 
UVD_JMI_ENC_SWAP_CNTL__IB_MC_SWAP__SHIFT 0x2 +#define UVD_JMI_ENC_SWAP_CNTL__RB_MEM_WR_MC_SWAP__SHIFT 0x4 +#define UVD_JMI_ENC_SWAP_CNTL__IB_MEM_WR_MC_SWAP__SHIFT 0x6 +#define UVD_JMI_ENC_SWAP_CNTL__RB_MEM_RD_MC_SWAP__SHIFT 0x8 +#define UVD_JMI_ENC_SWAP_CNTL__IB_MEM_RD_MC_SWAP__SHIFT 0xa +#define UVD_JMI_ENC_SWAP_CNTL__PREEMPT_WR_MC_SWAP__SHIFT 0xc +#define UVD_JMI_ENC_SWAP_CNTL__PEL_RD_MC_SWAP__SHIFT 0xe +#define UVD_JMI_ENC_SWAP_CNTL__BS_WR_MC_SWAP__SHIFT 0x10 +#define UVD_JMI_ENC_SWAP_CNTL__SCALAR_RD_MC_SWAP__SHIFT 0x12 +#define UVD_JMI_ENC_SWAP_CNTL__SCALAR_WR_MC_SWAP__SHIFT 0x14 +#define UVD_JMI_ENC_SWAP_CNTL__HUFF_FENCE_MC_SWAP__SHIFT 0x16 +#define UVD_JMI_ENC_SWAP_CNTL__RB_MC_SWAP_MASK 0x00000003L +#define UVD_JMI_ENC_SWAP_CNTL__IB_MC_SWAP_MASK 0x0000000CL +#define UVD_JMI_ENC_SWAP_CNTL__RB_MEM_WR_MC_SWAP_MASK 0x00000030L +#define UVD_JMI_ENC_SWAP_CNTL__IB_MEM_WR_MC_SWAP_MASK 0x000000C0L +#define UVD_JMI_ENC_SWAP_CNTL__RB_MEM_RD_MC_SWAP_MASK 0x00000300L +#define UVD_JMI_ENC_SWAP_CNTL__IB_MEM_RD_MC_SWAP_MASK 0x00000C00L +#define UVD_JMI_ENC_SWAP_CNTL__PREEMPT_WR_MC_SWAP_MASK 0x00003000L +#define UVD_JMI_ENC_SWAP_CNTL__PEL_RD_MC_SWAP_MASK 0x0000C000L +#define UVD_JMI_ENC_SWAP_CNTL__BS_WR_MC_SWAP_MASK 0x00030000L +#define UVD_JMI_ENC_SWAP_CNTL__SCALAR_RD_MC_SWAP_MASK 0x000C0000L +#define UVD_JMI_ENC_SWAP_CNTL__SCALAR_WR_MC_SWAP_MASK 0x00300000L +#define UVD_JMI_ENC_SWAP_CNTL__HUFF_FENCE_MC_SWAP_MASK 0x00C00000L +//UVD_JMI_CNTL +#define UVD_JMI_CNTL__SOFT_RESET__SHIFT 0x0 +#define UVD_JMI_CNTL__MC_RD_REQ_RET_MAX__SHIFT 0x8 +#define UVD_JMI_CNTL__SOFT_RESET_MASK 0x00000001L +#define UVD_JMI_CNTL__MC_RD_REQ_RET_MAX_MASK 0x0003FF00L +//UVD_JMI_HUFF_FENCE_64BIT_BAR_LOW +#define UVD_JMI_HUFF_FENCE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_JMI_HUFF_FENCE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_JMI_HUFF_FENCE_64BIT_BAR_HIGH +#define UVD_JMI_HUFF_FENCE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_JMI_HUFF_FENCE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_JMI_DEC_SWAP_CNTL2 +#define UVD_JMI_DEC_SWAP_CNTL2__JPEG2_RD_MC_SWAP__SHIFT 0x0 +#define UVD_JMI_DEC_SWAP_CNTL2__JPEG2_WR_MC_SWAP__SHIFT 0x2 +#define UVD_JMI_DEC_SWAP_CNTL2__JPEG2_RD_MC_SWAP_MASK 0x00000003L +#define UVD_JMI_DEC_SWAP_CNTL2__JPEG2_WR_MC_SWAP_MASK 0x0000000CL + + +// addressBlock: uvd0_uvd_jpeg_common_dec +//JPEG_SOFT_RESET_STATUS +#define JPEG_SOFT_RESET_STATUS__JPEG_DEC_RESET_STATUS__SHIFT 0x0 +#define JPEG_SOFT_RESET_STATUS__JPEG2_DEC_RESET_STATUS__SHIFT 0x1 +#define JPEG_SOFT_RESET_STATUS__DJRBC_RESET_STATUS__SHIFT 0x2 +#define JPEG_SOFT_RESET_STATUS__JPEG_ENC_RESET_STATUS__SHIFT 0x3 +#define JPEG_SOFT_RESET_STATUS__EJRBC_RESET_STATUS__SHIFT 0x4 +#define JPEG_SOFT_RESET_STATUS__JMCIF_RESET_STATUS__SHIFT 0x5 +#define JPEG_SOFT_RESET_STATUS__JPEG_DEC_RESET_STATUS_MASK 0x00000001L +#define JPEG_SOFT_RESET_STATUS__JPEG2_DEC_RESET_STATUS_MASK 0x00000002L +#define JPEG_SOFT_RESET_STATUS__DJRBC_RESET_STATUS_MASK 0x00000004L +#define JPEG_SOFT_RESET_STATUS__JPEG_ENC_RESET_STATUS_MASK 0x00000008L +#define JPEG_SOFT_RESET_STATUS__EJRBC_RESET_STATUS_MASK 0x00000010L +#define JPEG_SOFT_RESET_STATUS__JMCIF_RESET_STATUS_MASK 0x00000020L +//JPEG_SYS_INT_EN +#define JPEG_SYS_INT_EN__DJPEG_CORE__SHIFT 0x0 +#define JPEG_SYS_INT_EN__DJRBC__SHIFT 0x1 +#define JPEG_SYS_INT_EN__DJPEG_PF_RPT__SHIFT 0x2 +#define JPEG_SYS_INT_EN__EJPEG_PF_RPT__SHIFT 0x3 +#define JPEG_SYS_INT_EN__EJPEG_CORE__SHIFT 0x4 +#define JPEG_SYS_INT_EN__EJRBC__SHIFT 0x5 +#define JPEG_SYS_INT_EN__DJPEG_CORE2__SHIFT 0x6 +#define 
JPEG_SYS_INT_EN__DJPEG_CORE_MASK 0x00000001L +#define JPEG_SYS_INT_EN__DJRBC_MASK 0x00000002L +#define JPEG_SYS_INT_EN__DJPEG_PF_RPT_MASK 0x00000004L +#define JPEG_SYS_INT_EN__EJPEG_PF_RPT_MASK 0x00000008L +#define JPEG_SYS_INT_EN__EJPEG_CORE_MASK 0x00000010L +#define JPEG_SYS_INT_EN__EJRBC_MASK 0x00000020L +#define JPEG_SYS_INT_EN__DJPEG_CORE2_MASK 0x00000040L +//JPEG_SYS_INT_STATUS +#define JPEG_SYS_INT_STATUS__DJPEG_CORE__SHIFT 0x0 +#define JPEG_SYS_INT_STATUS__DJRBC__SHIFT 0x1 +#define JPEG_SYS_INT_STATUS__DJPEG_PF_RPT__SHIFT 0x2 +#define JPEG_SYS_INT_STATUS__EJPEG_PF_RPT__SHIFT 0x3 +#define JPEG_SYS_INT_STATUS__EJPEG_CORE__SHIFT 0x4 +#define JPEG_SYS_INT_STATUS__EJRBC__SHIFT 0x5 +#define JPEG_SYS_INT_STATUS__DJPEG_CORE2__SHIFT 0x6 +#define JPEG_SYS_INT_STATUS__DJPEG_CORE_MASK 0x00000001L +#define JPEG_SYS_INT_STATUS__DJRBC_MASK 0x00000002L +#define JPEG_SYS_INT_STATUS__DJPEG_PF_RPT_MASK 0x00000004L +#define JPEG_SYS_INT_STATUS__EJPEG_PF_RPT_MASK 0x00000008L +#define JPEG_SYS_INT_STATUS__EJPEG_CORE_MASK 0x00000010L +#define JPEG_SYS_INT_STATUS__EJRBC_MASK 0x00000020L +#define JPEG_SYS_INT_STATUS__DJPEG_CORE2_MASK 0x00000040L +//JPEG_SYS_INT_ACK +#define JPEG_SYS_INT_ACK__DJPEG_CORE__SHIFT 0x0 +#define JPEG_SYS_INT_ACK__DJRBC__SHIFT 0x1 +#define JPEG_SYS_INT_ACK__DJPEG_PF_RPT__SHIFT 0x2 +#define JPEG_SYS_INT_ACK__EJPEG_PF_RPT__SHIFT 0x3 +#define JPEG_SYS_INT_ACK__EJPEG_CORE__SHIFT 0x4 +#define JPEG_SYS_INT_ACK__EJRBC__SHIFT 0x5 +#define JPEG_SYS_INT_ACK__DJPEG_CORE2__SHIFT 0x6 +#define JPEG_SYS_INT_ACK__DJPEG_CORE_MASK 0x00000001L +#define JPEG_SYS_INT_ACK__DJRBC_MASK 0x00000002L +#define JPEG_SYS_INT_ACK__DJPEG_PF_RPT_MASK 0x00000004L +#define JPEG_SYS_INT_ACK__EJPEG_PF_RPT_MASK 0x00000008L +#define JPEG_SYS_INT_ACK__EJPEG_CORE_MASK 0x00000010L +#define JPEG_SYS_INT_ACK__EJRBC_MASK 0x00000020L +#define JPEG_SYS_INT_ACK__DJPEG_CORE2_MASK 0x00000040L +//JPEG_MASTINT_EN +#define JPEG_MASTINT_EN__OVERRUN_RST__SHIFT 0x0 +#define JPEG_MASTINT_EN__INT_OVERRUN__SHIFT 0x4 +#define JPEG_MASTINT_EN__OVERRUN_RST_MASK 0x00000001L +#define JPEG_MASTINT_EN__INT_OVERRUN_MASK 0x007FFFF0L +//JPEG_IH_CTRL +#define JPEG_IH_CTRL__IH_SOFT_RESET__SHIFT 0x0 +#define JPEG_IH_CTRL__IH_STALL_EN__SHIFT 0x1 +#define JPEG_IH_CTRL__IH_STATUS_CLEAN__SHIFT 0x2 +#define JPEG_IH_CTRL__IH_VMID__SHIFT 0x3 +#define JPEG_IH_CTRL__IH_USER_DATA__SHIFT 0x7 +#define JPEG_IH_CTRL__IH_RINGID__SHIFT 0x13 +#define JPEG_IH_CTRL__IH_SOFT_RESET_MASK 0x00000001L +#define JPEG_IH_CTRL__IH_STALL_EN_MASK 0x00000002L +#define JPEG_IH_CTRL__IH_STATUS_CLEAN_MASK 0x00000004L +#define JPEG_IH_CTRL__IH_VMID_MASK 0x00000078L +#define JPEG_IH_CTRL__IH_USER_DATA_MASK 0x0007FF80L +#define JPEG_IH_CTRL__IH_RINGID_MASK 0x07F80000L +//JRBBM_ARB_CTRL +#define JRBBM_ARB_CTRL__DJRBC_DROP__SHIFT 0x0 +#define JRBBM_ARB_CTRL__EJRBC_DROP__SHIFT 0x1 +#define JRBBM_ARB_CTRL__SRBM_DROP__SHIFT 0x2 +#define JRBBM_ARB_CTRL__DJRBC_DROP_MASK 0x00000001L +#define JRBBM_ARB_CTRL__EJRBC_DROP_MASK 0x00000002L +#define JRBBM_ARB_CTRL__SRBM_DROP_MASK 0x00000004L + + +// addressBlock: uvd0_uvd_jpeg_common_sclk_dec +//JPEG_CGC_GATE +#define JPEG_CGC_GATE__JPEG_DEC__SHIFT 0x0 +#define JPEG_CGC_GATE__JPEG2_DEC__SHIFT 0x1 +#define JPEG_CGC_GATE__JPEG_ENC__SHIFT 0x2 +#define JPEG_CGC_GATE__JMCIF__SHIFT 0x3 +#define JPEG_CGC_GATE__JRBBM__SHIFT 0x4 +#define JPEG_CGC_GATE__JPEG_DEC_MASK 0x00000001L +#define JPEG_CGC_GATE__JPEG2_DEC_MASK 0x00000002L +#define JPEG_CGC_GATE__JPEG_ENC_MASK 0x00000004L +#define JPEG_CGC_GATE__JMCIF_MASK 0x00000008L +#define JPEG_CGC_GATE__JRBBM_MASK 
0x00000010L +//JPEG_CGC_CTRL +#define JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT 0x0 +#define JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT 0x1 +#define JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT 0x5 +#define JPEG_CGC_CTRL__DYN_OCLK_RAMP_EN__SHIFT 0xa +#define JPEG_CGC_CTRL__DYN_RCLK_RAMP_EN__SHIFT 0xb +#define JPEG_CGC_CTRL__GATER_DIV_ID__SHIFT 0xc +#define JPEG_CGC_CTRL__JPEG_DEC_MODE__SHIFT 0x10 +#define JPEG_CGC_CTRL__JPEG2_DEC_MODE__SHIFT 0x11 +#define JPEG_CGC_CTRL__JPEG_ENC_MODE__SHIFT 0x12 +#define JPEG_CGC_CTRL__JMCIF_MODE__SHIFT 0x13 +#define JPEG_CGC_CTRL__JRBBM_MODE__SHIFT 0x14 +#define JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK 0x00000001L +#define JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK 0x0000001EL +#define JPEG_CGC_CTRL__CLK_OFF_DELAY_MASK 0x000003E0L +#define JPEG_CGC_CTRL__DYN_OCLK_RAMP_EN_MASK 0x00000400L +#define JPEG_CGC_CTRL__DYN_RCLK_RAMP_EN_MASK 0x00000800L +#define JPEG_CGC_CTRL__GATER_DIV_ID_MASK 0x00007000L +#define JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK 0x00010000L +#define JPEG_CGC_CTRL__JPEG2_DEC_MODE_MASK 0x00020000L +#define JPEG_CGC_CTRL__JPEG_ENC_MODE_MASK 0x00040000L +#define JPEG_CGC_CTRL__JMCIF_MODE_MASK 0x00080000L +#define JPEG_CGC_CTRL__JRBBM_MODE_MASK 0x00100000L +//JPEG_CGC_STATUS +#define JPEG_CGC_STATUS__JPEG_DEC_VCLK_ACTIVE__SHIFT 0x0 +#define JPEG_CGC_STATUS__JPEG_DEC_SCLK_ACTIVE__SHIFT 0x1 +#define JPEG_CGC_STATUS__JPEG2_DEC_VCLK_ACTIVE__SHIFT 0x2 +#define JPEG_CGC_STATUS__JPEG2_DEC_SCLK_ACTIVE__SHIFT 0x3 +#define JPEG_CGC_STATUS__JPEG_ENC_VCLK_ACTIVE__SHIFT 0x4 +#define JPEG_CGC_STATUS__JPEG_ENC_SCLK_ACTIVE__SHIFT 0x5 +#define JPEG_CGC_STATUS__JMCIF_SCLK_ACTIVE__SHIFT 0x6 +#define JPEG_CGC_STATUS__JRBBM_VCLK_ACTIVE__SHIFT 0x7 +#define JPEG_CGC_STATUS__JRBBM_SCLK_ACTIVE__SHIFT 0x8 +#define JPEG_CGC_STATUS__JPEG_DEC_VCLK_ACTIVE_MASK 0x00000001L +#define JPEG_CGC_STATUS__JPEG_DEC_SCLK_ACTIVE_MASK 0x00000002L +#define JPEG_CGC_STATUS__JPEG2_DEC_VCLK_ACTIVE_MASK 0x00000004L +#define JPEG_CGC_STATUS__JPEG2_DEC_SCLK_ACTIVE_MASK 0x00000008L +#define JPEG_CGC_STATUS__JPEG_ENC_VCLK_ACTIVE_MASK 0x00000010L +#define JPEG_CGC_STATUS__JPEG_ENC_SCLK_ACTIVE_MASK 0x00000020L +#define JPEG_CGC_STATUS__JMCIF_SCLK_ACTIVE_MASK 0x00000040L +#define JPEG_CGC_STATUS__JRBBM_VCLK_ACTIVE_MASK 0x00000080L +#define JPEG_CGC_STATUS__JRBBM_SCLK_ACTIVE_MASK 0x00000100L +//JPEG_COMN_CGC_MEM_CTRL +#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_LS_EN__SHIFT 0x0 +#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_DS_EN__SHIFT 0x1 +#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_SD_EN__SHIFT 0x2 +#define JPEG_COMN_CGC_MEM_CTRL__LS_SET_DELAY__SHIFT 0x10 +#define JPEG_COMN_CGC_MEM_CTRL__LS_CLEAR_DELAY__SHIFT 0x14 +#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_LS_EN_MASK 0x00000001L +#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_DS_EN_MASK 0x00000002L +#define JPEG_COMN_CGC_MEM_CTRL__JMCIF_SD_EN_MASK 0x00000004L +#define JPEG_COMN_CGC_MEM_CTRL__LS_SET_DELAY_MASK 0x000F0000L +#define JPEG_COMN_CGC_MEM_CTRL__LS_CLEAR_DELAY_MASK 0x00F00000L +//JPEG_DEC_CGC_MEM_CTRL +#define JPEG_DEC_CGC_MEM_CTRL__JPEG_DEC_LS_EN__SHIFT 0x0 +#define JPEG_DEC_CGC_MEM_CTRL__JPEG_DEC_DS_EN__SHIFT 0x1 +#define JPEG_DEC_CGC_MEM_CTRL__JPEG_DEC_SD_EN__SHIFT 0x2 +#define JPEG_DEC_CGC_MEM_CTRL__JPEG_DEC_LS_EN_MASK 0x00000001L +#define JPEG_DEC_CGC_MEM_CTRL__JPEG_DEC_DS_EN_MASK 0x00000002L +#define JPEG_DEC_CGC_MEM_CTRL__JPEG_DEC_SD_EN_MASK 0x00000004L +//JPEG2_DEC_CGC_MEM_CTRL +#define JPEG2_DEC_CGC_MEM_CTRL__JPEG2_DEC_LS_EN__SHIFT 0x0 +#define JPEG2_DEC_CGC_MEM_CTRL__JPEG2_DEC_DS_EN__SHIFT 0x1 +#define JPEG2_DEC_CGC_MEM_CTRL__JPEG2_DEC_SD_EN__SHIFT 0x2 +#define 
JPEG2_DEC_CGC_MEM_CTRL__JPEG2_DEC_LS_EN_MASK 0x00000001L +#define JPEG2_DEC_CGC_MEM_CTRL__JPEG2_DEC_DS_EN_MASK 0x00000002L +#define JPEG2_DEC_CGC_MEM_CTRL__JPEG2_DEC_SD_EN_MASK 0x00000004L +//JPEG_ENC_CGC_MEM_CTRL +#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_LS_EN__SHIFT 0x0 +#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_DS_EN__SHIFT 0x1 +#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_SD_EN__SHIFT 0x2 +#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_LS_EN_MASK 0x00000001L +#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_DS_EN_MASK 0x00000002L +#define JPEG_ENC_CGC_MEM_CTRL__JPEG_ENC_SD_EN_MASK 0x00000004L +//JPEG_SOFT_RESET2 +#define JPEG_SOFT_RESET2__ATOMIC_SOFT_RESET__SHIFT 0x0 +#define JPEG_SOFT_RESET2__ATOMIC_SOFT_RESET_MASK 0x00000001L +//JPEG_PERF_BANK_CONF +#define JPEG_PERF_BANK_CONF__RESET__SHIFT 0x0 +#define JPEG_PERF_BANK_CONF__PEEK__SHIFT 0x8 +#define JPEG_PERF_BANK_CONF__CONCATENATE__SHIFT 0x10 +#define JPEG_PERF_BANK_CONF__RESET_MASK 0x0000000FL +#define JPEG_PERF_BANK_CONF__PEEK_MASK 0x00000F00L +#define JPEG_PERF_BANK_CONF__CONCATENATE_MASK 0x00030000L +//JPEG_PERF_BANK_EVENT_SEL +#define JPEG_PERF_BANK_EVENT_SEL__SEL0__SHIFT 0x0 +#define JPEG_PERF_BANK_EVENT_SEL__SEL1__SHIFT 0x8 +#define JPEG_PERF_BANK_EVENT_SEL__SEL2__SHIFT 0x10 +#define JPEG_PERF_BANK_EVENT_SEL__SEL3__SHIFT 0x18 +#define JPEG_PERF_BANK_EVENT_SEL__SEL0_MASK 0x000000FFL +#define JPEG_PERF_BANK_EVENT_SEL__SEL1_MASK 0x0000FF00L +#define JPEG_PERF_BANK_EVENT_SEL__SEL2_MASK 0x00FF0000L +#define JPEG_PERF_BANK_EVENT_SEL__SEL3_MASK 0xFF000000L +//JPEG_PERF_BANK_COUNT0 +#define JPEG_PERF_BANK_COUNT0__COUNT__SHIFT 0x0 +#define JPEG_PERF_BANK_COUNT0__COUNT_MASK 0xFFFFFFFFL +//JPEG_PERF_BANK_COUNT1 +#define JPEG_PERF_BANK_COUNT1__COUNT__SHIFT 0x0 +#define JPEG_PERF_BANK_COUNT1__COUNT_MASK 0xFFFFFFFFL +//JPEG_PERF_BANK_COUNT2 +#define JPEG_PERF_BANK_COUNT2__COUNT__SHIFT 0x0 +#define JPEG_PERF_BANK_COUNT2__COUNT_MASK 0xFFFFFFFFL +//JPEG_PERF_BANK_COUNT3 +#define JPEG_PERF_BANK_COUNT3__COUNT__SHIFT 0x0 +#define JPEG_PERF_BANK_COUNT3__COUNT_MASK 0xFFFFFFFFL + + +// addressBlock: uvd0_uvd_pg_dec +//UVD_PGFSM_CONFIG +#define UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT 0x0 +#define UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT 0x2 +#define UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT 0x4 +#define UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT 0x6 +#define UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT 0x8 +#define UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT 0xa +#define UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT 0xc +#define UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT 0xe +#define UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT 0x10 +#define UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT 0x12 +#define UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT 0x14 +#define UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT 0x16 +#define UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG_MASK 0x00000003L +#define UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG_MASK 0x0000000CL +#define UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG_MASK 0x00000030L +#define UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG_MASK 0x000000C0L +#define UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG_MASK 0x00000300L +#define UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG_MASK 0x00000C00L +#define UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG_MASK 0x00003000L +#define UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG_MASK 0x0000C000L +#define UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG_MASK 0x00030000L +#define UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG_MASK 0x000C0000L +#define UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG_MASK 0x00300000L +#define UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG_MASK 0x00C00000L +//UVD_PGFSM_STATUS +#define UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT 0x0 +#define 
UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT 0x2 +#define UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT 0x4 +#define UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT 0x6 +#define UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT 0x8 +#define UVD_PGFSM_STATUS__UVDIL_PWR_STATUS__SHIFT 0xa +#define UVD_PGFSM_STATUS__UVDIR_PWR_STATUS__SHIFT 0xc +#define UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT 0xe +#define UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT 0x10 +#define UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT 0x12 +#define UVD_PGFSM_STATUS__UVDW_PWR_STATUS__SHIFT 0x14 +#define UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT 0x16 +#define UVD_PGFSM_STATUS__UVDM_PWR_STATUS_MASK 0x00000003L +#define UVD_PGFSM_STATUS__UVDU_PWR_STATUS_MASK 0x0000000CL +#define UVD_PGFSM_STATUS__UVDF_PWR_STATUS_MASK 0x00000030L +#define UVD_PGFSM_STATUS__UVDC_PWR_STATUS_MASK 0x000000C0L +#define UVD_PGFSM_STATUS__UVDB_PWR_STATUS_MASK 0x00000300L +#define UVD_PGFSM_STATUS__UVDIL_PWR_STATUS_MASK 0x00000C00L +#define UVD_PGFSM_STATUS__UVDIR_PWR_STATUS_MASK 0x00003000L +#define UVD_PGFSM_STATUS__UVDTD_PWR_STATUS_MASK 0x0000C000L +#define UVD_PGFSM_STATUS__UVDTE_PWR_STATUS_MASK 0x00030000L +#define UVD_PGFSM_STATUS__UVDE_PWR_STATUS_MASK 0x000C0000L +#define UVD_PGFSM_STATUS__UVDW_PWR_STATUS_MASK 0x00300000L +#define UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK 0x00C00000L +//UVD_POWER_STATUS +#define UVD_POWER_STATUS__UVD_POWER_STATUS__SHIFT 0x0 +#define UVD_POWER_STATUS__UVD_PG_MODE__SHIFT 0x2 +#define UVD_POWER_STATUS__UVD_CG_MODE__SHIFT 0x4 +#define UVD_POWER_STATUS__UVD_PG_EN__SHIFT 0x8 +#define UVD_POWER_STATUS__RBC_SNOOP_DIS__SHIFT 0x9 +#define UVD_POWER_STATUS__SW_RB_SNOOP_DIS__SHIFT 0xb +#define UVD_POWER_STATUS__STALL_DPG_POWER_UP__SHIFT 0x1f +#define UVD_POWER_STATUS__UVD_POWER_STATUS_MASK 0x00000003L +#define UVD_POWER_STATUS__UVD_PG_MODE_MASK 0x00000004L +#define UVD_POWER_STATUS__UVD_CG_MODE_MASK 0x00000030L +#define UVD_POWER_STATUS__UVD_PG_EN_MASK 0x00000100L +#define UVD_POWER_STATUS__RBC_SNOOP_DIS_MASK 0x00000200L +#define UVD_POWER_STATUS__SW_RB_SNOOP_DIS_MASK 0x00000800L +#define UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK 0x80000000L +//UVD_PG_IND_INDEX +#define UVD_PG_IND_INDEX__INDEX__SHIFT 0x0 +#define UVD_PG_IND_INDEX__INDEX_MASK 0x0000003FL +//UVD_PG_IND_DATA +#define UVD_PG_IND_DATA__DATA__SHIFT 0x0 +#define UVD_PG_IND_DATA__DATA_MASK 0xFFFFFFFFL +//CC_UVD_HARVESTING +#define CC_UVD_HARVESTING__MMSCH_DISABLE__SHIFT 0x0 +#define CC_UVD_HARVESTING__UVD_DISABLE__SHIFT 0x1 +#define CC_UVD_HARVESTING__MMSCH_DISABLE_MASK 0x00000001L +#define CC_UVD_HARVESTING__UVD_DISABLE_MASK 0x00000002L +//UVD_JPEG_POWER_STATUS +#define UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS__SHIFT 0x0 +#define UVD_JPEG_POWER_STATUS__JPEG_PG_MODE__SHIFT 0x4 +#define UVD_JPEG_POWER_STATUS__JRBC_DEC_SNOOP_DIS__SHIFT 0x8 +#define UVD_JPEG_POWER_STATUS__JRBC_ENC_SNOOP_DIS__SHIFT 0x9 +#define UVD_JPEG_POWER_STATUS__STALL_JDPG_POWER_UP__SHIFT 0x1f +#define UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK 0x00000001L +#define UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK 0x00000010L +#define UVD_JPEG_POWER_STATUS__JRBC_DEC_SNOOP_DIS_MASK 0x00000100L +#define UVD_JPEG_POWER_STATUS__JRBC_ENC_SNOOP_DIS_MASK 0x00000200L +#define UVD_JPEG_POWER_STATUS__STALL_JDPG_POWER_UP_MASK 0x80000000L +//UVD_DPG_LMA_CTL +#define UVD_DPG_LMA_CTL__READ_WRITE__SHIFT 0x0 +#define UVD_DPG_LMA_CTL__MASK_EN__SHIFT 0x1 +#define UVD_DPG_LMA_CTL__ADDR_AUTO_INCREMENT__SHIFT 0x2 +#define UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT 0x4 +#define UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT 0x10 +#define UVD_DPG_LMA_CTL__READ_WRITE_MASK 
0x00000001L +#define UVD_DPG_LMA_CTL__MASK_EN_MASK 0x00000002L +#define UVD_DPG_LMA_CTL__ADDR_AUTO_INCREMENT_MASK 0x00000004L +#define UVD_DPG_LMA_CTL__SRAM_SEL_MASK 0x00000010L +#define UVD_DPG_LMA_CTL__READ_WRITE_ADDR_MASK 0xFFFF0000L +//UVD_DPG_LMA_DATA +#define UVD_DPG_LMA_DATA__LMA_DATA__SHIFT 0x0 +#define UVD_DPG_LMA_DATA__LMA_DATA_MASK 0xFFFFFFFFL +//UVD_DPG_LMA_MASK +#define UVD_DPG_LMA_MASK__LMA_MASK__SHIFT 0x0 +#define UVD_DPG_LMA_MASK__LMA_MASK_MASK 0xFFFFFFFFL +//UVD_DPG_PAUSE +#define UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ__SHIFT 0x0 +#define UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK__SHIFT 0x1 +#define UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ__SHIFT 0x2 +#define UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK__SHIFT 0x3 +#define UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK 0x00000001L +#define UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK 0x00000002L +#define UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK 0x00000004L +#define UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK 0x00000008L +//UVD_SCRATCH1 +#define UVD_SCRATCH1__SCRATCH1_DATA__SHIFT 0x0 +#define UVD_SCRATCH1__SCRATCH1_DATA_MASK 0xFFFFFFFFL +//UVD_SCRATCH2 +#define UVD_SCRATCH2__SCRATCH2_DATA__SHIFT 0x0 +#define UVD_SCRATCH2__SCRATCH2_DATA_MASK 0xFFFFFFFFL +//UVD_SCRATCH3 +#define UVD_SCRATCH3__SCRATCH3_DATA__SHIFT 0x0 +#define UVD_SCRATCH3__SCRATCH3_DATA_MASK 0xFFFFFFFFL +//UVD_SCRATCH4 +#define UVD_SCRATCH4__SCRATCH4_DATA__SHIFT 0x0 +#define UVD_SCRATCH4__SCRATCH4_DATA_MASK 0xFFFFFFFFL +//UVD_SCRATCH5 +#define UVD_SCRATCH5__SCRATCH5_DATA__SHIFT 0x0 +#define UVD_SCRATCH5__SCRATCH5_DATA_MASK 0xFFFFFFFFL +//UVD_SCRATCH6 +#define UVD_SCRATCH6__SCRATCH6_DATA__SHIFT 0x0 +#define UVD_SCRATCH6__SCRATCH6_DATA_MASK 0xFFFFFFFFL +//UVD_SCRATCH7 +#define UVD_SCRATCH7__SCRATCH7_DATA__SHIFT 0x0 +#define UVD_SCRATCH7__SCRATCH7_DATA_MASK 0xFFFFFFFFL +//UVD_SCRATCH8 +#define UVD_SCRATCH8__SCRATCH8_DATA__SHIFT 0x0 +#define UVD_SCRATCH8__SCRATCH8_DATA_MASK 0xFFFFFFFFL +//UVD_SCRATCH9 +#define UVD_SCRATCH9__SCRATCH9_DATA__SHIFT 0x0 +#define UVD_SCRATCH9__SCRATCH9_DATA_MASK 0xFFFFFFFFL +//UVD_SCRATCH10 +#define UVD_SCRATCH10__SCRATCH10_DATA__SHIFT 0x0 +#define UVD_SCRATCH10__SCRATCH10_DATA_MASK 0xFFFFFFFFL +//UVD_SCRATCH11 +#define UVD_SCRATCH11__SCRATCH11_DATA__SHIFT 0x0 +#define UVD_SCRATCH11__SCRATCH11_DATA_MASK 0xFFFFFFFFL +//UVD_SCRATCH12 +#define UVD_SCRATCH12__SCRATCH12_DATA__SHIFT 0x0 +#define UVD_SCRATCH12__SCRATCH12_DATA_MASK 0xFFFFFFFFL +//UVD_SCRATCH13 +#define UVD_SCRATCH13__SCRATCH13_DATA__SHIFT 0x0 +#define UVD_SCRATCH13__SCRATCH13_DATA_MASK 0xFFFFFFFFL +//UVD_SCRATCH14 +#define UVD_SCRATCH14__SCRATCH14_DATA__SHIFT 0x0 +#define UVD_SCRATCH14__SCRATCH14_DATA_MASK 0xFFFFFFFFL +//UVD_FREE_COUNTER_REG +#define UVD_FREE_COUNTER_REG__FREE_COUNTER__SHIFT 0x0 +#define UVD_FREE_COUNTER_REG__FREE_COUNTER_MASK 0xFFFFFFFFL +//UVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_LOW +#define UVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_HIGH +#define UVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_DPG_LMI_VCPU_CACHE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_DPG_VCPU_CACHE_OFFSET0 +#define UVD_DPG_VCPU_CACHE_OFFSET0__CACHE_OFFSET0__SHIFT 0x0 +#define UVD_DPG_VCPU_CACHE_OFFSET0__CACHE_OFFSET0_MASK 0x01FFFFFFL +//UVD_DPG_LMI_VCPU_CACHE_VMID +#define UVD_DPG_LMI_VCPU_CACHE_VMID__VCPU_CACHE_VMID__SHIFT 0x0 +#define UVD_DPG_LMI_VCPU_CACHE_VMID__VCPU_CACHE_VMID_MASK 0x0000000FL +//UVD_PF_STATUS +#define UVD_PF_STATUS__JPEG_PF_OCCURED__SHIFT 0x0 +#define 
UVD_PF_STATUS__NJ_PF_OCCURED__SHIFT 0x1 +#define UVD_PF_STATUS__ENCODER0_PF_OCCURED__SHIFT 0x2 +#define UVD_PF_STATUS__ENCODER1_PF_OCCURED__SHIFT 0x3 +#define UVD_PF_STATUS__ENCODER2_PF_OCCURED__SHIFT 0x4 +#define UVD_PF_STATUS__ENCODER3_PF_OCCURED__SHIFT 0x5 +#define UVD_PF_STATUS__ENCODER4_PF_OCCURED__SHIFT 0x6 +#define UVD_PF_STATUS__EJPEG_PF_OCCURED__SHIFT 0x7 +#define UVD_PF_STATUS__JPEG_PF_CLEAR__SHIFT 0x8 +#define UVD_PF_STATUS__NJ_PF_CLEAR__SHIFT 0x9 +#define UVD_PF_STATUS__ENCODER0_PF_CLEAR__SHIFT 0xa +#define UVD_PF_STATUS__ENCODER1_PF_CLEAR__SHIFT 0xb +#define UVD_PF_STATUS__ENCODER2_PF_CLEAR__SHIFT 0xc +#define UVD_PF_STATUS__ENCODER3_PF_CLEAR__SHIFT 0xd +#define UVD_PF_STATUS__ENCODER4_PF_CLEAR__SHIFT 0xe +#define UVD_PF_STATUS__EJPEG_PF_CLEAR__SHIFT 0xf +#define UVD_PF_STATUS__NJ_ATM_PF_OCCURED__SHIFT 0x10 +#define UVD_PF_STATUS__DJ_ATM_PF_OCCURED__SHIFT 0x11 +#define UVD_PF_STATUS__EJ_ATM_PF_OCCURED__SHIFT 0x12 +#define UVD_PF_STATUS__JPEG_PF_OCCURED_MASK 0x00000001L +#define UVD_PF_STATUS__NJ_PF_OCCURED_MASK 0x00000002L +#define UVD_PF_STATUS__ENCODER0_PF_OCCURED_MASK 0x00000004L +#define UVD_PF_STATUS__ENCODER1_PF_OCCURED_MASK 0x00000008L +#define UVD_PF_STATUS__ENCODER2_PF_OCCURED_MASK 0x00000010L +#define UVD_PF_STATUS__ENCODER3_PF_OCCURED_MASK 0x00000020L +#define UVD_PF_STATUS__ENCODER4_PF_OCCURED_MASK 0x00000040L +#define UVD_PF_STATUS__EJPEG_PF_OCCURED_MASK 0x00000080L +#define UVD_PF_STATUS__JPEG_PF_CLEAR_MASK 0x00000100L +#define UVD_PF_STATUS__NJ_PF_CLEAR_MASK 0x00000200L +#define UVD_PF_STATUS__ENCODER0_PF_CLEAR_MASK 0x00000400L +#define UVD_PF_STATUS__ENCODER1_PF_CLEAR_MASK 0x00000800L +#define UVD_PF_STATUS__ENCODER2_PF_CLEAR_MASK 0x00001000L +#define UVD_PF_STATUS__ENCODER3_PF_CLEAR_MASK 0x00002000L +#define UVD_PF_STATUS__ENCODER4_PF_CLEAR_MASK 0x00004000L +#define UVD_PF_STATUS__EJPEG_PF_CLEAR_MASK 0x00008000L +#define UVD_PF_STATUS__NJ_ATM_PF_OCCURED_MASK 0x00010000L +#define UVD_PF_STATUS__DJ_ATM_PF_OCCURED_MASK 0x00020000L +#define UVD_PF_STATUS__EJ_ATM_PF_OCCURED_MASK 0x00040000L +//UVD_DPG_CLK_EN_VCPU_REPORT +#define UVD_DPG_CLK_EN_VCPU_REPORT__CLK_EN__SHIFT 0x0 +#define UVD_DPG_CLK_EN_VCPU_REPORT__VCPU_REPORT__SHIFT 0x1 +#define UVD_DPG_CLK_EN_VCPU_REPORT__CLK_EN_MASK 0x00000001L +#define UVD_DPG_CLK_EN_VCPU_REPORT__VCPU_REPORT_MASK 0x000000FEL +//UVD_GFX8_ADDR_CONFIG +#define UVD_GFX8_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x4 +#define UVD_GFX8_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000070L +//UVD_GFX10_ADDR_CONFIG +#define UVD_GFX10_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0 +#define UVD_GFX10_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3 +#define UVD_GFX10_ADDR_CONFIG__NUM_BANKS__SHIFT 0xc +#define UVD_GFX10_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT 0x13 +#define UVD_GFX10_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L +#define UVD_GFX10_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE_MASK 0x00000038L +#define UVD_GFX10_ADDR_CONFIG__NUM_BANKS_MASK 0x00007000L +#define UVD_GFX10_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK 0x00180000L +//UVD_GPCNT2_CNTL +#define UVD_GPCNT2_CNTL__CLR__SHIFT 0x0 +#define UVD_GPCNT2_CNTL__START__SHIFT 0x1 +#define UVD_GPCNT2_CNTL__COUNTUP__SHIFT 0x2 +#define UVD_GPCNT2_CNTL__CLR_MASK 0x00000001L +#define UVD_GPCNT2_CNTL__START_MASK 0x00000002L +#define UVD_GPCNT2_CNTL__COUNTUP_MASK 0x00000004L +//UVD_GPCNT2_TARGET_LOWER +#define UVD_GPCNT2_TARGET_LOWER__TARGET__SHIFT 0x0 +#define UVD_GPCNT2_TARGET_LOWER__TARGET_MASK 0xFFFFFFFFL +//UVD_GPCNT2_STATUS_LOWER +#define UVD_GPCNT2_STATUS_LOWER__COUNT__SHIFT 0x0 +#define UVD_GPCNT2_STATUS_LOWER__COUNT_MASK 
0xFFFFFFFFL +//UVD_GPCNT2_TARGET_UPPER +#define UVD_GPCNT2_TARGET_UPPER__TARGET__SHIFT 0x0 +#define UVD_GPCNT2_TARGET_UPPER__TARGET_MASK 0x0000FFFFL +//UVD_GPCNT2_STATUS_UPPER +#define UVD_GPCNT2_STATUS_UPPER__COUNT__SHIFT 0x0 +#define UVD_GPCNT2_STATUS_UPPER__COUNT_MASK 0x0000FFFFL +//UVD_GPCNT3_CNTL +#define UVD_GPCNT3_CNTL__CLR__SHIFT 0x0 +#define UVD_GPCNT3_CNTL__START__SHIFT 0x1 +#define UVD_GPCNT3_CNTL__COUNTUP__SHIFT 0x2 +#define UVD_GPCNT3_CNTL__FREQ__SHIFT 0x3 +#define UVD_GPCNT3_CNTL__DIV__SHIFT 0xa +#define UVD_GPCNT3_CNTL__CLR_MASK 0x00000001L +#define UVD_GPCNT3_CNTL__START_MASK 0x00000002L +#define UVD_GPCNT3_CNTL__COUNTUP_MASK 0x00000004L +#define UVD_GPCNT3_CNTL__FREQ_MASK 0x000003F8L +#define UVD_GPCNT3_CNTL__DIV_MASK 0x0001FC00L +//UVD_GPCNT3_TARGET_LOWER +#define UVD_GPCNT3_TARGET_LOWER__TARGET__SHIFT 0x0 +#define UVD_GPCNT3_TARGET_LOWER__TARGET_MASK 0xFFFFFFFFL +//UVD_GPCNT3_STATUS_LOWER +#define UVD_GPCNT3_STATUS_LOWER__COUNT__SHIFT 0x0 +#define UVD_GPCNT3_STATUS_LOWER__COUNT_MASK 0xFFFFFFFFL +//UVD_GPCNT3_TARGET_UPPER +#define UVD_GPCNT3_TARGET_UPPER__TARGET__SHIFT 0x0 +#define UVD_GPCNT3_TARGET_UPPER__TARGET_MASK 0x0000FFFFL +//UVD_GPCNT3_STATUS_UPPER +#define UVD_GPCNT3_STATUS_UPPER__COUNT__SHIFT 0x0 +#define UVD_GPCNT3_STATUS_UPPER__COUNT_MASK 0x0000FFFFL + + +// addressBlock: uvd0_uvddec +//UVD_STATUS +#define UVD_STATUS__RBC_BUSY__SHIFT 0x0 +#define UVD_STATUS__VCPU_REPORT__SHIFT 0x1 +#define UVD_STATUS__RBC_ACCESS_GPCOM__SHIFT 0x10 +#define UVD_STATUS__SYS_GPCOM_REQ__SHIFT 0x1f +#define UVD_STATUS__RBC_BUSY_MASK 0x00000001L +#define UVD_STATUS__VCPU_REPORT_MASK 0x000000FEL +#define UVD_STATUS__RBC_ACCESS_GPCOM_MASK 0x00010000L +#define UVD_STATUS__SYS_GPCOM_REQ_MASK 0x80000000L +//UVD_ENC_PIPE_BUSY +#define UVD_ENC_PIPE_BUSY__IME_BUSY__SHIFT 0x0 +#define UVD_ENC_PIPE_BUSY__SMP_BUSY__SHIFT 0x1 +#define UVD_ENC_PIPE_BUSY__SIT_BUSY__SHIFT 0x2 +#define UVD_ENC_PIPE_BUSY__SDB_BUSY__SHIFT 0x3 +#define UVD_ENC_PIPE_BUSY__ENT_BUSY__SHIFT 0x4 +#define UVD_ENC_PIPE_BUSY__ENT_HEADER_BUSY__SHIFT 0x5 +#define UVD_ENC_PIPE_BUSY__LCM_BUSY__SHIFT 0x6 +#define UVD_ENC_PIPE_BUSY__MDM_RD_CUR_BUSY__SHIFT 0x7 +#define UVD_ENC_PIPE_BUSY__MDM_RD_REF_BUSY__SHIFT 0x8 +#define UVD_ENC_PIPE_BUSY__MDM_RD_GEN_BUSY__SHIFT 0x9 +#define UVD_ENC_PIPE_BUSY__MDM_WR_RECON_BUSY__SHIFT 0xa +#define UVD_ENC_PIPE_BUSY__MDM_WR_GEN_BUSY__SHIFT 0xb +#define UVD_ENC_PIPE_BUSY__MIF_RD_CUR_BUSY__SHIFT 0x10 +#define UVD_ENC_PIPE_BUSY__MIF_RD_REF0_BUSY__SHIFT 0x11 +#define UVD_ENC_PIPE_BUSY__MIF_WR_GEN0_BUSY__SHIFT 0x12 +#define UVD_ENC_PIPE_BUSY__MIF_RD_GEN0_BUSY__SHIFT 0x13 +#define UVD_ENC_PIPE_BUSY__MIF_WR_GEN1_BUSY__SHIFT 0x14 +#define UVD_ENC_PIPE_BUSY__MIF_RD_GEN1_BUSY__SHIFT 0x15 +#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP0_BUSY__SHIFT 0x16 +#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP1_BUSY__SHIFT 0x17 +#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD0_BUSY__SHIFT 0x18 +#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD1_BUSY__SHIFT 0x19 +#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD2_BUSY__SHIFT 0x1a +#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD3_BUSY__SHIFT 0x1b +#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD4_BUSY__SHIFT 0x1c +#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP2_BUSY__SHIFT 0x1d +#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP3_BUSY__SHIFT 0x1e +#define UVD_ENC_PIPE_BUSY__IME_BUSY_MASK 0x00000001L +#define UVD_ENC_PIPE_BUSY__SMP_BUSY_MASK 0x00000002L +#define UVD_ENC_PIPE_BUSY__SIT_BUSY_MASK 0x00000004L +#define UVD_ENC_PIPE_BUSY__SDB_BUSY_MASK 0x00000008L +#define UVD_ENC_PIPE_BUSY__ENT_BUSY_MASK 0x00000010L +#define UVD_ENC_PIPE_BUSY__ENT_HEADER_BUSY_MASK 
0x00000020L +#define UVD_ENC_PIPE_BUSY__LCM_BUSY_MASK 0x00000040L +#define UVD_ENC_PIPE_BUSY__MDM_RD_CUR_BUSY_MASK 0x00000080L +#define UVD_ENC_PIPE_BUSY__MDM_RD_REF_BUSY_MASK 0x00000100L +#define UVD_ENC_PIPE_BUSY__MDM_RD_GEN_BUSY_MASK 0x00000200L +#define UVD_ENC_PIPE_BUSY__MDM_WR_RECON_BUSY_MASK 0x00000400L +#define UVD_ENC_PIPE_BUSY__MDM_WR_GEN_BUSY_MASK 0x00000800L +#define UVD_ENC_PIPE_BUSY__MIF_RD_CUR_BUSY_MASK 0x00010000L +#define UVD_ENC_PIPE_BUSY__MIF_RD_REF0_BUSY_MASK 0x00020000L +#define UVD_ENC_PIPE_BUSY__MIF_WR_GEN0_BUSY_MASK 0x00040000L +#define UVD_ENC_PIPE_BUSY__MIF_RD_GEN0_BUSY_MASK 0x00080000L +#define UVD_ENC_PIPE_BUSY__MIF_WR_GEN1_BUSY_MASK 0x00100000L +#define UVD_ENC_PIPE_BUSY__MIF_RD_GEN1_BUSY_MASK 0x00200000L +#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP0_BUSY_MASK 0x00400000L +#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP1_BUSY_MASK 0x00800000L +#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD0_BUSY_MASK 0x01000000L +#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD1_BUSY_MASK 0x02000000L +#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD2_BUSY_MASK 0x04000000L +#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD3_BUSY_MASK 0x08000000L +#define UVD_ENC_PIPE_BUSY__MIF_RD_BSD4_BUSY_MASK 0x10000000L +#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP2_BUSY_MASK 0x20000000L +#define UVD_ENC_PIPE_BUSY__MIF_WR_BSP3_BUSY_MASK 0x40000000L +//UVD_SOFT_RESET +#define UVD_SOFT_RESET__RBC_SOFT_RESET__SHIFT 0x0 +#define UVD_SOFT_RESET__LBSI_SOFT_RESET__SHIFT 0x1 +#define UVD_SOFT_RESET__LMI_SOFT_RESET__SHIFT 0x2 +#define UVD_SOFT_RESET__VCPU_SOFT_RESET__SHIFT 0x3 +#define UVD_SOFT_RESET__UDEC_SOFT_RESET__SHIFT 0x4 +#define UVD_SOFT_RESET__CXW_SOFT_RESET__SHIFT 0x6 +#define UVD_SOFT_RESET__TAP_SOFT_RESET__SHIFT 0x7 +#define UVD_SOFT_RESET__MPC_SOFT_RESET__SHIFT 0x8 +#define UVD_SOFT_RESET__EFC_SOFT_RESET__SHIFT 0x9 +#define UVD_SOFT_RESET__IH_SOFT_RESET__SHIFT 0xa +#define UVD_SOFT_RESET__MPRD_SOFT_RESET__SHIFT 0xb +#define UVD_SOFT_RESET__IDCT_SOFT_RESET__SHIFT 0xc +#define UVD_SOFT_RESET__LMI_UMC_SOFT_RESET__SHIFT 0xd +#define UVD_SOFT_RESET__SPH_SOFT_RESET__SHIFT 0xe +#define UVD_SOFT_RESET__MIF_SOFT_RESET__SHIFT 0xf +#define UVD_SOFT_RESET__LCM_SOFT_RESET__SHIFT 0x10 +#define UVD_SOFT_RESET__SUVD_SOFT_RESET__SHIFT 0x11 +#define UVD_SOFT_RESET__LBSI_VCLK_RESET_STATUS__SHIFT 0x12 +#define UVD_SOFT_RESET__VCPU_VCLK_RESET_STATUS__SHIFT 0x13 +#define UVD_SOFT_RESET__UDEC_VCLK_RESET_STATUS__SHIFT 0x14 +#define UVD_SOFT_RESET__UDEC_DCLK_RESET_STATUS__SHIFT 0x15 +#define UVD_SOFT_RESET__MPC_DCLK_RESET_STATUS__SHIFT 0x16 +#define UVD_SOFT_RESET__MPRD_VCLK_RESET_STATUS__SHIFT 0x17 +#define UVD_SOFT_RESET__MPRD_DCLK_RESET_STATUS__SHIFT 0x18 +#define UVD_SOFT_RESET__IDCT_VCLK_RESET_STATUS__SHIFT 0x19 +#define UVD_SOFT_RESET__MIF_DCLK_RESET_STATUS__SHIFT 0x1a +#define UVD_SOFT_RESET__LCM_DCLK_RESET_STATUS__SHIFT 0x1b +#define UVD_SOFT_RESET__SUVD_VCLK_RESET_STATUS__SHIFT 0x1c +#define UVD_SOFT_RESET__SUVD_DCLK_RESET_STATUS__SHIFT 0x1d +#define UVD_SOFT_RESET__RE_DCLK_RESET_STATUS__SHIFT 0x1e +#define UVD_SOFT_RESET__SRE_DCLK_RESET_STATUS__SHIFT 0x1f +#define UVD_SOFT_RESET__RBC_SOFT_RESET_MASK 0x00000001L +#define UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK 0x00000002L +#define UVD_SOFT_RESET__LMI_SOFT_RESET_MASK 0x00000004L +#define UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK 0x00000008L +#define UVD_SOFT_RESET__UDEC_SOFT_RESET_MASK 0x00000010L +#define UVD_SOFT_RESET__CXW_SOFT_RESET_MASK 0x00000040L +#define UVD_SOFT_RESET__TAP_SOFT_RESET_MASK 0x00000080L +#define UVD_SOFT_RESET__MPC_SOFT_RESET_MASK 0x00000100L +#define UVD_SOFT_RESET__EFC_SOFT_RESET_MASK 0x00000200L 
+#define UVD_SOFT_RESET__IH_SOFT_RESET_MASK 0x00000400L +#define UVD_SOFT_RESET__MPRD_SOFT_RESET_MASK 0x00000800L +#define UVD_SOFT_RESET__IDCT_SOFT_RESET_MASK 0x00001000L +#define UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK 0x00002000L +#define UVD_SOFT_RESET__SPH_SOFT_RESET_MASK 0x00004000L +#define UVD_SOFT_RESET__MIF_SOFT_RESET_MASK 0x00008000L +#define UVD_SOFT_RESET__LCM_SOFT_RESET_MASK 0x00010000L +#define UVD_SOFT_RESET__SUVD_SOFT_RESET_MASK 0x00020000L +#define UVD_SOFT_RESET__LBSI_VCLK_RESET_STATUS_MASK 0x00040000L +#define UVD_SOFT_RESET__VCPU_VCLK_RESET_STATUS_MASK 0x00080000L +#define UVD_SOFT_RESET__UDEC_VCLK_RESET_STATUS_MASK 0x00100000L +#define UVD_SOFT_RESET__UDEC_DCLK_RESET_STATUS_MASK 0x00200000L +#define UVD_SOFT_RESET__MPC_DCLK_RESET_STATUS_MASK 0x00400000L +#define UVD_SOFT_RESET__MPRD_VCLK_RESET_STATUS_MASK 0x00800000L +#define UVD_SOFT_RESET__MPRD_DCLK_RESET_STATUS_MASK 0x01000000L +#define UVD_SOFT_RESET__IDCT_VCLK_RESET_STATUS_MASK 0x02000000L +#define UVD_SOFT_RESET__MIF_DCLK_RESET_STATUS_MASK 0x04000000L +#define UVD_SOFT_RESET__LCM_DCLK_RESET_STATUS_MASK 0x08000000L +#define UVD_SOFT_RESET__SUVD_VCLK_RESET_STATUS_MASK 0x10000000L +#define UVD_SOFT_RESET__SUVD_DCLK_RESET_STATUS_MASK 0x20000000L +#define UVD_SOFT_RESET__RE_DCLK_RESET_STATUS_MASK 0x40000000L +#define UVD_SOFT_RESET__SRE_DCLK_RESET_STATUS_MASK 0x80000000L +//UVD_SOFT_RESET2 +#define UVD_SOFT_RESET2__ATOMIC_SOFT_RESET__SHIFT 0x0 +#define UVD_SOFT_RESET2__MMSCH_VCLK_RESET_STATUS__SHIFT 0x10 +#define UVD_SOFT_RESET2__MMSCH_SCLK_RESET_STATUS__SHIFT 0x11 +#define UVD_SOFT_RESET2__ATOMIC_SOFT_RESET_MASK 0x00000001L +#define UVD_SOFT_RESET2__MMSCH_VCLK_RESET_STATUS_MASK 0x00010000L +#define UVD_SOFT_RESET2__MMSCH_SCLK_RESET_STATUS_MASK 0x00020000L +//UVD_MMSCH_SOFT_RESET +#define UVD_MMSCH_SOFT_RESET__MMSCH_RESET__SHIFT 0x0 +#define UVD_MMSCH_SOFT_RESET__TAP_SOFT_RESET__SHIFT 0x1 +#define UVD_MMSCH_SOFT_RESET__MMSCH_LOCK__SHIFT 0x1f +#define UVD_MMSCH_SOFT_RESET__MMSCH_RESET_MASK 0x00000001L +#define UVD_MMSCH_SOFT_RESET__TAP_SOFT_RESET_MASK 0x00000002L +#define UVD_MMSCH_SOFT_RESET__MMSCH_LOCK_MASK 0x80000000L +//UVD_CGC_GATE +#define UVD_CGC_GATE__SYS__SHIFT 0x0 +#define UVD_CGC_GATE__UDEC__SHIFT 0x1 +#define UVD_CGC_GATE__MPEG2__SHIFT 0x2 +#define UVD_CGC_GATE__REGS__SHIFT 0x3 +#define UVD_CGC_GATE__RBC__SHIFT 0x4 +#define UVD_CGC_GATE__LMI_MC__SHIFT 0x5 +#define UVD_CGC_GATE__LMI_UMC__SHIFT 0x6 +#define UVD_CGC_GATE__IDCT__SHIFT 0x7 +#define UVD_CGC_GATE__MPRD__SHIFT 0x8 +#define UVD_CGC_GATE__MPC__SHIFT 0x9 +#define UVD_CGC_GATE__LBSI__SHIFT 0xa +#define UVD_CGC_GATE__LRBBM__SHIFT 0xb +#define UVD_CGC_GATE__UDEC_RE__SHIFT 0xc +#define UVD_CGC_GATE__UDEC_CM__SHIFT 0xd +#define UVD_CGC_GATE__UDEC_IT__SHIFT 0xe +#define UVD_CGC_GATE__UDEC_DB__SHIFT 0xf +#define UVD_CGC_GATE__UDEC_MP__SHIFT 0x10 +#define UVD_CGC_GATE__WCB__SHIFT 0x11 +#define UVD_CGC_GATE__VCPU__SHIFT 0x12 +#define UVD_CGC_GATE__MMSCH__SHIFT 0x14 +#define UVD_CGC_GATE__SYS_MASK 0x00000001L +#define UVD_CGC_GATE__UDEC_MASK 0x00000002L +#define UVD_CGC_GATE__MPEG2_MASK 0x00000004L +#define UVD_CGC_GATE__REGS_MASK 0x00000008L +#define UVD_CGC_GATE__RBC_MASK 0x00000010L +#define UVD_CGC_GATE__LMI_MC_MASK 0x00000020L +#define UVD_CGC_GATE__LMI_UMC_MASK 0x00000040L +#define UVD_CGC_GATE__IDCT_MASK 0x00000080L +#define UVD_CGC_GATE__MPRD_MASK 0x00000100L +#define UVD_CGC_GATE__MPC_MASK 0x00000200L +#define UVD_CGC_GATE__LBSI_MASK 0x00000400L +#define UVD_CGC_GATE__LRBBM_MASK 0x00000800L +#define UVD_CGC_GATE__UDEC_RE_MASK 0x00001000L +#define 
UVD_CGC_GATE__UDEC_CM_MASK 0x00002000L +#define UVD_CGC_GATE__UDEC_IT_MASK 0x00004000L +#define UVD_CGC_GATE__UDEC_DB_MASK 0x00008000L +#define UVD_CGC_GATE__UDEC_MP_MASK 0x00010000L +#define UVD_CGC_GATE__WCB_MASK 0x00020000L +#define UVD_CGC_GATE__VCPU_MASK 0x00040000L +#define UVD_CGC_GATE__MMSCH_MASK 0x00100000L +//UVD_CGC_STATUS +#define UVD_CGC_STATUS__SYS_SCLK__SHIFT 0x0 +#define UVD_CGC_STATUS__SYS_DCLK__SHIFT 0x1 +#define UVD_CGC_STATUS__SYS_VCLK__SHIFT 0x2 +#define UVD_CGC_STATUS__UDEC_SCLK__SHIFT 0x3 +#define UVD_CGC_STATUS__UDEC_DCLK__SHIFT 0x4 +#define UVD_CGC_STATUS__UDEC_VCLK__SHIFT 0x5 +#define UVD_CGC_STATUS__MPEG2_SCLK__SHIFT 0x6 +#define UVD_CGC_STATUS__MPEG2_DCLK__SHIFT 0x7 +#define UVD_CGC_STATUS__MPEG2_VCLK__SHIFT 0x8 +#define UVD_CGC_STATUS__REGS_SCLK__SHIFT 0x9 +#define UVD_CGC_STATUS__REGS_VCLK__SHIFT 0xa +#define UVD_CGC_STATUS__RBC_SCLK__SHIFT 0xb +#define UVD_CGC_STATUS__LMI_MC_SCLK__SHIFT 0xc +#define UVD_CGC_STATUS__LMI_UMC_SCLK__SHIFT 0xd +#define UVD_CGC_STATUS__IDCT_SCLK__SHIFT 0xe +#define UVD_CGC_STATUS__IDCT_VCLK__SHIFT 0xf +#define UVD_CGC_STATUS__MPRD_SCLK__SHIFT 0x10 +#define UVD_CGC_STATUS__MPRD_DCLK__SHIFT 0x11 +#define UVD_CGC_STATUS__MPRD_VCLK__SHIFT 0x12 +#define UVD_CGC_STATUS__MPC_SCLK__SHIFT 0x13 +#define UVD_CGC_STATUS__MPC_DCLK__SHIFT 0x14 +#define UVD_CGC_STATUS__LBSI_SCLK__SHIFT 0x15 +#define UVD_CGC_STATUS__LBSI_VCLK__SHIFT 0x16 +#define UVD_CGC_STATUS__LRBBM_SCLK__SHIFT 0x17 +#define UVD_CGC_STATUS__WCB_SCLK__SHIFT 0x18 +#define UVD_CGC_STATUS__VCPU_SCLK__SHIFT 0x19 +#define UVD_CGC_STATUS__VCPU_VCLK__SHIFT 0x1a +#define UVD_CGC_STATUS__MMSCH_SCLK__SHIFT 0x1b +#define UVD_CGC_STATUS__MMSCH_VCLK__SHIFT 0x1c +#define UVD_CGC_STATUS__ALL_ENC_ACTIVE__SHIFT 0x1d +#define UVD_CGC_STATUS__ALL_DEC_ACTIVE__SHIFT 0x1f +#define UVD_CGC_STATUS__SYS_SCLK_MASK 0x00000001L +#define UVD_CGC_STATUS__SYS_DCLK_MASK 0x00000002L +#define UVD_CGC_STATUS__SYS_VCLK_MASK 0x00000004L +#define UVD_CGC_STATUS__UDEC_SCLK_MASK 0x00000008L +#define UVD_CGC_STATUS__UDEC_DCLK_MASK 0x00000010L +#define UVD_CGC_STATUS__UDEC_VCLK_MASK 0x00000020L +#define UVD_CGC_STATUS__MPEG2_SCLK_MASK 0x00000040L +#define UVD_CGC_STATUS__MPEG2_DCLK_MASK 0x00000080L +#define UVD_CGC_STATUS__MPEG2_VCLK_MASK 0x00000100L +#define UVD_CGC_STATUS__REGS_SCLK_MASK 0x00000200L +#define UVD_CGC_STATUS__REGS_VCLK_MASK 0x00000400L +#define UVD_CGC_STATUS__RBC_SCLK_MASK 0x00000800L +#define UVD_CGC_STATUS__LMI_MC_SCLK_MASK 0x00001000L +#define UVD_CGC_STATUS__LMI_UMC_SCLK_MASK 0x00002000L +#define UVD_CGC_STATUS__IDCT_SCLK_MASK 0x00004000L +#define UVD_CGC_STATUS__IDCT_VCLK_MASK 0x00008000L +#define UVD_CGC_STATUS__MPRD_SCLK_MASK 0x00010000L +#define UVD_CGC_STATUS__MPRD_DCLK_MASK 0x00020000L +#define UVD_CGC_STATUS__MPRD_VCLK_MASK 0x00040000L +#define UVD_CGC_STATUS__MPC_SCLK_MASK 0x00080000L +#define UVD_CGC_STATUS__MPC_DCLK_MASK 0x00100000L +#define UVD_CGC_STATUS__LBSI_SCLK_MASK 0x00200000L +#define UVD_CGC_STATUS__LBSI_VCLK_MASK 0x00400000L +#define UVD_CGC_STATUS__LRBBM_SCLK_MASK 0x00800000L +#define UVD_CGC_STATUS__WCB_SCLK_MASK 0x01000000L +#define UVD_CGC_STATUS__VCPU_SCLK_MASK 0x02000000L +#define UVD_CGC_STATUS__VCPU_VCLK_MASK 0x04000000L +#define UVD_CGC_STATUS__MMSCH_SCLK_MASK 0x08000000L +#define UVD_CGC_STATUS__MMSCH_VCLK_MASK 0x10000000L +#define UVD_CGC_STATUS__ALL_ENC_ACTIVE_MASK 0x20000000L +#define UVD_CGC_STATUS__ALL_DEC_ACTIVE_MASK 0x80000000L +//UVD_CGC_CTRL +#define UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT 0x0 +#define UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT 0x2 +#define 
UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT 0x6 +#define UVD_CGC_CTRL__UDEC_RE_MODE__SHIFT 0xb +#define UVD_CGC_CTRL__UDEC_CM_MODE__SHIFT 0xc +#define UVD_CGC_CTRL__UDEC_IT_MODE__SHIFT 0xd +#define UVD_CGC_CTRL__UDEC_DB_MODE__SHIFT 0xe +#define UVD_CGC_CTRL__UDEC_MP_MODE__SHIFT 0xf +#define UVD_CGC_CTRL__SYS_MODE__SHIFT 0x10 +#define UVD_CGC_CTRL__UDEC_MODE__SHIFT 0x11 +#define UVD_CGC_CTRL__MPEG2_MODE__SHIFT 0x12 +#define UVD_CGC_CTRL__REGS_MODE__SHIFT 0x13 +#define UVD_CGC_CTRL__RBC_MODE__SHIFT 0x14 +#define UVD_CGC_CTRL__LMI_MC_MODE__SHIFT 0x15 +#define UVD_CGC_CTRL__LMI_UMC_MODE__SHIFT 0x16 +#define UVD_CGC_CTRL__IDCT_MODE__SHIFT 0x17 +#define UVD_CGC_CTRL__MPRD_MODE__SHIFT 0x18 +#define UVD_CGC_CTRL__MPC_MODE__SHIFT 0x19 +#define UVD_CGC_CTRL__LBSI_MODE__SHIFT 0x1a +#define UVD_CGC_CTRL__LRBBM_MODE__SHIFT 0x1b +#define UVD_CGC_CTRL__WCB_MODE__SHIFT 0x1c +#define UVD_CGC_CTRL__VCPU_MODE__SHIFT 0x1d +#define UVD_CGC_CTRL__MMSCH_MODE__SHIFT 0x1f +#define UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK 0x00000001L +#define UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK 0x0000003CL +#define UVD_CGC_CTRL__CLK_OFF_DELAY_MASK 0x000007C0L +#define UVD_CGC_CTRL__UDEC_RE_MODE_MASK 0x00000800L +#define UVD_CGC_CTRL__UDEC_CM_MODE_MASK 0x00001000L +#define UVD_CGC_CTRL__UDEC_IT_MODE_MASK 0x00002000L +#define UVD_CGC_CTRL__UDEC_DB_MODE_MASK 0x00004000L +#define UVD_CGC_CTRL__UDEC_MP_MODE_MASK 0x00008000L +#define UVD_CGC_CTRL__SYS_MODE_MASK 0x00010000L +#define UVD_CGC_CTRL__UDEC_MODE_MASK 0x00020000L +#define UVD_CGC_CTRL__MPEG2_MODE_MASK 0x00040000L +#define UVD_CGC_CTRL__REGS_MODE_MASK 0x00080000L +#define UVD_CGC_CTRL__RBC_MODE_MASK 0x00100000L +#define UVD_CGC_CTRL__LMI_MC_MODE_MASK 0x00200000L +#define UVD_CGC_CTRL__LMI_UMC_MODE_MASK 0x00400000L +#define UVD_CGC_CTRL__IDCT_MODE_MASK 0x00800000L +#define UVD_CGC_CTRL__MPRD_MODE_MASK 0x01000000L +#define UVD_CGC_CTRL__MPC_MODE_MASK 0x02000000L +#define UVD_CGC_CTRL__LBSI_MODE_MASK 0x04000000L +#define UVD_CGC_CTRL__LRBBM_MODE_MASK 0x08000000L +#define UVD_CGC_CTRL__WCB_MODE_MASK 0x10000000L +#define UVD_CGC_CTRL__VCPU_MODE_MASK 0x20000000L +#define UVD_CGC_CTRL__MMSCH_MODE_MASK 0x80000000L +//UVD_CGC_UDEC_STATUS +#define UVD_CGC_UDEC_STATUS__RE_SCLK__SHIFT 0x0 +#define UVD_CGC_UDEC_STATUS__RE_DCLK__SHIFT 0x1 +#define UVD_CGC_UDEC_STATUS__RE_VCLK__SHIFT 0x2 +#define UVD_CGC_UDEC_STATUS__CM_SCLK__SHIFT 0x3 +#define UVD_CGC_UDEC_STATUS__CM_DCLK__SHIFT 0x4 +#define UVD_CGC_UDEC_STATUS__CM_VCLK__SHIFT 0x5 +#define UVD_CGC_UDEC_STATUS__IT_SCLK__SHIFT 0x6 +#define UVD_CGC_UDEC_STATUS__IT_DCLK__SHIFT 0x7 +#define UVD_CGC_UDEC_STATUS__IT_VCLK__SHIFT 0x8 +#define UVD_CGC_UDEC_STATUS__DB_SCLK__SHIFT 0x9 +#define UVD_CGC_UDEC_STATUS__DB_DCLK__SHIFT 0xa +#define UVD_CGC_UDEC_STATUS__DB_VCLK__SHIFT 0xb +#define UVD_CGC_UDEC_STATUS__MP_SCLK__SHIFT 0xc +#define UVD_CGC_UDEC_STATUS__MP_DCLK__SHIFT 0xd +#define UVD_CGC_UDEC_STATUS__MP_VCLK__SHIFT 0xe +#define UVD_CGC_UDEC_STATUS__RE_SCLK_MASK 0x00000001L +#define UVD_CGC_UDEC_STATUS__RE_DCLK_MASK 0x00000002L +#define UVD_CGC_UDEC_STATUS__RE_VCLK_MASK 0x00000004L +#define UVD_CGC_UDEC_STATUS__CM_SCLK_MASK 0x00000008L +#define UVD_CGC_UDEC_STATUS__CM_DCLK_MASK 0x00000010L +#define UVD_CGC_UDEC_STATUS__CM_VCLK_MASK 0x00000020L +#define UVD_CGC_UDEC_STATUS__IT_SCLK_MASK 0x00000040L +#define UVD_CGC_UDEC_STATUS__IT_DCLK_MASK 0x00000080L +#define UVD_CGC_UDEC_STATUS__IT_VCLK_MASK 0x00000100L +#define UVD_CGC_UDEC_STATUS__DB_SCLK_MASK 0x00000200L +#define UVD_CGC_UDEC_STATUS__DB_DCLK_MASK 0x00000400L +#define UVD_CGC_UDEC_STATUS__DB_VCLK_MASK 
0x00000800L +#define UVD_CGC_UDEC_STATUS__MP_SCLK_MASK 0x00001000L +#define UVD_CGC_UDEC_STATUS__MP_DCLK_MASK 0x00002000L +#define UVD_CGC_UDEC_STATUS__MP_VCLK_MASK 0x00004000L +//UVD_SUVD_CGC_GATE +#define UVD_SUVD_CGC_GATE__SRE__SHIFT 0x0 +#define UVD_SUVD_CGC_GATE__SIT__SHIFT 0x1 +#define UVD_SUVD_CGC_GATE__SMP__SHIFT 0x2 +#define UVD_SUVD_CGC_GATE__SCM__SHIFT 0x3 +#define UVD_SUVD_CGC_GATE__SDB__SHIFT 0x4 +#define UVD_SUVD_CGC_GATE__SRE_H264__SHIFT 0x5 +#define UVD_SUVD_CGC_GATE__SRE_HEVC__SHIFT 0x6 +#define UVD_SUVD_CGC_GATE__SIT_H264__SHIFT 0x7 +#define UVD_SUVD_CGC_GATE__SIT_HEVC__SHIFT 0x8 +#define UVD_SUVD_CGC_GATE__SCM_H264__SHIFT 0x9 +#define UVD_SUVD_CGC_GATE__SCM_HEVC__SHIFT 0xa +#define UVD_SUVD_CGC_GATE__SDB_H264__SHIFT 0xb +#define UVD_SUVD_CGC_GATE__SDB_HEVC__SHIFT 0xc +#define UVD_SUVD_CGC_GATE__SCLR__SHIFT 0xd +#define UVD_SUVD_CGC_GATE__UVD_SC__SHIFT 0xe +#define UVD_SUVD_CGC_GATE__ENT__SHIFT 0xf +#define UVD_SUVD_CGC_GATE__IME__SHIFT 0x10 +#define UVD_SUVD_CGC_GATE__SIT_HEVC_DEC__SHIFT 0x11 +#define UVD_SUVD_CGC_GATE__SIT_HEVC_ENC__SHIFT 0x12 +#define UVD_SUVD_CGC_GATE__SITE__SHIFT 0x13 +#define UVD_SUVD_CGC_GATE__SRE_VP9__SHIFT 0x14 +#define UVD_SUVD_CGC_GATE__SCM_VP9__SHIFT 0x15 +#define UVD_SUVD_CGC_GATE__SIT_VP9_DEC__SHIFT 0x16 +#define UVD_SUVD_CGC_GATE__SDB_VP9__SHIFT 0x17 +#define UVD_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18 +#define UVD_SUVD_CGC_GATE__EFC__SHIFT 0x19 +#define UVD_SUVD_CGC_GATE__SRE_MASK 0x00000001L +#define UVD_SUVD_CGC_GATE__SIT_MASK 0x00000002L +#define UVD_SUVD_CGC_GATE__SMP_MASK 0x00000004L +#define UVD_SUVD_CGC_GATE__SCM_MASK 0x00000008L +#define UVD_SUVD_CGC_GATE__SDB_MASK 0x00000010L +#define UVD_SUVD_CGC_GATE__SRE_H264_MASK 0x00000020L +#define UVD_SUVD_CGC_GATE__SRE_HEVC_MASK 0x00000040L +#define UVD_SUVD_CGC_GATE__SIT_H264_MASK 0x00000080L +#define UVD_SUVD_CGC_GATE__SIT_HEVC_MASK 0x00000100L +#define UVD_SUVD_CGC_GATE__SCM_H264_MASK 0x00000200L +#define UVD_SUVD_CGC_GATE__SCM_HEVC_MASK 0x00000400L +#define UVD_SUVD_CGC_GATE__SDB_H264_MASK 0x00000800L +#define UVD_SUVD_CGC_GATE__SDB_HEVC_MASK 0x00001000L +#define UVD_SUVD_CGC_GATE__SCLR_MASK 0x00002000L +#define UVD_SUVD_CGC_GATE__UVD_SC_MASK 0x00004000L +#define UVD_SUVD_CGC_GATE__ENT_MASK 0x00008000L +#define UVD_SUVD_CGC_GATE__IME_MASK 0x00010000L +#define UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK 0x00020000L +#define UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK 0x00040000L +#define UVD_SUVD_CGC_GATE__SITE_MASK 0x00080000L +#define UVD_SUVD_CGC_GATE__SRE_VP9_MASK 0x00100000L +#define UVD_SUVD_CGC_GATE__SCM_VP9_MASK 0x00200000L +#define UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK 0x00400000L +#define UVD_SUVD_CGC_GATE__SDB_VP9_MASK 0x00800000L +#define UVD_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L +#define UVD_SUVD_CGC_GATE__EFC_MASK 0x02000000L +//UVD_SUVD_CGC_STATUS +#define UVD_SUVD_CGC_STATUS__SRE_VCLK__SHIFT 0x0 +#define UVD_SUVD_CGC_STATUS__SRE_DCLK__SHIFT 0x1 +#define UVD_SUVD_CGC_STATUS__SIT_DCLK__SHIFT 0x2 +#define UVD_SUVD_CGC_STATUS__SMP_DCLK__SHIFT 0x3 +#define UVD_SUVD_CGC_STATUS__SCM_DCLK__SHIFT 0x4 +#define UVD_SUVD_CGC_STATUS__SDB_DCLK__SHIFT 0x5 +#define UVD_SUVD_CGC_STATUS__SRE_H264_VCLK__SHIFT 0x6 +#define UVD_SUVD_CGC_STATUS__SRE_HEVC_VCLK__SHIFT 0x7 +#define UVD_SUVD_CGC_STATUS__SIT_H264_DCLK__SHIFT 0x8 +#define UVD_SUVD_CGC_STATUS__SIT_HEVC_DCLK__SHIFT 0x9 +#define UVD_SUVD_CGC_STATUS__SCM_H264_DCLK__SHIFT 0xa +#define UVD_SUVD_CGC_STATUS__SCM_HEVC_DCLK__SHIFT 0xb +#define UVD_SUVD_CGC_STATUS__SDB_H264_DCLK__SHIFT 0xc +#define UVD_SUVD_CGC_STATUS__SDB_HEVC_DCLK__SHIFT 0xd +#define 
UVD_SUVD_CGC_STATUS__SCLR_DCLK__SHIFT 0xe +#define UVD_SUVD_CGC_STATUS__UVD_SC__SHIFT 0xf +#define UVD_SUVD_CGC_STATUS__ENT_DCLK__SHIFT 0x10 +#define UVD_SUVD_CGC_STATUS__IME_DCLK__SHIFT 0x11 +#define UVD_SUVD_CGC_STATUS__SIT_HEVC_DEC_DCLK__SHIFT 0x12 +#define UVD_SUVD_CGC_STATUS__SIT_HEVC_ENC_DCLK__SHIFT 0x13 +#define UVD_SUVD_CGC_STATUS__SITE_DCLK__SHIFT 0x14 +#define UVD_SUVD_CGC_STATUS__SITE_HEVC_DCLK__SHIFT 0x15 +#define UVD_SUVD_CGC_STATUS__SITE_HEVC_ENC_DCLK__SHIFT 0x16 +#define UVD_SUVD_CGC_STATUS__SRE_VP9_VCLK__SHIFT 0x17 +#define UVD_SUVD_CGC_STATUS__SCM_VP9_VCLK__SHIFT 0x18 +#define UVD_SUVD_CGC_STATUS__SIT_VP9_DEC_DCLK__SHIFT 0x19 +#define UVD_SUVD_CGC_STATUS__SDB_VP9_DCLK__SHIFT 0x1a +#define UVD_SUVD_CGC_STATUS__IME_HEVC_DCLK__SHIFT 0x1b +#define UVD_SUVD_CGC_STATUS__EFC_DCLK__SHIFT 0x1c +#define UVD_SUVD_CGC_STATUS__SRE_VCLK_MASK 0x00000001L +#define UVD_SUVD_CGC_STATUS__SRE_DCLK_MASK 0x00000002L +#define UVD_SUVD_CGC_STATUS__SIT_DCLK_MASK 0x00000004L +#define UVD_SUVD_CGC_STATUS__SMP_DCLK_MASK 0x00000008L +#define UVD_SUVD_CGC_STATUS__SCM_DCLK_MASK 0x00000010L +#define UVD_SUVD_CGC_STATUS__SDB_DCLK_MASK 0x00000020L +#define UVD_SUVD_CGC_STATUS__SRE_H264_VCLK_MASK 0x00000040L +#define UVD_SUVD_CGC_STATUS__SRE_HEVC_VCLK_MASK 0x00000080L +#define UVD_SUVD_CGC_STATUS__SIT_H264_DCLK_MASK 0x00000100L +#define UVD_SUVD_CGC_STATUS__SIT_HEVC_DCLK_MASK 0x00000200L +#define UVD_SUVD_CGC_STATUS__SCM_H264_DCLK_MASK 0x00000400L +#define UVD_SUVD_CGC_STATUS__SCM_HEVC_DCLK_MASK 0x00000800L +#define UVD_SUVD_CGC_STATUS__SDB_H264_DCLK_MASK 0x00001000L +#define UVD_SUVD_CGC_STATUS__SDB_HEVC_DCLK_MASK 0x00002000L +#define UVD_SUVD_CGC_STATUS__SCLR_DCLK_MASK 0x00004000L +#define UVD_SUVD_CGC_STATUS__UVD_SC_MASK 0x00008000L +#define UVD_SUVD_CGC_STATUS__ENT_DCLK_MASK 0x00010000L +#define UVD_SUVD_CGC_STATUS__IME_DCLK_MASK 0x00020000L +#define UVD_SUVD_CGC_STATUS__SIT_HEVC_DEC_DCLK_MASK 0x00040000L +#define UVD_SUVD_CGC_STATUS__SIT_HEVC_ENC_DCLK_MASK 0x00080000L +#define UVD_SUVD_CGC_STATUS__SITE_DCLK_MASK 0x00100000L +#define UVD_SUVD_CGC_STATUS__SITE_HEVC_DCLK_MASK 0x00200000L +#define UVD_SUVD_CGC_STATUS__SITE_HEVC_ENC_DCLK_MASK 0x00400000L +#define UVD_SUVD_CGC_STATUS__SRE_VP9_VCLK_MASK 0x00800000L +#define UVD_SUVD_CGC_STATUS__SCM_VP9_VCLK_MASK 0x01000000L +#define UVD_SUVD_CGC_STATUS__SIT_VP9_DEC_DCLK_MASK 0x02000000L +#define UVD_SUVD_CGC_STATUS__SDB_VP9_DCLK_MASK 0x04000000L +#define UVD_SUVD_CGC_STATUS__IME_HEVC_DCLK_MASK 0x08000000L +#define UVD_SUVD_CGC_STATUS__EFC_DCLK_MASK 0x10000000L +//UVD_SUVD_CGC_CTRL +#define UVD_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0 +#define UVD_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1 +#define UVD_SUVD_CGC_CTRL__SMP_MODE__SHIFT 0x2 +#define UVD_SUVD_CGC_CTRL__SCM_MODE__SHIFT 0x3 +#define UVD_SUVD_CGC_CTRL__SDB_MODE__SHIFT 0x4 +#define UVD_SUVD_CGC_CTRL__SCLR_MODE__SHIFT 0x5 +#define UVD_SUVD_CGC_CTRL__UVD_SC_MODE__SHIFT 0x6 +#define UVD_SUVD_CGC_CTRL__ENT_MODE__SHIFT 0x7 +#define UVD_SUVD_CGC_CTRL__IME_MODE__SHIFT 0x8 +#define UVD_SUVD_CGC_CTRL__SITE_MODE__SHIFT 0x9 +#define UVD_SUVD_CGC_CTRL__EFC_MODE__SHIFT 0xa +#define UVD_SUVD_CGC_CTRL__SRE_MODE_MASK 0x00000001L +#define UVD_SUVD_CGC_CTRL__SIT_MODE_MASK 0x00000002L +#define UVD_SUVD_CGC_CTRL__SMP_MODE_MASK 0x00000004L +#define UVD_SUVD_CGC_CTRL__SCM_MODE_MASK 0x00000008L +#define UVD_SUVD_CGC_CTRL__SDB_MODE_MASK 0x00000010L +#define UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK 0x00000020L +#define UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK 0x00000040L +#define UVD_SUVD_CGC_CTRL__ENT_MODE_MASK 0x00000080L +#define 
UVD_SUVD_CGC_CTRL__IME_MODE_MASK 0x00000100L +#define UVD_SUVD_CGC_CTRL__SITE_MODE_MASK 0x00000200L +#define UVD_SUVD_CGC_CTRL__EFC_MODE_MASK 0x00000400L +//UVD_GPCOM_VCPU_CMD +#define UVD_GPCOM_VCPU_CMD__CMD_SEND__SHIFT 0x0 +#define UVD_GPCOM_VCPU_CMD__CMD__SHIFT 0x1 +#define UVD_GPCOM_VCPU_CMD__CMD_SOURCE__SHIFT 0x1f +#define UVD_GPCOM_VCPU_CMD__CMD_SEND_MASK 0x00000001L +#define UVD_GPCOM_VCPU_CMD__CMD_MASK 0x7FFFFFFEL +#define UVD_GPCOM_VCPU_CMD__CMD_SOURCE_MASK 0x80000000L +//UVD_GPCOM_VCPU_DATA0 +#define UVD_GPCOM_VCPU_DATA0__DATA0__SHIFT 0x0 +#define UVD_GPCOM_VCPU_DATA0__DATA0_MASK 0xFFFFFFFFL +//UVD_GPCOM_VCPU_DATA1 +#define UVD_GPCOM_VCPU_DATA1__DATA1__SHIFT 0x0 +#define UVD_GPCOM_VCPU_DATA1__DATA1_MASK 0xFFFFFFFFL +//UVD_GPCOM_SYS_CMD +#define UVD_GPCOM_SYS_CMD__CMD_SEND__SHIFT 0x0 +#define UVD_GPCOM_SYS_CMD__CMD__SHIFT 0x1 +#define UVD_GPCOM_SYS_CMD__CMD_SOURCE__SHIFT 0x1f +#define UVD_GPCOM_SYS_CMD__CMD_SEND_MASK 0x00000001L +#define UVD_GPCOM_SYS_CMD__CMD_MASK 0x7FFFFFFEL +#define UVD_GPCOM_SYS_CMD__CMD_SOURCE_MASK 0x80000000L +//UVD_GPCOM_SYS_DATA0 +#define UVD_GPCOM_SYS_DATA0__DATA0__SHIFT 0x0 +#define UVD_GPCOM_SYS_DATA0__DATA0_MASK 0xFFFFFFFFL +//UVD_GPCOM_SYS_DATA1 +#define UVD_GPCOM_SYS_DATA1__DATA1__SHIFT 0x0 +#define UVD_GPCOM_SYS_DATA1__DATA1_MASK 0xFFFFFFFFL +//UVD_VCPU_INT_EN +#define UVD_VCPU_INT_EN__PIF_ADDR_ERR_EN__SHIFT 0x0 +#define UVD_VCPU_INT_EN__SEMA_WAIT_FAULT_TIMEOUT_EN__SHIFT 0x1 +#define UVD_VCPU_INT_EN__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_EN__SHIFT 0x2 +#define UVD_VCPU_INT_EN__NJ_PF_RPT_EN__SHIFT 0x3 +#define UVD_VCPU_INT_EN__SW_RB1_INT_EN__SHIFT 0x4 +#define UVD_VCPU_INT_EN__SW_RB2_INT_EN__SHIFT 0x5 +#define UVD_VCPU_INT_EN__RBC_REG_PRIV_FAULT_EN__SHIFT 0x6 +#define UVD_VCPU_INT_EN__SW_RB3_INT_EN__SHIFT 0x7 +#define UVD_VCPU_INT_EN__SW_RB4_INT_EN__SHIFT 0x9 +#define UVD_VCPU_INT_EN__SW_RB5_INT_EN__SHIFT 0xa +#define UVD_VCPU_INT_EN__LBSI_EN__SHIFT 0xb +#define UVD_VCPU_INT_EN__UDEC_EN__SHIFT 0xc +#define UVD_VCPU_INT_EN__RPTR_WR_EN__SHIFT 0x10 +#define UVD_VCPU_INT_EN__JOB_START_EN__SHIFT 0x11 +#define UVD_VCPU_INT_EN__NJ_PF_EN__SHIFT 0x12 +#define UVD_VCPU_INT_EN__SEMA_WAIT_FAIL_SIG_EN__SHIFT 0x17 +#define UVD_VCPU_INT_EN__IDCT_EN__SHIFT 0x18 +#define UVD_VCPU_INT_EN__MPRD_EN__SHIFT 0x19 +#define UVD_VCPU_INT_EN__AVM_INT_EN__SHIFT 0x1a +#define UVD_VCPU_INT_EN__CLK_SWT_EN__SHIFT 0x1b +#define UVD_VCPU_INT_EN__MIF_HWINT_EN__SHIFT 0x1c +#define UVD_VCPU_INT_EN__MPRD_ERR_EN__SHIFT 0x1d +#define UVD_VCPU_INT_EN__DRV_FW_REQ_EN__SHIFT 0x1e +#define UVD_VCPU_INT_EN__DRV_FW_ACK_EN__SHIFT 0x1f +#define UVD_VCPU_INT_EN__PIF_ADDR_ERR_EN_MASK 0x00000001L +#define UVD_VCPU_INT_EN__SEMA_WAIT_FAULT_TIMEOUT_EN_MASK 0x00000002L +#define UVD_VCPU_INT_EN__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_EN_MASK 0x00000004L +#define UVD_VCPU_INT_EN__NJ_PF_RPT_EN_MASK 0x00000008L +#define UVD_VCPU_INT_EN__SW_RB1_INT_EN_MASK 0x00000010L +#define UVD_VCPU_INT_EN__SW_RB2_INT_EN_MASK 0x00000020L +#define UVD_VCPU_INT_EN__RBC_REG_PRIV_FAULT_EN_MASK 0x00000040L +#define UVD_VCPU_INT_EN__SW_RB3_INT_EN_MASK 0x00000080L +#define UVD_VCPU_INT_EN__SW_RB4_INT_EN_MASK 0x00000200L +#define UVD_VCPU_INT_EN__SW_RB5_INT_EN_MASK 0x00000400L +#define UVD_VCPU_INT_EN__LBSI_EN_MASK 0x00000800L +#define UVD_VCPU_INT_EN__UDEC_EN_MASK 0x00001000L +#define UVD_VCPU_INT_EN__RPTR_WR_EN_MASK 0x00010000L +#define UVD_VCPU_INT_EN__JOB_START_EN_MASK 0x00020000L +#define UVD_VCPU_INT_EN__NJ_PF_EN_MASK 0x00040000L +#define UVD_VCPU_INT_EN__SEMA_WAIT_FAIL_SIG_EN_MASK 0x00800000L +#define UVD_VCPU_INT_EN__IDCT_EN_MASK 
0x01000000L +#define UVD_VCPU_INT_EN__MPRD_EN_MASK 0x02000000L +#define UVD_VCPU_INT_EN__AVM_INT_EN_MASK 0x04000000L +#define UVD_VCPU_INT_EN__CLK_SWT_EN_MASK 0x08000000L +#define UVD_VCPU_INT_EN__MIF_HWINT_EN_MASK 0x10000000L +#define UVD_VCPU_INT_EN__MPRD_ERR_EN_MASK 0x20000000L +#define UVD_VCPU_INT_EN__DRV_FW_REQ_EN_MASK 0x40000000L +#define UVD_VCPU_INT_EN__DRV_FW_ACK_EN_MASK 0x80000000L +//UVD_VCPU_INT_ACK +#define UVD_VCPU_INT_ACK__PIF_ADDR_ERR_ACK__SHIFT 0x0 +#define UVD_VCPU_INT_ACK__SEMA_WAIT_FAULT_TIMEOUT_ACK__SHIFT 0x1 +#define UVD_VCPU_INT_ACK__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_ACK__SHIFT 0x2 +#define UVD_VCPU_INT_ACK__NJ_PF_RPT_ACK__SHIFT 0x3 +#define UVD_VCPU_INT_ACK__SW_RB1_INT_ACK__SHIFT 0x4 +#define UVD_VCPU_INT_ACK__SW_RB2_INT_ACK__SHIFT 0x5 +#define UVD_VCPU_INT_ACK__RBC_REG_PRIV_FAULT_ACK__SHIFT 0x6 +#define UVD_VCPU_INT_ACK__SW_RB3_INT_ACK__SHIFT 0x7 +#define UVD_VCPU_INT_ACK__SW_RB4_INT_ACK__SHIFT 0x9 +#define UVD_VCPU_INT_ACK__SW_RB5_INT_ACK__SHIFT 0xa +#define UVD_VCPU_INT_ACK__LBSI_ACK__SHIFT 0xb +#define UVD_VCPU_INT_ACK__UDEC_ACK__SHIFT 0xc +#define UVD_VCPU_INT_ACK__RPTR_WR_ACK__SHIFT 0x10 +#define UVD_VCPU_INT_ACK__JOB_START_ACK__SHIFT 0x11 +#define UVD_VCPU_INT_ACK__NJ_PF_ACK__SHIFT 0x12 +#define UVD_VCPU_INT_ACK__SEMA_WAIT_FAIL_SIG_ACK__SHIFT 0x17 +#define UVD_VCPU_INT_ACK__IDCT_ACK__SHIFT 0x18 +#define UVD_VCPU_INT_ACK__MPRD_ACK__SHIFT 0x19 +#define UVD_VCPU_INT_ACK__AVM_INT_ACK__SHIFT 0x1a +#define UVD_VCPU_INT_ACK__CLK_SWT_ACK__SHIFT 0x1b +#define UVD_VCPU_INT_ACK__MIF_HWINT_ACK__SHIFT 0x1c +#define UVD_VCPU_INT_ACK__MPRD_ERR_ACK__SHIFT 0x1d +#define UVD_VCPU_INT_ACK__DRV_FW_REQ_ACK__SHIFT 0x1e +#define UVD_VCPU_INT_ACK__DRV_FW_ACK_ACK__SHIFT 0x1f +#define UVD_VCPU_INT_ACK__PIF_ADDR_ERR_ACK_MASK 0x00000001L +#define UVD_VCPU_INT_ACK__SEMA_WAIT_FAULT_TIMEOUT_ACK_MASK 0x00000002L +#define UVD_VCPU_INT_ACK__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_ACK_MASK 0x00000004L +#define UVD_VCPU_INT_ACK__NJ_PF_RPT_ACK_MASK 0x00000008L +#define UVD_VCPU_INT_ACK__SW_RB1_INT_ACK_MASK 0x00000010L +#define UVD_VCPU_INT_ACK__SW_RB2_INT_ACK_MASK 0x00000020L +#define UVD_VCPU_INT_ACK__RBC_REG_PRIV_FAULT_ACK_MASK 0x00000040L +#define UVD_VCPU_INT_ACK__SW_RB3_INT_ACK_MASK 0x00000080L +#define UVD_VCPU_INT_ACK__SW_RB4_INT_ACK_MASK 0x00000200L +#define UVD_VCPU_INT_ACK__SW_RB5_INT_ACK_MASK 0x00000400L +#define UVD_VCPU_INT_ACK__LBSI_ACK_MASK 0x00000800L +#define UVD_VCPU_INT_ACK__UDEC_ACK_MASK 0x00001000L +#define UVD_VCPU_INT_ACK__RPTR_WR_ACK_MASK 0x00010000L +#define UVD_VCPU_INT_ACK__JOB_START_ACK_MASK 0x00020000L +#define UVD_VCPU_INT_ACK__NJ_PF_ACK_MASK 0x00040000L +#define UVD_VCPU_INT_ACK__SEMA_WAIT_FAIL_SIG_ACK_MASK 0x00800000L +#define UVD_VCPU_INT_ACK__IDCT_ACK_MASK 0x01000000L +#define UVD_VCPU_INT_ACK__MPRD_ACK_MASK 0x02000000L +#define UVD_VCPU_INT_ACK__AVM_INT_ACK_MASK 0x04000000L +#define UVD_VCPU_INT_ACK__CLK_SWT_ACK_MASK 0x08000000L +#define UVD_VCPU_INT_ACK__MIF_HWINT_ACK_MASK 0x10000000L +#define UVD_VCPU_INT_ACK__MPRD_ERR_ACK_MASK 0x20000000L +#define UVD_VCPU_INT_ACK__DRV_FW_REQ_ACK_MASK 0x40000000L +#define UVD_VCPU_INT_ACK__DRV_FW_ACK_ACK_MASK 0x80000000L +//UVD_VCPU_INT_ROUTE +#define UVD_VCPU_INT_ROUTE__DRV_FW_MSG__SHIFT 0x0 +#define UVD_VCPU_INT_ROUTE__FW_DRV_MSG_ACK__SHIFT 0x1 +#define UVD_VCPU_INT_ROUTE__VCPU_GPCOM__SHIFT 0x2 +#define UVD_VCPU_INT_ROUTE__DRV_FW_MSG_MASK 0x00000001L +#define UVD_VCPU_INT_ROUTE__FW_DRV_MSG_ACK_MASK 0x00000002L +#define UVD_VCPU_INT_ROUTE__VCPU_GPCOM_MASK 0x00000004L +//UVD_ENC_VCPU_INT_EN +#define 
UVD_ENC_VCPU_INT_EN__DCE_UVD_SCAN_IN_BUFMGR_EN__SHIFT 0x0 +#define UVD_ENC_VCPU_INT_EN__DCE_UVD_SCAN_IN_BUFMGR2_EN__SHIFT 0x1 +#define UVD_ENC_VCPU_INT_EN__DCE_UVD_SCAN_IN_BUFMGR3_EN__SHIFT 0x2 +#define UVD_ENC_VCPU_INT_EN__DCE_UVD_SCAN_IN_BUFMGR_EN_MASK 0x00000001L +#define UVD_ENC_VCPU_INT_EN__DCE_UVD_SCAN_IN_BUFMGR2_EN_MASK 0x00000002L +#define UVD_ENC_VCPU_INT_EN__DCE_UVD_SCAN_IN_BUFMGR3_EN_MASK 0x00000004L +//UVD_ENC_VCPU_INT_ACK +#define UVD_ENC_VCPU_INT_ACK__DCE_UVD_SCAN_IN_BUFMGR_ACK__SHIFT 0x0 +#define UVD_ENC_VCPU_INT_ACK__DCE_UVD_SCAN_IN_BUFMGR2_ACK__SHIFT 0x1 +#define UVD_ENC_VCPU_INT_ACK__DCE_UVD_SCAN_IN_BUFMGR3_ACK__SHIFT 0x2 +#define UVD_ENC_VCPU_INT_ACK__DCE_UVD_SCAN_IN_BUFMGR_ACK_MASK 0x00000001L +#define UVD_ENC_VCPU_INT_ACK__DCE_UVD_SCAN_IN_BUFMGR2_ACK_MASK 0x00000002L +#define UVD_ENC_VCPU_INT_ACK__DCE_UVD_SCAN_IN_BUFMGR3_ACK_MASK 0x00000004L +//UVD_MASTINT_EN +#define UVD_MASTINT_EN__OVERRUN_RST__SHIFT 0x0 +#define UVD_MASTINT_EN__VCPU_EN__SHIFT 0x1 +#define UVD_MASTINT_EN__SYS_EN__SHIFT 0x2 +#define UVD_MASTINT_EN__INT_OVERRUN__SHIFT 0x4 +#define UVD_MASTINT_EN__OVERRUN_RST_MASK 0x00000001L +#define UVD_MASTINT_EN__VCPU_EN_MASK 0x00000002L +#define UVD_MASTINT_EN__SYS_EN_MASK 0x00000004L +#define UVD_MASTINT_EN__INT_OVERRUN_MASK 0x007FFFF0L +//UVD_SYS_INT_EN +#define UVD_SYS_INT_EN__PIF_ADDR_ERR_EN__SHIFT 0x0 +#define UVD_SYS_INT_EN__SEMA_WAIT_FAULT_TIMEOUT_EN__SHIFT 0x1 +#define UVD_SYS_INT_EN__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_EN__SHIFT 0x2 +#define UVD_SYS_INT_EN__CXW_WR_EN__SHIFT 0x3 +#define UVD_SYS_INT_EN__RBC_REG_PRIV_FAULT_EN__SHIFT 0x6 +#define UVD_SYS_INT_EN__LBSI_EN__SHIFT 0xb +#define UVD_SYS_INT_EN__UDEC_EN__SHIFT 0xc +#define UVD_SYS_INT_EN__JOB_DONE_EN__SHIFT 0x10 +#define UVD_SYS_INT_EN__SEMA_WAIT_FAIL_SIG_EN__SHIFT 0x17 +#define UVD_SYS_INT_EN__IDCT_EN__SHIFT 0x18 +#define UVD_SYS_INT_EN__MPRD_EN__SHIFT 0x19 +#define UVD_SYS_INT_EN__CLK_SWT_EN__SHIFT 0x1b +#define UVD_SYS_INT_EN__MIF_HWINT_EN__SHIFT 0x1c +#define UVD_SYS_INT_EN__MPRD_ERR_EN__SHIFT 0x1d +#define UVD_SYS_INT_EN__AVM_INT_EN__SHIFT 0x1f +#define UVD_SYS_INT_EN__PIF_ADDR_ERR_EN_MASK 0x00000001L +#define UVD_SYS_INT_EN__SEMA_WAIT_FAULT_TIMEOUT_EN_MASK 0x00000002L +#define UVD_SYS_INT_EN__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_EN_MASK 0x00000004L +#define UVD_SYS_INT_EN__CXW_WR_EN_MASK 0x00000008L +#define UVD_SYS_INT_EN__RBC_REG_PRIV_FAULT_EN_MASK 0x00000040L +#define UVD_SYS_INT_EN__LBSI_EN_MASK 0x00000800L +#define UVD_SYS_INT_EN__UDEC_EN_MASK 0x00001000L +#define UVD_SYS_INT_EN__JOB_DONE_EN_MASK 0x00010000L +#define UVD_SYS_INT_EN__SEMA_WAIT_FAIL_SIG_EN_MASK 0x00800000L +#define UVD_SYS_INT_EN__IDCT_EN_MASK 0x01000000L +#define UVD_SYS_INT_EN__MPRD_EN_MASK 0x02000000L +#define UVD_SYS_INT_EN__CLK_SWT_EN_MASK 0x08000000L +#define UVD_SYS_INT_EN__MIF_HWINT_EN_MASK 0x10000000L +#define UVD_SYS_INT_EN__MPRD_ERR_EN_MASK 0x20000000L +#define UVD_SYS_INT_EN__AVM_INT_EN_MASK 0x80000000L +//UVD_SYS_INT_STATUS +#define UVD_SYS_INT_STATUS__PIF_ADDR_ERR_INT__SHIFT 0x0 +#define UVD_SYS_INT_STATUS__SEMA_WAIT_FAULT_TIMEOUT_INT__SHIFT 0x1 +#define UVD_SYS_INT_STATUS__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_INT__SHIFT 0x2 +#define UVD_SYS_INT_STATUS__CXW_WR_INT__SHIFT 0x3 +#define UVD_SYS_INT_STATUS__RBC_REG_PRIV_FAULT_INT__SHIFT 0x6 +#define UVD_SYS_INT_STATUS__LBSI_INT__SHIFT 0xb +#define UVD_SYS_INT_STATUS__UDEC_INT__SHIFT 0xc +#define UVD_SYS_INT_STATUS__JOB_DONE_INT__SHIFT 0x10 +#define UVD_SYS_INT_STATUS__GPCOM_INT__SHIFT 0x12 +#define UVD_SYS_INT_STATUS__SEMA_WAIT_FAIL_SIG_INT__SHIFT 0x17 +#define 
UVD_SYS_INT_STATUS__IDCT_INT__SHIFT 0x18 +#define UVD_SYS_INT_STATUS__MPRD_INT__SHIFT 0x19 +#define UVD_SYS_INT_STATUS__CLK_SWT_INT__SHIFT 0x1b +#define UVD_SYS_INT_STATUS__MIF_HWINT__SHIFT 0x1c +#define UVD_SYS_INT_STATUS__MPRD_ERR_INT__SHIFT 0x1d +#define UVD_SYS_INT_STATUS__AVM_INT__SHIFT 0x1f +#define UVD_SYS_INT_STATUS__PIF_ADDR_ERR_INT_MASK 0x00000001L +#define UVD_SYS_INT_STATUS__SEMA_WAIT_FAULT_TIMEOUT_INT_MASK 0x00000002L +#define UVD_SYS_INT_STATUS__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_INT_MASK 0x00000004L +#define UVD_SYS_INT_STATUS__CXW_WR_INT_MASK 0x00000008L +#define UVD_SYS_INT_STATUS__RBC_REG_PRIV_FAULT_INT_MASK 0x00000040L +#define UVD_SYS_INT_STATUS__LBSI_INT_MASK 0x00000800L +#define UVD_SYS_INT_STATUS__UDEC_INT_MASK 0x00001000L +#define UVD_SYS_INT_STATUS__JOB_DONE_INT_MASK 0x00010000L +#define UVD_SYS_INT_STATUS__GPCOM_INT_MASK 0x00040000L +#define UVD_SYS_INT_STATUS__SEMA_WAIT_FAIL_SIG_INT_MASK 0x00800000L +#define UVD_SYS_INT_STATUS__IDCT_INT_MASK 0x01000000L +#define UVD_SYS_INT_STATUS__MPRD_INT_MASK 0x02000000L +#define UVD_SYS_INT_STATUS__CLK_SWT_INT_MASK 0x08000000L +#define UVD_SYS_INT_STATUS__MIF_HWINT_MASK 0x10000000L +#define UVD_SYS_INT_STATUS__MPRD_ERR_INT_MASK 0x20000000L +#define UVD_SYS_INT_STATUS__AVM_INT_MASK 0x80000000L +//UVD_SYS_INT_ACK +#define UVD_SYS_INT_ACK__PIF_ADDR_ERR_ACK__SHIFT 0x0 +#define UVD_SYS_INT_ACK__SEMA_WAIT_FAULT_TIMEOUT_ACK__SHIFT 0x1 +#define UVD_SYS_INT_ACK__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_ACK__SHIFT 0x2 +#define UVD_SYS_INT_ACK__CXW_WR_ACK__SHIFT 0x3 +#define UVD_SYS_INT_ACK__RBC_REG_PRIV_FAULT_ACK__SHIFT 0x6 +#define UVD_SYS_INT_ACK__LBSI_ACK__SHIFT 0xb +#define UVD_SYS_INT_ACK__UDEC_ACK__SHIFT 0xc +#define UVD_SYS_INT_ACK__JOB_DONE_ACK__SHIFT 0x10 +#define UVD_SYS_INT_ACK__SEMA_WAIT_FAIL_SIG_ACK__SHIFT 0x17 +#define UVD_SYS_INT_ACK__IDCT_ACK__SHIFT 0x18 +#define UVD_SYS_INT_ACK__MPRD_ACK__SHIFT 0x19 +#define UVD_SYS_INT_ACK__CLK_SWT_ACK__SHIFT 0x1b +#define UVD_SYS_INT_ACK__MIF_HWINT_ACK__SHIFT 0x1c +#define UVD_SYS_INT_ACK__MPRD_ERR_ACK__SHIFT 0x1d +#define UVD_SYS_INT_ACK__AVM_INT_ACK__SHIFT 0x1f +#define UVD_SYS_INT_ACK__PIF_ADDR_ERR_ACK_MASK 0x00000001L +#define UVD_SYS_INT_ACK__SEMA_WAIT_FAULT_TIMEOUT_ACK_MASK 0x00000002L +#define UVD_SYS_INT_ACK__SEMA_SIGNAL_INCOMPLETE_TIMEOUT_ACK_MASK 0x00000004L +#define UVD_SYS_INT_ACK__CXW_WR_ACK_MASK 0x00000008L +#define UVD_SYS_INT_ACK__RBC_REG_PRIV_FAULT_ACK_MASK 0x00000040L +#define UVD_SYS_INT_ACK__LBSI_ACK_MASK 0x00000800L +#define UVD_SYS_INT_ACK__UDEC_ACK_MASK 0x00001000L +#define UVD_SYS_INT_ACK__JOB_DONE_ACK_MASK 0x00010000L +#define UVD_SYS_INT_ACK__SEMA_WAIT_FAIL_SIG_ACK_MASK 0x00800000L +#define UVD_SYS_INT_ACK__IDCT_ACK_MASK 0x01000000L +#define UVD_SYS_INT_ACK__MPRD_ACK_MASK 0x02000000L +#define UVD_SYS_INT_ACK__CLK_SWT_ACK_MASK 0x08000000L +#define UVD_SYS_INT_ACK__MIF_HWINT_ACK_MASK 0x10000000L +#define UVD_SYS_INT_ACK__MPRD_ERR_ACK_MASK 0x20000000L +#define UVD_SYS_INT_ACK__AVM_INT_ACK_MASK 0x80000000L +//UVD_JOB_DONE +#define UVD_JOB_DONE__JOB_DONE__SHIFT 0x0 +#define UVD_JOB_DONE__JOB_DONE_MASK 0x00000003L +//UVD_CBUF_ID +#define UVD_CBUF_ID__CBUF_ID__SHIFT 0x0 +#define UVD_CBUF_ID__CBUF_ID_MASK 0xFFFFFFFFL +//UVD_CONTEXT_ID +#define UVD_CONTEXT_ID__CONTEXT_ID__SHIFT 0x0 +#define UVD_CONTEXT_ID__CONTEXT_ID_MASK 0xFFFFFFFFL +//UVD_CONTEXT_ID2 +#define UVD_CONTEXT_ID2__CONTEXT_ID2__SHIFT 0x0 +#define UVD_CONTEXT_ID2__CONTEXT_ID2_MASK 0xFFFFFFFFL +//UVD_NO_OP +#define UVD_NO_OP__NO_OP__SHIFT 0x0 +#define UVD_NO_OP__NO_OP_MASK 0xFFFFFFFFL +//UVD_RB_BASE_LO +#define 
UVD_RB_BASE_LO__RB_BASE_LO__SHIFT 0x6 +#define UVD_RB_BASE_LO__RB_BASE_LO_MASK 0xFFFFFFC0L +//UVD_RB_BASE_HI +#define UVD_RB_BASE_HI__RB_BASE_HI__SHIFT 0x0 +#define UVD_RB_BASE_HI__RB_BASE_HI_MASK 0xFFFFFFFFL +//UVD_RB_SIZE +#define UVD_RB_SIZE__RB_SIZE__SHIFT 0x4 +#define UVD_RB_SIZE__RB_SIZE_MASK 0x007FFFF0L +//UVD_RB_RPTR +#define UVD_RB_RPTR__RB_RPTR__SHIFT 0x4 +#define UVD_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L +//UVD_RB_WPTR +#define UVD_RB_WPTR__RB_WPTR__SHIFT 0x4 +#define UVD_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L +//UVD_RB_BASE_LO2 +#define UVD_RB_BASE_LO2__RB_BASE_LO__SHIFT 0x6 +#define UVD_RB_BASE_LO2__RB_BASE_LO_MASK 0xFFFFFFC0L +//UVD_RB_BASE_HI2 +#define UVD_RB_BASE_HI2__RB_BASE_HI__SHIFT 0x0 +#define UVD_RB_BASE_HI2__RB_BASE_HI_MASK 0xFFFFFFFFL +//UVD_RB_SIZE2 +#define UVD_RB_SIZE2__RB_SIZE__SHIFT 0x4 +#define UVD_RB_SIZE2__RB_SIZE_MASK 0x007FFFF0L +//UVD_RB_RPTR2 +#define UVD_RB_RPTR2__RB_RPTR__SHIFT 0x4 +#define UVD_RB_RPTR2__RB_RPTR_MASK 0x007FFFF0L +//UVD_RB_WPTR2 +#define UVD_RB_WPTR2__RB_WPTR__SHIFT 0x4 +#define UVD_RB_WPTR2__RB_WPTR_MASK 0x007FFFF0L +//UVD_RB_BASE_LO3 +#define UVD_RB_BASE_LO3__RB_BASE_LO__SHIFT 0x6 +#define UVD_RB_BASE_LO3__RB_BASE_LO_MASK 0xFFFFFFC0L +//UVD_RB_BASE_HI3 +#define UVD_RB_BASE_HI3__RB_BASE_HI__SHIFT 0x0 +#define UVD_RB_BASE_HI3__RB_BASE_HI_MASK 0xFFFFFFFFL +//UVD_RB_SIZE3 +#define UVD_RB_SIZE3__RB_SIZE__SHIFT 0x4 +#define UVD_RB_SIZE3__RB_SIZE_MASK 0x007FFFF0L +//UVD_RB_RPTR3 +#define UVD_RB_RPTR3__RB_RPTR__SHIFT 0x4 +#define UVD_RB_RPTR3__RB_RPTR_MASK 0x007FFFF0L +//UVD_RB_WPTR3 +#define UVD_RB_WPTR3__RB_WPTR__SHIFT 0x4 +#define UVD_RB_WPTR3__RB_WPTR_MASK 0x007FFFF0L +//UVD_RB_BASE_LO4 +#define UVD_RB_BASE_LO4__RB_BASE_LO__SHIFT 0x6 +#define UVD_RB_BASE_LO4__RB_BASE_LO_MASK 0xFFFFFFC0L +//UVD_RB_BASE_HI4 +#define UVD_RB_BASE_HI4__RB_BASE_HI__SHIFT 0x0 +#define UVD_RB_BASE_HI4__RB_BASE_HI_MASK 0xFFFFFFFFL +//UVD_RB_SIZE4 +#define UVD_RB_SIZE4__RB_SIZE__SHIFT 0x4 +#define UVD_RB_SIZE4__RB_SIZE_MASK 0x007FFFF0L +//UVD_RB_RPTR4 +#define UVD_RB_RPTR4__RB_RPTR__SHIFT 0x4 +#define UVD_RB_RPTR4__RB_RPTR_MASK 0x007FFFF0L +//UVD_RB_WPTR4 +#define UVD_RB_WPTR4__RB_WPTR__SHIFT 0x4 +#define UVD_RB_WPTR4__RB_WPTR_MASK 0x007FFFF0L +//UVD_OUT_RB_BASE_LO +#define UVD_OUT_RB_BASE_LO__RB_BASE_LO__SHIFT 0x6 +#define UVD_OUT_RB_BASE_LO__RB_BASE_LO_MASK 0xFFFFFFC0L +//UVD_OUT_RB_BASE_HI +#define UVD_OUT_RB_BASE_HI__RB_BASE_HI__SHIFT 0x0 +#define UVD_OUT_RB_BASE_HI__RB_BASE_HI_MASK 0xFFFFFFFFL +//UVD_OUT_RB_SIZE +#define UVD_OUT_RB_SIZE__RB_SIZE__SHIFT 0x4 +#define UVD_OUT_RB_SIZE__RB_SIZE_MASK 0x007FFFF0L +//UVD_OUT_RB_RPTR +#define UVD_OUT_RB_RPTR__RB_RPTR__SHIFT 0x4 +#define UVD_OUT_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L +//UVD_OUT_RB_WPTR +#define UVD_OUT_RB_WPTR__RB_WPTR__SHIFT 0x4 +#define UVD_OUT_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L +//UVD_RB_ARB_CTRL +#define UVD_RB_ARB_CTRL__SRBM_DROP__SHIFT 0x0 +#define UVD_RB_ARB_CTRL__SRBM_DIS__SHIFT 0x1 +#define UVD_RB_ARB_CTRL__VCPU_DROP__SHIFT 0x2 +#define UVD_RB_ARB_CTRL__VCPU_DIS__SHIFT 0x3 +#define UVD_RB_ARB_CTRL__RBC_DROP__SHIFT 0x4 +#define UVD_RB_ARB_CTRL__RBC_DIS__SHIFT 0x5 +#define UVD_RB_ARB_CTRL__FWOFLD_DROP__SHIFT 0x6 +#define UVD_RB_ARB_CTRL__FWOFLD_DIS__SHIFT 0x7 +#define UVD_RB_ARB_CTRL__FAST_PATH_EN__SHIFT 0x8 +#define UVD_RB_ARB_CTRL__SRBM_DROP_MASK 0x00000001L +#define UVD_RB_ARB_CTRL__SRBM_DIS_MASK 0x00000002L +#define UVD_RB_ARB_CTRL__VCPU_DROP_MASK 0x00000004L +#define UVD_RB_ARB_CTRL__VCPU_DIS_MASK 0x00000008L +#define UVD_RB_ARB_CTRL__RBC_DROP_MASK 0x00000010L +#define UVD_RB_ARB_CTRL__RBC_DIS_MASK 
0x00000020L +#define UVD_RB_ARB_CTRL__FWOFLD_DROP_MASK 0x00000040L +#define UVD_RB_ARB_CTRL__FWOFLD_DIS_MASK 0x00000080L +#define UVD_RB_ARB_CTRL__FAST_PATH_EN_MASK 0x00000100L +//UVD_CTX_INDEX +#define UVD_CTX_INDEX__INDEX__SHIFT 0x0 +#define UVD_CTX_INDEX__INDEX_MASK 0x000001FFL +//UVD_CTX_DATA +#define UVD_CTX_DATA__DATA__SHIFT 0x0 +#define UVD_CTX_DATA__DATA_MASK 0xFFFFFFFFL +//UVD_CXW_WR +#define UVD_CXW_WR__DAT__SHIFT 0x0 +#define UVD_CXW_WR__STAT__SHIFT 0x1f +#define UVD_CXW_WR__DAT_MASK 0x0FFFFFFFL +#define UVD_CXW_WR__STAT_MASK 0x80000000L +//UVD_CXW_WR_INT_ID +#define UVD_CXW_WR_INT_ID__ID__SHIFT 0x0 +#define UVD_CXW_WR_INT_ID__ID_MASK 0x000000FFL +//UVD_CXW_WR_INT_CTX_ID +#define UVD_CXW_WR_INT_CTX_ID__ID__SHIFT 0x0 +#define UVD_CXW_WR_INT_CTX_ID__ID_MASK 0x0FFFFFFFL +//UVD_CXW_INT_ID +#define UVD_CXW_INT_ID__ID__SHIFT 0x0 +#define UVD_CXW_INT_ID__ID_MASK 0x000000FFL +//UVD_TOP_CTRL +#define UVD_TOP_CTRL__STANDARD__SHIFT 0x0 +#define UVD_TOP_CTRL__STD_VERSION__SHIFT 0x4 +#define UVD_TOP_CTRL__STANDARD_MASK 0x0000000FL +#define UVD_TOP_CTRL__STD_VERSION_MASK 0x000000F0L +//UVD_YBASE +#define UVD_YBASE__DUM__SHIFT 0x0 +#define UVD_YBASE__DUM_MASK 0xFFFFFFFFL +//UVD_UVBASE +#define UVD_UVBASE__DUM__SHIFT 0x0 +#define UVD_UVBASE__DUM_MASK 0xFFFFFFFFL +//UVD_PITCH +#define UVD_PITCH__DUM__SHIFT 0x0 +#define UVD_PITCH__DUM_MASK 0xFFFFFFFFL +//UVD_WIDTH +#define UVD_WIDTH__DUM__SHIFT 0x0 +#define UVD_WIDTH__DUM_MASK 0xFFFFFFFFL +//UVD_HEIGHT +#define UVD_HEIGHT__DUM__SHIFT 0x0 +#define UVD_HEIGHT__DUM_MASK 0xFFFFFFFFL +//UVD_PICCOUNT +#define UVD_PICCOUNT__DUM__SHIFT 0x0 +#define UVD_PICCOUNT__DUM_MASK 0xFFFFFFFFL +//UVD_SCRATCH_NP +#define UVD_SCRATCH_NP__DATA__SHIFT 0x0 +#define UVD_SCRATCH_NP__DATA_MASK 0xFFFFFFFFL +//UVD_VERSION +#define UVD_VERSION__MINOR_VERSION__SHIFT 0x0 +#define UVD_VERSION__MAJOR_VERSION__SHIFT 0x10 +#define UVD_VERSION__MINOR_VERSION_MASK 0x0000FFFFL +#define UVD_VERSION__MAJOR_VERSION_MASK 0x0FFF0000L +//UVD_GP_SCRATCH0 +#define UVD_GP_SCRATCH0__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH0__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH1 +#define UVD_GP_SCRATCH1__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH1__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH2 +#define UVD_GP_SCRATCH2__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH2__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH3 +#define UVD_GP_SCRATCH3__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH3__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH4 +#define UVD_GP_SCRATCH4__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH4__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH5 +#define UVD_GP_SCRATCH5__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH5__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH6 +#define UVD_GP_SCRATCH6__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH6__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH7 +#define UVD_GP_SCRATCH7__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH7__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH8 +#define UVD_GP_SCRATCH8__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH8__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH9 +#define UVD_GP_SCRATCH9__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH9__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH10 +#define UVD_GP_SCRATCH10__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH10__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH11 +#define UVD_GP_SCRATCH11__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH11__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH12 +#define UVD_GP_SCRATCH12__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH12__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH13 +#define UVD_GP_SCRATCH13__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH13__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH14 +#define 
UVD_GP_SCRATCH14__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH14__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH15 +#define UVD_GP_SCRATCH15__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH15__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH16 +#define UVD_GP_SCRATCH16__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH16__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH17 +#define UVD_GP_SCRATCH17__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH17__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH18 +#define UVD_GP_SCRATCH18__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH18__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH19 +#define UVD_GP_SCRATCH19__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH19__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH20 +#define UVD_GP_SCRATCH20__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH20__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH21 +#define UVD_GP_SCRATCH21__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH21__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH22 +#define UVD_GP_SCRATCH22__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH22__DATA_MASK 0xFFFFFFFFL +//UVD_GP_SCRATCH23 +#define UVD_GP_SCRATCH23__DATA__SHIFT 0x0 +#define UVD_GP_SCRATCH23__DATA_MASK 0xFFFFFFFFL + + +// addressBlock: uvd0_ecpudec +//UVD_VCPU_CACHE_OFFSET0 +#define UVD_VCPU_CACHE_OFFSET0__CACHE_OFFSET0__SHIFT 0x0 +#define UVD_VCPU_CACHE_OFFSET0__CACHE_OFFSET0_MASK 0x001FFFFFL +//UVD_VCPU_CACHE_SIZE0 +#define UVD_VCPU_CACHE_SIZE0__CACHE_SIZE0__SHIFT 0x0 +#define UVD_VCPU_CACHE_SIZE0__CACHE_SIZE0_MASK 0x001FFFFFL +//UVD_VCPU_CACHE_OFFSET1 +#define UVD_VCPU_CACHE_OFFSET1__CACHE_OFFSET1__SHIFT 0x0 +#define UVD_VCPU_CACHE_OFFSET1__CACHE_OFFSET1_MASK 0x001FFFFFL +//UVD_VCPU_CACHE_SIZE1 +#define UVD_VCPU_CACHE_SIZE1__CACHE_SIZE1__SHIFT 0x0 +#define UVD_VCPU_CACHE_SIZE1__CACHE_SIZE1_MASK 0x001FFFFFL +//UVD_VCPU_CACHE_OFFSET2 +#define UVD_VCPU_CACHE_OFFSET2__CACHE_OFFSET2__SHIFT 0x0 +#define UVD_VCPU_CACHE_OFFSET2__CACHE_OFFSET2_MASK 0x001FFFFFL +//UVD_VCPU_CACHE_SIZE2 +#define UVD_VCPU_CACHE_SIZE2__CACHE_SIZE2__SHIFT 0x0 +#define UVD_VCPU_CACHE_SIZE2__CACHE_SIZE2_MASK 0x001FFFFFL +//UVD_VCPU_CACHE_OFFSET3 +#define UVD_VCPU_CACHE_OFFSET3__CACHE_OFFSET3__SHIFT 0x0 +#define UVD_VCPU_CACHE_OFFSET3__CACHE_OFFSET3_MASK 0x001FFFFFL +//UVD_VCPU_CACHE_SIZE3 +#define UVD_VCPU_CACHE_SIZE3__CACHE_SIZE3__SHIFT 0x0 +#define UVD_VCPU_CACHE_SIZE3__CACHE_SIZE3_MASK 0x001FFFFFL +//UVD_VCPU_CACHE_OFFSET4 +#define UVD_VCPU_CACHE_OFFSET4__CACHE_OFFSET4__SHIFT 0x0 +#define UVD_VCPU_CACHE_OFFSET4__CACHE_OFFSET4_MASK 0x001FFFFFL +//UVD_VCPU_CACHE_SIZE4 +#define UVD_VCPU_CACHE_SIZE4__CACHE_SIZE4__SHIFT 0x0 +#define UVD_VCPU_CACHE_SIZE4__CACHE_SIZE4_MASK 0x001FFFFFL +//UVD_VCPU_CACHE_OFFSET5 +#define UVD_VCPU_CACHE_OFFSET5__CACHE_OFFSET5__SHIFT 0x0 +#define UVD_VCPU_CACHE_OFFSET5__CACHE_OFFSET5_MASK 0x001FFFFFL +//UVD_VCPU_CACHE_SIZE5 +#define UVD_VCPU_CACHE_SIZE5__CACHE_SIZE5__SHIFT 0x0 +#define UVD_VCPU_CACHE_SIZE5__CACHE_SIZE5_MASK 0x001FFFFFL +//UVD_VCPU_CACHE_OFFSET6 +#define UVD_VCPU_CACHE_OFFSET6__CACHE_OFFSET6__SHIFT 0x0 +#define UVD_VCPU_CACHE_OFFSET6__CACHE_OFFSET6_MASK 0x001FFFFFL +//UVD_VCPU_CACHE_SIZE6 +#define UVD_VCPU_CACHE_SIZE6__CACHE_SIZE6__SHIFT 0x0 +#define UVD_VCPU_CACHE_SIZE6__CACHE_SIZE6_MASK 0x001FFFFFL +//UVD_VCPU_CACHE_OFFSET7 +#define UVD_VCPU_CACHE_OFFSET7__CACHE_OFFSET7__SHIFT 0x0 +#define UVD_VCPU_CACHE_OFFSET7__CACHE_OFFSET7_MASK 0x001FFFFFL +//UVD_VCPU_CACHE_SIZE7 +#define UVD_VCPU_CACHE_SIZE7__CACHE_SIZE7__SHIFT 0x0 +#define UVD_VCPU_CACHE_SIZE7__CACHE_SIZE7_MASK 0x001FFFFFL +//UVD_VCPU_CACHE_OFFSET8 +#define UVD_VCPU_CACHE_OFFSET8__CACHE_OFFSET8__SHIFT 0x0 +#define UVD_VCPU_CACHE_OFFSET8__CACHE_OFFSET8_MASK 
0x001FFFFFL +//UVD_VCPU_CACHE_SIZE8 +#define UVD_VCPU_CACHE_SIZE8__CACHE_SIZE8__SHIFT 0x0 +#define UVD_VCPU_CACHE_SIZE8__CACHE_SIZE8_MASK 0x001FFFFFL +//UVD_VCPU_NONCACHE_OFFSET0 +#define UVD_VCPU_NONCACHE_OFFSET0__NONCACHE_OFFSET0__SHIFT 0x0 +#define UVD_VCPU_NONCACHE_OFFSET0__NONCACHE_OFFSET0_MASK 0x01FFFFFFL +//UVD_VCPU_NONCACHE_SIZE0 +#define UVD_VCPU_NONCACHE_SIZE0__NONCACHE_SIZE0__SHIFT 0x0 +#define UVD_VCPU_NONCACHE_SIZE0__NONCACHE_SIZE0_MASK 0x001FFFFFL +//UVD_VCPU_NONCACHE_OFFSET1 +#define UVD_VCPU_NONCACHE_OFFSET1__NONCACHE_OFFSET1__SHIFT 0x0 +#define UVD_VCPU_NONCACHE_OFFSET1__NONCACHE_OFFSET1_MASK 0x01FFFFFFL +//UVD_VCPU_NONCACHE_SIZE1 +#define UVD_VCPU_NONCACHE_SIZE1__NONCACHE_SIZE1__SHIFT 0x0 +#define UVD_VCPU_NONCACHE_SIZE1__NONCACHE_SIZE1_MASK 0x001FFFFFL +//UVD_VCPU_CNTL +#define UVD_VCPU_CNTL__IRQ_ERR__SHIFT 0x0 +#define UVD_VCPU_CNTL__PMB_ED_ENABLE__SHIFT 0x5 +#define UVD_VCPU_CNTL__PMB_SOFT_RESET__SHIFT 0x6 +#define UVD_VCPU_CNTL__RBBM_SOFT_RESET__SHIFT 0x7 +#define UVD_VCPU_CNTL__ABORT_REQ__SHIFT 0x8 +#define UVD_VCPU_CNTL__CLK_EN__SHIFT 0x9 +#define UVD_VCPU_CNTL__TRCE_EN__SHIFT 0xa +#define UVD_VCPU_CNTL__TRCE_MUX__SHIFT 0xb +#define UVD_VCPU_CNTL__JTAG_EN__SHIFT 0x10 +#define UVD_VCPU_CNTL__TIMEOUT_DIS__SHIFT 0x12 +#define UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT 0x14 +#define UVD_VCPU_CNTL__BLK_RST__SHIFT 0x1c +#define UVD_VCPU_CNTL__IRQ_ERR_MASK 0x0000000FL +#define UVD_VCPU_CNTL__PMB_ED_ENABLE_MASK 0x00000020L +#define UVD_VCPU_CNTL__PMB_SOFT_RESET_MASK 0x00000040L +#define UVD_VCPU_CNTL__RBBM_SOFT_RESET_MASK 0x00000080L +#define UVD_VCPU_CNTL__ABORT_REQ_MASK 0x00000100L +#define UVD_VCPU_CNTL__CLK_EN_MASK 0x00000200L +#define UVD_VCPU_CNTL__TRCE_EN_MASK 0x00000400L +#define UVD_VCPU_CNTL__TRCE_MUX_MASK 0x00001800L +#define UVD_VCPU_CNTL__JTAG_EN_MASK 0x00010000L +#define UVD_VCPU_CNTL__TIMEOUT_DIS_MASK 0x00040000L +#define UVD_VCPU_CNTL__PRB_TIMEOUT_VAL_MASK 0x0FF00000L +#define UVD_VCPU_CNTL__BLK_RST_MASK 0x10000000L +//UVD_VCPU_PRID +#define UVD_VCPU_PRID__PRID__SHIFT 0x0 +#define UVD_VCPU_PRID__PRID_MASK 0x0000FFFFL +//UVD_VCPU_TRCE +#define UVD_VCPU_TRCE__PC__SHIFT 0x0 +#define UVD_VCPU_TRCE__PC_MASK 0x0FFFFFFFL +//UVD_VCPU_TRCE_RD +#define UVD_VCPU_TRCE_RD__DATA__SHIFT 0x0 +#define UVD_VCPU_TRCE_RD__DATA_MASK 0xFFFFFFFFL + + +// addressBlock: uvd0_uvd_mpcdec +//UVD_MP_SWAP_CNTL +#define UVD_MP_SWAP_CNTL__MP_REF0_MC_SWAP__SHIFT 0x0 +#define UVD_MP_SWAP_CNTL__MP_REF1_MC_SWAP__SHIFT 0x2 +#define UVD_MP_SWAP_CNTL__MP_REF2_MC_SWAP__SHIFT 0x4 +#define UVD_MP_SWAP_CNTL__MP_REF3_MC_SWAP__SHIFT 0x6 +#define UVD_MP_SWAP_CNTL__MP_REF4_MC_SWAP__SHIFT 0x8 +#define UVD_MP_SWAP_CNTL__MP_REF5_MC_SWAP__SHIFT 0xa +#define UVD_MP_SWAP_CNTL__MP_REF6_MC_SWAP__SHIFT 0xc +#define UVD_MP_SWAP_CNTL__MP_REF7_MC_SWAP__SHIFT 0xe +#define UVD_MP_SWAP_CNTL__MP_REF8_MC_SWAP__SHIFT 0x10 +#define UVD_MP_SWAP_CNTL__MP_REF9_MC_SWAP__SHIFT 0x12 +#define UVD_MP_SWAP_CNTL__MP_REF10_MC_SWAP__SHIFT 0x14 +#define UVD_MP_SWAP_CNTL__MP_REF11_MC_SWAP__SHIFT 0x16 +#define UVD_MP_SWAP_CNTL__MP_REF12_MC_SWAP__SHIFT 0x18 +#define UVD_MP_SWAP_CNTL__MP_REF13_MC_SWAP__SHIFT 0x1a +#define UVD_MP_SWAP_CNTL__MP_REF14_MC_SWAP__SHIFT 0x1c +#define UVD_MP_SWAP_CNTL__MP_REF15_MC_SWAP__SHIFT 0x1e +#define UVD_MP_SWAP_CNTL__MP_REF0_MC_SWAP_MASK 0x00000003L +#define UVD_MP_SWAP_CNTL__MP_REF1_MC_SWAP_MASK 0x0000000CL +#define UVD_MP_SWAP_CNTL__MP_REF2_MC_SWAP_MASK 0x00000030L +#define UVD_MP_SWAP_CNTL__MP_REF3_MC_SWAP_MASK 0x000000C0L +#define UVD_MP_SWAP_CNTL__MP_REF4_MC_SWAP_MASK 0x00000300L +#define 
UVD_MP_SWAP_CNTL__MP_REF5_MC_SWAP_MASK 0x00000C00L +#define UVD_MP_SWAP_CNTL__MP_REF6_MC_SWAP_MASK 0x00003000L +#define UVD_MP_SWAP_CNTL__MP_REF7_MC_SWAP_MASK 0x0000C000L +#define UVD_MP_SWAP_CNTL__MP_REF8_MC_SWAP_MASK 0x00030000L +#define UVD_MP_SWAP_CNTL__MP_REF9_MC_SWAP_MASK 0x000C0000L +#define UVD_MP_SWAP_CNTL__MP_REF10_MC_SWAP_MASK 0x00300000L +#define UVD_MP_SWAP_CNTL__MP_REF11_MC_SWAP_MASK 0x00C00000L +#define UVD_MP_SWAP_CNTL__MP_REF12_MC_SWAP_MASK 0x03000000L +#define UVD_MP_SWAP_CNTL__MP_REF13_MC_SWAP_MASK 0x0C000000L +#define UVD_MP_SWAP_CNTL__MP_REF14_MC_SWAP_MASK 0x30000000L +#define UVD_MP_SWAP_CNTL__MP_REF15_MC_SWAP_MASK 0xC0000000L +//UVD_MPC_LUMA_SRCH +#define UVD_MPC_LUMA_SRCH__CNTR__SHIFT 0x0 +#define UVD_MPC_LUMA_SRCH__CNTR_MASK 0xFFFFFFFFL +//UVD_MPC_LUMA_HIT +#define UVD_MPC_LUMA_HIT__CNTR__SHIFT 0x0 +#define UVD_MPC_LUMA_HIT__CNTR_MASK 0xFFFFFFFFL +//UVD_MPC_LUMA_HITPEND +#define UVD_MPC_LUMA_HITPEND__CNTR__SHIFT 0x0 +#define UVD_MPC_LUMA_HITPEND__CNTR_MASK 0xFFFFFFFFL +//UVD_MPC_CHROMA_SRCH +#define UVD_MPC_CHROMA_SRCH__CNTR__SHIFT 0x0 +#define UVD_MPC_CHROMA_SRCH__CNTR_MASK 0xFFFFFFFFL +//UVD_MPC_CHROMA_HIT +#define UVD_MPC_CHROMA_HIT__CNTR__SHIFT 0x0 +#define UVD_MPC_CHROMA_HIT__CNTR_MASK 0xFFFFFFFFL +//UVD_MPC_CHROMA_HITPEND +#define UVD_MPC_CHROMA_HITPEND__CNTR__SHIFT 0x0 +#define UVD_MPC_CHROMA_HITPEND__CNTR_MASK 0xFFFFFFFFL +//UVD_MPC_CNTL +#define UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT 0x3 +#define UVD_MPC_CNTL__PERF_RST__SHIFT 0x6 +#define UVD_MPC_CNTL__AVE_WEIGHT__SHIFT 0x10 +#define UVD_MPC_CNTL__URGENT_EN__SHIFT 0x12 +#define UVD_MPC_CNTL__SMPAT_REQ_SPEED_UP__SHIFT 0x13 +#define UVD_MPC_CNTL__TEST_MODE_EN__SHIFT 0x14 +#define UVD_MPC_CNTL__REPLACEMENT_MODE_MASK 0x00000038L +#define UVD_MPC_CNTL__PERF_RST_MASK 0x00000040L +#define UVD_MPC_CNTL__AVE_WEIGHT_MASK 0x00030000L +#define UVD_MPC_CNTL__URGENT_EN_MASK 0x00040000L +#define UVD_MPC_CNTL__SMPAT_REQ_SPEED_UP_MASK 0x00080000L +#define UVD_MPC_CNTL__TEST_MODE_EN_MASK 0x00100000L +//UVD_MPC_PITCH +#define UVD_MPC_PITCH__LUMA_PITCH__SHIFT 0x0 +#define UVD_MPC_PITCH__LUMA_PITCH_MASK 0x000007FFL +//UVD_MPC_SET_MUXA0 +#define UVD_MPC_SET_MUXA0__VARA_0__SHIFT 0x0 +#define UVD_MPC_SET_MUXA0__VARA_1__SHIFT 0x6 +#define UVD_MPC_SET_MUXA0__VARA_2__SHIFT 0xc +#define UVD_MPC_SET_MUXA0__VARA_3__SHIFT 0x12 +#define UVD_MPC_SET_MUXA0__VARA_4__SHIFT 0x18 +#define UVD_MPC_SET_MUXA0__VARA_0_MASK 0x0000003FL +#define UVD_MPC_SET_MUXA0__VARA_1_MASK 0x00000FC0L +#define UVD_MPC_SET_MUXA0__VARA_2_MASK 0x0003F000L +#define UVD_MPC_SET_MUXA0__VARA_3_MASK 0x00FC0000L +#define UVD_MPC_SET_MUXA0__VARA_4_MASK 0x3F000000L +//UVD_MPC_SET_MUXA1 +#define UVD_MPC_SET_MUXA1__VARA_5__SHIFT 0x0 +#define UVD_MPC_SET_MUXA1__VARA_6__SHIFT 0x6 +#define UVD_MPC_SET_MUXA1__VARA_7__SHIFT 0xc +#define UVD_MPC_SET_MUXA1__VARA_5_MASK 0x0000003FL +#define UVD_MPC_SET_MUXA1__VARA_6_MASK 0x00000FC0L +#define UVD_MPC_SET_MUXA1__VARA_7_MASK 0x0003F000L +//UVD_MPC_SET_MUXB0 +#define UVD_MPC_SET_MUXB0__VARB_0__SHIFT 0x0 +#define UVD_MPC_SET_MUXB0__VARB_1__SHIFT 0x6 +#define UVD_MPC_SET_MUXB0__VARB_2__SHIFT 0xc +#define UVD_MPC_SET_MUXB0__VARB_3__SHIFT 0x12 +#define UVD_MPC_SET_MUXB0__VARB_4__SHIFT 0x18 +#define UVD_MPC_SET_MUXB0__VARB_0_MASK 0x0000003FL +#define UVD_MPC_SET_MUXB0__VARB_1_MASK 0x00000FC0L +#define UVD_MPC_SET_MUXB0__VARB_2_MASK 0x0003F000L +#define UVD_MPC_SET_MUXB0__VARB_3_MASK 0x00FC0000L +#define UVD_MPC_SET_MUXB0__VARB_4_MASK 0x3F000000L +//UVD_MPC_SET_MUXB1 +#define UVD_MPC_SET_MUXB1__VARB_5__SHIFT 0x0 +#define 
UVD_MPC_SET_MUXB1__VARB_6__SHIFT 0x6 +#define UVD_MPC_SET_MUXB1__VARB_7__SHIFT 0xc +#define UVD_MPC_SET_MUXB1__VARB_5_MASK 0x0000003FL +#define UVD_MPC_SET_MUXB1__VARB_6_MASK 0x00000FC0L +#define UVD_MPC_SET_MUXB1__VARB_7_MASK 0x0003F000L +//UVD_MPC_SET_MUX +#define UVD_MPC_SET_MUX__SET_0__SHIFT 0x0 +#define UVD_MPC_SET_MUX__SET_1__SHIFT 0x3 +#define UVD_MPC_SET_MUX__SET_2__SHIFT 0x6 +#define UVD_MPC_SET_MUX__SET_0_MASK 0x00000007L +#define UVD_MPC_SET_MUX__SET_1_MASK 0x00000038L +#define UVD_MPC_SET_MUX__SET_2_MASK 0x000001C0L +//UVD_MPC_SET_ALU +#define UVD_MPC_SET_ALU__FUNCT__SHIFT 0x0 +#define UVD_MPC_SET_ALU__OPERAND__SHIFT 0x4 +#define UVD_MPC_SET_ALU__FUNCT_MASK 0x00000007L +#define UVD_MPC_SET_ALU__OPERAND_MASK 0x00000FF0L +//UVD_MPC_PERF0 +#define UVD_MPC_PERF0__MAX_LAT__SHIFT 0x0 +#define UVD_MPC_PERF0__MAX_LAT_MASK 0x000003FFL +//UVD_MPC_PERF1 +#define UVD_MPC_PERF1__AVE_LAT__SHIFT 0x0 +#define UVD_MPC_PERF1__AVE_LAT_MASK 0x000003FFL + + +// addressBlock: uvd0_uvd_rbcdec +//UVD_RBC_IB_SIZE +#define UVD_RBC_IB_SIZE__IB_SIZE__SHIFT 0x4 +#define UVD_RBC_IB_SIZE__IB_SIZE_MASK 0x007FFFF0L +//UVD_RBC_IB_SIZE_UPDATE +#define UVD_RBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE__SHIFT 0x4 +#define UVD_RBC_IB_SIZE_UPDATE__REMAIN_IB_SIZE_MASK 0x007FFFF0L +//UVD_RBC_RB_CNTL +#define UVD_RBC_RB_CNTL__RB_BUFSZ__SHIFT 0x0 +#define UVD_RBC_RB_CNTL__RB_BLKSZ__SHIFT 0x8 +#define UVD_RBC_RB_CNTL__RB_NO_FETCH__SHIFT 0x10 +#define UVD_RBC_RB_CNTL__RB_WPTR_POLL_EN__SHIFT 0x14 +#define UVD_RBC_RB_CNTL__RB_NO_UPDATE__SHIFT 0x18 +#define UVD_RBC_RB_CNTL__RB_RPTR_WR_EN__SHIFT 0x1c +#define UVD_RBC_RB_CNTL__RB_BUFSZ_MASK 0x0000001FL +#define UVD_RBC_RB_CNTL__RB_BLKSZ_MASK 0x00001F00L +#define UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK 0x00010000L +#define UVD_RBC_RB_CNTL__RB_WPTR_POLL_EN_MASK 0x00100000L +#define UVD_RBC_RB_CNTL__RB_NO_UPDATE_MASK 0x01000000L +#define UVD_RBC_RB_CNTL__RB_RPTR_WR_EN_MASK 0x10000000L +//UVD_RBC_RB_RPTR_ADDR +#define UVD_RBC_RB_RPTR_ADDR__RB_RPTR_ADDR__SHIFT 0x0 +#define UVD_RBC_RB_RPTR_ADDR__RB_RPTR_ADDR_MASK 0xFFFFFFFFL +//UVD_RBC_RB_RPTR +#define UVD_RBC_RB_RPTR__RB_RPTR__SHIFT 0x4 +#define UVD_RBC_RB_RPTR__RB_RPTR_MASK 0x007FFFF0L +//UVD_RBC_RB_WPTR +#define UVD_RBC_RB_WPTR__RB_WPTR__SHIFT 0x4 +#define UVD_RBC_RB_WPTR__RB_WPTR_MASK 0x007FFFF0L +//UVD_RBC_VCPU_ACCESS +#define UVD_RBC_VCPU_ACCESS__ENABLE_RBC__SHIFT 0x0 +#define UVD_RBC_VCPU_ACCESS__ENABLE_RBC_MASK 0x00000001L +//UVD_RBC_READ_REQ_URGENT_CNTL +#define UVD_RBC_READ_REQ_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK__SHIFT 0x0 +#define UVD_RBC_READ_REQ_URGENT_CNTL__CMD_READ_REQ_PRIORITY_MARK_MASK 0x00000003L +//UVD_RBC_RB_WPTR_CNTL +#define UVD_RBC_RB_WPTR_CNTL__RB_PRE_WRITE_TIMER__SHIFT 0x0 +#define UVD_RBC_RB_WPTR_CNTL__RB_PRE_WRITE_TIMER_MASK 0x00007FFFL +//UVD_RBC_WPTR_STATUS +#define UVD_RBC_WPTR_STATUS__RB_WPTR_IN_USE__SHIFT 0x4 +#define UVD_RBC_WPTR_STATUS__RB_WPTR_IN_USE_MASK 0x007FFFF0L +//UVD_RBC_WPTR_POLL_CNTL +#define UVD_RBC_WPTR_POLL_CNTL__POLL_FREQ__SHIFT 0x0 +#define UVD_RBC_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT 0x10 +#define UVD_RBC_WPTR_POLL_CNTL__POLL_FREQ_MASK 0x0000FFFFL +#define UVD_RBC_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK 0xFFFF0000L +//UVD_RBC_WPTR_POLL_ADDR +#define UVD_RBC_WPTR_POLL_ADDR__POLL_ADDR__SHIFT 0x2 +#define UVD_RBC_WPTR_POLL_ADDR__POLL_ADDR_MASK 0xFFFFFFFCL +//UVD_SEMA_CMD +#define UVD_SEMA_CMD__REQ_CMD__SHIFT 0x0 +#define UVD_SEMA_CMD__WR_PHASE__SHIFT 0x4 +#define UVD_SEMA_CMD__MODE__SHIFT 0x6 +#define UVD_SEMA_CMD__VMID_EN__SHIFT 0x7 +#define UVD_SEMA_CMD__VMID__SHIFT 0x8 +#define 
UVD_SEMA_CMD__REQ_CMD_MASK 0x0000000FL +#define UVD_SEMA_CMD__WR_PHASE_MASK 0x00000030L +#define UVD_SEMA_CMD__MODE_MASK 0x00000040L +#define UVD_SEMA_CMD__VMID_EN_MASK 0x00000080L +#define UVD_SEMA_CMD__VMID_MASK 0x00000F00L +//UVD_SEMA_ADDR_LOW +#define UVD_SEMA_ADDR_LOW__ADDR_26_3__SHIFT 0x0 +#define UVD_SEMA_ADDR_LOW__ADDR_26_3_MASK 0x00FFFFFFL +//UVD_SEMA_ADDR_HIGH +#define UVD_SEMA_ADDR_HIGH__ADDR_47_27__SHIFT 0x0 +#define UVD_SEMA_ADDR_HIGH__ADDR_47_27_MASK 0x001FFFFFL +//UVD_ENGINE_CNTL +#define UVD_ENGINE_CNTL__ENGINE_START__SHIFT 0x0 +#define UVD_ENGINE_CNTL__ENGINE_START_MODE__SHIFT 0x1 +#define UVD_ENGINE_CNTL__NJ_PF_HANDLE_DISABLE__SHIFT 0x2 +#define UVD_ENGINE_CNTL__ENGINE_START_MASK 0x00000001L +#define UVD_ENGINE_CNTL__ENGINE_START_MODE_MASK 0x00000002L +#define UVD_ENGINE_CNTL__NJ_PF_HANDLE_DISABLE_MASK 0x00000004L +//UVD_SEMA_TIMEOUT_STATUS +#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_WAIT_INCOMPLETE_TIMEOUT_STAT__SHIFT 0x0 +#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_WAIT_FAULT_TIMEOUT_STAT__SHIFT 0x1 +#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_SIGNAL_INCOMPLETE_TIMEOUT_STAT__SHIFT 0x2 +#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_TIMEOUT_CLEAR__SHIFT 0x3 +#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_WAIT_INCOMPLETE_TIMEOUT_STAT_MASK 0x00000001L +#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_WAIT_FAULT_TIMEOUT_STAT_MASK 0x00000002L +#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_SIGNAL_INCOMPLETE_TIMEOUT_STAT_MASK 0x00000004L +#define UVD_SEMA_TIMEOUT_STATUS__SEMAPHORE_TIMEOUT_CLEAR_MASK 0x00000008L +//UVD_SEMA_CNTL +#define UVD_SEMA_CNTL__SEMAPHORE_EN__SHIFT 0x0 +#define UVD_SEMA_CNTL__ADVANCED_MODE_DIS__SHIFT 0x1 +#define UVD_SEMA_CNTL__SEMAPHORE_EN_MASK 0x00000001L +#define UVD_SEMA_CNTL__ADVANCED_MODE_DIS_MASK 0x00000002L +//UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL +#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL__SIGNAL_INCOMPLETE_EN__SHIFT 0x0 +#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL__SIGNAL_INCOMPLETE_COUNT__SHIFT 0x1 +#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL__RESEND_TIMER__SHIFT 0x18 +#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL__SIGNAL_INCOMPLETE_EN_MASK 0x00000001L +#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL__SIGNAL_INCOMPLETE_COUNT_MASK 0x001FFFFEL +#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL__RESEND_TIMER_MASK 0x07000000L +//UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL +#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL__WAIT_FAULT_EN__SHIFT 0x0 +#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL__WAIT_FAULT_COUNT__SHIFT 0x1 +#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL__RESEND_TIMER__SHIFT 0x18 +#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL__WAIT_FAULT_EN_MASK 0x00000001L +#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL__WAIT_FAULT_COUNT_MASK 0x001FFFFEL +#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL__RESEND_TIMER_MASK 0x07000000L +//UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL +#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL__WAIT_INCOMPLETE_EN__SHIFT 0x0 +#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL__WAIT_INCOMPLETE_COUNT__SHIFT 0x1 +#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL__RESEND_TIMER__SHIFT 0x18 +#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL__WAIT_INCOMPLETE_EN_MASK 0x00000001L +#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL__WAIT_INCOMPLETE_COUNT_MASK 0x001FFFFEL +#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL__RESEND_TIMER_MASK 0x07000000L +//UVD_JOB_START +#define UVD_JOB_START__JOB_START__SHIFT 0x0 +#define UVD_JOB_START__JOB_START_MASK 0x00000001L +//UVD_RBC_BUF_STATUS +#define UVD_RBC_BUF_STATUS__RB_BUF_VALID__SHIFT 0x0 +#define UVD_RBC_BUF_STATUS__IB_BUF_VALID__SHIFT 0x8 +#define 
UVD_RBC_BUF_STATUS__RB_BUF_RD_ADDR__SHIFT 0x10 +#define UVD_RBC_BUF_STATUS__IB_BUF_RD_ADDR__SHIFT 0x13 +#define UVD_RBC_BUF_STATUS__RB_BUF_WR_ADDR__SHIFT 0x16 +#define UVD_RBC_BUF_STATUS__IB_BUF_WR_ADDR__SHIFT 0x19 +#define UVD_RBC_BUF_STATUS__RB_BUF_VALID_MASK 0x000000FFL +#define UVD_RBC_BUF_STATUS__IB_BUF_VALID_MASK 0x0000FF00L +#define UVD_RBC_BUF_STATUS__RB_BUF_RD_ADDR_MASK 0x00070000L +#define UVD_RBC_BUF_STATUS__IB_BUF_RD_ADDR_MASK 0x00380000L +#define UVD_RBC_BUF_STATUS__RB_BUF_WR_ADDR_MASK 0x01C00000L +#define UVD_RBC_BUF_STATUS__IB_BUF_WR_ADDR_MASK 0x0E000000L + + +// addressBlock: uvd0_uvdgendec +//UVD_LCM_CGC_CNTRL +#define UVD_LCM_CGC_CNTRL__FORCE_OFF__SHIFT 0x12 +#define UVD_LCM_CGC_CNTRL__FORCE_ON__SHIFT 0x13 +#define UVD_LCM_CGC_CNTRL__OFF_DELAY__SHIFT 0x14 +#define UVD_LCM_CGC_CNTRL__ON_DELAY__SHIFT 0x1c +#define UVD_LCM_CGC_CNTRL__FORCE_OFF_MASK 0x00040000L +#define UVD_LCM_CGC_CNTRL__FORCE_ON_MASK 0x00080000L +#define UVD_LCM_CGC_CNTRL__OFF_DELAY_MASK 0x0FF00000L +#define UVD_LCM_CGC_CNTRL__ON_DELAY_MASK 0xF0000000L + + +// addressBlock: uvd0_lmi_adpdec +//UVD_LMI_RBC_RB_64BIT_BAR_LOW +#define UVD_LMI_RBC_RB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_RBC_RB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_RBC_RB_64BIT_BAR_HIGH +#define UVD_LMI_RBC_RB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_RBC_RB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_RBC_IB_64BIT_BAR_LOW +#define UVD_LMI_RBC_IB_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_RBC_IB_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_RBC_IB_64BIT_BAR_HIGH +#define UVD_LMI_RBC_IB_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_RBC_IB_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_LBSI_64BIT_BAR_LOW +#define UVD_LMI_LBSI_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_LBSI_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_LBSI_64BIT_BAR_HIGH +#define UVD_LMI_LBSI_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_LBSI_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_NC0_64BIT_BAR_LOW +#define UVD_LMI_VCPU_NC0_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_VCPU_NC0_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_NC0_64BIT_BAR_HIGH +#define UVD_LMI_VCPU_NC0_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_VCPU_NC0_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_NC1_64BIT_BAR_LOW +#define UVD_LMI_VCPU_NC1_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_VCPU_NC1_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_NC1_64BIT_BAR_HIGH +#define UVD_LMI_VCPU_NC1_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_VCPU_NC1_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_CACHE_64BIT_BAR_LOW +#define UVD_LMI_VCPU_CACHE_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_VCPU_CACHE_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH +#define UVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW +#define UVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH +#define UVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_CACHE8_64BIT_BAR_LOW +#define UVD_LMI_VCPU_CACHE8_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define 
UVD_LMI_VCPU_CACHE8_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_CACHE8_64BIT_BAR_HIGH +#define UVD_LMI_VCPU_CACHE8_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_VCPU_CACHE8_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW +#define UVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH +#define UVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_CACHE3_64BIT_BAR_LOW +#define UVD_LMI_VCPU_CACHE3_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_VCPU_CACHE3_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_CACHE3_64BIT_BAR_HIGH +#define UVD_LMI_VCPU_CACHE3_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_VCPU_CACHE3_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_CACHE4_64BIT_BAR_LOW +#define UVD_LMI_VCPU_CACHE4_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_VCPU_CACHE4_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_CACHE4_64BIT_BAR_HIGH +#define UVD_LMI_VCPU_CACHE4_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_VCPU_CACHE4_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_CACHE5_64BIT_BAR_LOW +#define UVD_LMI_VCPU_CACHE5_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_VCPU_CACHE5_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_CACHE5_64BIT_BAR_HIGH +#define UVD_LMI_VCPU_CACHE5_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_VCPU_CACHE5_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_CACHE6_64BIT_BAR_LOW +#define UVD_LMI_VCPU_CACHE6_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_VCPU_CACHE6_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_CACHE6_64BIT_BAR_HIGH +#define UVD_LMI_VCPU_CACHE6_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_VCPU_CACHE6_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_CACHE7_64BIT_BAR_LOW +#define UVD_LMI_VCPU_CACHE7_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_VCPU_CACHE7_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_VCPU_CACHE7_64BIT_BAR_HIGH +#define UVD_LMI_VCPU_CACHE7_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_VCPU_CACHE7_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC0_64BIT_BAR_LOW +#define UVD_LMI_MMSCH_NC0_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC0_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC0_64BIT_BAR_HIGH +#define UVD_LMI_MMSCH_NC0_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC0_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC1_64BIT_BAR_LOW +#define UVD_LMI_MMSCH_NC1_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC1_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC1_64BIT_BAR_HIGH +#define UVD_LMI_MMSCH_NC1_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC1_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC2_64BIT_BAR_LOW +#define UVD_LMI_MMSCH_NC2_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC2_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC2_64BIT_BAR_HIGH +#define UVD_LMI_MMSCH_NC2_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC2_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC3_64BIT_BAR_LOW +#define UVD_LMI_MMSCH_NC3_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC3_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL 
+//UVD_LMI_MMSCH_NC3_64BIT_BAR_HIGH +#define UVD_LMI_MMSCH_NC3_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC3_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC4_64BIT_BAR_LOW +#define UVD_LMI_MMSCH_NC4_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC4_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC4_64BIT_BAR_HIGH +#define UVD_LMI_MMSCH_NC4_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC4_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC5_64BIT_BAR_LOW +#define UVD_LMI_MMSCH_NC5_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC5_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC5_64BIT_BAR_HIGH +#define UVD_LMI_MMSCH_NC5_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC5_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC6_64BIT_BAR_LOW +#define UVD_LMI_MMSCH_NC6_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC6_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC6_64BIT_BAR_HIGH +#define UVD_LMI_MMSCH_NC6_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC6_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC7_64BIT_BAR_LOW +#define UVD_LMI_MMSCH_NC7_64BIT_BAR_LOW__BITS_31_0__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC7_64BIT_BAR_LOW__BITS_31_0_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC7_64BIT_BAR_HIGH +#define UVD_LMI_MMSCH_NC7_64BIT_BAR_HIGH__BITS_63_32__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC7_64BIT_BAR_HIGH__BITS_63_32_MASK 0xFFFFFFFFL +//UVD_LMI_MMSCH_NC_VMID +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC0_VMID__SHIFT 0x0 +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC1_VMID__SHIFT 0x4 +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC2_VMID__SHIFT 0x8 +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC3_VMID__SHIFT 0xc +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC4_VMID__SHIFT 0x10 +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC5_VMID__SHIFT 0x14 +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC6_VMID__SHIFT 0x18 +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC7_VMID__SHIFT 0x1c +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC0_VMID_MASK 0x0000000FL +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC1_VMID_MASK 0x000000F0L +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC2_VMID_MASK 0x00000F00L +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC3_VMID_MASK 0x0000F000L +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC4_VMID_MASK 0x000F0000L +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC5_VMID_MASK 0x00F00000L +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC6_VMID_MASK 0x0F000000L +#define UVD_LMI_MMSCH_NC_VMID__MMSCH_NC7_VMID_MASK 0xF0000000L +//UVD_LMI_MMSCH_CTRL +#define UVD_LMI_MMSCH_CTRL__MMSCH_DATA_COHERENCY_EN__SHIFT 0x0 +#define UVD_LMI_MMSCH_CTRL__MMSCH_VM__SHIFT 0x1 +#define UVD_LMI_MMSCH_CTRL__MMSCH_R_MC_SWAP__SHIFT 0x3 +#define UVD_LMI_MMSCH_CTRL__MMSCH_W_MC_SWAP__SHIFT 0x5 +#define UVD_LMI_MMSCH_CTRL__MMSCH_RD__SHIFT 0x7 +#define UVD_LMI_MMSCH_CTRL__MMSCH_WR__SHIFT 0x9 +#define UVD_LMI_MMSCH_CTRL__MMSCH_RD_DROP__SHIFT 0xb +#define UVD_LMI_MMSCH_CTRL__MMSCH_WR_DROP__SHIFT 0xc +#define UVD_LMI_MMSCH_CTRL__MMSCH_DATA_COHERENCY_EN_MASK 0x00000001L +#define UVD_LMI_MMSCH_CTRL__MMSCH_VM_MASK 0x00000002L +#define UVD_LMI_MMSCH_CTRL__MMSCH_R_MC_SWAP_MASK 0x00000018L +#define UVD_LMI_MMSCH_CTRL__MMSCH_W_MC_SWAP_MASK 0x00000060L +#define UVD_LMI_MMSCH_CTRL__MMSCH_RD_MASK 0x00000180L +#define UVD_LMI_MMSCH_CTRL__MMSCH_WR_MASK 0x00000600L +#define UVD_LMI_MMSCH_CTRL__MMSCH_RD_DROP_MASK 0x00000800L +#define UVD_LMI_MMSCH_CTRL__MMSCH_WR_DROP_MASK 0x00001000L +//UVD_LMI_ARB_CTRL2 +#define UVD_LMI_ARB_CTRL2__CENC_RD_WAIT_EN__SHIFT 0x0 +#define 
UVD_LMI_ARB_CTRL2__ATOMIC_WR_WAIT_EN__SHIFT 0x1 +#define UVD_LMI_ARB_CTRL2__CENC_RD_MAX_BURST__SHIFT 0x2 +#define UVD_LMI_ARB_CTRL2__ATOMIC_WR_MAX_BURST__SHIFT 0x6 +#define UVD_LMI_ARB_CTRL2__MIF_RD_REQ_RET_MAX__SHIFT 0xa +#define UVD_LMI_ARB_CTRL2__MIF_WR_REQ_RET_MAX__SHIFT 0x14 +#define UVD_LMI_ARB_CTRL2__CENC_RD_WAIT_EN_MASK 0x00000001L +#define UVD_LMI_ARB_CTRL2__ATOMIC_WR_WAIT_EN_MASK 0x00000002L +#define UVD_LMI_ARB_CTRL2__CENC_RD_MAX_BURST_MASK 0x0000003CL +#define UVD_LMI_ARB_CTRL2__ATOMIC_WR_MAX_BURST_MASK 0x000003C0L +#define UVD_LMI_ARB_CTRL2__MIF_RD_REQ_RET_MAX_MASK 0x000FFC00L +#define UVD_LMI_ARB_CTRL2__MIF_WR_REQ_RET_MAX_MASK 0xFFF00000L +//UVD_LMI_VCPU_CACHE_VMIDS_MULTI +#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE1_VMID__SHIFT 0x0 +#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE2_VMID__SHIFT 0x4 +#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE3_VMID__SHIFT 0x8 +#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE4_VMID__SHIFT 0xc +#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE5_VMID__SHIFT 0x10 +#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE6_VMID__SHIFT 0x14 +#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE7_VMID__SHIFT 0x18 +#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE8_VMID__SHIFT 0x1c +#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE1_VMID_MASK 0x0000000FL +#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE2_VMID_MASK 0x000000F0L +#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE3_VMID_MASK 0x00000F00L +#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE4_VMID_MASK 0x0000F000L +#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE5_VMID_MASK 0x000F0000L +#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE6_VMID_MASK 0x00F00000L +#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE7_VMID_MASK 0x0F000000L +#define UVD_LMI_VCPU_CACHE_VMIDS_MULTI__VCPU_CACHE8_VMID_MASK 0xF0000000L +//UVD_LMI_VCPU_NC_VMIDS_MULTI +#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC2_VMID__SHIFT 0x4 +#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC3_VMID__SHIFT 0x8 +#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC4_VMID__SHIFT 0xc +#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC5_VMID__SHIFT 0x10 +#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC6_VMID__SHIFT 0x14 +#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC7_VMID__SHIFT 0x18 +#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC2_VMID_MASK 0x000000F0L +#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC3_VMID_MASK 0x00000F00L +#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC4_VMID_MASK 0x0000F000L +#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC5_VMID_MASK 0x000F0000L +#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC6_VMID_MASK 0x00F00000L +#define UVD_LMI_VCPU_NC_VMIDS_MULTI__VCPU_NC7_VMID_MASK 0x0F000000L +//UVD_LMI_LAT_CTRL +#define UVD_LMI_LAT_CTRL__SCALE__SHIFT 0x0 +#define UVD_LMI_LAT_CTRL__MAX_START__SHIFT 0x8 +#define UVD_LMI_LAT_CTRL__MIN_START__SHIFT 0x9 +#define UVD_LMI_LAT_CTRL__AVG_START__SHIFT 0xa +#define UVD_LMI_LAT_CTRL__PERFMON_SYNC__SHIFT 0xb +#define UVD_LMI_LAT_CTRL__SKIP__SHIFT 0x10 +#define UVD_LMI_LAT_CTRL__SCALE_MASK 0x000000FFL +#define UVD_LMI_LAT_CTRL__MAX_START_MASK 0x00000100L +#define UVD_LMI_LAT_CTRL__MIN_START_MASK 0x00000200L +#define UVD_LMI_LAT_CTRL__AVG_START_MASK 0x00000400L +#define UVD_LMI_LAT_CTRL__PERFMON_SYNC_MASK 0x00000800L +#define UVD_LMI_LAT_CTRL__SKIP_MASK 0x000F0000L +//UVD_LMI_LAT_CNTR +#define UVD_LMI_LAT_CNTR__MAX_LAT__SHIFT 0x0 +#define UVD_LMI_LAT_CNTR__MIN_LAT__SHIFT 0x8 +#define UVD_LMI_LAT_CNTR__MAX_LAT_MASK 0x000000FFL +#define UVD_LMI_LAT_CNTR__MIN_LAT_MASK 0x0000FF00L +//UVD_LMI_AVG_LAT_CNTR +#define 
UVD_LMI_AVG_LAT_CNTR__ENV_LOW__SHIFT 0x0 +#define UVD_LMI_AVG_LAT_CNTR__ENV_HIGH__SHIFT 0x8 +#define UVD_LMI_AVG_LAT_CNTR__ENV_HIT__SHIFT 0x10 +#define UVD_LMI_AVG_LAT_CNTR__ENV_LOW_MASK 0x000000FFL +#define UVD_LMI_AVG_LAT_CNTR__ENV_HIGH_MASK 0x0000FF00L +#define UVD_LMI_AVG_LAT_CNTR__ENV_HIT_MASK 0xFFFF0000L +//UVD_LMI_SPH +#define UVD_LMI_SPH__ADDR__SHIFT 0x0 +#define UVD_LMI_SPH__STS__SHIFT 0x1c +#define UVD_LMI_SPH__STS_VALID__SHIFT 0x1e +#define UVD_LMI_SPH__STS_OVERFLOW__SHIFT 0x1f +#define UVD_LMI_SPH__ADDR_MASK 0x0FFFFFFFL +#define UVD_LMI_SPH__STS_MASK 0x30000000L +#define UVD_LMI_SPH__STS_VALID_MASK 0x40000000L +#define UVD_LMI_SPH__STS_OVERFLOW_MASK 0x80000000L +//UVD_LMI_VCPU_CACHE_VMID +#define UVD_LMI_VCPU_CACHE_VMID__VCPU_CACHE_VMID__SHIFT 0x0 +#define UVD_LMI_VCPU_CACHE_VMID__VCPU_CACHE_VMID_MASK 0x0000000FL +//UVD_LMI_CTRL2 +#define UVD_LMI_CTRL2__SPH_DIS__SHIFT 0x0 +#define UVD_LMI_CTRL2__STALL_ARB__SHIFT 0x1 +#define UVD_LMI_CTRL2__ASSERT_UMC_URGENT__SHIFT 0x2 +#define UVD_LMI_CTRL2__MASK_UMC_URGENT__SHIFT 0x3 +#define UVD_LMI_CTRL2__CRC1_RESET__SHIFT 0x4 +#define UVD_LMI_CTRL2__DRCITF_BUBBLE_FIX_DIS__SHIFT 0x7 +#define UVD_LMI_CTRL2__STALL_ARB_UMC__SHIFT 0x8 +#define UVD_LMI_CTRL2__MC_READ_ID_SEL__SHIFT 0x9 +#define UVD_LMI_CTRL2__MC_WRITE_ID_SEL__SHIFT 0xb +#define UVD_LMI_CTRL2__VCPU_NC0_EXT_EN__SHIFT 0xd +#define UVD_LMI_CTRL2__VCPU_NC1_EXT_EN__SHIFT 0xe +#define UVD_LMI_CTRL2__SPU_EXTRA_CID_EN__SHIFT 0xf +#define UVD_LMI_CTRL2__RE_OFFLOAD_EN__SHIFT 0x10 +#define UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT 0x11 +#define UVD_LMI_CTRL2__CLEAR_NJ_PF_BP__SHIFT 0x19 +#define UVD_LMI_CTRL2__NJ_MIF_GATING__SHIFT 0x1a +#define UVD_LMI_CTRL2__CRC1_SEL__SHIFT 0x1b +#define UVD_LMI_CTRL2__SPH_DIS_MASK 0x00000001L +#define UVD_LMI_CTRL2__STALL_ARB_MASK 0x00000002L +#define UVD_LMI_CTRL2__ASSERT_UMC_URGENT_MASK 0x00000004L +#define UVD_LMI_CTRL2__MASK_UMC_URGENT_MASK 0x00000008L +#define UVD_LMI_CTRL2__CRC1_RESET_MASK 0x00000010L +#define UVD_LMI_CTRL2__DRCITF_BUBBLE_FIX_DIS_MASK 0x00000080L +#define UVD_LMI_CTRL2__STALL_ARB_UMC_MASK 0x00000100L +#define UVD_LMI_CTRL2__MC_READ_ID_SEL_MASK 0x00000600L +#define UVD_LMI_CTRL2__MC_WRITE_ID_SEL_MASK 0x00001800L +#define UVD_LMI_CTRL2__VCPU_NC0_EXT_EN_MASK 0x00002000L +#define UVD_LMI_CTRL2__VCPU_NC1_EXT_EN_MASK 0x00004000L +#define UVD_LMI_CTRL2__SPU_EXTRA_CID_EN_MASK 0x00008000L +#define UVD_LMI_CTRL2__RE_OFFLOAD_EN_MASK 0x00010000L +#define UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM_MASK 0x01FE0000L +#define UVD_LMI_CTRL2__CLEAR_NJ_PF_BP_MASK 0x02000000L +#define UVD_LMI_CTRL2__NJ_MIF_GATING_MASK 0x04000000L +#define UVD_LMI_CTRL2__CRC1_SEL_MASK 0xF8000000L +//UVD_LMI_URGENT_CTRL +#define UVD_LMI_URGENT_CTRL__ENABLE_MC_RD_URGENT_STALL__SHIFT 0x0 +#define UVD_LMI_URGENT_CTRL__ASSERT_MC_RD_STALL__SHIFT 0x1 +#define UVD_LMI_URGENT_CTRL__ASSERT_MC_RD_URGENT__SHIFT 0x2 +#define UVD_LMI_URGENT_CTRL__ENABLE_MC_WR_URGENT_STALL__SHIFT 0x8 +#define UVD_LMI_URGENT_CTRL__ASSERT_MC_WR_STALL__SHIFT 0x9 +#define UVD_LMI_URGENT_CTRL__ASSERT_MC_WR_URGENT__SHIFT 0xa +#define UVD_LMI_URGENT_CTRL__ENABLE_UMC_RD_URGENT_STALL__SHIFT 0x10 +#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_RD_STALL__SHIFT 0x11 +#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_RD_URGENT__SHIFT 0x12 +#define UVD_LMI_URGENT_CTRL__ENABLE_UMC_WR_URGENT_STALL__SHIFT 0x18 +#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_WR_STALL__SHIFT 0x19 +#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_WR_URGENT__SHIFT 0x1a +#define UVD_LMI_URGENT_CTRL__ENABLE_MC_RD_URGENT_STALL_MASK 0x00000001L +#define 
UVD_LMI_URGENT_CTRL__ASSERT_MC_RD_STALL_MASK 0x00000002L +#define UVD_LMI_URGENT_CTRL__ASSERT_MC_RD_URGENT_MASK 0x0000003CL +#define UVD_LMI_URGENT_CTRL__ENABLE_MC_WR_URGENT_STALL_MASK 0x00000100L +#define UVD_LMI_URGENT_CTRL__ASSERT_MC_WR_STALL_MASK 0x00000200L +#define UVD_LMI_URGENT_CTRL__ASSERT_MC_WR_URGENT_MASK 0x00003C00L +#define UVD_LMI_URGENT_CTRL__ENABLE_UMC_RD_URGENT_STALL_MASK 0x00010000L +#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_RD_STALL_MASK 0x00020000L +#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_RD_URGENT_MASK 0x003C0000L +#define UVD_LMI_URGENT_CTRL__ENABLE_UMC_WR_URGENT_STALL_MASK 0x01000000L +#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_WR_STALL_MASK 0x02000000L +#define UVD_LMI_URGENT_CTRL__ASSERT_UMC_WR_URGENT_MASK 0x3C000000L +//UVD_LMI_CTRL +#define UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT 0x0 +#define UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN__SHIFT 0x8 +#define UVD_LMI_CTRL__REQ_MODE__SHIFT 0x9 +#define UVD_LMI_CTRL__ASSERT_MC_URGENT__SHIFT 0xb +#define UVD_LMI_CTRL__MASK_MC_URGENT__SHIFT 0xc +#define UVD_LMI_CTRL__DATA_COHERENCY_EN__SHIFT 0xd +#define UVD_LMI_CTRL__CRC_RESET__SHIFT 0xe +#define UVD_LMI_CTRL__CRC_SEL__SHIFT 0xf +#define UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN__SHIFT 0x15 +#define UVD_LMI_CTRL__CM_DATA_COHERENCY_EN__SHIFT 0x16 +#define UVD_LMI_CTRL__DB_DB_DATA_COHERENCY_EN__SHIFT 0x17 +#define UVD_LMI_CTRL__DB_IT_DATA_COHERENCY_EN__SHIFT 0x18 +#define UVD_LMI_CTRL__IT_IT_DATA_COHERENCY_EN__SHIFT 0x19 +#define UVD_LMI_CTRL__MIF_MIF_DATA_COHERENCY_EN__SHIFT 0x1a +#define UVD_LMI_CTRL__MIF_LESS_OUTSTANDING_RD_REQ__SHIFT 0x1b +#define UVD_LMI_CTRL__RFU__SHIFT 0x1e +#define UVD_LMI_CTRL__WRITE_CLEAN_TIMER_MASK 0x000000FFL +#define UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK 0x00000100L +#define UVD_LMI_CTRL__REQ_MODE_MASK 0x00000200L +#define UVD_LMI_CTRL__ASSERT_MC_URGENT_MASK 0x00000800L +#define UVD_LMI_CTRL__MASK_MC_URGENT_MASK 0x00001000L +#define UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK 0x00002000L +#define UVD_LMI_CTRL__CRC_RESET_MASK 0x00004000L +#define UVD_LMI_CTRL__CRC_SEL_MASK 0x000F8000L +#define UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK 0x00200000L +#define UVD_LMI_CTRL__CM_DATA_COHERENCY_EN_MASK 0x00400000L +#define UVD_LMI_CTRL__DB_DB_DATA_COHERENCY_EN_MASK 0x00800000L +#define UVD_LMI_CTRL__DB_IT_DATA_COHERENCY_EN_MASK 0x01000000L +#define UVD_LMI_CTRL__IT_IT_DATA_COHERENCY_EN_MASK 0x02000000L +#define UVD_LMI_CTRL__MIF_MIF_DATA_COHERENCY_EN_MASK 0x04000000L +#define UVD_LMI_CTRL__MIF_LESS_OUTSTANDING_RD_REQ_MASK 0x08000000L +#define UVD_LMI_CTRL__RFU_MASK 0xC0000000L +//UVD_LMI_STATUS +#define UVD_LMI_STATUS__READ_CLEAN__SHIFT 0x0 +#define UVD_LMI_STATUS__WRITE_CLEAN__SHIFT 0x1 +#define UVD_LMI_STATUS__WRITE_CLEAN_RAW__SHIFT 0x2 +#define UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN__SHIFT 0x3 +#define UVD_LMI_STATUS__UMC_READ_CLEAN__SHIFT 0x4 +#define UVD_LMI_STATUS__UMC_WRITE_CLEAN__SHIFT 0x5 +#define UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW__SHIFT 0x6 +#define UVD_LMI_STATUS__PENDING_UVD_MC_WRITE__SHIFT 0x7 +#define UVD_LMI_STATUS__READ_CLEAN_RAW__SHIFT 0x8 +#define UVD_LMI_STATUS__UMC_READ_CLEAN_RAW__SHIFT 0x9 +#define UVD_LMI_STATUS__UMC_UVD_IDLE__SHIFT 0xa +#define UVD_LMI_STATUS__UMC_AVP_IDLE__SHIFT 0xb +#define UVD_LMI_STATUS__ADP_MC_READ_CLEAN__SHIFT 0xc +#define UVD_LMI_STATUS__ADP_UMC_READ_CLEAN__SHIFT 0xd +#define UVD_LMI_STATUS__BSP0_WRITE_CLEAN__SHIFT 0x12 +#define UVD_LMI_STATUS__BSP1_WRITE_CLEAN__SHIFT 0x13 +#define UVD_LMI_STATUS__BSP2_WRITE_CLEAN__SHIFT 0x14 +#define UVD_LMI_STATUS__BSP3_WRITE_CLEAN__SHIFT 0x15 +#define UVD_LMI_STATUS__CENC_READ_CLEAN__SHIFT 
0x16 +#define UVD_LMI_STATUS__READ_CLEAN_MASK 0x00000001L +#define UVD_LMI_STATUS__WRITE_CLEAN_MASK 0x00000002L +#define UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK 0x00000004L +#define UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK 0x00000008L +#define UVD_LMI_STATUS__UMC_READ_CLEAN_MASK 0x00000010L +#define UVD_LMI_STATUS__UMC_WRITE_CLEAN_MASK 0x00000020L +#define UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK 0x00000040L +#define UVD_LMI_STATUS__PENDING_UVD_MC_WRITE_MASK 0x00000080L +#define UVD_LMI_STATUS__READ_CLEAN_RAW_MASK 0x00000100L +#define UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK 0x00000200L +#define UVD_LMI_STATUS__UMC_UVD_IDLE_MASK 0x00000400L +#define UVD_LMI_STATUS__UMC_AVP_IDLE_MASK 0x00000800L +#define UVD_LMI_STATUS__ADP_MC_READ_CLEAN_MASK 0x00001000L +#define UVD_LMI_STATUS__ADP_UMC_READ_CLEAN_MASK 0x00002000L +#define UVD_LMI_STATUS__BSP0_WRITE_CLEAN_MASK 0x00040000L +#define UVD_LMI_STATUS__BSP1_WRITE_CLEAN_MASK 0x00080000L +#define UVD_LMI_STATUS__BSP2_WRITE_CLEAN_MASK 0x00100000L +#define UVD_LMI_STATUS__BSP3_WRITE_CLEAN_MASK 0x00200000L +#define UVD_LMI_STATUS__CENC_READ_CLEAN_MASK 0x00400000L +//UVD_LMI_PERFMON_CTRL +#define UVD_LMI_PERFMON_CTRL__PERFMON_STATE__SHIFT 0x0 +#define UVD_LMI_PERFMON_CTRL__PERFMON_SEL__SHIFT 0x8 +#define UVD_LMI_PERFMON_CTRL__PERFMON_STATE_MASK 0x00000003L +#define UVD_LMI_PERFMON_CTRL__PERFMON_SEL_MASK 0x00001F00L +//UVD_LMI_PERFMON_COUNT_LO +#define UVD_LMI_PERFMON_COUNT_LO__PERFMON_COUNT__SHIFT 0x0 +#define UVD_LMI_PERFMON_COUNT_LO__PERFMON_COUNT_MASK 0xFFFFFFFFL +//UVD_LMI_PERFMON_COUNT_HI +#define UVD_LMI_PERFMON_COUNT_HI__PERFMON_COUNT__SHIFT 0x0 +#define UVD_LMI_PERFMON_COUNT_HI__PERFMON_COUNT_MASK 0x0000FFFFL +//UVD_LMI_RBC_RB_VMID +#define UVD_LMI_RBC_RB_VMID__RB_VMID__SHIFT 0x0 +#define UVD_LMI_RBC_RB_VMID__RB_VMID_MASK 0x0000000FL +//UVD_LMI_RBC_IB_VMID +#define UVD_LMI_RBC_IB_VMID__IB_VMID__SHIFT 0x0 +#define UVD_LMI_RBC_IB_VMID__IB_VMID_MASK 0x0000000FL +//UVD_LMI_MC_CREDITS +#define UVD_LMI_MC_CREDITS__UVD_RD_CREDITS__SHIFT 0x0 +#define UVD_LMI_MC_CREDITS__UVD_WR_CREDITS__SHIFT 0x8 +#define UVD_LMI_MC_CREDITS__UMC_RD_CREDITS__SHIFT 0x10 +#define UVD_LMI_MC_CREDITS__UMC_WR_CREDITS__SHIFT 0x18 +#define UVD_LMI_MC_CREDITS__UVD_RD_CREDITS_MASK 0x0000003FL +#define UVD_LMI_MC_CREDITS__UVD_WR_CREDITS_MASK 0x00003F00L +#define UVD_LMI_MC_CREDITS__UMC_RD_CREDITS_MASK 0x003F0000L +#define UVD_LMI_MC_CREDITS__UMC_WR_CREDITS_MASK 0x3F000000L + + +// addressBlock: uvd0_uvdnpdec +//MDM_DMA_CMD +#define MDM_DMA_CMD__MDM_DMA_CMD__SHIFT 0x0 +#define MDM_DMA_CMD__MDM_DMA_CMD_MASK 0xFFFFFFFFL +//MDM_DMA_STATUS +#define MDM_DMA_STATUS__SDB_DMA_WR_BUSY__SHIFT 0x0 +#define MDM_DMA_STATUS__SCM_DMA_WR_BUSY__SHIFT 0x1 +#define MDM_DMA_STATUS__SCM_DMA_RD_BUSY__SHIFT 0x2 +#define MDM_DMA_STATUS__RB_DMA_WR_BUSY__SHIFT 0x3 +#define MDM_DMA_STATUS__RB_DMA_RD_BUSY__SHIFT 0x4 +#define MDM_DMA_STATUS__SDB_DMA_RD_BUSY__SHIFT 0x5 +#define MDM_DMA_STATUS__SCLR_DMA_WR_BUSY__SHIFT 0x6 +#define MDM_DMA_STATUS__SDB_DMA_WR_BUSY_MASK 0x00000001L +#define MDM_DMA_STATUS__SCM_DMA_WR_BUSY_MASK 0x00000002L +#define MDM_DMA_STATUS__SCM_DMA_RD_BUSY_MASK 0x00000004L +#define MDM_DMA_STATUS__RB_DMA_WR_BUSY_MASK 0x00000008L +#define MDM_DMA_STATUS__RB_DMA_RD_BUSY_MASK 0x00000010L +#define MDM_DMA_STATUS__SDB_DMA_RD_BUSY_MASK 0x00000020L +#define MDM_DMA_STATUS__SCLR_DMA_WR_BUSY_MASK 0x00000040L +//MDM_DMA_CTL +#define MDM_DMA_CTL__MDM_BYPASS__SHIFT 0x0 +#define MDM_DMA_CTL__FOUR_CMD__SHIFT 0x1 +#define MDM_DMA_CTL__ENCODE_MODE__SHIFT 0x2 +#define MDM_DMA_CTL__VP9_DEC_MODE__SHIFT 0x3 
+#define MDM_DMA_CTL__SW_DRST__SHIFT 0x1f +#define MDM_DMA_CTL__MDM_BYPASS_MASK 0x00000001L +#define MDM_DMA_CTL__FOUR_CMD_MASK 0x00000002L +#define MDM_DMA_CTL__ENCODE_MODE_MASK 0x00000004L +#define MDM_DMA_CTL__VP9_DEC_MODE_MASK 0x00000008L +#define MDM_DMA_CTL__SW_DRST_MASK 0x80000000L +//MDM_ENC_PIPE_BUSY +#define MDM_ENC_PIPE_BUSY__IME_BUSY__SHIFT 0x0 +#define MDM_ENC_PIPE_BUSY__SMP_BUSY__SHIFT 0x1 +#define MDM_ENC_PIPE_BUSY__SIT_BUSY__SHIFT 0x2 +#define MDM_ENC_PIPE_BUSY__SDB_BUSY__SHIFT 0x3 +#define MDM_ENC_PIPE_BUSY__ENT_BUSY__SHIFT 0x4 +#define MDM_ENC_PIPE_BUSY__ENT_HEADER_BUSY__SHIFT 0x5 +#define MDM_ENC_PIPE_BUSY__LCM_BUSY__SHIFT 0x6 +#define MDM_ENC_PIPE_BUSY__MDM_RD_CUR_BUSY__SHIFT 0x7 +#define MDM_ENC_PIPE_BUSY__MDM_RD_REF_BUSY__SHIFT 0x8 +#define MDM_ENC_PIPE_BUSY__MDM_RD_GEN_BUSY__SHIFT 0x9 +#define MDM_ENC_PIPE_BUSY__MDM_WR_RECON_BUSY__SHIFT 0xa +#define MDM_ENC_PIPE_BUSY__MDM_WR_GEN_BUSY__SHIFT 0xb +#define MDM_ENC_PIPE_BUSY__MDM_EFC_BUSY__SHIFT 0xc +#define MDM_ENC_PIPE_BUSY__MDM_EFC_PROGRAM_BUSY__SHIFT 0xd +#define MDM_ENC_PIPE_BUSY__MIF_RD_CUR_BUSY__SHIFT 0x10 +#define MDM_ENC_PIPE_BUSY__MIF_RD_REF0_BUSY__SHIFT 0x11 +#define MDM_ENC_PIPE_BUSY__MIF_WR_GEN0_BUSY__SHIFT 0x12 +#define MDM_ENC_PIPE_BUSY__MIF_RD_GEN0_BUSY__SHIFT 0x13 +#define MDM_ENC_PIPE_BUSY__MIF_WR_GEN1_BUSY__SHIFT 0x14 +#define MDM_ENC_PIPE_BUSY__MIF_RD_GEN1_BUSY__SHIFT 0x15 +#define MDM_ENC_PIPE_BUSY__MIF_WR_BSP0_BUSY__SHIFT 0x16 +#define MDM_ENC_PIPE_BUSY__MIF_WR_BSP1_BUSY__SHIFT 0x17 +#define MDM_ENC_PIPE_BUSY__MIF_RD_BSD0_BUSY__SHIFT 0x18 +#define MDM_ENC_PIPE_BUSY__MIF_RD_BSD1_BUSY__SHIFT 0x19 +#define MDM_ENC_PIPE_BUSY__MIF_RD_BSD2_BUSY__SHIFT 0x1a +#define MDM_ENC_PIPE_BUSY__MIF_RD_BSD3_BUSY__SHIFT 0x1b +#define MDM_ENC_PIPE_BUSY__MIF_RD_BSD4_BUSY__SHIFT 0x1c +#define MDM_ENC_PIPE_BUSY__IME_BUSY_MASK 0x00000001L +#define MDM_ENC_PIPE_BUSY__SMP_BUSY_MASK 0x00000002L +#define MDM_ENC_PIPE_BUSY__SIT_BUSY_MASK 0x00000004L +#define MDM_ENC_PIPE_BUSY__SDB_BUSY_MASK 0x00000008L +#define MDM_ENC_PIPE_BUSY__ENT_BUSY_MASK 0x00000010L +#define MDM_ENC_PIPE_BUSY__ENT_HEADER_BUSY_MASK 0x00000020L +#define MDM_ENC_PIPE_BUSY__LCM_BUSY_MASK 0x00000040L +#define MDM_ENC_PIPE_BUSY__MDM_RD_CUR_BUSY_MASK 0x00000080L +#define MDM_ENC_PIPE_BUSY__MDM_RD_REF_BUSY_MASK 0x00000100L +#define MDM_ENC_PIPE_BUSY__MDM_RD_GEN_BUSY_MASK 0x00000200L +#define MDM_ENC_PIPE_BUSY__MDM_WR_RECON_BUSY_MASK 0x00000400L +#define MDM_ENC_PIPE_BUSY__MDM_WR_GEN_BUSY_MASK 0x00000800L +#define MDM_ENC_PIPE_BUSY__MDM_EFC_BUSY_MASK 0x00001000L +#define MDM_ENC_PIPE_BUSY__MDM_EFC_PROGRAM_BUSY_MASK 0x00002000L +#define MDM_ENC_PIPE_BUSY__MIF_RD_CUR_BUSY_MASK 0x00010000L +#define MDM_ENC_PIPE_BUSY__MIF_RD_REF0_BUSY_MASK 0x00020000L +#define MDM_ENC_PIPE_BUSY__MIF_WR_GEN0_BUSY_MASK 0x00040000L +#define MDM_ENC_PIPE_BUSY__MIF_RD_GEN0_BUSY_MASK 0x00080000L +#define MDM_ENC_PIPE_BUSY__MIF_WR_GEN1_BUSY_MASK 0x00100000L +#define MDM_ENC_PIPE_BUSY__MIF_RD_GEN1_BUSY_MASK 0x00200000L +#define MDM_ENC_PIPE_BUSY__MIF_WR_BSP0_BUSY_MASK 0x00400000L +#define MDM_ENC_PIPE_BUSY__MIF_WR_BSP1_BUSY_MASK 0x00800000L +#define MDM_ENC_PIPE_BUSY__MIF_RD_BSD0_BUSY_MASK 0x01000000L +#define MDM_ENC_PIPE_BUSY__MIF_RD_BSD1_BUSY_MASK 0x02000000L +#define MDM_ENC_PIPE_BUSY__MIF_RD_BSD2_BUSY_MASK 0x04000000L +#define MDM_ENC_PIPE_BUSY__MIF_RD_BSD3_BUSY_MASK 0x08000000L +#define MDM_ENC_PIPE_BUSY__MIF_RD_BSD4_BUSY_MASK 0x10000000L +//MDM_WIG_PIPE_BUSY +#define MDM_WIG_PIPE_BUSY__WIG_TBE_BUSY__SHIFT 0x0 +#define MDM_WIG_PIPE_BUSY__WIG_ENT_BUSY__SHIFT 0x1 +#define 
MDM_WIG_PIPE_BUSY__WIG_ENT_HEADER_BUSY__SHIFT 0x2 +#define MDM_WIG_PIPE_BUSY__WIG_ENT_HEADER_FIFO_FULL__SHIFT 0x3 +#define MDM_WIG_PIPE_BUSY__LCM_BUSY__SHIFT 0x4 +#define MDM_WIG_PIPE_BUSY__MDM_RD_CUR_BUSY__SHIFT 0x5 +#define MDM_WIG_PIPE_BUSY__MDM_RD_REF_BUSY__SHIFT 0x6 +#define MDM_WIG_PIPE_BUSY__MDM_RD_GEN_BUSY__SHIFT 0x7 +#define MDM_WIG_PIPE_BUSY__MDM_WR_RECON_BUSY__SHIFT 0x8 +#define MDM_WIG_PIPE_BUSY__MDM_WR_GEN_BUSY__SHIFT 0x9 +#define MDM_WIG_PIPE_BUSY__MIF_RD_CUR_BUSY__SHIFT 0xa +#define MDM_WIG_PIPE_BUSY__MIF_RD_REF0_BUSY__SHIFT 0xb +#define MDM_WIG_PIPE_BUSY__MIF_WR_GEN0_BUSY__SHIFT 0xc +#define MDM_WIG_PIPE_BUSY__MIF_RD_GEN0_BUSY__SHIFT 0xd +#define MDM_WIG_PIPE_BUSY__MIF_WR_GEN1_BUSY__SHIFT 0xe +#define MDM_WIG_PIPE_BUSY__MIF_RD_GEN1_BUSY__SHIFT 0xf +#define MDM_WIG_PIPE_BUSY__MIF_WR_BSP0_BUSY__SHIFT 0x10 +#define MDM_WIG_PIPE_BUSY__MIF_WR_BSP1_BUSY__SHIFT 0x11 +#define MDM_WIG_PIPE_BUSY__MIF_RD_BSD0_BUSY__SHIFT 0x12 +#define MDM_WIG_PIPE_BUSY__MIF_RD_BSD1_BUSY__SHIFT 0x13 +#define MDM_WIG_PIPE_BUSY__MIF_RD_BSD2_BUSY__SHIFT 0x14 +#define MDM_WIG_PIPE_BUSY__MIF_RD_BSD3_BUSY__SHIFT 0x15 +#define MDM_WIG_PIPE_BUSY__MIF_RD_BSD4_BUSY__SHIFT 0x16 +#define MDM_WIG_PIPE_BUSY__MIF_RD_BSD5_BUSY__SHIFT 0x17 +#define MDM_WIG_PIPE_BUSY__MIF_WR_BSP2_BUSY__SHIFT 0x18 +#define MDM_WIG_PIPE_BUSY__MIF_WR_BSP3_BUSY__SHIFT 0x19 +#define MDM_WIG_PIPE_BUSY__LCM_BSP0_NOT_EMPTY__SHIFT 0x1a +#define MDM_WIG_PIPE_BUSY__LCM_BSP1_NOT_EMPTY__SHIFT 0x1b +#define MDM_WIG_PIPE_BUSY__LCM_BSP2_NOT_EMPTY__SHIFT 0x1c +#define MDM_WIG_PIPE_BUSY__LCM_BSP3_NOT_EMPTY__SHIFT 0x1d +#define MDM_WIG_PIPE_BUSY__WIG_TBE_BUSY_MASK 0x00000001L +#define MDM_WIG_PIPE_BUSY__WIG_ENT_BUSY_MASK 0x00000002L +#define MDM_WIG_PIPE_BUSY__WIG_ENT_HEADER_BUSY_MASK 0x00000004L +#define MDM_WIG_PIPE_BUSY__WIG_ENT_HEADER_FIFO_FULL_MASK 0x00000008L +#define MDM_WIG_PIPE_BUSY__LCM_BUSY_MASK 0x00000010L +#define MDM_WIG_PIPE_BUSY__MDM_RD_CUR_BUSY_MASK 0x00000020L +#define MDM_WIG_PIPE_BUSY__MDM_RD_REF_BUSY_MASK 0x00000040L +#define MDM_WIG_PIPE_BUSY__MDM_RD_GEN_BUSY_MASK 0x00000080L +#define MDM_WIG_PIPE_BUSY__MDM_WR_RECON_BUSY_MASK 0x00000100L +#define MDM_WIG_PIPE_BUSY__MDM_WR_GEN_BUSY_MASK 0x00000200L +#define MDM_WIG_PIPE_BUSY__MIF_RD_CUR_BUSY_MASK 0x00000400L +#define MDM_WIG_PIPE_BUSY__MIF_RD_REF0_BUSY_MASK 0x00000800L +#define MDM_WIG_PIPE_BUSY__MIF_WR_GEN0_BUSY_MASK 0x00001000L +#define MDM_WIG_PIPE_BUSY__MIF_RD_GEN0_BUSY_MASK 0x00002000L +#define MDM_WIG_PIPE_BUSY__MIF_WR_GEN1_BUSY_MASK 0x00004000L +#define MDM_WIG_PIPE_BUSY__MIF_RD_GEN1_BUSY_MASK 0x00008000L +#define MDM_WIG_PIPE_BUSY__MIF_WR_BSP0_BUSY_MASK 0x00010000L +#define MDM_WIG_PIPE_BUSY__MIF_WR_BSP1_BUSY_MASK 0x00020000L +#define MDM_WIG_PIPE_BUSY__MIF_RD_BSD0_BUSY_MASK 0x00040000L +#define MDM_WIG_PIPE_BUSY__MIF_RD_BSD1_BUSY_MASK 0x00080000L +#define MDM_WIG_PIPE_BUSY__MIF_RD_BSD2_BUSY_MASK 0x00100000L +#define MDM_WIG_PIPE_BUSY__MIF_RD_BSD3_BUSY_MASK 0x00200000L +#define MDM_WIG_PIPE_BUSY__MIF_RD_BSD4_BUSY_MASK 0x00400000L +#define MDM_WIG_PIPE_BUSY__MIF_RD_BSD5_BUSY_MASK 0x00800000L +#define MDM_WIG_PIPE_BUSY__MIF_WR_BSP2_BUSY_MASK 0x01000000L +#define MDM_WIG_PIPE_BUSY__MIF_WR_BSP3_BUSY_MASK 0x02000000L +#define MDM_WIG_PIPE_BUSY__LCM_BSP0_NOT_EMPTY_MASK 0x04000000L +#define MDM_WIG_PIPE_BUSY__LCM_BSP1_NOT_EMPTY_MASK 0x08000000L +#define MDM_WIG_PIPE_BUSY__LCM_BSP2_NOT_EMPTY_MASK 0x10000000L +#define MDM_WIG_PIPE_BUSY__LCM_BSP3_NOT_EMPTY_MASK 0x20000000L + + +// addressBlock: lmi_adp_indirect +//UVD_LMI_CRC0 +#define UVD_LMI_CRC0__CRC32__SHIFT 0x0 +#define 
UVD_LMI_CRC0__CRC32_MASK 0xFFFFFFFFL
+//UVD_LMI_CRC1
+#define UVD_LMI_CRC1__CRC32__SHIFT 0x0
+#define UVD_LMI_CRC1__CRC32_MASK 0xFFFFFFFFL
+//UVD_LMI_CRC2
+#define UVD_LMI_CRC2__CRC32__SHIFT 0x0
+#define UVD_LMI_CRC2__CRC32_MASK 0xFFFFFFFFL
+//UVD_LMI_CRC3
+#define UVD_LMI_CRC3__CRC32__SHIFT 0x0
+#define UVD_LMI_CRC3__CRC32_MASK 0xFFFFFFFFL
+
+
+#endif
-- cgit v1.2.3

From f1cf876931afa1d48bbefa0f3095bfd3cf9fb04c Mon Sep 17 00:00:00 2001
From: Le Ma
Date: Wed, 29 Aug 2018 16:28:38 +0800
Subject: drm/amdgpu: add Arcturus ip_offset header (v3)

Provides the absolute offsets of the IP register blocks.

v2: update chip name in source code
v3: squash in MP offset updates (Alex)

Signed-off-by: Le Ma
Reviewed-by: Hawking Zhang
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/include/arct_ip_offset.h | 1654 ++++++++++++++++++++++++++
 1 file changed, 1654 insertions(+)
 create mode 100644 drivers/gpu/drm/amd/include/arct_ip_offset.h

(limited to 'drivers/gpu/drm')

diff --git a/drivers/gpu/drm/amd/include/arct_ip_offset.h b/drivers/gpu/drm/amd/include/arct_ip_offset.h
new file mode 100644
index 000000000000..3211b3a96d68
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/arct_ip_offset.h
@@ -0,0 +1,1654 @@
+/*
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ +#ifndef _arct_ip_offset_HEADER +#define _arct_ip_offset_HEADER + +#define MAX_INSTANCE 8 +#define MAX_SEGMENT 6 + + +struct IP_BASE_INSTANCE +{ + unsigned int segment[MAX_SEGMENT]; +}; + +struct IP_BASE +{ + struct IP_BASE_INSTANCE instance[MAX_INSTANCE]; +}; + + +static const struct IP_BASE ATHUB_BASE ={ { { { 0x00000C20, 0x00012460, 0x00408C00, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE CLK_BASE ={ { { { 0x000120C0, 0x00016C00, 0x00401800, 0, 0, 0 } }, + { { 0x000120E0, 0x00016E00, 0x00401C00, 0, 0, 0 } }, + { { 0x00012100, 0x00017000, 0x00402000, 0, 0, 0 } }, + { { 0x00012120, 0x00017200, 0x00402400, 0, 0, 0 } }, + { { 0x000136C0, 0x0001B000, 0x0042D800, 0, 0, 0 } }, + { { 0x00013720, 0x0001B200, 0x0042E400, 0, 0, 0 } }, + { { 0x000125E0, 0x00017E00, 0x0040BC00, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE DF_BASE ={ { { { 0x00007000, 0x000125C0, 0x0040B800, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE FUSE_BASE ={ { { { 0x000120A0, 0x00017400, 0x00401400, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE GC_BASE ={ { { { 0x00002000, 0x0000A000, 0x00012160, 0x00402C00, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE HDP_BASE ={ { { { 0x00000F20, 0x00012520, 0x0040A400, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE MMHUB_BASE ={ { { { 0x00012440, 0x0001A000, 0x00408800, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE MP0_BASE ={ { { { 0x00016000, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE MP1_BASE ={ { { { 0x00016000, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE NBIF0_BASE ={ { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0x00012D80, 0x0041B000 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE OSSSYS_BASE ={ { { { 0x000010A0, 0x00012500, 0x0040A000, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + 
{ { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE PCIE0_BASE ={ { { { 0x000128C0, 0x00411800, 0x04440000, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE SDMA0_BASE ={ { { { 0x00001260, 0x00012540, 0x0040A800, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE SDMA1_BASE ={ { { { 0x00001860, 0x00012560, 0x0040AC00, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE SDMA2_BASE ={ { { { 0x00013760, 0x0001E000, 0x0042EC00, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE SDMA3_BASE ={ { { { 0x00013780, 0x0001E400, 0x0042F000, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE SDMA4_BASE ={ { { { 0x000137A0, 0x0001E800, 0x0042F400, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE SDMA5_BASE ={ { { { 0x000137C0, 0x0001EC00, 0x0042F800, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE SDMA6_BASE ={ { { { 0x000137E0, 0x0001F000, 0x0042FC00, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE SDMA7_BASE ={ { { { 0x00013800, 0x0001F400, 0x00430000, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE SMUIO_BASE ={ { { { 0x00012080, 0x00016800, 0x00016A00, 0x00401000, 0x00440000, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE THM_BASE ={ { { { 0x00012060, 0x00016600, 0x00400C00, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE UMC_BASE ={ { { { 0x000132C0, 0x00014000, 0x00425800, 0, 0, 0 } }, + { { 0x000132E0, 
0x00054000, 0x00425C00, 0, 0, 0 } }, + { { 0x00013300, 0x00094000, 0x00426000, 0, 0, 0 } }, + { { 0x00013320, 0x000D4000, 0x00426400, 0, 0, 0 } }, + { { 0x00013340, 0x00114000, 0x00426800, 0, 0, 0 } }, + { { 0x00013360, 0x00154000, 0x00426C00, 0, 0, 0 } }, + { { 0x00013380, 0x00194000, 0x00427000, 0, 0, 0 } }, + { { 0x000133A0, 0x001D4000, 0x00427400, 0, 0, 0 } } } }; +static const struct IP_BASE UVD_BASE ={ { { { 0x00007800, 0x00007E00, 0x00012180, 0x00403000, 0, 0 } }, + { { 0x00007A00, 0x00009000, 0x000136E0, 0x0042DC00, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE DBGU_IO_BASE ={ { { { 0x000001E0, 0x000125A0, 0x0040B400, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE RSMU_BASE ={ { { { 0x00012000, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; + + + +#define ATHUB_BASE__INST0_SEG0 0x00000C20 +#define ATHUB_BASE__INST0_SEG1 0x00012460 +#define ATHUB_BASE__INST0_SEG2 0x00408C00 +#define ATHUB_BASE__INST0_SEG3 0 +#define ATHUB_BASE__INST0_SEG4 0 +#define ATHUB_BASE__INST0_SEG5 0 + +#define ATHUB_BASE__INST1_SEG0 0 +#define ATHUB_BASE__INST1_SEG1 0 +#define ATHUB_BASE__INST1_SEG2 0 +#define ATHUB_BASE__INST1_SEG3 0 +#define ATHUB_BASE__INST1_SEG4 0 +#define ATHUB_BASE__INST1_SEG5 0 + +#define ATHUB_BASE__INST2_SEG0 0 +#define ATHUB_BASE__INST2_SEG1 0 +#define ATHUB_BASE__INST2_SEG2 0 +#define ATHUB_BASE__INST2_SEG3 0 +#define ATHUB_BASE__INST2_SEG4 0 +#define ATHUB_BASE__INST2_SEG5 0 + +#define ATHUB_BASE__INST3_SEG0 0 +#define ATHUB_BASE__INST3_SEG1 0 +#define ATHUB_BASE__INST3_SEG2 0 +#define ATHUB_BASE__INST3_SEG3 0 +#define ATHUB_BASE__INST3_SEG4 0 +#define ATHUB_BASE__INST3_SEG5 0 + +#define ATHUB_BASE__INST4_SEG0 0 +#define ATHUB_BASE__INST4_SEG1 0 +#define ATHUB_BASE__INST4_SEG2 0 +#define ATHUB_BASE__INST4_SEG3 0 +#define ATHUB_BASE__INST4_SEG4 0 +#define ATHUB_BASE__INST4_SEG5 0 + +#define ATHUB_BASE__INST5_SEG0 0 +#define ATHUB_BASE__INST5_SEG1 0 +#define ATHUB_BASE__INST5_SEG2 0 +#define ATHUB_BASE__INST5_SEG3 0 +#define ATHUB_BASE__INST5_SEG4 0 +#define ATHUB_BASE__INST5_SEG5 0 + +#define ATHUB_BASE__INST6_SEG0 0 +#define ATHUB_BASE__INST6_SEG1 0 +#define ATHUB_BASE__INST6_SEG2 0 +#define ATHUB_BASE__INST6_SEG3 0 +#define ATHUB_BASE__INST6_SEG4 0 +#define ATHUB_BASE__INST6_SEG5 0 + +#define ATHUB_BASE__INST7_SEG0 0 +#define ATHUB_BASE__INST7_SEG1 0 +#define ATHUB_BASE__INST7_SEG2 0 +#define ATHUB_BASE__INST7_SEG3 0 +#define ATHUB_BASE__INST7_SEG4 0 +#define ATHUB_BASE__INST7_SEG5 0 + +#define CLK_BASE__INST0_SEG0 0x000120C0 +#define CLK_BASE__INST0_SEG1 0x00016C00 +#define CLK_BASE__INST0_SEG2 0x00401800 +#define CLK_BASE__INST0_SEG3 0 +#define CLK_BASE__INST0_SEG4 0 +#define CLK_BASE__INST0_SEG5 0 + +#define CLK_BASE__INST1_SEG0 0x000120E0 +#define CLK_BASE__INST1_SEG1 0x00016E00 +#define CLK_BASE__INST1_SEG2 0x00401C00 +#define CLK_BASE__INST1_SEG3 0 +#define CLK_BASE__INST1_SEG4 0 +#define CLK_BASE__INST1_SEG5 0 + +#define CLK_BASE__INST2_SEG0 0x00012100 +#define CLK_BASE__INST2_SEG1 0x00017000 +#define CLK_BASE__INST2_SEG2 0x00402000 +#define 
CLK_BASE__INST2_SEG3 0 +#define CLK_BASE__INST2_SEG4 0 +#define CLK_BASE__INST2_SEG5 0 + +#define CLK_BASE__INST3_SEG0 0x00012120 +#define CLK_BASE__INST3_SEG1 0x00017200 +#define CLK_BASE__INST3_SEG2 0x00402400 +#define CLK_BASE__INST3_SEG3 0 +#define CLK_BASE__INST3_SEG4 0 +#define CLK_BASE__INST3_SEG5 0 + +#define CLK_BASE__INST4_SEG0 0x000136C0 +#define CLK_BASE__INST4_SEG1 0x0001B000 +#define CLK_BASE__INST4_SEG2 0x0042D800 +#define CLK_BASE__INST4_SEG3 0 +#define CLK_BASE__INST4_SEG4 0 +#define CLK_BASE__INST4_SEG5 0 + +#define CLK_BASE__INST5_SEG0 0x00013720 +#define CLK_BASE__INST5_SEG1 0x0001B200 +#define CLK_BASE__INST5_SEG2 0x0042E400 +#define CLK_BASE__INST5_SEG3 0 +#define CLK_BASE__INST5_SEG4 0 +#define CLK_BASE__INST5_SEG5 0 + +#define CLK_BASE__INST6_SEG0 0x000125E0 +#define CLK_BASE__INST6_SEG1 0x00017E00 +#define CLK_BASE__INST6_SEG2 0x0040BC00 +#define CLK_BASE__INST6_SEG3 0 +#define CLK_BASE__INST6_SEG4 0 +#define CLK_BASE__INST6_SEG5 0 + +#define CLK_BASE__INST7_SEG0 0 +#define CLK_BASE__INST7_SEG1 0 +#define CLK_BASE__INST7_SEG2 0 +#define CLK_BASE__INST7_SEG3 0 +#define CLK_BASE__INST7_SEG4 0 +#define CLK_BASE__INST7_SEG5 0 + +#define DF_BASE__INST0_SEG0 0x00007000 +#define DF_BASE__INST0_SEG1 0x000125C0 +#define DF_BASE__INST0_SEG2 0x0040B800 +#define DF_BASE__INST0_SEG3 0 +#define DF_BASE__INST0_SEG4 0 +#define DF_BASE__INST0_SEG5 0 + +#define DF_BASE__INST1_SEG0 0 +#define DF_BASE__INST1_SEG1 0 +#define DF_BASE__INST1_SEG2 0 +#define DF_BASE__INST1_SEG3 0 +#define DF_BASE__INST1_SEG4 0 +#define DF_BASE__INST1_SEG5 0 + +#define DF_BASE__INST2_SEG0 0 +#define DF_BASE__INST2_SEG1 0 +#define DF_BASE__INST2_SEG2 0 +#define DF_BASE__INST2_SEG3 0 +#define DF_BASE__INST2_SEG4 0 +#define DF_BASE__INST2_SEG5 0 + +#define DF_BASE__INST3_SEG0 0 +#define DF_BASE__INST3_SEG1 0 +#define DF_BASE__INST3_SEG2 0 +#define DF_BASE__INST3_SEG3 0 +#define DF_BASE__INST3_SEG4 0 +#define DF_BASE__INST3_SEG5 0 + +#define DF_BASE__INST4_SEG0 0 +#define DF_BASE__INST4_SEG1 0 +#define DF_BASE__INST4_SEG2 0 +#define DF_BASE__INST4_SEG3 0 +#define DF_BASE__INST4_SEG4 0 +#define DF_BASE__INST4_SEG5 0 + +#define DF_BASE__INST5_SEG0 0 +#define DF_BASE__INST5_SEG1 0 +#define DF_BASE__INST5_SEG2 0 +#define DF_BASE__INST5_SEG3 0 +#define DF_BASE__INST5_SEG4 0 +#define DF_BASE__INST5_SEG5 0 + +#define DF_BASE__INST6_SEG0 0 +#define DF_BASE__INST6_SEG1 0 +#define DF_BASE__INST6_SEG2 0 +#define DF_BASE__INST6_SEG3 0 +#define DF_BASE__INST6_SEG4 0 +#define DF_BASE__INST6_SEG5 0 + +#define DF_BASE__INST7_SEG0 0 +#define DF_BASE__INST7_SEG1 0 +#define DF_BASE__INST7_SEG2 0 +#define DF_BASE__INST7_SEG3 0 +#define DF_BASE__INST7_SEG4 0 +#define DF_BASE__INST7_SEG5 0 + +#define FUSE_BASE__INST0_SEG0 0x000120A0 +#define FUSE_BASE__INST0_SEG1 0x00017400 +#define FUSE_BASE__INST0_SEG2 0x00401400 +#define FUSE_BASE__INST0_SEG3 0 +#define FUSE_BASE__INST0_SEG4 0 +#define FUSE_BASE__INST0_SEG5 0 + +#define FUSE_BASE__INST1_SEG0 0 +#define FUSE_BASE__INST1_SEG1 0 +#define FUSE_BASE__INST1_SEG2 0 +#define FUSE_BASE__INST1_SEG3 0 +#define FUSE_BASE__INST1_SEG4 0 +#define FUSE_BASE__INST1_SEG5 0 + +#define FUSE_BASE__INST2_SEG0 0 +#define FUSE_BASE__INST2_SEG1 0 +#define FUSE_BASE__INST2_SEG2 0 +#define FUSE_BASE__INST2_SEG3 0 +#define FUSE_BASE__INST2_SEG4 0 +#define FUSE_BASE__INST2_SEG5 0 + +#define FUSE_BASE__INST3_SEG0 0 +#define FUSE_BASE__INST3_SEG1 0 +#define FUSE_BASE__INST3_SEG2 0 +#define FUSE_BASE__INST3_SEG3 0 +#define FUSE_BASE__INST3_SEG4 0 +#define FUSE_BASE__INST3_SEG5 0 + +#define 
FUSE_BASE__INST4_SEG0 0 +#define FUSE_BASE__INST4_SEG1 0 +#define FUSE_BASE__INST4_SEG2 0 +#define FUSE_BASE__INST4_SEG3 0 +#define FUSE_BASE__INST4_SEG4 0 +#define FUSE_BASE__INST4_SEG5 0 + +#define FUSE_BASE__INST5_SEG0 0 +#define FUSE_BASE__INST5_SEG1 0 +#define FUSE_BASE__INST5_SEG2 0 +#define FUSE_BASE__INST5_SEG3 0 +#define FUSE_BASE__INST5_SEG4 0 +#define FUSE_BASE__INST5_SEG5 0 + +#define FUSE_BASE__INST6_SEG0 0 +#define FUSE_BASE__INST6_SEG1 0 +#define FUSE_BASE__INST6_SEG2 0 +#define FUSE_BASE__INST6_SEG3 0 +#define FUSE_BASE__INST6_SEG4 0 +#define FUSE_BASE__INST6_SEG5 0 + +#define FUSE_BASE__INST7_SEG0 0 +#define FUSE_BASE__INST7_SEG1 0 +#define FUSE_BASE__INST7_SEG2 0 +#define FUSE_BASE__INST7_SEG3 0 +#define FUSE_BASE__INST7_SEG4 0 +#define FUSE_BASE__INST7_SEG5 0 + +#define GC_BASE__INST0_SEG0 0x00002000 +#define GC_BASE__INST0_SEG1 0x0000A000 +#define GC_BASE__INST0_SEG2 0x00012160 +#define GC_BASE__INST0_SEG3 0x00402C00 +#define GC_BASE__INST0_SEG4 0 +#define GC_BASE__INST0_SEG5 0 + +#define GC_BASE__INST1_SEG0 0 +#define GC_BASE__INST1_SEG1 0 +#define GC_BASE__INST1_SEG2 0 +#define GC_BASE__INST1_SEG3 0 +#define GC_BASE__INST1_SEG4 0 +#define GC_BASE__INST1_SEG5 0 + +#define GC_BASE__INST2_SEG0 0 +#define GC_BASE__INST2_SEG1 0 +#define GC_BASE__INST2_SEG2 0 +#define GC_BASE__INST2_SEG3 0 +#define GC_BASE__INST2_SEG4 0 +#define GC_BASE__INST2_SEG5 0 + +#define GC_BASE__INST3_SEG0 0 +#define GC_BASE__INST3_SEG1 0 +#define GC_BASE__INST3_SEG2 0 +#define GC_BASE__INST3_SEG3 0 +#define GC_BASE__INST3_SEG4 0 +#define GC_BASE__INST3_SEG5 0 + +#define GC_BASE__INST4_SEG0 0 +#define GC_BASE__INST4_SEG1 0 +#define GC_BASE__INST4_SEG2 0 +#define GC_BASE__INST4_SEG3 0 +#define GC_BASE__INST4_SEG4 0 +#define GC_BASE__INST4_SEG5 0 + +#define GC_BASE__INST5_SEG0 0 +#define GC_BASE__INST5_SEG1 0 +#define GC_BASE__INST5_SEG2 0 +#define GC_BASE__INST5_SEG3 0 +#define GC_BASE__INST5_SEG4 0 +#define GC_BASE__INST5_SEG5 0 + +#define GC_BASE__INST6_SEG0 0 +#define GC_BASE__INST6_SEG1 0 +#define GC_BASE__INST6_SEG2 0 +#define GC_BASE__INST6_SEG3 0 +#define GC_BASE__INST6_SEG4 0 +#define GC_BASE__INST6_SEG5 0 + +#define GC_BASE__INST7_SEG0 0 +#define GC_BASE__INST7_SEG1 0 +#define GC_BASE__INST7_SEG2 0 +#define GC_BASE__INST7_SEG3 0 +#define GC_BASE__INST7_SEG4 0 +#define GC_BASE__INST7_SEG5 0 + +#define HDP_BASE__INST0_SEG0 0x00000F20 +#define HDP_BASE__INST0_SEG1 0x00012520 +#define HDP_BASE__INST0_SEG2 0x0040A400 +#define HDP_BASE__INST0_SEG3 0 +#define HDP_BASE__INST0_SEG4 0 +#define HDP_BASE__INST0_SEG5 0 + +#define HDP_BASE__INST1_SEG0 0 +#define HDP_BASE__INST1_SEG1 0 +#define HDP_BASE__INST1_SEG2 0 +#define HDP_BASE__INST1_SEG3 0 +#define HDP_BASE__INST1_SEG4 0 +#define HDP_BASE__INST1_SEG5 0 + +#define HDP_BASE__INST2_SEG0 0 +#define HDP_BASE__INST2_SEG1 0 +#define HDP_BASE__INST2_SEG2 0 +#define HDP_BASE__INST2_SEG3 0 +#define HDP_BASE__INST2_SEG4 0 +#define HDP_BASE__INST2_SEG5 0 + +#define HDP_BASE__INST3_SEG0 0 +#define HDP_BASE__INST3_SEG1 0 +#define HDP_BASE__INST3_SEG2 0 +#define HDP_BASE__INST3_SEG3 0 +#define HDP_BASE__INST3_SEG4 0 +#define HDP_BASE__INST3_SEG5 0 + +#define HDP_BASE__INST4_SEG0 0 +#define HDP_BASE__INST4_SEG1 0 +#define HDP_BASE__INST4_SEG2 0 +#define HDP_BASE__INST4_SEG3 0 +#define HDP_BASE__INST4_SEG4 0 +#define HDP_BASE__INST4_SEG5 0 + +#define HDP_BASE__INST5_SEG0 0 +#define HDP_BASE__INST5_SEG1 0 +#define HDP_BASE__INST5_SEG2 0 +#define HDP_BASE__INST5_SEG3 0 +#define HDP_BASE__INST5_SEG4 0 +#define HDP_BASE__INST5_SEG5 0 + +#define HDP_BASE__INST6_SEG0 0 
+#define HDP_BASE__INST6_SEG1 0 +#define HDP_BASE__INST6_SEG2 0 +#define HDP_BASE__INST6_SEG3 0 +#define HDP_BASE__INST6_SEG4 0 +#define HDP_BASE__INST6_SEG5 0 + +#define HDP_BASE__INST7_SEG0 0 +#define HDP_BASE__INST7_SEG1 0 +#define HDP_BASE__INST7_SEG2 0 +#define HDP_BASE__INST7_SEG3 0 +#define HDP_BASE__INST7_SEG4 0 +#define HDP_BASE__INST7_SEG5 0 + +#define MMHUB_BASE__INST0_SEG0 0x00012440 +#define MMHUB_BASE__INST0_SEG1 0x0001A000 +#define MMHUB_BASE__INST0_SEG2 0x00408800 +#define MMHUB_BASE__INST0_SEG3 0 +#define MMHUB_BASE__INST0_SEG4 0 +#define MMHUB_BASE__INST0_SEG5 0 + +#define MMHUB_BASE__INST1_SEG0 0 +#define MMHUB_BASE__INST1_SEG1 0 +#define MMHUB_BASE__INST1_SEG2 0 +#define MMHUB_BASE__INST1_SEG3 0 +#define MMHUB_BASE__INST1_SEG4 0 +#define MMHUB_BASE__INST1_SEG5 0 + +#define MMHUB_BASE__INST2_SEG0 0 +#define MMHUB_BASE__INST2_SEG1 0 +#define MMHUB_BASE__INST2_SEG2 0 +#define MMHUB_BASE__INST2_SEG3 0 +#define MMHUB_BASE__INST2_SEG4 0 +#define MMHUB_BASE__INST2_SEG5 0 + +#define MMHUB_BASE__INST3_SEG0 0 +#define MMHUB_BASE__INST3_SEG1 0 +#define MMHUB_BASE__INST3_SEG2 0 +#define MMHUB_BASE__INST3_SEG3 0 +#define MMHUB_BASE__INST3_SEG4 0 +#define MMHUB_BASE__INST3_SEG5 0 + +#define MMHUB_BASE__INST4_SEG0 0 +#define MMHUB_BASE__INST4_SEG1 0 +#define MMHUB_BASE__INST4_SEG2 0 +#define MMHUB_BASE__INST4_SEG3 0 +#define MMHUB_BASE__INST4_SEG4 0 +#define MMHUB_BASE__INST4_SEG5 0 + +#define MMHUB_BASE__INST5_SEG0 0 +#define MMHUB_BASE__INST5_SEG1 0 +#define MMHUB_BASE__INST5_SEG2 0 +#define MMHUB_BASE__INST5_SEG3 0 +#define MMHUB_BASE__INST5_SEG4 0 +#define MMHUB_BASE__INST5_SEG5 0 + +#define MMHUB_BASE__INST6_SEG0 0 +#define MMHUB_BASE__INST6_SEG1 0 +#define MMHUB_BASE__INST6_SEG2 0 +#define MMHUB_BASE__INST6_SEG3 0 +#define MMHUB_BASE__INST6_SEG4 0 +#define MMHUB_BASE__INST6_SEG5 0 + +#define MMHUB_BASE__INST7_SEG0 0 +#define MMHUB_BASE__INST7_SEG1 0 +#define MMHUB_BASE__INST7_SEG2 0 +#define MMHUB_BASE__INST7_SEG3 0 +#define MMHUB_BASE__INST7_SEG4 0 +#define MMHUB_BASE__INST7_SEG5 0 + +#define MP0_BASE__INST0_SEG0 0x00013FE0 +#define MP0_BASE__INST0_SEG1 0x00016000 +#define MP0_BASE__INST0_SEG2 0x0043FC00 +#define MP0_BASE__INST0_SEG3 0x00DC0000 +#define MP0_BASE__INST0_SEG4 0x00E00000 +#define MP0_BASE__INST0_SEG5 0x00E40000 + +#define MP0_BASE__INST1_SEG0 0 +#define MP0_BASE__INST1_SEG1 0 +#define MP0_BASE__INST1_SEG2 0 +#define MP0_BASE__INST1_SEG3 0 +#define MP0_BASE__INST1_SEG4 0 +#define MP0_BASE__INST1_SEG5 0 + +#define MP0_BASE__INST2_SEG0 0 +#define MP0_BASE__INST2_SEG1 0 +#define MP0_BASE__INST2_SEG2 0 +#define MP0_BASE__INST2_SEG3 0 +#define MP0_BASE__INST2_SEG4 0 +#define MP0_BASE__INST2_SEG5 0 + +#define MP0_BASE__INST3_SEG0 0 +#define MP0_BASE__INST3_SEG1 0 +#define MP0_BASE__INST3_SEG2 0 +#define MP0_BASE__INST3_SEG3 0 +#define MP0_BASE__INST3_SEG4 0 +#define MP0_BASE__INST3_SEG5 0 + +#define MP0_BASE__INST4_SEG0 0 +#define MP0_BASE__INST4_SEG1 0 +#define MP0_BASE__INST4_SEG2 0 +#define MP0_BASE__INST4_SEG3 0 +#define MP0_BASE__INST4_SEG4 0 +#define MP0_BASE__INST4_SEG5 0 + +#define MP0_BASE__INST5_SEG0 0 +#define MP0_BASE__INST5_SEG1 0 +#define MP0_BASE__INST5_SEG2 0 +#define MP0_BASE__INST5_SEG3 0 +#define MP0_BASE__INST5_SEG4 0 +#define MP0_BASE__INST5_SEG5 0 + +#define MP0_BASE__INST6_SEG0 0 +#define MP0_BASE__INST6_SEG1 0 +#define MP0_BASE__INST6_SEG2 0 +#define MP0_BASE__INST6_SEG3 0 +#define MP0_BASE__INST6_SEG4 0 +#define MP0_BASE__INST6_SEG5 0 + +#define MP0_BASE__INST7_SEG0 0 +#define MP0_BASE__INST7_SEG1 0 +#define MP0_BASE__INST7_SEG2 0 +#define 
MP0_BASE__INST7_SEG3 0 +#define MP0_BASE__INST7_SEG4 0 +#define MP0_BASE__INST7_SEG5 0 + +#define MP1_BASE__INST0_SEG0 0x00012020 +#define MP1_BASE__INST0_SEG1 0x00016200 +#define MP1_BASE__INST0_SEG2 0x00400400 +#define MP1_BASE__INST0_SEG3 0x00E80000 +#define MP1_BASE__INST0_SEG4 0x00EC0000 +#define MP1_BASE__INST0_SEG5 0x00F00000 + +#define MP1_BASE__INST1_SEG0 0 +#define MP1_BASE__INST1_SEG1 0 +#define MP1_BASE__INST1_SEG2 0 +#define MP1_BASE__INST1_SEG3 0 +#define MP1_BASE__INST1_SEG4 0 +#define MP1_BASE__INST1_SEG5 0 + +#define MP1_BASE__INST2_SEG0 0 +#define MP1_BASE__INST2_SEG1 0 +#define MP1_BASE__INST2_SEG2 0 +#define MP1_BASE__INST2_SEG3 0 +#define MP1_BASE__INST2_SEG4 0 +#define MP1_BASE__INST2_SEG5 0 + +#define MP1_BASE__INST3_SEG0 0 +#define MP1_BASE__INST3_SEG1 0 +#define MP1_BASE__INST3_SEG2 0 +#define MP1_BASE__INST3_SEG3 0 +#define MP1_BASE__INST3_SEG4 0 +#define MP1_BASE__INST3_SEG5 0 + +#define MP1_BASE__INST4_SEG0 0 +#define MP1_BASE__INST4_SEG1 0 +#define MP1_BASE__INST4_SEG2 0 +#define MP1_BASE__INST4_SEG3 0 +#define MP1_BASE__INST4_SEG4 0 +#define MP1_BASE__INST4_SEG5 0 + +#define MP1_BASE__INST5_SEG0 0 +#define MP1_BASE__INST5_SEG1 0 +#define MP1_BASE__INST5_SEG2 0 +#define MP1_BASE__INST5_SEG3 0 +#define MP1_BASE__INST5_SEG4 0 +#define MP1_BASE__INST5_SEG5 0 + +#define MP1_BASE__INST6_SEG0 0 +#define MP1_BASE__INST6_SEG1 0 +#define MP1_BASE__INST6_SEG2 0 +#define MP1_BASE__INST6_SEG3 0 +#define MP1_BASE__INST6_SEG4 0 +#define MP1_BASE__INST6_SEG5 0 + +#define MP1_BASE__INST7_SEG0 0 +#define MP1_BASE__INST7_SEG1 0 +#define MP1_BASE__INST7_SEG2 0 +#define MP1_BASE__INST7_SEG3 0 +#define MP1_BASE__INST7_SEG4 0 +#define MP1_BASE__INST7_SEG5 0 + +#define NBIF0_BASE__INST0_SEG0 0x00000000 +#define NBIF0_BASE__INST0_SEG1 0x00000014 +#define NBIF0_BASE__INST0_SEG2 0x00000D20 +#define NBIF0_BASE__INST0_SEG3 0x00010400 +#define NBIF0_BASE__INST0_SEG4 0x00012D80 +#define NBIF0_BASE__INST0_SEG5 0x0041B000 + +#define NBIF0_BASE__INST1_SEG0 0 +#define NBIF0_BASE__INST1_SEG1 0 +#define NBIF0_BASE__INST1_SEG2 0 +#define NBIF0_BASE__INST1_SEG3 0 +#define NBIF0_BASE__INST1_SEG4 0 +#define NBIF0_BASE__INST1_SEG5 0 + +#define NBIF0_BASE__INST2_SEG0 0 +#define NBIF0_BASE__INST2_SEG1 0 +#define NBIF0_BASE__INST2_SEG2 0 +#define NBIF0_BASE__INST2_SEG3 0 +#define NBIF0_BASE__INST2_SEG4 0 +#define NBIF0_BASE__INST2_SEG5 0 + +#define NBIF0_BASE__INST3_SEG0 0 +#define NBIF0_BASE__INST3_SEG1 0 +#define NBIF0_BASE__INST3_SEG2 0 +#define NBIF0_BASE__INST3_SEG3 0 +#define NBIF0_BASE__INST3_SEG4 0 +#define NBIF0_BASE__INST3_SEG5 0 + +#define NBIF0_BASE__INST4_SEG0 0 +#define NBIF0_BASE__INST4_SEG1 0 +#define NBIF0_BASE__INST4_SEG2 0 +#define NBIF0_BASE__INST4_SEG3 0 +#define NBIF0_BASE__INST4_SEG4 0 +#define NBIF0_BASE__INST4_SEG5 0 + +#define NBIF0_BASE__INST5_SEG0 0 +#define NBIF0_BASE__INST5_SEG1 0 +#define NBIF0_BASE__INST5_SEG2 0 +#define NBIF0_BASE__INST5_SEG3 0 +#define NBIF0_BASE__INST5_SEG4 0 +#define NBIF0_BASE__INST5_SEG5 0 + +#define NBIF0_BASE__INST6_SEG0 0 +#define NBIF0_BASE__INST6_SEG1 0 +#define NBIF0_BASE__INST6_SEG2 0 +#define NBIF0_BASE__INST6_SEG3 0 +#define NBIF0_BASE__INST6_SEG4 0 +#define NBIF0_BASE__INST6_SEG5 0 + +#define NBIF0_BASE__INST7_SEG0 0 +#define NBIF0_BASE__INST7_SEG1 0 +#define NBIF0_BASE__INST7_SEG2 0 +#define NBIF0_BASE__INST7_SEG3 0 +#define NBIF0_BASE__INST7_SEG4 0 +#define NBIF0_BASE__INST7_SEG5 0 + +#define OSSSYS_BASE__INST0_SEG0 0x000010A0 +#define OSSSYS_BASE__INST0_SEG1 0x00012500 +#define OSSSYS_BASE__INST0_SEG2 0x0040A000 +#define 
OSSSYS_BASE__INST0_SEG3 0 +#define OSSSYS_BASE__INST0_SEG4 0 +#define OSSSYS_BASE__INST0_SEG5 0 + +#define OSSSYS_BASE__INST1_SEG0 0 +#define OSSSYS_BASE__INST1_SEG1 0 +#define OSSSYS_BASE__INST1_SEG2 0 +#define OSSSYS_BASE__INST1_SEG3 0 +#define OSSSYS_BASE__INST1_SEG4 0 +#define OSSSYS_BASE__INST1_SEG5 0 + +#define OSSSYS_BASE__INST2_SEG0 0 +#define OSSSYS_BASE__INST2_SEG1 0 +#define OSSSYS_BASE__INST2_SEG2 0 +#define OSSSYS_BASE__INST2_SEG3 0 +#define OSSSYS_BASE__INST2_SEG4 0 +#define OSSSYS_BASE__INST2_SEG5 0 + +#define OSSSYS_BASE__INST3_SEG0 0 +#define OSSSYS_BASE__INST3_SEG1 0 +#define OSSSYS_BASE__INST3_SEG2 0 +#define OSSSYS_BASE__INST3_SEG3 0 +#define OSSSYS_BASE__INST3_SEG4 0 +#define OSSSYS_BASE__INST3_SEG5 0 + +#define OSSSYS_BASE__INST4_SEG0 0 +#define OSSSYS_BASE__INST4_SEG1 0 +#define OSSSYS_BASE__INST4_SEG2 0 +#define OSSSYS_BASE__INST4_SEG3 0 +#define OSSSYS_BASE__INST4_SEG4 0 +#define OSSSYS_BASE__INST4_SEG5 0 + +#define OSSSYS_BASE__INST5_SEG0 0 +#define OSSSYS_BASE__INST5_SEG1 0 +#define OSSSYS_BASE__INST5_SEG2 0 +#define OSSSYS_BASE__INST5_SEG3 0 +#define OSSSYS_BASE__INST5_SEG4 0 +#define OSSSYS_BASE__INST5_SEG5 0 + +#define OSSSYS_BASE__INST6_SEG0 0 +#define OSSSYS_BASE__INST6_SEG1 0 +#define OSSSYS_BASE__INST6_SEG2 0 +#define OSSSYS_BASE__INST6_SEG3 0 +#define OSSSYS_BASE__INST6_SEG4 0 +#define OSSSYS_BASE__INST6_SEG5 0 + +#define OSSSYS_BASE__INST7_SEG0 0 +#define OSSSYS_BASE__INST7_SEG1 0 +#define OSSSYS_BASE__INST7_SEG2 0 +#define OSSSYS_BASE__INST7_SEG3 0 +#define OSSSYS_BASE__INST7_SEG4 0 +#define OSSSYS_BASE__INST7_SEG5 0 + +#define PCIE0_BASE__INST0_SEG0 0x000128C0 +#define PCIE0_BASE__INST0_SEG1 0x00411800 +#define PCIE0_BASE__INST0_SEG2 0x04440000 +#define PCIE0_BASE__INST0_SEG3 0 +#define PCIE0_BASE__INST0_SEG4 0 +#define PCIE0_BASE__INST0_SEG5 0 + +#define PCIE0_BASE__INST1_SEG0 0 +#define PCIE0_BASE__INST1_SEG1 0 +#define PCIE0_BASE__INST1_SEG2 0 +#define PCIE0_BASE__INST1_SEG3 0 +#define PCIE0_BASE__INST1_SEG4 0 +#define PCIE0_BASE__INST1_SEG5 0 + +#define PCIE0_BASE__INST2_SEG0 0 +#define PCIE0_BASE__INST2_SEG1 0 +#define PCIE0_BASE__INST2_SEG2 0 +#define PCIE0_BASE__INST2_SEG3 0 +#define PCIE0_BASE__INST2_SEG4 0 +#define PCIE0_BASE__INST2_SEG5 0 + +#define PCIE0_BASE__INST3_SEG0 0 +#define PCIE0_BASE__INST3_SEG1 0 +#define PCIE0_BASE__INST3_SEG2 0 +#define PCIE0_BASE__INST3_SEG3 0 +#define PCIE0_BASE__INST3_SEG4 0 +#define PCIE0_BASE__INST3_SEG5 0 + +#define PCIE0_BASE__INST4_SEG0 0 +#define PCIE0_BASE__INST4_SEG1 0 +#define PCIE0_BASE__INST4_SEG2 0 +#define PCIE0_BASE__INST4_SEG3 0 +#define PCIE0_BASE__INST4_SEG4 0 +#define PCIE0_BASE__INST4_SEG5 0 + +#define PCIE0_BASE__INST5_SEG0 0 +#define PCIE0_BASE__INST5_SEG1 0 +#define PCIE0_BASE__INST5_SEG2 0 +#define PCIE0_BASE__INST5_SEG3 0 +#define PCIE0_BASE__INST5_SEG4 0 +#define PCIE0_BASE__INST5_SEG5 0 + +#define PCIE0_BASE__INST6_SEG0 0 +#define PCIE0_BASE__INST6_SEG1 0 +#define PCIE0_BASE__INST6_SEG2 0 +#define PCIE0_BASE__INST6_SEG3 0 +#define PCIE0_BASE__INST6_SEG4 0 +#define PCIE0_BASE__INST6_SEG5 0 + +#define PCIE0_BASE__INST7_SEG0 0 +#define PCIE0_BASE__INST7_SEG1 0 +#define PCIE0_BASE__INST7_SEG2 0 +#define PCIE0_BASE__INST7_SEG3 0 +#define PCIE0_BASE__INST7_SEG4 0 +#define PCIE0_BASE__INST7_SEG5 0 + +#define SDMA0_BASE__INST0_SEG0 0x00001260 +#define SDMA0_BASE__INST0_SEG1 0x00012540 +#define SDMA0_BASE__INST0_SEG2 0x0040A800 +#define SDMA0_BASE__INST0_SEG3 0 +#define SDMA0_BASE__INST0_SEG4 0 +#define SDMA0_BASE__INST0_SEG5 0 + +#define SDMA0_BASE__INST1_SEG0 0 +#define 
SDMA0_BASE__INST1_SEG1 0 +#define SDMA0_BASE__INST1_SEG2 0 +#define SDMA0_BASE__INST1_SEG3 0 +#define SDMA0_BASE__INST1_SEG4 0 +#define SDMA0_BASE__INST1_SEG5 0 + +#define SDMA0_BASE__INST2_SEG0 0 +#define SDMA0_BASE__INST2_SEG1 0 +#define SDMA0_BASE__INST2_SEG2 0 +#define SDMA0_BASE__INST2_SEG3 0 +#define SDMA0_BASE__INST2_SEG4 0 +#define SDMA0_BASE__INST2_SEG5 0 + +#define SDMA0_BASE__INST3_SEG0 0 +#define SDMA0_BASE__INST3_SEG1 0 +#define SDMA0_BASE__INST3_SEG2 0 +#define SDMA0_BASE__INST3_SEG3 0 +#define SDMA0_BASE__INST3_SEG4 0 +#define SDMA0_BASE__INST3_SEG5 0 + +#define SDMA0_BASE__INST4_SEG0 0 +#define SDMA0_BASE__INST4_SEG1 0 +#define SDMA0_BASE__INST4_SEG2 0 +#define SDMA0_BASE__INST4_SEG3 0 +#define SDMA0_BASE__INST4_SEG4 0 +#define SDMA0_BASE__INST4_SEG5 0 + +#define SDMA0_BASE__INST5_SEG0 0 +#define SDMA0_BASE__INST5_SEG1 0 +#define SDMA0_BASE__INST5_SEG2 0 +#define SDMA0_BASE__INST5_SEG3 0 +#define SDMA0_BASE__INST5_SEG4 0 +#define SDMA0_BASE__INST5_SEG5 0 + +#define SDMA0_BASE__INST6_SEG0 0 +#define SDMA0_BASE__INST6_SEG1 0 +#define SDMA0_BASE__INST6_SEG2 0 +#define SDMA0_BASE__INST6_SEG3 0 +#define SDMA0_BASE__INST6_SEG4 0 +#define SDMA0_BASE__INST6_SEG5 0 + +#define SDMA1_BASE__INST0_SEG0 0x00001860 +#define SDMA1_BASE__INST0_SEG1 0x00012560 +#define SDMA1_BASE__INST0_SEG2 0x0040AC00 +#define SDMA1_BASE__INST0_SEG3 0 +#define SDMA1_BASE__INST0_SEG4 0 +#define SDMA1_BASE__INST0_SEG5 0 + +#define SDMA1_BASE__INST1_SEG0 0 +#define SDMA1_BASE__INST1_SEG1 0 +#define SDMA1_BASE__INST1_SEG2 0 +#define SDMA1_BASE__INST1_SEG3 0 +#define SDMA1_BASE__INST1_SEG4 0 +#define SDMA1_BASE__INST1_SEG5 0 + +#define SDMA1_BASE__INST2_SEG0 0 +#define SDMA1_BASE__INST2_SEG1 0 +#define SDMA1_BASE__INST2_SEG2 0 +#define SDMA1_BASE__INST2_SEG3 0 +#define SDMA1_BASE__INST2_SEG4 0 +#define SDMA1_BASE__INST2_SEG5 0 + +#define SDMA1_BASE__INST3_SEG0 0 +#define SDMA1_BASE__INST3_SEG1 0 +#define SDMA1_BASE__INST3_SEG2 0 +#define SDMA1_BASE__INST3_SEG3 0 +#define SDMA1_BASE__INST3_SEG4 0 +#define SDMA1_BASE__INST3_SEG5 0 + +#define SDMA1_BASE__INST4_SEG0 0 +#define SDMA1_BASE__INST4_SEG1 0 +#define SDMA1_BASE__INST4_SEG2 0 +#define SDMA1_BASE__INST4_SEG3 0 +#define SDMA1_BASE__INST4_SEG4 0 +#define SDMA1_BASE__INST4_SEG5 0 + +#define SDMA1_BASE__INST5_SEG0 0 +#define SDMA1_BASE__INST5_SEG1 0 +#define SDMA1_BASE__INST5_SEG2 0 +#define SDMA1_BASE__INST5_SEG3 0 +#define SDMA1_BASE__INST5_SEG4 0 +#define SDMA1_BASE__INST5_SEG5 0 + + +#define SDMA1_BASE__INST6_SEG0 0 +#define SDMA1_BASE__INST6_SEG1 0 +#define SDMA1_BASE__INST6_SEG2 0 +#define SDMA1_BASE__INST6_SEG3 0 +#define SDMA1_BASE__INST6_SEG4 0 +#define SDMA1_BASE__INST6_SEG5 0 + + +#define SDMA2_BASE__INST0_SEG0 0x00013760 +#define SDMA2_BASE__INST0_SEG1 0x0001E000 +#define SDMA2_BASE__INST0_SEG2 0x0042EC00 +#define SDMA2_BASE__INST0_SEG3 0 +#define SDMA2_BASE__INST0_SEG4 0 +#define SDMA2_BASE__INST0_SEG5 0 + + +#define SDMA2_BASE__INST1_SEG0 0 +#define SDMA2_BASE__INST1_SEG1 0 +#define SDMA2_BASE__INST1_SEG2 0 +#define SDMA2_BASE__INST1_SEG3 0 +#define SDMA2_BASE__INST1_SEG4 0 +#define SDMA2_BASE__INST1_SEG5 0 + +#define SDMA2_BASE__INST2_SEG0 0 +#define SDMA2_BASE__INST2_SEG1 0 +#define SDMA2_BASE__INST2_SEG2 0 +#define SDMA2_BASE__INST2_SEG3 0 +#define SDMA2_BASE__INST2_SEG4 0 +#define SDMA2_BASE__INST2_SEG5 0 + +#define SDMA2_BASE__INST3_SEG0 0 +#define SDMA2_BASE__INST3_SEG1 0 +#define SDMA2_BASE__INST3_SEG2 0 +#define SDMA2_BASE__INST3_SEG3 0 +#define SDMA2_BASE__INST3_SEG4 0 +#define SDMA2_BASE__INST3_SEG5 0 + +#define SDMA2_BASE__INST4_SEG0 0 
+#define SDMA2_BASE__INST4_SEG1 0 +#define SDMA2_BASE__INST4_SEG2 0 +#define SDMA2_BASE__INST4_SEG3 0 +#define SDMA2_BASE__INST4_SEG4 0 +#define SDMA2_BASE__INST4_SEG5 0 + +#define SDMA2_BASE__INST5_SEG0 0 +#define SDMA2_BASE__INST5_SEG1 0 +#define SDMA2_BASE__INST5_SEG2 0 +#define SDMA2_BASE__INST5_SEG3 0 +#define SDMA2_BASE__INST5_SEG4 0 +#define SDMA2_BASE__INST5_SEG5 0 + +#define SDMA2_BASE__INST6_SEG0 0 +#define SDMA2_BASE__INST6_SEG1 0 +#define SDMA2_BASE__INST6_SEG2 0 +#define SDMA2_BASE__INST6_SEG3 0 +#define SDMA2_BASE__INST6_SEG4 0 +#define SDMA2_BASE__INST6_SEG5 0 + +#define SDMA3_BASE__INST0_SEG0 0x00013780 +#define SDMA3_BASE__INST0_SEG1 0x0001E400 +#define SDMA3_BASE__INST0_SEG2 0x0042F000 +#define SDMA3_BASE__INST0_SEG3 0 +#define SDMA3_BASE__INST0_SEG4 0 +#define SDMA3_BASE__INST0_SEG5 0 + +#define SDMA3_BASE__INST1_SEG0 0 +#define SDMA3_BASE__INST1_SEG1 0 +#define SDMA3_BASE__INST1_SEG2 0 +#define SDMA3_BASE__INST1_SEG3 0 +#define SDMA3_BASE__INST1_SEG4 0 +#define SDMA3_BASE__INST1_SEG5 0 + +#define SDMA3_BASE__INST2_SEG0 0 +#define SDMA3_BASE__INST2_SEG1 0 +#define SDMA3_BASE__INST2_SEG2 0 +#define SDMA3_BASE__INST2_SEG3 0 +#define SDMA3_BASE__INST2_SEG4 0 +#define SDMA3_BASE__INST2_SEG5 0 + +#define SDMA3_BASE__INST3_SEG0 0 +#define SDMA3_BASE__INST3_SEG1 0 +#define SDMA3_BASE__INST3_SEG2 0 +#define SDMA3_BASE__INST3_SEG3 0 +#define SDMA3_BASE__INST3_SEG4 0 +#define SDMA3_BASE__INST3_SEG5 0 + +#define SDMA3_BASE__INST4_SEG0 0 +#define SDMA3_BASE__INST4_SEG1 0 +#define SDMA3_BASE__INST4_SEG2 0 +#define SDMA3_BASE__INST4_SEG3 0 +#define SDMA3_BASE__INST4_SEG4 0 +#define SDMA3_BASE__INST4_SEG5 0 + +#define SDMA3_BASE__INST5_SEG0 0 +#define SDMA3_BASE__INST5_SEG1 0 +#define SDMA3_BASE__INST5_SEG2 0 +#define SDMA3_BASE__INST5_SEG3 0 +#define SDMA3_BASE__INST5_SEG4 0 +#define SDMA3_BASE__INST5_SEG5 0 + +#define SDMA3_BASE__INST6_SEG0 0 +#define SDMA3_BASE__INST6_SEG1 0 +#define SDMA3_BASE__INST6_SEG2 0 +#define SDMA3_BASE__INST6_SEG3 0 +#define SDMA3_BASE__INST6_SEG4 0 +#define SDMA3_BASE__INST6_SEG5 0 + +#define SDMA4_BASE__INST0_SEG0 0x000137A0 +#define SDMA4_BASE__INST0_SEG1 0x0001E800 +#define SDMA4_BASE__INST0_SEG2 0x0042F400 +#define SDMA4_BASE__INST0_SEG3 0 +#define SDMA4_BASE__INST0_SEG4 0 +#define SDMA4_BASE__INST0_SEG5 0 + +#define SDMA4_BASE__INST1_SEG0 0 +#define SDMA4_BASE__INST1_SEG1 0 +#define SDMA4_BASE__INST1_SEG2 0 +#define SDMA4_BASE__INST1_SEG3 0 +#define SDMA4_BASE__INST1_SEG4 0 +#define SDMA4_BASE__INST1_SEG5 0 + +#define SDMA4_BASE__INST2_SEG0 0 +#define SDMA4_BASE__INST2_SEG1 0 +#define SDMA4_BASE__INST2_SEG2 0 +#define SDMA4_BASE__INST2_SEG3 0 +#define SDMA4_BASE__INST2_SEG4 0 +#define SDMA4_BASE__INST2_SEG5 0 + +#define SDMA4_BASE__INST3_SEG0 0 +#define SDMA4_BASE__INST3_SEG1 0 +#define SDMA4_BASE__INST3_SEG2 0 +#define SDMA4_BASE__INST3_SEG3 0 +#define SDMA4_BASE__INST3_SEG4 0 +#define SDMA4_BASE__INST3_SEG5 0 + +#define SDMA4_BASE__INST4_SEG0 0 +#define SDMA4_BASE__INST4_SEG1 0 +#define SDMA4_BASE__INST4_SEG2 0 +#define SDMA4_BASE__INST4_SEG3 0 +#define SDMA4_BASE__INST4_SEG4 0 +#define SDMA4_BASE__INST4_SEG5 0 + +#define SDMA4_BASE__INST5_SEG0 0 +#define SDMA4_BASE__INST5_SEG1 0 +#define SDMA4_BASE__INST5_SEG2 0 +#define SDMA4_BASE__INST5_SEG3 0 +#define SDMA4_BASE__INST5_SEG4 0 +#define SDMA4_BASE__INST5_SEG5 0 + +#define SDMA4_BASE__INST6_SEG0 0 +#define SDMA4_BASE__INST6_SEG1 0 +#define SDMA4_BASE__INST6_SEG2 0 +#define SDMA4_BASE__INST6_SEG3 0 +#define SDMA4_BASE__INST6_SEG4 0 +#define SDMA4_BASE__INST6_SEG5 0 + +#define SDMA5_BASE__INST0_SEG0 
0x000137C0 +#define SDMA5_BASE__INST0_SEG1 0x0001EC00 +#define SDMA5_BASE__INST0_SEG2 0x0042F800 +#define SDMA5_BASE__INST0_SEG3 0 +#define SDMA5_BASE__INST0_SEG4 0 +#define SDMA5_BASE__INST0_SEG5 0 + +#define SDMA5_BASE__INST1_SEG0 0 +#define SDMA5_BASE__INST1_SEG1 0 +#define SDMA5_BASE__INST1_SEG2 0 +#define SDMA5_BASE__INST1_SEG3 0 +#define SDMA5_BASE__INST1_SEG4 0 +#define SDMA5_BASE__INST1_SEG5 0 + +#define SDMA5_BASE__INST2_SEG0 0 +#define SDMA5_BASE__INST2_SEG1 0 +#define SDMA5_BASE__INST2_SEG2 0 +#define SDMA5_BASE__INST2_SEG3 0 +#define SDMA5_BASE__INST2_SEG4 0 +#define SDMA5_BASE__INST2_SEG5 0 + +#define SDMA5_BASE__INST3_SEG0 0 +#define SDMA5_BASE__INST3_SEG1 0 +#define SDMA5_BASE__INST3_SEG2 0 +#define SDMA5_BASE__INST3_SEG3 0 +#define SDMA5_BASE__INST3_SEG4 0 +#define SDMA5_BASE__INST3_SEG5 0 + +#define SDMA5_BASE__INST4_SEG0 0 +#define SDMA5_BASE__INST4_SEG1 0 +#define SDMA5_BASE__INST4_SEG2 0 +#define SDMA5_BASE__INST4_SEG3 0 +#define SDMA5_BASE__INST4_SEG4 0 +#define SDMA5_BASE__INST4_SEG5 0 + +#define SDMA5_BASE__INST5_SEG0 0 +#define SDMA5_BASE__INST5_SEG1 0 +#define SDMA5_BASE__INST5_SEG2 0 +#define SDMA5_BASE__INST5_SEG3 0 +#define SDMA5_BASE__INST5_SEG4 0 +#define SDMA5_BASE__INST5_SEG5 0 + +#define SDMA5_BASE__INST6_SEG0 0 +#define SDMA5_BASE__INST6_SEG1 0 +#define SDMA5_BASE__INST6_SEG2 0 +#define SDMA5_BASE__INST6_SEG3 0 +#define SDMA5_BASE__INST6_SEG4 0 +#define SDMA5_BASE__INST6_SEG5 0 + +#define SDMA6_BASE__INST0_SEG0 0x000137E0 +#define SDMA6_BASE__INST0_SEG1 0x0001F000 +#define SDMA6_BASE__INST0_SEG2 0x0042FC00 +#define SDMA6_BASE__INST0_SEG3 0 +#define SDMA6_BASE__INST0_SEG4 0 +#define SDMA6_BASE__INST0_SEG5 0 + +#define SDMA6_BASE__INST1_SEG0 0 +#define SDMA6_BASE__INST1_SEG1 0 +#define SDMA6_BASE__INST1_SEG2 0 +#define SDMA6_BASE__INST1_SEG3 0 +#define SDMA6_BASE__INST1_SEG4 0 +#define SDMA6_BASE__INST1_SEG5 0 + +#define SDMA6_BASE__INST2_SEG0 0 +#define SDMA6_BASE__INST2_SEG1 0 +#define SDMA6_BASE__INST2_SEG2 0 +#define SDMA6_BASE__INST2_SEG3 0 +#define SDMA6_BASE__INST2_SEG4 0 +#define SDMA6_BASE__INST2_SEG5 0 + +#define SDMA6_BASE__INST3_SEG0 0 +#define SDMA6_BASE__INST3_SEG1 0 +#define SDMA6_BASE__INST3_SEG2 0 +#define SDMA6_BASE__INST3_SEG3 0 +#define SDMA6_BASE__INST3_SEG4 0 +#define SDMA6_BASE__INST3_SEG5 0 + +#define SDMA6_BASE__INST4_SEG0 0 +#define SDMA6_BASE__INST4_SEG1 0 +#define SDMA6_BASE__INST4_SEG2 0 +#define SDMA6_BASE__INST4_SEG3 0 +#define SDMA6_BASE__INST4_SEG4 0 +#define SDMA6_BASE__INST4_SEG5 0 + +#define SDMA6_BASE__INST5_SEG0 0 +#define SDMA6_BASE__INST5_SEG1 0 +#define SDMA6_BASE__INST5_SEG2 0 +#define SDMA6_BASE__INST5_SEG3 0 +#define SDMA6_BASE__INST5_SEG4 0 +#define SDMA6_BASE__INST5_SEG5 0 + +#define SDMA6_BASE__INST6_SEG0 0 +#define SDMA6_BASE__INST6_SEG1 0 +#define SDMA6_BASE__INST6_SEG2 0 +#define SDMA6_BASE__INST6_SEG3 0 +#define SDMA6_BASE__INST6_SEG4 0 +#define SDMA6_BASE__INST6_SEG5 0 + +#define SDMA7_BASE__INST0_SEG0 0x00013800 +#define SDMA7_BASE__INST0_SEG1 0x0001F400 +#define SDMA7_BASE__INST0_SEG2 0x00430000 +#define SDMA7_BASE__INST0_SEG3 0 +#define SDMA7_BASE__INST0_SEG4 0 +#define SDMA7_BASE__INST0_SEG5 0 + +#define SDMA7_BASE__INST1_SEG0 0 +#define SDMA7_BASE__INST1_SEG1 0 +#define SDMA7_BASE__INST1_SEG2 0 +#define SDMA7_BASE__INST1_SEG3 0 +#define SDMA7_BASE__INST1_SEG4 0 +#define SDMA7_BASE__INST1_SEG5 0 + +#define SDMA7_BASE__INST2_SEG0 0 +#define SDMA7_BASE__INST2_SEG1 0 +#define SDMA7_BASE__INST2_SEG2 0 +#define SDMA7_BASE__INST2_SEG3 0 +#define SDMA7_BASE__INST2_SEG4 0 +#define SDMA7_BASE__INST2_SEG5 0 + 
+#define SDMA7_BASE__INST3_SEG0 0 +#define SDMA7_BASE__INST3_SEG1 0 +#define SDMA7_BASE__INST3_SEG2 0 +#define SDMA7_BASE__INST3_SEG3 0 +#define SDMA7_BASE__INST3_SEG4 0 +#define SDMA7_BASE__INST3_SEG5 0 + +#define SDMA7_BASE__INST4_SEG0 0 +#define SDMA7_BASE__INST4_SEG1 0 +#define SDMA7_BASE__INST4_SEG2 0 +#define SDMA7_BASE__INST4_SEG3 0 +#define SDMA7_BASE__INST4_SEG4 0 +#define SDMA7_BASE__INST4_SEG5 0 + +#define SDMA7_BASE__INST5_SEG0 0 +#define SDMA7_BASE__INST5_SEG1 0 +#define SDMA7_BASE__INST5_SEG2 0 +#define SDMA7_BASE__INST5_SEG3 0 +#define SDMA7_BASE__INST5_SEG4 0 +#define SDMA7_BASE__INST5_SEG5 0 + +#define SDMA7_BASE__INST6_SEG0 0 +#define SDMA7_BASE__INST6_SEG1 0 +#define SDMA7_BASE__INST6_SEG2 0 +#define SDMA7_BASE__INST6_SEG3 0 +#define SDMA7_BASE__INST6_SEG4 0 +#define SDMA7_BASE__INST6_SEG5 0 + +#define SMUIO_BASE__INST0_SEG0 0x00012080 +#define SMUIO_BASE__INST0_SEG1 0x00016800 +#define SMUIO_BASE__INST0_SEG2 0x00016A00 +#define SMUIO_BASE__INST0_SEG3 0x00401000 +#define SMUIO_BASE__INST0_SEG4 0x00440000 +#define SMUIO_BASE__INST0_SEG5 0 + +#define SMUIO_BASE__INST1_SEG0 0 +#define SMUIO_BASE__INST1_SEG1 0 +#define SMUIO_BASE__INST1_SEG2 0 +#define SMUIO_BASE__INST1_SEG3 0 +#define SMUIO_BASE__INST1_SEG4 0 +#define SMUIO_BASE__INST1_SEG5 0 + +#define SMUIO_BASE__INST2_SEG0 0 +#define SMUIO_BASE__INST2_SEG1 0 +#define SMUIO_BASE__INST2_SEG2 0 +#define SMUIO_BASE__INST2_SEG3 0 +#define SMUIO_BASE__INST2_SEG4 0 +#define SMUIO_BASE__INST2_SEG5 0 + +#define SMUIO_BASE__INST3_SEG0 0 +#define SMUIO_BASE__INST3_SEG1 0 +#define SMUIO_BASE__INST3_SEG2 0 +#define SMUIO_BASE__INST3_SEG3 0 +#define SMUIO_BASE__INST3_SEG4 0 +#define SMUIO_BASE__INST3_SEG5 0 + +#define SMUIO_BASE__INST4_SEG0 0 +#define SMUIO_BASE__INST4_SEG1 0 +#define SMUIO_BASE__INST4_SEG2 0 +#define SMUIO_BASE__INST4_SEG3 0 +#define SMUIO_BASE__INST4_SEG4 0 +#define SMUIO_BASE__INST4_SEG5 0 + +#define SMUIO_BASE__INST5_SEG0 0 +#define SMUIO_BASE__INST5_SEG1 0 +#define SMUIO_BASE__INST5_SEG2 0 +#define SMUIO_BASE__INST5_SEG3 0 +#define SMUIO_BASE__INST5_SEG4 0 +#define SMUIO_BASE__INST5_SEG5 0 + +#define SMUIO_BASE__INST6_SEG0 0 +#define SMUIO_BASE__INST6_SEG1 0 +#define SMUIO_BASE__INST6_SEG2 0 +#define SMUIO_BASE__INST6_SEG3 0 +#define SMUIO_BASE__INST6_SEG4 0 +#define SMUIO_BASE__INST6_SEG5 0 + +#define SMUIO_BASE__INST7_SEG0 0 +#define SMUIO_BASE__INST7_SEG1 0 +#define SMUIO_BASE__INST7_SEG2 0 +#define SMUIO_BASE__INST7_SEG3 0 +#define SMUIO_BASE__INST7_SEG4 0 +#define SMUIO_BASE__INST7_SEG5 0 + +#define THM_BASE__INST0_SEG0 0x00012060 +#define THM_BASE__INST0_SEG1 0x00016600 +#define THM_BASE__INST0_SEG2 0x00400C00 +#define THM_BASE__INST0_SEG3 0 +#define THM_BASE__INST0_SEG4 0 +#define THM_BASE__INST0_SEG5 0 + +#define THM_BASE__INST1_SEG0 0 +#define THM_BASE__INST1_SEG1 0 +#define THM_BASE__INST1_SEG2 0 +#define THM_BASE__INST1_SEG3 0 +#define THM_BASE__INST1_SEG4 0 +#define THM_BASE__INST1_SEG5 0 + +#define THM_BASE__INST2_SEG0 0 +#define THM_BASE__INST2_SEG1 0 +#define THM_BASE__INST2_SEG2 0 +#define THM_BASE__INST2_SEG3 0 +#define THM_BASE__INST2_SEG4 0 +#define THM_BASE__INST2_SEG5 0 + +#define THM_BASE__INST3_SEG0 0 +#define THM_BASE__INST3_SEG1 0 +#define THM_BASE__INST3_SEG2 0 +#define THM_BASE__INST3_SEG3 0 +#define THM_BASE__INST3_SEG4 0 +#define THM_BASE__INST3_SEG5 0 + +#define THM_BASE__INST4_SEG0 0 +#define THM_BASE__INST4_SEG1 0 +#define THM_BASE__INST4_SEG2 0 +#define THM_BASE__INST4_SEG3 0 +#define THM_BASE__INST4_SEG4 0 +#define THM_BASE__INST4_SEG5 0 + +#define THM_BASE__INST5_SEG0 0 +#define 
THM_BASE__INST5_SEG1 0 +#define THM_BASE__INST5_SEG2 0 +#define THM_BASE__INST5_SEG3 0 +#define THM_BASE__INST5_SEG4 0 +#define THM_BASE__INST5_SEG5 0 + +#define THM_BASE__INST6_SEG0 0 +#define THM_BASE__INST6_SEG1 0 +#define THM_BASE__INST6_SEG2 0 +#define THM_BASE__INST6_SEG3 0 +#define THM_BASE__INST6_SEG4 0 +#define THM_BASE__INST6_SEG5 0 + +#define THM_BASE__INST7_SEG0 0 +#define THM_BASE__INST7_SEG1 0 +#define THM_BASE__INST7_SEG2 0 +#define THM_BASE__INST7_SEG3 0 +#define THM_BASE__INST7_SEG4 0 +#define THM_BASE__INST7_SEG5 0 + +#define UMC_BASE__INST0_SEG0 0x000132C0 +#define UMC_BASE__INST0_SEG1 0x00014000 +#define UMC_BASE__INST0_SEG2 0x00425800 +#define UMC_BASE__INST0_SEG3 0 +#define UMC_BASE__INST0_SEG4 0 +#define UMC_BASE__INST0_SEG5 0 + +#define UMC_BASE__INST1_SEG0 0x000132E0 +#define UMC_BASE__INST1_SEG1 0x00054000 +#define UMC_BASE__INST1_SEG2 0x00425C00 +#define UMC_BASE__INST1_SEG3 0 +#define UMC_BASE__INST1_SEG4 0 +#define UMC_BASE__INST1_SEG5 0 + +#define UMC_BASE__INST2_SEG0 0x00013300 +#define UMC_BASE__INST2_SEG1 0x00094000 +#define UMC_BASE__INST2_SEG2 0x00426000 +#define UMC_BASE__INST2_SEG3 0 +#define UMC_BASE__INST2_SEG4 0 +#define UMC_BASE__INST2_SEG5 0 + +#define UMC_BASE__INST3_SEG0 0x00013320 +#define UMC_BASE__INST3_SEG1 0x000D4000 +#define UMC_BASE__INST3_SEG2 0x00426400 +#define UMC_BASE__INST3_SEG3 0 +#define UMC_BASE__INST3_SEG4 0 +#define UMC_BASE__INST3_SEG5 0 + +#define UMC_BASE__INST4_SEG0 0x00013340 +#define UMC_BASE__INST4_SEG1 0x00114000 +#define UMC_BASE__INST4_SEG2 0x00426800 +#define UMC_BASE__INST4_SEG3 0 +#define UMC_BASE__INST4_SEG4 0 +#define UMC_BASE__INST4_SEG5 0 + +#define UMC_BASE__INST5_SEG0 0x00013360 +#define UMC_BASE__INST5_SEG1 0x00154000 +#define UMC_BASE__INST5_SEG2 0x00426C00 +#define UMC_BASE__INST5_SEG3 0 +#define UMC_BASE__INST5_SEG4 0 +#define UMC_BASE__INST5_SEG5 0 + +#define UMC_BASE__INST6_SEG0 0x00013380 +#define UMC_BASE__INST6_SEG1 0x00194000 +#define UMC_BASE__INST6_SEG2 0x00427000 +#define UMC_BASE__INST6_SEG3 0 +#define UMC_BASE__INST6_SEG4 0 +#define UMC_BASE__INST6_SEG5 0 + +#define UMC_BASE__INST7_SEG0 0x000133A0 +#define UMC_BASE__INST7_SEG1 0x001D4000 +#define UMC_BASE__INST7_SEG2 0x00427400 +#define UMC_BASE__INST7_SEG3 0 +#define UMC_BASE__INST7_SEG4 0 +#define UMC_BASE__INST7_SEG5 0 + +#define UVD_BASE__INST0_SEG0 0x00007800 +#define UVD_BASE__INST0_SEG1 0x00007E00 +#define UVD_BASE__INST0_SEG2 0x00012180 +#define UVD_BASE__INST0_SEG3 0x00403000 +#define UVD_BASE__INST0_SEG4 0 +#define UVD_BASE__INST0_SEG5 0 + +#define UVD_BASE__INST1_SEG0 0x00007A00 +#define UVD_BASE__INST1_SEG1 0x00009000 +#define UVD_BASE__INST1_SEG2 0x000136E0 +#define UVD_BASE__INST1_SEG3 0x0042DC00 +#define UVD_BASE__INST1_SEG4 0 +#define UVD_BASE__INST1_SEG5 0 + +#define UVD_BASE__INST2_SEG0 0 +#define UVD_BASE__INST2_SEG1 0 +#define UVD_BASE__INST2_SEG2 0 +#define UVD_BASE__INST2_SEG3 0 +#define UVD_BASE__INST2_SEG4 0 +#define UVD_BASE__INST2_SEG5 0 + +#define UVD_BASE__INST3_SEG0 0 +#define UVD_BASE__INST3_SEG1 0 +#define UVD_BASE__INST3_SEG2 0 +#define UVD_BASE__INST3_SEG3 0 +#define UVD_BASE__INST3_SEG4 0 +#define UVD_BASE__INST3_SEG5 0 + +#define UVD_BASE__INST4_SEG0 0 +#define UVD_BASE__INST4_SEG1 0 +#define UVD_BASE__INST4_SEG2 0 +#define UVD_BASE__INST4_SEG3 0 +#define UVD_BASE__INST4_SEG4 0 +#define UVD_BASE__INST4_SEG5 0 + +#define UVD_BASE__INST5_SEG0 0 +#define UVD_BASE__INST5_SEG1 0 +#define UVD_BASE__INST5_SEG2 0 +#define UVD_BASE__INST5_SEG3 0 +#define UVD_BASE__INST5_SEG4 0 +#define UVD_BASE__INST5_SEG5 0 + +#define 
UVD_BASE__INST6_SEG0 0 +#define UVD_BASE__INST6_SEG1 0 +#define UVD_BASE__INST6_SEG2 0 +#define UVD_BASE__INST6_SEG3 0 +#define UVD_BASE__INST6_SEG4 0 +#define UVD_BASE__INST6_SEG5 0 + +#define UVD_BASE__INST7_SEG0 0 +#define UVD_BASE__INST7_SEG1 0 +#define UVD_BASE__INST7_SEG2 0 +#define UVD_BASE__INST7_SEG3 0 +#define UVD_BASE__INST7_SEG4 0 +#define UVD_BASE__INST7_SEG5 0 + +#define DBGU_IO_BASE__INST0_SEG0 0x000001E0 +#define DBGU_IO_BASE__INST0_SEG1 0x000125A0 +#define DBGU_IO_BASE__INST0_SEG2 0x0040B400 +#define DBGU_IO_BASE__INST0_SEG3 0 +#define DBGU_IO_BASE__INST0_SEG4 0 +#define DBGU_IO_BASE__INST0_SEG5 0 + +#define DBGU_IO_BASE__INST1_SEG0 0 +#define DBGU_IO_BASE__INST1_SEG1 0 +#define DBGU_IO_BASE__INST1_SEG2 0 +#define DBGU_IO_BASE__INST1_SEG3 0 +#define DBGU_IO_BASE__INST1_SEG4 0 +#define DBGU_IO_BASE__INST1_SEG5 0 + +#define DBGU_IO_BASE__INST2_SEG0 0 +#define DBGU_IO_BASE__INST2_SEG1 0 +#define DBGU_IO_BASE__INST2_SEG2 0 +#define DBGU_IO_BASE__INST2_SEG3 0 +#define DBGU_IO_BASE__INST2_SEG4 0 +#define DBGU_IO_BASE__INST2_SEG5 0 + +#define DBGU_IO_BASE__INST3_SEG0 0 +#define DBGU_IO_BASE__INST3_SEG1 0 +#define DBGU_IO_BASE__INST3_SEG2 0 +#define DBGU_IO_BASE__INST3_SEG3 0 +#define DBGU_IO_BASE__INST3_SEG4 0 +#define DBGU_IO_BASE__INST3_SEG5 0 + +#define DBGU_IO_BASE__INST4_SEG0 0 +#define DBGU_IO_BASE__INST4_SEG1 0 +#define DBGU_IO_BASE__INST4_SEG2 0 +#define DBGU_IO_BASE__INST4_SEG3 0 +#define DBGU_IO_BASE__INST4_SEG4 0 +#define DBGU_IO_BASE__INST4_SEG5 0 + +#define DBGU_IO_BASE__INST5_SEG0 0 +#define DBGU_IO_BASE__INST5_SEG1 0 +#define DBGU_IO_BASE__INST5_SEG2 0 +#define DBGU_IO_BASE__INST5_SEG3 0 +#define DBGU_IO_BASE__INST5_SEG4 0 +#define DBGU_IO_BASE__INST5_SEG5 0 + +#define DBGU_IO_BASE__INST6_SEG0 0 +#define DBGU_IO_BASE__INST6_SEG1 0 +#define DBGU_IO_BASE__INST6_SEG2 0 +#define DBGU_IO_BASE__INST6_SEG3 0 +#define DBGU_IO_BASE__INST6_SEG4 0 +#define DBGU_IO_BASE__INST6_SEG5 0 + +#define DBGU_IO_BASE__INST7_SEG0 0 +#define DBGU_IO_BASE__INST7_SEG1 0 +#define DBGU_IO_BASE__INST7_SEG2 0 +#define DBGU_IO_BASE__INST7_SEG3 0 +#define DBGU_IO_BASE__INST7_SEG4 0 +#define DBGU_IO_BASE__INST7_SEG5 0 + +#define RSMU_BASE__INST0_SEG0 0x00012000 +#define RSMU_BASE__INST0_SEG1 0 +#define RSMU_BASE__INST0_SEG2 0 +#define RSMU_BASE__INST0_SEG3 0 +#define RSMU_BASE__INST0_SEG4 0 +#define RSMU_BASE__INST0_SEG5 0 + +#define RSMU_BASE__INST1_SEG0 0 +#define RSMU_BASE__INST1_SEG1 0 +#define RSMU_BASE__INST1_SEG2 0 +#define RSMU_BASE__INST1_SEG3 0 +#define RSMU_BASE__INST1_SEG4 0 +#define RSMU_BASE__INST1_SEG5 0 + +#define RSMU_BASE__INST2_SEG0 0 +#define RSMU_BASE__INST2_SEG1 0 +#define RSMU_BASE__INST2_SEG2 0 +#define RSMU_BASE__INST2_SEG3 0 +#define RSMU_BASE__INST2_SEG4 0 +#define RSMU_BASE__INST2_SEG5 0 + +#define RSMU_BASE__INST3_SEG0 0 +#define RSMU_BASE__INST3_SEG1 0 +#define RSMU_BASE__INST3_SEG2 0 +#define RSMU_BASE__INST3_SEG3 0 +#define RSMU_BASE__INST3_SEG4 0 +#define RSMU_BASE__INST3_SEG5 0 + +#define RSMU_BASE__INST4_SEG0 0 +#define RSMU_BASE__INST4_SEG1 0 +#define RSMU_BASE__INST4_SEG2 0 +#define RSMU_BASE__INST4_SEG3 0 +#define RSMU_BASE__INST4_SEG4 0 +#define RSMU_BASE__INST4_SEG5 0 + +#define RSMU_BASE__INST5_SEG0 0 +#define RSMU_BASE__INST5_SEG1 0 +#define RSMU_BASE__INST5_SEG2 0 +#define RSMU_BASE__INST5_SEG3 0 +#define RSMU_BASE__INST5_SEG4 0 +#define RSMU_BASE__INST5_SEG5 0 + +#define RSMU_BASE__INST6_SEG0 0 +#define RSMU_BASE__INST6_SEG1 0 +#define RSMU_BASE__INST6_SEG2 0 +#define RSMU_BASE__INST6_SEG3 0 +#define RSMU_BASE__INST6_SEG4 0 +#define RSMU_BASE__INST6_SEG5 
0 + +#define RSMU_BASE__INST7_SEG0 0 +#define RSMU_BASE__INST7_SEG1 0 +#define RSMU_BASE__INST7_SEG2 0 +#define RSMU_BASE__INST7_SEG3 0 +#define RSMU_BASE__INST7_SEG4 0 +#define RSMU_BASE__INST7_SEG5 0 + + +#endif -- cgit v1.2.3 From d6c3b24ea28ddae06c9ff9fe9ae58664144d7ae8 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 9 Jul 2019 09:16:01 -0500 Subject: drm/amdgpu: add Arcturus asic type Add asic type for Arcturus. Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 + include/drm/amd_asic_type.h | 1 + 2 files changed, 2 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 6aa3c3e5bd50..c1edf105c660 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -99,6 +99,7 @@ static const char *amdgpu_asic_name[] = { "VEGA12", "VEGA20", "RAVEN", + "ARCTURUS", "NAVI10", "NAVI14", "LAST", diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h index 0c4766af04af..0f5a12a99948 100644 --- a/include/drm/amd_asic_type.h +++ b/include/drm/amd_asic_type.h @@ -49,6 +49,7 @@ enum amd_asic_type { CHIP_VEGA12, CHIP_VEGA20, CHIP_RAVEN, + CHIP_ARCTURUS, CHIP_NAVI10, CHIP_NAVI14, CHIP_LAST, -- cgit v1.2.3 From 3de2ff5d60d0695568dc8333947b0446a35d49d8 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 4 Sep 2018 14:52:25 +0800 Subject: drm/amdgpu: add gmc basic support for Arcturus Add initial GMC support for Arcturus Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 73f3b79ab131..d1ede86cbdb0 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -887,6 +887,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev) case CHIP_VEGA10: /* all engines support GPUVM */ case CHIP_VEGA12: /* all engines support GPUVM */ case CHIP_VEGA20: + case CHIP_ARCTURUS: default: adev->gmc.gart_size = 512ULL << 20; break; @@ -1002,6 +1003,10 @@ static int gmc_v9_0_sw_init(void *handle) else amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); break; + case CHIP_ARCTURUS: + /* Keep the vm size same with Vega20 */ + amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); + break; default: break; } -- cgit v1.2.3 From a2d15ed733365ba2e0e3eb55a83c7a493eaaaa2c Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 16 Jul 2019 13:29:19 -0500 Subject: drm/amdgpu: rename AMDGPU_GFXHUB/MMHUB macro with hub number The number of GFXHUB/MMHUB may be expanded in later ASICs. 
Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 4 ++-- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 8 ++++---- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 8 ++++---- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 30 +++++++++++++++--------------- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 6 +++--- drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 6 +++--- drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 6 +++--- 16 files changed, 46 insertions(+), 46 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 24c3c05e2fb7..4726b5176417 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -3060,12 +3060,12 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) switch (args->in.op) { case AMDGPU_VM_OP_RESERVE_VMID: /* current, we only have requirement to reserve vmid from gfxhub */ - r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB); + r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0); if (r) return r; break; case AMDGPU_VM_OP_UNRESERVE_VMID: - amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB); + amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0); break; default: return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 489a162ca620..8e78b81d0a05 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -101,8 +101,8 @@ struct amdgpu_bo_list_entry; /* max number of VMHUB */ #define AMDGPU_MAX_VMHUBS 2 -#define AMDGPU_GFXHUB 0 -#define AMDGPU_MMHUB 1 +#define AMDGPU_GFXHUB_0 0 +#define AMDGPU_MMHUB_0 1 /* hardcode that limit for now */ #define AMDGPU_VA_RESERVED_SIZE (1ULL << 20) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index ec11bfded772..5b5ea9b41c12 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -1603,7 +1603,7 @@ static void gfx_v10_0_constants_init(struct amdgpu_device *adev) /* XXX SH_MEM regs */ /* where to put LDS, scratch, GPUVM in FSA64 space */ mutex_lock(&adev->srbm_mutex); - for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids; i++) { + for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) { nv_grbm_select(adev, 0, 0, 0, i); /* CP and shaders */ WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG); @@ -5005,7 +5005,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = { .align_mask = 0xff, .nop = PACKET3(PACKET3_NOP, 0x3FFF), .support_64bit_ptrs = true, - .vmhub = AMDGPU_GFXHUB, + .vmhub = AMDGPU_GFXHUB_0, .get_rptr = gfx_v10_0_ring_get_rptr_gfx, .get_wptr = gfx_v10_0_ring_get_wptr_gfx, .set_wptr = gfx_v10_0_ring_set_wptr_gfx, @@ -5056,7 +5056,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = { .align_mask = 0xff, .nop = PACKET3(PACKET3_NOP, 0x3FFF), .support_64bit_ptrs = true, - .vmhub = AMDGPU_GFXHUB, + .vmhub = AMDGPU_GFXHUB_0, .get_rptr = 
gfx_v10_0_ring_get_rptr_compute, .get_wptr = gfx_v10_0_ring_get_wptr_compute, .set_wptr = gfx_v10_0_ring_set_wptr_compute, @@ -5089,7 +5089,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = { .align_mask = 0xff, .nop = PACKET3(PACKET3_NOP, 0x3FFF), .support_64bit_ptrs = true, - .vmhub = AMDGPU_GFXHUB, + .vmhub = AMDGPU_GFXHUB_0, .get_rptr = gfx_v10_0_ring_get_rptr_compute, .get_wptr = gfx_v10_0_ring_get_wptr_compute, .set_wptr = gfx_v10_0_ring_set_wptr_compute, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index f4c4eea62526..eca9ea779649 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1936,7 +1936,7 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev) /* XXX SH_MEM regs */ /* where to put LDS, scratch, GPUVM in FSA64 space */ mutex_lock(&adev->srbm_mutex); - for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids; i++) { + for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) { soc15_grbm_select(adev, 0, 0, 0, i); /* CP and shaders */ if (i == 0) { @@ -5174,7 +5174,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = { .align_mask = 0xff, .nop = PACKET3(PACKET3_NOP, 0x3FFF), .support_64bit_ptrs = true, - .vmhub = AMDGPU_GFXHUB, + .vmhub = AMDGPU_GFXHUB_0, .get_rptr = gfx_v9_0_ring_get_rptr_gfx, .get_wptr = gfx_v9_0_ring_get_wptr_gfx, .set_wptr = gfx_v9_0_ring_set_wptr_gfx, @@ -5225,7 +5225,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = { .align_mask = 0xff, .nop = PACKET3(PACKET3_NOP, 0x3FFF), .support_64bit_ptrs = true, - .vmhub = AMDGPU_GFXHUB, + .vmhub = AMDGPU_GFXHUB_0, .get_rptr = gfx_v9_0_ring_get_rptr_compute, .get_wptr = gfx_v9_0_ring_get_wptr_compute, .set_wptr = gfx_v9_0_ring_set_wptr_compute, @@ -5260,7 +5260,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = { .align_mask = 0xff, .nop = PACKET3(PACKET3_NOP, 0x3FFF), .support_64bit_ptrs = true, - .vmhub = AMDGPU_GFXHUB, + .vmhub = AMDGPU_GFXHUB_0, .get_rptr = gfx_v9_0_ring_get_rptr_compute, .get_wptr = gfx_v9_0_ring_get_wptr_compute, .set_wptr = gfx_v9_0_ring_set_wptr_compute, diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index 15986748f59f..6ce37ce77d14 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -357,7 +357,7 @@ void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, void gfxhub_v1_0_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(GC, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c index d605b4963f8a..8ce5bf5feb45 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c @@ -333,7 +333,7 @@ void gfxhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev, void gfxhub_v2_0_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(GC, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index 8a1e23c6eee6..f52823ffc7fd 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -62,7 +62,7 @@ gmc_v10_0_vm_fault_interrupt_state(struct 
amdgpu_device *adev, struct amdgpu_vmhub *hub; u32 tmp, reg, bits[AMDGPU_MAX_VMHUBS], i; - bits[AMDGPU_GFXHUB] = GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + bits[AMDGPU_GFXHUB_0] = GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | @@ -70,7 +70,7 @@ gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev, GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK; - bits[AMDGPU_MMHUB] = MMVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | + bits[AMDGPU_MMHUB_0] = MMVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | MMVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | MMVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | MMVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | @@ -81,39 +81,39 @@ gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev, switch (state) { case AMDGPU_IRQ_STATE_DISABLE: /* MM HUB */ - hub = &adev->vmhub[AMDGPU_MMHUB]; + hub = &adev->vmhub[AMDGPU_MMHUB_0]; for (i = 0; i < 16; i++) { reg = hub->vm_context0_cntl + i; tmp = RREG32(reg); - tmp &= ~bits[AMDGPU_MMHUB]; + tmp &= ~bits[AMDGPU_MMHUB_0]; WREG32(reg, tmp); } /* GFX HUB */ - hub = &adev->vmhub[AMDGPU_GFXHUB]; + hub = &adev->vmhub[AMDGPU_GFXHUB_0]; for (i = 0; i < 16; i++) { reg = hub->vm_context0_cntl + i; tmp = RREG32(reg); - tmp &= ~bits[AMDGPU_GFXHUB]; + tmp &= ~bits[AMDGPU_GFXHUB_0]; WREG32(reg, tmp); } break; case AMDGPU_IRQ_STATE_ENABLE: /* MM HUB */ - hub = &adev->vmhub[AMDGPU_MMHUB]; + hub = &adev->vmhub[AMDGPU_MMHUB_0]; for (i = 0; i < 16; i++) { reg = hub->vm_context0_cntl + i; tmp = RREG32(reg); - tmp |= bits[AMDGPU_MMHUB]; + tmp |= bits[AMDGPU_MMHUB_0]; WREG32(reg, tmp); } /* GFX HUB */ - hub = &adev->vmhub[AMDGPU_GFXHUB]; + hub = &adev->vmhub[AMDGPU_GFXHUB_0]; for (i = 0; i < 16; i++) { reg = hub->vm_context0_cntl + i; tmp = RREG32(reg); - tmp |= bits[AMDGPU_GFXHUB]; + tmp |= bits[AMDGPU_GFXHUB_0]; WREG32(reg, tmp); } break; @@ -244,11 +244,11 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, mutex_lock(&adev->mman.gtt_window_lock); - gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB, 0); + gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB_0, 0); if (!adev->mman.buffer_funcs_enabled || !adev->ib_pool_ready || adev->in_gpu_reset) { - gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB, 0); + gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB_0, 0); mutex_unlock(&adev->mman.gtt_window_lock); return; } @@ -313,7 +313,7 @@ static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid struct amdgpu_device *adev = ring->adev; uint32_t reg; - if (ring->funcs->vmhub == AMDGPU_GFXHUB) + if (ring->funcs->vmhub == AMDGPU_GFXHUB_0) reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid; else reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid; @@ -682,8 +682,8 @@ static int gmc_v10_0_sw_init(void *handle) * amdgpu graphics/compute will use VMIDs 1-7 * amdkfd will use VMIDs 8-15 */ - adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS; - adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS; + adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS; + adev->vm_manager.id_mgr[AMDGPU_MMHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS; 
amdgpu_vm_manager_init(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index d1ede86cbdb0..ad45d633b147 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -480,7 +480,7 @@ static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid, struct amdgpu_device *adev = ring->adev; uint32_t reg; - if (ring->funcs->vmhub == AMDGPU_GFXHUB) + if (ring->funcs->vmhub == AMDGPU_GFXHUB_0) reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid; else reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid; @@ -1082,8 +1082,8 @@ static int gmc_v9_0_sw_init(void *handle) * amdgpu graphics/compute will use VMIDs 1-7 * amdkfd will use VMIDs 8-15 */ - adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS; - adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS; + adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS; + adev->vm_manager.id_mgr[AMDGPU_MMHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS; amdgpu_vm_manager_init(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index dc5ce03034d3..3abd02bd5222 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -407,7 +407,7 @@ void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value) void mmhub_v1_0_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(MMHUB, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c index a5c7ed1f37eb..d2f4775299c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c @@ -324,7 +324,7 @@ void mmhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev, bool value) void mmhub_v2_0_init(struct amdgpu_device *adev) { - struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB]; + struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; hub->ctx0_ptb_addr_lo32 = SOC15_REG_OFFSET(MMHUB, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 4428018672d3..4654846fb580 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -2133,7 +2133,7 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = { .align_mask = 0xf, .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), .support_64bit_ptrs = true, - .vmhub = AMDGPU_MMHUB, + .vmhub = AMDGPU_MMHUB_0, .get_rptr = sdma_v4_0_ring_get_rptr, .get_wptr = sdma_v4_0_ring_get_wptr, .set_wptr = sdma_v4_0_ring_set_wptr, @@ -2165,7 +2165,7 @@ static const struct amdgpu_ring_funcs sdma_v4_0_page_ring_funcs = { .align_mask = 0xf, .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), .support_64bit_ptrs = true, - .vmhub = AMDGPU_MMHUB, + .vmhub = AMDGPU_MMHUB_0, .get_rptr = sdma_v4_0_ring_get_rptr, .get_wptr = sdma_v4_0_page_ring_get_wptr, .set_wptr = sdma_v4_0_page_ring_set_wptr, diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index 89893261f145..3e536140bfd6 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -1554,7 +1554,7 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = { .align_mask = 0xf, .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), .support_64bit_ptrs = true, - .vmhub = AMDGPU_GFXHUB, + .vmhub = AMDGPU_GFXHUB_0, .get_rptr 
= sdma_v5_0_ring_get_rptr, .get_wptr = sdma_v5_0_ring_get_wptr, .set_wptr = sdma_v5_0_ring_set_wptr, diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index a6bfe7651d07..01f658fa72c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -1763,7 +1763,7 @@ static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = { .align_mask = 0xf, .support_64bit_ptrs = false, .no_user_fence = true, - .vmhub = AMDGPU_MMHUB, + .vmhub = AMDGPU_MMHUB_0, .get_rptr = uvd_v7_0_ring_get_rptr, .get_wptr = uvd_v7_0_ring_get_wptr, .set_wptr = uvd_v7_0_ring_set_wptr, @@ -1796,7 +1796,7 @@ static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = { .nop = HEVC_ENC_CMD_NO_OP, .support_64bit_ptrs = false, .no_user_fence = true, - .vmhub = AMDGPU_MMHUB, + .vmhub = AMDGPU_MMHUB_0, .get_rptr = uvd_v7_0_enc_ring_get_rptr, .get_wptr = uvd_v7_0_enc_ring_get_wptr, .set_wptr = uvd_v7_0_enc_ring_set_wptr, diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c index eafbe8d8248d..683701cf7270 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c @@ -1070,7 +1070,7 @@ static const struct amdgpu_ring_funcs vce_v4_0_ring_vm_funcs = { .nop = VCE_CMD_NO_OP, .support_64bit_ptrs = false, .no_user_fence = true, - .vmhub = AMDGPU_MMHUB, + .vmhub = AMDGPU_MMHUB_0, .get_rptr = vce_v4_0_ring_get_rptr, .get_wptr = vce_v4_0_ring_get_wptr, .set_wptr = vce_v4_0_ring_set_wptr, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index dde22b7d140d..916e32533c1b 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -2198,7 +2198,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = { .align_mask = 0xf, .support_64bit_ptrs = false, .no_user_fence = true, - .vmhub = AMDGPU_MMHUB, + .vmhub = AMDGPU_MMHUB_0, .get_rptr = vcn_v1_0_dec_ring_get_rptr, .get_wptr = vcn_v1_0_dec_ring_get_wptr, .set_wptr = vcn_v1_0_dec_ring_set_wptr, @@ -2232,7 +2232,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = { .nop = VCN_ENC_CMD_NO_OP, .support_64bit_ptrs = false, .no_user_fence = true, - .vmhub = AMDGPU_MMHUB, + .vmhub = AMDGPU_MMHUB_0, .get_rptr = vcn_v1_0_enc_ring_get_rptr, .get_wptr = vcn_v1_0_enc_ring_get_wptr, .set_wptr = vcn_v1_0_enc_ring_set_wptr, @@ -2264,7 +2264,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_jpeg_ring_vm_funcs = { .nop = PACKET0(0x81ff, 0), .support_64bit_ptrs = false, .no_user_fence = true, - .vmhub = AMDGPU_MMHUB, + .vmhub = AMDGPU_MMHUB_0, .extra_dw = 64, .get_rptr = vcn_v1_0_jpeg_ring_get_rptr, .get_wptr = vcn_v1_0_jpeg_ring_get_wptr, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index 988c0adaca91..c701868dd57f 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -2131,7 +2131,7 @@ static const struct amd_ip_funcs vcn_v2_0_ip_funcs = { static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_DEC, .align_mask = 0xf, - .vmhub = AMDGPU_MMHUB, + .vmhub = AMDGPU_MMHUB_0, .get_rptr = vcn_v2_0_dec_ring_get_rptr, .get_wptr = vcn_v2_0_dec_ring_get_wptr, .set_wptr = vcn_v2_0_dec_ring_set_wptr, @@ -2162,7 +2162,7 @@ static const struct amdgpu_ring_funcs vcn_v2_0_enc_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_ENC, .align_mask = 0x3f, .nop = VCN_ENC_CMD_NO_OP, - .vmhub = AMDGPU_MMHUB, + .vmhub = AMDGPU_MMHUB_0, 
.get_rptr = vcn_v2_0_enc_ring_get_rptr, .get_wptr = vcn_v2_0_enc_ring_get_wptr, .set_wptr = vcn_v2_0_enc_ring_set_wptr, @@ -2191,7 +2191,7 @@ static const struct amdgpu_ring_funcs vcn_v2_0_enc_ring_vm_funcs = { static const struct amdgpu_ring_funcs vcn_v2_0_jpeg_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_JPEG, .align_mask = 0xf, - .vmhub = AMDGPU_MMHUB, + .vmhub = AMDGPU_MMHUB_0, .get_rptr = vcn_v2_0_jpeg_ring_get_rptr, .get_wptr = vcn_v2_0_jpeg_ring_get_wptr, .set_wptr = vcn_v2_0_jpeg_ring_set_wptr, -- cgit v1.2.3 From 1daa2bfa17aa5ace20bb02509cd3d2c2c0afc642 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Fri, 31 Aug 2018 14:17:28 +0800 Subject: drm/amdgpu: add new member in amdgpu_device for vmhub counts per asic chip It aims to replace AMDGPU_MAX_VMHUBS in for loop to initialize registers. Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 1 + drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 10 +++++++--- 3 files changed, 9 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index ca82fef421e1..e41f489a8dc2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -836,6 +836,7 @@ struct amdgpu_device { dma_addr_t dummy_page_addr; struct amdgpu_vm_manager vm_manager; struct amdgpu_vmhub vmhub[AMDGPU_MAX_VMHUBS]; + unsigned num_vmhubs; /* memory management */ struct amdgpu_mman mman; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index f52823ffc7fd..0fd85cb15322 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -603,6 +603,7 @@ static int gmc_v10_0_sw_init(void *handle) switch (adev->asic_type) { case CHIP_NAVI10: case CHIP_NAVI14: + adev->num_vmhubs = 2; /* * To fulfill 4-level page support, * vm size is 256TB (48bit), maximum size of Navi10/Navi14, diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index ad45d633b147..2afc37237ad3 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -284,7 +284,7 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev, switch (state) { case AMDGPU_IRQ_STATE_DISABLE: - for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) { + for (j = 0; j < adev->num_vmhubs; j++) { hub = &adev->vmhub[j]; for (i = 0; i < 16; i++) { reg = hub->vm_context0_cntl + i; @@ -295,7 +295,7 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev, } break; case AMDGPU_IRQ_STATE_ENABLE: - for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) { + for (j = 0; j < adev->num_vmhubs; j++) { hub = &adev->vmhub[j]; for (i = 0; i < 16; i++) { reg = hub->vm_context0_cntl + i; @@ -419,7 +419,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, const unsigned eng = 17; unsigned i, j; - for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { + for (i = 0; i < adev->num_vmhubs; ++i) { struct amdgpu_vmhub *hub = &adev->vmhub[i]; u32 tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type); @@ -980,6 +980,8 @@ static int gmc_v9_0_sw_init(void *handle) adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev); switch (adev->asic_type) { case CHIP_RAVEN: + adev->num_vmhubs = 2; + if (adev->rev_id == 0x0 || adev->rev_id == 0x1) { amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); } else { @@ -992,6 +994,8 @@ static int gmc_v9_0_sw_init(void *handle) case CHIP_VEGA10: case 
CHIP_VEGA12: case CHIP_VEGA20: + adev->num_vmhubs = 2; + /* * To fulfill 4-level page support, * vm size is 256TB (48bit), maximum size of Vega10, -- cgit v1.2.3 From c8a6e2a3170064c1f476407139c3dd97d9a9087c Mon Sep 17 00:00:00 2001 From: Le Ma Date: Fri, 31 Aug 2018 14:46:47 +0800 Subject: drm/amdgpu: add one more mmhub instance for Arcturus (v2) v2: set mmhub num under CHIP_ARCTURUS switch case and add one more mmhub id_mgr Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 3 ++- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 6 +++++- 2 files changed, 7 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 8e78b81d0a05..42daf8877c3a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -100,9 +100,10 @@ struct amdgpu_bo_list_entry; #define AMDGPU_VM_FAULT_STOP_ALWAYS 2 /* max number of VMHUB */ -#define AMDGPU_MAX_VMHUBS 2 +#define AMDGPU_MAX_VMHUBS 3 #define AMDGPU_GFXHUB_0 0 #define AMDGPU_MMHUB_0 1 +#define AMDGPU_MMHUB_1 2 /* hardcode that limit for now */ #define AMDGPU_VA_RESERVED_SIZE (1ULL << 20) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 2afc37237ad3..08cdb5ff7228 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -641,7 +641,8 @@ static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev) { struct amdgpu_ring *ring; unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] = - {GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP}; + {GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP, + GFXHUB_FREE_VM_INV_ENGS_BITMAP}; unsigned i; unsigned vmhub, inv_eng; @@ -1008,6 +1009,8 @@ static int gmc_v9_0_sw_init(void *handle) amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); break; case CHIP_ARCTURUS: + adev->num_vmhubs = 3; + /* Keep the vm size same with Vega20 */ amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); break; @@ -1088,6 +1091,7 @@ static int gmc_v9_0_sw_init(void *handle) */ adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS; adev->vm_manager.id_mgr[AMDGPU_MMHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS; + adev->vm_manager.id_mgr[AMDGPU_MMHUB_1].num_ids = AMDGPU_NUM_OF_VMIDS; amdgpu_vm_manager_init(adev); -- cgit v1.2.3 From 2cb2ea1e073fb8d8c1850383b70b78e69ef8b3cc Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 9 Jul 2019 09:18:03 -0500 Subject: drm/amdgpu: add mmhub v9.4.1 block for Arcturus (v2) Arcturus has an updated mmhub block. mmhub is the memory controller hub used for sdma and multimedia.
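The new file below programs each register once per hub by adding a fixed per-instance stride to the register offset. The following stand-alone sketch is illustrative only and is not part of the patch: the two constants are the ones defined in mmhub_v9_4.c, while the write helper and the 0x1234 register offset are stand-ins for the driver's WREG32_SOC15_OFFSET() macro and the real MMHUB registers.

/* Illustration only: the per-instance register addressing used throughout
 * the new mmhub_v9_4.c, with a stub in place of the SOC15 register helpers.
 */
#include <stdio.h>

#define MMHUB_NUM_INSTANCES            2       /* Arcturus has two MMHUB instances */
#define MMHUB_INSTANCE_REGISTER_OFFSET 0x3000  /* register stride between hub 0 and hub 1 */

/* Stand-in for the driver's WREG32_SOC15_OFFSET(MMHUB, 0, reg, offset, value). */
static void wreg32_mmhub(unsigned int reg, unsigned int offset, unsigned int value)
{
	printf("MMHUB write: 0x%05x <- 0x%08x\n", reg + offset, value);
}

int main(void)
{
	/* Hypothetical block-relative register offset, standing in for the
	 * GART, TLB and cache registers programmed by the new init functions.
	 */
	const unsigned int reg = 0x1234;
	int hubid;

	/* Same loop shape as mmhub_v9_4_gart_enable(): program both hubs by
	 * offsetting every access with hubid times the instance stride.
	 */
	for (hubid = 0; hubid < MMHUB_NUM_INSTANCES; hubid++)
		wreg32_mmhub(reg, hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 0x1);

	return 0;
}

The same hubid * MMHUB_INSTANCE_REGISTER_OFFSET term appears in nearly every register access in the new file (the FB location read deliberately uses hub 0 only, since the base is the same on both hubs), which is what lets a single set of init and teardown routines drive both hub instances.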
v2: squash in AGP BAR programming (Alex) Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 2 +- drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c | 504 ++++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h | 33 +++ 3 files changed, 538 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c create mode 100644 drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 3f5329906fce..7b6fb0e9ee76 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -77,7 +77,7 @@ amdgpu-y += \ amdgpu-y += \ gmc_v7_0.o \ gmc_v8_0.o \ - gfxhub_v1_0.o mmhub_v1_0.o gmc_v9_0.o gfxhub_v1_1.o \ + gfxhub_v1_0.o mmhub_v1_0.o gmc_v9_0.o gfxhub_v1_1.o mmhub_v9_4.o \ gfxhub_v2_0.o mmhub_v2_0.o gmc_v10_0.o # add IH block diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c new file mode 100644 index 000000000000..aa9b43b6ba6b --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c @@ -0,0 +1,504 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "amdgpu.h" +#include "mmhub_v9_4.h" + +#include "mmhub/mmhub_9_4_1_offset.h" +#include "mmhub/mmhub_9_4_1_sh_mask.h" +#include "mmhub/mmhub_9_4_1_default.h" +#include "athub/athub_1_0_offset.h" +#include "athub/athub_1_0_sh_mask.h" +#include "vega10_enum.h" + +#include "soc15_common.h" + +#define MMHUB_NUM_INSTANCES 2 +#define MMHUB_INSTANCE_REGISTER_OFFSET 0x3000 + +u64 mmhub_v9_4_get_fb_location(struct amdgpu_device *adev) +{ + /* The base should be same b/t 2 mmhubs on Acrturus. Read one here. 
*/ + u64 base = RREG32_SOC15(MMHUB, 0, mmVMSHAREDVC0_MC_VM_FB_LOCATION_BASE); + + base &= VMSHAREDVC0_MC_VM_FB_LOCATION_BASE__FB_BASE_MASK; + base <<= 24; + + return base; +} + +static void mmhub_v9_4_init_gart_pt_regs(struct amdgpu_device *adev, int hubid) +{ + uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo); + + WREG32_SOC15_OFFSET(MMHUB, 0, + mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, + lower_32_bits(value)); + + WREG32_SOC15_OFFSET(MMHUB, 0, + mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, + upper_32_bits(value)); + +} + +static void mmhub_v9_4_init_gart_aperture_regs(struct amdgpu_device *adev, + int hubid) +{ + mmhub_v9_4_init_gart_pt_regs(adev, hubid); + + WREG32_SOC15_OFFSET(MMHUB, 0, + mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, + (u32)(adev->gmc.gart_start >> 12)); + WREG32_SOC15_OFFSET(MMHUB, 0, + mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, + (u32)(adev->gmc.gart_start >> 44)); + + WREG32_SOC15_OFFSET(MMHUB, 0, + mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, + (u32)(adev->gmc.gart_end >> 12)); + WREG32_SOC15_OFFSET(MMHUB, 0, + mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, + (u32)(adev->gmc.gart_end >> 44)); +} + +static void mmhub_v9_4_init_system_aperture_regs(struct amdgpu_device *adev, + int hubid) +{ + uint64_t value; + uint32_t tmp; + + /* Program the AGP BAR */ + WREG32_SOC15_OFFSET(MMHUB, 0, mmVMSHAREDVC0_MC_VM_AGP_BASE, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, + 0); + WREG32_SOC15_OFFSET(MMHUB, 0, mmVMSHAREDVC0_MC_VM_AGP_TOP, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, + adev->gmc.agp_end >> 24); + WREG32_SOC15_OFFSET(MMHUB, 0, mmVMSHAREDVC0_MC_VM_AGP_BOT, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, + adev->gmc.agp_start >> 24); + + /* Program the system aperture low logical page number. */ + WREG32_SOC15_OFFSET(MMHUB, 0, + mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_LOW_ADDR, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, + min(adev->gmc.vram_start, adev->gmc.agp_start) + >> 18); + WREG32_SOC15_OFFSET(MMHUB, 0, + mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_HIGH_ADDR, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, + max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18); + + /* Set default page address. */ + value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start + + adev->vm_manager.vram_base_offset; + WREG32_SOC15_OFFSET(MMHUB, 0, + mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, + (u32)(value >> 12)); + WREG32_SOC15_OFFSET(MMHUB, 0, + mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, + (u32)(value >> 44)); + + /* Program "protection fault". 
*/ + WREG32_SOC15_OFFSET(MMHUB, 0, + mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, + (u32)(adev->dummy_page_addr >> 12)); + WREG32_SOC15_OFFSET(MMHUB, 0, + mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, + (u32)((u64)adev->dummy_page_addr >> 44)); + + tmp = RREG32_SOC15_OFFSET(MMHUB, 0, + mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET); + tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL2, + ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1); + WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp); +} + +static void mmhub_v9_4_init_tlb_regs(struct amdgpu_device *adev, int hubid) +{ + uint32_t tmp; + + /* Setup TLB control */ + tmp = RREG32_SOC15_OFFSET(MMHUB, 0, + mmVMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET); + + tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL, + ENABLE_L1_TLB, 1); + tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL, + SYSTEM_ACCESS_MODE, 3); + tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL, + ENABLE_ADVANCED_DRIVER_MODEL, 1); + tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL, + SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); + tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL, + ECO_BITS, 0); + tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL, + MTYPE, MTYPE_UC);/* XXX for emulation. */ + tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL, + ATC_EN, 1); + + WREG32_SOC15_OFFSET(MMHUB, 0, mmVMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp); +} + +static void mmhub_v9_4_init_cache_regs(struct amdgpu_device *adev, int hubid) +{ + uint32_t tmp; + + /* Setup L2 cache */ + tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET); + tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL, + ENABLE_L2_CACHE, 1); + tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL, + ENABLE_L2_FRAGMENT_PROCESSING, 1); + /* XXX for emulation, Refer to closed source code.*/ + tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL, + L2_PDE0_CACHE_TAG_GENERATION_MODE, 0); + tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL, + PDE_FAULT_CLASSIFICATION, 1); + tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL, + CONTEXT1_IDENTITY_ACCESS_MODE, 1); + tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL, + IDENTITY_MODE_FRAGMENT_SIZE, 0); + WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp); + + tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL2, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET); + tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL2, + INVALIDATE_ALL_L1_TLBS, 1); + tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL2, + INVALIDATE_L2_CACHE, 1); + WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL2, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp); + + tmp = mmVML2PF0_VM_L2_CNTL3_DEFAULT; + WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL3, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp); + + tmp = mmVML2PF0_VM_L2_CNTL4_DEFAULT; + tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL4, + VMC_TAP_PDE_REQUEST_PHYSICAL, 0); + tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL4, + VMC_TAP_PTE_REQUEST_PHYSICAL, 0); + WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL4, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp); +} + +static void mmhub_v9_4_enable_system_domain(struct amdgpu_device *adev, + int hubid) +{ + uint32_t tmp; + + tmp = RREG32_SOC15_OFFSET(MMHUB, 0, 
mmVML2VC0_VM_CONTEXT0_CNTL, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET); + tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); + tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0); + WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT0_CNTL, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp); +} + +static void mmhub_v9_4_disable_identity_aperture(struct amdgpu_device *adev, + int hubid) +{ + WREG32_SOC15_OFFSET(MMHUB, 0, + mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 0XFFFFFFFF); + WREG32_SOC15_OFFSET(MMHUB, 0, + mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 0x0000000F); + + WREG32_SOC15_OFFSET(MMHUB, 0, + mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 0); + WREG32_SOC15_OFFSET(MMHUB, 0, + mmVML2PF0_VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 0); + + WREG32_SOC15_OFFSET(MMHUB, 0, + mmVML2PF0_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 0); + WREG32_SOC15_OFFSET(MMHUB, 0, + mmVML2PF0_VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 0); +} + +static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid) +{ + uint32_t tmp; + int i; + + for (i = 0; i <= 14; i++) { + tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT1_CNTL, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i); + tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL, + ENABLE_CONTEXT, 1); + tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL, + PAGE_TABLE_DEPTH, + adev->vm_manager.num_level); + tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL, + RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL, + DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, + 1); + tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL, + PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL, + VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL, + READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL, + WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL, + EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); + tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL, + PAGE_TABLE_BLOCK_SIZE, + adev->vm_manager.block_size - 9); + /* Send no-retry XNACK on fault to suppress VM fault storm. 
*/ + tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL, + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0); + WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT1_CNTL, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i, + tmp); + WREG32_SOC15_OFFSET(MMHUB, 0, + mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i*2, 0); + WREG32_SOC15_OFFSET(MMHUB, 0, + mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i*2, 0); + WREG32_SOC15_OFFSET(MMHUB, 0, + mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i*2, + lower_32_bits(adev->vm_manager.max_pfn - 1)); + WREG32_SOC15_OFFSET(MMHUB, 0, + mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET + i*2, + upper_32_bits(adev->vm_manager.max_pfn - 1)); + } +} + +static void mmhub_v9_4_program_invalidation(struct amdgpu_device *adev, + int hubid) +{ + unsigned i; + + for (i = 0; i < 18; ++i) { + WREG32_SOC15_OFFSET(MMHUB, 0, + mmVML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_LO32, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET + 2 * i, + 0xffffffff); + WREG32_SOC15_OFFSET(MMHUB, 0, + mmVML2VC0_VM_INVALIDATE_ENG0_ADDR_RANGE_HI32, + hubid * MMHUB_INSTANCE_REGISTER_OFFSET + 2 * i, + 0x1f); + } +} + +int mmhub_v9_4_gart_enable(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < MMHUB_NUM_INSTANCES; i++) { + if (amdgpu_sriov_vf(adev)) { + /* + * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase + * they are VF copy registers so vbios post doesn't + * program them, for SRIOV driver need to program them + */ + WREG32_SOC15_OFFSET(MMHUB, 0, + mmVMSHAREDVC0_MC_VM_FB_LOCATION_BASE, + i * MMHUB_INSTANCE_REGISTER_OFFSET, + adev->gmc.vram_start >> 24); + WREG32_SOC15_OFFSET(MMHUB, 0, + mmVMSHAREDVC0_MC_VM_FB_LOCATION_TOP, + i * MMHUB_INSTANCE_REGISTER_OFFSET, + adev->gmc.vram_end >> 24); + } + + /* GART Enable. 
*/ + mmhub_v9_4_init_gart_aperture_regs(adev, i); + mmhub_v9_4_init_system_aperture_regs(adev, i); + mmhub_v9_4_init_tlb_regs(adev, i); + mmhub_v9_4_init_cache_regs(adev, i); + + mmhub_v9_4_enable_system_domain(adev, i); + mmhub_v9_4_disable_identity_aperture(adev, i); + mmhub_v9_4_setup_vmid_config(adev, i); + mmhub_v9_4_program_invalidation(adev, i); + } + + return 0; +} + +void mmhub_v9_4_gart_disable(struct amdgpu_device *adev) +{ + u32 tmp; + u32 i, j; + + for (j = 0; j < MMHUB_NUM_INSTANCES; j++) { + /* Disable all tables */ + for (i = 0; i < 16; i++) + WREG32_SOC15_OFFSET(MMHUB, 0, + mmVML2VC0_VM_CONTEXT0_CNTL, + j * MMHUB_INSTANCE_REGISTER_OFFSET + + i, 0); + + /* Setup TLB control */ + tmp = RREG32_SOC15_OFFSET(MMHUB, 0, + mmVMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL, + j * MMHUB_INSTANCE_REGISTER_OFFSET); + tmp = REG_SET_FIELD(tmp, VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL, + ENABLE_L1_TLB, 0); + tmp = REG_SET_FIELD(tmp, + VMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL, + ENABLE_ADVANCED_DRIVER_MODEL, 0); + WREG32_SOC15_OFFSET(MMHUB, 0, + mmVMSHAREDVC0_MC_VM_MX_L1_TLB_CNTL, + j * MMHUB_INSTANCE_REGISTER_OFFSET, tmp); + + /* Setup L2 cache */ + tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL, + j * MMHUB_INSTANCE_REGISTER_OFFSET); + tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL, + ENABLE_L2_CACHE, 0); + WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL, + j * MMHUB_INSTANCE_REGISTER_OFFSET, tmp); + WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_CNTL3, + j * MMHUB_INSTANCE_REGISTER_OFFSET, 0); + } +} + +/** + * mmhub_v1_0_set_fault_enable_default - update GART/VM fault handling + * + * @adev: amdgpu_device pointer + * @value: true redirects VM faults to the default page + */ +void mmhub_v9_4_set_fault_enable_default(struct amdgpu_device *adev, bool value) +{ + u32 tmp; + int i; + + for (i = 0; i < MMHUB_NUM_INSTANCES; i++) { + tmp = RREG32_SOC15_OFFSET(MMHUB, 0, + mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL, + i * MMHUB_INSTANCE_REGISTER_OFFSET); + tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL, + RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, + value); + tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL, + PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, + value); + tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL, + PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, + value); + tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL, + PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, + value); + tmp = REG_SET_FIELD(tmp, + VML2PF0_VM_L2_PROTECTION_FAULT_CNTL, + TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT, + value); + tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL, + NACK_PROTECTION_FAULT_ENABLE_DEFAULT, + value); + tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL, + DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, + value); + tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL, + VALID_PROTECTION_FAULT_ENABLE_DEFAULT, + value); + tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL, + READ_PROTECTION_FAULT_ENABLE_DEFAULT, + value); + tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL, + WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, + value); + tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL, + EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, + value); + if (!value) { + tmp = REG_SET_FIELD(tmp, + VML2PF0_VM_L2_PROTECTION_FAULT_CNTL, + CRASH_ON_NO_RETRY_FAULT, 1); + tmp = REG_SET_FIELD(tmp, + VML2PF0_VM_L2_PROTECTION_FAULT_CNTL, + CRASH_ON_RETRY_FAULT, 1); + } + + WREG32_SOC15_OFFSET(MMHUB, 0, + mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL, + i * 
MMHUB_INSTANCE_REGISTER_OFFSET, tmp); + } +} + +void mmhub_v9_4_init(struct amdgpu_device *adev) +{ + struct amdgpu_vmhub *hub[MMHUB_NUM_INSTANCES] = + {&adev->vmhub[AMDGPU_MMHUB_0], &adev->vmhub[AMDGPU_MMHUB_1]}; + int i; + + for (i = 0; i < MMHUB_NUM_INSTANCES; i++) { + hub[i]->ctx0_ptb_addr_lo32 = + SOC15_REG_OFFSET(MMHUB, 0, + mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) + + i * MMHUB_INSTANCE_REGISTER_OFFSET; + hub[i]->ctx0_ptb_addr_hi32 = + SOC15_REG_OFFSET(MMHUB, 0, + mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) + + i * MMHUB_INSTANCE_REGISTER_OFFSET; + hub[i]->vm_inv_eng0_req = + SOC15_REG_OFFSET(MMHUB, 0, + mmVML2VC0_VM_INVALIDATE_ENG0_REQ) + + i * MMHUB_INSTANCE_REGISTER_OFFSET; + hub[i]->vm_inv_eng0_ack = + SOC15_REG_OFFSET(MMHUB, 0, + mmVML2VC0_VM_INVALIDATE_ENG0_ACK) + + i * MMHUB_INSTANCE_REGISTER_OFFSET; + hub[i]->vm_context0_cntl = + SOC15_REG_OFFSET(MMHUB, 0, + mmVML2VC0_VM_CONTEXT0_CNTL) + + i * MMHUB_INSTANCE_REGISTER_OFFSET; + hub[i]->vm_l2_pro_fault_status = + SOC15_REG_OFFSET(MMHUB, 0, + mmVML2PF0_VM_L2_PROTECTION_FAULT_STATUS) + + i * MMHUB_INSTANCE_REGISTER_OFFSET; + hub[i]->vm_l2_pro_fault_cntl = + SOC15_REG_OFFSET(MMHUB, 0, + mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL) + + i * MMHUB_INSTANCE_REGISTER_OFFSET; + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h new file mode 100644 index 000000000000..9ba3dd808826 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h @@ -0,0 +1,33 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __MMHUB_V9_4_H__ +#define __MMHUB_V9_4_H__ + +u64 mmhub_v9_4_get_fb_location(struct amdgpu_device *adev); +int mmhub_v9_4_gart_enable(struct amdgpu_device *adev); +void mmhub_v9_4_gart_disable(struct amdgpu_device *adev); +void mmhub_v9_4_set_fault_enable_default(struct amdgpu_device *adev, + bool value); +void mmhub_v9_4_init(struct amdgpu_device *adev); + +#endif -- cgit v1.2.3 From 51cce480fda970118dd661ce6e0715f3269cbc31 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 4 Sep 2018 15:29:52 +0800 Subject: drm/amdgpu: use new mmhub interfaces for Arcturus Arcturus has two MMHUBs. 
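Every mmhub call site in gmc_v9_0.c gains the same asic_type check, routing Arcturus to the new v9.4 code and all other gmc v9 parts to the existing v1.0 code (clockgating simply returns early for Arcturus for now). A condensed, stand-alone sketch of that dispatch shape, with stubs in place of the real amdgpu structures and mmhub entry points:

/* Condensed illustration of the per-ASIC mmhub dispatch added below; the
 * types and functions here are stand-alone stubs, not the real amdgpu
 * definitions.
 */
#include <stdio.h>

enum asic_type_stub { ASIC_VEGA20_STUB, ASIC_ARCTURUS_STUB };

struct dev_stub {
	enum asic_type_stub asic_type;
};

static int mmhub_v1_0_gart_enable_stub(struct dev_stub *adev)
{
	(void)adev;
	printf("single-instance mmhub v1.0 path\n");
	return 0;
}

static int mmhub_v9_4_gart_enable_stub(struct dev_stub *adev)
{
	(void)adev;
	printf("dual-instance mmhub v9.4 path\n");
	return 0;
}

static int gart_enable(struct dev_stub *adev)
{
	/* Arcturus is the only gmc v9 part with the v9.4 mmhub, so a plain
	 * asic_type check at each call site is enough.
	 */
	if (adev->asic_type == ASIC_ARCTURUS_STUB)
		return mmhub_v9_4_gart_enable_stub(adev);

	return mmhub_v1_0_gart_enable_stub(adev);
}

int main(void)
{
	struct dev_stub arcturus = { .asic_type = ASIC_ARCTURUS_STUB };

	return gart_enable(&arcturus);
}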
Signed-off-by: Le Ma Acked-by: Snow Zhang < Snow.Zhang@amd.com> Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 35 +++++++++++++++++++++++++++++------ 1 file changed, 29 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 08cdb5ff7228..3ce10e8ebefa 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -48,6 +48,7 @@ #include "gfxhub_v1_0.h" #include "mmhub_v1_0.h" #include "gfxhub_v1_1.h" +#include "mmhub_v9_4.h" #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h" @@ -807,8 +808,12 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc) { u64 base = 0; - if (!amdgpu_sriov_vf(adev)) - base = mmhub_v1_0_get_fb_location(adev); + if (!amdgpu_sriov_vf(adev)) { + if (adev->asic_type == CHIP_ARCTURUS) + base = mmhub_v9_4_get_fb_location(adev); + else + base = mmhub_v1_0_get_fb_location(adev); + } /* add the xgmi offset of the physical node */ base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; amdgpu_gmc_vram_location(adev, mc, base); @@ -974,7 +979,10 @@ static int gmc_v9_0_sw_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; gfxhub_v1_0_init(adev); - mmhub_v1_0_init(adev); + if (adev->asic_type == CHIP_ARCTURUS) + mmhub_v9_4_init(adev); + else + mmhub_v1_0_init(adev); spin_lock_init(&adev->gmc.invalidate_lock); @@ -1194,7 +1202,10 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) if (r) return r; - r = mmhub_v1_0_gart_enable(adev); + if (adev->asic_type == CHIP_ARCTURUS) + r = mmhub_v9_4_gart_enable(adev); + else + r = mmhub_v1_0_gart_enable(adev); if (r) return r; @@ -1215,7 +1226,10 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) value = true; gfxhub_v1_0_set_fault_enable_default(adev, value); - mmhub_v1_0_set_fault_enable_default(adev, value); + if (adev->asic_type == CHIP_ARCTURUS) + mmhub_v9_4_set_fault_enable_default(adev, value); + else + mmhub_v1_0_set_fault_enable_default(adev, value); gmc_v9_0_flush_gpu_tlb(adev, 0, 0); DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", @@ -1256,7 +1270,10 @@ static int gmc_v9_0_hw_init(void *handle) static void gmc_v9_0_gart_disable(struct amdgpu_device *adev) { gfxhub_v1_0_gart_disable(adev); - mmhub_v1_0_gart_disable(adev); + if (adev->asic_type == CHIP_ARCTURUS) + mmhub_v9_4_gart_disable(adev); + else + mmhub_v1_0_gart_disable(adev); amdgpu_gart_table_vram_unpin(adev); } @@ -1321,6 +1338,9 @@ static int gmc_v9_0_set_clockgating_state(void *handle, { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (adev->asic_type == CHIP_ARCTURUS) + return 0; + return mmhub_v1_0_set_clockgating(adev, state); } @@ -1328,6 +1348,9 @@ static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (adev->asic_type == CHIP_ARCTURUS) + return; + mmhub_v1_0_get_clockgating(adev, flags); } -- cgit v1.2.3 From 8024f1d5e180e2a41121e7e6ac1b79a400430a8c Mon Sep 17 00:00:00 2001 From: Le Ma Date: Mon, 3 Sep 2018 19:27:52 +0800 Subject: drm/amdgpu: add SDMA 2~7 interrupt client id for Arcturus Add new client ids. 
Signed-off-by: Le Ma Acked-by: Snow Zhang < Snow.Zhang@amd.com> Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/soc15_ih_clientid.h | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/include/soc15_ih_clientid.h b/drivers/gpu/drm/amd/include/soc15_ih_clientid.h index 12e196c15bbe..5b40b9d1b788 100644 --- a/drivers/gpu/drm/amd/include/soc15_ih_clientid.h +++ b/drivers/gpu/drm/amd/include/soc15_ih_clientid.h @@ -63,7 +63,13 @@ enum soc15_ih_clientid { SOC15_IH_CLIENTID_MAX, - SOC15_IH_CLIENTID_VCN = SOC15_IH_CLIENTID_UVD + SOC15_IH_CLIENTID_VCN = SOC15_IH_CLIENTID_UVD, + SOC15_IH_CLIENTID_SDMA2 = SOC15_IH_CLIENTID_ACP, + SOC15_IH_CLIENTID_SDMA3 = SOC15_IH_CLIENTID_DCE, + SOC15_IH_CLIENTID_SDMA4 = SOC15_IH_CLIENTID_ISP, + SOC15_IH_CLIENTID_SDMA5 = SOC15_IH_CLIENTID_VCE0, + SOC15_IH_CLIENTID_SDMA6 = SOC15_IH_CLIENTID_XDMA, + SOC15_IH_CLIENTID_SDMA7 = SOC15_IH_CLIENTID_VCE1, }; #endif -- cgit v1.2.3 From fa5d2e6f0ab273e55786991699127334bf8ef034 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 4 Sep 2018 13:36:22 +0800 Subject: drm/amdgpu: add SDMA 2~7 ip block type Add IP block type. Signed-off-by: Le Ma Acked-by: Snow Zhang < Snow.Zhang@amd.com> Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index e41f489a8dc2..ceb725a571a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -712,6 +712,12 @@ enum amd_hw_ip_block_type { HDP_HWIP, SDMA0_HWIP, SDMA1_HWIP, + SDMA2_HWIP, + SDMA3_HWIP, + SDMA4_HWIP, + SDMA5_HWIP, + SDMA6_HWIP, + SDMA7_HWIP, MMHUB_HWIP, ATHUB_HWIP, NBIO_HWIP, -- cgit v1.2.3 From 113b47e78067db00cc449b6666916715d02ed01c Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 9 Jul 2019 09:20:24 -0500 Subject: drm/amdgpu: increase max number of ip base instances to 8 For Arcturus, the number of IP base instances is 8. Signed-off-by: Le Ma Acked-by: Snow Zhang < Snow.Zhang@amd.com> Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index ceb725a571a7..c4e04d14a1ee 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -737,7 +737,7 @@ enum amd_hw_ip_block_type { MAX_HWIP }; -#define HWIP_MAX_INSTANCE 7 +#define HWIP_MAX_INSTANCE 8 struct amd_powerplay { void *pp_handle; -- cgit v1.2.3 From e78705ec5a7f607a3340c71a104b0d1c327958a4 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 9 Jul 2019 09:21:53 -0500 Subject: drm/amdgpu: dynamically initialize IP offset for Arcturus Add support for the IP offsets on Arcturus. 
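arct_reg_base_init() only fills in the adev->reg_offset[hwip][instance] tables from the arct_ip_offset.h data; consumers then build an absolute register offset by adding a block-relative offset to the looked-up base, as sdma_v4_0_get_reg_offset() does elsewhere in this series with adev->reg_offset[SDMA2_HWIP][0][0] + offset. A stand-alone sketch of that lookup, with stub types and a hypothetical base value in place of the real amdgpu definitions:

/* Illustration only: how the per-IP register base tables filled by
 * arct_reg_base_init() are consumed.  Types and numbers are stand-ins,
 * not the real amdgpu definitions.
 */
#include <stdio.h>

enum hwip_stub { SDMA0_HWIP_STUB, SDMA2_HWIP_STUB, MAX_HWIP_STUB };
#define MAX_INSTANCE_STUB 8	/* HWIP_MAX_INSTANCE is raised to 8 for Arcturus */

struct dev_stub {
	/* reg_offset[hwip][instance] points at that block instance's base
	 * table; the real driver points these at the arct_ip_offset.h data.
	 */
	const unsigned int *reg_offset[MAX_HWIP_STUB][MAX_INSTANCE_STUB];
};

/* Mirrors the shape of sdma_v4_0_get_reg_offset(): segment-0 base plus the
 * block-relative register offset.
 */
static unsigned int reg_abs_offset(const struct dev_stub *adev,
				   enum hwip_stub hwip, unsigned int reg)
{
	return adev->reg_offset[hwip][0][0] + reg;
}

int main(void)
{
	static const unsigned int sdma2_base[] = { 0x00012000 }; /* hypothetical */
	struct dev_stub adev;

	adev.reg_offset[SDMA2_HWIP_STUB][0] = sdma2_base;
	printf("SDMA2 reg at 0x%05x\n",
	       reg_abs_offset(&adev, SDMA2_HWIP_STUB, 0x10));
	return 0;
}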
Signed-off-by: Le Ma Acked-by: Snow Zhang < Snow.Zhang@amd.com> Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 3 +- drivers/gpu/drm/amd/amdgpu/arct_reg_init.c | 57 ++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/soc15.c | 3 ++ drivers/gpu/drm/amd/amdgpu/soc15.h | 1 + 4 files changed, 63 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/arct_reg_init.c (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 7b6fb0e9ee76..97671df6a5f0 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -66,7 +66,8 @@ amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce amdgpu-y += \ vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \ - vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o navi10_reg_init.o navi14_reg_init.o + vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o navi10_reg_init.o navi14_reg_init.o \ + arct_reg_init.o # add DF block amdgpu-y += \ diff --git a/drivers/gpu/drm/amd/amdgpu/arct_reg_init.c b/drivers/gpu/drm/amd/amdgpu/arct_reg_init.c new file mode 100644 index 000000000000..51b8cdffb196 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/arct_reg_init.c @@ -0,0 +1,57 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "amdgpu.h" +#include "soc15.h" + +#include "soc15_common.h" +#include "soc15_hw_ip.h" +#include "arct_ip_offset.h" + +int arct_reg_base_init(struct amdgpu_device *adev) +{ + /* HW has more IP blocks, only initialized the block needed by our driver */ + uint32_t i; + for (i = 0 ; i < MAX_INSTANCE ; ++i) { + adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); + adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i])); + adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i])); + adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i])); + adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIF0_BASE.instance[i])); + adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i])); + adev->reg_offset[UVD_HWIP][i] = (uint32_t *)(&(UVD_BASE.instance[i])); + adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i])); + adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i])); + adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(SDMA0_BASE.instance[i])); + adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(SDMA1_BASE.instance[i])); + adev->reg_offset[SDMA2_HWIP][i] = (uint32_t *)(&(SDMA2_BASE.instance[i])); + adev->reg_offset[SDMA3_HWIP][i] = (uint32_t *)(&(SDMA3_BASE.instance[i])); + adev->reg_offset[SDMA4_HWIP][i] = (uint32_t *)(&(SDMA4_BASE.instance[i])); + adev->reg_offset[SDMA5_HWIP][i] = (uint32_t *)(&(SDMA5_BASE.instance[i])); + adev->reg_offset[SDMA6_HWIP][i] = (uint32_t *)(&(SDMA6_BASE.instance[i])); + adev->reg_offset[SDMA7_HWIP][i] = (uint32_t *)(&(SDMA7_BASE.instance[i])); + adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i])); + } + return 0; +} + + diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 23265414d448..fbc067786b2a 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -586,6 +586,9 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) case CHIP_VEGA20: vega20_reg_base_init(adev); break; + case CHIP_ARCTURUS: + arct_reg_base_init(adev); + break; default: return -EINVAL; } diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h index 7a6b2cc6d9f5..a3dde0c31f57 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15.h @@ -77,6 +77,7 @@ void soc15_program_register_sequence(struct amdgpu_device *adev, int vega10_reg_base_init(struct amdgpu_device *adev); int vega20_reg_base_init(struct amdgpu_device *adev); +int arct_reg_base_init(struct amdgpu_device *adev); void vega10_doorbell_index_init(struct amdgpu_device *adev); void vega20_doorbell_index_init(struct amdgpu_device *adev); -- cgit v1.2.3 From 7d19b15f7013c9a9ebdcf253eb016ff37752ecc9 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Thu, 6 Sep 2018 17:34:06 +0800 Subject: drm/amdgpu: add VMC1 interrupt client id for Arcturus New IH client id for VMC1. 
Signed-off-by: Le Ma Acked-by: Snow Zhang < Snow.Zhang@amd.com> Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 7 +++++++ drivers/gpu/drm/amd/include/soc15_ih_clientid.h | 1 + 2 files changed, 8 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 3ce10e8ebefa..ad387529446e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1032,6 +1032,13 @@ static int gmc_v9_0_sw_init(void *handle) if (r) return r; + if (adev->asic_type == CHIP_ARCTURUS) { + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT, + &adev->gmc.vm_fault); + if (r) + return r; + } + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT, &adev->gmc.vm_fault); diff --git a/drivers/gpu/drm/amd/include/soc15_ih_clientid.h b/drivers/gpu/drm/amd/include/soc15_ih_clientid.h index 5b40b9d1b788..0f386b2e1f4f 100644 --- a/drivers/gpu/drm/amd/include/soc15_ih_clientid.h +++ b/drivers/gpu/drm/amd/include/soc15_ih_clientid.h @@ -70,6 +70,7 @@ enum soc15_ih_clientid { SOC15_IH_CLIENTID_SDMA5 = SOC15_IH_CLIENTID_VCE0, SOC15_IH_CLIENTID_SDMA6 = SOC15_IH_CLIENTID_XDMA, SOC15_IH_CLIENTID_SDMA7 = SOC15_IH_CLIENTID_VCE1, + SOC15_IH_CLIENTID_VMC1 = SOC15_IH_CLIENTID_PCIE0, }; #endif -- cgit v1.2.3 From 51c608984fd5eed89bb9a8bd49133cde405892da Mon Sep 17 00:00:00 2001 From: Le Ma Date: Thu, 6 Sep 2018 19:37:51 +0800 Subject: drm/amdgpu: update vmc interrupt routine to support 3 vmhubs There is one more vmc interrupt and mmhub on Arcturus. Signed-off-by: Le Ma Acked-by: Snow Zhang < Snow.Zhang@amd.com> Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index ad387529446e..75801178efa1 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -316,10 +316,11 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { - struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src]; + struct amdgpu_vmhub *hub; bool retry_fault = !!(entry->src_data[1] & 0x80); uint32_t status = 0; u64 addr; + char hub_name[10]; addr = (u64)entry->src_data[0] << 12; addr |= ((u64)entry->src_data[1] & 0xf) << 44; @@ -328,6 +329,17 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, entry->timestamp)) return 1; /* This also prevents sending it to KFD */ + if (entry->client_id == SOC15_IH_CLIENTID_VMC) { + snprintf(hub_name, sizeof(hub_name), "mmhub0"); + hub = &adev->vmhub[AMDGPU_MMHUB_0]; + } else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) { + snprintf(hub_name, sizeof(hub_name), "mmhub1"); + hub = &adev->vmhub[AMDGPU_MMHUB_1]; + } else { + snprintf(hub_name, sizeof(hub_name), "gfxhub0"); + hub = &adev->vmhub[AMDGPU_GFXHUB_0]; + } + /* If it's the first fault for this address, process it normally */ if (!amdgpu_sriov_vf(adev)) { status = RREG32(hub->vm_l2_pro_fault_status); @@ -343,8 +355,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, dev_err(adev->dev, "[%s] %s page fault (src_id:%u ring:%u vmid:%u " "pasid:%u, for process %s pid %d thread %s pid %d)\n", - entry->vmid_src ? "mmhub" : "gfxhub", - retry_fault ? "retry" : "no-retry", + hub_name, retry_fault ? 
"retry" : "no-retry", entry->src_id, entry->ring_id, entry->vmid, entry->pasid, task_info.process_name, task_info.tgid, task_info.task_name, task_info.pid); -- cgit v1.2.3 From 667a48226edfbf4b0b8d1b61255105918fea4af7 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Wed, 5 Sep 2018 16:21:20 +0800 Subject: drm/amdgpu: reorganize sdma v4 code to support more instances This change is needed for Arcturus which has 8 sdma instances. The CG/PG part is not covered for now. Signed-off-by: Le Ma Acked-by: Snow Zhang < Snow.Zhang@amd.com> Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h | 8 +- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 144 +++++++++++++++++++++---------- 2 files changed, 107 insertions(+), 45 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h index 35dd152f9d5c..a9ae0d8a0589 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h @@ -25,11 +25,17 @@ #define __AMDGPU_SDMA_H__ /* max number of IP instances */ -#define AMDGPU_MAX_SDMA_INSTANCES 2 +#define AMDGPU_MAX_SDMA_INSTANCES 8 enum amdgpu_sdma_irq { AMDGPU_SDMA_IRQ_INSTANCE0 = 0, AMDGPU_SDMA_IRQ_INSTANCE1, + AMDGPU_SDMA_IRQ_INSTANCE2, + AMDGPU_SDMA_IRQ_INSTANCE3, + AMDGPU_SDMA_IRQ_INSTANCE4, + AMDGPU_SDMA_IRQ_INSTANCE5, + AMDGPU_SDMA_IRQ_INSTANCE6, + AMDGPU_SDMA_IRQ_INSTANCE7, AMDGPU_SDMA_IRQ_LAST }; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 4654846fb580..6ec699294a50 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -205,8 +205,77 @@ static const struct soc15_reg_golden golden_settings_sdma_rv2[] = static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 offset) { - return ( 0 == instance ? 
(adev->reg_offset[SDMA0_HWIP][0][0] + offset) : - (adev->reg_offset[SDMA1_HWIP][0][0] + offset)); + switch (instance) { + case 0: + return (adev->reg_offset[SDMA0_HWIP][0][0] + offset); + case 1: + return (adev->reg_offset[SDMA1_HWIP][0][0] + offset); + case 2: + return (adev->reg_offset[SDMA2_HWIP][0][0] + offset); + case 3: + return (adev->reg_offset[SDMA3_HWIP][0][0] + offset); + case 4: + return (adev->reg_offset[SDMA4_HWIP][0][0] + offset); + case 5: + return (adev->reg_offset[SDMA5_HWIP][0][0] + offset); + case 6: + return (adev->reg_offset[SDMA6_HWIP][0][0] + offset); + case 7: + return (adev->reg_offset[SDMA7_HWIP][0][0] + offset); + default: + break; + } + return 0; +} + +static unsigned sdma_v4_0_seq_to_irq_id(int seq_num) +{ + switch (seq_num) { + case 0: + return SOC15_IH_CLIENTID_SDMA0; + case 1: + return SOC15_IH_CLIENTID_SDMA1; + case 2: + return SOC15_IH_CLIENTID_SDMA2; + case 3: + return SOC15_IH_CLIENTID_SDMA3; + case 4: + return SOC15_IH_CLIENTID_SDMA4; + case 5: + return SOC15_IH_CLIENTID_SDMA5; + case 6: + return SOC15_IH_CLIENTID_SDMA6; + case 7: + return SOC15_IH_CLIENTID_SDMA7; + default: + break; + } + return 0; +} + +static int sdma_v4_0_irq_id_to_seq(unsigned client_id) +{ + switch (client_id) { + case SOC15_IH_CLIENTID_SDMA0: + return 0; + case SOC15_IH_CLIENTID_SDMA1: + return 1; + case SOC15_IH_CLIENTID_SDMA2: + return 2; + case SOC15_IH_CLIENTID_SDMA3: + return 3; + case SOC15_IH_CLIENTID_SDMA4: + return 4; + case SOC15_IH_CLIENTID_SDMA5: + return 5; + case SOC15_IH_CLIENTID_SDMA6: + return 6; + case SOC15_IH_CLIENTID_SDMA7: + return 7; + default: + break; + } + return 0; } static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev) @@ -308,7 +377,7 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev) if (i == 0) snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name); else - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name); + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma%d.bin", chip_name, i); err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev); if (err) goto out; @@ -620,26 +689,27 @@ static void sdma_v4_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se */ static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev) { - struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring; - struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring; + struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES]; u32 rb_cntl, ib_cntl; - int i; + int i, unset = 0; - if ((adev->mman.buffer_funcs_ring == sdma0) || - (adev->mman.buffer_funcs_ring == sdma1)) + for (i = 0; i < adev->sdma.num_instances; i++) { + sdma[i] = &adev->sdma.instance[i].ring; + + if ((adev->mman.buffer_funcs_ring == sdma[i]) && unset != 1) { amdgpu_ttm_set_buffer_funcs_status(adev, false); + unset = 1; + } - for (i = 0; i < adev->sdma.num_instances; i++) { rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL); rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0); WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl); ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL); ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0); WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl); - } - sdma0->sched.ready = false; - sdma1->sched.ready = false; + sdma[i]->sched.ready = false; + } } /** @@ -1606,16 +1676,13 @@ static int sdma_v4_0_sw_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* SDMA trap event */ - r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0, SDMA0_4_0__SRCID__SDMA_TRAP, - 
&adev->sdma.trap_irq); - if (r) - return r; - - /* SDMA trap event */ - r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA1, SDMA1_4_0__SRCID__SDMA_TRAP, - &adev->sdma.trap_irq); - if (r) - return r; + for (i = 0; i < adev->sdma.num_instances; i++) { + r = amdgpu_irq_add_id(adev, sdma_v4_0_seq_to_irq_id(i), + SDMA0_4_0__SRCID__SDMA_TRAP, + &adev->sdma.trap_irq); + if (r) + return r; + } /* SDMA SRAM ECC event */ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0, SDMA0_4_0__SRCID__SDMA_SRAM_ECC, @@ -1641,11 +1708,8 @@ static int sdma_v4_0_sw_init(void *handle) ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1; sprintf(ring->name, "sdma%d", i); - r = amdgpu_ring_init(adev, ring, 1024, - &adev->sdma.trap_irq, - (i == 0) ? - AMDGPU_SDMA_IRQ_INSTANCE0 : - AMDGPU_SDMA_IRQ_INSTANCE1); + r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, + AMDGPU_SDMA_IRQ_INSTANCE0 + i); if (r) return r; @@ -1776,15 +1840,17 @@ static bool sdma_v4_0_is_idle(void *handle) static int sdma_v4_0_wait_for_idle(void *handle) { - unsigned i; - u32 sdma0, sdma1; + unsigned i, j; + u32 sdma[AMDGPU_MAX_SDMA_INSTANCES]; struct amdgpu_device *adev = (struct amdgpu_device *)handle; for (i = 0; i < adev->usec_timeout; i++) { - sdma0 = RREG32_SDMA(0, mmSDMA0_STATUS_REG); - sdma1 = RREG32_SDMA(1, mmSDMA0_STATUS_REG); - - if (sdma0 & sdma1 & SDMA0_STATUS_REG__IDLE_MASK) + for (j = 0; j < adev->sdma.num_instances; j++) { + sdma[j] = RREG32_SDMA(j, mmSDMA0_STATUS_REG); + if (!(sdma[j] & SDMA0_STATUS_REG__IDLE_MASK)) + break; + } + if (j == adev->sdma.num_instances) return 0; udelay(1); } @@ -1820,17 +1886,7 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev, uint32_t instance; DRM_DEBUG("IH: SDMA trap\n"); - switch (entry->client_id) { - case SOC15_IH_CLIENTID_SDMA0: - instance = 0; - break; - case SOC15_IH_CLIENTID_SDMA1: - instance = 1; - break; - default: - return 0; - } - + instance = sdma_v4_0_irq_id_to_seq(entry->client_id); switch (entry->ring_id) { case 0: amdgpu_fence_process(&adev->sdma.instance[instance].ring); -- cgit v1.2.3 From b482a134ade1a3a69eaf77d3c792706d90596b35 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Mon, 10 Sep 2018 14:26:44 +0800 Subject: drm/amdgpu: specify sdma instance 5~7 with second mmhub type On Arcturus, sdma instance 5~7 is connected to the second mmhub. The vmhub type in amdgpu_ring_funcs is constant, so we create an individual amdgpu_ring_funcs with different vmhub type(AMDGPU_MMHUB_1) for these sdma instances. Signed-off-by: Le Ma Acked-by: Snow Zhang < Snow.Zhang@amd.com> Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 43 +++++++++++++++++++++++++++++++++- 1 file changed, 42 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 6ec699294a50..10e885331023 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -2216,6 +2216,42 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = { .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, }; +/* + * On Arcturus, SDMA instance 5~7 has a different vmhub type(AMDGPU_MMHUB_1). + * So create a individual constant ring_funcs for those instances. 
+ */ +static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs_2nd_mmhub = { + .type = AMDGPU_RING_TYPE_SDMA, + .align_mask = 0xf, + .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), + .support_64bit_ptrs = true, + .vmhub = AMDGPU_MMHUB_1, + .get_rptr = sdma_v4_0_ring_get_rptr, + .get_wptr = sdma_v4_0_ring_get_wptr, + .set_wptr = sdma_v4_0_ring_set_wptr, + .emit_frame_size = + 6 + /* sdma_v4_0_ring_emit_hdp_flush */ + 3 + /* hdp invalidate */ + 6 + /* sdma_v4_0_ring_emit_pipeline_sync */ + /* sdma_v4_0_ring_emit_vm_flush */ + SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 + + 10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */ + .emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */ + .emit_ib = sdma_v4_0_ring_emit_ib, + .emit_fence = sdma_v4_0_ring_emit_fence, + .emit_pipeline_sync = sdma_v4_0_ring_emit_pipeline_sync, + .emit_vm_flush = sdma_v4_0_ring_emit_vm_flush, + .emit_hdp_flush = sdma_v4_0_ring_emit_hdp_flush, + .test_ring = sdma_v4_0_ring_test_ring, + .test_ib = sdma_v4_0_ring_test_ib, + .insert_nop = sdma_v4_0_ring_insert_nop, + .pad_ib = sdma_v4_0_ring_pad_ib, + .emit_wreg = sdma_v4_0_ring_emit_wreg, + .emit_reg_wait = sdma_v4_0_ring_emit_reg_wait, + .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, +}; + static const struct amdgpu_ring_funcs sdma_v4_0_page_ring_funcs = { .type = AMDGPU_RING_TYPE_SDMA, .align_mask = 0xf, @@ -2253,7 +2289,12 @@ static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev) int i; for (i = 0; i < adev->sdma.num_instances; i++) { - adev->sdma.instance[i].ring.funcs = &sdma_v4_0_ring_funcs; + if (adev->asic_type == CHIP_ARCTURUS && i >=5) + adev->sdma.instance[i].ring.funcs = + &sdma_v4_0_ring_funcs_2nd_mmhub; + else + adev->sdma.instance[i].ring.funcs = + &sdma_v4_0_ring_funcs; adev->sdma.instance[i].ring.me = i; if (adev->sdma.has_page_queue) { adev->sdma.instance[i].page.funcs = &sdma_v4_0_page_ring_funcs; -- cgit v1.2.3 From 0fe6a7b49f61c4ae692c790dd7afc47841314f0e Mon Sep 17 00:00:00 2001 From: Le Ma Date: Mon, 10 Sep 2018 19:51:07 +0800 Subject: drm/amdgpu: support hdp flush for more sdma instances The bit RSVD_ENG0 to RSVD_ENG5 in GPU_HDP_FLUSH_REQ/GPU_HDP_FLUSH_DONE can be leveraged for sdma instance 2~7 to poll register/memory. Signed-off-by: Le Ma Acked-by: Snow Zhang < Snow.Zhang@amd.com> Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 6 ++++++ drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 17 +++++++++++++++++ drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 5 +---- 3 files changed, 24 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index c4e04d14a1ee..b601da7eb97c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -648,6 +648,12 @@ struct nbio_hdp_flush_reg { u32 ref_and_mask_cp9; u32 ref_and_mask_sdma0; u32 ref_and_mask_sdma1; + u32 ref_and_mask_sdma2; + u32 ref_and_mask_sdma3; + u32 ref_and_mask_sdma4; + u32 ref_and_mask_sdma5; + u32 ref_and_mask_sdma6; + u32 ref_and_mask_sdma7; }; struct amdgpu_mmio_remap { diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c index bfaaa327ae3c..2e1098c5c32b 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c @@ -31,6 +31,17 @@ #define smnNBIF_MGCG_CTRL_LCLK 0x1013a21c +/* + * These are nbio v7_4_1 registers mask. Temporarily define these here since + * nbio v7_4_1 header is incomplete. 
+ */ +#define GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L +#define GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L +#define GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L +#define GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L +#define GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L +#define GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L + static void nbio_v7_4_remap_hdp_registers(struct amdgpu_device *adev) { WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL, @@ -220,6 +231,12 @@ static const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = { .ref_and_mask_cp9 = GPU_HDP_FLUSH_DONE__CP9_MASK, .ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__SDMA0_MASK, .ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK, + .ref_and_mask_sdma2 = GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK, + .ref_and_mask_sdma3 = GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK, + .ref_and_mask_sdma4 = GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK, + .ref_and_mask_sdma5 = GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK, + .ref_and_mask_sdma6 = GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK, + .ref_and_mask_sdma7 = GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK, }; static void nbio_v7_4_detect_hw_virt(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 10e885331023..647a4e916ec5 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -630,10 +630,7 @@ static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) u32 ref_and_mask = 0; const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg; - if (ring->me == 0) - ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0; - else - ref_and_mask = nbio_hf_reg->ref_and_mask_sdma1; + ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me; sdma_v4_0_wait_reg_mem(ring, 0, 1, adev->nbio_funcs->get_hdp_flush_done_offset(adev), -- cgit v1.2.3 From 0e54df05724e8ba7476681405f38e23f2277edd1 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 11 Sep 2018 11:07:09 +0800 Subject: drm/amdgpu/soc15: add Arcturus common ip blocks Add common IP blocks for Arcturus. 
Signed-off-by: Le Ma Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index fbc067786b2a..54d407f0d04f 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -598,12 +598,13 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) if (adev->flags & AMD_IS_APU) adev->nbio_funcs = &nbio_v7_0_funcs; - else if (adev->asic_type == CHIP_VEGA20) + else if (adev->asic_type == CHIP_VEGA20 || + adev->asic_type == CHIP_ARCTURUS) adev->nbio_funcs = &nbio_v7_4_funcs; else adev->nbio_funcs = &nbio_v6_1_funcs; - if (adev->asic_type == CHIP_VEGA20) + if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS) adev->df_funcs = &df_v3_6_funcs; else adev->df_funcs = &df_v1_7_funcs; @@ -675,6 +676,15 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) #endif amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block); break; + case CHIP_ARCTURUS: + amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); + amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); + amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); + if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) + amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); + amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); + amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); + break; default: return -EINVAL; } @@ -1001,6 +1011,10 @@ static int soc15_common_early_init(void *handle) AMD_PG_SUPPORT_CP | AMD_PG_SUPPORT_RLC_SMU_HS; break; + case CHIP_ARCTURUS: + adev->cg_flags = 0; + adev->pg_flags = 0; + break; default: /* FIXME: not supported yet */ return -EINVAL; -- cgit v1.2.3 From 61cf44c1dbb66c4f653de76a3740f732168658cc Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 11 Sep 2018 11:20:18 +0800 Subject: drm/amdgpu: add to set Arcturus ip blocks Add IP blocks for Arcturus. Signed-off-by: Le Ma Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index c1edf105c660..3b94eb04fb33 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1535,6 +1535,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: + case CHIP_ARCTURUS: if (adev->asic_type == CHIP_RAVEN) adev->family = AMDGPU_FAMILY_RV; else -- cgit v1.2.3 From 7fafd613c03b044883b1b538c57a4149260215a8 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 11 Sep 2018 11:35:34 +0800 Subject: drm/amdgpu: set Arcturus fw load type as direct We currently only support direct firmware loading. 
Signed-off-by: Le Ma Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index 9e99736aa984..df5ebf72a979 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -356,6 +356,8 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type) return AMDGPU_FW_LOAD_DIRECT; else return AMDGPU_FW_LOAD_PSP; + case CHIP_ARCTURUS: + return AMDGPU_FW_LOAD_DIRECT; default: DRM_ERROR("Unknown firmware load type\n"); } -- cgit v1.2.3 From 9f6ef81620f7e82c748ee0c48271f220d3f108fb Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 9 Jul 2019 09:23:11 -0500 Subject: drm/amdgpu/dce_virtual: add Arcturus virtual display support Virtual dce is a sw only display driver for emulation and virtualization and cases where we want to use a virtual display subsystem. Signed-off-by: Le Ma Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index 30a08f8ea4bc..ee2210b8712c 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -457,6 +457,7 @@ static int dce_virtual_hw_init(void *handle) case CHIP_VEGA10: case CHIP_VEGA12: case CHIP_VEGA20: + case CHIP_ARCTURUS: case CHIP_NAVI10: case CHIP_NAVI14: break; -- cgit v1.2.3 From 84519350a73ec6d98f3d5f43c8a90f95e88b7f67 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 11 Sep 2018 11:58:48 +0800 Subject: drm/amdgpu: add support for Arcturus firmware Add support for Arcturus gfx firmwares. Signed-off-by: Le Ma Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index eca9ea779649..36043f51470c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -104,6 +104,13 @@ MODULE_FIRMWARE("amdgpu/raven2_mec2.bin"); MODULE_FIRMWARE("amdgpu/raven2_rlc.bin"); MODULE_FIRMWARE("amdgpu/raven_kicker_rlc.bin"); +MODULE_FIRMWARE("amdgpu/arcturus_ce.bin"); +MODULE_FIRMWARE("amdgpu/arcturus_pfp.bin"); +MODULE_FIRMWARE("amdgpu/arcturus_me.bin"); +MODULE_FIRMWARE("amdgpu/arcturus_mec.bin"); +MODULE_FIRMWARE("amdgpu/arcturus_mec2.bin"); +MODULE_FIRMWARE("amdgpu/arcturus_rlc.bin"); + static const struct soc15_reg_golden golden_settings_gc_9_0[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400), @@ -645,6 +652,9 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev) else chip_name = "raven"; break; + case CHIP_ARCTURUS: + chip_name = "arcturus"; + break; default: BUG(); } -- cgit v1.2.3 From 6155e98ac018b886b31f8468380f2acd0fceb390 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 11 Sep 2018 12:11:36 +0800 Subject: drm/amdgpu: add gfx config for Arcturus Add Arcturus GFX config. 
Signed-off-by: Le Ma Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 36043f51470c..c40dd7d665fe 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1387,6 +1387,16 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) else gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN; break; + case CHIP_ARCTURUS: + adev->gfx.config.max_hw_contexts = 8; + adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; + adev->gfx.config.sc_prim_fifo_size_backend = 0x100; + adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; + adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; + gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG); + gb_addr_config &= ~0xf3e777ff; + gb_addr_config |= 0x22014042; + break; default: BUG(); break; -- cgit v1.2.3 From 763bee0fb3a9d1e51fe843b504fd659101c14101 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 11 Sep 2018 12:13:41 +0800 Subject: drm/amdgpu: add number of mec for Arcturus MEC is the CP compute microcontroller. Signed-off-by: Le Ma Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index c40dd7d665fe..dc08213ee04a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1673,6 +1673,7 @@ static int gfx_v9_0_sw_init(void *handle) case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: + case CHIP_ARCTURUS: adev->gfx.mec.num_mec = 2; break; default: -- cgit v1.2.3 From 2564444f8cbb29e0dabadd067e48c5c68540880d Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 11 Sep 2018 12:14:37 +0800 Subject: drm/amdgpu: add to set rlc funcs for Arcturus Shared with other gfx9 parts so use the same functions. Signed-off-by: Le Ma Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index dc08213ee04a..728d121df697 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -5361,6 +5361,7 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev) case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: + case CHIP_ARCTURUS: adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs; break; default: -- cgit v1.2.3 From 2065aa5494e465c0efb20fffd607db6cbfa74067 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 11 Sep 2018 12:33:11 +0800 Subject: drm/amdgpu: skip to get 3D engine clockgating state for Arcturus It's because Arcturus has not 3D engine. 
Signed-off-by: Le Ma Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 728d121df697..a421a84dcafc 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -4342,14 +4342,16 @@ static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags) if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS; - /* AMD_CG_SUPPORT_GFX_3D_CGCG */ - data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D); - if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK) - *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG; - - /* AMD_CG_SUPPORT_GFX_3D_CGLS */ - if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK) - *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS; + if (adev->asic_type != CHIP_ARCTURUS) { + /* AMD_CG_SUPPORT_GFX_3D_CGCG */ + data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D); + if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK) + *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG; + + /* AMD_CG_SUPPORT_GFX_3D_CGLS */ + if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK) + *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS; + } } static u64 gfx_v9_0_ring_get_rptr_gfx(struct amdgpu_ring *ring) -- cgit v1.2.3 From f2d6657111ed5c065b1f934b548a279205c3d370 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 11 Sep 2018 13:11:28 +0800 Subject: drm/amdgpu: skip pasid mapping for second mmhub on Arcturus There's no LUT register for second mmhub to convert pasid since it has no ATC. Signed-off-by: Le Ma Reviewed-by: Feifei Xu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 75801178efa1..a05040249a50 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -492,6 +492,10 @@ static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid, struct amdgpu_device *adev = ring->adev; uint32_t reg; + /* Do nothing because there's no lut register for mmhub1. */ + if (ring->funcs->vmhub == AMDGPU_MMHUB_1) + return; + if (ring->funcs->vmhub == AMDGPU_GFXHUB_0) reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid; else -- cgit v1.2.3 From 65e60f6e06e55ea214d9f56a68f93a6f7c4b8455 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 9 Jul 2019 09:30:19 -0500 Subject: drm/amdgpu: add Arcturus gpu info firmware Add GPU info firmware for Arcturus. 
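As a reminder of how the new chip_name is consumed, a hedged sketch of the request path in amdgpu_device_parse_gpu_info_fw() (this simply follows the existing pattern used for the other ASICs; only the Arcturus name is new):

	/* resolves to "amdgpu/arcturus_gpu_info.bin" for CHIP_ARCTURUS */
	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);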
Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 3b94eb04fb33..14a9169446f5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -70,6 +70,7 @@ MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin"); +MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin"); @@ -1387,6 +1388,9 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) else chip_name = "raven"; break; + case CHIP_ARCTURUS: + chip_name = "arcturus"; + break; case CHIP_NAVI10: chip_name = "navi10"; break; -- cgit v1.2.3 From 24c44c891782088a2d98a3883838186add8e2511 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 18 Sep 2018 16:11:44 +0800 Subject: drm/amdgpu: optimize gfx9 init_microcode function Split each type of firmware into single function for easy to maintain. Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 247 +++++++++++++++++++++------------- 1 file changed, 153 insertions(+), 94 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index a421a84dcafc..609678d2d264 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -617,47 +617,14 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev) } } -static int gfx_v9_0_init_microcode(struct amdgpu_device *adev) +static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev, + const char *chip_name) { - const char *chip_name; char fw_name[30]; int err; struct amdgpu_firmware_info *info = NULL; const struct common_firmware_header *header = NULL; const struct gfx_firmware_header_v1_0 *cp_hdr; - const struct rlc_firmware_header_v2_0 *rlc_hdr; - unsigned int *tmp = NULL; - unsigned int i = 0; - uint16_t version_major; - uint16_t version_minor; - uint32_t smu_version; - - DRM_DEBUG("\n"); - - switch (adev->asic_type) { - case CHIP_VEGA10: - chip_name = "vega10"; - break; - case CHIP_VEGA12: - chip_name = "vega12"; - break; - case CHIP_VEGA20: - chip_name = "vega20"; - break; - case CHIP_RAVEN: - if (adev->rev_id >= 8) - chip_name = "raven2"; - else if (adev->pdev->device == 0x15d8) - chip_name = "picasso"; - else - chip_name = "raven"; - break; - case CHIP_ARCTURUS: - chip_name = "arcturus"; - break; - default: - BUG(); - } snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name); err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev); @@ -692,6 +659,58 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev) adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP]; + info->ucode_id = AMDGPU_UCODE_ID_CP_PFP; + info->fw = adev->gfx.pfp_fw; + header = (const struct common_firmware_header *)info->fw->data; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); + + info = 
&adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME]; + info->ucode_id = AMDGPU_UCODE_ID_CP_ME; + info->fw = adev->gfx.me_fw; + header = (const struct common_firmware_header *)info->fw->data; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); + + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE]; + info->ucode_id = AMDGPU_UCODE_ID_CP_CE; + info->fw = adev->gfx.ce_fw; + header = (const struct common_firmware_header *)info->fw->data; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); + } + +out: + if (err) { + dev_err(adev->dev, + "gfx9: Failed to load firmware \"%s\"\n", + fw_name); + release_firmware(adev->gfx.pfp_fw); + adev->gfx.pfp_fw = NULL; + release_firmware(adev->gfx.me_fw); + adev->gfx.me_fw = NULL; + release_firmware(adev->gfx.ce_fw); + adev->gfx.ce_fw = NULL; + } + return err; +} + +static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev, + const char *chip_name) +{ + char fw_name[30]; + int err; + struct amdgpu_firmware_info *info = NULL; + const struct common_firmware_header *header = NULL; + const struct rlc_firmware_header_v2_0 *rlc_hdr; + unsigned int *tmp = NULL; + unsigned int i = 0; + uint16_t version_major; + uint16_t version_minor; + uint32_t smu_version; + /* * For Picasso && AM4 SOCKET board, we use picasso_rlc_am4.bin * instead of picasso_rlc.bin. @@ -766,57 +785,7 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev) if (adev->gfx.rlc.is_rlc_v2_1) gfx_v9_0_init_rlc_ext_microcode(adev); - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name); - err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->gfx.mec_fw); - if (err) - goto out; - cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; - adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); - adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); - - - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name); - err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev); - if (!err) { - err = amdgpu_ucode_validate(adev->gfx.mec2_fw); - if (err) - goto out; - cp_hdr = (const struct gfx_firmware_header_v1_0 *) - adev->gfx.mec2_fw->data; - adev->gfx.mec2_fw_version = - le32_to_cpu(cp_hdr->header.ucode_version); - adev->gfx.mec2_feature_version = - le32_to_cpu(cp_hdr->ucode_feature_version); - } else { - err = 0; - adev->gfx.mec2_fw = NULL; - } - if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { - info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP]; - info->ucode_id = AMDGPU_UCODE_ID_CP_PFP; - info->fw = adev->gfx.pfp_fw; - header = (const struct common_firmware_header *)info->fw->data; - adev->firmware.fw_size += - ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); - - info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME]; - info->ucode_id = AMDGPU_UCODE_ID_CP_ME; - info->fw = adev->gfx.me_fw; - header = (const struct common_firmware_header *)info->fw->data; - adev->firmware.fw_size += - ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); - - info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE]; - info->ucode_id = AMDGPU_UCODE_ID_CP_CE; - info->fw = adev->gfx.ce_fw; - header = (const struct common_firmware_header *)info->fw->data; - adev->firmware.fw_size += - ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); - info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G]; info->ucode_id = AMDGPU_UCODE_ID_RLC_G; info->fw = adev->gfx.rlc_fw; @@ -846,7 +815,58 @@ static int 
gfx_v9_0_init_microcode(struct amdgpu_device *adev) adev->firmware.fw_size += ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE); } + } + +out: + if (err) { + dev_err(adev->dev, + "gfx9: Failed to load firmware \"%s\"\n", + fw_name); + release_firmware(adev->gfx.rlc_fw); + adev->gfx.rlc_fw = NULL; + } + return err; +} + +static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev, + const char *chip_name) +{ + char fw_name[30]; + int err; + struct amdgpu_firmware_info *info = NULL; + const struct common_firmware_header *header = NULL; + const struct gfx_firmware_header_v1_0 *cp_hdr; + + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name); + err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev); + if (err) + goto out; + err = amdgpu_ucode_validate(adev->gfx.mec_fw); + if (err) + goto out; + cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; + adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); + adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); + + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name); + err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev); + if (!err) { + err = amdgpu_ucode_validate(adev->gfx.mec2_fw); + if (err) + goto out; + cp_hdr = (const struct gfx_firmware_header_v1_0 *) + adev->gfx.mec2_fw->data; + adev->gfx.mec2_fw_version = + le32_to_cpu(cp_hdr->header.ucode_version); + adev->gfx.mec2_feature_version = + le32_to_cpu(cp_hdr->ucode_feature_version); + } else { + err = 0; + adev->gfx.mec2_fw = NULL; + } + + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1]; info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1; info->fw = adev->gfx.mec_fw; @@ -875,7 +895,6 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev) adev->firmware.fw_size += ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE); } - } out: @@ -885,14 +904,6 @@ out: dev_err(adev->dev, "gfx9: Failed to load firmware \"%s\"\n", fw_name); - release_firmware(adev->gfx.pfp_fw); - adev->gfx.pfp_fw = NULL; - release_firmware(adev->gfx.me_fw); - adev->gfx.me_fw = NULL; - release_firmware(adev->gfx.ce_fw); - adev->gfx.ce_fw = NULL; - release_firmware(adev->gfx.rlc_fw); - adev->gfx.rlc_fw = NULL; release_firmware(adev->gfx.mec_fw); adev->gfx.mec_fw = NULL; release_firmware(adev->gfx.mec2_fw); @@ -901,6 +912,54 @@ out: return err; } +static int gfx_v9_0_init_microcode(struct amdgpu_device *adev) +{ + const char *chip_name; + int r; + + DRM_DEBUG("\n"); + + switch (adev->asic_type) { + case CHIP_VEGA10: + chip_name = "vega10"; + break; + case CHIP_VEGA12: + chip_name = "vega12"; + break; + case CHIP_VEGA20: + chip_name = "vega20"; + break; + case CHIP_RAVEN: + if (adev->rev_id >= 8) + chip_name = "raven2"; + else if (adev->pdev->device == 0x15d8) + chip_name = "picasso"; + else + chip_name = "raven"; + break; + break; + case CHIP_ARCTURUS: + chip_name = "arcturus"; + break; + default: + BUG(); + } + + r = gfx_v9_0_init_cp_gfx_microcode(adev, chip_name); + if (r) + return r; + + r = gfx_v9_0_init_rlc_microcode(adev, chip_name); + if (r) + return r; + + r = gfx_v9_0_init_cp_compute_microcode(adev, chip_name); + if (r) + return r; + + return r; +} + static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev) { u32 count = 0; -- cgit v1.2.3 From f8b733b9d4a316122378861526aeb90d3144c83e Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 18 Sep 2018 17:04:42 +0800 Subject: drm/amdgpu: skip load cp gfx firmware for Arcturus Arcturus has 
no CPG component any more. Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 609678d2d264..8f466282c6c5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -945,9 +945,12 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev) BUG(); } - r = gfx_v9_0_init_cp_gfx_microcode(adev, chip_name); - if (r) - return r; + /* No CPG in Arcturus */ + if (adev->asic_type != CHIP_ARCTURUS) { + r = gfx_v9_0_init_cp_gfx_microcode(adev, chip_name); + if (r) + return r; + } r = gfx_v9_0_init_rlc_microcode(adev, chip_name); if (r) @@ -3323,10 +3326,12 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev) gfx_v9_0_enable_gui_idle_interrupt(adev, false); if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { - /* legacy firmware loading */ - r = gfx_v9_0_cp_gfx_load_microcode(adev); - if (r) - return r; + if (adev->asic_type != CHIP_ARCTURUS) { + /* legacy firmware loading */ + r = gfx_v9_0_cp_gfx_load_microcode(adev); + if (r) + return r; + } r = gfx_v9_0_cp_compute_load_microcode(adev); if (r) -- cgit v1.2.3 From f5cdc2da460267fe234f8b5246896bea62c0db4a Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 18 Sep 2018 17:39:59 +0800 Subject: drm/amdgpu: skip all gfx ring settings for Arcturus Not needed on Arcturus. Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 39 ++++++++++++++++++++++------------- 1 file changed, 25 insertions(+), 14 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 8f466282c6c5..d8143b1e635a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -3342,18 +3342,22 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev) if (r) return r; - r = gfx_v9_0_cp_gfx_resume(adev); - if (r) - return r; + if (adev->asic_type != CHIP_ARCTURUS) { + r = gfx_v9_0_cp_gfx_resume(adev); + if (r) + return r; + } r = gfx_v9_0_kcq_resume(adev); if (r) return r; - ring = &adev->gfx.gfx_ring[0]; - r = amdgpu_ring_test_helper(ring); - if (r) - return r; + if (adev->asic_type != CHIP_ARCTURUS) { + ring = &adev->gfx.gfx_ring[0]; + r = amdgpu_ring_test_helper(ring); + if (r) + return r; + } for (i = 0; i < adev->gfx.num_compute_rings; i++) { ring = &adev->gfx.compute_ring[i]; @@ -3367,7 +3371,8 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev) static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable) { - gfx_v9_0_cp_gfx_enable(adev, enable); + if (adev->asic_type != CHIP_ARCTURUS) + gfx_v9_0_cp_gfx_enable(adev, enable); gfx_v9_0_cp_compute_enable(adev, enable); } @@ -3392,9 +3397,11 @@ static int gfx_v9_0_hw_init(void *handle) if (r) return r; - r = gfx_v9_0_ngg_en(adev); - if (r) - return r; + if (adev->asic_type != CHIP_ARCTURUS) { + r = gfx_v9_0_ngg_en(adev); + if (r) + return r; + } return r; } @@ -3542,8 +3549,9 @@ static int gfx_v9_0_soft_reset(void *handle) /* stop the rlc */ adev->gfx.rlc.funcs->stop(adev); - /* Disable GFX parsing/prefetching */ - gfx_v9_0_cp_gfx_enable(adev, false); + if (adev->asic_type != CHIP_ARCTURUS) + /* Disable GFX parsing/prefetching */ + gfx_v9_0_cp_gfx_enable(adev, false); /* Disable MEC parsing/prefetching */ 
gfx_v9_0_cp_compute_enable(adev, false); @@ -3886,7 +3894,10 @@ static int gfx_v9_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS; + if (adev->asic_type == CHIP_ARCTURUS) + adev->gfx.num_gfx_rings = 0; + else + adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS; adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS; gfx_v9_0_set_ring_funcs(adev); gfx_v9_0_set_irq_funcs(adev); -- cgit v1.2.3 From 3d81f67a1b1bfc789651e02dfe9705bb8483f133 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Wed, 19 Sep 2018 14:17:37 +0800 Subject: drm/amdgpu: support sdma 2~7 doorbell range register offset Update the doorbell range registers to support additional SDMA rings. Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c index 2e1098c5c32b..fc45eaeaba6e 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c @@ -86,10 +86,24 @@ static u32 nbio_v7_4_get_memsize(struct amdgpu_device *adev) static void nbio_v7_4_sdma_doorbell_range(struct amdgpu_device *adev, int instance, bool use_doorbell, int doorbell_index, int doorbell_size) { - u32 reg = instance == 0 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE) : - SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE); + u32 reg, doorbell_range; - u32 doorbell_range = RREG32(reg); + if (instance < 2) + reg = instance + + SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE); + else + /* + * These registers address of SDMA2~7 is not consecutive + * from SDMA0~1. Need plus 4 dwords offset. 
+ * + * BIF_SDMA0_DOORBELL_RANGE: 0x3bc0 + * BIF_SDMA1_DOORBELL_RANGE: 0x3bc4 + * BIF_SDMA2_DOORBELL_RANGE: 0x3bd8 + */ + reg = instance + 0x4 + + SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE); + + doorbell_range = RREG32(reg); if (use_doorbell) { doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index); -- cgit v1.2.3 From 5cd54ab85d6148b666f8ca1e5eb94fae5e4d392a Mon Sep 17 00:00:00 2001 From: Le Ma Date: Thu, 15 Nov 2018 18:56:17 +0800 Subject: drm/amdgpu: correct Arcturus SDMA address space base index Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 647a4e916ec5..b97306f1df11 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -211,17 +211,17 @@ static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev, case 1: return (adev->reg_offset[SDMA1_HWIP][0][0] + offset); case 2: - return (adev->reg_offset[SDMA2_HWIP][0][0] + offset); + return (adev->reg_offset[SDMA2_HWIP][0][1] + offset); case 3: - return (adev->reg_offset[SDMA3_HWIP][0][0] + offset); + return (adev->reg_offset[SDMA3_HWIP][0][1] + offset); case 4: - return (adev->reg_offset[SDMA4_HWIP][0][0] + offset); + return (adev->reg_offset[SDMA4_HWIP][0][1] + offset); case 5: - return (adev->reg_offset[SDMA5_HWIP][0][0] + offset); + return (adev->reg_offset[SDMA5_HWIP][0][1] + offset); case 6: - return (adev->reg_offset[SDMA6_HWIP][0][0] + offset); + return (adev->reg_offset[SDMA6_HWIP][0][1] + offset); case 7: - return (adev->reg_offset[SDMA7_HWIP][0][0] + offset); + return (adev->reg_offset[SDMA7_HWIP][0][1] + offset); default: break; } -- cgit v1.2.3 From 121d859918a3220db5fb49bcf07803dc5675679e Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 20 Nov 2018 15:15:31 +0800 Subject: drm/amdgpu: enable 8 SDMA instances for Arcturus All the 8 SDMA instances work fine on the latest Gopher build model. Signed-off-by: Le Ma Reviewed-by: Snow Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index b97306f1df11..5abedba444fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1542,6 +1542,8 @@ static int sdma_v4_0_early_init(void *handle) if (adev->asic_type == CHIP_RAVEN) adev->sdma.num_instances = 1; + else if (adev->asic_type == CHIP_ARCTURUS) + adev->sdma.num_instances = 8; else adev->sdma.num_instances = 2; -- cgit v1.2.3 From 5ce40fd86cf155e0eefc73509343bb3eeaafa4bc Mon Sep 17 00:00:00 2001 From: Le Ma Date: Thu, 15 Nov 2018 18:54:35 +0800 Subject: drm/amdgpu: add Arcturus chip_name for init sdma microcode So we load the proper firmware for arcturus. 
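For reference, sdma_v4_0_init_microcode() expands this name once per instance, following the existing convention that instance 0 has no suffix, so with eight Arcturus engines the driver requests arcturus_sdma.bin and arcturus_sdma1.bin through arcturus_sdma7.bin. A hedged sketch of that loop (error handling abbreviated):

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (i == 0)
			snprintf(fw_name, sizeof(fw_name),
				 "amdgpu/%s_sdma.bin", chip_name);
		else
			snprintf(fw_name, sizeof(fw_name),
				 "amdgpu/%s_sdma%d.bin", chip_name, i);
		err = request_firmware(&adev->sdma.instance[i].fw,
				       fw_name, adev->dev);
		if (err)
			goto out;
	}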
Signed-off-by: Le Ma Acked-by: Felix Kuehling Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 5abedba444fc..e184645e0c47 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -369,6 +369,9 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev) else chip_name = "raven"; break; + case CHIP_ARCTURUS: + chip_name = "arcturus"; + break; default: BUG(); } -- cgit v1.2.3 From f9c84ae582f675c58ff37728c708085278583935 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 26 Feb 2019 20:37:17 +0800 Subject: drm/amdgpu: correct programming of ih_chicken for Arcturus ih_chicken is a register that is not allowed to access by driver in the L0 security policy. psp bl need to enable field to allow driver to use physical bus address for ih ring on secure part. Signed-off-by: Le Ma Reviewed-by: Snow Zhang Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c index 22260e6963b8..614d9cefb24f 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c @@ -219,7 +219,7 @@ static uint32_t vega10_ih_doorbell_rptr(struct amdgpu_ih_ring *ih) static int vega10_ih_irq_init(struct amdgpu_device *adev) { struct amdgpu_ih_ring *ih; - u32 ih_rb_cntl; + u32 ih_rb_cntl, ih_chicken; int ret = 0; u32 tmp; @@ -247,6 +247,15 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev) WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl); } + if (adev->asic_type == CHIP_ARCTURUS && + adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { + if (adev->irq.ih.use_bus_addr) { + ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN); + ih_chicken |= 0x00000010; + WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN, ih_chicken); + } + } + /* set the writeback address whether it's enabled or not */ WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO, lower_32_bits(ih->wptr_addr)); -- cgit v1.2.3 From f864e3e65556968c63f7fc3b129b040e34620400 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 16 Jul 2019 13:39:42 -0500 Subject: drm/amdgpu: add paging queue support for 8 SDMA instances on Arcturus MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Properly enable all 8 instances for paging queue. 
Signed-off-by: Le Ma Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 62 ++++++++++++++++++++++++++++------ 1 file changed, 51 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index e184645e0c47..821126247b09 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -733,16 +733,20 @@ static void sdma_v4_0_rlc_stop(struct amdgpu_device *adev) */ static void sdma_v4_0_page_stop(struct amdgpu_device *adev) { - struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].page; - struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].page; + struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES]; u32 rb_cntl, ib_cntl; int i; - - if ((adev->mman.buffer_funcs_ring == sdma0) || - (adev->mman.buffer_funcs_ring == sdma1)) - amdgpu_ttm_set_buffer_funcs_status(adev, false); + bool unset = false; for (i = 0; i < adev->sdma.num_instances; i++) { + sdma[i] = &adev->sdma.instance[i].page; + + if ((adev->mman.buffer_funcs_ring == sdma[i]) && + (unset == false)) { + amdgpu_ttm_set_buffer_funcs_status(adev, false); + unset = true; + } + rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL); rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL, RB_ENABLE, 0); @@ -751,10 +755,9 @@ static void sdma_v4_0_page_stop(struct amdgpu_device *adev) ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL, IB_ENABLE, 0); WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl); - } - sdma0->sched.ready = false; - sdma1->sched.ready = false; + sdma[i]->sched.ready = false; + } } /** @@ -2286,12 +2289,44 @@ static const struct amdgpu_ring_funcs sdma_v4_0_page_ring_funcs = { .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, }; +static const struct amdgpu_ring_funcs sdma_v4_0_page_ring_funcs_2nd_mmhub = { + .type = AMDGPU_RING_TYPE_SDMA, + .align_mask = 0xf, + .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), + .support_64bit_ptrs = true, + .vmhub = AMDGPU_MMHUB_1, + .get_rptr = sdma_v4_0_ring_get_rptr, + .get_wptr = sdma_v4_0_page_ring_get_wptr, + .set_wptr = sdma_v4_0_page_ring_set_wptr, + .emit_frame_size = + 6 + /* sdma_v4_0_ring_emit_hdp_flush */ + 3 + /* hdp invalidate */ + 6 + /* sdma_v4_0_ring_emit_pipeline_sync */ + /* sdma_v4_0_ring_emit_vm_flush */ + SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 + + 10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */ + .emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */ + .emit_ib = sdma_v4_0_ring_emit_ib, + .emit_fence = sdma_v4_0_ring_emit_fence, + .emit_pipeline_sync = sdma_v4_0_ring_emit_pipeline_sync, + .emit_vm_flush = sdma_v4_0_ring_emit_vm_flush, + .emit_hdp_flush = sdma_v4_0_ring_emit_hdp_flush, + .test_ring = sdma_v4_0_ring_test_ring, + .test_ib = sdma_v4_0_ring_test_ib, + .insert_nop = sdma_v4_0_ring_insert_nop, + .pad_ib = sdma_v4_0_ring_pad_ib, + .emit_wreg = sdma_v4_0_ring_emit_wreg, + .emit_reg_wait = sdma_v4_0_ring_emit_reg_wait, + .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, +}; + static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev) { int i; for (i = 0; i < adev->sdma.num_instances; i++) { - if (adev->asic_type == CHIP_ARCTURUS && i >=5) + if (adev->asic_type == CHIP_ARCTURUS && i >= 5) adev->sdma.instance[i].ring.funcs = &sdma_v4_0_ring_funcs_2nd_mmhub; else @@ -2299,7 +2334,12 @@ static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev) &sdma_v4_0_ring_funcs; adev->sdma.instance[i].ring.me = 
i; if (adev->sdma.has_page_queue) { - adev->sdma.instance[i].page.funcs = &sdma_v4_0_page_ring_funcs; + if (adev->asic_type == CHIP_ARCTURUS && i >= 5) + adev->sdma.instance[i].page.funcs = + &sdma_v4_0_page_ring_funcs_2nd_mmhub; + else + adev->sdma.instance[i].page.funcs = + &sdma_v4_0_page_ring_funcs; adev->sdma.instance[i].page.me = i; } } -- cgit v1.2.3 From 7f40581c2e468f7180de7b95e53fbbc9f64682aa Mon Sep 17 00:00:00 2001 From: Oak Zeng Date: Wed, 19 Dec 2018 08:44:38 -0600 Subject: drm/amdgpu: Initialize asic functions for Arcturus After cherry-picking doorbell rework changes from drm-next branch, Arcturus asic functions pointer need to be initialized to init doorbell index for Arcturus. Signed-off-by: Oak Zeng Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 54d407f0d04f..8b17eeed09ad 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -1012,6 +1012,7 @@ static int soc15_common_early_init(void *handle) AMD_PG_SUPPORT_RLC_SMU_HS; break; case CHIP_ARCTURUS: + adev->asic_funcs = &vega20_asic_funcs; adev->cg_flags = 0; adev->pg_flags = 0; break; -- cgit v1.2.3 From 3a65d14d259547794db679ce5741da182d6f0b2d Mon Sep 17 00:00:00 2001 From: Oak Zeng Date: Thu, 14 Feb 2019 14:53:15 -0600 Subject: drm/amdkfd: Extend PM4 packets to support 8 SDMA Extend map_queue and unmap_queue PM4 packets to support 8 SDMA engines. The new format is backward compatible. Signed-off-by: Oak Zeng Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c | 26 ++++++++++++++++++++---- drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h | 14 +++++++++++-- 2 files changed, 34 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c index 2d5ddf199bd0..91da72d0d405 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c @@ -161,6 +161,8 @@ static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer, packet->bitfields2.engine_sel = engine_sel__mes_map_queues__compute_vi; packet->bitfields2.gws_control_queue = q->gws ? 
1 : 0; + packet->bitfields2.extended_engine_sel = + extended_engine_sel__mes_map_queues__legacy_engine_sel; packet->bitfields2.queue_type = queue_type__mes_map_queues__normal_compute_vi; @@ -176,9 +178,15 @@ static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer, break; case KFD_QUEUE_TYPE_SDMA: case KFD_QUEUE_TYPE_SDMA_XGMI: - packet->bitfields2.engine_sel = q->properties.sdma_engine_id + - engine_sel__mes_map_queues__sdma0_vi; use_static = false; /* no static queues under SDMA */ + if (q->properties.sdma_engine_id < 2) + packet->bitfields2.engine_sel = q->properties.sdma_engine_id + + engine_sel__mes_map_queues__sdma0_vi; + else { + packet->bitfields2.extended_engine_sel = + extended_engine_sel__mes_map_queues__sdma0_to_7_sel; + packet->bitfields2.engine_sel = q->properties.sdma_engine_id; + } break; default: WARN(1, "queue type %d", q->properties.type); @@ -218,13 +226,23 @@ static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer, switch (type) { case KFD_QUEUE_TYPE_COMPUTE: case KFD_QUEUE_TYPE_DIQ: + packet->bitfields2.extended_engine_sel = + extended_engine_sel__mes_unmap_queues__legacy_engine_sel; packet->bitfields2.engine_sel = engine_sel__mes_unmap_queues__compute; break; case KFD_QUEUE_TYPE_SDMA: case KFD_QUEUE_TYPE_SDMA_XGMI: - packet->bitfields2.engine_sel = - engine_sel__mes_unmap_queues__sdma0 + sdma_engine; + if (sdma_engine < 2) { + packet->bitfields2.extended_engine_sel = + extended_engine_sel__mes_unmap_queues__legacy_engine_sel; + packet->bitfields2.engine_sel = + engine_sel__mes_unmap_queues__sdma0 + sdma_engine; + } else { + packet->bitfields2.extended_engine_sel = + extended_engine_sel__mes_unmap_queues__sdma0_to_7_sel; + packet->bitfields2.engine_sel = sdma_engine; + } break; default: WARN(1, "queue type %d", type); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h index e3e21404cfa0..44ed94239513 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h @@ -260,6 +260,10 @@ enum mes_map_queues_engine_sel_enum { engine_sel__mes_map_queues__sdma1_vi = 3 }; +enum mes_map_queues_extended_engine_sel_enum { + extended_engine_sel__mes_map_queues__legacy_engine_sel = 0, + extended_engine_sel__mes_map_queues__sdma0_to_7_sel = 1 +}; struct pm4_mes_map_queues { union { @@ -269,7 +273,8 @@ struct pm4_mes_map_queues { union { struct { - uint32_t reserved1:4; + uint32_t reserved1:2; + enum mes_map_queues_extended_engine_sel_enum extended_engine_sel:2; enum mes_map_queues_queue_sel_enum queue_sel:2; uint32_t reserved5:6; uint32_t gws_control_queue:1; @@ -382,6 +387,11 @@ enum mes_unmap_queues_engine_sel_enum { engine_sel__mes_unmap_queues__sdmal = 3 }; +enum mes_unmap_queues_extended_engine_sel_enum { + extended_engine_sel__mes_unmap_queues__legacy_engine_sel = 0, + extended_engine_sel__mes_unmap_queues__sdma0_to_7_sel = 1 +}; + struct pm4_mes_unmap_queues { union { union PM4_MES_TYPE_3_HEADER header; /* header */ @@ -391,7 +401,7 @@ struct pm4_mes_unmap_queues { union { struct { enum mes_unmap_queues_action_enum action:2; - uint32_t reserved1:2; + enum mes_unmap_queues_extended_engine_sel_enum extended_engine_sel:2; enum mes_unmap_queues_queue_sel_enum queue_sel:2; uint32_t reserved2:20; enum mes_unmap_queues_engine_sel_enum engine_sel:3; -- cgit v1.2.3 From 2fb1e49fda46e78db38eb0bed5ebda3940564458 Mon Sep 17 00:00:00 2001 From: Oak Zeng Date: Tue, 19 Feb 2019 14:59:51 -0600 Subject: drm/amdkfd: Support bigger gds size Extend 
map_process and set_resources pm4 packet to support bigger gds size for arcturus. v2: Only make the change for v9 Signed-off-by: Oak Zeng Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c | 33 ++++++++++++++++++++++-- drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h | 10 +++---- 2 files changed, 36 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c index 91da72d0d405..9a4bafb2e175 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c @@ -81,7 +81,8 @@ static int pm_map_process_v9(struct packet_manager *pm, packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0; packet->bitfields2.process_quantum = 1; packet->bitfields2.pasid = qpd->pqm->process->pasid; - packet->bitfields14.gds_size = qpd->gds_size; + packet->bitfields14.gds_size = qpd->gds_size & 0x3F; + packet->bitfields14.gds_size_hi = (qpd->gds_size >> 6) & 0xF; packet->bitfields14.num_gws = qpd->num_gws; packet->bitfields14.num_oac = qpd->num_oac; packet->bitfields14.sdma_enable = 1; @@ -143,6 +144,34 @@ static int pm_runlist_v9(struct packet_manager *pm, uint32_t *buffer, return 0; } +static int pm_set_resources_v9(struct packet_manager *pm, uint32_t *buffer, + struct scheduling_resources *res) +{ + struct pm4_mes_set_resources *packet; + + packet = (struct pm4_mes_set_resources *)buffer; + memset(buffer, 0, sizeof(struct pm4_mes_set_resources)); + + packet->header.u32All = pm_build_pm4_header(IT_SET_RESOURCES, + sizeof(struct pm4_mes_set_resources)); + + packet->bitfields2.queue_type = + queue_type__mes_set_resources__hsa_interface_queue_hiq; + packet->bitfields2.vmid_mask = res->vmid_mask; + packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100; + packet->bitfields7.oac_mask = res->oac_mask; + packet->bitfields8.gds_heap_base = res->gds_heap_base; + packet->bitfields8.gds_heap_size = res->gds_heap_size; + + packet->gws_mask_lo = lower_32_bits(res->gws_mask); + packet->gws_mask_hi = upper_32_bits(res->gws_mask); + + packet->queue_mask_lo = lower_32_bits(res->queue_mask); + packet->queue_mask_hi = upper_32_bits(res->queue_mask); + + return 0; +} + static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer, struct queue *q, bool is_static) { @@ -344,7 +373,7 @@ static int pm_release_mem_v9(uint64_t gpu_addr, uint32_t *buffer) const struct packet_manager_funcs kfd_v9_pm_funcs = { .map_process = pm_map_process_v9, .runlist = pm_runlist_v9, - .set_resources = pm_set_resources_vi, + .set_resources = pm_set_resources_v9, .map_queues = pm_map_queues_v9, .unmap_queues = pm_unmap_queues_v9, .query_status = pm_query_status_v9, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h index 44ed94239513..4d7add843746 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h @@ -83,10 +83,10 @@ struct pm4_mes_set_resources { union { struct { - uint32_t gds_heap_base:6; - uint32_t reserved3:5; - uint32_t gds_heap_size:6; - uint32_t reserved4:15; + uint32_t gds_heap_base:10; + uint32_t reserved3:1; + uint32_t gds_heap_size:10; + uint32_t reserved4:11; } bitfields8; uint32_t ordinal8; }; @@ -179,7 +179,7 @@ struct pm4_mes_map_process { uint32_t num_gws:7; uint32_t sdma_enable:1; uint32_t num_oac:4; - uint32_t reserved8:4; + uint32_t gds_size_hi:4; uint32_t gds_size:6; uint32_t 
num_queues:10; } bitfields14; -- cgit v1.2.3 From 49adcf8a6f951450417c14afa6a404b7caea25ef Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Tue, 9 Jul 2019 09:37:04 -0500 Subject: amd/amdkfd: Add ASIC ARCTURUS to kfd Add initial support for ARCTURUS to kfd. Signed-off-by: Yong Zhao Signed-off-by: Oak Zeng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 1 + drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 1 + drivers/gpu/drm/amd/amdkfd/kfd_device.c | 16 ++++++++++++++++ drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 1 + drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c | 1 + drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 1 + drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 1 + drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 1 + 8 files changed, 23 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 9fa4f25a3745..af6b1eae94c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -85,6 +85,7 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev) case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: + case CHIP_ARCTURUS: kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions(); break; case CHIP_NAVI10: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 792371442195..b30ccbfeb648 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -662,6 +662,7 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev, case CHIP_VEGA10: case CHIP_VEGA12: case CHIP_VEGA20: + case CHIP_ARCTURUS: pcache_info = vega10_cache_info; num_of_cache_types = ARRAY_SIZE(vega10_cache_info); break; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 3322a443dfb2..81602fb89d87 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -317,6 +317,22 @@ static const struct kfd_device_info vega20_device_info = { .num_sdma_queues_per_engine = 8, }; +static const struct kfd_device_info arcturus_device_info = { + .asic_family = CHIP_ARCTURUS, + .max_pasid_bits = 16, + .max_no_of_hqd = 24, + .doorbell_size = 8, + .ih_ring_entry_size = 8 * sizeof(uint32_t), + .event_interrupt_class = &event_interrupt_class_v9, + .num_of_watch_points = 4, + .mqd_size_aligned = MQD_SIZE_ALIGNED, + .supports_cwsr = true, + .needs_iommu_device = false, + .needs_pci_atomics = false, + .num_sdma_engines = 2, + .num_sdma_queues_per_engine = 8, +}; + static const struct kfd_device_info navi10_device_info = { .asic_family = CHIP_NAVI10, .max_pasid_bits = 16, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index e6a4288bfaa6..d7794c8dc9cc 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1786,6 +1786,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: + case CHIP_ARCTURUS: device_queue_manager_init_v9(&dqm->asic_ops); break; case CHIP_NAVI10: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c index 60521366dd31..9dc4bff8085e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c @@ -405,6 +405,7 @@ int kfd_init_apertures(struct kfd_process *process) case CHIP_VEGA12: 
case CHIP_VEGA20: case CHIP_RAVEN: + case CHIP_ARCTURUS: case CHIP_NAVI10: kfd_init_apertures_v9(pdd, id); break; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c index 29c0bd2d7a5c..8b4564f71a7a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c @@ -330,6 +330,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: + case CHIP_ARCTURUS: kernel_queue_init_v9(&kq->ops_asic_specific); break; case CHIP_NAVI10: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c index ccf6b2310316..2c8624c5b42c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c @@ -239,6 +239,7 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm) case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: + case CHIP_ARCTURUS: pm->pmf = &kfd_v9_pm_funcs; break; case CHIP_NAVI10: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index c2e6e47abaf2..36fa98fe858b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -1321,6 +1321,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu) case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: + case CHIP_ARCTURUS: case CHIP_NAVI10: dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 << HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) & -- cgit v1.2.3 From 6d5311ab2c2a5e7b5412466be1dc63df1052f6e7 Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Thu, 15 Nov 2018 12:59:34 -0500 Subject: drm/amdkfd: Expose function mmhub_v9_4_setup_vm_pt_regs() for kfd to use Signed-off-by: Yong Zhao Signed-off-by: Oak Zeng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h | 7 +++++++ drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c | 17 ++++++++++++----- 2 files changed, 19 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h index 5c8deac65580..971c0840358f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h @@ -37,4 +37,11 @@ extern const struct amd_ip_funcs gmc_v9_0_ip_funcs; extern const struct amdgpu_ip_block_version gmc_v9_0_ip_block; +/* amdgpu_amdkfd*.c */ +void gfxhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, + uint64_t value); +void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, + uint64_t value); +void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, int hubid, + uint32_t vmid, uint64_t value); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c index aa9b43b6ba6b..7f79fa4b6ad1 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c @@ -46,18 +46,23 @@ u64 mmhub_v9_4_get_fb_location(struct amdgpu_device *adev) return base; } -static void mmhub_v9_4_init_gart_pt_regs(struct amdgpu_device *adev, int hubid) +void mmhub_v9_4_setup_vm_pt_regs(struct amdgpu_device *adev, int hubid, + uint32_t vmid, uint64_t value) { - uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo); + /* two registers distance between mmVML2VC0_VM_CONTEXT0_* to + * mmVML2VC0_VM_CONTEXT1_* + */ + int dist = mmVML2VC0_VM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 + - mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32; WREG32_SOC15_OFFSET(MMHUB, 
0, mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, - hubid * MMHUB_INSTANCE_REGISTER_OFFSET, + dist * vmid + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, lower_32_bits(value)); WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, - hubid * MMHUB_INSTANCE_REGISTER_OFFSET, + dist * vmid + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, upper_32_bits(value)); } @@ -65,7 +70,9 @@ static void mmhub_v9_4_init_gart_pt_regs(struct amdgpu_device *adev, int hubid) static void mmhub_v9_4_init_gart_aperture_regs(struct amdgpu_device *adev, int hubid) { - mmhub_v9_4_init_gart_pt_regs(adev, hubid); + uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo); + + mmhub_v9_4_setup_vm_pt_regs(adev, hubid, 0, pt_base); WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, -- cgit v1.2.3 From a23e72d7e9dbbc661fc434491ed99ef6efab85a7 Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Fri, 12 Oct 2018 16:16:15 -0400 Subject: drm/amdkfd: Support two MMHUBs when setting up page table base in KFD 2 mmhubs on arcturus. Signed-off-by: Yong Zhao Signed-off-by: Oak Zeng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index 85395f2d83a6..38630e37e8cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -47,6 +47,7 @@ #include "soc15d.h" #include "mmhub_v1_0.h" #include "gfxhub_v1_0.h" +#include "gmc_v9_0.h" #define V9_PIPE_PER_MEC (4) @@ -884,7 +885,12 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, * now, all processes share the same address space size, like * on GFX8 and older. */ - mmhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base); + if (adev->asic_type == CHIP_ARCTURUS) { + /* Two MMHUBs */ + mmhub_v9_4_setup_vm_pt_regs(adev, 0, vmid, page_table_base); + mmhub_v9_4_setup_vm_pt_regs(adev, 1, vmid, page_table_base); + } else + mmhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base); gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base); } -- cgit v1.2.3 From 3a68a638a96f1468e267681f833928fd66185a6a Mon Sep 17 00:00:00 2001 From: Oak Zeng Date: Wed, 12 Dec 2018 16:33:56 -0600 Subject: drm/amdkfd: Change arcturus sdma engines number Arcturus has 8 sdma engines Signed-off-by: Oak Zeng Reviewed-by: Yong Zhao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 81602fb89d87..511bc2523f2f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -329,7 +329,7 @@ static const struct kfd_device_info arcturus_device_info = { .supports_cwsr = true, .needs_iommu_device = false, .needs_pci_atomics = false, - .num_sdma_engines = 2, + .num_sdma_engines = 8, .num_sdma_queues_per_engine = 8, }; -- cgit v1.2.3 From 35cdc81bfa94d10373ecae279f3c48ca858ac4fd Mon Sep 17 00:00:00 2001 From: Oak Zeng Date: Tue, 9 Jul 2019 09:40:15 -0500 Subject: drm/amdkfd: Fix sdma_bitmap overflow issue In the original formula, when sdma queue number is 64, the left shift overflows. Use an equivalence that won't overflow. 
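A quick worked example of why the old expression breaks once a device exposes 64 SDMA queues (8 engines x 8 queues per engine on Arcturus): shifting a 64-bit value by 64 is undefined behaviour in C, and on x86 it typically wraps to a shift by 0, so (1ULL << 64) - 1 is observed to evaluate to 0 and no SDMA queue could ever be allocated. The replacement stays within defined shift counts:

	unsigned int n = get_num_sdma_queues(dqm);	/* 64 on Arcturus */

	/* old: (1ULL << 64) - 1 is undefined; observed result is 0 */
	/* new: ~0ULL >> (64 - 64) == ~0ULL, i.e. all 64 queue bits set */
	dqm->sdma_bitmap = ~0ULL >> (64 - n);

For smaller counts the two forms agree, e.g. n = 16 yields 0xFFFF either way.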
Signed-off-by: Oak Zeng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index d7794c8dc9cc..fe1ce348fdcd 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -880,8 +880,8 @@ static int initialize_nocpsch(struct device_queue_manager *dqm) } dqm->vmid_bitmap = (1 << dqm->dev->vm_info.vmid_num_kfd) - 1; - dqm->sdma_bitmap = (1ULL << get_num_sdma_queues(dqm)) - 1; - dqm->xgmi_sdma_bitmap = (1ULL << get_num_xgmi_sdma_queues(dqm)) - 1; + dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm)); + dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm)); return 0; } @@ -1019,8 +1019,8 @@ static int initialize_cpsch(struct device_queue_manager *dqm) dqm->sdma_queue_count = 0; dqm->xgmi_sdma_queue_count = 0; dqm->active_runlist = false; - dqm->sdma_bitmap = (1ULL << get_num_sdma_queues(dqm)) - 1; - dqm->xgmi_sdma_bitmap = (1ULL << get_num_xgmi_sdma_queues(dqm)) - 1; + dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm)); + dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm)); INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception); -- cgit v1.2.3 From 3e205a0849a760166578b4d95b17e904f23d962e Mon Sep 17 00:00:00 2001 From: Oak Zeng Date: Tue, 9 Jul 2019 09:59:30 -0500 Subject: drm/amdkfd: Implement kfd2kgd_calls for Arcturus Arcturus shares most of the kfd2kgd_calls with gfx9. But due to SDMA register address change, it can't share SDMA related functions. Export gfx9 kfd2kgd_calls and implement SDMA related functions for Arcturus. 
Signed-off-by: Oak Zeng Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 1 + .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c | 324 +++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 161 ++++------ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h | 83 ++++++ 6 files changed, 465 insertions(+), 109 deletions(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 97671df6a5f0..dc22569e9732 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -163,6 +163,7 @@ amdgpu-y += \ amdgpu_amdkfd_gpuvm.o \ amdgpu_amdkfd_gfx_v8.o \ amdgpu_amdkfd_gfx_v9.o \ + amdgpu_amdkfd_arcturus.o \ amdgpu_amdkfd_gfx_v10.o ifneq ($(CONFIG_DRM_AMDGPU_CIK),) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index af6b1eae94c1..573a6296b33a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -85,9 +85,11 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev) case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: - case CHIP_ARCTURUS: kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions(); break; + case CHIP_ARCTURUS: + kfd2kgd = amdgpu_amdkfd_arcturus_get_functions(); + break; case CHIP_NAVI10: kfd2kgd = amdgpu_amdkfd_gfx_10_0_get_functions(); break; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index b6076d19e442..e519df3fd2b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -140,6 +140,7 @@ bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd); struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void); struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void); struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void); +struct kfd2kgd_calls *amdgpu_amdkfd_arcturus_get_functions(void); struct kfd2kgd_calls *amdgpu_amdkfd_gfx_10_0_get_functions(void); bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c new file mode 100644 index 000000000000..4d9101834ba7 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c @@ -0,0 +1,324 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#undef pr_fmt +#define pr_fmt(fmt) "kfd2kgd: " fmt + +#include +#include +#include +#include +#include +#include +#include "amdgpu.h" +#include "amdgpu_amdkfd.h" +#include "sdma0/sdma0_4_2_2_offset.h" +#include "sdma0/sdma0_4_2_2_sh_mask.h" +#include "sdma1/sdma1_4_2_2_offset.h" +#include "sdma1/sdma1_4_2_2_sh_mask.h" +#include "sdma2/sdma2_4_2_2_offset.h" +#include "sdma2/sdma2_4_2_2_sh_mask.h" +#include "sdma3/sdma3_4_2_2_offset.h" +#include "sdma3/sdma3_4_2_2_sh_mask.h" +#include "sdma4/sdma4_4_2_2_offset.h" +#include "sdma4/sdma4_4_2_2_sh_mask.h" +#include "sdma5/sdma5_4_2_2_offset.h" +#include "sdma5/sdma5_4_2_2_sh_mask.h" +#include "sdma6/sdma6_4_2_2_offset.h" +#include "sdma6/sdma6_4_2_2_sh_mask.h" +#include "sdma7/sdma7_4_2_2_offset.h" +#include "sdma7/sdma7_4_2_2_sh_mask.h" +#include "v9_structs.h" +#include "soc15.h" +#include "soc15d.h" +#include "amdgpu_amdkfd_gfx_v9.h" + +#define HQD_N_REGS 56 +#define DUMP_REG(addr) do { \ + if (WARN_ON_ONCE(i >= HQD_N_REGS)) \ + break; \ + (*dump)[i][0] = (addr) << 2; \ + (*dump)[i++][1] = RREG32(addr); \ + } while (0) + +static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) +{ + return (struct amdgpu_device *)kgd; +} + +static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd) +{ + return (struct v9_sdma_mqd *)mqd; +} + +static uint32_t get_sdma_base_addr(struct amdgpu_device *adev, + unsigned int engine_id, + unsigned int queue_id) +{ + uint32_t base[8] = { + SOC15_REG_OFFSET(SDMA0, 0, + mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL, + SOC15_REG_OFFSET(SDMA1, 0, + mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL, + SOC15_REG_OFFSET(SDMA2, 0, + mmSDMA2_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL, + SOC15_REG_OFFSET(SDMA3, 0, + mmSDMA3_RLC0_RB_CNTL) - mmSDMA3_RLC0_RB_CNTL, + SOC15_REG_OFFSET(SDMA4, 0, + mmSDMA4_RLC0_RB_CNTL) - mmSDMA4_RLC0_RB_CNTL, + SOC15_REG_OFFSET(SDMA5, 0, + mmSDMA5_RLC0_RB_CNTL) - mmSDMA5_RLC0_RB_CNTL, + SOC15_REG_OFFSET(SDMA6, 0, + mmSDMA6_RLC0_RB_CNTL) - mmSDMA6_RLC0_RB_CNTL, + SOC15_REG_OFFSET(SDMA7, 0, + mmSDMA7_RLC0_RB_CNTL) - mmSDMA7_RLC0_RB_CNTL + }; + uint32_t retval; + + retval = base[engine_id] + queue_id * (mmSDMA0_RLC1_RB_CNTL - + mmSDMA0_RLC0_RB_CNTL); + + pr_debug("sdma base address: 0x%x\n", retval); + + return retval; +} + +static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev, + u32 instance, u32 offset) +{ + switch (instance) { + case 0: + return (adev->reg_offset[SDMA0_HWIP][0][0] + offset); + case 1: + return (adev->reg_offset[SDMA1_HWIP][0][1] + offset); + case 2: + return (adev->reg_offset[SDMA2_HWIP][0][1] + offset); + case 3: + return (adev->reg_offset[SDMA3_HWIP][0][1] + offset); + case 4: + return (adev->reg_offset[SDMA4_HWIP][0][1] + offset); + case 5: + return (adev->reg_offset[SDMA5_HWIP][0][1] + offset); + case 6: + return (adev->reg_offset[SDMA6_HWIP][0][1] + offset); + case 7: + return (adev->reg_offset[SDMA7_HWIP][0][1] + offset); + default: + break; + } + return 0; +} + +static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, + uint32_t __user *wptr, struct mm_struct *mm) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + struct v9_sdma_mqd *m; + uint32_t sdma_base_addr, sdmax_gfx_context_cntl; + unsigned long end_jiffies; + uint32_t data; + uint64_t data64; + uint64_t 
__user *wptr64 = (uint64_t __user *)wptr; + + m = get_sdma_mqd(mqd); + sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id, + m->sdma_queue_id); + sdmax_gfx_context_cntl = sdma_v4_0_get_reg_offset(adev, + m->sdma_engine_id, mmSDMA0_GFX_CONTEXT_CNTL); + + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, + m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)); + + end_jiffies = msecs_to_jiffies(2000) + jiffies; + while (true) { + data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); + if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) + break; + if (time_after(jiffies, end_jiffies)) + return -ETIME; + usleep_range(500, 1000); + } + data = RREG32(sdmax_gfx_context_cntl); + data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL, + RESUME_CTX, 0); + WREG32(sdmax_gfx_context_cntl, data); + + WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL_OFFSET, + m->sdmax_rlcx_doorbell_offset); + + data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL, + ENABLE, 1); + WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data); + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr); + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI, + m->sdmax_rlcx_rb_rptr_hi); + + WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1); + if (read_user_wptr(mm, wptr64, data64)) { + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, + lower_32_bits(data64)); + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI, + upper_32_bits(data64)); + } else { + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, + m->sdmax_rlcx_rb_rptr); + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI, + m->sdmax_rlcx_rb_rptr_hi); + } + WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0); + + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base); + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI, + m->sdmax_rlcx_rb_base_hi); + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, + m->sdmax_rlcx_rb_rptr_addr_lo); + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, + m->sdmax_rlcx_rb_rptr_addr_hi); + + data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL, + RB_ENABLE, 1); + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data); + + return 0; +} + +static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, + uint32_t engine_id, uint32_t queue_id, + uint32_t (**dump)[2], uint32_t *n_regs) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + uint32_t sdma_base_addr = get_sdma_base_addr(adev, engine_id, queue_id); + uint32_t i = 0, reg; +#undef HQD_N_REGS +#define HQD_N_REGS (19+6+7+10) + + *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL); + if (*dump == NULL) + return -ENOMEM; + + for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++) + DUMP_REG(sdma_base_addr + reg); + for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++) + DUMP_REG(sdma_base_addr + reg); + for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN; + reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++) + DUMP_REG(sdma_base_addr + reg); + for (reg = mmSDMA0_RLC0_MIDCMD_DATA0; + reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++) + DUMP_REG(sdma_base_addr + reg); + + WARN_ON_ONCE(i != HQD_N_REGS); + *n_regs = i; + + return 0; +} + +static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + struct v9_sdma_mqd *m; + uint32_t sdma_base_addr; + uint32_t sdma_rlc_rb_cntl; + + m = get_sdma_mqd(mqd); + sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id, + m->sdma_queue_id); + + sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL); + + if 
(sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK) + return true; + + return false; +} + +static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, + unsigned int utimeout) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + struct v9_sdma_mqd *m; + uint32_t sdma_base_addr; + uint32_t temp; + unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies; + + m = get_sdma_mqd(mqd); + sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id, + m->sdma_queue_id); + + temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL); + temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK; + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp); + + while (true) { + temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); + if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) + break; + if (time_after(jiffies, end_jiffies)) + return -ETIME; + usleep_range(500, 1000); + } + + WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0); + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, + RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) | + SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK); + + m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR); + m->sdmax_rlcx_rb_rptr_hi = + RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI); + + return 0; +} + +static const struct kfd2kgd_calls kfd2kgd = { + .program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings, + .set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping, + .init_interrupts = kgd_gfx_v9_init_interrupts, + .hqd_load = kgd_gfx_v9_hqd_load, + .hqd_sdma_load = kgd_hqd_sdma_load, + .hqd_dump = kgd_gfx_v9_hqd_dump, + .hqd_sdma_dump = kgd_hqd_sdma_dump, + .hqd_is_occupied = kgd_gfx_v9_hqd_is_occupied, + .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied, + .hqd_destroy = kgd_gfx_v9_hqd_destroy, + .hqd_sdma_destroy = kgd_hqd_sdma_destroy, + .address_watch_disable = kgd_gfx_v9_address_watch_disable, + .address_watch_execute = kgd_gfx_v9_address_watch_execute, + .wave_control_execute = kgd_gfx_v9_wave_control_execute, + .address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset, + .get_atc_vmid_pasid_mapping_pasid = + kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid, + .get_atc_vmid_pasid_mapping_valid = + kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid, + .set_scratch_backing_va = kgd_gfx_v9_set_scratch_backing_va, + .get_tile_config = kgd_gfx_v9_get_tile_config, + .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base, + .invalidate_tlbs = kgd_gfx_v9_invalidate_tlbs, + .invalidate_tlbs_vmid = kgd_gfx_v9_invalidate_tlbs_vmid, + .get_hive_id = amdgpu_amdkfd_get_hive_id, +}; + +struct kfd2kgd_calls *amdgpu_amdkfd_arcturus_get_functions(void) +{ + return (struct kfd2kgd_calls *)&kfd2kgd; +} + diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index 38630e37e8cc..9d153cf39581 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -59,66 +59,11 @@ enum hqd_dequeue_request_type { RESET_WAVES }; -/* - * Register access functions - */ - -static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid, - uint32_t sh_mem_config, - uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, - uint32_t sh_mem_bases); -static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid, - unsigned int vmid); -static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id); -static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, - uint32_t queue_id, uint32_t __user *wptr, - uint32_t 
wptr_shift, uint32_t wptr_mask, - struct mm_struct *mm); -static int kgd_hqd_dump(struct kgd_dev *kgd, - uint32_t pipe_id, uint32_t queue_id, - uint32_t (**dump)[2], uint32_t *n_regs); -static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, - uint32_t __user *wptr, struct mm_struct *mm); -static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, - uint32_t engine_id, uint32_t queue_id, - uint32_t (**dump)[2], uint32_t *n_regs); -static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, - uint32_t pipe_id, uint32_t queue_id); -static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd); -static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, - enum kfd_preempt_type reset_type, - unsigned int utimeout, uint32_t pipe_id, - uint32_t queue_id); -static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, - unsigned int utimeout); -static int kgd_address_watch_disable(struct kgd_dev *kgd); -static int kgd_address_watch_execute(struct kgd_dev *kgd, - unsigned int watch_point_id, - uint32_t cntl_val, - uint32_t addr_hi, - uint32_t addr_lo); -static int kgd_wave_control_execute(struct kgd_dev *kgd, - uint32_t gfx_index_val, - uint32_t sq_cmd); -static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd, - unsigned int watch_point_id, - unsigned int reg_offset); - -static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, - uint8_t vmid); -static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, - uint8_t vmid); -static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, - uint64_t page_table_base); -static void set_scratch_backing_va(struct kgd_dev *kgd, - uint64_t va, uint32_t vmid); -static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid); -static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid); /* Because of REG_GET_FIELD() being used, we put this function in the * asic specific file. 
*/ -static int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd, +int kgd_gfx_v9_get_tile_config(struct kgd_dev *kgd, struct tile_config *config) { struct amdgpu_device *adev = (struct amdgpu_device *)kgd; @@ -136,39 +81,6 @@ static int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd, return 0; } -static const struct kfd2kgd_calls kfd2kgd = { - .program_sh_mem_settings = kgd_program_sh_mem_settings, - .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, - .init_interrupts = kgd_init_interrupts, - .hqd_load = kgd_hqd_load, - .hqd_sdma_load = kgd_hqd_sdma_load, - .hqd_dump = kgd_hqd_dump, - .hqd_sdma_dump = kgd_hqd_sdma_dump, - .hqd_is_occupied = kgd_hqd_is_occupied, - .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied, - .hqd_destroy = kgd_hqd_destroy, - .hqd_sdma_destroy = kgd_hqd_sdma_destroy, - .address_watch_disable = kgd_address_watch_disable, - .address_watch_execute = kgd_address_watch_execute, - .wave_control_execute = kgd_wave_control_execute, - .address_watch_get_offset = kgd_address_watch_get_offset, - .get_atc_vmid_pasid_mapping_pasid = - get_atc_vmid_pasid_mapping_pasid, - .get_atc_vmid_pasid_mapping_valid = - get_atc_vmid_pasid_mapping_valid, - .set_scratch_backing_va = set_scratch_backing_va, - .get_tile_config = amdgpu_amdkfd_get_tile_config, - .set_vm_context_page_table_base = set_vm_context_page_table_base, - .invalidate_tlbs = invalidate_tlbs, - .invalidate_tlbs_vmid = invalidate_tlbs_vmid, - .get_hive_id = amdgpu_amdkfd_get_hive_id, -}; - -struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void) -{ - return (struct kfd2kgd_calls *)&kfd2kgd; -} - static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) { return (struct amdgpu_device *)kgd; @@ -216,7 +128,7 @@ static void release_queue(struct kgd_dev *kgd) unlock_srbm(kgd); } -static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid, +void kgd_gfx_v9_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid, uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, @@ -233,7 +145,7 @@ static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid, unlock_srbm(kgd); } -static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid, +int kgd_gfx_v9_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid, unsigned int vmid) { struct amdgpu_device *adev = get_amdgpu_device(kgd); @@ -294,7 +206,7 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid, * but still works */ -static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id) +int kgd_gfx_v9_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id) { struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t mec; @@ -344,7 +256,7 @@ static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd) return (struct v9_sdma_mqd *)mqd; } -static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, +int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t __user *wptr, uint32_t wptr_shift, uint32_t wptr_mask, struct mm_struct *mm) @@ -439,7 +351,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, return 0; } -static int kgd_hqd_dump(struct kgd_dev *kgd, +int kgd_gfx_v9_hqd_dump(struct kgd_dev *kgd, uint32_t pipe_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs) { @@ -576,7 +488,7 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, return 0; } -static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, +bool 
kgd_gfx_v9_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, uint32_t pipe_id, uint32_t queue_id) { struct amdgpu_device *adev = get_amdgpu_device(kgd); @@ -617,7 +529,7 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) return false; } -static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, +int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd, enum kfd_preempt_type reset_type, unsigned int utimeout, uint32_t pipe_id, uint32_t queue_id) @@ -705,7 +617,7 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, return 0; } -static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, +bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid) { uint32_t reg; @@ -716,7 +628,7 @@ static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK; } -static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, +uint16_t kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, uint8_t vmid) { uint32_t reg; @@ -755,7 +667,7 @@ static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid, return 0; } -static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid) +int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid) { struct amdgpu_device *adev = (struct amdgpu_device *) kgd; int vmid; @@ -774,8 +686,8 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid) for (vmid = 0; vmid < 16; vmid++) { if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) continue; - if (get_atc_vmid_pasid_mapping_valid(kgd, vmid)) { - if (get_atc_vmid_pasid_mapping_pasid(kgd, vmid) + if (kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid(kgd, vmid)) { + if (kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid(kgd, vmid) == pasid) { amdgpu_gmc_flush_gpu_tlb(adev, vmid, flush_type); @@ -787,7 +699,7 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid) return 0; } -static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid) +int kgd_gfx_v9_invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid) { struct amdgpu_device *adev = (struct amdgpu_device *) kgd; @@ -815,12 +727,12 @@ static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid) return 0; } -static int kgd_address_watch_disable(struct kgd_dev *kgd) +int kgd_gfx_v9_address_watch_disable(struct kgd_dev *kgd) { return 0; } -static int kgd_address_watch_execute(struct kgd_dev *kgd, +int kgd_gfx_v9_address_watch_execute(struct kgd_dev *kgd, unsigned int watch_point_id, uint32_t cntl_val, uint32_t addr_hi, @@ -829,7 +741,7 @@ static int kgd_address_watch_execute(struct kgd_dev *kgd, return 0; } -static int kgd_wave_control_execute(struct kgd_dev *kgd, +int kgd_gfx_v9_wave_control_execute(struct kgd_dev *kgd, uint32_t gfx_index_val, uint32_t sq_cmd) { @@ -854,14 +766,14 @@ static int kgd_wave_control_execute(struct kgd_dev *kgd, return 0; } -static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd, +uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd, unsigned int watch_point_id, unsigned int reg_offset) { return 0; } -static void set_scratch_backing_va(struct kgd_dev *kgd, +void kgd_gfx_v9_set_scratch_backing_va(struct kgd_dev *kgd, uint64_t va, uint32_t vmid) { /* No longer needed on GFXv9. 
The scratch base address is @@ -870,7 +782,7 @@ static void set_scratch_backing_va(struct kgd_dev *kgd, */ } -static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, +void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, uint64_t page_table_base) { struct amdgpu_device *adev = get_amdgpu_device(kgd); @@ -894,3 +806,36 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base); } + +static const struct kfd2kgd_calls kfd2kgd = { + .program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings, + .set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping, + .init_interrupts = kgd_gfx_v9_init_interrupts, + .hqd_load = kgd_gfx_v9_hqd_load, + .hqd_sdma_load = kgd_hqd_sdma_load, + .hqd_dump = kgd_gfx_v9_hqd_dump, + .hqd_sdma_dump = kgd_hqd_sdma_dump, + .hqd_is_occupied = kgd_gfx_v9_hqd_is_occupied, + .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied, + .hqd_destroy = kgd_gfx_v9_hqd_destroy, + .hqd_sdma_destroy = kgd_hqd_sdma_destroy, + .address_watch_disable = kgd_gfx_v9_address_watch_disable, + .address_watch_execute = kgd_gfx_v9_address_watch_execute, + .wave_control_execute = kgd_gfx_v9_wave_control_execute, + .address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset, + .get_atc_vmid_pasid_mapping_pasid = + kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid, + .get_atc_vmid_pasid_mapping_valid = + kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid, + .set_scratch_backing_va = kgd_gfx_v9_set_scratch_backing_va, + .get_tile_config = kgd_gfx_v9_get_tile_config, + .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base, + .invalidate_tlbs = kgd_gfx_v9_invalidate_tlbs, + .invalidate_tlbs_vmid = kgd_gfx_v9_invalidate_tlbs_vmid, + .get_hive_id = amdgpu_amdkfd_get_hive_id, +}; + +struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void) +{ + return (struct kfd2kgd_calls *)&kfd2kgd; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h new file mode 100644 index 000000000000..b79d2a629768 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h @@ -0,0 +1,83 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + + + +void kgd_gfx_v9_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid, + uint32_t sh_mem_config, + uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, + uint32_t sh_mem_bases); +int kgd_gfx_v9_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid, + unsigned int vmid); +int kgd_gfx_v9_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id); +int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, + uint32_t queue_id, uint32_t __user *wptr, + uint32_t wptr_shift, uint32_t wptr_mask, + struct mm_struct *mm); +int kgd_gfx_v9_hqd_dump(struct kgd_dev *kgd, + uint32_t pipe_id, uint32_t queue_id, + uint32_t (**dump)[2], uint32_t *n_regs); +bool kgd_gfx_v9_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, + uint32_t pipe_id, uint32_t queue_id); +int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd, + enum kfd_preempt_type reset_type, + unsigned int utimeout, uint32_t pipe_id, + uint32_t queue_id); +int kgd_gfx_v9_address_watch_disable(struct kgd_dev *kgd); +int kgd_gfx_v9_address_watch_execute(struct kgd_dev *kgd, + unsigned int watch_point_id, + uint32_t cntl_val, + uint32_t addr_hi, + uint32_t addr_lo); +int kgd_gfx_v9_wave_control_execute(struct kgd_dev *kgd, + uint32_t gfx_index_val, + uint32_t sq_cmd); +uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd, + unsigned int watch_point_id, + unsigned int reg_offset); + +uint32_t kgd_gfx_v9_enable_debug_trap(struct kgd_dev *kgd, + uint32_t trap_debug_wave_launch_mode, + uint32_t vmid); +uint32_t kgd_gfx_v9_disable_debug_trap(struct kgd_dev *kgd); +uint32_t kgd_gfx_v9_set_debug_trap_data(struct kgd_dev *kgd, + int trap_data0, + int trap_data1); +uint32_t kgd_gfx_v9_set_wave_launch_trap_override(struct kgd_dev *kgd, + uint32_t trap_override, + uint32_t trap_mask); +uint32_t kgd_gfx_v9_set_wave_launch_mode(struct kgd_dev *kgd, + uint8_t wave_launch_mode, + uint32_t vmid); + +bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, + uint8_t vmid); +uint16_t kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, + uint8_t vmid); +void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, + uint64_t page_table_base); +void kgd_gfx_v9_set_scratch_backing_va(struct kgd_dev *kgd, + uint64_t va, uint32_t vmid); +int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid); +int kgd_gfx_v9_invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid); +int kgd_gfx_v9_get_tile_config(struct kgd_dev *kgd, + struct tile_config *config); -- cgit v1.2.3 From c9ffdf5acd4119fbfeaf31cdbeee70e021fd46a5 Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Thu, 14 Mar 2019 20:28:50 -0400 Subject: drm/amdgpu: Set VM_L2_CNTL.PDE_FAULT_CLASSIFICATION to 0 for MMHUB 9.4 Should be set to 0 for mmhub 9.4. 
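For reference, REG_SET_FIELD() is the driver's bit-field update helper: it masks out one field of a cached register value and ORs in the new value. The hunk below is roughly equivalent to the following open-coded form; the __SHIFT/_MASK names follow the usual generated-header convention and are assumptions here, not quoted from the patch:

    /* rough open-coded equivalent of REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL,
     * PDE_FAULT_CLASSIFICATION, 0) -- mask/shift macro names assumed */
    tmp &= ~VML2PF0_VM_L2_CNTL__PDE_FAULT_CLASSIFICATION_MASK;
    tmp |= (0u << VML2PF0_VM_L2_CNTL__PDE_FAULT_CLASSIFICATION__SHIFT) &
           VML2PF0_VM_L2_CNTL__PDE_FAULT_CLASSIFICATION_MASK;
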
Signed-off-by: Yong Zhao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c index 7f79fa4b6ad1..6b7cdaadbd70 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c @@ -195,7 +195,7 @@ static void mmhub_v9_4_init_cache_regs(struct amdgpu_device *adev, int hubid) tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE, 0); tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL, - PDE_FAULT_CLASSIFICATION, 1); + PDE_FAULT_CLASSIFICATION, 0); tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1); tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_CNTL, -- cgit v1.2.3 From 0ad8c5e296fc9ee04e9e918e05f05936ac275623 Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Fri, 8 Feb 2019 23:06:55 -0500 Subject: drm/amdkfd: Support MMHUB1 in kfd interrupt path Handle interrupts for second mmhub. Signed-off-by: Yong Zhao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c index a85904ad0d5f..3ef67d2e0d9f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c @@ -80,6 +80,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev, source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG || source_id == SOC15_INTSRC_CP_BAD_OPCODE || client_id == SOC15_IH_CLIENTID_VMC || + client_id == SOC15_IH_CLIENTID_VMC1 || client_id == SOC15_IH_CLIENTID_UTCL2; } @@ -104,6 +105,7 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev, else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) kfd_signal_hw_exception_event(pasid); else if (client_id == SOC15_IH_CLIENTID_VMC || + client_id == SOC15_IH_CLIENTID_VMC1 || client_id == SOC15_IH_CLIENTID_UTCL2) { struct kfd_vm_fault_info info = {0}; uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry); -- cgit v1.2.3 From 2f2eab3acc3c8b37d1cdbf00a441373a973690a9 Mon Sep 17 00:00:00 2001 From: Oak Zeng Date: Thu, 14 Mar 2019 11:44:24 -0500 Subject: drm/amdgpu: Hack xgmi topology info when there is no psp fw This is only needed on emulation platform where psp fw might not be available, to hack xgmi topology info such as hive id and node id. v2: Add offset to hacked hive/node id v3: Don't use introduce new module parameter. 
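In short (mirroring the hunk below): the PSP topology queries are now gated on a PSP IP block actually being present, and the emulation-only fallback fabricates fixed, offset ids instead:

    if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
            /* real hardware: ask the PSP firmware for hive and node ids */
            ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id);
            ...
            ret = psp_xgmi_get_node_id(&adev->psp, &adev->gmc.xgmi.node_id);
            ...
    } else {
            /* emulation, no PSP firmware: hack fixed ids with an offset */
            adev->gmc.xgmi.hive_id = 16;
            adev->gmc.xgmi.node_id = adev->gmc.xgmi.physical_node_id + 16;
    }
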
Signed-off-by: Oak Zeng Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index d11eba09eadd..b024070ad1cf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -301,18 +301,23 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) if (!adev->gmc.xgmi.supported) return 0; - ret = psp_xgmi_get_node_id(&adev->psp, &adev->gmc.xgmi.node_id); - if (ret) { - dev_err(adev->dev, - "XGMI: Failed to get node id\n"); - return ret; - } + if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) { + ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id); + if (ret) { + dev_err(adev->dev, + "XGMI: Failed to get hive id\n"); + return ret; + } - ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id); - if (ret) { - dev_err(adev->dev, - "XGMI: Failed to get hive id\n"); - return ret; + ret = psp_xgmi_get_node_id(&adev->psp, &adev->gmc.xgmi.node_id); + if (ret) { + dev_err(adev->dev, + "XGMI: Failed to get node id\n"); + return ret; + } + } else { + adev->gmc.xgmi.hive_id = 16; + adev->gmc.xgmi.node_id = adev->gmc.xgmi.physical_node_id + 16; } hive = amdgpu_get_xgmi_hive(adev, 1); -- cgit v1.2.3 From eb39aff7e0e33870319dd7c70a293c80e2df26c7 Mon Sep 17 00:00:00 2001 From: Oak Zeng Date: Wed, 20 Mar 2019 16:04:10 -0500 Subject: drm/amdgpu: Enable xgmi support for Arcturus xgmi is a high performance cross-GPU communication channel. Signed-off-by: Oak Zeng Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 8b17eeed09ad..854cf0fd1791 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -593,7 +593,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) return -EINVAL; } - if (adev->asic_type == CHIP_VEGA20) + if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS) adev->gmc.xgmi.supported = true; if (adev->flags & AMD_IS_APU) -- cgit v1.2.3 From db6a49d958db4725a1003d208d6890c55a8a811c Mon Sep 17 00:00:00 2001 From: James Zhu Date: Tue, 9 Jul 2019 10:01:35 -0500 Subject: drm/amdgpu: Clear build undefined warning Add amdgpu_amdkfd_arcturus_get_functions stub when CONFIG_HSA_AMD is undefinded. 
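This is the usual compiled-out stub pattern: when KFD support is not built in, amdgpu_amdkfd.c provides NULL-returning definitions of the *_get_functions() helpers so the references still resolve. The hunk below adds the missing Arcturus stub next to the existing gfx9/gfx10 ones:

    struct kfd2kgd_calls *amdgpu_amdkfd_arcturus_get_functions(void)
    {
            return NULL;
    }
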
Signed-off-by: James Zhu Reviewed-by: Yong Zhao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 573a6296b33a..33547a8a928a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -718,6 +718,11 @@ struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void) return NULL; } +struct kfd2kgd_calls *amdgpu_amdkfd_arcturus_get_functions(void) +{ + return NULL; +} + struct kfd2kgd_calls *amdgpu_amdkfd_gfx_10_0_get_functions(void) { return NULL; -- cgit v1.2.3 From eec28ef03c903f3e404c57ad0d204e0a9d7d9701 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 21 May 2019 19:34:41 +0800 Subject: drm/amdgpu: declare sdma firmware binary files for Arcturus So that they are properly picked up as a driver dependency. Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 821126247b09..422f6b032c29 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -55,6 +55,14 @@ MODULE_FIRMWARE("amdgpu/vega20_sdma1.bin"); MODULE_FIRMWARE("amdgpu/raven_sdma.bin"); MODULE_FIRMWARE("amdgpu/picasso_sdma.bin"); MODULE_FIRMWARE("amdgpu/raven2_sdma.bin"); +MODULE_FIRMWARE("amdgpu/arcturus_sdma.bin"); +MODULE_FIRMWARE("amdgpu/arcturus_sdma1.bin"); +MODULE_FIRMWARE("amdgpu/arcturus_sdma2.bin"); +MODULE_FIRMWARE("amdgpu/arcturus_sdma3.bin"); +MODULE_FIRMWARE("amdgpu/arcturus_sdma4.bin"); +MODULE_FIRMWARE("amdgpu/arcturus_sdma5.bin"); +MODULE_FIRMWARE("amdgpu/arcturus_sdma6.bin"); +MODULE_FIRMWARE("amdgpu/arcturus_sdma7.bin"); #define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK 0x000000F8L #define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L -- cgit v1.2.3 From 22a8f442866bf539c7a659923155d9afa03d77bb Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Mon, 15 Apr 2019 09:39:06 -0400 Subject: drm/amdgpu/VCN2: put IB internal registers offset to structure So the ring functions can be shared with different VCN versions with different internal registers offsets Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 6 ++++ drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 64 +++++++++++++++++++++------------ 2 files changed, 47 insertions(+), 23 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index 99f14fcc1460..bfd8c3cea13a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -145,6 +145,12 @@ struct amdgpu_vcn_reg{ unsigned data1; unsigned cmd; unsigned nop; + unsigned context_id; + unsigned ib_vmid; + unsigned ib_bar_low; + unsigned ib_bar_high; + unsigned ib_size; + unsigned gp_scratch8; unsigned scratch9; unsigned jpeg_pitch; }; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index c701868dd57f..9bb29cd3aa50 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -166,6 +166,13 @@ static int vcn_v2_0_sw_init(void *handle) if (r) return r; + adev->vcn.internal.context_id = 
mmUVD_CONTEXT_ID_INTERNAL_OFFSET; + adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET; + adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET; + adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET; + adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET; + adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET; + adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET; adev->vcn.external.scratch9 = SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9); adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET; @@ -1485,9 +1492,11 @@ static void vcn_v2_0_dec_ring_set_wptr(struct amdgpu_ring *ring) */ static void vcn_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring) { - amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET, 0)); + struct amdgpu_device *adev = ring->adev; + + amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0)); amdgpu_ring_write(ring, 0); - amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0)); + amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0)); amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_START << 1); } @@ -1500,7 +1509,9 @@ static void vcn_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring) */ static void vcn_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring) { - amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0)); + struct amdgpu_device *adev = ring->adev; + + amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0)); amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_END << 1); } @@ -1513,12 +1524,13 @@ static void vcn_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring) */ static void vcn_v2_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) { + struct amdgpu_device *adev = ring->adev; int i; WARN_ON(ring->wptr % 2 || count % 2); for (i = 0; i < count / 2; i++) { - amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP_INTERNAL_OFFSET, 0)); + amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.nop, 0)); amdgpu_ring_write(ring, 0); } } @@ -1534,27 +1546,28 @@ static void vcn_v2_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t coun static void vcn_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, unsigned flags) { - WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT); + struct amdgpu_device *adev = ring->adev; - amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID_INTERNAL_OFFSET, 0)); + WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT); + amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.context_id, 0)); amdgpu_ring_write(ring, seq); - amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET, 0)); + amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0)); amdgpu_ring_write(ring, addr & 0xffffffff); - amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET, 0)); + amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0)); amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff); - amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0)); + amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0)); amdgpu_ring_write(ring, VCN_DEC_CMD_FENCE << 1); - amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET, 0)); + amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0)); amdgpu_ring_write(ring, 0); - amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET, 0)); + amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0)); amdgpu_ring_write(ring, 0); - amdgpu_ring_write(ring, 
PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0)); + amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0)); amdgpu_ring_write(ring, VCN_DEC_CMD_TRAP << 1); } @@ -1572,16 +1585,17 @@ static void vcn_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, uint32_t flags) { + struct amdgpu_device *adev = ring->adev; unsigned vmid = AMDGPU_JOB_GET_VMID(job); - amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET, 0)); + amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_vmid, 0)); amdgpu_ring_write(ring, vmid); - amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET, 0)); + amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_bar_low, 0)); amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); - amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET, 0)); + amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_bar_high, 0)); amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); - amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET, 0)); + amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.ib_size, 0)); amdgpu_ring_write(ring, ib->length_dw); } @@ -1589,16 +1603,18 @@ static void vcn_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, uint32_t val, uint32_t mask) { - amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET, 0)); + struct amdgpu_device *adev = ring->adev; + + amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0)); amdgpu_ring_write(ring, reg << 2); - amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET, 0)); + amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0)); amdgpu_ring_write(ring, val); - amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8_INTERNAL_OFFSET, 0)); + amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.gp_scratch8, 0)); amdgpu_ring_write(ring, mask); - amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0)); + amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0)); amdgpu_ring_write(ring, VCN_DEC_CMD_REG_READ_COND_WAIT << 1); } @@ -1621,13 +1637,15 @@ static void vcn_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring, static void vcn_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val) { - amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET, 0)); + struct amdgpu_device *adev = ring->adev; + + amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0)); amdgpu_ring_write(ring, reg << 2); - amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET, 0)); + amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data1, 0)); amdgpu_ring_write(ring, val); - amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0)); + amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0)); amdgpu_ring_write(ring, VCN_DEC_CMD_WRITE_REG << 1); } -- cgit v1.2.3 From cdbd115eaf1d17cff0d84c99e53774acba96eca7 Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Tue, 9 Jul 2019 10:04:39 -0500 Subject: drm/amdgpu/VCN2: expose rings functions They can be reused by VCN2.x family Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 87 +++++++++++++++++------------------ drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h | 38 +++++++++++++++ 2 files changed, 79 insertions(+), 46 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index 9bb29cd3aa50..ebef2f663654 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -1490,7 +1490,7 @@ static void vcn_v2_0_dec_ring_set_wptr(struct amdgpu_ring *ring) * * Write a start command to the ring. */ -static void vcn_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring) +void vcn_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; @@ -1507,7 +1507,7 @@ static void vcn_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring) * * Write a end command to the ring. */ -static void vcn_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring) +void vcn_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; @@ -1522,7 +1522,7 @@ static void vcn_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring) * * Write a nop command to the ring. */ -static void vcn_v2_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) +void vcn_v2_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) { struct amdgpu_device *adev = ring->adev; int i; @@ -1543,8 +1543,8 @@ static void vcn_v2_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t coun * * Write a fence and a trap command to the ring. */ -static void vcn_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, - unsigned flags) +void vcn_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, + unsigned flags) { struct amdgpu_device *adev = ring->adev; @@ -1580,10 +1580,10 @@ static void vcn_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 * * Write ring commands to execute the indirect buffer */ -static void vcn_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring, - struct amdgpu_job *job, - struct amdgpu_ib *ib, - uint32_t flags) +void vcn_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring, + struct amdgpu_job *job, + struct amdgpu_ib *ib, + uint32_t flags) { struct amdgpu_device *adev = ring->adev; unsigned vmid = AMDGPU_JOB_GET_VMID(job); @@ -1599,9 +1599,8 @@ static void vcn_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring, amdgpu_ring_write(ring, ib->length_dw); } -static void vcn_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, - uint32_t reg, uint32_t val, - uint32_t mask) +void vcn_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, + uint32_t val, uint32_t mask) { struct amdgpu_device *adev = ring->adev; @@ -1619,8 +1618,8 @@ static void vcn_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, amdgpu_ring_write(ring, VCN_DEC_CMD_REG_READ_COND_WAIT << 1); } -static void vcn_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned vmid, uint64_t pd_addr) +void vcn_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring, + unsigned vmid, uint64_t pd_addr) { struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; uint32_t data0, data1, mask; @@ -1634,8 +1633,8 @@ static void vcn_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring, vcn_v2_0_dec_ring_emit_reg_wait(ring, data0, data1, mask); } -static void vcn_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring, - uint32_t reg, uint32_t val) +void vcn_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring, + uint32_t reg, uint32_t val) { struct amdgpu_device *adev = ring->adev; @@ -1727,8 +1726,8 @@ static void vcn_v2_0_enc_ring_set_wptr(struct amdgpu_ring *ring) * * Write enc a fence and a trap command to the ring. 
*/ -static void vcn_v2_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, - u64 seq, unsigned flags) +void vcn_v2_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, + u64 seq, unsigned flags) { WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT); @@ -1739,7 +1738,7 @@ static void vcn_v2_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, amdgpu_ring_write(ring, VCN_ENC_CMD_TRAP); } -static void vcn_v2_0_enc_ring_insert_end(struct amdgpu_ring *ring) +void vcn_v2_0_enc_ring_insert_end(struct amdgpu_ring *ring) { amdgpu_ring_write(ring, VCN_ENC_CMD_END); } @@ -1752,10 +1751,10 @@ static void vcn_v2_0_enc_ring_insert_end(struct amdgpu_ring *ring) * * Write enc ring commands to execute the indirect buffer */ -static void vcn_v2_0_enc_ring_emit_ib(struct amdgpu_ring *ring, - struct amdgpu_job *job, - struct amdgpu_ib *ib, - uint32_t flags) +void vcn_v2_0_enc_ring_emit_ib(struct amdgpu_ring *ring, + struct amdgpu_job *job, + struct amdgpu_ib *ib, + uint32_t flags) { unsigned vmid = AMDGPU_JOB_GET_VMID(job); @@ -1766,9 +1765,8 @@ static void vcn_v2_0_enc_ring_emit_ib(struct amdgpu_ring *ring, amdgpu_ring_write(ring, ib->length_dw); } -static void vcn_v2_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, - uint32_t reg, uint32_t val, - uint32_t mask) +void vcn_v2_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, + uint32_t val, uint32_t mask) { amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT); amdgpu_ring_write(ring, reg << 2); @@ -1776,8 +1774,8 @@ static void vcn_v2_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, amdgpu_ring_write(ring, val); } -static void vcn_v2_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned int vmid, uint64_t pd_addr) +void vcn_v2_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, + unsigned int vmid, uint64_t pd_addr) { struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; @@ -1788,8 +1786,7 @@ static void vcn_v2_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, lower_32_bits(pd_addr), 0xffffffff); } -static void vcn_v2_0_enc_ring_emit_wreg(struct amdgpu_ring *ring, - uint32_t reg, uint32_t val) +void vcn_v2_0_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val) { amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE); amdgpu_ring_write(ring, reg << 2); @@ -1853,7 +1850,7 @@ static void vcn_v2_0_jpeg_ring_set_wptr(struct amdgpu_ring *ring) * * Write a start command to the ring. */ -static void vcn_v2_0_jpeg_ring_insert_start(struct amdgpu_ring *ring) +void vcn_v2_0_jpeg_ring_insert_start(struct amdgpu_ring *ring) { amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, 0, 0, PACKETJ_TYPE0)); @@ -1871,7 +1868,7 @@ static void vcn_v2_0_jpeg_ring_insert_start(struct amdgpu_ring *ring) * * Write a end command to the ring. */ -static void vcn_v2_0_jpeg_ring_insert_end(struct amdgpu_ring *ring) +void vcn_v2_0_jpeg_ring_insert_end(struct amdgpu_ring *ring) { amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET, 0, 0, PACKETJ_TYPE0)); @@ -1890,8 +1887,8 @@ static void vcn_v2_0_jpeg_ring_insert_end(struct amdgpu_ring *ring) * * Write a fence and a trap command to the ring. */ -static void vcn_v2_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, - unsigned flags) +void vcn_v2_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, + unsigned flags) { WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT); @@ -1939,10 +1936,10 @@ static void vcn_v2_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u6 * * Write ring commands to execute the indirect buffer. 
*/ -static void vcn_v2_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring, - struct amdgpu_job *job, - struct amdgpu_ib *ib, - uint32_t flags) +void vcn_v2_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring, + struct amdgpu_job *job, + struct amdgpu_ib *ib, + uint32_t flags) { unsigned vmid = AMDGPU_JOB_GET_VMID(job); @@ -1990,9 +1987,8 @@ static void vcn_v2_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring, amdgpu_ring_write(ring, 0x2); } -static void vcn_v2_0_jpeg_ring_emit_reg_wait(struct amdgpu_ring *ring, - uint32_t reg, uint32_t val, - uint32_t mask) +void vcn_v2_0_jpeg_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, + uint32_t val, uint32_t mask) { uint32_t reg_offset = (reg << 2); @@ -2018,8 +2014,8 @@ static void vcn_v2_0_jpeg_ring_emit_reg_wait(struct amdgpu_ring *ring, amdgpu_ring_write(ring, mask); } -static void vcn_v2_0_jpeg_ring_emit_vm_flush(struct amdgpu_ring *ring, - unsigned vmid, uint64_t pd_addr) +void vcn_v2_0_jpeg_ring_emit_vm_flush(struct amdgpu_ring *ring, + unsigned vmid, uint64_t pd_addr) { struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; uint32_t data0, data1, mask; @@ -2033,8 +2029,7 @@ static void vcn_v2_0_jpeg_ring_emit_vm_flush(struct amdgpu_ring *ring, vcn_v2_0_jpeg_ring_emit_reg_wait(ring, data0, data1, mask); } -static void vcn_v2_0_jpeg_ring_emit_wreg(struct amdgpu_ring *ring, - uint32_t reg, uint32_t val) +void vcn_v2_0_jpeg_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val) { uint32_t reg_offset = (reg << 2); @@ -2052,7 +2047,7 @@ static void vcn_v2_0_jpeg_ring_emit_wreg(struct amdgpu_ring *ring, amdgpu_ring_write(ring, val); } -static void vcn_v2_0_jpeg_ring_nop(struct amdgpu_ring *ring, uint32_t count) +void vcn_v2_0_jpeg_ring_nop(struct amdgpu_ring *ring, uint32_t count) { int i; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h index a74227f4663b..8467292f32e5 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h @@ -24,6 +24,44 @@ #ifndef __VCN_V2_0_H__ #define __VCN_V2_0_H__ +extern void vcn_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring); +extern void vcn_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring); +extern void vcn_v2_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count); +extern void vcn_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, + unsigned flags); +extern void vcn_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, + struct amdgpu_ib *ib, uint32_t flags); +extern void vcn_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, + uint32_t val, uint32_t mask); +extern void vcn_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring, + unsigned vmid, uint64_t pd_addr); +extern void vcn_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring, + uint32_t reg, uint32_t val); + +extern void vcn_v2_0_enc_ring_insert_end(struct amdgpu_ring *ring); +extern void vcn_v2_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, + u64 seq, unsigned flags); +extern void vcn_v2_0_enc_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, + struct amdgpu_ib *ib, uint32_t flags); +extern void vcn_v2_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, + uint32_t val, uint32_t mask); +extern void vcn_v2_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, + unsigned int vmid, uint64_t pd_addr); +extern void vcn_v2_0_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val); + +extern void vcn_v2_0_jpeg_ring_insert_start(struct amdgpu_ring *ring); 
+extern void vcn_v2_0_jpeg_ring_insert_end(struct amdgpu_ring *ring); +extern void vcn_v2_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, + unsigned flags); +extern void vcn_v2_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, + struct amdgpu_ib *ib, uint32_t flags); +extern void vcn_v2_0_jpeg_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, + uint32_t val, uint32_t mask); +extern void vcn_v2_0_jpeg_ring_emit_vm_flush(struct amdgpu_ring *ring, + unsigned vmid, uint64_t pd_addr); +extern void vcn_v2_0_jpeg_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val); +extern void vcn_v2_0_jpeg_ring_nop(struct amdgpu_ring *ring, uint32_t count); + extern const struct amdgpu_ip_block_version vcn_v2_0_ip_block; #endif /* __VCN_V2_0_H__ */ -- cgit v1.2.3 From 28c17d72072b7ce5cc13a527dcbfc045cc995e7d Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Mon, 15 Apr 2019 12:21:42 -0400 Subject: drm/amdgpu: add VCN2.5 basic supports i.e. basic VCN IP SW structures VCN is the video codec block on the GPU. Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 3 +- drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 414 ++++++++++++++++++++++++++++++++++ 2 files changed, 416 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index dc22569e9732..7a1a78c7b329 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -145,7 +145,8 @@ amdgpu-y += \ amdgpu-y += \ amdgpu_vcn.o \ vcn_v1_0.o \ - vcn_v2_0.o + vcn_v2_0.o \ + vcn_v2_5.o # add ATHUB block amdgpu-y += \ diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c new file mode 100644 index 000000000000..0f553563ceb9 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -0,0 +1,414 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include +#include +#include "amdgpu.h" +#include "amdgpu_vcn.h" +#include "soc15.h" +#include "soc15d.h" +#include "vcn_v2_0.h" + +#include "vcn/vcn_2_5_offset.h" +#include "vcn/vcn_2_5_sh_mask.h" +#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h" + +#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x27 +#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x0f +#define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET 0x10 +#define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET 0x11 +#define mmUVD_NO_OP_INTERNAL_OFFSET 0x29 +#define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET 0x66 +#define mmUVD_SCRATCH9_INTERNAL_OFFSET 0xc01d + +#define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET 0x431 +#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x3b4 +#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x3b5 +#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET 0x25c + +#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f + +static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev); +static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev); +static void vcn_v2_5_set_jpeg_ring_funcs(struct amdgpu_device *adev); +static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev); +static int vcn_v2_5_set_powergating_state(void *handle, + enum amd_powergating_state state); + +/** + * vcn_v2_5_early_init - set function pointers + * + * @handle: amdgpu_device pointer + * + * Set ring and irq function pointers + */ +static int vcn_v2_5_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + adev->vcn.num_enc_rings = 2; + + vcn_v2_5_set_dec_ring_funcs(adev); + vcn_v2_5_set_enc_ring_funcs(adev); + vcn_v2_5_set_jpeg_ring_funcs(adev); + vcn_v2_5_set_irq_funcs(adev); + + return 0; +} + +/** + * vcn_v2_5_sw_init - sw init for VCN block + * + * @handle: amdgpu_device pointer + * + * Load firmware and sw initialization + */ +static int vcn_v2_5_sw_init(void *handle) +{ + struct amdgpu_ring *ring; + int i, r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + /* VCN DEC TRAP */ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, + VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.irq); + if (r) + return r; + + /* VCN ENC TRAP */ + for (i = 0; i < adev->vcn.num_enc_rings; ++i) { + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, + i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.irq); + if (r) + return r; + } + + /* VCN JPEG TRAP */ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, + VCN_2_0__SRCID__JPEG_DECODE, &adev->vcn.irq); + if (r) + return r; + + r = amdgpu_vcn_sw_init(adev); + if (r) + return r; + + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + const struct common_firmware_header *hdr; + hdr = (const struct common_firmware_header *)adev->vcn.fw->data; + adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN; + adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE); + DRM_INFO("PSP loading VCN firmware\n"); + } + + r = amdgpu_vcn_resume(adev); + if (r) + return r; + + ring = &adev->vcn.ring_dec; + sprintf(ring->name, "vcn_dec"); + r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0); + if (r) + return r; + + adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET; + adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET; + adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET; + adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET; + adev->vcn.internal.ib_size = 
mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET; + adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET; + + adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET; + adev->vcn.external.scratch9 = SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9); + adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET; + adev->vcn.external.data0 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0); + adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET; + adev->vcn.external.data1 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1); + adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET; + adev->vcn.external.cmd = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD); + adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET; + adev->vcn.external.nop = SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP); + + for (i = 0; i < adev->vcn.num_enc_rings; ++i) { + ring = &adev->vcn.ring_enc[i]; + sprintf(ring->name, "vcn_enc%d", i); + r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0); + if (r) + return r; + } + + ring = &adev->vcn.ring_jpeg; + sprintf(ring->name, "vcn_jpeg"); + r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0); + if (r) + return r; + + adev->vcn.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET; + adev->vcn.external.jpeg_pitch = SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH); + + return 0; +} + +/** + * vcn_v2_5_sw_fini - sw fini for VCN block + * + * @handle: amdgpu_device pointer + * + * VCN suspend and free up sw allocation + */ +static int vcn_v2_5_sw_fini(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + r = amdgpu_vcn_suspend(adev); + if (r) + return r; + + r = amdgpu_vcn_sw_fini(adev); + + return r; +} + +/** + * vcn_v2_5_hw_init - start and test VCN block + * + * @handle: amdgpu_device pointer + * + * Initialize the hardware, boot up the VCPU and do some testing + */ +static int vcn_v2_5_hw_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_ring *ring = &adev->vcn.ring_dec; + int i, r; + + r = amdgpu_ring_test_ring(ring); + if (r) { + ring->sched.ready = false; + goto done; + } + + for (i = 0; i < adev->vcn.num_enc_rings; ++i) { + ring = &adev->vcn.ring_enc[i]; + ring->sched.ready = false; + continue; + r = amdgpu_ring_test_ring(ring); + if (r) { + ring->sched.ready = false; + goto done; + } + } + + ring = &adev->vcn.ring_jpeg; + r = amdgpu_ring_test_ring(ring); + if (r) { + ring->sched.ready = false; + goto done; + } + +done: + if (!r) + DRM_INFO("VCN decode and encode initialized successfully.\n"); + + return r; +} + +/** + * vcn_v2_5_hw_fini - stop the hardware block + * + * @handle: amdgpu_device pointer + * + * Stop the VCN block, mark ring as not ready any more + */ +static int vcn_v2_5_hw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_ring *ring = &adev->vcn.ring_dec; + int i; + + if (RREG32_SOC15(VCN, 0, mmUVD_STATUS)) + vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE); + + ring->sched.ready = false; + + for (i = 0; i < adev->vcn.num_enc_rings; ++i) { + ring = &adev->vcn.ring_enc[i]; + ring->sched.ready = false; + } + + ring = &adev->vcn.ring_jpeg; + ring->sched.ready = false; + + return 0; +} + +/** + * vcn_v2_5_suspend - suspend VCN block + * + * @handle: amdgpu_device pointer + * + * HW fini and suspend VCN block + */ +static int vcn_v2_5_suspend(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + r = vcn_v2_5_hw_fini(adev); + if (r) + return r; + + r = 
amdgpu_vcn_suspend(adev); + + return r; +} + +/** + * vcn_v2_5_resume - resume VCN block + * + * @handle: amdgpu_device pointer + * + * Resume firmware and hw init VCN block + */ +static int vcn_v2_5_resume(void *handle) +{ + int r; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + r = amdgpu_vcn_resume(adev); + if (r) + return r; + + r = vcn_v2_5_hw_init(adev); + + return r; +} + +static bool vcn_v2_5_is_idle(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE); +} + +static int vcn_v2_5_wait_for_idle(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int ret = 0; + + SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE, + UVD_STATUS__IDLE, ret); + + return ret; +} + +static int vcn_v2_5_set_clockgating_state(void *handle, + enum amd_clockgating_state state) +{ + return 0; +} + +static int vcn_v2_5_set_powergating_state(void *handle, + enum amd_powergating_state state) +{ + return 0; +} + +static int vcn_v2_5_set_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + return 0; +} + +static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + DRM_DEBUG("IH: VCN TRAP\n"); + + switch (entry->src_id) { + case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT: + amdgpu_fence_process(&adev->vcn.ring_dec); + break; + case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE: + amdgpu_fence_process(&adev->vcn.ring_enc[0]); + break; + case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY: + amdgpu_fence_process(&adev->vcn.ring_enc[1]); + break; + case VCN_2_0__SRCID__JPEG_DECODE: + amdgpu_fence_process(&adev->vcn.ring_jpeg); + break; + default: + DRM_ERROR("Unhandled interrupt: %d %d\n", + entry->src_id, entry->src_data[0]); + break; + } + + return 0; +} + +static const struct amdgpu_irq_src_funcs vcn_v2_5_irq_funcs = { + .set = vcn_v2_5_set_interrupt_state, + .process = vcn_v2_5_process_interrupt, +}; + +static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 2; + adev->vcn.irq.funcs = &vcn_v2_5_irq_funcs; +} + +static const struct amd_ip_funcs vcn_v2_5_ip_funcs = { + .name = "vcn_v2_5", + .early_init = vcn_v2_5_early_init, + .late_init = NULL, + .sw_init = vcn_v2_5_sw_init, + .sw_fini = vcn_v2_5_sw_fini, + .hw_init = vcn_v2_5_hw_init, + .hw_fini = vcn_v2_5_hw_fini, + .suspend = vcn_v2_5_suspend, + .resume = vcn_v2_5_resume, + .is_idle = vcn_v2_5_is_idle, + .wait_for_idle = vcn_v2_5_wait_for_idle, + .check_soft_reset = NULL, + .pre_soft_reset = NULL, + .soft_reset = NULL, + .post_soft_reset = NULL, + .set_clockgating_state = vcn_v2_5_set_clockgating_state, + .set_powergating_state = vcn_v2_5_set_powergating_state, +}; + +const struct amdgpu_ip_block_version vcn_v2_5_ip_block = +{ + .type = AMD_IP_BLOCK_TYPE_VCN, + .major = 2, + .minor = 5, + .rev = 0, + .funcs = &vcn_v2_5_ip_funcs, +}; -- cgit v1.2.3 From cbead2bdfcf1ddaa3e65de22a3f88034736a71fd Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Mon, 15 Apr 2019 12:41:09 -0400 Subject: drm/amdgpu: add VCN2.5 VCPU start and stop HW engine initialization and suspend sequences. 
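
The start sequence added below boots the VCPU and then polls UVD_STATUS (the code waits for bit 1), pulsing the VCPU block reset between a bounded number of retry attempts before giving up. A minimal C sketch of that boot/poll/retry pattern, with hypothetical accessor callbacks standing in for the driver's RREG32/WREG32 register helpers (the helper and struct names here are illustrative, not part of the patch):

#include <stdint.h>

#define VCPU_UP_BIT        0x2   /* the start code below waits for bit 1 of the status */
#define BOOT_ATTEMPTS      10
#define POLLS_PER_ATTEMPT  100

struct vcpu_ops {                        /* hypothetical register accessors */
	uint32_t (*read_status)(void *ctx);
	void (*pulse_block_reset)(void *ctx);
	void (*delay_ms)(unsigned int ms);
};

static int vcpu_boot_with_retries(const struct vcpu_ops *ops, void *ctx)
{
	int attempt, poll;

	for (attempt = 0; attempt < BOOT_ATTEMPTS; ++attempt) {
		for (poll = 0; poll < POLLS_PER_ATTEMPT; ++poll) {
			if (ops->read_status(ctx) & VCPU_UP_BIT)
				return 0;              /* VCPU reported up */
			ops->delay_ms(10);
		}
		/* not up yet: reset the VCPU block and try again */
		ops->pulse_block_reset(ctx);
		ops->delay_ms(10);
	}
	return -1;                                     /* give up, as the real code does */
}
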
Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 467 +++++++++++++++++++++++++++++++++- 1 file changed, 466 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index 0f553563ceb9..b6e72fff94f5 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -307,6 +307,446 @@ static int vcn_v2_5_resume(void *handle) return r; } +/** + * vcn_v2_5_mc_resume - memory controller programming + * + * @adev: amdgpu_device pointer + * + * Let the VCN memory controller know it's offsets + */ +static void vcn_v2_5_mc_resume(struct amdgpu_device *adev) +{ + uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); + uint32_t offset; + + /* cache window 0: fw */ + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo)); + WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi)); + WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0); + offset = 0; + } else { + WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.gpu_addr)); + WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.gpu_addr)); + offset = size; + /* No signed header for now from firmware + WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, + AMDGPU_UVD_FIRMWARE_OFFSET >> 3); + */ + WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0); + } + WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size); + + /* cache window 1: stack */ + WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.gpu_addr + offset)); + WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.gpu_addr + offset)); + WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0); + WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE); + + /* cache window 2: context */ + WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); + WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); + WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0); + WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE); +} + +/** + * vcn_v2_5_disable_clock_gating - disable VCN clock gating + * + * @adev: amdgpu_device pointer + * @sw: enable SW clock gating + * + * Disable clock gating for VCN block + */ +static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev) +{ + uint32_t data; + int ret = 0; + + /* UVD disable CGC */ + data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL); + if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) + data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + else + data &= ~ UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK; + data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; + data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT; + WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data); + + data = RREG32_SOC15(VCN, 0, mmUVD_CGC_GATE); + data &= ~(UVD_CGC_GATE__SYS_MASK + | UVD_CGC_GATE__UDEC_MASK + | UVD_CGC_GATE__MPEG2_MASK + | UVD_CGC_GATE__REGS_MASK + | UVD_CGC_GATE__RBC_MASK + | UVD_CGC_GATE__LMI_MC_MASK + | UVD_CGC_GATE__LMI_UMC_MASK + | UVD_CGC_GATE__IDCT_MASK + | UVD_CGC_GATE__MPRD_MASK + | UVD_CGC_GATE__MPC_MASK + | UVD_CGC_GATE__LBSI_MASK + | 
UVD_CGC_GATE__LRBBM_MASK + | UVD_CGC_GATE__UDEC_RE_MASK + | UVD_CGC_GATE__UDEC_CM_MASK + | UVD_CGC_GATE__UDEC_IT_MASK + | UVD_CGC_GATE__UDEC_DB_MASK + | UVD_CGC_GATE__UDEC_MP_MASK + | UVD_CGC_GATE__WCB_MASK + | UVD_CGC_GATE__VCPU_MASK + | UVD_CGC_GATE__MMSCH_MASK); + + WREG32_SOC15(VCN, 0, mmUVD_CGC_GATE, data); + + SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_CGC_GATE, 0, 0xFFFFFFFF, ret); + + data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL); + data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK + | UVD_CGC_CTRL__UDEC_CM_MODE_MASK + | UVD_CGC_CTRL__UDEC_IT_MODE_MASK + | UVD_CGC_CTRL__UDEC_DB_MODE_MASK + | UVD_CGC_CTRL__UDEC_MP_MODE_MASK + | UVD_CGC_CTRL__SYS_MODE_MASK + | UVD_CGC_CTRL__UDEC_MODE_MASK + | UVD_CGC_CTRL__MPEG2_MODE_MASK + | UVD_CGC_CTRL__REGS_MODE_MASK + | UVD_CGC_CTRL__RBC_MODE_MASK + | UVD_CGC_CTRL__LMI_MC_MODE_MASK + | UVD_CGC_CTRL__LMI_UMC_MODE_MASK + | UVD_CGC_CTRL__IDCT_MODE_MASK + | UVD_CGC_CTRL__MPRD_MODE_MASK + | UVD_CGC_CTRL__MPC_MODE_MASK + | UVD_CGC_CTRL__LBSI_MODE_MASK + | UVD_CGC_CTRL__LRBBM_MODE_MASK + | UVD_CGC_CTRL__WCB_MODE_MASK + | UVD_CGC_CTRL__VCPU_MODE_MASK + | UVD_CGC_CTRL__MMSCH_MODE_MASK); + WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data); + + /* turn on */ + data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE); + data |= (UVD_SUVD_CGC_GATE__SRE_MASK + | UVD_SUVD_CGC_GATE__SIT_MASK + | UVD_SUVD_CGC_GATE__SMP_MASK + | UVD_SUVD_CGC_GATE__SCM_MASK + | UVD_SUVD_CGC_GATE__SDB_MASK + | UVD_SUVD_CGC_GATE__SRE_H264_MASK + | UVD_SUVD_CGC_GATE__SRE_HEVC_MASK + | UVD_SUVD_CGC_GATE__SIT_H264_MASK + | UVD_SUVD_CGC_GATE__SIT_HEVC_MASK + | UVD_SUVD_CGC_GATE__SCM_H264_MASK + | UVD_SUVD_CGC_GATE__SCM_HEVC_MASK + | UVD_SUVD_CGC_GATE__SDB_H264_MASK + | UVD_SUVD_CGC_GATE__SDB_HEVC_MASK + | UVD_SUVD_CGC_GATE__SCLR_MASK + | UVD_SUVD_CGC_GATE__UVD_SC_MASK + | UVD_SUVD_CGC_GATE__ENT_MASK + | UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK + | UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK + | UVD_SUVD_CGC_GATE__SITE_MASK + | UVD_SUVD_CGC_GATE__SRE_VP9_MASK + | UVD_SUVD_CGC_GATE__SCM_VP9_MASK + | UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK + | UVD_SUVD_CGC_GATE__SDB_VP9_MASK + | UVD_SUVD_CGC_GATE__IME_HEVC_MASK); + WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE, data); + + data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL); + data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK + | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK + | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK + | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK + | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK + | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK + | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK + | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK + | UVD_SUVD_CGC_CTRL__IME_MODE_MASK + | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK); + WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data); +} + +/** + * vcn_v2_5_enable_clock_gating - enable VCN clock gating + * + * @adev: amdgpu_device pointer + * @sw: enable SW clock gating + * + * Enable clock gating for VCN block + */ +static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev) +{ + uint32_t data = 0; + + /* enable UVD CGC */ + data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL); + if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) + data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + else + data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; + data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT; + WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data); + + data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL); + data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK + | UVD_CGC_CTRL__UDEC_CM_MODE_MASK + | UVD_CGC_CTRL__UDEC_IT_MODE_MASK + | UVD_CGC_CTRL__UDEC_DB_MODE_MASK + | UVD_CGC_CTRL__UDEC_MP_MODE_MASK + | 
UVD_CGC_CTRL__SYS_MODE_MASK + | UVD_CGC_CTRL__UDEC_MODE_MASK + | UVD_CGC_CTRL__MPEG2_MODE_MASK + | UVD_CGC_CTRL__REGS_MODE_MASK + | UVD_CGC_CTRL__RBC_MODE_MASK + | UVD_CGC_CTRL__LMI_MC_MODE_MASK + | UVD_CGC_CTRL__LMI_UMC_MODE_MASK + | UVD_CGC_CTRL__IDCT_MODE_MASK + | UVD_CGC_CTRL__MPRD_MODE_MASK + | UVD_CGC_CTRL__MPC_MODE_MASK + | UVD_CGC_CTRL__LBSI_MODE_MASK + | UVD_CGC_CTRL__LRBBM_MODE_MASK + | UVD_CGC_CTRL__WCB_MODE_MASK + | UVD_CGC_CTRL__VCPU_MODE_MASK); + WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data); + + data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL); + data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK + | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK + | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK + | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK + | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK + | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK + | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK + | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK + | UVD_SUVD_CGC_CTRL__IME_MODE_MASK + | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK); + WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data); +} + +static int vcn_v2_5_start(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring = &adev->vcn.ring_dec; + uint32_t rb_bufsz, tmp; + int i, j, r; + + /* disable register anti-hang mechanism */ + WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0, + ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); + + /* set uvd status busy */ + tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY; + WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp); + + /*SW clock gating */ + vcn_v2_5_disable_clock_gating(adev); + + /* enable VCPU clock */ + WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), + UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK); + + /* disable master interrupt */ + WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0, + ~UVD_MASTINT_EN__VCPU_EN_MASK); + + /* setup mmUVD_LMI_CTRL */ + tmp = RREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL); + tmp &= ~0xff; + WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL, tmp | 0x8| + UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | + UVD_LMI_CTRL__MASK_MC_URGENT_MASK | + UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | + UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK); + + /* setup mmUVD_MPC_CNTL */ + tmp = RREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL); + tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK; + tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT; + WREG32_SOC15(VCN, 0, mmUVD_MPC_CNTL, tmp); + + /* setup UVD_MPC_SET_MUXA0 */ + WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0, + ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) | + (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) | + (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT))); + + /* setup UVD_MPC_SET_MUXB0 */ + WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0, + ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) | + (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) | + (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT))); + + /* setup mmUVD_MPC_SET_MUX */ + WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX, + ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) | + (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT))); + + vcn_v2_5_mc_resume(adev); + + /* VCN global tiling registers */ + WREG32_SOC15(UVD, 0, mmUVD_GFX8_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + WREG32_SOC15(UVD, 0, mmUVD_GFX8_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + + /* enable LMI MC and UMC channels */ + WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0, + ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); + + /* unblock VCPU register access */ + WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_ARB_CTRL), 0, + ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK); + 
+ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0, + ~UVD_VCPU_CNTL__BLK_RST_MASK); + + for (i = 0; i < 10; ++i) { + uint32_t status; + + for (j = 0; j < 100; ++j) { + status = RREG32_SOC15(UVD, 0, mmUVD_STATUS); + if (status & 2) + break; + if (amdgpu_emu_mode == 1) + msleep(500); + else + mdelay(10); + } + r = 0; + if (status & 2) + break; + + DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n"); + WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), + UVD_VCPU_CNTL__BLK_RST_MASK, + ~UVD_VCPU_CNTL__BLK_RST_MASK); + mdelay(10); + WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0, + ~UVD_VCPU_CNTL__BLK_RST_MASK); + + mdelay(10); + r = -1; + } + + if (r) { + DRM_ERROR("VCN decode not responding, giving up!!!\n"); + return r; + } + + /* enable master interrupt */ + WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), + UVD_MASTINT_EN__VCPU_EN_MASK, + ~UVD_MASTINT_EN__VCPU_EN_MASK); + + /* clear the busy bit of VCN_STATUS */ + WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0, + ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT)); + + WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_VMID, 0); + + /* force RBC into idle state */ + rb_bufsz = order_base_2(ring->ring_size); + tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); + WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp); + + /* programm the RB_BASE for ring buffer */ + WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW, + lower_32_bits(ring->gpu_addr)); + WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH, + upper_32_bits(ring->gpu_addr)); + + /* Initialize the ring buffer's read and write pointers */ + WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0); + + ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR); + WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, + lower_32_bits(ring->wptr)); + ring = &adev->vcn.ring_enc[0]; + WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); + WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); + WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr); + WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4); + + ring = &adev->vcn.ring_enc[1]; + WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); + WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); + WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr); + WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4); + + return r; +} + +static int vcn_v2_5_stop(struct amdgpu_device *adev) +{ + uint32_t tmp; + int r; + + /* wait for vcn idle */ + SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, r); + if (r) + return r; + + tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK | + UVD_LMI_STATUS__READ_CLEAN_MASK | + UVD_LMI_STATUS__WRITE_CLEAN_MASK | + UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK; + SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_LMI_STATUS, tmp, tmp, r); + if (r) + return r; + + /* block LMI UMC channel */ + tmp = RREG32_SOC15(VCN, 0, mmUVD_LMI_CTRL2); + tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK; + WREG32_SOC15(VCN, 0, mmUVD_LMI_CTRL2, tmp); + + tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK| + UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK; + SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_LMI_STATUS, tmp, tmp, r); + if (r) + return r; + + /* 
block VCPU register access */ + WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_ARB_CTRL), + UVD_RB_ARB_CTRL__VCPU_DIS_MASK, + ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK); + + /* reset VCPU */ + WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), + UVD_VCPU_CNTL__BLK_RST_MASK, + ~UVD_VCPU_CNTL__BLK_RST_MASK); + + /* disable VCPU clock */ + WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0, + ~(UVD_VCPU_CNTL__CLK_EN_MASK)); + + /* clear status */ + WREG32_SOC15(VCN, 0, mmUVD_STATUS, 0); + + vcn_v2_5_enable_clock_gating(adev); + + /* enable register anti-hang mechanism */ + WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, + ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); + + return 0; +} + static bool vcn_v2_5_is_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -328,13 +768,38 @@ static int vcn_v2_5_wait_for_idle(void *handle) static int vcn_v2_5_set_clockgating_state(void *handle, enum amd_clockgating_state state) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + bool enable = (state == AMD_CG_STATE_GATE) ? true : false; + + if (enable) { + if (vcn_v2_5_is_idle(handle)) + return -EBUSY; + vcn_v2_5_enable_clock_gating(adev); + } else { + vcn_v2_5_disable_clock_gating(adev); + } + return 0; } static int vcn_v2_5_set_powergating_state(void *handle, enum amd_powergating_state state) { - return 0; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int ret; + + if(state == adev->vcn.cur_state) + return 0; + + if (state == AMD_PG_STATE_GATE) + ret = vcn_v2_5_stop(adev); + else + ret = vcn_v2_5_start(adev); + + if(!ret) + adev->vcn.cur_state = state; + + return ret; } static int vcn_v2_5_set_interrupt_state(struct amdgpu_device *adev, -- cgit v1.2.3 From a7c0e4019f7473055713fa011079dd19a8a16d5f Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Tue, 9 Jul 2019 10:09:06 -0500 Subject: drm/amdgpu: add Arcturus to the VCN family including firmware support etc. 
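
The change below registers the Arcturus VCN firmware image with a MODULE_FIRMWARE entry and adds a CHIP_ARCTURUS case to the firmware-name selection in amdgpu_vcn_sw_init(). A minimal sketch of that selection pattern, using an illustrative enum in place of the driver's CHIP_* values (only the firmware paths are taken from the patch):

enum vcn_chip { VCN_CHIP_RAVEN, VCN_CHIP_ARCTURUS, VCN_CHIP_NAVI10 };

static const char *vcn_firmware_name(enum vcn_chip chip)
{
	switch (chip) {
	case VCN_CHIP_RAVEN:
		return "amdgpu/raven_vcn.bin";
	case VCN_CHIP_ARCTURUS:
		return "amdgpu/arcturus_vcn.bin";   /* added by this patch */
	case VCN_CHIP_NAVI10:
		return "amdgpu/navi10_vcn.bin";
	default:
		return NULL;                       /* unsupported ASIC */
	}
}

The returned name is then handed to the common firmware loader, exactly as the existing chips already do.
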
Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 3e1a360dee54..a119a7df0305 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -46,12 +46,14 @@ #define FIRMWARE_RAVEN "amdgpu/raven_vcn.bin" #define FIRMWARE_PICASSO "amdgpu/picasso_vcn.bin" #define FIRMWARE_RAVEN2 "amdgpu/raven2_vcn.bin" +#define FIRMWARE_ARCTURUS "amdgpu/arcturus_vcn.bin" #define FIRMWARE_NAVI10 "amdgpu/navi10_vcn.bin" #define FIRMWARE_NAVI14 "amdgpu/navi14_vcn.bin" MODULE_FIRMWARE(FIRMWARE_RAVEN); MODULE_FIRMWARE(FIRMWARE_PICASSO); MODULE_FIRMWARE(FIRMWARE_RAVEN2); +MODULE_FIRMWARE(FIRMWARE_ARCTURUS); MODULE_FIRMWARE(FIRMWARE_NAVI10); MODULE_FIRMWARE(FIRMWARE_NAVI14); @@ -76,6 +78,9 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) else fw_name = FIRMWARE_RAVEN; break; + case CHIP_ARCTURUS: + fw_name = FIRMWARE_ARCTURUS; + break; case CHIP_NAVI10: fw_name = FIRMWARE_NAVI10; if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && -- cgit v1.2.3 From 185a579700ec9c24e0a10e49db431c3eca9aba72 Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Tue, 16 Apr 2019 11:17:46 -0400 Subject: drm/amdgpu/VCN2.5: set decode ring functions Also reuse most of the VCN2.0 decode ring functions Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 78 +++++++++++++++++++++++++++++++++++ 1 file changed, 78 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index b6e72fff94f5..f16a4f682e26 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -747,6 +747,84 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev) return 0; } +/** + * vcn_v2_5_dec_ring_get_rptr - get read pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware read pointer + */ +static uint64_t vcn_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR); +} + +/** + * vcn_v2_5_dec_ring_get_wptr - get write pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware write pointer + */ +static uint64_t vcn_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR); +} + +/** + * vcn_v2_5_dec_ring_set_wptr - set write pointer + * + * @ring: amdgpu_ring pointer + * + * Commits the write pointer to the hardware + */ +static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); +} + +static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = { + .type = AMDGPU_RING_TYPE_VCN_DEC, + .align_mask = 0xf, + .vmhub = AMDGPU_MMHUB_1, + .get_rptr = vcn_v2_5_dec_ring_get_rptr, + .get_wptr = vcn_v2_5_dec_ring_get_wptr, + .set_wptr = vcn_v2_5_dec_ring_set_wptr, + .emit_frame_size = + SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + + 8 + /* vcn_v2_0_dec_ring_emit_vm_flush */ + 14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */ + 6, + .emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */ + .emit_ib = vcn_v2_0_dec_ring_emit_ib, + .emit_fence = 
vcn_v2_0_dec_ring_emit_fence, + .emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush, + .test_ring = amdgpu_vcn_dec_ring_test_ring, + .test_ib = amdgpu_vcn_dec_ring_test_ib, + .insert_nop = vcn_v2_0_dec_ring_insert_nop, + .insert_start = vcn_v2_0_dec_ring_insert_start, + .insert_end = vcn_v2_0_dec_ring_insert_end, + .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_vcn_ring_begin_use, + .end_use = amdgpu_vcn_ring_end_use, + .emit_wreg = vcn_v2_0_dec_ring_emit_wreg, + .emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait, + .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, +}; + +static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev) +{ + adev->vcn.ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs; + DRM_INFO("VCN decode is enabled in VM mode\n"); +} + static bool vcn_v2_5_is_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; -- cgit v1.2.3 From a4767886e5cb687ca04f5f1aaf8cbf42b3f88404 Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Mon, 22 Apr 2019 12:17:38 -0400 Subject: drm/amdgpu/VCN2.5: set encode ring functions Also reuse most of the VCN2.0 encode ring functions Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 91 +++++++++++++++++++++++++++++++++++ 1 file changed, 91 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index f16a4f682e26..b42f6769ae06 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -819,12 +819,103 @@ static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = { .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, }; +/** + * vcn_v2_5_enc_ring_get_rptr - get enc read pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware enc read pointer + */ +static uint64_t vcn_v2_5_enc_ring_get_rptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring == &adev->vcn.ring_enc[0]) + return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR); + else + return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2); +} + +/** + * vcn_v2_5_enc_ring_get_wptr - get enc write pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware enc write pointer + */ +static uint64_t vcn_v2_5_enc_ring_get_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring == &adev->vcn.ring_enc[0]) + return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR); + else + return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2); +} + +/** + * vcn_v2_5_enc_ring_set_wptr - set enc write pointer + * + * @ring: amdgpu_ring pointer + * + * Commits the enc write pointer to the hardware + */ +static void vcn_v2_5_enc_ring_set_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring == &adev->vcn.ring_enc[0]) + WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); + else + WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); +} + +static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = { + .type = AMDGPU_RING_TYPE_VCN_ENC, + .align_mask = 0x3f, + .nop = VCN_ENC_CMD_NO_OP, + .vmhub = AMDGPU_MMHUB_1, + .get_rptr = vcn_v2_5_enc_ring_get_rptr, + .get_wptr = vcn_v2_5_enc_ring_get_wptr, + .set_wptr = vcn_v2_5_enc_ring_set_wptr, + .emit_frame_size = + SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 + + 4 + /* vcn_v2_0_enc_ring_emit_vm_flush */ + 5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */ + 1, /* 
vcn_v2_0_enc_ring_insert_end */ + .emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */ + .emit_ib = vcn_v2_0_enc_ring_emit_ib, + .emit_fence = vcn_v2_0_enc_ring_emit_fence, + .emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush, + .test_ring = amdgpu_vcn_enc_ring_test_ring, + .test_ib = amdgpu_vcn_enc_ring_test_ib, + .insert_nop = amdgpu_ring_insert_nop, + .insert_end = vcn_v2_0_enc_ring_insert_end, + .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_vcn_ring_begin_use, + .end_use = amdgpu_vcn_ring_end_use, + .emit_wreg = vcn_v2_0_enc_ring_emit_wreg, + .emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait, + .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, +}; + static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev) { adev->vcn.ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs; DRM_INFO("VCN decode is enabled in VM mode\n"); } +static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < adev->vcn.num_enc_rings; ++i) + adev->vcn.ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs; + + DRM_INFO("VCN encode is enabled in VM mode\n"); +} + static bool vcn_v2_5_is_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; -- cgit v1.2.3 From e87d5a7a23c7020cf0770726ae34e50ac66f7ced Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Tue, 16 Apr 2019 11:32:22 -0400 Subject: drm/amdgpu: add JPEG2.5 HW start and stop JPEG engine initialization and suspend sequences Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 104 ++++++++++++++++++++++++++++++++++ 1 file changed, 104 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index b42f6769ae06..82c9c40e9ae4 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -530,6 +530,104 @@ static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev) WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data); } +/** + * jpeg_v2_5_start - start JPEG block + * + * @adev: amdgpu_device pointer + * + * Setup and start the JPEG block + */ +static int jpeg_v2_5_start(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring = &adev->vcn.ring_jpeg; + uint32_t tmp; + + /* disable anti hang mechanism */ + WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_POWER_STATUS), 0, + ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK); + + /* JPEG disable CGC */ + tmp = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL); + tmp |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + tmp |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; + tmp |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT; + WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, tmp); + + tmp = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE); + tmp &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK + | JPEG_CGC_GATE__JPEG2_DEC_MASK + | JPEG_CGC_GATE__JMCIF_MASK + | JPEG_CGC_GATE__JRBBM_MASK); + WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, tmp); + + tmp = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL); + tmp &= ~(JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK + | JPEG_CGC_CTRL__JPEG2_DEC_MODE_MASK + | JPEG_CGC_CTRL__JMCIF_MODE_MASK + | JPEG_CGC_CTRL__JRBBM_MODE_MASK); + WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, tmp); + + /* MJPEG global tiling registers */ + WREG32_SOC15(UVD, 0, mmJPEG_DEC_GFX8_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + WREG32_SOC15(UVD, 0, mmJPEG_DEC_GFX10_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + + /* enable JMI channel */ + WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_JMI_CNTL), 0, + ~UVD_JMI_CNTL__SOFT_RESET_MASK); + + 
/* enable System Interrupt for JRBC */ + WREG32_P(SOC15_REG_OFFSET(VCN, 0, mmJPEG_SYS_INT_EN), + JPEG_SYS_INT_EN__DJRBC_MASK, + ~JPEG_SYS_INT_EN__DJRBC_MASK); + + WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0); + WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L)); + WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, + lower_32_bits(ring->gpu_addr)); + WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, + upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, 0); + WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, 0); + WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L); + WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4); + ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR); + + return 0; +} + +/** + * jpeg_v2_5_stop - stop JPEG block + * + * @adev: amdgpu_device pointer + * + * stop the JPEG block + */ +static int jpeg_v2_5_stop(struct amdgpu_device *adev) +{ + uint32_t tmp; + + /* reset JMI */ + WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_JMI_CNTL), + UVD_JMI_CNTL__SOFT_RESET_MASK, + ~UVD_JMI_CNTL__SOFT_RESET_MASK); + + tmp = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE); + tmp |= (JPEG_CGC_GATE__JPEG_DEC_MASK + |JPEG_CGC_GATE__JPEG2_DEC_MASK + |JPEG_CGC_GATE__JMCIF_MASK + |JPEG_CGC_GATE__JRBBM_MASK); + WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, tmp); + + /* enable anti hang mechanism */ + WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_POWER_STATUS), + UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK, + ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK); + + return 0; +} + static int vcn_v2_5_start(struct amdgpu_device *adev) { struct amdgpu_ring *ring = &adev->vcn.ring_dec; @@ -688,6 +786,8 @@ static int vcn_v2_5_start(struct amdgpu_device *adev) WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4); + r = jpeg_v2_5_start(adev); + return r; } @@ -696,6 +796,10 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev) uint32_t tmp; int r; + r = jpeg_v2_5_stop(adev); + if (r) + return r; + /* wait for vcn idle */ SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, r); if (r) -- cgit v1.2.3 From 8001073708108e27112d3c63cedc40b9f66c3bac Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Mon, 22 Apr 2019 12:21:16 -0400 Subject: drm/amdgpu/VCN2.5: set JPEG decode ring functions Also reuse most of the JPEG2.0 decode ring functions Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 78 +++++++++++++++++++++++++++++++++++ 1 file changed, 78 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index 82c9c40e9ae4..0ffc0d60fad8 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -1004,6 +1004,78 @@ static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = { .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, }; +/** + * vcn_v2_5_jpeg_ring_get_rptr - get read pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware read pointer + */ +static uint64_t vcn_v2_5_jpeg_ring_get_rptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR); +} + +/** + * vcn_v2_5_jpeg_ring_get_wptr - get write pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware write pointer + */ +static uint64_t vcn_v2_5_jpeg_ring_get_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device 
*adev = ring->adev; + + return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR); +} + +/** + * vcn_v2_5_jpeg_ring_set_wptr - set write pointer + * + * @ring: amdgpu_ring pointer + * + * Commits the write pointer to the hardware + */ +static void vcn_v2_5_jpeg_ring_set_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr)); +} + +static const struct amdgpu_ring_funcs vcn_v2_5_jpeg_ring_vm_funcs = { + .type = AMDGPU_RING_TYPE_VCN_JPEG, + .align_mask = 0xf, + .vmhub = AMDGPU_MMHUB_1, + .get_rptr = vcn_v2_5_jpeg_ring_get_rptr, + .get_wptr = vcn_v2_5_jpeg_ring_get_wptr, + .set_wptr = vcn_v2_5_jpeg_ring_set_wptr, + .emit_frame_size = + SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + + 8 + /* vcn_v2_0_jpeg_ring_emit_vm_flush */ + 18 + 18 + /* vcn_v2_0_jpeg_ring_emit_fence x2 vm fence */ + 8 + 16, + .emit_ib_size = 22, /* vcn_v2_0_jpeg_ring_emit_ib */ + .emit_ib = vcn_v2_0_jpeg_ring_emit_ib, + .emit_fence = vcn_v2_0_jpeg_ring_emit_fence, + .emit_vm_flush = vcn_v2_0_jpeg_ring_emit_vm_flush, + .test_ring = amdgpu_vcn_jpeg_ring_test_ring, + .test_ib = amdgpu_vcn_jpeg_ring_test_ib, + .insert_nop = vcn_v2_0_jpeg_ring_nop, + .insert_start = vcn_v2_0_jpeg_ring_insert_start, + .insert_end = vcn_v2_0_jpeg_ring_insert_end, + .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_vcn_ring_begin_use, + .end_use = amdgpu_vcn_ring_end_use, + .emit_wreg = vcn_v2_0_jpeg_ring_emit_wreg, + .emit_reg_wait = vcn_v2_0_jpeg_ring_emit_reg_wait, + .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, +}; + static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev) { adev->vcn.ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs; @@ -1020,6 +1092,12 @@ static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev) DRM_INFO("VCN encode is enabled in VM mode\n"); } +static void vcn_v2_5_set_jpeg_ring_funcs(struct amdgpu_device *adev) +{ + adev->vcn.ring_jpeg.funcs = &vcn_v2_5_jpeg_ring_vm_funcs; + DRM_INFO("VCN jpeg decode is enabled in VM mode\n"); +} + static bool vcn_v2_5_is_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; -- cgit v1.2.3 From 08249a3a322b8950db31996cc5b00bcd292e91f4 Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Tue, 16 Apr 2019 11:42:56 -0400 Subject: drm/amdgpu: enable VCN2.5 on Arcturus VCN is the video decode and encode engine on Arcturus Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 2 ++ drivers/gpu/drm/amd/amdgpu/vcn_v2_5.h | 29 +++++++++++++++++++++++++++++ 2 files changed, 31 insertions(+) create mode 100644 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.h (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 854cf0fd1791..232adf83a7bf 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -63,6 +63,7 @@ #include "uvd_v7_0.h" #include "vce_v4_0.h" #include "vcn_v1_0.h" +#include "vcn_v2_5.h" #include "dce_virtual.h" #include "mxgpu_ai.h" #include "amdgpu_smu.h" @@ -684,6 +685,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); + amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block); break; default: return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.h 
b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.h new file mode 100644 index 000000000000..8d9c0800b8e0 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.h @@ -0,0 +1,29 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __VCN_V2_5_H__ +#define __VCN_V2_5_H__ + +extern const struct amdgpu_ip_block_version vcn_v2_5_ip_block; + +#endif /* __VCN_V2_5_H__ */ -- cgit v1.2.3 From 39a5053fb2231d26f296734390bae7d7d36a9130 Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Tue, 9 Jul 2019 10:18:36 -0500 Subject: drm/amdgpu: add vcn doorbell range function to nbio7.4 (v2) To setup the aperture for VCN2.5 v2: setup vcn doorbells in vcn2.5 hw_init (Alex) Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 21 +++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 3 +++ 2 files changed, 24 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c index fc45eaeaba6e..d8c9972a315b 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c @@ -114,6 +114,26 @@ static void nbio_v7_4_sdma_doorbell_range(struct amdgpu_device *adev, int instan WREG32(reg, doorbell_range); } +static void nbio_v7_4_vcn_doorbell_range(struct amdgpu_device *adev, bool use_doorbell, + int doorbell_index) +{ + u32 reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH0_DOORBELL_RANGE); + + u32 doorbell_range = RREG32(reg); + + if (use_doorbell) { + doorbell_range = REG_SET_FIELD(doorbell_range, + BIF_MMSCH0_DOORBELL_RANGE, OFFSET, + doorbell_index); + doorbell_range = REG_SET_FIELD(doorbell_range, + BIF_MMSCH0_DOORBELL_RANGE, SIZE, 8); + } else + doorbell_range = REG_SET_FIELD(doorbell_range, + BIF_MMSCH0_DOORBELL_RANGE, SIZE, 0); + + WREG32(reg, doorbell_range); +} + static void nbio_v7_4_enable_doorbell_aperture(struct amdgpu_device *adev, bool enable) { @@ -292,6 +312,7 @@ const struct amdgpu_nbio_funcs nbio_v7_4_funcs = { .hdp_flush = nbio_v7_4_hdp_flush, .get_memsize = nbio_v7_4_get_memsize, .sdma_doorbell_range = nbio_v7_4_sdma_doorbell_range, + .vcn_doorbell_range = nbio_v7_4_vcn_doorbell_range, .enable_doorbell_aperture = nbio_v7_4_enable_doorbell_aperture, .enable_doorbell_selfring_aperture = nbio_v7_4_enable_doorbell_selfring_aperture, .ih_doorbell_range = nbio_v7_4_ih_doorbell_range, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c 
b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index 0ffc0d60fad8..f9d6819f0260 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -205,6 +205,9 @@ static int vcn_v2_5_hw_init(void *handle) struct amdgpu_ring *ring = &adev->vcn.ring_dec; int i, r; + adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell, + ring->doorbell_index); + r = amdgpu_ring_test_ring(ring); if (r) { ring->sched.ready = false; -- cgit v1.2.3 From 530e30fc32d3834288980777825a8ec0303fbccc Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Fri, 26 Apr 2019 13:46:21 -0400 Subject: drm/amdgpu: enable the Doorbell support for VCN2.5 Including decode, encode, and JPEG decode rings Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 64 ++++++++++++++++++++++------ drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c | 4 ++ 2 files changed, 56 insertions(+), 12 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index f9d6819f0260..840737df19c0 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -128,6 +128,8 @@ static int vcn_v2_5_sw_init(void *handle) return r; ring = &adev->vcn.ring_dec; + ring->use_doorbell = true; + ring->doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 << 1; sprintf(ring->name, "vcn_dec"); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0); if (r) @@ -153,6 +155,8 @@ static int vcn_v2_5_sw_init(void *handle) for (i = 0; i < adev->vcn.num_enc_rings; ++i) { ring = &adev->vcn.ring_enc[i]; + ring->use_doorbell = true; + ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i; sprintf(ring->name, "vcn_enc%d", i); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0); if (r) @@ -160,6 +164,8 @@ static int vcn_v2_5_sw_init(void *handle) } ring = &adev->vcn.ring_jpeg; + ring->use_doorbell = true; + ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1; sprintf(ring->name, "vcn_jpeg"); r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0); if (r) @@ -879,7 +885,10 @@ static uint64_t vcn_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR); + if (ring->use_doorbell) + return adev->wb.wb[ring->wptr_offs]; + else + return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR); } /** @@ -893,7 +902,12 @@ static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); + if (ring->use_doorbell) { + adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr); + WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); + } else { + WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); + } } static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = { @@ -954,10 +968,17 @@ static uint64_t vcn_v2_5_enc_ring_get_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - if (ring == &adev->vcn.ring_enc[0]) - return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR); - else - return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2); + if (ring == &adev->vcn.ring_enc[0]) { + if (ring->use_doorbell) + return adev->wb.wb[ring->wptr_offs]; + else + return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR); + } else { + if (ring->use_doorbell) + return adev->wb.wb[ring->wptr_offs]; + else + return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2); + } } /** @@ -971,10 +992,21 @@ 
static void vcn_v2_5_enc_ring_set_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - if (ring == &adev->vcn.ring_enc[0]) - WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); - else - WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); + if (ring == &adev->vcn.ring_enc[0]) { + if (ring->use_doorbell) { + adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr); + WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); + } else { + WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); + } + } else { + if (ring->use_doorbell) { + adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr); + WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); + } else { + WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); + } + } } static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = { @@ -1032,7 +1064,10 @@ static uint64_t vcn_v2_5_jpeg_ring_get_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR); + if (ring->use_doorbell) + return adev->wb.wb[ring->wptr_offs]; + else + return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR); } /** @@ -1046,7 +1081,12 @@ static void vcn_v2_5_jpeg_ring_set_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr)); + if (ring->use_doorbell) { + adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr); + WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); + } else { + WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr)); + } } static const struct amdgpu_ring_funcs vcn_v2_5_jpeg_ring_vm_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c index 0db84386252a..79223188bd47 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c @@ -85,6 +85,10 @@ void vega20_doorbell_index_init(struct amdgpu_device *adev) adev->doorbell_index.uvd_vce.vce_ring2_3 = AMDGPU_VEGA20_DOORBELL64_VCE_RING2_3; adev->doorbell_index.uvd_vce.vce_ring4_5 = AMDGPU_VEGA20_DOORBELL64_VCE_RING4_5; adev->doorbell_index.uvd_vce.vce_ring6_7 = AMDGPU_VEGA20_DOORBELL64_VCE_RING6_7; + adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_VEGA20_DOORBELL64_VCN0_1; + adev->doorbell_index.vcn.vcn_ring2_3 = AMDGPU_VEGA20_DOORBELL64_VCN2_3; + adev->doorbell_index.vcn.vcn_ring4_5 = AMDGPU_VEGA20_DOORBELL64_VCN4_5; + adev->doorbell_index.vcn.vcn_ring6_7 = AMDGPU_VEGA20_DOORBELL64_VCN6_7; adev->doorbell_index.first_non_cp = AMDGPU_VEGA20_DOORBELL64_FIRST_NON_CP; adev->doorbell_index.last_non_cp = AMDGPU_VEGA20_DOORBELL64_LAST_NON_CP; -- cgit v1.2.3 From 6fba59060c6c87cdb80cd2b3de0b16fc09ba4b03 Mon Sep 17 00:00:00 2001 From: Chengming Gui Date: Tue, 9 Jul 2019 10:52:20 -0500 Subject: drm/amdgpu/powerplay: add arcturus ppt functions add arcturus_ppsmc.h arcturus_ppt.c and arcturus_ppt.h files. This is the initial power management support for Arcturus. 
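
The table added below translates the driver's generic SMU_MSG_* indices into the Arcturus firmware's PPSMC_MSG_* opcodes, and arcturus_get_smu_msg_index() does a bounds-checked lookup in that table. A compact sketch of the lookup, with placeholder enum entries, table contents, and message-count value (the real mapping is the full MSG_MAP table in the patch):

#include <errno.h>

enum { SMU_MSG_TestMessage, SMU_MSG_GetSmuVersion, SMU_MSG_MAX_COUNT };

#define PPSMC_MSG_TestMessage    0x1
#define PPSMC_MSG_GetSmuVersion  0x2
#define PPSMC_Message_Count      0x31   /* placeholder for the firmware's message count */

static const int example_message_map[SMU_MSG_MAX_COUNT] = {
	[SMU_MSG_TestMessage]   = PPSMC_MSG_TestMessage,
	[SMU_MSG_GetSmuVersion] = PPSMC_MSG_GetSmuVersion,
};

static int example_get_smu_msg_index(unsigned int index)
{
	int val;

	if (index >= SMU_MSG_MAX_COUNT)
		return -EINVAL;                /* unknown driver-side message */

	val = example_message_map[index];
	if (val > PPSMC_Message_Count)
		return -EINVAL;                /* not implemented by this firmware */

	return val;
}
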
Signed-off-by: Chengming Gui Reviewed-by: Kevin Wang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/Makefile | 2 +- drivers/gpu/drm/amd/powerplay/arcturus_ppt.c | 124 +++++++++++++++++++++ drivers/gpu/drm/amd/powerplay/arcturus_ppt.h | 28 +++++ drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 10 ++ drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h | 120 ++++++++++++++++++++ drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 4 + 6 files changed, 287 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c create mode 100644 drivers/gpu/drm/amd/powerplay/arcturus_ppt.h create mode 100644 drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/Makefile b/drivers/gpu/drm/amd/powerplay/Makefile index 727c5cff231c..e05a7e3d6d8d 100644 --- a/drivers/gpu/drm/amd/powerplay/Makefile +++ b/drivers/gpu/drm/amd/powerplay/Makefile @@ -35,7 +35,7 @@ AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/powerplay/,$( include $(AMD_POWERPLAY) -POWER_MGR = amd_powerplay.o amdgpu_smu.o smu_v11_0.o vega20_ppt.o navi10_ppt.o +POWER_MGR = amd_powerplay.o amdgpu_smu.o smu_v11_0.o vega20_ppt.o arcturus_ppt.o navi10_ppt.o AMD_PP_POWER = $(addprefix $(AMD_PP_PATH)/,$(POWER_MGR)) diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c new file mode 100644 index 000000000000..534e450df4bb --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c @@ -0,0 +1,124 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "pp_debug.h" +#include +#include "amdgpu.h" +#include "amdgpu_smu.h" +#include "atomfirmware.h" +#include "amdgpu_atomfirmware.h" +#include "smu_v11_0.h" +#include "smu11_driver_if_arcturus.h" +#include "soc15_common.h" +#include "atom.h" +#include "power_state.h" +#include "arcturus_ppt.h" +#include "arcturus_ppsmc.h" +#include "nbio/nbio_7_4_sh_mask.h" + +#define MSG_MAP(msg, index) \ + [SMU_MSG_##msg] = index + +static int arcturus_message_map[SMU_MSG_MAX_COUNT] = { + MSG_MAP(TestMessage, PPSMC_MSG_TestMessage), + MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion), + MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion), + MSG_MAP(SetAllowedFeaturesMaskLow, PPSMC_MSG_SetAllowedFeaturesMaskLow), + MSG_MAP(SetAllowedFeaturesMaskHigh, PPSMC_MSG_SetAllowedFeaturesMaskHigh), + MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures), + MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures), + MSG_MAP(EnableSmuFeaturesLow, PPSMC_MSG_EnableSmuFeaturesLow), + MSG_MAP(EnableSmuFeaturesHigh, PPSMC_MSG_EnableSmuFeaturesHigh), + MSG_MAP(DisableSmuFeaturesLow, PPSMC_MSG_DisableSmuFeaturesLow), + MSG_MAP(DisableSmuFeaturesHigh, PPSMC_MSG_DisableSmuFeaturesHigh), + MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetEnabledSmuFeaturesLow), + MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetEnabledSmuFeaturesHigh), + MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh), + MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow), + MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh), + MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow), + MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram), + MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu), + MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable), + MSG_MAP(UseBackupPPTable, PPSMC_MSG_UseBackupPPTable), + MSG_MAP(SetSystemVirtualDramAddrHigh, PPSMC_MSG_SetSystemVirtualDramAddrHigh), + MSG_MAP(SetSystemVirtualDramAddrLow, PPSMC_MSG_SetSystemVirtualDramAddrLow), + MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco), + MSG_MAP(ExitBaco, PPSMC_MSG_ExitBaco), + MSG_MAP(ArmD3, PPSMC_MSG_ArmD3), + MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq), + MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq), + MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq), + MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq), + MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq), + MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq), + MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex), + MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask), + MSG_MAP(SetDfSwitchType, PPSMC_MSG_SetDfSwitchType), + MSG_MAP(GetVoltageByDpm, PPSMC_MSG_GetVoltageByDpm), + MSG_MAP(GetVoltageByDpmOverdrive, PPSMC_MSG_GetVoltageByDpmOverdrive), + MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit), + MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit), + MSG_MAP(PowerUpVcn0, PPSMC_MSG_PowerUpVcn0), + MSG_MAP(PowerDownVcn01, PPSMC_MSG_PowerDownVcn01), + MSG_MAP(PowerUpVcn1, PPSMC_MSG_PowerUpVcn1), + MSG_MAP(PowerDownVcn1, PPSMC_MSG_PowerDownVcn1), + MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload), + MSG_MAP(PrepareMp1ForReset, PPSMC_MSG_PrepareMp1ForReset), + MSG_MAP(PrepareMp1ForShutdown, PPSMC_MSG_PrepareMp1ForShutdown), + MSG_MAP(SoftReset, PPSMC_MSG_SoftReset), + MSG_MAP(RunAfllBtc, PPSMC_MSG_RunAfllBtc), + MSG_MAP(RunGfxDcBtc, PPSMC_MSG_RunGfxDcBtc), + MSG_MAP(RunSocDcBtc, PPSMC_MSG_RunSocDcBtc), + MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh), + MSG_MAP(DramLogSetDramAddrLow, 
PPSMC_MSG_DramLogSetDramAddrLow), + MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize), + MSG_MAP(GetDebugData, PPSMC_MSG_GetDebugData), + MSG_MAP(WaflTest, PPSMC_MSG_WaflTest), + MSG_MAP(SetXgmiMode, PPSMC_MSG_SetXgmiMode), + MSG_MAP(SetMemoryChannelEnable, PPSMC_MSG_SetMemoryChannelEnable), +}; + +static int arcturus_get_smu_msg_index(struct smu_context *smc, uint32_t index) +{ + int val; + + if (index >= SMU_MSG_MAX_COUNT) + return -EINVAL; + + val = arcturus_message_map[index]; + if (val > PPSMC_Message_Count) + return -EINVAL; + + return val; +} + +static const struct pptable_funcs arcturus_ppt_funcs = { + .get_smu_msg_index = arcturus_get_smu_msg_index, +}; + +void arcturus_set_ppt_funcs(struct smu_context *smu) +{ + smu->ppt_funcs = &arcturus_ppt_funcs; + smu->smc_if_version = SMU11_DRIVER_IF_VERSION; +} diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.h b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.h new file mode 100644 index 000000000000..7b808d091b31 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.h @@ -0,0 +1,28 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __ARCTURUS_PPT_H__ +#define __ARCTURUS_PPT_H__ + +extern void arcturus_set_ppt_funcs(struct smu_context *smu); + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index 1af992fb0bde..d940e7bef534 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h @@ -242,6 +242,16 @@ enum smu_message_type SMU_MSG_PowerDownJpeg, SMU_MSG_BacoAudioD3PME, SMU_MSG_ArmD3, + SMU_MSG_RunGfxDcBtc, + SMU_MSG_RunSocDcBtc, + SMU_MSG_SetMemoryChannelEnable, + SMU_MSG_SetDfSwitchType, + SMU_MSG_GetVoltageByDpm, + SMU_MSG_GetVoltageByDpmOverdrive, + SMU_MSG_PowerUpVcn0, + SMU_MSG_PowerDownVcn01, + SMU_MSG_PowerUpVcn1, + SMU_MSG_PowerDownVcn1, SMU_MSG_MAX_COUNT, }; diff --git a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h new file mode 100644 index 000000000000..b86bb2bc8a31 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h @@ -0,0 +1,120 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef ARCTURUS_PP_SMC_H +#define ARCTURUS_PP_SMC_H + +#pragma pack(push, 1) + +// SMU Response Codes: +#define PPSMC_Result_OK 0x1 +#define PPSMC_Result_Failed 0xFF +#define PPSMC_Result_UnknownCmd 0xFE +#define PPSMC_Result_CmdRejectedPrereq 0xFD +#define PPSMC_Result_CmdRejectedBusy 0xFC + +// Message Definitions: +// BASIC +#define PPSMC_MSG_TestMessage 0x1 +#define PPSMC_MSG_GetSmuVersion 0x2 +#define PPSMC_MSG_GetDriverIfVersion 0x3 +#define PPSMC_MSG_SetAllowedFeaturesMaskLow 0x4 +#define PPSMC_MSG_SetAllowedFeaturesMaskHigh 0x5 +#define PPSMC_MSG_EnableAllSmuFeatures 0x6 +#define PPSMC_MSG_DisableAllSmuFeatures 0x7 +#define PPSMC_MSG_EnableSmuFeaturesLow 0x8 +#define PPSMC_MSG_EnableSmuFeaturesHigh 0x9 +#define PPSMC_MSG_DisableSmuFeaturesLow 0xA +#define PPSMC_MSG_DisableSmuFeaturesHigh 0xB +#define PPSMC_MSG_GetEnabledSmuFeaturesLow 0xC +#define PPSMC_MSG_GetEnabledSmuFeaturesHigh 0xD +#define PPSMC_MSG_SetDriverDramAddrHigh 0xE +#define PPSMC_MSG_SetDriverDramAddrLow 0xF +#define PPSMC_MSG_SetToolsDramAddrHigh 0x10 +#define PPSMC_MSG_SetToolsDramAddrLow 0x11 +#define PPSMC_MSG_TransferTableSmu2Dram 0x12 +#define PPSMC_MSG_TransferTableDram2Smu 0x13 +#define PPSMC_MSG_UseDefaultPPTable 0x14 +#define PPSMC_MSG_UseBackupPPTable 0x15 +#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x16 +#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x17 + +//BACO/BAMACO/BOMACO +#define PPSMC_MSG_EnterBaco 0x18 +#define PPSMC_MSG_ExitBaco 0x19 +#define PPSMC_MSG_ArmD3 0x1A + +//DPM +#define PPSMC_MSG_SetSoftMinByFreq 0x1B +#define PPSMC_MSG_SetSoftMaxByFreq 0x1C +#define PPSMC_MSG_SetHardMinByFreq 0x1D +#define PPSMC_MSG_SetHardMaxByFreq 0x1E +#define PPSMC_MSG_GetMinDpmFreq 0x1F +#define PPSMC_MSG_GetMaxDpmFreq 0x20 +#define PPSMC_MSG_GetDpmFreqByIndex 0x21 + +#define PPSMC_MSG_SetWorkloadMask 0x22 +#define PPSMC_MSG_SetDfSwitchType 0x23 +#define PPSMC_MSG_GetVoltageByDpm 0x24 +#define PPSMC_MSG_GetVoltageByDpmOverdrive 0x25 + +#define PPSMC_MSG_SetPptLimit 0x26 +#define PPSMC_MSG_GetPptLimit 0x27 + +//Power Gating +#define PPSMC_MSG_PowerUpVcn0 0x28 +#define PPSMC_MSG_PowerDownVcn01 0x29 +#define PPSMC_MSG_PowerUpVcn1 0x2A +#define PPSMC_MSG_PowerDownVcn1 0x2B + +//Resets and reload +#define PPSMC_MSG_PrepareMp1ForUnload 0x2C +#define PPSMC_MSG_PrepareMp1ForReset 0x2D +#define PPSMC_MSG_PrepareMp1ForShutdown 0x2E +#define PPSMC_MSG_SoftReset 0x2F + +//BTC +#define PPSMC_MSG_RunAfllBtc 0x30 +#define 
PPSMC_MSG_RunGfxDcBtc 0x31 +#define PPSMC_MSG_RunSocDcBtc 0x32 + +//Debug +#define PPSMC_MSG_DramLogSetDramAddrHigh 0x33 +#define PPSMC_MSG_DramLogSetDramAddrLow 0x34 +#define PPSMC_MSG_DramLogSetDramSize 0x35 +#define PPSMC_MSG_GetDebugData 0x36 + +//WAFL and XGMI +#define PPSMC_MSG_WaflTest 0x37 +#define PPSMC_MSG_SetXgmiMode 0x38 + +//Others +#define PPSMC_MSG_SetMemoryChannelEnable 0x39 + +#define PPSMC_Message_Count 0x3A + +typedef uint32_t PPSMC_Result; +typedef uint32_t PPSMC_Msg; +#pragma pack(pop) + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index 1315958e5d81..e8aedd762b80 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -33,6 +33,7 @@ #include "soc15_common.h" #include "atom.h" #include "vega20_ppt.h" +#include "arcturus_ppt.h" #include "navi10_ppt.h" #include "asic_reg/thm/thm_11_0_2_offset.h" @@ -1803,6 +1804,9 @@ void smu_v11_0_set_smu_funcs(struct smu_context *smu) case CHIP_VEGA20: vega20_set_ppt_funcs(smu); break; + case CHIP_ARCTURUS: + arcturus_set_ppt_funcs(smu); + break; case CHIP_NAVI10: case CHIP_NAVI14: navi10_set_ppt_funcs(smu); -- cgit v1.2.3 From 1fb8cfc2a3a5fa499bce091ad7fb0a7082394183 Mon Sep 17 00:00:00 2001 From: Chengming Gui Date: Mon, 27 May 2019 19:01:39 +0800 Subject: drm/amdgpu/powerplay: add smu11 driver interface for arcturus. (v2) add smu11_driver_if_arcturus.h file. v2: add license, fix header guard (Alex) Signed-off-by: Chengming Gui Reviewed-by: Kevin Wang Signed-off-by: Alex Deucher --- .../amd/powerplay/inc/smu11_driver_if_arcturus.h | 878 +++++++++++++++++++++ 1 file changed, 878 insertions(+) create mode 100644 drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h new file mode 100644 index 000000000000..7a9969e075d4 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h @@ -0,0 +1,878 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + + +#ifndef SMU11_DRIVER_IF_ARCTURUS_H +#define SMU11_DRIVER_IF_ARCTURUS_H + +// *** IMPORTANT *** +// SMU TEAM: Always increment the interface version if +// any structure is changed in this file +#define SMU11_DRIVER_IF_VERSION 0x06 + +#define PPTABLE_ARCTURUS_SMU_VERSION 3 + +#define NUM_GFXCLK_DPM_LEVELS 16 +#define NUM_VCLK_DPM_LEVELS 8 +#define NUM_DCLK_DPM_LEVELS 8 +#define NUM_MP0CLK_DPM_LEVELS 2 +#define NUM_SOCCLK_DPM_LEVELS 8 +#define NUM_UCLK_DPM_LEVELS 4 +#define NUM_FCLK_DPM_LEVELS 8 +#define NUM_XGMI_LEVELS 2 + +#define MAX_GFXCLK_DPM_LEVEL (NUM_GFXCLK_DPM_LEVELS - 1) +#define MAX_VCLK_DPM_LEVEL (NUM_VCLK_DPM_LEVELS - 1) +#define MAX_DCLK_DPM_LEVEL (NUM_DCLK_DPM_LEVELS - 1) +#define MAX_MP0CLK_DPM_LEVEL (NUM_MP0CLK_DPM_LEVELS - 1) +#define MAX_SOCCLK_DPM_LEVEL (NUM_SOCCLK_DPM_LEVELS - 1) +#define MAX_UCLK_DPM_LEVEL (NUM_UCLK_DPM_LEVELS - 1) +#define MAX_FCLK_DPM_LEVEL (NUM_FCLK_DPM_LEVELS - 1) +#define MAX_XGMI_LEVEL (NUM_XGMI_LEVELS - 1) + +// Feature Control Defines +// DPM +#define FEATURE_DPM_PREFETCHER_BIT 0 +#define FEATURE_DPM_GFXCLK_BIT 1 +#define FEATURE_DPM_UCLK_BIT 2 +#define FEATURE_DPM_SOCCLK_BIT 3 +#define FEATURE_DPM_FCLK_BIT 4 +#define FEATURE_DPM_MP0CLK_BIT 5 +#define FEATURE_DPM_XGMI_BIT 6 +// Idle +#define FEATURE_DS_GFXCLK_BIT 7 +#define FEATURE_DS_SOCCLK_BIT 8 +#define FEATURE_DS_LCLK_BIT 9 +#define FEATURE_DS_FCLK_BIT 10 +#define FEATURE_DS_UCLK_BIT 11 +#define FEATURE_GFX_ULV_BIT 12 +#define FEATURE_DPM_VCN_BIT 13 +#define FEATURE_RSMU_SMN_CG_BIT 14 +#define FEATURE_WAFL_CG_BIT 15 +// Throttler/Response +#define FEATURE_PPT_BIT 16 +#define FEATURE_TDC_BIT 17 +#define FEATURE_APCC_PLUS_BIT 18 +#define FEATURE_VR0HOT_BIT 19 +#define FEATURE_VR1HOT_BIT 20 +#define FEATURE_FW_CTF_BIT 21 +#define FEATURE_FAN_CONTROL_BIT 22 +#define FEATURE_THERMAL_BIT 23 +// Other +#define FEATURE_OUT_OF_BAND_MONITOR_BIT 24 +#define FEATURE_TEMP_DEPENDENT_VMIN_BIT 25 + +#define FEATURE_SPARE_26_BIT 26 +#define FEATURE_SPARE_27_BIT 27 +#define FEATURE_SPARE_28_BIT 28 +#define FEATURE_SPARE_29_BIT 29 +#define FEATURE_SPARE_30_BIT 30 +#define FEATURE_SPARE_31_BIT 31 +#define FEATURE_SPARE_32_BIT 32 +#define FEATURE_SPARE_33_BIT 33 +#define FEATURE_SPARE_34_BIT 34 +#define FEATURE_SPARE_35_BIT 35 +#define FEATURE_SPARE_36_BIT 36 +#define FEATURE_SPARE_37_BIT 37 +#define FEATURE_SPARE_38_BIT 38 +#define FEATURE_SPARE_39_BIT 39 +#define FEATURE_SPARE_40_BIT 40 +#define FEATURE_SPARE_41_BIT 41 +#define FEATURE_SPARE_42_BIT 42 +#define FEATURE_SPARE_43_BIT 43 +#define FEATURE_SPARE_44_BIT 44 +#define FEATURE_SPARE_45_BIT 45 +#define FEATURE_SPARE_46_BIT 46 +#define FEATURE_SPARE_47_BIT 47 +#define FEATURE_SPARE_48_BIT 48 +#define FEATURE_SPARE_49_BIT 49 +#define FEATURE_SPARE_50_BIT 50 +#define FEATURE_SPARE_51_BIT 51 +#define FEATURE_SPARE_52_BIT 52 +#define FEATURE_SPARE_53_BIT 53 +#define FEATURE_SPARE_54_BIT 54 +#define FEATURE_SPARE_55_BIT 55 +#define FEATURE_SPARE_56_BIT 56 +#define FEATURE_SPARE_57_BIT 57 +#define FEATURE_SPARE_58_BIT 58 +#define FEATURE_SPARE_59_BIT 59 +#define FEATURE_SPARE_60_BIT 60 +#define FEATURE_SPARE_61_BIT 61 +#define FEATURE_SPARE_62_BIT 62 +#define FEATURE_SPARE_63_BIT 63 + +#define NUM_FEATURES 64 + + +#define FEATURE_DPM_PREFETCHER_MASK (1 << FEATURE_DPM_PREFETCHER_BIT ) +#define FEATURE_DPM_GFXCLK_MASK (1 << FEATURE_DPM_GFXCLK_BIT ) +#define FEATURE_DPM_UCLK_MASK (1 << FEATURE_DPM_UCLK_BIT ) +#define FEATURE_DPM_SOCCLK_MASK (1 << FEATURE_DPM_SOCCLK_BIT ) +#define FEATURE_DPM_FCLK_MASK (1 << FEATURE_DPM_FCLK_BIT ) +#define 
FEATURE_DPM_MP0CLK_MASK (1 << FEATURE_DPM_MP0CLK_BIT ) +#define FEATURE_DPM_XGMI_MASK (1 << FEATURE_DPM_XGMI_BIT ) + +#define FEATURE_DS_GFXCLK_MASK (1 << FEATURE_DS_GFXCLK_BIT ) +#define FEATURE_DS_SOCCLK_MASK (1 << FEATURE_DS_SOCCLK_BIT ) +#define FEATURE_DS_LCLK_MASK (1 << FEATURE_DS_LCLK_BIT ) +#define FEATURE_DS_FCLK_MASK (1 << FEATURE_DS_FCLK_BIT ) +#define FEATURE_DS_LCLK_MASK (1 << FEATURE_DS_LCLK_BIT ) +#define FEATURE_GFX_ULV_MASK (1 << FEATURE_GFX_ULV_BIT ) +#define FEATURE_VCN_PG_MASK (1 << FEATURE_VCN_PG_BIT ) +#define FEATURE_RSMU_SMN_CG_MASK (1 << FEATURE_RSMU_SMN_CG_BIT ) +#define FEATURE_WAFL_CG_MASK (1 << FEATURE_WAFL_CG_BIT ) + +#define FEATURE_PPT_MASK (1 << FEATURE_PPT_BIT ) +#define FEATURE_TDC_MASK (1 << FEATURE_TDC_BIT ) +#define FEATURE_APCC_MASK (1 << FEATURE_APCC_BIT ) +#define FEATURE_VR0HOT_MASK (1 << FEATURE_VR0HOT_BIT ) +#define FEATURE_VR1HOT_MASK (1 << FEATURE_VR1HOT_BIT ) +#define FEATURE_FW_CTF_MASK (1 << FEATURE_FW_CTF_BIT ) +#define FEATURE_FAN_CONTROL_MASK (1 << FEATURE_FAN_CONTROL_BIT ) +#define FEATURE_THERMAL_MASK (1 << FEATURE_THERMAL_BIT ) + +#define FEATURE_OUT_OF_BAND_MONITOR_MASK (1 << EATURE_OUT_OF_BAND_MONITOR_BIT ) +#define FEATURE_TEMP_DEPENDENT_VMIN_MASK (1 << FEATURE_TEMP_DEPENDENT_VMIN_MASK ) + + +//FIXME need updating +// Debug Overrides Bitmask +#define DPM_OVERRIDE_DISABLE_UCLK_PID 0x00000001 +#define DPM_OVERRIDE_ENABLE_VOLT_LINK_VCN_FCLK 0x00000002 + +// I2C Config Bit Defines +#define I2C_CONTROLLER_ENABLED 1 +#define I2C_CONTROLLER_DISABLED 0 + +// VR Mapping Bit Defines +#define VR_MAPPING_VR_SELECT_MASK 0x01 +#define VR_MAPPING_VR_SELECT_SHIFT 0x00 + +#define VR_MAPPING_PLANE_SELECT_MASK 0x02 +#define VR_MAPPING_PLANE_SELECT_SHIFT 0x01 + +// PSI Bit Defines +#define PSI_SEL_VR0_PLANE0_PSI0 0x01 +#define PSI_SEL_VR0_PLANE0_PSI1 0x02 +#define PSI_SEL_VR0_PLANE1_PSI0 0x04 +#define PSI_SEL_VR0_PLANE1_PSI1 0x08 +#define PSI_SEL_VR1_PLANE0_PSI0 0x10 +#define PSI_SEL_VR1_PLANE0_PSI1 0x20 +#define PSI_SEL_VR1_PLANE1_PSI0 0x40 +#define PSI_SEL_VR1_PLANE1_PSI1 0x80 + +// Throttler Control/Status Bits +#define THROTTLER_PADDING_BIT 0 +#define THROTTLER_TEMP_EDGE_BIT 1 +#define THROTTLER_TEMP_HOTSPOT_BIT 2 +#define THROTTLER_TEMP_MEM_BIT 3 +#define THROTTLER_TEMP_VR_GFX_BIT 4 +#define THROTTLER_TEMP_VR_MEM_BIT 5 +#define THROTTLER_TEMP_VR_SOC_BIT 6 +#define THROTTLER_TDC_GFX_BIT 7 +#define THROTTLER_TDC_SOC_BIT 8 +#define THROTTLER_PPT0_BIT 9 +#define THROTTLER_PPT1_BIT 10 +#define THROTTLER_PPT2_BIT 11 +#define THROTTLER_PPT3_BIT 12 +#define THROTTLER_PPM_BIT 13 +#define THROTTLER_FIT_BIT 14 +#define THROTTLER_APCC_BIT 15 + +// Table transfer status +#define TABLE_TRANSFER_OK 0x0 +#define TABLE_TRANSFER_FAILED 0xFF +#define TABLE_TRANSFER_PENDING 0xAB + +// Workload bits +#define WORKLOAD_PPLIB_DEFAULT_BIT 0 +#define WORKLOAD_PPLIB_POWER_SAVING_BIT 1 +#define WORKLOAD_PPLIB_VIDEO_BIT 2 +#define WORKLOAD_PPLIB_COMPUTE_BIT 3 +#define WORKLOAD_PPLIB_CUSTOM_BIT 4 +#define WORKLOAD_PPLIB_COUNT 5 + +//XGMI performance states +#define XGMI_STATE_D0 1 +#define XGMI_STATE_D3 0 + +#define NUM_I2C_CONTROLLERS 8 + +#define I2C_CONTROLLER_ENABLED 1 +#define I2C_CONTROLLER_DISABLED 0 + +#define MAX_SW_I2C_COMMANDS 8 + +typedef enum { + I2C_CONTROLLER_PORT_0 = 0, //CKSVII2C0 + I2C_CONTROLLER_PORT_1 = 1, //CKSVII2C1 + I2C_CONTROLLER_PORT_COUNT, +} I2cControllerPort_e; + +typedef enum { + I2C_CONTROLLER_NAME_VR_GFX = 0, + I2C_CONTROLLER_NAME_VR_SOC, + I2C_CONTROLLER_NAME_VR_MEM, + I2C_CONTROLLER_NAME_SPARE, + I2C_CONTROLLER_NAME_COUNT, +} 
I2cControllerName_e; + +typedef enum { + I2C_CONTROLLER_THROTTLER_TYPE_NONE = 0, + I2C_CONTROLLER_THROTTLER_VR_GFX, + I2C_CONTROLLER_THROTTLER_VR_SOC, + I2C_CONTROLLER_THROTTLER_VR_MEM, + I2C_CONTROLLER_THROTTLER_COUNT, +} I2cControllerThrottler_e; + +typedef enum { + I2C_CONTROLLER_PROTOCOL_VR_0, + I2C_CONTROLLER_PROTOCOL_VR_1, + I2C_CONTROLLER_PROTOCOL_TMP_0, + I2C_CONTROLLER_PROTOCOL_TMP_1, + I2C_CONTROLLER_PROTOCOL_SPARE_0, + I2C_CONTROLLER_PROTOCOL_SPARE_1, + I2C_CONTROLLER_PROTOCOL_COUNT, +} I2cControllerProtocol_e; + +typedef struct { + uint8_t Enabled; + uint8_t Speed; + uint8_t Padding[2]; + uint32_t SlaveAddress; + uint8_t ControllerPort; + uint8_t ControllerName; + uint8_t ThermalThrotter; + uint8_t I2cProtocol; +} I2cControllerConfig_t; + +typedef enum { + I2C_PORT_SVD_SCL = 0, + I2C_PORT_GPIO, +} I2cPort_e; + +typedef enum { + I2C_SPEED_FAST_50K = 0, //50 Kbits/s + I2C_SPEED_FAST_100K, //100 Kbits/s + I2C_SPEED_FAST_400K, //400 Kbits/s + I2C_SPEED_FAST_PLUS_1M, //1 Mbits/s (in fast mode) + I2C_SPEED_HIGH_1M, //1 Mbits/s (in high speed mode) + I2C_SPEED_HIGH_2M, //2.3 Mbits/s + I2C_SPEED_COUNT, +} I2cSpeed_e; + +typedef enum { + I2C_CMD_READ = 0, + I2C_CMD_WRITE, + I2C_CMD_COUNT, +} I2cCmdType_e; + +#define CMDCONFIG_STOP_BIT 0 +#define CMDCONFIG_RESTART_BIT 1 + +#define CMDCONFIG_STOP_MASK (1 << CMDCONFIG_STOP_BIT) +#define CMDCONFIG_RESTART_MASK (1 << CMDCONFIG_RESTART_BIT) + +typedef struct { + uint8_t RegisterAddr; ////only valid for write, ignored for read + uint8_t Cmd; //Read(0) or Write(1) + uint8_t Data; //Return data for read. Data to send for write + uint8_t CmdConfig; //Includes whether associated command should have a stop or restart command +} SwI2cCmd_t; //SW I2C Command Table + +typedef struct { + uint8_t I2CcontrollerPort; //CKSVII2C0(0) or //CKSVII2C1(1) + uint8_t I2CSpeed; //Slow(0) or Fast(1) + uint16_t SlaveAddress; + uint8_t NumCmds; //Number of commands + uint8_t Padding[3]; + + SwI2cCmd_t SwI2cCmds[MAX_SW_I2C_COMMANDS]; + + uint32_t MmHubPadding[8]; // SMU internal use + +} SwI2cRequest_t; // SW I2C Request Table + +//D3HOT sequences +//sequence codes from spec: atlvp4p01.amd.com:1677@//gpu/doc/soc_arch/spec/feature/BACO/Navi/Navi2x/ +typedef enum { + BACO_SEQUENCE, + MSR_SEQUENCE, + BAMACO_SEQUENCE, + ULPS_SEQUENCE, + D3HOT_SEQUENCE_COUNT, +}D3HOTSequence_e; + +//THis is aligned with RSMU PGFSM Register Mapping +typedef enum { + PG_DYNAMIC_MODE = 0, + PG_STATIC_MODE, +} PowerGatingMode_e; + +//This is aligned with RSMU PGFSM Register Mapping +typedef enum { + PG_POWER_DOWN = 0, + PG_POWER_UP, +} PowerGatingSettings_e; + +typedef struct { + uint32_t a; // store in IEEE float format in this variable + uint32_t b; // store in IEEE float format in this variable + uint32_t c; // store in IEEE float format in this variable +} QuadraticInt_t; + +typedef struct { + uint32_t m; // store in IEEE float format in this variable + uint32_t b; // store in IEEE float format in this variable +} LinearInt_t; + +typedef struct { + uint32_t a; // store in IEEE float format in this variable + uint32_t b; // store in IEEE float format in this variable + uint32_t c; // store in IEEE float format in this variable +} DroopInt_t; + +typedef enum { + GFXCLK_SOURCE_PLL = 0, + GFXCLK_SOURCE_AFLL, + GFXCLK_SOURCE_COUNT, +} GfxclkSrc_e; + +typedef enum { + PPCLK_GFXCLK, + PPCLK_VCLK, + PPCLK_DCLK, + PPCLK_SOCCLK, + PPCLK_UCLK, + PPCLK_FCLK, + PPCLK_COUNT, +} PPCLK_e; + +typedef enum { + TEMP_EDGE, + TEMP_HOTSPOT, + TEMP_MEM, + TEMP_VR_GFX, + TEMP_VR_SOC, + TEMP_VR_MEM, + TEMP_COUNT 
+} TEMP_TYPE_e; + +typedef enum { + PPT_THROTTLER_PPT0, + PPT_THROTTLER_PPT1, + PPT_THROTTLER_PPT2, + PPT_THROTTLER_PPT3, + PPT_THROTTLER_COUNT +} PPT_THROTTLER_e; + +typedef enum { + VOLTAGE_MODE_AVFS = 0, + VOLTAGE_MODE_AVFS_SS, + VOLTAGE_MODE_SS, + VOLTAGE_MODE_COUNT, +} VOLTAGE_MODE_e; + +typedef enum { + AVFS_VOLTAGE_GFX = 0, + AVFS_VOLTAGE_SOC, + AVFS_VOLTAGE_COUNT, +} AVFS_VOLTAGE_TYPE_e; + +typedef enum { + GPIO_INT_POLARITY_ACTIVE_LOW = 0, + GPIO_INT_POLARITY_ACTIVE_HIGH, +} GpioIntPolarity_e; + +typedef enum { + MEMORY_TYPE_GDDR6 = 0, + MEMORY_TYPE_HBM, +} MemoryType_e; + +typedef enum { + PWR_CONFIG_TDP = 0, + PWR_CONFIG_TGP, + PWR_CONFIG_TCP_ESTIMATED, + PWR_CONFIG_TCP_MEASURED, +} PwrConfig_e; + +typedef enum { + XGMI_LINK_RATE_12 = 0, // 12Gbps + XGMI_LINK_RATE_16, // 16Gbps + XGMI_LINK_RATE_22, // 22Gbps + XGMI_LINK_RATE_25, // 25Gbps + XGMI_LINK_RATE_COUNT +} XGMI_LINK_RATE_e; + +typedef enum { + XGMI_LINK_WIDTH_2 = 0, // x2 + XGMI_LINK_WIDTH_4, // x4 + XGMI_LINK_WIDTH_8, // x8 + XGMI_LINK_WIDTH_16, // x16 + XGMI_LINK_WIDTH_COUNT +} XGMI_LINK_WIDTH_e; + +typedef struct { + uint8_t VoltageMode; // 0 - AVFS only, 1- min(AVFS,SS), 2-SS only + uint8_t SnapToDiscrete; // 0 - Fine grained DPM, 1 - Discrete DPM + uint8_t NumDiscreteLevels; // Set to 2 (Fmin, Fmax) when using fine grained DPM, otherwise set to # discrete levels used + uint8_t padding; + LinearInt_t ConversionToAvfsClk; // Transfer function to AVFS Clock (GHz->GHz) + QuadraticInt_t SsCurve; // Slow-slow curve (GHz->V) + uint16_t SsFmin; // Fmin for SS curve. If SS curve is selected, will use V@SSFmin for F <= Fmin + uint16_t Padding16; +} DpmDescriptor_t; + +typedef struct { + uint32_t Version; + + // SECTION: Feature Enablement + uint32_t FeaturesToRun[2]; + + // SECTION: Infrastructure Limits + uint16_t SocketPowerLimitAc[PPT_THROTTLER_COUNT]; + uint16_t SocketPowerLimitAcTau[PPT_THROTTLER_COUNT]; + uint16_t TdcLimitSoc; // Amps + uint16_t TdcLimitSocTau; // Time constant of LPF in ms + uint16_t TdcLimitGfx; // Amps + uint16_t TdcLimitGfxTau; // Time constant of LPF in ms + + uint16_t TedgeLimit; // Celcius + uint16_t ThotspotLimit; // Celcius + uint16_t TmemLimit; // Celcius + uint16_t Tvr_gfxLimit; // Celcius + uint16_t Tvr_memLimit; // Celcius + uint16_t Tvr_socLimit; // Celcius + uint32_t FitLimit; // Failures in time (failures per million parts over the defined lifetime) + + uint16_t PpmPowerLimit; // Switch this this power limit when temperature is above PpmTempThreshold + uint16_t PpmTemperatureThreshold; + + // SECTION: Throttler settings + uint32_t ThrottlerControlMask; // See Throtter masks defines + + // SECTION: ULV Settings + uint16_t UlvVoltageOffsetGfx; // In mV(Q2) + uint16_t UlvPadding; // Padding + + uint8_t UlvGfxclkBypass; // 1 to turn off/bypass Gfxclk during ULV, 0 to leave Gfxclk on during ULV + uint8_t Padding234[3]; + + // SECTION: Voltage Control Parameters + uint16_t MinVoltageGfx; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_GFX + uint16_t MinVoltageSoc; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_SOC + uint16_t MaxVoltageGfx; // In mV(Q2) Maximum Voltage allowable of VDD_GFX + uint16_t MaxVoltageSoc; // In mV(Q2) Maximum Voltage allowable of VDD_SOC + + uint16_t LoadLineResistanceGfx; // In mOhms with 8 fractional bits + uint16_t LoadLineResistanceSoc; // In mOhms with 8 fractional bits + + //SECTION: DPM Config 1 + DpmDescriptor_t DpmDescriptor[PPCLK_COUNT]; + + uint16_t FreqTableGfx [NUM_GFXCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableVclk [NUM_VCLK_DPM_LEVELS ]; // In MHz + 
uint16_t FreqTableDclk [NUM_DCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableSocclk [NUM_SOCCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableUclk [NUM_UCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableFclk [NUM_FCLK_DPM_LEVELS ]; // In MHz + + uint32_t Paddingclks[16]; + + // SECTION: DPM Config 2 + uint16_t Mp0clkFreq [NUM_MP0CLK_DPM_LEVELS]; // in MHz + uint16_t Mp0DpmVoltage [NUM_MP0CLK_DPM_LEVELS]; // mV(Q2) + + // GFXCLK DPM + uint16_t GfxclkFidle; // In MHz + uint16_t GfxclkSlewRate; // for PLL babystepping??? + uint8_t Padding567[4]; + uint16_t GfxclkDsMaxFreq; // In MHz + uint8_t GfxclkSource; // 0 = PLL, 1 = AFLL + uint8_t Padding456; + + // GFXCLK Thermal DPM (formerly 'Boost' Settings) + uint16_t EnableTdpm; + uint16_t TdpmHighHystTemperature; + uint16_t TdpmLowHystTemperature; + uint16_t GfxclkFreqHighTempLimit; // High limit on GFXCLK when temperature is high, for reliability. + + // SECTION: Fan Control + uint16_t FanStopTemp; //Celcius + uint16_t FanStartTemp; //Celcius + + uint16_t FanGainEdge; + uint16_t FanGainHotspot; + uint16_t FanGainVrGfx; + uint16_t FanGainVrSoc; + uint16_t FanGainVrMem; + uint16_t FanGainHbm; + uint16_t FanPwmMin; + uint16_t FanAcousticLimitRpm; + uint16_t FanThrottlingRpm; + uint16_t FanMaximumRpm; + uint16_t FanTargetTemperature; + uint16_t FanTargetGfxclk; + uint8_t FanZeroRpmEnable; + uint8_t FanTachEdgePerRev; + uint8_t FanTempInputSelect; + uint8_t padding8_Fan; + + // The following are AFC override parameters. Leave at 0 to use FW defaults. + int16_t FuzzyFan_ErrorSetDelta; + int16_t FuzzyFan_ErrorRateSetDelta; + int16_t FuzzyFan_PwmSetDelta; + uint16_t FuzzyFan_Reserved; + + + // SECTION: AVFS + // Overrides + uint8_t OverrideAvfsGb[AVFS_VOLTAGE_COUNT]; + uint8_t Padding8_Avfs[2]; + + QuadraticInt_t qAvfsGb[AVFS_VOLTAGE_COUNT]; // GHz->V Override of fused curve + DroopInt_t dBtcGbGfxPll; // GHz->V BtcGb + DroopInt_t dBtcGbGfxAfll; // GHz->V BtcGb + DroopInt_t dBtcGbSoc; // GHz->V BtcGb + LinearInt_t qAgingGb[AVFS_VOLTAGE_COUNT]; // GHz->V + + QuadraticInt_t qStaticVoltageOffset[AVFS_VOLTAGE_COUNT]; // GHz->V + + uint16_t DcTol[AVFS_VOLTAGE_COUNT]; // mV Q2 + + uint8_t DcBtcEnabled[AVFS_VOLTAGE_COUNT]; + uint8_t Padding8_GfxBtc[2]; + + uint16_t DcBtcMin[AVFS_VOLTAGE_COUNT]; // mV Q2 + uint16_t DcBtcMax[AVFS_VOLTAGE_COUNT]; // mV Q2 + + uint16_t DcBtcGb[AVFS_VOLTAGE_COUNT]; // mV Q2 + + uint16_t SsFmin[10]; // PPtable value to function similar to VFTFmin for SS Curve; Size is PPCLK_COUNT rounded to nearest multiple of 2 + + // SECTION: XGMI + uint8_t XgmiLinkSpeed [NUM_XGMI_LEVELS]; + uint8_t XgmiLinkWidth [NUM_XGMI_LEVELS]; + + uint16_t XgmiFclkFreq [NUM_XGMI_LEVELS]; + uint16_t XgmiSocVoltage [NUM_XGMI_LEVELS]; + + // Temperature Dependent Vmin + uint16_t VDDGFX_TVmin; //Celcius + uint16_t VDDSOC_TVmin; //Celcius + uint16_t VDDGFX_Vmin_HiTemp; // mV Q2 + uint16_t VDDGFX_Vmin_LoTemp; // mV Q2 + uint16_t VDDSOC_Vmin_HiTemp; // mV Q2 + uint16_t VDDSOC_Vmin_LoTemp; // mV Q2 + + uint16_t VDDGFX_TVminHystersis; // Celcius + uint16_t VDDSOC_TVminHystersis; // Celcius + + + // SECTION: Advanced Options + uint32_t DebugOverrides; + QuadraticInt_t ReservedEquation0; + QuadraticInt_t ReservedEquation1; + QuadraticInt_t ReservedEquation2; + QuadraticInt_t ReservedEquation3; + + uint16_t MinVoltageUlvGfx; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_GFX in ULV mode + uint16_t PaddingUlv; // Padding + + // Total Power configuration, use defines from PwrConfig_e + uint8_t TotalPowerConfig; //0-TDP, 1-TGP, 2-TCP Estimated, 3-TCP Measured + uint8_t 
TotalPowerSpare1; + uint16_t TotalPowerSpare2; + + // APCC Settings + uint16_t PccThresholdLow; + uint16_t PccThresholdHigh; + uint32_t PaddingAPCC[6]; //FIXME pending SPEC + + // SECTION: Reserved + uint32_t Reserved[11]; + + // SECTION: BOARD PARAMETERS + + // SVI2 Board Parameters + uint16_t MaxVoltageStepGfx; // In mV(Q2) Max voltage step that SMU will request. Multiple steps are taken if voltage change exceeds this value. + uint16_t MaxVoltageStepSoc; // In mV(Q2) Max voltage step that SMU will request. Multiple steps are taken if voltage change exceeds this value. + + uint8_t VddGfxVrMapping; // Use VR_MAPPING* bitfields + uint8_t VddSocVrMapping; // Use VR_MAPPING* bitfields + uint8_t VddMemVrMapping; // Use VR_MAPPING* bitfields + uint8_t BoardVrMapping; // Use VR_MAPPING* bitfields + + uint8_t GfxUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode + uint8_t ExternalSensorPresent; // External RDI connected to TMON (aka TEMP IN) + uint8_t Padding8_V[2]; + + // Telemetry Settings + uint16_t GfxMaxCurrent; // in Amps + int8_t GfxOffset; // in Amps + uint8_t Padding_TelemetryGfx; + + uint16_t SocMaxCurrent; // in Amps + int8_t SocOffset; // in Amps + uint8_t Padding_TelemetrySoc; + + uint16_t MemMaxCurrent; // in Amps + int8_t MemOffset; // in Amps + uint8_t Padding_TelemetryMem; + + uint16_t BoardMaxCurrent; // in Amps + int8_t BoardOffset; // in Amps + uint8_t Padding_TelemetryBoardInput; + + // GPIO Settings + uint8_t VR0HotGpio; // GPIO pin configured for VR0 HOT event + uint8_t VR0HotPolarity; // GPIO polarity for VR0 HOT event + uint8_t VR1HotGpio; // GPIO pin configured for VR1 HOT event + uint8_t VR1HotPolarity; // GPIO polarity for VR1 HOT event + + // GFXCLK PLL Spread Spectrum + uint8_t PllGfxclkSpreadEnabled; // on or off + uint8_t PllGfxclkSpreadPercent; // Q4.4 + uint16_t PllGfxclkSpreadFreq; // kHz + + // UCLK Spread Spectrum + uint8_t UclkSpreadEnabled; // on or off + uint8_t UclkSpreadPercent; // Q4.4 + uint16_t UclkSpreadFreq; // kHz + + // FCLK Spread Spectrum + uint8_t FclkSpreadEnabled; // on or off + uint8_t FclkSpreadPercent; // Q4.4 + uint16_t FclkSpreadFreq; // kHz + + // GFXCLK Fll Spread Spectrum + uint8_t FllGfxclkSpreadEnabled; // on or off + uint8_t FllGfxclkSpreadPercent; // Q4.4 + uint16_t FllGfxclkSpreadFreq; // kHz + + // I2C Controller Structure + I2cControllerConfig_t I2cControllers[NUM_I2C_CONTROLLERS]; + + // Memory section + uint32_t MemoryChannelEnabled; // For DRAM use only, Max 32 channels enabled bit mask. + + uint8_t DramBitWidth; // For DRAM use only. 
See Dram Bit width type defines + uint8_t PaddingMem[3]; + + // Total board power + uint16_t TotalBoardPower; //Only needed for TCP Estimated case, where TCP = TGP+Total Board Power + uint16_t BoardPadding; + + uint32_t BoardReserved[10]; + + // Padding for MMHUB - do not modify this + uint32_t MmHubPadding[8]; // SMU internal use + +} PPTable_t; + +typedef struct { + // Time constant parameters for clock averages in ms + uint16_t GfxclkAverageLpfTau; + uint16_t SocclkAverageLpfTau; + uint16_t UclkAverageLpfTau; + uint16_t GfxActivityLpfTau; + uint16_t UclkActivityLpfTau; + + uint16_t Padding; + + // Padding - ignore + uint32_t MmHubPadding[8]; // SMU internal use +} DriverSmuConfig_t; + +typedef struct { + uint16_t CurrClock[PPCLK_COUNT]; + uint16_t AverageGfxclkFrequency; + uint16_t AverageSocclkFrequency; + uint16_t AverageUclkFrequency ; + uint16_t AverageGfxActivity ; + uint16_t AverageUclkActivity ; + uint8_t CurrSocVoltageOffset ; + uint8_t CurrGfxVoltageOffset ; + uint8_t CurrMemVidOffset ; + uint8_t Padding8 ; + uint16_t CurrSocketPower ; + uint16_t TemperatureEdge ; + uint16_t TemperatureHotspot ; + uint16_t TemperatureHBM ; + uint16_t TemperatureVrGfx ; + uint16_t TemperatureVrSoc ; + uint16_t TemperatureVrMem ; + uint32_t ThrottlerStatus ; + + // Padding - ignore + uint32_t MmHubPadding[7]; // SMU internal use +} SmuMetrics_t; + + +typedef struct { + uint16_t avgPsmCount[45]; + uint16_t minPsmCount[45]; + float avgPsmVoltage[45]; + float minPsmVoltage[45]; + + uint16_t avgScsPsmCount; + uint16_t minScsPsmCount; + float avgScsPsmVoltage; + float minScsPsmVoltage; + + uint32_t MmHubPadding[6]; // SMU internal use +} AvfsDebugTable_t; + +typedef struct { + uint8_t AvfsVersion; + uint8_t Padding; + uint8_t AvfsEn[AVFS_VOLTAGE_COUNT]; + + uint8_t OverrideVFT[AVFS_VOLTAGE_COUNT]; + uint8_t OverrideAvfsGb[AVFS_VOLTAGE_COUNT]; + + uint8_t OverrideTemperatures[AVFS_VOLTAGE_COUNT]; + uint8_t OverrideVInversion[AVFS_VOLTAGE_COUNT]; + uint8_t OverrideP2V[AVFS_VOLTAGE_COUNT]; + uint8_t OverrideP2VCharzFreq[AVFS_VOLTAGE_COUNT]; + + int32_t VFT0_m1[AVFS_VOLTAGE_COUNT]; // Q8.24 + int32_t VFT0_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 + int32_t VFT0_b[AVFS_VOLTAGE_COUNT]; // Q32 + + int32_t VFT1_m1[AVFS_VOLTAGE_COUNT]; // Q8.16 + int32_t VFT1_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 + int32_t VFT1_b[AVFS_VOLTAGE_COUNT]; // Q32 + + int32_t VFT2_m1[AVFS_VOLTAGE_COUNT]; // Q8.16 + int32_t VFT2_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 + int32_t VFT2_b[AVFS_VOLTAGE_COUNT]; // Q32 + + int32_t AvfsGb0_m1[AVFS_VOLTAGE_COUNT]; // Q8.24 + int32_t AvfsGb0_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 + int32_t AvfsGb0_b[AVFS_VOLTAGE_COUNT]; // Q32 + + int32_t AcBtcGb_m1[AVFS_VOLTAGE_COUNT]; // Q8.24 + int32_t AcBtcGb_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 + int32_t AcBtcGb_b[AVFS_VOLTAGE_COUNT]; // Q32 + + uint32_t AvfsTempCold[AVFS_VOLTAGE_COUNT]; + uint32_t AvfsTempMid[AVFS_VOLTAGE_COUNT]; + uint32_t AvfsTempHot[AVFS_VOLTAGE_COUNT]; + + uint32_t VInversion[AVFS_VOLTAGE_COUNT]; // in mV with 2 fractional bits + + + int32_t P2V_m1[AVFS_VOLTAGE_COUNT]; // Q8.24 + int32_t P2V_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 + int32_t P2V_b[AVFS_VOLTAGE_COUNT]; // Q32 + + uint32_t P2VCharzFreq[AVFS_VOLTAGE_COUNT]; // in 10KHz units + + uint32_t EnabledAvfsModules; + + uint32_t MmHubPadding[7]; // SMU internal use +} AvfsFuseOverride_t; + +/* NOT CURRENTLY USED +typedef struct { + uint8_t Gfx_ActiveHystLimit; + uint8_t Gfx_IdleHystLimit; + uint8_t Gfx_FPS; + uint8_t Gfx_MinActiveFreqType; + uint8_t Gfx_BoosterFreqType; + uint8_t Gfx_MinFreqStep; // Minimum 
delta between current and target frequeny in order for FW to change clock. + uint8_t Gfx_UseRlcBusy; + uint8_t PaddingGfx[3]; + uint16_t Gfx_MinActiveFreq; // MHz + uint16_t Gfx_BoosterFreq; // MHz + uint16_t Gfx_PD_Data_time_constant; // Time constant of PD controller in ms + uint32_t Gfx_PD_Data_limit_a; // Q16 + uint32_t Gfx_PD_Data_limit_b; // Q16 + uint32_t Gfx_PD_Data_limit_c; // Q16 + uint32_t Gfx_PD_Data_error_coeff; // Q16 + uint32_t Gfx_PD_Data_error_rate_coeff; // Q16 + + uint8_t Mem_ActiveHystLimit; + uint8_t Mem_IdleHystLimit; + uint8_t Mem_FPS; + uint8_t Mem_MinActiveFreqType; + uint8_t Mem_BoosterFreqType; + uint8_t Mem_MinFreqStep; // Minimum delta between current and target frequeny in order for FW to change clock. + uint8_t Mem_UseRlcBusy; + uint8_t PaddingMem[3]; + uint16_t Mem_MinActiveFreq; // MHz + uint16_t Mem_BoosterFreq; // MHz + uint16_t Mem_PD_Data_time_constant; // Time constant of PD controller in ms + uint32_t Mem_PD_Data_limit_a; // Q16 + uint32_t Mem_PD_Data_limit_b; // Q16 + uint32_t Mem_PD_Data_limit_c; // Q16 + uint32_t Mem_PD_Data_error_coeff; // Q16 + uint32_t Mem_PD_Data_error_rate_coeff; // Q16 + + uint32_t Mem_UpThreshold_Limit; // Q16 + uint8_t Mem_UpHystLimit; + uint8_t Mem_DownHystLimit; + uint16_t Mem_Fps; + + uint32_t MmHubPadding[8]; // SMU internal use +} DpmActivityMonitorCoeffInt_t; +*/ + +// These defines are used with the following messages: +// SMC_MSG_TransferTableDram2Smu +// SMC_MSG_TransferTableSmu2Dram +#define TABLE_PPTABLE 0 +#define TABLE_AVFS 1 +#define TABLE_AVFS_PSM_DEBUG 2 +#define TABLE_AVFS_FUSE_OVERRIDE 3 +#define TABLE_PMSTATUSLOG 4 +#define TABLE_SMU_METRICS 5 +#define TABLE_DRIVER_SMU_CONFIG 6 +//#define TABLE_ACTIVITY_MONITOR_COEFF 7 +#define TABLE_OVERDRIVE 7 +#define TABLE_WAFL_XGMI_TOPOLOGY 8 +#define TABLE_COUNT 9 + +// These defines are used with the SMC_MSG_SetUclkFastSwitch message. +typedef enum { + DF_SWITCH_TYPE_FAST = 0, + DF_SWITCH_TYPE_SLOW, + DF_SWITCH_TYPE_COUNT, +} DF_SWITCH_TYPE_e; + +typedef enum { + DRAM_BIT_WIDTH_DISABLED = 0, + DRAM_BIT_WIDTH_X_8, + DRAM_BIT_WIDTH_X_16, + DRAM_BIT_WIDTH_X_32, + DRAM_BIT_WIDTH_X_64, // NOT USED. 
+ DRAM_BIT_WIDTH_X_128, + DRAM_BIT_WIDTH_COUNT, +} DRAM_BIT_WIDTH_TYPE_e; + +#define REMOVE_FMAX_MARGIN_BIT 0x0 +#define REMOVE_DCTOL_MARGIN_BIT 0x1 +#define REMOVE_PLATFORM_MARGIN_BIT 0x2 + +#endif -- cgit v1.2.3 From e7773c1c1133c1fabeaa5442e03d3cef22431392 Mon Sep 17 00:00:00 2001 From: Chengming Gui Date: Tue, 9 Jul 2019 11:04:17 -0500 Subject: drm/amd/powerplay: get smc firmware and pptable get smc firmware and pptable for arcturus Signed-off-by: Chengming Gui Reviewed-by: Kevin Wang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index e8aedd762b80..3505b92a6540 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -46,6 +46,7 @@ #include "asic_reg/smuio/smuio_11_0_0_sh_mask.h" MODULE_FIRMWARE("amdgpu/vega20_smc.bin"); +MODULE_FIRMWARE("amdgpu/arcturus_smc.bin"); MODULE_FIRMWARE("amdgpu/navi10_smc.bin"); MODULE_FIRMWARE("amdgpu/navi14_smc.bin"); @@ -156,6 +157,9 @@ static int smu_v11_0_init_microcode(struct smu_context *smu) case CHIP_VEGA20: chip_name = "vega20"; break; + case CHIP_ARCTURUS: + chip_name = "arcturus"; + break; case CHIP_NAVI10: chip_name = "navi10"; break; @@ -207,7 +211,7 @@ static int smu_v11_0_load_microcode(struct smu_context *smu) uint32_t i; uint32_t mp1_fw_flags; - hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data; + hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data; src = (const uint32_t *)(adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); @@ -300,7 +304,8 @@ static int smu_v11_0_set_pptable_v2_0(struct smu_context *smu, void **table, uin return 0; } -static int smu_v11_0_set_pptable_v2_1(struct smu_context *smu, void **table, uint32_t *size, uint32_t pptable_id) +static int smu_v11_0_set_pptable_v2_1(struct smu_context *smu, void **table, + uint32_t *size, uint32_t pptable_id) { struct amdgpu_device *adev = smu->adev; const struct smc_firmware_header_v2_1 *v2_1; -- cgit v1.2.3 From 75b2fce2d89e7025a88d39efc194413f9a3386ea Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 4 Jun 2019 14:58:49 +0800 Subject: drm/amdgpu: skip get/update xgmi topology info when no psp exists We don't currently have psp support for arcturus so provide a alternative mechanism in the meantime. 
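A minimal sketch of the alternative mechanism (illustrative only, not part of the commit; the helper name xgmi_sync_topology is hypothetical, and it condenses the amdgpu_xgmi.c hunk below using only calls that appear there):

	static int xgmi_sync_topology(struct amdgpu_hive_info *hive,
				      struct amdgpu_device *adev,
				      struct amdgpu_device *tmp_adev,
				      int count)
	{
		int ret;

		/* No PSP IP block (e.g. early arcturus bring-up): keep the
		 * locally built node list and report success. */
		if (!amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP))
			return 0;

		/* Push the updated node list to the device ... */
		ret = amdgpu_xgmi_update_topology(hive, tmp_adev);
		if (ret)
			return ret;

		/* ... then read back the authoritative topology from PSP. */
		return psp_xgmi_get_topology_info(&tmp_adev->psp, count,
						  &tmp_adev->psp.xgmi_context.top_info);
	}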
Signed-off-by: Le Ma Reviewed-by: Feifei Xu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 47 +++++++++++++++++--------------- 1 file changed, 25 insertions(+), 22 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index b024070ad1cf..41e648d56eca 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -296,7 +296,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) struct amdgpu_xgmi *entry; struct amdgpu_device *tmp_adev = NULL; - int count = 0, ret = -EINVAL; + int count = 0, ret = 0; if (!adev->gmc.xgmi.supported) return 0; @@ -337,29 +337,32 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) top_info->num_nodes = count; hive->number_devices = count; - list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { - /* update node list for other device in the hive */ - if (tmp_adev != adev) { - top_info = &tmp_adev->psp.xgmi_context.top_info; - top_info->nodes[count - 1].node_id = adev->gmc.xgmi.node_id; - top_info->num_nodes = count; + if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) { + list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { + /* update node list for other device in the hive */ + if (tmp_adev != adev) { + top_info = &tmp_adev->psp.xgmi_context.top_info; + top_info->nodes[count - 1].node_id = + adev->gmc.xgmi.node_id; + top_info->num_nodes = count; + } + ret = amdgpu_xgmi_update_topology(hive, tmp_adev); + if (ret) + goto exit; } - ret = amdgpu_xgmi_update_topology(hive, tmp_adev); - if (ret) - goto exit; - } - /* get latest topology info for each device from psp */ - list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { - ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count, - &tmp_adev->psp.xgmi_context.top_info); - if (ret) { - dev_err(tmp_adev->dev, - "XGMI: Get topology failure on device %llx, hive %llx, ret %d", - tmp_adev->gmc.xgmi.node_id, - tmp_adev->gmc.xgmi.hive_id, ret); - /* To do : continue with some node failed or disable the whole hive */ - goto exit; + /* get latest topology info for each device from psp */ + list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { + ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count, + &tmp_adev->psp.xgmi_context.top_info); + if (ret) { + dev_err(tmp_adev->dev, + "XGMI: Get topology failure on device %llx, hive %llx, ret %d", + tmp_adev->gmc.xgmi.node_id, + tmp_adev->gmc.xgmi.hive_id, ret); + /* To do : continue with some node failed or disable the whole hive */ + goto exit; + } } } -- cgit v1.2.3 From 7d0670f44199f8f38227726843952b7eb3e407ab Mon Sep 17 00:00:00 2001 From: Le Ma Date: Fri, 26 Apr 2019 16:36:44 +0800 Subject: drm/amdgpu: set system aperture to cover whole FB region in mmhub v9.4 In XGMI configuration, the FB region covers vram region from peer device, adjust system aperture to cover all of them Signed-off-by: Le Ma Reviewed-by: Feifei Xu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c index 6b7cdaadbd70..c0eb8f0a2182 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c @@ -114,12 +114,11 @@ static void mmhub_v9_4_init_system_aperture_regs(struct amdgpu_device *adev, WREG32_SOC15_OFFSET(MMHUB, 0, 
mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_LOW_ADDR, hubid * MMHUB_INSTANCE_REGISTER_OFFSET, - min(adev->gmc.vram_start, adev->gmc.agp_start) - >> 18); + min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); WREG32_SOC15_OFFSET(MMHUB, 0, mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_HIGH_ADDR, hubid * MMHUB_INSTANCE_REGISTER_OFFSET, - max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18); + max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); /* Set default page address. */ value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start + -- cgit v1.2.3 From 5fb7c66508223d62238b5c3574f0ca36effa52af Mon Sep 17 00:00:00 2001 From: Le Ma Date: Mon, 20 May 2019 17:04:05 +0800 Subject: drm/amdgpu: correct ip for mmHDP_READ_CACHE_INVALIDATE register access Use the proper IP index for HDP registers. Signed-off-by: Le Ma Reviewed-by: Feifei Xu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 232adf83a7bf..a076001b326d 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -703,7 +703,7 @@ static void soc15_invalidate_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) { if (!ring || !ring->funcs->emit_wreg) - WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1); + WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1); else amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET( HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1); -- cgit v1.2.3 From b6689cf7b9cd2600ebd6981e19fb5f697819a60b Mon Sep 17 00:00:00 2001 From: Oak Zeng Date: Wed, 5 Jun 2019 15:45:26 -0500 Subject: drm/amdkfd: Set number of xgmi optimized SDMA engines for arcturus some sdma engines are optimized for xgmi on arcturus. Signed-off-by: Oak Zeng Reviewed-by: Yong Zhao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 511bc2523f2f..cc22289c92d1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -329,7 +329,8 @@ static const struct kfd_device_info arcturus_device_info = { .supports_cwsr = true, .needs_iommu_device = false, .needs_pci_atomics = false, - .num_sdma_engines = 8, + .num_sdma_engines = 2, + .num_xgmi_sdma_engines = 6, .num_sdma_queues_per_engine = 8, }; -- cgit v1.2.3 From 8a6fcd35326e95dea685fbccce9ed20172080484 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Tue, 4 Jun 2019 14:41:48 -0400 Subject: drm/amdgpu/: add clientID for 2nd vcn instance add clientID for 2nd vcn instance, remove unused SOC15_IH_CLIENTID_SYSHUB. 
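As a usage sketch (illustrative only, not part of the commit; the helper name is hypothetical), an interrupt handler can map the IH client ID onto a VCN instance index once the second alias exists:

	static int vcn_instance_from_client_id(unsigned int client_id)
	{
		switch (client_id) {
		case SOC15_IH_CLIENTID_VCN:	/* alias of SOC15_IH_CLIENTID_UVD */
			return 0;
		case SOC15_IH_CLIENTID_VCN1:	/* alias of SOC15_IH_CLIENTID_UVD1, added by this patch */
			return 1;
		default:
			return -EINVAL;
		}
	}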
Signed-off-by: James Zhu Reviewed-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/soc15_ih_clientid.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/include/soc15_ih_clientid.h b/drivers/gpu/drm/amd/include/soc15_ih_clientid.h index 0f386b2e1f4f..1794ad1fc4fc 100644 --- a/drivers/gpu/drm/amd/include/soc15_ih_clientid.h +++ b/drivers/gpu/drm/amd/include/soc15_ih_clientid.h @@ -42,7 +42,6 @@ enum soc15_ih_clientid { SOC15_IH_CLIENTID_SE1SH = 0x0b, SOC15_IH_CLIENTID_SE2SH = 0x0c, SOC15_IH_CLIENTID_SE3SH = 0x0d, - SOC15_IH_CLIENTID_SYSHUB = 0x0e, SOC15_IH_CLIENTID_UVD1 = 0x0e, SOC15_IH_CLIENTID_THM = 0x0f, SOC15_IH_CLIENTID_UVD = 0x10, @@ -64,6 +63,7 @@ enum soc15_ih_clientid { SOC15_IH_CLIENTID_MAX, SOC15_IH_CLIENTID_VCN = SOC15_IH_CLIENTID_UVD, + SOC15_IH_CLIENTID_VCN1 = SOC15_IH_CLIENTID_UVD1, SOC15_IH_CLIENTID_SDMA2 = SOC15_IH_CLIENTID_ACP, SOC15_IH_CLIENTID_SDMA3 = SOC15_IH_CLIENTID_DCE, SOC15_IH_CLIENTID_SDMA4 = SOC15_IH_CLIENTID_ISP, -- cgit v1.2.3 From a07d163c90bf7d649998fb43ac9eb9a01764d662 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Tue, 4 Jun 2019 14:44:33 -0400 Subject: drm/amdgpu/: add ucodeID for 2nd vcn instance add ucodeID for 2nd vcn instance Signed-off-by: James Zhu Reviewed-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h index c1fb6dc86440..2be106e81eda 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h @@ -281,6 +281,7 @@ enum AMDGPU_UCODE_ID { AMDGPU_UCODE_ID_UVD1, AMDGPU_UCODE_ID_VCE, AMDGPU_UCODE_ID_VCN, + AMDGPU_UCODE_ID_VCN1, AMDGPU_UCODE_ID_DMCU_ERAM, AMDGPU_UCODE_ID_DMCU_INTV, AMDGPU_UCODE_ID_VCN0_RAM, -- cgit v1.2.3 From 6da061dca995ea5a784762cd1f9b38b4afd78fc0 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Tue, 4 Jun 2019 14:47:10 -0400 Subject: drm/amdgpu/: add doorbell assignment for 2nd vcn instance add doorbell assignment for 2nd vcn instance Signed-off-by: James Zhu Reviewed-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h index 790263dcc064..3fa18003d4d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h @@ -130,13 +130,18 @@ typedef enum _AMDGPU_VEGA20_DOORBELL_ASSIGNMENT AMDGPU_VEGA20_DOORBELL_IH = 0x178, /* MMSCH: 392~407 * overlap the doorbell assignment with VCN as they are mutually exclusive - * VCE engine's doorbell is 32 bit and two VCE ring share one QWORD + * VCN engine's doorbell is 32 bit and two VCN ring share one QWORD */ - AMDGPU_VEGA20_DOORBELL64_VCN0_1 = 0x188, /* lower 32 bits for VNC0 and upper 32 bits for VNC1 */ + AMDGPU_VEGA20_DOORBELL64_VCN0_1 = 0x188, /* VNC0 */ AMDGPU_VEGA20_DOORBELL64_VCN2_3 = 0x189, AMDGPU_VEGA20_DOORBELL64_VCN4_5 = 0x18A, AMDGPU_VEGA20_DOORBELL64_VCN6_7 = 0x18B, + AMDGPU_VEGA20_DOORBELL64_VCN8_9 = 0x18C, /* VNC1 */ + AMDGPU_VEGA20_DOORBELL64_VCNa_b = 0x18D, + AMDGPU_VEGA20_DOORBELL64_VCNc_d = 0x18E, + AMDGPU_VEGA20_DOORBELL64_VCNe_f = 0x18F, + AMDGPU_VEGA20_DOORBELL64_UVD_RING0_1 = 0x188, AMDGPU_VEGA20_DOORBELL64_UVD_RING2_3 = 0x189, AMDGPU_VEGA20_DOORBELL64_UVD_RING4_5 = 
0x18A, -- cgit v1.2.3 From 8b75a521c05fbb16c944ef30d0503942c219886d Mon Sep 17 00:00:00 2001 From: James Zhu Date: Tue, 9 Jul 2019 11:07:51 -0500 Subject: drm/amdgpu/: increase AMDGPU_MAX_RINGS to add 2nd vcn instance increase AMDGPU_MAX_RINGS to add 2nd vcn instance Signed-off-by: James Zhu Reviewed-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index 4410c97ac9b7..930316e60155 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -29,7 +29,7 @@ #include /* max number of rings */ -#define AMDGPU_MAX_RINGS 24 +#define AMDGPU_MAX_RINGS 28 #define AMDGPU_MAX_GFX_RINGS 2 #define AMDGPU_MAX_COMPUTE_RINGS 8 #define AMDGPU_MAX_VCE_RINGS 3 -- cgit v1.2.3 From 989b6a0549977faf0b5b8d7e1c2634e880c579a2 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Wed, 10 Jul 2019 10:50:24 -0500 Subject: drm/amdgpu: add vcn nbio doorbell range setting for 2nd vcn instance add vcn nbio doorbell range setting for 2nd vcn instance Signed-off-by: James Zhu Reviewed-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 +- drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c | 2 +- drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 20 +++++++++++++++++--- drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 2 +- 5 files changed, 21 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index b601da7eb97c..29123aa16113 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -674,7 +674,7 @@ struct amdgpu_nbio_funcs { void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance, bool use_doorbell, int doorbell_index, int doorbell_size); void (*vcn_doorbell_range)(struct amdgpu_device *adev, bool use_doorbell, - int doorbell_index); + int doorbell_index, int instance); void (*enable_doorbell_aperture)(struct amdgpu_device *adev, bool enable); void (*enable_doorbell_selfring_aperture)(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c index 835d7b1a841f..c05d78d4efc6 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c @@ -92,7 +92,7 @@ static void nbio_v2_3_sdma_doorbell_range(struct amdgpu_device *adev, int instan } static void nbio_v2_3_vcn_doorbell_range(struct amdgpu_device *adev, bool use_doorbell, - int doorbell_index) + int doorbell_index, int instance) { u32 reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH0_DOORBELL_RANGE); diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c index d8c9972a315b..910fffced43b 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c @@ -42,6 +42,14 @@ #define GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L #define GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L +#define mmBIF_MMSCH1_DOORBELL_RANGE 0x01dc +#define mmBIF_MMSCH1_DOORBELL_RANGE_BASE_IDX 2 +//BIF_MMSCH1_DOORBELL_RANGE +#define BIF_MMSCH1_DOORBELL_RANGE__OFFSET__SHIFT 0x2 +#define BIF_MMSCH1_DOORBELL_RANGE__SIZE__SHIFT 0x10 +#define BIF_MMSCH1_DOORBELL_RANGE__OFFSET_MASK 0x00000FFCL +#define BIF_MMSCH1_DOORBELL_RANGE__SIZE_MASK 0x001F0000L + static void nbio_v7_4_remap_hdp_registers(struct amdgpu_device 
*adev) { WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL, @@ -115,11 +123,17 @@ static void nbio_v7_4_sdma_doorbell_range(struct amdgpu_device *adev, int instan } static void nbio_v7_4_vcn_doorbell_range(struct amdgpu_device *adev, bool use_doorbell, - int doorbell_index) + int doorbell_index, int instance) { - u32 reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH0_DOORBELL_RANGE); + u32 reg; + u32 doorbell_range; + + if (instance) + reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH1_DOORBELL_RANGE); + else + reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH0_DOORBELL_RANGE); - u32 doorbell_range = RREG32(reg); + doorbell_range = RREG32(reg); if (use_doorbell) { doorbell_range = REG_SET_FIELD(doorbell_range, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index ebef2f663654..b6b77a063c34 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -245,7 +245,7 @@ static int vcn_v2_0_hw_init(void *handle) int i, r; adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell, - ring->doorbell_index); + ring->doorbell_index, 0); ring->sched.ready = true; r = amdgpu_ring_test_ring(ring); diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index 840737df19c0..75fdb6881ac0 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -212,7 +212,7 @@ static int vcn_v2_5_hw_init(void *handle) int i, r; adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell, - ring->doorbell_index); + ring->doorbell_index, 0); r = amdgpu_ring_test_ring(ring); if (r) { -- cgit v1.2.3 From c01b6a1d38675652199d12b898c1c23b96b5055f Mon Sep 17 00:00:00 2001 From: James Zhu Date: Wed, 10 Jul 2019 10:53:34 -0500 Subject: drm/amdgpu: modify amdgpu_vcn to support multiple instances Arcturus has dual-VCN. Need Restruct amdgpu_device::vcn to support multiple vcns. 
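In outline (an illustrative condensation of the amdgpu_vcn.h hunk below, not the complete change), per-instance state moves into a new struct that struct amdgpu_vcn carries as an array, so callers index it as adev->vcn.inst[i]:

	struct amdgpu_vcn_inst {
		struct amdgpu_bo	*vcpu_bo;
		struct amdgpu_ring	ring_dec;
		struct amdgpu_ring	ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
		struct amdgpu_ring	ring_jpeg;
		struct amdgpu_irq_src	irq;
		struct amdgpu_vcn_reg	external;
	};

	struct amdgpu_vcn {
		const struct firmware	*fw;	/* shared VCN firmware */
		unsigned		num_enc_rings;
		uint8_t			num_vcn_inst;
		struct amdgpu_vcn_inst	inst[AMDGPU_MAX_VCN_INSTANCES];
		/* ... shared idle-work/DPG state elided ... */
	};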
There are no any logical changes here Signed-off-by: James Zhu Reviewed-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 6 +- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 6 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 68 ++++++++++---------- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 24 +++++-- drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 110 ++++++++++++++++---------------- drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 106 +++++++++++++++--------------- drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 87 ++++++++++++------------- 7 files changed, 210 insertions(+), 197 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index f539a2a92774..82b871fdfb45 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -154,15 +154,15 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, num_rings = 1; break; case AMDGPU_HW_IP_VCN_DEC: - rings[0] = &adev->vcn.ring_dec; + rings[0] = &adev->vcn.inst[0].ring_dec; num_rings = 1; break; case AMDGPU_HW_IP_VCN_ENC: - rings[0] = &adev->vcn.ring_enc[0]; + rings[0] = &adev->vcn.inst[0].ring_enc[0]; num_rings = 1; break; case AMDGPU_HW_IP_VCN_JPEG: - rings[0] = &adev->vcn.ring_jpeg; + rings[0] = &adev->vcn.inst[0].ring_jpeg; num_rings = 1; break; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 0cf7e8606fd3..4824a2b5f29b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -408,7 +408,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, break; case AMDGPU_HW_IP_VCN_DEC: type = AMD_IP_BLOCK_TYPE_VCN; - if (adev->vcn.ring_dec.sched.ready) + if (adev->vcn.inst[0].ring_dec.sched.ready) ++num_rings; ib_start_alignment = 16; ib_size_alignment = 16; @@ -416,14 +416,14 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, case AMDGPU_HW_IP_VCN_ENC: type = AMD_IP_BLOCK_TYPE_VCN; for (i = 0; i < adev->vcn.num_enc_rings; i++) - if (adev->vcn.ring_enc[i].sched.ready) + if (adev->vcn.inst[0].ring_enc[i].sched.ready) ++num_rings; ib_start_alignment = 64; ib_size_alignment = 1; break; case AMDGPU_HW_IP_VCN_JPEG: type = AMD_IP_BLOCK_TYPE_VCN; - if (adev->vcn.ring_jpeg.sched.ready) + if (adev->vcn.inst[0].ring_jpeg.sched.ready) ++num_rings; ib_start_alignment = 16; ib_size_alignment = 16; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index a119a7df0305..c102267da85d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -147,8 +147,8 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8); r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo, - &adev->vcn.gpu_addr, &adev->vcn.cpu_addr); + AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[0].vcpu_bo, + &adev->vcn.inst[0].gpu_addr, &adev->vcn.inst[0].cpu_addr); if (r) { dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r); return r; @@ -171,7 +171,7 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev) { int i; - kvfree(adev->vcn.saved_bo); + kvfree(adev->vcn.inst[0].saved_bo); if (adev->vcn.indirect_sram) { amdgpu_bo_free_kernel(&adev->vcn.dpg_sram_bo, @@ -179,16 +179,16 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev) (void **)&adev->vcn.dpg_sram_cpu_addr); } - amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo, - 
&adev->vcn.gpu_addr, - (void **)&adev->vcn.cpu_addr); + amdgpu_bo_free_kernel(&adev->vcn.inst[0].vcpu_bo, + &adev->vcn.inst[0].gpu_addr, + (void **)&adev->vcn.inst[0].cpu_addr); - amdgpu_ring_fini(&adev->vcn.ring_dec); + amdgpu_ring_fini(&adev->vcn.inst[0].ring_dec); for (i = 0; i < adev->vcn.num_enc_rings; ++i) - amdgpu_ring_fini(&adev->vcn.ring_enc[i]); + amdgpu_ring_fini(&adev->vcn.inst[0].ring_enc[i]); - amdgpu_ring_fini(&adev->vcn.ring_jpeg); + amdgpu_ring_fini(&adev->vcn.inst[0].ring_jpeg); release_firmware(adev->vcn.fw); @@ -202,17 +202,17 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev) cancel_delayed_work_sync(&adev->vcn.idle_work); - if (adev->vcn.vcpu_bo == NULL) + if (adev->vcn.inst[0].vcpu_bo == NULL) return 0; - size = amdgpu_bo_size(adev->vcn.vcpu_bo); - ptr = adev->vcn.cpu_addr; + size = amdgpu_bo_size(adev->vcn.inst[0].vcpu_bo); + ptr = adev->vcn.inst[0].cpu_addr; - adev->vcn.saved_bo = kvmalloc(size, GFP_KERNEL); - if (!adev->vcn.saved_bo) + adev->vcn.inst[0].saved_bo = kvmalloc(size, GFP_KERNEL); + if (!adev->vcn.inst[0].saved_bo) return -ENOMEM; - memcpy_fromio(adev->vcn.saved_bo, ptr, size); + memcpy_fromio(adev->vcn.inst[0].saved_bo, ptr, size); return 0; } @@ -222,16 +222,16 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev) unsigned size; void *ptr; - if (adev->vcn.vcpu_bo == NULL) + if (adev->vcn.inst[0].vcpu_bo == NULL) return -EINVAL; - size = amdgpu_bo_size(adev->vcn.vcpu_bo); - ptr = adev->vcn.cpu_addr; + size = amdgpu_bo_size(adev->vcn.inst[0].vcpu_bo); + ptr = adev->vcn.inst[0].cpu_addr; - if (adev->vcn.saved_bo != NULL) { - memcpy_toio(ptr, adev->vcn.saved_bo, size); - kvfree(adev->vcn.saved_bo); - adev->vcn.saved_bo = NULL; + if (adev->vcn.inst[0].saved_bo != NULL) { + memcpy_toio(ptr, adev->vcn.inst[0].saved_bo, size); + kvfree(adev->vcn.inst[0].saved_bo); + adev->vcn.inst[0].saved_bo = NULL; } else { const struct common_firmware_header *hdr; unsigned offset; @@ -239,7 +239,7 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev) hdr = (const struct common_firmware_header *)adev->vcn.fw->data; if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { offset = le32_to_cpu(hdr->ucode_array_offset_bytes); - memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset, + memcpy_toio(adev->vcn.inst[0].cpu_addr, adev->vcn.fw->data + offset, le32_to_cpu(hdr->ucode_size_bytes)); size -= le32_to_cpu(hdr->ucode_size_bytes); ptr += le32_to_cpu(hdr->ucode_size_bytes); @@ -258,7 +258,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work) unsigned int i; for (i = 0; i < adev->vcn.num_enc_rings; ++i) { - fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]); + fences += amdgpu_fence_count_emitted(&adev->vcn.inst[0].ring_enc[i]); } if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { @@ -269,7 +269,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work) else new_state.fw_based = VCN_DPG_STATE__UNPAUSE; - if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg)) + if (amdgpu_fence_count_emitted(&adev->vcn.inst[0].ring_jpeg)) new_state.jpeg = VCN_DPG_STATE__PAUSE; else new_state.jpeg = VCN_DPG_STATE__UNPAUSE; @@ -277,8 +277,8 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work) adev->vcn.pause_dpg_mode(adev, &new_state); } - fences += amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg); - fences += amdgpu_fence_count_emitted(&adev->vcn.ring_dec); + fences += amdgpu_fence_count_emitted(&adev->vcn.inst[0].ring_jpeg); + fences += amdgpu_fence_count_emitted(&adev->vcn.inst[0].ring_dec); if (fences == 0) { amdgpu_gfx_off_ctrl(adev, 
true); @@ -312,14 +312,14 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring) unsigned int i; for (i = 0; i < adev->vcn.num_enc_rings; ++i) { - fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]); + fences += amdgpu_fence_count_emitted(&adev->vcn.inst[0].ring_enc[i]); } if (fences) new_state.fw_based = VCN_DPG_STATE__PAUSE; else new_state.fw_based = VCN_DPG_STATE__UNPAUSE; - if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg)) + if (amdgpu_fence_count_emitted(&adev->vcn.inst[0].ring_jpeg)) new_state.jpeg = VCN_DPG_STATE__PAUSE; else new_state.jpeg = VCN_DPG_STATE__UNPAUSE; @@ -345,7 +345,7 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring) unsigned i; int r; - WREG32(adev->vcn.external.scratch9, 0xCAFEDEAD); + WREG32(adev->vcn.inst[0].external.scratch9, 0xCAFEDEAD); r = amdgpu_ring_alloc(ring, 3); if (r) return r; @@ -353,7 +353,7 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring) amdgpu_ring_write(ring, 0xDEADBEEF); amdgpu_ring_commit(ring); for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(adev->vcn.external.scratch9); + tmp = RREG32(adev->vcn.inst[0].external.scratch9); if (tmp == 0xDEADBEEF) break; udelay(1); @@ -664,7 +664,7 @@ int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring) unsigned i; int r; - WREG32(adev->vcn.external.jpeg_pitch, 0xCAFEDEAD); + WREG32(adev->vcn.inst[0].external.jpeg_pitch, 0xCAFEDEAD); r = amdgpu_ring_alloc(ring, 3); if (r) return r; @@ -674,7 +674,7 @@ int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring) amdgpu_ring_commit(ring); for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(adev->vcn.external.jpeg_pitch); + tmp = RREG32(adev->vcn.inst[0].external.jpeg_pitch); if (tmp == 0xDEADBEEF) break; udelay(1); @@ -748,7 +748,7 @@ int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout) } for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(adev->vcn.external.jpeg_pitch); + tmp = RREG32(adev->vcn.inst[0].external.jpeg_pitch); if (tmp == 0xDEADBEEF) break; udelay(1); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index bfd8c3cea13a..d2fc47a954ab 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -30,6 +30,8 @@ #define AMDGPU_VCN_FIRMWARE_OFFSET 256 #define AMDGPU_VCN_MAX_ENC_RINGS 3 +#define AMDGPU_MAX_VCN_INSTANCES 2 + #define VCN_DEC_CMD_FENCE 0x00000000 #define VCN_DEC_CMD_TRAP 0x00000001 #define VCN_DEC_CMD_WRITE_REG 0x00000004 @@ -155,30 +157,38 @@ struct amdgpu_vcn_reg{ unsigned jpeg_pitch; }; -struct amdgpu_vcn { +struct amdgpu_vcn_inst { struct amdgpu_bo *vcpu_bo; void *cpu_addr; uint64_t gpu_addr; - unsigned fw_version; void *saved_bo; - struct delayed_work idle_work; - const struct firmware *fw; /* VCN firmware */ struct amdgpu_ring ring_dec; struct amdgpu_ring ring_enc[AMDGPU_VCN_MAX_ENC_RINGS]; struct amdgpu_ring ring_jpeg; struct amdgpu_irq_src irq; + struct amdgpu_vcn_reg external; +}; + +struct amdgpu_vcn { + unsigned fw_version; + struct delayed_work idle_work; + const struct firmware *fw; /* VCN firmware */ unsigned num_enc_rings; enum amd_powergating_state cur_state; struct dpg_pause_state pause_state; - struct amdgpu_vcn_reg internal, external; - int (*pause_dpg_mode)(struct amdgpu_device *adev, - struct dpg_pause_state *new_state); bool indirect_sram; struct amdgpu_bo *dpg_sram_bo; void *dpg_sram_cpu_addr; uint64_t dpg_sram_gpu_addr; uint32_t *dpg_sram_curr_addr; + + uint8_t num_vcn_inst; + struct amdgpu_vcn_inst inst[AMDGPU_MAX_VCN_INSTANCES]; + struct 
amdgpu_vcn_reg internal; + + int (*pause_dpg_mode)(struct amdgpu_device *adev, + struct dpg_pause_state *new_state); }; int amdgpu_vcn_sw_init(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index 916e32533c1b..93b3500e522b 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -63,6 +63,7 @@ static int vcn_v1_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + adev->vcn.num_vcn_inst = 1; adev->vcn.num_enc_rings = 2; vcn_v1_0_set_dec_ring_funcs(adev); @@ -87,20 +88,21 @@ static int vcn_v1_0_sw_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* VCN DEC TRAP */ - r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.irq); + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, + VCN_1_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst->irq); if (r) return r; /* VCN ENC TRAP */ for (i = 0; i < adev->vcn.num_enc_rings; ++i) { r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + VCN_1_0__SRCID__UVD_ENC_GENERAL_PURPOSE, - &adev->vcn.irq); + &adev->vcn.inst->irq); if (r) return r; } /* VCN JPEG TRAP */ - r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 126, &adev->vcn.irq); + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, 126, &adev->vcn.inst->irq); if (r) return r; @@ -122,39 +124,39 @@ static int vcn_v1_0_sw_init(void *handle) if (r) return r; - ring = &adev->vcn.ring_dec; + ring = &adev->vcn.inst->ring_dec; sprintf(ring->name, "vcn_dec"); - r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0); if (r) return r; - adev->vcn.internal.scratch9 = adev->vcn.external.scratch9 = + adev->vcn.internal.scratch9 = adev->vcn.inst->external.scratch9 = SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9); - adev->vcn.internal.data0 = adev->vcn.external.data0 = + adev->vcn.internal.data0 = adev->vcn.inst->external.data0 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0); - adev->vcn.internal.data1 = adev->vcn.external.data1 = + adev->vcn.internal.data1 = adev->vcn.inst->external.data1 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1); - adev->vcn.internal.cmd = adev->vcn.external.cmd = + adev->vcn.internal.cmd = adev->vcn.inst->external.cmd = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD); - adev->vcn.internal.nop = adev->vcn.external.nop = + adev->vcn.internal.nop = adev->vcn.inst->external.nop = SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP); for (i = 0; i < adev->vcn.num_enc_rings; ++i) { - ring = &adev->vcn.ring_enc[i]; + ring = &adev->vcn.inst->ring_enc[i]; sprintf(ring->name, "vcn_enc%d", i); - r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0); if (r) return r; } - ring = &adev->vcn.ring_jpeg; + ring = &adev->vcn.inst->ring_jpeg; sprintf(ring->name, "vcn_jpeg"); - r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0); if (r) return r; adev->vcn.pause_dpg_mode = vcn_v1_0_pause_dpg_mode; - adev->vcn.internal.jpeg_pitch = adev->vcn.external.jpeg_pitch = + adev->vcn.internal.jpeg_pitch = adev->vcn.inst->external.jpeg_pitch = SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH); return 0; @@ -191,7 +193,7 @@ static int vcn_v1_0_sw_fini(void *handle) static int vcn_v1_0_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = 
&adev->vcn.ring_dec; + struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec; int i, r; r = amdgpu_ring_test_helper(ring); @@ -199,14 +201,14 @@ static int vcn_v1_0_hw_init(void *handle) goto done; for (i = 0; i < adev->vcn.num_enc_rings; ++i) { - ring = &adev->vcn.ring_enc[i]; + ring = &adev->vcn.inst->ring_enc[i]; ring->sched.ready = true; r = amdgpu_ring_test_helper(ring); if (r) goto done; } - ring = &adev->vcn.ring_jpeg; + ring = &adev->vcn.inst->ring_jpeg; r = amdgpu_ring_test_helper(ring); if (r) goto done; @@ -229,7 +231,7 @@ done: static int vcn_v1_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->vcn.ring_dec; + struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec; if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) || RREG32_SOC15(VCN, 0, mmUVD_STATUS)) @@ -304,9 +306,9 @@ static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev) offset = 0; } else { WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, - lower_32_bits(adev->vcn.gpu_addr)); + lower_32_bits(adev->vcn.inst->gpu_addr)); WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, - upper_32_bits(adev->vcn.gpu_addr)); + upper_32_bits(adev->vcn.inst->gpu_addr)); offset = size; WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, AMDGPU_UVD_FIRMWARE_OFFSET >> 3); @@ -316,17 +318,17 @@ static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev) /* cache window 1: stack */ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, - lower_32_bits(adev->vcn.gpu_addr + offset)); + lower_32_bits(adev->vcn.inst->gpu_addr + offset)); WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, - upper_32_bits(adev->vcn.gpu_addr + offset)); + upper_32_bits(adev->vcn.inst->gpu_addr + offset)); WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0); WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE); /* cache window 2: context */ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, - lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); + lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, - upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); + upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0); WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE); @@ -374,9 +376,9 @@ static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev) offset = 0; } else { WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, - lower_32_bits(adev->vcn.gpu_addr), 0xFFFFFFFF, 0); + lower_32_bits(adev->vcn.inst->gpu_addr), 0xFFFFFFFF, 0); WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, - upper_32_bits(adev->vcn.gpu_addr), 0xFFFFFFFF, 0); + upper_32_bits(adev->vcn.inst->gpu_addr), 0xFFFFFFFF, 0); offset = size; WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0xFFFFFFFF, 0); @@ -386,9 +388,9 @@ static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev) /* cache window 1: stack */ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, - lower_32_bits(adev->vcn.gpu_addr + offset), 0xFFFFFFFF, 0); + lower_32_bits(adev->vcn.inst->gpu_addr + offset), 0xFFFFFFFF, 0); WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, - upper_32_bits(adev->vcn.gpu_addr + offset), 0xFFFFFFFF, 0); + upper_32_bits(adev->vcn.inst->gpu_addr + offset), 0xFFFFFFFF, 0); 
WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0, 0xFFFFFFFF, 0); WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE, @@ -396,10 +398,10 @@ static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev) /* cache window 2: context */ WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, - lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), + lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0xFFFFFFFF, 0); WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, - upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), + upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0xFFFFFFFF, 0); WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0, 0xFFFFFFFF, 0); WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE, @@ -779,7 +781,7 @@ static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev) */ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev) { - struct amdgpu_ring *ring = &adev->vcn.ring_dec; + struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec; uint32_t rb_bufsz, tmp; uint32_t lmi_swap_cntl; int i, j, r; @@ -932,21 +934,21 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev) WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK); - ring = &adev->vcn.ring_enc[0]; + ring = &adev->vcn.inst->ring_enc[0]; WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr); WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4); - ring = &adev->vcn.ring_enc[1]; + ring = &adev->vcn.inst->ring_enc[1]; WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr); WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4); - ring = &adev->vcn.ring_jpeg; + ring = &adev->vcn.inst->ring_jpeg; WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0); WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK | UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK); @@ -968,7 +970,7 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev) static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev) { - struct amdgpu_ring *ring = &adev->vcn.ring_dec; + struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec; uint32_t rb_bufsz, tmp; uint32_t lmi_swap_cntl; @@ -1106,7 +1108,7 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev) ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK); /* initialize JPEG wptr */ - ring = &adev->vcn.ring_jpeg; + ring = &adev->vcn.inst->ring_jpeg; ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR); /* copy patch commands to the jpeg ring */ @@ -1255,21 +1257,21 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev, UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code); /* Restore */ - ring = &adev->vcn.ring_enc[0]; + ring = &adev->vcn.inst->ring_enc[0]; WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr); WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4); WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); 
- ring = &adev->vcn.ring_enc[1]; + ring = &adev->vcn.inst->ring_enc[1]; WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr); WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4); WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); - ring = &adev->vcn.ring_dec; + ring = &adev->vcn.inst->ring_dec; WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF); SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, @@ -1315,7 +1317,7 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev, UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK, ret_code); /* Restore */ - ring = &adev->vcn.ring_jpeg; + ring = &adev->vcn.inst->ring_jpeg; WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0); WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK | @@ -1329,7 +1331,7 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev, WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK); - ring = &adev->vcn.ring_dec; + ring = &adev->vcn.inst->ring_dec; WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF); SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, @@ -1596,7 +1598,7 @@ static uint64_t vcn_v1_0_enc_ring_get_rptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - if (ring == &adev->vcn.ring_enc[0]) + if (ring == &adev->vcn.inst->ring_enc[0]) return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR); else return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2); @@ -1613,7 +1615,7 @@ static uint64_t vcn_v1_0_enc_ring_get_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - if (ring == &adev->vcn.ring_enc[0]) + if (ring == &adev->vcn.inst->ring_enc[0]) return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR); else return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2); @@ -1630,7 +1632,7 @@ static void vcn_v1_0_enc_ring_set_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - if (ring == &adev->vcn.ring_enc[0]) + if (ring == &adev->vcn.inst->ring_enc[0]) WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); else @@ -2114,16 +2116,16 @@ static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev, switch (entry->src_id) { case 124: - amdgpu_fence_process(&adev->vcn.ring_dec); + amdgpu_fence_process(&adev->vcn.inst->ring_dec); break; case 119: - amdgpu_fence_process(&adev->vcn.ring_enc[0]); + amdgpu_fence_process(&adev->vcn.inst->ring_enc[0]); break; case 120: - amdgpu_fence_process(&adev->vcn.ring_enc[1]); + amdgpu_fence_process(&adev->vcn.inst->ring_enc[1]); break; case 126: - amdgpu_fence_process(&adev->vcn.ring_jpeg); + amdgpu_fence_process(&adev->vcn.inst->ring_jpeg); break; default: DRM_ERROR("Unhandled interrupt: %d %d\n", @@ -2295,7 +2297,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_jpeg_ring_vm_funcs = { static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev) { - adev->vcn.ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs; + adev->vcn.inst->ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs; DRM_INFO("VCN decode is enabled in VM mode\n"); } @@ -2304,14 +2306,14 @@ static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev) int i; for (i = 0; i < adev->vcn.num_enc_rings; ++i) - adev->vcn.ring_enc[i].funcs = &vcn_v1_0_enc_ring_vm_funcs; + adev->vcn.inst->ring_enc[i].funcs = &vcn_v1_0_enc_ring_vm_funcs; DRM_INFO("VCN encode is enabled in VM mode\n"); } static void vcn_v1_0_set_jpeg_ring_funcs(struct 
amdgpu_device *adev) { - adev->vcn.ring_jpeg.funcs = &vcn_v1_0_jpeg_ring_vm_funcs; + adev->vcn.inst->ring_jpeg.funcs = &vcn_v1_0_jpeg_ring_vm_funcs; DRM_INFO("VCN jpeg decode is enabled in VM mode\n"); } @@ -2322,8 +2324,8 @@ static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = { static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev) { - adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 2; - adev->vcn.irq.funcs = &vcn_v1_0_irq_funcs; + adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 2; + adev->vcn.inst->irq.funcs = &vcn_v1_0_irq_funcs; } const struct amdgpu_ip_block_version vcn_v1_0_ip_block = diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index b6b77a063c34..31539e6a16b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -92,6 +92,7 @@ static int vcn_v2_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + adev->vcn.num_vcn_inst = 1; adev->vcn.num_enc_rings = 2; vcn_v2_0_set_dec_ring_funcs(adev); @@ -118,7 +119,7 @@ static int vcn_v2_0_sw_init(void *handle) /* VCN DEC TRAP */ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, - &adev->vcn.irq); + &adev->vcn.inst->irq); if (r) return r; @@ -126,15 +127,14 @@ static int vcn_v2_0_sw_init(void *handle) for (i = 0; i < adev->vcn.num_enc_rings; ++i) { r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, - &adev->vcn.irq); + &adev->vcn.inst->irq); if (r) return r; } /* VCN JPEG TRAP */ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, - VCN_2_0__SRCID__JPEG_DECODE, - &adev->vcn.irq); + VCN_2_0__SRCID__JPEG_DECODE, &adev->vcn.inst->irq); if (r) return r; @@ -156,13 +156,13 @@ static int vcn_v2_0_sw_init(void *handle) if (r) return r; - ring = &adev->vcn.ring_dec; + ring = &adev->vcn.inst->ring_dec; ring->use_doorbell = true; ring->doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 << 1; sprintf(ring->name, "vcn_dec"); - r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0); if (r) return r; @@ -174,38 +174,38 @@ static int vcn_v2_0_sw_init(void *handle) adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET; adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET; - adev->vcn.external.scratch9 = SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9); + adev->vcn.inst->external.scratch9 = SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9); adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET; - adev->vcn.external.data0 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0); + adev->vcn.inst->external.data0 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0); adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET; - adev->vcn.external.data1 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1); + adev->vcn.inst->external.data1 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1); adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET; - adev->vcn.external.cmd = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD); + adev->vcn.inst->external.cmd = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD); adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET; - adev->vcn.external.nop = SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP); + adev->vcn.inst->external.nop = SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP); for (i = 0; i < adev->vcn.num_enc_rings; ++i) { - ring = &adev->vcn.ring_enc[i]; + ring = &adev->vcn.inst->ring_enc[i]; ring->use_doorbell = true; 
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i; sprintf(ring->name, "vcn_enc%d", i); - r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0); if (r) return r; } - ring = &adev->vcn.ring_jpeg; + ring = &adev->vcn.inst->ring_jpeg; ring->use_doorbell = true; ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1; sprintf(ring->name, "vcn_jpeg"); - r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0); if (r) return r; adev->vcn.pause_dpg_mode = vcn_v2_0_pause_dpg_mode; adev->vcn.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET; - adev->vcn.external.jpeg_pitch = SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH); + adev->vcn.inst->external.jpeg_pitch = SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH); return 0; } @@ -241,7 +241,7 @@ static int vcn_v2_0_sw_fini(void *handle) static int vcn_v2_0_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->vcn.ring_dec; + struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec; int i, r; adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell, @@ -255,7 +255,7 @@ static int vcn_v2_0_hw_init(void *handle) } for (i = 0; i < adev->vcn.num_enc_rings; ++i) { - ring = &adev->vcn.ring_enc[i]; + ring = &adev->vcn.inst->ring_enc[i]; ring->sched.ready = true; r = amdgpu_ring_test_ring(ring); if (r) { @@ -264,7 +264,7 @@ static int vcn_v2_0_hw_init(void *handle) } } - ring = &adev->vcn.ring_jpeg; + ring = &adev->vcn.inst->ring_jpeg; ring->sched.ready = true; r = amdgpu_ring_test_ring(ring); if (r) { @@ -290,7 +290,7 @@ done: static int vcn_v2_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->vcn.ring_dec; + struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec; int i; if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) || @@ -301,11 +301,11 @@ static int vcn_v2_0_hw_fini(void *handle) ring->sched.ready = false; for (i = 0; i < adev->vcn.num_enc_rings; ++i) { - ring = &adev->vcn.ring_enc[i]; + ring = &adev->vcn.inst->ring_enc[i]; ring->sched.ready = false; } - ring = &adev->vcn.ring_jpeg; + ring = &adev->vcn.inst->ring_jpeg; ring->sched.ready = false; return 0; @@ -375,9 +375,9 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev) offset = 0; } else { WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, - lower_32_bits(adev->vcn.gpu_addr)); + lower_32_bits(adev->vcn.inst->gpu_addr)); WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, - upper_32_bits(adev->vcn.gpu_addr)); + upper_32_bits(adev->vcn.inst->gpu_addr)); offset = size; /* No signed header for now from firmware WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, @@ -390,17 +390,17 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev) /* cache window 1: stack */ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, - lower_32_bits(adev->vcn.gpu_addr + offset)); + lower_32_bits(adev->vcn.inst->gpu_addr + offset)); WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, - upper_32_bits(adev->vcn.gpu_addr + offset)); + upper_32_bits(adev->vcn.inst->gpu_addr + offset)); WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0); WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE); /* cache window 2: context */ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, - lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); + 
lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, - upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); + upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0); WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE); @@ -436,10 +436,10 @@ static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirec } else { WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), - lower_32_bits(adev->vcn.gpu_addr), 0, indirect); + lower_32_bits(adev->vcn.inst->gpu_addr), 0, indirect); WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), - upper_32_bits(adev->vcn.gpu_addr), 0, indirect); + upper_32_bits(adev->vcn.inst->gpu_addr), 0, indirect); offset = size; WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), @@ -457,10 +457,10 @@ static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirec if (!indirect) { WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), - lower_32_bits(adev->vcn.gpu_addr + offset), 0, indirect); + lower_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect); WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), - upper_32_bits(adev->vcn.gpu_addr + offset), 0, indirect); + upper_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect); WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect); } else { @@ -477,10 +477,10 @@ static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirec /* cache window 2: context */ WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), - lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect); + lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect); WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), - upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect); + upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect); WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect); WREG32_SOC15_DPG_MODE_2_0(SOC15_DPG_MODE_OFFSET_2_0( @@ -668,7 +668,7 @@ static void vcn_v2_0_clock_gating_dpg_mode(struct amdgpu_device *adev, */ static int jpeg_v2_0_start(struct amdgpu_device *adev) { - struct amdgpu_ring *ring = &adev->vcn.ring_jpeg; + struct amdgpu_ring *ring = &adev->vcn.inst->ring_jpeg; uint32_t tmp; int r = 0; @@ -930,7 +930,7 @@ static void vcn_v2_0_enable_static_power_gating(struct amdgpu_device *adev) static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect) { - struct amdgpu_ring *ring = &adev->vcn.ring_dec; + struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec; uint32_t rb_bufsz, tmp; vcn_v2_0_enable_static_power_gating(adev); @@ -1056,7 +1056,7 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect) static int vcn_v2_0_start(struct amdgpu_device *adev) { - struct amdgpu_ring *ring = &adev->vcn.ring_dec; + struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec; uint32_t rb_bufsz, tmp; uint32_t lmi_swap_cntl; int i, j, r; @@ -1207,14 +1207,14 @@ static int 
vcn_v2_0_start(struct amdgpu_device *adev) WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); - ring = &adev->vcn.ring_enc[0]; + ring = &adev->vcn.inst->ring_enc[0]; WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr); WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4); - ring = &adev->vcn.ring_enc[1]; + ring = &adev->vcn.inst->ring_enc[1]; WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr); @@ -1361,14 +1361,14 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev, UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code); /* Restore */ - ring = &adev->vcn.ring_enc[0]; + ring = &adev->vcn.inst->ring_enc[0]; WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr); WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4); WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); - ring = &adev->vcn.ring_enc[1]; + ring = &adev->vcn.inst->ring_enc[1]; WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr); WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4); @@ -1660,7 +1660,7 @@ static uint64_t vcn_v2_0_enc_ring_get_rptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - if (ring == &adev->vcn.ring_enc[0]) + if (ring == &adev->vcn.inst->ring_enc[0]) return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR); else return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2); @@ -1677,7 +1677,7 @@ static uint64_t vcn_v2_0_enc_ring_get_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - if (ring == &adev->vcn.ring_enc[0]) { + if (ring == &adev->vcn.inst->ring_enc[0]) { if (ring->use_doorbell) return adev->wb.wb[ring->wptr_offs]; else @@ -1701,7 +1701,7 @@ static void vcn_v2_0_enc_ring_set_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - if (ring == &adev->vcn.ring_enc[0]) { + if (ring == &adev->vcn.inst->ring_enc[0]) { if (ring->use_doorbell) { adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr); WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); @@ -2075,16 +2075,16 @@ static int vcn_v2_0_process_interrupt(struct amdgpu_device *adev, switch (entry->src_id) { case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT: - amdgpu_fence_process(&adev->vcn.ring_dec); + amdgpu_fence_process(&adev->vcn.inst->ring_dec); break; case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE: - amdgpu_fence_process(&adev->vcn.ring_enc[0]); + amdgpu_fence_process(&adev->vcn.inst->ring_enc[0]); break; case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY: - amdgpu_fence_process(&adev->vcn.ring_enc[1]); + amdgpu_fence_process(&adev->vcn.inst->ring_enc[1]); break; case VCN_2_0__SRCID__JPEG_DECODE: - amdgpu_fence_process(&adev->vcn.ring_jpeg); + amdgpu_fence_process(&adev->vcn.inst->ring_jpeg); break; default: DRM_ERROR("Unhandled interrupt: %d %d\n", @@ -2233,7 +2233,7 @@ static const struct amdgpu_ring_funcs vcn_v2_0_jpeg_ring_vm_funcs = { static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev) { - adev->vcn.ring_dec.funcs = &vcn_v2_0_dec_ring_vm_funcs; + adev->vcn.inst->ring_dec.funcs = 
&vcn_v2_0_dec_ring_vm_funcs; DRM_INFO("VCN decode is enabled in VM mode\n"); } @@ -2242,14 +2242,14 @@ static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev) int i; for (i = 0; i < adev->vcn.num_enc_rings; ++i) - adev->vcn.ring_enc[i].funcs = &vcn_v2_0_enc_ring_vm_funcs; + adev->vcn.inst->ring_enc[i].funcs = &vcn_v2_0_enc_ring_vm_funcs; DRM_INFO("VCN encode is enabled in VM mode\n"); } static void vcn_v2_0_set_jpeg_ring_funcs(struct amdgpu_device *adev) { - adev->vcn.ring_jpeg.funcs = &vcn_v2_0_jpeg_ring_vm_funcs; + adev->vcn.inst->ring_jpeg.funcs = &vcn_v2_0_jpeg_ring_vm_funcs; DRM_INFO("VCN jpeg decode is enabled in VM mode\n"); } @@ -2260,8 +2260,8 @@ static const struct amdgpu_irq_src_funcs vcn_v2_0_irq_funcs = { static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev) { - adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 2; - adev->vcn.irq.funcs = &vcn_v2_0_irq_funcs; + adev->vcn.inst->irq.num_types = adev->vcn.num_enc_rings + 2; + adev->vcn.inst->irq.funcs = &vcn_v2_0_irq_funcs; } const struct amdgpu_ip_block_version vcn_v2_0_ip_block = diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index 75fdb6881ac0..e27351267c9e 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -66,6 +66,7 @@ static int vcn_v2_5_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + adev->vcn.num_vcn_inst = 1; adev->vcn.num_enc_rings = 2; vcn_v2_5_set_dec_ring_funcs(adev); @@ -91,21 +92,21 @@ static int vcn_v2_5_sw_init(void *handle) /* VCN DEC TRAP */ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, - VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.irq); + VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[0].irq); if (r) return r; /* VCN ENC TRAP */ for (i = 0; i < adev->vcn.num_enc_rings; ++i) { r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, - i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.irq); + i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[0].irq); if (r) return r; } /* VCN JPEG TRAP */ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, - VCN_2_0__SRCID__JPEG_DECODE, &adev->vcn.irq); + VCN_2_0__SRCID__JPEG_DECODE, &adev->vcn.inst[0].irq); if (r) return r; @@ -127,11 +128,11 @@ static int vcn_v2_5_sw_init(void *handle) if (r) return r; - ring = &adev->vcn.ring_dec; + ring = &adev->vcn.inst[0].ring_dec; ring->use_doorbell = true; ring->doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 << 1; sprintf(ring->name, "vcn_dec"); - r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[0].irq, 0); if (r) return r; @@ -143,36 +144,36 @@ static int vcn_v2_5_sw_init(void *handle) adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET; adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET; - adev->vcn.external.scratch9 = SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9); + adev->vcn.inst[0].external.scratch9 = SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9); adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET; - adev->vcn.external.data0 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0); + adev->vcn.inst[0].external.data0 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0); adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET; - adev->vcn.external.data1 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1); + adev->vcn.inst[0].external.data1 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1); adev->vcn.internal.cmd = 
mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET; - adev->vcn.external.cmd = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD); + adev->vcn.inst[0].external.cmd = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD); adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET; - adev->vcn.external.nop = SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP); + adev->vcn.inst[0].external.nop = SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP); for (i = 0; i < adev->vcn.num_enc_rings; ++i) { - ring = &adev->vcn.ring_enc[i]; + ring = &adev->vcn.inst[0].ring_enc[i]; ring->use_doorbell = true; ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i; sprintf(ring->name, "vcn_enc%d", i); - r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[0].irq, 0); if (r) return r; } - ring = &adev->vcn.ring_jpeg; + ring = &adev->vcn.inst[0].ring_jpeg; ring->use_doorbell = true; ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1; sprintf(ring->name, "vcn_jpeg"); - r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[0].irq, 0); if (r) return r; adev->vcn.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET; - adev->vcn.external.jpeg_pitch = SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH); + adev->vcn.inst[0].external.jpeg_pitch = SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH); return 0; } @@ -208,7 +209,7 @@ static int vcn_v2_5_sw_fini(void *handle) static int vcn_v2_5_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->vcn.ring_dec; + struct amdgpu_ring *ring = &adev->vcn.inst[0].ring_dec; int i, r; adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell, @@ -221,7 +222,7 @@ static int vcn_v2_5_hw_init(void *handle) } for (i = 0; i < adev->vcn.num_enc_rings; ++i) { - ring = &adev->vcn.ring_enc[i]; + ring = &adev->vcn.inst[0].ring_enc[i]; ring->sched.ready = false; continue; r = amdgpu_ring_test_ring(ring); @@ -231,7 +232,7 @@ static int vcn_v2_5_hw_init(void *handle) } } - ring = &adev->vcn.ring_jpeg; + ring = &adev->vcn.inst[0].ring_jpeg; r = amdgpu_ring_test_ring(ring); if (r) { ring->sched.ready = false; @@ -255,7 +256,7 @@ done: static int vcn_v2_5_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->vcn.ring_dec; + struct amdgpu_ring *ring = &adev->vcn.inst[0].ring_dec; int i; if (RREG32_SOC15(VCN, 0, mmUVD_STATUS)) @@ -264,11 +265,11 @@ static int vcn_v2_5_hw_fini(void *handle) ring->sched.ready = false; for (i = 0; i < adev->vcn.num_enc_rings; ++i) { - ring = &adev->vcn.ring_enc[i]; + ring = &adev->vcn.inst[0].ring_enc[i]; ring->sched.ready = false; } - ring = &adev->vcn.ring_jpeg; + ring = &adev->vcn.inst[0].ring_jpeg; ring->sched.ready = false; return 0; @@ -338,9 +339,9 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev) offset = 0; } else { WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, - lower_32_bits(adev->vcn.gpu_addr)); + lower_32_bits(adev->vcn.inst[0].gpu_addr)); WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, - upper_32_bits(adev->vcn.gpu_addr)); + upper_32_bits(adev->vcn.inst[0].gpu_addr)); offset = size; /* No signed header for now from firmware WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, @@ -352,17 +353,17 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev) /* cache window 1: stack */ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, - lower_32_bits(adev->vcn.gpu_addr + offset)); + 
lower_32_bits(adev->vcn.inst[0].gpu_addr + offset)); WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, - upper_32_bits(adev->vcn.gpu_addr + offset)); + upper_32_bits(adev->vcn.inst[0].gpu_addr + offset)); WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0); WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE); /* cache window 2: context */ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, - lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); + lower_32_bits(adev->vcn.inst[0].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, - upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); + upper_32_bits(adev->vcn.inst[0].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0); WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE); } @@ -548,7 +549,7 @@ static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev) */ static int jpeg_v2_5_start(struct amdgpu_device *adev) { - struct amdgpu_ring *ring = &adev->vcn.ring_jpeg; + struct amdgpu_ring *ring = &adev->vcn.inst[0].ring_jpeg; uint32_t tmp; /* disable anti hang mechanism */ @@ -639,7 +640,7 @@ static int jpeg_v2_5_stop(struct amdgpu_device *adev) static int vcn_v2_5_start(struct amdgpu_device *adev) { - struct amdgpu_ring *ring = &adev->vcn.ring_dec; + struct amdgpu_ring *ring = &adev->vcn.inst[0].ring_dec; uint32_t rb_bufsz, tmp; int i, j, r; @@ -781,14 +782,14 @@ static int vcn_v2_5_start(struct amdgpu_device *adev) ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR); WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); - ring = &adev->vcn.ring_enc[0]; + ring = &adev->vcn.inst[0].ring_enc[0]; WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr); WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4); - ring = &adev->vcn.ring_enc[1]; + ring = &adev->vcn.inst[0].ring_enc[1]; WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr); @@ -951,7 +952,7 @@ static uint64_t vcn_v2_5_enc_ring_get_rptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - if (ring == &adev->vcn.ring_enc[0]) + if (ring == &adev->vcn.inst[0].ring_enc[0]) return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR); else return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2); @@ -968,7 +969,7 @@ static uint64_t vcn_v2_5_enc_ring_get_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - if (ring == &adev->vcn.ring_enc[0]) { + if (ring == &adev->vcn.inst[0].ring_enc[0]) { if (ring->use_doorbell) return adev->wb.wb[ring->wptr_offs]; else @@ -992,7 +993,7 @@ static void vcn_v2_5_enc_ring_set_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - if (ring == &adev->vcn.ring_enc[0]) { + if (ring == &adev->vcn.inst[0].ring_enc[0]) { if (ring->use_doorbell) { adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr); WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); @@ -1121,7 +1122,7 @@ static const struct amdgpu_ring_funcs vcn_v2_5_jpeg_ring_vm_funcs = { static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev) { - adev->vcn.ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs; + adev->vcn.inst[0].ring_dec.funcs = 
&vcn_v2_5_dec_ring_vm_funcs; DRM_INFO("VCN decode is enabled in VM mode\n"); } @@ -1130,14 +1131,14 @@ static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev) int i; for (i = 0; i < adev->vcn.num_enc_rings; ++i) - adev->vcn.ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs; + adev->vcn.inst[0].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs; DRM_INFO("VCN encode is enabled in VM mode\n"); } static void vcn_v2_5_set_jpeg_ring_funcs(struct amdgpu_device *adev) { - adev->vcn.ring_jpeg.funcs = &vcn_v2_5_jpeg_ring_vm_funcs; + adev->vcn.inst[0].ring_jpeg.funcs = &vcn_v2_5_jpeg_ring_vm_funcs; DRM_INFO("VCN jpeg decode is enabled in VM mode\n"); } @@ -1212,16 +1213,16 @@ static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev, switch (entry->src_id) { case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT: - amdgpu_fence_process(&adev->vcn.ring_dec); + amdgpu_fence_process(&adev->vcn.inst[0].ring_dec); break; case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE: - amdgpu_fence_process(&adev->vcn.ring_enc[0]); + amdgpu_fence_process(&adev->vcn.inst[0].ring_enc[0]); break; case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY: - amdgpu_fence_process(&adev->vcn.ring_enc[1]); + amdgpu_fence_process(&adev->vcn.inst[0].ring_enc[1]); break; case VCN_2_0__SRCID__JPEG_DECODE: - amdgpu_fence_process(&adev->vcn.ring_jpeg); + amdgpu_fence_process(&adev->vcn.inst[0].ring_jpeg); break; default: DRM_ERROR("Unhandled interrupt: %d %d\n", @@ -1239,8 +1240,8 @@ static const struct amdgpu_irq_src_funcs vcn_v2_5_irq_funcs = { static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev) { - adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 2; - adev->vcn.irq.funcs = &vcn_v2_5_irq_funcs; + adev->vcn.inst[0].irq.num_types = adev->vcn.num_enc_rings + 2; + adev->vcn.inst[0].irq.funcs = &vcn_v2_5_irq_funcs; } static const struct amd_ip_funcs vcn_v2_5_ip_funcs = { -- cgit v1.2.3 From fa739f4b06864d3530e8e461eed223d3566c3633 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Wed, 10 Jul 2019 11:06:37 -0500 Subject: drm/amdgpu: add multiple instances support for Arcturus Arcturus has dual-VCN. Need add multiple instances support for Arcturus. 
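[Illustrative note, not part of the patch] A minimal, self-contained C model of the per-instance pattern this series introduces: state that used to live directly in the shared VCN struct moves into an inst[] array sized by num_vcn_inst, and consumers loop over the instances instead of assuming a single implicit instance 0. The struct and field names below (vcn_model, vcn_inst_model, count_fences) are simplified stand-ins for amdgpu_vcn/amdgpu_vcn_inst and the fence counts are fabricated for illustration; this is a userspace sketch of the access pattern, not kernel code.

    #include <stdio.h>

    #define MAX_VCN_INSTANCES 2   /* mirrors AMDGPU_MAX_VCN_INSTANCES */
    #define MAX_ENC_RINGS     3

    struct vcn_inst_model {            /* per-instance state (cf. amdgpu_vcn_inst) */
            unsigned dec_fences;
            unsigned enc_fences[MAX_ENC_RINGS];
            unsigned jpeg_fences;
    };

    struct vcn_model {                 /* shared state (cf. amdgpu_vcn) */
            unsigned num_vcn_inst;     /* 1 on VCN 1.0/2.0, 2 on dual-VCN Arcturus */
            unsigned num_enc_rings;
            struct vcn_inst_model inst[MAX_VCN_INSTANCES];
    };

    /* Sum outstanding fences across every instance, the way the reworked
     * idle-work handler iterates inst[] rather than touching instance 0 only. */
    static unsigned count_fences(const struct vcn_model *vcn)
    {
            unsigned total = 0;

            for (unsigned j = 0; j < vcn->num_vcn_inst; ++j) {
                    const struct vcn_inst_model *inst = &vcn->inst[j];

                    total += inst->dec_fences + inst->jpeg_fences;
                    for (unsigned i = 0; i < vcn->num_enc_rings; ++i)
                            total += inst->enc_fences[i];
            }
            return total;
    }

    int main(void)
    {
            struct vcn_model vcn = {
                    .num_vcn_inst = 2,   /* dual-VCN configuration */
                    .num_enc_rings = 2,
                    .inst = {
                            { .dec_fences = 1, .enc_fences = { 2, 0 }, .jpeg_fences = 0 },
                            { .dec_fences = 0, .enc_fences = { 0, 1 }, .jpeg_fences = 3 },
                    },
            };

            printf("outstanding fences: %u\n", count_fences(&vcn));  /* prints 7 */
            return 0;
    }

As the diffs above suggest, the shared pieces (firmware, idle work, the pause_dpg_mode callback) stay in the outer struct while ring- and register-related state moves per instance, so the existing VCN 2.x programming sequences can be reused for each instance simply by indexing inst[j].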
Signed-off-by: James Zhu Reviewed-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 20 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 166 +++-- drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 1178 +++++++++++++++++-------------- 3 files changed, 737 insertions(+), 627 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 4824a2b5f29b..e3776c77784b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -408,23 +408,29 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, break; case AMDGPU_HW_IP_VCN_DEC: type = AMD_IP_BLOCK_TYPE_VCN; - if (adev->vcn.inst[0].ring_dec.sched.ready) - ++num_rings; + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->vcn.inst[i].ring_dec.sched.ready) + ++num_rings; + } ib_start_alignment = 16; ib_size_alignment = 16; break; case AMDGPU_HW_IP_VCN_ENC: type = AMD_IP_BLOCK_TYPE_VCN; - for (i = 0; i < adev->vcn.num_enc_rings; i++) - if (adev->vcn.inst[0].ring_enc[i].sched.ready) - ++num_rings; + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + for (j = 0; j < adev->vcn.num_enc_rings; j++) + if (adev->vcn.inst[i].ring_enc[j].sched.ready) + ++num_rings; + } ib_start_alignment = 64; ib_size_alignment = 1; break; case AMDGPU_HW_IP_VCN_JPEG: type = AMD_IP_BLOCK_TYPE_VCN; - if (adev->vcn.inst[0].ring_jpeg.sched.ready) - ++num_rings; + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->vcn.inst[i].ring_jpeg.sched.ready) + ++num_rings; + } ib_start_alignment = 16; ib_size_alignment = 16; break; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index c102267da85d..e116342511b8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -65,7 +65,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) const char *fw_name; const struct common_firmware_header *hdr; unsigned char fw_check; - int r; + int i, r; INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler); @@ -146,12 +146,15 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE; if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8); - r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[0].vcpu_bo, - &adev->vcn.inst[0].gpu_addr, &adev->vcn.inst[0].cpu_addr); - if (r) { - dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r); - return r; + + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].vcpu_bo, + &adev->vcn.inst[i].gpu_addr, &adev->vcn.inst[i].cpu_addr); + if (r) { + dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r); + return r; + } } if (adev->vcn.indirect_sram) { @@ -169,26 +172,28 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) int amdgpu_vcn_sw_fini(struct amdgpu_device *adev) { - int i; - - kvfree(adev->vcn.inst[0].saved_bo); + int i, j; if (adev->vcn.indirect_sram) { amdgpu_bo_free_kernel(&adev->vcn.dpg_sram_bo, - &adev->vcn.dpg_sram_gpu_addr, - (void **)&adev->vcn.dpg_sram_cpu_addr); + &adev->vcn.dpg_sram_gpu_addr, + (void **)&adev->vcn.dpg_sram_cpu_addr); } - amdgpu_bo_free_kernel(&adev->vcn.inst[0].vcpu_bo, - &adev->vcn.inst[0].gpu_addr, - (void **)&adev->vcn.inst[0].cpu_addr); + for (j = 0; j < adev->vcn.num_vcn_inst; ++j) { + 
kvfree(adev->vcn.inst[j].saved_bo); - amdgpu_ring_fini(&adev->vcn.inst[0].ring_dec); + amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo, + &adev->vcn.inst[j].gpu_addr, + (void **)&adev->vcn.inst[j].cpu_addr); - for (i = 0; i < adev->vcn.num_enc_rings; ++i) - amdgpu_ring_fini(&adev->vcn.inst[0].ring_enc[i]); + amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec); - amdgpu_ring_fini(&adev->vcn.inst[0].ring_jpeg); + for (i = 0; i < adev->vcn.num_enc_rings; ++i) + amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]); + + amdgpu_ring_fini(&adev->vcn.inst[j].ring_jpeg); + } release_firmware(adev->vcn.fw); @@ -199,21 +204,23 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev) { unsigned size; void *ptr; + int i; cancel_delayed_work_sync(&adev->vcn.idle_work); - if (adev->vcn.inst[0].vcpu_bo == NULL) - return 0; + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.inst[i].vcpu_bo == NULL) + return 0; - size = amdgpu_bo_size(adev->vcn.inst[0].vcpu_bo); - ptr = adev->vcn.inst[0].cpu_addr; + size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo); + ptr = adev->vcn.inst[i].cpu_addr; - adev->vcn.inst[0].saved_bo = kvmalloc(size, GFP_KERNEL); - if (!adev->vcn.inst[0].saved_bo) - return -ENOMEM; - - memcpy_fromio(adev->vcn.inst[0].saved_bo, ptr, size); + adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL); + if (!adev->vcn.inst[i].saved_bo) + return -ENOMEM; + memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size); + } return 0; } @@ -221,32 +228,34 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev) { unsigned size; void *ptr; + int i; - if (adev->vcn.inst[0].vcpu_bo == NULL) - return -EINVAL; - - size = amdgpu_bo_size(adev->vcn.inst[0].vcpu_bo); - ptr = adev->vcn.inst[0].cpu_addr; - - if (adev->vcn.inst[0].saved_bo != NULL) { - memcpy_toio(ptr, adev->vcn.inst[0].saved_bo, size); - kvfree(adev->vcn.inst[0].saved_bo); - adev->vcn.inst[0].saved_bo = NULL; - } else { - const struct common_firmware_header *hdr; - unsigned offset; - - hdr = (const struct common_firmware_header *)adev->vcn.fw->data; - if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { - offset = le32_to_cpu(hdr->ucode_array_offset_bytes); - memcpy_toio(adev->vcn.inst[0].cpu_addr, adev->vcn.fw->data + offset, - le32_to_cpu(hdr->ucode_size_bytes)); - size -= le32_to_cpu(hdr->ucode_size_bytes); - ptr += le32_to_cpu(hdr->ucode_size_bytes); + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.inst[i].vcpu_bo == NULL) + return -EINVAL; + + size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo); + ptr = adev->vcn.inst[i].cpu_addr; + + if (adev->vcn.inst[i].saved_bo != NULL) { + memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size); + kvfree(adev->vcn.inst[i].saved_bo); + adev->vcn.inst[i].saved_bo = NULL; + } else { + const struct common_firmware_header *hdr; + unsigned offset; + + hdr = (const struct common_firmware_header *)adev->vcn.fw->data; + if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { + offset = le32_to_cpu(hdr->ucode_array_offset_bytes); + memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset, + le32_to_cpu(hdr->ucode_size_bytes)); + size -= le32_to_cpu(hdr->ucode_size_bytes); + ptr += le32_to_cpu(hdr->ucode_size_bytes); + } + memset_io(ptr, 0, size); } - memset_io(ptr, 0, size); } - return 0; } @@ -254,31 +263,34 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work) { struct amdgpu_device *adev = container_of(work, struct amdgpu_device, vcn.idle_work.work); - unsigned int fences = 0; - unsigned int i; + unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0}; + unsigned int i, j; - for (i 
= 0; i < adev->vcn.num_enc_rings; ++i) { - fences += amdgpu_fence_count_emitted(&adev->vcn.inst[0].ring_enc[i]); - } + for (j = 0; j < adev->vcn.num_vcn_inst; ++j) { + for (i = 0; i < adev->vcn.num_enc_rings; ++i) { + fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]); + } - if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { - struct dpg_pause_state new_state; + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { + struct dpg_pause_state new_state; - if (fences) - new_state.fw_based = VCN_DPG_STATE__PAUSE; - else - new_state.fw_based = VCN_DPG_STATE__UNPAUSE; + if (fence[j]) + new_state.fw_based = VCN_DPG_STATE__PAUSE; + else + new_state.fw_based = VCN_DPG_STATE__UNPAUSE; - if (amdgpu_fence_count_emitted(&adev->vcn.inst[0].ring_jpeg)) - new_state.jpeg = VCN_DPG_STATE__PAUSE; - else - new_state.jpeg = VCN_DPG_STATE__UNPAUSE; + if (amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_jpeg)) + new_state.jpeg = VCN_DPG_STATE__PAUSE; + else + new_state.jpeg = VCN_DPG_STATE__UNPAUSE; - adev->vcn.pause_dpg_mode(adev, &new_state); - } + adev->vcn.pause_dpg_mode(adev, &new_state); + } - fences += amdgpu_fence_count_emitted(&adev->vcn.inst[0].ring_jpeg); - fences += amdgpu_fence_count_emitted(&adev->vcn.inst[0].ring_dec); + fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_jpeg); + fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec); + fences += fence[j]; + } if (fences == 0) { amdgpu_gfx_off_ctrl(adev, true); @@ -312,14 +324,14 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring) unsigned int i; for (i = 0; i < adev->vcn.num_enc_rings; ++i) { - fences += amdgpu_fence_count_emitted(&adev->vcn.inst[0].ring_enc[i]); + fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]); } if (fences) new_state.fw_based = VCN_DPG_STATE__PAUSE; else new_state.fw_based = VCN_DPG_STATE__UNPAUSE; - if (amdgpu_fence_count_emitted(&adev->vcn.inst[0].ring_jpeg)) + if (amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_jpeg)) new_state.jpeg = VCN_DPG_STATE__PAUSE; else new_state.jpeg = VCN_DPG_STATE__UNPAUSE; @@ -345,7 +357,7 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring) unsigned i; int r; - WREG32(adev->vcn.inst[0].external.scratch9, 0xCAFEDEAD); + WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD); r = amdgpu_ring_alloc(ring, 3); if (r) return r; @@ -353,7 +365,7 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring) amdgpu_ring_write(ring, 0xDEADBEEF); amdgpu_ring_commit(ring); for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(adev->vcn.inst[0].external.scratch9); + tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9); if (tmp == 0xDEADBEEF) break; udelay(1); @@ -664,7 +676,7 @@ int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring) unsigned i; int r; - WREG32(adev->vcn.inst[0].external.jpeg_pitch, 0xCAFEDEAD); + WREG32(adev->vcn.inst[ring->me].external.jpeg_pitch, 0xCAFEDEAD); r = amdgpu_ring_alloc(ring, 3); if (r) return r; @@ -674,7 +686,7 @@ int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring) amdgpu_ring_commit(ring); for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(adev->vcn.inst[0].external.jpeg_pitch); + tmp = RREG32(adev->vcn.inst[ring->me].external.jpeg_pitch); if (tmp == 0xDEADBEEF) break; udelay(1); @@ -748,7 +760,7 @@ int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout) } for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(adev->vcn.inst[0].external.jpeg_pitch); + tmp = RREG32(adev->vcn.inst[ring->me].external.jpeg_pitch); if (tmp == 
0xDEADBEEF) break; udelay(1); diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index e27351267c9e..b7dc069b637c 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -48,6 +48,8 @@ #define mmUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f +#define VCN25_MAX_HW_INSTANCES_ARCTURUS 2 + static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev); static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev); static void vcn_v2_5_set_jpeg_ring_funcs(struct amdgpu_device *adev); @@ -55,6 +57,11 @@ static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev); static int vcn_v2_5_set_powergating_state(void *handle, enum amd_powergating_state state); +static int amdgpu_ih_clientid_vcns[] = { + SOC15_IH_CLIENTID_VCN, + SOC15_IH_CLIENTID_VCN1 +}; + /** * vcn_v2_5_early_init - set function pointers * @@ -65,8 +72,11 @@ static int vcn_v2_5_set_powergating_state(void *handle, static int vcn_v2_5_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (adev->asic_type == CHIP_ARCTURUS) - adev->vcn.num_vcn_inst = 1; + adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS; + else + adev->vcn.num_vcn_inst = 1; adev->vcn.num_enc_rings = 2; vcn_v2_5_set_dec_ring_funcs(adev); @@ -87,29 +97,31 @@ static int vcn_v2_5_early_init(void *handle) static int vcn_v2_5_sw_init(void *handle) { struct amdgpu_ring *ring; - int i, r; + int i, j, r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - /* VCN DEC TRAP */ - r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, - VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[0].irq); - if (r) - return r; + for (j = 0; j < adev->vcn.num_vcn_inst; j++) { + /* VCN DEC TRAP */ + r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j], + VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[j].irq); + if (r) + return r; + + /* VCN ENC TRAP */ + for (i = 0; i < adev->vcn.num_enc_rings; ++i) { + r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j], + i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[j].irq); + if (r) + return r; + } - /* VCN ENC TRAP */ - for (i = 0; i < adev->vcn.num_enc_rings; ++i) { - r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, - i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[0].irq); + /* VCN JPEG TRAP */ + r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j], + VCN_2_0__SRCID__JPEG_DECODE, &adev->vcn.inst[j].irq); if (r) return r; } - /* VCN JPEG TRAP */ - r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, - VCN_2_0__SRCID__JPEG_DECODE, &adev->vcn.inst[0].irq); - if (r) - return r; - r = amdgpu_vcn_sw_init(adev); if (r) return r; @@ -121,6 +133,13 @@ static int vcn_v2_5_sw_init(void *handle) adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw; adev->firmware.fw_size += ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE); + + if (adev->vcn.num_vcn_inst == VCN25_MAX_HW_INSTANCES_ARCTURUS) { + adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].ucode_id = AMDGPU_UCODE_ID_VCN1; + adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].fw = adev->vcn.fw; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE); + } DRM_INFO("PSP loading VCN firmware\n"); } @@ -128,52 +147,54 @@ static int vcn_v2_5_sw_init(void *handle) if (r) return r; - ring = &adev->vcn.inst[0].ring_dec; - ring->use_doorbell = true; - ring->doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 << 1; - sprintf(ring->name, "vcn_dec"); - r = amdgpu_ring_init(adev, ring, 512, 
&adev->vcn.inst[0].irq, 0); - if (r) - return r; - - adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET; - adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET; - adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET; - adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET; - adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET; - adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET; - - adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET; - adev->vcn.inst[0].external.scratch9 = SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9); - adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET; - adev->vcn.inst[0].external.data0 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0); - adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET; - adev->vcn.inst[0].external.data1 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1); - adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET; - adev->vcn.inst[0].external.cmd = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD); - adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET; - adev->vcn.inst[0].external.nop = SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP); - - for (i = 0; i < adev->vcn.num_enc_rings; ++i) { - ring = &adev->vcn.inst[0].ring_enc[i]; + for (j = 0; j < adev->vcn.num_vcn_inst; j++) { + adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET; + adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET; + adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET; + adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET; + adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET; + adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET; + + adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET; + adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(UVD, j, mmUVD_SCRATCH9); + adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET; + adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_DATA0); + adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET; + adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_DATA1); + adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET; + adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_CMD); + adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET; + adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(UVD, j, mmUVD_NO_OP); + + adev->vcn.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET; + adev->vcn.inst[j].external.jpeg_pitch = SOC15_REG_OFFSET(UVD, j, mmUVD_JPEG_PITCH); + + ring = &adev->vcn.inst[j].ring_dec; ring->use_doorbell = true; - ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i; - sprintf(ring->name, "vcn_enc%d", i); - r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[0].irq, 0); + ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8*j; + sprintf(ring->name, "vcn_dec_%d", j); + r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0); if (r) return r; - } - ring = &adev->vcn.inst[0].ring_jpeg; - ring->use_doorbell = true; - ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1; - sprintf(ring->name, "vcn_jpeg"); - r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[0].irq, 0); - if (r) - return r; + for (i = 0; i < adev->vcn.num_enc_rings; ++i) { + ring = &adev->vcn.inst[j].ring_enc[i]; + ring->use_doorbell = true; + 
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i + 8*j; + sprintf(ring->name, "vcn_enc_%d.%d", j, i); + r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0); + if (r) + return r; + } - adev->vcn.internal.jpeg_pitch = mmUVD_JPEG_PITCH_INTERNAL_OFFSET; - adev->vcn.inst[0].external.jpeg_pitch = SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH); + ring = &adev->vcn.inst[j].ring_jpeg; + ring->use_doorbell = true; + ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8*j; + sprintf(ring->name, "vcn_jpeg_%d", j); + r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0); + if (r) + return r; + } return 0; } @@ -209,36 +230,39 @@ static int vcn_v2_5_sw_fini(void *handle) static int vcn_v2_5_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->vcn.inst[0].ring_dec; - int i, r; + struct amdgpu_ring *ring; + int i, j, r; - adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell, - ring->doorbell_index, 0); + for (j = 0; j < adev->vcn.num_vcn_inst; ++j) { + ring = &adev->vcn.inst[j].ring_dec; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->sched.ready = false; - goto done; - } + adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell, + ring->doorbell_index, j); - for (i = 0; i < adev->vcn.num_enc_rings; ++i) { - ring = &adev->vcn.inst[0].ring_enc[i]; - ring->sched.ready = false; - continue; r = amdgpu_ring_test_ring(ring); if (r) { ring->sched.ready = false; goto done; } - } - ring = &adev->vcn.inst[0].ring_jpeg; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->sched.ready = false; - goto done; - } + for (i = 0; i < adev->vcn.num_enc_rings; ++i) { + ring = &adev->vcn.inst[j].ring_enc[i]; + ring->sched.ready = false; + continue; + r = amdgpu_ring_test_ring(ring); + if (r) { + ring->sched.ready = false; + goto done; + } + } + ring = &adev->vcn.inst[j].ring_jpeg; + r = amdgpu_ring_test_ring(ring); + if (r) { + ring->sched.ready = false; + goto done; + } + } done: if (!r) DRM_INFO("VCN decode and encode initialized successfully.\n"); @@ -256,21 +280,25 @@ done: static int vcn_v2_5_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->vcn.inst[0].ring_dec; + struct amdgpu_ring *ring; int i; - if (RREG32_SOC15(VCN, 0, mmUVD_STATUS)) - vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE); + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + ring = &adev->vcn.inst[i].ring_dec; - ring->sched.ready = false; + if (RREG32_SOC15(VCN, i, mmUVD_STATUS)) + vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE); - for (i = 0; i < adev->vcn.num_enc_rings; ++i) { - ring = &adev->vcn.inst[0].ring_enc[i]; ring->sched.ready = false; - } - ring = &adev->vcn.inst[0].ring_jpeg; - ring->sched.ready = false; + for (i = 0; i < adev->vcn.num_enc_rings; ++i) { + ring = &adev->vcn.inst[i].ring_enc[i]; + ring->sched.ready = false; + } + + ring = &adev->vcn.inst[i].ring_jpeg; + ring->sched.ready = false; + } return 0; } @@ -328,44 +356,47 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev) { uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); uint32_t offset; + int i; - /* cache window 0: fw */ - if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { - WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, - (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo)); - WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, - (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi)); - 
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0); - offset = 0; - } else { - WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, - lower_32_bits(adev->vcn.inst[0].gpu_addr)); - WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, - upper_32_bits(adev->vcn.inst[0].gpu_addr)); - offset = size; - /* No signed header for now from firmware - WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, - AMDGPU_UVD_FIRMWARE_OFFSET >> 3); - */ - WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0); + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + /* cache window 0: fw */ + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo)); + WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi)); + WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0); + offset = 0; + } else { + WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.inst[i].gpu_addr)); + WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.inst[i].gpu_addr)); + offset = size; + /* No signed header for now from firmware + WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, + AMDGPU_UVD_FIRMWARE_OFFSET >> 3); + */ + WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0); + } + WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size); + + /* cache window 1: stack */ + WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.inst[i].gpu_addr + offset)); + WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.inst[i].gpu_addr + offset)); + WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, 0); + WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE); + + /* cache window 2: context */ + WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); + WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); + WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, 0); + WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE); } - WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size); - - /* cache window 1: stack */ - WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, - lower_32_bits(adev->vcn.inst[0].gpu_addr + offset)); - WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, - upper_32_bits(adev->vcn.inst[0].gpu_addr + offset)); - WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0); - WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE); - - /* cache window 2: context */ - WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, - lower_32_bits(adev->vcn.inst[0].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); - WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, - upper_32_bits(adev->vcn.inst[0].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); - WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0); - WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE); } /** @@ -380,106 +411,109 @@ static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev) { uint32_t data; int ret = 0; + int i; - /* UVD disable CGC */ - data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL); - if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) - data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; - else - data &= ~ UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK; - data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; - data 
|= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT; - WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data); - - data = RREG32_SOC15(VCN, 0, mmUVD_CGC_GATE); - data &= ~(UVD_CGC_GATE__SYS_MASK - | UVD_CGC_GATE__UDEC_MASK - | UVD_CGC_GATE__MPEG2_MASK - | UVD_CGC_GATE__REGS_MASK - | UVD_CGC_GATE__RBC_MASK - | UVD_CGC_GATE__LMI_MC_MASK - | UVD_CGC_GATE__LMI_UMC_MASK - | UVD_CGC_GATE__IDCT_MASK - | UVD_CGC_GATE__MPRD_MASK - | UVD_CGC_GATE__MPC_MASK - | UVD_CGC_GATE__LBSI_MASK - | UVD_CGC_GATE__LRBBM_MASK - | UVD_CGC_GATE__UDEC_RE_MASK - | UVD_CGC_GATE__UDEC_CM_MASK - | UVD_CGC_GATE__UDEC_IT_MASK - | UVD_CGC_GATE__UDEC_DB_MASK - | UVD_CGC_GATE__UDEC_MP_MASK - | UVD_CGC_GATE__WCB_MASK - | UVD_CGC_GATE__VCPU_MASK - | UVD_CGC_GATE__MMSCH_MASK); - - WREG32_SOC15(VCN, 0, mmUVD_CGC_GATE, data); - - SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_CGC_GATE, 0, 0xFFFFFFFF, ret); - - data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL); - data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK - | UVD_CGC_CTRL__UDEC_CM_MODE_MASK - | UVD_CGC_CTRL__UDEC_IT_MODE_MASK - | UVD_CGC_CTRL__UDEC_DB_MODE_MASK - | UVD_CGC_CTRL__UDEC_MP_MODE_MASK - | UVD_CGC_CTRL__SYS_MODE_MASK - | UVD_CGC_CTRL__UDEC_MODE_MASK - | UVD_CGC_CTRL__MPEG2_MODE_MASK - | UVD_CGC_CTRL__REGS_MODE_MASK - | UVD_CGC_CTRL__RBC_MODE_MASK - | UVD_CGC_CTRL__LMI_MC_MODE_MASK - | UVD_CGC_CTRL__LMI_UMC_MODE_MASK - | UVD_CGC_CTRL__IDCT_MODE_MASK - | UVD_CGC_CTRL__MPRD_MODE_MASK - | UVD_CGC_CTRL__MPC_MODE_MASK - | UVD_CGC_CTRL__LBSI_MODE_MASK - | UVD_CGC_CTRL__LRBBM_MODE_MASK - | UVD_CGC_CTRL__WCB_MODE_MASK - | UVD_CGC_CTRL__VCPU_MODE_MASK - | UVD_CGC_CTRL__MMSCH_MODE_MASK); - WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data); - - /* turn on */ - data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE); - data |= (UVD_SUVD_CGC_GATE__SRE_MASK - | UVD_SUVD_CGC_GATE__SIT_MASK - | UVD_SUVD_CGC_GATE__SMP_MASK - | UVD_SUVD_CGC_GATE__SCM_MASK - | UVD_SUVD_CGC_GATE__SDB_MASK - | UVD_SUVD_CGC_GATE__SRE_H264_MASK - | UVD_SUVD_CGC_GATE__SRE_HEVC_MASK - | UVD_SUVD_CGC_GATE__SIT_H264_MASK - | UVD_SUVD_CGC_GATE__SIT_HEVC_MASK - | UVD_SUVD_CGC_GATE__SCM_H264_MASK - | UVD_SUVD_CGC_GATE__SCM_HEVC_MASK - | UVD_SUVD_CGC_GATE__SDB_H264_MASK - | UVD_SUVD_CGC_GATE__SDB_HEVC_MASK - | UVD_SUVD_CGC_GATE__SCLR_MASK - | UVD_SUVD_CGC_GATE__UVD_SC_MASK - | UVD_SUVD_CGC_GATE__ENT_MASK - | UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK - | UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK - | UVD_SUVD_CGC_GATE__SITE_MASK - | UVD_SUVD_CGC_GATE__SRE_VP9_MASK - | UVD_SUVD_CGC_GATE__SCM_VP9_MASK - | UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK - | UVD_SUVD_CGC_GATE__SDB_VP9_MASK - | UVD_SUVD_CGC_GATE__IME_HEVC_MASK); - WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE, data); - - data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL); - data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK - | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK - | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK - | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK - | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK - | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK - | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK - | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK - | UVD_SUVD_CGC_CTRL__IME_MODE_MASK - | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK); - WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data); + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + /* UVD disable CGC */ + data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL); + if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) + data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + else + data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK; + data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; + data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT; + WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data); + + data = 
RREG32_SOC15(VCN, i, mmUVD_CGC_GATE); + data &= ~(UVD_CGC_GATE__SYS_MASK + | UVD_CGC_GATE__UDEC_MASK + | UVD_CGC_GATE__MPEG2_MASK + | UVD_CGC_GATE__REGS_MASK + | UVD_CGC_GATE__RBC_MASK + | UVD_CGC_GATE__LMI_MC_MASK + | UVD_CGC_GATE__LMI_UMC_MASK + | UVD_CGC_GATE__IDCT_MASK + | UVD_CGC_GATE__MPRD_MASK + | UVD_CGC_GATE__MPC_MASK + | UVD_CGC_GATE__LBSI_MASK + | UVD_CGC_GATE__LRBBM_MASK + | UVD_CGC_GATE__UDEC_RE_MASK + | UVD_CGC_GATE__UDEC_CM_MASK + | UVD_CGC_GATE__UDEC_IT_MASK + | UVD_CGC_GATE__UDEC_DB_MASK + | UVD_CGC_GATE__UDEC_MP_MASK + | UVD_CGC_GATE__WCB_MASK + | UVD_CGC_GATE__VCPU_MASK + | UVD_CGC_GATE__MMSCH_MASK); + + WREG32_SOC15(VCN, i, mmUVD_CGC_GATE, data); + + SOC15_WAIT_ON_RREG(VCN, i, mmUVD_CGC_GATE, 0, 0xFFFFFFFF, ret); + + data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL); + data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK + | UVD_CGC_CTRL__UDEC_CM_MODE_MASK + | UVD_CGC_CTRL__UDEC_IT_MODE_MASK + | UVD_CGC_CTRL__UDEC_DB_MODE_MASK + | UVD_CGC_CTRL__UDEC_MP_MODE_MASK + | UVD_CGC_CTRL__SYS_MODE_MASK + | UVD_CGC_CTRL__UDEC_MODE_MASK + | UVD_CGC_CTRL__MPEG2_MODE_MASK + | UVD_CGC_CTRL__REGS_MODE_MASK + | UVD_CGC_CTRL__RBC_MODE_MASK + | UVD_CGC_CTRL__LMI_MC_MODE_MASK + | UVD_CGC_CTRL__LMI_UMC_MODE_MASK + | UVD_CGC_CTRL__IDCT_MODE_MASK + | UVD_CGC_CTRL__MPRD_MODE_MASK + | UVD_CGC_CTRL__MPC_MODE_MASK + | UVD_CGC_CTRL__LBSI_MODE_MASK + | UVD_CGC_CTRL__LRBBM_MODE_MASK + | UVD_CGC_CTRL__WCB_MODE_MASK + | UVD_CGC_CTRL__VCPU_MODE_MASK + | UVD_CGC_CTRL__MMSCH_MODE_MASK); + WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data); + + /* turn on */ + data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE); + data |= (UVD_SUVD_CGC_GATE__SRE_MASK + | UVD_SUVD_CGC_GATE__SIT_MASK + | UVD_SUVD_CGC_GATE__SMP_MASK + | UVD_SUVD_CGC_GATE__SCM_MASK + | UVD_SUVD_CGC_GATE__SDB_MASK + | UVD_SUVD_CGC_GATE__SRE_H264_MASK + | UVD_SUVD_CGC_GATE__SRE_HEVC_MASK + | UVD_SUVD_CGC_GATE__SIT_H264_MASK + | UVD_SUVD_CGC_GATE__SIT_HEVC_MASK + | UVD_SUVD_CGC_GATE__SCM_H264_MASK + | UVD_SUVD_CGC_GATE__SCM_HEVC_MASK + | UVD_SUVD_CGC_GATE__SDB_H264_MASK + | UVD_SUVD_CGC_GATE__SDB_HEVC_MASK + | UVD_SUVD_CGC_GATE__SCLR_MASK + | UVD_SUVD_CGC_GATE__UVD_SC_MASK + | UVD_SUVD_CGC_GATE__ENT_MASK + | UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK + | UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK + | UVD_SUVD_CGC_GATE__SITE_MASK + | UVD_SUVD_CGC_GATE__SRE_VP9_MASK + | UVD_SUVD_CGC_GATE__SCM_VP9_MASK + | UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK + | UVD_SUVD_CGC_GATE__SDB_VP9_MASK + | UVD_SUVD_CGC_GATE__IME_HEVC_MASK); + WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE, data); + + data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL); + data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK + | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK + | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK + | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK + | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK + | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK + | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK + | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK + | UVD_SUVD_CGC_CTRL__IME_MODE_MASK + | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK); + WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data); + } } /** @@ -493,51 +527,54 @@ static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev) static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev) { uint32_t data = 0; + int i; - /* enable UVD CGC */ - data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL); - if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) - data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; - else - data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; - data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; - data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT; - 
WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data); - - data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL); - data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK - | UVD_CGC_CTRL__UDEC_CM_MODE_MASK - | UVD_CGC_CTRL__UDEC_IT_MODE_MASK - | UVD_CGC_CTRL__UDEC_DB_MODE_MASK - | UVD_CGC_CTRL__UDEC_MP_MODE_MASK - | UVD_CGC_CTRL__SYS_MODE_MASK - | UVD_CGC_CTRL__UDEC_MODE_MASK - | UVD_CGC_CTRL__MPEG2_MODE_MASK - | UVD_CGC_CTRL__REGS_MODE_MASK - | UVD_CGC_CTRL__RBC_MODE_MASK - | UVD_CGC_CTRL__LMI_MC_MODE_MASK - | UVD_CGC_CTRL__LMI_UMC_MODE_MASK - | UVD_CGC_CTRL__IDCT_MODE_MASK - | UVD_CGC_CTRL__MPRD_MODE_MASK - | UVD_CGC_CTRL__MPC_MODE_MASK - | UVD_CGC_CTRL__LBSI_MODE_MASK - | UVD_CGC_CTRL__LRBBM_MODE_MASK - | UVD_CGC_CTRL__WCB_MODE_MASK - | UVD_CGC_CTRL__VCPU_MODE_MASK); - WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data); - - data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL); - data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK - | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK - | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK - | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK - | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK - | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK - | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK - | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK - | UVD_SUVD_CGC_CTRL__IME_MODE_MASK - | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK); - WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data); + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + /* enable UVD CGC */ + data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL); + if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) + data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + else + data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; + data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT; + WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data); + + data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL); + data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK + | UVD_CGC_CTRL__UDEC_CM_MODE_MASK + | UVD_CGC_CTRL__UDEC_IT_MODE_MASK + | UVD_CGC_CTRL__UDEC_DB_MODE_MASK + | UVD_CGC_CTRL__UDEC_MP_MODE_MASK + | UVD_CGC_CTRL__SYS_MODE_MASK + | UVD_CGC_CTRL__UDEC_MODE_MASK + | UVD_CGC_CTRL__MPEG2_MODE_MASK + | UVD_CGC_CTRL__REGS_MODE_MASK + | UVD_CGC_CTRL__RBC_MODE_MASK + | UVD_CGC_CTRL__LMI_MC_MODE_MASK + | UVD_CGC_CTRL__LMI_UMC_MODE_MASK + | UVD_CGC_CTRL__IDCT_MODE_MASK + | UVD_CGC_CTRL__MPRD_MODE_MASK + | UVD_CGC_CTRL__MPC_MODE_MASK + | UVD_CGC_CTRL__LBSI_MODE_MASK + | UVD_CGC_CTRL__LRBBM_MODE_MASK + | UVD_CGC_CTRL__WCB_MODE_MASK + | UVD_CGC_CTRL__VCPU_MODE_MASK); + WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data); + + data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL); + data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK + | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK + | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK + | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK + | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK + | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK + | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK + | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK + | UVD_SUVD_CGC_CTRL__IME_MODE_MASK + | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK); + WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data); + } } /** @@ -549,60 +586,64 @@ static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev) */ static int jpeg_v2_5_start(struct amdgpu_device *adev) { - struct amdgpu_ring *ring = &adev->vcn.inst[0].ring_jpeg; + struct amdgpu_ring *ring; uint32_t tmp; + int i; - /* disable anti hang mechanism */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_POWER_STATUS), 0, - ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK); - - /* JPEG disable CGC */ - tmp = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL); - tmp |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; - tmp |= 1 << 
JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; - tmp |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT; - WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, tmp); - - tmp = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE); - tmp &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK - | JPEG_CGC_GATE__JPEG2_DEC_MASK - | JPEG_CGC_GATE__JMCIF_MASK - | JPEG_CGC_GATE__JRBBM_MASK); - WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, tmp); - - tmp = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL); - tmp &= ~(JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK - | JPEG_CGC_CTRL__JPEG2_DEC_MODE_MASK - | JPEG_CGC_CTRL__JMCIF_MODE_MASK - | JPEG_CGC_CTRL__JRBBM_MODE_MASK); - WREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL, tmp); - - /* MJPEG global tiling registers */ - WREG32_SOC15(UVD, 0, mmJPEG_DEC_GFX8_ADDR_CONFIG, - adev->gfx.config.gb_addr_config); - WREG32_SOC15(UVD, 0, mmJPEG_DEC_GFX10_ADDR_CONFIG, - adev->gfx.config.gb_addr_config); - - /* enable JMI channel */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_JMI_CNTL), 0, - ~UVD_JMI_CNTL__SOFT_RESET_MASK); - - /* enable System Interrupt for JRBC */ - WREG32_P(SOC15_REG_OFFSET(VCN, 0, mmJPEG_SYS_INT_EN), - JPEG_SYS_INT_EN__DJRBC_MASK, - ~JPEG_SYS_INT_EN__DJRBC_MASK); - - WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0); - WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L)); - WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, - lower_32_bits(ring->gpu_addr)); - WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, - upper_32_bits(ring->gpu_addr)); - WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, 0); - WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, 0); - WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L); - WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4); - ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR); + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + ring = &adev->vcn.inst[i].ring_jpeg; + /* disable anti hang mechanism */ + WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JPEG_POWER_STATUS), 0, + ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK); + + /* JPEG disable CGC */ + tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL); + tmp |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + tmp |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; + tmp |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT; + WREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL, tmp); + + tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_GATE); + tmp &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK + | JPEG_CGC_GATE__JPEG2_DEC_MASK + | JPEG_CGC_GATE__JMCIF_MASK + | JPEG_CGC_GATE__JRBBM_MASK); + WREG32_SOC15(VCN, i, mmJPEG_CGC_GATE, tmp); + + tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL); + tmp &= ~(JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK + | JPEG_CGC_CTRL__JPEG2_DEC_MODE_MASK + | JPEG_CGC_CTRL__JMCIF_MODE_MASK + | JPEG_CGC_CTRL__JRBBM_MODE_MASK); + WREG32_SOC15(VCN, i, mmJPEG_CGC_CTRL, tmp); + + /* MJPEG global tiling registers */ + WREG32_SOC15(UVD, i, mmJPEG_DEC_GFX8_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + WREG32_SOC15(UVD, i, mmJPEG_DEC_GFX10_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + + /* enable JMI channel */ + WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JMI_CNTL), 0, + ~UVD_JMI_CNTL__SOFT_RESET_MASK); + + /* enable System Interrupt for JRBC */ + WREG32_P(SOC15_REG_OFFSET(VCN, i, mmJPEG_SYS_INT_EN), + JPEG_SYS_INT_EN__DJRBC_MASK, + ~JPEG_SYS_INT_EN__DJRBC_MASK); + + WREG32_SOC15(UVD, i, mmUVD_LMI_JRBC_RB_VMID, 0); + WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L)); + WREG32_SOC15(UVD, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, + lower_32_bits(ring->gpu_addr)); + WREG32_SOC15(UVD, i, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, + upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_RPTR, 
0); + WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_WPTR, 0); + WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_CNTL, 0x00000002L); + WREG32_SOC15(UVD, i, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4); + ring->wptr = RREG32_SOC15(UVD, i, mmUVD_JRBC_RB_WPTR); + } return 0; } @@ -617,185 +658,194 @@ static int jpeg_v2_5_start(struct amdgpu_device *adev) static int jpeg_v2_5_stop(struct amdgpu_device *adev) { uint32_t tmp; + int i; - /* reset JMI */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_JMI_CNTL), - UVD_JMI_CNTL__SOFT_RESET_MASK, - ~UVD_JMI_CNTL__SOFT_RESET_MASK); - - tmp = RREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE); - tmp |= (JPEG_CGC_GATE__JPEG_DEC_MASK - |JPEG_CGC_GATE__JPEG2_DEC_MASK - |JPEG_CGC_GATE__JMCIF_MASK - |JPEG_CGC_GATE__JRBBM_MASK); - WREG32_SOC15(VCN, 0, mmJPEG_CGC_GATE, tmp); - - /* enable anti hang mechanism */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_POWER_STATUS), - UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK, - ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK); + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + /* reset JMI */ + WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JMI_CNTL), + UVD_JMI_CNTL__SOFT_RESET_MASK, + ~UVD_JMI_CNTL__SOFT_RESET_MASK); + + tmp = RREG32_SOC15(VCN, i, mmJPEG_CGC_GATE); + tmp |= (JPEG_CGC_GATE__JPEG_DEC_MASK + |JPEG_CGC_GATE__JPEG2_DEC_MASK + |JPEG_CGC_GATE__JMCIF_MASK + |JPEG_CGC_GATE__JRBBM_MASK); + WREG32_SOC15(VCN, i, mmJPEG_CGC_GATE, tmp); + + /* enable anti hang mechanism */ + WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JPEG_POWER_STATUS), + UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK, + ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK); + } return 0; } static int vcn_v2_5_start(struct amdgpu_device *adev) { - struct amdgpu_ring *ring = &adev->vcn.inst[0].ring_dec; + struct amdgpu_ring *ring; uint32_t rb_bufsz, tmp; - int i, j, r; + int i, j, k, r; - /* disable register anti-hang mechanism */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0, - ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + /* disable register anti-hang mechanism */ + WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS), 0, + ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); - /* set uvd status busy */ - tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY; - WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp); + /* set uvd status busy */ + tmp = RREG32_SOC15(UVD, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY; + WREG32_SOC15(UVD, i, mmUVD_STATUS, tmp); + } /*SW clock gating */ vcn_v2_5_disable_clock_gating(adev); - /* enable VCPU clock */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), - UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK); - - /* disable master interrupt */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0, - ~UVD_MASTINT_EN__VCPU_EN_MASK); - - /* setup mmUVD_LMI_CTRL */ - tmp = RREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL); - tmp &= ~0xff; - WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL, tmp | 0x8| - UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | - UVD_LMI_CTRL__MASK_MC_URGENT_MASK | - UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | - UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK); - - /* setup mmUVD_MPC_CNTL */ - tmp = RREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL); - tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK; - tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT; - WREG32_SOC15(VCN, 0, mmUVD_MPC_CNTL, tmp); - - /* setup UVD_MPC_SET_MUXA0 */ - WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0, - ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) | - (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) | - (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) | - (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT))); - 
- /* setup UVD_MPC_SET_MUXB0 */ - WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0, - ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) | - (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) | - (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) | - (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT))); - - /* setup mmUVD_MPC_SET_MUX */ - WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX, - ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) | - (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) | - (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT))); + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + /* enable VCPU clock */ + WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), + UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK); + + /* disable master interrupt */ + WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN), 0, + ~UVD_MASTINT_EN__VCPU_EN_MASK); + + /* setup mmUVD_LMI_CTRL */ + tmp = RREG32_SOC15(UVD, i, mmUVD_LMI_CTRL); + tmp &= ~0xff; + WREG32_SOC15(UVD, i, mmUVD_LMI_CTRL, tmp | 0x8| + UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | + UVD_LMI_CTRL__MASK_MC_URGENT_MASK | + UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | + UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK); + + /* setup mmUVD_MPC_CNTL */ + tmp = RREG32_SOC15(UVD, i, mmUVD_MPC_CNTL); + tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK; + tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT; + WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp); + + /* setup UVD_MPC_SET_MUXA0 */ + WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUXA0, + ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) | + (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) | + (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT))); + + /* setup UVD_MPC_SET_MUXB0 */ + WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUXB0, + ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) | + (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) | + (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT))); + + /* setup mmUVD_MPC_SET_MUX */ + WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUX, + ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) | + (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT))); + } vcn_v2_5_mc_resume(adev); - /* VCN global tiling registers */ - WREG32_SOC15(UVD, 0, mmUVD_GFX8_ADDR_CONFIG, - adev->gfx.config.gb_addr_config); - WREG32_SOC15(UVD, 0, mmUVD_GFX8_ADDR_CONFIG, - adev->gfx.config.gb_addr_config); + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + /* VCN global tiling registers */ + WREG32_SOC15(UVD, i, mmUVD_GFX8_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + WREG32_SOC15(UVD, i, mmUVD_GFX8_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); - /* enable LMI MC and UMC channels */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0, - ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); + /* enable LMI MC and UMC channels */ + WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0, + ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); - /* unblock VCPU register access */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_ARB_CTRL), 0, - ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK); + /* unblock VCPU register access */ + WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_ARB_CTRL), 0, + ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK); - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0, - ~UVD_VCPU_CNTL__BLK_RST_MASK); - - for (i = 0; i < 10; ++i) { - uint32_t status; + WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0, + ~UVD_VCPU_CNTL__BLK_RST_MASK); - for (j = 0; j < 100; ++j) { - status = RREG32_SOC15(UVD, 0, mmUVD_STATUS); + for (k = 0; k < 10; ++k) { + uint32_t status; + + for (j = 0; j < 100; ++j) { + status = RREG32_SOC15(UVD, i, mmUVD_STATUS); + if (status & 2) + break; + if 
(amdgpu_emu_mode == 1) + msleep(500); + else + mdelay(10); + } + r = 0; if (status & 2) break; - if (amdgpu_emu_mode == 1) - msleep(500); - else - mdelay(10); - } - r = 0; - if (status & 2) - break; - - DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n"); - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), - UVD_VCPU_CNTL__BLK_RST_MASK, - ~UVD_VCPU_CNTL__BLK_RST_MASK); - mdelay(10); - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0, - ~UVD_VCPU_CNTL__BLK_RST_MASK); - mdelay(10); - r = -1; - } + DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n"); + WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), + UVD_VCPU_CNTL__BLK_RST_MASK, + ~UVD_VCPU_CNTL__BLK_RST_MASK); + mdelay(10); + WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0, + ~UVD_VCPU_CNTL__BLK_RST_MASK); - if (r) { - DRM_ERROR("VCN decode not responding, giving up!!!\n"); - return r; - } + mdelay(10); + r = -1; + } - /* enable master interrupt */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), - UVD_MASTINT_EN__VCPU_EN_MASK, - ~UVD_MASTINT_EN__VCPU_EN_MASK); - - /* clear the busy bit of VCN_STATUS */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0, - ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT)); - - WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_VMID, 0); - - /* force RBC into idle state */ - rb_bufsz = order_base_2(ring->ring_size); - tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz); - tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1); - tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); - tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); - tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); - WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp); - - /* programm the RB_BASE for ring buffer */ - WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW, - lower_32_bits(ring->gpu_addr)); - WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH, - upper_32_bits(ring->gpu_addr)); - - /* Initialize the ring buffer's read and write pointers */ - WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0); - - ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR); - WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, - lower_32_bits(ring->wptr)); - ring = &adev->vcn.inst[0].ring_enc[0]; - WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); - WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); - WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr); - WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); - WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4); - - ring = &adev->vcn.inst[0].ring_enc[1]; - WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); - WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); - WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr); - WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); - WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4); + if (r) { + DRM_ERROR("VCN decode not responding, giving up!!!\n"); + return r; + } + /* enable master interrupt */ + WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN), + UVD_MASTINT_EN__VCPU_EN_MASK, + ~UVD_MASTINT_EN__VCPU_EN_MASK); + + /* clear the busy bit of VCN_STATUS */ + WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0, + ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT)); + + WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_VMID, 0); + + ring = &adev->vcn.inst[i].ring_dec; + /* force RBC into idle state */ + rb_bufsz = order_base_2(ring->ring_size); + tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz); + tmp = REG_SET_FIELD(tmp, 
UVD_RBC_RB_CNTL, RB_BLKSZ, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); + WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, tmp); + + /* programm the RB_BASE for ring buffer */ + WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW, + lower_32_bits(ring->gpu_addr)); + WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH, + upper_32_bits(ring->gpu_addr)); + + /* Initialize the ring buffer's read and write pointers */ + WREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR, 0); + + ring->wptr = RREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR); + WREG32_SOC15(UVD, i, mmUVD_RBC_RB_WPTR, + lower_32_bits(ring->wptr)); + ring = &adev->vcn.inst[i].ring_enc[0]; + WREG32_SOC15(UVD, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); + WREG32_SOC15(UVD, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); + WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO, ring->gpu_addr); + WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(UVD, i, mmUVD_RB_SIZE, ring->ring_size / 4); + + ring = &adev->vcn.inst[i].ring_enc[1]; + WREG32_SOC15(UVD, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); + WREG32_SOC15(UVD, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); + WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO2, ring->gpu_addr); + WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(UVD, i, mmUVD_RB_SIZE2, ring->ring_size / 4); + } r = jpeg_v2_5_start(adev); return r; @@ -804,59 +854,61 @@ static int vcn_v2_5_start(struct amdgpu_device *adev) static int vcn_v2_5_stop(struct amdgpu_device *adev) { uint32_t tmp; - int r; + int i, r; r = jpeg_v2_5_stop(adev); if (r) return r; - /* wait for vcn idle */ - SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, r); - if (r) - return r; + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + /* wait for vcn idle */ + SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, r); + if (r) + return r; - tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK | - UVD_LMI_STATUS__READ_CLEAN_MASK | - UVD_LMI_STATUS__WRITE_CLEAN_MASK | - UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK; - SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_LMI_STATUS, tmp, tmp, r); - if (r) - return r; + tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK | + UVD_LMI_STATUS__READ_CLEAN_MASK | + UVD_LMI_STATUS__WRITE_CLEAN_MASK | + UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK; + SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp, r); + if (r) + return r; - /* block LMI UMC channel */ - tmp = RREG32_SOC15(VCN, 0, mmUVD_LMI_CTRL2); - tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK; - WREG32_SOC15(VCN, 0, mmUVD_LMI_CTRL2, tmp); + /* block LMI UMC channel */ + tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2); + tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK; + WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp); - tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK| - UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK; - SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_LMI_STATUS, tmp, tmp, r); - if (r) - return r; + tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK| + UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK; + SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp, r); + if (r) + return r; - /* block VCPU register access */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_ARB_CTRL), - UVD_RB_ARB_CTRL__VCPU_DIS_MASK, - ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK); + /* block VCPU register access */ + WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_ARB_CTRL), + UVD_RB_ARB_CTRL__VCPU_DIS_MASK, + ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK); - /* reset VCPU */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, 
mmUVD_VCPU_CNTL), - UVD_VCPU_CNTL__BLK_RST_MASK, - ~UVD_VCPU_CNTL__BLK_RST_MASK); + /* reset VCPU */ + WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), + UVD_VCPU_CNTL__BLK_RST_MASK, + ~UVD_VCPU_CNTL__BLK_RST_MASK); - /* disable VCPU clock */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0, - ~(UVD_VCPU_CNTL__CLK_EN_MASK)); + /* disable VCPU clock */ + WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0, + ~(UVD_VCPU_CNTL__CLK_EN_MASK)); - /* clear status */ - WREG32_SOC15(VCN, 0, mmUVD_STATUS, 0); + /* clear status */ + WREG32_SOC15(VCN, i, mmUVD_STATUS, 0); - vcn_v2_5_enable_clock_gating(adev); + vcn_v2_5_enable_clock_gating(adev); - /* enable register anti-hang mechanism */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), - UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, - ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); + /* enable register anti-hang mechanism */ + WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS), + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, + ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); + } return 0; } @@ -872,7 +924,7 @@ static uint64_t vcn_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR); + return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR); } /** @@ -889,7 +941,7 @@ static uint64_t vcn_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring) if (ring->use_doorbell) return adev->wb.wb[ring->wptr_offs]; else - return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR); + return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR); } /** @@ -907,7 +959,7 @@ static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring) adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr); WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); } else { - WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); + WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); } } @@ -952,10 +1004,10 @@ static uint64_t vcn_v2_5_enc_ring_get_rptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - if (ring == &adev->vcn.inst[0].ring_enc[0]) - return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR); + if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) + return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR); else - return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2); + return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2); } /** @@ -969,16 +1021,16 @@ static uint64_t vcn_v2_5_enc_ring_get_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - if (ring == &adev->vcn.inst[0].ring_enc[0]) { + if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) { if (ring->use_doorbell) return adev->wb.wb[ring->wptr_offs]; else - return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR); + return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR); } else { if (ring->use_doorbell) return adev->wb.wb[ring->wptr_offs]; else - return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2); + return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2); } } @@ -993,19 +1045,19 @@ static void vcn_v2_5_enc_ring_set_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - if (ring == &adev->vcn.inst[0].ring_enc[0]) { + if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) { if (ring->use_doorbell) { adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr); WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); } else { - WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); + WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); } } else { if (ring->use_doorbell) { adev->wb.wb[ring->wptr_offs] = 
lower_32_bits(ring->wptr); WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); } else { - WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); + WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); } } } @@ -1051,7 +1103,7 @@ static uint64_t vcn_v2_5_jpeg_ring_get_rptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR); + return RREG32_SOC15(UVD, ring->me, mmUVD_JRBC_RB_RPTR); } /** @@ -1068,7 +1120,7 @@ static uint64_t vcn_v2_5_jpeg_ring_get_wptr(struct amdgpu_ring *ring) if (ring->use_doorbell) return adev->wb.wb[ring->wptr_offs]; else - return RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR); + return RREG32_SOC15(UVD, ring->me, mmUVD_JRBC_RB_WPTR); } /** @@ -1086,7 +1138,7 @@ static void vcn_v2_5_jpeg_ring_set_wptr(struct amdgpu_ring *ring) adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr); WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); } else { - WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr)); + WREG32_SOC15(UVD, ring->me, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr)); } } @@ -1122,40 +1174,62 @@ static const struct amdgpu_ring_funcs vcn_v2_5_jpeg_ring_vm_funcs = { static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev) { - adev->vcn.inst[0].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs; - DRM_INFO("VCN decode is enabled in VM mode\n"); + int i; + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs; + adev->vcn.inst[i].ring_dec.me = i; + DRM_INFO("VCN(%d) decode is enabled in VM mode\n", i); + } } static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev) { - int i; - - for (i = 0; i < adev->vcn.num_enc_rings; ++i) - adev->vcn.inst[0].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs; + int i, j; - DRM_INFO("VCN encode is enabled in VM mode\n"); + for (j = 0; j < adev->vcn.num_vcn_inst; ++j) { + for (i = 0; i < adev->vcn.num_enc_rings; ++i) { + adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs; + adev->vcn.inst[j].ring_enc[i].me = j; + } + DRM_INFO("VCN(%d) encode is enabled in VM mode\n", j); + } } static void vcn_v2_5_set_jpeg_ring_funcs(struct amdgpu_device *adev) { - adev->vcn.inst[0].ring_jpeg.funcs = &vcn_v2_5_jpeg_ring_vm_funcs; - DRM_INFO("VCN jpeg decode is enabled in VM mode\n"); + int i; + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + adev->vcn.inst[i].ring_jpeg.funcs = &vcn_v2_5_jpeg_ring_vm_funcs; + adev->vcn.inst[i].ring_jpeg.me = i; + DRM_INFO("VCN(%d) jpeg decode is enabled in VM mode\n", i); + } } static bool vcn_v2_5_is_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i, ret = 1; + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + ret &= (RREG32_SOC15(VCN, i, mmUVD_STATUS) == UVD_STATUS__IDLE); + } - return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE); + return ret; } static int vcn_v2_5_wait_for_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - int ret = 0; + int i, ret = 0; - SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE, - UVD_STATUS__IDLE, ret); + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, + UVD_STATUS__IDLE, ret); + if (ret) + return ret; + } return ret; } @@ -1209,20 +1283,34 @@ static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { + uint32_t ip_instance; + + switch (entry->client_id) 
{ + case SOC15_IH_CLIENTID_VCN: + ip_instance = 0; + break; + case SOC15_IH_CLIENTID_VCN1: + ip_instance = 1; + break; + default: + DRM_ERROR("Unhandled client id: %d\n", entry->client_id); + return 0; + } + DRM_DEBUG("IH: VCN TRAP\n"); switch (entry->src_id) { case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT: - amdgpu_fence_process(&adev->vcn.inst[0].ring_dec); + amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_dec); break; case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE: - amdgpu_fence_process(&adev->vcn.inst[0].ring_enc[0]); + amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]); break; case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY: - amdgpu_fence_process(&adev->vcn.inst[0].ring_enc[1]); + amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]); break; case VCN_2_0__SRCID__JPEG_DECODE: - amdgpu_fence_process(&adev->vcn.inst[0].ring_jpeg); + amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_jpeg); break; default: DRM_ERROR("Unhandled interrupt: %d %d\n", @@ -1240,8 +1328,12 @@ static const struct amdgpu_irq_src_funcs vcn_v2_5_irq_funcs = { static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev) { - adev->vcn.inst[0].irq.num_types = adev->vcn.num_enc_rings + 2; - adev->vcn.inst[0].irq.funcs = &vcn_v2_5_irq_funcs; + int i; + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 2; + adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs; + } } static const struct amd_ip_funcs vcn_v2_5_ip_funcs = { -- cgit v1.2.3 From cd1fd7b3818a5724374ff80d9a13a4243b7b9df6 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Wed, 10 Jul 2019 12:07:29 -0500 Subject: drm/amdgpu: add harvest support for Arcturus Add VCN harvest support for Arcturus Signed-off-by: James Zhu Reviewed-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 9 ++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 11 +++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 4 +++ drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 55 +++++++++++++++++++++++++++++++-- 4 files changed, 77 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index e3776c77784b..60ad1a9d56bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -409,6 +409,9 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, case AMDGPU_HW_IP_VCN_DEC: type = AMD_IP_BLOCK_TYPE_VCN; for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->uvd.harvest_config & (1 << i)) + continue; + if (adev->vcn.inst[i].ring_dec.sched.ready) ++num_rings; } @@ -418,6 +421,9 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, case AMDGPU_HW_IP_VCN_ENC: type = AMD_IP_BLOCK_TYPE_VCN; for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->uvd.harvest_config & (1 << i)) + continue; + for (j = 0; j < adev->vcn.num_enc_rings; j++) if (adev->vcn.inst[i].ring_enc[j].sched.ready) ++num_rings; @@ -428,6 +434,9 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, case AMDGPU_HW_IP_VCN_JPEG: type = AMD_IP_BLOCK_TYPE_VCN; for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->uvd.harvest_config & (1 << i)) + continue; + if (adev->vcn.inst[i].ring_jpeg.sched.ready) ++num_rings; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index e116342511b8..4d3e6f1876c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -148,6 +148,9 @@ int 
amdgpu_vcn_sw_init(struct amdgpu_device *adev) bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8); for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + if (adev->vcn.harvest_config & (1 << i)) + continue; + r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].vcpu_bo, &adev->vcn.inst[i].gpu_addr, &adev->vcn.inst[i].cpu_addr); @@ -181,6 +184,8 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev) } for (j = 0; j < adev->vcn.num_vcn_inst; ++j) { + if (adev->vcn.harvest_config & (1 << j)) + continue; kvfree(adev->vcn.inst[j].saved_bo); amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo, @@ -209,6 +214,8 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev) cancel_delayed_work_sync(&adev->vcn.idle_work); for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; if (adev->vcn.inst[i].vcpu_bo == NULL) return 0; @@ -231,6 +238,8 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev) int i; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; if (adev->vcn.inst[i].vcpu_bo == NULL) return -EINVAL; @@ -267,6 +276,8 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work) unsigned int i, j; for (j = 0; j < adev->vcn.num_vcn_inst; ++j) { + if (adev->vcn.harvest_config & (1 << j)) + continue; for (i = 0; i < adev->vcn.num_enc_rings; ++i) { fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index d2fc47a954ab..38f0d53a6381 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -32,6 +32,9 @@ #define AMDGPU_MAX_VCN_INSTANCES 2 +#define AMDGPU_VCN_HARVEST_VCN0 (1 << 0) +#define AMDGPU_VCN_HARVEST_VCN1 (1 << 1) + #define VCN_DEC_CMD_FENCE 0x00000000 #define VCN_DEC_CMD_TRAP 0x00000001 #define VCN_DEC_CMD_WRITE_REG 0x00000004 @@ -187,6 +190,7 @@ struct amdgpu_vcn { struct amdgpu_vcn_inst inst[AMDGPU_MAX_VCN_INSTANCES]; struct amdgpu_vcn_reg internal; + unsigned harvest_config; int (*pause_dpg_mode)(struct amdgpu_device *adev, struct dpg_pause_state *new_state); }; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index b7dc069b637c..ef8bb67844be 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -72,11 +72,24 @@ static int amdgpu_ih_clientid_vcns[] = { static int vcn_v2_5_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (adev->asic_type == CHIP_ARCTURUS) + if (adev->asic_type == CHIP_ARCTURUS) { + u32 harvest; + int i; adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS; - else + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + harvest = RREG32_SOC15(UVD, i, mmCC_UVD_HARVESTING); + if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK) + adev->vcn.harvest_config |= 1 << i; + } + + if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 | + AMDGPU_VCN_HARVEST_VCN1)) + /* both instances are harvested, disable the block */ + return -ENOENT; + } else adev->vcn.num_vcn_inst = 1; + adev->vcn.num_enc_rings = 2; vcn_v2_5_set_dec_ring_funcs(adev); @@ -101,6 +114,8 @@ static int vcn_v2_5_sw_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; for (j = 0; j < adev->vcn.num_vcn_inst; j++) { + if (adev->vcn.harvest_config & (1 << j)) + continue; /* VCN DEC TRAP */ r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j], 
VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[j].irq); @@ -148,6 +163,8 @@ static int vcn_v2_5_sw_init(void *handle) return r; for (j = 0; j < adev->vcn.num_vcn_inst; j++) { + if (adev->vcn.harvest_config & (1 << j)) + continue; adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET; adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET; adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET; @@ -234,6 +251,8 @@ static int vcn_v2_5_hw_init(void *handle) int i, j, r; for (j = 0; j < adev->vcn.num_vcn_inst; ++j) { + if (adev->vcn.harvest_config & (1 << j)) + continue; ring = &adev->vcn.inst[j].ring_dec; adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell, @@ -284,6 +303,8 @@ static int vcn_v2_5_hw_fini(void *handle) int i; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; ring = &adev->vcn.inst[i].ring_dec; if (RREG32_SOC15(VCN, i, mmUVD_STATUS)) @@ -359,6 +380,8 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev) int i; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; /* cache window 0: fw */ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, @@ -414,6 +437,8 @@ static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev) int i; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; /* UVD disable CGC */ data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL); if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) @@ -530,6 +555,8 @@ static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev) int i; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; /* enable UVD CGC */ data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL); if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) @@ -591,6 +618,8 @@ static int jpeg_v2_5_start(struct amdgpu_device *adev) int i; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; ring = &adev->vcn.inst[i].ring_jpeg; /* disable anti hang mechanism */ WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JPEG_POWER_STATUS), 0, @@ -661,6 +690,8 @@ static int jpeg_v2_5_stop(struct amdgpu_device *adev) int i; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; /* reset JMI */ WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_JMI_CNTL), UVD_JMI_CNTL__SOFT_RESET_MASK, @@ -689,6 +720,8 @@ static int vcn_v2_5_start(struct amdgpu_device *adev) int i, j, k, r; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; /* disable register anti-hang mechanism */ WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS), 0, ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); @@ -702,6 +735,8 @@ static int vcn_v2_5_start(struct amdgpu_device *adev) vcn_v2_5_disable_clock_gating(adev); for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; /* enable VCPU clock */ WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK); @@ -749,6 +784,8 @@ static int vcn_v2_5_start(struct amdgpu_device *adev) vcn_v2_5_mc_resume(adev); for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; /* VCN global tiling registers */ WREG32_SOC15(UVD, i, mmUVD_GFX8_ADDR_CONFIG, adev->gfx.config.gb_addr_config); @@ -861,6 +898,8 @@ static 
int vcn_v2_5_stop(struct amdgpu_device *adev) return r; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; /* wait for vcn idle */ SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, r); if (r) @@ -1177,6 +1216,8 @@ static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev) int i; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs; adev->vcn.inst[i].ring_dec.me = i; DRM_INFO("VCN(%d) decode is enabled in VM mode\n", i); @@ -1188,6 +1229,8 @@ static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev) int i, j; for (j = 0; j < adev->vcn.num_vcn_inst; ++j) { + if (adev->vcn.harvest_config & (1 << j)) + continue; for (i = 0; i < adev->vcn.num_enc_rings; ++i) { adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs; adev->vcn.inst[j].ring_enc[i].me = j; @@ -1201,6 +1244,8 @@ static void vcn_v2_5_set_jpeg_ring_funcs(struct amdgpu_device *adev) int i; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; adev->vcn.inst[i].ring_jpeg.funcs = &vcn_v2_5_jpeg_ring_vm_funcs; adev->vcn.inst[i].ring_jpeg.me = i; DRM_INFO("VCN(%d) jpeg decode is enabled in VM mode\n", i); @@ -1213,6 +1258,8 @@ static bool vcn_v2_5_is_idle(void *handle) int i, ret = 1; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; ret &= (RREG32_SOC15(VCN, i, mmUVD_STATUS) == UVD_STATUS__IDLE); } @@ -1225,6 +1272,8 @@ static int vcn_v2_5_wait_for_idle(void *handle) int i, ret = 0; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, UVD_STATUS__IDLE, ret); if (ret) @@ -1331,6 +1380,8 @@ static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev) int i; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + if (adev->vcn.harvest_config & (1 << i)) + continue; adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 2; adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs; } -- cgit v1.2.3 From 6c54afc7e83e801cccfcf773c92f7b4f614face8 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Mon, 22 Apr 2019 17:14:59 +0800 Subject: drm/amdgpu: assign fb_start/end in mmhub v9.4 interface Align with mmhub v1.0. Signed-off-by: Le Ma Reviewed-by: Feifei Xu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c index c0eb8f0a2182..33b0de54a5da 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c @@ -39,10 +39,17 @@ u64 mmhub_v9_4_get_fb_location(struct amdgpu_device *adev) { /* The base should be same b/t 2 mmhubs on Acrturus. Read one here. */ u64 base = RREG32_SOC15(MMHUB, 0, mmVMSHAREDVC0_MC_VM_FB_LOCATION_BASE); + u64 top = RREG32_SOC15(MMHUB, 0, mmVMSHAREDVC0_MC_VM_FB_LOCATION_TOP); base &= VMSHAREDVC0_MC_VM_FB_LOCATION_BASE__FB_BASE_MASK; base <<= 24; + top &= VMSHAREDVC0_MC_VM_FB_LOCATION_TOP__FB_TOP_MASK; + top <<= 24; + + adev->gmc.fb_start = base; + adev->gmc.fb_end = top; + return base; } -- cgit v1.2.3 From 48c69cda452f4817d040908d743a818187767f17 Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Tue, 9 Jul 2019 13:10:53 -0500 Subject: drm/amdgpu: add pci DID for Arcturus GL-XL. Add device ids for Arcturus. 
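For context (not taken from this patch): the low 16 bits of each entry's
driver_data encode the ASIC type, while the upper bits carry flags such as
AMD_IS_APU seen in the Raven entries above. A minimal sketch of how the core
driver consumes such an entry, with an illustrative function name and assuming
the usual amdgpu internal types:

	/* Sketch only: decoding a pciidlist entry's driver_data at probe time. */
	static void decode_driver_data_sketch(struct amdgpu_device *adev,
					      unsigned long flags)
	{
		adev->asic_type = flags & AMD_ASIC_MASK;	/* e.g. CHIP_ARCTURUS */
		adev->flags = flags;				/* AMD_IS_APU, etc. */
	}
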
Signed-off-by: Feifei Xu Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index f2e8b4238efd..170c780b3bd7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -996,6 +996,10 @@ static const struct pci_device_id pciidlist[] = { /* Raven */ {0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU}, {0x1002, 0x15d8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU}, + /* Arcturus */ + {0x1002, 0x738C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS}, + {0x1002, 0x7388, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS}, + {0x1002, 0x738E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS}, /* Navi10 */ {0x1002, 0x7310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10}, {0x1002, 0x7312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10}, -- cgit v1.2.3 From ca1961a2f587a2e967060425f1d4d99f33e83d9f Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Thu, 27 Jun 2019 14:47:42 +0800 Subject: drm/amdgpu: add arct sdma golden settings Golden SDMA register settings from the hw team. Signed-off-by: Hawking Zhang Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 45 ++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 422f6b032c29..921a6dd9cbae 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -34,6 +34,18 @@ #include "sdma0/sdma0_4_2_sh_mask.h" #include "sdma1/sdma1_4_2_offset.h" #include "sdma1/sdma1_4_2_sh_mask.h" +#include "sdma2/sdma2_4_2_2_offset.h" +#include "sdma2/sdma2_4_2_2_sh_mask.h" +#include "sdma3/sdma3_4_2_2_offset.h" +#include "sdma3/sdma3_4_2_2_sh_mask.h" +#include "sdma4/sdma4_4_2_2_offset.h" +#include "sdma4/sdma4_4_2_2_sh_mask.h" +#include "sdma5/sdma5_4_2_2_offset.h" +#include "sdma5/sdma5_4_2_2_sh_mask.h" +#include "sdma6/sdma6_4_2_2_offset.h" +#include "sdma6/sdma6_4_2_2_sh_mask.h" +#include "sdma7/sdma7_4_2_2_offset.h" +#include "sdma7/sdma7_4_2_2_sh_mask.h" #include "hdp/hdp_4_0_offset.h" #include "sdma0/sdma0_4_1_default.h" @@ -210,6 +222,34 @@ static const struct soc15_reg_golden golden_settings_sdma_rv2[] = SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00003001) }; +static const struct soc15_reg_golden golden_settings_sdma_arct[] = +{ + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831f07), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002), + SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_CHICKEN_BITS, 0xfe931f07, 0x02831f07), + SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_GB_ADDR_CONFIG, 0x0000773f, 0x00004002), + SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002), + SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_CHICKEN_BITS, 0xfe931f07, 0x02831f07), + SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_GB_ADDR_CONFIG, 0x0000773f, 0x00004002), + 
SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002), + SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_CHICKEN_BITS, 0xfe931f07, 0x02831f07), + SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_GB_ADDR_CONFIG, 0x0000773f, 0x00004002), + SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002), + SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_CHICKEN_BITS, 0xfe931f07, 0x02831f07), + SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_GB_ADDR_CONFIG, 0x0000773f, 0x00004002), + SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002), + SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_CHICKEN_BITS, 0xfe931f07, 0x02831f07), + SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_GB_ADDR_CONFIG, 0x0000773f, 0x00004002), + SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002), + SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_CHICKEN_BITS, 0xfe931f07, 0x02831f07), + SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_GB_ADDR_CONFIG, 0x0000773f, 0x00004002), + SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002) +}; + static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 offset) { @@ -318,6 +358,11 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_sdma1_4_2, ARRAY_SIZE(golden_settings_sdma1_4_2)); break; + case CHIP_ARCTURUS: + soc15_program_register_sequence(adev, + golden_settings_sdma_arct, + ARRAY_SIZE(golden_settings_sdma_arct)); + break; case CHIP_RAVEN: soc15_program_register_sequence(adev, golden_settings_sdma_4_1, -- cgit v1.2.3 From 582870de568d0a275f747c4ec84c870c3b950902 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Thu, 27 Jun 2019 15:08:48 +0800 Subject: drm/amdgpu: add arct gc golden settings Golden GC register settings from the hw team. 
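For readers unfamiliar with the golden-settings tables: each
SOC15_REG_GOLDEN_VALUE(ip, inst, reg, and_mask, or_value) entry is applied at
init as a masked read-modify-write of the named register. A simplified sketch
of that semantics (not the exact soc15 helper):

	/* Sketch only: clear the bits selected by and_mask, then OR in
	 * or_value; bits outside and_mask keep their current value. */
	static u32 apply_golden_entry_sketch(u32 old_val, u32 and_mask,
					     u32 or_value)
	{
		return (old_val & ~and_mask) | or_value;
	}
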
Signed-off-by: Hawking Zhang Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index d8143b1e635a..22e5ea56adad 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -111,6 +111,19 @@ MODULE_FIRMWARE("amdgpu/arcturus_mec.bin"); MODULE_FIRMWARE("amdgpu/arcturus_mec2.bin"); MODULE_FIRMWARE("amdgpu/arcturus_rlc.bin"); +#define mmTCP_CHAN_STEER_0_ARCT 0x0b03 +#define mmTCP_CHAN_STEER_0_ARCT_BASE_IDX 0 +#define mmTCP_CHAN_STEER_1_ARCT 0x0b04 +#define mmTCP_CHAN_STEER_1_ARCT_BASE_IDX 0 +#define mmTCP_CHAN_STEER_2_ARCT 0x0b09 +#define mmTCP_CHAN_STEER_2_ARCT_BASE_IDX 0 +#define mmTCP_CHAN_STEER_3_ARCT 0x0b0a +#define mmTCP_CHAN_STEER_3_ARCT_BASE_IDX 0 +#define mmTCP_CHAN_STEER_4_ARCT 0x0b0b +#define mmTCP_CHAN_STEER_4_ARCT_BASE_IDX 0 +#define mmTCP_CHAN_STEER_5_ARCT 0x0b0c +#define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX 0 + static const struct soc15_reg_golden golden_settings_gc_9_0[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400), @@ -278,6 +291,18 @@ static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000) }; +static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] = +{ + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x10b0000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_0_ARCT, 0x3fffffff, 0x346f0a4e), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_1_ARCT, 0x3fffffff, 0x1c642ca), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_2_ARCT, 0x3fffffff, 0x26f45098), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_3_ARCT, 0x3fffffff, 0x2ebd9fe3), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_4_ARCT, 0x3fffffff, 0xb90f5b1), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_5_ARCT, 0x3ff, 0x135), +}; + static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] = { mmRLC_SRM_INDEX_CNTL_ADDR_0 - mmRLC_SRM_INDEX_CNTL_ADDR_0, @@ -347,6 +372,11 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_gc_9_0_vg20, ARRAY_SIZE(golden_settings_gc_9_0_vg20)); break; + case CHIP_ARCTURUS: + soc15_program_register_sequence(adev, + golden_settings_gc_9_4_1_arct, + ARRAY_SIZE(golden_settings_gc_9_4_1_arct)); + break; case CHIP_RAVEN: soc15_program_register_sequence(adev, golden_settings_gc_9_1, ARRAY_SIZE(golden_settings_gc_9_1)); -- cgit v1.2.3 From d57c3d5634a305ab1575e573ef0fb0af73ac2957 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Thu, 27 Jun 2019 18:05:30 +0800 Subject: drm/amdgpu: init arct external rev id Properly set the external silicon revision id. 
Signed-off-by: Hawking Zhang Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index a076001b326d..cd518f402b3f 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -1017,6 +1017,7 @@ static int soc15_common_early_init(void *handle) adev->asic_funcs = &vega20_asic_funcs; adev->cg_flags = 0; adev->pg_flags = 0; + adev->external_rev_id = adev->rev_id + 0x32; break; default: /* FIXME: not supported yet */ -- cgit v1.2.3 From bfa3a9bb9859849ef2e8c0ee4628e8ed5aa13024 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Fri, 28 Jun 2019 11:07:53 +0800 Subject: drm/amdgpu: keep stolen memory for arct Any dce register read back from arct is invalid. use hard code stolen memory for arct until we validate the s3. Signed-off-by: Hawking Zhang Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index a05040249a50..9b6afed0129f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -645,6 +645,7 @@ static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_VEGA10: case CHIP_RAVEN: + case CHIP_ARCTURUS: return true; case CHIP_VEGA12: case CHIP_VEGA20: @@ -945,7 +946,7 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev) static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) { - u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL); + u32 d1vga_control; unsigned size; /* @@ -955,6 +956,7 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) if (gmc_v9_0_keep_stolen_memory(adev)) return 9 * 1024 * 1024; + d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL); if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) { size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */ } else { -- cgit v1.2.3 From 22f5ea4ca04cb68910bccdc8c6c0f1a44237570f Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Fri, 28 Jun 2019 13:22:32 +0800 Subject: drm/amdgpu: init gds config for arct arct has 4KB gds (4 banks inside) so the max_wave_id should be 0xfff Signed-off-by: Hawking Zhang Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 22e5ea56adad..b269af125cd1 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -5486,6 +5486,7 @@ static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev) adev->gds.gds_size = 0x10000; break; case CHIP_RAVEN: + case CHIP_ARCTURUS: adev->gds.gds_size = 0x1000; break; default: @@ -5507,6 +5508,9 @@ static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev) else adev->gds.gds_compute_max_wave_id = 0x15f; /* raven1 */ break; + case CHIP_ARCTURUS: + adev->gds.gds_compute_max_wave_id = 0xfff; + break; default: /* this really depends on the chip */ adev->gds.gds_compute_max_wave_id = 0x7ff; -- cgit v1.2.3 From a80955176ddd89ced22181d738ce8d77b90d0802 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Fri, 28 Jun 2019 15:08:04 +0800 Subject: drm/amdgpu: clean 
up nonexistent firmware declaration for Arcturus CPG firmwares are not used. Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index b269af125cd1..76e7e103f245 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -104,9 +104,6 @@ MODULE_FIRMWARE("amdgpu/raven2_mec2.bin"); MODULE_FIRMWARE("amdgpu/raven2_rlc.bin"); MODULE_FIRMWARE("amdgpu/raven_kicker_rlc.bin"); -MODULE_FIRMWARE("amdgpu/arcturus_ce.bin"); -MODULE_FIRMWARE("amdgpu/arcturus_pfp.bin"); -MODULE_FIRMWARE("amdgpu/arcturus_me.bin"); MODULE_FIRMWARE("amdgpu/arcturus_mec.bin"); MODULE_FIRMWARE("amdgpu/arcturus_mec2.bin"); MODULE_FIRMWARE("amdgpu/arcturus_rlc.bin"); -- cgit v1.2.3 From 54bd77f3d08d4d044b13041286817d9205ecf688 Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Fri, 28 Jun 2019 05:45:39 -0400 Subject: amd/powerplay: No SW XGMI dpm for Arcturus rev 2 xgmi dpm is handled by the SMU. Signed-off-by: Yong Zhao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 2 +- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 11 +++++++++++ drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 1 + 3 files changed, 13 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 41e648d56eca..4e8d60eec0fe 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -248,7 +248,7 @@ int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate) dev_dbg(adev->dev, "Set xgmi pstate %d.\n", pstate); - if (is_support_sw_smu(adev)) + if (is_support_sw_smu_xgmi(adev)) ret = smu_set_xgmi_pstate(&adev->smu, pstate); if (ret) dev_err(adev->dev, diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index d977d68320c9..c8048b865161 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -370,6 +370,17 @@ bool is_support_sw_smu(struct amdgpu_device *adev) return false; } +bool is_support_sw_smu_xgmi(struct amdgpu_device *adev) +{ + if (amdgpu_dpm != 1) + return false; + + if (adev->asic_type == CHIP_VEGA20) + return true; + + return false; +} + int smu_sys_get_pp_table(struct smu_context *smu, void **table) { struct smu_table_context *smu_table = &smu->smu_table; diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index d940e7bef534..514d31518853 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h @@ -951,6 +951,7 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int void *table_data, bool drv2smu); bool is_support_sw_smu(struct amdgpu_device *adev); +bool is_support_sw_smu_xgmi(struct amdgpu_device *adev); int smu_reset(struct smu_context *smu); int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor, void *data, uint32_t *size); -- cgit v1.2.3 From 3baa24f0fc11d826313c312c4154a43a47474367 Mon Sep 17 00:00:00 2001 From: Oak Zeng Date: Tue, 9 Jul 2019 13:13:33 -0500 Subject: drm/amdkfd: Add arcturus CWSR trap handler CWSR (compute wave save/restore) is used for preempting compute queues. 
Signed-off-by: Oak Zeng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h | 198 ++++++ .../drm/amd/amdkfd/cwsr_trap_handler_arcturus.asm | 746 +++++++++++++++++++++ drivers/gpu/drm/amd/amdkfd/kfd_device.c | 4 + 3 files changed, 948 insertions(+) create mode 100644 drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_arcturus.asm (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h index 826913c70766..ee700a69c68e 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h @@ -860,3 +860,201 @@ static const uint32_t cwsr_trap_gfx10_hex[] = { 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0x00000000, }; +static const uint32_t cwsr_trap_arcturus_hex[] = { + 0xbf820001, 0xbf8200ca, + 0xb8f0f802, 0x89708670, + 0xb8f1f803, 0x8671ff71, + 0x00000400, 0xbf850008, + 0xb8f1f803, 0x8671ff71, + 0x000001ff, 0xbf850001, + 0x806c846c, 0x866dff6d, + 0x0000ffff, 0xbe801f6c, + 0xb8f1f803, 0x8671ff71, + 0x00000100, 0xbf840006, + 0xbef60080, 0xb9760203, + 0x866dff6d, 0x0000ffff, + 0x80ec886c, 0x82ed806d, + 0xbef60080, 0xb9760283, + 0xbef20068, 0xbef30069, + 0xb8f62407, 0x8e769b76, + 0x876d766d, 0xb8f603c7, + 0x8e769a76, 0x876d766d, + 0xb8f6f807, 0x8676ff76, + 0x00007fff, 0xb976f807, + 0xbeee007e, 0xbeef007f, + 0xbefe0180, 0xbf900004, + 0xbf8e0002, 0xbf88fffe, + 0xbef4007e, 0x8675ff7f, + 0x0000ffff, 0x8775ff75, + 0x00040000, 0xbef60080, + 0xbef700ff, 0x00807fac, + 0x8676ff7f, 0x08000000, + 0x8f768376, 0x87777677, + 0x8676ff7f, 0x70000000, + 0x8f768176, 0x87777677, + 0xbefb007c, 0xbefa0080, + 0xbf8a0000, 0x8676ff7f, + 0x04000000, 0xbf840012, + 0xbefe00c1, 0xbeff00c1, + 0xb8f14306, 0x8671c171, + 0xbf84000d, 0x8e718671, + 0x8e718271, 0xbef60071, + 0xbef600ff, 0x01000000, + 0xbefc0080, 0xbf800000, + 0x807cff7c, 0x00000100, + 0x807aff7a, 0x00000100, + 0xbf0a717c, 0xbf85fffa, + 0xbefe00c1, 0xbeff00c1, + 0xb8f12a05, 0x80718171, + 0x8e718271, 0x8e768871, + 0xbef600ff, 0x01000000, + 0xbefc0080, 0xbf11017c, + 0x8071ff71, 0x00001000, + 0x7e000300, 0xe0724000, + 0x7a1d0000, 0x807c817c, + 0x807aff7a, 0x00000100, + 0xbf0a717c, 0xbf85fff8, + 0xbf9c0000, 0xbefe00c1, + 0xbeff00c1, 0xb8f12a05, + 0x80718171, 0x8e718271, + 0x8e768871, 0xbef600ff, + 0x01000000, 0xbefc0080, + 0xbf11017c, 0x8071ff71, + 0x00001000, 0xd3d84000, + 0x18000100, 0x7e000000, + 0x7e000000, 0xe0724000, + 0x7a1d0000, 0x807c817c, + 0x807aff7a, 0x00000100, + 0xbf0a717c, 0xbf85fff5, + 0xbf9c0000, 0xb8f11605, + 0x80718171, 0x8e718471, + 0x8e768871, 0xbef600ff, + 0x01000000, 0xbefc0080, + 0xbf800000, 0xbe802a00, + 0x7e000200, 0xe0724000, + 0x7a1d0000, 0x807aff7a, + 0x00000100, 0x807c817c, + 0xbf0a717c, 0xbf85fff7, + 0xbef60084, 0xbef600ff, + 0x01000000, 0x7e00027b, + 0xe0724000, 0x7a1d0000, + 0x807aff7a, 0x00000100, + 0x7e00026c, 0xe0724000, + 0x7a1d0000, 0x807aff7a, + 0x00000100, 0x7e00026d, + 0xe0724000, 0x7a1d0000, + 0x807aff7a, 0x00000100, + 0x7e00026e, 0xe0724000, + 0x7a1d0000, 0x807aff7a, + 0x00000100, 0x7e00026f, + 0xe0724000, 0x7a1d0000, + 0x807aff7a, 0x00000100, + 0x7e000270, 0xe0724000, + 0x7a1d0000, 0x807aff7a, + 0x00000100, 0xb8f1f803, + 0x7e000271, 0xe0724000, + 0x7a1d0000, 0x807aff7a, + 0x00000100, 0x7e000272, + 0xe0724000, 0x7a1d0000, + 0x807aff7a, 0x00000100, + 0x7e000273, 0xe0724000, + 0x7a1d0000, 0x807aff7a, + 0x00000100, 0xb8fbf801, + 0x7e00027b, 0xe0724000, + 0x7a1d0000, 0x807aff7a, + 0x00000100, 0xbf8200bb, + 0xbef4007e, 0x8675ff7f, + 0x0000ffff, 0x8775ff75, + 0x00040000, 0xbef60080, + 0xbef700ff, 
0x00807fac, + 0x8672ff7f, 0x08000000, + 0x8f728372, 0x87777277, + 0x8672ff7f, 0x70000000, + 0x8f728172, 0x87777277, + 0xbef80080, 0x8672ff7f, + 0x04000000, 0xbf840011, + 0xbefe00c1, 0xbeff00c1, + 0xb8ef4306, 0x866fc16f, + 0xbf84000c, 0x8e6f866f, + 0x8e6f826f, 0xbef6006f, + 0xbef600ff, 0x01000000, + 0xbefc0080, 0x807cff7c, + 0x00000100, 0x8078ff78, + 0x00000100, 0xbf0a6f7c, + 0xbf85fffa, 0xbefe00c1, + 0xbeff00c1, 0xb8ef2a05, + 0x806f816f, 0x8e6f826f, + 0x8e76886f, 0xbef600ff, + 0x01000000, 0xbef20078, + 0x8078ff78, 0x00000100, + 0xbefc0081, 0xbf11087c, + 0x806fff6f, 0x00008000, + 0xe0524000, 0x781d0000, + 0xbf8c0f70, 0x7e000300, + 0x807c817c, 0x8078ff78, + 0x00000100, 0xbf0a6f7c, + 0xbf85fff7, 0xbf9c0000, + 0xbefe00c1, 0xbeff00c1, + 0xb8ef2a05, 0x806f816f, + 0x8e6f826f, 0x8e76886f, + 0xbef600ff, 0x01000000, + 0xbefc0080, 0xbf11087c, + 0x806fff6f, 0x00008000, + 0xe0524000, 0x781d0000, + 0xbf8c0f70, 0xd3d94000, + 0x18000100, 0x807c817c, + 0x8078ff78, 0x00000100, + 0xbf0a6f7c, 0xbf85fff6, + 0xbf9c0000, 0xe0524000, + 0x721d0000, 0xb8ef1605, + 0x806f816f, 0x8e6f846f, + 0x8e76886f, 0xbef600ff, + 0x01000000, 0xc0211cba, + 0x00000078, 0x8078ff78, + 0x00000100, 0xbefc0081, + 0xc021003a, 0x00000078, + 0x8078ff78, 0x00000100, + 0xbf8cc07f, 0xbe802c00, + 0xbf800000, 0x807c817c, + 0xbf0a6f7c, 0xbf85fff6, + 0xbe800072, 0xbef60084, + 0xbef600ff, 0x01000000, + 0xc0211bfa, 0x00000078, + 0x8078ff78, 0x00000100, + 0xc0211b3a, 0x00000078, + 0x8078ff78, 0x00000100, + 0xc0211b7a, 0x00000078, + 0x8078ff78, 0x00000100, + 0xc0211eba, 0x00000078, + 0x8078ff78, 0x00000100, + 0xc0211efa, 0x00000078, + 0x8078ff78, 0x00000100, + 0xc0211c3a, 0x00000078, + 0x8078ff78, 0x00000100, + 0xc0211c7a, 0x00000078, + 0x8078ff78, 0x00000100, + 0xc0211a3a, 0x00000078, + 0x8078ff78, 0x00000100, + 0xc0211a7a, 0x00000078, + 0x8078ff78, 0x00000100, + 0xc0211cfa, 0x00000078, + 0x8078ff78, 0x00000100, + 0xbf8cc07f, 0xbef2006d, + 0x866dff72, 0x0000ffff, + 0xbefc006f, 0xbefe007a, + 0xbeff007b, 0x866f71ff, + 0x000003ff, 0xb96f4803, + 0x866f71ff, 0xfffff800, + 0x8f6f8b6f, 0xb96fa2c3, + 0xb973f801, 0x866fff72, + 0xf8000000, 0x8f6f9b6f, + 0x8e6f906f, 0xbef30080, + 0x87736f73, 0x866fff72, + 0x04000000, 0x8f6f9a6f, + 0x8e6f8f6f, 0x87736f73, + 0x866fff70, 0x00800000, + 0x8f6f976f, 0xb973f807, + 0x86fe7e7e, 0x86ea6a6a, + 0xb970f802, 0xbf8a0000, + 0x95806f6c, 0xbf810000, +}; diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_arcturus.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_arcturus.asm new file mode 100644 index 000000000000..b83e2a643ddb --- /dev/null +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_arcturus.asm @@ -0,0 +1,746 @@ +shader main + +asic(DEFAULT) + +type(CS) + +/*************************************************************************/ +/* control on how to run the shader */ +/*************************************************************************/ +//any hack that needs to be made to run this code in EMU (either becasue various EMU code are not ready or no compute save & restore in EMU run) +var EMU_RUN_HACK = 0 +var EMU_RUN_HACK_RESTORE_NORMAL = 0 +var EMU_RUN_HACK_SAVE_NORMAL_EXIT = 0 +var EMU_RUN_HACK_SAVE_SINGLE_WAVE = 0 +var EMU_RUN_HACK_SAVE_FIRST_TIME = 0 //for interrupted restore in which the first save is through EMU_RUN_HACK +var SAVE_LDS = 0 +var WG_BASE_ADDR_LO = 0x9000a000 +var WG_BASE_ADDR_HI = 0x0 +var WAVE_SPACE = 0x6000 //memory size that each wave occupies in workgroup state mem +var CTX_SAVE_CONTROL = 0x0 +var CTX_RESTORE_CONTROL = CTX_SAVE_CONTROL +var SIM_RUN_HACK = 0 //any hack that needs to 
be made to run this code in SIM (either becasue various RTL code are not ready or no compute save & restore in RTL run) +var SGPR_SAVE_USE_SQC = 0 //use SQC D$ to do the write +var USE_MTBUF_INSTEAD_OF_MUBUF = 0 //need to change BUF_DATA_FORMAT in S_SAVE_BUF_RSRC_WORD3_MISC from 0 to BUF_DATA_FORMAT_32 if set to 1 (i.e. 0x00827FAC) +var SWIZZLE_EN = 0 //whether we use swizzled buffer addressing + +/**************************************************************************/ +/* variables */ +/**************************************************************************/ +var SQ_WAVE_STATUS_INST_ATC_SHIFT = 23 +var SQ_WAVE_STATUS_INST_ATC_MASK = 0x00800000 +var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006 + +var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12 +var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9 +var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT = 8 +var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE = 6 +var SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SHIFT = 24 +var SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SIZE = 3 //FIXME sq.blk still has 4 bits at this time while SQ programming guide has 3 bits + +var SQ_WAVE_TRAPSTS_SAVECTX_MASK = 0x400 +var SQ_WAVE_TRAPSTS_EXCE_MASK = 0x1FF // Exception mask +var SQ_WAVE_TRAPSTS_SAVECTX_SHIFT = 10 +var SQ_WAVE_TRAPSTS_MEM_VIOL_MASK = 0x100 +var SQ_WAVE_TRAPSTS_MEM_VIOL_SHIFT = 8 +var SQ_WAVE_TRAPSTS_PRE_SAVECTX_MASK = 0x3FF +var SQ_WAVE_TRAPSTS_PRE_SAVECTX_SHIFT = 0x0 +var SQ_WAVE_TRAPSTS_PRE_SAVECTX_SIZE = 10 +var SQ_WAVE_TRAPSTS_POST_SAVECTX_MASK = 0xFFFFF800 +var SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT = 11 +var SQ_WAVE_TRAPSTS_POST_SAVECTX_SIZE = 21 + +var SQ_WAVE_IB_STS_RCNT_SHIFT = 16 //FIXME +var SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT = 15 //FIXME +var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG = 0x00007FFF //FIXME + +var SQ_BUF_RSRC_WORD1_ATC_SHIFT = 24 +var SQ_BUF_RSRC_WORD3_MTYPE_SHIFT = 27 + + +/* Save */ +var S_SAVE_BUF_RSRC_WORD1_STRIDE = 0x00040000 //stride is 4 bytes +var S_SAVE_BUF_RSRC_WORD3_MISC = 0x00807FAC //SQ_SEL_X/Y/Z/W, BUF_NUM_FORMAT_FLOAT, (0 for MUBUF stride[17:14] when ADD_TID_ENABLE and BUF_DATA_FORMAT_32 for MTBUF), ADD_TID_ENABLE + +var S_SAVE_SPI_INIT_ATC_MASK = 0x08000000 //bit[27]: ATC bit +var S_SAVE_SPI_INIT_ATC_SHIFT = 27 +var S_SAVE_SPI_INIT_MTYPE_MASK = 0x70000000 //bit[30:28]: Mtype +var S_SAVE_SPI_INIT_MTYPE_SHIFT = 28 +var S_SAVE_SPI_INIT_FIRST_WAVE_MASK = 0x04000000 //bit[26]: FirstWaveInTG +var S_SAVE_SPI_INIT_FIRST_WAVE_SHIFT = 26 + +var S_SAVE_PC_HI_RCNT_SHIFT = 27 //FIXME check with Brian to ensure all fields other than PC[47:0] can be used +var S_SAVE_PC_HI_RCNT_MASK = 0xF8000000 //FIXME +var S_SAVE_PC_HI_FIRST_REPLAY_SHIFT = 26 //FIXME +var S_SAVE_PC_HI_FIRST_REPLAY_MASK = 0x04000000 //FIXME + +var s_save_spi_init_lo = exec_lo +var s_save_spi_init_hi = exec_hi + +var s_save_pc_lo = ttmp0 //{TTMP1, TTMP0} = {3h0,pc_rewind[3:0], HT[0],trapID[7:0], PC[47:0]} +var s_save_pc_hi = ttmp1 +var s_save_exec_lo = ttmp2 +var s_save_exec_hi = ttmp3 +var s_save_status = ttmp4 +var s_save_trapsts = ttmp5 //not really used until the end of the SAVE routine +var s_save_xnack_mask_lo = ttmp6 +var s_save_xnack_mask_hi = ttmp7 +var s_save_buf_rsrc0 = ttmp8 +var s_save_buf_rsrc1 = ttmp9 +var s_save_buf_rsrc2 = ttmp10 +var s_save_buf_rsrc3 = ttmp11 + +var s_save_mem_offset = ttmp14 +var s_save_alloc_size = s_save_trapsts //conflict +var s_save_tmp = s_save_buf_rsrc2 //shared with s_save_buf_rsrc2 (conflict: should not use mem access with s_save_tmp at the same time) +var s_save_m0 = ttmp15 + +/* Restore */ +var S_RESTORE_BUF_RSRC_WORD1_STRIDE = S_SAVE_BUF_RSRC_WORD1_STRIDE +var 
S_RESTORE_BUF_RSRC_WORD3_MISC = S_SAVE_BUF_RSRC_WORD3_MISC + +var S_RESTORE_SPI_INIT_ATC_MASK = 0x08000000 //bit[27]: ATC bit +var S_RESTORE_SPI_INIT_ATC_SHIFT = 27 +var S_RESTORE_SPI_INIT_MTYPE_MASK = 0x70000000 //bit[30:28]: Mtype +var S_RESTORE_SPI_INIT_MTYPE_SHIFT = 28 +var S_RESTORE_SPI_INIT_FIRST_WAVE_MASK = 0x04000000 //bit[26]: FirstWaveInTG +var S_RESTORE_SPI_INIT_FIRST_WAVE_SHIFT = 26 + +var S_RESTORE_PC_HI_RCNT_SHIFT = S_SAVE_PC_HI_RCNT_SHIFT +var S_RESTORE_PC_HI_RCNT_MASK = S_SAVE_PC_HI_RCNT_MASK +var S_RESTORE_PC_HI_FIRST_REPLAY_SHIFT = S_SAVE_PC_HI_FIRST_REPLAY_SHIFT +var S_RESTORE_PC_HI_FIRST_REPLAY_MASK = S_SAVE_PC_HI_FIRST_REPLAY_MASK + +var s_restore_spi_init_lo = exec_lo +var s_restore_spi_init_hi = exec_hi + +var s_restore_mem_offset = ttmp12 +var s_restore_alloc_size = ttmp3 +var s_restore_tmp = ttmp6 +var s_restore_mem_offset_save = s_restore_tmp //no conflict + +var s_restore_m0 = s_restore_alloc_size //no conflict + +var s_restore_mode = ttmp7 + +var s_restore_pc_lo = ttmp0 +var s_restore_pc_hi = ttmp1 +var s_restore_exec_lo = ttmp14 +var s_restore_exec_hi = ttmp15 +var s_restore_status = ttmp4 +var s_restore_trapsts = ttmp5 +var s_restore_xnack_mask_lo = xnack_mask_lo +var s_restore_xnack_mask_hi = xnack_mask_hi +var s_restore_buf_rsrc0 = ttmp8 +var s_restore_buf_rsrc1 = ttmp9 +var s_restore_buf_rsrc2 = ttmp10 +var s_restore_buf_rsrc3 = ttmp11 + +/**************************************************************************/ +/* trap handler entry points */ +/**************************************************************************/ + if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL)) //hack to use trap_id for determining save/restore + //FIXME VCCZ un-init assertion s_getreg_b32 s_save_status, hwreg(HW_REG_STATUS) //save STATUS since we will change SCC + s_and_b32 s_save_tmp, s_save_pc_hi, 0xffff0000 //change SCC + s_cmp_eq_u32 s_save_tmp, 0x007e0000 //Save: trap_id = 0x7e. Restore: trap_id = 0x7f. + s_cbranch_scc0 L_JUMP_TO_RESTORE //do not need to recover STATUS here since we are going to RESTORE + //FIXME s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status //need to recover STATUS since we are going to SAVE + s_branch L_SKIP_RESTORE //NOT restore, SAVE actually + else + s_branch L_SKIP_RESTORE //NOT restore. might be a regular trap or save + end + +L_JUMP_TO_RESTORE: + s_branch L_RESTORE //restore + +L_SKIP_RESTORE: + + s_getreg_b32 s_save_status, hwreg(HW_REG_STATUS) //save STATUS since we will change SCC + s_andn2_b32 s_save_status, s_save_status, SQ_WAVE_STATUS_SPI_PRIO_MASK //check whether this is for save + s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS) + s_and_b32 s_save_trapsts, s_save_trapsts, SQ_WAVE_TRAPSTS_SAVECTX_MASK //check whether this is for save + s_cbranch_scc1 L_SAVE //this is the operation for save + + // ********* Handle non-CWSR traps ******************* + if (!EMU_RUN_HACK) + s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS) + s_and_b32 s_save_trapsts, s_save_trapsts, SQ_WAVE_TRAPSTS_EXCE_MASK // Check whether it is an exception + s_cbranch_scc1 L_EXCP_CASE // Exception, jump back to the shader program directly. 
+ s_add_u32 ttmp0, ttmp0, 4 // S_TRAP case, add 4 to ttmp0 + + L_EXCP_CASE: + s_and_b32 ttmp1, ttmp1, 0xFFFF + s_rfe_b64 [ttmp0, ttmp1] + end + // ********* End handling of non-CWSR traps ******************* + +/**************************************************************************/ +/* save routine */ +/**************************************************************************/ + +L_SAVE: + + //check whether there is mem_viol + s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS) + s_and_b32 s_save_trapsts, s_save_trapsts, SQ_WAVE_TRAPSTS_MEM_VIOL_MASK + s_cbranch_scc0 L_NO_PC_REWIND + + //if so, need rewind PC assuming GDS operation gets NACKed + s_mov_b32 s_save_tmp, 0 //clear mem_viol bit + s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_MEM_VIOL_SHIFT, 1), s_save_tmp //clear mem_viol bit + s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff //pc[47:32] + s_sub_u32 s_save_pc_lo, s_save_pc_lo, 8 //pc[31:0]-8 + s_subb_u32 s_save_pc_hi, s_save_pc_hi, 0x0 // -scc + +L_NO_PC_REWIND: + s_mov_b32 s_save_tmp, 0 //clear saveCtx bit + s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_SAVECTX_SHIFT, 1), s_save_tmp //clear saveCtx bit + + s_mov_b32 s_save_xnack_mask_lo, xnack_mask_lo //save XNACK_MASK + s_mov_b32 s_save_xnack_mask_hi, xnack_mask_hi + s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_RCNT_SHIFT, SQ_WAVE_IB_STS_RCNT_SIZE) //save RCNT + s_lshl_b32 s_save_tmp, s_save_tmp, S_SAVE_PC_HI_RCNT_SHIFT + s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp + s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT, SQ_WAVE_IB_STS_FIRST_REPLAY_SIZE) //save FIRST_REPLAY + s_lshl_b32 s_save_tmp, s_save_tmp, S_SAVE_PC_HI_FIRST_REPLAY_SHIFT + s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp + s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS) //clear RCNT and FIRST_REPLAY in IB_STS + s_and_b32 s_save_tmp, s_save_tmp, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG + + s_setreg_b32 hwreg(HW_REG_IB_STS), s_save_tmp + + /* inform SPI the readiness and wait for SPI's go signal */ + s_mov_b32 s_save_exec_lo, exec_lo //save EXEC and use EXEC for the go signal from SPI + s_mov_b32 s_save_exec_hi, exec_hi + s_mov_b64 exec, 0x0 //clear EXEC to get ready to receive + if (EMU_RUN_HACK) + + else + s_sendmsg sendmsg(MSG_SAVEWAVE) //send SPI a message and wait for SPI's write to EXEC + end + + L_SLEEP: + s_sleep 0x2 + + if (EMU_RUN_HACK) + + else + s_cbranch_execz L_SLEEP + end + + + /* setup Resource Contants */ + if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_SAVE_SINGLE_WAVE)) + //calculate wd_addr using absolute thread id + v_readlane_b32 s_save_tmp, v9, 0 + s_lshr_b32 s_save_tmp, s_save_tmp, 6 + s_mul_i32 s_save_tmp, s_save_tmp, WAVE_SPACE + s_add_i32 s_save_spi_init_lo, s_save_tmp, WG_BASE_ADDR_LO + s_mov_b32 s_save_spi_init_hi, WG_BASE_ADDR_HI + s_and_b32 s_save_spi_init_hi, s_save_spi_init_hi, CTX_SAVE_CONTROL + else + end + if ((EMU_RUN_HACK) && (EMU_RUN_HACK_SAVE_SINGLE_WAVE)) + s_add_i32 s_save_spi_init_lo, s_save_tmp, WG_BASE_ADDR_LO + s_mov_b32 s_save_spi_init_hi, WG_BASE_ADDR_HI + s_and_b32 s_save_spi_init_hi, s_save_spi_init_hi, CTX_SAVE_CONTROL + else + end + + + s_mov_b32 s_save_buf_rsrc0, s_save_spi_init_lo //base_addr_lo + s_and_b32 s_save_buf_rsrc1, s_save_spi_init_hi, 0x0000FFFF //base_addr_hi + s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE + s_mov_b32 s_save_buf_rsrc2, 0 //NUM_RECORDS initial value = 0 (in bytes) although not neccessarily inited + s_mov_b32 s_save_buf_rsrc3, S_SAVE_BUF_RSRC_WORD3_MISC + s_and_b32 s_save_tmp, s_save_spi_init_hi, 
S_SAVE_SPI_INIT_ATC_MASK + s_lshr_b32 s_save_tmp, s_save_tmp, (S_SAVE_SPI_INIT_ATC_SHIFT-SQ_BUF_RSRC_WORD1_ATC_SHIFT) //get ATC bit into position + s_or_b32 s_save_buf_rsrc3, s_save_buf_rsrc3, s_save_tmp //or ATC TODO: ATC deprecated, no need anymore. + s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_MTYPE_MASK + s_lshr_b32 s_save_tmp, s_save_tmp, (S_SAVE_SPI_INIT_MTYPE_SHIFT-SQ_BUF_RSRC_WORD3_MTYPE_SHIFT) //get MTYPE bits into position + s_or_b32 s_save_buf_rsrc3, s_save_buf_rsrc3, s_save_tmp //or MTYPE + + s_mov_b32 s_save_m0, m0 //save M0 + + /* global mem offset */ + s_mov_b32 s_save_mem_offset, 0x0 //mem offset initial value = 0 + + + /* the first wave in the threadgroup */ + s_barrier //FIXME not performance-optimal "LDS is used? wait for other waves in the same TG" + s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_FIRST_WAVE_MASK //exec is still used here + s_cbranch_scc0 L_SAVE_VGPR + + /* save LDS */ + ////////////////////////////// + L_SAVE_LDS: + + s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on + s_mov_b32 exec_hi, 0xFFFFFFFF + + s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE) //lds_size + s_and_b32 s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF //lds_size is zero? + s_cbranch_scc0 L_SAVE_VGPR //no lds used? jump to L_SAVE_VGPR + s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 6 //LDS size in dwords = lds_size * 64dw + s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 2 //LDS size in bytes + s_mov_b32 s_save_buf_rsrc2, s_save_alloc_size //NUM_RECORDS in bytes + if (SWIZZLE_EN) + s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? + else + s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes + end + s_mov_b32 m0, 0x0 //lds_offset initial value = 0 + s_nop 0x0 //Manually inserted wait states + + L_SAVE_LDS_LOOP: + if (SAVE_LDS) + buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 + end + s_add_u32 m0, m0, 256 //every buffer_store_lds does 256 bytes + s_add_u32 s_save_mem_offset, s_save_mem_offset, 256 //mem offset increased by 256 bytes + s_cmp_lt_u32 m0, s_save_alloc_size //scc=(m0 < s_save_alloc_size) ? 1 : 0 + s_cbranch_scc1 L_SAVE_LDS_LOOP //LDS save is complete? + + + /* save VGPRs */ + ////////////////////////////// + L_SAVE_VGPR: + + s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on + s_mov_b32 exec_hi, 0xFFFFFFFF + + s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE) //vpgr_size + s_add_u32 s_save_alloc_size, s_save_alloc_size, 1 + s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value) //FIXME for GFX, zero is possible + s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 8 //NUM_RECORDS in bytes (64 threads*4) + if (SWIZZLE_EN) + s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? 
+ else + s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes + end + + s_mov_b32 m0, 0x0 //VGPR initial index value =0 + s_set_gpr_idx_on m0, 0x1 //M0[7:0] = M0[7:0] and M0[15:12] = 0x1 + s_add_u32 s_save_alloc_size, s_save_alloc_size, 0x1000 //add 0x1000 since we compare m0 against it later + + L_SAVE_VGPR_LOOP: + v_mov_b32 v0, v0 //v0 = v[0+m0] + + if(USE_MTBUF_INSTEAD_OF_MUBUF) + tbuffer_store_format_x v0, v0, s_save_buf_rsrc0, s_save_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1 + else + buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 + end + + s_add_u32 m0, m0, 1 //next vgpr index + s_add_u32 s_save_mem_offset, s_save_mem_offset, 256 //every buffer_store_dword does 256 bytes + s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0 + s_cbranch_scc1 L_SAVE_VGPR_LOOP //VGPR save is complete? + s_set_gpr_idx_off + + + /* save ACC_VGPRs */ + ////////////////////////////// + L_SAVE_ACC_VGPR: + + s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on + s_mov_b32 exec_hi, 0xFFFFFFFF + + s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE) //vpgr_size + s_add_u32 s_save_alloc_size, s_save_alloc_size, 1 + s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value) //FIXME for GFX, zero is possible + s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 8 //NUM_RECORDS in bytes (64 threads*4) + if (SWIZZLE_EN) + s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? + else + s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes + end + + s_mov_b32 m0, 0x0 //VGPR initial index value =0 + s_set_gpr_idx_on m0, 0x1 //M0[7:0] = M0[7:0] and M0[15:12] = 0x1 + s_add_u32 s_save_alloc_size, s_save_alloc_size, 0x1000 //add 0x1000 since we compare m0 against it later + + L_SAVE_ACC_VGPR_LOOP: + v_accvgpr_read v0, v0 + v_nop + v_nop + if(USE_MTBUF_INSTEAD_OF_MUBUF) + tbuffer_store_format_x v0, v0, s_save_buf_rsrc0, s_save_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1 + else + buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 + end + + s_add_u32 m0, m0, 1 //next vgpr index + s_add_u32 s_save_mem_offset, s_save_mem_offset, 256 //every buffer_store_dword does 256 bytes + s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0 + s_cbranch_scc1 L_SAVE_ACC_VGPR_LOOP //VGPR save is complete? + s_set_gpr_idx_off + + + /* save SGPRs */ + ////////////////////////////// + s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SIZE) //spgr_size + s_add_u32 s_save_alloc_size, s_save_alloc_size, 1 + s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 4 //Number of SGPRs = (sgpr_size + 1) * 16 (non-zero value) + + if (SGPR_SAVE_USE_SQC) + s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 2 //NUM_RECORDS in bytes + else + s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 8 //NUM_RECORDS in bytes (64 threads) + end + + if (SWIZZLE_EN) + s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? 
+ else + s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes + end + + s_mov_b32 m0, 0x0 //SGPR initial index value =0 + s_nop 0x0 //Manually inserted wait states + + L_SAVE_SGPR_LOOP: + s_movrels_b32 s0, s0 //s0 = s[0+m0] + write_sgpr_to_mem(s0, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //PV: the best performance should be using s_buffer_store_dwordx4 + s_add_u32 m0, m0, 1 //next sgpr index + s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0 + s_cbranch_scc1 L_SAVE_SGPR_LOOP //SGPR save is complete? + + /* save HW registers */ + ////////////////////////////// + L_SAVE_HWREG: + s_mov_b32 s_save_buf_rsrc2, 0x4 //NUM_RECORDS in bytes + if (SWIZZLE_EN) + s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? + else + s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes + end + + + write_sgpr_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //M0 + + if ((EMU_RUN_HACK) && (EMU_RUN_HACK_SAVE_FIRST_TIME)) + s_add_u32 s_save_pc_lo, s_save_pc_lo, 4 //pc[31:0]+4 + s_addc_u32 s_save_pc_hi, s_save_pc_hi, 0x0 //carry bit over + end + + write_sgpr_to_mem(s_save_pc_lo, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //PC + write_sgpr_to_mem(s_save_pc_hi, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) + write_sgpr_to_mem(s_save_exec_lo, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //EXEC + write_sgpr_to_mem(s_save_exec_hi, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) + write_sgpr_to_mem(s_save_status, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //STATUS + + //s_save_trapsts conflicts with s_save_alloc_size + s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS) + write_sgpr_to_mem(s_save_trapsts, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //TRAPSTS + + write_sgpr_to_mem(s_save_xnack_mask_lo, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //XNACK_MASK_LO + write_sgpr_to_mem(s_save_xnack_mask_hi, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //XNACK_MASK_HI + + //use s_save_tmp would introduce conflict here between s_save_tmp and s_save_buf_rsrc2 + s_getreg_b32 s_save_m0, hwreg(HW_REG_MODE) //MODE + write_sgpr_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) + + /* S_PGM_END_SAVED */ //FIXME graphics ONLY + if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_SAVE_NORMAL_EXIT)) + s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff //pc[47:32] + s_add_u32 s_save_pc_lo, s_save_pc_lo, 4 //pc[31:0]+4 + s_addc_u32 s_save_pc_hi, s_save_pc_hi, 0x0 //carry bit over + s_rfe_b64 s_save_pc_lo //Return to the main shader program + else + end + + + s_branch L_END_PGM + + + +/**************************************************************************/ +/* restore routine */ +/**************************************************************************/ + +L_RESTORE: + /* Setup Resource Contants */ + if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL)) + //calculate wd_addr using absolute thread id + v_readlane_b32 s_restore_tmp, v9, 0 + s_lshr_b32 s_restore_tmp, s_restore_tmp, 6 + s_mul_i32 s_restore_tmp, s_restore_tmp, WAVE_SPACE + s_add_i32 s_restore_spi_init_lo, s_restore_tmp, WG_BASE_ADDR_LO + s_mov_b32 
s_restore_spi_init_hi, WG_BASE_ADDR_HI + s_and_b32 s_restore_spi_init_hi, s_restore_spi_init_hi, CTX_RESTORE_CONTROL + else + end + + s_mov_b32 s_restore_buf_rsrc0, s_restore_spi_init_lo //base_addr_lo + s_and_b32 s_restore_buf_rsrc1, s_restore_spi_init_hi, 0x0000FFFF //base_addr_hi + s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE + s_mov_b32 s_restore_buf_rsrc2, 0 //NUM_RECORDS initial value = 0 (in bytes) + s_mov_b32 s_restore_buf_rsrc3, S_RESTORE_BUF_RSRC_WORD3_MISC + s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_ATC_MASK + s_lshr_b32 s_restore_tmp, s_restore_tmp, (S_RESTORE_SPI_INIT_ATC_SHIFT-SQ_BUF_RSRC_WORD1_ATC_SHIFT) //get ATC bit into position + s_or_b32 s_restore_buf_rsrc3, s_restore_buf_rsrc3, s_restore_tmp //or ATC TODO: ATC deprecated, no need anymore. + s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_MTYPE_MASK + s_lshr_b32 s_restore_tmp, s_restore_tmp, (S_RESTORE_SPI_INIT_MTYPE_SHIFT-SQ_BUF_RSRC_WORD3_MTYPE_SHIFT) //get MTYPE bits into position + s_or_b32 s_restore_buf_rsrc3, s_restore_buf_rsrc3, s_restore_tmp //or MTYPE + + /* global mem offset */ + s_mov_b32 s_restore_mem_offset, 0x0 //mem offset initial value = 0 + + /* the first wave in the threadgroup */ + s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_FIRST_WAVE_MASK + s_cbranch_scc0 L_RESTORE_VGPR + + /* restore LDS */ + ////////////////////////////// + L_RESTORE_LDS: + + s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on //be consistent with SAVE although can be moved ahead + s_mov_b32 exec_hi, 0xFFFFFFFF + + s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE) //lds_size + s_and_b32 s_restore_alloc_size, s_restore_alloc_size, 0xFFFFFFFF //lds_size is zero? + s_cbranch_scc0 L_RESTORE_VGPR //no lds used? jump to L_RESTORE_VGPR + s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 6 //LDS size in dwords = lds_size * 64dw + s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 2 //LDS size in bytes + s_mov_b32 s_restore_buf_rsrc2, s_restore_alloc_size //NUM_RECORDS in bytes + if (SWIZZLE_EN) + s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? + else + s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes + end + s_mov_b32 m0, 0x0 //lds_offset initial value = 0 + + L_RESTORE_LDS_LOOP: + if (SAVE_LDS) + buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 + end + s_add_u32 m0, m0, 256 //every buffer_load_dword does 256 bytes + s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256 //mem offset increased by 256 bytes + s_cmp_lt_u32 m0, s_restore_alloc_size //scc=(m0 < s_restore_alloc_size) ? 1 : 0 + s_cbranch_scc1 L_RESTORE_LDS_LOOP //LDS restore is complete? 
+ + + /* restore VGPRs */ + ////////////////////////////// + L_RESTORE_VGPR: + + s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on //be consistent with SAVE although can be moved ahead + s_mov_b32 exec_hi, 0xFFFFFFFF + + s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE) //vpgr_size + s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 1 + s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value) + s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 8 //NUM_RECORDS in bytes (64 threads*4) + if (SWIZZLE_EN) + s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? + else + s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes + end + s_mov_b32 s_restore_mem_offset_save, s_restore_mem_offset // restore start with v1, v0 will be the last + s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256 + s_mov_b32 m0, 1 //VGPR initial index value = 1 + s_set_gpr_idx_on m0, 0x8 //M0[7:0] = M0[7:0] and M0[15:12] = 0x8 + s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 0x8000 //add 0x8000 since we compare m0 against it later + + L_RESTORE_VGPR_LOOP: + if(USE_MTBUF_INSTEAD_OF_MUBUF) + tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1 + else + buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 + end + s_waitcnt vmcnt(0) //ensure data ready + v_mov_b32 v0, v0 //v[0+m0] = v0 + s_add_u32 m0, m0, 1 //next vgpr index + s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256 //every buffer_load_dword does 256 bytes + s_cmp_lt_u32 m0, s_restore_alloc_size //scc = (m0 < s_restore_alloc_size) ? 1 : 0 + s_cbranch_scc1 L_RESTORE_VGPR_LOOP //VGPR restore (except v0) is complete? + s_set_gpr_idx_off + + + /* restore ACC_VGPRs */ + ////////////////////////////// + L_RESTORE_ACC_VGPR: + + s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on //be consistent with SAVE although can be moved ahead + s_mov_b32 exec_hi, 0xFFFFFFFF + + s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE) //vpgr_size + s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 1 + s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value) + s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 8 //NUM_RECORDS in bytes (64 threads*4) + if (SWIZZLE_EN) + s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? 
+ else + s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes + end + s_mov_b32 m0, 0 //VGPR initial index value = 0 + s_set_gpr_idx_on m0, 0x8 //M0[7:0] = M0[7:0] and M0[15:12] = 0x8 + s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 0x8000 //add 0x8000 since we compare m0 against it later + + L_RESTORE_ACC_VGPR_LOOP: + if(USE_MTBUF_INSTEAD_OF_MUBUF) + tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1 + else + buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 + end + s_waitcnt vmcnt(0) //ensure data ready + v_accvgpr_write v0, v0 //v[0+m0] = v0 + s_add_u32 m0, m0, 1 //next vgpr index + s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256 //every buffer_load_dword does 256 bytes + s_cmp_lt_u32 m0, s_restore_alloc_size //scc = (m0 < s_restore_alloc_size) ? 1 : 0 + s_cbranch_scc1 L_RESTORE_ACC_VGPR_LOOP //VGPR restore (except v0) is complete? + s_set_gpr_idx_off + /* VGPR restore on v0 */ + if(USE_MTBUF_INSTEAD_OF_MUBUF) + tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1 + else + buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 + end + + /* restore SGPRs */ + ////////////////////////////// + s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SIZE) //spgr_size + s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 1 + s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 4 //Number of SGPRs = (sgpr_size + 1) * 16 (non-zero value) + + if (SGPR_SAVE_USE_SQC) + s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 2 //NUM_RECORDS in bytes + else + s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 8 //NUM_RECORDS in bytes (64 threads) + end + if (SWIZZLE_EN) + s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? + else + s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes + end + read_sgpr_from_mem(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //save s0 to s_restore_tmp + s_mov_b32 m0, 0x1 //SGPR initial index value =1 //go on with with s1 + + L_RESTORE_SGPR_LOOP: + read_sgpr_from_mem(s0, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //PV: further performance improvement can be made + s_waitcnt lgkmcnt(0) //ensure data ready + s_movreld_b32 s0, s0 //s[0+m0] = s0 + s_nop 0 // hazard SALU M0=> S_MOVREL + s_add_u32 m0, m0, 1 //next sgpr index + s_cmp_lt_u32 m0, s_restore_alloc_size //scc = (m0 < s_restore_alloc_size) ? 1 : 0 + s_cbranch_scc1 L_RESTORE_SGPR_LOOP //SGPR restore (except s0) is complete? + s_mov_b32 s0, s_restore_tmp /* SGPR restore on s0 */ + + /* restore HW registers */ + ////////////////////////////// + L_RESTORE_HWREG: + s_mov_b32 s_restore_buf_rsrc2, 0x4 //NUM_RECORDS in bytes + if (SWIZZLE_EN) + s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? 
+ else + s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes + end + + read_sgpr_from_mem(s_restore_m0, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //M0 + read_sgpr_from_mem(s_restore_pc_lo, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //PC + read_sgpr_from_mem(s_restore_pc_hi, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) + read_sgpr_from_mem(s_restore_exec_lo, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //EXEC + read_sgpr_from_mem(s_restore_exec_hi, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) + read_sgpr_from_mem(s_restore_status, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //STATUS + read_sgpr_from_mem(s_restore_trapsts, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //TRAPSTS + read_sgpr_from_mem(xnack_mask_lo, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //XNACK_MASK_LO + read_sgpr_from_mem(xnack_mask_hi, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //XNACK_MASK_HI + read_sgpr_from_mem(s_restore_mode, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //MODE + + s_waitcnt lgkmcnt(0) //from now on, it is safe to restore STATUS and IB_STS + + s_mov_b32 s_restore_tmp, s_restore_pc_hi + s_and_b32 s_restore_pc_hi, s_restore_tmp, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS + + //for normal save & restore, the saved PC points to the next inst to execute, no adjustment needs to be made, otherwise: + if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL)) + s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 8 //pc[31:0]+8 //two back-to-back s_trap are used (first for save and second for restore) + s_addc_u32 s_restore_pc_hi, s_restore_pc_hi, 0x0 //carry bit over + end + if ((EMU_RUN_HACK) && (EMU_RUN_HACK_RESTORE_NORMAL)) + s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 4 //pc[31:0]+4 // save is hack through s_trap but restore is normal + s_addc_u32 s_restore_pc_hi, s_restore_pc_hi, 0x0 //carry bit over + end + + s_mov_b32 m0, s_restore_m0 + s_mov_b32 exec_lo, s_restore_exec_lo + s_mov_b32 exec_hi, s_restore_exec_hi + + s_and_b32 s_restore_m0, SQ_WAVE_TRAPSTS_PRE_SAVECTX_MASK, s_restore_trapsts + s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_PRE_SAVECTX_SHIFT, SQ_WAVE_TRAPSTS_PRE_SAVECTX_SIZE), s_restore_m0 + s_and_b32 s_restore_m0, SQ_WAVE_TRAPSTS_POST_SAVECTX_MASK, s_restore_trapsts + s_lshr_b32 s_restore_m0, s_restore_m0, SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT + s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT, SQ_WAVE_TRAPSTS_POST_SAVECTX_SIZE), s_restore_m0 + //s_setreg_b32 hwreg(HW_REG_TRAPSTS), s_restore_trapsts //don't overwrite SAVECTX bit as it may be set through external SAVECTX during restore + s_setreg_b32 hwreg(HW_REG_MODE), s_restore_mode + //reuse s_restore_m0 as a temp register + s_and_b32 s_restore_m0, s_restore_tmp, S_SAVE_PC_HI_RCNT_MASK + s_lshr_b32 s_restore_m0, s_restore_m0, S_SAVE_PC_HI_RCNT_SHIFT + s_lshl_b32 s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_RCNT_SHIFT + s_mov_b32 s_restore_mode, 0x0 //IB_STS is zero + s_or_b32 s_restore_mode, s_restore_mode, s_restore_m0 + s_and_b32 s_restore_m0, s_restore_tmp, S_SAVE_PC_HI_FIRST_REPLAY_MASK + s_lshr_b32 s_restore_m0, s_restore_m0, S_SAVE_PC_HI_FIRST_REPLAY_SHIFT + s_lshl_b32 s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT + s_or_b32 s_restore_mode, s_restore_mode, s_restore_m0 + s_and_b32 s_restore_m0, s_restore_status, SQ_WAVE_STATUS_INST_ATC_MASK + s_lshr_b32 s_restore_m0, s_restore_m0, 
SQ_WAVE_STATUS_INST_ATC_SHIFT + s_setreg_b32 hwreg(HW_REG_IB_STS), s_restore_mode + + s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32 + s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32 + s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status + + s_barrier //barrier to ensure the readiness of LDS before access attemps from any other wave in the same TG //FIXME not performance-optimal at this time + + +// s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution + s_rfe_restore_b64 s_restore_pc_lo, s_restore_m0 // s_restore_m0[0] is used to set STATUS.inst_atc + + +/**************************************************************************/ +/* the END */ +/**************************************************************************/ +L_END_PGM: + s_endpgm + +end + + +/**************************************************************************/ +/* the helper functions */ +/**************************************************************************/ + +function write_sgpr_to_mem(s, s_rsrc, s_mem_offset, use_sqc, use_mtbuf) + if (use_sqc) + s_mov_b32 exec_lo, m0 //assuming exec_lo is not needed anymore from this point on + s_mov_b32 m0, s_mem_offset + s_buffer_store_dword s, s_rsrc, m0 glc:1 + s_add_u32 s_mem_offset, s_mem_offset, 4 + s_mov_b32 m0, exec_lo + elsif (use_mtbuf) + v_mov_b32 v0, s + tbuffer_store_format_x v0, v0, s_rsrc, s_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1 + s_add_u32 s_mem_offset, s_mem_offset, 256 + else + v_mov_b32 v0, s + buffer_store_dword v0, v0, s_rsrc, s_mem_offset slc:1 glc:1 + s_add_u32 s_mem_offset, s_mem_offset, 256 + end +end + + + +function read_sgpr_from_mem(s, s_rsrc, s_mem_offset, use_sqc) + s_buffer_load_dword s, s_rsrc, s_mem_offset glc:1 + if (use_sqc) + s_add_u32 s_mem_offset, s_mem_offset, 4 + else + s_add_u32 s_mem_offset, s_mem_offset, 256 + end +end + + + + + diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index cc22289c92d1..c3cd683e1e97 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -553,6 +553,10 @@ static void kfd_cwsr_init(struct kfd_dev *kfd) BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE); kfd->cwsr_isa = cwsr_trap_gfx8_hex; kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex); + } else if (kfd->device_info->asic_family == CHIP_ARCTURUS) { + BUILD_BUG_ON(sizeof(cwsr_trap_arcturus_hex) > PAGE_SIZE); + kfd->cwsr_isa = cwsr_trap_arcturus_hex; + kfd->cwsr_isa_size = sizeof(cwsr_trap_arcturus_hex); } else if (kfd->device_info->asic_family < CHIP_NAVI10) { BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex) > PAGE_SIZE); kfd->cwsr_isa = cwsr_trap_gfx9_hex; -- cgit v1.2.3 From f9cf36fcafee8e12ffc6026dc8410c17aa21c819 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Sat, 29 Jun 2019 22:22:13 +0800 Subject: drm/amdgpu: skip gfx 9 common golden settings for arct They are not needed by arct Signed-off-by: Hawking Zhang Reviewed-by: Feifei Xu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 76e7e103f245..b9c347d2b989 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -390,8 +390,9 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev) break; } - 
soc15_program_register_sequence(adev, golden_settings_gc_9_x_common, - (const u32)ARRAY_SIZE(golden_settings_gc_9_x_common)); + if (adev->asic_type != CHIP_ARCTURUS) + soc15_program_register_sequence(adev, golden_settings_gc_9_x_common, + (const u32)ARRAY_SIZE(golden_settings_gc_9_x_common)); } static void gfx_v9_0_scratch_init(struct amdgpu_device *adev) -- cgit v1.2.3 From fc1e272e8d21ebe5c992db534b855c604b060cdb Mon Sep 17 00:00:00 2001 From: Le Ma Date: Sun, 30 Jun 2019 11:35:32 +0800 Subject: drm/amdgpu: limit sdma instances to 2 for Arcturus in BU phase Another 6 sdma instances do not work at present. Disable them to unblock KFD for silicon bringup as a workaround Signed-off-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 921a6dd9cbae..8d33cb730b63 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1602,7 +1602,7 @@ static int sdma_v4_0_early_init(void *handle) if (adev->asic_type == CHIP_RAVEN) adev->sdma.num_instances = 1; else if (adev->asic_type == CHIP_ARCTURUS) - adev->sdma.num_instances = 8; + adev->sdma.num_instances = 2; else adev->sdma.num_instances = 2; -- cgit v1.2.3 From e30d90fca3fc1aa24e8b9f182a857c32f6186be2 Mon Sep 17 00:00:00 2001 From: Oak Zeng Date: Tue, 9 Jul 2019 13:16:37 -0500 Subject: drm/amdkfd: Add device id for real asics Add pci device ids. Signed-off-by: Oak Zeng Reviewed-by: Yong Zhao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index c3cd683e1e97..3b9fe629a126 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -469,7 +469,9 @@ static const struct kfd_deviceid supported_devices[] = { { 0x66a4, &vega20_device_info }, /* Vega20 */ { 0x66a7, &vega20_device_info }, /* Vega20 */ { 0x66af, &vega20_device_info }, /* Vega20 */ - /* Navi10 */ + { 0x738C, &arcturus_device_info }, /* Arcturus */ + { 0x7388, &arcturus_device_info }, /* Arcturus */ + { 0x738E, &arcturus_device_info }, /* Arcturus */ { 0x7310, &navi10_device_info }, /* Navi10 */ { 0x7312, &navi10_device_info }, /* Navi10 */ { 0x7318, &navi10_device_info }, /* Navi10 */ -- cgit v1.2.3 From 5ddd4a9a7c25a6a23a79f973e7a87b1403503719 Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Mon, 1 Jul 2019 00:48:40 -0400 Subject: drm/amdgpu: Add more detail to the VM fault printing With the printing, we don't need to parse the value on our own any more. 
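[Editor's note, not part of the patch: for context, decoding a raw VM_L2_PROTECTION_FAULT_STATUS value "on our own" is roughly the exercise sketched below. The shift/mask values here are illustrative assumptions only; the driver takes the real ones from the generated register headers and applies them through REG_GET_FIELD(), as the hunk that follows shows.]

/*
 * Standalone sketch of hand-decoding a fault status dword.
 * The field layout below is an assumption for illustration; the kernel
 * uses the *_sh_mask.h definitions via REG_GET_FIELD().
 */
#include <stdio.h>
#include <stdint.h>

#define FIELD(val, shift, mask)   (((val) & (mask)) >> (shift))

/* assumed bit positions, for the example only */
#define MORE_FAULTS_SHIFT        0
#define MORE_FAULTS_MASK         0x00000001
#define WALKER_ERROR_SHIFT       1
#define WALKER_ERROR_MASK        0x0000000e
#define PERMISSION_FAULTS_SHIFT  4
#define PERMISSION_FAULTS_MASK   0x000000f0
#define MAPPING_ERROR_SHIFT      8
#define MAPPING_ERROR_MASK       0x00000100

int main(void)
{
	uint32_t status = 0x00000031;	/* example raw value from a fault print */

	printf("MORE_FAULTS:       0x%x\n", FIELD(status, MORE_FAULTS_SHIFT, MORE_FAULTS_MASK));
	printf("WALKER_ERROR:      0x%x\n", FIELD(status, WALKER_ERROR_SHIFT, WALKER_ERROR_MASK));
	printf("PERMISSION_FAULTS: 0x%x\n", FIELD(status, PERMISSION_FAULTS_SHIFT, PERMISSION_FAULTS_MASK));
	printf("MAPPING_ERROR:     0x%x\n", FIELD(status, MAPPING_ERROR_SHIFT, MAPPING_ERROR_MASK));
	return 0;
}

With the patch applied, the driver prints these fields directly, so no such manual decode is needed when reading dmesg.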
Signed-off-by: Yong Zhao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 9b6afed0129f..320b5413738e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -359,12 +359,26 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, entry->src_id, entry->ring_id, entry->vmid, entry->pasid, task_info.process_name, task_info.tgid, task_info.task_name, task_info.pid); - dev_err(adev->dev, " in page starting at address 0x%016llx from %d\n", + dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n", addr, entry->client_id); - if (!amdgpu_sriov_vf(adev)) + if (!amdgpu_sriov_vf(adev)) { dev_err(adev->dev, "VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n", status); + dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n", + REG_GET_FIELD(status, + VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS)); + dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n", + REG_GET_FIELD(status, + VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR)); + dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n", + REG_GET_FIELD(status, + VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS)); + dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n", + REG_GET_FIELD(status, + VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR)); + + } } return 0; -- cgit v1.2.3 From 37f86a9b3617d55ad8189e1b7e6468b85dba4b88 Mon Sep 17 00:00:00 2001 From: Jay Cornwall Date: Mon, 1 Jul 2019 15:46:56 -0500 Subject: drm/amdkfd: Merge gfx9/arcturus trap handlers, add ACC VGPR save ACC VGPRs are a secondary VGPR set of same size as the primary VGPRs. Save them as a block immediately following VGPRs. 
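[Editor's note, not part of the patch: a minimal sketch of the save-area layout the commit message describes, assuming a 64-lane wave, 4-byte registers, and the "(vgpr_size + 1) * 4" decode the trap handler comments use; offsets are relative to the start of the VGPR save block. In the patch itself the concrete offsets are carried in s_save_mem_offset / s_restore_accvgpr_offset.]

/*
 * Sketch only: ACC VGPRs form a block of the same size as the primary
 * VGPRs, placed immediately after them.  Lane count, register width and
 * the GPR_ALLOC decode below are assumptions mirroring the assembly
 * comments, not authoritative hardware documentation.
 */
#include <stdio.h>
#include <stdint.h>

#define WAVE_LANES   64
#define DWORD_BYTES  4

int main(void)
{
	uint32_t vgpr_size_field = 15;                        /* example HW_REG_GPR_ALLOC.VGPR_SIZE value */
	uint32_t num_vgprs = (vgpr_size_field + 1) * 4;       /* "(vgpr_size + 1) * 4" => 64 VGPRs here */
	uint32_t bytes_per_vgpr = WAVE_LANES * DWORD_BYTES;   /* 256 bytes per VGPR per wave */

	uint32_t vgpr_block = num_vgprs * bytes_per_vgpr;     /* primary VGPR block size */
	uint32_t acc_offset = vgpr_block;                     /* ACC VGPRs start right after VGPRs */
	uint32_t acc_block  = vgpr_block;                     /* same size as the primary set */

	printf("VGPR block:     %u bytes at offset 0\n", vgpr_block);
	printf("ACC VGPR block: %u bytes at offset %u\n", acc_block, acc_offset);
	printf("next save section would start at offset %u\n", acc_offset + acc_block);
	return 0;
}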
Signed-off-by: Jay Cornwall Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h | 627 ++++++++++++----- .../drm/amd/amdkfd/cwsr_trap_handler_arcturus.asm | 746 --------------------- .../gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm | 83 ++- 3 files changed, 538 insertions(+), 918 deletions(-) delete mode 100644 drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_arcturus.asm (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h index ee700a69c68e..c45ba0013ca5 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h @@ -861,200 +861,487 @@ static const uint32_t cwsr_trap_gfx10_hex[] = { 0xbf9f0000, 0x00000000, }; static const uint32_t cwsr_trap_arcturus_hex[] = { - 0xbf820001, 0xbf8200ca, - 0xb8f0f802, 0x89708670, - 0xb8f1f803, 0x8671ff71, - 0x00000400, 0xbf850008, - 0xb8f1f803, 0x8671ff71, - 0x000001ff, 0xbf850001, - 0x806c846c, 0x866dff6d, - 0x0000ffff, 0xbe801f6c, - 0xb8f1f803, 0x8671ff71, - 0x00000100, 0xbf840006, - 0xbef60080, 0xb9760203, - 0x866dff6d, 0x0000ffff, + 0xbf820001, 0xbf8202bd, + 0xb8f8f802, 0x89788678, + 0xb8fbf803, 0x866eff7b, + 0x00000400, 0xbf85003b, + 0x866eff7b, 0x00000800, + 0xbf850003, 0x866eff7b, + 0x00000100, 0xbf84000c, + 0x866eff78, 0x00002000, + 0xbf840005, 0xbf8e0010, + 0xb8eef803, 0x866eff6e, + 0x00000400, 0xbf84fffb, + 0x8778ff78, 0x00002000, 0x80ec886c, 0x82ed806d, - 0xbef60080, 0xb9760283, - 0xbef20068, 0xbef30069, - 0xb8f62407, 0x8e769b76, - 0x876d766d, 0xb8f603c7, - 0x8e769a76, 0x876d766d, - 0xb8f6f807, 0x8676ff76, - 0x00007fff, 0xb976f807, - 0xbeee007e, 0xbeef007f, - 0xbefe0180, 0xbf900004, - 0xbf8e0002, 0xbf88fffe, + 0xb8eef807, 0x866fff6e, + 0x001f8000, 0x8e6f8b6f, + 0x8977ff77, 0xfc000000, + 0x87776f77, 0x896eff6e, + 0x001f8000, 0xb96ef807, + 0xb8faf812, 0xb8fbf813, + 0x8efa887a, 0xc0071bbd, + 0x00000000, 0xbf8cc07f, + 0xc0071ebd, 0x00000008, + 0xbf8cc07f, 0x86ee6e6e, + 0xbf840001, 0xbe801d6e, + 0xb8fbf803, 0x867bff7b, + 0x000001ff, 0xbf850002, + 0x806c846c, 0x826d806d, + 0x866dff6d, 0x0000ffff, + 0x8f6e8b77, 0x866eff6e, + 0x001f8000, 0xb96ef807, + 0x86fe7e7e, 0x86ea6a6a, + 0x8f6e8378, 0xb96ee0c2, + 0xbf800002, 0xb9780002, + 0xbe801f6c, 0x866dff6d, + 0x0000ffff, 0xbefa0080, + 0xb97a0283, 0xb8fa2407, + 0x8e7a9b7a, 0x876d7a6d, + 0xb8fa03c7, 0x8e7a9a7a, + 0x876d7a6d, 0xb8faf807, + 0x867aff7a, 0x00007fff, + 0xb97af807, 0xbeee007e, + 0xbeef007f, 0xbefe0180, + 0xbf900004, 0x877a8478, + 0xb97af802, 0xbf8e0002, + 0xbf88fffe, 0xb8fa2a05, + 0x807a817a, 0x8e7a8a7a, + 0x8e7a817a, 0xb8fb1605, + 0x807b817b, 0x8e7b867b, + 0x807a7b7a, 0x807a7e7a, + 0x827b807f, 0x867bff7b, + 0x0000ffff, 0xc04b1c3d, + 0x00000050, 0xbf8cc07f, + 0xc04b1d3d, 0x00000060, + 0xbf8cc07f, 0xc0431e7d, + 0x00000074, 0xbf8cc07f, 0xbef4007e, 0x8675ff7f, 0x0000ffff, 0x8775ff75, 0x00040000, 0xbef60080, 0xbef700ff, 0x00807fac, - 0x8676ff7f, 0x08000000, - 0x8f768376, 0x87777677, - 0x8676ff7f, 0x70000000, - 0x8f768176, 0x87777677, - 0xbefb007c, 0xbefa0080, - 0xbf8a0000, 0x8676ff7f, - 0x04000000, 0xbf840012, - 0xbefe00c1, 0xbeff00c1, - 0xb8f14306, 0x8671c171, - 0xbf84000d, 0x8e718671, - 0x8e718271, 0xbef60071, - 0xbef600ff, 0x01000000, + 0x867aff7f, 0x08000000, + 0x8f7a837a, 0x87777a77, + 0x867aff7f, 0x70000000, + 0x8f7a817a, 0x87777a77, + 0xbef1007c, 0xbef00080, + 0xb8f02a05, 0x80708170, + 0x8e708a70, 0x8e708170, + 0xb8fa1605, 0x807a817a, + 0x8e7a867a, 0x80707a70, + 0xbef60084, 0xbef600ff, + 0x01000000, 0xbefe007c, + 0xbefc0070, 0xc0611c7a, 
+ 0x0000007c, 0xbf8cc07f, + 0x80708470, 0xbefc007e, + 0xbefe007c, 0xbefc0070, + 0xc0611b3a, 0x0000007c, + 0xbf8cc07f, 0x80708470, + 0xbefc007e, 0xbefe007c, + 0xbefc0070, 0xc0611b7a, + 0x0000007c, 0xbf8cc07f, + 0x80708470, 0xbefc007e, + 0xbefe007c, 0xbefc0070, + 0xc0611bba, 0x0000007c, + 0xbf8cc07f, 0x80708470, + 0xbefc007e, 0xbefe007c, + 0xbefc0070, 0xc0611bfa, + 0x0000007c, 0xbf8cc07f, + 0x80708470, 0xbefc007e, + 0xbefe007c, 0xbefc0070, + 0xc0611e3a, 0x0000007c, + 0xbf8cc07f, 0x80708470, + 0xbefc007e, 0xb8fbf803, + 0xbefe007c, 0xbefc0070, + 0xc0611efa, 0x0000007c, + 0xbf8cc07f, 0x80708470, + 0xbefc007e, 0xbefe007c, + 0xbefc0070, 0xc0611a3a, + 0x0000007c, 0xbf8cc07f, + 0x80708470, 0xbefc007e, + 0xbefe007c, 0xbefc0070, + 0xc0611a7a, 0x0000007c, + 0xbf8cc07f, 0x80708470, + 0xbefc007e, 0xb8f1f801, + 0xbefe007c, 0xbefc0070, + 0xc0611c7a, 0x0000007c, + 0xbf8cc07f, 0x80708470, + 0xbefc007e, 0x867aff7f, + 0x04000000, 0xbeef0080, + 0x876f6f7a, 0xb8f02a05, + 0x80708170, 0x8e708a70, + 0x8e708170, 0xb8fb1605, + 0x807b817b, 0x8e7b847b, + 0x8e76827b, 0xbef600ff, + 0x01000000, 0xbef20174, + 0x80747074, 0x82758075, 0xbefc0080, 0xbf800000, - 0x807cff7c, 0x00000100, - 0x807aff7a, 0x00000100, - 0xbf0a717c, 0xbf85fffa, + 0xbe802b00, 0xbe822b02, + 0xbe842b04, 0xbe862b06, + 0xbe882b08, 0xbe8a2b0a, + 0xbe8c2b0c, 0xbe8e2b0e, + 0xc06b003a, 0x00000000, + 0xbf8cc07f, 0xc06b013a, + 0x00000010, 0xbf8cc07f, + 0xc06b023a, 0x00000020, + 0xbf8cc07f, 0xc06b033a, + 0x00000030, 0xbf8cc07f, + 0x8074c074, 0x82758075, + 0x807c907c, 0xbf0a7b7c, + 0xbf85ffe7, 0xbef40172, + 0xbef00080, 0xbefe00c1, + 0xbeff00c1, 0xbee80080, + 0xbee90080, 0xbef600ff, + 0x01000000, 0x867aff78, + 0x00400000, 0xbf850003, + 0xb8faf803, 0x897a7aff, + 0x10000000, 0xbf85004d, + 0xbe840080, 0xd2890000, + 0x00000900, 0x80048104, + 0xd2890001, 0x00000900, + 0x80048104, 0xd2890002, + 0x00000900, 0x80048104, + 0xd2890003, 0x00000900, + 0x80048104, 0xc069003a, + 0x00000070, 0xbf8cc07f, + 0x80709070, 0xbf06c004, + 0xbf84ffee, 0xbe840080, + 0xd2890000, 0x00000901, + 0x80048104, 0xd2890001, + 0x00000901, 0x80048104, + 0xd2890002, 0x00000901, + 0x80048104, 0xd2890003, + 0x00000901, 0x80048104, + 0xc069003a, 0x00000070, + 0xbf8cc07f, 0x80709070, + 0xbf06c004, 0xbf84ffee, + 0xbe840080, 0xd2890000, + 0x00000902, 0x80048104, + 0xd2890001, 0x00000902, + 0x80048104, 0xd2890002, + 0x00000902, 0x80048104, + 0xd2890003, 0x00000902, + 0x80048104, 0xc069003a, + 0x00000070, 0xbf8cc07f, + 0x80709070, 0xbf06c004, + 0xbf84ffee, 0xbe840080, + 0xd2890000, 0x00000903, + 0x80048104, 0xd2890001, + 0x00000903, 0x80048104, + 0xd2890002, 0x00000903, + 0x80048104, 0xd2890003, + 0x00000903, 0x80048104, + 0xc069003a, 0x00000070, + 0xbf8cc07f, 0x80709070, + 0xbf06c004, 0xbf84ffee, + 0xbf820008, 0xe0724000, + 0x701d0000, 0xe0724100, + 0x701d0100, 0xe0724200, + 0x701d0200, 0xe0724300, + 0x701d0300, 0xbefe00c1, + 0xbeff00c1, 0xb8fb4306, + 0x867bc17b, 0xbf840064, + 0xbf8a0000, 0x867aff6f, + 0x04000000, 0xbf840060, + 0x8e7b867b, 0x8e7b827b, + 0xbef6007b, 0xb8f02a05, + 0x80708170, 0x8e708a70, + 0x8e708170, 0xb8fa1605, + 0x807a817a, 0x8e7a867a, + 0x80707a70, 0x8070ff70, + 0x00000080, 0xbef600ff, + 0x01000000, 0xbefc0080, + 0xd28c0002, 0x000100c1, + 0xd28d0003, 0x000204c1, + 0x867aff78, 0x00400000, + 0xbf850003, 0xb8faf803, + 0x897a7aff, 0x10000000, + 0xbf850030, 0x24040682, + 0xd86e4000, 0x00000002, + 0xbf8cc07f, 0xbe840080, + 0xd2890000, 0x00000900, + 0x80048104, 0xd2890001, + 0x00000900, 0x80048104, + 0xd2890002, 0x00000900, + 0x80048104, 0xd2890003, + 0x00000900, 0x80048104, + 0xc069003a, 
0x00000070, + 0xbf8cc07f, 0x80709070, + 0xbf06c004, 0xbf84ffee, + 0xbe840080, 0xd2890000, + 0x00000901, 0x80048104, + 0xd2890001, 0x00000901, + 0x80048104, 0xd2890002, + 0x00000901, 0x80048104, + 0xd2890003, 0x00000901, + 0x80048104, 0xc069003a, + 0x00000070, 0xbf8cc07f, + 0x80709070, 0xbf06c004, + 0xbf84ffee, 0x680404ff, + 0x00000200, 0xd0c9006a, + 0x0000f702, 0xbf87ffd2, + 0xbf820015, 0xd1060002, + 0x00011103, 0x7e0602ff, + 0x00000200, 0xbefc00ff, + 0x00010000, 0xbe800077, + 0x8677ff77, 0xff7fffff, + 0x8777ff77, 0x00058000, + 0xd8ec0000, 0x00000002, + 0xbf8cc07f, 0xe0765000, + 0x701d0002, 0x68040702, + 0xd0c9006a, 0x0000f702, + 0xbf87fff7, 0xbef70000, + 0xbef000ff, 0x00000400, 0xbefe00c1, 0xbeff00c1, - 0xb8f12a05, 0x80718171, - 0x8e718271, 0x8e768871, + 0xb8fb2a05, 0x807b817b, + 0x8e7b827b, 0x8e76887b, 0xbef600ff, 0x01000000, + 0xbefc0084, 0xbf0a7b7c, + 0xbf84006d, 0xbf11017c, + 0x807bff7b, 0x00001000, + 0x867aff78, 0x00400000, + 0xbf850003, 0xb8faf803, + 0x897a7aff, 0x10000000, + 0xbf850051, 0xbe840080, + 0xd2890000, 0x00000900, + 0x80048104, 0xd2890001, + 0x00000900, 0x80048104, + 0xd2890002, 0x00000900, + 0x80048104, 0xd2890003, + 0x00000900, 0x80048104, + 0xc069003a, 0x00000070, + 0xbf8cc07f, 0x80709070, + 0xbf06c004, 0xbf84ffee, + 0xbe840080, 0xd2890000, + 0x00000901, 0x80048104, + 0xd2890001, 0x00000901, + 0x80048104, 0xd2890002, + 0x00000901, 0x80048104, + 0xd2890003, 0x00000901, + 0x80048104, 0xc069003a, + 0x00000070, 0xbf8cc07f, + 0x80709070, 0xbf06c004, + 0xbf84ffee, 0xbe840080, + 0xd2890000, 0x00000902, + 0x80048104, 0xd2890001, + 0x00000902, 0x80048104, + 0xd2890002, 0x00000902, + 0x80048104, 0xd2890003, + 0x00000902, 0x80048104, + 0xc069003a, 0x00000070, + 0xbf8cc07f, 0x80709070, + 0xbf06c004, 0xbf84ffee, + 0xbe840080, 0xd2890000, + 0x00000903, 0x80048104, + 0xd2890001, 0x00000903, + 0x80048104, 0xd2890002, + 0x00000903, 0x80048104, + 0xd2890003, 0x00000903, + 0x80048104, 0xc069003a, + 0x00000070, 0xbf8cc07f, + 0x80709070, 0xbf06c004, + 0xbf84ffee, 0x807c847c, + 0xbf0a7b7c, 0xbf85ffb1, + 0xbf9c0000, 0xbf820012, + 0x7e000300, 0x7e020301, + 0x7e040302, 0x7e060303, + 0xe0724000, 0x701d0000, + 0xe0724100, 0x701d0100, + 0xe0724200, 0x701d0200, + 0xe0724300, 0x701d0300, + 0x807c847c, 0x8070ff70, + 0x00000400, 0xbf0a7b7c, + 0xbf85ffef, 0xbf9c0000, 0xbefc0080, 0xbf11017c, - 0x8071ff71, 0x00001000, - 0x7e000300, 0xe0724000, - 0x7a1d0000, 0x807c817c, - 0x807aff7a, 0x00000100, - 0xbf0a717c, 0xbf85fff8, - 0xbf9c0000, 0xbefe00c1, - 0xbeff00c1, 0xb8f12a05, - 0x80718171, 0x8e718271, - 0x8e768871, 0xbef600ff, - 0x01000000, 0xbefc0080, - 0xbf11017c, 0x8071ff71, - 0x00001000, 0xd3d84000, - 0x18000100, 0x7e000000, - 0x7e000000, 0xe0724000, - 0x7a1d0000, 0x807c817c, - 0x807aff7a, 0x00000100, - 0xbf0a717c, 0xbf85fff5, - 0xbf9c0000, 0xb8f11605, - 0x80718171, 0x8e718471, - 0x8e768871, 0xbef600ff, - 0x01000000, 0xbefc0080, - 0xbf800000, 0xbe802a00, - 0x7e000200, 0xe0724000, - 0x7a1d0000, 0x807aff7a, - 0x00000100, 0x807c817c, - 0xbf0a717c, 0xbf85fff7, - 0xbef60084, 0xbef600ff, - 0x01000000, 0x7e00027b, - 0xe0724000, 0x7a1d0000, - 0x807aff7a, 0x00000100, - 0x7e00026c, 0xe0724000, - 0x7a1d0000, 0x807aff7a, - 0x00000100, 0x7e00026d, - 0xe0724000, 0x7a1d0000, - 0x807aff7a, 0x00000100, - 0x7e00026e, 0xe0724000, - 0x7a1d0000, 0x807aff7a, - 0x00000100, 0x7e00026f, - 0xe0724000, 0x7a1d0000, - 0x807aff7a, 0x00000100, - 0x7e000270, 0xe0724000, - 0x7a1d0000, 0x807aff7a, - 0x00000100, 0xb8f1f803, - 0x7e000271, 0xe0724000, - 0x7a1d0000, 0x807aff7a, - 0x00000100, 0x7e000272, - 0xe0724000, 0x7a1d0000, - 
0x807aff7a, 0x00000100, - 0x7e000273, 0xe0724000, - 0x7a1d0000, 0x807aff7a, - 0x00000100, 0xb8fbf801, - 0x7e00027b, 0xe0724000, - 0x7a1d0000, 0x807aff7a, - 0x00000100, 0xbf8200bb, - 0xbef4007e, 0x8675ff7f, - 0x0000ffff, 0x8775ff75, - 0x00040000, 0xbef60080, - 0xbef700ff, 0x00807fac, - 0x8672ff7f, 0x08000000, - 0x8f728372, 0x87777277, - 0x8672ff7f, 0x70000000, - 0x8f728172, 0x87777277, - 0xbef80080, 0x8672ff7f, - 0x04000000, 0xbf840011, + 0x867aff78, 0x00400000, + 0xbf850003, 0xb8faf803, + 0x897a7aff, 0x10000000, + 0xbf850059, 0xd3d84000, + 0x18000100, 0xd3d84001, + 0x18000101, 0xd3d84002, + 0x18000102, 0xd3d84003, + 0x18000103, 0xbe840080, + 0xd2890000, 0x00000900, + 0x80048104, 0xd2890001, + 0x00000900, 0x80048104, + 0xd2890002, 0x00000900, + 0x80048104, 0xd2890003, + 0x00000900, 0x80048104, + 0xc069003a, 0x00000070, + 0xbf8cc07f, 0x80709070, + 0xbf06c004, 0xbf84ffee, + 0xbe840080, 0xd2890000, + 0x00000901, 0x80048104, + 0xd2890001, 0x00000901, + 0x80048104, 0xd2890002, + 0x00000901, 0x80048104, + 0xd2890003, 0x00000901, + 0x80048104, 0xc069003a, + 0x00000070, 0xbf8cc07f, + 0x80709070, 0xbf06c004, + 0xbf84ffee, 0xbe840080, + 0xd2890000, 0x00000902, + 0x80048104, 0xd2890001, + 0x00000902, 0x80048104, + 0xd2890002, 0x00000902, + 0x80048104, 0xd2890003, + 0x00000902, 0x80048104, + 0xc069003a, 0x00000070, + 0xbf8cc07f, 0x80709070, + 0xbf06c004, 0xbf84ffee, + 0xbe840080, 0xd2890000, + 0x00000903, 0x80048104, + 0xd2890001, 0x00000903, + 0x80048104, 0xd2890002, + 0x00000903, 0x80048104, + 0xd2890003, 0x00000903, + 0x80048104, 0xc069003a, + 0x00000070, 0xbf8cc07f, + 0x80709070, 0xbf06c004, + 0xbf84ffee, 0x807c847c, + 0xbf0a7b7c, 0xbf85ffa9, + 0xbf9c0000, 0xbf820016, + 0xd3d84000, 0x18000100, + 0xd3d84001, 0x18000101, + 0xd3d84002, 0x18000102, + 0xd3d84003, 0x18000103, + 0xe0724000, 0x701d0000, + 0xe0724100, 0x701d0100, + 0xe0724200, 0x701d0200, + 0xe0724300, 0x701d0300, + 0x807c847c, 0x8070ff70, + 0x00000400, 0xbf0a7b7c, + 0xbf85ffeb, 0xbf9c0000, + 0xbf820106, 0xbef4007e, + 0x8675ff7f, 0x0000ffff, + 0x8775ff75, 0x00040000, + 0xbef60080, 0xbef700ff, + 0x00807fac, 0x866eff7f, + 0x08000000, 0x8f6e836e, + 0x87776e77, 0x866eff7f, + 0x70000000, 0x8f6e816e, + 0x87776e77, 0x866eff7f, + 0x04000000, 0xbf84001f, 0xbefe00c1, 0xbeff00c1, 0xb8ef4306, 0x866fc16f, - 0xbf84000c, 0x8e6f866f, + 0xbf84001a, 0x8e6f866f, 0x8e6f826f, 0xbef6006f, + 0xb8f82a05, 0x80788178, + 0x8e788a78, 0x8e788178, + 0xb8ee1605, 0x806e816e, + 0x8e6e866e, 0x80786e78, + 0x8078ff78, 0x00000080, 0xbef600ff, 0x01000000, - 0xbefc0080, 0x807cff7c, - 0x00000100, 0x8078ff78, - 0x00000100, 0xbf0a6f7c, - 0xbf85fffa, 0xbefe00c1, - 0xbeff00c1, 0xb8ef2a05, - 0x806f816f, 0x8e6f826f, - 0x8e76886f, 0xbef600ff, - 0x01000000, 0xbef20078, - 0x8078ff78, 0x00000100, - 0xbefc0081, 0xbf11087c, - 0x806fff6f, 0x00008000, - 0xe0524000, 0x781d0000, - 0xbf8c0f70, 0x7e000300, - 0x807c817c, 0x8078ff78, - 0x00000100, 0xbf0a6f7c, - 0xbf85fff7, 0xbf9c0000, + 0xbefc0080, 0xe0510000, + 0x781d0000, 0xe0510100, + 0x781d0000, 0x807cff7c, + 0x00000200, 0x8078ff78, + 0x00000200, 0xbf0a6f7c, + 0xbf85fff6, 0xbef80080, 0xbefe00c1, 0xbeff00c1, 0xb8ef2a05, 0x806f816f, 0x8e6f826f, 0x8e76886f, - 0xbef600ff, 0x01000000, - 0xbefc0080, 0xbf11087c, - 0x806fff6f, 0x00008000, + 0xbef90076, 0xbef600ff, + 0x01000000, 0xbeee0078, + 0x8078ff78, 0x00000400, + 0xbef30079, 0x8079ff79, + 0x00000400, 0xbefc0084, + 0xbf11087c, 0x806fff6f, + 0x00008000, 0xe0524000, + 0x791d0000, 0xe0524100, + 0x791d0100, 0xe0524200, + 0x791d0200, 0xe0524300, + 0x791d0300, 0x8079ff79, + 0x00000400, 0xbf8c0f70, + 
0xd3d94000, 0x18000100, + 0xd3d94001, 0x18000101, + 0xd3d94002, 0x18000102, + 0xd3d94003, 0x18000103, 0xe0524000, 0x781d0000, - 0xbf8c0f70, 0xd3d94000, - 0x18000100, 0x807c817c, - 0x8078ff78, 0x00000100, - 0xbf0a6f7c, 0xbf85fff6, + 0xe0524100, 0x781d0100, + 0xe0524200, 0x781d0200, + 0xe0524300, 0x781d0300, + 0xbf8c0f70, 0x7e000300, + 0x7e020301, 0x7e040302, + 0x7e060303, 0x807c847c, + 0x8078ff78, 0x00000400, + 0xbf0a6f7c, 0xbf85ffdb, 0xbf9c0000, 0xe0524000, - 0x721d0000, 0xb8ef1605, + 0x731d0000, 0xe0524100, + 0x731d0100, 0xe0524200, + 0x731d0200, 0xe0524300, + 0x731d0300, 0xbf8c0f70, + 0xd3d94000, 0x18000100, + 0xd3d94001, 0x18000101, + 0xd3d94002, 0x18000102, + 0xd3d94003, 0x18000103, + 0xe0524000, 0x6e1d0000, + 0xe0524100, 0x6e1d0100, + 0xe0524200, 0x6e1d0200, + 0xe0524300, 0x6e1d0300, + 0xb8f82a05, 0x80788178, + 0x8e788a78, 0x8e788178, + 0xb8ee1605, 0x806e816e, + 0x8e6e866e, 0x80786e78, + 0x80f8c078, 0xb8ef1605, 0x806f816f, 0x8e6f846f, - 0x8e76886f, 0xbef600ff, - 0x01000000, 0xc0211cba, - 0x00000078, 0x8078ff78, - 0x00000100, 0xbefc0081, - 0xc021003a, 0x00000078, - 0x8078ff78, 0x00000100, - 0xbf8cc07f, 0xbe802c00, - 0xbf800000, 0x807c817c, - 0xbf0a6f7c, 0xbf85fff6, - 0xbe800072, 0xbef60084, - 0xbef600ff, 0x01000000, - 0xc0211bfa, 0x00000078, - 0x8078ff78, 0x00000100, + 0x8e76826f, 0xbef600ff, + 0x01000000, 0xbefc006f, + 0xc031003a, 0x00000078, + 0x80f8c078, 0xbf8cc07f, + 0x80fc907c, 0xbf800000, + 0xbe802d00, 0xbe822d02, + 0xbe842d04, 0xbe862d06, + 0xbe882d08, 0xbe8a2d0a, + 0xbe8c2d0c, 0xbe8e2d0e, + 0xbf06807c, 0xbf84fff0, + 0xb8f82a05, 0x80788178, + 0x8e788a78, 0x8e788178, + 0xb8ee1605, 0x806e816e, + 0x8e6e866e, 0x80786e78, + 0xbef60084, 0xbef600ff, + 0x01000000, 0xc0211bfa, + 0x00000078, 0x80788478, 0xc0211b3a, 0x00000078, - 0x8078ff78, 0x00000100, - 0xc0211b7a, 0x00000078, - 0x8078ff78, 0x00000100, - 0xc0211eba, 0x00000078, - 0x8078ff78, 0x00000100, - 0xc0211efa, 0x00000078, - 0x8078ff78, 0x00000100, + 0x80788478, 0xc0211b7a, + 0x00000078, 0x80788478, 0xc0211c3a, 0x00000078, - 0x8078ff78, 0x00000100, - 0xc0211c7a, 0x00000078, - 0x8078ff78, 0x00000100, + 0x80788478, 0xc0211c7a, + 0x00000078, 0x80788478, + 0xc0211eba, 0x00000078, + 0x80788478, 0xc0211efa, + 0x00000078, 0x80788478, 0xc0211a3a, 0x00000078, - 0x8078ff78, 0x00000100, - 0xc0211a7a, 0x00000078, - 0x8078ff78, 0x00000100, + 0x80788478, 0xc0211a7a, + 0x00000078, 0x80788478, 0xc0211cfa, 0x00000078, - 0x8078ff78, 0x00000100, - 0xbf8cc07f, 0xbef2006d, - 0x866dff72, 0x0000ffff, - 0xbefc006f, 0xbefe007a, - 0xbeff007b, 0x866f71ff, + 0x80788478, 0xbf8cc07f, + 0xbefc006f, 0xbefe0070, + 0xbeff0071, 0x866f7bff, 0x000003ff, 0xb96f4803, - 0x866f71ff, 0xfffff800, + 0x866f7bff, 0xfffff800, 0x8f6f8b6f, 0xb96fa2c3, - 0xb973f801, 0x866fff72, - 0xf8000000, 0x8f6f9b6f, - 0x8e6f906f, 0xbef30080, - 0x87736f73, 0x866fff72, - 0x04000000, 0x8f6f9a6f, - 0x8e6f8f6f, 0x87736f73, - 0x866fff70, 0x00800000, - 0x8f6f976f, 0xb973f807, - 0x86fe7e7e, 0x86ea6a6a, - 0xb970f802, 0xbf8a0000, + 0xb973f801, 0xb8ee2a05, + 0x806e816e, 0x8e6e8a6e, + 0x8e6e816e, 0xb8ef1605, + 0x806f816f, 0x8e6f866f, + 0x806e6f6e, 0x806e746e, + 0x826f8075, 0x866fff6f, + 0x0000ffff, 0xc00b1c37, + 0x00000050, 0xc00b1d37, + 0x00000060, 0xc0031e77, + 0x00000074, 0xbf8cc07f, + 0x866fff6d, 0xf8000000, + 0x8f6f9b6f, 0x8e6f906f, + 0xbeee0080, 0x876e6f6e, + 0x866fff6d, 0x04000000, + 0x8f6f9a6f, 0x8e6f8f6f, + 0x876e6f6e, 0x866fff7a, + 0x00800000, 0x8f6f976f, + 0xb96ef807, 0x866dff6d, + 0x0000ffff, 0x86fe7e7e, + 0x86ea6a6a, 0x8f6e837a, + 0xb96ee0c2, 0xbf800002, + 0xb97a0002, 0xbf8a0000, 0x95806f6c, 
0xbf810000, }; diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_arcturus.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_arcturus.asm deleted file mode 100644 index b83e2a643ddb..000000000000 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_arcturus.asm +++ /dev/null @@ -1,746 +0,0 @@ -shader main - -asic(DEFAULT) - -type(CS) - -/*************************************************************************/ -/* control on how to run the shader */ -/*************************************************************************/ -//any hack that needs to be made to run this code in EMU (either becasue various EMU code are not ready or no compute save & restore in EMU run) -var EMU_RUN_HACK = 0 -var EMU_RUN_HACK_RESTORE_NORMAL = 0 -var EMU_RUN_HACK_SAVE_NORMAL_EXIT = 0 -var EMU_RUN_HACK_SAVE_SINGLE_WAVE = 0 -var EMU_RUN_HACK_SAVE_FIRST_TIME = 0 //for interrupted restore in which the first save is through EMU_RUN_HACK -var SAVE_LDS = 0 -var WG_BASE_ADDR_LO = 0x9000a000 -var WG_BASE_ADDR_HI = 0x0 -var WAVE_SPACE = 0x6000 //memory size that each wave occupies in workgroup state mem -var CTX_SAVE_CONTROL = 0x0 -var CTX_RESTORE_CONTROL = CTX_SAVE_CONTROL -var SIM_RUN_HACK = 0 //any hack that needs to be made to run this code in SIM (either becasue various RTL code are not ready or no compute save & restore in RTL run) -var SGPR_SAVE_USE_SQC = 0 //use SQC D$ to do the write -var USE_MTBUF_INSTEAD_OF_MUBUF = 0 //need to change BUF_DATA_FORMAT in S_SAVE_BUF_RSRC_WORD3_MISC from 0 to BUF_DATA_FORMAT_32 if set to 1 (i.e. 0x00827FAC) -var SWIZZLE_EN = 0 //whether we use swizzled buffer addressing - -/**************************************************************************/ -/* variables */ -/**************************************************************************/ -var SQ_WAVE_STATUS_INST_ATC_SHIFT = 23 -var SQ_WAVE_STATUS_INST_ATC_MASK = 0x00800000 -var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006 - -var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12 -var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9 -var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT = 8 -var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE = 6 -var SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SHIFT = 24 -var SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SIZE = 3 //FIXME sq.blk still has 4 bits at this time while SQ programming guide has 3 bits - -var SQ_WAVE_TRAPSTS_SAVECTX_MASK = 0x400 -var SQ_WAVE_TRAPSTS_EXCE_MASK = 0x1FF // Exception mask -var SQ_WAVE_TRAPSTS_SAVECTX_SHIFT = 10 -var SQ_WAVE_TRAPSTS_MEM_VIOL_MASK = 0x100 -var SQ_WAVE_TRAPSTS_MEM_VIOL_SHIFT = 8 -var SQ_WAVE_TRAPSTS_PRE_SAVECTX_MASK = 0x3FF -var SQ_WAVE_TRAPSTS_PRE_SAVECTX_SHIFT = 0x0 -var SQ_WAVE_TRAPSTS_PRE_SAVECTX_SIZE = 10 -var SQ_WAVE_TRAPSTS_POST_SAVECTX_MASK = 0xFFFFF800 -var SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT = 11 -var SQ_WAVE_TRAPSTS_POST_SAVECTX_SIZE = 21 - -var SQ_WAVE_IB_STS_RCNT_SHIFT = 16 //FIXME -var SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT = 15 //FIXME -var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG = 0x00007FFF //FIXME - -var SQ_BUF_RSRC_WORD1_ATC_SHIFT = 24 -var SQ_BUF_RSRC_WORD3_MTYPE_SHIFT = 27 - - -/* Save */ -var S_SAVE_BUF_RSRC_WORD1_STRIDE = 0x00040000 //stride is 4 bytes -var S_SAVE_BUF_RSRC_WORD3_MISC = 0x00807FAC //SQ_SEL_X/Y/Z/W, BUF_NUM_FORMAT_FLOAT, (0 for MUBUF stride[17:14] when ADD_TID_ENABLE and BUF_DATA_FORMAT_32 for MTBUF), ADD_TID_ENABLE - -var S_SAVE_SPI_INIT_ATC_MASK = 0x08000000 //bit[27]: ATC bit -var S_SAVE_SPI_INIT_ATC_SHIFT = 27 -var S_SAVE_SPI_INIT_MTYPE_MASK = 0x70000000 //bit[30:28]: Mtype -var S_SAVE_SPI_INIT_MTYPE_SHIFT = 28 -var S_SAVE_SPI_INIT_FIRST_WAVE_MASK = 0x04000000 //bit[26]: FirstWaveInTG 
-var S_SAVE_SPI_INIT_FIRST_WAVE_SHIFT = 26 - -var S_SAVE_PC_HI_RCNT_SHIFT = 27 //FIXME check with Brian to ensure all fields other than PC[47:0] can be used -var S_SAVE_PC_HI_RCNT_MASK = 0xF8000000 //FIXME -var S_SAVE_PC_HI_FIRST_REPLAY_SHIFT = 26 //FIXME -var S_SAVE_PC_HI_FIRST_REPLAY_MASK = 0x04000000 //FIXME - -var s_save_spi_init_lo = exec_lo -var s_save_spi_init_hi = exec_hi - -var s_save_pc_lo = ttmp0 //{TTMP1, TTMP0} = {3h0,pc_rewind[3:0], HT[0],trapID[7:0], PC[47:0]} -var s_save_pc_hi = ttmp1 -var s_save_exec_lo = ttmp2 -var s_save_exec_hi = ttmp3 -var s_save_status = ttmp4 -var s_save_trapsts = ttmp5 //not really used until the end of the SAVE routine -var s_save_xnack_mask_lo = ttmp6 -var s_save_xnack_mask_hi = ttmp7 -var s_save_buf_rsrc0 = ttmp8 -var s_save_buf_rsrc1 = ttmp9 -var s_save_buf_rsrc2 = ttmp10 -var s_save_buf_rsrc3 = ttmp11 - -var s_save_mem_offset = ttmp14 -var s_save_alloc_size = s_save_trapsts //conflict -var s_save_tmp = s_save_buf_rsrc2 //shared with s_save_buf_rsrc2 (conflict: should not use mem access with s_save_tmp at the same time) -var s_save_m0 = ttmp15 - -/* Restore */ -var S_RESTORE_BUF_RSRC_WORD1_STRIDE = S_SAVE_BUF_RSRC_WORD1_STRIDE -var S_RESTORE_BUF_RSRC_WORD3_MISC = S_SAVE_BUF_RSRC_WORD3_MISC - -var S_RESTORE_SPI_INIT_ATC_MASK = 0x08000000 //bit[27]: ATC bit -var S_RESTORE_SPI_INIT_ATC_SHIFT = 27 -var S_RESTORE_SPI_INIT_MTYPE_MASK = 0x70000000 //bit[30:28]: Mtype -var S_RESTORE_SPI_INIT_MTYPE_SHIFT = 28 -var S_RESTORE_SPI_INIT_FIRST_WAVE_MASK = 0x04000000 //bit[26]: FirstWaveInTG -var S_RESTORE_SPI_INIT_FIRST_WAVE_SHIFT = 26 - -var S_RESTORE_PC_HI_RCNT_SHIFT = S_SAVE_PC_HI_RCNT_SHIFT -var S_RESTORE_PC_HI_RCNT_MASK = S_SAVE_PC_HI_RCNT_MASK -var S_RESTORE_PC_HI_FIRST_REPLAY_SHIFT = S_SAVE_PC_HI_FIRST_REPLAY_SHIFT -var S_RESTORE_PC_HI_FIRST_REPLAY_MASK = S_SAVE_PC_HI_FIRST_REPLAY_MASK - -var s_restore_spi_init_lo = exec_lo -var s_restore_spi_init_hi = exec_hi - -var s_restore_mem_offset = ttmp12 -var s_restore_alloc_size = ttmp3 -var s_restore_tmp = ttmp6 -var s_restore_mem_offset_save = s_restore_tmp //no conflict - -var s_restore_m0 = s_restore_alloc_size //no conflict - -var s_restore_mode = ttmp7 - -var s_restore_pc_lo = ttmp0 -var s_restore_pc_hi = ttmp1 -var s_restore_exec_lo = ttmp14 -var s_restore_exec_hi = ttmp15 -var s_restore_status = ttmp4 -var s_restore_trapsts = ttmp5 -var s_restore_xnack_mask_lo = xnack_mask_lo -var s_restore_xnack_mask_hi = xnack_mask_hi -var s_restore_buf_rsrc0 = ttmp8 -var s_restore_buf_rsrc1 = ttmp9 -var s_restore_buf_rsrc2 = ttmp10 -var s_restore_buf_rsrc3 = ttmp11 - -/**************************************************************************/ -/* trap handler entry points */ -/**************************************************************************/ - if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL)) //hack to use trap_id for determining save/restore - //FIXME VCCZ un-init assertion s_getreg_b32 s_save_status, hwreg(HW_REG_STATUS) //save STATUS since we will change SCC - s_and_b32 s_save_tmp, s_save_pc_hi, 0xffff0000 //change SCC - s_cmp_eq_u32 s_save_tmp, 0x007e0000 //Save: trap_id = 0x7e. Restore: trap_id = 0x7f. - s_cbranch_scc0 L_JUMP_TO_RESTORE //do not need to recover STATUS here since we are going to RESTORE - //FIXME s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status //need to recover STATUS since we are going to SAVE - s_branch L_SKIP_RESTORE //NOT restore, SAVE actually - else - s_branch L_SKIP_RESTORE //NOT restore. 
might be a regular trap or save - end - -L_JUMP_TO_RESTORE: - s_branch L_RESTORE //restore - -L_SKIP_RESTORE: - - s_getreg_b32 s_save_status, hwreg(HW_REG_STATUS) //save STATUS since we will change SCC - s_andn2_b32 s_save_status, s_save_status, SQ_WAVE_STATUS_SPI_PRIO_MASK //check whether this is for save - s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS) - s_and_b32 s_save_trapsts, s_save_trapsts, SQ_WAVE_TRAPSTS_SAVECTX_MASK //check whether this is for save - s_cbranch_scc1 L_SAVE //this is the operation for save - - // ********* Handle non-CWSR traps ******************* - if (!EMU_RUN_HACK) - s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS) - s_and_b32 s_save_trapsts, s_save_trapsts, SQ_WAVE_TRAPSTS_EXCE_MASK // Check whether it is an exception - s_cbranch_scc1 L_EXCP_CASE // Exception, jump back to the shader program directly. - s_add_u32 ttmp0, ttmp0, 4 // S_TRAP case, add 4 to ttmp0 - - L_EXCP_CASE: - s_and_b32 ttmp1, ttmp1, 0xFFFF - s_rfe_b64 [ttmp0, ttmp1] - end - // ********* End handling of non-CWSR traps ******************* - -/**************************************************************************/ -/* save routine */ -/**************************************************************************/ - -L_SAVE: - - //check whether there is mem_viol - s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS) - s_and_b32 s_save_trapsts, s_save_trapsts, SQ_WAVE_TRAPSTS_MEM_VIOL_MASK - s_cbranch_scc0 L_NO_PC_REWIND - - //if so, need rewind PC assuming GDS operation gets NACKed - s_mov_b32 s_save_tmp, 0 //clear mem_viol bit - s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_MEM_VIOL_SHIFT, 1), s_save_tmp //clear mem_viol bit - s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff //pc[47:32] - s_sub_u32 s_save_pc_lo, s_save_pc_lo, 8 //pc[31:0]-8 - s_subb_u32 s_save_pc_hi, s_save_pc_hi, 0x0 // -scc - -L_NO_PC_REWIND: - s_mov_b32 s_save_tmp, 0 //clear saveCtx bit - s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_SAVECTX_SHIFT, 1), s_save_tmp //clear saveCtx bit - - s_mov_b32 s_save_xnack_mask_lo, xnack_mask_lo //save XNACK_MASK - s_mov_b32 s_save_xnack_mask_hi, xnack_mask_hi - s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_RCNT_SHIFT, SQ_WAVE_IB_STS_RCNT_SIZE) //save RCNT - s_lshl_b32 s_save_tmp, s_save_tmp, S_SAVE_PC_HI_RCNT_SHIFT - s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp - s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT, SQ_WAVE_IB_STS_FIRST_REPLAY_SIZE) //save FIRST_REPLAY - s_lshl_b32 s_save_tmp, s_save_tmp, S_SAVE_PC_HI_FIRST_REPLAY_SHIFT - s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp - s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS) //clear RCNT and FIRST_REPLAY in IB_STS - s_and_b32 s_save_tmp, s_save_tmp, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG - - s_setreg_b32 hwreg(HW_REG_IB_STS), s_save_tmp - - /* inform SPI the readiness and wait for SPI's go signal */ - s_mov_b32 s_save_exec_lo, exec_lo //save EXEC and use EXEC for the go signal from SPI - s_mov_b32 s_save_exec_hi, exec_hi - s_mov_b64 exec, 0x0 //clear EXEC to get ready to receive - if (EMU_RUN_HACK) - - else - s_sendmsg sendmsg(MSG_SAVEWAVE) //send SPI a message and wait for SPI's write to EXEC - end - - L_SLEEP: - s_sleep 0x2 - - if (EMU_RUN_HACK) - - else - s_cbranch_execz L_SLEEP - end - - - /* setup Resource Contants */ - if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_SAVE_SINGLE_WAVE)) - //calculate wd_addr using absolute thread id - v_readlane_b32 s_save_tmp, v9, 0 - s_lshr_b32 s_save_tmp, s_save_tmp, 6 - s_mul_i32 s_save_tmp, s_save_tmp, WAVE_SPACE - s_add_i32 
s_save_spi_init_lo, s_save_tmp, WG_BASE_ADDR_LO - s_mov_b32 s_save_spi_init_hi, WG_BASE_ADDR_HI - s_and_b32 s_save_spi_init_hi, s_save_spi_init_hi, CTX_SAVE_CONTROL - else - end - if ((EMU_RUN_HACK) && (EMU_RUN_HACK_SAVE_SINGLE_WAVE)) - s_add_i32 s_save_spi_init_lo, s_save_tmp, WG_BASE_ADDR_LO - s_mov_b32 s_save_spi_init_hi, WG_BASE_ADDR_HI - s_and_b32 s_save_spi_init_hi, s_save_spi_init_hi, CTX_SAVE_CONTROL - else - end - - - s_mov_b32 s_save_buf_rsrc0, s_save_spi_init_lo //base_addr_lo - s_and_b32 s_save_buf_rsrc1, s_save_spi_init_hi, 0x0000FFFF //base_addr_hi - s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE - s_mov_b32 s_save_buf_rsrc2, 0 //NUM_RECORDS initial value = 0 (in bytes) although not neccessarily inited - s_mov_b32 s_save_buf_rsrc3, S_SAVE_BUF_RSRC_WORD3_MISC - s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_ATC_MASK - s_lshr_b32 s_save_tmp, s_save_tmp, (S_SAVE_SPI_INIT_ATC_SHIFT-SQ_BUF_RSRC_WORD1_ATC_SHIFT) //get ATC bit into position - s_or_b32 s_save_buf_rsrc3, s_save_buf_rsrc3, s_save_tmp //or ATC TODO: ATC deprecated, no need anymore. - s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_MTYPE_MASK - s_lshr_b32 s_save_tmp, s_save_tmp, (S_SAVE_SPI_INIT_MTYPE_SHIFT-SQ_BUF_RSRC_WORD3_MTYPE_SHIFT) //get MTYPE bits into position - s_or_b32 s_save_buf_rsrc3, s_save_buf_rsrc3, s_save_tmp //or MTYPE - - s_mov_b32 s_save_m0, m0 //save M0 - - /* global mem offset */ - s_mov_b32 s_save_mem_offset, 0x0 //mem offset initial value = 0 - - - /* the first wave in the threadgroup */ - s_barrier //FIXME not performance-optimal "LDS is used? wait for other waves in the same TG" - s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_FIRST_WAVE_MASK //exec is still used here - s_cbranch_scc0 L_SAVE_VGPR - - /* save LDS */ - ////////////////////////////// - L_SAVE_LDS: - - s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on - s_mov_b32 exec_hi, 0xFFFFFFFF - - s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE) //lds_size - s_and_b32 s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF //lds_size is zero? - s_cbranch_scc0 L_SAVE_VGPR //no lds used? jump to L_SAVE_VGPR - s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 6 //LDS size in dwords = lds_size * 64dw - s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 2 //LDS size in bytes - s_mov_b32 s_save_buf_rsrc2, s_save_alloc_size //NUM_RECORDS in bytes - if (SWIZZLE_EN) - s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? - else - s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes - end - s_mov_b32 m0, 0x0 //lds_offset initial value = 0 - s_nop 0x0 //Manually inserted wait states - - L_SAVE_LDS_LOOP: - if (SAVE_LDS) - buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 - end - s_add_u32 m0, m0, 256 //every buffer_store_lds does 256 bytes - s_add_u32 s_save_mem_offset, s_save_mem_offset, 256 //mem offset increased by 256 bytes - s_cmp_lt_u32 m0, s_save_alloc_size //scc=(m0 < s_save_alloc_size) ? 1 : 0 - s_cbranch_scc1 L_SAVE_LDS_LOOP //LDS save is complete? 
- - - /* save VGPRs */ - ////////////////////////////// - L_SAVE_VGPR: - - s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on - s_mov_b32 exec_hi, 0xFFFFFFFF - - s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE) //vpgr_size - s_add_u32 s_save_alloc_size, s_save_alloc_size, 1 - s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value) //FIXME for GFX, zero is possible - s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 8 //NUM_RECORDS in bytes (64 threads*4) - if (SWIZZLE_EN) - s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? - else - s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes - end - - s_mov_b32 m0, 0x0 //VGPR initial index value =0 - s_set_gpr_idx_on m0, 0x1 //M0[7:0] = M0[7:0] and M0[15:12] = 0x1 - s_add_u32 s_save_alloc_size, s_save_alloc_size, 0x1000 //add 0x1000 since we compare m0 against it later - - L_SAVE_VGPR_LOOP: - v_mov_b32 v0, v0 //v0 = v[0+m0] - - if(USE_MTBUF_INSTEAD_OF_MUBUF) - tbuffer_store_format_x v0, v0, s_save_buf_rsrc0, s_save_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1 - else - buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 - end - - s_add_u32 m0, m0, 1 //next vgpr index - s_add_u32 s_save_mem_offset, s_save_mem_offset, 256 //every buffer_store_dword does 256 bytes - s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0 - s_cbranch_scc1 L_SAVE_VGPR_LOOP //VGPR save is complete? - s_set_gpr_idx_off - - - /* save ACC_VGPRs */ - ////////////////////////////// - L_SAVE_ACC_VGPR: - - s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on - s_mov_b32 exec_hi, 0xFFFFFFFF - - s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE) //vpgr_size - s_add_u32 s_save_alloc_size, s_save_alloc_size, 1 - s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value) //FIXME for GFX, zero is possible - s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 8 //NUM_RECORDS in bytes (64 threads*4) - if (SWIZZLE_EN) - s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? - else - s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes - end - - s_mov_b32 m0, 0x0 //VGPR initial index value =0 - s_set_gpr_idx_on m0, 0x1 //M0[7:0] = M0[7:0] and M0[15:12] = 0x1 - s_add_u32 s_save_alloc_size, s_save_alloc_size, 0x1000 //add 0x1000 since we compare m0 against it later - - L_SAVE_ACC_VGPR_LOOP: - v_accvgpr_read v0, v0 - v_nop - v_nop - if(USE_MTBUF_INSTEAD_OF_MUBUF) - tbuffer_store_format_x v0, v0, s_save_buf_rsrc0, s_save_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1 - else - buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 - end - - s_add_u32 m0, m0, 1 //next vgpr index - s_add_u32 s_save_mem_offset, s_save_mem_offset, 256 //every buffer_store_dword does 256 bytes - s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0 - s_cbranch_scc1 L_SAVE_ACC_VGPR_LOOP //VGPR save is complete? 
- s_set_gpr_idx_off - - - /* save SGPRs */ - ////////////////////////////// - s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SIZE) //spgr_size - s_add_u32 s_save_alloc_size, s_save_alloc_size, 1 - s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 4 //Number of SGPRs = (sgpr_size + 1) * 16 (non-zero value) - - if (SGPR_SAVE_USE_SQC) - s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 2 //NUM_RECORDS in bytes - else - s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 8 //NUM_RECORDS in bytes (64 threads) - end - - if (SWIZZLE_EN) - s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? - else - s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes - end - - s_mov_b32 m0, 0x0 //SGPR initial index value =0 - s_nop 0x0 //Manually inserted wait states - - L_SAVE_SGPR_LOOP: - s_movrels_b32 s0, s0 //s0 = s[0+m0] - write_sgpr_to_mem(s0, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //PV: the best performance should be using s_buffer_store_dwordx4 - s_add_u32 m0, m0, 1 //next sgpr index - s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0 - s_cbranch_scc1 L_SAVE_SGPR_LOOP //SGPR save is complete? - - /* save HW registers */ - ////////////////////////////// - L_SAVE_HWREG: - s_mov_b32 s_save_buf_rsrc2, 0x4 //NUM_RECORDS in bytes - if (SWIZZLE_EN) - s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? - else - s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes - end - - - write_sgpr_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //M0 - - if ((EMU_RUN_HACK) && (EMU_RUN_HACK_SAVE_FIRST_TIME)) - s_add_u32 s_save_pc_lo, s_save_pc_lo, 4 //pc[31:0]+4 - s_addc_u32 s_save_pc_hi, s_save_pc_hi, 0x0 //carry bit over - end - - write_sgpr_to_mem(s_save_pc_lo, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //PC - write_sgpr_to_mem(s_save_pc_hi, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) - write_sgpr_to_mem(s_save_exec_lo, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //EXEC - write_sgpr_to_mem(s_save_exec_hi, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) - write_sgpr_to_mem(s_save_status, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //STATUS - - //s_save_trapsts conflicts with s_save_alloc_size - s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS) - write_sgpr_to_mem(s_save_trapsts, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //TRAPSTS - - write_sgpr_to_mem(s_save_xnack_mask_lo, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //XNACK_MASK_LO - write_sgpr_to_mem(s_save_xnack_mask_hi, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //XNACK_MASK_HI - - //use s_save_tmp would introduce conflict here between s_save_tmp and s_save_buf_rsrc2 - s_getreg_b32 s_save_m0, hwreg(HW_REG_MODE) //MODE - write_sgpr_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) - - /* S_PGM_END_SAVED */ //FIXME graphics ONLY - if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_SAVE_NORMAL_EXIT)) - s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff //pc[47:32] - s_add_u32 s_save_pc_lo, s_save_pc_lo, 4 //pc[31:0]+4 - 
s_addc_u32 s_save_pc_hi, s_save_pc_hi, 0x0 //carry bit over - s_rfe_b64 s_save_pc_lo //Return to the main shader program - else - end - - - s_branch L_END_PGM - - - -/**************************************************************************/ -/* restore routine */ -/**************************************************************************/ - -L_RESTORE: - /* Setup Resource Contants */ - if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL)) - //calculate wd_addr using absolute thread id - v_readlane_b32 s_restore_tmp, v9, 0 - s_lshr_b32 s_restore_tmp, s_restore_tmp, 6 - s_mul_i32 s_restore_tmp, s_restore_tmp, WAVE_SPACE - s_add_i32 s_restore_spi_init_lo, s_restore_tmp, WG_BASE_ADDR_LO - s_mov_b32 s_restore_spi_init_hi, WG_BASE_ADDR_HI - s_and_b32 s_restore_spi_init_hi, s_restore_spi_init_hi, CTX_RESTORE_CONTROL - else - end - - s_mov_b32 s_restore_buf_rsrc0, s_restore_spi_init_lo //base_addr_lo - s_and_b32 s_restore_buf_rsrc1, s_restore_spi_init_hi, 0x0000FFFF //base_addr_hi - s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE - s_mov_b32 s_restore_buf_rsrc2, 0 //NUM_RECORDS initial value = 0 (in bytes) - s_mov_b32 s_restore_buf_rsrc3, S_RESTORE_BUF_RSRC_WORD3_MISC - s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_ATC_MASK - s_lshr_b32 s_restore_tmp, s_restore_tmp, (S_RESTORE_SPI_INIT_ATC_SHIFT-SQ_BUF_RSRC_WORD1_ATC_SHIFT) //get ATC bit into position - s_or_b32 s_restore_buf_rsrc3, s_restore_buf_rsrc3, s_restore_tmp //or ATC TODO: ATC deprecated, no need anymore. - s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_MTYPE_MASK - s_lshr_b32 s_restore_tmp, s_restore_tmp, (S_RESTORE_SPI_INIT_MTYPE_SHIFT-SQ_BUF_RSRC_WORD3_MTYPE_SHIFT) //get MTYPE bits into position - s_or_b32 s_restore_buf_rsrc3, s_restore_buf_rsrc3, s_restore_tmp //or MTYPE - - /* global mem offset */ - s_mov_b32 s_restore_mem_offset, 0x0 //mem offset initial value = 0 - - /* the first wave in the threadgroup */ - s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_FIRST_WAVE_MASK - s_cbranch_scc0 L_RESTORE_VGPR - - /* restore LDS */ - ////////////////////////////// - L_RESTORE_LDS: - - s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on //be consistent with SAVE although can be moved ahead - s_mov_b32 exec_hi, 0xFFFFFFFF - - s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE) //lds_size - s_and_b32 s_restore_alloc_size, s_restore_alloc_size, 0xFFFFFFFF //lds_size is zero? - s_cbranch_scc0 L_RESTORE_VGPR //no lds used? jump to L_RESTORE_VGPR - s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 6 //LDS size in dwords = lds_size * 64dw - s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 2 //LDS size in bytes - s_mov_b32 s_restore_buf_rsrc2, s_restore_alloc_size //NUM_RECORDS in bytes - if (SWIZZLE_EN) - s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? - else - s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes - end - s_mov_b32 m0, 0x0 //lds_offset initial value = 0 - - L_RESTORE_LDS_LOOP: - if (SAVE_LDS) - buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 - end - s_add_u32 m0, m0, 256 //every buffer_load_dword does 256 bytes - s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256 //mem offset increased by 256 bytes - s_cmp_lt_u32 m0, s_restore_alloc_size //scc=(m0 < s_restore_alloc_size) ? 
1 : 0 - s_cbranch_scc1 L_RESTORE_LDS_LOOP //LDS restore is complete? - - - /* restore VGPRs */ - ////////////////////////////// - L_RESTORE_VGPR: - - s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on //be consistent with SAVE although can be moved ahead - s_mov_b32 exec_hi, 0xFFFFFFFF - - s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE) //vpgr_size - s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 1 - s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value) - s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 8 //NUM_RECORDS in bytes (64 threads*4) - if (SWIZZLE_EN) - s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? - else - s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes - end - s_mov_b32 s_restore_mem_offset_save, s_restore_mem_offset // restore start with v1, v0 will be the last - s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256 - s_mov_b32 m0, 1 //VGPR initial index value = 1 - s_set_gpr_idx_on m0, 0x8 //M0[7:0] = M0[7:0] and M0[15:12] = 0x8 - s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 0x8000 //add 0x8000 since we compare m0 against it later - - L_RESTORE_VGPR_LOOP: - if(USE_MTBUF_INSTEAD_OF_MUBUF) - tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1 - else - buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 - end - s_waitcnt vmcnt(0) //ensure data ready - v_mov_b32 v0, v0 //v[0+m0] = v0 - s_add_u32 m0, m0, 1 //next vgpr index - s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256 //every buffer_load_dword does 256 bytes - s_cmp_lt_u32 m0, s_restore_alloc_size //scc = (m0 < s_restore_alloc_size) ? 1 : 0 - s_cbranch_scc1 L_RESTORE_VGPR_LOOP //VGPR restore (except v0) is complete? - s_set_gpr_idx_off - - - /* restore ACC_VGPRs */ - ////////////////////////////// - L_RESTORE_ACC_VGPR: - - s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on //be consistent with SAVE although can be moved ahead - s_mov_b32 exec_hi, 0xFFFFFFFF - - s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE) //vpgr_size - s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 1 - s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value) - s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 8 //NUM_RECORDS in bytes (64 threads*4) - if (SWIZZLE_EN) - s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? 
- else - s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes - end - s_mov_b32 m0, 0 //VGPR initial index value = 0 - s_set_gpr_idx_on m0, 0x8 //M0[7:0] = M0[7:0] and M0[15:12] = 0x8 - s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 0x8000 //add 0x8000 since we compare m0 against it later - - L_RESTORE_ACC_VGPR_LOOP: - if(USE_MTBUF_INSTEAD_OF_MUBUF) - tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1 - else - buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 - end - s_waitcnt vmcnt(0) //ensure data ready - v_accvgpr_write v0, v0 //v[0+m0] = v0 - s_add_u32 m0, m0, 1 //next vgpr index - s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256 //every buffer_load_dword does 256 bytes - s_cmp_lt_u32 m0, s_restore_alloc_size //scc = (m0 < s_restore_alloc_size) ? 1 : 0 - s_cbranch_scc1 L_RESTORE_ACC_VGPR_LOOP //VGPR restore (except v0) is complete? - s_set_gpr_idx_off - /* VGPR restore on v0 */ - if(USE_MTBUF_INSTEAD_OF_MUBUF) - tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1 - else - buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 - end - - /* restore SGPRs */ - ////////////////////////////// - s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SIZE) //spgr_size - s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 1 - s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 4 //Number of SGPRs = (sgpr_size + 1) * 16 (non-zero value) - - if (SGPR_SAVE_USE_SQC) - s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 2 //NUM_RECORDS in bytes - else - s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 8 //NUM_RECORDS in bytes (64 threads) - end - if (SWIZZLE_EN) - s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? - else - s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes - end - read_sgpr_from_mem(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //save s0 to s_restore_tmp - s_mov_b32 m0, 0x1 //SGPR initial index value =1 //go on with with s1 - - L_RESTORE_SGPR_LOOP: - read_sgpr_from_mem(s0, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //PV: further performance improvement can be made - s_waitcnt lgkmcnt(0) //ensure data ready - s_movreld_b32 s0, s0 //s[0+m0] = s0 - s_nop 0 // hazard SALU M0=> S_MOVREL - s_add_u32 m0, m0, 1 //next sgpr index - s_cmp_lt_u32 m0, s_restore_alloc_size //scc = (m0 < s_restore_alloc_size) ? 1 : 0 - s_cbranch_scc1 L_RESTORE_SGPR_LOOP //SGPR restore (except s0) is complete? - s_mov_b32 s0, s_restore_tmp /* SGPR restore on s0 */ - - /* restore HW registers */ - ////////////////////////////// - L_RESTORE_HWREG: - s_mov_b32 s_restore_buf_rsrc2, 0x4 //NUM_RECORDS in bytes - if (SWIZZLE_EN) - s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? 
- else - s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes - end - - read_sgpr_from_mem(s_restore_m0, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //M0 - read_sgpr_from_mem(s_restore_pc_lo, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //PC - read_sgpr_from_mem(s_restore_pc_hi, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) - read_sgpr_from_mem(s_restore_exec_lo, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //EXEC - read_sgpr_from_mem(s_restore_exec_hi, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) - read_sgpr_from_mem(s_restore_status, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //STATUS - read_sgpr_from_mem(s_restore_trapsts, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //TRAPSTS - read_sgpr_from_mem(xnack_mask_lo, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //XNACK_MASK_LO - read_sgpr_from_mem(xnack_mask_hi, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //XNACK_MASK_HI - read_sgpr_from_mem(s_restore_mode, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //MODE - - s_waitcnt lgkmcnt(0) //from now on, it is safe to restore STATUS and IB_STS - - s_mov_b32 s_restore_tmp, s_restore_pc_hi - s_and_b32 s_restore_pc_hi, s_restore_tmp, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS - - //for normal save & restore, the saved PC points to the next inst to execute, no adjustment needs to be made, otherwise: - if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL)) - s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 8 //pc[31:0]+8 //two back-to-back s_trap are used (first for save and second for restore) - s_addc_u32 s_restore_pc_hi, s_restore_pc_hi, 0x0 //carry bit over - end - if ((EMU_RUN_HACK) && (EMU_RUN_HACK_RESTORE_NORMAL)) - s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 4 //pc[31:0]+4 // save is hack through s_trap but restore is normal - s_addc_u32 s_restore_pc_hi, s_restore_pc_hi, 0x0 //carry bit over - end - - s_mov_b32 m0, s_restore_m0 - s_mov_b32 exec_lo, s_restore_exec_lo - s_mov_b32 exec_hi, s_restore_exec_hi - - s_and_b32 s_restore_m0, SQ_WAVE_TRAPSTS_PRE_SAVECTX_MASK, s_restore_trapsts - s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_PRE_SAVECTX_SHIFT, SQ_WAVE_TRAPSTS_PRE_SAVECTX_SIZE), s_restore_m0 - s_and_b32 s_restore_m0, SQ_WAVE_TRAPSTS_POST_SAVECTX_MASK, s_restore_trapsts - s_lshr_b32 s_restore_m0, s_restore_m0, SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT - s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT, SQ_WAVE_TRAPSTS_POST_SAVECTX_SIZE), s_restore_m0 - //s_setreg_b32 hwreg(HW_REG_TRAPSTS), s_restore_trapsts //don't overwrite SAVECTX bit as it may be set through external SAVECTX during restore - s_setreg_b32 hwreg(HW_REG_MODE), s_restore_mode - //reuse s_restore_m0 as a temp register - s_and_b32 s_restore_m0, s_restore_tmp, S_SAVE_PC_HI_RCNT_MASK - s_lshr_b32 s_restore_m0, s_restore_m0, S_SAVE_PC_HI_RCNT_SHIFT - s_lshl_b32 s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_RCNT_SHIFT - s_mov_b32 s_restore_mode, 0x0 //IB_STS is zero - s_or_b32 s_restore_mode, s_restore_mode, s_restore_m0 - s_and_b32 s_restore_m0, s_restore_tmp, S_SAVE_PC_HI_FIRST_REPLAY_MASK - s_lshr_b32 s_restore_m0, s_restore_m0, S_SAVE_PC_HI_FIRST_REPLAY_SHIFT - s_lshl_b32 s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT - s_or_b32 s_restore_mode, s_restore_mode, s_restore_m0 - s_and_b32 s_restore_m0, s_restore_status, SQ_WAVE_STATUS_INST_ATC_MASK - s_lshr_b32 s_restore_m0, s_restore_m0, 
SQ_WAVE_STATUS_INST_ATC_SHIFT - s_setreg_b32 hwreg(HW_REG_IB_STS), s_restore_mode - - s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32 - s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32 - s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status - - s_barrier //barrier to ensure the readiness of LDS before access attemps from any other wave in the same TG //FIXME not performance-optimal at this time - - -// s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution - s_rfe_restore_b64 s_restore_pc_lo, s_restore_m0 // s_restore_m0[0] is used to set STATUS.inst_atc - - -/**************************************************************************/ -/* the END */ -/**************************************************************************/ -L_END_PGM: - s_endpgm - -end - - -/**************************************************************************/ -/* the helper functions */ -/**************************************************************************/ - -function write_sgpr_to_mem(s, s_rsrc, s_mem_offset, use_sqc, use_mtbuf) - if (use_sqc) - s_mov_b32 exec_lo, m0 //assuming exec_lo is not needed anymore from this point on - s_mov_b32 m0, s_mem_offset - s_buffer_store_dword s, s_rsrc, m0 glc:1 - s_add_u32 s_mem_offset, s_mem_offset, 4 - s_mov_b32 m0, exec_lo - elsif (use_mtbuf) - v_mov_b32 v0, s - tbuffer_store_format_x v0, v0, s_rsrc, s_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1 - s_add_u32 s_mem_offset, s_mem_offset, 256 - else - v_mov_b32 v0, s - buffer_store_dword v0, v0, s_rsrc, s_mem_offset slc:1 glc:1 - s_add_u32 s_mem_offset, s_mem_offset, 256 - end -end - - - -function read_sgpr_from_mem(s, s_rsrc, s_mem_offset, use_sqc) - s_buffer_load_dword s, s_rsrc, s_mem_offset glc:1 - if (use_sqc) - s_add_u32 s_mem_offset, s_mem_offset, 4 - else - s_add_u32 s_mem_offset, s_mem_offset, 256 - end -end - - - - - diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm index 6bae2e022c6e..871f2d431a44 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm @@ -197,13 +197,15 @@ var s_restore_spi_init_lo = exec_lo var s_restore_spi_init_hi = exec_hi var s_restore_mem_offset = ttmp12 +var s_restore_accvgpr_offset = ttmp13 var s_restore_alloc_size = ttmp3 var s_restore_tmp = ttmp2 var s_restore_mem_offset_save = s_restore_tmp //no conflict +var s_restore_accvgpr_offset_save = ttmp7 var s_restore_m0 = s_restore_alloc_size //no conflict -var s_restore_mode = ttmp7 +var s_restore_mode = s_restore_accvgpr_offset_save var s_restore_pc_lo = ttmp0 var s_restore_pc_hi = ttmp1 @@ -226,7 +228,7 @@ var s_restore_ttmps_hi = s_restore_alloc_size //no conflict /* Shader Main*/ shader main - asic(GFX9) + asic(DEFAULT) type(CS) @@ -791,10 +793,48 @@ end L_SAVE_VGPR_END: +if ASIC_TARGET_ARCTURUS + // Save ACC VGPRs + s_mov_b32 m0, 0x0 //VGPR initial index value =0 + s_set_gpr_idx_on m0, 0x1 //M0[7:0] = M0[7:0] and M0[15:12] = 0x1 +if SAVE_AFTER_XNACK_ERROR + check_if_tcp_store_ok() + s_cbranch_scc1 L_SAVE_ACCVGPR_LOOP +L_SAVE_ACCVGPR_LOOP_SQC: + for var vgpr = 0; vgpr < 4; ++ vgpr + v_accvgpr_read v[vgpr], acc[vgpr] // v[N] = acc[N+m0] + end + + write_vgprs_to_mem_with_sqc(v0, 4, s_save_buf_rsrc0, s_save_mem_offset) + + s_add_u32 m0, m0, 4 + s_cmp_lt_u32 m0, s_save_alloc_size + s_cbranch_scc1 L_SAVE_ACCVGPR_LOOP_SQC + s_set_gpr_idx_off + s_branch L_SAVE_ACCVGPR_END +end 
+L_SAVE_ACCVGPR_LOOP: + for var vgpr = 0; vgpr < 4; ++ vgpr + v_accvgpr_read v[vgpr], acc[vgpr] // v[N] = acc[N+m0] + end + + buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 + buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256 + buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*2 + buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*3 + + s_add_u32 m0, m0, 4 + s_add_u32 s_save_mem_offset, s_save_mem_offset, 256*4 + s_cmp_lt_u32 m0, s_save_alloc_size + s_cbranch_scc1 L_SAVE_ACCVGPR_LOOP + s_set_gpr_idx_off + +L_SAVE_ACCVGPR_END: +end /* S_PGM_END_SAVED */ //FIXME graphics ONLY if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_SAVE_NORMAL_EXIT)) @@ -921,6 +961,11 @@ end s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 1 s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value) s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 8 //NUM_RECORDS in bytes (64 threads*4) + +if ASIC_TARGET_ARCTURUS + s_mov_b32 s_restore_accvgpr_offset, s_restore_buf_rsrc2 //ACC VGPRs at end of VGPRs +end + if (SWIZZLE_EN) s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? else @@ -958,6 +1003,10 @@ else // VGPR load using dw burst s_mov_b32 s_restore_mem_offset_save, s_restore_mem_offset // restore start with v1, v0 will be the last s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4 +if ASIC_TARGET_ARCTURUS + s_mov_b32 s_restore_accvgpr_offset_save, s_restore_accvgpr_offset + s_add_u32 s_restore_accvgpr_offset, s_restore_accvgpr_offset, 256*4 +end s_mov_b32 m0, 4 //VGPR initial index value = 1 s_set_gpr_idx_on m0, 0x8 //M0[7:0] = M0[7:0] and M0[15:12] = 0x8 s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 0x8000 //add 0x8000 since we compare m0 against it later @@ -966,6 +1015,20 @@ else if(USE_MTBUF_INSTEAD_OF_MUBUF) tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1 else + +if ASIC_TARGET_ARCTURUS + buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_accvgpr_offset slc:1 glc:1 + buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_accvgpr_offset slc:1 glc:1 offset:256 + buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_accvgpr_offset slc:1 glc:1 offset:256*2 + buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_accvgpr_offset slc:1 glc:1 offset:256*3 + s_add_u32 s_restore_accvgpr_offset, s_restore_accvgpr_offset, 256*4 + s_waitcnt vmcnt(0) + + for var vgpr = 0; vgpr < 4; ++ vgpr + v_accvgpr_write acc[vgpr], v[vgpr] + end +end + buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256 buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256*2 @@ -982,6 +1045,18 @@ else s_cbranch_scc1 L_RESTORE_VGPR_LOOP //VGPR restore (except v0) is complete? 
s_set_gpr_idx_off /* VGPR restore on v0 */ +if ASIC_TARGET_ARCTURUS + buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_accvgpr_offset_save slc:1 glc:1 + buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_accvgpr_offset_save slc:1 glc:1 offset:256 + buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_accvgpr_offset_save slc:1 glc:1 offset:256*2 + buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_accvgpr_offset_save slc:1 glc:1 offset:256*3 + s_waitcnt vmcnt(0) + + for var vgpr = 0; vgpr < 4; ++ vgpr + v_accvgpr_write acc[vgpr], v[vgpr] + end +end + if(USE_MTBUF_INSTEAD_OF_MUBUF) tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1 else @@ -1202,6 +1277,10 @@ function get_vgpr_size_bytes(s_vgpr_size_byte) s_getreg_b32 s_vgpr_size_byte, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE) //vpgr_size s_add_u32 s_vgpr_size_byte, s_vgpr_size_byte, 1 s_lshl_b32 s_vgpr_size_byte, s_vgpr_size_byte, (2+8) //Number of VGPRs = (vgpr_size + 1) * 4 * 64 * 4 (non-zero value) //FIXME for GFX, zero is possible + +if ASIC_TARGET_ARCTURUS + s_lshl_b32 s_vgpr_size_byte, s_vgpr_size_byte, 1 // Double size for ACC VGPRs +end end function get_sgpr_size_bytes(s_sgpr_size_byte) -- cgit v1.2.3 From 69d4de94f8d35111b3cb54821d3d933b93d87343 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 2 Jul 2019 11:15:37 +0800 Subject: drm/amdgpu: enable all 8 sdma instances for Arcturus silicon The more 6 sdma instances work fine now with DF fix in vbios: * mmDF_PIE_AON_MiscClientsEnable(0x1c728)=0x3fe(DF_ALL_INSTANCE) [9:4]MmhubsEnable=3f (change from 0) Signed-off-by: Le Ma Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 8d33cb730b63..921a6dd9cbae 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1602,7 +1602,7 @@ static int sdma_v4_0_early_init(void *handle) if (adev->asic_type == CHIP_RAVEN) adev->sdma.num_instances = 1; else if (adev->asic_type == CHIP_ARCTURUS) - adev->sdma.num_instances = 2; + adev->sdma.num_instances = 8; else adev->sdma.num_instances = 2; -- cgit v1.2.3 From 47a7fe53167cf7b00f6ee9b33992b2faf1e3213e Mon Sep 17 00:00:00 2001 From: Oak Zeng Date: Tue, 2 Jul 2019 22:01:59 -0500 Subject: drm/amdkfd: Increase vcrat size for GPU GPU cache info (part of virtual CRAT) size depends on CU number. For arcturus, CU number has been increased. So the required memory for vcrat also increases. Signed-off-by: Oak Zeng Reviewed-by: Yong Zhao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index b30ccbfeb648..454552fe2563 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -788,7 +788,7 @@ int kfd_create_crat_image_acpi(void **crat_image, size_t *size) * is put in the code to ensure we don't overwrite. 
*/ #define VCRAT_SIZE_FOR_CPU (2 * PAGE_SIZE) -#define VCRAT_SIZE_FOR_GPU (3 * PAGE_SIZE) +#define VCRAT_SIZE_FOR_GPU (4 * PAGE_SIZE) /* kfd_fill_cu_for_cpu - Fill in Compute info for the given CPU NUMA node * -- cgit v1.2.3 From 1da418ba65a735bd90ceb8e72afb735f6103a354 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Mon, 1 Jul 2019 19:12:14 -0400 Subject: drm/amdgpu:add all VCN rings into schedule request queue Add all VCN instances' decode/encode/jpeg decode rings into drm_sched_rq list. Signed-off-by: James Zhu Reviewed-by: Leo Liu Reviewed-by: Boyuan Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 82b871fdfb45..ec311de86fba 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -74,7 +74,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx) { unsigned num_entities = amdgput_ctx_total_num_entities(); - unsigned i, j; + unsigned i, j, k; int r; if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX) @@ -123,7 +123,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) { struct amdgpu_ring *rings[AMDGPU_MAX_RINGS]; struct drm_sched_rq *rqs[AMDGPU_MAX_RINGS]; - unsigned num_rings; + unsigned num_rings = 0; unsigned num_rqs = 0; switch (i) { @@ -154,16 +154,26 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, num_rings = 1; break; case AMDGPU_HW_IP_VCN_DEC: - rings[0] = &adev->vcn.inst[0].ring_dec; - num_rings = 1; + for (j = 0; j < adev->vcn.num_vcn_inst; ++j) { + if (adev->vcn.harvest_config & (1 << j)) + continue; + rings[num_rings++] = &adev->vcn.inst[j].ring_dec; + } break; case AMDGPU_HW_IP_VCN_ENC: - rings[0] = &adev->vcn.inst[0].ring_enc[0]; - num_rings = 1; + for (j = 0; j < adev->vcn.num_vcn_inst; ++j) { + if (adev->vcn.harvest_config & (1 << j)) + continue; + for (k = 0; k < adev->vcn.num_enc_rings; ++k) + rings[num_rings++] = &adev->vcn.inst[j].ring_enc[k]; + } break; case AMDGPU_HW_IP_VCN_JPEG: - rings[0] = &adev->vcn.inst[0].ring_jpeg; - num_rings = 1; + for (j = 0; j < adev->vcn.num_vcn_inst; ++j) { + if (adev->vcn.harvest_config & (1 << j)) + continue; + rings[num_rings++] = &adev->vcn.inst[j].ring_jpeg; + } break; } -- cgit v1.2.3 From ad91b134a2e6e18f4c7ea32043f96e46a569ae43 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 11 Jul 2019 22:10:31 -0500 Subject: drm/amdgpu: drop unused function definitions These were dropped and the headers never got cleaned up. 
Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h | 14 -------------- 1 file changed, 14 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h index b79d2a629768..26d8879bff9d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h @@ -55,20 +55,6 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd, unsigned int watch_point_id, unsigned int reg_offset); -uint32_t kgd_gfx_v9_enable_debug_trap(struct kgd_dev *kgd, - uint32_t trap_debug_wave_launch_mode, - uint32_t vmid); -uint32_t kgd_gfx_v9_disable_debug_trap(struct kgd_dev *kgd); -uint32_t kgd_gfx_v9_set_debug_trap_data(struct kgd_dev *kgd, - int trap_data0, - int trap_data1); -uint32_t kgd_gfx_v9_set_wave_launch_trap_override(struct kgd_dev *kgd, - uint32_t trap_override, - uint32_t trap_mask); -uint32_t kgd_gfx_v9_set_wave_launch_mode(struct kgd_dev *kgd, - uint8_t wave_launch_mode, - uint32_t vmid); - bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid); uint16_t kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, -- cgit v1.2.3 From a08a4dae7a11d6665c8d2a49a2c01e8a71a18955 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 15 Jul 2019 16:16:00 -0500 Subject: drm/amdgpu: flag arcturus as experimental for now Current support will only work in internal engineering boards. Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 170c780b3bd7..e25e3d8c6fac 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -997,9 +997,9 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU}, {0x1002, 0x15d8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU}, /* Arcturus */ - {0x1002, 0x738C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS}, - {0x1002, 0x7388, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS}, - {0x1002, 0x738E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS}, + {0x1002, 0x738C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x7388, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x738E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS|AMD_EXP_HW_SUPPORT}, /* Navi10 */ {0x1002, 0x7310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10}, {0x1002, 0x7312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10}, -- cgit v1.2.3 From 7e01a2ec96bf8a149c5e83d0352cf6ea286275cf Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Thu, 11 Jul 2019 10:23:17 +0800 Subject: drm/amd/powerplay: correct SW SMU valid mapping check Current implementation is not actually able to detect invalid message/table/workload mapping. 
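A minimal sketch of the problem and the fix (illustrative only; the struct and helper below are hypothetical stand-ins for the real smu_11_0_cmn2aisc_mapping type introduced in the diff):

/*
 * Sketch, not driver code: with a plain int table built by designated
 * initializers, any entry never filled in by MSG_MAP() and friends
 * defaults to 0, which is indistinguishable from a genuine mapping to
 * index 0, so a "val > count" check cannot reject it.  Carrying an
 * explicit valid flag makes the unset case detectable.
 */
struct example_cmn2aisc_mapping {
	int valid_mapping;	/* 0 == this ASIC never defined the entry */
	int map_to;		/* ASIC index, meaningful only when valid */
};

static int example_get_index(const struct example_cmn2aisc_mapping *map,
			     int index, int count)
{
	if (index < 0 || index >= count)
		return -EINVAL;
	if (!map[index].valid_mapping)
		return -EINVAL;	/* unmapped entry caught, not treated as index 0 */
	return map[index].map_to;
}

Callers can then treat a negative return as "unsupported" instead of silently forwarding a bogus zero index to the SMC.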
Signed-off-by: Evan Quan Reviewed-by: Feifei Xu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/kgd_pp_interface.h | 1 + drivers/gpu/drm/amd/powerplay/arcturus_ppt.c | 14 +++--- drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h | 15 ++++-- drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 68 ++++++++++++++------------ drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 67 +++++++++++++------------ 5 files changed, 94 insertions(+), 71 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index 9f661bf96ed0..9733bbf9bc72 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -141,6 +141,7 @@ enum PP_SMC_POWER_PROFILE { PP_SMC_POWER_PROFILE_VR = 0x4, PP_SMC_POWER_PROFILE_COMPUTE = 0x5, PP_SMC_POWER_PROFILE_CUSTOM = 0x6, + PP_SMC_POWER_PROFILE_COUNT, }; enum { diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c index 534e450df4bb..7d680f33ce3c 100644 --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c @@ -37,9 +37,9 @@ #include "nbio/nbio_7_4_sh_mask.h" #define MSG_MAP(msg, index) \ - [SMU_MSG_##msg] = index + [SMU_MSG_##msg] = {1, (index)} -static int arcturus_message_map[SMU_MSG_MAX_COUNT] = { +static struct smu_11_0_cmn2aisc_mapping arcturus_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(TestMessage, PPSMC_MSG_TestMessage), MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion), MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion), @@ -101,16 +101,18 @@ static int arcturus_message_map[SMU_MSG_MAX_COUNT] = { static int arcturus_get_smu_msg_index(struct smu_context *smc, uint32_t index) { - int val; + struct smu_11_0_cmn2aisc_mapping mapping; if (index >= SMU_MSG_MAX_COUNT) return -EINVAL; - val = arcturus_message_map[index]; - if (val > PPSMC_Message_Count) + mapping = arcturus_message_map[index]; + if (!(mapping.valid_mapping)) { + pr_warn("Unsupported SMU message: %d\n", index); return -EINVAL; + } - return val; + return mapping.map_to; } static const struct pptable_funcs arcturus_ppt_funcs = { diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h index 2fff4b16cb4e..fcb58012170f 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h @@ -43,19 +43,24 @@ #define SMU11_TOOL_SIZE 0x19000 #define CLK_MAP(clk, index) \ - [SMU_##clk] = index + [SMU_##clk] = {1, (index)} #define FEA_MAP(fea) \ - [SMU_FEATURE_##fea##_BIT] = FEATURE_##fea##_BIT + [SMU_FEATURE_##fea##_BIT] = {1, FEATURE_##fea##_BIT} #define TAB_MAP(tab) \ - [SMU_TABLE_##tab] = TABLE_##tab + [SMU_TABLE_##tab] = {1, TABLE_##tab} #define PWR_MAP(tab) \ - [SMU_POWER_SOURCE_##tab] = POWER_SOURCE_##tab + [SMU_POWER_SOURCE_##tab] = {1, POWER_SOURCE_##tab} #define WORKLOAD_MAP(profile, workload) \ - [profile] = workload + [profile] = {1, (workload)} + +struct smu_11_0_cmn2aisc_mapping { + int valid_mapping; + int map_to; +}; struct smu_11_0_max_sustainable_clocks { uint32_t display_clock; diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index 2dae0ae0829e..f6b1c8b2b2d6 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -49,9 +49,9 @@ FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT)) #define MSG_MAP(msg, index) \ - [SMU_MSG_##msg] = index + [SMU_MSG_##msg] = {1, (index)} -static int 
navi10_message_map[SMU_MSG_MAX_COUNT] = { +static struct smu_11_0_cmn2aisc_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(TestMessage, PPSMC_MSG_TestMessage), MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion), MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion), @@ -118,7 +118,7 @@ static int navi10_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(ArmD3, PPSMC_MSG_ArmD3), }; -static int navi10_clk_map[SMU_CLK_COUNT] = { +static struct smu_11_0_cmn2aisc_mapping navi10_clk_map[SMU_CLK_COUNT] = { CLK_MAP(GFXCLK, PPCLK_GFXCLK), CLK_MAP(SCLK, PPCLK_GFXCLK), CLK_MAP(SOCCLK, PPCLK_SOCCLK), @@ -133,7 +133,7 @@ static int navi10_clk_map[SMU_CLK_COUNT] = { CLK_MAP(PHYCLK, PPCLK_PHYCLK), }; -static int navi10_feature_mask_map[SMU_FEATURE_COUNT] = { +static struct smu_11_0_cmn2aisc_mapping navi10_feature_mask_map[SMU_FEATURE_COUNT] = { FEA_MAP(DPM_PREFETCHER), FEA_MAP(DPM_GFXCLK), FEA_MAP(DPM_GFX_PACE), @@ -178,7 +178,7 @@ static int navi10_feature_mask_map[SMU_FEATURE_COUNT] = { FEA_MAP(ATHUB_PG), }; -static int navi10_table_map[SMU_TABLE_COUNT] = { +static struct smu_11_0_cmn2aisc_mapping navi10_table_map[SMU_TABLE_COUNT] = { TAB_MAP(PPTABLE), TAB_MAP(WATERMARKS), TAB_MAP(AVFS), @@ -193,12 +193,12 @@ static int navi10_table_map[SMU_TABLE_COUNT] = { TAB_MAP(PACE), }; -static int navi10_pwr_src_map[SMU_POWER_SOURCE_COUNT] = { +static struct smu_11_0_cmn2aisc_mapping navi10_pwr_src_map[SMU_POWER_SOURCE_COUNT] = { PWR_MAP(AC), PWR_MAP(DC), }; -static int navi10_workload_map[] = { +static struct smu_11_0_cmn2aisc_mapping navi10_workload_map[PP_SMC_POWER_PROFILE_COUNT] = { WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT, WORKLOAD_PPLIB_DEFAULT_BIT), WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT), WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT), @@ -210,79 +210,87 @@ static int navi10_workload_map[] = { static int navi10_get_smu_msg_index(struct smu_context *smc, uint32_t index) { - int val; + struct smu_11_0_cmn2aisc_mapping mapping; + if (index > SMU_MSG_MAX_COUNT) return -EINVAL; - val = navi10_message_map[index]; - if (val > PPSMC_Message_Count) + mapping = navi10_message_map[index]; + if (!(mapping.valid_mapping)) return -EINVAL; - return val; + return mapping.map_to; } static int navi10_get_smu_clk_index(struct smu_context *smc, uint32_t index) { - int val; + struct smu_11_0_cmn2aisc_mapping mapping; + if (index >= SMU_CLK_COUNT) return -EINVAL; - val = navi10_clk_map[index]; - if (val >= PPCLK_COUNT) + mapping = navi10_clk_map[index]; + if (!(mapping.valid_mapping)) return -EINVAL; - return val; + return mapping.map_to; } static int navi10_get_smu_feature_index(struct smu_context *smc, uint32_t index) { - int val; + struct smu_11_0_cmn2aisc_mapping mapping; + if (index >= SMU_FEATURE_COUNT) return -EINVAL; - val = navi10_feature_mask_map[index]; - if (val > 64) + mapping = navi10_feature_mask_map[index]; + if (!(mapping.valid_mapping)) return -EINVAL; - return val; + return mapping.map_to; } static int navi10_get_smu_table_index(struct smu_context *smc, uint32_t index) { - int val; + struct smu_11_0_cmn2aisc_mapping mapping; + if (index >= SMU_TABLE_COUNT) return -EINVAL; - val = navi10_table_map[index]; - if (val >= TABLE_COUNT) + mapping = navi10_table_map[index]; + if (!(mapping.valid_mapping)) return -EINVAL; - return val; + return mapping.map_to; } static int navi10_get_pwr_src_index(struct smu_context *smc, uint32_t index) { - int val; + struct smu_11_0_cmn2aisc_mapping mapping; + if (index >= SMU_POWER_SOURCE_COUNT) 
return -EINVAL; - val = navi10_pwr_src_map[index]; - if (val >= POWER_SOURCE_COUNT) + mapping = navi10_pwr_src_map[index]; + if (!(mapping.valid_mapping)) return -EINVAL; - return val; + return mapping.map_to; } static int navi10_get_workload_type(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile) { - int val; + struct smu_11_0_cmn2aisc_mapping mapping; + if (profile > PP_SMC_POWER_PROFILE_CUSTOM) return -EINVAL; - val = navi10_workload_map[profile]; + mapping = navi10_workload_map[profile]; + if (!(mapping.valid_mapping)) + return -EINVAL; - return val; + return mapping.map_to; } static bool is_asic_secure(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c index bb9bb09cfc7a..af54fee0aed2 100644 --- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c @@ -47,7 +47,7 @@ #define CTF_OFFSET_HBM 5 #define MSG_MAP(msg) \ - [SMU_MSG_##msg] = PPSMC_MSG_##msg + [SMU_MSG_##msg] = {1, PPSMC_MSG_##msg} #define SMC_DPM_FEATURE (FEATURE_DPM_PREFETCHER_MASK | \ FEATURE_DPM_GFXCLK_MASK | \ @@ -59,7 +59,7 @@ FEATURE_DPM_LINK_MASK | \ FEATURE_DPM_DCEFCLK_MASK) -static int vega20_message_map[SMU_MSG_MAX_COUNT] = { +static struct smu_11_0_cmn2aisc_mapping vega20_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(TestMessage), MSG_MAP(GetSmuVersion), MSG_MAP(GetDriverIfVersion), @@ -145,7 +145,7 @@ static int vega20_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(GetAVFSVoltageByDpm), }; -static int vega20_clk_map[SMU_CLK_COUNT] = { +static struct smu_11_0_cmn2aisc_mapping vega20_clk_map[SMU_CLK_COUNT] = { CLK_MAP(GFXCLK, PPCLK_GFXCLK), CLK_MAP(VCLK, PPCLK_VCLK), CLK_MAP(DCLK, PPCLK_DCLK), @@ -159,7 +159,7 @@ static int vega20_clk_map[SMU_CLK_COUNT] = { CLK_MAP(FCLK, PPCLK_FCLK), }; -static int vega20_feature_mask_map[SMU_FEATURE_COUNT] = { +static struct smu_11_0_cmn2aisc_mapping vega20_feature_mask_map[SMU_FEATURE_COUNT] = { FEA_MAP(DPM_PREFETCHER), FEA_MAP(DPM_GFXCLK), FEA_MAP(DPM_UCLK), @@ -195,7 +195,7 @@ static int vega20_feature_mask_map[SMU_FEATURE_COUNT] = { FEA_MAP(XGMI), }; -static int vega20_table_map[SMU_TABLE_COUNT] = { +static struct smu_11_0_cmn2aisc_mapping vega20_table_map[SMU_TABLE_COUNT] = { TAB_MAP(PPTABLE), TAB_MAP(WATERMARKS), TAB_MAP(AVFS), @@ -208,12 +208,12 @@ static int vega20_table_map[SMU_TABLE_COUNT] = { TAB_MAP(OVERDRIVE), }; -static int vega20_pwr_src_map[SMU_POWER_SOURCE_COUNT] = { +static struct smu_11_0_cmn2aisc_mapping vega20_pwr_src_map[SMU_POWER_SOURCE_COUNT] = { PWR_MAP(AC), PWR_MAP(DC), }; -static int vega20_workload_map[] = { +static struct smu_11_0_cmn2aisc_mapping vega20_workload_map[PP_SMC_POWER_PROFILE_COUNT] = { WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT, WORKLOAD_DEFAULT_BIT), WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT), WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT), @@ -225,79 +225,86 @@ static int vega20_workload_map[] = { static int vega20_get_smu_table_index(struct smu_context *smc, uint32_t index) { - int val; + struct smu_11_0_cmn2aisc_mapping mapping; + if (index >= SMU_TABLE_COUNT) return -EINVAL; - val = vega20_table_map[index]; - if (val >= TABLE_COUNT) + mapping = vega20_table_map[index]; + if (!(mapping.valid_mapping)) return -EINVAL; - return val; + return mapping.map_to; } static int vega20_get_pwr_src_index(struct smu_context *smc, uint32_t index) { - int val; + struct smu_11_0_cmn2aisc_mapping mapping; + if (index >= SMU_POWER_SOURCE_COUNT) return -EINVAL; - 
val = vega20_pwr_src_map[index]; - if (val >= POWER_SOURCE_COUNT) + mapping = vega20_pwr_src_map[index]; + if (!(mapping.valid_mapping)) return -EINVAL; - return val; + return mapping.map_to; } static int vega20_get_smu_feature_index(struct smu_context *smc, uint32_t index) { - int val; + struct smu_11_0_cmn2aisc_mapping mapping; + if (index >= SMU_FEATURE_COUNT) return -EINVAL; - val = vega20_feature_mask_map[index]; - if (val > 64) + mapping = vega20_feature_mask_map[index]; + if (!(mapping.valid_mapping)) return -EINVAL; - return val; + return mapping.map_to; } static int vega20_get_smu_clk_index(struct smu_context *smc, uint32_t index) { - int val; + struct smu_11_0_cmn2aisc_mapping mapping; + if (index >= SMU_CLK_COUNT) return -EINVAL; - val = vega20_clk_map[index]; - if (val >= PPCLK_COUNT) + mapping = vega20_clk_map[index]; + if (!(mapping.valid_mapping)) return -EINVAL; - return val; + return mapping.map_to; } static int vega20_get_smu_msg_index(struct smu_context *smc, uint32_t index) { - int val; + struct smu_11_0_cmn2aisc_mapping mapping; if (index >= SMU_MSG_MAX_COUNT) return -EINVAL; - val = vega20_message_map[index]; - if (val > PPSMC_Message_Count) + mapping = vega20_message_map[index]; + if (!(mapping.valid_mapping)) return -EINVAL; - return val; + return mapping.map_to; } static int vega20_get_workload_type(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile) { - int val; + struct smu_11_0_cmn2aisc_mapping mapping; + if (profile > PP_SMC_POWER_PROFILE_CUSTOM) return -EINVAL; - val = vega20_workload_map[profile]; + mapping = vega20_workload_map[profile]; + if (!(mapping.valid_mapping)) + return -EINVAL; - return val; + return mapping.map_to; } static int vega20_tables_init(struct smu_context *smu, struct smu_table *tables) -- cgit v1.2.3 From c06403045aad6ae2edd935f6309e0c72e118c6de Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Thu, 11 Jul 2019 14:36:44 +0800 Subject: drm/amd/powerplay: input check for unsupported message/clock index This can avoid them to be handled in a wrong way without notice. Since not all SMU messages/clocks are supported on every SMU11 ASIC. 
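A condensed sketch of the calling pattern this change enforces (illustrative; smu_clk_get_index() and smu_send_smc_msg_with_param() are taken from the diff below, the surrounding function is a stand-in):

/*
 * Sketch: the index lookup can now legitimately return a negative errno
 * for clocks or messages an ASIC does not implement, so callers must
 * bail out instead of shifting the error value into the SMC message
 * parameter as the old code effectively did.
 */
static int example_get_dc_mode_max_freq(struct smu_context *smu,
					enum smu_clk_type clock_select)
{
	int clk_id = smu_clk_get_index(smu, clock_select);

	if (clk_id < 0)		/* unsupported on this SMU11 ASIC */
		return -EINVAL;

	return smu_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
					   clk_id << 16);
}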
Signed-off-by: Evan Quan Reviewed-by: Feifei Xu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 18 ++++++++++----- drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 31 ++++++++++++++++++++------ drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 35 +++++++++++++++++++++++++----- drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 31 ++++++++++++++++++++------ 4 files changed, 90 insertions(+), 25 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index c8048b865161..ec1904e47927 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -331,7 +331,7 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int int ret = 0; int table_id = smu_table_get_index(smu, table_index); - if (!table_data || table_id >= smu_table->table_count) + if (!table_data || table_id >= smu_table->table_count || table_id < 0) return -EINVAL; table = &smu_table->tables[table_index]; @@ -462,10 +462,12 @@ int smu_feature_init_dpm(struct smu_context *smu) int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask) { struct smu_feature *feature = &smu->smu_feature; - uint32_t feature_id; + int feature_id; int ret = 0; feature_id = smu_feature_get_index(smu, mask); + if (feature_id < 0) + return 0; WARN_ON(feature_id > feature->feature_num); @@ -480,10 +482,12 @@ int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask, bool enable) { struct smu_feature *feature = &smu->smu_feature; - uint32_t feature_id; + int feature_id; int ret = 0; feature_id = smu_feature_get_index(smu, mask); + if (feature_id < 0) + return -EINVAL; WARN_ON(feature_id > feature->feature_num); @@ -506,10 +510,12 @@ failed: int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask) { struct smu_feature *feature = &smu->smu_feature; - uint32_t feature_id; + int feature_id; int ret = 0; feature_id = smu_feature_get_index(smu, mask); + if (feature_id < 0) + return 0; WARN_ON(feature_id > feature->feature_num); @@ -525,10 +531,12 @@ int smu_feature_set_supported(struct smu_context *smu, bool enable) { struct smu_feature *feature = &smu->smu_feature; - uint32_t feature_id; + int feature_id; int ret = 0; feature_id = smu_feature_get_index(smu, mask); + if (feature_id < 0) + return -EINVAL; WARN_ON(feature_id > feature->feature_num); diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index f6b1c8b2b2d6..7061ff14333f 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -216,8 +216,10 @@ static int navi10_get_smu_msg_index(struct smu_context *smc, uint32_t index) return -EINVAL; mapping = navi10_message_map[index]; - if (!(mapping.valid_mapping)) + if (!(mapping.valid_mapping)) { + pr_warn("Unsupported SMU message: %d\n", index); return -EINVAL; + } return mapping.map_to; } @@ -230,8 +232,10 @@ static int navi10_get_smu_clk_index(struct smu_context *smc, uint32_t index) return -EINVAL; mapping = navi10_clk_map[index]; - if (!(mapping.valid_mapping)) + if (!(mapping.valid_mapping)) { + pr_warn("Unsupported SMU clock: %d\n", index); return -EINVAL; + } return mapping.map_to; } @@ -244,8 +248,10 @@ static int navi10_get_smu_feature_index(struct smu_context *smc, uint32_t index) return -EINVAL; mapping = navi10_feature_mask_map[index]; - if (!(mapping.valid_mapping)) + if (!(mapping.valid_mapping)) { + 
pr_warn("Unsupported SMU feature: %d\n", index); return -EINVAL; + } return mapping.map_to; } @@ -258,8 +264,10 @@ static int navi10_get_smu_table_index(struct smu_context *smc, uint32_t index) return -EINVAL; mapping = navi10_table_map[index]; - if (!(mapping.valid_mapping)) + if (!(mapping.valid_mapping)) { + pr_warn("Unsupported SMU table: %d\n", index); return -EINVAL; + } return mapping.map_to; } @@ -272,8 +280,10 @@ static int navi10_get_pwr_src_index(struct smu_context *smc, uint32_t index) return -EINVAL; mapping = navi10_pwr_src_map[index]; - if (!(mapping.valid_mapping)) + if (!(mapping.valid_mapping)) { + pr_warn("Unsupported power source: %d\n", index); return -EINVAL; + } return mapping.map_to; } @@ -287,8 +297,10 @@ static int navi10_get_workload_type(struct smu_context *smu, enum PP_SMC_POWER_P return -EINVAL; mapping = navi10_workload_map[profile]; - if (!(mapping.valid_mapping)) + if (!(mapping.valid_mapping)) { + pr_warn("Unsupported workload: %d\n", (int)profile); return -EINVAL; + } return mapping.map_to; } @@ -969,7 +981,7 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf) { DpmActivityMonitorCoeffInt_t activity_monitor; uint32_t i, size = 0; - uint16_t workload_type = 0; + int16_t workload_type = 0; static const char *profile_name[] = { "BOOTUP_DEFAULT", "3D_FULL_SCREEN", @@ -1002,6 +1014,9 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf) for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) { /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ workload_type = smu_workload_get_type(smu, i); + if (workload_type < 0) + return -EINVAL; + result = smu_update_table(smu, SMU_TABLE_ACTIVITY_MONITOR_COEFF, workload_type, (void *)(&activity_monitor), false); @@ -1130,6 +1145,8 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ workload_type = smu_workload_get_type(smu, smu->power_profile_mode); + if (workload_type < 0) + return -EINVAL; smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, 1 << workload_type); diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index 3505b92a6540..9f3a84fbb9cf 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -937,11 +937,17 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock, enum smu_clk_type clock_select) { int ret = 0; + int clk_id; if (!smu->pm_enabled) return ret; + + clk_id = smu_clk_get_index(smu, clock_select); + if (clk_id < 0) + return -EINVAL; + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq, - smu_clk_get_index(smu, clock_select) << 16); + clk_id << 16); if (ret) { pr_err("[GetMaxSustainableClock] Failed to get max DC clock from SMC!"); return ret; @@ -956,7 +962,7 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock, /* if DC limit is zero, return AC limit */ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, - smu_clk_get_index(smu, clock_select) << 16); + clk_id << 16); if (ret) { pr_err("[GetMaxSustainableClock] failed to get max AC clock from SMC!"); return ret; @@ -1052,6 +1058,11 @@ static int smu_v11_0_get_power_limit(struct smu_context *smu, bool get_default) { int ret = 0; + int power_src; + + power_src = smu_power_get_index(smu, SMU_POWER_SOURCE_AC); + if (power_src < 0) + return -EINVAL; if (get_default) { mutex_lock(&smu->mutex); @@ -1063,7 +1074,7 @@ static int 
smu_v11_0_get_power_limit(struct smu_context *smu, mutex_unlock(&smu->mutex); } else { ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit, - smu_power_get_index(smu, SMU_POWER_SOURCE_AC) << 16); + power_src << 16); if (ret) { pr_err("[%s] get PPT limit failed!", __func__); return ret; @@ -1106,16 +1117,21 @@ static int smu_v11_0_get_current_clk_freq(struct smu_context *smu, { int ret = 0; uint32_t freq = 0; + int asic_clk_id; if (clk_id >= SMU_CLK_COUNT || !value) return -EINVAL; + asic_clk_id = smu_clk_get_index(smu, clk_id); + if (asic_clk_id < 0) + return -EINVAL; + /* if don't has GetDpmClockFreq Message, try get current clock by SmuMetrics_t */ - if (smu_msg_get_index(smu, SMU_MSG_GetDpmClockFreq) == 0) + if (smu_msg_get_index(smu, SMU_MSG_GetDpmClockFreq) < 0) ret = smu_get_current_clk_freq_by_table(smu, clk_id, &freq); else { ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmClockFreq, - (smu_clk_get_index(smu, clk_id) << 16)); + (asic_clk_id << 16)); if (ret) return ret; @@ -1295,6 +1311,7 @@ smu_v11_0_display_clock_voltage_request(struct smu_context *smu, int ret = 0; enum smu_clk_type clk_select = 0; uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000; + int clk_id; if (!smu->pm_enabled) return -EINVAL; @@ -1326,9 +1343,15 @@ smu_v11_0_display_clock_voltage_request(struct smu_context *smu, if (ret) goto failed; + clk_id = smu_clk_get_index(smu, clk_select); + if (clk_id < 0) { + ret = -EINVAL; + goto failed; + } + mutex_lock(&smu->mutex); ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq, - (smu_clk_get_index(smu, clk_select) << 16) | clk_freq); + (clk_id << 16) | clk_freq); mutex_unlock(&smu->mutex); } diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c index af54fee0aed2..080956ea0570 100644 --- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c @@ -231,8 +231,10 @@ static int vega20_get_smu_table_index(struct smu_context *smc, uint32_t index) return -EINVAL; mapping = vega20_table_map[index]; - if (!(mapping.valid_mapping)) + if (!(mapping.valid_mapping)) { + pr_warn("Unsupported SMU table: %d\n", index); return -EINVAL; + } return mapping.map_to; } @@ -245,8 +247,10 @@ static int vega20_get_pwr_src_index(struct smu_context *smc, uint32_t index) return -EINVAL; mapping = vega20_pwr_src_map[index]; - if (!(mapping.valid_mapping)) + if (!(mapping.valid_mapping)) { + pr_warn("Unsupported power source: %d\n", index); return -EINVAL; + } return mapping.map_to; } @@ -259,8 +263,10 @@ static int vega20_get_smu_feature_index(struct smu_context *smc, uint32_t index) return -EINVAL; mapping = vega20_feature_mask_map[index]; - if (!(mapping.valid_mapping)) + if (!(mapping.valid_mapping)) { + pr_warn("Unsupported SMU feature: %d\n", index); return -EINVAL; + } return mapping.map_to; } @@ -273,8 +279,10 @@ static int vega20_get_smu_clk_index(struct smu_context *smc, uint32_t index) return -EINVAL; mapping = vega20_clk_map[index]; - if (!(mapping.valid_mapping)) + if (!(mapping.valid_mapping)) { + pr_warn("Unsupported SMU clock: %d\n", index); return -EINVAL; + } return mapping.map_to; } @@ -287,8 +295,10 @@ static int vega20_get_smu_msg_index(struct smu_context *smc, uint32_t index) return -EINVAL; mapping = vega20_message_map[index]; - if (!(mapping.valid_mapping)) + if (!(mapping.valid_mapping)) { + pr_warn("Unsupported SMU message: %d\n", index); return -EINVAL; + } return mapping.map_to; } @@ -301,8 +311,10 @@ static int vega20_get_workload_type(struct smu_context 
*smu, enum PP_SMC_POWER_P return -EINVAL; mapping = vega20_workload_map[profile]; - if (!(mapping.valid_mapping)) + if (!(mapping.valid_mapping)) { + pr_warn("Unsupported SMU workload: %d\n", (int)profile); return -EINVAL; + } return mapping.map_to; } @@ -1778,7 +1790,7 @@ static int vega20_get_power_profile_mode(struct smu_context *smu, char *buf) { DpmActivityMonitorCoeffInt_t activity_monitor; uint32_t i, size = 0; - uint16_t workload_type = 0; + int16_t workload_type = 0; static const char *profile_name[] = { "BOOTUP_DEFAULT", "3D_FULL_SCREEN", @@ -1811,6 +1823,9 @@ static int vega20_get_power_profile_mode(struct smu_context *smu, char *buf) for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) { /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ workload_type = smu_workload_get_type(smu, i); + if (workload_type < 0) + return -EINVAL; + result = smu_update_table(smu, SMU_TABLE_ACTIVITY_MONITOR_COEFF, workload_type, (void *)(&activity_monitor), false); @@ -1963,6 +1978,8 @@ static int vega20_set_power_profile_mode(struct smu_context *smu, long *input, u /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ workload_type = smu_workload_get_type(smu, smu->power_profile_mode); + if (workload_type < 0) + return -EINVAL; smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, 1 << workload_type); -- cgit v1.2.3 From c52e7ebce72bc9d269c6025da5a4d41601e5f6ca Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Thu, 11 Jul 2019 12:19:44 -0400 Subject: drm/amdgpu: exposing fica registers to df offsets exposing fica registers to poll df pie data for xgmi error counters for vega20. Signed-off-by: Jonathan Kim Reviewed-by: Alexander Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h index 6efcaa93e17b..c2bd25589e84 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h @@ -48,4 +48,8 @@ #define smnPerfMonCtrLo3 0x01d478UL #define smnPerfMonCtrHi3 0x01d47cUL +#define smnDF_PIE_AON_FabricIndirectConfigAccessAddress3 0x1d05cUL +#define smnDF_PIE_AON_FabricIndirectConfigAccessDataLo3 0x1d098UL +#define smnDF_PIE_AON_FabricIndirectConfigAccessDataHi3 0x1d09cUL + #endif -- cgit v1.2.3 From fbdc5d8d84cc521573f1265d5e6ceb04a16075c9 Mon Sep 17 00:00:00 2001 From: Joseph Greathouse Date: Wed, 17 Jul 2019 11:55:22 -0500 Subject: drm/amdgpu: Default disable GDS for compute VMIDs The GDS and GWS blocks default to allowing all VMIDs to access all entries. Graphics VMIDs can handle setting these limits when the driver launches work. However, compute workloads under HWS control don't go through the kernel driver. Instead, HWS firmware should set these limits when a process is put into a VMID slot. Disable access to these devices by default by turning off all mask bits (for OA) and setting BASE=SIZE=0 (for GDS and GWS) for all compute VMIDs. If a process wants to use these resources, they can request this from the HWS firmware (when such capabilities are enabled). HWS will then handle setting the base and limit for the process when it is assigned to a VMID. This will also prevent user kernels from getting 'stuck' in GWS by accident if they write GWS-using code but HWS firmware is not set up to handle GWS reset. 
Until HWS is enabled to handle GWS properly, all GWS accesses will MEM_VIOL fault the kernel. v2: Move initialization outside of SRBM mutex Signed-off-by: Joseph Greathouse Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 9 +++++++++ drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 9 +++++++++ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 9 +++++++++ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 9 +++++++++ 4 files changed, 36 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 5b5ea9b41c12..98d6cb9e9543 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -1516,6 +1516,15 @@ static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev) } nv_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); + + /* Initialize all compute VMIDs to have no GDS, GWS, or OA + acccess. These should be enabled by FW for target VMIDs. */ + for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) { + WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0); + WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0); + WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0); + WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0); + } } static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 0db9f488da7e..21187275dfd3 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -1879,6 +1879,15 @@ static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev) } cik_srbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); + + /* Initialize all compute VMIDs to have no GDS, GWS, or OA + acccess. These should be enabled by FW for target VMIDs. */ + for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) { + WREG32(amdgpu_gds_reg_offset[i].mem_base, 0); + WREG32(amdgpu_gds_reg_offset[i].mem_size, 0); + WREG32(amdgpu_gds_reg_offset[i].gws, 0); + WREG32(amdgpu_gds_reg_offset[i].oa, 0); + } } static void gfx_v7_0_config_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 5f401b41ef7c..751567f78567 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -3706,6 +3706,15 @@ static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev) } vi_srbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); + + /* Initialize all compute VMIDs to have no GDS, GWS, or OA + acccess. These should be enabled by FW for target VMIDs. */ + for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) { + WREG32(amdgpu_gds_reg_offset[i].mem_base, 0); + WREG32(amdgpu_gds_reg_offset[i].mem_size, 0); + WREG32(amdgpu_gds_reg_offset[i].gws, 0); + WREG32(amdgpu_gds_reg_offset[i].oa, 0); + } } static void gfx_v8_0_config_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index b9c347d2b989..f0a7f85990bc 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -2029,6 +2029,15 @@ static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev) } soc15_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); + + /* Initialize all compute VMIDs to have no GDS, GWS, or OA + acccess. These should be enabled by FW for target VMIDs. 
*/ + for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) { + WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0); + WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0); + WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0); + WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0); + } } static void gfx_v9_0_constants_init(struct amdgpu_device *adev) -- cgit v1.2.3 From 3a48c10d7fb055ff655096eb50e96f36b22b8021 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Wed, 17 Jul 2019 16:32:27 +0800 Subject: drm/amd/powerplay: report bootup clock as max supported on dpm disabled With gfxclk or uclk dpm disabled, it's reasonable to report bootup clock as the max supported. Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index ec1904e47927..b6b6c10eeb6e 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -137,12 +137,37 @@ int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, { int ret = 0, clk_id = 0; uint32_t param = 0; + uint32_t clock_limit; if (!min && !max) return -EINVAL; - if (!smu_clk_dpm_is_enabled(smu, clk_type)) + if (!smu_clk_dpm_is_enabled(smu, clk_type)) { + switch (clk_type) { + case SMU_MCLK: + case SMU_UCLK: + clock_limit = smu->smu_table.boot_values.uclk; + break; + case SMU_GFXCLK: + case SMU_SCLK: + clock_limit = smu->smu_table.boot_values.gfxclk; + break; + case SMU_SOCCLK: + clock_limit = smu->smu_table.boot_values.socclk; + break; + default: + clock_limit = 0; + break; + } + + /* clock in Mhz unit */ + if (min) + *min = clock_limit / 100; + if (max) + *max = clock_limit / 100; + return 0; + } mutex_lock(&smu->mutex); clk_id = smu_clk_get_index(smu, clk_type); -- cgit v1.2.3 From fb2a36075a8650c2fc04f8fbef6901bacf0c834b Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Thu, 18 Jul 2019 12:49:15 +0800 Subject: drm/amdgpu: do not create ras debugfs/sysfs node for ASICs that don't have ras ability driver shouldn't init any ras debugfs/sysfs node for ASICs that don't have ras hardware ability Signed-off-by: Hawking Zhang Reviewed-by: Feifei Xu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 1a4412e47810..3a9ece450b31 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -1557,6 +1557,12 @@ int amdgpu_ras_init(struct amdgpu_device *adev) amdgpu_ras_check_supported(adev, &con->hw_supported, &con->supported); + if (!con->hw_supported) { + amdgpu_ras_set_context(adev, NULL); + kfree(con); + return 0; + } + con->features = 0; INIT_LIST_HEAD(&con->head); /* Might need get this flag from vbios. */ -- cgit v1.2.3 From 4d249d3abd424e4de4a24ef95306a612e6820f6f Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Thu, 18 Jul 2019 12:52:56 +0800 Subject: drm/amdgpu: disable GFX RAS by default GFX RAS has not been stablized yet. disable GFX ras until it is fully funcitonal. 
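For reference, the new default is the old mask with a single block bit cleared (a sketch; it assumes, consistent with the one-line diff below, that the GFX RAS block occupies bit 2 of amdgpu_ras_mask):

/* Sketch only: 0xfffffffb == 0xffffffff & ~(1 << 2), i.e. every RAS
 * block stays enabled by default except the one at bit 2 (GFX here).
 * The mask can still be trimmed further via the module parameter. */
uint amdgpu_ras_mask = 0xffffffff & ~(1U << 2);	/* == 0xfffffffb */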
Signed-off-by: Hawking Zhang Reviewed-by: Feifei Xu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index e25e3d8c6fac..20f18d492537 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -148,7 +148,7 @@ struct amdgpu_mgpu_info mgpu_info = { .mutex = __MUTEX_INITIALIZER(mgpu_info.mutex), }; int amdgpu_ras_enable = -1; -uint amdgpu_ras_mask = 0xffffffff; +uint amdgpu_ras_mask = 0xfffffffb; /** * DOC: vramlimit (int) -- cgit v1.2.3 From a5dd40ca81bb2465a754709d999a93c5343d1a97 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Thu, 18 Jul 2019 13:59:38 +0800 Subject: drm/amdgpu: only allow error injection to UMC IP block error injection to other IP blocks (except UMC) will be enabled until RAS feature stablize on those IP blocks Signed-off-by: Hawking Zhang Reviewed-by: Feifei Xu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 3a9ece450b31..fc346eb1aacd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -689,6 +689,12 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev, if (!obj) return -EINVAL; + if (block_info.block_id != TA_RAS_BLOCK__UMC) { + DRM_INFO("%s error injection is not supported yet\n", + ras_block_str(info->head.block)); + return -EINVAL; + } + ret = psp_ras_trigger_error(&adev->psp, &block_info); if (ret) DRM_ERROR("RAS ERROR: inject %s error failed ret %d\n", -- cgit v1.2.3 From 33c976c9612a8b414d56e1f591bc39bbacd18b9c Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Thu, 18 Jul 2019 16:03:46 +0800 Subject: drm/amdgpu: drop ras self test this function is not needed any more. error injection is the only way to validate ras but it can't be executed in amdgpu_ras_init, where gpu is even not initialized Signed-off-by: Hawking Zhang Reviewed-by: Feifei Xu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 7 ------- 1 file changed, 7 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index fc346eb1aacd..fac7aa2c244f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -136,11 +136,6 @@ static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev, static int amdgpu_ras_release_vram(struct amdgpu_device *adev, struct amdgpu_bo **bo_ptr); -static void amdgpu_ras_self_test(struct amdgpu_device *adev) -{ - /* TODO */ -} - static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { @@ -1582,8 +1577,6 @@ int amdgpu_ras_init(struct amdgpu_device *adev) if (amdgpu_ras_fs_init(adev)) goto fs_out; - amdgpu_ras_self_test(adev); - DRM_INFO("RAS INFO: ras initialized successfully, " "hardware ability[%x] ras_mask[%x]\n", con->hw_supported, con->supported); -- cgit v1.2.3 From c49b1b59a2fa3fa6972afd7e216799f696e67fe5 Mon Sep 17 00:00:00 2001 From: Kevin Wang Date: Thu, 18 Jul 2019 15:46:55 +0800 Subject: drm/amd/powerplay: change sysfs pp_dpm_xxx format for navi10 v2: set average clock value on level 1 when current clock equal min or max clock (fine grained dpm support). 
the navi10 gfxclk (sclk) support fine grained DPM, so use level 1 to show current dpm freq in sysfs pp_dpm_xxx Signed-off-by: Kevin Wang Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 47 ++++++++++++++++++++++++++---- 1 file changed, 41 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index 7061ff14333f..80daded31970 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -646,11 +646,26 @@ static int navi10_get_current_clk_freq_by_table(struct smu_context *smu, return ret; } +static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu_clk_type clk_type) +{ + PPTable_t *pptable = smu->smu_table.driver_pptable; + DpmDescriptor_t *dpm_desc = NULL; + uint32_t clk_index = 0; + + clk_index = smu_clk_get_index(smu, clk_type); + dpm_desc = &pptable->DpmDescriptor[clk_index]; + + /* 0 - Fine grained DPM, 1 - Discrete DPM */ + return dpm_desc->SnapToDiscrete == 0 ? true : false; +} + static int navi10_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { int i, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; + uint32_t freq_values[3] = {0}; + uint32_t mark_index = 0; switch (clk_type) { case SMU_GFXCLK: @@ -663,22 +678,42 @@ static int navi10_print_clk_levels(struct smu_context *smu, ret = smu_get_current_clk_freq(smu, clk_type, &cur_value); if (ret) return size; + /* 10KHz -> MHz */ cur_value = cur_value / 100; - size += sprintf(buf, "current clk: %uMhz\n", cur_value); - ret = smu_get_dpm_level_count(smu, clk_type, &count); if (ret) return size; - for (i = 0; i < count; i++) { - ret = smu_get_dpm_freq_by_index(smu, clk_type, i, &value); + if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) { + for (i = 0; i < count; i++) { + ret = smu_get_dpm_freq_by_index(smu, clk_type, i, &value); + if (ret) + return size; + + size += sprintf(buf + size, "%d: %uMhz %s\n", i, value, + cur_value == value ? "*" : ""); + } + } else { + ret = smu_get_dpm_freq_by_index(smu, clk_type, 0, &freq_values[0]); + if (ret) + return size; + ret = smu_get_dpm_freq_by_index(smu, clk_type, count - 1, &freq_values[2]); if (ret) return size; - size += sprintf(buf + size, "%d: %uMhz %s\n", i, value, - cur_value == value ? "*" : ""); + freq_values[1] = cur_value; + mark_index = cur_value == freq_values[0] ? 0 : + cur_value == freq_values[2] ? 2 : 1; + if (mark_index != 1) + freq_values[1] = (freq_values[0] + freq_values[2]) / 2; + + for (i = 0; i < 3; i++) { + size += sprintf(buf + size, "%d: %uMhz %s\n", i, freq_values[i], + i == mark_index ? "*" : ""); + } + } break; default: -- cgit v1.2.3 From b64d98656f0284cb88fa203541d4d0404b0e6c1a Mon Sep 17 00:00:00 2001 From: Fuqian Huang Date: Thu, 11 Jul 2019 11:10:21 +0800 Subject: drm/ttm: use the same attributes when freeing d_page->vaddr MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In function __ttm_dma_alloc_page(), d_page->addr is allocated by dma_alloc_attrs() but freed with use dma_free_coherent() in __ttm_dma_free_page(). Use the correct dma_free_attrs() to free d_page->vaddr. 
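The underlying DMA-API rule being restored, as a sketch (the helpers below are illustrative stand-ins; only the dma_alloc_attrs()/dma_free_attrs() pairing and the DMA_ATTR_NO_WARN huge-pool attribute come from the code in question):

/* Sketch: memory obtained with dma_alloc_attrs() must be returned with
 * dma_free_attrs() using the same attributes.  dma_free_coherent() is
 * dma_free_attrs() with attrs == 0, which mismatches attributes such as
 * DMA_ATTR_NO_WARN that the huge-pool allocation path passes in. */
static void *example_alloc(struct device *dev, size_t size,
			   dma_addr_t *dma, unsigned long attrs)
{
	return dma_alloc_attrs(dev, size, dma, GFP_KERNEL, attrs);
}

static void example_free(struct device *dev, size_t size,
			 void *vaddr, dma_addr_t dma, unsigned long attrs)
{
	/* same attrs as at allocation time, not dma_free_coherent() */
	dma_free_attrs(dev, size, vaddr, dma, attrs);
}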
Signed-off-by: Fuqian Huang Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c index d594f7520b7b..7d78e6deac89 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c @@ -285,9 +285,13 @@ static int ttm_set_pages_caching(struct dma_pool *pool, static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page) { + unsigned long attrs = 0; dma_addr_t dma = d_page->dma; d_page->vaddr &= ~VADDR_FLAG_HUGE_POOL; - dma_free_coherent(pool->dev, pool->size, (void *)d_page->vaddr, dma); + if (pool->type & IS_HUGE) + attrs = DMA_ATTR_NO_WARN; + + dma_free_attrs(pool->dev, pool->size, (void *)d_page->vaddr, dma, attrs); kfree(d_page); d_page = NULL; -- cgit v1.2.3 From 53ef3969ddb88a2ee0b69d37f60bd2b1f02f23c9 Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Thu, 18 Jul 2019 11:38:46 -0400 Subject: drm/amdgpu: use VCN firmware offset for cache window Since we are using the signed FW now, and also using PSP firmware loading, but it's still potential to break driver when loading FW directly instead of PSP, so we should add offset. Signed-off-by: Leo Liu Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index 31539e6a16b4..a022e47f2a1d 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -379,11 +379,8 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev) WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, upper_32_bits(adev->vcn.inst->gpu_addr)); offset = size; - /* No signed header for now from firmware WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, AMDGPU_UVD_FIRMWARE_OFFSET >> 3); - */ - WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0); } WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size); -- cgit v1.2.3 From 93c25fbdc30a6058115e7a9f3fa161f5a26f7043 Mon Sep 17 00:00:00 2001 From: Jun Lei Date: Fri, 31 May 2019 15:14:13 -0400 Subject: drm/amd/display: initialize p_state to proper value [why] On some modes SMU will be in infinite loop state at boot, this is because driver assumes p_state_support is false, but this is the opposite of the assumed boot state by SMU. 
we optimize away notifying SMU about no pstate, and so they will get stuck [how] when we init clk manager, init pstate to true, so it matches driver load assumption Signed-off-by: Jun Lei Reviewed-by: Tony Cheng Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c index 08a774fc7b67..740f5db22bb5 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c @@ -301,6 +301,8 @@ void dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr, void dcn2_init_clocks(struct clk_mgr *clk_mgr) { memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); + // Assumption is that boot state always supports pstate + clk_mgr->clks.p_state_change_support = true; } void dcn2_enable_pme_wa(struct clk_mgr *clk_mgr_base) -- cgit v1.2.3 From e0a6440a2961b1da3ea895b0bef082fc1a78e190 Mon Sep 17 00:00:00 2001 From: David Galiffi Date: Thu, 30 May 2019 11:56:39 -0400 Subject: drm/amd/display: Add ability to set preferred link training parameters. [WHY] To add support for OS requirement to set preferred link training parameters. [HOW] Create new structure of dp link training overrides. During link training processes, these values should be used instead of the default training parameters. Signed-off-by: David Galiffi Reviewed-by: Tony Cheng Acked-by: Anthony Koo Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 46 ++- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 337 +++++++++++++++------ drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c | 28 +- drivers/gpu/drm/amd/display/dc/dc_dp_types.h | 21 ++ drivers/gpu/drm/amd/display/dc/dc_link.h | 11 + drivers/gpu/drm/amd/display/dc/inc/link_hwss.h | 2 +- .../drm/amd/display/include/link_service_types.h | 17 +- 7 files changed, 338 insertions(+), 124 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 8dbf759eba45..b4293dca16c8 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -1189,6 +1189,9 @@ static bool construct( link->ctx = dc_ctx; link->link_index = init_params->link_index; + memset(&link->preferred_training_settings, 0, sizeof(struct dc_link_training_overrides)); + memset(&link->preferred_link_setting, 0, sizeof(struct dc_link_settings)); + link->link_id = bios->funcs->get_connector_id(bios, init_params->connector_index); if (link->link_id.type != OBJECT_TYPE_CONNECTOR) { @@ -1467,6 +1470,9 @@ static enum dc_status enable_link_dp( struct dc_link *link = stream->link; struct dc_link_settings link_settings = {0}; enum dp_panel_mode panel_mode; +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT + bool fec_enable; +#endif /* get link settings for video mode timing */ decide_link_settings(stream, &link_settings); @@ -1511,10 +1517,20 @@ static enum dc_status enable_link_dp( skip_video_pattern = false; #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT - dp_set_fec_ready(link, true); + if (link->preferred_training_settings.fec_enable != NULL) + fec_enable = *link->preferred_training_settings.fec_enable; + else + fec_enable = true; + + dp_set_fec_ready(link, fec_enable); #endif - if (perform_link_training_with_retries( + if (link->aux_access_disabled) { + 
dc_link_dp_perform_link_training_skip_aux(link, &link_settings); + + link->cur_link_settings = link_settings; + status = DC_OK; + } else if (perform_link_training_with_retries( link, &link_settings, skip_video_pattern, @@ -1526,7 +1542,7 @@ static enum dc_status enable_link_dp( status = DC_FAIL_DP_LINK_TRAINING; #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT - dp_set_fec_enable(link, true); + dp_set_fec_enable(link, fec_enable); #endif return status; } @@ -3014,6 +3030,29 @@ void dc_link_set_preferred_link_settings(struct dc *dc, dp_retrain_link_dp_test(link, &store_settings, false); } +void dc_link_set_preferred_training_settings(struct dc *dc, + struct dc_link_settings *link_setting, + struct dc_link_training_overrides *lt_overrides, + struct dc_link *link, + bool skip_immediate_retrain) +{ + if (lt_overrides != NULL) + link->preferred_training_settings = *lt_overrides; + else + memset(&link->preferred_training_settings, 0, sizeof(link->preferred_training_settings)); + + if (link_setting != NULL) { + link->preferred_link_setting = *link_setting; + } else { + link->preferred_link_setting.lane_count = LANE_COUNT_UNKNOWN; + link->preferred_link_setting.link_rate = LINK_RATE_UNKNOWN; + } + + /* Retrain now, or wait until next stream update to apply */ + if (skip_immediate_retrain == false) + dc_link_set_preferred_link_settings(dc, &link->preferred_link_setting, link); +} + void dc_link_enable_hpd(const struct dc_link *link) { dc_link_dp_enable_hpd(link); @@ -3024,7 +3063,6 @@ void dc_link_disable_hpd(const struct dc_link *link) dc_link_dp_disable_hpd(link); } - void dc_link_set_test_pattern(struct dc_link *link, enum dp_test_pattern test_pattern, const struct link_training_settings *p_link_settings, diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 056be4c34a98..3f8a8f61cd76 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -49,7 +49,7 @@ static struct dc_link_settings get_common_supported_link_settings( struct dc_link_settings link_setting_a, struct dc_link_settings link_setting_b); -static void wait_for_training_aux_rd_interval( +static uint32_t get_training_aux_rd_interval( struct dc_link *link, uint32_t default_wait_in_micro_secs) { @@ -68,15 +68,21 @@ static void wait_for_training_aux_rd_interval( sizeof(training_rd_interval)); if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) - default_wait_in_micro_secs = - training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000; + default_wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000; } - udelay(default_wait_in_micro_secs); + return default_wait_in_micro_secs; +} + +static void wait_for_training_aux_rd_interval( + struct dc_link *link, + uint32_t wait_in_micro_secs) +{ + udelay(wait_in_micro_secs); DC_LOG_HW_LINK_TRAINING("%s:\n wait = %d\n", __func__, - default_wait_in_micro_secs); + wait_in_micro_secs); } static void dpcd_set_training_pattern( @@ -95,27 +101,27 @@ static void dpcd_set_training_pattern( dpcd_pattern.v1_4.TRAINING_PATTERN_SET); } -static enum hw_dp_training_pattern get_supported_tp(struct dc_link *link) +static enum dc_dp_training_pattern get_supported_tp(struct dc_link *link) { - enum hw_dp_training_pattern highest_tp = HW_DP_TRAINING_PATTERN_2; + enum dc_dp_training_pattern highest_tp = DP_TRAINING_PATTERN_SEQUENCE_2; struct encoder_feature_support *features = &link->link_enc->features; struct dpcd_caps *dpcd_caps = &link->dpcd_caps; if 
(features->flags.bits.IS_TPS3_CAPABLE) - highest_tp = HW_DP_TRAINING_PATTERN_3; + highest_tp = DP_TRAINING_PATTERN_SEQUENCE_3; if (features->flags.bits.IS_TPS4_CAPABLE) - highest_tp = HW_DP_TRAINING_PATTERN_4; + highest_tp = DP_TRAINING_PATTERN_SEQUENCE_4; if (dpcd_caps->max_down_spread.bits.TPS4_SUPPORTED && - highest_tp >= HW_DP_TRAINING_PATTERN_4) - return HW_DP_TRAINING_PATTERN_4; + highest_tp >= DP_TRAINING_PATTERN_SEQUENCE_4) + return DP_TRAINING_PATTERN_SEQUENCE_4; if (dpcd_caps->max_ln_count.bits.TPS3_SUPPORTED && - highest_tp >= HW_DP_TRAINING_PATTERN_3) - return HW_DP_TRAINING_PATTERN_3; + highest_tp >= DP_TRAINING_PATTERN_SEQUENCE_3) + return DP_TRAINING_PATTERN_SEQUENCE_3; - return HW_DP_TRAINING_PATTERN_2; + return DP_TRAINING_PATTERN_SEQUENCE_2; } static void dpcd_set_link_settings( @@ -126,7 +132,7 @@ static void dpcd_set_link_settings( union down_spread_ctrl downspread = { {0} }; union lane_count_set lane_count_set = { {0} }; - enum hw_dp_training_pattern hw_tr_pattern; + enum dc_dp_training_pattern dp_tr_pattern; downspread.raw = (uint8_t) (lt_settings->link_settings.link_spread); @@ -134,21 +140,21 @@ static void dpcd_set_link_settings( lane_count_set.bits.LANE_COUNT_SET = lt_settings->link_settings.lane_count; - lane_count_set.bits.ENHANCED_FRAMING = 1; - + lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing; lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0; - hw_tr_pattern = get_supported_tp(link); - if (hw_tr_pattern != HW_DP_TRAINING_PATTERN_4) { + dp_tr_pattern = get_supported_tp(link); + + if (dp_tr_pattern != DP_TRAINING_PATTERN_SEQUENCE_4) { lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED; } core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL, - &downspread.raw, sizeof(downspread)); + &downspread.raw, sizeof(downspread)); core_link_write_dpcd(link, DP_LANE_COUNT_SET, - &lane_count_set.raw, 1); + &lane_count_set.raw, 1); if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 && lt_settings->link_settings.use_link_rate_set == true) { @@ -162,46 +168,47 @@ static void dpcd_set_link_settings( } if (rate) { - DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x\n %x spread = %x\n", + DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x framing = %x\n %x spread = %x\n", __func__, DP_LINK_BW_SET, lt_settings->link_settings.link_rate, DP_LANE_COUNT_SET, lt_settings->link_settings.lane_count, + lt_settings->enhanced_framing, DP_DOWNSPREAD_CTRL, lt_settings->link_settings.link_spread); } else { - DC_LOG_HW_LINK_TRAINING("%s\n %x rate set = %x\n %x lane = %x\n %x spread = %x\n", + DC_LOG_HW_LINK_TRAINING("%s\n %x rate set = %x\n %x lane = %x framing = %x\n %x spread = %x\n", __func__, DP_LINK_RATE_SET, lt_settings->link_settings.link_rate_set, DP_LANE_COUNT_SET, lt_settings->link_settings.lane_count, + lt_settings->enhanced_framing, DP_DOWNSPREAD_CTRL, lt_settings->link_settings.link_spread); } - } static enum dpcd_training_patterns - hw_training_pattern_to_dpcd_training_pattern( + dc_dp_training_pattern_to_dpcd_training_pattern( struct dc_link *link, - enum hw_dp_training_pattern pattern) + enum dc_dp_training_pattern pattern) { enum dpcd_training_patterns dpcd_tr_pattern = DPCD_TRAINING_PATTERN_VIDEOIDLE; switch (pattern) { - case HW_DP_TRAINING_PATTERN_1: + case DP_TRAINING_PATTERN_SEQUENCE_1: dpcd_tr_pattern = DPCD_TRAINING_PATTERN_1; break; - case HW_DP_TRAINING_PATTERN_2: + case DP_TRAINING_PATTERN_SEQUENCE_2: dpcd_tr_pattern = DPCD_TRAINING_PATTERN_2; break; - case HW_DP_TRAINING_PATTERN_3: + case 
DP_TRAINING_PATTERN_SEQUENCE_3: dpcd_tr_pattern = DPCD_TRAINING_PATTERN_3; break; - case HW_DP_TRAINING_PATTERN_4: + case DP_TRAINING_PATTERN_SEQUENCE_4: dpcd_tr_pattern = DPCD_TRAINING_PATTERN_4; break; default: @@ -212,13 +219,12 @@ static enum dpcd_training_patterns } return dpcd_tr_pattern; - } static void dpcd_set_lt_pattern_and_lane_settings( struct dc_link *link, const struct link_training_settings *lt_settings, - enum hw_dp_training_pattern pattern) + enum dc_dp_training_pattern pattern) { union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = { { {0} } }; const uint32_t dpcd_base_lt_offset = @@ -233,7 +239,7 @@ static void dpcd_set_lt_pattern_and_lane_settings( * DpcdAddress_TrainingPatternSet *****************************************************************/ dpcd_pattern.v1_4.TRAINING_PATTERN_SET = - hw_training_pattern_to_dpcd_training_pattern(link, pattern); + dc_dp_training_pattern_to_dpcd_training_pattern(link, pattern); dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - dpcd_base_lt_offset] = dpcd_pattern.raw; @@ -346,12 +352,20 @@ static void update_drive_settings( { uint32_t lane; for (lane = 0; lane < src.link_settings.lane_count; lane++) { - dest->lane_settings[lane].VOLTAGE_SWING = - src.lane_settings[lane].VOLTAGE_SWING; - dest->lane_settings[lane].PRE_EMPHASIS = - src.lane_settings[lane].PRE_EMPHASIS; - dest->lane_settings[lane].POST_CURSOR2 = - src.lane_settings[lane].POST_CURSOR2; + if (dest->voltage_swing == NULL) + dest->lane_settings[lane].VOLTAGE_SWING = src.lane_settings[lane].VOLTAGE_SWING; + else + dest->lane_settings[lane].VOLTAGE_SWING = *dest->voltage_swing; + + if (dest->pre_emphasis == NULL) + dest->lane_settings[lane].PRE_EMPHASIS = src.lane_settings[lane].PRE_EMPHASIS; + else + dest->lane_settings[lane].PRE_EMPHASIS = *dest->pre_emphasis; + + if (dest->post_cursor2 == NULL) + dest->lane_settings[lane].POST_CURSOR2 = src.lane_settings[lane].POST_CURSOR2; + else + dest->lane_settings[lane].POST_CURSOR2 = *dest->post_cursor2; } } @@ -754,15 +768,15 @@ static enum link_training_result perform_channel_equalization_sequence( struct link_training_settings *lt_settings) { struct link_training_settings req_settings; - enum hw_dp_training_pattern hw_tr_pattern; + enum dc_dp_training_pattern tr_pattern; uint32_t retries_ch_eq; enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; union lane_align_status_updated dpcd_lane_status_updated = { {0} }; union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } }; - hw_tr_pattern = get_supported_tp(link); + tr_pattern = lt_settings->pattern_for_eq; - dp_set_hw_training_pattern(link, hw_tr_pattern); + dp_set_hw_training_pattern(link, tr_pattern); for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT; retries_ch_eq++) { @@ -776,12 +790,12 @@ static enum link_training_result perform_channel_equalization_sequence( dpcd_set_lt_pattern_and_lane_settings( link, lt_settings, - hw_tr_pattern); + tr_pattern); else dpcd_set_lane_settings(link, lt_settings); /* 3. wait for receiver to lock-on*/ - wait_for_training_aux_rd_interval(link, 400); + wait_for_training_aux_rd_interval(link, lt_settings->eq_pattern_time); /* 4. 
Read lane status and requested * drive settings as set by the sink*/ @@ -817,27 +831,16 @@ static enum link_training_result perform_clock_recovery_sequence( { uint32_t retries_cr; uint32_t retry_count; - uint32_t lane; struct link_training_settings req_settings; - enum dc_lane_count lane_count = - lt_settings->link_settings.lane_count; - enum hw_dp_training_pattern hw_tr_pattern = HW_DP_TRAINING_PATTERN_1; + enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; + enum dc_dp_training_pattern tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_1; union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX]; union lane_align_status_updated dpcd_lane_status_updated; retries_cr = 0; retry_count = 0; - /* initial drive setting (VS/PE/PC2)*/ - for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { - lt_settings->lane_settings[lane].VOLTAGE_SWING = - VOLTAGE_SWING_LEVEL0; - lt_settings->lane_settings[lane].PRE_EMPHASIS = - PRE_EMPHASIS_DISABLED; - lt_settings->lane_settings[lane].POST_CURSOR2 = - POST_CURSOR2_DISABLED; - } - dp_set_hw_training_pattern(link, hw_tr_pattern); + dp_set_hw_training_pattern(link, tr_pattern); /* najeeb - The synaptics MST hub can put the LT in * infinite loop by switching the VS @@ -845,7 +848,7 @@ static enum link_training_result perform_clock_recovery_sequence( /* between level 0 and level 1 continuously, here * we try for CR lock for LinkTrainingMaxCRRetry count*/ while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) && - (retry_count < LINK_TRAINING_MAX_CR_RETRY)) { + (retry_count < LINK_TRAINING_MAX_CR_RETRY)) { memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status)); memset(&dpcd_lane_status_updated, '\0', @@ -863,7 +866,7 @@ static enum link_training_result perform_clock_recovery_sequence( dpcd_set_lt_pattern_and_lane_settings( link, lt_settings, - hw_tr_pattern); + tr_pattern); else dpcd_set_lane_settings( link, @@ -872,7 +875,7 @@ static enum link_training_result perform_clock_recovery_sequence( /* 3. wait receiver to lock-on*/ wait_for_training_aux_rd_interval( link, - 100); + lt_settings->cr_pattern_time); /* 4. Read lane status and requested drive * settings as set by the sink @@ -939,7 +942,7 @@ static inline enum link_training_result perform_link_training_int( * TPS4 must be used instead of POST_LT_ADJ_REQ. 
*/ if (link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED != 1 || - get_supported_tp(link) == HW_DP_TRAINING_PATTERN_4) + get_supported_tp(link) == DP_TRAINING_PATTERN_SEQUENCE_4) return status; if (status == LINK_TRAINING_SUCCESS && @@ -947,7 +950,7 @@ static inline enum link_training_result perform_link_training_int( status = LINK_TRAINING_LQA_FAIL; lane_count_set.bits.LANE_COUNT_SET = lt_settings->link_settings.lane_count; - lane_count_set.bits.ENHANCED_FRAMING = 1; + lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing; lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0; core_link_write_dpcd( @@ -959,24 +962,28 @@ static inline enum link_training_result perform_link_training_int( return status; } -enum link_training_result dc_link_dp_perform_link_training( - struct dc_link *link, +static void initialize_training_settings( + struct dc_link *link, const struct dc_link_settings *link_setting, - bool skip_video_pattern) + struct link_training_settings *lt_settings) { - enum link_training_result status = LINK_TRAINING_SUCCESS; + uint32_t lane; - char *link_rate = "Unknown"; - char *lt_result = "Unknown"; + memset(lt_settings, '\0', sizeof(struct link_training_settings)); - struct link_training_settings lt_settings; + /* Initialize link settings */ + lt_settings->link_settings.use_link_rate_set = link_setting->use_link_rate_set; + lt_settings->link_settings.link_rate_set = link_setting->link_rate_set; - memset(<_settings, '\0', sizeof(lt_settings)); + if (link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN) + lt_settings->link_settings.link_rate = link->preferred_link_setting.link_rate; + else + lt_settings->link_settings.link_rate = link_setting->link_rate; - lt_settings.link_settings.link_rate = link_setting->link_rate; - lt_settings.link_settings.lane_count = link_setting->lane_count; - lt_settings.link_settings.use_link_rate_set = link_setting->use_link_rate_set; - lt_settings.link_settings.link_rate_set = link_setting->link_rate_set; + if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN) + lt_settings->link_settings.lane_count = link->preferred_link_setting.lane_count; + else + lt_settings->link_settings.lane_count = link_setting->lane_count; /*@todo[vdevulap] move SS to LS, should not be handled by displaypath*/ @@ -987,31 +994,75 @@ enum link_training_result dc_link_dp_perform_link_training( * LINK_SPREAD_05_DOWNSPREAD_30KHZ : * LINK_SPREAD_DISABLED; */ + /* Initialize link spread */ if (link->dp_ss_off) - lt_settings.link_settings.link_spread = LINK_SPREAD_DISABLED; + lt_settings->link_settings.link_spread = LINK_SPREAD_DISABLED; + else if (link->preferred_training_settings.downspread != NULL) + lt_settings->link_settings.link_spread + = *link->preferred_training_settings.downspread + ? LINK_SPREAD_05_DOWNSPREAD_30KHZ + : LINK_SPREAD_DISABLED; else - lt_settings.link_settings.link_spread = LINK_SPREAD_05_DOWNSPREAD_30KHZ; + lt_settings->link_settings.link_spread = LINK_SPREAD_05_DOWNSPREAD_30KHZ; - /* 1. set link rate, lane count and spread*/ - dpcd_set_link_settings(link, <_settings); + /* Initialize lane settings overrides */ + if (link->preferred_training_settings.voltage_swing != NULL) + lt_settings->voltage_swing = link->preferred_training_settings.voltage_swing; - /* 2. 
perform link training (set link training done - * to false is done as well)*/ - status = perform_clock_recovery_sequence(link, <_settings); - if (status == LINK_TRAINING_SUCCESS) { - status = perform_channel_equalization_sequence(link, - <_settings); - } + if (link->preferred_training_settings.pre_emphasis != NULL) + lt_settings->pre_emphasis = link->preferred_training_settings.pre_emphasis; - if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern) { - status = perform_link_training_int(link, - <_settings, - status); + if (link->preferred_training_settings.post_cursor2 != NULL) + lt_settings->post_cursor2 = link->preferred_training_settings.post_cursor2; + + /* Initialize lane settings (VS/PE/PC2) */ + for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { + lt_settings->lane_settings[lane].VOLTAGE_SWING = + lt_settings->voltage_swing != NULL ? + *lt_settings->voltage_swing : + VOLTAGE_SWING_LEVEL0; + lt_settings->lane_settings[lane].PRE_EMPHASIS = + lt_settings->pre_emphasis != NULL ? + *lt_settings->pre_emphasis + : PRE_EMPHASIS_DISABLED; + lt_settings->lane_settings[lane].POST_CURSOR2 = + lt_settings->post_cursor2 != NULL ? + *lt_settings->post_cursor2 + : POST_CURSOR2_DISABLED; } - /* 6. print status message*/ - switch (lt_settings.link_settings.link_rate) { + /* Initialize training timings */ + if (link->preferred_training_settings.cr_pattern_time != NULL) + lt_settings->cr_pattern_time = *link->preferred_training_settings.cr_pattern_time; + else + lt_settings->cr_pattern_time = get_training_aux_rd_interval(link, 100); + + if (link->preferred_training_settings.eq_pattern_time != NULL) + lt_settings->eq_pattern_time = *link->preferred_training_settings.eq_pattern_time; + else + lt_settings->eq_pattern_time = get_training_aux_rd_interval(link, 400); + + if (link->preferred_training_settings.pattern_for_eq != NULL) + lt_settings->pattern_for_eq = *link->preferred_training_settings.pattern_for_eq; + else + lt_settings->pattern_for_eq = get_supported_tp(link); + + if (link->preferred_training_settings.enhanced_framing != NULL) + lt_settings->enhanced_framing = *link->preferred_training_settings.enhanced_framing; + else + lt_settings->enhanced_framing = 1; +} + +static void print_status_message( + struct dc_link *link, + const struct link_training_settings *lt_settings, + enum link_training_result status) +{ + char *link_rate = "Unknown"; + char *lt_result = "Unknown"; + char *lt_spread = "Disabled"; + switch (lt_settings->link_settings.link_rate) { case LINK_RATE_LOW: link_rate = "RBR"; break; @@ -1057,13 +1108,102 @@ enum link_training_result dc_link_dp_perform_link_training( break; } + switch (lt_settings->link_settings.link_spread) { + case LINK_SPREAD_DISABLED: + lt_spread = "Disabled"; + break; + case LINK_SPREAD_05_DOWNSPREAD_30KHZ: + lt_spread = "0.5% 30KHz"; + break; + case LINK_SPREAD_05_DOWNSPREAD_33KHZ: + lt_spread = "0.5% 33KHz"; + break; + default: + break; + } + /* Connectivity log: link training */ - CONN_MSG_LT(link, "%sx%d %s VS=%d, PE=%d", - link_rate, - lt_settings.link_settings.lane_count, - lt_result, - lt_settings.lane_settings[0].VOLTAGE_SWING, - lt_settings.lane_settings[0].PRE_EMPHASIS); + CONN_MSG_LT(link, "%sx%d %s VS=%d, PE=%d, DS=%s", + link_rate, + lt_settings->link_settings.lane_count, + lt_result, + lt_settings->lane_settings[0].VOLTAGE_SWING, + lt_settings->lane_settings[0].PRE_EMPHASIS, + lt_spread); +} + +bool dc_link_dp_perform_link_training_skip_aux( + struct dc_link *link, + const struct dc_link_settings *link_setting) +{ + struct 
link_training_settings lt_settings; + enum dc_dp_training_pattern pattern_for_cr = DP_TRAINING_PATTERN_SEQUENCE_1; + + initialize_training_settings(link, link_setting, <_settings); + + /* 1. Perform_clock_recovery_sequence. */ + + /* transmit training pattern for clock recovery */ + dp_set_hw_training_pattern(link, pattern_for_cr); + + /* call HWSS to set lane settings*/ + dp_set_hw_lane_settings(link, <_settings); + + /* wait receiver to lock-on*/ + wait_for_training_aux_rd_interval(link, lt_settings.cr_pattern_time); + + /* 2. Perform_channel_equalization_sequence. */ + + /* transmit training pattern for channel equalization. */ + dp_set_hw_training_pattern(link, lt_settings.pattern_for_eq); + + /* call HWSS to set lane settings*/ + dp_set_hw_lane_settings(link, <_settings); + + /* wait receiver to lock-on. */ + wait_for_training_aux_rd_interval(link, lt_settings.eq_pattern_time); + + /* 3. Perform_link_training_int. */ + + /* Mainlink output idle pattern. */ + dp_set_hw_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0); + + print_status_message(link, <_settings, LINK_TRAINING_SUCCESS); + + return true; +} + +enum link_training_result dc_link_dp_perform_link_training( + struct dc_link *link, + const struct dc_link_settings *link_setting, + bool skip_video_pattern) +{ + enum link_training_result status = LINK_TRAINING_SUCCESS; + + struct link_training_settings lt_settings; + + initialize_training_settings(link, link_setting, <_settings); + + /* 1. set link rate, lane count and spread. */ + dpcd_set_link_settings(link, <_settings); + + /* 2. perform link training (set link training done + * to false is done as well) + */ + status = perform_clock_recovery_sequence(link, <_settings); + if (status == LINK_TRAINING_SUCCESS) { + status = perform_channel_equalization_sequence(link, + <_settings); + } + + if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern) { + status = perform_link_training_int(link, + <_settings, + status); + } + + /* 6. 
print status message*/ + print_status_message(link, <_settings, status); if (status != LINK_TRAINING_SUCCESS) link->ctx->dc->debug_data.ltFailCount++; @@ -1071,7 +1211,6 @@ enum link_training_result dc_link_dp_perform_link_training( return status; } - bool perform_link_training_with_retries( struct dc_link *link, const struct dc_link_settings *link_setting, diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c index 2d019e1f6135..211fadefe2f5 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c @@ -25,10 +25,11 @@ enum dc_status core_link_read_dpcd( uint8_t *data, uint32_t size) { - if (!dm_helpers_dp_read_dpcd(link->ctx, - link, - address, data, size)) - return DC_ERROR_UNEXPECTED; + if (!link->aux_access_disabled && + !dm_helpers_dp_read_dpcd(link->ctx, + link, address, data, size)) { + return DC_ERROR_UNEXPECTED; + } return DC_OK; } @@ -39,10 +40,11 @@ enum dc_status core_link_write_dpcd( const uint8_t *data, uint32_t size) { - if (!dm_helpers_dp_write_dpcd(link->ctx, - link, - address, data, size)) - return DC_ERROR_UNEXPECTED; + if (!link->aux_access_disabled && + !dm_helpers_dp_write_dpcd(link->ctx, + link, address, data, size)) { + return DC_ERROR_UNEXPECTED; + } return DC_OK; } @@ -203,21 +205,21 @@ void dp_disable_link_phy_mst(struct dc_link *link, enum signal_type signal) bool dp_set_hw_training_pattern( struct dc_link *link, - enum hw_dp_training_pattern pattern) + enum dc_dp_training_pattern pattern) { enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED; switch (pattern) { - case HW_DP_TRAINING_PATTERN_1: + case DP_TRAINING_PATTERN_SEQUENCE_1: test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN1; break; - case HW_DP_TRAINING_PATTERN_2: + case DP_TRAINING_PATTERN_SEQUENCE_2: test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN2; break; - case HW_DP_TRAINING_PATTERN_3: + case DP_TRAINING_PATTERN_SEQUENCE_3: test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN3; break; - case HW_DP_TRAINING_PATTERN_4: + case DP_TRAINING_PATTERN_SEQUENCE_4: test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4; break; default: diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index dfcec4d3e9c0..efa7a47f6b7e 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -90,6 +90,13 @@ enum dc_post_cursor2 { POST_CURSOR2_MAX_LEVEL = POST_CURSOR2_LEVEL3, }; +enum dc_dp_training_pattern { + DP_TRAINING_PATTERN_SEQUENCE_1 = 0, + DP_TRAINING_PATTERN_SEQUENCE_2, + DP_TRAINING_PATTERN_SEQUENCE_3, + DP_TRAINING_PATTERN_SEQUENCE_4, +}; + struct dc_link_settings { enum dc_lane_count lane_count; enum dc_link_rate link_rate; @@ -109,6 +116,20 @@ struct dc_link_training_settings { struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]; }; +struct dc_link_training_overrides { + enum dc_voltage_swing *voltage_swing; + enum dc_pre_emphasis *pre_emphasis; + enum dc_post_cursor2 *post_cursor2; + + uint16_t *cr_pattern_time; + uint16_t *eq_pattern_time; + enum dc_dp_training_pattern *pattern_for_eq; + + enum dc_link_spread *downspread; + bool *alternate_scrambler_reset; + bool *enhanced_framing; + bool *fec_enable; +}; union dpcd_rev { struct { diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index 6f0b80111e58..d6ff5af70c71 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -83,6 +83,7 @@ 
struct dc_link { bool is_hpd_filter_disabled; bool dp_ss_off; bool link_state_valid; + bool aux_access_disabled; /* caps is the same as reported_link_cap. link_traing use * reported_link_cap. Will clean up. TODO @@ -92,6 +93,7 @@ struct dc_link { struct dc_link_settings cur_link_settings; struct dc_lane_settings cur_lane_setting; struct dc_link_settings preferred_link_setting; + struct dc_link_training_overrides preferred_training_settings; uint8_t ddc_hw_inst; @@ -217,6 +219,10 @@ void dc_link_dp_set_drive_settings( struct dc_link *link, struct link_training_settings *lt_settings); +bool dc_link_dp_perform_link_training_skip_aux( + struct dc_link *link, + const struct dc_link_settings *link_setting); + enum link_training_result dc_link_dp_perform_link_training( struct dc_link *link, const struct dc_link_settings *link_setting, @@ -251,6 +257,11 @@ void dc_link_perform_link_training(struct dc *dc, void dc_link_set_preferred_link_settings(struct dc *dc, struct dc_link_settings *link_setting, struct dc_link *link); +void dc_link_set_preferred_training_settings(struct dc *dc, + struct dc_link_settings *link_setting, + struct dc_link_training_overrides *lt_overrides, + struct dc_link *link, + bool skip_immediate_retrain); void dc_link_enable_hpd(const struct dc_link *link); void dc_link_disable_hpd(const struct dc_link *link); void dc_link_set_test_pattern(struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h index 30be7bb4a01a..3680846674e8 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h @@ -60,7 +60,7 @@ void dp_disable_link_phy_mst(struct dc_link *link, enum signal_type signal); bool dp_set_hw_training_pattern( struct dc_link *link, - enum hw_dp_training_pattern pattern); + enum dc_dp_training_pattern pattern); void dp_set_hw_lane_settings( struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h index 80f0d93cfd94..876b0b3e1a9c 100644 --- a/drivers/gpu/drm/amd/display/include/link_service_types.h +++ b/drivers/gpu/drm/amd/display/include/link_service_types.h @@ -71,14 +71,17 @@ enum link_training_result { struct link_training_settings { struct dc_link_settings link_settings; struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]; - bool allow_invalid_msa_timing_param; -}; -enum hw_dp_training_pattern { - HW_DP_TRAINING_PATTERN_1 = 0, - HW_DP_TRAINING_PATTERN_2, - HW_DP_TRAINING_PATTERN_3, - HW_DP_TRAINING_PATTERN_4 + enum dc_voltage_swing *voltage_swing; + enum dc_pre_emphasis *pre_emphasis; + enum dc_post_cursor2 *post_cursor2; + + uint16_t cr_pattern_time; + uint16_t eq_pattern_time; + enum dc_dp_training_pattern pattern_for_eq; + + bool enhanced_framing; + bool allow_invalid_msa_timing_param; }; /*TODO: Move this enum test harness*/ -- cgit v1.2.3 From 125cfef3e4d82d9c6bda9c0b5868cc95d173d86d Mon Sep 17 00:00:00 2001 From: Aric Cyr Date: Mon, 3 Jun 2019 09:12:55 -0400 Subject: drm/amd/display: 3.2.36 Signed-off-by: Aric Cyr Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index e513028faefa..2a7f25d372e1 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -39,7 +39,7 @@ #include "inc/hw/dmcu.h" #include 
"dml/display_mode_lib.h" -#define DC_VER "3.2.35" +#define DC_VER "3.2.36" #define MAX_SURFACES 3 #define MAX_PLANES 6 -- cgit v1.2.3 From b48935b3bfc1350737e759fef5e92db14a2e2fbb Mon Sep 17 00:00:00 2001 From: Jun Lei Date: Mon, 3 Jun 2019 08:13:12 -0400 Subject: drm/amd/display: fix up HUBBUB hw programming for VM [why] Some values were not being converted or bit-shifted properly for HW registers, causing black screen [how] Fix up the values before programming HW Signed-off-by: Jun Lei Reviewed-by: Anthony Koo Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c | 17 ++++++++--------- drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h | 4 ++-- 2 files changed, 10 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c index ece6e136437b..c72a9ff57f15 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c @@ -366,25 +366,24 @@ int hubbub2_init_dchub_sys_ctx(struct hubbub *hubbub, struct dcn_vmid_page_table_config phys_config; REG_SET(DCN_VM_FB_LOCATION_BASE, 0, - FB_BASE, pa_config->system_aperture.fb_base); + FB_BASE, pa_config->system_aperture.fb_base >> 24); REG_SET(DCN_VM_FB_LOCATION_TOP, 0, - FB_TOP, pa_config->system_aperture.fb_top); + FB_TOP, pa_config->system_aperture.fb_top >> 24); REG_SET(DCN_VM_FB_OFFSET, 0, - FB_OFFSET, pa_config->system_aperture.fb_offset); + FB_OFFSET, pa_config->system_aperture.fb_offset >> 24); REG_SET(DCN_VM_AGP_BOT, 0, - AGP_BOT, pa_config->system_aperture.agp_bot); + AGP_BOT, pa_config->system_aperture.agp_bot >> 24); REG_SET(DCN_VM_AGP_TOP, 0, - AGP_TOP, pa_config->system_aperture.agp_top); + AGP_TOP, pa_config->system_aperture.agp_top >> 24); REG_SET(DCN_VM_AGP_BASE, 0, - AGP_BASE, pa_config->system_aperture.agp_base); + AGP_BASE, pa_config->system_aperture.agp_base >> 24); if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) { - phys_config.depth = 1; - phys_config.block_size = 4096; phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12; phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12; phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr; - + phys_config.depth = 0; + phys_config.block_size = 0; // Init VMID 0 based on PA config dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config); } diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index 959f5b654611..1ea505f7a05a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -61,8 +61,8 @@ enum dcn_hubbub_page_table_depth { }; enum dcn_hubbub_page_table_block_size { - DCN_PAGE_TABLE_BLOCK_SIZE_4KB, - DCN_PAGE_TABLE_BLOCK_SIZE_64KB + DCN_PAGE_TABLE_BLOCK_SIZE_4KB = 0, + DCN_PAGE_TABLE_BLOCK_SIZE_64KB = 4 }; struct dcn_hubbub_phys_addr_config { -- cgit v1.2.3 From ec16ac6b4264dffefe3cd137038e138e79e07848 Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Thu, 30 May 2019 15:47:51 -0400 Subject: drm/amd/display: fix dsc disable A regression caused dsc to never get disabled in certain situations. 
Signed-off-by: Dmytro Laktyushkin Reviewed-by: Nikola Cornij Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 0b84a322b8a2..94f2f9fc6956 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -1740,8 +1740,11 @@ static void dcn20_reset_back_end_for_pipe( else if (pipe_ctx->stream_res.audio) { dc->hwss.disable_audio_stream(pipe_ctx, FREE_ACQUIRED_RESOURCE); } - } +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT + else if (pipe_ctx->stream_res.dsc) + dp_set_dsc_enable(pipe_ctx, false); +#endif /* by upper caller loop, parent pipe: pipe0, will be reset last. * back end share by all pipes and will be disable only when disable -- cgit v1.2.3 From 4bc846908be51fcd66fed18aba9ba531a38dc725 Mon Sep 17 00:00:00 2001 From: Aric Cyr Date: Mon, 3 Jun 2019 11:30:43 -0400 Subject: drm/amd/display: 3.2.37 Signed-off-by: Aric Cyr Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 2a7f25d372e1..c0ebb77fab70 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -39,7 +39,7 @@ #include "inc/hw/dmcu.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.2.36" +#define DC_VER "3.2.37" #define MAX_SURFACES 3 #define MAX_PLANES 6 -- cgit v1.2.3 From 6de202373bf6e0cea0f47c2f81274f713c22d84d Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Wed, 22 May 2019 14:24:40 -0400 Subject: drm/amd/display: move bw calc code into helpers [Why] For better readability and reusability [How] Move snippets of BW calculation code into helpers. 
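Before diving into the diff, the shape of the refactor is easier to see in a condensed sketch; struct validate_ctx and the helper bodies below are placeholders rather than the DC interfaces. The monolithic validation routine is split so that a fast path can return as soon as the voltage-level search succeeds, while the full path continues into watermark and DLG parameter calculation.

#include <linux/types.h>

struct validate_ctx {
        int vlevel;
        int pipe_cnt;
};

/* Mode support / voltage-level search only. */
static bool fast_validate_bw(struct validate_ctx *ctx)
{
        ctx->vlevel = 0;
        return true;
}

/* Watermarks for each clock state. */
static void calculate_wm(struct validate_ctx *ctx)
{
        ctx->pipe_cnt = 0;
}

/* Per-pipe DLG/RQ parameters. */
static void calculate_dlg_params(struct validate_ctx *ctx)
{
}

static bool validate_bandwidth(struct validate_ctx *ctx, bool fast_validate)
{
        if (!fast_validate_bw(ctx))
                return false;

        if (fast_validate)
                return true;            /* skip the expensive part */

        calculate_wm(ctx);
        calculate_dlg_params(ctx);
        return true;
}

The actual patch follows this split: dcn20_validate_bandwidth() now calls dcn20_fast_validate_bw() and, unless fast_validate is set, dcn20_calculate_wm() followed by dcn20_calculate_dlg_params().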
Signed-off-by: Eric Yang Reviewed-by: Fatemeh Darbehani Acked-by: Leo Li Signed-off-by: Alex Deucher --- .../amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c | 2 - .../gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 250 +++++++++++++-------- .../gpu/drm/amd/display/dc/dcn20/dcn20_resource.h | 11 + .../drm/amd/display/dc/inc/hw/clk_mgr_internal.h | 2 + 4 files changed, 166 insertions(+), 99 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c index 740f5db22bb5..614a941eb9f2 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c @@ -26,8 +26,6 @@ #include "dccg.h" #include "clk_mgr_internal.h" - -#include "dcn20/dcn20_clk_mgr.h" #include "dce100/dce_clk_mgr.h" #include "reg_helper.h" #include "core_types.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index b537536d6488..51c45abb3b11 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -2018,15 +2018,16 @@ static bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx) } #endif -bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate) +bool dcn20_fast_validate_bw( + struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int *pipe_split_from, + int *vlevel_out) { bool out = false; - BW_VAL_TRACE_SETUP(); - int pipe_cnt, i, pipe_idx, vlevel, vlevel_unsplit; - int pipe_split_from[MAX_PIPES]; bool odm_capable = context->bw_ctx.dml.ip.odm_capable; bool force_split = false; #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT @@ -2034,10 +2035,7 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, #endif int split_threshold = dc->res_pool->pipe_count / 2; bool avoid_split = dc->debug.pipe_split_policy != MPC_SPLIT_DYNAMIC; - display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL); - DC_LOGGER_INIT(dc->ctx->logger); - BW_VAL_TRACE_COUNT(); ASSERT(pipes); if (!pipes) @@ -2077,7 +2075,6 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, &context->res_ctx, pipes); if (!pipe_cnt) { - BW_VAL_TRACE_SKIP(pass); out = true; goto validate_out; } @@ -2242,101 +2239,128 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, } #endif - BW_VAL_TRACE_END_VOLTAGE_LEVEL(); + *vlevel_out = vlevel; - if (fast_validate) { - BW_VAL_TRACE_SKIP(fast); - out = true; - goto validate_out; - } + out = true; + goto validate_out; + +validate_fail: + out = false; + +validate_out: + return out; +} + +void dcn20_calculate_wm( + struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int *out_pipe_cnt, + int *pipe_split_from, + int vlevel) +{ + int pipe_cnt, i, pipe_idx; for (i = 0, pipe_idx = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { - if (!context->res_ctx.pipe_ctx[i].stream) - continue; + if (!context->res_ctx.pipe_ctx[i].stream) + continue; - pipes[pipe_cnt].clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0; - pipes[pipe_cnt].clks_cfg.dispclk_mhz = context->bw_ctx.dml.vba.RequiredDISPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb]; + pipes[pipe_cnt].clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0; + pipes[pipe_cnt].clks_cfg.dispclk_mhz = 
context->bw_ctx.dml.vba.RequiredDISPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb]; - if (pipe_split_from[i] < 0) { - pipes[pipe_cnt].clks_cfg.dppclk_mhz = - context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx]; - if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_idx] == pipe_idx) - pipes[pipe_cnt].pipe.dest.odm_combine = - context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx]; - else - pipes[pipe_cnt].pipe.dest.odm_combine = 0; - pipe_idx++; - } else { - pipes[pipe_cnt].clks_cfg.dppclk_mhz = - context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_split_from[i]]; - if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_split_from[i]] == pipe_split_from[i]) - pipes[pipe_cnt].pipe.dest.odm_combine = - context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_split_from[i]]; + if (pipe_split_from[i] < 0) { + pipes[pipe_cnt].clks_cfg.dppclk_mhz = + context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx]; + if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_idx] == pipe_idx) + pipes[pipe_cnt].pipe.dest.odm_combine = + context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx]; + else + pipes[pipe_cnt].pipe.dest.odm_combine = 0; + pipe_idx++; + } else { + pipes[pipe_cnt].clks_cfg.dppclk_mhz = + context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_split_from[i]]; + if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_split_from[i]] == pipe_split_from[i]) + pipes[pipe_cnt].pipe.dest.odm_combine = + context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_split_from[i]]; + else + pipes[pipe_cnt].pipe.dest.odm_combine = 0; + } + + if (dc->config.forced_clocks) { + pipes[pipe_cnt].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz; + pipes[pipe_cnt].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz; + } + + pipe_cnt++; + } + + if (pipe_cnt != pipe_idx) { + if (dc->res_pool->funcs->populate_dml_pipes) + pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, + &context->res_ctx, pipes); else - pipes[pipe_cnt].pipe.dest.odm_combine = 0; + pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, + &context->res_ctx, pipes); } - if (dc->config.forced_clocks) { - pipes[pipe_cnt].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz; - pipes[pipe_cnt].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz; + + *out_pipe_cnt = pipe_cnt; + + pipes[0].clks_cfg.voltage = vlevel; + pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz; + pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz; + + /* only pipe 0 is read for voltage and dcf/soc clocks */ + if (vlevel < 1) { + pipes[0].clks_cfg.voltage = 1; + pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[1].dcfclk_mhz; + pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[1].socclk_mhz; } - pipe_cnt++; - } + context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, 
pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + + if (vlevel < 2) { + pipes[0].clks_cfg.voltage = 2; + pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].dcfclk_mhz; + pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].socclk_mhz; + } + context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + + if (vlevel < 3) { + pipes[0].clks_cfg.voltage = 3; + pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].dcfclk_mhz; + pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].socclk_mhz; + } + context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + + pipes[0].clks_cfg.voltage = vlevel; + pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz; + pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz; + context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +} + +void dcn20_calculate_dlg_params( + struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int pipe_cnt, + int vlevel) +{ + int i, pipe_idx; - if (pipe_cnt != pipe_idx) { - if (dc->res_pool->funcs->populate_dml_pipes) - pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, - &context->res_ctx, pipes); - else - pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, - &context->res_ctx, pipes); - } - - pipes[0].clks_cfg.voltage = vlevel; - pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz; - pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz; - - /* only pipe 0 is read for voltage and dcf/soc clocks */ - if (vlevel 
< 1) { - pipes[0].clks_cfg.voltage = 1; - pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[1].dcfclk_mhz; - pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[1].socclk_mhz; - } - context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - - if (vlevel < 2) { - pipes[0].clks_cfg.voltage = 2; - pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].dcfclk_mhz; - pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].socclk_mhz; - } - context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - - if (vlevel < 3) { - pipes[0].clks_cfg.voltage = 3; - pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].dcfclk_mhz; - pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[2].socclk_mhz; - } - context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - - pipes[0].clks_cfg.voltage = vlevel; - pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz; - pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz; - context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, 
pipe_cnt) * 1000; /* Writeback MCIF_WB arbitration parameters */ dc->res_pool->funcs->set_mcif_arb_params(dc, context, pipes, pipe_cnt); @@ -2351,7 +2375,7 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, != dm_dram_clock_change_unsupported; context->bw_ctx.bw.dcn.clk.dppclk_khz = 0; - BW_VAL_TRACE_END_WATERMARKS(); + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { if (!context->res_ctx.pipe_ctx[i].stream) @@ -2393,8 +2417,40 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, pipes[pipe_idx].pipe); pipe_idx++; } +} + +bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, + bool fast_validate) +{ + bool out = false; + + BW_VAL_TRACE_SETUP(); + + int vlevel = 0; + int pipe_split_from[MAX_PIPES]; + int pipe_cnt = 0; + display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL); + DC_LOGGER_INIT(dc->ctx->logger); + + BW_VAL_TRACE_COUNT(); + + out = dcn20_fast_validate_bw(dc, context, pipes, pipe_split_from, &vlevel); + + if (!out) + goto validate_fail; + + BW_VAL_TRACE_END_VOLTAGE_LEVEL(); + + if (fast_validate) { + BW_VAL_TRACE_SKIP(fast); + goto validate_out; + } + + dcn20_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel); + dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel); + + BW_VAL_TRACE_END_WATERMARKS(); - out = true; goto validate_out; validate_fail: diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h index b5a75289f444..2b3692e0c48d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h @@ -116,6 +116,17 @@ void dcn20_set_mcif_arb_params( display_e2e_pipe_params_st *pipes, int pipe_cnt); bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate); +bool dcn20_fast_validate_bw( + struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int *pipe_split_from, + int *vlevel_out); +void dcn20_calculate_dlg_params( + struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int pipe_cnt, + int vlevel); enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream); enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h index 0835ac041acf..3c105124dcdd 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h @@ -64,6 +64,8 @@ enum dentist_divider_range { *************************************************************************************** */ +/* Macros */ + #define TO_CLK_MGR_INTERNAL(clk_mgr)\ container_of(clk_mgr, struct clk_mgr_internal, base) -- cgit v1.2.3 From ff4258d59031f783039127b90225320bcaa2625c Mon Sep 17 00:00:00 2001 From: Jun Lei Date: Mon, 3 Jun 2019 11:37:44 -0400 Subject: drm/amd/display: cap DCFCLK hardmin to 507 for NV10 [why] Due to limitation in SMU/PPLIB, it is not possible to know Fmax @ Vmin for DCFCLK. This causes issues at high display configurations where extra headroom of DCFCLK can enable P-state switching [how] Use existing override logic. 
If override not defined, then force min = 507 Signed-off-by: Jun Lei Reviewed-by: Eric Yang Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 51c45abb3b11..b57c42061870 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -2709,6 +2709,10 @@ static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_ if (dc->bb_overrides.min_dcfclk_mhz > 0) min_dcfclk = dc->bb_overrides.min_dcfclk_mhz; + else + // Accounting for SOC/DCF relationship, we can go as high as + // 506Mhz in Vmin. We need to code 507 since SMU will round down to 506. + min_dcfclk = 507; for (i = 0; i < num_states; i++) { int min_fclk_required_by_uclk; -- cgit v1.2.3 From ac3d76e0665cd66b06ee13f5ed61cdbeb8180229 Mon Sep 17 00:00:00 2001 From: Harmanprit Tatla Date: Tue, 4 Jun 2019 14:12:21 -0400 Subject: drm/amd/display: No audio endpoint for Dell MST display [Why] There are certain MST displays (i.e. Dell P2715Q) that although have the MST feature set to off may still report it is a branch device and a non-zero value for downstream port present. This can lead to us incorrectly classifying a dp dongle connection as being active and disabling the audio endpoint for the display. [How] Modified the placement and condition used to assign the is_branch_dev bit. Signed-off-by: Harmanprit Tatla Reviewed-by: Aric Cyr Acked-by: Anthony Koo Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 3f8a8f61cd76..fca1bfc901b6 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -2369,11 +2369,18 @@ static void get_active_converter_info( link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE; ddc_service_set_dongle_type(link->ddc, link->dpcd_caps.dongle_type); + link->dpcd_caps.is_branch_dev = false; return; } /* DPCD 0x5 bit 0 = 1, it indicate it's branch device */ - link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT; + if (ds_port.fields.PORT_TYPE == DOWNSTREAM_DP) { + link->dpcd_caps.is_branch_dev = false; + } + + else { + link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT; + } switch (ds_port.fields.PORT_TYPE) { case DOWNSTREAM_VGA: -- cgit v1.2.3 From 63b9caff9874202ec7f6aa53abb6a4be0a197dc1 Mon Sep 17 00:00:00 2001 From: Chiawen Huang Date: Tue, 14 May 2019 16:16:11 +0800 Subject: drm/amd/display: Add aux tracing log in dce [Why] dce was re-arch'd, therefore adding aux tracing log into new dce [How] The porting from submit_channel_request/process_channel_reply of aux_engine_dce110.c Signed-off-by: Chiawen Huang Reviewed-by: Tony Cheng Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dce/dce_aux.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c index f2295e780031..c3f9f4185ce8 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c @@ 
-30,6 +30,7 @@ #include "core_types.h" #include "dce_aux.h" #include "dce/dce_11_0_sh_mask.h" +#include "dm_event_log.h" #define CTX \ aux110->base.ctx @@ -252,6 +253,8 @@ static void submit_channel_request( } REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1); + EVENT_LOG_AUX_REQ(engine->ddc->pin_data->en, EVENT_LOG_AUX_ORIGIN_NATIVE, + request->action, request->address, request->length, request->data); } static int read_channel_reply(struct dce_aux *engine, uint32_t size, @@ -480,9 +483,13 @@ int dce_aux_transfer_raw(struct ddc_service *ddc, *operation_result = get_channel_status(aux_engine, &returned_bytes); if (*operation_result == AUX_CHANNEL_OPERATION_SUCCEEDED) { - read_channel_reply(aux_engine, payload->length, + int bytes_replied = 0; + bytes_replied = read_channel_reply(aux_engine, payload->length, payload->data, payload->reply, &status); + EVENT_LOG_AUX_REP(aux_engine->ddc->pin_data->en, + EVENT_LOG_AUX_ORIGIN_NATIVE, *payload->reply, + bytes_replied, payload->data); res = returned_bytes; } else { res = -1; -- cgit v1.2.3 From 64af142a8caf8887471d8d79079806875792b5dd Mon Sep 17 00:00:00 2001 From: David Francis Date: Tue, 4 Jun 2019 16:05:14 -0400 Subject: drm/amd/display: Update drm_dsc to reflect native 4.2.0 DSC spec [Why] Some parts of the DSC spec relating to 4.2.0 were not reflected in drm_dsc_compute_rc_parameters, causing unexpected config failures [How] Add nsl_bpg_offset and rbs_min computation Signed-off-by: David Francis Reviewed-by: Nikola Cornij Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c | 73 +++++++++++++++++++++++-- 1 file changed, 69 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c b/drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c index 67089765780b..04c6295f296e 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c @@ -15,6 +15,7 @@ #define ERANGE -1 #define DRM_DEBUG_KMS(msg) /* nothing */ #define cpu_to_be16(__x) little_to_big(__x) +#define MAX(x, y) ((x) > (y) ? 
(x) : (y)) static unsigned short little_to_big(int data) { @@ -232,6 +233,38 @@ void drm_dsc_pps_payload_pack(struct drm_dsc_picture_parameter_set *pps_payload, } EXPORT_SYMBOL(drm_dsc_pps_payload_pack); +static int compute_offset(struct drm_dsc_config *vdsc_cfg, int pixels_per_group, + int groups_per_line, int grpcnt) +{ + int offset = 0; + int grpcnt_id = DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay, pixels_per_group); + + if (grpcnt <= grpcnt_id) + offset = DIV_ROUND_UP(grpcnt * pixels_per_group * vdsc_cfg->bits_per_pixel, 16); + else + offset = DIV_ROUND_UP(grpcnt_id * pixels_per_group * vdsc_cfg->bits_per_pixel, 16) + - (((grpcnt - grpcnt_id) * vdsc_cfg->slice_bpg_offset) >> 11); + + if (grpcnt <= groups_per_line) + offset += grpcnt * vdsc_cfg->first_line_bpg_offset; + else + offset += groups_per_line * vdsc_cfg->first_line_bpg_offset + - (((grpcnt - groups_per_line) * vdsc_cfg->nfl_bpg_offset) >> 11); + + if (vdsc_cfg->native_420) { + if (grpcnt <= groups_per_line) + offset -= (grpcnt * vdsc_cfg->nsl_bpg_offset) >> 11; + else if (grpcnt <= 2 * groups_per_line) + offset += (grpcnt - groups_per_line) * vdsc_cfg->second_line_bpg_offset + - ((groups_per_line * vdsc_cfg->nsl_bpg_offset) >> 11); + else + offset += (grpcnt - groups_per_line) * vdsc_cfg->second_line_bpg_offset + - (((grpcnt - groups_per_line) * vdsc_cfg->nsl_bpg_offset) >> 11); + } + + return offset; +} + /** * drm_dsc_compute_rc_parameters() - Write rate control * parameters to the dsc configuration defined in @@ -251,6 +284,7 @@ int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg) unsigned long hrd_delay = 0; unsigned long final_scale = 0; unsigned long rbs_min = 0; + unsigned long max_offset = 0; if (vdsc_cfg->native_420 || vdsc_cfg->native_422) { /* Number of groups used to code each line of a slice */ @@ -329,6 +363,17 @@ int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg) return -ERANGE; } + if (vdsc_cfg->slice_height > 2) + vdsc_cfg->nsl_bpg_offset = DIV_ROUND_UP((vdsc_cfg->second_line_bpg_offset << 11), + (vdsc_cfg->slice_height - 1)); + else + vdsc_cfg->nsl_bpg_offset = 0; + + if (vdsc_cfg->nsl_bpg_offset > 65535) { + DRM_DEBUG_KMS("NslBpgOffset is too large for this slice height\n"); + return -ERANGE; + } + /* Number of groups used to code the entire slice */ groups_total = groups_per_line * vdsc_cfg->slice_height; @@ -348,6 +393,7 @@ int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg) vdsc_cfg->scale_increment_interval = (vdsc_cfg->final_offset * (1 << 11)) / ((vdsc_cfg->nfl_bpg_offset + + vdsc_cfg->nsl_bpg_offset + vdsc_cfg->slice_bpg_offset) * (final_scale - 9)); } else { @@ -368,10 +414,29 @@ int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg) * bits/pixel (bpp) rate that is used by the encoder, * in steps of 1/16 of a bit per pixel */ - rbs_min = vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset + - DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay * - vdsc_cfg->bits_per_pixel, 16) + - groups_per_line * vdsc_cfg->first_line_bpg_offset; + if (vdsc_cfg->dsc_version_minor == 2 && (vdsc_cfg->native_420 || vdsc_cfg->native_422)) { + + max_offset = compute_offset(vdsc_cfg, DSC_RC_PIXELS_PER_GROUP, groups_per_line, + DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay, + DSC_RC_PIXELS_PER_GROUP)); + + max_offset = MAX(max_offset, + compute_offset(vdsc_cfg, DSC_RC_PIXELS_PER_GROUP, groups_per_line, + DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay, + groups_per_line))); + + max_offset = MAX(max_offset, + compute_offset(vdsc_cfg, DSC_RC_PIXELS_PER_GROUP, groups_per_line, + 
DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay, + groups_per_line * 2))); + + rbs_min = vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset + max_offset; + } else { + rbs_min = vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset + + DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay * + vdsc_cfg->bits_per_pixel, 16) + + groups_per_line * vdsc_cfg->first_line_bpg_offset; + } hrd_delay = DIV_ROUND_UP((rbs_min * 16), vdsc_cfg->bits_per_pixel); vdsc_cfg->rc_bits = (hrd_delay * vdsc_cfg->bits_per_pixel) / 16; -- cgit v1.2.3 From 5e335add0bccaa8285f26ac4dd8366082a46ac33 Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Tue, 4 Jun 2019 18:14:43 -0400 Subject: drm/amd/display: early return when pipe_cnt is 0 in bw validation [Why] Unintentionally introduced behaviour change from previous refactor, which causes clks to be 0 in no stream cases, which will cause divide by 0. [How] Skip calculation of clocks when no stream. Which is the same as old behaviour. Signed-off-by: Eric Yang Reviewed-by: Tony Cheng Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 8 +++++++- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h | 1 + 2 files changed, 8 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index b57c42061870..ae2545fb8ece 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -2022,6 +2022,7 @@ bool dcn20_fast_validate_bw( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, + int *pipe_cnt_out, int *pipe_split_from, int *vlevel_out) { @@ -2074,6 +2075,8 @@ bool dcn20_fast_validate_bw( pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, &context->res_ctx, pipes); + *pipe_cnt_out = pipe_cnt; + if (!pipe_cnt) { out = true; goto validate_out; @@ -2434,7 +2437,10 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, BW_VAL_TRACE_COUNT(); - out = dcn20_fast_validate_bw(dc, context, pipes, pipe_split_from, &vlevel); + out = dcn20_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel); + + if (pipe_cnt == 0) + goto validate_out; if (!out) goto validate_fail; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h index 2b3692e0c48d..44f95aa0d61e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h @@ -120,6 +120,7 @@ bool dcn20_fast_validate_bw( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, + int *pipe_cnt_out, int *pipe_split_from, int *vlevel_out); void dcn20_calculate_dlg_params( -- cgit v1.2.3 From a6f30079b8562b659e1d06f7cb1bc30951869bbc Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Tue, 4 Jun 2019 14:48:33 -0400 Subject: drm/amd/display: Set default block_size, even in unexpected cases We're not expected to enter the default case, but not returning a default value here is incorrect. 
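A standalone sketch of the fixed pattern (illustrative names and sizes, not the driver source): when the default arm of the translation switch only asserts, the output is left uninitialized for unexpected inputs, so the fix is to assign a defined fallback before returning.

#include <assert.h>
#include <stdio.h>

enum hw_block_size { HW_BLOCK_SIZE_4KB = 0, HW_BLOCK_SIZE_64KB = 4 };

static enum hw_block_size to_hw_block_size(unsigned int bytes)
{
        enum hw_block_size hw;

        switch (bytes) {
        case 4096:
                hw = HW_BLOCK_SIZE_4KB;
                break;
        case 65536:
                hw = HW_BLOCK_SIZE_64KB;
                break;
        default:
                assert(0);                      /* flag the unexpected input in debug builds */
                hw = (enum hw_block_size)bytes; /* still return a defined value otherwise */
                break;
        }

        return hw;
}

int main(void)
{
        printf("%d\n", (int)to_hw_block_size(65536)); /* prints 4 */
        return 0;
}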
Signed-off-by: Dmytro Laktyushkin Reviewed-by: Eric Bernstein Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c | 1 + drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c index c72a9ff57f15..6e2dbd03f9bf 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c @@ -337,6 +337,7 @@ static enum dcn_hubbub_page_table_block_size page_table_block_size_to_hw(unsigne break; default: ASSERT(false); + block_size = page_table_block_size; break; } diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index 1ea505f7a05a..9502478c4a1b 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -62,7 +62,7 @@ enum dcn_hubbub_page_table_depth { enum dcn_hubbub_page_table_block_size { DCN_PAGE_TABLE_BLOCK_SIZE_4KB = 0, - DCN_PAGE_TABLE_BLOCK_SIZE_64KB = 4 + DCN_PAGE_TABLE_BLOCK_SIZE_64KB = 4, }; struct dcn_hubbub_phys_addr_config { -- cgit v1.2.3 From 771b286379282ee9676c4233093e4e3c33c78346 Mon Sep 17 00:00:00 2001 From: Nikola Cornij Date: Wed, 5 Jun 2019 14:29:47 -0400 Subject: drm/amd/display: Set one 4:2:0-related PPS field as recommended by DSC spec [why] 'second_line_offset_adj' was mistakenly left at zero, even though DSC spec v1.2a recommends setting this field to 512 for 4:2:0. [how] Set 'second_line_offset_adj' to 512 for 4:2:0 and leave at zero otherwise Signed-off-by: Nikola Cornij Reviewed-by: Eric Bernstein Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c b/drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c index 04c6295f296e..fd1fb1653479 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c @@ -442,6 +442,12 @@ int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg) vdsc_cfg->rc_bits = (hrd_delay * vdsc_cfg->bits_per_pixel) / 16; vdsc_cfg->initial_dec_delay = hrd_delay - vdsc_cfg->initial_xmit_delay; + /* As per DSC spec v1.2a recommendation: */ + if (vdsc_cfg->native_420) + vdsc_cfg->second_line_offset_adj = 512; + else + vdsc_cfg->second_line_offset_adj = 0; + return 0; } EXPORT_SYMBOL(drm_dsc_compute_rc_parameters); -- cgit v1.2.3 From 6d988a557ee8bd0970c46aea94396ff4815724cd Mon Sep 17 00:00:00 2001 From: Jun Lei Date: Wed, 5 Jun 2019 10:53:40 -0400 Subject: drm/amd/display: swap system aperture high/low [why] Currently logical values are swapped in HW, causing system aperture to be undefined, so VA and PA cannot co-exist [how] program values correctly Signed-off-by: Jun Lei Reviewed-by: Yongqiang Sun Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 94f2f9fc6956..710727e5d0f8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -1153,8 +1153,8 @@ void dcn20_enable_plane( 
apt.sys_default.quad_part = 0; - apt.sys_high.quad_part = dc->vm_pa_config.system_aperture.start_addr; - apt.sys_low.quad_part = dc->vm_pa_config.system_aperture.end_addr; + apt.sys_low.quad_part = dc->vm_pa_config.system_aperture.start_addr; + apt.sys_high.quad_part = dc->vm_pa_config.system_aperture.end_addr; // Program system aperture settings pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings(pipe_ctx->plane_res.hubp, &apt); -- cgit v1.2.3 From 08900ab73225584e4a260223a0848e2825e226fe Mon Sep 17 00:00:00 2001 From: Samson Tam Date: Tue, 4 Jun 2019 15:52:59 -0400 Subject: drm/amd/display: skip retrain in dc_link_set_preferred_link_settings() if using passive dongle [Why] Fixes issue when we have a display connected using a passive dongle and then emulate over it using a DP connection at 1 x 1.62 Ghz. System hangs because register bus returns back 0xFFFFFFFF for all register reads after setting register DIG_BE_CNTL in dcn10_link_encoder_connect_dig_be_to_fe(). Hang occurs later when trying to do a register read. [How] At the start of the emulation, dc_link_set_preferred_link_settings() and dp_retrain_link_dp_test() is called, even though it is connected using a passive dongle. Add an extra condition in dp_retrain_link_dp_test() to check for link->dongle_max_pix_clk > 0. This is the only way we know if the connection is using passive dongle so we don't retrain DP. Signed-off-by: Samson Tam Reviewed-by: Jun Lei Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index b4293dca16c8..caa707567b4f 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -3000,8 +3000,10 @@ void dc_link_set_preferred_link_settings(struct dc *dc, /* Retrain with preferred link settings only relevant for * DP signal type + * Check for non-DP signal or if passive dongle present */ - if (!dc_is_dp_signal(link->connector_signal)) + if (!dc_is_dp_signal(link->connector_signal) || + link->dongle_max_pix_clk > 0) return; for (i = 0; i < MAX_PIPES; i++) { -- cgit v1.2.3 From c70b4016306a10b2c6e5d5da96a1f04a6248900f Mon Sep 17 00:00:00 2001 From: Charlene Liu Date: Wed, 5 Jun 2019 15:21:03 -0400 Subject: drm/amd/display: Split out common HUBP registers and code There are shared regs and code across DCN generations. Pull them out into a shared common location. Also, expose some dcn20 init functions. 
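The layering can be pictured with a tiny X-macro style sketch (illustrative only; the driver composes plain HUBP_SF() field-list macros rather than X-macros): one list carries the fields every DCN generation shares, a second list appends the fields that only DCN1.x/2.x still program, and each ASIC's final field list is built from exactly the layers it needs.

#include <stdio.h>

#define HUBP_FIELDS_SHARED(X)   X(HUBP_BLANK_EN) X(HUBP_TTU_DISABLE)
#define HUBP_FIELDS_DCN1_2(X)   HUBP_FIELDS_SHARED(X) X(RB_ALIGNED) X(MPTE_GROUP_SIZE)

#define PRINT_FIELD(name)       puts(#name);

int main(void)
{
        puts("shared across DCN generations:");
        HUBP_FIELDS_SHARED(PRINT_FIELD)
        puts("DCN1.x/2.x only:");
        HUBP_FIELDS_DCN1_2(PRINT_FIELD)
        return 0;
}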
Signed-off-by: Charlene Liu Reviewed-by: Dmytro Laktyushkin Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h | 3 +- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 50 ++++++++++++++-------- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 23 +++++++--- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h | 23 ++++++---- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 43 ++++++++++++++++++- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h | 14 +++--- 6 files changed, 109 insertions(+), 47 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h index cb0a037b1c4a..3a49f1ffb5dd 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h @@ -696,7 +696,8 @@ struct dce_hwseq_registers { type D2VGA_MODE_ENABLE; \ type D3VGA_MODE_ENABLE; \ type D4VGA_MODE_ENABLE; \ - type AZALIA_AUDIO_DTO_MODULE; + type AZALIA_AUDIO_DTO_MODULE;\ + type HPO_HDMISTREAMCLK_GATE_DIS; struct dce_hwseq_shift { HWSEQ_REG_FIELD_LIST(uint8_t) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index 934bacc0c6ad..a16128814d62 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c @@ -843,7 +843,7 @@ void min_set_viewport( PRI_VIEWPORT_Y_START_C, viewport_c->y); } -void hubp1_read_state(struct hubp *hubp) +void hubp1_read_state_common(struct hubp *hubp) { struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); struct dcn_hubp_state *s = &hubp1->state; @@ -859,24 +859,6 @@ void hubp1_read_state(struct hubp *hubp) PRQ_EXPANSION_MODE, &rq_regs->prq_expansion_mode, MRQ_EXPANSION_MODE, &rq_regs->mrq_expansion_mode, CRQ_EXPANSION_MODE, &rq_regs->crq_expansion_mode); - REG_GET_8(DCHUBP_REQ_SIZE_CONFIG, - CHUNK_SIZE, &rq_regs->rq_regs_l.chunk_size, - MIN_CHUNK_SIZE, &rq_regs->rq_regs_l.min_chunk_size, - META_CHUNK_SIZE, &rq_regs->rq_regs_l.meta_chunk_size, - MIN_META_CHUNK_SIZE, &rq_regs->rq_regs_l.min_meta_chunk_size, - DPTE_GROUP_SIZE, &rq_regs->rq_regs_l.dpte_group_size, - MPTE_GROUP_SIZE, &rq_regs->rq_regs_l.mpte_group_size, - SWATH_HEIGHT, &rq_regs->rq_regs_l.swath_height, - PTE_ROW_HEIGHT_LINEAR, &rq_regs->rq_regs_l.pte_row_height_linear); - REG_GET_8(DCHUBP_REQ_SIZE_CONFIG_C, - CHUNK_SIZE_C, &rq_regs->rq_regs_c.chunk_size, - MIN_CHUNK_SIZE_C, &rq_regs->rq_regs_c.min_chunk_size, - META_CHUNK_SIZE_C, &rq_regs->rq_regs_c.meta_chunk_size, - MIN_META_CHUNK_SIZE_C, &rq_regs->rq_regs_c.min_meta_chunk_size, - DPTE_GROUP_SIZE_C, &rq_regs->rq_regs_c.dpte_group_size, - MPTE_GROUP_SIZE_C, &rq_regs->rq_regs_c.mpte_group_size, - SWATH_HEIGHT_C, &rq_regs->rq_regs_c.swath_height, - PTE_ROW_HEIGHT_LINEAR_C, &rq_regs->rq_regs_c.pte_row_height_linear); /* DLG - Per hubp */ REG_GET_2(BLANK_OFFSET_0, @@ -1030,8 +1012,38 @@ void hubp1_read_state(struct hubp *hubp) REG_GET_2(DCN_TTU_QOS_WM, QoS_LEVEL_LOW_WM, &s->qos_level_low_wm, QoS_LEVEL_HIGH_WM, &s->qos_level_high_wm); + } +void hubp1_read_state(struct hubp *hubp) +{ + struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); + struct dcn_hubp_state *s = &hubp1->state; + struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs; + + hubp1_read_state_common(hubp); + + REG_GET_8(DCHUBP_REQ_SIZE_CONFIG, + CHUNK_SIZE, &rq_regs->rq_regs_l.chunk_size, + MIN_CHUNK_SIZE, &rq_regs->rq_regs_l.min_chunk_size, + META_CHUNK_SIZE, &rq_regs->rq_regs_l.meta_chunk_size, + MIN_META_CHUNK_SIZE, 
&rq_regs->rq_regs_l.min_meta_chunk_size, + DPTE_GROUP_SIZE, &rq_regs->rq_regs_l.dpte_group_size, + MPTE_GROUP_SIZE, &rq_regs->rq_regs_l.mpte_group_size, + SWATH_HEIGHT, &rq_regs->rq_regs_l.swath_height, + PTE_ROW_HEIGHT_LINEAR, &rq_regs->rq_regs_l.pte_row_height_linear); + + REG_GET_8(DCHUBP_REQ_SIZE_CONFIG_C, + CHUNK_SIZE_C, &rq_regs->rq_regs_c.chunk_size, + MIN_CHUNK_SIZE_C, &rq_regs->rq_regs_c.min_chunk_size, + META_CHUNK_SIZE_C, &rq_regs->rq_regs_c.meta_chunk_size, + MIN_META_CHUNK_SIZE_C, &rq_regs->rq_regs_c.min_meta_chunk_size, + DPTE_GROUP_SIZE_C, &rq_regs->rq_regs_c.dpte_group_size, + MPTE_GROUP_SIZE_C, &rq_regs->rq_regs_c.mpte_group_size, + SWATH_HEIGHT_C, &rq_regs->rq_regs_c.swath_height, + PTE_ROW_HEIGHT_LINEAR_C, &rq_regs->rq_regs_c.pte_row_height_linear); + +} enum cursor_pitch hubp1_get_cursor_pitch(unsigned int pitch) { enum cursor_pitch hw_pitch; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h index 31c8fdd3206c..8f4bcdc74116 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h @@ -249,7 +249,8 @@ .field_name = reg_name ## __ ## field_name ## post_fix /* Mask/shift struct generation macro for all ASICs (including those with reduced functionality) */ -#define HUBP_MASK_SH_LIST_DCN_COMMON(mask_sh)\ +/*1.x, 2.x, and 3.x*/ +#define HUBP_MASK_SH_LIST_DCN_SHARE_COMMON(mask_sh)\ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_BLANK_EN, mask_sh),\ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_TTU_DISABLE, mask_sh),\ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_STATUS, mask_sh),\ @@ -265,7 +266,6 @@ HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, MAX_COMPRESSED_FRAGS, mask_sh),\ HUBP_SF(HUBP0_DCSURF_TILING_CONFIG, SW_MODE, mask_sh),\ HUBP_SF(HUBP0_DCSURF_TILING_CONFIG, META_LINEAR, mask_sh),\ - HUBP_SF(HUBP0_DCSURF_TILING_CONFIG, RB_ALIGNED, mask_sh),\ HUBP_SF(HUBP0_DCSURF_TILING_CONFIG, PIPE_ALIGNED, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_PITCH, META_PITCH, mask_sh),\ @@ -372,12 +372,17 @@ HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL0, QoS_RAMP_DISABLE, mask_sh),\ HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL1, REFCYC_PER_REQ_DELIVERY_PRE, mask_sh),\ HUBP_SF(HUBP0_HUBP_CLK_CNTL, HUBP_CLOCK_ENABLE, mask_sh) - -#define HUBP_MASK_SH_LIST_DCN(mask_sh)\ - HUBP_MASK_SH_LIST_DCN_COMMON(mask_sh),\ +/*2.x and 1.x only*/ +#define HUBP_MASK_SH_LIST_DCN_COMMON(mask_sh)\ + HUBP_MASK_SH_LIST_DCN_SHARE_COMMON(mask_sh),\ + HUBP_SF(HUBP0_DCSURF_TILING_CONFIG, RB_ALIGNED, mask_sh),\ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MPTE_GROUP_SIZE, mask_sh),\ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MPTE_GROUP_SIZE_C, mask_sh) +/*2.x and 1.x only*/ +#define HUBP_MASK_SH_LIST_DCN(mask_sh)\ + HUBP_MASK_SH_LIST_DCN_COMMON(mask_sh) + /* Mask/shift struct generation macro for ASICs with VM */ #define HUBP_MASK_SH_LIST_DCN_VM(mask_sh)\ HUBP_SF(HUBPREQ0_NOM_PARAMETERS_0, DST_Y_PER_PTE_ROW_NOM_L, mask_sh),\ @@ -434,7 +439,7 @@ HUBP_SF(CURSOR0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \ HUBP_SF(CURSOR0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh) -#define DCN_HUBP_REG_FIELD_LIST(type) \ +#define DCN_HUBP_REG_FIELD_BASE_LIST(type) \ type HUBP_BLANK_EN;\ type HUBP_DISABLE;\ type HUBP_TTU_DISABLE;\ @@ -459,7 +464,6 @@ type ROTATION_ANGLE;\ type H_MIRROR_EN;\ type SURFACE_PIXEL_FORMAT;\ - type ALPHA_PLANE_EN;\ type SURFACE_FLIP_TYPE;\ type SURFACE_FLIP_MODE_FOR_STEREOSYNC;\ type SURFACE_FLIP_IN_STEREOSYNC;\ @@ -632,6 +636,10 @@ type CURSOR_DST_X_OFFSET; \ type OUTPUT_FP +#define 
DCN_HUBP_REG_FIELD_LIST(type) \ + DCN_HUBP_REG_FIELD_BASE_LIST(type);\ + type ALPHA_PLANE_EN + struct dcn_mi_registers { HUBP_COMMON_REG_VARIABLE_LIST; }; @@ -760,5 +768,6 @@ void hubp1_vready_workaround(struct hubp *hubp, struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest); void hubp1_init(struct hubp *hubp); +void hubp1_read_state_common(struct hubp *hubp); #endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h index d5acc348be22..2c6405a62fc1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h @@ -72,8 +72,8 @@ SR(DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB),\ SR(DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB) -#define HUBP_MASK_SH_LIST_DCN2_COMMON(mask_sh)\ - HUBP_MASK_SH_LIST_DCN(mask_sh),\ +#define HUBP_MASK_SH_LIST_DCN2_SHARE_COMMON(mask_sh)\ + HUBP_MASK_SH_LIST_DCN_SHARE_COMMON(mask_sh),\ HUBP_MASK_SH_LIST_DCN_VM(mask_sh),\ HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, ROTATION_ANGLE, mask_sh),\ HUBP_SF(HUBP0_DCSURF_SURFACE_CONFIG, H_MIRROR_EN, mask_sh),\ @@ -127,13 +127,21 @@ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL2, SURFACE_TRIPLE_BUFFER_ENABLE, mask_sh),\ HUBP_SF(HUBPREQ0_VMID_SETTINGS_0, VMID, mask_sh) +/*DCN2.x and DCN1.x*/ +#define HUBP_MASK_SH_LIST_DCN2_COMMON(mask_sh)\ + HUBP_MASK_SH_LIST_DCN2_SHARE_COMMON(mask_sh),\ + HUBP_SF(HUBP0_DCSURF_TILING_CONFIG, RB_ALIGNED, mask_sh),\ + HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MPTE_GROUP_SIZE, mask_sh),\ + HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MPTE_GROUP_SIZE_C, mask_sh) + +/*DCN2.0 specific*/ #define HUBP_MASK_SH_LIST_DCN20(mask_sh)\ HUBP_MASK_SH_LIST_DCN2_COMMON(mask_sh),\ HUBP_SF(DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, DCN_VM_SYSTEM_APERTURE_DEFAULT_SYSTEM, mask_sh),\ HUBP_SF(DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, mask_sh),\ HUBP_SF(DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, mask_sh) - +/*DCN2.x */ #define DCN2_HUBP_REG_COMMON_VARIABLE_LIST \ HUBP_COMMON_REG_VARIABLE_LIST; \ uint32_t DMDATA_ADDRESS_HIGH; \ @@ -149,14 +157,11 @@ uint32_t FLIP_PARAMETERS_2;\ uint32_t DCN_CUR1_TTU_CNTL0;\ uint32_t DCN_CUR1_TTU_CNTL1;\ - uint32_t VMID_SETTINGS_0;\ - uint32_t FLIP_PARAMETERS_3;\ - uint32_t FLIP_PARAMETERS_4;\ - uint32_t VBLANK_PARAMETERS_5;\ - uint32_t VBLANK_PARAMETERS_6 + uint32_t VMID_SETTINGS_0 + #define DCN2_HUBP_REG_FIELD_VARIABLE_LIST(type) \ - DCN_HUBP_REG_FIELD_LIST(type); \ + DCN_HUBP_REG_FIELD_BASE_LIST(type); \ type DMDATA_ADDRESS_HIGH;\ type DMDATA_MODE;\ type DMDATA_UPDATED;\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 710727e5d0f8..9fa11c001f1b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -115,7 +115,7 @@ static void enable_power_gating_plane( REG_UPDATE(DOMAIN21_PG_CONFIG, DOMAIN21_POWER_FORCEON, force_on); } -static void dcn20_dccg_init(struct dce_hwseq *hws) +void dcn20_dccg_init(struct dce_hwseq *hws) { /* * set MICROSECOND_TIME_BASE_DIV @@ -138,6 +138,45 @@ static void dcn20_dccg_init(struct dce_hwseq *hws) /* This value is dependent on the hardware pipeline delay so set once per SOC */ REG_WRITE(DISPCLK_FREQ_CHANGE_CNTL, 0x801003c); } +void dcn20_display_init(struct dc *dc) +{ + struct dce_hwseq *hws = dc->hwseq; + + /* RBBMIF + * disable RBBMIF timeout detection for all clients + * Ensure RBBMIF does not drop register accesses due to the per-client timeout + */ + 
REG_WRITE(RBBMIF_TIMEOUT_DIS, 0xFFFFFFFF); + REG_WRITE(RBBMIF_TIMEOUT_DIS_2, 0xFFFFFFFF); + + /* DCCG */ + dcn20_dccg_init(hws); + + /* Disable all memory low power mode. All memories are enabled. */ + REG_UPDATE(DC_MEM_GLOBAL_PWR_REQ_CNTL, DC_MEM_GLOBAL_PWR_REQ_DIS, 1); + + /* DCHUB/MMHUBBUB + * set global timer refclk divider + * 100Mhz refclk -> 2 + * 27Mhz refclk -> 1 + * 48Mhz refclk -> 1 + */ + REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2); + REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1); + REG_WRITE(REFCLK_CNTL, 0); + + /* OPTC + * OTG_CONTROL.OTG_DISABLE_POINT_CNTL = 0x3; will be set during optc2_enable_crtc + */ + + /* AZ + * default value is 0x64 for 100Mhz ref clock, if the ref clock is 100Mhz, no need to program this regiser, + * if not, it should be programmed according to the ref clock + */ + REG_UPDATE(AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, 0x64); + /* Enable controller clock gating */ + REG_WRITE(AZALIA_CONTROLLER_CLOCK_GATING, 0x1); +} static void disable_vga( struct dce_hwseq *hws) @@ -163,7 +202,7 @@ void dcn20_program_tripleBuffer( } /* Blank pixel data during initialization */ -static void dcn20_init_blank( +void dcn20_init_blank( struct dc *dc, struct timing_generator *tg) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h index 2b0409454073..689c2765b071 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h @@ -91,13 +91,9 @@ void dcn20_pipe_control_lock_global( void dcn20_setup_gsl_group_as_lock(const struct dc *dc, struct pipe_ctx *pipe_ctx, bool enable); -void dcn20_pipe_control_lock( - struct dc *dc, - struct pipe_ctx *pipe, - bool lock); -void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx); -void dcn20_enable_plane( - struct dc *dc, - struct pipe_ctx *pipe_ctx, - struct dc_state *context); +void dcn20_dccg_init(struct dce_hwseq *hws); +void dcn20_init_blank( + struct dc *dc, + struct timing_generator *tg); +void dcn20_display_init(struct dc *dc); #endif /* __DC_HWSS_DCN20_H__ */ -- cgit v1.2.3 From 0b6cbbd5da8f3ab99442bdc0df6b6bb619f87d5a Mon Sep 17 00:00:00 2001 From: Aric Cyr Date: Mon, 10 Jun 2019 08:49:36 -0400 Subject: drm/amd/display: 3.2.38 Signed-off-by: Aric Cyr Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index c0ebb77fab70..a307a3a1cde4 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -39,7 +39,7 @@ #include "inc/hw/dmcu.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.2.37" +#define DC_VER "3.2.38" #define MAX_SURFACES 3 #define MAX_PLANES 6 -- cgit v1.2.3 From 0430017149c53f20493ebeee856315c669d18f4d Mon Sep 17 00:00:00 2001 From: David Galiffi Date: Fri, 7 Jun 2019 21:32:34 -0400 Subject: drm/amd/display: Incorrect Read Interval Time For CR Sequence [WHY] TRAINING_AUX_RD_INTERVAL (DPCD 000Eh) modifies the read interval for the EQ training sequence. CR read interval should remain 100 us. Currently, the CR interval is also being modified. [HOW] lt_settings->cr_pattern_time should always be 100 us. 
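As a sketch of the intended split (standalone helpers with made-up names, not the driver's functions; the DPCD decoding shown is the usual DP rule and should be checked against the spec for the sink in question): the clock recovery phase always polls at a fixed 100 us, and only the equalization phase honours TRAINING_AUX_RD_INTERVAL (DPCD 000Eh), where 0 means 400 us and a non-zero value N means N * 4 ms.

static unsigned int cr_pattern_time_us(void)
{
        return 100;                     /* fixed for the CR phase */
}

static unsigned int eq_pattern_time_us(unsigned char training_aux_rd_interval)
{
        unsigned int n = training_aux_rd_interval & 0x7f;

        return n ? n * 4000 : 400;      /* DPCD 000Eh: 0 -> 400 us, N -> N * 4 ms */
}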
Signed-off-by: David Galiffi Reviewed-by: Tony Cheng Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index fca1bfc901b6..4442e7b1e5b5 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -1035,7 +1035,7 @@ static void initialize_training_settings( if (link->preferred_training_settings.cr_pattern_time != NULL) lt_settings->cr_pattern_time = *link->preferred_training_settings.cr_pattern_time; else - lt_settings->cr_pattern_time = get_training_aux_rd_interval(link, 100); + lt_settings->cr_pattern_time = 100; if (link->preferred_training_settings.eq_pattern_time != NULL) lt_settings->eq_pattern_time = *link->preferred_training_settings.eq_pattern_time; -- cgit v1.2.3 From 08d238e68989b2389afb7d0cb489386ffc43e13c Mon Sep 17 00:00:00 2001 From: Murton Liu Date: Mon, 10 Jun 2019 17:55:28 -0400 Subject: drm/amd/display: Clock does not lower in Updateplanes [why] We reset the optimized_required in atomic_plane_disable flag immediately after it is set in atomic_plane_disconnect, causing us to never have flag set during next flip in UpdatePlanes. [how] Optimize directly after each time plane is removed. Signed-off-by: Murton Liu Reviewed-by: Tony Cheng Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index e50a696fcb5d..0c4340404e24 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -2516,6 +2516,12 @@ static void dcn10_apply_ctx_for_surface( if (removed_pipe[i]) dcn10_disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]); + for (i = 0; i < dc->res_pool->pipe_count; i++) + if (removed_pipe[i]) { + dc->hwss.optimize_bandwidth(dc, context); + break; + } + if (dc->hwseq->wa.DEGVIDCN10_254) hubbub1_wm_change_req_wa(dc->res_pool->hubbub); } -- cgit v1.2.3 From 7cecfe9d1976f6da82ba2836918f13526c1cf3d1 Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Wed, 5 Jun 2019 15:02:04 -0400 Subject: drm/amd/display: Copy max_clks_by_state after dce_clk_mgr_construct [Why] For DCE110, DCE112 and DCE120 the max_clks_by_state for the clk_mgr are copied from their respective table before the call to dce_clk_mgr_construct, but then dce_clk_mgr_construct overwrites these with the dce80_max_clks_by_state. [How] Copy these after we call dce_clk_mgr_construct so we're using the right tables. 
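The ordering bug is easy to reproduce in a standalone sketch (the struct layout and tables below are invented for the example; only the function and field names come from the patch): the generic constructor installs its own default table, so a platform-specific memcpy() done before the constructor is silently overwritten, while the same copy done after it sticks.

#include <stdio.h>
#include <string.h>

struct clk_mgr { int max_clks_by_state[3]; };

static const int dce_defaults[3] = { 100, 200, 300 };
static const int dce112_table[3] = { 150, 250, 350 };

static void dce_clk_mgr_construct(struct clk_mgr *clk_mgr)
{
        memcpy(clk_mgr->max_clks_by_state, dce_defaults, sizeof(dce_defaults));
}

static void dce112_clk_mgr_construct(struct clk_mgr *clk_mgr)
{
        dce_clk_mgr_construct(clk_mgr);                 /* base defaults first */
        memcpy(clk_mgr->max_clks_by_state, dce112_table,
               sizeof(dce112_table));                   /* then the per-ASIC table */
}

int main(void)
{
        struct clk_mgr mgr;

        dce112_clk_mgr_construct(&mgr);
        printf("%d %d %d\n", mgr.max_clks_by_state[0],
               mgr.max_clks_by_state[1], mgr.max_clks_by_state[2]);
        return 0;                                       /* prints 150 250 350 */
}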
Signed-off-by: Nicholas Kazlauskas Reviewed-by: David Francis Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c | 4 ++-- drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c | 4 ++-- drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c index c1a92c16535c..5cc3acccda2a 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c @@ -262,12 +262,12 @@ void dce110_clk_mgr_construct( struct dc_context *ctx, struct clk_mgr_internal *clk_mgr) { + dce_clk_mgr_construct(ctx, clk_mgr); + memcpy(clk_mgr->max_clks_by_state, dce110_max_clks_by_state, sizeof(dce110_max_clks_by_state)); - dce_clk_mgr_construct(ctx, clk_mgr); - clk_mgr->regs = &disp_clk_regs; clk_mgr->clk_mgr_shift = &disp_clk_shift; clk_mgr->clk_mgr_mask = &disp_clk_mask; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c index 778392c73187..7c746ef1e32e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c @@ -226,12 +226,12 @@ void dce112_clk_mgr_construct( struct dc_context *ctx, struct clk_mgr_internal *clk_mgr) { + dce_clk_mgr_construct(ctx, clk_mgr); + memcpy(clk_mgr->max_clks_by_state, dce112_max_clks_by_state, sizeof(dce112_max_clks_by_state)); - dce_clk_mgr_construct(ctx, clk_mgr); - clk_mgr->regs = &disp_clk_regs; clk_mgr->clk_mgr_shift = &disp_clk_shift; clk_mgr->clk_mgr_mask = &disp_clk_mask; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c index 906310c3e2eb..5399b8cf6b75 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c @@ -127,12 +127,12 @@ static struct clk_mgr_funcs dce120_funcs = { void dce120_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_mgr) { + dce_clk_mgr_construct(ctx, clk_mgr); + memcpy(clk_mgr->max_clks_by_state, dce120_max_clks_by_state, sizeof(dce120_max_clks_by_state)); - dce_clk_mgr_construct(ctx, clk_mgr); - clk_mgr->base.dprefclk_khz = 600000; clk_mgr->base.funcs = &dce120_funcs; } -- cgit v1.2.3 From 40fd9090aec688be730d54a00fd2fdcb37e16701 Mon Sep 17 00:00:00 2001 From: Nevenko Stupar Date: Tue, 11 Jun 2019 17:35:16 -0400 Subject: drm/amd/display:Use Pixel clock in 100Hz units for HDMI Audio wall clock DTO [Why] -Pass and use pixel clock in 100 Hz to Audio for HDMI audio DTO for Audio wall clock programming so audio DTO gets increased precision for timings with /1001 factor. -For HDMI TMDS for N and CTS ACR tables are based on 10 KHz units, these does not need to be modified as N and CTS values are still valid using current tables. 
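A quick worked example of the precision argument (illustrative numbers): a 148.5 MHz / 1.001 pixel clock loses its fractional kHz when carried in kHz units, but keeps one more significant digit in 100 Hz units, which is the digit the audio wall clock DTO needs for /1001 timings.

#include <stdio.h>

int main(void)
{
        unsigned long long hz = 148500000000ULL / 1001ULL;   /* 148351648 Hz */

        printf("kHz units    : %llu\n", hz / 1000);           /* 148351  */
        printf("100 Hz units : %llu\n", hz / 100);            /* 1483516 */
        return 0;
}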
Signed-off-by: Nevenko Stupar Reviewed-by: Charlene Liu Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dce/dce_audio.c | 28 ++++++++++---------- .../drm/amd/display/dc/dce/dce_stream_encoder.c | 30 +++++++++++----------- .../amd/display/dc/dce110/dce110_hw_sequencer.c | 22 ++++++++-------- .../amd/display/dc/dcn10/dcn10_stream_encoder.c | 30 +++++++++++----------- .../amd/display/dc/dcn10/dcn10_stream_encoder.h | 4 +-- drivers/gpu/drm/amd/display/include/audio_types.h | 4 +-- 6 files changed, 59 insertions(+), 59 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c index 4a10a5d22c90..6147530144eb 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c @@ -145,20 +145,20 @@ static void check_audio_bandwidth_hdmi( if (channel_count > 2) { /* Based on HDMI spec 1.3 Table 7.5 */ - if ((crtc_info->requested_pixel_clock <= 27000) && + if ((crtc_info->requested_pixel_clock_100Hz <= 270000) && (crtc_info->v_active <= 576) && !(crtc_info->interlaced) && !(crtc_info->pixel_repetition == 2 || crtc_info->pixel_repetition == 4)) { limit_freq_to_48_khz = true; - } else if ((crtc_info->requested_pixel_clock <= 27000) && + } else if ((crtc_info->requested_pixel_clock_100Hz <= 270000) && (crtc_info->v_active <= 576) && (crtc_info->interlaced) && (crtc_info->pixel_repetition == 2)) { limit_freq_to_88_2_khz = true; - } else if ((crtc_info->requested_pixel_clock <= 54000) && + } else if ((crtc_info->requested_pixel_clock_100Hz <= 540000) && (crtc_info->v_active <= 576) && !(crtc_info->interlaced)) { limit_freq_to_174_4_khz = true; @@ -737,8 +737,8 @@ void dce_aud_az_configure( /* search pixel clock value for Azalia HDMI Audio */ static void get_azalia_clock_info_hdmi( - uint32_t crtc_pixel_clock_in_khz, - uint32_t actual_pixel_clock_in_khz, + uint32_t crtc_pixel_clock_100hz, + uint32_t actual_pixel_clock_100Hz, struct azalia_clock_info *azalia_clock_info) { /* audio_dto_phase= 24 * 10,000; @@ -749,11 +749,11 @@ static void get_azalia_clock_info_hdmi( /* audio_dto_module = PCLKFrequency * 10,000; * [khz] -> [100Hz] */ azalia_clock_info->audio_dto_module = - actual_pixel_clock_in_khz * 10; + actual_pixel_clock_100Hz; } static void get_azalia_clock_info_dp( - uint32_t requested_pixel_clock_in_khz, + uint32_t requested_pixel_clock_100Hz, const struct audio_pll_info *pll_info, struct azalia_clock_info *azalia_clock_info) { @@ -792,15 +792,15 @@ void dce_aud_wall_dto_setup( /* calculate DTO settings */ get_azalia_clock_info_hdmi( - crtc_info->requested_pixel_clock, - crtc_info->calculated_pixel_clock, + crtc_info->requested_pixel_clock_100Hz, + crtc_info->calculated_pixel_clock_100Hz, &clock_info); - DC_LOG_HW_AUDIO("\n%s:Input::requested_pixel_clock = %d"\ - "calculated_pixel_clock =%d\n"\ + DC_LOG_HW_AUDIO("\n%s:Input::requested_pixel_clock_100Hz = %d"\ + "calculated_pixel_clock_100Hz =%d\n"\ "audio_dto_module = %d audio_dto_phase =%d \n\n", __func__,\ - crtc_info->requested_pixel_clock,\ - crtc_info->calculated_pixel_clock,\ + crtc_info->requested_pixel_clock_100Hz,\ + crtc_info->calculated_pixel_clock_100Hz,\ clock_info.audio_dto_module,\ clock_info.audio_dto_phase); @@ -833,7 +833,7 @@ void dce_aud_wall_dto_setup( calculate DTO settings */ get_azalia_clock_info_dp( - crtc_info->requested_pixel_clock, + crtc_info->requested_pixel_clock_100Hz, pll_info, &clock_info); diff --git 
a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c index 5e2b4d47c548..84bbff665be9 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c @@ -1251,13 +1251,13 @@ static uint32_t calc_max_audio_packets_per_line( static void get_audio_clock_info( enum dc_color_depth color_depth, - uint32_t crtc_pixel_clock_in_khz, - uint32_t actual_pixel_clock_in_khz, + uint32_t crtc_pixel_clock_100Hz, + uint32_t actual_pixel_clock_100Hz, struct audio_clock_info *audio_clock_info) { const struct audio_clock_info *clock_info; uint32_t index; - uint32_t crtc_pixel_clock_in_10khz = crtc_pixel_clock_in_khz / 10; + uint32_t crtc_pixel_clock_in_10khz = crtc_pixel_clock_100Hz / 100; uint32_t audio_array_size; switch (color_depth) { @@ -1294,16 +1294,16 @@ static void get_audio_clock_info( } /* not found */ - if (actual_pixel_clock_in_khz == 0) - actual_pixel_clock_in_khz = crtc_pixel_clock_in_khz; + if (actual_pixel_clock_100Hz == 0) + actual_pixel_clock_100Hz = crtc_pixel_clock_100Hz; /* See HDMI spec the table entry under * pixel clock of "Other". */ audio_clock_info->pixel_clock_in_10khz = - actual_pixel_clock_in_khz / 10; - audio_clock_info->cts_32khz = actual_pixel_clock_in_khz; - audio_clock_info->cts_44khz = actual_pixel_clock_in_khz; - audio_clock_info->cts_48khz = actual_pixel_clock_in_khz; + actual_pixel_clock_100Hz / 100; + audio_clock_info->cts_32khz = actual_pixel_clock_100Hz / 10; + audio_clock_info->cts_44khz = actual_pixel_clock_100Hz / 10; + audio_clock_info->cts_48khz = actual_pixel_clock_100Hz / 10; audio_clock_info->n_32khz = 4096; audio_clock_info->n_44khz = 6272; @@ -1369,14 +1369,14 @@ static void dce110_se_setup_hdmi_audio( /* Program audio clock sample/regeneration parameters */ get_audio_clock_info(crtc_info->color_depth, - crtc_info->requested_pixel_clock, - crtc_info->calculated_pixel_clock, + crtc_info->requested_pixel_clock_100Hz, + crtc_info->calculated_pixel_clock_100Hz, &audio_clock_info); DC_LOG_HW_AUDIO( - "\n%s:Input::requested_pixel_clock = %d" \ - "calculated_pixel_clock = %d \n", __func__, \ - crtc_info->requested_pixel_clock, \ - crtc_info->calculated_pixel_clock); + "\n%s:Input::requested_pixel_clock_100Hz = %d" \ + "calculated_pixel_clock_100Hz = %d \n", __func__, \ + crtc_info->requested_pixel_clock_100Hz, \ + crtc_info->calculated_pixel_clock_100Hz); /* HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK */ REG_UPDATE(HDMI_ACR_32_0, HDMI_ACR_CTS_32, audio_clock_info.cts_32khz); diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 858a58856ebd..3a937e297a89 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -1162,27 +1162,27 @@ static void build_audio_output( stream->timing.flags.INTERLACE; audio_output->crtc_info.refresh_rate = - (stream->timing.pix_clk_100hz*10000)/ + (stream->timing.pix_clk_100hz*100)/ (stream->timing.h_total*stream->timing.v_total); audio_output->crtc_info.color_depth = stream->timing.display_color_depth; - audio_output->crtc_info.requested_pixel_clock = - pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz / 10; + audio_output->crtc_info.requested_pixel_clock_100Hz = + pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz; - audio_output->crtc_info.calculated_pixel_clock = - pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz / 
10; + audio_output->crtc_info.calculated_pixel_clock_100Hz = + pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz; /*for HDMI, audio ACR is with deep color ratio factor*/ if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && - audio_output->crtc_info.requested_pixel_clock == - (stream->timing.pix_clk_100hz / 10)) { + audio_output->crtc_info.requested_pixel_clock_100Hz == + (stream->timing.pix_clk_100hz)) { if (pipe_ctx->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420) { - audio_output->crtc_info.requested_pixel_clock = - audio_output->crtc_info.requested_pixel_clock/2; - audio_output->crtc_info.calculated_pixel_clock = - pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz/20; + audio_output->crtc_info.requested_pixel_clock_100Hz = + audio_output->crtc_info.requested_pixel_clock_100Hz/2; + audio_output->crtc_info.calculated_pixel_clock_100Hz = + pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz/2; } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c index b9ffbf6b58ff..128e040b7848 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c @@ -1196,13 +1196,13 @@ static union audio_cea_channels speakers_to_channels( void get_audio_clock_info( enum dc_color_depth color_depth, - uint32_t crtc_pixel_clock_in_khz, - uint32_t actual_pixel_clock_in_khz, + uint32_t crtc_pixel_clock_100Hz, + uint32_t actual_pixel_clock_100Hz, struct audio_clock_info *audio_clock_info) { const struct audio_clock_info *clock_info; uint32_t index; - uint32_t crtc_pixel_clock_in_10khz = crtc_pixel_clock_in_khz / 10; + uint32_t crtc_pixel_clock_in_10khz = crtc_pixel_clock_100Hz / 100; uint32_t audio_array_size; switch (color_depth) { @@ -1239,16 +1239,16 @@ void get_audio_clock_info( } /* not found */ - if (actual_pixel_clock_in_khz == 0) - actual_pixel_clock_in_khz = crtc_pixel_clock_in_khz; + if (actual_pixel_clock_100Hz == 0) + actual_pixel_clock_100Hz = crtc_pixel_clock_100Hz; /* See HDMI spec the table entry under * pixel clock of "Other". 
*/ audio_clock_info->pixel_clock_in_10khz = - actual_pixel_clock_in_khz / 10; - audio_clock_info->cts_32khz = actual_pixel_clock_in_khz; - audio_clock_info->cts_44khz = actual_pixel_clock_in_khz; - audio_clock_info->cts_48khz = actual_pixel_clock_in_khz; + actual_pixel_clock_100Hz / 100; + audio_clock_info->cts_32khz = actual_pixel_clock_100Hz / 10; + audio_clock_info->cts_44khz = actual_pixel_clock_100Hz / 10; + audio_clock_info->cts_48khz = actual_pixel_clock_100Hz / 10; audio_clock_info->n_32khz = 4096; audio_clock_info->n_44khz = 6272; @@ -1308,14 +1308,14 @@ static void enc1_se_setup_hdmi_audio( /* Program audio clock sample/regeneration parameters */ get_audio_clock_info(crtc_info->color_depth, - crtc_info->requested_pixel_clock, - crtc_info->calculated_pixel_clock, + crtc_info->requested_pixel_clock_100Hz, + crtc_info->calculated_pixel_clock_100Hz, &audio_clock_info); DC_LOG_HW_AUDIO( - "\n%s:Input::requested_pixel_clock = %d" \ - "calculated_pixel_clock = %d \n", __func__, \ - crtc_info->requested_pixel_clock, \ - crtc_info->calculated_pixel_clock); + "\n%s:Input::requested_pixel_clock_100Hz = %d" \ + "calculated_pixel_clock_100Hz = %d \n", __func__, \ + crtc_info->requested_pixel_clock_100Hz, \ + crtc_info->calculated_pixel_clock_100Hz); /* HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK */ REG_UPDATE(HDMI_ACR_32_0, HDMI_ACR_CTS_32, audio_clock_info.cts_32khz); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h index bc2b4af9543b..075e49c1283a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h @@ -605,8 +605,8 @@ void enc1_se_enable_dp_audio( void get_audio_clock_info( enum dc_color_depth color_depth, - uint32_t crtc_pixel_clock_in_khz, - uint32_t actual_pixel_clock_in_khz, + uint32_t crtc_pixel_clock_100Hz, + uint32_t actual_pixel_clock_100Hz, struct audio_clock_info *audio_clock_info); #endif /* __DC_STREAM_ENCODER_DCN10_H__ */ diff --git a/drivers/gpu/drm/amd/display/include/audio_types.h b/drivers/gpu/drm/amd/display/include/audio_types.h index 6364fbc24cfe..66a54da0641c 100644 --- a/drivers/gpu/drm/amd/display/include/audio_types.h +++ b/drivers/gpu/drm/amd/display/include/audio_types.h @@ -38,8 +38,8 @@ struct audio_crtc_info { uint32_t h_active; uint32_t v_active; uint32_t pixel_repetition; - uint32_t requested_pixel_clock; /* in KHz */ - uint32_t calculated_pixel_clock; /* in KHz */ + uint32_t requested_pixel_clock_100Hz; /* in 100Hz */ + uint32_t calculated_pixel_clock_100Hz; /* in 100Hz */ uint32_t refresh_rate; enum dc_color_depth color_depth; bool interlaced; -- cgit v1.2.3 From 29344d152d0a7a54945606d9065c5de79161f100 Mon Sep 17 00:00:00 2001 From: Wenjing Liu Date: Tue, 11 Jun 2019 18:18:36 -0400 Subject: drm/amd/display: wait for the whole frame after global unlock [why] The current code will not wait for the entire frame after global unlock. This causes dsc dynamic target bpp update corruption when there is a surface update immediately happens after this. [how] Wait for the entire whole frame after unlock before continuing the rest of stream and surface update. 
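The added wait can be read with a small sketch (the stub types below are illustrative; the real callback is the timing generator's wait_for_state() visible in the diff that follows): waiting only for VACTIVE and then VBLANK can return after only a small part of a frame when the unlock happens late in active video, so a trailing VACTIVE wait is appended to make sure the VBLANK completes and the next frame has started before the rest of the stream and surface update continues.

enum crtc_state { CRTC_STATE_VACTIVE, CRTC_STATE_VBLANK };

struct timing_generator;

struct timing_generator_funcs {
        void (*wait_for_state)(struct timing_generator *tg, enum crtc_state state);
};

struct timing_generator {
        const struct timing_generator_funcs *funcs;
};

static void wait_one_whole_frame(struct timing_generator *tg)
{
        tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);  /* reach active video */
        tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);   /* run to the next blank */
        tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);  /* added: re-enter active, the blank has fully elapsed */
}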
Signed-off-by: Wenjing Liu Reviewed-by: Jun Lei Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 9fa11c001f1b..5fc4e0954eee 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -1281,6 +1281,8 @@ void dcn20_pipe_control_lock_global( CRTC_STATE_VACTIVE); pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK); + pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, + CRTC_STATE_VACTIVE); pipe->stream_res.tg->funcs->lock_doublebuffer_disable( pipe->stream_res.tg); } -- cgit v1.2.3 From c1f2e0154065963f95ce22e03ff05a420cb3aac7 Mon Sep 17 00:00:00 2001 From: Su Sung Chung Date: Wed, 22 May 2019 14:28:52 -0400 Subject: drm/amd/display: refactor dump_clk_registers [why] for 2 purposes: 1. get raw register value dumped on the log, which will make it easier to talk to other team who only knows about the register 2. enable other HW to be able to use the same interface as raven to log clock register data Signed-off-by: Su Sung Chung Reviewed-by: Tony Cheng Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c | 2 +- drivers/gpu/drm/amd/display/dc/dm_services.h | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c index 6e47444109d7..7f4766e45dff 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c @@ -45,7 +45,7 @@ #include "dcn10_cm_common.h" #include "clk_mgr.h" -static unsigned int snprintf_count(char *pBuf, unsigned int bufSize, char *fmt, ...) +unsigned int snprintf_count(char *pBuf, unsigned int bufSize, char *fmt, ...) { unsigned int ret_vsnprintf; unsigned int chars_printed; diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h index b426ba02b793..1a0429744630 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_services.h +++ b/drivers/gpu/drm/amd/display/dc/dm_services.h @@ -151,6 +151,7 @@ void generic_reg_wait(const struct dc_context *ctx, unsigned int delay_between_poll_us, unsigned int time_out_num_tries, const char *func_name, int line); +unsigned int snprintf_count(char *pBuf, unsigned int bufSize, char *fmt, ...); /* These macros need to be used with soc15 registers in order to retrieve * the actual offset. -- cgit v1.2.3 From 606b355170b56549890e8202a2b62f97d28b395e Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Wed, 5 Jun 2019 16:35:08 -0400 Subject: drm/amd/display: add hdmi2.1 dsc pps packet programming This change adds EMP packet programming for enabling dsc with hdmi. The packets are structured according to VESA HDMI 2.1x r2 spec, section 10.10.2.2. 
Signed-off-by: Dmytro Laktyushkin Reviewed-by: Charlene Liu Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 14 +++++++------- drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c | 13 ++++++------- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 3 ++- drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h | 1 + 4 files changed, 16 insertions(+), 15 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index caa707567b4f..07533cc377fd 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -2771,10 +2771,10 @@ void core_link_enable_stream( allocate_mst_payload(pipe_ctx); #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT - if (pipe_ctx->stream->timing.flags.DSC && - (dc_is_dp_signal(pipe_ctx->stream->signal) || - dc_is_virtual_signal(pipe_ctx->stream->signal))) { - dp_set_dsc_enable(pipe_ctx, true); + if (pipe_ctx->stream->timing.flags.DSC) { + if (dc_is_dp_signal(pipe_ctx->stream->signal) || + dc_is_virtual_signal(pipe_ctx->stream->signal)) + dp_set_dsc_enable(pipe_ctx, true); pipe_ctx->stream_res.tg->funcs->wait_for_state( pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK); @@ -2835,9 +2835,9 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option) disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal); #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT - if (pipe_ctx->stream->timing.flags.DSC && - dc_is_dp_signal(pipe_ctx->stream->signal)) { - dp_set_dsc_enable(pipe_ctx, false); + if (pipe_ctx->stream->timing.flags.DSC) { + if (dc_is_dp_signal(pipe_ctx->stream->signal)) + dp_set_dsc_enable(pipe_ctx, false); } #endif } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c index 211fadefe2f5..46257f0fcbe7 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c @@ -396,7 +396,7 @@ static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable) /* This has to be done after DSC was enabled on RX first, i.e. 
after dp_enable_dsc_on_rx() had been called */ -static void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) +void set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) { struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; struct dc *core_dc = pipe_ctx->stream->ctx->dc; @@ -435,7 +435,7 @@ static void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) dsc_optc_config_log(dsc, &dsc_optc_cfg); /* Enable DSC in encoder */ - if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment) && pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config) + if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(pipe_ctx->stream_res.stream_enc, optc_dsc_mode, dsc_optc_cfg.bytes_per_pixel, @@ -454,11 +454,10 @@ static void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) OPTC_DSC_DISABLED, 0, 0); /* disable DSC in stream encoder */ - if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { + if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config( pipe_ctx->stream_res.stream_enc, OPTC_DSC_DISABLED, 0, 0, NULL); - } /* disable DSC block */ pipe_ctx->stream_res.dsc->funcs->dsc_disable(pipe_ctx->stream_res.dsc); @@ -479,12 +478,12 @@ bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable) if (enable) { if (dp_set_dsc_on_rx(pipe_ctx, true)) { - dp_set_dsc_on_stream(pipe_ctx, true); + set_dsc_on_stream(pipe_ctx, true); result = true; } } else { dp_set_dsc_on_rx(pipe_ctx, false); - dp_set_dsc_on_stream(pipe_ctx, false); + set_dsc_on_stream(pipe_ctx, false); result = true; } out: @@ -500,7 +499,7 @@ bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx) if (!dsc) return false; - dp_set_dsc_on_stream(pipe_ctx, true); + set_dsc_on_stream(pipe_ctx, true); return true; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 5fc4e0954eee..ddf15a3715e0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -1783,8 +1783,9 @@ static void dcn20_reset_back_end_for_pipe( } } #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT - else if (pipe_ctx->stream_res.dsc) + else if (pipe_ctx->stream_res.dsc) { dp_set_dsc_enable(pipe_ctx, false); + } #endif /* by upper caller loop, parent pipe: pipe0, will be reset last. diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h index 2d95eff94239..c5293f9508fa 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h @@ -66,6 +66,7 @@ void dp_enable_mst_on_sink(struct dc_link *link, bool enable); void dp_set_fec_ready(struct dc_link *link, bool ready); void dp_set_fec_enable(struct dc_link *link, bool enable); bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable); +void set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable); bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx); #endif -- cgit v1.2.3 From 88eac241a1fc500ce5274a09ddc4bd5fc2b5adb6 Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Wed, 12 Jun 2019 16:30:15 -0400 Subject: drm/amd/display: add monitor patch to add T7 delay [Why] Specifically to one panel, TCON is able to accept active video signal quickly, but the Source Driver requires 2-3 frames of extra time. 
It is a Panel issue since TCON needs to take care of all Sink requirements including Source Driver. But in this case it does not. Customer is asking to add fixed T7 delay as panel workaround. [How] Add monitor specific patch to add T7 delay Signed-off-by: Anthony Koo Reviewed-by: Charlene Liu Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c | 4 ++++ drivers/gpu/drm/amd/display/dc/dc_types.h | 1 + 2 files changed, 5 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c index 46257f0fcbe7..878f47b59d5a 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c @@ -162,6 +162,10 @@ bool edp_receiver_ready_T7(struct dc_link *link) break; udelay(25); //MAx T7 is 50ms } while (++tries < 300); + + if (link->local_sink->edid_caps.panel_patch.extra_t7_ms > 0) + udelay(link->local_sink->edid_caps.panel_patch.extra_t7_ms * 1000); + return result; } diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index 6eabb6491a3d..ce6d73d21cca 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -202,6 +202,7 @@ struct dc_panel_patch { unsigned int dppowerup_delay; unsigned int extra_t12_ms; unsigned int extra_delay_backlight_off; + unsigned int extra_t7_ms; }; struct dc_edid_caps { -- cgit v1.2.3 From 24253476977af5456af962bbaaf53e9f1dae5cc1 Mon Sep 17 00:00:00 2001 From: Julian Parkin Date: Thu, 13 Jun 2019 12:49:37 -0400 Subject: drm/amd/display: Poll for GPUVM context ready (v2) [Why] Hardware docs state that we must wait until the GPUVM context is ready after programming it. [How] Poll until the valid bit of PAGE_TABLE_BASE_ADDR_LO32 is set to 1 after programming it. v2: fix include for udelay (Alex) Signed-off-by: Julian Parkin Reviewed-by: Charlene Liu Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c | 37 +++++++++++++++++++++++ 1 file changed, 37 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c index 27679ef6ebe8..96c263223315 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c @@ -23,6 +23,8 @@ * */ +#include + #include "dcn20_vmid.h" #include "reg_helper.h" @@ -36,6 +38,38 @@ #define FN(reg_name, field_name) \ vmid->shifts->field_name, vmid->masks->field_name +static void dcn20_wait_for_vmid_ready(struct dcn20_vmid *vmid) +{ + /* According the hardware spec, we need to poll for the lowest + * bit of PAGE_TABLE_BASE_ADDR_LO32 = 1 any time a GPUVM + * context is updated. We can't use REG_WAIT here since we + * don't have a seperate field to wait on. 
+ * + * TODO: Confirm timeout / poll interval with hardware team + */ + + int max_times = 10000; + int delay_us = 5; + int i; + + for (i = 0; i < max_times; ++i) { + uint32_t entry_lo32; + + REG_GET(PAGE_TABLE_BASE_ADDR_LO32, + VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_LO32, + &entry_lo32); + + if (entry_lo32 & 0x1) + return; + + udelay(delay_us); + } + + /* VM setup timed out */ + DC_LOG_WARNING("Timeout while waiting for GPUVM context update\n"); + ASSERT(0); +} + void dcn20_vmid_setup(struct dcn20_vmid *vmid, const struct dcn_vmid_page_table_config *config) { REG_SET(PAGE_TABLE_START_ADDR_HI32, 0, @@ -54,6 +88,9 @@ void dcn20_vmid_setup(struct dcn20_vmid *vmid, const struct dcn_vmid_page_table_ REG_SET(PAGE_TABLE_BASE_ADDR_HI32, 0, VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_HI32, (config->page_table_base_addr >> 32) & 0xFFFFFFFF); + /* Note: per hardware spec PAGE_TABLE_BASE_ADDR_LO32 must be programmed last in sequence */ REG_SET(PAGE_TABLE_BASE_ADDR_LO32, 0, VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_LO32, config->page_table_base_addr & 0xFFFFFFFF); + + dcn20_wait_for_vmid_ready(vmid); } -- cgit v1.2.3 From 14b2584636c66efbb9a8dd1c702d5da73ecf620d Mon Sep 17 00:00:00 2001 From: Dingchen Zhang Date: Wed, 15 May 2019 17:15:05 -0400 Subject: drm/amd/display: add functionality to grab DPRX CRC entries. [Why] We need to compare DPRX CRCs with framebuffer CRCs for digital bypass mode. [How] Hook into DRM to grab DP receiver CRCs through drm_dp_start_crc. Signed-off-by: Dingchen Zhang Reviewed-by: Harry Wentland Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 17 ++-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 16 +--- .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c | 91 ++++++++++++++++------ .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h | 56 +++++++++++++ 4 files changed, 136 insertions(+), 44 deletions(-) create mode 100644 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index e1871ad3c840..d0d52c38bba3 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -3665,7 +3665,7 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc) state->abm_level = cur->abm_level; state->vrr_supported = cur->vrr_supported; state->freesync_config = cur->freesync_config; - state->crc_enabled = cur->crc_enabled; + state->crc_src = cur->crc_src; state->cm_has_degamma = cur->cm_has_degamma; state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb; @@ -5975,6 +5975,7 @@ static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev, struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state, *new_crtc_state; int i; + enum amdgpu_dm_pipe_crc_source source; for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { @@ -6000,9 +6001,13 @@ static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev, #ifdef CONFIG_DEBUG_FS /* The stream has changed so CRC capture needs to re-enabled. 
*/ - if (dm_new_crtc_state->crc_enabled) { - dm_new_crtc_state->crc_enabled = false; - amdgpu_dm_crtc_set_crc_source(crtc, "auto"); + source = dm_new_crtc_state->crc_src; + if (amdgpu_dm_is_valid_crc_source(source)) { + dm_new_crtc_state->crc_src = AMDGPU_DM_PIPE_CRC_SOURCE_NONE; + if (source == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC) + amdgpu_dm_crtc_set_crc_source(crtc, "crtc"); + else if (source == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX) + amdgpu_dm_crtc_set_crc_source(crtc, "dprx"); } #endif } @@ -6058,7 +6063,7 @@ static int amdgpu_dm_atomic_commit(struct drm_device *dev, * Drop the extra vblank reference added by CRC * capture if applicable. */ - if (dm_new_crtc_state->crc_enabled) + if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) drm_crtc_vblank_put(crtc); /* @@ -6066,7 +6071,7 @@ static int amdgpu_dm_atomic_commit(struct drm_device *dev, * still a stream for the CRTC. */ if (!dm_new_crtc_state->stream) - dm_new_crtc_state->crc_enabled = false; + dm_new_crtc_state->crc_src = AMDGPU_DM_PIPE_CRC_SOURCE_NONE; manage_dm_interrupts(adev, acrtc, false); } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index b89cbbfcc0e9..35bee77def3b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -50,6 +50,7 @@ #include "irq_types.h" #include "signal_types.h" +#include "amdgpu_dm_crc.h" /* Forward declarations */ struct amdgpu_device; @@ -313,7 +314,7 @@ struct dm_crtc_state { bool interrupts_enabled; int crc_skip_count; - bool crc_enabled; + enum amdgpu_dm_pipe_crc_source crc_src; bool freesync_timing_changed; bool freesync_vrr_info_changed; @@ -380,19 +381,6 @@ void dm_restore_drm_connector_state(struct drm_device *dev, void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, struct edid *edid); -/* amdgpu_dm_crc.c */ -#ifdef CONFIG_DEBUG_FS -int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name); -int amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc, - const char *src_name, - size_t *values_cnt); -void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc); -#else -#define amdgpu_dm_crtc_set_crc_source NULL -#define amdgpu_dm_crtc_verify_crc_source NULL -#define amdgpu_dm_crtc_handle_crc_irq(x) -#endif - #define MAX_COLOR_LUT_ENTRIES 4096 /* Legacy gamm LUT users such as X doesn't like large LUT sizes */ #define MAX_COLOR_LEGACY_LUT_ENTRIES 256 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c index bc67e6502733..9af270161a0e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c @@ -30,19 +30,14 @@ #include "amdgpu_dm.h" #include "dc.h" -enum amdgpu_dm_pipe_crc_source { - AMDGPU_DM_PIPE_CRC_SOURCE_NONE = 0, - AMDGPU_DM_PIPE_CRC_SOURCE_AUTO, - AMDGPU_DM_PIPE_CRC_SOURCE_MAX, - AMDGPU_DM_PIPE_CRC_SOURCE_INVALID = -1, -}; - static enum amdgpu_dm_pipe_crc_source dm_parse_crc_source(const char *source) { if (!source || !strcmp(source, "none")) return AMDGPU_DM_PIPE_CRC_SOURCE_NONE; - if (!strcmp(source, "auto")) - return AMDGPU_DM_PIPE_CRC_SOURCE_AUTO; + if (!strcmp(source, "auto") || !strcmp(source, "crtc")) + return AMDGPU_DM_PIPE_CRC_SOURCE_CRTC; + if (!strcmp(source, "dprx")) + return AMDGPU_DM_PIPE_CRC_SOURCE_DPRX; return AMDGPU_DM_PIPE_CRC_SOURCE_INVALID; } @@ -68,7 +63,10 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) struct amdgpu_device *adev 
= crtc->dev->dev_private; struct dm_crtc_state *crtc_state = to_dm_crtc_state(crtc->state); struct dc_stream_state *stream_state = crtc_state->stream; - bool enable; + struct amdgpu_dm_connector *aconn; + struct drm_dp_aux *aux = NULL; + bool enable = false; + bool enabled = false; enum amdgpu_dm_pipe_crc_source source = dm_parse_crc_source(src_name); @@ -83,13 +81,42 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) return -EINVAL; } - enable = (source == AMDGPU_DM_PIPE_CRC_SOURCE_AUTO); + enable = amdgpu_dm_is_valid_crc_source(source); mutex_lock(&adev->dm.dc_lock); - if (!dc_stream_configure_crc(stream_state->ctx->dc, stream_state, - enable, enable)) { - mutex_unlock(&adev->dm.dc_lock); - return -EINVAL; + /* + * USER REQ SRC | CURRENT SRC | BEHAVIOR + * ----------------------------- + * None | None | Do nothing + * None | CRTC | Disable CRTC CRC + * None | DPRX | Disable DPRX CRC, need 'aux' + * CRTC | XXXX | Enable CRTC CRC, configure DC strm + * DPRX | XXXX | Enable DPRX CRC, need 'aux' + */ + if (source == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX || + (source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE && + crtc_state->crc_src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX)) { + aconn = stream_state->link->priv; + + if (!aconn) { + DRM_DEBUG_DRIVER("No amd connector matching CRTC-%d\n", crtc->index); + mutex_unlock(&adev->dm.dc_lock); + return -EINVAL; + } + + aux = &aconn->dm_dp_aux.aux; + + if (!aux) { + DRM_DEBUG_DRIVER("No dp aux for amd connector\n"); + mutex_unlock(&adev->dm.dc_lock); + return -EINVAL; + } + } else if (source == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC) { + if (!dc_stream_configure_crc(stream_state->ctx->dc, stream_state, + enable, enable)) { + mutex_unlock(&adev->dm.dc_lock); + return -EINVAL; + } } /* When enabling CRC, we should also disable dithering. */ @@ -103,12 +130,26 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) * Reading the CRC requires the vblank interrupt handler to be * enabled. Keep a reference until CRC capture stops. */ - if (!crtc_state->crc_enabled && enable) + enabled = amdgpu_dm_is_valid_crc_source(crtc_state->crc_src); + if (!enabled && enable) { drm_crtc_vblank_get(crtc); - else if (crtc_state->crc_enabled && !enable) + if (source == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX) { + if (drm_dp_start_crc(aux, crtc)) { + DRM_DEBUG_DRIVER("dp start crc failed\n"); + return -EINVAL; + } + } + } else if (enabled && !enable) { drm_crtc_vblank_put(crtc); + if (crtc_state->crc_src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX) { + if (drm_dp_stop_crc(aux)) { + DRM_DEBUG_DRIVER("dp stop crc failed\n"); + return -EINVAL; + } + } + } - crtc_state->crc_enabled = enable; + crtc_state->crc_src = source; /* Reset crc_skipped on dm state */ crtc_state->crc_skip_count = 0; @@ -135,7 +176,7 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc) stream_state = crtc_state->stream; /* Early return if CRC capture is not enabled. 
*/ - if (!crtc_state->crc_enabled) + if (!amdgpu_dm_is_valid_crc_source(crtc_state->crc_src)) return; /* @@ -149,10 +190,12 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc) return; } - if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state, - &crcs[0], &crcs[1], &crcs[2])) - return; + if (crtc_state->crc_src == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC) { + if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state, + &crcs[0], &crcs[1], &crcs[2])) + return; - drm_crtc_add_crc_entry(crtc, true, - drm_crtc_accurate_vblank_count(crtc), crcs); + drm_crtc_add_crc_entry(crtc, true, + drm_crtc_accurate_vblank_count(crtc), crcs); + } } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h new file mode 100644 index 000000000000..3793dc872436 --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h @@ -0,0 +1,56 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef AMD_DAL_DEV_AMDGPU_DM_AMDGPU_DM_CRC_H_ +#define AMD_DAL_DEV_AMDGPU_DM_AMDGPU_DM_CRC_H_ + +enum amdgpu_dm_pipe_crc_source { + AMDGPU_DM_PIPE_CRC_SOURCE_NONE = 0, + AMDGPU_DM_PIPE_CRC_SOURCE_CRTC, + AMDGPU_DM_PIPE_CRC_SOURCE_DPRX, + AMDGPU_DM_PIPE_CRC_SOURCE_MAX, + AMDGPU_DM_PIPE_CRC_SOURCE_INVALID = -1, +}; + +static inline bool amdgpu_dm_is_valid_crc_source(enum amdgpu_dm_pipe_crc_source source) +{ + return (source == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC) || + (source == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX); +} + +/* amdgpu_dm_crc.c */ +#ifdef CONFIG_DEBUG_FS +int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name); +int amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc, + const char *src_name, + size_t *values_cnt); +void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc); +#else +#define amdgpu_dm_crtc_set_crc_source NULL +#define amdgpu_dm_crtc_verify_crc_source NULL +#define amdgpu_dm_crtc_handle_crc_irq(x) +#endif + +#endif /* AMD_DAL_DEV_AMDGPU_DM_AMDGPU_DM_CRC_H_ */ -- cgit v1.2.3 From 8fb843d179a6fff1bfe037601b06980ba9a56167 Mon Sep 17 00:00:00 2001 From: Dingchen Zhang Date: Wed, 29 May 2019 18:52:52 -0400 Subject: drm/amd/display: add functionality to get pipe CRC source. [Why] We need to check the pipe crc source through debugfs for bypass mode test. [How] add implementation of amdgpu_dm_crtc_get_crc_sources and hook into drm_crtc callback get_crc_sources. 
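For reference, the debugfs plumbing behind this relies on three optional hooks in struct drm_crtc_funcs; below is a reduced sketch of the pattern, where the example_* names are placeholders and the real callbacks are the amdgpu_dm_crtc_* functions shown in the diff that follows. The get_crc_sources hook is what lets the DRM CRC debugfs code advertise valid source names to userspace.

static const char *const pipe_crc_sources[] = {
	"none", "crtc", "dprx", "auto",
};

/* Advertise the static table; the DRM debugfs code can then list these
 * names through the per-CRTC crc/control file so tests can discover
 * which sources the driver supports.
 */
static const char *const *example_get_crc_sources(struct drm_crtc *crtc,
						  size_t *count)
{
	*count = ARRAY_SIZE(pipe_crc_sources);
	return pipe_crc_sources;
}

static const struct drm_crtc_funcs example_crtc_funcs = {
	/* ... existing callbacks ... */
	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
	.get_crc_sources = example_get_crc_sources,
};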
Signed-off-by: Dingchen Zhang Reviewed-by: Nicholas Kazlauskas Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 1 + drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c | 14 ++++++++++++++ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h | 3 +++ 3 files changed, 18 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index d0d52c38bba3..13463fa31c3f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -3735,6 +3735,7 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = { .atomic_destroy_state = dm_crtc_destroy_state, .set_crc_source = amdgpu_dm_crtc_set_crc_source, .verify_crc_source = amdgpu_dm_crtc_verify_crc_source, + .get_crc_sources = amdgpu_dm_crtc_get_crc_sources, .enable_vblank = dm_enable_vblank, .disable_vblank = dm_disable_vblank, }; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c index 9af270161a0e..b966e1410484 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c @@ -30,6 +30,13 @@ #include "amdgpu_dm.h" #include "dc.h" +static const char *const pipe_crc_sources[] = { + "none", + "crtc", + "dprx", + "auto", +}; + static enum amdgpu_dm_pipe_crc_source dm_parse_crc_source(const char *source) { if (!source || !strcmp(source, "none")) @@ -42,6 +49,13 @@ static enum amdgpu_dm_pipe_crc_source dm_parse_crc_source(const char *source) return AMDGPU_DM_PIPE_CRC_SOURCE_INVALID; } +const char *const *amdgpu_dm_crtc_get_crc_sources(struct drm_crtc *crtc, + size_t *count) +{ + *count = ARRAY_SIZE(pipe_crc_sources); + return pipe_crc_sources; +} + int amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc, const char *src_name, size_t *values_cnt) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h index 3793dc872436..b63a9011f511 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h @@ -46,10 +46,13 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name); int amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc, const char *src_name, size_t *values_cnt); +const char *const *amdgpu_dm_crtc_get_crc_sources(struct drm_crtc *crtc, + size_t *count); void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc); #else #define amdgpu_dm_crtc_set_crc_source NULL #define amdgpu_dm_crtc_verify_crc_source NULL +#define amdgpu_dm_crtc_get_crc_sources NULL #define amdgpu_dm_crtc_handle_crc_irq(x) #endif -- cgit v1.2.3 From d35d77a6a512f38a89aee9328c67d0c913c7fbea Mon Sep 17 00:00:00 2001 From: SivapiriyanKumarasamy Date: Fri, 14 Jun 2019 15:04:00 -0400 Subject: drm/amd/display: Wait for backlight programming completion in set backlight level [WHY] Currently we don't wait for blacklight programming completion in DMCU when setting backlight level. Some sequences such as PSR static screen event trigger reprogramming requires it to be complete. [How] Add generic wait for dmcu command completion in set backlight level. 
Signed-off-by: SivapiriyanKumarasamy Reviewed-by: Anthony Koo Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dce/dce_abm.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c index f8903bcabe49..58bd131d5b48 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c @@ -239,6 +239,10 @@ static void dmcu_set_backlight_level( s2 |= (backlight_8_bit << ATOM_S2_CURRENT_BL_LEVEL_SHIFT); REG_WRITE(BIOS_SCRATCH_2, s2); + + /* waitDMCUReadyForCmd */ + REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, + 0, 1, 80000); } static void dce_abm_init(struct abm *abm) -- cgit v1.2.3 From 6d58f99444e29d68ff6d5d52ff42c77692fe306f Mon Sep 17 00:00:00 2001 From: Aric Cyr Date: Mon, 17 Jun 2019 10:58:57 -0400 Subject: drm/amd/display: 3.2.39 Signed-off-by: Aric Cyr Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index a307a3a1cde4..0aad22b1ef36 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -39,7 +39,7 @@ #include "inc/hw/dmcu.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.2.38" +#define DC_VER "3.2.39" #define MAX_SURFACES 3 #define MAX_PLANES 6 -- cgit v1.2.3 From 4fb6ec466dd50fdd94a1288b27b692da67b6c23d Mon Sep 17 00:00:00 2001 From: Ilya Bakoulin Date: Wed, 29 May 2019 18:52:17 -0400 Subject: drm/amd/display: Expose enc2_set_dynamic_metadata [Why] Need to implement register programming for HDR dynamic metadata transmission and tests. Signed-off-by: Ilya Bakoulin Reviewed-by: Eric Bernstein Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c | 2 +- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c index f5bcffc426b8..5c2b7b54b126 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c @@ -373,7 +373,7 @@ static void enc2_read_state(struct stream_encoder *enc, struct enc_state *s) * * Ensure the OTG master update lock is set when changing DME configuration. 
*/ -static void enc2_set_dynamic_metadata(struct stream_encoder *enc, +void enc2_set_dynamic_metadata(struct stream_encoder *enc, bool enable_dme, uint32_t hubp_requestor_id, enum dynamic_metadata_mode dmdata_mode) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h index 6d40e8c9b78f..3f94a9f13c4a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h @@ -104,4 +104,9 @@ void enc2_stream_encoder_dp_unblank( struct stream_encoder *enc, const struct encoder_unblank_param *param); +void enc2_set_dynamic_metadata(struct stream_encoder *enc, + bool enable_dme, + uint32_t hubp_requestor_id, + enum dynamic_metadata_mode dmdata_mode); + #endif /* __DC_STREAM_ENCODER_DCN20_H__ */ -- cgit v1.2.3 From 24f1d1cee2bc9cb591145e21823d7865208a7991 Mon Sep 17 00:00:00 2001 From: Ilya Bakoulin Date: Wed, 29 May 2019 18:52:17 -0400 Subject: drm/amd/display: Check for valid stream_encode Before accessing it's vtable, check that stream_encoder is non-null. Signed-off-by: Ilya Bakoulin Reviewed-by: Eric Bernstein Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index af7f8be230f7..352862370390 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -612,7 +612,8 @@ bool dc_stream_set_dynamic_metadata(struct dc *dc, pipe_ctx->stream->dmdata_address = attr->address; - if (pipe_ctx->stream_res.stream_enc->funcs->set_dynamic_metadata != NULL) { + if (pipe_ctx->stream_res.stream_enc && + pipe_ctx->stream_res.stream_enc->funcs->set_dynamic_metadata != NULL) { if (pipe_ctx->stream->dmdata_address.quad_part != 0) { /* if using dynamic meta, don't set up generic infopackets */ pipe_ctx->stream_res.encoder_info_frame.hdrsmd.valid = false; -- cgit v1.2.3 From eced4bceed22114697a17295a768cebea1dbc2b0 Mon Sep 17 00:00:00 2001 From: Ilya Bakoulin Date: Wed, 12 Jun 2019 12:40:42 -0400 Subject: drm/amd/display: Fix some HUBP programming issues [Why] A hubp pointer was being passed to DCN1 functions, which expect the enclosing structure (for the purpose of container_of macros) to be dcn10_hubp, but the actual type was dcn20_hubp. [How] Copy existing DCN1 functions and alter them slightly for use with dcn20_hubp. 
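The type confusion described above is a container_of problem: each generation's helpers recover their private register tables by assuming which structure the common struct hubp is embedded in. A reduced sketch of the failure mode follows; member lists are abbreviated and only the TO_DCN*_HUBP macro shape mirrors what this series uses.

struct dcn10_hubp {
	struct hubp base;
	/* DCN1-specific register/shift/mask tables follow 'base' */
};

struct dcn20_hubp {
	struct hubp base;
	/* DCN2-specific register/shift/mask tables follow 'base' */
};

#define TO_DCN10_HUBP(h) container_of(h, struct dcn10_hubp, base)
#define TO_DCN20_HUBP(h) container_of(h, struct dcn20_hubp, base)

static void dcn1_style_helper(struct hubp *hubp)
{
	/* Legal only if 'hubp' really is the 'base' member of a
	 * dcn10_hubp. When a dcn20_hubp is passed in instead, the
	 * pointer arithmetic still "works", but everything past 'base'
	 * is laid out for DCN2, so the register offsets and masks the
	 * helper reads are garbage.
	 */
	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);

	(void)hubp1;
}

Duplicating the helpers as hubp2_* versions, as this patch does, keeps every container_of cast consistent with the structure the pointer actually lives in.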
Signed-off-by: Ilya Bakoulin Reviewed-by: Charlene Liu Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c | 682 +++++++++++++++++++++- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h | 37 ++ 2 files changed, 705 insertions(+), 14 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c index d3f7dd374d50..02e8c0c6a233 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c @@ -156,7 +156,85 @@ void hubp2_program_deadline( { struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); - hubp1_program_deadline(hubp, dlg_attr, ttu_attr); + /* DLG - Per hubp */ + REG_SET_2(BLANK_OFFSET_0, 0, + REFCYC_H_BLANK_END, dlg_attr->refcyc_h_blank_end, + DLG_V_BLANK_END, dlg_attr->dlg_vblank_end); + + REG_SET(BLANK_OFFSET_1, 0, + MIN_DST_Y_NEXT_START, dlg_attr->min_dst_y_next_start); + + REG_SET(DST_DIMENSIONS, 0, + REFCYC_PER_HTOTAL, dlg_attr->refcyc_per_htotal); + + REG_SET_2(DST_AFTER_SCALER, 0, + REFCYC_X_AFTER_SCALER, dlg_attr->refcyc_x_after_scaler, + DST_Y_AFTER_SCALER, dlg_attr->dst_y_after_scaler); + + REG_SET(REF_FREQ_TO_PIX_FREQ, 0, + REF_FREQ_TO_PIX_FREQ, dlg_attr->ref_freq_to_pix_freq); + + /* DLG - Per luma/chroma */ + REG_SET(VBLANK_PARAMETERS_1, 0, + REFCYC_PER_PTE_GROUP_VBLANK_L, dlg_attr->refcyc_per_pte_group_vblank_l); + + if (REG(NOM_PARAMETERS_0)) + REG_SET(NOM_PARAMETERS_0, 0, + DST_Y_PER_PTE_ROW_NOM_L, dlg_attr->dst_y_per_pte_row_nom_l); + + if (REG(NOM_PARAMETERS_1)) + REG_SET(NOM_PARAMETERS_1, 0, + REFCYC_PER_PTE_GROUP_NOM_L, dlg_attr->refcyc_per_pte_group_nom_l); + + REG_SET(NOM_PARAMETERS_4, 0, + DST_Y_PER_META_ROW_NOM_L, dlg_attr->dst_y_per_meta_row_nom_l); + + REG_SET(NOM_PARAMETERS_5, 0, + REFCYC_PER_META_CHUNK_NOM_L, dlg_attr->refcyc_per_meta_chunk_nom_l); + + REG_SET_2(PER_LINE_DELIVERY, 0, + REFCYC_PER_LINE_DELIVERY_L, dlg_attr->refcyc_per_line_delivery_l, + REFCYC_PER_LINE_DELIVERY_C, dlg_attr->refcyc_per_line_delivery_c); + + REG_SET(VBLANK_PARAMETERS_2, 0, + REFCYC_PER_PTE_GROUP_VBLANK_C, dlg_attr->refcyc_per_pte_group_vblank_c); + + if (REG(NOM_PARAMETERS_2)) + REG_SET(NOM_PARAMETERS_2, 0, + DST_Y_PER_PTE_ROW_NOM_C, dlg_attr->dst_y_per_pte_row_nom_c); + + if (REG(NOM_PARAMETERS_3)) + REG_SET(NOM_PARAMETERS_3, 0, + REFCYC_PER_PTE_GROUP_NOM_C, dlg_attr->refcyc_per_pte_group_nom_c); + + REG_SET(NOM_PARAMETERS_6, 0, + DST_Y_PER_META_ROW_NOM_C, dlg_attr->dst_y_per_meta_row_nom_c); + + REG_SET(NOM_PARAMETERS_7, 0, + REFCYC_PER_META_CHUNK_NOM_C, dlg_attr->refcyc_per_meta_chunk_nom_c); + + /* TTU - per hubp */ + REG_SET_2(DCN_TTU_QOS_WM, 0, + QoS_LEVEL_LOW_WM, ttu_attr->qos_level_low_wm, + QoS_LEVEL_HIGH_WM, ttu_attr->qos_level_high_wm); + + /* TTU - per luma/chroma */ + /* Assumed surf0 is luma and 1 is chroma */ + + REG_SET_3(DCN_SURF0_TTU_CNTL0, 0, + REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_l, + QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_l, + QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_l); + + REG_SET_3(DCN_SURF1_TTU_CNTL0, 0, + REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_c, + QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_c, + QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_c); + + REG_SET_3(DCN_CUR0_TTU_CNTL0, 0, + REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_cur0, + QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_cur0, + QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_cur0); REG_SET(FLIP_PARAMETERS_1, 0, REFCYC_PER_PTE_GROUP_FLIP_L, 
dlg_attr->refcyc_per_pte_group_flip_l); @@ -184,6 +262,39 @@ void hubp2_vready_at_or_After_vsync(struct hubp *hubp, REG_UPDATE(DCHUBP_CNTL, HUBP_VREADY_AT_OR_AFTER_VSYNC, value); } +void hubp2_program_requestor( + struct hubp *hubp, + struct _vcs_dpi_display_rq_regs_st *rq_regs) +{ + struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); + + REG_UPDATE(HUBPRET_CONTROL, + DET_BUF_PLANE1_BASE_ADDRESS, rq_regs->plane1_base_address); + REG_SET_4(DCN_EXPANSION_MODE, 0, + DRQ_EXPANSION_MODE, rq_regs->drq_expansion_mode, + PRQ_EXPANSION_MODE, rq_regs->prq_expansion_mode, + MRQ_EXPANSION_MODE, rq_regs->mrq_expansion_mode, + CRQ_EXPANSION_MODE, rq_regs->crq_expansion_mode); + REG_SET_8(DCHUBP_REQ_SIZE_CONFIG, 0, + CHUNK_SIZE, rq_regs->rq_regs_l.chunk_size, + MIN_CHUNK_SIZE, rq_regs->rq_regs_l.min_chunk_size, + META_CHUNK_SIZE, rq_regs->rq_regs_l.meta_chunk_size, + MIN_META_CHUNK_SIZE, rq_regs->rq_regs_l.min_meta_chunk_size, + DPTE_GROUP_SIZE, rq_regs->rq_regs_l.dpte_group_size, + MPTE_GROUP_SIZE, rq_regs->rq_regs_l.mpte_group_size, + SWATH_HEIGHT, rq_regs->rq_regs_l.swath_height, + PTE_ROW_HEIGHT_LINEAR, rq_regs->rq_regs_l.pte_row_height_linear); + REG_SET_8(DCHUBP_REQ_SIZE_CONFIG_C, 0, + CHUNK_SIZE_C, rq_regs->rq_regs_c.chunk_size, + MIN_CHUNK_SIZE_C, rq_regs->rq_regs_c.min_chunk_size, + META_CHUNK_SIZE_C, rq_regs->rq_regs_c.meta_chunk_size, + MIN_META_CHUNK_SIZE_C, rq_regs->rq_regs_c.min_meta_chunk_size, + DPTE_GROUP_SIZE_C, rq_regs->rq_regs_c.dpte_group_size, + MPTE_GROUP_SIZE_C, rq_regs->rq_regs_c.mpte_group_size, + SWATH_HEIGHT_C, rq_regs->rq_regs_c.swath_height, + PTE_ROW_HEIGHT_LINEAR_C, rq_regs->rq_regs_c.pte_row_height_linear); +} + static void hubp2_setup( struct hubp *hubp, struct _vcs_dpi_display_dlg_regs_st *dlg_attr, @@ -196,7 +307,7 @@ static void hubp2_setup( */ hubp2_vready_at_or_After_vsync(hubp, pipe_dest); - hubp1_program_requestor(hubp, rq_regs); + hubp2_program_requestor(hubp, rq_regs); hubp2_program_deadline(hubp, dlg_attr, ttu_attr); } @@ -283,6 +394,196 @@ static void hubp2_program_tiling( PIPE_ALIGNED, 0); } +void hubp2_program_size( + struct hubp *hubp, + enum surface_pixel_format format, + const union plane_size *plane_size, + struct dc_plane_dcc_param *dcc) +{ + struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); + uint32_t pitch, meta_pitch, pitch_c, meta_pitch_c; + + /* Program data and meta surface pitch (calculation from addrlib) + * 444 or 420 luma + */ + if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN && format < SURFACE_PIXEL_FORMAT_SUBSAMPLE_END) { + ASSERT(plane_size->video.chroma_pitch != 0); + /* Chroma pitch zero can cause system hang! 
*/ + + pitch = plane_size->video.luma_pitch - 1; + meta_pitch = dcc->video.meta_pitch_l - 1; + pitch_c = plane_size->video.chroma_pitch - 1; + meta_pitch_c = dcc->video.meta_pitch_c - 1; + } else { + pitch = plane_size->grph.surface_pitch - 1; + meta_pitch = dcc->grph.meta_pitch - 1; + pitch_c = 0; + meta_pitch_c = 0; + } + + if (!dcc->enable) { + meta_pitch = 0; + meta_pitch_c = 0; + } + + REG_UPDATE_2(DCSURF_SURFACE_PITCH, + PITCH, pitch, META_PITCH, meta_pitch); + + if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) + REG_UPDATE_2(DCSURF_SURFACE_PITCH_C, + PITCH_C, pitch_c, META_PITCH_C, meta_pitch_c); +} + +void hubp2_program_rotation( + struct hubp *hubp, + enum dc_rotation_angle rotation, + bool horizontal_mirror) +{ + struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); + uint32_t mirror; + + + if (horizontal_mirror) + mirror = 1; + else + mirror = 0; + + /* Program rotation angle and horz mirror - no mirror */ + if (rotation == ROTATION_ANGLE_0) + REG_UPDATE_2(DCSURF_SURFACE_CONFIG, + ROTATION_ANGLE, 0, + H_MIRROR_EN, mirror); + else if (rotation == ROTATION_ANGLE_90) + REG_UPDATE_2(DCSURF_SURFACE_CONFIG, + ROTATION_ANGLE, 1, + H_MIRROR_EN, mirror); + else if (rotation == ROTATION_ANGLE_180) + REG_UPDATE_2(DCSURF_SURFACE_CONFIG, + ROTATION_ANGLE, 2, + H_MIRROR_EN, mirror); + else if (rotation == ROTATION_ANGLE_270) + REG_UPDATE_2(DCSURF_SURFACE_CONFIG, + ROTATION_ANGLE, 3, + H_MIRROR_EN, mirror); +} + +void hubp2_dcc_control(struct hubp *hubp, bool enable, + bool independent_64b_blks) +{ + uint32_t dcc_en = enable ? 1 : 0; + uint32_t dcc_ind_64b_blk = independent_64b_blks ? 1 : 0; + struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); + + REG_UPDATE_4(DCSURF_SURFACE_CONTROL, + PRIMARY_SURFACE_DCC_EN, dcc_en, + PRIMARY_SURFACE_DCC_IND_64B_BLK, dcc_ind_64b_blk, + SECONDARY_SURFACE_DCC_EN, dcc_en, + SECONDARY_SURFACE_DCC_IND_64B_BLK, dcc_ind_64b_blk); +} + +void hubp2_program_pixel_format( + struct hubp *hubp, + enum surface_pixel_format format) +{ + struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); + uint32_t red_bar = 3; + uint32_t blue_bar = 2; + + /* swap for ABGR format */ + if (format == SURFACE_PIXEL_FORMAT_GRPH_ABGR8888 + || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010 + || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS + || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F) { + red_bar = 2; + blue_bar = 3; + } + + REG_UPDATE_2(HUBPRET_CONTROL, + CROSSBAR_SRC_CB_B, blue_bar, + CROSSBAR_SRC_CR_R, red_bar); + + /* Mapping is same as ipp programming (cnvc) */ + + switch (format) { + case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 1); + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGB565: + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 3); + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 8); + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS: + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 10); + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 22); + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:/*we use crossbar already*/ + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 24); + break; + + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: + REG_UPDATE(DCSURF_SURFACE_CONFIG, + 
SURFACE_PIXEL_FORMAT, 65); + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 64); + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 67); + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 66); + break; + case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888: + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 12); + break; +#if defined(CONFIG_DRM_AMD_DC_DCN2_0) + case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX: + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 112); + break; + case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX: + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 113); + break; + case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010: + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 114); + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT: + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 118); + break; + case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT: + REG_UPDATE(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, 119); + break; +#endif + default: + BREAK_TO_DEBUGGER(); + break; + } + + /* don't see the need of program the xbar in DCN 1.0 */ +} + void hubp2_program_surface_config( struct hubp *hubp, enum surface_pixel_format format, @@ -295,11 +596,11 @@ void hubp2_program_surface_config( { struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); - hubp1_dcc_control(hubp, dcc->enable, dcc->grph.independent_64b_blks); + hubp2_dcc_control(hubp, dcc->enable, dcc->grph.independent_64b_blks); hubp2_program_tiling(hubp2, tiling_info, format); - hubp1_program_size(hubp, format, plane_size, dcc); - hubp1_program_rotation(hubp, rotation, horizontal_mirror); - hubp1_program_pixel_format(hubp, format); + hubp2_program_size(hubp, format, plane_size, dcc); + hubp2_program_rotation(hubp, rotation, horizontal_mirror); + hubp2_program_pixel_format(hubp, format); } enum cursor_lines_per_chunk hubp2_get_lines_per_chunk( @@ -652,28 +953,381 @@ void hubp2_set_flip_control_surface_gsl(struct hubp *hubp, bool enable) REG_UPDATE(DCSURF_FLIP_CONTROL2, SURFACE_GSL_ENABLE, enable ? 1 : 0); } +bool hubp2_is_flip_pending(struct hubp *hubp) +{ + uint32_t flip_pending = 0; + struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); + struct dc_plane_address earliest_inuse_address; + + REG_GET(DCSURF_FLIP_CONTROL, + SURFACE_FLIP_PENDING, &flip_pending); + + REG_GET(DCSURF_SURFACE_EARLIEST_INUSE, + SURFACE_EARLIEST_INUSE_ADDRESS, &earliest_inuse_address.grph.addr.low_part); + + REG_GET(DCSURF_SURFACE_EARLIEST_INUSE_HIGH, + SURFACE_EARLIEST_INUSE_ADDRESS_HIGH, &earliest_inuse_address.grph.addr.high_part); + + if (flip_pending) + return true; + + if (earliest_inuse_address.grph.addr.quad_part != hubp->request_address.grph.addr.quad_part) + return true; + + return false; +} + +void hubp2_set_blank(struct hubp *hubp, bool blank) +{ + struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); + uint32_t blank_en = blank ? 1 : 0; + + REG_UPDATE_2(DCHUBP_CNTL, + HUBP_BLANK_EN, blank_en, + HUBP_TTU_DISABLE, blank_en); + + if (blank) { + uint32_t reg_val = REG_READ(DCHUBP_CNTL); + + if (reg_val) { + /* init sequence workaround: in case HUBP is + * power gated, this wait would timeout. 
+ * + * we just wrote reg_val to non-0, if it stay 0 + * it means HUBP is gated + */ + REG_WAIT(DCHUBP_CNTL, + HUBP_NO_OUTSTANDING_REQ, 1, + 1, 200); + } + + hubp->mpcc_id = 0xf; + hubp->opp_id = OPP_ID_INVALID; + } +} + +void hubp2_cursor_set_position( + struct hubp *hubp, + const struct dc_cursor_position *pos, + const struct dc_cursor_mi_param *param) +{ + struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); + int src_x_offset = pos->x - pos->x_hotspot - param->viewport.x; + int src_y_offset = pos->y - pos->y_hotspot - param->viewport.y; + int x_hotspot = pos->x_hotspot; + int y_hotspot = pos->y_hotspot; + uint32_t dst_x_offset; + uint32_t cur_en = pos->enable ? 1 : 0; + + /* + * Guard aganst cursor_set_position() from being called with invalid + * attributes + * + * TODO: Look at combining cursor_set_position() and + * cursor_set_attributes() into cursor_update() + */ + if (hubp->curs_attr.address.quad_part == 0) + return; + + if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) { + src_x_offset = pos->y - pos->y_hotspot - param->viewport.x; + y_hotspot = pos->x_hotspot; + x_hotspot = pos->y_hotspot; + } + + if (param->mirror) { + x_hotspot = param->viewport.width - x_hotspot; + src_x_offset = param->viewport.x + param->viewport.width - src_x_offset; + } + + dst_x_offset = (src_x_offset >= 0) ? src_x_offset : 0; + dst_x_offset *= param->ref_clk_khz; + dst_x_offset /= param->pixel_clk_khz; + + ASSERT(param->h_scale_ratio.value); + + if (param->h_scale_ratio.value) + dst_x_offset = dc_fixpt_floor(dc_fixpt_div( + dc_fixpt_from_int(dst_x_offset), + param->h_scale_ratio)); + + if (src_x_offset >= (int)param->viewport.width) + cur_en = 0; /* not visible beyond right edge*/ + + if (src_x_offset + (int)hubp->curs_attr.width <= 0) + cur_en = 0; /* not visible beyond left edge*/ + + if (src_y_offset >= (int)param->viewport.height) + cur_en = 0; /* not visible beyond bottom edge*/ + + if (src_y_offset + (int)hubp->curs_attr.height <= 0) + cur_en = 0; /* not visible beyond top edge*/ + + if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0) + hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr); + + REG_UPDATE(CURSOR_CONTROL, + CURSOR_ENABLE, cur_en); + + REG_SET_2(CURSOR_POSITION, 0, + CURSOR_X_POSITION, pos->x, + CURSOR_Y_POSITION, pos->y); + + REG_SET_2(CURSOR_HOT_SPOT, 0, + CURSOR_HOT_SPOT_X, x_hotspot, + CURSOR_HOT_SPOT_Y, y_hotspot); + + REG_SET(CURSOR_DST_OFFSET, 0, + CURSOR_DST_X_OFFSET, dst_x_offset); + /* TODO Handle surface pixel formats other than 4:4:4 */ +} + +void hubp2_clk_cntl(struct hubp *hubp, bool enable) +{ + struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); + uint32_t clk_enable = enable ? 
1 : 0; + + REG_UPDATE(HUBP_CLK_CNTL, HUBP_CLOCK_ENABLE, clk_enable); +} + +void hubp2_vtg_sel(struct hubp *hubp, uint32_t otg_inst) +{ + struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); + + REG_UPDATE(DCHUBP_CNTL, HUBP_VTG_SEL, otg_inst); +} + +void hubp2_clear_underflow(struct hubp *hubp) +{ + struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); + + REG_UPDATE(DCHUBP_CNTL, HUBP_UNDERFLOW_CLEAR, 1); +} + +void hubp2_read_state_common(struct hubp *hubp) +{ + struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); + struct dcn_hubp_state *s = &hubp2->state; + struct _vcs_dpi_display_dlg_regs_st *dlg_attr = &s->dlg_attr; + struct _vcs_dpi_display_ttu_regs_st *ttu_attr = &s->ttu_attr; + struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs; + + /* Requester */ + REG_GET(HUBPRET_CONTROL, + DET_BUF_PLANE1_BASE_ADDRESS, &rq_regs->plane1_base_address); + REG_GET_4(DCN_EXPANSION_MODE, + DRQ_EXPANSION_MODE, &rq_regs->drq_expansion_mode, + PRQ_EXPANSION_MODE, &rq_regs->prq_expansion_mode, + MRQ_EXPANSION_MODE, &rq_regs->mrq_expansion_mode, + CRQ_EXPANSION_MODE, &rq_regs->crq_expansion_mode); + + /* DLG - Per hubp */ + REG_GET_2(BLANK_OFFSET_0, + REFCYC_H_BLANK_END, &dlg_attr->refcyc_h_blank_end, + DLG_V_BLANK_END, &dlg_attr->dlg_vblank_end); + + REG_GET(BLANK_OFFSET_1, + MIN_DST_Y_NEXT_START, &dlg_attr->min_dst_y_next_start); + + REG_GET(DST_DIMENSIONS, + REFCYC_PER_HTOTAL, &dlg_attr->refcyc_per_htotal); + + REG_GET_2(DST_AFTER_SCALER, + REFCYC_X_AFTER_SCALER, &dlg_attr->refcyc_x_after_scaler, + DST_Y_AFTER_SCALER, &dlg_attr->dst_y_after_scaler); + + if (REG(PREFETCH_SETTINS)) + REG_GET_2(PREFETCH_SETTINS, + DST_Y_PREFETCH, &dlg_attr->dst_y_prefetch, + VRATIO_PREFETCH, &dlg_attr->vratio_prefetch); + else + REG_GET_2(PREFETCH_SETTINGS, + DST_Y_PREFETCH, &dlg_attr->dst_y_prefetch, + VRATIO_PREFETCH, &dlg_attr->vratio_prefetch); + + REG_GET_2(VBLANK_PARAMETERS_0, + DST_Y_PER_VM_VBLANK, &dlg_attr->dst_y_per_vm_vblank, + DST_Y_PER_ROW_VBLANK, &dlg_attr->dst_y_per_row_vblank); + + REG_GET(REF_FREQ_TO_PIX_FREQ, + REF_FREQ_TO_PIX_FREQ, &dlg_attr->ref_freq_to_pix_freq); + + /* DLG - Per luma/chroma */ + REG_GET(VBLANK_PARAMETERS_1, + REFCYC_PER_PTE_GROUP_VBLANK_L, &dlg_attr->refcyc_per_pte_group_vblank_l); + + REG_GET(VBLANK_PARAMETERS_3, + REFCYC_PER_META_CHUNK_VBLANK_L, &dlg_attr->refcyc_per_meta_chunk_vblank_l); + + if (REG(NOM_PARAMETERS_0)) + REG_GET(NOM_PARAMETERS_0, + DST_Y_PER_PTE_ROW_NOM_L, &dlg_attr->dst_y_per_pte_row_nom_l); + + if (REG(NOM_PARAMETERS_1)) + REG_GET(NOM_PARAMETERS_1, + REFCYC_PER_PTE_GROUP_NOM_L, &dlg_attr->refcyc_per_pte_group_nom_l); + + REG_GET(NOM_PARAMETERS_4, + DST_Y_PER_META_ROW_NOM_L, &dlg_attr->dst_y_per_meta_row_nom_l); + + REG_GET(NOM_PARAMETERS_5, + REFCYC_PER_META_CHUNK_NOM_L, &dlg_attr->refcyc_per_meta_chunk_nom_l); + + REG_GET_2(PER_LINE_DELIVERY_PRE, + REFCYC_PER_LINE_DELIVERY_PRE_L, &dlg_attr->refcyc_per_line_delivery_pre_l, + REFCYC_PER_LINE_DELIVERY_PRE_C, &dlg_attr->refcyc_per_line_delivery_pre_c); + + REG_GET_2(PER_LINE_DELIVERY, + REFCYC_PER_LINE_DELIVERY_L, &dlg_attr->refcyc_per_line_delivery_l, + REFCYC_PER_LINE_DELIVERY_C, &dlg_attr->refcyc_per_line_delivery_c); + + if (REG(PREFETCH_SETTINS_C)) + REG_GET(PREFETCH_SETTINS_C, + VRATIO_PREFETCH_C, &dlg_attr->vratio_prefetch_c); + else + REG_GET(PREFETCH_SETTINGS_C, + VRATIO_PREFETCH_C, &dlg_attr->vratio_prefetch_c); + + REG_GET(VBLANK_PARAMETERS_2, + REFCYC_PER_PTE_GROUP_VBLANK_C, &dlg_attr->refcyc_per_pte_group_vblank_c); + + REG_GET(VBLANK_PARAMETERS_4, + REFCYC_PER_META_CHUNK_VBLANK_C, 
&dlg_attr->refcyc_per_meta_chunk_vblank_c); + + if (REG(NOM_PARAMETERS_2)) + REG_GET(NOM_PARAMETERS_2, + DST_Y_PER_PTE_ROW_NOM_C, &dlg_attr->dst_y_per_pte_row_nom_c); + + if (REG(NOM_PARAMETERS_3)) + REG_GET(NOM_PARAMETERS_3, + REFCYC_PER_PTE_GROUP_NOM_C, &dlg_attr->refcyc_per_pte_group_nom_c); + + REG_GET(NOM_PARAMETERS_6, + DST_Y_PER_META_ROW_NOM_C, &dlg_attr->dst_y_per_meta_row_nom_c); + + REG_GET(NOM_PARAMETERS_7, + REFCYC_PER_META_CHUNK_NOM_C, &dlg_attr->refcyc_per_meta_chunk_nom_c); + + /* TTU - per hubp */ + REG_GET_2(DCN_TTU_QOS_WM, + QoS_LEVEL_LOW_WM, &ttu_attr->qos_level_low_wm, + QoS_LEVEL_HIGH_WM, &ttu_attr->qos_level_high_wm); + + REG_GET_2(DCN_GLOBAL_TTU_CNTL, + MIN_TTU_VBLANK, &ttu_attr->min_ttu_vblank, + QoS_LEVEL_FLIP, &ttu_attr->qos_level_flip); + + /* TTU - per luma/chroma */ + /* Assumed surf0 is luma and 1 is chroma */ + + REG_GET_3(DCN_SURF0_TTU_CNTL0, + REFCYC_PER_REQ_DELIVERY, &ttu_attr->refcyc_per_req_delivery_l, + QoS_LEVEL_FIXED, &ttu_attr->qos_level_fixed_l, + QoS_RAMP_DISABLE, &ttu_attr->qos_ramp_disable_l); + + REG_GET(DCN_SURF0_TTU_CNTL1, + REFCYC_PER_REQ_DELIVERY_PRE, + &ttu_attr->refcyc_per_req_delivery_pre_l); + + REG_GET_3(DCN_SURF1_TTU_CNTL0, + REFCYC_PER_REQ_DELIVERY, &ttu_attr->refcyc_per_req_delivery_c, + QoS_LEVEL_FIXED, &ttu_attr->qos_level_fixed_c, + QoS_RAMP_DISABLE, &ttu_attr->qos_ramp_disable_c); + + REG_GET(DCN_SURF1_TTU_CNTL1, + REFCYC_PER_REQ_DELIVERY_PRE, + &ttu_attr->refcyc_per_req_delivery_pre_c); + + /* Rest of hubp */ + REG_GET(DCSURF_SURFACE_CONFIG, + SURFACE_PIXEL_FORMAT, &s->pixel_format); + + REG_GET(DCSURF_SURFACE_EARLIEST_INUSE_HIGH, + SURFACE_EARLIEST_INUSE_ADDRESS_HIGH, &s->inuse_addr_hi); + + REG_GET(DCSURF_SURFACE_EARLIEST_INUSE, + SURFACE_EARLIEST_INUSE_ADDRESS, &s->inuse_addr_lo); + + REG_GET_2(DCSURF_PRI_VIEWPORT_DIMENSION, + PRI_VIEWPORT_WIDTH, &s->viewport_width, + PRI_VIEWPORT_HEIGHT, &s->viewport_height); + + REG_GET_2(DCSURF_SURFACE_CONFIG, + ROTATION_ANGLE, &s->rotation_angle, + H_MIRROR_EN, &s->h_mirror_en); + + REG_GET(DCSURF_TILING_CONFIG, + SW_MODE, &s->sw_mode); + + REG_GET(DCSURF_SURFACE_CONTROL, + PRIMARY_SURFACE_DCC_EN, &s->dcc_en); + + REG_GET_3(DCHUBP_CNTL, + HUBP_BLANK_EN, &s->blank_en, + HUBP_TTU_DISABLE, &s->ttu_disable, + HUBP_UNDERFLOW_STATUS, &s->underflow_status); + + REG_GET(DCN_GLOBAL_TTU_CNTL, + MIN_TTU_VBLANK, &s->min_ttu_vblank); + + REG_GET_2(DCN_TTU_QOS_WM, + QoS_LEVEL_LOW_WM, &s->qos_level_low_wm, + QoS_LEVEL_HIGH_WM, &s->qos_level_high_wm); + +} + +void hubp2_read_state(struct hubp *hubp) +{ + struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); + struct dcn_hubp_state *s = &hubp2->state; + struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs; + + hubp2_read_state_common(hubp); + + REG_GET_8(DCHUBP_REQ_SIZE_CONFIG, + CHUNK_SIZE, &rq_regs->rq_regs_l.chunk_size, + MIN_CHUNK_SIZE, &rq_regs->rq_regs_l.min_chunk_size, + META_CHUNK_SIZE, &rq_regs->rq_regs_l.meta_chunk_size, + MIN_META_CHUNK_SIZE, &rq_regs->rq_regs_l.min_meta_chunk_size, + DPTE_GROUP_SIZE, &rq_regs->rq_regs_l.dpte_group_size, + MPTE_GROUP_SIZE, &rq_regs->rq_regs_l.mpte_group_size, + SWATH_HEIGHT, &rq_regs->rq_regs_l.swath_height, + PTE_ROW_HEIGHT_LINEAR, &rq_regs->rq_regs_l.pte_row_height_linear); + + REG_GET_8(DCHUBP_REQ_SIZE_CONFIG_C, + CHUNK_SIZE_C, &rq_regs->rq_regs_c.chunk_size, + MIN_CHUNK_SIZE_C, &rq_regs->rq_regs_c.min_chunk_size, + META_CHUNK_SIZE_C, &rq_regs->rq_regs_c.meta_chunk_size, + MIN_META_CHUNK_SIZE_C, &rq_regs->rq_regs_c.min_meta_chunk_size, + DPTE_GROUP_SIZE_C, &rq_regs->rq_regs_c.dpte_group_size, + 
MPTE_GROUP_SIZE_C, &rq_regs->rq_regs_c.mpte_group_size, + SWATH_HEIGHT_C, &rq_regs->rq_regs_c.swath_height, + PTE_ROW_HEIGHT_LINEAR_C, &rq_regs->rq_regs_c.pte_row_height_linear); + +} + static struct hubp_funcs dcn20_hubp_funcs = { .hubp_enable_tripleBuffer = hubp2_enable_triplebuffer, .hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled, .hubp_program_surface_flip_and_addr = hubp2_program_surface_flip_and_addr, .hubp_program_surface_config = hubp2_program_surface_config, - .hubp_is_flip_pending = hubp1_is_flip_pending, + .hubp_is_flip_pending = hubp2_is_flip_pending, .hubp_setup = hubp2_setup, .hubp_setup_interdependent = hubp2_setup_interdependent, .hubp_set_vm_system_aperture_settings = hubp2_set_vm_system_aperture_settings, - .set_blank = hubp1_set_blank, - .dcc_control = hubp1_dcc_control, + .set_blank = hubp2_set_blank, + .dcc_control = hubp2_dcc_control, .hubp_update_dchub = hubp2_update_dchub, .mem_program_viewport = min_set_viewport, .set_cursor_attributes = hubp2_cursor_set_attributes, - .set_cursor_position = hubp1_cursor_set_position, - .hubp_clk_cntl = hubp1_clk_cntl, - .hubp_vtg_sel = hubp1_vtg_sel, + .set_cursor_position = hubp2_cursor_set_position, + .hubp_clk_cntl = hubp2_clk_cntl, + .hubp_vtg_sel = hubp2_vtg_sel, .dmdata_set_attributes = hubp2_dmdata_set_attributes, .dmdata_load = hubp2_dmdata_load, .dmdata_status_done = hubp2_dmdata_status_done, - .hubp_read_state = hubp1_read_state, - .hubp_clear_underflow = hubp1_clear_underflow, + .hubp_read_state = hubp2_read_state, + .hubp_clear_underflow = hubp2_clear_underflow, .hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl, .hubp_init = hubp1_init, }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h index 2c6405a62fc1..c8418235e154 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h @@ -267,6 +267,24 @@ bool hubp2_program_surface_flip_and_addr( const struct dc_plane_address *address, bool flip_immediate); +void hubp2_dcc_control(struct hubp *hubp, bool enable, + bool independent_64b_blks); + +void hubp2_program_size( + struct hubp *hubp, + enum surface_pixel_format format, + const union plane_size *plane_size, + struct dc_plane_dcc_param *dcc); + +void hubp2_program_rotation( + struct hubp *hubp, + enum dc_rotation_angle rotation, + bool horizontal_mirror); + +void hubp2_program_pixel_format( + struct hubp *hubp, + enum surface_pixel_format format); + void hubp2_program_surface_config( struct hubp *hubp, enum surface_pixel_format format, @@ -277,6 +295,25 @@ void hubp2_program_surface_config( bool horizontal_mirror, unsigned int compat_level); +bool hubp2_is_flip_pending(struct hubp *hubp); + +void hubp2_set_blank(struct hubp *hubp, bool blank); + +void hubp2_cursor_set_position( + struct hubp *hubp, + const struct dc_cursor_position *pos, + const struct dc_cursor_mi_param *param); + +void hubp2_clk_cntl(struct hubp *hubp, bool enable); + +void hubp2_vtg_sel(struct hubp *hubp, uint32_t otg_inst); + +void hubp2_clear_underflow(struct hubp *hubp); + +void hubp2_read_state_common(struct hubp *hubp); + +void hubp2_read_state(struct hubp *hubp); + #endif /* __DC_MEM_INPUT_DCN20_H__ */ -- cgit v1.2.3 From 701c75cea29b725a5447c7a7d3b8b4c8472ab79a Mon Sep 17 00:00:00 2001 From: Derek Lai Date: Tue, 18 Jun 2019 14:55:57 +0800 Subject: drm/amd/display: Read max down spread [Why] When launch D10.2, driver will write DPCD 0x107 with 0x00 [How] Read MAX_DOWNSPREAD (0x0003h) then 
keep in current link settings Signed-off-by: Derek Lai Reviewed-by: Tony Cheng Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 07533cc377fd..d685cde284ec 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -532,6 +532,7 @@ static void read_edp_current_link_settings_on_detect(struct dc_link *link) uint32_t read_dpcd_retry_cnt = 10; enum dc_status status = DC_ERROR_UNEXPECTED; int i; + union max_down_spread max_down_spread = { {0} }; // Read DPCD 00101h to find out the number of lanes currently set for (i = 0; i < read_dpcd_retry_cnt; i++) { @@ -576,6 +577,12 @@ static void read_edp_current_link_settings_on_detect(struct dc_link *link) link->cur_link_settings.link_rate = link_bw_set; link->cur_link_settings.use_link_rate_set = false; } + // Read DPCD 00003h to find the max down spread. + core_link_read_dpcd(link, DP_MAX_DOWNSPREAD, + &max_down_spread.raw, sizeof(max_down_spread)); + link->cur_link_settings.link_spread = + max_down_spread.bits.MAX_DOWN_SPREAD ? + LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED; } static bool detect_dp( -- cgit v1.2.3 From 8fca3d94157bfd401801b18f0fa8f1d0c693f2d7 Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Tue, 18 Jun 2019 15:57:03 -0400 Subject: drm/amd/display: Remove dsc disable_ich flag programming. Current default is sufficient for a flag that does not change. Signed-off-by: Dmytro Laktyushkin Reviewed-by: Charlene Liu Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c | 8 +++++--- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h | 4 ++-- 2 files changed, 7 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c index ffd0014ec3b5..e870caa8d4fa 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c @@ -436,7 +436,7 @@ static void dsc_init_reg_values(struct dsc_reg_values *reg_vals) reg_vals->ich_reset_at_eol = 0; reg_vals->alternate_ich_encoding_en = 0; reg_vals->rc_buffer_model_size = 0; - reg_vals->disable_ich = 0; + /*reg_vals->disable_ich = 0;*/ reg_vals->dsc_dbg_en = 0; for (i = 0; i < 4; i++) @@ -518,9 +518,11 @@ static void dsc_write_to_registers(struct display_stream_compressor *dsc, const ALTERNATE_ICH_ENCODING_EN, reg_vals->alternate_ich_encoding_en, NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION, reg_vals->num_slices_v - 1); - REG_SET_2(DSCC_CONFIG1, 0, + REG_SET(DSCC_CONFIG1, 0, + DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE, reg_vals->rc_buffer_model_size); + /*REG_SET_2(DSCC_CONFIG1, 0, DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE, reg_vals->rc_buffer_model_size, - DSCC_DISABLE_ICH, reg_vals->disable_ich); + DSCC_DISABLE_ICH, reg_vals->disable_ich);*/ REG_SET_4(DSCC_INTERRUPT_CONTROL_STATUS, 0, DSCC_RATE_CONTROL_BUFFER_MODEL0_OVERFLOW_OCCURRED_INT_EN, reg_vals->rc_buffer_model_overflow_int_en[0], diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h index 168865a16288..4e2fb38390a4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h @@ -103,7 +103,7 @@ DSC_SF(DSCC0_DSCC_CONFIG0, ALTERNATE_ICH_ENCODING_EN, 
mask_sh), \ DSC_SF(DSCC0_DSCC_CONFIG0, NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION, mask_sh), \ DSC_SF(DSCC0_DSCC_CONFIG1, DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE, mask_sh), \ - DSC_SF(DSCC0_DSCC_CONFIG1, DSCC_DISABLE_ICH, mask_sh), \ + /*DSC_SF(DSCC0_DSCC_CONFIG1, DSCC_DISABLE_ICH, mask_sh),*/ \ DSC_SF(DSCC0_DSCC_STATUS, DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING, mask_sh), \ DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED, mask_sh), \ DSC_SF(DSCC0_DSCC_INTERRUPT_CONTROL_STATUS, DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED, mask_sh), \ @@ -278,7 +278,7 @@ type ALTERNATE_ICH_ENCODING_EN; \ type NUMBER_OF_SLICES_IN_VERTICAL_DIRECTION; \ type DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE; \ - type DSCC_DISABLE_ICH; \ + /*type DSCC_DISABLE_ICH;*/ \ type DSCC_DOUBLE_BUFFER_REG_UPDATE_PENDING; \ type DSCC_RATE_BUFFER0_OVERFLOW_OCCURRED; \ type DSCC_RATE_BUFFER1_OVERFLOW_OCCURRED; \ -- cgit v1.2.3 From 75c35000235f3662f2810e9a59b0c8eed045432e Mon Sep 17 00:00:00 2001 From: Nikola Cornij Date: Wed, 19 Jun 2019 14:30:52 -0400 Subject: drm/amd/display: Power-gate all DSCs at driver init time [why] DSC should be powered-on only on as-needed basis, i.e. if the mode requires it [how] Loop over all the DSCs at driver init time and power-gate each Signed-off-by: Nikola Cornij Reviewed-by: Nevenko Stupar Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index ddf15a3715e0..5ecf965bc3f5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -624,6 +624,10 @@ static void dcn20_init_hw(struct dc *dc) } } + /* Power gate DSCs */ + for (i = 0; i < res_pool->res_cap->num_dsc; i++) + dcn20_dsc_pg_control(hws, res_pool->dscs[i]->inst, false); + /* Blank pixel data with OPP DPG */ for (i = 0; i < dc->res_pool->timing_generator_count; i++) { struct timing_generator *tg = dc->res_pool->timing_generators[i]; -- cgit v1.2.3 From 1d0610bc26d8e2fe82de77d8be3e1ed0958feddc Mon Sep 17 00:00:00 2001 From: Alvin Lee Date: Thu, 20 Jun 2019 13:03:25 -0400 Subject: drm/amd/display: Disable Audio on reinitialize hardware [Why] When we recover from hang, we do not want to skip the audio enable call. 
[How] Disable audio in dc_reinitialize_hardware Signed-off-by: Alvin Lee Reviewed-by: Jun Lei Acked-by: Leo Li Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c | 1 + .../drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 20 ++++++++++++++++---- 2 files changed, 17 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c index 614a941eb9f2..e9a7a7af11df 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c @@ -331,6 +331,7 @@ void dcn20_clk_mgr_construct( struct dccg *dccg) { clk_mgr->base.ctx = ctx; + clk_mgr->pp_smu = pp_smu; clk_mgr->base.funcs = &dcn2_funcs; clk_mgr->regs = &clk_mgr_regs; clk_mgr->clk_mgr_shift = &clk_mgr_shift; diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 3a937e297a89..41f5ef6c5085 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -965,11 +965,17 @@ void hwss_edp_backlight_control( void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx) { /* notify audio driver for audio modes of monitor */ - struct dc *core_dc = pipe_ctx->stream->ctx->dc; + struct dc *core_dc; struct pp_smu_funcs *pp_smu = NULL; - struct clk_mgr *clk_mgr = core_dc->clk_mgr; + struct clk_mgr *clk_mgr; unsigned int i, num_audio = 1; + if (!pipe_ctx->stream) + return; + + core_dc = pipe_ctx->stream->ctx->dc; + clk_mgr = core_dc->clk_mgr; + if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == true) return; @@ -999,9 +1005,15 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx) void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option) { - struct dc *dc = pipe_ctx->stream->ctx->dc; + struct dc *dc; struct pp_smu_funcs *pp_smu = NULL; - struct clk_mgr *clk_mgr = dc->clk_mgr; + struct clk_mgr *clk_mgr; + + if (!pipe_ctx || !pipe_ctx->stream) + return; + + dc = pipe_ctx->stream->ctx->dc; + clk_mgr = dc->clk_mgr; if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == false) return; -- cgit v1.2.3 From 5a42dc2be09a550777619a29187867d0423c353a Mon Sep 17 00:00:00 2001 From: Zi Yu Liao Date: Thu, 20 Jun 2019 10:55:26 -0400 Subject: drm/amd/display: fix DMCU hang when going into Modern Standby [why] When the system is going into suspend, set_backlight gets called after the eDP got blanked. Since smooth brightness is enabled, the driver will make a call into the DMCU to ramp the brightness. The DMCU would try to enable ABM to do so. But since the display is blanked, this ends up causing ABM1_ACE_DBUF_REG_UPDATE_PENDING to get stuck at 1, which results in a dead lock in the DMCU firmware. [how] Disable brightness ramping when the eDP display is blanked. 
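The guard itself is small: the ramp length is what drags the DMCU/ABM into the backlight update, so a pipe with no plane state must fall back to an immediate, non-ramped write. Conceptually it reduces to the check sketched below, which mirrors the condition added in the diff that follows rather than introducing new logic.

	/* The pipe drives this link but scans out nothing: skip smooth
	 * brightness, otherwise the DMCU tries to run the ABM ramp
	 * against a blanked pipe and wedges on
	 * ABM1_ACE_DBUF_REG_UPDATE_PENDING.
	 */
	if (pipe_ctx->plane_state == NULL)
		frame_ramp = 0;	/* apply the new level in one step */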
Signed-off-by: Zi Yu Liao Reviewed-by: Eric Yang Acked-by: Anthony Koo Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index d685cde284ec..5852dd512e2e 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -2352,7 +2352,7 @@ bool dc_link_set_backlight_level(const struct dc_link *link, if (core_dc->current_state->res_ctx.pipe_ctx[i].stream) { if (core_dc->current_state->res_ctx. pipe_ctx[i].stream->link - == link) + == link) { /* DMCU -1 for all controller id values, * therefore +1 here */ @@ -2360,6 +2360,13 @@ bool dc_link_set_backlight_level(const struct dc_link *link, core_dc->current_state-> res_ctx.pipe_ctx[i].stream_res.tg->inst + 1; + + /* Disable brightness ramping when the display is blanked + * as it can hang the DMCU + */ + if (core_dc->current_state->res_ctx.pipe_ctx[i].plane_state == NULL) + frame_ramp = 0; + } } } abm->funcs->set_backlight_level_pwm( -- cgit v1.2.3 From 03f3e40c8b5c227e1435bee0afa2cbc657c15cf4 Mon Sep 17 00:00:00 2001 From: Charlene Liu Date: Wed, 19 Jun 2019 21:35:35 -0400 Subject: drm/amd/display: Do not fill Null packet in the blank period [Description] Do not fill Null packet in the blank period for new packet gen This is based on HW IP team recommended default setting change. Signed-off-by: Charlene Liu Reviewed-by: Dmytro Laktyushkin Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c | 3 ++- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h | 2 ++ 2 files changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c index 128e040b7848..89e6a4c34018 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c @@ -512,11 +512,12 @@ void enc1_stream_encoder_hdmi_set_stream_attribute( enc1_stream_encoder_set_stream_attribute_helper(enc1, crtc_timing); /* setup HDMI engine */ - REG_UPDATE_5(HDMI_CONTROL, + REG_UPDATE_6(HDMI_CONTROL, HDMI_PACKET_GEN_VERSION, 1, HDMI_KEEPOUT_MODE, 1, HDMI_DEEP_COLOR_ENABLE, 0, HDMI_DATA_SCRAMBLE_EN, 0, + HDMI_NO_EXTRA_NULL_PACKET_FILLED, 1, HDMI_CLOCK_CHANNEL_RATE, 0); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h index 075e49c1283a..ab0ead3c3f46 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h @@ -189,6 +189,7 @@ struct dcn10_stream_enc_registers { SE_SF(DIG0_HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, mask_sh),\ SE_SF(DIG0_HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, mask_sh),\ SE_SF(DIG0_HDMI_CONTROL, HDMI_DATA_SCRAMBLE_EN, mask_sh),\ + SE_SF(DIG0_HDMI_CONTROL, HDMI_NO_EXTRA_NULL_PACKET_FILLED, mask_sh),\ SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, mask_sh),\ SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, mask_sh),\ SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, mask_sh),\ @@ -374,6 +375,7 @@ struct dcn10_stream_enc_registers { type HDMI_GC_SEND;\ type HDMI_NULL_SEND;\ type HDMI_DATA_SCRAMBLE_EN;\ + type HDMI_NO_EXTRA_NULL_PACKET_FILLED;\ type 
HDMI_AUDIO_INFO_SEND;\ type AFMT_AUDIO_INFO_UPDATE;\ type HDMI_AUDIO_INFO_LINE;\ -- cgit v1.2.3 From 252f3d950d2e7c45869096aa1efc883464149b31 Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Fri, 31 May 2019 14:17:43 -0400 Subject: drm/amd/display: Remove unnecessary NULL check in set_preferred_link_settings [Why] link_stream is never NULL here as we've dereferenced it a couple lines before and have done so for a couple months now. [How] - Drop the NULL check. - Initialize where we know link_stream is non-NULL Signed-off-by: Harry Wentland Reviewed-by: Nicholas Kazlauskas Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 5852dd512e2e..595f2a3cfeb7 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -3023,8 +3023,10 @@ void dc_link_set_preferred_link_settings(struct dc *dc, for (i = 0; i < MAX_PIPES; i++) { pipe = &dc->current_state->res_ctx.pipe_ctx[i]; if (pipe->stream && pipe->stream->link) { - if (pipe->stream->link == link) + if (pipe->stream->link == link) { + link_stream = pipe->stream; break; + } } } @@ -3032,14 +3034,11 @@ void dc_link_set_preferred_link_settings(struct dc *dc, if (i == MAX_PIPES) return; - link_stream = link->dc->current_state->res_ctx.pipe_ctx[i].stream; - /* Cannot retrain link if backend is off */ if (link_stream->dpms_off) return; - if (link_stream) - decide_link_settings(link_stream, &store_settings); + decide_link_settings(link_stream, &store_settings); if ((store_settings.lane_count != LANE_COUNT_UNKNOWN) && (store_settings.link_rate != LINK_RATE_UNKNOWN)) -- cgit v1.2.3 From 5feb9f07a2035394ed808e266d294a9df3a8d544 Mon Sep 17 00:00:00 2001 From: Tai Man Date: Fri, 7 Jun 2019 17:32:27 -0400 Subject: drm/amd/display: use encoder's engine id to find matched free audio device [Why] On some platforms, the encoder id 3 is not populated. So the encoders are not stored in right order as index (id: 0, 1, 2, 4, 5) at pool. This would cause encoders id 4 & id 5 to fail when finding corresponding audio device, defaulting to the first available audio device. As result, we cannot stream audio into two DP ports with encoders id 4 & id 5. [How] It need to create enough audio device objects (0 - 5) to perform matching. Then use encoder engine id to find matched audio device. Signed-off-by: Tai Man Reviewed-by: Charlene Liu Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 51a78283a86d..a0e29c37ab69 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -258,7 +258,7 @@ bool resource_construct( * PORT_CONNECTIVITY == 1 (as instructed by HW team). 
*/ update_num_audio(&straps, &num_audio, &pool->audio_support); - for (i = 0; i < pool->pipe_count && i < num_audio; i++) { + for (i = 0; i < caps->num_audio; i++) { struct audio *aud = create_funcs->create_audio(ctx, i); if (aud == NULL) { @@ -1669,6 +1669,12 @@ static struct audio *find_first_free_audio( return pool->audios[i]; } } + + /* use engine id to find free audio */ + if ((id < pool->audio_count) && (res_ctx->is_audio_acquired[id] == false)) { + return pool->audios[id]; + } + /*not found the matching one, first come first serve*/ for (i = 0; i < pool->audio_count; i++) { if (res_ctx->is_audio_acquired[i] == false) { -- cgit v1.2.3 From 18b401874aee10c80b5745c9b93280dae5a59809 Mon Sep 17 00:00:00 2001 From: Su Sung Chung Date: Fri, 21 Jun 2019 16:14:36 -0400 Subject: drm/amd/display: fix not calling ppsmu to trigger PME [why] dcn20_clk_mgr_construct was not initializing pp_smu, and PME call gets filtered out by the null check [how] initialize pp_smu dcn20_clk_mgr_construct Signed-off-by: Su Sung Chung Reviewed-by: Eric Yang Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c index e9a7a7af11df..4842c91771d8 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c @@ -346,6 +346,8 @@ void dcn20_clk_mgr_construct( clk_mgr->base.dprefclk_khz = 700000; // 700 MHz planned if VCO is 3.85 GHz, will be retrieved + clk_mgr->pp_smu = pp_smu; + if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { dcn2_funcs.update_clocks = dcn2_update_clocks_fpga; clk_mgr->dentist_vco_freq_khz = 3850000; -- cgit v1.2.3 From 1b3c61036e9fb643d35b09aa8172420c3da21b2d Mon Sep 17 00:00:00 2001 From: Alvin Lee Date: Fri, 21 Jun 2019 17:58:41 -0400 Subject: drm/amd/display: Remove second initialization of pp_smu [why] We initialize pp_smu twice [how] Remove second initialization of pp_smu Signed-off-by: Alvin Lee Reviewed-by: Jaehyun Chung Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c index 4842c91771d8..e9a7a7af11df 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c @@ -346,8 +346,6 @@ void dcn20_clk_mgr_construct( clk_mgr->base.dprefclk_khz = 700000; // 700 MHz planned if VCO is 3.85 GHz, will be retrieved - clk_mgr->pp_smu = pp_smu; - if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { dcn2_funcs.update_clocks = dcn2_update_clocks_fpga; clk_mgr->dentist_vco_freq_khz = 3850000; -- cgit v1.2.3 From 7e6472966e8341bf72f558babfdafac8aeff3257 Mon Sep 17 00:00:00 2001 From: Fatemeh Darbehani Date: Fri, 21 Jun 2019 17:44:50 -0400 Subject: drm/amd/display: Change min_h_sync_width from 8 to 4 [Why] Some display's hsync width is lower than the minimum dcn20 is set to support right now. This will cause optc1_validate_timing to fail which eventually will result in wrong set mode. This was set to 8 as per HW team's request for no valid reason. 
[How] Changing min_h_sync_width to 4 will let us validate timing for preffered mode and light up the headset. This change was made to Vega 10 before for a similar issue. Signed-off-by: Fatemeh Darbehani Reviewed-by: Joshua Aberback Acked-by: Aric Cyr Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c index 26a66ccf6e72..1ae973962d53 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c @@ -535,7 +535,7 @@ void dcn20_timing_generator_init(struct optc *optc1) optc1->min_h_blank = 32; optc1->min_v_blank = 3; optc1->min_v_blank_interlace = 5; - optc1->min_h_sync_width = 8; + optc1->min_h_sync_width = 4;// Minimum HSYNC = 8 pixels asked By HW in the first place for no actual reason. Oculus Rift S will not light up with 8 as it's hsyncWidth is 6. Changing it to 4 to fix that issue. optc1->min_v_sync_width = 1; optc1->comb_opp_id = 0xf; } -- cgit v1.2.3 From 351960b09850deaa6e74f4e5f39d53deb1114833 Mon Sep 17 00:00:00 2001 From: Aric Cyr Date: Sun, 23 Jun 2019 17:27:43 -0400 Subject: drm/amd/display: 3.2.40 Signed-off-by: Aric Cyr Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 0aad22b1ef36..14664bed8c84 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -39,7 +39,7 @@ #include "inc/hw/dmcu.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.2.39" +#define DC_VER "3.2.40" #define MAX_SURFACES 3 #define MAX_PLANES 6 -- cgit v1.2.3 From 4c6a9618076c2690a361e8bc32549720f2f42305 Mon Sep 17 00:00:00 2001 From: Alvin Lee Date: Mon, 24 Jun 2019 09:49:44 -0400 Subject: drm/amd/display: Wait for flip to complete [why] In pipe split issue occurs when we program immediate flip while vsync flip is pending [how] Don't program immediate flip until flip is no longer pending Signed-off-by: Alvin Lee Reviewed-by: Jaehyun Chung Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 5ecf965bc3f5..9daab57a96ea 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -1308,6 +1308,17 @@ void dcn20_pipe_control_lock( if (pipe->plane_state != NULL) flip_immediate = pipe->plane_state->flip_immediate; + if (flip_immediate && lock) { + while (pipe->plane_res.hubp->funcs->hubp_is_flip_pending(pipe->plane_res.hubp)) { + udelay(1); + } + + if (pipe->bottom_pipe != NULL) + while (pipe->bottom_pipe->plane_res.hubp->funcs->hubp_is_flip_pending(pipe->bottom_pipe->plane_res.hubp)) { + udelay(1); + } + } + /* In flip immediate and pipe splitting case, we need to use GSL * for synchronization. Only do setup on locking and on flip type change. 
*/ -- cgit v1.2.3 From d40605b6d088b20827e442903022c65f0f165c84 Mon Sep 17 00:00:00 2001 From: Murton Liu Date: Mon, 24 Jun 2019 11:28:06 -0400 Subject: drm/amd/display: Implement generic MUX registers (v2) [Why] Logic & structures for generic regs does not exist in DC currently. [How] Implement register masks/shifts and relevant functions for generic mux, similar to existing HPD and DDC objects. V2: fix includes for kalloc/free (Alex) Signed-off-by: Murton Liu Reviewed-by: Aric Cyr Acked-by: Joshua Aberback Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/gpio/Makefile | 2 +- .../amd/display/dc/gpio/dcn20/hw_factory_dcn20.c | 41 ++++++- .../display/dc/gpio/diagnostics/hw_factory_diag.c | 1 + drivers/gpu/drm/amd/display/dc/gpio/generic_regs.h | 66 ++++++++++ drivers/gpu/drm/amd/display/dc/gpio/hw_factory.h | 3 + drivers/gpu/drm/amd/display/dc/gpio/hw_generic.c | 134 +++++++++++++++++++++ drivers/gpu/drm/amd/display/dc/gpio/hw_generic.h | 46 +++++++ 7 files changed, 290 insertions(+), 3 deletions(-) create mode 100644 drivers/gpu/drm/amd/display/dc/gpio/generic_regs.h create mode 100644 drivers/gpu/drm/amd/display/dc/gpio/hw_generic.c create mode 100644 drivers/gpu/drm/amd/display/dc/gpio/hw_generic.h (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/gpio/Makefile b/drivers/gpu/drm/amd/display/dc/gpio/Makefile index c3d92878875d..113affea49bf 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/Makefile +++ b/drivers/gpu/drm/amd/display/dc/gpio/Makefile @@ -24,7 +24,7 @@ # It provides the control and status of HW GPIO pins. GPIO = gpio_base.o gpio_service.o hw_factory.o \ - hw_gpio.o hw_hpd.o hw_ddc.o hw_translate.o + hw_gpio.o hw_hpd.o hw_ddc.o hw_generic.o hw_translate.o AMD_DAL_GPIO = $(addprefix $(AMDDALPATH)/dc/gpio/,$(GPIO)) diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c index abd76d855375..afb7c0f111bf 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c @@ -31,6 +31,7 @@ #include "../hw_gpio.h" #include "../hw_ddc.h" #include "../hw_hpd.h" +#include "../hw_generic.h" #include "hw_factory_dcn20.h" @@ -138,6 +139,32 @@ static const struct ddc_sh_mask ddc_mask[] = { DDC_MASK_SH_LIST_DCN2(_MASK, 6) }; +#include "../generic_regs.h" + +/* set field name */ +#define SF_GENERIC(reg_name, field_name, post_fix)\ + .field_name = reg_name ## __ ## field_name ## post_fix + +#define generic_regs(id) \ +{\ + GENERIC_REG_LIST(id)\ +} + +static const struct generic_registers generic_regs[] = { + generic_regs(A), + generic_regs(B), +}; + +static const struct generic_sh_mask generic_shift[] = { + GENERIC_MASK_SH_LIST(__SHIFT, A), + GENERIC_MASK_SH_LIST(__SHIFT, B), +}; + +static const struct generic_sh_mask generic_mask[] = { + GENERIC_MASK_SH_LIST(_MASK, A), + GENERIC_MASK_SH_LIST(_MASK, B), +}; + static void define_ddc_registers( struct hw_gpio_pin *pin, uint32_t en) @@ -173,17 +200,27 @@ static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en) hpd->base.regs = &hpd_regs[en].gpio; } +static void define_generic_registers(struct hw_gpio_pin *pin, uint32_t en) +{ + struct hw_generic *generic = HW_GENERIC_FROM_BASE(pin); + + generic->regs = &generic_regs[en]; + generic->shifts = &generic_shift[en]; + generic->masks = &generic_mask[en]; + generic->base.regs = &generic_regs[en].gpio; +} /* fucntion table */ static const struct hw_factory_funcs funcs = { 
.create_ddc_data = dal_hw_ddc_create, .create_ddc_clock = dal_hw_ddc_create, - .create_generic = NULL, + .create_generic = dal_hw_generic_create, .create_hpd = dal_hw_hpd_create, .create_sync = NULL, .create_gsl = NULL, .define_hpd_registers = define_hpd_registers, - .define_ddc_registers = define_ddc_registers + .define_ddc_registers = define_ddc_registers, + .define_generic_registers = define_generic_registers, }; /* * dal_hw_factory_dcn10_init diff --git a/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c index 26695b963c58..f15288c3986e 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c @@ -38,6 +38,7 @@ #include "../hw_gpio.h" #include "../hw_ddc.h" #include "../hw_hpd.h" +#include "../hw_generic.h" /* function table */ static const struct hw_factory_funcs funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/gpio/generic_regs.h b/drivers/gpu/drm/amd/display/dc/gpio/generic_regs.h new file mode 100644 index 000000000000..8c05295c05c2 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/gpio/generic_regs.h @@ -0,0 +1,66 @@ +/* + * Copyright 2012-16 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_GENERIC_REGS_H_ +#define DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_GENERIC_REGS_H_ + +#include "gpio_regs.h" + +#define GENERIC_GPIO_REG_LIST_ENTRY(type, cd, id) \ + .type ## _reg = REG(DC_GPIO_GENERIC_## type),\ + .type ## _mask = DC_GPIO_GENERIC_ ## type ## __DC_GPIO_GENERIC ## id ## _ ## type ## _MASK,\ + .type ## _shift = DC_GPIO_GENERIC_ ## type ## __DC_GPIO_GENERIC ## id ## _ ## type ## __SHIFT + +#define GENERIC_GPIO_REG_LIST(id) \ + {\ + GENERIC_GPIO_REG_LIST_ENTRY(MASK, cd, id),\ + GENERIC_GPIO_REG_LIST_ENTRY(A, cd, id),\ + GENERIC_GPIO_REG_LIST_ENTRY(EN, cd, id),\ + GENERIC_GPIO_REG_LIST_ENTRY(Y, cd, id)\ + } + +#define GENERIC_REG_LIST(id) \ + GENERIC_GPIO_REG_LIST(id), \ + .mux = REG(DC_GENERIC ## id),\ + +#define GENERIC_MASK_SH_LIST(mask_sh, cd) \ + {(DC_GENERIC ## cd ##__GENERIC ## cd ##_EN## mask_sh),\ + (DC_GENERIC ## cd ##__GENERIC ## cd ##_SEL## mask_sh)} + +struct generic_registers { + struct gpio_registers gpio; + uint32_t mux; +}; + +struct generic_sh_mask { + /* enable */ + uint32_t GENERIC_EN; + /* select */ + uint32_t GENERIC_SEL; + +}; + + +#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_GENERIC_REGS_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.h b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.h index 6e4dd3521935..7017c9337348 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.h +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.h @@ -63,6 +63,9 @@ struct hw_factory { void (*define_ddc_registers)( struct hw_gpio_pin *pin, uint32_t en); + void (*define_generic_registers)( + struct hw_gpio_pin *pin, + uint32_t en); } *funcs; }; diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_generic.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_generic.c new file mode 100644 index 000000000000..8b7a8ffe3cd7 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_generic.c @@ -0,0 +1,134 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include + +#include "dm_services.h" + +#include "include/gpio_types.h" +#include "hw_gpio.h" +#include "hw_generic.h" + +#include "reg_helper.h" +#include "generic_regs.h" + +#undef FN +#define FN(reg_name, field_name) \ + generic->shifts->field_name, generic->masks->field_name + +#define CTX \ + generic->base.base.ctx +#define REG(reg)\ + (generic->regs->reg) + +static void dal_hw_generic_construct( + struct hw_generic *pin, + enum gpio_id id, + uint32_t en, + struct dc_context *ctx) +{ + dal_hw_gpio_construct(&pin->base, id, en, ctx); +} + +static void dal_hw_generic_destruct( + struct hw_generic *pin) +{ + dal_hw_gpio_destruct(&pin->base); +} + +static void destroy( + struct hw_gpio_pin **ptr) +{ + struct hw_generic *generic = HW_GENERIC_FROM_BASE(*ptr); + + dal_hw_generic_destruct(generic); + + kfree(generic); + + *ptr = NULL; +} + +static enum gpio_result set_config( + struct hw_gpio_pin *ptr, + const struct gpio_config_data *config_data) +{ + struct hw_generic *generic = HW_GENERIC_FROM_BASE(ptr); + + if (!config_data) + return GPIO_RESULT_INVALID_DATA; + + REG_UPDATE_2(mux, + GENERIC_EN, config_data->config.generic_mux.enable_output_from_mux, + GENERIC_SEL, config_data->config.generic_mux.mux_select); + + return GPIO_RESULT_OK; +} + +static const struct hw_gpio_pin_funcs funcs = { + .destroy = destroy, + .open = dal_hw_gpio_open, + .get_value = dal_hw_gpio_get_value, + .set_value = dal_hw_gpio_set_value, + .set_config = set_config, + .change_mode = dal_hw_gpio_change_mode, + .close = dal_hw_gpio_close, +}; + +static void construct( + struct hw_generic *generic, + enum gpio_id id, + uint32_t en, + struct dc_context *ctx) +{ + dal_hw_generic_construct(generic, id, en, ctx); + generic->base.base.funcs = &funcs; +} + +struct hw_gpio_pin *dal_hw_generic_create( + struct dc_context *ctx, + enum gpio_id id, + uint32_t en) +{ + struct hw_generic *generic; + + if (id != GPIO_ID_GENERIC) { + ASSERT_CRITICAL(false); + return NULL; + } + + if ((en < GPIO_GENERIC_MIN) || (en > GPIO_GENERIC_MAX)) { + ASSERT_CRITICAL(false); + return NULL; + } + + generic = kzalloc(sizeof(struct hw_generic), GFP_KERNEL); + if (!generic) { + ASSERT_CRITICAL(false); + return NULL; + } + + construct(generic, id, en, ctx); + return &generic->base.base; +} diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_generic.h b/drivers/gpu/drm/amd/display/dc/gpio/hw_generic.h new file mode 100644 index 000000000000..3ea1c13e3ea6 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_generic.h @@ -0,0 +1,46 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DAL_HW_generic_H__ +#define __DAL_HW_generic_H__ + +#include "generic_regs.h" + +struct hw_generic { + struct hw_gpio base; + const struct generic_registers *regs; + const struct generic_sh_mask *shifts; + const struct generic_sh_mask *masks; +}; + +#define HW_GENERIC_FROM_BASE(hw_gpio) \ + container_of((HW_GPIO_FROM_BASE(hw_gpio)), struct hw_generic, base) + +struct hw_gpio_pin *dal_hw_generic_create( + struct dc_context *ctx, + enum gpio_id id, + uint32_t en); + +#endif -- cgit v1.2.3 From a280a71f5e4a7c4ed44ac2cacc9718bccbb2cf65 Mon Sep 17 00:00:00 2001 From: Eric Bernstein Date: Mon, 24 Jun 2019 14:11:55 -0400 Subject: drm/amd/display: Use helper for determining HDMI signal Use helper to determine if HDMI signal when processing avmute. Signed-off-by: Eric Bernstein Reviewed-by: Charlene Liu Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 595f2a3cfeb7..0c9b64031774 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -2860,7 +2860,7 @@ void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable) { struct dc *core_dc = pipe_ctx->stream->ctx->dc; - if (pipe_ctx->stream->signal != SIGNAL_TYPE_HDMI_TYPE_A) + if (!dc_is_hdmi_signal(pipe_ctx->stream->signal)) return; core_dc->hwss.set_avmute(pipe_ctx, enable); -- cgit v1.2.3 From 008a4016c5cf922d33456916ec3fad9ac4c98962 Mon Sep 17 00:00:00 2001 From: Nikola Cornij Date: Mon, 24 Jun 2019 15:44:42 -0400 Subject: drm/amd/display: Set FEC_READY always before link training [why] Right now we FEC_READY is set only before the final link training, i.e. at mode set time. This means FEC_READY won't be set when doing link training as a response to HPD. It also fails UCD400 FEC test in DP compliance. [how] Move FEC_READY setup to link training. 
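A minimal, hedged sketch of the ordering change only: FEC_READY is decided and written as part of link training itself, so HPD-triggered retraining also sets it. The names and the DPCD helper below are illustrative stand-ins; the actual hunks for dc_link.c and dc_link_dp.c follow.

#include <stdbool.h>

#define DP_FEC_CONFIGURATION_STUB 0x120	/* assumed placeholder for the DPCD address */

static void dpcd_write(unsigned int addr, unsigned char val) { /* stub */ }

static void perform_link_training_sketch(bool fec_capable,
					 const bool *preferred_fec)
{
	/* decide FEC readiness first: preferred setting wins, else default on */
	bool fec_enable = preferred_fec ? *preferred_fec : true;

	if (fec_capable)
		dpcd_write(DP_FEC_CONFIGURATION_STUB, fec_enable ? 1 : 0);

	/* ...then set link rate/lane count and run clock recovery / channel EQ... */
}
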
Signed-off-by: Nikola Cornij Reviewed-by: Anthony Koo Acked-by: Abdoulaye Berthe Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 14 +++++--------- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 18 +++++++++++++++--- 2 files changed, 20 insertions(+), 12 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 0c9b64031774..520d014a4ed8 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -1523,15 +1523,6 @@ static enum dc_status enable_link_dp( if (link_settings.link_rate == LINK_RATE_LOW) skip_video_pattern = false; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT - if (link->preferred_training_settings.fec_enable != NULL) - fec_enable = *link->preferred_training_settings.fec_enable; - else - fec_enable = true; - - dp_set_fec_ready(link, fec_enable); -#endif - if (link->aux_access_disabled) { dc_link_dp_perform_link_training_skip_aux(link, &link_settings); @@ -1549,6 +1540,11 @@ static enum dc_status enable_link_dp( status = DC_FAIL_DP_LINK_TRAINING; #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT + if (link->preferred_training_settings.fec_enable != NULL) + fec_enable = *link->preferred_training_settings.fec_enable; + else + fec_enable = true; + dp_set_fec_enable(link, fec_enable); #endif return status; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 4442e7b1e5b5..5c8e3318239c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -1179,14 +1179,26 @@ enum link_training_result dc_link_dp_perform_link_training( bool skip_video_pattern) { enum link_training_result status = LINK_TRAINING_SUCCESS; - struct link_training_settings lt_settings; +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT + bool fec_enable; +#endif initialize_training_settings(link, link_setting, <_settings); /* 1. set link rate, lane count and spread. */ dpcd_set_link_settings(link, <_settings); +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT + if (link->preferred_training_settings.fec_enable != NULL) + fec_enable = *link->preferred_training_settings.fec_enable; + else + fec_enable = true; + + dp_set_fec_ready(link, fec_enable); +#endif + + /* 2. perform link training (set link training done * to false is done as well) */ @@ -3153,7 +3165,7 @@ void dp_set_fec_ready(struct dc_link *link, bool ready) if (link_enc->funcs->fec_set_ready && link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) { - if (link->fec_state == dc_link_fec_not_ready && ready) { + if (ready) { fec_config = 1; if (core_link_write_dpcd(link, DP_FEC_CONFIGURATION, @@ -3164,7 +3176,7 @@ void dp_set_fec_ready(struct dc_link *link, bool ready) } else { dm_error("dpcd write failed to set fec_ready"); } - } else if (link->fec_state == dc_link_fec_ready && !ready) { + } else if (link->fec_state == dc_link_fec_ready) { fec_config = 0; core_link_write_dpcd(link, DP_FEC_CONFIGURATION, -- cgit v1.2.3 From c43f89f81cc0c54c7c460f1d6365445939069c83 Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Mon, 24 Jun 2019 18:18:58 -0400 Subject: drm/amd/display: put back front end initialization sequence [Why] Seamless boot optimization removed proper front end power off sequence. In driver disable enable case, this causes driver to power gate hubp and dpp while there is still memory fetching going on, this can cause invalid memory requests to be generated which will hang data fabric. 
[How] Put back proper front end power off sequence Signed-off-by: Eric Yang Reviewed-by: Anthony Koo Acked-by: Leo Li Acked-by: Tony Cheng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 0c4340404e24..2118ea21d7e9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -1195,16 +1195,7 @@ static void dcn10_init_hw(struct dc *dc) * everything down. */ if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) { - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct hubp *hubp = dc->res_pool->hubps[i]; - struct dpp *dpp = dc->res_pool->dpps[i]; - - hubp->funcs->hubp_init(hubp); - dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst; - plane_atomic_power_down(dc, dpp, hubp); - } - - apply_DEGVIDCN10_253_wa(dc); + dc->hwss.init_pipes(dc, dc->current_state); } for (i = 0; i < dc->res_pool->audio_count; i++) { @@ -1375,10 +1366,6 @@ static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx, return result; } - - - - static bool dcn10_set_output_transfer_func(struct pipe_ctx *pipe_ctx, const struct dc_stream_state *stream) -- cgit v1.2.3 From e752058b8671c6c87e484cff144c5c6309a37253 Mon Sep 17 00:00:00 2001 From: Krunoslav Kovac Date: Tue, 18 Jun 2019 17:38:43 -0400 Subject: drm/amd/display: Optimize gamma calculations [Why&How] 1. Stack usage is pretty high as fixed31_32 struct is 8 bytes and we have functions with >30 vars on the stack. 2. Optimize gamma calculation by reducing number of calls to dc_fixpt_pow Our X points are divided into 32 regions wth 16 pts each. Each region is 2x the previous, meaning x[i] = 2*x[i-16] for i>=16. Using (2x)^gamma = 2^gamma * x^gamma, we can recursively compute powers of gamma, we just need first 16 pts to start it up. dc_fixpt_pow() is expensive, it computes x^y by doing exp(y*logx) Exp is done by Taylor series approximation, and log by Newton-like approximation that also uses exp internally. In short, it's significantly heavier than run-of-the-mill addition/subtraction/multiply. Signed-off-by: Krunoslav Kovac Reviewed-by: Anthony Koo Acked-by: Aric Cyr Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 1 - .../drm/amd/display/modules/color/color_gamma.c | 163 +++++++++++++-------- .../drm/amd/display/modules/color/color_gamma.h | 9 ++ 3 files changed, 111 insertions(+), 62 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h index 22db5682aa6c..e9a6225f4720 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h @@ -482,7 +482,6 @@ struct dc_gamma { * is_logical_identity indicates the given gamma ramp regardless of type is identity. 
*/ bool is_identity; - bool is_logical_identity; }; /* Used by both ipp amd opp functions*/ diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index 88898935a5e6..ed894cddeee5 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -40,6 +40,33 @@ static struct hw_x_point coordinates_x[MAX_HW_POINTS + 2]; static struct fixed31_32 pq_table[MAX_HW_POINTS + 2]; static struct fixed31_32 de_pq_table[MAX_HW_POINTS + 2]; +// these are helpers for calculations to reduce stack usage +// do not depend on these being preserved across calls +static struct fixed31_32 scratch_1; +static struct fixed31_32 scratch_2; +static struct translate_from_linear_space_args scratch_gamma_args; + +/* Helper to optimize gamma calculation, only use in translate_from_linear, in + * particular the dc_fixpt_pow function which is very expensive + * The idea is that our regions for X points are exponential and currently they all use + * the same number of points (NUM_PTS_IN_REGION) and in each region every point + * is exactly 2x the one at the same index in the previous region. In other words + * X[i] = 2 * X[i-NUM_PTS_IN_REGION] for i>=16 + * The other fact is that (2x)^gamma = 2^gamma * x^gamma + * So we compute and save x^gamma for the first 16 regions, and for every next region + * just multiply with 2^gamma which can be computed once, and save the result so we + * recursively compute all the values. + */ +static struct fixed31_32 pow_buffer[NUM_PTS_IN_REGION]; +static struct fixed31_32 gamma_of_2; // 2^gamma +int pow_buffer_ptr = -1; + +static const int32_t gamma_numerator01[] = { 31308, 180000, 0}; +static const int32_t gamma_numerator02[] = { 12920, 4500, 0}; +static const int32_t gamma_numerator03[] = { 55, 99, 0}; +static const int32_t gamma_numerator04[] = { 55, 99, 0}; +static const int32_t gamma_numerator05[] = { 2400, 2200, 2200}; + static bool pq_initialized; /* = false; */ static bool de_pq_initialized; /* = false; */ @@ -251,11 +278,7 @@ enum gamma_type_index { static void build_coefficients(struct gamma_coefficients *coefficients, enum gamma_type_index type) { - static const int32_t numerator01[] = { 31308, 180000, 0}; - static const int32_t numerator02[] = { 12920, 4500, 0}; - static const int32_t numerator03[] = { 55, 99, 0}; - static const int32_t numerator04[] = { 55, 99, 0}; - static const int32_t numerator05[] = { 2400, 2200, 2200}; + uint32_t i = 0; uint32_t index = 0; @@ -267,69 +290,74 @@ static void build_coefficients(struct gamma_coefficients *coefficients, enum gam do { coefficients->a0[i] = dc_fixpt_from_fraction( - numerator01[index], 10000000); + gamma_numerator01[index], 10000000); coefficients->a1[i] = dc_fixpt_from_fraction( - numerator02[index], 1000); + gamma_numerator02[index], 1000); coefficients->a2[i] = dc_fixpt_from_fraction( - numerator03[index], 1000); + gamma_numerator03[index], 1000); coefficients->a3[i] = dc_fixpt_from_fraction( - numerator04[index], 1000); + gamma_numerator04[index], 1000); coefficients->user_gamma[i] = dc_fixpt_from_fraction( - numerator05[index], 1000); + gamma_numerator05[index], 1000); ++i; } while (i != ARRAY_SIZE(coefficients->a0)); } static struct fixed31_32 translate_from_linear_space( - struct fixed31_32 arg, - struct fixed31_32 a0, - struct fixed31_32 a1, - struct fixed31_32 a2, - struct fixed31_32 a3, - struct fixed31_32 gamma) + struct translate_from_linear_space_args *args) { const struct 
fixed31_32 one = dc_fixpt_from_int(1); - if (dc_fixpt_lt(one, arg)) + if (dc_fixpt_le(one, args->arg)) return one; - if (dc_fixpt_le(arg, dc_fixpt_neg(a0))) - return dc_fixpt_sub( - a2, - dc_fixpt_mul( - dc_fixpt_add( - one, - a3), - dc_fixpt_pow( - dc_fixpt_neg(arg), - dc_fixpt_recip(gamma)))); - else if (dc_fixpt_le(a0, arg)) - return dc_fixpt_sub( - dc_fixpt_mul( - dc_fixpt_add( - one, - a3), - dc_fixpt_pow( - arg, - dc_fixpt_recip(gamma))), - a2); + if (dc_fixpt_le(args->arg, dc_fixpt_neg(args->a0))) { + scratch_1 = dc_fixpt_add(one, args->a3); + scratch_2 = dc_fixpt_pow( + dc_fixpt_neg(args->arg), + dc_fixpt_recip(args->gamma)); + scratch_1 = dc_fixpt_mul(scratch_1, scratch_2); + scratch_1 = dc_fixpt_sub(args->a2, scratch_1); + + return scratch_1; + } else if (dc_fixpt_le(args->a0, args->arg)) { + if (pow_buffer_ptr == 0) { + gamma_of_2 = dc_fixpt_pow(dc_fixpt_from_int(2), + dc_fixpt_recip(args->gamma)); + } + scratch_1 = dc_fixpt_add(one, args->a3); + if (pow_buffer_ptr < 16) + scratch_2 = dc_fixpt_pow(args->arg, + dc_fixpt_recip(args->gamma)); + else + scratch_2 = dc_fixpt_mul(gamma_of_2, + pow_buffer[pow_buffer_ptr%16]); + + pow_buffer[pow_buffer_ptr%16] = scratch_2; + pow_buffer_ptr++; + + scratch_1 = dc_fixpt_mul(scratch_1, scratch_2); + scratch_1 = dc_fixpt_sub(scratch_1, args->a2); + + return scratch_1; + } else - return dc_fixpt_mul( - arg, - a1); + return dc_fixpt_mul(args->arg, args->a1); } static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg) { struct fixed31_32 gamma = dc_fixpt_from_fraction(22, 10); - return translate_from_linear_space(arg, - dc_fixpt_zero, - dc_fixpt_zero, - dc_fixpt_zero, - dc_fixpt_zero, - gamma); + scratch_gamma_args.arg = arg; + scratch_gamma_args.a0 = dc_fixpt_zero; + scratch_gamma_args.a1 = dc_fixpt_zero; + scratch_gamma_args.a2 = dc_fixpt_zero; + scratch_gamma_args.a3 = dc_fixpt_zero; + scratch_gamma_args.gamma = gamma; + + return translate_from_linear_space(&scratch_gamma_args); } static struct fixed31_32 translate_to_linear_space( @@ -365,18 +393,19 @@ static struct fixed31_32 translate_to_linear_space( return linear; } -static inline struct fixed31_32 translate_from_linear_space_ex( +static struct fixed31_32 translate_from_linear_space_ex( struct fixed31_32 arg, struct gamma_coefficients *coeff, uint32_t color_index) { - return translate_from_linear_space( - arg, - coeff->a0[color_index], - coeff->a1[color_index], - coeff->a2[color_index], - coeff->a3[color_index], - coeff->user_gamma[color_index]); + scratch_gamma_args.arg = arg; + scratch_gamma_args.a0 = coeff->a0[color_index]; + scratch_gamma_args.a1 = coeff->a1[color_index]; + scratch_gamma_args.a2 = coeff->a2[color_index]; + scratch_gamma_args.a3 = coeff->a3[color_index]; + scratch_gamma_args.gamma = coeff->user_gamma[color_index]; + + return translate_from_linear_space(&scratch_gamma_args); } @@ -715,24 +744,32 @@ static void build_regamma(struct pwl_float_data_ex *rgb_regamma, { uint32_t i; - struct gamma_coefficients coeff; + struct gamma_coefficients *coeff; struct pwl_float_data_ex *rgb = rgb_regamma; const struct hw_x_point *coord_x = coordinate_x; - build_coefficients(&coeff, type); + coeff = kvzalloc(sizeof(*coeff), GFP_KERNEL); + if (!coeff) + return; - i = 0; + build_coefficients(coeff, type); - while (i != hw_points_num + 1) { + memset(pow_buffer, 0, NUM_PTS_IN_REGION * sizeof(struct fixed31_32)); + pow_buffer_ptr = 0; // see variable definition for more info + i = 0; + while (i <= hw_points_num) { /*TODO use y vs r,g,b*/ rgb->r = translate_from_linear_space_ex( - 
coord_x->x, &coeff, 0); + coord_x->x, coeff, 0); rgb->g = rgb->r; rgb->b = rgb->r; ++coord_x; ++rgb; ++i; } + pow_buffer_ptr = -1; // reset back to no optimize + + kfree(coeff); } static void hermite_spline_eetf(struct fixed31_32 input_x, @@ -862,6 +899,8 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma, else max_content = max_display; + if (!use_eetf) + pow_buffer_ptr = 0; // see var definition for more info rgb += 32; // first 32 points have problems with fixed point, too small coord_x += 32; for (i = 32; i <= hw_points_num; i++) { @@ -900,6 +939,7 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma, ++coord_x; ++rgb; } + pow_buffer_ptr = -1; return true; } @@ -1572,14 +1612,15 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, output_tf->tf == TRANSFER_FUNCTION_SRGB) { if (ramp == NULL) return true; - if ((ramp->is_logical_identity) || + if ((ramp->is_identity && ramp->type != GAMMA_CS_TFM_1D) || (!mapUserRamp && ramp->type == GAMMA_RGB_256)) return true; } output_tf->type = TF_TYPE_DISTRIBUTED_POINTS; - if (ramp && (mapUserRamp || ramp->type != GAMMA_RGB_256)) { + if (ramp && ramp->type != GAMMA_CS_TFM_1D && + (mapUserRamp || ramp->type != GAMMA_RGB_256)) { rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS, sizeof(*rgb_user), GFP_KERNEL); diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h index 369953fafadf..69cecd2ec251 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h @@ -82,6 +82,15 @@ struct freesync_hdr_tf_params { unsigned int skip_tm; // skip tm }; +struct translate_from_linear_space_args { + struct fixed31_32 arg; + struct fixed31_32 a0; + struct fixed31_32 a1; + struct fixed31_32 a2; + struct fixed31_32 a3; + struct fixed31_32 gamma; +}; + void setup_x_points_distribution(void); void precompute_pq(void); void precompute_de_pq(void); -- cgit v1.2.3 From d68a74541735e030dea56f72746cd26d19986f41 Mon Sep 17 00:00:00 2001 From: Nikola Cornij Date: Tue, 25 Jun 2019 17:19:25 -0400 Subject: drm/amd/display: Clear FEC_READY shadow register if DPCD write fails [why] As a fail-safe, in case 'set FEC_READY' DPCD write fails, a HW shadow register should be cleared and the internal FEC stat should be set to 'not ready'. This is to make sure HW settings will be consistent with FEC_READY state on the RX. 
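A hedged sketch of the fail-safe path described above: if the sink rejects the FEC_READY DPCD write, roll the encoder's shadow bit back and record "not ready" so software never claims more than the hardware was actually programmed with. The enum and function names are illustrative, not the DC API; the real two-line change is in the diff below.

#include <stdbool.h>

enum fec_state_stub { FEC_NOT_READY, FEC_READY };

struct link_stub {
	enum fec_state_stub fec_state;
	void (*fec_set_ready)(struct link_stub *link, bool ready);
};

static void set_fec_ready_sketch(struct link_stub *link, bool dpcd_write_ok)
{
	if (dpcd_write_ok) {
		link->fec_set_ready(link, true);
		link->fec_state = FEC_READY;
	} else {
		/* fail-safe: keep the HW shadow register and driver state in sync */
		link->fec_set_ready(link, false);
		link->fec_state = FEC_NOT_READY;
	}
}
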
Signed-off-by: Nikola Cornij Reviewed-by: Joshua Aberback Acked-by: Chris Park Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 5c8e3318239c..b512fecae061 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -3174,6 +3174,8 @@ void dp_set_fec_ready(struct dc_link *link, bool ready) link_enc->funcs->fec_set_ready(link_enc, true); link->fec_state = dc_link_fec_ready; } else { + link->link_enc->funcs->fec_set_ready(link->link_enc, false); + link->fec_state = dc_link_fec_not_ready; dm_error("dpcd write failed to set fec_ready"); } } else if (link->fec_state == dc_link_fec_ready) { -- cgit v1.2.3 From 70f1476a7eedf85078f5b5a3b2647591f6df4d17 Mon Sep 17 00:00:00 2001 From: Joshua Aberback Date: Fri, 17 May 2019 14:43:10 -0400 Subject: drm/amd/display: Add debug option to disable timing sync [Why] We want a debug option to disable timing sync for testing. [How] New dc debug option that must be false to call program_timing_sync Signed-off-by: Joshua Aberback Reviewed-by: Jun Lei Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 2 +- drivers/gpu/drm/amd/display/dc/dc.h | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 4ef4dc63e221..32fd9184ef22 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1062,7 +1062,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c if (result != DC_OK) return result; - if (context->stream_count > 1) { + if (context->stream_count > 1 && !dc->debug.disable_timing_sync) { enable_timing_multisync(dc, context); program_timing_sync(dc, context); } diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 14664bed8c84..786f61eb381d 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -378,6 +378,7 @@ struct dc_debug_options { * watermarks are not affected. */ unsigned int force_min_dcfclk_mhz; + bool disable_timing_sync; }; struct dc_debug_data { -- cgit v1.2.3 From a2080098692173cf27282f686e341b184f9d68e3 Mon Sep 17 00:00:00 2001 From: Vitaly Prosyak Date: Fri, 21 Jun 2019 10:13:16 -0500 Subject: drm/amd/display: Add MPC 3DLUT resource management [Why & How] Number of 3DLUT's in MPC are not equal to number of pipes. Resource management is required. Activate on FPGA entire tm solution which includes the following :hdr multiplier, shaper, 3dlut. 
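A hedged sketch of the resource-management idea: there are fewer MPC 3DLUT units (RMUs) than pipes, so acquisition has to be tracked explicitly. The pool below is a stand-in for illustration; the real bookkeeping lands in the dc_3dlut_state bitfields added in the diff that follows.

#include <stdbool.h>

#define NUM_RMU_STUB 3	/* assumed: fewer 3DLUT units than pipes */

struct rmu_pool_stub {
	bool acquired[NUM_RMU_STUB];
};

/* returns the acquired RMU index, or -1 if all units are in use */
static int acquire_rmu_sketch(struct rmu_pool_stub *pool)
{
	int i;

	for (i = 0; i < NUM_RMU_STUB; i++) {
		if (!pool->acquired[i]) {
			pool->acquired[i] = true;
			return i;
		}
	}
	return -1;
}

static void release_rmu_sketch(struct rmu_pool_stub *pool, int i)
{
	if (i >= 0 && i < NUM_RMU_STUB)
		pool->acquired[i] = false;
}
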
Signed-off-by: Vitaly Prosyak Reviewed-by: Charlene Liu Acked-by: Gary Kattan Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_surface.c | 3 +-- drivers/gpu/drm/amd/display/dc/dc.h | 16 +++++++++++++++- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 4 ++-- 3 files changed, 18 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c index f40e4fd52fa2..b9d6a5bd8522 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c @@ -60,7 +60,6 @@ static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state plane_state->lut3d_func = dc_create_3dlut_func(); if (plane_state->lut3d_func != NULL) { plane_state->lut3d_func->ctx = ctx; - plane_state->lut3d_func->initialized = false; } plane_state->blend_tf = dc_create_transfer_func(); if (plane_state->blend_tf != NULL) { @@ -279,7 +278,7 @@ struct dc_3dlut *dc_create_3dlut_func(void) goto alloc_fail; kref_init(&lut->refcount); - lut->initialized = false; + lut->state.raw = 0; return lut; diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 786f61eb381d..421932ac3b26 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -616,12 +616,26 @@ struct dc_transfer_func { #if defined(CONFIG_DRM_AMD_DC_DCN2_0) +union dc_3dlut_state { + struct { + uint32_t initialized:1; /*if 3dlut is went through color module for initialization */ + uint32_t rmu_idx_valid:1; /*if mux settings are valid*/ + uint32_t rmu_mux_num:3; /*index of mux to use*/ + uint32_t mpc_rmu0_mux:4; /*select mpcc on mux, one of the following : mpcc0, mpcc1, mpcc2, mpcc3*/ + uint32_t mpc_rmu1_mux:4; + uint32_t mpc_rmu2_mux:4; + uint32_t reserved:15; + } bits; + uint32_t raw; +}; + struct dc_3dlut { struct kref refcount; struct tetrahedral_params lut_3d; uint32_t hdr_multiplier; - bool initialized; + bool initialized; /*remove after diag fix*/ + union dc_3dlut_state state; struct dc_context *ctx; }; #endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 9daab57a96ea..64ebfdbbba9b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -952,14 +952,14 @@ static bool dcn20_set_shaper_3dlut( result = dpp_base->funcs->dpp_program_shaper_lut(dpp_base, shaper_lut); if (plane_state->lut3d_func && - plane_state->lut3d_func->initialized == true) + plane_state->lut3d_func->state.bits.initialized == 1) result = dpp_base->funcs->dpp_program_3dlut(dpp_base, &plane_state->lut3d_func->lut_3d); else result = dpp_base->funcs->dpp_program_3dlut(dpp_base, NULL); if (plane_state->lut3d_func && - plane_state->lut3d_func->initialized == true && + plane_state->lut3d_func->state.bits.initialized == 1 && plane_state->lut3d_func->hdr_multiplier != 0) dpp_base->funcs->dpp_set_hdr_multiplier(dpp_base, plane_state->lut3d_func->hdr_multiplier); -- cgit v1.2.3 From 290129c256179d5bbf822016e1729b8e9ed4875b Mon Sep 17 00:00:00 2001 From: Qingqing Zhuo Date: Thu, 27 Jun 2019 19:17:23 -0400 Subject: drm/amd/display: Add CM_BYPASS via debug option [Why] bypass CM block and MPC ogam for debug or triage use. [How] create a new flag cm_bypass_mode, which will set both CM_CONTROL and MPCC_OGAM_MODE to bypass when set to 1. 
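For clarity, a hedged sketch of the debug gate: when the bypass flag is set, program the colour-management controls to their bypass values instead of the computed ones. The field strings and reg_update() helper are stand-ins for CM_CONTROL/MPCC_OGAM_MODE and the REG_* macros used in the diff below (mode 0 is LUT bypass, per the patch).

#include <stdbool.h>

struct dbg_opts_stub {
	bool cm_in_bypass;	/* debug-only switch, default false */
};

static void reg_update(const char *field, unsigned int val) { /* stub */ }

static void program_cm_sketch(const struct dbg_opts_stub *dbg,
			      unsigned int wanted_ogam_mode)
{
	if (dbg->cm_in_bypass) {
		reg_update("CM_BYPASS", 1);		/* DPP CM block bypassed */
		reg_update("MPCC_OGAM_MODE", 0);	/* 0 = output gamma LUT bypass */
		return;
	}

	reg_update("CM_BYPASS", 0);
	reg_update("MPCC_OGAM_MODE", wanted_ogam_mode);
}
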
Signed-off-by: Qingqing Zhuo Reviewed-by: Charlene Liu Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 3 +++ drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c | 7 ++++++- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c | 11 +++++++++++ drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h | 1 + drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h | 1 + 5 files changed, 22 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 421932ac3b26..27900297bdcf 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -379,6 +379,9 @@ struct dc_debug_options { */ unsigned int force_min_dcfclk_mhz; bool disable_timing_sync; +#if defined(CONFIG_DRM_AMD_DC_DCN2_0) + bool cm_in_bypass; +#endif }; struct dc_debug_data { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c index e28b8e7bedf5..2d112c316424 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c @@ -52,7 +52,12 @@ static void dpp2_enable_cm_block( { struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - REG_UPDATE(CM_CONTROL, CM_BYPASS, 0); + unsigned int cm_bypass_mode = 0; + //Temp, put CM in bypass mode + if (dpp_base->ctx->dc->debug.cm_in_bypass) + cm_bypass_mode = 1; + + REG_UPDATE(CM_CONTROL, CM_BYPASS, cm_bypass_mode); } diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c index 240749e4cf83..f4d3008e5efa 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c @@ -368,6 +368,11 @@ void apply_DEDCN20_305_wa( { struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); + if (mpc->ctx->dc->debug.cm_in_bypass) { + REG_SET(MPCC_OGAM_MODE[mpcc_id], 0, MPCC_OGAM_MODE, 0); + return; + } + if (mpc->ctx->dc->work_arounds.dedcn20_305_wa == false) { /*hw fixed in new review*/ return; @@ -390,10 +395,16 @@ void mpc2_set_output_gamma( enum dc_lut_mode next_mode; struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); + if (mpc->ctx->dc->debug.cm_in_bypass) { + REG_SET(MPCC_OGAM_MODE[mpcc_id], 0, MPCC_OGAM_MODE, 0); + return; + } + if (params == NULL) { REG_SET(MPCC_OGAM_MODE[mpcc_id], 0, MPCC_OGAM_MODE, 0); return; } + current_mode = mpc20_get_ogam_current(mpc, mpcc_id); if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A) next_mode = LUT_RAM_B; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h index 60c671fcf186..9b69a06ab46f 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h @@ -42,6 +42,7 @@ struct dpp { #if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct pwl_params shaper_params; + bool cm_bypass_mode; #endif }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h index 45b94e319cd4..9f00289bda78 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h @@ -128,6 +128,7 @@ struct mpc { struct mpcc mpcc_array[MAX_MPCC]; #if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct pwl_params blender_params; + bool cm_bypass_mode; #endif }; -- cgit v1.2.3 From 69866d132355c83f115070c79537c72f9e2d6965 Mon Sep 17 00:00:00 2001 From: Nevenko Stupar Date: Fri, 28 Jun 2019 12:12:13 -0400 Subject: drm/amd/display: Add DIG_CLOCK_PATTERN register Add this register for future use 
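The new register is wired into the usual shift/mask lists; for readers unfamiliar with that pattern, here is a hedged, generic sketch of the read-modify-write those lists ultimately drive. mmio_read/mmio_write are assumed stand-ins for the real register accessors, not driver functions.

#include <stdint.h>

static uint32_t mmio_read(uint32_t addr) { return 0; /* stub */ }
static void mmio_write(uint32_t addr, uint32_t val) { /* stub */ }

static void reg_update_field(uint32_t addr, uint32_t mask, uint32_t shift,
			     uint32_t value)
{
	uint32_t reg = mmio_read(addr);

	reg &= ~mask;			/* clear the field */
	reg |= (value << shift) & mask;	/* insert the new value */
	mmio_write(addr, reg);
}
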
Signed-off-by: Nevenko Stupar Reviewed-by: Vitaly Prosyak Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h index ab0ead3c3f46..f585e7b620cc 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h @@ -89,7 +89,8 @@ SRI(DP_VID_STREAM_CNTL, DP, id), \ SRI(DP_VID_TIMING, DP, id), \ SRI(DP_SEC_AUD_N, DP, id), \ - SRI(DP_SEC_TIMESTAMP, DP, id) + SRI(DP_SEC_TIMESTAMP, DP, id), \ + SRI(DIG_CLOCK_PATTERN, DIG, id) #define SE_DCN_REG_LIST(id)\ SE_COMMON_DCN_REG_LIST(id) @@ -170,6 +171,7 @@ struct dcn10_stream_enc_registers { uint32_t HDMI_METADATA_PACKET_CONTROL; uint32_t DP_SEC_FRAMING4; #endif + uint32_t DIG_CLOCK_PATTERN; }; @@ -298,7 +300,8 @@ struct dcn10_stream_enc_registers { SE_SF(DP0_DP_MSA_TIMING_PARAM4, DP_MSA_VHEIGHT, mask_sh),\ SE_SF(DIG0_HDMI_DB_CONTROL, HDMI_DB_DISABLE, mask_sh),\ SE_SF(DP0_DP_VID_TIMING, DP_VID_N_MUL, mask_sh),\ - SE_SF(DIG0_DIG_FE_CNTL, DIG_SOURCE_SELECT, mask_sh) + SE_SF(DIG0_DIG_FE_CNTL, DIG_SOURCE_SELECT, mask_sh),\ + SE_SF(DIG0_DIG_CLOCK_PATTERN, DIG_CLOCK_PATTERN, mask_sh) #define SE_COMMON_MASK_SH_LIST_SOC(mask_sh)\ SE_COMMON_MASK_SH_LIST_SOC_BASE(mask_sh) @@ -460,7 +463,8 @@ struct dcn10_stream_enc_registers { type HDMI_DB_DISABLE;\ type DP_VID_N_MUL;\ type DP_VID_M_DOUBLE_VALUE_EN;\ - type DIG_SOURCE_SELECT + type DIG_SOURCE_SELECT;\ + type DIG_CLOCK_PATTERN #if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define SE_REG_FIELD_LIST_DCN2_0(type) \ -- cgit v1.2.3 From 8bee5c5c7e9ed478272bcd1288a425b5a101b376 Mon Sep 17 00:00:00 2001 From: Ilya Bakoulin Date: Wed, 19 Jun 2019 17:33:02 -0400 Subject: drm/amd/display: Cache the use_pitch_c conditional For clarity, save the use_pitch_c logic to a bool Signed-off-by: Ilya Bakoulin Reviewed-by: Eric Bernstein Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c index 02e8c0c6a233..a167f867cb72 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c @@ -402,11 +402,14 @@ void hubp2_program_size( { struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); uint32_t pitch, meta_pitch, pitch_c, meta_pitch_c; + bool use_pitch_c = false; /* Program data and meta surface pitch (calculation from addrlib) * 444 or 420 luma */ - if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN && format < SURFACE_PIXEL_FORMAT_SUBSAMPLE_END) { + use_pitch_c = format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN + && format < SURFACE_PIXEL_FORMAT_SUBSAMPLE_END; + if (use_pitch_c) { ASSERT(plane_size->video.chroma_pitch != 0); /* Chroma pitch zero can cause system hang! 
*/ @@ -429,7 +432,8 @@ void hubp2_program_size( REG_UPDATE_2(DCSURF_SURFACE_PITCH, PITCH, pitch, META_PITCH, meta_pitch); - if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) + use_pitch_c = format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN; + if (use_pitch_c) REG_UPDATE_2(DCSURF_SURFACE_PITCH_C, PITCH_C, pitch_c, META_PITCH_C, meta_pitch_c); } -- cgit v1.2.3 From 473e0ecbb4f62e0a971b47b27ec778371653f5ac Mon Sep 17 00:00:00 2001 From: Ilya Bakoulin Date: Wed, 26 Jun 2019 14:52:46 -0400 Subject: drm/amd/display: Fixes for some MPO cases [Why] Alpha could be improperly applied (only affecting half the frame) for some source pixel formats. [How] Change how alpha is enabled in MPC/DPP LB and change the bottom plane blend mode in MPC. Signed-off-by: Ilya Bakoulin Reviewed-by: Dmytro Laktyushkin Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c | 2 +- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c index 0bca011ed7c9..4f7a10390c57 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c @@ -211,7 +211,7 @@ struct mpcc *mpc1_insert_plane( } else { new_mpcc->mpcc_bot = NULL; REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf); - REG_UPDATE(MPCC_CONTROL[mpcc_id], MPCC_MODE, MPCC_BLEND_MODE_TOP_LAYER_PASSTHROUGH); + REG_UPDATE(MPCC_CONTROL[mpcc_id], MPCC_MODE, MPCC_BLEND_MODE_TOP_LAYER_ONLY); } REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, dpp_id); REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, tree->opp_id); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 64ebfdbbba9b..566cd4cdfef4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -1863,7 +1863,7 @@ static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) { struct hubp *hubp = pipe_ctx->plane_res.hubp; struct mpcc_blnd_cfg blnd_cfg = { {0} }; - bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe; + bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha; int mpcc_id; struct mpcc *new_mpcc; struct mpc *mpc = dc->res_pool->mpc; -- cgit v1.2.3 From 692626fc4dfc214cecd6125b47884920990feb85 Mon Sep 17 00:00:00 2001 From: Aric Cyr Date: Mon, 1 Jul 2019 23:15:42 -0400 Subject: drm/amd/display: 3.2.41 Signed-off-by: Aric Cyr Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 27900297bdcf..abc039b836d4 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -39,7 +39,7 @@ #include "inc/hw/dmcu.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.2.40" +#define DC_VER "3.2.41" #define MAX_SURFACES 3 #define MAX_PLANES 6 -- cgit v1.2.3 From c2cd9d04ecf0f26fb472ffbd8274a55c05df0ffb Mon Sep 17 00:00:00 2001 From: Murton Liu Date: Tue, 25 Jun 2019 11:15:09 -0400 Subject: drm/amd/display: Hook up calls to do stereo mux and dig programming to stereo control interface [Why] Implementation of stereo mux register is complete, but unused. Need to call functions to write relevant configs. 
[How] Add function to write stereo config for enable/disable case and call in stereo control interface. Signed-off-by: Murton Liu Reviewed-by: Aric Cyr Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 47 ++++++++++++++ drivers/gpu/drm/amd/display/dc/dc.h | 3 + .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 7 +++ .../amd/display/dc/gpio/dcn10/hw_factory_dcn10.c | 42 ++++++++++++- drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c | 72 ++++++++++++++++++++++ .../amd/display/include/gpio_service_interface.h | 18 +++++- 6 files changed, 186 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 32fd9184ef22..a8516deb5ac3 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1236,6 +1236,53 @@ void dc_release_state(struct dc_state *context) kref_put(&context->refcount, dc_state_free); } +bool dc_set_generic_gpio_for_stereo(bool enable, + struct gpio_service *gpio_service) +{ + enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR; + struct gpio_pin_info pin_info; + struct gpio *generic; + struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config), + GFP_KERNEL); + + pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0); + + if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) { + kfree(config); + return false; + } else { + generic = dal_gpio_service_create_generic_mux( + gpio_service, + pin_info.offset, + pin_info.mask); + } + + if (!generic) { + kfree(config); + return false; + } + + gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT); + + config->enable_output_from_mux = enable; + config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC; + + if (gpio_result == GPIO_RESULT_OK) + gpio_result = dal_mux_setup_config(generic, config); + + if (gpio_result == GPIO_RESULT_OK) { + dal_gpio_close(generic); + dal_gpio_destroy_generic_mux(&generic); + kfree(config); + return true; + } else { + dal_gpio_close(generic); + dal_gpio_destroy_generic_mux(&generic); + kfree(config); + return false; + } +} + static bool is_surface_in_context( const struct dc_state *context, const struct dc_plane_state *plane_state) diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index abc039b836d4..b314fd2869dd 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -852,6 +852,9 @@ enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *pla void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info); +bool dc_set_generic_gpio_for_stereo(bool enable, + struct gpio_service *gpio_service); + /* * fast_validate: we return after determining if we can support the new state, * but before we populate the programming info diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 2118ea21d7e9..89c958f00e9a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -2692,6 +2692,13 @@ static void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc) dcn10_config_stereo_parameters(stream, &flags); + if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) { + if (!dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service)) + 
dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service); + } else { + dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service); + } + pipe_ctx->stream_res.opp->funcs->opp_program_stereo( pipe_ctx->stream_res.opp, flags.PROGRAM_STEREO == 1 ? true:false, diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c index 32aa47a04a0d..5711f30cf848 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c @@ -31,6 +31,7 @@ #include "../hw_gpio.h" #include "../hw_ddc.h" #include "../hw_hpd.h" +#include "../hw_generic.h" #include "hw_factory_dcn10.h" @@ -121,6 +122,42 @@ static const struct ddc_sh_mask ddc_mask = { DDC_MASK_SH_LIST(_MASK) }; +#include "../generic_regs.h" + +/* set field name */ +#define SF_GENERIC(reg_name, field_name, post_fix)\ + .field_name = reg_name ## __ ## field_name ## post_fix + +#define generic_regs(id) \ +{\ + GENERIC_REG_LIST(id)\ +} + +static const struct generic_registers generic_regs[] = { + generic_regs(A), + generic_regs(B), +}; + +static const struct generic_sh_mask generic_shift[] = { + GENERIC_MASK_SH_LIST(__SHIFT, A), + GENERIC_MASK_SH_LIST(__SHIFT, B), +}; + +static const struct generic_sh_mask generic_mask[] = { + GENERIC_MASK_SH_LIST(_MASK, A), + GENERIC_MASK_SH_LIST(_MASK, B), +}; + +static void define_generic_registers(struct hw_gpio_pin *pin, uint32_t en) +{ + struct hw_generic *generic = HW_GENERIC_FROM_BASE(pin); + + generic->regs = &generic_regs[en]; + generic->shifts = &generic_shift[en]; + generic->masks = &generic_mask[en]; + generic->base.regs = &generic_regs[en].gpio; +} + static void define_ddc_registers( struct hw_gpio_pin *pin, uint32_t en) @@ -161,12 +198,13 @@ static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en) static const struct hw_factory_funcs funcs = { .create_ddc_data = dal_hw_ddc_create, .create_ddc_clock = dal_hw_ddc_create, - .create_generic = NULL, + .create_generic = dal_hw_generic_create, .create_hpd = dal_hw_hpd_create, .create_sync = NULL, .create_gsl = NULL, .define_hpd_registers = define_hpd_registers, - .define_ddc_registers = define_ddc_registers + .define_ddc_registers = define_ddc_registers, + .define_generic_registers = define_generic_registers }; /* * dal_hw_factory_dcn10_init diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c index a7fab44f66b6..5b02db13eb2a 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c @@ -141,6 +141,62 @@ struct gpio *dal_gpio_service_create_irq( return dal_gpio_create_irq(service, id, en); } +struct gpio *dal_gpio_service_create_generic_mux( + struct gpio_service *service, + uint32_t offset, + uint32_t mask) +{ + enum gpio_id id; + uint32_t en; + struct gpio *generic; + + if (mask == 1) + en = GPIO_GENERIC_A; + else if (mask == 0x00000100L) + en = GPIO_GENERIC_B; + else + return NULL; + + id = GPIO_ID_GENERIC; + + generic = dal_gpio_create( + service, id, en, GPIO_PIN_OUTPUT_STATE_DEFAULT); + + return generic; +} + +void dal_gpio_destroy_generic_mux( + struct gpio **mux) +{ + if (!mux || !*mux) { + ASSERT_CRITICAL(false); + return; + } + + dal_gpio_close(*mux); + dal_gpio_destroy(mux); + kfree(*mux); + + *mux = NULL; +} + +struct gpio_pin_info dal_gpio_get_generic_pin_info( + struct gpio_service *service, + enum gpio_id id, + uint32_t en) +{ + struct gpio_pin_info pin; + + if 
(service->translate.funcs->id_to_offset) { + service->translate.funcs->id_to_offset(id, en, &pin); + } else { + pin.mask = 0xFFFFFFFF; + pin.offset = 0xFFFFFFFF; + } + + return pin; +} + void dal_gpio_service_destroy( struct gpio_service **ptr) { @@ -165,6 +221,21 @@ void dal_gpio_service_destroy( *ptr = NULL; } +enum gpio_result dal_mux_setup_config( + struct gpio *mux, + struct gpio_generic_mux_config *config) +{ + struct gpio_config_data config_data; + + if (!config) + return GPIO_RESULT_INVALID_DATA; + + config_data.config.generic_mux = *config; + config_data.type = GPIO_CONFIG_TYPE_GENERIC_MUX; + + return dal_gpio_set_config(mux, &config_data); +} + /* * @brief * Private API. @@ -255,6 +326,7 @@ enum gpio_result dal_gpio_service_open( case GPIO_ID_GENERIC: pin = service->factory.funcs->create_generic( service->ctx, id, en); + service->factory.funcs->define_generic_registers(pin, en); break; case GPIO_ID_HPD: pin = service->factory.funcs->create_hpd( diff --git a/drivers/gpu/drm/amd/display/include/gpio_service_interface.h b/drivers/gpu/drm/amd/display/include/gpio_service_interface.h index f40259bade40..9c55d247227e 100644 --- a/drivers/gpu/drm/amd/display/include/gpio_service_interface.h +++ b/drivers/gpu/drm/amd/display/include/gpio_service_interface.h @@ -51,13 +51,29 @@ struct gpio *dal_gpio_service_create_irq( uint32_t offset, uint32_t mask); +struct gpio *dal_gpio_service_create_generic_mux( + struct gpio_service *service, + uint32_t offset, + uint32_t mask); + +void dal_gpio_destroy_generic_mux( + struct gpio **mux); + +enum gpio_result dal_mux_setup_config( + struct gpio *mux, + struct gpio_generic_mux_config *config); + +struct gpio_pin_info dal_gpio_get_generic_pin_info( + struct gpio_service *service, + enum gpio_id id, + uint32_t en); + struct ddc *dal_gpio_create_ddc( struct gpio_service *service, uint32_t offset, uint32_t mask, struct gpio_ddc_hw_info *info); - void dal_gpio_destroy_ddc( struct ddc **ddc); -- cgit v1.2.3 From 30db43b62c16cb0a0ff3eb89d9b25bf455859ef8 Mon Sep 17 00:00:00 2001 From: Derek Lai Date: Tue, 2 Jul 2019 17:50:41 +0800 Subject: drm/amd/display: allocate 4 ddc engines for RV2 [Why] Driver will create 0, 1, and 2 ddc engines for RV2, but some platforms used 0, 1, and 3. [How] Still allocate 4 ddc engines for RV2. Signed-off-by: Derek Lai Reviewed-by: Aric Cyr Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 1a20461c2937..a12530a3ab9c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -508,7 +508,7 @@ static const struct resource_caps rv2_res_cap = { .num_audio = 3, .num_stream_encoder = 3, .num_pll = 3, - .num_ddc = 3, + .num_ddc = 4, }; static const struct dc_plane_cap plane_cap = { -- cgit v1.2.3 From 925f566cb7aedbcf26005035cf894ec824e8ca2f Mon Sep 17 00:00:00 2001 From: Charlene Liu Date: Thu, 27 Jun 2019 18:16:21 -0400 Subject: drm/amd/display: add set and get clock for testing purposes add dc_set_clock add dc_get_clock this is for testing and diagnostics to get/set DPPCLK and DISPCLK. 
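For example, a diagnostics path could pair the two calls: query the reported range for a clock, then request a new value inside that range. A minimal sketch under the interface added below (the helper name diag_force_dispclk and the bare 0 stepping argument are illustrative only, not part of the change):

	static void diag_force_dispclk(struct dc *dc, uint32_t khz)
	{
		struct dc_clock_config cfg = {0};

		/* current, min, max and bw-required DISPCLK as seen by the clk_mgr */
		dc_get_clock(dc, DC_CLOCK_TYPE_DISPCLK, &cfg);

		/*
		 * out-of-range requests are rejected with DC_FAIL_CLK_EXCEED_MAX,
		 * DC_FAIL_CLK_BELOW_MIN or DC_FAIL_CLK_BELOW_CFG_REQUIRED
		 */
		if (khz >= cfg.min_clock_khz && khz <= cfg.max_clock_khz)
			dc_set_clock(dc, DC_CLOCK_TYPE_DISPCLK, khz, 0);
	}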
Signed-off-by: Charlene Liu Reviewed-by: Dmytro Laktyushkin Acked-by: Leo Li Signed-off-by: Alex Deucher --- .../amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c | 23 +++++++++- .../amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h | 5 ++ drivers/gpu/drm/amd/display/dc/core/dc.c | 11 +++++ drivers/gpu/drm/amd/display/dc/dc.h | 5 ++ drivers/gpu/drm/amd/display/dc/dc_types.h | 13 ++++++ .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 53 +++++++++++++++++++++- .../gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 5 ++ drivers/gpu/drm/amd/display/dc/inc/core_status.h | 3 ++ drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h | 7 +++ drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 9 ++++ 10 files changed, 132 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c index e9a7a7af11df..9a873e2b3736 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c @@ -316,11 +316,32 @@ void dcn2_enable_pme_wa(struct clk_mgr *clk_mgr_base) } } +void dcn2_get_clock(struct clk_mgr *clk_mgr, + struct dc_state *context, + enum dc_clock_type clock_type, + struct dc_clock_config *clock_cfg) +{ + + if (clock_type == DC_CLOCK_TYPE_DISPCLK) { + clock_cfg->max_clock_khz = context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz; + clock_cfg->min_clock_khz = DCN_MINIMUM_DISPCLK_Khz; + clock_cfg->current_clock_khz = clk_mgr->clks.dispclk_khz; + clock_cfg->bw_requirequired_clock_khz = context->bw_ctx.bw.dcn.clk.bw_dispclk_khz; + } + if (clock_type == DC_CLOCK_TYPE_DPPCLK) { + clock_cfg->max_clock_khz = context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz; + clock_cfg->min_clock_khz = DCN_MINIMUM_DPPCLK_Khz; + clock_cfg->current_clock_khz = clk_mgr->clks.dppclk_khz; + clock_cfg->bw_requirequired_clock_khz = context->bw_ctx.bw.dcn.clk.bw_dppclk_khz; + } +} + static struct clk_mgr_funcs dcn2_funcs = { .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, .update_clocks = dcn2_update_clocks, .init_clocks = dcn2_init_clocks, - .enable_pme_wa = dcn2_enable_pme_wa + .enable_pme_wa = dcn2_enable_pme_wa, + .get_clock = dcn2_get_clock, }; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h index 5661a5a89847..ac31a9787305 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h @@ -45,4 +45,9 @@ void dcn20_clk_mgr_construct(struct dc_context *ctx, uint32_t dentist_get_did_from_divider(int divider); +void dcn2_get_clock(struct clk_mgr *clk_mgr, + struct dc_state *context, + enum dc_clock_type clock_type, + struct dc_clock_config *clock_cfg); + #endif //__DCN20_CLK_MGR_H__ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index a8516deb5ac3..c86c86c07fd9 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -2431,3 +2431,14 @@ void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx info->fClock = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz; info->phyClock = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz; } +enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping) +{ + if (dc->hwss.set_clock) + return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping); + return 
DC_ERROR_UNEXPECTED; +} +void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg) +{ + if (dc->hwss.get_clock) + dc->hwss.get_clock(dc, clock_type, clock_cfg); +} diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index b314fd2869dd..a5b5afd48a7b 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -252,7 +252,10 @@ enum wm_report_mode { struct dc_clocks { int dispclk_khz; int max_supported_dppclk_khz; + int max_supported_dispclk_khz; int dppclk_khz; + int bw_dppclk_khz; /*a copy of dppclk_khz*/ + int bw_dispclk_khz; int dcfclk_khz; int socclk_khz; int dcfclk_deep_sleep_khz; @@ -1041,6 +1044,8 @@ unsigned int dc_get_target_backlight_pwm(struct dc *dc); bool dc_is_dmcu_initialized(struct dc *dc); +enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping); +void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg); #if defined(CONFIG_DRM_AMD_DC_DSC_SUPPORT) /******************************************************************************* * DSC Interfaces diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index ce6d73d21cca..b273735b6a3e 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -726,6 +726,19 @@ struct AsicStateEx { unsigned int phyClock; }; + +enum dc_clock_type { + DC_CLOCK_TYPE_DISPCLK = 0, + DC_CLOCK_TYPE_DPPCLK = 1, +}; + +struct dc_clock_config { + uint32_t max_clock_khz; + uint32_t min_clock_khz; + uint32_t bw_requirequired_clock_khz; + uint32_t current_clock_khz;/*current clock in use*/ +}; + #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT /* DSC DPCD capabilities */ union dsc_slice_caps1 { diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 89c958f00e9a..29db9b2d4412 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -3069,6 +3069,56 @@ static void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx, sdp_message_size); } } +static enum dc_status dcn10_set_clock(struct dc *dc, + enum dc_clock_type clock_type, + uint32_t clk_khz, + uint32_t stepping) +{ + struct dc_state *context = dc->current_state; + struct dc_clock_config clock_cfg = {0}; + struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk; + + if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock) + dc->clk_mgr->funcs->get_clock(dc->clk_mgr, + context, clock_type, &clock_cfg); + + if (!dc->clk_mgr->funcs->get_clock) + return DC_FAIL_UNSUPPORTED_1; + + if (clk_khz > clock_cfg.max_clock_khz) + return DC_FAIL_CLK_EXCEED_MAX; + + if (clk_khz < clock_cfg.min_clock_khz) + return DC_FAIL_CLK_BELOW_MIN; + + if (clk_khz < clock_cfg.bw_requirequired_clock_khz) + return DC_FAIL_CLK_BELOW_CFG_REQUIRED; + + /*update internal request clock for update clock use*/ + if (clock_type == DC_CLOCK_TYPE_DISPCLK) + current_clocks->dispclk_khz = clk_khz; + else if (clock_type == DC_CLOCK_TYPE_DPPCLK) + current_clocks->dppclk_khz = clk_khz; + else + return DC_ERROR_UNEXPECTED; + + if (dc->clk_mgr && dc->clk_mgr->funcs->update_clocks) + dc->clk_mgr->funcs->update_clocks(dc->clk_mgr, + context, true); + return DC_OK; + +} + +static void dcn10_get_clock(struct dc *dc, + enum dc_clock_type clock_type, + struct dc_clock_config *clock_cfg) +{ + struct dc_state *context = 
dc->current_state; + + if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock) + dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg); + +} static const struct hw_sequencer_funcs dcn10_funcs = { .program_gamut_remap = program_gamut_remap, @@ -3123,7 +3173,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = { .enable_stream_gating = NULL, .setup_periodic_interrupt = dcn10_setup_periodic_interrupt, .setup_vupdate_interrupt = dcn10_setup_vupdate_interrupt, - .did_underflow_occur = dcn10_did_underflow_occur + .set_clock = dcn10_set_clock, + .get_clock = dcn10_get_clock, }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index ae2545fb8ece..6ff779256729 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -2398,6 +2398,11 @@ void dcn20_calculate_dlg_params( context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest; pipe_idx++; } + /*save a original dppclock copy*/ + context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz; + context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz; + context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dppclk_mhz*1000; + context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dispclk_mhz*1000; for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { bool cstate_en = context->bw_ctx.dml.vba.PrefetchMode[vlevel][context->bw_ctx.dml.vba.maxMpcComb] != 2; diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h index 0a094d7c9380..fd39e2abe2ed 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_status.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h @@ -48,6 +48,9 @@ enum dc_status { DC_NO_DSC_RESOURCE = 17, #endif DC_FAIL_UNSUPPORTED_1 = 18, + DC_FAIL_CLK_EXCEED_MAX = 21, + DC_FAIL_CLK_BELOW_MIN = 22, /*THIS IS MIN PER IP*/ + DC_FAIL_CLK_BELOW_CFG_REQUIRED = 23, /*THIS IS hard_min in PPLIB*/ DC_ERROR_UNEXPECTED = -1 }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h index 36ebd5bc7863..938bdc5c21a1 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h @@ -28,6 +28,9 @@ #include "dc.h" +#define DCN_MINIMUM_DISPCLK_Khz 100000 +#define DCN_MINIMUM_DPPCLK_Khz 100000 + /* Public interfaces */ struct clk_states { @@ -51,6 +54,10 @@ struct clk_mgr_funcs { void (*init_clocks)(struct clk_mgr *clk_mgr); void (*enable_pme_wa) (struct clk_mgr *clk_mgr); + void (*get_clock)(struct clk_mgr *clk_mgr, + struct dc_state *context, + enum dc_clock_type clock_type, + struct dc_clock_config *clock_cfg); }; struct clk_mgr { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index 4d56d48a3179..36be08adae05 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -294,6 +294,15 @@ struct hw_sequencer_funcs { void (*disable_writeback)(struct dc *dc, unsigned int dwb_pipe_inst); #endif + enum dc_status (*set_clock)(struct dc *dc, + enum dc_clock_type clock_type, + uint32_t clk_khz, + uint32_t stepping); + + void (*get_clock)(struct dc *dc, + enum dc_clock_type clock_type, + struct dc_clock_config *clock_cfg); + }; void color_space_to_black_color( -- 
cgit v1.2.3 From 54eef8a411699779fdbb1a2cd333f1b914bd5699 Mon Sep 17 00:00:00 2001 From: Murton Liu Date: Tue, 2 Jul 2019 11:32:19 -0400 Subject: drm/amd/display: Change offset_to_id to reflect what id_to_offset returns id_to_offset does not point to the same reg offset that offset_to_id checks for, causing unintended asserts Signed-off-by: Murton Liu Reviewed-by: Aric Cyr Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.c | 2 +- drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c | 10 +++------- 2 files changed, 4 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.c index b393cc13298a..915e896e0e91 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.c @@ -71,7 +71,7 @@ static bool offset_to_id( { switch (offset) { /* GENERIC */ - case REG(DC_GENERICA): + case REG(DC_GPIO_GENERIC_A): *id = GPIO_ID_GENERIC; switch (mask) { case DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK: diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c index 5b02db13eb2a..a7bc3ee5dfec 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c @@ -150,14 +150,10 @@ struct gpio *dal_gpio_service_create_generic_mux( uint32_t en; struct gpio *generic; - if (mask == 1) - en = GPIO_GENERIC_A; - else if (mask == 0x00000100L) - en = GPIO_GENERIC_B; - else + if (!service->translate.funcs->offset_to_id(offset, mask, &id, &en)) { + ASSERT_CRITICAL(false); return NULL; - - id = GPIO_ID_GENERIC; + } generic = dal_gpio_create( service, id, en, GPIO_PIN_OUTPUT_STATE_DEFAULT); -- cgit v1.2.3 From 39bca3599aab8edbc4a6c6d10fc76b1a932e5dfa Mon Sep 17 00:00:00 2001 From: Charlene Liu Date: Tue, 2 Jul 2019 14:04:35 -0400 Subject: drm/amd/display: add a option to force the clock at every mode change. [Description] This is for HW negative stress testing use. force reset the dispclk and dppclk even the same clock already set in HW. Signed-off-by: Charlene Liu Reviewed-by: Dmytro Laktyushkin Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c | 9 ++++++++- drivers/gpu/drm/amd/display/dc/dc.h | 1 + 2 files changed, 9 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c index 9a873e2b3736..3cff4f0518d3 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c @@ -151,7 +151,14 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base, bool enter_display_off = false; bool dpp_clock_lowered = false; struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu; + bool force_reset = false; + if (clk_mgr_base->clks.dispclk_khz == 0 || + dc->debug.force_clock_mode & 0x1) { + //this is from resume or boot up, if forced_clock cfg option used, we bypass program dispclk and DPPCLK, but need set them for S3. + force_reset = true; + //force_clock_mode 0x1: force reset the clock even it is the same clock as long as it is in Passive level. 
+ } display_count = clk_mgr_helper_get_active_display_cnt(dc, context); if (dc->res_pool->pp_smu) pp_smu = &dc->res_pool->pp_smu->nv_funcs; @@ -223,7 +230,7 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base, update_dispclk = true; } - if (dc->config.forced_clocks == false) { + if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) { if (dpp_clock_lowered) { // if clock is being lowered, increase DTO before lowering refclk dcn20_update_clocks_update_dpp_dto(clk_mgr, context); diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index a5b5afd48a7b..584fabf5a9a1 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -385,6 +385,7 @@ struct dc_debug_options { #if defined(CONFIG_DRM_AMD_DC_DCN2_0) bool cm_in_bypass; #endif + int force_clock_mode;/*every mode change.*/ }; struct dc_debug_data { -- cgit v1.2.3 From e9bcc1e03048b3872df46cc6ee28d5c43850e379 Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Tue, 2 Jul 2019 14:51:01 -0400 Subject: drm/amd/display: use min disp and dpp clk debug option for dcn2 This allows to set a minimum display and dpp clock on dcn2+ HW by adjusting clocks used for dml calculations. Signed-off-by: Dmytro Laktyushkin Reviewed-by: Tony Cheng Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 1 + drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 584fabf5a9a1..489f6240f2ed 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -344,6 +344,7 @@ struct dc_debug_options { bool disable_pplib_wm_range; enum wm_report_mode pplib_wm_report_mode; unsigned int min_disp_clk_khz; + unsigned int min_dpp_clk_khz; int sr_exit_time_dpm0_ns; int sr_enter_plus_exit_time_dpm0_ns; int sr_exit_time_ns; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 6ff779256729..8b338275624e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -2170,10 +2170,6 @@ bool dcn20_fast_validate_bw( } if (force_split && context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 1) context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] /= 2; - if (dc->config.forced_clocks == true) { - context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] = - context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz; - } if (!pipe->top_pipe && !pipe->plane_state && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) { hsplit_pipe = find_idle_secondary_pipe(&context->res_ctx, dc->res_pool, pipe); ASSERT(hsplit_pipe); @@ -2293,6 +2289,10 @@ void dcn20_calculate_wm( pipes[pipe_cnt].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz; pipes[pipe_cnt].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz; } + if (dc->debug.min_disp_clk_khz > pipes[pipe_cnt].clks_cfg.dispclk_mhz * 1000) + pipes[pipe_cnt].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0; + if (dc->debug.min_dpp_clk_khz > pipes[pipe_cnt].clks_cfg.dppclk_mhz * 1000) + pipes[pipe_cnt].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0; pipe_cnt++; } -- cgit v1.2.3 From 
f1cdc98fd9d9bb0eb4b6599825b52ce25757eb9f Mon Sep 17 00:00:00 2001 From: Dingchen Zhang Date: Fri, 28 Jun 2019 17:23:24 -0400 Subject: drm/amd/display: add pipe CRC sources without disabling dithering. [Why] need to verify the impact of spatial dithering on 8bpc bypass mode. [How] added CRC sources and configure dihter option from dc stream. Signed-off-by: Dingchen Zhang Reviewed-by: Hanghong Ma Acked-by: Leo Li Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c | 58 ++++++++++++++++------ .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h | 6 ++- 2 files changed, 48 insertions(+), 16 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c index b966e1410484..365aaef3ecaf 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c @@ -33,7 +33,9 @@ static const char *const pipe_crc_sources[] = { "none", "crtc", + "crtc dither", "dprx", + "dprx dither", "auto", }; @@ -45,10 +47,33 @@ static enum amdgpu_dm_pipe_crc_source dm_parse_crc_source(const char *source) return AMDGPU_DM_PIPE_CRC_SOURCE_CRTC; if (!strcmp(source, "dprx")) return AMDGPU_DM_PIPE_CRC_SOURCE_DPRX; + if (!strcmp(source, "crtc dither")) + return AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER; + if (!strcmp(source, "dprx dither")) + return AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER; return AMDGPU_DM_PIPE_CRC_SOURCE_INVALID; } +static bool dm_is_crc_source_crtc(enum amdgpu_dm_pipe_crc_source src) +{ + return (src == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC) || + (src == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER); +} + +static bool dm_is_crc_source_dprx(enum amdgpu_dm_pipe_crc_source src) +{ + return (src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX) || + (src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER); +} + +static bool dm_need_crc_dither(enum amdgpu_dm_pipe_crc_source src) +{ + return (src == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER) || + (src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER) || + (src == AMDGPU_DM_PIPE_CRC_SOURCE_NONE); +} + const char *const *amdgpu_dm_crtc_get_crc_sources(struct drm_crtc *crtc, size_t *count) { @@ -102,14 +127,18 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) * USER REQ SRC | CURRENT SRC | BEHAVIOR * ----------------------------- * None | None | Do nothing - * None | CRTC | Disable CRTC CRC - * None | DPRX | Disable DPRX CRC, need 'aux' - * CRTC | XXXX | Enable CRTC CRC, configure DC strm - * DPRX | XXXX | Enable DPRX CRC, need 'aux' + * None | CRTC | Disable CRTC CRC, set default to dither + * None | DPRX | Disable DPRX CRC, need 'aux', set default to dither + * None | CRTC DITHER | Disable CRTC CRC + * None | DPRX DITHER | Disable DPRX CRC, need 'aux' + * CRTC | XXXX | Enable CRTC CRC, no dither + * DPRX | XXXX | Enable DPRX CRC, need 'aux', no dither + * CRTC DITHER | XXXX | Enable CRTC CRC, set dither + * DPRX DITHER | XXXX | Enable DPRX CRC, need 'aux', set dither */ - if (source == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX || + if (dm_is_crc_source_dprx(source) || (source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE && - crtc_state->crc_src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX)) { + dm_is_crc_source_dprx(crtc_state->crc_src))) { aconn = stream_state->link->priv; if (!aconn) { @@ -125,7 +154,7 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) mutex_unlock(&adev->dm.dc_lock); return -EINVAL; } - } else if (source == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC) { + } else if 
(dm_is_crc_source_crtc(source)) { if (!dc_stream_configure_crc(stream_state->ctx->dc, stream_state, enable, enable)) { mutex_unlock(&adev->dm.dc_lock); @@ -133,10 +162,11 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) } } - /* When enabling CRC, we should also disable dithering. */ - dc_stream_set_dither_option(stream_state, - enable ? DITHER_OPTION_TRUN8 - : DITHER_OPTION_DEFAULT); + /* configure dithering */ + if (!dm_need_crc_dither(source)) + dc_stream_set_dither_option(stream_state, DITHER_OPTION_TRUN8); + else if (!dm_need_crc_dither(crtc_state->crc_src)) + dc_stream_set_dither_option(stream_state, DITHER_OPTION_DEFAULT); mutex_unlock(&adev->dm.dc_lock); @@ -147,7 +177,7 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) enabled = amdgpu_dm_is_valid_crc_source(crtc_state->crc_src); if (!enabled && enable) { drm_crtc_vblank_get(crtc); - if (source == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX) { + if (dm_is_crc_source_dprx(source)) { if (drm_dp_start_crc(aux, crtc)) { DRM_DEBUG_DRIVER("dp start crc failed\n"); return -EINVAL; @@ -155,7 +185,7 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) } } else if (enabled && !enable) { drm_crtc_vblank_put(crtc); - if (crtc_state->crc_src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX) { + if (dm_is_crc_source_dprx(source)) { if (drm_dp_stop_crc(aux)) { DRM_DEBUG_DRIVER("dp stop crc failed\n"); return -EINVAL; @@ -204,7 +234,7 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc) return; } - if (crtc_state->crc_src == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC) { + if (dm_is_crc_source_crtc(crtc_state->crc_src)) { if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state, &crcs[0], &crcs[1], &crcs[2])) return; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h index b63a9011f511..14de7301c28d 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h @@ -29,15 +29,17 @@ enum amdgpu_dm_pipe_crc_source { AMDGPU_DM_PIPE_CRC_SOURCE_NONE = 0, AMDGPU_DM_PIPE_CRC_SOURCE_CRTC, + AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER, AMDGPU_DM_PIPE_CRC_SOURCE_DPRX, + AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER, AMDGPU_DM_PIPE_CRC_SOURCE_MAX, AMDGPU_DM_PIPE_CRC_SOURCE_INVALID = -1, }; static inline bool amdgpu_dm_is_valid_crc_source(enum amdgpu_dm_pipe_crc_source source) { - return (source == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC) || - (source == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX); + return (source > AMDGPU_DM_PIPE_CRC_SOURCE_NONE) && + (source < AMDGPU_DM_PIPE_CRC_SOURCE_MAX); } /* amdgpu_dm_crc.c */ -- cgit v1.2.3 From f7938bc0f08b3d748d20dc2c88914f03331dce94 Mon Sep 17 00:00:00 2001 From: Reza Amini Date: Mon, 10 Jun 2019 16:45:50 -0400 Subject: drm/amd/display: Implement DAL3 GPU Integer Scaling [WHY] Users want to not have filtering when scaling by integer multiples to native timing. [HOW] If timing is a multiple integer of view, we set number of taps to 1 (effectivly closest neighbour). 
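Put differently, the scaler drops to 1-tap (nearest neighbour) only when the plane opts in via scaling_quality.integer_scaling and the destination rectangle is an exact integer multiple of the source in both axes. A condensed sketch of the check applied in the diff below, run after the optimal tap count has been computed; scl_data here stands in for the pipe's plane_res.scl_data:

	static bool are_rect_integer_multiples(struct rect src, struct rect dest)
	{
		return dest.width >= src.width && dest.width % src.width == 0 &&
		       dest.height >= src.height && dest.height % src.height == 0;
	}

	if (plane_state->scaling_quality.integer_scaling &&
	    are_rect_integer_multiples(scl_data->viewport, scl_data->recout)) {
		scl_data->taps.v_taps = 1;	/* 1 tap == nearest neighbour, no filtering */
		scl_data->taps.h_taps = 1;
	}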
Signed-off-by: Reza Amini Reviewed-by: Aric Cyr Acked-by: Leo Li Acked-by: Tony Cheng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 16 ++++++++++++++++ drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 1 + 2 files changed, 17 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index a0e29c37ab69..e4d0035b4ed4 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -940,7 +940,14 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx) data->inits.v_c_bot = dc_fixpt_add(data->inits.v_c, data->ratios.vert_c); } +static bool are_rect_integer_multiples(struct rect src, struct rect dest) +{ + if (dest.width >= src.width && dest.width % src.width == 0 && + dest.height >= src.height && dest.height % src.height == 0) + return true; + return false; +} bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) { const struct dc_plane_state *plane_state = pipe_ctx->plane_state; @@ -983,6 +990,15 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) if (pipe_ctx->plane_res.dpp != NULL) res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps( pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality); + + if (res && + plane_state->scaling_quality.integer_scaling && + are_rect_integer_multiples(pipe_ctx->plane_res.scl_data.viewport, + pipe_ctx->plane_res.scl_data.recout)) { + pipe_ctx->plane_res.scl_data.taps.v_taps = 1; + pipe_ctx->plane_res.scl_data.taps.h_taps = 1; + } + if (!res) { /* Try 24 bpp linebuffer */ pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_24BPP; diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h index e9a6225f4720..28a2cd2d2a49 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h @@ -614,6 +614,7 @@ struct scaling_taps { uint32_t h_taps; uint32_t v_taps_c; uint32_t h_taps_c; + bool integer_scaling; }; enum dc_timing_standard { -- cgit v1.2.3 From 12e2b2d4c65f6164830e25fcd9624519a424b182 Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Wed, 3 Jul 2019 16:20:42 -0400 Subject: drm/amd/display: add dcc programming for dual plane Add dual plane dcc programming support for surfaces. Removes unions from plane size and dcc params as they serve no practical purpose only making our code more convoluted. This results in easy dual plane dcc and surface size programming. Temporary diags_dm code is used to handle the interface change without breaking functionality as a diags change needs to be applied after this one. 
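The practical effect on callers shows up in the hubp pitch programming: instead of picking the grph or video member of a union based on the pixel format, every size and DCC pitch is read from one flat layout, with the chroma fields simply unused for single-plane surfaces. Condensed before/after taken from the hubp changes in the diff below (register programming still subtracts 1 from each pitch):

	/* before: union member depends on the surface format */
	pitch        = plane_size->video.luma_pitch - 1;
	meta_pitch   = dcc->video.meta_pitch_l - 1;
	pitch_c      = plane_size->video.chroma_pitch - 1;
	meta_pitch_c = dcc->video.meta_pitch_c - 1;

	/* after: one layout for graphics and video, chroma fields optional */
	pitch        = plane_size->surface_pitch - 1;
	meta_pitch   = dcc->meta_pitch - 1;
	pitch_c      = plane_size->chroma_pitch - 1;
	meta_pitch_c = dcc->meta_pitch_c - 1;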
Signed-off-by: Dmytro Laktyushkin Reviewed-by: Charlene Liu Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 42 ++++++++--------- drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c | 2 +- drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 2 +- drivers/gpu/drm/amd/display/dc/core/dc.c | 10 ++-- drivers/gpu/drm/amd/display/dc/core/dc_debug.c | 40 ++++++++-------- drivers/gpu/drm/amd/display/dc/dc.h | 4 +- drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 54 +++++++++++----------- drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c | 10 ++-- .../drm/amd/display/dc/dce110/dce110_mem_input_v.c | 42 ++++++++--------- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 20 ++++---- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 4 +- .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 4 +- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c | 20 ++++---- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h | 4 +- .../gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 12 ++--- drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h | 2 +- drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h | 2 +- 17 files changed, 136 insertions(+), 138 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 13463fa31c3f..9f31c17f725d 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2672,7 +2672,7 @@ fill_plane_dcc_attributes(struct amdgpu_device *adev, const struct amdgpu_framebuffer *afb, const enum surface_pixel_format format, const enum dc_rotation_angle rotation, - const union plane_size *plane_size, + const struct plane_size *plane_size, const union dc_tiling_info *tiling_info, const uint64_t info, struct dc_plane_dcc_param *dcc, @@ -2698,8 +2698,8 @@ fill_plane_dcc_attributes(struct amdgpu_device *adev, return -EINVAL; input.format = format; - input.surface_size.width = plane_size->grph.surface_size.width; - input.surface_size.height = plane_size->grph.surface_size.height; + input.surface_size.width = plane_size->surface_size.width; + input.surface_size.height = plane_size->surface_size.height; input.swizzle_mode = tiling_info->gfx9.swizzle; if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180) @@ -2717,9 +2717,9 @@ fill_plane_dcc_attributes(struct amdgpu_device *adev, return -EINVAL; dcc->enable = 1; - dcc->grph.meta_pitch = + dcc->meta_pitch = AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1; - dcc->grph.independent_64b_blks = i64b; + dcc->independent_64b_blks = i64b; dcc_address = get_dcc_address(afb->address, info); address->grph.meta_addr.low_part = lower_32_bits(dcc_address); @@ -2735,7 +2735,7 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev, const enum dc_rotation_angle rotation, const uint64_t tiling_flags, union dc_tiling_info *tiling_info, - union plane_size *plane_size, + struct plane_size *plane_size, struct dc_plane_dcc_param *dcc, struct dc_plane_address *address) { @@ -2748,11 +2748,11 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev, memset(address, 0, sizeof(*address)); if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { - plane_size->grph.surface_size.x = 0; - plane_size->grph.surface_size.y = 0; - plane_size->grph.surface_size.width = fb->width; - plane_size->grph.surface_size.height = fb->height; - plane_size->grph.surface_pitch = + plane_size->surface_size.x = 0; + plane_size->surface_size.y = 0; + plane_size->surface_size.width = fb->width; + 
plane_size->surface_size.height = fb->height; + plane_size->surface_pitch = fb->pitches[0] / fb->format->cpp[0]; address->type = PLN_ADDR_TYPE_GRAPHICS; @@ -2761,20 +2761,20 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev, } else if (format < SURFACE_PIXEL_FORMAT_INVALID) { uint64_t chroma_addr = afb->address + fb->offsets[1]; - plane_size->video.luma_size.x = 0; - plane_size->video.luma_size.y = 0; - plane_size->video.luma_size.width = fb->width; - plane_size->video.luma_size.height = fb->height; - plane_size->video.luma_pitch = + plane_size->surface_size.x = 0; + plane_size->surface_size.y = 0; + plane_size->surface_size.width = fb->width; + plane_size->surface_size.height = fb->height; + plane_size->surface_pitch = fb->pitches[0] / fb->format->cpp[0]; - plane_size->video.chroma_size.x = 0; - plane_size->video.chroma_size.y = 0; + plane_size->chroma_size.x = 0; + plane_size->chroma_size.y = 0; /* TODO: set these based on surface format */ - plane_size->video.chroma_size.width = fb->width / 2; - plane_size->video.chroma_size.height = fb->height / 2; + plane_size->chroma_size.width = fb->width / 2; + plane_size->chroma_size.height = fb->height / 2; - plane_size->video.chroma_pitch = + plane_size->chroma_pitch = fb->pitches[1] / fb->format->cpp[1]; address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE; diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c index 7108d51a9c5b..9f12e21f8b9b 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c @@ -2852,7 +2852,7 @@ static void populate_initial_data( data->src_height[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->plane_res.scl_data.viewport.height); data->src_width[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->plane_res.scl_data.viewport.width); data->pitch_in_pixels[num_displays * 2 + j] = bw_int_to_fixed( - pipe[i].bottom_pipe->plane_state->plane_size.grph.surface_pitch); + pipe[i].bottom_pipe->plane_state->plane_size.surface_pitch); data->h_taps[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->plane_res.scl_data.taps.h_taps); data->v_taps[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->plane_res.scl_data.taps.v_taps); data->h_scale_ratio[num_displays * 2 + j] = fixed31_32_to_bw_fixed( diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c index 38365dd911a3..061c6e3a3088 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c @@ -329,7 +329,7 @@ static void pipe_ctx_to_e2e_pipe_params ( dcc_support_pixel_format(pipe->plane_state->format, &bpe) ? 
1 : 0; } input->src.dcc_rate = 1; - input->src.meta_pitch = pipe->plane_state->dcc.grph.meta_pitch; + input->src.meta_pitch = pipe->plane_state->dcc.meta_pitch; input->src.source_scan = dm_horz; input->src.sw_mode = pipe->plane_state->tiling_info.gfx9.swizzle; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index c86c86c07fd9..4b1d314f2939 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1349,8 +1349,8 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa } if (u->plane_info->dcc.enable != u->surface->dcc.enable - || u->plane_info->dcc.grph.independent_64b_blks != u->surface->dcc.grph.independent_64b_blks - || u->plane_info->dcc.grph.meta_pitch != u->surface->dcc.grph.meta_pitch) { + || u->plane_info->dcc.independent_64b_blks != u->surface->dcc.independent_64b_blks + || u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) { update_flags->bits.dcc_change = 1; elevate_update_type(&update_type, UPDATE_TYPE_MED); } @@ -1364,9 +1364,9 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa elevate_update_type(&update_type, UPDATE_TYPE_FULL); } - if (u->plane_info->plane_size.grph.surface_pitch != u->surface->plane_size.grph.surface_pitch - || u->plane_info->plane_size.video.luma_pitch != u->surface->plane_size.video.luma_pitch - || u->plane_info->plane_size.video.chroma_pitch != u->surface->plane_size.video.chroma_pitch) { + if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch + || u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch + || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) { update_flags->bits.plane_size_change = 1; elevate_update_type(&update_type, UPDATE_TYPE_MED); } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c index 5903e7822f98..b9227d5de3a3 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c @@ -115,16 +115,16 @@ void pre_surface_trace( plane_state->clip_rect.height); SURFACE_TRACE( - "plane_state->plane_size.grph.surface_size.x = %d;\n" - "plane_state->plane_size.grph.surface_size.y = %d;\n" - "plane_state->plane_size.grph.surface_size.width = %d;\n" - "plane_state->plane_size.grph.surface_size.height = %d;\n" - "plane_state->plane_size.grph.surface_pitch = %d;\n", - plane_state->plane_size.grph.surface_size.x, - plane_state->plane_size.grph.surface_size.y, - plane_state->plane_size.grph.surface_size.width, - plane_state->plane_size.grph.surface_size.height, - plane_state->plane_size.grph.surface_pitch); + "plane_state->plane_size.surface_size.x = %d;\n" + "plane_state->plane_size.surface_size.y = %d;\n" + "plane_state->plane_size.surface_size.width = %d;\n" + "plane_state->plane_size.surface_size.height = %d;\n" + "plane_state->plane_size.surface_pitch = %d;\n", + plane_state->plane_size.surface_size.x, + plane_state->plane_size.surface_size.y, + plane_state->plane_size.surface_size.width, + plane_state->plane_size.surface_size.height, + plane_state->plane_size.surface_pitch); SURFACE_TRACE( @@ -202,20 +202,20 @@ void update_surface_trace( SURFACE_TRACE( "plane_info->color_space = %d;\n" "plane_info->format = %d;\n" - "plane_info->plane_size.grph.surface_pitch = %d;\n" - "plane_info->plane_size.grph.surface_size.height = %d;\n" - "plane_info->plane_size.grph.surface_size.width = %d;\n" - 
"plane_info->plane_size.grph.surface_size.x = %d;\n" - "plane_info->plane_size.grph.surface_size.y = %d;\n" + "plane_info->plane_size.surface_pitch = %d;\n" + "plane_info->plane_size.surface_size.height = %d;\n" + "plane_info->plane_size.surface_size.width = %d;\n" + "plane_info->plane_size.surface_size.x = %d;\n" + "plane_info->plane_size.surface_size.y = %d;\n" "plane_info->rotation = %d;\n" "plane_info->stereo_format = %d;\n", update->plane_info->color_space, update->plane_info->format, - update->plane_info->plane_size.grph.surface_pitch, - update->plane_info->plane_size.grph.surface_size.height, - update->plane_info->plane_size.grph.surface_size.width, - update->plane_info->plane_size.grph.surface_size.x, - update->plane_info->plane_size.grph.surface_size.y, + update->plane_info->plane_size.surface_pitch, + update->plane_info->plane_size.surface_size.height, + update->plane_info->plane_size.surface_size.width, + update->plane_info->plane_size.surface_size.x, + update->plane_info->plane_size.surface_size.y, update->plane_info->rotation, update->plane_info->stereo_format); diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 489f6240f2ed..c28fca5c0a3b 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -705,7 +705,7 @@ struct dc_plane_state { struct rect dst_rect; struct rect clip_rect; - union plane_size plane_size; + struct plane_size plane_size; union dc_tiling_info tiling_info; struct dc_plane_dcc_param dcc; @@ -754,7 +754,7 @@ struct dc_plane_state { }; struct dc_plane_info { - union plane_size plane_size; + struct plane_size plane_size; union dc_tiling_info tiling_info; struct dc_plane_dcc_param dcc; enum surface_pixel_format format; diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h index 28a2cd2d2a49..929c4eadc1dc 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h @@ -115,42 +115,40 @@ struct rect { int height; }; -union plane_size { - /* Grph or Video will be selected - * based on format above: - * Use Video structure if - * format >= DalPixelFormat_VideoBegin - * else use Grph structure +struct plane_size { + /* Graphic surface pitch in pixels. + * In LINEAR_GENERAL mode, pitch + * is 32 pixel aligned. */ - struct { - struct rect surface_size; - /* Graphic surface pitch in pixels. - * In LINEAR_GENERAL mode, pitch - * is 32 pixel aligned. - */ - int surface_pitch; - } grph; + int surface_pitch; + int chroma_pitch; + struct rect surface_size; + struct rect chroma_size; - struct { - struct rect luma_size; - /* Graphic surface pitch in pixels. - * In LINEAR_GENERAL mode, pitch is - * 32 pixel aligned. - */ - int luma_pitch; + union { + struct { + struct rect surface_size; + int surface_pitch; + } grph; - struct rect chroma_size; - /* Graphic surface pitch in pixels. - * In LINEAR_GENERAL mode, pitch is - * 32 pixel aligned. 
- */ - int chroma_pitch; - } video; + struct { + struct rect luma_size; + int luma_pitch; + struct rect chroma_size; + int chroma_pitch; + } video; + }; }; struct dc_plane_dcc_param { bool enable; + int meta_pitch; + bool independent_64b_blks; + + int meta_pitch_c; + bool independent_64b_blks_c; + union { struct { int meta_pitch; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c index a24a2bda8656..1488ffddf4e3 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c @@ -391,10 +391,10 @@ static void program_tiling( static void program_size_and_rotation( struct dce_mem_input *dce_mi, enum dc_rotation_angle rotation, - const union plane_size *plane_size) + const struct plane_size *plane_size) { - const struct rect *in_rect = &plane_size->grph.surface_size; - struct rect hw_rect = plane_size->grph.surface_size; + const struct rect *in_rect = &plane_size->surface_size; + struct rect hw_rect = plane_size->surface_size; const uint32_t rotation_angles[ROTATION_ANGLE_COUNT] = { [ROTATION_ANGLE_0] = 0, [ROTATION_ANGLE_90] = 1, @@ -423,7 +423,7 @@ static void program_size_and_rotation( GRPH_Y_END, hw_rect.height); REG_SET(GRPH_PITCH, 0, - GRPH_PITCH, plane_size->grph.surface_pitch); + GRPH_PITCH, plane_size->surface_pitch); REG_SET(HW_ROTATION, 0, GRPH_ROTATION_ANGLE, rotation_angles[rotation]); @@ -505,7 +505,7 @@ static void dce_mi_program_surface_config( struct mem_input *mi, enum surface_pixel_format format, union dc_tiling_info *tiling_info, - union plane_size *plane_size, + struct plane_size *plane_size, enum dc_rotation_angle rotation, struct dc_plane_dcc_param *dcc, bool horizontal_mirror) diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c index 9b9fc3d96c07..d54172d88f5f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c @@ -229,26 +229,26 @@ static void program_tiling( static void program_size_and_rotation( struct dce_mem_input *mem_input110, enum dc_rotation_angle rotation, - const union plane_size *plane_size) + const struct plane_size *plane_size) { uint32_t value = 0; - union plane_size local_size = *plane_size; + struct plane_size local_size = *plane_size; if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270) { - swap(local_size.video.luma_size.x, - local_size.video.luma_size.y); - swap(local_size.video.luma_size.width, - local_size.video.luma_size.height); - swap(local_size.video.chroma_size.x, - local_size.video.chroma_size.y); - swap(local_size.video.chroma_size.width, - local_size.video.chroma_size.height); + swap(local_size.surface_size.x, + local_size.surface_size.y); + swap(local_size.surface_size.width, + local_size.surface_size.height); + swap(local_size.chroma_size.x, + local_size.chroma_size.y); + swap(local_size.chroma_size.width, + local_size.chroma_size.height); } value = 0; - set_reg_field_value(value, local_size.video.luma_pitch, + set_reg_field_value(value, local_size.surface_pitch, UNP_GRPH_PITCH_L, GRPH_PITCH_L); dm_write_reg( @@ -257,7 +257,7 @@ static void program_size_and_rotation( value); value = 0; - set_reg_field_value(value, local_size.video.chroma_pitch, + set_reg_field_value(value, local_size.chroma_pitch, UNP_GRPH_PITCH_C, GRPH_PITCH_C); dm_write_reg( mem_input110->base.ctx, @@ -297,8 +297,8 @@ static void program_size_and_rotation( value); value 
= 0; - set_reg_field_value(value, local_size.video.luma_size.x + - local_size.video.luma_size.width, + set_reg_field_value(value, local_size.surface_size.x + + local_size.surface_size.width, UNP_GRPH_X_END_L, GRPH_X_END_L); dm_write_reg( mem_input110->base.ctx, @@ -306,8 +306,8 @@ static void program_size_and_rotation( value); value = 0; - set_reg_field_value(value, local_size.video.chroma_size.x + - local_size.video.chroma_size.width, + set_reg_field_value(value, local_size.chroma_size.x + + local_size.chroma_size.width, UNP_GRPH_X_END_C, GRPH_X_END_C); dm_write_reg( mem_input110->base.ctx, @@ -315,8 +315,8 @@ static void program_size_and_rotation( value); value = 0; - set_reg_field_value(value, local_size.video.luma_size.y + - local_size.video.luma_size.height, + set_reg_field_value(value, local_size.surface_size.y + + local_size.surface_size.height, UNP_GRPH_Y_END_L, GRPH_Y_END_L); dm_write_reg( mem_input110->base.ctx, @@ -324,8 +324,8 @@ static void program_size_and_rotation( value); value = 0; - set_reg_field_value(value, local_size.video.chroma_size.y + - local_size.video.chroma_size.height, + set_reg_field_value(value, local_size.chroma_size.y + + local_size.chroma_size.height, UNP_GRPH_Y_END_C, GRPH_Y_END_C); dm_write_reg( mem_input110->base.ctx, @@ -637,7 +637,7 @@ void dce_mem_input_v_program_surface_config( struct mem_input *mem_input, enum surface_pixel_format format, union dc_tiling_info *tiling_info, - union plane_size *plane_size, + struct plane_size *plane_size, enum dc_rotation_angle rotation, struct dc_plane_dcc_param *dcc, bool horizotal_mirror) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index a16128814d62..03f5aa10c4c4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c @@ -163,7 +163,7 @@ void hubp1_program_tiling( void hubp1_program_size( struct hubp *hubp, enum surface_pixel_format format, - const union plane_size *plane_size, + const struct plane_size *plane_size, struct dc_plane_dcc_param *dcc) { struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); @@ -173,16 +173,16 @@ void hubp1_program_size( * 444 or 420 luma */ if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN && format < SURFACE_PIXEL_FORMAT_SUBSAMPLE_END) { - ASSERT(plane_size->video.chroma_pitch != 0); + ASSERT(plane_size->chroma_pitch != 0); /* Chroma pitch zero can cause system hang! 
*/ - pitch = plane_size->video.luma_pitch - 1; - meta_pitch = dcc->video.meta_pitch_l - 1; - pitch_c = plane_size->video.chroma_pitch - 1; - meta_pitch_c = dcc->video.meta_pitch_c - 1; + pitch = plane_size->surface_pitch - 1; + meta_pitch = dcc->meta_pitch - 1; + pitch_c = plane_size->chroma_pitch - 1; + meta_pitch_c = dcc->meta_pitch_c - 1; } else { - pitch = plane_size->grph.surface_pitch - 1; - meta_pitch = dcc->grph.meta_pitch - 1; + pitch = plane_size->surface_pitch - 1; + meta_pitch = dcc->meta_pitch - 1; pitch_c = 0; meta_pitch_c = 0; } @@ -526,13 +526,13 @@ void hubp1_program_surface_config( struct hubp *hubp, enum surface_pixel_format format, union dc_tiling_info *tiling_info, - union plane_size *plane_size, + struct plane_size *plane_size, enum dc_rotation_angle rotation, struct dc_plane_dcc_param *dcc, bool horizontal_mirror, unsigned int compat_level) { - hubp1_dcc_control(hubp, dcc->enable, dcc->grph.independent_64b_blks); + hubp1_dcc_control(hubp, dcc->enable, dcc->independent_64b_blks); hubp1_program_tiling(hubp, tiling_info, format); hubp1_program_size(hubp, format, plane_size, dcc); hubp1_program_rotation(hubp, rotation, horizontal_mirror); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h index 8f4bcdc74116..344e446e337d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h @@ -685,7 +685,7 @@ void hubp1_program_surface_config( struct hubp *hubp, enum surface_pixel_format format, union dc_tiling_info *tiling_info, - union plane_size *plane_size, + struct plane_size *plane_size, enum dc_rotation_angle rotation, struct dc_plane_dcc_param *dcc, bool horizontal_mirror, @@ -707,7 +707,7 @@ void hubp1_program_pixel_format( void hubp1_program_size( struct hubp *hubp, enum surface_pixel_format format, - const union plane_size *plane_size, + const struct plane_size *plane_size, struct dc_plane_dcc_param *dcc); void hubp1_program_rotation( diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 29db9b2d4412..e720be6be369 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -2132,7 +2132,7 @@ void update_dchubp_dpp( struct hubp *hubp = pipe_ctx->plane_res.hubp; struct dpp *dpp = pipe_ctx->plane_res.dpp; struct dc_plane_state *plane_state = pipe_ctx->plane_state; - union plane_size size = plane_state->plane_size; + struct plane_size size = plane_state->plane_size; unsigned int compat_level = 0; /* depends on DML calculation, DPP clock value may change dynamically */ @@ -2178,7 +2178,7 @@ void update_dchubp_dpp( &pipe_ctx->ttu_regs); } - size.grph.surface_size = pipe_ctx->plane_res.scl_data.viewport; + size.surface_size = pipe_ctx->plane_res.scl_data.viewport; if (plane_state->update_flags.bits.full_update || plane_state->update_flags.bits.bpp_change) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c index a167f867cb72..487de87b03eb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c @@ -397,7 +397,7 @@ static void hubp2_program_tiling( void hubp2_program_size( struct hubp *hubp, enum surface_pixel_format format, - const union plane_size *plane_size, + const struct plane_size *plane_size, struct dc_plane_dcc_param *dcc) { struct dcn20_hubp *hubp2 = 
TO_DCN20_HUBP(hubp); @@ -410,16 +410,16 @@ void hubp2_program_size( use_pitch_c = format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN && format < SURFACE_PIXEL_FORMAT_SUBSAMPLE_END; if (use_pitch_c) { - ASSERT(plane_size->video.chroma_pitch != 0); + ASSERT(plane_size->chroma_pitch != 0); /* Chroma pitch zero can cause system hang! */ - pitch = plane_size->video.luma_pitch - 1; - meta_pitch = dcc->video.meta_pitch_l - 1; - pitch_c = plane_size->video.chroma_pitch - 1; - meta_pitch_c = dcc->video.meta_pitch_c - 1; + pitch = plane_size->surface_pitch - 1; + meta_pitch = dcc->meta_pitch - 1; + pitch_c = plane_size->chroma_pitch - 1; + meta_pitch_c = dcc->meta_pitch_c - 1; } else { - pitch = plane_size->grph.surface_pitch - 1; - meta_pitch = dcc->grph.meta_pitch - 1; + pitch = plane_size->surface_pitch - 1; + meta_pitch = dcc->meta_pitch - 1; pitch_c = 0; meta_pitch_c = 0; } @@ -592,7 +592,7 @@ void hubp2_program_surface_config( struct hubp *hubp, enum surface_pixel_format format, union dc_tiling_info *tiling_info, - union plane_size *plane_size, + struct plane_size *plane_size, enum dc_rotation_angle rotation, struct dc_plane_dcc_param *dcc, bool horizontal_mirror, @@ -600,7 +600,7 @@ void hubp2_program_surface_config( { struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); - hubp2_dcc_control(hubp, dcc->enable, dcc->grph.independent_64b_blks); + hubp2_dcc_control(hubp, dcc->enable, dcc->independent_64b_blks); hubp2_program_tiling(hubp2, tiling_info, format); hubp2_program_size(hubp, format, plane_size, dcc); hubp2_program_rotation(hubp, rotation, horizontal_mirror); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h index c8418235e154..1c53af4811e8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h @@ -273,7 +273,7 @@ void hubp2_dcc_control(struct hubp *hubp, bool enable, void hubp2_program_size( struct hubp *hubp, enum surface_pixel_format format, - const union plane_size *plane_size, + const struct plane_size *plane_size, struct dc_plane_dcc_param *dcc); void hubp2_program_rotation( @@ -289,7 +289,7 @@ void hubp2_program_surface_config( struct hubp *hubp, enum surface_pixel_format format, union dc_tiling_info *tiling_info, - union plane_size *plane_size, + struct plane_size *plane_size, enum dc_rotation_angle rotation, struct dc_plane_dcc_param *dcc, bool horizontal_mirror, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 8b338275624e..20295f61b61b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -1822,13 +1822,13 @@ int dcn20_populate_dml_pipes_from_context( pipes[pipe_cnt].pipe.src.viewport_height = scl->viewport.height; pipes[pipe_cnt].pipe.src.viewport_height_c = scl->viewport_c.height; if (pln->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { - pipes[pipe_cnt].pipe.src.data_pitch = pln->plane_size.video.luma_pitch; - pipes[pipe_cnt].pipe.src.data_pitch_c = pln->plane_size.video.chroma_pitch; - pipes[pipe_cnt].pipe.src.meta_pitch = pln->dcc.video.meta_pitch_l; - pipes[pipe_cnt].pipe.src.meta_pitch_c = pln->dcc.video.meta_pitch_c; + pipes[pipe_cnt].pipe.src.data_pitch = pln->plane_size.surface_pitch; + pipes[pipe_cnt].pipe.src.data_pitch_c = pln->plane_size.chroma_pitch; + pipes[pipe_cnt].pipe.src.meta_pitch = pln->dcc.meta_pitch; + pipes[pipe_cnt].pipe.src.meta_pitch_c = pln->dcc.meta_pitch_c; } else { - 
pipes[pipe_cnt].pipe.src.data_pitch = pln->plane_size.grph.surface_pitch; - pipes[pipe_cnt].pipe.src.meta_pitch = pln->dcc.grph.meta_pitch; + pipes[pipe_cnt].pipe.src.data_pitch = pln->plane_size.surface_pitch; + pipes[pipe_cnt].pipe.src.meta_pitch = pln->dcc.meta_pitch; } pipes[pipe_cnt].pipe.src.dcc = pln->dcc.enable; pipes[pipe_cnt].pipe.dest.recout_width = scl->recout.width; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h index 51bff8717cc9..61cd4f8752c3 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h @@ -103,7 +103,7 @@ struct hubp_funcs { struct hubp *hubp, enum surface_pixel_format format, union dc_tiling_info *tiling_info, - union plane_size *plane_size, + struct plane_size *plane_size, enum dc_rotation_angle rotation, struct dc_plane_dcc_param *dcc, bool horizontal_mirror, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h index da89c2edb07c..7193acfcd779 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h @@ -149,7 +149,7 @@ struct mem_input_funcs { struct mem_input *mem_input, enum surface_pixel_format format, union dc_tiling_info *tiling_info, - union plane_size *plane_size, + struct plane_size *plane_size, enum dc_rotation_angle rotation, struct dc_plane_dcc_param *dcc, bool horizontal_mirror); -- cgit v1.2.3 From 6da16270ee026a0397a67f5e2b4661b6dfa45afb Mon Sep 17 00:00:00 2001 From: Jun Lei Date: Wed, 3 Jul 2019 16:52:38 -0400 Subject: drm/amd/display: populate last calculated bb state with max clocks [why] update_bounding_box calculates intermediate bb states based on clock relationship however, the last state doesn't need to maintain a minimum relationship, but should actually contain maximum of every clock. otherwise maximum clocks are not usable [how] once the calculated bb is built, override the last state with max values Signed-off-by: Jun Lei Reviewed-by: Aric Cyr Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 20295f61b61b..346a9b9e7624 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -2751,6 +2751,10 @@ static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_ num_calculated_states++; } + calculated_states[num_calculated_states - 1].socclk_mhz = max_clocks->socClockInKhz / 1000; + calculated_states[num_calculated_states - 1].fabricclk_mhz = max_clocks->socClockInKhz / 1000; + calculated_states[num_calculated_states - 1].dcfclk_mhz = max_clocks->dcfClockInKhz / 1000; + memcpy(bb->clock_limits, calculated_states, sizeof(bb->clock_limits)); bb->num_states = num_calculated_states; -- cgit v1.2.3 From 8897810adfa768b0c127c1e204bedc4449e3f945 Mon Sep 17 00:00:00 2001 From: Julian Parkin Date: Tue, 25 Jun 2019 14:55:53 -0400 Subject: drm/amd/display: Fix dc_create failure handling and 666 color depths [Why] It is possible (but very unlikely) that constructing dc fails before current_state is created. We support 666 color depth in some scenarios, but this isn't handled in get_norm_pix_clk. It uses exactly the same pixel clock as the 888 case. 
[How] Check for non null current_state before destructing. Add case for 666 color depth to get_norm_pix_clk to avoid assertion. Signed-off-by: Julian Parkin Reviewed-by: Charlene Liu Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 6 ++++-- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 1 + 2 files changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 4b1d314f2939..d6a0a08f5cda 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -502,8 +502,10 @@ void dc_stream_set_static_screen_events(struct dc *dc, static void destruct(struct dc *dc) { - dc_release_state(dc->current_state); - dc->current_state = NULL; + if (dc->current_state) { + dc_release_state(dc->current_state); + dc->current_state = NULL; + } destroy_links(dc); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index e4d0035b4ed4..6f9d310de542 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1855,6 +1855,7 @@ static int get_norm_pix_clk(const struct dc_crtc_timing *timing) pix_clk /= 2; if (timing->pixel_encoding != PIXEL_ENCODING_YCBCR422) { switch (timing->display_color_depth) { + case COLOR_DEPTH_666: case COLOR_DEPTH_888: normalized_pix_clk = pix_clk; break; -- cgit v1.2.3 From ce08aad3896f342d6c99e8e2dc1f71a0926efafc Mon Sep 17 00:00:00 2001 From: Alvin Lee Date: Thu, 4 Jul 2019 15:17:42 -0400 Subject: drm/amd/display: Only enable audio if speaker allocation exists [Why] In dm_helpers_parse_edid_caps, there is a corner case where no speakers can be allocated even though the audio mode count is greater than 0. Enabling audio when no speaker allocations exists can cause issues in the video stream. [How] Add a check to not enable audio unless one or more speaker allocations exist (since doing this can cause issues in the video stream). Signed-off-by: Alvin Lee Reviewed-by: Jun Lei Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 6f9d310de542..c227b86420a0 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -2002,7 +2002,7 @@ enum dc_status resource_map_pool_resources( /* TODO: Add check if ASIC support and EDID audio */ if (!stream->converter_disable_audio && dc_is_audio_capable_signal(pipe_ctx->stream->signal) && - stream->audio_info.mode_count) { + stream->audio_info.mode_count && stream->audio_info.flags.all) { pipe_ctx->stream_res.audio = find_first_free_audio( &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id); -- cgit v1.2.3 From f591344e89dca887afeb5bbe21fa199687fd4a5f Mon Sep 17 00:00:00 2001 From: Julian Parkin Date: Wed, 3 Jul 2019 13:59:26 -0400 Subject: drm/amd/display: Clean up dynamic metadata logic [Why] Code to enable DCN20 dynamic metadata feature is duplicated in two places and was added to DCE110 enable stream. [How] Create DCN20 specific enable stream function for clarity, and add a hardware sequencer function to program dynamic metadata to avoid the duplicate code. 
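For reference, the shape of this cleanup is a single optional hardware-sequencer hook that every caller dispatches through, instead of each open-coding the stream-encoder calls. A minimal standalone sketch of that dispatch pattern, using simplified stand-in types rather than the real DC structures (only the hook name program_dmdata_engine comes from this series):

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins; the real pipe_ctx and hw_sequencer_funcs carry
 * far more state. */
struct pipe_ctx {
	bool has_dmdata;		/* dmdata_address.quad_part != 0 */
};

struct hw_sequencer_funcs {
	/* Optional hook: present only on hardware that supports it. */
	void (*program_dmdata_engine)(struct pipe_ctx *pipe_ctx);
};

static void program_dmdata_engine_sketch(struct pipe_ctx *pipe_ctx)
{
	/* The real hook picks DP vs HDMI mode and enables or disables the
	 * stream encoder's dynamic-metadata path in one place. */
	printf("dmdata engine programmed, enable=%d\n", pipe_ctx->has_dmdata);
}

static void enable_stream_sketch(const struct hw_sequencer_funcs *hwss,
				 struct pipe_ctx *pipe_ctx)
{
	/* Every caller reduces to a guarded dispatch, so the duplicated
	 * encoder programming lives in exactly one function. */
	if (hwss->program_dmdata_engine)
		hwss->program_dmdata_engine(pipe_ctx);
}

int main(void)
{
	struct hw_sequencer_funcs hwss = {
		.program_dmdata_engine = program_dmdata_engine_sketch,
	};
	struct pipe_ctx pipe = { .has_dmdata = true };

	enable_stream_sketch(&hwss, &pipe);
	return 0;
}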
Signed-off-by: Julian Parkin Reviewed-by: Charlene Liu Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 27 +++----- .../amd/display/dc/dce110/dce110_hw_sequencer.c | 24 +------ drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 74 ++++++++++++++++++++++ drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 1 + 4 files changed, 86 insertions(+), 40 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 352862370390..bf1d7bb90e0f 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -566,6 +566,7 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream, return ret; } + #if defined(CONFIG_DRM_AMD_DC_DCN2_0) bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream) { @@ -597,6 +598,14 @@ bool dc_stream_set_dynamic_metadata(struct dc *dc, struct hubp *hubp; int i; + /* Dynamic metadata is only supported on HDMI or DP */ + if (!dc_is_hdmi_signal(stream->signal) && !dc_is_dp_signal(stream->signal)) + return false; + + /* Check hardware support */ + if (!dc->hwss.program_dmdata_engine) + return false; + for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream == stream) @@ -612,23 +621,7 @@ bool dc_stream_set_dynamic_metadata(struct dc *dc, pipe_ctx->stream->dmdata_address = attr->address; - if (pipe_ctx->stream_res.stream_enc && - pipe_ctx->stream_res.stream_enc->funcs->set_dynamic_metadata != NULL) { - if (pipe_ctx->stream->dmdata_address.quad_part != 0) { - /* if using dynamic meta, don't set up generic infopackets */ - pipe_ctx->stream_res.encoder_info_frame.hdrsmd.valid = false; - pipe_ctx->stream_res.stream_enc->funcs->set_dynamic_metadata( - pipe_ctx->stream_res.stream_enc, - true, pipe_ctx->plane_res.hubp->inst, - dc_is_dp_signal(pipe_ctx->stream->signal) ? - dmdata_dp : dmdata_hdmi); - } else - pipe_ctx->stream_res.stream_enc->funcs->set_dynamic_metadata( - pipe_ctx->stream_res.stream_enc, - false, pipe_ctx->plane_res.hubp->inst, - dc_is_dp_signal(pipe_ctx->stream->signal) ? - dmdata_dp : dmdata_hdmi); - } + dc->hwss.program_dmdata_engine(pipe_ctx); if (hubp->funcs->dmdata_set_attributes != NULL && pipe_ctx->stream->dmdata_address.quad_part != 0) { diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 41f5ef6c5085..ddc5add503ec 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -667,29 +667,7 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx) link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc, pipe_ctx->stream_res.stream_enc->id, true); - /* update AVI info frame (HDMI, DP)*/ - /* TODO: FPGA may change to hwss.update_info_frame */ - -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) - if (pipe_ctx->stream_res.stream_enc->funcs->set_dynamic_metadata != NULL && - pipe_ctx->plane_res.hubp != NULL) { - if (pipe_ctx->stream->dmdata_address.quad_part != 0) { - /* if using dynamic meta, don't set up generic infopackets */ - pipe_ctx->stream_res.encoder_info_frame.hdrsmd.valid = false; - pipe_ctx->stream_res.stream_enc->funcs->set_dynamic_metadata( - pipe_ctx->stream_res.stream_enc, - true, pipe_ctx->plane_res.hubp->inst, - dc_is_dp_signal(pipe_ctx->stream->signal) ? 
- dmdata_dp : dmdata_hdmi); - } else - pipe_ctx->stream_res.stream_enc->funcs->set_dynamic_metadata( - pipe_ctx->stream_res.stream_enc, - false, pipe_ctx->plane_res.hubp->inst, - dc_is_dp_signal(pipe_ctx->stream->signal) ? - dmdata_dp : dmdata_hdmi); - } -#endif - dce110_update_info_frame(pipe_ctx); + link->dc->hwss.update_info_frame(pipe_ctx); /* enable early control to avoid corruption on DP monitor*/ active_total_with_borders = diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 566cd4cdfef4..76ce622e1421 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -2056,6 +2056,78 @@ static void dcn20_set_flip_control_gsl( } +static void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) +{ + enum dc_lane_count lane_count = + pipe_ctx->stream->link->cur_link_settings.lane_count; + + struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; + struct dc_link *link = pipe_ctx->stream->link; + + uint32_t active_total_with_borders; + uint32_t early_control = 0; + struct timing_generator *tg = pipe_ctx->stream_res.tg; + + /* For MST, there are multiply stream go to only one link. + * connect DIG back_end to front_end while enable_stream and + * disconnect them during disable_stream + * BY this, it is logic clean to separate stream and link + */ + link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc, + pipe_ctx->stream_res.stream_enc->id, true); + + if (link->dc->hwss.program_dmdata_engine) + link->dc->hwss.program_dmdata_engine(pipe_ctx); + + link->dc->hwss.update_info_frame(pipe_ctx); + + /* enable early control to avoid corruption on DP monitor*/ + active_total_with_borders = + timing->h_addressable + + timing->h_border_left + + timing->h_border_right; + + if (lane_count != 0) + early_control = active_total_with_borders % lane_count; + + if (early_control == 0) + early_control = lane_count; + + tg->funcs->set_early_control(tg, early_control); + + /* enable audio only within mode set */ + if (pipe_ctx->stream_res.audio != NULL) { + if (dc_is_dp_signal(pipe_ctx->stream->signal)) + pipe_ctx->stream_res.stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_res.stream_enc); + } +} + +static void dcn20_program_dmdata_engine(struct pipe_ctx *pipe_ctx) +{ + struct dc_stream_state *stream = pipe_ctx->stream; + struct hubp *hubp = pipe_ctx->plane_res.hubp; + bool enable = false; + struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc; + enum dynamic_metadata_mode mode = dc_is_dp_signal(stream->signal) + ? 
dmdata_dp + : dmdata_hdmi; + + /* if using dynamic meta, don't set up generic infopackets */ + if (pipe_ctx->stream->dmdata_address.quad_part != 0) { + pipe_ctx->stream_res.encoder_info_frame.hdrsmd.valid = false; + enable = true; + } + + if (!hubp) + return; + + if (!stream_enc || !stream_enc->funcs->set_dynamic_metadata) + return; + + stream_enc->funcs->set_dynamic_metadata(stream_enc, enable, + hubp->inst, mode); +} + void dcn20_hw_sequencer_construct(struct dc *dc) { dcn10_hw_sequencer_construct(dc); @@ -2080,6 +2152,8 @@ void dcn20_hw_sequencer_construct(struct dc *dc) dc->hwss.update_odm = dcn20_update_odm; dc->hwss.blank_pixel_data = dcn20_blank_pixel_data; dc->hwss.dmdata_status_done = dcn20_dmdata_status_done; + dc->hwss.program_dmdata_engine = dcn20_program_dmdata_engine; + dc->hwss.enable_stream = dcn20_enable_stream; dc->hwss.disable_stream = dcn20_disable_stream; dc->hwss.init_sys_ctx = dcn20_init_sys_ctx; dc->hwss.init_vm_ctx = dcn20_init_vm_ctx; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index 36be08adae05..28645e10f854 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -231,6 +231,7 @@ struct hw_sequencer_funcs { bool (*update_bandwidth)( struct dc *dc, struct dc_state *context); + void (*program_dmdata_engine)(struct pipe_ctx *pipe_ctx); bool (*dmdata_status_done)(struct pipe_ctx *pipe_ctx); #endif -- cgit v1.2.3 From 3f52aa9f9f6f611d19035961fba0f8343bcb3e6c Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Wed, 3 Jul 2019 10:02:39 -0400 Subject: drm/amd/display: Set enabled to false at start of audio disable [Why] In an effort to stop redundant calls to dce110_disable_audio_stream the audio->enabled flag was added to the audio resource struct. While this state probably shouldn't have been tracked on the audio struct itself it still works fine for some sequences. However, it does not work for cases where we're freeing the audio resource (such as hotplugs) or when dynamic audio is enabled. In these cases the pipe_ctx->stream_res.audio = NULL before we can set audio->enabled = false. The next time we acquire the audio resource such as on hotplug the audio will not be enabled for the stream since DC thinks it's still enabled. Audio state tracking should cover this sequence. [How] Set audio->enabled = false at the start as long as we have pipe_ctx->stream_res.audio. 
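For reference, the ordering hazard is easy to see in isolation: once the later teardown steps release the audio resource and clear the pipe's pointer, a flag update placed after them can never run, so the flag has to be cleared first, while the pointer is still known to be valid. A minimal standalone sketch with stand-in types (not the real DC structures):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-ins for the audio resource and the pipe context that owns it. */
struct audio {
	bool enabled;
};

struct pipe_ctx {
	struct audio *audio;
};

static void disable_audio_stream_sketch(struct pipe_ctx *pipe_ctx)
{
	if (!pipe_ctx->audio)
		return;

	/* Clear the tracking flag first; the steps below may release the
	 * resource and NULL the pointer (as happens on hotplug). */
	pipe_ctx->audio->enabled = false;

	/* ... mute, disable the stream-encoder audio path, and possibly
	 * release the resource ... */
	pipe_ctx->audio = NULL;
}

int main(void)
{
	struct audio azalia = { .enabled = true };
	struct pipe_ctx pipe = { .audio = &azalia };

	disable_audio_stream_sketch(&pipe);
	printf("audio enabled after disable: %d\n", azalia.enabled);
	return 0;
}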
Signed-off-by: Nicholas Kazlauskas Reviewed-by: Zhan Liu Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index ddc5add503ec..9c50f09233bb 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -999,6 +999,8 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option) pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( pipe_ctx->stream_res.stream_enc, true); if (pipe_ctx->stream_res.audio) { + pipe_ctx->stream_res.audio->enabled = false; + if (dc->res_pool->pp_smu) pp_smu = dc->res_pool->pp_smu; @@ -1029,8 +1031,6 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option) /* dal_audio_disable_azalia_audio_jack_presence(stream->audio, * stream->stream_engine_id); */ - if (pipe_ctx->stream_res.audio) - pipe_ctx->stream_res.audio->enabled = false; } } -- cgit v1.2.3 From ba94b08d0ba008c9f162e1df3ed0dc52194d8ccb Mon Sep 17 00:00:00 2001 From: Zhan Liu Date: Tue, 2 Jul 2019 15:17:07 -0400 Subject: drm/amd/display: drop ASSERT() if eDP panel is not connected [Why] For boards that support eDP but do not have a physical eDP display connected an ASSERT will be thrown. This is not a critical failure and shouldn't be treated as such. [How] Drop the assertion. Signed-off-by: Zhan Liu Reviewed-by: Nicholas Kazlauskas Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 520d014a4ed8..2dd585b9b540 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -554,8 +554,6 @@ static void read_edp_current_link_settings_on_detect(struct dc_link *link) msleep(8); } - ASSERT(status == DC_OK); - // Read DPCD 00100h to find if standard link rates are set core_link_read_dpcd(link, DP_LINK_BW_SET, &link_bw_set, sizeof(link_bw_set)); -- cgit v1.2.3 From f53fc02b9ac38ad096842bea7880e6c066e01814 Mon Sep 17 00:00:00 2001 From: Aric Cyr Date: Sun, 7 Jul 2019 21:25:35 -0400 Subject: drm/amd/display: 3.2.42 Signed-off-by: Aric Cyr Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index c28fca5c0a3b..b12eee93c253 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -39,7 +39,7 @@ #include "inc/hw/dmcu.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.2.41" +#define DC_VER "3.2.42" #define MAX_SURFACES 3 #define MAX_PLANES 6 -- cgit v1.2.3 From 1b1ff454bd66f9b3117aafe3c39df0b3efecd629 Mon Sep 17 00:00:00 2001 From: Tai Man Date: Fri, 28 Jun 2019 11:40:38 -0400 Subject: drm/amd/display: Increase size of audios array [Why] The audios array defined in "struct resource_pool" is only 6 (MAX_PIPES) but the max number of audio devices (num_audio) is 7. In some projects, it will run out of audios array. [How] Incraese the audios array size to 7. 
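For reference, the mismatch is a pool bound taken from the wrong limit: sized by the pipe count (6), the array has no slot for the seventh endpoint that num_audio permits. A standalone sketch with the two limits as plain macros (MAX_PIPES and MAX_AUDIOS as in this change; the loop is illustrative only):

#include <stdio.h>

#define MAX_PIPES	6	/* previous bound on the audios[] pool */
#define MAX_AUDIOS	7	/* worst-case number of audio endpoints */

struct audio {
	int inst;
};

int main(void)
{
	/* Sized by MAX_PIPES, instance 6 (the seventh endpoint) would land
	 * out of bounds; sizing by MAX_AUDIOS keeps every instance valid. */
	struct audio pool[MAX_AUDIOS];
	int i;

	for (i = 0; i < MAX_AUDIOS; i++)
		pool[i].inst = i;

	printf("last valid audio instance: %d\n", pool[MAX_AUDIOS - 1].inst);
	return 0;
}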
Signed-off-by: Tai Man Reviewed-by: Joshua Aberback Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/inc/core_types.h | 2 +- drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index c89393c19232..a148ffde8b12 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -212,7 +212,7 @@ struct resource_pool { struct clock_source *clock_sources[MAX_CLOCK_SOURCES]; unsigned int clk_src_count; - struct audio *audios[MAX_PIPES]; + struct audio *audios[MAX_AUDIOS]; unsigned int audio_count; struct audio_support audio_support; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h index 8759ec03aede..f82365e2d03c 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h @@ -34,6 +34,7 @@ * Data types shared between different Virtual HW blocks ******************************************************************************/ +#define MAX_AUDIOS 7 #define MAX_PIPES 6 #if defined(CONFIG_DRM_AMD_DC_DCN2_0) #define MAX_DWB_PIPES 1 -- cgit v1.2.3 From dcbb45b6eeedfbc3a087f4fa375c706adabd1ce0 Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Sat, 29 Jun 2019 14:38:04 -0400 Subject: drm/amd/display: do not read link setting if edp not connected [Why] Previously assume eDP sink present if connector present. Do not need to enforce this restriction. Fix issue where driver attempt to read link setting even though no edp connected. {How] Only read link setting after reading connection status. Signed-off-by: Eric Yang Reviewed-by: Yongqiang Sun Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 2dd585b9b540..193d6f14e684 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -722,13 +722,6 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) return false; } - if (link->connector_signal == SIGNAL_TYPE_EDP) { - /* On detect, we want to make sure current link settings are - * up to date, especially if link was powered on by GOP. - */ - read_edp_current_link_settings_on_detect(link); - } - prev_sink = link->local_sink; if (prev_sink != NULL) { dc_sink_retain(prev_sink); @@ -770,6 +763,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) } case SIGNAL_TYPE_EDP: { + read_edp_current_link_settings_on_detect(link); detect_edp_sink_caps(link); sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX; -- cgit v1.2.3 From 0488a56465877a48596a874bc0f4f31d6fbf334e Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Sat, 29 Jun 2019 16:02:37 -0400 Subject: drm/amd/display: fix mpcc assert condition [Why] In DCN2x asic, the MPCC status register definition changed, and our logic for assert is incorrect. disabled is valid state, where we should see idle and not busy, where as in not disabled state, we should see not idle. [How] Change assert condition to be more sensible. 
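For reference, the corrected invariant is a small truth check on the three MPCC_STATUS fields against whether the blender currently has an owner (MPCC_TOP_SEL == 0xf means unowned). A standalone sketch with plain values standing in for the register reads:

#include <assert.h>
#include <stdbool.h>

/* Stand-in for the fields read from MPCC_TOP_SEL and MPCC_STATUS. */
struct mpcc_state {
	unsigned int top_sel;	/* 0xf: no plane attached to this blender */
	bool busy;
	bool idle;
	bool disabled;
};

static void assert_mpcc_idle_before_connect_sketch(const struct mpcc_state *s)
{
	if (s->top_sel == 0xf) {
		/* Unowned: disabled is the valid state, so the blender must
		 * also report idle and not busy. */
		assert(!s->busy);
		assert(s->idle);
		assert(s->disabled);
	} else {
		/* Owned: the blender must be active, i.e. neither disabled
		 * nor idle. */
		assert(!s->disabled);
		assert(!s->idle);
	}
}

int main(void)
{
	struct mpcc_state unowned = {
		.top_sel = 0xf, .busy = false, .idle = true, .disabled = true,
	};

	assert_mpcc_idle_before_connect_sketch(&unowned);
	return 0;
}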
Signed-off-by: Eric Yang Reviewed-by: Yongqiang Sun Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c index f4d3008e5efa..67f0128f0b38 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c @@ -446,23 +446,22 @@ void mpc2_assert_mpcc_idle_before_connect(struct mpc *mpc, int mpcc_id) { struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); unsigned int top_sel, mpc_busy, mpc_idle, mpc_disabled; - REG_GET(MPCC_STATUS[mpcc_id], MPCC_DISABLED, &mpc_disabled); - - if (mpc_disabled) { - ASSERT(0); - return; - } REG_GET(MPCC_TOP_SEL[mpcc_id], MPCC_TOP_SEL, &top_sel); - if (top_sel == 0xf) { - REG_GET_2(MPCC_STATUS[mpcc_id], - MPCC_BUSY, &mpc_busy, - MPCC_IDLE, &mpc_idle); + REG_GET_3(MPCC_STATUS[mpcc_id], + MPCC_BUSY, &mpc_busy, + MPCC_IDLE, &mpc_idle, + MPCC_DISABLED, &mpc_disabled); - ASSERT(mpc_busy == 0); - ASSERT(mpc_idle == 1); + if (top_sel == 0xf) { + ASSERT(!mpc_busy); + ASSERT(mpc_idle); + ASSERT(mpc_disabled); + } else { + ASSERT(!mpc_disabled); + ASSERT(!mpc_idle); } } -- cgit v1.2.3 From 057fc695e934a77bae0c6c7f3be01251774b61cf Mon Sep 17 00:00:00 2001 From: Jun Lei Date: Mon, 8 Jul 2019 15:15:42 -0400 Subject: drm/amd/display: support "dummy pstate" [why] Existing support in DC for pstate only accounts for a single latency. This is sufficient when the variance of latency is small, or that pstate support isn't necessary for correct ASIC functionality. Newer ASICs violate both existing assumptions. PState support is mandatory of correct ASIC functionality, but not all latencies have to be supported. Existing code supports a "full p state" which allows memory clock to change, but is hard for DCN to support (as it requires very large buffers). New code will now fall back to a "dummy p state" support when "full p state" cannot be support. This easy p state support should always be allowed. [how] Define a new latency in socBB. Add fallback logic to support it. Note DML is also updated to ensure that fallback will always work. 
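For reference, the validation order described above condenses to a short decision ladder: try the full UCLK-switch latency, fall back to the dummy p-state latency, and finally retry with the alternate DML, which is expected to always pass. A standalone sketch with boolean stand-ins in place of the full DML runs (the structure mirrors dcn20_validate_bandwidth below, but the types are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for one bandwidth-validation pass; the real code reruns the
 * full DML with a different dram_clock_change_latency_us each time. */
struct validation_pass {
	bool voltage_supported;
	bool p_state_supported;
};

static bool validate_with_fallback(struct validation_pass full_latency,
				   struct validation_pass dummy_latency,
				   struct validation_pass alternate_dml,
				   bool *full_p_state)
{
	/* Best case: full UCLK switching fits within the display buffers. */
	if (full_latency.voltage_supported && full_latency.p_state_supported) {
		*full_p_state = true;
		return true;
	}

	/* Fallback #1: only the dummy p-state (G6 temperature read) latency
	 * has to be absorbed; report p-state unsupported to the clock code. */
	*full_p_state = false;
	if (dummy_latency.voltage_supported && dummy_latency.p_state_supported)
		return true;

	/* Fallback #2: retry with the alternate DML; this path is expected
	 * to always work, so a failure here indicates a bug. */
	return alternate_dml.voltage_supported &&
	       alternate_dml.p_state_supported;
}

int main(void)
{
	struct validation_pass full = { true, false };
	struct validation_pass dummy = { true, true };
	struct validation_pass alt = { true, true };
	bool full_p_state;
	bool ok = validate_with_fallback(full, dummy, alt, &full_p_state);

	printf("supported=%d full_p_state=%d\n", ok, full_p_state);
	return 0;
}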
Signed-off-by: Jun Lei Reviewed-by: Dmytro Laktyushkin Acked-by: Leo Li Signed-off-by: Alex Deucher --- .../amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c | 2 + drivers/gpu/drm/amd/display/dc/dc.h | 7 + .../gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c | 11 + drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 10 +- .../gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 59 +- drivers/gpu/drm/amd/display/dc/dml/Makefile | 3 + .../display/dc/dml/dcn20/display_mode_vba_20v2.c | 5109 ++++++++++++++++++++ .../display/dc/dml/dcn20/display_mode_vba_20v2.h | 32 + .../dc/dml/dcn20/display_rq_dlg_calc_20v2.c | 1701 +++++++ .../dc/dml/dcn20/display_rq_dlg_calc_20v2.h | 74 + .../gpu/drm/amd/display/dc/dml/display_mode_lib.c | 12 + .../gpu/drm/amd/display/dc/dml/display_mode_lib.h | 1 + .../drm/amd/display/dc/dml/display_mode_structs.h | 1 + .../gpu/drm/amd/display/dc/dml/display_mode_vba.c | 8 +- 14 files changed, 7022 insertions(+), 8 deletions(-) create mode 100644 drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c create mode 100644 drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.h create mode 100644 drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c create mode 100644 drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c index 3cff4f0518d3..7ff0396956b3 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c @@ -201,6 +201,7 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base, } if (should_update_pstate_support(safe_to_lower, new_clocks->p_state_change_support, clk_mgr_base->clks.p_state_change_support)) { + clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support; clk_mgr_base->clks.p_state_change_support = new_clocks->p_state_change_support; if (pp_smu && pp_smu->set_pstate_handshake_support) pp_smu->set_pstate_handshake_support(&pp_smu->pp_smu, clk_mgr_base->clks.p_state_change_support); @@ -308,6 +309,7 @@ void dcn2_init_clocks(struct clk_mgr *clk_mgr) memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); // Assumption is that boot state always supports pstate clk_mgr->clks.p_state_change_support = true; + clk_mgr->clks.prev_p_state_change_support = true; } void dcn2_enable_pme_wa(struct clk_mgr *clk_mgr_base) diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index b12eee93c253..6da0a6fe2973 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -121,6 +121,7 @@ struct dc_caps { struct dc_bug_wa { bool no_connect_phy_config; bool dedcn20_305_wa; + struct display_mode_lib alternate_dml; }; #endif @@ -263,6 +264,12 @@ struct dc_clocks { int phyclk_khz; int dramclk_khz; bool p_state_change_support; + + /* + * Elements below are not compared for the purposes of + * optimization required + */ + bool prev_p_state_change_support; }; struct dc_bw_validation_profile { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c index 6e2dbd03f9bf..31d6e79ba2b8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c @@ -26,6 +26,7 @@ #include "dcn20_hubbub.h" #include "reg_helper.h" +#include "clk_mgr.h" #define REG(reg)\ hubbub1->regs->reg @@ -553,6 +554,16 @@ 
static void hubbub2_program_watermarks( */ hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower); hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower); + + /* + * There's a special case when going from p-state support to p-state unsupported + * here we are going to LOWER watermarks to go to dummy p-state only, but this has + * to be done prepare_bandwidth, not optimize + */ + if (hubbub1->base.ctx->dc->clk_mgr->clks.prev_p_state_change_support == true && + hubbub1->base.ctx->dc->clk_mgr->clks.p_state_change_support == false) + safe_to_lower = true; + hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower); REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 76ce622e1421..b61774d2e8b2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -1443,16 +1443,16 @@ void dcn20_prepare_bandwidth( { struct hubbub *hubbub = dc->res_pool->hubbub; + dc->clk_mgr->funcs->update_clocks( + dc->clk_mgr, + context, + false); + /* program dchubbub watermarks */ hubbub->funcs->program_watermarks(hubbub, &context->bw_ctx.bw.dcn.watermarks, dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000, false); - - dc->clk_mgr->funcs->update_clocks( - dc->clk_mgr, - context, - false); } void dcn20_optimize_bandwidth( diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 346a9b9e7624..ad015290e17b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -2427,7 +2427,7 @@ void dcn20_calculate_dlg_params( } } -bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, +static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *context, bool fast_validate) { bool out = false; @@ -2479,6 +2479,62 @@ validate_out: return out; } + +bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, + bool fast_validate) +{ + bool voltage_supported = false; + bool full_pstate_supported = false; + bool dummy_pstate_supported = false; + double p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us; + + if (fast_validate) + return dcn20_validate_bandwidth_internal(dc, context, true); + + + // Best case, we support full UCLK switch latency + voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false); + full_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support; + + if (context->bw_ctx.dml.soc.dummy_pstate_latency_us == 0 || + (voltage_supported && full_pstate_supported)) { + context->bw_ctx.bw.dcn.clk.p_state_change_support = true; + goto restore_dml_state; + } + + // Fallback #1: Try to only support G6 temperature read latency + context->bw_ctx.dml.soc.dram_clock_change_latency_us = context->bw_ctx.dml.soc.dummy_pstate_latency_us; + + voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false); + dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support; + + if (voltage_supported && dummy_pstate_supported) { + context->bw_ctx.bw.dcn.clk.p_state_change_support = false; + goto restore_dml_state; + } + + // Fallback #2: Retry with "new" DCN20 to support G6 temperature read latency + memcpy (&context->bw_ctx.dml, &dc->work_arounds.alternate_dml, sizeof (struct display_mode_lib)); + 
context->bw_ctx.dml.soc.dram_clock_change_latency_us = context->bw_ctx.dml.soc.dummy_pstate_latency_us; + + voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false); + dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support; + + if (voltage_supported && dummy_pstate_supported) { + context->bw_ctx.bw.dcn.clk.p_state_change_support = false; + goto restore_dml_state; + } + + // ERROR: fallback #2 is supposed to always work. + ASSERT(false); + +restore_dml_state: + memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib)); + context->bw_ctx.dml.soc.dram_clock_change_latency_us = p_state_latency_us; + + return voltage_supported; +} + struct pipe_ctx *dcn20_acquire_idle_pipe_for_layer( struct dc_state *state, const struct resource_pool *pool, @@ -3085,6 +3141,7 @@ static bool construct( } dml_init_instance(&dc->dml, &dcn2_0_soc, &dcn2_0_ip, DML_PROJECT_NAVI10); + dml_init_instance(&dc->work_arounds.alternate_dml, &dcn2_0_soc, &dcn2_0_ip, DML_PROJECT_NAVI10v2); if (!dc->debug.disable_pplib_wm_range) { struct pp_smu_wm_range_sets ranges = {0}; diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile index 0bb7a20675c4..1735fc1e2eb1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile @@ -38,6 +38,8 @@ ifdef CONFIG_DRM_AMD_DC_DCN2_0 CFLAGS_display_mode_vba.o := $(dml_ccflags) CFLAGS_display_mode_vba_20.o := $(dml_ccflags) CFLAGS_display_rq_dlg_calc_20.o := $(dml_ccflags) +CFLAGS_display_mode_vba_20v2.o := $(dml_ccflags) +CFLAGS_display_rq_dlg_calc_20v2.o := $(dml_ccflags) endif ifdef CONFIG_DRM_AMD_DCN3AG CFLAGS_display_mode_vba_3ag.o := $(dml_ccflags) @@ -51,6 +53,7 @@ DML = display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \ ifdef CONFIG_DRM_AMD_DC_DCN2_0 DML += display_mode_vba.o dcn20/display_rq_dlg_calc_20.o dcn20/display_mode_vba_20.o +DML += dcn20/display_rq_dlg_calc_20v2.o dcn20/display_mode_vba_20v2.o endif AMD_DAL_DML = $(addprefix $(AMDDALPATH)/dc/dml/,$(DML)) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c new file mode 100644 index 000000000000..22455db54980 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c @@ -0,0 +1,5109 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "../display_mode_lib.h" +#include "display_mode_vba_20v2.h" +#include "../dml_inline_defs.h" + +/* + * NOTE: + * This file is gcc-parseable HW gospel, coming straight from HW engineers. + * + * It doesn't adhere to Linux kernel style and sometimes will do things in odd + * ways. Unless there is something clearly wrong with it the code should + * remain as-is as it provides us with a guarantee from HW that it is correct. + */ + +#define BPP_INVALID 0 +#define BPP_BLENDED_PIPE 0xffffffff + +static double adjust_ReturnBW( + struct display_mode_lib *mode_lib, + double ReturnBW, + bool DCCEnabledAnyPlane, + double ReturnBandwidthToDCN); +static unsigned int dscceComputeDelay( + unsigned int bpc, + double bpp, + unsigned int sliceWidth, + unsigned int numSlices, + enum output_format_class pixelFormat); +static unsigned int dscComputeDelay(enum output_format_class pixelFormat); +static bool CalculateDelayAfterScaler( + struct display_mode_lib *mode_lib, + double ReturnBW, + double ReadBandwidthPlaneLuma, + double ReadBandwidthPlaneChroma, + double TotalDataReadBandwidth, + double DisplayPipeLineDeliveryTimeLuma, + double DisplayPipeLineDeliveryTimeChroma, + double DPPCLK, + double DISPCLK, + double PixelClock, + unsigned int DSCDelay, + unsigned int DPPPerPlane, + bool ScalerEnabled, + unsigned int NumberOfCursors, + double DPPCLKDelaySubtotal, + double DPPCLKDelaySCL, + double DPPCLKDelaySCLLBOnly, + double DPPCLKDelayCNVCFormater, + double DPPCLKDelayCNVCCursor, + double DISPCLKDelaySubtotal, + unsigned int ScalerRecoutWidth, + enum output_format_class OutputFormat, + unsigned int HTotal, + unsigned int SwathWidthSingleDPPY, + double BytePerPixelDETY, + double BytePerPixelDETC, + unsigned int SwathHeightY, + unsigned int SwathHeightC, + bool Interlace, + bool ProgressiveToInterlaceUnitInOPP, + double *DSTXAfterScaler, + double *DSTYAfterScaler + ); +// Super monster function with some 45 argument +static bool CalculatePrefetchSchedule( + struct display_mode_lib *mode_lib, + double DPPCLK, + double DISPCLK, + double PixelClock, + double DCFCLKDeepSleep, + unsigned int DPPPerPlane, + unsigned int NumberOfCursors, + unsigned int VBlank, + unsigned int HTotal, + unsigned int MaxInterDCNTileRepeaters, + unsigned int VStartup, + unsigned int PageTableLevels, + bool GPUVMEnable, + bool DynamicMetadataEnable, + unsigned int DynamicMetadataLinesBeforeActiveRequired, + unsigned int DynamicMetadataTransmittedBytes, + bool DCCEnable, + double UrgentLatencyPixelDataOnly, + double UrgentExtraLatency, + double TCalc, + unsigned int PDEAndMetaPTEBytesFrame, + unsigned int MetaRowByte, + unsigned int PixelPTEBytesPerRow, + double PrefetchSourceLinesY, + unsigned int SwathWidthY, + double BytePerPixelDETY, + double VInitPreFillY, + unsigned int MaxNumSwathY, + double PrefetchSourceLinesC, + double BytePerPixelDETC, + double VInitPreFillC, + unsigned int MaxNumSwathC, + unsigned int SwathHeightY, + unsigned int SwathHeightC, + double TWait, + bool XFCEnabled, + double XFCRemoteSurfaceFlipDelay, + bool InterlaceEnable, + bool ProgressiveToInterlaceUnitInOPP, + double DSTXAfterScaler, + double DSTYAfterScaler, + double *DestinationLinesForPrefetch, + double *PrefetchBandwidth, + double *DestinationLinesToRequestVMInVBlank, + double *DestinationLinesToRequestRowInVBlank, + double *VRatioPrefetchY, + double *VRatioPrefetchC, + double *RequiredPrefetchPixDataBW, + double *Tno_bw, + unsigned int *VUpdateOffsetPix, + double *VUpdateWidthPix, + double *VReadyOffsetPix); +static 
double RoundToDFSGranularityUp(double Clock, double VCOSpeed); +static double RoundToDFSGranularityDown(double Clock, double VCOSpeed); +static double CalculatePrefetchSourceLines( + struct display_mode_lib *mode_lib, + double VRatio, + double vtaps, + bool Interlace, + bool ProgressiveToInterlaceUnitInOPP, + unsigned int SwathHeight, + unsigned int ViewportYStart, + double *VInitPreFill, + unsigned int *MaxNumSwath); +static unsigned int CalculateVMAndRowBytes( + struct display_mode_lib *mode_lib, + bool DCCEnable, + unsigned int BlockHeight256Bytes, + unsigned int BlockWidth256Bytes, + enum source_format_class SourcePixelFormat, + unsigned int SurfaceTiling, + unsigned int BytePerPixel, + enum scan_direction_class ScanDirection, + unsigned int ViewportWidth, + unsigned int ViewportHeight, + unsigned int SwathWidthY, + bool GPUVMEnable, + unsigned int VMMPageSize, + unsigned int PTEBufferSizeInRequestsLuma, + unsigned int PDEProcessingBufIn64KBReqs, + unsigned int Pitch, + unsigned int DCCMetaPitch, + unsigned int *MacroTileWidth, + unsigned int *MetaRowByte, + unsigned int *PixelPTEBytesPerRow, + bool *PTEBufferSizeNotExceeded, + unsigned int *dpte_row_height, + unsigned int *meta_row_height); +static double CalculateTWait( + unsigned int PrefetchMode, + double DRAMClockChangeLatency, + double UrgentLatencyPixelDataOnly, + double SREnterPlusExitTime); +static double CalculateRemoteSurfaceFlipDelay( + struct display_mode_lib *mode_lib, + double VRatio, + double SwathWidth, + double Bpp, + double LineTime, + double XFCTSlvVupdateOffset, + double XFCTSlvVupdateWidth, + double XFCTSlvVreadyOffset, + double XFCXBUFLatencyTolerance, + double XFCFillBWOverhead, + double XFCSlvChunkSize, + double XFCBusTransportTime, + double TCalc, + double TWait, + double *SrcActiveDrainRate, + double *TInitXFill, + double *TslvChk); +static void CalculateActiveRowBandwidth( + bool GPUVMEnable, + enum source_format_class SourcePixelFormat, + double VRatio, + bool DCCEnable, + double LineTime, + unsigned int MetaRowByteLuma, + unsigned int MetaRowByteChroma, + unsigned int meta_row_height_luma, + unsigned int meta_row_height_chroma, + unsigned int PixelPTEBytesPerRowLuma, + unsigned int PixelPTEBytesPerRowChroma, + unsigned int dpte_row_height_luma, + unsigned int dpte_row_height_chroma, + double *meta_row_bw, + double *dpte_row_bw, + double *qual_row_bw); +static void CalculateFlipSchedule( + struct display_mode_lib *mode_lib, + double UrgentExtraLatency, + double UrgentLatencyPixelDataOnly, + unsigned int GPUVMMaxPageTableLevels, + bool GPUVMEnable, + double BandwidthAvailableForImmediateFlip, + unsigned int TotImmediateFlipBytes, + enum source_format_class SourcePixelFormat, + unsigned int ImmediateFlipBytes, + double LineTime, + double VRatio, + double Tno_bw, + double PDEAndMetaPTEBytesFrame, + unsigned int MetaRowByte, + unsigned int PixelPTEBytesPerRow, + bool DCCEnable, + unsigned int dpte_row_height, + unsigned int meta_row_height, + double qual_row_bw, + double *DestinationLinesToRequestVMInImmediateFlip, + double *DestinationLinesToRequestRowInImmediateFlip, + double *final_flip_bw, + bool *ImmediateFlipSupportedForPipe); +static double CalculateWriteBackDelay( + enum source_format_class WritebackPixelFormat, + double WritebackHRatio, + double WritebackVRatio, + unsigned int WritebackLumaHTaps, + unsigned int WritebackLumaVTaps, + unsigned int WritebackChromaHTaps, + unsigned int WritebackChromaVTaps, + unsigned int WritebackDestinationWidth); + +static void dml20v2_DisplayPipeConfiguration(struct 
display_mode_lib *mode_lib); +static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation( + struct display_mode_lib *mode_lib); + +void dml20v2_recalculate(struct display_mode_lib *mode_lib) +{ + ModeSupportAndSystemConfiguration(mode_lib); + mode_lib->vba.FabricAndDRAMBandwidth = dml_min( + mode_lib->vba.DRAMSpeed * mode_lib->vba.NumberOfChannels * mode_lib->vba.DRAMChannelWidth, + mode_lib->vba.FabricClock * mode_lib->vba.FabricDatapathToDCNDataReturn) / 1000.0; + PixelClockAdjustmentForProgressiveToInterlaceUnit(mode_lib); + dml20v2_DisplayPipeConfiguration(mode_lib); + dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(mode_lib); +} + +static double adjust_ReturnBW( + struct display_mode_lib *mode_lib, + double ReturnBW, + bool DCCEnabledAnyPlane, + double ReturnBandwidthToDCN) +{ + double CriticalCompression; + + if (DCCEnabledAnyPlane + && ReturnBandwidthToDCN + > mode_lib->vba.DCFCLK * mode_lib->vba.ReturnBusWidth / 4.0) + ReturnBW = + dml_min( + ReturnBW, + ReturnBandwidthToDCN * 4 + * (1.0 + - mode_lib->vba.UrgentLatencyPixelDataOnly + / ((mode_lib->vba.ROBBufferSizeInKByte + - mode_lib->vba.PixelChunkSizeInKByte) + * 1024 + / ReturnBandwidthToDCN + - mode_lib->vba.DCFCLK + * mode_lib->vba.ReturnBusWidth + / 4) + + mode_lib->vba.UrgentLatencyPixelDataOnly)); + + CriticalCompression = 2.0 * mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLK + * mode_lib->vba.UrgentLatencyPixelDataOnly + / (ReturnBandwidthToDCN * mode_lib->vba.UrgentLatencyPixelDataOnly + + (mode_lib->vba.ROBBufferSizeInKByte + - mode_lib->vba.PixelChunkSizeInKByte) + * 1024); + + if (DCCEnabledAnyPlane && CriticalCompression > 1.0 && CriticalCompression < 4.0) + ReturnBW = + dml_min( + ReturnBW, + 4.0 * ReturnBandwidthToDCN + * (mode_lib->vba.ROBBufferSizeInKByte + - mode_lib->vba.PixelChunkSizeInKByte) + * 1024 + * mode_lib->vba.ReturnBusWidth + * mode_lib->vba.DCFCLK + * mode_lib->vba.UrgentLatencyPixelDataOnly + / dml_pow( + (ReturnBandwidthToDCN + * mode_lib->vba.UrgentLatencyPixelDataOnly + + (mode_lib->vba.ROBBufferSizeInKByte + - mode_lib->vba.PixelChunkSizeInKByte) + * 1024), + 2)); + + return ReturnBW; +} + +static unsigned int dscceComputeDelay( + unsigned int bpc, + double bpp, + unsigned int sliceWidth, + unsigned int numSlices, + enum output_format_class pixelFormat) +{ + // valid bpc = source bits per component in the set of {8, 10, 12} + // valid bpp = increments of 1/16 of a bit + // min = 6/7/8 in N420/N422/444, respectively + // max = such that compression is 1:1 + //valid sliceWidth = number of pixels per slice line, must be less than or equal to 5184/numSlices (or 4096/numSlices in 420 mode) + //valid numSlices = number of slices in the horiziontal direction per DSC engine in the set of {1, 2, 3, 4} + //valid pixelFormat = pixel/color format in the set of {:N444_RGB, :S422, :N422, :N420} + + // fixed value + unsigned int rcModelSize = 8192; + + // N422/N420 operate at 2 pixels per clock + unsigned int pixelsPerClock, lstall, D, initalXmitDelay, w, s, ix, wx, p, l0, a, ax, l, + Delay, pixels; + + if (pixelFormat == dm_n422 || pixelFormat == dm_420) + pixelsPerClock = 2; + // #all other modes operate at 1 pixel per clock + else + pixelsPerClock = 1; + + //initial transmit delay as per PPS + initalXmitDelay = dml_round(rcModelSize / 2.0 / bpp / pixelsPerClock); + + //compute ssm delay + if (bpc == 8) + D = 81; + else if (bpc == 10) + D = 89; + else + D = 113; + + //divide by pixel per cycle to compute slice width as 
seen by DSC + w = sliceWidth / pixelsPerClock; + + //422 mode has an additional cycle of delay + if (pixelFormat == dm_s422) + s = 1; + else + s = 0; + + //main calculation for the dscce + ix = initalXmitDelay + 45; + wx = (w + 2) / 3; + p = 3 * wx - w; + l0 = ix / w; + a = ix + p * l0; + ax = (a + 2) / 3 + D + 6 + 1; + l = (ax + wx - 1) / wx; + if ((ix % w) == 0 && p != 0) + lstall = 1; + else + lstall = 0; + Delay = l * wx * (numSlices - 1) + ax + s + lstall + 22; + + //dsc processes 3 pixel containers per cycle and a container can contain 1 or 2 pixels + pixels = Delay * 3 * pixelsPerClock; + return pixels; +} + +static unsigned int dscComputeDelay(enum output_format_class pixelFormat) +{ + unsigned int Delay = 0; + + if (pixelFormat == dm_420) { + // sfr + Delay = Delay + 2; + // dsccif + Delay = Delay + 0; + // dscc - input deserializer + Delay = Delay + 3; + // dscc gets pixels every other cycle + Delay = Delay + 2; + // dscc - input cdc fifo + Delay = Delay + 12; + // dscc gets pixels every other cycle + Delay = Delay + 13; + // dscc - cdc uncertainty + Delay = Delay + 2; + // dscc - output cdc fifo + Delay = Delay + 7; + // dscc gets pixels every other cycle + Delay = Delay + 3; + // dscc - cdc uncertainty + Delay = Delay + 2; + // dscc - output serializer + Delay = Delay + 1; + // sft + Delay = Delay + 1; + } else if (pixelFormat == dm_n422) { + // sfr + Delay = Delay + 2; + // dsccif + Delay = Delay + 1; + // dscc - input deserializer + Delay = Delay + 5; + // dscc - input cdc fifo + Delay = Delay + 25; + // dscc - cdc uncertainty + Delay = Delay + 2; + // dscc - output cdc fifo + Delay = Delay + 10; + // dscc - cdc uncertainty + Delay = Delay + 2; + // dscc - output serializer + Delay = Delay + 1; + // sft + Delay = Delay + 1; + } else { + // sfr + Delay = Delay + 2; + // dsccif + Delay = Delay + 0; + // dscc - input deserializer + Delay = Delay + 3; + // dscc - input cdc fifo + Delay = Delay + 12; + // dscc - cdc uncertainty + Delay = Delay + 2; + // dscc - output cdc fifo + Delay = Delay + 7; + // dscc - output serializer + Delay = Delay + 1; + // dscc - cdc uncertainty + Delay = Delay + 2; + // sft + Delay = Delay + 1; + } + + return Delay; +} + +static bool CalculateDelayAfterScaler( + struct display_mode_lib *mode_lib, + double ReturnBW, + double ReadBandwidthPlaneLuma, + double ReadBandwidthPlaneChroma, + double TotalDataReadBandwidth, + double DisplayPipeLineDeliveryTimeLuma, + double DisplayPipeLineDeliveryTimeChroma, + double DPPCLK, + double DISPCLK, + double PixelClock, + unsigned int DSCDelay, + unsigned int DPPPerPlane, + bool ScalerEnabled, + unsigned int NumberOfCursors, + double DPPCLKDelaySubtotal, + double DPPCLKDelaySCL, + double DPPCLKDelaySCLLBOnly, + double DPPCLKDelayCNVCFormater, + double DPPCLKDelayCNVCCursor, + double DISPCLKDelaySubtotal, + unsigned int ScalerRecoutWidth, + enum output_format_class OutputFormat, + unsigned int HTotal, + unsigned int SwathWidthSingleDPPY, + double BytePerPixelDETY, + double BytePerPixelDETC, + unsigned int SwathHeightY, + unsigned int SwathHeightC, + bool Interlace, + bool ProgressiveToInterlaceUnitInOPP, + double *DSTXAfterScaler, + double *DSTYAfterScaler + ) +{ + unsigned int DPPCycles, DISPCLKCycles; + double DataFabricLineDeliveryTimeLuma; + double DataFabricLineDeliveryTimeChroma; + double DSTTotalPixelsAfterScaler; + + DataFabricLineDeliveryTimeLuma = SwathWidthSingleDPPY * SwathHeightY * dml_ceil(BytePerPixelDETY, 1) / (mode_lib->vba.ReturnBW * ReadBandwidthPlaneLuma / TotalDataReadBandwidth); + 
mode_lib->vba.LastPixelOfLineExtraWatermark = dml_max(mode_lib->vba.LastPixelOfLineExtraWatermark, DataFabricLineDeliveryTimeLuma - DisplayPipeLineDeliveryTimeLuma); + + if (BytePerPixelDETC != 0) { + DataFabricLineDeliveryTimeChroma = SwathWidthSingleDPPY / 2 * SwathHeightC * dml_ceil(BytePerPixelDETC, 2) / (mode_lib->vba.ReturnBW * ReadBandwidthPlaneChroma / TotalDataReadBandwidth); + mode_lib->vba.LastPixelOfLineExtraWatermark = dml_max(mode_lib->vba.LastPixelOfLineExtraWatermark, DataFabricLineDeliveryTimeChroma - DisplayPipeLineDeliveryTimeChroma); + } + + if (ScalerEnabled) + DPPCycles = DPPCLKDelaySubtotal + DPPCLKDelaySCL; + else + DPPCycles = DPPCLKDelaySubtotal + DPPCLKDelaySCLLBOnly; + + DPPCycles = DPPCycles + DPPCLKDelayCNVCFormater + NumberOfCursors * DPPCLKDelayCNVCCursor; + + DISPCLKCycles = DISPCLKDelaySubtotal; + + if (DPPCLK == 0.0 || DISPCLK == 0.0) + return true; + + *DSTXAfterScaler = DPPCycles * PixelClock / DPPCLK + DISPCLKCycles * PixelClock / DISPCLK + + DSCDelay; + + if (DPPPerPlane > 1) + *DSTXAfterScaler = *DSTXAfterScaler + ScalerRecoutWidth; + + if (OutputFormat == dm_420 || (Interlace && ProgressiveToInterlaceUnitInOPP)) + *DSTYAfterScaler = 1; + else + *DSTYAfterScaler = 0; + + DSTTotalPixelsAfterScaler = ((double) (*DSTYAfterScaler * HTotal)) + *DSTXAfterScaler; + *DSTYAfterScaler = dml_floor(DSTTotalPixelsAfterScaler / HTotal, 1); + *DSTXAfterScaler = DSTTotalPixelsAfterScaler - ((double) (*DSTYAfterScaler * HTotal)); + + return true; +} + +static bool CalculatePrefetchSchedule( + struct display_mode_lib *mode_lib, + double DPPCLK, + double DISPCLK, + double PixelClock, + double DCFCLKDeepSleep, + unsigned int DPPPerPlane, + unsigned int NumberOfCursors, + unsigned int VBlank, + unsigned int HTotal, + unsigned int MaxInterDCNTileRepeaters, + unsigned int VStartup, + unsigned int PageTableLevels, + bool GPUVMEnable, + bool DynamicMetadataEnable, + unsigned int DynamicMetadataLinesBeforeActiveRequired, + unsigned int DynamicMetadataTransmittedBytes, + bool DCCEnable, + double UrgentLatencyPixelDataOnly, + double UrgentExtraLatency, + double TCalc, + unsigned int PDEAndMetaPTEBytesFrame, + unsigned int MetaRowByte, + unsigned int PixelPTEBytesPerRow, + double PrefetchSourceLinesY, + unsigned int SwathWidthY, + double BytePerPixelDETY, + double VInitPreFillY, + unsigned int MaxNumSwathY, + double PrefetchSourceLinesC, + double BytePerPixelDETC, + double VInitPreFillC, + unsigned int MaxNumSwathC, + unsigned int SwathHeightY, + unsigned int SwathHeightC, + double TWait, + bool XFCEnabled, + double XFCRemoteSurfaceFlipDelay, + bool InterlaceEnable, + bool ProgressiveToInterlaceUnitInOPP, + double DSTXAfterScaler, + double DSTYAfterScaler, + double *DestinationLinesForPrefetch, + double *PrefetchBandwidth, + double *DestinationLinesToRequestVMInVBlank, + double *DestinationLinesToRequestRowInVBlank, + double *VRatioPrefetchY, + double *VRatioPrefetchC, + double *RequiredPrefetchPixDataBW, + double *Tno_bw, + unsigned int *VUpdateOffsetPix, + double *VUpdateWidthPix, + double *VReadyOffsetPix) +{ + bool MyError = false; + double TotalRepeaterDelayTime; + double Tdm, LineTime, Tsetup; + double dst_y_prefetch_equ; + double Tsw_oto; + double prefetch_bw_oto; + double Tvm_oto; + double Tr0_oto; + double Tpre_oto; + double dst_y_prefetch_oto; + double TimeForFetchingMetaPTE = 0; + double TimeForFetchingRowInVBlank = 0; + double LinesToRequestPrefetchPixelData = 0; + + *VUpdateOffsetPix = dml_ceil(HTotal / 4.0, 1); + TotalRepeaterDelayTime = MaxInterDCNTileRepeaters * 
(2.0 / DPPCLK + 3.0 / DISPCLK); + *VUpdateWidthPix = (14.0 / DCFCLKDeepSleep + 12.0 / DPPCLK + TotalRepeaterDelayTime) + * PixelClock; + + *VReadyOffsetPix = dml_max( + 150.0 / DPPCLK, + TotalRepeaterDelayTime + 20.0 / DCFCLKDeepSleep + 10.0 / DPPCLK) + * PixelClock; + + Tsetup = (double) (*VUpdateOffsetPix + *VUpdateWidthPix + *VReadyOffsetPix) / PixelClock; + + LineTime = (double) HTotal / PixelClock; + + if (DynamicMetadataEnable) { + double Tdmbf, Tdmec, Tdmsks; + + Tdm = dml_max(0.0, UrgentExtraLatency - TCalc); + Tdmbf = DynamicMetadataTransmittedBytes / 4.0 / DISPCLK; + Tdmec = LineTime; + if (DynamicMetadataLinesBeforeActiveRequired == 0) + Tdmsks = VBlank * LineTime / 2.0; + else + Tdmsks = DynamicMetadataLinesBeforeActiveRequired * LineTime; + if (InterlaceEnable && !ProgressiveToInterlaceUnitInOPP) + Tdmsks = Tdmsks / 2; + if (VStartup * LineTime + < Tsetup + TWait + UrgentExtraLatency + Tdmbf + Tdmec + Tdmsks) { + MyError = true; + } + } else + Tdm = 0; + + if (GPUVMEnable) { + if (PageTableLevels == 4) + *Tno_bw = UrgentExtraLatency + UrgentLatencyPixelDataOnly; + else if (PageTableLevels == 3) + *Tno_bw = UrgentExtraLatency; + else + *Tno_bw = 0; + } else if (DCCEnable) + *Tno_bw = LineTime; + else + *Tno_bw = LineTime / 4; + + dst_y_prefetch_equ = VStartup - dml_max(TCalc + TWait, XFCRemoteSurfaceFlipDelay) / LineTime + - (Tsetup + Tdm) / LineTime + - (DSTYAfterScaler + DSTXAfterScaler / HTotal); + + Tsw_oto = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime; + + prefetch_bw_oto = (MetaRowByte + PixelPTEBytesPerRow + + PrefetchSourceLinesY * SwathWidthY * dml_ceil(BytePerPixelDETY, 1) + + PrefetchSourceLinesC * SwathWidthY / 2 * dml_ceil(BytePerPixelDETC, 2)) + / Tsw_oto; + + if (GPUVMEnable == true) { + Tvm_oto = + dml_max( + *Tno_bw + PDEAndMetaPTEBytesFrame / prefetch_bw_oto, + dml_max( + UrgentExtraLatency + + UrgentLatencyPixelDataOnly + * (PageTableLevels + - 1), + LineTime / 4.0)); + } else + Tvm_oto = LineTime / 4.0; + + if ((GPUVMEnable == true || DCCEnable == true)) { + Tr0_oto = dml_max( + (MetaRowByte + PixelPTEBytesPerRow) / prefetch_bw_oto, + dml_max(UrgentLatencyPixelDataOnly, dml_max(LineTime - Tvm_oto, LineTime / 4))); + } else + Tr0_oto = LineTime - Tvm_oto; + + Tpre_oto = Tvm_oto + Tr0_oto + Tsw_oto; + + dst_y_prefetch_oto = Tpre_oto / LineTime; + + if (dst_y_prefetch_oto < dst_y_prefetch_equ) + *DestinationLinesForPrefetch = dst_y_prefetch_oto; + else + *DestinationLinesForPrefetch = dst_y_prefetch_equ; + + *DestinationLinesForPrefetch = dml_floor(4.0 * (*DestinationLinesForPrefetch + 0.125), 1) + / 4; + + dml_print("DML: VStartup: %d\n", VStartup); + dml_print("DML: TCalc: %f\n", TCalc); + dml_print("DML: TWait: %f\n", TWait); + dml_print("DML: XFCRemoteSurfaceFlipDelay: %f\n", XFCRemoteSurfaceFlipDelay); + dml_print("DML: LineTime: %f\n", LineTime); + dml_print("DML: Tsetup: %f\n", Tsetup); + dml_print("DML: Tdm: %f\n", Tdm); + dml_print("DML: DSTYAfterScaler: %f\n", DSTYAfterScaler); + dml_print("DML: DSTXAfterScaler: %f\n", DSTXAfterScaler); + dml_print("DML: HTotal: %d\n", HTotal); + + *PrefetchBandwidth = 0; + *DestinationLinesToRequestVMInVBlank = 0; + *DestinationLinesToRequestRowInVBlank = 0; + *VRatioPrefetchY = 0; + *VRatioPrefetchC = 0; + *RequiredPrefetchPixDataBW = 0; + if (*DestinationLinesForPrefetch > 1) { + *PrefetchBandwidth = (PDEAndMetaPTEBytesFrame + 2 * MetaRowByte + + 2 * PixelPTEBytesPerRow + + PrefetchSourceLinesY * SwathWidthY * dml_ceil(BytePerPixelDETY, 1) + + PrefetchSourceLinesC * SwathWidthY / 2 + * 
dml_ceil(BytePerPixelDETC, 2)) + / (*DestinationLinesForPrefetch * LineTime - *Tno_bw); + if (GPUVMEnable) { + TimeForFetchingMetaPTE = + dml_max( + *Tno_bw + + (double) PDEAndMetaPTEBytesFrame + / *PrefetchBandwidth, + dml_max( + UrgentExtraLatency + + UrgentLatencyPixelDataOnly + * (PageTableLevels + - 1), + LineTime / 4)); + } else { + if (NumberOfCursors > 0 || XFCEnabled) + TimeForFetchingMetaPTE = LineTime / 4; + else + TimeForFetchingMetaPTE = 0.0; + } + + if ((GPUVMEnable == true || DCCEnable == true)) { + TimeForFetchingRowInVBlank = + dml_max( + (MetaRowByte + PixelPTEBytesPerRow) + / *PrefetchBandwidth, + dml_max( + UrgentLatencyPixelDataOnly, + dml_max( + LineTime + - TimeForFetchingMetaPTE, + LineTime + / 4.0))); + } else { + if (NumberOfCursors > 0 || XFCEnabled) + TimeForFetchingRowInVBlank = LineTime - TimeForFetchingMetaPTE; + else + TimeForFetchingRowInVBlank = 0.0; + } + + *DestinationLinesToRequestVMInVBlank = dml_floor( + 4.0 * (TimeForFetchingMetaPTE / LineTime + 0.125), + 1) / 4.0; + + *DestinationLinesToRequestRowInVBlank = dml_floor( + 4.0 * (TimeForFetchingRowInVBlank / LineTime + 0.125), + 1) / 4.0; + + LinesToRequestPrefetchPixelData = + *DestinationLinesForPrefetch + - ((NumberOfCursors > 0 || GPUVMEnable + || DCCEnable) ? + (*DestinationLinesToRequestVMInVBlank + + *DestinationLinesToRequestRowInVBlank) : + 0.0); + + if (LinesToRequestPrefetchPixelData > 0) { + + *VRatioPrefetchY = (double) PrefetchSourceLinesY + / LinesToRequestPrefetchPixelData; + *VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0); + if ((SwathHeightY > 4) && (VInitPreFillY > 3)) { + if (LinesToRequestPrefetchPixelData > (VInitPreFillY - 3.0) / 2.0) { + *VRatioPrefetchY = + dml_max( + (double) PrefetchSourceLinesY + / LinesToRequestPrefetchPixelData, + (double) MaxNumSwathY + * SwathHeightY + / (LinesToRequestPrefetchPixelData + - (VInitPreFillY + - 3.0) + / 2.0)); + *VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0); + } else { + MyError = true; + *VRatioPrefetchY = 0; + } + } + + *VRatioPrefetchC = (double) PrefetchSourceLinesC + / LinesToRequestPrefetchPixelData; + *VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0); + + if ((SwathHeightC > 4)) { + if (LinesToRequestPrefetchPixelData > (VInitPreFillC - 3.0) / 2.0) { + *VRatioPrefetchC = + dml_max( + *VRatioPrefetchC, + (double) MaxNumSwathC + * SwathHeightC + / (LinesToRequestPrefetchPixelData + - (VInitPreFillC + - 3.0) + / 2.0)); + *VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0); + } else { + MyError = true; + *VRatioPrefetchC = 0; + } + } + + *RequiredPrefetchPixDataBW = + DPPPerPlane + * ((double) PrefetchSourceLinesY + / LinesToRequestPrefetchPixelData + * dml_ceil( + BytePerPixelDETY, + 1) + + (double) PrefetchSourceLinesC + / LinesToRequestPrefetchPixelData + * dml_ceil( + BytePerPixelDETC, + 2) + / 2) + * SwathWidthY / LineTime; + } else { + MyError = true; + *VRatioPrefetchY = 0; + *VRatioPrefetchC = 0; + *RequiredPrefetchPixDataBW = 0; + } + + } else { + MyError = true; + } + + if (MyError) { + *PrefetchBandwidth = 0; + TimeForFetchingMetaPTE = 0; + TimeForFetchingRowInVBlank = 0; + *DestinationLinesToRequestVMInVBlank = 0; + *DestinationLinesToRequestRowInVBlank = 0; + *DestinationLinesForPrefetch = 0; + LinesToRequestPrefetchPixelData = 0; + *VRatioPrefetchY = 0; + *VRatioPrefetchC = 0; + *RequiredPrefetchPixDataBW = 0; + } + + return MyError; +} + +static double RoundToDFSGranularityUp(double Clock, double VCOSpeed) +{ + return VCOSpeed * 4 / dml_floor(VCOSpeed * 4 / Clock, 1); +} + +static double 
RoundToDFSGranularityDown(double Clock, double VCOSpeed) +{ + return VCOSpeed * 4 / dml_ceil(VCOSpeed * 4 / Clock, 1); +} + +static double CalculatePrefetchSourceLines( + struct display_mode_lib *mode_lib, + double VRatio, + double vtaps, + bool Interlace, + bool ProgressiveToInterlaceUnitInOPP, + unsigned int SwathHeight, + unsigned int ViewportYStart, + double *VInitPreFill, + unsigned int *MaxNumSwath) +{ + unsigned int MaxPartialSwath; + + if (ProgressiveToInterlaceUnitInOPP) + *VInitPreFill = dml_floor((VRatio + vtaps + 1) / 2.0, 1); + else + *VInitPreFill = dml_floor((VRatio + vtaps + 1 + Interlace * 0.5 * VRatio) / 2.0, 1); + + if (!mode_lib->vba.IgnoreViewportPositioning) { + + *MaxNumSwath = dml_ceil((*VInitPreFill - 1.0) / SwathHeight, 1) + 1.0; + + if (*VInitPreFill > 1.0) + MaxPartialSwath = (unsigned int) (*VInitPreFill - 2) % SwathHeight; + else + MaxPartialSwath = (unsigned int) (*VInitPreFill + SwathHeight - 2) + % SwathHeight; + MaxPartialSwath = dml_max(1U, MaxPartialSwath); + + } else { + + if (ViewportYStart != 0) + dml_print( + "WARNING DML: using viewport y position of 0 even though actual viewport y position is non-zero in prefetch source lines calculation\n"); + + *MaxNumSwath = dml_ceil(*VInitPreFill / SwathHeight, 1); + + if (*VInitPreFill > 1.0) + MaxPartialSwath = (unsigned int) (*VInitPreFill - 1) % SwathHeight; + else + MaxPartialSwath = (unsigned int) (*VInitPreFill + SwathHeight - 1) + % SwathHeight; + } + + return *MaxNumSwath * SwathHeight + MaxPartialSwath; +} + +static unsigned int CalculateVMAndRowBytes( + struct display_mode_lib *mode_lib, + bool DCCEnable, + unsigned int BlockHeight256Bytes, + unsigned int BlockWidth256Bytes, + enum source_format_class SourcePixelFormat, + unsigned int SurfaceTiling, + unsigned int BytePerPixel, + enum scan_direction_class ScanDirection, + unsigned int ViewportWidth, + unsigned int ViewportHeight, + unsigned int SwathWidth, + bool GPUVMEnable, + unsigned int VMMPageSize, + unsigned int PTEBufferSizeInRequestsLuma, + unsigned int PDEProcessingBufIn64KBReqs, + unsigned int Pitch, + unsigned int DCCMetaPitch, + unsigned int *MacroTileWidth, + unsigned int *MetaRowByte, + unsigned int *PixelPTEBytesPerRow, + bool *PTEBufferSizeNotExceeded, + unsigned int *dpte_row_height, + unsigned int *meta_row_height) +{ + unsigned int MetaRequestHeight; + unsigned int MetaRequestWidth; + unsigned int MetaSurfWidth; + unsigned int MetaSurfHeight; + unsigned int MPDEBytesFrame; + unsigned int MetaPTEBytesFrame; + unsigned int DCCMetaSurfaceBytes; + + unsigned int MacroTileSizeBytes; + unsigned int MacroTileHeight; + unsigned int DPDE0BytesFrame; + unsigned int ExtraDPDEBytesFrame; + unsigned int PDEAndMetaPTEBytesFrame; + + if (DCCEnable == true) { + MetaRequestHeight = 8 * BlockHeight256Bytes; + MetaRequestWidth = 8 * BlockWidth256Bytes; + if (ScanDirection == dm_horz) { + *meta_row_height = MetaRequestHeight; + MetaSurfWidth = dml_ceil((double) SwathWidth - 1, MetaRequestWidth) + + MetaRequestWidth; + *MetaRowByte = MetaSurfWidth * MetaRequestHeight * BytePerPixel / 256.0; + } else { + *meta_row_height = MetaRequestWidth; + MetaSurfHeight = dml_ceil((double) SwathWidth - 1, MetaRequestHeight) + + MetaRequestHeight; + *MetaRowByte = MetaSurfHeight * MetaRequestWidth * BytePerPixel / 256.0; + } + if (ScanDirection == dm_horz) { + DCCMetaSurfaceBytes = DCCMetaPitch + * (dml_ceil(ViewportHeight - 1, 64 * BlockHeight256Bytes) + + 64 * BlockHeight256Bytes) * BytePerPixel + / 256; + } else { + DCCMetaSurfaceBytes = DCCMetaPitch + * 
(dml_ceil( + (double) ViewportHeight - 1, + 64 * BlockHeight256Bytes) + + 64 * BlockHeight256Bytes) * BytePerPixel + / 256; + } + if (GPUVMEnable == true) { + MetaPTEBytesFrame = (dml_ceil( + (double) (DCCMetaSurfaceBytes - VMMPageSize) + / (8 * VMMPageSize), + 1) + 1) * 64; + MPDEBytesFrame = 128 * (mode_lib->vba.GPUVMMaxPageTableLevels - 1); + } else { + MetaPTEBytesFrame = 0; + MPDEBytesFrame = 0; + } + } else { + MetaPTEBytesFrame = 0; + MPDEBytesFrame = 0; + *MetaRowByte = 0; + } + + if (SurfaceTiling == dm_sw_linear || SurfaceTiling == dm_sw_gfx7_2d_thin_gl || SurfaceTiling == dm_sw_gfx7_2d_thin_lvp) { + MacroTileSizeBytes = 256; + MacroTileHeight = BlockHeight256Bytes; + } else if (SurfaceTiling == dm_sw_4kb_s || SurfaceTiling == dm_sw_4kb_s_x + || SurfaceTiling == dm_sw_4kb_d || SurfaceTiling == dm_sw_4kb_d_x) { + MacroTileSizeBytes = 4096; + MacroTileHeight = 4 * BlockHeight256Bytes; + } else if (SurfaceTiling == dm_sw_64kb_s || SurfaceTiling == dm_sw_64kb_s_t + || SurfaceTiling == dm_sw_64kb_s_x || SurfaceTiling == dm_sw_64kb_d + || SurfaceTiling == dm_sw_64kb_d_t || SurfaceTiling == dm_sw_64kb_d_x + || SurfaceTiling == dm_sw_64kb_r_x) { + MacroTileSizeBytes = 65536; + MacroTileHeight = 16 * BlockHeight256Bytes; + } else { + MacroTileSizeBytes = 262144; + MacroTileHeight = 32 * BlockHeight256Bytes; + } + *MacroTileWidth = MacroTileSizeBytes / BytePerPixel / MacroTileHeight; + + if (GPUVMEnable == true && mode_lib->vba.GPUVMMaxPageTableLevels > 1) { + if (ScanDirection == dm_horz) { + DPDE0BytesFrame = + 64 + * (dml_ceil( + ((Pitch + * (dml_ceil( + ViewportHeight + - 1, + MacroTileHeight) + + MacroTileHeight) + * BytePerPixel) + - MacroTileSizeBytes) + / (8 + * 2097152), + 1) + 1); + } else { + DPDE0BytesFrame = + 64 + * (dml_ceil( + ((Pitch + * (dml_ceil( + (double) SwathWidth + - 1, + MacroTileHeight) + + MacroTileHeight) + * BytePerPixel) + - MacroTileSizeBytes) + / (8 + * 2097152), + 1) + 1); + } + ExtraDPDEBytesFrame = 128 * (mode_lib->vba.GPUVMMaxPageTableLevels - 2); + } else { + DPDE0BytesFrame = 0; + ExtraDPDEBytesFrame = 0; + } + + PDEAndMetaPTEBytesFrame = MetaPTEBytesFrame + MPDEBytesFrame + DPDE0BytesFrame + + ExtraDPDEBytesFrame; + + if (GPUVMEnable == true) { + unsigned int PTERequestSize; + unsigned int PixelPTEReqHeight; + unsigned int PixelPTEReqWidth; + double FractionOfPTEReturnDrop; + unsigned int EffectivePDEProcessingBufIn64KBReqs; + + if (SurfaceTiling == dm_sw_linear) { + PixelPTEReqHeight = 1; + PixelPTEReqWidth = 8.0 * VMMPageSize / BytePerPixel; + PTERequestSize = 64; + FractionOfPTEReturnDrop = 0; + } else if (MacroTileSizeBytes == 4096) { + PixelPTEReqHeight = MacroTileHeight; + PixelPTEReqWidth = 8 * *MacroTileWidth; + PTERequestSize = 64; + if (ScanDirection == dm_horz) + FractionOfPTEReturnDrop = 0; + else + FractionOfPTEReturnDrop = 7 / 8; + } else if (VMMPageSize == 4096 && MacroTileSizeBytes > 4096) { + PixelPTEReqHeight = 16 * BlockHeight256Bytes; + PixelPTEReqWidth = 16 * BlockWidth256Bytes; + PTERequestSize = 128; + FractionOfPTEReturnDrop = 0; + } else { + PixelPTEReqHeight = MacroTileHeight; + PixelPTEReqWidth = 8 * *MacroTileWidth; + PTERequestSize = 64; + FractionOfPTEReturnDrop = 0; + } + + if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10) + EffectivePDEProcessingBufIn64KBReqs = PDEProcessingBufIn64KBReqs / 2; + else + EffectivePDEProcessingBufIn64KBReqs = PDEProcessingBufIn64KBReqs; + + if (SurfaceTiling == dm_sw_linear) { + *dpte_row_height = + dml_min( + 128, + 1 + << (unsigned int) dml_floor( + dml_log2( + dml_min( 
+ (double) PTEBufferSizeInRequestsLuma + * PixelPTEReqWidth, + EffectivePDEProcessingBufIn64KBReqs + * 65536.0 + / BytePerPixel) + / Pitch), + 1)); + *PixelPTEBytesPerRow = PTERequestSize + * (dml_ceil( + (double) (Pitch * *dpte_row_height - 1) + / PixelPTEReqWidth, + 1) + 1); + } else if (ScanDirection == dm_horz) { + *dpte_row_height = PixelPTEReqHeight; + *PixelPTEBytesPerRow = PTERequestSize + * (dml_ceil(((double) SwathWidth - 1) / PixelPTEReqWidth, 1) + + 1); + } else { + *dpte_row_height = dml_min(PixelPTEReqWidth, *MacroTileWidth); + *PixelPTEBytesPerRow = PTERequestSize + * (dml_ceil( + ((double) SwathWidth - 1) + / PixelPTEReqHeight, + 1) + 1); + } + if (*PixelPTEBytesPerRow * (1 - FractionOfPTEReturnDrop) + <= 64 * PTEBufferSizeInRequestsLuma) { + *PTEBufferSizeNotExceeded = true; + } else { + *PTEBufferSizeNotExceeded = false; + } + } else { + *PixelPTEBytesPerRow = 0; + *PTEBufferSizeNotExceeded = true; + } + + return PDEAndMetaPTEBytesFrame; +} + +static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation( + struct display_mode_lib *mode_lib) +{ + unsigned int j, k; + + mode_lib->vba.WritebackDISPCLK = 0.0; + mode_lib->vba.DISPCLKWithRamping = 0; + mode_lib->vba.DISPCLKWithoutRamping = 0; + mode_lib->vba.GlobalDPPCLK = 0.0; + + // dml_ml->vba.DISPCLK and dml_ml->vba.DPPCLK Calculation + // + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { + if (mode_lib->vba.WritebackEnable[k]) { + mode_lib->vba.WritebackDISPCLK = + dml_max( + mode_lib->vba.WritebackDISPCLK, + CalculateWriteBackDISPCLK( + mode_lib->vba.WritebackPixelFormat[k], + mode_lib->vba.PixelClock[k], + mode_lib->vba.WritebackHRatio[k], + mode_lib->vba.WritebackVRatio[k], + mode_lib->vba.WritebackLumaHTaps[k], + mode_lib->vba.WritebackLumaVTaps[k], + mode_lib->vba.WritebackChromaHTaps[k], + mode_lib->vba.WritebackChromaVTaps[k], + mode_lib->vba.WritebackDestinationWidth[k], + mode_lib->vba.HTotal[k], + mode_lib->vba.WritebackChromaLineBufferWidth)); + } + } + + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { + if (mode_lib->vba.HRatio[k] > 1) { + mode_lib->vba.PSCL_THROUGHPUT_LUMA[k] = dml_min( + mode_lib->vba.MaxDCHUBToPSCLThroughput, + mode_lib->vba.MaxPSCLToLBThroughput + * mode_lib->vba.HRatio[k] + / dml_ceil( + mode_lib->vba.htaps[k] + / 6.0, + 1)); + } else { + mode_lib->vba.PSCL_THROUGHPUT_LUMA[k] = dml_min( + mode_lib->vba.MaxDCHUBToPSCLThroughput, + mode_lib->vba.MaxPSCLToLBThroughput); + } + + mode_lib->vba.DPPCLKUsingSingleDPPLuma = + mode_lib->vba.PixelClock[k] + * dml_max( + mode_lib->vba.vtaps[k] / 6.0 + * dml_min( + 1.0, + mode_lib->vba.HRatio[k]), + dml_max( + mode_lib->vba.HRatio[k] + * mode_lib->vba.VRatio[k] + / mode_lib->vba.PSCL_THROUGHPUT_LUMA[k], + 1.0)); + + if ((mode_lib->vba.htaps[k] > 6 || mode_lib->vba.vtaps[k] > 6) + && mode_lib->vba.DPPCLKUsingSingleDPPLuma + < 2 * mode_lib->vba.PixelClock[k]) { + mode_lib->vba.DPPCLKUsingSingleDPPLuma = 2 * mode_lib->vba.PixelClock[k]; + } + + if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8 + && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) { + mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k] = 0.0; + mode_lib->vba.DPPCLKUsingSingleDPP[k] = + mode_lib->vba.DPPCLKUsingSingleDPPLuma; + } else { + if (mode_lib->vba.HRatio[k] > 1) { + mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k] = + dml_min( + mode_lib->vba.MaxDCHUBToPSCLThroughput, + mode_lib->vba.MaxPSCLToLBThroughput + * mode_lib->vba.HRatio[k] + / 2 + / dml_ceil( + mode_lib->vba.HTAPsChroma[k] + / 6.0, + 1.0)); + } else { + 
mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k] = dml_min( + mode_lib->vba.MaxDCHUBToPSCLThroughput, + mode_lib->vba.MaxPSCLToLBThroughput); + } + mode_lib->vba.DPPCLKUsingSingleDPPChroma = + mode_lib->vba.PixelClock[k] + * dml_max( + mode_lib->vba.VTAPsChroma[k] + / 6.0 + * dml_min( + 1.0, + mode_lib->vba.HRatio[k] + / 2), + dml_max( + mode_lib->vba.HRatio[k] + * mode_lib->vba.VRatio[k] + / 4 + / mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k], + 1.0)); + + if ((mode_lib->vba.HTAPsChroma[k] > 6 || mode_lib->vba.VTAPsChroma[k] > 6) + && mode_lib->vba.DPPCLKUsingSingleDPPChroma + < 2 * mode_lib->vba.PixelClock[k]) { + mode_lib->vba.DPPCLKUsingSingleDPPChroma = 2 + * mode_lib->vba.PixelClock[k]; + } + + mode_lib->vba.DPPCLKUsingSingleDPP[k] = dml_max( + mode_lib->vba.DPPCLKUsingSingleDPPLuma, + mode_lib->vba.DPPCLKUsingSingleDPPChroma); + } + } + + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { + if (mode_lib->vba.BlendingAndTiming[k] != k) + continue; + if (mode_lib->vba.ODMCombineEnabled[k]) { + mode_lib->vba.DISPCLKWithRamping = + dml_max( + mode_lib->vba.DISPCLKWithRamping, + mode_lib->vba.PixelClock[k] / 2 + * (1 + + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading + / 100) + * (1 + + mode_lib->vba.DISPCLKRampingMargin + / 100)); + mode_lib->vba.DISPCLKWithoutRamping = + dml_max( + mode_lib->vba.DISPCLKWithoutRamping, + mode_lib->vba.PixelClock[k] / 2 + * (1 + + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading + / 100)); + } else if (!mode_lib->vba.ODMCombineEnabled[k]) { + mode_lib->vba.DISPCLKWithRamping = + dml_max( + mode_lib->vba.DISPCLKWithRamping, + mode_lib->vba.PixelClock[k] + * (1 + + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading + / 100) + * (1 + + mode_lib->vba.DISPCLKRampingMargin + / 100)); + mode_lib->vba.DISPCLKWithoutRamping = + dml_max( + mode_lib->vba.DISPCLKWithoutRamping, + mode_lib->vba.PixelClock[k] + * (1 + + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading + / 100)); + } + } + + mode_lib->vba.DISPCLKWithRamping = dml_max( + mode_lib->vba.DISPCLKWithRamping, + mode_lib->vba.WritebackDISPCLK); + mode_lib->vba.DISPCLKWithoutRamping = dml_max( + mode_lib->vba.DISPCLKWithoutRamping, + mode_lib->vba.WritebackDISPCLK); + + ASSERT(mode_lib->vba.DISPCLKDPPCLKVCOSpeed != 0); + mode_lib->vba.DISPCLKWithRampingRoundedToDFSGranularity = RoundToDFSGranularityUp( + mode_lib->vba.DISPCLKWithRamping, + mode_lib->vba.DISPCLKDPPCLKVCOSpeed); + mode_lib->vba.DISPCLKWithoutRampingRoundedToDFSGranularity = RoundToDFSGranularityUp( + mode_lib->vba.DISPCLKWithoutRamping, + mode_lib->vba.DISPCLKDPPCLKVCOSpeed); + mode_lib->vba.MaxDispclkRoundedToDFSGranularity = RoundToDFSGranularityDown( + mode_lib->vba.soc.clock_limits[mode_lib->vba.soc.num_states].dispclk_mhz, + mode_lib->vba.DISPCLKDPPCLKVCOSpeed); + if (mode_lib->vba.DISPCLKWithoutRampingRoundedToDFSGranularity + > mode_lib->vba.MaxDispclkRoundedToDFSGranularity) { + mode_lib->vba.DISPCLK_calculated = + mode_lib->vba.DISPCLKWithoutRampingRoundedToDFSGranularity; + } else if (mode_lib->vba.DISPCLKWithRampingRoundedToDFSGranularity + > mode_lib->vba.MaxDispclkRoundedToDFSGranularity) { + mode_lib->vba.DISPCLK_calculated = mode_lib->vba.MaxDispclkRoundedToDFSGranularity; + } else { + mode_lib->vba.DISPCLK_calculated = + mode_lib->vba.DISPCLKWithRampingRoundedToDFSGranularity; + } + DTRACE(" dispclk_mhz (calculated) = %f", mode_lib->vba.DISPCLK_calculated); + + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { + if (mode_lib->vba.DPPPerPlane[k] == 0) { + mode_lib->vba.DPPCLK_calculated[k] = 0; + } else { + 
mode_lib->vba.DPPCLK_calculated[k] = mode_lib->vba.DPPCLKUsingSingleDPP[k] + / mode_lib->vba.DPPPerPlane[k] + * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100); + } + mode_lib->vba.GlobalDPPCLK = dml_max( + mode_lib->vba.GlobalDPPCLK, + mode_lib->vba.DPPCLK_calculated[k]); + } + mode_lib->vba.GlobalDPPCLK = RoundToDFSGranularityUp( + mode_lib->vba.GlobalDPPCLK, + mode_lib->vba.DISPCLKDPPCLKVCOSpeed); + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { + mode_lib->vba.DPPCLK_calculated[k] = mode_lib->vba.GlobalDPPCLK / 255 + * dml_ceil( + mode_lib->vba.DPPCLK_calculated[k] * 255 + / mode_lib->vba.GlobalDPPCLK, + 1); + DTRACE(" dppclk_mhz[%i] (calculated) = %f", k, mode_lib->vba.DPPCLK_calculated[k]); + } + + // Urgent Watermark + mode_lib->vba.DCCEnabledAnyPlane = false; + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) + if (mode_lib->vba.DCCEnable[k]) + mode_lib->vba.DCCEnabledAnyPlane = true; + + mode_lib->vba.ReturnBandwidthToDCN = dml_min( + mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLK, + mode_lib->vba.FabricAndDRAMBandwidth * 1000) + * mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly / 100; + + mode_lib->vba.ReturnBW = mode_lib->vba.ReturnBandwidthToDCN; + mode_lib->vba.ReturnBW = adjust_ReturnBW( + mode_lib, + mode_lib->vba.ReturnBW, + mode_lib->vba.DCCEnabledAnyPlane, + mode_lib->vba.ReturnBandwidthToDCN); + + // Let's do this calculation again?? + mode_lib->vba.ReturnBandwidthToDCN = dml_min( + mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLK, + mode_lib->vba.FabricAndDRAMBandwidth * 1000); + mode_lib->vba.ReturnBW = adjust_ReturnBW( + mode_lib, + mode_lib->vba.ReturnBW, + mode_lib->vba.DCCEnabledAnyPlane, + mode_lib->vba.ReturnBandwidthToDCN); + + DTRACE(" dcfclk_mhz = %f", mode_lib->vba.DCFCLK); + DTRACE(" return_bw_to_dcn = %f", mode_lib->vba.ReturnBandwidthToDCN); + DTRACE(" return_bus_bw = %f", mode_lib->vba.ReturnBW); + + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { + bool MainPlaneDoesODMCombine = false; + + if (mode_lib->vba.SourceScan[k] == dm_horz) + mode_lib->vba.SwathWidthSingleDPPY[k] = mode_lib->vba.ViewportWidth[k]; + else + mode_lib->vba.SwathWidthSingleDPPY[k] = mode_lib->vba.ViewportHeight[k]; + + if (mode_lib->vba.ODMCombineEnabled[k] == true) + MainPlaneDoesODMCombine = true; + for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) + if (mode_lib->vba.BlendingAndTiming[k] == j + && mode_lib->vba.ODMCombineEnabled[j] == true) + MainPlaneDoesODMCombine = true; + + if (MainPlaneDoesODMCombine == true) + mode_lib->vba.SwathWidthY[k] = dml_min( + (double) mode_lib->vba.SwathWidthSingleDPPY[k], + dml_round( + mode_lib->vba.HActive[k] / 2.0 + * mode_lib->vba.HRatio[k])); + else { + if (mode_lib->vba.DPPPerPlane[k] == 0) { + mode_lib->vba.SwathWidthY[k] = 0; + } else { + mode_lib->vba.SwathWidthY[k] = mode_lib->vba.SwathWidthSingleDPPY[k] + / mode_lib->vba.DPPPerPlane[k]; + } + } + } + + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { + if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) { + mode_lib->vba.BytePerPixelDETY[k] = 8; + mode_lib->vba.BytePerPixelDETC[k] = 0; + } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32) { + mode_lib->vba.BytePerPixelDETY[k] = 4; + mode_lib->vba.BytePerPixelDETC[k] = 0; + } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_16) { + mode_lib->vba.BytePerPixelDETY[k] = 2; + mode_lib->vba.BytePerPixelDETC[k] = 0; + } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_8) { + mode_lib->vba.BytePerPixelDETY[k] = 1; + 
mode_lib->vba.BytePerPixelDETC[k] = 0; + } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) { + mode_lib->vba.BytePerPixelDETY[k] = 1; + mode_lib->vba.BytePerPixelDETC[k] = 2; + } else { // dm_420_10 + mode_lib->vba.BytePerPixelDETY[k] = 4.0 / 3.0; + mode_lib->vba.BytePerPixelDETC[k] = 8.0 / 3.0; + } + } + + mode_lib->vba.TotalDataReadBandwidth = 0.0; + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { + mode_lib->vba.ReadBandwidthPlaneLuma[k] = mode_lib->vba.SwathWidthSingleDPPY[k] + * dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1) + / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) + * mode_lib->vba.VRatio[k]; + mode_lib->vba.ReadBandwidthPlaneChroma[k] = mode_lib->vba.SwathWidthSingleDPPY[k] + / 2 * dml_ceil(mode_lib->vba.BytePerPixelDETC[k], 2) + / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) + * mode_lib->vba.VRatio[k] / 2; + DTRACE( + " read_bw[%i] = %fBps", + k, + mode_lib->vba.ReadBandwidthPlaneLuma[k] + + mode_lib->vba.ReadBandwidthPlaneChroma[k]); + mode_lib->vba.TotalDataReadBandwidth += mode_lib->vba.ReadBandwidthPlaneLuma[k] + + mode_lib->vba.ReadBandwidthPlaneChroma[k]; + } + + mode_lib->vba.TotalDCCActiveDPP = 0; + mode_lib->vba.TotalActiveDPP = 0; + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { + mode_lib->vba.TotalActiveDPP = mode_lib->vba.TotalActiveDPP + + mode_lib->vba.DPPPerPlane[k]; + if (mode_lib->vba.DCCEnable[k]) + mode_lib->vba.TotalDCCActiveDPP = mode_lib->vba.TotalDCCActiveDPP + + mode_lib->vba.DPPPerPlane[k]; + } + + mode_lib->vba.UrgentRoundTripAndOutOfOrderLatency = + (mode_lib->vba.RoundTripPingLatencyCycles + 32) / mode_lib->vba.DCFCLK + + mode_lib->vba.UrgentOutOfOrderReturnPerChannelPixelDataOnly + * mode_lib->vba.NumberOfChannels + / mode_lib->vba.ReturnBW; + + mode_lib->vba.LastPixelOfLineExtraWatermark = 0; + + mode_lib->vba.UrgentExtraLatency = mode_lib->vba.UrgentRoundTripAndOutOfOrderLatency + + (mode_lib->vba.TotalActiveDPP * mode_lib->vba.PixelChunkSizeInKByte + + mode_lib->vba.TotalDCCActiveDPP + * mode_lib->vba.MetaChunkSize) * 1024.0 + / mode_lib->vba.ReturnBW; + + if (mode_lib->vba.GPUVMEnable) + mode_lib->vba.UrgentExtraLatency += mode_lib->vba.TotalActiveDPP + * mode_lib->vba.PTEGroupSize / mode_lib->vba.ReturnBW; + + mode_lib->vba.UrgentWatermark = mode_lib->vba.UrgentLatencyPixelDataOnly + + mode_lib->vba.LastPixelOfLineExtraWatermark + + mode_lib->vba.UrgentExtraLatency; + + DTRACE(" urgent_extra_latency = %fus", mode_lib->vba.UrgentExtraLatency); + DTRACE(" wm_urgent = %fus", mode_lib->vba.UrgentWatermark); + + mode_lib->vba.UrgentLatency = mode_lib->vba.UrgentLatencyPixelDataOnly; + + mode_lib->vba.TotalActiveWriteback = 0; + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { + if (mode_lib->vba.WritebackEnable[k]) + mode_lib->vba.TotalActiveWriteback = mode_lib->vba.TotalActiveWriteback + mode_lib->vba.ActiveWritebacksPerPlane[k]; + } + + if (mode_lib->vba.TotalActiveWriteback <= 1) + mode_lib->vba.WritebackUrgentWatermark = mode_lib->vba.WritebackLatency; + else + mode_lib->vba.WritebackUrgentWatermark = mode_lib->vba.WritebackLatency + + mode_lib->vba.WritebackChunkSize * 1024.0 / 32 + / mode_lib->vba.SOCCLK; + + DTRACE(" wm_wb_urgent = %fus", mode_lib->vba.WritebackUrgentWatermark); + + // NB P-State/DRAM Clock Change Watermark + mode_lib->vba.DRAMClockChangeWatermark = mode_lib->vba.DRAMClockChangeLatency + + mode_lib->vba.UrgentWatermark; + + DTRACE(" wm_pstate_change = %fus", mode_lib->vba.DRAMClockChangeWatermark); + + DTRACE(" calculating wb pstate watermark"); + DTRACE(" 
total wb outputs %d", mode_lib->vba.TotalActiveWriteback); + DTRACE(" socclk frequency %f Mhz", mode_lib->vba.SOCCLK); + + if (mode_lib->vba.TotalActiveWriteback <= 1) + mode_lib->vba.WritebackDRAMClockChangeWatermark = + mode_lib->vba.DRAMClockChangeLatency + + mode_lib->vba.WritebackLatency; + else + mode_lib->vba.WritebackDRAMClockChangeWatermark = + mode_lib->vba.DRAMClockChangeLatency + + mode_lib->vba.WritebackLatency + + mode_lib->vba.WritebackChunkSize * 1024.0 / 32 + / mode_lib->vba.SOCCLK; + + DTRACE(" wm_wb_pstate %fus", mode_lib->vba.WritebackDRAMClockChangeWatermark); + + // Stutter Efficiency + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { + mode_lib->vba.LinesInDETY[k] = mode_lib->vba.DETBufferSizeY[k] + / mode_lib->vba.BytePerPixelDETY[k] / mode_lib->vba.SwathWidthY[k]; + mode_lib->vba.LinesInDETYRoundedDownToSwath[k] = dml_floor( + mode_lib->vba.LinesInDETY[k], + mode_lib->vba.SwathHeightY[k]); + mode_lib->vba.FullDETBufferingTimeY[k] = + mode_lib->vba.LinesInDETYRoundedDownToSwath[k] + * (mode_lib->vba.HTotal[k] + / mode_lib->vba.PixelClock[k]) + / mode_lib->vba.VRatio[k]; + if (mode_lib->vba.BytePerPixelDETC[k] > 0) { + mode_lib->vba.LinesInDETC[k] = mode_lib->vba.DETBufferSizeC[k] + / mode_lib->vba.BytePerPixelDETC[k] + / (mode_lib->vba.SwathWidthY[k] / 2); + mode_lib->vba.LinesInDETCRoundedDownToSwath[k] = dml_floor( + mode_lib->vba.LinesInDETC[k], + mode_lib->vba.SwathHeightC[k]); + mode_lib->vba.FullDETBufferingTimeC[k] = + mode_lib->vba.LinesInDETCRoundedDownToSwath[k] + * (mode_lib->vba.HTotal[k] + / mode_lib->vba.PixelClock[k]) + / (mode_lib->vba.VRatio[k] / 2); + } else { + mode_lib->vba.LinesInDETC[k] = 0; + mode_lib->vba.LinesInDETCRoundedDownToSwath[k] = 0; + mode_lib->vba.FullDETBufferingTimeC[k] = 999999; + } + } + + mode_lib->vba.MinFullDETBufferingTime = 999999.0; + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { + if (mode_lib->vba.FullDETBufferingTimeY[k] + < mode_lib->vba.MinFullDETBufferingTime) { + mode_lib->vba.MinFullDETBufferingTime = + mode_lib->vba.FullDETBufferingTimeY[k]; + mode_lib->vba.FrameTimeForMinFullDETBufferingTime = + (double) mode_lib->vba.VTotal[k] * mode_lib->vba.HTotal[k] + / mode_lib->vba.PixelClock[k]; + } + if (mode_lib->vba.FullDETBufferingTimeC[k] + < mode_lib->vba.MinFullDETBufferingTime) { + mode_lib->vba.MinFullDETBufferingTime = + mode_lib->vba.FullDETBufferingTimeC[k]; + mode_lib->vba.FrameTimeForMinFullDETBufferingTime = + (double) mode_lib->vba.VTotal[k] * mode_lib->vba.HTotal[k] + / mode_lib->vba.PixelClock[k]; + } + } + + mode_lib->vba.AverageReadBandwidthGBytePerSecond = 0.0; + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { + if (mode_lib->vba.DCCEnable[k]) { + mode_lib->vba.AverageReadBandwidthGBytePerSecond = + mode_lib->vba.AverageReadBandwidthGBytePerSecond + + mode_lib->vba.ReadBandwidthPlaneLuma[k] + / mode_lib->vba.DCCRate[k] + / 1000 + + mode_lib->vba.ReadBandwidthPlaneChroma[k] + / mode_lib->vba.DCCRate[k] + / 1000; + } else { + mode_lib->vba.AverageReadBandwidthGBytePerSecond = + mode_lib->vba.AverageReadBandwidthGBytePerSecond + + mode_lib->vba.ReadBandwidthPlaneLuma[k] + / 1000 + + mode_lib->vba.ReadBandwidthPlaneChroma[k] + / 1000; + } + if (mode_lib->vba.DCCEnable[k]) { + mode_lib->vba.AverageReadBandwidthGBytePerSecond = + mode_lib->vba.AverageReadBandwidthGBytePerSecond + + mode_lib->vba.ReadBandwidthPlaneLuma[k] + / 1000 / 256 + + mode_lib->vba.ReadBandwidthPlaneChroma[k] + / 1000 / 256; + } + if (mode_lib->vba.GPUVMEnable) { + 
mode_lib->vba.AverageReadBandwidthGBytePerSecond = + mode_lib->vba.AverageReadBandwidthGBytePerSecond + + mode_lib->vba.ReadBandwidthPlaneLuma[k] + / 1000 / 512 + + mode_lib->vba.ReadBandwidthPlaneChroma[k] + / 1000 / 512; + } + } + + mode_lib->vba.PartOfBurstThatFitsInROB = + dml_min( + mode_lib->vba.MinFullDETBufferingTime + * mode_lib->vba.TotalDataReadBandwidth, + mode_lib->vba.ROBBufferSizeInKByte * 1024 + * mode_lib->vba.TotalDataReadBandwidth + / (mode_lib->vba.AverageReadBandwidthGBytePerSecond + * 1000)); + mode_lib->vba.StutterBurstTime = mode_lib->vba.PartOfBurstThatFitsInROB + * (mode_lib->vba.AverageReadBandwidthGBytePerSecond * 1000) + / mode_lib->vba.TotalDataReadBandwidth / mode_lib->vba.ReturnBW + + (mode_lib->vba.MinFullDETBufferingTime + * mode_lib->vba.TotalDataReadBandwidth + - mode_lib->vba.PartOfBurstThatFitsInROB) + / (mode_lib->vba.DCFCLK * 64); + if (mode_lib->vba.TotalActiveWriteback == 0) { + mode_lib->vba.StutterEfficiencyNotIncludingVBlank = (1 + - (mode_lib->vba.SRExitTime + mode_lib->vba.StutterBurstTime) + / mode_lib->vba.MinFullDETBufferingTime) * 100; + } else { + mode_lib->vba.StutterEfficiencyNotIncludingVBlank = 0; + } + + mode_lib->vba.SmallestVBlank = 999999; + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { + if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) { + mode_lib->vba.VBlankTime = (double) (mode_lib->vba.VTotal[k] + - mode_lib->vba.VActive[k]) * mode_lib->vba.HTotal[k] + / mode_lib->vba.PixelClock[k]; + } else { + mode_lib->vba.VBlankTime = 0; + } + mode_lib->vba.SmallestVBlank = dml_min( + mode_lib->vba.SmallestVBlank, + mode_lib->vba.VBlankTime); + } + + mode_lib->vba.StutterEfficiency = (mode_lib->vba.StutterEfficiencyNotIncludingVBlank / 100 + * (mode_lib->vba.FrameTimeForMinFullDETBufferingTime + - mode_lib->vba.SmallestVBlank) + + mode_lib->vba.SmallestVBlank) + / mode_lib->vba.FrameTimeForMinFullDETBufferingTime * 100; + + // dml_ml->vba.DCFCLK Deep Sleep + mode_lib->vba.DCFCLKDeepSleep = 8.0; + + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; k++) { + if (mode_lib->vba.BytePerPixelDETC[k] > 0) { + mode_lib->vba.DCFCLKDeepSleepPerPlane[k] = + dml_max( + 1.1 * mode_lib->vba.SwathWidthY[k] + * dml_ceil( + mode_lib->vba.BytePerPixelDETY[k], + 1) / 32 + / mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k], + 1.1 * mode_lib->vba.SwathWidthY[k] / 2.0 + * dml_ceil( + mode_lib->vba.BytePerPixelDETC[k], + 2) / 32 + / mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k]); + } else + mode_lib->vba.DCFCLKDeepSleepPerPlane[k] = 1.1 * mode_lib->vba.SwathWidthY[k] + * dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1) / 64.0 + / mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k]; + mode_lib->vba.DCFCLKDeepSleepPerPlane[k] = dml_max( + mode_lib->vba.DCFCLKDeepSleepPerPlane[k], + mode_lib->vba.PixelClock[k] / 16.0); + mode_lib->vba.DCFCLKDeepSleep = dml_max( + mode_lib->vba.DCFCLKDeepSleep, + mode_lib->vba.DCFCLKDeepSleepPerPlane[k]); + + DTRACE( + " dcfclk_deepsleep_per_plane[%i] = %fMHz", + k, + mode_lib->vba.DCFCLKDeepSleepPerPlane[k]); + } + + DTRACE(" dcfclk_deepsleep_mhz = %fMHz", mode_lib->vba.DCFCLKDeepSleep); + + // Stutter Watermark + mode_lib->vba.StutterExitWatermark = mode_lib->vba.SRExitTime + + mode_lib->vba.LastPixelOfLineExtraWatermark + + mode_lib->vba.UrgentExtraLatency + 10 / mode_lib->vba.DCFCLKDeepSleep; + mode_lib->vba.StutterEnterPlusExitWatermark = mode_lib->vba.SREnterPlusExitTime + + mode_lib->vba.LastPixelOfLineExtraWatermark + + mode_lib->vba.UrgentExtraLatency; + + DTRACE(" 
wm_cstate_exit = %fus", mode_lib->vba.StutterExitWatermark); + DTRACE(" wm_cstate_enter_exit = %fus", mode_lib->vba.StutterEnterPlusExitWatermark); + + // Urgent Latency Supported + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { + mode_lib->vba.EffectiveDETPlusLBLinesLuma = + dml_floor( + mode_lib->vba.LinesInDETY[k] + + dml_min( + mode_lib->vba.LinesInDETY[k] + * mode_lib->vba.DPPCLK[k] + * mode_lib->vba.BytePerPixelDETY[k] + * mode_lib->vba.PSCL_THROUGHPUT_LUMA[k] + / (mode_lib->vba.ReturnBW + / mode_lib->vba.DPPPerPlane[k]), + (double) mode_lib->vba.EffectiveLBLatencyHidingSourceLinesLuma), + mode_lib->vba.SwathHeightY[k]); + + mode_lib->vba.UrgentLatencySupportUsLuma = mode_lib->vba.EffectiveDETPlusLBLinesLuma + * (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) + / mode_lib->vba.VRatio[k] + - mode_lib->vba.EffectiveDETPlusLBLinesLuma + * mode_lib->vba.SwathWidthY[k] + * mode_lib->vba.BytePerPixelDETY[k] + / (mode_lib->vba.ReturnBW + / mode_lib->vba.DPPPerPlane[k]); + + if (mode_lib->vba.BytePerPixelDETC[k] > 0) { + mode_lib->vba.EffectiveDETPlusLBLinesChroma = + dml_floor( + mode_lib->vba.LinesInDETC[k] + + dml_min( + mode_lib->vba.LinesInDETC[k] + * mode_lib->vba.DPPCLK[k] + * mode_lib->vba.BytePerPixelDETC[k] + * mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k] + / (mode_lib->vba.ReturnBW + / mode_lib->vba.DPPPerPlane[k]), + (double) mode_lib->vba.EffectiveLBLatencyHidingSourceLinesChroma), + mode_lib->vba.SwathHeightC[k]); + mode_lib->vba.UrgentLatencySupportUsChroma = + mode_lib->vba.EffectiveDETPlusLBLinesChroma + * (mode_lib->vba.HTotal[k] + / mode_lib->vba.PixelClock[k]) + / (mode_lib->vba.VRatio[k] / 2) + - mode_lib->vba.EffectiveDETPlusLBLinesChroma + * (mode_lib->vba.SwathWidthY[k] + / 2) + * mode_lib->vba.BytePerPixelDETC[k] + / (mode_lib->vba.ReturnBW + / mode_lib->vba.DPPPerPlane[k]); + mode_lib->vba.UrgentLatencySupportUs[k] = dml_min( + mode_lib->vba.UrgentLatencySupportUsLuma, + mode_lib->vba.UrgentLatencySupportUsChroma); + } else { + mode_lib->vba.UrgentLatencySupportUs[k] = + mode_lib->vba.UrgentLatencySupportUsLuma; + } + } + + mode_lib->vba.MinUrgentLatencySupportUs = 999999; + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { + mode_lib->vba.MinUrgentLatencySupportUs = dml_min( + mode_lib->vba.MinUrgentLatencySupportUs, + mode_lib->vba.UrgentLatencySupportUs[k]); + } + + // Non-Urgent Latency Tolerance + mode_lib->vba.NonUrgentLatencyTolerance = mode_lib->vba.MinUrgentLatencySupportUs + - mode_lib->vba.UrgentWatermark; + + // DSCCLK + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { + if ((mode_lib->vba.BlendingAndTiming[k] != k) || !mode_lib->vba.DSCEnabled[k]) { + mode_lib->vba.DSCCLK_calculated[k] = 0.0; + } else { + if (mode_lib->vba.OutputFormat[k] == dm_420 + || mode_lib->vba.OutputFormat[k] == dm_n422) + mode_lib->vba.DSCFormatFactor = 2; + else + mode_lib->vba.DSCFormatFactor = 1; + if (mode_lib->vba.ODMCombineEnabled[k]) + mode_lib->vba.DSCCLK_calculated[k] = + mode_lib->vba.PixelClockBackEnd[k] / 6 + / mode_lib->vba.DSCFormatFactor + / (1 + - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading + / 100); + else + mode_lib->vba.DSCCLK_calculated[k] = + mode_lib->vba.PixelClockBackEnd[k] / 3 + / mode_lib->vba.DSCFormatFactor + / (1 + - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading + / 100); + } + } + + // DSC Delay + // TODO + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { + double bpp = mode_lib->vba.OutputBpp[k]; + unsigned int slices = mode_lib->vba.NumberOfDSCSlices[k]; + + if (mode_lib->vba.DSCEnabled[k] && bpp != 
0) { + if (!mode_lib->vba.ODMCombineEnabled[k]) { + mode_lib->vba.DSCDelay[k] = + dscceComputeDelay( + mode_lib->vba.DSCInputBitPerComponent[k], + bpp, + dml_ceil( + (double) mode_lib->vba.HActive[k] + / mode_lib->vba.NumberOfDSCSlices[k], + 1), + slices, + mode_lib->vba.OutputFormat[k]) + + dscComputeDelay( + mode_lib->vba.OutputFormat[k]); + } else { + mode_lib->vba.DSCDelay[k] = + 2 + * (dscceComputeDelay( + mode_lib->vba.DSCInputBitPerComponent[k], + bpp, + dml_ceil( + (double) mode_lib->vba.HActive[k] + / mode_lib->vba.NumberOfDSCSlices[k], + 1), + slices / 2.0, + mode_lib->vba.OutputFormat[k]) + + dscComputeDelay( + mode_lib->vba.OutputFormat[k])); + } + mode_lib->vba.DSCDelay[k] = mode_lib->vba.DSCDelay[k] + * mode_lib->vba.PixelClock[k] + / mode_lib->vba.PixelClockBackEnd[k]; + } else { + mode_lib->vba.DSCDelay[k] = 0; + } + } + + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) + for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) // NumberOfPlanes + if (j != k && mode_lib->vba.BlendingAndTiming[k] == j + && mode_lib->vba.DSCEnabled[j]) + mode_lib->vba.DSCDelay[k] = mode_lib->vba.DSCDelay[j]; + + // Prefetch + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { + unsigned int PDEAndMetaPTEBytesFrameY; + unsigned int PixelPTEBytesPerRowY; + unsigned int MetaRowByteY; + unsigned int MetaRowByteC; + unsigned int PDEAndMetaPTEBytesFrameC; + unsigned int PixelPTEBytesPerRowC; + + Calculate256BBlockSizes( + mode_lib->vba.SourcePixelFormat[k], + mode_lib->vba.SurfaceTiling[k], + dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1), + dml_ceil(mode_lib->vba.BytePerPixelDETC[k], 2), + &mode_lib->vba.BlockHeight256BytesY[k], + &mode_lib->vba.BlockHeight256BytesC[k], + &mode_lib->vba.BlockWidth256BytesY[k], + &mode_lib->vba.BlockWidth256BytesC[k]); + PDEAndMetaPTEBytesFrameY = CalculateVMAndRowBytes( + mode_lib, + mode_lib->vba.DCCEnable[k], + mode_lib->vba.BlockHeight256BytesY[k], + mode_lib->vba.BlockWidth256BytesY[k], + mode_lib->vba.SourcePixelFormat[k], + mode_lib->vba.SurfaceTiling[k], + dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1), + mode_lib->vba.SourceScan[k], + mode_lib->vba.ViewportWidth[k], + mode_lib->vba.ViewportHeight[k], + mode_lib->vba.SwathWidthY[k], + mode_lib->vba.GPUVMEnable, + mode_lib->vba.VMMPageSize, + mode_lib->vba.PTEBufferSizeInRequestsLuma, + mode_lib->vba.PDEProcessingBufIn64KBReqs, + mode_lib->vba.PitchY[k], + mode_lib->vba.DCCMetaPitchY[k], + &mode_lib->vba.MacroTileWidthY[k], + &MetaRowByteY, + &PixelPTEBytesPerRowY, + &mode_lib->vba.PTEBufferSizeNotExceeded[mode_lib->vba.VoltageLevel][0], + &mode_lib->vba.dpte_row_height[k], + &mode_lib->vba.meta_row_height[k]); + mode_lib->vba.PrefetchSourceLinesY[k] = CalculatePrefetchSourceLines( + mode_lib, + mode_lib->vba.VRatio[k], + mode_lib->vba.vtaps[k], + mode_lib->vba.Interlace[k], + mode_lib->vba.ProgressiveToInterlaceUnitInOPP, + mode_lib->vba.SwathHeightY[k], + mode_lib->vba.ViewportYStartY[k], + &mode_lib->vba.VInitPreFillY[k], + &mode_lib->vba.MaxNumSwathY[k]); + + if ((mode_lib->vba.SourcePixelFormat[k] != dm_444_64 + && mode_lib->vba.SourcePixelFormat[k] != dm_444_32 + && mode_lib->vba.SourcePixelFormat[k] != dm_444_16 + && mode_lib->vba.SourcePixelFormat[k] != dm_444_8)) { + PDEAndMetaPTEBytesFrameC = + CalculateVMAndRowBytes( + mode_lib, + mode_lib->vba.DCCEnable[k], + mode_lib->vba.BlockHeight256BytesC[k], + mode_lib->vba.BlockWidth256BytesC[k], + mode_lib->vba.SourcePixelFormat[k], + mode_lib->vba.SurfaceTiling[k], + dml_ceil( + mode_lib->vba.BytePerPixelDETC[k], + 2), + 
mode_lib->vba.SourceScan[k], + mode_lib->vba.ViewportWidth[k] / 2, + mode_lib->vba.ViewportHeight[k] / 2, + mode_lib->vba.SwathWidthY[k] / 2, + mode_lib->vba.GPUVMEnable, + mode_lib->vba.VMMPageSize, + mode_lib->vba.PTEBufferSizeInRequestsLuma, + mode_lib->vba.PDEProcessingBufIn64KBReqs, + mode_lib->vba.PitchC[k], + 0, + &mode_lib->vba.MacroTileWidthC[k], + &MetaRowByteC, + &PixelPTEBytesPerRowC, + &mode_lib->vba.PTEBufferSizeNotExceeded[mode_lib->vba.VoltageLevel][0], + &mode_lib->vba.dpte_row_height_chroma[k], + &mode_lib->vba.meta_row_height_chroma[k]); + mode_lib->vba.PrefetchSourceLinesC[k] = CalculatePrefetchSourceLines( + mode_lib, + mode_lib->vba.VRatio[k] / 2, + mode_lib->vba.VTAPsChroma[k], + mode_lib->vba.Interlace[k], + mode_lib->vba.ProgressiveToInterlaceUnitInOPP, + mode_lib->vba.SwathHeightC[k], + mode_lib->vba.ViewportYStartC[k], + &mode_lib->vba.VInitPreFillC[k], + &mode_lib->vba.MaxNumSwathC[k]); + } else { + PixelPTEBytesPerRowC = 0; + PDEAndMetaPTEBytesFrameC = 0; + MetaRowByteC = 0; + mode_lib->vba.MaxNumSwathC[k] = 0; + mode_lib->vba.PrefetchSourceLinesC[k] = 0; + } + + mode_lib->vba.PixelPTEBytesPerRow[k] = PixelPTEBytesPerRowY + PixelPTEBytesPerRowC; + mode_lib->vba.PDEAndMetaPTEBytesFrame[k] = PDEAndMetaPTEBytesFrameY + + PDEAndMetaPTEBytesFrameC; + mode_lib->vba.MetaRowByte[k] = MetaRowByteY + MetaRowByteC; + + CalculateActiveRowBandwidth( + mode_lib->vba.GPUVMEnable, + mode_lib->vba.SourcePixelFormat[k], + mode_lib->vba.VRatio[k], + mode_lib->vba.DCCEnable[k], + mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k], + MetaRowByteY, + MetaRowByteC, + mode_lib->vba.meta_row_height[k], + mode_lib->vba.meta_row_height_chroma[k], + PixelPTEBytesPerRowY, + PixelPTEBytesPerRowC, + mode_lib->vba.dpte_row_height[k], + mode_lib->vba.dpte_row_height_chroma[k], + &mode_lib->vba.meta_row_bw[k], + &mode_lib->vba.dpte_row_bw[k], + &mode_lib->vba.qual_row_bw[k]); + } + + mode_lib->vba.TCalc = 24.0 / mode_lib->vba.DCFCLKDeepSleep; + + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { + if (mode_lib->vba.BlendingAndTiming[k] == k) { + if (mode_lib->vba.WritebackEnable[k] == true) { + mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k] = + mode_lib->vba.WritebackLatency + + CalculateWriteBackDelay( + mode_lib->vba.WritebackPixelFormat[k], + mode_lib->vba.WritebackHRatio[k], + mode_lib->vba.WritebackVRatio[k], + mode_lib->vba.WritebackLumaHTaps[k], + mode_lib->vba.WritebackLumaVTaps[k], + mode_lib->vba.WritebackChromaHTaps[k], + mode_lib->vba.WritebackChromaVTaps[k], + mode_lib->vba.WritebackDestinationWidth[k]) + / mode_lib->vba.DISPCLK; + } else + mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k] = 0; + for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) { + if (mode_lib->vba.BlendingAndTiming[j] == k + && mode_lib->vba.WritebackEnable[j] == true) { + mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k] = + dml_max( + mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k], + mode_lib->vba.WritebackLatency + + CalculateWriteBackDelay( + mode_lib->vba.WritebackPixelFormat[j], + mode_lib->vba.WritebackHRatio[j], + mode_lib->vba.WritebackVRatio[j], + mode_lib->vba.WritebackLumaHTaps[j], + mode_lib->vba.WritebackLumaVTaps[j], + mode_lib->vba.WritebackChromaHTaps[j], + mode_lib->vba.WritebackChromaVTaps[j], + mode_lib->vba.WritebackDestinationWidth[j]) + / mode_lib->vba.DISPCLK); + } + } + } + } + + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) + for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) + if 
(mode_lib->vba.BlendingAndTiming[k] == j) + mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k] = + mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][j]; + + mode_lib->vba.VStartupLines = 13; + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { + mode_lib->vba.MaxVStartupLines[k] = + mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k] + - dml_max( + 1.0, + dml_ceil( + mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k] + / (mode_lib->vba.HTotal[k] + / mode_lib->vba.PixelClock[k]), + 1)); + } + + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) + mode_lib->vba.MaximumMaxVStartupLines = dml_max( + mode_lib->vba.MaximumMaxVStartupLines, + mode_lib->vba.MaxVStartupLines[k]); + + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { + mode_lib->vba.cursor_bw[k] = 0.0; + for (j = 0; j < mode_lib->vba.NumberOfCursors[k]; ++j) + mode_lib->vba.cursor_bw[k] += mode_lib->vba.CursorWidth[k][j] + * mode_lib->vba.CursorBPP[k][j] / 8.0 + / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) + * mode_lib->vba.VRatio[k]; + } + + do { + double MaxTotalRDBandwidth = 0; + bool DestinationLineTimesForPrefetchLessThan2 = false; + bool VRatioPrefetchMoreThan4 = false; + bool prefetch_vm_bw_valid = true; + bool prefetch_row_bw_valid = true; + double TWait = CalculateTWait( + mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb], + mode_lib->vba.DRAMClockChangeLatency, + mode_lib->vba.UrgentLatencyPixelDataOnly, + mode_lib->vba.SREnterPlusExitTime); + + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { + if (mode_lib->vba.XFCEnabled[k] == true) { + mode_lib->vba.XFCRemoteSurfaceFlipDelay = + CalculateRemoteSurfaceFlipDelay( + mode_lib, + mode_lib->vba.VRatio[k], + mode_lib->vba.SwathWidthY[k], + dml_ceil( + mode_lib->vba.BytePerPixelDETY[k], + 1), + mode_lib->vba.HTotal[k] + / mode_lib->vba.PixelClock[k], + mode_lib->vba.XFCTSlvVupdateOffset, + mode_lib->vba.XFCTSlvVupdateWidth, + mode_lib->vba.XFCTSlvVreadyOffset, + mode_lib->vba.XFCXBUFLatencyTolerance, + mode_lib->vba.XFCFillBWOverhead, + mode_lib->vba.XFCSlvChunkSize, + mode_lib->vba.XFCBusTransportTime, + mode_lib->vba.TCalc, + TWait, + &mode_lib->vba.SrcActiveDrainRate, + &mode_lib->vba.TInitXFill, + &mode_lib->vba.TslvChk); + } else { + mode_lib->vba.XFCRemoteSurfaceFlipDelay = 0; + } + + CalculateDelayAfterScaler(mode_lib, mode_lib->vba.ReturnBW, mode_lib->vba.ReadBandwidthPlaneLuma[k], mode_lib->vba.ReadBandwidthPlaneChroma[k], mode_lib->vba.TotalDataReadBandwidth, + mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k], mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k], + mode_lib->vba.DPPCLK[k], mode_lib->vba.DISPCLK, mode_lib->vba.PixelClock[k], mode_lib->vba.DSCDelay[k], mode_lib->vba.DPPPerPlane[k], mode_lib->vba.ScalerEnabled[k], mode_lib->vba.NumberOfCursors[k], + mode_lib->vba.DPPCLKDelaySubtotal, mode_lib->vba.DPPCLKDelaySCL, mode_lib->vba.DPPCLKDelaySCLLBOnly, mode_lib->vba.DPPCLKDelayCNVCFormater, mode_lib->vba.DPPCLKDelayCNVCCursor, mode_lib->vba.DISPCLKDelaySubtotal, + mode_lib->vba.SwathWidthY[k] / mode_lib->vba.HRatio[k], mode_lib->vba.OutputFormat[k], mode_lib->vba.HTotal[k], + mode_lib->vba.SwathWidthSingleDPPY[k], mode_lib->vba.BytePerPixelDETY[k], mode_lib->vba.BytePerPixelDETC[k], mode_lib->vba.SwathHeightY[k], mode_lib->vba.SwathHeightC[k], mode_lib->vba.Interlace[k], + mode_lib->vba.ProgressiveToInterlaceUnitInOPP, &mode_lib->vba.DSTXAfterScaler[k], &mode_lib->vba.DSTYAfterScaler[k]); + + mode_lib->vba.ErrorResult[k] = + CalculatePrefetchSchedule( + 
mode_lib, + mode_lib->vba.DPPCLK[k], + mode_lib->vba.DISPCLK, + mode_lib->vba.PixelClock[k], + mode_lib->vba.DCFCLKDeepSleep, + mode_lib->vba.DPPPerPlane[k], + mode_lib->vba.NumberOfCursors[k], + mode_lib->vba.VTotal[k] + - mode_lib->vba.VActive[k], + mode_lib->vba.HTotal[k], + mode_lib->vba.MaxInterDCNTileRepeaters, + dml_min( + mode_lib->vba.VStartupLines, + mode_lib->vba.MaxVStartupLines[k]), + mode_lib->vba.GPUVMMaxPageTableLevels, + mode_lib->vba.GPUVMEnable, + mode_lib->vba.DynamicMetadataEnable[k], + mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k], + mode_lib->vba.DynamicMetadataTransmittedBytes[k], + mode_lib->vba.DCCEnable[k], + mode_lib->vba.UrgentLatencyPixelDataOnly, + mode_lib->vba.UrgentExtraLatency, + mode_lib->vba.TCalc, + mode_lib->vba.PDEAndMetaPTEBytesFrame[k], + mode_lib->vba.MetaRowByte[k], + mode_lib->vba.PixelPTEBytesPerRow[k], + mode_lib->vba.PrefetchSourceLinesY[k], + mode_lib->vba.SwathWidthY[k], + mode_lib->vba.BytePerPixelDETY[k], + mode_lib->vba.VInitPreFillY[k], + mode_lib->vba.MaxNumSwathY[k], + mode_lib->vba.PrefetchSourceLinesC[k], + mode_lib->vba.BytePerPixelDETC[k], + mode_lib->vba.VInitPreFillC[k], + mode_lib->vba.MaxNumSwathC[k], + mode_lib->vba.SwathHeightY[k], + mode_lib->vba.SwathHeightC[k], + TWait, + mode_lib->vba.XFCEnabled[k], + mode_lib->vba.XFCRemoteSurfaceFlipDelay, + mode_lib->vba.Interlace[k], + mode_lib->vba.ProgressiveToInterlaceUnitInOPP, + mode_lib->vba.DSTXAfterScaler[k], + mode_lib->vba.DSTYAfterScaler[k], + &mode_lib->vba.DestinationLinesForPrefetch[k], + &mode_lib->vba.PrefetchBandwidth[k], + &mode_lib->vba.DestinationLinesToRequestVMInVBlank[k], + &mode_lib->vba.DestinationLinesToRequestRowInVBlank[k], + &mode_lib->vba.VRatioPrefetchY[k], + &mode_lib->vba.VRatioPrefetchC[k], + &mode_lib->vba.RequiredPrefetchPixDataBWLuma[k], + &mode_lib->vba.Tno_bw[k], + &mode_lib->vba.VUpdateOffsetPix[k], + &mode_lib->vba.VUpdateWidthPix[k], + &mode_lib->vba.VReadyOffsetPix[k]); + + if (mode_lib->vba.BlendingAndTiming[k] == k) { + mode_lib->vba.VStartup[k] = dml_min( + mode_lib->vba.VStartupLines, + mode_lib->vba.MaxVStartupLines[k]); + if (mode_lib->vba.VStartupRequiredWhenNotEnoughTimeForDynamicMetadata + != 0) { + mode_lib->vba.VStartup[k] = + mode_lib->vba.VStartupRequiredWhenNotEnoughTimeForDynamicMetadata; + } + } else { + mode_lib->vba.VStartup[k] = + dml_min( + mode_lib->vba.VStartupLines, + mode_lib->vba.MaxVStartupLines[mode_lib->vba.BlendingAndTiming[k]]); + } + } + + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { + + if (mode_lib->vba.PDEAndMetaPTEBytesFrame[k] == 0) + mode_lib->vba.prefetch_vm_bw[k] = 0; + else if (mode_lib->vba.DestinationLinesToRequestVMInVBlank[k] > 0) { + mode_lib->vba.prefetch_vm_bw[k] = + (double) mode_lib->vba.PDEAndMetaPTEBytesFrame[k] + / (mode_lib->vba.DestinationLinesToRequestVMInVBlank[k] + * mode_lib->vba.HTotal[k] + / mode_lib->vba.PixelClock[k]); + } else { + mode_lib->vba.prefetch_vm_bw[k] = 0; + prefetch_vm_bw_valid = false; + } + if (mode_lib->vba.MetaRowByte[k] + mode_lib->vba.PixelPTEBytesPerRow[k] + == 0) + mode_lib->vba.prefetch_row_bw[k] = 0; + else if (mode_lib->vba.DestinationLinesToRequestRowInVBlank[k] > 0) { + mode_lib->vba.prefetch_row_bw[k] = + (double) (mode_lib->vba.MetaRowByte[k] + + mode_lib->vba.PixelPTEBytesPerRow[k]) + / (mode_lib->vba.DestinationLinesToRequestRowInVBlank[k] + * mode_lib->vba.HTotal[k] + / mode_lib->vba.PixelClock[k]); + } else { + mode_lib->vba.prefetch_row_bw[k] = 0; + prefetch_row_bw_valid = false; + } + + MaxTotalRDBandwidth = + 
MaxTotalRDBandwidth + mode_lib->vba.cursor_bw[k] + + dml_max( + mode_lib->vba.prefetch_vm_bw[k], + dml_max( + mode_lib->vba.prefetch_row_bw[k], + dml_max( + mode_lib->vba.ReadBandwidthPlaneLuma[k] + + mode_lib->vba.ReadBandwidthPlaneChroma[k], + mode_lib->vba.RequiredPrefetchPixDataBWLuma[k]) + + mode_lib->vba.meta_row_bw[k] + + mode_lib->vba.dpte_row_bw[k])); + + if (mode_lib->vba.DestinationLinesForPrefetch[k] < 2) + DestinationLineTimesForPrefetchLessThan2 = true; + if (mode_lib->vba.VRatioPrefetchY[k] > 4 + || mode_lib->vba.VRatioPrefetchC[k] > 4) + VRatioPrefetchMoreThan4 = true; + } + + if (MaxTotalRDBandwidth <= mode_lib->vba.ReturnBW && prefetch_vm_bw_valid + && prefetch_row_bw_valid && !VRatioPrefetchMoreThan4 + && !DestinationLineTimesForPrefetchLessThan2) + mode_lib->vba.PrefetchModeSupported = true; + else { + mode_lib->vba.PrefetchModeSupported = false; + dml_print( + "DML: CalculatePrefetchSchedule ***failed***. Bandwidth violation. Results are NOT valid\n"); + } + + if (mode_lib->vba.PrefetchModeSupported == true) { + double final_flip_bw[DC__NUM_DPP__MAX]; + unsigned int ImmediateFlipBytes[DC__NUM_DPP__MAX]; + double total_dcn_read_bw_with_flip = 0; + + mode_lib->vba.BandwidthAvailableForImmediateFlip = mode_lib->vba.ReturnBW; + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { + mode_lib->vba.BandwidthAvailableForImmediateFlip = + mode_lib->vba.BandwidthAvailableForImmediateFlip + - mode_lib->vba.cursor_bw[k] + - dml_max( + mode_lib->vba.ReadBandwidthPlaneLuma[k] + + mode_lib->vba.ReadBandwidthPlaneChroma[k] + + mode_lib->vba.qual_row_bw[k], + mode_lib->vba.PrefetchBandwidth[k]); + } + + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { + ImmediateFlipBytes[k] = 0; + if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8 + && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) { + ImmediateFlipBytes[k] = + mode_lib->vba.PDEAndMetaPTEBytesFrame[k] + + mode_lib->vba.MetaRowByte[k] + + mode_lib->vba.PixelPTEBytesPerRow[k]; + } + } + mode_lib->vba.TotImmediateFlipBytes = 0; + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { + if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8 + && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) { + mode_lib->vba.TotImmediateFlipBytes = + mode_lib->vba.TotImmediateFlipBytes + + ImmediateFlipBytes[k]; + } + } + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { + CalculateFlipSchedule( + mode_lib, + mode_lib->vba.UrgentExtraLatency, + mode_lib->vba.UrgentLatencyPixelDataOnly, + mode_lib->vba.GPUVMMaxPageTableLevels, + mode_lib->vba.GPUVMEnable, + mode_lib->vba.BandwidthAvailableForImmediateFlip, + mode_lib->vba.TotImmediateFlipBytes, + mode_lib->vba.SourcePixelFormat[k], + ImmediateFlipBytes[k], + mode_lib->vba.HTotal[k] + / mode_lib->vba.PixelClock[k], + mode_lib->vba.VRatio[k], + mode_lib->vba.Tno_bw[k], + mode_lib->vba.PDEAndMetaPTEBytesFrame[k], + mode_lib->vba.MetaRowByte[k], + mode_lib->vba.PixelPTEBytesPerRow[k], + mode_lib->vba.DCCEnable[k], + mode_lib->vba.dpte_row_height[k], + mode_lib->vba.meta_row_height[k], + mode_lib->vba.qual_row_bw[k], + &mode_lib->vba.DestinationLinesToRequestVMInImmediateFlip[k], + &mode_lib->vba.DestinationLinesToRequestRowInImmediateFlip[k], + &final_flip_bw[k], + &mode_lib->vba.ImmediateFlipSupportedForPipe[k]); + } + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { + total_dcn_read_bw_with_flip = + total_dcn_read_bw_with_flip + + mode_lib->vba.cursor_bw[k] + + dml_max( + mode_lib->vba.prefetch_vm_bw[k], + dml_max( + mode_lib->vba.prefetch_row_bw[k], + 
final_flip_bw[k] + + dml_max( + mode_lib->vba.ReadBandwidthPlaneLuma[k] + + mode_lib->vba.ReadBandwidthPlaneChroma[k], + mode_lib->vba.RequiredPrefetchPixDataBWLuma[k]))); + } + mode_lib->vba.ImmediateFlipSupported = true; + if (total_dcn_read_bw_with_flip > mode_lib->vba.ReturnBW) { + mode_lib->vba.ImmediateFlipSupported = false; + } + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { + if (mode_lib->vba.ImmediateFlipSupportedForPipe[k] == false) { + mode_lib->vba.ImmediateFlipSupported = false; + } + } + } else { + mode_lib->vba.ImmediateFlipSupported = false; + } + + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { + if (mode_lib->vba.ErrorResult[k]) { + mode_lib->vba.PrefetchModeSupported = false; + dml_print( + "DML: CalculatePrefetchSchedule ***failed***. Prefetch schedule violation. Results are NOT valid\n"); + } + } + + mode_lib->vba.VStartupLines = mode_lib->vba.VStartupLines + 1; + } while (!((mode_lib->vba.PrefetchModeSupported + && (!mode_lib->vba.ImmediateFlipSupport + || mode_lib->vba.ImmediateFlipSupported)) + || mode_lib->vba.MaximumMaxVStartupLines < mode_lib->vba.VStartupLines)); + + //Display Pipeline Delivery Time in Prefetch + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { + if (mode_lib->vba.VRatioPrefetchY[k] <= 1) { + mode_lib->vba.DisplayPipeLineDeliveryTimeLumaPrefetch[k] = + mode_lib->vba.SwathWidthY[k] * mode_lib->vba.DPPPerPlane[k] + / mode_lib->vba.HRatio[k] + / mode_lib->vba.PixelClock[k]; + } else { + mode_lib->vba.DisplayPipeLineDeliveryTimeLumaPrefetch[k] = + mode_lib->vba.SwathWidthY[k] + / mode_lib->vba.PSCL_THROUGHPUT_LUMA[k] + / mode_lib->vba.DPPCLK[k]; + } + if (mode_lib->vba.BytePerPixelDETC[k] == 0) { + mode_lib->vba.DisplayPipeLineDeliveryTimeChromaPrefetch[k] = 0; + } else { + if (mode_lib->vba.VRatioPrefetchC[k] <= 1) { + mode_lib->vba.DisplayPipeLineDeliveryTimeChromaPrefetch[k] = + mode_lib->vba.SwathWidthY[k] + * mode_lib->vba.DPPPerPlane[k] + / mode_lib->vba.HRatio[k] + / mode_lib->vba.PixelClock[k]; + } else { + mode_lib->vba.DisplayPipeLineDeliveryTimeChromaPrefetch[k] = + mode_lib->vba.SwathWidthY[k] + / mode_lib->vba.PSCL_THROUGHPUT_LUMA[k] + / mode_lib->vba.DPPCLK[k]; + } + } + } + + // Min TTUVBlank + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { + if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) { + mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k] = true; + mode_lib->vba.AllowDRAMSelfRefreshDuringVBlank[k] = true; + mode_lib->vba.MinTTUVBlank[k] = dml_max( + mode_lib->vba.DRAMClockChangeWatermark, + dml_max( + mode_lib->vba.StutterEnterPlusExitWatermark, + mode_lib->vba.UrgentWatermark)); + } else if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 1) { + mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k] = false; + mode_lib->vba.AllowDRAMSelfRefreshDuringVBlank[k] = true; + mode_lib->vba.MinTTUVBlank[k] = dml_max( + mode_lib->vba.StutterEnterPlusExitWatermark, + mode_lib->vba.UrgentWatermark); + } else { + mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k] = false; + mode_lib->vba.AllowDRAMSelfRefreshDuringVBlank[k] = false; + mode_lib->vba.MinTTUVBlank[k] = mode_lib->vba.UrgentWatermark; + } + if (!mode_lib->vba.DynamicMetadataEnable[k]) + mode_lib->vba.MinTTUVBlank[k] = mode_lib->vba.TCalc + + mode_lib->vba.MinTTUVBlank[k]; + } + + // DCC Configuration + mode_lib->vba.ActiveDPPs = 0; + // NB P-State/DRAM Clock Change Support + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { + mode_lib->vba.ActiveDPPs = 
mode_lib->vba.ActiveDPPs + mode_lib->vba.DPPPerPlane[k]; + } + + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { + double EffectiveLBLatencyHidingY; + double EffectiveLBLatencyHidingC; + double DPPOutputBufferLinesY; + double DPPOutputBufferLinesC; + double DPPOPPBufferingY; + double MaxDETBufferingTimeY; + double ActiveDRAMClockChangeLatencyMarginY; + + mode_lib->vba.LBLatencyHidingSourceLinesY = + dml_min( + mode_lib->vba.MaxLineBufferLines, + (unsigned int) dml_floor( + (double) mode_lib->vba.LineBufferSize + / mode_lib->vba.LBBitPerPixel[k] + / (mode_lib->vba.SwathWidthY[k] + / dml_max( + mode_lib->vba.HRatio[k], + 1.0)), + 1)) - (mode_lib->vba.vtaps[k] - 1); + + mode_lib->vba.LBLatencyHidingSourceLinesC = + dml_min( + mode_lib->vba.MaxLineBufferLines, + (unsigned int) dml_floor( + (double) mode_lib->vba.LineBufferSize + / mode_lib->vba.LBBitPerPixel[k] + / (mode_lib->vba.SwathWidthY[k] + / 2.0 + / dml_max( + mode_lib->vba.HRatio[k] + / 2, + 1.0)), + 1)) + - (mode_lib->vba.VTAPsChroma[k] - 1); + + EffectiveLBLatencyHidingY = mode_lib->vba.LBLatencyHidingSourceLinesY + / mode_lib->vba.VRatio[k] + * (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]); + + EffectiveLBLatencyHidingC = mode_lib->vba.LBLatencyHidingSourceLinesC + / (mode_lib->vba.VRatio[k] / 2) + * (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]); + + if (mode_lib->vba.SwathWidthY[k] > 2 * mode_lib->vba.DPPOutputBufferPixels) { + DPPOutputBufferLinesY = mode_lib->vba.DPPOutputBufferPixels + / mode_lib->vba.SwathWidthY[k]; + } else if (mode_lib->vba.SwathWidthY[k] > mode_lib->vba.DPPOutputBufferPixels) { + DPPOutputBufferLinesY = 0.5; + } else { + DPPOutputBufferLinesY = 1; + } + + if (mode_lib->vba.SwathWidthY[k] / 2 > 2 * mode_lib->vba.DPPOutputBufferPixels) { + DPPOutputBufferLinesC = mode_lib->vba.DPPOutputBufferPixels + / (mode_lib->vba.SwathWidthY[k] / 2); + } else if (mode_lib->vba.SwathWidthY[k] / 2 > mode_lib->vba.DPPOutputBufferPixels) { + DPPOutputBufferLinesC = 0.5; + } else { + DPPOutputBufferLinesC = 1; + } + + DPPOPPBufferingY = (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) + * (DPPOutputBufferLinesY + mode_lib->vba.OPPOutputBufferLines); + MaxDETBufferingTimeY = mode_lib->vba.FullDETBufferingTimeY[k] + + (mode_lib->vba.LinesInDETY[k] + - mode_lib->vba.LinesInDETYRoundedDownToSwath[k]) + / mode_lib->vba.SwathHeightY[k] + * (mode_lib->vba.HTotal[k] + / mode_lib->vba.PixelClock[k]); + + ActiveDRAMClockChangeLatencyMarginY = DPPOPPBufferingY + EffectiveLBLatencyHidingY + + MaxDETBufferingTimeY - mode_lib->vba.DRAMClockChangeWatermark; + + if (mode_lib->vba.ActiveDPPs > 1) { + ActiveDRAMClockChangeLatencyMarginY = + ActiveDRAMClockChangeLatencyMarginY + - (1 - 1 / (mode_lib->vba.ActiveDPPs - 1)) + * mode_lib->vba.SwathHeightY[k] + * (mode_lib->vba.HTotal[k] + / mode_lib->vba.PixelClock[k]); + } + + if (mode_lib->vba.BytePerPixelDETC[k] > 0) { + double DPPOPPBufferingC = (mode_lib->vba.HTotal[k] + / mode_lib->vba.PixelClock[k]) + * (DPPOutputBufferLinesC + + mode_lib->vba.OPPOutputBufferLines); + double MaxDETBufferingTimeC = + mode_lib->vba.FullDETBufferingTimeC[k] + + (mode_lib->vba.LinesInDETC[k] + - mode_lib->vba.LinesInDETCRoundedDownToSwath[k]) + / mode_lib->vba.SwathHeightC[k] + * (mode_lib->vba.HTotal[k] + / mode_lib->vba.PixelClock[k]); + double ActiveDRAMClockChangeLatencyMarginC = DPPOPPBufferingC + + EffectiveLBLatencyHidingC + MaxDETBufferingTimeC + - mode_lib->vba.DRAMClockChangeWatermark; + + if (mode_lib->vba.ActiveDPPs > 1) { + ActiveDRAMClockChangeLatencyMarginC = + 
ActiveDRAMClockChangeLatencyMarginC + - (1 + - 1 + / (mode_lib->vba.ActiveDPPs + - 1)) + * mode_lib->vba.SwathHeightC[k] + * (mode_lib->vba.HTotal[k] + / mode_lib->vba.PixelClock[k]); + } + mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = dml_min( + ActiveDRAMClockChangeLatencyMarginY, + ActiveDRAMClockChangeLatencyMarginC); + } else { + mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = + ActiveDRAMClockChangeLatencyMarginY; + } + + if (mode_lib->vba.WritebackEnable[k]) { + double WritebackDRAMClockChangeLatencyMargin; + + if (mode_lib->vba.WritebackPixelFormat[k] == dm_444_32) { + WritebackDRAMClockChangeLatencyMargin = + (double) (mode_lib->vba.WritebackInterfaceLumaBufferSize + + mode_lib->vba.WritebackInterfaceChromaBufferSize) + / (mode_lib->vba.WritebackDestinationWidth[k] + * mode_lib->vba.WritebackDestinationHeight[k] + / (mode_lib->vba.WritebackSourceHeight[k] + * mode_lib->vba.HTotal[k] + / mode_lib->vba.PixelClock[k]) + * 4) + - mode_lib->vba.WritebackDRAMClockChangeWatermark; + } else if (mode_lib->vba.WritebackPixelFormat[k] == dm_420_10) { + WritebackDRAMClockChangeLatencyMargin = + dml_min( + (double) mode_lib->vba.WritebackInterfaceLumaBufferSize + * 8.0 / 10, + 2.0 + * mode_lib->vba.WritebackInterfaceChromaBufferSize + * 8 / 10) + / (mode_lib->vba.WritebackDestinationWidth[k] + * mode_lib->vba.WritebackDestinationHeight[k] + / (mode_lib->vba.WritebackSourceHeight[k] + * mode_lib->vba.HTotal[k] + / mode_lib->vba.PixelClock[k])) + - mode_lib->vba.WritebackDRAMClockChangeWatermark; + } else { + WritebackDRAMClockChangeLatencyMargin = + dml_min( + (double) mode_lib->vba.WritebackInterfaceLumaBufferSize, + 2.0 + * mode_lib->vba.WritebackInterfaceChromaBufferSize) + / (mode_lib->vba.WritebackDestinationWidth[k] + * mode_lib->vba.WritebackDestinationHeight[k] + / (mode_lib->vba.WritebackSourceHeight[k] + * mode_lib->vba.HTotal[k] + / mode_lib->vba.PixelClock[k])) + - mode_lib->vba.WritebackDRAMClockChangeWatermark; + } + mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = dml_min( + mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k], + WritebackDRAMClockChangeLatencyMargin); + } + } + + mode_lib->vba.MinActiveDRAMClockChangeMargin = 999999; + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { + if (mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] + < mode_lib->vba.MinActiveDRAMClockChangeMargin) { + mode_lib->vba.MinActiveDRAMClockChangeMargin = + mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k]; + } + } + + mode_lib->vba.MinActiveDRAMClockChangeLatencySupported = + mode_lib->vba.MinActiveDRAMClockChangeMargin + + mode_lib->vba.DRAMClockChangeLatency; + + if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) { + mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive; + } else { + if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) { + mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vblank; + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { + if (!mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k]) { + mode_lib->vba.DRAMClockChangeSupport[0][0] = + dm_dram_clock_change_unsupported; + } + } + } else { + mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_unsupported; + } + } + for (k = 0; k <= mode_lib->vba.soc.num_states; k++) + for (j = 0; j < 2; j++) + mode_lib->vba.DRAMClockChangeSupport[k][j] = mode_lib->vba.DRAMClockChangeSupport[0][0]; + + //XFC Parameters: + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { + if (mode_lib->vba.XFCEnabled[k] == true) { 
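+ /*
+  * XFC per-plane programming: the slave vupdate/vready offsets are taken
+  * straight from the XFC timing parameters, TWait is derived from the
+  * prefetch mode, and CalculateRemoteSurfaceFlipDelay() converts that into
+  * a remote surface flip delay.  The fill-level bookkeeping further down
+  * is, roughly:
+  *
+  *   InitFillLevel  = XFCXBUFLatencyTolerance * SrcActiveDrainRate
+  *   FinalFillLevel = XFCRemoteSurfaceFlipDelay * SrcActiveDrainRate + FinalFillMargin
+  *   TFinalxFill    = max(0, FinalFillLevel - InitFillLevel) /
+  *                    (SrcActiveDrainRate * XFCFillBWOverhead / 100)
+  *
+  * XFCPrefetchMargin is then the flip delay plus TFinalxFill plus the
+  * VM/row request lines converted to line time.
+  */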
+ double TWait; + + mode_lib->vba.XFCSlaveVUpdateOffset[k] = mode_lib->vba.XFCTSlvVupdateOffset; + mode_lib->vba.XFCSlaveVupdateWidth[k] = mode_lib->vba.XFCTSlvVupdateWidth; + mode_lib->vba.XFCSlaveVReadyOffset[k] = mode_lib->vba.XFCTSlvVreadyOffset; + TWait = CalculateTWait( + mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb], + mode_lib->vba.DRAMClockChangeLatency, + mode_lib->vba.UrgentLatencyPixelDataOnly, + mode_lib->vba.SREnterPlusExitTime); + mode_lib->vba.XFCRemoteSurfaceFlipDelay = CalculateRemoteSurfaceFlipDelay( + mode_lib, + mode_lib->vba.VRatio[k], + mode_lib->vba.SwathWidthY[k], + dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1), + mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k], + mode_lib->vba.XFCTSlvVupdateOffset, + mode_lib->vba.XFCTSlvVupdateWidth, + mode_lib->vba.XFCTSlvVreadyOffset, + mode_lib->vba.XFCXBUFLatencyTolerance, + mode_lib->vba.XFCFillBWOverhead, + mode_lib->vba.XFCSlvChunkSize, + mode_lib->vba.XFCBusTransportTime, + mode_lib->vba.TCalc, + TWait, + &mode_lib->vba.SrcActiveDrainRate, + &mode_lib->vba.TInitXFill, + &mode_lib->vba.TslvChk); + mode_lib->vba.XFCRemoteSurfaceFlipLatency[k] = + dml_floor( + mode_lib->vba.XFCRemoteSurfaceFlipDelay + / (mode_lib->vba.HTotal[k] + / mode_lib->vba.PixelClock[k]), + 1); + mode_lib->vba.XFCTransferDelay[k] = + dml_ceil( + mode_lib->vba.XFCBusTransportTime + / (mode_lib->vba.HTotal[k] + / mode_lib->vba.PixelClock[k]), + 1); + mode_lib->vba.XFCPrechargeDelay[k] = + dml_ceil( + (mode_lib->vba.XFCBusTransportTime + + mode_lib->vba.TInitXFill + + mode_lib->vba.TslvChk) + / (mode_lib->vba.HTotal[k] + / mode_lib->vba.PixelClock[k]), + 1); + mode_lib->vba.InitFillLevel = mode_lib->vba.XFCXBUFLatencyTolerance + * mode_lib->vba.SrcActiveDrainRate; + mode_lib->vba.FinalFillMargin = + (mode_lib->vba.DestinationLinesToRequestVMInVBlank[k] + + mode_lib->vba.DestinationLinesToRequestRowInVBlank[k]) + * mode_lib->vba.HTotal[k] + / mode_lib->vba.PixelClock[k] + * mode_lib->vba.SrcActiveDrainRate + + mode_lib->vba.XFCFillConstant; + mode_lib->vba.FinalFillLevel = mode_lib->vba.XFCRemoteSurfaceFlipDelay + * mode_lib->vba.SrcActiveDrainRate + + mode_lib->vba.FinalFillMargin; + mode_lib->vba.RemainingFillLevel = dml_max( + 0.0, + mode_lib->vba.FinalFillLevel - mode_lib->vba.InitFillLevel); + mode_lib->vba.TFinalxFill = mode_lib->vba.RemainingFillLevel + / (mode_lib->vba.SrcActiveDrainRate + * mode_lib->vba.XFCFillBWOverhead / 100); + mode_lib->vba.XFCPrefetchMargin[k] = + mode_lib->vba.XFCRemoteSurfaceFlipDelay + + mode_lib->vba.TFinalxFill + + (mode_lib->vba.DestinationLinesToRequestVMInVBlank[k] + + mode_lib->vba.DestinationLinesToRequestRowInVBlank[k]) + * mode_lib->vba.HTotal[k] + / mode_lib->vba.PixelClock[k]; + } else { + mode_lib->vba.XFCSlaveVUpdateOffset[k] = 0; + mode_lib->vba.XFCSlaveVupdateWidth[k] = 0; + mode_lib->vba.XFCSlaveVReadyOffset[k] = 0; + mode_lib->vba.XFCRemoteSurfaceFlipLatency[k] = 0; + mode_lib->vba.XFCPrechargeDelay[k] = 0; + mode_lib->vba.XFCTransferDelay[k] = 0; + mode_lib->vba.XFCPrefetchMargin[k] = 0; + } + } + { + unsigned int VStartupMargin = 0; + bool FirstMainPlane = true; + + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { + if (mode_lib->vba.BlendingAndTiming[k] == k) { + unsigned int Margin = (mode_lib->vba.MaxVStartupLines[k] - mode_lib->vba.VStartup[k]) + * mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]; + + if (FirstMainPlane) { + VStartupMargin = Margin; + FirstMainPlane = false; + } else + VStartupMargin = dml_min(VStartupMargin, Margin); + } + + 
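+ /*
+  * VStartupMargin above is the smallest headroom, in time, that any master
+  * plane (BlendingAndTiming[k] == k) has between its programmed VStartup
+  * and MaxVStartupLines:
+  *
+  *   Margin = (MaxVStartupLines[k] - VStartup[k]) * HTotal[k] / PixelClock[k]
+  *
+  * When UseMaximumVStartup is set, planes whose VTotal is fixed (no DRR or
+  * late flip) are bumped to the maximum VStartup of their master timing
+  * below.
+  */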
if (mode_lib->vba.UseMaximumVStartup) { + if (mode_lib->vba.VTotal_Max[k] == mode_lib->vba.VTotal[k]) { + //only use max vstart if it is not drr or lateflip. + mode_lib->vba.VStartup[k] = mode_lib->vba.MaxVStartupLines[mode_lib->vba.BlendingAndTiming[k]]; + } + } + } +} +} + +static void dml20v2_DisplayPipeConfiguration(struct display_mode_lib *mode_lib) +{ + double BytePerPixDETY; + double BytePerPixDETC; + double Read256BytesBlockHeightY; + double Read256BytesBlockHeightC; + double Read256BytesBlockWidthY; + double Read256BytesBlockWidthC; + double MaximumSwathHeightY; + double MaximumSwathHeightC; + double MinimumSwathHeightY; + double MinimumSwathHeightC; + double SwathWidth; + double SwathWidthGranularityY; + double SwathWidthGranularityC; + double RoundedUpMaxSwathSizeBytesY; + double RoundedUpMaxSwathSizeBytesC; + unsigned int j, k; + + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { + bool MainPlaneDoesODMCombine = false; + + if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) { + BytePerPixDETY = 8; + BytePerPixDETC = 0; + } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32) { + BytePerPixDETY = 4; + BytePerPixDETC = 0; + } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_16) { + BytePerPixDETY = 2; + BytePerPixDETC = 0; + } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_8) { + BytePerPixDETY = 1; + BytePerPixDETC = 0; + } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) { + BytePerPixDETY = 1; + BytePerPixDETC = 2; + } else { + BytePerPixDETY = 4.0 / 3.0; + BytePerPixDETC = 8.0 / 3.0; + } + + if ((mode_lib->vba.SourcePixelFormat[k] == dm_444_64 + || mode_lib->vba.SourcePixelFormat[k] == dm_444_32 + || mode_lib->vba.SourcePixelFormat[k] == dm_444_16 + || mode_lib->vba.SourcePixelFormat[k] == dm_444_8)) { + if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) { + Read256BytesBlockHeightY = 1; + } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) { + Read256BytesBlockHeightY = 4; + } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32 + || mode_lib->vba.SourcePixelFormat[k] == dm_444_16) { + Read256BytesBlockHeightY = 8; + } else { + Read256BytesBlockHeightY = 16; + } + Read256BytesBlockWidthY = 256 / dml_ceil(BytePerPixDETY, 1) + / Read256BytesBlockHeightY; + Read256BytesBlockHeightC = 0; + Read256BytesBlockWidthC = 0; + } else { + if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) { + Read256BytesBlockHeightY = 1; + Read256BytesBlockHeightC = 1; + } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) { + Read256BytesBlockHeightY = 16; + Read256BytesBlockHeightC = 8; + } else { + Read256BytesBlockHeightY = 8; + Read256BytesBlockHeightC = 8; + } + Read256BytesBlockWidthY = 256 / dml_ceil(BytePerPixDETY, 1) + / Read256BytesBlockHeightY; + Read256BytesBlockWidthC = 256 / dml_ceil(BytePerPixDETC, 2) + / Read256BytesBlockHeightC; + } + + if (mode_lib->vba.SourceScan[k] == dm_horz) { + MaximumSwathHeightY = Read256BytesBlockHeightY; + MaximumSwathHeightC = Read256BytesBlockHeightC; + } else { + MaximumSwathHeightY = Read256BytesBlockWidthY; + MaximumSwathHeightC = Read256BytesBlockWidthC; + } + + if ((mode_lib->vba.SourcePixelFormat[k] == dm_444_64 + || mode_lib->vba.SourcePixelFormat[k] == dm_444_32 + || mode_lib->vba.SourcePixelFormat[k] == dm_444_16 + || mode_lib->vba.SourcePixelFormat[k] == dm_444_8)) { + if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear + || (mode_lib->vba.SourcePixelFormat[k] == dm_444_64 + && (mode_lib->vba.SurfaceTiling[k] + == dm_sw_4kb_s + || mode_lib->vba.SurfaceTiling[k] + == dm_sw_4kb_s_x + || 
mode_lib->vba.SurfaceTiling[k] + == dm_sw_64kb_s + || mode_lib->vba.SurfaceTiling[k] + == dm_sw_64kb_s_t + || mode_lib->vba.SurfaceTiling[k] + == dm_sw_64kb_s_x + || mode_lib->vba.SurfaceTiling[k] + == dm_sw_var_s + || mode_lib->vba.SurfaceTiling[k] + == dm_sw_var_s_x) + && mode_lib->vba.SourceScan[k] == dm_horz)) { + MinimumSwathHeightY = MaximumSwathHeightY; + } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_8 + && mode_lib->vba.SourceScan[k] != dm_horz) { + MinimumSwathHeightY = MaximumSwathHeightY; + } else { + MinimumSwathHeightY = MaximumSwathHeightY / 2.0; + } + MinimumSwathHeightC = MaximumSwathHeightC; + } else { + if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) { + MinimumSwathHeightY = MaximumSwathHeightY; + MinimumSwathHeightC = MaximumSwathHeightC; + } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8 + && mode_lib->vba.SourceScan[k] == dm_horz) { + MinimumSwathHeightY = MaximumSwathHeightY / 2.0; + MinimumSwathHeightC = MaximumSwathHeightC; + } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10 + && mode_lib->vba.SourceScan[k] == dm_horz) { + MinimumSwathHeightC = MaximumSwathHeightC / 2.0; + MinimumSwathHeightY = MaximumSwathHeightY; + } else { + MinimumSwathHeightY = MaximumSwathHeightY; + MinimumSwathHeightC = MaximumSwathHeightC; + } + } + + if (mode_lib->vba.SourceScan[k] == dm_horz) { + SwathWidth = mode_lib->vba.ViewportWidth[k]; + } else { + SwathWidth = mode_lib->vba.ViewportHeight[k]; + } + + if (mode_lib->vba.ODMCombineEnabled[k] == true) { + MainPlaneDoesODMCombine = true; + } + for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) { + if (mode_lib->vba.BlendingAndTiming[k] == j + && mode_lib->vba.ODMCombineEnabled[j] == true) { + MainPlaneDoesODMCombine = true; + } + } + + if (MainPlaneDoesODMCombine == true) { + SwathWidth = dml_min( + SwathWidth, + mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k]); + } else { + if (mode_lib->vba.DPPPerPlane[k] == 0) + SwathWidth = 0; + else + SwathWidth = SwathWidth / mode_lib->vba.DPPPerPlane[k]; + } + + SwathWidthGranularityY = 256 / dml_ceil(BytePerPixDETY, 1) / MaximumSwathHeightY; + RoundedUpMaxSwathSizeBytesY = (dml_ceil( + (double) (SwathWidth - 1), + SwathWidthGranularityY) + SwathWidthGranularityY) * BytePerPixDETY + * MaximumSwathHeightY; + if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10) { + RoundedUpMaxSwathSizeBytesY = dml_ceil(RoundedUpMaxSwathSizeBytesY, 256) + + 256; + } + if (MaximumSwathHeightC > 0) { + SwathWidthGranularityC = 256.0 / dml_ceil(BytePerPixDETC, 2) + / MaximumSwathHeightC; + RoundedUpMaxSwathSizeBytesC = (dml_ceil( + (double) (SwathWidth / 2.0 - 1), + SwathWidthGranularityC) + SwathWidthGranularityC) + * BytePerPixDETC * MaximumSwathHeightC; + if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10) { + RoundedUpMaxSwathSizeBytesC = dml_ceil( + RoundedUpMaxSwathSizeBytesC, + 256) + 256; + } + } else + RoundedUpMaxSwathSizeBytesC = 0.0; + + if (RoundedUpMaxSwathSizeBytesY + RoundedUpMaxSwathSizeBytesC + <= mode_lib->vba.DETBufferSizeInKByte * 1024.0 / 2.0) { + mode_lib->vba.SwathHeightY[k] = MaximumSwathHeightY; + mode_lib->vba.SwathHeightC[k] = MaximumSwathHeightC; + } else { + mode_lib->vba.SwathHeightY[k] = MinimumSwathHeightY; + mode_lib->vba.SwathHeightC[k] = MinimumSwathHeightC; + } + + if (mode_lib->vba.SwathHeightC[k] == 0) { + mode_lib->vba.DETBufferSizeY[k] = mode_lib->vba.DETBufferSizeInKByte * 1024; + mode_lib->vba.DETBufferSizeC[k] = 0; + } else if (mode_lib->vba.SwathHeightY[k] <= mode_lib->vba.SwathHeightC[k]) { + mode_lib->vba.DETBufferSizeY[k] = 
mode_lib->vba.DETBufferSizeInKByte + * 1024.0 / 2; + mode_lib->vba.DETBufferSizeC[k] = mode_lib->vba.DETBufferSizeInKByte + * 1024.0 / 2; + } else { + mode_lib->vba.DETBufferSizeY[k] = mode_lib->vba.DETBufferSizeInKByte + * 1024.0 * 2 / 3; + mode_lib->vba.DETBufferSizeC[k] = mode_lib->vba.DETBufferSizeInKByte + * 1024.0 / 3; + } + } +} + +static double CalculateTWait( + unsigned int PrefetchMode, + double DRAMClockChangeLatency, + double UrgentLatencyPixelDataOnly, + double SREnterPlusExitTime) +{ + if (PrefetchMode == 0) { + return dml_max( + DRAMClockChangeLatency + UrgentLatencyPixelDataOnly, + dml_max(SREnterPlusExitTime, UrgentLatencyPixelDataOnly)); + } else if (PrefetchMode == 1) { + return dml_max(SREnterPlusExitTime, UrgentLatencyPixelDataOnly); + } else { + return UrgentLatencyPixelDataOnly; + } +} + +static double CalculateRemoteSurfaceFlipDelay( + struct display_mode_lib *mode_lib, + double VRatio, + double SwathWidth, + double Bpp, + double LineTime, + double XFCTSlvVupdateOffset, + double XFCTSlvVupdateWidth, + double XFCTSlvVreadyOffset, + double XFCXBUFLatencyTolerance, + double XFCFillBWOverhead, + double XFCSlvChunkSize, + double XFCBusTransportTime, + double TCalc, + double TWait, + double *SrcActiveDrainRate, + double *TInitXFill, + double *TslvChk) +{ + double TSlvSetup, AvgfillRate, result; + + *SrcActiveDrainRate = VRatio * SwathWidth * Bpp / LineTime; + TSlvSetup = XFCTSlvVupdateOffset + XFCTSlvVupdateWidth + XFCTSlvVreadyOffset; + *TInitXFill = XFCXBUFLatencyTolerance / (1 + XFCFillBWOverhead / 100); + AvgfillRate = *SrcActiveDrainRate * (1 + XFCFillBWOverhead / 100); + *TslvChk = XFCSlvChunkSize / AvgfillRate; + dml_print( + "DML::CalculateRemoteSurfaceFlipDelay: SrcActiveDrainRate: %f\n", + *SrcActiveDrainRate); + dml_print("DML::CalculateRemoteSurfaceFlipDelay: TSlvSetup: %f\n", TSlvSetup); + dml_print("DML::CalculateRemoteSurfaceFlipDelay: TInitXFill: %f\n", *TInitXFill); + dml_print("DML::CalculateRemoteSurfaceFlipDelay: AvgfillRate: %f\n", AvgfillRate); + dml_print("DML::CalculateRemoteSurfaceFlipDelay: TslvChk: %f\n", *TslvChk); + result = 2 * XFCBusTransportTime + TSlvSetup + TCalc + TWait + *TslvChk + *TInitXFill; // TODO: This doesn't seem to match programming guide + dml_print("DML::CalculateRemoteSurfaceFlipDelay: RemoteSurfaceFlipDelay: %f\n", result); + return result; +} + +static double CalculateWriteBackDelay( + enum source_format_class WritebackPixelFormat, + double WritebackHRatio, + double WritebackVRatio, + unsigned int WritebackLumaHTaps, + unsigned int WritebackLumaVTaps, + unsigned int WritebackChromaHTaps, + unsigned int WritebackChromaVTaps, + unsigned int WritebackDestinationWidth) +{ + double CalculateWriteBackDelay = + dml_max( + dml_ceil(WritebackLumaHTaps / 4.0, 1) / WritebackHRatio, + WritebackLumaVTaps * dml_ceil(1.0 / WritebackVRatio, 1) + * dml_ceil( + WritebackDestinationWidth + / 4.0, + 1) + + dml_ceil(1.0 / WritebackVRatio, 1) + * (dml_ceil( + WritebackLumaVTaps + / 4.0, + 1) + 4)); + + if (WritebackPixelFormat != dm_444_32) { + CalculateWriteBackDelay = + dml_max( + CalculateWriteBackDelay, + dml_max( + dml_ceil( + WritebackChromaHTaps + / 2.0, + 1) + / (2 + * WritebackHRatio), + WritebackChromaVTaps + * dml_ceil( + 1 + / (2 + * WritebackVRatio), + 1) + * dml_ceil( + WritebackDestinationWidth + / 2.0 + / 2.0, + 1) + + dml_ceil( + 1 + / (2 + * WritebackVRatio), + 1) + * (dml_ceil( + WritebackChromaVTaps + / 4.0, + 1) + + 4))); + } + return CalculateWriteBackDelay; +} + +static void CalculateActiveRowBandwidth( + bool 
GPUVMEnable, + enum source_format_class SourcePixelFormat, + double VRatio, + bool DCCEnable, + double LineTime, + unsigned int MetaRowByteLuma, + unsigned int MetaRowByteChroma, + unsigned int meta_row_height_luma, + unsigned int meta_row_height_chroma, + unsigned int PixelPTEBytesPerRowLuma, + unsigned int PixelPTEBytesPerRowChroma, + unsigned int dpte_row_height_luma, + unsigned int dpte_row_height_chroma, + double *meta_row_bw, + double *dpte_row_bw, + double *qual_row_bw) +{ + if (DCCEnable != true) { + *meta_row_bw = 0; + } else if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10) { + *meta_row_bw = VRatio * MetaRowByteLuma / (meta_row_height_luma * LineTime) + + VRatio / 2 * MetaRowByteChroma + / (meta_row_height_chroma * LineTime); + } else { + *meta_row_bw = VRatio * MetaRowByteLuma / (meta_row_height_luma * LineTime); + } + + if (GPUVMEnable != true) { + *dpte_row_bw = 0; + } else if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10) { + *dpte_row_bw = VRatio * PixelPTEBytesPerRowLuma / (dpte_row_height_luma * LineTime) + + VRatio / 2 * PixelPTEBytesPerRowChroma + / (dpte_row_height_chroma * LineTime); + } else { + *dpte_row_bw = VRatio * PixelPTEBytesPerRowLuma / (dpte_row_height_luma * LineTime); + } + + if ((SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10)) { + *qual_row_bw = *meta_row_bw + *dpte_row_bw; + } else { + *qual_row_bw = 0; + } +} + +static void CalculateFlipSchedule( + struct display_mode_lib *mode_lib, + double UrgentExtraLatency, + double UrgentLatencyPixelDataOnly, + unsigned int GPUVMMaxPageTableLevels, + bool GPUVMEnable, + double BandwidthAvailableForImmediateFlip, + unsigned int TotImmediateFlipBytes, + enum source_format_class SourcePixelFormat, + unsigned int ImmediateFlipBytes, + double LineTime, + double VRatio, + double Tno_bw, + double PDEAndMetaPTEBytesFrame, + unsigned int MetaRowByte, + unsigned int PixelPTEBytesPerRow, + bool DCCEnable, + unsigned int dpte_row_height, + unsigned int meta_row_height, + double qual_row_bw, + double *DestinationLinesToRequestVMInImmediateFlip, + double *DestinationLinesToRequestRowInImmediateFlip, + double *final_flip_bw, + bool *ImmediateFlipSupportedForPipe) +{ + double min_row_time = 0.0; + + if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10) { + *DestinationLinesToRequestVMInImmediateFlip = 0.0; + *DestinationLinesToRequestRowInImmediateFlip = 0.0; + *final_flip_bw = qual_row_bw; + *ImmediateFlipSupportedForPipe = true; + } else { + double TimeForFetchingMetaPTEImmediateFlip; + double TimeForFetchingRowInVBlankImmediateFlip; + + if (GPUVMEnable == true) { + mode_lib->vba.ImmediateFlipBW[0] = BandwidthAvailableForImmediateFlip + * ImmediateFlipBytes / TotImmediateFlipBytes; + TimeForFetchingMetaPTEImmediateFlip = + dml_max( + Tno_bw + + PDEAndMetaPTEBytesFrame + / mode_lib->vba.ImmediateFlipBW[0], + dml_max( + UrgentExtraLatency + + UrgentLatencyPixelDataOnly + * (GPUVMMaxPageTableLevels + - 1), + LineTime / 4.0)); + } else { + TimeForFetchingMetaPTEImmediateFlip = 0; + } + + *DestinationLinesToRequestVMInImmediateFlip = dml_floor( + 4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime + 0.125), + 1) / 4.0; + + if ((GPUVMEnable == true || DCCEnable == true)) { + mode_lib->vba.ImmediateFlipBW[0] = BandwidthAvailableForImmediateFlip + * ImmediateFlipBytes / TotImmediateFlipBytes; + TimeForFetchingRowInVBlankImmediateFlip = dml_max( + (MetaRowByte + PixelPTEBytesPerRow) + / mode_lib->vba.ImmediateFlipBW[0], + dml_max(UrgentLatencyPixelDataOnly, LineTime 
/ 4.0)); + } else { + TimeForFetchingRowInVBlankImmediateFlip = 0; + } + + *DestinationLinesToRequestRowInImmediateFlip = dml_floor( + 4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime + 0.125), + 1) / 4.0; + + if (GPUVMEnable == true) { + *final_flip_bw = + dml_max( + PDEAndMetaPTEBytesFrame + / (*DestinationLinesToRequestVMInImmediateFlip + * LineTime), + (MetaRowByte + PixelPTEBytesPerRow) + / (TimeForFetchingRowInVBlankImmediateFlip + * LineTime)); + } else if (MetaRowByte + PixelPTEBytesPerRow > 0) { + *final_flip_bw = (MetaRowByte + PixelPTEBytesPerRow) + / (TimeForFetchingRowInVBlankImmediateFlip * LineTime); + } else { + *final_flip_bw = 0; + } + + if (GPUVMEnable && !DCCEnable) + min_row_time = dpte_row_height * LineTime / VRatio; + else if (!GPUVMEnable && DCCEnable) + min_row_time = meta_row_height * LineTime / VRatio; + else + min_row_time = dml_min(dpte_row_height, meta_row_height) * LineTime + / VRatio; + + if (*DestinationLinesToRequestVMInImmediateFlip >= 8 + || *DestinationLinesToRequestRowInImmediateFlip >= 16 + || TimeForFetchingMetaPTEImmediateFlip + + 2 * TimeForFetchingRowInVBlankImmediateFlip + > min_row_time) + *ImmediateFlipSupportedForPipe = false; + else + *ImmediateFlipSupportedForPipe = true; + } +} + +static unsigned int TruncToValidBPP( + double DecimalBPP, + bool DSCEnabled, + enum output_encoder_class Output, + enum output_format_class Format, + unsigned int DSCInputBitPerComponent) +{ + if (Output == dm_hdmi) { + if (Format == dm_420) { + if (DecimalBPP >= 18) + return 18; + else if (DecimalBPP >= 15) + return 15; + else if (DecimalBPP >= 12) + return 12; + else + return BPP_INVALID; + } else if (Format == dm_444) { + if (DecimalBPP >= 36) + return 36; + else if (DecimalBPP >= 30) + return 30; + else if (DecimalBPP >= 24) + return 24; + else if (DecimalBPP >= 18) + return 18; + else + return BPP_INVALID; + } else { + if (DecimalBPP / 1.5 >= 24) + return 24; + else if (DecimalBPP / 1.5 >= 20) + return 20; + else if (DecimalBPP / 1.5 >= 16) + return 16; + else + return BPP_INVALID; + } + } else { + if (DSCEnabled) { + if (Format == dm_420) { + if (DecimalBPP < 6) + return BPP_INVALID; + else if (DecimalBPP >= 1.5 * DSCInputBitPerComponent - 1 / 16) + return 1.5 * DSCInputBitPerComponent - 1 / 16; + else + return dml_floor(16 * DecimalBPP, 1) / 16; + } else if (Format == dm_n422) { + if (DecimalBPP < 7) + return BPP_INVALID; + else if (DecimalBPP >= 2 * DSCInputBitPerComponent - 1 / 16) + return 2 * DSCInputBitPerComponent - 1 / 16; + else + return dml_floor(16 * DecimalBPP, 1) / 16; + } else { + if (DecimalBPP < 8) + return BPP_INVALID; + else if (DecimalBPP >= 3 * DSCInputBitPerComponent - 1 / 16) + return 3 * DSCInputBitPerComponent - 1 / 16; + else + return dml_floor(16 * DecimalBPP, 1) / 16; + } + } else if (Format == dm_420) { + if (DecimalBPP >= 18) + return 18; + else if (DecimalBPP >= 15) + return 15; + else if (DecimalBPP >= 12) + return 12; + else + return BPP_INVALID; + } else if (Format == dm_s422 || Format == dm_n422) { + if (DecimalBPP >= 24) + return 24; + else if (DecimalBPP >= 20) + return 20; + else if (DecimalBPP >= 16) + return 16; + else + return BPP_INVALID; + } else { + if (DecimalBPP >= 36) + return 36; + else if (DecimalBPP >= 30) + return 30; + else if (DecimalBPP >= 24) + return 24; + else if (DecimalBPP >= 18) + return 18; + else + return BPP_INVALID; + } + } +} + +void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib) +{ + struct vba_vars_st *locals = &mode_lib->vba; + + int i; + unsigned 
int j, k, m; + + /*MODE SUPPORT, VOLTAGE STATE AND SOC CONFIGURATION*/ + + /*Scale Ratio, taps Support Check*/ + + mode_lib->vba.ScaleRatioAndTapsSupport = true; + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + if (mode_lib->vba.ScalerEnabled[k] == false + && ((mode_lib->vba.SourcePixelFormat[k] != dm_444_64 + && mode_lib->vba.SourcePixelFormat[k] != dm_444_32 + && mode_lib->vba.SourcePixelFormat[k] != dm_444_16 + && mode_lib->vba.SourcePixelFormat[k] != dm_mono_16 + && mode_lib->vba.SourcePixelFormat[k] != dm_mono_8) + || mode_lib->vba.HRatio[k] != 1.0 + || mode_lib->vba.htaps[k] != 1.0 + || mode_lib->vba.VRatio[k] != 1.0 + || mode_lib->vba.vtaps[k] != 1.0)) { + mode_lib->vba.ScaleRatioAndTapsSupport = false; + } else if (mode_lib->vba.vtaps[k] < 1.0 || mode_lib->vba.vtaps[k] > 8.0 + || mode_lib->vba.htaps[k] < 1.0 || mode_lib->vba.htaps[k] > 8.0 + || (mode_lib->vba.htaps[k] > 1.0 + && (mode_lib->vba.htaps[k] % 2) == 1) + || mode_lib->vba.HRatio[k] > mode_lib->vba.MaxHSCLRatio + || mode_lib->vba.VRatio[k] > mode_lib->vba.MaxVSCLRatio + || mode_lib->vba.HRatio[k] > mode_lib->vba.htaps[k] + || mode_lib->vba.VRatio[k] > mode_lib->vba.vtaps[k] + || (mode_lib->vba.SourcePixelFormat[k] != dm_444_64 + && mode_lib->vba.SourcePixelFormat[k] != dm_444_32 + && mode_lib->vba.SourcePixelFormat[k] != dm_444_16 + && mode_lib->vba.SourcePixelFormat[k] != dm_mono_16 + && mode_lib->vba.SourcePixelFormat[k] != dm_mono_8 + && (mode_lib->vba.HRatio[k] / 2.0 + > mode_lib->vba.HTAPsChroma[k] + || mode_lib->vba.VRatio[k] / 2.0 + > mode_lib->vba.VTAPsChroma[k]))) { + mode_lib->vba.ScaleRatioAndTapsSupport = false; + } + } + /*Source Format, Pixel Format and Scan Support Check*/ + + mode_lib->vba.SourceFormatPixelAndScanSupport = true; + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + if ((mode_lib->vba.SurfaceTiling[k] == dm_sw_linear + && mode_lib->vba.SourceScan[k] != dm_horz) + || ((mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_d + || mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_d_x + || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d + || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d_t + || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d_x + || mode_lib->vba.SurfaceTiling[k] == dm_sw_var_d + || mode_lib->vba.SurfaceTiling[k] == dm_sw_var_d_x) + && mode_lib->vba.SourcePixelFormat[k] != dm_444_64) + || (mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_r_x + && (mode_lib->vba.SourcePixelFormat[k] == dm_mono_8 + || mode_lib->vba.SourcePixelFormat[k] + == dm_420_8 + || mode_lib->vba.SourcePixelFormat[k] + == dm_420_10)) + || (((mode_lib->vba.SurfaceTiling[k] == dm_sw_gfx7_2d_thin_gl + || mode_lib->vba.SurfaceTiling[k] + == dm_sw_gfx7_2d_thin_lvp) + && !((mode_lib->vba.SourcePixelFormat[k] + == dm_444_64 + || mode_lib->vba.SourcePixelFormat[k] + == dm_444_32) + && mode_lib->vba.SourceScan[k] + == dm_horz + && mode_lib->vba.SupportGFX7CompatibleTilingIn32bppAnd64bpp + == true + && mode_lib->vba.DCCEnable[k] + == false)) + || (mode_lib->vba.DCCEnable[k] == true + && (mode_lib->vba.SurfaceTiling[k] + == dm_sw_linear + || mode_lib->vba.SourcePixelFormat[k] + == dm_420_8 + || mode_lib->vba.SourcePixelFormat[k] + == dm_420_10)))) { + mode_lib->vba.SourceFormatPixelAndScanSupport = false; + } + } + /*Bandwidth Support Check*/ + + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) { + locals->BytePerPixelInDETY[k] = 8.0; + locals->BytePerPixelInDETC[k] = 0.0; + } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32) { + 
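+ /*
+  * 32bpp packed formats occupy 4 bytes/pixel in the luma DET and no
+  * chroma DET; the 4:2:0 cases below split into separate luma/chroma
+  * values (1 and 2 bytes for 420_8, 4/3 and 8/3 for 420_10).
+  */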
locals->BytePerPixelInDETY[k] = 4.0; + locals->BytePerPixelInDETC[k] = 0.0; + } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_16 + || mode_lib->vba.SourcePixelFormat[k] == dm_mono_16) { + locals->BytePerPixelInDETY[k] = 2.0; + locals->BytePerPixelInDETC[k] = 0.0; + } else if (mode_lib->vba.SourcePixelFormat[k] == dm_mono_8) { + locals->BytePerPixelInDETY[k] = 1.0; + locals->BytePerPixelInDETC[k] = 0.0; + } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) { + locals->BytePerPixelInDETY[k] = 1.0; + locals->BytePerPixelInDETC[k] = 2.0; + } else { + locals->BytePerPixelInDETY[k] = 4.0 / 3; + locals->BytePerPixelInDETC[k] = 8.0 / 3; + } + if (mode_lib->vba.SourceScan[k] == dm_horz) { + locals->SwathWidthYSingleDPP[k] = mode_lib->vba.ViewportWidth[k]; + } else { + locals->SwathWidthYSingleDPP[k] = mode_lib->vba.ViewportHeight[k]; + } + } + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + locals->ReadBandwidthLuma[k] = locals->SwathWidthYSingleDPP[k] * dml_ceil(locals->BytePerPixelInDETY[k], 1.0) + / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * mode_lib->vba.VRatio[k]; + locals->ReadBandwidthChroma[k] = locals->SwathWidthYSingleDPP[k] / 2 * dml_ceil(locals->BytePerPixelInDETC[k], 2.0) + / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) * mode_lib->vba.VRatio[k] / 2.0; + locals->ReadBandwidth[k] = locals->ReadBandwidthLuma[k] + locals->ReadBandwidthChroma[k]; + } + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + if (mode_lib->vba.WritebackEnable[k] == true + && mode_lib->vba.WritebackPixelFormat[k] == dm_444_32) { + locals->WriteBandwidth[k] = mode_lib->vba.WritebackDestinationWidth[k] + * mode_lib->vba.WritebackDestinationHeight[k] + / (mode_lib->vba.WritebackSourceHeight[k] + * mode_lib->vba.HTotal[k] + / mode_lib->vba.PixelClock[k]) * 4.0; + } else if (mode_lib->vba.WritebackEnable[k] == true + && mode_lib->vba.WritebackPixelFormat[k] == dm_420_10) { + locals->WriteBandwidth[k] = mode_lib->vba.WritebackDestinationWidth[k] + * mode_lib->vba.WritebackDestinationHeight[k] + / (mode_lib->vba.WritebackSourceHeight[k] + * mode_lib->vba.HTotal[k] + / mode_lib->vba.PixelClock[k]) * 3.0; + } else if (mode_lib->vba.WritebackEnable[k] == true) { + locals->WriteBandwidth[k] = mode_lib->vba.WritebackDestinationWidth[k] + * mode_lib->vba.WritebackDestinationHeight[k] + / (mode_lib->vba.WritebackSourceHeight[k] + * mode_lib->vba.HTotal[k] + / mode_lib->vba.PixelClock[k]) * 1.5; + } else { + locals->WriteBandwidth[k] = 0.0; + } + } + mode_lib->vba.DCCEnabledInAnyPlane = false; + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + if (mode_lib->vba.DCCEnable[k] == true) { + mode_lib->vba.DCCEnabledInAnyPlane = true; + } + } + for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { + locals->FabricAndDRAMBandwidthPerState[i] = dml_min( + mode_lib->vba.DRAMSpeedPerState[i] * mode_lib->vba.NumberOfChannels + * mode_lib->vba.DRAMChannelWidth, + mode_lib->vba.FabricClockPerState[i] + * mode_lib->vba.FabricDatapathToDCNDataReturn) / 1000; + locals->ReturnBWToDCNPerState = dml_min(locals->ReturnBusWidth * locals->DCFCLKPerState[i], + locals->FabricAndDRAMBandwidthPerState[i] * 1000) + * locals->PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly / 100; + + locals->ReturnBWPerState[i] = locals->ReturnBWToDCNPerState; + + if (locals->DCCEnabledInAnyPlane == true && locals->ReturnBWToDCNPerState > locals->DCFCLKPerState[i] * locals->ReturnBusWidth / 4) { + locals->ReturnBWPerState[i] = 
dml_min(locals->ReturnBWPerState[i], + locals->ReturnBWToDCNPerState * 4 * (1 - locals->UrgentLatency / + ((locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024 + / (locals->ReturnBWToDCNPerState - locals->DCFCLKPerState[i] + * locals->ReturnBusWidth / 4) + locals->UrgentLatency))); + } + locals->CriticalPoint = 2 * locals->ReturnBusWidth * locals->DCFCLKPerState[i] * + locals->UrgentLatency / (locals->ReturnBWToDCNPerState * locals->UrgentLatency + + (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024); + + if (locals->DCCEnabledInAnyPlane && locals->CriticalPoint > 1 && locals->CriticalPoint < 4) { + locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i], + 4 * locals->ReturnBWToDCNPerState * + (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024 + * locals->ReturnBusWidth * locals->DCFCLKPerState[i] * locals->UrgentLatency / + dml_pow((locals->ReturnBWToDCNPerState * locals->UrgentLatency + + (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024), 2)); + } + + locals->ReturnBWToDCNPerState = dml_min(locals->ReturnBusWidth * + locals->DCFCLKPerState[i], locals->FabricAndDRAMBandwidthPerState[i] * 1000); + + if (locals->DCCEnabledInAnyPlane == true && locals->ReturnBWToDCNPerState > locals->DCFCLKPerState[i] * locals->ReturnBusWidth / 4) { + locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i], + locals->ReturnBWToDCNPerState * 4 * (1 - locals->UrgentLatency / + ((locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024 + / (locals->ReturnBWToDCNPerState - locals->DCFCLKPerState[i] + * locals->ReturnBusWidth / 4) + locals->UrgentLatency))); + } + locals->CriticalPoint = 2 * locals->ReturnBusWidth * locals->DCFCLKPerState[i] * + locals->UrgentLatency / (locals->ReturnBWToDCNPerState * locals->UrgentLatency + + (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024); + + if (locals->DCCEnabledInAnyPlane && locals->CriticalPoint > 1 && locals->CriticalPoint < 4) { + locals->ReturnBWPerState[i] = dml_min(locals->ReturnBWPerState[i], + 4 * locals->ReturnBWToDCNPerState * + (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024 + * locals->ReturnBusWidth * locals->DCFCLKPerState[i] * locals->UrgentLatency / + dml_pow((locals->ReturnBWToDCNPerState * locals->UrgentLatency + + (locals->ROBBufferSizeInKByte - locals->PixelChunkSizeInKByte) * 1024), 2)); + } + } + /*Writeback Latency support check*/ + + mode_lib->vba.WritebackLatencySupport = true; + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + if (mode_lib->vba.WritebackEnable[k] == true) { + if (mode_lib->vba.WritebackPixelFormat[k] == dm_444_32) { + if (locals->WriteBandwidth[k] + > (mode_lib->vba.WritebackInterfaceLumaBufferSize + + mode_lib->vba.WritebackInterfaceChromaBufferSize) + / mode_lib->vba.WritebackLatency) { + mode_lib->vba.WritebackLatencySupport = false; + } + } else { + if (locals->WriteBandwidth[k] + > 1.5 + * dml_min( + mode_lib->vba.WritebackInterfaceLumaBufferSize, + 2.0 + * mode_lib->vba.WritebackInterfaceChromaBufferSize) + / mode_lib->vba.WritebackLatency) { + mode_lib->vba.WritebackLatencySupport = false; + } + } + } + } + /*Re-ordering Buffer Support Check*/ + + for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { + locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i] = + (mode_lib->vba.RoundTripPingLatencyCycles + 32.0) / mode_lib->vba.DCFCLKPerState[i] + + locals->UrgentOutOfOrderReturnPerChannel * mode_lib->vba.NumberOfChannels / locals->ReturnBWPerState[i]; + if 
((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024.0 / locals->ReturnBWPerState[i] + > locals->UrgentRoundTripAndOutOfOrderLatencyPerState[i]) { + locals->ROBSupport[i] = true; + } else { + locals->ROBSupport[i] = false; + } + } + /*Writeback Mode Support Check*/ + + mode_lib->vba.TotalNumberOfActiveWriteback = 0; + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + if (mode_lib->vba.WritebackEnable[k] == true) { + if (mode_lib->vba.ActiveWritebacksPerPlane[k] == 0) + mode_lib->vba.ActiveWritebacksPerPlane[k] = 1; + mode_lib->vba.TotalNumberOfActiveWriteback = + mode_lib->vba.TotalNumberOfActiveWriteback + + mode_lib->vba.ActiveWritebacksPerPlane[k]; + } + } + mode_lib->vba.WritebackModeSupport = true; + if (mode_lib->vba.TotalNumberOfActiveWriteback > mode_lib->vba.MaxNumWriteback) { + mode_lib->vba.WritebackModeSupport = false; + } + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + if (mode_lib->vba.WritebackEnable[k] == true + && mode_lib->vba.Writeback10bpc420Supported != true + && mode_lib->vba.WritebackPixelFormat[k] == dm_420_10) { + mode_lib->vba.WritebackModeSupport = false; + } + } + /*Writeback Scale Ratio and Taps Support Check*/ + + mode_lib->vba.WritebackScaleRatioAndTapsSupport = true; + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + if (mode_lib->vba.WritebackEnable[k] == true) { + if (mode_lib->vba.WritebackLumaAndChromaScalingSupported == false + && (mode_lib->vba.WritebackHRatio[k] != 1.0 + || mode_lib->vba.WritebackVRatio[k] != 1.0)) { + mode_lib->vba.WritebackScaleRatioAndTapsSupport = false; + } + if (mode_lib->vba.WritebackHRatio[k] > mode_lib->vba.WritebackMaxHSCLRatio + || mode_lib->vba.WritebackVRatio[k] + > mode_lib->vba.WritebackMaxVSCLRatio + || mode_lib->vba.WritebackHRatio[k] + < mode_lib->vba.WritebackMinHSCLRatio + || mode_lib->vba.WritebackVRatio[k] + < mode_lib->vba.WritebackMinVSCLRatio + || mode_lib->vba.WritebackLumaHTaps[k] + > mode_lib->vba.WritebackMaxHSCLTaps + || mode_lib->vba.WritebackLumaVTaps[k] + > mode_lib->vba.WritebackMaxVSCLTaps + || mode_lib->vba.WritebackHRatio[k] + > mode_lib->vba.WritebackLumaHTaps[k] + || mode_lib->vba.WritebackVRatio[k] + > mode_lib->vba.WritebackLumaVTaps[k] + || (mode_lib->vba.WritebackLumaHTaps[k] > 2.0 + && ((mode_lib->vba.WritebackLumaHTaps[k] % 2) + == 1)) + || (mode_lib->vba.WritebackPixelFormat[k] != dm_444_32 + && (mode_lib->vba.WritebackChromaHTaps[k] + > mode_lib->vba.WritebackMaxHSCLTaps + || mode_lib->vba.WritebackChromaVTaps[k] + > mode_lib->vba.WritebackMaxVSCLTaps + || 2.0 + * mode_lib->vba.WritebackHRatio[k] + > mode_lib->vba.WritebackChromaHTaps[k] + || 2.0 + * mode_lib->vba.WritebackVRatio[k] + > mode_lib->vba.WritebackChromaVTaps[k] + || (mode_lib->vba.WritebackChromaHTaps[k] > 2.0 + && ((mode_lib->vba.WritebackChromaHTaps[k] % 2) == 1))))) { + mode_lib->vba.WritebackScaleRatioAndTapsSupport = false; + } + if (mode_lib->vba.WritebackVRatio[k] < 1.0) { + mode_lib->vba.WritebackLumaVExtra = + dml_max(1.0 - 2.0 / dml_ceil(1.0 / mode_lib->vba.WritebackVRatio[k], 1.0), 0.0); + } else { + mode_lib->vba.WritebackLumaVExtra = -1; + } + if ((mode_lib->vba.WritebackPixelFormat[k] == dm_444_32 + && mode_lib->vba.WritebackLumaVTaps[k] + > (mode_lib->vba.WritebackLineBufferLumaBufferSize + + mode_lib->vba.WritebackLineBufferChromaBufferSize) + / 3.0 + / mode_lib->vba.WritebackDestinationWidth[k] + - mode_lib->vba.WritebackLumaVExtra) + || (mode_lib->vba.WritebackPixelFormat[k] == dm_420_8 + && mode_lib->vba.WritebackLumaVTaps[k] 
+ > mode_lib->vba.WritebackLineBufferLumaBufferSize + * 8.0 / 10.0 / mode_lib->vba.WritebackDestinationWidth[k] + - mode_lib->vba.WritebackLumaVExtra) + || (mode_lib->vba.WritebackPixelFormat[k] == dm_420_10 + && mode_lib->vba.WritebackLumaVTaps[k] + > mode_lib->vba.WritebackLineBufferLumaBufferSize + * 8.0 / 10.0 + / mode_lib->vba.WritebackDestinationWidth[k] + - mode_lib->vba.WritebackLumaVExtra)) { + mode_lib->vba.WritebackScaleRatioAndTapsSupport = false; + } + if (2.0 * mode_lib->vba.WritebackVRatio[k] < 1) { + mode_lib->vba.WritebackChromaVExtra = 0.0; + } else { + mode_lib->vba.WritebackChromaVExtra = -1; + } + if ((mode_lib->vba.WritebackPixelFormat[k] == dm_420_8 + && mode_lib->vba.WritebackChromaVTaps[k] + > mode_lib->vba.WritebackLineBufferChromaBufferSize + * 8.0 / 10.0 / mode_lib->vba.WritebackDestinationWidth[k] + - mode_lib->vba.WritebackChromaVExtra) + || (mode_lib->vba.WritebackPixelFormat[k] == dm_420_10 + && mode_lib->vba.WritebackChromaVTaps[k] + > mode_lib->vba.WritebackLineBufferChromaBufferSize + * 8.0 / 10.0 + / mode_lib->vba.WritebackDestinationWidth[k] + - mode_lib->vba.WritebackChromaVExtra)) { + mode_lib->vba.WritebackScaleRatioAndTapsSupport = false; + } + } + } + /*Maximum DISPCLK/DPPCLK Support check*/ + + mode_lib->vba.WritebackRequiredDISPCLK = 0.0; + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + if (mode_lib->vba.WritebackEnable[k] == true) { + mode_lib->vba.WritebackRequiredDISPCLK = + dml_max( + mode_lib->vba.WritebackRequiredDISPCLK, + CalculateWriteBackDISPCLK( + mode_lib->vba.WritebackPixelFormat[k], + mode_lib->vba.PixelClock[k], + mode_lib->vba.WritebackHRatio[k], + mode_lib->vba.WritebackVRatio[k], + mode_lib->vba.WritebackLumaHTaps[k], + mode_lib->vba.WritebackLumaVTaps[k], + mode_lib->vba.WritebackChromaHTaps[k], + mode_lib->vba.WritebackChromaVTaps[k], + mode_lib->vba.WritebackDestinationWidth[k], + mode_lib->vba.HTotal[k], + mode_lib->vba.WritebackChromaLineBufferWidth)); + } + } + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + if (mode_lib->vba.HRatio[k] > 1.0) { + locals->PSCL_FACTOR[k] = dml_min( + mode_lib->vba.MaxDCHUBToPSCLThroughput, + mode_lib->vba.MaxPSCLToLBThroughput + * mode_lib->vba.HRatio[k] + / dml_ceil( + mode_lib->vba.htaps[k] + / 6.0, + 1.0)); + } else { + locals->PSCL_FACTOR[k] = dml_min( + mode_lib->vba.MaxDCHUBToPSCLThroughput, + mode_lib->vba.MaxPSCLToLBThroughput); + } + if (locals->BytePerPixelInDETC[k] == 0.0) { + locals->PSCL_FACTOR_CHROMA[k] = 0.0; + locals->MinDPPCLKUsingSingleDPP[k] = + mode_lib->vba.PixelClock[k] + * dml_max3( + mode_lib->vba.vtaps[k] / 6.0 + * dml_min( + 1.0, + mode_lib->vba.HRatio[k]), + mode_lib->vba.HRatio[k] + * mode_lib->vba.VRatio[k] + / locals->PSCL_FACTOR[k], + 1.0); + if ((mode_lib->vba.htaps[k] > 6.0 || mode_lib->vba.vtaps[k] > 6.0) + && locals->MinDPPCLKUsingSingleDPP[k] + < 2.0 * mode_lib->vba.PixelClock[k]) { + locals->MinDPPCLKUsingSingleDPP[k] = 2.0 + * mode_lib->vba.PixelClock[k]; + } + } else { + if (mode_lib->vba.HRatio[k] / 2.0 > 1.0) { + locals->PSCL_FACTOR_CHROMA[k] = + dml_min( + mode_lib->vba.MaxDCHUBToPSCLThroughput, + mode_lib->vba.MaxPSCLToLBThroughput + * mode_lib->vba.HRatio[k] + / 2.0 + / dml_ceil( + mode_lib->vba.HTAPsChroma[k] + / 6.0, + 1.0)); + } else { + locals->PSCL_FACTOR_CHROMA[k] = dml_min( + mode_lib->vba.MaxDCHUBToPSCLThroughput, + mode_lib->vba.MaxPSCLToLBThroughput); + } + locals->MinDPPCLKUsingSingleDPP[k] = + mode_lib->vba.PixelClock[k] + * dml_max5( + mode_lib->vba.vtaps[k] / 6.0 + * dml_min( + 1.0, + 
mode_lib->vba.HRatio[k]), + mode_lib->vba.HRatio[k] + * mode_lib->vba.VRatio[k] + / locals->PSCL_FACTOR[k], + mode_lib->vba.VTAPsChroma[k] + / 6.0 + * dml_min( + 1.0, + mode_lib->vba.HRatio[k] + / 2.0), + mode_lib->vba.HRatio[k] + * mode_lib->vba.VRatio[k] + / 4.0 + / locals->PSCL_FACTOR_CHROMA[k], + 1.0); + if ((mode_lib->vba.htaps[k] > 6.0 || mode_lib->vba.vtaps[k] > 6.0 + || mode_lib->vba.HTAPsChroma[k] > 6.0 + || mode_lib->vba.VTAPsChroma[k] > 6.0) + && locals->MinDPPCLKUsingSingleDPP[k] + < 2.0 * mode_lib->vba.PixelClock[k]) { + locals->MinDPPCLKUsingSingleDPP[k] = 2.0 + * mode_lib->vba.PixelClock[k]; + } + } + } + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + Calculate256BBlockSizes( + mode_lib->vba.SourcePixelFormat[k], + mode_lib->vba.SurfaceTiling[k], + dml_ceil(locals->BytePerPixelInDETY[k], 1.0), + dml_ceil(locals->BytePerPixelInDETC[k], 2.0), + &locals->Read256BlockHeightY[k], + &locals->Read256BlockHeightC[k], + &locals->Read256BlockWidthY[k], + &locals->Read256BlockWidthC[k]); + if (mode_lib->vba.SourceScan[k] == dm_horz) { + locals->MaxSwathHeightY[k] = locals->Read256BlockHeightY[k]; + locals->MaxSwathHeightC[k] = locals->Read256BlockHeightC[k]; + } else { + locals->MaxSwathHeightY[k] = locals->Read256BlockWidthY[k]; + locals->MaxSwathHeightC[k] = locals->Read256BlockWidthC[k]; + } + if ((mode_lib->vba.SourcePixelFormat[k] == dm_444_64 + || mode_lib->vba.SourcePixelFormat[k] == dm_444_32 + || mode_lib->vba.SourcePixelFormat[k] == dm_444_16 + || mode_lib->vba.SourcePixelFormat[k] == dm_mono_16 + || mode_lib->vba.SourcePixelFormat[k] == dm_mono_8)) { + if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear + || (mode_lib->vba.SourcePixelFormat[k] == dm_444_64 + && (mode_lib->vba.SurfaceTiling[k] + == dm_sw_4kb_s + || mode_lib->vba.SurfaceTiling[k] + == dm_sw_4kb_s_x + || mode_lib->vba.SurfaceTiling[k] + == dm_sw_64kb_s + || mode_lib->vba.SurfaceTiling[k] + == dm_sw_64kb_s_t + || mode_lib->vba.SurfaceTiling[k] + == dm_sw_64kb_s_x + || mode_lib->vba.SurfaceTiling[k] + == dm_sw_var_s + || mode_lib->vba.SurfaceTiling[k] + == dm_sw_var_s_x) + && mode_lib->vba.SourceScan[k] == dm_horz)) { + locals->MinSwathHeightY[k] = locals->MaxSwathHeightY[k]; + } else { + locals->MinSwathHeightY[k] = locals->MaxSwathHeightY[k] + / 2.0; + } + locals->MinSwathHeightC[k] = locals->MaxSwathHeightC[k]; + } else { + if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) { + locals->MinSwathHeightY[k] = locals->MaxSwathHeightY[k]; + locals->MinSwathHeightC[k] = locals->MaxSwathHeightC[k]; + } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8 + && mode_lib->vba.SourceScan[k] == dm_horz) { + locals->MinSwathHeightY[k] = locals->MaxSwathHeightY[k] + / 2.0; + locals->MinSwathHeightC[k] = locals->MaxSwathHeightC[k]; + } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10 + && mode_lib->vba.SourceScan[k] == dm_horz) { + locals->MinSwathHeightC[k] = locals->MaxSwathHeightC[k] + / 2.0; + locals->MinSwathHeightY[k] = locals->MaxSwathHeightY[k]; + } else { + locals->MinSwathHeightY[k] = locals->MaxSwathHeightY[k]; + locals->MinSwathHeightC[k] = locals->MaxSwathHeightC[k]; + } + } + if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) { + mode_lib->vba.MaximumSwathWidthSupport = 8192.0; + } else { + mode_lib->vba.MaximumSwathWidthSupport = 5120.0; + } + mode_lib->vba.MaximumSwathWidthInDETBuffer = + dml_min( + mode_lib->vba.MaximumSwathWidthSupport, + mode_lib->vba.DETBufferSizeInKByte * 1024.0 / 2.0 + / (locals->BytePerPixelInDETY[k] + * locals->MinSwathHeightY[k] + + 
locals->BytePerPixelInDETC[k] + / 2.0 + * locals->MinSwathHeightC[k])); + if (locals->BytePerPixelInDETC[k] == 0.0) { + mode_lib->vba.MaximumSwathWidthInLineBuffer = + mode_lib->vba.LineBufferSize + * dml_max(mode_lib->vba.HRatio[k], 1.0) + / mode_lib->vba.LBBitPerPixel[k] + / (mode_lib->vba.vtaps[k] + + dml_max( + dml_ceil( + mode_lib->vba.VRatio[k], + 1.0) + - 2, + 0.0)); + } else { + mode_lib->vba.MaximumSwathWidthInLineBuffer = + dml_min( + mode_lib->vba.LineBufferSize + * dml_max( + mode_lib->vba.HRatio[k], + 1.0) + / mode_lib->vba.LBBitPerPixel[k] + / (mode_lib->vba.vtaps[k] + + dml_max( + dml_ceil( + mode_lib->vba.VRatio[k], + 1.0) + - 2, + 0.0)), + 2.0 * mode_lib->vba.LineBufferSize + * dml_max( + mode_lib->vba.HRatio[k] + / 2.0, + 1.0) + / mode_lib->vba.LBBitPerPixel[k] + / (mode_lib->vba.VTAPsChroma[k] + + dml_max( + dml_ceil( + mode_lib->vba.VRatio[k] + / 2.0, + 1.0) + - 2, + 0.0))); + } + locals->MaximumSwathWidth[k] = dml_min( + mode_lib->vba.MaximumSwathWidthInDETBuffer, + mode_lib->vba.MaximumSwathWidthInLineBuffer); + } + for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { + for (j = 0; j < 2; j++) { + mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown( + mode_lib->vba.MaxDispclk[i], + mode_lib->vba.DISPCLKDPPCLKVCOSpeed); + mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown( + mode_lib->vba.MaxDppclk[i], + mode_lib->vba.DISPCLKDPPCLKVCOSpeed); + locals->RequiredDISPCLK[i][j] = 0.0; + locals->DISPCLK_DPPCLK_Support[i][j] = true; + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine = + mode_lib->vba.PixelClock[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) + * (1.0 + mode_lib->vba.DISPCLKRampingMargin / 100.0); + if (mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine >= mode_lib->vba.MaxDispclk[i] + && i == mode_lib->vba.soc.num_states) + mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine = mode_lib->vba.PixelClock[k] + * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); + + mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2 + * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * (1 + mode_lib->vba.DISPCLKRampingMargin / 100.0); + if (mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine >= mode_lib->vba.MaxDispclk[i] + && i == mode_lib->vba.soc.num_states) + mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2 + * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); + if (mode_lib->vba.ODMCapability == false || mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine <= mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) { + locals->ODMCombineEnablePerState[i][k] = false; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; + } else { + locals->ODMCombineEnablePerState[i][k] = true; + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; + } + if (locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity + && locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k] + && locals->ODMCombineEnablePerState[i][k] == false) { + locals->NoOfDPP[i][j][k] = 1; + locals->RequiredDPPCLK[i][j][k] = + locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); + } else { + locals->NoOfDPP[i][j][k] = 2; + locals->RequiredDPPCLK[i][j][k] = + 
locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) / 2.0; + } + locals->RequiredDISPCLK[i][j] = dml_max( + locals->RequiredDISPCLK[i][j], + mode_lib->vba.PlaneRequiredDISPCLK); + if ((locals->MinDPPCLKUsingSingleDPP[k] / locals->NoOfDPP[i][j][k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) + > mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity) + || (mode_lib->vba.PlaneRequiredDISPCLK > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity)) { + locals->DISPCLK_DPPCLK_Support[i][j] = false; + } + } + locals->TotalNumberOfActiveDPP[i][j] = 0.0; + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) + locals->TotalNumberOfActiveDPP[i][j] = locals->TotalNumberOfActiveDPP[i][j] + locals->NoOfDPP[i][j][k]; + if (j == 1) { + while (locals->TotalNumberOfActiveDPP[i][j] < mode_lib->vba.MaxNumDPP + && locals->TotalNumberOfActiveDPP[i][j] < 2 * mode_lib->vba.NumberOfActivePlanes) { + double BWOfNonSplitPlaneOfMaximumBandwidth; + unsigned int NumberOfNonSplitPlaneOfMaximumBandwidth; + + BWOfNonSplitPlaneOfMaximumBandwidth = 0; + NumberOfNonSplitPlaneOfMaximumBandwidth = 0; + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { + if (locals->ReadBandwidth[k] > BWOfNonSplitPlaneOfMaximumBandwidth && locals->NoOfDPP[i][j][k] == 1) { + BWOfNonSplitPlaneOfMaximumBandwidth = locals->ReadBandwidth[k]; + NumberOfNonSplitPlaneOfMaximumBandwidth = k; + } + } + locals->NoOfDPP[i][j][NumberOfNonSplitPlaneOfMaximumBandwidth] = 2; + locals->RequiredDPPCLK[i][j][NumberOfNonSplitPlaneOfMaximumBandwidth] = + locals->MinDPPCLKUsingSingleDPP[NumberOfNonSplitPlaneOfMaximumBandwidth] + * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100) / 2; + locals->TotalNumberOfActiveDPP[i][j] = locals->TotalNumberOfActiveDPP[i][j] + 1; + } + } + if (locals->TotalNumberOfActiveDPP[i][j] > mode_lib->vba.MaxNumDPP) { + locals->RequiredDISPCLK[i][j] = 0.0; + locals->DISPCLK_DPPCLK_Support[i][j] = true; + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + locals->ODMCombineEnablePerState[i][k] = false; + if (locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]) { + locals->NoOfDPP[i][j][k] = 1; + locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k] + * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); + } else { + locals->NoOfDPP[i][j][k] = 2; + locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k] + * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) / 2.0; + } + if (i != mode_lib->vba.soc.num_states) { + mode_lib->vba.PlaneRequiredDISPCLK = + mode_lib->vba.PixelClock[k] + * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) + * (1.0 + mode_lib->vba.DISPCLKRampingMargin / 100.0); + } else { + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PixelClock[k] + * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); + } + locals->RequiredDISPCLK[i][j] = dml_max( + locals->RequiredDISPCLK[i][j], + mode_lib->vba.PlaneRequiredDISPCLK); + if (locals->MinDPPCLKUsingSingleDPP[k] / locals->NoOfDPP[i][j][k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) + > mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity + || mode_lib->vba.PlaneRequiredDISPCLK > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) + locals->DISPCLK_DPPCLK_Support[i][j] = false; + } + locals->TotalNumberOfActiveDPP[i][j] = 0.0; + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) + locals->TotalNumberOfActiveDPP[i][j] = locals->TotalNumberOfActiveDPP[i][j] + 
locals->NoOfDPP[i][j][k]; + } + locals->RequiredDISPCLK[i][j] = dml_max( + locals->RequiredDISPCLK[i][j], + mode_lib->vba.WritebackRequiredDISPCLK); + if (mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity + < mode_lib->vba.WritebackRequiredDISPCLK) { + locals->DISPCLK_DPPCLK_Support[i][j] = false; + } + } + } + /*Viewport Size Check*/ + + for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { + locals->ViewportSizeSupport[i] = true; + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + if (locals->ODMCombineEnablePerState[i][k] == true) { + if (dml_min(locals->SwathWidthYSingleDPP[k], dml_round(mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k])) + > locals->MaximumSwathWidth[k]) { + locals->ViewportSizeSupport[i] = false; + } + } else { + if (locals->SwathWidthYSingleDPP[k] / 2.0 > locals->MaximumSwathWidth[k]) { + locals->ViewportSizeSupport[i] = false; + } + } + } + } + /*Total Available Pipes Support Check*/ + + for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { + for (j = 0; j < 2; j++) { + if (locals->TotalNumberOfActiveDPP[i][j] <= mode_lib->vba.MaxNumDPP) + locals->TotalAvailablePipesSupport[i][j] = true; + else + locals->TotalAvailablePipesSupport[i][j] = false; + } + } + /*Total Available OTG Support Check*/ + + mode_lib->vba.TotalNumberOfActiveOTG = 0.0; + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + if (mode_lib->vba.BlendingAndTiming[k] == k) { + mode_lib->vba.TotalNumberOfActiveOTG = mode_lib->vba.TotalNumberOfActiveOTG + + 1.0; + } + } + if (mode_lib->vba.TotalNumberOfActiveOTG <= mode_lib->vba.MaxNumOTG) { + mode_lib->vba.NumberOfOTGSupport = true; + } else { + mode_lib->vba.NumberOfOTGSupport = false; + } + /*Display IO and DSC Support Check*/ + + mode_lib->vba.NonsupportedDSCInputBPC = false; + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + if (!(mode_lib->vba.DSCInputBitPerComponent[k] == 12.0 + || mode_lib->vba.DSCInputBitPerComponent[k] == 10.0 + || mode_lib->vba.DSCInputBitPerComponent[k] == 8.0)) { + mode_lib->vba.NonsupportedDSCInputBPC = true; + } + } + for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + locals->RequiresDSC[i][k] = 0; + locals->RequiresFEC[i][k] = 0; + if (mode_lib->vba.BlendingAndTiming[k] == k) { + if (mode_lib->vba.Output[k] == dm_hdmi) { + locals->RequiresDSC[i][k] = 0; + locals->RequiresFEC[i][k] = 0; + locals->OutputBppPerState[i][k] = TruncToValidBPP( + dml_min(600.0, mode_lib->vba.PHYCLKPerState[i]) / mode_lib->vba.PixelClockBackEnd[k] * 24, + false, + mode_lib->vba.Output[k], + mode_lib->vba.OutputFormat[k], + mode_lib->vba.DSCInputBitPerComponent[k]); + } else if (mode_lib->vba.Output[k] == dm_dp + || mode_lib->vba.Output[k] == dm_edp) { + if (mode_lib->vba.Output[k] == dm_edp) { + mode_lib->vba.EffectiveFECOverhead = 0.0; + } else { + mode_lib->vba.EffectiveFECOverhead = + mode_lib->vba.FECOverhead; + } + if (mode_lib->vba.PHYCLKPerState[i] >= 270.0) { + mode_lib->vba.Outbpp = TruncToValidBPP( + (1.0 - mode_lib->vba.Downspreading / 100.0) * 270.0 + * mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0, + false, + mode_lib->vba.Output[k], + mode_lib->vba.OutputFormat[k], + mode_lib->vba.DSCInputBitPerComponent[k]); + mode_lib->vba.OutbppDSC = TruncToValidBPP( + (1.0 - mode_lib->vba.Downspreading / 100.0) * (1.0 - mode_lib->vba.EffectiveFECOverhead / 100.0) * 270.0 + * mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0, + true, + mode_lib->vba.Output[k], + 
mode_lib->vba.OutputFormat[k], + mode_lib->vba.DSCInputBitPerComponent[k]); + if (mode_lib->vba.DSCEnabled[k] == true) { + locals->RequiresDSC[i][k] = true; + if (mode_lib->vba.Output[k] == dm_dp) { + locals->RequiresFEC[i][k] = true; + } else { + locals->RequiresFEC[i][k] = false; + } + mode_lib->vba.Outbpp = mode_lib->vba.OutbppDSC; + } else { + locals->RequiresDSC[i][k] = false; + locals->RequiresFEC[i][k] = false; + } + locals->OutputBppPerState[i][k] = mode_lib->vba.Outbpp; + } + if (mode_lib->vba.Outbpp == BPP_INVALID && mode_lib->vba.PHYCLKPerState[i] >= 540.0) { + mode_lib->vba.Outbpp = TruncToValidBPP( + (1.0 - mode_lib->vba.Downspreading / 100.0) * 540.0 + * mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0, + false, + mode_lib->vba.Output[k], + mode_lib->vba.OutputFormat[k], + mode_lib->vba.DSCInputBitPerComponent[k]); + mode_lib->vba.OutbppDSC = TruncToValidBPP( + (1.0 - mode_lib->vba.Downspreading / 100.0) * (1.0 - mode_lib->vba.EffectiveFECOverhead / 100.0) * 540.0 + * mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0, + true, + mode_lib->vba.Output[k], + mode_lib->vba.OutputFormat[k], + mode_lib->vba.DSCInputBitPerComponent[k]); + if (mode_lib->vba.DSCEnabled[k] == true) { + locals->RequiresDSC[i][k] = true; + if (mode_lib->vba.Output[k] == dm_dp) { + locals->RequiresFEC[i][k] = true; + } else { + locals->RequiresFEC[i][k] = false; + } + mode_lib->vba.Outbpp = mode_lib->vba.OutbppDSC; + } else { + locals->RequiresDSC[i][k] = false; + locals->RequiresFEC[i][k] = false; + } + locals->OutputBppPerState[i][k] = mode_lib->vba.Outbpp; + } + if (mode_lib->vba.Outbpp == BPP_INVALID + && mode_lib->vba.PHYCLKPerState[i] + >= 810.0) { + mode_lib->vba.Outbpp = TruncToValidBPP( + (1.0 - mode_lib->vba.Downspreading / 100.0) * 810.0 + * mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0, + false, + mode_lib->vba.Output[k], + mode_lib->vba.OutputFormat[k], + mode_lib->vba.DSCInputBitPerComponent[k]); + mode_lib->vba.OutbppDSC = TruncToValidBPP( + (1.0 - mode_lib->vba.Downspreading / 100.0) * (1.0 - mode_lib->vba.EffectiveFECOverhead / 100.0) * 810.0 + * mode_lib->vba.OutputLinkDPLanes[k] / mode_lib->vba.PixelClockBackEnd[k] * 8.0, + true, + mode_lib->vba.Output[k], + mode_lib->vba.OutputFormat[k], + mode_lib->vba.DSCInputBitPerComponent[k]); + if (mode_lib->vba.DSCEnabled[k] == true || mode_lib->vba.Outbpp == BPP_INVALID) { + locals->RequiresDSC[i][k] = true; + if (mode_lib->vba.Output[k] == dm_dp) { + locals->RequiresFEC[i][k] = true; + } else { + locals->RequiresFEC[i][k] = false; + } + mode_lib->vba.Outbpp = mode_lib->vba.OutbppDSC; + } else { + locals->RequiresDSC[i][k] = false; + locals->RequiresFEC[i][k] = false; + } + locals->OutputBppPerState[i][k] = + mode_lib->vba.Outbpp; + } + } + } else { + locals->OutputBppPerState[i][k] = BPP_BLENDED_PIPE; + } + } + } + for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { + locals->DIOSupport[i] = true; + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + if (locals->OutputBppPerState[i][k] == BPP_INVALID + || (mode_lib->vba.OutputFormat[k] == dm_420 + && mode_lib->vba.Interlace[k] == true + && mode_lib->vba.ProgressiveToInterlaceUnitInOPP == true)) { + locals->DIOSupport[i] = false; + } + } + } + for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + locals->DSCCLKRequiredMoreThanSupported[i] = false; + if (mode_lib->vba.BlendingAndTiming[k] == k) { + if ((mode_lib->vba.Output[k] 
== dm_dp + || mode_lib->vba.Output[k] == dm_edp)) { + if (mode_lib->vba.OutputFormat[k] == dm_420 + || mode_lib->vba.OutputFormat[k] + == dm_n422) { + mode_lib->vba.DSCFormatFactor = 2; + } else { + mode_lib->vba.DSCFormatFactor = 1; + } + if (locals->RequiresDSC[i][k] == true) { + if (locals->ODMCombineEnablePerState[i][k] + == true) { + if (mode_lib->vba.PixelClockBackEnd[k] / 6.0 / mode_lib->vba.DSCFormatFactor + > (1.0 - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * mode_lib->vba.MaxDSCCLK[i]) { + locals->DSCCLKRequiredMoreThanSupported[i] = + true; + } + } else { + if (mode_lib->vba.PixelClockBackEnd[k] / 3.0 / mode_lib->vba.DSCFormatFactor + > (1.0 - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) * mode_lib->vba.MaxDSCCLK[i]) { + locals->DSCCLKRequiredMoreThanSupported[i] = + true; + } + } + } + } + } + } + } + for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { + locals->NotEnoughDSCUnits[i] = false; + mode_lib->vba.TotalDSCUnitsRequired = 0.0; + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + if (locals->RequiresDSC[i][k] == true) { + if (locals->ODMCombineEnablePerState[i][k] == true) { + mode_lib->vba.TotalDSCUnitsRequired = + mode_lib->vba.TotalDSCUnitsRequired + 2.0; + } else { + mode_lib->vba.TotalDSCUnitsRequired = + mode_lib->vba.TotalDSCUnitsRequired + 1.0; + } + } + } + if (mode_lib->vba.TotalDSCUnitsRequired > mode_lib->vba.NumberOfDSC) { + locals->NotEnoughDSCUnits[i] = true; + } + } + /*DSC Delay per state*/ + + for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + if (mode_lib->vba.BlendingAndTiming[k] != k) { + mode_lib->vba.slices = 0; + } else if (locals->RequiresDSC[i][k] == 0 + || locals->RequiresDSC[i][k] == false) { + mode_lib->vba.slices = 0; + } else if (mode_lib->vba.PixelClockBackEnd[k] > 3200.0) { + mode_lib->vba.slices = dml_ceil( + mode_lib->vba.PixelClockBackEnd[k] / 400.0, + 4.0); + } else if (mode_lib->vba.PixelClockBackEnd[k] > 1360.0) { + mode_lib->vba.slices = 8.0; + } else if (mode_lib->vba.PixelClockBackEnd[k] > 680.0) { + mode_lib->vba.slices = 4.0; + } else if (mode_lib->vba.PixelClockBackEnd[k] > 340.0) { + mode_lib->vba.slices = 2.0; + } else { + mode_lib->vba.slices = 1.0; + } + if (locals->OutputBppPerState[i][k] == BPP_BLENDED_PIPE + || locals->OutputBppPerState[i][k] == BPP_INVALID) { + mode_lib->vba.bpp = 0.0; + } else { + mode_lib->vba.bpp = locals->OutputBppPerState[i][k]; + } + if (locals->RequiresDSC[i][k] == true && mode_lib->vba.bpp != 0.0) { + if (locals->ODMCombineEnablePerState[i][k] == false) { + locals->DSCDelayPerState[i][k] = + dscceComputeDelay( + mode_lib->vba.DSCInputBitPerComponent[k], + mode_lib->vba.bpp, + dml_ceil( + mode_lib->vba.HActive[k] + / mode_lib->vba.slices, + 1.0), + mode_lib->vba.slices, + mode_lib->vba.OutputFormat[k]) + + dscComputeDelay( + mode_lib->vba.OutputFormat[k]); + } else { + locals->DSCDelayPerState[i][k] = + 2.0 * (dscceComputeDelay( + mode_lib->vba.DSCInputBitPerComponent[k], + mode_lib->vba.bpp, + dml_ceil(mode_lib->vba.HActive[k] / mode_lib->vba.slices, 1.0), + mode_lib->vba.slices / 2, + mode_lib->vba.OutputFormat[k]) + + dscComputeDelay(mode_lib->vba.OutputFormat[k])); + } + locals->DSCDelayPerState[i][k] = + locals->DSCDelayPerState[i][k] * mode_lib->vba.PixelClock[k] / mode_lib->vba.PixelClockBackEnd[k]; + } else { + locals->DSCDelayPerState[i][k] = 0.0; + } + } + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + for (m = 0; m <= mode_lib->vba.NumberOfActivePlanes - 
1; m++) { + for (j = 0; j <= mode_lib->vba.NumberOfActivePlanes - 1; j++) { + if (mode_lib->vba.BlendingAndTiming[k] == m && locals->RequiresDSC[i][m] == true) + locals->DSCDelayPerState[i][k] = locals->DSCDelayPerState[i][m]; + } + } + } + } + + //Prefetch Check + for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { + for (j = 0; j < 2; j++) { + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + if (locals->ODMCombineEnablePerState[i][k] == true) + locals->SwathWidthYPerState[i][j][k] = dml_min(locals->SwathWidthYSingleDPP[k], dml_round(locals->HActive[k] / 2 * locals->HRatio[k])); + else + locals->SwathWidthYPerState[i][j][k] = locals->SwathWidthYSingleDPP[k] / locals->NoOfDPP[i][j][k]; + locals->SwathWidthGranularityY = 256 / dml_ceil(locals->BytePerPixelInDETY[k], 1) / locals->MaxSwathHeightY[k]; + locals->RoundedUpMaxSwathSizeBytesY = (dml_ceil(locals->SwathWidthYPerState[i][j][k] - 1, locals->SwathWidthGranularityY) + + locals->SwathWidthGranularityY) * locals->BytePerPixelInDETY[k] * locals->MaxSwathHeightY[k]; + if (locals->SourcePixelFormat[k] == dm_420_10) { + locals->RoundedUpMaxSwathSizeBytesY = dml_ceil(locals->RoundedUpMaxSwathSizeBytesY, 256) + 256; + } + if (locals->MaxSwathHeightC[k] > 0) { + locals->SwathWidthGranularityC = 256 / dml_ceil(locals->BytePerPixelInDETC[k], 2) / locals->MaxSwathHeightC[k]; + + locals->RoundedUpMaxSwathSizeBytesC = (dml_ceil(locals->SwathWidthYPerState[i][j][k] / 2 - 1, locals->SwathWidthGranularityC) + + locals->SwathWidthGranularityC) * locals->BytePerPixelInDETC[k] * locals->MaxSwathHeightC[k]; + } + if (locals->SourcePixelFormat[k] == dm_420_10) { + locals->RoundedUpMaxSwathSizeBytesC = dml_ceil(locals->RoundedUpMaxSwathSizeBytesC, 256) + 256; + } else { + locals->RoundedUpMaxSwathSizeBytesC = 0; + } + + if (locals->RoundedUpMaxSwathSizeBytesY + locals->RoundedUpMaxSwathSizeBytesC <= locals->DETBufferSizeInKByte * 1024 / 2) { + locals->SwathHeightYPerState[i][j][k] = locals->MaxSwathHeightY[k]; + locals->SwathHeightCPerState[i][j][k] = locals->MaxSwathHeightC[k]; + } else { + locals->SwathHeightYPerState[i][j][k] = locals->MinSwathHeightY[k]; + locals->SwathHeightCPerState[i][j][k] = locals->MinSwathHeightC[k]; + } + + if (locals->BytePerPixelInDETC[k] == 0) { + locals->LinesInDETLuma = locals->DETBufferSizeInKByte * 1024 / locals->BytePerPixelInDETY[k] / locals->SwathWidthYPerState[i][j][k]; + locals->LinesInDETChroma = 0; + } else if (locals->SwathHeightYPerState[i][j][k] <= locals->SwathHeightCPerState[i][j][k]) { + locals->LinesInDETLuma = locals->DETBufferSizeInKByte * 1024 / 2 / locals->BytePerPixelInDETY[k] / + locals->SwathWidthYPerState[i][j][k]; + locals->LinesInDETChroma = locals->DETBufferSizeInKByte * 1024 / 2 / locals->BytePerPixelInDETC[k] / (locals->SwathWidthYPerState[i][j][k] / 2); + } else { + locals->LinesInDETLuma = locals->DETBufferSizeInKByte * 1024 * 2 / 3 / locals->BytePerPixelInDETY[k] / locals->SwathWidthYPerState[i][j][k]; + locals->LinesInDETChroma = locals->DETBufferSizeInKByte * 1024 / 3 / locals->BytePerPixelInDETY[k] / (locals->SwathWidthYPerState[i][j][k] / 2); + } + + locals->EffectiveLBLatencyHidingSourceLinesLuma = dml_min(locals->MaxLineBufferLines, + dml_floor(locals->LineBufferSize / locals->LBBitPerPixel[k] / (locals->SwathWidthYPerState[i][j][k] + / dml_max(locals->HRatio[k], 1)), 1)) - (locals->vtaps[k] - 1); + + locals->EffectiveLBLatencyHidingSourceLinesChroma = dml_min(locals->MaxLineBufferLines, + dml_floor(locals->LineBufferSize / locals->LBBitPerPixel[k] + / 
(locals->SwathWidthYPerState[i][j][k] / 2 + / dml_max(locals->HRatio[k] / 2, 1)), 1)) - (locals->VTAPsChroma[k] - 1); + + locals->EffectiveDETLBLinesLuma = dml_floor(locals->LinesInDETLuma + dml_min( + locals->LinesInDETLuma * locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETY[k] * + locals->PSCL_FACTOR[k] / locals->ReturnBWPerState[i], + locals->EffectiveLBLatencyHidingSourceLinesLuma), + locals->SwathHeightYPerState[i][j][k]); + + locals->EffectiveDETLBLinesChroma = dml_floor(locals->LinesInDETChroma + dml_min( + locals->LinesInDETChroma * locals->RequiredDISPCLK[i][j] * locals->BytePerPixelInDETC[k] * + locals->PSCL_FACTOR_CHROMA[k] / locals->ReturnBWPerState[i], + locals->EffectiveLBLatencyHidingSourceLinesChroma), + locals->SwathHeightCPerState[i][j][k]); + + if (locals->BytePerPixelInDETC[k] == 0) { + locals->UrgentLatencySupportUsPerState[i][j][k] = locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k]) + / locals->VRatio[k] - locals->EffectiveDETLBLinesLuma * locals->SwathWidthYPerState[i][j][k] * + dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i] / locals->NoOfDPP[i][j][k]); + } else { + locals->UrgentLatencySupportUsPerState[i][j][k] = dml_min( + locals->EffectiveDETLBLinesLuma * (locals->HTotal[k] / locals->PixelClock[k]) + / locals->VRatio[k] - locals->EffectiveDETLBLinesLuma * locals->SwathWidthYPerState[i][j][k] * + dml_ceil(locals->BytePerPixelInDETY[k], 1) / (locals->ReturnBWPerState[i] / locals->NoOfDPP[i][j][k]), + locals->EffectiveDETLBLinesChroma * (locals->HTotal[k] / locals->PixelClock[k]) / (locals->VRatio[k] / 2) - + locals->EffectiveDETLBLinesChroma * locals->SwathWidthYPerState[i][j][k] / 2 * + dml_ceil(locals->BytePerPixelInDETC[k], 2) / (locals->ReturnBWPerState[i] / locals->NoOfDPP[i][j][k])); + } + } + } + } + + for (i = 0; i <= locals->soc.num_states; i++) { + for (j = 0; j < 2; j++) { + locals->UrgentLatencySupport[i][j] = true; + for (k = 0; k < locals->NumberOfActivePlanes; k++) { + if (locals->UrgentLatencySupportUsPerState[i][j][k] < locals->UrgentLatency) + locals->UrgentLatencySupport[i][j] = false; + } + } + } + + + /*Prefetch Check*/ + for (i = 0; i <= locals->soc.num_states; i++) { + for (j = 0; j < 2; j++) { + locals->TotalNumberOfDCCActiveDPP[i][j] = 0; + for (k = 0; k < locals->NumberOfActivePlanes; k++) { + if (locals->DCCEnable[k] == true) { + locals->TotalNumberOfDCCActiveDPP[i][j] = + locals->TotalNumberOfDCCActiveDPP[i][j] + locals->NoOfDPP[i][j][k]; + } + } + } + } + + CalculateMinAndMaxPrefetchMode(locals->AllowDRAMSelfRefreshOrDRAMClockChangeInVblank, &locals->MinPrefetchMode, &locals->MaxPrefetchMode); + + locals->MaxTotalVActiveRDBandwidth = 0; + for (k = 0; k < locals->NumberOfActivePlanes; k++) { + locals->MaxTotalVActiveRDBandwidth = locals->MaxTotalVActiveRDBandwidth + locals->ReadBandwidth[k]; + } + + for (i = 0; i <= locals->soc.num_states; i++) { + for (j = 0; j < 2; j++) { + for (k = 0; k < locals->NumberOfActivePlanes; k++) { + locals->NoOfDPPThisState[k] = locals->NoOfDPP[i][j][k]; + locals->RequiredDPPCLKThisState[k] = locals->RequiredDPPCLK[i][j][k]; + locals->SwathHeightYThisState[k] = locals->SwathHeightYPerState[i][j][k]; + locals->SwathHeightCThisState[k] = locals->SwathHeightCPerState[i][j][k]; + locals->SwathWidthYThisState[k] = locals->SwathWidthYPerState[i][j][k]; + mode_lib->vba.ProjectedDCFCLKDeepSleep = dml_max( + mode_lib->vba.ProjectedDCFCLKDeepSleep, + mode_lib->vba.PixelClock[k] / 16.0); + if (mode_lib->vba.BytePerPixelInDETC[k] == 0.0) { + if 
(mode_lib->vba.VRatio[k] <= 1.0) { + mode_lib->vba.ProjectedDCFCLKDeepSleep = + dml_max( + mode_lib->vba.ProjectedDCFCLKDeepSleep, + 1.1 + * dml_ceil( + mode_lib->vba.BytePerPixelInDETY[k], + 1.0) + / 64.0 + * mode_lib->vba.HRatio[k] + * mode_lib->vba.PixelClock[k] + / mode_lib->vba.NoOfDPP[i][j][k]); + } else { + mode_lib->vba.ProjectedDCFCLKDeepSleep = + dml_max( + mode_lib->vba.ProjectedDCFCLKDeepSleep, + 1.1 + * dml_ceil( + mode_lib->vba.BytePerPixelInDETY[k], + 1.0) + / 64.0 + * mode_lib->vba.PSCL_FACTOR[k] + * mode_lib->vba.RequiredDPPCLK[i][j][k]); + } + } else { + if (mode_lib->vba.VRatio[k] <= 1.0) { + mode_lib->vba.ProjectedDCFCLKDeepSleep = + dml_max( + mode_lib->vba.ProjectedDCFCLKDeepSleep, + 1.1 + * dml_ceil( + mode_lib->vba.BytePerPixelInDETY[k], + 1.0) + / 32.0 + * mode_lib->vba.HRatio[k] + * mode_lib->vba.PixelClock[k] + / mode_lib->vba.NoOfDPP[i][j][k]); + } else { + mode_lib->vba.ProjectedDCFCLKDeepSleep = + dml_max( + mode_lib->vba.ProjectedDCFCLKDeepSleep, + 1.1 + * dml_ceil( + mode_lib->vba.BytePerPixelInDETY[k], + 1.0) + / 32.0 + * mode_lib->vba.PSCL_FACTOR[k] + * mode_lib->vba.RequiredDPPCLK[i][j][k]); + } + if (mode_lib->vba.VRatio[k] / 2.0 <= 1.0) { + mode_lib->vba.ProjectedDCFCLKDeepSleep = + dml_max( + mode_lib->vba.ProjectedDCFCLKDeepSleep, + 1.1 + * dml_ceil( + mode_lib->vba.BytePerPixelInDETC[k], + 2.0) + / 32.0 + * mode_lib->vba.HRatio[k] + / 2.0 + * mode_lib->vba.PixelClock[k] + / mode_lib->vba.NoOfDPP[i][j][k]); + } else { + mode_lib->vba.ProjectedDCFCLKDeepSleep = + dml_max( + mode_lib->vba.ProjectedDCFCLKDeepSleep, + 1.1 + * dml_ceil( + mode_lib->vba.BytePerPixelInDETC[k], + 2.0) + / 32.0 + * mode_lib->vba.PSCL_FACTOR_CHROMA[k] + * mode_lib->vba.RequiredDPPCLK[i][j][k]); + } + } + } + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + mode_lib->vba.PDEAndMetaPTEBytesPerFrameY = CalculateVMAndRowBytes( + mode_lib, + mode_lib->vba.DCCEnable[k], + mode_lib->vba.Read256BlockHeightY[k], + mode_lib->vba.Read256BlockWidthY[k], + mode_lib->vba.SourcePixelFormat[k], + mode_lib->vba.SurfaceTiling[k], + dml_ceil(mode_lib->vba.BytePerPixelInDETY[k], 1.0), + mode_lib->vba.SourceScan[k], + mode_lib->vba.ViewportWidth[k], + mode_lib->vba.ViewportHeight[k], + mode_lib->vba.SwathWidthYPerState[i][j][k], + mode_lib->vba.GPUVMEnable, + mode_lib->vba.VMMPageSize, + mode_lib->vba.PTEBufferSizeInRequestsLuma, + mode_lib->vba.PDEProcessingBufIn64KBReqs, + mode_lib->vba.PitchY[k], + mode_lib->vba.DCCMetaPitchY[k], + &mode_lib->vba.MacroTileWidthY[k], + &mode_lib->vba.MetaRowBytesY, + &mode_lib->vba.DPTEBytesPerRowY, + &mode_lib->vba.PTEBufferSizeNotExceededY[i][j][k], + &mode_lib->vba.dpte_row_height[k], + &mode_lib->vba.meta_row_height[k]); + mode_lib->vba.PrefetchLinesY[k] = CalculatePrefetchSourceLines( + mode_lib, + mode_lib->vba.VRatio[k], + mode_lib->vba.vtaps[k], + mode_lib->vba.Interlace[k], + mode_lib->vba.ProgressiveToInterlaceUnitInOPP, + mode_lib->vba.SwathHeightYPerState[i][j][k], + mode_lib->vba.ViewportYStartY[k], + &mode_lib->vba.PrefillY[k], + &mode_lib->vba.MaxNumSwY[k]); + if ((mode_lib->vba.SourcePixelFormat[k] != dm_444_64 + && mode_lib->vba.SourcePixelFormat[k] != dm_444_32 + && mode_lib->vba.SourcePixelFormat[k] != dm_444_16 + && mode_lib->vba.SourcePixelFormat[k] != dm_mono_16 + && mode_lib->vba.SourcePixelFormat[k] != dm_mono_8)) { + mode_lib->vba.PDEAndMetaPTEBytesPerFrameC = CalculateVMAndRowBytes( + mode_lib, + mode_lib->vba.DCCEnable[k], + mode_lib->vba.Read256BlockHeightY[k], + mode_lib->vba.Read256BlockWidthY[k], + 
mode_lib->vba.SourcePixelFormat[k], + mode_lib->vba.SurfaceTiling[k], + dml_ceil(mode_lib->vba.BytePerPixelInDETC[k], 2.0), + mode_lib->vba.SourceScan[k], + mode_lib->vba.ViewportWidth[k] / 2.0, + mode_lib->vba.ViewportHeight[k] / 2.0, + mode_lib->vba.SwathWidthYPerState[i][j][k] / 2.0, + mode_lib->vba.GPUVMEnable, + mode_lib->vba.VMMPageSize, + mode_lib->vba.PTEBufferSizeInRequestsLuma, + mode_lib->vba.PDEProcessingBufIn64KBReqs, + mode_lib->vba.PitchC[k], + 0.0, + &mode_lib->vba.MacroTileWidthC[k], + &mode_lib->vba.MetaRowBytesC, + &mode_lib->vba.DPTEBytesPerRowC, + &mode_lib->vba.PTEBufferSizeNotExceededC[i][j][k], + &mode_lib->vba.dpte_row_height_chroma[k], + &mode_lib->vba.meta_row_height_chroma[k]); + mode_lib->vba.PrefetchLinesC[k] = CalculatePrefetchSourceLines( + mode_lib, + mode_lib->vba.VRatio[k] / 2.0, + mode_lib->vba.VTAPsChroma[k], + mode_lib->vba.Interlace[k], + mode_lib->vba.ProgressiveToInterlaceUnitInOPP, + mode_lib->vba.SwathHeightCPerState[i][j][k], + mode_lib->vba.ViewportYStartC[k], + &mode_lib->vba.PrefillC[k], + &mode_lib->vba.MaxNumSwC[k]); + } else { + mode_lib->vba.PDEAndMetaPTEBytesPerFrameC = 0.0; + mode_lib->vba.MetaRowBytesC = 0.0; + mode_lib->vba.DPTEBytesPerRowC = 0.0; + locals->PrefetchLinesC[k] = 0.0; + locals->PTEBufferSizeNotExceededC[i][j][k] = true; + locals->PTEBufferSizeInRequestsForLuma = mode_lib->vba.PTEBufferSizeInRequestsLuma + mode_lib->vba.PTEBufferSizeInRequestsChroma; + } + locals->PDEAndMetaPTEBytesPerFrame[k] = + mode_lib->vba.PDEAndMetaPTEBytesPerFrameY + mode_lib->vba.PDEAndMetaPTEBytesPerFrameC; + locals->MetaRowBytes[k] = mode_lib->vba.MetaRowBytesY + mode_lib->vba.MetaRowBytesC; + locals->DPTEBytesPerRow[k] = mode_lib->vba.DPTEBytesPerRowY + mode_lib->vba.DPTEBytesPerRowC; + + CalculateActiveRowBandwidth( + mode_lib->vba.GPUVMEnable, + mode_lib->vba.SourcePixelFormat[k], + mode_lib->vba.VRatio[k], + mode_lib->vba.DCCEnable[k], + mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k], + mode_lib->vba.MetaRowBytesY, + mode_lib->vba.MetaRowBytesC, + mode_lib->vba.meta_row_height[k], + mode_lib->vba.meta_row_height_chroma[k], + mode_lib->vba.DPTEBytesPerRowY, + mode_lib->vba.DPTEBytesPerRowC, + mode_lib->vba.dpte_row_height[k], + mode_lib->vba.dpte_row_height_chroma[k], + &mode_lib->vba.meta_row_bw[k], + &mode_lib->vba.dpte_row_bw[k], + &mode_lib->vba.qual_row_bw[k]); + } + mode_lib->vba.ExtraLatency = + mode_lib->vba.UrgentRoundTripAndOutOfOrderLatencyPerState[i] + + (mode_lib->vba.TotalNumberOfActiveDPP[i][j] + * mode_lib->vba.PixelChunkSizeInKByte + + mode_lib->vba.TotalNumberOfDCCActiveDPP[i][j] + * mode_lib->vba.MetaChunkSize) + * 1024.0 + / mode_lib->vba.ReturnBWPerState[i]; + if (mode_lib->vba.GPUVMEnable == true) { + mode_lib->vba.ExtraLatency = mode_lib->vba.ExtraLatency + + mode_lib->vba.TotalNumberOfActiveDPP[i][j] + * mode_lib->vba.PTEGroupSize + / mode_lib->vba.ReturnBWPerState[i]; + } + mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep; + + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + if (mode_lib->vba.BlendingAndTiming[k] == k) { + if (mode_lib->vba.WritebackEnable[k] == true) { + locals->WritebackDelay[i][k] = mode_lib->vba.WritebackLatency + + CalculateWriteBackDelay( + mode_lib->vba.WritebackPixelFormat[k], + mode_lib->vba.WritebackHRatio[k], + mode_lib->vba.WritebackVRatio[k], + mode_lib->vba.WritebackLumaHTaps[k], + mode_lib->vba.WritebackLumaVTaps[k], + mode_lib->vba.WritebackChromaHTaps[k], + mode_lib->vba.WritebackChromaVTaps[k], + mode_lib->vba.WritebackDestinationWidth[k]) 
/ locals->RequiredDISPCLK[i][j]; + } else { + locals->WritebackDelay[i][k] = 0.0; + } + for (m = 0; m <= mode_lib->vba.NumberOfActivePlanes - 1; m++) { + if (mode_lib->vba.BlendingAndTiming[m] == k + && mode_lib->vba.WritebackEnable[m] + == true) { + locals->WritebackDelay[i][k] = dml_max(locals->WritebackDelay[i][k], + mode_lib->vba.WritebackLatency + CalculateWriteBackDelay( + mode_lib->vba.WritebackPixelFormat[m], + mode_lib->vba.WritebackHRatio[m], + mode_lib->vba.WritebackVRatio[m], + mode_lib->vba.WritebackLumaHTaps[m], + mode_lib->vba.WritebackLumaVTaps[m], + mode_lib->vba.WritebackChromaHTaps[m], + mode_lib->vba.WritebackChromaVTaps[m], + mode_lib->vba.WritebackDestinationWidth[m]) / locals->RequiredDISPCLK[i][j]); + } + } + } + } + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + for (m = 0; m <= mode_lib->vba.NumberOfActivePlanes - 1; m++) { + if (mode_lib->vba.BlendingAndTiming[k] == m) { + locals->WritebackDelay[i][k] = locals->WritebackDelay[i][m]; + } + } + } + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + for (m = 0; m < locals->NumberOfCursors[k]; m++) + locals->cursor_bw[k] = locals->NumberOfCursors[k] * locals->CursorWidth[k][m] * locals->CursorBPP[k][m] + / 8 / (locals->HTotal[k] / locals->PixelClock[k]) * locals->VRatio[k]; + } + + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + locals->MaximumVStartup[k] = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k] + - dml_max(1.0, dml_ceil(locals->WritebackDelay[i][k] / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]), 1.0)); + } + + mode_lib->vba.NextPrefetchMode = mode_lib->vba.MinPrefetchMode; + do { + mode_lib->vba.PrefetchMode[i][j] = mode_lib->vba.NextPrefetchMode; + mode_lib->vba.NextPrefetchMode = mode_lib->vba.NextPrefetchMode + 1; + + mode_lib->vba.TWait = CalculateTWait( + mode_lib->vba.PrefetchMode[i][j], + mode_lib->vba.DRAMClockChangeLatency, + mode_lib->vba.UrgentLatency, + mode_lib->vba.SREnterPlusExitTime); + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + + if (mode_lib->vba.XFCEnabled[k] == true) { + mode_lib->vba.XFCRemoteSurfaceFlipDelay = + CalculateRemoteSurfaceFlipDelay( + mode_lib, + mode_lib->vba.VRatio[k], + locals->SwathWidthYPerState[i][j][k], + dml_ceil(locals->BytePerPixelInDETY[k], 1.0), + mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k], + mode_lib->vba.XFCTSlvVupdateOffset, + mode_lib->vba.XFCTSlvVupdateWidth, + mode_lib->vba.XFCTSlvVreadyOffset, + mode_lib->vba.XFCXBUFLatencyTolerance, + mode_lib->vba.XFCFillBWOverhead, + mode_lib->vba.XFCSlvChunkSize, + mode_lib->vba.XFCBusTransportTime, + mode_lib->vba.TimeCalc, + mode_lib->vba.TWait, + &mode_lib->vba.SrcActiveDrainRate, + &mode_lib->vba.TInitXFill, + &mode_lib->vba.TslvChk); + } else { + mode_lib->vba.XFCRemoteSurfaceFlipDelay = 0.0; + } + + CalculateDelayAfterScaler(mode_lib, mode_lib->vba.ReturnBWPerState[i], mode_lib->vba.ReadBandwidthLuma[k], mode_lib->vba.ReadBandwidthChroma[k], mode_lib->vba.MaxTotalVActiveRDBandwidth, + mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k], mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k], + mode_lib->vba.RequiredDPPCLK[i][j][k], mode_lib->vba.RequiredDISPCLK[i][j], mode_lib->vba.PixelClock[k], mode_lib->vba.DSCDelayPerState[i][k], mode_lib->vba.NoOfDPP[i][j][k], mode_lib->vba.ScalerEnabled[k], mode_lib->vba.NumberOfCursors[k], + mode_lib->vba.DPPCLKDelaySubtotal, mode_lib->vba.DPPCLKDelaySCL, mode_lib->vba.DPPCLKDelaySCLLBOnly, mode_lib->vba.DPPCLKDelayCNVCFormater, mode_lib->vba.DPPCLKDelayCNVCCursor, 
mode_lib->vba.DISPCLKDelaySubtotal, + mode_lib->vba.SwathWidthYPerState[i][j][k] / mode_lib->vba.HRatio[k], mode_lib->vba.OutputFormat[k], mode_lib->vba.HTotal[k], + mode_lib->vba.SwathWidthYSingleDPP[k], mode_lib->vba.BytePerPixelInDETY[k], mode_lib->vba.BytePerPixelInDETC[k], mode_lib->vba.SwathHeightYThisState[k], mode_lib->vba.SwathHeightCThisState[k], mode_lib->vba.Interlace[k], mode_lib->vba.ProgressiveToInterlaceUnitInOPP, + &mode_lib->vba.DSTXAfterScaler[k], &mode_lib->vba.DSTYAfterScaler[k]); + + mode_lib->vba.IsErrorResult[i][j][k] = + CalculatePrefetchSchedule( + mode_lib, + mode_lib->vba.RequiredDPPCLK[i][j][k], + mode_lib->vba.RequiredDISPCLK[i][j], + mode_lib->vba.PixelClock[k], + mode_lib->vba.ProjectedDCFCLKDeepSleep, + mode_lib->vba.NoOfDPP[i][j][k], + mode_lib->vba.NumberOfCursors[k], + mode_lib->vba.VTotal[k] + - mode_lib->vba.VActive[k], + mode_lib->vba.HTotal[k], + mode_lib->vba.MaxInterDCNTileRepeaters, + mode_lib->vba.MaximumVStartup[k], + mode_lib->vba.GPUVMMaxPageTableLevels, + mode_lib->vba.GPUVMEnable, + mode_lib->vba.DynamicMetadataEnable[k], + mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k], + mode_lib->vba.DynamicMetadataTransmittedBytes[k], + mode_lib->vba.DCCEnable[k], + mode_lib->vba.UrgentLatencyPixelDataOnly, + mode_lib->vba.ExtraLatency, + mode_lib->vba.TimeCalc, + mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k], + mode_lib->vba.MetaRowBytes[k], + mode_lib->vba.DPTEBytesPerRow[k], + mode_lib->vba.PrefetchLinesY[k], + mode_lib->vba.SwathWidthYPerState[i][j][k], + mode_lib->vba.BytePerPixelInDETY[k], + mode_lib->vba.PrefillY[k], + mode_lib->vba.MaxNumSwY[k], + mode_lib->vba.PrefetchLinesC[k], + mode_lib->vba.BytePerPixelInDETC[k], + mode_lib->vba.PrefillC[k], + mode_lib->vba.MaxNumSwC[k], + mode_lib->vba.SwathHeightYPerState[i][j][k], + mode_lib->vba.SwathHeightCPerState[i][j][k], + mode_lib->vba.TWait, + mode_lib->vba.XFCEnabled[k], + mode_lib->vba.XFCRemoteSurfaceFlipDelay, + mode_lib->vba.Interlace[k], + mode_lib->vba.ProgressiveToInterlaceUnitInOPP, + mode_lib->vba.DSTXAfterScaler[k], + mode_lib->vba.DSTYAfterScaler[k], + &mode_lib->vba.LineTimesForPrefetch[k], + &mode_lib->vba.PrefetchBW[k], + &mode_lib->vba.LinesForMetaPTE[k], + &mode_lib->vba.LinesForMetaAndDPTERow[k], + &mode_lib->vba.VRatioPreY[i][j][k], + &mode_lib->vba.VRatioPreC[i][j][k], + &mode_lib->vba.RequiredPrefetchPixelDataBWLuma[i][j][k], + &mode_lib->vba.Tno_bw[k], + &mode_lib->vba.VUpdateOffsetPix[k], + &mode_lib->vba.VUpdateWidthPix[k], + &mode_lib->vba.VReadyOffsetPix[k]); + } + mode_lib->vba.MaximumReadBandwidthWithoutPrefetch = 0.0; + mode_lib->vba.MaximumReadBandwidthWithPrefetch = 0.0; + locals->prefetch_vm_bw_valid = true; + locals->prefetch_row_bw_valid = true; + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + if (locals->PDEAndMetaPTEBytesPerFrame[k] == 0) + locals->prefetch_vm_bw[k] = 0; + else if (locals->LinesForMetaPTE[k] > 0) + locals->prefetch_vm_bw[k] = locals->PDEAndMetaPTEBytesPerFrame[k] + / (locals->LinesForMetaPTE[k] * locals->HTotal[k] / locals->PixelClock[k]); + else { + locals->prefetch_vm_bw[k] = 0; + locals->prefetch_vm_bw_valid = false; + } + if (locals->MetaRowBytes[k] + locals->DPTEBytesPerRow[k] == 0) + locals->prefetch_row_bw[k] = 0; + else if (locals->LinesForMetaAndDPTERow[k] > 0) + locals->prefetch_row_bw[k] = (locals->MetaRowBytes[k] + locals->DPTEBytesPerRow[k]) + / (locals->LinesForMetaAndDPTERow[k] * locals->HTotal[k] / locals->PixelClock[k]); + else { + locals->prefetch_row_bw[k] = 0; + locals->prefetch_row_bw_valid = 
false; + } + + mode_lib->vba.MaximumReadBandwidthWithoutPrefetch = mode_lib->vba.MaximumReadBandwidthWithPrefetch + + mode_lib->vba.cursor_bw[k] + mode_lib->vba.ReadBandwidth[k] + mode_lib->vba.meta_row_bw[k] + mode_lib->vba.dpte_row_bw[k]; + mode_lib->vba.MaximumReadBandwidthWithPrefetch = + mode_lib->vba.MaximumReadBandwidthWithPrefetch + + mode_lib->vba.cursor_bw[k] + + dml_max3( + mode_lib->vba.prefetch_vm_bw[k], + mode_lib->vba.prefetch_row_bw[k], + dml_max(mode_lib->vba.ReadBandwidth[k], + mode_lib->vba.RequiredPrefetchPixelDataBWLuma[i][j][k]) + + mode_lib->vba.meta_row_bw[k] + mode_lib->vba.dpte_row_bw[k]); + } + locals->BandwidthWithoutPrefetchSupported[i] = true; + if (mode_lib->vba.MaximumReadBandwidthWithoutPrefetch > locals->ReturnBWPerState[i]) { + locals->BandwidthWithoutPrefetchSupported[i] = false; + } + + locals->PrefetchSupported[i][j] = true; + if (mode_lib->vba.MaximumReadBandwidthWithPrefetch > locals->ReturnBWPerState[i]) { + locals->PrefetchSupported[i][j] = false; + } + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + if (locals->LineTimesForPrefetch[k] < 2.0 + || locals->LinesForMetaPTE[k] >= 8.0 + || locals->LinesForMetaAndDPTERow[k] >= 16.0 + || mode_lib->vba.IsErrorResult[i][j][k] == true) { + locals->PrefetchSupported[i][j] = false; + } + } + locals->VRatioInPrefetchSupported[i][j] = true; + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + if (locals->VRatioPreY[i][j][k] > 4.0 + || locals->VRatioPreC[i][j][k] > 4.0 + || mode_lib->vba.IsErrorResult[i][j][k] == true) { + locals->VRatioInPrefetchSupported[i][j] = false; + } + } + } while ((locals->PrefetchSupported[i][j] != true || locals->VRatioInPrefetchSupported[i][j] != true) + && mode_lib->vba.NextPrefetchMode < mode_lib->vba.MaxPrefetchMode); + + if (mode_lib->vba.PrefetchSupported[i][j] == true + && mode_lib->vba.VRatioInPrefetchSupported[i][j] == true) { + mode_lib->vba.BandwidthAvailableForImmediateFlip = + mode_lib->vba.ReturnBWPerState[i]; + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + mode_lib->vba.BandwidthAvailableForImmediateFlip = + mode_lib->vba.BandwidthAvailableForImmediateFlip + - mode_lib->vba.cursor_bw[k] + - dml_max( + mode_lib->vba.ReadBandwidth[k] + mode_lib->vba.qual_row_bw[k], + mode_lib->vba.PrefetchBW[k]); + } + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + mode_lib->vba.ImmediateFlipBytes[k] = 0.0; + if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8 + && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) { + mode_lib->vba.ImmediateFlipBytes[k] = + mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k] + + mode_lib->vba.MetaRowBytes[k] + + mode_lib->vba.DPTEBytesPerRow[k]; + } + } + mode_lib->vba.TotImmediateFlipBytes = 0.0; + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8 + && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) { + mode_lib->vba.TotImmediateFlipBytes = + mode_lib->vba.TotImmediateFlipBytes + + mode_lib->vba.ImmediateFlipBytes[k]; + } + } + + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + CalculateFlipSchedule( + mode_lib, + mode_lib->vba.ExtraLatency, + mode_lib->vba.UrgentLatencyPixelDataOnly, + mode_lib->vba.GPUVMMaxPageTableLevels, + mode_lib->vba.GPUVMEnable, + mode_lib->vba.BandwidthAvailableForImmediateFlip, + mode_lib->vba.TotImmediateFlipBytes, + mode_lib->vba.SourcePixelFormat[k], + mode_lib->vba.ImmediateFlipBytes[k], + mode_lib->vba.HTotal[k] + / mode_lib->vba.PixelClock[k], + mode_lib->vba.VRatio[k], + 
mode_lib->vba.Tno_bw[k], + mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k], + mode_lib->vba.MetaRowBytes[k], + mode_lib->vba.DPTEBytesPerRow[k], + mode_lib->vba.DCCEnable[k], + mode_lib->vba.dpte_row_height[k], + mode_lib->vba.meta_row_height[k], + mode_lib->vba.qual_row_bw[k], + &mode_lib->vba.DestinationLinesToRequestVMInImmediateFlip[k], + &mode_lib->vba.DestinationLinesToRequestRowInImmediateFlip[k], + &mode_lib->vba.final_flip_bw[k], + &mode_lib->vba.ImmediateFlipSupportedForPipe[k]); + } + mode_lib->vba.total_dcn_read_bw_with_flip = 0.0; + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + mode_lib->vba.total_dcn_read_bw_with_flip = + mode_lib->vba.total_dcn_read_bw_with_flip + + mode_lib->vba.cursor_bw[k] + + dml_max3( + mode_lib->vba.prefetch_vm_bw[k], + mode_lib->vba.prefetch_row_bw[k], + mode_lib->vba.final_flip_bw[k] + + dml_max( + mode_lib->vba.ReadBandwidth[k], + mode_lib->vba.RequiredPrefetchPixelDataBWLuma[i][j][k])); + } + mode_lib->vba.ImmediateFlipSupportedForState[i][j] = true; + if (mode_lib->vba.total_dcn_read_bw_with_flip + > mode_lib->vba.ReturnBWPerState[i]) { + mode_lib->vba.ImmediateFlipSupportedForState[i][j] = false; + } + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + if (mode_lib->vba.ImmediateFlipSupportedForPipe[k] == false) { + mode_lib->vba.ImmediateFlipSupportedForState[i][j] = false; + } + } + } else { + mode_lib->vba.ImmediateFlipSupportedForState[i][j] = false; + } + } + } + + /*Vertical Active BW support*/ + for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { + mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i] = dml_min(mode_lib->vba.ReturnBusWidth * + mode_lib->vba.DCFCLKPerState[i], mode_lib->vba.FabricAndDRAMBandwidthPerState[i] * 1000) * + mode_lib->vba.MaxAveragePercentOfIdealDRAMBWDisplayCanUseInNormalSystemOperation / 100; + if (mode_lib->vba.MaxTotalVActiveRDBandwidth <= mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i]) + mode_lib->vba.TotalVerticalActiveBandwidthSupport[i] = true; + else + mode_lib->vba.TotalVerticalActiveBandwidthSupport[i] = false; + } + + /*PTE Buffer Size Check*/ + + for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { + for (j = 0; j < 2; j++) { + locals->PTEBufferSizeNotExceeded[i][j] = true; + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + if (locals->PTEBufferSizeNotExceededY[i][j][k] == false + || locals->PTEBufferSizeNotExceededC[i][j][k] == false) { + locals->PTEBufferSizeNotExceeded[i][j] = false; + } + } + } + } + /*Cursor Support Check*/ + mode_lib->vba.CursorSupport = true; + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + for (j = 0; j < 2; j++) { + if (mode_lib->vba.CursorWidth[k][j] > 0.0) { + if (dml_floor( + dml_floor( + mode_lib->vba.CursorBufferSize + - mode_lib->vba.CursorChunkSize, + mode_lib->vba.CursorChunkSize) * 1024.0 + / (mode_lib->vba.CursorWidth[k][j] + * mode_lib->vba.CursorBPP[k][j] + / 8.0), + 1.0) + * (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]) + / mode_lib->vba.VRatio[k] < mode_lib->vba.UrgentLatencyPixelDataOnly + || (mode_lib->vba.CursorBPP[k][j] == 64.0 + && mode_lib->vba.Cursor64BppSupport == false)) { + mode_lib->vba.CursorSupport = false; + } + } + } + } + /*Valid Pitch Check*/ + + mode_lib->vba.PitchSupport = true; + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + locals->AlignedYPitch[k] = dml_ceil( + dml_max(mode_lib->vba.PitchY[k], mode_lib->vba.ViewportWidth[k]), + locals->MacroTileWidthY[k]); + if (locals->AlignedYPitch[k] > mode_lib->vba.PitchY[k]) { + 
mode_lib->vba.PitchSupport = false; + } + if (mode_lib->vba.DCCEnable[k] == true) { + locals->AlignedDCCMetaPitch[k] = dml_ceil( + dml_max( + mode_lib->vba.DCCMetaPitchY[k], + mode_lib->vba.ViewportWidth[k]), + 64.0 * locals->Read256BlockWidthY[k]); + } else { + locals->AlignedDCCMetaPitch[k] = mode_lib->vba.DCCMetaPitchY[k]; + } + if (locals->AlignedDCCMetaPitch[k] > mode_lib->vba.DCCMetaPitchY[k]) { + mode_lib->vba.PitchSupport = false; + } + if (mode_lib->vba.SourcePixelFormat[k] != dm_444_64 + && mode_lib->vba.SourcePixelFormat[k] != dm_444_32 + && mode_lib->vba.SourcePixelFormat[k] != dm_444_16 + && mode_lib->vba.SourcePixelFormat[k] != dm_mono_16 + && mode_lib->vba.SourcePixelFormat[k] != dm_mono_8) { + locals->AlignedCPitch[k] = dml_ceil( + dml_max( + mode_lib->vba.PitchC[k], + mode_lib->vba.ViewportWidth[k] / 2.0), + locals->MacroTileWidthC[k]); + } else { + locals->AlignedCPitch[k] = mode_lib->vba.PitchC[k]; + } + if (locals->AlignedCPitch[k] > mode_lib->vba.PitchC[k]) { + mode_lib->vba.PitchSupport = false; + } + } + /*Mode Support, Voltage State and SOC Configuration*/ + + for (i = mode_lib->vba.soc.num_states; i >= 0; i--) { + for (j = 0; j < 2; j++) { + enum dm_validation_status status = DML_VALIDATION_OK; + + if (mode_lib->vba.ScaleRatioAndTapsSupport != true) { + status = DML_FAIL_SCALE_RATIO_TAP; + } else if (mode_lib->vba.SourceFormatPixelAndScanSupport != true) { + status = DML_FAIL_SOURCE_PIXEL_FORMAT; + } else if (locals->ViewportSizeSupport[i] != true) { + status = DML_FAIL_VIEWPORT_SIZE; + } else if (locals->DIOSupport[i] != true) { + status = DML_FAIL_DIO_SUPPORT; + } else if (locals->NotEnoughDSCUnits[i] != false) { + status = DML_FAIL_NOT_ENOUGH_DSC; + } else if (locals->DSCCLKRequiredMoreThanSupported[i] != false) { + status = DML_FAIL_DSC_CLK_REQUIRED; + } else if (locals->UrgentLatencySupport[i][j] != true) { + status = DML_FAIL_URGENT_LATENCY; + } else if (locals->ROBSupport[i] != true) { + status = DML_FAIL_REORDERING_BUFFER; + } else if (locals->DISPCLK_DPPCLK_Support[i][j] != true) { + status = DML_FAIL_DISPCLK_DPPCLK; + } else if (locals->TotalAvailablePipesSupport[i][j] != true) { + status = DML_FAIL_TOTAL_AVAILABLE_PIPES; + } else if (mode_lib->vba.NumberOfOTGSupport != true) { + status = DML_FAIL_NUM_OTG; + } else if (mode_lib->vba.WritebackModeSupport != true) { + status = DML_FAIL_WRITEBACK_MODE; + } else if (mode_lib->vba.WritebackLatencySupport != true) { + status = DML_FAIL_WRITEBACK_LATENCY; + } else if (mode_lib->vba.WritebackScaleRatioAndTapsSupport != true) { + status = DML_FAIL_WRITEBACK_SCALE_RATIO_TAP; + } else if (mode_lib->vba.CursorSupport != true) { + status = DML_FAIL_CURSOR_SUPPORT; + } else if (mode_lib->vba.PitchSupport != true) { + status = DML_FAIL_PITCH_SUPPORT; + } else if (locals->PrefetchSupported[i][j] != true) { + status = DML_FAIL_PREFETCH_SUPPORT; + } else if (locals->TotalVerticalActiveBandwidthSupport[i] != true) { + status = DML_FAIL_TOTAL_V_ACTIVE_BW; + } else if (locals->VRatioInPrefetchSupported[i][j] != true) { + status = DML_FAIL_V_RATIO_PREFETCH; + } else if (locals->PTEBufferSizeNotExceeded[i][j] != true) { + status = DML_FAIL_PTE_BUFFER_SIZE; + } else if (mode_lib->vba.NonsupportedDSCInputBPC != false) { + status = DML_FAIL_DSC_INPUT_BPC; + } + + if (status == DML_VALIDATION_OK) { + locals->ModeSupport[i][j] = true; + } else { + locals->ModeSupport[i][j] = false; + } + locals->ValidationStatus[i] = status; + } + } + { + unsigned int MaximumMPCCombine = 0; + mode_lib->vba.VoltageLevel = mode_lib->vba.soc.num_states 
+ 1; + for (i = mode_lib->vba.VoltageOverrideLevel; i <= mode_lib->vba.soc.num_states; i++) { + if (locals->ModeSupport[i][0] == true || locals->ModeSupport[i][1] == true) { + mode_lib->vba.VoltageLevel = i; + if (locals->ModeSupport[i][1] == true && (locals->ModeSupport[i][0] == false + || mode_lib->vba.WhenToDoMPCCombine == dm_mpc_always_when_possible)) { + MaximumMPCCombine = 1; + } else { + MaximumMPCCombine = 0; + } + break; + } + } + mode_lib->vba.ImmediateFlipSupport = + locals->ImmediateFlipSupportedForState[mode_lib->vba.VoltageLevel][MaximumMPCCombine]; + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + mode_lib->vba.DPPPerPlane[k] = locals->NoOfDPP[mode_lib->vba.VoltageLevel][MaximumMPCCombine][k]; + locals->DPPCLK[k] = locals->RequiredDPPCLK[mode_lib->vba.VoltageLevel][MaximumMPCCombine][k]; + } + mode_lib->vba.DISPCLK = locals->RequiredDISPCLK[mode_lib->vba.VoltageLevel][MaximumMPCCombine]; + mode_lib->vba.maxMpcComb = MaximumMPCCombine; + } + mode_lib->vba.DCFCLK = mode_lib->vba.DCFCLKPerState[mode_lib->vba.VoltageLevel]; + mode_lib->vba.DRAMSpeed = mode_lib->vba.DRAMSpeedPerState[mode_lib->vba.VoltageLevel]; + mode_lib->vba.FabricClock = mode_lib->vba.FabricClockPerState[mode_lib->vba.VoltageLevel]; + mode_lib->vba.SOCCLK = mode_lib->vba.SOCCLKPerState[mode_lib->vba.VoltageLevel]; + mode_lib->vba.ReturnBW = locals->ReturnBWPerState[mode_lib->vba.VoltageLevel]; + mode_lib->vba.FabricAndDRAMBandwidth = locals->FabricAndDRAMBandwidthPerState[mode_lib->vba.VoltageLevel]; + for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { + if (mode_lib->vba.BlendingAndTiming[k] == k) { + mode_lib->vba.ODMCombineEnabled[k] = + locals->ODMCombineEnablePerState[mode_lib->vba.VoltageLevel][k]; + } else { + mode_lib->vba.ODMCombineEnabled[k] = 0; + } + mode_lib->vba.DSCEnabled[k] = + locals->RequiresDSC[mode_lib->vba.VoltageLevel][k]; + mode_lib->vba.OutputBpp[k] = + locals->OutputBppPerState[mode_lib->vba.VoltageLevel][k]; + } +} diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.h new file mode 100644 index 000000000000..a989d3ca1e99 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.h @@ -0,0 +1,32 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
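Just above, at the end of dml20v2_ModeSupportAndSystemConfigurationFull(), the voltage-state selection walks the states upward from the override level, latches the first state whose support table passes, and enables MPC combine only when the single-DPP variant fails at that state (or when combining is always requested). A minimal standalone sketch of that decision, where NUM_STATES and the arrays are illustrative stand-ins for the vba fields:

#include <stdbool.h>

#define NUM_STATES 5    /* stand-in for soc.num_states */

/* Returns the chosen state index, or NUM_STATES + 1 when no state fits. */
static int pick_voltage_state(const bool mode_support[NUM_STATES + 1][2],
                              int override_level, bool combine_always,
                              int *max_mpc_combine)
{
        for (int i = override_level; i <= NUM_STATES; i++) {
                if (mode_support[i][0] || mode_support[i][1]) {
                        /* Prefer no MPC combine unless it is needed or forced. */
                        *max_mpc_combine = (mode_support[i][1] &&
                                            (!mode_support[i][0] || combine_always)) ? 1 : 0;
                        return i;
                }
        }
        return NUM_STATES + 1;
}

The chosen index then publishes the final DCFCLK, DRAMSpeed, FabricClock, SOCCLK, per-plane DPP count and output bpp, which is what the tail of that hunk does with DCFCLKPerState[], DRAMSpeedPerState[] and friends.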
+ * + * Authors: AMD + * + */ + +#ifndef _DCN20V2_DISPLAY_MODE_VBA_H_ +#define _DCN20V2_DISPLAY_MODE_VBA_H_ + +void dml20v2_recalculate(struct display_mode_lib *mode_lib); +void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c new file mode 100644 index 000000000000..ed8bf5f723c9 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c @@ -0,0 +1,1701 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "../display_mode_lib.h" +#include "../display_mode_vba.h" +#include "display_rq_dlg_calc_20v2.h" + +// Function: dml20v2_rq_dlg_get_rq_params +// Calculate requestor related parameters that register definition agnostic +// (i.e. this layer does try to separate real values from register definition) +// Input: +// pipe_src_param - pipe source configuration (e.g. vp, pitch, etc.) +// Output: +// rq_param - values that can be used to setup RQ (e.g. swath_height, plane1_addr, etc.) +// +static void dml20v2_rq_dlg_get_rq_params( + struct display_mode_lib *mode_lib, + display_rq_params_st * rq_param, + const display_pipe_source_params_st pipe_src_param); + +// Function: dml20v2_rq_dlg_get_dlg_params +// Calculate deadline related parameters +// +static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, + const display_e2e_pipe_params_st *e2e_pipe_param, + const unsigned int num_pipes, + const unsigned int pipe_idx, + display_dlg_regs_st *disp_dlg_regs, + display_ttu_regs_st *disp_ttu_regs, + const display_rq_dlg_params_st rq_dlg_param, + const display_dlg_sys_params_st dlg_sys_param, + const bool cstate_en, + const bool pstate_en); +/* + * NOTE: + * This file is gcc-parseable HW gospel, coming straight from HW engineers. + * + * It doesn't adhere to Linux kernel style and sometimes will do things in odd + * ways. Unless there is something clearly wrong with it the code should + * remain as-is as it provides us with a guarantee from HW that it is correct. 
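The new header above exposes only two entry points for the 20v2 variant, and the rq/dlg calc file groups its work into the two static helpers documented just above. A hypothetical caller-side sketch of how the two public functions pair up; the wrapper name, the include set and the call ordering are assumptions for illustration, since the actual hook-up into display_mode_lib is not part of this hunk:

#include "../display_mode_lib.h"
#include "display_mode_vba_20v2.h"

/* Hypothetical wrapper: run the 20v2 mode-support sweep on a populated
 * display_mode_lib, then refresh the derived parameters. */
static void dml20v2_validate_sketch(struct display_mode_lib *mode_lib)
{
        /* Fills the per-state support tables and picks voltage level,
         * DPP split, DISPCLK/DPPCLK and output bpp (see the hunk above). */
        dml20v2_ModeSupportAndSystemConfigurationFull(mode_lib);

        /* Re-derive the dependent parameters for the chosen configuration. */
        dml20v2_recalculate(mode_lib);
}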
+ */ + +static void calculate_ttu_cursor(struct display_mode_lib *mode_lib, + double *refcyc_per_req_delivery_pre_cur, + double *refcyc_per_req_delivery_cur, + double refclk_freq_in_mhz, + double ref_freq_to_pix_freq, + double hscale_pixel_rate_l, + double hscl_ratio, + double vratio_pre_l, + double vratio_l, + unsigned int cur_width, + enum cursor_bpp cur_bpp); + +#include "../dml_inline_defs.h" + +static unsigned int get_bytes_per_element(enum source_format_class source_format, bool is_chroma) +{ + unsigned int ret_val = 0; + + if (source_format == dm_444_16) { + if (!is_chroma) + ret_val = 2; + } else if (source_format == dm_444_32) { + if (!is_chroma) + ret_val = 4; + } else if (source_format == dm_444_64) { + if (!is_chroma) + ret_val = 8; + } else if (source_format == dm_420_8) { + if (is_chroma) + ret_val = 2; + else + ret_val = 1; + } else if (source_format == dm_420_10) { + if (is_chroma) + ret_val = 4; + else + ret_val = 2; + } else if (source_format == dm_444_8) { + ret_val = 1; + } + return ret_val; +} + +static bool is_dual_plane(enum source_format_class source_format) +{ + bool ret_val = 0; + + if ((source_format == dm_420_8) || (source_format == dm_420_10)) + ret_val = 1; + + return ret_val; +} + +static double get_refcyc_per_delivery(struct display_mode_lib *mode_lib, + double refclk_freq_in_mhz, + double pclk_freq_in_mhz, + bool odm_combine, + unsigned int recout_width, + unsigned int hactive, + double vratio, + double hscale_pixel_rate, + unsigned int delivery_width, + unsigned int req_per_swath_ub) +{ + double refcyc_per_delivery = 0.0; + + if (vratio <= 1.0) { + if (odm_combine) + refcyc_per_delivery = (double) refclk_freq_in_mhz + * dml_min((double) recout_width, (double) hactive / 2.0) + / pclk_freq_in_mhz / (double) req_per_swath_ub; + else + refcyc_per_delivery = (double) refclk_freq_in_mhz * (double) recout_width + / pclk_freq_in_mhz / (double) req_per_swath_ub; + } else { + refcyc_per_delivery = (double) refclk_freq_in_mhz * (double) delivery_width + / (double) hscale_pixel_rate / (double) req_per_swath_ub; + } + + dml_print("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, refclk_freq_in_mhz); + dml_print("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, pclk_freq_in_mhz); + dml_print("DML_DLG: %s: recout_width = %d\n", __func__, recout_width); + dml_print("DML_DLG: %s: vratio = %3.2f\n", __func__, vratio); + dml_print("DML_DLG: %s: req_per_swath_ub = %d\n", __func__, req_per_swath_ub); + dml_print("DML_DLG: %s: refcyc_per_delivery= %3.2f\n", __func__, refcyc_per_delivery); + + return refcyc_per_delivery; + +} + +static unsigned int get_blk_size_bytes(const enum source_macro_tile_size tile_size) +{ + if (tile_size == dm_256k_tile) + return (256 * 1024); + else if (tile_size == dm_64k_tile) + return (64 * 1024); + else + return (4 * 1024); +} + +static void extract_rq_sizing_regs(struct display_mode_lib *mode_lib, + display_data_rq_regs_st *rq_regs, + const display_data_rq_sizing_params_st rq_sizing) +{ + dml_print("DML_DLG: %s: rq_sizing param\n", __func__); + print__data_rq_sizing_params_st(mode_lib, rq_sizing); + + rq_regs->chunk_size = dml_log2(rq_sizing.chunk_bytes) - 10; + + if (rq_sizing.min_chunk_bytes == 0) + rq_regs->min_chunk_size = 0; + else + rq_regs->min_chunk_size = dml_log2(rq_sizing.min_chunk_bytes) - 8 + 1; + + rq_regs->meta_chunk_size = dml_log2(rq_sizing.meta_chunk_bytes) - 10; + if (rq_sizing.min_meta_chunk_bytes == 0) + rq_regs->min_meta_chunk_size = 0; + else + rq_regs->min_meta_chunk_size = dml_log2(rq_sizing.min_meta_chunk_bytes) - 
6 + 1; + + rq_regs->dpte_group_size = dml_log2(rq_sizing.dpte_group_bytes) - 6; + rq_regs->mpte_group_size = dml_log2(rq_sizing.mpte_group_bytes) - 6; +} + +static void extract_rq_regs(struct display_mode_lib *mode_lib, + display_rq_regs_st *rq_regs, + const display_rq_params_st rq_param) +{ + unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024; + unsigned int detile_buf_plane1_addr = 0; + + extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), rq_param.sizing.rq_l); + + rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_l.dpte_row_height), + 1) - 3; + + if (rq_param.yuv420) { + extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), rq_param.sizing.rq_c); + rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_c.dpte_row_height), + 1) - 3; + } + + rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height); + rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height); + + // FIXME: take the max between luma, chroma chunk size? + // okay for now, as we are setting chunk_bytes to 8kb anyways + if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb + rq_regs->drq_expansion_mode = 0; + } else { + rq_regs->drq_expansion_mode = 2; + } + rq_regs->prq_expansion_mode = 1; + rq_regs->mrq_expansion_mode = 1; + rq_regs->crq_expansion_mode = 1; + + if (rq_param.yuv420) { + if ((double) rq_param.misc.rq_l.stored_swath_bytes + / (double) rq_param.misc.rq_c.stored_swath_bytes <= 1.5) { + detile_buf_plane1_addr = (detile_buf_size_in_bytes / 2.0 / 64.0); // half to chroma + } else { + detile_buf_plane1_addr = dml_round_to_multiple((unsigned int) ((2.0 * detile_buf_size_in_bytes) / 3.0), + 256, + 0) / 64.0; // 2/3 to chroma + } + } + rq_regs->plane1_base_address = detile_buf_plane1_addr; +} + +static void handle_det_buf_split(struct display_mode_lib *mode_lib, + display_rq_params_st *rq_param, + const display_pipe_source_params_st pipe_src_param) +{ + unsigned int total_swath_bytes = 0; + unsigned int swath_bytes_l = 0; + unsigned int swath_bytes_c = 0; + unsigned int full_swath_bytes_packed_l = 0; + unsigned int full_swath_bytes_packed_c = 0; + bool req128_l = 0; + bool req128_c = 0; + bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear); + bool surf_vert = (pipe_src_param.source_scan == dm_vert); + unsigned int log2_swath_height_l = 0; + unsigned int log2_swath_height_c = 0; + unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024; + + full_swath_bytes_packed_l = rq_param->misc.rq_l.full_swath_bytes; + full_swath_bytes_packed_c = rq_param->misc.rq_c.full_swath_bytes; + + if (rq_param->yuv420_10bpc) { + full_swath_bytes_packed_l = dml_round_to_multiple(rq_param->misc.rq_l.full_swath_bytes * 2 / 3, + 256, + 1) + 256; + full_swath_bytes_packed_c = dml_round_to_multiple(rq_param->misc.rq_c.full_swath_bytes * 2 / 3, + 256, + 1) + 256; + } + + if (rq_param->yuv420) { + total_swath_bytes = 2 * full_swath_bytes_packed_l + 2 * full_swath_bytes_packed_c; + + if (total_swath_bytes <= detile_buf_size_in_bytes) { //full 256b request + req128_l = 0; + req128_c = 0; + swath_bytes_l = full_swath_bytes_packed_l; + swath_bytes_c = full_swath_bytes_packed_c; + } else { //128b request (for luma only for yuv420 8bpc) + req128_l = 1; + req128_c = 0; + swath_bytes_l = full_swath_bytes_packed_l / 2; + swath_bytes_c = full_swath_bytes_packed_c; + } + // Note: assumption, the config that pass in will fit into + // the detiled buffer. 
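The yuv420 branch just above reduces to a single comparison: if two full luma swaths plus two full chroma swaths fit in the detile buffer, both planes keep 256B requests; otherwise the luma requests drop to 128B and only half a luma swath is stored. A standalone sketch with made-up numbers, assuming a 164 KiB detile buffer purely for illustration (the real value comes from mode_lib->ip.det_buffer_size_kbytes):

#include <stdbool.h>
#include <stdio.h>

static void det_split_yuv420(unsigned int full_swath_bytes_l,
                             unsigned int full_swath_bytes_c,
                             unsigned int det_buffer_size_kbytes)
{
        unsigned int det_bytes = det_buffer_size_kbytes * 1024;
        unsigned int total = 2 * full_swath_bytes_l + 2 * full_swath_bytes_c;
        /* Fall back to 128B requests for luma only when both planes do not fit. */
        bool req128_l = total > det_bytes;
        unsigned int stored_l = req128_l ? full_swath_bytes_l / 2 : full_swath_bytes_l;

        printf("%u B of swaths vs %u B of DET -> %s luma requests, %u B luma stored\n",
               total, det_bytes, req128_l ? "128B" : "256B", stored_l);
}

int main(void)
{
        /* Hypothetical 4K NV12-like swath sizes, for illustration only. */
        det_split_yuv420(61440, 30720, 164);
        return 0;
}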
+ } else { + total_swath_bytes = 2 * full_swath_bytes_packed_l; + + if (total_swath_bytes <= detile_buf_size_in_bytes) + req128_l = 0; + else + req128_l = 1; + + swath_bytes_l = total_swath_bytes; + swath_bytes_c = 0; + } + rq_param->misc.rq_l.stored_swath_bytes = swath_bytes_l; + rq_param->misc.rq_c.stored_swath_bytes = swath_bytes_c; + + if (surf_linear) { + log2_swath_height_l = 0; + log2_swath_height_c = 0; + } else if (!surf_vert) { + log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l; + log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c; + } else { + log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l; + log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c; + } + rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l; + rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c; + + dml_print("DML_DLG: %s: req128_l = %0d\n", __func__, req128_l); + dml_print("DML_DLG: %s: req128_c = %0d\n", __func__, req128_c); + dml_print("DML_DLG: %s: full_swath_bytes_packed_l = %0d\n", + __func__, + full_swath_bytes_packed_l); + dml_print("DML_DLG: %s: full_swath_bytes_packed_c = %0d\n", + __func__, + full_swath_bytes_packed_c); +} + +static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib, + display_data_rq_dlg_params_st *rq_dlg_param, + display_data_rq_misc_params_st *rq_misc_param, + display_data_rq_sizing_params_st *rq_sizing_param, + unsigned int vp_width, + unsigned int vp_height, + unsigned int data_pitch, + unsigned int meta_pitch, + unsigned int source_format, + unsigned int tiling, + unsigned int macro_tile_size, + unsigned int source_scan, + unsigned int is_chroma) +{ + bool surf_linear = (tiling == dm_sw_linear); + bool surf_vert = (source_scan == dm_vert); + + unsigned int bytes_per_element; + unsigned int bytes_per_element_y = get_bytes_per_element((enum source_format_class)(source_format), + false); + unsigned int bytes_per_element_c = get_bytes_per_element((enum source_format_class)(source_format), + true); + + unsigned int blk256_width = 0; + unsigned int blk256_height = 0; + + unsigned int blk256_width_y = 0; + unsigned int blk256_height_y = 0; + unsigned int blk256_width_c = 0; + unsigned int blk256_height_c = 0; + unsigned int log2_bytes_per_element; + unsigned int log2_blk256_width; + unsigned int log2_blk256_height; + unsigned int blk_bytes; + unsigned int log2_blk_bytes; + unsigned int log2_blk_height; + unsigned int log2_blk_width; + unsigned int log2_meta_req_bytes; + unsigned int log2_meta_req_height; + unsigned int log2_meta_req_width; + unsigned int meta_req_width; + unsigned int meta_req_height; + unsigned int log2_meta_row_height; + unsigned int meta_row_width_ub; + unsigned int log2_meta_chunk_bytes; + unsigned int log2_meta_chunk_height; + + //full sized meta chunk width in unit of data elements + unsigned int log2_meta_chunk_width; + unsigned int log2_min_meta_chunk_bytes; + unsigned int min_meta_chunk_width; + unsigned int meta_chunk_width; + unsigned int meta_chunk_per_row_int; + unsigned int meta_row_remainder; + unsigned int meta_chunk_threshold; + unsigned int meta_blk_bytes; + unsigned int meta_blk_height; + unsigned int meta_blk_width; + unsigned int meta_surface_bytes; + unsigned int vmpg_bytes; + unsigned int meta_pte_req_per_frame_ub; + unsigned int meta_pte_bytes_per_frame_ub; + const unsigned int log2_vmpg_bytes = dml_log2(mode_lib->soc.vmm_page_size_bytes); + const unsigned int dpte_buf_in_pte_reqs = 
mode_lib->ip.dpte_buffer_size_in_pte_reqs_luma; + const unsigned int pde_proc_buffer_size_64k_reqs = + mode_lib->ip.pde_proc_buffer_size_64k_reqs; + + unsigned int log2_vmpg_height = 0; + unsigned int log2_vmpg_width = 0; + unsigned int log2_dpte_req_height_ptes = 0; + unsigned int log2_dpte_req_height = 0; + unsigned int log2_dpte_req_width = 0; + unsigned int log2_dpte_row_height_linear = 0; + unsigned int log2_dpte_row_height = 0; + unsigned int log2_dpte_group_width = 0; + unsigned int dpte_row_width_ub = 0; + unsigned int dpte_req_height = 0; + unsigned int dpte_req_width = 0; + unsigned int dpte_group_width = 0; + unsigned int log2_dpte_group_bytes = 0; + unsigned int log2_dpte_group_length = 0; + unsigned int pde_buf_entries; + bool yuv420 = (source_format == dm_420_8 || source_format == dm_420_10); + + Calculate256BBlockSizes((enum source_format_class)(source_format), + (enum dm_swizzle_mode)(tiling), + bytes_per_element_y, + bytes_per_element_c, + &blk256_height_y, + &blk256_height_c, + &blk256_width_y, + &blk256_width_c); + + if (!is_chroma) { + blk256_width = blk256_width_y; + blk256_height = blk256_height_y; + bytes_per_element = bytes_per_element_y; + } else { + blk256_width = blk256_width_c; + blk256_height = blk256_height_c; + bytes_per_element = bytes_per_element_c; + } + + log2_bytes_per_element = dml_log2(bytes_per_element); + + dml_print("DML_DLG: %s: surf_linear = %d\n", __func__, surf_linear); + dml_print("DML_DLG: %s: surf_vert = %d\n", __func__, surf_vert); + dml_print("DML_DLG: %s: blk256_width = %d\n", __func__, blk256_width); + dml_print("DML_DLG: %s: blk256_height = %d\n", __func__, blk256_height); + + log2_blk256_width = dml_log2((double) blk256_width); + log2_blk256_height = dml_log2((double) blk256_height); + blk_bytes = surf_linear ? + 256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size); + log2_blk_bytes = dml_log2((double) blk_bytes); + log2_blk_height = 0; + log2_blk_width = 0; + + // remember log rule + // "+" in log is multiply + // "-" in log is divide + // "/2" is like square root + // blk is vertical biased + if (tiling != dm_sw_linear) + log2_blk_height = log2_blk256_height + + dml_ceil((double) (log2_blk_bytes - 8) / 2.0, 1); + else + log2_blk_height = 0; // blk height of 1 + + log2_blk_width = log2_blk_bytes - log2_bytes_per_element - log2_blk_height; + + if (!surf_vert) { + rq_dlg_param->swath_width_ub = dml_round_to_multiple(vp_width - 1, blk256_width, 1) + + blk256_width; + rq_dlg_param->req_per_swath_ub = rq_dlg_param->swath_width_ub >> log2_blk256_width; + } else { + rq_dlg_param->swath_width_ub = dml_round_to_multiple(vp_height - 1, blk256_height, 1) + + blk256_height; + rq_dlg_param->req_per_swath_ub = rq_dlg_param->swath_width_ub >> log2_blk256_height; + } + + if (!surf_vert) + rq_misc_param->full_swath_bytes = rq_dlg_param->swath_width_ub * blk256_height + * bytes_per_element; + else + rq_misc_param->full_swath_bytes = rq_dlg_param->swath_width_ub * blk256_width + * bytes_per_element; + + rq_misc_param->blk256_height = blk256_height; + rq_misc_param->blk256_width = blk256_width; + + // ------- + // meta + // ------- + log2_meta_req_bytes = 6; // meta request is 64b and is 8x8byte meta element + + // each 64b meta request for dcn is 8x8 meta elements and + // a meta element covers one 256b block of the the data surface. 
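The "log rule" comment above is ordinary dimension arithmetic carried out in log2 space, and the meta sizing that follows builds on it. A small worked check, assuming a 32bpp surface (4 bytes per element with an 8x8-element 256B block) in a 64 KiB macro tile; those concrete sizes are assumptions chosen only to make the numbers land cleanly:

#include <stdio.h>

int main(void)
{
        unsigned int log2_bytes_per_element = 2;  /* 4 B/element (assumed) */
        unsigned int log2_blk256_height = 3;      /* 8 rows per 256B block (assumed) */
        unsigned int log2_blk_bytes = 16;         /* 64 KiB macro tile */

        /* "+" multiplies, "-" divides, ceil(x / 2) takes a square root. */
        unsigned int log2_blk_height =
                log2_blk256_height + (log2_blk_bytes - 8 + 1) / 2;            /* 3 + 4 = 7 */
        unsigned int log2_blk_width =
                log2_blk_bytes - log2_bytes_per_element - log2_blk_height;    /* 16 - 2 - 7 = 7 */

        printf("macro tile: %u x %u elements = %u bytes\n",
               1u << log2_blk_width, 1u << log2_blk_height,
               (1u << (log2_blk_width + log2_blk_height)) << log2_bytes_per_element);
        return 0;
}

By the same accounting, one 64B meta request is an 8x8 grid of one-byte meta elements, each covering a 256B data block, so a single meta request maps 64 * 256B = 16 KiB of the data surface.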
+ log2_meta_req_height = log2_blk256_height + 3; // meta req is 8x8 byte, each byte represent 1 blk256 + log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element + - log2_meta_req_height; + meta_req_width = 1 << log2_meta_req_width; + meta_req_height = 1 << log2_meta_req_height; + log2_meta_row_height = 0; + meta_row_width_ub = 0; + + // the dimensions of a meta row are meta_row_width x meta_row_height in elements. + // calculate upper bound of the meta_row_width + if (!surf_vert) { + log2_meta_row_height = log2_meta_req_height; + meta_row_width_ub = dml_round_to_multiple(vp_width - 1, meta_req_width, 1) + + meta_req_width; + rq_dlg_param->meta_req_per_row_ub = meta_row_width_ub / meta_req_width; + } else { + log2_meta_row_height = log2_meta_req_width; + meta_row_width_ub = dml_round_to_multiple(vp_height - 1, meta_req_height, 1) + + meta_req_height; + rq_dlg_param->meta_req_per_row_ub = meta_row_width_ub / meta_req_height; + } + rq_dlg_param->meta_bytes_per_row_ub = rq_dlg_param->meta_req_per_row_ub * 64; + + rq_dlg_param->meta_row_height = 1 << log2_meta_row_height; + + log2_meta_chunk_bytes = dml_log2(rq_sizing_param->meta_chunk_bytes); + log2_meta_chunk_height = log2_meta_row_height; + + //full sized meta chunk width in unit of data elements + log2_meta_chunk_width = log2_meta_chunk_bytes + 8 - log2_bytes_per_element + - log2_meta_chunk_height; + log2_min_meta_chunk_bytes = dml_log2(rq_sizing_param->min_meta_chunk_bytes); + min_meta_chunk_width = 1 + << (log2_min_meta_chunk_bytes + 8 - log2_bytes_per_element + - log2_meta_chunk_height); + meta_chunk_width = 1 << log2_meta_chunk_width; + meta_chunk_per_row_int = (unsigned int) (meta_row_width_ub / meta_chunk_width); + meta_row_remainder = meta_row_width_ub % meta_chunk_width; + meta_chunk_threshold = 0; + meta_blk_bytes = 4096; + meta_blk_height = blk256_height * 64; + meta_blk_width = meta_blk_bytes * 256 / bytes_per_element / meta_blk_height; + meta_surface_bytes = meta_pitch + * (dml_round_to_multiple(vp_height - 1, meta_blk_height, 1) + meta_blk_height) + * bytes_per_element / 256; + vmpg_bytes = mode_lib->soc.vmm_page_size_bytes; + meta_pte_req_per_frame_ub = (dml_round_to_multiple(meta_surface_bytes - vmpg_bytes, + 8 * vmpg_bytes, + 1) + 8 * vmpg_bytes) / (8 * vmpg_bytes); + meta_pte_bytes_per_frame_ub = meta_pte_req_per_frame_ub * 64; //64B mpte request + rq_dlg_param->meta_pte_bytes_per_frame_ub = meta_pte_bytes_per_frame_ub; + + dml_print("DML_DLG: %s: meta_blk_height = %d\n", __func__, meta_blk_height); + dml_print("DML_DLG: %s: meta_blk_width = %d\n", __func__, meta_blk_width); + dml_print("DML_DLG: %s: meta_surface_bytes = %d\n", __func__, meta_surface_bytes); + dml_print("DML_DLG: %s: meta_pte_req_per_frame_ub = %d\n", + __func__, + meta_pte_req_per_frame_ub); + dml_print("DML_DLG: %s: meta_pte_bytes_per_frame_ub = %d\n", + __func__, + meta_pte_bytes_per_frame_ub); + + if (!surf_vert) + meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_width; + else + meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_height; + + if (meta_row_remainder <= meta_chunk_threshold) + rq_dlg_param->meta_chunks_per_row_ub = meta_chunk_per_row_int + 1; + else + rq_dlg_param->meta_chunks_per_row_ub = meta_chunk_per_row_int + 2; + + // ------ + // dpte + // ------ + if (surf_linear) { + log2_vmpg_height = 0; // one line high + } else { + log2_vmpg_height = (log2_vmpg_bytes - 8) / 2 + log2_blk256_height; + } + log2_vmpg_width = log2_vmpg_bytes - log2_bytes_per_element - log2_vmpg_height; + + // only 3 possible shapes for 
dpte request in dimensions of ptes: 8x1, 4x2, 2x4. + if (surf_linear) { //one 64B PTE request returns 8 PTEs + log2_dpte_req_height_ptes = 0; + log2_dpte_req_width = log2_vmpg_width + 3; + log2_dpte_req_height = 0; + } else if (log2_blk_bytes == 12) { //4KB tile means 4kB page size + //one 64B req gives 8x1 PTEs for 4KB tile + log2_dpte_req_height_ptes = 0; + log2_dpte_req_width = log2_blk_width + 3; + log2_dpte_req_height = log2_blk_height + 0; + } else if ((log2_blk_bytes >= 16) && (log2_vmpg_bytes == 12)) { // tile block >= 64KB + //two 64B reqs of 2x4 PTEs give 16 PTEs to cover 64KB + log2_dpte_req_height_ptes = 4; + log2_dpte_req_width = log2_blk256_width + 4; // log2_64KB_width + log2_dpte_req_height = log2_blk256_height + 4; // log2_64KB_height + } else { //64KB page size and must 64KB tile block + //one 64B req gives 8x1 PTEs for 64KB tile + log2_dpte_req_height_ptes = 0; + log2_dpte_req_width = log2_blk_width + 3; + log2_dpte_req_height = log2_blk_height + 0; + } + + // The dpte request dimensions in data elements is dpte_req_width x dpte_req_height + // log2_vmpg_width is how much 1 pte represent, now calculating how much a 64b pte req represent + // That depends on the pte shape (i.e. 8x1, 4x2, 2x4) + //log2_dpte_req_height = log2_vmpg_height + log2_dpte_req_height_ptes; + //log2_dpte_req_width = log2_vmpg_width + log2_dpte_req_width_ptes; + dpte_req_height = 1 << log2_dpte_req_height; + dpte_req_width = 1 << log2_dpte_req_width; + + // calculate pitch dpte row buffer can hold + // round the result down to a power of two. + pde_buf_entries = yuv420 ? (pde_proc_buffer_size_64k_reqs >> 1) : pde_proc_buffer_size_64k_reqs; + if (surf_linear) { + unsigned int dpte_row_height; + + log2_dpte_row_height_linear = dml_floor(dml_log2(dml_min(64 * 1024 * pde_buf_entries + / bytes_per_element, + dpte_buf_in_pte_reqs + * dpte_req_width) + / data_pitch), + 1); + + ASSERT(log2_dpte_row_height_linear >= 3); + + if (log2_dpte_row_height_linear > 7) + log2_dpte_row_height_linear = 7; + + log2_dpte_row_height = log2_dpte_row_height_linear; + // For linear, the dpte row is pitch dependent and the pte requests wrap at the pitch boundary. + // the dpte_row_width_ub is the upper bound of data_pitch*dpte_row_height in elements with this unique buffering. + dpte_row_height = 1 << log2_dpte_row_height; + dpte_row_width_ub = dml_round_to_multiple(data_pitch * dpte_row_height - 1, + dpte_req_width, + 1) + dpte_req_width; + rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_width; + } else { + // the upper bound of the dpte_row_width without dependency on viewport position follows. + // for tiled mode, row height is the same as req height and row store up to vp size upper bound + if (!surf_vert) { + log2_dpte_row_height = log2_dpte_req_height; + dpte_row_width_ub = dml_round_to_multiple(vp_width - 1, dpte_req_width, 1) + + dpte_req_width; + rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_width; + } else { + log2_dpte_row_height = + (log2_blk_width < log2_dpte_req_width) ? 
+ log2_blk_width : log2_dpte_req_width; + dpte_row_width_ub = dml_round_to_multiple(vp_height - 1, dpte_req_height, 1) + + dpte_req_height; + rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_height; + } + } + if (log2_blk_bytes >= 16 && log2_vmpg_bytes == 12) // tile block >= 64KB + rq_dlg_param->dpte_bytes_per_row_ub = rq_dlg_param->dpte_req_per_row_ub * 128; //2*64B dpte request + else + rq_dlg_param->dpte_bytes_per_row_ub = rq_dlg_param->dpte_req_per_row_ub * 64; //64B dpte request + + rq_dlg_param->dpte_row_height = 1 << log2_dpte_row_height; + + // the dpte_group_bytes is reduced for the specific case of vertical + // access of a tile surface that has dpte request of 8x1 ptes. + if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group + rq_sizing_param->dpte_group_bytes = 512; + else + //full size + rq_sizing_param->dpte_group_bytes = 2048; + + //since pte request size is 64byte, the number of data pte requests per full sized group is as follows. + log2_dpte_group_bytes = dml_log2(rq_sizing_param->dpte_group_bytes); + log2_dpte_group_length = log2_dpte_group_bytes - 6; //length in 64b requests + + // full sized data pte group width in elements + if (!surf_vert) + log2_dpte_group_width = log2_dpte_group_length + log2_dpte_req_width; + else + log2_dpte_group_width = log2_dpte_group_length + log2_dpte_req_height; + + //But if the tile block >=64KB and the page size is 4KB, then each dPTE request is 2*64B + if ((log2_blk_bytes >= 16) && (log2_vmpg_bytes == 12)) // tile block >= 64KB + log2_dpte_group_width = log2_dpte_group_width - 1; + + dpte_group_width = 1 << log2_dpte_group_width; + + // since dpte groups are only aligned to dpte_req_width and not dpte_group_width, + // the upper bound for the dpte groups per row is as follows. + rq_dlg_param->dpte_groups_per_row_ub = dml_ceil((double) dpte_row_width_ub / dpte_group_width, + 1); +} + +static void get_surf_rq_param(struct display_mode_lib *mode_lib, + display_data_rq_sizing_params_st *rq_sizing_param, + display_data_rq_dlg_params_st *rq_dlg_param, + display_data_rq_misc_params_st *rq_misc_param, + const display_pipe_source_params_st pipe_src_param, + bool is_chroma) +{ + bool mode_422 = 0; + unsigned int vp_width = 0; + unsigned int vp_height = 0; + unsigned int data_pitch = 0; + unsigned int meta_pitch = 0; + unsigned int ppe = mode_422 ? 
2 : 1; + + // FIXME check if ppe apply for both luma and chroma in 422 case + if (is_chroma) { + vp_width = pipe_src_param.viewport_width_c / ppe; + vp_height = pipe_src_param.viewport_height_c; + data_pitch = pipe_src_param.data_pitch_c; + meta_pitch = pipe_src_param.meta_pitch_c; + } else { + vp_width = pipe_src_param.viewport_width / ppe; + vp_height = pipe_src_param.viewport_height; + data_pitch = pipe_src_param.data_pitch; + meta_pitch = pipe_src_param.meta_pitch; + } + + rq_sizing_param->chunk_bytes = 8192; + + if (rq_sizing_param->chunk_bytes == 64 * 1024) + rq_sizing_param->min_chunk_bytes = 0; + else + rq_sizing_param->min_chunk_bytes = 1024; + + rq_sizing_param->meta_chunk_bytes = 2048; + rq_sizing_param->min_meta_chunk_bytes = 256; + + rq_sizing_param->mpte_group_bytes = 2048; + + get_meta_and_pte_attr(mode_lib, + rq_dlg_param, + rq_misc_param, + rq_sizing_param, + vp_width, + vp_height, + data_pitch, + meta_pitch, + pipe_src_param.source_format, + pipe_src_param.sw_mode, + pipe_src_param.macro_tile_size, + pipe_src_param.source_scan, + is_chroma); +} + +static void dml20v2_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib, + display_rq_params_st *rq_param, + const display_pipe_source_params_st pipe_src_param) +{ + // get param for luma surface + rq_param->yuv420 = pipe_src_param.source_format == dm_420_8 + || pipe_src_param.source_format == dm_420_10; + rq_param->yuv420_10bpc = pipe_src_param.source_format == dm_420_10; + + get_surf_rq_param(mode_lib, + &(rq_param->sizing.rq_l), + &(rq_param->dlg.rq_l), + &(rq_param->misc.rq_l), + pipe_src_param, + 0); + + if (is_dual_plane((enum source_format_class)(pipe_src_param.source_format))) { + // get param for chroma surface + get_surf_rq_param(mode_lib, + &(rq_param->sizing.rq_c), + &(rq_param->dlg.rq_c), + &(rq_param->misc.rq_c), + pipe_src_param, + 1); + } + + // calculate how to split the det buffer space between luma and chroma + handle_det_buf_split(mode_lib, rq_param, pipe_src_param); + print__rq_params_st(mode_lib, *rq_param); +} + +void dml20v2_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib, + display_rq_regs_st *rq_regs, + const display_pipe_params_st pipe_param) +{ + display_rq_params_st rq_param = {0}; + + memset(rq_regs, 0, sizeof(*rq_regs)); + dml20v2_rq_dlg_get_rq_params(mode_lib, &rq_param, pipe_param.src); + extract_rq_regs(mode_lib, rq_regs, rq_param); + + print__rq_regs_st(mode_lib, *rq_regs); +} + +// Note: currently taken in as is. +// Nice to decouple code from hw register implement and extract code that are repeated for luma and chroma. 
+static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, + const display_e2e_pipe_params_st *e2e_pipe_param, + const unsigned int num_pipes, + const unsigned int pipe_idx, + display_dlg_regs_st *disp_dlg_regs, + display_ttu_regs_st *disp_ttu_regs, + const display_rq_dlg_params_st rq_dlg_param, + const display_dlg_sys_params_st dlg_sys_param, + const bool cstate_en, + const bool pstate_en) +{ + const display_pipe_source_params_st *src = &e2e_pipe_param[pipe_idx].pipe.src; + const display_pipe_dest_params_st *dst = &e2e_pipe_param[pipe_idx].pipe.dest; + const display_output_params_st *dout = &e2e_pipe_param[pipe_idx].dout; + const display_clocks_and_cfg_st *clks = &e2e_pipe_param[pipe_idx].clks_cfg; + const scaler_ratio_depth_st *scl = &e2e_pipe_param[pipe_idx].pipe.scale_ratio_depth; + const scaler_taps_st *taps = &e2e_pipe_param[pipe_idx].pipe.scale_taps; + + // ------------------------- + // Section 1.15.2.1: OTG dependent Params + // ------------------------- + // Timing + unsigned int htotal = dst->htotal; +// unsigned int hblank_start = dst.hblank_start; // TODO: Remove + unsigned int hblank_end = dst->hblank_end; + unsigned int vblank_start = dst->vblank_start; + unsigned int vblank_end = dst->vblank_end; + unsigned int min_vblank = mode_lib->ip.min_vblank_lines; + + double dppclk_freq_in_mhz = clks->dppclk_mhz; + double dispclk_freq_in_mhz = clks->dispclk_mhz; + double refclk_freq_in_mhz = clks->refclk_mhz; + double pclk_freq_in_mhz = dst->pixel_rate_mhz; + bool interlaced = dst->interlaced; + + double ref_freq_to_pix_freq = refclk_freq_in_mhz / pclk_freq_in_mhz; + + double min_dcfclk_mhz; + double t_calc_us; + double min_ttu_vblank; + + double min_dst_y_ttu_vblank; + unsigned int dlg_vblank_start; + bool dual_plane; + bool mode_422; + unsigned int access_dir; + unsigned int vp_height_l; + unsigned int vp_width_l; + unsigned int vp_height_c; + unsigned int vp_width_c; + + // Scaling + unsigned int htaps_l; + unsigned int htaps_c; + double hratio_l; + double hratio_c; + double vratio_l; + double vratio_c; + bool scl_enable; + + double line_time_in_us; + // double vinit_l; + // double vinit_c; + // double vinit_bot_l; + // double vinit_bot_c; + + // unsigned int swath_height_l; + unsigned int swath_width_ub_l; + // unsigned int dpte_bytes_per_row_ub_l; + unsigned int dpte_groups_per_row_ub_l; + // unsigned int meta_pte_bytes_per_frame_ub_l; + // unsigned int meta_bytes_per_row_ub_l; + + // unsigned int swath_height_c; + unsigned int swath_width_ub_c; + // unsigned int dpte_bytes_per_row_ub_c; + unsigned int dpte_groups_per_row_ub_c; + + unsigned int meta_chunks_per_row_ub_l; + unsigned int meta_chunks_per_row_ub_c; + unsigned int vupdate_offset; + unsigned int vupdate_width; + unsigned int vready_offset; + + unsigned int dppclk_delay_subtotal; + unsigned int dispclk_delay_subtotal; + unsigned int pixel_rate_delay_subtotal; + + unsigned int vstartup_start; + unsigned int dst_x_after_scaler; + unsigned int dst_y_after_scaler; + double line_wait; + double dst_y_prefetch; + double dst_y_per_vm_vblank; + double dst_y_per_row_vblank; + double dst_y_per_vm_flip; + double dst_y_per_row_flip; + double min_dst_y_per_vm_vblank; + double min_dst_y_per_row_vblank; + double lsw; + double vratio_pre_l; + double vratio_pre_c; + unsigned int req_per_swath_ub_l; + unsigned int req_per_swath_ub_c; + unsigned int meta_row_height_l; + unsigned int meta_row_height_c; + unsigned int swath_width_pixels_ub_l; + unsigned int swath_width_pixels_ub_c; + unsigned int scaler_rec_in_width_l; + 
unsigned int scaler_rec_in_width_c; + unsigned int dpte_row_height_l; + unsigned int dpte_row_height_c; + double hscale_pixel_rate_l; + double hscale_pixel_rate_c; + double min_hratio_fact_l; + double min_hratio_fact_c; + double refcyc_per_line_delivery_pre_l; + double refcyc_per_line_delivery_pre_c; + double refcyc_per_line_delivery_l; + double refcyc_per_line_delivery_c; + + double refcyc_per_req_delivery_pre_l; + double refcyc_per_req_delivery_pre_c; + double refcyc_per_req_delivery_l; + double refcyc_per_req_delivery_c; + + unsigned int full_recout_width; + double xfc_transfer_delay; + double xfc_precharge_delay; + double xfc_remote_surface_flip_latency; + double xfc_dst_y_delta_drq_limit; + double xfc_prefetch_margin; + double refcyc_per_req_delivery_pre_cur0; + double refcyc_per_req_delivery_cur0; + double refcyc_per_req_delivery_pre_cur1; + double refcyc_per_req_delivery_cur1; + + memset(disp_dlg_regs, 0, sizeof(*disp_dlg_regs)); + memset(disp_ttu_regs, 0, sizeof(*disp_ttu_regs)); + + dml_print("DML_DLG: %s: cstate_en = %d\n", __func__, cstate_en); + dml_print("DML_DLG: %s: pstate_en = %d\n", __func__, pstate_en); + + dml_print("DML_DLG: %s: dppclk_freq_in_mhz = %3.2f\n", __func__, dppclk_freq_in_mhz); + dml_print("DML_DLG: %s: dispclk_freq_in_mhz = %3.2f\n", __func__, dispclk_freq_in_mhz); + dml_print("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, refclk_freq_in_mhz); + dml_print("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, pclk_freq_in_mhz); + dml_print("DML_DLG: %s: interlaced = %d\n", __func__, interlaced); + ASSERT(ref_freq_to_pix_freq < 4.0); + + disp_dlg_regs->ref_freq_to_pix_freq = + (unsigned int) (ref_freq_to_pix_freq * dml_pow(2, 19)); + disp_dlg_regs->refcyc_per_htotal = (unsigned int) (ref_freq_to_pix_freq * (double) htotal + * dml_pow(2, 8)); + disp_dlg_regs->dlg_vblank_end = interlaced ? (vblank_end / 2) : vblank_end; // 15 bits + disp_dlg_regs->refcyc_h_blank_end = (unsigned int) ((double) hblank_end + * (double) ref_freq_to_pix_freq); + ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int) dml_pow(2, 13)); + + min_dcfclk_mhz = dlg_sys_param.deepsleep_dcfclk_mhz; + t_calc_us = get_tcalc(mode_lib, e2e_pipe_param, num_pipes); + min_ttu_vblank = get_min_ttu_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); + + min_dst_y_ttu_vblank = min_ttu_vblank * pclk_freq_in_mhz / (double) htotal; + dlg_vblank_start = interlaced ? 
(vblank_start / 2) : vblank_start; + + disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) dlg_vblank_start + + min_dst_y_ttu_vblank) * dml_pow(2, 2)); + ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int) dml_pow(2, 18)); + + dml_print("DML_DLG: %s: min_dcfclk_mhz = %3.2f\n", + __func__, + min_dcfclk_mhz); + dml_print("DML_DLG: %s: min_ttu_vblank = %3.2f\n", + __func__, + min_ttu_vblank); + dml_print("DML_DLG: %s: min_dst_y_ttu_vblank = %3.2f\n", + __func__, + min_dst_y_ttu_vblank); + dml_print("DML_DLG: %s: t_calc_us = %3.2f\n", + __func__, + t_calc_us); + dml_print("DML_DLG: %s: disp_dlg_regs->min_dst_y_next_start = 0x%0x\n", + __func__, + disp_dlg_regs->min_dst_y_next_start); + dml_print("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n", + __func__, + ref_freq_to_pix_freq); + + // ------------------------- + // Section 1.15.2.2: Prefetch, Active and TTU + // ------------------------- + // Prefetch Calc + // Source +// dcc_en = src.dcc; + dual_plane = is_dual_plane((enum source_format_class)(src->source_format)); + mode_422 = 0; // FIXME + access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed +// bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0); +// bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1); + vp_height_l = src->viewport_height; + vp_width_l = src->viewport_width; + vp_height_c = src->viewport_height_c; + vp_width_c = src->viewport_width_c; + + // Scaling + htaps_l = taps->htaps; + htaps_c = taps->htaps_c; + hratio_l = scl->hscl_ratio; + hratio_c = scl->hscl_ratio_c; + vratio_l = scl->vscl_ratio; + vratio_c = scl->vscl_ratio_c; + scl_enable = scl->scl_enable; + + line_time_in_us = (htotal / pclk_freq_in_mhz); +// vinit_l = scl.vinit; +// vinit_c = scl.vinit_c; +// vinit_bot_l = scl.vinit_bot; +// vinit_bot_c = scl.vinit_bot_c; + +// unsigned int swath_height_l = rq_dlg_param.rq_l.swath_height; + swath_width_ub_l = rq_dlg_param.rq_l.swath_width_ub; +// unsigned int dpte_bytes_per_row_ub_l = rq_dlg_param.rq_l.dpte_bytes_per_row_ub; + dpte_groups_per_row_ub_l = rq_dlg_param.rq_l.dpte_groups_per_row_ub; +// unsigned int meta_pte_bytes_per_frame_ub_l = rq_dlg_param.rq_l.meta_pte_bytes_per_frame_ub; +// unsigned int meta_bytes_per_row_ub_l = rq_dlg_param.rq_l.meta_bytes_per_row_ub; + +// unsigned int swath_height_c = rq_dlg_param.rq_c.swath_height; + swath_width_ub_c = rq_dlg_param.rq_c.swath_width_ub; + // dpte_bytes_per_row_ub_c = rq_dlg_param.rq_c.dpte_bytes_per_row_ub; + dpte_groups_per_row_ub_c = rq_dlg_param.rq_c.dpte_groups_per_row_ub; + + meta_chunks_per_row_ub_l = rq_dlg_param.rq_l.meta_chunks_per_row_ub; + meta_chunks_per_row_ub_c = rq_dlg_param.rq_c.meta_chunks_per_row_ub; + vupdate_offset = dst->vupdate_offset; + vupdate_width = dst->vupdate_width; + vready_offset = dst->vready_offset; + + dppclk_delay_subtotal = mode_lib->ip.dppclk_delay_subtotal; + dispclk_delay_subtotal = mode_lib->ip.dispclk_delay_subtotal; + + if (scl_enable) + dppclk_delay_subtotal += mode_lib->ip.dppclk_delay_scl; + else + dppclk_delay_subtotal += mode_lib->ip.dppclk_delay_scl_lb_only; + + dppclk_delay_subtotal += mode_lib->ip.dppclk_delay_cnvc_formatter + + src->num_cursors * mode_lib->ip.dppclk_delay_cnvc_cursor; + + if (dout->dsc_enable) { + double dsc_delay = get_dsc_delay(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); + + dispclk_delay_subtotal += dsc_delay; + } + + pixel_rate_delay_subtotal = dppclk_delay_subtotal * pclk_freq_in_mhz / dppclk_freq_in_mhz + + 
dispclk_delay_subtotal * pclk_freq_in_mhz / dispclk_freq_in_mhz; + + vstartup_start = dst->vstartup_start; + if (interlaced) { + if (vstartup_start / 2.0 + - (double) (vready_offset + vupdate_width + vupdate_offset) / htotal + <= vblank_end / 2.0) + disp_dlg_regs->vready_after_vcount0 = 1; + else + disp_dlg_regs->vready_after_vcount0 = 0; + } else { + if (vstartup_start + - (double) (vready_offset + vupdate_width + vupdate_offset) / htotal + <= vblank_end) + disp_dlg_regs->vready_after_vcount0 = 1; + else + disp_dlg_regs->vready_after_vcount0 = 0; + } + + // TODO: Where is this coming from? + if (interlaced) + vstartup_start = vstartup_start / 2; + + // TODO: What if this min_vblank doesn't match the value in the dml_config_settings.cpp? + if (vstartup_start >= min_vblank) { + dml_print("WARNING: DML_DLG: %s: vblank_start=%d vblank_end=%d\n", + __func__, + vblank_start, + vblank_end); + dml_print("WARNING: DML_DLG: %s: vstartup_start=%d should be less than min_vblank=%d\n", + __func__, + vstartup_start, + min_vblank); + min_vblank = vstartup_start + 1; + dml_print("WARNING: DML_DLG: %s: vstartup_start=%d should be less than min_vblank=%d\n", + __func__, + vstartup_start, + min_vblank); + } + + dst_x_after_scaler = get_dst_x_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); + dst_y_after_scaler = get_dst_y_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); + + dml_print("DML_DLG: %s: htotal = %d\n", __func__, htotal); + dml_print("DML_DLG: %s: pixel_rate_delay_subtotal = %d\n", + __func__, + pixel_rate_delay_subtotal); + dml_print("DML_DLG: %s: dst_x_after_scaler = %d\n", + __func__, + dst_x_after_scaler); + dml_print("DML_DLG: %s: dst_y_after_scaler = %d\n", + __func__, + dst_y_after_scaler); + + // Lwait + line_wait = mode_lib->soc.urgent_latency_us; + if (cstate_en) + line_wait = dml_max(mode_lib->soc.sr_enter_plus_exit_time_us, line_wait); + if (pstate_en) + line_wait = dml_max(mode_lib->soc.dram_clock_change_latency_us + + mode_lib->soc.urgent_latency_us, + line_wait); + line_wait = line_wait / line_time_in_us; + + dst_y_prefetch = get_dst_y_prefetch(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); + dml_print("DML_DLG: %s: dst_y_prefetch (after rnd) = %3.2f\n", __func__, dst_y_prefetch); + + dst_y_per_vm_vblank = get_dst_y_per_vm_vblank(mode_lib, + e2e_pipe_param, + num_pipes, + pipe_idx); + dst_y_per_row_vblank = get_dst_y_per_row_vblank(mode_lib, + e2e_pipe_param, + num_pipes, + pipe_idx); + dst_y_per_vm_flip = get_dst_y_per_vm_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); + dst_y_per_row_flip = get_dst_y_per_row_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); + + min_dst_y_per_vm_vblank = 8.0; + min_dst_y_per_row_vblank = 16.0; + + // magic! 
+ if (htotal <= 75) { + min_vblank = 300; + min_dst_y_per_vm_vblank = 100.0; + min_dst_y_per_row_vblank = 100.0; + } + + dml_print("DML_DLG: %s: dst_y_per_vm_vblank = %3.2f\n", __func__, dst_y_per_vm_vblank); + dml_print("DML_DLG: %s: dst_y_per_row_vblank = %3.2f\n", __func__, dst_y_per_row_vblank); + + ASSERT(dst_y_per_vm_vblank < min_dst_y_per_vm_vblank); + ASSERT(dst_y_per_row_vblank < min_dst_y_per_row_vblank); + + ASSERT(dst_y_prefetch > (dst_y_per_vm_vblank + dst_y_per_row_vblank)); + lsw = dst_y_prefetch - (dst_y_per_vm_vblank + dst_y_per_row_vblank); + + dml_print("DML_DLG: %s: lsw = %3.2f\n", __func__, lsw); + + vratio_pre_l = get_vratio_prefetch_l(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); + vratio_pre_c = get_vratio_prefetch_c(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); + + dml_print("DML_DLG: %s: vratio_pre_l=%3.2f\n", __func__, vratio_pre_l); + dml_print("DML_DLG: %s: vratio_pre_c=%3.2f\n", __func__, vratio_pre_c); + + // Active + req_per_swath_ub_l = rq_dlg_param.rq_l.req_per_swath_ub; + req_per_swath_ub_c = rq_dlg_param.rq_c.req_per_swath_ub; + meta_row_height_l = rq_dlg_param.rq_l.meta_row_height; + meta_row_height_c = rq_dlg_param.rq_c.meta_row_height; + swath_width_pixels_ub_l = 0; + swath_width_pixels_ub_c = 0; + scaler_rec_in_width_l = 0; + scaler_rec_in_width_c = 0; + dpte_row_height_l = rq_dlg_param.rq_l.dpte_row_height; + dpte_row_height_c = rq_dlg_param.rq_c.dpte_row_height; + + if (mode_422) { + swath_width_pixels_ub_l = swath_width_ub_l * 2; // *2 for 2 pixel per element + swath_width_pixels_ub_c = swath_width_ub_c * 2; + } else { + swath_width_pixels_ub_l = swath_width_ub_l * 1; + swath_width_pixels_ub_c = swath_width_ub_c * 1; + } + + hscale_pixel_rate_l = 0.; + hscale_pixel_rate_c = 0.; + min_hratio_fact_l = 1.0; + min_hratio_fact_c = 1.0; + + if (htaps_l <= 1) + min_hratio_fact_l = 2.0; + else if (htaps_l <= 6) { + if ((hratio_l * 2.0) > 4.0) + min_hratio_fact_l = 4.0; + else + min_hratio_fact_l = hratio_l * 2.0; + } else { + if (hratio_l > 4.0) + min_hratio_fact_l = 4.0; + else + min_hratio_fact_l = hratio_l; + } + + hscale_pixel_rate_l = min_hratio_fact_l * dppclk_freq_in_mhz; + + if (htaps_c <= 1) + min_hratio_fact_c = 2.0; + else if (htaps_c <= 6) { + if ((hratio_c * 2.0) > 4.0) + min_hratio_fact_c = 4.0; + else + min_hratio_fact_c = hratio_c * 2.0; + } else { + if (hratio_c > 4.0) + min_hratio_fact_c = 4.0; + else + min_hratio_fact_c = hratio_c; + } + + hscale_pixel_rate_c = min_hratio_fact_c * dppclk_freq_in_mhz; + + refcyc_per_line_delivery_pre_l = 0.; + refcyc_per_line_delivery_pre_c = 0.; + refcyc_per_line_delivery_l = 0.; + refcyc_per_line_delivery_c = 0.; + + refcyc_per_req_delivery_pre_l = 0.; + refcyc_per_req_delivery_pre_c = 0.; + refcyc_per_req_delivery_l = 0.; + refcyc_per_req_delivery_c = 0.; + + full_recout_width = 0; + // In ODM + if (src->is_hsplit) { + // This "hack" is only allowed (and valid) for MPC combine. 
In ODM + // combine, you MUST specify the full_recout_width...according to Oswin + if (dst->full_recout_width == 0 && !dst->odm_combine) { + dml_print("DML_DLG: %s: Warning: full_recout_width not set in hsplit mode\n", + __func__); + full_recout_width = dst->recout_width * 2; // assume half split for dcn1 + } else + full_recout_width = dst->full_recout_width; + } else + full_recout_width = dst->recout_width; + + // As of DCN2, mpc_combine and odm_combine are mutually exclusive + refcyc_per_line_delivery_pre_l = get_refcyc_per_delivery(mode_lib, + refclk_freq_in_mhz, + pclk_freq_in_mhz, + dst->odm_combine, + full_recout_width, + dst->hactive, + vratio_pre_l, + hscale_pixel_rate_l, + swath_width_pixels_ub_l, + 1); // per line + + refcyc_per_line_delivery_l = get_refcyc_per_delivery(mode_lib, + refclk_freq_in_mhz, + pclk_freq_in_mhz, + dst->odm_combine, + full_recout_width, + dst->hactive, + vratio_l, + hscale_pixel_rate_l, + swath_width_pixels_ub_l, + 1); // per line + + dml_print("DML_DLG: %s: full_recout_width = %d\n", + __func__, + full_recout_width); + dml_print("DML_DLG: %s: hscale_pixel_rate_l = %3.2f\n", + __func__, + hscale_pixel_rate_l); + dml_print("DML_DLG: %s: refcyc_per_line_delivery_pre_l = %3.2f\n", + __func__, + refcyc_per_line_delivery_pre_l); + dml_print("DML_DLG: %s: refcyc_per_line_delivery_l = %3.2f\n", + __func__, + refcyc_per_line_delivery_l); + + if (dual_plane) { + refcyc_per_line_delivery_pre_c = get_refcyc_per_delivery(mode_lib, + refclk_freq_in_mhz, + pclk_freq_in_mhz, + dst->odm_combine, + full_recout_width, + dst->hactive, + vratio_pre_c, + hscale_pixel_rate_c, + swath_width_pixels_ub_c, + 1); // per line + + refcyc_per_line_delivery_c = get_refcyc_per_delivery(mode_lib, + refclk_freq_in_mhz, + pclk_freq_in_mhz, + dst->odm_combine, + full_recout_width, + dst->hactive, + vratio_c, + hscale_pixel_rate_c, + swath_width_pixels_ub_c, + 1); // per line + + dml_print("DML_DLG: %s: refcyc_per_line_delivery_pre_c = %3.2f\n", + __func__, + refcyc_per_line_delivery_pre_c); + dml_print("DML_DLG: %s: refcyc_per_line_delivery_c = %3.2f\n", + __func__, + refcyc_per_line_delivery_c); + } + + // TTU - Luma / Chroma + if (access_dir) { // vertical access + scaler_rec_in_width_l = vp_height_l; + scaler_rec_in_width_c = vp_height_c; + } else { + scaler_rec_in_width_l = vp_width_l; + scaler_rec_in_width_c = vp_width_c; + } + + refcyc_per_req_delivery_pre_l = get_refcyc_per_delivery(mode_lib, + refclk_freq_in_mhz, + pclk_freq_in_mhz, + dst->odm_combine, + full_recout_width, + dst->hactive, + vratio_pre_l, + hscale_pixel_rate_l, + scaler_rec_in_width_l, + req_per_swath_ub_l); // per req + refcyc_per_req_delivery_l = get_refcyc_per_delivery(mode_lib, + refclk_freq_in_mhz, + pclk_freq_in_mhz, + dst->odm_combine, + full_recout_width, + dst->hactive, + vratio_l, + hscale_pixel_rate_l, + scaler_rec_in_width_l, + req_per_swath_ub_l); // per req + + dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_l = %3.2f\n", + __func__, + refcyc_per_req_delivery_pre_l); + dml_print("DML_DLG: %s: refcyc_per_req_delivery_l = %3.2f\n", + __func__, + refcyc_per_req_delivery_l); + + ASSERT(refcyc_per_req_delivery_pre_l < dml_pow(2, 13)); + ASSERT(refcyc_per_req_delivery_l < dml_pow(2, 13)); + + if (dual_plane) { + refcyc_per_req_delivery_pre_c = get_refcyc_per_delivery(mode_lib, + refclk_freq_in_mhz, + pclk_freq_in_mhz, + dst->odm_combine, + full_recout_width, + dst->hactive, + vratio_pre_c, + hscale_pixel_rate_c, + scaler_rec_in_width_c, + req_per_swath_ub_c); // per req + refcyc_per_req_delivery_c = 
get_refcyc_per_delivery(mode_lib, + refclk_freq_in_mhz, + pclk_freq_in_mhz, + dst->odm_combine, + full_recout_width, + dst->hactive, + vratio_c, + hscale_pixel_rate_c, + scaler_rec_in_width_c, + req_per_swath_ub_c); // per req + + dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_c = %3.2f\n", + __func__, + refcyc_per_req_delivery_pre_c); + dml_print("DML_DLG: %s: refcyc_per_req_delivery_c = %3.2f\n", + __func__, + refcyc_per_req_delivery_c); + + ASSERT(refcyc_per_req_delivery_pre_c < dml_pow(2, 13)); + ASSERT(refcyc_per_req_delivery_c < dml_pow(2, 13)); + } + + // XFC + xfc_transfer_delay = get_xfc_transfer_delay(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); + xfc_precharge_delay = get_xfc_precharge_delay(mode_lib, + e2e_pipe_param, + num_pipes, + pipe_idx); + xfc_remote_surface_flip_latency = get_xfc_remote_surface_flip_latency(mode_lib, + e2e_pipe_param, + num_pipes, + pipe_idx); + xfc_dst_y_delta_drq_limit = xfc_remote_surface_flip_latency; + xfc_prefetch_margin = get_xfc_prefetch_margin(mode_lib, + e2e_pipe_param, + num_pipes, + pipe_idx); + + // TTU - Cursor + refcyc_per_req_delivery_pre_cur0 = 0.0; + refcyc_per_req_delivery_cur0 = 0.0; + if (src->num_cursors > 0) { + calculate_ttu_cursor(mode_lib, + &refcyc_per_req_delivery_pre_cur0, + &refcyc_per_req_delivery_cur0, + refclk_freq_in_mhz, + ref_freq_to_pix_freq, + hscale_pixel_rate_l, + scl->hscl_ratio, + vratio_pre_l, + vratio_l, + src->cur0_src_width, + (enum cursor_bpp)(src->cur0_bpp)); + } + + refcyc_per_req_delivery_pre_cur1 = 0.0; + refcyc_per_req_delivery_cur1 = 0.0; + if (src->num_cursors > 1) { + calculate_ttu_cursor(mode_lib, + &refcyc_per_req_delivery_pre_cur1, + &refcyc_per_req_delivery_cur1, + refclk_freq_in_mhz, + ref_freq_to_pix_freq, + hscale_pixel_rate_l, + scl->hscl_ratio, + vratio_pre_l, + vratio_l, + src->cur1_src_width, + (enum cursor_bpp)(src->cur1_bpp)); + } + + // TTU - Misc + // all hard-coded + + // Assignment to register structures + disp_dlg_regs->dst_y_after_scaler = dst_y_after_scaler; // in terms of line + disp_dlg_regs->refcyc_x_after_scaler = dst_x_after_scaler * ref_freq_to_pix_freq; // in terms of refclk + ASSERT(disp_dlg_regs->refcyc_x_after_scaler < (unsigned int) dml_pow(2, 13)); + disp_dlg_regs->dst_y_prefetch = (unsigned int) (dst_y_prefetch * dml_pow(2, 2)); + disp_dlg_regs->dst_y_per_vm_vblank = (unsigned int) (dst_y_per_vm_vblank * dml_pow(2, 2)); + disp_dlg_regs->dst_y_per_row_vblank = (unsigned int) (dst_y_per_row_vblank * dml_pow(2, 2)); + disp_dlg_regs->dst_y_per_vm_flip = (unsigned int) (dst_y_per_vm_flip * dml_pow(2, 2)); + disp_dlg_regs->dst_y_per_row_flip = (unsigned int) (dst_y_per_row_flip * dml_pow(2, 2)); + + disp_dlg_regs->vratio_prefetch = (unsigned int) (vratio_pre_l * dml_pow(2, 19)); + disp_dlg_regs->vratio_prefetch_c = (unsigned int) (vratio_pre_c * dml_pow(2, 19)); + + disp_dlg_regs->refcyc_per_pte_group_vblank_l = + (unsigned int) (dst_y_per_row_vblank * (double) htotal + * ref_freq_to_pix_freq / (double) dpte_groups_per_row_ub_l); + ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int) dml_pow(2, 13)); + + if (dual_plane) { + disp_dlg_regs->refcyc_per_pte_group_vblank_c = (unsigned int) (dst_y_per_row_vblank + * (double) htotal * ref_freq_to_pix_freq + / (double) dpte_groups_per_row_ub_c); + ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c + < (unsigned int) dml_pow(2, 13)); + } + + disp_dlg_regs->refcyc_per_meta_chunk_vblank_l = + (unsigned int) (dst_y_per_row_vblank * (double) htotal + * ref_freq_to_pix_freq / (double) meta_chunks_per_row_ub_l); + 
ASSERT(disp_dlg_regs->refcyc_per_meta_chunk_vblank_l < (unsigned int) dml_pow(2, 13)); + + disp_dlg_regs->refcyc_per_meta_chunk_vblank_c = + disp_dlg_regs->refcyc_per_meta_chunk_vblank_l; // dcc for 4:2:0 is not supported in dcn1.0. assigned to be the same as _l for now + + disp_dlg_regs->refcyc_per_pte_group_flip_l = (unsigned int) (dst_y_per_row_flip * htotal + * ref_freq_to_pix_freq) / dpte_groups_per_row_ub_l; + disp_dlg_regs->refcyc_per_meta_chunk_flip_l = (unsigned int) (dst_y_per_row_flip * htotal + * ref_freq_to_pix_freq) / meta_chunks_per_row_ub_l; + + if (dual_plane) { + disp_dlg_regs->refcyc_per_pte_group_flip_c = (unsigned int) (dst_y_per_row_flip + * htotal * ref_freq_to_pix_freq) / dpte_groups_per_row_ub_c; + disp_dlg_regs->refcyc_per_meta_chunk_flip_c = (unsigned int) (dst_y_per_row_flip + * htotal * ref_freq_to_pix_freq) / meta_chunks_per_row_ub_c; + } + + disp_dlg_regs->dst_y_per_pte_row_nom_l = (unsigned int) ((double) dpte_row_height_l + / (double) vratio_l * dml_pow(2, 2)); + ASSERT(disp_dlg_regs->dst_y_per_pte_row_nom_l < (unsigned int) dml_pow(2, 17)); + + if (dual_plane) { + disp_dlg_regs->dst_y_per_pte_row_nom_c = (unsigned int) ((double) dpte_row_height_c + / (double) vratio_c * dml_pow(2, 2)); + if (disp_dlg_regs->dst_y_per_pte_row_nom_c >= (unsigned int) dml_pow(2, 17)) { + dml_print("DML_DLG: %s: Warning dst_y_per_pte_row_nom_c %u larger than supported by register format U15.2 %u\n", + __func__, + disp_dlg_regs->dst_y_per_pte_row_nom_c, + (unsigned int) dml_pow(2, 17) - 1); + } + } + + disp_dlg_regs->dst_y_per_meta_row_nom_l = (unsigned int) ((double) meta_row_height_l + / (double) vratio_l * dml_pow(2, 2)); + ASSERT(disp_dlg_regs->dst_y_per_meta_row_nom_l < (unsigned int) dml_pow(2, 17)); + + disp_dlg_regs->dst_y_per_meta_row_nom_c = disp_dlg_regs->dst_y_per_meta_row_nom_l; // TODO: dcc for 4:2:0 is not supported in dcn1.0. assigned to be the same as _l for now + + disp_dlg_regs->refcyc_per_pte_group_nom_l = (unsigned int) ((double) dpte_row_height_l + / (double) vratio_l * (double) htotal * ref_freq_to_pix_freq + / (double) dpte_groups_per_row_ub_l); + if (disp_dlg_regs->refcyc_per_pte_group_nom_l >= (unsigned int) dml_pow(2, 23)) + disp_dlg_regs->refcyc_per_pte_group_nom_l = dml_pow(2, 23) - 1; + disp_dlg_regs->refcyc_per_meta_chunk_nom_l = (unsigned int) ((double) meta_row_height_l + / (double) vratio_l * (double) htotal * ref_freq_to_pix_freq + / (double) meta_chunks_per_row_ub_l); + if (disp_dlg_regs->refcyc_per_meta_chunk_nom_l >= (unsigned int) dml_pow(2, 23)) + disp_dlg_regs->refcyc_per_meta_chunk_nom_l = dml_pow(2, 23) - 1; + + if (dual_plane) { + disp_dlg_regs->refcyc_per_pte_group_nom_c = + (unsigned int) ((double) dpte_row_height_c / (double) vratio_c + * (double) htotal * ref_freq_to_pix_freq + / (double) dpte_groups_per_row_ub_c); + if (disp_dlg_regs->refcyc_per_pte_group_nom_c >= (unsigned int) dml_pow(2, 23)) + disp_dlg_regs->refcyc_per_pte_group_nom_c = dml_pow(2, 23) - 1; + + // TODO: Is this the right calculation? Does htotal need to be halved? 
+ disp_dlg_regs->refcyc_per_meta_chunk_nom_c = + (unsigned int) ((double) meta_row_height_c / (double) vratio_c + * (double) htotal * ref_freq_to_pix_freq + / (double) meta_chunks_per_row_ub_c); + if (disp_dlg_regs->refcyc_per_meta_chunk_nom_c >= (unsigned int) dml_pow(2, 23)) + disp_dlg_regs->refcyc_per_meta_chunk_nom_c = dml_pow(2, 23) - 1; + } + + disp_dlg_regs->refcyc_per_line_delivery_pre_l = (unsigned int) dml_floor(refcyc_per_line_delivery_pre_l, + 1); + disp_dlg_regs->refcyc_per_line_delivery_l = (unsigned int) dml_floor(refcyc_per_line_delivery_l, + 1); + ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_l < (unsigned int) dml_pow(2, 13)); + ASSERT(disp_dlg_regs->refcyc_per_line_delivery_l < (unsigned int) dml_pow(2, 13)); + + disp_dlg_regs->refcyc_per_line_delivery_pre_c = (unsigned int) dml_floor(refcyc_per_line_delivery_pre_c, + 1); + disp_dlg_regs->refcyc_per_line_delivery_c = (unsigned int) dml_floor(refcyc_per_line_delivery_c, + 1); + ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_c < (unsigned int) dml_pow(2, 13)); + ASSERT(disp_dlg_regs->refcyc_per_line_delivery_c < (unsigned int) dml_pow(2, 13)); + + disp_dlg_regs->chunk_hdl_adjust_cur0 = 3; + disp_dlg_regs->dst_y_offset_cur0 = 0; + disp_dlg_regs->chunk_hdl_adjust_cur1 = 3; + disp_dlg_regs->dst_y_offset_cur1 = 0; + + disp_dlg_regs->xfc_reg_transfer_delay = xfc_transfer_delay; + disp_dlg_regs->xfc_reg_precharge_delay = xfc_precharge_delay; + disp_dlg_regs->xfc_reg_remote_surface_flip_latency = xfc_remote_surface_flip_latency; + disp_dlg_regs->xfc_reg_prefetch_margin = dml_ceil(xfc_prefetch_margin * refclk_freq_in_mhz, + 1); + + // slave has to have this value also set to off + if (src->xfc_enable && !src->xfc_slave) + disp_dlg_regs->dst_y_delta_drq_limit = dml_ceil(xfc_dst_y_delta_drq_limit, 1); + else + disp_dlg_regs->dst_y_delta_drq_limit = 0x7fff; // off + + disp_ttu_regs->refcyc_per_req_delivery_pre_l = (unsigned int) (refcyc_per_req_delivery_pre_l + * dml_pow(2, 10)); + disp_ttu_regs->refcyc_per_req_delivery_l = (unsigned int) (refcyc_per_req_delivery_l + * dml_pow(2, 10)); + disp_ttu_regs->refcyc_per_req_delivery_pre_c = (unsigned int) (refcyc_per_req_delivery_pre_c + * dml_pow(2, 10)); + disp_ttu_regs->refcyc_per_req_delivery_c = (unsigned int) (refcyc_per_req_delivery_c + * dml_pow(2, 10)); + disp_ttu_regs->refcyc_per_req_delivery_pre_cur0 = + (unsigned int) (refcyc_per_req_delivery_pre_cur0 * dml_pow(2, 10)); + disp_ttu_regs->refcyc_per_req_delivery_cur0 = (unsigned int) (refcyc_per_req_delivery_cur0 + * dml_pow(2, 10)); + disp_ttu_regs->refcyc_per_req_delivery_pre_cur1 = + (unsigned int) (refcyc_per_req_delivery_pre_cur1 * dml_pow(2, 10)); + disp_ttu_regs->refcyc_per_req_delivery_cur1 = (unsigned int) (refcyc_per_req_delivery_cur1 + * dml_pow(2, 10)); + disp_ttu_regs->qos_level_low_wm = 0; + ASSERT(disp_ttu_regs->qos_level_low_wm < dml_pow(2, 14)); + disp_ttu_regs->qos_level_high_wm = (unsigned int) (4.0 * (double) htotal + * ref_freq_to_pix_freq); + /*ASSERT(disp_ttu_regs->qos_level_high_wm < dml_pow(2, 14));*/ + + disp_ttu_regs->qos_level_flip = 14; + disp_ttu_regs->qos_level_fixed_l = 8; + disp_ttu_regs->qos_level_fixed_c = 8; + disp_ttu_regs->qos_level_fixed_cur0 = 8; + disp_ttu_regs->qos_ramp_disable_l = 0; + disp_ttu_regs->qos_ramp_disable_c = 0; + disp_ttu_regs->qos_ramp_disable_cur0 = 0; + + disp_ttu_regs->min_ttu_vblank = min_ttu_vblank * refclk_freq_in_mhz; + ASSERT(disp_ttu_regs->min_ttu_vblank < dml_pow(2, 24)); + + print__ttu_regs_st(mode_lib, *disp_ttu_regs); + print__dlg_regs_st(mode_lib, 
*disp_dlg_regs); +} + +void dml20v2_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib, + display_dlg_regs_st *dlg_regs, + display_ttu_regs_st *ttu_regs, + display_e2e_pipe_params_st *e2e_pipe_param, + const unsigned int num_pipes, + const unsigned int pipe_idx, + const bool cstate_en, + const bool pstate_en, + const bool vm_en, + const bool ignore_viewport_pos, + const bool immediate_flip_support) +{ + display_rq_params_st rq_param = {0}; + display_dlg_sys_params_st dlg_sys_param = {0}; + + // Get watermark and Tex. + dlg_sys_param.t_urg_wm_us = get_wm_urgent(mode_lib, e2e_pipe_param, num_pipes); + dlg_sys_param.deepsleep_dcfclk_mhz = get_clk_dcf_deepsleep(mode_lib, + e2e_pipe_param, + num_pipes); + dlg_sys_param.t_extra_us = get_urgent_extra_latency(mode_lib, e2e_pipe_param, num_pipes); + dlg_sys_param.mem_trip_us = get_wm_memory_trip(mode_lib, e2e_pipe_param, num_pipes); + dlg_sys_param.t_mclk_wm_us = get_wm_dram_clock_change(mode_lib, e2e_pipe_param, num_pipes); + dlg_sys_param.t_sr_wm_us = get_wm_stutter_enter_exit(mode_lib, e2e_pipe_param, num_pipes); + dlg_sys_param.total_flip_bw = get_total_immediate_flip_bw(mode_lib, + e2e_pipe_param, + num_pipes); + dlg_sys_param.total_flip_bytes = get_total_immediate_flip_bytes(mode_lib, + e2e_pipe_param, + num_pipes); + dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency + / dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated + + print__dlg_sys_params_st(mode_lib, dlg_sys_param); + + // system parameter calculation done + + dml_print("DML_DLG: Calculation for pipe[%d] start\n\n", pipe_idx); + dml20v2_rq_dlg_get_rq_params(mode_lib, &rq_param, e2e_pipe_param[pipe_idx].pipe.src); + dml20v2_rq_dlg_get_dlg_params(mode_lib, + e2e_pipe_param, + num_pipes, + pipe_idx, + dlg_regs, + ttu_regs, + rq_param.dlg, + dlg_sys_param, + cstate_en, + pstate_en); + dml_print("DML_DLG: Calculation for pipe[%d] end\n", pipe_idx); +} + +static void calculate_ttu_cursor(struct display_mode_lib *mode_lib, + double *refcyc_per_req_delivery_pre_cur, + double *refcyc_per_req_delivery_cur, + double refclk_freq_in_mhz, + double ref_freq_to_pix_freq, + double hscale_pixel_rate_l, + double hscl_ratio, + double vratio_pre_l, + double vratio_l, + unsigned int cur_width, + enum cursor_bpp cur_bpp) +{ + unsigned int cur_src_width = cur_width; + unsigned int cur_req_size = 0; + unsigned int cur_req_width = 0; + double cur_width_ub = 0.0; + double cur_req_per_width = 0.0; + double hactive_cur = 0.0; + + ASSERT(cur_src_width <= 256); + + *refcyc_per_req_delivery_pre_cur = 0.0; + *refcyc_per_req_delivery_cur = 0.0; + if (cur_src_width > 0) { + unsigned int cur_bit_per_pixel = 0; + + if (cur_bpp == dm_cur_2bit) { + cur_req_size = 64; // byte + cur_bit_per_pixel = 2; + } else { // 32bit + cur_bit_per_pixel = 32; + if (cur_src_width >= 1 && cur_src_width <= 16) + cur_req_size = 64; + else if (cur_src_width >= 17 && cur_src_width <= 31) + cur_req_size = 128; + else + cur_req_size = 256; + } + + cur_req_width = (double) cur_req_size / ((double) cur_bit_per_pixel / 8.0); + cur_width_ub = dml_ceil((double) cur_src_width / (double) cur_req_width, 1) + * (double) cur_req_width; + cur_req_per_width = cur_width_ub / (double) cur_req_width; + hactive_cur = (double) cur_src_width / hscl_ratio; // FIXME: oswin to think about what to do for cursor + + if (vratio_pre_l <= 1.0) { + *refcyc_per_req_delivery_pre_cur = hactive_cur * ref_freq_to_pix_freq + / (double) cur_req_per_width; + } else { + *refcyc_per_req_delivery_pre_cur = (double) refclk_freq_in_mhz + * (double) 
cur_src_width / hscale_pixel_rate_l + / (double) cur_req_per_width; + } + + ASSERT(*refcyc_per_req_delivery_pre_cur < dml_pow(2, 13)); + + if (vratio_l <= 1.0) { + *refcyc_per_req_delivery_cur = hactive_cur * ref_freq_to_pix_freq + / (double) cur_req_per_width; + } else { + *refcyc_per_req_delivery_cur = (double) refclk_freq_in_mhz + * (double) cur_src_width / hscale_pixel_rate_l + / (double) cur_req_per_width; + } + + dml_print("DML_DLG: %s: cur_req_width = %d\n", + __func__, + cur_req_width); + dml_print("DML_DLG: %s: cur_width_ub = %3.2f\n", + __func__, + cur_width_ub); + dml_print("DML_DLG: %s: cur_req_per_width = %3.2f\n", + __func__, + cur_req_per_width); + dml_print("DML_DLG: %s: hactive_cur = %3.2f\n", + __func__, + hactive_cur); + dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_cur = %3.2f\n", + __func__, + *refcyc_per_req_delivery_pre_cur); + dml_print("DML_DLG: %s: refcyc_per_req_delivery_cur = %3.2f\n", + __func__, + *refcyc_per_req_delivery_cur); + + ASSERT(*refcyc_per_req_delivery_cur < dml_pow(2, 13)); + } +} diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h new file mode 100644 index 000000000000..0378406bf7e7 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h @@ -0,0 +1,74 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DML20V2_DISPLAY_RQ_DLG_CALC_H__ +#define __DML20V2_DISPLAY_RQ_DLG_CALC_H__ + +#include "../dml_common_defs.h" +#include "../display_rq_dlg_helpers.h" + +struct display_mode_lib; + + +// Function: dml_rq_dlg_get_rq_reg +// Main entry point for test to get the register values out of this DML class. +// This function calls and fucntions to calculate +// and then populate the rq_regs struct +// Input: +// pipe_src_param - pipe source configuration (e.g. vp, pitch, etc.) +// Output: +// rq_regs - struct that holds all the RQ registers field value. 
+// See also: +void dml20v2_rq_dlg_get_rq_reg( + struct display_mode_lib *mode_lib, + display_rq_regs_st *rq_regs, + const display_pipe_params_st pipe_param); + + +// Function: dml_rq_dlg_get_dlg_reg +// Calculate and return DLG and TTU register struct given the system setting +// Output: +// dlg_regs - output DLG register struct +// ttu_regs - output DLG TTU register struct +// Input: +// e2e_pipe_param - "compacted" array of e2e pipe param struct +// num_pipes - num of active "pipe" or "route" +// pipe_idx - index that identifies the e2e_pipe_param that corresponding to this dlg +// cstate - 0: when calculate min_ttu_vblank it is assumed cstate is not required. 1: Normal mode, cstate is considered. +// Added for legacy or unrealistic timing tests. +void dml20v2_rq_dlg_get_dlg_reg( + struct display_mode_lib *mode_lib, + display_dlg_regs_st *dlg_regs, + display_ttu_regs_st *ttu_regs, + display_e2e_pipe_params_st *e2e_pipe_param, + const unsigned int num_pipes, + const unsigned int pipe_idx, + const bool cstate_en, + const bool pstate_en, + const bool vm_en, + const bool ignore_viewport_pos, + const bool immediate_flip_support); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c index 91810c7d5cf5..96dfcd8c36bc 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c @@ -28,6 +28,8 @@ #if defined(CONFIG_DRM_AMD_DC_DCN2_0) #include "dcn20/display_mode_vba_20.h" #include "dcn20/display_rq_dlg_calc_20.h" +#include "dcn20/display_mode_vba_20v2.h" +#include "dcn20/display_rq_dlg_calc_20v2.h" #endif #if defined(CONFIG_DRM_AMD_DC_DCN2_0) @@ -37,6 +39,13 @@ const struct dml_funcs dml20_funcs = { .rq_dlg_get_dlg_reg = dml20_rq_dlg_get_dlg_reg, .rq_dlg_get_rq_reg = dml20_rq_dlg_get_rq_reg }; + +const struct dml_funcs dml20v2_funcs = { + .validate = dml20v2_ModeSupportAndSystemConfigurationFull, + .recalculate = dml20v2_recalculate, + .rq_dlg_get_dlg_reg = dml20v2_rq_dlg_get_dlg_reg, + .rq_dlg_get_rq_reg = dml20v2_rq_dlg_get_rq_reg +}; #endif void dml_init_instance(struct display_mode_lib *lib, @@ -52,6 +61,9 @@ void dml_init_instance(struct display_mode_lib *lib, case DML_PROJECT_NAVI10: lib->funcs = dml20_funcs; break; + case DML_PROJECT_NAVI10v2: + lib->funcs = dml20v2_funcs; + break; #endif default: break; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h index 5bf13d67f289..870716e3c132 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h @@ -36,6 +36,7 @@ enum dml_project { DML_PROJECT_RAVEN1, #ifdef CONFIG_DRM_AMD_DC_DCN2_0 DML_PROJECT_NAVI10, + DML_PROJECT_NAVI10v2, #endif }; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h index 5678472546ab..ab34fd26702f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h @@ -100,6 +100,7 @@ struct _vcs_dpi_soc_bounding_box_st { unsigned int vmm_page_size_bytes; unsigned int hostvm_min_page_size_bytes; double dram_clock_change_latency_us; + double dummy_pstate_latency_us; double writeback_dram_clock_change_latency_us; unsigned int return_bus_width_bytes; unsigned int voltage_override; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c 
b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c index 4d2a1262d9db..88e63f16f7fc 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c @@ -568,6 +568,7 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib) if (src->is_hsplit) { for (k = j + 1; k < mode_lib->vba.cache_num_pipes; ++k) { display_pipe_source_params_st *src_k = &pipes[k].pipe.src; + display_pipe_dest_params_st *dst_k = &pipes[k].pipe.dest; if (src_k->is_hsplit && !visited[k] && src->hsplit_grp == src_k->hsplit_grp) { @@ -575,12 +576,15 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib) mode_lib->vba.NumberOfActivePlanes; mode_lib->vba.DPPPerPlane[mode_lib->vba.NumberOfActivePlanes]++; if (mode_lib->vba.SourceScan[mode_lib->vba.NumberOfActivePlanes] - == dm_horz) + == dm_horz) { mode_lib->vba.ViewportWidth[mode_lib->vba.NumberOfActivePlanes] += src_k->viewport_width; - else + mode_lib->vba.ScalerRecoutWidth[mode_lib->vba.NumberOfActivePlanes] += + dst_k->recout_width; + } else { mode_lib->vba.ViewportHeight[mode_lib->vba.NumberOfActivePlanes] += src_k->viewport_height; + } visited[k] = true; } -- cgit v1.2.3 From 9295f1113c74baab85a18628c8b2ef9db04dac14 Mon Sep 17 00:00:00 2001 From: Fatemeh Darbehani Date: Wed, 10 Jul 2019 10:34:31 -0400 Subject: drm/amd/display: Add SMU version field to clk_mgr_internal For some platforms, we need to know SMU version for driver/SMU compatibility. This change adds that field. Signed-off-by: Fatemeh Darbehani Reviewed-by: Yongqiang Sun Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h index 3c105124dcdd..4b5505fa980c 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h @@ -191,6 +191,7 @@ struct state_dependent_clocks { struct clk_mgr_internal { struct clk_mgr base; + int smu_ver; struct pp_smu_funcs *pp_smu; struct clk_mgr_internal_funcs *funcs; -- cgit v1.2.3 From 46825fcfbe162609393976e18f106dafddca6f67 Mon Sep 17 00:00:00 2001 From: Tony Cheng Date: Sun, 23 Jun 2019 12:07:02 -0500 Subject: drm/amd/display: avoid power gate domains that doesn't exist Signed-off-by: Tony Cheng Reviewed-by: Yongqiang Sun Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h | 4 ++-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 24 ++++++++++++++-------- 2 files changed, 17 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h index 3a49f1ffb5dd..245b80b92681 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h @@ -227,8 +227,8 @@ SR(DOMAIN7_PG_CONFIG), \ SR(DOMAIN8_PG_CONFIG), \ SR(DOMAIN9_PG_CONFIG), \ - SR(DOMAIN10_PG_CONFIG), \ - SR(DOMAIN11_PG_CONFIG), \ +/* SR(DOMAIN10_PG_CONFIG), Navi1x HUBP5 not powergate-able*/\ +/* SR(DOMAIN11_PG_CONFIG), Navi1x DPP5 is not powergate-able */\ SR(DOMAIN16_PG_CONFIG), \ SR(DOMAIN17_PG_CONFIG), \ SR(DOMAIN18_PG_CONFIG), \ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index b61774d2e8b2..6764ad845cd4 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -94,25 +94,31 @@ static void enable_power_gating_plane( REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on); REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on); REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on); - REG_UPDATE(DOMAIN8_PG_CONFIG, DOMAIN8_POWER_FORCEON, force_on); - /*Do not power gate DCHUB5, should be left at HW default, power on permanently*/ - /*REG_UPDATE(DOMAIN10_PG_CONFIG, DOMAIN10_POWER_FORCEON, force_on);*/ + if (REG(DOMAIN8_PG_CONFIG)) + REG_UPDATE(DOMAIN8_PG_CONFIG, DOMAIN8_POWER_FORCEON, force_on); + if (REG(DOMAIN10_PG_CONFIG)) + REG_UPDATE(DOMAIN10_PG_CONFIG, DOMAIN8_POWER_FORCEON, force_on); /* DPP0/1/2/3/4/5 */ REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on); REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on); REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on); REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on); - REG_UPDATE(DOMAIN9_PG_CONFIG, DOMAIN9_POWER_FORCEON, force_on); - /*Do not power gate DPP5, should be left at HW default, power on permanently*/ - /*REG_UPDATE(DOMAIN11_PG_CONFIG, DOMAIN11_POWER_FORCEON, force_on);*/ + if (REG(DOMAIN9_PG_CONFIG)) + REG_UPDATE(DOMAIN9_PG_CONFIG, DOMAIN9_POWER_FORCEON, force_on); + if (REG(DOMAIN11_PG_CONFIG)) + REG_UPDATE(DOMAIN11_PG_CONFIG, DOMAIN9_POWER_FORCEON, force_on); + /* DCS0/1/2/3/4/5 */ REG_UPDATE(DOMAIN16_PG_CONFIG, DOMAIN16_POWER_FORCEON, force_on); REG_UPDATE(DOMAIN17_PG_CONFIG, DOMAIN17_POWER_FORCEON, force_on); REG_UPDATE(DOMAIN18_PG_CONFIG, DOMAIN18_POWER_FORCEON, force_on); - REG_UPDATE(DOMAIN19_PG_CONFIG, DOMAIN19_POWER_FORCEON, force_on); - REG_UPDATE(DOMAIN20_PG_CONFIG, DOMAIN20_POWER_FORCEON, force_on); - REG_UPDATE(DOMAIN21_PG_CONFIG, DOMAIN21_POWER_FORCEON, force_on); + if (REG(DOMAIN19_PG_CONFIG)) + REG_UPDATE(DOMAIN19_PG_CONFIG, DOMAIN19_POWER_FORCEON, force_on); + if (REG(DOMAIN20_PG_CONFIG)) + REG_UPDATE(DOMAIN20_PG_CONFIG, DOMAIN20_POWER_FORCEON, force_on); + if (REG(DOMAIN21_PG_CONFIG)) + REG_UPDATE(DOMAIN21_PG_CONFIG, DOMAIN21_POWER_FORCEON, force_on); } void dcn20_dccg_init(struct dce_hwseq *hws) -- cgit v1.2.3 From 291ac8fb0cd934104b1d154220695ec049592a25 Mon Sep 17 00:00:00 2001 From: Lewis Huang Date: Tue, 2 Jul 2019 22:33:34 -0500 Subject: drm/amd/display: Add debug entry to destroy disconnected edp link Add a flag to dc_debug_options to determine if a disconnected edp link should be destroyed. 
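As a reading aid, a minimal sketch of the decision this message describes, pulled out into a standalone helper: the wrapper name should_destroy_edp_link is invented for illustration, while the fields and calls it uses (dc->config.edp_not_connected, dc->debug.remove_disconnect_edp, dc_link_detect_sink, SIGNAL_TYPE_EDP, dc_connection_none) mirror the create_links() hunk further down. It is a sketch of the intent, not the applied change.

/* Sketch only: is this newly created link an eDP link that should be torn
 * down?  Mirrors the logic added to create_links() below; the helper name
 * is hypothetical and error handling is omitted.
 */
static bool should_destroy_edp_link(struct dc *dc, struct dc_link *link)
{
	enum dc_connection_type type;

	if (link->connector_signal != SIGNAL_TYPE_EDP)
		return false;				/* only eDP links are affected */

	if (dc->config.edp_not_connected)
		return true;				/* board-level "no eDP" config */

	if (dc->debug.remove_disconnect_edp) {		/* new debug entry */
		dc_link_detect_sink(link, &type);	/* probe the sink */
		return type == dc_connection_none;	/* nothing answered: drop it */
	}

	return false;
}

The actual diff below keeps the same logic inline in create_links() instead of splitting out a helper.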
Signed-off-by: Lewis Huang Reviewed-by: Yongqiang Sun Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 20 ++++++++++++++++---- drivers/gpu/drm/amd/display/dc/dc.h | 1 + 2 files changed, 17 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index d6a0a08f5cda..94f126d2331c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -180,13 +180,25 @@ static bool create_links( link = link_create(&link_init_params); if (link) { - if (dc->config.edp_not_connected && - link->connector_signal == SIGNAL_TYPE_EDP) { - link_destroy(&link); - } else { + bool should_destory_link = false; + + if (link->connector_signal == SIGNAL_TYPE_EDP) { + if (dc->config.edp_not_connected) + should_destory_link = true; + else if (dc->debug.remove_disconnect_edp) { + enum dc_connection_type type; + dc_link_detect_sink(link, &type); + if (type == dc_connection_none) + should_destory_link = true; + } + } + + if (!should_destory_link) { dc->links[dc->link_count] = link; link->dc = dc; ++dc->link_count; + } else { + link_destroy(&link); } } } diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 6da0a6fe2973..c585e16bc9f1 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -378,6 +378,7 @@ struct dc_debug_options { bool scl_reset_length10; bool hdmi20_disable; bool skip_detection_link_training; + bool remove_disconnect_edp; unsigned int force_odm_combine; //bit vector based on otg inst unsigned int force_fclk_khz; bool disable_tri_buf; -- cgit v1.2.3 From 21ffcc94d5b3dc024fedac700f1e7f9dacf4ab4f Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Thu, 11 Jul 2019 12:32:43 -0400 Subject: drm/amd/display: Copy GSL groups when committing a new context [Why] DC configures the GSL group for the pipe when pipe_split is enabled and we're switching flip types (buffered <-> immediate flip) on DCN2. In order to record what GSL group the pipe is using DC stores it in the pipe's stream_res. DM is not aware of this internal grouping, nor is DC resource. So when DM creates a dc_state context and passes it to DC the current GSL group is lost - DM never knew about it in the first place. After 3 immediate flips we run out of GSL groups and we're no longer able to correctly perform *any* flip for multi-pipe scenarios. [How] The gsl_group needs to be copied to the new context. DM has no insight into GSL grouping and could even potentially create a brand new context without referencing current hardware state. So this makes the most sense to have happen in DC. There are two places where DC can apply a new context: - dc_commit_state - dc_commit_updates_for_stream But what's shared between both of these is apply_ctx_for_surface. This logic only matters for DCN2, so it can be placed in dcn20_apply_ctx_for_surface. Before doing any locking (where the GSL group is setup) we can copy over the GSL groups before committing the new context. 
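Reduced to its core, the carry-over described above is a per-pipe copy from the current state into the incoming context. The sketch below follows the field names of the dcn20_hwseq.c hunk that comes next (struct dc, dc_state, dc_stream_state, pipe_ctx, stream_res.gsl_group); the helper name carry_over_gsl_groups is invented for illustration and this is not the applied patch itself.

/* Sketch only: preserve GSL group assignments for pipes that keep driving
 * the same stream when a new context is applied.  Mirrors the loop added to
 * dcn20_apply_ctx_for_surface() below.
 */
static void carry_over_gsl_groups(struct dc *dc,
				  struct dc_state *context,
				  struct dc_stream_state *stream)
{
	int i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
		struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		/* only pipes still bound to this stream keep their group */
		if (new_pipe->stream == stream && new_pipe->stream == old_pipe->stream)
			new_pipe->stream_res.gsl_group = old_pipe->stream_res.gsl_group;
	}
}

Running this copy before any pipe locking matches the ordering requirement called out in the message, since the GSL group is set up under those locks.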
Signed-off-by: Nicholas Kazlauskas Reviewed-by: Hersen Wu Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 6764ad845cd4..69e4d0d96c7f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -1364,6 +1364,18 @@ static void dcn20_apply_ctx_for_surface( if (!top_pipe_to_program) return; + /* Carry over GSL groups in case the context is changing. */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + struct pipe_ctx *old_pipe_ctx = + &dc->current_state->res_ctx.pipe_ctx[i]; + + if (pipe_ctx->stream == stream && + pipe_ctx->stream == old_pipe_ctx->stream) + pipe_ctx->stream_res.gsl_group = + old_pipe_ctx->stream_res.gsl_group; + } + tg = top_pipe_to_program->stream_res.tg; interdependent_update = top_pipe_to_program->plane_state && -- cgit v1.2.3 From 7a83645ac0cc2258de101a6a491d31499eb27cc5 Mon Sep 17 00:00:00 2001 From: Dale Zhao Date: Wed, 10 Jul 2019 17:36:53 +0800 Subject: drm/amd/display: handle active dongle port type is DP++ or DP case [Why]: Some active dongles have DP++ port and DP port at the same time. Current code doesn't cover DP++ case and processes as default DVI case, in which audio is disabled. Because of dual mode, DP case is also treat as DVI case for the other port. [How]: According DP 1.4 spec, add DP++ procedure similar with HDMI case. Also add None dongle type for DP case. Signed-off-by: Dale Zhao Reviewed-by: Wenjing Liu Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 30 ++++++++++++++++-------- drivers/gpu/drm/amd/display/include/dpcd_defs.h | 2 +- 2 files changed, 21 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index b512fecae061..08bd9c96b9b0 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -2398,8 +2398,8 @@ static void get_active_converter_info( case DOWNSTREAM_VGA: link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_VGA_CONVERTER; break; - case DOWNSTREAM_DVI_HDMI: - /* At this point we don't know is it DVI or HDMI, + case DOWNSTREAM_DVI_HDMI_DP_PLUS_PLUS: + /* At this point we don't know is it DVI or HDMI or DP++, * assume DVI.*/ link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_DVI_CONVERTER; break; @@ -2416,6 +2416,10 @@ static void get_active_converter_info( det_caps, sizeof(det_caps)); switch (port_caps->bits.DWN_STRM_PORTX_TYPE) { + /*Handle DP case as DONGLE_NONE*/ + case DOWN_STREAM_DETAILED_DP: + link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE; + break; case DOWN_STREAM_DETAILED_VGA: link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_VGA_CONVERTER; @@ -2425,6 +2429,8 @@ static void get_active_converter_info( DISPLAY_DONGLE_DP_DVI_CONVERTER; break; case DOWN_STREAM_DETAILED_HDMI: + case DOWN_STREAM_DETAILED_DP_PLUS_PLUS: + /*Handle DP++ active converter case, process DP++ case as HDMI case according DP1.4 spec*/ link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_HDMI_CONVERTER; @@ -2440,14 +2446,18 @@ static void get_active_converter_info( link->dpcd_caps.dongle_caps.is_dp_hdmi_s3d_converter = hdmi_caps.bits.FRAME_SEQ_TO_FRAME_PACK; - 
link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_pass_through = - hdmi_caps.bits.YCrCr422_PASS_THROUGH; - link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_pass_through = - hdmi_caps.bits.YCrCr420_PASS_THROUGH; - link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_converter = - hdmi_caps.bits.YCrCr422_CONVERSION; - link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_converter = - hdmi_caps.bits.YCrCr420_CONVERSION; + /*YCBCR capability only for HDMI case*/ + if (port_caps->bits.DWN_STRM_PORTX_TYPE + == DOWN_STREAM_DETAILED_HDMI) { + link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_pass_through = + hdmi_caps.bits.YCrCr422_PASS_THROUGH; + link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_pass_through = + hdmi_caps.bits.YCrCr420_PASS_THROUGH; + link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_converter = + hdmi_caps.bits.YCrCr422_CONVERSION; + link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_converter = + hdmi_caps.bits.YCrCr420_CONVERSION; + } link->dpcd_caps.dongle_caps.dp_hdmi_max_bpc = translate_dpcd_max_bpc( diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h index 1c66166d0a94..2c90d1b46c8b 100644 --- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h +++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h @@ -43,7 +43,7 @@ enum dpcd_revision { enum dpcd_downstream_port_type { DOWNSTREAM_DP = 0, DOWNSTREAM_VGA, - DOWNSTREAM_DVI_HDMI, + DOWNSTREAM_DVI_HDMI_DP_PLUS_PLUS,/* DVI, HDMI, DP++ */ DOWNSTREAM_NONDDC /* has no EDID (TV,CV) */ }; -- cgit v1.2.3 From 7d6b60dbc6a015dbdc444e4d39549600f7156690 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 16 Jul 2019 13:49:30 +0100 Subject: drm/i915/execlists: Cancel breadcrumb on preempting the virtual engine As we unwind the requests for a preemption event, we return a virtual request back to its original virtual engine (so that it is available for execution on any of its siblings). In the process, this means that its breadcrumb should no longer be associated with the original physical engine, and so we are forced to decouple it. Previously, as the request could not complete without our awareness, we would move it to the next real engine without any danger. However, preempt-to-busy allowed for requests to continue on the HW and complete in the background as we unwound, which meant that we could end up retiring the request before fixing up the breadcrumb link. [51679.517943] INFO: trying to register non-static key. [51679.517956] the code is fine but needs lockdep annotation. [51679.517960] turning off the locking correctness validator. [51679.517966] CPU: 0 PID: 3270 Comm: kworker/u8:0 Tainted: G U 5.2.0+ #717 [51679.517971] Hardware name: Intel Corporation NUC7i5BNK/NUC7i5BNB, BIOS BNKBL357.86A.0052.2017.0918.1346 09/18/2017 [51679.518012] Workqueue: i915 retire_work_handler [i915] [51679.518017] Call Trace: [51679.518026] dump_stack+0x67/0x90 [51679.518031] register_lock_class+0x52c/0x540 [51679.518038] ? find_held_lock+0x2d/0x90 [51679.518042] __lock_acquire+0x68/0x1800 [51679.518047] ? find_held_lock+0x2d/0x90 [51679.518073] ? __i915_sw_fence_complete+0xff/0x1c0 [i915] [51679.518079] lock_acquire+0x90/0x170 [51679.518105] ? i915_request_cancel_breadcrumb+0x29/0x160 [i915] [51679.518112] _raw_spin_lock+0x27/0x40 [51679.518138] ? 
i915_request_cancel_breadcrumb+0x29/0x160 [i915] [51679.518165] i915_request_cancel_breadcrumb+0x29/0x160 [i915] [51679.518199] i915_request_retire+0x43f/0x530 [i915] [51679.518232] retire_requests+0x4d/0x60 [i915] [51679.518263] i915_retire_requests+0xdf/0x1f0 [i915] [51679.518294] retire_work_handler+0x4c/0x60 [i915] [51679.518301] process_one_work+0x22c/0x5c0 [51679.518307] worker_thread+0x37/0x390 [51679.518311] ? process_one_work+0x5c0/0x5c0 [51679.518316] kthread+0x116/0x130 [51679.518320] ? kthread_create_on_node+0x40/0x40 [51679.518325] ret_from_fork+0x24/0x30 [51679.520177] ------------[ cut here ]------------ [51679.520189] list_del corruption, ffff88883675e2f0->next is LIST_POISON1 (dead000000000100) Fixes: 22b7a426bbe1 ("drm/i915/execlists: Preempt-to-busy") Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190716124931.5870-4-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_lrc.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index d076d9148b6d..dce36482a252 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -495,6 +495,19 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine) list_move(&rq->sched.link, pl); active = rq; } else { + /* + * Decouple the virtual breadcrumb before moving it + * back to the virtual engine -- we don't want the + * request to complete in the background and try + * and cancel the breadcrumb on the virtual engine + * (instead of the old engine where it is linked)! + */ + if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, + &rq->fence.flags)) { + spin_lock(&rq->lock); + i915_request_cancel_breadcrumb(rq); + spin_unlock(&rq->lock); + } rq->engine = owner; owner->submit_request(rq); active = NULL; -- cgit v1.2.3 From 0d392cb9eb58099496a4d2981047df7ff7d22949 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Fri, 19 Jul 2019 02:41:00 +0000 Subject: drm/i915: Remove set but not used variable 'src_y' MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes gcc '-Wunused-but-set-variable' warning: drivers/gpu/drm/i915/display/intel_sprite.c: In function 'g4x_sprite_check_scaling': drivers/gpu/drm/i915/display/intel_sprite.c:1494:13: warning: variable 'src_y' set but not used [-Wunused-but-set-variable] Reported-by: Hulk Robot Signed-off-by: YueHaibing Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190719024100.64738-1-yuehaibing@huawei.com --- drivers/gpu/drm/i915/display/intel_sprite.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index 5e5ea867aae9..53c6594c4588 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -1518,7 +1518,7 @@ g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state, const struct drm_framebuffer *fb = plane_state->base.fb; const struct drm_rect *src = &plane_state->base.src; const struct drm_rect *dst = &plane_state->base.dst; - int src_x, src_y, src_w, src_h, crtc_w, crtc_h; + int src_x, src_w, src_h, crtc_w, crtc_h; const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode; unsigned int cpp = fb->format->cpp[0]; @@ -1529,7 +1529,6 @@ g4x_sprite_check_scaling(struct intel_crtc_state 
*crtc_state, crtc_h = drm_rect_height(dst); src_x = src->x1 >> 16; - src_y = src->y1 >> 16; src_w = drm_rect_width(src) >> 16; src_h = drm_rect_height(src) >> 16; -- cgit v1.2.3 From 15160879d47213c32f357bc67b6014d9aaf14ed7 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Wed, 17 Jul 2019 19:06:19 +0100 Subject: drm/i915: Fix GEN8_MCR_SELECTOR programming fls returns bit positions starting from one for the lsb and the MCR register expects zero based (sub)slice addressing. Incorrent MCR programming can have the effect of directing MMIO reads of registers in the 0xb100-0xb3ff range to invalid subslice returning zeroes instead of actual content. Signed-off-by: Tvrtko Ursulin Fixes: 1e40d4aea57b ("drm/i915/cnl: Implement WaProgramMgsrForCorrectSliceSpecificMmioReads") Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190717180624.20354-2-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index c0bc9cb7f228..6f93caf7a5a1 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -962,9 +962,14 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type) u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv) { const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; + unsigned int slice = fls(sseu->slice_mask) - 1; + unsigned int subslice; u32 mcr_s_ss_select; - u32 slice = fls(sseu->slice_mask); - u32 subslice = fls(sseu->subslice_mask[slice]); + + GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask)); + subslice = fls(sseu->subslice_mask[slice]); + GEM_BUG_ON(!subslice); + subslice--; if (IS_GEN(dev_priv, 10)) mcr_s_ss_select = GEN8_MCR_SLICE(slice) | -- cgit v1.2.3 From 7405cb77e008aa83908c9824ff7897c403929d57 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Wed, 17 Jul 2019 19:06:20 +0100 Subject: drm/i915: Trust programmed MCR in read_subslice_reg Instead of re-calculating the MCR selector in read_subslice_reg do the rwm on its existing value and restore it when done. This consolidates MCR programming to one place for cnl+, and avoids re-calculating its default value on older platforms during hangcheck. 
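The read-modify-write-and-restore idea in the commit above can be shown in isolation: read the current selector, rewrite only the steering bits, perform the steered read, then put the original bits back instead of recomputing a default. The sketch below is a minimal user-space model with invented names (mmio[], SELECTOR, SS_MASK); it is not the i915 helper itself.

/* Minimal model of read-modify-write on an existing selector value,
 * restoring it when done. The "registers" are plain memory. */
#include <stdint.h>
#include <stdio.h>

static uint32_t mmio[2];         /* [0] = selector, [1] = target register */
#define SELECTOR   0
#define TARGET     1
#define SS_MASK    0x0000ff00u   /* pretend slice/subslice select bits */

static uint32_t read_with_selector(uint32_t select)
{
	uint32_t old = mmio[SELECTOR];
	uint32_t val;

	/* Steer the read by rewriting only the select bits... */
	mmio[SELECTOR] = (old & ~SS_MASK) | (select & SS_MASK);
	val = mmio[TARGET];

	/* ...then restore whatever was programmed before, rather than
	 * recomputing a "default" selector value. */
	mmio[SELECTOR] = (mmio[SELECTOR] & ~SS_MASK) | (old & SS_MASK);
	return val;
}

int main(void)
{
	mmio[SELECTOR] = 0x00003400; /* value programmed elsewhere */
	mmio[TARGET]   = 0xdeadbeef;
	printf("read %#x, selector restored to %#x\n",
	       read_with_selector(0x1200), mmio[SELECTOR]);
	return 0;
}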
Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190717180624.20354-3-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 37 ++++++++++--------------------- 1 file changed, 12 insertions(+), 25 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 6f93caf7a5a1..cc4d1826173d 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -989,27 +989,17 @@ read_subslice_reg(struct intel_engine_cs *engine, int slice, int subslice, { struct drm_i915_private *i915 = engine->i915; struct intel_uncore *uncore = engine->uncore; - u32 mcr_slice_subslice_mask; - u32 mcr_slice_subslice_select; - u32 default_mcr_s_ss_select; - u32 mcr; - u32 ret; + u32 mcr_mask, mcr_ss, mcr, old_mcr, val; enum forcewake_domains fw_domains; if (INTEL_GEN(i915) >= 11) { - mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK | - GEN11_MCR_SUBSLICE_MASK; - mcr_slice_subslice_select = GEN11_MCR_SLICE(slice) | - GEN11_MCR_SUBSLICE(subslice); + mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK; + mcr_ss = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice); } else { - mcr_slice_subslice_mask = GEN8_MCR_SLICE_MASK | - GEN8_MCR_SUBSLICE_MASK; - mcr_slice_subslice_select = GEN8_MCR_SLICE(slice) | - GEN8_MCR_SUBSLICE(subslice); + mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK; + mcr_ss = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice); } - default_mcr_s_ss_select = intel_calculate_mcr_s_ss_select(i915); - fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ); fw_domains |= intel_uncore_forcewake_for_reg(uncore, @@ -1019,26 +1009,23 @@ read_subslice_reg(struct intel_engine_cs *engine, int slice, int subslice, spin_lock_irq(&uncore->lock); intel_uncore_forcewake_get__locked(uncore, fw_domains); - mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR); - - WARN_ON_ONCE((mcr & mcr_slice_subslice_mask) != - default_mcr_s_ss_select); + old_mcr = mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR); - mcr &= ~mcr_slice_subslice_mask; - mcr |= mcr_slice_subslice_select; + mcr &= ~mcr_mask; + mcr |= mcr_ss; intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr); - ret = intel_uncore_read_fw(uncore, reg); + val = intel_uncore_read_fw(uncore, reg); - mcr &= ~mcr_slice_subslice_mask; - mcr |= default_mcr_s_ss_select; + mcr &= ~mcr_mask; + mcr |= old_mcr & mcr_mask; intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr); intel_uncore_forcewake_put__locked(uncore, fw_domains); spin_unlock_irq(&uncore->lock); - return ret; + return val; } /* NB: please notice the memset */ -- cgit v1.2.3 From 6c2b0103ad92b4238c26b0b8f224f4581d98b2fe Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Wed, 17 Jul 2019 19:06:21 +0100 Subject: drm/i915: Fix and improve MCR selection logic MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A couple issues were present in this code: 1. fls() usage was incorrect causing off by one in subslice mask lookup, which in other words means subslice mask of all zeroes is always used (subslice mask of a slice which is not present, or even out of bounds array access), rendering the checks in wa_init_mcr either futile or random. 2. Condition in WARN_ON was not correct. It is doing a bitwise and operation between a positive (present subslices) and negative mask (disabled L3 banks). 
This means that with corrected fls() usage the assert would always incorrectly fail. We could fix this by inverting the fuse bits in the check, but instead do one better and improve the code so it not only asserts, but finds the first common index between the two masks and only warns if no such index can be found. v2: * Simplify check for logic and redability. * Improve commentary explaining what is really happening ie. what the assert is really trying to check and why. v3: * Find first common index instead of just asserting. Signed-off-by: Tvrtko Ursulin Fixes: fe864b76c2ab ("drm/i915: Implement WaProgramMgsrForL3BankSpecificMmioReads") Reviewed-by: Chris Wilson # v1 Cc: Michał Winiarski Cc: Stuart Summers Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190717180624.20354-4-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 24 -------- drivers/gpu/drm/i915/gt/intel_workarounds.c | 90 ++++++++++++++++------------- drivers/gpu/drm/i915/i915_drv.h | 2 - 3 files changed, 49 insertions(+), 67 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index cc4d1826173d..65cbf1d9118d 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -959,30 +959,6 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type) } } -u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv) -{ - const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; - unsigned int slice = fls(sseu->slice_mask) - 1; - unsigned int subslice; - u32 mcr_s_ss_select; - - GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask)); - subslice = fls(sseu->subslice_mask[slice]); - GEM_BUG_ON(!subslice); - subslice--; - - if (IS_GEN(dev_priv, 10)) - mcr_s_ss_select = GEN8_MCR_SLICE(slice) | - GEN8_MCR_SUBSLICE(subslice); - else if (INTEL_GEN(dev_priv) >= 11) - mcr_s_ss_select = GEN11_MCR_SLICE(slice) | - GEN11_MCR_SUBSLICE(subslice); - else - mcr_s_ss_select = 0; - - return mcr_s_ss_select; -} - static u32 read_subslice_reg(struct intel_engine_cs *engine, int slice, int subslice, i915_reg_t reg) diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 3b1fc7c8faa8..c2325b7ecf8d 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -762,7 +762,10 @@ static void wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal) { const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu; - u32 mcr_slice_subslice_mask; + unsigned int slice, subslice; + u32 l3_en, mcr, mcr_mask; + + GEM_BUG_ON(INTEL_GEN(i915) < 10); /* * WaProgramMgsrForL3BankSpecificMmioReads: cnl,icl @@ -770,42 +773,7 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal) * the case, we might need to program MCR select to a valid L3Bank * by default, to make sure we correctly read certain registers * later on (in the range 0xB100 - 0xB3FF). - * This might be incompatible with - * WaProgramMgsrForCorrectSliceSpecificMmioReads. - * Fortunately, this should not happen in production hardware, so - * we only assert that this is the case (instead of implementing - * something more complex that requires checking the range of every - * MMIO read). 
- */ - if (INTEL_GEN(i915) >= 10 && - is_power_of_2(sseu->slice_mask)) { - /* - * read FUSE3 for enabled L3 Bank IDs, if L3 Bank matches - * enabled subslice, no need to redirect MCR packet - */ - u32 slice = fls(sseu->slice_mask); - u32 fuse3 = - intel_uncore_read(&i915->uncore, GEN10_MIRROR_FUSE3); - u8 ss_mask = sseu->subslice_mask[slice]; - - u8 enabled_mask = (ss_mask | ss_mask >> - GEN10_L3BANK_PAIR_COUNT) & GEN10_L3BANK_MASK; - u8 disabled_mask = fuse3 & GEN10_L3BANK_MASK; - - /* - * Production silicon should have matched L3Bank and - * subslice enabled - */ - WARN_ON((enabled_mask & disabled_mask) != enabled_mask); - } - - if (INTEL_GEN(i915) >= 11) - mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK | - GEN11_MCR_SUBSLICE_MASK; - else - mcr_slice_subslice_mask = GEN8_MCR_SLICE_MASK | - GEN8_MCR_SUBSLICE_MASK; - /* + * * WaProgramMgsrForCorrectSliceSpecificMmioReads:cnl,icl * Before any MMIO read into slice/subslice specific registers, MCR * packet control register needs to be programmed to point to any @@ -815,11 +783,51 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal) * are consistent across s/ss in almost all cases. In the rare * occasions, such as INSTDONE, where this value is dependent * on s/ss combo, the read should be done with read_subslice_reg. + * + * Since GEN8_MCR_SELECTOR contains dual-purpose bits which select both + * to which subslice, or to which L3 bank, the respective mmio reads + * will go, we have to find a common index which works for both + * accesses. + * + * Case where we cannot find a common index fortunately should not + * happen in production hardware, so we only emit a warning instead of + * implementing something more complex that requires checking the range + * of every MMIO read. */ - wa_write_masked_or(wal, - GEN8_MCR_SELECTOR, - mcr_slice_subslice_mask, - intel_calculate_mcr_s_ss_select(i915)); + + if (INTEL_GEN(i915) >= 10 && is_power_of_2(sseu->slice_mask)) { + u32 l3_fuse = + intel_uncore_read(&i915->uncore, GEN10_MIRROR_FUSE3) & + GEN10_L3BANK_MASK; + + DRM_DEBUG_DRIVER("L3 fuse = %x\n", l3_fuse); + l3_en = ~(l3_fuse << GEN10_L3BANK_PAIR_COUNT | l3_fuse); + } else { + l3_en = ~0; + } + + slice = fls(sseu->slice_mask) - 1; + GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask)); + subslice = fls(l3_en & sseu->subslice_mask[slice]); + if (!subslice) { + DRM_WARN("No common index found between subslice mask %x and L3 bank mask %x!\n", + sseu->subslice_mask[slice], l3_en); + subslice = fls(l3_en); + WARN_ON(!subslice); + } + subslice--; + + if (INTEL_GEN(i915) >= 11) { + mcr = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice); + mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK; + } else { + mcr = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice); + mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK; + } + + DRM_DEBUG_DRIVER("MCR slice/subslice = %x\n", mcr); + + wa_write_masked_or(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr); } static void diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index a86a6ea3849f..1a58fb8c88c2 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2389,8 +2389,6 @@ void i915_driver_remove(struct drm_device *dev); void intel_engine_init_hangcheck(struct intel_engine_cs *engine); int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); -u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv); - static inline bool intel_gvt_active(struct drm_i915_private *dev_priv) { return dev_priv->gvt; -- cgit v1.2.3 
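The two fls() fixes above hinge on fls() returning a 1-based bit position (0 when no bit is set), so an array or register index derived from it needs a "- 1". The standalone demonstration below reimplements fls() locally, since the kernel helper is not available in user space.

/* Why an fls()-derived index needs "- 1". This fls() mirrors the
 * kernel helper: 1-based position of the most significant set bit,
 * or 0 if the argument is 0. */
#include <stdio.h>

static int fls(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned int slice_mask = 0x1;   /* only slice 0 present */

	printf("fls(0x1) = %d\n", fls(slice_mask));             /* 1, not 0 */
	printf("zero-based slice = %d\n", fls(slice_mask) - 1); /* 0 */

	/* Using fls() directly as an index would therefore address
	 * slice 1, which is not present in this mask at all. */
	return 0;
}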
From fa380486d5f995b6914b4d4149743d330125414e Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Wed, 17 Jul 2019 19:06:22 +0100 Subject: drm/i915: Skip CS verification of L3 bank registers Access to 0xb100 - 0xb3ff mmio range is controlled by the MCR selector which only affects CPU MMIO. Therefore these registers cannot be realiably read with MI_SRM from the command streamer so skip their verification. Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190717180624.20354-5-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/gt/intel_workarounds.c | 38 +++++++++++++++++++++++++---- 1 file changed, 33 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index c2325b7ecf8d..619d42a2b81b 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -1436,26 +1436,50 @@ err_obj: return ERR_PTR(err); } +static bool mcr_range(struct drm_i915_private *i915, u32 offset) +{ + /* + * Registers in this range are affected by the MCR selector + * which only controls CPU initiated MMIO. Routing does not + * work for CS access so we cannot verify them on this path. + */ + if (INTEL_GEN(i915) >= 8 && (offset >= 0xb100 && offset <= 0xb3ff)) + return true; + + return false; +} + static int wa_list_srm(struct i915_request *rq, const struct i915_wa_list *wal, struct i915_vma *vma) { + struct drm_i915_private *i915 = rq->i915; + unsigned int i, count = 0; const struct i915_wa *wa; - unsigned int i; u32 srm, *cs; srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; - if (INTEL_GEN(rq->i915) >= 8) + if (INTEL_GEN(i915) >= 8) srm++; - cs = intel_ring_begin(rq, 4 * wal->count); + for (i = 0, wa = wal->list; i < wal->count; i++, wa++) { + if (!mcr_range(i915, i915_mmio_reg_offset(wa->reg))) + count++; + } + + cs = intel_ring_begin(rq, 4 * count); if (IS_ERR(cs)) return PTR_ERR(cs); for (i = 0, wa = wal->list; i < wal->count; i++, wa++) { + u32 offset = i915_mmio_reg_offset(wa->reg); + + if (mcr_range(i915, offset)) + continue; + *cs++ = srm; - *cs++ = i915_mmio_reg_offset(wa->reg); + *cs++ = offset; *cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i; *cs++ = 0; } @@ -1505,9 +1529,13 @@ static int engine_wa_list_verify(struct intel_context *ce, } err = 0; - for (i = 0, wa = wal->list; i < wal->count; i++, wa++) + for (i = 0, wa = wal->list; i < wal->count; i++, wa++) { + if (mcr_range(rq->i915, i915_mmio_reg_offset(wa->reg))) + continue; + if (!wa_verify(wa, results[i], wal->name, from)) err = -ENXIO; + } i915_gem_object_unpin_map(vma->obj); -- cgit v1.2.3 From 935ba6f3bf0fad42aac2b10d19498cf27c33cc98 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Wed, 17 Jul 2019 19:06:23 +0100 Subject: drm/i915/icl: Verify engine workarounds in GEN8_L3SQCREG4 Having fixed the incorect MCR programming in an earlier patch, we can now stop ignoring read back of GEN8_L3SQCREG4 during engine workaround verification. 
Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190717180624.20354-6-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/gt/intel_workarounds.c | 27 ++++++--------------------- 1 file changed, 6 insertions(+), 21 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 619d42a2b81b..ff532ff5d574 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -177,19 +177,6 @@ wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val) wa_write_masked_or(wal, reg, val, val); } -static void -ignore_wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask, u32 val) -{ - struct i915_wa wa = { - .reg = reg, - .mask = mask, - .val = val, - /* Bonkers HW, skip verifying */ - }; - - _wa_add(wal, &wa); -} - #define WA_SET_BIT_MASKED(addr, mask) \ wa_write_masked_or(wal, (addr), (mask), _MASKED_BIT_ENABLE(mask)) @@ -1260,10 +1247,9 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE); /* WaPipelineFlushCoherentLines:icl */ - ignore_wa_write_or(wal, - GEN8_L3SQCREG4, - GEN8_LQSC_FLUSH_COHERENT_LINES, - GEN8_LQSC_FLUSH_COHERENT_LINES); + wa_write_or(wal, + GEN8_L3SQCREG4, + GEN8_LQSC_FLUSH_COHERENT_LINES); /* * Wa_1405543622:icl @@ -1290,10 +1276,9 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) * Wa_1405733216:icl * Formerly known as WaDisableCleanEvicts */ - ignore_wa_write_or(wal, - GEN8_L3SQCREG4, - GEN11_LQSC_CLEAN_EVICT_DISABLE, - GEN11_LQSC_CLEAN_EVICT_DISABLE); + wa_write_or(wal, + GEN8_L3SQCREG4, + GEN11_LQSC_CLEAN_EVICT_DISABLE); /* WaForwardProgressSoftReset:icl */ wa_write_or(wal, -- cgit v1.2.3 From b83a309a9889225ca5acf75897ec35a9bc33cdaf Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Wed, 17 Jul 2019 19:06:24 +0100 Subject: drm/i915/icl: Add Wa_1409178092 We were missing this workaround which can cause hangs if fine grained coherency was used. 
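The wa_write_masked_or() call in the diff below passes 0 as the value. Assuming the helper follows the usual masked read-modify-write convention (clear the mask bits, then OR in the value), that clears the named bit; the assumption and the convention are shown as plain C below, not as the i915 implementation.

/* Generic masked read-modify-write, for illustration only. */
#include <stdint.h>
#include <stdio.h>

static uint32_t rmw(uint32_t old, uint32_t mask, uint32_t val)
{
	return (old & ~mask) | (val & mask);
}

int main(void)
{
	uint32_t bit19 = 1u << 19;

	printf("%#x\n", rmw(0xffffffff, bit19, 0));      /* bit 19 cleared */
	printf("%#x\n", rmw(0x00000000, bit19, bit19));  /* bit 19 set */
	return 0;
}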
Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190717180624.20354-7-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/gt/intel_workarounds.c | 6 ++++++ drivers/gpu/drm/i915/i915_reg.h | 3 +++ 2 files changed, 9 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index ff532ff5d574..704ace01e7f5 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -1297,6 +1297,12 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) wa_write_or(wal, GEN7_SARCHKMD, GEN7_DISABLE_SAMPLER_PREFETCH); + + /* Wa_1409178092:icl */ + wa_write_masked_or(wal, + GEN11_SCRATCH2, + GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE, + 0); } if (IS_GEN_RANGE(i915, 9, 11)) { diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index fdd9bc01e694..24f2a52a2b42 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -7721,6 +7721,9 @@ enum { #define GEN7_L3SQCREG4 _MMIO(0xb034) #define L3SQ_URB_READ_CAM_MATCH_DISABLE (1 << 27) +#define GEN11_SCRATCH2 _MMIO(0xb140) +#define GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE (1 << 19) + #define GEN8_L3SQCREG4 _MMIO(0xb118) #define GEN11_LQSC_CLEAN_EVICT_DISABLE (1 << 6) #define GEN8_LQSC_RO_PERF_DIS (1 << 27) -- cgit v1.2.3 From 92508da7e3daa1cd7ba18685d067a41c89a0f3ec Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 19 Jul 2019 10:48:44 +0100 Subject: Revert "drm/i915/guc: Turn on GuC/HuC auto mode" This reverts commit f774f09649192f326fa030564afd3f8f5d82c1e4. If GuC firmware is not present on the filesystem driver crashes the machine on boot. Signed-off-by: Tvrtko Ursulin Acked-by: Joonas Lahtinen Fixes: f774f0964919 ("drm/i915/guc: Turn on GuC/HuC auto mode") Cc: Michal Wajdeczko Cc: Jani Nikula Cc: Joonas Lahtinen Cc: Rodrigo Vivi Cc: Chris Wilson Cc: Daniele Ceraolo Spurio Cc: Jani Nikula Cc: intel-gfx@lists.freedesktop.org Link: https://patchwork.freedesktop.org/patch/msgid/20190719094845.6242-2-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_params.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h index 5736c55694fe..d29ade3b7de6 100644 --- a/drivers/gpu/drm/i915/i915_params.h +++ b/drivers/gpu/drm/i915/i915_params.h @@ -54,7 +54,7 @@ struct drm_printer; param(int, disable_power_well, -1) \ param(int, enable_ips, 1) \ param(int, invert_brightness, 0) \ - param(int, enable_guc, -1) \ + param(int, enable_guc, 0) \ param(int, guc_log_level, -1) \ param(char *, guc_firmware_path, NULL) \ param(char *, huc_firmware_path, NULL) \ -- cgit v1.2.3 From a0301020c568ff6b94da0b988ab984b90ea702a1 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 19 Jul 2019 10:48:45 +0100 Subject: Revert "drm/i915: Update description of i915.enable_guc modparam" This reverts commit 0629d4da1f159778063767fb0ac1c951034c5477. If GuC firmware is not present on the filesystem driver crashes the machine on boot. 
Signed-off-by: Tvrtko Ursulin Acked-by: Joonas Lahtinen Fixes: 0629d4da1f15 ("drm/i915: Update description of i915.enable_guc modparam") Cc: Tvrtko Ursulin Cc: Michal Wajdeczko Cc: Jani Nikula Cc: Joonas Lahtinen Cc: Rodrigo Vivi Cc: Chris Wilson Cc: Daniele Ceraolo Spurio Cc: Jani Nikula Cc: intel-gfx@lists.freedesktop.org Link: https://patchwork.freedesktop.org/patch/msgid/20190719094845.6242-3-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_params.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index b4f481e1e6b6..296452f9efe4 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -146,7 +146,7 @@ i915_param_named_unsafe(edp_vswing, int, 0400, i915_param_named_unsafe(enable_guc, int, 0400, "Enable GuC load for GuC submission and/or HuC load. " "Required functionality can be selected using bitmask values. " - "(-1=auto [default], 0=disable, 1=GuC submission, 2=HuC load)"); + "(-1=auto, 0=disable [default], 1=GuC submission, 2=HuC load)"); i915_param_named(guc_log_level, int, 0400, "GuC firmware logging level. Requires GuC to be loaded. " -- cgit v1.2.3 From 6b5f3cb1aa20e196d35aba21fbf363d3736ff4d5 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 19 Jul 2019 14:15:24 +0100 Subject: drm/i915/gtt: Correct unshifted 'from' for gen8_ppgtt_alloc errors Since the underlying __gen8_ppgtt_clear takes the shifted address, we must remember to provide it with the shifted original start address. Reported-by: Tvrtko Ursulin Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Cc: Abdiel Janulgue Reviewed-by: Tvrtko Ursulin Tested-by: Michal Wajdeczko Reviewed-by: Michal Wajdeczko Link: https://patchwork.freedesktop.org/patch/msgid/20190719131524.827-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_gtt.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 220aba5a94d2..031bcd22f5e6 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1103,7 +1103,7 @@ out: static int gen8_ppgtt_alloc(struct i915_address_space *vm, u64 start, u64 length) { - u64 from = start; + u64 from; int err; GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT))); @@ -1112,6 +1112,7 @@ static int gen8_ppgtt_alloc(struct i915_address_space *vm, start >>= GEN8_PTE_SHIFT; length >>= GEN8_PTE_SHIFT; GEM_BUG_ON(length == 0); + from = start; err = __gen8_ppgtt_alloc(vm, i915_vm_to_ppgtt(vm)->pd, &start, start + length, vm->top); -- cgit v1.2.3 From 5cad0ddf4b78559221a123c627df291cbe4d3ccd Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Fri, 19 Jul 2019 15:33:22 +0000 Subject: drm/i915/gtt: Don't try to clear failed empty pd allocation When __gen8_ppgtt_alloc fails without allocating anything we should not try to call __gen8_ppgtt_clear as there is nothing to clear and underlying code will complain with: [ 157.861645] gen8_pd_range:881 GEM_BUG_ON(start >= end) Signed-off-by: Michal Wajdeczko Cc: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190719153322.10464-1-michal.wajdeczko@intel.com --- drivers/gpu/drm/i915/i915_gem_gtt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c 
b/drivers/gpu/drm/i915/i915_gem_gtt.c index 031bcd22f5e6..0930455824b4 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1116,7 +1116,7 @@ static int gen8_ppgtt_alloc(struct i915_address_space *vm, err = __gen8_ppgtt_alloc(vm, i915_vm_to_ppgtt(vm)->pd, &start, start + length, vm->top); - if (unlikely(err)) + if (unlikely(err && from != start)) __gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd, from, start, vm->top); -- cgit v1.2.3 From 0cc35a9c8279da03c34547830d5140cf38f7ce02 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Fri, 19 Jul 2019 01:51:36 +0000 Subject: drm/i915/dsi: remove set but not used variable 'hfront_porch' MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes gcc '-Wunused-but-set-variable' warning: drivers/gpu/drm/i915/display/icl_dsi.c: In function 'gen11_dsi_set_transcoder_timings': drivers/gpu/drm/i915/display/icl_dsi.c:768:6: warning: variable 'hfront_porch' set but not used [-Wunused-but-set-variable] It is never used and can be removed. Reported-by: Hulk Robot Signed-off-by: YueHaibing Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190719015136.103988-1-yuehaibing@huawei.com --- drivers/gpu/drm/i915/display/icl_dsi.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index 4d952accfaaa..a42348be0438 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -763,7 +763,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, enum transcoder dsi_trans; /* horizontal timings */ u16 htotal, hactive, hsync_start, hsync_end, hsync_size; - u16 hfront_porch, hback_porch; + u16 hback_porch; /* vertical timings */ u16 vtotal, vactive, vsync_start, vsync_end, vsync_shift; @@ -772,8 +772,6 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, hsync_start = adjusted_mode->crtc_hsync_start; hsync_end = adjusted_mode->crtc_hsync_end; hsync_size = hsync_end - hsync_start; - hfront_porch = (adjusted_mode->crtc_hsync_start - - adjusted_mode->crtc_hdisplay); hback_porch = (adjusted_mode->crtc_htotal - adjusted_mode->crtc_hsync_end); vactive = adjusted_mode->crtc_vdisplay; -- cgit v1.2.3 From 7af5cdb158f3398a3220bd2fe81cec8d2be9317c Mon Sep 17 00:00:00 2001 From: Brian Masney Date: Wed, 26 Jun 2019 22:05:15 -0400 Subject: drm/msm: correct NULL pointer dereference in context_init Correct attempted NULL pointer dereference in context_init() when running without an IOMMU. Reviewed-by: Rob Clark Signed-off-by: Brian Masney Fixes: 295b22ae596c ("drm/msm: Pass the MMU domain index in struct msm_file_private") Signed-off-by: Sean Paul Link: https://patchwork.freedesktop.org/patch/msgid/20190627020515.5660-1-masneyb@onstation.org --- drivers/gpu/drm/msm/msm_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index ab64ab470de7..c226156f2dea 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -619,7 +619,7 @@ static int context_init(struct drm_device *dev, struct drm_file *file) msm_submitqueue_init(dev, ctx); - ctx->aspace = priv->gpu->aspace; + ctx->aspace = priv->gpu ? 
priv->gpu->aspace : NULL; file->driver_priv = ctx; return 0; -- cgit v1.2.3 From 2e7b801eadbf327bf61041c943e5c44a5de4b0e5 Mon Sep 17 00:00:00 2001 From: Shubhashree Dhar Date: Mon, 24 Jun 2019 11:57:12 +0530 Subject: drm/msm/dpu: Correct dpu encoder spinlock initialization dpu encoder spinlock should be initialized during dpu encoder init instead of dpu encoder setup which is part of modeset init. Signed-off-by: Shubhashree Dhar [seanpaul resolved conflict in old init removal and revised the commit message] Signed-off-by: Sean Paul Link: https://patchwork.freedesktop.org/patch/msgid/1561357632-15361-1-git-send-email-dhar@codeaurora.org --- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index 0e2f74163a16..0aa8a12c9952 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -2221,8 +2221,6 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc, if (ret) goto fail; - spin_lock_init(&dpu_enc->enc_spinlock); - atomic_set(&dpu_enc->frame_done_timeout_ms, 0); timer_setup(&dpu_enc->frame_done_timer, dpu_encoder_frame_done_timeout, 0); @@ -2276,6 +2274,7 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev, drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs); + spin_lock_init(&dpu_enc->enc_spinlock); dpu_enc->enabled = false; return &dpu_enc->base; -- cgit v1.2.3 From 0036bc73ccbe7e600a3468bf8e8879b122252274 Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Sun, 30 Jun 2019 05:47:22 -0700 Subject: drm/msm: stop abusing dma_map/unmap for cache Recently splats like this started showing up: WARNING: CPU: 4 PID: 251 at drivers/iommu/dma-iommu.c:451 __iommu_dma_unmap+0xb8/0xc0 Modules linked in: ath10k_snoc ath10k_core fuse msm ath mac80211 uvcvideo cfg80211 videobuf2_vmalloc videobuf2_memops vide CPU: 4 PID: 251 Comm: kworker/u16:4 Tainted: G W 5.2.0-rc5-next-20190619+ #2317 Hardware name: LENOVO 81JL/LNVNB161216, BIOS 9UCN23WW(V1.06) 10/25/2018 Workqueue: msm msm_gem_free_work [msm] pstate: 80c00005 (Nzcv daif +PAN +UAO) pc : __iommu_dma_unmap+0xb8/0xc0 lr : __iommu_dma_unmap+0x54/0xc0 sp : ffff0000119abce0 x29: ffff0000119abce0 x28: 0000000000000000 x27: ffff8001f9946648 x26: ffff8001ec271068 x25: 0000000000000000 x24: ffff8001ea3580a8 x23: ffff8001f95ba010 x22: ffff80018e83ba88 x21: ffff8001e548f000 x20: fffffffffffff000 x19: 0000000000001000 x18: 00000000c00001fe x17: 0000000000000000 x16: 0000000000000000 x15: ffff000015b70068 x14: 0000000000000005 x13: 0003142cc1be1768 x12: 0000000000000001 x11: ffff8001f6de9100 x10: 0000000000000009 x9 : ffff000015b78000 x8 : 0000000000000000 x7 : 0000000000000001 x6 : fffffffffffff000 x5 : 0000000000000fff x4 : ffff00001065dbc8 x3 : 000000000000000d x2 : 0000000000001000 x1 : fffffffffffff000 x0 : 0000000000000000 Call trace: __iommu_dma_unmap+0xb8/0xc0 iommu_dma_unmap_sg+0x98/0xb8 put_pages+0x5c/0xf0 [msm] msm_gem_free_work+0x10c/0x150 [msm] process_one_work+0x1e0/0x330 worker_thread+0x40/0x438 kthread+0x12c/0x130 ret_from_fork+0x10/0x18 ---[ end trace afc0dc5ab81a06bf ]--- Not quite sure what triggered that, but we really shouldn't be abusing dma_{map,unmap}_sg() for cache maint. 
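The fix below swaps dma_map_sg()/dma_unmap_sg(), which create and tear down a mapping, for dma_sync_sg_for_device()/dma_sync_sg_for_cpu(), which only perform cache maintenance on an already-mapped buffer. The kernel-style sketch here is illustrative, not the msm driver's code; only the DMA API calls are the real interface.

/* Kernel-style sketch: the buffer stays mapped, ownership is handed
 * back and forth with sync calls instead of map/unmap. */
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static void buffer_sync_for_device(struct device *dev, struct sg_table *sgt)
{
	/* CPU is done writing; make the data visible to the device. */
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

static void buffer_sync_for_cpu(struct device *dev, struct sg_table *sgt)
{
	/* Device is done; hand ownership of the cache lines back to the CPU. */
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}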
Cc: Stephen Boyd Tested-by: Stephen Boyd Reviewed-by: Jordan Crouse Signed-off-by: Rob Clark Signed-off-by: Sean Paul Link: https://patchwork.freedesktop.org/patch/msgid/20190630124735.27786-1-robdclark@gmail.com --- drivers/gpu/drm/msm/msm_gem.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 8b78554cfde3..c2114c748c2f 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -97,7 +97,7 @@ static struct page **get_pages(struct drm_gem_object *obj) * because display controller, GPU, etc. are not coherent: */ if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) - dma_map_sg(dev->dev, msm_obj->sgt->sgl, + dma_sync_sg_for_device(dev->dev, msm_obj->sgt->sgl, msm_obj->sgt->nents, DMA_BIDIRECTIONAL); } @@ -127,7 +127,7 @@ static void put_pages(struct drm_gem_object *obj) * GPU, etc. are not coherent: */ if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) - dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl, + dma_sync_sg_for_cpu(obj->dev->dev, msm_obj->sgt->sgl, msm_obj->sgt->nents, DMA_BIDIRECTIONAL); -- cgit v1.2.3 From ebf8fc31cbcedc9d6a81642082661c82eae284fb Mon Sep 17 00:00:00 2001 From: Kevin Wang Date: Fri, 19 Jul 2019 11:39:21 +0800 Subject: drm/amd/powerplay: custom peak clock freq for navi10 v2: add function smu_default_set_performance_level as default dpm level handler. change function name smu_set_performance_level to smu_asic_set_performance_level v1: 1.NAVI10_PEAK_SCLK_XTX 1830 Mhz 2.NAVI10_PEAK_SCLK_XT 1755 Mhz 3.NAVI10_PEAK_SCLK_XL 1625 Mhz Signed-off-by: Kevin Wang Reviewed-by: Huang Rui Reviewed-by: Kenneth Feng Reviewed-by: Evan Quan Reviewed-by: Jack Gui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 73 ++++++++++++++------------ drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 4 ++ drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 55 +++++++++++++++++++ drivers/gpu/drm/amd/powerplay/navi10_ppt.h | 4 ++ 4 files changed, 103 insertions(+), 33 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index b6b6c10eeb6e..d3cc90eb160c 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -1394,13 +1394,49 @@ static int smu_enable_umd_pstate(void *handle, return 0; } +static int smu_default_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level) +{ + int ret = 0; + uint32_t sclk_mask, mclk_mask, soc_mask; + + switch (level) { + case AMD_DPM_FORCED_LEVEL_HIGH: + ret = smu_force_dpm_limit_value(smu, true); + break; + case AMD_DPM_FORCED_LEVEL_LOW: + ret = smu_force_dpm_limit_value(smu, false); + break; + case AMD_DPM_FORCED_LEVEL_AUTO: + case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: + ret = smu_unforce_dpm_levels(smu); + break; + case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: + case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: + case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: + ret = smu_get_profiling_clk_mask(smu, level, + &sclk_mask, + &mclk_mask, + &soc_mask); + if (ret) + return ret; + smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask); + smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask); + smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask); + break; + case AMD_DPM_FORCED_LEVEL_MANUAL: + case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: + default: + break; + } + return ret; +} + int smu_adjust_power_state_dynamic(struct smu_context *smu, enum amd_dpm_forced_level level, bool 
skip_display_settings) { int ret = 0; int index = 0; - uint32_t sclk_mask, mclk_mask, soc_mask; long workload; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); @@ -1431,39 +1467,10 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu, } if (smu_dpm_ctx->dpm_level != level) { - switch (level) { - case AMD_DPM_FORCED_LEVEL_HIGH: - ret = smu_force_dpm_limit_value(smu, true); - break; - case AMD_DPM_FORCED_LEVEL_LOW: - ret = smu_force_dpm_limit_value(smu, false); - break; - - case AMD_DPM_FORCED_LEVEL_AUTO: - case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: - ret = smu_unforce_dpm_levels(smu); - break; - - case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: - case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: - case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: - ret = smu_get_profiling_clk_mask(smu, level, - &sclk_mask, - &mclk_mask, - &soc_mask); - if (ret) - return ret; - smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask); - smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask); - smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask); - break; - - case AMD_DPM_FORCED_LEVEL_MANUAL: - case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: - default: - break; + ret = smu_asic_set_performance_level(smu, level); + if (ret) { + ret = smu_default_set_performance_level(smu, level); } - if (!ret) smu_dpm_ctx->dpm_level = level; } diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index 514d31518853..34093ddca105 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h @@ -631,6 +631,7 @@ struct pptable_funcs { int (*get_thermal_temperature_range)(struct smu_context *smu, struct smu_temperature_range *range); int (*get_uclk_dpm_states)(struct smu_context *smu, uint32_t *clocks_in_khz, uint32_t *num_states); int (*set_default_od_settings)(struct smu_context *smu, bool initialize); + int (*set_performance_level)(struct smu_context *smu, enum amd_dpm_forced_level level); }; struct smu_funcs @@ -928,6 +929,9 @@ struct smu_funcs ((smu)->funcs->baco_get_state? (smu)->funcs->baco_get_state((smu), (state)) : 0) #define smu_baco_reset(smu) \ ((smu)->funcs->baco_reset? (smu)->funcs->baco_reset((smu)) : 0) +#define smu_asic_set_performance_level(smu, level) \ + ((smu)->ppt_funcs->set_performance_level? 
(smu)->ppt_funcs->set_performance_level((smu), (level)) : -EINVAL); + extern int smu_get_atom_data_table(struct smu_context *smu, uint32_t table, uint16_t *size, uint8_t *frev, uint8_t *crev, diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index 80daded31970..b7b44ffe2670 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -1590,6 +1590,60 @@ static int navi10_set_ppfeature_status(struct smu_context *smu, return 0; } +static int navi10_set_peak_clock_by_device(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + int ret = 0; + uint32_t sclk_freq = 0, uclk_freq = 0; + uint32_t uclk_level = 0; + + switch (adev->rev_id) { + case 0xf0: /* XTX */ + case 0xc0: + sclk_freq = NAVI10_PEAK_SCLK_XTX; + break; + case 0xf1: /* XT */ + case 0xc1: + sclk_freq = NAVI10_PEAK_SCLK_XT; + break; + default: /* XL */ + sclk_freq = NAVI10_PEAK_SCLK_XL; + break; + } + + ret = smu_get_dpm_level_count(smu, SMU_UCLK, &uclk_level); + if (ret) + return ret; + ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK, uclk_level - 1, &uclk_freq); + if (ret) + return ret; + + ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq); + if (ret) + return ret; + ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq); + if (ret) + return ret; + + return ret; +} + +static int navi10_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level) +{ + int ret = 0; + + switch (level) { + case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: + ret = navi10_set_peak_clock_by_device(smu); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + static const struct pptable_funcs navi10_ppt_funcs = { .tables_init = navi10_tables_init, .alloc_dpm_context = navi10_allocate_dpm_context, @@ -1625,6 +1679,7 @@ static const struct pptable_funcs navi10_ppt_funcs = { .get_uclk_dpm_states = navi10_get_uclk_dpm_states, .get_ppfeature_status = navi10_get_ppfeature_status, .set_ppfeature_status = navi10_set_ppfeature_status, + .set_performance_level = navi10_set_performance_level, }; void navi10_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h index 957288e22f47..620ff17c2fef 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h @@ -23,6 +23,10 @@ #ifndef __NAVI10_PPT_H__ #define __NAVI10_PPT_H__ +#define NAVI10_PEAK_SCLK_XTX (1830) +#define NAVI10_PEAK_SCLK_XT (1755) +#define NAVI10_PEAK_SCLK_XL (1625) + extern void navi10_set_ppt_funcs(struct smu_context *smu); #endif -- cgit v1.2.3 From b629167d686787507f35a8bfb46b8b12f4f26eb7 Mon Sep 17 00:00:00 2001 From: Kevin Wang Date: Fri, 19 Jul 2019 16:06:29 +0800 Subject: drm/amd/powerplay: remove redundancy debug log in smu remove redundacy debug log in smu. 
eg: [ 6897.969447] amdgpu: [powerplay] smu 11 clk dpm feature 1 is not enabled [ 6897.969448] amdgpu: [powerplay] smu 11 clk dpm feature 1 is not enabled [ 6897.969448] amdgpu: [powerplay] smu 11 clk dpm feature 1 is not enabled [ 6899.024114] amdgpu: [powerplay] Unsupported SMU message: 38 [ 6899.024151] amdgpu: [powerplay] smu 11 clk dpm feature 1 is not enabled [ 6899.024151] amdgpu: [powerplay] smu 11 clk dpm feature 1 is not enabled [ 6899.024152] amdgpu: [powerplay] smu 11 clk dpm feature 1 is not enabled [ 6900.078296] amdgpu: [powerplay] Unsupported SMU message: 38 [ 6900.078332] amdgpu: [powerplay] smu 11 clk dpm feature 1 is not enabled [ 6900.078332] amdgpu: [powerplay] smu 11 clk dpm feature 1 is not enabled [ 6900.078333] amdgpu: [powerplay] smu 11 clk dpm feature 1 is not enabled [ 6901.133230] amdgpu: [powerplay] Unsupported SMU message: 38 Signed-off-by: Kevin Wang Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 1 - drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 6 ------ drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 6 ------ 3 files changed, 13 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index d3cc90eb160c..0b1285ddc954 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -262,7 +262,6 @@ bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type) } if(!smu_feature_is_enabled(smu, feature_id)) { - pr_warn("smu %d clk dpm feature %d is not enabled\n", clk_type, feature_id); return false; } diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index b7b44ffe2670..c178062d00a3 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -217,7 +217,6 @@ static int navi10_get_smu_msg_index(struct smu_context *smc, uint32_t index) mapping = navi10_message_map[index]; if (!(mapping.valid_mapping)) { - pr_warn("Unsupported SMU message: %d\n", index); return -EINVAL; } @@ -233,7 +232,6 @@ static int navi10_get_smu_clk_index(struct smu_context *smc, uint32_t index) mapping = navi10_clk_map[index]; if (!(mapping.valid_mapping)) { - pr_warn("Unsupported SMU clock: %d\n", index); return -EINVAL; } @@ -249,7 +247,6 @@ static int navi10_get_smu_feature_index(struct smu_context *smc, uint32_t index) mapping = navi10_feature_mask_map[index]; if (!(mapping.valid_mapping)) { - pr_warn("Unsupported SMU feature: %d\n", index); return -EINVAL; } @@ -265,7 +262,6 @@ static int navi10_get_smu_table_index(struct smu_context *smc, uint32_t index) mapping = navi10_table_map[index]; if (!(mapping.valid_mapping)) { - pr_warn("Unsupported SMU table: %d\n", index); return -EINVAL; } @@ -281,7 +277,6 @@ static int navi10_get_pwr_src_index(struct smu_context *smc, uint32_t index) mapping = navi10_pwr_src_map[index]; if (!(mapping.valid_mapping)) { - pr_warn("Unsupported power source: %d\n", index); return -EINVAL; } @@ -298,7 +293,6 @@ static int navi10_get_workload_type(struct smu_context *smu, enum PP_SMC_POWER_P mapping = navi10_workload_map[profile]; if (!(mapping.valid_mapping)) { - pr_warn("Unsupported workload: %d\n", (int)profile); return -EINVAL; } diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c index 080956ea0570..afcd33941f07 100644 --- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c +++ 
b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c @@ -232,7 +232,6 @@ static int vega20_get_smu_table_index(struct smu_context *smc, uint32_t index) mapping = vega20_table_map[index]; if (!(mapping.valid_mapping)) { - pr_warn("Unsupported SMU table: %d\n", index); return -EINVAL; } @@ -248,7 +247,6 @@ static int vega20_get_pwr_src_index(struct smu_context *smc, uint32_t index) mapping = vega20_pwr_src_map[index]; if (!(mapping.valid_mapping)) { - pr_warn("Unsupported power source: %d\n", index); return -EINVAL; } @@ -264,7 +262,6 @@ static int vega20_get_smu_feature_index(struct smu_context *smc, uint32_t index) mapping = vega20_feature_mask_map[index]; if (!(mapping.valid_mapping)) { - pr_warn("Unsupported SMU feature: %d\n", index); return -EINVAL; } @@ -280,7 +277,6 @@ static int vega20_get_smu_clk_index(struct smu_context *smc, uint32_t index) mapping = vega20_clk_map[index]; if (!(mapping.valid_mapping)) { - pr_warn("Unsupported SMU clock: %d\n", index); return -EINVAL; } @@ -296,7 +292,6 @@ static int vega20_get_smu_msg_index(struct smu_context *smc, uint32_t index) mapping = vega20_message_map[index]; if (!(mapping.valid_mapping)) { - pr_warn("Unsupported SMU message: %d\n", index); return -EINVAL; } @@ -312,7 +307,6 @@ static int vega20_get_workload_type(struct smu_context *smu, enum PP_SMC_POWER_P mapping = vega20_workload_map[profile]; if (!(mapping.valid_mapping)) { - pr_warn("Unsupported SMU workload: %d\n", (int)profile); return -EINVAL; } -- cgit v1.2.3 From d52d6de2809a31b58a62f9ecba58bfd8fbf1bc63 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Fri, 19 Jul 2019 19:09:38 +0800 Subject: drm/amdgpu: set sdma irq src num according to sdma instances Otherwise, it will cause driver access non-existing sdma registers in gpu reset code path Signed-off-by: Hawking Zhang Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 921a6dd9cbae..dd8f520b3fa1 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -2416,10 +2416,23 @@ static const struct amdgpu_irq_src_funcs sdma_v4_0_ecc_irq_funcs = { static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev) { - adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST; + switch (adev->sdma.num_instances) { + case 1: + adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE1; + adev->sdma.ecc_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE1; + break; + case 8: + adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST; + adev->sdma.ecc_irq.num_types = AMDGPU_SDMA_IRQ_LAST; + break; + case 2: + default: + adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE2; + adev->sdma.ecc_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE2; + break; + } adev->sdma.trap_irq.funcs = &sdma_v4_0_trap_irq_funcs; adev->sdma.illegal_inst_irq.funcs = &sdma_v4_0_illegal_inst_irq_funcs; - adev->sdma.ecc_irq.num_types = AMDGPU_SDMA_IRQ_LAST; adev->sdma.ecc_irq.funcs = &sdma_v4_0_ecc_irq_funcs; } -- cgit v1.2.3 From 95ccc155081be9cea95202d290fe06637f47c8de Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 18 Jul 2019 15:25:04 -0500 Subject: drm/amdgpu/smu: move fan rpm query into the asic specific code On vega20, there is an SMU message to query it. On navi, it's fetched from the metrics table. 
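The change below turns the fan-RPM query into a per-ASIC callback in the pptable function table, with the common code dispatching through a function pointer. The user-space sketch here models only that dispatch shape; the struct and function names are invented and the stubbed return values are placeholders, not amdgpu behaviour.

/* User-space model of per-ASIC dispatch through an ops table. */
#include <stdio.h>

struct asic_ops {
	int (*get_fan_speed_rpm)(unsigned int *rpm); /* may be NULL */
};

/* vega20-style: ask firmware via a message (stubbed here). */
static int vega20_fan_rpm(unsigned int *rpm) { *rpm = 1500; return 0; }

/* navi10-style: read it out of a metrics table (stubbed here). */
static int navi10_fan_rpm(unsigned int *rpm) { *rpm = 1200; return 0; }

static int get_fan_speed_rpm(const struct asic_ops *ops, unsigned int *rpm)
{
	/* ASICs without a callback simply report "unsupported". */
	if (!ops->get_fan_speed_rpm)
		return -1;
	return ops->get_fan_speed_rpm(rpm);
}

int main(void)
{
	struct asic_ops vega20 = { .get_fan_speed_rpm = vega20_fan_rpm };
	struct asic_ops navi10 = { .get_fan_speed_rpm = navi10_fan_rpm };
	unsigned int rpm;

	if (!get_fan_speed_rpm(&vega20, &rpm))
		printf("vega20: %u rpm\n", rpm);
	if (!get_fan_speed_rpm(&navi10, &rpm))
		printf("navi10: %u rpm\n", rpm);
	return 0;
}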
Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 4 ++-- drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 6 +++--- drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 12 +++++++----- drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 18 ------------------ drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 20 +++++++++++++++++++- 5 files changed, 31 insertions(+), 29 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 8b7efd0a7028..03ca8c69114f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -1734,7 +1734,7 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev, return -EINVAL; if (is_support_sw_smu(adev)) { - err = smu_get_current_rpm(&adev->smu, &speed); + err = smu_get_fan_speed_rpm(&adev->smu, &speed); if (err) return err; } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) { @@ -1794,7 +1794,7 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev, return -EINVAL; if (is_support_sw_smu(adev)) { - err = smu_get_current_rpm(&adev->smu, &rpm); + err = smu_get_fan_speed_rpm(&adev->smu, &rpm); if (err) return err; } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) { diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index 34093ddca105..b702c9ee975f 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h @@ -623,6 +623,7 @@ struct pptable_funcs { int (*tables_init)(struct smu_context *smu, struct smu_table *tables); int (*set_thermal_fan_table)(struct smu_context *smu); int (*get_fan_speed_percent)(struct smu_context *smu, uint32_t *speed); + int (*get_fan_speed_rpm)(struct smu_context *smu, uint32_t *speed); int (*set_watermarks_table)(struct smu_context *smu, void *watermarks, struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges); int (*get_current_clk_freq_by_table)(struct smu_context *smu, @@ -696,7 +697,6 @@ struct smu_funcs int (*set_watermarks_for_clock_ranges)(struct smu_context *smu, struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges); int (*conv_power_profile_to_pplib_workload)(int power_profile); - int (*get_current_rpm)(struct smu_context *smu, uint32_t *speed); uint32_t (*get_fan_control_mode)(struct smu_context *smu); int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode); int (*set_fan_speed_percent)(struct smu_context *smu, uint32_t speed); @@ -762,8 +762,6 @@ struct smu_funcs ((smu)->funcs->init_max_sustainable_clocks ? (smu)->funcs->init_max_sustainable_clocks((smu)) : 0) #define smu_set_default_od_settings(smu, initialize) \ ((smu)->ppt_funcs->set_default_od_settings ? (smu)->ppt_funcs->set_default_od_settings((smu), (initialize)) : 0) -#define smu_get_current_rpm(smu, speed) \ - ((smu)->funcs->get_current_rpm ? (smu)->funcs->get_current_rpm((smu), (speed)) : 0) #define smu_set_fan_speed_rpm(smu, speed) \ ((smu)->funcs->set_fan_speed_rpm ? (smu)->funcs->set_fan_speed_rpm((smu), (speed)) : 0) #define smu_send_smc_msg(smu, msg) \ @@ -852,6 +850,8 @@ struct smu_funcs ((smu)->ppt_funcs->get_fan_speed_percent ? (smu)->ppt_funcs->get_fan_speed_percent((smu), (speed)) : 0) #define smu_set_fan_speed_percent(smu, speed) \ ((smu)->funcs->set_fan_speed_percent ? (smu)->funcs->set_fan_speed_percent((smu), (speed)) : 0) +#define smu_get_fan_speed_rpm(smu, speed) \ + ((smu)->ppt_funcs->get_fan_speed_rpm ? 
(smu)->ppt_funcs->get_fan_speed_rpm((smu), (speed)) : 0) #define smu_msg_get_index(smu, msg) \ ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_msg_index? (smu)->ppt_funcs->get_smu_msg_index((smu), (msg)) : -EINVAL) : -EINVAL) diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index c178062d00a3..2d0c8890abbb 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -968,12 +968,13 @@ static bool navi10_is_dpm_running(struct smu_context *smu) return !!(feature_enabled & SMC_DPM_FEATURE); } -static int navi10_get_fan_speed(struct smu_context *smu, uint16_t *value) +static int navi10_get_fan_speed_rpm(struct smu_context *smu, + uint32_t *speed) { SmuMetrics_t metrics; int ret = 0; - if (!value) + if (!speed) return -EINVAL; memset(&metrics, 0, sizeof(metrics)); @@ -983,7 +984,7 @@ static int navi10_get_fan_speed(struct smu_context *smu, uint16_t *value) if (ret) return ret; - *value = metrics.CurrFanSpeed; + *speed = metrics.CurrFanSpeed; return ret; } @@ -993,10 +994,10 @@ static int navi10_get_fan_speed_percent(struct smu_context *smu, { int ret = 0; uint32_t percent = 0; - uint16_t current_rpm; + uint32_t current_rpm; PPTable_t *pptable = smu->smu_table.driver_pptable; - ret = navi10_get_fan_speed(smu, ¤t_rpm); + ret = navi10_get_fan_speed_rpm(smu, ¤t_rpm); if (ret) return ret; @@ -1665,6 +1666,7 @@ static const struct pptable_funcs navi10_ppt_funcs = { .unforce_dpm_levels = navi10_unforce_dpm_levels, .is_dpm_running = navi10_is_dpm_running, .get_fan_speed_percent = navi10_get_fan_speed_percent, + .get_fan_speed_rpm = navi10_get_fan_speed_rpm, .get_power_profile_mode = navi10_get_power_profile_mode, .set_power_profile_mode = navi10_set_power_profile_mode, .get_profiling_clk_mask = navi10_get_profiling_clk_mask, diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index 9f3a84fbb9cf..a41ce29b23dc 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -1405,23 +1405,6 @@ static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable) return ret; } -static int smu_v11_0_get_current_rpm(struct smu_context *smu, - uint32_t *current_rpm) -{ - int ret; - - ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm); - - if (ret) { - pr_err("Attempt to get current RPM from SMC Failed!\n"); - return ret; - } - - smu_read_smc_arg(smu, current_rpm); - - return 0; -} - static uint32_t smu_v11_0_get_fan_control_mode(struct smu_context *smu) { @@ -1807,7 +1790,6 @@ static const struct smu_funcs smu_v11_0_funcs = { .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk, .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request, .set_watermarks_for_clock_ranges = smu_v11_0_set_watermarks_for_clock_ranges, - .get_current_rpm = smu_v11_0_get_current_rpm, .get_fan_control_mode = smu_v11_0_get_fan_control_mode, .set_fan_control_mode = smu_v11_0_set_fan_control_mode, .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent, diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c index afcd33941f07..03e310426ffb 100644 --- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c @@ -3033,6 +3033,23 @@ static int vega20_set_thermal_fan_table(struct smu_context *smu) return ret; } +static int vega20_get_fan_speed_rpm(struct smu_context *smu, + uint32_t *speed) +{ + int ret; + + ret = smu_send_smc_msg(smu, 
SMU_MSG_GetCurrentRpm); + + if (ret) { + pr_err("Attempt to get current RPM from SMC Failed!\n"); + return ret; + } + + smu_read_smc_arg(smu, speed); + + return 0; +} + static int vega20_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed) { @@ -3040,7 +3057,7 @@ static int vega20_get_fan_speed_percent(struct smu_context *smu, uint32_t current_rpm = 0, percent = 0; PPTable_t *pptable = smu->smu_table.driver_pptable; - ret = smu_get_current_rpm(smu, ¤t_rpm); + ret = vega20_get_fan_speed_rpm(smu, ¤t_rpm); if (ret) return ret; @@ -3311,6 +3328,7 @@ static const struct pptable_funcs vega20_ppt_funcs = { .is_dpm_running = vega20_is_dpm_running, .set_thermal_fan_table = vega20_set_thermal_fan_table, .get_fan_speed_percent = vega20_get_fan_speed_percent, + .get_fan_speed_rpm = vega20_get_fan_speed_rpm, .set_watermarks_table = vega20_set_watermarks_table, .get_thermal_temperature_range = vega20_get_thermal_temperature_range }; -- cgit v1.2.3 From bbb6fc43f131f77fcb7ae8081f6d7c51396a2120 Mon Sep 17 00:00:00 2001 From: Qian Cai Date: Mon, 22 Jul 2019 15:14:46 -0400 Subject: drm: silence variable 'conn' set but not used The "struct drm_connector" iteration cursor from "for_each_new_connector_in_state" is never used in atomic_remove_fb() which generates a compilation warning, drivers/gpu/drm/drm_framebuffer.c: In function 'atomic_remove_fb': drivers/gpu/drm/drm_framebuffer.c:838:24: warning: variable 'conn' set but not used [-Wunused-but-set-variable] Silence it by marking "conn" __maybe_unused. Signed-off-by: Qian Cai Signed-off-by: Sean Paul Link: https://patchwork.freedesktop.org/patch/msgid/1563822886-13570-1-git-send-email-cai@lca.pw --- drivers/gpu/drm/drm_framebuffer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index 0b72468e8131..57564318ceea 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -835,7 +835,7 @@ static int atomic_remove_fb(struct drm_framebuffer *fb) struct drm_device *dev = fb->dev; struct drm_atomic_state *state; struct drm_plane *plane; - struct drm_connector *conn; + struct drm_connector *conn __maybe_unused; struct drm_connector_state *conn_state; int i, ret; unsigned plane_mask; -- cgit v1.2.3 From b840e4d5fec6fe2019de36b10412b69df9c4d39a Mon Sep 17 00:00:00 2001 From: Kevin Wang Date: Fri, 19 Jul 2019 11:39:21 +0800 Subject: drm/amd/powerplay: custom peak clock freq for navi10 v2: add function smu_default_set_performance_level as default dpm level handler. 
change function name smu_set_performance_level to smu_asic_set_performance_level v1: 1.NAVI10_PEAK_SCLK_XTX 1830 Mhz 2.NAVI10_PEAK_SCLK_XT 1755 Mhz 3.NAVI10_PEAK_SCLK_XL 1625 Mhz Signed-off-by: Kevin Wang Reviewed-by: Huang Rui Reviewed-by: Kenneth Feng Reviewed-by: Evan Quan Reviewed-by: Jack Gui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 73 ++++++++++++++------------ drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 4 ++ drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 55 +++++++++++++++++++ drivers/gpu/drm/amd/powerplay/navi10_ppt.h | 4 ++ 4 files changed, 103 insertions(+), 33 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 768aae2e20da..c097113c3976 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -1374,13 +1374,49 @@ static int smu_enable_umd_pstate(void *handle, return 0; } +static int smu_default_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level) +{ + int ret = 0; + uint32_t sclk_mask, mclk_mask, soc_mask; + + switch (level) { + case AMD_DPM_FORCED_LEVEL_HIGH: + ret = smu_force_dpm_limit_value(smu, true); + break; + case AMD_DPM_FORCED_LEVEL_LOW: + ret = smu_force_dpm_limit_value(smu, false); + break; + case AMD_DPM_FORCED_LEVEL_AUTO: + case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: + ret = smu_unforce_dpm_levels(smu); + break; + case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: + case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: + case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: + ret = smu_get_profiling_clk_mask(smu, level, + &sclk_mask, + &mclk_mask, + &soc_mask); + if (ret) + return ret; + smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask); + smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask); + smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask); + break; + case AMD_DPM_FORCED_LEVEL_MANUAL: + case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: + default: + break; + } + return ret; +} + int smu_adjust_power_state_dynamic(struct smu_context *smu, enum amd_dpm_forced_level level, bool skip_display_settings) { int ret = 0; int index = 0; - uint32_t sclk_mask, mclk_mask, soc_mask; long workload; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); @@ -1411,39 +1447,10 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu, } if (smu_dpm_ctx->dpm_level != level) { - switch (level) { - case AMD_DPM_FORCED_LEVEL_HIGH: - ret = smu_force_dpm_limit_value(smu, true); - break; - case AMD_DPM_FORCED_LEVEL_LOW: - ret = smu_force_dpm_limit_value(smu, false); - break; - - case AMD_DPM_FORCED_LEVEL_AUTO: - case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: - ret = smu_unforce_dpm_levels(smu); - break; - - case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: - case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: - case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: - ret = smu_get_profiling_clk_mask(smu, level, - &sclk_mask, - &mclk_mask, - &soc_mask); - if (ret) - return ret; - smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask); - smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask); - smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask); - break; - - case AMD_DPM_FORCED_LEVEL_MANUAL: - case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: - default: - break; + ret = smu_asic_set_performance_level(smu, level); + if (ret) { + ret = smu_default_set_performance_level(smu, level); } - if (!ret) smu_dpm_ctx->dpm_level = level; } diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index 
1af992fb0bde..564446ff0d1b 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h @@ -621,6 +621,7 @@ struct pptable_funcs { int (*get_thermal_temperature_range)(struct smu_context *smu, struct smu_temperature_range *range); int (*get_uclk_dpm_states)(struct smu_context *smu, uint32_t *clocks_in_khz, uint32_t *num_states); int (*set_default_od_settings)(struct smu_context *smu, bool initialize); + int (*set_performance_level)(struct smu_context *smu, enum amd_dpm_forced_level level); }; struct smu_funcs @@ -918,6 +919,9 @@ struct smu_funcs ((smu)->funcs->baco_get_state? (smu)->funcs->baco_get_state((smu), (state)) : 0) #define smu_baco_reset(smu) \ ((smu)->funcs->baco_reset? (smu)->funcs->baco_reset((smu)) : 0) +#define smu_asic_set_performance_level(smu, level) \ + ((smu)->ppt_funcs->set_performance_level? (smu)->ppt_funcs->set_performance_level((smu), (level)) : -EINVAL); + extern int smu_get_atom_data_table(struct smu_context *smu, uint32_t table, uint16_t *size, uint8_t *frev, uint8_t *crev, diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index 8293b5216aad..7b1753f9a5bf 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -1565,6 +1565,60 @@ static int navi10_set_ppfeature_status(struct smu_context *smu, return 0; } +static int navi10_set_peak_clock_by_device(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + int ret = 0; + uint32_t sclk_freq = 0, uclk_freq = 0; + uint32_t uclk_level = 0; + + switch (adev->rev_id) { + case 0xf0: /* XTX */ + case 0xc0: + sclk_freq = NAVI10_PEAK_SCLK_XTX; + break; + case 0xf1: /* XT */ + case 0xc1: + sclk_freq = NAVI10_PEAK_SCLK_XT; + break; + default: /* XL */ + sclk_freq = NAVI10_PEAK_SCLK_XL; + break; + } + + ret = smu_get_dpm_level_count(smu, SMU_UCLK, &uclk_level); + if (ret) + return ret; + ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK, uclk_level - 1, &uclk_freq); + if (ret) + return ret; + + ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq); + if (ret) + return ret; + ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq); + if (ret) + return ret; + + return ret; +} + +static int navi10_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level) +{ + int ret = 0; + + switch (level) { + case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: + ret = navi10_set_peak_clock_by_device(smu); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + static const struct pptable_funcs navi10_ppt_funcs = { .tables_init = navi10_tables_init, .alloc_dpm_context = navi10_allocate_dpm_context, @@ -1600,6 +1654,7 @@ static const struct pptable_funcs navi10_ppt_funcs = { .get_uclk_dpm_states = navi10_get_uclk_dpm_states, .get_ppfeature_status = navi10_get_ppfeature_status, .set_ppfeature_status = navi10_set_ppfeature_status, + .set_performance_level = navi10_set_performance_level, }; void navi10_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h index 957288e22f47..620ff17c2fef 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h @@ -23,6 +23,10 @@ #ifndef __NAVI10_PPT_H__ #define __NAVI10_PPT_H__ +#define NAVI10_PEAK_SCLK_XTX (1830) +#define NAVI10_PEAK_SCLK_XT (1755) +#define NAVI10_PEAK_SCLK_XL (1625) + extern void navi10_set_ppt_funcs(struct smu_context *smu); #endif -- cgit v1.2.3 From 
1bcff32679f60fe2387f63f22b8b35375052bcb0 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 18 Jul 2019 15:25:04 -0500 Subject: drm/amdgpu/smu: move fan rpm query into the asic specific code On vega20, there is an SMU message to query it. On navi, it's fetched from the metrics table. Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 4 ++-- drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 6 +++--- drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 12 +++++++----- drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 18 ------------------ drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 20 +++++++++++++++++++- 5 files changed, 31 insertions(+), 29 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 8b7efd0a7028..03ca8c69114f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -1734,7 +1734,7 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev, return -EINVAL; if (is_support_sw_smu(adev)) { - err = smu_get_current_rpm(&adev->smu, &speed); + err = smu_get_fan_speed_rpm(&adev->smu, &speed); if (err) return err; } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) { @@ -1794,7 +1794,7 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev, return -EINVAL; if (is_support_sw_smu(adev)) { - err = smu_get_current_rpm(&adev->smu, &rpm); + err = smu_get_fan_speed_rpm(&adev->smu, &rpm); if (err) return err; } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) { diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index 564446ff0d1b..22e46a289a16 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h @@ -613,6 +613,7 @@ struct pptable_funcs { int (*tables_init)(struct smu_context *smu, struct smu_table *tables); int (*set_thermal_fan_table)(struct smu_context *smu); int (*get_fan_speed_percent)(struct smu_context *smu, uint32_t *speed); + int (*get_fan_speed_rpm)(struct smu_context *smu, uint32_t *speed); int (*set_watermarks_table)(struct smu_context *smu, void *watermarks, struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges); int (*get_current_clk_freq_by_table)(struct smu_context *smu, @@ -686,7 +687,6 @@ struct smu_funcs int (*set_watermarks_for_clock_ranges)(struct smu_context *smu, struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges); int (*conv_power_profile_to_pplib_workload)(int power_profile); - int (*get_current_rpm)(struct smu_context *smu, uint32_t *speed); uint32_t (*get_fan_control_mode)(struct smu_context *smu); int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode); int (*set_fan_speed_percent)(struct smu_context *smu, uint32_t speed); @@ -752,8 +752,6 @@ struct smu_funcs ((smu)->funcs->init_max_sustainable_clocks ? (smu)->funcs->init_max_sustainable_clocks((smu)) : 0) #define smu_set_default_od_settings(smu, initialize) \ ((smu)->ppt_funcs->set_default_od_settings ? (smu)->ppt_funcs->set_default_od_settings((smu), (initialize)) : 0) -#define smu_get_current_rpm(smu, speed) \ - ((smu)->funcs->get_current_rpm ? (smu)->funcs->get_current_rpm((smu), (speed)) : 0) #define smu_set_fan_speed_rpm(smu, speed) \ ((smu)->funcs->set_fan_speed_rpm ? (smu)->funcs->set_fan_speed_rpm((smu), (speed)) : 0) #define smu_send_smc_msg(smu, msg) \ @@ -842,6 +840,8 @@ struct smu_funcs ((smu)->ppt_funcs->get_fan_speed_percent ? 
(smu)->ppt_funcs->get_fan_speed_percent((smu), (speed)) : 0) #define smu_set_fan_speed_percent(smu, speed) \ ((smu)->funcs->set_fan_speed_percent ? (smu)->funcs->set_fan_speed_percent((smu), (speed)) : 0) +#define smu_get_fan_speed_rpm(smu, speed) \ + ((smu)->ppt_funcs->get_fan_speed_rpm ? (smu)->ppt_funcs->get_fan_speed_rpm((smu), (speed)) : 0) #define smu_msg_get_index(smu, msg) \ ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_msg_index? (smu)->ppt_funcs->get_smu_msg_index((smu), (msg)) : -EINVAL) : -EINVAL) diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index 7b1753f9a5bf..4aaad255a288 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -954,12 +954,13 @@ static bool navi10_is_dpm_running(struct smu_context *smu) return !!(feature_enabled & SMC_DPM_FEATURE); } -static int navi10_get_fan_speed(struct smu_context *smu, uint16_t *value) +static int navi10_get_fan_speed_rpm(struct smu_context *smu, + uint32_t *speed) { SmuMetrics_t metrics; int ret = 0; - if (!value) + if (!speed) return -EINVAL; memset(&metrics, 0, sizeof(metrics)); @@ -969,7 +970,7 @@ static int navi10_get_fan_speed(struct smu_context *smu, uint16_t *value) if (ret) return ret; - *value = metrics.CurrFanSpeed; + *speed = metrics.CurrFanSpeed; return ret; } @@ -979,10 +980,10 @@ static int navi10_get_fan_speed_percent(struct smu_context *smu, { int ret = 0; uint32_t percent = 0; - uint16_t current_rpm; + uint32_t current_rpm; PPTable_t *pptable = smu->smu_table.driver_pptable; - ret = navi10_get_fan_speed(smu, ¤t_rpm); + ret = navi10_get_fan_speed_rpm(smu, ¤t_rpm); if (ret) return ret; @@ -1646,6 +1647,7 @@ static const struct pptable_funcs navi10_ppt_funcs = { .unforce_dpm_levels = navi10_unforce_dpm_levels, .is_dpm_running = navi10_is_dpm_running, .get_fan_speed_percent = navi10_get_fan_speed_percent, + .get_fan_speed_rpm = navi10_get_fan_speed_rpm, .get_power_profile_mode = navi10_get_power_profile_mode, .set_power_profile_mode = navi10_set_power_profile_mode, .get_profiling_clk_mask = navi10_get_profiling_clk_mask, diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index 95c7c4dae523..caca9091bfcc 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -1371,23 +1371,6 @@ static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable) return ret; } -static int smu_v11_0_get_current_rpm(struct smu_context *smu, - uint32_t *current_rpm) -{ - int ret; - - ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm); - - if (ret) { - pr_err("Attempt to get current RPM from SMC Failed!\n"); - return ret; - } - - smu_read_smc_arg(smu, current_rpm); - - return 0; -} - static uint32_t smu_v11_0_get_fan_control_mode(struct smu_context *smu) { @@ -1773,7 +1756,6 @@ static const struct smu_funcs smu_v11_0_funcs = { .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk, .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request, .set_watermarks_for_clock_ranges = smu_v11_0_set_watermarks_for_clock_ranges, - .get_current_rpm = smu_v11_0_get_current_rpm, .get_fan_control_mode = smu_v11_0_get_fan_control_mode, .set_fan_control_mode = smu_v11_0_set_fan_control_mode, .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent, diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c index bb9bb09cfc7a..dc139a6feeb1 100644 --- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c 
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c @@ -3015,6 +3015,23 @@ static int vega20_set_thermal_fan_table(struct smu_context *smu) return ret; } +static int vega20_get_fan_speed_rpm(struct smu_context *smu, + uint32_t *speed) +{ + int ret; + + ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm); + + if (ret) { + pr_err("Attempt to get current RPM from SMC Failed!\n"); + return ret; + } + + smu_read_smc_arg(smu, speed); + + return 0; +} + static int vega20_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed) { @@ -3022,7 +3039,7 @@ static int vega20_get_fan_speed_percent(struct smu_context *smu, uint32_t current_rpm = 0, percent = 0; PPTable_t *pptable = smu->smu_table.driver_pptable; - ret = smu_get_current_rpm(smu, ¤t_rpm); + ret = vega20_get_fan_speed_rpm(smu, ¤t_rpm); if (ret) return ret; @@ -3293,6 +3310,7 @@ static const struct pptable_funcs vega20_ppt_funcs = { .is_dpm_running = vega20_is_dpm_running, .set_thermal_fan_table = vega20_set_thermal_fan_table, .get_fan_speed_percent = vega20_get_fan_speed_percent, + .get_fan_speed_rpm = vega20_get_fan_speed_rpm, .set_watermarks_table = vega20_set_watermarks_table, .get_thermal_temperature_range = vega20_get_thermal_temperature_range }; -- cgit v1.2.3 From 04364138ce9bd812e8eeda84febf7259e27ff57d Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 19 Jul 2019 14:07:37 +0100 Subject: drm/i915/gtt: Fix rounding for 36b MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The top-level page directory for 36b is a single entry, not multiple like 32b. Fix up the rounding on the calculation of the size of the top level so that we populate the 4th level correctly for 36b. Reported-by: Jose Souza Signed-off-by: Chris Wilson Fixes: 1eda701eace2 ("drm/i915/gtt: Recursive cleanup for gen8") Cc: Abdiel Janulgue Cc: Jose Souza Tested-by: José Roberto de Souza Reviewed-by: Abdiel Janulgue Link: https://patchwork.freedesktop.org/patch/msgid/20190719130737.5835-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_gtt.c | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 0930455824b4..5a7e66b332a6 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -905,6 +905,12 @@ static inline unsigned int gen8_pt_count(u64 start, u64 end) return end - start; } +static inline unsigned int gen8_pd_top_count(const struct i915_address_space *vm) +{ + unsigned int shift = __gen8_pte_shift(vm->top); + return (vm->total + (1ull << shift) - 1) >> shift; +} + static void __gen8_ppgtt_cleanup(struct i915_address_space *vm, struct i915_page_directory *pd, int count, int lvl) @@ -930,9 +936,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm) if (intel_vgpu_active(vm->i915)) gen8_ppgtt_notify_vgt(ppgtt, false); - __gen8_ppgtt_cleanup(vm, ppgtt->pd, - vm->total >> __gen8_pte_shift(vm->top), - vm->top); + __gen8_ppgtt_cleanup(vm, ppgtt->pd, gen8_pd_top_count(vm), vm->top); free_scratch(vm); } @@ -1392,7 +1396,7 @@ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt) unsigned int idx; GEM_BUG_ON(vm->top != 2); - GEM_BUG_ON((vm->total >> __gen8_pte_shift(2)) != GEN8_3LVL_PDPES); + GEM_BUG_ON(gen8_pd_top_count(vm) != GEN8_3LVL_PDPES); for (idx = 0; idx < GEN8_3LVL_PDPES; idx++) { struct i915_page_directory *pde; @@ -1429,7 +1433,7 @@ static void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt) static 
struct i915_page_directory * gen8_alloc_top_pd(struct i915_address_space *vm) { - const unsigned int count = vm->total >> __gen8_pte_shift(vm->top); + const unsigned int count = gen8_pd_top_count(vm); struct i915_page_directory *pd; GEM_BUG_ON(count > ARRAY_SIZE(pd->entry)); @@ -1515,8 +1519,7 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915) err_free_pd: __gen8_ppgtt_cleanup(&ppgtt->vm, ppgtt->pd, - ppgtt->vm.total >> __gen8_pte_shift(ppgtt->vm.top), - ppgtt->vm.top); + gen8_pd_top_count(&ppgtt->vm), ppgtt->vm.top); err_free_scratch: free_scratch(&ppgtt->vm); err_free: -- cgit v1.2.3 From f3bcb0cccd36c6eb6e3afbd61f83eeca32d6cc50 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 18 Jul 2019 08:00:10 +0100 Subject: drm/i915: Remove obsolete engine cleanup Remove the outer layer cleanup of engine stubs; as i915_drv itself no longer tries to preallocate and so is not responsible for either the allocation or free. By the time we call the cleanup function, we already have cleaned up the engines. v2: Lack of symmetry between mmio_probe and mmio_release for handling the error cleanup. engine->destroy() is a compound function that is called earlier in the normal release as it ties together other bits of state. Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190718070024.21781-6-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_drv.c | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 0caf2f3ce279..8c81a35f832d 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -848,15 +848,6 @@ out_err: return -ENOMEM; } -static void i915_engines_cleanup(struct drm_i915_private *i915) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - - for_each_engine(engine, i915, id) - kfree(engine); -} - static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv) { destroy_workqueue(dev_priv->hotplug.dp_wq); @@ -928,7 +919,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv) ret = i915_workqueues_init(dev_priv); if (ret < 0) - goto err_engines; + return ret; intel_gt_init_early(&dev_priv->gt, dev_priv); @@ -961,8 +952,6 @@ err_uc: i915_gem_cleanup_early(dev_priv); err_workqueues: i915_workqueues_cleanup(dev_priv); -err_engines: - i915_engines_cleanup(dev_priv); return ret; } @@ -978,7 +967,6 @@ static void i915_driver_late_release(struct drm_i915_private *dev_priv) intel_uc_cleanup_early(&dev_priv->gt.uc); i915_gem_cleanup_early(dev_priv); i915_workqueues_cleanup(dev_priv); - i915_engines_cleanup(dev_priv); pm_qos_remove_request(&dev_priv->sb_qos); mutex_destroy(&dev_priv->sb_lock); @@ -1039,6 +1027,7 @@ err_bridge: */ static void i915_driver_mmio_release(struct drm_i915_private *dev_priv) { + intel_engines_cleanup(dev_priv); intel_teardown_mchbar(dev_priv); intel_uncore_fini_mmio(&dev_priv->uncore); pci_dev_put(dev_priv->bridge_dev); -- cgit v1.2.3 From df8cf31e749788d8de8c3028e530e6fd9ef8bc79 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 18 Jul 2019 08:00:06 +0100 Subject: drm/i915/gt: Hook up intel_context_fini() Prior to freeing the struct, call the fini function to cleanup the common members. Currently this only calls the debug functions to mark the structs as destroyed, but may be extended to real work in future. 
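[A condensed view of the teardown ordering this introduces, assembled from the
hunks below; the execlists backend is shown (ring and mock backends follow the
same pattern), and the kref-to-context step is filled in with the usual
container_of() idiom rather than copied from unchanged context lines:]

	void intel_context_fini(struct intel_context *ce)
	{
		mutex_destroy(&ce->pin_mutex);
		i915_active_fini(&ce->active);
	}

	static void execlists_context_destroy(struct kref *kref)
	{
		struct intel_context *ce = container_of(kref, typeof(*ce), ref);

		if (ce->state)
			__execlists_context_fini(ce);	/* backend-specific state */

		intel_context_fini(ce);			/* common members, debug-only today */
		intel_context_free(ce);			/* frees the struct itself */
	}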
Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190718070024.21781-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_context.c | 6 ++++++ drivers/gpu/drm/i915/gt/intel_context.h | 1 + drivers/gpu/drm/i915/gt/intel_lrc.c | 2 ++ drivers/gpu/drm/i915/gt/intel_ringbuffer.c | 1 + drivers/gpu/drm/i915/gt/mock_engine.c | 1 + 5 files changed, 11 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index b667e2b35804..9292b6ca5e9c 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -204,6 +204,12 @@ intel_context_init(struct intel_context *ce, __intel_context_active, __intel_context_retire); } +void intel_context_fini(struct intel_context *ce) +{ + mutex_destroy(&ce->pin_mutex); + i915_active_fini(&ce->active); +} + static void i915_global_context_shrink(void) { kmem_cache_shrink(global.slab_ce); diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h index b41c610c2ce6..23c7e4c0ce7c 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.h +++ b/drivers/gpu/drm/i915/gt/intel_context.h @@ -16,6 +16,7 @@ void intel_context_init(struct intel_context *ce, struct i915_gem_context *ctx, struct intel_engine_cs *engine); +void intel_context_fini(struct intel_context *ce); struct intel_context * intel_context_create(struct i915_gem_context *ctx, diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index dce36482a252..884dfc1cb033 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -1566,6 +1566,7 @@ static void execlists_context_destroy(struct kref *kref) if (ce->state) __execlists_context_fini(ce); + intel_context_fini(ce); intel_context_free(ce); } @@ -3199,6 +3200,7 @@ static void virtual_context_destroy(struct kref *kref) if (ve->context.state) __execlists_context_fini(&ve->context); + intel_context_fini(&ve->context); kfree(ve->bonds); kfree(ve); diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c index 213df144be15..1de19dac4a14 100644 --- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c @@ -1388,6 +1388,7 @@ static void ring_context_destroy(struct kref *ref) if (ce->state) __ring_context_fini(ce); + intel_context_fini(ce); intel_context_free(ce); } diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c index 490ebd121f4c..10cb312462e5 100644 --- a/drivers/gpu/drm/i915/gt/mock_engine.c +++ b/drivers/gpu/drm/i915/gt/mock_engine.c @@ -142,6 +142,7 @@ static void mock_context_destroy(struct kref *ref) if (ce->ring) mock_ring_free(ce->ring); + intel_context_fini(ce); intel_context_free(ce); } -- cgit v1.2.3 From 3bdd4f8485310687affe117f1b1a6e9012e897f6 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 22 Jul 2019 23:28:47 +0100 Subject: drm/i915: Rely on spinlock protection for GPU error capture Trust that we now have adequate protection over the low level structures via the engine->active.lock to allow ourselves to capture the GPU error state without the heavy hammer of stop_machine(). Sadly this does mean that we have to forgo some of the lesser used information (not derived from the active state) that is not controlled by the active locks. This includes the list of buffers in the ppGTT and pinned globally in the GGTT. 
Originally this was used to manually verify relocations, but hasn't been required for sometime and modern mesa now has the habit of ensuring that all interesting buffers within a batch are captured in their entirety (that are the auxiliary state buffers, but not the textures). A useful side-effect is that this allows us to restore error capturing for Braswell and Broxton. v2: Use pagevec for a typical arbitrary number of preallocated pages Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190722222847.24178-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_gtt.c | 5 - drivers/gpu/drm/i915/i915_gpu_error.c | 489 ++++++++++++++-------------------- drivers/gpu/drm/i915/i915_gpu_error.h | 17 -- 3 files changed, 193 insertions(+), 318 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 5a7e66b332a6..4dd1fa956143 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2970,11 +2970,6 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL; if (ggtt->vm.clear_range != nop_clear_range) ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL; - - /* Prevent recursively calling stop_machine() and deadlocks. */ - dev_info(dev_priv->drm.dev, - "Disabling error capture for VT-d workaround\n"); - i915_disable_error_state(dev_priv, -ENODEV); } ggtt->invalidate = gen6_ggtt_invalidate; diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index c5b89bf4d616..2193687eac72 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -29,8 +29,8 @@ #include #include +#include #include -#include #include #include @@ -46,6 +46,9 @@ #include "i915_scatterlist.h" #include "intel_csr.h" +#define ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN) +#define ATOMIC_MAYFAIL (GFP_ATOMIC | __GFP_NOWARN) + static inline const struct intel_engine_cs * engine_lookup(const struct drm_i915_private *i915, unsigned int id) { @@ -67,26 +70,6 @@ engine_name(const struct drm_i915_private *i915, unsigned int id) return __engine_name(engine_lookup(i915, id)); } -static const char *tiling_flag(int tiling) -{ - switch (tiling) { - default: - case I915_TILING_NONE: return ""; - case I915_TILING_X: return " X"; - case I915_TILING_Y: return " Y"; - } -} - -static const char *dirty_flag(int dirty) -{ - return dirty ? " dirty" : ""; -} - -static const char *purgeable_flag(int purgeable) -{ - return purgeable ? 
" purgeable" : ""; -} - static void __sg_set_buf(struct scatterlist *sg, void *addr, unsigned int len, loff_t it) { @@ -114,7 +97,7 @@ static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len) if (e->cur == e->end) { struct scatterlist *sgl; - sgl = (typeof(sgl))__get_free_page(GFP_KERNEL); + sgl = (typeof(sgl))__get_free_page(ALLOW_FAIL); if (!sgl) { e->err = -ENOMEM; return false; @@ -134,7 +117,7 @@ static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len) } e->size = ALIGN(len + 1, SZ_64K); - e->buf = kmalloc(e->size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); + e->buf = kmalloc(e->size, ALLOW_FAIL); if (!e->buf) { e->size = PAGE_ALIGN(len + 1); e->buf = kmalloc(e->size, GFP_KERNEL); @@ -211,47 +194,115 @@ i915_error_printer(struct drm_i915_error_state_buf *e) return p; } +/* single threaded page allocator with a reserved stash for emergencies */ +static void pool_fini(struct pagevec *pv) +{ + pagevec_release(pv); +} + +static int pool_refill(struct pagevec *pv, gfp_t gfp) +{ + while (pagevec_space(pv)) { + struct page *p; + + p = alloc_page(gfp); + if (!p) + return -ENOMEM; + + pagevec_add(pv, p); + } + + return 0; +} + +static int pool_init(struct pagevec *pv, gfp_t gfp) +{ + int err; + + pagevec_init(pv); + + err = pool_refill(pv, gfp); + if (err) + pool_fini(pv); + + return err; +} + +static void *pool_alloc(struct pagevec *pv, gfp_t gfp) +{ + struct page *p; + + p = alloc_page(gfp); + if (!p && pagevec_count(pv)) + p = pv->pages[--pv->nr]; + + return p ? page_address(p) : NULL; +} + +static void pool_free(struct pagevec *pv, void *addr) +{ + struct page *p = virt_to_page(addr); + + if (pagevec_space(pv)) + pagevec_add(pv, p); + else + __free_page(p); +} + #ifdef CONFIG_DRM_I915_COMPRESS_ERROR struct compress { + struct pagevec pool; struct z_stream_s zstream; void *tmp; }; static bool compress_init(struct compress *c) { - struct z_stream_s *zstream = memset(&c->zstream, 0, sizeof(c->zstream)); + struct z_stream_s *zstream = &c->zstream; - zstream->workspace = - kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL), - GFP_ATOMIC | __GFP_NOWARN); - if (!zstream->workspace) + if (pool_init(&c->pool, ALLOW_FAIL)) return false; - if (zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) != Z_OK) { - kfree(zstream->workspace); + zstream->workspace = + kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL), + ALLOW_FAIL); + if (!zstream->workspace) { + pool_fini(&c->pool); return false; } c->tmp = NULL; if (i915_has_memcpy_from_wc()) - c->tmp = (void *)__get_free_page(GFP_ATOMIC | __GFP_NOWARN); + c->tmp = pool_alloc(&c->pool, ALLOW_FAIL); return true; } -static void *compress_next_page(struct drm_i915_error_object *dst) +static bool compress_start(struct compress *c) { - unsigned long page; + struct z_stream_s *zstream = &c->zstream; + void *workspace = zstream->workspace; + + memset(zstream, 0, sizeof(*zstream)); + zstream->workspace = workspace; + + return zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) == Z_OK; +} + +static void *compress_next_page(struct compress *c, + struct drm_i915_error_object *dst) +{ + void *page; if (dst->page_count >= dst->num_pages) return ERR_PTR(-ENOSPC); - page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN); + page = pool_alloc(&c->pool, ATOMIC_MAYFAIL); if (!page) return ERR_PTR(-ENOMEM); - return dst->pages[dst->page_count++] = (void *)page; + return dst->pages[dst->page_count++] = page; } static int compress_page(struct compress *c, @@ -267,7 +318,7 @@ static int compress_page(struct compress *c, do { if 
(zstream->avail_out == 0) { - zstream->next_out = compress_next_page(dst); + zstream->next_out = compress_next_page(c, dst); if (IS_ERR(zstream->next_out)) return PTR_ERR(zstream->next_out); @@ -295,7 +346,7 @@ static int compress_flush(struct compress *c, do { switch (zlib_deflate(zstream, Z_FINISH)) { case Z_OK: /* more space requested */ - zstream->next_out = compress_next_page(dst); + zstream->next_out = compress_next_page(c, dst); if (IS_ERR(zstream->next_out)) return PTR_ERR(zstream->next_out); @@ -316,15 +367,17 @@ end: return 0; } -static void compress_fini(struct compress *c, - struct drm_i915_error_object *dst) +static void compress_finish(struct compress *c) { - struct z_stream_s *zstream = &c->zstream; + zlib_deflateEnd(&c->zstream); +} - zlib_deflateEnd(zstream); - kfree(zstream->workspace); +static void compress_fini(struct compress *c) +{ + kfree(c->zstream.workspace); if (c->tmp) - free_page((unsigned long)c->tmp); + pool_free(&c->pool, c->tmp); + pool_fini(&c->pool); } static void err_compression_marker(struct drm_i915_error_state_buf *m) @@ -335,9 +388,15 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m) #else struct compress { + struct pagevec pool; }; static bool compress_init(struct compress *c) +{ + return pool_init(&c->pool, ALLOW_FAIL) == 0; +} + +static bool compress_start(struct compress *c) { return true; } @@ -346,14 +405,12 @@ static int compress_page(struct compress *c, void *src, struct drm_i915_error_object *dst) { - unsigned long page; void *ptr; - page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN); - if (!page) + ptr = pool_alloc(&c->pool, ATOMIC_MAYFAIL); + if (!ptr) return -ENOMEM; - ptr = (void *)page; if (!i915_memcpy_from_wc(ptr, src, PAGE_SIZE)) memcpy(ptr, src, PAGE_SIZE); dst->pages[dst->page_count++] = ptr; @@ -367,11 +424,15 @@ static int compress_flush(struct compress *c, return 0; } -static void compress_fini(struct compress *c, - struct drm_i915_error_object *dst) +static void compress_finish(struct compress *c) { } +static void compress_fini(struct compress *c) +{ + pool_fini(&c->pool); +} + static void err_compression_marker(struct drm_i915_error_state_buf *m) { err_puts(m, "~"); @@ -379,36 +440,6 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m) #endif -static void print_error_buffers(struct drm_i915_error_state_buf *m, - const char *name, - struct drm_i915_error_buffer *err, - int count) -{ - err_printf(m, "%s [%d]:\n", name, count); - - while (count--) { - err_printf(m, " %08x_%08x %8u %02x %02x", - upper_32_bits(err->gtt_offset), - lower_32_bits(err->gtt_offset), - err->size, - err->read_domains, - err->write_domain); - err_puts(m, tiling_flag(err->tiling)); - err_puts(m, dirty_flag(err->dirty)); - err_puts(m, purgeable_flag(err->purgeable)); - err_puts(m, err->userptr ? 
" userptr" : ""); - err_puts(m, i915_cache_level_str(m->i915, err->cache_level)); - - if (err->name) - err_printf(m, " (name: %d)", err->name); - if (err->fence_reg != I915_FENCE_REG_NONE) - err_printf(m, " (fence: %d)", err->fence_reg); - - err_puts(m, "\n"); - err++; - } -} - static void error_print_instdone(struct drm_i915_error_state_buf *m, const struct drm_i915_error_engine *ee) { @@ -734,33 +765,6 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m, error_print_engine(m, &error->engine[i], error->epoch); } - for (i = 0; i < ARRAY_SIZE(error->active_vm); i++) { - char buf[128]; - int len, first = 1; - - if (!error->active_vm[i]) - break; - - len = scnprintf(buf, sizeof(buf), "Active ("); - for (j = 0; j < ARRAY_SIZE(error->engine); j++) { - if (error->engine[j].vm != error->active_vm[i]) - continue; - - len += scnprintf(buf + len, sizeof(buf), "%s%s", - first ? "" : ", ", - m->i915->engine[j]->name); - first = 0; - } - scnprintf(buf + len, sizeof(buf), ")"); - print_error_buffers(m, buf, - error->active_bo[i], - error->active_bo_count[i]); - } - - print_error_buffers(m, "Pinned (global)", - error->pinned_bo, - error->pinned_bo_count); - for (i = 0; i < ARRAY_SIZE(error->engine); i++) { const struct drm_i915_error_engine *ee = &error->engine[i]; @@ -974,10 +978,6 @@ void __i915_gpu_state_free(struct kref *error_ref) kfree(ee->requests); } - for (i = 0; i < ARRAY_SIZE(error->active_bo); i++) - kfree(error->active_bo[i]); - kfree(error->pinned_bo); - kfree(error->overlay); kfree(error->display); @@ -990,12 +990,12 @@ void __i915_gpu_state_free(struct kref *error_ref) static struct drm_i915_error_object * i915_error_object_create(struct drm_i915_private *i915, - struct i915_vma *vma) + struct i915_vma *vma, + struct compress *compress) { struct i915_ggtt *ggtt = &i915->ggtt; const u64 slot = ggtt->error_capture.start; struct drm_i915_error_object *dst; - struct compress compress; unsigned long num_pages; struct sgt_iter iter; dma_addr_t dma; @@ -1006,22 +1006,21 @@ i915_error_object_create(struct drm_i915_private *i915, num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT; num_pages = DIV_ROUND_UP(10 * num_pages, 8); /* worstcase zlib growth */ - dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), - GFP_ATOMIC | __GFP_NOWARN); + dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), ATOMIC_MAYFAIL); if (!dst) return NULL; + if (!compress_start(compress)) { + kfree(dst); + return NULL; + } + dst->gtt_offset = vma->node.start; dst->gtt_size = vma->node.size; dst->num_pages = num_pages; dst->page_count = 0; dst->unused = 0; - if (!compress_init(&compress)) { - kfree(dst); - return NULL; - } - ret = -EINVAL; for_each_sgt_dma(dma, iter, vma->pages) { void __iomem *s; @@ -1029,69 +1028,23 @@ i915_error_object_create(struct drm_i915_private *i915, ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0); s = io_mapping_map_atomic_wc(&ggtt->iomap, slot); - ret = compress_page(&compress, (void __force *)s, dst); + ret = compress_page(compress, (void __force *)s, dst); io_mapping_unmap_atomic(s); if (ret) break; } - if (ret || compress_flush(&compress, dst)) { + if (ret || compress_flush(compress, dst)) { while (dst->page_count--) - free_page((unsigned long)dst->pages[dst->page_count]); + pool_free(&compress->pool, dst->pages[dst->page_count]); kfree(dst); dst = NULL; } + compress_finish(compress); - compress_fini(&compress, dst); return dst; } -static void capture_bo(struct drm_i915_error_buffer *err, - struct i915_vma *vma) -{ - struct 
drm_i915_gem_object *obj = vma->obj; - - err->size = obj->base.size; - err->name = obj->base.name; - - err->gtt_offset = vma->node.start; - err->read_domains = obj->read_domains; - err->write_domain = obj->write_domain; - err->fence_reg = vma->fence ? vma->fence->id : -1; - err->tiling = i915_gem_object_get_tiling(obj); - err->dirty = obj->mm.dirty; - err->purgeable = obj->mm.madv != I915_MADV_WILLNEED; - err->userptr = obj->userptr.mm != NULL; - err->cache_level = obj->cache_level; -} - -static u32 capture_error_bo(struct drm_i915_error_buffer *err, - int count, struct list_head *head, - unsigned int flags) -#define ACTIVE_ONLY BIT(0) -#define PINNED_ONLY BIT(1) -{ - struct i915_vma *vma; - int i = 0; - - list_for_each_entry(vma, head, vm_link) { - if (!vma->obj) - continue; - - if (flags & ACTIVE_ONLY && !i915_vma_is_active(vma)) - continue; - - if (flags & PINNED_ONLY && !i915_vma_is_pinned(vma)) - continue; - - capture_bo(err++, vma); - if (++i == count) - break; - } - - return i; -} - /* * Generate a semi-unique error code. The code is not meant to have meaning, The * code's only purpose is to try to prevent false duplicated bug reports by @@ -1281,7 +1234,7 @@ static void engine_record_requests(struct intel_engine_cs *engine, if (!count) return; - ee->requests = kcalloc(count, sizeof(*ee->requests), GFP_ATOMIC); + ee->requests = kcalloc(count, sizeof(*ee->requests), ATOMIC_MAYFAIL); if (!ee->requests) return; @@ -1349,8 +1302,10 @@ static void record_context(struct drm_i915_error_context *e, e->active = atomic_read(&ctx->active_count); } -static void request_record_user_bo(struct i915_request *request, - struct drm_i915_error_engine *ee) +static void +request_record_user_bo(struct i915_request *request, + struct drm_i915_error_engine *ee, + struct compress *compress) { struct i915_capture_list *c; struct drm_i915_error_object **bo; @@ -1362,18 +1317,20 @@ static void request_record_user_bo(struct i915_request *request, if (!max) return; - bo = kmalloc_array(max, sizeof(*bo), GFP_ATOMIC); + bo = kmalloc_array(max, sizeof(*bo), ATOMIC_MAYFAIL); if (!bo) { /* If we can't capture everything, try to capture something. 
*/ max = min_t(long, max, PAGE_SIZE / sizeof(*bo)); - bo = kmalloc_array(max, sizeof(*bo), GFP_ATOMIC); + bo = kmalloc_array(max, sizeof(*bo), ATOMIC_MAYFAIL); } if (!bo) return; count = 0; for (c = request->capture_list; c; c = c->next) { - bo[count] = i915_error_object_create(request->i915, c->vma); + bo[count] = i915_error_object_create(request->i915, + c->vma, + compress); if (!bo[count]) break; if (++count == max) @@ -1386,7 +1343,8 @@ static void request_record_user_bo(struct i915_request *request, static struct drm_i915_error_object * capture_object(struct drm_i915_private *dev_priv, - struct drm_i915_gem_object *obj) + struct drm_i915_gem_object *obj, + struct compress *compress) { if (obj && i915_gem_object_has_pages(obj)) { struct i915_vma fake = { @@ -1396,13 +1354,14 @@ capture_object(struct drm_i915_private *dev_priv, .obj = obj, }; - return i915_error_object_create(dev_priv, &fake); + return i915_error_object_create(dev_priv, &fake, compress); } else { return NULL; } } -static void gem_record_rings(struct i915_gpu_state *error) +static void +gem_record_rings(struct i915_gpu_state *error, struct compress *compress) { struct drm_i915_private *i915 = error->i915; int i; @@ -1420,6 +1379,9 @@ static void gem_record_rings(struct i915_gpu_state *error) ee->engine_id = i; + /* Refill our page pool before entering atomic section */ + pool_refill(&compress->pool, ALLOW_FAIL); + error_record_engine_registers(error, engine, ee); error_record_engine_execlists(engine, ee); @@ -1429,8 +1391,6 @@ static void gem_record_rings(struct i915_gpu_state *error) struct i915_gem_context *ctx = request->gem_context; struct intel_ring *ring = request->ring; - ee->vm = ctx->vm ?: &engine->gt->ggtt->vm; - record_context(&ee->context, ctx); /* We need to copy these to an anonymous buffer @@ -1438,17 +1398,21 @@ static void gem_record_rings(struct i915_gpu_state *error) * by userspace. 
*/ ee->batchbuffer = - i915_error_object_create(i915, request->batch); + i915_error_object_create(i915, + request->batch, + compress); if (HAS_BROKEN_CS_TLB(i915)) ee->wa_batchbuffer = i915_error_object_create(i915, - engine->gt->scratch); - request_record_user_bo(request, ee); + engine->gt->scratch, + compress); + request_record_user_bo(request, ee, compress); ee->ctx = i915_error_object_create(i915, - request->hw_context->state); + request->hw_context->state, + compress); error->simulated |= i915_gem_context_no_error_capture(ctx); @@ -1460,7 +1424,9 @@ static void gem_record_rings(struct i915_gpu_state *error) ee->cpu_ring_head = ring->head; ee->cpu_ring_tail = ring->tail; ee->ringbuffer = - i915_error_object_create(i915, ring->vma); + i915_error_object_create(i915, + ring->vma, + compress); engine_record_requests(engine, request, ee); } @@ -1468,89 +1434,21 @@ static void gem_record_rings(struct i915_gpu_state *error) ee->hws_page = i915_error_object_create(i915, - engine->status_page.vma); - - ee->wa_ctx = i915_error_object_create(i915, engine->wa_ctx.vma); - - ee->default_state = capture_object(i915, engine->default_state); - } -} - -static void gem_capture_vm(struct i915_gpu_state *error, - struct i915_address_space *vm, - int idx) -{ - struct drm_i915_error_buffer *active_bo; - struct i915_vma *vma; - int count; - - count = 0; - list_for_each_entry(vma, &vm->bound_list, vm_link) - if (i915_vma_is_active(vma)) - count++; - - active_bo = NULL; - if (count) - active_bo = kcalloc(count, sizeof(*active_bo), GFP_ATOMIC); - if (active_bo) - count = capture_error_bo(active_bo, - count, &vm->bound_list, - ACTIVE_ONLY); - else - count = 0; - - error->active_vm[idx] = vm; - error->active_bo[idx] = active_bo; - error->active_bo_count[idx] = count; -} - -static void capture_active_buffers(struct i915_gpu_state *error) -{ - int cnt = 0, i, j; - - BUILD_BUG_ON(ARRAY_SIZE(error->engine) > ARRAY_SIZE(error->active_bo)); - BUILD_BUG_ON(ARRAY_SIZE(error->active_bo) != ARRAY_SIZE(error->active_vm)); - BUILD_BUG_ON(ARRAY_SIZE(error->active_bo) != ARRAY_SIZE(error->active_bo_count)); - - /* Scan each engine looking for unique active contexts/vm */ - for (i = 0; i < ARRAY_SIZE(error->engine); i++) { - struct drm_i915_error_engine *ee = &error->engine[i]; - bool found; + engine->status_page.vma, + compress); - if (!ee->vm) - continue; + ee->wa_ctx = + i915_error_object_create(i915, + engine->wa_ctx.vma, + compress); - found = false; - for (j = 0; j < i && !found; j++) - found = error->engine[j].vm == ee->vm; - if (!found) - gem_capture_vm(error, ee->vm, cnt++); + ee->default_state = + capture_object(i915, engine->default_state, compress); } } -static void capture_pinned_buffers(struct i915_gpu_state *error) -{ - struct i915_address_space *vm = &error->i915->ggtt.vm; - struct drm_i915_error_buffer *bo; - struct i915_vma *vma; - int count; - - count = 0; - list_for_each_entry(vma, &vm->bound_list, vm_link) - count++; - - bo = NULL; - if (count) - bo = kcalloc(count, sizeof(*bo), GFP_ATOMIC); - if (!bo) - return; - - error->pinned_bo_count = - capture_error_bo(bo, count, &vm->bound_list, PINNED_ONLY); - error->pinned_bo = bo; -} - -static void capture_uc_state(struct i915_gpu_state *error) +static void +capture_uc_state(struct i915_gpu_state *error, struct compress *compress) { struct drm_i915_private *i915 = error->i915; struct i915_error_uc *error_uc = &error->uc; @@ -1567,9 +1465,11 @@ static void capture_uc_state(struct i915_gpu_state *error) * As modparams are generally accesible from the userspace make 
* explicit copies of the firmware paths. */ - error_uc->guc_fw.path = kstrdup(uc->guc.fw.path, GFP_ATOMIC); - error_uc->huc_fw.path = kstrdup(uc->huc.fw.path, GFP_ATOMIC); - error_uc->guc_log = i915_error_object_create(i915, uc->guc.log.vma); + error_uc->guc_fw.path = kstrdup(uc->guc.fw.path, ALLOW_FAIL); + error_uc->huc_fw.path = kstrdup(uc->huc.fw.path, ALLOW_FAIL); + error_uc->guc_log = i915_error_object_create(i915, + uc->guc.log.vma, + compress); } /* Capture all registers which don't fit into another category. */ @@ -1753,56 +1653,53 @@ static void capture_finish(struct i915_gpu_state *error) ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE); } -static int capture(void *data) -{ - struct i915_gpu_state *error = data; - - error->time = ktime_get_real(); - error->boottime = ktime_get_boottime(); - error->uptime = ktime_sub(ktime_get(), - error->i915->gt.last_init_time); - error->capture = jiffies; - - capture_params(error); - capture_gen_state(error); - capture_uc_state(error); - capture_reg_state(error); - gem_record_fences(error); - gem_record_rings(error); - capture_active_buffers(error); - capture_pinned_buffers(error); - - error->overlay = intel_overlay_capture_error_state(error->i915); - error->display = intel_display_capture_error_state(error->i915); - - error->epoch = capture_find_epoch(error); - - capture_finish(error); - return 0; -} - #define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x)) struct i915_gpu_state * i915_capture_gpu_state(struct drm_i915_private *i915) { struct i915_gpu_state *error; + struct compress compress; /* Check if GPU capture has been disabled */ error = READ_ONCE(i915->gpu_error.first_error); if (IS_ERR(error)) return error; - error = kzalloc(sizeof(*error), GFP_ATOMIC); + error = kzalloc(sizeof(*error), ALLOW_FAIL); if (!error) { i915_disable_error_state(i915, -ENOMEM); return ERR_PTR(-ENOMEM); } + if (!compress_init(&compress)) { + kfree(error); + i915_disable_error_state(i915, -ENOMEM); + return ERR_PTR(-ENOMEM); + } + kref_init(&error->ref); error->i915 = i915; - stop_machine(capture, error, NULL); + error->time = ktime_get_real(); + error->boottime = ktime_get_boottime(); + error->uptime = ktime_sub(ktime_get(), i915->gt.last_init_time); + error->capture = jiffies; + + capture_params(error); + capture_gen_state(error); + capture_uc_state(error, &compress); + capture_reg_state(error); + gem_record_fences(error); + gem_record_rings(error, &compress); + + error->overlay = intel_overlay_capture_error_state(i915); + error->display = intel_display_capture_error_state(i915); + + error->epoch = capture_find_epoch(error); + + capture_finish(error); + compress_fini(&compress); return error; } diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h index 85f06bc5da05..a24c35107d16 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.h +++ b/drivers/gpu/drm/i915/i915_gpu_error.h @@ -85,7 +85,6 @@ struct i915_gpu_state { /* Software tracked state */ bool idle; unsigned long hangcheck_timestamp; - struct i915_address_space *vm; int num_requests; u32 reset_count; @@ -161,22 +160,6 @@ struct i915_gpu_state { } vm_info; } engine[I915_NUM_ENGINES]; - struct drm_i915_error_buffer { - u32 size; - u32 name; - u64 gtt_offset; - u32 read_domains; - u32 write_domain; - s32 fence_reg:I915_MAX_NUM_FENCE_BITS; - u32 tiling:2; - u32 dirty:1; - u32 purgeable:1; - u32 userptr:1; - u32 cache_level:3; - } *active_bo[I915_NUM_ENGINES], *pinned_bo; - u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count; - struct i915_address_space 
*active_vm[I915_NUM_ENGINES]; - struct scatterlist *sgl, *fit; }; -- cgit v1.2.3 From aed7450b153666074d1e1d6c38f6a1ff14c7f80c Mon Sep 17 00:00:00 2001 From: Anshuman Gupta Date: Fri, 19 Jul 2019 11:25:13 +0530 Subject: drm/i915: Add HDCP capability info to i915_display_info. To identify the HDCP capability of the display connected to CI systems, we need to add the hdcp capability probing in i915_display_info. This will also help to populate the HDCP capability of the CI systems to CI H/W logs maintained at https://intel-gfx-ci.01.org/hardware/. It will facilitate to determine the kms_content_protection behavior on a particular CI system. v2: Reused the intel_hdcp_info() in i915_hdcp_sink_capability_show(). [Ram] Shifted intel_hdcp_info() to the end of intel_dp_info. [Ram] v3: used seq_puts() instead of seq_pritnf(). [Ram] Cc: daniel.vetter@intel.com Cc: ramalingam.c@intel.com Signed-off-by: Anshuman Gupta Reviewed-by: Ramalingam C Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190719055513.2089-1-anshuman.gupta@intel.com --- drivers/gpu/drm/i915/i915_debugfs.c | 40 ++++++++++++++++++++++++++----------- 1 file changed, 28 insertions(+), 12 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 6b84d04a6a28..6d3911469801 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -2569,6 +2569,25 @@ static void intel_panel_info(struct seq_file *m, struct intel_panel *panel) intel_seq_print_mode(m, 2, mode); } +static void intel_hdcp_info(struct seq_file *m, + struct intel_connector *intel_connector) +{ + bool hdcp_cap, hdcp2_cap; + + hdcp_cap = intel_hdcp_capable(intel_connector); + hdcp2_cap = intel_hdcp2_capable(intel_connector); + + if (hdcp_cap) + seq_puts(m, "HDCP1.4 "); + if (hdcp2_cap) + seq_puts(m, "HDCP2.2 "); + + if (!hdcp_cap && !hdcp2_cap) + seq_puts(m, "None"); + + seq_puts(m, "\n"); +} + static void intel_dp_info(struct seq_file *m, struct intel_connector *intel_connector) { @@ -2582,6 +2601,10 @@ static void intel_dp_info(struct seq_file *m, drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports, &intel_dp->aux); + if (intel_connector->hdcp.shim) { + seq_puts(m, "\tHDCP version: "); + intel_hdcp_info(m, intel_connector); + } } static void intel_dp_mst_info(struct seq_file *m, @@ -2605,6 +2628,10 @@ static void intel_hdmi_info(struct seq_file *m, struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base); seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio)); + if (intel_connector->hdcp.shim) { + seq_puts(m, "\tHDCP version: "); + intel_hdcp_info(m, intel_connector); + } } static void intel_lvds_info(struct seq_file *m, @@ -4528,7 +4555,6 @@ static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data) { struct drm_connector *connector = m->private; struct intel_connector *intel_connector = to_intel_connector(connector); - bool hdcp_cap, hdcp2_cap; if (connector->status != connector_status_connected) return -ENODEV; @@ -4539,17 +4565,7 @@ static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data) seq_printf(m, "%s:%d HDCP version: ", connector->name, connector->base.id); - hdcp_cap = intel_hdcp_capable(intel_connector); - hdcp2_cap = intel_hdcp2_capable(intel_connector); - - if (hdcp_cap) - seq_puts(m, "HDCP1.4 "); - if (hdcp2_cap) - seq_puts(m, "HDCP2.2 "); - - if (!hdcp_cap && !hdcp2_cap) - seq_puts(m, "None"); - seq_puts(m, "\n"); + intel_hdcp_info(m, 
intel_connector); return 0; } -- cgit v1.2.3 From 3fcba88188a85a398c2c9245d148c952e2d53889 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Tue, 23 Jul 2019 10:14:03 +0100 Subject: drm/i915/uc: Gt-fy uc reset This was the last place in gt/uc that was still using I915_READ with the global dev_priv. Signed-off-by: Daniele Ceraolo Spurio Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190723091404.6449-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/uc/intel_uc.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index 5ebb0a534718..4480a3dc2449 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -37,17 +37,17 @@ static void guc_free_load_err_log(struct intel_guc *guc); */ static int __intel_uc_reset_hw(struct intel_uc *uc) { - struct drm_i915_private *dev_priv = uc_to_gt(uc)->i915; + struct intel_gt *gt = uc_to_gt(uc); int ret; u32 guc_status; - ret = intel_reset_guc(&dev_priv->gt); + ret = intel_reset_guc(gt); if (ret) { DRM_ERROR("Failed to reset GuC, ret = %d\n", ret); return ret; } - guc_status = I915_READ(GUC_STATUS); + guc_status = intel_uncore_read(gt->uncore, GUC_STATUS); WARN(!(guc_status & GS_MIA_IN_RESET), "GuC status: 0x%x, MIA core expected to be in reset\n", guc_status); -- cgit v1.2.3 From de6a263400f232a3787bdb112d9c974ebd29c4b4 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Tue, 23 Jul 2019 10:14:04 +0100 Subject: drm/i915/uc: Sanitize uC when GT is sanitized The microcontrollers are part of GT so it makes logical sense to have them sanitized at the same time. This also fixed an issue with our status tracking where the FW load status is not reset around hibernation. Signed-off-by: Daniele Ceraolo Spurio Cc: Michal Wajdeczko Cc: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190723091404.6449-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_pm.c | 1 - drivers/gpu/drm/i915/gt/intel_gt_pm.c | 2 ++ 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c index 8faf262278ae..b5561cbdc5ea 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c @@ -239,7 +239,6 @@ void i915_gem_suspend_late(struct drm_i915_private *i915) } spin_unlock_irqrestore(&i915->mm.obj_lock, flags); - intel_uc_sanitize(&i915->gt.uc); i915_gem_sanitize(i915); } diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index 61ed912341f1..65c0d0c9d543 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -118,6 +118,8 @@ void intel_gt_sanitize(struct intel_gt *gt, bool force) GEM_TRACE("\n"); + intel_uc_sanitize(>->uc); + if (!reset_engines(gt) && !force) return; -- cgit v1.2.3 From d8bf0e7627e6e887bd0ed3707216e0e69ec95710 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 23 Jul 2019 10:58:00 +0100 Subject: drm/i915/selftests: Let igt_vma_partial et al breathe Give the scheduler a chance to breathe by calling cond_resched() as some of the loops may take some time on slower machines, and so catch the attention of the watchdogs. 
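The same idiom generalises to any selftest that sweeps a large parameter space: put a cond_resched() at the bottom of each long-running loop iteration so the test voluntarily yields instead of tripping the soft-lockup/hung-task watchdogs. A minimal sketch of the pattern (the loop bound and check_one_config() helper are illustrative, not taken from the patch; cond_resched() comes from <linux/sched.h>):

static int igt_example_sweep(void *arg)
{
	unsigned int i;

	for (i = 0; i < (1 << 20); i++) {
		/* exercise one configuration; may be slow on small machines */
		int err = check_one_config(i); /* hypothetical helper */
		if (err)
			return err;

		/* let the scheduler breathe between iterations */
		cond_resched();
	}

	return 0;
}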
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=111196 Signed-off-by: Chris Wilson Cc: Matthew Auld Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190723095800.2820-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/selftests/i915_vma.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c index fbc79b14823a..a5bec0a4cdcc 100644 --- a/drivers/gpu/drm/i915/selftests/i915_vma.c +++ b/drivers/gpu/drm/i915/selftests/i915_vma.c @@ -193,6 +193,8 @@ static int igt_vma_create(void *arg) list_del_init(&ctx->link); mock_context_close(ctx); } + + cond_resched(); } end: @@ -341,6 +343,8 @@ static int igt_vma_pin1(void *arg) goto out; } } + + cond_resched(); } err = 0; @@ -597,6 +601,8 @@ static int igt_vma_rotate_remap(void *arg) } i915_vma_unpin(vma); + + cond_resched(); } } } @@ -752,6 +758,8 @@ static int igt_vma_partial(void *arg) i915_vma_unpin(vma); nvma++; + + cond_resched(); } } @@ -961,6 +969,8 @@ static int igt_vma_remapped_gtt(void *arg) } } i915_vma_unpin_iomap(vma); + + cond_resched(); } } -- cgit v1.2.3 From 14f8a0eb02531bdeca723af123be3e20c4aa1045 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 23 Jul 2019 10:12:18 +0100 Subject: drm/i915: Squelch nop wait-for-idle trace If the system is already idle, omit the GEM_TRACE saying we are about to wait for idle. It looks confusing in the logs to see a continual stream of wait-for-idle, as one immediately assumes it is stuck in a loop. Signed-off-by: Chris Wilson Reviewed-by: Stuart Summers Link: https://patchwork.freedesktop.org/patch/msgid/20190723091218.5886-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index c6ba350e6e4f..01dd0d1d9bf6 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -951,15 +951,15 @@ wait_for_timelines(struct drm_i915_private *i915, int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags, long timeout) { + /* If the device is asleep, we have no requests outstanding */ + if (!READ_ONCE(i915->gt.awake)) + return 0; + GEM_TRACE("flags=%x (%s), timeout=%ld%s, awake?=%s\n", flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked", timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "", yesno(i915->gt.awake)); - /* If the device is asleep, we have no requests outstanding */ - if (!READ_ONCE(i915->gt.awake)) - return 0; - timeout = wait_for_timelines(i915, flags, timeout); if (timeout < 0) return timeout; -- cgit v1.2.3 From 44c1ee17644c822842782c5e33bf2df19b67df03 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Tue, 23 Jul 2019 08:37:33 -0700 Subject: drm/i915/huc: fix status check Fix botched refactoring of the code that uncorrectly split a check on a bool, treating it as a u32. 
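The type matters because the status value is tested against a bit mask after it is stored: a bool collapses any non-zero value to 1, so a later bitwise test against anything other than bit 0 silently fails. A small illustration (the 0x4 mask is only an example bit, not the real HUC_FW_VERIFIED definition):

	u32 reg = 0x4;		/* raw status read with one flag bit set */
	bool as_bool = reg;	/* implicit conversion collapses the value to 1 */
	u32 as_u32 = reg;	/* full register value preserved */

	/* (as_bool & 0x4) == 0 : the flag is lost and the check wrongly fails */
	/* (as_u32  & 0x4) != 0 : the flag survives and the check passes */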
Reported-by: Dan Carpenter Fixes: 84b1ca2f0e68 ("drm/i915/uc: prefer intel_gt over i915 in GuC/HuC paths") Signed-off-by: Daniele Ceraolo Spurio Cc: Michal Wajdeczko Cc: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190723153733.19401-1-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_huc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c index bc14439173d7..ab6c1564b6a7 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c @@ -162,7 +162,7 @@ int intel_huc_check_status(struct intel_huc *huc) { struct intel_gt *gt = huc_to_gt(huc); intel_wakeref_t wakeref; - bool status = false; + u32 status = 0; if (!intel_uc_is_using_huc(>->uc)) return -ENODEV; -- cgit v1.2.3 From 906339a95e5a35f01cdf0430fca6dd526870a5ae Mon Sep 17 00:00:00 2001 From: Chuhong Yuan Date: Tue, 23 Jul 2019 18:39:16 +0800 Subject: drm/i915: Use dev_get_drvdata Instead of using to_pci_dev + pci_get_drvdata, use dev_get_drvdata to make code simpler. Signed-off-by: Chuhong Yuan Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190723103915.3964-1-hslester96@gmail.com --- drivers/gpu/drm/i915/i915_drv.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 8c81a35f832d..7d0813150146 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -2395,8 +2395,7 @@ static int i915_resume_switcheroo(struct drm_device *dev) static int i915_pm_prepare(struct device *kdev) { - struct pci_dev *pdev = to_pci_dev(kdev); - struct drm_device *dev = pci_get_drvdata(pdev); + struct drm_device *dev = dev_get_drvdata(kdev); if (!dev) { dev_err(kdev, "DRM not initialized, aborting suspend.\n"); @@ -2411,8 +2410,7 @@ static int i915_pm_prepare(struct device *kdev) static int i915_pm_suspend(struct device *kdev) { - struct pci_dev *pdev = to_pci_dev(kdev); - struct drm_device *dev = pci_get_drvdata(pdev); + struct drm_device *dev = dev_get_drvdata(kdev); if (!dev) { dev_err(kdev, "DRM not initialized, aborting suspend.\n"); @@ -2906,8 +2904,7 @@ static int vlv_resume_prepare(struct drm_i915_private *dev_priv, static int intel_runtime_suspend(struct device *kdev) { - struct pci_dev *pdev = to_pci_dev(kdev); - struct drm_device *dev = pci_get_drvdata(pdev); + struct drm_device *dev = dev_get_drvdata(kdev); struct drm_i915_private *dev_priv = to_i915(dev); struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; int ret; @@ -3005,8 +3002,7 @@ static int intel_runtime_suspend(struct device *kdev) static int intel_runtime_resume(struct device *kdev) { - struct pci_dev *pdev = to_pci_dev(kdev); - struct drm_device *dev = pci_get_drvdata(pdev); + struct drm_device *dev = dev_get_drvdata(kdev); struct drm_i915_private *dev_priv = to_i915(dev); struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; int ret = 0; -- cgit v1.2.3 From 92e6475ae0a0383b012eb21c1aaf0e5456b1a3d9 Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Wed, 3 Jul 2019 10:02:39 -0400 Subject: drm/amd/display: Set enabled to false at start of audio disable [Why] In an effort to stop redundant calls to dce110_disable_audio_stream the audio->enabled flag was added to the audio resource struct. 
While this state probably shouldn't have been tracked on the audio struct itself it still works fine for some sequences. However, it does not work for cases where we're freeing the audio resource (such as hotplugs) or when dynamic audio is enabled. In these cases the pipe_ctx->stream_res.audio = NULL before we can set audio->enabled = false. The next time we acquire the audio resource such as on hotplug the audio will not be enabled for the stream since DC thinks it's still enabled. Audio state tracking should cover this sequence. [How] Set audio->enabled = false at the start as long as we have pipe_ctx->stream_res.audio. Signed-off-by: Nicholas Kazlauskas Reviewed-by: Zhan Liu Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 8005989c1263..fafb4b470140 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -1021,6 +1021,8 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option) pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( pipe_ctx->stream_res.stream_enc, true); if (pipe_ctx->stream_res.audio) { + pipe_ctx->stream_res.audio->enabled = false; + if (dc->res_pool->pp_smu) pp_smu = dc->res_pool->pp_smu; @@ -1051,8 +1053,6 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option) /* dal_audio_disable_azalia_audio_jack_presence(stream->audio, * stream->stream_engine_id); */ - if (pipe_ctx->stream_res.audio) - pipe_ctx->stream_res.audio->enabled = false; } } -- cgit v1.2.3 From 2bf8fb39eb70b6cf1d0f7efbe9fa9fb27d55fdf4 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Wed, 24 Jul 2019 09:58:49 +0100 Subject: drm/i915/guc: Set GuC init params only once All the GuC objects are perma-pinned, so their offset can't change at runtime. We can therefore set (and log!) the parameters only once during boot. 
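The restructuring below is compute-once, write-many: the parameter block is derived a single time in intel_guc_init(), after the objects it references are perma-pinned, and only the register write is repeated on each firmware (re)load. Simplified call order, with error handling and unrelated steps omitted:

	intel_guc_init(guc)
	    guc_init_params(guc);		/* fill and log guc->params[] once */

	intel_uc_init_hw(uc)			/* runs on every firmware load / reset */
	    intel_guc_write_params(guc);	/* copy guc->params[] into the SOFT_SCRATCH registers */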
Suggested-by: Chris Wilson Signed-off-by: Daniele Ceraolo Spurio Cc: Chris Wilson Cc: Michal Wajdeczko Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190724085849.18047-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/uc/intel_guc.c | 143 ++++++++++++++++++--------------- drivers/gpu/drm/i915/gt/uc/intel_guc.h | 5 +- drivers/gpu/drm/i915/gt/uc/intel_uc.c | 2 +- 3 files changed, 83 insertions(+), 67 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index 83f2c197375f..1ea6a9e50c02 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -126,66 +126,6 @@ static void guc_shared_data_destroy(struct intel_guc *guc) i915_vma_unpin_and_release(&guc->shared_data, I915_VMA_RELEASE_MAP); } -int intel_guc_init(struct intel_guc *guc) -{ - struct intel_gt *gt = guc_to_gt(guc); - int ret; - - ret = intel_uc_fw_init(&guc->fw); - if (ret) - goto err_fetch; - - ret = guc_shared_data_create(guc); - if (ret) - goto err_fw; - GEM_BUG_ON(!guc->shared_data); - - ret = intel_guc_log_create(&guc->log); - if (ret) - goto err_shared; - - ret = intel_guc_ads_create(guc); - if (ret) - goto err_log; - GEM_BUG_ON(!guc->ads_vma); - - ret = intel_guc_ct_init(&guc->ct); - if (ret) - goto err_ads; - - /* We need to notify the guc whenever we change the GGTT */ - i915_ggtt_enable_guc(gt->ggtt); - - return 0; - -err_ads: - intel_guc_ads_destroy(guc); -err_log: - intel_guc_log_destroy(&guc->log); -err_shared: - guc_shared_data_destroy(guc); -err_fw: - intel_uc_fw_fini(&guc->fw); -err_fetch: - intel_uc_fw_cleanup_fetch(&guc->fw); - return ret; -} - -void intel_guc_fini(struct intel_guc *guc) -{ - struct intel_gt *gt = guc_to_gt(guc); - - i915_ggtt_disable_guc(gt->ggtt); - - intel_guc_ct_fini(&guc->ct); - - intel_guc_ads_destroy(guc); - intel_guc_log_destroy(&guc->log); - guc_shared_data_destroy(guc); - intel_uc_fw_fini(&guc->fw); - intel_uc_fw_cleanup_fetch(&guc->fw); -} - static u32 guc_ctl_debug_flags(struct intel_guc *guc) { u32 level = intel_guc_log_get_level(&guc->log); @@ -281,13 +221,12 @@ static u32 guc_ctl_ads_flags(struct intel_guc *guc) * transfer. These parameters are read by the firmware on startup * and cannot be changed thereafter. */ -void intel_guc_init_params(struct intel_guc *guc) +static void guc_init_params(struct intel_guc *guc) { - struct intel_uncore *uncore = guc_to_gt(guc)->uncore; - u32 params[GUC_CTL_MAX_DWORDS]; + u32 *params = guc->params; int i; - memset(params, 0, sizeof(params)); + BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32)); params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc); params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc); @@ -297,6 +236,17 @@ void intel_guc_init_params(struct intel_guc *guc) for (i = 0; i < GUC_CTL_MAX_DWORDS; i++) DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]); +} + +/* + * Initialise the GuC parameter block before starting the firmware + * transfer. These parameters are read by the firmware on startup + * and cannot be changed thereafter. 
+ */ +void intel_guc_write_params(struct intel_guc *guc) +{ + struct intel_uncore *uncore = guc_to_gt(guc)->uncore; + int i; /* * All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and @@ -308,11 +258,74 @@ void intel_guc_init_params(struct intel_guc *guc) intel_uncore_write(uncore, SOFT_SCRATCH(0), 0); for (i = 0; i < GUC_CTL_MAX_DWORDS; i++) - intel_uncore_write(uncore, SOFT_SCRATCH(1 + i), params[i]); + intel_uncore_write(uncore, SOFT_SCRATCH(1 + i), guc->params[i]); intel_uncore_forcewake_put(uncore, FORCEWAKE_BLITTER); } +int intel_guc_init(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + int ret; + + ret = intel_uc_fw_init(&guc->fw); + if (ret) + goto err_fetch; + + ret = guc_shared_data_create(guc); + if (ret) + goto err_fw; + GEM_BUG_ON(!guc->shared_data); + + ret = intel_guc_log_create(&guc->log); + if (ret) + goto err_shared; + + ret = intel_guc_ads_create(guc); + if (ret) + goto err_log; + GEM_BUG_ON(!guc->ads_vma); + + ret = intel_guc_ct_init(&guc->ct); + if (ret) + goto err_ads; + + /* now that everything is perma-pinned, initialize the parameters */ + guc_init_params(guc); + + /* We need to notify the guc whenever we change the GGTT */ + i915_ggtt_enable_guc(gt->ggtt); + + return 0; + +err_ads: + intel_guc_ads_destroy(guc); +err_log: + intel_guc_log_destroy(&guc->log); +err_shared: + guc_shared_data_destroy(guc); +err_fw: + intel_uc_fw_fini(&guc->fw); +err_fetch: + intel_uc_fw_cleanup_fetch(&guc->fw); + return ret; +} + +void intel_guc_fini(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + + i915_ggtt_disable_guc(gt->ggtt); + + intel_guc_ct_fini(&guc->ct); + + intel_guc_ads_destroy(guc); + intel_guc_log_destroy(&guc->log); + guc_shared_data_destroy(guc); + intel_uc_fw_fini(&guc->fw); + intel_uc_fw_cleanup_fetch(&guc->fw); +} + int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len, u32 *response_buf, u32 response_buf_size) { diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h index 6852352381ce..ac6333ad7102 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h @@ -76,6 +76,9 @@ struct intel_guc { /* Cyclic counter mod pagesize */ u32 db_cacheline; + /* Control params for fw initialization */ + u32 params[GUC_CTL_MAX_DWORDS]; + /* GuC's FW specific registers used in MMIO send */ struct { u32 base; @@ -152,7 +155,7 @@ static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc, void intel_guc_init_early(struct intel_guc *guc); void intel_guc_init_send_regs(struct intel_guc *guc); -void intel_guc_init_params(struct intel_guc *guc); +void intel_guc_write_params(struct intel_guc *guc); int intel_guc_init(struct intel_guc *guc); void intel_guc_fini(struct intel_guc *guc); int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len, diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index 4480a3dc2449..25a8ab3bd22c 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -492,7 +492,7 @@ int intel_uc_init_hw(struct intel_uc *uc) } intel_guc_ads_reset(guc); - intel_guc_init_params(guc); + intel_guc_write_params(guc); ret = intel_guc_fw_upload(guc); if (ret == 0) break; -- cgit v1.2.3 From 298f78895b081911e0b3605f07d79ebd3d4cf7b0 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 2 Jul 2019 16:43:34 +0100 Subject: iommu/io-pgtable: Rename iommu_gather_ops to iommu_flush_ops In preparation for TLB flush gathering in the IOMMU API, rename the 
iommu_gather_ops structure in io-pgtable to iommu_flush_ops, which better describes its purpose and avoids the potential for confusion between different levels of the API. $ find linux/ -type f -name '*.[ch]' | xargs sed -i 's/gather_ops/flush_ops/g' Signed-off-by: Will Deacon --- drivers/gpu/drm/panfrost/panfrost_mmu.c | 2 +- drivers/iommu/arm-smmu-v3.c | 4 ++-- drivers/iommu/arm-smmu.c | 8 ++++---- drivers/iommu/io-pgtable-arm-v7s.c | 2 +- drivers/iommu/io-pgtable-arm.c | 2 +- drivers/iommu/ipmmu-vmsa.c | 4 ++-- drivers/iommu/msm_iommu.c | 4 ++-- drivers/iommu/mtk_iommu.c | 4 ++-- drivers/iommu/qcom_iommu.c | 4 ++-- include/linux/io-pgtable.h | 6 +++--- 10 files changed, 20 insertions(+), 20 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index 92ac995dd9c6..17bceb11e708 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -257,7 +257,7 @@ static void mmu_tlb_sync_context(void *cookie) // TODO: Wait 1000 GPU cycles for HW_ISSUE_6367/T60X } -static const struct iommu_gather_ops mmu_tlb_ops = { +static const struct iommu_flush_ops mmu_tlb_ops = { .tlb_flush_all = mmu_tlb_inv_context_s1, .tlb_add_flush = mmu_tlb_inv_range_nosync, .tlb_sync = mmu_tlb_sync_context, diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index a9a9fabd3968..7e137e1e28f1 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -1603,7 +1603,7 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size, } while (size -= granule); } -static const struct iommu_gather_ops arm_smmu_gather_ops = { +static const struct iommu_flush_ops arm_smmu_flush_ops = { .tlb_flush_all = arm_smmu_tlb_inv_context, .tlb_add_flush = arm_smmu_tlb_inv_range_nosync, .tlb_sync = arm_smmu_tlb_sync, @@ -1796,7 +1796,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain) .ias = ias, .oas = oas, .coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENCY, - .tlb = &arm_smmu_gather_ops, + .tlb = &arm_smmu_flush_ops, .iommu_dev = smmu->dev, }; diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 64977c131ee6..dc08db347ef3 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -251,7 +251,7 @@ enum arm_smmu_domain_stage { struct arm_smmu_domain { struct arm_smmu_device *smmu; struct io_pgtable_ops *pgtbl_ops; - const struct iommu_gather_ops *tlb_ops; + const struct iommu_flush_ops *tlb_ops; struct arm_smmu_cfg cfg; enum arm_smmu_domain_stage stage; bool non_strict; @@ -547,19 +547,19 @@ static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size, writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID); } -static const struct iommu_gather_ops arm_smmu_s1_tlb_ops = { +static const struct iommu_flush_ops arm_smmu_s1_tlb_ops = { .tlb_flush_all = arm_smmu_tlb_inv_context_s1, .tlb_add_flush = arm_smmu_tlb_inv_range_nosync, .tlb_sync = arm_smmu_tlb_sync_context, }; -static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v2 = { +static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v2 = { .tlb_flush_all = arm_smmu_tlb_inv_context_s2, .tlb_add_flush = arm_smmu_tlb_inv_range_nosync, .tlb_sync = arm_smmu_tlb_sync_context, }; -static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v1 = { +static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = { .tlb_flush_all = arm_smmu_tlb_inv_context_s2, .tlb_add_flush = arm_smmu_tlb_inv_vmid_nosync, .tlb_sync = arm_smmu_tlb_sync_vmid, diff --git 
a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index a62733c6a632..116f97ee991e 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -817,7 +817,7 @@ static void dummy_tlb_sync(void *cookie) WARN_ON(cookie != cfg_cookie); } -static const struct iommu_gather_ops dummy_tlb_ops = { +static const struct iommu_flush_ops dummy_tlb_ops = { .tlb_flush_all = dummy_tlb_flush_all, .tlb_add_flush = dummy_tlb_add_flush, .tlb_sync = dummy_tlb_sync, diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 0d6633921c1e..402f913b6f6d 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -1081,7 +1081,7 @@ static void dummy_tlb_sync(void *cookie) WARN_ON(cookie != cfg_cookie); } -static const struct iommu_gather_ops dummy_tlb_ops __initconst = { +static const struct iommu_flush_ops dummy_tlb_ops __initconst = { .tlb_flush_all = dummy_tlb_flush_all, .tlb_add_flush = dummy_tlb_add_flush, .tlb_sync = dummy_tlb_sync, diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index ad0098c0c87c..2c14a2c65b22 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -367,7 +367,7 @@ static void ipmmu_tlb_add_flush(unsigned long iova, size_t size, /* The hardware doesn't support selective TLB flush. */ } -static const struct iommu_gather_ops ipmmu_gather_ops = { +static const struct iommu_flush_ops ipmmu_flush_ops = { .tlb_flush_all = ipmmu_tlb_flush_all, .tlb_add_flush = ipmmu_tlb_add_flush, .tlb_sync = ipmmu_tlb_flush_all, @@ -480,7 +480,7 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K; domain->cfg.ias = 32; domain->cfg.oas = 40; - domain->cfg.tlb = &ipmmu_gather_ops; + domain->cfg.tlb = &ipmmu_flush_ops; domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32); domain->io_domain.geometry.force_aperture = true; /* diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index b25e2eb9e038..8b602384a385 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c @@ -178,7 +178,7 @@ static void __flush_iotlb_sync(void *cookie) */ } -static const struct iommu_gather_ops msm_iommu_gather_ops = { +static const struct iommu_flush_ops msm_iommu_flush_ops = { .tlb_flush_all = __flush_iotlb, .tlb_add_flush = __flush_iotlb_range, .tlb_sync = __flush_iotlb_sync, @@ -345,7 +345,7 @@ static int msm_iommu_domain_config(struct msm_priv *priv) .pgsize_bitmap = msm_iommu_ops.pgsize_bitmap, .ias = 32, .oas = 32, - .tlb = &msm_iommu_gather_ops, + .tlb = &msm_iommu_flush_ops, .iommu_dev = priv->dev, }; diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 82e4be4dfdaf..fed77658d67e 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -188,7 +188,7 @@ static void mtk_iommu_tlb_sync(void *cookie) } } -static const struct iommu_gather_ops mtk_iommu_gather_ops = { +static const struct iommu_flush_ops mtk_iommu_flush_ops = { .tlb_flush_all = mtk_iommu_tlb_flush_all, .tlb_add_flush = mtk_iommu_tlb_add_flush_nosync, .tlb_sync = mtk_iommu_tlb_sync, @@ -267,7 +267,7 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom) .pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap, .ias = 32, .oas = 32, - .tlb = &mtk_iommu_gather_ops, + .tlb = &mtk_iommu_flush_ops, .iommu_dev = data->dev, }; diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c index 34d0b9783b3e..fd9d9f4da735 100644 --- a/drivers/iommu/qcom_iommu.c +++ 
b/drivers/iommu/qcom_iommu.c @@ -164,7 +164,7 @@ static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size, } } -static const struct iommu_gather_ops qcom_gather_ops = { +static const struct iommu_flush_ops qcom_flush_ops = { .tlb_flush_all = qcom_iommu_tlb_inv_context, .tlb_add_flush = qcom_iommu_tlb_inv_range_nosync, .tlb_sync = qcom_iommu_tlb_sync, @@ -215,7 +215,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain, .pgsize_bitmap = qcom_iommu_ops.pgsize_bitmap, .ias = 32, .oas = 40, - .tlb = &qcom_gather_ops, + .tlb = &qcom_flush_ops, .iommu_dev = qcom_iommu->dev, }; diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index b5a450a3bb47..6292ea15d674 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -17,7 +17,7 @@ enum io_pgtable_fmt { }; /** - * struct iommu_gather_ops - IOMMU callbacks for TLB and page table management. + * struct iommu_flush_ops - IOMMU callbacks for TLB and page table management. * * @tlb_flush_all: Synchronously invalidate the entire TLB context. * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range. @@ -28,7 +28,7 @@ enum io_pgtable_fmt { * Note that these can all be called in atomic context and must therefore * not block. */ -struct iommu_gather_ops { +struct iommu_flush_ops { void (*tlb_flush_all)(void *cookie); void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule, bool leaf, void *cookie); @@ -84,7 +84,7 @@ struct io_pgtable_cfg { unsigned int ias; unsigned int oas; bool coherent_walk; - const struct iommu_gather_ops *tlb; + const struct iommu_flush_ops *tlb; struct device *iommu_dev; /* Low-level data specific to the table format */ -- cgit v1.2.3 From 702668e606c0618fd62c7a5d051c9faee256c049 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Wed, 24 Jul 2019 17:18:06 -0700 Subject: drm/i915/uc: Unify uC platform check We have several HAS_* checks for GuC and HuC but we mostly use HAS_GUC and HAS_HUC, with only 1 exception. Since our HW always has either both uC or neither of them, just replace all the checks with a unified HAS_UC. v2: use HAS_GT_UC (Michal) v3: fix comment (Michal) Signed-off-by: Daniele Ceraolo Spurio Cc: Michal Wajdeczko Acked-by: Chris Wilson Reviewed-by: Michal Wajdeczko Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190725001813.4740-2-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/gt/intel_reset.c | 2 +- drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c | 2 +- drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c | 2 +- drivers/gpu/drm/i915/gt/uc/intel_uc.c | 2 +- drivers/gpu/drm/i915/gt/uc/selftest_guc.c | 4 ++-- drivers/gpu/drm/i915/i915_debugfs.c | 6 +++--- drivers/gpu/drm/i915/i915_drv.h | 15 ++------------- drivers/gpu/drm/i915/i915_gpu_error.c | 4 ++-- drivers/gpu/drm/i915/i915_irq.c | 2 +- drivers/gpu/drm/i915/i915_pci.c | 4 ++-- drivers/gpu/drm/i915/intel_device_info.h | 2 +- drivers/gpu/drm/i915/intel_pm.c | 4 ++-- drivers/gpu/drm/i915/intel_wopcm.c | 4 ++-- 13 files changed, 21 insertions(+), 32 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 55e2ddcbd215..98c071fe532b 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -595,7 +595,7 @@ int intel_reset_guc(struct intel_gt *gt) INTEL_GEN(gt->i915) >= 11 ? 
GEN11_GRDOM_GUC : GEN9_GRDOM_GUC; int ret; - GEM_BUG_ON(!HAS_GUC(gt->i915)); + GEM_BUG_ON(!HAS_GT_UC(gt->i915)); intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL); ret = gen6_hw_domain_reset(gt, guc_domain); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c index 3dfa40fdbe99..87169e826747 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c @@ -80,7 +80,7 @@ static void guc_fw_select(struct intel_uc_fw *guc_fw) GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC); - if (!HAS_GUC(i915)) { + if (!HAS_GT_UC(i915)) { guc_fw->fetch_status = INTEL_UC_FIRMWARE_NOT_SUPPORTED; return; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c index 543854c42d9d..ff6f7b157ecb 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c @@ -74,7 +74,7 @@ static void huc_fw_select(struct intel_uc_fw *huc_fw) GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC); - if (!HAS_HUC(dev_priv)) { + if (!HAS_GT_UC(dev_priv)) { huc_fw->fetch_status = INTEL_UC_FIRMWARE_NOT_SUPPORTED; return; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index 25a8ab3bd22c..bdb171c3f36e 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -61,7 +61,7 @@ static int __get_platform_enable_guc(struct intel_uc *uc) struct intel_uc_fw *huc_fw = &uc->huc.fw; int enable_guc = 0; - if (!HAS_GUC(uc_to_gt(uc)->i915)) + if (!HAS_GT_UC(uc_to_gt(uc)->i915)) return 0; /* We don't want to enable GuC/HuC on pre-Gen11 by default */ diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c index 93f7c930ab18..371f7a60c987 100644 --- a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c @@ -134,7 +134,7 @@ static int igt_guc_clients(void *args) struct intel_guc *guc; int err = 0; - GEM_BUG_ON(!HAS_GUC(dev_priv)); + GEM_BUG_ON(!HAS_GT_UC(dev_priv)); mutex_lock(&dev_priv->drm.struct_mutex); wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); @@ -226,7 +226,7 @@ static int igt_guc_doorbells(void *arg) int i, err = 0; u16 db_id; - GEM_BUG_ON(!HAS_GUC(dev_priv)); + GEM_BUG_ON(!HAS_GT_UC(dev_priv)); mutex_lock(&dev_priv->drm.struct_mutex); wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 6d3911469801..24787bb48c9f 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1865,7 +1865,7 @@ static int i915_huc_load_status_info(struct seq_file *m, void *data) intel_wakeref_t wakeref; struct drm_printer p; - if (!HAS_HUC(dev_priv)) + if (!HAS_GT_UC(dev_priv)) return -ENODEV; p = drm_seq_file_printer(m); @@ -1883,7 +1883,7 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data) intel_wakeref_t wakeref; struct drm_printer p; - if (!HAS_GUC(dev_priv)) + if (!HAS_GT_UC(dev_priv)) return -ENODEV; p = drm_seq_file_printer(m); @@ -2062,7 +2062,7 @@ static int i915_guc_log_dump(struct seq_file *m, void *data) u32 *log; int i = 0; - if (!HAS_GUC(dev_priv)) + if (!HAS_GT_UC(dev_priv)) return -ENODEV; if (dump_load_err) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 1a58fb8c88c2..364a9fb543b6 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2271,20 +2271,9 @@ IS_SUBPLATFORM(const struct 
drm_i915_private *i915, #define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc) -/* - * For now, anything with a GuC requires uCode loading, and then supports - * command submission once loaded. But these are logically independent - * properties, so we have separate macros to test them. - */ -#define HAS_GUC(dev_priv) (INTEL_INFO(dev_priv)->has_guc) -#define HAS_GUC_UCODE(dev_priv) (HAS_GUC(dev_priv)) -#define HAS_GUC_SCHED(dev_priv) (HAS_GUC(dev_priv)) - -/* For now, anything with a GuC has also HuC */ -#define HAS_HUC(dev_priv) (HAS_GUC(dev_priv)) -#define HAS_HUC_UCODE(dev_priv) (HAS_GUC(dev_priv)) +#define HAS_GT_UC(dev_priv) (INTEL_INFO(dev_priv)->has_gt_uc) -/* Having a GuC is not the same as using a GuC */ +/* Having GuC/HuC is not the same as using GuC/HuC */ #define USES_GUC(dev_priv) intel_uc_is_using_guc(&(dev_priv)->gt.uc) #define USES_GUC_SUBMISSION(dev_priv) intel_uc_is_using_guc_submission(&(dev_priv)->gt.uc) #define USES_HUC(dev_priv) intel_uc_is_using_huc(&(dev_priv)->gt.uc) diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 2193687eac72..56dfc2650836 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -651,7 +651,7 @@ static void err_print_uc(struct drm_i915_error_state_buf *m, const struct i915_gpu_state *error = container_of(error_uc, typeof(*error), uc); - if (!error->device_info.has_guc) + if (!error->device_info.has_gt_uc) return; intel_uc_fw_dump(&error_uc->guc_fw, &p); @@ -1455,7 +1455,7 @@ capture_uc_state(struct i915_gpu_state *error, struct compress *compress) struct intel_uc *uc = &i915->gt.uc; /* Capturing uC state won't be useful if there is no GuC */ - if (!error->device_info.has_guc) + if (!error->device_info.has_gt_uc) return; error_uc->guc_fw = uc->guc.fw; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 11c73af92597..a17d4fd17962 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -4766,7 +4766,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv) dev_priv->l3_parity.remap_info[i] = NULL; /* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */ - if (HAS_GUC_SCHED(dev_priv) && INTEL_GEN(dev_priv) < 11) + if (HAS_GT_UC(dev_priv) && INTEL_GEN(dev_priv) < 11) dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16; /* Let's track the enabled rps events */ diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 40076ba431d4..bd9211b3d76e 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -595,7 +595,7 @@ static const struct intel_device_info intel_cherryview_info = { GEN9_DEFAULT_PAGE_SIZES, \ .has_logical_ring_preemption = 1, \ .display.has_csr = 1, \ - .has_guc = 1, \ + .has_gt_uc = 1, \ .display.has_ipc = 1, \ .ddb_size = 896 @@ -647,7 +647,7 @@ static const struct intel_device_info intel_skylake_gt4_info = { .display.has_dp_mst = 1, \ .has_logical_ring_contexts = 1, \ .has_logical_ring_preemption = 1, \ - .has_guc = 1, \ + .has_gt_uc = 1, \ .ppgtt_type = INTEL_PPGTT_FULL, \ .ppgtt_size = 48, \ .has_reset_engine = 1, \ diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index 45a9badc9b8e..4f58e8d71b67 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -112,7 +112,7 @@ enum intel_ppgtt_type { func(gpu_reset_clobbers_display); \ func(has_reset_engine); \ func(has_fpga_dbg); \ - func(has_guc); \ + 
func(has_gt_uc); \ func(has_l3_dpf); \ func(has_llc); \ func(has_logical_ring_contexts); \ diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 22472f2bd31b..30399b245f07 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -7162,7 +7162,7 @@ static void gen11_enable_rc6(struct drm_i915_private *dev_priv) for_each_engine(engine, dev_priv, id) I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); - if (HAS_GUC(dev_priv)) + if (HAS_GT_UC(dev_priv)) I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA); I915_WRITE(GEN6_RC_SLEEP, 0); @@ -7243,7 +7243,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv) for_each_engine(engine, dev_priv, id) I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); - if (HAS_GUC(dev_priv)) + if (HAS_GT_UC(dev_priv)) I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA); I915_WRITE(GEN6_RC_SLEEP, 0); diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c index fafd4e6a1147..0e86a9e85b49 100644 --- a/drivers/gpu/drm/i915/intel_wopcm.c +++ b/drivers/gpu/drm/i915/intel_wopcm.c @@ -74,7 +74,7 @@ void intel_wopcm_init_early(struct intel_wopcm *wopcm) { struct drm_i915_private *i915 = wopcm_to_i915(wopcm); - if (!HAS_GUC(i915)) + if (!HAS_GT_UC(i915)) return; if (INTEL_GEN(i915) >= 11) @@ -263,7 +263,7 @@ int intel_wopcm_init_hw(struct intel_wopcm *wopcm, struct intel_gt *gt) if (!USES_GUC(i915)) return 0; - GEM_BUG_ON(!HAS_GUC(i915)); + GEM_BUG_ON(!HAS_GT_UC(i915)); GEM_BUG_ON(!wopcm->guc.size); GEM_BUG_ON(!wopcm->guc.base); -- cgit v1.2.3 From 305ceebd5284a3a6cbeb0f67f1f839cd92be1847 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Wed, 24 Jul 2019 17:18:07 -0700 Subject: drm/i915: Fix handling of non-supported uC There are 2 issues around handling of missing uC support: - We treat lack of uC HW and lack of uC FW definition as 2 different cases, but both of them mean that we don't support the uC on the platform we're running on. - We rely on the modparam to decide if we can take uC paths or not, but we don't sanitize it if it is set incorrectly on platform with no uC support. To fix both of them, unify the 2 cases in a single one and sanitize the modparam on invalid configuration (after printing an error message). The log has been adapted as well, since the user doesn't care why we don't support GuC/HuC (no HW or no FW), just that we do not. Developers can easily find the answer based on the platform, so we can simplify the log. Correcting the modparam has been preferred over failing the load since this is what we usually do for non-supported feature (e.g. the now gone enable_ppgtt would fall back to the highest supported PPGTT mode if the selected one was not available). Note that this patch purposely doesn't change the behavior for platforms that do have uC support, in which case we will still fail if enable_guc is set and the firmware is not available on the system. 
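Condensed, the new sanitization is the familiar warn-then-degrade pattern rather than a probe failure; a simplified sketch of the two fallbacks (log strings shortened from the hunks below, which also sanitize guc_log_level and GuC submission in the same way):

	if (intel_uc_is_using_guc(uc) && !intel_uc_fw_supported(guc_fw)) {
		DRM_WARN("enable_guc=%d, but GuC is not supported!\n",
			 i915_modparams.enable_guc);
		i915_modparams.enable_guc = 0;	/* disables GuC and HuC loading */
	}

	if (intel_uc_is_using_huc(uc) && !intel_uc_fw_supported(huc_fw)) {
		DRM_WARN("enable_guc=%d, but HuC is not supported!\n",
			 i915_modparams.enable_guc);
		i915_modparams.enable_guc &= ~ENABLE_GUC_LOAD_HUC;	/* disables only HuC loading */
	}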
Suggested-by: Michal Wajdeczko Signed-off-by: Daniele Ceraolo Spurio Cc: Michal Wajdeczko Reviewed-by: Michal Wajdeczko Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190725001813.4740-3-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c | 11 ++++----- drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c | 11 ++++----- drivers/gpu/drm/i915/gt/uc/intel_uc.c | 37 ++++++++++++++++--------------- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 8 ------- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h | 5 ----- 5 files changed, 31 insertions(+), 41 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c index 87169e826747..17ce78240cf8 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c @@ -80,12 +80,10 @@ static void guc_fw_select(struct intel_uc_fw *guc_fw) GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC); - if (!HAS_GT_UC(i915)) { - guc_fw->fetch_status = INTEL_UC_FIRMWARE_NOT_SUPPORTED; - return; - } + guc_fw->fetch_status = INTEL_UC_FIRMWARE_NOT_SUPPORTED; - guc_fw->fetch_status = INTEL_UC_FIRMWARE_NOT_STARTED; + if (!HAS_GT_UC(i915)) + return; if (i915_modparams.guc_firmware_path) { guc_fw->path = i915_modparams.guc_firmware_path; @@ -112,6 +110,9 @@ static void guc_fw_select(struct intel_uc_fw *guc_fw) guc_fw->major_ver_wanted = SKL_GUC_FW_MAJOR; guc_fw->minor_ver_wanted = SKL_GUC_FW_MINOR; } + + if (guc_fw->path) + guc_fw->fetch_status = INTEL_UC_FIRMWARE_NOT_STARTED; } /** diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c index ff6f7b157ecb..c3a7bd57fb55 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c @@ -74,12 +74,10 @@ static void huc_fw_select(struct intel_uc_fw *huc_fw) GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC); - if (!HAS_GT_UC(dev_priv)) { - huc_fw->fetch_status = INTEL_UC_FIRMWARE_NOT_SUPPORTED; - return; - } + huc_fw->fetch_status = INTEL_UC_FIRMWARE_NOT_SUPPORTED; - huc_fw->fetch_status = INTEL_UC_FIRMWARE_NOT_STARTED; + if (!HAS_GT_UC(dev_priv)) + return; if (i915_modparams.huc_firmware_path) { huc_fw->path = i915_modparams.huc_firmware_path; @@ -106,6 +104,9 @@ static void huc_fw_select(struct intel_uc_fw *huc_fw) huc_fw->major_ver_wanted = ICL_HUC_FW_MAJOR; huc_fw->minor_ver_wanted = ICL_HUC_FW_MINOR; } + + if (huc_fw->path) + huc_fw->fetch_status = INTEL_UC_FIRMWARE_NOT_STARTED; } /** diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index bdb171c3f36e..3f672ea7456b 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -68,7 +68,7 @@ static int __get_platform_enable_guc(struct intel_uc *uc) if (INTEL_GEN(uc_to_gt(uc)->i915) < 11) return 0; - if (intel_uc_fw_is_selected(guc_fw) && intel_uc_fw_is_selected(huc_fw)) + if (intel_uc_fw_supported(guc_fw) && intel_uc_fw_supported(huc_fw)) enable_guc |= ENABLE_GUC_LOAD_HUC; return enable_guc; @@ -123,26 +123,28 @@ static void sanitize_options_early(struct intel_uc *uc) yesno(intel_uc_is_using_huc(uc))); /* Verify GuC firmware availability */ - if (intel_uc_is_using_guc(uc) && !intel_uc_fw_is_selected(guc_fw)) { - DRM_WARN("Incompatible option detected: %s=%d, %s!\n", - "enable_guc", i915_modparams.enable_guc, - !intel_uc_fw_supported(guc_fw) ? 
- "no GuC hardware" : "no GuC firmware"); + if (intel_uc_is_using_guc(uc) && !intel_uc_fw_supported(guc_fw)) { + DRM_WARN("Incompatible option detected: enable_guc=%d, " + "but GuC is not supported!\n", + i915_modparams.enable_guc); + DRM_INFO("Disabling GuC/HuC loading!\n"); + i915_modparams.enable_guc = 0; } /* Verify HuC firmware availability */ - if (intel_uc_is_using_huc(uc) && !intel_uc_fw_is_selected(huc_fw)) { - DRM_WARN("Incompatible option detected: %s=%d, %s!\n", - "enable_guc", i915_modparams.enable_guc, - !intel_uc_fw_supported(huc_fw) ? - "no HuC hardware" : "no HuC firmware"); + if (intel_uc_is_using_huc(uc) && !intel_uc_fw_supported(huc_fw)) { + DRM_WARN("Incompatible option detected: enable_guc=%d, " + "but HuC is not supported!\n", + i915_modparams.enable_guc); + DRM_INFO("Disabling HuC loading!\n"); + i915_modparams.enable_guc &= ~ENABLE_GUC_LOAD_HUC; } /* XXX: GuC submission is unavailable for now */ if (intel_uc_is_using_guc_submission(uc)) { - DRM_INFO("Incompatible option detected: %s=%d, %s!\n", - "enable_guc", i915_modparams.enable_guc, - "GuC submission not supported"); + DRM_INFO("Incompatible option detected: enable_guc=%d, " + "but GuC submission is not supported!\n", + i915_modparams.enable_guc); DRM_INFO("Switching to non-GuC submission mode!\n"); i915_modparams.enable_guc &= ~ENABLE_GUC_SUBMISSION; } @@ -153,10 +155,9 @@ static void sanitize_options_early(struct intel_uc *uc) __get_default_guc_log_level(uc); if (i915_modparams.guc_log_level > 0 && !intel_uc_is_using_guc(uc)) { - DRM_WARN("Incompatible option detected: %s=%d, %s!\n", - "guc_log_level", i915_modparams.guc_log_level, - !intel_uc_fw_supported(guc_fw) ? - "no GuC hardware" : "GuC not enabled"); + DRM_WARN("Incompatible option detected: guc_log_level=%d, " + "but GuC is not enabled!\n", + i915_modparams.guc_log_level); i915_modparams.guc_log_level = 0; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c index 8ce7210907c0..432b632b04c0 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -49,14 +49,6 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv, GEM_BUG_ON(!intel_uc_fw_supported(uc_fw)); - if (!uc_fw->path) { - dev_info(dev_priv->drm.dev, - "%s: No firmware was defined for %s!\n", - intel_uc_fw_type_repr(uc_fw->type), - intel_platform_name(INTEL_INFO(dev_priv)->platform)); - return; - } - DRM_DEBUG_DRIVER("%s fw fetch %s\n", intel_uc_fw_type_repr(uc_fw->type), uc_fw->path); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h index 833d04d06576..55ac9eeab440 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h @@ -125,11 +125,6 @@ void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw, uc_fw->type = type; } -static inline bool intel_uc_fw_is_selected(struct intel_uc_fw *uc_fw) -{ - return uc_fw->path != NULL; -} - static inline bool intel_uc_fw_is_loaded(struct intel_uc_fw *uc_fw) { return uc_fw->load_status == INTEL_UC_FIRMWARE_SUCCESS; -- cgit v1.2.3 From 21a27d1cdd41a0df8ea5b3ecc61ed7973bd10368 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Wed, 24 Jul 2019 17:18:08 -0700 Subject: drm/i915/uc: Unify uC FW selection Instead of having 2 identical functions for GuC and HuC firmware selection, we can unify the selection logic and just use different lists based on FW type. 
Note that the revid is not relevant for current blobs, but the upcoming CML will be identified as CFL rev 5, so by considering the revid we're ready for that. v2: rework blob list defs (Michal), add order check (Chris), fuse GuC and HuC lists into one. v3: remove difference between no uC HW and no uC FW, simplify related selection code, check the whole fw list (Michal) Signed-off-by: Daniele Ceraolo Spurio Cc: Michal Wajdeczko Cc: Anusha Srivatsa Cc: Chris Wilson Reviewed-by: Chris Wilson #v2 Reviewed-by: Michal Wajdeczko Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190725001813.4740-4-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c | 89 +---------------- drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c | 91 +---------------- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 156 ++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h | 24 ++--- 4 files changed, 164 insertions(+), 196 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c index 17ce78240cf8..99f44d8ae026 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c @@ -31,90 +31,6 @@ #include "intel_guc_fw.h" #include "i915_drv.h" -#define __MAKE_GUC_FW_PATH(KEY) \ - "i915/" \ - __stringify(KEY##_GUC_FW_PREFIX) "_guc_" \ - __stringify(KEY##_GUC_FW_MAJOR) "." \ - __stringify(KEY##_GUC_FW_MINOR) "." \ - __stringify(KEY##_GUC_FW_PATCH) ".bin" - -#define SKL_GUC_FW_PREFIX skl -#define SKL_GUC_FW_MAJOR 33 -#define SKL_GUC_FW_MINOR 0 -#define SKL_GUC_FW_PATCH 0 -#define SKL_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(SKL) -MODULE_FIRMWARE(SKL_GUC_FIRMWARE_PATH); - -#define BXT_GUC_FW_PREFIX bxt -#define BXT_GUC_FW_MAJOR 33 -#define BXT_GUC_FW_MINOR 0 -#define BXT_GUC_FW_PATCH 0 -#define BXT_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(BXT) -MODULE_FIRMWARE(BXT_GUC_FIRMWARE_PATH); - -#define KBL_GUC_FW_PREFIX kbl -#define KBL_GUC_FW_MAJOR 33 -#define KBL_GUC_FW_MINOR 0 -#define KBL_GUC_FW_PATCH 0 -#define KBL_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(KBL) -MODULE_FIRMWARE(KBL_GUC_FIRMWARE_PATH); - -#define GLK_GUC_FW_PREFIX glk -#define GLK_GUC_FW_MAJOR 33 -#define GLK_GUC_FW_MINOR 0 -#define GLK_GUC_FW_PATCH 0 -#define GLK_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(GLK) -MODULE_FIRMWARE(GLK_GUC_FIRMWARE_PATH); - -#define ICL_GUC_FW_PREFIX icl -#define ICL_GUC_FW_MAJOR 33 -#define ICL_GUC_FW_MINOR 0 -#define ICL_GUC_FW_PATCH 0 -#define ICL_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(ICL) -MODULE_FIRMWARE(ICL_GUC_FIRMWARE_PATH); - -static void guc_fw_select(struct intel_uc_fw *guc_fw) -{ - struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw); - struct drm_i915_private *i915 = guc_to_gt(guc)->i915; - - GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC); - - guc_fw->fetch_status = INTEL_UC_FIRMWARE_NOT_SUPPORTED; - - if (!HAS_GT_UC(i915)) - return; - - if (i915_modparams.guc_firmware_path) { - guc_fw->path = i915_modparams.guc_firmware_path; - guc_fw->major_ver_wanted = 0; - guc_fw->minor_ver_wanted = 0; - } else if (IS_ICELAKE(i915)) { - guc_fw->path = ICL_GUC_FIRMWARE_PATH; - guc_fw->major_ver_wanted = ICL_GUC_FW_MAJOR; - guc_fw->minor_ver_wanted = ICL_GUC_FW_MINOR; - } else if (IS_GEMINILAKE(i915)) { - guc_fw->path = GLK_GUC_FIRMWARE_PATH; - guc_fw->major_ver_wanted = GLK_GUC_FW_MAJOR; - guc_fw->minor_ver_wanted = GLK_GUC_FW_MINOR; - } else if (IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) { - guc_fw->path = KBL_GUC_FIRMWARE_PATH; - guc_fw->major_ver_wanted = 
KBL_GUC_FW_MAJOR; - guc_fw->minor_ver_wanted = KBL_GUC_FW_MINOR; - } else if (IS_BROXTON(i915)) { - guc_fw->path = BXT_GUC_FIRMWARE_PATH; - guc_fw->major_ver_wanted = BXT_GUC_FW_MAJOR; - guc_fw->minor_ver_wanted = BXT_GUC_FW_MINOR; - } else if (IS_SKYLAKE(i915)) { - guc_fw->path = SKL_GUC_FIRMWARE_PATH; - guc_fw->major_ver_wanted = SKL_GUC_FW_MAJOR; - guc_fw->minor_ver_wanted = SKL_GUC_FW_MINOR; - } - - if (guc_fw->path) - guc_fw->fetch_status = INTEL_UC_FIRMWARE_NOT_STARTED; -} - /** * intel_guc_fw_init_early() - initializes GuC firmware struct * @guc: intel_guc struct @@ -123,10 +39,7 @@ static void guc_fw_select(struct intel_uc_fw *guc_fw) */ void intel_guc_fw_init_early(struct intel_guc *guc) { - struct intel_uc_fw *guc_fw = &guc->fw; - - intel_uc_fw_init_early(guc_fw, INTEL_UC_FW_TYPE_GUC); - guc_fw_select(guc_fw); + intel_uc_fw_init_early(&guc->fw, INTEL_UC_FW_TYPE_GUC, guc_to_gt(guc)->i915); } static void guc_prepare_xfer(struct intel_guc *guc) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c index c3a7bd57fb55..ba2e1a835830 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c @@ -23,92 +23,6 @@ * Note that HuC firmware loading must be done before GuC loading. */ -#define BXT_HUC_FW_MAJOR 01 -#define BXT_HUC_FW_MINOR 8 -#define BXT_BLD_NUM 2893 - -#define SKL_HUC_FW_MAJOR 01 -#define SKL_HUC_FW_MINOR 07 -#define SKL_BLD_NUM 1398 - -#define KBL_HUC_FW_MAJOR 02 -#define KBL_HUC_FW_MINOR 00 -#define KBL_BLD_NUM 1810 - -#define GLK_HUC_FW_MAJOR 03 -#define GLK_HUC_FW_MINOR 01 -#define GLK_BLD_NUM 2893 - -#define ICL_HUC_FW_MAJOR 8 -#define ICL_HUC_FW_MINOR 4 -#define ICL_BLD_NUM 3238 - -#define HUC_FW_PATH(platform, major, minor, bld_num) \ - "i915/" __stringify(platform) "_huc_ver" __stringify(major) "_" \ - __stringify(minor) "_" __stringify(bld_num) ".bin" - -#define I915_SKL_HUC_UCODE HUC_FW_PATH(skl, SKL_HUC_FW_MAJOR, \ - SKL_HUC_FW_MINOR, SKL_BLD_NUM) -MODULE_FIRMWARE(I915_SKL_HUC_UCODE); - -#define I915_BXT_HUC_UCODE HUC_FW_PATH(bxt, BXT_HUC_FW_MAJOR, \ - BXT_HUC_FW_MINOR, BXT_BLD_NUM) -MODULE_FIRMWARE(I915_BXT_HUC_UCODE); - -#define I915_KBL_HUC_UCODE HUC_FW_PATH(kbl, KBL_HUC_FW_MAJOR, \ - KBL_HUC_FW_MINOR, KBL_BLD_NUM) -MODULE_FIRMWARE(I915_KBL_HUC_UCODE); - -#define I915_GLK_HUC_UCODE HUC_FW_PATH(glk, GLK_HUC_FW_MAJOR, \ - GLK_HUC_FW_MINOR, GLK_BLD_NUM) -MODULE_FIRMWARE(I915_GLK_HUC_UCODE); - -#define I915_ICL_HUC_UCODE HUC_FW_PATH(icl, ICL_HUC_FW_MAJOR, \ - ICL_HUC_FW_MINOR, ICL_BLD_NUM) -MODULE_FIRMWARE(I915_ICL_HUC_UCODE); - -static void huc_fw_select(struct intel_uc_fw *huc_fw) -{ - struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw); - struct drm_i915_private *dev_priv = huc_to_gt(huc)->i915; - - GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC); - - huc_fw->fetch_status = INTEL_UC_FIRMWARE_NOT_SUPPORTED; - - if (!HAS_GT_UC(dev_priv)) - return; - - if (i915_modparams.huc_firmware_path) { - huc_fw->path = i915_modparams.huc_firmware_path; - huc_fw->major_ver_wanted = 0; - huc_fw->minor_ver_wanted = 0; - } else if (IS_SKYLAKE(dev_priv)) { - huc_fw->path = I915_SKL_HUC_UCODE; - huc_fw->major_ver_wanted = SKL_HUC_FW_MAJOR; - huc_fw->minor_ver_wanted = SKL_HUC_FW_MINOR; - } else if (IS_BROXTON(dev_priv)) { - huc_fw->path = I915_BXT_HUC_UCODE; - huc_fw->major_ver_wanted = BXT_HUC_FW_MAJOR; - huc_fw->minor_ver_wanted = BXT_HUC_FW_MINOR; - } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) { - huc_fw->path = I915_KBL_HUC_UCODE; - huc_fw->major_ver_wanted = 
KBL_HUC_FW_MAJOR; - huc_fw->minor_ver_wanted = KBL_HUC_FW_MINOR; - } else if (IS_GEMINILAKE(dev_priv)) { - huc_fw->path = I915_GLK_HUC_UCODE; - huc_fw->major_ver_wanted = GLK_HUC_FW_MAJOR; - huc_fw->minor_ver_wanted = GLK_HUC_FW_MINOR; - } else if (IS_ICELAKE(dev_priv)) { - huc_fw->path = I915_ICL_HUC_UCODE; - huc_fw->major_ver_wanted = ICL_HUC_FW_MAJOR; - huc_fw->minor_ver_wanted = ICL_HUC_FW_MINOR; - } - - if (huc_fw->path) - huc_fw->fetch_status = INTEL_UC_FIRMWARE_NOT_STARTED; -} - /** * intel_huc_fw_init_early() - initializes HuC firmware struct * @huc: intel_huc struct @@ -117,10 +31,7 @@ static void huc_fw_select(struct intel_uc_fw *huc_fw) */ void intel_huc_fw_init_early(struct intel_huc *huc) { - struct intel_uc_fw *huc_fw = &huc->fw; - - intel_uc_fw_init_early(huc_fw, INTEL_UC_FW_TYPE_HUC); - huc_fw_select(huc_fw); + intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC, huc_to_gt(huc)->i915); } static void huc_xfer_rsa(struct intel_huc *huc) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c index 432b632b04c0..9206d4221789 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -29,6 +29,162 @@ #include "intel_uc_fw.h" #include "i915_drv.h" +/* + * List of required GuC and HuC binaries per-platform. + * Must be ordered based on platform + revid, from newer to older. + */ +#define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \ + fw_def(ICELAKE, 0, guc_def(icl, 33, 0, 0), huc_def(icl, 8, 4, 3238)) \ + fw_def(COFFEELAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 02, 00, 1810)) \ + fw_def(GEMINILAKE, 0, guc_def(glk, 33, 0, 0), huc_def(glk, 03, 01, 2893)) \ + fw_def(KABYLAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 02, 00, 1810)) \ + fw_def(BROXTON, 0, guc_def(bxt, 33, 0, 0), huc_def(bxt, 01, 8, 2893)) \ + fw_def(SKYLAKE, 0, guc_def(skl, 33, 0, 0), huc_def(skl, 01, 07, 1398)) + +#define __MAKE_UC_FW_PATH(prefix_, name_, separator_, major_, minor_, patch_) \ + "i915/" \ + __stringify(prefix_) name_ \ + __stringify(major_) separator_ \ + __stringify(minor_) separator_ \ + __stringify(patch_) ".bin" + +#define MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_) \ + __MAKE_UC_FW_PATH(prefix_, "_guc_", ".", major_, minor_, patch_) + +#define MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_) \ + __MAKE_UC_FW_PATH(prefix_, "_huc_ver", "_", major_, minor_, bld_num_) + +/* All blobs need to be declared via MODULE_FIRMWARE() */ +#define INTEL_UC_MODULE_FW(platform_, revid_, guc_, huc_) \ + MODULE_FIRMWARE(guc_); \ + MODULE_FIRMWARE(huc_); + +INTEL_UC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH, MAKE_HUC_FW_PATH) + +/* The below structs and macros are used to iterate across the list of blobs */ +struct __packed uc_fw_blob { + u8 major; + u8 minor; + const char *path; +}; + +#define UC_FW_BLOB(major_, minor_, path_) \ + { .major = major_, .minor = minor_, .path = path_ } + +#define GUC_FW_BLOB(prefix_, major_, minor_, patch_) \ + UC_FW_BLOB(major_, minor_, \ + MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_)) + +#define HUC_FW_BLOB(prefix_, major_, minor_, bld_num_) \ + UC_FW_BLOB(major_, minor_, \ + MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_)) + +struct __packed uc_fw_platform_requirement { + enum intel_platform p; + u8 rev; /* first platform rev using this FW */ + const struct uc_fw_blob blobs[INTEL_UC_FW_NUM_TYPES]; +}; + +#define MAKE_FW_LIST(platform_, revid_, guc_, huc_) \ +{ \ + .p = INTEL_##platform_, \ + .rev = revid_, \ + .blobs[INTEL_UC_FW_TYPE_GUC] = guc_, \ + 
.blobs[INTEL_UC_FW_TYPE_HUC] = huc_, \ +}, + +static void +__uc_fw_auto_select(struct intel_uc_fw *uc_fw, enum intel_platform p, u8 rev) +{ + static const struct uc_fw_platform_requirement fw_blobs[] = { + INTEL_UC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB, HUC_FW_BLOB) + }; + int i; + + for (i = 0; i < ARRAY_SIZE(fw_blobs) && p <= fw_blobs[i].p; i++) { + if (p == fw_blobs[i].p && rev >= fw_blobs[i].rev) { + const struct uc_fw_blob *blob = + &fw_blobs[i].blobs[uc_fw->type]; + uc_fw->path = blob->path; + uc_fw->major_ver_wanted = blob->major; + uc_fw->minor_ver_wanted = blob->minor; + break; + } + } + + /* make sure the list is ordered as expected */ + if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST)) { + for (i = 1; i < ARRAY_SIZE(fw_blobs); i++) { + if (fw_blobs[i].p < fw_blobs[i - 1].p) + continue; + + if (fw_blobs[i].p == fw_blobs[i - 1].p && + fw_blobs[i].rev < fw_blobs[i - 1].rev) + continue; + + pr_err("invalid FW blob order: %s r%u comes before %s r%u\n", + intel_platform_name(fw_blobs[i - 1].p), + fw_blobs[i - 1].rev, + intel_platform_name(fw_blobs[i].p), + fw_blobs[i].rev); + + uc_fw->path = NULL; + } + } +} + +static bool +__uc_fw_override(struct intel_uc_fw *uc_fw) +{ + switch (uc_fw->type) { + case INTEL_UC_FW_TYPE_GUC: + uc_fw->path = i915_modparams.guc_firmware_path; + break; + case INTEL_UC_FW_TYPE_HUC: + uc_fw->path = i915_modparams.huc_firmware_path; + break; + } + + return uc_fw->path; +} + +/** + * intel_uc_fw_init_early - initialize the uC object and select the firmware + * @i915: device private + * @uc_fw: uC firmware + * @type: type of uC + * + * Initialize the state of our uC object and relevant tracking and select the + * firmware to fetch and load. + */ +void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw, + enum intel_uc_fw_type type, + struct drm_i915_private *i915) +{ + /* + * we use FIRMWARE_UNINITIALIZED to detect checks against fetch_status + * before we're looked at the HW caps to see if we have uc support + */ + BUILD_BUG_ON(INTEL_UC_FIRMWARE_UNINITIALIZED); + GEM_BUG_ON(uc_fw->fetch_status); + GEM_BUG_ON(uc_fw->load_status); + GEM_BUG_ON(uc_fw->path); + + uc_fw->type = type; + + if (HAS_GT_UC(i915) && likely(!__uc_fw_override(uc_fw))) + __uc_fw_auto_select(uc_fw, INTEL_INFO(i915)->platform, + INTEL_REVID(i915)); + + if (uc_fw->path) { + uc_fw->fetch_status = INTEL_UC_FIRMWARE_NOT_STARTED; + uc_fw->load_status = INTEL_UC_FIRMWARE_NOT_STARTED; + } else { + uc_fw->fetch_status = INTEL_UC_FIRMWARE_NOT_SUPPORTED; + uc_fw->load_status = INTEL_UC_FIRMWARE_NOT_SUPPORTED; + } +} + /** * intel_uc_fw_fetch - fetch uC firmware * diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h index 55ac9eeab440..c93e271917c9 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h @@ -44,9 +44,10 @@ enum intel_uc_fw_status { }; enum intel_uc_fw_type { - INTEL_UC_FW_TYPE_GUC, + INTEL_UC_FW_TYPE_GUC = 0, INTEL_UC_FW_TYPE_HUC }; +#define INTEL_UC_FW_NUM_TYPES 2 /* * This structure encapsulates all the data needed during the process @@ -109,22 +110,6 @@ static inline const char *intel_uc_fw_type_repr(enum intel_uc_fw_type type) return "uC"; } -static inline -void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw, - enum intel_uc_fw_type type) -{ - /* - * we use FIRMWARE_UNINITIALIZED to detect checks against fetch_status - * before we're looked at the HW caps to see if we have uc support - */ - BUILD_BUG_ON(INTEL_UC_FIRMWARE_UNINITIALIZED); - - uc_fw->path = NULL; - uc_fw->fetch_status = 
INTEL_UC_FIRMWARE_UNINITIALIZED; - uc_fw->load_status = INTEL_UC_FIRMWARE_NOT_STARTED; - uc_fw->type = type; -} - static inline bool intel_uc_fw_is_loaded(struct intel_uc_fw *uc_fw) { return uc_fw->load_status == INTEL_UC_FIRMWARE_SUCCESS; @@ -159,7 +144,10 @@ static inline u32 intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw) return uc_fw->header_size + uc_fw->ucode_size; } -void intel_uc_fw_fetch(struct drm_i915_private *dev_priv, +void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw, + enum intel_uc_fw_type type, + struct drm_i915_private *i915); +void intel_uc_fw_fetch(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw); void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw); int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, -- cgit v1.2.3 From 91e55e545ab8bbedd3a518dbe6124c10388ff92f Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Wed, 24 Jul 2019 17:18:09 -0700 Subject: drm/i915/uc: Unify uc_fw status tracking We currently track fetch and load status separately, but the 2 are actually sequential in the uc lifetime (fetch must complete before we can attempt the load!). Unifying the 2 variables we can better follow the sequential states and improve our trackng of the uC state. Also, sprinkle some GEM_BUG_ON to make sure we transition correctly between states. v2: rename states, add the running state (Michal), drop some logs in the fetch path (Michal, Chris) v3: re-rename states, extend early status check to all helpers (Michal) Suggested-by: Michal Wajdeczko Signed-off-by: Daniele Ceraolo Spurio Cc: Michal Wajdeczko Cc: Chris Wilson Reviewed-by: Michal Wajdeczko Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190725001813.4740-5-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_guc.h | 4 +- drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c | 6 +- drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 2 +- drivers/gpu/drm/i915/gt/uc/intel_huc.c | 8 ++- drivers/gpu/drm/i915/gt/uc/intel_huc.h | 5 ++ drivers/gpu/drm/i915/gt/uc/intel_uc.c | 10 +-- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 78 ++++++++--------------- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h | 63 ++++++++++++------ 8 files changed, 92 insertions(+), 84 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h index ac6333ad7102..714e9892aaff 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h @@ -172,9 +172,9 @@ int intel_guc_suspend(struct intel_guc *guc); int intel_guc_resume(struct intel_guc *guc); struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size); -static inline bool intel_guc_is_loaded(struct intel_guc *guc) +static inline bool intel_guc_is_running(struct intel_guc *guc) { - return intel_uc_fw_is_loaded(&guc->fw); + return intel_uc_fw_is_running(&guc->fw); } static inline int intel_guc_sanitize(struct intel_guc *guc) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c index 99f44d8ae026..eec767383e92 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c @@ -230,5 +230,9 @@ static int guc_fw_xfer(struct intel_uc_fw *guc_fw) */ int intel_guc_fw_upload(struct intel_guc *guc) { - return intel_uc_fw_upload(&guc->fw, guc_fw_xfer); + int ret = intel_uc_fw_upload(&guc->fw, guc_fw_xfer); + if (!ret) + guc->fw.status = INTEL_UC_FIRMWARE_RUNNING; + + return ret; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 
b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index a0f2a01365bc..b4238fe16a03 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -941,7 +941,7 @@ static void __guc_client_disable(struct intel_guc_client *client) * the case, instead of trying (in vain) to communicate with it, let's * just cleanup the doorbell HW and our internal state. */ - if (intel_guc_is_loaded(client->guc)) + if (intel_guc_is_running(client->guc)) destroy_doorbell(client); else __fini_doorbell(client); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c index ab6c1564b6a7..a45976e56af7 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c @@ -117,8 +117,8 @@ int intel_huc_auth(struct intel_huc *huc) struct intel_guc *guc = >->uc.guc; int ret; - if (huc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS) - return -ENOEXEC; + GEM_BUG_ON(!intel_uc_fw_is_loaded(&huc->fw)); + GEM_BUG_ON(intel_huc_is_authenticated(huc)); ret = intel_guc_auth_huc(guc, intel_guc_ggtt_offset(guc, huc->rsa_data)); @@ -138,10 +138,12 @@ int intel_huc_auth(struct intel_huc *huc) goto fail; } + huc->fw.status = INTEL_UC_FIRMWARE_RUNNING; + return 0; fail: - huc->fw.load_status = INTEL_UC_FIRMWARE_FAIL; + huc->fw.status = INTEL_UC_FIRMWARE_FAIL; DRM_ERROR("HuC: Authentication failed %d\n", ret); return ret; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h index 9fa3d4629f2e..ea340f85bc46 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h @@ -56,4 +56,9 @@ static inline int intel_huc_sanitize(struct intel_huc *huc) return 0; } +static inline bool intel_huc_is_authenticated(struct intel_huc *huc) +{ + return intel_uc_fw_is_running(&huc->fw); +} + #endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index 3f672ea7456b..b1815abecf30 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -560,7 +560,7 @@ void intel_uc_fini_hw(struct intel_uc *uc) { struct intel_guc *guc = &uc->guc; - if (!intel_guc_is_loaded(guc)) + if (!intel_guc_is_running(guc)) return; GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw)); @@ -582,7 +582,7 @@ void intel_uc_reset_prepare(struct intel_uc *uc) { struct intel_guc *guc = &uc->guc; - if (!intel_guc_is_loaded(guc)) + if (!intel_guc_is_running(guc)) return; guc_stop_communication(guc); @@ -594,7 +594,7 @@ void intel_uc_runtime_suspend(struct intel_uc *uc) struct intel_guc *guc = &uc->guc; int err; - if (!intel_guc_is_loaded(guc)) + if (!intel_guc_is_running(guc)) return; err = intel_guc_suspend(guc); @@ -609,7 +609,7 @@ void intel_uc_suspend(struct intel_uc *uc) struct intel_guc *guc = &uc->guc; intel_wakeref_t wakeref; - if (!intel_guc_is_loaded(guc)) + if (!intel_guc_is_running(guc)) return; with_intel_runtime_pm(&uc_to_gt(uc)->i915->runtime_pm, wakeref) @@ -621,7 +621,7 @@ int intel_uc_resume(struct intel_uc *uc) struct intel_guc *guc = &uc->guc; int err; - if (!intel_guc_is_loaded(guc)) + if (!intel_guc_is_running(guc)) return 0; guc_enable_communication(guc); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c index 9206d4221789..1e7df2c19265 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -162,12 +162,11 @@ void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw, struct drm_i915_private *i915) { /* - * we 
use FIRMWARE_UNINITIALIZED to detect checks against fetch_status + * we use FIRMWARE_UNINITIALIZED to detect checks against uc_fw->status * before we're looked at the HW caps to see if we have uc support */ BUILD_BUG_ON(INTEL_UC_FIRMWARE_UNINITIALIZED); - GEM_BUG_ON(uc_fw->fetch_status); - GEM_BUG_ON(uc_fw->load_status); + GEM_BUG_ON(uc_fw->status); GEM_BUG_ON(uc_fw->path); uc_fw->type = type; @@ -176,13 +175,10 @@ void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw, __uc_fw_auto_select(uc_fw, INTEL_INFO(i915)->platform, INTEL_REVID(i915)); - if (uc_fw->path) { - uc_fw->fetch_status = INTEL_UC_FIRMWARE_NOT_STARTED; - uc_fw->load_status = INTEL_UC_FIRMWARE_NOT_STARTED; - } else { - uc_fw->fetch_status = INTEL_UC_FIRMWARE_NOT_SUPPORTED; - uc_fw->load_status = INTEL_UC_FIRMWARE_NOT_SUPPORTED; - } + if (uc_fw->path) + uc_fw->status = INTEL_UC_FIRMWARE_SELECTED; + else + uc_fw->status = INTEL_UC_FIRMWARE_NOT_SUPPORTED; } /** @@ -205,20 +201,9 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv, GEM_BUG_ON(!intel_uc_fw_supported(uc_fw)); - DRM_DEBUG_DRIVER("%s fw fetch %s\n", - intel_uc_fw_type_repr(uc_fw->type), uc_fw->path); - - uc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING; - DRM_DEBUG_DRIVER("%s fw fetch %s\n", - intel_uc_fw_type_repr(uc_fw->type), - intel_uc_fw_status_repr(uc_fw->fetch_status)); - err = request_firmware(&fw, uc_fw->path, &pdev->dev); - if (err) { - DRM_DEBUG_DRIVER("%s fw request_firmware err=%d\n", - intel_uc_fw_type_repr(uc_fw->type), err); + if (err) goto fail; - } DRM_DEBUG_DRIVER("%s fw size %zu ptr %p\n", intel_uc_fw_type_repr(uc_fw->type), fw->size, fw); @@ -320,19 +305,13 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv, uc_fw->obj = obj; uc_fw->size = fw->size; - uc_fw->fetch_status = INTEL_UC_FIRMWARE_SUCCESS; - DRM_DEBUG_DRIVER("%s fw fetch %s\n", - intel_uc_fw_type_repr(uc_fw->type), - intel_uc_fw_status_repr(uc_fw->fetch_status)); + uc_fw->status = INTEL_UC_FIRMWARE_AVAILABLE; release_firmware(fw); return; fail: - uc_fw->fetch_status = INTEL_UC_FIRMWARE_FAIL; - DRM_DEBUG_DRIVER("%s fw fetch %s\n", - intel_uc_fw_type_repr(uc_fw->type), - intel_uc_fw_status_repr(uc_fw->fetch_status)); + uc_fw->status = INTEL_UC_FIRMWARE_MISSING; DRM_WARN("%s: Failed to fetch firmware %s (error %d)\n", intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err); @@ -388,14 +367,11 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, DRM_DEBUG_DRIVER("%s fw load %s\n", intel_uc_fw_type_repr(uc_fw->type), uc_fw->path); - if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS) - return -ENOEXEC; - - uc_fw->load_status = INTEL_UC_FIRMWARE_PENDING; - DRM_DEBUG_DRIVER("%s fw load %s\n", - intel_uc_fw_type_repr(uc_fw->type), - intel_uc_fw_status_repr(uc_fw->load_status)); + /* make sure the status was cleared the last time we reset the uc */ + GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw)); + if (!intel_uc_fw_is_available(uc_fw)) + return -ENOEXEC; /* Call custom loader */ intel_uc_fw_ggtt_bind(uc_fw); err = xfer(uc_fw); @@ -403,10 +379,9 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, if (err) goto fail; - uc_fw->load_status = INTEL_UC_FIRMWARE_SUCCESS; - DRM_DEBUG_DRIVER("%s fw load %s\n", - intel_uc_fw_type_repr(uc_fw->type), - intel_uc_fw_status_repr(uc_fw->load_status)); + uc_fw->status = INTEL_UC_FIRMWARE_TRANSFERRED; + DRM_DEBUG_DRIVER("%s fw xfer completed\n", + intel_uc_fw_type_repr(uc_fw->type)); DRM_INFO("%s: Loaded firmware %s (version %u.%u)\n", intel_uc_fw_type_repr(uc_fw->type), @@ -416,10 +391,9 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, return 0; 
fail: - uc_fw->load_status = INTEL_UC_FIRMWARE_FAIL; - DRM_DEBUG_DRIVER("%s fw load %s\n", - intel_uc_fw_type_repr(uc_fw->type), - intel_uc_fw_status_repr(uc_fw->load_status)); + uc_fw->status = INTEL_UC_FIRMWARE_FAIL; + DRM_DEBUG_DRIVER("%s fw load failed\n", + intel_uc_fw_type_repr(uc_fw->type)); DRM_WARN("%s: Failed to load firmware %s (error %d)\n", intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err); @@ -431,7 +405,10 @@ int intel_uc_fw_init(struct intel_uc_fw *uc_fw) { int err; - if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS) + /* this should happen before the load! */ + GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw)); + + if (!intel_uc_fw_is_available(uc_fw)) return -ENOEXEC; err = i915_gem_object_pin_pages(uc_fw->obj); @@ -444,7 +421,7 @@ int intel_uc_fw_init(struct intel_uc_fw *uc_fw) void intel_uc_fw_fini(struct intel_uc_fw *uc_fw) { - if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS) + if (!intel_uc_fw_is_available(uc_fw)) return; i915_gem_object_unpin_pages(uc_fw->obj); @@ -478,7 +455,7 @@ void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw) if (obj) i915_gem_object_put(obj); - uc_fw->fetch_status = INTEL_UC_FIRMWARE_NOT_STARTED; + uc_fw->status = INTEL_UC_FIRMWARE_SELECTED; } /** @@ -492,9 +469,8 @@ void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p) { drm_printf(p, "%s firmware: %s\n", intel_uc_fw_type_repr(uc_fw->type), uc_fw->path); - drm_printf(p, "\tstatus: fetch %s, load %s\n", - intel_uc_fw_status_repr(uc_fw->fetch_status), - intel_uc_fw_status_repr(uc_fw->load_status)); + drm_printf(p, "\tstatus: %s\n", + intel_uc_fw_status_repr(uc_fw->status)); drm_printf(p, "\tversion: wanted %u.%u, found %u.%u\n", uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted, uc_fw->major_ver_found, uc_fw->minor_ver_found); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h index c93e271917c9..f6aa2e3e4d1f 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h @@ -35,12 +35,14 @@ struct drm_i915_private; #define INTEL_UC_FIRMWARE_URL "https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/tree/i915" enum intel_uc_fw_status { - INTEL_UC_FIRMWARE_NOT_SUPPORTED = -2, /* no uc HW */ - INTEL_UC_FIRMWARE_FAIL = -1, + INTEL_UC_FIRMWARE_FAIL = -3, /* failed to xfer or init/auth the fw */ + INTEL_UC_FIRMWARE_MISSING = -2, /* blob not found on the system */ + INTEL_UC_FIRMWARE_NOT_SUPPORTED = -1, /* no uc HW */ INTEL_UC_FIRMWARE_UNINITIALIZED = 0, /* used to catch checks done too early */ - INTEL_UC_FIRMWARE_NOT_STARTED = 1, - INTEL_UC_FIRMWARE_PENDING, - INTEL_UC_FIRMWARE_SUCCESS + INTEL_UC_FIRMWARE_SELECTED, /* selected the blob we want to load */ + INTEL_UC_FIRMWARE_AVAILABLE, /* blob found and copied in mem */ + INTEL_UC_FIRMWARE_TRANSFERRED, /* dma xfer done */ + INTEL_UC_FIRMWARE_RUNNING /* init/auth done */ }; enum intel_uc_fw_type { @@ -57,8 +59,7 @@ struct intel_uc_fw { const char *path; size_t size; struct drm_i915_gem_object *obj; - enum intel_uc_fw_status fetch_status; - enum intel_uc_fw_status load_status; + enum intel_uc_fw_status status; /* * The firmware build process will generate a version header file with major and @@ -83,18 +84,22 @@ static inline const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status) { switch (status) { - case INTEL_UC_FIRMWARE_NOT_SUPPORTED: - return "N/A - uc HW not available"; case INTEL_UC_FIRMWARE_FAIL: return "FAIL"; + case INTEL_UC_FIRMWARE_MISSING: + return "MISSING"; + case 
INTEL_UC_FIRMWARE_NOT_SUPPORTED: + return "N/A"; case INTEL_UC_FIRMWARE_UNINITIALIZED: return "UNINITIALIZED"; - case INTEL_UC_FIRMWARE_NOT_STARTED: - return "NOT_STARTED"; - case INTEL_UC_FIRMWARE_PENDING: - return "PENDING"; - case INTEL_UC_FIRMWARE_SUCCESS: - return "SUCCESS"; + case INTEL_UC_FIRMWARE_SELECTED: + return "SELECTED"; + case INTEL_UC_FIRMWARE_AVAILABLE: + return "AVAILABLE"; + case INTEL_UC_FIRMWARE_TRANSFERRED: + return "TRANSFERRED"; + case INTEL_UC_FIRMWARE_RUNNING: + return "RUNNING"; } return ""; } @@ -110,22 +115,38 @@ static inline const char *intel_uc_fw_type_repr(enum intel_uc_fw_type type) return "uC"; } +static inline enum intel_uc_fw_status +__intel_uc_fw_status(struct intel_uc_fw *uc_fw) +{ + /* shouldn't call this before checking hw/blob availability */ + GEM_BUG_ON(uc_fw->status == INTEL_UC_FIRMWARE_UNINITIALIZED); + return uc_fw->status; +} + +static inline bool intel_uc_fw_is_available(struct intel_uc_fw *uc_fw) +{ + return __intel_uc_fw_status(uc_fw) >= INTEL_UC_FIRMWARE_AVAILABLE; +} + static inline bool intel_uc_fw_is_loaded(struct intel_uc_fw *uc_fw) { - return uc_fw->load_status == INTEL_UC_FIRMWARE_SUCCESS; + return __intel_uc_fw_status(uc_fw) >= INTEL_UC_FIRMWARE_TRANSFERRED; +} + +static inline bool intel_uc_fw_is_running(struct intel_uc_fw *uc_fw) +{ + return __intel_uc_fw_status(uc_fw) == INTEL_UC_FIRMWARE_RUNNING; } static inline bool intel_uc_fw_supported(struct intel_uc_fw *uc_fw) { - /* shouldn't call this before checking hw/blob availability */ - GEM_BUG_ON(uc_fw->fetch_status == INTEL_UC_FIRMWARE_UNINITIALIZED); - return uc_fw->fetch_status != INTEL_UC_FIRMWARE_NOT_SUPPORTED; + return __intel_uc_fw_status(uc_fw) != INTEL_UC_FIRMWARE_NOT_SUPPORTED; } static inline void intel_uc_fw_sanitize(struct intel_uc_fw *uc_fw) { if (intel_uc_fw_is_loaded(uc_fw)) - uc_fw->load_status = INTEL_UC_FIRMWARE_PENDING; + uc_fw->status = INTEL_UC_FIRMWARE_AVAILABLE; } /** @@ -138,7 +159,7 @@ static inline void intel_uc_fw_sanitize(struct intel_uc_fw *uc_fw) */ static inline u32 intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw) { - if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS) + if (!intel_uc_fw_is_available(uc_fw)) return 0; return uc_fw->header_size + uc_fw->ucode_size; -- cgit v1.2.3 From 90dd992260cc44cec8f6e41ab5e64938921912c0 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Wed, 24 Jul 2019 17:18:10 -0700 Subject: drm/i915/uc: Move xfer rsa logic to common function The way we copy the RSA is the same for GuC and HuC, so we can move the logic in a common function. this will also make any update needed for local memory easier. 
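For illustration, a minimal stand-alone C sketch of the refactor this patch describes: two firmware paths sharing one bounded copy helper that reports how many bytes it copied, so each caller can sanity-check the length. All names below are hypothetical and are not the i915 API.

#include <assert.h>
#include <stddef.h>
#include <string.h>

struct fw_blob {
	const unsigned char *rsa;	/* start of the RSA signature in the image */
	size_t rsa_size;		/* signature size in bytes */
};

/* Shared helper: copy at most max_len bytes, return how many were copied. */
static size_t fw_copy_rsa(const struct fw_blob *fw, void *dst, size_t max_len)
{
	size_t n = fw->rsa_size < max_len ? fw->rsa_size : max_len;

	memcpy(dst, fw->rsa, n);
	return n;
}

int main(void)
{
	static const unsigned char image[256];
	struct fw_blob fw = { .rsa = image, .rsa_size = sizeof(image) };
	unsigned char scratch[256];
	size_t copied;

	/* a "GuC-like" and a "HuC-like" caller would both reuse the helper... */
	copied = fw_copy_rsa(&fw, scratch, sizeof(scratch));
	/* ...and check the returned length against the expected signature size */
	assert(copied == fw.rsa_size);
	return 0;
}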
v2: return the number of copied bytes and check it (Chris) Signed-off-by: Daniele Ceraolo Spurio Reviewed-by: Chris Wilson #v1 Reviewed-by: Michal Wajdeczko Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190725001813.4740-6-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c | 7 +++---- drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c | 10 +++++----- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 20 ++++++++++++++++++++ drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h | 1 + 4 files changed, 29 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c index eec767383e92..385f6d38bf49 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c @@ -75,13 +75,12 @@ static void guc_prepare_xfer(struct intel_guc *guc) static void guc_xfer_rsa(struct intel_guc *guc) { struct intel_uncore *uncore = guc_to_gt(guc)->uncore; - struct intel_uc_fw *fw = &guc->fw; - struct sg_table *pages = fw->obj->mm.pages; u32 rsa[UOS_RSA_SCRATCH_COUNT]; + size_t copied; int i; - sg_pcopy_to_buffer(pages->sgl, pages->nents, - rsa, sizeof(rsa), fw->rsa_offset); + copied = intel_uc_fw_copy_rsa(&guc->fw, rsa, sizeof(rsa)); + GEM_BUG_ON(copied < sizeof(rsa)); for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++) intel_uncore_write(uncore, UOS_RSA_SCRATCH(i), rsa[i]); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c index ba2e1a835830..472568843ccf 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c @@ -36,17 +36,17 @@ void intel_huc_fw_init_early(struct intel_huc *huc) static void huc_xfer_rsa(struct intel_huc *huc) { - struct intel_uc_fw *fw = &huc->fw; - struct sg_table *pages = fw->obj->mm.pages; + size_t copied; /* * HuC firmware image is outside GuC accessible range. * Copy the RSA signature out of the image into * the perma-pinned region set aside for it */ - sg_pcopy_to_buffer(pages->sgl, pages->nents, - huc->rsa_data_vaddr, fw->rsa_size, - fw->rsa_offset); + GEM_BUG_ON(huc->fw.rsa_size > huc->rsa_data->size); + copied = intel_uc_fw_copy_rsa(&huc->fw, huc->rsa_data_vaddr, + huc->rsa_data->size); + GEM_BUG_ON(copied < huc->fw.rsa_size); } static int huc_xfer_ucode(struct intel_huc *huc) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c index 1e7df2c19265..f60129c17e40 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -458,6 +458,26 @@ void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw) uc_fw->status = INTEL_UC_FIRMWARE_SELECTED; } +/** + * intel_uc_fw_copy_rsa - copy fw RSA to buffer + * + * @uc_fw: uC firmware + * @dst: dst buffer + * @max_len: max number of bytes to copy + * + * Return: number of copied bytes. 
+ */ +size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len) +{ + struct sg_table *pages = uc_fw->obj->mm.pages; + u32 size = min_t(u32, uc_fw->rsa_size, max_len); + + GEM_BUG_ON(!intel_uc_fw_is_available(uc_fw)); + + return sg_pcopy_to_buffer(pages->sgl, pages->nents, + dst, size, uc_fw->rsa_offset); +} + /** * intel_uc_fw_dump - dump information about uC firmware * @uc_fw: uC firmware diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h index f6aa2e3e4d1f..c843d00b1b75 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h @@ -176,6 +176,7 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, int intel_uc_fw_init(struct intel_uc_fw *uc_fw); void intel_uc_fw_fini(struct intel_uc_fw *uc_fw); u32 intel_uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw); +size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len); void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p); #endif -- cgit v1.2.3 From 9cb27945570567b75af7a8751803d41d0a6ce1a7 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Wed, 24 Jul 2019 17:18:11 -0700 Subject: drm/i915/huc: Copy huc rsa only once The binary is perma-pinned and the rsa is not going to change, so copy it only once and not on every load. v2: onion unwind (Chris) Signed-off-by: Daniele Ceraolo Spurio Cc: Fernando Pacheco Reviewed-by: Chris Wilson #v1 Reviewed-by: Michal Wajdeczko Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190725001813.4740-7-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_huc.c | 27 +++++++++++++++++++++++---- drivers/gpu/drm/i915/gt/uc/intel_huc.h | 1 - drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c | 17 ----------------- 3 files changed, 23 insertions(+), 22 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c index a45976e56af7..c9535caba844 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c @@ -50,6 +50,7 @@ static int intel_huc_rsa_data_create(struct intel_huc *huc) struct intel_gt *gt = huc_to_gt(huc); struct intel_guc *guc = >->uc.guc; struct i915_vma *vma; + size_t copied; void *vaddr; /* @@ -62,6 +63,7 @@ static int intel_huc_rsa_data_create(struct intel_huc *huc) * the authentication since its GGTT offset will be GuC * accessible. */ + GEM_BUG_ON(huc->fw.rsa_size > PAGE_SIZE); vma = intel_guc_allocate_vma(guc, PAGE_SIZE); if (IS_ERR(vma)) return PTR_ERR(vma); @@ -72,26 +74,43 @@ static int intel_huc_rsa_data_create(struct intel_huc *huc) return PTR_ERR(vaddr); } + copied = intel_uc_fw_copy_rsa(&huc->fw, vaddr, vma->size); + GEM_BUG_ON(copied < huc->fw.rsa_size); + + i915_gem_object_unpin_map(vma->obj); + huc->rsa_data = vma; - huc->rsa_data_vaddr = vaddr; return 0; } static void intel_huc_rsa_data_destroy(struct intel_huc *huc) { - i915_vma_unpin_and_release(&huc->rsa_data, I915_VMA_RELEASE_MAP); + i915_vma_unpin_and_release(&huc->rsa_data, 0); } int intel_huc_init(struct intel_huc *huc) { int err; - err = intel_huc_rsa_data_create(huc); + err = intel_uc_fw_init(&huc->fw); if (err) return err; - return intel_uc_fw_init(&huc->fw); + /* + * HuC firmware image is outside GuC accessible range. 
+ * Copy the RSA signature out of the image into + * a perma-pinned region set aside for it + */ + err = intel_huc_rsa_data_create(huc); + if (err) + goto out_fini; + + return 0; + +out_fini: + intel_uc_fw_fini(&huc->fw); + return err; } void intel_huc_fini(struct intel_huc *huc) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h index ea340f85bc46..4465209ce233 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h @@ -35,7 +35,6 @@ struct intel_huc { /* HuC-specific additions */ struct i915_vma *rsa_data; - void *rsa_data_vaddr; struct { i915_reg_t reg; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c index 472568843ccf..7d2d2eb94d22 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c @@ -34,21 +34,6 @@ void intel_huc_fw_init_early(struct intel_huc *huc) intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC, huc_to_gt(huc)->i915); } -static void huc_xfer_rsa(struct intel_huc *huc) -{ - size_t copied; - - /* - * HuC firmware image is outside GuC accessible range. - * Copy the RSA signature out of the image into - * the perma-pinned region set aside for it - */ - GEM_BUG_ON(huc->fw.rsa_size > huc->rsa_data->size); - copied = intel_uc_fw_copy_rsa(&huc->fw, huc->rsa_data_vaddr, - huc->rsa_data->size); - GEM_BUG_ON(copied < huc->fw.rsa_size); -} - static int huc_xfer_ucode(struct intel_huc *huc) { struct intel_uc_fw *huc_fw = &huc->fw; @@ -108,8 +93,6 @@ static int huc_fw_xfer(struct intel_uc_fw *huc_fw) { struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw); - huc_xfer_rsa(huc); - return huc_xfer_ucode(huc); } -- cgit v1.2.3 From 4ca8d2ef8dab4ad61347e52174b870c44f1508a3 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Wed, 24 Jul 2019 17:18:12 -0700 Subject: drm/i915/uc: Plumb the gt through fw_upload The gt is our new central structure for uc-related code, so we can use that instead of jumping back to i915 via the fw object. Since we have it in the upload function it is easy to pass it through the lower levels of the xfer process instead of continuosly jumping via uc_fw->uc->gt, which will also make things a bit cleaner for the next patch. 
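As a stand-alone illustration of the plumbing pattern described above (passing the device context down to the transfer callback explicitly instead of re-deriving it from back-pointers), here is a hypothetical sketch in plain C; none of the names below are actual i915 symbols.

#include <stdio.h>

struct device_ctx { const char *name; };
struct firmware   { const char *path; };

/* the xfer callback now receives the context alongside the firmware */
typedef int (*xfer_fn)(struct firmware *fw, struct device_ctx *ctx);

static int do_xfer(struct firmware *fw, struct device_ctx *ctx)
{
	printf("uploading %s on %s\n", fw->path, ctx->name);
	return 0;
}

static int upload(struct firmware *fw, struct device_ctx *ctx, xfer_fn xfer)
{
	/* bind, transfer, unbind all use the same explicitly passed ctx */
	return xfer(fw, ctx);
}

int main(void)
{
	struct device_ctx gt = { .name = "gt0" };
	struct firmware fw = { .path = "example/fw.bin" };

	return upload(&fw, &gt, do_xfer);
}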
Signed-off-by: Daniele Ceraolo Spurio Cc: Michal Wajdeczko Cc: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190725001813.4740-8-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c | 35 ++++++++++++++----------------- drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c | 32 +++++++++++----------------- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 29 ++++++++++++------------- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h | 8 ++++--- 4 files changed, 48 insertions(+), 56 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c index 385f6d38bf49..3ea0de6f4b73 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c @@ -42,10 +42,8 @@ void intel_guc_fw_init_early(struct intel_guc *guc) intel_uc_fw_init_early(&guc->fw, INTEL_UC_FW_TYPE_GUC, guc_to_gt(guc)->i915); } -static void guc_prepare_xfer(struct intel_guc *guc) +static void guc_prepare_xfer(struct intel_uncore *uncore) { - struct intel_gt *gt = guc_to_gt(guc); - struct intel_uncore *uncore = gt->uncore; u32 shim_flags = GUC_DISABLE_SRAM_INIT_TO_ZEROES | GUC_ENABLE_READ_CACHE_LOGIC | GUC_ENABLE_MIA_CACHING | @@ -56,12 +54,12 @@ static void guc_prepare_xfer(struct intel_guc *guc) /* Must program this register before loading the ucode with DMA */ intel_uncore_write(uncore, GUC_SHIM_CONTROL, shim_flags); - if (IS_GEN9_LP(gt->i915)) + if (IS_GEN9_LP(uncore->i915)) intel_uncore_write(uncore, GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE); else intel_uncore_write(uncore, GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE); - if (IS_GEN(gt->i915, 9)) { + if (IS_GEN(uncore->i915, 9)) { /* DOP Clock Gating Enable for GuC clocks */ intel_uncore_rmw(uncore, GEN7_MISCCPCTL, 0, GEN8_DOP_CLOCK_GATE_GUC_ENABLE); @@ -72,14 +70,14 @@ static void guc_prepare_xfer(struct intel_guc *guc) } /* Copy RSA signature from the fw image to HW for verification */ -static void guc_xfer_rsa(struct intel_guc *guc) +static void guc_xfer_rsa(struct intel_uc_fw *guc_fw, + struct intel_uncore *uncore) { - struct intel_uncore *uncore = guc_to_gt(guc)->uncore; u32 rsa[UOS_RSA_SCRATCH_COUNT]; size_t copied; int i; - copied = intel_uc_fw_copy_rsa(&guc->fw, rsa, sizeof(rsa)); + copied = intel_uc_fw_copy_rsa(guc_fw, rsa, sizeof(rsa)); GEM_BUG_ON(copied < sizeof(rsa)); for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++) @@ -155,10 +153,10 @@ static int guc_wait_ucode(struct intel_uncore *uncore) * transfer between GTT locations. This functionality is left out of the API * for now as there is no need for it. */ -static int guc_xfer_ucode(struct intel_guc *guc) +static int guc_xfer_ucode(struct intel_uc_fw *guc_fw, + struct intel_gt *gt) { - struct intel_uncore *uncore = guc_to_gt(guc)->uncore; - struct intel_uc_fw *guc_fw = &guc->fw; + struct intel_uncore *uncore = gt->uncore; unsigned long offset; /* @@ -169,7 +167,7 @@ static int guc_xfer_ucode(struct intel_guc *guc) guc_fw->header_size + guc_fw->ucode_size); /* Set the source address for the new blob */ - offset = intel_uc_fw_ggtt_offset(guc_fw) + guc_fw->header_offset; + offset = intel_uc_fw_ggtt_offset(guc_fw, gt->ggtt) + guc_fw->header_offset; intel_uncore_write(uncore, DMA_ADDR_0_LOW, lower_32_bits(offset)); intel_uncore_write(uncore, DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF); @@ -189,26 +187,25 @@ static int guc_xfer_ucode(struct intel_guc *guc) /* * Load the GuC firmware blob into the MinuteIA. 
*/ -static int guc_fw_xfer(struct intel_uc_fw *guc_fw) +static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct intel_gt *gt) { - struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw); - struct intel_uncore *uncore = guc_to_gt(guc)->uncore; + struct intel_uncore *uncore = gt->uncore; int ret; GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC); intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); - guc_prepare_xfer(guc); + guc_prepare_xfer(uncore); /* * Note that GuC needs the CSS header plus uKernel code to be copied * by the DMA engine in one operation, whereas the RSA signature is * loaded via MMIO. */ - guc_xfer_rsa(guc); + guc_xfer_rsa(guc_fw, uncore); - ret = guc_xfer_ucode(guc); + ret = guc_xfer_ucode(guc_fw, gt); intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); @@ -229,7 +226,7 @@ static int guc_fw_xfer(struct intel_uc_fw *guc_fw) */ int intel_guc_fw_upload(struct intel_guc *guc) { - int ret = intel_uc_fw_upload(&guc->fw, guc_fw_xfer); + int ret = intel_uc_fw_upload(&guc->fw, guc_to_gt(guc), guc_fw_xfer); if (!ret) guc->fw.status = INTEL_UC_FIRMWARE_RUNNING; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c index 7d2d2eb94d22..2e7ac8863728 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c @@ -34,10 +34,17 @@ void intel_huc_fw_init_early(struct intel_huc *huc) intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC, huc_to_gt(huc)->i915); } -static int huc_xfer_ucode(struct intel_huc *huc) +/** + * huc_fw_xfer() - DMA's the firmware + * @huc_fw: the firmware descriptor + * + * Transfer the firmware image to RAM for execution by the microcontroller. + * + * Return: 0 on success, non-zero on failure + */ +static int huc_fw_xfer(struct intel_uc_fw *huc_fw, struct intel_gt *gt) { - struct intel_uc_fw *huc_fw = &huc->fw; - struct intel_uncore *uncore = huc_to_gt(huc)->uncore; + struct intel_uncore *uncore = gt->uncore; unsigned long offset = 0; u32 size; int ret; @@ -47,7 +54,7 @@ static int huc_xfer_ucode(struct intel_huc *huc) intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); /* Set the source address for the uCode */ - offset = intel_uc_fw_ggtt_offset(huc_fw) + + offset = intel_uc_fw_ggtt_offset(huc_fw, gt->ggtt) + huc_fw->header_offset; intel_uncore_write(uncore, DMA_ADDR_0_LOW, lower_32_bits(offset)); @@ -81,21 +88,6 @@ static int huc_xfer_ucode(struct intel_huc *huc) return ret; } -/** - * huc_fw_xfer() - DMA's the firmware - * @huc_fw: the firmware descriptor - * - * Transfer the firmware image to RAM for execution by the microcontroller. 
- * - * Return: 0 on success, non-zero on failure - */ -static int huc_fw_xfer(struct intel_uc_fw *huc_fw) -{ - struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw); - - return huc_xfer_ucode(huc); -} - /** * intel_huc_fw_upload() - load HuC uCode to device * @huc: intel_huc structure @@ -110,5 +102,5 @@ static int huc_fw_xfer(struct intel_uc_fw *huc_fw) */ int intel_huc_fw_upload(struct intel_huc *huc) { - return intel_uc_fw_upload(&huc->fw, huc_fw_xfer); + return intel_uc_fw_upload(&huc->fw, huc_to_gt(huc), huc_fw_xfer); } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c index f60129c17e40..8d099dac0224 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -321,12 +321,13 @@ fail: release_firmware(fw); /* OK even if fw is NULL */ } -static void intel_uc_fw_ggtt_bind(struct intel_uc_fw *uc_fw) +static void intel_uc_fw_ggtt_bind(struct intel_uc_fw *uc_fw, + struct intel_gt *gt) { struct drm_i915_gem_object *obj = uc_fw->obj; - struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt; + struct i915_ggtt *ggtt = gt->ggtt; struct i915_vma dummy = { - .node.start = intel_uc_fw_ggtt_offset(uc_fw), + .node.start = intel_uc_fw_ggtt_offset(uc_fw, ggtt), .node.size = obj->base.size, .pages = obj->mm.pages, .vm = &ggtt->vm, @@ -341,11 +342,12 @@ static void intel_uc_fw_ggtt_bind(struct intel_uc_fw *uc_fw) ggtt->vm.insert_entries(&ggtt->vm, &dummy, I915_CACHE_NONE, 0); } -static void intel_uc_fw_ggtt_unbind(struct intel_uc_fw *uc_fw) +static void intel_uc_fw_ggtt_unbind(struct intel_uc_fw *uc_fw, + struct intel_gt *gt) { struct drm_i915_gem_object *obj = uc_fw->obj; - struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt; - u64 start = intel_uc_fw_ggtt_offset(uc_fw); + struct i915_ggtt *ggtt = gt->ggtt; + u64 start = intel_uc_fw_ggtt_offset(uc_fw, ggtt); ggtt->vm.clear_range(&ggtt->vm, start, obj->base.size); } @@ -353,14 +355,15 @@ static void intel_uc_fw_ggtt_unbind(struct intel_uc_fw *uc_fw) /** * intel_uc_fw_upload - load uC firmware using custom loader * @uc_fw: uC firmware + * @gt: the intel_gt structure * @xfer: custom uC firmware loader function * * Loads uC firmware using custom loader and updates internal flags. * * Return: 0 on success, non-zero on failure. 
*/ -int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, - int (*xfer)(struct intel_uc_fw *uc_fw)) +int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, struct intel_gt *gt, + int (*xfer)(struct intel_uc_fw *uc_fw, struct intel_gt *gt)) { int err; @@ -373,9 +376,9 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, if (!intel_uc_fw_is_available(uc_fw)) return -ENOEXEC; /* Call custom loader */ - intel_uc_fw_ggtt_bind(uc_fw); - err = xfer(uc_fw); - intel_uc_fw_ggtt_unbind(uc_fw); + intel_uc_fw_ggtt_bind(uc_fw, gt); + err = xfer(uc_fw, gt); + intel_uc_fw_ggtt_unbind(uc_fw, gt); if (err) goto fail; @@ -427,10 +430,8 @@ void intel_uc_fw_fini(struct intel_uc_fw *uc_fw) i915_gem_object_unpin_pages(uc_fw->obj); } -u32 intel_uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw) +u32 intel_uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw, struct i915_ggtt *ggtt) { - struct drm_i915_private *i915 = to_i915(uc_fw->obj->base.dev); - struct i915_ggtt *ggtt = &i915->ggtt; struct drm_mm_node *node = &ggtt->uc_fw; GEM_BUG_ON(!node->allocated); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h index c843d00b1b75..a69b6f00fe16 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h @@ -30,6 +30,8 @@ struct drm_printer; struct drm_i915_private; +struct intel_gt; +struct i915_ggtt; /* Home of GuC, HuC and DMC firmwares */ #define INTEL_UC_FIRMWARE_URL "https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/tree/i915" @@ -171,11 +173,11 @@ void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw, void intel_uc_fw_fetch(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw); void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw); -int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, - int (*xfer)(struct intel_uc_fw *uc_fw)); +int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, struct intel_gt *gt, + int (*xfer)(struct intel_uc_fw *uc_fw, struct intel_gt *gt)); int intel_uc_fw_init(struct intel_uc_fw *uc_fw); void intel_uc_fw_fini(struct intel_uc_fw *uc_fw); -u32 intel_uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw); +u32 intel_uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw, struct i915_ggtt *ggtt); size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len); void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p); -- cgit v1.2.3 From 8d5682f66252c421b96f720fda7f94431e444593 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Wed, 24 Jul 2019 17:18:13 -0700 Subject: drm/i915/uc: Unify uC firmware upload The way we load the firmwares is the same for both GuC and HuC, the only difference is in the wopcm destination address and the dma flags, so we easily can move the logic to a common function and pass in offset and flags. The only other difference in the uplaod path are some the extra steps that guc does before and after the xfer, but those don't require the guc fw to be pinned in ggtt and can safely be performed before calling the uc_upload function. Note that this patch re-introduces the dma xfer wait for guc loading that was removed with "drm/i915/guc: Propagate the fw xfer timeout". This is not going to slow us down on a successful load (the dma has to complete before fw init can start), but could slightly increase the timeout in case of a fw init error. v2: use _fw variants for uncore accesses (Chris), fix guc_fw status on failed wait. 
v3: use dev_err and print DMA_CTRL (Chris) Signed-off-by: Daniele Ceraolo Spurio Cc: Michal Wajdeczko Cc: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190725001813.4740-9-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c | 107 ++++++++---------------------- drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c | 57 +--------------- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 80 +++++++++++++++++----- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h | 4 +- 4 files changed, 93 insertions(+), 155 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c index 3ea0de6f4b73..28735c14b9a0 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c @@ -84,13 +84,6 @@ static void guc_xfer_rsa(struct intel_uc_fw *guc_fw, intel_uncore_write(uncore, UOS_RSA_SCRATCH(i), rsa[i]); } -static bool guc_xfer_completed(struct intel_uncore *uncore, u32 *status) -{ - /* Did we complete the xfer? */ - *status = intel_uncore_read(uncore, DMA_CTRL); - return !(*status & START_DMA); -} - /* * Read the GuC status register (GUC_STATUS) and store it in the * specified location; then return a boolean indicating whether @@ -137,65 +130,27 @@ static int guc_wait_ucode(struct intel_uncore *uncore) ret = -ENXIO; } - if (ret == 0 && !guc_xfer_completed(uncore, &status)) { - DRM_ERROR("GuC is ready, but the xfer %08x is incomplete\n", - status); - ret = -ENXIO; - } - return ret; } -/* - * Transfer the firmware image to RAM for execution by the microcontroller. +/** + * intel_guc_fw_upload() - load GuC uCode to device + * @guc: intel_guc structure * - * Architecturally, the DMA engine is bidirectional, and can potentially even - * transfer between GTT locations. This functionality is left out of the API - * for now as there is no need for it. - */ -static int guc_xfer_ucode(struct intel_uc_fw *guc_fw, - struct intel_gt *gt) -{ - struct intel_uncore *uncore = gt->uncore; - unsigned long offset; - - /* - * The header plus uCode will be copied to WOPCM via DMA, excluding any - * other components - */ - intel_uncore_write(uncore, DMA_COPY_SIZE, - guc_fw->header_size + guc_fw->ucode_size); - - /* Set the source address for the new blob */ - offset = intel_uc_fw_ggtt_offset(guc_fw, gt->ggtt) + guc_fw->header_offset; - intel_uncore_write(uncore, DMA_ADDR_0_LOW, lower_32_bits(offset)); - intel_uncore_write(uncore, DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF); - - /* - * Set the DMA destination. Current uCode expects the code to be - * loaded at 8k; locations below this are used for the stack. - */ - intel_uncore_write(uncore, DMA_ADDR_1_LOW, 0x2000); - intel_uncore_write(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM); - - /* Finally start the DMA */ - intel_uncore_write(uncore, DMA_CTRL, - _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA)); - - return guc_wait_ucode(uncore); -} -/* - * Load the GuC firmware blob into the MinuteIA. + * Called from intel_uc_init_hw() during driver load, resume from sleep and + * after a GPU reset. + * + * The firmware image should have already been fetched into memory, so only + * check that fetch succeeded, and then transfer the image to the h/w. 
+ * + * Return: non-zero code on error */ -static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct intel_gt *gt) +int intel_guc_fw_upload(struct intel_guc *guc) { + struct intel_gt *gt = guc_to_gt(guc); struct intel_uncore *uncore = gt->uncore; int ret; - GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC); - - intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); - guc_prepare_xfer(uncore); /* @@ -203,32 +158,24 @@ static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct intel_gt *gt) * by the DMA engine in one operation, whereas the RSA signature is * loaded via MMIO. */ - guc_xfer_rsa(guc_fw, uncore); + guc_xfer_rsa(&guc->fw, uncore); - ret = guc_xfer_ucode(guc_fw, gt); - - intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); + /* + * Current uCode expects the code to be loaded at 8k; locations below + * this are used for the stack. + */ + ret = intel_uc_fw_upload(&guc->fw, gt, 0x2000, UOS_MOVE); + if (ret) + goto out; - return ret; -} + ret = guc_wait_ucode(uncore); + if (ret) + goto out; -/** - * intel_guc_fw_upload() - load GuC uCode to device - * @guc: intel_guc structure - * - * Called from intel_uc_init_hw() during driver load, resume from sleep and - * after a GPU reset. - * - * The firmware image should have already been fetched into memory, so only - * check that fetch succeeded, and then transfer the image to the h/w. - * - * Return: non-zero code on error - */ -int intel_guc_fw_upload(struct intel_guc *guc) -{ - int ret = intel_uc_fw_upload(&guc->fw, guc_to_gt(guc), guc_fw_xfer); - if (!ret) - guc->fw.status = INTEL_UC_FIRMWARE_RUNNING; + guc->fw.status = INTEL_UC_FIRMWARE_RUNNING; + return 0; +out: + guc->fw.status = INTEL_UC_FIRMWARE_FAIL; return ret; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c index 2e7ac8863728..0e885859c828 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c @@ -34,60 +34,6 @@ void intel_huc_fw_init_early(struct intel_huc *huc) intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC, huc_to_gt(huc)->i915); } -/** - * huc_fw_xfer() - DMA's the firmware - * @huc_fw: the firmware descriptor - * - * Transfer the firmware image to RAM for execution by the microcontroller. - * - * Return: 0 on success, non-zero on failure - */ -static int huc_fw_xfer(struct intel_uc_fw *huc_fw, struct intel_gt *gt) -{ - struct intel_uncore *uncore = gt->uncore; - unsigned long offset = 0; - u32 size; - int ret; - - GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC); - - intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); - - /* Set the source address for the uCode */ - offset = intel_uc_fw_ggtt_offset(huc_fw, gt->ggtt) + - huc_fw->header_offset; - intel_uncore_write(uncore, DMA_ADDR_0_LOW, - lower_32_bits(offset)); - intel_uncore_write(uncore, DMA_ADDR_0_HIGH, - upper_32_bits(offset) & 0xFFFF); - - /* - * Hardware doesn't look at destination address for HuC. Set it to 0, - * but still program the correct address space. 
- */ - intel_uncore_write(uncore, DMA_ADDR_1_LOW, 0); - intel_uncore_write(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM); - - size = huc_fw->header_size + huc_fw->ucode_size; - intel_uncore_write(uncore, DMA_COPY_SIZE, size); - - /* Start the DMA */ - intel_uncore_write(uncore, DMA_CTRL, - _MASKED_BIT_ENABLE(HUC_UKERNEL | START_DMA)); - - /* Wait for DMA to finish */ - ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100); - - DRM_DEBUG_DRIVER("HuC DMA transfer wait over with ret %d\n", ret); - - /* Disable the bits once DMA is over */ - intel_uncore_write(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(HUC_UKERNEL)); - - intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); - - return ret; -} - /** * intel_huc_fw_upload() - load HuC uCode to device * @huc: intel_huc structure @@ -102,5 +48,6 @@ static int huc_fw_xfer(struct intel_uc_fw *huc_fw, struct intel_gt *gt) */ int intel_huc_fw_upload(struct intel_huc *huc) { - return intel_uc_fw_upload(&huc->fw, huc_to_gt(huc), huc_fw_xfer); + /* HW doesn't look at destination address for HuC, so set it to 0 */ + return intel_uc_fw_upload(&huc->fw, huc_to_gt(huc), 0, HUC_UKERNEL); } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c index 8d099dac0224..789b3d7228a4 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -321,13 +321,24 @@ fail: release_firmware(fw); /* OK even if fw is NULL */ } +static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw, struct i915_ggtt *ggtt) +{ + struct drm_mm_node *node = &ggtt->uc_fw; + + GEM_BUG_ON(!node->allocated); + GEM_BUG_ON(upper_32_bits(node->start)); + GEM_BUG_ON(upper_32_bits(node->start + node->size - 1)); + + return lower_32_bits(node->start); +} + static void intel_uc_fw_ggtt_bind(struct intel_uc_fw *uc_fw, struct intel_gt *gt) { struct drm_i915_gem_object *obj = uc_fw->obj; struct i915_ggtt *ggtt = gt->ggtt; struct i915_vma dummy = { - .node.start = intel_uc_fw_ggtt_offset(uc_fw, ggtt), + .node.start = uc_fw_ggtt_offset(uc_fw, ggtt), .node.size = obj->base.size, .pages = obj->mm.pages, .vm = &ggtt->vm, @@ -347,23 +358,69 @@ static void intel_uc_fw_ggtt_unbind(struct intel_uc_fw *uc_fw, { struct drm_i915_gem_object *obj = uc_fw->obj; struct i915_ggtt *ggtt = gt->ggtt; - u64 start = intel_uc_fw_ggtt_offset(uc_fw, ggtt); + u64 start = uc_fw_ggtt_offset(uc_fw, ggtt); ggtt->vm.clear_range(&ggtt->vm, start, obj->base.size); } +static int uc_fw_xfer(struct intel_uc_fw *uc_fw, struct intel_gt *gt, + u32 wopcm_offset, u32 dma_flags) +{ + struct intel_uncore *uncore = gt->uncore; + u64 offset; + int ret; + + intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); + + /* Set the source address for the uCode */ + offset = uc_fw_ggtt_offset(uc_fw, gt->ggtt) + uc_fw->header_offset; + GEM_BUG_ON(upper_32_bits(offset) & 0xFFFF0000); + intel_uncore_write_fw(uncore, DMA_ADDR_0_LOW, lower_32_bits(offset)); + intel_uncore_write_fw(uncore, DMA_ADDR_0_HIGH, upper_32_bits(offset)); + + /* Set the DMA destination */ + intel_uncore_write_fw(uncore, DMA_ADDR_1_LOW, wopcm_offset); + intel_uncore_write_fw(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM); + + /* + * Set the transfer size. 
The header plus uCode will be copied to WOPCM + * via DMA, excluding any other components + */ + intel_uncore_write_fw(uncore, DMA_COPY_SIZE, + uc_fw->header_size + uc_fw->ucode_size); + + /* Start the DMA */ + intel_uncore_write_fw(uncore, DMA_CTRL, + _MASKED_BIT_ENABLE(dma_flags | START_DMA)); + + /* Wait for DMA to finish */ + ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100); + if (ret) + dev_err(gt->i915->drm.dev, "DMA for %s fw failed, DMA_CTRL=%u\n", + intel_uc_fw_type_repr(uc_fw->type), + intel_uncore_read_fw(uncore, DMA_CTRL)); + + /* Disable the bits once DMA is over */ + intel_uncore_write_fw(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags)); + + intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); + + return ret; +} + /** * intel_uc_fw_upload - load uC firmware using custom loader * @uc_fw: uC firmware * @gt: the intel_gt structure - * @xfer: custom uC firmware loader function + * @wopcm_offset: destination offset in wopcm + * @dma_flags: flags for flags for dma ctrl * - * Loads uC firmware using custom loader and updates internal flags. + * Loads uC firmware and updates internal flags. * * Return: 0 on success, non-zero on failure. */ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, struct intel_gt *gt, - int (*xfer)(struct intel_uc_fw *uc_fw, struct intel_gt *gt)) + u32 wopcm_offset, u32 dma_flags) { int err; @@ -377,7 +434,7 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, struct intel_gt *gt, return -ENOEXEC; /* Call custom loader */ intel_uc_fw_ggtt_bind(uc_fw, gt); - err = xfer(uc_fw, gt); + err = uc_fw_xfer(uc_fw, gt, wopcm_offset, dma_flags); intel_uc_fw_ggtt_unbind(uc_fw, gt); if (err) goto fail; @@ -430,17 +487,6 @@ void intel_uc_fw_fini(struct intel_uc_fw *uc_fw) i915_gem_object_unpin_pages(uc_fw->obj); } -u32 intel_uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw, struct i915_ggtt *ggtt) -{ - struct drm_mm_node *node = &ggtt->uc_fw; - - GEM_BUG_ON(!node->allocated); - GEM_BUG_ON(upper_32_bits(node->start)); - GEM_BUG_ON(upper_32_bits(node->start + node->size - 1)); - - return lower_32_bits(node->start); -} - /** * intel_uc_fw_cleanup_fetch - cleanup uC firmware * diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h index a69b6f00fe16..ff684c0c808e 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h @@ -31,7 +31,6 @@ struct drm_printer; struct drm_i915_private; struct intel_gt; -struct i915_ggtt; /* Home of GuC, HuC and DMC firmwares */ #define INTEL_UC_FIRMWARE_URL "https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/tree/i915" @@ -174,10 +173,9 @@ void intel_uc_fw_fetch(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw); void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw); int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, struct intel_gt *gt, - int (*xfer)(struct intel_uc_fw *uc_fw, struct intel_gt *gt)); + u32 wopcm_offset, u32 dma_flags); int intel_uc_fw_init(struct intel_uc_fw *uc_fw); void intel_uc_fw_fini(struct intel_uc_fw *uc_fw); -u32 intel_uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw, struct i915_ggtt *ggtt); size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len); void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p); -- cgit v1.2.3 From 2a6fc3cb5cb68597f1072bfeef28d2ca02310220 Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Fri, 5 Jul 2019 18:11:39 +0300 Subject: drm/tegra: Fix gpiod_get_from_of_node() regression That function now returns ERR_PTR instead of 
NULL if "hpd-gpio" is not present in device-tree. The offending patch missed to adapt the Tegra's DRM driver for the API change. Fixes: 025bf37725f1 ("gpio: Fix return value mismatch of function gpiod_get_from_of_node()") Signed-off-by: Dmitry Osipenko Acked-by: Jon Hunter Signed-off-by: Thierry Reding --- drivers/gpu/drm/tegra/output.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c index 274cb955e2e1..bdcaa4c7168c 100644 --- a/drivers/gpu/drm/tegra/output.c +++ b/drivers/gpu/drm/tegra/output.c @@ -126,8 +126,12 @@ int tegra_output_probe(struct tegra_output *output) "nvidia,hpd-gpio", 0, GPIOD_IN, "HDMI hotplug detect"); - if (IS_ERR(output->hpd_gpio)) - return PTR_ERR(output->hpd_gpio); + if (IS_ERR(output->hpd_gpio)) { + if (PTR_ERR(output->hpd_gpio) != -ENOENT) + return PTR_ERR(output->hpd_gpio); + + output->hpd_gpio = NULL; + } if (output->hpd_gpio) { err = gpiod_to_irq(output->hpd_gpio); -- cgit v1.2.3 From 02712bc3250849c1cf99d626aea98f610e695f34 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 24 Jul 2019 08:52:53 +0200 Subject: mm/hmm: move hmm_vma_range_done and hmm_vma_fault to nouveau These two functions are marked as a legacy APIs to get rid of, but seem to suit the current nouveau flow. Move it to the only user in preparation for fixing a locking bug involving caller and callee. All comments referring to the old API have been removed as this now is a driver private helper. Link: https://lore.kernel.org/r/20190724065258.16603-3-hch@lst.de Tested-by: Ralph Campbell Signed-off-by: Christoph Hellwig Reviewed-by: Jason Gunthorpe Signed-off-by: Jason Gunthorpe --- drivers/gpu/drm/nouveau/nouveau_svm.c | 46 +++++++++++++++++++++++++++-- include/linux/hmm.h | 54 ----------------------------------- 2 files changed, 44 insertions(+), 56 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c index 8c92374afcf2..6c1b04de0db8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_svm.c +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c @@ -475,6 +475,48 @@ nouveau_svm_fault_cache(struct nouveau_svm *svm, fault->inst, fault->addr, fault->access); } +static inline bool +nouveau_range_done(struct hmm_range *range) +{ + bool ret = hmm_range_valid(range); + + hmm_range_unregister(range); + return ret; +} + +static int +nouveau_range_fault(struct hmm_mirror *mirror, struct hmm_range *range, + bool block) +{ + long ret; + + range->default_flags = 0; + range->pfn_flags_mask = -1UL; + + ret = hmm_range_register(range, mirror, + range->start, range->end, + PAGE_SHIFT); + if (ret) + return (int)ret; + + if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) { + up_read(&range->vma->vm_mm->mmap_sem); + return -EAGAIN; + } + + ret = hmm_range_fault(range, block); + if (ret <= 0) { + if (ret == -EBUSY || !ret) { + up_read(&range->vma->vm_mm->mmap_sem); + ret = -EBUSY; + } else if (ret == -EAGAIN) + ret = -EBUSY; + hmm_range_unregister(range); + return ret; + } + return 0; +} + static int nouveau_svm_fault(struct nvif_notify *notify) { @@ -649,10 +691,10 @@ nouveau_svm_fault(struct nvif_notify *notify) range.values = nouveau_svm_pfn_values; range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT; again: - ret = hmm_vma_fault(&svmm->mirror, &range, true); + ret = nouveau_range_fault(&svmm->mirror, &range, true); if (ret == 0) { mutex_lock(&svmm->mutex); - if (!hmm_vma_range_done(&range)) { + if 
(!nouveau_range_done(&range)) { mutex_unlock(&svmm->mutex); goto again; } diff --git a/include/linux/hmm.h b/include/linux/hmm.h index b8a08b2a10ca..7ef56dc18050 100644 --- a/include/linux/hmm.h +++ b/include/linux/hmm.h @@ -484,60 +484,6 @@ long hmm_range_dma_unmap(struct hmm_range *range, */ #define HMM_RANGE_DEFAULT_TIMEOUT 1000 -/* This is a temporary helper to avoid merge conflict between trees. */ -static inline bool hmm_vma_range_done(struct hmm_range *range) -{ - bool ret = hmm_range_valid(range); - - hmm_range_unregister(range); - return ret; -} - -/* This is a temporary helper to avoid merge conflict between trees. */ -static inline int hmm_vma_fault(struct hmm_mirror *mirror, - struct hmm_range *range, bool block) -{ - long ret; - - /* - * With the old API the driver must set each individual entries with - * the requested flags (valid, write, ...). So here we set the mask to - * keep intact the entries provided by the driver and zero out the - * default_flags. - */ - range->default_flags = 0; - range->pfn_flags_mask = -1UL; - - ret = hmm_range_register(range, mirror, - range->start, range->end, - PAGE_SHIFT); - if (ret) - return (int)ret; - - if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) { - /* - * The mmap_sem was taken by driver we release it here and - * returns -EAGAIN which correspond to mmap_sem have been - * drop in the old API. - */ - up_read(&range->vma->vm_mm->mmap_sem); - return -EAGAIN; - } - - ret = hmm_range_fault(range, block); - if (ret <= 0) { - if (ret == -EBUSY || !ret) { - /* Same as above, drop mmap_sem to match old API. */ - up_read(&range->vma->vm_mm->mmap_sem); - ret = -EBUSY; - } else if (ret == -EAGAIN) - ret = -EBUSY; - hmm_range_unregister(range); - return ret; - } - return 0; -} - /* Below are for HMM internal use only! Not to be used by device driver! */ static inline void hmm_mm_init(struct mm_struct *mm) { -- cgit v1.2.3 From 1b88b99bdff85e75df1cfaa418ed2ef8e1bffeb2 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 24 Jul 2019 08:52:56 +0200 Subject: nouveau: return -EBUSY when hmm_range_wait_until_valid fails -EAGAIN has a magic meaning for non-blocking faults, so don't overload it. Given that the caller doesn't check for specific error codes this change is purely cosmetic. Link: https://lore.kernel.org/r/20190724065258.16603-6-hch@lst.de Tested-by: Ralph Campbell Signed-off-by: Christoph Hellwig Reviewed-by: Jason Gunthorpe Signed-off-by: Jason Gunthorpe --- drivers/gpu/drm/nouveau/nouveau_svm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c index a835cebb6d90..545100f7c594 100644 --- a/drivers/gpu/drm/nouveau/nouveau_svm.c +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c @@ -502,7 +502,7 @@ nouveau_range_fault(struct hmm_mirror *mirror, struct hmm_range *range) if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) { up_read(&range->vma->vm_mm->mmap_sem); - return -EAGAIN; + return -EBUSY; } ret = hmm_range_fault(range, true); -- cgit v1.2.3 From f32471e2cf87112b8f5dc10469b27c39c1a41722 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 24 Jul 2019 08:52:57 +0200 Subject: mm/hmm: remove the legacy hmm_pfn_* APIs Switch the one remaining user in nouveau over to its replacement, and remove all the wrappers. 
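A hypothetical stand-alone sketch of the wrapper-removal pattern: the legacy alias only forwarded to the replacement, so the remaining caller is switched to the canonical name and the alias is deleted. The names below are illustrative, not the real HMM API.

#include <stdint.h>
#include <stdio.h>

struct range { uint64_t pfn; };

/* replacement API */
static uint64_t device_entry_to_pfn(const struct range *r)
{
	return r->pfn >> 1;	/* strip a flag bit, for illustration */
}

/* Legacy alias that the cleanup removes:
 * static inline uint64_t pfn_to_pfn(const struct range *r)
 * {
 *	return device_entry_to_pfn(r);
 * }
 */

int main(void)
{
	struct range r = { .pfn = 42u << 1 };

	/* the caller now uses the replacement directly */
	printf("%llu\n", (unsigned long long)device_entry_to_pfn(&r));
	return 0;
}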
Link: https://lore.kernel.org/r/20190724065258.16603-7-hch@lst.de Tested-by: Ralph Campbell Signed-off-by: Christoph Hellwig Reviewed-by: Ralph Campbell Reviewed-by: Jason Gunthorpe Signed-off-by: Jason Gunthorpe --- drivers/gpu/drm/nouveau/nouveau_dmem.c | 2 +- include/linux/hmm.h | 34 ---------------------------------- 2 files changed, 1 insertion(+), 35 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c index 1333220787a1..345c63cb752a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c @@ -845,7 +845,7 @@ nouveau_dmem_convert_pfn(struct nouveau_drm *drm, struct page *page; uint64_t addr; - page = hmm_pfn_to_page(range, range->pfns[i]); + page = hmm_device_entry_to_page(range, range->pfns[i]); if (page == NULL) continue; diff --git a/include/linux/hmm.h b/include/linux/hmm.h index 7ef56dc18050..9f32586684c9 100644 --- a/include/linux/hmm.h +++ b/include/linux/hmm.h @@ -290,40 +290,6 @@ static inline uint64_t hmm_device_entry_from_pfn(const struct hmm_range *range, range->flags[HMM_PFN_VALID]; } -/* - * Old API: - * hmm_pfn_to_page() - * hmm_pfn_to_pfn() - * hmm_pfn_from_page() - * hmm_pfn_from_pfn() - * - * This are the OLD API please use new API, it is here to avoid cross-tree - * merge painfullness ie we convert things to new API in stages. - */ -static inline struct page *hmm_pfn_to_page(const struct hmm_range *range, - uint64_t pfn) -{ - return hmm_device_entry_to_page(range, pfn); -} - -static inline unsigned long hmm_pfn_to_pfn(const struct hmm_range *range, - uint64_t pfn) -{ - return hmm_device_entry_to_pfn(range, pfn); -} - -static inline uint64_t hmm_pfn_from_page(const struct hmm_range *range, - struct page *page) -{ - return hmm_device_entry_from_page(range, page); -} - -static inline uint64_t hmm_pfn_from_pfn(const struct hmm_range *range, - unsigned long pfn) -{ - return hmm_device_entry_from_pfn(range, pfn); -} - /* * Mirroring: how to synchronize device page table with CPU page table. * -- cgit v1.2.3 From 5fbcf5015db8e9f04a9da6d40322622fa229da54 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 24 Jul 2019 08:52:54 +0200 Subject: nouveau: remove the block parameter to nouveau_range_fault The parameter is always false, so remove it as well as the -EAGAIN handling that can only happen for the non-blocking case. 
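To illustrate the refactoring pattern in isolation: when every call site passes the same value for a parameter, the parameter and any branch only it could reach can be folded away. The stand-alone sketch below uses invented names and is not nouveau code; it only mirrors the shape of the change.

#include <stdio.h>

/*
 * Illustrative only. Hypothetical "before": int do_fault(int addr, int block);
 * every caller passed the same value for 'block', so the parameter and the
 * branch guarded by it were dead in practice.
 */
static int do_fault(int addr)
{
	/* The former "if (!block) return -EAGAIN;" branch no longer exists. */
	printf("faulting address %d\n", addr);
	return 0;
}

int main(void)
{
	return do_fault(42);	/* was: do_fault(42, <constant>) */
}
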
Link: https://lore.kernel.org/r/20190724065258.16603-4-hch@lst.de Tested-by: Ralph Campbell Signed-off-by: Christoph Hellwig Reviewed-by: Jason Gunthorpe Signed-off-by: Jason Gunthorpe --- drivers/gpu/drm/nouveau/nouveau_svm.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c index 6c1b04de0db8..e3097492b4ad 100644 --- a/drivers/gpu/drm/nouveau/nouveau_svm.c +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c @@ -485,8 +485,7 @@ nouveau_range_done(struct hmm_range *range) } static int -nouveau_range_fault(struct hmm_mirror *mirror, struct hmm_range *range, - bool block) +nouveau_range_fault(struct hmm_mirror *mirror, struct hmm_range *range) { long ret; @@ -504,13 +503,12 @@ nouveau_range_fault(struct hmm_mirror *mirror, struct hmm_range *range, return -EAGAIN; } - ret = hmm_range_fault(range, block); + ret = hmm_range_fault(range, true); if (ret <= 0) { if (ret == -EBUSY || !ret) { up_read(&range->vma->vm_mm->mmap_sem); ret = -EBUSY; - } else if (ret == -EAGAIN) - ret = -EBUSY; + } hmm_range_unregister(range); return ret; } @@ -691,7 +689,7 @@ nouveau_svm_fault(struct nvif_notify *notify) range.values = nouveau_svm_pfn_values; range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT; again: - ret = nouveau_range_fault(&svmm->mirror, &range, true); + ret = nouveau_range_fault(&svmm->mirror, &range); if (ret == 0) { mutex_lock(&svmm->mutex); if (!nouveau_range_done(&range)) { -- cgit v1.2.3 From de4ee728465f7c0c29241550e083139b2ce9159c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 24 Jul 2019 08:52:55 +0200 Subject: nouveau: unlock mmap_sem on all errors from nouveau_range_fault Currently nouveau_svm_fault expects nouveau_range_fault to never unlock mmap_sem, but the latter unlocks it for a random selection of error codes. Fix this up by always unlocking mmap_sem for non-zero return values in nouveau_range_fault, and only unlocking it in the caller for successful returns. 
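The convention being adopted — the callee releases the semaphore on every failure, while the caller releases it only after a successful return — is easier to see in a small stand-alone sketch. This is plain userspace C with a pthread mutex standing in for mmap_sem; the names and error values are invented for illustration and none of it is driver code.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Callee: on any failure it drops the lock itself before returning. */
static int do_fault(int simulate_error)
{
	if (simulate_error) {
		pthread_mutex_unlock(&lock);	/* dropped on every error path */
		return -1;
	}
	return 0;	/* success: the caller still owns the lock */
}

int main(void)
{
	pthread_mutex_lock(&lock);
	if (do_fault(0) == 0) {
		/* ... use the data the lock protects ... */
		pthread_mutex_unlock(&lock);	/* caller unlocks only on success */
	}
	puts("done");
	return 0;
}

Keeping the ownership rule uniform like this avoids the double-unlock and missed-unlock cases that unlocking on only a subset of error codes invites.
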
Link: https://lore.kernel.org/r/20190724065258.16603-5-hch@lst.de Tested-by: Ralph Campbell Signed-off-by: Christoph Hellwig Reviewed-by: Jason Gunthorpe Signed-off-by: Jason Gunthorpe --- drivers/gpu/drm/nouveau/nouveau_svm.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c index e3097492b4ad..a835cebb6d90 100644 --- a/drivers/gpu/drm/nouveau/nouveau_svm.c +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c @@ -495,8 +495,10 @@ nouveau_range_fault(struct hmm_mirror *mirror, struct hmm_range *range) ret = hmm_range_register(range, mirror, range->start, range->end, PAGE_SHIFT); - if (ret) + if (ret) { + up_read(&range->vma->vm_mm->mmap_sem); return (int)ret; + } if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) { up_read(&range->vma->vm_mm->mmap_sem); @@ -505,10 +507,9 @@ nouveau_range_fault(struct hmm_mirror *mirror, struct hmm_range *range) ret = hmm_range_fault(range, true); if (ret <= 0) { - if (ret == -EBUSY || !ret) { - up_read(&range->vma->vm_mm->mmap_sem); + if (ret == 0) ret = -EBUSY; - } + up_read(&range->vma->vm_mm->mmap_sem); hmm_range_unregister(range); return ret; } @@ -706,8 +707,8 @@ again: NULL); svmm->vmm->vmm.object.client->super = false; mutex_unlock(&svmm->mutex); + up_read(&svmm->mm->mmap_sem); } - up_read(&svmm->mm->mmap_sem); /* Cancel any faults in the window whose pages didn't manage * to keep their valid bit, or stay writeable when required. -- cgit v1.2.3 From 199ddded380a99940b1f155e19cc885f57bfb852 Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Thu, 25 Jul 2019 14:13:07 +0000 Subject: drm/i915/uc: Update drawing for firmware layout Sphinx was rendering firmware layout as html table, but since we want to add sizes relations switch to plain text graphics. v2: also update text and do it before move (Daniele) Signed-off-by: Michal Wajdeczko Cc: Daniele Ceraolo Spurio Reviewed-by: Daniele Ceraolo Spurio Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190725141308.24660-2-michal.wajdeczko@intel.com --- Documentation/gpu/i915.rst | 12 +++++------ drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h | 31 +++++++++++++---------------- 2 files changed, 20 insertions(+), 23 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst index c2173d120492..3e233f9d675f 100644 --- a/Documentation/gpu/i915.rst +++ b/Documentation/gpu/i915.rst @@ -430,6 +430,12 @@ WOPCM Layout GuC === +Firmware Layout +------------------- + +.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h + :doc: Firmware Layout + GuC-specific firmware loader ---------------------------- @@ -445,12 +451,6 @@ GuC-based command submission .. kernel-doc:: drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c :internal: -GuC Firmware Layout -------------------- - -.. 
kernel-doc:: drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h - :doc: GuC Firmware Layout - GuC Address Space ----------------- diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h index 30cca3a29323..108b386c52ec 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h @@ -122,23 +122,20 @@ #define GUC_CTL_MAX_DWORDS (SOFT_SCRATCH_COUNT - 2) /* [1..14] */ /** - * DOC: GuC Firmware Layout + * DOC: Firmware Layout * - * The GuC firmware layout looks like this: + * The GuC/HuC firmware layout looks like this:: * - * +-------------------------------+ - * | uc_css_header | - * | | - * | contains major/minor version | - * +-------------------------------+ - * | uCode | - * +-------------------------------+ - * | RSA signature | - * +-------------------------------+ - * | modulus key | - * +-------------------------------+ - * | exponent val | - * +-------------------------------+ + * +======================================================================+ + * | Firmware blob | + * +===============+===============+============+============+============+ + * | CSS header | uCode | RSA key | modulus | exponent | + * +===============+===============+============+============+============+ + * <-header size-> <---header size continued -----------> + * <--- size -----------------------------------------------------------> + * <-key size-> + * <-mod size-> + * <-exp size-> * * The firmware may or may not have modulus key and exponent data. The header, * uCode and RSA signature are must-have components that will be used by driver. @@ -155,8 +152,8 @@ * 4. Modulus and exponent key are not required by driver. They may not appear * in fw. So driver will load a truncated firmware in this case. * - * HuC firmware layout is same as GuC firmware. - * Only HuC version information is saved in a different way. + * The only difference between GuC and HuC firmwares is how the version + * information is saved. */ struct uc_css_header { -- cgit v1.2.3 From abf30f235390b8da1cd28b73e7ec3f0ff96ac450 Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Thu, 25 Jul 2019 14:13:08 +0000 Subject: drm/i915/uc: Move uc firmware layout definitions to dedicated file Generic uc firmware layout definitions are unlikely to change and are separate to other GuC specific definitions. v2: reordered Signed-off-by: Michal Wajdeczko Cc: Daniele Ceraolo Spurio Reviewed-by: Daniele Ceraolo Spurio Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190725141308.24660-3-michal.wajdeczko@intel.com --- Documentation/gpu/i915.rst | 2 +- drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h | 67 ----------------------- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 1 + drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h | 80 ++++++++++++++++++++++++++++ 4 files changed, 82 insertions(+), 68 deletions(-) create mode 100644 drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h (limited to 'drivers/gpu/drm') diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst index 3e233f9d675f..0e322688be5c 100644 --- a/Documentation/gpu/i915.rst +++ b/Documentation/gpu/i915.rst @@ -433,7 +433,7 @@ GuC Firmware Layout ------------------- -.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h +.. 
kernel-doc:: drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h :doc: Firmware Layout GuC-specific firmware loader diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h index 108b386c52ec..06a9bdfb0faf 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h @@ -121,73 +121,6 @@ #define GUC_CTL_MAX_DWORDS (SOFT_SCRATCH_COUNT - 2) /* [1..14] */ -/** - * DOC: Firmware Layout - * - * The GuC/HuC firmware layout looks like this:: - * - * +======================================================================+ - * | Firmware blob | - * +===============+===============+============+============+============+ - * | CSS header | uCode | RSA key | modulus | exponent | - * +===============+===============+============+============+============+ - * <-header size-> <---header size continued -----------> - * <--- size -----------------------------------------------------------> - * <-key size-> - * <-mod size-> - * <-exp size-> - * - * The firmware may or may not have modulus key and exponent data. The header, - * uCode and RSA signature are must-have components that will be used by driver. - * Length of each components, which is all in dwords, can be found in header. - * In the case that modulus and exponent are not present in fw, a.k.a truncated - * image, the length value still appears in header. - * - * Driver will do some basic fw size validation based on the following rules: - * - * 1. Header, uCode and RSA are must-have components. - * 2. All firmware components, if they present, are in the sequence illustrated - * in the layout table above. - * 3. Length info of each component can be found in header, in dwords. - * 4. Modulus and exponent key are not required by driver. They may not appear - * in fw. So driver will load a truncated firmware in this case. - * - * The only difference between GuC and HuC firmwares is how the version - * information is saved. - */ - -struct uc_css_header { - u32 module_type; - /* header_size includes all non-uCode bits, including css_header, rsa - * key, modulus key and exponent data. */ - u32 header_size_dw; - u32 header_version; - u32 module_id; - u32 module_vendor; - u32 date; -#define CSS_DATE_DAY (0xFF << 0) -#define CSS_DATE_MONTH (0xFF << 8) -#define CSS_DATE_YEAR (0xFFFF << 16) - u32 size_dw; /* uCode plus header_size_dw */ - u32 key_size_dw; - u32 modulus_size_dw; - u32 exponent_size_dw; - u32 time; -#define CSS_TIME_HOUR (0xFF << 0) -#define CSS_DATE_MIN (0xFF << 8) -#define CSS_DATE_SEC (0xFFFF << 16) - char username[8]; - char buildnumber[12]; - u32 sw_version; -#define CSS_SW_VERSION_GUC_MAJOR (0xFF << 16) -#define CSS_SW_VERSION_GUC_MINOR (0xFF << 8) -#define CSS_SW_VERSION_GUC_PATCH (0xFF << 0) -#define CSS_SW_VERSION_HUC_MAJOR (0xFFFF << 16) -#define CSS_SW_VERSION_HUC_MINOR (0xFFFF << 0) - u32 reserved[14]; - u32 header_info; -} __packed; - /* Work item for submitting workloads into work queue of GuC. 
*/ struct guc_wq_item { u32 header; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c index 789b3d7228a4..168d368bcd3e 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -27,6 +27,7 @@ #include #include "intel_uc_fw.h" +#include "intel_uc_fw_abi.h" #include "i915_drv.h" /* diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h new file mode 100644 index 000000000000..545e86c52a9e --- /dev/null +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef _INTEL_UC_FW_ABI_H +#define _INTEL_UC_FW_ABI_H + +#include + +/** + * DOC: Firmware Layout + * + * The GuC/HuC firmware layout looks like this:: + * + * +======================================================================+ + * | Firmware blob | + * +===============+===============+============+============+============+ + * | CSS header | uCode | RSA key | modulus | exponent | + * +===============+===============+============+============+============+ + * <-header size-> <---header size continued -----------> + * <--- size -----------------------------------------------------------> + * <-key size-> + * <-mod size-> + * <-exp size-> + * + * The firmware may or may not have modulus key and exponent data. The header, + * uCode and RSA signature are must-have components that will be used by driver. + * Length of each components, which is all in dwords, can be found in header. + * In the case that modulus and exponent are not present in fw, a.k.a truncated + * image, the length value still appears in header. + * + * Driver will do some basic fw size validation based on the following rules: + * + * 1. Header, uCode and RSA are must-have components. + * 2. All firmware components, if they present, are in the sequence illustrated + * in the layout table above. + * 3. Length info of each component can be found in header, in dwords. + * 4. Modulus and exponent key are not required by driver. They may not appear + * in fw. So driver will load a truncated firmware in this case. + * + * The only difference between GuC and HuC firmwares is how the version + * information is saved. + */ + +struct uc_css_header { + u32 module_type; + /* + * header_size includes all non-uCode bits, including css_header, rsa + * key, modulus key and exponent data. 
+ */ + u32 header_size_dw; + u32 header_version; + u32 module_id; + u32 module_vendor; + u32 date; +#define CSS_DATE_DAY (0xFF << 0) +#define CSS_DATE_MONTH (0xFF << 8) +#define CSS_DATE_YEAR (0xFFFF << 16) + u32 size_dw; /* uCode plus header_size_dw */ + u32 key_size_dw; + u32 modulus_size_dw; + u32 exponent_size_dw; + u32 time; +#define CSS_TIME_HOUR (0xFF << 0) +#define CSS_DATE_MIN (0xFF << 8) +#define CSS_DATE_SEC (0xFFFF << 16) + char username[8]; + char buildnumber[12]; + u32 sw_version; +#define CSS_SW_VERSION_GUC_MAJOR (0xFF << 16) +#define CSS_SW_VERSION_GUC_MINOR (0xFF << 8) +#define CSS_SW_VERSION_GUC_PATCH (0xFF << 0) +#define CSS_SW_VERSION_HUC_MAJOR (0xFFFF << 16) +#define CSS_SW_VERSION_HUC_MINOR (0xFFFF << 0) + u32 reserved[14]; + u32 header_info; +} __packed; + +#endif /* _INTEL_UC_FW_ABI_H */ -- cgit v1.2.3 From 79c7a28e1f3a74b95ae2eae36ed0046fc8e6c7fd Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 25 Jul 2019 23:38:43 +0100 Subject: drm/i915: Capture vma contents outside of spinlock Currently we use the engine->active.lock to ensure that the request is not retired as we capture the data. However, we only need to ensure that the vma are not removed prior to use acquiring their contents, and since we have already relinquished our stop-machine protection, we assume that the user will not be overwriting the contents before we are able to record them. In order to capture the vma outside of the spinlock, we acquire a reference and mark the vma as active to prevent it from being unbound. However, since it is tricky allocate an entry in the fence tree (doing so would require taking a mutex) while inside the engine spinlock, we use an atomic bit and special case the handling for i915_active_wait. The core benefit is that we can use some non-atomic methods for mapping the device pages, we can remove the slow compression phase out of atomic context (i.e. stop antagonising the nmi-watchdog), and no we longer need large reserves of atomic pages. 
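The structure of the change — pin the objects and build a private list while the spinlock is held, then perform the slow copying after the lock is dropped — can be sketched in a few lines of stand-alone C. Everything below (the types, the refcounting, the mutex used in place of the engine spinlock) is invented for illustration and is not the i915 implementation.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct buf {
	int refcount;	/* stands in for the vma's active reference */
	int payload;
};

struct capture {
	struct buf *obj;
	struct capture *next;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

int main(void)
{
	struct buf bufs[3] = { { 1, 10 }, { 1, 20 }, { 1, 30 } };
	struct capture *list = NULL;

	/* Phase 1: under the lock, only grab references and queue the work. */
	pthread_mutex_lock(&lock);
	for (int i = 0; i < 3; i++) {
		struct capture *c = malloc(sizeof(*c));
		if (!c)
			continue;	/* skip this object on allocation failure */
		bufs[i].refcount++;	/* keeps the object alive after unlock */
		c->obj = &bufs[i];
		c->next = list;
		list = c;
	}
	pthread_mutex_unlock(&lock);

	/* Phase 2: outside the lock, do the expensive part (here: printing). */
	while (list) {
		struct capture *c = list;

		printf("captured payload %d\n", c->obj->payload);
		c->obj->refcount--;	/* release the reference taken above */
		list = c->next;
		free(c);
	}
	return 0;
}
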
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=111215 Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190725223843.8971-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_active.c | 34 +++++++++- drivers/gpu/drm/i915/i915_active.h | 3 + drivers/gpu/drm/i915/i915_active_types.h | 3 + drivers/gpu/drm/i915/i915_gpu_error.c | 112 +++++++++++++++++++++---------- 4 files changed, 117 insertions(+), 35 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c index 13f304a29fc8..22341c62c204 100644 --- a/drivers/gpu/drm/i915/i915_active.c +++ b/drivers/gpu/drm/i915/i915_active.c @@ -196,6 +196,7 @@ void __i915_active_init(struct drm_i915_private *i915, debug_active_init(ref); ref->i915 = i915; + ref->flags = 0; ref->active = active; ref->retire = retire; ref->tree = RB_ROOT; @@ -262,6 +263,34 @@ void i915_active_release(struct i915_active *ref) active_retire(ref); } +static void __active_ungrab(struct i915_active *ref) +{ + clear_and_wake_up_bit(I915_ACTIVE_GRAB_BIT, &ref->flags); +} + +bool i915_active_trygrab(struct i915_active *ref) +{ + debug_active_assert(ref); + + if (test_and_set_bit(I915_ACTIVE_GRAB_BIT, &ref->flags)) + return false; + + if (!atomic_add_unless(&ref->count, 1, 0)) { + __active_ungrab(ref); + return false; + } + + return true; +} + +void i915_active_ungrab(struct i915_active *ref) +{ + GEM_BUG_ON(!test_bit(I915_ACTIVE_GRAB_BIT, &ref->flags)); + + active_retire(ref); + __active_ungrab(ref); +} + int i915_active_wait(struct i915_active *ref) { struct active_node *it, *n; @@ -270,7 +299,7 @@ int i915_active_wait(struct i915_active *ref) might_sleep(); might_lock(&ref->mutex); - if (RB_EMPTY_ROOT(&ref->tree)) + if (i915_active_is_idle(ref)) return 0; err = mutex_lock_interruptible(&ref->mutex); @@ -292,6 +321,9 @@ int i915_active_wait(struct i915_active *ref) if (err) return err; + if (wait_on_bit(&ref->flags, I915_ACTIVE_GRAB_BIT, TASK_KILLABLE)) + return -EINTR; + if (!i915_active_is_idle(ref)) return -EBUSY; diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h index 134166d31251..ba68b077ec6c 100644 --- a/drivers/gpu/drm/i915/i915_active.h +++ b/drivers/gpu/drm/i915/i915_active.h @@ -395,6 +395,9 @@ int i915_active_acquire(struct i915_active *ref); void i915_active_release(struct i915_active *ref); void __i915_active_release_nested(struct i915_active *ref, int subclass); +bool i915_active_trygrab(struct i915_active *ref); +void i915_active_ungrab(struct i915_active *ref); + static inline bool i915_active_is_idle(const struct i915_active *ref) { diff --git a/drivers/gpu/drm/i915/i915_active_types.h b/drivers/gpu/drm/i915/i915_active_types.h index 5b0a3024ce24..74743dd0d5f0 100644 --- a/drivers/gpu/drm/i915/i915_active_types.h +++ b/drivers/gpu/drm/i915/i915_active_types.h @@ -36,6 +36,9 @@ struct i915_active { struct mutex mutex; atomic_t count; + unsigned long flags; +#define I915_ACTIVE_GRAB_BIT 0 + int (*active)(struct i915_active *ref); void (*retire)(struct i915_active *ref); diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 56dfc2650836..674d341a23f6 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -298,7 +298,7 @@ static void *compress_next_page(struct compress *c, if (dst->page_count >= dst->num_pages) return ERR_PTR(-ENOSPC); - page = pool_alloc(&c->pool, ATOMIC_MAYFAIL); + page = 
pool_alloc(&c->pool, ALLOW_FAIL); if (!page) return ERR_PTR(-ENOMEM); @@ -327,8 +327,6 @@ static int compress_page(struct compress *c, if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK) return -EIO; - - touch_nmi_watchdog(); } while (zstream->avail_in); /* Fallback to uncompressed if we increase size? */ @@ -407,7 +405,7 @@ static int compress_page(struct compress *c, { void *ptr; - ptr = pool_alloc(&c->pool, ATOMIC_MAYFAIL); + ptr = pool_alloc(&c->pool, ALLOW_FAIL); if (!ptr) return -ENOMEM; @@ -1001,12 +999,14 @@ i915_error_object_create(struct drm_i915_private *i915, dma_addr_t dma; int ret; + might_sleep(); + if (!vma || !vma->pages) return NULL; num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT; num_pages = DIV_ROUND_UP(10 * num_pages, 8); /* worstcase zlib growth */ - dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), ATOMIC_MAYFAIL); + dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), ALLOW_FAIL); if (!dst) return NULL; @@ -1027,9 +1027,9 @@ i915_error_object_create(struct drm_i915_private *i915, ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0); - s = io_mapping_map_atomic_wc(&ggtt->iomap, slot); + s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE); ret = compress_page(compress, (void __force *)s, dst); - io_mapping_unmap_atomic(s); + io_mapping_unmap(s); if (ret) break; } @@ -1302,10 +1302,42 @@ static void record_context(struct drm_i915_error_context *e, e->active = atomic_read(&ctx->active_count); } -static void +struct capture_vma { + struct capture_vma *next; + void **slot; +}; + +static struct capture_vma * +capture_vma(struct capture_vma *next, + struct i915_vma *vma, + struct drm_i915_error_object **out) +{ + struct capture_vma *c; + + *out = NULL; + if (!vma) + return next; + + c = kmalloc(sizeof(*c), ATOMIC_MAYFAIL); + if (!c) + return next; + + if (!i915_active_trygrab(&vma->active)) { + kfree(c); + return next; + } + + c->slot = (void **)out; + *c->slot = i915_vma_get(vma); + + c->next = next; + return c; +} + +static struct capture_vma * request_record_user_bo(struct i915_request *request, struct drm_i915_error_engine *ee, - struct compress *compress) + struct capture_vma *capture) { struct i915_capture_list *c; struct drm_i915_error_object **bo; @@ -1315,7 +1347,7 @@ request_record_user_bo(struct i915_request *request, for (c = request->capture_list; c; c = c->next) max++; if (!max) - return; + return capture; bo = kmalloc_array(max, sizeof(*bo), ATOMIC_MAYFAIL); if (!bo) { @@ -1324,21 +1356,19 @@ request_record_user_bo(struct i915_request *request, bo = kmalloc_array(max, sizeof(*bo), ATOMIC_MAYFAIL); } if (!bo) - return; + return capture; count = 0; for (c = request->capture_list; c; c = c->next) { - bo[count] = i915_error_object_create(request->i915, - c->vma, - compress); - if (!bo[count]) - break; + capture = capture_vma(capture, c->vma, &bo[count]); if (++count == max) break; } ee->user_bo = bo; ee->user_bo_count = count; + + return capture; } static struct drm_i915_error_object * @@ -1369,6 +1399,7 @@ gem_record_rings(struct i915_gpu_state *error, struct compress *compress) for (i = 0; i < I915_NUM_ENGINES; i++) { struct intel_engine_cs *engine = i915->engine[i]; struct drm_i915_error_engine *ee = &error->engine[i]; + struct capture_vma *capture = NULL; struct i915_request *request; unsigned long flags; @@ -1393,26 +1424,29 @@ gem_record_rings(struct i915_gpu_state *error, struct compress *compress) record_context(&ee->context, ctx); - /* We need to copy these to an anonymous buffer + /* + * We need to copy these to an 
anonymous buffer * as the simplest method to avoid being overwritten * by userspace. */ - ee->batchbuffer = - i915_error_object_create(i915, - request->batch, - compress); + capture = capture_vma(capture, + request->batch, + &ee->batchbuffer); if (HAS_BROKEN_CS_TLB(i915)) - ee->wa_batchbuffer = - i915_error_object_create(i915, - engine->gt->scratch, - compress); - request_record_user_bo(request, ee, compress); + capture = capture_vma(capture, + engine->gt->scratch, + &ee->wa_batchbuffer); + + capture = request_record_user_bo(request, ee, capture); - ee->ctx = - i915_error_object_create(i915, - request->hw_context->state, - compress); + capture = capture_vma(capture, + request->hw_context->state, + &ee->ctx); + + capture = capture_vma(capture, + ring->vma, + &ee->ringbuffer); error->simulated |= i915_gem_context_no_error_capture(ctx); @@ -1423,15 +1457,25 @@ gem_record_rings(struct i915_gpu_state *error, struct compress *compress) ee->cpu_ring_head = ring->head; ee->cpu_ring_tail = ring->tail; - ee->ringbuffer = - i915_error_object_create(i915, - ring->vma, - compress); engine_record_requests(engine, request, ee); } spin_unlock_irqrestore(&engine->active.lock, flags); + while (capture) { + struct capture_vma *this = capture; + struct i915_vma *vma = *this->slot; + + *this->slot = + i915_error_object_create(i915, vma, compress); + + i915_active_ungrab(&vma->active); + i915_vma_put(vma); + + capture = this->next; + kfree(this); + } + ee->hws_page = i915_error_object_create(i915, engine->status_page.vma, -- cgit v1.2.3 From 97dee74bb34a1364c6b3f3f2e3f31165063d3c6e Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Thu, 25 Jul 2019 21:03:14 +0000 Subject: drm/i915/uc: Reorder params in intel_uc_fw_fetch All intel_uc_fw_* functions are taking uc_fw as first param except intel_uc_fw_fetch() which is taking i915. Fix that. Signed-off-by: Michal Wajdeczko Cc: Daniele Ceraolo Spurio Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190725210314.21188-1-michal.wajdeczko@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_uc.c | 4 ++-- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 9 +++------ drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h | 4 ++-- 3 files changed, 7 insertions(+), 10 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index b1815abecf30..8205b3c81048 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -355,10 +355,10 @@ void intel_uc_fetch_firmwares(struct intel_uc *uc) if (!intel_uc_is_using_guc(uc)) return; - intel_uc_fw_fetch(i915, &uc->guc.fw); + intel_uc_fw_fetch(&uc->guc.fw, i915); if (intel_uc_is_using_huc(uc)) - intel_uc_fw_fetch(i915, &uc->huc.fw); + intel_uc_fw_fetch(&uc->huc.fw, i915); } void intel_uc_cleanup_firmwares(struct intel_uc *uc) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c index 168d368bcd3e..5fbdd17a864b 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -190,10 +190,8 @@ void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw, * * Fetch uC firmware into GEM obj. 
*/ -void intel_uc_fw_fetch(struct drm_i915_private *dev_priv, - struct intel_uc_fw *uc_fw) +void intel_uc_fw_fetch(struct intel_uc_fw *uc_fw, struct drm_i915_private *i915) { - struct pci_dev *pdev = dev_priv->drm.pdev; struct drm_i915_gem_object *obj; const struct firmware *fw = NULL; struct uc_css_header *css; @@ -202,7 +200,7 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv, GEM_BUG_ON(!intel_uc_fw_supported(uc_fw)); - err = request_firmware(&fw, uc_fw->path, &pdev->dev); + err = request_firmware(&fw, uc_fw->path, i915->drm.dev); if (err) goto fail; @@ -295,8 +293,7 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv, goto fail; } - obj = i915_gem_object_create_shmem_from_data(dev_priv, - fw->data, fw->size); + obj = i915_gem_object_create_shmem_from_data(i915, fw->data, fw->size); if (IS_ERR(obj)) { err = PTR_ERR(obj); DRM_DEBUG_DRIVER("%s fw object_create err=%d\n", diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h index ff684c0c808e..eddbb237fabe 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h @@ -169,8 +169,8 @@ static inline u32 intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw) void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type, struct drm_i915_private *i915); -void intel_uc_fw_fetch(struct drm_i915_private *i915, - struct intel_uc_fw *uc_fw); +void intel_uc_fw_fetch(struct intel_uc_fw *uc_fw, + struct drm_i915_private *i915); void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw); int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, struct intel_gt *gt, u32 wopcm_offset, u32 dma_flags); -- cgit v1.2.3 From 737298d18836fd14b8820de6504536c998986bcd Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Sun, 21 Jul 2019 16:41:37 -0500 Subject: drm/amdkfd: Fix missing break in switch statement Add missing break statement in order to prevent the code from falling through to case CHIP_NAVI10. This bug was found thanks to the ongoing efforts to enable -Wimplicit-fallthrough. Fixes: 14328aa58ce5 ("drm/amdkfd: Add navi10 support to amdkfd. (v3)") Cc: stable@vger.kernel.org Reviewed-by: Alex Deucher Signed-off-by: Gustavo A. R. Silva --- drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 792371442195..4e3fc284f6ac 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -668,6 +668,7 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev, case CHIP_RAVEN: pcache_info = raven_cache_info; num_of_cache_types = ARRAY_SIZE(raven_cache_info); + break; case CHIP_NAVI10: pcache_info = navi10_cache_info; num_of_cache_types = ARRAY_SIZE(navi10_cache_info); -- cgit v1.2.3 From d64062b57eeb58d4928aed945515bf53f7944913 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Sun, 21 Jul 2019 17:37:33 -0500 Subject: drm/amdgpu/gfx10: Fix missing break in switch statement Add missing break statement in order to prevent the code from falling through to case AMDGPU_IRQ_STATE_ENABLE. This bug was found thanks to the ongoing efforts to enable -Wimplicit-fallthrough. Fixes: a644d85a5cd4 ("drm/amdgpu: add gfx v10 implementation (v10)") Cc: stable@vger.kernel.org Reviewed-by: Alex Deucher Signed-off-by: Gustavo A. R. 
Silva --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 1675d5837c3c..35e8e29139b1 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -4611,6 +4611,7 @@ gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, TIME_STAMP_INT_ENABLE, 0); WREG32(cp_int_cntl_reg, cp_int_cntl); + break; case AMDGPU_IRQ_STATE_ENABLE: cp_int_cntl = RREG32(cp_int_cntl_reg); cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, -- cgit v1.2.3 From 12fce1ab4ad97773a19b7de4f5f4953cb74e9881 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Mon, 22 Jul 2019 11:26:31 -0500 Subject: drm/amdkfd/kfd_mqd_manager_v10: Avoid fall-through warning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In preparation to enabling -Wimplicit-fallthrough, this patch silences the following warning: drivers/gpu/drm/amd/amdgpu/../amdkfd/kfd_mqd_manager_v10.c: In function ‘mqd_manager_init_v10’: ./include/linux/dynamic_debug.h:122:52: warning: this statement may fall through [-Wimplicit-fallthrough=] #define __dynamic_func_call(id, fmt, func, ...) do { \ ^ ./include/linux/dynamic_debug.h:143:2: note: in expansion of macro ‘__dynamic_func_call’ __dynamic_func_call(__UNIQUE_ID(ddebug), fmt, func, ##__VA_ARGS__) ^~~~~~~~~~~~~~~~~~~ ./include/linux/dynamic_debug.h:153:2: note: in expansion of macro ‘_dynamic_func_call’ _dynamic_func_call(fmt, __dynamic_pr_debug, \ ^~~~~~~~~~~~~~~~~~ ./include/linux/printk.h:336:2: note: in expansion of macro ‘dynamic_pr_debug’ dynamic_pr_debug(fmt, ##__VA_ARGS__) ^~~~~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/../amdkfd/kfd_mqd_manager_v10.c:432:3: note: in expansion of macro ‘pr_debug’ pr_debug("%s@%i\n", __func__, __LINE__); ^~~~~~~~ drivers/gpu/drm/amd/amdgpu/../amdkfd/kfd_mqd_manager_v10.c:433:2: note: here case KFD_MQD_TYPE_COMPUTE: ^~~~ by removing the call to pr_debug() in KFD_MQD_TYPE_CP: "The mqd init for CP and COMPUTE will have the same routine." [1] This bug was found thanks to the ongoing efforts to enable -Wimplicit-fallthrough. [1] https://lore.kernel.org/lkml/c735a1cc-a545-50fb-44e7-c0ad93ee8ee7@amd.com/ Reviewed-by: Alex Deucher Signed-off-by: Gustavo A. R. Silva --- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c index 4f8a6ffc5775..9cd3eb2d90bd 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c @@ -429,7 +429,6 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type, switch (type) { case KFD_MQD_TYPE_CP: - pr_debug("%s@%i\n", __func__, __LINE__); case KFD_MQD_TYPE_COMPUTE: pr_debug("%s@%i\n", __func__, __LINE__); mqd->allocate_mqd = allocate_mqd; -- cgit v1.2.3 From 9e87891799dc4b203ad680ff431bfcce679c89be Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Thu, 25 Jul 2019 19:13:51 -0500 Subject: drm/amd/display: Mark expected switch fall-throughs In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Warning level 3 was used: -Wimplicit-fallthrough=3 This patch is part of the ongoing efforts to enable -Wimplicit-fallthrough. Signed-off-by: Gustavo A. R. 
Silva --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c index 51a3dfe97f0e..31aa6ee5cd5b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c @@ -102,14 +102,19 @@ void dccg2_init(struct dccg *dccg) switch (dccg_dcn->base.ctx->dc->res_pool->pipe_count) { case 6: REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[5], 1); + /* Fall through */ case 5: REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[4], 1); + /* Fall through */ case 4: REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[3], 1); + /* Fall through */ case 3: REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[2], 1); + /* Fall through */ case 2: REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[1], 1); + /* Fall through */ case 1: REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[0], 1); break; -- cgit v1.2.3 From 2defb94edb44784b0b5064633e05c97fdb1b0e0f Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Mon, 22 Jul 2019 13:03:46 -0500 Subject: drm/i915: Mark expected switch fall-throughs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. This patch fixes the following warnings: drivers/gpu/drm/i915/gem/i915_gem_mman.c: In function ‘i915_gem_fault’: drivers/gpu/drm/i915/gem/i915_gem_mman.c:342:6: warning: this statement may fall through [-Wimplicit-fallthrough=] if (!i915_terminally_wedged(i915)) ^ drivers/gpu/drm/i915/gem/i915_gem_mman.c:345:2: note: here case -EAGAIN: ^~~~ drivers/gpu/drm/i915/gem/i915_gem_pages.c: In function ‘i915_gem_object_map’: ./include/linux/compiler.h:78:22: warning: this statement may fall through [-Wimplicit-fallthrough=] # define unlikely(x) __builtin_expect(!!(x), 0) ^~~~~~~~~~~~~~~~~~~~~~~~~~ ./include/asm-generic/bug.h:136:2: note: in expansion of macro ‘unlikely’ unlikely(__ret_warn_on); \ ^~~~~~~~ drivers/gpu/drm/i915/i915_utils.h:49:25: note: in expansion of macro ‘WARN’ #define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \ ^~~~ drivers/gpu/drm/i915/gem/i915_gem_pages.c:270:3: note: in expansion of macro ‘MISSING_CASE’ MISSING_CASE(type); ^~~~~~~~~~~~ drivers/gpu/drm/i915/gem/i915_gem_pages.c:272:2: note: here case I915_MAP_WB: ^~~~ drivers/gpu/drm/i915/i915_gpu_error.c: In function ‘error_record_engine_registers’: ./include/linux/compiler.h:78:22: warning: this statement may fall through [-Wimplicit-fallthrough=] # define unlikely(x) __builtin_expect(!!(x), 0) ^~~~~~~~~~~~~~~~~~~~~~~~~~ ./include/asm-generic/bug.h:136:2: note: in expansion of macro ‘unlikely’ unlikely(__ret_warn_on); \ ^~~~~~~~ drivers/gpu/drm/i915/i915_utils.h:49:25: note: in expansion of macro ‘WARN’ #define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \ ^~~~ drivers/gpu/drm/i915/i915_gpu_error.c:1196:5: note: in expansion of macro ‘MISSING_CASE’ MISSING_CASE(engine->id); ^~~~~~~~~~~~ drivers/gpu/drm/i915/i915_gpu_error.c:1197:4: note: here case RCS0: ^~~~ drivers/gpu/drm/i915/display/intel_dp.c: In function ‘intel_dp_get_fia_supported_lane_count’: ./include/linux/compiler.h:78:22: warning: this statement may fall through [-Wimplicit-fallthrough=] # define unlikely(x) __builtin_expect(!!(x), 0) ^~~~~~~~~~~~~~~~~~~~~~~~~~ ./include/asm-generic/bug.h:136:2: note: in expansion of macro ‘unlikely’ unlikely(__ret_warn_on); \ ^~~~~~~~ 
drivers/gpu/drm/i915/i915_utils.h:49:25: note: in expansion of macro ‘WARN’ #define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \ ^~~~ drivers/gpu/drm/i915/display/intel_dp.c:233:3: note: in expansion of macro ‘MISSING_CASE’ MISSING_CASE(lane_info); ^~~~~~~~~~~~ drivers/gpu/drm/i915/display/intel_dp.c:234:2: note: here case 1: ^~~~ drivers/gpu/drm/i915/display/intel_display.c: In function ‘check_digital_port_conflicts’: CC [M] drivers/gpu/drm/nouveau/nvkm/engine/disp/cursgv100.o drivers/gpu/drm/i915/display/intel_display.c:12043:7: warning: this statement may fall through [-Wimplicit-fallthrough=] if (WARN_ON(!HAS_DDI(to_i915(dev)))) ^ drivers/gpu/drm/i915/display/intel_display.c:12046:3: note: here case INTEL_OUTPUT_DP: ^~~~ Also, notice that the Makefile is modified to stop ignoring fall-through warnings. The -Wimplicit-fallthrough option will be enabled globally in v5.3. Warning level 3 was used: -Wimplicit-fallthrough=3 This patch is part of the ongoing efforts to enable -Wimplicit-fallthrough. Reviewed-by: Kees Cook Signed-off-by: Gustavo A. R. Silva --- drivers/gpu/drm/i915/Makefile | 1 - drivers/gpu/drm/i915/display/intel_display.c | 2 +- drivers/gpu/drm/i915/display/intel_dp.c | 1 + drivers/gpu/drm/i915/gem/i915_gem_mman.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_pages.c | 2 +- drivers/gpu/drm/i915/i915_gpu_error.c | 1 + 6 files changed, 5 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 91355c2ea8a5..8cace65f50ce 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -16,7 +16,6 @@ subdir-ccflags-y := -Wall -Wextra subdir-ccflags-y += $(call cc-disable-warning, unused-parameter) subdir-ccflags-y += $(call cc-disable-warning, type-limits) subdir-ccflags-y += $(call cc-disable-warning, missing-field-initializers) -subdir-ccflags-y += $(call cc-disable-warning, implicit-fallthrough) subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable) # clang warnings subdir-ccflags-y += $(call cc-disable-warning, sign-compare) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 8592a7d422de..30b97ded6fdd 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -12042,7 +12042,7 @@ static bool check_digital_port_conflicts(struct intel_atomic_state *state) case INTEL_OUTPUT_DDI: if (WARN_ON(!HAS_DDI(to_i915(dev)))) break; - /* else: fall through */ + /* else, fall through */ case INTEL_OUTPUT_DP: case INTEL_OUTPUT_HDMI: case INTEL_OUTPUT_EDP: diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 4336df46fe78..d0fc34826771 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -231,6 +231,7 @@ static int intel_dp_get_fia_supported_lane_count(struct intel_dp *intel_dp) switch (lane_info) { default: MISSING_CASE(lane_info); + /* fall through */ case 1: case 2: case 4: diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index 391621ee3cbb..39a661927d8e 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -341,7 +341,7 @@ err: */ if (!i915_terminally_wedged(i915)) return VM_FAULT_SIGBUS; - /* else: fall through */ + /* else, fall through */ case -EAGAIN: /* * EAGAIN means the gpu is hung and we'll wait for the error diff --git 
a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index b36ad269f4ea..65eb430cedba 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -268,7 +268,7 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj, switch (type) { default: MISSING_CASE(type); - /* fallthrough to use PAGE_KERNEL anyway */ + /* fallthrough - to use PAGE_KERNEL anyway */ case I915_MAP_WB: pgprot = PAGE_KERNEL; break; diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index b7e9fddef270..41a511d5267f 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -1194,6 +1194,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error, switch (engine->id) { default: MISSING_CASE(engine->id); + /* fall through */ case RCS0: mmio = RENDER_HWS_PGA_GEN7; break; -- cgit v1.2.3 From edad25476ba353cfc0f3b34b150888b46f18e8a6 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Thu, 25 Jul 2019 10:46:55 -0700 Subject: drm/i915/guc: init submission structures as part of guc_init guc->stage_desc_pool is required as part of the init parameters and there is no reason we have to init them after HuC. This fixes a NULL ptr dereference due to guc->stage_desc_pool not being set (no fixes tag since GuC submission can't be enabled yet). Signed-off-by: Daniele Ceraolo Spurio Cc: Michal Wajdeczko Cc: Chris Wilson Reviewed-by: Michal Wajdeczko Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190725174655.24382-1-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_guc.c | 15 +++++++++++++++ drivers/gpu/drm/i915/gt/uc/intel_uc.c | 16 ---------------- 2 files changed, 15 insertions(+), 16 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index 1ea6a9e50c02..13fbbffd05c7 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -290,6 +290,16 @@ int intel_guc_init(struct intel_guc *guc) if (ret) goto err_ads; + if (intel_uc_is_using_guc_submission(>->uc)) { + /* + * This is stuff we need to have available at fw load time + * if we are planning to enable submission later + */ + ret = intel_guc_submission_init(guc); + if (ret) + goto err_ct; + } + /* now that everything is perma-pinned, initialize the parameters */ guc_init_params(guc); @@ -298,6 +308,8 @@ int intel_guc_init(struct intel_guc *guc) return 0; +err_ct: + intel_guc_ct_fini(&guc->ct); err_ads: intel_guc_ads_destroy(guc); err_log: @@ -317,6 +329,9 @@ void intel_guc_fini(struct intel_guc *guc) i915_ggtt_disable_guc(gt->ggtt); + if (intel_uc_is_using_guc_submission(>->uc)) + intel_guc_submission_fini(guc); + intel_guc_ct_fini(&guc->ct); intel_guc_ads_destroy(guc); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index 8205b3c81048..5b9b20d1cb6d 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -397,21 +397,8 @@ int intel_uc_init(struct intel_uc *uc) goto err_guc; } - if (intel_uc_is_using_guc_submission(uc)) { - /* - * This is stuff we need to have available at fw load time - * if we are planning to enable submission later - */ - ret = intel_guc_submission_init(guc); - if (ret) - goto err_huc; - } - return 0; -err_huc: - if (intel_uc_is_using_huc(uc)) - intel_huc_fini(huc); err_guc: intel_guc_fini(guc); return ret; @@ 
-426,9 +413,6 @@ void intel_uc_fini(struct intel_uc *uc) GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw)); - if (intel_uc_is_using_guc_submission(uc)) - intel_guc_submission_fini(guc); - if (intel_uc_is_using_huc(uc)) intel_huc_fini(&uc->huc); -- cgit v1.2.3 From 1f961807925032daa90267d8a23ff730e7ede07a Mon Sep 17 00:00:00 2001 From: Ralph Campbell Date: Thu, 25 Jul 2019 17:56:44 -0700 Subject: mm/hmm: replace hmm_update with mmu_notifier_range The hmm_mirror_ops callback function sync_cpu_device_pagetables() passes a struct hmm_update which is a simplified version of struct mmu_notifier_range. This is unnecessary so replace hmm_update with mmu_notifier_range directly. Link: https://lore.kernel.org/r/20190726005650.2566-2-rcampbell@nvidia.com Signed-off-by: Ralph Campbell Reviewed: Christoph Hellwig Reviewed-by: Jason Gunthorpe [jgg: white space tuning] Signed-off-by: Jason Gunthorpe --- drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 14 ++++++++------ drivers/gpu/drm/nouveau/nouveau_svm.c | 4 ++-- include/linux/hmm.h | 34 ++++++---------------------------- mm/hmm.c | 13 ++++--------- 4 files changed, 20 insertions(+), 45 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index 3971c201f320..b698b423b25d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c @@ -195,13 +195,14 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node, * Block for operations on BOs to finish and mark pages as accessed and * potentially dirty. */ -static int amdgpu_mn_sync_pagetables_gfx(struct hmm_mirror *mirror, - const struct hmm_update *update) +static int +amdgpu_mn_sync_pagetables_gfx(struct hmm_mirror *mirror, + const struct mmu_notifier_range *update) { struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror); unsigned long start = update->start; unsigned long end = update->end; - bool blockable = update->blockable; + bool blockable = mmu_notifier_range_blockable(update); struct interval_tree_node *it; /* notification is exclusive, but interval is inclusive */ @@ -243,13 +244,14 @@ static int amdgpu_mn_sync_pagetables_gfx(struct hmm_mirror *mirror, * necessitates evicting all user-mode queues of the process. The BOs * are restorted in amdgpu_mn_invalidate_range_end_hsa. 
*/ -static int amdgpu_mn_sync_pagetables_hsa(struct hmm_mirror *mirror, - const struct hmm_update *update) +static int +amdgpu_mn_sync_pagetables_hsa(struct hmm_mirror *mirror, + const struct mmu_notifier_range *update) { struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror); unsigned long start = update->start; unsigned long end = update->end; - bool blockable = update->blockable; + bool blockable = mmu_notifier_range_blockable(update); struct interval_tree_node *it; /* notification is exclusive, but interval is inclusive */ diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c index 545100f7c594..79b29c918717 100644 --- a/drivers/gpu/drm/nouveau/nouveau_svm.c +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c @@ -252,13 +252,13 @@ nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit) static int nouveau_svmm_sync_cpu_device_pagetables(struct hmm_mirror *mirror, - const struct hmm_update *update) + const struct mmu_notifier_range *update) { struct nouveau_svmm *svmm = container_of(mirror, typeof(*svmm), mirror); unsigned long start = update->start; unsigned long limit = update->end; - if (!update->blockable) + if (!mmu_notifier_range_blockable(update)) return -EAGAIN; SVMM_DBG(svmm, "invalidate %016lx-%016lx", start, limit); diff --git a/include/linux/hmm.h b/include/linux/hmm.h index 9f32586684c9..4f870f7017cb 100644 --- a/include/linux/hmm.h +++ b/include/linux/hmm.h @@ -340,29 +340,6 @@ static inline uint64_t hmm_device_entry_from_pfn(const struct hmm_range *range, struct hmm_mirror; -/* - * enum hmm_update_event - type of update - * @HMM_UPDATE_INVALIDATE: invalidate range (no indication as to why) - */ -enum hmm_update_event { - HMM_UPDATE_INVALIDATE, -}; - -/* - * struct hmm_update - HMM update information for callback - * - * @start: virtual start address of the range to update - * @end: virtual end address of the range to update - * @event: event triggering the update (what is happening) - * @blockable: can the callback block/sleep ? - */ -struct hmm_update { - unsigned long start; - unsigned long end; - enum hmm_update_event event; - bool blockable; -}; - /* * struct hmm_mirror_ops - HMM mirror device operations callback * @@ -383,9 +360,9 @@ struct hmm_mirror_ops { /* sync_cpu_device_pagetables() - synchronize page tables * * @mirror: pointer to struct hmm_mirror - * @update: update information (see struct hmm_update) - * Return: -EAGAIN if update.blockable false and callback need to - * block, 0 otherwise. + * @update: update information (see struct mmu_notifier_range) + * Return: -EAGAIN if mmu_notifier_range_blockable(update) is false + * and callback needs to block, 0 otherwise. * * This callback ultimately originates from mmu_notifiers when the CPU * page table is updated. The device driver must update its page table @@ -396,8 +373,9 @@ struct hmm_mirror_ops { * page tables are completely updated (TLBs flushed, etc); this is a * synchronous call. 
*/ - int (*sync_cpu_device_pagetables)(struct hmm_mirror *mirror, - const struct hmm_update *update); + int (*sync_cpu_device_pagetables)( + struct hmm_mirror *mirror, + const struct mmu_notifier_range *update); }; /* diff --git a/mm/hmm.c b/mm/hmm.c index 54b3a4162ae9..4040b4427635 100644 --- a/mm/hmm.c +++ b/mm/hmm.c @@ -165,7 +165,6 @@ static int hmm_invalidate_range_start(struct mmu_notifier *mn, { struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier); struct hmm_mirror *mirror; - struct hmm_update update; struct hmm_range *range; unsigned long flags; int ret = 0; @@ -173,15 +172,10 @@ static int hmm_invalidate_range_start(struct mmu_notifier *mn, if (!kref_get_unless_zero(&hmm->kref)) return 0; - update.start = nrange->start; - update.end = nrange->end; - update.event = HMM_UPDATE_INVALIDATE; - update.blockable = mmu_notifier_range_blockable(nrange); - spin_lock_irqsave(&hmm->ranges_lock, flags); hmm->notifiers++; list_for_each_entry(range, &hmm->ranges, list) { - if (update.end < range->start || update.start >= range->end) + if (nrange->end < range->start || nrange->start >= range->end) continue; range->valid = false; @@ -198,9 +192,10 @@ static int hmm_invalidate_range_start(struct mmu_notifier *mn, list_for_each_entry(mirror, &hmm->mirrors, list) { int rc; - rc = mirror->ops->sync_cpu_device_pagetables(mirror, &update); + rc = mirror->ops->sync_cpu_device_pagetables(mirror, nrange); if (rc) { - if (WARN_ON(update.blockable || rc != -EAGAIN)) + if (WARN_ON(mmu_notifier_range_blockable(nrange) || + rc != -EAGAIN)) continue; ret = -EAGAIN; break; -- cgit v1.2.3 From 9a4903e49e495bfd2650862dfae4178bebe4db9c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 25 Jul 2019 17:56:46 -0700 Subject: mm/hmm: replace the block argument to hmm_range_fault with a flags value This allows easier expansion to other flags, and also makes the callers a little easier to read. 
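As a stand-alone illustration of the interface style being adopted (a flags bitmask in place of a bool, leaving room for additional behaviours without another parameter), here is a minimal sketch in plain C. The names below are invented analogues of the patch's HMM_FAULT_ALLOW_RETRY; the sketch is not kernel code.

#include <stdio.h>

#define FAULT_ALLOW_RETRY	(1u << 0)	/* replaces the old bool argument */
#define FAULT_FUTURE_OPTION	(1u << 1)	/* room to grow without churn */

static long range_fault(int range_id, unsigned int flags)
{
	if (flags & FAULT_ALLOW_RETRY)
		printf("non-blocking fault on range %d\n", range_id);
	else
		printf("blocking fault on range %d\n", range_id);
	return 0;
}

int main(void)
{
	range_fault(1, 0);			/* old: range_fault(1, true)  -- blocking  */
	range_fault(2, FAULT_ALLOW_RETRY);	/* old: range_fault(2, false) -- may retry */
	return 0;
}

Callers also read more clearly: passing 0 says "no special behaviour" rather than an opaque true or false.
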
Link: https://lore.kernel.org/r/20190726005650.2566-4-rcampbell@nvidia.com Signed-off-by: Christoph Hellwig Signed-off-by: Ralph Campbell Reviewed-by: Jason Gunthorpe Signed-off-by: Jason Gunthorpe --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 2 +- drivers/gpu/drm/nouveau/nouveau_svm.c | 2 +- include/linux/hmm.h | 11 ++++- mm/hmm.c | 74 ++++++++++++++++----------------- 4 files changed, 48 insertions(+), 41 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index e51b48ac48eb..12a59ac83f72 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -832,7 +832,7 @@ retry: down_read(&mm->mmap_sem); - r = hmm_range_fault(range, true); + r = hmm_range_fault(range, 0); if (unlikely(r < 0)) { if (likely(r == -EAGAIN)) { /* diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c index 79b29c918717..49b520c60fc5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_svm.c +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c @@ -505,7 +505,7 @@ nouveau_range_fault(struct hmm_mirror *mirror, struct hmm_range *range) return -EBUSY; } - ret = hmm_range_fault(range, true); + ret = hmm_range_fault(range, 0); if (ret <= 0) { if (ret == 0) ret = -EBUSY; diff --git a/include/linux/hmm.h b/include/linux/hmm.h index 4f870f7017cb..89a141060e5b 100644 --- a/include/linux/hmm.h +++ b/include/linux/hmm.h @@ -407,12 +407,19 @@ int hmm_range_register(struct hmm_range *range, unsigned long end, unsigned page_shift); void hmm_range_unregister(struct hmm_range *range); + +/* + * Retry fault if non-blocking, drop mmap_sem and return -EAGAIN in that case. + */ +#define HMM_FAULT_ALLOW_RETRY (1 << 0) + long hmm_range_snapshot(struct hmm_range *range); -long hmm_range_fault(struct hmm_range *range, bool block); +long hmm_range_fault(struct hmm_range *range, unsigned int flags); + long hmm_range_dma_map(struct hmm_range *range, struct device *device, dma_addr_t *daddrs, - bool block); + unsigned int flags); long hmm_range_dma_unmap(struct hmm_range *range, struct vm_area_struct *vma, struct device *device, diff --git a/mm/hmm.c b/mm/hmm.c index 362944b0fbca..84f2791d3510 100644 --- a/mm/hmm.c +++ b/mm/hmm.c @@ -281,7 +281,7 @@ struct hmm_vma_walk { struct dev_pagemap *pgmap; unsigned long last; bool fault; - bool block; + unsigned int flags; }; static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr, @@ -293,8 +293,11 @@ static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr, struct vm_area_struct *vma = walk->vma; vm_fault_t ret; - flags |= hmm_vma_walk->block ? 0 : FAULT_FLAG_ALLOW_RETRY; - flags |= write_fault ? FAULT_FLAG_WRITE : 0; + if (hmm_vma_walk->flags & HMM_FAULT_ALLOW_RETRY) + flags |= FAULT_FLAG_ALLOW_RETRY; + if (write_fault) + flags |= FAULT_FLAG_WRITE; + ret = handle_mm_fault(vma, addr, flags); if (ret & VM_FAULT_RETRY) { /* Note, handle_mm_fault did up_read(&mm->mmap_sem)) */ @@ -1012,26 +1015,26 @@ long hmm_range_snapshot(struct hmm_range *range) } EXPORT_SYMBOL(hmm_range_snapshot); -/* - * hmm_range_fault() - try to fault some address in a virtual address range - * @range: range being faulted - * @block: allow blocking on fault (if true it sleeps and do not drop mmap_sem) - * Return: number of valid pages in range->pfns[] (from range start - * address). This may be zero. 
If the return value is negative, - * then one of the following values may be returned: +/** + * hmm_range_fault - try to fault some address in a virtual address range + * @range: range being faulted + * @flags: HMM_FAULT_* flags * - * -EINVAL invalid arguments or mm or virtual address are in an - * invalid vma (for instance device file vma). - * -ENOMEM: Out of memory. - * -EPERM: Invalid permission (for instance asking for write and - * range is read only). - * -EAGAIN: If you need to retry and mmap_sem was drop. This can only - * happens if block argument is false. - * -EBUSY: If the the range is being invalidated and you should wait - * for invalidation to finish. - * -EFAULT: Invalid (ie either no valid vma or it is illegal to access - * that range), number of valid pages in range->pfns[] (from - * range start address). + * Return: the number of valid pages in range->pfns[] (from range start + * address), which may be zero. On error one of the following status codes + * can be returned: + * + * -EINVAL: Invalid arguments or mm or virtual address is in an invalid vma + * (e.g., device file vma). + * -ENOMEM: Out of memory. + * -EPERM: Invalid permission (e.g., asking for write and range is read + * only). + * -EAGAIN: A page fault needs to be retried and mmap_sem was dropped. + * -EBUSY: The range has been invalidated and the caller needs to wait for + * the invalidation to finish. + * -EFAULT: Invalid (i.e., either no valid vma or it is illegal to access + * that range) number of valid pages in range->pfns[] (from + * range start address). * * This is similar to a regular CPU page fault except that it will not trigger * any memory migration if the memory being faulted is not accessible by CPUs @@ -1040,7 +1043,7 @@ EXPORT_SYMBOL(hmm_range_snapshot); * On error, for one virtual address in the range, the function will mark the * corresponding HMM pfn entry with an error flag. */ -long hmm_range_fault(struct hmm_range *range, bool block) +long hmm_range_fault(struct hmm_range *range, unsigned int flags) { const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP; unsigned long start = range->start, end; @@ -1086,7 +1089,7 @@ long hmm_range_fault(struct hmm_range *range, bool block) hmm_vma_walk.pgmap = NULL; hmm_vma_walk.last = start; hmm_vma_walk.fault = true; - hmm_vma_walk.block = block; + hmm_vma_walk.flags = flags; hmm_vma_walk.range = range; mm_walk.private = &hmm_vma_walk; end = min(range->end, vma->vm_end); @@ -1125,25 +1128,22 @@ long hmm_range_fault(struct hmm_range *range, bool block) EXPORT_SYMBOL(hmm_range_fault); /** - * hmm_range_dma_map() - hmm_range_fault() and dma map page all in one. - * @range: range being faulted - * @device: device against to dma map page to - * @daddrs: dma address of mapped pages - * @block: allow blocking on fault (if true it sleeps and do not drop mmap_sem) - * Return: number of pages mapped on success, -EAGAIN if mmap_sem have been - * drop and you need to try again, some other error value otherwise + * hmm_range_dma_map - hmm_range_fault() and dma map page all in one. + * @range: range being faulted + * @device: device to map page to + * @daddrs: array of dma addresses for the mapped pages + * @flags: HMM_FAULT_* * - * Note same usage pattern as hmm_range_fault(). + * Return: the number of pages mapped on success (including zero), or any + * status return from hmm_range_fault() otherwise. 
*/ -long hmm_range_dma_map(struct hmm_range *range, - struct device *device, - dma_addr_t *daddrs, - bool block) +long hmm_range_dma_map(struct hmm_range *range, struct device *device, + dma_addr_t *daddrs, unsigned int flags) { unsigned long i, npages, mapped; long ret; - ret = hmm_range_fault(range, block); + ret = hmm_range_fault(range, flags); if (ret <= 0) return ret ? ret : -EBUSY; -- cgit v1.2.3 From 5cca503817d0e96f8d902c6598d5105dca5e718a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 26 Jul 2019 14:14:58 +0100 Subject: drm/i915/perf: Initialise err to 0 before looping over ce->engines Smatch warning that the loop may be empty causing us to check err before it had been set. Ensure that it is initialised to 0, just in case. v2: Refactor the inner loop for better scooping and clarity Fixes: a9877da2d629 ("drm/i915/oa: Reconfigure contexts on the fly") Signed-off-by: Chris Wilson Cc: Lionel Landwerlin Cc: Tvrtko Ursulin Reviewed-by: Lionel Landwerlin Link: https://patchwork.freedesktop.org/patch/msgid/20190726131458.8310-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_perf.c | 61 +++++++++++++++++++++++----------------- 1 file changed, 35 insertions(+), 26 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index ab82ccba896b..d8941a791cd6 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -1792,6 +1792,38 @@ static int gen8_modify_self(struct intel_context *ce, return err; } +static int gen8_configure_context(struct i915_gem_context *ctx, + struct flex *flex, unsigned int count) +{ + struct i915_gem_engines_iter it; + struct intel_context *ce; + int err = 0; + + for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { + GEM_BUG_ON(ce == ce->engine->kernel_context); + + if (ce->engine->class != RENDER_CLASS) + continue; + + err = intel_context_lock_pinned(ce); + if (err) + break; + + flex->value = intel_sseu_make_rpcs(ctx->i915, &ce->sseu); + + /* Otherwise OA settings will be set upon first use */ + if (intel_context_is_pinned(ce)) + err = gen8_modify_context(ce, flex, count); + + intel_context_unlock_pinned(ce); + if (err) + break; + } + i915_gem_context_unlock_engines(ctx); + + return err; +} + /* * Manages updating the per-context aspects of the OA stream * configuration across all contexts. @@ -1846,7 +1878,6 @@ static int gen8_configure_all_contexts(struct drm_i915_private *i915, struct intel_engine_cs *engine; struct i915_gem_context *ctx; enum intel_engine_id id; - int err; int i; for (i = 2; i < ARRAY_SIZE(regs); i++) @@ -1871,35 +1902,12 @@ static int gen8_configure_all_contexts(struct drm_i915_private *i915, * trapped behind the barrier. 
*/ list_for_each_entry(ctx, &i915->contexts.list, link) { - struct i915_gem_engines_iter it; - struct intel_context *ce; + int err; if (ctx == i915->kernel_context) continue; - for_each_gem_engine(ce, - i915_gem_context_lock_engines(ctx), - it) { - GEM_BUG_ON(ce == ce->engine->kernel_context); - - if (ce->engine->class != RENDER_CLASS) - continue; - - err = intel_context_lock_pinned(ce); - if (err) - break; - - regs[0].value = intel_sseu_make_rpcs(i915, &ce->sseu); - - /* Otherwise OA settings will be set upon first use */ - if (intel_context_is_pinned(ce)) - err = gen8_modify_context(ce, regs, ARRAY_SIZE(regs)); - - intel_context_unlock_pinned(ce); - if (err) - break; - } - i915_gem_context_unlock_engines(ctx); + err = gen8_configure_context(ctx, regs, ARRAY_SIZE(regs)); if (err) return err; } @@ -1911,6 +1919,7 @@ static int gen8_configure_all_contexts(struct drm_i915_private *i915, */ for_each_engine(engine, i915, id) { struct intel_context *ce = engine->kernel_context; + int err; if (engine->class != RENDER_CLASS) continue; -- cgit v1.2.3 From cc374377a19d2a49d693997b62dc3a6f5fac6d61 Mon Sep 17 00:00:00 2001 From: Ralph Campbell Date: Thu, 25 Jul 2019 17:56:50 -0700 Subject: mm/hmm: remove hmm_range vma Since hmm_range_fault() doesn't use the struct hmm_range vma field, remove it. Link: https://lore.kernel.org/r/20190726005650.2566-8-rcampbell@nvidia.com Suggested-by: Jason Gunthorpe Signed-off-by: Ralph Campbell Reviewed-by: Christoph Hellwig Reviewed-by: Jason Gunthorpe Signed-off-by: Jason Gunthorpe --- drivers/gpu/drm/nouveau/nouveau_svm.c | 7 +++---- include/linux/hmm.h | 1 - mm/hmm.c | 1 - 3 files changed, 3 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c index 49b520c60fc5..a74530b5a523 100644 --- a/drivers/gpu/drm/nouveau/nouveau_svm.c +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c @@ -496,12 +496,12 @@ nouveau_range_fault(struct hmm_mirror *mirror, struct hmm_range *range) range->start, range->end, PAGE_SHIFT); if (ret) { - up_read(&range->vma->vm_mm->mmap_sem); + up_read(&range->hmm->mm->mmap_sem); return (int)ret; } if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) { - up_read(&range->vma->vm_mm->mmap_sem); + up_read(&range->hmm->mm->mmap_sem); return -EBUSY; } @@ -509,7 +509,7 @@ nouveau_range_fault(struct hmm_mirror *mirror, struct hmm_range *range) if (ret <= 0) { if (ret == 0) ret = -EBUSY; - up_read(&range->vma->vm_mm->mmap_sem); + up_read(&range->hmm->mm->mmap_sem); hmm_range_unregister(range); return ret; } @@ -682,7 +682,6 @@ nouveau_svm_fault(struct nvif_notify *notify) args.i.p.addr + args.i.p.size, fn - fi); /* Have HMM fault pages within the fault window to the GPU. 
*/ - range.vma = vma; range.start = args.i.p.addr; range.end = args.i.p.addr + args.i.p.size; range.pfns = args.phys; diff --git a/include/linux/hmm.h b/include/linux/hmm.h index 90dc5944b1bc..82265118d94a 100644 --- a/include/linux/hmm.h +++ b/include/linux/hmm.h @@ -164,7 +164,6 @@ enum hmm_pfn_value_e { */ struct hmm_range { struct hmm *hmm; - struct vm_area_struct *vma; struct list_head list; unsigned long start; unsigned long end; diff --git a/mm/hmm.c b/mm/hmm.c index 6111c0a3c12d..9a908902e4cc 100644 --- a/mm/hmm.c +++ b/mm/hmm.c @@ -1002,7 +1002,6 @@ long hmm_range_fault(struct hmm_range *range, unsigned int flags) return -EPERM; } - range->vma = vma; hmm_vma_walk.pgmap = NULL; hmm_vma_walk.last = start; hmm_vma_walk.flags = flags; -- cgit v1.2.3 From 3f99a61441e5d44b6dc00461a74d759c78191682 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Thu, 25 Jul 2019 13:50:56 +0100 Subject: drm/i915: Do not rely on for loop caching the mask for_each_engine_masked caches the engine mask but what does the caller know. Cache it explicitly for clarity and while at it correct the type to match. Signed-off-by: Tvrtko Ursulin Cc: Chris Wilson Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190725125056.11942-1-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_active.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c index 22341c62c204..d32db8a4db5c 100644 --- a/drivers/gpu/drm/i915/i915_active.c +++ b/drivers/gpu/drm/i915/i915_active.c @@ -378,12 +378,12 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref, struct intel_engine_cs *engine) { struct drm_i915_private *i915 = engine->i915; + intel_engine_mask_t tmp, mask = engine->mask; struct llist_node *pos, *next; - unsigned long tmp; int err; - GEM_BUG_ON(!engine->mask); - for_each_engine_masked(engine, i915, engine->mask, tmp) { + GEM_BUG_ON(!mask); + for_each_engine_masked(engine, i915, mask, tmp) { struct intel_context *kctx = engine->kernel_context; struct active_node *node; -- cgit v1.2.3 From f91bf7382856eb9b35664f92aab4c49b0407ac02 Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Thu, 25 Jul 2019 20:51:06 +0000 Subject: drm/i915/uc: Don't sanitize guc_log_level modparam We are already storing runtime value of log level in private field, so there is no need to modify modparam. Signed-off-by: Michal Wajdeczko Cc: Daniele Ceraolo Spurio Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190725205106.36148-1-michal.wajdeczko@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_guc_log.c | 29 ++++++++++++++++- drivers/gpu/drm/i915/gt/uc/intel_uc.c | 50 ------------------------------ 2 files changed, 28 insertions(+), 51 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c index 77fda1e85d3b..3460deca12c8 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c @@ -443,6 +443,29 @@ static void guc_log_capture_logs(struct intel_guc_log *log) guc_action_flush_log_complete(guc); } +static u32 __get_default_log_level(struct intel_guc_log *log) +{ + /* A negative value means "use platform/config default" */ + if (i915_modparams.guc_log_level < 0) { + return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) || + IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ? 
+ GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_NON_VERBOSE; + } + + if (i915_modparams.guc_log_level > GUC_LOG_LEVEL_MAX) { + DRM_WARN("Incompatible option detected: %s=%d, %s!\n", + "guc_log_level", i915_modparams.guc_log_level, + "verbosity too high"); + return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) || + IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ? + GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_DISABLED; + } + + GEM_BUG_ON(i915_modparams.guc_log_level < GUC_LOG_LEVEL_DISABLED); + GEM_BUG_ON(i915_modparams.guc_log_level > GUC_LOG_LEVEL_MAX); + return i915_modparams.guc_log_level; +} + int intel_guc_log_create(struct intel_guc_log *log) { struct intel_guc *guc = log_to_guc(log); @@ -482,7 +505,11 @@ int intel_guc_log_create(struct intel_guc_log *log) log->vma = vma; - log->level = i915_modparams.guc_log_level; + log->level = __get_default_log_level(log); + DRM_DEBUG_DRIVER("guc_log_level=%d (%s, verbose:%s, verbosity:%d)\n", + log->level, enableddisabled(log->level), + yesno(GUC_LOG_LEVEL_IS_VERBOSE(log->level)), + GUC_LOG_LEVEL_TO_VERBOSITY(log->level)); return 0; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index 5b9b20d1cb6d..fafa9be1e12a 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -74,23 +74,6 @@ static int __get_platform_enable_guc(struct intel_uc *uc) return enable_guc; } -static int __get_default_guc_log_level(struct intel_uc *uc) -{ - int guc_log_level; - - if (!intel_uc_fw_supported(&uc->guc.fw) || !intel_uc_is_using_guc(uc)) - guc_log_level = GUC_LOG_LEVEL_DISABLED; - else if (IS_ENABLED(CONFIG_DRM_I915_DEBUG) || - IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) - guc_log_level = GUC_LOG_LEVEL_MAX; - else - guc_log_level = GUC_LOG_LEVEL_NON_VERBOSE; - - /* Any platform specific fine-tuning can be done here */ - - return guc_log_level; -} - /** * sanitize_options_early - sanitize uC related modparam options * @uc: the intel_uc structure @@ -100,13 +83,6 @@ static int __get_default_guc_log_level(struct intel_uc *uc) * modparam varies between platforms and it is hardcoded in driver code. * Any other modparam value is only monitored against availability of the * related hardware or firmware definitions. - * - * In case of "guc_log_level" option this function will attempt to modify - * it only if it was initially set to "auto(-1)" or if initial value was - * "enable(1..4)" on platforms without the GuC. Default value for this - * modparam varies between platforms and is usually set to "disable(0)" - * unless GuC is enabled on given platform and the driver is compiled with - * debug config when this modparam will default to "enable(1..4)". 
*/ static void sanitize_options_early(struct intel_uc *uc) { @@ -149,34 +125,8 @@ static void sanitize_options_early(struct intel_uc *uc) i915_modparams.enable_guc &= ~ENABLE_GUC_SUBMISSION; } - /* A negative value means "use platform/config default" */ - if (i915_modparams.guc_log_level < 0) - i915_modparams.guc_log_level = - __get_default_guc_log_level(uc); - - if (i915_modparams.guc_log_level > 0 && !intel_uc_is_using_guc(uc)) { - DRM_WARN("Incompatible option detected: guc_log_level=%d, " - "but GuC is not enabled!\n", - i915_modparams.guc_log_level); - i915_modparams.guc_log_level = 0; - } - - if (i915_modparams.guc_log_level > GUC_LOG_LEVEL_MAX) { - DRM_WARN("Incompatible option detected: %s=%d, %s!\n", - "guc_log_level", i915_modparams.guc_log_level, - "verbosity too high"); - i915_modparams.guc_log_level = GUC_LOG_LEVEL_MAX; - } - - DRM_DEBUG_DRIVER("guc_log_level=%d (enabled:%s, verbose:%s, verbosity:%d)\n", - i915_modparams.guc_log_level, - yesno(i915_modparams.guc_log_level), - yesno(GUC_LOG_LEVEL_IS_VERBOSE(i915_modparams.guc_log_level)), - GUC_LOG_LEVEL_TO_VERBOSITY(i915_modparams.guc_log_level)); - /* Make sure that sanitization was done */ GEM_BUG_ON(i915_modparams.enable_guc < 0); - GEM_BUG_ON(i915_modparams.guc_log_level < 0); } void intel_uc_init_early(struct intel_uc *uc) -- cgit v1.2.3 From 340c4c8daad95cc1064d88901a03bcb68d0f0185 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 25 Jul 2019 14:14:46 +0100 Subject: drm/i915/gt: Add to timeline requires the timeline mutex Modifying a remote context requires careful serialisation with requests on that context, and that serialisation requires us to take their timeline->mutex. Make it so. Note that while struct_mutex rules, we can't create more than one request in parallel, but that age is soon coming to an end. v2: Though it doesn't affect the current users, contexts may share timelines so check if we already hold the right mutex. Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190725131447.27515-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_context.c | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 9292b6ca5e9c..d64b45f7ec6d 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -254,10 +254,18 @@ int intel_context_prepare_remote_request(struct intel_context *ce, /* Only suitable for use in remotely modifying this context */ GEM_BUG_ON(rq->hw_context == ce); - /* Queue this switch after all other activity by this context. */ - err = i915_active_request_set(&tl->last_request, rq); - if (err) - return err; + if (rq->timeline != tl) { /* beware timeline sharing */ + err = mutex_lock_interruptible_nested(&tl->mutex, + SINGLE_DEPTH_NESTING); + if (err) + return err; + + /* Queue this switch after current activity by this context. */ + err = i915_active_request_set(&tl->last_request, rq); + if (err) + goto unlock; + } + lockdep_assert_held(&tl->mutex); /* * Guarantee context image and the timeline remains pinned until the @@ -267,7 +275,12 @@ int intel_context_prepare_remote_request(struct intel_context *ce, * words transfer the pinned ce object to tracked active request. 
*/ GEM_BUG_ON(i915_active_is_idle(&ce->active)); - return i915_active_ref(&ce->active, rq->fence.context, rq); + err = i915_active_ref(&ce->active, rq->fence.context, rq); + +unlock: + if (rq->timeline != tl) + mutex_unlock(&tl->mutex); + return err; } struct i915_request *intel_context_create_request(struct intel_context *ce) -- cgit v1.2.3 From 3a8c63d28a12dec948439dcb1fd0e60066dc3897 Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Fri, 26 Jul 2019 18:42:10 +0000 Subject: drm/i915/uc: Remove redundant header_offset/size definitions According to Firmware layout definition, CSS header is located in front of the firmware blob, so header offset is always 0. Similarly, size of the CSS header is constant and currently used version is exactly 128. While here, move type/status enums up and keep them together. v2: use sizeof consistently (Daniele), update commit message Signed-off-by: Michal Wajdeczko Cc: Daniele Ceraolo Spurio Reviewed-by: Daniele Ceraolo Spurio Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190726184212.1836-1-michal.wajdeczko@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 23 +++++++++-------------- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h | 9 ++++----- drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h | 2 ++ 3 files changed, 15 insertions(+), 19 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c index 5fbdd17a864b..b526bab5b27a 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -218,21 +218,18 @@ void intel_uc_fw_fetch(struct intel_uc_fw *uc_fw, struct drm_i915_private *i915) css = (struct uc_css_header *)fw->data; - /* Firmware bits always start from header */ - uc_fw->header_offset = 0; - uc_fw->header_size = (css->header_size_dw - css->modulus_size_dw - - css->key_size_dw - css->exponent_size_dw) * - sizeof(u32); - - if (uc_fw->header_size != sizeof(struct uc_css_header)) { + /* Check integrity of size values inside CSS header */ + size = (css->header_size_dw - css->key_size_dw - css->modulus_size_dw - + css->exponent_size_dw) * sizeof(u32); + if (size != sizeof(struct uc_css_header)) { DRM_WARN("%s: Mismatched firmware header definition\n", intel_uc_fw_type_repr(uc_fw->type)); err = -ENOEXEC; goto fail; } - /* then, uCode */ - uc_fw->ucode_offset = uc_fw->header_offset + uc_fw->header_size; + /* uCode size must calculated from other sizes */ + uc_fw->ucode_offset = sizeof(struct uc_css_header); uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32); /* now RSA */ @@ -246,7 +243,7 @@ void intel_uc_fw_fetch(struct intel_uc_fw *uc_fw, struct drm_i915_private *i915) uc_fw->rsa_size = css->key_size_dw * sizeof(u32); /* At least, it should have header, uCode and RSA. Size of all three. 
*/ - size = uc_fw->header_size + uc_fw->ucode_size + uc_fw->rsa_size; + size = sizeof(struct uc_css_header) + uc_fw->ucode_size + uc_fw->rsa_size; if (fw->size < size) { DRM_WARN("%s: Truncated firmware (%zu, expected %zu)\n", intel_uc_fw_type_repr(uc_fw->type), fw->size, size); @@ -371,7 +368,7 @@ static int uc_fw_xfer(struct intel_uc_fw *uc_fw, struct intel_gt *gt, intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); /* Set the source address for the uCode */ - offset = uc_fw_ggtt_offset(uc_fw, gt->ggtt) + uc_fw->header_offset; + offset = uc_fw_ggtt_offset(uc_fw, gt->ggtt); GEM_BUG_ON(upper_32_bits(offset) & 0xFFFF0000); intel_uncore_write_fw(uncore, DMA_ADDR_0_LOW, lower_32_bits(offset)); intel_uncore_write_fw(uncore, DMA_ADDR_0_HIGH, upper_32_bits(offset)); @@ -385,7 +382,7 @@ static int uc_fw_xfer(struct intel_uc_fw *uc_fw, struct intel_gt *gt, * via DMA, excluding any other components */ intel_uncore_write_fw(uncore, DMA_COPY_SIZE, - uc_fw->header_size + uc_fw->ucode_size); + sizeof(struct uc_css_header) + uc_fw->ucode_size); /* Start the DMA */ intel_uncore_write_fw(uncore, DMA_CTRL, @@ -539,8 +536,6 @@ void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p) drm_printf(p, "\tversion: wanted %u.%u, found %u.%u\n", uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted, uc_fw->major_ver_found, uc_fw->minor_ver_found); - drm_printf(p, "\theader: offset %u, size %u\n", - uc_fw->header_offset, uc_fw->header_size); drm_printf(p, "\tuCode: offset %u, size %u\n", uc_fw->ucode_offset, uc_fw->ucode_size); drm_printf(p, "\tRSA: offset %u, size %u\n", diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h index eddbb237fabe..a8048f91f0da 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h @@ -26,6 +26,7 @@ #define _INTEL_UC_FW_H_ #include +#include "intel_uc_fw_abi.h" #include "i915_gem.h" struct drm_printer; @@ -57,10 +58,11 @@ enum intel_uc_fw_type { * of fetching, caching, and loading the firmware image into the uC. 
*/ struct intel_uc_fw { + enum intel_uc_fw_type type; + enum intel_uc_fw_status status; const char *path; size_t size; struct drm_i915_gem_object *obj; - enum intel_uc_fw_status status; /* * The firmware build process will generate a version header file with major and @@ -72,9 +74,6 @@ struct intel_uc_fw { u16 major_ver_found; u16 minor_ver_found; - enum intel_uc_fw_type type; - u32 header_size; - u32 header_offset; u32 rsa_size; u32 rsa_offset; u32 ucode_size; @@ -163,7 +162,7 @@ static inline u32 intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw) if (!intel_uc_fw_is_available(uc_fw)) return 0; - return uc_fw->header_size + uc_fw->ucode_size; + return sizeof(struct uc_css_header) + uc_fw->ucode_size; } void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw, diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h index 545e86c52a9e..ae58e8a8c53b 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h @@ -7,6 +7,7 @@ #define _INTEL_UC_FW_ABI_H #include +#include /** * DOC: Firmware Layout @@ -76,5 +77,6 @@ struct uc_css_header { u32 reserved[14]; u32 header_info; } __packed; +static_assert(sizeof(struct uc_css_header) == 128); #endif /* _INTEL_UC_FW_ABI_H */ -- cgit v1.2.3 From 5de51fa0b9399e53a352437a9fdbd50a2e5912c5 Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Fri, 26 Jul 2019 18:42:11 +0000 Subject: drm/i915/uc: Remove redundant ucode offset definition According to Firmware layout definition, uCode is located right after CSS header, so ucode offset is always same as header size. Signed-off-by: Michal Wajdeczko Cc: Daniele Ceraolo Spurio Reviewed-by: Daniele Ceraolo Spurio Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190726184212.1836-2-michal.wajdeczko@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 6 ++---- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h | 1 - 2 files changed, 2 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c index b526bab5b27a..16ab9bc92919 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -229,7 +229,6 @@ void intel_uc_fw_fetch(struct intel_uc_fw *uc_fw, struct drm_i915_private *i915) } /* uCode size must calculated from other sizes */ - uc_fw->ucode_offset = sizeof(struct uc_css_header); uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32); /* now RSA */ @@ -239,7 +238,7 @@ void intel_uc_fw_fetch(struct intel_uc_fw *uc_fw, struct drm_i915_private *i915) err = -ENOEXEC; goto fail; } - uc_fw->rsa_offset = uc_fw->ucode_offset + uc_fw->ucode_size; + uc_fw->rsa_offset = sizeof(struct uc_css_header) + uc_fw->ucode_size; uc_fw->rsa_size = css->key_size_dw * sizeof(u32); /* At least, it should have header, uCode and RSA. Size of all three. 
*/ @@ -536,8 +535,7 @@ void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p) drm_printf(p, "\tversion: wanted %u.%u, found %u.%u\n", uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted, uc_fw->major_ver_found, uc_fw->minor_ver_found); - drm_printf(p, "\tuCode: offset %u, size %u\n", - uc_fw->ucode_offset, uc_fw->ucode_size); + drm_printf(p, "\tuCode: %u bytes\n", uc_fw->ucode_size); drm_printf(p, "\tRSA: offset %u, size %u\n", uc_fw->rsa_offset, uc_fw->rsa_size); } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h index a8048f91f0da..6a04bc6d419f 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h @@ -77,7 +77,6 @@ struct intel_uc_fw { u32 rsa_size; u32 rsa_offset; u32 ucode_size; - u32 ucode_offset; }; static inline -- cgit v1.2.3 From 08f0e4a7ecb9e20f27b9d410bcba84e662fffe3b Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Fri, 26 Jul 2019 18:42:12 +0000 Subject: drm/i915/uc: Remove redundant RSA offset definition According to Firmware layout definition, RSA signature is located after CSS header and uCode so actual RSA offset in the blob can be easily calculated when needed (and we need it only once). Signed-off-by: Michal Wajdeczko Cc: Daniele Ceraolo Spurio Reviewed-by: Daniele Ceraolo Spurio Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190726184212.1836-3-michal.wajdeczko@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 8 +++----- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h | 1 - 2 files changed, 3 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c index 16ab9bc92919..0bad9b858501 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -238,7 +238,6 @@ void intel_uc_fw_fetch(struct intel_uc_fw *uc_fw, struct drm_i915_private *i915) err = -ENOEXEC; goto fail; } - uc_fw->rsa_offset = sizeof(struct uc_css_header) + uc_fw->ucode_size; uc_fw->rsa_size = css->key_size_dw * sizeof(u32); /* At least, it should have header, uCode and RSA. Size of all three. 
*/ @@ -512,11 +511,11 @@ size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len) { struct sg_table *pages = uc_fw->obj->mm.pages; u32 size = min_t(u32, uc_fw->rsa_size, max_len); + u32 offset = sizeof(struct uc_css_header) + uc_fw->ucode_size; GEM_BUG_ON(!intel_uc_fw_is_available(uc_fw)); - return sg_pcopy_to_buffer(pages->sgl, pages->nents, - dst, size, uc_fw->rsa_offset); + return sg_pcopy_to_buffer(pages->sgl, pages->nents, dst, size, offset); } /** @@ -536,6 +535,5 @@ void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p) uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted, uc_fw->major_ver_found, uc_fw->minor_ver_found); drm_printf(p, "\tuCode: %u bytes\n", uc_fw->ucode_size); - drm_printf(p, "\tRSA: offset %u, size %u\n", - uc_fw->rsa_offset, uc_fw->rsa_size); + drm_printf(p, "\tRSA: %u bytes\n", uc_fw->rsa_size); } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h index 6a04bc6d419f..c2ab2803715d 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h @@ -75,7 +75,6 @@ struct intel_uc_fw { u16 minor_ver_found; u32 rsa_size; - u32 rsa_offset; u32 ucode_size; }; -- cgit v1.2.3 From 98a5c2a3582a86d5acdcdeabbe0e6278e712201e Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Fri, 12 Jul 2019 18:09:19 -0700 Subject: drm/i915/tgl: skip setting PORT_CL_DW12_* on initialization MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit According to the spec when initializing the display in TGL we should not set PORT_CL_DW12 for the Aux channel of the combo PHYs. We will re-use the power well hooks from ICL so only set this register on gen < 12. v2: Generalize check for gen 12 (suggested by José) v3: Rebase after enum phy introduction Cc: Imre Deak Cc: Matt Roper Signed-off-by: Lucas De Marchi Reviewed-by: José Roberto de Souza Reviewed-by: Matt Atwood Link: https://patchwork.freedesktop.org/patch/msgid/20190713010940.17711-2-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/display/intel_display_power.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 93a148684c53..dd2a50b8ba0a 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -458,8 +458,10 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, val = I915_READ(regs->driver); I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx)); - val = I915_READ(ICL_PORT_CL_DW12(phy)); - I915_WRITE(ICL_PORT_CL_DW12(phy), val | ICL_LANE_ENABLE_AUX); + if (INTEL_GEN(dev_priv) < 12) { + val = I915_READ(ICL_PORT_CL_DW12(phy)); + I915_WRITE(ICL_PORT_CL_DW12(phy), val | ICL_LANE_ENABLE_AUX); + } hsw_wait_for_power_well_enable(dev_priv, power_well); @@ -487,8 +489,10 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx); u32 val; - val = I915_READ(ICL_PORT_CL_DW12(phy)); - I915_WRITE(ICL_PORT_CL_DW12(phy), val & ~ICL_LANE_ENABLE_AUX); + if (INTEL_GEN(dev_priv) < 12) { + val = I915_READ(ICL_PORT_CL_DW12(phy)); + I915_WRITE(ICL_PORT_CL_DW12(phy), val & ~ICL_LANE_ENABLE_AUX); + } val = I915_READ(regs->driver); I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx)); -- cgit v1.2.3 From df16b6361d888409a9e2f9ae777c8d5dde2e76b8 Mon Sep 17 00:00:00 2001 From: Mahesh Kumar 
Date: Fri, 12 Jul 2019 18:09:20 -0700 Subject: drm/i915/tgl: select correct bit for port select MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bit definitions for port-select got changed for TRANS_CLK_SEL & TRANS_DDI_FUNC_CTL registers in TGL. v2 (Lucas): - Nuke TRANS_DDI_PORT_NONE since it's 0: we are already clearing {TGL_,}TRANS_DDI_PORT_MASK (suggested by Ville) - Also cover haswell_get_ddi_port_state() in intel_display.c that was missing - Define macros using the _SHIFT macros so we don't lose other users Cc: Ville Syrjälä Signed-off-by: Mahesh Kumar Signed-off-by: Lucas De Marchi Reviewed-by: Anusha Srivatsa Reviewed-by: Matt Atwood Link: https://patchwork.freedesktop.org/patch/msgid/20190713010940.17711-3-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/display/intel_ddi.c | 47 ++++++++++++++++++++++------ drivers/gpu/drm/i915/display/intel_display.c | 6 +++- drivers/gpu/drm/i915/i915_reg.h | 11 +++++-- 3 files changed, 50 insertions(+), 14 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 18bc0f2690c9..c6f38c7b397d 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -1773,7 +1773,10 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state) /* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */ temp = TRANS_DDI_FUNC_ENABLE; - temp |= TRANS_DDI_SELECT_PORT(port); + if (INTEL_GEN(dev_priv) >= 12) + temp |= TGL_TRANS_DDI_SELECT_PORT(port); + else + temp |= TRANS_DDI_SELECT_PORT(port); switch (crtc_state->pipe_bpp) { case 18: @@ -1853,8 +1856,13 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder); u32 val = I915_READ(reg); - val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC); - val |= TRANS_DDI_PORT_NONE; + if (INTEL_GEN(dev_priv) >= 12) { + val &= ~(TRANS_DDI_FUNC_ENABLE | TGL_TRANS_DDI_PORT_MASK | + TRANS_DDI_DP_VC_PAYLOAD_ALLOC); + } else { + val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | + TRANS_DDI_DP_VC_PAYLOAD_ALLOC); + } I915_WRITE(reg, val); if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME && @@ -2006,10 +2014,19 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder, mst_pipe_mask = 0; for_each_pipe(dev_priv, p) { enum transcoder cpu_transcoder = (enum transcoder)p; + unsigned int port_mask, ddi_select; + + if (INTEL_GEN(dev_priv) >= 12) { + port_mask = TGL_TRANS_DDI_PORT_MASK; + ddi_select = TGL_TRANS_DDI_SELECT_PORT(port); + } else { + port_mask = TRANS_DDI_PORT_MASK; + ddi_select = TRANS_DDI_SELECT_PORT(port); + } tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); - if ((tmp & TRANS_DDI_PORT_MASK) != TRANS_DDI_SELECT_PORT(port)) + if ((tmp & port_mask) != ddi_select) continue; if ((tmp & TRANS_DDI_MODE_SELECT_MASK) == @@ -2126,9 +2143,14 @@ void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state) enum port port = encoder->port; enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; - if (cpu_transcoder != TRANSCODER_EDP) - I915_WRITE(TRANS_CLK_SEL(cpu_transcoder), - TRANS_CLK_SEL_PORT(port)); + if (cpu_transcoder != TRANSCODER_EDP) { + if (INTEL_GEN(dev_priv) >= 12) + I915_WRITE(TRANS_CLK_SEL(cpu_transcoder), + TGL_TRANS_CLK_SEL_PORT(port)); + else + I915_WRITE(TRANS_CLK_SEL(cpu_transcoder), + TRANS_CLK_SEL_PORT(port)); + } } void intel_ddi_disable_pipe_clock(const struct 
intel_crtc_state *crtc_state) @@ -2136,9 +2158,14 @@ void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state) struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; - if (cpu_transcoder != TRANSCODER_EDP) - I915_WRITE(TRANS_CLK_SEL(cpu_transcoder), - TRANS_CLK_SEL_DISABLED); + if (cpu_transcoder != TRANSCODER_EDP) { + if (INTEL_GEN(dev_priv) >= 12) + I915_WRITE(TRANS_CLK_SEL(cpu_transcoder), + TGL_TRANS_CLK_SEL_DISABLED); + else + I915_WRITE(TRANS_CLK_SEL(cpu_transcoder), + TRANS_CLK_SEL_DISABLED); + } } static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv, diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index e25b82d07d4f..51e4f6798a6b 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -10353,7 +10353,11 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc, tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder)); - port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT; + if (INTEL_GEN(dev_priv) >= 12) + port = (tmp & TGL_TRANS_DDI_PORT_MASK) >> + TGL_TRANS_DDI_PORT_SHIFT; + else + port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT; if (INTEL_GEN(dev_priv) >= 11) icelake_get_ddi_pll(dev_priv, port, pipe_config); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 24f2a52a2b42..3035a48a2527 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -9384,10 +9384,12 @@ enum skl_power_gate { #define TRANS_DDI_FUNC_ENABLE (1 << 31) /* Those bits are ignored by pipe EDP since it can only connect to DDI A */ -#define TRANS_DDI_PORT_MASK (7 << 28) #define TRANS_DDI_PORT_SHIFT 28 -#define TRANS_DDI_SELECT_PORT(x) ((x) << 28) -#define TRANS_DDI_PORT_NONE (0 << 28) +#define TGL_TRANS_DDI_PORT_SHIFT 27 +#define TRANS_DDI_PORT_MASK (7 << TRANS_DDI_PORT_SHIFT) +#define TGL_TRANS_DDI_PORT_MASK (0xf << TGL_TRANS_DDI_PORT_SHIFT) +#define TRANS_DDI_SELECT_PORT(x) ((x) << TRANS_DDI_PORT_SHIFT) +#define TGL_TRANS_DDI_SELECT_PORT(x) (((x) + 1) << TGL_TRANS_DDI_PORT_SHIFT) #define TRANS_DDI_MODE_SELECT_MASK (7 << 24) #define TRANS_DDI_MODE_SELECT_HDMI (0 << 24) #define TRANS_DDI_MODE_SELECT_DVI (1 << 24) @@ -9597,6 +9599,9 @@ enum skl_power_gate { /* For each transcoder, we need to select the corresponding port clock */ #define TRANS_CLK_SEL_DISABLED (0x0 << 29) #define TRANS_CLK_SEL_PORT(x) (((x) + 1) << 29) +#define TGL_TRANS_CLK_SEL_DISABLED (0x0 << 28) +#define TGL_TRANS_CLK_SEL_PORT(x) (((x) + 1) << 28) + #define CDCLK_FREQ _MMIO(0x46200) -- cgit v1.2.3 From aaf70b90a4f196b1abd67f97dace64a60308234e Mon Sep 17 00:00:00 2001 From: Mahesh Kumar Date: Fri, 12 Jul 2019 18:09:21 -0700 Subject: drm/i915/tgl: update ddi/tc clock_off bits In GEN 12 PORT_C DDI clk_off bit is not equally distanced to A/B, it's at offset 24. Similarly TC port (5/6) clk off bits are at offset 22/23. Extend the macros to cover the additional ports. 
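A rough standalone sketch (not driver code) of the resulting TC clock-off bit layout: it assumes PORT_TC1..PORT_TC6 are consecutive values starting at 0, as in the i915 tc_port enum, and re-declares the shift expression locally just for illustration. The combo PHY DDI bits (A/B/C at 10/11/24 via _PICK()) are untouched by this patch.

#include <stdio.h>

enum { PORT_TC1, PORT_TC2, PORT_TC3, PORT_TC4, PORT_TC5, PORT_TC6 };

/* Mirrors the shift selection in ICL_DPCLKA_CFGCR0_TC_CLK_OFF() below. */
#define TC_CLK_OFF_BIT(tc_port) ((tc_port) < PORT_TC4 ? \
				 (tc_port) + 12 : \
				 (tc_port) - PORT_TC4 + 21)

int main(void)
{
	int tc;

	for (tc = PORT_TC1; tc <= PORT_TC6; tc++)
		printf("TC%d -> DPCLKA_CFGCR0 bit %d\n",
		       tc + 1, TC_CLK_OFF_BIT(tc));

	return 0;
}

Running it shows TC1-TC3 staying at bits 12-14 and TC4 at 21, with the new TC5/TC6 cases landing at bits 22/23 as noted above.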
Cc: Matt Roper Signed-off-by: Mahesh Kumar Signed-off-by: Lucas De Marchi Reviewed-by: Anusha Srivatsa Reviewed-by: Matt Atwood Link: https://patchwork.freedesktop.org/patch/msgid/20190713010940.17711-4-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/i915_reg.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 3035a48a2527..d2b76121d863 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -9742,8 +9742,9 @@ enum skl_power_gate { #define ICL_DPCLKA_CFGCR0 _MMIO(0x164280) #define ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) (1 << _PICK(phy, 10, 11, 24)) -#define ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port) (1 << ((tc_port) == PORT_TC4 ? \ - 21 : (tc_port) + 12)) +#define ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port) (1 << ((tc_port) < PORT_TC4 ? \ + (tc_port) + 12 : \ + (tc_port) - PORT_TC4 + 21)) #define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy) ((phy) * 2) #define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy) (3 << ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy)) #define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll, phy) ((pll) << ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy)) -- cgit v1.2.3 From 62336cc666b0e0f1f2bebd682c1741b9a439095a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 27 Jul 2019 10:54:41 +0100 Subject: drm/i915/uc: Fixup kerneldoc after params were flipped and renamed drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c:194: warning: Function parameter or member 'i915' not described in 'intel_uc_fw_fetch' drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c:194: warning: Excess function parameter 'dev_priv' description in 'intel_uc_fw_fetch' Fixes: 97dee74bb34a ("drm/i915/uc: Reorder params in intel_uc_fw_fetch") Signed-off-by: Chris Wilson Cc: Michal Wajdeczko Cc: Daniele Ceraolo Spurio Reviewed-by: Michal Wajdeczko Link: https://patchwork.freedesktop.org/patch/msgid/20190727101055.5300-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c index 0bad9b858501..0f9badf44837 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -185,8 +185,8 @@ void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw, /** * intel_uc_fw_fetch - fetch uC firmware * - * @dev_priv: device private * @uc_fw: uC firmware + * @i915: device private * * Fetch uC firmware into GEM obj. */ -- cgit v1.2.3 From 76c5399fe2e4d7a5013280629de05c8ab6301311 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 29 Jul 2019 09:59:44 +0100 Subject: drm/i915/selftests: Careful not to flush hang_fini on error setups Smatch spotted that we test at the start of hang_fini for a valid (h->gt is only set after a request is created) but then used it regardless later on. 
v2: Alternatively, we do not need to check as we now always prime h->gt in hang_init() References: cb823ed9915b ("drm/i915/gt: Use intel_gt as the primary object for handling resets") Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Cc: Daniele Ceraolo Spurio Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190729085944.2179-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c index e2fa38a1ff0f..4484b4447db1 100644 --- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c @@ -272,9 +272,7 @@ static u32 hws_seqno(const struct hang *h, const struct i915_request *rq) static void hang_fini(struct hang *h) { *h->batch = MI_BATCH_BUFFER_END; - - if (h->gt) - intel_gt_chipset_flush(h->gt); + intel_gt_chipset_flush(h->gt); i915_gem_object_unpin_map(h->obj); i915_gem_object_put(h->obj); -- cgit v1.2.3 From ac65bdfef14a902b40ff69a35f5c604dba096547 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 19 Jun 2019 18:01:35 +0100 Subject: drm/i915: Keep rings pinned while the context is active Remember to keep the rings pinned as well as the context image until the GPU is no longer active. v2: Introduce a ring->pin_count primarily to hide the mock_ring that doesn't fit into the normal GGTT vma picture. v3: Order is important in teardown, ringbuffer submission needs to drop the pin count on the engine->kernel_context before it can gleefully free its ring. Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=110946 Fixes: ce476c80b8bf ("drm/i915: Keep contexts pinned until after the next kernel context switch") Signed-off-by: Chris Wilson Cc: Mika Kuoppala Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190619170135.15281-1-chris@chris-wilson.co.uk (cherry picked from commit 09c5ab384f6fb30f834a5777888b4486dd7f015d) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/gt/intel_context.c | 27 ++++++++++++++++-------- drivers/gpu/drm/i915/gt/intel_engine_types.h | 12 +++++++++++ drivers/gpu/drm/i915/gt/intel_lrc.c | 10 ++------- drivers/gpu/drm/i915/gt/intel_ringbuffer.c | 31 ++++++++++++++++++---------- drivers/gpu/drm/i915/gt/mock_engine.c | 1 + 5 files changed, 53 insertions(+), 28 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 2c454f227c2e..23120901c55f 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -126,6 +126,7 @@ static void intel_context_retire(struct i915_active *active) if (ce->state) __context_unpin_state(ce->state); + intel_ring_unpin(ce->ring); intel_context_put(ce); } @@ -160,27 +161,35 @@ int intel_context_active_acquire(struct intel_context *ce, unsigned long flags) intel_context_get(ce); + err = intel_ring_pin(ce->ring); + if (err) + goto err_put; + if (!ce->state) return 0; err = __context_pin_state(ce->state, flags); - if (err) { - i915_active_cancel(&ce->active); - intel_context_put(ce); - return err; - } + if (err) + goto err_ring; /* Preallocate tracking nodes */ if (!i915_gem_context_is_kernel(ce->gem_context)) { err = i915_active_acquire_preallocate_barrier(&ce->active, ce->engine); - if (err) { - i915_active_release(&ce->active); - return err; - } + if (err) + goto err_state; } return 0; + 
+err_state: + __context_unpin_state(ce->state); +err_ring: + intel_ring_unpin(ce->ring); +err_put: + intel_context_put(ce); + i915_active_cancel(&ce->active); + return err; } void intel_context_active_release(struct intel_context *ce) diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index 868b220214f8..43e975a26016 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -70,6 +70,18 @@ struct intel_ring { struct list_head request_list; struct list_head active_link; + /* + * As we have two types of rings, one global to the engine used + * by ringbuffer submission and those that are exclusive to a + * context used by execlists, we have to play safe and allow + * atomic updates to the pin_count. However, the actual pinning + * of the context is either done during initialisation for + * ringbuffer submission or serialised as part of the context + * pinning for execlists, and so we do not need a mutex ourselves + * to serialise intel_ring_pin/intel_ring_unpin. + */ + atomic_t pin_count; + u32 head; u32 tail; u32 emit; diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index b42b5f158295..82b7ace62d97 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -1414,6 +1414,7 @@ static void execlists_context_destroy(struct kref *kref) { struct intel_context *ce = container_of(kref, typeof(*ce), ref); + GEM_BUG_ON(!i915_active_is_idle(&ce->active)); GEM_BUG_ON(intel_context_is_pinned(ce)); if (ce->state) @@ -1426,7 +1427,6 @@ static void execlists_context_unpin(struct intel_context *ce) { i915_gem_context_unpin_hw_id(ce->gem_context); i915_gem_object_unpin_map(ce->state->obj); - intel_ring_unpin(ce->ring); } static void @@ -1478,13 +1478,9 @@ __execlists_context_pin(struct intel_context *ce, goto unpin_active; } - ret = intel_ring_pin(ce->ring); - if (ret) - goto unpin_map; - ret = i915_gem_context_pin_hw_id(ce->gem_context); if (ret) - goto unpin_ring; + goto unpin_map; ce->lrc_desc = lrc_descriptor(ce, engine); ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; @@ -1492,8 +1488,6 @@ __execlists_context_pin(struct intel_context *ce, return 0; -unpin_ring: - intel_ring_unpin(ce->ring); unpin_map: i915_gem_object_unpin_map(ce->state->obj); unpin_active: diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c index c6023bc9452d..12010e798868 100644 --- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c @@ -1149,16 +1149,16 @@ i915_emit_bb_start(struct i915_request *rq, int intel_ring_pin(struct intel_ring *ring) { struct i915_vma *vma = ring->vma; - enum i915_map_type map = i915_coherent_map_type(vma->vm->i915); unsigned int flags; void *addr; int ret; - GEM_BUG_ON(ring->vaddr); + if (atomic_fetch_inc(&ring->pin_count)) + return 0; ret = i915_timeline_pin(ring->timeline); if (ret) - return ret; + goto err_unpin; flags = PIN_GLOBAL; @@ -1172,26 +1172,31 @@ int intel_ring_pin(struct intel_ring *ring) ret = i915_vma_pin(vma, 0, 0, flags); if (unlikely(ret)) - goto unpin_timeline; + goto err_timeline; if (i915_vma_is_map_and_fenceable(vma)) addr = (void __force *)i915_vma_pin_iomap(vma); else - addr = i915_gem_object_pin_map(vma->obj, map); + addr = i915_gem_object_pin_map(vma->obj, + i915_coherent_map_type(vma->vm->i915)); if (IS_ERR(addr)) { ret = PTR_ERR(addr); - goto unpin_ring; + goto err_ring; } vma->obj->pin_global++; + 
GEM_BUG_ON(ring->vaddr); ring->vaddr = addr; + return 0; -unpin_ring: +err_ring: i915_vma_unpin(vma); -unpin_timeline: +err_timeline: i915_timeline_unpin(ring->timeline); +err_unpin: + atomic_dec(&ring->pin_count); return ret; } @@ -1207,16 +1212,19 @@ void intel_ring_reset(struct intel_ring *ring, u32 tail) void intel_ring_unpin(struct intel_ring *ring) { - GEM_BUG_ON(!ring->vma); - GEM_BUG_ON(!ring->vaddr); + if (!atomic_dec_and_test(&ring->pin_count)) + return; /* Discard any unused bytes beyond that submitted to hw. */ intel_ring_reset(ring, ring->tail); + GEM_BUG_ON(!ring->vma); if (i915_vma_is_map_and_fenceable(ring->vma)) i915_vma_unpin_iomap(ring->vma); else i915_gem_object_unpin_map(ring->vma->obj); + + GEM_BUG_ON(!ring->vaddr); ring->vaddr = NULL; ring->vma->obj->pin_global--; @@ -2081,10 +2089,11 @@ static void ring_destroy(struct intel_engine_cs *engine) WARN_ON(INTEL_GEN(dev_priv) > 2 && (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0); + intel_engine_cleanup_common(engine); + intel_ring_unpin(engine->buffer); intel_ring_put(engine->buffer); - intel_engine_cleanup_common(engine); kfree(engine); } diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c index 086801b51441..486c6953dcb1 100644 --- a/drivers/gpu/drm/i915/gt/mock_engine.c +++ b/drivers/gpu/drm/i915/gt/mock_engine.c @@ -66,6 +66,7 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine) ring->base.effective_size = sz; ring->base.vaddr = (void *)(ring + 1); ring->base.timeline = &ring->timeline; + atomic_set(&ring->base.pin_count, 1); INIT_LIST_HEAD(&ring->base.request_list); intel_ring_update_space(&ring->base); -- cgit v1.2.3 From 248f883db61283b4f5a1c92a5e27277377b09f16 Mon Sep 17 00:00:00 2001 From: Kenneth Graunke Date: Tue, 25 Jun 2019 10:06:55 +0100 Subject: drm/i915: Disable SAMPLER_STATE prefetching on all Gen11 steppings. The Demand Prefetch workaround (binding table prefetching) only applies to Icelake A0/B0. But the Sampler Prefetch workaround needs to be applied to all Gen11 steppings, according to a programming note in the SARCHKMD documentation. Using the Intel Gallium driver, I have seen intermittent failures in the dEQP-GLES31.functional.copy_image.non_compressed.* tests. After applying this workaround, the tests reliably pass. 
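Loosely speaking (a simplified standalone sketch, not the i915 workaround machinery; the register contents and bit position below are made up for illustration), a wa_write_or() style entry boils down to a read-modify-write that ORs the listed bits into the register while leaving the remaining bits untouched:

#include <stdint.h>
#include <stdio.h>

/* Illustrative bit only, not the real GEN7_DISABLE_SAMPLER_PREFETCH value. */
#define DISABLE_SAMPLER_PREFETCH_BIT (1u << 30)

static uint32_t apply_set_bits_workaround(uint32_t reg, uint32_t set)
{
	return reg | set; /* all other bits are preserved */
}

int main(void)
{
	uint32_t sarchkmd = 0x00000a01; /* made-up current register value */

	sarchkmd = apply_set_bits_workaround(sarchkmd, DISABLE_SAMPLER_PREFETCH_BIT);
	printf("SARCHKMD -> 0x%08x\n", sarchkmd);
	return 0;
}

Splitting the two prefetch bits into separate wa_write_or() calls, as the patch below does, therefore only changes which bits get ORed in on which steppings, not how they are applied.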
v2: Remove the overlap with a pre-production w/a BSpec: 9663 Signed-off-by: Kenneth Graunke Signed-off-by: Chris Wilson Cc: stable@vger.kernel.org Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190625090655.19220-1-chris@chris-wilson.co.uk (cherry picked from commit f9a393875d3af13cc3267477746608dadb7f17c1) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/gt/intel_workarounds.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 15e90fd2cfdc..50c0060509a6 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -1258,8 +1258,12 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0)) wa_write_or(wal, GEN7_SARCHKMD, - GEN7_DISABLE_DEMAND_PREFETCH | - GEN7_DISABLE_SAMPLER_PREFETCH); + GEN7_DISABLE_DEMAND_PREFETCH); + + /* Wa_1606682166:icl */ + wa_write_or(wal, + GEN7_SARCHKMD, + GEN7_DISABLE_SAMPLER_PREFETCH); } if (IS_GEN_RANGE(i915, 9, 11)) { -- cgit v1.2.3 From 95eef14cdad150fed43147bcd4f29eea3d0a3f03 Mon Sep 17 00:00:00 2001 From: Lionel Landwerlin Date: Mon, 10 Jun 2019 11:19:14 +0300 Subject: drm/i915/perf: fix ICL perf register offsets We got the wrong offsets (could they have changed?). New values were computed off an error state by looking up the register offset in the context image as written by the HW. Signed-off-by: Lionel Landwerlin Fixes: 1de401c08fa805 ("drm/i915/perf: enable perf support on ICL") Cc: # v4.18+ Acked-by: Kenneth Graunke Link: https://patchwork.freedesktop.org/patch/msgid/20190610081914.25428-1-lionel.g.landwerlin@intel.com (cherry picked from commit 8dcfdfb4501012a8d36d2157dc73925715f2befb) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/i915_perf.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index a700c5c3d167..1ae06a1b6749 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -3477,9 +3477,13 @@ void i915_perf_init(struct drm_i915_private *dev_priv) dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set; dev_priv->perf.oa.ops.disable_metric_set = gen10_disable_metric_set; - dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128; - dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de; - + if (IS_GEN(dev_priv, 10)) { + dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128; + dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de; + } else { + dev_priv->perf.oa.ctx_oactxctrl_offset = 0x124; + dev_priv->perf.oa.ctx_flexeu0_offset = 0x78e; + } dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16); } } -- cgit v1.2.3 From 7d3cd66261665da491d0ee582beabe23df60f983 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 19 Jun 2019 20:08:39 +0300 Subject: drm/i915: Fix various tracepoints for gen2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Gen2 doesn't have a frame counter and apparently we no longer provide a fake .get_vblank_counter() hook for it. That means all tracepoints calling that hook will oops. Update the tracepoints to use intel_crtc_get_vblank_counter() which will gracefully fall back to using the software counter. This is actually a better approach since we now get (hopefully accurate) frame numbers in the traces. 
This also gets rid of the raw driver->get_vblank_counter() calls, which we need to do in order to switch to the per-crtc vblank vfuncs. v2: Deal with new tracepoints v3: Use a distinct variable name for the internal crtc iterator (Chris) Cc: Shawn Guo Cc: Daniel Vetter Fixes: 967dd4841787 ("drm: remove drm_vblank_no_hw_counter assignment from driver code") Signed-off-by: Ville Syrjälä Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190619170842.20579-2-ville.syrjala@linux.intel.com (cherry picked from commit 4c888e7bd26f58deb27c2e6ddc90000b89ee9393) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_display.c | 4 +- drivers/gpu/drm/i915/i915_trace.h | 76 ++++++++++++---------------- 2 files changed, 35 insertions(+), 45 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 30b97ded6fdd..592b92782fab 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -1839,7 +1839,7 @@ static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state) /* FIXME: assert CPU port conditions for SNB+ */ } - trace_intel_pipe_enable(dev_priv, pipe); + trace_intel_pipe_enable(crtc); reg = PIPECONF(cpu_transcoder); val = I915_READ(reg); @@ -1880,7 +1880,7 @@ static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state) */ assert_planes_disabled(crtc); - trace_intel_pipe_disable(dev_priv, pipe); + trace_intel_pipe_disable(crtc); reg = PIPECONF(cpu_transcoder); val = I915_READ(reg); diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index f4ce643b3bc3..cce426b23a24 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -21,24 +21,22 @@ /* watermark/fifo updates */ TRACE_EVENT(intel_pipe_enable, - TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe), - TP_ARGS(dev_priv, pipe), + TP_PROTO(struct intel_crtc *crtc), + TP_ARGS(crtc), TP_STRUCT__entry( __array(u32, frame, 3) __array(u32, scanline, 3) __field(enum pipe, pipe) ), - TP_fast_assign( - enum pipe _pipe; - for_each_pipe(dev_priv, _pipe) { - __entry->frame[_pipe] = - dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, _pipe); - __entry->scanline[_pipe] = - intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, _pipe)); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_crtc *it__; + for_each_intel_crtc(&dev_priv->drm, it__) { + __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__); + __entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__); } - __entry->pipe = pipe; + __entry->pipe = crtc->pipe; ), TP_printk("pipe %c enable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u", @@ -49,8 +47,8 @@ TRACE_EVENT(intel_pipe_enable, ); TRACE_EVENT(intel_pipe_disable, - TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe), - TP_ARGS(dev_priv, pipe), + TP_PROTO(struct intel_crtc *crtc), + TP_ARGS(crtc), TP_STRUCT__entry( __array(u32, frame, 3) @@ -59,14 +57,13 @@ TRACE_EVENT(intel_pipe_disable, ), TP_fast_assign( - enum pipe _pipe; - for_each_pipe(dev_priv, _pipe) { - __entry->frame[_pipe] = - dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, _pipe); - __entry->scanline[_pipe] = - intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, _pipe)); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_crtc *it__; + 
for_each_intel_crtc(&dev_priv->drm, it__) { + __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__); + __entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__); } - __entry->pipe = pipe; + __entry->pipe = crtc->pipe; ), TP_printk("pipe %c disable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u", @@ -89,8 +86,7 @@ TRACE_EVENT(intel_pipe_crc, TP_fast_assign( __entry->pipe = crtc->pipe; - __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, - crtc->pipe); + __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); memcpy(__entry->crcs, crcs, sizeof(__entry->crcs)); ), @@ -112,9 +108,10 @@ TRACE_EVENT(intel_cpu_fifo_underrun, ), TP_fast_assign( + struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); __entry->pipe = pipe; - __entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe); - __entry->scanline = intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe)); + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); ), TP_printk("pipe %c, frame=%u, scanline=%u", @@ -134,9 +131,10 @@ TRACE_EVENT(intel_pch_fifo_underrun, TP_fast_assign( enum pipe pipe = pch_transcoder; + struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); __entry->pipe = pipe; - __entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe); - __entry->scanline = intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe)); + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); ), TP_printk("pch transcoder %c, frame=%u, scanline=%u", @@ -156,12 +154,10 @@ TRACE_EVENT(intel_memory_cxsr, ), TP_fast_assign( - enum pipe pipe; - for_each_pipe(dev_priv, pipe) { - __entry->frame[pipe] = - dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe); - __entry->scanline[pipe] = - intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe)); + struct intel_crtc *crtc; + for_each_intel_crtc(&dev_priv->drm, crtc) { + __entry->frame[crtc->pipe] = intel_crtc_get_vblank_counter(crtc); + __entry->scanline[crtc->pipe] = intel_get_crtc_scanline(crtc); } __entry->old = old; __entry->new = new; @@ -198,8 +194,7 @@ TRACE_EVENT(g4x_wm, TP_fast_assign( __entry->pipe = crtc->pipe; - __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, - crtc->pipe); + __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); __entry->primary = wm->pipe[crtc->pipe].plane[PLANE_PRIMARY]; __entry->sprite = wm->pipe[crtc->pipe].plane[PLANE_SPRITE0]; @@ -243,8 +238,7 @@ TRACE_EVENT(vlv_wm, TP_fast_assign( __entry->pipe = crtc->pipe; - __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, - crtc->pipe); + __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); __entry->level = wm->level; __entry->cxsr = wm->cxsr; @@ -278,8 +272,7 @@ TRACE_EVENT(vlv_fifo_size, TP_fast_assign( __entry->pipe = crtc->pipe; - __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, - crtc->pipe); + __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); __entry->sprite0_start = sprite0_start; __entry->sprite1_start = sprite1_start; @@ -310,8 +303,7 @@ TRACE_EVENT(intel_update_plane, TP_fast_assign( __entry->pipe = crtc->pipe; __entry->name = plane->name; - __entry->frame = 
crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, - crtc->pipe); + __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); memcpy(__entry->src, &plane->state->src, sizeof(__entry->src)); memcpy(__entry->dst, &plane->state->dst, sizeof(__entry->dst)); @@ -338,8 +330,7 @@ TRACE_EVENT(intel_disable_plane, TP_fast_assign( __entry->pipe = crtc->pipe; __entry->name = plane->name; - __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, - crtc->pipe); + __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); ), @@ -364,8 +355,7 @@ TRACE_EVENT(i915_pipe_update_start, TP_fast_assign( __entry->pipe = crtc->pipe; - __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, - crtc->pipe); + __entry->frame = intel_crtc_get_vblank_counter(crtc); __entry->scanline = intel_get_crtc_scanline(crtc); __entry->min = crtc->debug.min_vbl; __entry->max = crtc->debug.max_vbl; -- cgit v1.2.3 From c270cac40828eca4fb8d7c27cab1d0ac7765ff3d Mon Sep 17 00:00:00 2001 From: Lionel Landwerlin Date: Sat, 29 Jun 2019 14:13:50 +0100 Subject: drm/i915: fix whitelist selftests with readonly registers When a register is readonly there is not much we can tell about its value (apart from its default value?). This can be covered by tests exercising the value of the register from userspace. For PS_INVOCATION_COUNT we've got the following piglit tests : KHR-GL45.pipeline_statistics_query_tests_ARB.functional_fragment_shader_invocations Vulkan CTS tests : dEQP-VK.query_pool.statistics_query.fragment_shader_invocations.* v2: Use a local to shrink under 80cols. Signed-off-by: Lionel Landwerlin Fixes: 86554f48e511 ("drm/i915/selftests: Verify whitelist of context registers") Tested-by: Anuj Phogat Signed-off-by: Chris Wilson Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190629131350.31185-1-chris@chris-wilson.co.uk (cherry picked from commit 361b69051326ed0e07553315227678d00d651a9e) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/gt/selftest_workarounds.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c index 9eaf030affd0..44becd9538be 100644 --- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c +++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c @@ -925,7 +925,12 @@ check_whitelisted_registers(struct intel_engine_cs *engine, err = 0; for (i = 0; i < engine->whitelist.count; i++) { - if (!fn(engine, a[i], b[i], engine->whitelist.list[i].reg)) + const struct i915_wa *wa = &engine->whitelist.list[i]; + + if (i915_mmio_reg_offset(wa->reg) & RING_FORCE_TO_NONPRIV_RD) + continue; + + if (!fn(engine, a[i], b[i], wa->reg)) err = -EINVAL; } -- cgit v1.2.3 From 6ce5bfe936ac31d5c52c4b1328d0bfda5f97e7ca Mon Sep 17 00:00:00 2001 From: Lionel Landwerlin Date: Fri, 28 Jun 2019 15:07:19 +0300 Subject: drm/i915: whitelist PS_(DEPTH|INVOCATION)_COUNT CFL:C0+ changed the status of those registers which are now blacklisted by default. 
This is breaking a number of CTS tests on GL & Vulkan : KHR-GL45.pipeline_statistics_query_tests_ARB.functional_fragment_shader_invocations (GL) dEQP-VK.query_pool.statistics_query.fragment_shader_invocations.* (Vulkan) v2: Only use one whitelist entry (Lionel) Bspec: 14091 Signed-off-by: Lionel Landwerlin Cc: stable@vger.kernel.org # 6883eab27481: drm/i915: Support flags in whitlist WAs Cc: stable@vger.kernel.org Acked-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190628120720.21682-3-lionel.g.landwerlin@intel.com (cherry picked from commit 2c903da50f5a9522b134e488bd0f92646c46f3c0) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/gt/intel_workarounds.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 50c0060509a6..b26c3549429e 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -1098,10 +1098,25 @@ static void glk_whitelist_build(struct intel_engine_cs *engine) static void cfl_whitelist_build(struct intel_engine_cs *engine) { + struct i915_wa_list *w = &engine->whitelist; + if (engine->class != RENDER_CLASS) return; - gen9_whitelist_build(&engine->whitelist); + gen9_whitelist_build(w); + + /* + * WaAllowPMDepthAndInvocationCountAccessFromUMD:cfl,whl,cml,aml + * + * This covers 4 register which are next to one another : + * - PS_INVOCATION_COUNT + * - PS_INVOCATION_COUNT_UDW + * - PS_DEPTH_COUNT + * - PS_DEPTH_COUNT_UDW + */ + whitelist_reg_ext(w, PS_INVOCATION_COUNT, + RING_FORCE_TO_NONPRIV_RD | + RING_FORCE_TO_NONPRIV_RANGE_4); } static void cnl_whitelist_build(struct intel_engine_cs *engine) -- cgit v1.2.3 From cf8f9aa1eda7d916bd23f6b8c226404deb11690c Mon Sep 17 00:00:00 2001 From: Lionel Landwerlin Date: Fri, 28 Jun 2019 15:07:20 +0300 Subject: drm/i915/icl: whitelist PS_(DEPTH|INVOCATION)_COUNT The same tests failing on CFL+ platforms are also failing on ICL. Documentation doesn't list the WaAllowPMDepthAndInvocationCountAccessFromUMD workaround for ICL but applying it fixes the same tests as CFL. 
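To make the effect of the single entry concrete: the identical call added below for CFL and ICL marks one RING_FORCE_TO_NONPRIV slot as a read-only range of four consecutive registers. A sketch of how it reads, with the meaning of the flags spelled out in comments (the register list is the one quoted in the hunks, not independently checked against Bspec):

        whitelist_reg_ext(w, PS_INVOCATION_COUNT,
                          RING_FORCE_TO_NONPRIV_RD |      /* read-only for userspace */
                          RING_FORCE_TO_NONPRIV_RANGE_4); /* this register + the next 3 */

        /*
         * One slot thus exposes PS_INVOCATION_COUNT,
         * PS_INVOCATION_COUNT_UDW, PS_DEPTH_COUNT and PS_DEPTH_COUNT_UDW
         * to userspace reads while keeping them unwritable, which is also
         * why the selftest fix above skips value checks on RD entries.
         */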
v2: Use only one whitelist entry (Lionel) Signed-off-by: Lionel Landwerlin Tested-by: Anuj Phogat Cc: stable@vger.kernel.org # 6883eab27481: drm/i915: Support flags in whitlist WAs Cc: stable@vger.kernel.org Acked-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190628120720.21682-4-lionel.g.landwerlin@intel.com (cherry picked from commit 3fe0107e45ab396342497e06b8924cdd485cde3b) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/gt/intel_workarounds.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index b26c3549429e..98dfb086320f 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -1144,6 +1144,19 @@ static void icl_whitelist_build(struct intel_engine_cs *engine) /* WaEnableStateCacheRedirectToCS:icl */ whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1); + + /* + * WaAllowPMDepthAndInvocationCountAccessFromUMD:icl + * + * This covers 4 register which are next to one another : + * - PS_INVOCATION_COUNT + * - PS_INVOCATION_COUNT_UDW + * - PS_DEPTH_COUNT + * - PS_DEPTH_COUNT_UDW + */ + whitelist_reg_ext(w, PS_INVOCATION_COUNT, + RING_FORCE_TO_NONPRIV_RD | + RING_FORCE_TO_NONPRIV_RANGE_4); break; case VIDEO_DECODE_CLASS: -- cgit v1.2.3 From fdcc789a4a0bb2ef01857095752be12b03cbb341 Mon Sep 17 00:00:00 2001 From: Mika Kuoppala Date: Mon, 1 Jul 2019 13:44:42 +0300 Subject: drm/i915: Fix memleak in runtime wakeref tracking If we untrack wakerefs, the actual count may reach zero. However the krealloced owners array is still there and needs to be taken care of. Free the owners unconditionally to fix the leak. Fixes: bd780f37a361 ("drm/i915: Track all held rpm wakerefs") Reported-by: Juha-Pekka Heikkila Cc: Juha-Pekka Heikkila Cc: Chris Wilson Signed-off-by: Mika Kuoppala Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190701104442.9319-1-mika.kuoppala@linux.intel.com (cherry picked from commit c5f846eed2a1856b78e988eeef08215c70598ecd) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/intel_runtime_pm.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 502c54428570..8d1aebc3e857 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -221,13 +221,11 @@ __untrack_all_wakerefs(struct intel_runtime_pm_debug *debug, static void dump_and_free_wakeref_tracking(struct intel_runtime_pm_debug *debug) { - struct drm_printer p; + if (debug->count) { + struct drm_printer p = drm_debug_printer("i915"); - if (!debug->count) - return; - - p = drm_debug_printer("i915"); - __print_intel_runtime_pm_wakeref(&p, debug); + __print_intel_runtime_pm_wakeref(&p, debug); + } kfree(debug->owners); } -- cgit v1.2.3 From d1b739f326b960631827f0ea350002c5bc8df443 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Thu, 6 Jun 2019 15:42:10 +0300 Subject: drm/i915: Deal with machines that expose less than three QGV points MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When SAGV is forced to disabled/min/med/max in the BIOS pcode will only hand us a single QGV point instead of the normal three. 
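The fix that follows boils down to recording how many QGV points pcode actually reported and treating any point beyond that as "no limit" rather than 0 MB/s. A minimal sketch using the fields added in the patch (max_bw_for is an illustrative name, not a function in the driver):

        static unsigned int max_bw_for(const struct intel_bw_info *bi,
                                       int qgv_point)
        {
                /* SAGV forced in the BIOS: fewer than 3 points exposed */
                if (qgv_point >= bi->num_qgv_points)
                        return UINT_MAX;        /* missing point, no limit */

                return bi->deratedbw[qgv_point];
        }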
Fix the code to deal with that instead declaring the bandwidth limit to be 0 MB/s (and thus preventing any planes from being enabled). Also shrink the max_bw sturct a bit while at it, and change the deratedbw type to unsigned since the code returns the bw as an unsigned int. Since we now keep track of how many qgv points we got from pcode we can drop the earlier check added for the "pcode doesn't support the memory subsystem query" case. Cc: felix.j.degrood@intel.com Cc: Mark Janes Cc: Matt Roper Cc: Clint Taylor Fixes: c457d9cf256e ("drm/i915: Make sure we have enough memory bandwidth on ICL") Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=110838 Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190606124210.3482-1-ville.syrjala@linux.intel.com Reviewed-by: Matt Roper (cherry picked from commit 56e9371bc3f3e7d6c1a197a45d550b2ce6af25f6) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_bw.c | 15 ++++++++++----- drivers/gpu/drm/i915/i915_drv.h | 5 +++-- 2 files changed, 13 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index 753ac3165061..7b908e10d32e 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -178,6 +178,8 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv) clpchgroup = (sa->deburst * deinterleave / num_channels) << i; bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1; + bi->num_qgv_points = qi.num_points; + for (j = 0; j < qi.num_points; j++) { const struct intel_qgv_point *sp = &qi.points[j]; int ct, bw; @@ -195,7 +197,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv) bi->deratedbw[j] = min(maxdebw, bw * 9 / 10); /* 90% */ - DRM_DEBUG_KMS("BW%d / QGV %d: num_planes=%d deratedbw=%d\n", + DRM_DEBUG_KMS("BW%d / QGV %d: num_planes=%d deratedbw=%u\n", i, j, bi->num_planes, bi->deratedbw[j]); } @@ -211,14 +213,17 @@ static unsigned int icl_max_bw(struct drm_i915_private *dev_priv, { int i; - /* Did we initialize the bw limits successfully? */ - if (dev_priv->max_bw[0].num_planes == 0) - return UINT_MAX; - for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) { const struct intel_bw_info *bi = &dev_priv->max_bw[i]; + /* + * Pcode will not expose all QGV points when + * SAGV is forced to off/min/med/max. + */ + if (qgv_point >= bi->num_qgv_points) + return UINT_MAX; + if (num_planes >= bi->num_planes) return bi->deratedbw[qgv_point]; } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index bc909ec5d9c3..fe7a6ec2c199 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1674,8 +1674,9 @@ struct drm_i915_private { } dram_info; struct intel_bw_info { - int num_planes; - int deratedbw[3]; + unsigned int deratedbw[3]; /* for each QGV point */ + u8 num_qgv_points; + u8 num_planes; } max_bw[6]; struct drm_private_obj bw_obj; -- cgit v1.2.3 From f691eaa4801484fffc8a2bcb24caa27fb2edcce3 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 3 Jul 2019 18:19:12 +0100 Subject: drm/i915/gtt: Defer the free for alloc error paths If we hit an error while allocating the page tables, we have to unwind the incomplete updates, and wish to free the unused pd. However, we are not allowed to be hoding the spinlock at that point, and so must use the later free to defer it until after we drop the lock. 
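Reduced to its shape (schematic, not the full unwind path; 'alloc' is reused from the function's existing deferred-free mechanism, as the GEM_BUG_ON implies):

        spin_lock(&pdp->lock);
        /* ... roll the partial update back ... */
        GEM_BUG_ON(alloc);
        alloc = pd;             /* stash it, freeing may sleep */
        spin_unlock(&pdp->lock);

        /* later, with no spinlock held */
        if (alloc)
                free_pd(vm, alloc);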
<3> [414.363795] BUG: sleeping function called from invalid context at drivers/gpu/drm/i915/i915_gem_gtt.c:472 <3> [414.364167] in_atomic(): 1, irqs_disabled(): 0, pid: 3905, name: i915_selftest <4> [414.364406] 3 locks held by i915_selftest/3905: <4> [414.364408] #0: 0000000034fe8aa8 (&dev->mutex){....}, at: device_driver_attach+0x18/0x50 <4> [414.364415] #1: 000000006bd8a560 (&dev->struct_mutex){+.+.}, at: igt_ctx_exec+0xb7/0x410 [i915] <4> [414.364476] #2: 000000003dfdc766 (&(&pd->lock)->rlock){+.+.}, at: gen8_ppgtt_alloc_pdp+0x448/0x540 [i915] <3> [414.364529] Preemption disabled at: <4> [414.364530] [<0000000000000000>] 0x0 <4> [414.364696] CPU: 0 PID: 3905 Comm: i915_selftest Tainted: G U 5.2.0-rc7-CI-CI_DRM_6403+ #1 <4> [414.364698] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.10.1-0-g8891697-prebuilt.qemu-project.org 04/01/2014 <4> [414.364699] Call Trace: <4> [414.364704] dump_stack+0x67/0x9b <4> [414.364708] ___might_sleep+0x167/0x250 <4> [414.364777] vm_free_page+0x24/0xc0 [i915] <4> [414.364852] free_pd+0xf/0x20 [i915] <4> [414.364897] gen8_ppgtt_alloc_pdp+0x489/0x540 [i915] <4> [414.364946] gen8_ppgtt_alloc_4lvl+0x8e/0x2e0 [i915] <4> [414.364992] ppgtt_bind_vma+0x2e/0x60 [i915] <4> [414.365039] i915_vma_bind+0xe8/0x2c0 [i915] <4> [414.365088] __i915_vma_do_pin+0xa1/0xd20 [i915] Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=111050 Fixes: 1d1b5490b91c ("drm/i915/gtt: Replace struct_mutex serialisation for allocation") Signed-off-by: Chris Wilson Cc: Matthew Auld Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190703171913.16585-3-chris@chris-wilson.co.uk (cherry picked from commit 068610895ebd4bd86f496f01eb7b97e56d7269b2) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/i915_gem_gtt.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 8ab820145ea6..50fe72d40d8b 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1446,7 +1446,8 @@ unwind_pd: gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe); GEM_BUG_ON(!atomic_read(&pdp->used)); atomic_dec(&pdp->used); - free_pd(vm, pd); + GEM_BUG_ON(alloc); + alloc = pd; /* defer the free to after the lock */ } spin_unlock(&pdp->lock); unwind: @@ -1515,7 +1516,8 @@ unwind_pdp: spin_lock(&pml4->lock); if (atomic_dec_and_test(&pdp->used)) { gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e); - free_pd(vm, pdp); + GEM_BUG_ON(alloc); + alloc = pdp; /* defer the free until after the lock */ } spin_unlock(&pml4->lock); unwind: -- cgit v1.2.3 From 5f4c82c89ff0e11b31561aa7e547acb10bf650c2 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 4 Jul 2019 21:16:56 +0100 Subject: drm/i915/gtt: Mark the freed page table entries with scratch On unwinding the allocation error path and having freed the page table entry, it is imperative that we mark it as scratch. 
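Schematically, two views of the same slot have to be reset, because (as the oops below suggests) cleanup walks the driver's own entry[] array rather than the GPU-visible descriptors:

        gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe); /* GPU-visible entry */
        pdp->entry[pdpe] = vm->scratch_pd;              /* CPU-side shadow */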
<4> [416.075569] general protection fault: 0000 [#1] PREEMPT SMP PTI <4> [416.075801] CPU: 0 PID: 2385 Comm: kworker/u2:11 Tainted: G U 5.2.0-rc7-CI-Patchwork_13534+ #1 <4> [416.076162] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.10.1-0-g8891697-prebuilt.qemu-project.org 04/01/2014 <4> [416.076522] Workqueue: i915 __i915_vm_release [i915] <4> [416.076754] RIP: 0010:gen8_ppgtt_cleanup_3lvl+0x58/0xb0 [i915] <4> [416.077023] Code: 81 e2 04 fe ff ff 81 c2 ff 01 00 00 4c 8d 74 d6 58 4d 8b 65 00 4d 3b a7 28 02 00 00 74 40 49 8d 5c 24 50 49 81 c4 50 10 00 00 <48> 8b 2b 49 3b af 20 02 00 00 74 13 4c 89 ff 48 89 ee e8 01 fb ff <4> [416.077445] RSP: 0018:ffffc9000046bd98 EFLAGS: 00010206 <4> [416.077625] RAX: 0001000000000000 RBX: 6b6b6b6b6b6b6bbb RCX: 8b4b56d500000000 <4> [416.077838] RDX: 00000000000001ff RSI: ffff88805a578008 RDI: ffff88805bd0efc8 <4> [416.078167] RBP: ffff88805bd0efc8 R08: 0000000004e42b93 R09: 0000000000000001 <4> [416.078381] R10: 0000000000000000 R11: ffff888077a1b0b8 R12: 6b6b6b6b6b6b7bbb <4> [416.078594] R13: ffff88805a578058 R14: ffff88805a579058 R15: ffff88805bd0efc8 <4> [416.078815] FS: 0000000000000000(0000) GS:ffff88807da00000(0000) knlGS:0000000000000000 <4> [416.079395] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 <4> [416.079851] CR2: 000056160fec2b14 CR3: 0000000071bbc003 CR4: 00000000003606f0 <4> [416.080388] Call Trace: <4> [416.080828] gen8_ppgtt_cleanup+0x64/0x100 [i915] <4> [416.081399] __i915_vm_release+0xfc/0x1d0 [i915] Fixes: 1d1b5490b91c ("drm/i915/gtt: Replace struct_mutex serialisation for allocation") Signed-off-by: Chris Wilson Cc: Matthew Auld Cc: Mika Kuoppala Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190704201656.15775-1-chris@chris-wilson.co.uk (cherry picked from commit e7539b79f703a6b533385088fc15cb5c9ab3f56f) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/i915_gem_gtt.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 50fe72d40d8b..7015a97b1097 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1444,6 +1444,7 @@ unwind_pd: spin_lock(&pdp->lock); if (atomic_dec_and_test(&pd->used)) { gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe); + pdp->entry[pdpe] = vm->scratch_pd; GEM_BUG_ON(!atomic_read(&pdp->used)); atomic_dec(&pdp->used); GEM_BUG_ON(alloc); @@ -1516,6 +1517,7 @@ unwind_pdp: spin_lock(&pml4->lock); if (atomic_dec_and_test(&pdp->used)) { gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e); + pml4->entry[pml4e] = vm->scratch_pdp; GEM_BUG_ON(alloc); alloc = pdp; /* defer the free until after the lock */ } -- cgit v1.2.3 From aa56a292ce623734ddd30f52d73f527d1f3529b5 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 8 Jul 2019 15:03:27 +0100 Subject: drm/i915/userptr: Acquire the page lock around set_page_dirty() set_page_dirty says: For pages with a mapping this should be done under the page lock for the benefit of asynchronous memory errors who prefer a consistent dirty state. This rule can be broken in some special cases, but should be better not to. Under those rules, it is only safe for us to use the plain set_page_dirty calls for shmemfs/anonymous memory. Userptr may be used with real mappings and so needs to use the locked version (set_page_dirty_lock). 
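For reference, set_page_dirty_lock() is roughly the open-coded sequence below; the point of the fix is that the dirtying happens under the page lock, which merely holding a page reference does not give you:

        int set_page_dirty_lock(struct page *page)
        {
                int ret;

                lock_page(page);
                ret = set_page_dirty(page);
                unlock_page(page);

                return ret;
        }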
Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=203317 Fixes: 5cc9ed4b9a7a ("drm/i915: Introduce mapping of user pages into video memory (userptr) ioctl") References: 6dcc693bc57f ("ext4: warn when page is dirtied without buffers") Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Cc: stable@vger.kernel.org Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190708140327.26825-1-chris@chris-wilson.co.uk (cherry picked from commit cb6d7c7dc7ff8cace666ddec66334117a6068ce2) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index 528b61678334..2caa594322bc 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -664,7 +664,15 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj, for_each_sgt_page(page, sgt_iter, pages) { if (obj->mm.dirty) - set_page_dirty(page); + /* + * As this may not be anonymous memory (e.g. shmem) + * but exist on a real mapping, we have to lock + * the page in order to dirty it -- holding + * the page reference is not sufficient to + * prevent the inode from being truncated. + * Play safe and take the lock. + */ + set_page_dirty_lock(page); mark_page_accessed(page); put_page(page); -- cgit v1.2.3 From 06c12ae3b401238477e65e8c4e04e065699a6115 Mon Sep 17 00:00:00 2001 From: Lionel Landwerlin Date: Tue, 9 Jul 2019 15:33:39 +0300 Subject: drm/i915/perf: ensure we keep a reference on the driver The i915 perf stream has its own file descriptor and is tied to reference of the driver. We haven't taken care of keep the driver alive. Signed-off-by: Lionel Landwerlin Suggested-by: Chris Wilson Fixes: eec688e1420da5 ("drm/i915: Add i915 perf infrastructure") Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190709123351.5645-2-lionel.g.landwerlin@intel.com (cherry picked from commit a5af1df716c123a09341351008fc497bea137b77) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/i915_perf.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 1ae06a1b6749..629511ea9a18 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -2515,6 +2515,9 @@ static int i915_perf_release(struct inode *inode, struct file *file) i915_perf_destroy_locked(stream); mutex_unlock(&dev_priv->perf.lock); + /* Release the reference the perf stream kept on the driver. */ + drm_dev_put(&dev_priv->drm); + return 0; } @@ -2650,6 +2653,11 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv, if (!(param->flags & I915_PERF_FLAG_DISABLED)) i915_perf_enable_locked(stream); + /* Take a reference on the driver that will be kept with stream_fd + * until its release. + */ + drm_dev_get(&dev_priv->drm); + return stream_fd; err_open: -- cgit v1.2.3 From 8f48de49795ca52f70c96558ccc6a0c174504779 Mon Sep 17 00:00:00 2001 From: Lionel Landwerlin Date: Wed, 10 Jul 2019 11:55:24 +0100 Subject: drm/i915/perf: add missing delay for OA muxes configuration This was dropped from the original patch series, we weren't sure whether it was needed at the time. More recent tests show it's definitely needed to have acurate performance data. 
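The shape of the change, applied to both the Haswell and gen8+ enable paths (delay_after_mux() is the helper the patch introduces; the 15-20ms range is the empirically derived value its comment documents):

        config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
        delay_after_mux();      /* usleep_range(15000, 20000) */

        config_oa_regs(dev_priv, oa_config->b_counter_regs,
                       oa_config->b_counter_regs_len);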
Signed-off-by: Lionel Landwerlin Fixes: 19f81df2859eb1 ("drm/i915/perf: Add OA unit support for Gen 8+") Acked-by: Chris Wilson [ickle: combine duplicate code and comments] Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190710105524.23017-1-chris@chris-wilson.co.uk (cherry picked from commit 14bfcd3e0daeb0f757a02aac85fd03e0933ab37e) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/i915_perf.c | 49 +++++++++++++++++++++++----------------- 1 file changed, 28 insertions(+), 21 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 629511ea9a18..5140017f9a39 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -1567,28 +1567,10 @@ static void config_oa_regs(struct drm_i915_private *dev_priv, } } -static int hsw_enable_metric_set(struct i915_perf_stream *stream) +static void delay_after_mux(void) { - struct drm_i915_private *dev_priv = stream->dev_priv; - const struct i915_oa_config *oa_config = stream->oa_config; - - /* PRM: - * - * OA unit is using “crclk” for its functionality. When trunk - * level clock gating takes place, OA clock would be gated, - * unable to count the events from non-render clock domain. - * Render clock gating must be disabled when OA is enabled to - * count the events from non-render domain. Unit level clock - * gating for RCS should also be disabled. - */ - I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) & - ~GEN7_DOP_CLOCK_GATE_ENABLE)); - I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) | - GEN6_CSUNIT_CLOCK_GATE_DISABLE)); - - config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len); - - /* It apparently takes a fairly long time for a new MUX + /* + * It apparently takes a fairly long time for a new MUX * configuration to be be applied after these register writes. * This delay duration was derived empirically based on the * render_basic config but hopefully it covers the maximum @@ -1610,6 +1592,30 @@ static int hsw_enable_metric_set(struct i915_perf_stream *stream) * a delay at this location would mitigate any invalid reports. */ usleep_range(15000, 20000); +} + +static int hsw_enable_metric_set(struct i915_perf_stream *stream) +{ + struct drm_i915_private *dev_priv = stream->dev_priv; + const struct i915_oa_config *oa_config = stream->oa_config; + + /* + * PRM: + * + * OA unit is using “crclk” for its functionality. When trunk + * level clock gating takes place, OA clock would be gated, + * unable to count the events from non-render clock domain. + * Render clock gating must be disabled when OA is enabled to + * count the events from non-render domain. Unit level clock + * gating for RCS should also be disabled. 
+ */ + I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) & + ~GEN7_DOP_CLOCK_GATE_ENABLE)); + I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) | + GEN6_CSUNIT_CLOCK_GATE_DISABLE)); + + config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len); + delay_after_mux(); config_oa_regs(dev_priv, oa_config->b_counter_regs, oa_config->b_counter_regs_len); @@ -1835,6 +1841,7 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream) return ret; config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len); + delay_after_mux(); config_oa_regs(dev_priv, oa_config->b_counter_regs, oa_config->b_counter_regs_len); -- cgit v1.2.3 From 982b1d002f16c2695871e005c4132060c836db56 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 15 Jul 2019 09:09:28 +0100 Subject: drm/i915: Lock the engine while dumping the active request We cannot let the request be retired and freed while we are trying to dump it during error capture. It is not sufficient just to grab a reference to the request, as during retirement we may free the ring which we are also dumping. So take the engine lock to prevent retiring and freeing of the request. Reported-by: Alex Shumsky Fixes: 83c317832eb1 ("drm/i915: Dump the ringbuffer of the active request for debugging") Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Cc: Joonas Lahtinen Cc: Alex Shumsky Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190715080946.15593-6-chris@chris-wilson.co.uk (cherry picked from commit cfe7288c276e359eebf057699fe86c2f8af14224) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 11 ++++------- drivers/gpu/drm/i915/i915_gpu_error.c | 6 ++++-- 2 files changed, 8 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 7fd33e81c2d9..aa5a1f11a91b 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -1471,6 +1471,7 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct i915_gpu_error * const error = &engine->i915->gpu_error; struct i915_request *rq; intel_wakeref_t wakeref; + unsigned long flags; if (header) { va_list ap; @@ -1490,10 +1491,9 @@ void intel_engine_dump(struct intel_engine_cs *engine, i915_reset_engine_count(error, engine), i915_reset_count(error)); - rcu_read_lock(); - drm_printf(m, "\tRequests:\n"); + spin_lock_irqsave(&engine->active.lock, flags); rq = intel_engine_find_active_request(engine); if (rq) { print_request(m, rq, "\t\tactive "); @@ -1513,8 +1513,7 @@ void intel_engine_dump(struct intel_engine_cs *engine, print_request_ring(m, rq); } - - rcu_read_unlock(); + spin_unlock_irqrestore(&engine->active.lock, flags); wakeref = intel_runtime_pm_get_if_in_use(&engine->i915->runtime_pm); if (wakeref) { @@ -1672,7 +1671,6 @@ struct i915_request * intel_engine_find_active_request(struct intel_engine_cs *engine) { struct i915_request *request, *active = NULL; - unsigned long flags; /* * We are called by the error capture, reset and to dump engine @@ -1685,7 +1683,7 @@ intel_engine_find_active_request(struct intel_engine_cs *engine) * At all other times, we must assume the GPU is still running, but * we only care about the snapshot of this moment. 
*/ - spin_lock_irqsave(&engine->active.lock, flags); + lockdep_assert_held(&engine->active.lock); list_for_each_entry(request, &engine->active.requests, sched.link) { if (i915_request_completed(request)) continue; @@ -1700,7 +1698,6 @@ intel_engine_find_active_request(struct intel_engine_cs *engine) active = request; break; } - spin_unlock_irqrestore(&engine->active.lock, flags); return active; } diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 41a511d5267f..8bc76fcff70d 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -1418,6 +1418,7 @@ static void gem_record_rings(struct i915_gpu_state *error) struct intel_engine_cs *engine = i915->engine[i]; struct drm_i915_error_engine *ee = &error->engine[i]; struct i915_request *request; + unsigned long flags; ee->engine_id = -1; @@ -1429,10 +1430,11 @@ static void gem_record_rings(struct i915_gpu_state *error) error_record_engine_registers(error, engine, ee); error_record_engine_execlists(engine, ee); + spin_lock_irqsave(&engine->active.lock, flags); request = intel_engine_find_active_request(engine); if (request) { struct i915_gem_context *ctx = request->gem_context; - struct intel_ring *ring; + struct intel_ring *ring = request->ring; ee->vm = ctx->vm ?: &ggtt->vm; @@ -1462,7 +1464,6 @@ static void gem_record_rings(struct i915_gpu_state *error) ee->rq_post = request->postfix; ee->rq_tail = request->tail; - ring = request->ring; ee->cpu_ring_head = ring->head; ee->cpu_ring_tail = ring->tail; ee->ringbuffer = @@ -1470,6 +1471,7 @@ static void gem_record_rings(struct i915_gpu_state *error) engine_record_requests(engine, request, ee); } + spin_unlock_irqrestore(&engine->active.lock, flags); ee->hws_page = i915_error_object_create(i915, -- cgit v1.2.3 From a8f196a0fa6391a436f63f360a1fb57031fdf26c Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 17 Jul 2019 14:45:36 +0300 Subject: drm/i915: Make sure cdclk is high enough for DP audio on VLV/CHV MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On VLV/CHV there is some kind of linkage between the cdclk frequency and the DP link frequency. The spec says: "For DP audio configuration, cdclk frequency shall be set to meet the following requirements: DP Link Frequency(MHz) | Cdclk frequency(MHz) 270 | 320 or higher 162 | 200 or higher" I suspect that would more accurately be expressed as "cdclk >= DP link clock", and in any case we can express it like that in the code because of the limited set of cdclk (200, 266, 320, 400 MHz) and link frequencies (162 and 270 MHz) we support. Without this we can end up in a situation where the cdclk is too low and enabling DP audio will kill the pipe. Happens eg. with 2560x1440 modes where the 266MHz cdclk is sufficient to pump the pixels (241.5 MHz dotclock) but is too low for the DP audio due to the link frequency being 270 MHz. 
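Putting numbers on that example (VLV/CHV cdclk steps are 200/266/320/400 MHz; the check itself is the one added below):

        /*
         * 2560x1440: ~241.5 MHz pixel rate, so a 266 MHz cdclk feeds the
         * pipe fine, but DP audio additionally needs cdclk >= link clock,
         * and the link runs at 270 MHz.
         */
        if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
            intel_crtc_has_dp_encoder(crtc_state) && crtc_state->has_audio)
                min_cdclk = max(crtc_state->port_clock, min_cdclk);
        /* min_cdclk becomes 270000 kHz, which lands on the 320 MHz step */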
v2: Spell out the cdclk and link frequencies we actually support Cc: stable@vger.kernel.org Tested-by: Stefan Gottwald Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=111149 Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190717114536.22937-1-ville.syrjala@linux.intel.com Acked-by: Chris Wilson (cherry picked from commit bffb31f73b29a60ef693842d8744950c2819851d) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_cdclk.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 8993ab283562..0d19bbd08122 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -2239,6 +2239,17 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state) if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9) min_cdclk = max(2 * 96000, min_cdclk); + /* + * "For DP audio configuration, cdclk frequency shall be set to + * meet the following requirements: + * DP Link Frequency(MHz) | Cdclk frequency(MHz) + * 270 | 320 or higher + * 162 | 200 or higher" + */ + if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && + intel_crtc_has_dp_encoder(crtc_state) && crtc_state->has_audio) + min_cdclk = max(crtc_state->port_clock, min_cdclk); + /* * On Valleyview some DSI panels lose (v|h)sync when the clock is lower * than 320000KHz. -- cgit v1.2.3 From 6d61f716a01ec0e134de38ae97e71d6fec5a6ff6 Mon Sep 17 00:00:00 2001 From: Dhinakaran Pandiyan Date: Wed, 17 Jul 2019 15:34:51 -0700 Subject: drm/i915/vbt: Fix VBT parsing for the PSR section MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A single 32-bit PSR2 training pattern field follows the sixteen element array of PSR table entries in the VBT spec. But, we incorrectly define this PSR2 field for each of the PSR table entries. As a result, the PSR1 training pattern duration for any panel_type != 0 will be parsed incorrectly. Secondly, PSR2 training pattern durations for VBTs with bdb version >= 226 will also be wrong. 
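In other words, the wakeup-time field is one packed dword following the whole 16-entry table, not a per-entry member. The corrected layout and the pre-existing 2-bits-per-panel extraction:

        struct bdb_psr {
                struct psr_table psr_table[16];

                /* PSR2 TP2/TP3 wakeup time for 16 panels, 2 bits each */
                u32 psr2_tp2_tp3_wakeup_time;
        } __packed;

        /* e.g. panel_type 5 selects bits [11:10] */
        wakeup_time = (psr->psr2_tp2_tp3_wakeup_time >> (2 * panel_type)) & 0x3;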
Cc: Rodrigo Vivi Cc: José Roberto de Souza Cc: stable@vger.kernel.org Cc: stable@vger.kernel.org #v5.2 Fixes: 88a0d9606aff ("drm/i915/vbt: Parse and use the new field with PSR2 TP2/3 wakeup time") Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=111088 Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=204183 Signed-off-by: Dhinakaran Pandiyan Reviewed-by: Ville Syrjälä Reviewed-by: José Roberto de Souza Acked-by: Rodrigo Vivi Tested-by: François Guerraz Signed-off-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20190717223451.2595-1-dhinakaran.pandiyan@intel.com (cherry picked from commit b5ea9c9337007d6e700280c8a60b4e10d070fb53) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_bios.c | 2 +- drivers/gpu/drm/i915/display/intel_vbt_defs.h | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index c4710889cb32..3ef4e9f573cf 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -765,7 +765,7 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) } if (bdb->version >= 226) { - u32 wakeup_time = psr_table->psr2_tp2_tp3_wakeup_time; + u32 wakeup_time = psr->psr2_tp2_tp3_wakeup_time; wakeup_time = (wakeup_time >> (2 * panel_type)) & 0x3; switch (wakeup_time) { diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h index 2f4894e9a03d..5ddbe71ab423 100644 --- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h @@ -478,13 +478,13 @@ struct psr_table { /* TP wake up time in multiple of 100 */ u16 tp1_wakeup_time; u16 tp2_tp3_wakeup_time; - - /* PSR2 TP2/TP3 wakeup time for 16 panels */ - u32 psr2_tp2_tp3_wakeup_time; } __packed; struct bdb_psr { struct psr_table psr_table[16]; + + /* PSR2 TP2/TP3 wakeup time for 16 panels */ + u32 psr2_tp2_tp3_wakeup_time; } __packed; /* -- cgit v1.2.3 From 0bbfdce345c8cf01a3a985fa99fefd2146dcc748 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Wed, 17 Jul 2019 19:06:19 +0100 Subject: drm/i915: Fix GEN8_MCR_SELECTOR programming fls returns bit positions starting from one for the lsb and the MCR register expects zero based (sub)slice addressing. Incorrent MCR programming can have the effect of directing MMIO reads of registers in the 0xb100-0xb3ff range to invalid subslice returning zeroes instead of actual content. 
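The root cause is simply that fls() is 1-based while the GEN8_MCR_SLICE()/GEN8_MCR_SUBSLICE() fields want 0-based indices; for example fls(0x1) == 1 but the register expects slice 0. Hence the fix below:

        slice    = fls(sseu->slice_mask) - 1;           /* mask 0x1 -> slice 0 */
        subslice = fls(sseu->subslice_mask[slice]);     /* still 1-based */
        GEM_BUG_ON(!subslice);                          /* empty mask would underflow */
        subslice--;                                     /* make it 0-based */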
Signed-off-by: Tvrtko Ursulin Fixes: 1e40d4aea57b ("drm/i915/cnl: Implement WaProgramMgsrForCorrectSliceSpecificMmioReads") Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190717180624.20354-2-tvrtko.ursulin@linux.intel.com (cherry picked from commit 15160879d47213c32f357bc67b6014d9aaf14ed7) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index aa5a1f11a91b..f25632c9b292 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -969,9 +969,14 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type) u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv) { const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; + unsigned int slice = fls(sseu->slice_mask) - 1; + unsigned int subslice; u32 mcr_s_ss_select; - u32 slice = fls(sseu->slice_mask); - u32 subslice = fls(sseu->subslice_mask[slice]); + + GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask)); + subslice = fls(sseu->subslice_mask[slice]); + GEM_BUG_ON(!subslice); + subslice--; if (IS_GEN(dev_priv, 10)) mcr_s_ss_select = GEN8_MCR_SLICE(slice) | -- cgit v1.2.3 From 89f5752307cf53010d97503ac501b2ca1b089922 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 28 Jun 2019 17:36:18 +0300 Subject: drm/i915: Fix the TBT AUX power well enabling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix the mapping from a TBT AUX power well index to the DP_AUX_CH_CTL register. Fixes: c7375d9542f1 ("drm/i915: Configure AUX_CH_CTL when enabling the AUX power domain") Cc: José Roberto de Souza Cc: Rodrigo Vivi Signed-off-by: Imre Deak Reviewed-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190628143635.22066-7-imre.deak@intel.com (cherry picked from commit 29ae36abf08f943b76a2959f5000c44efa335be7) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_display_power.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index c93ad512014c..2d1939db108f 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -438,16 +438,23 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, #define ICL_AUX_PW_TO_CH(pw_idx) \ ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A) +#define ICL_TBT_AUX_PW_TO_CH(pw_idx) \ + ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C) + static void icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - enum aux_ch aux_ch = ICL_AUX_PW_TO_CH(power_well->desc->hsw.idx); + int pw_idx = power_well->desc->hsw.idx; + bool is_tbt = power_well->desc->hsw.is_tc_tbt; + enum aux_ch aux_ch; u32 val; + aux_ch = is_tbt ? 
ICL_TBT_AUX_PW_TO_CH(pw_idx) : + ICL_AUX_PW_TO_CH(pw_idx); val = I915_READ(DP_AUX_CH_CTL(aux_ch)); val &= ~DP_AUX_CH_CTL_TBT_IO; - if (power_well->desc->hsw.is_tc_tbt) + if (is_tbt) val |= DP_AUX_CH_CTL_TBT_IO; I915_WRITE(DP_AUX_CH_CTL(aux_ch), val); -- cgit v1.2.3 From 60a4233a4952729089e4df152e730f8f4d0e82ce Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 29 Jul 2019 14:24:12 +0100 Subject: drm/i915: Flush the i915_vm_release before ggtt shutdown As the gen6_ppgtt may refer back to the GGTT for their page-directory slots, make sure those __i915_vm_release are completed prior to shutting down the GGTT. Fixes: b32fa8111563 ("drm/i915/gtt: Defer address space cleanup to an RCU worker") Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190729132412.23380-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_gtt.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 4dd1fa956143..423c890d03bf 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2721,6 +2721,9 @@ static void ggtt_cleanup_hw(struct i915_ggtt *ggtt) ggtt->vm.closed = true; + rcu_barrier(); /* flush the RCU'ed__i915_vm_release */ + flush_workqueue(i915->wq); + mutex_lock(&i915->drm.struct_mutex); list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) -- cgit v1.2.3 From 05aed9412b0bd0d9a985d94010c42ff0a5c6cc29 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 2 Jul 2019 16:44:25 +0100 Subject: iommu/io-pgtable: Hook up ->tlb_flush_walk() and ->tlb_flush_leaf() in drivers Hook up ->tlb_flush_walk() and ->tlb_flush_leaf() in drivers using the io-pgtable API so that we can start making use of them in the page-table code. For now, they can just wrap the implementations of ->tlb_add_flush and ->tlb_sync pending future optimisation in each driver. 
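Each driver's new callbacks follow the same template (foo_ is a placeholder prefix; the hunks below spell it out per driver):

        static void foo_tlb_flush_walk(unsigned long iova, size_t size,
                                       size_t granule, void *cookie)
        {
                foo_tlb_add_flush(iova, size, granule, false, cookie);  /* !leaf */
                foo_tlb_sync(cookie);
        }

        static void foo_tlb_flush_leaf(unsigned long iova, size_t size,
                                       size_t granule, void *cookie)
        {
                foo_tlb_add_flush(iova, size, granule, true, cookie);   /* leaf */
                foo_tlb_sync(cookie);
        }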
Signed-off-by: Will Deacon --- drivers/gpu/drm/panfrost/panfrost_mmu.c | 14 ++++++++++++++ drivers/iommu/arm-smmu-v3.c | 22 ++++++++++++++++++++++ drivers/iommu/arm-smmu.c | 24 ++++++++++++++++++++++++ drivers/iommu/ipmmu-vmsa.c | 8 ++++++++ drivers/iommu/msm_iommu.c | 16 ++++++++++++++++ drivers/iommu/mtk_iommu.c | 16 ++++++++++++++++ drivers/iommu/qcom_iommu.c | 16 ++++++++++++++++ 7 files changed, 116 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index 17bceb11e708..651858147bd6 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -257,8 +257,22 @@ static void mmu_tlb_sync_context(void *cookie) // TODO: Wait 1000 GPU cycles for HW_ISSUE_6367/T60X } +static void mmu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule, + void *cookie) +{ + mmu_tlb_sync_context(cookie); +} + +static void mmu_tlb_flush_leaf(unsigned long iova, size_t size, size_t granule, + void *cookie) +{ + mmu_tlb_sync_context(cookie); +} + static const struct iommu_flush_ops mmu_tlb_ops = { .tlb_flush_all = mmu_tlb_inv_context_s1, + .tlb_flush_walk = mmu_tlb_flush_walk, + .tlb_flush_leaf = mmu_tlb_flush_leaf, .tlb_add_flush = mmu_tlb_inv_range_nosync, .tlb_sync = mmu_tlb_sync_context, }; diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 80753b8ca054..79819b003b07 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -1603,8 +1603,30 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size, } while (size -= granule); } +static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size, + size_t granule, void *cookie) +{ + struct arm_smmu_domain *smmu_domain = cookie; + struct arm_smmu_device *smmu = smmu_domain->smmu; + + arm_smmu_tlb_inv_range_nosync(iova, size, granule, false, cookie); + arm_smmu_cmdq_issue_sync(smmu); +} + +static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size, + size_t granule, void *cookie) +{ + struct arm_smmu_domain *smmu_domain = cookie; + struct arm_smmu_device *smmu = smmu_domain->smmu; + + arm_smmu_tlb_inv_range_nosync(iova, size, granule, true, cookie); + arm_smmu_cmdq_issue_sync(smmu); +} + static const struct iommu_flush_ops arm_smmu_flush_ops = { .tlb_flush_all = arm_smmu_tlb_inv_context, + .tlb_flush_walk = arm_smmu_tlb_inv_walk, + .tlb_flush_leaf = arm_smmu_tlb_inv_leaf, .tlb_add_flush = arm_smmu_tlb_inv_range_nosync, .tlb_sync = arm_smmu_tlb_sync, }; diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index e535ae2a9e65..e9f01b860ae3 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -547,20 +547,44 @@ static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size, writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID); } +static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size, + size_t granule, void *cookie) +{ + struct arm_smmu_domain *smmu_domain = cookie; + + smmu_domain->tlb_ops->tlb_add_flush(iova, size, granule, false, cookie); + smmu_domain->tlb_ops->tlb_sync(cookie); +} + +static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size, + size_t granule, void *cookie) +{ + struct arm_smmu_domain *smmu_domain = cookie; + + smmu_domain->tlb_ops->tlb_add_flush(iova, size, granule, true, cookie); + smmu_domain->tlb_ops->tlb_sync(cookie); +} + static const struct iommu_flush_ops arm_smmu_s1_tlb_ops = { .tlb_flush_all = arm_smmu_tlb_inv_context_s1, + .tlb_flush_walk = 
arm_smmu_tlb_inv_walk, + .tlb_flush_leaf = arm_smmu_tlb_inv_leaf, .tlb_add_flush = arm_smmu_tlb_inv_range_nosync, .tlb_sync = arm_smmu_tlb_sync_context, }; static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v2 = { .tlb_flush_all = arm_smmu_tlb_inv_context_s2, + .tlb_flush_walk = arm_smmu_tlb_inv_walk, + .tlb_flush_leaf = arm_smmu_tlb_inv_leaf, .tlb_add_flush = arm_smmu_tlb_inv_range_nosync, .tlb_sync = arm_smmu_tlb_sync_context, }; static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = { .tlb_flush_all = arm_smmu_tlb_inv_context_s2, + .tlb_flush_walk = arm_smmu_tlb_inv_walk, + .tlb_flush_leaf = arm_smmu_tlb_inv_leaf, .tlb_add_flush = arm_smmu_tlb_inv_vmid_nosync, .tlb_sync = arm_smmu_tlb_sync_vmid, }; diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index a9332b893ce2..9cc7bcb7e39d 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -361,6 +361,12 @@ static void ipmmu_tlb_flush_all(void *cookie) ipmmu_tlb_invalidate(domain); } +static void ipmmu_tlb_flush(unsigned long iova, size_t size, + size_t granule, void *cookie) +{ + ipmmu_tlb_flush_all(cookie); +} + static void ipmmu_tlb_add_flush(unsigned long iova, size_t size, size_t granule, bool leaf, void *cookie) { @@ -369,6 +375,8 @@ static void ipmmu_tlb_add_flush(unsigned long iova, size_t size, static const struct iommu_flush_ops ipmmu_flush_ops = { .tlb_flush_all = ipmmu_tlb_flush_all, + .tlb_flush_walk = ipmmu_tlb_flush, + .tlb_flush_leaf = ipmmu_tlb_flush, .tlb_add_flush = ipmmu_tlb_add_flush, .tlb_sync = ipmmu_tlb_flush_all, }; diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index 681ab3d3376d..64132093751a 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c @@ -178,8 +178,24 @@ static void __flush_iotlb_sync(void *cookie) */ } +static void __flush_iotlb_walk(unsigned long iova, size_t size, + size_t granule, void *cookie) +{ + __flush_iotlb_range(iova, size, granule, false, cookie); + __flush_iotlb_sync(cookie); +} + +static void __flush_iotlb_leaf(unsigned long iova, size_t size, + size_t granule, void *cookie) +{ + __flush_iotlb_range(iova, size, granule, true, cookie); + __flush_iotlb_sync(cookie); +} + static const struct iommu_flush_ops msm_iommu_flush_ops = { .tlb_flush_all = __flush_iotlb, + .tlb_flush_walk = __flush_iotlb_walk, + .tlb_flush_leaf = __flush_iotlb_leaf, .tlb_add_flush = __flush_iotlb_range, .tlb_sync = __flush_iotlb_sync, }; diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index c870f1674903..85a7176bf9ae 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -188,8 +188,24 @@ static void mtk_iommu_tlb_sync(void *cookie) } } +static void mtk_iommu_tlb_flush_walk(unsigned long iova, size_t size, + size_t granule, void *cookie) +{ + mtk_iommu_tlb_add_flush_nosync(iova, size, granule, false, cookie); + mtk_iommu_tlb_sync(cookie); +} + +static void mtk_iommu_tlb_flush_leaf(unsigned long iova, size_t size, + size_t granule, void *cookie) +{ + mtk_iommu_tlb_add_flush_nosync(iova, size, granule, true, cookie); + mtk_iommu_tlb_sync(cookie); +} + static const struct iommu_flush_ops mtk_iommu_flush_ops = { .tlb_flush_all = mtk_iommu_tlb_flush_all, + .tlb_flush_walk = mtk_iommu_tlb_flush_walk, + .tlb_flush_leaf = mtk_iommu_tlb_flush_leaf, .tlb_add_flush = mtk_iommu_tlb_add_flush_nosync, .tlb_sync = mtk_iommu_tlb_sync, }; diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c index a7432991fa04..643079e52e69 100644 --- a/drivers/iommu/qcom_iommu.c +++ b/drivers/iommu/qcom_iommu.c 
@@ -164,8 +164,24 @@ static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size, } } +static void qcom_iommu_tlb_flush_walk(unsigned long iova, size_t size, + size_t granule, void *cookie) +{ + qcom_iommu_tlb_inv_range_nosync(iova, size, granule, false, cookie); + qcom_iommu_tlb_sync(cookie); +} + +static void qcom_iommu_tlb_flush_leaf(unsigned long iova, size_t size, + size_t granule, void *cookie) +{ + qcom_iommu_tlb_inv_range_nosync(iova, size, granule, true, cookie); + qcom_iommu_tlb_sync(cookie); +} + static const struct iommu_flush_ops qcom_flush_ops = { .tlb_flush_all = qcom_iommu_tlb_inv_context, + .tlb_flush_walk = qcom_iommu_tlb_flush_walk, + .tlb_flush_leaf = qcom_iommu_tlb_flush_leaf, .tlb_add_flush = qcom_iommu_tlb_inv_range_nosync, .tlb_sync = qcom_iommu_tlb_sync, }; -- cgit v1.2.3 From abfd6fe0cd535d31ee83b668be6eb59ce6a8469d Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 2 Jul 2019 16:44:41 +0100 Subject: iommu/io-pgtable: Replace ->tlb_add_flush() with ->tlb_add_page() The ->tlb_add_flush() callback in the io-pgtable API now looks a bit silly: - It takes a size and a granule, which are always the same - It takes a 'bool leaf', which is always true - It only ever flushes a single page With that in mind, replace it with an optional ->tlb_add_page() callback that drops the useless parameters. Signed-off-by: Will Deacon --- drivers/gpu/drm/panfrost/panfrost_mmu.c | 5 -- drivers/iommu/arm-smmu-v3.c | 8 ++- drivers/iommu/arm-smmu.c | 88 +++++++++++++++++++++------------ drivers/iommu/io-pgtable-arm-v7s.c | 12 ++--- drivers/iommu/io-pgtable-arm.c | 11 ++--- drivers/iommu/ipmmu-vmsa.c | 7 --- drivers/iommu/msm_iommu.c | 7 ++- drivers/iommu/mtk_iommu.c | 8 ++- drivers/iommu/qcom_iommu.c | 8 ++- include/linux/io-pgtable.h | 22 ++++----- 10 files changed, 105 insertions(+), 71 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index 651858147bd6..ff9af320cacc 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -247,10 +247,6 @@ static void mmu_tlb_inv_context_s1(void *cookie) mmu_hw_do_operation(pfdev, 0, 0, ~0UL, AS_COMMAND_FLUSH_MEM); } -static void mmu_tlb_inv_range_nosync(unsigned long iova, size_t size, - size_t granule, bool leaf, void *cookie) -{} - static void mmu_tlb_sync_context(void *cookie) { //struct panfrost_device *pfdev = cookie; @@ -273,7 +269,6 @@ static const struct iommu_flush_ops mmu_tlb_ops = { .tlb_flush_all = mmu_tlb_inv_context_s1, .tlb_flush_walk = mmu_tlb_flush_walk, .tlb_flush_leaf = mmu_tlb_flush_leaf, - .tlb_add_flush = mmu_tlb_inv_range_nosync, .tlb_sync = mmu_tlb_sync_context, }; diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 79819b003b07..98c90a1b4b22 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -1603,6 +1603,12 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size, } while (size -= granule); } +static void arm_smmu_tlb_inv_page_nosync(unsigned long iova, size_t granule, + void *cookie) +{ + arm_smmu_tlb_inv_range_nosync(iova, granule, granule, true, cookie); +} + static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size, size_t granule, void *cookie) { @@ -1627,7 +1633,7 @@ static const struct iommu_flush_ops arm_smmu_flush_ops = { .tlb_flush_all = arm_smmu_tlb_inv_context, .tlb_flush_walk = arm_smmu_tlb_inv_walk, .tlb_flush_leaf = arm_smmu_tlb_inv_leaf, - .tlb_add_flush = 
arm_smmu_tlb_inv_range_nosync, + .tlb_add_page = arm_smmu_tlb_inv_page_nosync, .tlb_sync = arm_smmu_tlb_sync, }; diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index e9f01b860ae3..f056164a94b0 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -248,10 +248,16 @@ enum arm_smmu_domain_stage { ARM_SMMU_DOMAIN_BYPASS, }; +struct arm_smmu_flush_ops { + struct iommu_flush_ops tlb; + void (*tlb_inv_range)(unsigned long iova, size_t size, size_t granule, + bool leaf, void *cookie) +}; + struct arm_smmu_domain { struct arm_smmu_device *smmu; struct io_pgtable_ops *pgtbl_ops; - const struct iommu_flush_ops *tlb_ops; + const struct arm_smmu_flush_ops *flush_ops; struct arm_smmu_cfg cfg; enum arm_smmu_domain_stage stage; bool non_strict; @@ -551,42 +557,62 @@ static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size, size_t granule, void *cookie) { struct arm_smmu_domain *smmu_domain = cookie; + const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops; - smmu_domain->tlb_ops->tlb_add_flush(iova, size, granule, false, cookie); - smmu_domain->tlb_ops->tlb_sync(cookie); + ops->tlb_inv_range(iova, size, granule, false, cookie); + ops->tlb.tlb_sync(cookie); } static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size, size_t granule, void *cookie) { struct arm_smmu_domain *smmu_domain = cookie; + const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops; + + ops->tlb_inv_range(iova, size, granule, true, cookie); + ops->tlb.tlb_sync(cookie); +} + +static void arm_smmu_tlb_add_page(unsigned long iova, size_t granule, + void *cookie) +{ + struct arm_smmu_domain *smmu_domain = cookie; + const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops; - smmu_domain->tlb_ops->tlb_add_flush(iova, size, granule, true, cookie); - smmu_domain->tlb_ops->tlb_sync(cookie); + ops->tlb_inv_range(iova, granule, granule, true, cookie); } -static const struct iommu_flush_ops arm_smmu_s1_tlb_ops = { - .tlb_flush_all = arm_smmu_tlb_inv_context_s1, - .tlb_flush_walk = arm_smmu_tlb_inv_walk, - .tlb_flush_leaf = arm_smmu_tlb_inv_leaf, - .tlb_add_flush = arm_smmu_tlb_inv_range_nosync, - .tlb_sync = arm_smmu_tlb_sync_context, +static const struct arm_smmu_flush_ops arm_smmu_s1_tlb_ops = { + .tlb = { + .tlb_flush_all = arm_smmu_tlb_inv_context_s1, + .tlb_flush_walk = arm_smmu_tlb_inv_walk, + .tlb_flush_leaf = arm_smmu_tlb_inv_leaf, + .tlb_add_page = arm_smmu_tlb_add_page, + .tlb_sync = arm_smmu_tlb_sync_context, + }, + .tlb_inv_range = arm_smmu_tlb_inv_range_nosync, }; -static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v2 = { - .tlb_flush_all = arm_smmu_tlb_inv_context_s2, - .tlb_flush_walk = arm_smmu_tlb_inv_walk, - .tlb_flush_leaf = arm_smmu_tlb_inv_leaf, - .tlb_add_flush = arm_smmu_tlb_inv_range_nosync, - .tlb_sync = arm_smmu_tlb_sync_context, +static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v2 = { + .tlb = { + .tlb_flush_all = arm_smmu_tlb_inv_context_s2, + .tlb_flush_walk = arm_smmu_tlb_inv_walk, + .tlb_flush_leaf = arm_smmu_tlb_inv_leaf, + .tlb_add_page = arm_smmu_tlb_add_page, + .tlb_sync = arm_smmu_tlb_sync_context, + }, + .tlb_inv_range = arm_smmu_tlb_inv_range_nosync, }; -static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = { - .tlb_flush_all = arm_smmu_tlb_inv_context_s2, - .tlb_flush_walk = arm_smmu_tlb_inv_walk, - .tlb_flush_leaf = arm_smmu_tlb_inv_leaf, - .tlb_add_flush = arm_smmu_tlb_inv_vmid_nosync, - .tlb_sync = arm_smmu_tlb_sync_vmid, +static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v1 = { + .tlb = { + .tlb_flush_all = 
arm_smmu_tlb_inv_context_s2, + .tlb_flush_walk = arm_smmu_tlb_inv_walk, + .tlb_flush_leaf = arm_smmu_tlb_inv_leaf, + .tlb_add_page = arm_smmu_tlb_add_page, + .tlb_sync = arm_smmu_tlb_sync_vmid, + }, + .tlb_inv_range = arm_smmu_tlb_inv_vmid_nosync, }; static irqreturn_t arm_smmu_context_fault(int irq, void *dev) @@ -866,7 +892,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, ias = min(ias, 32UL); oas = min(oas, 32UL); } - smmu_domain->tlb_ops = &arm_smmu_s1_tlb_ops; + smmu_domain->flush_ops = &arm_smmu_s1_tlb_ops; break; case ARM_SMMU_DOMAIN_NESTED: /* @@ -886,9 +912,9 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, oas = min(oas, 40UL); } if (smmu->version == ARM_SMMU_V2) - smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v2; + smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v2; else - smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v1; + smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v1; break; default: ret = -EINVAL; @@ -917,7 +943,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, .ias = ias, .oas = oas, .coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK, - .tlb = smmu_domain->tlb_ops, + .tlb = &smmu_domain->flush_ops->tlb, .iommu_dev = smmu->dev, }; @@ -1346,9 +1372,9 @@ static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain) struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); struct arm_smmu_device *smmu = smmu_domain->smmu; - if (smmu_domain->tlb_ops) { + if (smmu_domain->flush_ops) { arm_smmu_rpm_get(smmu); - smmu_domain->tlb_ops->tlb_flush_all(smmu_domain); + smmu_domain->flush_ops->tlb.tlb_flush_all(smmu_domain); arm_smmu_rpm_put(smmu); } } @@ -1359,9 +1385,9 @@ static void arm_smmu_iotlb_sync(struct iommu_domain *domain, struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); struct arm_smmu_device *smmu = smmu_domain->smmu; - if (smmu_domain->tlb_ops) { + if (smmu_domain->flush_ops) { arm_smmu_rpm_get(smmu); - smmu_domain->tlb_ops->tlb_sync(smmu_domain); + smmu_domain->flush_ops->tlb.tlb_sync(smmu_domain); arm_smmu_rpm_put(smmu); } } diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index 8d4914fe73bc..b3f975c95f76 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -584,7 +584,7 @@ static size_t arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data, return __arm_v7s_unmap(data, iova, size, 2, tablep); } - io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true); + io_pgtable_tlb_add_page(&data->iop, iova, size); return size; } @@ -647,8 +647,7 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data, */ smp_wmb(); } else { - io_pgtable_tlb_add_flush(iop, iova, blk_size, - blk_size, true); + io_pgtable_tlb_add_page(iop, iova, blk_size); } iova += blk_size; } @@ -809,10 +808,9 @@ static void dummy_tlb_flush(unsigned long iova, size_t size, size_t granule, WARN_ON(!(size & cfg_cookie->pgsize_bitmap)); } -static void dummy_tlb_add_flush(unsigned long iova, size_t size, - size_t granule, bool leaf, void *cookie) +static void dummy_tlb_add_page(unsigned long iova, size_t granule, void *cookie) { - dummy_tlb_flush(iova, size, granule, cookie); + dummy_tlb_flush(iova, granule, granule, cookie); } static void dummy_tlb_sync(void *cookie) @@ -824,7 +822,7 @@ static const struct iommu_flush_ops dummy_tlb_ops = { .tlb_flush_all = dummy_tlb_flush_all, .tlb_flush_walk = dummy_tlb_flush, .tlb_flush_leaf = dummy_tlb_flush, - .tlb_add_flush = dummy_tlb_add_flush, + .tlb_add_page = dummy_tlb_add_page, .tlb_sync = 
dummy_tlb_sync, }; diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index b58338c86323..a5c0db01533e 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -582,7 +582,7 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data, tablep = iopte_deref(pte, data); } else if (unmap_idx >= 0) { - io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true); + io_pgtable_tlb_add_page(&data->iop, iova, size); return size; } @@ -623,7 +623,7 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, */ smp_wmb(); } else { - io_pgtable_tlb_add_flush(iop, iova, size, size, true); + io_pgtable_tlb_add_page(iop, iova, size); } return size; @@ -1075,10 +1075,9 @@ static void dummy_tlb_flush(unsigned long iova, size_t size, size_t granule, WARN_ON(!(size & cfg_cookie->pgsize_bitmap)); } -static void dummy_tlb_add_flush(unsigned long iova, size_t size, - size_t granule, bool leaf, void *cookie) +static void dummy_tlb_add_page(unsigned long iova, size_t granule, void *cookie) { - dummy_tlb_flush(iova, size, granule, cookie); + dummy_tlb_flush(iova, granule, granule, cookie); } static void dummy_tlb_sync(void *cookie) @@ -1090,7 +1089,7 @@ static const struct iommu_flush_ops dummy_tlb_ops __initconst = { .tlb_flush_all = dummy_tlb_flush_all, .tlb_flush_walk = dummy_tlb_flush, .tlb_flush_leaf = dummy_tlb_flush, - .tlb_add_flush = dummy_tlb_add_flush, + .tlb_add_page = dummy_tlb_add_page, .tlb_sync = dummy_tlb_sync, }; diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 9cc7bcb7e39d..c4da271af90e 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -367,17 +367,10 @@ static void ipmmu_tlb_flush(unsigned long iova, size_t size, ipmmu_tlb_flush_all(cookie); } -static void ipmmu_tlb_add_flush(unsigned long iova, size_t size, - size_t granule, bool leaf, void *cookie) -{ - /* The hardware doesn't support selective TLB flush. 
*/ -} - static const struct iommu_flush_ops ipmmu_flush_ops = { .tlb_flush_all = ipmmu_tlb_flush_all, .tlb_flush_walk = ipmmu_tlb_flush, .tlb_flush_leaf = ipmmu_tlb_flush, - .tlb_add_flush = ipmmu_tlb_add_flush, .tlb_sync = ipmmu_tlb_flush_all, }; diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index 64132093751a..2cd83295a841 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c @@ -192,11 +192,16 @@ static void __flush_iotlb_leaf(unsigned long iova, size_t size, __flush_iotlb_sync(cookie); } +static void __flush_iotlb_page(unsigned long iova, size_t granule, void *cookie) +{ + __flush_iotlb_range(iova, granule, granule, true, cookie); +} + static const struct iommu_flush_ops msm_iommu_flush_ops = { .tlb_flush_all = __flush_iotlb, .tlb_flush_walk = __flush_iotlb_walk, .tlb_flush_leaf = __flush_iotlb_leaf, - .tlb_add_flush = __flush_iotlb_range, + .tlb_add_page = __flush_iotlb_page, .tlb_sync = __flush_iotlb_sync, }; diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 85a7176bf9ae..a0b4b4dc4b90 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -202,11 +202,17 @@ static void mtk_iommu_tlb_flush_leaf(unsigned long iova, size_t size, mtk_iommu_tlb_sync(cookie); } +static void mtk_iommu_tlb_flush_page_nosync(unsigned long iova, size_t granule, + void *cookie) +{ + mtk_iommu_tlb_add_flush_nosync(iova, granule, granule, true, cookie); +} + static const struct iommu_flush_ops mtk_iommu_flush_ops = { .tlb_flush_all = mtk_iommu_tlb_flush_all, .tlb_flush_walk = mtk_iommu_tlb_flush_walk, .tlb_flush_leaf = mtk_iommu_tlb_flush_leaf, - .tlb_add_flush = mtk_iommu_tlb_add_flush_nosync, + .tlb_add_page = mtk_iommu_tlb_flush_page_nosync, .tlb_sync = mtk_iommu_tlb_sync, }; diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c index 643079e52e69..7d8411dee4cf 100644 --- a/drivers/iommu/qcom_iommu.c +++ b/drivers/iommu/qcom_iommu.c @@ -178,11 +178,17 @@ static void qcom_iommu_tlb_flush_leaf(unsigned long iova, size_t size, qcom_iommu_tlb_sync(cookie); } +static void qcom_iommu_tlb_add_page(unsigned long iova, size_t granule, + void *cookie) +{ + qcom_iommu_tlb_inv_range_nosync(iova, granule, granule, true, cookie); +} + static const struct iommu_flush_ops qcom_flush_ops = { .tlb_flush_all = qcom_iommu_tlb_inv_context, .tlb_flush_walk = qcom_iommu_tlb_flush_walk, .tlb_flush_leaf = qcom_iommu_tlb_flush_leaf, - .tlb_add_flush = qcom_iommu_tlb_inv_range_nosync, + .tlb_add_page = qcom_iommu_tlb_add_page, .tlb_sync = qcom_iommu_tlb_sync, }; diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index 0618aac59e74..99e04bd2baa1 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -25,12 +25,11 @@ enum io_pgtable_fmt { * address range. * @tlb_flush_leaf: Synchronously invalidate all leaf TLB state for a virtual * address range. - * @tlb_add_flush: Optional callback to queue up leaf TLB invalidation for a - * virtual address range. This function exists purely as an - * optimisation for IOMMUs that cannot batch TLB invalidation - * operations efficiently and are therefore better suited to - * issuing them early rather than deferring them until - * iommu_tlb_sync(). + * @tlb_add_page: Optional callback to queue up leaf TLB invalidation for a + * single page. This function exists purely as an optimisation + * for IOMMUs that cannot batch TLB invalidation operations + * efficiently and are therefore better suited to issuing them + * early rather than deferring them until iommu_tlb_sync(). 
* @tlb_sync: Ensure any queued TLB invalidation has taken effect, and * any corresponding page table updates are visible to the * IOMMU. @@ -44,8 +43,7 @@ struct iommu_flush_ops { void *cookie); void (*tlb_flush_leaf)(unsigned long iova, size_t size, size_t granule, void *cookie); - void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule, - bool leaf, void *cookie); + void (*tlb_add_page)(unsigned long iova, size_t granule, void *cookie); void (*tlb_sync)(void *cookie); }; @@ -212,10 +210,12 @@ io_pgtable_tlb_flush_leaf(struct io_pgtable *iop, unsigned long iova, iop->cfg.tlb->tlb_flush_leaf(iova, size, granule, iop->cookie); } -static inline void io_pgtable_tlb_add_flush(struct io_pgtable *iop, - unsigned long iova, size_t size, size_t granule, bool leaf) +static inline void +io_pgtable_tlb_add_page(struct io_pgtable *iop, unsigned long iova, + size_t granule) { - iop->cfg.tlb->tlb_add_flush(iova, size, granule, leaf, iop->cookie); + if (iop->cfg.tlb->tlb_add_page) + iop->cfg.tlb->tlb_add_page(iova, granule, iop->cookie); } static inline void io_pgtable_tlb_sync(struct io_pgtable *iop) -- cgit v1.2.3 From e953f7f2fa78d1c7fd064171f88457c6b1e21af9 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 2 Jul 2019 16:44:50 +0100 Subject: iommu/io-pgtable: Remove unused ->tlb_sync() callback The ->tlb_sync() callback is no longer used, so it can be removed. Signed-off-by: Will Deacon --- drivers/gpu/drm/panfrost/panfrost_mmu.c | 1 - drivers/iommu/arm-smmu-v3.c | 8 -------- drivers/iommu/arm-smmu.c | 17 +++++++++-------- drivers/iommu/io-pgtable-arm-v7s.c | 6 ------ drivers/iommu/io-pgtable-arm.c | 6 ------ drivers/iommu/ipmmu-vmsa.c | 1 - drivers/iommu/msm_iommu.c | 20 +++++++------------- drivers/iommu/mtk_iommu.c | 1 - drivers/iommu/qcom_iommu.c | 1 - include/linux/io-pgtable.h | 9 --------- 10 files changed, 16 insertions(+), 54 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index ff9af320cacc..de22a2276e00 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -269,7 +269,6 @@ static const struct iommu_flush_ops mmu_tlb_ops = { .tlb_flush_all = mmu_tlb_inv_context_s1, .tlb_flush_walk = mmu_tlb_flush_walk, .tlb_flush_leaf = mmu_tlb_flush_leaf, - .tlb_sync = mmu_tlb_sync_context, }; static const char *access_type_name(struct panfrost_device *pfdev, diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 98c90a1b4b22..231093413ff9 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -1545,13 +1545,6 @@ static int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, } /* IO_PGTABLE API */ -static void arm_smmu_tlb_sync(void *cookie) -{ - struct arm_smmu_domain *smmu_domain = cookie; - - arm_smmu_cmdq_issue_sync(smmu_domain->smmu); -} - static void arm_smmu_tlb_inv_context(void *cookie) { struct arm_smmu_domain *smmu_domain = cookie; @@ -1634,7 +1627,6 @@ static const struct iommu_flush_ops arm_smmu_flush_ops = { .tlb_flush_walk = arm_smmu_tlb_inv_walk, .tlb_flush_leaf = arm_smmu_tlb_inv_leaf, .tlb_add_page = arm_smmu_tlb_inv_page_nosync, - .tlb_sync = arm_smmu_tlb_sync, }; /* IOMMU API */ diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index f056164a94b0..07a267c437d6 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -251,7 +251,8 @@ enum arm_smmu_domain_stage { struct arm_smmu_flush_ops { struct iommu_flush_ops tlb; void (*tlb_inv_range)(unsigned long 
iova, size_t size, size_t granule, - bool leaf, void *cookie) + bool leaf, void *cookie); + void (*tlb_sync)(void *cookie); }; struct arm_smmu_domain { @@ -539,7 +540,7 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size, * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears * almost negligible, but the benefit of getting the first one in as far ahead * of the sync as possible is significant, hence we don't just make this a - * no-op and set .tlb_sync to arm_smmu_inv_context_s2() as you might think. + * no-op and set .tlb_sync to arm_smmu_tlb_inv_context_s2() as you might think. */ static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size, size_t granule, bool leaf, void *cookie) @@ -560,7 +561,7 @@ static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size, const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops; ops->tlb_inv_range(iova, size, granule, false, cookie); - ops->tlb.tlb_sync(cookie); + ops->tlb_sync(cookie); } static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size, @@ -570,7 +571,7 @@ static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size, const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops; ops->tlb_inv_range(iova, size, granule, true, cookie); - ops->tlb.tlb_sync(cookie); + ops->tlb_sync(cookie); } static void arm_smmu_tlb_add_page(unsigned long iova, size_t granule, @@ -588,9 +589,9 @@ static const struct arm_smmu_flush_ops arm_smmu_s1_tlb_ops = { .tlb_flush_walk = arm_smmu_tlb_inv_walk, .tlb_flush_leaf = arm_smmu_tlb_inv_leaf, .tlb_add_page = arm_smmu_tlb_add_page, - .tlb_sync = arm_smmu_tlb_sync_context, }, .tlb_inv_range = arm_smmu_tlb_inv_range_nosync, + .tlb_sync = arm_smmu_tlb_sync_context, }; static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v2 = { @@ -599,9 +600,9 @@ static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v2 = { .tlb_flush_walk = arm_smmu_tlb_inv_walk, .tlb_flush_leaf = arm_smmu_tlb_inv_leaf, .tlb_add_page = arm_smmu_tlb_add_page, - .tlb_sync = arm_smmu_tlb_sync_context, }, .tlb_inv_range = arm_smmu_tlb_inv_range_nosync, + .tlb_sync = arm_smmu_tlb_sync_context, }; static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v1 = { @@ -610,9 +611,9 @@ static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v1 = { .tlb_flush_walk = arm_smmu_tlb_inv_walk, .tlb_flush_leaf = arm_smmu_tlb_inv_leaf, .tlb_add_page = arm_smmu_tlb_add_page, - .tlb_sync = arm_smmu_tlb_sync_vmid, }, .tlb_inv_range = arm_smmu_tlb_inv_vmid_nosync, + .tlb_sync = arm_smmu_tlb_sync_vmid, }; static irqreturn_t arm_smmu_context_fault(int irq, void *dev) @@ -1387,7 +1388,7 @@ static void arm_smmu_iotlb_sync(struct iommu_domain *domain, if (smmu_domain->flush_ops) { arm_smmu_rpm_get(smmu); - smmu_domain->flush_ops->tlb.tlb_sync(smmu_domain); + smmu_domain->flush_ops->tlb_sync(smmu_domain); arm_smmu_rpm_put(smmu); } } diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index b3f975c95f76..203894fb6765 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -813,17 +813,11 @@ static void dummy_tlb_add_page(unsigned long iova, size_t granule, void *cookie) dummy_tlb_flush(iova, granule, granule, cookie); } -static void dummy_tlb_sync(void *cookie) -{ - WARN_ON(cookie != cfg_cookie); -} - static const struct iommu_flush_ops dummy_tlb_ops = { .tlb_flush_all = dummy_tlb_flush_all, .tlb_flush_walk = dummy_tlb_flush, .tlb_flush_leaf = dummy_tlb_flush, .tlb_add_page = dummy_tlb_add_page, - .tlb_sync = 
dummy_tlb_sync, }; #define __FAIL(ops) ({ \ diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index a5c0db01533e..f35516744965 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -1080,17 +1080,11 @@ static void dummy_tlb_add_page(unsigned long iova, size_t granule, void *cookie) dummy_tlb_flush(iova, granule, granule, cookie); } -static void dummy_tlb_sync(void *cookie) -{ - WARN_ON(cookie != cfg_cookie); -} - static const struct iommu_flush_ops dummy_tlb_ops __initconst = { .tlb_flush_all = dummy_tlb_flush_all, .tlb_flush_walk = dummy_tlb_flush, .tlb_flush_leaf = dummy_tlb_flush, .tlb_add_page = dummy_tlb_add_page, - .tlb_sync = dummy_tlb_sync, }; static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops) diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index c4da271af90e..a2b8eff4c1f7 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -371,7 +371,6 @@ static const struct iommu_flush_ops ipmmu_flush_ops = { .tlb_flush_all = ipmmu_tlb_flush_all, .tlb_flush_walk = ipmmu_tlb_flush, .tlb_flush_leaf = ipmmu_tlb_flush, - .tlb_sync = ipmmu_tlb_flush_all, }; /* ----------------------------------------------------------------------------- diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index 2cd83295a841..ccfc7ed230ef 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c @@ -168,28 +168,16 @@ fail: return; } -static void __flush_iotlb_sync(void *cookie) -{ - /* - * Nothing is needed here, the barrier to guarantee - * completion of the tlb sync operation is implicitly - * taken care when the iommu client does a writel before - * kick starting the other master. - */ -} - static void __flush_iotlb_walk(unsigned long iova, size_t size, size_t granule, void *cookie) { __flush_iotlb_range(iova, size, granule, false, cookie); - __flush_iotlb_sync(cookie); } static void __flush_iotlb_leaf(unsigned long iova, size_t size, size_t granule, void *cookie) { __flush_iotlb_range(iova, size, granule, true, cookie); - __flush_iotlb_sync(cookie); } static void __flush_iotlb_page(unsigned long iova, size_t granule, void *cookie) @@ -202,7 +190,6 @@ static const struct iommu_flush_ops msm_iommu_flush_ops = { .tlb_flush_walk = __flush_iotlb_walk, .tlb_flush_leaf = __flush_iotlb_leaf, .tlb_add_page = __flush_iotlb_page, - .tlb_sync = __flush_iotlb_sync, }; static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end) @@ -712,6 +699,13 @@ static struct iommu_ops msm_iommu_ops = { .detach_dev = msm_iommu_detach_dev, .map = msm_iommu_map, .unmap = msm_iommu_unmap, + /* + * Nothing is needed here, the barrier to guarantee + * completion of the tlb sync operation is implicitly + * taken care when the iommu client does a writel before + * kick starting the other master. 
+ */ + .iotlb_sync = NULL, .iova_to_phys = msm_iommu_iova_to_phys, .add_device = msm_iommu_add_device, .remove_device = msm_iommu_remove_device, diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index a0b4b4dc4b90..3785750bdb44 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -213,7 +213,6 @@ static const struct iommu_flush_ops mtk_iommu_flush_ops = { .tlb_flush_walk = mtk_iommu_tlb_flush_walk, .tlb_flush_leaf = mtk_iommu_tlb_flush_leaf, .tlb_add_page = mtk_iommu_tlb_flush_page_nosync, - .tlb_sync = mtk_iommu_tlb_sync, }; static irqreturn_t mtk_iommu_isr(int irq, void *dev_id) diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c index 7d8411dee4cf..0b8a6d6bb475 100644 --- a/drivers/iommu/qcom_iommu.c +++ b/drivers/iommu/qcom_iommu.c @@ -189,7 +189,6 @@ static const struct iommu_flush_ops qcom_flush_ops = { .tlb_flush_walk = qcom_iommu_tlb_flush_walk, .tlb_flush_leaf = qcom_iommu_tlb_flush_leaf, .tlb_add_page = qcom_iommu_tlb_add_page, - .tlb_sync = qcom_iommu_tlb_sync, }; static irqreturn_t qcom_iommu_fault(int irq, void *dev) diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index 99e04bd2baa1..843310484fe2 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -30,9 +30,6 @@ enum io_pgtable_fmt { * for IOMMUs that cannot batch TLB invalidation operations * efficiently and are therefore better suited to issuing them * early rather than deferring them until iommu_tlb_sync(). - * @tlb_sync: Ensure any queued TLB invalidation has taken effect, and - * any corresponding page table updates are visible to the - * IOMMU. * * Note that these can all be called in atomic context and must therefore * not block. @@ -44,7 +41,6 @@ struct iommu_flush_ops { void (*tlb_flush_leaf)(unsigned long iova, size_t size, size_t granule, void *cookie); void (*tlb_add_page)(unsigned long iova, size_t granule, void *cookie); - void (*tlb_sync)(void *cookie); }; /** @@ -218,11 +214,6 @@ io_pgtable_tlb_add_page(struct io_pgtable *iop, unsigned long iova, iop->cfg.tlb->tlb_add_page(iova, granule, iop->cookie); } -static inline void io_pgtable_tlb_sync(struct io_pgtable *iop) -{ - iop->cfg.tlb->tlb_sync(iop->cookie); -} - /** * struct io_pgtable_init_fns - Alloc/free a set of page tables for a * particular format. -- cgit v1.2.3 From a2d3a382d6c682e22b263c9e7f0d857c3fa6c9d6 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 2 Jul 2019 16:44:58 +0100 Subject: iommu/io-pgtable: Pass struct iommu_iotlb_gather to ->unmap() Update the io-pgtable ->unmap() function to take an iommu_iotlb_gather pointer as an argument, and update the callers as appropriate. 
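For readers unfamiliar with the shape of this change, the following is a small, self-contained C sketch of an ops table whose unmap hook threads an optional gather pointer through to the implementation. The names (toy_ops, toy_gather, toy_unmap) are invented for illustration and are not kernel API; the kernel's iommu_iotlb_gather carries more state than this.

/*
 * Toy model only: one caller batches invalidations via a gather
 * object, another passes NULL, as the io-pgtable selftests do above.
 */
#include <stddef.h>
#include <stdio.h>

struct toy_gather { unsigned long start, end; };

struct toy_ops {
	size_t (*unmap)(struct toy_ops *ops, unsigned long iova,
			size_t size, struct toy_gather *gather);
};

static size_t toy_unmap(struct toy_ops *ops, unsigned long iova,
			size_t size, struct toy_gather *gather)
{
	(void)ops;
	if (gather) {		/* batching: record the range, flush later */
		if (iova < gather->start)
			gather->start = iova;
		if (iova + size > gather->end)
			gather->end = iova + size;
	}
	return size;		/* pretend the whole range was unmapped */
}

int main(void)
{
	struct toy_ops ops = { .unmap = toy_unmap };
	struct toy_gather gather = { .start = ~0UL, .end = 0 };

	ops.unmap(&ops, 0x1000, 0x1000, &gather); /* caller that batches */
	ops.unmap(&ops, 0x5000, 0x1000, NULL);    /* caller that does not */
	printf("gathered [%#lx, %#lx)\n", gather.start, gather.end);
	return 0;
}

The point of the extra parameter is that an implementation can accumulate the range to invalidate and defer the flush, while callers with no batching context simply pass NULL.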
Signed-off-by: Will Deacon --- drivers/gpu/drm/panfrost/panfrost_mmu.c | 2 +- drivers/iommu/arm-smmu-v3.c | 2 +- drivers/iommu/arm-smmu.c | 2 +- drivers/iommu/io-pgtable-arm-v7s.c | 6 +++--- drivers/iommu/io-pgtable-arm.c | 7 +++---- drivers/iommu/ipmmu-vmsa.c | 2 +- drivers/iommu/msm_iommu.c | 2 +- drivers/iommu/mtk_iommu.c | 2 +- drivers/iommu/qcom_iommu.c | 2 +- include/linux/io-pgtable.h | 4 +++- 10 files changed, 16 insertions(+), 15 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index de22a2276e00..6e8145c36e93 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -222,7 +222,7 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo) size_t unmapped_page; size_t pgsize = get_pgsize(iova, len - unmapped_len); - unmapped_page = ops->unmap(ops, iova, pgsize); + unmapped_page = ops->unmap(ops, iova, pgsize, NULL); if (!unmapped_page) break; diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 231093413ff9..8e2e53079f48 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -2015,7 +2015,7 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, if (!ops) return 0; - ret = ops->unmap(ops, iova, size); + ret = ops->unmap(ops, iova, size, gather); if (ret && arm_smmu_atc_inv_domain(smmu_domain, 0, iova, size)) return 0; diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 07a267c437d6..f6689956ab6e 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -1362,7 +1362,7 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, return 0; arm_smmu_rpm_get(smmu); - ret = ops->unmap(ops, iova, size); + ret = ops->unmap(ops, iova, size, gather); arm_smmu_rpm_put(smmu); return ret; diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index 203894fb6765..a7776e982b6c 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -666,7 +666,7 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data, } static size_t arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova, - size_t size) + size_t size, struct iommu_iotlb_gather *gather) { struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops); @@ -892,7 +892,7 @@ static int __init arm_v7s_do_selftests(void) size = 1UL << __ffs(cfg.pgsize_bitmap); while (i < loopnr) { iova_start = i * SZ_16M; - if (ops->unmap(ops, iova_start + size, size) != size) + if (ops->unmap(ops, iova_start + size, size, NULL) != size) return __FAIL(ops); /* Remap of partial unmap */ @@ -910,7 +910,7 @@ static int __init arm_v7s_do_selftests(void) for_each_set_bit(i, &cfg.pgsize_bitmap, BITS_PER_LONG) { size = 1UL << i; - if (ops->unmap(ops, iova, size) != size) + if (ops->unmap(ops, iova, size, NULL) != size) return __FAIL(ops); if (ops->iova_to_phys(ops, iova + 42)) diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index f35516744965..325430f8a0a1 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -12,7 +12,6 @@ #include #include #include -#include #include #include #include @@ -642,7 +641,7 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, } static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova, - size_t size) + size_t size, struct iommu_iotlb_gather *gather) { struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); 
arm_lpae_iopte *ptep = data->pgd; @@ -1167,7 +1166,7 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) /* Partial unmap */ size = 1UL << __ffs(cfg->pgsize_bitmap); - if (ops->unmap(ops, SZ_1G + size, size) != size) + if (ops->unmap(ops, SZ_1G + size, size, NULL) != size) return __FAIL(ops, i); /* Remap of partial unmap */ @@ -1182,7 +1181,7 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) { size = 1UL << j; - if (ops->unmap(ops, iova, size) != size) + if (ops->unmap(ops, iova, size, NULL) != size) return __FAIL(ops, i); if (ops->iova_to_phys(ops, iova + 42)) diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index a2b8eff4c1f7..76a8ec343d53 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -737,7 +737,7 @@ static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova, { struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); - return domain->iop->unmap(domain->iop, iova, size); + return domain->iop->unmap(domain->iop, iova, size, gather); } static void ipmmu_flush_iotlb_all(struct iommu_domain *io_domain) diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index ccfc7ed230ef..8a0dcaf0a9e9 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c @@ -523,7 +523,7 @@ static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova, unsigned long flags; spin_lock_irqsave(&priv->pgtlock, flags); - len = priv->iop->unmap(priv->iop, iova, len); + len = priv->iop->unmap(priv->iop, iova, len, gather); spin_unlock_irqrestore(&priv->pgtlock, flags); return len; diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 3785750bdb44..b73cffd63262 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -400,7 +400,7 @@ static size_t mtk_iommu_unmap(struct iommu_domain *domain, size_t unmapsz; spin_lock_irqsave(&dom->pgtlock, flags); - unmapsz = dom->iop->unmap(dom->iop, iova, size); + unmapsz = dom->iop->unmap(dom->iop, iova, size, gather); spin_unlock_irqrestore(&dom->pgtlock, flags); return unmapsz; diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c index 0b8a6d6bb475..48b288ef74b4 100644 --- a/drivers/iommu/qcom_iommu.c +++ b/drivers/iommu/qcom_iommu.c @@ -455,7 +455,7 @@ static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova, */ pm_runtime_get_sync(qcom_domain->iommu->dev); spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags); - ret = ops->unmap(ops, iova, size); + ret = ops->unmap(ops, iova, size, gather); spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags); pm_runtime_put_sync(qcom_domain->iommu->dev); diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index 843310484fe2..fe27d93c8ad9 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -1,7 +1,9 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __IO_PGTABLE_H #define __IO_PGTABLE_H + #include +#include /* * Public API for use by IOMMU drivers @@ -136,7 +138,7 @@ struct io_pgtable_ops { int (*map)(struct io_pgtable_ops *ops, unsigned long iova, phys_addr_t paddr, size_t size, int prot); size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova, - size_t size); + size_t size, struct iommu_iotlb_gather *gather); phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops, unsigned long iova); }; -- cgit v1.2.3 From 301efe96f777c0197502b9399694ae83779579b3 Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Mon, 29 Jul 2019 11:26:12 
+0000 Subject: drm/i915/uc: Don't fail on HuC firmware failure HuC is usually not a critical component, so we can safely ignore firmware load or authentication failures unless HuC was explicitly requested by the user. v2: add convenient way to disable loading (Chris) Signed-off-by: Michal Wajdeczko Cc: Daniele Ceraolo Spurio Cc: Chris Wilson Cc: Joonas Lahtinen Reviewed-by: Chris Wilson #v1 Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190729112612.37476-1-michal.wajdeczko@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_uc.c | 8 ++++---- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 5 +++-- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h | 6 ++++++ 3 files changed, 13 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index fafa9be1e12a..6eb8bb3fa252 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -422,7 +422,7 @@ int intel_uc_init_hw(struct intel_uc *uc) if (intel_uc_is_using_huc(uc)) { ret = intel_huc_fw_upload(huc); - if (ret) + if (ret && intel_uc_fw_is_overridden(&huc->fw)) goto err_out; } @@ -444,9 +444,9 @@ int intel_uc_init_hw(struct intel_uc *uc) if (ret) goto err_log_capture; - if (intel_uc_is_using_huc(uc)) { + if (intel_uc_fw_is_loaded(&huc->fw)) { ret = intel_huc_auth(huc); - if (ret) + if (ret && intel_uc_fw_is_overridden(&huc->fw)) goto err_communication; } @@ -465,7 +465,7 @@ int intel_uc_init_hw(struct intel_uc *uc) dev_info(i915->drm.dev, "GuC submission %s\n", enableddisabled(intel_uc_is_using_guc_submission(uc))); dev_info(i915->drm.dev, "HuC %s\n", - enableddisabled(intel_uc_is_using_huc(uc))); + enableddisabled(intel_huc_is_authenticated(huc))); return 0; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c index 0f9badf44837..ac91e3efd02b 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -146,7 +146,8 @@ __uc_fw_override(struct intel_uc_fw *uc_fw) break; } - return uc_fw->path; + uc_fw->user_overridden = uc_fw->path; + return uc_fw->user_overridden; } /** @@ -176,7 +177,7 @@ void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw, __uc_fw_auto_select(uc_fw, INTEL_INFO(i915)->platform, INTEL_REVID(i915)); - if (uc_fw->path) + if (uc_fw->path && *uc_fw->path) uc_fw->status = INTEL_UC_FIRMWARE_SELECTED; else uc_fw->status = INTEL_UC_FIRMWARE_NOT_SUPPORTED; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h index c2ab2803715d..6b64b8073703 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h @@ -61,6 +61,7 @@ struct intel_uc_fw { enum intel_uc_fw_type type; enum intel_uc_fw_status status; const char *path; + bool user_overridden; size_t size; struct drm_i915_gem_object *obj; @@ -141,6 +142,11 @@ static inline bool intel_uc_fw_supported(struct intel_uc_fw *uc_fw) return __intel_uc_fw_status(uc_fw) != INTEL_UC_FIRMWARE_NOT_SUPPORTED; } +static inline bool intel_uc_fw_is_overridden(const struct intel_uc_fw *uc_fw) +{ + return uc_fw->user_overridden; +} + static inline void intel_uc_fw_sanitize(struct intel_uc_fw *uc_fw) { if (intel_uc_fw_is_loaded(uc_fw)) -- cgit v1.2.3 From c00f9c6b79f7e1c5caf774c38e9fd5dad2d2ef1c Mon Sep 17 00:00:00 2001 From: Zhenyu Wang Date: Wed, 12 Jun 2019 11:17:46 +0800 Subject: drm/i915/gvt: remove duplicate include of trace.h This removes duplicate include of trace.h. 
Found by Hariprasad Kelam with includecheck. Reported-by: Hariprasad Kelam Reviewed-by: Yan Zhao Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/trace_points.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gvt/trace_points.c b/drivers/gpu/drm/i915/gvt/trace_points.c index a3deed692b9c..fe552e877e09 100644 --- a/drivers/gpu/drm/i915/gvt/trace_points.c +++ b/drivers/gpu/drm/i915/gvt/trace_points.c @@ -28,8 +28,6 @@ * */ -#include "trace.h" - #ifndef __CHECKER__ #define CREATE_TRACE_POINTS #include "trace.h" -- cgit v1.2.3 From d18fd0576e05a4b03b588e131093b0437fccb75f Mon Sep 17 00:00:00 2001 From: Xiong Zhang Date: Mon, 27 May 2019 13:45:51 +0800 Subject: drm/i915/gvt: Warning for invalid ggtt access Instead of silently return virtual ggtt entries that guest is allowed to access, this patch add extra range check. If guest read out of range, it will print a warning and return 0. If guest write out of range, the write will be dropped without any message. Reviewed-by: Zhenyu Wang Signed-off-by: Xiong Zhang Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/gtt.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 53115bdae12b..4b04af569c05 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -2141,11 +2141,20 @@ static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm; const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; unsigned long index = off >> info->gtt_entry_size_shift; + unsigned long gma; struct intel_gvt_gtt_entry e; if (bytes != 4 && bytes != 8) return -EINVAL; + gma = index << I915_GTT_PAGE_SHIFT; + if (!intel_gvt_ggtt_validate_range(vgpu, + gma, 1 << I915_GTT_PAGE_SHIFT)) { + gvt_dbg_mm("read invalid ggtt at 0x%lx\n", gma); + memset(p_data, 0, bytes); + return 0; + } + ggtt_get_guest_entry(ggtt_mm, &e, index); memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)), bytes); -- cgit v1.2.3 From c25144098bee19b089c8a37c54517bf467f06403 Mon Sep 17 00:00:00 2001 From: Xiong Zhang Date: Mon, 27 May 2019 13:45:52 +0800 Subject: drm/i915/gvt: Don't use ggtt_validdate_range() with size=0 Use vgpu_gmadr_is_valid() directly instead. 
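As a hedged illustration of why a zero-sized range check is better replaced by a plain address check, here is a toy C sketch. The helper names and aperture bounds are invented and deliberately simpler than the GVT helpers; with size == 0 a range helper either degenerates or rejects the input, so a single-address check states the intent directly.

#include <stdbool.h>
#include <stdio.h>

#define APERTURE_BASE 0x80000000UL
#define APERTURE_SIZE 0x10000000UL

static bool addr_is_valid(unsigned long addr)
{
	return addr >= APERTURE_BASE && addr < APERTURE_BASE + APERTURE_SIZE;
}

static bool range_is_valid(unsigned long start, unsigned long size)
{
	/* a zero-sized range is meaningless, so this toy rejects it */
	return size && addr_is_valid(start) && addr_is_valid(start + size - 1);
}

int main(void)
{
	unsigned long base = 0x8ff00000UL;

	printf("range check with size 0: %d\n", range_is_valid(base, 0));
	printf("plain address check:     %d\n", addr_is_valid(base));
	return 0;
}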
Reviewed-by: Zhenyu Wang Signed-off-by: Xiong Zhang Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/fb_decoder.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c index 65e847392aea..8bb292b01271 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.c +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c @@ -245,7 +245,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, plane->hw_format = fmt; plane->base = vgpu_vreg_t(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK; - if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) + if (!vgpu_gmadr_is_valid(vgpu, plane->base)) return -EINVAL; plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); @@ -368,7 +368,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu, alpha_plane, alpha_force); plane->base = vgpu_vreg_t(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK; - if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) + if (!vgpu_gmadr_is_valid(vgpu, plane->base)) return -EINVAL; plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); @@ -472,7 +472,7 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu, plane->drm_format = drm_format; plane->base = vgpu_vreg_t(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK; - if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) + if (!vgpu_gmadr_is_valid(vgpu, plane->base)) return -EINVAL; plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); -- cgit v1.2.3 From 2089a76ade9005a06c5e08e8454f45f3625fdc1c Mon Sep 17 00:00:00 2001 From: Xiong Zhang Date: Mon, 27 May 2019 13:45:53 +0800 Subject: drm/i915/gvt: Checking workload's gma earlier Workload contains RB and WA_CTX which are in ggtt space, if they aren't in valid ggtt space, the workload shouldn't be shadowed and scanned. So checking them earlier to avoid shadow them. 
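The pattern being applied here is validate-before-commit: reject bad guest addresses before any shadow state is built, so nothing needs to be torn down on the error path. A minimal C sketch of that pattern follows; the names and the address window are invented for illustration and are not the GVT workload code.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct desc { unsigned long ring_start, ring_size; };
struct shadow { struct desc d; };

static bool range_ok(unsigned long start, unsigned long size)
{
	return size && start + size > start && start + size <= 0x10000000UL;
}

static struct shadow *shadow_create(const struct desc *d)
{
	/* cheap checks first: fail before committing any resources */
	if (!range_ok(d->ring_start, d->ring_size)) {
		errno = EINVAL;
		return NULL;
	}

	struct shadow *s = malloc(sizeof(*s));
	if (s)
		s->d = *d;
	return s;
}

int main(void)
{
	struct desc bad = { .ring_start = 0x0fffff00UL, .ring_size = 0x1000 };

	if (!shadow_create(&bad))
		perror("shadow_create");
	return 0;
}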
Reviewed-by: Zhenyu Wang Signed-off-by: Xiong Zhang Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/cmd_parser.c | 10 ---------- drivers/gpu/drm/i915/gvt/scheduler.c | 28 ++++++++++++++++++++++++++++ 2 files changed, 28 insertions(+), 10 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 6ea88270c818..b09dc315e2da 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -2674,11 +2674,6 @@ static int scan_workload(struct intel_vgpu_workload *workload) gma_head == gma_tail) return 0; - if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) { - ret = -EINVAL; - goto out; - } - ret = ip_gma_set(&s, gma_head); if (ret) goto out; @@ -2724,11 +2719,6 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) s.workload = workload; s.is_ctx_wa = true; - if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) { - ret = -EINVAL; - goto out; - } - ret = ip_gma_set(&s, gma_head); if (ret) goto out; diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 2144fb46d0e1..6469366c1753 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -1492,6 +1492,12 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4); + if (!intel_gvt_ggtt_validate_range(vgpu, start, + _RING_CTL_BUF_SIZE(ctl))) { + gvt_vgpu_err("context contain invalid rb at: 0x%x\n", start); + return ERR_PTR(-EINVAL); + } + workload = alloc_workload(vgpu); if (IS_ERR(workload)) return workload; @@ -1516,9 +1522,31 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, workload->wa_ctx.indirect_ctx.size = (indirect_ctx & INDIRECT_CTX_SIZE_MASK) * CACHELINE_BYTES; + + if (workload->wa_ctx.indirect_ctx.size != 0) { + if (!intel_gvt_ggtt_validate_range(vgpu, + workload->wa_ctx.indirect_ctx.guest_gma, + workload->wa_ctx.indirect_ctx.size)) { + kmem_cache_free(s->workloads, workload); + gvt_vgpu_err("invalid wa_ctx at: 0x%lx\n", + workload->wa_ctx.indirect_ctx.guest_gma); + return ERR_PTR(-EINVAL); + } + } + workload->wa_ctx.per_ctx.guest_gma = per_ctx & PER_CTX_ADDR_MASK; workload->wa_ctx.per_ctx.valid = per_ctx & 1; + if (workload->wa_ctx.per_ctx.valid) { + if (!intel_gvt_ggtt_validate_range(vgpu, + workload->wa_ctx.per_ctx.guest_gma, + CACHELINE_BYTES)) { + kmem_cache_free(s->workloads, workload); + gvt_vgpu_err("invalid per_ctx at: 0x%lx\n", + workload->wa_ctx.per_ctx.guest_gma); + return ERR_PTR(-EINVAL); + } + } } gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n", -- cgit v1.2.3 From 7366aeb77cd840f3edea02c65065d40affaa7f45 Mon Sep 17 00:00:00 2001 From: Xiaolin Zhang Date: Thu, 18 Jul 2019 01:10:24 +0800 Subject: drm/i915/gvt: fix incorrect cache entry for guest page mapping GPU hang observed during the guest OCL conformance test which is caused by THP GTT feature used durning the test. It was observed the same GFN with different size (4K and 2M) requested from the guest in GVT. So during the guest page dma map stage, it is required to unmap first with orginal size and then remap again with requested size. 
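A minimal C sketch of the cache behaviour this fix introduces is shown below. The names and the one-slot cache are invented for illustration and are not the KVMGT data structures; the idea is only that a mapping cache keyed by page frame number must not keep serving an entry whose size no longer matches the request.

#include <stdio.h>

struct entry { unsigned long gfn; unsigned long size; int valid; };

static struct entry cache;	/* one slot is enough to show the idea */

static void map(unsigned long gfn, unsigned long size)
{
	if (cache.valid && cache.gfn == gfn && cache.size == size)
		return;				/* reuse the existing mapping */

	if (cache.valid && cache.gfn == gfn && cache.size != size) {
		/* same gfn, different size: drop the stale mapping first */
		printf("unmap gfn %#lx size %#lx\n", cache.gfn, cache.size);
		cache.valid = 0;
	}

	printf("map   gfn %#lx size %#lx\n", gfn, size);
	cache = (struct entry){ .gfn = gfn, .size = size, .valid = 1 };
}

int main(void)
{
	map(0x1234, 0x1000);	/* first seen as a 4K mapping */
	map(0x1234, 0x200000);	/* same gfn now requested as 2M: remap */
	return 0;
}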
Fixes: b901b252b6cf ("drm/i915/gvt: Add 2M huge gtt support") Cc: stable@vger.kernel.org Reviewed-by: Zhenyu Wang Signed-off-by: Xiaolin Zhang Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/kvmgt.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 144301b778df..23aa3e50cbf8 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1904,6 +1904,18 @@ static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn, entry = __gvt_cache_find_gfn(info->vgpu, gfn); if (!entry) { + ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size); + if (ret) + goto err_unlock; + + ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size); + if (ret) + goto err_unmap; + } else if (entry->size != size) { + /* the same gfn with different size: unmap and re-map */ + gvt_dma_unmap_page(vgpu, gfn, entry->dma_addr, entry->size); + __gvt_cache_remove_entry(vgpu, entry); + ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size); if (ret) goto err_unlock; -- cgit v1.2.3 From ef5b0b444e6297d03ac0bdc0c82f65396ef4dccd Mon Sep 17 00:00:00 2001 From: Xiaolin Zhang Date: Thu, 20 Jun 2019 10:29:24 -0400 Subject: drm/i915/gvt: grab runtime pm first for forcewake use in workload_thread, it should grab runtime pm wakelock and later uncore forcewake get will check rpm wakelock held successfully. otherwise, sometimes, rpm wakelock not hold and print call trace below: Call Trace: intel_uncore_forcewake_get+0x15/0x20 [i915] workload_thread+0x5f9/0x16f0 [i915] ? __switch_to_asm+0x34/0x70 ? __switch_to_asm+0x40/0x70 ? __switch_to_asm+0x34/0x70 ? __switch_to_asm+0x40/0x70 ? __switch_to_asm+0x34/0x70 ? __switch_to+0x85/0x3f0 ? __switch_to_asm+0x40/0x70 ? do_wait_intr_irq+0x90/0x90 kthread+0x121/0x140 ? intel_vgpu_clean_workloads+0x100/0x100 [i915] ? kthread_park+0x90/0x90 ret_from_fork+0x35/0x40 --[ end trace 86525f742a02e12c ]-- v2: adapted to use rpm structure. Fixes: 251d46b0875c ("drm/i915/gvt: Pin the per-engine GVT shadow contexts") Reviewed-by: Zhenyu Wang Signed-off-by: Xiaolin Zhang Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/scheduler.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 6469366c1753..196b4155a309 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -990,6 +990,7 @@ static int workload_thread(void *priv) int ret; bool need_force_wake = (INTEL_GEN(gvt->dev_priv) >= 9); DEFINE_WAIT_FUNC(wait, woken_wake_function); + struct intel_runtime_pm *rpm = &gvt->dev_priv->runtime_pm; kfree(p); @@ -1013,6 +1014,8 @@ static int workload_thread(void *priv) workload->ring_id, workload, workload->vgpu->id); + intel_runtime_pm_get(rpm); + gvt_dbg_sched("ring id %d will dispatch workload %p\n", workload->ring_id, workload); @@ -1042,6 +1045,7 @@ complete: intel_uncore_forcewake_put(&gvt->dev_priv->uncore, FORCEWAKE_ALL); + intel_runtime_pm_put_unchecked(rpm); if (ret && (vgpu_is_vm_unhealthy(ret))) enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR); } -- cgit v1.2.3 From 4187414808095f645ca0661f8dde77617e2e7cb3 Mon Sep 17 00:00:00 2001 From: Colin Xu Date: Thu, 4 Jul 2019 16:45:06 +0800 Subject: drm/i915/gvt: Adding ppgtt to GVT GEM context after shadow pdps settled. Windows guest can't run after force-TDR with host log: ... 
gvt: vgpu 1: workload shadow ppgtt isn't ready gvt: vgpu 1: fail to dispatch workload, skip ... The error is raised by set_context_ppgtt_from_shadow(), when it checks and found the shadow_mm isn't marked as shadowed. In work thread before each submission, a shadow_mm is set to shadowed in: shadow_ppgtt_mm() <-intel_vgpu_pin_mm() <-prepare_workload() <-dispatch_workload() <-workload_thread() However checking whether or not shadow_mm is shadowed is prior to it: set_context_ppgtt_from_shadow() <-dispatch_workload() <-workload_thread() In normal case, create workload will check the existence of shadow_mm, if not it will create a new one and marked as shadowed. If already exist it will reuse the old one. Since shadow_mm is reused, checking of shadowed in set_context_ppgtt_from_shadow() actually always see the state set in creation, but not the state set in intel_vgpu_pin_mm(). When force-TDR, all engines are reset, since it's not dmlr level, all ppgtt_mm are invalidated but not destroyed. Invalidation will mark all reused shadow_mm as not shadowed but still keeps in ppgtt_mm_list_head. If workload submission phase those shadow_mm are reused with shadowed not set, then set_context_ppgtt_from_shadow() will report error. Pin for context after shadow_mm pinned and shadow pdps settled. v2: Move set_context_ppgtt_from_shadow() after prepare_workload(). (zhenyu) v3: Move set_context_ppgtt_from_shadow() after shadow pdps updated.(zhenyu) Fixes: 4f15665ccbba ("drm/i915: Add ppgtt to GVT GEM context") Cc: stable@vger.kernel.org Signed-off-by: Colin Xu Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/scheduler.c | 27 ++++++++++++--------------- 1 file changed, 12 insertions(+), 15 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 196b4155a309..9f3fd7d96a69 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -364,16 +364,13 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) wa_ctx->indirect_ctx.shadow_va = NULL; } -static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, - struct i915_gem_context *ctx) +static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, + struct i915_gem_context *ctx) { struct intel_vgpu_mm *mm = workload->shadow_mm; struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ctx->vm); int i = 0; - if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed) - return -EINVAL; - if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) { px_dma(ppgtt->pd) = mm->ppgtt_mm.shadow_pdps[0]; } else { @@ -384,8 +381,6 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i]; } } - - return 0; } static int @@ -614,6 +609,8 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload) static int prepare_workload(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; + struct intel_vgpu_submission *s = &vgpu->submission; + int ring = workload->ring_id; int ret = 0; ret = intel_vgpu_pin_mm(workload->shadow_mm); @@ -622,8 +619,16 @@ static int prepare_workload(struct intel_vgpu_workload *workload) return ret; } + if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT || + !workload->shadow_mm->ppgtt_mm.shadowed) { + gvt_vgpu_err("workload shadow ppgtt isn't ready\n"); + return -EINVAL; + } + update_shadow_pdps(workload); + set_context_ppgtt_from_shadow(workload, s->shadow[ring]->gem_context); 
+ ret = intel_vgpu_sync_oos_pages(workload->vgpu); if (ret) { gvt_vgpu_err("fail to vgpu sync oos pages\n"); @@ -674,7 +679,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; - struct intel_vgpu_submission *s = &vgpu->submission; struct i915_request *rq; int ring_id = workload->ring_id; int ret; @@ -685,13 +689,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) mutex_lock(&vgpu->vgpu_lock); mutex_lock(&dev_priv->drm.struct_mutex); - ret = set_context_ppgtt_from_shadow(workload, - s->shadow[ring_id]->gem_context); - if (ret < 0) { - gvt_vgpu_err("workload shadow ppgtt isn't ready\n"); - goto err_req; - } - ret = intel_gvt_workload_req_alloc(workload); if (ret) goto err_req; -- cgit v1.2.3 From 1032a2af93f58ff12996f6122a8a488979ea22a8 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Mon, 29 Jul 2019 17:08:47 +0300 Subject: drm/i915: use upstream version of header tests Throw out our local hacks of header tests now that the more generic kbuild versions are upstream. At least for now, continue to keep the header tests behind CONFIG_DRM_I915_WERROR=y knob. Cc: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190729140847.18557-1-jani.nikula@intel.com --- drivers/gpu/drm/i915/Kconfig.debug | 1 + drivers/gpu/drm/i915/Makefile | 2 +- drivers/gpu/drm/i915/Makefile.header-test | 27 ----------------------- drivers/gpu/drm/i915/display/Makefile | 3 ++- drivers/gpu/drm/i915/display/Makefile.header-test | 16 -------------- drivers/gpu/drm/i915/gem/Makefile | 2 +- drivers/gpu/drm/i915/gem/Makefile.header-test | 16 -------------- drivers/gpu/drm/i915/gt/Makefile | 2 +- drivers/gpu/drm/i915/gt/Makefile.header-test | 16 -------------- drivers/gpu/drm/i915/gt/uc/Makefile | 2 +- drivers/gpu/drm/i915/gt/uc/Makefile.header-test | 16 -------------- 11 files changed, 7 insertions(+), 96 deletions(-) delete mode 100644 drivers/gpu/drm/i915/Makefile.header-test delete mode 100644 drivers/gpu/drm/i915/display/Makefile.header-test delete mode 100644 drivers/gpu/drm/i915/gem/Makefile.header-test delete mode 100644 drivers/gpu/drm/i915/gt/Makefile.header-test delete mode 100644 drivers/gpu/drm/i915/gt/uc/Makefile.header-test (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug index 4cdc0181a093..87a38c6aaa41 100644 --- a/drivers/gpu/drm/i915/Kconfig.debug +++ b/drivers/gpu/drm/i915/Kconfig.debug @@ -7,6 +7,7 @@ config DRM_I915_WERROR # We use the dependency on !COMPILE_TEST to not be enabled in # allmodconfig or allyesconfig configurations depends on !COMPILE_TEST + select HEADER_TEST default n help Add -Werror to the build flags for (and only for) i915.ko. 
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 524516251a40..654bdcbd23c7 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -33,7 +33,7 @@ subdir-ccflags-y += \ $(call as-instr,movntdqa (%eax)$(comma)%xmm0,-DCONFIG_AS_MOVNTDQA) # Extra header tests -include $(src)/Makefile.header-test +header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h subdir-ccflags-y += -I$(srctree)/$(src) diff --git a/drivers/gpu/drm/i915/Makefile.header-test b/drivers/gpu/drm/i915/Makefile.header-test deleted file mode 100644 index 59908b067942..000000000000 --- a/drivers/gpu/drm/i915/Makefile.header-test +++ /dev/null @@ -1,27 +0,0 @@ -# SPDX-License-Identifier: MIT -# Copyright © 2019 Intel Corporation - -# Test the headers are compilable as standalone units -header-test-$(CONFIG_DRM_I915_WERROR) := \ - i915_active_types.h \ - i915_debugfs.h \ - i915_drv.h \ - i915_fixed.h \ - i915_gem_gtt.h \ - i915_globals.h \ - i915_irq.h \ - i915_params.h \ - i915_priolist_types.h \ - i915_pvinfo.h \ - i915_reg.h \ - i915_scheduler_types.h \ - i915_utils.h \ - i915_vgpu.h \ - intel_csr.h \ - intel_drv.h \ - intel_gvt.h \ - intel_pm.h \ - intel_runtime_pm.h \ - intel_sideband.h \ - intel_uncore.h \ - intel_wakeref.h diff --git a/drivers/gpu/drm/i915/display/Makefile b/drivers/gpu/drm/i915/display/Makefile index eec6961015a1..173c305d7866 100644 --- a/drivers/gpu/drm/i915/display/Makefile +++ b/drivers/gpu/drm/i915/display/Makefile @@ -2,4 +2,5 @@ subdir-ccflags-y += -I$(srctree)/$(src)/.. # Extra header tests -include $(src)/Makefile.header-test +header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h +header-test- := intel_vbt_defs.h diff --git a/drivers/gpu/drm/i915/display/Makefile.header-test b/drivers/gpu/drm/i915/display/Makefile.header-test deleted file mode 100644 index fc7d4e5bd2c6..000000000000 --- a/drivers/gpu/drm/i915/display/Makefile.header-test +++ /dev/null @@ -1,16 +0,0 @@ -# SPDX-License-Identifier: MIT -# Copyright © 2019 Intel Corporation - -# Test the headers are compilable as standalone units -header_test := $(notdir $(filter-out %/intel_vbt_defs.h,$(wildcard $(src)/*.h))) - -quiet_cmd_header_test = HDRTEST $@ - cmd_header_test = echo "\#include \"$( $@ - -header_test_%.c: %.h - $(call cmd,header_test) - -extra-$(CONFIG_DRM_I915_WERROR) += \ - $(foreach h,$(header_test),$(patsubst %.h,header_test_%.o,$(h))) - -clean-files += $(foreach h,$(header_test),$(patsubst %.h,header_test_%.c,$(h))) diff --git a/drivers/gpu/drm/i915/gem/Makefile b/drivers/gpu/drm/i915/gem/Makefile index eec6961015a1..7e73aa587967 100644 --- a/drivers/gpu/drm/i915/gem/Makefile +++ b/drivers/gpu/drm/i915/gem/Makefile @@ -2,4 +2,4 @@ subdir-ccflags-y += -I$(srctree)/$(src)/.. 
# Extra header tests -include $(src)/Makefile.header-test +header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h diff --git a/drivers/gpu/drm/i915/gem/Makefile.header-test b/drivers/gpu/drm/i915/gem/Makefile.header-test deleted file mode 100644 index 61e06cbb4b32..000000000000 --- a/drivers/gpu/drm/i915/gem/Makefile.header-test +++ /dev/null @@ -1,16 +0,0 @@ -# SPDX-License-Identifier: MIT -# Copyright © 2019 Intel Corporation - -# Test the headers are compilable as standalone units -header_test := $(notdir $(wildcard $(src)/*.h)) - -quiet_cmd_header_test = HDRTEST $@ - cmd_header_test = echo "\#include \"$( $@ - -header_test_%.c: %.h - $(call cmd,header_test) - -extra-$(CONFIG_DRM_I915_WERROR) += \ - $(foreach h,$(header_test),$(patsubst %.h,header_test_%.o,$(h))) - -clean-files += $(foreach h,$(header_test),$(patsubst %.h,header_test_%.c,$(h))) diff --git a/drivers/gpu/drm/i915/gt/Makefile b/drivers/gpu/drm/i915/gt/Makefile index eec6961015a1..7e73aa587967 100644 --- a/drivers/gpu/drm/i915/gt/Makefile +++ b/drivers/gpu/drm/i915/gt/Makefile @@ -2,4 +2,4 @@ subdir-ccflags-y += -I$(srctree)/$(src)/.. # Extra header tests -include $(src)/Makefile.header-test +header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h diff --git a/drivers/gpu/drm/i915/gt/Makefile.header-test b/drivers/gpu/drm/i915/gt/Makefile.header-test deleted file mode 100644 index 61e06cbb4b32..000000000000 --- a/drivers/gpu/drm/i915/gt/Makefile.header-test +++ /dev/null @@ -1,16 +0,0 @@ -# SPDX-License-Identifier: MIT -# Copyright © 2019 Intel Corporation - -# Test the headers are compilable as standalone units -header_test := $(notdir $(wildcard $(src)/*.h)) - -quiet_cmd_header_test = HDRTEST $@ - cmd_header_test = echo "\#include \"$( $@ - -header_test_%.c: %.h - $(call cmd,header_test) - -extra-$(CONFIG_DRM_I915_WERROR) += \ - $(foreach h,$(header_test),$(patsubst %.h,header_test_%.o,$(h))) - -clean-files += $(foreach h,$(header_test),$(patsubst %.h,header_test_%.c,$(h))) diff --git a/drivers/gpu/drm/i915/gt/uc/Makefile b/drivers/gpu/drm/i915/gt/uc/Makefile index db9718aa3ee9..bec94d434cb6 100644 --- a/drivers/gpu/drm/i915/gt/uc/Makefile +++ b/drivers/gpu/drm/i915/gt/uc/Makefile @@ -2,4 +2,4 @@ subdir-ccflags-y += -I$(srctree)/$(src)/../.. # Extra header tests -include $(src)/Makefile.header-test +header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h diff --git a/drivers/gpu/drm/i915/gt/uc/Makefile.header-test b/drivers/gpu/drm/i915/gt/uc/Makefile.header-test deleted file mode 100644 index 61e06cbb4b32..000000000000 --- a/drivers/gpu/drm/i915/gt/uc/Makefile.header-test +++ /dev/null @@ -1,16 +0,0 @@ -# SPDX-License-Identifier: MIT -# Copyright © 2019 Intel Corporation - -# Test the headers are compilable as standalone units -header_test := $(notdir $(wildcard $(src)/*.h)) - -quiet_cmd_header_test = HDRTEST $@ - cmd_header_test = echo "\#include \"$( $@ - -header_test_%.c: %.h - $(call cmd,header_test) - -extra-$(CONFIG_DRM_I915_WERROR) += \ - $(foreach h,$(header_test),$(patsubst %.h,header_test_%.o,$(h))) - -clean-files += $(foreach h,$(header_test),$(patsubst %.h,header_test_%.c,$(h))) -- cgit v1.2.3 From a56277216637c359e9125b9c16be09f21ed2a395 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 29 Jul 2019 12:37:20 +0100 Subject: drm/i915: Inline engine->init_context into its caller We only use the init_context vfunc once while recording the default context state, and we use the same sequence in each backend (eliding steps that do not apply). Remove the vfunc for simplicity and de-duplication. 
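As a rough illustration of this kind of refactor, consider a toy C model (invented names, not the i915 code) in which a per-backend init hook that is only ever called once is replaced by a single caller running the common sequence, with each step deciding for itself whether it applies.

#include <stdio.h>

struct engine { int is_render; };

/* each step is a no-op where it does not apply, so one caller fits all */
static int emit_workarounds(struct engine *e) { (void)e; return 0; }

static int emit_mocs(struct engine *e)
{
	if (!e->is_render)
		return 0;	/* nothing to program on non-render engines */
	return 0;
}

static int record_defaults(struct engine *e)
{
	int err;

	/* formerly: if (e->init_context) err = e->init_context(e); */
	err = emit_workarounds(e);
	if (err)
		return err;
	return emit_mocs(e);
}

int main(void)
{
	struct engine rcs = { .is_render = 1 }, vcs = { .is_render = 0 };

	printf("%d %d\n", record_defaults(&rcs), record_defaults(&vcs));
	return 0;
}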
Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190729113720.24830-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_engine_types.h | 1 - drivers/gpu/drm/i915/gt/intel_lrc.c | 21 --------------------- drivers/gpu/drm/i915/gt/intel_mocs.c | 5 ++++- drivers/gpu/drm/i915/gt/intel_mocs.h | 3 ++- drivers/gpu/drm/i915/gt/intel_renderstate.c | 2 +- drivers/gpu/drm/i915/gt/intel_ringbuffer.c | 18 ------------------ drivers/gpu/drm/i915/i915_gem.c | 21 ++++++++++++++++++--- 7 files changed, 25 insertions(+), 46 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index 8be63019d707..da61dd329210 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -383,7 +383,6 @@ struct intel_engine_cs { const struct intel_context_ops *cops; int (*request_alloc)(struct i915_request *rq); - int (*init_context)(struct i915_request *rq); int (*emit_flush)(struct i915_request *request, u32 mode); #define EMIT_INVALIDATE BIT(0) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 884dfc1cb033..4d7c4d0dbf75 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -141,7 +141,6 @@ #include "intel_gt.h" #include "intel_lrc_reg.h" #include "intel_mocs.h" -#include "intel_renderstate.h" #include "intel_reset.h" #include "intel_workarounds.h" @@ -2727,25 +2726,6 @@ static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs) return gen8_emit_wa_tail(request, cs); } -static int gen8_init_rcs_context(struct i915_request *rq) -{ - int ret; - - ret = intel_engine_emit_ctx_wa(rq); - if (ret) - return ret; - - ret = intel_rcs_context_init_mocs(rq); - /* - * Failing to program the MOCS is non-fatal.The system will not - * run at peak performance. So generate an error and carry on. - */ - if (ret) - DRM_ERROR("MOCS failed to program: expect performance issues.\n"); - - return intel_renderstate_emit(rq); -} - static void execlists_park(struct intel_engine_cs *engine) { del_timer_sync(&engine->execlists.timer); @@ -2853,7 +2833,6 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine) logical_ring_default_irqs(engine); if (engine->class == RENDER_CLASS) { - engine->init_context = gen8_init_rcs_context; engine->emit_flush = gen8_emit_flush_render; engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs; } diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c index 290a5e9b90b9..e082b25d2db1 100644 --- a/drivers/gpu/drm/i915/gt/intel_mocs.c +++ b/drivers/gpu/drm/i915/gt/intel_mocs.c @@ -568,11 +568,14 @@ void intel_mocs_init_l3cc_table(struct intel_gt *gt) * * Return: 0 on success, otherwise the error status. 
*/ -int intel_rcs_context_init_mocs(struct i915_request *rq) +int intel_mocs_emit(struct i915_request *rq) { struct drm_i915_mocs_table t; int ret; + if (rq->engine->class != RENDER_CLASS) + return 0; + if (get_mocs_settings(rq->engine->gt, &t)) { /* Program the RCS control registers */ ret = emit_mocs_control_table(rq, &t); diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.h b/drivers/gpu/drm/i915/gt/intel_mocs.h index 8b9813e6f9ac..a334db2d6d6b 100644 --- a/drivers/gpu/drm/i915/gt/intel_mocs.h +++ b/drivers/gpu/drm/i915/gt/intel_mocs.h @@ -54,8 +54,9 @@ struct i915_request; struct intel_engine_cs; struct intel_gt; -int intel_rcs_context_init_mocs(struct i915_request *rq); void intel_mocs_init_l3cc_table(struct intel_gt *gt); void intel_mocs_init_engine(struct intel_engine_cs *engine); +int intel_mocs_emit(struct i915_request *rq); + #endif diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c index 06a8dc40b19f..be37d4501c67 100644 --- a/drivers/gpu/drm/i915/gt/intel_renderstate.c +++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c @@ -41,7 +41,7 @@ struct intel_renderstate { static const struct intel_renderstate_rodata * render_state_get_rodata(const struct intel_engine_cs *engine) { - if (engine->id != RCS0) + if (engine->class != RENDER_CLASS) return NULL; switch (INTEL_GEN(engine->i915)) { diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c index 1de19dac4a14..5c7f2fdc5ec3 100644 --- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c @@ -37,7 +37,6 @@ #include "i915_trace.h" #include "intel_context.h" #include "intel_gt.h" -#include "intel_renderstate.h" #include "intel_reset.h" #include "intel_workarounds.h" @@ -849,21 +848,6 @@ static void reset_finish(struct intel_engine_cs *engine) { } -static int intel_rcs_ctx_init(struct i915_request *rq) -{ - int ret; - - ret = intel_engine_emit_ctx_wa(rq); - if (ret != 0) - return ret; - - ret = intel_renderstate_emit(rq); - if (ret) - return ret; - - return 0; -} - static int rcs_resume(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; @@ -2227,11 +2211,9 @@ static void setup_rcs(struct intel_engine_cs *engine) engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT; if (INTEL_GEN(i915) >= 7) { - engine->init_context = intel_rcs_ctx_init; engine->emit_flush = gen7_render_ring_flush; engine->emit_fini_breadcrumb = gen7_rcs_emit_breadcrumb; } else if (IS_GEN(i915, 6)) { - engine->init_context = intel_rcs_ctx_init; engine->emit_flush = gen6_render_ring_flush; engine->emit_fini_breadcrumb = gen6_rcs_emit_breadcrumb; } else if (IS_GEN(i915, 5)) { diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 01dd0d1d9bf6..65863e955f40 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -50,6 +50,7 @@ #include "gt/intel_gt_pm.h" #include "gt/intel_mocs.h" #include "gt/intel_reset.h" +#include "gt/intel_renderstate.h" #include "gt/intel_workarounds.h" #include "i915_drv.h" @@ -1294,10 +1295,24 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915) goto err_active; } - err = 0; - if (rq->engine->init_context) - err = rq->engine->init_context(rq); + err = intel_engine_emit_ctx_wa(rq); + if (err) + goto err_rq; + + /* + * Failing to program the MOCS is non-fatal.The system will not + * run at peak performance. So warn the user and carry on. 
+ */ + err = intel_mocs_emit(rq); + if (err) + dev_notice(i915->drm.dev, + "Failed to program MOCS registers; expect performance issues.\n"); + + err = intel_renderstate_emit(rq); + if (err) + goto err_rq; +err_rq: i915_request_add(rq); if (err) goto err_active; -- cgit v1.2.3 From cfba5de9b99f8bbb8b4ea11b3049784e78b8759b Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Tue, 23 Jul 2019 23:18:33 +0100 Subject: drivers: Introduce device lookup variants by of_node Introduce wrappers for {bus/driver/class}_find_device() to locate devices by its of_node. Cc: Maarten Lankhorst Cc: Maxime Ripard Cc: dri-devel@lists.freedesktop.org Cc: David Airlie Cc: Daniel Vetter Cc: devicetree@vger.kernel.org Cc: Florian Fainelli Cc: Frank Rowand Cc: Heiko Stuebner Cc: Liam Girdwood Cc: linux-i2c@vger.kernel.org Cc: linux-rockchip@lists.infradead.org Cc: linux-spi@vger.kernel.org Cc: Mathieu Poirier Cc: Rob Herring Cc: Srinivas Kandagatla Cc: Takashi Iwai Cc: Alan Tull Cc: linux-fpga@vger.kernel.org Cc: Peter Rosin Cc: Florian Fainelli Cc: Heiner Kallweit Cc: "David S. Miller" Cc: Andrew Lunn Cc: Liam Girdwood Cc: "Rafael J. Wysocki" Cc: Thor Thayer Cc: Jiri Slaby Cc: Andrew Lunn Cc: Peter Rosin Signed-off-by: Suzuki K Poulose Acked-by: Lee Jones Acked-by: Wolfram Sang # I2C part Acked-by: Moritz Fischer # For FPGA part Acked-by: Mark Brown Link: https://lore.kernel.org/r/20190723221838.12024-3-suzuki.poulose@arm.com Signed-off-by: Greg Kroah-Hartman --- drivers/amba/tegra-ahb.c | 11 +---------- drivers/fpga/fpga-bridge.c | 8 +------- drivers/fpga/fpga-mgr.c | 8 +------- drivers/gpu/drm/drm_mipi_dsi.c | 7 +------ drivers/i2c/i2c-core-of.c | 7 +------ drivers/mfd/altera-sysmgr.c | 14 ++----------- drivers/mux/core.c | 7 +------ drivers/net/phy/mdio_bus.c | 9 +-------- drivers/nvmem/core.c | 7 +------ drivers/of/of_mdio.c | 8 +------- drivers/of/platform.c | 7 +------ drivers/regulator/of_regulator.c | 7 +------ drivers/spi/spi.c | 20 ++++--------------- include/linux/device.h | 37 +++++++++++++++++++++++++++++++++++ sound/soc/rockchip/rk3399_gru_sound.c | 9 ++------- 15 files changed, 56 insertions(+), 110 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/amba/tegra-ahb.c b/drivers/amba/tegra-ahb.c index aa64eece77a6..57d3b2e2d007 100644 --- a/drivers/amba/tegra-ahb.c +++ b/drivers/amba/tegra-ahb.c @@ -134,22 +134,13 @@ static inline void gizmo_writel(struct tegra_ahb *ahb, u32 value, u32 offset) } #ifdef CONFIG_TEGRA_IOMMU_SMMU -static int tegra_ahb_match_by_smmu(struct device *dev, const void *data) -{ - struct tegra_ahb *ahb = dev_get_drvdata(dev); - const struct device_node *dn = data; - - return (ahb->dev->of_node == dn) ? 
1 : 0; -} - int tegra_ahb_enable_smmu(struct device_node *dn) { struct device *dev; u32 val; struct tegra_ahb *ahb; - dev = driver_find_device(&tegra_ahb_driver.driver, NULL, dn, - tegra_ahb_match_by_smmu); + dev = driver_find_device_by_of_node(&tegra_ahb_driver.driver, dn); if (!dev) return -EPROBE_DEFER; ahb = dev_get_drvdata(dev); diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c index 80bd8f1b2aa6..4bab9028940a 100644 --- a/drivers/fpga/fpga-bridge.c +++ b/drivers/fpga/fpga-bridge.c @@ -19,11 +19,6 @@ static struct class *fpga_bridge_class; /* Lock for adding/removing bridges to linked lists*/ static spinlock_t bridge_list_lock; -static int fpga_bridge_of_node_match(struct device *dev, const void *data) -{ - return dev->of_node == data; -} - /** * fpga_bridge_enable - Enable transactions on the bridge * @@ -104,8 +99,7 @@ struct fpga_bridge *of_fpga_bridge_get(struct device_node *np, { struct device *dev; - dev = class_find_device(fpga_bridge_class, NULL, np, - fpga_bridge_of_node_match); + dev = class_find_device_by_of_node(fpga_bridge_class, np); if (!dev) return ERR_PTR(-ENODEV); diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c index c3866816456a..e05104f5e40c 100644 --- a/drivers/fpga/fpga-mgr.c +++ b/drivers/fpga/fpga-mgr.c @@ -482,11 +482,6 @@ struct fpga_manager *fpga_mgr_get(struct device *dev) } EXPORT_SYMBOL_GPL(fpga_mgr_get); -static int fpga_mgr_of_node_match(struct device *dev, const void *data) -{ - return dev->of_node == data; -} - /** * of_fpga_mgr_get - Given a device node, get a reference to a fpga mgr. * @@ -498,8 +493,7 @@ struct fpga_manager *of_fpga_mgr_get(struct device_node *node) { struct device *dev; - dev = class_find_device(fpga_mgr_class, NULL, node, - fpga_mgr_of_node_match); + dev = class_find_device_by_of_node(fpga_mgr_class, node); if (!dev) return ERR_PTR(-ENODEV); diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c index ad19df0686c9..bd2498bbd74a 100644 --- a/drivers/gpu/drm/drm_mipi_dsi.c +++ b/drivers/gpu/drm/drm_mipi_dsi.c @@ -93,11 +93,6 @@ static struct bus_type mipi_dsi_bus_type = { .pm = &mipi_dsi_device_pm_ops, }; -static int of_device_match(struct device *dev, const void *data) -{ - return dev->of_node == data; -} - /** * of_find_mipi_dsi_device_by_node() - find the MIPI DSI device matching a * device tree node @@ -110,7 +105,7 @@ struct mipi_dsi_device *of_find_mipi_dsi_device_by_node(struct device_node *np) { struct device *dev; - dev = bus_find_device(&mipi_dsi_bus_type, NULL, np, of_device_match); + dev = bus_find_device_by_of_node(&mipi_dsi_bus_type, np); return dev ? 
to_mipi_dsi_device(dev) : NULL; } diff --git a/drivers/i2c/i2c-core-of.c b/drivers/i2c/i2c-core-of.c index d1c48dec7118..6f632d543fcc 100644 --- a/drivers/i2c/i2c-core-of.c +++ b/drivers/i2c/i2c-core-of.c @@ -113,11 +113,6 @@ void of_i2c_register_devices(struct i2c_adapter *adap) of_node_put(bus); } -static int of_dev_node_match(struct device *dev, const void *data) -{ - return dev->of_node == data; -} - static int of_dev_or_parent_node_match(struct device *dev, const void *data) { if (dev->of_node == data) @@ -135,7 +130,7 @@ struct i2c_client *of_find_i2c_device_by_node(struct device_node *node) struct device *dev; struct i2c_client *client; - dev = bus_find_device(&i2c_bus_type, NULL, node, of_dev_node_match); + dev = bus_find_device_by_of_node(&i2c_bus_type, node); if (!dev) return NULL; diff --git a/drivers/mfd/altera-sysmgr.c b/drivers/mfd/altera-sysmgr.c index 2ee14d8a6d31..d2a13a547a3c 100644 --- a/drivers/mfd/altera-sysmgr.c +++ b/drivers/mfd/altera-sysmgr.c @@ -87,16 +87,6 @@ static struct regmap_config altr_sysmgr_regmap_cfg = { .use_single_write = true, }; -/** - * sysmgr_match_phandle - * Matching function used by driver_find_device(). - * Return: True if match is found, otherwise false. - */ -static int sysmgr_match_phandle(struct device *dev, const void *data) -{ - return dev->of_node == (const struct device_node *)data; -} - /** * altr_sysmgr_regmap_lookup_by_phandle * Find the sysmgr previous configured in probe() and return regmap property. @@ -117,8 +107,8 @@ struct regmap *altr_sysmgr_regmap_lookup_by_phandle(struct device_node *np, if (!sysmgr_np) return ERR_PTR(-ENODEV); - dev = driver_find_device(&altr_sysmgr_driver.driver, NULL, - (void *)sysmgr_np, sysmgr_match_phandle); + dev = driver_find_device_by_of_node(&altr_sysmgr_driver.driver, + (void *)sysmgr_np); of_node_put(sysmgr_np); if (!dev) return ERR_PTR(-EPROBE_DEFER); diff --git a/drivers/mux/core.c b/drivers/mux/core.c index d1271c1ee23c..1fb22388e7e0 100644 --- a/drivers/mux/core.c +++ b/drivers/mux/core.c @@ -405,17 +405,12 @@ int mux_control_deselect(struct mux_control *mux) } EXPORT_SYMBOL_GPL(mux_control_deselect); -static int of_dev_node_match(struct device *dev, const void *data) -{ - return dev->of_node == data; -} - /* Note this function returns a reference to the mux_chip dev. */ static struct mux_chip *of_find_mux_chip_by_node(struct device_node *np) { struct device *dev; - dev = class_find_device(&mux_class, NULL, np, of_dev_node_match); + dev = class_find_device_by_of_node(&mux_class, np); return dev ? to_mux_chip(dev) : NULL; } diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index bd04fe762056..ce940871331e 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -262,11 +262,6 @@ static struct class mdio_bus_class = { }; #if IS_ENABLED(CONFIG_OF_MDIO) -/* Helper function for of_mdio_find_bus */ -static int of_mdio_bus_match(struct device *dev, const void *mdio_bus_np) -{ - return dev->of_node == mdio_bus_np; -} /** * of_mdio_find_bus - Given an mii_bus node, find the mii_bus. * @mdio_bus_np: Pointer to the mii_bus. @@ -287,9 +282,7 @@ struct mii_bus *of_mdio_find_bus(struct device_node *mdio_bus_np) if (!mdio_bus_np) return NULL; - d = class_find_device(&mdio_bus_class, NULL, mdio_bus_np, - of_mdio_bus_match); - + d = class_find_device_by_of_node(&mdio_bus_class, mdio_bus_np); return d ? 
to_mii_bus(d) : NULL; } EXPORT_SYMBOL(of_mdio_find_bus); diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index ac5d945be88a..057d1ff87d5d 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -76,11 +76,6 @@ static struct bus_type nvmem_bus_type = { .name = "nvmem", }; -static int of_nvmem_match(struct device *dev, const void *nvmem_np) -{ - return dev->of_node == nvmem_np; -} - static struct nvmem_device *of_nvmem_find(struct device_node *nvmem_np) { struct device *d; @@ -88,7 +83,7 @@ static struct nvmem_device *of_nvmem_find(struct device_node *nvmem_np) if (!nvmem_np) return NULL; - d = bus_find_device(&nvmem_bus_type, NULL, nvmem_np, of_nvmem_match); + d = bus_find_device_by_of_node(&nvmem_bus_type, nvmem_np); if (!d) return NULL; diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index 44f53496cab1..000b95787df1 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -280,12 +280,6 @@ unregister: } EXPORT_SYMBOL(of_mdiobus_register); -/* Helper function for of_phy_find_device */ -static int of_phy_match(struct device *dev, const void *phy_np) -{ - return dev->of_node == phy_np; -} - /** * of_phy_find_device - Give a PHY node, find the phy_device * @phy_np: Pointer to the phy's device tree node @@ -301,7 +295,7 @@ struct phy_device *of_phy_find_device(struct device_node *phy_np) if (!phy_np) return NULL; - d = bus_find_device(&mdio_bus_type, NULL, phy_np, of_phy_match); + d = bus_find_device_by_of_node(&mdio_bus_type, phy_np); if (d) { mdiodev = to_mdio_device(d); if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY) diff --git a/drivers/of/platform.c b/drivers/of/platform.c index 7801e25e6895..b47a2292fe8e 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c @@ -37,11 +37,6 @@ static const struct of_device_id of_skipped_node_table[] = { {} /* Empty terminated list */ }; -static int of_dev_node_match(struct device *dev, const void *data) -{ - return dev->of_node == data; -} - /** * of_find_device_by_node - Find the platform_device associated with a node * @np: Pointer to device tree node @@ -55,7 +50,7 @@ struct platform_device *of_find_device_by_node(struct device_node *np) { struct device *dev; - dev = bus_find_device(&platform_bus_type, NULL, np, of_dev_node_match); + dev = bus_find_device_by_of_node(&platform_bus_type, np); return dev ? to_platform_device(dev) : NULL; } EXPORT_SYMBOL(of_find_device_by_node); diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c index 397918ebba55..20dcc9c03adc 100644 --- a/drivers/regulator/of_regulator.c +++ b/drivers/regulator/of_regulator.c @@ -460,16 +460,11 @@ error: return NULL; } -static int of_node_match(struct device *dev, const void *data) -{ - return dev->of_node == data; -} - struct regulator_dev *of_find_regulator_by_node(struct device_node *np) { struct device *dev; - dev = class_find_device(®ulator_class, NULL, np, of_node_match); + dev = class_find_device_by_of_node(®ulator_class, np); return dev ? 
dev_to_rdev(dev) : NULL; } diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 75ac046cae52..a591da87981a 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -3652,37 +3652,25 @@ EXPORT_SYMBOL_GPL(spi_write_then_read); /*-------------------------------------------------------------------------*/ #if IS_ENABLED(CONFIG_OF) -static int __spi_of_device_match(struct device *dev, const void *data) -{ - return dev->of_node == data; -} - /* must call put_device() when done with returned spi_device device */ struct spi_device *of_find_spi_device_by_node(struct device_node *node) { - struct device *dev = bus_find_device(&spi_bus_type, NULL, node, - __spi_of_device_match); + struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node); + return dev ? to_spi_device(dev) : NULL; } EXPORT_SYMBOL_GPL(of_find_spi_device_by_node); #endif /* IS_ENABLED(CONFIG_OF) */ #if IS_ENABLED(CONFIG_OF_DYNAMIC) -static int __spi_of_controller_match(struct device *dev, const void *data) -{ - return dev->of_node == data; -} - /* the spi controllers are not using spi_bus, so we find it with another way */ static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node) { struct device *dev; - dev = class_find_device(&spi_master_class, NULL, node, - __spi_of_controller_match); + dev = class_find_device_by_of_node(&spi_master_class, node); if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE)) - dev = class_find_device(&spi_slave_class, NULL, node, - __spi_of_controller_match); + dev = class_find_device_by_of_node(&spi_slave_class, node); if (!dev) return NULL; diff --git a/include/linux/device.h b/include/linux/device.h index 3ba376b8b456..29d8d7ad41e6 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -186,6 +186,18 @@ static inline struct device *bus_find_device_by_name(struct bus_type *bus, return bus_find_device(bus, start, name, device_match_name); } +/** + * bus_find_device_by_of_node : device iterator for locating a particular device + * matching the of_node. + * @bus: bus type + * @np: of_node of the device to match. + */ +static inline struct device * +bus_find_device_by_of_node(struct bus_type *bus, const struct device_node *np) +{ + return bus_find_device(bus, NULL, np, device_match_of_node); +} + struct device *subsys_find_device_by_id(struct bus_type *bus, unsigned int id, struct device *hint); int bus_for_each_drv(struct bus_type *bus, struct device_driver *start, @@ -366,6 +378,19 @@ static inline struct device *driver_find_device_by_name(struct device_driver *dr return driver_find_device(drv, NULL, name, device_match_name); } +/** + * driver_find_device_by_of_node- device iterator for locating a particular device + * by of_node pointer. + * @driver: the driver we're iterating + * @np: of_node pointer to match. + */ +static inline struct device * +driver_find_device_by_of_node(struct device_driver *drv, + const struct device_node *np) +{ + return driver_find_device(drv, NULL, np, device_match_of_node); +} + void driver_deferred_probe_add(struct device *dev); int driver_deferred_probe_check_state(struct device *dev); int driver_deferred_probe_check_state_continue(struct device *dev); @@ -507,6 +532,18 @@ static inline struct device *class_find_device_by_name(struct class *class, return class_find_device(class, NULL, name, device_match_name); } +/** + * class_find_device_by_of_node : device iterator for locating a particular device + * matching the of_node. + * @class: class type + * @np: of_node of the device to match. 
+ */ +static inline struct device * +class_find_device_by_of_node(struct class *class, const struct device_node *np) +{ + return class_find_device(class, NULL, np, device_match_of_node); +} + struct class_attribute { struct attribute attr; ssize_t (*show)(struct class *class, struct class_attribute *attr, diff --git a/sound/soc/rockchip/rk3399_gru_sound.c b/sound/soc/rockchip/rk3399_gru_sound.c index c16b0ffe8cfc..d951100bf770 100644 --- a/sound/soc/rockchip/rk3399_gru_sound.c +++ b/sound/soc/rockchip/rk3399_gru_sound.c @@ -422,11 +422,6 @@ static const struct dailink_match_data dailink_match[] = { }, }; -static int of_dev_node_match(struct device *dev, const void *data) -{ - return dev->of_node == data; -} - static int rockchip_sound_codec_node_match(struct device_node *np_codec) { struct device *dev; @@ -438,8 +433,8 @@ static int rockchip_sound_codec_node_match(struct device_node *np_codec) continue; if (dailink_match[i].bus_type) { - dev = bus_find_device(dailink_match[i].bus_type, NULL, - np_codec, of_dev_node_match); + dev = bus_find_device_by_of_node(dailink_match[i].bus_type, + np_codec); if (!dev) continue; put_device(dev); -- cgit v1.2.3 From 36f3313d6bff91ab2a9e47698c27d15363640a4e Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Tue, 23 Jul 2019 23:18:38 +0100 Subject: platform: Add platform_find_device_by_driver() helper MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Provide a helper to lookup platform devices by matching device driver in order to avoid drivers trying to use platform bus internals. Cc: Eric Anholt Cc: Greg Kroah-Hartman Cc: "Heiko Stübner" Cc: Inki Dae Cc: "Rafael J. Wysocki" Cc: Sandy Huang Cc: Seung-Woo Kim Tested-by: Heiko Stuebner Signed-off-by: Suzuki K Poulose Link: https://lore.kernel.org/r/20190723221838.12024-8-suzuki.poulose@arm.com Signed-off-by: Greg Kroah-Hartman --- drivers/base/platform.c | 14 ++++++++++++++ drivers/gpu/drm/exynos/exynos_drm_drv.c | 9 +++------ drivers/gpu/drm/mcde/mcde_drv.c | 3 +-- drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 3 +-- drivers/gpu/drm/vc4/vc4_drv.c | 3 +-- include/linux/platform_device.h | 3 +++ 6 files changed, 23 insertions(+), 12 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 506a0175a5a7..a174ce5ea17c 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -1197,6 +1197,20 @@ struct bus_type platform_bus_type = { }; EXPORT_SYMBOL_GPL(platform_bus_type); +/** + * platform_find_device_by_driver - Find a platform device with a given + * driver. + * @start: The device to start the search from. + * @drv: The device driver to look for. 
+ */ +struct device *platform_find_device_by_driver(struct device *start, + const struct device_driver *drv) +{ + return bus_find_device(&platform_bus_type, start, drv, + (void *)platform_match); +} +EXPORT_SYMBOL_GPL(platform_find_device_by_driver); + int __init platform_bus_init(void) { int error; diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 58baf49d9926..badab94be2d6 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -242,9 +242,7 @@ static struct component_match *exynos_drm_match_add(struct device *dev) if (!info->driver || !(info->flags & DRM_COMPONENT_DRIVER)) continue; - while ((d = bus_find_device(&platform_bus_type, p, - &info->driver->driver, - (void *)platform_bus_type.match))) { + while ((d = platform_find_device_by_driver(p, &info->driver->driver))) { put_device(p); if (!(info->flags & DRM_FIMC_DEVICE) || @@ -412,9 +410,8 @@ static void exynos_drm_unregister_devices(void) if (!info->driver || !(info->flags & DRM_VIRTUAL_DEVICE)) continue; - while ((dev = bus_find_device(&platform_bus_type, NULL, - &info->driver->driver, - (void *)platform_bus_type.match))) { + while ((dev = platform_find_device_by_driver(NULL, + &info->driver->driver))) { put_device(dev); platform_device_unregister(to_platform_device(dev)); } diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c index baf63fb6850a..c07abf9e201c 100644 --- a/drivers/gpu/drm/mcde/mcde_drv.c +++ b/drivers/gpu/drm/mcde/mcde_drv.c @@ -477,8 +477,7 @@ static int mcde_probe(struct platform_device *pdev) struct device_driver *drv = &mcde_component_drivers[i]->driver; struct device *p = NULL, *d; - while ((d = bus_find_device(&platform_bus_type, p, drv, - (void *)platform_bus_type.match))) { + while ((d = platform_find_device_by_driver(p, drv))) { put_device(p); component_match_add(dev, &match, mcde_compare_dev, d); p = d; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index 53d2c5bd61dc..38dc26376961 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -330,8 +330,7 @@ static struct component_match *rockchip_drm_match_add(struct device *dev) struct device *p = NULL, *d; do { - d = bus_find_device(&platform_bus_type, p, &drv->driver, - (void *)platform_bus_type.match); + d = platform_find_device_by_driver(p, &drv->driver); put_device(p); p = d; diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index bf11930e40e1..1551c8253bec 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -237,8 +237,7 @@ static void vc4_match_add_drivers(struct device *dev, struct device_driver *drv = &drivers[i]->driver; struct device *p = NULL, *d; - while ((d = bus_find_device(&platform_bus_type, p, drv, - (void *)platform_bus_type.match))) { + while ((d = platform_find_device_by_driver(p, drv))) { put_device(p); component_match_add(dev, match, compare_dev, d); p = d; diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index 9bc36b589827..37e15a935a42 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -51,6 +51,9 @@ extern struct device platform_bus; extern void arch_setup_pdev_archdata(struct platform_device *); extern struct resource *platform_get_resource(struct platform_device *, unsigned int, unsigned int); +extern struct device * +platform_find_device_by_driver(struct device *start, + const struct 
device_driver *drv); extern void __iomem * devm_platform_ioremap_resource(struct platform_device *pdev, unsigned int index); -- cgit v1.2.3 From f4cc743a98136df3c3763050a0e8223b52d9a960 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Mon, 29 Jul 2019 15:12:16 +0800 Subject: drm/bridge: lvds-encoder: Fix build error while CONFIG_DRM_KMS_HELPER=m If DRM_LVDS_ENCODER=y but CONFIG_DRM_KMS_HELPER=m, build fails: drivers/gpu/drm/bridge/lvds-encoder.o: In function `lvds_encoder_probe': lvds-encoder.c:(.text+0x155): undefined reference to `devm_drm_panel_bridge_add' Reported-by: Hulk Robot Fixes: dbb58bfd9ae6 ("drm/bridge: Fix lvds-encoder since the panel_bridge rework.") Signed-off-by: YueHaibing Reviewed-by: Neil Armstrong Signed-off-by: Neil Armstrong Link: https://patchwork.freedesktop.org/patch/msgid/20190729071216.27488-1-yuehaibing@huawei.com --- drivers/gpu/drm/bridge/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index ee777469293a..cc62603b87c5 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -48,6 +48,7 @@ config DRM_DUMB_VGA_DAC config DRM_LVDS_ENCODER tristate "Transparent parallel to LVDS encoder support" depends on OF + select DRM_KMS_HELPER select DRM_PANEL_BRIDGE help Support for transparent parallel to LVDS encoders that don't require -- cgit v1.2.3 From e1ae72a21e5f0d1846e26e3f5963930664702071 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Mon, 29 Jul 2019 17:05:20 +0800 Subject: drm/bridge: tc358764: Fix build error If CONFIG_DRM_TOSHIBA_TC358764=y but CONFIG_DRM_KMS_HELPER=m, building fails: drivers/gpu/drm/bridge/tc358764.o:(.rodata+0x228): undefined reference to `drm_atomic_helper_connector_reset' drivers/gpu/drm/bridge/tc358764.o:(.rodata+0x240): undefined reference to `drm_helper_probe_single_connector_modes' drivers/gpu/drm/bridge/tc358764.o:(.rodata+0x268): undefined reference to `drm_atomic_helper_connector_duplicate_state' drivers/gpu/drm/bridge/tc358764.o:(.rodata+0x270): undefined reference to `drm_atomic_helper_connector_destroy_state' Like TC358767, select DRM_KMS_HELPER to fix this, and change to select DRM_PANEL to avoid recursive dependency. Reported-by: Hulk Robot Fixes: f38b7cca6d0e ("drm/bridge: tc358764: Add DSI to LVDS bridge driver") Signed-off-by: YueHaibing Reviewed-by: Laurent Pinchart Reviewed-by: Neil Armstrong Signed-off-by: Neil Armstrong Link: https://patchwork.freedesktop.org/patch/msgid/20190729090520.25968-1-yuehaibing@huawei.com --- drivers/gpu/drm/bridge/Kconfig | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index cc62603b87c5..e4e22bbae2a7 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -117,9 +117,10 @@ config DRM_THINE_THC63LVD1024 config DRM_TOSHIBA_TC358764 tristate "TC358764 DSI/LVDS bridge" - depends on DRM && DRM_PANEL depends on OF select DRM_MIPI_DSI + select DRM_KMS_HELPER + select DRM_PANEL help Toshiba TC358764 DSI/LVDS bridge driver. -- cgit v1.2.3 From c082afac86cb31e2a5843336e81a9b89e3b0d664 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 30 Jul 2019 15:32:08 +0100 Subject: drm/i915: Move aliasing_ppgtt underneath its i915_ggtt The aliasing_ppgtt provides a PIN_USER alias for the global gtt, so move it under the i915_ggtt to simplify later transformations to enable intel_context.vm. 
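For reference, callers now reach the alias through the GGTT itself rather than
through i915->mm. A minimal sketch of the new lookup (the helper below is
illustrative only, not part of this patch):

	/* Resolve the aliasing PPGTT address space via the GGTT, if enabled. */
	static struct i915_address_space *aliased_vm(struct i915_ggtt *ggtt)
	{
		/* ggtt->alias replaces the old i915->mm.aliasing_ppgtt pointer */
		return ggtt->alias ? &ggtt->alias->vm : NULL;
	}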
Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190730143209.4549-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_context.c | 7 +-- .../gpu/drm/i915/gem/selftests/i915_gem_context.c | 2 +- drivers/gpu/drm/i915/gt/intel_ringbuffer.c | 69 ++++++++++++++-------- drivers/gpu/drm/i915/i915_drv.h | 3 - drivers/gpu/drm/i915/i915_gem_gtt.c | 36 +++++------ drivers/gpu/drm/i915/i915_gem_gtt.h | 3 + drivers/gpu/drm/i915/i915_vma.c | 2 +- 7 files changed, 71 insertions(+), 51 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index ffb59d96d4d8..0f6b0678f548 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -459,8 +459,7 @@ __create_context(struct drm_i915_private *i915) i915_gem_context_set_recoverable(ctx); ctx->ring_size = 4 * PAGE_SIZE; - ctx->desc_template = - default_desc_template(i915, &i915->mm.aliasing_ppgtt->vm); + ctx->desc_template = default_desc_template(i915, NULL); for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++) ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES; @@ -2258,8 +2257,8 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, args->size = 0; if (ctx->vm) args->value = ctx->vm->total; - else if (to_i915(dev)->mm.aliasing_ppgtt) - args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total; + else if (to_i915(dev)->ggtt.alias) + args->value = to_i915(dev)->ggtt.alias->vm.total; else args->value = to_i915(dev)->ggtt.vm.total; break; diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index db7856f0f31e..bbd17d4b8ffd 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -1190,7 +1190,7 @@ static int igt_ctx_readonly(void *arg) goto out_unlock; } - vm = ctx->vm ?: &i915->mm.aliasing_ppgtt->vm; + vm = ctx->vm ?: &i915->ggtt.alias->vm; if (!vm || !vm->has_read_only) { err = 0; goto out_unlock; diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c index 5c7f2fdc5ec3..11afe64cc256 100644 --- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c @@ -1376,30 +1376,41 @@ static void ring_context_destroy(struct kref *ref) intel_context_free(ce); } -static int __context_pin_ppgtt(struct i915_gem_context *ctx) +static struct i915_address_space *vm_alias(struct intel_context *ce) +{ + struct i915_address_space *vm; + + vm = ce->gem_context->vm; + if (!vm) + vm = &ce->engine->gt->ggtt->alias->vm; + + return vm; +} + +static int __context_pin_ppgtt(struct intel_context *ce) { struct i915_address_space *vm; int err = 0; - vm = ctx->vm ?: &ctx->i915->mm.aliasing_ppgtt->vm; + vm = vm_alias(ce); if (vm) err = gen6_ppgtt_pin(i915_vm_to_ppgtt((vm))); return err; } -static void __context_unpin_ppgtt(struct i915_gem_context *ctx) +static void __context_unpin_ppgtt(struct intel_context *ce) { struct i915_address_space *vm; - vm = ctx->vm ?: &ctx->i915->mm.aliasing_ppgtt->vm; + vm = vm_alias(ce); if (vm) gen6_ppgtt_unpin(i915_vm_to_ppgtt(vm)); } static void ring_context_unpin(struct intel_context *ce) { - __context_unpin_ppgtt(ce->gem_context); + __context_unpin_ppgtt(ce); } static struct i915_vma * @@ -1493,7 +1504,7 @@ static int ring_context_pin(struct intel_context *ce) if (err) return err; - err = 
__context_pin_ppgtt(ce->gem_context); + err = __context_pin_ppgtt(ce); if (err) goto err_active; @@ -1685,7 +1696,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags) return 0; } -static int remap_l3(struct i915_request *rq, int slice) +static int remap_l3_slice(struct i915_request *rq, int slice) { u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice]; int i; @@ -1713,15 +1724,34 @@ static int remap_l3(struct i915_request *rq, int slice) return 0; } +static int remap_l3(struct i915_request *rq) +{ + struct i915_gem_context *ctx = rq->gem_context; + int i, err; + + if (!ctx->remap_slice) + return 0; + + for (i = 0; i < MAX_L3_SLICES; i++) { + if (!(ctx->remap_slice & BIT(i))) + continue; + + err = remap_l3_slice(rq, i); + if (err) + return err; + } + + ctx->remap_slice = 0; + return 0; +} + static int switch_context(struct i915_request *rq) { struct intel_engine_cs *engine = rq->engine; - struct i915_gem_context *ctx = rq->gem_context; - struct i915_address_space *vm = - ctx->vm ?: &rq->i915->mm.aliasing_ppgtt->vm; + struct i915_address_space *vm = vm_alias(rq->hw_context); unsigned int unwind_mm = 0; u32 hw_flags = 0; - int ret, i; + int ret; GEM_BUG_ON(HAS_EXECLISTS(rq->i915)); @@ -1765,7 +1795,7 @@ static int switch_context(struct i915_request *rq) * as nothing actually executes using the kernel context; it * is purely used for flushing user contexts. */ - if (i915_gem_context_is_kernel(ctx)) + if (i915_gem_context_is_kernel(rq->gem_context)) hw_flags = MI_RESTORE_INHIBIT; ret = mi_set_context(rq, hw_flags); @@ -1799,18 +1829,9 @@ static int switch_context(struct i915_request *rq) goto err_mm; } - if (ctx->remap_slice) { - for (i = 0; i < MAX_L3_SLICES; i++) { - if (!(ctx->remap_slice & BIT(i))) - continue; - - ret = remap_l3(rq, i); - if (ret) - goto err_mm; - } - - ctx->remap_slice = 0; - } + ret = remap_l3(rq); + if (ret) + goto err_mm; return 0; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 364a9fb543b6..d5ab69d19558 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -784,9 +784,6 @@ struct i915_gem_mm { */ struct vfsmount *gemfs; - /** PPGTT used for aliasing the PPGTT with the GTT */ - struct i915_ppgtt *aliasing_ppgtt; - struct notifier_block oom_notifier; struct notifier_block vmap_notifier; struct shrinker shrinker; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 423c890d03bf..c3028722d4e3 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2446,18 +2446,18 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma, pte_flags |= PTE_READ_ONLY; if (flags & I915_VMA_LOCAL_BIND) { - struct i915_ppgtt *appgtt = i915->mm.aliasing_ppgtt; + struct i915_ppgtt *alias = i915_vm_to_ggtt(vma->vm)->alias; if (!(vma->flags & I915_VMA_LOCAL_BIND)) { - ret = appgtt->vm.allocate_va_range(&appgtt->vm, - vma->node.start, - vma->size); + ret = alias->vm.allocate_va_range(&alias->vm, + vma->node.start, + vma->size); if (ret) return ret; } - appgtt->vm.insert_entries(&appgtt->vm, vma, cache_level, - pte_flags); + alias->vm.insert_entries(&alias->vm, vma, + cache_level, pte_flags); } if (flags & I915_VMA_GLOBAL_BIND) { @@ -2485,7 +2485,8 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma) } if (vma->flags & I915_VMA_LOCAL_BIND) { - struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->vm; + struct i915_address_space *vm = + &i915_vm_to_ggtt(vma->vm)->alias->vm; vm->clear_range(vm, vma->node.start, 
vma->size); } @@ -2542,13 +2543,12 @@ static void i915_gtt_color_adjust(const struct drm_mm_node *node, *end -= I915_GTT_PAGE_SIZE; } -static int init_aliasing_ppgtt(struct drm_i915_private *i915) +static int init_aliasing_ppgtt(struct i915_ggtt *ggtt) { - struct i915_ggtt *ggtt = &i915->ggtt; struct i915_ppgtt *ppgtt; int err; - ppgtt = i915_ppgtt_create(i915); + ppgtt = i915_ppgtt_create(ggtt->vm.i915); if (IS_ERR(ppgtt)) return PTR_ERR(ppgtt); @@ -2567,7 +2567,7 @@ static int init_aliasing_ppgtt(struct drm_i915_private *i915) if (err) goto err_ppgtt; - i915->mm.aliasing_ppgtt = ppgtt; + ggtt->alias = ppgtt; GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma); ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma; @@ -2582,14 +2582,14 @@ err_ppgtt: return err; } -static void fini_aliasing_ppgtt(struct drm_i915_private *i915) +static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt) { - struct i915_ggtt *ggtt = &i915->ggtt; + struct drm_i915_private *i915 = ggtt->vm.i915; struct i915_ppgtt *ppgtt; mutex_lock(&i915->drm.struct_mutex); - ppgtt = fetch_and_zero(&i915->mm.aliasing_ppgtt); + ppgtt = fetch_and_zero(&ggtt->alias); if (!ppgtt) goto out; @@ -2706,7 +2706,7 @@ int i915_init_ggtt(struct drm_i915_private *i915) return ret; if (INTEL_PPGTT(i915) == INTEL_PPGTT_ALIASING) { - ret = init_aliasing_ppgtt(i915); + ret = init_aliasing_ppgtt(&i915->ggtt); if (ret) cleanup_init_ggtt(&i915->ggtt); } @@ -2755,7 +2755,7 @@ void i915_ggtt_driver_release(struct drm_i915_private *i915) { struct pagevec *pvec; - fini_aliasing_ppgtt(i915); + fini_aliasing_ppgtt(&i915->ggtt); ggtt_cleanup_hw(&i915->ggtt); @@ -3588,7 +3588,7 @@ int i915_gem_gtt_reserve(struct i915_address_space *vm, GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)); GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT)); GEM_BUG_ON(range_overflows(offset, size, vm->total)); - GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm); + GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm); GEM_BUG_ON(drm_mm_node_allocated(node)); node->size = size; @@ -3685,7 +3685,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm, GEM_BUG_ON(start >= end); GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE)); GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE)); - GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm); + GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm); GEM_BUG_ON(drm_mm_node_allocated(node)); if (unlikely(range_overflows(start, size, end))) diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index cea59ef1a365..51274483502e 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -394,6 +394,9 @@ struct i915_ggtt { void __iomem *gsm; void (*invalidate)(struct i915_ggtt *ggtt); + /** PPGTT used for aliasing the PPGTT with the GTT */ + struct i915_ppgtt *alias; + bool do_idle_maps; int mtrr; diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index ee73baf29415..eb16a1a93bbc 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -104,7 +104,7 @@ vma_create(struct drm_i915_gem_object *obj, struct rb_node *rb, **p; /* The aliasing_ppgtt should never be used directly! 
*/ - GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm); + GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm); vma = i915_vma_alloc(); if (vma == NULL) -- cgit v1.2.3 From f5d974f9d2a811ef08c044b6fce95c94a6a6e19b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 30 Jul 2019 15:32:09 +0100 Subject: drm/i915/gt: Provide a local intel_context.vm Track the currently bound address space used by the HW context. Minor conversions to use the local intel_context.vm are made, leaving behind some more surgery required to make intel_context the primary through the selftests. Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190730143209.4549-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_client_blt.c | 4 +--- drivers/gpu/drm/i915/gem/i915_gem_context.c | 15 +++++++++++---- drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 11 +++-------- drivers/gpu/drm/i915/gem/i915_gem_object_blt.c | 6 +----- drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c | 2 +- drivers/gpu/drm/i915/gt/intel_context.c | 4 ++++ drivers/gpu/drm/i915/gt/intel_context_types.h | 4 +++- drivers/gpu/drm/i915/gt/intel_lrc.c | 9 +++------ drivers/gpu/drm/i915/gt/intel_ringbuffer.c | 6 +++--- drivers/gpu/drm/i915/gvt/scheduler.c | 2 +- 10 files changed, 31 insertions(+), 32 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c index 6f537e8e4dea..2312a0c6af89 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c @@ -250,13 +250,11 @@ int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj, u32 value) { struct drm_i915_private *i915 = to_i915(obj->base.dev); - struct i915_gem_context *ctx = ce->gem_context; - struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm; struct clear_pages_work *work; struct i915_sleeve *sleeve; int err; - sleeve = create_sleeve(vm, obj, pages, page_sizes); + sleeve = create_sleeve(ce->vm, obj, pages, page_sizes); if (IS_ERR(sleeve)) return PTR_ERR(sleeve); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index 0f6b0678f548..b28c7ca681a8 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -475,10 +475,18 @@ static struct i915_address_space * __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm) { struct i915_address_space *old = ctx->vm; + struct i915_gem_engines_iter it; + struct intel_context *ce; ctx->vm = i915_vm_get(vm); ctx->desc_template = default_desc_template(ctx->i915, vm); + for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { + i915_vm_put(ce->vm); + ce->vm = i915_vm_get(vm); + } + i915_gem_context_unlock_engines(ctx); + return old; } @@ -1004,7 +1012,7 @@ static void set_ppgtt_barrier(void *data) static int emit_ppgtt_update(struct i915_request *rq, void *data) { - struct i915_address_space *vm = rq->gem_context->vm; + struct i915_address_space *vm = rq->hw_context->vm; struct intel_engine_cs *engine = rq->engine; u32 base = engine->mmio_base; u32 *cs; @@ -1113,9 +1121,8 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv, set_ppgtt_barrier, old); if (err) { - ctx->vm = old; - ctx->desc_template = default_desc_template(ctx->i915, old); - i915_vm_put(vm); + i915_vm_put(__set_ppgtt(ctx, old)); + i915_vm_put(old); } unlock: diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 
b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 8a2047c4e7c3..cbd7c6e3a1f8 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -223,7 +223,6 @@ struct i915_execbuffer { struct intel_engine_cs *engine; /** engine to queue the request to */ struct intel_context *context; /* logical state for the request */ struct i915_gem_context *gem_context; /** caller's context */ - struct i915_address_space *vm; /** GTT and vma for the request */ struct i915_request *request; /** our request to build */ struct i915_vma *batch; /** identity of the batch obj/vma */ @@ -697,7 +696,7 @@ static int eb_reserve(struct i915_execbuffer *eb) case 1: /* Too fragmented, unbind everything and retry */ - err = i915_gem_evict_vm(eb->vm); + err = i915_gem_evict_vm(eb->context->vm); if (err) return err; break; @@ -725,12 +724,8 @@ static int eb_select_context(struct i915_execbuffer *eb) return -ENOENT; eb->gem_context = ctx; - if (ctx->vm) { - eb->vm = ctx->vm; + if (ctx->vm) eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT; - } else { - eb->vm = &eb->i915->ggtt.vm; - } eb->context_flags = 0; if (test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags)) @@ -832,7 +827,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb) goto err_vma; } - vma = i915_vma_instance(obj, eb->vm, NULL); + vma = i915_vma_instance(obj, eb->context->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto err_obj; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c index cb42e3a312e2..685064af32d1 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c @@ -47,15 +47,11 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj, struct intel_context *ce, u32 value) { - struct drm_i915_private *i915 = to_i915(obj->base.dev); - struct i915_gem_context *ctx = ce->gem_context; - struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm; struct i915_request *rq; struct i915_vma *vma; int err; - /* XXX: ce->vm please */ - vma = i915_vma_instance(obj, vm, NULL); + vma = i915_vma_instance(obj, ce->vm, NULL); if (IS_ERR(vma)) return PTR_ERR(vma); diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index bbd17d4b8ffd..7f9f6701b32c 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -747,7 +747,7 @@ emit_rpcs_query(struct drm_i915_gem_object *obj, GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine)); - vma = i915_vma_instance(obj, ce->gem_context->vm, NULL); + vma = i915_vma_instance(obj, ce->vm, NULL); if (IS_ERR(vma)) return PTR_ERR(vma); diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index d64b45f7ec6d..f30441a140f8 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -191,6 +191,8 @@ intel_context_init(struct intel_context *ce, kref_init(&ce->ref); ce->gem_context = ctx; + ce->vm = i915_vm_get(ctx->vm ?: &engine->gt->ggtt->vm); + ce->engine = engine; ce->ops = engine->cops; ce->sseu = engine->sseu; @@ -206,6 +208,8 @@ intel_context_init(struct intel_context *ce, void intel_context_fini(struct intel_context *ce) { + i915_vm_put(ce->vm); + mutex_destroy(&ce->pin_mutex); i915_active_fini(&ce->active); } diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index 
4c0e211c715d..68a7e979b1a9 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -36,7 +36,6 @@ struct intel_context_ops { struct intel_context { struct kref ref; - struct i915_gem_context *gem_context; struct intel_engine_cs *engine; struct intel_engine_cs *inflight; #define intel_context_inflight(ce) ptr_mask_bits((ce)->inflight, 2) @@ -44,6 +43,9 @@ struct intel_context { #define intel_context_inflight_inc(ce) ptr_count_inc(&(ce)->inflight) #define intel_context_inflight_dec(ce) ptr_count_dec(&(ce)->inflight) + struct i915_address_space *vm; + struct i915_gem_context *gem_context; + struct list_head signal_link; struct list_head signals; diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 4d7c4d0dbf75..d9061d9348cb 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -1605,8 +1605,6 @@ __execlists_context_pin(struct intel_context *ce, void *vaddr; int ret; - GEM_BUG_ON(!ce->gem_context->vm); - ret = execlists_context_deferred_alloc(ce, engine); if (ret) goto err; @@ -1716,8 +1714,7 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq) static int emit_pdps(struct i915_request *rq) { const struct intel_engine_cs * const engine = rq->engine; - struct i915_ppgtt * const ppgtt = - i915_vm_to_ppgtt(rq->gem_context->vm); + struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(rq->hw_context->vm); int err, i; u32 *cs; @@ -1790,7 +1787,7 @@ static int execlists_request_alloc(struct i915_request *request) */ /* Unconditionally invalidate GPU caches and TLBs. */ - if (i915_vm_is_4lvl(request->gem_context->vm)) + if (i915_vm_is_4lvl(request->hw_context->vm)) ret = request->engine->emit_flush(request, EMIT_INVALIDATE); else ret = emit_pdps(request); @@ -2920,7 +2917,7 @@ static void execlists_init_reg_state(u32 *regs, struct intel_engine_cs *engine, struct intel_ring *ring) { - struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->gem_context->vm); + struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->vm); bool rcs = engine->class == RENDER_CLASS; u32 base = engine->mmio_base; diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c index 11afe64cc256..8d24a49e5139 100644 --- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c @@ -1380,9 +1380,9 @@ static struct i915_address_space *vm_alias(struct intel_context *ce) { struct i915_address_space *vm; - vm = ce->gem_context->vm; - if (!vm) - vm = &ce->engine->gt->ggtt->alias->vm; + vm = ce->vm; + if (i915_is_ggtt(vm)) + vm = &i915_vm_to_ggtt(vm)->alias->vm; return vm; } diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 2144fb46d0e1..f68798ab1e7c 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -1156,7 +1156,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu) intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0); - i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->gem_context->vm)); + i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->vm)); for_each_engine(engine, vgpu->gvt->dev_priv, id) intel_context_unpin(s->shadow[id]); -- cgit v1.2.3 From e0e712fe42ef67bdf45fc348767d1d0a4eeba77f Mon Sep 17 00:00:00 2001 From: Rodrigo Vivi Date: Tue, 30 Jul 2019 11:50:24 -0700 Subject: drm/i915: Update DRIVER_DATE to 20190730 Signed-off-by: Rodrigo Vivi --- drivers/gpu/drm/i915/i915_drv.h | 4 ++-- 1 file 
changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index d5ab69d19558..991eaeac9994 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -99,8 +99,8 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20190708" -#define DRIVER_TIMESTAMP 1562616546 +#define DRIVER_DATE "20190730" +#define DRIVER_TIMESTAMP 1564512624 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and * WARN_ON()) for hw state sanity checks to check for unexpected conditions -- cgit v1.2.3 From 52dfdba0a987689c56bf7013304cf1041d841064 Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Thu, 25 Jul 2019 16:48:11 -0700 Subject: drm/i915/tgl: Add hpd interrupt handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add hotdplug detection for all ports on TGP. icp_hpd_detection_setup() is refactored to be shared with TGP. While we increase the number of pins, add a BUILD_BUG_ON() to avoid going over the number of bits allowed. v2: use BITS_PER_TYPE and correct type for BUILD_BUG_ON() check (requested by Ville) Cc: Ville Syrjälä Cc: Jose Souza Cc: Rodrigo Vivi Signed-off-by: Lucas De Marchi Reviewed-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20190725234813.27179-2-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/display/intel_hotplug.c | 6 ++ drivers/gpu/drm/i915/i915_drv.h | 4 + drivers/gpu/drm/i915/i915_irq.c | 128 ++++++++++++++++++++++++--- drivers/gpu/drm/i915/i915_reg.h | 28 +++++- 4 files changed, 154 insertions(+), 12 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c index 342587d91d57..c844ae4480af 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug.c @@ -104,6 +104,12 @@ enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv, if (IS_CNL_WITH_PORT_F(dev_priv)) return HPD_PORT_E; return HPD_PORT_F; + case PORT_G: + return HPD_PORT_G; + case PORT_H: + return HPD_PORT_H; + case PORT_I: + return HPD_PORT_I; default: MISSING_CASE(port); return HPD_NONE; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 991eaeac9994..9a69445f38c0 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -153,6 +153,10 @@ enum hpd_pin { HPD_PORT_D, HPD_PORT_E, HPD_PORT_F, + HPD_PORT_G, + HPD_PORT_H, + HPD_PORT_I, + HPD_NUM_PINS }; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index a17d4fd17962..34527cdd9388 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -150,6 +150,18 @@ static const u32 hpd_mcc[HPD_NUM_PINS] = { [HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP }; +static const u32 hpd_tgp[HPD_NUM_PINS] = { + [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP, + [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP, + [HPD_PORT_C] = SDE_DDIC_HOTPLUG_TGP, + [HPD_PORT_D] = SDE_TC1_HOTPLUG_ICP, + [HPD_PORT_E] = SDE_TC2_HOTPLUG_ICP, + [HPD_PORT_F] = SDE_TC3_HOTPLUG_ICP, + [HPD_PORT_G] = SDE_TC4_HOTPLUG_ICP, + [HPD_PORT_H] = SDE_TC5_HOTPLUG_TGP, + [HPD_PORT_I] = SDE_TC6_HOTPLUG_TGP, +}; + static void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr, i915_reg_t iir, i915_reg_t ier) { @@ -1724,6 +1736,40 @@ static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val) } } +static bool tgp_ddi_port_hotplug_long_detect(enum 
hpd_pin pin, u32 val) +{ + switch (pin) { + case HPD_PORT_A: + return val & ICP_DDIA_HPD_LONG_DETECT; + case HPD_PORT_B: + return val & ICP_DDIB_HPD_LONG_DETECT; + case HPD_PORT_C: + return val & TGP_DDIC_HPD_LONG_DETECT; + default: + return false; + } +} + +static bool tgp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val) +{ + switch (pin) { + case HPD_PORT_D: + return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1); + case HPD_PORT_E: + return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2); + case HPD_PORT_F: + return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3); + case HPD_PORT_G: + return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4); + case HPD_PORT_H: + return val & ICP_TC_HPD_LONG_DETECT(PORT_TC5); + case HPD_PORT_I: + return val & ICP_TC_HPD_LONG_DETECT(PORT_TC6); + default: + return false; + } +} + static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val) { switch (pin) { @@ -1803,6 +1849,8 @@ static void intel_get_hpd_pins(struct drm_i915_private *dev_priv, { enum hpd_pin pin; + BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS); + for_each_hpd_pin(pin) { if ((hpd[pin] & hotplug_trigger) == 0) continue; @@ -2561,6 +2609,43 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir, gmbus_irq_handler(dev_priv); } +static void tgp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) +{ + u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP; + u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_TGP; + u32 pin_mask = 0, long_mask = 0; + + if (ddi_hotplug_trigger) { + u32 dig_hotplug_reg; + + dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI); + I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg); + + intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, + ddi_hotplug_trigger, + dig_hotplug_reg, hpd_tgp, + tgp_ddi_port_hotplug_long_detect); + } + + if (tc_hotplug_trigger) { + u32 dig_hotplug_reg; + + dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC); + I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg); + + intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, + tc_hotplug_trigger, + dig_hotplug_reg, hpd_tgp, + tgp_tc_port_hotplug_long_detect); + } + + if (pin_mask) + intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); + + if (pch_iir & SDE_GMBUS_ICP) + gmbus_irq_handler(dev_priv); +} + static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) { u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & @@ -2983,7 +3068,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) I915_WRITE(SDEIIR, iir); ret = IRQ_HANDLED; - if (INTEL_PCH_TYPE(dev_priv) >= PCH_MCC) + if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP) + tgp_irq_handler(dev_priv, iir); + else if (INTEL_PCH_TYPE(dev_priv) >= PCH_MCC) icp_irq_handler(dev_priv, iir, hpd_mcc); else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) icp_irq_handler(dev_priv, iir, hpd_icp); @@ -3778,20 +3865,18 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) ibx_hpd_detection_setup(dev_priv); } -static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv) +static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv, + u32 ddi_hotplug_enable_mask, + u32 tc_hotplug_enable_mask) { u32 hotplug; hotplug = I915_READ(SHOTPLUG_CTL_DDI); - hotplug |= ICP_DDIA_HPD_ENABLE | - ICP_DDIB_HPD_ENABLE; + hotplug |= ddi_hotplug_enable_mask; I915_WRITE(SHOTPLUG_CTL_DDI, hotplug); hotplug = I915_READ(SHOTPLUG_CTL_TC); - hotplug |= ICP_TC_HPD_ENABLE(PORT_TC1) | - ICP_TC_HPD_ENABLE(PORT_TC2) | - ICP_TC_HPD_ENABLE(PORT_TC3) | - ICP_TC_HPD_ENABLE(PORT_TC4); + hotplug |= tc_hotplug_enable_mask; I915_WRITE(SHOTPLUG_CTL_TC, hotplug); 
} @@ -3804,7 +3889,21 @@ static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv) ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); - icp_hpd_detection_setup(dev_priv); + icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK, + ICP_TC_HPD_ENABLE_MASK); +} + +static void tgp_hpd_irq_setup(struct drm_i915_private *dev_priv) +{ + u32 hotplug_irqs, enabled_irqs; + + hotplug_irqs = SDE_DDI_MASK_TGP | SDE_TC_MASK_TGP; + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_tgp); + + ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); + + icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, + TGP_TC_HPD_ENABLE_MASK); } static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv) @@ -3841,7 +3940,9 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) gen11_hpd_detection_setup(dev_priv); - if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) + if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP) + tgp_hpd_irq_setup(dev_priv); + else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) icp_hpd_irq_setup(dev_priv); } @@ -4291,7 +4392,12 @@ static void icp_irq_postinstall(struct drm_i915_private *dev_priv) gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR); I915_WRITE(SDEIMR, ~mask); - icp_hpd_detection_setup(dev_priv); + if (HAS_PCH_TGP(dev_priv)) + icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, + TGP_TC_HPD_ENABLE_MASK); + else + icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK, + ICP_TC_HPD_ENABLE_MASK); } static void gen11_irq_postinstall(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index d2b76121d863..f55dea63e6e5 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -7861,12 +7861,15 @@ enum { SDE_FDI_RXB_CPT | \ SDE_FDI_RXA_CPT) -/* south display engine interrupt: ICP */ +/* south display engine interrupt: ICP/TGP */ +#define SDE_TC6_HOTPLUG_TGP (1 << 29) +#define SDE_TC5_HOTPLUG_TGP (1 << 28) #define SDE_TC4_HOTPLUG_ICP (1 << 27) #define SDE_TC3_HOTPLUG_ICP (1 << 26) #define SDE_TC2_HOTPLUG_ICP (1 << 25) #define SDE_TC1_HOTPLUG_ICP (1 << 24) #define SDE_GMBUS_ICP (1 << 23) +#define SDE_DDIC_HOTPLUG_TGP (1 << 18) #define SDE_DDIB_HOTPLUG_ICP (1 << 17) #define SDE_DDIA_HOTPLUG_ICP (1 << 16) #define SDE_TC_HOTPLUG_ICP(tc_port) (1 << ((tc_port) + 24)) @@ -7877,6 +7880,11 @@ enum { SDE_TC3_HOTPLUG_ICP | \ SDE_TC2_HOTPLUG_ICP | \ SDE_TC1_HOTPLUG_ICP) +#define SDE_DDI_MASK_TGP (SDE_DDIC_HOTPLUG_TGP | \ + SDE_DDI_MASK_ICP) +#define SDE_TC_MASK_TGP (SDE_TC6_HOTPLUG_TGP | \ + SDE_TC5_HOTPLUG_TGP | \ + SDE_TC_MASK_ICP) #define SDEISR _MMIO(0xc4000) #define SDEIMR _MMIO(0xc4004) @@ -7944,6 +7952,12 @@ enum { */ #define SHOTPLUG_CTL_DDI _MMIO(0xc4030) +#define TGP_DDIC_HPD_ENABLE (1 << 11) +#define TGP_DDIC_HPD_STATUS_MASK (3 << 8) +#define TGP_DDIC_HPD_NO_DETECT (0 << 8) +#define TGP_DDIC_HPD_SHORT_DETECT (1 << 8) +#define TGP_DDIC_HPD_LONG_DETECT (2 << 8) +#define TGP_DDIC_HPD_SHORT_LONG_DETECT (3 << 8) #define ICP_DDIB_HPD_ENABLE (1 << 7) #define ICP_DDIB_HPD_STATUS_MASK (3 << 4) #define ICP_DDIB_HPD_NO_DETECT (0 << 4) @@ -8067,6 +8081,18 @@ enum { #define ICP_TC_HPD_LONG_DETECT(tc_port) (2 << (tc_port) * 4) #define ICP_TC_HPD_SHORT_DETECT(tc_port) (1 << (tc_port) * 4) +#define ICP_DDI_HPD_ENABLE_MASK (ICP_DDIB_HPD_ENABLE | \ + ICP_DDIA_HPD_ENABLE) +#define ICP_TC_HPD_ENABLE_MASK (ICP_TC_HPD_ENABLE(PORT_TC4) | \ + ICP_TC_HPD_ENABLE(PORT_TC3) | \ + ICP_TC_HPD_ENABLE(PORT_TC2) | \ + ICP_TC_HPD_ENABLE(PORT_TC1)) +#define TGP_DDI_HPD_ENABLE_MASK 
(TGP_DDIC_HPD_ENABLE | \ + ICP_DDI_HPD_ENABLE_MASK) +#define TGP_TC_HPD_ENABLE_MASK (ICP_TC_HPD_ENABLE(PORT_TC6) | \ + ICP_TC_HPD_ENABLE(PORT_TC5) | \ + ICP_TC_HPD_ENABLE_MASK) + #define _PCH_DPLL_A 0xc6014 #define _PCH_DPLL_B 0xc6018 #define PCH_DPLL(pll) _MMIO((pll) == 0 ? _PCH_DPLL_A : _PCH_DPLL_B) -- cgit v1.2.3 From 48ef15d32a77042ee60fcb070dcf88ece72ec2e1 Mon Sep 17 00:00:00 2001 From: José Roberto de Souza Date: Thu, 25 Jul 2019 16:48:12 -0700 Subject: drm/i915/tgl: Update north display hotplug detection to TGL connections MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit TGL has 3 combophys and 6 TC/TBT ports, so it has 2 more TC/TBT ports than ICL and the PORT_C on TGL is a combophy. So here adding a new hpd north table and function to detect long pulse for TGL. Signed-off-by: José Roberto de Souza Signed-off-by: Lucas De Marchi Reviewed-by: Anusha Srivatsa Link: https://patchwork.freedesktop.org/patch/msgid/20190713010940.17711-6-lucas.demarchi@intel.com Link: https://patchwork.freedesktop.org/patch/msgid/20190725234813.27179-3-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/i915_irq.c | 51 +++++++++++++++++++++++++++++++++++++---- drivers/gpu/drm/i915/i915_reg.h | 12 ++++++++-- 2 files changed, 56 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 34527cdd9388..e43582be24e6 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -58,6 +58,8 @@ * and related files, but that will be described in separate chapters. */ +typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val); + static const u32 hpd_ilk[HPD_NUM_PINS] = { [HPD_PORT_A] = DE_DP_A_HOTPLUG, }; @@ -135,6 +137,15 @@ static const u32 hpd_gen11[HPD_NUM_PINS] = { [HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG }; +static const u32 hpd_gen12[HPD_NUM_PINS] = { + [HPD_PORT_D] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG, + [HPD_PORT_E] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG, + [HPD_PORT_F] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG, + [HPD_PORT_G] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG, + [HPD_PORT_H] = GEN12_TC5_HOTPLUG | GEN12_TBT5_HOTPLUG, + [HPD_PORT_I] = GEN12_TC6_HOTPLUG | GEN12_TBT6_HOTPLUG +}; + static const u32 hpd_icp[HPD_NUM_PINS] = { [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP, [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP, @@ -1694,6 +1705,26 @@ static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val) } } +static bool gen12_port_hotplug_long_detect(enum hpd_pin pin, u32 val) +{ + switch (pin) { + case HPD_PORT_D: + return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1); + case HPD_PORT_E: + return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2); + case HPD_PORT_F: + return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3); + case HPD_PORT_G: + return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4); + case HPD_PORT_H: + return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC5); + case HPD_PORT_I: + return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC6); + default: + return false; + } +} + static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val) { switch (pin) { @@ -2881,6 +2912,16 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) u32 pin_mask = 0, long_mask = 0; u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK; u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK; + long_pulse_detect_func long_pulse_detect; + const u32 *hpd; + + if (INTEL_GEN(dev_priv) >= 12) { + long_pulse_detect = gen12_port_hotplug_long_detect; + hpd = 
hpd_gen12; + } else { + long_pulse_detect = gen11_port_hotplug_long_detect; + hpd = hpd_gen11; + } if (trigger_tc) { u32 dig_hotplug_reg; @@ -2889,8 +2930,7 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg); intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc, - dig_hotplug_reg, hpd_gen11, - gen11_port_hotplug_long_detect); + dig_hotplug_reg, hpd, long_pulse_detect); } if (trigger_tbt) { @@ -2900,8 +2940,7 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg); intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt, - dig_hotplug_reg, hpd_gen11, - gen11_port_hotplug_long_detect); + dig_hotplug_reg, hpd, long_pulse_detect); } if (pin_mask) @@ -3928,9 +3967,11 @@ static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv) static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) { u32 hotplug_irqs, enabled_irqs; + const u32 *hpd; u32 val; - enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_gen11); + hpd = INTEL_GEN(dev_priv) >= 12 ? hpd_gen12 : hpd_gen11; + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd); hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK; val = I915_READ(GEN11_DE_HPD_IMR); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index f55dea63e6e5..e2f6634b65f4 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -7461,21 +7461,29 @@ enum { #define GEN11_DE_HPD_IMR _MMIO(0x44474) #define GEN11_DE_HPD_IIR _MMIO(0x44478) #define GEN11_DE_HPD_IER _MMIO(0x4447c) +#define GEN12_TC6_HOTPLUG (1 << 21) +#define GEN12_TC5_HOTPLUG (1 << 20) #define GEN11_TC4_HOTPLUG (1 << 19) #define GEN11_TC3_HOTPLUG (1 << 18) #define GEN11_TC2_HOTPLUG (1 << 17) #define GEN11_TC1_HOTPLUG (1 << 16) #define GEN11_TC_HOTPLUG(tc_port) (1 << ((tc_port) + 16)) -#define GEN11_DE_TC_HOTPLUG_MASK (GEN11_TC4_HOTPLUG | \ +#define GEN11_DE_TC_HOTPLUG_MASK (GEN12_TC6_HOTPLUG | \ + GEN12_TC5_HOTPLUG | \ + GEN11_TC4_HOTPLUG | \ GEN11_TC3_HOTPLUG | \ GEN11_TC2_HOTPLUG | \ GEN11_TC1_HOTPLUG) +#define GEN12_TBT6_HOTPLUG (1 << 5) +#define GEN12_TBT5_HOTPLUG (1 << 4) #define GEN11_TBT4_HOTPLUG (1 << 3) #define GEN11_TBT3_HOTPLUG (1 << 2) #define GEN11_TBT2_HOTPLUG (1 << 1) #define GEN11_TBT1_HOTPLUG (1 << 0) #define GEN11_TBT_HOTPLUG(tc_port) (1 << (tc_port)) -#define GEN11_DE_TBT_HOTPLUG_MASK (GEN11_TBT4_HOTPLUG | \ +#define GEN11_DE_TBT_HOTPLUG_MASK (GEN12_TBT6_HOTPLUG | \ + GEN12_TBT5_HOTPLUG | \ + GEN11_TBT4_HOTPLUG | \ GEN11_TBT3_HOTPLUG | \ GEN11_TBT2_HOTPLUG | \ GEN11_TBT1_HOTPLUG) -- cgit v1.2.3 From 555233601407725065cf37e00c834394452c6780 Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Thu, 25 Jul 2019 16:48:13 -0700 Subject: drm/i915/tgl: handle DP aux interrupts For Tiger Lake the DE Port Interrupt Definition bits changed, so use the new bit definitions. 
Cc: Jose Souza Signed-off-by: Lucas De Marchi Reviewed-by: Anusha Srivatsa Link: https://patchwork.freedesktop.org/patch/msgid/20190713010940.17711-7-lucas.demarchi@intel.com Link: https://patchwork.freedesktop.org/patch/msgid/20190725234813.27179-4-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/i915_irq.c | 16 +++++++++++----- drivers/gpu/drm/i915/i915_reg.h | 3 +++ 2 files changed, 14 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index e43582be24e6..fbe13bacd5b7 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -2951,19 +2951,25 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv) { - u32 mask = GEN8_AUX_CHANNEL_A; + u32 mask; + + if (INTEL_GEN(dev_priv) >= 12) + /* TODO: Add AUX entries for USBC */ + return TGL_DE_PORT_AUX_DDIA | + TGL_DE_PORT_AUX_DDIB | + TGL_DE_PORT_AUX_DDIC; + mask = GEN8_AUX_CHANNEL_A; if (INTEL_GEN(dev_priv) >= 9) mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | GEN9_AUX_CHANNEL_D; - if (IS_CNL_WITH_PORT_F(dev_priv)) + if (IS_CNL_WITH_PORT_F(dev_priv) || IS_GEN(dev_priv, 11)) mask |= CNL_AUX_CHANNEL_F; - if (INTEL_GEN(dev_priv) >= 11) - mask |= ICL_AUX_CHANNEL_E | - CNL_AUX_CHANNEL_F; + if (IS_GEN(dev_priv, 11)) + mask |= ICL_AUX_CHANNEL_E; return mask; } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index e2f6634b65f4..21306dd3790a 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -7418,6 +7418,9 @@ enum { #define GEN8_PORT_DP_A_HOTPLUG (1 << 3) #define BXT_DE_PORT_GMBUS (1 << 1) #define GEN8_AUX_CHANNEL_A (1 << 0) +#define TGL_DE_PORT_AUX_DDIC (1 << 2) +#define TGL_DE_PORT_AUX_DDIB (1 << 1) +#define TGL_DE_PORT_AUX_DDIA (1 << 0) #define GEN8_DE_MISC_ISR _MMIO(0x44460) #define GEN8_DE_MISC_IMR _MMIO(0x44464) -- cgit v1.2.3 From 2e8de0879cf662967c850edb4462f73f3baf8794 Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Tue, 30 Jul 2019 11:17:59 -0700 Subject: drm/i915: make i915_selftest.h self-contained MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix build breakage: In file included from : ./drivers/gpu/drm/i915/i915_selftest.h:125:1: error: unknown type name ‘bool’ 125 | bool __igt_timeout(unsigned long timeout, const char *fmt, ...); | ^~~~ Signed-off-by: Lucas De Marchi Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190730181759.26162-1-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/i915_selftest.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_selftest.h b/drivers/gpu/drm/i915/i915_selftest.h index acdf6eb9e262..4d88205de51b 100644 --- a/drivers/gpu/drm/i915/i915_selftest.h +++ b/drivers/gpu/drm/i915/i915_selftest.h @@ -24,6 +24,8 @@ #ifndef __I915_SELFTEST_H__ #define __I915_SELFTEST_H__ +#include <linux/types.h> + struct pci_dev; struct drm_i915_private; -- cgit v1.2.3 From 7a81637105345dce2bae7bef69125f0d40570a36 Mon Sep 17 00:00:00 2001 From: Kevin Wang Date: Mon, 3 Jun 2019 15:58:31 +0800 Subject: drm/amd/powerplay: add callback function of get_thermal_temperature_range 1. the thermal temperature is asic related data, move the code logic to xxx_ppt.c. 2. replace data structure PP_TemperatureRange with smu_temperature_range. 3. change temperature unit from temp*1000 to temp (temperature unit).
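In short: after this change the ASIC callback reports plain degrees centigrade, and the temp*1000 representation kept in adev->pm.dpm.thermal is produced in one place in the common smu_v11_0 code. A rough sketch using names from the diffs below (illustrative, not a literal hunk):

	/* ASIC callback (navi10/vega20): plain degrees centigrade */
	range->min = 0;
	range->max = powerplay_table->software_shutdown_temp;

	/* common smu_v11_0 code: scale only when exporting to the dpm state */
	adev->pm.dpm.thermal.min_temp = range.min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	adev->pm.dpm.thermal.max_temp = range.max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;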
Signed-off-by: Kevin Wang Signed-off-by: Kenneth Feng Acked-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 1 - drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 17 +++++++++++++ drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 18 +++++++++----- drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 34 +++++++++----------------- 4 files changed, 40 insertions(+), 30 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index b702c9ee975f..33d2d75ba903 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h @@ -439,7 +439,6 @@ struct smu_table_context struct smu_table *tables; uint32_t table_count; struct smu_table memory_pool; - uint16_t software_shutdown_temp; uint8_t thermal_controller_type; uint16_t TDPODLimit; diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index 2d0c8890abbb..502067c354d2 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -1639,6 +1639,22 @@ static int navi10_set_performance_level(struct smu_context *smu, enum amd_dpm_fo return ret; } +static int navi10_get_thermal_temperature_range(struct smu_context *smu, + struct smu_temperature_range *range) +{ + struct smu_table_context *table_context = &smu->smu_table; + struct smu_11_0_powerplay_table *powerplay_table = table_context->power_play_table; + + if (!range || !powerplay_table) + return -EINVAL; + + /* The unit is temperature */ + range->min = 0; + range->max = powerplay_table->software_shutdown_temp; + + return 0; +} + static const struct pptable_funcs navi10_ppt_funcs = { .tables_init = navi10_tables_init, .alloc_dpm_context = navi10_allocate_dpm_context, @@ -1676,6 +1692,7 @@ static const struct pptable_funcs navi10_ppt_funcs = { .get_ppfeature_status = navi10_get_ppfeature_status, .set_ppfeature_status = navi10_set_ppfeature_status, .set_performance_level = navi10_set_performance_level, + .get_thermal_temperature_range = navi10_get_thermal_temperature_range, }; void navi10_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index a41ce29b23dc..92e3e1bf25f9 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -1150,10 +1150,8 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu, struct smu_temperature_range *range) { struct amdgpu_device *adev = smu->adev; - int low = SMU_THERMAL_MINIMUM_ALERT_TEMP * - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; - int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP * - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + int low = SMU_THERMAL_MINIMUM_ALERT_TEMP; + int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP; uint32_t val; if (!range) @@ -1164,6 +1162,9 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu, if (high > range->max) high = range->max; + low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP, range->min); + high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP, range->max); + if (low > high) return -EINVAL; @@ -1172,8 +1173,8 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu, val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0); val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0); - val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / 
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES)); - val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES)); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff)); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff)); val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK); WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val); @@ -1212,7 +1213,10 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu) if (!smu->pm_enabled) return ret; + ret = smu_get_thermal_temperature_range(smu, &range); + if (ret) + return ret; if (smu->smu_table.thermal_controller_type) { ret = smu_v11_0_set_thermal_range(smu, &range); @@ -1237,6 +1241,8 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu) adev->pm.dpm.thermal.min_mem_temp = range.mem_min; adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max; adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max; + adev->pm.dpm.thermal.min_temp = range.min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.max_temp = range.max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; return ret; } diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c index 03e310426ffb..95afc153a924 100644 --- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c @@ -463,7 +463,6 @@ static int vega20_store_powerplay_table(struct smu_context *smu) memcpy(table_context->driver_pptable, &powerplay_table->smcPPTable, sizeof(PPTable_t)); - table_context->software_shutdown_temp = powerplay_table->usSoftwareShutdownTemp; table_context->thermal_controller_type = powerplay_table->ucThermalControllerType; table_context->TDPODLimit = le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE]); @@ -3252,35 +3251,24 @@ static int vega20_set_watermarks_table(struct smu_context *smu, return 0; } -static const struct smu_temperature_range vega20_thermal_policy[] = -{ - {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000}, - { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000}, -}; - static int vega20_get_thermal_temperature_range(struct smu_context *smu, struct smu_temperature_range *range) { - + struct smu_table_context *table_context = &smu->smu_table; + ATOM_Vega20_POWERPLAYTABLE *powerplay_table = table_context->power_play_table; PPTable_t *pptable = smu->smu_table.driver_pptable; - if (!range) + if (!range || !powerplay_table) return -EINVAL; - memcpy(range, &vega20_thermal_policy[0], sizeof(struct smu_temperature_range)); - - range->max = pptable->TedgeLimit * - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; - range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE) * - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; - range->hotspot_crit_max = pptable->ThotspotLimit * - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; - range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT) * - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; - range->mem_crit_max = pptable->ThbmLimit * - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; - range->mem_emergency_max = (pptable->ThbmLimit + CTF_OFFSET_HBM)* - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + /* The unit is temperature */ + range->min = 0; + range->max = powerplay_table->usSoftwareShutdownTemp; + range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE); + range->hotspot_crit_max = pptable->ThotspotLimit; + range->hotspot_emergency_max = 
(pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT); + range->mem_crit_max = pptable->ThbmLimit; + range->mem_emergency_max = (pptable->ThbmLimit + CTF_OFFSET_HBM); return 0; -- cgit v1.2.3 From 4cab85afe9d7903936ef86ef143e37440cb8fc98 Mon Sep 17 00:00:00 2001 From: Kent Russell Date: Tue, 23 Jul 2019 10:18:01 -0400 Subject: drm/amdkfd: Fix byte align on VegaM This was missed during the addition of VegaM support Reviewed-by: Alex Deucher Signed-off-by: Kent Russell Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 1d3ee9c42f7e..6a5c96e519b1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1140,7 +1140,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( adev->asic_type != CHIP_FIJI && adev->asic_type != CHIP_POLARIS10 && adev->asic_type != CHIP_POLARIS11 && - adev->asic_type != CHIP_POLARIS12) ? + adev->asic_type != CHIP_POLARIS12 && + adev->asic_type != CHIP_VEGAM) ? VI_BO_SIZE_ALIGN : 1; mapping_flags = AMDGPU_VM_PAGE_READABLE; -- cgit v1.2.3 From 0f0727d971f6fdf8f1077180d495ddb9928f0c8b Mon Sep 17 00:00:00 2001 From: Nick Desaulniers Date: Mon, 22 Jul 2019 15:31:05 -0700 Subject: drm/amd/display: readd -msse2 to prevent Clang from emitting libcalls to undefined SW FP routines arch/x86/Makefile disables SSE and SSE2 for the whole kernel. The AMDGPU drivers modified in this patch re-enable SSE but not SSE2. Turn on SSE2 to support emitting double precision floating point instructions rather than calls to non-existent (usually available from gcc_s or compiler_rt) floating point helper routines for Clang. This was originally landed in: commit 10117450735c ("drm/amd/display: add -msse2 to prevent Clang from emitting libcalls to undefined SW FP routines") but reverted in: commit 193392ed9f69 ("Revert "drm/amd/display: add -msse2 to prevent Clang from emitting libcalls to undefined SW FP routines"") due to bugreports from GCC builds. Add guards to only do so for Clang. 
Link: https://bugs.freedesktop.org/show_bug.cgi?id=109487 Link: https://github.com/ClangBuiltLinux/linux/issues/327 Suggested-by: Sedat Dilek Suggested-by: Sami Tolvanen Signed-off-by: Nick Desaulniers Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/calcs/Makefile | 4 ++++ drivers/gpu/drm/amd/display/dc/dcn20/Makefile | 4 ++++ drivers/gpu/drm/amd/display/dc/dml/Makefile | 4 ++++ drivers/gpu/drm/amd/display/dc/dsc/Makefile | 4 ++++ 4 files changed, 16 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/calcs/Makefile b/drivers/gpu/drm/amd/display/dc/calcs/Makefile index 95f332ee3e7e..16614d73a5fc 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/Makefile +++ b/drivers/gpu/drm/amd/display/dc/calcs/Makefile @@ -32,6 +32,10 @@ endif calcs_ccflags := -mhard-float -msse $(cc_stack_align) +ifdef CONFIG_CC_IS_CLANG +calcs_ccflags += -msse2 +endif + CFLAGS_dcn_calcs.o := $(calcs_ccflags) CFLAGS_dcn_calc_auto.o := $(calcs_ccflags) CFLAGS_dcn_calc_math.o := $(calcs_ccflags) -Wno-tautological-compare diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile index e9721a906592..f57a3b281408 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile @@ -18,6 +18,10 @@ endif CFLAGS_dcn20_resource.o := -mhard-float -msse $(cc_stack_align) +ifdef CONFIG_CC_IS_CLANG +CFLAGS_dcn20_resource.o += -msse2 +endif + AMD_DAL_DCN20 = $(addprefix $(AMDDALPATH)/dc/dcn20/,$(DCN20)) AMD_DISPLAY_FILES += $(AMD_DAL_DCN20) diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile index 1735fc1e2eb1..95fd2beca80c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile @@ -32,6 +32,10 @@ endif dml_ccflags := -mhard-float -msse $(cc_stack_align) +ifdef CONFIG_CC_IS_CLANG +dml_ccflags += -msse2 +endif + CFLAGS_display_mode_lib.o := $(dml_ccflags) ifdef CONFIG_DRM_AMD_DC_DCN2_0 diff --git a/drivers/gpu/drm/amd/display/dc/dsc/Makefile b/drivers/gpu/drm/amd/display/dc/dsc/Makefile index e019cd9447e8..17db603f2d1f 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dsc/Makefile @@ -9,6 +9,10 @@ endif dsc_ccflags := -mhard-float -msse $(cc_stack_align) +ifdef CONFIG_CC_IS_CLANG +dsc_ccflags += -msse2 +endif + CFLAGS_rc_calc.o := $(dsc_ccflags) CFLAGS_rc_calc_dpi.o := $(dsc_ccflags) CFLAGS_codec_main_amd.o := $(dsc_ccflags) -- cgit v1.2.3 From 911d8b3069ce87bcc874503d8f0b17b019e6e502 Mon Sep 17 00:00:00 2001 From: Chuhong Yuan Date: Tue, 23 Jul 2019 17:04:50 +0800 Subject: drm/amdgpu: Use dev_get_drvdata where possible Instead of using to_pci_dev + pci_get_drvdata, use dev_get_drvdata to make code simpler. 
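The transformation is the same two lines in every PM callback; for illustration (hypothetical callback name, not one of the hunks below):

	static int example_pmops_suspend(struct device *dev)
	{
		/* before: bounce through the PCI device to reach the drvdata */
		/* struct pci_dev *pdev = to_pci_dev(dev); */
		/* struct drm_device *drm_dev = pci_get_drvdata(pdev); */

		/* after: struct device already carries the same drvdata pointer */
		struct drm_device *drm_dev = dev_get_drvdata(dev);

		return amdgpu_device_suspend(drm_dev, true, true);
	}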
Signed-off-by: Chuhong Yuan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 21 +++++++-------------- 1 file changed, 7 insertions(+), 14 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 20f18d492537..56f807757d2c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -1101,16 +1101,14 @@ amdgpu_pci_shutdown(struct pci_dev *pdev) static int amdgpu_pmops_suspend(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = dev_get_drvdata(dev); - struct drm_device *drm_dev = pci_get_drvdata(pdev); return amdgpu_device_suspend(drm_dev, true, true); } static int amdgpu_pmops_resume(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct drm_device *drm_dev = dev_get_drvdata(dev); /* GPU comes up enabled by the bios on resume */ if (amdgpu_device_is_px(drm_dev)) { @@ -1124,33 +1122,29 @@ static int amdgpu_pmops_resume(struct device *dev) static int amdgpu_pmops_freeze(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = dev_get_drvdata(dev); - struct drm_device *drm_dev = pci_get_drvdata(pdev); return amdgpu_device_suspend(drm_dev, false, true); } static int amdgpu_pmops_thaw(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = dev_get_drvdata(dev); - struct drm_device *drm_dev = pci_get_drvdata(pdev); return amdgpu_device_resume(drm_dev, false, true); } static int amdgpu_pmops_poweroff(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = dev_get_drvdata(dev); - struct drm_device *drm_dev = pci_get_drvdata(pdev); return amdgpu_device_suspend(drm_dev, true, true); } static int amdgpu_pmops_restore(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = dev_get_drvdata(dev); - struct drm_device *drm_dev = pci_get_drvdata(pdev); return amdgpu_device_resume(drm_dev, false, true); } @@ -1209,8 +1203,7 @@ static int amdgpu_pmops_runtime_resume(struct device *dev) static int amdgpu_pmops_runtime_idle(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct drm_device *drm_dev = dev_get_drvdata(dev); struct drm_crtc *crtc; if (!amdgpu_device_is_px(drm_dev)) { -- cgit v1.2.3 From ef1de3618125e4a4d6511553f8b527f58829a8e6 Mon Sep 17 00:00:00 2001 From: Chuhong Yuan Date: Tue, 23 Jul 2019 17:04:22 +0800 Subject: drm/amd/display: Use dev_get_drvdata Instead of using to_pci_dev + pci_get_drvdata, use dev_get_drvdata to make code simpler. 
Signed-off-by: Chuhong Yuan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 9f31c17f725d..e177be3421a4 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2430,8 +2430,7 @@ static ssize_t s3_debug_store(struct device *device, { int ret; int s3_state; - struct pci_dev *pdev = to_pci_dev(device); - struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct drm_device *drm_dev = dev_get_drvdata(device); struct amdgpu_device *adev = drm_dev->dev_private; ret = kstrtoint(buf, 0, &s3_state); -- cgit v1.2.3 From 59d788b18771f5f0f198e8da3cd4f38dfbfa64ba Mon Sep 17 00:00:00 2001 From: Chuhong Yuan Date: Tue, 23 Jul 2019 19:10:08 +0800 Subject: drm/radeon: Use dev_get_drvdata where possible Instead of using to_pci_dev + pci_get_drvdata, use dev_get_drvdata to make code simpler. Signed-off-by: Chuhong Yuan Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_drv.c | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index a6cbe11f79c6..b2bb74d5bffb 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -358,15 +358,13 @@ radeon_pci_shutdown(struct pci_dev *pdev) static int radeon_pmops_suspend(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct drm_device *drm_dev = dev_get_drvdata(dev); return radeon_suspend_kms(drm_dev, true, true, false); } static int radeon_pmops_resume(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct drm_device *drm_dev = dev_get_drvdata(dev); /* GPU comes up enabled by the bios on resume */ if (radeon_is_px(drm_dev)) { @@ -380,15 +378,13 @@ static int radeon_pmops_resume(struct device *dev) static int radeon_pmops_freeze(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct drm_device *drm_dev = dev_get_drvdata(dev); return radeon_suspend_kms(drm_dev, false, true, true); } static int radeon_pmops_thaw(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct drm_device *drm_dev = dev_get_drvdata(dev); return radeon_resume_kms(drm_dev, false, true); } @@ -447,8 +443,7 @@ static int radeon_pmops_runtime_resume(struct device *dev) static int radeon_pmops_runtime_idle(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct drm_device *drm_dev = dev_get_drvdata(dev); struct drm_crtc *crtc; if (!radeon_is_px(drm_dev)) { -- cgit v1.2.3 From 87abff77880da827b8c9acb37e241999183b406f Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 23 Jul 2019 15:23:12 +0100 Subject: drm/amd/display: fix a missing null check on a failed kzalloc Currently the allocation of config may fail and a null pointer dereference on config can occur. Fix this by added a null check on a failed allocation of config. 
Addresses-Coverity: ("Dereference null return") Fixes: c2cd9d04ecf0 ("drm/amd/display: Hook up calls to do stereo mux and dig programming to stereo control interface") Signed-off-by: Colin Ian King Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 94f126d2331c..252b621d93a9 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1259,6 +1259,8 @@ bool dc_set_generic_gpio_for_stereo(bool enable, struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config), GFP_KERNEL); + if (!config) + return false; pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0); if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) { -- cgit v1.2.3 From d3a593e97ecdff6bbebfd8b83079ffc5a332dccf Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Tue, 23 Jul 2019 14:27:20 +0800 Subject: drm/amd/powerplay: no pptable transfer and dpms enabled with "dpm=0" Honor the 'dpm' module parameter setting on SW SMU routine as what we did on previous ASICs. SMU FW loading is still proceeded even with "dpm=0". Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 0b1285ddc954..37eb334de6bd 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -1083,6 +1083,9 @@ static int smu_hw_init(void *handle) } } + if (!smu->pm_enabled) + return 0; + ret = smu_feature_init_dpm(smu); if (ret) goto failed; -- cgit v1.2.3 From 780f3a9c5b9f739d77cc15fac58a97c3ec6cad99 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Tue, 23 Jul 2019 16:23:28 +0800 Subject: drm/amd/powerplay: some cosmetic fixes Drop redundant check, duplicate check, duplicate setting and fix the return value. 
Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 33 +++++++++++++++--------------- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 30 +++++++++++---------------- 2 files changed, 28 insertions(+), 35 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 03ca8c69114f..d8041ced9910 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -317,13 +317,6 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) return -EINVAL; - if (!amdgpu_sriov_vf(adev)) { - if (is_support_sw_smu(adev)) - current_level = smu_get_performance_level(&adev->smu); - else if (adev->powerplay.pp_funcs->get_performance_level) - current_level = amdgpu_dpm_get_performance_level(adev); - } - if (strncmp("low", buf, strlen("low")) == 0) { level = AMD_DPM_FORCED_LEVEL_LOW; } else if (strncmp("high", buf, strlen("high")) == 0) { @@ -347,17 +340,23 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, goto fail; } - if (amdgpu_sriov_vf(adev)) { - if (amdgim_is_hwperf(adev) && - adev->virt.ops->force_dpm_level) { - mutex_lock(&adev->pm.mutex); - adev->virt.ops->force_dpm_level(adev, level); - mutex_unlock(&adev->pm.mutex); - return count; - } else { - return -EINVAL; + /* handle sriov case here */ + if (amdgpu_sriov_vf(adev)) { + if (amdgim_is_hwperf(adev) && + adev->virt.ops->force_dpm_level) { + mutex_lock(&adev->pm.mutex); + adev->virt.ops->force_dpm_level(adev, level); + mutex_unlock(&adev->pm.mutex); + return count; + } else { + return -EINVAL; } - } + } + + if (is_support_sw_smu(adev)) + current_level = smu_get_performance_level(&adev->smu); + else if (adev->powerplay.pp_funcs->get_performance_level) + current_level = amdgpu_dpm_get_performance_level(adev); if (current_level == level) return count; diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 37eb334de6bd..d8bb18445086 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -1444,6 +1444,7 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu, if (!smu->pm_enabled) return -EINVAL; + if (!skip_display_settings) { ret = smu_display_config_changed(smu); if (ret) { @@ -1452,8 +1453,6 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu, } } - if (!smu->pm_enabled) - return -EINVAL; ret = smu_apply_clocks_adjust_rules(smu); if (ret) { pr_err("Failed to apply clocks adjust rules!"); @@ -1472,9 +1471,14 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu, ret = smu_asic_set_performance_level(smu, level); if (ret) { ret = smu_default_set_performance_level(smu, level); + if (ret) { + pr_err("Failed to set performance level!"); + return ret; + } } - if (!ret) - smu_dpm_ctx->dpm_level = level; + + /* update the saved copy */ + smu_dpm_ctx->dpm_level = level; } if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { @@ -1533,28 +1537,18 @@ enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu) int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level) { - int ret = 0; - int i; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); + int ret = 0; if (!smu_dpm_ctx->dpm_context) return -EINVAL; - for (i = 0; i < smu->adev->num_ip_blocks; i++) { - if (smu->adev->ip_blocks[i].version->type == 
AMD_IP_BLOCK_TYPE_SMC) - break; - } - - - smu->adev->ip_blocks[i].version->funcs->enable_umd_pstate(smu, &level); - ret = smu_handle_task(smu, level, - AMD_PP_TASK_READJUST_POWER_STATE); + ret = smu_enable_umd_pstate(smu, &level); if (ret) return ret; - mutex_lock(&smu->mutex); - smu_dpm_ctx->dpm_level = level; - mutex_unlock(&smu->mutex); + ret = smu_handle_task(smu, level, + AMD_PP_TASK_READJUST_POWER_STATE); return ret; } -- cgit v1.2.3 From 668a9469f11470868ad4577d75894f8812919314 Mon Sep 17 00:00:00 2001 From: Kevin Wang Date: Tue, 23 Jul 2019 19:56:52 +0800 Subject: drm/amd/powerplay: fix temperature granularity error in smu11 in this patch, drm/amd/powerplay: add callback function of get_thermal_temperature_range the driver missed temperature granularity change on other temperature. Signed-off-by: Kevin Wang Reviewed-by: Evan Quan Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index 92e3e1bf25f9..e2ea324644c1 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -1232,15 +1232,15 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu) return ret; } - adev->pm.dpm.thermal.min_temp = range.min; - adev->pm.dpm.thermal.max_temp = range.max; - adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max; - adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min; - adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max; - adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max; - adev->pm.dpm.thermal.min_mem_temp = range.mem_min; - adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max; - adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max; + adev->pm.dpm.thermal.min_temp = range.min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.max_temp = range.max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.min_mem_temp = range.mem_min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; adev->pm.dpm.thermal.min_temp = range.min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; adev->pm.dpm.thermal.max_temp = range.max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; -- cgit v1.2.3 From 6913848087e29103f7062376d0ebf7efcaf1a26b Mon Sep 17 00:00:00 2001 From: James Zhu Date: Tue, 23 Jul 2019 16:45:19 -0400 Subject: drm/amdgpu: use VCN firmware offset for cache window Since we are using the signed FW now, and also using PSP firmware loading, but it's still potential to break driver when loading FW directly instead of PSP, so we should add offset. 
Signed-off-by: James Zhu Reviewed-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index ef8bb67844be..0c84dbc6a62d 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -396,11 +396,8 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev) WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, upper_32_bits(adev->vcn.inst[i].gpu_addr)); offset = size; - /* No signed header for now from firmware WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, AMDGPU_UVD_FIRMWARE_OFFSET >> 3); - */ - WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0); } WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size); -- cgit v1.2.3 From 5f4814deab509e4d0a52e4a0b016d49a75b2cf7c Mon Sep 17 00:00:00 2001 From: tiancyin Date: Tue, 16 Jul 2019 18:25:01 +0800 Subject: drm/amdgpu/gmc10: fix pte mytpe field error for navi14 navi14 share same PTE format with navi10. Reviewed-by: Hawking Zhang Signed-off-by: tiancyin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 4726b5176417..9ab31ea3cc60 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1574,7 +1574,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, flags &= ~AMDGPU_PTE_EXECUTABLE; flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE; - if (adev->asic_type == CHIP_NAVI10) { + if (adev->asic_type >= CHIP_NAVI10) { flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK; flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK); } else { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 42daf8877c3a..2eda3a8c330d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -90,7 +90,7 @@ struct amdgpu_bo_list_entry; | AMDGPU_PTE_WRITEABLE \ | AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_CC)) -/* NAVI10 only */ +/* gfx10 */ #define AMDGPU_PTE_MTYPE_NV10(a) ((uint64_t)(a) << 48) #define AMDGPU_PTE_MTYPE_NV10_MASK AMDGPU_PTE_MTYPE_NV10(7ULL) -- cgit v1.2.3 From 8c7a5d9e6f141aa48473f15ac271c75688404ba8 Mon Sep 17 00:00:00 2001 From: Jay Cornwall Date: Tue, 23 Jul 2019 14:40:07 -0500 Subject: drm/amdkfd: Use SQC when TCP would fail in gfx9 context save. When a wavefront raises TRAPSTS.XNACK_ERROR with STATUS.ALLOW_REPLAY=0 subsequent memory instructions have undefined behavior. In practice SQC stores continue to work but TCP stores do not. Context save is permitted to fail after XNACK error because the wavefront will be halted and subsequently terminated. However the debugger has an interest in retrieving the wavefront VGPR/LDS state. Detect the out-of-spec case and use SQC stores during context save in place of TCP stores. 
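The gate the handler applies is easier to read as C than as the SGPR arithmetic below; a minimal standalone paraphrase (mask values taken from the assembly in this patch, function name is illustrative):

	#include <stdbool.h>

	#define SQ_WAVE_STATUS_ALLOW_REPLAY_MASK	0x400000
	#define SQ_WAVE_TRAPSTS_XNACK_ERROR_MASK	0x10000000

	static bool tcp_store_ok(unsigned int status, unsigned int trapsts)
	{
		/* TCP stores only become unreliable when replay is disallowed
		 * and the wave has raised an XNACK error; otherwise keep the
		 * normal TCP path for context save.
		 */
		if (status & SQ_WAVE_STATUS_ALLOW_REPLAY_MASK)
			return true;

		return !(trapsts & SQ_WAVE_TRAPSTS_XNACK_ERROR_MASK);
	}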
Signed-off-by: Jay Cornwall Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h | 365 ++++++++++++++------- .../gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm | 81 +++++ 2 files changed, 320 insertions(+), 126 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h index c45ba0013ca5..7274baff5c16 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h @@ -274,7 +274,7 @@ static const uint32_t cwsr_trap_gfx8_hex[] = { static const uint32_t cwsr_trap_gfx9_hex[] = { - 0xbf820001, 0xbf82015e, + 0xbf820001, 0xbf820241, 0xb8f8f802, 0x89788678, 0xb8fbf803, 0x866eff7b, 0x00000400, 0xbf85003b, @@ -404,15 +404,57 @@ static const uint32_t cwsr_trap_gfx9_hex[] = { 0xbefe00c1, 0xbeff00c1, 0xbee80080, 0xbee90080, 0xbef600ff, 0x01000000, + 0x867aff78, 0x00400000, + 0xbf850003, 0xb8faf803, + 0x897a7aff, 0x10000000, + 0xbf85004d, 0xbe840080, + 0xd2890000, 0x00000900, + 0x80048104, 0xd2890001, + 0x00000900, 0x80048104, + 0xd2890002, 0x00000900, + 0x80048104, 0xd2890003, + 0x00000900, 0x80048104, + 0xc069003a, 0x00000070, + 0xbf8cc07f, 0x80709070, + 0xbf06c004, 0xbf84ffee, + 0xbe840080, 0xd2890000, + 0x00000901, 0x80048104, + 0xd2890001, 0x00000901, + 0x80048104, 0xd2890002, + 0x00000901, 0x80048104, + 0xd2890003, 0x00000901, + 0x80048104, 0xc069003a, + 0x00000070, 0xbf8cc07f, + 0x80709070, 0xbf06c004, + 0xbf84ffee, 0xbe840080, + 0xd2890000, 0x00000902, + 0x80048104, 0xd2890001, + 0x00000902, 0x80048104, + 0xd2890002, 0x00000902, + 0x80048104, 0xd2890003, + 0x00000902, 0x80048104, + 0xc069003a, 0x00000070, + 0xbf8cc07f, 0x80709070, + 0xbf06c004, 0xbf84ffee, + 0xbe840080, 0xd2890000, + 0x00000903, 0x80048104, + 0xd2890001, 0x00000903, + 0x80048104, 0xd2890002, + 0x00000903, 0x80048104, + 0xd2890003, 0x00000903, + 0x80048104, 0xc069003a, + 0x00000070, 0xbf8cc07f, + 0x80709070, 0xbf06c004, + 0xbf84ffee, 0xbf820008, 0xe0724000, 0x701d0000, 0xe0724100, 0x701d0100, 0xe0724200, 0x701d0200, 0xe0724300, 0x701d0300, 0xbefe00c1, 0xbeff00c1, 0xb8fb4306, 0x867bc17b, - 0xbf84002c, 0xbf8a0000, + 0xbf840063, 0xbf8a0000, 0x867aff6f, 0x04000000, - 0xbf840028, 0x8e7b867b, + 0xbf84005f, 0x8e7b867b, 0x8e7b827b, 0xbef6007b, 0xb8f02a05, 0x80708170, 0x8e708a70, 0xb8fa1605, @@ -422,144 +464,215 @@ static const uint32_t cwsr_trap_gfx9_hex[] = { 0x01000000, 0xbefc0080, 0xd28c0002, 0x000100c1, 0xd28d0003, 0x000204c1, - 0xd1060002, 0x00011103, - 0x7e0602ff, 0x00000200, - 0xbefc00ff, 0x00010000, - 0xbe800077, 0x8677ff77, - 0xff7fffff, 0x8777ff77, - 0x00058000, 0xd8ec0000, - 0x00000002, 0xbf8cc07f, - 0xe0765000, 0x701d0002, - 0x68040702, 0xd0c9006a, - 0x0000f702, 0xbf87fff7, - 0xbef70000, 0xbef000ff, - 0x00000400, 0xbefe00c1, - 0xbeff00c1, 0xb8fb2a05, - 0x807b817b, 0x8e7b827b, - 0x8e76887b, 0xbef600ff, - 0x01000000, 0xbefc0084, - 0xbf0a7b7c, 0xbf840015, - 0xbf11017c, 0x807bff7b, - 0x00001000, 0x7e000300, - 0x7e020301, 0x7e040302, - 0x7e060303, 0xe0724000, - 0x701d0000, 0xe0724100, - 0x701d0100, 0xe0724200, - 0x701d0200, 0xe0724300, - 0x701d0300, 0x807c847c, - 0x8070ff70, 0x00000400, - 0xbf0a7b7c, 0xbf85ffef, - 0xbf9c0000, 0xbf8200da, - 0xbef4007e, 0x8675ff7f, - 0x0000ffff, 0x8775ff75, - 0x00040000, 0xbef60080, - 0xbef700ff, 0x00807fac, - 0x866eff7f, 0x08000000, - 0x8f6e836e, 0x87776e77, - 0x866eff7f, 0x70000000, - 0x8f6e816e, 0x87776e77, - 0x866eff7f, 0x04000000, - 0xbf84001e, 0xbefe00c1, - 0xbeff00c1, 0xb8ef4306, - 0x866fc16f, 0xbf840019, - 0x8e6f866f, 
0x8e6f826f, - 0xbef6006f, 0xb8f82a05, - 0x80788178, 0x8e788a78, - 0xb8ee1605, 0x806e816e, - 0x8e6e866e, 0x80786e78, - 0x8078ff78, 0x00000080, - 0xbef600ff, 0x01000000, - 0xbefc0080, 0xe0510000, - 0x781d0000, 0xe0510100, - 0x781d0000, 0x807cff7c, - 0x00000200, 0x8078ff78, - 0x00000200, 0xbf0a6f7c, - 0xbf85fff6, 0xbef80080, + 0x867aff78, 0x00400000, + 0xbf850003, 0xb8faf803, + 0x897a7aff, 0x10000000, + 0xbf850030, 0x24040682, + 0xd86e4000, 0x00000002, + 0xbf8cc07f, 0xbe840080, + 0xd2890000, 0x00000900, + 0x80048104, 0xd2890001, + 0x00000900, 0x80048104, + 0xd2890002, 0x00000900, + 0x80048104, 0xd2890003, + 0x00000900, 0x80048104, + 0xc069003a, 0x00000070, + 0xbf8cc07f, 0x80709070, + 0xbf06c004, 0xbf84ffee, + 0xbe840080, 0xd2890000, + 0x00000901, 0x80048104, + 0xd2890001, 0x00000901, + 0x80048104, 0xd2890002, + 0x00000901, 0x80048104, + 0xd2890003, 0x00000901, + 0x80048104, 0xc069003a, + 0x00000070, 0xbf8cc07f, + 0x80709070, 0xbf06c004, + 0xbf84ffee, 0x680404ff, + 0x00000200, 0xd0c9006a, + 0x0000f702, 0xbf87ffd2, + 0xbf820015, 0xd1060002, + 0x00011103, 0x7e0602ff, + 0x00000200, 0xbefc00ff, + 0x00010000, 0xbe800077, + 0x8677ff77, 0xff7fffff, + 0x8777ff77, 0x00058000, + 0xd8ec0000, 0x00000002, + 0xbf8cc07f, 0xe0765000, + 0x701d0002, 0x68040702, + 0xd0c9006a, 0x0000f702, + 0xbf87fff7, 0xbef70000, + 0xbef000ff, 0x00000400, 0xbefe00c1, 0xbeff00c1, - 0xb8ef2a05, 0x806f816f, - 0x8e6f826f, 0x8e76886f, + 0xb8fb2a05, 0x807b817b, + 0x8e7b827b, 0x8e76887b, 0xbef600ff, 0x01000000, - 0xbeee0078, 0x8078ff78, - 0x00000400, 0xbefc0084, - 0xbf11087c, 0x806fff6f, - 0x00008000, 0xe0524000, - 0x781d0000, 0xe0524100, - 0x781d0100, 0xe0524200, - 0x781d0200, 0xe0524300, - 0x781d0300, 0xbf8c0f70, + 0xbefc0084, 0xbf0a7b7c, + 0xbf84006d, 0xbf11017c, + 0x807bff7b, 0x00001000, + 0x867aff78, 0x00400000, + 0xbf850003, 0xb8faf803, + 0x897a7aff, 0x10000000, + 0xbf850051, 0xbe840080, + 0xd2890000, 0x00000900, + 0x80048104, 0xd2890001, + 0x00000900, 0x80048104, + 0xd2890002, 0x00000900, + 0x80048104, 0xd2890003, + 0x00000900, 0x80048104, + 0xc069003a, 0x00000070, + 0xbf8cc07f, 0x80709070, + 0xbf06c004, 0xbf84ffee, + 0xbe840080, 0xd2890000, + 0x00000901, 0x80048104, + 0xd2890001, 0x00000901, + 0x80048104, 0xd2890002, + 0x00000901, 0x80048104, + 0xd2890003, 0x00000901, + 0x80048104, 0xc069003a, + 0x00000070, 0xbf8cc07f, + 0x80709070, 0xbf06c004, + 0xbf84ffee, 0xbe840080, + 0xd2890000, 0x00000902, + 0x80048104, 0xd2890001, + 0x00000902, 0x80048104, + 0xd2890002, 0x00000902, + 0x80048104, 0xd2890003, + 0x00000902, 0x80048104, + 0xc069003a, 0x00000070, + 0xbf8cc07f, 0x80709070, + 0xbf06c004, 0xbf84ffee, + 0xbe840080, 0xd2890000, + 0x00000903, 0x80048104, + 0xd2890001, 0x00000903, + 0x80048104, 0xd2890002, + 0x00000903, 0x80048104, + 0xd2890003, 0x00000903, + 0x80048104, 0xc069003a, + 0x00000070, 0xbf8cc07f, + 0x80709070, 0xbf06c004, + 0xbf84ffee, 0x807c847c, + 0xbf0a7b7c, 0xbf85ffb1, + 0xbf9c0000, 0xbf820012, 0x7e000300, 0x7e020301, 0x7e040302, 0x7e060303, - 0x807c847c, 0x8078ff78, - 0x00000400, 0xbf0a6f7c, - 0xbf85ffee, 0xbf9c0000, - 0xe0524000, 0x6e1d0000, - 0xe0524100, 0x6e1d0100, - 0xe0524200, 0x6e1d0200, - 0xe0524300, 0x6e1d0300, + 0xe0724000, 0x701d0000, + 0xe0724100, 0x701d0100, + 0xe0724200, 0x701d0200, + 0xe0724300, 0x701d0300, + 0x807c847c, 0x8070ff70, + 0x00000400, 0xbf0a7b7c, + 0xbf85ffef, 0xbf9c0000, + 0xbf8200da, 0xbef4007e, + 0x8675ff7f, 0x0000ffff, + 0x8775ff75, 0x00040000, + 0xbef60080, 0xbef700ff, + 0x00807fac, 0x866eff7f, + 0x08000000, 0x8f6e836e, + 0x87776e77, 0x866eff7f, + 0x70000000, 0x8f6e816e, + 0x87776e77, 
0x866eff7f, + 0x04000000, 0xbf84001e, + 0xbefe00c1, 0xbeff00c1, + 0xb8ef4306, 0x866fc16f, + 0xbf840019, 0x8e6f866f, + 0x8e6f826f, 0xbef6006f, 0xb8f82a05, 0x80788178, 0x8e788a78, 0xb8ee1605, 0x806e816e, 0x8e6e866e, - 0x80786e78, 0x80f8c078, - 0xb8ef1605, 0x806f816f, - 0x8e6f846f, 0x8e76826f, - 0xbef600ff, 0x01000000, - 0xbefc006f, 0xc031003a, - 0x00000078, 0x80f8c078, - 0xbf8cc07f, 0x80fc907c, - 0xbf800000, 0xbe802d00, - 0xbe822d02, 0xbe842d04, - 0xbe862d06, 0xbe882d08, - 0xbe8a2d0a, 0xbe8c2d0c, - 0xbe8e2d0e, 0xbf06807c, - 0xbf84fff0, 0xb8f82a05, + 0x80786e78, 0x8078ff78, + 0x00000080, 0xbef600ff, + 0x01000000, 0xbefc0080, + 0xe0510000, 0x781d0000, + 0xe0510100, 0x781d0000, + 0x807cff7c, 0x00000200, + 0x8078ff78, 0x00000200, + 0xbf0a6f7c, 0xbf85fff6, + 0xbef80080, 0xbefe00c1, + 0xbeff00c1, 0xb8ef2a05, + 0x806f816f, 0x8e6f826f, + 0x8e76886f, 0xbef600ff, + 0x01000000, 0xbeee0078, + 0x8078ff78, 0x00000400, + 0xbefc0084, 0xbf11087c, + 0x806fff6f, 0x00008000, + 0xe0524000, 0x781d0000, + 0xe0524100, 0x781d0100, + 0xe0524200, 0x781d0200, + 0xe0524300, 0x781d0300, + 0xbf8c0f70, 0x7e000300, + 0x7e020301, 0x7e040302, + 0x7e060303, 0x807c847c, + 0x8078ff78, 0x00000400, + 0xbf0a6f7c, 0xbf85ffee, + 0xbf9c0000, 0xe0524000, + 0x6e1d0000, 0xe0524100, + 0x6e1d0100, 0xe0524200, + 0x6e1d0200, 0xe0524300, + 0x6e1d0300, 0xb8f82a05, 0x80788178, 0x8e788a78, 0xb8ee1605, 0x806e816e, 0x8e6e866e, 0x80786e78, - 0xbef60084, 0xbef600ff, - 0x01000000, 0xc0211bfa, + 0x80f8c078, 0xb8ef1605, + 0x806f816f, 0x8e6f846f, + 0x8e76826f, 0xbef600ff, + 0x01000000, 0xbefc006f, + 0xc031003a, 0x00000078, + 0x80f8c078, 0xbf8cc07f, + 0x80fc907c, 0xbf800000, + 0xbe802d00, 0xbe822d02, + 0xbe842d04, 0xbe862d06, + 0xbe882d08, 0xbe8a2d0a, + 0xbe8c2d0c, 0xbe8e2d0e, + 0xbf06807c, 0xbf84fff0, + 0xb8f82a05, 0x80788178, + 0x8e788a78, 0xb8ee1605, + 0x806e816e, 0x8e6e866e, + 0x80786e78, 0xbef60084, + 0xbef600ff, 0x01000000, + 0xc0211bfa, 0x00000078, + 0x80788478, 0xc0211b3a, 0x00000078, 0x80788478, - 0xc0211b3a, 0x00000078, - 0x80788478, 0xc0211b7a, + 0xc0211b7a, 0x00000078, + 0x80788478, 0xc0211c3a, 0x00000078, 0x80788478, - 0xc0211c3a, 0x00000078, - 0x80788478, 0xc0211c7a, + 0xc0211c7a, 0x00000078, + 0x80788478, 0xc0211eba, 0x00000078, 0x80788478, - 0xc0211eba, 0x00000078, - 0x80788478, 0xc0211efa, + 0xc0211efa, 0x00000078, + 0x80788478, 0xc0211a3a, 0x00000078, 0x80788478, - 0xc0211a3a, 0x00000078, - 0x80788478, 0xc0211a7a, + 0xc0211a7a, 0x00000078, + 0x80788478, 0xc0211cfa, 0x00000078, 0x80788478, - 0xc0211cfa, 0x00000078, - 0x80788478, 0xbf8cc07f, - 0xbefc006f, 0xbefe0070, - 0xbeff0071, 0x866f7bff, - 0x000003ff, 0xb96f4803, - 0x866f7bff, 0xfffff800, - 0x8f6f8b6f, 0xb96fa2c3, - 0xb973f801, 0xb8ee2a05, - 0x806e816e, 0x8e6e8a6e, - 0xb8ef1605, 0x806f816f, - 0x8e6f866f, 0x806e6f6e, - 0x806e746e, 0x826f8075, - 0x866fff6f, 0x0000ffff, - 0xc00b1c37, 0x00000050, - 0xc00b1d37, 0x00000060, - 0xc0031e77, 0x00000074, - 0xbf8cc07f, 0x866fff6d, - 0xf8000000, 0x8f6f9b6f, - 0x8e6f906f, 0xbeee0080, - 0x876e6f6e, 0x866fff6d, - 0x04000000, 0x8f6f9a6f, - 0x8e6f8f6f, 0x876e6f6e, - 0x866fff7a, 0x00800000, - 0x8f6f976f, 0xb96ef807, - 0x866dff6d, 0x0000ffff, - 0x86fe7e7e, 0x86ea6a6a, - 0x8f6e837a, 0xb96ee0c2, - 0xbf800002, 0xb97a0002, - 0xbf8a0000, 0x95806f6c, - 0xbf810000, 0x00000000, + 0xbf8cc07f, 0xbefc006f, + 0xbefe0070, 0xbeff0071, + 0x866f7bff, 0x000003ff, + 0xb96f4803, 0x866f7bff, + 0xfffff800, 0x8f6f8b6f, + 0xb96fa2c3, 0xb973f801, + 0xb8ee2a05, 0x806e816e, + 0x8e6e8a6e, 0xb8ef1605, + 0x806f816f, 0x8e6f866f, + 0x806e6f6e, 0x806e746e, + 0x826f8075, 0x866fff6f, + 
0x0000ffff, 0xc00b1c37, + 0x00000050, 0xc00b1d37, + 0x00000060, 0xc0031e77, + 0x00000074, 0xbf8cc07f, + 0x866fff6d, 0xf8000000, + 0x8f6f9b6f, 0x8e6f906f, + 0xbeee0080, 0x876e6f6e, + 0x866fff6d, 0x04000000, + 0x8f6f9a6f, 0x8e6f8f6f, + 0x876e6f6e, 0x866fff7a, + 0x00800000, 0x8f6f976f, + 0xb96ef807, 0x866dff6d, + 0x0000ffff, 0x86fe7e7e, + 0x86ea6a6a, 0x8f6e837a, + 0xb96ee0c2, 0xbf800002, + 0xb97a0002, 0xbf8a0000, + 0x95806f6c, 0xbf810000, }; static const uint32_t cwsr_trap_gfx10_hex[] = { diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm index 871f2d431a44..4d146bca0b05 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm @@ -94,6 +94,7 @@ var SGPR_SAVE_USE_SQC = 1 //use SQC D$ to do the write var USE_MTBUF_INSTEAD_OF_MUBUF = 0 //because TC EMU currently asserts on 0 of // overload DFMT field to carry 4 more bits of stride for MUBUF opcodes var SWIZZLE_EN = 0 //whether we use swizzled buffer addressing var ACK_SQC_STORE = 1 //workaround for suspected SQC store bug causing incorrect stores under concurrency +var SAVE_AFTER_XNACK_ERROR = 1 //workaround for TCP store failure after XNACK error when ALLOW_REPLAY=0, for debugger /**************************************************************************/ /* variables */ @@ -107,6 +108,7 @@ var SQ_WAVE_STATUS_PRE_SPI_PRIO_SHIFT = 0 var SQ_WAVE_STATUS_PRE_SPI_PRIO_SIZE = 1 var SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT = 3 var SQ_WAVE_STATUS_POST_SPI_PRIO_SIZE = 29 +var SQ_WAVE_STATUS_ALLOW_REPLAY_MASK = 0x400000 var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12 var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9 @@ -127,6 +129,7 @@ var SQ_WAVE_TRAPSTS_POST_SAVECTX_MASK = 0xFFFFF800 var SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT = 11 var SQ_WAVE_TRAPSTS_POST_SAVECTX_SIZE = 21 var SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK = 0x800 +var SQ_WAVE_TRAPSTS_XNACK_ERROR_MASK = 0x10000000 var SQ_WAVE_IB_STS_RCNT_SHIFT = 16 //FIXME var SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT = 15 //FIXME @@ -584,6 +587,16 @@ if G8SR_VGPR_SR_IN_DWX4 s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0 s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE // reset const stride to 4 bytes else +if SAVE_AFTER_XNACK_ERROR + check_if_tcp_store_ok() + s_cbranch_scc1 L_SAVE_FIRST_VGPRS_WITH_TCP + + write_vgprs_to_mem_with_sqc(v0, 4, s_save_buf_rsrc0, s_save_mem_offset) + s_branch L_SAVE_LDS + +L_SAVE_FIRST_VGPRS_WITH_TCP: +end + buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256 buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*2 @@ -683,6 +696,27 @@ elsif LDS_DMA_ENABLE==1 && UNROLL==1 // UNROOL , has ichace miss else // BUFFER_STORE v_mbcnt_lo_u32_b32 v2, 0xffffffff, 0x0 v_mbcnt_hi_u32_b32 v3, 0xffffffff, v2 // tid + +if SAVE_AFTER_XNACK_ERROR + check_if_tcp_store_ok() + s_cbranch_scc1 L_SAVE_LDS_WITH_TCP + + v_lshlrev_b32 v2, 2, v3 +L_SAVE_LDS_LOOP_SQC: + ds_read2_b32 v[0:1], v2 offset0:0 offset1:0x40 + s_waitcnt lgkmcnt(0) + + write_vgprs_to_mem_with_sqc(v0, 2, s_save_buf_rsrc0, s_save_mem_offset) + + v_add_u32 v2, 0x200, v2 + v_cmp_lt_u32 vcc[0:1], v2, s_save_alloc_size + s_cbranch_vccnz L_SAVE_LDS_LOOP_SQC + + s_branch L_SAVE_LDS_DONE + +L_SAVE_LDS_WITH_TCP: +end + v_mul_i32_i24 v2, v3, 8 // tid*8 v_mov_b32 v3, 256*2 s_mov_b32 m0, 0x10000 @@ -769,6 +803,21 @@ else s_set_gpr_idx_on m0, 0x1 
//M0[7:0] = M0[7:0] and M0[15:12] = 0x1 s_add_u32 s_save_alloc_size, s_save_alloc_size, 0x1000 //add 0x1000 since we compare m0 against it later +if SAVE_AFTER_XNACK_ERROR + check_if_tcp_store_ok() + s_cbranch_scc1 L_SAVE_VGPR_LOOP + +L_SAVE_VGPR_LOOP_SQC: + write_vgprs_to_mem_with_sqc(v0, 4, s_save_buf_rsrc0, s_save_mem_offset) + + s_add_u32 m0, m0, 4 + s_cmp_lt_u32 m0, s_save_alloc_size + s_cbranch_scc1 L_SAVE_VGPR_LOOP_SQC + + s_set_gpr_idx_off + s_branch L_SAVE_VGPR_END +end + L_SAVE_VGPR_LOOP: v_mov_b32 v0, v0 //v0 = v[0+m0] v_mov_b32 v1, v1 //v0 = v[0+m0] @@ -1265,7 +1314,39 @@ function read_16sgpr_from_mem(s, s_rsrc, s_mem_offset) s_sub_u32 s_mem_offset, s_mem_offset, 4*16 end +function check_if_tcp_store_ok + // If STATUS.ALLOW_REPLAY=0 and TRAPSTS.XNACK_ERROR=1 then TCP stores will fail. + s_and_b32 s_save_tmp, s_save_status, SQ_WAVE_STATUS_ALLOW_REPLAY_MASK + s_cbranch_scc1 L_TCP_STORE_CHECK_DONE + + s_getreg_b32 s_save_tmp, hwreg(HW_REG_TRAPSTS) + s_andn2_b32 s_save_tmp, SQ_WAVE_TRAPSTS_XNACK_ERROR_MASK, s_save_tmp + +L_TCP_STORE_CHECK_DONE: +end + +function write_vgpr_to_mem_with_sqc(v, s_rsrc, s_mem_offset) + s_mov_b32 s4, 0 +L_WRITE_VGPR_LANE_LOOP: + for var lane = 0; lane < 4; ++ lane + v_readlane_b32 s[lane], v, s4 + s_add_u32 s4, s4, 1 + end + + s_buffer_store_dwordx4 s[0:3], s_rsrc, s_mem_offset glc:1 + ack_sqc_store_workaround() + + s_add_u32 s_mem_offset, s_mem_offset, 0x10 + s_cmp_eq_u32 s4, 0x40 + s_cbranch_scc0 L_WRITE_VGPR_LANE_LOOP +end + +function write_vgprs_to_mem_with_sqc(v, n_vgprs, s_rsrc, s_mem_offset) + for var vgpr = 0; vgpr < n_vgprs; ++ vgpr + write_vgpr_to_mem_with_sqc(v[vgpr], s_rsrc, s_mem_offset) + end +end function get_lds_size_bytes(s_lds_size_byte) // SQ LDS granularity is 64DW, while PGM_RSRC2.lds_size is in granularity 128DW -- cgit v1.2.3 From 7c2eaf5cdb8797bae792fb3cd96ebc6c7f3c53e7 Mon Sep 17 00:00:00 2001 From: Jay Cornwall Date: Mon, 22 Jul 2019 19:21:13 -0500 Subject: drm/amdkfd: Fix lost single step exceptions in gfx9 trap handler If the trap is entered due to MODE.DEBUG_EN=1 and SAVECTX is raised concurrently the handler cannot identify the source of the exception. This causes the debugger to lose single step exception notification when a context save request arrives at the same time. When MODE.DEBUG_EN=1 and STATUS.HALT=0 (exception not already handled) jump to the second-level trap handler upon entering the trap. The second-level trap will set STATUS.HALT=1 and return to the shader. If SAVECTX was raised then control flow will return to the trap, which will then handle the context save request. 
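Paraphrased in C, the new check at trap entry looks roughly like this (illustrative only; the real change is in the gfx9 trap handler assembly, and the names and bit positions here are inferred rather than the handler's own symbols):

	#include <stdbool.h>

	#define MODE_DEBUG_EN_MASK	(1u << 11)	/* MODE.DEBUG_EN */
	#define STATUS_HALT_MASK	(1u << 13)	/* STATUS.HALT */

	static bool defer_to_second_level(unsigned int mode, unsigned int status)
	{
		/* DEBUG_EN set and the wave not yet halted: this may be a
		 * single-step exception, so let the second-level handler halt
		 * the wave and notify the debugger first; a concurrent SAVECTX
		 * request is serviced when the wave re-enters the trap.
		 */
		return (mode & MODE_DEBUG_EN_MASK) && !(status & STATUS_HALT_MASK);
	}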
Cc: Tony Tye Cc: Laurent Morichetti Cc: Qingchuan Shi Signed-off-by: Jay Cornwall Reviewed-by: Laurent Morichetti Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h | 1418 ++++++++++---------- .../gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm | 20 + 2 files changed, 733 insertions(+), 705 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h index 7274baff5c16..427594035597 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h @@ -274,405 +274,409 @@ static const uint32_t cwsr_trap_gfx8_hex[] = { static const uint32_t cwsr_trap_gfx9_hex[] = { - 0xbf820001, 0xbf820241, + 0xbf820001, 0xbf820248, 0xb8f8f802, 0x89788678, - 0xb8fbf803, 0x866eff7b, - 0x00000400, 0xbf85003b, - 0x866eff7b, 0x00000800, - 0xbf850003, 0x866eff7b, - 0x00000100, 0xbf84000c, + 0xb8eef801, 0x866eff6e, + 0x00000800, 0xbf840003, 0x866eff78, 0x00002000, - 0xbf840005, 0xbf8e0010, - 0xb8eef803, 0x866eff6e, - 0x00000400, 0xbf84fffb, - 0x8778ff78, 0x00002000, - 0x80ec886c, 0x82ed806d, - 0xb8eef807, 0x866fff6e, - 0x001f8000, 0x8e6f8b6f, - 0x8977ff77, 0xfc000000, - 0x87776f77, 0x896eff6e, - 0x001f8000, 0xb96ef807, - 0xb8faf812, 0xb8fbf813, - 0x8efa887a, 0xc0071bbd, - 0x00000000, 0xbf8cc07f, - 0xc0071ebd, 0x00000008, - 0xbf8cc07f, 0x86ee6e6e, - 0xbf840001, 0xbe801d6e, - 0xb8fbf803, 0x867bff7b, - 0x000001ff, 0xbf850002, - 0x806c846c, 0x826d806d, + 0xbf840016, 0xb8fbf803, + 0x866eff7b, 0x00000400, + 0xbf85003b, 0x866eff7b, + 0x00000800, 0xbf850003, + 0x866eff7b, 0x00000100, + 0xbf84000c, 0x866eff78, + 0x00002000, 0xbf840005, + 0xbf8e0010, 0xb8eef803, + 0x866eff6e, 0x00000400, + 0xbf84fffb, 0x8778ff78, + 0x00002000, 0x80ec886c, + 0x82ed806d, 0xb8eef807, + 0x866fff6e, 0x001f8000, + 0x8e6f8b6f, 0x8977ff77, + 0xfc000000, 0x87776f77, + 0x896eff6e, 0x001f8000, + 0xb96ef807, 0xb8faf812, + 0xb8fbf813, 0x8efa887a, + 0xc0071bbd, 0x00000000, + 0xbf8cc07f, 0xc0071ebd, + 0x00000008, 0xbf8cc07f, + 0x86ee6e6e, 0xbf840001, + 0xbe801d6e, 0xb8fbf803, + 0x867bff7b, 0x000001ff, + 0xbf850002, 0x806c846c, + 0x826d806d, 0x866dff6d, + 0x0000ffff, 0x8f6e8b77, + 0x866eff6e, 0x001f8000, + 0xb96ef807, 0x86fe7e7e, + 0x86ea6a6a, 0x8f6e8378, + 0xb96ee0c2, 0xbf800002, + 0xb9780002, 0xbe801f6c, 0x866dff6d, 0x0000ffff, - 0x8f6e8b77, 0x866eff6e, - 0x001f8000, 0xb96ef807, - 0x86fe7e7e, 0x86ea6a6a, - 0x8f6e8378, 0xb96ee0c2, - 0xbf800002, 0xb9780002, - 0xbe801f6c, 0x866dff6d, - 0x0000ffff, 0xbefa0080, - 0xb97a0283, 0xb8fa2407, - 0x8e7a9b7a, 0x876d7a6d, - 0xb8fa03c7, 0x8e7a9a7a, - 0x876d7a6d, 0xb8faf807, - 0x867aff7a, 0x00007fff, - 0xb97af807, 0xbeee007e, - 0xbeef007f, 0xbefe0180, - 0xbf900004, 0x877a8478, - 0xb97af802, 0xbf8e0002, - 0xbf88fffe, 0xb8fa2a05, - 0x807a817a, 0x8e7a8a7a, - 0xb8fb1605, 0x807b817b, - 0x8e7b867b, 0x807a7b7a, - 0x807a7e7a, 0x827b807f, - 0x867bff7b, 0x0000ffff, - 0xc04b1c3d, 0x00000050, - 0xbf8cc07f, 0xc04b1d3d, - 0x00000060, 0xbf8cc07f, - 0xc0431e7d, 0x00000074, - 0xbf8cc07f, 0xbef4007e, - 0x8675ff7f, 0x0000ffff, - 0x8775ff75, 0x00040000, - 0xbef60080, 0xbef700ff, - 0x00807fac, 0x867aff7f, - 0x08000000, 0x8f7a837a, - 0x87777a77, 0x867aff7f, - 0x70000000, 0x8f7a817a, - 0x87777a77, 0xbef1007c, - 0xbef00080, 0xb8f02a05, - 0x80708170, 0x8e708a70, - 0xb8fa1605, 0x807a817a, - 0x8e7a867a, 0x80707a70, - 0xbef60084, 0xbef600ff, - 0x01000000, 0xbefe007c, - 0xbefc0070, 0xc0611c7a, - 0x0000007c, 0xbf8cc07f, - 0x80708470, 0xbefc007e, + 0xbefa0080, 0xb97a0283, + 0xb8fa2407, 
0x8e7a9b7a, + 0x876d7a6d, 0xb8fa03c7, + 0x8e7a9a7a, 0x876d7a6d, + 0xb8faf807, 0x867aff7a, + 0x00007fff, 0xb97af807, + 0xbeee007e, 0xbeef007f, + 0xbefe0180, 0xbf900004, + 0x877a8478, 0xb97af802, + 0xbf8e0002, 0xbf88fffe, + 0xb8fa2a05, 0x807a817a, + 0x8e7a8a7a, 0xb8fb1605, + 0x807b817b, 0x8e7b867b, + 0x807a7b7a, 0x807a7e7a, + 0x827b807f, 0x867bff7b, + 0x0000ffff, 0xc04b1c3d, + 0x00000050, 0xbf8cc07f, + 0xc04b1d3d, 0x00000060, + 0xbf8cc07f, 0xc0431e7d, + 0x00000074, 0xbf8cc07f, + 0xbef4007e, 0x8675ff7f, + 0x0000ffff, 0x8775ff75, + 0x00040000, 0xbef60080, + 0xbef700ff, 0x00807fac, + 0x867aff7f, 0x08000000, + 0x8f7a837a, 0x87777a77, + 0x867aff7f, 0x70000000, + 0x8f7a817a, 0x87777a77, + 0xbef1007c, 0xbef00080, + 0xb8f02a05, 0x80708170, + 0x8e708a70, 0xb8fa1605, + 0x807a817a, 0x8e7a867a, + 0x80707a70, 0xbef60084, + 0xbef600ff, 0x01000000, 0xbefe007c, 0xbefc0070, - 0xc0611b3a, 0x0000007c, + 0xc0611c7a, 0x0000007c, 0xbf8cc07f, 0x80708470, 0xbefc007e, 0xbefe007c, - 0xbefc0070, 0xc0611b7a, + 0xbefc0070, 0xc0611b3a, 0x0000007c, 0xbf8cc07f, 0x80708470, 0xbefc007e, 0xbefe007c, 0xbefc0070, - 0xc0611bba, 0x0000007c, + 0xc0611b7a, 0x0000007c, 0xbf8cc07f, 0x80708470, 0xbefc007e, 0xbefe007c, - 0xbefc0070, 0xc0611bfa, + 0xbefc0070, 0xc0611bba, 0x0000007c, 0xbf8cc07f, 0x80708470, 0xbefc007e, 0xbefe007c, 0xbefc0070, - 0xc0611e3a, 0x0000007c, - 0xbf8cc07f, 0x80708470, - 0xbefc007e, 0xb8fbf803, - 0xbefe007c, 0xbefc0070, - 0xc0611efa, 0x0000007c, + 0xc0611bfa, 0x0000007c, 0xbf8cc07f, 0x80708470, 0xbefc007e, 0xbefe007c, - 0xbefc0070, 0xc0611a3a, + 0xbefc0070, 0xc0611e3a, + 0x0000007c, 0xbf8cc07f, + 0x80708470, 0xbefc007e, + 0xb8fbf803, 0xbefe007c, + 0xbefc0070, 0xc0611efa, 0x0000007c, 0xbf8cc07f, 0x80708470, 0xbefc007e, 0xbefe007c, 0xbefc0070, - 0xc0611a7a, 0x0000007c, - 0xbf8cc07f, 0x80708470, - 0xbefc007e, 0xb8f1f801, - 0xbefe007c, 0xbefc0070, - 0xc0611c7a, 0x0000007c, + 0xc0611a3a, 0x0000007c, 0xbf8cc07f, 0x80708470, - 0xbefc007e, 0x867aff7f, - 0x04000000, 0xbeef0080, - 0x876f6f7a, 0xb8f02a05, - 0x80708170, 0x8e708a70, - 0xb8fb1605, 0x807b817b, - 0x8e7b847b, 0x8e76827b, - 0xbef600ff, 0x01000000, - 0xbef20174, 0x80747074, - 0x82758075, 0xbefc0080, - 0xbf800000, 0xbe802b00, - 0xbe822b02, 0xbe842b04, - 0xbe862b06, 0xbe882b08, - 0xbe8a2b0a, 0xbe8c2b0c, - 0xbe8e2b0e, 0xc06b003a, - 0x00000000, 0xbf8cc07f, - 0xc06b013a, 0x00000010, - 0xbf8cc07f, 0xc06b023a, - 0x00000020, 0xbf8cc07f, - 0xc06b033a, 0x00000030, - 0xbf8cc07f, 0x8074c074, - 0x82758075, 0x807c907c, - 0xbf0a7b7c, 0xbf85ffe7, - 0xbef40172, 0xbef00080, - 0xbefe00c1, 0xbeff00c1, - 0xbee80080, 0xbee90080, - 0xbef600ff, 0x01000000, - 0x867aff78, 0x00400000, - 0xbf850003, 0xb8faf803, - 0x897a7aff, 0x10000000, - 0xbf85004d, 0xbe840080, - 0xd2890000, 0x00000900, - 0x80048104, 0xd2890001, - 0x00000900, 0x80048104, - 0xd2890002, 0x00000900, - 0x80048104, 0xd2890003, - 0x00000900, 0x80048104, - 0xc069003a, 0x00000070, - 0xbf8cc07f, 0x80709070, - 0xbf06c004, 0xbf84ffee, + 0xbefc007e, 0xbefe007c, + 0xbefc0070, 0xc0611a7a, + 0x0000007c, 0xbf8cc07f, + 0x80708470, 0xbefc007e, + 0xb8f1f801, 0xbefe007c, + 0xbefc0070, 0xc0611c7a, + 0x0000007c, 0xbf8cc07f, + 0x80708470, 0xbefc007e, + 0x867aff7f, 0x04000000, + 0xbeef0080, 0x876f6f7a, + 0xb8f02a05, 0x80708170, + 0x8e708a70, 0xb8fb1605, + 0x807b817b, 0x8e7b847b, + 0x8e76827b, 0xbef600ff, + 0x01000000, 0xbef20174, + 0x80747074, 0x82758075, + 0xbefc0080, 0xbf800000, + 0xbe802b00, 0xbe822b02, + 0xbe842b04, 0xbe862b06, + 0xbe882b08, 0xbe8a2b0a, + 0xbe8c2b0c, 0xbe8e2b0e, + 0xc06b003a, 0x00000000, + 0xbf8cc07f, 0xc06b013a, + 0x00000010, 
0xbf8cc07f, + 0xc06b023a, 0x00000020, + 0xbf8cc07f, 0xc06b033a, + 0x00000030, 0xbf8cc07f, + 0x8074c074, 0x82758075, + 0x807c907c, 0xbf0a7b7c, + 0xbf85ffe7, 0xbef40172, + 0xbef00080, 0xbefe00c1, + 0xbeff00c1, 0xbee80080, + 0xbee90080, 0xbef600ff, + 0x01000000, 0x867aff78, + 0x00400000, 0xbf850003, + 0xb8faf803, 0x897a7aff, + 0x10000000, 0xbf85004d, 0xbe840080, 0xd2890000, - 0x00000901, 0x80048104, - 0xd2890001, 0x00000901, + 0x00000900, 0x80048104, + 0xd2890001, 0x00000900, 0x80048104, 0xd2890002, - 0x00000901, 0x80048104, - 0xd2890003, 0x00000901, + 0x00000900, 0x80048104, + 0xd2890003, 0x00000900, 0x80048104, 0xc069003a, 0x00000070, 0xbf8cc07f, 0x80709070, 0xbf06c004, 0xbf84ffee, 0xbe840080, - 0xd2890000, 0x00000902, + 0xd2890000, 0x00000901, 0x80048104, 0xd2890001, - 0x00000902, 0x80048104, - 0xd2890002, 0x00000902, + 0x00000901, 0x80048104, + 0xd2890002, 0x00000901, 0x80048104, 0xd2890003, - 0x00000902, 0x80048104, + 0x00000901, 0x80048104, 0xc069003a, 0x00000070, 0xbf8cc07f, 0x80709070, 0xbf06c004, 0xbf84ffee, 0xbe840080, 0xd2890000, - 0x00000903, 0x80048104, - 0xd2890001, 0x00000903, + 0x00000902, 0x80048104, + 0xd2890001, 0x00000902, 0x80048104, 0xd2890002, - 0x00000903, 0x80048104, - 0xd2890003, 0x00000903, + 0x00000902, 0x80048104, + 0xd2890003, 0x00000902, 0x80048104, 0xc069003a, 0x00000070, 0xbf8cc07f, 0x80709070, 0xbf06c004, - 0xbf84ffee, 0xbf820008, - 0xe0724000, 0x701d0000, - 0xe0724100, 0x701d0100, - 0xe0724200, 0x701d0200, - 0xe0724300, 0x701d0300, - 0xbefe00c1, 0xbeff00c1, - 0xb8fb4306, 0x867bc17b, - 0xbf840063, 0xbf8a0000, - 0x867aff6f, 0x04000000, - 0xbf84005f, 0x8e7b867b, - 0x8e7b827b, 0xbef6007b, - 0xb8f02a05, 0x80708170, - 0x8e708a70, 0xb8fa1605, - 0x807a817a, 0x8e7a867a, - 0x80707a70, 0x8070ff70, - 0x00000080, 0xbef600ff, - 0x01000000, 0xbefc0080, - 0xd28c0002, 0x000100c1, - 0xd28d0003, 0x000204c1, - 0x867aff78, 0x00400000, - 0xbf850003, 0xb8faf803, - 0x897a7aff, 0x10000000, - 0xbf850030, 0x24040682, - 0xd86e4000, 0x00000002, - 0xbf8cc07f, 0xbe840080, - 0xd2890000, 0x00000900, + 0xbf84ffee, 0xbe840080, + 0xd2890000, 0x00000903, 0x80048104, 0xd2890001, - 0x00000900, 0x80048104, - 0xd2890002, 0x00000900, + 0x00000903, 0x80048104, + 0xd2890002, 0x00000903, 0x80048104, 0xd2890003, - 0x00000900, 0x80048104, + 0x00000903, 0x80048104, 0xc069003a, 0x00000070, 0xbf8cc07f, 0x80709070, 0xbf06c004, 0xbf84ffee, + 0xbf820008, 0xe0724000, + 0x701d0000, 0xe0724100, + 0x701d0100, 0xe0724200, + 0x701d0200, 0xe0724300, + 0x701d0300, 0xbefe00c1, + 0xbeff00c1, 0xb8fb4306, + 0x867bc17b, 0xbf840063, + 0xbf8a0000, 0x867aff6f, + 0x04000000, 0xbf84005f, + 0x8e7b867b, 0x8e7b827b, + 0xbef6007b, 0xb8f02a05, + 0x80708170, 0x8e708a70, + 0xb8fa1605, 0x807a817a, + 0x8e7a867a, 0x80707a70, + 0x8070ff70, 0x00000080, + 0xbef600ff, 0x01000000, + 0xbefc0080, 0xd28c0002, + 0x000100c1, 0xd28d0003, + 0x000204c1, 0x867aff78, + 0x00400000, 0xbf850003, + 0xb8faf803, 0x897a7aff, + 0x10000000, 0xbf850030, + 0x24040682, 0xd86e4000, + 0x00000002, 0xbf8cc07f, 0xbe840080, 0xd2890000, - 0x00000901, 0x80048104, - 0xd2890001, 0x00000901, + 0x00000900, 0x80048104, + 0xd2890001, 0x00000900, 0x80048104, 0xd2890002, - 0x00000901, 0x80048104, - 0xd2890003, 0x00000901, + 0x00000900, 0x80048104, + 0xd2890003, 0x00000900, 0x80048104, 0xc069003a, 0x00000070, 0xbf8cc07f, 0x80709070, 0xbf06c004, - 0xbf84ffee, 0x680404ff, - 0x00000200, 0xd0c9006a, - 0x0000f702, 0xbf87ffd2, - 0xbf820015, 0xd1060002, - 0x00011103, 0x7e0602ff, - 0x00000200, 0xbefc00ff, - 0x00010000, 0xbe800077, - 0x8677ff77, 0xff7fffff, - 0x8777ff77, 0x00058000, - 
0xd8ec0000, 0x00000002, - 0xbf8cc07f, 0xe0765000, - 0x701d0002, 0x68040702, - 0xd0c9006a, 0x0000f702, - 0xbf87fff7, 0xbef70000, - 0xbef000ff, 0x00000400, - 0xbefe00c1, 0xbeff00c1, - 0xb8fb2a05, 0x807b817b, - 0x8e7b827b, 0x8e76887b, - 0xbef600ff, 0x01000000, - 0xbefc0084, 0xbf0a7b7c, - 0xbf84006d, 0xbf11017c, - 0x807bff7b, 0x00001000, - 0x867aff78, 0x00400000, - 0xbf850003, 0xb8faf803, - 0x897a7aff, 0x10000000, - 0xbf850051, 0xbe840080, - 0xd2890000, 0x00000900, + 0xbf84ffee, 0xbe840080, + 0xd2890000, 0x00000901, 0x80048104, 0xd2890001, - 0x00000900, 0x80048104, - 0xd2890002, 0x00000900, + 0x00000901, 0x80048104, + 0xd2890002, 0x00000901, 0x80048104, 0xd2890003, - 0x00000900, 0x80048104, + 0x00000901, 0x80048104, 0xc069003a, 0x00000070, 0xbf8cc07f, 0x80709070, 0xbf06c004, 0xbf84ffee, + 0x680404ff, 0x00000200, + 0xd0c9006a, 0x0000f702, + 0xbf87ffd2, 0xbf820015, + 0xd1060002, 0x00011103, + 0x7e0602ff, 0x00000200, + 0xbefc00ff, 0x00010000, + 0xbe800077, 0x8677ff77, + 0xff7fffff, 0x8777ff77, + 0x00058000, 0xd8ec0000, + 0x00000002, 0xbf8cc07f, + 0xe0765000, 0x701d0002, + 0x68040702, 0xd0c9006a, + 0x0000f702, 0xbf87fff7, + 0xbef70000, 0xbef000ff, + 0x00000400, 0xbefe00c1, + 0xbeff00c1, 0xb8fb2a05, + 0x807b817b, 0x8e7b827b, + 0x8e76887b, 0xbef600ff, + 0x01000000, 0xbefc0084, + 0xbf0a7b7c, 0xbf84006d, + 0xbf11017c, 0x807bff7b, + 0x00001000, 0x867aff78, + 0x00400000, 0xbf850003, + 0xb8faf803, 0x897a7aff, + 0x10000000, 0xbf850051, 0xbe840080, 0xd2890000, - 0x00000901, 0x80048104, - 0xd2890001, 0x00000901, + 0x00000900, 0x80048104, + 0xd2890001, 0x00000900, 0x80048104, 0xd2890002, - 0x00000901, 0x80048104, - 0xd2890003, 0x00000901, + 0x00000900, 0x80048104, + 0xd2890003, 0x00000900, 0x80048104, 0xc069003a, 0x00000070, 0xbf8cc07f, 0x80709070, 0xbf06c004, 0xbf84ffee, 0xbe840080, - 0xd2890000, 0x00000902, + 0xd2890000, 0x00000901, 0x80048104, 0xd2890001, - 0x00000902, 0x80048104, - 0xd2890002, 0x00000902, + 0x00000901, 0x80048104, + 0xd2890002, 0x00000901, 0x80048104, 0xd2890003, - 0x00000902, 0x80048104, + 0x00000901, 0x80048104, 0xc069003a, 0x00000070, 0xbf8cc07f, 0x80709070, 0xbf06c004, 0xbf84ffee, 0xbe840080, 0xd2890000, - 0x00000903, 0x80048104, - 0xd2890001, 0x00000903, + 0x00000902, 0x80048104, + 0xd2890001, 0x00000902, 0x80048104, 0xd2890002, - 0x00000903, 0x80048104, - 0xd2890003, 0x00000903, + 0x00000902, 0x80048104, + 0xd2890003, 0x00000902, 0x80048104, 0xc069003a, 0x00000070, 0xbf8cc07f, 0x80709070, 0xbf06c004, - 0xbf84ffee, 0x807c847c, - 0xbf0a7b7c, 0xbf85ffb1, - 0xbf9c0000, 0xbf820012, - 0x7e000300, 0x7e020301, - 0x7e040302, 0x7e060303, - 0xe0724000, 0x701d0000, - 0xe0724100, 0x701d0100, - 0xe0724200, 0x701d0200, - 0xe0724300, 0x701d0300, - 0x807c847c, 0x8070ff70, - 0x00000400, 0xbf0a7b7c, - 0xbf85ffef, 0xbf9c0000, - 0xbf8200da, 0xbef4007e, - 0x8675ff7f, 0x0000ffff, - 0x8775ff75, 0x00040000, - 0xbef60080, 0xbef700ff, - 0x00807fac, 0x866eff7f, - 0x08000000, 0x8f6e836e, - 0x87776e77, 0x866eff7f, - 0x70000000, 0x8f6e816e, - 0x87776e77, 0x866eff7f, - 0x04000000, 0xbf84001e, - 0xbefe00c1, 0xbeff00c1, - 0xb8ef4306, 0x866fc16f, - 0xbf840019, 0x8e6f866f, - 0x8e6f826f, 0xbef6006f, - 0xb8f82a05, 0x80788178, - 0x8e788a78, 0xb8ee1605, - 0x806e816e, 0x8e6e866e, - 0x80786e78, 0x8078ff78, - 0x00000080, 0xbef600ff, - 0x01000000, 0xbefc0080, - 0xe0510000, 0x781d0000, - 0xe0510100, 0x781d0000, - 0x807cff7c, 0x00000200, - 0x8078ff78, 0x00000200, - 0xbf0a6f7c, 0xbf85fff6, - 0xbef80080, 0xbefe00c1, - 0xbeff00c1, 0xb8ef2a05, - 0x806f816f, 0x8e6f826f, - 0x8e76886f, 0xbef600ff, - 0x01000000, 0xbeee0078, - 
0x8078ff78, 0x00000400, - 0xbefc0084, 0xbf11087c, - 0x806fff6f, 0x00008000, - 0xe0524000, 0x781d0000, - 0xe0524100, 0x781d0100, - 0xe0524200, 0x781d0200, - 0xe0524300, 0x781d0300, - 0xbf8c0f70, 0x7e000300, + 0xbf84ffee, 0xbe840080, + 0xd2890000, 0x00000903, + 0x80048104, 0xd2890001, + 0x00000903, 0x80048104, + 0xd2890002, 0x00000903, + 0x80048104, 0xd2890003, + 0x00000903, 0x80048104, + 0xc069003a, 0x00000070, + 0xbf8cc07f, 0x80709070, + 0xbf06c004, 0xbf84ffee, + 0x807c847c, 0xbf0a7b7c, + 0xbf85ffb1, 0xbf9c0000, + 0xbf820012, 0x7e000300, 0x7e020301, 0x7e040302, - 0x7e060303, 0x807c847c, - 0x8078ff78, 0x00000400, - 0xbf0a6f7c, 0xbf85ffee, - 0xbf9c0000, 0xe0524000, - 0x6e1d0000, 0xe0524100, - 0x6e1d0100, 0xe0524200, - 0x6e1d0200, 0xe0524300, - 0x6e1d0300, 0xb8f82a05, + 0x7e060303, 0xe0724000, + 0x701d0000, 0xe0724100, + 0x701d0100, 0xe0724200, + 0x701d0200, 0xe0724300, + 0x701d0300, 0x807c847c, + 0x8070ff70, 0x00000400, + 0xbf0a7b7c, 0xbf85ffef, + 0xbf9c0000, 0xbf8200da, + 0xbef4007e, 0x8675ff7f, + 0x0000ffff, 0x8775ff75, + 0x00040000, 0xbef60080, + 0xbef700ff, 0x00807fac, + 0x866eff7f, 0x08000000, + 0x8f6e836e, 0x87776e77, + 0x866eff7f, 0x70000000, + 0x8f6e816e, 0x87776e77, + 0x866eff7f, 0x04000000, + 0xbf84001e, 0xbefe00c1, + 0xbeff00c1, 0xb8ef4306, + 0x866fc16f, 0xbf840019, + 0x8e6f866f, 0x8e6f826f, + 0xbef6006f, 0xb8f82a05, 0x80788178, 0x8e788a78, 0xb8ee1605, 0x806e816e, 0x8e6e866e, 0x80786e78, - 0x80f8c078, 0xb8ef1605, - 0x806f816f, 0x8e6f846f, - 0x8e76826f, 0xbef600ff, - 0x01000000, 0xbefc006f, - 0xc031003a, 0x00000078, - 0x80f8c078, 0xbf8cc07f, - 0x80fc907c, 0xbf800000, - 0xbe802d00, 0xbe822d02, - 0xbe842d04, 0xbe862d06, - 0xbe882d08, 0xbe8a2d0a, - 0xbe8c2d0c, 0xbe8e2d0e, - 0xbf06807c, 0xbf84fff0, + 0x8078ff78, 0x00000080, + 0xbef600ff, 0x01000000, + 0xbefc0080, 0xe0510000, + 0x781d0000, 0xe0510100, + 0x781d0000, 0x807cff7c, + 0x00000200, 0x8078ff78, + 0x00000200, 0xbf0a6f7c, + 0xbf85fff6, 0xbef80080, + 0xbefe00c1, 0xbeff00c1, + 0xb8ef2a05, 0x806f816f, + 0x8e6f826f, 0x8e76886f, + 0xbef600ff, 0x01000000, + 0xbeee0078, 0x8078ff78, + 0x00000400, 0xbefc0084, + 0xbf11087c, 0x806fff6f, + 0x00008000, 0xe0524000, + 0x781d0000, 0xe0524100, + 0x781d0100, 0xe0524200, + 0x781d0200, 0xe0524300, + 0x781d0300, 0xbf8c0f70, + 0x7e000300, 0x7e020301, + 0x7e040302, 0x7e060303, + 0x807c847c, 0x8078ff78, + 0x00000400, 0xbf0a6f7c, + 0xbf85ffee, 0xbf9c0000, + 0xe0524000, 0x6e1d0000, + 0xe0524100, 0x6e1d0100, + 0xe0524200, 0x6e1d0200, + 0xe0524300, 0x6e1d0300, 0xb8f82a05, 0x80788178, 0x8e788a78, 0xb8ee1605, 0x806e816e, 0x8e6e866e, - 0x80786e78, 0xbef60084, + 0x80786e78, 0x80f8c078, + 0xb8ef1605, 0x806f816f, + 0x8e6f846f, 0x8e76826f, 0xbef600ff, 0x01000000, - 0xc0211bfa, 0x00000078, - 0x80788478, 0xc0211b3a, + 0xbefc006f, 0xc031003a, + 0x00000078, 0x80f8c078, + 0xbf8cc07f, 0x80fc907c, + 0xbf800000, 0xbe802d00, + 0xbe822d02, 0xbe842d04, + 0xbe862d06, 0xbe882d08, + 0xbe8a2d0a, 0xbe8c2d0c, + 0xbe8e2d0e, 0xbf06807c, + 0xbf84fff0, 0xb8f82a05, + 0x80788178, 0x8e788a78, + 0xb8ee1605, 0x806e816e, + 0x8e6e866e, 0x80786e78, + 0xbef60084, 0xbef600ff, + 0x01000000, 0xc0211bfa, 0x00000078, 0x80788478, - 0xc0211b7a, 0x00000078, - 0x80788478, 0xc0211c3a, + 0xc0211b3a, 0x00000078, + 0x80788478, 0xc0211b7a, 0x00000078, 0x80788478, - 0xc0211c7a, 0x00000078, - 0x80788478, 0xc0211eba, + 0xc0211c3a, 0x00000078, + 0x80788478, 0xc0211c7a, 0x00000078, 0x80788478, - 0xc0211efa, 0x00000078, - 0x80788478, 0xc0211a3a, + 0xc0211eba, 0x00000078, + 0x80788478, 0xc0211efa, 0x00000078, 0x80788478, - 0xc0211a7a, 0x00000078, - 0x80788478, 
0xc0211cfa, + 0xc0211a3a, 0x00000078, + 0x80788478, 0xc0211a7a, 0x00000078, 0x80788478, - 0xbf8cc07f, 0xbefc006f, - 0xbefe0070, 0xbeff0071, - 0x866f7bff, 0x000003ff, - 0xb96f4803, 0x866f7bff, - 0xfffff800, 0x8f6f8b6f, - 0xb96fa2c3, 0xb973f801, - 0xb8ee2a05, 0x806e816e, - 0x8e6e8a6e, 0xb8ef1605, - 0x806f816f, 0x8e6f866f, - 0x806e6f6e, 0x806e746e, - 0x826f8075, 0x866fff6f, - 0x0000ffff, 0xc00b1c37, - 0x00000050, 0xc00b1d37, - 0x00000060, 0xc0031e77, - 0x00000074, 0xbf8cc07f, - 0x866fff6d, 0xf8000000, - 0x8f6f9b6f, 0x8e6f906f, - 0xbeee0080, 0x876e6f6e, - 0x866fff6d, 0x04000000, - 0x8f6f9a6f, 0x8e6f8f6f, - 0x876e6f6e, 0x866fff7a, - 0x00800000, 0x8f6f976f, - 0xb96ef807, 0x866dff6d, - 0x0000ffff, 0x86fe7e7e, - 0x86ea6a6a, 0x8f6e837a, - 0xb96ee0c2, 0xbf800002, - 0xb97a0002, 0xbf8a0000, - 0x95806f6c, 0xbf810000, + 0xc0211cfa, 0x00000078, + 0x80788478, 0xbf8cc07f, + 0xbefc006f, 0xbefe0070, + 0xbeff0071, 0x866f7bff, + 0x000003ff, 0xb96f4803, + 0x866f7bff, 0xfffff800, + 0x8f6f8b6f, 0xb96fa2c3, + 0xb973f801, 0xb8ee2a05, + 0x806e816e, 0x8e6e8a6e, + 0xb8ef1605, 0x806f816f, + 0x8e6f866f, 0x806e6f6e, + 0x806e746e, 0x826f8075, + 0x866fff6f, 0x0000ffff, + 0xc00b1c37, 0x00000050, + 0xc00b1d37, 0x00000060, + 0xc0031e77, 0x00000074, + 0xbf8cc07f, 0x866fff6d, + 0xf8000000, 0x8f6f9b6f, + 0x8e6f906f, 0xbeee0080, + 0x876e6f6e, 0x866fff6d, + 0x04000000, 0x8f6f9a6f, + 0x8e6f8f6f, 0x876e6f6e, + 0x866fff7a, 0x00800000, + 0x8f6f976f, 0xb96ef807, + 0x866dff6d, 0x0000ffff, + 0x86fe7e7e, 0x86ea6a6a, + 0x8f6e837a, 0xb96ee0c2, + 0xbf800002, 0xb97a0002, + 0xbf8a0000, 0x95806f6c, + 0xbf810000, 0x00000000, }; static const uint32_t cwsr_trap_gfx10_hex[] = { @@ -974,248 +978,145 @@ static const uint32_t cwsr_trap_gfx10_hex[] = { 0xbf9f0000, 0x00000000, }; static const uint32_t cwsr_trap_arcturus_hex[] = { - 0xbf820001, 0xbf8202bd, + 0xbf820001, 0xbf8202c4, 0xb8f8f802, 0x89788678, - 0xb8fbf803, 0x866eff7b, - 0x00000400, 0xbf85003b, - 0x866eff7b, 0x00000800, - 0xbf850003, 0x866eff7b, - 0x00000100, 0xbf84000c, + 0xb8eef801, 0x866eff6e, + 0x00000800, 0xbf840003, 0x866eff78, 0x00002000, - 0xbf840005, 0xbf8e0010, - 0xb8eef803, 0x866eff6e, - 0x00000400, 0xbf84fffb, - 0x8778ff78, 0x00002000, - 0x80ec886c, 0x82ed806d, - 0xb8eef807, 0x866fff6e, - 0x001f8000, 0x8e6f8b6f, - 0x8977ff77, 0xfc000000, - 0x87776f77, 0x896eff6e, - 0x001f8000, 0xb96ef807, - 0xb8faf812, 0xb8fbf813, - 0x8efa887a, 0xc0071bbd, - 0x00000000, 0xbf8cc07f, - 0xc0071ebd, 0x00000008, - 0xbf8cc07f, 0x86ee6e6e, - 0xbf840001, 0xbe801d6e, - 0xb8fbf803, 0x867bff7b, - 0x000001ff, 0xbf850002, - 0x806c846c, 0x826d806d, + 0xbf840016, 0xb8fbf803, + 0x866eff7b, 0x00000400, + 0xbf85003b, 0x866eff7b, + 0x00000800, 0xbf850003, + 0x866eff7b, 0x00000100, + 0xbf84000c, 0x866eff78, + 0x00002000, 0xbf840005, + 0xbf8e0010, 0xb8eef803, + 0x866eff6e, 0x00000400, + 0xbf84fffb, 0x8778ff78, + 0x00002000, 0x80ec886c, + 0x82ed806d, 0xb8eef807, + 0x866fff6e, 0x001f8000, + 0x8e6f8b6f, 0x8977ff77, + 0xfc000000, 0x87776f77, + 0x896eff6e, 0x001f8000, + 0xb96ef807, 0xb8faf812, + 0xb8fbf813, 0x8efa887a, + 0xc0071bbd, 0x00000000, + 0xbf8cc07f, 0xc0071ebd, + 0x00000008, 0xbf8cc07f, + 0x86ee6e6e, 0xbf840001, + 0xbe801d6e, 0xb8fbf803, + 0x867bff7b, 0x000001ff, + 0xbf850002, 0x806c846c, + 0x826d806d, 0x866dff6d, + 0x0000ffff, 0x8f6e8b77, + 0x866eff6e, 0x001f8000, + 0xb96ef807, 0x86fe7e7e, + 0x86ea6a6a, 0x8f6e8378, + 0xb96ee0c2, 0xbf800002, + 0xb9780002, 0xbe801f6c, 0x866dff6d, 0x0000ffff, - 0x8f6e8b77, 0x866eff6e, - 0x001f8000, 0xb96ef807, - 0x86fe7e7e, 0x86ea6a6a, - 0x8f6e8378, 0xb96ee0c2, - 0xbf800002, 
0xb9780002, - 0xbe801f6c, 0x866dff6d, - 0x0000ffff, 0xbefa0080, - 0xb97a0283, 0xb8fa2407, - 0x8e7a9b7a, 0x876d7a6d, - 0xb8fa03c7, 0x8e7a9a7a, - 0x876d7a6d, 0xb8faf807, - 0x867aff7a, 0x00007fff, - 0xb97af807, 0xbeee007e, - 0xbeef007f, 0xbefe0180, - 0xbf900004, 0x877a8478, - 0xb97af802, 0xbf8e0002, - 0xbf88fffe, 0xb8fa2a05, - 0x807a817a, 0x8e7a8a7a, - 0x8e7a817a, 0xb8fb1605, - 0x807b817b, 0x8e7b867b, - 0x807a7b7a, 0x807a7e7a, - 0x827b807f, 0x867bff7b, - 0x0000ffff, 0xc04b1c3d, - 0x00000050, 0xbf8cc07f, - 0xc04b1d3d, 0x00000060, - 0xbf8cc07f, 0xc0431e7d, - 0x00000074, 0xbf8cc07f, - 0xbef4007e, 0x8675ff7f, - 0x0000ffff, 0x8775ff75, - 0x00040000, 0xbef60080, - 0xbef700ff, 0x00807fac, - 0x867aff7f, 0x08000000, - 0x8f7a837a, 0x87777a77, - 0x867aff7f, 0x70000000, - 0x8f7a817a, 0x87777a77, - 0xbef1007c, 0xbef00080, - 0xb8f02a05, 0x80708170, - 0x8e708a70, 0x8e708170, - 0xb8fa1605, 0x807a817a, - 0x8e7a867a, 0x80707a70, - 0xbef60084, 0xbef600ff, - 0x01000000, 0xbefe007c, - 0xbefc0070, 0xc0611c7a, - 0x0000007c, 0xbf8cc07f, - 0x80708470, 0xbefc007e, + 0xbefa0080, 0xb97a0283, + 0xb8fa2407, 0x8e7a9b7a, + 0x876d7a6d, 0xb8fa03c7, + 0x8e7a9a7a, 0x876d7a6d, + 0xb8faf807, 0x867aff7a, + 0x00007fff, 0xb97af807, + 0xbeee007e, 0xbeef007f, + 0xbefe0180, 0xbf900004, + 0x877a8478, 0xb97af802, + 0xbf8e0002, 0xbf88fffe, + 0xb8fa2a05, 0x807a817a, + 0x8e7a8a7a, 0x8e7a817a, + 0xb8fb1605, 0x807b817b, + 0x8e7b867b, 0x807a7b7a, + 0x807a7e7a, 0x827b807f, + 0x867bff7b, 0x0000ffff, + 0xc04b1c3d, 0x00000050, + 0xbf8cc07f, 0xc04b1d3d, + 0x00000060, 0xbf8cc07f, + 0xc0431e7d, 0x00000074, + 0xbf8cc07f, 0xbef4007e, + 0x8675ff7f, 0x0000ffff, + 0x8775ff75, 0x00040000, + 0xbef60080, 0xbef700ff, + 0x00807fac, 0x867aff7f, + 0x08000000, 0x8f7a837a, + 0x87777a77, 0x867aff7f, + 0x70000000, 0x8f7a817a, + 0x87777a77, 0xbef1007c, + 0xbef00080, 0xb8f02a05, + 0x80708170, 0x8e708a70, + 0x8e708170, 0xb8fa1605, + 0x807a817a, 0x8e7a867a, + 0x80707a70, 0xbef60084, + 0xbef600ff, 0x01000000, 0xbefe007c, 0xbefc0070, - 0xc0611b3a, 0x0000007c, + 0xc0611c7a, 0x0000007c, 0xbf8cc07f, 0x80708470, 0xbefc007e, 0xbefe007c, - 0xbefc0070, 0xc0611b7a, + 0xbefc0070, 0xc0611b3a, 0x0000007c, 0xbf8cc07f, 0x80708470, 0xbefc007e, 0xbefe007c, 0xbefc0070, - 0xc0611bba, 0x0000007c, + 0xc0611b7a, 0x0000007c, 0xbf8cc07f, 0x80708470, 0xbefc007e, 0xbefe007c, - 0xbefc0070, 0xc0611bfa, + 0xbefc0070, 0xc0611bba, 0x0000007c, 0xbf8cc07f, 0x80708470, 0xbefc007e, 0xbefe007c, 0xbefc0070, - 0xc0611e3a, 0x0000007c, - 0xbf8cc07f, 0x80708470, - 0xbefc007e, 0xb8fbf803, - 0xbefe007c, 0xbefc0070, - 0xc0611efa, 0x0000007c, + 0xc0611bfa, 0x0000007c, 0xbf8cc07f, 0x80708470, 0xbefc007e, 0xbefe007c, - 0xbefc0070, 0xc0611a3a, + 0xbefc0070, 0xc0611e3a, + 0x0000007c, 0xbf8cc07f, + 0x80708470, 0xbefc007e, + 0xb8fbf803, 0xbefe007c, + 0xbefc0070, 0xc0611efa, 0x0000007c, 0xbf8cc07f, 0x80708470, 0xbefc007e, 0xbefe007c, 0xbefc0070, - 0xc0611a7a, 0x0000007c, - 0xbf8cc07f, 0x80708470, - 0xbefc007e, 0xb8f1f801, - 0xbefe007c, 0xbefc0070, - 0xc0611c7a, 0x0000007c, + 0xc0611a3a, 0x0000007c, 0xbf8cc07f, 0x80708470, - 0xbefc007e, 0x867aff7f, - 0x04000000, 0xbeef0080, - 0x876f6f7a, 0xb8f02a05, - 0x80708170, 0x8e708a70, - 0x8e708170, 0xb8fb1605, - 0x807b817b, 0x8e7b847b, - 0x8e76827b, 0xbef600ff, - 0x01000000, 0xbef20174, - 0x80747074, 0x82758075, - 0xbefc0080, 0xbf800000, - 0xbe802b00, 0xbe822b02, - 0xbe842b04, 0xbe862b06, - 0xbe882b08, 0xbe8a2b0a, - 0xbe8c2b0c, 0xbe8e2b0e, - 0xc06b003a, 0x00000000, - 0xbf8cc07f, 0xc06b013a, - 0x00000010, 0xbf8cc07f, - 0xc06b023a, 0x00000020, - 0xbf8cc07f, 0xc06b033a, - 0x00000030, 
0xbf8cc07f, - 0x8074c074, 0x82758075, - 0x807c907c, 0xbf0a7b7c, - 0xbf85ffe7, 0xbef40172, - 0xbef00080, 0xbefe00c1, - 0xbeff00c1, 0xbee80080, - 0xbee90080, 0xbef600ff, - 0x01000000, 0x867aff78, - 0x00400000, 0xbf850003, - 0xb8faf803, 0x897a7aff, - 0x10000000, 0xbf85004d, - 0xbe840080, 0xd2890000, - 0x00000900, 0x80048104, - 0xd2890001, 0x00000900, - 0x80048104, 0xd2890002, - 0x00000900, 0x80048104, - 0xd2890003, 0x00000900, - 0x80048104, 0xc069003a, - 0x00000070, 0xbf8cc07f, - 0x80709070, 0xbf06c004, - 0xbf84ffee, 0xbe840080, - 0xd2890000, 0x00000901, - 0x80048104, 0xd2890001, - 0x00000901, 0x80048104, - 0xd2890002, 0x00000901, - 0x80048104, 0xd2890003, - 0x00000901, 0x80048104, - 0xc069003a, 0x00000070, - 0xbf8cc07f, 0x80709070, - 0xbf06c004, 0xbf84ffee, - 0xbe840080, 0xd2890000, - 0x00000902, 0x80048104, - 0xd2890001, 0x00000902, - 0x80048104, 0xd2890002, - 0x00000902, 0x80048104, - 0xd2890003, 0x00000902, - 0x80048104, 0xc069003a, - 0x00000070, 0xbf8cc07f, - 0x80709070, 0xbf06c004, - 0xbf84ffee, 0xbe840080, - 0xd2890000, 0x00000903, - 0x80048104, 0xd2890001, - 0x00000903, 0x80048104, - 0xd2890002, 0x00000903, - 0x80048104, 0xd2890003, - 0x00000903, 0x80048104, - 0xc069003a, 0x00000070, - 0xbf8cc07f, 0x80709070, - 0xbf06c004, 0xbf84ffee, - 0xbf820008, 0xe0724000, - 0x701d0000, 0xe0724100, - 0x701d0100, 0xe0724200, - 0x701d0200, 0xe0724300, - 0x701d0300, 0xbefe00c1, - 0xbeff00c1, 0xb8fb4306, - 0x867bc17b, 0xbf840064, - 0xbf8a0000, 0x867aff6f, - 0x04000000, 0xbf840060, - 0x8e7b867b, 0x8e7b827b, - 0xbef6007b, 0xb8f02a05, - 0x80708170, 0x8e708a70, - 0x8e708170, 0xb8fa1605, - 0x807a817a, 0x8e7a867a, - 0x80707a70, 0x8070ff70, - 0x00000080, 0xbef600ff, - 0x01000000, 0xbefc0080, - 0xd28c0002, 0x000100c1, - 0xd28d0003, 0x000204c1, - 0x867aff78, 0x00400000, - 0xbf850003, 0xb8faf803, - 0x897a7aff, 0x10000000, - 0xbf850030, 0x24040682, - 0xd86e4000, 0x00000002, - 0xbf8cc07f, 0xbe840080, - 0xd2890000, 0x00000900, - 0x80048104, 0xd2890001, - 0x00000900, 0x80048104, - 0xd2890002, 0x00000900, - 0x80048104, 0xd2890003, - 0x00000900, 0x80048104, - 0xc069003a, 0x00000070, - 0xbf8cc07f, 0x80709070, - 0xbf06c004, 0xbf84ffee, - 0xbe840080, 0xd2890000, - 0x00000901, 0x80048104, - 0xd2890001, 0x00000901, - 0x80048104, 0xd2890002, - 0x00000901, 0x80048104, - 0xd2890003, 0x00000901, - 0x80048104, 0xc069003a, - 0x00000070, 0xbf8cc07f, - 0x80709070, 0xbf06c004, - 0xbf84ffee, 0x680404ff, - 0x00000200, 0xd0c9006a, - 0x0000f702, 0xbf87ffd2, - 0xbf820015, 0xd1060002, - 0x00011103, 0x7e0602ff, - 0x00000200, 0xbefc00ff, - 0x00010000, 0xbe800077, - 0x8677ff77, 0xff7fffff, - 0x8777ff77, 0x00058000, - 0xd8ec0000, 0x00000002, - 0xbf8cc07f, 0xe0765000, - 0x701d0002, 0x68040702, - 0xd0c9006a, 0x0000f702, - 0xbf87fff7, 0xbef70000, - 0xbef000ff, 0x00000400, + 0xbefc007e, 0xbefe007c, + 0xbefc0070, 0xc0611a7a, + 0x0000007c, 0xbf8cc07f, + 0x80708470, 0xbefc007e, + 0xb8f1f801, 0xbefe007c, + 0xbefc0070, 0xc0611c7a, + 0x0000007c, 0xbf8cc07f, + 0x80708470, 0xbefc007e, + 0x867aff7f, 0x04000000, + 0xbeef0080, 0x876f6f7a, + 0xb8f02a05, 0x80708170, + 0x8e708a70, 0x8e708170, + 0xb8fb1605, 0x807b817b, + 0x8e7b847b, 0x8e76827b, + 0xbef600ff, 0x01000000, + 0xbef20174, 0x80747074, + 0x82758075, 0xbefc0080, + 0xbf800000, 0xbe802b00, + 0xbe822b02, 0xbe842b04, + 0xbe862b06, 0xbe882b08, + 0xbe8a2b0a, 0xbe8c2b0c, + 0xbe8e2b0e, 0xc06b003a, + 0x00000000, 0xbf8cc07f, + 0xc06b013a, 0x00000010, + 0xbf8cc07f, 0xc06b023a, + 0x00000020, 0xbf8cc07f, + 0xc06b033a, 0x00000030, + 0xbf8cc07f, 0x8074c074, + 0x82758075, 0x807c907c, + 0xbf0a7b7c, 0xbf85ffe7, + 
0xbef40172, 0xbef00080, 0xbefe00c1, 0xbeff00c1, - 0xb8fb2a05, 0x807b817b, - 0x8e7b827b, 0x8e76887b, + 0xbee80080, 0xbee90080, 0xbef600ff, 0x01000000, - 0xbefc0084, 0xbf0a7b7c, - 0xbf84006d, 0xbf11017c, - 0x807bff7b, 0x00001000, 0x867aff78, 0x00400000, 0xbf850003, 0xb8faf803, 0x897a7aff, 0x10000000, - 0xbf850051, 0xbe840080, + 0xbf85004d, 0xbe840080, 0xd2890000, 0x00000900, 0x80048104, 0xd2890001, 0x00000900, 0x80048104, @@ -1253,208 +1154,315 @@ static const uint32_t cwsr_trap_arcturus_hex[] = { 0x80048104, 0xc069003a, 0x00000070, 0xbf8cc07f, 0x80709070, 0xbf06c004, - 0xbf84ffee, 0x807c847c, - 0xbf0a7b7c, 0xbf85ffb1, - 0xbf9c0000, 0xbf820012, - 0x7e000300, 0x7e020301, - 0x7e040302, 0x7e060303, + 0xbf84ffee, 0xbf820008, 0xe0724000, 0x701d0000, 0xe0724100, 0x701d0100, 0xe0724200, 0x701d0200, 0xe0724300, 0x701d0300, - 0x807c847c, 0x8070ff70, - 0x00000400, 0xbf0a7b7c, - 0xbf85ffef, 0xbf9c0000, - 0xbefc0080, 0xbf11017c, - 0x867aff78, 0x00400000, - 0xbf850003, 0xb8faf803, - 0x897a7aff, 0x10000000, - 0xbf850059, 0xd3d84000, - 0x18000100, 0xd3d84001, - 0x18000101, 0xd3d84002, - 0x18000102, 0xd3d84003, - 0x18000103, 0xbe840080, - 0xd2890000, 0x00000900, - 0x80048104, 0xd2890001, + 0xbefe00c1, 0xbeff00c1, + 0xb8fb4306, 0x867bc17b, + 0xbf840064, 0xbf8a0000, + 0x867aff6f, 0x04000000, + 0xbf840060, 0x8e7b867b, + 0x8e7b827b, 0xbef6007b, + 0xb8f02a05, 0x80708170, + 0x8e708a70, 0x8e708170, + 0xb8fa1605, 0x807a817a, + 0x8e7a867a, 0x80707a70, + 0x8070ff70, 0x00000080, + 0xbef600ff, 0x01000000, + 0xbefc0080, 0xd28c0002, + 0x000100c1, 0xd28d0003, + 0x000204c1, 0x867aff78, + 0x00400000, 0xbf850003, + 0xb8faf803, 0x897a7aff, + 0x10000000, 0xbf850030, + 0x24040682, 0xd86e4000, + 0x00000002, 0xbf8cc07f, + 0xbe840080, 0xd2890000, 0x00000900, 0x80048104, - 0xd2890002, 0x00000900, - 0x80048104, 0xd2890003, + 0xd2890001, 0x00000900, + 0x80048104, 0xd2890002, 0x00000900, 0x80048104, + 0xd2890003, 0x00000900, + 0x80048104, 0xc069003a, + 0x00000070, 0xbf8cc07f, + 0x80709070, 0xbf06c004, + 0xbf84ffee, 0xbe840080, + 0xd2890000, 0x00000901, + 0x80048104, 0xd2890001, + 0x00000901, 0x80048104, + 0xd2890002, 0x00000901, + 0x80048104, 0xd2890003, + 0x00000901, 0x80048104, 0xc069003a, 0x00000070, 0xbf8cc07f, 0x80709070, 0xbf06c004, 0xbf84ffee, + 0x680404ff, 0x00000200, + 0xd0c9006a, 0x0000f702, + 0xbf87ffd2, 0xbf820015, + 0xd1060002, 0x00011103, + 0x7e0602ff, 0x00000200, + 0xbefc00ff, 0x00010000, + 0xbe800077, 0x8677ff77, + 0xff7fffff, 0x8777ff77, + 0x00058000, 0xd8ec0000, + 0x00000002, 0xbf8cc07f, + 0xe0765000, 0x701d0002, + 0x68040702, 0xd0c9006a, + 0x0000f702, 0xbf87fff7, + 0xbef70000, 0xbef000ff, + 0x00000400, 0xbefe00c1, + 0xbeff00c1, 0xb8fb2a05, + 0x807b817b, 0x8e7b827b, + 0x8e76887b, 0xbef600ff, + 0x01000000, 0xbefc0084, + 0xbf0a7b7c, 0xbf84006d, + 0xbf11017c, 0x807bff7b, + 0x00001000, 0x867aff78, + 0x00400000, 0xbf850003, + 0xb8faf803, 0x897a7aff, + 0x10000000, 0xbf850051, 0xbe840080, 0xd2890000, - 0x00000901, 0x80048104, - 0xd2890001, 0x00000901, + 0x00000900, 0x80048104, + 0xd2890001, 0x00000900, 0x80048104, 0xd2890002, - 0x00000901, 0x80048104, - 0xd2890003, 0x00000901, + 0x00000900, 0x80048104, + 0xd2890003, 0x00000900, 0x80048104, 0xc069003a, 0x00000070, 0xbf8cc07f, 0x80709070, 0xbf06c004, 0xbf84ffee, 0xbe840080, - 0xd2890000, 0x00000902, + 0xd2890000, 0x00000901, 0x80048104, 0xd2890001, - 0x00000902, 0x80048104, - 0xd2890002, 0x00000902, + 0x00000901, 0x80048104, + 0xd2890002, 0x00000901, 0x80048104, 0xd2890003, - 0x00000902, 0x80048104, + 0x00000901, 0x80048104, 0xc069003a, 0x00000070, 0xbf8cc07f, 0x80709070, 
0xbf06c004, 0xbf84ffee, 0xbe840080, 0xd2890000, - 0x00000903, 0x80048104, - 0xd2890001, 0x00000903, + 0x00000902, 0x80048104, + 0xd2890001, 0x00000902, 0x80048104, 0xd2890002, - 0x00000903, 0x80048104, - 0xd2890003, 0x00000903, + 0x00000902, 0x80048104, + 0xd2890003, 0x00000902, 0x80048104, 0xc069003a, 0x00000070, 0xbf8cc07f, 0x80709070, 0xbf06c004, - 0xbf84ffee, 0x807c847c, - 0xbf0a7b7c, 0xbf85ffa9, - 0xbf9c0000, 0xbf820016, + 0xbf84ffee, 0xbe840080, + 0xd2890000, 0x00000903, + 0x80048104, 0xd2890001, + 0x00000903, 0x80048104, + 0xd2890002, 0x00000903, + 0x80048104, 0xd2890003, + 0x00000903, 0x80048104, + 0xc069003a, 0x00000070, + 0xbf8cc07f, 0x80709070, + 0xbf06c004, 0xbf84ffee, + 0x807c847c, 0xbf0a7b7c, + 0xbf85ffb1, 0xbf9c0000, + 0xbf820012, 0x7e000300, + 0x7e020301, 0x7e040302, + 0x7e060303, 0xe0724000, + 0x701d0000, 0xe0724100, + 0x701d0100, 0xe0724200, + 0x701d0200, 0xe0724300, + 0x701d0300, 0x807c847c, + 0x8070ff70, 0x00000400, + 0xbf0a7b7c, 0xbf85ffef, + 0xbf9c0000, 0xbefc0080, + 0xbf11017c, 0x867aff78, + 0x00400000, 0xbf850003, + 0xb8faf803, 0x897a7aff, + 0x10000000, 0xbf850059, 0xd3d84000, 0x18000100, 0xd3d84001, 0x18000101, 0xd3d84002, 0x18000102, 0xd3d84003, 0x18000103, - 0xe0724000, 0x701d0000, - 0xe0724100, 0x701d0100, - 0xe0724200, 0x701d0200, - 0xe0724300, 0x701d0300, - 0x807c847c, 0x8070ff70, - 0x00000400, 0xbf0a7b7c, - 0xbf85ffeb, 0xbf9c0000, - 0xbf820106, 0xbef4007e, - 0x8675ff7f, 0x0000ffff, - 0x8775ff75, 0x00040000, - 0xbef60080, 0xbef700ff, - 0x00807fac, 0x866eff7f, - 0x08000000, 0x8f6e836e, - 0x87776e77, 0x866eff7f, - 0x70000000, 0x8f6e816e, - 0x87776e77, 0x866eff7f, - 0x04000000, 0xbf84001f, - 0xbefe00c1, 0xbeff00c1, - 0xb8ef4306, 0x866fc16f, - 0xbf84001a, 0x8e6f866f, - 0x8e6f826f, 0xbef6006f, - 0xb8f82a05, 0x80788178, - 0x8e788a78, 0x8e788178, - 0xb8ee1605, 0x806e816e, - 0x8e6e866e, 0x80786e78, - 0x8078ff78, 0x00000080, + 0xbe840080, 0xd2890000, + 0x00000900, 0x80048104, + 0xd2890001, 0x00000900, + 0x80048104, 0xd2890002, + 0x00000900, 0x80048104, + 0xd2890003, 0x00000900, + 0x80048104, 0xc069003a, + 0x00000070, 0xbf8cc07f, + 0x80709070, 0xbf06c004, + 0xbf84ffee, 0xbe840080, + 0xd2890000, 0x00000901, + 0x80048104, 0xd2890001, + 0x00000901, 0x80048104, + 0xd2890002, 0x00000901, + 0x80048104, 0xd2890003, + 0x00000901, 0x80048104, + 0xc069003a, 0x00000070, + 0xbf8cc07f, 0x80709070, + 0xbf06c004, 0xbf84ffee, + 0xbe840080, 0xd2890000, + 0x00000902, 0x80048104, + 0xd2890001, 0x00000902, + 0x80048104, 0xd2890002, + 0x00000902, 0x80048104, + 0xd2890003, 0x00000902, + 0x80048104, 0xc069003a, + 0x00000070, 0xbf8cc07f, + 0x80709070, 0xbf06c004, + 0xbf84ffee, 0xbe840080, + 0xd2890000, 0x00000903, + 0x80048104, 0xd2890001, + 0x00000903, 0x80048104, + 0xd2890002, 0x00000903, + 0x80048104, 0xd2890003, + 0x00000903, 0x80048104, + 0xc069003a, 0x00000070, + 0xbf8cc07f, 0x80709070, + 0xbf06c004, 0xbf84ffee, + 0x807c847c, 0xbf0a7b7c, + 0xbf85ffa9, 0xbf9c0000, + 0xbf820016, 0xd3d84000, + 0x18000100, 0xd3d84001, + 0x18000101, 0xd3d84002, + 0x18000102, 0xd3d84003, + 0x18000103, 0xe0724000, + 0x701d0000, 0xe0724100, + 0x701d0100, 0xe0724200, + 0x701d0200, 0xe0724300, + 0x701d0300, 0x807c847c, + 0x8070ff70, 0x00000400, + 0xbf0a7b7c, 0xbf85ffeb, + 0xbf9c0000, 0xbf820106, + 0xbef4007e, 0x8675ff7f, + 0x0000ffff, 0x8775ff75, + 0x00040000, 0xbef60080, + 0xbef700ff, 0x00807fac, + 0x866eff7f, 0x08000000, + 0x8f6e836e, 0x87776e77, + 0x866eff7f, 0x70000000, + 0x8f6e816e, 0x87776e77, + 0x866eff7f, 0x04000000, + 0xbf84001f, 0xbefe00c1, + 0xbeff00c1, 0xb8ef4306, + 0x866fc16f, 0xbf84001a, + 
0x8e6f866f, 0x8e6f826f, + 0xbef6006f, 0xb8f82a05, + 0x80788178, 0x8e788a78, + 0x8e788178, 0xb8ee1605, + 0x806e816e, 0x8e6e866e, + 0x80786e78, 0x8078ff78, + 0x00000080, 0xbef600ff, + 0x01000000, 0xbefc0080, + 0xe0510000, 0x781d0000, + 0xe0510100, 0x781d0000, + 0x807cff7c, 0x00000200, + 0x8078ff78, 0x00000200, + 0xbf0a6f7c, 0xbf85fff6, + 0xbef80080, 0xbefe00c1, + 0xbeff00c1, 0xb8ef2a05, + 0x806f816f, 0x8e6f826f, + 0x8e76886f, 0xbef90076, 0xbef600ff, 0x01000000, - 0xbefc0080, 0xe0510000, - 0x781d0000, 0xe0510100, - 0x781d0000, 0x807cff7c, - 0x00000200, 0x8078ff78, - 0x00000200, 0xbf0a6f7c, - 0xbf85fff6, 0xbef80080, - 0xbefe00c1, 0xbeff00c1, - 0xb8ef2a05, 0x806f816f, - 0x8e6f826f, 0x8e76886f, - 0xbef90076, 0xbef600ff, - 0x01000000, 0xbeee0078, - 0x8078ff78, 0x00000400, - 0xbef30079, 0x8079ff79, - 0x00000400, 0xbefc0084, - 0xbf11087c, 0x806fff6f, - 0x00008000, 0xe0524000, - 0x791d0000, 0xe0524100, - 0x791d0100, 0xe0524200, - 0x791d0200, 0xe0524300, - 0x791d0300, 0x8079ff79, - 0x00000400, 0xbf8c0f70, - 0xd3d94000, 0x18000100, - 0xd3d94001, 0x18000101, - 0xd3d94002, 0x18000102, - 0xd3d94003, 0x18000103, - 0xe0524000, 0x781d0000, - 0xe0524100, 0x781d0100, - 0xe0524200, 0x781d0200, - 0xe0524300, 0x781d0300, - 0xbf8c0f70, 0x7e000300, - 0x7e020301, 0x7e040302, - 0x7e060303, 0x807c847c, - 0x8078ff78, 0x00000400, - 0xbf0a6f7c, 0xbf85ffdb, - 0xbf9c0000, 0xe0524000, - 0x731d0000, 0xe0524100, - 0x731d0100, 0xe0524200, - 0x731d0200, 0xe0524300, - 0x731d0300, 0xbf8c0f70, - 0xd3d94000, 0x18000100, - 0xd3d94001, 0x18000101, - 0xd3d94002, 0x18000102, - 0xd3d94003, 0x18000103, - 0xe0524000, 0x6e1d0000, - 0xe0524100, 0x6e1d0100, - 0xe0524200, 0x6e1d0200, - 0xe0524300, 0x6e1d0300, - 0xb8f82a05, 0x80788178, - 0x8e788a78, 0x8e788178, - 0xb8ee1605, 0x806e816e, - 0x8e6e866e, 0x80786e78, - 0x80f8c078, 0xb8ef1605, - 0x806f816f, 0x8e6f846f, - 0x8e76826f, 0xbef600ff, - 0x01000000, 0xbefc006f, - 0xc031003a, 0x00000078, - 0x80f8c078, 0xbf8cc07f, - 0x80fc907c, 0xbf800000, - 0xbe802d00, 0xbe822d02, - 0xbe842d04, 0xbe862d06, - 0xbe882d08, 0xbe8a2d0a, - 0xbe8c2d0c, 0xbe8e2d0e, - 0xbf06807c, 0xbf84fff0, - 0xb8f82a05, 0x80788178, - 0x8e788a78, 0x8e788178, - 0xb8ee1605, 0x806e816e, - 0x8e6e866e, 0x80786e78, - 0xbef60084, 0xbef600ff, - 0x01000000, 0xc0211bfa, + 0xbeee0078, 0x8078ff78, + 0x00000400, 0xbef30079, + 0x8079ff79, 0x00000400, + 0xbefc0084, 0xbf11087c, + 0x806fff6f, 0x00008000, + 0xe0524000, 0x791d0000, + 0xe0524100, 0x791d0100, + 0xe0524200, 0x791d0200, + 0xe0524300, 0x791d0300, + 0x8079ff79, 0x00000400, + 0xbf8c0f70, 0xd3d94000, + 0x18000100, 0xd3d94001, + 0x18000101, 0xd3d94002, + 0x18000102, 0xd3d94003, + 0x18000103, 0xe0524000, + 0x781d0000, 0xe0524100, + 0x781d0100, 0xe0524200, + 0x781d0200, 0xe0524300, + 0x781d0300, 0xbf8c0f70, + 0x7e000300, 0x7e020301, + 0x7e040302, 0x7e060303, + 0x807c847c, 0x8078ff78, + 0x00000400, 0xbf0a6f7c, + 0xbf85ffdb, 0xbf9c0000, + 0xe0524000, 0x731d0000, + 0xe0524100, 0x731d0100, + 0xe0524200, 0x731d0200, + 0xe0524300, 0x731d0300, + 0xbf8c0f70, 0xd3d94000, + 0x18000100, 0xd3d94001, + 0x18000101, 0xd3d94002, + 0x18000102, 0xd3d94003, + 0x18000103, 0xe0524000, + 0x6e1d0000, 0xe0524100, + 0x6e1d0100, 0xe0524200, + 0x6e1d0200, 0xe0524300, + 0x6e1d0300, 0xb8f82a05, + 0x80788178, 0x8e788a78, + 0x8e788178, 0xb8ee1605, + 0x806e816e, 0x8e6e866e, + 0x80786e78, 0x80f8c078, + 0xb8ef1605, 0x806f816f, + 0x8e6f846f, 0x8e76826f, + 0xbef600ff, 0x01000000, + 0xbefc006f, 0xc031003a, + 0x00000078, 0x80f8c078, + 0xbf8cc07f, 0x80fc907c, + 0xbf800000, 0xbe802d00, + 0xbe822d02, 0xbe842d04, + 0xbe862d06, 
0xbe882d08, + 0xbe8a2d0a, 0xbe8c2d0c, + 0xbe8e2d0e, 0xbf06807c, + 0xbf84fff0, 0xb8f82a05, + 0x80788178, 0x8e788a78, + 0x8e788178, 0xb8ee1605, + 0x806e816e, 0x8e6e866e, + 0x80786e78, 0xbef60084, + 0xbef600ff, 0x01000000, + 0xc0211bfa, 0x00000078, + 0x80788478, 0xc0211b3a, 0x00000078, 0x80788478, - 0xc0211b3a, 0x00000078, - 0x80788478, 0xc0211b7a, + 0xc0211b7a, 0x00000078, + 0x80788478, 0xc0211c3a, 0x00000078, 0x80788478, - 0xc0211c3a, 0x00000078, - 0x80788478, 0xc0211c7a, + 0xc0211c7a, 0x00000078, + 0x80788478, 0xc0211eba, 0x00000078, 0x80788478, - 0xc0211eba, 0x00000078, - 0x80788478, 0xc0211efa, + 0xc0211efa, 0x00000078, + 0x80788478, 0xc0211a3a, 0x00000078, 0x80788478, - 0xc0211a3a, 0x00000078, - 0x80788478, 0xc0211a7a, + 0xc0211a7a, 0x00000078, + 0x80788478, 0xc0211cfa, 0x00000078, 0x80788478, - 0xc0211cfa, 0x00000078, - 0x80788478, 0xbf8cc07f, - 0xbefc006f, 0xbefe0070, - 0xbeff0071, 0x866f7bff, - 0x000003ff, 0xb96f4803, - 0x866f7bff, 0xfffff800, - 0x8f6f8b6f, 0xb96fa2c3, - 0xb973f801, 0xb8ee2a05, - 0x806e816e, 0x8e6e8a6e, - 0x8e6e816e, 0xb8ef1605, - 0x806f816f, 0x8e6f866f, - 0x806e6f6e, 0x806e746e, - 0x826f8075, 0x866fff6f, - 0x0000ffff, 0xc00b1c37, - 0x00000050, 0xc00b1d37, - 0x00000060, 0xc0031e77, - 0x00000074, 0xbf8cc07f, - 0x866fff6d, 0xf8000000, - 0x8f6f9b6f, 0x8e6f906f, - 0xbeee0080, 0x876e6f6e, - 0x866fff6d, 0x04000000, - 0x8f6f9a6f, 0x8e6f8f6f, - 0x876e6f6e, 0x866fff7a, - 0x00800000, 0x8f6f976f, - 0xb96ef807, 0x866dff6d, - 0x0000ffff, 0x86fe7e7e, - 0x86ea6a6a, 0x8f6e837a, - 0xb96ee0c2, 0xbf800002, - 0xb97a0002, 0xbf8a0000, - 0x95806f6c, 0xbf810000, + 0xbf8cc07f, 0xbefc006f, + 0xbefe0070, 0xbeff0071, + 0x866f7bff, 0x000003ff, + 0xb96f4803, 0x866f7bff, + 0xfffff800, 0x8f6f8b6f, + 0xb96fa2c3, 0xb973f801, + 0xb8ee2a05, 0x806e816e, + 0x8e6e8a6e, 0x8e6e816e, + 0xb8ef1605, 0x806f816f, + 0x8e6f866f, 0x806e6f6e, + 0x806e746e, 0x826f8075, + 0x866fff6f, 0x0000ffff, + 0xc00b1c37, 0x00000050, + 0xc00b1d37, 0x00000060, + 0xc0031e77, 0x00000074, + 0xbf8cc07f, 0x866fff6d, + 0xf8000000, 0x8f6f9b6f, + 0x8e6f906f, 0xbeee0080, + 0x876e6f6e, 0x866fff6d, + 0x04000000, 0x8f6f9a6f, + 0x8e6f8f6f, 0x876e6f6e, + 0x866fff7a, 0x00800000, + 0x8f6f976f, 0xb96ef807, + 0x866dff6d, 0x0000ffff, + 0x86fe7e7e, 0x86ea6a6a, + 0x8f6e837a, 0xb96ee0c2, + 0xbf800002, 0xb97a0002, + 0xbf8a0000, 0x95806f6c, + 0xbf810000, 0x00000000, }; diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm index 4d146bca0b05..cee4cfd5182d 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm @@ -95,6 +95,7 @@ var USE_MTBUF_INSTEAD_OF_MUBUF = 0 //because TC EMU currently asserts o var SWIZZLE_EN = 0 //whether we use swizzled buffer addressing var ACK_SQC_STORE = 1 //workaround for suspected SQC store bug causing incorrect stores under concurrency var SAVE_AFTER_XNACK_ERROR = 1 //workaround for TCP store failure after XNACK error when ALLOW_REPLAY=0, for debugger +var SINGLE_STEP_MISSED_WORKAROUND = 1 //workaround for lost MODE.DEBUG_EN exception when SAVECTX raised /**************************************************************************/ /* variables */ @@ -136,6 +137,8 @@ var SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT = 15 //FIXME var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK = 0x1F8000 var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG = 0x00007FFF //FIXME +var SQ_WAVE_MODE_DEBUG_EN_MASK = 0x800 + var SQ_BUF_RSRC_WORD1_ATC_SHIFT = 24 var SQ_BUF_RSRC_WORD3_MTYPE_SHIFT = 27 @@ -253,6 +256,23 @@ L_SKIP_RESTORE: 
s_getreg_b32 s_save_status, hwreg(HW_REG_STATUS) //save STATUS since we will change SCC s_andn2_b32 s_save_status, s_save_status, SQ_WAVE_STATUS_SPI_PRIO_MASK //check whether this is for save + +if SINGLE_STEP_MISSED_WORKAROUND + // No single step exceptions if MODE.DEBUG_EN=0. + s_getreg_b32 ttmp2, hwreg(HW_REG_MODE) + s_and_b32 ttmp2, ttmp2, SQ_WAVE_MODE_DEBUG_EN_MASK + s_cbranch_scc0 L_NO_SINGLE_STEP_WORKAROUND + + // Second-level trap already handled exception if STATUS.HALT=1. + s_and_b32 ttmp2, s_save_status, SQ_WAVE_STATUS_HALT_MASK + + // Prioritize single step exception over context save. + // Second-level trap will halt wave and RFE, re-entering for SAVECTX. + s_cbranch_scc0 L_FETCH_2ND_TRAP + +L_NO_SINGLE_STEP_WORKAROUND: +end + s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS) s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_SAVECTX_MASK //check whether this is for save s_cbranch_scc1 L_SAVE //this is the operation for save -- cgit v1.2.3 From a36e89674000a2663421da57391ad5897081a2e1 Mon Sep 17 00:00:00 2001 From: Jay Cornwall Date: Wed, 24 Jul 2019 12:23:42 -0500 Subject: drm/amdkfd: Replace gfx10 trap handler with correct branch Previously submitted code was taken from an incorrect branch and was non-functional. Cc: Oak Zeng Signed-off-by: Jay Cornwall Acked-by: Alex Deucher Reviewed-By: Oak Zeng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h | 553 +++--- .../gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm | 1978 +++++++++----------- 2 files changed, 1220 insertions(+), 1311 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h index 427594035597..2b3d7017f142 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h @@ -680,24 +680,47 @@ static const uint32_t cwsr_trap_gfx9_hex[] = { }; static const uint32_t cwsr_trap_gfx10_hex[] = { - 0xbf820001, 0xbf82012e, - 0xb0804004, 0xb970f802, - 0x8a708670, 0xb971f803, - 0x8771ff71, 0x00000400, - 0xbf850008, 0xb971f803, - 0x8771ff71, 0x000001ff, - 0xbf850001, 0x806c846c, + 0xbf820001, 0xbf8201b2, + 0xb0804004, 0xb978f802, + 0x8a788678, 0xb971f803, + 0x876eff71, 0x00000400, + 0xbf850033, 0x876eff71, + 0x00000100, 0xbf840002, + 0x8878ff78, 0x00002000, + 0x8a77ff77, 0xff000000, + 0xb96ef807, 0x876fff6e, + 0x02000000, 0x8f6f866f, + 0x88776f77, 0x876fff6e, + 0x003f8000, 0x8f6f896f, + 0x88776f77, 0x8a6eff6e, + 0x023f8000, 0xb9eef807, + 0xb970f812, 0xb971f813, + 0x8ff08870, 0xf4051bb8, + 0xfa000000, 0xbf8cc07f, + 0xf4051c38, 0xfa000008, + 0xbf8cc07f, 0x87ee6e6e, + 0xbf840001, 0xbe80206e, + 0xb971f803, 0x8771ff71, + 0x000001ff, 0xbf850002, + 0x806c846c, 0x826d806d, + 0x876dff6d, 0x0000ffff, + 0x906e8977, 0x876fff6e, + 0x003f8000, 0x906e8677, + 0x876eff6e, 0x02000000, + 0x886e6f6e, 0xb9eef807, + 0x87fe7e7e, 0x87ea6a6a, + 0xb9f8f802, 0xbe80226c, + 0xb971f803, 0x8771ff71, + 0x00000100, 0xbf840006, + 0xbef60380, 0xb9f60203, 0x876dff6d, 0x0000ffff, - 0xbe80226c, 0xb971f803, - 0x8771ff71, 0x00000100, - 0xbf840006, 0xbef60380, - 0xb9f60203, 0x876dff6d, - 0x0000ffff, 0x80ec886c, - 0x82ed806d, 0xbef60380, - 0xb9f60283, 0xb973f816, - 0xb9762c07, 0x8f769c76, - 0x886d766d, 0xb97603c7, - 0x8f769b76, 0x886d766d, + 0x80ec886c, 0x82ed806d, + 0xbef60380, 0xb9f60283, + 0xb972f816, 0xb9762c07, + 0x8f769a76, 0x886d766d, + 0xb97603c7, 0x8f769976, + 0x886d766d, 0xb9760647, + 0x8f769876, 0x886d766d, 0xb976f807, 0x8776ff76, 0x00007fff, 0xb9f6f807, 0xbeee037e, 0xbeef037f, @@ -706,32 
+729,167 @@ static const uint32_t cwsr_trap_gfx10_hex[] = { 0xbef4037e, 0x8775ff7f, 0x0000ffff, 0x8875ff75, 0x00040000, 0xbef60380, - 0xbef703ff, 0x00807fac, + 0xbef703ff, 0x10807fac, 0x8776ff7f, 0x08000000, 0x90768376, 0x88777677, 0x8776ff7f, 0x70000000, 0x90768176, 0x88777677, 0xbefb037c, 0xbefa0380, - 0xb97202dc, 0x8872727f, - 0xbefe03c1, 0x877c8172, + 0xb97302dc, 0x8f739973, + 0x8873737f, 0xb97a2a05, + 0x807a817a, 0x907c9973, + 0x877c817c, 0xbf06817c, + 0xbf850002, 0x8f7a897a, + 0xbf820001, 0x8f7a8a7a, + 0xb9761e06, 0x8f768a76, + 0x807a767a, 0x807aff7a, + 0x00000200, 0xbef603ff, + 0x01000000, 0xbefe037c, + 0xbefc037a, 0xf4611efa, + 0xf8000000, 0x807a847a, + 0xbefc037e, 0xbefe037c, + 0xbefc037a, 0xf4611b3a, + 0xf8000000, 0x807a847a, + 0xbefc037e, 0xbefe037c, + 0xbefc037a, 0xf4611b7a, + 0xf8000000, 0x807a847a, + 0xbefc037e, 0xbefe037c, + 0xbefc037a, 0xf4611bba, + 0xf8000000, 0x807a847a, + 0xbefc037e, 0xbefe037c, + 0xbefc037a, 0xf4611bfa, + 0xf8000000, 0x807a847a, + 0xbefc037e, 0xbefe037c, + 0xbefc037a, 0xf4611e3a, + 0xf8000000, 0x807a847a, + 0xbefc037e, 0xb971f803, + 0xbefe037c, 0xbefc037a, + 0xf4611c7a, 0xf8000000, + 0x807a847a, 0xbefc037e, + 0xbefe037c, 0xbefc037a, + 0xf4611cba, 0xf8000000, + 0x807a847a, 0xbefc037e, + 0xb97bf801, 0xbefe037c, + 0xbefc037a, 0xf4611efa, + 0xf8000000, 0x807a847a, + 0xbefc037e, 0x8776ff7f, + 0x04000000, 0xbeef0380, + 0x886f6f76, 0xb97a2a05, + 0x807a817a, 0x907c9973, + 0x877c817c, 0xbf06817c, + 0xbf850002, 0x8f7a897a, + 0xbf820001, 0x8f7a8a7a, + 0xb9761e06, 0x8f768a76, + 0x807a767a, 0xbef603ff, + 0x01000000, 0xbef20374, + 0x80747a74, 0x82758075, + 0xbefc0380, 0xbf800000, + 0xbe802f00, 0xbe822f02, + 0xbe842f04, 0xbe862f06, + 0xbe882f08, 0xbe8a2f0a, + 0xbe8c2f0c, 0xbe8e2f0e, + 0xf469003a, 0xfa000000, + 0xf469013a, 0xfa000010, + 0xf469023a, 0xfa000020, + 0xf469033a, 0xfa000030, + 0x8074c074, 0x82758075, + 0x807c907c, 0xbf0aff7c, + 0x00000060, 0xbf85ffea, + 0xbe802f00, 0xbe822f02, + 0xbe842f04, 0xbe862f06, + 0xbe882f08, 0xf469003a, + 0xfa000000, 0xf469013a, + 0xfa000010, 0xf465023a, + 0xfa000020, 0x8074c074, + 0x82758075, 0xbef40372, + 0xbefa0380, 0xbefe03c1, + 0x907c9973, 0x877c817c, 0xbf06817c, 0xbf850002, - 0xbeff0380, 0xbf820001, - 0xbeff03c1, 0xb9712a05, - 0x80718171, 0x8f718271, - 0x877c8172, 0xbf06817c, - 0xbf85000d, 0x8f768771, + 0xbeff0380, 0xbf820002, + 0xbeff03c1, 0xbf82000b, 0xbef603ff, 0x01000000, - 0xbefc0380, 0x7e008700, 0xe0704000, 0x7a5d0000, - 0x807c817c, 0x807aff7a, - 0x00000080, 0xbf0a717c, - 0xbf85fff8, 0xbf82001b, - 0x8f768871, 0xbef603ff, - 0x01000000, 0xbefc0380, - 0x7e008700, 0xe0704000, - 0x7a5d0000, 0x807c817c, - 0x807aff7a, 0x00000100, - 0xbf0a717c, 0xbf85fff8, + 0xe0704080, 0x7a5d0100, + 0xe0704100, 0x7a5d0200, + 0xe0704180, 0x7a5d0300, + 0xbf82000a, 0xbef603ff, + 0x01000000, 0xe0704000, + 0x7a5d0000, 0xe0704100, + 0x7a5d0100, 0xe0704200, + 0x7a5d0200, 0xe0704300, + 0x7a5d0300, 0xbefe03c1, + 0x907c9973, 0x877c817c, + 0xbf06817c, 0xbf850002, + 0xbeff0380, 0xbf820001, + 0xbeff03c1, 0xb9714306, + 0x8771c171, 0xbf840046, + 0xbf8a0000, 0x8776ff6f, + 0x04000000, 0xbf840042, + 0x8f718671, 0x8f718271, + 0xbef60371, 0xb97a2a05, + 0x807a817a, 0x907c9973, + 0x877c817c, 0xbf06817c, + 0xbf850002, 0x8f7a897a, + 0xbf820001, 0x8f7a8a7a, + 0xb9761e06, 0x8f768a76, + 0x807a767a, 0x807aff7a, + 0x00000200, 0x807aff7a, + 0x00000080, 0xbef603ff, + 0x01000000, 0xd7650000, + 0x000100c1, 0xd7660000, + 0x000200c1, 0x16000084, + 0x907c9973, 0x877c817c, + 0xbf06817c, 0xbefc0380, + 0xbf850012, 0xbe8303ff, + 0x00000080, 0xbf800000, + 0xbf800000, 0xbf800000, + 
0xd8d80000, 0x01000000, + 0xbf8c0000, 0xe0704000, + 0x7a5d0100, 0x807c037c, + 0x807a037a, 0xd5250000, + 0x0001ff00, 0x00000080, + 0xbf0a717c, 0xbf85fff4, + 0xbf820011, 0xbe8303ff, + 0x00000100, 0xbf800000, + 0xbf800000, 0xbf800000, + 0xd8d80000, 0x01000000, + 0xbf8c0000, 0xe0704000, + 0x7a5d0100, 0x807c037c, + 0x807a037a, 0xd5250000, + 0x0001ff00, 0x00000100, + 0xbf0a717c, 0xbf85fff4, + 0xbefe03c1, 0x907c9973, + 0x877c817c, 0xbf06817c, + 0xbf850004, 0xbefa03ff, + 0x00000200, 0xbeff0380, + 0xbf820003, 0xbefa03ff, + 0x00000400, 0xbeff03c1, + 0xb9712a05, 0x80718171, + 0x8f718271, 0x907c9973, + 0x877c817c, 0xbf06817c, + 0xbf850017, 0xbef603ff, + 0x01000000, 0xbefc0384, + 0xbf0a717c, 0xbf840037, + 0x7e008700, 0x7e028701, + 0x7e048702, 0x7e068703, + 0xe0704000, 0x7a5d0000, + 0xe0704080, 0x7a5d0100, + 0xe0704100, 0x7a5d0200, + 0xe0704180, 0x7a5d0300, + 0x807c847c, 0x807aff7a, + 0x00000200, 0xbf0a717c, + 0xbf85ffef, 0xbf820025, + 0xbef603ff, 0x01000000, + 0xbefc0384, 0xbf0a717c, + 0xbf840020, 0x7e008700, + 0x7e028701, 0x7e048702, + 0x7e068703, 0xe0704000, + 0x7a5d0000, 0xe0704100, + 0x7a5d0100, 0xe0704200, + 0x7a5d0200, 0xe0704300, + 0x7a5d0300, 0x807c847c, + 0x807aff7a, 0x00000400, + 0xbf0a717c, 0xbf85ffef, 0xb9711e06, 0x8771c171, 0xbf84000c, 0x8f718371, 0x80717c71, 0xbefe03c1, @@ -739,133 +897,82 @@ static const uint32_t cwsr_trap_gfx10_hex[] = { 0xe0704000, 0x7a5d0000, 0x807c817c, 0x807aff7a, 0x00000080, 0xbf0a717c, - 0xbf85fff8, 0xbf8a0000, - 0x8776ff72, 0x04000000, - 0xbf84002b, 0xbefe03c1, - 0x877c8172, 0xbf06817c, - 0xbf850002, 0xbeff0380, - 0xbf820001, 0xbeff03c1, - 0xb9714306, 0x8771c171, - 0xbf840021, 0x8f718671, - 0x8f718271, 0xbef60371, - 0xbef603ff, 0x01000000, - 0xd7650000, 0x000100c1, - 0xd7660000, 0x000200c1, - 0x16000084, 0x877c8172, - 0xbf06817c, 0xbefc0380, - 0xbf85000a, 0x807cff7c, - 0x00000080, 0x807aff7a, - 0x00000080, 0xd5250000, - 0x0001ff00, 0x00000080, - 0xbf0a717c, 0xbf85fff7, - 0xbf820009, 0x807cff7c, - 0x00000100, 0x807aff7a, - 0x00000100, 0xd5250000, - 0x0001ff00, 0x00000100, - 0xbf0a717c, 0xbf85fff7, - 0x877c8172, 0xbf06817c, - 0xbf850003, 0x8f7687ff, - 0x0000006a, 0xbf820002, - 0x8f7688ff, 0x0000006a, - 0xbef603ff, 0x01000000, - 0x877c8172, 0xbf06817c, - 0xbefc0380, 0xbf800000, - 0xbf85000b, 0xbe802e00, - 0x7e000200, 0xe0704000, - 0x7a5d0000, 0x807aff7a, - 0x00000080, 0x807c817c, - 0xbf0aff7c, 0x0000006a, - 0xbf85fff6, 0xbf82000a, - 0xbe802e00, 0x7e000200, - 0xe0704000, 0x7a5d0000, - 0x807aff7a, 0x00000100, - 0x807c817c, 0xbf0aff7c, - 0x0000006a, 0xbf85fff6, - 0xbef60384, 0xbef603ff, - 0x01000000, 0x877c8172, - 0xbf06817c, 0xbf850030, - 0x7e00027b, 0xe0704000, - 0x7a5d0000, 0x807aff7a, - 0x00000080, 0x7e00026c, - 0xe0704000, 0x7a5d0000, - 0x807aff7a, 0x00000080, - 0x7e00026d, 0xe0704000, - 0x7a5d0000, 0x807aff7a, - 0x00000080, 0x7e00026e, - 0xe0704000, 0x7a5d0000, - 0x807aff7a, 0x00000080, - 0x7e00026f, 0xe0704000, - 0x7a5d0000, 0x807aff7a, - 0x00000080, 0x7e000270, - 0xe0704000, 0x7a5d0000, - 0x807aff7a, 0x00000080, - 0xb971f803, 0x7e000271, - 0xe0704000, 0x7a5d0000, - 0x807aff7a, 0x00000080, - 0x7e000273, 0xe0704000, - 0x7a5d0000, 0x807aff7a, - 0x00000080, 0xb97bf801, - 0x7e00027b, 0xe0704000, - 0x7a5d0000, 0x807aff7a, - 0x00000080, 0xbf82002f, - 0x7e00027b, 0xe0704000, - 0x7a5d0000, 0x807aff7a, - 0x00000100, 0x7e00026c, - 0xe0704000, 0x7a5d0000, - 0x807aff7a, 0x00000100, - 0x7e00026d, 0xe0704000, - 0x7a5d0000, 0x807aff7a, - 0x00000100, 0x7e00026e, - 0xe0704000, 0x7a5d0000, - 0x807aff7a, 0x00000100, - 0x7e00026f, 0xe0704000, - 0x7a5d0000, 0x807aff7a, - 0x00000100, 
0x7e000270, - 0xe0704000, 0x7a5d0000, - 0x807aff7a, 0x00000100, - 0xb971f803, 0x7e000271, - 0xe0704000, 0x7a5d0000, - 0x807aff7a, 0x00000100, - 0x7e000273, 0xe0704000, - 0x7a5d0000, 0x807aff7a, - 0x00000100, 0xb97bf801, - 0x7e00027b, 0xe0704000, - 0x7a5d0000, 0x807aff7a, - 0x00000100, 0xbf820119, + 0xbf85fff8, 0xbf820138, 0xbef4037e, 0x8775ff7f, 0x0000ffff, 0x8875ff75, 0x00040000, 0xbef60380, - 0xbef703ff, 0x00807fac, + 0xbef703ff, 0x10807fac, 0x8772ff7f, 0x08000000, 0x90728372, 0x88777277, 0x8772ff7f, 0x70000000, 0x90728172, 0x88777277, - 0xb97902dc, 0x8879797f, - 0xbef80380, 0xbefe03c1, - 0x877c8179, 0xbf06817c, + 0xb97302dc, 0x8f739973, + 0x8873737f, 0x8772ff7f, + 0x04000000, 0xbf840036, + 0xbefe03c1, 0x907c9973, + 0x877c817c, 0xbf06817c, 0xbf850002, 0xbeff0380, 0xbf820001, 0xbeff03c1, - 0xb96f2a05, 0x806f816f, - 0x8f6f826f, 0x877c8179, - 0xbf06817c, 0xbf850013, - 0x8f76876f, 0xbef603ff, - 0x01000000, 0xbef20378, - 0x8078ff78, 0x00000080, - 0xbefc0381, 0xe0304000, - 0x785d0000, 0xbf8c3f70, - 0x7e008500, 0x807c817c, + 0xb96f4306, 0x876fc16f, + 0xbf84002b, 0x8f6f866f, + 0x8f6f826f, 0xbef6036f, + 0xb9782a05, 0x80788178, + 0x907c9973, 0x877c817c, + 0xbf06817c, 0xbf850002, + 0x8f788978, 0xbf820001, + 0x8f788a78, 0xb9721e06, + 0x8f728a72, 0x80787278, + 0x8078ff78, 0x00000200, 0x8078ff78, 0x00000080, - 0xbf0a6f7c, 0xbf85fff7, - 0xe0304000, 0x725d0000, - 0xbf820023, 0x8f76886f, + 0xbef603ff, 0x01000000, + 0x907c9973, 0x877c817c, + 0xbf06817c, 0xbefc0380, + 0xbf850009, 0xe0310000, + 0x781d0000, 0x807cff7c, + 0x00000080, 0x8078ff78, + 0x00000080, 0xbf0a6f7c, + 0xbf85fff8, 0xbf820008, + 0xe0310000, 0x781d0000, + 0x807cff7c, 0x00000100, + 0x8078ff78, 0x00000100, + 0xbf0a6f7c, 0xbf85fff8, + 0xbef80380, 0xbefe03c1, + 0x907c9973, 0x877c817c, + 0xbf06817c, 0xbf850002, + 0xbeff0380, 0xbf820001, + 0xbeff03c1, 0xb96f2a05, + 0x806f816f, 0x8f6f826f, + 0x907c9973, 0x877c817c, + 0xbf06817c, 0xbf850021, 0xbef603ff, 0x01000000, 0xbef20378, 0x8078ff78, - 0x00000100, 0xbefc0381, + 0x00000200, 0xbefc0384, 0xe0304000, 0x785d0000, + 0xe0304080, 0x785d0100, + 0xe0304100, 0x785d0200, + 0xe0304180, 0x785d0300, 0xbf8c3f70, 0x7e008500, - 0x807c817c, 0x8078ff78, - 0x00000100, 0xbf0a6f7c, - 0xbf85fff7, 0xb96f1e06, + 0x7e028501, 0x7e048502, + 0x7e068503, 0x807c847c, + 0x8078ff78, 0x00000200, + 0xbf0a6f7c, 0xbf85ffee, + 0xe0304000, 0x725d0000, + 0xe0304080, 0x725d0100, + 0xe0304100, 0x725d0200, + 0xe0304180, 0x725d0300, + 0xbf820031, 0xbef603ff, + 0x01000000, 0xbef20378, + 0x8078ff78, 0x00000400, + 0xbefc0384, 0xe0304000, + 0x785d0000, 0xe0304100, + 0x785d0100, 0xe0304200, + 0x785d0200, 0xe0304300, + 0x785d0300, 0xbf8c3f70, + 0x7e008500, 0x7e028501, + 0x7e048502, 0x7e068503, + 0x807c847c, 0x8078ff78, + 0x00000400, 0xbf0a6f7c, + 0xbf85ffee, 0xb96f1e06, 0x876fc16f, 0xbf84000e, 0x8f6f836f, 0x806f7c6f, 0xbefe03c1, 0xbeff0380, @@ -875,107 +982,81 @@ static const uint32_t cwsr_trap_gfx10_hex[] = { 0x00000080, 0xbf0a6f7c, 0xbf85fff7, 0xbeff03c1, 0xe0304000, 0x725d0000, - 0x8772ff79, 0x04000000, - 0xbf840020, 0xbefe03c1, - 0x877c8179, 0xbf06817c, - 0xbf850002, 0xbeff0380, - 0xbf820001, 0xbeff03c1, - 0xb96f4306, 0x876fc16f, - 0xbf840016, 0x8f6f866f, - 0x8f6f826f, 0xbef6036f, - 0xbef603ff, 0x01000000, - 0x877c8172, 0xbf06817c, - 0xbefc0380, 0xbf850007, - 0x807cff7c, 0x00000080, - 0x8078ff78, 0x00000080, - 0xbf0a6f7c, 0xbf85fffa, - 0xbf820006, 0x807cff7c, - 0x00000100, 0x8078ff78, - 0x00000100, 0xbf0a6f7c, - 0xbf85fffa, 0x877c8179, - 0xbf06817c, 0xbf850003, - 0x8f7687ff, 0x0000006a, - 0xbf820002, 0x8f7688ff, - 0x0000006a, 0xbef603ff, - 
0x01000000, 0x877c8179, - 0xbf06817c, 0xbf850012, - 0xf4211cba, 0xf0000000, - 0x8078ff78, 0x00000080, - 0xbefc0381, 0xf421003a, - 0xf0000000, 0x8078ff78, - 0x00000080, 0xbf8cc07f, - 0xbe803000, 0xbf800000, - 0x807c817c, 0xbf0aff7c, - 0x0000006a, 0xbf85fff5, - 0xbe800372, 0xbf820011, - 0xf4211cba, 0xf0000000, - 0x8078ff78, 0x00000100, - 0xbefc0381, 0xf421003a, - 0xf0000000, 0x8078ff78, - 0x00000100, 0xbf8cc07f, - 0xbe803000, 0xbf800000, - 0x807c817c, 0xbf0aff7c, - 0x0000006a, 0xbf85fff5, - 0xbe800372, 0xbef60384, + 0xe0304080, 0x725d0100, + 0xe0304100, 0x725d0200, + 0xe0304180, 0x725d0300, + 0xb9782a05, 0x80788178, + 0x907c9973, 0x877c817c, + 0xbf06817c, 0xbf850002, + 0x8f788978, 0xbf820001, + 0x8f788a78, 0xb9721e06, + 0x8f728a72, 0x80787278, + 0x8078ff78, 0x00000200, + 0x80f8ff78, 0x00000058, + 0x80f88878, 0xbef603ff, + 0x01000000, 0xbefc03ff, + 0x0000006a, 0xf425003a, + 0xf0000000, 0x80f8a078, + 0xbf8cc07f, 0x80fc827c, + 0xbf800000, 0xbe803100, + 0xf42d003a, 0xf0000000, + 0x80f8c078, 0xbf8cc07f, + 0x80fc887c, 0xbf800000, + 0xbe803100, 0xbe823102, + 0xbe843104, 0xbe863106, + 0xf431003a, 0xf0000000, + 0x80f8c078, 0xbf8cc07f, + 0x80fc907c, 0xbf800000, + 0xbe803100, 0xbe823102, + 0xbe843104, 0xbe863106, + 0xbe883108, 0xbe8a310a, + 0xbe8c310c, 0xbe8e310e, + 0xbf06807c, 0xbf84fff0, + 0xb9782a05, 0x80788178, + 0x907c9973, 0x877c817c, + 0xbf06817c, 0xbf850002, + 0x8f788978, 0xbf820001, + 0x8f788a78, 0xb9721e06, + 0x8f728a72, 0x80787278, + 0x8078ff78, 0x00000200, 0xbef603ff, 0x01000000, - 0x877c8179, 0xbf06817c, - 0xbf850025, 0xf4211bfa, - 0xf0000000, 0x8078ff78, - 0x00000080, 0xf4211b3a, - 0xf0000000, 0x8078ff78, - 0x00000080, 0xf4211b7a, - 0xf0000000, 0x8078ff78, - 0x00000080, 0xf4211eba, - 0xf0000000, 0x8078ff78, - 0x00000080, 0xf4211efa, - 0xf0000000, 0x8078ff78, - 0x00000080, 0xf4211c3a, - 0xf0000000, 0x8078ff78, - 0x00000080, 0xf4211c7a, - 0xf0000000, 0x8078ff78, - 0x00000080, 0xf4211cfa, - 0xf0000000, 0x8078ff78, - 0x00000080, 0xf4211e7a, - 0xf0000000, 0x8078ff78, - 0x00000080, 0xbf820024, 0xf4211bfa, 0xf0000000, - 0x8078ff78, 0x00000100, - 0xf4211b3a, 0xf0000000, - 0x8078ff78, 0x00000100, + 0x80788478, 0xf4211b3a, + 0xf0000000, 0x80788478, 0xf4211b7a, 0xf0000000, - 0x8078ff78, 0x00000100, - 0xf4211eba, 0xf0000000, - 0x8078ff78, 0x00000100, + 0x80788478, 0xf4211eba, + 0xf0000000, 0x80788478, 0xf4211efa, 0xf0000000, - 0x8078ff78, 0x00000100, - 0xf4211c3a, 0xf0000000, - 0x8078ff78, 0x00000100, + 0x80788478, 0xf4211c3a, + 0xf0000000, 0x80788478, 0xf4211c7a, 0xf0000000, - 0x8078ff78, 0x00000100, + 0x80788478, 0xf4211e7a, + 0xf0000000, 0x80788478, 0xf4211cfa, 0xf0000000, - 0x8078ff78, 0x00000100, - 0xf4211e7a, 0xf0000000, - 0x8078ff78, 0x00000100, - 0xbf8cc07f, 0x876dff6d, + 0x80788478, 0xbf8cc07f, + 0xbef2036d, 0x876dff72, 0x0000ffff, 0xbefc036f, 0xbefe037a, 0xbeff037b, 0x876f71ff, 0x000003ff, - 0xb9ef4803, 0xb9f3f816, + 0xb9ef4803, 0xb9f9f816, 0x876f71ff, 0xfffff800, 0x906f8b6f, 0xb9efa2c3, - 0xb9f9f801, 0x876fff6d, - 0xf0000000, 0x906f9c6f, - 0x8f6f906f, 0xbef20380, - 0x88726f72, 0x876fff6d, - 0x08000000, 0x906f9b6f, - 0x8f6f8f6f, 0x88726f72, - 0x876fff70, 0x00800000, - 0x906f976f, 0xb9f2f807, - 0xb9f0f802, 0xbf8a0000, - 0xbe80226c, 0xbf810000, + 0xb9f3f801, 0x876fff72, + 0xfc000000, 0x906f9a6f, + 0x8f6f906f, 0xbef30380, + 0x88736f73, 0x876fff72, + 0x02000000, 0x906f996f, + 0x8f6f8f6f, 0x88736f73, + 0x876fff72, 0x01000000, + 0x906f986f, 0x8f6f996f, + 0x88736f73, 0x876fff70, + 0x00800000, 0x906f976f, + 0xb9f3f807, 0x87fe7e7e, + 0x87ea6a6a, 0xb9f0f802, + 0xbf8a0000, 0xbe80226c, + 0xbf810000, 
0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, - 0xbf9f0000, 0x00000000, }; static const uint32_t cwsr_trap_arcturus_hex[] = { 0xbf820001, 0xbf8202c4, diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm index f20e463e748b..261e05430852 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm @@ -20,1105 +20,933 @@ * OTHER DEALINGS IN THE SOFTWARE. */ +var SQ_WAVE_STATUS_INST_ATC_SHIFT = 23 +var SQ_WAVE_STATUS_INST_ATC_MASK = 0x00800000 +var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006 +var SQ_WAVE_STATUS_HALT_MASK = 0x2000 + +var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12 +var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9 +var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT = 8 +var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE = 6 +var SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SHIFT = 24 +var SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SIZE = 4 +var SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT = 24 +var SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE = 4 +var SQ_WAVE_IB_STS2_WAVE64_SHIFT = 11 +var SQ_WAVE_IB_STS2_WAVE64_SIZE = 1 + +var SQ_WAVE_TRAPSTS_SAVECTX_MASK = 0x400 +var SQ_WAVE_TRAPSTS_EXCE_MASK = 0x1FF +var SQ_WAVE_TRAPSTS_SAVECTX_SHIFT = 10 +var SQ_WAVE_TRAPSTS_MEM_VIOL_MASK = 0x100 +var SQ_WAVE_TRAPSTS_MEM_VIOL_SHIFT = 8 +var SQ_WAVE_TRAPSTS_PRE_SAVECTX_MASK = 0x3FF +var SQ_WAVE_TRAPSTS_PRE_SAVECTX_SHIFT = 0x0 +var SQ_WAVE_TRAPSTS_PRE_SAVECTX_SIZE = 10 +var SQ_WAVE_TRAPSTS_POST_SAVECTX_MASK = 0xFFFFF800 +var SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT = 11 +var SQ_WAVE_TRAPSTS_POST_SAVECTX_SIZE = 21 +var SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK = 0x800 + +var SQ_WAVE_IB_STS_RCNT_SHIFT = 16 +var SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT = 15 +var SQ_WAVE_IB_STS_REPLAY_W64H_SHIFT = 25 +var SQ_WAVE_IB_STS_REPLAY_W64H_SIZE = 1 +var SQ_WAVE_IB_STS_REPLAY_W64H_MASK = 0x02000000 +var SQ_WAVE_IB_STS_FIRST_REPLAY_SIZE = 1 +var SQ_WAVE_IB_STS_RCNT_SIZE = 6 +var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK = 0x003F8000 +var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG = 0x00007FFF + +var SQ_BUF_RSRC_WORD1_ATC_SHIFT = 24 +var SQ_BUF_RSRC_WORD3_MTYPE_SHIFT = 27 + +// bits [31:24] unused by SPI debug data +var TTMP11_SAVE_REPLAY_W64H_SHIFT = 31 +var TTMP11_SAVE_REPLAY_W64H_MASK = 0x80000000 +var TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT = 24 +var TTMP11_SAVE_RCNT_FIRST_REPLAY_MASK = 0x7F000000 + +// SQ_SEL_X/Y/Z/W, BUF_NUM_FORMAT_FLOAT, (0 for MUBUF stride[17:14] +// when ADD_TID_ENABLE and BUF_DATA_FORMAT_32 for MTBUF), ADD_TID_ENABLE +var S_SAVE_BUF_RSRC_WORD1_STRIDE = 0x00040000 +var S_SAVE_BUF_RSRC_WORD3_MISC = 0x10807FAC + +var S_SAVE_SPI_INIT_ATC_MASK = 0x08000000 +var S_SAVE_SPI_INIT_ATC_SHIFT = 27 +var S_SAVE_SPI_INIT_MTYPE_MASK = 0x70000000 +var S_SAVE_SPI_INIT_MTYPE_SHIFT = 28 +var S_SAVE_SPI_INIT_FIRST_WAVE_MASK = 0x04000000 +var S_SAVE_SPI_INIT_FIRST_WAVE_SHIFT = 26 + +var S_SAVE_PC_HI_RCNT_SHIFT = 26 +var S_SAVE_PC_HI_RCNT_MASK = 0xFC000000 +var S_SAVE_PC_HI_FIRST_REPLAY_SHIFT = 25 +var S_SAVE_PC_HI_FIRST_REPLAY_MASK = 0x02000000 +var S_SAVE_PC_HI_REPLAY_W64H_SHIFT = 24 +var S_SAVE_PC_HI_REPLAY_W64H_MASK = 0x01000000 + +var s_sgpr_save_num = 106 + +var s_save_spi_init_lo = exec_lo +var s_save_spi_init_hi = exec_hi +var s_save_pc_lo = ttmp0 +var s_save_pc_hi = ttmp1 +var s_save_exec_lo = ttmp2 +var s_save_exec_hi = ttmp3 +var s_save_status = ttmp12 +var s_save_trapsts = ttmp5 +var s_save_xnack_mask = ttmp6 +var s_wave_size = ttmp7 +var s_save_buf_rsrc0 = ttmp8 +var s_save_buf_rsrc1 = ttmp9 +var s_save_buf_rsrc2 = ttmp10 +var s_save_buf_rsrc3 = ttmp11 
+var s_save_mem_offset = ttmp14 +var s_save_alloc_size = s_save_trapsts +var s_save_tmp = s_save_buf_rsrc2 +var s_save_m0 = ttmp15 + +var S_RESTORE_BUF_RSRC_WORD1_STRIDE = S_SAVE_BUF_RSRC_WORD1_STRIDE +var S_RESTORE_BUF_RSRC_WORD3_MISC = S_SAVE_BUF_RSRC_WORD3_MISC + +var S_RESTORE_SPI_INIT_ATC_MASK = 0x08000000 +var S_RESTORE_SPI_INIT_ATC_SHIFT = 27 +var S_RESTORE_SPI_INIT_MTYPE_MASK = 0x70000000 +var S_RESTORE_SPI_INIT_MTYPE_SHIFT = 28 +var S_RESTORE_SPI_INIT_FIRST_WAVE_MASK = 0x04000000 +var S_RESTORE_SPI_INIT_FIRST_WAVE_SHIFT = 26 +var S_WAVE_SIZE = 25 + +var S_RESTORE_PC_HI_RCNT_SHIFT = S_SAVE_PC_HI_RCNT_SHIFT +var S_RESTORE_PC_HI_RCNT_MASK = S_SAVE_PC_HI_RCNT_MASK +var S_RESTORE_PC_HI_FIRST_REPLAY_SHIFT = S_SAVE_PC_HI_FIRST_REPLAY_SHIFT +var S_RESTORE_PC_HI_FIRST_REPLAY_MASK = S_SAVE_PC_HI_FIRST_REPLAY_MASK + +var s_restore_spi_init_lo = exec_lo +var s_restore_spi_init_hi = exec_hi +var s_restore_mem_offset = ttmp12 +var s_restore_alloc_size = ttmp3 +var s_restore_tmp = ttmp6 +var s_restore_mem_offset_save = s_restore_tmp +var s_restore_m0 = s_restore_alloc_size +var s_restore_mode = ttmp7 +var s_restore_pc_lo = ttmp0 +var s_restore_pc_hi = ttmp1 +var s_restore_exec_lo = ttmp14 +var s_restore_exec_hi = ttmp15 +var s_restore_status = ttmp4 +var s_restore_trapsts = ttmp5 +var s_restore_xnack_mask = ttmp13 +var s_restore_buf_rsrc0 = ttmp8 +var s_restore_buf_rsrc1 = ttmp9 +var s_restore_buf_rsrc2 = ttmp10 +var s_restore_buf_rsrc3 = ttmp11 +var s_restore_size = ttmp7 shader main + asic(DEFAULT) + type(CS) + wave_size(32) -asic(DEFAULT) - -type(CS) - -wave_size(32) -/*************************************************************************/ -/* control on how to run the shader */ -/*************************************************************************/ -//any hack that needs to be made to run this code in EMU (either becasue various EMU code are not ready or no compute save & restore in EMU run) -var EMU_RUN_HACK = 0 -var EMU_RUN_HACK_RESTORE_NORMAL = 0 -var EMU_RUN_HACK_SAVE_NORMAL_EXIT = 0 -var EMU_RUN_HACK_SAVE_SINGLE_WAVE = 0 -var EMU_RUN_HACK_SAVE_FIRST_TIME = 0 //for interrupted restore in which the first save is through EMU_RUN_HACK -var SAVE_LDS = 0 -var WG_BASE_ADDR_LO = 0x9000a000 -var WG_BASE_ADDR_HI = 0x0 -var WAVE_SPACE = 0x9000 //memory size that each wave occupies in workgroup state mem, increase from 5000 to 9000 for more SGPR need to be saved -var CTX_SAVE_CONTROL = 0x0 -var CTX_RESTORE_CONTROL = CTX_SAVE_CONTROL -var SIM_RUN_HACK = 0 //any hack that needs to be made to run this code in SIM (either becasue various RTL code are not ready or no compute save & restore in RTL run) -var SGPR_SAVE_USE_SQC = 0 //use SQC D$ to do the write -var USE_MTBUF_INSTEAD_OF_MUBUF = 0 //need to change BUF_DATA_FORMAT in S_SAVE_BUF_RSRC_WORD3_MISC from 0 to BUF_DATA_FORMAT_32 if set to 1 (i.e. 
0x00827FAC) -var SWIZZLE_EN = 0 //whether we use swizzled buffer addressing -var SAVE_RESTORE_HWID_DDID = 0 -var RESTORE_DDID_IN_SGPR18 = 0 -/**************************************************************************/ -/* variables */ -/**************************************************************************/ -var SQ_WAVE_STATUS_INST_ATC_SHIFT = 23 -var SQ_WAVE_STATUS_INST_ATC_MASK = 0x00800000 -var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006 - -var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12 -var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9 -var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT = 8 -var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE = 6 -var SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SHIFT = 24 -var SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SIZE = 4 //FIXME sq.blk still has 4 bits at this time while SQ programming guide has 3 bits -var SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT = 24 -var SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE = 4 -var SQ_WAVE_IB_STS2_WAVE64_SHIFT = 11 -var SQ_WAVE_IB_STS2_WAVE64_SIZE = 1 - -var SQ_WAVE_TRAPSTS_SAVECTX_MASK = 0x400 -var SQ_WAVE_TRAPSTS_EXCE_MASK = 0x1FF // Exception mask -var SQ_WAVE_TRAPSTS_SAVECTX_SHIFT = 10 -var SQ_WAVE_TRAPSTS_MEM_VIOL_MASK = 0x100 -var SQ_WAVE_TRAPSTS_MEM_VIOL_SHIFT = 8 -var SQ_WAVE_TRAPSTS_PRE_SAVECTX_MASK = 0x3FF -var SQ_WAVE_TRAPSTS_PRE_SAVECTX_SHIFT = 0x0 -var SQ_WAVE_TRAPSTS_PRE_SAVECTX_SIZE = 10 -var SQ_WAVE_TRAPSTS_POST_SAVECTX_MASK = 0xFFFFF800 -var SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT = 11 -var SQ_WAVE_TRAPSTS_POST_SAVECTX_SIZE = 21 - -var SQ_WAVE_IB_STS_RCNT_SHIFT = 16 //FIXME -var SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT = 15 //FIXME -var SQ_WAVE_IB_STS_FIRST_REPLAY_SIZE = 1 //FIXME -var SQ_WAVE_IB_STS_RCNT_SIZE = 6 //FIXME -var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG = 0x00007FFF //FIXME - -var SQ_BUF_RSRC_WORD1_ATC_SHIFT = 24 -var SQ_BUF_RSRC_WORD3_MTYPE_SHIFT = 27 - - -/* Save */ -var S_SAVE_BUF_RSRC_WORD1_STRIDE = 0x00040000 //stride is 4 bytes -var S_SAVE_BUF_RSRC_WORD3_MISC = 0x00807FAC //SQ_SEL_X/Y/Z/W, BUF_NUM_FORMAT_FLOAT, (0 for MUBUF stride[17:14] when ADD_TID_ENABLE and BUF_DATA_FORMAT_32 for MTBUF), ADD_TID_ENABLE - -var S_SAVE_SPI_INIT_ATC_MASK = 0x08000000 //bit[27]: ATC bit -var S_SAVE_SPI_INIT_ATC_SHIFT = 27 -var S_SAVE_SPI_INIT_MTYPE_MASK = 0x70000000 //bit[30:28]: Mtype -var S_SAVE_SPI_INIT_MTYPE_SHIFT = 28 -var S_SAVE_SPI_INIT_FIRST_WAVE_MASK = 0x04000000 //bit[26]: FirstWaveInTG -var S_SAVE_SPI_INIT_FIRST_WAVE_SHIFT = 26 - -var S_SAVE_PC_HI_RCNT_SHIFT = 28 //FIXME check with Brian to ensure all fields other than PC[47:0] can be used -var S_SAVE_PC_HI_RCNT_MASK = 0xF0000000 //FIXME -var S_SAVE_PC_HI_FIRST_REPLAY_SHIFT = 27 //FIXME -var S_SAVE_PC_HI_FIRST_REPLAY_MASK = 0x08000000 //FIXME - -var s_save_spi_init_lo = exec_lo -var s_save_spi_init_hi = exec_hi - -var s_save_pc_lo = ttmp0 //{TTMP1, TTMP0} = {3h0,pc_rewind[3:0], HT[0],trapID[7:0], PC[47:0]} -var s_save_pc_hi = ttmp1 -var s_save_exec_lo = ttmp2 -var s_save_exec_hi = ttmp3 -var s_save_status = ttmp4 -var s_save_trapsts = ttmp5 //not really used until the end of the SAVE routine -var s_wave_size = ttmp6 //ttmp6 is not needed now, since it's only 32bit xnack mask, now use it to determine wave32 or wave64 in EMU_HACK -var s_save_xnack_mask = ttmp7 -var s_save_buf_rsrc0 = ttmp8 -var s_save_buf_rsrc1 = ttmp9 -var s_save_buf_rsrc2 = ttmp10 -var s_save_buf_rsrc3 = ttmp11 - -var s_save_mem_offset = ttmp14 -var s_sgpr_save_num = 106 //in gfx10, all sgpr must be saved -var s_save_alloc_size = s_save_trapsts //conflict -var s_save_tmp = s_save_buf_rsrc2 //shared with s_save_buf_rsrc2 (conflict: should not use mem access 
with s_save_tmp at the same time) -var s_save_m0 = ttmp15 - -/* Restore */ -var S_RESTORE_BUF_RSRC_WORD1_STRIDE = S_SAVE_BUF_RSRC_WORD1_STRIDE -var S_RESTORE_BUF_RSRC_WORD3_MISC = S_SAVE_BUF_RSRC_WORD3_MISC - -var S_RESTORE_SPI_INIT_ATC_MASK = 0x08000000 //bit[27]: ATC bit -var S_RESTORE_SPI_INIT_ATC_SHIFT = 27 -var S_RESTORE_SPI_INIT_MTYPE_MASK = 0x70000000 //bit[30:28]: Mtype -var S_RESTORE_SPI_INIT_MTYPE_SHIFT = 28 -var S_RESTORE_SPI_INIT_FIRST_WAVE_MASK = 0x04000000 //bit[26]: FirstWaveInTG -var S_RESTORE_SPI_INIT_FIRST_WAVE_SHIFT = 26 - -var S_RESTORE_PC_HI_RCNT_SHIFT = S_SAVE_PC_HI_RCNT_SHIFT -var S_RESTORE_PC_HI_RCNT_MASK = S_SAVE_PC_HI_RCNT_MASK -var S_RESTORE_PC_HI_FIRST_REPLAY_SHIFT = S_SAVE_PC_HI_FIRST_REPLAY_SHIFT -var S_RESTORE_PC_HI_FIRST_REPLAY_MASK = S_SAVE_PC_HI_FIRST_REPLAY_MASK - -var s_restore_spi_init_lo = exec_lo -var s_restore_spi_init_hi = exec_hi - -var s_restore_mem_offset = ttmp12 -var s_restore_alloc_size = ttmp3 -var s_restore_tmp = ttmp6 -var s_restore_mem_offset_save = s_restore_tmp //no conflict - -var s_restore_m0 = s_restore_alloc_size //no conflict - -var s_restore_mode = ttmp13 -var s_restore_hwid1 = ttmp2 -var s_restore_ddid = s_restore_hwid1 -var s_restore_pc_lo = ttmp0 -var s_restore_pc_hi = ttmp1 -var s_restore_exec_lo = ttmp14 -var s_restore_exec_hi = ttmp15 -var s_restore_status = ttmp4 -var s_restore_trapsts = ttmp5 -//var s_restore_xnack_mask_lo = xnack_mask_lo -//var s_restore_xnack_mask_hi = xnack_mask_hi -var s_restore_xnack_mask = ttmp7 -var s_restore_buf_rsrc0 = ttmp8 -var s_restore_buf_rsrc1 = ttmp9 -var s_restore_buf_rsrc2 = ttmp10 -var s_restore_buf_rsrc3 = ttmp11 -var s_restore_size = ttmp13 //ttmp13 has no conflict - -/**************************************************************************/ -/* trap handler entry points */ -/**************************************************************************/ - if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL)) //hack to use trap_id for determining save/restore - //FIXME VCCZ un-init assertion s_getreg_b32 s_save_status, hwreg(HW_REG_STATUS) //save STATUS since we will change SCC - s_and_b32 s_save_tmp, s_save_pc_hi, 0xffff0000 //change SCC - s_cmp_eq_u32 s_save_tmp, 0x007e0000 //Save: trap_id = 0x7e. Restore: trap_id = 0x7f. - s_cbranch_scc0 L_JUMP_TO_RESTORE //do not need to recover STATUS here since we are going to RESTORE - //FIXME s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status //need to recover STATUS since we are going to SAVE - s_branch L_SKIP_RESTORE //NOT restore, SAVE actually - else - s_branch L_SKIP_RESTORE //NOT restore. might be a regular trap or save - end + s_branch L_SKIP_RESTORE //NOT restore. might be a regular trap or save L_JUMP_TO_RESTORE: - s_branch L_RESTORE //restore + s_branch L_RESTORE L_SKIP_RESTORE: - - s_getreg_b32 s_save_status, hwreg(HW_REG_STATUS) //save STATUS since we will change SCC - s_andn2_b32 s_save_status, s_save_status, SQ_WAVE_STATUS_SPI_PRIO_MASK //check whether this is for save - s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS) - s_and_b32 s_save_trapsts, s_save_trapsts, SQ_WAVE_TRAPSTS_SAVECTX_MASK //check whether this is for save - s_cbranch_scc1 L_SAVE //this is the operation for save - - // ********* Handle non-CWSR traps ******************* - if (!EMU_RUN_HACK) - s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS) - s_and_b32 s_save_trapsts, s_save_trapsts, SQ_WAVE_TRAPSTS_EXCE_MASK // Check whether it is an exception - s_cbranch_scc1 L_EXCP_CASE // Exception, jump back to the shader program directly. 
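For reference, the L_FETCH_2ND_TRAP block above parks IB_STS.REPLAY_W64H and the RCNT/FIRST_REPLAY field in the otherwise unused top byte of ttmp11 (bit 31 and bits 30:24) before issuing the scalar loads, and the restore sequence under L_EXCP_CASE further down shifts them back into IB_STS before the s_rfe_b64. A minimal host-side C sketch of that packing, built from the masks and shifts declared at the top of the new handler; the helper names and the test value are illustrative only, not code from the patch:

#include <stdint.h>
#include <assert.h>

/* Masks/shifts copied from the vars at the top of the new handler. */
#define IB_STS_REPLAY_W64H_MASK        0x02000000u /* bit 25 */
#define IB_STS_RCNT_FIRST_REPLAY_MASK  0x003F8000u /* bits 21:15 */
#define TTMP11_REPLAY_W64H_SHIFT       6           /* 31 - 25 */
#define TTMP11_RCNT_FIRST_REPLAY_SHIFT 9           /* 24 - 15 */

/* Pack the IB_STS fields into the unused ttmp11[31:24] byte (L_FETCH_2ND_TRAP). */
static uint32_t pack_ttmp11(uint32_t ttmp11, uint32_t ib_sts)
{
	ttmp11 &= 0x00FFFFFFu;	/* s_andn2_b32 ttmp11, ttmp11, <both masks> */
	ttmp11 |= (ib_sts & IB_STS_REPLAY_W64H_MASK) << TTMP11_REPLAY_W64H_SHIFT;
	ttmp11 |= (ib_sts & IB_STS_RCNT_FIRST_REPLAY_MASK) << TTMP11_RCNT_FIRST_REPLAY_SHIFT;
	return ttmp11;
}

/* Recover the IB_STS bits on the way back out (L_EXCP_CASE). */
static uint32_t unpack_ib_sts(uint32_t ttmp11)
{
	return ((ttmp11 >> TTMP11_RCNT_FIRST_REPLAY_SHIFT) & IB_STS_RCNT_FIRST_REPLAY_MASK) |
	       ((ttmp11 >> TTMP11_REPLAY_W64H_SHIFT) & IB_STS_REPLAY_W64H_MASK);
}

int main(void)
{
	/* arbitrary REPLAY_W64H + RCNT/FIRST_REPLAY pattern for the round-trip check */
	uint32_t ib_sts = IB_STS_REPLAY_W64H_MASK | 0x00238000u;

	assert(unpack_ib_sts(pack_ttmp11(0, ib_sts)) == ib_sts);
	return 0;
}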
- s_add_u32 ttmp0, ttmp0, 4 // S_TRAP case, add 4 to ttmp0 - - L_EXCP_CASE: - s_and_b32 ttmp1, ttmp1, 0xFFFF - s_rfe_b64 [ttmp0, ttmp1] - end - // ********* End handling of non-CWSR traps ******************* - -/**************************************************************************/ -/* save routine */ -/**************************************************************************/ - -L_SAVE: - + s_getreg_b32 s_save_status, hwreg(HW_REG_STATUS) //save STATUS since we will change SCC + s_andn2_b32 s_save_status, s_save_status, SQ_WAVE_STATUS_SPI_PRIO_MASK + s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS) + s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_SAVECTX_MASK //check whether this is for save + s_cbranch_scc1 L_SAVE + + // If STATUS.MEM_VIOL is asserted then halt the wave to prevent + // the exception raising again and blocking context save. + s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_MEM_VIOL_MASK + s_cbranch_scc0 L_FETCH_2ND_TRAP + s_or_b32 s_save_status, s_save_status, SQ_WAVE_STATUS_HALT_MASK + +L_FETCH_2ND_TRAP: + // Preserve and clear scalar XNACK state before issuing scalar loads. + // Save IB_STS.REPLAY_W64H[25], RCNT[21:16], FIRST_REPLAY[15] into + // unused space ttmp11[31:24]. + s_andn2_b32 ttmp11, ttmp11, (TTMP11_SAVE_REPLAY_W64H_MASK | TTMP11_SAVE_RCNT_FIRST_REPLAY_MASK) + s_getreg_b32 ttmp2, hwreg(HW_REG_IB_STS) + s_and_b32 ttmp3, ttmp2, SQ_WAVE_IB_STS_REPLAY_W64H_MASK + s_lshl_b32 ttmp3, ttmp3, (TTMP11_SAVE_REPLAY_W64H_SHIFT - SQ_WAVE_IB_STS_REPLAY_W64H_SHIFT) + s_or_b32 ttmp11, ttmp11, ttmp3 + s_and_b32 ttmp3, ttmp2, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK + s_lshl_b32 ttmp3, ttmp3, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT) + s_or_b32 ttmp11, ttmp11, ttmp3 + s_andn2_b32 ttmp2, ttmp2, (SQ_WAVE_IB_STS_REPLAY_W64H_MASK | SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK) + s_setreg_b32 hwreg(HW_REG_IB_STS), ttmp2 + + // Read second-level TBA/TMA from first-level TMA and jump if available. + // ttmp[2:5] and ttmp12 can be used (others hold SPI-initialized debug data) + // ttmp12 holds SQ_WAVE_STATUS + s_getreg_b32 ttmp4, hwreg(HW_REG_SHADER_TMA_LO) + s_getreg_b32 ttmp5, hwreg(HW_REG_SHADER_TMA_HI) + s_lshl_b64 [ttmp4, ttmp5], [ttmp4, ttmp5], 0x8 + s_load_dwordx2 [ttmp2, ttmp3], [ttmp4, ttmp5], 0x0 glc:1 // second-level TBA + s_waitcnt lgkmcnt(0) + s_load_dwordx2 [ttmp4, ttmp5], [ttmp4, ttmp5], 0x8 glc:1 // second-level TMA + s_waitcnt lgkmcnt(0) + s_and_b64 [ttmp2, ttmp3], [ttmp2, ttmp3], [ttmp2, ttmp3] + s_cbranch_scc0 L_NO_NEXT_TRAP // second-level trap handler not been set + s_setpc_b64 [ttmp2, ttmp3] // jump to second-level trap handler + +L_NO_NEXT_TRAP: + s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS) + s_and_b32 s_save_trapsts, s_save_trapsts, SQ_WAVE_TRAPSTS_EXCE_MASK + s_cbranch_scc1 L_EXCP_CASE // Exception, jump back to the shader program directly. + s_add_u32 ttmp0, ttmp0, 4 // S_TRAP case, add 4 to ttmp0 + s_addc_u32 ttmp1, ttmp1, 0 +L_EXCP_CASE: + s_and_b32 ttmp1, ttmp1, 0xFFFF + + // Restore SQ_WAVE_IB_STS. + s_lshr_b32 ttmp2, ttmp11, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT) + s_and_b32 ttmp3, ttmp2, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK + s_lshr_b32 ttmp2, ttmp11, (TTMP11_SAVE_REPLAY_W64H_SHIFT - SQ_WAVE_IB_STS_REPLAY_W64H_SHIFT) + s_and_b32 ttmp2, ttmp2, SQ_WAVE_IB_STS_REPLAY_W64H_MASK + s_or_b32 ttmp2, ttmp2, ttmp3 + s_setreg_b32 hwreg(HW_REG_IB_STS), ttmp2 + + // Restore SQ_WAVE_STATUS. 
+ s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32 + s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32 + s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status + + s_rfe_b64 [ttmp0, ttmp1] + +L_SAVE: //check whether there is mem_viol - s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS) - s_and_b32 s_save_trapsts, s_save_trapsts, SQ_WAVE_TRAPSTS_MEM_VIOL_MASK + s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS) + s_and_b32 s_save_trapsts, s_save_trapsts, SQ_WAVE_TRAPSTS_MEM_VIOL_MASK s_cbranch_scc0 L_NO_PC_REWIND - + //if so, need rewind PC assuming GDS operation gets NACKed - s_mov_b32 s_save_tmp, 0 //clear mem_viol bit - s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_MEM_VIOL_SHIFT, 1), s_save_tmp //clear mem_viol bit - s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff //pc[47:32] - s_sub_u32 s_save_pc_lo, s_save_pc_lo, 8 //pc[31:0]-8 - s_subb_u32 s_save_pc_hi, s_save_pc_hi, 0x0 // -scc + s_mov_b32 s_save_tmp, 0 + s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_MEM_VIOL_SHIFT, 1), s_save_tmp //clear mem_viol bit + s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff //pc[47:32] + s_sub_u32 s_save_pc_lo, s_save_pc_lo, 8 //pc[31:0]-8 + s_subb_u32 s_save_pc_hi, s_save_pc_hi, 0x0 L_NO_PC_REWIND: - s_mov_b32 s_save_tmp, 0 //clear saveCtx bit - s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_SAVECTX_SHIFT, 1), s_save_tmp //clear saveCtx bit - - //s_mov_b32 s_save_xnack_mask_lo, xnack_mask_lo //save XNACK_MASK - //s_mov_b32 s_save_xnack_mask_hi, xnack_mask_hi - s_getreg_b32 s_save_xnack_mask, hwreg(HW_REG_SHADER_XNACK_MASK) - s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_RCNT_SHIFT, SQ_WAVE_IB_STS_RCNT_SIZE) //save RCNT - s_lshl_b32 s_save_tmp, s_save_tmp, S_SAVE_PC_HI_RCNT_SHIFT - s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp - s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT, SQ_WAVE_IB_STS_FIRST_REPLAY_SIZE) //save FIRST_REPLAY - s_lshl_b32 s_save_tmp, s_save_tmp, S_SAVE_PC_HI_FIRST_REPLAY_SHIFT - s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp - s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS) //clear RCNT and FIRST_REPLAY in IB_STS - s_and_b32 s_save_tmp, s_save_tmp, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG + s_mov_b32 s_save_tmp, 0 + s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_SAVECTX_SHIFT, 1), s_save_tmp //clear saveCtx bit + + s_getreg_b32 s_save_xnack_mask, hwreg(HW_REG_SHADER_XNACK_MASK) + s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_RCNT_SHIFT, SQ_WAVE_IB_STS_RCNT_SIZE) + s_lshl_b32 s_save_tmp, s_save_tmp, S_SAVE_PC_HI_RCNT_SHIFT + s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp + s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT, SQ_WAVE_IB_STS_FIRST_REPLAY_SIZE) + s_lshl_b32 s_save_tmp, s_save_tmp, S_SAVE_PC_HI_FIRST_REPLAY_SHIFT + s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp + s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_REPLAY_W64H_SHIFT, SQ_WAVE_IB_STS_REPLAY_W64H_SIZE) + s_lshl_b32 s_save_tmp, s_save_tmp, S_SAVE_PC_HI_REPLAY_W64H_SHIFT + s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp + s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS) //clear RCNT and FIRST_REPLAY and REPLAY_W64H in IB_STS + s_and_b32 s_save_tmp, s_save_tmp, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG s_setreg_b32 hwreg(HW_REG_IB_STS), s_save_tmp - - /* inform SPI the readiness and wait for SPI's go signal */ - s_mov_b32 s_save_exec_lo, exec_lo //save EXEC and use EXEC for the go signal from SPI - s_mov_b32 s_save_exec_hi, exec_hi - s_mov_b64 
exec, 0x0 //clear EXEC to get ready to receive - if (EMU_RUN_HACK) - - else - s_sendmsg sendmsg(MSG_SAVEWAVE) //send SPI a message and wait for SPI's write to EXEC - end - - L_SLEEP: - s_sleep 0x2 - - if (EMU_RUN_HACK) - - else - s_cbranch_execz L_SLEEP - end - - - /* setup Resource Contants */ - if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_SAVE_SINGLE_WAVE)) - //calculate wd_addr using absolute thread id - v_readlane_b32 s_save_tmp, v9, 0 - //determine it is wave32 or wave64 - s_getreg_b32 s_wave_size, hwreg(HW_REG_IB_STS2,SQ_WAVE_IB_STS2_WAVE64_SHIFT,SQ_WAVE_IB_STS2_WAVE64_SIZE) - s_cmp_eq_u32 s_wave_size, 0 - s_cbranch_scc1 L_SAVE_WAVE32 - s_lshr_b32 s_save_tmp, s_save_tmp, 6 //SAVE WAVE64 - s_branch L_SAVE_CON - L_SAVE_WAVE32: - s_lshr_b32 s_save_tmp, s_save_tmp, 5 //SAVE WAVE32 - L_SAVE_CON: - s_mul_i32 s_save_tmp, s_save_tmp, WAVE_SPACE - s_add_i32 s_save_spi_init_lo, s_save_tmp, WG_BASE_ADDR_LO - s_mov_b32 s_save_spi_init_hi, WG_BASE_ADDR_HI - s_and_b32 s_save_spi_init_hi, s_save_spi_init_hi, CTX_SAVE_CONTROL - else - end - if ((EMU_RUN_HACK) && (EMU_RUN_HACK_SAVE_SINGLE_WAVE)) - s_add_i32 s_save_spi_init_lo, s_save_tmp, WG_BASE_ADDR_LO - s_mov_b32 s_save_spi_init_hi, WG_BASE_ADDR_HI - s_and_b32 s_save_spi_init_hi, s_save_spi_init_hi, CTX_SAVE_CONTROL - else - end - - - s_mov_b32 s_save_buf_rsrc0, s_save_spi_init_lo //base_addr_lo - s_and_b32 s_save_buf_rsrc1, s_save_spi_init_hi, 0x0000FFFF //base_addr_hi - s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE - s_mov_b32 s_save_buf_rsrc2, 0 //NUM_RECORDS initial value = 0 (in bytes) although not neccessarily inited - s_mov_b32 s_save_buf_rsrc3, S_SAVE_BUF_RSRC_WORD3_MISC - s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_ATC_MASK - s_lshr_b32 s_save_tmp, s_save_tmp, (S_SAVE_SPI_INIT_ATC_SHIFT-SQ_BUF_RSRC_WORD1_ATC_SHIFT) //get ATC bit into position - s_or_b32 s_save_buf_rsrc3, s_save_buf_rsrc3, s_save_tmp //or ATC - s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_MTYPE_MASK - s_lshr_b32 s_save_tmp, s_save_tmp, (S_SAVE_SPI_INIT_MTYPE_SHIFT-SQ_BUF_RSRC_WORD3_MTYPE_SHIFT) //get MTYPE bits into position - s_or_b32 s_save_buf_rsrc3, s_save_buf_rsrc3, s_save_tmp //or MTYPE - - s_mov_b32 s_save_m0, m0 //save M0 - - /* global mem offset */ - s_mov_b32 s_save_mem_offset, 0x0 //mem offset initial value = 0 - s_getreg_b32 s_wave_size, hwreg(HW_REG_IB_STS2,SQ_WAVE_IB_STS2_WAVE64_SHIFT,SQ_WAVE_IB_STS2_WAVE64_SIZE) //get wave_save_size - s_or_b32 s_wave_size, s_save_spi_init_hi, s_wave_size //share s_wave_size with exec_hi - - /* save VGPRs */ - ////////////////////////////// - L_SAVE_VGPR: - - s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on - s_and_b32 m0, s_wave_size, 1 - s_cmp_eq_u32 m0, 1 - s_cbranch_scc1 L_ENABLE_SAVE_VGPR_EXEC_HI - s_mov_b32 exec_hi, 0x00000000 - s_branch L_SAVE_VGPR_NORMAL - L_ENABLE_SAVE_VGPR_EXEC_HI: - s_mov_b32 exec_hi, 0xFFFFFFFF - L_SAVE_VGPR_NORMAL: - s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE) //vpgr_size - //for wave32 and wave64, the num of vgpr function is the same? 
- s_add_u32 s_save_alloc_size, s_save_alloc_size, 1 - s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value) //FIXME for GFX, zero is possible - //determine it is wave32 or wave64 - s_and_b32 m0, s_wave_size, 1 - s_cmp_eq_u32 m0, 1 - s_cbranch_scc1 L_SAVE_VGPR_WAVE64 - - //zhenxu added it for save vgpr for wave32 - s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 7 //NUM_RECORDS in bytes (32 threads*4) - if (SWIZZLE_EN) - s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? - else - s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes - end - - s_mov_b32 m0, 0x0 //VGPR initial index value =0 - //s_set_gpr_idx_on m0, 0x1 //M0[7:0] = M0[7:0] and M0[15:12] = 0x1 - //s_add_u32 s_save_alloc_size, s_save_alloc_size, 0x1000 //add 0x1000 since we compare m0 against it later, doesn't need this in gfx10 - - L_SAVE_VGPR_WAVE32_LOOP: - v_movrels_b32 v0, v0 //v0 = v[0+m0] - - if(USE_MTBUF_INSTEAD_OF_MUBUF) - tbuffer_store_format_x v0, v0, s_save_buf_rsrc0, s_save_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1 - else - buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 - end - - s_add_u32 m0, m0, 1 //next vgpr index - s_add_u32 s_save_mem_offset, s_save_mem_offset, 128 //every buffer_store_dword does 128 bytes - s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0 - s_cbranch_scc1 L_SAVE_VGPR_WAVE32_LOOP //VGPR save is complete? - s_branch L_SAVE_LDS - //save vgpr for wave32 ends - - L_SAVE_VGPR_WAVE64: - s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 8 //NUM_RECORDS in bytes (64 threads*4) - if (SWIZZLE_EN) - s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? - else - s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes - end - - s_mov_b32 m0, 0x0 //VGPR initial index value =0 - //s_set_gpr_idx_on m0, 0x1 //M0[7:0] = M0[7:0] and M0[15:12] = 0x1 - //s_add_u32 s_save_alloc_size, s_save_alloc_size, 0x1000 //add 0x1000 since we compare m0 against it later, doesn't need this in gfx10 - - L_SAVE_VGPR_WAVE64_LOOP: - v_movrels_b32 v0, v0 //v0 = v[0+m0] - - if(USE_MTBUF_INSTEAD_OF_MUBUF) - tbuffer_store_format_x v0, v0, s_save_buf_rsrc0, s_save_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1 - else - buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 - end - - s_add_u32 m0, m0, 1 //next vgpr index - s_add_u32 s_save_mem_offset, s_save_mem_offset, 256 //every buffer_store_dword does 256 bytes - s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0 - s_cbranch_scc1 L_SAVE_VGPR_WAVE64_LOOP //VGPR save is complete? - //s_set_gpr_idx_off - // - //Below part will be the save shared vgpr part (new for gfx10) - s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE) //shared_vgpr_size - s_and_b32 s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF //shared_vgpr_size is zero? - s_cbranch_scc0 L_SAVE_LDS //no shared_vgpr used? jump to L_SAVE_LDS - s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 3 //Number of SHARED_VGPRs = shared_vgpr_size * 8 (non-zero value) - //m0 now has the value of normal vgpr count, just add the m0 with shared_vgpr count to get the total count. 
- //save shared_vgpr will start from the index of m0 - s_add_u32 s_save_alloc_size, s_save_alloc_size, m0 - s_mov_b32 exec_lo, 0xFFFFFFFF - s_mov_b32 exec_hi, 0x00000000 - L_SAVE_SHARED_VGPR_WAVE64_LOOP: - v_movrels_b32 v0, v0 //v0 = v[0+m0] - buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 - s_add_u32 m0, m0, 1 //next vgpr index - s_add_u32 s_save_mem_offset, s_save_mem_offset, 128 //every buffer_store_dword does 256 bytes - s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0 - s_cbranch_scc1 L_SAVE_SHARED_VGPR_WAVE64_LOOP //SHARED_VGPR save is complete? - - /* save LDS */ - ////////////////////////////// - L_SAVE_LDS: - - //Only check the first wave need LDS - /* the first wave in the threadgroup */ - s_barrier //FIXME not performance-optimal "LDS is used? wait for other waves in the same TG" - s_and_b32 s_save_tmp, s_wave_size, S_SAVE_SPI_INIT_FIRST_WAVE_MASK //exec is still used here - s_cbranch_scc0 L_SAVE_SGPR - - s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on - s_and_b32 m0, s_wave_size, 1 - s_cmp_eq_u32 m0, 1 - s_cbranch_scc1 L_ENABLE_SAVE_LDS_EXEC_HI - s_mov_b32 exec_hi, 0x00000000 - s_branch L_SAVE_LDS_NORMAL - L_ENABLE_SAVE_LDS_EXEC_HI: - s_mov_b32 exec_hi, 0xFFFFFFFF - L_SAVE_LDS_NORMAL: - s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE) //lds_size - s_and_b32 s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF //lds_size is zero? - s_cbranch_scc0 L_SAVE_SGPR //no lds used? jump to L_SAVE_VGPR - s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 6 //LDS size in dwords = lds_size * 64dw - s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 2 //LDS size in bytes - s_mov_b32 s_save_buf_rsrc2, s_save_alloc_size //NUM_RECORDS in bytes - if (SWIZZLE_EN) - s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? - else - s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes - end - - //load 0~63*4(byte address) to vgpr v15 - v_mbcnt_lo_u32_b32 v0, -1, 0 - v_mbcnt_hi_u32_b32 v0, -1, v0 - v_mul_u32_u24 v0, 4, v0 - - s_and_b32 m0, s_wave_size, 1 - s_cmp_eq_u32 m0, 1 - s_mov_b32 m0, 0x0 - s_cbranch_scc1 L_SAVE_LDS_LOOP_W64 - - L_SAVE_LDS_LOOP_W32: - if (SAVE_LDS) - ds_read_b32 v1, v0 - s_waitcnt 0 //ensure data ready - buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 - //buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 //save lds to memory doesn't exist in 10 - end - s_add_u32 m0, m0, 128 //every buffer_store_lds does 128 bytes - s_add_u32 s_save_mem_offset, s_save_mem_offset, 128 //mem offset increased by 128 bytes - v_add_nc_u32 v0, v0, 128 - s_cmp_lt_u32 m0, s_save_alloc_size //scc=(m0 < s_save_alloc_size) ? 1 : 0 - s_cbranch_scc1 L_SAVE_LDS_LOOP_W32 //LDS save is complete? - s_branch L_SAVE_SGPR - - L_SAVE_LDS_LOOP_W64: - if (SAVE_LDS) - ds_read_b32 v1, v0 - s_waitcnt 0 //ensure data ready - buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 - //buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 //save lds to memory doesn't exist in 10 - end - s_add_u32 m0, m0, 256 //every buffer_store_lds does 256 bytes - s_add_u32 s_save_mem_offset, s_save_mem_offset, 256 //mem offset increased by 256 bytes - v_add_nc_u32 v0, v0, 256 - s_cmp_lt_u32 m0, s_save_alloc_size //scc=(m0 < s_save_alloc_size) ? 1 : 0 - s_cbranch_scc1 L_SAVE_LDS_LOOP_W64 //LDS save is complete? 
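Both the removed loops above and the replacement loops later in this hunk advance the save-buffer offset by one row per iteration: 4 bytes per lane, so 128 bytes in wave32 and 256 bytes in wave64, with the W64 bit read from HW_REG_IB_STS2 selecting between the two paths. A small stand-alone C illustration of that arithmetic; the function name is illustrative, not from the patch:

#include <stdint.h>
#include <stdio.h>

/*
 * Stride of one saved GPR or LDS row: 4 bytes per lane.
 * wave32 -> 32 lanes -> 128 bytes, wave64 -> 64 lanes -> 256 bytes.
 */
static uint32_t row_stride_bytes(int wave64)
{
	uint32_t lanes = wave64 ? 64 : 32;

	return lanes * (uint32_t)sizeof(uint32_t);
}

int main(void)
{
	printf("wave32 stride: %u bytes\n", row_stride_bytes(0)); /* 128 */
	printf("wave64 stride: %u bytes\n", row_stride_bytes(1)); /* 256 */
	return 0;
}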
- - - /* save SGPRs */ - ////////////////////////////// - //s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SIZE) //spgr_size - //s_add_u32 s_save_alloc_size, s_save_alloc_size, 1 - //s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 4 //Number of SGPRs = (sgpr_size + 1) * 16 (non-zero value) - //s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 3 //In gfx10, Number of SGPRs = (sgpr_size + 1) * 8 (non-zero value) - L_SAVE_SGPR: - //need to look at it is wave32 or wave64 - s_and_b32 m0, s_wave_size, 1 - s_cmp_eq_u32 m0, 1 - s_cbranch_scc1 L_SAVE_SGPR_VMEM_WAVE64 - if (SGPR_SAVE_USE_SQC) - s_lshl_b32 s_save_buf_rsrc2, s_sgpr_save_num, 2 //NUM_RECORDS in bytes - else - s_lshl_b32 s_save_buf_rsrc2, s_sgpr_save_num, 7 //NUM_RECORDS in bytes (32 threads) - end - s_branch L_SAVE_SGPR_CONT - L_SAVE_SGPR_VMEM_WAVE64: - if (SGPR_SAVE_USE_SQC) - s_lshl_b32 s_save_buf_rsrc2, s_sgpr_save_num, 2 //NUM_RECORDS in bytes - else - s_lshl_b32 s_save_buf_rsrc2, s_sgpr_save_num, 8 //NUM_RECORDS in bytes (64 threads) - end - L_SAVE_SGPR_CONT: - if (SWIZZLE_EN) - s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? - else - s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes - end - - //s_mov_b32 m0, 0x0 //SGPR initial index value =0 - //s_nop 0x0 //Manually inserted wait states - - s_and_b32 m0, s_wave_size, 1 - s_cmp_eq_u32 m0, 1 - - s_mov_b32 m0, 0x0 //SGPR initial index value =0 - s_nop 0x0 //Manually inserted wait states - - s_cbranch_scc1 L_SAVE_SGPR_LOOP_WAVE64 - - L_SAVE_SGPR_LOOP_WAVE32: - s_movrels_b32 s0, s0 //s0 = s[0+m0] - //zhenxu, adding one more argument to save sgpr function, this is only for vmem, using sqc is not change - write_sgpr_to_mem_wave32(s0, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //PV: the best performance should be using s_buffer_store_dwordx4 - s_add_u32 m0, m0, 1 //next sgpr index - s_cmp_lt_u32 m0, s_sgpr_save_num //scc = (m0 < s_sgpr_save_num) ? 1 : 0 - s_cbranch_scc1 L_SAVE_SGPR_LOOP_WAVE32 //SGPR save is complete? - s_branch L_SAVE_HWREG - - L_SAVE_SGPR_LOOP_WAVE64: - s_movrels_b32 s0, s0 //s0 = s[0+m0] - //zhenxu, adding one more argument to save sgpr function, this is only for vmem, using sqc is not change - write_sgpr_to_mem_wave64(s0, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //PV: the best performance should be using s_buffer_store_dwordx4 - s_add_u32 m0, m0, 1 //next sgpr index - s_cmp_lt_u32 m0, s_sgpr_save_num //scc = (m0 < s_sgpr_save_num) ? 1 : 0 - s_cbranch_scc1 L_SAVE_SGPR_LOOP_WAVE64 //SGPR save is complete? - - - /* save HW registers */ - ////////////////////////////// - L_SAVE_HWREG: - s_mov_b32 s_save_buf_rsrc2, 0x4 //NUM_RECORDS in bytes - if (SWIZZLE_EN) - s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? 
- else - s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes - end - - s_and_b32 m0, s_wave_size, 1 - s_cmp_eq_u32 m0, 1 - s_cbranch_scc1 L_SAVE_HWREG_WAVE64 - - write_sgpr_to_mem_wave32(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //M0 - - if ((EMU_RUN_HACK) && (EMU_RUN_HACK_SAVE_FIRST_TIME)) - s_add_u32 s_save_pc_lo, s_save_pc_lo, 4 //pc[31:0]+4 - s_addc_u32 s_save_pc_hi, s_save_pc_hi, 0x0 //carry bit over - end - - write_sgpr_to_mem_wave32(s_save_pc_lo, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //PC - write_sgpr_to_mem_wave32(s_save_pc_hi, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) - write_sgpr_to_mem_wave32(s_save_exec_lo, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //EXEC - write_sgpr_to_mem_wave32(s_save_exec_hi, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) - write_sgpr_to_mem_wave32(s_save_status, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //STATUS - - //s_save_trapsts conflicts with s_save_alloc_size - s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS) - write_sgpr_to_mem_wave32(s_save_trapsts, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //TRAPSTS - - //write_sgpr_to_mem_wave32(s_save_xnack_mask_lo, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //XNACK_MASK_LO - write_sgpr_to_mem_wave32(s_save_xnack_mask, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //XNACK_MASK_HI - - //use s_save_tmp would introduce conflict here between s_save_tmp and s_save_buf_rsrc2 - s_getreg_b32 s_save_m0, hwreg(HW_REG_MODE) //MODE - write_sgpr_to_mem_wave32(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) - if(SAVE_RESTORE_HWID_DDID) - s_getreg_b32 s_save_m0, hwreg(HW_REG_HW_ID1) //HW_ID1, handler records the SE/SA/WGP/SIMD/wave of the original wave - write_sgpr_to_mem_wave32(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) - end - s_branch L_S_PGM_END_SAVED - - L_SAVE_HWREG_WAVE64: - write_sgpr_to_mem_wave64(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //M0 - - if ((EMU_RUN_HACK) && (EMU_RUN_HACK_SAVE_FIRST_TIME)) - s_add_u32 s_save_pc_lo, s_save_pc_lo, 4 //pc[31:0]+4 - s_addc_u32 s_save_pc_hi, s_save_pc_hi, 0x0 //carry bit over - end - - write_sgpr_to_mem_wave64(s_save_pc_lo, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //PC - write_sgpr_to_mem_wave64(s_save_pc_hi, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) - write_sgpr_to_mem_wave64(s_save_exec_lo, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //EXEC - write_sgpr_to_mem_wave64(s_save_exec_hi, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) - write_sgpr_to_mem_wave64(s_save_status, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //STATUS - - //s_save_trapsts conflicts with s_save_alloc_size - s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS) - write_sgpr_to_mem_wave64(s_save_trapsts, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //TRAPSTS - - //write_sgpr_to_mem_wave64(s_save_xnack_mask_lo, s_save_buf_rsrc0, 
s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //XNACK_MASK_LO - write_sgpr_to_mem_wave64(s_save_xnack_mask, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //XNACK_MASK_HI - - //use s_save_tmp would introduce conflict here between s_save_tmp and s_save_buf_rsrc2 - s_getreg_b32 s_save_m0, hwreg(HW_REG_MODE) //MODE - write_sgpr_to_mem_wave64(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) - - - if(SAVE_RESTORE_HWID_DDID) - s_getreg_b32 s_save_m0, hwreg(HW_REG_HW_ID1) //HW_ID1, handler records the SE/SA/WGP/SIMD/wave of the original wave - write_sgpr_to_mem_wave64(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) - - /* save DDID */ - ////////////////////////////// - L_SAVE_DDID: - //EXEC has been saved, no vector inst following - s_mov_b32 exec_lo, 0x80000000 //Set MSB to 1. Cleared when draw index is returned - s_sendmsg sendmsg(MSG_GET_DDID) - - L_WAIT_DDID_LOOP: - s_nop 7 // sleep a bit - s_bitcmp0_b32 exec_lo, 31 // test to see if MSB is cleared, meaning done - s_cbranch_scc0 L_WAIT_DDID_LOOP - - s_mov_b32 s_save_m0, exec_lo - - - s_mov_b32 s_save_buf_rsrc2, 0x4 //NUM_RECORDS in bytes - if (SWIZZLE_EN) - s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? - else - s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes - end - s_and_b32 m0, s_wave_size, 1 - s_cmp_eq_u32 m0, 1 - s_cbranch_scc1 L_SAVE_DDID_WAVE64 - - write_sgpr_to_mem_wave32(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) - - L_SAVE_DDID_WAVE64: - write_sgpr_to_mem_wave64(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) - - end - - L_S_PGM_END_SAVED: - /* S_PGM_END_SAVED */ //FIXME graphics ONLY - if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_SAVE_NORMAL_EXIT)) - s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff //pc[47:32] - s_add_u32 s_save_pc_lo, s_save_pc_lo, 4 //pc[31:0]+4 - s_addc_u32 s_save_pc_hi, s_save_pc_hi, 0x0 //carry bit over - s_rfe_b64 s_save_pc_lo //Return to the main shader program - else - end - - - s_branch L_END_PGM - - - -/**************************************************************************/ -/* restore routine */ -/**************************************************************************/ + + /* inform SPI the readiness and wait for SPI's go signal */ + s_mov_b32 s_save_exec_lo, exec_lo //save EXEC and use EXEC for the go signal from SPI + s_mov_b32 s_save_exec_hi, exec_hi + s_mov_b64 exec, 0x0 //clear EXEC to get ready to receive + + s_sendmsg sendmsg(MSG_SAVEWAVE) //send SPI a message and wait for SPI's write to EXEC + +L_SLEEP: + // sleep 1 (64clk) is not enough for 8 waves per SIMD, which will cause + // SQ hang, since the 7,8th wave could not get arbit to exec inst, while + // other waves are stuck into the sleep-loop and waiting for wrexec!=0 + s_sleep 0x2 + s_cbranch_execz L_SLEEP + + /* setup Resource Contants */ + s_mov_b32 s_save_buf_rsrc0, s_save_spi_init_lo //base_addr_lo + s_and_b32 s_save_buf_rsrc1, s_save_spi_init_hi, 0x0000FFFF //base_addr_hi + s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE + s_mov_b32 s_save_buf_rsrc2, 0 //NUM_RECORDS initial value = 0 (in bytes) although not neccessarily inited + s_mov_b32 s_save_buf_rsrc3, S_SAVE_BUF_RSRC_WORD3_MISC + s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_ATC_MASK + s_lshr_b32 s_save_tmp, s_save_tmp, 
(S_SAVE_SPI_INIT_ATC_SHIFT-SQ_BUF_RSRC_WORD1_ATC_SHIFT) + s_or_b32 s_save_buf_rsrc3, s_save_buf_rsrc3, s_save_tmp //or ATC + s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_MTYPE_MASK + s_lshr_b32 s_save_tmp, s_save_tmp, (S_SAVE_SPI_INIT_MTYPE_SHIFT-SQ_BUF_RSRC_WORD3_MTYPE_SHIFT) + s_or_b32 s_save_buf_rsrc3, s_save_buf_rsrc3, s_save_tmp //or MTYPE + + s_mov_b32 s_save_m0, m0 + + /* global mem offset */ + s_mov_b32 s_save_mem_offset, 0x0 + s_getreg_b32 s_wave_size, hwreg(HW_REG_IB_STS2,SQ_WAVE_IB_STS2_WAVE64_SHIFT,SQ_WAVE_IB_STS2_WAVE64_SIZE) + s_lshl_b32 s_wave_size, s_wave_size, S_WAVE_SIZE + s_or_b32 s_wave_size, s_save_spi_init_hi, s_wave_size //share s_wave_size with exec_hi, it's at bit25 + + /* save HW registers */ + +L_SAVE_HWREG: + // HWREG SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR) + get_vgpr_size_bytes(s_save_mem_offset, s_wave_size) + get_svgpr_size_bytes(s_save_tmp) + s_add_u32 s_save_mem_offset, s_save_mem_offset, s_save_tmp + s_add_u32 s_save_mem_offset, s_save_mem_offset, get_sgpr_size_bytes() + + s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes + + write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset) + write_hwreg_to_mem(s_save_pc_lo, s_save_buf_rsrc0, s_save_mem_offset) + write_hwreg_to_mem(s_save_pc_hi, s_save_buf_rsrc0, s_save_mem_offset) + write_hwreg_to_mem(s_save_exec_lo, s_save_buf_rsrc0, s_save_mem_offset) + write_hwreg_to_mem(s_save_exec_hi, s_save_buf_rsrc0, s_save_mem_offset) + write_hwreg_to_mem(s_save_status, s_save_buf_rsrc0, s_save_mem_offset) + + s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS) + write_hwreg_to_mem(s_save_trapsts, s_save_buf_rsrc0, s_save_mem_offset) + write_hwreg_to_mem(s_save_xnack_mask, s_save_buf_rsrc0, s_save_mem_offset) + + s_getreg_b32 s_save_m0, hwreg(HW_REG_MODE) + write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset) + + /* the first wave in the threadgroup */ + s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_FIRST_WAVE_MASK + s_mov_b32 s_save_exec_hi, 0x0 + s_or_b32 s_save_exec_hi, s_save_tmp, s_save_exec_hi // save first wave bit to s_save_exec_hi.bits[26] + + /* save SGPRs */ + // Save SGPR before LDS save, then the s0 to s4 can be used during LDS save... + + // SGPR SR memory offset : size(VGPR)+size(SVGPR) + get_vgpr_size_bytes(s_save_mem_offset, s_wave_size) + get_svgpr_size_bytes(s_save_tmp) + s_add_u32 s_save_mem_offset, s_save_mem_offset, s_save_tmp + s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes + + // backup s_save_buf_rsrc0,1 to s_save_pc_lo/hi, since write_16sgpr_to_mem function will change the rsrc0 + s_mov_b32 s_save_xnack_mask, s_save_buf_rsrc0 + s_add_u32 s_save_buf_rsrc0, s_save_buf_rsrc0, s_save_mem_offset + s_addc_u32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0 + + s_mov_b32 m0, 0x0 //SGPR initial index value =0 + s_nop 0x0 //Manually inserted wait states +L_SAVE_SGPR_LOOP: + // SGPR is allocated in 16 SGPR granularity + s_movrels_b64 s0, s0 //s0 = s[0+m0], s1 = s[1+m0] + s_movrels_b64 s2, s2 //s2 = s[2+m0], s3 = s[3+m0] + s_movrels_b64 s4, s4 //s4 = s[4+m0], s5 = s[5+m0] + s_movrels_b64 s6, s6 //s6 = s[6+m0], s7 = s[7+m0] + s_movrels_b64 s8, s8 //s8 = s[8+m0], s9 = s[9+m0] + s_movrels_b64 s10, s10 //s10 = s[10+m0], s11 = s[11+m0] + s_movrels_b64 s12, s12 //s12 = s[12+m0], s13 = s[13+m0] + s_movrels_b64 s14, s14 //s14 = s[14+m0], s15 = s[15+m0] + + write_16sgpr_to_mem(s0, s_save_buf_rsrc0, s_save_mem_offset) + s_add_u32 m0, m0, 16 //next sgpr index + s_cmp_lt_u32 m0, 96 //scc = (m0 < first 96 SGPR) ? 
1 : 0 + s_cbranch_scc1 L_SAVE_SGPR_LOOP //first 96 SGPR save is complete? + + //save the rest 10 SGPR + s_movrels_b64 s0, s0 //s0 = s[0+m0], s1 = s[1+m0] + s_movrels_b64 s2, s2 //s2 = s[2+m0], s3 = s[3+m0] + s_movrels_b64 s4, s4 //s4 = s[4+m0], s5 = s[5+m0] + s_movrels_b64 s6, s6 //s6 = s[6+m0], s7 = s[7+m0] + s_movrels_b64 s8, s8 //s8 = s[8+m0], s9 = s[9+m0] + write_10sgpr_to_mem(s0, s_save_buf_rsrc0, s_save_mem_offset) + + // restore s_save_buf_rsrc0,1 + s_mov_b32 s_save_buf_rsrc0, s_save_xnack_mask + + /* save first 4 VGPR, then LDS save could use */ + // each wave will alloc 4 vgprs at least... + + s_mov_b32 s_save_mem_offset, 0 + s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on + s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE + s_and_b32 m0, m0, 1 + s_cmp_eq_u32 m0, 1 + s_cbranch_scc1 L_ENABLE_SAVE_4VGPR_EXEC_HI + s_mov_b32 exec_hi, 0x00000000 + s_branch L_SAVE_4VGPR_WAVE32 +L_ENABLE_SAVE_4VGPR_EXEC_HI: + s_mov_b32 exec_hi, 0xFFFFFFFF + s_branch L_SAVE_4VGPR_WAVE64 +L_SAVE_4VGPR_WAVE32: + s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes + + // VGPR Allocated in 4-GPR granularity + + buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 + buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:128 + buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:128*2 + buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:128*3 + s_branch L_SAVE_LDS + +L_SAVE_4VGPR_WAVE64: + s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes + + // VGPR Allocated in 4-GPR granularity + + buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 + buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256 + buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*2 + buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*3 + + /* save LDS */ + +L_SAVE_LDS: + // Change EXEC to all threads... + s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on + s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE + s_and_b32 m0, m0, 1 + s_cmp_eq_u32 m0, 1 + s_cbranch_scc1 L_ENABLE_SAVE_LDS_EXEC_HI + s_mov_b32 exec_hi, 0x00000000 + s_branch L_SAVE_LDS_NORMAL +L_ENABLE_SAVE_LDS_EXEC_HI: + s_mov_b32 exec_hi, 0xFFFFFFFF +L_SAVE_LDS_NORMAL: + s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE) + s_and_b32 s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF //lds_size is zero? + s_cbranch_scc0 L_SAVE_LDS_DONE //no lds used? jump to L_SAVE_DONE + + s_barrier //LDS is used? 
wait for other waves in the same TG + s_and_b32 s_save_tmp, s_save_exec_hi, S_SAVE_SPI_INIT_FIRST_WAVE_MASK + s_cbranch_scc0 L_SAVE_LDS_DONE + + // first wave do LDS save; + + s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 6 //LDS size in dwords = lds_size * 64dw + s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 2 //LDS size in bytes + s_mov_b32 s_save_buf_rsrc2, s_save_alloc_size //NUM_RECORDS in bytes + + // LDS at offset: size(VGPR)+size(SVGPR)+SIZE(SGPR)+SIZE(HWREG) + // + get_vgpr_size_bytes(s_save_mem_offset, s_wave_size) + get_svgpr_size_bytes(s_save_tmp) + s_add_u32 s_save_mem_offset, s_save_mem_offset, s_save_tmp + s_add_u32 s_save_mem_offset, s_save_mem_offset, get_sgpr_size_bytes() + s_add_u32 s_save_mem_offset, s_save_mem_offset, get_hwreg_size_bytes() + + s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes + + //load 0~63*4(byte address) to vgpr v0 + v_mbcnt_lo_u32_b32 v0, -1, 0 + v_mbcnt_hi_u32_b32 v0, -1, v0 + v_mul_u32_u24 v0, 4, v0 + + s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE + s_and_b32 m0, m0, 1 + s_cmp_eq_u32 m0, 1 + s_mov_b32 m0, 0x0 + s_cbranch_scc1 L_SAVE_LDS_W64 + +L_SAVE_LDS_W32: + s_mov_b32 s3, 128 + s_nop 0 + s_nop 0 + s_nop 0 +L_SAVE_LDS_LOOP_W32: + ds_read_b32 v1, v0 + s_waitcnt 0 + buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 + + s_add_u32 m0, m0, s3 //every buffer_store_lds does 256 bytes + s_add_u32 s_save_mem_offset, s_save_mem_offset, s3 + v_add_nc_u32 v0, v0, 128 //mem offset increased by 128 bytes + s_cmp_lt_u32 m0, s_save_alloc_size //scc=(m0 < s_save_alloc_size) ? 1 : 0 + s_cbranch_scc1 L_SAVE_LDS_LOOP_W32 //LDS save is complete? + + s_branch L_SAVE_LDS_DONE + +L_SAVE_LDS_W64: + s_mov_b32 s3, 256 + s_nop 0 + s_nop 0 + s_nop 0 +L_SAVE_LDS_LOOP_W64: + ds_read_b32 v1, v0 + s_waitcnt 0 + buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 + + s_add_u32 m0, m0, s3 //every buffer_store_lds does 256 bytes + s_add_u32 s_save_mem_offset, s_save_mem_offset, s3 + v_add_nc_u32 v0, v0, 256 //mem offset increased by 256 bytes + s_cmp_lt_u32 m0, s_save_alloc_size //scc=(m0 < s_save_alloc_size) ? 1 : 0 + s_cbranch_scc1 L_SAVE_LDS_LOOP_W64 //LDS save is complete? 
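The offset arithmetic in the new save path lays the context buffer out as VGPRs at offset 0, followed by shared VGPRs, SGPRs, HWREGs and finally LDS, which is why L_SAVE_HWREG, the SGPR block and L_SAVE_LDS each rebuild s_save_mem_offset from the get_*_size_bytes() helpers. Those helper macros are defined outside this hunk, so the sketch below only illustrates the ordering; the structure, function names and example sizes are assumptions, not code from the patch:

#include <stddef.h>
#include <stdio.h>

/*
 * Rough picture of the save-area ordering implied by the offset math in
 * this hunk: VGPRs at offset 0, then shared VGPRs, then SGPRs, then
 * HWREGs, then LDS.  The sizes stand in for get_vgpr_size_bytes(),
 * get_svgpr_size_bytes(), get_sgpr_size_bytes() and get_hwreg_size_bytes().
 */
struct cwsr_area {
	size_t vgpr_bytes;	/* VGPR rows, 128 or 256 bytes each */
	size_t svgpr_bytes;	/* shared VGPRs (wave64 only) */
	size_t sgpr_bytes;	/* the 106 saved SGPRs plus any padding */
	size_t hwreg_bytes;	/* M0, PC, EXEC, STATUS, TRAPSTS, ... */
};

static size_t sgpr_offset(const struct cwsr_area *a)
{
	return a->vgpr_bytes + a->svgpr_bytes;
}

static size_t hwreg_offset(const struct cwsr_area *a)
{
	return sgpr_offset(a) + a->sgpr_bytes;
}

static size_t lds_offset(const struct cwsr_area *a)
{
	return hwreg_offset(a) + a->hwreg_bytes;
}

int main(void)
{
	/* Example numbers only; real sizes come from GPR_ALLOC/LDS_ALLOC. */
	struct cwsr_area a = {
		.vgpr_bytes = 256 * 128,
		.svgpr_bytes = 0,
		.sgpr_bytes = 512,
		.hwreg_bytes = 64,
	};

	printf("SGPR @ %zu, HWREG @ %zu, LDS @ %zu\n",
	       sgpr_offset(&a), hwreg_offset(&a), lds_offset(&a));
	return 0;
}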
+ +L_SAVE_LDS_DONE: + /* save VGPRs - set the Rest VGPRs */ +L_SAVE_VGPR: + // VGPR SR memory offset: 0 + s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on + s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE + s_and_b32 m0, m0, 1 + s_cmp_eq_u32 m0, 1 + s_cbranch_scc1 L_ENABLE_SAVE_VGPR_EXEC_HI + s_mov_b32 s_save_mem_offset, (0+128*4) // for the rest VGPRs + s_mov_b32 exec_hi, 0x00000000 + s_branch L_SAVE_VGPR_NORMAL +L_ENABLE_SAVE_VGPR_EXEC_HI: + s_mov_b32 s_save_mem_offset, (0+256*4) // for the rest VGPRs + s_mov_b32 exec_hi, 0xFFFFFFFF +L_SAVE_VGPR_NORMAL: + s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE) + s_add_u32 s_save_alloc_size, s_save_alloc_size, 1 + s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value) + //determine it is wave32 or wave64 + s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE + s_and_b32 m0, m0, 1 + s_cmp_eq_u32 m0, 1 + s_cbranch_scc1 L_SAVE_VGPR_WAVE64 + + s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes + + // VGPR Allocated in 4-GPR granularity + + // VGPR store using dw burst + s_mov_b32 m0, 0x4 //VGPR initial index value =4 + s_cmp_lt_u32 m0, s_save_alloc_size + s_cbranch_scc0 L_SAVE_VGPR_END + +L_SAVE_VGPR_W32_LOOP: + v_movrels_b32 v0, v0 //v0 = v[0+m0] + v_movrels_b32 v1, v1 //v1 = v[1+m0] + v_movrels_b32 v2, v2 //v2 = v[2+m0] + v_movrels_b32 v3, v3 //v3 = v[3+m0] + + buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 + buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:128 + buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:128*2 + buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:128*3 + + s_add_u32 m0, m0, 4 //next vgpr index + s_add_u32 s_save_mem_offset, s_save_mem_offset, 128*4 //every buffer_store_dword does 128 bytes + s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0 + s_cbranch_scc1 L_SAVE_VGPR_W32_LOOP //VGPR save is complete? + + s_branch L_SAVE_VGPR_END + +L_SAVE_VGPR_WAVE64: + s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes + + // VGPR store using dw burst + s_mov_b32 m0, 0x4 //VGPR initial index value =4 + s_cmp_lt_u32 m0, s_save_alloc_size + s_cbranch_scc0 L_SAVE_VGPR_END + +L_SAVE_VGPR_W64_LOOP: + v_movrels_b32 v0, v0 //v0 = v[0+m0] + v_movrels_b32 v1, v1 //v1 = v[1+m0] + v_movrels_b32 v2, v2 //v2 = v[2+m0] + v_movrels_b32 v3, v3 //v3 = v[3+m0] + + buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 + buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256 + buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*2 + buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*3 + + s_add_u32 m0, m0, 4 //next vgpr index + s_add_u32 s_save_mem_offset, s_save_mem_offset, 256*4 //every buffer_store_dword does 256 bytes + s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0 + s_cbranch_scc1 L_SAVE_VGPR_W64_LOOP //VGPR save is complete? + + //Below part will be the save shared vgpr part (new for gfx10) + s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE) + s_and_b32 s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF //shared_vgpr_size is zero? + s_cbranch_scc0 L_SAVE_VGPR_END //no shared_vgpr used? 
jump to L_SAVE_LDS + s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 3 //Number of SHARED_VGPRs = shared_vgpr_size * 8 (non-zero value) + //m0 now has the value of normal vgpr count, just add the m0 with shared_vgpr count to get the total count. + //save shared_vgpr will start from the index of m0 + s_add_u32 s_save_alloc_size, s_save_alloc_size, m0 + s_mov_b32 exec_lo, 0xFFFFFFFF + s_mov_b32 exec_hi, 0x00000000 +L_SAVE_SHARED_VGPR_WAVE64_LOOP: + v_movrels_b32 v0, v0 //v0 = v[0+m0] + buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 + s_add_u32 m0, m0, 1 //next vgpr index + s_add_u32 s_save_mem_offset, s_save_mem_offset, 128 + s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0 + s_cbranch_scc1 L_SAVE_SHARED_VGPR_WAVE64_LOOP //SHARED_VGPR save is complete? + +L_SAVE_VGPR_END: + s_branch L_END_PGM L_RESTORE: - /* Setup Resource Contants */ - if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL)) - //calculate wd_addr using absolute thread id - v_readlane_b32 s_restore_tmp, v9, 0 - //determine it is wave32 or wave64 - s_getreg_b32 s_restore_size, hwreg(HW_REG_IB_STS2,SQ_WAVE_IB_STS2_WAVE64_SHIFT,SQ_WAVE_IB_STS2_WAVE64_SIZE) //change to ttmp13 - s_cmp_eq_u32 s_restore_size, 0 - s_cbranch_scc1 L_RESTORE_WAVE32 - s_lshr_b32 s_restore_tmp, s_restore_tmp, 6 //SAVE WAVE64 - s_branch L_RESTORE_CON - L_RESTORE_WAVE32: - s_lshr_b32 s_restore_tmp, s_restore_tmp, 5 //SAVE WAVE32 - L_RESTORE_CON: - s_mul_i32 s_restore_tmp, s_restore_tmp, WAVE_SPACE - s_add_i32 s_restore_spi_init_lo, s_restore_tmp, WG_BASE_ADDR_LO - s_mov_b32 s_restore_spi_init_hi, WG_BASE_ADDR_HI - s_and_b32 s_restore_spi_init_hi, s_restore_spi_init_hi, CTX_RESTORE_CONTROL - else - end - - s_mov_b32 s_restore_buf_rsrc0, s_restore_spi_init_lo //base_addr_lo - s_and_b32 s_restore_buf_rsrc1, s_restore_spi_init_hi, 0x0000FFFF //base_addr_hi - s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE - s_mov_b32 s_restore_buf_rsrc2, 0 //NUM_RECORDS initial value = 0 (in bytes) - s_mov_b32 s_restore_buf_rsrc3, S_RESTORE_BUF_RSRC_WORD3_MISC - s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_ATC_MASK - s_lshr_b32 s_restore_tmp, s_restore_tmp, (S_RESTORE_SPI_INIT_ATC_SHIFT-SQ_BUF_RSRC_WORD1_ATC_SHIFT) //get ATC bit into position - s_or_b32 s_restore_buf_rsrc3, s_restore_buf_rsrc3, s_restore_tmp //or ATC - s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_MTYPE_MASK - s_lshr_b32 s_restore_tmp, s_restore_tmp, (S_RESTORE_SPI_INIT_MTYPE_SHIFT-SQ_BUF_RSRC_WORD3_MTYPE_SHIFT) //get MTYPE bits into position - s_or_b32 s_restore_buf_rsrc3, s_restore_buf_rsrc3, s_restore_tmp //or MTYPE - //determine it is wave32 or wave64 - s_getreg_b32 s_restore_size, hwreg(HW_REG_IB_STS2,SQ_WAVE_IB_STS2_WAVE64_SHIFT,SQ_WAVE_IB_STS2_WAVE64_SIZE) - s_or_b32 s_restore_size, s_restore_spi_init_hi, s_restore_size //share s_wave_size with exec_hi - - /* global mem offset */ - s_mov_b32 s_restore_mem_offset, 0x0 //mem offset initial value = 0 - - /* restore VGPRs */ - ////////////////////////////// - L_RESTORE_VGPR: - - s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on //be consistent with SAVE although can be moved ahead - s_and_b32 m0, s_restore_size, 1 - s_cmp_eq_u32 m0, 1 - s_cbranch_scc1 L_ENABLE_RESTORE_VGPR_EXEC_HI - s_mov_b32 exec_hi, 0x00000000 - s_branch L_RESTORE_VGPR_NORMAL - L_ENABLE_RESTORE_VGPR_EXEC_HI: - s_mov_b32 exec_hi, 0xFFFFFFFF - L_RESTORE_VGPR_NORMAL: - s_getreg_b32 s_restore_alloc_size, 
hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE) //vpgr_size - s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 1 - s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value) - //determine it is wave32 or wave64 - s_and_b32 m0, s_restore_size, 1 - s_cmp_eq_u32 m0, 1 - s_cbranch_scc1 L_RESTORE_VGPR_WAVE64 - - s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 7 //NUM_RECORDS in bytes (32 threads*4) - if (SWIZZLE_EN) - s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? - else - s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes - end - - s_mov_b32 s_restore_mem_offset_save, s_restore_mem_offset // restore start with v1, v0 will be the last - s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 128 - s_mov_b32 m0, 1 //VGPR initial index value = 1 - //s_set_gpr_idx_on m0, 0x8 //M0[7:0] = M0[7:0] and M0[15:12] = 0x8 - //s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 0x8000 //add 0x8000 since we compare m0 against it later, might not need this in gfx10 - - L_RESTORE_VGPR_WAVE32_LOOP: - if(USE_MTBUF_INSTEAD_OF_MUBUF) - tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1 - else - buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 - end - s_waitcnt vmcnt(0) //ensure data ready - v_movreld_b32 v0, v0 //v[0+m0] = v0 - s_add_u32 m0, m0, 1 //next vgpr index - s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 128 //every buffer_load_dword does 128 bytes - s_cmp_lt_u32 m0, s_restore_alloc_size //scc = (m0 < s_restore_alloc_size) ? 1 : 0 - s_cbranch_scc1 L_RESTORE_VGPR_WAVE32_LOOP //VGPR restore (except v0) is complete? - //s_set_gpr_idx_off - /* VGPR restore on v0 */ - if(USE_MTBUF_INSTEAD_OF_MUBUF) - tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1 - else - buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 - end - - s_branch L_RESTORE_LDS - - L_RESTORE_VGPR_WAVE64: - s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 8 //NUM_RECORDS in bytes (64 threads*4) - if (SWIZZLE_EN) - s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? - else - s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes - end - - s_mov_b32 s_restore_mem_offset_save, s_restore_mem_offset // restore start with v1, v0 will be the last - s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256 - s_mov_b32 m0, 1 //VGPR initial index value = 1 - L_RESTORE_VGPR_WAVE64_LOOP: - if(USE_MTBUF_INSTEAD_OF_MUBUF) - tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1 - else - buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 - end - s_waitcnt vmcnt(0) //ensure data ready - v_movreld_b32 v0, v0 //v[0+m0] = v0 - s_add_u32 m0, m0, 1 //next vgpr index - s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256 //every buffer_load_dword does 256 bytes - s_cmp_lt_u32 m0, s_restore_alloc_size //scc = (m0 < s_restore_alloc_size) ? 1 : 0 - s_cbranch_scc1 L_RESTORE_VGPR_WAVE64_LOOP //VGPR restore (except v0) is complete? 
- //s_set_gpr_idx_off - // - //Below part will be the restore shared vgpr part (new for gfx10) - s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE) //shared_vgpr_size - s_and_b32 s_restore_alloc_size, s_restore_alloc_size, 0xFFFFFFFF //shared_vgpr_size is zero? - s_cbranch_scc0 L_RESTORE_V0 //no shared_vgpr used? jump to L_SAVE_LDS - s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 3 //Number of SHARED_VGPRs = shared_vgpr_size * 8 (non-zero value) - //m0 now has the value of normal vgpr count, just add the m0 with shared_vgpr count to get the total count. - //restore shared_vgpr will start from the index of m0 - s_add_u32 s_restore_alloc_size, s_restore_alloc_size, m0 - s_mov_b32 exec_lo, 0xFFFFFFFF - s_mov_b32 exec_hi, 0x00000000 - L_RESTORE_SHARED_VGPR_WAVE64_LOOP: - buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 - s_waitcnt vmcnt(0) //ensure data ready - v_movreld_b32 v0, v0 //v[0+m0] = v0 - s_add_u32 m0, m0, 1 //next vgpr index - s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 128 //every buffer_load_dword does 256 bytes - s_cmp_lt_u32 m0, s_restore_alloc_size //scc = (m0 < s_restore_alloc_size) ? 1 : 0 - s_cbranch_scc1 L_RESTORE_SHARED_VGPR_WAVE64_LOOP //VGPR restore (except v0) is complete? - - s_mov_b32 exec_hi, 0xFFFFFFFF //restore back exec_hi before restoring V0!! - - /* VGPR restore on v0 */ - L_RESTORE_V0: - if(USE_MTBUF_INSTEAD_OF_MUBUF) - tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1 - else - buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 - end - - - /* restore LDS */ - ////////////////////////////// - L_RESTORE_LDS: - - //Only need to check the first wave - /* the first wave in the threadgroup */ - s_and_b32 s_restore_tmp, s_restore_size, S_RESTORE_SPI_INIT_FIRST_WAVE_MASK - s_cbranch_scc0 L_RESTORE_SGPR - - s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on //be consistent with SAVE although can be moved ahead - s_and_b32 m0, s_restore_size, 1 - s_cmp_eq_u32 m0, 1 - s_cbranch_scc1 L_ENABLE_RESTORE_LDS_EXEC_HI - s_mov_b32 exec_hi, 0x00000000 - s_branch L_RESTORE_LDS_NORMAL - L_ENABLE_RESTORE_LDS_EXEC_HI: - s_mov_b32 exec_hi, 0xFFFFFFFF - L_RESTORE_LDS_NORMAL: - s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE) //lds_size - s_and_b32 s_restore_alloc_size, s_restore_alloc_size, 0xFFFFFFFF //lds_size is zero? - s_cbranch_scc0 L_RESTORE_SGPR //no lds used? jump to L_RESTORE_VGPR - s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 6 //LDS size in dwords = lds_size * 64dw - s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 2 //LDS size in bytes - s_mov_b32 s_restore_buf_rsrc2, s_restore_alloc_size //NUM_RECORDS in bytes - if (SWIZZLE_EN) - s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? 
- else - s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes - end - - s_and_b32 m0, s_wave_size, 1 - s_cmp_eq_u32 m0, 1 - s_mov_b32 m0, 0x0 - s_cbranch_scc1 L_RESTORE_LDS_LOOP_W64 - - L_RESTORE_LDS_LOOP_W32: - if (SAVE_LDS) - buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 - s_waitcnt 0 - end - s_add_u32 m0, m0, 128 //every buffer_load_dword does 256 bytes - s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 128 //mem offset increased by 256 bytes - s_cmp_lt_u32 m0, s_restore_alloc_size //scc=(m0 < s_restore_alloc_size) ? 1 : 0 - s_cbranch_scc1 L_RESTORE_LDS_LOOP_W32 //LDS restore is complete? - s_branch L_RESTORE_SGPR - - L_RESTORE_LDS_LOOP_W64: - if (SAVE_LDS) - buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 - s_waitcnt 0 - end - s_add_u32 m0, m0, 256 //every buffer_load_dword does 256 bytes - s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256 //mem offset increased by 256 bytes - s_cmp_lt_u32 m0, s_restore_alloc_size //scc=(m0 < s_restore_alloc_size) ? 1 : 0 - s_cbranch_scc1 L_RESTORE_LDS_LOOP_W64 //LDS restore is complete? - - - /* restore SGPRs */ - ////////////////////////////// - //s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SIZE) //spgr_size - //s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 1 - //s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 4 //Number of SGPRs = (sgpr_size + 1) * 16 (non-zero value) - //s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 3 //Number of SGPRs = (sgpr_size + 1) * 8 (non-zero value) - L_RESTORE_SGPR: - //need to look at it is wave32 or wave64 - s_and_b32 m0, s_restore_size, 1 - s_cmp_eq_u32 m0, 1 - s_cbranch_scc1 L_RESTORE_SGPR_VMEM_WAVE64 - if (SGPR_SAVE_USE_SQC) - s_lshl_b32 s_restore_buf_rsrc2, s_sgpr_save_num, 2 //NUM_RECORDS in bytes - else - s_lshl_b32 s_restore_buf_rsrc2, s_sgpr_save_num, 7 //NUM_RECORDS in bytes (32 threads) - end - s_branch L_RESTORE_SGPR_CONT - L_RESTORE_SGPR_VMEM_WAVE64: - if (SGPR_SAVE_USE_SQC) - s_lshl_b32 s_restore_buf_rsrc2, s_sgpr_save_num, 2 //NUM_RECORDS in bytes - else - s_lshl_b32 s_restore_buf_rsrc2, s_sgpr_save_num, 8 //NUM_RECORDS in bytes (64 threads) - end - - L_RESTORE_SGPR_CONT: - if (SWIZZLE_EN) - s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? - else - s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes - end - - s_and_b32 m0, s_restore_size, 1 - s_cmp_eq_u32 m0, 1 - s_cbranch_scc1 L_RESTORE_SGPR_WAVE64 - - read_sgpr_from_mem_wave32(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //save s0 to s_restore_tmp - s_mov_b32 m0, 0x1 - - L_RESTORE_SGPR_LOOP_WAVE32: - read_sgpr_from_mem_wave32(s0, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //PV: further performance improvement can be made - s_waitcnt lgkmcnt(0) //ensure data ready - s_movreld_b32 s0, s0 //s[0+m0] = s0 - s_nop 0 // hazard SALU M0=> S_MOVREL - s_add_u32 m0, m0, 1 //next sgpr index - s_cmp_lt_u32 m0, s_sgpr_save_num //scc = (m0 < s_restore_alloc_size) ? 1 : 0 - s_cbranch_scc1 L_RESTORE_SGPR_LOOP_WAVE32 //SGPR restore (except s0) is complete? 
- s_mov_b32 s0, s_restore_tmp /* SGPR restore on s0 */ - s_branch L_RESTORE_HWREG - - L_RESTORE_SGPR_WAVE64: - read_sgpr_from_mem_wave64(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //save s0 to s_restore_tmp - s_mov_b32 m0, 0x1 //SGPR initial index value =1 //go on with with s1 - - L_RESTORE_SGPR_LOOP_WAVE64: - read_sgpr_from_mem_wave64(s0, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //PV: further performance improvement can be made - s_waitcnt lgkmcnt(0) //ensure data ready - s_movreld_b32 s0, s0 //s[0+m0] = s0 - s_nop 0 // hazard SALU M0=> S_MOVREL - s_add_u32 m0, m0, 1 //next sgpr index - s_cmp_lt_u32 m0, s_sgpr_save_num //scc = (m0 < s_restore_alloc_size) ? 1 : 0 - s_cbranch_scc1 L_RESTORE_SGPR_LOOP_WAVE64 //SGPR restore (except s0) is complete? - s_mov_b32 s0, s_restore_tmp /* SGPR restore on s0 */ - - - /* restore HW registers */ - ////////////////////////////// - L_RESTORE_HWREG: - s_mov_b32 s_restore_buf_rsrc2, 0x4 //NUM_RECORDS in bytes - if (SWIZZLE_EN) - s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? - else - s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes - end - - s_and_b32 m0, s_restore_size, 1 - s_cmp_eq_u32 m0, 1 - s_cbranch_scc1 L_RESTORE_HWREG_WAVE64 - - read_sgpr_from_mem_wave32(s_restore_m0, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //M0 - read_sgpr_from_mem_wave32(s_restore_pc_lo, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //PC - read_sgpr_from_mem_wave32(s_restore_pc_hi, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) - read_sgpr_from_mem_wave32(s_restore_exec_lo, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //EXEC - read_sgpr_from_mem_wave32(s_restore_exec_hi, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) - read_sgpr_from_mem_wave32(s_restore_status, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //STATUS - read_sgpr_from_mem_wave32(s_restore_trapsts, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //TRAPSTS - //read_sgpr_from_mem_wave32(xnack_mask_lo, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //XNACK_MASK_LO - //read_sgpr_from_mem_wave32(xnack_mask_hi, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //XNACK_MASK_HI - read_sgpr_from_mem_wave32(s_restore_xnack_mask, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //XNACK_MASK - read_sgpr_from_mem_wave32(s_restore_mode, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //MODE - if(SAVE_RESTORE_HWID_DDID) - read_sgpr_from_mem_wave32(s_restore_hwid1, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //HW_ID1 - end - s_branch L_RESTORE_HWREG_FINISH - - L_RESTORE_HWREG_WAVE64: - read_sgpr_from_mem_wave64(s_restore_m0, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //M0 - read_sgpr_from_mem_wave64(s_restore_pc_lo, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //PC - read_sgpr_from_mem_wave64(s_restore_pc_hi, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) - read_sgpr_from_mem_wave64(s_restore_exec_lo, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //EXEC - read_sgpr_from_mem_wave64(s_restore_exec_hi, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) - read_sgpr_from_mem_wave64(s_restore_status, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //STATUS - read_sgpr_from_mem_wave64(s_restore_trapsts, s_restore_buf_rsrc0, 
s_restore_mem_offset, SGPR_SAVE_USE_SQC) //TRAPSTS - //read_sgpr_from_mem_wave64(xnack_mask_lo, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //XNACK_MASK_LO - //read_sgpr_from_mem_wave64(xnack_mask_hi, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //XNACK_MASK_HI - read_sgpr_from_mem_wave64(s_restore_xnack_mask, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //XNACK_MASK - read_sgpr_from_mem_wave64(s_restore_mode, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //MODE - if(SAVE_RESTORE_HWID_DDID) - read_sgpr_from_mem_wave64(s_restore_hwid1, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //HW_ID1 - end - L_RESTORE_HWREG_FINISH: - s_waitcnt lgkmcnt(0) //from now on, it is safe to restore STATUS and IB_STS - - - - if(SAVE_RESTORE_HWID_DDID) - L_RESTORE_DDID: - s_mov_b32 m0, s_restore_hwid1 //virture ttrace support: The save-context handler records the SE/SA/WGP/SIMD/wave of the original wave - s_ttracedata //and then can output it as SHADER_DATA to ttrace on restore to provide a correlation across the save-restore - - s_mov_b32 s_restore_buf_rsrc2, 0x4 //NUM_RECORDS in bytes - if (SWIZZLE_EN) - s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? - else - s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes - end - - s_and_b32 m0, s_restore_size, 1 - s_cmp_eq_u32 m0, 1 - s_cbranch_scc1 L_RESTORE_DDID_WAVE64 - - read_sgpr_from_mem_wave32(s_restore_ddid, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) - s_branch L_RESTORE_DDID_FINISH - L_RESTORE_DDID_WAVE64: - read_sgpr_from_mem_wave64(s_restore_ddid, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) - - L_RESTORE_DDID_FINISH: - s_waitcnt lgkmcnt(0) - //s_mov_b32 m0, s_restore_ddid - //s_ttracedata - if (RESTORE_DDID_IN_SGPR18) - s_mov_b32 s18, s_restore_ddid - end - - end - - s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS - - //for normal save & restore, the saved PC points to the next inst to execute, no adjustment needs to be made, otherwise: - if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL)) - s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 8 //pc[31:0]+8 //two back-to-back s_trap are used (first for save and second for restore) - s_addc_u32 s_restore_pc_hi, s_restore_pc_hi, 0x0 //carry bit over - end - if ((EMU_RUN_HACK) && (EMU_RUN_HACK_RESTORE_NORMAL)) - s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 4 //pc[31:0]+4 // save is hack through s_trap but restore is normal - s_addc_u32 s_restore_pc_hi, s_restore_pc_hi, 0x0 //carry bit over - end - - s_mov_b32 m0, s_restore_m0 - s_mov_b32 exec_lo, s_restore_exec_lo - s_mov_b32 exec_hi, s_restore_exec_hi - - s_and_b32 s_restore_m0, SQ_WAVE_TRAPSTS_PRE_SAVECTX_MASK, s_restore_trapsts + /* Setup Resource Contants */ + s_mov_b32 s_restore_buf_rsrc0, s_restore_spi_init_lo //base_addr_lo + s_and_b32 s_restore_buf_rsrc1, s_restore_spi_init_hi, 0x0000FFFF //base_addr_hi + s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE + s_mov_b32 s_restore_buf_rsrc2, 0 //NUM_RECORDS initial value = 0 (in bytes) + s_mov_b32 s_restore_buf_rsrc3, S_RESTORE_BUF_RSRC_WORD3_MISC + s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_ATC_MASK + s_lshr_b32 s_restore_tmp, s_restore_tmp, (S_RESTORE_SPI_INIT_ATC_SHIFT-SQ_BUF_RSRC_WORD1_ATC_SHIFT) + s_or_b32 s_restore_buf_rsrc3, s_restore_buf_rsrc3, s_restore_tmp //or ATC + s_and_b32 s_restore_tmp, 
s_restore_spi_init_hi, S_RESTORE_SPI_INIT_MTYPE_MASK + s_lshr_b32 s_restore_tmp, s_restore_tmp, (S_RESTORE_SPI_INIT_MTYPE_SHIFT-SQ_BUF_RSRC_WORD3_MTYPE_SHIFT) + s_or_b32 s_restore_buf_rsrc3, s_restore_buf_rsrc3, s_restore_tmp //or MTYPE + //determine it is wave32 or wave64 + s_getreg_b32 s_restore_size, hwreg(HW_REG_IB_STS2,SQ_WAVE_IB_STS2_WAVE64_SHIFT,SQ_WAVE_IB_STS2_WAVE64_SIZE) + s_lshl_b32 s_restore_size, s_restore_size, S_WAVE_SIZE + s_or_b32 s_restore_size, s_restore_spi_init_hi, s_restore_size + + s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_FIRST_WAVE_MASK + s_cbranch_scc0 L_RESTORE_VGPR + + /* restore LDS */ +L_RESTORE_LDS: + s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on + s_lshr_b32 m0, s_restore_size, S_WAVE_SIZE + s_and_b32 m0, m0, 1 + s_cmp_eq_u32 m0, 1 + s_cbranch_scc1 L_ENABLE_RESTORE_LDS_EXEC_HI + s_mov_b32 exec_hi, 0x00000000 + s_branch L_RESTORE_LDS_NORMAL +L_ENABLE_RESTORE_LDS_EXEC_HI: + s_mov_b32 exec_hi, 0xFFFFFFFF +L_RESTORE_LDS_NORMAL: + s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE) + s_and_b32 s_restore_alloc_size, s_restore_alloc_size, 0xFFFFFFFF //lds_size is zero? + s_cbranch_scc0 L_RESTORE_VGPR //no lds used? jump to L_RESTORE_VGPR + s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 6 //LDS size in dwords = lds_size * 64dw + s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 2 //LDS size in bytes + s_mov_b32 s_restore_buf_rsrc2, s_restore_alloc_size //NUM_RECORDS in bytes + + // LDS at offset: size(VGPR)+size(SVGPR)+SIZE(SGPR)+SIZE(HWREG) + // + get_vgpr_size_bytes(s_restore_mem_offset, s_restore_size) + get_svgpr_size_bytes(s_restore_tmp) + s_add_u32 s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp + s_add_u32 s_restore_mem_offset, s_restore_mem_offset, get_sgpr_size_bytes() + s_add_u32 s_restore_mem_offset, s_restore_mem_offset, get_hwreg_size_bytes() + + s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes + + s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE + s_and_b32 m0, m0, 1 + s_cmp_eq_u32 m0, 1 + s_mov_b32 m0, 0x0 + s_cbranch_scc1 L_RESTORE_LDS_LOOP_W64 + +L_RESTORE_LDS_LOOP_W32: + buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 // first 64DW + s_add_u32 m0, m0, 128 // 128 DW + s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 128 //mem offset increased by 128DW + s_cmp_lt_u32 m0, s_restore_alloc_size //scc=(m0 < s_restore_alloc_size) ? 1 : 0 + s_cbranch_scc1 L_RESTORE_LDS_LOOP_W32 //LDS restore is complete? + s_branch L_RESTORE_VGPR + +L_RESTORE_LDS_LOOP_W64: + buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 // first 64DW + s_add_u32 m0, m0, 256 // 256 DW + s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256 //mem offset increased by 256DW + s_cmp_lt_u32 m0, s_restore_alloc_size //scc=(m0 < s_restore_alloc_size) ? 1 : 0 + s_cbranch_scc1 L_RESTORE_LDS_LOOP_W64 //LDS restore is complete? 
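
The LDS block restored above sits at the end of the per-wave save area, after the VGPRs, the shared VGPRs, a fixed 512-byte SGPR block and a 128-byte HWREG block; that is the arithmetic the get_vgpr_size_bytes / get_svgpr_size_bytes / get_sgpr_size_bytes / get_hwreg_size_bytes helpers added later in this patch encode. A rough, self-contained C model of that arithmetic (the function names here are invented for illustration and are not part of the patch):

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Illustrative model of the gfx10 save-area layout assumed above:
     *   [VGPR][shared VGPR][SGPR: 512 bytes][HWREG: 128 bytes][LDS]
     * The *_field values come from HW_REG_GPR_ALLOC / HW_REG_LDS_ALLOC.
     */
    uint32_t vgpr_size_bytes(uint32_t vgpr_size_field, bool wave64)
    {
            /* (vgpr_size + 1) granules of 4 VGPRs, 4 bytes per lane */
            return (vgpr_size_field + 1) * 4 * (wave64 ? 64 : 32) * 4;
    }

    uint32_t svgpr_size_bytes(uint32_t shared_vgpr_size_field)
    {
            /* shared_vgpr_size granules of 8 VGPRs, 32 lanes, 4 bytes */
            return shared_vgpr_size_field * 8 * 32 * 4;
    }

    uint32_t lds_restore_offset(uint32_t vgpr_field, uint32_t svgpr_field,
                                bool wave64)
    {
            return vgpr_size_bytes(vgpr_field, wave64) +
                   svgpr_size_bytes(svgpr_field) +
                   512 /* SGPR block */ + 128 /* HWREG block */;
    }
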
+ + /* restore VGPRs */ +L_RESTORE_VGPR: + // VGPR SR memory offset : 0 + s_mov_b32 s_restore_mem_offset, 0x0 + s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on + s_lshr_b32 m0, s_restore_size, S_WAVE_SIZE + s_and_b32 m0, m0, 1 + s_cmp_eq_u32 m0, 1 + s_cbranch_scc1 L_ENABLE_RESTORE_VGPR_EXEC_HI + s_mov_b32 exec_hi, 0x00000000 + s_branch L_RESTORE_VGPR_NORMAL +L_ENABLE_RESTORE_VGPR_EXEC_HI: + s_mov_b32 exec_hi, 0xFFFFFFFF +L_RESTORE_VGPR_NORMAL: + s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE) + s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 1 + s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value) + //determine it is wave32 or wave64 + s_lshr_b32 m0, s_restore_size, S_WAVE_SIZE + s_and_b32 m0, m0, 1 + s_cmp_eq_u32 m0, 1 + s_cbranch_scc1 L_RESTORE_VGPR_WAVE64 + + s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes + + // VGPR load using dw burst + s_mov_b32 s_restore_mem_offset_save, s_restore_mem_offset // restore start with v1, v0 will be the last + s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 128*4 + s_mov_b32 m0, 4 //VGPR initial index value = 4 + +L_RESTORE_VGPR_WAVE32_LOOP: + buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 + buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:128 + buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:128*2 + buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:128*3 + s_waitcnt vmcnt(0) + v_movreld_b32 v0, v0 //v[0+m0] = v0 + v_movreld_b32 v1, v1 + v_movreld_b32 v2, v2 + v_movreld_b32 v3, v3 + s_add_u32 m0, m0, 4 //next vgpr index + s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 128*4 //every buffer_load_dword does 128 bytes + s_cmp_lt_u32 m0, s_restore_alloc_size //scc = (m0 < s_restore_alloc_size) ? 1 : 0 + s_cbranch_scc1 L_RESTORE_VGPR_WAVE32_LOOP //VGPR restore (except v0) is complete? 
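
The burst loop above restores four VGPRs per iteration starting at v4 and leaves v0..v3 for last, because those four registers are still in use as load destinations while the loop runs. With the wave32 per-register stride of 128 bytes (32 lanes * 4 bytes each), every register is read back from base + index * 128, which the following small C model spells out (illustration only, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    /* Wave32: each saved VGPR occupies 128 bytes (32 lanes * 4 bytes). */
    uint32_t wave32_vgpr_restore_offset(uint32_t vgpr_index, uint32_t vgpr_base)
    {
            return vgpr_base + vgpr_index * 128;
    }

    int main(void)
    {
            /* v4..v7 come from the first loop pass, v0..v3 from the tail */
            for (uint32_t v = 0; v < 8; v++)
                    printf("v%u <- offset %u\n", v,
                           wave32_vgpr_restore_offset(v, 0));
            return 0;
    }

The wave64 loop that follows is the same pattern with a 256-byte per-register stride.
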
+ + /* VGPR restore on v0 */ + buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 + buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:128 + buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:128*2 + buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:128*3 + + s_branch L_RESTORE_SGPR + +L_RESTORE_VGPR_WAVE64: + s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes + + // VGPR load using dw burst + s_mov_b32 s_restore_mem_offset_save, s_restore_mem_offset // restore start with v4, v0 will be the last + s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4 + s_mov_b32 m0, 4 //VGPR initial index value = 4 + +L_RESTORE_VGPR_WAVE64_LOOP: + buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 + buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256 + buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256*2 + buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256*3 + s_waitcnt vmcnt(0) + v_movreld_b32 v0, v0 //v[0+m0] = v0 + v_movreld_b32 v1, v1 + v_movreld_b32 v2, v2 + v_movreld_b32 v3, v3 + s_add_u32 m0, m0, 4 //next vgpr index + s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4 //every buffer_load_dword does 256 bytes + s_cmp_lt_u32 m0, s_restore_alloc_size //scc = (m0 < s_restore_alloc_size) ? 1 : 0 + s_cbranch_scc1 L_RESTORE_VGPR_WAVE64_LOOP //VGPR restore (except v0) is complete? + + //Below part will be the restore shared vgpr part (new for gfx10) + s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE) //shared_vgpr_size + s_and_b32 s_restore_alloc_size, s_restore_alloc_size, 0xFFFFFFFF //shared_vgpr_size is zero? + s_cbranch_scc0 L_RESTORE_V0 //no shared_vgpr used? + s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 3 //Number of SHARED_VGPRs = shared_vgpr_size * 8 (non-zero value) + //m0 now has the value of normal vgpr count, just add the m0 with shared_vgpr count to get the total count. + //restore shared_vgpr will start from the index of m0 + s_add_u32 s_restore_alloc_size, s_restore_alloc_size, m0 + s_mov_b32 exec_lo, 0xFFFFFFFF + s_mov_b32 exec_hi, 0x00000000 +L_RESTORE_SHARED_VGPR_WAVE64_LOOP: + buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 + s_waitcnt vmcnt(0) + v_movreld_b32 v0, v0 //v[0+m0] = v0 + s_add_u32 m0, m0, 1 //next vgpr index + s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 128 + s_cmp_lt_u32 m0, s_restore_alloc_size //scc = (m0 < s_restore_alloc_size) ? 1 : 0 + s_cbranch_scc1 L_RESTORE_SHARED_VGPR_WAVE64_LOOP //VGPR restore (except v0) is complete? + + s_mov_b32 exec_hi, 0xFFFFFFFF //restore back exec_hi before restoring V0!! 
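
Shared VGPRs exist only in wave64 mode and are 32 lanes wide, which is why the loop above narrows EXEC to the low half (exec_hi = 0), steps the buffer offset by 128 bytes per register instead of 256, and simply continues the m0 index past the normal VGPRs. A hedged sketch of the resulting register count (helper name invented for illustration):

    #include <stdint.h>

    /*
     * Illustrative only: wave64 restores (vgpr_size + 1) * 4 normal VGPRs
     * followed by shared_vgpr_size * 8 shared VGPRs; the loop walks m0 from
     * the normal count up to this combined total.
     */
    uint32_t wave64_total_vgpr_slots(uint32_t vgpr_size_field,
                                     uint32_t shared_vgpr_size_field)
    {
            uint32_t normal = (vgpr_size_field + 1) * 4;
            uint32_t shared = shared_vgpr_size_field * 8;

            return normal + shared;
    }
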
+ + /* VGPR restore on v0 */ +L_RESTORE_V0: + buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 + buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:128 + buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:128*2 + buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:128*3 + + /* restore SGPRs */ + //will be 2+8+16*6 + // SGPR SR memory offset : size(VGPR)+size(SVGPR) +L_RESTORE_SGPR: + get_vgpr_size_bytes(s_restore_mem_offset, s_restore_size) + get_svgpr_size_bytes(s_restore_tmp) + s_add_u32 s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp + s_add_u32 s_restore_mem_offset, s_restore_mem_offset, get_sgpr_size_bytes() + s_sub_u32 s_restore_mem_offset, s_restore_mem_offset, 22*4 //s106~s127 is not saved + s_sub_u32 s_restore_mem_offset, s_restore_mem_offset, 2*4 // restore SGPR from S[n] to S[0], by 2 sgprs group + + s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes + + s_mov_b32 m0, s_sgpr_save_num + + read_2sgpr_from_mem(s0, s_restore_buf_rsrc0, s_restore_mem_offset) + + s_waitcnt lgkmcnt(0) + + s_sub_u32 m0, m0, 2 // Restore from S[n] to S[0] + s_nop 0 // hazard SALU M0=> S_MOVREL + + s_movreld_b64 s0, s0 //s[0+m0] = s0 + + read_8sgpr_from_mem(s0, s_restore_buf_rsrc0, s_restore_mem_offset) + s_waitcnt lgkmcnt(0) + + s_sub_u32 m0, m0, 8 // Restore from S[n] to S[0] + s_nop 0 // hazard SALU M0=> S_MOVREL + + s_movreld_b64 s0, s0 //s[0+m0] = s0 + s_movreld_b64 s2, s2 + s_movreld_b64 s4, s4 + s_movreld_b64 s6, s6 + + L_RESTORE_SGPR_LOOP: + read_16sgpr_from_mem(s0, s_restore_buf_rsrc0, s_restore_mem_offset) + s_waitcnt lgkmcnt(0) + + s_sub_u32 m0, m0, 16 // Restore from S[n] to S[0] + s_nop 0 // hazard SALU M0=> S_MOVREL + + s_movreld_b64 s0, s0 //s[0+m0] = s0 + s_movreld_b64 s2, s2 + s_movreld_b64 s4, s4 + s_movreld_b64 s6, s6 + s_movreld_b64 s8, s8 + s_movreld_b64 s10, s10 + s_movreld_b64 s12, s12 + s_movreld_b64 s14, s14 + + s_cmp_eq_u32 m0, 0 //scc = (m0 < s_sgpr_save_num) ? 
1 : 0 + s_cbranch_scc0 L_RESTORE_SGPR_LOOP + + /* restore HW registers */ +L_RESTORE_HWREG: + // HWREG SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR) + get_vgpr_size_bytes(s_restore_mem_offset, s_restore_size) + get_svgpr_size_bytes(s_restore_tmp) + s_add_u32 s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp + s_add_u32 s_restore_mem_offset, s_restore_mem_offset, get_sgpr_size_bytes() + + s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes + + read_hwreg_from_mem(s_restore_m0, s_restore_buf_rsrc0, s_restore_mem_offset) + read_hwreg_from_mem(s_restore_pc_lo, s_restore_buf_rsrc0, s_restore_mem_offset) + read_hwreg_from_mem(s_restore_pc_hi, s_restore_buf_rsrc0, s_restore_mem_offset) + read_hwreg_from_mem(s_restore_exec_lo, s_restore_buf_rsrc0, s_restore_mem_offset) + read_hwreg_from_mem(s_restore_exec_hi, s_restore_buf_rsrc0, s_restore_mem_offset) + read_hwreg_from_mem(s_restore_status, s_restore_buf_rsrc0, s_restore_mem_offset) + read_hwreg_from_mem(s_restore_trapsts, s_restore_buf_rsrc0, s_restore_mem_offset) + read_hwreg_from_mem(s_restore_xnack_mask, s_restore_buf_rsrc0, s_restore_mem_offset) + read_hwreg_from_mem(s_restore_mode, s_restore_buf_rsrc0, s_restore_mem_offset) + + s_waitcnt lgkmcnt(0) //from now on, it is safe to restore STATUS and IB_STS + + s_mov_b32 s_restore_tmp, s_restore_pc_hi + s_and_b32 s_restore_pc_hi, s_restore_tmp, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS + + s_mov_b32 m0, s_restore_m0 + s_mov_b32 exec_lo, s_restore_exec_lo + s_mov_b32 exec_hi, s_restore_exec_hi + + s_and_b32 s_restore_m0, SQ_WAVE_TRAPSTS_PRE_SAVECTX_MASK, s_restore_trapsts s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_PRE_SAVECTX_SHIFT, SQ_WAVE_TRAPSTS_PRE_SAVECTX_SIZE), s_restore_m0 - s_setreg_b32 hwreg(HW_REG_SHADER_XNACK_MASK), s_restore_xnack_mask //restore xnack_mask - s_and_b32 s_restore_m0, SQ_WAVE_TRAPSTS_POST_SAVECTX_MASK, s_restore_trapsts - s_lshr_b32 s_restore_m0, s_restore_m0, SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT + s_setreg_b32 hwreg(HW_REG_SHADER_XNACK_MASK), s_restore_xnack_mask + s_and_b32 s_restore_m0, SQ_WAVE_TRAPSTS_POST_SAVECTX_MASK, s_restore_trapsts + s_lshr_b32 s_restore_m0, s_restore_m0, SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT, SQ_WAVE_TRAPSTS_POST_SAVECTX_SIZE), s_restore_m0 - //s_setreg_b32 hwreg(HW_REG_TRAPSTS), s_restore_trapsts //don't overwrite SAVECTX bit as it may be set through external SAVECTX during restore - s_setreg_b32 hwreg(HW_REG_MODE), s_restore_mode - //reuse s_restore_m0 as a temp register - s_and_b32 s_restore_m0, s_restore_pc_hi, S_SAVE_PC_HI_RCNT_MASK - s_lshr_b32 s_restore_m0, s_restore_m0, S_SAVE_PC_HI_RCNT_SHIFT - s_lshl_b32 s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_RCNT_SHIFT - s_mov_b32 s_restore_tmp, 0x0 //IB_STS is zero - s_or_b32 s_restore_tmp, s_restore_tmp, s_restore_m0 - s_and_b32 s_restore_m0, s_restore_pc_hi, S_SAVE_PC_HI_FIRST_REPLAY_MASK - s_lshr_b32 s_restore_m0, s_restore_m0, S_SAVE_PC_HI_FIRST_REPLAY_SHIFT - s_lshl_b32 s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT - s_or_b32 s_restore_tmp, s_restore_tmp, s_restore_m0 - s_and_b32 s_restore_m0, s_restore_status, SQ_WAVE_STATUS_INST_ATC_MASK - s_lshr_b32 s_restore_m0, s_restore_m0, SQ_WAVE_STATUS_INST_ATC_SHIFT - s_setreg_b32 hwreg(HW_REG_IB_STS), s_restore_tmp - s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status - - s_barrier //barrier to ensure the readiness of LDS before access attemps from any other wave in the same TG //FIXME not performance-optimal 
at this time - - -// s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution - s_rfe_b64 s_restore_pc_lo // s_restore_m0[0] is used to set STATUS.inst_atc - - -/**************************************************************************/ -/* the END */ -/**************************************************************************/ -L_END_PGM: + s_setreg_b32 hwreg(HW_REG_MODE), s_restore_mode + s_and_b32 s_restore_m0, s_restore_tmp, S_SAVE_PC_HI_RCNT_MASK + s_lshr_b32 s_restore_m0, s_restore_m0, S_SAVE_PC_HI_RCNT_SHIFT + s_lshl_b32 s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_RCNT_SHIFT + s_mov_b32 s_restore_mode, 0x0 + s_or_b32 s_restore_mode, s_restore_mode, s_restore_m0 + s_and_b32 s_restore_m0, s_restore_tmp, S_SAVE_PC_HI_FIRST_REPLAY_MASK + s_lshr_b32 s_restore_m0, s_restore_m0, S_SAVE_PC_HI_FIRST_REPLAY_SHIFT + s_lshl_b32 s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT + s_or_b32 s_restore_mode, s_restore_mode, s_restore_m0 + s_and_b32 s_restore_m0, s_restore_tmp, S_SAVE_PC_HI_REPLAY_W64H_MASK + s_lshr_b32 s_restore_m0, s_restore_m0, S_SAVE_PC_HI_REPLAY_W64H_SHIFT + s_lshl_b32 s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_REPLAY_W64H_SHIFT + s_or_b32 s_restore_mode, s_restore_mode, s_restore_m0 + + s_and_b32 s_restore_m0, s_restore_status, SQ_WAVE_STATUS_INST_ATC_MASK + s_lshr_b32 s_restore_m0, s_restore_m0, SQ_WAVE_STATUS_INST_ATC_SHIFT + s_setreg_b32 hwreg(HW_REG_IB_STS), s_restore_mode + + s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32 + s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32 + s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status // SCC is included, which is changed by previous salu + + s_barrier //barrier to ensure the readiness of LDS before access attemps from any other wave in the same TG + + s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution + +L_END_PGM: s_endpgm - -end - - -/**************************************************************************/ -/* the helper functions */ -/**************************************************************************/ -function write_sgpr_to_mem_wave32(s, s_rsrc, s_mem_offset, use_sqc, use_mtbuf) - if (use_sqc) - s_mov_b32 exec_lo, m0 //assuming exec_lo is not needed anymore from this point on - s_mov_b32 m0, s_mem_offset - s_buffer_store_dword s, s_rsrc, m0 glc:1 - s_add_u32 s_mem_offset, s_mem_offset, 4 - s_mov_b32 m0, exec_lo - elsif (use_mtbuf) - v_mov_b32 v0, s - tbuffer_store_format_x v0, v0, s_rsrc, s_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1 - s_add_u32 s_mem_offset, s_mem_offset, 128 - else - v_mov_b32 v0, s - buffer_store_dword v0, v0, s_rsrc, s_mem_offset slc:1 glc:1 - s_add_u32 s_mem_offset, s_mem_offset, 128 - end end -function write_sgpr_to_mem_wave64(s, s_rsrc, s_mem_offset, use_sqc, use_mtbuf) - if (use_sqc) - s_mov_b32 exec_lo, m0 //assuming exec_lo is not needed anymore from this point on - s_mov_b32 m0, s_mem_offset - s_buffer_store_dword s, s_rsrc, m0 glc:1 - s_add_u32 s_mem_offset, s_mem_offset, 4 - s_mov_b32 m0, exec_lo - elsif (use_mtbuf) - v_mov_b32 v0, s - tbuffer_store_format_x v0, v0, s_rsrc, s_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1 - s_add_u32 s_mem_offset, s_mem_offset, 256 - else - v_mov_b32 v0, s - buffer_store_dword v0, v0, s_rsrc, s_mem_offset slc:1 glc:1 - s_add_u32 s_mem_offset, s_mem_offset, 256 - end +function write_hwreg_to_mem(s, s_rsrc, s_mem_offset) + s_mov_b32 exec_lo, m0 + s_mov_b32 m0, 
s_mem_offset + s_buffer_store_dword s, s_rsrc, m0 glc:1 + s_add_u32 s_mem_offset, s_mem_offset, 4 + s_mov_b32 m0, exec_lo +end + + +function write_16sgpr_to_mem(s, s_rsrc, s_mem_offset) + s_buffer_store_dwordx4 s[0], s_rsrc, 0 glc:1 + s_buffer_store_dwordx4 s[4], s_rsrc, 16 glc:1 + s_buffer_store_dwordx4 s[8], s_rsrc, 32 glc:1 + s_buffer_store_dwordx4 s[12], s_rsrc, 48 glc:1 + s_add_u32 s_rsrc[0], s_rsrc[0], 4*16 + s_addc_u32 s_rsrc[1], s_rsrc[1], 0x0 +end + +function write_10sgpr_to_mem(s, s_rsrc, s_mem_offset) + s_buffer_store_dwordx4 s[0], s_rsrc, 0 glc:1 + s_buffer_store_dwordx4 s[4], s_rsrc, 16 glc:1 + s_buffer_store_dwordx2 s[8], s_rsrc, 32 glc:1 + s_add_u32 s_rsrc[0], s_rsrc[0], 4*16 + s_addc_u32 s_rsrc[1], s_rsrc[1], 0x0 +end + + +function read_hwreg_from_mem(s, s_rsrc, s_mem_offset) + s_buffer_load_dword s, s_rsrc, s_mem_offset glc:1 + s_add_u32 s_mem_offset, s_mem_offset, 4 end -function read_sgpr_from_mem_wave32(s, s_rsrc, s_mem_offset, use_sqc) - s_buffer_load_dword s, s_rsrc, s_mem_offset glc:1 - if (use_sqc) - s_add_u32 s_mem_offset, s_mem_offset, 4 - else - s_add_u32 s_mem_offset, s_mem_offset, 128 - end +function read_16sgpr_from_mem(s, s_rsrc, s_mem_offset) + s_buffer_load_dwordx16 s, s_rsrc, s_mem_offset glc:1 + s_sub_u32 s_mem_offset, s_mem_offset, 4*16 end -function read_sgpr_from_mem_wave64(s, s_rsrc, s_mem_offset, use_sqc) - s_buffer_load_dword s, s_rsrc, s_mem_offset glc:1 - if (use_sqc) - s_add_u32 s_mem_offset, s_mem_offset, 4 - else - s_add_u32 s_mem_offset, s_mem_offset, 256 - end +function read_8sgpr_from_mem(s, s_rsrc, s_mem_offset) + s_buffer_load_dwordx8 s, s_rsrc, s_mem_offset glc:1 + s_sub_u32 s_mem_offset, s_mem_offset, 4*16 end +function read_2sgpr_from_mem(s, s_rsrc, s_mem_offset) + s_buffer_load_dwordx2 s, s_rsrc, s_mem_offset glc:1 + s_sub_u32 s_mem_offset, s_mem_offset, 4*8 +end + + +function get_lds_size_bytes(s_lds_size_byte) + s_getreg_b32 s_lds_size_byte, hwreg(HW_REG_LDS_ALLOC, SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT, SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE) + s_lshl_b32 s_lds_size_byte, s_lds_size_byte, 8 //LDS size in dwords = lds_size * 64 *4Bytes // granularity 64DW +end + +function get_vgpr_size_bytes(s_vgpr_size_byte, s_size) + s_getreg_b32 s_vgpr_size_byte, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE) + s_add_u32 s_vgpr_size_byte, s_vgpr_size_byte, 1 + s_lshr_b32 m0, s_size, S_WAVE_SIZE + s_and_b32 m0, m0, 1 + s_cmp_eq_u32 m0, 1 + s_cbranch_scc1 L_ENABLE_SHIFT_W64 + s_lshl_b32 s_vgpr_size_byte, s_vgpr_size_byte, (2+7) //Number of VGPRs = (vgpr_size + 1) * 4 * 32 * 4 (non-zero value) + s_branch L_SHIFT_DONE +L_ENABLE_SHIFT_W64: + s_lshl_b32 s_vgpr_size_byte, s_vgpr_size_byte, (2+8) //Number of VGPRs = (vgpr_size + 1) * 4 * 64 * 4 (non-zero value) +L_SHIFT_DONE: +end + +function get_svgpr_size_bytes(s_svgpr_size_byte) + s_getreg_b32 s_svgpr_size_byte, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE) + s_lshl_b32 s_svgpr_size_byte, s_svgpr_size_byte, (3+7) +end + +function get_sgpr_size_bytes + return 512 +end + +function get_hwreg_size_bytes + return 128 +end -- cgit v1.2.3 From 306fc9c568518d18821adfa050147ab1a9241fbd Mon Sep 17 00:00:00 2001 From: Jay Cornwall Date: Wed, 24 Jul 2019 12:26:08 -0500 Subject: drm/amdkfd: Remove dead code from gfx8/gfx9 trap handlers Signed-off-by: Jay Cornwall Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- .../gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm | 395 +-------------------- 
.../gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm | 375 +------------------ 2 files changed, 5 insertions(+), 765 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm index a47f5b933120..b195b7cd8a17 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm @@ -24,78 +24,6 @@ * PROJECT=vi ./sp3 cwsr_trap_handler_gfx8.asm -hex tmp.hex */ -/* HW (VI) source code for CWSR trap handler */ -/* Version 18 + multiple trap handler */ - -// this performance-optimal version was originally from Seven Xu at SRDC - -// Revison #18 --... -/* Rev History -** #1. Branch from gc dv. //gfxip/gfx8/main/src/test/suites/block/cs/sr/cs_trap_handler.sp3#1,#50, #51, #52-53(Skip, Already Fixed by PV), #54-56(merged),#57-58(mergerd, skiped-already fixed by PV) -** #4. SR Memory Layout: -** 1. VGPR-SGPR-HWREG-{LDS} -** 2. tba_hi.bits.26 - reconfigured as the first wave in tg bits, for defer Save LDS for a threadgroup.. performance concern.. -** #5. Update: 1. Accurate g8sr_ts_save_d timestamp -** #6. Update: 1. Fix s_barrier usage; 2. VGPR s/r using swizzle buffer?(NoNeed, already matched the swizzle pattern, more investigation) -** #7. Update: 1. don't barrier if noLDS -** #8. Branch: 1. Branch to ver#0, which is very similar to gc dv version -** 2. Fix SQ issue by s_sleep 2 -** #9. Update: 1. Fix scc restore failed issue, restore wave_status at last -** 2. optimize s_buffer save by burst 16sgprs... -** #10. Update 1. Optimize restore sgpr by busrt 16 sgprs. -** #11. Update 1. Add 2 more timestamp for debug version -** #12. Update 1. Add VGPR SR using DWx4, some case improve and some case drop performance -** #13. Integ 1. Always use MUBUF for PV trap shader... -** #14. Update 1. s_buffer_store soft clause... -** #15. Update 1. PERF - sclar write with glc:0/mtype0 to allow L2 combine. perf improvement a lot. -** #16. Update 1. PRRF - UNROLL LDS_DMA got 2500cycle save in IP tree -** #17. Update 1. FUNC - LDS_DMA has issues while ATC, replace with ds_read/buffer_store for save part[TODO restore part] -** 2. PERF - Save LDS before save VGPR to cover LDS save long latency... -** #18. Update 1. FUNC - Implicitly estore STATUS.VCCZ, which is not writable by s_setreg_b32 -** 2. FUNC - Handle non-CWSR traps -*/ - -var G8SR_WDMEM_HWREG_OFFSET = 0 -var G8SR_WDMEM_SGPR_OFFSET = 128 // in bytes - -// Keep definition same as the app shader, These 2 time stamps are part of the app shader... Should before any Save and after restore. 
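
As the subject line says, everything stripped from the gfx8 and gfx9 handlers here was dead: the deleted blocks sit behind file-local constants removed just below (EMU_RUN_HACK = 0, G8SR_DEBUG_TIMESTAMP = 0, SWIZZLE_EN = 0, USE_MTBUF_INSTEAD_OF_MUBUF = 0, and friends), so only the surviving arm of each conditional was ever assembled by sp3. A minimal C analogue of the pattern being deleted (illustration only):

    /* Illustrative analogue of the always-false guards being removed. */
    enum { SWIZZLE_EN = 0 };

    unsigned int num_records_bytes(unsigned int computed_bytes)
    {
            if (SWIZZLE_EN)
                    return computed_bytes;  /* never taken: dead branch */
            return 0x1000000;               /* what the handler always used */
    }
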
- -var G8SR_DEBUG_TIMESTAMP = 0 -var G8SR_DEBUG_TS_SAVE_D_OFFSET = 40*4 // ts_save_d timestamp offset relative to SGPR_SR_memory_offset -var s_g8sr_ts_save_s = s[34:35] // save start -var s_g8sr_ts_sq_save_msg = s[36:37] // The save shader send SAVEWAVE msg to spi -var s_g8sr_ts_spi_wrexec = s[38:39] // the SPI write the sr address to SQ -var s_g8sr_ts_save_d = s[40:41] // save end -var s_g8sr_ts_restore_s = s[42:43] // restore start -var s_g8sr_ts_restore_d = s[44:45] // restore end - -var G8SR_VGPR_SR_IN_DWX4 = 0 -var G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4 = 0x00100000 // DWx4 stride is 4*4Bytes -var G8SR_RESTORE_BUF_RSRC_WORD1_STRIDE_DWx4 = G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4 - - -/*************************************************************************/ -/* control on how to run the shader */ -/*************************************************************************/ -//any hack that needs to be made to run this code in EMU (either because various EMU code are not ready or no compute save & restore in EMU run) -var EMU_RUN_HACK = 0 -var EMU_RUN_HACK_RESTORE_NORMAL = 0 -var EMU_RUN_HACK_SAVE_NORMAL_EXIT = 0 -var EMU_RUN_HACK_SAVE_SINGLE_WAVE = 0 -var EMU_RUN_HACK_SAVE_FIRST_TIME = 0 //for interrupted restore in which the first save is through EMU_RUN_HACK -var EMU_RUN_HACK_SAVE_FIRST_TIME_TBA_LO = 0 //for interrupted restore in which the first save is through EMU_RUN_HACK -var EMU_RUN_HACK_SAVE_FIRST_TIME_TBA_HI = 0 //for interrupted restore in which the first save is through EMU_RUN_HACK -var SAVE_LDS = 1 -var WG_BASE_ADDR_LO = 0x9000a000 -var WG_BASE_ADDR_HI = 0x0 -var WAVE_SPACE = 0x5000 //memory size that each wave occupies in workgroup state mem -var CTX_SAVE_CONTROL = 0x0 -var CTX_RESTORE_CONTROL = CTX_SAVE_CONTROL -var SIM_RUN_HACK = 0 //any hack that needs to be made to run this code in SIM (either because various RTL code are not ready or no compute save & restore in RTL run) -var SGPR_SAVE_USE_SQC = 1 //use SQC D$ to do the write -var USE_MTBUF_INSTEAD_OF_MUBUF = 0 //because TC EMU currently asserts on 0 of // overload DFMT field to carry 4 more bits of stride for MUBUF opcodes -var SWIZZLE_EN = 0 //whether we use swizzled buffer addressing - /**************************************************************************/ /* variables */ /**************************************************************************/ @@ -226,16 +154,7 @@ shader main type(CS) - if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL)) //hack to use trap_id for determining save/restore - //FIXME VCCZ un-init assertion s_getreg_b32 s_save_status, hwreg(HW_REG_STATUS) //save STATUS since we will change SCC - s_and_b32 s_save_tmp, s_save_pc_hi, 0xffff0000 //change SCC - s_cmp_eq_u32 s_save_tmp, 0x007e0000 //Save: trap_id = 0x7e. Restore: trap_id = 0x7f. - s_cbranch_scc0 L_JUMP_TO_RESTORE //do not need to recover STATUS here since we are going to RESTORE - //FIXME s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status //need to recover STATUS since we are going to SAVE - s_branch L_SKIP_RESTORE //NOT restore, SAVE actually - else s_branch L_SKIP_RESTORE //NOT restore. 
might be a regular trap or save - end L_JUMP_TO_RESTORE: s_branch L_RESTORE //restore @@ -249,7 +168,7 @@ L_SKIP_RESTORE: s_cbranch_scc1 L_SAVE //this is the operation for save // ********* Handle non-CWSR traps ******************* -if (!EMU_RUN_HACK) + /* read tba and tma for next level trap handler, ttmp4 is used as s_save_status */ s_load_dwordx4 [ttmp8,ttmp9,ttmp10, ttmp11], [tma_lo,tma_hi], 0 s_waitcnt lgkmcnt(0) @@ -268,7 +187,7 @@ L_EXCP_CASE: s_and_b32 ttmp1, ttmp1, 0xFFFF set_status_without_spi_prio(s_save_status, ttmp2) //restore HW status(SCC) s_rfe_b64 [ttmp0, ttmp1] -end + // ********* End handling of non-CWSR traps ******************* /**************************************************************************/ @@ -276,12 +195,6 @@ end /**************************************************************************/ L_SAVE: - -if G8SR_DEBUG_TIMESTAMP - s_memrealtime s_g8sr_ts_save_s - s_waitcnt lgkmcnt(0) //FIXME, will cause xnack?? -end - s_mov_b32 s_save_tmp, 0 //clear saveCtx bit s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_SAVECTX_SHIFT, 1), s_save_tmp //clear saveCtx bit @@ -303,16 +216,7 @@ end s_mov_b32 s_save_exec_hi, exec_hi s_mov_b64 exec, 0x0 //clear EXEC to get ready to receive -if G8SR_DEBUG_TIMESTAMP - s_memrealtime s_g8sr_ts_sq_save_msg - s_waitcnt lgkmcnt(0) -end - - if (EMU_RUN_HACK) - - else s_sendmsg sendmsg(MSG_SAVEWAVE) //send SPI a message and wait for SPI's write to EXEC - end // Set SPI_PRIO=2 to avoid starving instruction fetch in the waves we're waiting for. s_or_b32 s_save_tmp, s_save_status, (2 << SQ_WAVE_STATUS_SPI_PRIO_SHIFT) @@ -321,36 +225,9 @@ end L_SLEEP: s_sleep 0x2 // sleep 1 (64clk) is not enough for 8 waves per SIMD, which will cause SQ hang, since the 7,8th wave could not get arbit to exec inst, while other waves are stuck into the sleep-loop and waiting for wrexec!=0 - if (EMU_RUN_HACK) - - else s_cbranch_execz L_SLEEP - end - -if G8SR_DEBUG_TIMESTAMP - s_memrealtime s_g8sr_ts_spi_wrexec - s_waitcnt lgkmcnt(0) -end /* setup Resource Contants */ - if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_SAVE_SINGLE_WAVE)) - //calculate wd_addr using absolute thread id - v_readlane_b32 s_save_tmp, v9, 0 - s_lshr_b32 s_save_tmp, s_save_tmp, 6 - s_mul_i32 s_save_tmp, s_save_tmp, WAVE_SPACE - s_add_i32 s_save_spi_init_lo, s_save_tmp, WG_BASE_ADDR_LO - s_mov_b32 s_save_spi_init_hi, WG_BASE_ADDR_HI - s_and_b32 s_save_spi_init_hi, s_save_spi_init_hi, CTX_SAVE_CONTROL - else - end - if ((EMU_RUN_HACK) && (EMU_RUN_HACK_SAVE_SINGLE_WAVE)) - s_add_i32 s_save_spi_init_lo, s_save_tmp, WG_BASE_ADDR_LO - s_mov_b32 s_save_spi_init_hi, WG_BASE_ADDR_HI - s_and_b32 s_save_spi_init_hi, s_save_spi_init_hi, CTX_SAVE_CONTROL - else - end - - s_mov_b32 s_save_buf_rsrc0, s_save_spi_init_lo //base_addr_lo s_and_b32 s_save_buf_rsrc1, s_save_spi_init_hi, 0x0000FFFF //base_addr_hi s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE @@ -383,22 +260,10 @@ end s_mov_b32 s_save_buf_rsrc2, 0x4 //NUM_RECORDS in bytes - if (SWIZZLE_EN) - s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? 
- else s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes - end write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset) //M0 - - if ((EMU_RUN_HACK) && (EMU_RUN_HACK_SAVE_FIRST_TIME)) - s_add_u32 s_save_pc_lo, s_save_pc_lo, 4 //pc[31:0]+4 - s_addc_u32 s_save_pc_hi, s_save_pc_hi, 0x0 //carry bit over - s_mov_b32 tba_lo, EMU_RUN_HACK_SAVE_FIRST_TIME_TBA_LO - s_mov_b32 tba_hi, EMU_RUN_HACK_SAVE_FIRST_TIME_TBA_HI - end - write_hwreg_to_mem(s_save_pc_lo, s_save_buf_rsrc0, s_save_mem_offset) //PC write_hwreg_to_mem(s_save_pc_hi, s_save_buf_rsrc0, s_save_mem_offset) write_hwreg_to_mem(s_save_exec_lo, s_save_buf_rsrc0, s_save_mem_offset) //EXEC @@ -440,18 +305,8 @@ end s_add_u32 s_save_alloc_size, s_save_alloc_size, 1 s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 4 //Number of SGPRs = (sgpr_size + 1) * 16 (non-zero value) - if (SGPR_SAVE_USE_SQC) s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 2 //NUM_RECORDS in bytes - else - s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 8 //NUM_RECORDS in bytes (64 threads) - end - - if (SWIZZLE_EN) - s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? - else s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes - end - // backup s_save_buf_rsrc0,1 to s_save_pc_lo/hi, since write_16sgpr_to_mem function will change the rsrc0 //s_mov_b64 s_save_pc_lo, s_save_buf_rsrc0 @@ -490,30 +345,14 @@ end s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on s_mov_b32 exec_hi, 0xFFFFFFFF - if (SWIZZLE_EN) - s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? - else s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes - end - // VGPR Allocated in 4-GPR granularity -if G8SR_VGPR_SR_IN_DWX4 - // the const stride for DWx4 is 4*4 bytes - s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0 - s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4 // const stride to 4*4 bytes - - buffer_store_dwordx4 v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 - - s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0 - s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE // reset const stride to 4 bytes -else buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256 buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*2 buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*3 -end @@ -549,64 +388,10 @@ end s_add_u32 s_save_mem_offset, s_save_mem_offset, get_hwreg_size_bytes() - if (SWIZZLE_EN) - s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? - else s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes - end - s_mov_b32 m0, 0x0 //lds_offset initial value = 0 -var LDS_DMA_ENABLE = 0 -var UNROLL = 0 -if UNROLL==0 && LDS_DMA_ENABLE==1 - s_mov_b32 s3, 256*2 - s_nop 0 - s_nop 0 - s_nop 0 - L_SAVE_LDS_LOOP: - //TODO: looks the 2 buffer_store/load clause for s/r will hurt performance.??? 
- if (SAVE_LDS) //SPI always alloc LDS space in 128DW granularity - buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 // first 64DW - buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 offset:256 // second 64DW - end - - s_add_u32 m0, m0, s3 //every buffer_store_lds does 256 bytes - s_add_u32 s_save_mem_offset, s_save_mem_offset, s3 //mem offset increased by 256 bytes - s_cmp_lt_u32 m0, s_save_alloc_size //scc=(m0 < s_save_alloc_size) ? 1 : 0 - s_cbranch_scc1 L_SAVE_LDS_LOOP //LDS save is complete? - -elsif LDS_DMA_ENABLE==1 && UNROLL==1 // UNROOL , has ichace miss - // store from higest LDS address to lowest - s_mov_b32 s3, 256*2 - s_sub_u32 m0, s_save_alloc_size, s3 - s_add_u32 s_save_mem_offset, s_save_mem_offset, m0 - s_lshr_b32 s_save_alloc_size, s_save_alloc_size, 9 // how many 128 trunks... - s_sub_u32 s_save_alloc_size, 128, s_save_alloc_size // store from higheset addr to lowest - s_mul_i32 s_save_alloc_size, s_save_alloc_size, 6*4 // PC offset increment, each LDS save block cost 6*4 Bytes instruction - s_add_u32 s_save_alloc_size, s_save_alloc_size, 3*4 //2is the below 2 inst...//s_addc and s_setpc - s_nop 0 - s_nop 0 - s_nop 0 //pad 3 dw to let LDS_DMA align with 64Bytes - s_getpc_b64 s[0:1] // reuse s[0:1], since s[0:1] already saved - s_add_u32 s0, s0,s_save_alloc_size - s_addc_u32 s1, s1, 0 - s_setpc_b64 s[0:1] - - - for var i =0; i< 128; i++ - // be careful to make here a 64Byte aligned address, which could improve performance... - buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 offset:0 // first 64DW - buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 offset:256 // second 64DW - - if i!=127 - s_sub_u32 m0, m0, s3 // use a sgpr to shrink 2DW-inst to 1DW inst to improve performance , i.e. pack more LDS_DMA inst to one Cacheline - s_sub_u32 s_save_mem_offset, s_save_mem_offset, s3 - end - end - -else // BUFFER_STORE v_mbcnt_lo_u32_b32 v2, 0xffffffff, 0x0 v_mbcnt_hi_u32_b32 v3, 0xffffffff, v2 // tid v_mul_i32_i24 v2, v3, 8 // tid*8 @@ -628,8 +413,6 @@ L_SAVE_LDS_LOOP_VECTOR: // restore rsrc3 s_mov_b32 s_save_buf_rsrc3, s0 -end - L_SAVE_LDS_DONE: @@ -647,44 +430,8 @@ L_SAVE_LDS_DONE: s_add_u32 s_save_alloc_size, s_save_alloc_size, 1 s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value) //FIXME for GFX, zero is possible s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 8 //NUM_RECORDS in bytes (64 threads*4) - if (SWIZZLE_EN) - s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? 
- else s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes - end - - - // VGPR Allocated in 4-GPR granularity - -if G8SR_VGPR_SR_IN_DWX4 - // the const stride for DWx4 is 4*4 bytes - s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0 - s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4 // const stride to 4*4 bytes - - s_mov_b32 m0, 4 // skip first 4 VGPRs - s_cmp_lt_u32 m0, s_save_alloc_size - s_cbranch_scc0 L_SAVE_VGPR_LOOP_END // no more vgprs - s_set_gpr_idx_on m0, 0x1 // This will change M0 - s_add_u32 s_save_alloc_size, s_save_alloc_size, 0x1000 // because above inst change m0 -L_SAVE_VGPR_LOOP: - v_mov_b32 v0, v0 // v0 = v[0+m0] - v_mov_b32 v1, v1 - v_mov_b32 v2, v2 - v_mov_b32 v3, v3 - - - buffer_store_dwordx4 v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 - s_add_u32 m0, m0, 4 - s_add_u32 s_save_mem_offset, s_save_mem_offset, 256*4 - s_cmp_lt_u32 m0, s_save_alloc_size - s_cbranch_scc1 L_SAVE_VGPR_LOOP //VGPR save is complete? - s_set_gpr_idx_off -L_SAVE_VGPR_LOOP_END: - - s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0 - s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE // reset const stride to 4 bytes -else // VGPR store using dw burst s_mov_b32 m0, 0x4 //VGPR initial index value =0 s_cmp_lt_u32 m0, s_save_alloc_size @@ -700,52 +447,18 @@ else v_mov_b32 v2, v2 //v0 = v[0+m0] v_mov_b32 v3, v3 //v0 = v[0+m0] - if(USE_MTBUF_INSTEAD_OF_MUBUF) - tbuffer_store_format_x v0, v0, s_save_buf_rsrc0, s_save_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1 - else buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256 buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*2 buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*3 - end s_add_u32 m0, m0, 4 //next vgpr index s_add_u32 s_save_mem_offset, s_save_mem_offset, 256*4 //every buffer_store_dword does 256 bytes s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0 s_cbranch_scc1 L_SAVE_VGPR_LOOP //VGPR save is complete? s_set_gpr_idx_off -end L_SAVE_VGPR_END: - - - - - - - /* S_PGM_END_SAVED */ //FIXME graphics ONLY - if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_SAVE_NORMAL_EXIT)) - s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff //pc[47:32] - s_add_u32 s_save_pc_lo, s_save_pc_lo, 4 //pc[31:0]+4 - s_addc_u32 s_save_pc_hi, s_save_pc_hi, 0x0 //carry bit over - s_rfe_b64 s_save_pc_lo //Return to the main shader program - else - end - -// Save Done timestamp -if G8SR_DEBUG_TIMESTAMP - s_memrealtime s_g8sr_ts_save_d - // SGPR SR memory offset : size(VGPR) - get_vgpr_size_bytes(s_save_mem_offset) - s_add_u32 s_save_mem_offset, s_save_mem_offset, G8SR_DEBUG_TS_SAVE_D_OFFSET - s_waitcnt lgkmcnt(0) //FIXME, will cause xnack?? - // Need reset rsrc2?? 
- s_mov_b32 m0, s_save_mem_offset - s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes - s_buffer_store_dwordx2 s_g8sr_ts_save_d, s_save_buf_rsrc0, m0 glc:1 -end - - s_branch L_END_PGM @@ -756,27 +469,6 @@ end L_RESTORE: /* Setup Resource Contants */ - if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL)) - //calculate wd_addr using absolute thread id - v_readlane_b32 s_restore_tmp, v9, 0 - s_lshr_b32 s_restore_tmp, s_restore_tmp, 6 - s_mul_i32 s_restore_tmp, s_restore_tmp, WAVE_SPACE - s_add_i32 s_restore_spi_init_lo, s_restore_tmp, WG_BASE_ADDR_LO - s_mov_b32 s_restore_spi_init_hi, WG_BASE_ADDR_HI - s_and_b32 s_restore_spi_init_hi, s_restore_spi_init_hi, CTX_RESTORE_CONTROL - else - end - -if G8SR_DEBUG_TIMESTAMP - s_memrealtime s_g8sr_ts_restore_s - s_waitcnt lgkmcnt(0) //FIXME, will cause xnack?? - // tma_lo/hi are sgpr 110, 111, which will not used for 112 SGPR allocated case... - s_mov_b32 s_restore_pc_lo, s_g8sr_ts_restore_s[0] - s_mov_b32 s_restore_pc_hi, s_g8sr_ts_restore_s[1] //backup ts to ttmp0/1, sicne exec will be finally restored.. -end - - - s_mov_b32 s_restore_buf_rsrc0, s_restore_spi_init_lo //base_addr_lo s_and_b32 s_restore_buf_rsrc1, s_restore_spi_init_hi, 0x0000FFFF //base_addr_hi s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE @@ -818,18 +510,12 @@ end s_add_u32 s_restore_mem_offset, s_restore_mem_offset, get_hwreg_size_bytes() //FIXME, Check if offset overflow??? - if (SWIZZLE_EN) - s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? - else s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes - end s_mov_b32 m0, 0x0 //lds_offset initial value = 0 L_RESTORE_LDS_LOOP: - if (SAVE_LDS) buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 // first 64DW buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 offset:256 // second 64DW - end s_add_u32 m0, m0, 256*2 // 128 DW s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*2 //mem offset increased by 128DW s_cmp_lt_u32 m0, s_restore_alloc_size //scc=(m0 < s_restore_alloc_size) ? 1 : 0 @@ -848,40 +534,8 @@ end s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 1 s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value) s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 8 //NUM_RECORDS in bytes (64 threads*4) - if (SWIZZLE_EN) - s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? - else s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes - end - -if G8SR_VGPR_SR_IN_DWX4 - get_vgpr_size_bytes(s_restore_mem_offset) - s_sub_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4 - - // the const stride for DWx4 is 4*4 bytes - s_and_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, 0x0000FFFF // reset const stride to 0 - s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, G8SR_RESTORE_BUF_RSRC_WORD1_STRIDE_DWx4 // const stride to 4*4 bytes - - s_mov_b32 m0, s_restore_alloc_size - s_set_gpr_idx_on m0, 0x8 // Note.. 
This will change m0 - -L_RESTORE_VGPR_LOOP: - buffer_load_dwordx4 v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 - s_waitcnt vmcnt(0) - s_sub_u32 m0, m0, 4 - v_mov_b32 v0, v0 // v[0+m0] = v0 - v_mov_b32 v1, v1 - v_mov_b32 v2, v2 - v_mov_b32 v3, v3 - s_sub_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4 - s_cmp_eq_u32 m0, 0x8000 - s_cbranch_scc0 L_RESTORE_VGPR_LOOP - s_set_gpr_idx_off - - s_and_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, 0x0000FFFF // reset const stride to 0 - s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE // const stride to 4*4 bytes - -else + // VGPR load using dw burst s_mov_b32 s_restore_mem_offset_save, s_restore_mem_offset // restore start with v1, v0 will be the last s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4 @@ -890,14 +544,10 @@ else s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 0x8000 //add 0x8000 since we compare m0 against it later L_RESTORE_VGPR_LOOP: - if(USE_MTBUF_INSTEAD_OF_MUBUF) - tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1 - else buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256 buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256*2 buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256*3 - end s_waitcnt vmcnt(0) //ensure data ready v_mov_b32 v0, v0 //v[0+m0] = v0 v_mov_b32 v1, v1 @@ -909,16 +559,10 @@ else s_cbranch_scc1 L_RESTORE_VGPR_LOOP //VGPR restore (except v0) is complete? s_set_gpr_idx_off /* VGPR restore on v0 */ - if(USE_MTBUF_INSTEAD_OF_MUBUF) - tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1 - else buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256 buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*2 buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*3 - end - -end /* restore SGPRs */ ////////////////////////////// @@ -934,16 +578,8 @@ end s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 1 s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 4 //Number of SGPRs = (sgpr_size + 1) * 16 (non-zero value) - if (SGPR_SAVE_USE_SQC) s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 2 //NUM_RECORDS in bytes - else - s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 8 //NUM_RECORDS in bytes (64 threads) - end - if (SWIZZLE_EN) - s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? 
- else s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes - end /* If 112 SGPRs ar allocated, 4 sgprs are not used TBA(108,109),TMA(110,111), However, we are safe to restore these 4 SGPRs anyway, since TBA,TMA will later be restored by HWREG @@ -972,12 +608,6 @@ end ////////////////////////////// L_RESTORE_HWREG: - -if G8SR_DEBUG_TIMESTAMP - s_mov_b32 s_g8sr_ts_restore_s[0], s_restore_pc_lo - s_mov_b32 s_g8sr_ts_restore_s[1], s_restore_pc_hi -end - // HWREG SR memory offset : size(VGPR)+size(SGPR) get_vgpr_size_bytes(s_restore_mem_offset) get_sgpr_size_bytes(s_restore_tmp) @@ -985,11 +615,7 @@ end s_mov_b32 s_restore_buf_rsrc2, 0x4 //NUM_RECORDS in bytes - if (SWIZZLE_EN) - s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? - else s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes - end read_hwreg_from_mem(s_restore_m0, s_restore_buf_rsrc0, s_restore_mem_offset) //M0 read_hwreg_from_mem(s_restore_pc_lo, s_restore_buf_rsrc0, s_restore_mem_offset) //PC @@ -1006,16 +632,6 @@ end s_waitcnt lgkmcnt(0) //from now on, it is safe to restore STATUS and IB_STS - //for normal save & restore, the saved PC points to the next inst to execute, no adjustment needs to be made, otherwise: - if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL)) - s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 8 //pc[31:0]+8 //two back-to-back s_trap are used (first for save and second for restore) - s_addc_u32 s_restore_pc_hi, s_restore_pc_hi, 0x0 //carry bit over - end - if ((EMU_RUN_HACK) && (EMU_RUN_HACK_RESTORE_NORMAL)) - s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 4 //pc[31:0]+4 // save is hack through s_trap but restore is normal - s_addc_u32 s_restore_pc_hi, s_restore_pc_hi, 0x0 //carry bit over - end - s_mov_b32 m0, s_restore_m0 s_mov_b32 exec_lo, s_restore_exec_lo s_mov_b32 exec_hi, s_restore_exec_hi @@ -1048,11 +664,6 @@ end s_barrier //barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG //FIXME not performance-optimal at this time -if G8SR_DEBUG_TIMESTAMP - s_memrealtime s_g8sr_ts_restore_d - s_waitcnt lgkmcnt(0) -end - // s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution s_rfe_restore_b64 s_restore_pc_lo, s_restore_m0 // s_restore_m0[0] is used to set STATUS.inst_atc diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm index cee4cfd5182d..75f29d13c90f 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm @@ -24,75 +24,6 @@ * PROJECT=greenland ./sp3 cwsr_trap_handler_gfx9.asm -hex tmp.hex */ -/* HW (GFX9) source code for CWSR trap handler */ -/* Version 18 + multiple trap handler */ - -// this performance-optimal version was originally from Seven Xu at SRDC - -// Revison #18 --... -/* Rev History -** #1. Branch from gc dv. //gfxip/gfx9/main/src/test/suites/block/cs/sr/cs_trap_handler.sp3#1,#50, #51, #52-53(Skip, Already Fixed by PV), #54-56(merged),#57-58(mergerd, skiped-already fixed by PV) -** #4. SR Memory Layout: -** 1. VGPR-SGPR-HWREG-{LDS} -** 2. tba_hi.bits.26 - reconfigured as the first wave in tg bits, for defer Save LDS for a threadgroup.. performance concern.. -** #5. Update: 1. Accurate g8sr_ts_save_d timestamp -** #6. Update: 1. Fix s_barrier usage; 2. VGPR s/r using swizzle buffer?(NoNeed, already matched the swizzle pattern, more investigation) -** #7. Update: 1. don't barrier if noLDS -** #8. 
Branch: 1. Branch to ver#0, which is very similar to gc dv version -** 2. Fix SQ issue by s_sleep 2 -** #9. Update: 1. Fix scc restore failed issue, restore wave_status at last -** 2. optimize s_buffer save by burst 16sgprs... -** #10. Update 1. Optimize restore sgpr by busrt 16 sgprs. -** #11. Update 1. Add 2 more timestamp for debug version -** #12. Update 1. Add VGPR SR using DWx4, some case improve and some case drop performance -** #13. Integ 1. Always use MUBUF for PV trap shader... -** #14. Update 1. s_buffer_store soft clause... -** #15. Update 1. PERF - sclar write with glc:0/mtype0 to allow L2 combine. perf improvement a lot. -** #16. Update 1. PRRF - UNROLL LDS_DMA got 2500cycle save in IP tree -** #17. Update 1. FUNC - LDS_DMA has issues while ATC, replace with ds_read/buffer_store for save part[TODO restore part] -** 2. PERF - Save LDS before save VGPR to cover LDS save long latency... -** #18. Update 1. FUNC - Implicitly estore STATUS.VCCZ, which is not writable by s_setreg_b32 -** 2. FUNC - Handle non-CWSR traps -*/ - -var G8SR_WDMEM_HWREG_OFFSET = 0 -var G8SR_WDMEM_SGPR_OFFSET = 128 // in bytes - -// Keep definition same as the app shader, These 2 time stamps are part of the app shader... Should before any Save and after restore. - -var G8SR_DEBUG_TIMESTAMP = 0 -var G8SR_DEBUG_TS_SAVE_D_OFFSET = 40*4 // ts_save_d timestamp offset relative to SGPR_SR_memory_offset -var s_g8sr_ts_save_s = s[34:35] // save start -var s_g8sr_ts_sq_save_msg = s[36:37] // The save shader send SAVEWAVE msg to spi -var s_g8sr_ts_spi_wrexec = s[38:39] // the SPI write the sr address to SQ -var s_g8sr_ts_save_d = s[40:41] // save end -var s_g8sr_ts_restore_s = s[42:43] // restore start -var s_g8sr_ts_restore_d = s[44:45] // restore end - -var G8SR_VGPR_SR_IN_DWX4 = 0 -var G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4 = 0x00100000 // DWx4 stride is 4*4Bytes -var G8SR_RESTORE_BUF_RSRC_WORD1_STRIDE_DWx4 = G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4 - - -/*************************************************************************/ -/* control on how to run the shader */ -/*************************************************************************/ -//any hack that needs to be made to run this code in EMU (either because various EMU code are not ready or no compute save & restore in EMU run) -var EMU_RUN_HACK = 0 -var EMU_RUN_HACK_RESTORE_NORMAL = 0 -var EMU_RUN_HACK_SAVE_NORMAL_EXIT = 0 -var EMU_RUN_HACK_SAVE_SINGLE_WAVE = 0 -var EMU_RUN_HACK_SAVE_FIRST_TIME = 0 //for interrupted restore in which the first save is through EMU_RUN_HACK -var SAVE_LDS = 1 -var WG_BASE_ADDR_LO = 0x9000a000 -var WG_BASE_ADDR_HI = 0x0 -var WAVE_SPACE = 0x5000 //memory size that each wave occupies in workgroup state mem -var CTX_SAVE_CONTROL = 0x0 -var CTX_RESTORE_CONTROL = CTX_SAVE_CONTROL -var SIM_RUN_HACK = 0 //any hack that needs to be made to run this code in SIM (either because various RTL code are not ready or no compute save & restore in RTL run) -var SGPR_SAVE_USE_SQC = 1 //use SQC D$ to do the write -var USE_MTBUF_INSTEAD_OF_MUBUF = 0 //because TC EMU currently asserts on 0 of // overload DFMT field to carry 4 more bits of stride for MUBUF opcodes -var SWIZZLE_EN = 0 //whether we use swizzled buffer addressing var ACK_SQC_STORE = 1 //workaround for suspected SQC store bug causing incorrect stores under concurrency var SAVE_AFTER_XNACK_ERROR = 1 //workaround for TCP store failure after XNACK error when ALLOW_REPLAY=0, for debugger var SINGLE_STEP_MISSED_WORKAROUND = 1 //workaround for lost MODE.DEBUG_EN exception when SAVECTX raised @@ 
-238,16 +169,7 @@ shader main type(CS) - if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL)) //hack to use trap_id for determining save/restore - //FIXME VCCZ un-init assertion s_getreg_b32 s_save_status, hwreg(HW_REG_STATUS) //save STATUS since we will change SCC - s_and_b32 s_save_tmp, s_save_pc_hi, 0xffff0000 //change SCC - s_cmp_eq_u32 s_save_tmp, 0x007e0000 //Save: trap_id = 0x7e. Restore: trap_id = 0x7f. - s_cbranch_scc0 L_JUMP_TO_RESTORE //do not need to recover STATUS here since we are going to RESTORE - //FIXME s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status //need to recover STATUS since we are going to SAVE - s_branch L_SKIP_RESTORE //NOT restore, SAVE actually - else s_branch L_SKIP_RESTORE //NOT restore. might be a regular trap or save - end L_JUMP_TO_RESTORE: s_branch L_RESTORE //restore @@ -278,7 +200,7 @@ end s_cbranch_scc1 L_SAVE //this is the operation for save // ********* Handle non-CWSR traps ******************* -if (!EMU_RUN_HACK) + // Illegal instruction is a non-maskable exception which blocks context save. // Halt the wavefront and return from the trap. s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK @@ -355,7 +277,7 @@ L_EXCP_CASE: set_status_without_spi_prio(s_save_status, ttmp2) s_rfe_b64 [ttmp0, ttmp1] -end + // ********* End handling of non-CWSR traps ******************* /**************************************************************************/ @@ -363,12 +285,6 @@ end /**************************************************************************/ L_SAVE: - -if G8SR_DEBUG_TIMESTAMP - s_memrealtime s_g8sr_ts_save_s - s_waitcnt lgkmcnt(0) //FIXME, will cause xnack?? -end - s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff //pc[47:32] s_mov_b32 s_save_tmp, 0 //clear saveCtx bit @@ -390,16 +306,7 @@ end s_mov_b32 s_save_exec_hi, exec_hi s_mov_b64 exec, 0x0 //clear EXEC to get ready to receive -if G8SR_DEBUG_TIMESTAMP - s_memrealtime s_g8sr_ts_sq_save_msg - s_waitcnt lgkmcnt(0) -end - - if (EMU_RUN_HACK) - - else s_sendmsg sendmsg(MSG_SAVEWAVE) //send SPI a message and wait for SPI's write to EXEC - end // Set SPI_PRIO=2 to avoid starving instruction fetch in the waves we're waiting for. 
s_or_b32 s_save_tmp, s_save_status, (2 << SQ_WAVE_STATUS_SPI_PRIO_SHIFT) @@ -408,33 +315,7 @@ end L_SLEEP: s_sleep 0x2 // sleep 1 (64clk) is not enough for 8 waves per SIMD, which will cause SQ hang, since the 7,8th wave could not get arbit to exec inst, while other waves are stuck into the sleep-loop and waiting for wrexec!=0 - if (EMU_RUN_HACK) - - else s_cbranch_execz L_SLEEP - end - -if G8SR_DEBUG_TIMESTAMP - s_memrealtime s_g8sr_ts_spi_wrexec - s_waitcnt lgkmcnt(0) -end - - if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_SAVE_SINGLE_WAVE)) - //calculate wd_addr using absolute thread id - v_readlane_b32 s_save_tmp, v9, 0 - s_lshr_b32 s_save_tmp, s_save_tmp, 6 - s_mul_i32 s_save_tmp, s_save_tmp, WAVE_SPACE - s_add_i32 s_save_spi_init_lo, s_save_tmp, WG_BASE_ADDR_LO - s_mov_b32 s_save_spi_init_hi, WG_BASE_ADDR_HI - s_and_b32 s_save_spi_init_hi, s_save_spi_init_hi, CTX_SAVE_CONTROL - else - end - if ((EMU_RUN_HACK) && (EMU_RUN_HACK_SAVE_SINGLE_WAVE)) - s_add_i32 s_save_spi_init_lo, s_save_tmp, WG_BASE_ADDR_LO - s_mov_b32 s_save_spi_init_hi, WG_BASE_ADDR_HI - s_and_b32 s_save_spi_init_hi, s_save_spi_init_hi, CTX_SAVE_CONTROL - else - end // Save trap temporaries 4-11, 13 initialized by SPI debug dispatch logic // ttmp SR memory offset : size(VGPR)+size(SGPR)+0x40 @@ -484,20 +365,10 @@ end s_mov_b32 s_save_buf_rsrc2, 0x4 //NUM_RECORDS in bytes - if (SWIZZLE_EN) - s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? - else s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes - end write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset) //M0 - - if ((EMU_RUN_HACK) && (EMU_RUN_HACK_SAVE_FIRST_TIME)) - s_add_u32 s_save_pc_lo, s_save_pc_lo, 4 //pc[31:0]+4 - s_addc_u32 s_save_pc_hi, s_save_pc_hi, 0x0 //carry bit over - end - write_hwreg_to_mem(s_save_pc_lo, s_save_buf_rsrc0, s_save_mem_offset) //PC write_hwreg_to_mem(s_save_pc_hi, s_save_buf_rsrc0, s_save_mem_offset) write_hwreg_to_mem(s_save_exec_lo, s_save_buf_rsrc0, s_save_mem_offset) //EXEC @@ -535,17 +406,9 @@ end s_add_u32 s_save_alloc_size, s_save_alloc_size, 1 s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 4 //Number of SGPRs = (sgpr_size + 1) * 16 (non-zero value) - if (SGPR_SAVE_USE_SQC) s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 2 //NUM_RECORDS in bytes - else - s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 8 //NUM_RECORDS in bytes (64 threads) - end - if (SWIZZLE_EN) - s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? - else s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes - end // backup s_save_buf_rsrc0,1 to s_save_pc_lo/hi, since write_16sgpr_to_mem function will change the rsrc0 @@ -588,25 +451,11 @@ end s_mov_b32 xnack_mask_lo, 0x0 s_mov_b32 xnack_mask_hi, 0x0 - if (SWIZZLE_EN) - s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? 
- else s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes - end // VGPR Allocated in 4-GPR granularity -if G8SR_VGPR_SR_IN_DWX4 - // the const stride for DWx4 is 4*4 bytes - s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0 - s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4 // const stride to 4*4 bytes - - buffer_store_dwordx4 v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 - - s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0 - s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE // reset const stride to 4 bytes -else if SAVE_AFTER_XNACK_ERROR check_if_tcp_store_ok() s_cbranch_scc1 L_SAVE_FIRST_VGPRS_WITH_TCP @@ -621,7 +470,6 @@ end buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256 buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*2 buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*3 -end @@ -656,64 +504,11 @@ end s_add_u32 s_save_mem_offset, s_save_mem_offset, get_hwreg_size_bytes() - if (SWIZZLE_EN) - s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? - else s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes - end s_mov_b32 m0, 0x0 //lds_offset initial value = 0 -var LDS_DMA_ENABLE = 0 -var UNROLL = 0 -if UNROLL==0 && LDS_DMA_ENABLE==1 - s_mov_b32 s3, 256*2 - s_nop 0 - s_nop 0 - s_nop 0 - L_SAVE_LDS_LOOP: - //TODO: looks the 2 buffer_store/load clause for s/r will hurt performance.??? - if (SAVE_LDS) //SPI always alloc LDS space in 128DW granularity - buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 // first 64DW - buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 offset:256 // second 64DW - end - - s_add_u32 m0, m0, s3 //every buffer_store_lds does 256 bytes - s_add_u32 s_save_mem_offset, s_save_mem_offset, s3 //mem offset increased by 256 bytes - s_cmp_lt_u32 m0, s_save_alloc_size //scc=(m0 < s_save_alloc_size) ? 1 : 0 - s_cbranch_scc1 L_SAVE_LDS_LOOP //LDS save is complete? - -elsif LDS_DMA_ENABLE==1 && UNROLL==1 // UNROOL , has ichace miss - // store from higest LDS address to lowest - s_mov_b32 s3, 256*2 - s_sub_u32 m0, s_save_alloc_size, s3 - s_add_u32 s_save_mem_offset, s_save_mem_offset, m0 - s_lshr_b32 s_save_alloc_size, s_save_alloc_size, 9 // how many 128 trunks... - s_sub_u32 s_save_alloc_size, 128, s_save_alloc_size // store from higheset addr to lowest - s_mul_i32 s_save_alloc_size, s_save_alloc_size, 6*4 // PC offset increment, each LDS save block cost 6*4 Bytes instruction - s_add_u32 s_save_alloc_size, s_save_alloc_size, 3*4 //2is the below 2 inst...//s_addc and s_setpc - s_nop 0 - s_nop 0 - s_nop 0 //pad 3 dw to let LDS_DMA align with 64Bytes - s_getpc_b64 s[0:1] // reuse s[0:1], since s[0:1] already saved - s_add_u32 s0, s0,s_save_alloc_size - s_addc_u32 s1, s1, 0 - s_setpc_b64 s[0:1] - - - for var i =0; i< 128; i++ - // be careful to make here a 64Byte aligned address, which could improve performance... - buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 offset:0 // first 64DW - buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 offset:256 // second 64DW - - if i!=127 - s_sub_u32 m0, m0, s3 // use a sgpr to shrink 2DW-inst to 1DW inst to improve performance , i.e. 
pack more LDS_DMA inst to one Cacheline - s_sub_u32 s_save_mem_offset, s_save_mem_offset, s3 - end - end - -else // BUFFER_STORE v_mbcnt_lo_u32_b32 v2, 0xffffffff, 0x0 v_mbcnt_hi_u32_b32 v3, 0xffffffff, v2 // tid @@ -757,8 +552,6 @@ L_SAVE_LDS_LOOP_VECTOR: // restore rsrc3 s_mov_b32 s_save_buf_rsrc3, s0 -end - L_SAVE_LDS_DONE: @@ -776,44 +569,9 @@ L_SAVE_LDS_DONE: s_add_u32 s_save_alloc_size, s_save_alloc_size, 1 s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value) //FIXME for GFX, zero is possible s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 8 //NUM_RECORDS in bytes (64 threads*4) - if (SWIZZLE_EN) - s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? - else s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes - end - // VGPR Allocated in 4-GPR granularity - -if G8SR_VGPR_SR_IN_DWX4 - // the const stride for DWx4 is 4*4 bytes - s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0 - s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4 // const stride to 4*4 bytes - - s_mov_b32 m0, 4 // skip first 4 VGPRs - s_cmp_lt_u32 m0, s_save_alloc_size - s_cbranch_scc0 L_SAVE_VGPR_LOOP_END // no more vgprs - - s_set_gpr_idx_on m0, 0x1 // This will change M0 - s_add_u32 s_save_alloc_size, s_save_alloc_size, 0x1000 // because above inst change m0 -L_SAVE_VGPR_LOOP: - v_mov_b32 v0, v0 // v0 = v[0+m0] - v_mov_b32 v1, v1 - v_mov_b32 v2, v2 - v_mov_b32 v3, v3 - - - buffer_store_dwordx4 v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 - s_add_u32 m0, m0, 4 - s_add_u32 s_save_mem_offset, s_save_mem_offset, 256*4 - s_cmp_lt_u32 m0, s_save_alloc_size - s_cbranch_scc1 L_SAVE_VGPR_LOOP //VGPR save is complete? - s_set_gpr_idx_off -L_SAVE_VGPR_LOOP_END: - - s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0 - s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE // reset const stride to 4 bytes -else // VGPR store using dw burst s_mov_b32 m0, 0x4 //VGPR initial index value =0 s_cmp_lt_u32 m0, s_save_alloc_size @@ -844,21 +602,16 @@ end v_mov_b32 v2, v2 //v0 = v[0+m0] v_mov_b32 v3, v3 //v0 = v[0+m0] - if(USE_MTBUF_INSTEAD_OF_MUBUF) - tbuffer_store_format_x v0, v0, s_save_buf_rsrc0, s_save_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1 - else buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256 buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*2 buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*3 - end s_add_u32 m0, m0, 4 //next vgpr index s_add_u32 s_save_mem_offset, s_save_mem_offset, 256*4 //every buffer_store_dword does 256 bytes s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0 s_cbranch_scc1 L_SAVE_VGPR_LOOP //VGPR save is complete? 
s_set_gpr_idx_off -end L_SAVE_VGPR_END: @@ -905,29 +658,6 @@ L_SAVE_ACCVGPR_LOOP: L_SAVE_ACCVGPR_END: end - /* S_PGM_END_SAVED */ //FIXME graphics ONLY - if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_SAVE_NORMAL_EXIT)) - s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff //pc[47:32] - s_add_u32 s_save_pc_lo, s_save_pc_lo, 4 //pc[31:0]+4 - s_addc_u32 s_save_pc_hi, s_save_pc_hi, 0x0 //carry bit over - s_rfe_b64 s_save_pc_lo //Return to the main shader program - else - end - -// Save Done timestamp -if G8SR_DEBUG_TIMESTAMP - s_memrealtime s_g8sr_ts_save_d - // SGPR SR memory offset : size(VGPR) - get_vgpr_size_bytes(s_save_mem_offset) - s_add_u32 s_save_mem_offset, s_save_mem_offset, G8SR_DEBUG_TS_SAVE_D_OFFSET - s_waitcnt lgkmcnt(0) //FIXME, will cause xnack?? - // Need reset rsrc2?? - s_mov_b32 m0, s_save_mem_offset - s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes - s_buffer_store_dwordx2 s_g8sr_ts_save_d, s_save_buf_rsrc0, m0 glc:1 -end - - s_branch L_END_PGM @@ -938,27 +668,6 @@ end L_RESTORE: /* Setup Resource Contants */ - if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL)) - //calculate wd_addr using absolute thread id - v_readlane_b32 s_restore_tmp, v9, 0 - s_lshr_b32 s_restore_tmp, s_restore_tmp, 6 - s_mul_i32 s_restore_tmp, s_restore_tmp, WAVE_SPACE - s_add_i32 s_restore_spi_init_lo, s_restore_tmp, WG_BASE_ADDR_LO - s_mov_b32 s_restore_spi_init_hi, WG_BASE_ADDR_HI - s_and_b32 s_restore_spi_init_hi, s_restore_spi_init_hi, CTX_RESTORE_CONTROL - else - end - -if G8SR_DEBUG_TIMESTAMP - s_memrealtime s_g8sr_ts_restore_s - s_waitcnt lgkmcnt(0) //FIXME, will cause xnack?? - // tma_lo/hi are sgpr 110, 111, which will not used for 112 SGPR allocated case... - s_mov_b32 s_restore_pc_lo, s_g8sr_ts_restore_s[0] - s_mov_b32 s_restore_pc_hi, s_g8sr_ts_restore_s[1] //backup ts to ttmp0/1, sicne exec will be finally restored.. -end - - - s_mov_b32 s_restore_buf_rsrc0, s_restore_spi_init_lo //base_addr_lo s_and_b32 s_restore_buf_rsrc1, s_restore_spi_init_hi, 0x0000FFFF //base_addr_hi s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE @@ -1000,18 +709,12 @@ end s_add_u32 s_restore_mem_offset, s_restore_mem_offset, get_hwreg_size_bytes() //FIXME, Check if offset overflow??? - if (SWIZZLE_EN) - s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? - else s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes - end s_mov_b32 m0, 0x0 //lds_offset initial value = 0 L_RESTORE_LDS_LOOP: - if (SAVE_LDS) buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 // first 64DW buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 offset:256 // second 64DW - end s_add_u32 m0, m0, 256*2 // 128 DW s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*2 //mem offset increased by 128DW s_cmp_lt_u32 m0, s_restore_alloc_size //scc=(m0 < s_restore_alloc_size) ? 1 : 0 @@ -1035,40 +738,8 @@ if ASIC_TARGET_ARCTURUS s_mov_b32 s_restore_accvgpr_offset, s_restore_buf_rsrc2 //ACC VGPRs at end of VGPRs end - if (SWIZZLE_EN) - s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? 
- else s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes - end -if G8SR_VGPR_SR_IN_DWX4 - get_vgpr_size_bytes(s_restore_mem_offset) - s_sub_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4 - - // the const stride for DWx4 is 4*4 bytes - s_and_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, 0x0000FFFF // reset const stride to 0 - s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, G8SR_RESTORE_BUF_RSRC_WORD1_STRIDE_DWx4 // const stride to 4*4 bytes - - s_mov_b32 m0, s_restore_alloc_size - s_set_gpr_idx_on m0, 0x8 // Note.. This will change m0 - -L_RESTORE_VGPR_LOOP: - buffer_load_dwordx4 v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 - s_waitcnt vmcnt(0) - s_sub_u32 m0, m0, 4 - v_mov_b32 v0, v0 // v[0+m0] = v0 - v_mov_b32 v1, v1 - v_mov_b32 v2, v2 - v_mov_b32 v3, v3 - s_sub_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4 - s_cmp_eq_u32 m0, 0x8000 - s_cbranch_scc0 L_RESTORE_VGPR_LOOP - s_set_gpr_idx_off - - s_and_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, 0x0000FFFF // reset const stride to 0 - s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE // const stride to 4*4 bytes - -else // VGPR load using dw burst s_mov_b32 s_restore_mem_offset_save, s_restore_mem_offset // restore start with v1, v0 will be the last s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4 @@ -1081,9 +752,6 @@ end s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 0x8000 //add 0x8000 since we compare m0 against it later L_RESTORE_VGPR_LOOP: - if(USE_MTBUF_INSTEAD_OF_MUBUF) - tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1 - else if ASIC_TARGET_ARCTURUS buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_accvgpr_offset slc:1 glc:1 @@ -1102,7 +770,6 @@ end buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256 buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256*2 buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256*3 - end s_waitcnt vmcnt(0) //ensure data ready v_mov_b32 v0, v0 //v[0+m0] = v0 v_mov_b32 v1, v1 @@ -1126,16 +793,10 @@ if ASIC_TARGET_ARCTURUS end end - if(USE_MTBUF_INSTEAD_OF_MUBUF) - tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1 - else buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256 buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*2 buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*3 - end - -end /* restore SGPRs */ ////////////////////////////// @@ -1151,16 +812,8 @@ end s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 1 s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 4 //Number of SGPRs = (sgpr_size + 1) * 16 (non-zero value) - if (SGPR_SAVE_USE_SQC) s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 2 //NUM_RECORDS in bytes - else - s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 8 //NUM_RECORDS in bytes (64 threads) - end - if (SWIZZLE_EN) - s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? 
- else s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes - end s_mov_b32 m0, s_restore_alloc_size @@ -1188,11 +841,6 @@ end L_RESTORE_HWREG: -if G8SR_DEBUG_TIMESTAMP - s_mov_b32 s_g8sr_ts_restore_s[0], s_restore_pc_lo - s_mov_b32 s_g8sr_ts_restore_s[1], s_restore_pc_hi -end - // HWREG SR memory offset : size(VGPR)+size(SGPR) get_vgpr_size_bytes(s_restore_mem_offset) get_sgpr_size_bytes(s_restore_tmp) @@ -1200,11 +848,7 @@ end s_mov_b32 s_restore_buf_rsrc2, 0x4 //NUM_RECORDS in bytes - if (SWIZZLE_EN) - s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? - else s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes - end read_hwreg_from_mem(s_restore_m0, s_restore_buf_rsrc0, s_restore_mem_offset) //M0 read_hwreg_from_mem(s_restore_pc_lo, s_restore_buf_rsrc0, s_restore_mem_offset) //PC @@ -1219,16 +863,6 @@ end s_waitcnt lgkmcnt(0) //from now on, it is safe to restore STATUS and IB_STS - //for normal save & restore, the saved PC points to the next inst to execute, no adjustment needs to be made, otherwise: - if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL)) - s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 8 //pc[31:0]+8 //two back-to-back s_trap are used (first for save and second for restore) - s_addc_u32 s_restore_pc_hi, s_restore_pc_hi, 0x0 //carry bit over - end - if ((EMU_RUN_HACK) && (EMU_RUN_HACK_RESTORE_NORMAL)) - s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 4 //pc[31:0]+4 // save is hack through s_trap but restore is normal - s_addc_u32 s_restore_pc_hi, s_restore_pc_hi, 0x0 //carry bit over - end - s_mov_b32 m0, s_restore_m0 s_mov_b32 exec_lo, s_restore_exec_lo s_mov_b32 exec_hi, s_restore_exec_hi @@ -1275,11 +909,6 @@ end s_barrier //barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG //FIXME not performance-optimal at this time -if G8SR_DEBUG_TIMESTAMP - s_memrealtime s_g8sr_ts_restore_d - s_waitcnt lgkmcnt(0) -end - // s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution s_rfe_restore_b64 s_restore_pc_lo, s_restore_m0 // s_restore_m0[0] is used to set STATUS.inst_atc -- cgit v1.2.3 From 64671c0fdc9193978cb93aaa79965e45b3cce437 Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Tue, 9 Jul 2019 15:47:57 -0400 Subject: drm/amdgpu: add perfmon and fica atomics for df adding perfmon and fica atomic operations to adhere to data fabrics finite state machine requirements for indirect register access. 
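For illustration only (not part of this patch), a minimal consumer of the new callbacks might look like the sketch below. The helper name is hypothetical; the get_fica()/set_fica() signatures and the adev->df_funcs accessor are taken from the changes in this series.

	/* hypothetical caller: atomically read, then clear, an indirect DF register */
	static uint64_t example_df_read_and_clear(struct amdgpu_device *adev,
						  uint32_t ficaa_val)
	{
		uint64_t val;

		/* lo/hi ordering and pcie_idx locking are handled inside get_fica() */
		val = adev->df_funcs->get_fica(adev, ficaa_val);

		/* writing zeroes back resets the selected status, as the later
		 * xgmi error patch does, without breaking the DF state machine
		 */
		adev->df_funcs->set_fica(adev, ficaa_val, 0, 0);

		return val;
	}

The xgmi error-monitoring patch later in this series follows the same read-then-clear pattern through these callbacks.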
Signed-off-by: Jonathan Kim Reviewed-by: Kent Russell Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 3 + drivers/gpu/drm/amd/amdgpu/df_v3_6.c | 202 ++++++++++++++++++++++------------- 2 files changed, 128 insertions(+), 77 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 29123aa16113..ca8b325291a3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -711,6 +711,9 @@ struct amdgpu_df_funcs { int is_disable); void (*pmc_get_count)(struct amdgpu_device *adev, uint64_t config, uint64_t *count); + uint64_t (*get_fica)(struct amdgpu_device *adev, uint32_t ficaa_val); + void (*set_fica)(struct amdgpu_device *adev, uint32_t ficaa_val, + uint32_t ficadl_val, uint32_t ficadh_val); }; /* Define the HW IP blocks will be used in driver , add more if necessary */ enum amd_hw_ip_block_type { diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c index ef6e91f9f51c..5850c8e34caa 100644 --- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c +++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c @@ -93,6 +93,96 @@ const struct attribute_group *df_v3_6_attr_groups[] = { NULL }; +static uint64_t df_v3_6_get_fica(struct amdgpu_device *adev, + uint32_t ficaa_val) +{ + unsigned long flags, address, data; + uint32_t ficadl_val, ficadh_val; + + address = adev->nbio_funcs->get_pcie_index_offset(adev); + data = adev->nbio_funcs->get_pcie_data_offset(adev); + + spin_lock_irqsave(&adev->pcie_idx_lock, flags); + WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3); + WREG32(data, ficaa_val); + + WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessDataLo3); + ficadl_val = RREG32(data); + + WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessDataHi3); + ficadh_val = RREG32(data); + + spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); + + return (((ficadh_val & 0xFFFFFFFFFFFFFFFF) << 32) | ficadl_val); +} + +static void df_v3_6_set_fica(struct amdgpu_device *adev, uint32_t ficaa_val, + uint32_t ficadl_val, uint32_t ficadh_val) +{ + unsigned long flags, address, data; + + address = adev->nbio_funcs->get_pcie_index_offset(adev); + data = adev->nbio_funcs->get_pcie_data_offset(adev); + + spin_lock_irqsave(&adev->pcie_idx_lock, flags); + WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3); + WREG32(data, ficaa_val); + + WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessDataLo3); + WREG32(data, ficadl_val); + + WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessDataHi3); + WREG32(data, ficadh_val); + + spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); +} + +/* + * df_v3_6_perfmon_rreg - read perfmon lo and hi + * + * required to be atomic. no mmio method provided so subsequent reads for lo + * and hi require to preserve df finite state machine + */ +static void df_v3_6_perfmon_rreg(struct amdgpu_device *adev, + uint32_t lo_addr, uint32_t *lo_val, + uint32_t hi_addr, uint32_t *hi_val) +{ + unsigned long flags, address, data; + + address = adev->nbio_funcs->get_pcie_index_offset(adev); + data = adev->nbio_funcs->get_pcie_data_offset(adev); + + spin_lock_irqsave(&adev->pcie_idx_lock, flags); + WREG32(address, lo_addr); + *lo_val = RREG32(data); + WREG32(address, hi_addr); + *hi_val = RREG32(data); + spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); +} + +/* + * df_v3_6_perfmon_wreg - write to perfmon lo and hi + * + * required to be atomic. 
no mmio method provided so subsequent reads after + * data writes cannot occur to preserve data fabrics finite state machine. + */ +static void df_v3_6_perfmon_wreg(struct amdgpu_device *adev, uint32_t lo_addr, + uint32_t lo_val, uint32_t hi_addr, uint32_t hi_val) +{ + unsigned long flags, address, data; + + address = adev->nbio_funcs->get_pcie_index_offset(adev); + data = adev->nbio_funcs->get_pcie_data_offset(adev); + + spin_lock_irqsave(&adev->pcie_idx_lock, flags); + WREG32(address, lo_addr); + WREG32(data, lo_val); + WREG32(address, hi_addr); + WREG32(data, hi_val); + spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); +} + /* get the number of df counters available */ static ssize_t df_v3_6_get_df_cntr_avail(struct device *dev, struct device_attribute *attr, @@ -268,6 +358,10 @@ static int df_v3_6_pmc_get_ctrl_settings(struct amdgpu_device *adev, uint32_t *lo_val, uint32_t *hi_val) { + + uint32_t eventsel, instance, unitmask; + uint32_t instance_10, instance_5432, instance_76; + df_v3_6_pmc_get_addr(adev, config, 1, lo_base_addr, hi_base_addr); if ((*lo_base_addr == 0) || (*hi_base_addr == 0)) { @@ -276,40 +370,33 @@ static int df_v3_6_pmc_get_ctrl_settings(struct amdgpu_device *adev, return -ENXIO; } - if (lo_val && hi_val) { - uint32_t eventsel, instance, unitmask; - uint32_t instance_10, instance_5432, instance_76; + eventsel = DF_V3_6_GET_EVENT(config) & 0x3f; + unitmask = DF_V3_6_GET_UNITMASK(config) & 0xf; + instance = DF_V3_6_GET_INSTANCE(config); - eventsel = DF_V3_6_GET_EVENT(config) & 0x3f; - unitmask = DF_V3_6_GET_UNITMASK(config) & 0xf; - instance = DF_V3_6_GET_INSTANCE(config); + instance_10 = instance & 0x3; + instance_5432 = (instance >> 2) & 0xf; + instance_76 = (instance >> 6) & 0x3; - instance_10 = instance & 0x3; - instance_5432 = (instance >> 2) & 0xf; - instance_76 = (instance >> 6) & 0x3; + *lo_val = (unitmask << 8) | (instance_10 << 6) | eventsel | (1 << 22); + *hi_val = (instance_76 << 29) | instance_5432; - *lo_val = (unitmask << 8) | (instance_10 << 6) | eventsel; - *hi_val = (instance_76 << 29) | instance_5432; - } + DRM_DEBUG_DRIVER("config=%llx addr=%08x:%08x val=%08x:%08x", + config, *lo_base_addr, *hi_base_addr, *lo_val, *hi_val); return 0; } -/* assign df performance counters for read */ -static int df_v3_6_pmc_assign_cntr(struct amdgpu_device *adev, - uint64_t config, - int *is_assigned) +/* add df performance counters for read */ +static int df_v3_6_pmc_add_cntr(struct amdgpu_device *adev, + uint64_t config) { int i, target_cntr; - *is_assigned = 0; - target_cntr = df_v3_6_pmc_config_2_cntr(adev, config); - if (target_cntr >= 0) { - *is_assigned = 1; + if (target_cntr >= 0) return 0; - } for (i = 0; i < DF_V3_6_MAX_COUNTERS; i++) { if (adev->df_perfmon_config_assign_mask[i] == 0U) { @@ -344,45 +431,13 @@ static void df_v3_6_reset_perfmon_cntr(struct amdgpu_device *adev, if ((lo_base_addr == 0) || (hi_base_addr == 0)) return; - WREG32_PCIE(lo_base_addr, 0UL); - WREG32_PCIE(hi_base_addr, 0UL); -} - - -static int df_v3_6_add_perfmon_cntr(struct amdgpu_device *adev, - uint64_t config) -{ - uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val; - int ret, is_assigned; - - ret = df_v3_6_pmc_assign_cntr(adev, config, &is_assigned); - - if (ret || is_assigned) - return ret; - - ret = df_v3_6_pmc_get_ctrl_settings(adev, - config, - &lo_base_addr, - &hi_base_addr, - &lo_val, - &hi_val); - - if (ret) - return ret; - - DRM_DEBUG_DRIVER("config=%llx addr=%08x:%08x val=%08x:%08x", - config, lo_base_addr, hi_base_addr, lo_val, hi_val); - - WREG32_PCIE(lo_base_addr, 
lo_val); - WREG32_PCIE(hi_base_addr, hi_val); - - return ret; + df_v3_6_perfmon_wreg(adev, lo_base_addr, 0, hi_base_addr, 0); } static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config, int is_enable) { - uint32_t lo_base_addr, hi_base_addr, lo_val; + uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val; int ret = 0; switch (adev->asic_type) { @@ -391,24 +446,20 @@ static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config, df_v3_6_reset_perfmon_cntr(adev, config); if (is_enable) { - ret = df_v3_6_add_perfmon_cntr(adev, config); + ret = df_v3_6_pmc_add_cntr(adev, config); } else { ret = df_v3_6_pmc_get_ctrl_settings(adev, config, &lo_base_addr, &hi_base_addr, - NULL, - NULL); + &lo_val, + &hi_val); if (ret) return ret; - lo_val = RREG32_PCIE(lo_base_addr); - - DRM_DEBUG_DRIVER("config=%llx addr=%08x:%08x val=%08x", - config, lo_base_addr, hi_base_addr, lo_val); - - WREG32_PCIE(lo_base_addr, lo_val | (1ULL << 22)); + df_v3_6_perfmon_wreg(adev, lo_base_addr, lo_val, + hi_base_addr, hi_val); } break; @@ -422,7 +473,7 @@ static int df_v3_6_pmc_start(struct amdgpu_device *adev, uint64_t config, static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config, int is_disable) { - uint32_t lo_base_addr, hi_base_addr, lo_val; + uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val; int ret = 0; switch (adev->asic_type) { @@ -431,18 +482,13 @@ static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config, config, &lo_base_addr, &hi_base_addr, - NULL, - NULL); + &lo_val, + &hi_val); if (ret) return ret; - lo_val = RREG32_PCIE(lo_base_addr); - - DRM_DEBUG_DRIVER("config=%llx addr=%08x:%08x val=%08x", - config, lo_base_addr, hi_base_addr, lo_val); - - WREG32_PCIE(lo_base_addr, lo_val & ~(1ULL << 22)); + df_v3_6_perfmon_wreg(adev, lo_base_addr, 0, hi_base_addr, 0); if (is_disable) df_v3_6_pmc_release_cntr(adev, config); @@ -471,8 +517,8 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev, if ((lo_base_addr == 0) || (hi_base_addr == 0)) return; - lo_val = RREG32_PCIE(lo_base_addr); - hi_val = RREG32_PCIE(hi_base_addr); + df_v3_6_perfmon_rreg(adev, lo_base_addr, &lo_val, + hi_base_addr, &hi_val); *count = ((hi_val | 0ULL) << 32) | (lo_val | 0ULL); @@ -480,7 +526,7 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev, *count = 0; DRM_DEBUG_DRIVER("config=%llx addr=%08x:%08x val=%08x:%08x", - config, lo_base_addr, hi_base_addr, lo_val, hi_val); + config, lo_base_addr, hi_base_addr, lo_val, hi_val); break; @@ -499,5 +545,7 @@ const struct amdgpu_df_funcs df_v3_6_funcs = { .get_clockgating_state = df_v3_6_get_clockgating_state, .pmc_start = df_v3_6_pmc_start, .pmc_stop = df_v3_6_pmc_stop, - .pmc_get_count = df_v3_6_pmc_get_count + .pmc_get_count = df_v3_6_pmc_get_count, + .get_fica = df_v3_6_get_fica, + .set_fica = df_v3_6_set_fica }; -- cgit v1.2.3 From 24f9aacfb0fbe724c94f6ffe24cc518bfdca4b1d Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Thu, 11 Jul 2019 13:14:02 -0400 Subject: drm/amdgpu: adding xgmi error monitoring monitor xgmi errors via mc pie status through fica registers. 
Signed-off-by: Jonathan Kim Reviewed-by: Kent Russell Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 38 ++++++++++++++++++++++++++++++-- 1 file changed, 36 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 4e8d60eec0fe..65aae75f80fd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -25,7 +25,7 @@ #include "amdgpu.h" #include "amdgpu_xgmi.h" #include "amdgpu_smu.h" - +#include "df/df_3_6_offset.h" static DEFINE_MUTEX(xgmi_mutex); @@ -131,9 +131,37 @@ static ssize_t amdgpu_xgmi_show_device_id(struct device *dev, } +#define AMDGPU_XGMI_SET_FICAA(o) ((o) | 0x456801) +static ssize_t amdgpu_xgmi_show_error(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + uint32_t ficaa_pie_ctl_in, ficaa_pie_status_in; + uint64_t fica_out; + unsigned int error_count = 0; + + ficaa_pie_ctl_in = AMDGPU_XGMI_SET_FICAA(0x200); + ficaa_pie_status_in = AMDGPU_XGMI_SET_FICAA(0x208); -static DEVICE_ATTR(xgmi_device_id, S_IRUGO, amdgpu_xgmi_show_device_id, NULL); + fica_out = adev->df_funcs->get_fica(adev, ficaa_pie_ctl_in); + if (fica_out != 0x1f) + pr_err("xGMI error counters not enabled!\n"); + + fica_out = adev->df_funcs->get_fica(adev, ficaa_pie_status_in); + + if ((fica_out & 0xffff) == 2) + error_count = ((fica_out >> 62) & 0x1) + (fica_out >> 63); + adev->df_funcs->set_fica(adev, ficaa_pie_status_in, 0, 0); + + return snprintf(buf, PAGE_SIZE, "%d\n", error_count); +} + + +static DEVICE_ATTR(xgmi_device_id, S_IRUGO, amdgpu_xgmi_show_device_id, NULL); +static DEVICE_ATTR(xgmi_error, S_IRUGO, amdgpu_xgmi_show_error, NULL); static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev, struct amdgpu_hive_info *hive) @@ -148,6 +176,12 @@ static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev, return ret; } + /* Create xgmi error file */ + ret = device_create_file(adev->dev, &dev_attr_xgmi_error); + if (ret) + pr_err("failed to create xgmi_error\n"); + + /* Create sysfs link to hive info folder on the first device */ if (adev != hive->adev) { ret = sysfs_create_link(&adev->dev->kobj, hive->kobj, -- cgit v1.2.3 From e4c4073b0139d055d43a9568690fc560aab4fa5c Mon Sep 17 00:00:00 2001 From: Andrey Grodzovsky Date: Mon, 15 Jul 2019 18:04:08 -0400 Subject: drm/amdgpu: Fix hard hang for S/G display BOs. HW requires for caching to be unset for scanout BO mappings when the BO placement is in GTT memory. Usually the flag to unset is passed from user mode but for FB mode this was missing. 
v2: Keep all BO placement logic in amdgpu_display_supported_domains Suggested-by: Alex Deucher Signed-off-by: Andrey Grodzovsky Reviewed-by: Alex Deucher Tested-by: Shirish S Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 7 +++---- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 3 ++- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c index eb3569b46c1e..430c56f9544a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c @@ -139,14 +139,14 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev, mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, cpp, fb_tiled); domain = amdgpu_display_supported_domains(adev); - height = ALIGN(mode_cmd->height, 8); size = mode_cmd->pitches[0] * height; aligned_size = ALIGN(size, PAGE_SIZE); ret = amdgpu_gem_object_create(adev, aligned_size, 0, domain, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | - AMDGPU_GEM_CREATE_VRAM_CLEARED, + AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | + AMDGPU_GEM_CREATE_VRAM_CLEARED | + AMDGPU_GEM_CREATE_CPU_GTT_USWC, ttm_bo_type_kernel, NULL, &gobj); if (ret) { pr_err("failed to allocate framebuffer (%d)\n", aligned_size); @@ -168,7 +168,6 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev, dev_err(adev->dev, "FB failed to set tiling flags\n"); } - ret = amdgpu_bo_pin(abo, domain); if (ret) { amdgpu_bo_unreserve(abo); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 939f8305511b..fb291366d5ad 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -747,7 +747,8 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv, struct amdgpu_device *adev = dev->dev_private; struct drm_gem_object *gobj; uint32_t handle; - u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; + u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | + AMDGPU_GEM_CREATE_CPU_GTT_USWC; u32 domain; int r; -- cgit v1.2.3 From 3d1b8ec76b8f4f91641fd87cc19cd980426a2060 Mon Sep 17 00:00:00 2001 From: Andrey Grodzovsky Date: Wed, 24 Jul 2019 10:04:27 -0400 Subject: drm/amdgpu: Create helper to clear AMDGPU_GEM_CREATE_CPU_GTT_USWC MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move the logic to clear AMDGPU_GEM_CREATE_CPU_GTT_USWC in amdgpu_bo_do_create into standalone helper so it can be reused in other functions. v4: Switch to return bool. v5: Fix typos. 
Signed-off-by: Andrey Grodzovsky Acked-by: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 61 +++++++++++++++++------------- drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 2 + 2 files changed, 37 insertions(+), 26 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index bea6f298dfdc..531251dff150 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -413,6 +413,40 @@ fail: return false; } +bool amdgpu_bo_support_uswc(u64 bo_flags) +{ + +#ifdef CONFIG_X86_32 + /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit + * See https://bugs.freedesktop.org/show_bug.cgi?id=84627 + */ + return false; +#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT) + /* Don't try to enable write-combining when it can't work, or things + * may be slow + * See https://bugs.freedesktop.org/show_bug.cgi?id=88758 + */ + +#ifndef CONFIG_COMPILE_TEST +#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \ + thanks to write-combining +#endif + + if (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) + DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for " + "better performance thanks to write-combining\n"); + return false; +#else + /* For architectures that don't support WC memory, + * mask out the WC flag from the BO + */ + if (!drm_arch_can_wc_memory()) + return false; + + return true; +#endif +} + static int amdgpu_bo_do_create(struct amdgpu_device *adev, struct amdgpu_bo_param *bp, struct amdgpu_bo **bo_ptr) @@ -466,33 +500,8 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, bo->flags = bp->flags; -#ifdef CONFIG_X86_32 - /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit - * See https://bugs.freedesktop.org/show_bug.cgi?id=84627 - */ - bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC; -#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT) - /* Don't try to enable write-combining when it can't work, or things - * may be slow - * See https://bugs.freedesktop.org/show_bug.cgi?id=88758 - */ - -#ifndef CONFIG_COMPILE_TEST -#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \ - thanks to write-combining -#endif - - if (bo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) - DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for " - "better performance thanks to write-combining\n"); - bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC; -#else - /* For architectures that don't support WC memory, - * mask out the WC flag from the BO - */ - if (!drm_arch_can_wc_memory()) + if (!amdgpu_bo_support_uswc(bo->flags)) bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC; -#endif bo->tbo.bdev = &adev->mman.bdev; if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA | diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index d60593cc436e..dc44cf36d025 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -308,5 +308,7 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager, struct seq_file *m); #endif +bool amdgpu_bo_support_uswc(u64 bo_flags); + #endif -- cgit v1.2.3 From ddcb7fc62f4be99faedfa1764c971a2f31468962 Mon Sep 17 00:00:00 2001 From: Andrey Grodzovsky Date: Wed, 24 Jul 2019 11:09:03 -0400 Subject: drm/amdgpu: Add check for USWC support for amdgpu_display_supported_domains MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit This verifies we don't add GTT as allowed domain for APUs when USWC is disabled. Signed-off-by: Andrey Grodzovsky Acked-by: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 535650967b1a..ddd8f5b0f2d3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -500,8 +500,15 @@ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev) uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM; #if defined(CONFIG_DRM_AMD_DC) + /* + * if amdgpu_bo_validate_uswc returns false it means that USWC mappings + * is not supported for this board. But this mapping is required + * to avoid hang caused by placement of scanout BO in GTT on certain + * APUs. So force the BO placement to VRAM in case this architecture + * will not allow USWC mappings. + */ if (adev->asic_type >= CHIP_CARRIZO && adev->asic_type < CHIP_RAVEN && - adev->flags & AMD_IS_APU && + adev->flags & AMD_IS_APU && amdgpu_bo_support_uswc(0) && amdgpu_device_asic_has_dc_support(adev->asic_type)) domain |= AMDGPU_GEM_DOMAIN_GTT; #endif -- cgit v1.2.3 From 1c4259159132ae4ceaf7c6db37a6cf76417f73d9 Mon Sep 17 00:00:00 2001 From: Shirish S Date: Tue, 16 Jul 2019 14:49:48 +0530 Subject: drm/amd/display: enable S/G for RAVEN chip enables gpu_vm_support in dm and adds AMDGPU_GEM_DOMAIN_GTT as supported domain v2: Move BO placement logic into amdgpu_display_supported_domains v3: Use amdgpu_bo_validate_uswc in amdgpu_display_supported_domains. v4: amdgpu_bo_validate_uswc moved to sepperate patch. Signed-off-by: Shirish S Signed-off-by: Andrey Grodzovsky Reviewed-by: Nicholas Kazlauskas Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 2 +- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index ddd8f5b0f2d3..8b06150080aa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -507,7 +507,7 @@ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev) * APUs. So force the BO placement to VRAM in case this architecture * will not allow USWC mappings. 
*/ - if (adev->asic_type >= CHIP_CARRIZO && adev->asic_type < CHIP_RAVEN && + if (adev->asic_type >= CHIP_CARRIZO && adev->asic_type <= CHIP_RAVEN && adev->flags & AMD_IS_APU && amdgpu_bo_support_uswc(0) && amdgpu_device_asic_has_dc_support(adev->asic_type)) domain |= AMDGPU_GEM_DOMAIN_GTT; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index e177be3421a4..356d77387c42 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -688,7 +688,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) */ if (adev->flags & AMD_IS_APU && adev->asic_type >= CHIP_CARRIZO && - adev->asic_type < CHIP_RAVEN) + adev->asic_type <= CHIP_RAVEN) init_data.flags.gpu_vm_support = true; if (amdgpu_dc_feature_mask & DC_FBC_MASK) -- cgit v1.2.3 From fcd90fee8ac22da3bce1c6652cf36bc24e7a0749 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Wed, 24 Jul 2019 14:06:09 +0800 Subject: drm/amd/powerplay: minor fixes around SW SMU power and fan setting Add checking for possible invalid input and null pointer. And drop redundant code. Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 22 ++++++++++------------ drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 24 +++++++++++------------- 2 files changed, 21 insertions(+), 25 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index d8041ced9910..9e090030bee6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -1616,20 +1616,16 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev, (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON)) return -EINVAL; - if (is_support_sw_smu(adev)) { - err = kstrtoint(buf, 10, &value); - if (err) - return err; + err = kstrtoint(buf, 10, &value); + if (err) + return err; + if (is_support_sw_smu(adev)) { smu_set_fan_control_mode(&adev->smu, value); } else { if (!adev->powerplay.pp_funcs->set_fan_control_mode) return -EINVAL; - err = kstrtoint(buf, 10, &value); - if (err) - return err; - amdgpu_dpm_set_fan_control_mode(adev, value); } @@ -2049,16 +2045,18 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev, return err; value = value / 1000000; /* convert to Watt */ + if (is_support_sw_smu(adev)) { - adev->smu.funcs->set_power_limit(&adev->smu, value); + err = smu_set_power_limit(&adev->smu, value); } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit) { err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value); - if (err) - return err; } else { - return -EINVAL; + err = -EINVAL; } + if (err) + return err; + return count; } diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index e2ea324644c1..d5d04d110838 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -1100,6 +1100,8 @@ static int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n) max_power_limit *= (100 + smu->smu_table.TDPODLimit); max_power_limit /= 100; } + if (n > max_power_limit) + return -EINVAL; if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n); @@ -1421,17 +1423,17 @@ smu_v11_0_get_fan_control_mode(struct smu_context *smu) } static int -smu_v11_0_smc_fan_control(struct smu_context *smu, bool start) 
+smu_v11_0_auto_fan_control(struct smu_context *smu, bool auto_fan_control) { int ret = 0; if (smu_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT)) return 0; - ret = smu_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, start); + ret = smu_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control); if (ret) pr_err("[%s]%s smc FAN CONTROL feature failed!", - __func__, (start ? "Start" : "Stop")); + __func__, (auto_fan_control ? "Start" : "Stop")); return ret; } @@ -1455,16 +1457,15 @@ static int smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed) { struct amdgpu_device *adev = smu->adev; - uint32_t duty100; - uint32_t duty; + uint32_t duty100, duty; uint64_t tmp64; - bool stop = 0; if (speed > 100) speed = 100; - if (smu_v11_0_smc_fan_control(smu, stop)) + if (smu_v11_0_auto_fan_control(smu, 0)) return -EINVAL; + duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1), CG_FDO_CTRL1, FMAX_DUTY100); if (!duty100) @@ -1486,18 +1487,16 @@ smu_v11_0_set_fan_control_mode(struct smu_context *smu, uint32_t mode) { int ret = 0; - bool start = 1; - bool stop = 0; switch (mode) { case AMD_FAN_CTRL_NONE: ret = smu_v11_0_set_fan_speed_percent(smu, 100); break; case AMD_FAN_CTRL_MANUAL: - ret = smu_v11_0_smc_fan_control(smu, stop); + ret = smu_v11_0_auto_fan_control(smu, 0); break; case AMD_FAN_CTRL_AUTO: - ret = smu_v11_0_smc_fan_control(smu, start); + ret = smu_v11_0_auto_fan_control(smu, 1); break; default: break; @@ -1517,13 +1516,12 @@ static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu, struct amdgpu_device *adev = smu->adev; int ret; uint32_t tach_period, crystal_clock_freq; - bool stop = 0; if (!speed) return -EINVAL; mutex_lock(&(smu->mutex)); - ret = smu_v11_0_smc_fan_control(smu, stop); + ret = smu_v11_0_auto_fan_control(smu, 0); if (ret) goto set_fan_speed_rpm_failed; -- cgit v1.2.3 From f0d2a7dc1154ed680a8422916ab6a38860800de4 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Thu, 25 Jul 2019 12:10:34 +0800 Subject: drm/amd/powerplay: fix null pointer dereference around dpm state relates DPM state relates are not supported on the new SW SMU ASICs. But still it's not OK to trigger null pointer dereference on accessing them. Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 18 +++++++++++++----- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 3 ++- 2 files changed, 15 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 9e090030bee6..88ff38242033 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -159,12 +159,16 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev, struct amdgpu_device *adev = ddev->dev_private; enum amd_pm_state_type pm; - if (is_support_sw_smu(adev) && adev->smu.ppt_funcs->get_current_power_state) - pm = amdgpu_smu_get_current_power_state(adev); - else if (adev->powerplay.pp_funcs->get_current_power_state) + if (is_support_sw_smu(adev)) { + if (adev->smu.ppt_funcs->get_current_power_state) + pm = amdgpu_smu_get_current_power_state(adev); + else + pm = adev->pm.dpm.user_state; + } else if (adev->powerplay.pp_funcs->get_current_power_state) { pm = amdgpu_dpm_get_current_power_state(adev); - else + } else { pm = adev->pm.dpm.user_state; + } return snprintf(buf, PAGE_SIZE, "%s\n", (pm == POWER_STATE_TYPE_BATTERY) ? 
"battery" : @@ -191,7 +195,11 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev, goto fail; } - if (adev->powerplay.pp_funcs->dispatch_tasks) { + if (is_support_sw_smu(adev)) { + mutex_lock(&adev->pm.mutex); + adev->pm.dpm.user_state = state; + mutex_unlock(&adev->pm.mutex); + } else if (adev->powerplay.pp_funcs->dispatch_tasks) { amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state); } else { mutex_lock(&adev->pm.mutex); diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index d8bb18445086..24644f320073 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -305,7 +305,8 @@ int smu_get_power_num_states(struct smu_context *smu, /* not support power state */ memset(state_info, 0, sizeof(struct pp_states_info)); - state_info->nums = 0; + state_info->nums = 1; + state_info->states[0] = POWER_STATE_TYPE_DEFAULT; return 0; } -- cgit v1.2.3 From 4d7fd9e20b0784b07777728316da5bcc13f9f2ab Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Thu, 25 Jul 2019 10:12:42 +0800 Subject: drm/amd/powerplay: enable SW SMU reset functionality Move SMU irq handler register to sw_init as that's totally software related. Otherwise, it will prevent SMU reset working. Signed-off-by: Evan Quan Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 24644f320073..e8dba0256457 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -743,6 +743,12 @@ static int smu_sw_init(void *handle) return ret; } + ret = smu_register_irq_handler(smu); + if (ret) { + pr_err("Failed to register smc irq handler!\n"); + return ret; + } + return 0; } @@ -752,6 +758,9 @@ static int smu_sw_fini(void *handle) struct smu_context *smu = &adev->smu; int ret; + kfree(smu->irq_source); + smu->irq_source = NULL; + ret = smu_smc_table_sw_fini(smu); if (ret) { pr_err("Failed to sw fini smc table!\n"); @@ -1111,10 +1120,6 @@ static int smu_hw_init(void *handle) if (ret) goto failed; - ret = smu_register_irq_handler(smu); - if (ret) - goto failed; - if (!smu->pm_enabled) adev->pm.dpm_enabled = false; else @@ -1144,9 +1149,6 @@ static int smu_hw_fini(void *handle) kfree(table_context->overdrive_table); table_context->overdrive_table = NULL; - kfree(smu->irq_source); - smu->irq_source = NULL; - ret = smu_fini_fb_allocations(smu); if (ret) return ret; -- cgit v1.2.3 From 0cf3c64f294c1a03e7e1d826c6c9d1b126216c02 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 23 Jul 2019 23:27:21 -0500 Subject: drm/amdgpu: add an asic callback to determine the reset method Sometimes the driver may have to behave differently depending on the method we are using to reset the GPU. 
Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index ca8b325291a3..5cbed256f006 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -532,6 +532,14 @@ struct amdgpu_allowed_register_entry { bool grbm_indexed; }; +enum amd_reset_method { + AMD_RESET_METHOD_LEGACY = 0, + AMD_RESET_METHOD_MODE0, + AMD_RESET_METHOD_MODE1, + AMD_RESET_METHOD_MODE2, + AMD_RESET_METHOD_BACO +}; + /* * ASIC specific functions. */ @@ -543,6 +551,7 @@ struct amdgpu_asic_funcs { u32 sh_num, u32 reg_offset, u32 *value); void (*set_vga_state)(struct amdgpu_device *adev, bool state); int (*reset)(struct amdgpu_device *adev); + enum amd_reset_method (*reset_method)(struct amdgpu_device *adev); /* get the reference clock */ u32 (*get_xclk)(struct amdgpu_device *adev); /* MM block clocks */ @@ -1109,6 +1118,7 @@ int emu_soc_asic_init(struct amdgpu_device *adev); */ #define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state)) #define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev)) +#define amdgpu_asic_reset_method(adev) (adev)->asic_funcs->reset_method((adev)) #define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev)) #define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d)) #define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec)) -- cgit v1.2.3 From dd81eede7703fd7349be84f1830d7a1339918e38 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 23 Jul 2019 23:44:54 -0500 Subject: drm/amdgpu: add reset_method asic callback for si SI always uses the legacy pci based reset. Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/si.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index 4d74453f3cfb..f09930a416ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -1186,6 +1186,12 @@ static int si_asic_reset(struct amdgpu_device *adev) return 0; } +static enum amd_reset_method +si_asic_reset_method(struct amdgpu_device *adev) +{ + return AMD_RESET_METHOD_LEGACY; +} + static u32 si_get_config_memsize(struct amdgpu_device *adev) { return RREG32(mmCONFIG_MEMSIZE); @@ -1394,6 +1400,7 @@ static const struct amdgpu_asic_funcs si_asic_funcs = .read_bios_from_rom = &si_read_bios_from_rom, .read_register = &si_read_register, .reset = &si_asic_reset, + .reset_method = &si_asic_reset_method, .set_vga_state = &si_vga_set_state, .get_xclk = &si_get_xclk, .set_uvd_clocks = &si_set_uvd_clocks, -- cgit v1.2.3 From 6d0f50dafed4771121f674aef2db2c13ef526ad0 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 23 Jul 2019 23:45:39 -0500 Subject: drm/amdgpu: add reset_method asic callback for cik CIK always uses the legacy pci based reset. 
Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/cik.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 1ffbc0d3d7a1..b81bb414fcb3 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -1291,6 +1291,12 @@ static int cik_asic_reset(struct amdgpu_device *adev) return r; } +static enum amd_reset_method +cik_asic_reset_method(struct amdgpu_device *adev) +{ + return AMD_RESET_METHOD_LEGACY; +} + static u32 cik_get_config_memsize(struct amdgpu_device *adev) { return RREG32(mmCONFIG_MEMSIZE); @@ -1823,6 +1829,7 @@ static const struct amdgpu_asic_funcs cik_asic_funcs = .read_bios_from_rom = &cik_read_bios_from_rom, .read_register = &cik_read_register, .reset = &cik_asic_reset, + .reset_method = &cik_asic_reset_method, .set_vga_state = &cik_vga_set_state, .get_xclk = &cik_get_xclk, .set_uvd_clocks = &cik_set_uvd_clocks, -- cgit v1.2.3 From 9bc1932f5ce5cb601278b8a37d36d7939f3e2482 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 23 Jul 2019 23:46:12 -0500 Subject: drm/amdgpu: add reset_method asic callback for vi VI always uses the legacy pci based reset. Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vi.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 6575ddcfcf00..5f8c8786cac5 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -711,6 +711,12 @@ static int vi_asic_reset(struct amdgpu_device *adev) return r; } +static enum amd_reset_method +vi_asic_reset_method(struct amdgpu_device *adev) +{ + return AMD_RESET_METHOD_LEGACY; +} + static u32 vi_get_config_memsize(struct amdgpu_device *adev) { return RREG32(mmCONFIG_MEMSIZE); @@ -1023,6 +1029,7 @@ static const struct amdgpu_asic_funcs vi_asic_funcs = .read_bios_from_rom = &vi_read_bios_from_rom, .read_register = &vi_read_register, .reset = &vi_asic_reset, + .reset_method = &vi_asic_reset_method, .set_vga_state = &vi_vga_set_state, .get_xclk = &vi_get_xclk, .set_uvd_clocks = &vi_set_uvd_clocks, -- cgit v1.2.3 From ee360c0b7cc3148ff8216286f53f8b3577b2b1a0 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 23 Jul 2019 23:47:06 -0500 Subject: drm/amdgpu: add reset_method asic callback for soc15 APUs only support mode2 reset. dGPUs use either mode1 or baco depending on various conditions. 
Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index cd518f402b3f..30ba94296a66 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -465,12 +465,14 @@ static int soc15_asic_baco_reset(struct amdgpu_device *adev) return 0; } -static int soc15_asic_reset(struct amdgpu_device *adev) +static enum amd_reset_method +soc15_asic_reset_method(struct amdgpu_device *adev) { - int ret; bool baco_reset; switch (adev->asic_type) { + case CHIP_RAVEN: + return AMD_RESET_METHOD_MODE2; case CHIP_VEGA10: case CHIP_VEGA12: soc15_asic_get_baco_capability(adev, &baco_reset); @@ -494,6 +496,16 @@ static int soc15_asic_reset(struct amdgpu_device *adev) } if (baco_reset) + return AMD_RESET_METHOD_BACO; + else + return AMD_RESET_METHOD_MODE1; +} + +static int soc15_asic_reset(struct amdgpu_device *adev) +{ + int ret; + + if (soc15_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) ret = soc15_asic_baco_reset(adev); else ret = soc15_asic_mode1_reset(adev); @@ -807,6 +819,7 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs = .read_bios_from_rom = &soc15_read_bios_from_rom, .read_register = &soc15_read_register, .reset = &soc15_asic_reset, + .reset_method = &soc15_asic_reset_method, .set_vga_state = &soc15_vga_set_state, .get_xclk = &soc15_get_xclk, .set_uvd_clocks = &soc15_set_uvd_clocks, -- cgit v1.2.3 From 2ddc6c3ef9d5f22ad004aad2e28a32810fe9c407 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 23 Jul 2019 23:48:21 -0500 Subject: drm/amdgpu: add reset_method asic callback for navi Navi uses either mode1 or baco depending on various conditions. Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nv.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 10ec0e81ee58..e4885e2d281a 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -289,6 +289,18 @@ static int nv_asic_mode1_reset(struct amdgpu_device *adev) return ret; } + +static enum amd_reset_method +nv_asic_reset_method(struct amdgpu_device *adev) +{ + struct smu_context *smu = &adev->smu; + + if (smu_baco_is_support(smu)) + return AMD_RESET_METHOD_BACO; + else + return AMD_RESET_METHOD_MODE1; +} + static int nv_asic_reset(struct amdgpu_device *adev) { @@ -303,7 +315,7 @@ static int nv_asic_reset(struct amdgpu_device *adev) int ret = 0; struct smu_context *smu = &adev->smu; - if (smu_baco_is_support(smu)) + if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) ret = smu_baco_reset(smu); else ret = nv_asic_mode1_reset(adev); @@ -500,6 +512,7 @@ static const struct amdgpu_asic_funcs nv_asic_funcs = .read_bios_from_rom = &nv_read_bios_from_rom, .read_register = &nv_read_register, .reset = &nv_asic_reset, + .reset_method = &nv_asic_reset_method, .set_vga_state = &nv_vga_set_state, .get_xclk = &nv_get_xclk, .set_uvd_clocks = &nv_set_uvd_clocks, -- cgit v1.2.3 From a2c28e34f8c42e35f5f2990d558d367152e63c27 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 25 Jul 2019 09:41:53 -0500 Subject: drm/amdgpu/powerplay: add a new interface to set the mp1 state This is required for certain cases such as various GPU resets (mode1, mode2), BACO, shutdown, unload, etc. 
to put the SMU into the appropriate state for when the hw is re-initialized. Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/kgd_pp_interface.h | 8 ++++++++ drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 16 ++++++++++++++++ drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 1 + 3 files changed, 25 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index 9733bbf9bc72..95edc3d3a9c4 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -171,6 +171,13 @@ enum PP_HWMON_TEMP { PP_TEMP_MAX }; +enum pp_mp1_state { + PP_MP1_STATE_NONE, + PP_MP1_STATE_SHUTDOWN, + PP_MP1_STATE_UNLOAD, + PP_MP1_STATE_RESET, +}; + #define PP_GROUP_MASK 0xF0000000 #define PP_GROUP_SHIFT 28 @@ -266,6 +273,7 @@ struct amd_pm_funcs { int (*get_power_profile_mode)(void *handle, char *buf); int (*set_power_profile_mode)(void *handle, long *input, uint32_t size); int (*odn_edit_dpm_table)(void *handle, uint32_t type, long *input, uint32_t size); + int (*set_mp1_state)(void *handle, enum pp_mp1_state mp1_state); /* export to DC */ u32 (*get_sclk)(void *handle, bool low); u32 (*get_mclk)(void *handle, bool low); diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index bea1587d352d..88a2ef75b7e1 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -924,6 +924,21 @@ static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint3 return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size); } +static int pp_dpm_set_mp1_state(void *handle, enum pp_mp1_state mp1_state) +{ + struct pp_hwmgr *hwmgr = handle; + + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; + + if (hwmgr->hwmgr_func->set_mp1_state == NULL) { + pr_info_ratelimited("%s was not implemented.\n", __func__); + return -EINVAL; + } + + return hwmgr->hwmgr_func->set_mp1_state(hwmgr, mp1_state); +} + static int pp_dpm_switch_power_profile(void *handle, enum PP_SMC_POWER_PROFILE type, bool en) { @@ -1525,6 +1540,7 @@ static const struct amd_pm_funcs pp_dpm_funcs = { .get_power_profile_mode = pp_get_power_profile_mode, .set_power_profile_mode = pp_set_power_profile_mode, .odn_edit_dpm_table = pp_odn_edit_dpm_table, + .set_mp1_state = pp_dpm_set_mp1_state, .set_power_limit = pp_set_power_limit, .get_power_limit = pp_get_power_limit, /* export to DC */ diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index c5989cb38b1b..07fd64aad2ae 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -344,6 +344,7 @@ struct pp_hwmgr_func { int (*set_asic_baco_state)(struct pp_hwmgr *hwmgr, enum BACO_STATE state); int (*get_ppfeature_status)(struct pp_hwmgr *hwmgr, char *buf); int (*set_ppfeature_status)(struct pp_hwmgr *hwmgr, uint64_t ppfeature_masks); + int (*set_mp1_state)(struct pp_hwmgr *hwmgr, enum pp_mp1_state mp1_state); }; struct pp_table_func { -- cgit v1.2.3 From 479baeacd82841e44cd860d84fa66fe6240f1814 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 30 Jul 2019 21:27:03 -0500 Subject: drm/amdgpu/powerplay: return success if set_mp1_state is not set Some asics (APUs) don't have this callback so we want to return success. Avoids spurious error messages on APUs. 
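The change described here is the usual optional-hook idiom: a NULL callback means the ASIC has nothing to do, so the wrapper reports success instead of an error. A small standalone sketch of the same idea, with hypothetical toy names rather than the real powerplay types:

/* Optional-callback idiom: a missing hook is treated as "nothing to do". */
#include <stddef.h>

struct toy_hwmgr_funcs {
	int (*set_mp1_state)(void *hwmgr, int state);	/* may be NULL on APUs */
};

static int toy_set_mp1_state(const struct toy_hwmgr_funcs *funcs,
			     void *hwmgr, int state)
{
	if (funcs->set_mp1_state)
		return funcs->set_mp1_state(hwmgr, state);

	return 0;	/* not implemented: succeed quietly instead of erroring out */
}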
Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index 88a2ef75b7e1..2e3d9ef625bf 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -931,12 +931,10 @@ static int pp_dpm_set_mp1_state(void *handle, enum pp_mp1_state mp1_state) if (!hwmgr || !hwmgr->pm_en) return -EINVAL; - if (hwmgr->hwmgr_func->set_mp1_state == NULL) { - pr_info_ratelimited("%s was not implemented.\n", __func__); - return -EINVAL; - } + if (hwmgr->hwmgr_func->set_mp1_state) + return hwmgr->hwmgr_func->set_mp1_state(hwmgr, mp1_state); - return hwmgr->hwmgr_func->set_mp1_state(hwmgr, mp1_state); + return 0; } static int pp_dpm_switch_power_profile(void *handle, -- cgit v1.2.3 From a906277d22f790418e68a2ad0feba6617b165d40 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 25 Jul 2019 10:43:28 -0500 Subject: drm/amdgpu/powerplay: add set_mp1_state for vega20 This sets the SMU into the proper state for various operations (shutdown, unload, GPU reset, etc.). Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 29 ++++++++++++++++++++++ 1 file changed, 29 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index f27c6fbb192e..0516c294b377 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -3063,6 +3063,34 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, return 0; } +static int vega20_set_mp1_state(struct pp_hwmgr *hwmgr, + enum pp_mp1_state mp1_state) +{ + uint16_t msg; + int ret; + + switch (mp1_state) { + case PP_MP1_STATE_SHUTDOWN: + msg = PPSMC_MSG_PrepareMp1ForShutdown; + break; + case PP_MP1_STATE_UNLOAD: + msg = PPSMC_MSG_PrepareMp1ForUnload; + break; + case PP_MP1_STATE_RESET: + msg = PPSMC_MSG_PrepareMp1ForReset; + break; + case PP_MP1_STATE_NONE: + default: + return 0; + } + + PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg)) == 0, + "[PrepareMp1] Failed!", + return ret); + + return 0; +} + static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf) { static const char *ppfeature_name[] = { @@ -4123,6 +4151,7 @@ static const struct pp_hwmgr_func vega20_hwmgr_funcs = { .get_asic_baco_capability = vega20_baco_get_capability, .get_asic_baco_state = vega20_baco_get_state, .set_asic_baco_state = vega20_baco_set_state, + .set_mp1_state = vega20_set_mp1_state, }; int vega20_hwmgr_init(struct pp_hwmgr *hwmgr) -- cgit v1.2.3 From e254102d5079f83376b6b6b227e546bcde89683c Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 25 Jul 2019 10:55:25 -0500 Subject: drm/amdgpu/powerplay: add set_mp1_state for vega10 This sets the SMU into the proper state for various operations (shutdown, unload, GPU reset, etc.). 
Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 25 ++++++++++++++++++++++ 1 file changed, 25 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index 3be8eb21fd6e..948c54cb9c5d 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -5219,6 +5219,30 @@ static int vega10_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, return 0; } +static int vega10_set_mp1_state(struct pp_hwmgr *hwmgr, + enum pp_mp1_state mp1_state) +{ + uint16_t msg; + int ret; + + switch (mp1_state) { + case PP_MP1_STATE_UNLOAD: + msg = PPSMC_MSG_PrepareMp1ForUnload; + break; + case PP_MP1_STATE_SHUTDOWN: + case PP_MP1_STATE_RESET: + case PP_MP1_STATE_NONE: + default: + return 0; + } + + PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg)) == 0, + "[PrepareMp1] Failed!", + return ret); + + return 0; +} + static int vega10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, PHM_PerformanceLevelDesignation designation, uint32_t index, PHM_PerformanceLevel *level) @@ -5308,6 +5332,7 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = { .enable_mgpu_fan_boost = vega10_enable_mgpu_fan_boost, .get_ppfeature_status = vega10_get_ppfeature_status, .set_ppfeature_status = vega10_set_ppfeature_status, + .set_mp1_state = vega10_set_mp1_state, }; int vega10_hwmgr_init(struct pp_hwmgr *hwmgr) -- cgit v1.2.3 From 1c074a63834e7414031f33c23efd87bbb8c25cea Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 25 Jul 2019 10:55:57 -0500 Subject: drm/amdgpu/powerplay: add set_mp1_state for vega12 This sets the SMU into the proper state for various operations (shutdown, unload, GPU reset, etc.). 
Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 26 +++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c index efb6d3762feb..7af9ad450ac4 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c @@ -2639,6 +2639,30 @@ static int vega12_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_ return 0; } +static int vega12_set_mp1_state(struct pp_hwmgr *hwmgr, + enum pp_mp1_state mp1_state) +{ + uint16_t msg; + int ret; + + switch (mp1_state) { + case PP_MP1_STATE_UNLOAD: + msg = PPSMC_MSG_PrepareMp1ForUnload; + break; + case PP_MP1_STATE_SHUTDOWN: + case PP_MP1_STATE_RESET: + case PP_MP1_STATE_NONE: + default: + return 0; + } + + PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg)) == 0, + "[PrepareMp1] Failed!", + return ret); + + return 0; +} + static const struct pp_hwmgr_func vega12_hwmgr_funcs = { .backend_init = vega12_hwmgr_backend_init, .backend_fini = vega12_hwmgr_backend_fini, @@ -2695,7 +2719,7 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = { .set_asic_baco_state = vega12_baco_set_state, .get_ppfeature_status = vega12_get_ppfeature_status, .set_ppfeature_status = vega12_set_ppfeature_status, - + .set_mp1_state = vega12_set_mp1_state, }; int vega12_hwmgr_init(struct pp_hwmgr *hwmgr) -- cgit v1.2.3 From a3a09142f43c456fff7ddbc0ac867af8979a368c Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 25 Jul 2019 11:44:59 -0500 Subject: drm/amdgpu: put the SMC into the proper state on reset/unload When doing a GPU reset or unloading the driver, we need to put the SMU into the appropriate state for the re-init after the reset or unload to reliably work. I don't think this is necessary for BACO because the SMU actually controls the BACO state, so it needs to be active. For suspend (S3), the asic is put into D3 so the SMU would be powered down, so I don't think we need to put the SMU into any special state. 
Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 27 +++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2 ++ 3 files changed, 30 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 5cbed256f006..c87dfdb8aedb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -990,6 +990,7 @@ struct amdgpu_device { /* record last mm index being written through WREG32*/ unsigned long last_mm_index; bool in_gpu_reset; + enum pp_mp1_state mp1_state; struct mutex lock_reset; struct amdgpu_doorbell_index doorbell_index; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 14a9169446f5..2081649f49ca 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2175,6 +2175,21 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) DRM_ERROR("suspend of IP block <%s> failed %d\n", adev->ip_blocks[i].version->funcs->name, r); } + /* handle putting the SMC in the appropriate state */ + if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { + if (is_support_sw_smu(adev)) { + /* todo */ + } else if (adev->powerplay.pp_funcs && + adev->powerplay.pp_funcs->set_mp1_state) { + r = adev->powerplay.pp_funcs->set_mp1_state( + adev->powerplay.pp_handle, + adev->mp1_state); + if (r) { + DRM_ERROR("SMC failed to set mp1 state %d, %d\n", + adev->mp1_state, r); + } + } + } } return 0; @@ -3640,6 +3655,17 @@ static bool amdgpu_device_lock_adev(struct amdgpu_device *adev, bool trylock) atomic_inc(&adev->gpu_reset_counter); adev->in_gpu_reset = 1; + switch (amdgpu_asic_reset_method(adev)) { + case AMD_RESET_METHOD_MODE1: + adev->mp1_state = PP_MP1_STATE_SHUTDOWN; + break; + case AMD_RESET_METHOD_MODE2: + adev->mp1_state = PP_MP1_STATE_RESET; + break; + default: + adev->mp1_state = PP_MP1_STATE_NONE; + break; + } /* Block kfd: SRIOV would do it separately */ if (!amdgpu_sriov_vf(adev)) amdgpu_amdkfd_pre_reset(adev); @@ -3653,6 +3679,7 @@ static void amdgpu_device_unlock_adev(struct amdgpu_device *adev) if (!amdgpu_sriov_vf(adev)) amdgpu_amdkfd_post_reset(adev); amdgpu_vf_error_trans_all(adev); + adev->mp1_state = PP_MP1_STATE_NONE; adev->in_gpu_reset = 0; mutex_unlock(&adev->lock_reset); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 56f807757d2c..e3e09e6d7f42 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -1096,7 +1096,9 @@ amdgpu_pci_shutdown(struct pci_dev *pdev) * unfortunately we can't detect certain * hypervisors so just do this all the time. */ + adev->mp1_state = PP_MP1_STATE_UNLOAD; amdgpu_device_ip_suspend(adev); + adev->mp1_state = PP_MP1_STATE_NONE; } static int amdgpu_pmops_suspend(struct device *dev) -- cgit v1.2.3 From 354e6e14ef947f07055d3570b4bd7a33196b57f6 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 25 Jul 2019 22:28:58 -0500 Subject: drm/amdgpu/powerplay: use proper revision id for navi The PCI revision id determines the sku. 
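For context, adev->rev_id is the ASIC's internal revision, while adev->pdev->revision is the PCI config-space revision ID, which is what identifies the SKU here. The same value is visible from userspace; an illustrative snippet (the PCI address below is only a placeholder):

/* Read the PCI revision ID the patch below switches to. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/bus/pci/devices/0000:03:00.0/revision", "r");
	char buf[16] = "";

	if (!f) {
		perror("open");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("PCI revision id: %s", buf);	/* e.g. "0xc0" */
	fclose(f);
	return 0;
}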
Reviewed-by: Feifei Xu Reviewed-by: Kevin Wang Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index 502067c354d2..bdb9be75c0be 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -23,6 +23,7 @@ #include "pp_debug.h" #include +#include #include "amdgpu.h" #include "amdgpu_smu.h" #include "atomfirmware.h" @@ -1592,7 +1593,7 @@ static int navi10_set_peak_clock_by_device(struct smu_context *smu) uint32_t sclk_freq = 0, uclk_freq = 0; uint32_t uclk_level = 0; - switch (adev->rev_id) { + switch (adev->pdev->revision) { case 0xf0: /* XTX */ case 0xc0: sclk_freq = NAVI10_PEAK_SCLK_XTX; -- cgit v1.2.3 From f2bd8a0ed7e79579d61cea01bab2dfb09099d379 Mon Sep 17 00:00:00 2001 From: Andrey Grodzovsky Date: Fri, 26 Jul 2019 09:24:35 -0400 Subject: drm/amdgpu: Fix amdgpu_display_supported_domains logic. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add restriction to dissallow GTT domain if the relevant BO doesn't have USWC flag set to avoid the APU hang scenario. Signed-off-by: Andrey Grodzovsky Reviewed-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 16 +++++++++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_display.h | 3 ++- drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 12 ++++++------ drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 2 +- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 +- 6 files changed, 22 insertions(+), 15 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 8b06150080aa..dc65592f41b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -191,7 +191,8 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc, } if (!adev->enable_virtual_display) { - r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev)); + r = amdgpu_bo_pin(new_abo, + amdgpu_display_supported_domains(adev, new_abo->flags)); if (unlikely(r != 0)) { DRM_ERROR("failed to pin new abo buffer before flip\n"); goto unreserve; @@ -495,20 +496,25 @@ static const struct drm_framebuffer_funcs amdgpu_fb_funcs = { .create_handle = drm_gem_fb_create_handle, }; -uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev) +uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev, + uint64_t bo_flags) { uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM; #if defined(CONFIG_DRM_AMD_DC) /* - * if amdgpu_bo_validate_uswc returns false it means that USWC mappings + * if amdgpu_bo_support_uswc returns false it means that USWC mappings * is not supported for this board. But this mapping is required * to avoid hang caused by placement of scanout BO in GTT on certain * APUs. So force the BO placement to VRAM in case this architecture * will not allow USWC mappings. + * Also, don't allow GTT domain if the BO doens't have USWC falg set. 
*/ - if (adev->asic_type >= CHIP_CARRIZO && adev->asic_type <= CHIP_RAVEN && - adev->flags & AMD_IS_APU && amdgpu_bo_support_uswc(0) && + if (adev->asic_type >= CHIP_CARRIZO && + adev->asic_type <= CHIP_RAVEN && + (adev->flags & AMD_IS_APU) && + (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) && + amdgpu_bo_support_uswc(bo_flags) && amdgpu_device_asic_has_dc_support(adev->asic_type)) domain |= AMDGPU_GEM_DOMAIN_GTT; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h index 06b922fe0d42..3620b24785e1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h @@ -38,7 +38,8 @@ int amdgpu_display_freesync_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); void amdgpu_display_update_priority(struct amdgpu_device *adev); -uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev); +uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev, + uint64_t bo_flags); struct drm_framebuffer * amdgpu_display_user_framebuffer_create(struct drm_device *dev, struct drm_file *file_priv, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index 489041df1f45..974472fdfc6d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -299,7 +299,7 @@ static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf, struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv); struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); struct ttm_operation_ctx ctx = { true, false }; - u32 domain = amdgpu_display_supported_domains(adev); + u32 domain = amdgpu_display_supported_domains(adev, bo->flags); int ret; bool reads = (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c index 430c56f9544a..143753d237e7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c @@ -131,6 +131,10 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev, int aligned_size, size; int height = mode_cmd->height; u32 cpp; + u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | + AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | + AMDGPU_GEM_CREATE_VRAM_CLEARED | + AMDGPU_GEM_CREATE_CPU_GTT_USWC; info = drm_get_format_info(adev->ddev, mode_cmd); cpp = info->cpp[0]; @@ -138,15 +142,11 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev, /* need to align pitch with crtc limits */ mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, cpp, fb_tiled); - domain = amdgpu_display_supported_domains(adev); + domain = amdgpu_display_supported_domains(adev, flags); height = ALIGN(mode_cmd->height, 8); size = mode_cmd->pitches[0] * height; aligned_size = ALIGN(size, PAGE_SIZE); - ret = amdgpu_gem_object_create(adev, aligned_size, 0, domain, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | - AMDGPU_GEM_CREATE_VRAM_CLEARED | - AMDGPU_GEM_CREATE_CPU_GTT_USWC, + ret = amdgpu_gem_object_create(adev, aligned_size, 0, domain, flags, ttm_bo_type_kernel, NULL, &gobj); if (ret) { pr_err("failed to allocate framebuffer (%d)\n", aligned_size); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index fb291366d5ad..ddb07e9a71ca 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -765,7 +765,7 @@ int amdgpu_mode_dumb_create(struct 
drm_file *file_priv, args->size = (u64)args->pitch * args->height; args->size = ALIGN(args->size, PAGE_SIZE); domain = amdgpu_bo_get_preferred_pin_domain(adev, - amdgpu_display_supported_domains(adev)); + amdgpu_display_supported_domains(adev, flags)); r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags, ttm_bo_type_device, NULL, &gobj); if (r) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 356d77387c42..94438117bcda 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -4454,7 +4454,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane, } if (plane->type != DRM_PLANE_TYPE_CURSOR) - domain = amdgpu_display_supported_domains(adev); + domain = amdgpu_display_supported_domains(adev, rbo->flags); else domain = AMDGPU_GEM_DOMAIN_VRAM; -- cgit v1.2.3 From d3b9f39d8417ee2f2cd87b5e5410015ce6f78491 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 26 Jul 2019 11:04:39 -0500 Subject: drm/amdgpu/display: fix the build without CONFIG_DRM_AMD_DC_DSC_SUPPORT Some code was missing the CONFIG_DRM_AMD_DC_DSC_SUPPORT guard. Reviewed-by: Nicholas Kazlauskas Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 69e4d0d96c7f..38b3c89b2a59 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -630,9 +630,11 @@ static void dcn20_init_hw(struct dc *dc) } } +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT /* Power gate DSCs */ for (i = 0; i < res_pool->res_cap->num_dsc; i++) dcn20_dsc_pg_control(hws, res_pool->dscs[i]->inst, false); +#endif /* Blank pixel data with OPP DPG */ for (i = 0; i < dc->res_pool->timing_generator_count; i++) { -- cgit v1.2.3 From 333fe325febabe3df2bc3019d4b97f879d8cef73 Mon Sep 17 00:00:00 2001 From: Thong Thai Date: Thu, 25 Jul 2019 11:21:58 -0400 Subject: drm/amd/amdgpu/vcn_v2_0: Mark RB commands as KMD commands MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Sets the CMD_SOURCE bit for VCN 2.0 Decoder Ring Buffer commands. This bit was previously set by the RBC HW on older firmware. Newer firmware uses a SW RBC and this bit has to be set by the driver. 
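Concretely, every decode ring command word now carries bit 31 (VCN_DEC_KMD_CMD) to mark it as driver-issued. A tiny standalone sketch of the word composition used in the patch below:

/* Compose VCN decode ring command words as the patch does:
 * opcode shifted left by one, OR'd with the KMD/driver-source bit. */
#include <stdio.h>
#include <stdint.h>

#define VCN_DEC_KMD_CMD   0x80000000u
#define VCN_DEC_CMD_FENCE 0x00000000u
#define VCN_DEC_CMD_TRAP  0x00000001u

int main(void)
{
	uint32_t fence_word = VCN_DEC_KMD_CMD | (VCN_DEC_CMD_FENCE << 1);
	uint32_t trap_word  = VCN_DEC_KMD_CMD | (VCN_DEC_CMD_TRAP << 1);

	printf("fence word: 0x%08x\n", fence_word);	/* 0x80000000 */
	printf("trap  word: 0x%08x\n", trap_word);	/* 0x80000002 */
	return 0;
}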
Signed-off-by: Thong Thai Reviewed-by: Leo Liu Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 1 + drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 12 ++++++------ 2 files changed, 7 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index 38f0d53a6381..dface275c81a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -35,6 +35,7 @@ #define AMDGPU_VCN_HARVEST_VCN0 (1 << 0) #define AMDGPU_VCN_HARVEST_VCN1 (1 << 1) +#define VCN_DEC_KMD_CMD 0x80000000 #define VCN_DEC_CMD_FENCE 0x00000000 #define VCN_DEC_CMD_TRAP 0x00000001 #define VCN_DEC_CMD_WRITE_REG 0x00000004 diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index a022e47f2a1d..80bb49736ee4 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -1494,7 +1494,7 @@ void vcn_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring) amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0)); amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0)); - amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_START << 1); + amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1)); } /** @@ -1509,7 +1509,7 @@ void vcn_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring) struct amdgpu_device *adev = ring->adev; amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0)); - amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_END << 1); + amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_END << 1)); } /** @@ -1556,7 +1556,7 @@ void vcn_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff); amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0)); - amdgpu_ring_write(ring, VCN_DEC_CMD_FENCE << 1); + amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_FENCE << 1)); amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.data0, 0)); amdgpu_ring_write(ring, 0); @@ -1566,7 +1566,7 @@ void vcn_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0)); - amdgpu_ring_write(ring, VCN_DEC_CMD_TRAP << 1); + amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_TRAP << 1)); } /** @@ -1612,7 +1612,7 @@ void vcn_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0)); - amdgpu_ring_write(ring, VCN_DEC_CMD_REG_READ_COND_WAIT << 1); + amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_REG_READ_COND_WAIT << 1)); } void vcn_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring, @@ -1643,7 +1643,7 @@ void vcn_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring, amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0)); - amdgpu_ring_write(ring, VCN_DEC_CMD_WRITE_REG << 1); + amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_WRITE_REG << 1)); } /** -- cgit v1.2.3 From 2c8973180392d1835d07eafd361e821f4aa28dfa Mon Sep 17 00:00:00 2001 From: Joseph Greathouse Date: Fri, 26 Jul 2019 15:52:05 -0500 Subject: drm/amdgpu: Default disable GDS for compute+gfx MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Units in the GDS block default to allowing all VMIDs access to all entries. Disable shader access to the GDS, GWS, and OA blocks from all compute and gfx VMIDs by default. 
For compute, HWS firmware will set up the access bits for the appropriate VMID when a compute queue requires access to these blocks. The driver will handle enabling access on-demand for graphics VMIDs. Leaving VMID0 with full access because otherwise HWS cannot save or restore values during task switch. v2: Fixed code and comment styling. Signed-off-by: Joseph Greathouse Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 25 ++++++++++++++++++------- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 24 +++++++++++++++++------- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 24 +++++++++++++++++------- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 24 +++++++++++++++++------- 4 files changed, 69 insertions(+), 28 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 98d6cb9e9543..ef9f552b7001 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -1516,17 +1516,27 @@ static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev) } nv_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); +} - /* Initialize all compute VMIDs to have no GDS, GWS, or OA - acccess. These should be enabled by FW for target VMIDs. */ - for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) { - WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0); - WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0); - WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0); - WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0); +static void gfx_v10_0_init_gds_vmid(struct amdgpu_device *adev) +{ + int vmid; + + /* + * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA + * access. Compute VMIDs should be enabled by FW for target VMIDs, + * the driver can enable them for graphics. VMID0 should maintain + * access so that HWS firmware can save/restore entries. + */ + for (vmid = 1; vmid < 16; vmid++) { + WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * vmid, 0); + WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * vmid, 0); + WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, vmid, 0); + WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, vmid, 0); } } + static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev) { int i, j, k; @@ -1629,6 +1639,7 @@ static void gfx_v10_0_constants_init(struct amdgpu_device *adev) mutex_unlock(&adev->srbm_mutex); gfx_v10_0_init_compute_vmid(adev); + gfx_v10_0_init_gds_vmid(adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 21187275dfd3..53eee129b168 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -1879,14 +1879,23 @@ static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev) } cik_srbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); +} - /* Initialize all compute VMIDs to have no GDS, GWS, or OA - acccess. These should be enabled by FW for target VMIDs. */ - for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) { - WREG32(amdgpu_gds_reg_offset[i].mem_base, 0); - WREG32(amdgpu_gds_reg_offset[i].mem_size, 0); - WREG32(amdgpu_gds_reg_offset[i].gws, 0); - WREG32(amdgpu_gds_reg_offset[i].oa, 0); +static void gfx_v7_0_init_gds_vmid(struct amdgpu_device *adev) +{ + int vmid; + + /* + * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA + * access. Compute VMIDs should be enabled by FW for target VMIDs, + * the driver can enable them for graphics. 
VMID0 should maintain + * access so that HWS firmware can save/restore entries. + */ + for (vmid = 1; vmid < 16; vmid++) { + WREG32(amdgpu_gds_reg_offset[vmid].mem_base, 0); + WREG32(amdgpu_gds_reg_offset[vmid].mem_size, 0); + WREG32(amdgpu_gds_reg_offset[vmid].gws, 0); + WREG32(amdgpu_gds_reg_offset[vmid].oa, 0); } } @@ -1968,6 +1977,7 @@ static void gfx_v7_0_constants_init(struct amdgpu_device *adev) mutex_unlock(&adev->srbm_mutex); gfx_v7_0_init_compute_vmid(adev); + gfx_v7_0_init_gds_vmid(adev); WREG32(mmSX_DEBUG_1, 0x20); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 751567f78567..249526317f81 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -3706,14 +3706,23 @@ static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev) } vi_srbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); +} - /* Initialize all compute VMIDs to have no GDS, GWS, or OA - acccess. These should be enabled by FW for target VMIDs. */ - for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) { - WREG32(amdgpu_gds_reg_offset[i].mem_base, 0); - WREG32(amdgpu_gds_reg_offset[i].mem_size, 0); - WREG32(amdgpu_gds_reg_offset[i].gws, 0); - WREG32(amdgpu_gds_reg_offset[i].oa, 0); +static void gfx_v8_0_init_gds_vmid(struct amdgpu_device *adev) +{ + int vmid; + + /* + * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA + * access. Compute VMIDs should be enabled by FW for target VMIDs, + * the driver can enable them for graphics. VMID0 should maintain + * access so that HWS firmware can save/restore entries. + */ + for (vmid = 1; vmid < 16; vmid++) { + WREG32(amdgpu_gds_reg_offset[vmid].mem_base, 0); + WREG32(amdgpu_gds_reg_offset[vmid].mem_size, 0); + WREG32(amdgpu_gds_reg_offset[vmid].gws, 0); + WREG32(amdgpu_gds_reg_offset[vmid].oa, 0); } } @@ -3783,6 +3792,7 @@ static void gfx_v8_0_constants_init(struct amdgpu_device *adev) mutex_unlock(&adev->srbm_mutex); gfx_v8_0_init_compute_vmid(adev); + gfx_v8_0_init_gds_vmid(adev); mutex_lock(&adev->grbm_idx_mutex); /* diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index f0a7f85990bc..65be73eb02d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -2029,14 +2029,23 @@ static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev) } soc15_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); +} - /* Initialize all compute VMIDs to have no GDS, GWS, or OA - acccess. These should be enabled by FW for target VMIDs. */ - for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) { - WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0); - WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0); - WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0); - WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0); +static void gfx_v9_0_init_gds_vmid(struct amdgpu_device *adev) +{ + int vmid; + + /* + * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA + * access. Compute VMIDs should be enabled by FW for target VMIDs, + * the driver can enable them for graphics. VMID0 should maintain + * access so that HWS firmware can save/restore entries. 
+ */ + for (vmid = 1; vmid < 16; vmid++) { + WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * vmid, 0); + WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * vmid, 0); + WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, vmid, 0); + WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, vmid, 0); } } @@ -2084,6 +2093,7 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev) mutex_unlock(&adev->srbm_mutex); gfx_v9_0_init_compute_vmid(adev); + gfx_v9_0_init_gds_vmid(adev); } static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev) -- cgit v1.2.3 From 0ba5eda81a82719c229c819929e8297559522429 Mon Sep 17 00:00:00 2001 From: Kevin Wang Date: Thu, 25 Jul 2019 09:59:46 +0800 Subject: drm/amd/powerplay: move smu types to smu_types.h move some enum types (message, feature, clock) to smu_types.h. these types are too long in amdgpu_smu.h and make it less clear. 
- SMU_MSG_DramLogSetDramAddrHigh, - SMU_MSG_DramLogSetDramAddrLow, - SMU_MSG_DramLogSetDramSize, - SMU_MSG_SetFanMaxRpm, - SMU_MSG_SetFanMinPwm, - SMU_MSG_ConfigureGfxDidt, - SMU_MSG_NumOfDisplays, - SMU_MSG_RemoveMargins, - SMU_MSG_ReadSerialNumTop32, - SMU_MSG_ReadSerialNumBottom32, - SMU_MSG_SetSystemVirtualDramAddrHigh, - SMU_MSG_SetSystemVirtualDramAddrLow, - SMU_MSG_WaflTest, - SMU_MSG_SetFclkGfxClkRatio, - SMU_MSG_AllowGfxOff, - SMU_MSG_DisallowGfxOff, - SMU_MSG_GetPptLimit, - SMU_MSG_GetDcModeMaxDpmFreq, - SMU_MSG_GetDebugData, - SMU_MSG_SetXgmiMode, - SMU_MSG_RunAfllBtc, - SMU_MSG_ExitBaco, - SMU_MSG_PrepareMp1ForReset, - SMU_MSG_PrepareMp1ForShutdown, - SMU_MSG_SetMGpuFanBoostLimitRpm, - SMU_MSG_GetAVFSVoltageByDpm, - SMU_MSG_PowerUpVcn, - SMU_MSG_PowerDownVcn, - SMU_MSG_PowerUpJpeg, - SMU_MSG_PowerDownJpeg, - SMU_MSG_BacoAudioD3PME, - SMU_MSG_ArmD3, - SMU_MSG_RunGfxDcBtc, - SMU_MSG_RunSocDcBtc, - SMU_MSG_SetMemoryChannelEnable, - SMU_MSG_SetDfSwitchType, - SMU_MSG_GetVoltageByDpm, - SMU_MSG_GetVoltageByDpmOverdrive, - SMU_MSG_PowerUpVcn0, - SMU_MSG_PowerDownVcn01, - SMU_MSG_PowerUpVcn1, - SMU_MSG_PowerDownVcn1, - SMU_MSG_MAX_COUNT, -}; - -enum smu_clk_type -{ - SMU_GFXCLK, - SMU_VCLK, - SMU_DCLK, - SMU_ECLK, - SMU_SOCCLK, - SMU_UCLK, - SMU_DCEFCLK, - SMU_DISPCLK, - SMU_PIXCLK, - SMU_PHYCLK, - SMU_FCLK, - SMU_SCLK, - SMU_MCLK, - SMU_PCIE, - SMU_OD_SCLK, - SMU_OD_MCLK, - SMU_OD_VDDC_CURVE, - SMU_OD_RANGE, - SMU_CLK_COUNT, -}; - enum smu_power_src_type { SMU_POWER_SOURCE_AC, @@ -285,63 +158,6 @@ enum smu_power_src_type SMU_POWER_SOURCE_COUNT, }; -enum smu_feature_mask -{ - SMU_FEATURE_DPM_PREFETCHER_BIT, - SMU_FEATURE_DPM_GFXCLK_BIT, - SMU_FEATURE_DPM_UCLK_BIT, - SMU_FEATURE_DPM_SOCCLK_BIT, - SMU_FEATURE_DPM_UVD_BIT, - SMU_FEATURE_DPM_VCE_BIT, - SMU_FEATURE_ULV_BIT, - SMU_FEATURE_DPM_MP0CLK_BIT, - SMU_FEATURE_DPM_LINK_BIT, - SMU_FEATURE_DPM_DCEFCLK_BIT, - SMU_FEATURE_DS_GFXCLK_BIT, - SMU_FEATURE_DS_SOCCLK_BIT, - SMU_FEATURE_DS_LCLK_BIT, - SMU_FEATURE_PPT_BIT, - SMU_FEATURE_TDC_BIT, - SMU_FEATURE_THERMAL_BIT, - SMU_FEATURE_GFX_PER_CU_CG_BIT, - SMU_FEATURE_RM_BIT, - SMU_FEATURE_DS_DCEFCLK_BIT, - SMU_FEATURE_ACDC_BIT, - SMU_FEATURE_VR0HOT_BIT, - SMU_FEATURE_VR1HOT_BIT, - SMU_FEATURE_FW_CTF_BIT, - SMU_FEATURE_LED_DISPLAY_BIT, - SMU_FEATURE_FAN_CONTROL_BIT, - SMU_FEATURE_GFX_EDC_BIT, - SMU_FEATURE_GFXOFF_BIT, - SMU_FEATURE_CG_BIT, - SMU_FEATURE_DPM_FCLK_BIT, - SMU_FEATURE_DS_FCLK_BIT, - SMU_FEATURE_DS_MP1CLK_BIT, - SMU_FEATURE_DS_MP0CLK_BIT, - SMU_FEATURE_XGMI_BIT, - SMU_FEATURE_DPM_GFX_PACE_BIT, - SMU_FEATURE_MEM_VDDCI_SCALING_BIT, - SMU_FEATURE_MEM_MVDD_SCALING_BIT, - SMU_FEATURE_DS_UCLK_BIT, - SMU_FEATURE_GFX_ULV_BIT, - SMU_FEATURE_FW_DSTATE_BIT, - SMU_FEATURE_BACO_BIT, - SMU_FEATURE_VCN_PG_BIT, - SMU_FEATURE_JPEG_PG_BIT, - SMU_FEATURE_USB_PG_BIT, - SMU_FEATURE_RSMU_SMN_CG_BIT, - SMU_FEATURE_APCC_PLUS_BIT, - SMU_FEATURE_GTHR_BIT, - SMU_FEATURE_GFX_DCS_BIT, - SMU_FEATURE_GFX_SS_BIT, - SMU_FEATURE_OUT_OF_BAND_MONITOR_BIT, - SMU_FEATURE_TEMP_DEPENDENT_VMIN_BIT, - SMU_FEATURE_MMHUB_PG_BIT, - SMU_FEATURE_ATHUB_PG_BIT, - SMU_FEATURE_COUNT, -}; - enum smu_memory_pool_size { SMU_MEMORY_POOL_SIZE_ZERO = 0, diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h new file mode 100644 index 000000000000..29d14c162417 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h @@ -0,0 +1,210 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef __SMU_TYPES_H__ +#define __SMU_TYPES_H__ + + +enum smu_message_type { + SMU_MSG_TestMessage = 0, + SMU_MSG_GetSmuVersion, + SMU_MSG_GetDriverIfVersion, + SMU_MSG_SetAllowedFeaturesMaskLow, + SMU_MSG_SetAllowedFeaturesMaskHigh, + SMU_MSG_EnableAllSmuFeatures, + SMU_MSG_DisableAllSmuFeatures, + SMU_MSG_EnableSmuFeaturesLow, + SMU_MSG_EnableSmuFeaturesHigh, + SMU_MSG_DisableSmuFeaturesLow, + SMU_MSG_DisableSmuFeaturesHigh, + SMU_MSG_GetEnabledSmuFeaturesLow, + SMU_MSG_GetEnabledSmuFeaturesHigh, + SMU_MSG_SetWorkloadMask, + SMU_MSG_SetPptLimit, + SMU_MSG_SetDriverDramAddrHigh, + SMU_MSG_SetDriverDramAddrLow, + SMU_MSG_SetToolsDramAddrHigh, + SMU_MSG_SetToolsDramAddrLow, + SMU_MSG_TransferTableSmu2Dram, + SMU_MSG_TransferTableDram2Smu, + SMU_MSG_UseDefaultPPTable, + SMU_MSG_UseBackupPPTable, + SMU_MSG_RunBtc, + SMU_MSG_RequestI2CBus, + SMU_MSG_ReleaseI2CBus, + SMU_MSG_SetFloorSocVoltage, + SMU_MSG_SoftReset, + SMU_MSG_StartBacoMonitor, + SMU_MSG_CancelBacoMonitor, + SMU_MSG_EnterBaco, + SMU_MSG_SetSoftMinByFreq, + SMU_MSG_SetSoftMaxByFreq, + SMU_MSG_SetHardMinByFreq, + SMU_MSG_SetHardMaxByFreq, + SMU_MSG_GetMinDpmFreq, + SMU_MSG_GetMaxDpmFreq, + SMU_MSG_GetDpmFreqByIndex, + SMU_MSG_GetDpmClockFreq, + SMU_MSG_GetSsVoltageByDpm, + SMU_MSG_SetMemoryChannelConfig, + SMU_MSG_SetGeminiMode, + SMU_MSG_SetGeminiApertureHigh, + SMU_MSG_SetGeminiApertureLow, + SMU_MSG_SetMinLinkDpmByIndex, + SMU_MSG_OverridePcieParameters, + SMU_MSG_OverDriveSetPercentage, + SMU_MSG_SetMinDeepSleepDcefclk, + SMU_MSG_ReenableAcDcInterrupt, + SMU_MSG_NotifyPowerSource, + SMU_MSG_SetUclkFastSwitch, + SMU_MSG_SetUclkDownHyst, + SMU_MSG_GfxDeviceDriverReset, + SMU_MSG_GetCurrentRpm, + SMU_MSG_SetVideoFps, + SMU_MSG_SetTjMax, + SMU_MSG_SetFanTemperatureTarget, + SMU_MSG_PrepareMp1ForUnload, + SMU_MSG_DramLogSetDramAddrHigh, + SMU_MSG_DramLogSetDramAddrLow, + SMU_MSG_DramLogSetDramSize, + SMU_MSG_SetFanMaxRpm, + SMU_MSG_SetFanMinPwm, + SMU_MSG_ConfigureGfxDidt, + SMU_MSG_NumOfDisplays, + SMU_MSG_RemoveMargins, + SMU_MSG_ReadSerialNumTop32, + SMU_MSG_ReadSerialNumBottom32, + SMU_MSG_SetSystemVirtualDramAddrHigh, + SMU_MSG_SetSystemVirtualDramAddrLow, + SMU_MSG_WaflTest, + SMU_MSG_SetFclkGfxClkRatio, + SMU_MSG_AllowGfxOff, + SMU_MSG_DisallowGfxOff, + SMU_MSG_GetPptLimit, + SMU_MSG_GetDcModeMaxDpmFreq, + SMU_MSG_GetDebugData, + SMU_MSG_SetXgmiMode, + SMU_MSG_RunAfllBtc, + SMU_MSG_ExitBaco, + SMU_MSG_PrepareMp1ForReset, + 
SMU_MSG_PrepareMp1ForShutdown, + SMU_MSG_SetMGpuFanBoostLimitRpm, + SMU_MSG_GetAVFSVoltageByDpm, + SMU_MSG_PowerUpVcn, + SMU_MSG_PowerDownVcn, + SMU_MSG_PowerUpJpeg, + SMU_MSG_PowerDownJpeg, + SMU_MSG_BacoAudioD3PME, + SMU_MSG_ArmD3, + SMU_MSG_RunGfxDcBtc, + SMU_MSG_RunSocDcBtc, + SMU_MSG_SetMemoryChannelEnable, + SMU_MSG_SetDfSwitchType, + SMU_MSG_GetVoltageByDpm, + SMU_MSG_GetVoltageByDpmOverdrive, + SMU_MSG_PowerUpVcn0, + SMU_MSG_PowerDownVcn01, + SMU_MSG_PowerUpVcn1, + SMU_MSG_PowerDownVcn1, + SMU_MSG_MAX_COUNT, +}; + +enum smu_clk_type { + SMU_GFXCLK, + SMU_VCLK, + SMU_DCLK, + SMU_ECLK, + SMU_SOCCLK, + SMU_UCLK, + SMU_DCEFCLK, + SMU_DISPCLK, + SMU_PIXCLK, + SMU_PHYCLK, + SMU_FCLK, + SMU_SCLK, + SMU_MCLK, + SMU_PCIE, + SMU_OD_SCLK, + SMU_OD_MCLK, + SMU_OD_VDDC_CURVE, + SMU_OD_RANGE, + SMU_CLK_COUNT, +}; + +enum smu_feature_mask { + SMU_FEATURE_DPM_PREFETCHER_BIT, + SMU_FEATURE_DPM_GFXCLK_BIT, + SMU_FEATURE_DPM_UCLK_BIT, + SMU_FEATURE_DPM_SOCCLK_BIT, + SMU_FEATURE_DPM_UVD_BIT, + SMU_FEATURE_DPM_VCE_BIT, + SMU_FEATURE_ULV_BIT, + SMU_FEATURE_DPM_MP0CLK_BIT, + SMU_FEATURE_DPM_LINK_BIT, + SMU_FEATURE_DPM_DCEFCLK_BIT, + SMU_FEATURE_DS_GFXCLK_BIT, + SMU_FEATURE_DS_SOCCLK_BIT, + SMU_FEATURE_DS_LCLK_BIT, + SMU_FEATURE_PPT_BIT, + SMU_FEATURE_TDC_BIT, + SMU_FEATURE_THERMAL_BIT, + SMU_FEATURE_GFX_PER_CU_CG_BIT, + SMU_FEATURE_RM_BIT, + SMU_FEATURE_DS_DCEFCLK_BIT, + SMU_FEATURE_ACDC_BIT, + SMU_FEATURE_VR0HOT_BIT, + SMU_FEATURE_VR1HOT_BIT, + SMU_FEATURE_FW_CTF_BIT, + SMU_FEATURE_LED_DISPLAY_BIT, + SMU_FEATURE_FAN_CONTROL_BIT, + SMU_FEATURE_GFX_EDC_BIT, + SMU_FEATURE_GFXOFF_BIT, + SMU_FEATURE_CG_BIT, + SMU_FEATURE_DPM_FCLK_BIT, + SMU_FEATURE_DS_FCLK_BIT, + SMU_FEATURE_DS_MP1CLK_BIT, + SMU_FEATURE_DS_MP0CLK_BIT, + SMU_FEATURE_XGMI_BIT, + SMU_FEATURE_DPM_GFX_PACE_BIT, + SMU_FEATURE_MEM_VDDCI_SCALING_BIT, + SMU_FEATURE_MEM_MVDD_SCALING_BIT, + SMU_FEATURE_DS_UCLK_BIT, + SMU_FEATURE_GFX_ULV_BIT, + SMU_FEATURE_FW_DSTATE_BIT, + SMU_FEATURE_BACO_BIT, + SMU_FEATURE_VCN_PG_BIT, + SMU_FEATURE_JPEG_PG_BIT, + SMU_FEATURE_USB_PG_BIT, + SMU_FEATURE_RSMU_SMN_CG_BIT, + SMU_FEATURE_APCC_PLUS_BIT, + SMU_FEATURE_GTHR_BIT, + SMU_FEATURE_GFX_DCS_BIT, + SMU_FEATURE_GFX_SS_BIT, + SMU_FEATURE_OUT_OF_BAND_MONITOR_BIT, + SMU_FEATURE_TEMP_DEPENDENT_VMIN_BIT, + SMU_FEATURE_MMHUB_PG_BIT, + SMU_FEATURE_ATHUB_PG_BIT, + SMU_FEATURE_COUNT, +}; + +#endif + -- cgit v1.2.3 From 6b294793e3846988ca1234c324ae256b25cee9cf Mon Sep 17 00:00:00 2001 From: Kevin Wang Date: Thu, 25 Jul 2019 10:32:48 +0800 Subject: drm/amd/powerplay: add smu message name support add smu_get_message_name support in smu. 
Signed-off-by: Kevin Wang Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 13 ++ drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 1 + drivers/gpu/drm/amd/powerplay/inc/smu_types.h | 205 +++++++++++++------------ drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 12 +- 4 files changed, 124 insertions(+), 107 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index e8dba0256457..fb967c6d5046 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -30,6 +30,19 @@ #include "atom.h" #include "amd_pcie.h" +#undef __SMU_DUMMY_MAP +#define __SMU_DUMMY_MAP(type) #type +static const char* __smu_message_names[] = { + SMU_MESSAGE_TYPES +}; + +const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type) +{ + if (type < 0 || type > SMU_MSG_MAX_COUNT) + return "unknow smu message"; + return __smu_message_names[type]; +} + int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version) { int ret = 0; diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index 397040a4d1b4..035f857922ec 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h @@ -804,5 +804,6 @@ enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu); int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level); int smu_set_display_count(struct smu_context *smu, uint32_t count); bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type); +const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type); #endif diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h index 29d14c162417..d42e3424e704 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h @@ -23,108 +23,112 @@ #ifndef __SMU_TYPES_H__ #define __SMU_TYPES_H__ +#define SMU_MESSAGE_TYPES \ + __SMU_DUMMY_MAP(TestMessage), \ + __SMU_DUMMY_MAP(GetSmuVersion), \ + __SMU_DUMMY_MAP(GetDriverIfVersion), \ + __SMU_DUMMY_MAP(SetAllowedFeaturesMaskLow), \ + __SMU_DUMMY_MAP(SetAllowedFeaturesMaskHigh), \ + __SMU_DUMMY_MAP(EnableAllSmuFeatures), \ + __SMU_DUMMY_MAP(DisableAllSmuFeatures), \ + __SMU_DUMMY_MAP(EnableSmuFeaturesLow), \ + __SMU_DUMMY_MAP(EnableSmuFeaturesHigh), \ + __SMU_DUMMY_MAP(DisableSmuFeaturesLow), \ + __SMU_DUMMY_MAP(DisableSmuFeaturesHigh), \ + __SMU_DUMMY_MAP(GetEnabledSmuFeaturesLow), \ + __SMU_DUMMY_MAP(GetEnabledSmuFeaturesHigh), \ + __SMU_DUMMY_MAP(SetWorkloadMask), \ + __SMU_DUMMY_MAP(SetPptLimit), \ + __SMU_DUMMY_MAP(SetDriverDramAddrHigh), \ + __SMU_DUMMY_MAP(SetDriverDramAddrLow), \ + __SMU_DUMMY_MAP(SetToolsDramAddrHigh), \ + __SMU_DUMMY_MAP(SetToolsDramAddrLow), \ + __SMU_DUMMY_MAP(TransferTableSmu2Dram), \ + __SMU_DUMMY_MAP(TransferTableDram2Smu), \ + __SMU_DUMMY_MAP(UseDefaultPPTable), \ + __SMU_DUMMY_MAP(UseBackupPPTable), \ + __SMU_DUMMY_MAP(RunBtc), \ + __SMU_DUMMY_MAP(RequestI2CBus), \ + __SMU_DUMMY_MAP(ReleaseI2CBus), \ + __SMU_DUMMY_MAP(SetFloorSocVoltage), \ + __SMU_DUMMY_MAP(SoftReset), \ + __SMU_DUMMY_MAP(StartBacoMonitor), \ + __SMU_DUMMY_MAP(CancelBacoMonitor), \ + __SMU_DUMMY_MAP(EnterBaco), \ + __SMU_DUMMY_MAP(SetSoftMinByFreq), \ + __SMU_DUMMY_MAP(SetSoftMaxByFreq), \ + 
__SMU_DUMMY_MAP(SetHardMinByFreq), \ + __SMU_DUMMY_MAP(SetHardMaxByFreq), \ + __SMU_DUMMY_MAP(GetMinDpmFreq), \ + __SMU_DUMMY_MAP(GetMaxDpmFreq), \ + __SMU_DUMMY_MAP(GetDpmFreqByIndex), \ + __SMU_DUMMY_MAP(GetDpmClockFreq), \ + __SMU_DUMMY_MAP(GetSsVoltageByDpm), \ + __SMU_DUMMY_MAP(SetMemoryChannelConfig), \ + __SMU_DUMMY_MAP(SetGeminiMode), \ + __SMU_DUMMY_MAP(SetGeminiApertureHigh), \ + __SMU_DUMMY_MAP(SetGeminiApertureLow), \ + __SMU_DUMMY_MAP(SetMinLinkDpmByIndex), \ + __SMU_DUMMY_MAP(OverridePcieParameters), \ + __SMU_DUMMY_MAP(OverDriveSetPercentage), \ + __SMU_DUMMY_MAP(SetMinDeepSleepDcefclk), \ + __SMU_DUMMY_MAP(ReenableAcDcInterrupt), \ + __SMU_DUMMY_MAP(NotifyPowerSource), \ + __SMU_DUMMY_MAP(SetUclkFastSwitch), \ + __SMU_DUMMY_MAP(SetUclkDownHyst), \ + __SMU_DUMMY_MAP(GfxDeviceDriverReset), \ + __SMU_DUMMY_MAP(GetCurrentRpm), \ + __SMU_DUMMY_MAP(SetVideoFps), \ + __SMU_DUMMY_MAP(SetTjMax), \ + __SMU_DUMMY_MAP(SetFanTemperatureTarget), \ + __SMU_DUMMY_MAP(PrepareMp1ForUnload), \ + __SMU_DUMMY_MAP(DramLogSetDramAddrHigh), \ + __SMU_DUMMY_MAP(DramLogSetDramAddrLow), \ + __SMU_DUMMY_MAP(DramLogSetDramSize), \ + __SMU_DUMMY_MAP(SetFanMaxRpm), \ + __SMU_DUMMY_MAP(SetFanMinPwm), \ + __SMU_DUMMY_MAP(ConfigureGfxDidt), \ + __SMU_DUMMY_MAP(NumOfDisplays), \ + __SMU_DUMMY_MAP(RemoveMargins), \ + __SMU_DUMMY_MAP(ReadSerialNumTop32), \ + __SMU_DUMMY_MAP(ReadSerialNumBottom32), \ + __SMU_DUMMY_MAP(SetSystemVirtualDramAddrHigh), \ + __SMU_DUMMY_MAP(SetSystemVirtualDramAddrLow), \ + __SMU_DUMMY_MAP(WaflTest), \ + __SMU_DUMMY_MAP(SetFclkGfxClkRatio), \ + __SMU_DUMMY_MAP(AllowGfxOff), \ + __SMU_DUMMY_MAP(DisallowGfxOff), \ + __SMU_DUMMY_MAP(GetPptLimit), \ + __SMU_DUMMY_MAP(GetDcModeMaxDpmFreq), \ + __SMU_DUMMY_MAP(GetDebugData), \ + __SMU_DUMMY_MAP(SetXgmiMode), \ + __SMU_DUMMY_MAP(RunAfllBtc), \ + __SMU_DUMMY_MAP(ExitBaco), \ + __SMU_DUMMY_MAP(PrepareMp1ForReset), \ + __SMU_DUMMY_MAP(PrepareMp1ForShutdown), \ + __SMU_DUMMY_MAP(SetMGpuFanBoostLimitRpm), \ + __SMU_DUMMY_MAP(GetAVFSVoltageByDpm), \ + __SMU_DUMMY_MAP(PowerUpVcn), \ + __SMU_DUMMY_MAP(PowerDownVcn), \ + __SMU_DUMMY_MAP(PowerUpJpeg), \ + __SMU_DUMMY_MAP(PowerDownJpeg), \ + __SMU_DUMMY_MAP(BacoAudioD3PME), \ + __SMU_DUMMY_MAP(ArmD3), \ + __SMU_DUMMY_MAP(RunGfxDcBtc), \ + __SMU_DUMMY_MAP(RunSocDcBtc), \ + __SMU_DUMMY_MAP(SetMemoryChannelEnable), \ + __SMU_DUMMY_MAP(SetDfSwitchType), \ + __SMU_DUMMY_MAP(GetVoltageByDpm), \ + __SMU_DUMMY_MAP(GetVoltageByDpmOverdrive), \ + __SMU_DUMMY_MAP(PowerUpVcn0), \ + __SMU_DUMMY_MAP(PowerDownVcn01), \ + __SMU_DUMMY_MAP(PowerUpVcn1), \ + __SMU_DUMMY_MAP(PowerDownVcn1), \ +#undef __SMU_DUMMY_MAP +#define __SMU_DUMMY_MAP(type) SMU_MSG_##type enum smu_message_type { - SMU_MSG_TestMessage = 0, - SMU_MSG_GetSmuVersion, - SMU_MSG_GetDriverIfVersion, - SMU_MSG_SetAllowedFeaturesMaskLow, - SMU_MSG_SetAllowedFeaturesMaskHigh, - SMU_MSG_EnableAllSmuFeatures, - SMU_MSG_DisableAllSmuFeatures, - SMU_MSG_EnableSmuFeaturesLow, - SMU_MSG_EnableSmuFeaturesHigh, - SMU_MSG_DisableSmuFeaturesLow, - SMU_MSG_DisableSmuFeaturesHigh, - SMU_MSG_GetEnabledSmuFeaturesLow, - SMU_MSG_GetEnabledSmuFeaturesHigh, - SMU_MSG_SetWorkloadMask, - SMU_MSG_SetPptLimit, - SMU_MSG_SetDriverDramAddrHigh, - SMU_MSG_SetDriverDramAddrLow, - SMU_MSG_SetToolsDramAddrHigh, - SMU_MSG_SetToolsDramAddrLow, - SMU_MSG_TransferTableSmu2Dram, - SMU_MSG_TransferTableDram2Smu, - SMU_MSG_UseDefaultPPTable, - SMU_MSG_UseBackupPPTable, - SMU_MSG_RunBtc, - SMU_MSG_RequestI2CBus, - SMU_MSG_ReleaseI2CBus, - SMU_MSG_SetFloorSocVoltage, - SMU_MSG_SoftReset, - 
SMU_MSG_StartBacoMonitor, - SMU_MSG_CancelBacoMonitor, - SMU_MSG_EnterBaco, - SMU_MSG_SetSoftMinByFreq, - SMU_MSG_SetSoftMaxByFreq, - SMU_MSG_SetHardMinByFreq, - SMU_MSG_SetHardMaxByFreq, - SMU_MSG_GetMinDpmFreq, - SMU_MSG_GetMaxDpmFreq, - SMU_MSG_GetDpmFreqByIndex, - SMU_MSG_GetDpmClockFreq, - SMU_MSG_GetSsVoltageByDpm, - SMU_MSG_SetMemoryChannelConfig, - SMU_MSG_SetGeminiMode, - SMU_MSG_SetGeminiApertureHigh, - SMU_MSG_SetGeminiApertureLow, - SMU_MSG_SetMinLinkDpmByIndex, - SMU_MSG_OverridePcieParameters, - SMU_MSG_OverDriveSetPercentage, - SMU_MSG_SetMinDeepSleepDcefclk, - SMU_MSG_ReenableAcDcInterrupt, - SMU_MSG_NotifyPowerSource, - SMU_MSG_SetUclkFastSwitch, - SMU_MSG_SetUclkDownHyst, - SMU_MSG_GfxDeviceDriverReset, - SMU_MSG_GetCurrentRpm, - SMU_MSG_SetVideoFps, - SMU_MSG_SetTjMax, - SMU_MSG_SetFanTemperatureTarget, - SMU_MSG_PrepareMp1ForUnload, - SMU_MSG_DramLogSetDramAddrHigh, - SMU_MSG_DramLogSetDramAddrLow, - SMU_MSG_DramLogSetDramSize, - SMU_MSG_SetFanMaxRpm, - SMU_MSG_SetFanMinPwm, - SMU_MSG_ConfigureGfxDidt, - SMU_MSG_NumOfDisplays, - SMU_MSG_RemoveMargins, - SMU_MSG_ReadSerialNumTop32, - SMU_MSG_ReadSerialNumBottom32, - SMU_MSG_SetSystemVirtualDramAddrHigh, - SMU_MSG_SetSystemVirtualDramAddrLow, - SMU_MSG_WaflTest, - SMU_MSG_SetFclkGfxClkRatio, - SMU_MSG_AllowGfxOff, - SMU_MSG_DisallowGfxOff, - SMU_MSG_GetPptLimit, - SMU_MSG_GetDcModeMaxDpmFreq, - SMU_MSG_GetDebugData, - SMU_MSG_SetXgmiMode, - SMU_MSG_RunAfllBtc, - SMU_MSG_ExitBaco, - SMU_MSG_PrepareMp1ForReset, - SMU_MSG_PrepareMp1ForShutdown, - SMU_MSG_SetMGpuFanBoostLimitRpm, - SMU_MSG_GetAVFSVoltageByDpm, - SMU_MSG_PowerUpVcn, - SMU_MSG_PowerDownVcn, - SMU_MSG_PowerUpJpeg, - SMU_MSG_PowerDownJpeg, - SMU_MSG_BacoAudioD3PME, - SMU_MSG_ArmD3, - SMU_MSG_RunGfxDcBtc, - SMU_MSG_RunSocDcBtc, - SMU_MSG_SetMemoryChannelEnable, - SMU_MSG_SetDfSwitchType, - SMU_MSG_GetVoltageByDpm, - SMU_MSG_GetVoltageByDpmOverdrive, - SMU_MSG_PowerUpVcn0, - SMU_MSG_PowerDownVcn01, - SMU_MSG_PowerUpVcn1, - SMU_MSG_PowerDownVcn1, + SMU_MESSAGE_TYPES SMU_MSG_MAX_COUNT, }; @@ -207,4 +211,3 @@ enum smu_feature_mask { }; #endif - diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index d5d04d110838..a6398cf961f7 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -105,8 +105,8 @@ static int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg) ret = smu_v11_0_wait_for_response(smu); if (ret) - pr_err("Failed to send message 0x%x, response 0x%x\n", index, - ret); + pr_err("failed send message: %10s (%d) response %#x\n", + smu_get_message_name(smu, msg), index, ret); return ret; @@ -126,8 +126,8 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg, ret = smu_v11_0_wait_for_response(smu); if (ret) - pr_err("Failed to send message 0x%x, response 0x%x, param 0x%x\n", - index, ret, param); + pr_err("failed send message: %10s (%d) \tparam: 0x%08x response %#x\n", + smu_get_message_name(smu, msg), index, param, ret); WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); @@ -137,8 +137,8 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg, ret = smu_v11_0_wait_for_response(smu); if (ret) - pr_err("Failed to send message 0x%x, response 0x%x param 0x%x\n", - index, ret, param); + pr_err("failed send message: %10s (%d) \tparam: 0x%08x response %#x\n", + smu_get_message_name(smu, msg), index, param, ret); return ret; } -- cgit v1.2.3 From cb33363d0e85341c8781d9095341a981ab410a80 Mon Sep 17 00:00:00 2001 From: Kevin Wang Date: Thu, 25 
Jul 2019 11:08:42 +0800 Subject: drm/amd/powerplay: add smu feature name support add smu_get_feature_name support in smu. Signed-off-by: Kevin Wang Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 13 +++ drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 1 + drivers/gpu/drm/amd/powerplay/inc/smu_types.h | 109 +++++++++++++------------ 3 files changed, 71 insertions(+), 52 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index fb967c6d5046..22b581ab8549 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -43,6 +43,19 @@ const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type return __smu_message_names[type]; } +#undef __SMU_DUMMY_MAP +#define __SMU_DUMMY_MAP(fea) #fea +static const char* __smu_feature_names[] = { + SMU_FEATURE_MASKS +}; + +const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature) +{ + if (feature < 0 || feature > SMU_FEATURE_COUNT) + return "unknow smu feature"; + return __smu_feature_names[feature]; +} + int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version) { int ret = 0; diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index 035f857922ec..ba2385026b89 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h @@ -805,5 +805,6 @@ int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_lev int smu_set_display_count(struct smu_context *smu, uint32_t count); bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type); const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type); +const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature); #endif diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h index d42e3424e704..8793c8d0dc52 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h @@ -154,59 +154,64 @@ enum smu_clk_type { SMU_CLK_COUNT, }; +#define SMU_FEATURE_MASKS \ + __SMU_DUMMY_MAP(DPM_PREFETCHER), \ + __SMU_DUMMY_MAP(DPM_GFXCLK), \ + __SMU_DUMMY_MAP(DPM_UCLK), \ + __SMU_DUMMY_MAP(DPM_SOCCLK), \ + __SMU_DUMMY_MAP(DPM_UVD), \ + __SMU_DUMMY_MAP(DPM_VCE), \ + __SMU_DUMMY_MAP(ULV), \ + __SMU_DUMMY_MAP(DPM_MP0CLK), \ + __SMU_DUMMY_MAP(DPM_LINK), \ + __SMU_DUMMY_MAP(DPM_DCEFCLK), \ + __SMU_DUMMY_MAP(DS_GFXCLK), \ + __SMU_DUMMY_MAP(DS_SOCCLK), \ + __SMU_DUMMY_MAP(DS_LCLK), \ + __SMU_DUMMY_MAP(PPT), \ + __SMU_DUMMY_MAP(TDC), \ + __SMU_DUMMY_MAP(THERMAL), \ + __SMU_DUMMY_MAP(GFX_PER_CU_CG), \ + __SMU_DUMMY_MAP(RM), \ + __SMU_DUMMY_MAP(DS_DCEFCLK), \ + __SMU_DUMMY_MAP(ACDC), \ + __SMU_DUMMY_MAP(VR0HOT), \ + __SMU_DUMMY_MAP(VR1HOT), \ + __SMU_DUMMY_MAP(FW_CTF), \ + __SMU_DUMMY_MAP(LED_DISPLAY), \ + __SMU_DUMMY_MAP(FAN_CONTROL), \ + __SMU_DUMMY_MAP(GFX_EDC), \ + __SMU_DUMMY_MAP(GFXOFF), \ + __SMU_DUMMY_MAP(CG), \ + __SMU_DUMMY_MAP(DPM_FCLK), \ + __SMU_DUMMY_MAP(DS_FCLK), \ + __SMU_DUMMY_MAP(DS_MP1CLK), \ + __SMU_DUMMY_MAP(DS_MP0CLK), \ + __SMU_DUMMY_MAP(XGMI), \ + __SMU_DUMMY_MAP(DPM_GFX_PACE), \ + __SMU_DUMMY_MAP(MEM_VDDCI_SCALING), \ + __SMU_DUMMY_MAP(MEM_MVDD_SCALING), \ + __SMU_DUMMY_MAP(DS_UCLK), \ + __SMU_DUMMY_MAP(GFX_ULV), \ + __SMU_DUMMY_MAP(FW_DSTATE), \ + 
__SMU_DUMMY_MAP(BACO), \ + __SMU_DUMMY_MAP(VCN_PG), \ + __SMU_DUMMY_MAP(JPEG_PG), \ + __SMU_DUMMY_MAP(USB_PG), \ + __SMU_DUMMY_MAP(RSMU_SMN_CG), \ + __SMU_DUMMY_MAP(APCC_PLUS), \ + __SMU_DUMMY_MAP(GTHR), \ + __SMU_DUMMY_MAP(GFX_DCS), \ + __SMU_DUMMY_MAP(GFX_SS), \ + __SMU_DUMMY_MAP(OUT_OF_BAND_MONITOR), \ + __SMU_DUMMY_MAP(TEMP_DEPENDENT_VMIN), \ + __SMU_DUMMY_MAP(MMHUB_PG), \ + __SMU_DUMMY_MAP(ATHUB_PG), \ + +#undef __SMU_DUMMY_MAP +#define __SMU_DUMMY_MAP(feature) SMU_FEATURE_##feature##_BIT enum smu_feature_mask { - SMU_FEATURE_DPM_PREFETCHER_BIT, - SMU_FEATURE_DPM_GFXCLK_BIT, - SMU_FEATURE_DPM_UCLK_BIT, - SMU_FEATURE_DPM_SOCCLK_BIT, - SMU_FEATURE_DPM_UVD_BIT, - SMU_FEATURE_DPM_VCE_BIT, - SMU_FEATURE_ULV_BIT, - SMU_FEATURE_DPM_MP0CLK_BIT, - SMU_FEATURE_DPM_LINK_BIT, - SMU_FEATURE_DPM_DCEFCLK_BIT, - SMU_FEATURE_DS_GFXCLK_BIT, - SMU_FEATURE_DS_SOCCLK_BIT, - SMU_FEATURE_DS_LCLK_BIT, - SMU_FEATURE_PPT_BIT, - SMU_FEATURE_TDC_BIT, - SMU_FEATURE_THERMAL_BIT, - SMU_FEATURE_GFX_PER_CU_CG_BIT, - SMU_FEATURE_RM_BIT, - SMU_FEATURE_DS_DCEFCLK_BIT, - SMU_FEATURE_ACDC_BIT, - SMU_FEATURE_VR0HOT_BIT, - SMU_FEATURE_VR1HOT_BIT, - SMU_FEATURE_FW_CTF_BIT, - SMU_FEATURE_LED_DISPLAY_BIT, - SMU_FEATURE_FAN_CONTROL_BIT, - SMU_FEATURE_GFX_EDC_BIT, - SMU_FEATURE_GFXOFF_BIT, - SMU_FEATURE_CG_BIT, - SMU_FEATURE_DPM_FCLK_BIT, - SMU_FEATURE_DS_FCLK_BIT, - SMU_FEATURE_DS_MP1CLK_BIT, - SMU_FEATURE_DS_MP0CLK_BIT, - SMU_FEATURE_XGMI_BIT, - SMU_FEATURE_DPM_GFX_PACE_BIT, - SMU_FEATURE_MEM_VDDCI_SCALING_BIT, - SMU_FEATURE_MEM_MVDD_SCALING_BIT, - SMU_FEATURE_DS_UCLK_BIT, - SMU_FEATURE_GFX_ULV_BIT, - SMU_FEATURE_FW_DSTATE_BIT, - SMU_FEATURE_BACO_BIT, - SMU_FEATURE_VCN_PG_BIT, - SMU_FEATURE_JPEG_PG_BIT, - SMU_FEATURE_USB_PG_BIT, - SMU_FEATURE_RSMU_SMN_CG_BIT, - SMU_FEATURE_APCC_PLUS_BIT, - SMU_FEATURE_GTHR_BIT, - SMU_FEATURE_GFX_DCS_BIT, - SMU_FEATURE_GFX_SS_BIT, - SMU_FEATURE_OUT_OF_BAND_MONITOR_BIT, - SMU_FEATURE_TEMP_DEPENDENT_VMIN_BIT, - SMU_FEATURE_MMHUB_PG_BIT, - SMU_FEATURE_ATHUB_PG_BIT, + SMU_FEATURE_MASKS SMU_FEATURE_COUNT, }; -- cgit v1.2.3 From 26dd668155b24eb24401aed9d73e2f9c7b694ea6 Mon Sep 17 00:00:00 2001 From: Kevin Wang Date: Thu, 25 Jul 2019 11:57:25 +0800 Subject: drm/amd/powerplay: move smu_feature_update_enable_state to up level this function is not ip or asic related function, so move it to top level as public api in smu. 
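For illustration, a minimal sketch of the new calling convention (not code from this patch; the helper name toggle_single_feature() is hypothetical, and the usual amdgpu_smu.h declarations are assumed): a caller now passes a full 64-bit feature mask instead of a bit index, and the common helper splits that mask into the low/high 32-bit words carried by the enable/disable SMU messages.

/*
 * Minimal sketch: enable or disable a single feature through the
 * relocated helper by converting its bit index into the 64-bit mask
 * that smu_feature_update_enable_state() now expects.
 */
static int toggle_single_feature(struct smu_context *smu,
				 enum smu_feature_mask mask, bool enable)
{
	int feature_id = smu_feature_get_index(smu, mask);

	if (feature_id < 0)
		return -EINVAL;

	/* The helper splits this mask into the words consumed by
	 * SMU_MSG_{Enable,Disable}SmuFeatures{Low,High}.
	 */
	return smu_feature_update_enable_state(smu, 1ULL << feature_id, enable);
}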
Signed-off-by: Kevin Wang Reviewed-by: Evan Quan Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 40 +++++++++++++++++++++++++- drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 4 +-- drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 39 ------------------------- 3 files changed, 40 insertions(+), 43 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 22b581ab8549..f19e5957bd13 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -509,6 +509,41 @@ int smu_feature_init_dpm(struct smu_context *smu) return ret; } +int smu_feature_update_enable_state(struct smu_context *smu, uint64_t feature_mask, bool enabled) +{ + uint32_t feature_low = 0, feature_high = 0; + int ret = 0; + + if (!smu->pm_enabled) + return ret; + + feature_low = (feature_mask >> 0 ) & 0xffffffff; + feature_high = (feature_mask >> 32) & 0xffffffff; + + if (enabled) { + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow, + feature_low); + if (ret) + return ret; + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh, + feature_high); + if (ret) + return ret; + + } else { + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow, + feature_low); + if (ret) + return ret; + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh, + feature_high); + if (ret) + return ret; + + } + + return ret; +} int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask) { @@ -534,6 +569,7 @@ int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask, { struct smu_feature *feature = &smu->smu_feature; int feature_id; + uint64_t feature_mask = 0; int ret = 0; feature_id = smu_feature_get_index(smu, mask); @@ -542,8 +578,10 @@ int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask, WARN_ON(feature_id > feature->feature_num); + feature_mask = 1ULL << feature_id; + mutex_lock(&feature->mutex); - ret = smu_feature_update_enable_state(smu, feature_id, enable); + ret = smu_feature_update_enable_state(smu, feature_mask, enable); if (ret) goto failed; diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index ba2385026b89..abc2644b4c07 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h @@ -479,7 +479,6 @@ struct smu_funcs int (*init_display_count)(struct smu_context *smu, uint32_t count); int (*set_allowed_mask)(struct smu_context *smu); int (*get_enabled_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num); - int (*update_feature_enable_state)(struct smu_context *smu, uint32_t feature_id, bool enabled); int (*notify_display_change)(struct smu_context *smu); int (*get_power_limit)(struct smu_context *smu, uint32_t *limit, bool def); int (*set_power_limit)(struct smu_context *smu, uint32_t n); @@ -595,8 +594,6 @@ struct smu_funcs ((smu)->funcs->get_enabled_mask? (smu)->funcs->get_enabled_mask((smu), (mask), (num)) : 0) #define smu_is_dpm_running(smu) \ ((smu)->ppt_funcs->is_dpm_running ? (smu)->ppt_funcs->is_dpm_running((smu)) : 0) -#define smu_feature_update_enable_state(smu, feature_id, enabled) \ - ((smu)->funcs->update_feature_enable_state? (smu)->funcs->update_feature_enable_state((smu), (feature_id), (enabled)) : 0) #define smu_notify_display_change(smu) \ ((smu)->funcs->notify_display_change? 
(smu)->funcs->notify_display_change((smu)) : 0)
 #define smu_store_powerplay_table(smu) \
@@ -804,6 +801,7 @@ enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu);
 int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level);
 int smu_set_display_count(struct smu_context *smu, uint32_t count);
 bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type);
+int smu_feature_update_enable_state(struct smu_context *smu, uint64_t feature_mask, bool enabled);
 const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type);
 const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature);

diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index a6398cf961f7..be93b3b9fd58 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -798,44 +798,6 @@ static int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
 	return ret;
 }
-static int smu_v11_0_update_feature_enable_state(struct smu_context *smu, uint32_t feature_id, bool enabled)
-{
-	uint32_t feature_low = 0, feature_high = 0;
-	int ret = 0;
-
-	if (!smu->pm_enabled)
-		return ret;
-	if (feature_id >= 0 && feature_id < 31)
-		feature_low = (1 << feature_id);
-	else if (feature_id > 31 && feature_id < 63)
-		feature_high = (1 << feature_id);
-	else
-		return -EINVAL;
-
-	if (enabled) {
-		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
-						  feature_low);
-		if (ret)
-			return ret;
-		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
-						  feature_high);
-		if (ret)
-			return ret;
-
-	} else {
-		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
-						  feature_low);
-		if (ret)
-			return ret;
-		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
-						  feature_high);
-		if (ret)
-			return ret;
-
-	}
-
-	return ret;
-}
 static int smu_v11_0_set_allowed_mask(struct smu_context *smu)
@@ -1783,7 +1745,6 @@ static const struct smu_funcs smu_v11_0_funcs = {
 	.set_allowed_mask = smu_v11_0_set_allowed_mask,
 	.get_enabled_mask = smu_v11_0_get_enabled_mask,
 	.system_features_control = smu_v11_0_system_features_control,
-	.update_feature_enable_state = smu_v11_0_update_feature_enable_state,
 	.notify_display_change = smu_v11_0_notify_display_change,
 	.get_power_limit = smu_v11_0_get_power_limit,
 	.set_power_limit = smu_v11_0_set_power_limit,
-- cgit v1.2.3

From 98eb03bbf0175f009a74c80ac12b91a9680292f4 Mon Sep 17 00:00:00 2001
From: Kevin Wang
Date: Thu, 25 Jul 2019 11:47:44 +0800
Subject: drm/amd/powerplay: implement sysfs feature status function in smu

1. Unify the feature enable status format in sysfs.
2. Rename ppfeature to pp_features to match the other pp sysfs node names.
3. This function supports all ASICs; it is not an ASIC-specific function.
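The pp_features write path reduces to a small piece of mask arithmetic; as a reading aid, a minimal sketch of it is shown below (the standalone helper pp_features_delta() is hypothetical, but the logic follows the new smu_sys_set_pp_feature_mask()).

/*
 * Minimal sketch: given the currently enabled 64-bit feature mask and
 * the mask written to the pp_features file, compute which features
 * must be switched on and which must be switched off.
 */
static void pp_features_delta(uint64_t enabled, uint64_t new_mask,
			      uint64_t *to_enable, uint64_t *to_disable)
{
	*to_enable = ~enabled & new_mask;	/* requested but currently off */
	*to_disable = enabled & ~new_mask;	/* on but no longer requested */
}

Each non-zero result is then handed to smu_feature_update_enable_state(), exactly as the implementation below does.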
Signed-off-by: Kevin Wang Reviewed-by: Kenneth Feng Acked-by: Rui Huang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 24 ++-- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 61 +++++++++ drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 8 +- drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 165 ------------------------- drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 153 ----------------------- 5 files changed, 75 insertions(+), 336 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 88ff38242033..71cd7fa5a925 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -745,10 +745,10 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev, } /** - * DOC: ppfeatures + * DOC: pp_features * * The amdgpu driver provides a sysfs API for adjusting what powerplay - * features to be enabled. The file ppfeatures is used for this. And + * features to be enabled. The file pp_features is used for this. And * this is only available for Vega10 and later dGPUs. * * Reading back the file will show you the followings: @@ -760,7 +760,7 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev, * the corresponding bit from original ppfeature masks and input the * new ppfeature masks. */ -static ssize_t amdgpu_set_ppfeature_status(struct device *dev, +static ssize_t amdgpu_set_pp_feature_status(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -777,7 +777,7 @@ static ssize_t amdgpu_set_ppfeature_status(struct device *dev, pr_debug("featuremask = 0x%llx\n", featuremask); if (is_support_sw_smu(adev)) { - ret = smu_set_ppfeature_status(&adev->smu, featuremask); + ret = smu_sys_set_pp_feature_mask(&adev->smu, featuremask); if (ret) return -EINVAL; } else if (adev->powerplay.pp_funcs->set_ppfeature_status) { @@ -789,7 +789,7 @@ static ssize_t amdgpu_set_ppfeature_status(struct device *dev, return count; } -static ssize_t amdgpu_get_ppfeature_status(struct device *dev, +static ssize_t amdgpu_get_pp_feature_status(struct device *dev, struct device_attribute *attr, char *buf) { @@ -797,7 +797,7 @@ static ssize_t amdgpu_get_ppfeature_status(struct device *dev, struct amdgpu_device *adev = ddev->dev_private; if (is_support_sw_smu(adev)) { - return smu_get_ppfeature_status(&adev->smu, buf); + return smu_sys_get_pp_feature_mask(&adev->smu, buf); } else if (adev->powerplay.pp_funcs->get_ppfeature_status) return amdgpu_dpm_get_ppfeature_status(adev, buf); @@ -1457,9 +1457,9 @@ static DEVICE_ATTR(gpu_busy_percent, S_IRUGO, static DEVICE_ATTR(mem_busy_percent, S_IRUGO, amdgpu_get_memory_busy_percent, NULL); static DEVICE_ATTR(pcie_bw, S_IRUGO, amdgpu_get_pcie_bw, NULL); -static DEVICE_ATTR(ppfeatures, S_IRUGO | S_IWUSR, - amdgpu_get_ppfeature_status, - amdgpu_set_ppfeature_status); +static DEVICE_ATTR(pp_features, S_IRUGO | S_IWUSR, + amdgpu_get_pp_feature_status, + amdgpu_set_pp_feature_status); static DEVICE_ATTR(unique_id, S_IRUGO, amdgpu_get_unique_id, NULL); static ssize_t amdgpu_hwmon_show_temp(struct device *dev, @@ -2914,10 +2914,10 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) if ((adev->asic_type >= CHIP_VEGA10) && !(adev->flags & AMD_IS_APU)) { ret = device_create_file(adev->dev, - &dev_attr_ppfeatures); + &dev_attr_pp_features); if (ret) { DRM_ERROR("failed to create device file " - "ppfeatures\n"); + "pp_features\n"); return ret; } } @@ -2971,7 +2971,7 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device 
*adev) device_remove_file(adev->dev, &dev_attr_unique_id); if ((adev->asic_type >= CHIP_VEGA10) && !(adev->flags & AMD_IS_APU)) - device_remove_file(adev->dev, &dev_attr_ppfeatures); + device_remove_file(adev->dev, &dev_attr_pp_features); } void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index f19e5957bd13..4a2ccc5fae73 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -56,6 +56,67 @@ const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask return __smu_feature_names[feature]; } +size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf) +{ + size_t size = 0; + int ret = 0, i = 0; + uint32_t feature_mask[2] = { 0 }; + int32_t feature_index = 0; + uint32_t count = 0; + + ret = smu_feature_get_enabled_mask(smu, feature_mask, 2); + if (ret) + goto failed; + + size = sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n", + feature_mask[1], feature_mask[0]); + + for (i = 0; i < SMU_FEATURE_COUNT; i++) { + feature_index = smu_feature_get_index(smu, i); + if (feature_index < 0) + continue; + size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n", + count++, + smu_get_feature_name(smu, i), + feature_index, + !!smu_feature_is_enabled(smu, i) ? "enabeld" : "disabled"); + } + +failed: + return size; +} + +int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask) +{ + int ret = 0; + uint32_t feature_mask[2] = { 0 }; + uint64_t feature_2_enabled = 0; + uint64_t feature_2_disabled = 0; + uint64_t feature_enables = 0; + + ret = smu_feature_get_enabled_mask(smu, feature_mask, 2); + if (ret) + return ret; + + feature_enables = ((uint64_t)feature_mask[1] << 32 | (uint64_t)feature_mask[0]); + + feature_2_enabled = ~feature_enables & new_mask; + feature_2_disabled = feature_enables & ~new_mask; + + if (feature_2_enabled) { + ret = smu_feature_update_enable_state(smu, feature_2_enabled, true); + if (ret) + return ret; + } + if (feature_2_disabled) { + ret = smu_feature_update_enable_state(smu, feature_2_disabled, false); + if (ret) + return ret; + } + + return ret; +} + int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version) { int ret = 0; diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index abc2644b4c07..ac9e9d5d8a5c 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h @@ -432,8 +432,6 @@ struct pptable_funcs { uint32_t *mclk_mask, uint32_t *soc_mask); int (*set_cpu_power_state)(struct smu_context *smu); - int (*set_ppfeature_status)(struct smu_context *smu, uint64_t ppfeatures); - int (*get_ppfeature_status)(struct smu_context *smu, char *buf); bool (*is_dpm_running)(struct smu_context *smu); int (*tables_init)(struct smu_context *smu, struct smu_table *tables); int (*set_thermal_fan_table)(struct smu_context *smu); @@ -713,10 +711,6 @@ struct smu_funcs ((smu)->ppt_funcs->dpm_set_vce_enable ? (smu)->ppt_funcs->dpm_set_vce_enable((smu), (enable)) : 0) #define smu_set_xgmi_pstate(smu, pstate) \ ((smu)->funcs->set_xgmi_pstate ? (smu)->funcs->set_xgmi_pstate((smu), (pstate)) : 0) -#define smu_set_ppfeature_status(smu, ppfeatures) \ - ((smu)->ppt_funcs->set_ppfeature_status ? (smu)->ppt_funcs->set_ppfeature_status((smu), (ppfeatures)) : -EINVAL) -#define smu_get_ppfeature_status(smu, buf) \ - ((smu)->ppt_funcs->get_ppfeature_status ? 
(smu)->ppt_funcs->get_ppfeature_status((smu), (buf)) : -EINVAL) #define smu_set_watermarks_table(smu, tab, clock_ranges) \ ((smu)->ppt_funcs->set_watermarks_table ? (smu)->ppt_funcs->set_watermarks_table((smu), (tab), (clock_ranges)) : 0) #define smu_get_current_clk_freq_by_table(smu, clk_type, value) \ @@ -804,5 +798,7 @@ bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type) int smu_feature_update_enable_state(struct smu_context *smu, uint64_t feature_mask, bool enabled); const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type); const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature); +size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf); +int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask); #endif diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index bdb9be75c0be..5a0b14f1c4f7 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -1423,169 +1423,6 @@ static int navi10_get_uclk_dpm_states(struct smu_context *smu, uint32_t *clocks_ return 0; } -static int navi10_get_ppfeature_status(struct smu_context *smu, - char *buf) -{ - static const char *ppfeature_name[] = { - "DPM_PREFETCHER", - "DPM_GFXCLK", - "DPM_GFX_PACE", - "DPM_UCLK", - "DPM_SOCCLK", - "DPM_MP0CLK", - "DPM_LINK", - "DPM_DCEFCLK", - "MEM_VDDCI_SCALING", - "MEM_MVDD_SCALING", - "DS_GFXCLK", - "DS_SOCCLK", - "DS_LCLK", - "DS_DCEFCLK", - "DS_UCLK", - "GFX_ULV", - "FW_DSTATE", - "GFXOFF", - "BACO", - "VCN_PG", - "JPEG_PG", - "USB_PG", - "RSMU_SMN_CG", - "PPT", - "TDC", - "GFX_EDC", - "APCC_PLUS", - "GTHR", - "ACDC", - "VR0HOT", - "VR1HOT", - "FW_CTF", - "FAN_CONTROL", - "THERMAL", - "GFX_DCS", - "RM", - "LED_DISPLAY", - "GFX_SS", - "OUT_OF_BAND_MONITOR", - "TEMP_DEPENDENT_VMIN", - "MMHUB_PG", - "ATHUB_PG"}; - static const char *output_title[] = { - "FEATURES", - "BITMASK", - "ENABLEMENT"}; - uint64_t features_enabled; - uint32_t feature_mask[2]; - int i; - int ret = 0; - int size = 0; - - ret = smu_feature_get_enabled_mask(smu, feature_mask, 2); - PP_ASSERT_WITH_CODE(!ret, - "[GetPPfeatureStatus] Failed to get enabled smc features!", - return ret); - features_enabled = (uint64_t)feature_mask[0] | - (uint64_t)feature_mask[1] << 32; - - size += sprintf(buf + size, "Current ppfeatures: 0x%016llx\n", features_enabled); - size += sprintf(buf + size, "%-19s %-22s %s\n", - output_title[0], - output_title[1], - output_title[2]); - for (i = 0; i < (sizeof(ppfeature_name) / sizeof(ppfeature_name[0])); i++) { - size += sprintf(buf + size, "%-19s 0x%016llx %6s\n", - ppfeature_name[i], - 1ULL << i, - (features_enabled & (1ULL << i)) ? 
"Y" : "N"); - } - - return size; -} - -static int navi10_enable_smc_features(struct smu_context *smu, - bool enabled, - uint64_t feature_masks) -{ - struct smu_feature *feature = &smu->smu_feature; - uint32_t feature_low, feature_high; - uint32_t feature_mask[2]; - int ret = 0; - - feature_low = (uint32_t)(feature_masks & 0xFFFFFFFF); - feature_high = (uint32_t)((feature_masks & 0xFFFFFFFF00000000ULL) >> 32); - - if (enabled) { - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow, - feature_low); - if (ret) - return ret; - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh, - feature_high); - if (ret) - return ret; - } else { - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow, - feature_low); - if (ret) - return ret; - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh, - feature_high); - if (ret) - return ret; - } - - ret = smu_feature_get_enabled_mask(smu, feature_mask, 2); - if (ret) - return ret; - - mutex_lock(&feature->mutex); - bitmap_copy(feature->enabled, (unsigned long *)&feature_mask, - feature->feature_num); - mutex_unlock(&feature->mutex); - - return 0; -} - -static int navi10_set_ppfeature_status(struct smu_context *smu, - uint64_t new_ppfeature_masks) -{ - uint64_t features_enabled; - uint32_t feature_mask[2]; - uint64_t features_to_enable; - uint64_t features_to_disable; - int ret = 0; - - ret = smu_feature_get_enabled_mask(smu, feature_mask, 2); - PP_ASSERT_WITH_CODE(!ret, - "[SetPPfeatureStatus] Failed to get enabled smc features!", - return ret); - features_enabled = (uint64_t)feature_mask[0] | - (uint64_t)feature_mask[1] << 32; - - features_to_disable = - features_enabled & ~new_ppfeature_masks; - features_to_enable = - ~features_enabled & new_ppfeature_masks; - - pr_debug("features_to_disable 0x%llx\n", features_to_disable); - pr_debug("features_to_enable 0x%llx\n", features_to_enable); - - if (features_to_disable) { - ret = navi10_enable_smc_features(smu, false, features_to_disable); - PP_ASSERT_WITH_CODE(!ret, - "[SetPPfeatureStatus] Failed to disable smc features!", - return ret); - } - - if (features_to_enable) { - ret = navi10_enable_smc_features(smu, true, features_to_enable); - PP_ASSERT_WITH_CODE(!ret, - "[SetPPfeatureStatus] Failed to enable smc features!", - return ret); - } - - return 0; -} - static int navi10_set_peak_clock_by_device(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; @@ -1690,8 +1527,6 @@ static const struct pptable_funcs navi10_ppt_funcs = { .set_watermarks_table = navi10_set_watermarks_table, .read_sensor = navi10_read_sensor, .get_uclk_dpm_states = navi10_get_uclk_dpm_states, - .get_ppfeature_status = navi10_get_ppfeature_status, - .set_ppfeature_status = navi10_set_ppfeature_status, .set_performance_level = navi10_set_performance_level, .get_thermal_temperature_range = navi10_get_thermal_temperature_range, }; diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c index 95afc153a924..0102e24063d4 100644 --- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c @@ -2858,157 +2858,6 @@ static int vega20_dpm_set_vce_enable(struct smu_context *smu, bool enable) return smu_feature_set_enabled(smu, SMU_FEATURE_DPM_VCE_BIT, enable); } -static int vega20_get_enabled_smc_features(struct smu_context *smu, - uint64_t *features_enabled) -{ - uint32_t feature_mask[2] = {0, 0}; - int ret = 0; - - ret = smu_feature_get_enabled_mask(smu, feature_mask, 2); - if (ret) - return ret; 
- - *features_enabled = ((((uint64_t)feature_mask[0] << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) | - (((uint64_t)feature_mask[1] << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK)); - - return ret; -} - -static int vega20_enable_smc_features(struct smu_context *smu, - bool enable, uint64_t feature_mask) -{ - uint32_t smu_features_low, smu_features_high; - int ret = 0; - - smu_features_low = (uint32_t)((feature_mask & SMU_FEATURES_LOW_MASK) >> SMU_FEATURES_LOW_SHIFT); - smu_features_high = (uint32_t)((feature_mask & SMU_FEATURES_HIGH_MASK) >> SMU_FEATURES_HIGH_SHIFT); - - if (enable) { - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow, - smu_features_low); - if (ret) - return ret; - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh, - smu_features_high); - if (ret) - return ret; - } else { - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow, - smu_features_low); - if (ret) - return ret; - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh, - smu_features_high); - if (ret) - return ret; - } - - return 0; - -} - -static int vega20_get_ppfeature_status(struct smu_context *smu, char *buf) -{ - static const char *ppfeature_name[] = { - "DPM_PREFETCHER", - "GFXCLK_DPM", - "UCLK_DPM", - "SOCCLK_DPM", - "UVD_DPM", - "VCE_DPM", - "ULV", - "MP0CLK_DPM", - "LINK_DPM", - "DCEFCLK_DPM", - "GFXCLK_DS", - "SOCCLK_DS", - "LCLK_DS", - "PPT", - "TDC", - "THERMAL", - "GFX_PER_CU_CG", - "RM", - "DCEFCLK_DS", - "ACDC", - "VR0HOT", - "VR1HOT", - "FW_CTF", - "LED_DISPLAY", - "FAN_CONTROL", - "GFX_EDC", - "GFXOFF", - "CG", - "FCLK_DPM", - "FCLK_DS", - "MP1CLK_DS", - "MP0CLK_DS", - "XGMI", - "ECC"}; - static const char *output_title[] = { - "FEATURES", - "BITMASK", - "ENABLEMENT"}; - uint64_t features_enabled; - int i; - int ret = 0; - int size = 0; - - ret = vega20_get_enabled_smc_features(smu, &features_enabled); - if (ret) - return ret; - - size += sprintf(buf + size, "Current ppfeatures: 0x%016llx\n", features_enabled); - size += sprintf(buf + size, "%-19s %-22s %s\n", - output_title[0], - output_title[1], - output_title[2]); - for (i = 0; i < GNLD_FEATURES_MAX; i++) { - size += sprintf(buf + size, "%-19s 0x%016llx %6s\n", - ppfeature_name[i], - 1ULL << i, - (features_enabled & (1ULL << i)) ? 
"Y" : "N"); - } - - return size; -} - -static int vega20_set_ppfeature_status(struct smu_context *smu, uint64_t new_ppfeature_masks) -{ - uint64_t features_enabled; - uint64_t features_to_enable; - uint64_t features_to_disable; - int ret = 0; - - if (new_ppfeature_masks >= (1ULL << GNLD_FEATURES_MAX)) - return -EINVAL; - - ret = vega20_get_enabled_smc_features(smu, &features_enabled); - if (ret) - return ret; - - features_to_disable = - features_enabled & ~new_ppfeature_masks; - features_to_enable = - ~features_enabled & new_ppfeature_masks; - - pr_debug("features_to_disable 0x%llx\n", features_to_disable); - pr_debug("features_to_enable 0x%llx\n", features_to_enable); - - if (features_to_disable) { - ret = vega20_enable_smc_features(smu, false, features_to_disable); - if (ret) - return ret; - } - - if (features_to_enable) { - ret = vega20_enable_smc_features(smu, true, features_to_enable); - if (ret) - return ret; - } - - return 0; -} - static bool vega20_is_dpm_running(struct smu_context *smu) { int ret = 0; @@ -3311,8 +3160,6 @@ static const struct pptable_funcs vega20_ppt_funcs = { .force_dpm_limit_value = vega20_force_dpm_limit_value, .unforce_dpm_levels = vega20_unforce_dpm_levels, .get_profiling_clk_mask = vega20_get_profiling_clk_mask, - .set_ppfeature_status = vega20_set_ppfeature_status, - .get_ppfeature_status = vega20_get_ppfeature_status, .is_dpm_running = vega20_is_dpm_running, .set_thermal_fan_table = vega20_set_thermal_fan_table, .get_fan_speed_percent = vega20_get_fan_speed_percent, -- cgit v1.2.3 From 3207dcf3afd6bffe5e887d483e7071616b97bd8f Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 26 Jul 2019 14:14:15 -0500 Subject: drm/amdgpu/gfx10: update golden settings for navi14 Updated settings for hw team. Reviewed-by: Xiaojie Yuan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index ef9f552b7001..e12478a5b902 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -138,7 +138,6 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_1[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04900000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DFSM_TILES_IN_FLIGHT, 0x0000ffff, 0x0000003f), SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_LAST_OF_BURST_CONFIG, 0xffffffff, 0x03860204), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1800ff, 0x00000043), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff0ffff, 0x00000500), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PRIV_CONTROL, 0x000007ff, 0x000001fe), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0xffffffff, 0xe4e4e4e4), -- cgit v1.2.3 From c74dbe44eacf00a5ccc229b5cc340a9b7f6851a0 Mon Sep 17 00:00:00 2001 From: Thong Thai Date: Thu, 25 Jul 2019 11:26:56 -0400 Subject: drm/amd/amdgpu/vcn_v2_0: Move VCN 2.0 specific dec ring test to vcn_v2_0 VCN 2.0 firmware now requires a packet start command to be sent before any other decode ring buffer command. 
Signed-off-by: Thong Thai
Reviewed-by: Leo Liu
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 32 +++++++++++++++++++++++++++++++-
 1 file changed, 31 insertions(+), 1 deletion(-)

(limited to 'drivers/gpu/drm')

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 80bb49736ee4..eef3ec5449af 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -2092,6 +2092,36 @@ static int vcn_v2_0_process_interrupt(struct amdgpu_device *adev,
 	return 0;
 }
+static int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	uint32_t tmp = 0;
+	unsigned i;
+	int r;
+
+	WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
+	r = amdgpu_ring_alloc(ring, 4);
+	if (r)
+		return r;
+	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.cmd, 0));
+	amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1));
+	amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0));
+	amdgpu_ring_write(ring, 0xDEADBEEF);
+	amdgpu_ring_commit(ring);
+	for (i = 0; i < adev->usec_timeout; i++) {
+		tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
+		if (tmp == 0xDEADBEEF)
+			break;
+		DRM_UDELAY(1);
+	}
+
+	if (i >= adev->usec_timeout)
+		r = -ETIMEDOUT;
+
+	return r;
+}
+
+
 static int vcn_v2_0_set_powergating_state(void *handle,
 			enum amd_powergating_state state)
 {
@@ -2155,7 +2185,7 @@ static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = {
 	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
 	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
 	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
-	.test_ring = amdgpu_vcn_dec_ring_test_ring,
+	.test_ring = vcn_v2_0_dec_ring_test_ring,
 	.test_ib = amdgpu_vcn_dec_ring_test_ib,
 	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
 	.insert_start = vcn_v2_0_dec_ring_insert_start,
-- cgit v1.2.3

From 6e92e156aacce9b5d2f8003b92d71e676e5e9139 Mon Sep 17 00:00:00 2001
From: Kenneth Feng
Date: Mon, 29 Jul 2019 17:51:55 +0800
Subject: drm/amdgpu/powerplay: provide the interface to disable uclk switch for DAL

Provide the interface for DAL to disable uclk switching on navi10. In this
case, the uclk will be fixed to the maximum. This is a workaround for cases
where the display configuration causes an underflow issue.
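For illustration only, a minimal sketch of how the display side is expected to drive this (the wrapper dm_pin_memory_clock() is hypothetical; smu_display_disable_memory_clock_switch() is the macro added by this patch):

/*
 * Minimal sketch: pin uclk to its maximum while the current display
 * configuration cannot tolerate a memory clock switch, and release it
 * again once p-state switching is safe.
 */
static int dm_pin_memory_clock(struct smu_context *smu, bool pin)
{
	/*
	 * On navi10 this lands in
	 * navi10_display_disable_memory_clock_switch(): pinning raises
	 * the hard-min uclk to the maximum sustainable clock; releasing
	 * restores the hard-min last requested by DAL.
	 */
	return smu_display_disable_memory_clock_switch(smu, pin);
}

DM reaches this path through the new pp_nv_set_pstate_handshake_support() hook, where pstate_handshake_supported == false means the memory clock must stay pinned.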
Signed-off-by: Kenneth Feng Reviewed-by: Kevin Wang Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- .../drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 14 ++++++++++++ drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 5 +++++ drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 25 ++++++++++++++++++++++ drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 7 ++++++ 4 files changed, 51 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c index 592fa499c9f8..e4d0b5b845c5 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c @@ -801,6 +801,19 @@ enum pp_smu_status pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz) return PP_SMU_RESULT_OK; } +enum pp_smu_status pp_nv_set_pstate_handshake_support( + struct pp_smu *pp, BOOLEAN pstate_handshake_supported) +{ + const struct dc_context *ctx = pp->dm; + struct amdgpu_device *adev = ctx->driver_context; + struct smu_context *smu = &adev->smu; + + if (smu_display_disable_memory_clock_switch(smu, !pstate_handshake_supported)) + return PP_SMU_RESULT_FAIL; + + return PP_SMU_RESULT_OK; +} + enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp, enum pp_smu_nv_clock_id clock_id, int mhz) { @@ -916,6 +929,7 @@ void dm_pp_get_funcs( funcs->nv_funcs.get_maximum_sustainable_clocks = pp_nv_get_maximum_sustainable_clocks; /*todo compare data with window driver */ funcs->nv_funcs.get_uclk_dpm_states = pp_nv_get_uclk_dpm_states; + funcs->nv_funcs.set_pstate_handshake_support = pp_nv_set_pstate_handshake_support; break; #endif default: diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index ac9e9d5d8a5c..fcd0db362977 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h @@ -365,6 +365,8 @@ struct smu_context #define WATERMARKS_EXIST (1 << 0) #define WATERMARKS_LOADED (1 << 1) uint32_t watermarks_bitmap; + uint32_t hard_min_uclk_req_from_dal; + bool disable_uclk_switch; uint32_t workload_mask; uint32_t workload_prority[WORKLOAD_POLICY_MAX]; @@ -446,6 +448,7 @@ struct pptable_funcs { int (*get_uclk_dpm_states)(struct smu_context *smu, uint32_t *clocks_in_khz, uint32_t *num_states); int (*set_default_od_settings)(struct smu_context *smu, bool initialize); int (*set_performance_level)(struct smu_context *smu, enum amd_dpm_forced_level level); + int (*display_disable_memory_clock_switch)(struct smu_context *smu, bool disable_memory_clock_switch); }; struct smu_funcs @@ -695,6 +698,8 @@ struct smu_funcs ((smu)->ppt_funcs->get_clock_by_type_with_voltage ? (smu)->ppt_funcs->get_clock_by_type_with_voltage((smu), (type), (clocks)) : 0) #define smu_display_clock_voltage_request(smu, clock_req) \ ((smu)->funcs->display_clock_voltage_request ? (smu)->funcs->display_clock_voltage_request((smu), (clock_req)) : 0) +#define smu_display_disable_memory_clock_switch(smu, disable_memory_clock_switch) \ + ((smu)->ppt_funcs->display_disable_memory_clock_switch ? (smu)->ppt_funcs->display_disable_memory_clock_switch((smu), (disable_memory_clock_switch)) : -EINVAL) #define smu_get_dal_power_level(smu, clocks) \ ((smu)->funcs->get_dal_power_level ? 
(smu)->funcs->get_dal_power_level((smu), (clocks)) : 0) #define smu_get_perf_level(smu, designation, level) \ diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index 5a0b14f1c4f7..b92e109ed46d 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -1493,6 +1493,30 @@ static int navi10_get_thermal_temperature_range(struct smu_context *smu, return 0; } +static int navi10_display_disable_memory_clock_switch(struct smu_context *smu, + bool disable_memory_clock_switch) +{ + int ret = 0; + struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks = + (struct smu_11_0_max_sustainable_clocks *) + smu->smu_table.max_sustainable_clocks; + uint32_t min_memory_clock = smu->hard_min_uclk_req_from_dal; + uint32_t max_memory_clock = max_sustainable_clocks->uclock; + + if(smu->disable_uclk_switch == disable_memory_clock_switch) + return 0; + + if(disable_memory_clock_switch) + ret = smu_set_hard_freq_range(smu, SMU_UCLK, max_memory_clock, 0); + else + ret = smu_set_hard_freq_range(smu, SMU_UCLK, min_memory_clock, 0); + + if(!ret) + smu->disable_uclk_switch = disable_memory_clock_switch; + + return ret; +} + static const struct pptable_funcs navi10_ppt_funcs = { .tables_init = navi10_tables_init, .alloc_dpm_context = navi10_allocate_dpm_context, @@ -1529,6 +1553,7 @@ static const struct pptable_funcs navi10_ppt_funcs = { .get_uclk_dpm_states = navi10_get_uclk_dpm_states, .set_performance_level = navi10_set_performance_level, .get_thermal_temperature_range = navi10_get_thermal_temperature_range, + .display_disable_memory_clock_switch = navi10_display_disable_memory_clock_switch, }; void navi10_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index be93b3b9fd58..89f4c9e19642 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -1313,16 +1313,23 @@ smu_v11_0_display_clock_voltage_request(struct smu_context *smu, if (ret) goto failed; + if (clk_select == SMU_UCLK && smu->disable_uclk_switch) + return 0; + clk_id = smu_clk_get_index(smu, clk_select); if (clk_id < 0) { ret = -EINVAL; goto failed; } + mutex_lock(&smu->mutex); ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq, (clk_id << 16) | clk_freq); mutex_unlock(&smu->mutex); + + if(clk_select == SMU_UCLK) + smu->hard_min_uclk_req_from_dal = clk_freq; } failed: -- cgit v1.2.3 From 649412069501c1710f42ad50ab420e2fd5eaea70 Mon Sep 17 00:00:00 2001 From: Christian König Date: Tue, 30 Jul 2019 11:17:03 +0200 Subject: drm/amdgpu: fix error handling in amdgpu_cs_process_fence_dep MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We always need to drop the ctx reference and should check for errors first and then dereference the fence pointer. 
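Condensed to its essentials, the corrected ordering looks roughly like the sketch below (a hypothetical standalone helper, not the driver function itself): drop the context reference on every path, then validate the pointer before it is dereferenced or passed on.

/*
 * Minimal sketch of the corrected pattern: release the lookup reference
 * unconditionally, and check IS_ERR() before the fence is used.
 */
static int resolve_dep_fence(struct amdgpu_ctx *ctx,
			     struct drm_sched_entity *entity,
			     uint64_t handle, struct dma_fence **out)
{
	struct dma_fence *fence = amdgpu_ctx_get_fence(ctx, entity, handle);

	amdgpu_ctx_put(ctx);		/* always drop the ctx reference */

	if (IS_ERR(fence))
		return PTR_ERR(fence);	/* error pointer: never dereference */

	*out = fence;			/* may be NULL if already signalled */
	return 0;
}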
Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index e069de8b54e6..4e4094f842e7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1044,29 +1044,27 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p, return r; } - fence = amdgpu_ctx_get_fence(ctx, entity, - deps[i].handle); + fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle); + amdgpu_ctx_put(ctx); + + if (IS_ERR(fence)) + return PTR_ERR(fence); + else if (!fence) + continue; if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) { - struct drm_sched_fence *s_fence = to_drm_sched_fence(fence); + struct drm_sched_fence *s_fence; struct dma_fence *old = fence; + s_fence = to_drm_sched_fence(fence); fence = dma_fence_get(&s_fence->scheduled); dma_fence_put(old); } - if (IS_ERR(fence)) { - r = PTR_ERR(fence); - amdgpu_ctx_put(ctx); + r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true); + dma_fence_put(fence); + if (r) return r; - } else if (fence) { - r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, - true); - dma_fence_put(fence); - amdgpu_ctx_put(ctx); - if (r) - return r; - } } return 0; } -- cgit v1.2.3 From 1a2c29bce06083e0f53f3a328f0bb43a2856b622 Mon Sep 17 00:00:00 2001 From: Wang Xiayang Date: Sat, 27 Jul 2019 17:30:30 +0800 Subject: drm/amdgpu: fix a potential information leaking bug MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Coccinelle reports a path that the array "data" is never initialized. The path skips the checks in the conditional branches when either of callback functions, read_wave_vgprs and read_wave_sgprs, is not registered. Later, the uninitialized "data" array is read in the while-loop below and passed to put_user(). Fix the path by allocating the array with kcalloc(). The patch is simplier than adding a fall-back branch that explicitly calls memset(data, 0, ...). Also it does not need the multiplication 1024*sizeof(*data) as the size parameter for memset() though there is no risk of integer overflow. Signed-off-by: Wang Xiayang Reviewed-by: Chunming Zhou Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index 6d54decef7f8..5652cc72ed3a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -707,7 +707,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, thread = (*pos & GENMASK_ULL(59, 52)) >> 52; bank = (*pos & GENMASK_ULL(61, 60)) >> 60; - data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL); + data = kcalloc(1024, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; -- cgit v1.2.3 From fb6959ae50176758a073687dbb081d26521f4576 Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Tue, 30 Jul 2019 09:08:34 -0400 Subject: drm/amd/display: Embed DCN2 SOC bounding box [Why] In order to support uclk switching on NV10 the SOC bounding box needs to be updated. 
[How] We currently read the constants from the gpu info FW, but supporting workarounds in DC for different versions of the FW adds additional complexity to the codebase. NV10 has been released so it's cleanest to keep the bounding box and source code in sync by embedding the bounding box like we do for other ASICs. Fixes: 02316e963a5a ("drm/amd/display: Force uclk to max for every state") Signed-off-by: Nicholas Kazlauskas Acked-by: Alex Deucher Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 114 ++++++++++++++++++++- 1 file changed, 112 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index ad015290e17b..955cae6571cb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -82,7 +82,7 @@ #include "amdgpu_socbb.h" -#define SOC_BOUNDING_BOX_VALID false +#define SOC_BOUNDING_BOX_VALID true #define DC_LOGGER_INIT(logger) struct _vcs_dpi_ip_params_st dcn2_0_ip = { @@ -156,7 +156,117 @@ struct _vcs_dpi_ip_params_st dcn2_0_ip = { .xfc_fill_constant_bytes = 0, }; -struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = { 0 }; +struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = { + /* Defaults that get patched on driver load from firmware. */ + .clock_limits = { + { + .state = 0, + .dcfclk_mhz = 560.0, + .fabricclk_mhz = 560.0, + .dispclk_mhz = 513.0, + .dppclk_mhz = 513.0, + .phyclk_mhz = 540.0, + .socclk_mhz = 560.0, + .dscclk_mhz = 171.0, + .dram_speed_mts = 8960.0, + }, + { + .state = 1, + .dcfclk_mhz = 694.0, + .fabricclk_mhz = 694.0, + .dispclk_mhz = 642.0, + .dppclk_mhz = 642.0, + .phyclk_mhz = 600.0, + .socclk_mhz = 694.0, + .dscclk_mhz = 214.0, + .dram_speed_mts = 11104.0, + }, + { + .state = 2, + .dcfclk_mhz = 875.0, + .fabricclk_mhz = 875.0, + .dispclk_mhz = 734.0, + .dppclk_mhz = 734.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 875.0, + .dscclk_mhz = 245.0, + .dram_speed_mts = 14000.0, + }, + { + .state = 3, + .dcfclk_mhz = 1000.0, + .fabricclk_mhz = 1000.0, + .dispclk_mhz = 1100.0, + .dppclk_mhz = 1100.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 1000.0, + .dscclk_mhz = 367.0, + .dram_speed_mts = 16000.0, + }, + { + .state = 4, + .dcfclk_mhz = 1200.0, + .fabricclk_mhz = 1200.0, + .dispclk_mhz = 1284.0, + .dppclk_mhz = 1284.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 1200.0, + .dscclk_mhz = 428.0, + .dram_speed_mts = 16000.0, + }, + /*Extra state, no dispclk ramping*/ + { + .state = 5, + .dcfclk_mhz = 1200.0, + .fabricclk_mhz = 1200.0, + .dispclk_mhz = 1284.0, + .dppclk_mhz = 1284.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 1200.0, + .dscclk_mhz = 428.0, + .dram_speed_mts = 16000.0, + }, + }, + .num_states = 5, + .sr_exit_time_us = 8.6, + .sr_enter_plus_exit_time_us = 10.9, + .urgent_latency_us = 4.0, + .urgent_latency_pixel_data_only_us = 4.0, + .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, + .urgent_latency_vm_data_only_us = 4.0, + .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, + .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, + .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, + .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 40.0, + .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 40.0, + .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0, + .max_avg_sdp_bw_use_normal_percent = 40.0, + .max_avg_dram_bw_use_normal_percent = 40.0, + .writeback_latency_us = 12.0, + 
.ideal_dram_bw_after_urgent_percent = 40.0, + .max_request_size_bytes = 256, + .dram_channel_width_bytes = 2, + .fabric_datapath_to_dcn_data_return_bytes = 64, + .dcn_downspread_percent = 0.5, + .downspread_percent = 0.38, + .dram_page_open_time_ns = 50.0, + .dram_rw_turnaround_time_ns = 17.5, + .dram_return_buffer_per_channel_bytes = 8192, + .round_trip_ping_latency_dcfclk_cycles = 131, + .urgent_out_of_order_return_per_channel_bytes = 256, + .channel_interleave_bytes = 256, + .num_banks = 8, + .num_chans = 16, + .vmm_page_size_bytes = 4096, + .dram_clock_change_latency_us = 404.0, + .dummy_pstate_latency_us = 5.0, + .writeback_dram_clock_change_latency_us = 23.0, + .return_bus_width_bytes = 64, + .dispclk_dppclk_vco_speed_mhz = 3850, + .xfc_bus_transport_time_us = 20, + .xfc_xbuf_latency_tolerance_us = 4, + .use_urgent_burst_bw = 0 +}; #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL -- cgit v1.2.3 From f16d523f9d83a73c274fb97a8fed17114d67b30e Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Tue, 30 Jul 2019 09:45:33 -0400 Subject: drm/amd/display: Support uclk switching for DCN2 [Why] We were previously forcing the uclk for every state to max and reducing the switch time to prevent uclk switching from occuring. This workaround was previously needed in order to avoid hangs + underflow under certain display configurations. Now that DC has the proper fix complete we can drop the hacks and improve power for most display configurations. [How] We still need the function pointers hooked up to grab the real uclk states from pplib. The rest of the prior hack can be reverted. The key requirements here are really just DC support, updated firmware, and support for disabling p-state support when needed in pplib/smu. When these requirements are met uclk switching works without underflow or hangs. Fixes: 02316e963a5a ("drm/amd/display: Force uclk to max for every state") Signed-off-by: Nicholas Kazlauskas Acked-by: Alex Deucher Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 10 ---------- 1 file changed, 10 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 955cae6571cb..e90b6bcad05b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -2819,9 +2819,6 @@ static void cap_soc_clocks( && max_clocks.uClockInKhz != 0) bb->clock_limits[i].dram_speed_mts = (max_clocks.uClockInKhz / 1000) * 16; - // HACK: Force every uclk to max for now to "disable" uclk switching. - bb->clock_limits[i].dram_speed_mts = (max_clocks.uClockInKhz / 1000) * 16; - if ((bb->clock_limits[i].fabricclk_mhz > (max_clocks.fabricClockInKhz / 1000)) && max_clocks.fabricClockInKhz != 0) bb->clock_limits[i].fabricclk_mhz = (max_clocks.fabricClockInKhz / 1000); @@ -3037,8 +3034,6 @@ static bool init_soc_bounding_box(struct dc *dc, le32_to_cpu(bb->vmm_page_size_bytes); dcn2_0_soc.dram_clock_change_latency_us = fixed16_to_double_to_cpu(bb->dram_clock_change_latency_us); - // HACK!! 
Lower uclock latency switch time so we don't switch - dcn2_0_soc.dram_clock_change_latency_us = 10; dcn2_0_soc.writeback_dram_clock_change_latency_us = fixed16_to_double_to_cpu(bb->writeback_dram_clock_change_latency_us); dcn2_0_soc.return_bus_width_bytes = @@ -3080,7 +3075,6 @@ static bool init_soc_bounding_box(struct dc *dc, struct pp_smu_nv_clock_table max_clocks = {0}; unsigned int uclk_states[8] = {0}; unsigned int num_states = 0; - int i; enum pp_smu_status status; bool clock_limits_available = false; bool uclk_states_available = false; @@ -3102,10 +3096,6 @@ static bool init_soc_bounding_box(struct dc *dc, clock_limits_available = (status == PP_SMU_RESULT_OK); } - // HACK: Use the max uclk_states value for all elements. - for (i = 0; i < num_states; i++) - uclk_states[i] = uclk_states[num_states - 1]; - if (clock_limits_available && uclk_states_available && num_states) update_bounding_box(dc, &dcn2_0_soc, &max_clocks, uclk_states, num_states); else if (clock_limits_available) -- cgit v1.2.3 From 7ce55e0b6fbbbddf4984e8343fbb4254afc78dc9 Mon Sep 17 00:00:00 2001 From: Jay Cornwall Date: Sun, 28 Jul 2019 15:24:40 -0500 Subject: drm/amdkfd: Fix gfx10 wave64 VGPR context restore Copy/paste error, first 4 VGPRs are separated by 64 dwords (256 bytes). Cc: Shaoyun Liu Signed-off-by: Jay Cornwall Acked-by: Alex Deucher Reviewed-by: shaoyunl Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h | 6 +++--- drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h index 2b3d7017f142..c10e424dd1f5 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h @@ -982,9 +982,9 @@ static const uint32_t cwsr_trap_gfx10_hex[] = { 0x00000080, 0xbf0a6f7c, 0xbf85fff7, 0xbeff03c1, 0xe0304000, 0x725d0000, - 0xe0304080, 0x725d0100, - 0xe0304100, 0x725d0200, - 0xe0304180, 0x725d0300, + 0xe0304100, 0x725d0100, + 0xe0304200, 0x725d0200, + 0xe0304300, 0x725d0300, 0xb9782a05, 0x80788178, 0x907c9973, 0x877c817c, 0xbf06817c, 0xbf850002, diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm index 261e05430852..be6f7d1847fa 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm @@ -747,9 +747,9 @@ L_RESTORE_SHARED_VGPR_WAVE64_LOOP: /* VGPR restore on v0 */ L_RESTORE_V0: buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 - buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:128 - buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:128*2 - buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:128*3 + buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256 + buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*2 + buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*3 /* restore SGPRs */ //will be 2+8+16*6 -- cgit v1.2.3 From f9e346aba193d82286dbb3aab358906a7416568b Mon Sep 17 00:00:00 2001 From: Jay Cornwall Date: Sun, 28 Jul 2019 15:25:05 -0500 Subject: drm/amdkfd: Save/restore flat_scratch_lo/hi on gfx10 These moved from SGPRs in 
gfx9 to HWREG in gfx10. Cc: Shaoyun Liu Signed-off-by: Jay Cornwall Acked-by: Alex Deucher Reviewed-by: shaoyunl Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h | 56 +++++++++++++--------- .../gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm | 14 ++++++ 2 files changed, 48 insertions(+), 22 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h index c10e424dd1f5..8089bb37f393 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h @@ -680,7 +680,7 @@ static const uint32_t cwsr_trap_gfx9_hex[] = { }; static const uint32_t cwsr_trap_gfx10_hex[] = { - 0xbf820001, 0xbf8201b2, + 0xbf820001, 0xbf8201c0, 0xb0804004, 0xb978f802, 0x8a788678, 0xb971f803, 0x876eff71, 0x00000400, @@ -772,6 +772,13 @@ static const uint32_t cwsr_trap_gfx10_hex[] = { 0xb97bf801, 0xbefe037c, 0xbefc037a, 0xf4611efa, 0xf8000000, 0x807a847a, + 0xbefc037e, 0xb97bf814, + 0xbefe037c, 0xbefc037a, + 0xf4611efa, 0xf8000000, + 0x807a847a, 0xbefc037e, + 0xb97bf815, 0xbefe037c, + 0xbefc037a, 0xf4611efa, + 0xf8000000, 0x807a847a, 0xbefc037e, 0x8776ff7f, 0x04000000, 0xbeef0380, 0x886f6f76, 0xb97a2a05, @@ -897,7 +904,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = { 0xe0704000, 0x7a5d0000, 0x807c817c, 0x807aff7a, 0x00000080, 0xbf0a717c, - 0xbf85fff8, 0xbf820138, + 0xbf85fff8, 0xbf820141, 0xbef4037e, 0x8775ff7f, 0x0000ffff, 0x8875ff75, 0x00040000, 0xbef60380, @@ -1033,30 +1040,35 @@ static const uint32_t cwsr_trap_gfx10_hex[] = { 0x80788478, 0xf4211e7a, 0xf0000000, 0x80788478, 0xf4211cfa, 0xf0000000, + 0x80788478, 0xf4211bba, + 0xf0000000, 0x80788478, + 0xbf8cc07f, 0xb9eef814, + 0xf4211bba, 0xf0000000, 0x80788478, 0xbf8cc07f, - 0xbef2036d, 0x876dff72, - 0x0000ffff, 0xbefc036f, - 0xbefe037a, 0xbeff037b, - 0x876f71ff, 0x000003ff, - 0xb9ef4803, 0xb9f9f816, - 0x876f71ff, 0xfffff800, - 0x906f8b6f, 0xb9efa2c3, - 0xb9f3f801, 0x876fff72, - 0xfc000000, 0x906f9a6f, - 0x8f6f906f, 0xbef30380, + 0xb9eef815, 0xbef2036d, + 0x876dff72, 0x0000ffff, + 0xbefc036f, 0xbefe037a, + 0xbeff037b, 0x876f71ff, + 0x000003ff, 0xb9ef4803, + 0xb9f9f816, 0x876f71ff, + 0xfffff800, 0x906f8b6f, + 0xb9efa2c3, 0xb9f3f801, + 0x876fff72, 0xfc000000, + 0x906f9a6f, 0x8f6f906f, + 0xbef30380, 0x88736f73, + 0x876fff72, 0x02000000, + 0x906f996f, 0x8f6f8f6f, 0x88736f73, 0x876fff72, - 0x02000000, 0x906f996f, - 0x8f6f8f6f, 0x88736f73, - 0x876fff72, 0x01000000, - 0x906f986f, 0x8f6f996f, - 0x88736f73, 0x876fff70, - 0x00800000, 0x906f976f, - 0xb9f3f807, 0x87fe7e7e, - 0x87ea6a6a, 0xb9f0f802, - 0xbf8a0000, 0xbe80226c, - 0xbf810000, 0xbf9f0000, + 0x01000000, 0x906f986f, + 0x8f6f996f, 0x88736f73, + 0x876fff70, 0x00800000, + 0x906f976f, 0xb9f3f807, + 0x87fe7e7e, 0x87ea6a6a, + 0xb9f0f802, 0xbf8a0000, + 0xbe80226c, 0xbf810000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, + 0xbf9f0000, 0x00000000, }; static const uint32_t cwsr_trap_arcturus_hex[] = { 0xbf820001, 0xbf8202c4, diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm index be6f7d1847fa..fafdfd2ac610 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm @@ -132,6 +132,7 @@ var s_restore_tmp = ttmp6 var s_restore_mem_offset_save = s_restore_tmp var s_restore_m0 = s_restore_alloc_size var s_restore_mode = ttmp7 +var s_restore_flat_scratch = ttmp2 var s_restore_pc_lo = ttmp0 var s_restore_pc_hi = ttmp1 var 
s_restore_exec_lo = ttmp14 @@ -313,6 +314,12 @@ L_SAVE_HWREG: s_getreg_b32 s_save_m0, hwreg(HW_REG_MODE) write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset) + s_getreg_b32 s_save_m0, hwreg(HW_REG_SHADER_FLAT_SCRATCH_LO) + write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset) + + s_getreg_b32 s_save_m0, hwreg(HW_REG_SHADER_FLAT_SCRATCH_HI) + write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset) + /* the first wave in the threadgroup */ s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_FIRST_WAVE_MASK s_mov_b32 s_save_exec_hi, 0x0 @@ -824,9 +831,16 @@ L_RESTORE_HWREG: read_hwreg_from_mem(s_restore_trapsts, s_restore_buf_rsrc0, s_restore_mem_offset) read_hwreg_from_mem(s_restore_xnack_mask, s_restore_buf_rsrc0, s_restore_mem_offset) read_hwreg_from_mem(s_restore_mode, s_restore_buf_rsrc0, s_restore_mem_offset) + read_hwreg_from_mem(s_restore_flat_scratch, s_restore_buf_rsrc0, s_restore_mem_offset) + s_waitcnt lgkmcnt(0) + + s_setreg_b32 hwreg(HW_REG_SHADER_FLAT_SCRATCH_LO), s_restore_flat_scratch + read_hwreg_from_mem(s_restore_flat_scratch, s_restore_buf_rsrc0, s_restore_mem_offset) s_waitcnt lgkmcnt(0) //from now on, it is safe to restore STATUS and IB_STS + s_setreg_b32 hwreg(HW_REG_SHADER_FLAT_SCRATCH_HI), s_restore_flat_scratch + s_mov_b32 s_restore_tmp, s_restore_pc_hi s_and_b32 s_restore_pc_hi, s_restore_tmp, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS -- cgit v1.2.3 From 1faa3b805473d7f4197b943419781d9fd21e4352 Mon Sep 17 00:00:00 2001 From: Jay Cornwall Date: Sun, 28 Jul 2019 16:00:59 -0500 Subject: drm/amdkfd: Save/restore vcc on gfx10 VCC moved out of user SGPR allocation in gfx10. It's now stored in SGPRs 106-107. Also fixes incorrect SGPR read offsets. Cc: Shaoyun Liu Signed-off-by: Jay Cornwall Acked-by: Alex Deucher Reviewed-by: shaoyunl Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h | 452 ++++++++++----------- .../gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm | 34 +- 2 files changed, 243 insertions(+), 243 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h index 8089bb37f393..a8cf82d46109 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h @@ -680,7 +680,7 @@ static const uint32_t cwsr_trap_gfx9_hex[] = { }; static const uint32_t cwsr_trap_gfx10_hex[] = { - 0xbf820001, 0xbf8201c0, + 0xbf820001, 0xbf8201c1, 0xb0804004, 0xb978f802, 0x8a788678, 0xb971f803, 0x876eff71, 0x00000400, @@ -804,271 +804,271 @@ static const uint32_t cwsr_trap_gfx10_hex[] = { 0x00000060, 0xbf85ffea, 0xbe802f00, 0xbe822f02, 0xbe842f04, 0xbe862f06, - 0xbe882f08, 0xf469003a, - 0xfa000000, 0xf469013a, - 0xfa000010, 0xf465023a, - 0xfa000020, 0x8074c074, - 0x82758075, 0xbef40372, - 0xbefa0380, 0xbefe03c1, - 0x907c9973, 0x877c817c, - 0xbf06817c, 0xbf850002, - 0xbeff0380, 0xbf820002, - 0xbeff03c1, 0xbf82000b, + 0xbe882f08, 0xbe8a2f0a, + 0xf469003a, 0xfa000000, + 0xf469013a, 0xfa000010, + 0xf469023a, 0xfa000020, + 0x8074b074, 0x82758075, + 0xbef40372, 0xbefa0380, + 0xbefe03c1, 0x907c9973, + 0x877c817c, 0xbf06817c, + 0xbf850002, 0xbeff0380, + 0xbf820002, 0xbeff03c1, + 0xbf82000b, 0xbef603ff, + 0x01000000, 0xe0704000, + 0x7a5d0000, 0xe0704080, + 0x7a5d0100, 0xe0704100, + 0x7a5d0200, 0xe0704180, + 0x7a5d0300, 0xbf82000a, 0xbef603ff, 0x01000000, 0xe0704000, 0x7a5d0000, - 0xe0704080, 0x7a5d0100, - 0xe0704100, 0x7a5d0200, - 0xe0704180, 0x7a5d0300, - 
0xbf82000a, 0xbef603ff, - 0x01000000, 0xe0704000, - 0x7a5d0000, 0xe0704100, - 0x7a5d0100, 0xe0704200, - 0x7a5d0200, 0xe0704300, - 0x7a5d0300, 0xbefe03c1, + 0xe0704100, 0x7a5d0100, + 0xe0704200, 0x7a5d0200, + 0xe0704300, 0x7a5d0300, + 0xbefe03c1, 0x907c9973, + 0x877c817c, 0xbf06817c, + 0xbf850002, 0xbeff0380, + 0xbf820001, 0xbeff03c1, + 0xb9714306, 0x8771c171, + 0xbf840046, 0xbf8a0000, + 0x8776ff6f, 0x04000000, + 0xbf840042, 0x8f718671, + 0x8f718271, 0xbef60371, + 0xb97a2a05, 0x807a817a, 0x907c9973, 0x877c817c, 0xbf06817c, 0xbf850002, - 0xbeff0380, 0xbf820001, - 0xbeff03c1, 0xb9714306, - 0x8771c171, 0xbf840046, - 0xbf8a0000, 0x8776ff6f, - 0x04000000, 0xbf840042, - 0x8f718671, 0x8f718271, - 0xbef60371, 0xb97a2a05, - 0x807a817a, 0x907c9973, + 0x8f7a897a, 0xbf820001, + 0x8f7a8a7a, 0xb9761e06, + 0x8f768a76, 0x807a767a, + 0x807aff7a, 0x00000200, + 0x807aff7a, 0x00000080, + 0xbef603ff, 0x01000000, + 0xd7650000, 0x000100c1, + 0xd7660000, 0x000200c1, + 0x16000084, 0x907c9973, 0x877c817c, 0xbf06817c, - 0xbf850002, 0x8f7a897a, - 0xbf820001, 0x8f7a8a7a, - 0xb9761e06, 0x8f768a76, - 0x807a767a, 0x807aff7a, - 0x00000200, 0x807aff7a, - 0x00000080, 0xbef603ff, - 0x01000000, 0xd7650000, - 0x000100c1, 0xd7660000, - 0x000200c1, 0x16000084, - 0x907c9973, 0x877c817c, - 0xbf06817c, 0xbefc0380, - 0xbf850012, 0xbe8303ff, - 0x00000080, 0xbf800000, + 0xbefc0380, 0xbf850012, + 0xbe8303ff, 0x00000080, 0xbf800000, 0xbf800000, - 0xd8d80000, 0x01000000, - 0xbf8c0000, 0xe0704000, - 0x7a5d0100, 0x807c037c, - 0x807a037a, 0xd5250000, - 0x0001ff00, 0x00000080, - 0xbf0a717c, 0xbf85fff4, - 0xbf820011, 0xbe8303ff, - 0x00000100, 0xbf800000, + 0xbf800000, 0xd8d80000, + 0x01000000, 0xbf8c0000, + 0xe0704000, 0x7a5d0100, + 0x807c037c, 0x807a037a, + 0xd5250000, 0x0001ff00, + 0x00000080, 0xbf0a717c, + 0xbf85fff4, 0xbf820011, + 0xbe8303ff, 0x00000100, 0xbf800000, 0xbf800000, - 0xd8d80000, 0x01000000, - 0xbf8c0000, 0xe0704000, - 0x7a5d0100, 0x807c037c, - 0x807a037a, 0xd5250000, - 0x0001ff00, 0x00000100, - 0xbf0a717c, 0xbf85fff4, - 0xbefe03c1, 0x907c9973, - 0x877c817c, 0xbf06817c, - 0xbf850004, 0xbefa03ff, - 0x00000200, 0xbeff0380, - 0xbf820003, 0xbefa03ff, - 0x00000400, 0xbeff03c1, - 0xb9712a05, 0x80718171, - 0x8f718271, 0x907c9973, - 0x877c817c, 0xbf06817c, - 0xbf850017, 0xbef603ff, - 0x01000000, 0xbefc0384, - 0xbf0a717c, 0xbf840037, - 0x7e008700, 0x7e028701, - 0x7e048702, 0x7e068703, - 0xe0704000, 0x7a5d0000, - 0xe0704080, 0x7a5d0100, - 0xe0704100, 0x7a5d0200, - 0xe0704180, 0x7a5d0300, - 0x807c847c, 0x807aff7a, - 0x00000200, 0xbf0a717c, - 0xbf85ffef, 0xbf820025, + 0xbf800000, 0xd8d80000, + 0x01000000, 0xbf8c0000, + 0xe0704000, 0x7a5d0100, + 0x807c037c, 0x807a037a, + 0xd5250000, 0x0001ff00, + 0x00000100, 0xbf0a717c, + 0xbf85fff4, 0xbefe03c1, + 0x907c9973, 0x877c817c, + 0xbf06817c, 0xbf850004, + 0xbefa03ff, 0x00000200, + 0xbeff0380, 0xbf820003, + 0xbefa03ff, 0x00000400, + 0xbeff03c1, 0xb9712a05, + 0x80718171, 0x8f718271, + 0x907c9973, 0x877c817c, + 0xbf06817c, 0xbf850017, 0xbef603ff, 0x01000000, 0xbefc0384, 0xbf0a717c, - 0xbf840020, 0x7e008700, + 0xbf840037, 0x7e008700, 0x7e028701, 0x7e048702, 0x7e068703, 0xe0704000, - 0x7a5d0000, 0xe0704100, - 0x7a5d0100, 0xe0704200, - 0x7a5d0200, 0xe0704300, + 0x7a5d0000, 0xe0704080, + 0x7a5d0100, 0xe0704100, + 0x7a5d0200, 0xe0704180, 0x7a5d0300, 0x807c847c, - 0x807aff7a, 0x00000400, + 0x807aff7a, 0x00000200, 0xbf0a717c, 0xbf85ffef, - 0xb9711e06, 0x8771c171, - 0xbf84000c, 0x8f718371, - 0x80717c71, 0xbefe03c1, - 0xbeff0380, 0x7e008700, + 0xbf820025, 0xbef603ff, + 0x01000000, 0xbefc0384, + 0xbf0a717c, 
0xbf840020, + 0x7e008700, 0x7e028701, + 0x7e048702, 0x7e068703, 0xe0704000, 0x7a5d0000, - 0x807c817c, 0x807aff7a, - 0x00000080, 0xbf0a717c, - 0xbf85fff8, 0xbf820141, - 0xbef4037e, 0x8775ff7f, - 0x0000ffff, 0x8875ff75, - 0x00040000, 0xbef60380, - 0xbef703ff, 0x10807fac, - 0x8772ff7f, 0x08000000, - 0x90728372, 0x88777277, - 0x8772ff7f, 0x70000000, - 0x90728172, 0x88777277, - 0xb97302dc, 0x8f739973, - 0x8873737f, 0x8772ff7f, - 0x04000000, 0xbf840036, - 0xbefe03c1, 0x907c9973, - 0x877c817c, 0xbf06817c, - 0xbf850002, 0xbeff0380, - 0xbf820001, 0xbeff03c1, - 0xb96f4306, 0x876fc16f, - 0xbf84002b, 0x8f6f866f, - 0x8f6f826f, 0xbef6036f, - 0xb9782a05, 0x80788178, + 0xe0704100, 0x7a5d0100, + 0xe0704200, 0x7a5d0200, + 0xe0704300, 0x7a5d0300, + 0x807c847c, 0x807aff7a, + 0x00000400, 0xbf0a717c, + 0xbf85ffef, 0xb9711e06, + 0x8771c171, 0xbf84000c, + 0x8f718371, 0x80717c71, + 0xbefe03c1, 0xbeff0380, + 0x7e008700, 0xe0704000, + 0x7a5d0000, 0x807c817c, + 0x807aff7a, 0x00000080, + 0xbf0a717c, 0xbf85fff8, + 0xbf820141, 0xbef4037e, + 0x8775ff7f, 0x0000ffff, + 0x8875ff75, 0x00040000, + 0xbef60380, 0xbef703ff, + 0x10807fac, 0x8772ff7f, + 0x08000000, 0x90728372, + 0x88777277, 0x8772ff7f, + 0x70000000, 0x90728172, + 0x88777277, 0xb97302dc, + 0x8f739973, 0x8873737f, + 0x8772ff7f, 0x04000000, + 0xbf840036, 0xbefe03c1, 0x907c9973, 0x877c817c, 0xbf06817c, 0xbf850002, - 0x8f788978, 0xbf820001, - 0x8f788a78, 0xb9721e06, - 0x8f728a72, 0x80787278, - 0x8078ff78, 0x00000200, - 0x8078ff78, 0x00000080, - 0xbef603ff, 0x01000000, - 0x907c9973, 0x877c817c, - 0xbf06817c, 0xbefc0380, - 0xbf850009, 0xe0310000, - 0x781d0000, 0x807cff7c, - 0x00000080, 0x8078ff78, - 0x00000080, 0xbf0a6f7c, - 0xbf85fff8, 0xbf820008, + 0xbeff0380, 0xbf820001, + 0xbeff03c1, 0xb96f4306, + 0x876fc16f, 0xbf84002b, + 0x8f6f866f, 0x8f6f826f, + 0xbef6036f, 0xb9782a05, + 0x80788178, 0x907c9973, + 0x877c817c, 0xbf06817c, + 0xbf850002, 0x8f788978, + 0xbf820001, 0x8f788a78, + 0xb9721e06, 0x8f728a72, + 0x80787278, 0x8078ff78, + 0x00000200, 0x8078ff78, + 0x00000080, 0xbef603ff, + 0x01000000, 0x907c9973, + 0x877c817c, 0xbf06817c, + 0xbefc0380, 0xbf850009, 0xe0310000, 0x781d0000, - 0x807cff7c, 0x00000100, - 0x8078ff78, 0x00000100, + 0x807cff7c, 0x00000080, + 0x8078ff78, 0x00000080, 0xbf0a6f7c, 0xbf85fff8, - 0xbef80380, 0xbefe03c1, - 0x907c9973, 0x877c817c, - 0xbf06817c, 0xbf850002, - 0xbeff0380, 0xbf820001, - 0xbeff03c1, 0xb96f2a05, - 0x806f816f, 0x8f6f826f, - 0x907c9973, 0x877c817c, - 0xbf06817c, 0xbf850021, - 0xbef603ff, 0x01000000, - 0xbef20378, 0x8078ff78, - 0x00000200, 0xbefc0384, - 0xe0304000, 0x785d0000, - 0xe0304080, 0x785d0100, - 0xe0304100, 0x785d0200, - 0xe0304180, 0x785d0300, - 0xbf8c3f70, 0x7e008500, - 0x7e028501, 0x7e048502, - 0x7e068503, 0x807c847c, - 0x8078ff78, 0x00000200, - 0xbf0a6f7c, 0xbf85ffee, - 0xe0304000, 0x725d0000, - 0xe0304080, 0x725d0100, - 0xe0304100, 0x725d0200, - 0xe0304180, 0x725d0300, - 0xbf820031, 0xbef603ff, + 0xbf820008, 0xe0310000, + 0x781d0000, 0x807cff7c, + 0x00000100, 0x8078ff78, + 0x00000100, 0xbf0a6f7c, + 0xbf85fff8, 0xbef80380, + 0xbefe03c1, 0x907c9973, + 0x877c817c, 0xbf06817c, + 0xbf850002, 0xbeff0380, + 0xbf820001, 0xbeff03c1, + 0xb96f2a05, 0x806f816f, + 0x8f6f826f, 0x907c9973, + 0x877c817c, 0xbf06817c, + 0xbf850021, 0xbef603ff, 0x01000000, 0xbef20378, - 0x8078ff78, 0x00000400, + 0x8078ff78, 0x00000200, 0xbefc0384, 0xe0304000, - 0x785d0000, 0xe0304100, - 0x785d0100, 0xe0304200, - 0x785d0200, 0xe0304300, + 0x785d0000, 0xe0304080, + 0x785d0100, 0xe0304100, + 0x785d0200, 0xe0304180, 0x785d0300, 0xbf8c3f70, 0x7e008500, 
0x7e028501, 0x7e048502, 0x7e068503, 0x807c847c, 0x8078ff78, - 0x00000400, 0xbf0a6f7c, - 0xbf85ffee, 0xb96f1e06, - 0x876fc16f, 0xbf84000e, - 0x8f6f836f, 0x806f7c6f, - 0xbefe03c1, 0xbeff0380, + 0x00000200, 0xbf0a6f7c, + 0xbf85ffee, 0xe0304000, + 0x725d0000, 0xe0304080, + 0x725d0100, 0xe0304100, + 0x725d0200, 0xe0304180, + 0x725d0300, 0xbf820031, + 0xbef603ff, 0x01000000, + 0xbef20378, 0x8078ff78, + 0x00000400, 0xbefc0384, 0xe0304000, 0x785d0000, + 0xe0304100, 0x785d0100, + 0xe0304200, 0x785d0200, + 0xe0304300, 0x785d0300, 0xbf8c3f70, 0x7e008500, - 0x807c817c, 0x8078ff78, - 0x00000080, 0xbf0a6f7c, - 0xbf85fff7, 0xbeff03c1, - 0xe0304000, 0x725d0000, - 0xe0304100, 0x725d0100, - 0xe0304200, 0x725d0200, - 0xe0304300, 0x725d0300, - 0xb9782a05, 0x80788178, - 0x907c9973, 0x877c817c, - 0xbf06817c, 0xbf850002, - 0x8f788978, 0xbf820001, - 0x8f788a78, 0xb9721e06, - 0x8f728a72, 0x80787278, - 0x8078ff78, 0x00000200, - 0x80f8ff78, 0x00000058, - 0x80f88878, 0xbef603ff, + 0x7e028501, 0x7e048502, + 0x7e068503, 0x807c847c, + 0x8078ff78, 0x00000400, + 0xbf0a6f7c, 0xbf85ffee, + 0xb96f1e06, 0x876fc16f, + 0xbf84000e, 0x8f6f836f, + 0x806f7c6f, 0xbefe03c1, + 0xbeff0380, 0xe0304000, + 0x785d0000, 0xbf8c3f70, + 0x7e008500, 0x807c817c, + 0x8078ff78, 0x00000080, + 0xbf0a6f7c, 0xbf85fff7, + 0xbeff03c1, 0xe0304000, + 0x725d0000, 0xe0304100, + 0x725d0100, 0xe0304200, + 0x725d0200, 0xe0304300, + 0x725d0300, 0xb9782a05, + 0x80788178, 0x907c9973, + 0x877c817c, 0xbf06817c, + 0xbf850002, 0x8f788978, + 0xbf820001, 0x8f788a78, + 0xb9721e06, 0x8f728a72, + 0x80787278, 0x8078ff78, + 0x00000200, 0x80f8ff78, + 0x00000050, 0xbef603ff, 0x01000000, 0xbefc03ff, - 0x0000006a, 0xf425003a, - 0xf0000000, 0x80f8a078, - 0xbf8cc07f, 0x80fc827c, + 0x0000006c, 0x80f89078, + 0xf429003a, 0xf0000000, + 0xbf8cc07f, 0x80fc847c, 0xbf800000, 0xbe803100, + 0xbe823102, 0x80f8a078, 0xf42d003a, 0xf0000000, - 0x80f8c078, 0xbf8cc07f, - 0x80fc887c, 0xbf800000, - 0xbe803100, 0xbe823102, - 0xbe843104, 0xbe863106, + 0xbf8cc07f, 0x80fc887c, + 0xbf800000, 0xbe803100, + 0xbe823102, 0xbe843104, + 0xbe863106, 0x80f8c078, 0xf431003a, 0xf0000000, - 0x80f8c078, 0xbf8cc07f, - 0x80fc907c, 0xbf800000, - 0xbe803100, 0xbe823102, - 0xbe843104, 0xbe863106, - 0xbe883108, 0xbe8a310a, - 0xbe8c310c, 0xbe8e310e, - 0xbf06807c, 0xbf84fff0, - 0xb9782a05, 0x80788178, - 0x907c9973, 0x877c817c, - 0xbf06817c, 0xbf850002, - 0x8f788978, 0xbf820001, - 0x8f788a78, 0xb9721e06, - 0x8f728a72, 0x80787278, - 0x8078ff78, 0x00000200, - 0xbef603ff, 0x01000000, - 0xf4211bfa, 0xf0000000, - 0x80788478, 0xf4211b3a, + 0xbf8cc07f, 0x80fc907c, + 0xbf800000, 0xbe803100, + 0xbe823102, 0xbe843104, + 0xbe863106, 0xbe883108, + 0xbe8a310a, 0xbe8c310c, + 0xbe8e310e, 0xbf06807c, + 0xbf84fff0, 0xb9782a05, + 0x80788178, 0x907c9973, + 0x877c817c, 0xbf06817c, + 0xbf850002, 0x8f788978, + 0xbf820001, 0x8f788a78, + 0xb9721e06, 0x8f728a72, + 0x80787278, 0x8078ff78, + 0x00000200, 0xbef603ff, + 0x01000000, 0xf4211bfa, 0xf0000000, 0x80788478, - 0xf4211b7a, 0xf0000000, - 0x80788478, 0xf4211eba, + 0xf4211b3a, 0xf0000000, + 0x80788478, 0xf4211b7a, 0xf0000000, 0x80788478, - 0xf4211efa, 0xf0000000, - 0x80788478, 0xf4211c3a, + 0xf4211eba, 0xf0000000, + 0x80788478, 0xf4211efa, 0xf0000000, 0x80788478, - 0xf4211c7a, 0xf0000000, - 0x80788478, 0xf4211e7a, + 0xf4211c3a, 0xf0000000, + 0x80788478, 0xf4211c7a, 0xf0000000, 0x80788478, - 0xf4211cfa, 0xf0000000, - 0x80788478, 0xf4211bba, + 0xf4211e7a, 0xf0000000, + 0x80788478, 0xf4211cfa, 0xf0000000, 0x80788478, - 0xbf8cc07f, 0xb9eef814, 0xf4211bba, 0xf0000000, 0x80788478, 0xbf8cc07f, - 
0xb9eef815, 0xbef2036d, - 0x876dff72, 0x0000ffff, - 0xbefc036f, 0xbefe037a, - 0xbeff037b, 0x876f71ff, - 0x000003ff, 0xb9ef4803, - 0xb9f9f816, 0x876f71ff, - 0xfffff800, 0x906f8b6f, - 0xb9efa2c3, 0xb9f3f801, - 0x876fff72, 0xfc000000, - 0x906f9a6f, 0x8f6f906f, - 0xbef30380, 0x88736f73, - 0x876fff72, 0x02000000, - 0x906f996f, 0x8f6f8f6f, + 0xb9eef814, 0xf4211bba, + 0xf0000000, 0x80788478, + 0xbf8cc07f, 0xb9eef815, + 0xbef2036d, 0x876dff72, + 0x0000ffff, 0xbefc036f, + 0xbefe037a, 0xbeff037b, + 0x876f71ff, 0x000003ff, + 0xb9ef4803, 0xb9f9f816, + 0x876f71ff, 0xfffff800, + 0x906f8b6f, 0xb9efa2c3, + 0xb9f3f801, 0x876fff72, + 0xfc000000, 0x906f9a6f, + 0x8f6f906f, 0xbef30380, 0x88736f73, 0x876fff72, - 0x01000000, 0x906f986f, - 0x8f6f996f, 0x88736f73, - 0x876fff70, 0x00800000, - 0x906f976f, 0xb9f3f807, - 0x87fe7e7e, 0x87ea6a6a, - 0xb9f0f802, 0xbf8a0000, - 0xbe80226c, 0xbf810000, + 0x02000000, 0x906f996f, + 0x8f6f8f6f, 0x88736f73, + 0x876fff72, 0x01000000, + 0x906f986f, 0x8f6f996f, + 0x88736f73, 0x876fff70, + 0x00800000, 0x906f976f, + 0xb9f3f807, 0x87fe7e7e, + 0x87ea6a6a, 0xb9f0f802, + 0xbf8a0000, 0xbe80226c, + 0xbf810000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, - 0xbf9f0000, 0x00000000, }; static const uint32_t cwsr_trap_arcturus_hex[] = { 0xbf820001, 0xbf8202c4, diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm index fafdfd2ac610..35986219ce5f 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm @@ -87,7 +87,7 @@ var S_SAVE_PC_HI_FIRST_REPLAY_MASK = 0x02000000 var S_SAVE_PC_HI_REPLAY_W64H_SHIFT = 24 var S_SAVE_PC_HI_REPLAY_W64H_MASK = 0x01000000 -var s_sgpr_save_num = 106 +var s_sgpr_save_num = 108 var s_save_spi_init_lo = exec_lo var s_save_spi_init_hi = exec_hi @@ -357,13 +357,14 @@ L_SAVE_SGPR_LOOP: s_cmp_lt_u32 m0, 96 //scc = (m0 < first 96 SGPR) ? 1 : 0 s_cbranch_scc1 L_SAVE_SGPR_LOOP //first 96 SGPR save is complete? 
- //save the rest 10 SGPR + //save the rest 12 SGPR s_movrels_b64 s0, s0 //s0 = s[0+m0], s1 = s[1+m0] s_movrels_b64 s2, s2 //s2 = s[2+m0], s3 = s[3+m0] s_movrels_b64 s4, s4 //s4 = s[4+m0], s5 = s[5+m0] s_movrels_b64 s6, s6 //s6 = s[6+m0], s7 = s[7+m0] s_movrels_b64 s8, s8 //s8 = s[8+m0], s9 = s[9+m0] - write_10sgpr_to_mem(s0, s_save_buf_rsrc0, s_save_mem_offset) + s_movrels_b64 s10, s10 //s10 = s[10+m0], s11 = s[11+m0] + write_12sgpr_to_mem(s0, s_save_buf_rsrc0, s_save_mem_offset) // restore s_save_buf_rsrc0,1 s_mov_b32 s_save_buf_rsrc0, s_save_xnack_mask @@ -766,26 +767,25 @@ L_RESTORE_SGPR: get_svgpr_size_bytes(s_restore_tmp) s_add_u32 s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp s_add_u32 s_restore_mem_offset, s_restore_mem_offset, get_sgpr_size_bytes() - s_sub_u32 s_restore_mem_offset, s_restore_mem_offset, 22*4 //s106~s127 is not saved - s_sub_u32 s_restore_mem_offset, s_restore_mem_offset, 2*4 // restore SGPR from S[n] to S[0], by 2 sgprs group + s_sub_u32 s_restore_mem_offset, s_restore_mem_offset, 20*4 //s108~s127 is not saved s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes s_mov_b32 m0, s_sgpr_save_num - read_2sgpr_from_mem(s0, s_restore_buf_rsrc0, s_restore_mem_offset) - + read_4sgpr_from_mem(s0, s_restore_buf_rsrc0, s_restore_mem_offset) s_waitcnt lgkmcnt(0) - s_sub_u32 m0, m0, 2 // Restore from S[n] to S[0] + s_sub_u32 m0, m0, 4 // Restore from S[0] to S[104] s_nop 0 // hazard SALU M0=> S_MOVREL s_movreld_b64 s0, s0 //s[0+m0] = s0 + s_movreld_b64 s2, s2 read_8sgpr_from_mem(s0, s_restore_buf_rsrc0, s_restore_mem_offset) s_waitcnt lgkmcnt(0) - s_sub_u32 m0, m0, 8 // Restore from S[n] to S[0] + s_sub_u32 m0, m0, 8 // Restore from S[0] to S[96] s_nop 0 // hazard SALU M0=> S_MOVREL s_movreld_b64 s0, s0 //s[0+m0] = s0 @@ -903,11 +903,11 @@ function write_16sgpr_to_mem(s, s_rsrc, s_mem_offset) s_addc_u32 s_rsrc[1], s_rsrc[1], 0x0 end -function write_10sgpr_to_mem(s, s_rsrc, s_mem_offset) +function write_12sgpr_to_mem(s, s_rsrc, s_mem_offset) s_buffer_store_dwordx4 s[0], s_rsrc, 0 glc:1 s_buffer_store_dwordx4 s[4], s_rsrc, 16 glc:1 - s_buffer_store_dwordx2 s[8], s_rsrc, 32 glc:1 - s_add_u32 s_rsrc[0], s_rsrc[0], 4*16 + s_buffer_store_dwordx4 s[8], s_rsrc, 32 glc:1 + s_add_u32 s_rsrc[0], s_rsrc[0], 4*12 s_addc_u32 s_rsrc[1], s_rsrc[1], 0x0 end @@ -918,18 +918,18 @@ function read_hwreg_from_mem(s, s_rsrc, s_mem_offset) end function read_16sgpr_from_mem(s, s_rsrc, s_mem_offset) - s_buffer_load_dwordx16 s, s_rsrc, s_mem_offset glc:1 s_sub_u32 s_mem_offset, s_mem_offset, 4*16 + s_buffer_load_dwordx16 s, s_rsrc, s_mem_offset glc:1 end function read_8sgpr_from_mem(s, s_rsrc, s_mem_offset) + s_sub_u32 s_mem_offset, s_mem_offset, 4*8 s_buffer_load_dwordx8 s, s_rsrc, s_mem_offset glc:1 - s_sub_u32 s_mem_offset, s_mem_offset, 4*16 end -function read_2sgpr_from_mem(s, s_rsrc, s_mem_offset) - s_buffer_load_dwordx2 s, s_rsrc, s_mem_offset glc:1 - s_sub_u32 s_mem_offset, s_mem_offset, 4*8 +function read_4sgpr_from_mem(s, s_rsrc, s_mem_offset) + s_sub_u32 s_mem_offset, s_mem_offset, 4*4 + s_buffer_load_dwordx4 s, s_rsrc, s_mem_offset glc:1 end -- cgit v1.2.3 From 4c35e77865a9037c32b0354663d23c33b08ae188 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Wed, 10 Jul 2019 09:29:57 +0800 Subject: drm/amd/powerplay: add smcdpminfo table v4_6 support New smcdpminfo table used in arcturus. 
Signed-off-by: Evan Quan Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/atomfirmware.h | 86 ++++++++++++++++++++++++++++++ 1 file changed, 86 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h index 24cfe84d7322..e88541d67aa0 100644 --- a/drivers/gpu/drm/amd/include/atomfirmware.h +++ b/drivers/gpu/drm/amd/include/atomfirmware.h @@ -1789,6 +1789,92 @@ struct atom_smc_dpm_info_v4_5 }; +struct atom_smc_dpm_info_v4_6 +{ + struct atom_common_table_header table_header; + // section: board parameters + uint32_t i2c_padding[3]; // old i2c control are moved to new area + + uint16_t maxvoltagestepgfx; // in mv(q2) max voltage step that smu will request. multiple steps are taken if voltage change exceeds this value. + uint16_t maxvoltagestepsoc; // in mv(q2) max voltage step that smu will request. multiple steps are taken if voltage change exceeds this value. + + uint8_t vddgfxvrmapping; // use vr_mapping* bitfields + uint8_t vddsocvrmapping; // use vr_mapping* bitfields + uint8_t vddmemvrmapping; // use vr_mapping* bitfields + uint8_t boardvrmapping; // use vr_mapping* bitfields + + uint8_t gfxulvphasesheddingmask; // set this to 1 to set psi0/1 to 1 in ulv mode + uint8_t externalsensorpresent; // external rdi connected to tmon (aka temp in) + uint8_t padding8_v[2]; + + // telemetry settings + uint16_t gfxmaxcurrent; // in amps + uint8_t gfxoffset; // in amps + uint8_t padding_telemetrygfx; + + uint16_t socmaxcurrent; // in amps + uint8_t socoffset; // in amps + uint8_t padding_telemetrysoc; + + uint16_t memmaxcurrent; // in amps + uint8_t memoffset; // in amps + uint8_t padding_telemetrymem; + + uint16_t boardmaxcurrent; // in amps + uint8_t boardoffset; // in amps + uint8_t padding_telemetryboardinput; + + // gpio settings + uint8_t vr0hotgpio; // gpio pin configured for vr0 hot event + uint8_t vr0hotpolarity; // gpio polarity for vr0 hot event + uint8_t vr1hotgpio; // gpio pin configured for vr1 hot event + uint8_t vr1hotpolarity; // gpio polarity for vr1 hot event + + // gfxclk pll spread spectrum + uint8_t pllgfxclkspreadenabled; // on or off + uint8_t pllgfxclkspreadpercent; // q4.4 + uint16_t pllgfxclkspreadfreq; // khz + + // uclk spread spectrum + uint8_t uclkspreadenabled; // on or off + uint8_t uclkspreadpercent; // q4.4 + uint16_t uclkspreadfreq; // khz + + // fclk spread spectrum + uint8_t fclkspreadenabled; // on or off + uint8_t fclkspreadpercent; // q4.4 + uint16_t fclkspreadfreq; // khz + + + // gfxclk fll spread spectrum + uint8_t fllgfxclkspreadenabled; // on or off + uint8_t fllgfxclkspreadpercent; // q4.4 + uint16_t fllgfxclkspreadfreq; // khz + + // i2c controller structure + struct smudpm_i2c_controller_config_v2 i2ccontrollers[8]; + + // memory section + uint32_t memorychannelenabled; // for dram use only, max 32 channels enabled bit mask. + + uint8_t drambitwidth; // for dram use only. 
see dram bit width type defines + uint8_t paddingmem[3]; + + // total board power + uint16_t totalboardpower; //only needed for tcp estimated case, where tcp = tgp+total board power + uint16_t boardpadding; + + // section: xgmi training + uint8_t xgmilinkspeed[4]; + uint8_t xgmilinkwidth[4]; + + uint16_t xgmifclkfreq[4]; + uint16_t xgmisocvoltage[4]; + + // reserved + uint32_t boardreserved[10]; +}; + /* *************************************************************************** Data Table asic_profiling_info structure -- cgit v1.2.3 From 7c8bcaf4085af03365e86f11dba6a8363ef87c07 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Tue, 30 Jul 2019 22:50:14 -0500 Subject: drm/amd/powerplay: add SW SMU interface for dumping pptable out (v2) This is especially useful in the early bring-up phase. v2: disabled by default (Alex) Signed-off-by: Evan Quan Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 2 ++ drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 4 +++- 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 4a2ccc5fae73..fd0485b29429 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -1064,6 +1064,8 @@ static int smu_smc_table_hw_init(struct smu_context *smu, return ret; } + /* smu_dump_pptable(smu); */ + /* * Copy pptable bo in the vram to smc with SMU MSGs such as * SetDriverDramAddr and TransferTableDram2Smu. diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index fcd0db362977..76edb2ccf160 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h @@ -449,6 +449,7 @@ struct pptable_funcs { int (*set_default_od_settings)(struct smu_context *smu, bool initialize); int (*set_performance_level)(struct smu_context *smu, enum amd_dpm_forced_level level); int (*display_disable_memory_clock_switch)(struct smu_context *smu, bool disable_memory_clock_switch); + void (*dump_pptable)(struct smu_context *smu); }; struct smu_funcs @@ -742,7 +743,8 @@ struct smu_funcs ((smu)->funcs->baco_reset? (smu)->funcs->baco_reset((smu)) : 0) #define smu_asic_set_performance_level(smu, level) \ ((smu)->ppt_funcs->set_performance_level? (smu)->ppt_funcs->set_performance_level((smu), (level)) : -EINVAL); - +#define smu_dump_pptable(smu) \ + ((smu)->ppt_funcs->dump_pptable ? (smu)->ppt_funcs->dump_pptable((smu)) : 0) extern int smu_get_atom_data_table(struct smu_context *smu, uint32_t table, uint16_t *size, uint8_t *frev, uint8_t *crev, -- cgit v1.2.3 From 22808306f2b2704be87378bf58e009a61598a5a7 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Fri, 12 Jul 2019 16:24:34 +0800 Subject: drm/amd/powerplay: update smu11_driver_if_arcturus.h It describes how the driver should interface with the SMU on Arcturus.
Signed-off-by: Evan Quan Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- .../amd/powerplay/inc/smu11_driver_if_arcturus.h | 58 ++++++++++++---------- 1 file changed, 33 insertions(+), 25 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h index 7a9969e075d4..c7a7953b52b7 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h @@ -21,16 +21,15 @@ * */ - #ifndef SMU11_DRIVER_IF_ARCTURUS_H #define SMU11_DRIVER_IF_ARCTURUS_H // *** IMPORTANT *** // SMU TEAM: Always increment the interface version if // any structure is changed in this file -#define SMU11_DRIVER_IF_VERSION 0x06 +#define SMU11_DRIVER_IF_VERSION 0x08 -#define PPTABLE_ARCTURUS_SMU_VERSION 3 +#define PPTABLE_ARCTURUS_SMU_VERSION 4 #define NUM_GFXCLK_DPM_LEVELS 16 #define NUM_VCLK_DPM_LEVELS 8 @@ -40,6 +39,7 @@ #define NUM_UCLK_DPM_LEVELS 4 #define NUM_FCLK_DPM_LEVELS 8 #define NUM_XGMI_LEVELS 2 +#define NUM_XGMI_PSTATE_LEVELS 4 #define MAX_GFXCLK_DPM_LEVEL (NUM_GFXCLK_DPM_LEVELS - 1) #define MAX_VCLK_DPM_LEVEL (NUM_VCLK_DPM_LEVELS - 1) @@ -49,6 +49,7 @@ #define MAX_UCLK_DPM_LEVEL (NUM_UCLK_DPM_LEVELS - 1) #define MAX_FCLK_DPM_LEVEL (NUM_FCLK_DPM_LEVELS - 1) #define MAX_XGMI_LEVEL (NUM_XGMI_LEVELS - 1) +#define MAX_XGMI_PSTATE_LEVEL (NUM_XGMI_PSTATE_LEVELS - 1) // Feature Control Defines // DPM @@ -213,8 +214,8 @@ #define WORKLOAD_PPLIB_COUNT 5 //XGMI performance states -#define XGMI_STATE_D0 1 -#define XGMI_STATE_D3 0 +#define XGMI_STATE_D0 1 +#define XGMI_STATE_D3 0 #define NUM_I2C_CONTROLLERS 8 @@ -314,7 +315,6 @@ typedef struct { } SwI2cRequest_t; // SW I2C Request Table //D3HOT sequences -//sequence codes from spec: atlvp4p01.amd.com:1677@//gpu/doc/soc_arch/spec/feature/BACO/Navi/Navi2x/ typedef enum { BACO_SEQUENCE, MSR_SEQUENCE, @@ -368,6 +368,12 @@ typedef enum { PPCLK_COUNT, } PPCLK_e; +typedef enum { + POWER_SOURCE_AC, + POWER_SOURCE_DC, + POWER_SOURCE_COUNT, +} POWER_SOURCE_e; + typedef enum { TEMP_EDGE, TEMP_HOTSPOT, @@ -568,14 +574,9 @@ typedef struct { uint16_t DcBtcGb[AVFS_VOLTAGE_COUNT]; // mV Q2 - uint16_t SsFmin[10]; // PPtable value to function similar to VFTFmin for SS Curve; Size is PPCLK_COUNT rounded to nearest multiple of 2 - // SECTION: XGMI - uint8_t XgmiLinkSpeed [NUM_XGMI_LEVELS]; - uint8_t XgmiLinkWidth [NUM_XGMI_LEVELS]; - - uint16_t XgmiFclkFreq [NUM_XGMI_LEVELS]; - uint16_t XgmiSocVoltage [NUM_XGMI_LEVELS]; + uint8_t XgmiDpmPstates[NUM_XGMI_LEVELS]; // 2 DPM states, high and low. 0-P0, 1-P1, 2-P2, 3-P3. 
+ uint8_t XgmiDpmSpare[2]; // Temperature Dependent Vmin uint16_t VDDGFX_TVmin; //Celcius @@ -683,6 +684,13 @@ typedef struct { uint16_t TotalBoardPower; //Only needed for TCP Estimated case, where TCP = TGP+Total Board Power uint16_t BoardPadding; + // SECTION: XGMI Training + uint8_t XgmiLinkSpeed [NUM_XGMI_PSTATE_LEVELS]; + uint8_t XgmiLinkWidth [NUM_XGMI_PSTATE_LEVELS]; + + uint16_t XgmiFclkFreq [NUM_XGMI_PSTATE_LEVELS]; + uint16_t XgmiSocVoltage [NUM_XGMI_PSTATE_LEVELS]; + uint32_t BoardReserved[10]; // Padding for MMHUB - do not modify this @@ -698,7 +706,7 @@ typedef struct { uint16_t GfxActivityLpfTau; uint16_t UclkActivityLpfTau; - uint16_t Padding; + uint16_t SocketPowerLpfTau; // Padding - ignore uint32_t MmHubPadding[8]; // SMU internal use @@ -715,7 +723,7 @@ typedef struct { uint8_t CurrGfxVoltageOffset ; uint8_t CurrMemVidOffset ; uint8_t Padding8 ; - uint16_t CurrSocketPower ; + uint16_t AverageSocketPower ; uint16_t TemperatureEdge ; uint16_t TemperatureHotspot ; uint16_t TemperatureHBM ; @@ -724,23 +732,23 @@ typedef struct { uint16_t TemperatureVrMem ; uint32_t ThrottlerStatus ; + uint16_t CurrFanSpeed ; + uint16_t Padding16; + + uint32_t Padding[4]; + // Padding - ignore uint32_t MmHubPadding[7]; // SMU internal use } SmuMetrics_t; typedef struct { - uint16_t avgPsmCount[45]; - uint16_t minPsmCount[45]; - float avgPsmVoltage[45]; - float minPsmVoltage[45]; - - uint16_t avgScsPsmCount; - uint16_t minScsPsmCount; - float avgScsPsmVoltage; - float minScsPsmVoltage; + uint16_t avgPsmCount[75]; + uint16_t minPsmCount[75]; + float avgPsmVoltage[75]; + float minPsmVoltage[75]; - uint32_t MmHubPadding[6]; // SMU internal use + uint32_t MmHubPadding[3]; // SMU internal use } AvfsDebugTable_t; typedef struct { -- cgit v1.2.3 From c8893d5ce716459db532c72b9814b355abe23ac1 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Fri, 12 Jul 2019 16:28:02 +0800 Subject: drm/amd/powerplay: update arcturus_ppsmc.h Correct header and fix typo. Signed-off-by: Evan Quan Reviewed-by: Le Ma Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h index b86bb2bc8a31..78e5927b7711 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h +++ b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h @@ -1,5 +1,5 @@ /* - * Copyright 2018 Advanced Micro Devices, Inc. + * Copyright 2019 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -83,7 +83,7 @@ //Power Gating #define PPSMC_MSG_PowerUpVcn0 0x28 -#define PPSMC_MSG_PowerDownVcn01 0x29 +#define PPSMC_MSG_PowerDownVcn0 0x29 #define PPSMC_MSG_PowerUpVcn1 0x2A #define PPSMC_MSG_PowerDownVcn1 0x2B -- cgit v1.2.3 From a94235af11b3f7d33eed56c7aef1864ea6e16db4 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 29 Jul 2019 12:43:28 -0500 Subject: drm/amd/powerplay: update arcturus_ppt.c/h V3 Arcturus ASIC specific powerplay interfaces. 
V2: correct SMU msg naming drop unnecessary debugs V3: rebase (Alex) Signed-off-by: Evan Quan Reviewed-by: Kevin Wang Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/arcturus_ppt.c | 1193 ++++++++++++++++++++++++- drivers/gpu/drm/amd/powerplay/arcturus_ppt.h | 44 + drivers/gpu/drm/amd/powerplay/inc/smu_types.h | 2 +- 3 files changed, 1237 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c index 7d680f33ce3c..b284ebcbe545 100644 --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c @@ -33,12 +33,22 @@ #include "atom.h" #include "power_state.h" #include "arcturus_ppt.h" +#include "smu_v11_0_pptable.h" #include "arcturus_ppsmc.h" #include "nbio/nbio_7_4_sh_mask.h" +#define CTF_OFFSET_EDGE 5 +#define CTF_OFFSET_HOTSPOT 5 +#define CTF_OFFSET_HBM 5 + #define MSG_MAP(msg, index) \ [SMU_MSG_##msg] = {1, (index)} +#define SMU_FEATURES_LOW_MASK 0x00000000FFFFFFFF +#define SMU_FEATURES_LOW_SHIFT 0 +#define SMU_FEATURES_HIGH_MASK 0xFFFFFFFF00000000 +#define SMU_FEATURES_HIGH_SHIFT 32 + static struct smu_11_0_cmn2aisc_mapping arcturus_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(TestMessage, PPSMC_MSG_TestMessage), MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion), @@ -80,7 +90,7 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_message_map[SMU_MSG_MAX_COUNT] MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit), MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit), MSG_MAP(PowerUpVcn0, PPSMC_MSG_PowerUpVcn0), - MSG_MAP(PowerDownVcn01, PPSMC_MSG_PowerDownVcn01), + MSG_MAP(PowerDownVcn0, PPSMC_MSG_PowerDownVcn0), MSG_MAP(PowerUpVcn1, PPSMC_MSG_PowerUpVcn1), MSG_MAP(PowerDownVcn1, PPSMC_MSG_PowerDownVcn1), MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload), @@ -99,6 +109,65 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_message_map[SMU_MSG_MAX_COUNT] MSG_MAP(SetMemoryChannelEnable, PPSMC_MSG_SetMemoryChannelEnable), }; +static struct smu_11_0_cmn2aisc_mapping arcturus_clk_map[SMU_CLK_COUNT] = { + CLK_MAP(GFXCLK, PPCLK_GFXCLK), + CLK_MAP(SCLK, PPCLK_GFXCLK), + CLK_MAP(SOCCLK, PPCLK_SOCCLK), + CLK_MAP(FCLK, PPCLK_FCLK), + CLK_MAP(UCLK, PPCLK_UCLK), + CLK_MAP(MCLK, PPCLK_UCLK), + CLK_MAP(DCLK, PPCLK_DCLK), + CLK_MAP(VCLK, PPCLK_VCLK), +}; + +static struct smu_11_0_cmn2aisc_mapping arcturus_feature_mask_map[SMU_FEATURE_COUNT] = { + FEA_MAP(DPM_PREFETCHER), + FEA_MAP(DPM_GFXCLK), + FEA_MAP(DPM_UCLK), + FEA_MAP(DPM_SOCCLK), + FEA_MAP(DPM_MP0CLK), + FEA_MAP(DS_GFXCLK), + FEA_MAP(DS_SOCCLK), + FEA_MAP(DS_LCLK), + FEA_MAP(DS_UCLK), + FEA_MAP(GFX_ULV), + FEA_MAP(RSMU_SMN_CG), + FEA_MAP(PPT), + FEA_MAP(TDC), + FEA_MAP(APCC_PLUS), + FEA_MAP(VR0HOT), + FEA_MAP(VR1HOT), + FEA_MAP(FW_CTF), + FEA_MAP(FAN_CONTROL), + FEA_MAP(THERMAL), + FEA_MAP(OUT_OF_BAND_MONITOR), + FEA_MAP(TEMP_DEPENDENT_VMIN), +}; + +static struct smu_11_0_cmn2aisc_mapping arcturus_table_map[SMU_TABLE_COUNT] = { + TAB_MAP(PPTABLE), + TAB_MAP(AVFS), + TAB_MAP(AVFS_PSM_DEBUG), + TAB_MAP(AVFS_FUSE_OVERRIDE), + TAB_MAP(PMSTATUSLOG), + TAB_MAP(SMU_METRICS), + TAB_MAP(DRIVER_SMU_CONFIG), + TAB_MAP(OVERDRIVE), +}; + +static struct smu_11_0_cmn2aisc_mapping arcturus_pwr_src_map[SMU_POWER_SOURCE_COUNT] = { + PWR_MAP(AC), + PWR_MAP(DC), +}; + +static struct smu_11_0_cmn2aisc_mapping arcturus_workload_map[PP_SMC_POWER_PROFILE_COUNT] = { + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT, WORKLOAD_PPLIB_DEFAULT_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, 
WORKLOAD_PPLIB_POWER_SAVING_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_CUSTOM_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT), +}; + static int arcturus_get_smu_msg_index(struct smu_context *smc, uint32_t index) { struct smu_11_0_cmn2aisc_mapping mapping; @@ -115,12 +184,1134 @@ static int arcturus_get_smu_msg_index(struct smu_context *smc, uint32_t index) return mapping.map_to; } +static int arcturus_get_smu_clk_index(struct smu_context *smc, uint32_t index) +{ + struct smu_11_0_cmn2aisc_mapping mapping; + + if (index >= SMU_CLK_COUNT) + return -EINVAL; + + mapping = arcturus_clk_map[index]; + if (!(mapping.valid_mapping)) { + pr_warn("Unsupported SMU clk: %d\n", index); + return -EINVAL; + } + + return mapping.map_to; +} + +static int arcturus_get_smu_feature_index(struct smu_context *smc, uint32_t index) +{ + struct smu_11_0_cmn2aisc_mapping mapping; + + if (index >= SMU_FEATURE_COUNT) + return -EINVAL; + + mapping = arcturus_feature_mask_map[index]; + if (!(mapping.valid_mapping)) { + pr_warn("Unsupported SMU feature: %d\n", index); + return -EINVAL; + } + + return mapping.map_to; +} + +static int arcturus_get_smu_table_index(struct smu_context *smc, uint32_t index) +{ + struct smu_11_0_cmn2aisc_mapping mapping; + + if (index >= SMU_TABLE_COUNT) + return -EINVAL; + + mapping = arcturus_table_map[index]; + if (!(mapping.valid_mapping)) { + pr_warn("Unsupported SMU table: %d\n", index); + return -EINVAL; + } + + return mapping.map_to; +} + +static int arcturus_get_pwr_src_index(struct smu_context *smc, uint32_t index) +{ + struct smu_11_0_cmn2aisc_mapping mapping; + + if (index >= SMU_POWER_SOURCE_COUNT) + return -EINVAL; + + mapping = arcturus_pwr_src_map[index]; + if (!(mapping.valid_mapping)) { + pr_warn("Unsupported SMU power source: %d\n", index); + return -EINVAL; + } + + return mapping.map_to; +} + + +static int arcturus_get_workload_type(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile) +{ + struct smu_11_0_cmn2aisc_mapping mapping; + + if (profile > PP_SMC_POWER_PROFILE_CUSTOM) + return -EINVAL; + + mapping = arcturus_workload_map[profile]; + if (!(mapping.valid_mapping)) { + pr_warn("Unsupported SMU power source: %d\n", profile); + return -EINVAL; + } + + return mapping.map_to; +} + +static int arcturus_tables_init(struct smu_context *smu, struct smu_table *tables) +{ + SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + + SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + + SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + + return 0; +} + +static int arcturus_allocate_dpm_context(struct smu_context *smu) +{ + struct smu_dpm_context *smu_dpm = &smu->smu_dpm; + + if (smu_dpm->dpm_context) + return -EINVAL; + + smu_dpm->dpm_context = kzalloc(sizeof(struct arcturus_dpm_table), + GFP_KERNEL); + if (!smu_dpm->dpm_context) + return -ENOMEM; + + if (smu_dpm->golden_dpm_context) + return -EINVAL; + + smu_dpm->golden_dpm_context = kzalloc(sizeof(struct arcturus_dpm_table), + GFP_KERNEL); + if (!smu_dpm->golden_dpm_context) + return -ENOMEM; + + smu_dpm->dpm_context_size = sizeof(struct arcturus_dpm_table); + + smu_dpm->dpm_current_power_state = kzalloc(sizeof(struct smu_power_state), + GFP_KERNEL); + if (!smu_dpm->dpm_current_power_state) + return -ENOMEM; + + smu_dpm->dpm_request_power_state = 
kzalloc(sizeof(struct smu_power_state), + GFP_KERNEL); + if (!smu_dpm->dpm_request_power_state) + return -ENOMEM; + + return 0; +} + +#define FEATURE_MASK(feature) (1ULL << feature) +static int +arcturus_get_allowed_feature_mask(struct smu_context *smu, + uint32_t *feature_mask, uint32_t num) +{ + if (num > 2) + return -EINVAL; + + memset(feature_mask, 0, sizeof(uint32_t) * num); + + *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT) + | FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT); + + return 0; +} + +static int +arcturus_set_single_dpm_table(struct smu_context *smu, + struct arcturus_single_dpm_table *single_dpm_table, + PPCLK_e clk_id) +{ + int ret = 0; + uint32_t i, num_of_levels = 0, clk; + + ret = smu_send_smc_msg_with_param(smu, + SMU_MSG_GetDpmFreqByIndex, + (clk_id << 16 | 0xFF)); + if (ret) { + pr_err("[%s] failed to get dpm levels!\n", __func__); + return ret; + } + + smu_read_smc_arg(smu, &num_of_levels); + if (!num_of_levels) { + pr_err("[%s] number of clk levels is invalid!\n", __func__); + return -EINVAL; + } + + single_dpm_table->count = num_of_levels; + for (i = 0; i < num_of_levels; i++) { + ret = smu_send_smc_msg_with_param(smu, + SMU_MSG_GetDpmFreqByIndex, + (clk_id << 16 | i)); + if (ret) { + pr_err("[%s] failed to get dpm freq by index!\n", __func__); + return ret; + } + smu_read_smc_arg(smu, &clk); + if (!clk) { + pr_err("[%s] clk value is invalid!\n", __func__); + return -EINVAL; + } + single_dpm_table->dpm_levels[i].value = clk; + single_dpm_table->dpm_levels[i].enabled = true; + } + return 0; +} + +static void arcturus_init_single_dpm_state(struct arcturus_dpm_state *dpm_state) +{ + dpm_state->soft_min_level = 0x0; + dpm_state->soft_max_level = 0xffff; + dpm_state->hard_min_level = 0x0; + dpm_state->hard_max_level = 0xffff; +} + +static int arcturus_set_default_dpm_table(struct smu_context *smu) +{ + int ret; + + struct smu_dpm_context *smu_dpm = &smu->smu_dpm; + struct arcturus_dpm_table *dpm_table = NULL; + struct arcturus_single_dpm_table *single_dpm_table; + + dpm_table = smu_dpm->dpm_context; + + /* socclk */ + single_dpm_table = &(dpm_table->soc_table); + if (smu_feature_is_enabled(smu, FEATURE_DPM_SOCCLK_BIT)) { + ret = arcturus_set_single_dpm_table(smu, single_dpm_table, + PPCLK_SOCCLK); + if (ret) { + pr_err("[%s] failed to get socclk dpm levels!\n", __func__); + return ret; + } + } else { + single_dpm_table->count = 1; + single_dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100; + } + arcturus_init_single_dpm_state(&(single_dpm_table->dpm_state)); + + /* gfxclk */ + single_dpm_table = &(dpm_table->gfx_table); + if (smu_feature_is_enabled(smu, FEATURE_DPM_GFXCLK_BIT)) { + ret = arcturus_set_single_dpm_table(smu, single_dpm_table, + PPCLK_GFXCLK); + if (ret) { + pr_err("[SetupDefaultDpmTable] failed to get gfxclk dpm levels!"); + return ret; + } + } else { + single_dpm_table->count = 1; + single_dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100; + } + arcturus_init_single_dpm_state(&(single_dpm_table->dpm_state)); + + /* memclk */ + single_dpm_table = &(dpm_table->mem_table); + if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT)) { + ret = arcturus_set_single_dpm_table(smu, single_dpm_table, + PPCLK_UCLK); + if (ret) { + pr_err("[SetupDefaultDpmTable] failed to get memclk dpm levels!"); + return ret; + } + } else { + single_dpm_table->count = 1; + single_dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100; + } + arcturus_init_single_dpm_state(&(single_dpm_table->dpm_state)); + + 
/* fclk */ + single_dpm_table = &(dpm_table->fclk_table); + if (smu_feature_is_enabled(smu,FEATURE_DPM_FCLK_BIT)) { + ret = arcturus_set_single_dpm_table(smu, single_dpm_table, + PPCLK_FCLK); + if (ret) { + pr_err("[SetupDefaultDpmTable] failed to get fclk dpm levels!"); + return ret; + } + } else { + single_dpm_table->count = 0; + } + arcturus_init_single_dpm_state(&(single_dpm_table->dpm_state)); + + memcpy(smu_dpm->golden_dpm_context, dpm_table, + sizeof(struct arcturus_dpm_table)); + + return 0; +} + +static int arcturus_check_powerplay_table(struct smu_context *smu) +{ + return 0; +} + +static int arcturus_store_powerplay_table(struct smu_context *smu) +{ + struct smu_11_0_powerplay_table *powerplay_table = NULL; + struct smu_table_context *table_context = &smu->smu_table; + int ret = 0; + + if (!table_context->power_play_table) + return -EINVAL; + + powerplay_table = table_context->power_play_table; + + memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable, + sizeof(PPTable_t)); + + table_context->thermal_controller_type = powerplay_table->thermal_controller_type; + + return ret; +} + +static int arcturus_append_powerplay_table(struct smu_context *smu) +{ + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *smc_pptable = table_context->driver_pptable; + struct atom_smc_dpm_info_v4_6 *smc_dpm_table; + int index, ret; + + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, + smc_dpm_info); + + ret = smu_get_atom_data_table(smu, index, NULL, NULL, NULL, + (uint8_t **)&smc_dpm_table); + if (ret) + return ret; + + pr_info("smc_dpm_info table revision(format.content): %d.%d\n", + smc_dpm_table->table_header.format_revision, + smc_dpm_table->table_header.content_revision); + + if ((smc_dpm_table->table_header.format_revision == 4) && + (smc_dpm_table->table_header.content_revision == 6)) + memcpy(&smc_pptable->MaxVoltageStepGfx, + &smc_dpm_table->maxvoltagestepgfx, + sizeof(*smc_dpm_table) - offsetof(struct atom_smc_dpm_info_v4_6, maxvoltagestepgfx)); + + return 0; +} + +static int arcturus_run_btc_afll(struct smu_context *smu) +{ + return smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc); +} + +static int arcturus_populate_umd_state_clk(struct smu_context *smu) +{ + struct smu_dpm_context *smu_dpm = &smu->smu_dpm; + struct arcturus_dpm_table *dpm_table = NULL; + struct arcturus_single_dpm_table *gfx_table = NULL; + struct arcturus_single_dpm_table *mem_table = NULL; + + dpm_table = smu_dpm->dpm_context; + gfx_table = &(dpm_table->gfx_table); + mem_table = &(dpm_table->mem_table); + + smu->pstate_sclk = gfx_table->dpm_levels[0].value; + smu->pstate_mclk = mem_table->dpm_levels[0].value; + + if (gfx_table->count > ARCTURUS_UMD_PSTATE_GFXCLK_LEVEL && + mem_table->count > ARCTURUS_UMD_PSTATE_MCLK_LEVEL) { + smu->pstate_sclk = gfx_table->dpm_levels[ARCTURUS_UMD_PSTATE_GFXCLK_LEVEL].value; + smu->pstate_mclk = mem_table->dpm_levels[ARCTURUS_UMD_PSTATE_MCLK_LEVEL].value; + } + + smu->pstate_sclk = smu->pstate_sclk * 100; + smu->pstate_mclk = smu->pstate_mclk * 100; + + return 0; +} + +static int arcturus_get_clk_table(struct smu_context *smu, + struct pp_clock_levels_with_latency *clocks, + struct arcturus_single_dpm_table *dpm_table) +{ + int i, count; + + count = (dpm_table->count > MAX_NUM_CLOCKS) ? 
MAX_NUM_CLOCKS : dpm_table->count; + clocks->num_levels = count; + + for (i = 0; i < count; i++) { + clocks->data[i].clocks_in_khz = + dpm_table->dpm_levels[i].value * 1000; + clocks->data[i].latency_in_us = 0; + } + + return 0; +} + +static int arcturus_print_clk_levels(struct smu_context *smu, + enum smu_clk_type type, char *buf) +{ + int i, now, size = 0; + int ret = 0; + struct pp_clock_levels_with_latency clocks; + struct arcturus_single_dpm_table *single_dpm_table; + struct smu_dpm_context *smu_dpm = &smu->smu_dpm; + struct arcturus_dpm_table *dpm_table = NULL; + + dpm_table = smu_dpm->dpm_context; + + switch (type) { + case SMU_SCLK: + ret = smu_get_current_clk_freq(smu, SMU_GFXCLK, &now); + if (ret) { + pr_err("Attempt to get current gfx clk Failed!"); + return ret; + } + + single_dpm_table = &(dpm_table->gfx_table); + ret = arcturus_get_clk_table(smu, &clocks, single_dpm_table); + if (ret) { + pr_err("Attempt to get gfx clk levels Failed!"); + return ret; + } + + for (i = 0; i < clocks.num_levels; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", i, + clocks.data[i].clocks_in_khz / 1000, + (clocks.data[i].clocks_in_khz == now * 10) + ? "*" : ""); + break; + + case SMU_MCLK: + ret = smu_get_current_clk_freq(smu, SMU_UCLK, &now); + if (ret) { + pr_err("Attempt to get current mclk Failed!"); + return ret; + } + + single_dpm_table = &(dpm_table->mem_table); + ret = arcturus_get_clk_table(smu, &clocks, single_dpm_table); + if (ret) { + pr_err("Attempt to get memory clk levels Failed!"); + return ret; + } + + for (i = 0; i < clocks.num_levels; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, clocks.data[i].clocks_in_khz / 1000, + (clocks.data[i].clocks_in_khz == now * 10) + ? "*" : ""); + break; + + case SMU_SOCCLK: + ret = smu_get_current_clk_freq(smu, SMU_SOCCLK, &now); + if (ret) { + pr_err("Attempt to get current socclk Failed!"); + return ret; + } + + single_dpm_table = &(dpm_table->soc_table); + ret = arcturus_get_clk_table(smu, &clocks, single_dpm_table); + if (ret) { + pr_err("Attempt to get socclk levels Failed!"); + return ret; + } + + for (i = 0; i < clocks.num_levels; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, clocks.data[i].clocks_in_khz / 1000, + (clocks.data[i].clocks_in_khz == now * 10) + ? "*" : ""); + break; + + case SMU_FCLK: + ret = smu_get_current_clk_freq(smu, SMU_FCLK, &now); + if (ret) { + pr_err("Attempt to get current fclk Failed!"); + return ret; + } + + single_dpm_table = &(dpm_table->fclk_table); + for (i = 0; i < single_dpm_table->count; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, single_dpm_table->dpm_levels[i].value, + (single_dpm_table->dpm_levels[i].value == now / 100) + ? "*" : ""); + break; + + default: + break; + } + + return size; +} + +static int arcturus_upload_dpm_level(struct smu_context *smu, bool max, + uint32_t feature_mask) +{ + struct arcturus_dpm_table *dpm_table; + struct arcturus_single_dpm_table *single_dpm_table; + uint32_t freq; + int ret = 0; + + dpm_table = smu->smu_dpm.dpm_context; + if (smu_feature_is_enabled(smu, FEATURE_DPM_GFXCLK_BIT) && + (feature_mask & FEATURE_DPM_GFXCLK_MASK)) { + single_dpm_table = &(dpm_table->gfx_table); + freq = max ? single_dpm_table->dpm_state.soft_max_level : + single_dpm_table->dpm_state.soft_min_level; + ret = smu_send_smc_msg_with_param(smu, + (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq), + (PPCLK_GFXCLK << 16) | (freq & 0xffff)); + if (ret) { + pr_err("Failed to set soft %s gfxclk !\n", + max ? 
"max" : "min"); + return ret; + } + } + + return ret; +} + +static int arcturus_force_clk_levels(struct smu_context *smu, + enum smu_clk_type type, uint32_t mask) +{ + struct arcturus_dpm_table *dpm_table; + struct arcturus_single_dpm_table *single_dpm_table; + uint32_t soft_min_level, soft_max_level; + int ret = 0; + + mutex_lock(&(smu->mutex)); + + soft_min_level = mask ? (ffs(mask) - 1) : 0; + soft_max_level = mask ? (fls(mask) - 1) : 0; + + dpm_table = smu->smu_dpm.dpm_context; + + switch (type) { + case SMU_SCLK: + single_dpm_table = &(dpm_table->gfx_table); + + if (soft_max_level >= single_dpm_table->count) { + pr_err("Clock level specified %d is over max allowed %d\n", + soft_max_level, single_dpm_table->count - 1); + ret = -EINVAL; + break; + } + + single_dpm_table->dpm_state.soft_min_level = + single_dpm_table->dpm_levels[soft_min_level].value; + single_dpm_table->dpm_state.soft_max_level = + single_dpm_table->dpm_levels[soft_max_level].value; + + ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_GFXCLK_MASK); + if (ret) { + pr_err("Failed to upload boot level to lowest!\n"); + break; + } + + ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_GFXCLK_MASK); + if (ret) + pr_err("Failed to upload dpm max level to highest!\n"); + + break; + + case SMU_MCLK: + single_dpm_table = &(dpm_table->mem_table); + + if (soft_max_level >= single_dpm_table->count) { + pr_err("Clock level specified %d is over max allowed %d\n", + soft_max_level, single_dpm_table->count - 1); + ret = -EINVAL; + break; + } + + single_dpm_table->dpm_state.soft_min_level = + single_dpm_table->dpm_levels[soft_min_level].value; + single_dpm_table->dpm_state.soft_max_level = + single_dpm_table->dpm_levels[soft_max_level].value; + + ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_UCLK_MASK); + if (ret) { + pr_err("Failed to upload boot level to lowest!\n"); + break; + } + + ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_UCLK_MASK); + if (ret) + pr_err("Failed to upload dpm max level to highest!\n"); + + break; + + case SMU_SOCCLK: + single_dpm_table = &(dpm_table->soc_table); + + if (soft_max_level >= single_dpm_table->count) { + pr_err("Clock level specified %d is over max allowed %d\n", + soft_max_level, single_dpm_table->count - 1); + ret = -EINVAL; + break; + } + + single_dpm_table->dpm_state.soft_min_level = + single_dpm_table->dpm_levels[soft_min_level].value; + single_dpm_table->dpm_state.soft_max_level = + single_dpm_table->dpm_levels[soft_max_level].value; + + ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_SOCCLK_MASK); + if (ret) { + pr_err("Failed to upload boot level to lowest!\n"); + break; + } + + ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_SOCCLK_MASK); + if (ret) + pr_err("Failed to upload dpm max level to highest!\n"); + + break; + + case SMU_FCLK: + single_dpm_table = &(dpm_table->fclk_table); + + if (soft_max_level >= single_dpm_table->count) { + pr_err("Clock level specified %d is over max allowed %d\n", + soft_max_level, single_dpm_table->count - 1); + ret = -EINVAL; + break; + } + + single_dpm_table->dpm_state.soft_min_level = + single_dpm_table->dpm_levels[soft_min_level].value; + single_dpm_table->dpm_state.soft_max_level = + single_dpm_table->dpm_levels[soft_max_level].value; + + ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_FCLK_MASK); + if (ret) { + pr_err("Failed to upload boot level to lowest!\n"); + break; + } + + ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_FCLK_MASK); + if (ret) + pr_err("Failed to upload dpm max level to 
highest!\n"); + + break; + + default: + break; + } + + mutex_unlock(&(smu->mutex)); + return ret; +} + +static const struct smu_temperature_range arcturus_thermal_policy[] = +{ + {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000}, + { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000}, +}; + +static int arcturus_get_thermal_temperature_range(struct smu_context *smu, + struct smu_temperature_range *range) +{ + + PPTable_t *pptable = smu->smu_table.driver_pptable; + + if (!range) + return -EINVAL; + + memcpy(range, &arcturus_thermal_policy[0], sizeof(struct smu_temperature_range)); + + range->max = pptable->TedgeLimit * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE) * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + range->hotspot_crit_max = pptable->ThotspotLimit * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT) * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + range->mem_crit_max = pptable->TmemLimit * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + range->mem_emergency_max = (pptable->TmemLimit + CTF_OFFSET_HBM)* + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + + + return 0; +} + +static void arcturus_dump_pptable(struct smu_context *smu) +{ + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *pptable = table_context->driver_pptable; + int i; + + pr_info("Dumped PPTable:\n"); + + pr_info("Version = 0x%08x\n", pptable->Version); + + pr_info("FeaturesToRun[0] = 0x%08x\n", pptable->FeaturesToRun[0]); + pr_info("FeaturesToRun[1] = 0x%08x\n", pptable->FeaturesToRun[1]); + + for (i = 0; i < PPT_THROTTLER_COUNT; i++) { + pr_info("SocketPowerLimitAc[%d] = %d\n", i, pptable->SocketPowerLimitAc[i]); + pr_info("SocketPowerLimitAcTau[%d] = %d\n", i, pptable->SocketPowerLimitAcTau[i]); + } + + pr_info("TdcLimitSoc = %d\n", pptable->TdcLimitSoc); + pr_info("TdcLimitSocTau = %d\n", pptable->TdcLimitSocTau); + pr_info("TdcLimitGfx = %d\n", pptable->TdcLimitGfx); + pr_info("TdcLimitGfxTau = %d\n", pptable->TdcLimitGfxTau); + + pr_info("TedgeLimit = %d\n", pptable->TedgeLimit); + pr_info("ThotspotLimit = %d\n", pptable->ThotspotLimit); + pr_info("TmemLimit = %d\n", pptable->TmemLimit); + pr_info("Tvr_gfxLimit = %d\n", pptable->Tvr_gfxLimit); + pr_info("Tvr_memLimit = %d\n", pptable->Tvr_memLimit); + pr_info("Tvr_socLimit = %d\n", pptable->Tvr_socLimit); + pr_info("FitLimit = %d\n", pptable->FitLimit); + + pr_info("PpmPowerLimit = %d\n", pptable->PpmPowerLimit); + pr_info("PpmTemperatureThreshold = %d\n", pptable->PpmTemperatureThreshold); + + pr_info("ThrottlerControlMask = %d\n", pptable->ThrottlerControlMask); + + pr_info("UlvVoltageOffsetGfx = %d\n", pptable->UlvVoltageOffsetGfx); + pr_info("UlvPadding = 0x%08x\n", pptable->UlvPadding); + + pr_info("UlvGfxclkBypass = %d\n", pptable->UlvGfxclkBypass); + pr_info("Padding234[0] = 0x%02x\n", pptable->Padding234[0]); + pr_info("Padding234[1] = 0x%02x\n", pptable->Padding234[1]); + pr_info("Padding234[2] = 0x%02x\n", pptable->Padding234[2]); + + pr_info("MinVoltageGfx = %d\n", pptable->MinVoltageGfx); + pr_info("MinVoltageSoc = %d\n", pptable->MinVoltageSoc); + pr_info("MaxVoltageGfx = %d\n", pptable->MaxVoltageGfx); + pr_info("MaxVoltageSoc = %d\n", pptable->MaxVoltageSoc); + + pr_info("LoadLineResistanceGfx = %d\n", pptable->LoadLineResistanceGfx); + pr_info("LoadLineResistanceSoc = %d\n", pptable->LoadLineResistanceSoc); + + pr_info("[PPCLK_GFXCLK]\n" + " .VoltageMode = 0x%02x\n" + " 
.SnapToDiscrete = 0x%02x\n" + " .NumDiscreteLevels = 0x%02x\n" + " .padding = 0x%02x\n" + " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" + " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" + " .SsFmin = 0x%04x\n" + " .Padding_16 = 0x%04x\n", + pptable->DpmDescriptor[PPCLK_GFXCLK].VoltageMode, + pptable->DpmDescriptor[PPCLK_GFXCLK].SnapToDiscrete, + pptable->DpmDescriptor[PPCLK_GFXCLK].NumDiscreteLevels, + pptable->DpmDescriptor[PPCLK_GFXCLK].padding, + pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.m, + pptable->DpmDescriptor[PPCLK_GFXCLK].ConversionToAvfsClk.b, + pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.a, + pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.b, + pptable->DpmDescriptor[PPCLK_GFXCLK].SsCurve.c, + pptable->DpmDescriptor[PPCLK_GFXCLK].SsFmin, + pptable->DpmDescriptor[PPCLK_GFXCLK].Padding16); + + pr_info("[PPCLK_VCLK]\n" + " .VoltageMode = 0x%02x\n" + " .SnapToDiscrete = 0x%02x\n" + " .NumDiscreteLevels = 0x%02x\n" + " .padding = 0x%02x\n" + " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" + " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" + " .SsFmin = 0x%04x\n" + " .Padding_16 = 0x%04x\n", + pptable->DpmDescriptor[PPCLK_VCLK].VoltageMode, + pptable->DpmDescriptor[PPCLK_VCLK].SnapToDiscrete, + pptable->DpmDescriptor[PPCLK_VCLK].NumDiscreteLevels, + pptable->DpmDescriptor[PPCLK_VCLK].padding, + pptable->DpmDescriptor[PPCLK_VCLK].ConversionToAvfsClk.m, + pptable->DpmDescriptor[PPCLK_VCLK].ConversionToAvfsClk.b, + pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.a, + pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.b, + pptable->DpmDescriptor[PPCLK_VCLK].SsCurve.c, + pptable->DpmDescriptor[PPCLK_VCLK].SsFmin, + pptable->DpmDescriptor[PPCLK_VCLK].Padding16); + + pr_info("[PPCLK_DCLK]\n" + " .VoltageMode = 0x%02x\n" + " .SnapToDiscrete = 0x%02x\n" + " .NumDiscreteLevels = 0x%02x\n" + " .padding = 0x%02x\n" + " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" + " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" + " .SsFmin = 0x%04x\n" + " .Padding_16 = 0x%04x\n", + pptable->DpmDescriptor[PPCLK_DCLK].VoltageMode, + pptable->DpmDescriptor[PPCLK_DCLK].SnapToDiscrete, + pptable->DpmDescriptor[PPCLK_DCLK].NumDiscreteLevels, + pptable->DpmDescriptor[PPCLK_DCLK].padding, + pptable->DpmDescriptor[PPCLK_DCLK].ConversionToAvfsClk.m, + pptable->DpmDescriptor[PPCLK_DCLK].ConversionToAvfsClk.b, + pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.a, + pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.b, + pptable->DpmDescriptor[PPCLK_DCLK].SsCurve.c, + pptable->DpmDescriptor[PPCLK_DCLK].SsFmin, + pptable->DpmDescriptor[PPCLK_DCLK].Padding16); + + pr_info("[PPCLK_SOCCLK]\n" + " .VoltageMode = 0x%02x\n" + " .SnapToDiscrete = 0x%02x\n" + " .NumDiscreteLevels = 0x%02x\n" + " .padding = 0x%02x\n" + " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" + " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" + " .SsFmin = 0x%04x\n" + " .Padding_16 = 0x%04x\n", + pptable->DpmDescriptor[PPCLK_SOCCLK].VoltageMode, + pptable->DpmDescriptor[PPCLK_SOCCLK].SnapToDiscrete, + pptable->DpmDescriptor[PPCLK_SOCCLK].NumDiscreteLevels, + pptable->DpmDescriptor[PPCLK_SOCCLK].padding, + pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.m, + pptable->DpmDescriptor[PPCLK_SOCCLK].ConversionToAvfsClk.b, + pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.a, + pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.b, + pptable->DpmDescriptor[PPCLK_SOCCLK].SsCurve.c, + pptable->DpmDescriptor[PPCLK_SOCCLK].SsFmin, + pptable->DpmDescriptor[PPCLK_SOCCLK].Padding16); + + pr_info("[PPCLK_UCLK]\n" + " .VoltageMode = 0x%02x\n" + " .SnapToDiscrete = 0x%02x\n" + " 
.NumDiscreteLevels = 0x%02x\n" + " .padding = 0x%02x\n" + " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" + " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" + " .SsFmin = 0x%04x\n" + " .Padding_16 = 0x%04x\n", + pptable->DpmDescriptor[PPCLK_UCLK].VoltageMode, + pptable->DpmDescriptor[PPCLK_UCLK].SnapToDiscrete, + pptable->DpmDescriptor[PPCLK_UCLK].NumDiscreteLevels, + pptable->DpmDescriptor[PPCLK_UCLK].padding, + pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.m, + pptable->DpmDescriptor[PPCLK_UCLK].ConversionToAvfsClk.b, + pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.a, + pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.b, + pptable->DpmDescriptor[PPCLK_UCLK].SsCurve.c, + pptable->DpmDescriptor[PPCLK_UCLK].SsFmin, + pptable->DpmDescriptor[PPCLK_UCLK].Padding16); + + pr_info("[PPCLK_FCLK]\n" + " .VoltageMode = 0x%02x\n" + " .SnapToDiscrete = 0x%02x\n" + " .NumDiscreteLevels = 0x%02x\n" + " .padding = 0x%02x\n" + " .ConversionToAvfsClk{m = 0x%08x b = 0x%08x}\n" + " .SsCurve {a = 0x%08x b = 0x%08x c = 0x%08x}\n" + " .SsFmin = 0x%04x\n" + " .Padding_16 = 0x%04x\n", + pptable->DpmDescriptor[PPCLK_FCLK].VoltageMode, + pptable->DpmDescriptor[PPCLK_FCLK].SnapToDiscrete, + pptable->DpmDescriptor[PPCLK_FCLK].NumDiscreteLevels, + pptable->DpmDescriptor[PPCLK_FCLK].padding, + pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.m, + pptable->DpmDescriptor[PPCLK_FCLK].ConversionToAvfsClk.b, + pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.a, + pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.b, + pptable->DpmDescriptor[PPCLK_FCLK].SsCurve.c, + pptable->DpmDescriptor[PPCLK_FCLK].SsFmin, + pptable->DpmDescriptor[PPCLK_FCLK].Padding16); + + + pr_info("FreqTableGfx\n"); + for (i = 0; i < NUM_GFXCLK_DPM_LEVELS; i++) + pr_info(" .[%02d] = %d\n", i, pptable->FreqTableGfx[i]); + + pr_info("FreqTableVclk\n"); + for (i = 0; i < NUM_VCLK_DPM_LEVELS; i++) + pr_info(" .[%02d] = %d\n", i, pptable->FreqTableVclk[i]); + + pr_info("FreqTableDclk\n"); + for (i = 0; i < NUM_DCLK_DPM_LEVELS; i++) + pr_info(" .[%02d] = %d\n", i, pptable->FreqTableDclk[i]); + + pr_info("FreqTableSocclk\n"); + for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) + pr_info(" .[%02d] = %d\n", i, pptable->FreqTableSocclk[i]); + + pr_info("FreqTableUclk\n"); + for (i = 0; i < NUM_UCLK_DPM_LEVELS; i++) + pr_info(" .[%02d] = %d\n", i, pptable->FreqTableUclk[i]); + + pr_info("FreqTableFclk\n"); + for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) + pr_info(" .[%02d] = %d\n", i, pptable->FreqTableFclk[i]); + + pr_info("Mp0clkFreq\n"); + for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++) + pr_info(" .[%d] = %d\n", i, pptable->Mp0clkFreq[i]); + + pr_info("Mp0DpmVoltage\n"); + for (i = 0; i < NUM_MP0CLK_DPM_LEVELS; i++) + pr_info(" .[%d] = %d\n", i, pptable->Mp0DpmVoltage[i]); + + pr_info("GfxclkFidle = 0x%x\n", pptable->GfxclkFidle); + pr_info("GfxclkSlewRate = 0x%x\n", pptable->GfxclkSlewRate); + pr_info("Padding567[0] = 0x%x\n", pptable->Padding567[0]); + pr_info("Padding567[1] = 0x%x\n", pptable->Padding567[1]); + pr_info("Padding567[2] = 0x%x\n", pptable->Padding567[2]); + pr_info("Padding567[3] = 0x%x\n", pptable->Padding567[3]); + pr_info("GfxclkDsMaxFreq = %d\n", pptable->GfxclkDsMaxFreq); + pr_info("GfxclkSource = 0x%x\n", pptable->GfxclkSource); + pr_info("Padding456 = 0x%x\n", pptable->Padding456); + + pr_info("EnableTdpm = %d\n", pptable->EnableTdpm); + pr_info("TdpmHighHystTemperature = %d\n", pptable->TdpmHighHystTemperature); + pr_info("TdpmLowHystTemperature = %d\n", pptable->TdpmLowHystTemperature); + pr_info("GfxclkFreqHighTempLimit = %d\n", 
pptable->GfxclkFreqHighTempLimit); + + pr_info("FanStopTemp = %d\n", pptable->FanStopTemp); + pr_info("FanStartTemp = %d\n", pptable->FanStartTemp); + + pr_info("FanGainEdge = %d\n", pptable->FanGainEdge); + pr_info("FanGainHotspot = %d\n", pptable->FanGainHotspot); + pr_info("FanGainVrGfx = %d\n", pptable->FanGainVrGfx); + pr_info("FanGainVrSoc = %d\n", pptable->FanGainVrSoc); + pr_info("FanGainVrMem = %d\n", pptable->FanGainVrMem); + pr_info("FanGainHbm = %d\n", pptable->FanGainHbm); + + pr_info("FanPwmMin = %d\n", pptable->FanPwmMin); + pr_info("FanAcousticLimitRpm = %d\n", pptable->FanAcousticLimitRpm); + pr_info("FanThrottlingRpm = %d\n", pptable->FanThrottlingRpm); + pr_info("FanMaximumRpm = %d\n", pptable->FanMaximumRpm); + pr_info("FanTargetTemperature = %d\n", pptable->FanTargetTemperature); + pr_info("FanTargetGfxclk = %d\n", pptable->FanTargetGfxclk); + pr_info("FanZeroRpmEnable = %d\n", pptable->FanZeroRpmEnable); + pr_info("FanTachEdgePerRev = %d\n", pptable->FanTachEdgePerRev); + pr_info("FanTempInputSelect = %d\n", pptable->FanTempInputSelect); + + pr_info("FuzzyFan_ErrorSetDelta = %d\n", pptable->FuzzyFan_ErrorSetDelta); + pr_info("FuzzyFan_ErrorRateSetDelta = %d\n", pptable->FuzzyFan_ErrorRateSetDelta); + pr_info("FuzzyFan_PwmSetDelta = %d\n", pptable->FuzzyFan_PwmSetDelta); + pr_info("FuzzyFan_Reserved = %d\n", pptable->FuzzyFan_Reserved); + + pr_info("OverrideAvfsGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_GFX]); + pr_info("OverrideAvfsGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->OverrideAvfsGb[AVFS_VOLTAGE_SOC]); + pr_info("Padding8_Avfs[0] = %d\n", pptable->Padding8_Avfs[0]); + pr_info("Padding8_Avfs[1] = %d\n", pptable->Padding8_Avfs[1]); + + pr_info("dBtcGbGfxPll{a = 0x%x b = 0x%x c = 0x%x}\n", + pptable->dBtcGbGfxPll.a, + pptable->dBtcGbGfxPll.b, + pptable->dBtcGbGfxPll.c); + pr_info("dBtcGbGfxAfll{a = 0x%x b = 0x%x c = 0x%x}\n", + pptable->dBtcGbGfxAfll.a, + pptable->dBtcGbGfxAfll.b, + pptable->dBtcGbGfxAfll.c); + pr_info("dBtcGbSoc{a = 0x%x b = 0x%x c = 0x%x}\n", + pptable->dBtcGbSoc.a, + pptable->dBtcGbSoc.b, + pptable->dBtcGbSoc.c); + + pr_info("qAgingGb[AVFS_VOLTAGE_GFX]{m = 0x%x b = 0x%x}\n", + pptable->qAgingGb[AVFS_VOLTAGE_GFX].m, + pptable->qAgingGb[AVFS_VOLTAGE_GFX].b); + pr_info("qAgingGb[AVFS_VOLTAGE_SOC]{m = 0x%x b = 0x%x}\n", + pptable->qAgingGb[AVFS_VOLTAGE_SOC].m, + pptable->qAgingGb[AVFS_VOLTAGE_SOC].b); + + pr_info("qStaticVoltageOffset[AVFS_VOLTAGE_GFX]{a = 0x%x b = 0x%x c = 0x%x}\n", + pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].a, + pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].b, + pptable->qStaticVoltageOffset[AVFS_VOLTAGE_GFX].c); + pr_info("qStaticVoltageOffset[AVFS_VOLTAGE_SOC]{a = 0x%x b = 0x%x c = 0x%x}\n", + pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].a, + pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].b, + pptable->qStaticVoltageOffset[AVFS_VOLTAGE_SOC].c); + + pr_info("DcTol[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_GFX]); + pr_info("DcTol[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcTol[AVFS_VOLTAGE_SOC]); + + pr_info("DcBtcEnabled[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_GFX]); + pr_info("DcBtcEnabled[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcEnabled[AVFS_VOLTAGE_SOC]); + pr_info("Padding8_GfxBtc[0] = 0x%x\n", pptable->Padding8_GfxBtc[0]); + pr_info("Padding8_GfxBtc[1] = 0x%x\n", pptable->Padding8_GfxBtc[1]); + + pr_info("DcBtcMin[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMin[AVFS_VOLTAGE_GFX]); + pr_info("DcBtcMin[AVFS_VOLTAGE_SOC] = 0x%x\n", 
pptable->DcBtcMin[AVFS_VOLTAGE_SOC]); + pr_info("DcBtcMax[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_GFX]); + pr_info("DcBtcMax[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcMax[AVFS_VOLTAGE_SOC]); + + pr_info("DcBtcGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_GFX]); + pr_info("DcBtcGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_SOC]); + + pr_info("XgmiDpmPstates\n"); + for (i = 0; i < NUM_XGMI_LEVELS; i++) + pr_info(" .[%d] = %d\n", i, pptable->XgmiDpmPstates[i]); + pr_info("XgmiDpmSpare[0] = 0x%02x\n", pptable->XgmiDpmSpare[0]); + pr_info("XgmiDpmSpare[1] = 0x%02x\n", pptable->XgmiDpmSpare[1]); + + pr_info("VDDGFX_TVmin = %d\n", pptable->VDDGFX_TVmin); + pr_info("VDDSOC_TVmin = %d\n", pptable->VDDSOC_TVmin); + pr_info("VDDGFX_Vmin_HiTemp = %d\n", pptable->VDDGFX_Vmin_HiTemp); + pr_info("VDDGFX_Vmin_LoTemp = %d\n", pptable->VDDGFX_Vmin_LoTemp); + pr_info("VDDSOC_Vmin_HiTemp = %d\n", pptable->VDDSOC_Vmin_HiTemp); + pr_info("VDDSOC_Vmin_LoTemp = %d\n", pptable->VDDSOC_Vmin_LoTemp); + pr_info("VDDGFX_TVminHystersis = %d\n", pptable->VDDGFX_TVminHystersis); + pr_info("VDDSOC_TVminHystersis = %d\n", pptable->VDDSOC_TVminHystersis); + + pr_info("DebugOverrides = 0x%x\n", pptable->DebugOverrides); + pr_info("ReservedEquation0{a = 0x%x b = 0x%x c = 0x%x}\n", + pptable->ReservedEquation0.a, + pptable->ReservedEquation0.b, + pptable->ReservedEquation0.c); + pr_info("ReservedEquation1{a = 0x%x b = 0x%x c = 0x%x}\n", + pptable->ReservedEquation1.a, + pptable->ReservedEquation1.b, + pptable->ReservedEquation1.c); + pr_info("ReservedEquation2{a = 0x%x b = 0x%x c = 0x%x}\n", + pptable->ReservedEquation2.a, + pptable->ReservedEquation2.b, + pptable->ReservedEquation2.c); + pr_info("ReservedEquation3{a = 0x%x b = 0x%x c = 0x%x}\n", + pptable->ReservedEquation3.a, + pptable->ReservedEquation3.b, + pptable->ReservedEquation3.c); + + pr_info("MinVoltageUlvGfx = %d\n", pptable->MinVoltageUlvGfx); + pr_info("PaddingUlv = %d\n", pptable->PaddingUlv); + + pr_info("TotalPowerConfig = %d\n", pptable->TotalPowerConfig); + pr_info("TotalPowerSpare1 = %d\n", pptable->TotalPowerSpare1); + pr_info("TotalPowerSpare2 = %d\n", pptable->TotalPowerSpare2); + + pr_info("PccThresholdLow = %d\n", pptable->PccThresholdLow); + pr_info("PccThresholdHigh = %d\n", pptable->PccThresholdHigh); + + pr_info("Board Parameters:\n"); + pr_info("MaxVoltageStepGfx = 0x%x\n", pptable->MaxVoltageStepGfx); + pr_info("MaxVoltageStepSoc = 0x%x\n", pptable->MaxVoltageStepSoc); + + pr_info("VddGfxVrMapping = 0x%x\n", pptable->VddGfxVrMapping); + pr_info("VddSocVrMapping = 0x%x\n", pptable->VddSocVrMapping); + pr_info("VddMemVrMapping = 0x%x\n", pptable->VddMemVrMapping); + pr_info("BoardVrMapping = 0x%x\n", pptable->BoardVrMapping); + + pr_info("GfxUlvPhaseSheddingMask = 0x%x\n", pptable->GfxUlvPhaseSheddingMask); + pr_info("ExternalSensorPresent = 0x%x\n", pptable->ExternalSensorPresent); + + pr_info("GfxMaxCurrent = 0x%x\n", pptable->GfxMaxCurrent); + pr_info("GfxOffset = 0x%x\n", pptable->GfxOffset); + pr_info("Padding_TelemetryGfx = 0x%x\n", pptable->Padding_TelemetryGfx); + + pr_info("SocMaxCurrent = 0x%x\n", pptable->SocMaxCurrent); + pr_info("SocOffset = 0x%x\n", pptable->SocOffset); + pr_info("Padding_TelemetrySoc = 0x%x\n", pptable->Padding_TelemetrySoc); + + pr_info("MemMaxCurrent = 0x%x\n", pptable->MemMaxCurrent); + pr_info("MemOffset = 0x%x\n", pptable->MemOffset); + pr_info("Padding_TelemetryMem = 0x%x\n", pptable->Padding_TelemetryMem); + + pr_info("BoardMaxCurrent = 0x%x\n", 
pptable->BoardMaxCurrent); + pr_info("BoardOffset = 0x%x\n", pptable->BoardOffset); + pr_info("Padding_TelemetryBoardInput = 0x%x\n", pptable->Padding_TelemetryBoardInput); + + pr_info("VR0HotGpio = %d\n", pptable->VR0HotGpio); + pr_info("VR0HotPolarity = %d\n", pptable->VR0HotPolarity); + pr_info("VR1HotGpio = %d\n", pptable->VR1HotGpio); + pr_info("VR1HotPolarity = %d\n", pptable->VR1HotPolarity); + + pr_info("PllGfxclkSpreadEnabled = %d\n", pptable->PllGfxclkSpreadEnabled); + pr_info("PllGfxclkSpreadPercent = %d\n", pptable->PllGfxclkSpreadPercent); + pr_info("PllGfxclkSpreadFreq = %d\n", pptable->PllGfxclkSpreadFreq); + + pr_info("UclkSpreadEnabled = %d\n", pptable->UclkSpreadEnabled); + pr_info("UclkSpreadPercent = %d\n", pptable->UclkSpreadPercent); + pr_info("UclkSpreadFreq = %d\n", pptable->UclkSpreadFreq); + + pr_info("FclkSpreadEnabled = %d\n", pptable->FclkSpreadEnabled); + pr_info("FclkSpreadPercent = %d\n", pptable->FclkSpreadPercent); + pr_info("FclkSpreadFreq = %d\n", pptable->FclkSpreadFreq); + + pr_info("FllGfxclkSpreadEnabled = %d\n", pptable->FllGfxclkSpreadEnabled); + pr_info("FllGfxclkSpreadPercent = %d\n", pptable->FllGfxclkSpreadPercent); + pr_info("FllGfxclkSpreadFreq = %d\n", pptable->FllGfxclkSpreadFreq); + + for (i = 0; i < NUM_I2C_CONTROLLERS; i++) { + pr_info("I2cControllers[%d]:\n", i); + pr_info(" .Enabled = %d\n", + pptable->I2cControllers[i].Enabled); + pr_info(" .SlaveAddress = 0x%x\n", + pptable->I2cControllers[i].SlaveAddress); + pr_info(" .ControllerPort = %d\n", + pptable->I2cControllers[i].ControllerPort); + pr_info(" .ControllerName = %d\n", + pptable->I2cControllers[i].ControllerName); + pr_info(" .ThermalThrottler = %d\n", + pptable->I2cControllers[i].ThermalThrotter); + pr_info(" .I2cProtocol = %d\n", + pptable->I2cControllers[i].I2cProtocol); + pr_info(" .Speed = %d\n", + pptable->I2cControllers[i].Speed); + } + + pr_info("MemoryChannelEnabled = %d\n", pptable->MemoryChannelEnabled); + pr_info("DramBitWidth = %d\n", pptable->DramBitWidth); + + pr_info("TotalBoardPower = %d\n", pptable->TotalBoardPower); + + pr_info("XgmiLinkSpeed\n"); + for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) + pr_info(" .[%d] = %d\n", i, pptable->XgmiLinkSpeed[i]); + pr_info("XgmiLinkWidth\n"); + for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) + pr_info(" .[%d] = %d\n", i, pptable->XgmiLinkWidth[i]); + pr_info("XgmiFclkFreq\n"); + for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) + pr_info(" .[%d] = %d\n", i, pptable->XgmiFclkFreq[i]); + pr_info("XgmiSocVoltage\n"); + for (i = 0; i < NUM_XGMI_PSTATE_LEVELS; i++) + pr_info(" .[%d] = %d\n", i, pptable->XgmiSocVoltage[i]); + +} + static const struct pptable_funcs arcturus_ppt_funcs = { + /* translate smu index into arcturus specific index */ .get_smu_msg_index = arcturus_get_smu_msg_index, + .get_smu_clk_index = arcturus_get_smu_clk_index, + .get_smu_feature_index = arcturus_get_smu_feature_index, + .get_smu_table_index = arcturus_get_smu_table_index, + .get_smu_power_index= arcturus_get_pwr_src_index, + .get_workload_type = arcturus_get_workload_type, + /* internal structurs allocations */ + .tables_init = arcturus_tables_init, + .alloc_dpm_context = arcturus_allocate_dpm_context, + /* pptable related */ + .check_powerplay_table = arcturus_check_powerplay_table, + .store_powerplay_table = arcturus_store_powerplay_table, + .append_powerplay_table = arcturus_append_powerplay_table, + /* init dpm */ + .get_allowed_feature_mask = arcturus_get_allowed_feature_mask, + /* btc */ + .run_afll_btc = arcturus_run_btc_afll, + /* dpm/clk tables 
*/ + .set_default_dpm_table = arcturus_set_default_dpm_table, + .populate_umd_state_clk = arcturus_populate_umd_state_clk, + .get_thermal_temperature_range = arcturus_get_thermal_temperature_range, + .print_clk_levels = arcturus_print_clk_levels, + .force_clk_levels = arcturus_force_clk_levels, + /* debug (internal used) */ + .dump_pptable = arcturus_dump_pptable, }; void arcturus_set_ppt_funcs(struct smu_context *smu) { + struct smu_table_context *smu_table = &smu->smu_table; + smu->ppt_funcs = &arcturus_ppt_funcs; smu->smc_if_version = SMU11_DRIVER_IF_VERSION; + smu_table->table_count = TABLE_COUNT; } diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.h b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.h index 7b808d091b31..d756b16924b8 100644 --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.h +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.h @@ -23,6 +23,50 @@ #ifndef __ARCTURUS_PPT_H__ #define __ARCTURUS_PPT_H__ +#define ARCTURUS_UMD_PSTATE_GFXCLK_LEVEL 0x3 +#define ARCTURUS_UMD_PSTATE_SOCCLK_LEVEL 0x3 +#define ARCTURUS_UMD_PSTATE_MCLK_LEVEL 0x2 + +#define MAX_DPM_NUMBER 16 +#define MAX_PCIE_CONF 2 + +struct arcturus_dpm_level { + bool enabled; + uint32_t value; + uint32_t param1; +}; + +struct arcturus_dpm_state { + uint32_t soft_min_level; + uint32_t soft_max_level; + uint32_t hard_min_level; + uint32_t hard_max_level; +}; + +struct arcturus_single_dpm_table { + uint32_t count; + struct arcturus_dpm_state dpm_state; + struct arcturus_dpm_level dpm_levels[MAX_DPM_NUMBER]; +}; + +struct arcturus_pcie_table { + uint16_t count; + uint8_t pcie_gen[MAX_PCIE_CONF]; + uint8_t pcie_lane[MAX_PCIE_CONF]; + uint32_t lclk[MAX_PCIE_CONF]; +}; + +struct arcturus_dpm_table { + struct arcturus_single_dpm_table soc_table; + struct arcturus_single_dpm_table gfx_table; + struct arcturus_single_dpm_table mem_table; + struct arcturus_single_dpm_table eclk_table; + struct arcturus_single_dpm_table vclk_table; + struct arcturus_single_dpm_table dclk_table; + struct arcturus_single_dpm_table fclk_table; + struct arcturus_pcie_table pcie_table; +}; + extern void arcturus_set_ppt_funcs(struct smu_context *smu); #endif diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h index 8793c8d0dc52..72962e842d69 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h @@ -121,7 +121,7 @@ __SMU_DUMMY_MAP(GetVoltageByDpm), \ __SMU_DUMMY_MAP(GetVoltageByDpmOverdrive), \ __SMU_DUMMY_MAP(PowerUpVcn0), \ - __SMU_DUMMY_MAP(PowerDownVcn01), \ + __SMU_DUMMY_MAP(PowerDownVcn0), \ __SMU_DUMMY_MAP(PowerUpVcn1), \ __SMU_DUMMY_MAP(PowerDownVcn1), \ -- cgit v1.2.3 From 22e1831734716f9e3e4fb1f88060f0efaa59fc6f Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Tue, 30 Jul 2019 23:30:07 -0500 Subject: drm/amd/powerplay: enable SW SMU routine support for arcturus Enable arcturus SW SMU routines. Signed-off-by: Evan Quan Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 49 ++++++++++++++++++------------ 1 file changed, 30 insertions(+), 19 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index fd0485b29429..9de7b369901e 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -476,7 +476,7 @@ bool is_support_sw_smu(struct amdgpu_device *adev) { if (adev->asic_type == CHIP_VEGA20) return (amdgpu_dpm == 2) ? 
true : false; - else if (adev->asic_type >= CHIP_NAVI10) + else if (adev->asic_type >= CHIP_ARCTURUS) return true; else return false; @@ -708,6 +708,7 @@ static int smu_set_funcs(struct amdgpu_device *adev) case CHIP_VEGA20: case CHIP_NAVI10: case CHIP_NAVI14: + case CHIP_ARCTURUS: if (adev->pm.pp_feature & PP_OVERDRIVE_MASK) smu->od_enabled = true; smu_v11_0_set_smu_funcs(smu); @@ -1013,9 +1014,11 @@ static int smu_smc_table_hw_init(struct smu_context *smu, return 0; } - ret = smu_init_display_count(smu, 0); - if (ret) - return ret; + if (adev->asic_type != CHIP_ARCTURUS) { + ret = smu_init_display_count(smu, 0); + if (ret) + return ret; + } if (initialize) { /* get boot_values from vbios to set revision, gfxclk, and etc. */ @@ -1091,17 +1094,19 @@ static int smu_smc_table_hw_init(struct smu_context *smu, if (ret) return ret; - ret = smu_notify_display_change(smu); - if (ret) - return ret; + if (adev->asic_type != CHIP_ARCTURUS) { + ret = smu_notify_display_change(smu); + if (ret) + return ret; - /* - * Set min deep sleep dce fclk with bootup value from vbios via - * SetMinDeepSleepDcefclk MSG. - */ - ret = smu_set_min_dcef_deep_sleep(smu); - if (ret) - return ret; + /* + * Set min deep sleep dce fclk with bootup value from vbios via + * SetMinDeepSleepDcefclk MSG. + */ + ret = smu_set_min_dcef_deep_sleep(smu); + if (ret) + return ret; + } /* * Set initialized values (get from vbios) to dpm tables context such as @@ -1212,14 +1217,20 @@ static int smu_hw_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct smu_context *smu = &adev->smu; - if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { - ret = smu_check_fw_status(smu); - if (ret) { - pr_err("SMC firmware status is not correct\n"); - return ret; + if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { + if (adev->asic_type < CHIP_NAVI10) { + ret = smu_load_microcode(smu); + if (ret) + return ret; } } + ret = smu_check_fw_status(smu); + if (ret) { + pr_err("SMC firmware status is not correct\n"); + return ret; + } + if (!smu->pm_enabled) return 0; -- cgit v1.2.3 From cca4fafc09042cc63e7e277ad8d40b03307dfe3d Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Fri, 12 Jul 2019 16:50:52 +0800 Subject: drm/amd/powerplay: initialize arcturus MP1 and THM base address Initialize base address for those IPs which are used in powerplay. 
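For reference, the reg_offset[] slots programmed here are what the SOC15
register helpers index whenever powerplay touches MP1 or THM registers, so
both entries must exist before the smu_v11_0/thermal code runs. A minimal
sketch of the lookup they enable (simplified; the real macro is
SOC15_REG_OFFSET in the soc15 headers, and the base_idx/reg parameter names
here are illustrative):

    /* adev->reg_offset[THM_HWIP][inst] points at THM_BASE.instance[inst],
     * i.e. an array of segment base addresses. */
    static u32 thm_mmio_addr(struct amdgpu_device *adev, u32 inst,
                             u32 base_idx, u32 reg)
    {
            return adev->reg_offset[THM_HWIP][inst][base_idx] + reg;
    }

Without the two additions below, those table slots stay NULL and the first
powerplay access to MP1/THM would dereference them.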
Signed-off-by: Evan Quan Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/arct_reg_init.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/arct_reg_init.c b/drivers/gpu/drm/amd/amdgpu/arct_reg_init.c index 51b8cdffb196..4853899b1824 100644 --- a/drivers/gpu/drm/amd/amdgpu/arct_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/arct_reg_init.c @@ -38,6 +38,7 @@ int arct_reg_base_init(struct amdgpu_device *adev) adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i])); adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIF0_BASE.instance[i])); adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i])); + adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i])); adev->reg_offset[UVD_HWIP][i] = (uint32_t *)(&(UVD_BASE.instance[i])); adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i])); adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i])); @@ -50,6 +51,7 @@ int arct_reg_base_init(struct amdgpu_device *adev) adev->reg_offset[SDMA6_HWIP][i] = (uint32_t *)(&(SDMA6_BASE.instance[i])); adev->reg_offset[SDMA7_HWIP][i] = (uint32_t *)(&(SDMA7_BASE.instance[i])); adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i])); + adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i])); } return 0; } -- cgit v1.2.3 From fe089e1dd797c2c548425dc4756ee02e3682d78e Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Fri, 12 Jul 2019 16:53:28 +0800 Subject: drm/amd/powerplay: enable arcturus powerplay Arcturus powerplay is ready to use. Signed-off-by: Evan Quan Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 30ba94296a66..e528122bb7b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -697,6 +697,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); + amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block); break; default: -- cgit v1.2.3 From 7c16d24abe3525873ec7e62ec366d06022dc6367 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Tue, 16 Jul 2019 11:03:10 +0800 Subject: drm/amdgpu: correct VCN powergate routine for acturus Arcturus VCN should powergate in the way as Navi. 
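The fix relies on the ordering of the amd_asic_type enum: ASICs older than
Arcturus keep the legacy amdgpu_dpm_enable_uvd() handshake, while Arcturus
joins the Navi parts in toggling the VCN IP block's powergating state
directly. Reduced to a helper purely for illustration (the driver open-codes
this check in the idle-work and ring-begin-use paths below; the assumption is
that CHIP_ARCTURUS sorts before the CHIP_NAVI1x values in the enum):

    static bool vcn_uses_legacy_uvd_gating(struct amdgpu_device *adev)
    {
            /* pre-Arcturus ASICs with DPM enabled: UVD-style dpm toggle */
            return adev->asic_type < CHIP_ARCTURUS && adev->pm.dpm_enabled;
    }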
Signed-off-by: Evan Quan Reviewed-by: Le Ma Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 4d3e6f1876c6..b35fc6ef4c1f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -305,7 +305,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work) if (fences == 0) { amdgpu_gfx_off_ctrl(adev, true); - if (adev->asic_type < CHIP_NAVI10 && adev->pm.dpm_enabled) + if (adev->asic_type < CHIP_ARCTURUS && adev->pm.dpm_enabled) amdgpu_dpm_enable_uvd(adev, false); else amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN, @@ -322,7 +322,7 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring) if (set_clocks) { amdgpu_gfx_off_ctrl(adev, false); - if (adev->asic_type < CHIP_NAVI10 && adev->pm.dpm_enabled) + if (adev->asic_type < CHIP_ARCTURUS && adev->pm.dpm_enabled) amdgpu_dpm_enable_uvd(adev, true); else amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN, -- cgit v1.2.3 From 3ff101b8ab78e115feb52645dac35b3634ee5a77 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Wed, 17 Jul 2019 09:34:13 +0800 Subject: drm/amd/powerplay: hold on the arcturus gfx dpm support in driver As for now, only "Prefetcher" is guarded to be working from SMU firmware. Signed-off-by: Evan Quan Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/arcturus_ppt.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c index b284ebcbe545..a0644ef267a9 100644 --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c @@ -324,8 +324,7 @@ arcturus_get_allowed_feature_mask(struct smu_context *smu, memset(feature_mask, 0, sizeof(uint32_t) * num); - *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT) - | FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT); + *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT); return 0; } -- cgit v1.2.3 From 9d4d7236efad8cf47e989be0c80dd37d68d55a5c Mon Sep 17 00:00:00 2001 From: Le Ma Date: Mon, 15 Jul 2019 18:00:50 +0800 Subject: drm/amd/include: adjust base offset of SMUIO and THM for Arcturus Arcturus has different _BASE_IDX value in some HWIP_offset.h. To make source files like smu_v11_0.c and soc15.c that include HWIP_offset.h of Vega20 reusable for Arcturus, align this base offset with Vega20. 
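Concretely, smu_v11_0.c and soc15.c are compiled against Vega20's *_BASE_IDX
constants, which expect the live SMUIO/THM apertures in the leading segments.
With the trimmed tables below, a shared-code lookup resolves to the same
addresses as on Vega20; as a rough check (illustrative only, field names as
in the IP_BASE definitions of this header):

    /* segment[0] of THM instance 0 now carries the aperture that
     * Vega20-built BASE_IDX values point at, instead of the previous
     * leading 0x00012060 entry. */
    static bool arct_thm_layout_matches_vega20(void)
    {
            return THM_BASE.instance[0].segment[0] == 0x00016600;
    }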
Signed-off-by: Le Ma Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/arct_ip_offset.h | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/include/arct_ip_offset.h b/drivers/gpu/drm/amd/include/arct_ip_offset.h index 3211b3a96d68..a7791a9e1f90 100644 --- a/drivers/gpu/drm/amd/include/arct_ip_offset.h +++ b/drivers/gpu/drm/amd/include/arct_ip_offset.h @@ -196,17 +196,13 @@ static const struct IP_BASE SDMA7_BASE ={ { { { 0x00013800, 0x0001F40 { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } } } }; -static const struct IP_BASE SMUIO_BASE ={ { { { 0x00012080, 0x00016800, 0x00016A00, 0x00401000, 0x00440000, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, +static const struct IP_BASE SMUIO_BASE ={ { { { 0x00016800, 0x00016A00, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } } } }; -static const struct IP_BASE THM_BASE ={ { { { 0x00012060, 0x00016600, 0x00400C00, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, - { { 0, 0, 0, 0, 0, 0 } }, +static const struct IP_BASE THM_BASE ={ { { { 0x00016600, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, -- cgit v1.2.3 From 7d0e6329dfdcfe48311f8888d6a8dfa73bee00a9 Mon Sep 17 00:00:00 2001 From: Le Ma Date: Tue, 16 Jul 2019 15:21:54 +0800 Subject: drm/amdgpu: update more sdma instances irq support Update for sdma ras ecc_irq and other minors. Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 77 ++++++++++++---------------------- 1 file changed, 27 insertions(+), 50 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index dd8f520b3fa1..31a61430218c 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -298,7 +298,7 @@ static unsigned sdma_v4_0_seq_to_irq_id(int seq_num) default: break; } - return 0; + return -EINVAL; } static int sdma_v4_0_irq_id_to_seq(unsigned client_id) @@ -323,7 +323,7 @@ static int sdma_v4_0_irq_id_to_seq(unsigned client_id) default: break; } - return 0; + return -EINVAL; } static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev) @@ -1646,7 +1646,7 @@ static int sdma_v4_0_late_init(void *handle) .sub_block_index = 0, .name = "sdma", }; - int r; + int r, i; if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) { amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0); @@ -1703,14 +1703,11 @@ static int sdma_v4_0_late_init(void *handle) if (r) goto sysfs; resume: - r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE0); - if (r) - goto irq; - - r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE1); - if (r) { - amdgpu_irq_put(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE0); - goto irq; + for (i = 0; i < adev->sdma.num_instances; i++) { + r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq, + sdma_v4_0_seq_to_irq_id(i)); + if (r) + goto irq; } return 0; @@ -1743,16 +1740,13 @@ static int sdma_v4_0_sw_init(void *handle) } /* SDMA SRAM ECC event */ - r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0, SDMA0_4_0__SRCID__SDMA_SRAM_ECC, - &adev->sdma.ecc_irq); - if (r) - return r; - - /* SDMA SRAM ECC event */ - r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA1, SDMA1_4_0__SRCID__SDMA_SRAM_ECC, - 
&adev->sdma.ecc_irq); - if (r) - return r; + for (i = 0; i < adev->sdma.num_instances; i++) { + r = amdgpu_irq_add_id(adev, sdma_v4_0_seq_to_irq_id(i), + SDMA0_4_0__SRCID__SDMA_SRAM_ECC, + &adev->sdma.ecc_irq); + if (r) + return r; + } for (i = 0; i < adev->sdma.num_instances; i++) { ring = &adev->sdma.instance[i].ring; @@ -1785,9 +1779,7 @@ static int sdma_v4_0_sw_init(void *handle) sprintf(ring->name, "page%d", i); r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq, - (i == 0) ? - AMDGPU_SDMA_IRQ_INSTANCE0 : - AMDGPU_SDMA_IRQ_INSTANCE1); + AMDGPU_SDMA_IRQ_INSTANCE0 + i); if (r) return r; } @@ -1850,12 +1842,15 @@ static int sdma_v4_0_hw_init(void *handle) static int sdma_v4_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i; if (amdgpu_sriov_vf(adev)) return 0; - amdgpu_irq_put(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE0); - amdgpu_irq_put(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE1); + for (i = 0; i < adev->sdma.num_instances; i++) { + amdgpu_irq_put(adev, &adev->sdma.ecc_irq, + sdma_v4_0_seq_to_irq_id(i)); + } sdma_v4_0_ctx_switch_enable(adev, false); sdma_v4_0_enable(adev, false); @@ -1969,16 +1964,9 @@ static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev, { uint32_t instance, err_source; - switch (entry->client_id) { - case SOC15_IH_CLIENTID_SDMA0: - instance = 0; - break; - case SOC15_IH_CLIENTID_SDMA1: - instance = 1; - break; - default: + instance = sdma_v4_0_irq_id_to_seq(entry->client_id); + if (instance < 0) return 0; - } switch (entry->src_id) { case SDMA0_4_0__SRCID__SDMA_SRAM_ECC: @@ -2024,16 +2012,9 @@ static int sdma_v4_0_process_illegal_inst_irq(struct amdgpu_device *adev, DRM_ERROR("Illegal instruction in SDMA command stream\n"); - switch (entry->client_id) { - case SOC15_IH_CLIENTID_SDMA0: - instance = 0; - break; - case SOC15_IH_CLIENTID_SDMA1: - instance = 1; - break; - default: + instance = sdma_v4_0_irq_id_to_seq(entry->client_id); + if (instance < 0) return 0; - } switch (entry->ring_id) { case 0: @@ -2050,14 +2031,10 @@ static int sdma_v4_0_set_ecc_irq_state(struct amdgpu_device *adev, { u32 sdma_edc_config; - u32 reg_offset = (type == AMDGPU_SDMA_IRQ_INSTANCE0) ? - sdma_v4_0_get_reg_offset(adev, 0, mmSDMA0_EDC_CONFIG) : - sdma_v4_0_get_reg_offset(adev, 1, mmSDMA0_EDC_CONFIG); - - sdma_edc_config = RREG32(reg_offset); + sdma_edc_config = RREG32_SDMA(type, mmSDMA0_EDC_CONFIG); sdma_edc_config = REG_SET_FIELD(sdma_edc_config, SDMA0_EDC_CONFIG, ECC_INT_ENABLE, state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); - WREG32(reg_offset, sdma_edc_config); + WREG32_SDMA(type, mmSDMA0_EDC_CONFIG, sdma_edc_config); return 0; } -- cgit v1.2.3 From 9829e3d89b6ea9027542d094cdb8a434eef4b3aa Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 22 Jul 2019 09:51:59 +0800 Subject: drm/amd/powerplay: add new sensor type for VCN powergate status VCN is widely used in new ASICs and different from tranditional UVD and VCE. 
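Follow-up patches in this series implement the sensor for Raven, for the SW
SMU core and for the debugfs pm_info dump. A consumer reads it through the
existing sensor interface, roughly as follows (sketch modelled on the debugfs
usage further below; the printed string is illustrative):

    uint32_t vcn_powered = 0;
    int size = sizeof(vcn_powered);

    /* 0 = VCN power gated, 1 = VCN powered up */
    if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE,
                                (void *)&vcn_powered, &size))
            DRM_INFO("VCN: %s\n", vcn_powered ? "Enabled" : "Disabled");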
Signed-off-by: Evan Quan Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/kgd_pp_interface.h | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index 95edc3d3a9c4..bba1291ae405 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -123,6 +123,7 @@ enum amd_pp_sensors { AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, AMDGPU_PP_SENSOR_MIN_FAN_RPM, AMDGPU_PP_SENSOR_MAX_FAN_RPM, + AMDGPU_PP_SENSOR_VCN_POWER_STATE, }; enum amd_pp_task { -- cgit v1.2.3 From ab9e314886adef86ca6bf7b6c485abf427209295 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 22 Jul 2019 09:55:36 +0800 Subject: drm/amd/powerplay: support VCN powergate status retrieval on Raven Enable VCN powergate status report on Raven. Signed-off-by: Evan Quan Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index e32ae9d3373c..18e780f566fa 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -1111,6 +1111,7 @@ static int smu10_thermal_get_temperature(struct pp_hwmgr *hwmgr) static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx, void *value, int *size) { + struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); uint32_t sclk, mclk; int ret = 0; @@ -1132,6 +1133,10 @@ static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx, case AMDGPU_PP_SENSOR_GPU_TEMP: *((uint32_t *)value) = smu10_thermal_get_temperature(hwmgr); break; + case AMDGPU_PP_SENSOR_VCN_POWER_STATE: + *(uint32_t *)value = smu10_data->vcn_power_gated ? 0 : 1; + *size = 4; + break; default: ret = -EINVAL; break; @@ -1175,18 +1180,22 @@ static int smu10_powergate_sdma(struct pp_hwmgr *hwmgr, bool gate) static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate) { + struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); + if (bgate) { amdgpu_device_ip_set_powergating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_VCN, AMD_PG_STATE_GATE); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PowerDownVcn, 0); + smu10_data->vcn_power_gated = true; } else { smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PowerUpVcn, 0); amdgpu_device_ip_set_powergating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_VCN, AMD_PG_STATE_UNGATE); + smu10_data->vcn_power_gated = false; } } -- cgit v1.2.3 From bf2bf52383a09256e11278e7bcb67dcd912078c7 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 22 Jul 2019 09:57:27 +0800 Subject: drm/amd/powerplay: support VCN powergate status retrieval for SW SMU Commonly used for VCN powergate status retrieval for SW SMU. 
Signed-off-by: Evan Quan Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 9de7b369901e..b1e0e2a4aca1 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -424,6 +424,10 @@ int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor, *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0; *size = 4; break; + case AMDGPU_PP_SENSOR_VCN_POWER_STATE: + *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT) ? 1 : 0; + *size = 4; + break; default: ret = -EINVAL; break; -- cgit v1.2.3 From 5fa790f6c936c4705dea5883fa12da9e017ceb4f Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 22 Jul 2019 10:27:21 +0800 Subject: drm/amd/powerplay: correct Navi10 VCN powergate control (v2) No VCN DPM bit check as that's different from VCN PG. Also no extra check for possible double enablement/disablement as that's already done by VCN. v2: check return value of smu_feature_set_enabled Signed-off-by: Evan Quan Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 28 ++++++++++------------------ 1 file changed, 10 insertions(+), 18 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index b92e109ed46d..5ae19ae58807 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -592,28 +592,20 @@ static int navi10_set_default_dpm_table(struct smu_context *smu) static int navi10_dpm_set_uvd_enable(struct smu_context *smu, bool enable) { int ret = 0; - struct smu_power_context *smu_power = &smu->smu_power; - struct smu_power_gate *power_gate = &smu_power->power_gate; - if (enable && power_gate->uvd_gated) { - if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT)) { - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1); - if (ret) - return ret; - } - power_gate->uvd_gated = false; + if (enable) { + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1); + if (ret) + return ret; } else { - if (!enable && !power_gate->uvd_gated) { - if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT)) { - ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn); - if (ret) - return ret; - } - power_gate->uvd_gated = true; - } + ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn); + if (ret) + return ret; } - return 0; + ret = smu_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, enable); + + return ret; } static int navi10_get_current_clk_freq_by_table(struct smu_context *smu, -- cgit v1.2.3 From 1f96ecef6f0137791b5a37e005e65c42ce927e62 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 22 Jul 2019 10:42:29 +0800 Subject: drm/amd/powerplay: correct UVD/VCE/VCN power status retrieval VCN should be used for Vega20 later ASICs while UVD and VCE are for previous ASICs. 
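In other words, ASICs newer than Vega20 report VCN state (reusing the
existing VCLK/DCLK sensors for the VCN clocks), while Vega20 and older keep
the UVD/VCE reporting. The restructured debugfs code below boils down to this
selection (comments stand in for the seq_printf blocks):

    if (adev->asic_type > CHIP_VEGA20) {
            /* VCN: AMDGPU_PP_SENSOR_VCN_POWER_STATE, then UVD_DCLK/UVD_VCLK */
    } else {
            /* legacy: UVD_POWER (+ DCLK/VCLK) and VCE_POWER (+ ECCLK) */
    }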
Signed-off-by: Evan Quan Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 56 ++++++++++++++++++++++------------ 1 file changed, 36 insertions(+), 20 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 71cd7fa5a925..39998f203b49 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -3072,28 +3072,44 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size)) seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64); - /* UVD clocks */ - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) { - if (!value) { - seq_printf(m, "UVD: Disabled\n"); - } else { - seq_printf(m, "UVD: Enabled\n"); - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size)) - seq_printf(m, "\t%u MHz (DCLK)\n", value/100); - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size)) - seq_printf(m, "\t%u MHz (VCLK)\n", value/100); + if (adev->asic_type > CHIP_VEGA20) { + /* VCN clocks */ + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) { + if (!value) { + seq_printf(m, "VCN: Disabled\n"); + } else { + seq_printf(m, "VCN: Enabled\n"); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size)) + seq_printf(m, "\t%u MHz (DCLK)\n", value/100); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size)) + seq_printf(m, "\t%u MHz (VCLK)\n", value/100); + } } - } - seq_printf(m, "\n"); + seq_printf(m, "\n"); + } else { + /* UVD clocks */ + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) { + if (!value) { + seq_printf(m, "UVD: Disabled\n"); + } else { + seq_printf(m, "UVD: Enabled\n"); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size)) + seq_printf(m, "\t%u MHz (DCLK)\n", value/100); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size)) + seq_printf(m, "\t%u MHz (VCLK)\n", value/100); + } + } + seq_printf(m, "\n"); - /* VCE clocks */ - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) { - if (!value) { - seq_printf(m, "VCE: Disabled\n"); - } else { - seq_printf(m, "VCE: Enabled\n"); - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size)) - seq_printf(m, "\t%u MHz (ECCLK)\n", value/100); + /* VCE clocks */ + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) { + if (!value) { + seq_printf(m, "VCE: Disabled\n"); + } else { + seq_printf(m, "VCE: Enabled\n"); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size)) + seq_printf(m, "\t%u MHz (ECCLK)\n", value/100); + } } } -- cgit v1.2.3 From 832a7062a06016d198039a566dc71073b7155881 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 22 Jul 2019 15:55:52 +0800 Subject: drm/amd/powerplay: init arcturus SMU metrics table on bootup Initialize arcturus SMU metrics table. 
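The buffer and timestamp added here back the cached-read scheme the following
sensor patches rely on: arcturus_get_metrics_table() refreshes the copy from
the SMU at most roughly once per millisecond (HZ / 1000 jiffies) and
otherwise serves the cached data, so repeated hwmon/debugfs polls do not turn
into a stream of table exports from the firmware. The core of that pattern,
trimmed down from the later hunk:

    if (!smu_table->metrics_time ||
        time_after(jiffies, smu_table->metrics_time + HZ / 1000)) {
            /* smu_update_table(..., SMU_TABLE_SMU_METRICS, ...) refills
             * the metrics_table buffer allocated in this patch */
            smu_table->metrics_time = jiffies;
    }
    memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t));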
Signed-off-by: Evan Quan Reviewed-by: Kevin Wang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/arcturus_ppt.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c index a0644ef267a9..5f911f092311 100644 --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c @@ -267,6 +267,8 @@ static int arcturus_get_workload_type(struct smu_context *smu, enum PP_SMC_POWER static int arcturus_tables_init(struct smu_context *smu, struct smu_table *tables) { + struct smu_table_context *smu_table = &smu->smu_table; + SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); @@ -276,6 +278,11 @@ static int arcturus_tables_init(struct smu_context *smu, struct smu_table *table SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL); + if (!smu_table->metrics_table) + return -ENOMEM; + smu_table->metrics_time = 0; + return 0; } -- cgit v1.2.3 From ba74c8bf889cbfc02c90525a859bc65ddafc1368 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 29 Jul 2019 13:18:37 -0500 Subject: drm/amd/powerplay: support sensor reading on arcturus Support sensor reading for gpu loading, power and temperatures. Signed-off-by: Evan Quan Reviewed-by: Kevin Wang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/arcturus_ppt.c | 142 +++++++++++++++++++++++++++ 1 file changed, 142 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c index 5f911f092311..1dbb917fb916 100644 --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c @@ -853,6 +853,147 @@ static int arcturus_get_thermal_temperature_range(struct smu_context *smu, return 0; } +static int arcturus_get_metrics_table(struct smu_context *smu, + SmuMetrics_t *metrics_table) +{ + struct smu_table_context *smu_table= &smu->smu_table; + int ret = 0; + + if (!smu_table->metrics_time || + time_after(jiffies, smu_table->metrics_time + HZ / 1000)) { + ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, + (void *)smu_table->metrics_table, false); + if (ret) { + pr_info("Failed to export SMU metrics table!\n"); + return ret; + } + smu_table->metrics_time = jiffies; + } + + memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t)); + + return ret; +} + +static int arcturus_get_current_activity_percent(struct smu_context *smu, + enum amd_pp_sensors sensor, + uint32_t *value) +{ + SmuMetrics_t metrics; + int ret = 0; + + if (!value) + return -EINVAL; + + ret = arcturus_get_metrics_table(smu, &metrics); + if (ret) + return ret; + + switch (sensor) { + case AMDGPU_PP_SENSOR_GPU_LOAD: + *value = metrics.AverageGfxActivity; + break; + case AMDGPU_PP_SENSOR_MEM_LOAD: + *value = metrics.AverageUclkActivity; + break; + default: + pr_err("Invalid sensor for retrieving clock activity\n"); + return -EINVAL; + } + + return 0; +} + +static int arcturus_get_gpu_power(struct smu_context *smu, uint32_t *value) +{ + SmuMetrics_t metrics; + int ret = 0; + + if (!value) + return -EINVAL; + + ret = arcturus_get_metrics_table(smu, &metrics); + if (ret) + return ret; + + *value = metrics.AverageSocketPower << 8; + + return 0; +} + +static int 
arcturus_thermal_get_temperature(struct smu_context *smu, + enum amd_pp_sensors sensor, + uint32_t *value) +{ + SmuMetrics_t metrics; + int ret = 0; + + if (!value) + return -EINVAL; + + ret = arcturus_get_metrics_table(smu, &metrics); + if (ret) + return ret; + + switch (sensor) { + case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: + *value = metrics.TemperatureHotspot * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case AMDGPU_PP_SENSOR_EDGE_TEMP: + *value = metrics.TemperatureEdge * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case AMDGPU_PP_SENSOR_MEM_TEMP: + *value = metrics.TemperatureHBM * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + default: + pr_err("Invalid sensor for retrieving temp\n"); + return -EINVAL; + } + + return 0; +} + +static int arcturus_read_sensor(struct smu_context *smu, + enum amd_pp_sensors sensor, + void *data, uint32_t *size) +{ + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *pptable = table_context->driver_pptable; + int ret = 0; + + switch (sensor) { + case AMDGPU_PP_SENSOR_MAX_FAN_RPM: + *(uint32_t *)data = pptable->FanMaximumRpm; + *size = 4; + break; + case AMDGPU_PP_SENSOR_MEM_LOAD: + case AMDGPU_PP_SENSOR_GPU_LOAD: + ret = arcturus_get_current_activity_percent(smu, + sensor, + (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_GPU_POWER: + ret = arcturus_get_gpu_power(smu, (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: + case AMDGPU_PP_SENSOR_EDGE_TEMP: + case AMDGPU_PP_SENSOR_MEM_TEMP: + ret = arcturus_thermal_get_temperature(smu, sensor, + (uint32_t *)data); + *size = 4; + break; + default: + return -EINVAL; + } + + return ret; +} + static void arcturus_dump_pptable(struct smu_context *smu) { struct smu_table_context *table_context = &smu->smu_table; @@ -1309,6 +1450,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = { .get_thermal_temperature_range = arcturus_get_thermal_temperature_range, .print_clk_levels = arcturus_print_clk_levels, .force_clk_levels = arcturus_force_clk_levels, + .read_sensor = arcturus_read_sensor, /* debug (internal used) */ .dump_pptable = arcturus_dump_pptable, }; -- cgit v1.2.3 From 631807f091aeb8a3c9f5c7271753a1b257446c7e Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Fri, 19 Jul 2019 17:18:34 +0800 Subject: drm/amd/powerplay: support real-time clock retrieval on arcturus Enable arcturus real-time clock retrieval. 
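The new callback reads clocks out of the same cached metrics table instead of
issuing a per-query SMU message: the common smu_clk_type id is translated to
the Arcturus PPCLK index first, and CurrClock[] is indexed with the result.
From a caller's point of view (sketch only; SMU_SOCCLK is used as an example
id and declarations/error handling are omitted):

    int clk_id = smu_clk_get_index(smu, SMU_SOCCLK);    /* -> PPCLK_SOCCLK */

    if (clk_id >= 0 && !arcturus_get_metrics_table(smu, &metrics))
            cur_socclk = metrics.CurrClock[clk_id];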
Signed-off-by: Evan Quan Reviewed-by: Kevin Wang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/arcturus_ppt.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c index 1dbb917fb916..3a702110d9bc 100644 --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c @@ -994,6 +994,29 @@ static int arcturus_read_sensor(struct smu_context *smu, return ret; } +static int arcturus_get_current_clk_freq_by_table(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t *value) +{ + static SmuMetrics_t metrics; + int ret = 0, clk_id = 0; + + if (!value) + return -EINVAL; + + clk_id = smu_clk_get_index(smu, clk_type); + if (clk_id < 0) + return -EINVAL; + + ret = arcturus_get_metrics_table(smu, &metrics); + if (ret) + return ret; + + *value = metrics.CurrClock[clk_id]; + + return ret; +} + static void arcturus_dump_pptable(struct smu_context *smu) { struct smu_table_context *table_context = &smu->smu_table; @@ -1448,6 +1471,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = { .set_default_dpm_table = arcturus_set_default_dpm_table, .populate_umd_state_clk = arcturus_populate_umd_state_clk, .get_thermal_temperature_range = arcturus_get_thermal_temperature_range, + .get_current_clk_freq_by_table = arcturus_get_current_clk_freq_by_table, .print_clk_levels = arcturus_print_clk_levels, .force_clk_levels = arcturus_force_clk_levels, .read_sensor = arcturus_read_sensor, -- cgit v1.2.3 From d427cf8f7f9932aac3908b38ab25d4c4c7c91315 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 22 Jul 2019 12:09:38 +0800 Subject: drm/amd/powerplay: support fan speed retrieval on arcturus Support arcturus fan speed retrieval. Signed-off-by: Evan Quan Reviewed-by: Kevin Wang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/arcturus_ppt.c | 40 ++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c index 3a702110d9bc..2b6dfc7cfe1a 100644 --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c @@ -994,6 +994,44 @@ static int arcturus_read_sensor(struct smu_context *smu, return ret; } +static int arcturus_get_fan_speed_rpm(struct smu_context *smu, + uint32_t *speed) +{ + SmuMetrics_t metrics; + int ret = 0; + + if (!speed) + return -EINVAL; + + ret = arcturus_get_metrics_table(smu, &metrics); + if (ret) + return ret; + + *speed = metrics.CurrFanSpeed; + + return ret; +} + +static int arcturus_get_fan_speed_percent(struct smu_context *smu, + uint32_t *speed) +{ + PPTable_t *pptable = smu->smu_table.driver_pptable; + uint32_t percent, current_rpm; + int ret = 0; + + if (!speed) + return -EINVAL; + + ret = arcturus_get_fan_speed_rpm(smu, ¤t_rpm); + if (ret) + return ret; + + percent = current_rpm * 100 / pptable->FanMaximumRpm; + *speed = percent > 100 ? 
100 : percent; + + return ret; +} + static int arcturus_get_current_clk_freq_by_table(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *value) @@ -1475,6 +1513,8 @@ static const struct pptable_funcs arcturus_ppt_funcs = { .print_clk_levels = arcturus_print_clk_levels, .force_clk_levels = arcturus_force_clk_levels, .read_sensor = arcturus_read_sensor, + .get_fan_speed_percent = arcturus_get_fan_speed_percent, + .get_fan_speed_rpm = arcturus_get_fan_speed_rpm, /* debug (internal used) */ .dump_pptable = arcturus_dump_pptable, }; -- cgit v1.2.3 From 55bf7e6243e72de5d042e76d5bde49f1984530c5 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 22 Jul 2019 16:26:04 +0800 Subject: drm/amd/powerplay: add missing arcturus feature maps Add missing feature maps for arcturus. Signed-off-by: Evan Quan Reviewed-by: Kevin Wang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/arcturus_ppt.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c index 2b6dfc7cfe1a..203fcb7cd724 100644 --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c @@ -43,6 +43,8 @@ #define MSG_MAP(msg, index) \ [SMU_MSG_##msg] = {1, (index)} +#define ARCTURUS_FEA_MAP(smu_feature, arcturus_feature) \ + [smu_feature] = {1, (arcturus_feature)} #define SMU_FEATURES_LOW_MASK 0x00000000FFFFFFFF #define SMU_FEATURES_LOW_SHIFT 0 @@ -125,12 +127,15 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_feature_mask_map[SMU_FEATURE_CO FEA_MAP(DPM_GFXCLK), FEA_MAP(DPM_UCLK), FEA_MAP(DPM_SOCCLK), + FEA_MAP(DPM_FCLK), FEA_MAP(DPM_MP0CLK), FEA_MAP(DS_GFXCLK), FEA_MAP(DS_SOCCLK), FEA_MAP(DS_LCLK), + FEA_MAP(DS_FCLK), FEA_MAP(DS_UCLK), FEA_MAP(GFX_ULV), + ARCTURUS_FEA_MAP(SMU_FEATURE_VCN_PG_BIT, FEATURE_DPM_VCN_BIT), FEA_MAP(RSMU_SMN_CG), FEA_MAP(PPT), FEA_MAP(TDC), -- cgit v1.2.3 From 8a856ced35d51c988497856dcf6f76174305e416 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 22 Jul 2019 17:03:02 +0800 Subject: drm/amd/powerplay: correct the bitmask used in arcturus Those bitmask prefixed by "SMU_" should be used. 
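The prefix matters because smu_feature_is_enabled() takes the common
SMU_FEATURE_* index and translates it through the per-ASIC map (the
FEA_MAP()/ARCTURUS_FEA_MAP() entries added earlier in this series) before
testing the firmware feature mask; the raw arcturus FEATURE_*_BIT values are
valid integers but index the wrong slot. Illustrated with the mapping
structure used above (conceptual, struct/field names as in arcturus_ppt.c):

    struct smu_11_0_cmn2aisc_mapping map =
            arcturus_feature_mask_map[SMU_FEATURE_DPM_SOCCLK_BIT];

    /* map.map_to == FEATURE_DPM_SOCCLK_BIT -- callers must therefore pass
     * the SMU_FEATURE_* index, as the hunks below now do. */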
Signed-off-by: Evan Quan Reviewed-by: Kevin Wang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/arcturus_ppt.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c index 203fcb7cd724..eaca0381e8bd 100644 --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c @@ -403,7 +403,7 @@ static int arcturus_set_default_dpm_table(struct smu_context *smu) /* socclk */ single_dpm_table = &(dpm_table->soc_table); - if (smu_feature_is_enabled(smu, FEATURE_DPM_SOCCLK_BIT)) { + if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) { ret = arcturus_set_single_dpm_table(smu, single_dpm_table, PPCLK_SOCCLK); if (ret) { @@ -418,7 +418,7 @@ static int arcturus_set_default_dpm_table(struct smu_context *smu) /* gfxclk */ single_dpm_table = &(dpm_table->gfx_table); - if (smu_feature_is_enabled(smu, FEATURE_DPM_GFXCLK_BIT)) { + if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) { ret = arcturus_set_single_dpm_table(smu, single_dpm_table, PPCLK_GFXCLK); if (ret) { @@ -433,7 +433,7 @@ static int arcturus_set_default_dpm_table(struct smu_context *smu) /* memclk */ single_dpm_table = &(dpm_table->mem_table); - if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT)) { + if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) { ret = arcturus_set_single_dpm_table(smu, single_dpm_table, PPCLK_UCLK); if (ret) { @@ -448,7 +448,7 @@ static int arcturus_set_default_dpm_table(struct smu_context *smu) /* fclk */ single_dpm_table = &(dpm_table->fclk_table); - if (smu_feature_is_enabled(smu,FEATURE_DPM_FCLK_BIT)) { + if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) { ret = arcturus_set_single_dpm_table(smu, single_dpm_table, PPCLK_FCLK); if (ret) { -- cgit v1.2.3 From 790ef68afcf5134ddc1354c29ef5dddb99a45bad Mon Sep 17 00:00:00 2001 From: Kevin Wang Date: Fri, 19 Jul 2019 16:06:29 +0800 Subject: drm/amd/powerplay: remove redundancy debug log in smu remove redundacy debug log in smu. 
eg: [ 6897.969447] amdgpu: [powerplay] smu 11 clk dpm feature 1 is not enabled [ 6897.969448] amdgpu: [powerplay] smu 11 clk dpm feature 1 is not enabled [ 6897.969448] amdgpu: [powerplay] smu 11 clk dpm feature 1 is not enabled [ 6899.024114] amdgpu: [powerplay] Unsupported SMU message: 38 [ 6899.024151] amdgpu: [powerplay] smu 11 clk dpm feature 1 is not enabled [ 6899.024151] amdgpu: [powerplay] smu 11 clk dpm feature 1 is not enabled [ 6899.024152] amdgpu: [powerplay] smu 11 clk dpm feature 1 is not enabled [ 6900.078296] amdgpu: [powerplay] Unsupported SMU message: 38 [ 6900.078332] amdgpu: [powerplay] smu 11 clk dpm feature 1 is not enabled [ 6900.078332] amdgpu: [powerplay] smu 11 clk dpm feature 1 is not enabled [ 6900.078333] amdgpu: [powerplay] smu 11 clk dpm feature 1 is not enabled [ 6901.133230] amdgpu: [powerplay] Unsupported SMU message: 38 Signed-off-by: Kevin Wang Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/arcturus_ppt.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c index eaca0381e8bd..30317cf9dc9a 100644 --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c @@ -181,10 +181,8 @@ static int arcturus_get_smu_msg_index(struct smu_context *smc, uint32_t index) return -EINVAL; mapping = arcturus_message_map[index]; - if (!(mapping.valid_mapping)) { - pr_warn("Unsupported SMU message: %d\n", index); + if (!(mapping.valid_mapping)) return -EINVAL; - } return mapping.map_to; } -- cgit v1.2.3 From 4bf76e60b9c7737ea9f585795b6ca97bb03100b8 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Tue, 23 Jul 2019 11:42:24 +0800 Subject: drm/amd/powerplay: fix arcturus real-time clock frequency retrieval Make sure we can still get the accurate gfxclk/uclk/socclk frequency even on dpm disabled. Signed-off-by: Evan Quan Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/arcturus_ppt.c | 30 +++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c index 30317cf9dc9a..1b6d41c2462f 100644 --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c @@ -1053,7 +1053,35 @@ static int arcturus_get_current_clk_freq_by_table(struct smu_context *smu, if (ret) return ret; - *value = metrics.CurrClock[clk_id]; + switch (clk_id) { + case PPCLK_GFXCLK: + /* + * CurrClock[clk_id] can provide accurate + * output only when the dpm feature is enabled. + * We can use Average_* for dpm disabled case. + * But this is available for gfxclk/uclk/socclk. 
+ */ + if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) + *value = metrics.CurrClock[PPCLK_GFXCLK]; + else + *value = metrics.AverageGfxclkFrequency; + break; + case PPCLK_UCLK: + if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) + *value = metrics.CurrClock[PPCLK_UCLK]; + else + *value = metrics.AverageUclkFrequency; + break; + case PPCLK_SOCCLK: + if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) + *value = metrics.CurrClock[PPCLK_SOCCLK]; + else + *value = metrics.AverageSocclkFrequency; + break; + default: + *value = metrics.CurrClock[clk_id]; + break; + } return ret; } -- cgit v1.2.3 From 60d435b73db607df560f3df04fe5f4ba96e37116 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Tue, 23 Jul 2019 17:30:35 +0800 Subject: drm/amd/powerplay: support UMD PSTATE settings on arcturus Enable arcturus UMD PSTATE support. Signed-off-by: Evan Quan Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/arcturus_ppt.c | 229 ++++++++++++++++++++++++++- 1 file changed, 225 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c index 1b6d41c2462f..fa2845b9330b 100644 --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c @@ -664,15 +664,15 @@ static int arcturus_print_clk_levels(struct smu_context *smu, } static int arcturus_upload_dpm_level(struct smu_context *smu, bool max, - uint32_t feature_mask) + uint32_t feature_mask) { - struct arcturus_dpm_table *dpm_table; struct arcturus_single_dpm_table *single_dpm_table; + struct arcturus_dpm_table *dpm_table = + smu->smu_dpm.dpm_context; uint32_t freq; int ret = 0; - dpm_table = smu->smu_dpm.dpm_context; - if (smu_feature_is_enabled(smu, FEATURE_DPM_GFXCLK_BIT) && + if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) && (feature_mask & FEATURE_DPM_GFXCLK_MASK)) { single_dpm_table = &(dpm_table->gfx_table); freq = max ? single_dpm_table->dpm_state.soft_max_level : @@ -687,6 +687,36 @@ static int arcturus_upload_dpm_level(struct smu_context *smu, bool max, } } + if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) && + (feature_mask & FEATURE_DPM_UCLK_MASK)) { + single_dpm_table = &(dpm_table->mem_table); + freq = max ? single_dpm_table->dpm_state.soft_max_level : + single_dpm_table->dpm_state.soft_min_level; + ret = smu_send_smc_msg_with_param(smu, + (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq), + (PPCLK_UCLK << 16) | (freq & 0xffff)); + if (ret) { + pr_err("Failed to set soft %s memclk !\n", + max ? "max" : "min"); + return ret; + } + } + + if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT) && + (feature_mask & FEATURE_DPM_SOCCLK_MASK)) { + single_dpm_table = &(dpm_table->soc_table); + freq = max ? single_dpm_table->dpm_state.soft_max_level : + single_dpm_table->dpm_state.soft_min_level; + ret = smu_send_smc_msg_with_param(smu, + (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq), + (PPCLK_SOCCLK << 16) | (freq & 0xffff)); + if (ret) { + pr_err("Failed to set soft %s socclk !\n", + max ? 
"max" : "min"); + return ret; + } + } + return ret; } @@ -1086,6 +1116,194 @@ static int arcturus_get_current_clk_freq_by_table(struct smu_context *smu, return ret; } +static uint32_t arcturus_find_lowest_dpm_level(struct arcturus_single_dpm_table *table) +{ + uint32_t i; + + for (i = 0; i < table->count; i++) { + if (table->dpm_levels[i].enabled) + break; + } + if (i >= table->count) { + i = 0; + table->dpm_levels[i].enabled = true; + } + + return i; +} + +static uint32_t arcturus_find_highest_dpm_level(struct arcturus_single_dpm_table *table) +{ + int i = 0; + + if (table->count <= 0) { + pr_err("[%s] DPM Table has no entry!", __func__); + return 0; + } + if (table->count > MAX_DPM_NUMBER) { + pr_err("[%s] DPM Table has too many entries!", __func__); + return MAX_DPM_NUMBER - 1; + } + + for (i = table->count - 1; i >= 0; i--) { + if (table->dpm_levels[i].enabled) + break; + } + if (i < 0) { + i = 0; + table->dpm_levels[i].enabled = true; + } + + return i; +} + + + +static int arcturus_force_dpm_limit_value(struct smu_context *smu, bool highest) +{ + struct arcturus_dpm_table *dpm_table = + (struct arcturus_dpm_table *)smu->smu_dpm.dpm_context; + uint32_t soft_level; + int ret = 0; + + /* gfxclk */ + if (highest) + soft_level = arcturus_find_highest_dpm_level(&(dpm_table->gfx_table)); + else + soft_level = arcturus_find_lowest_dpm_level(&(dpm_table->gfx_table)); + + dpm_table->gfx_table.dpm_state.soft_min_level = + dpm_table->gfx_table.dpm_state.soft_max_level = + dpm_table->gfx_table.dpm_levels[soft_level].value; + + /* uclk */ + if (highest) + soft_level = arcturus_find_highest_dpm_level(&(dpm_table->mem_table)); + else + soft_level = arcturus_find_lowest_dpm_level(&(dpm_table->mem_table)); + + dpm_table->mem_table.dpm_state.soft_min_level = + dpm_table->mem_table.dpm_state.soft_max_level = + dpm_table->mem_table.dpm_levels[soft_level].value; + + /* socclk */ + if (highest) + soft_level = arcturus_find_highest_dpm_level(&(dpm_table->soc_table)); + else + soft_level = arcturus_find_lowest_dpm_level(&(dpm_table->soc_table)); + + dpm_table->soc_table.dpm_state.soft_min_level = + dpm_table->soc_table.dpm_state.soft_max_level = + dpm_table->soc_table.dpm_levels[soft_level].value; + + ret = arcturus_upload_dpm_level(smu, false, 0xFFFFFFFF); + if (ret) { + pr_err("Failed to upload boot level to %s!\n", + highest ? "highest" : "lowest"); + return ret; + } + + ret = arcturus_upload_dpm_level(smu, true, 0xFFFFFFFF); + if (ret) { + pr_err("Failed to upload dpm max level to %s!\n!", + highest ? 
"highest" : "lowest"); + return ret; + } + + return ret; +} + +static int arcturus_unforce_dpm_levels(struct smu_context *smu) +{ + struct arcturus_dpm_table *dpm_table = + (struct arcturus_dpm_table *)smu->smu_dpm.dpm_context; + uint32_t soft_min_level, soft_max_level; + int ret = 0; + + /* gfxclk */ + soft_min_level = arcturus_find_lowest_dpm_level(&(dpm_table->gfx_table)); + soft_max_level = arcturus_find_highest_dpm_level(&(dpm_table->gfx_table)); + dpm_table->gfx_table.dpm_state.soft_min_level = + dpm_table->gfx_table.dpm_levels[soft_min_level].value; + dpm_table->gfx_table.dpm_state.soft_max_level = + dpm_table->gfx_table.dpm_levels[soft_max_level].value; + + /* uclk */ + soft_min_level = arcturus_find_lowest_dpm_level(&(dpm_table->mem_table)); + soft_max_level = arcturus_find_highest_dpm_level(&(dpm_table->mem_table)); + dpm_table->mem_table.dpm_state.soft_min_level = + dpm_table->gfx_table.dpm_levels[soft_min_level].value; + dpm_table->mem_table.dpm_state.soft_max_level = + dpm_table->gfx_table.dpm_levels[soft_max_level].value; + + /* socclk */ + soft_min_level = arcturus_find_lowest_dpm_level(&(dpm_table->soc_table)); + soft_max_level = arcturus_find_highest_dpm_level(&(dpm_table->soc_table)); + dpm_table->soc_table.dpm_state.soft_min_level = + dpm_table->soc_table.dpm_levels[soft_min_level].value; + dpm_table->soc_table.dpm_state.soft_max_level = + dpm_table->soc_table.dpm_levels[soft_max_level].value; + + ret = arcturus_upload_dpm_level(smu, false, 0xFFFFFFFF); + if (ret) { + pr_err("Failed to upload DPM Bootup Levels!"); + return ret; + } + + ret = arcturus_upload_dpm_level(smu, true, 0xFFFFFFFF); + if (ret) { + pr_err("Failed to upload DPM Max Levels!"); + return ret; + } + + return ret; +} + +static int +arcturus_get_profiling_clk_mask(struct smu_context *smu, + enum amd_dpm_forced_level level, + uint32_t *sclk_mask, + uint32_t *mclk_mask, + uint32_t *soc_mask) +{ + struct arcturus_dpm_table *dpm_table = + (struct arcturus_dpm_table *)smu->smu_dpm.dpm_context; + struct arcturus_single_dpm_table *gfx_dpm_table; + struct arcturus_single_dpm_table *mem_dpm_table; + struct arcturus_single_dpm_table *soc_dpm_table; + + if (!smu->smu_dpm.dpm_context) + return -EINVAL; + + gfx_dpm_table = &dpm_table->gfx_table; + mem_dpm_table = &dpm_table->mem_table; + soc_dpm_table = &dpm_table->soc_table; + + *sclk_mask = 0; + *mclk_mask = 0; + *soc_mask = 0; + + if (gfx_dpm_table->count > ARCTURUS_UMD_PSTATE_GFXCLK_LEVEL && + mem_dpm_table->count > ARCTURUS_UMD_PSTATE_MCLK_LEVEL && + soc_dpm_table->count > ARCTURUS_UMD_PSTATE_SOCCLK_LEVEL) { + *sclk_mask = ARCTURUS_UMD_PSTATE_GFXCLK_LEVEL; + *mclk_mask = ARCTURUS_UMD_PSTATE_MCLK_LEVEL; + *soc_mask = ARCTURUS_UMD_PSTATE_SOCCLK_LEVEL; + } + + if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) { + *sclk_mask = 0; + } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) { + *mclk_mask = 0; + } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { + *sclk_mask = gfx_dpm_table->count - 1; + *mclk_mask = mem_dpm_table->count - 1; + *soc_mask = soc_dpm_table->count - 1; + } + + return 0; +} + static void arcturus_dump_pptable(struct smu_context *smu) { struct smu_table_context *table_context = &smu->smu_table; @@ -1546,6 +1764,9 @@ static const struct pptable_funcs arcturus_ppt_funcs = { .read_sensor = arcturus_read_sensor, .get_fan_speed_percent = arcturus_get_fan_speed_percent, .get_fan_speed_rpm = arcturus_get_fan_speed_rpm, + .force_dpm_limit_value = arcturus_force_dpm_limit_value, + .unforce_dpm_levels = arcturus_unforce_dpm_levels, + 
.get_profiling_clk_mask = arcturus_get_profiling_clk_mask, /* debug (internal used) */ .dump_pptable = arcturus_dump_pptable, }; -- cgit v1.2.3 From 1f23cadbe0775559b3152a4f1fcb566d8cf6f571 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Tue, 23 Jul 2019 20:28:14 +0800 Subject: drm/amd/powerplay: correct arcturus current clock level calculation There may be 1Mhz delta between target and actual frequency. That should be taken into consideration for current level check. Signed-off-by: Evan Quan Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/arcturus_ppt.c | 35 +++++++++++++++++++++------- 1 file changed, 27 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c index fa2845b9330b..c67a9914ce7b 100644 --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c @@ -51,6 +51,9 @@ #define SMU_FEATURES_HIGH_MASK 0xFFFFFFFF00000000 #define SMU_FEATURES_HIGH_SHIFT 32 +/* possible frequency drift (1Mhz) */ +#define EPSILON 1 + static struct smu_11_0_cmn2aisc_mapping arcturus_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(TestMessage, PPSMC_MSG_TestMessage), MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion), @@ -565,6 +568,12 @@ static int arcturus_get_clk_table(struct smu_context *smu, return 0; } +static int arcturus_freqs_in_same_level(int32_t frequency1, + int32_t frequency2) +{ + return (abs(frequency1 - frequency2) <= EPSILON); +} + static int arcturus_print_clk_levels(struct smu_context *smu, enum smu_clk_type type, char *buf) { @@ -595,8 +604,9 @@ static int arcturus_print_clk_levels(struct smu_context *smu, for (i = 0; i < clocks.num_levels; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, - (clocks.data[i].clocks_in_khz == now * 10) - ? "*" : ""); + arcturus_freqs_in_same_level( + clocks.data[i].clocks_in_khz / 1000, + now / 100) ? "*" : ""); break; case SMU_MCLK: @@ -616,8 +626,9 @@ static int arcturus_print_clk_levels(struct smu_context *smu, for (i = 0; i < clocks.num_levels; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, - (clocks.data[i].clocks_in_khz == now * 10) - ? "*" : ""); + arcturus_freqs_in_same_level( + clocks.data[i].clocks_in_khz / 1000, + now / 100) ? "*" : ""); break; case SMU_SOCCLK: @@ -637,8 +648,9 @@ static int arcturus_print_clk_levels(struct smu_context *smu, for (i = 0; i < clocks.num_levels; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, - (clocks.data[i].clocks_in_khz == now * 10) - ? "*" : ""); + arcturus_freqs_in_same_level( + clocks.data[i].clocks_in_khz / 1000, + now / 100) ? "*" : ""); break; case SMU_FCLK: @@ -649,11 +661,18 @@ static int arcturus_print_clk_levels(struct smu_context *smu, } single_dpm_table = &(dpm_table->fclk_table); + ret = arcturus_get_clk_table(smu, &clocks, single_dpm_table); + if (ret) { + pr_err("Attempt to get fclk levels Failed!"); + return ret; + } + for (i = 0; i < single_dpm_table->count; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", i, single_dpm_table->dpm_levels[i].value, - (single_dpm_table->dpm_levels[i].value == now / 100) - ? "*" : ""); + arcturus_freqs_in_same_level( + clocks.data[i].clocks_in_khz / 1000, + now / 100) ? 
"*" : ""); break; default: -- cgit v1.2.3 From b4af964e75c4163fe3baf98193495f7921a4b3b7 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Tue, 30 Jul 2019 22:52:37 -0500 Subject: drm/amd/powerplay: make power limit retrieval as asic specific The power limit retrieval should be done per asic. Since we may need to lookup in the pptable and that's really asic specific. Signed-off-by: Evan Quan Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 2 +- drivers/gpu/drm/amd/powerplay/arcturus_ppt.c | 51 ++++++++++++++++++++++++ drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 4 +- drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 51 ++++++++++++++++++++++++ drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 55 ++++++-------------------- 5 files changed, 116 insertions(+), 47 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index b1e0e2a4aca1..13b2c8a60232 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -1136,7 +1136,7 @@ static int smu_smc_table_hw_init(struct smu_context *smu, if (ret) return ret; - ret = smu_get_power_limit(smu, &smu->default_power_limit, false); + ret = smu_get_power_limit(smu, &smu->default_power_limit, true); if (ret) return ret; } diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c index c67a9914ce7b..9360f5a25b69 100644 --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c @@ -1323,6 +1323,56 @@ arcturus_get_profiling_clk_mask(struct smu_context *smu, return 0; } +static int arcturus_get_power_limit(struct smu_context *smu, + uint32_t *limit, + bool asic_default) +{ + PPTable_t *pptable = smu->smu_table.driver_pptable; + uint32_t asic_default_power_limit; + int ret = 0; + int power_src; + + if (!smu->default_power_limit || + !smu->power_limit) { + if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) { + power_src = smu_power_get_index(smu, SMU_POWER_SOURCE_AC); + if (power_src < 0) + return -EINVAL; + + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit, + power_src << 16); + if (ret) { + pr_err("[%s] get PPT limit failed!", __func__); + return ret; + } + smu_read_smc_arg(smu, &asic_default_power_limit); + } else { + /* the last hope to figure out the ppt limit */ + if (!pptable) { + pr_err("Cannot get PPT limit due to pptable missing!"); + return -EINVAL; + } + asic_default_power_limit = + pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0]; + } + + if (smu->od_enabled) { + asic_default_power_limit *= (100 + smu->smu_table.TDPODLimit); + asic_default_power_limit /= 100; + } + + smu->default_power_limit = asic_default_power_limit; + smu->power_limit = asic_default_power_limit; + } + + if (asic_default) + *limit = smu->default_power_limit; + else + *limit = smu->power_limit; + + return 0; +} + static void arcturus_dump_pptable(struct smu_context *smu) { struct smu_table_context *table_context = &smu->smu_table; @@ -1788,6 +1838,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = { .get_profiling_clk_mask = arcturus_get_profiling_clk_mask, /* debug (internal used) */ .dump_pptable = arcturus_dump_pptable, + .get_power_limit = arcturus_get_power_limit, }; void arcturus_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index 76edb2ccf160..1ecd73cd768c 100644 --- 
a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h @@ -450,6 +450,7 @@ struct pptable_funcs { int (*set_performance_level)(struct smu_context *smu, enum amd_dpm_forced_level level); int (*display_disable_memory_clock_switch)(struct smu_context *smu, bool disable_memory_clock_switch); void (*dump_pptable)(struct smu_context *smu); + int (*get_power_limit)(struct smu_context *smu, uint32_t *limit, bool asic_default); }; struct smu_funcs @@ -482,7 +483,6 @@ struct smu_funcs int (*set_allowed_mask)(struct smu_context *smu); int (*get_enabled_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num); int (*notify_display_change)(struct smu_context *smu); - int (*get_power_limit)(struct smu_context *smu, uint32_t *limit, bool def); int (*set_power_limit)(struct smu_context *smu, uint32_t n); int (*get_current_clk_freq)(struct smu_context *smu, enum smu_clk_type clk_id, uint32_t *value); int (*init_max_sustainable_clocks)(struct smu_context *smu); @@ -611,7 +611,7 @@ struct smu_funcs #define smu_set_default_od8_settings(smu) \ ((smu)->ppt_funcs->set_default_od8_settings ? (smu)->ppt_funcs->set_default_od8_settings((smu)) : 0) #define smu_get_power_limit(smu, limit, def) \ - ((smu)->funcs->get_power_limit ? (smu)->funcs->get_power_limit((smu), (limit), (def)) : 0) + ((smu)->ppt_funcs->get_power_limit ? (smu)->ppt_funcs->get_power_limit((smu), (limit), (def)) : 0) #define smu_set_power_limit(smu, limit) \ ((smu)->funcs->set_power_limit ? (smu)->funcs->set_power_limit((smu), (limit)) : 0) #define smu_get_current_clk_freq(smu, clk_id, value) \ diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index 5ae19ae58807..106352a4fb82 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -1509,6 +1509,56 @@ static int navi10_display_disable_memory_clock_switch(struct smu_context *smu, return ret; } +static int navi10_get_power_limit(struct smu_context *smu, + uint32_t *limit, + bool asic_default) +{ + PPTable_t *pptable = smu->smu_table.driver_pptable; + uint32_t asic_default_power_limit; + int ret = 0; + int power_src; + + if (!smu->default_power_limit || + !smu->power_limit) { + if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) { + power_src = smu_power_get_index(smu, SMU_POWER_SOURCE_AC); + if (power_src < 0) + return -EINVAL; + + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit, + power_src << 16); + if (ret) { + pr_err("[%s] get PPT limit failed!", __func__); + return ret; + } + smu_read_smc_arg(smu, &asic_default_power_limit); + } else { + /* the last hope to figure out the ppt limit */ + if (!pptable) { + pr_err("Cannot get PPT limit due to pptable missing!"); + return -EINVAL; + } + asic_default_power_limit = + pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0]; + } + + if (smu->od_enabled) { + asic_default_power_limit *= (100 + smu->smu_table.TDPODLimit); + asic_default_power_limit /= 100; + } + + smu->default_power_limit = asic_default_power_limit; + smu->power_limit = asic_default_power_limit; + } + + if (asic_default) + *limit = smu->default_power_limit; + else + *limit = smu->power_limit; + + return 0; +} + static const struct pptable_funcs navi10_ppt_funcs = { .tables_init = navi10_tables_init, .alloc_dpm_context = navi10_allocate_dpm_context, @@ -1546,6 +1596,7 @@ static const struct pptable_funcs navi10_ppt_funcs = { .set_performance_level = navi10_set_performance_level, .get_thermal_temperature_range = 
navi10_get_thermal_temperature_range, .display_disable_memory_clock_switch = navi10_display_disable_memory_clock_switch, + .get_power_limit = navi10_get_power_limit, }; void navi10_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index 89f4c9e19642..35669e80a246 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -1015,64 +1015,32 @@ static int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu) return 0; } -static int smu_v11_0_get_power_limit(struct smu_context *smu, - uint32_t *limit, - bool get_default) +static int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n) { int ret = 0; - int power_src; - power_src = smu_power_get_index(smu, SMU_POWER_SOURCE_AC); - if (power_src < 0) + if (n > smu->default_power_limit) { + pr_err("New power limit is over the max allowed %d\n", + smu->default_power_limit); return -EINVAL; - - if (get_default) { - mutex_lock(&smu->mutex); - *limit = smu->default_power_limit; - if (smu->od_enabled) { - *limit *= (100 + smu->smu_table.TDPODLimit); - *limit /= 100; - } - mutex_unlock(&smu->mutex); - } else { - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit, - power_src << 16); - if (ret) { - pr_err("[%s] get PPT limit failed!", __func__); - return ret; - } - smu_read_smc_arg(smu, limit); - smu->power_limit = *limit; } - return ret; -} - -static int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n) -{ - uint32_t max_power_limit; - int ret = 0; - if (n == 0) n = smu->default_power_limit; - max_power_limit = smu->default_power_limit; - - if (smu->od_enabled) { - max_power_limit *= (100 + smu->smu_table.TDPODLimit); - max_power_limit /= 100; + if (!smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) { + pr_err("Setting new power limit is not supported!\n"); + return -EOPNOTSUPP; } - if (n > max_power_limit) - return -EINVAL; - if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n); if (ret) { - pr_err("[%s] Set power limit Failed!", __func__); + pr_err("[%s] Set power limit Failed!\n", __func__); return ret; } + smu->power_limit = n; - return ret; + return 0; } static int smu_v11_0_get_current_clk_freq(struct smu_context *smu, @@ -1753,7 +1721,6 @@ static const struct smu_funcs smu_v11_0_funcs = { .get_enabled_mask = smu_v11_0_get_enabled_mask, .system_features_control = smu_v11_0_system_features_control, .notify_display_change = smu_v11_0_notify_display_change, - .get_power_limit = smu_v11_0_get_power_limit, .set_power_limit = smu_v11_0_set_power_limit, .get_current_clk_freq = smu_v11_0_get_current_clk_freq, .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks, -- cgit v1.2.3 From 861324983d4f7fac6a66ddc021511df7d4193e2b Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Thu, 25 Jul 2019 17:22:01 +0800 Subject: drm/amdgpu: correct irq type used for sdma ecc we should pass irq type, instead of irq client id, to irq_get/put interface Signed-off-by: Hawking Zhang Reviewed-by: Feifei Xu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 31a61430218c..a33bd867287e 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ 
b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1705,7 +1705,7 @@ static int sdma_v4_0_late_init(void *handle) resume: for (i = 0; i < adev->sdma.num_instances; i++) { r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq, - sdma_v4_0_seq_to_irq_id(i)); + AMDGPU_SDMA_IRQ_INSTANCE0 + i); if (r) goto irq; } @@ -1849,7 +1849,7 @@ static int sdma_v4_0_hw_fini(void *handle) for (i = 0; i < adev->sdma.num_instances; i++) { amdgpu_irq_put(adev, &adev->sdma.ecc_irq, - sdma_v4_0_seq_to_irq_id(i)); + AMDGPU_SDMA_IRQ_INSTANCE0 + i); } sdma_v4_0_ctx_switch_enable(adev, false); -- cgit v1.2.3 From 59de58f84f7f561fe67b2d78a6a2cae6c0b50c24 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Thu, 25 Jul 2019 16:40:51 +0800 Subject: drm/amd/powerplay: determine the features to enable by pptable only Per current logics, the features to enable are determined together by driver and pptable. This is not efficient in co-debug with firmware team. Signed-off-by: Evan Quan Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/arcturus_ppt.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c index 9360f5a25b69..215f7173fca8 100644 --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c @@ -327,7 +327,6 @@ static int arcturus_allocate_dpm_context(struct smu_context *smu) return 0; } -#define FEATURE_MASK(feature) (1ULL << feature) static int arcturus_get_allowed_feature_mask(struct smu_context *smu, uint32_t *feature_mask, uint32_t num) @@ -335,9 +334,8 @@ arcturus_get_allowed_feature_mask(struct smu_context *smu, if (num > 2) return -EINVAL; - memset(feature_mask, 0, sizeof(uint32_t) * num); - - *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT); + /* pptable will handle the features to enable */ + memset(feature_mask, 0xFF, sizeof(uint32_t) * num); return 0; } -- cgit v1.2.3 From d65848657c3da5c0d4b685f823d0230f151ab34e Mon Sep 17 00:00:00 2001 From: Kent Russell Date: Tue, 23 Jul 2019 10:18:01 -0400 Subject: drm/amdkfd: Fix byte align on VegaM This was missed during the addition of VegaM support Reviewed-by: Alex Deucher Signed-off-by: Kent Russell Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 1d3ee9c42f7e..6a5c96e519b1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1140,7 +1140,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( adev->asic_type != CHIP_FIJI && adev->asic_type != CHIP_POLARIS10 && adev->asic_type != CHIP_POLARIS11 && - adev->asic_type != CHIP_POLARIS12) ? + adev->asic_type != CHIP_POLARIS12 && + adev->asic_type != CHIP_VEGAM) ? VI_BO_SIZE_ALIGN : 1; mapping_flags = AMDGPU_VM_PAGE_READABLE; -- cgit v1.2.3 From 2c0f07fe3ca57c8fb4ee179c9fb50d6eba75349e Mon Sep 17 00:00:00 2001 From: Kevin Wang Date: Mon, 3 Jun 2019 15:58:31 +0800 Subject: drm/amd/powerplay: add callback function of get_thermal_temperature_range 1. the thermal temperature is asic related data, move the code logic to xxx_ppt.c. 2. replace data structure PP_TemperatureRange with smu_temperature_range. 3. change temperature uint from temp*1000 to temp (temperature uint). 
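A small standalone sketch of the unit convention described in item 3 above, assuming SMU_TEMPERATURE_UNITS_PER_CENTIGRADES is the 1000x (millidegree) scale factor applied only at the dpm thermal boundary, as in the hunks below:

/*
 * Standalone sketch, not kernel code.  The range callback returns plain
 * degrees C; scaling by SMU_TEMPERATURE_UNITS_PER_CENTIGRADES (assumed
 * to be 1000) happens once, when the values are copied into the dpm
 * thermal fields.
 */
#include <stdio.h>

#define SMU_TEMPERATURE_UNITS_PER_CENTIGRADES 1000

struct smu_temperature_range {
	int min;	/* degrees C */
	int max;	/* degrees C */
};

int main(void)
{
	/* 118 is a placeholder software shutdown temperature */
	struct smu_temperature_range range = { .min = 0, .max = 118 };
	int min_temp = range.min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
	int max_temp = range.max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;

	printf("dpm thermal range: %d .. %d (1/1000 degrees C)\n",
	       min_temp, max_temp);
	return 0;
}
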
Signed-off-by: Kevin Wang Signed-off-by: Kenneth Feng Acked-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 1 - drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 17 +++++++++++++ drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 18 +++++++++----- drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 34 +++++++++----------------- 4 files changed, 40 insertions(+), 30 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index 22e46a289a16..208e6711d506 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h @@ -429,7 +429,6 @@ struct smu_table_context struct smu_table *tables; uint32_t table_count; struct smu_table memory_pool; - uint16_t software_shutdown_temp; uint8_t thermal_controller_type; uint16_t TDPODLimit; diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index 4aaad255a288..3f68268a8733 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -1620,6 +1620,22 @@ static int navi10_set_performance_level(struct smu_context *smu, enum amd_dpm_fo return ret; } +static int navi10_get_thermal_temperature_range(struct smu_context *smu, + struct smu_temperature_range *range) +{ + struct smu_table_context *table_context = &smu->smu_table; + struct smu_11_0_powerplay_table *powerplay_table = table_context->power_play_table; + + if (!range || !powerplay_table) + return -EINVAL; + + /* The unit is temperature */ + range->min = 0; + range->max = powerplay_table->software_shutdown_temp; + + return 0; +} + static const struct pptable_funcs navi10_ppt_funcs = { .tables_init = navi10_tables_init, .alloc_dpm_context = navi10_allocate_dpm_context, @@ -1657,6 +1673,7 @@ static const struct pptable_funcs navi10_ppt_funcs = { .get_ppfeature_status = navi10_get_ppfeature_status, .set_ppfeature_status = navi10_set_ppfeature_status, .set_performance_level = navi10_set_performance_level, + .get_thermal_temperature_range = navi10_get_thermal_temperature_range, }; void navi10_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index caca9091bfcc..1ecb409e3bed 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -1124,10 +1124,8 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu, struct smu_temperature_range *range) { struct amdgpu_device *adev = smu->adev; - int low = SMU_THERMAL_MINIMUM_ALERT_TEMP * - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; - int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP * - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + int low = SMU_THERMAL_MINIMUM_ALERT_TEMP; + int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP; uint32_t val; if (!range) @@ -1138,6 +1136,9 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu, if (high > range->max) high = range->max; + low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP, range->min); + high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP, range->max); + if (low > high) return -EINVAL; @@ -1146,8 +1147,8 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu, val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0); val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0); - val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / 
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES)); - val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES)); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff)); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff)); val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK); WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val); @@ -1186,7 +1187,10 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu) if (!smu->pm_enabled) return ret; + ret = smu_get_thermal_temperature_range(smu, &range); + if (ret) + return ret; if (smu->smu_table.thermal_controller_type) { ret = smu_v11_0_set_thermal_range(smu, &range); @@ -1211,6 +1215,8 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu) adev->pm.dpm.thermal.min_mem_temp = range.mem_min; adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max; adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max; + adev->pm.dpm.thermal.min_temp = range.min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.max_temp = range.max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; return ret; } diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c index dc139a6feeb1..dd6fd1c8bf24 100644 --- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c @@ -450,7 +450,6 @@ static int vega20_store_powerplay_table(struct smu_context *smu) memcpy(table_context->driver_pptable, &powerplay_table->smcPPTable, sizeof(PPTable_t)); - table_context->software_shutdown_temp = powerplay_table->usSoftwareShutdownTemp; table_context->thermal_controller_type = powerplay_table->ucThermalControllerType; table_context->TDPODLimit = le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE]); @@ -3234,35 +3233,24 @@ static int vega20_set_watermarks_table(struct smu_context *smu, return 0; } -static const struct smu_temperature_range vega20_thermal_policy[] = -{ - {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000}, - { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000}, -}; - static int vega20_get_thermal_temperature_range(struct smu_context *smu, struct smu_temperature_range *range) { - + struct smu_table_context *table_context = &smu->smu_table; + ATOM_Vega20_POWERPLAYTABLE *powerplay_table = table_context->power_play_table; PPTable_t *pptable = smu->smu_table.driver_pptable; - if (!range) + if (!range || !powerplay_table) return -EINVAL; - memcpy(range, &vega20_thermal_policy[0], sizeof(struct smu_temperature_range)); - - range->max = pptable->TedgeLimit * - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; - range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE) * - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; - range->hotspot_crit_max = pptable->ThotspotLimit * - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; - range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT) * - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; - range->mem_crit_max = pptable->ThbmLimit * - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; - range->mem_emergency_max = (pptable->ThbmLimit + CTF_OFFSET_HBM)* - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + /* The unit is temperature */ + range->min = 0; + range->max = powerplay_table->usSoftwareShutdownTemp; + range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE); + range->hotspot_crit_max = pptable->ThotspotLimit; + range->hotspot_emergency_max = 
(pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT); + range->mem_crit_max = pptable->ThbmLimit; + range->mem_emergency_max = (pptable->ThbmLimit + CTF_OFFSET_HBM); return 0; -- cgit v1.2.3 From 45a660143bf90a35ab64df663b88d82c02a17091 Mon Sep 17 00:00:00 2001 From: Kevin Wang Date: Tue, 23 Jul 2019 19:56:52 +0800 Subject: drm/amd/powerplay: fix temperature granularity error in smu11 in this patch, drm/amd/powerplay: add callback function of get_thermal_temperature_range the driver missed temperature granularity change on other temperature. Signed-off-by: Kevin Wang Reviewed-by: Evan Quan Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index 1ecb409e3bed..ac5b26228e75 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -1206,15 +1206,15 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu) return ret; } - adev->pm.dpm.thermal.min_temp = range.min; - adev->pm.dpm.thermal.max_temp = range.max; - adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max; - adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min; - adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max; - adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max; - adev->pm.dpm.thermal.min_mem_temp = range.mem_min; - adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max; - adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max; + adev->pm.dpm.thermal.min_temp = range.min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.max_temp = range.max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.min_mem_temp = range.mem_min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; adev->pm.dpm.thermal.min_temp = range.min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; adev->pm.dpm.thermal.max_temp = range.max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; -- cgit v1.2.3 From 090efd946d00cd23ce4ac25bce125f408b704d7d Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 25 Jul 2019 22:28:58 -0500 Subject: drm/amdgpu/powerplay: use proper revision id for navi The PCI revision id determines the sku. 
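That is, the SKU is keyed on the PCI config-space revision byte (adev->pdev->revision) rather than the internal adev->rev_id. A standalone sketch of the selection, reusing the 0xf0/0xc0 case values from the hunk below with placeholder clock numbers:

/*
 * Sketch only, not kernel code: peak-clock selection keyed on the PCI
 * revision byte.  The 0xf0/0xc0 cases mirror the hunk below; the MHz
 * values are hypothetical placeholders, not the NAVI10_PEAK_* defines.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t navi10_peak_sclk(uint8_t pci_revision)
{
	switch (pci_revision) {
	case 0xf0:		/* XTX, per the hunk below */
	case 0xc0:
		return 1750;	/* placeholder for NAVI10_PEAK_SCLK_XTX */
	default:		/* everything else */
		return 1625;	/* placeholder */
	}
}

int main(void)
{
	printf("rev 0xc0 -> %u MHz\n", navi10_peak_sclk(0xc0));
	printf("rev 0xc1 -> %u MHz\n", navi10_peak_sclk(0xc1));
	return 0;
}
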
Reviewed-by: Feifei Xu Reviewed-by: Kevin Wang Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index 3f68268a8733..be592d22bdcc 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -23,6 +23,7 @@ #include "pp_debug.h" #include +#include #include "amdgpu.h" #include "amdgpu_smu.h" #include "atomfirmware.h" @@ -1573,7 +1574,7 @@ static int navi10_set_peak_clock_by_device(struct smu_context *smu) uint32_t sclk_freq = 0, uclk_freq = 0; uint32_t uclk_level = 0; - switch (adev->rev_id) { + switch (adev->pdev->revision) { case 0xf0: /* XTX */ case 0xc0: sclk_freq = NAVI10_PEAK_SCLK_XTX; -- cgit v1.2.3 From 479156f2e5540077377a823eaf5a4263bd329063 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Thu, 25 Jul 2019 12:10:34 +0800 Subject: drm/amd/powerplay: fix null pointer dereference around dpm state relates DPM state relates are not supported on the new SW SMU ASICs. But still it's not OK to trigger null pointer dereference on accessing them. Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 18 +++++++++++++----- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 3 ++- 2 files changed, 15 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 03ca8c69114f..8c90baca07b2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -159,12 +159,16 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev, struct amdgpu_device *adev = ddev->dev_private; enum amd_pm_state_type pm; - if (is_support_sw_smu(adev) && adev->smu.ppt_funcs->get_current_power_state) - pm = amdgpu_smu_get_current_power_state(adev); - else if (adev->powerplay.pp_funcs->get_current_power_state) + if (is_support_sw_smu(adev)) { + if (adev->smu.ppt_funcs->get_current_power_state) + pm = amdgpu_smu_get_current_power_state(adev); + else + pm = adev->pm.dpm.user_state; + } else if (adev->powerplay.pp_funcs->get_current_power_state) { pm = amdgpu_dpm_get_current_power_state(adev); - else + } else { pm = adev->pm.dpm.user_state; + } return snprintf(buf, PAGE_SIZE, "%s\n", (pm == POWER_STATE_TYPE_BATTERY) ? 
"battery" : @@ -191,7 +195,11 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev, goto fail; } - if (adev->powerplay.pp_funcs->dispatch_tasks) { + if (is_support_sw_smu(adev)) { + mutex_lock(&adev->pm.mutex); + adev->pm.dpm.user_state = state; + mutex_unlock(&adev->pm.mutex); + } else if (adev->powerplay.pp_funcs->dispatch_tasks) { amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state); } else { mutex_lock(&adev->pm.mutex); diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index c097113c3976..88ed85e3d233 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -306,7 +306,8 @@ int smu_get_power_num_states(struct smu_context *smu, /* not support power state */ memset(state_info, 0, sizeof(struct pp_states_info)); - state_info->nums = 0; + state_info->nums = 1; + state_info->states[0] = POWER_STATE_TYPE_DEFAULT; return 0; } -- cgit v1.2.3 From f0bc1ee473fefd4d9f2ace9fad1cefdc0b7f6fdd Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Thu, 25 Jul 2019 10:12:42 +0800 Subject: drm/amd/powerplay: enable SW SMU reset functionality Move SMU irq handler register to sw_init as that's totally software related. Otherwise, it will prevent SMU reset working. Signed-off-by: Evan Quan Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 88ed85e3d233..93cd969e5cf5 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -724,6 +724,12 @@ static int smu_sw_init(void *handle) return ret; } + ret = smu_register_irq_handler(smu); + if (ret) { + pr_err("Failed to register smc irq handler!\n"); + return ret; + } + return 0; } @@ -733,6 +739,9 @@ static int smu_sw_fini(void *handle) struct smu_context *smu = &adev->smu; int ret; + kfree(smu->irq_source); + smu->irq_source = NULL; + ret = smu_smc_table_sw_fini(smu); if (ret) { pr_err("Failed to sw fini smc table!\n"); @@ -1089,10 +1098,6 @@ static int smu_hw_init(void *handle) if (ret) goto failed; - ret = smu_register_irq_handler(smu); - if (ret) - goto failed; - if (!smu->pm_enabled) adev->pm.dpm_enabled = false; else @@ -1122,9 +1127,6 @@ static int smu_hw_fini(void *handle) kfree(table_context->overdrive_table); table_context->overdrive_table = NULL; - kfree(smu->irq_source); - smu->irq_source = NULL; - ret = smu_fini_fb_allocations(smu); if (ret) return ret; -- cgit v1.2.3 From 67d0859e2758ef992fd32499747ce4b1038a63c0 Mon Sep 17 00:00:00 2001 From: Christian König Date: Tue, 30 Jul 2019 11:17:03 +0200 Subject: drm/amdgpu: fix error handling in amdgpu_cs_process_fence_dep MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We always need to drop the ctx reference and should check for errors first and then dereference the fence pointer. 
Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index e069de8b54e6..4e4094f842e7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1044,29 +1044,27 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p, return r; } - fence = amdgpu_ctx_get_fence(ctx, entity, - deps[i].handle); + fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle); + amdgpu_ctx_put(ctx); + + if (IS_ERR(fence)) + return PTR_ERR(fence); + else if (!fence) + continue; if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) { - struct drm_sched_fence *s_fence = to_drm_sched_fence(fence); + struct drm_sched_fence *s_fence; struct dma_fence *old = fence; + s_fence = to_drm_sched_fence(fence); fence = dma_fence_get(&s_fence->scheduled); dma_fence_put(old); } - if (IS_ERR(fence)) { - r = PTR_ERR(fence); - amdgpu_ctx_put(ctx); + r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true); + dma_fence_put(fence); + if (r) return r; - } else if (fence) { - r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, - true); - dma_fence_put(fence); - amdgpu_ctx_put(ctx); - if (r) - return r; - } } return 0; } -- cgit v1.2.3 From 929e571c04c285861e0bb049a396a2bdaea63282 Mon Sep 17 00:00:00 2001 From: Wang Xiayang Date: Sat, 27 Jul 2019 17:30:30 +0800 Subject: drm/amdgpu: fix a potential information leaking bug MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Coccinelle reports a path that the array "data" is never initialized. The path skips the checks in the conditional branches when either of callback functions, read_wave_vgprs and read_wave_sgprs, is not registered. Later, the uninitialized "data" array is read in the while-loop below and passed to put_user(). Fix the path by allocating the array with kcalloc(). The patch is simplier than adding a fall-back branch that explicitly calls memset(data, 0, ...). Also it does not need the multiplication 1024*sizeof(*data) as the size parameter for memset() though there is no risk of integer overflow. Signed-off-by: Wang Xiayang Reviewed-by: Chunming Zhou Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index 6d54decef7f8..5652cc72ed3a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -707,7 +707,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, thread = (*pos & GENMASK_ULL(59, 52)) >> 52; bank = (*pos & GENMASK_ULL(61, 60)) >> 60; - data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL); + data = kcalloc(1024, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; -- cgit v1.2.3 From cb0c43f30ca6a34cf9e796d6ca165668cbc2ec89 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 30 Jul 2019 17:34:41 +0100 Subject: drm/i915: Avoid ce->gem_context->i915 My plan for the future is to have kernel contexts not to have a GEM context backpointer (as they will not belong to any GEM context). 
In a few places, we use ce->gem_context to simply obtain the i915 backpointer, for which we can use ce->engine->i915 instead. Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190730163441.16477-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_client_blt.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_context.c | 4 ++-- drivers/gpu/drm/i915/i915_perf.c | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c index 2312a0c6af89..997e122545bc 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c @@ -152,7 +152,7 @@ static void clear_pages_dma_fence_cb(struct dma_fence *fence, static void clear_pages_worker(struct work_struct *work) { struct clear_pages_work *w = container_of(work, typeof(*w), work); - struct drm_i915_private *i915 = w->ce->gem_context->i915; + struct drm_i915_private *i915 = w->ce->engine->i915; struct drm_i915_gem_object *obj = w->sleeve->vma->obj; struct i915_vma *vma = w->sleeve->vma; struct i915_request *rq; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index b28c7ca681a8..eb2d28a37b58 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -1194,7 +1194,7 @@ __intel_context_reconfigure_sseu(struct intel_context *ce, { int ret; - GEM_BUG_ON(INTEL_GEN(ce->gem_context->i915) < 8); + GEM_BUG_ON(INTEL_GEN(ce->engine->i915) < 8); ret = intel_context_lock_pinned(ce); if (ret) @@ -1216,7 +1216,7 @@ unlock: static int intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu) { - struct drm_i915_private *i915 = ce->gem_context->i915; + struct drm_i915_private *i915 = ce->engine->i915; int ret; ret = mutex_lock_interruptible(&i915->drm.struct_mutex); diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 988a4092164e..f2cc69ccb635 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -1667,7 +1667,7 @@ gen8_update_reg_state_unlocked(struct intel_context *ce, u32 *reg_state, const struct i915_oa_config *oa_config) { - struct drm_i915_private *i915 = ce->gem_context->i915; + struct drm_i915_private *i915 = ce->engine->i915; u32 ctx_oactxctrl = i915->perf.oa.ctx_oactxctrl_offset; u32 ctx_flexeu0 = i915->perf.oa.ctx_flexeu0_offset; /* The MMIO offsets for Flex EU registers aren't contiguous */ -- cgit v1.2.3 From a02709818f397e7ed7a0943d65a49d54b2752626 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 22 Jul 2019 09:51:59 +0800 Subject: drm/amd/powerplay: add new sensor type for VCN powergate status VCN is widely used in new ASICs and different from tranditional UVD and VCE. 
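Consumers read the new value through the same sensor path as the legacy UVD/VCE power sensors (the debugfs change later in this series does exactly that). A toy standalone model of that dispatch, with simplified names rather than the real amdgpu interfaces:

/*
 * Toy model, not kernel code: adding one enum value lets a common
 * read_sensor dispatch report VCN power state next to the legacy
 * UVD/VCE sensors.  Names and return codes are simplified.
 */
#include <stdio.h>

enum pp_sensor {
	SENSOR_UVD_POWER,
	SENSOR_VCE_POWER,
	SENSOR_VCN_POWER_STATE,		/* the newly added value */
};

static int read_sensor(enum pp_sensor sensor, unsigned int *value)
{
	int vcn_power_gated = 0;	/* pretend VCN is currently powered up */

	switch (sensor) {
	case SENSOR_VCN_POWER_STATE:
		*value = vcn_power_gated ? 0 : 1;
		return 0;
	default:
		return -1;		/* not reported by this backend */
	}
}

int main(void)
{
	unsigned int value;

	if (!read_sensor(SENSOR_VCN_POWER_STATE, &value))
		printf("VCN: %s\n", value ? "Enabled" : "Disabled");
	return 0;
}
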
Signed-off-by: Evan Quan Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/kgd_pp_interface.h | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index 9f661bf96ed0..5b1ebb7f995a 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -123,6 +123,7 @@ enum amd_pp_sensors { AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, AMDGPU_PP_SENSOR_MIN_FAN_RPM, AMDGPU_PP_SENSOR_MAX_FAN_RPM, + AMDGPU_PP_SENSOR_VCN_POWER_STATE, }; enum amd_pp_task { -- cgit v1.2.3 From 201cd702b7012ecee2a613e09b6a227ca0e12504 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 22 Jul 2019 09:55:36 +0800 Subject: drm/amd/powerplay: support VCN powergate status retrieval on Raven Enable VCN powergate status report on Raven. Signed-off-by: Evan Quan Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index e32ae9d3373c..18e780f566fa 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -1111,6 +1111,7 @@ static int smu10_thermal_get_temperature(struct pp_hwmgr *hwmgr) static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx, void *value, int *size) { + struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); uint32_t sclk, mclk; int ret = 0; @@ -1132,6 +1133,10 @@ static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx, case AMDGPU_PP_SENSOR_GPU_TEMP: *((uint32_t *)value) = smu10_thermal_get_temperature(hwmgr); break; + case AMDGPU_PP_SENSOR_VCN_POWER_STATE: + *(uint32_t *)value = smu10_data->vcn_power_gated ? 0 : 1; + *size = 4; + break; default: ret = -EINVAL; break; @@ -1175,18 +1180,22 @@ static int smu10_powergate_sdma(struct pp_hwmgr *hwmgr, bool gate) static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate) { + struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); + if (bgate) { amdgpu_device_ip_set_powergating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_VCN, AMD_PG_STATE_GATE); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PowerDownVcn, 0); + smu10_data->vcn_power_gated = true; } else { smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PowerUpVcn, 0); amdgpu_device_ip_set_powergating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_VCN, AMD_PG_STATE_UNGATE); + smu10_data->vcn_power_gated = false; } } -- cgit v1.2.3 From e21e3581e2a1df75abb96b545be15e526bd8c1c6 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 22 Jul 2019 09:57:27 +0800 Subject: drm/amd/powerplay: support VCN powergate status retrieval for SW SMU Commonly used for VCN powergate status retrieval for SW SMU. 
Signed-off-by: Evan Quan Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 93cd969e5cf5..0685a3388e38 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -338,6 +338,10 @@ int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor, *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0; *size = 4; break; + case AMDGPU_PP_SENSOR_VCN_POWER_STATE: + *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT) ? 1 : 0; + *size = 4; + break; default: ret = -EINVAL; break; -- cgit v1.2.3 From a3ebbdb95f8c343a547ee2abec4d8abbf71f8a94 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 22 Jul 2019 10:27:21 +0800 Subject: drm/amd/powerplay: correct Navi10 VCN powergate control (v2) No VCN DPM bit check as that's different from VCN PG. Also no extra check for possible double enablement/disablement as that's already done by VCN. v2: check return value of smu_feature_set_enabled Signed-off-by: Evan Quan Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 28 ++++++++++------------------ 1 file changed, 10 insertions(+), 18 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index be592d22bdcc..cc0a3b2256af 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -578,28 +578,20 @@ static int navi10_set_default_dpm_table(struct smu_context *smu) static int navi10_dpm_set_uvd_enable(struct smu_context *smu, bool enable) { int ret = 0; - struct smu_power_context *smu_power = &smu->smu_power; - struct smu_power_gate *power_gate = &smu_power->power_gate; - if (enable && power_gate->uvd_gated) { - if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT)) { - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1); - if (ret) - return ret; - } - power_gate->uvd_gated = false; + if (enable) { + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1); + if (ret) + return ret; } else { - if (!enable && !power_gate->uvd_gated) { - if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT)) { - ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn); - if (ret) - return ret; - } - power_gate->uvd_gated = true; - } + ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn); + if (ret) + return ret; } - return 0; + ret = smu_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, enable); + + return ret; } static int navi10_get_current_clk_freq_by_table(struct smu_context *smu, -- cgit v1.2.3 From 6dee4829cfde106a8af7d0d3ba23022f8f054761 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 22 Jul 2019 10:42:29 +0800 Subject: drm/amd/powerplay: correct UVD/VCE/VCN power status retrieval VCN should be used for Vega20 later ASICs while UVD and VCE are for previous ASICs. 
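A minimal standalone sketch of the generation gate used in the hunk below; only the ordering of the chip enum matters, the numeric values are placeholders:

/*
 * Standalone sketch, not kernel code: ASICs newer than Vega20 report the
 * VCN sensor, older ones keep the UVD/VCE reporting.
 */
#include <stdio.h>

enum chip_type {
	CHIP_POLARIS10 = 20,	/* placeholder value */
	CHIP_VEGA20 = 40,	/* placeholder value */
	CHIP_NAVI10 = 50,	/* placeholder value */
};

static const char *video_power_sensor(enum chip_type asic_type)
{
	return asic_type > CHIP_VEGA20 ? "VCN_POWER_STATE"
				       : "UVD_POWER + VCE_POWER";
}

int main(void)
{
	printf("Vega20: %s\n", video_power_sensor(CHIP_VEGA20));
	printf("Navi10: %s\n", video_power_sensor(CHIP_NAVI10));
	return 0;
}
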
Signed-off-by: Evan Quan Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 56 ++++++++++++++++++++++------------ 1 file changed, 36 insertions(+), 20 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 8c90baca07b2..2b546567853b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -3075,28 +3075,44 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size)) seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64); - /* UVD clocks */ - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) { - if (!value) { - seq_printf(m, "UVD: Disabled\n"); - } else { - seq_printf(m, "UVD: Enabled\n"); - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size)) - seq_printf(m, "\t%u MHz (DCLK)\n", value/100); - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size)) - seq_printf(m, "\t%u MHz (VCLK)\n", value/100); + if (adev->asic_type > CHIP_VEGA20) { + /* VCN clocks */ + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) { + if (!value) { + seq_printf(m, "VCN: Disabled\n"); + } else { + seq_printf(m, "VCN: Enabled\n"); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size)) + seq_printf(m, "\t%u MHz (DCLK)\n", value/100); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size)) + seq_printf(m, "\t%u MHz (VCLK)\n", value/100); + } } - } - seq_printf(m, "\n"); + seq_printf(m, "\n"); + } else { + /* UVD clocks */ + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) { + if (!value) { + seq_printf(m, "UVD: Disabled\n"); + } else { + seq_printf(m, "UVD: Enabled\n"); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size)) + seq_printf(m, "\t%u MHz (DCLK)\n", value/100); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size)) + seq_printf(m, "\t%u MHz (VCLK)\n", value/100); + } + } + seq_printf(m, "\n"); - /* VCE clocks */ - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) { - if (!value) { - seq_printf(m, "VCE: Disabled\n"); - } else { - seq_printf(m, "VCE: Enabled\n"); - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size)) - seq_printf(m, "\t%u MHz (ECCLK)\n", value/100); + /* VCE clocks */ + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) { + if (!value) { + seq_printf(m, "VCE: Disabled\n"); + } else { + seq_printf(m, "VCE: Enabled\n"); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size)) + seq_printf(m, "\t%u MHz (ECCLK)\n", value/100); + } } } -- cgit v1.2.3 From f277bc0c98a407732d933ac5c53d9cd1224653cb Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 31 Jul 2019 09:11:26 +0100 Subject: drm/i915/selftests: Pass intel_context to igt_spinner Teach igt_spinner to only use our internal structs, decoupling the interface from the GEM contexts. This makes it easier to avoid requiring ce->gem_context back references for kernel_context that may have them in future. v2: Lift engine lock to verify_wa() caller. 
v3: Less than v2, but more so Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190731081126.9139-1-chris@chris-wilson.co.uk --- .../gpu/drm/i915/gem/selftests/i915_gem_context.c | 43 ++++---- drivers/gpu/drm/i915/gt/selftest_lrc.c | 115 ++++++++++++--------- drivers/gpu/drm/i915/gt/selftest_workarounds.c | 28 +++-- drivers/gpu/drm/i915/selftests/igt_spinner.c | 25 +++-- drivers/gpu/drm/i915/selftests/igt_spinner.h | 6 +- 5 files changed, 117 insertions(+), 100 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index 7f9f6701b32c..c24430352a38 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -821,8 +821,7 @@ err_vma: #define TEST_RESET BIT(2) static int -__sseu_prepare(struct drm_i915_private *i915, - const char *name, +__sseu_prepare(const char *name, unsigned int flags, struct intel_context *ce, struct igt_spinner **spin) @@ -838,14 +837,11 @@ __sseu_prepare(struct drm_i915_private *i915, if (!*spin) return -ENOMEM; - ret = igt_spinner_init(*spin, i915); + ret = igt_spinner_init(*spin, ce->engine->gt); if (ret) goto err_free; - rq = igt_spinner_create_request(*spin, - ce->gem_context, - ce->engine, - MI_NOOP); + rq = igt_spinner_create_request(*spin, ce, MI_NOOP); if (IS_ERR(rq)) { ret = PTR_ERR(rq); goto err_fini; @@ -871,8 +867,7 @@ err_free: } static int -__read_slice_count(struct drm_i915_private *i915, - struct intel_context *ce, +__read_slice_count(struct intel_context *ce, struct drm_i915_gem_object *obj, struct igt_spinner *spin, u32 *rpcs) @@ -901,7 +896,7 @@ __read_slice_count(struct drm_i915_private *i915, return ret; } - if (INTEL_GEN(i915) >= 11) { + if (INTEL_GEN(ce->engine->i915) >= 11) { s_mask = GEN11_RPCS_S_CNT_MASK; s_shift = GEN11_RPCS_S_CNT_SHIFT; } else { @@ -944,8 +939,7 @@ __check_rpcs(const char *name, u32 rpcs, int slices, unsigned int expected, } static int -__sseu_finish(struct drm_i915_private *i915, - const char *name, +__sseu_finish(const char *name, unsigned int flags, struct intel_context *ce, struct drm_i915_gem_object *obj, @@ -962,14 +956,13 @@ __sseu_finish(struct drm_i915_private *i915, goto out; } - ret = __read_slice_count(i915, ce, obj, + ret = __read_slice_count(ce, obj, flags & TEST_RESET ? 
NULL : spin, &rpcs); ret = __check_rpcs(name, rpcs, ret, expected, "Context", "!"); if (ret) goto out; - ret = __read_slice_count(i915, ce->engine->kernel_context, obj, - NULL, &rpcs); + ret = __read_slice_count(ce->engine->kernel_context, obj, NULL, &rpcs); ret = __check_rpcs(name, rpcs, ret, slices, "Kernel context", "!"); out: @@ -977,11 +970,12 @@ out: igt_spinner_end(spin); if ((flags & TEST_IDLE) && ret == 0) { - ret = i915_gem_wait_for_idle(i915, 0, MAX_SCHEDULE_TIMEOUT); + ret = i915_gem_wait_for_idle(ce->engine->i915, + 0, MAX_SCHEDULE_TIMEOUT); if (ret) return ret; - ret = __read_slice_count(i915, ce, obj, NULL, &rpcs); + ret = __read_slice_count(ce, obj, NULL, &rpcs); ret = __check_rpcs(name, rpcs, ret, expected, "Context", " after idle!"); } @@ -990,8 +984,7 @@ out: } static int -__sseu_test(struct drm_i915_private *i915, - const char *name, +__sseu_test(const char *name, unsigned int flags, struct intel_context *ce, struct drm_i915_gem_object *obj, @@ -1000,7 +993,7 @@ __sseu_test(struct drm_i915_private *i915, struct igt_spinner *spin = NULL; int ret; - ret = __sseu_prepare(i915, name, flags, ce, &spin); + ret = __sseu_prepare(name, flags, ce, &spin); if (ret) return ret; @@ -1008,7 +1001,7 @@ __sseu_test(struct drm_i915_private *i915, if (ret) goto out_spin; - ret = __sseu_finish(i915, name, flags, ce, obj, + ret = __sseu_finish(name, flags, ce, obj, hweight32(sseu.slice_mask), spin); out_spin: @@ -1088,22 +1081,22 @@ __igt_ctx_sseu(struct drm_i915_private *i915, goto out_context; /* First set the default mask. */ - ret = __sseu_test(i915, name, flags, ce, obj, engine->sseu); + ret = __sseu_test(name, flags, ce, obj, engine->sseu); if (ret) goto out_fail; /* Then set a power-gated configuration. */ - ret = __sseu_test(i915, name, flags, ce, obj, pg_sseu); + ret = __sseu_test(name, flags, ce, obj, pg_sseu); if (ret) goto out_fail; /* Back to defaults. */ - ret = __sseu_test(i915, name, flags, ce, obj, engine->sseu); + ret = __sseu_test(name, flags, ce, obj, engine->sseu); if (ret) goto out_fail; /* One last power-gated configuration for the road. 
*/ - ret = __sseu_test(i915, name, flags, ce, obj, pg_sseu); + ret = __sseu_test(name, flags, ce, obj, pg_sseu); if (ret) goto out_fail; diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index 60f27e52d267..b40b57d2daae 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -22,9 +22,9 @@ static int live_sanitycheck(void *arg) { struct drm_i915_private *i915 = arg; - struct intel_engine_cs *engine; + struct i915_gem_engines_iter it; struct i915_gem_context *ctx; - enum intel_engine_id id; + struct intel_context *ce; struct igt_spinner spin; intel_wakeref_t wakeref; int err = -ENOMEM; @@ -35,17 +35,17 @@ static int live_sanitycheck(void *arg) mutex_lock(&i915->drm.struct_mutex); wakeref = intel_runtime_pm_get(&i915->runtime_pm); - if (igt_spinner_init(&spin, i915)) + if (igt_spinner_init(&spin, &i915->gt)) goto err_unlock; ctx = kernel_context(i915); if (!ctx) goto err_spin; - for_each_engine(engine, i915, id) { + for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { struct i915_request *rq; - rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP); + rq = igt_spinner_create_request(&spin, ce, MI_NOOP); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto err_ctx; @@ -69,6 +69,7 @@ static int live_sanitycheck(void *arg) err = 0; err_ctx: + i915_gem_context_unlock_engines(ctx); kernel_context_close(ctx); err_spin: igt_spinner_fini(&spin); @@ -480,6 +481,24 @@ err_unlock: return err; } +static struct i915_request * +spinner_create_request(struct igt_spinner *spin, + struct i915_gem_context *ctx, + struct intel_engine_cs *engine, + u32 arb) +{ + struct intel_context *ce; + struct i915_request *rq; + + ce = i915_gem_context_get_engine(ctx, engine->id); + if (IS_ERR(ce)) + return ERR_CAST(ce); + + rq = igt_spinner_create_request(spin, ce, arb); + intel_context_put(ce); + return rq; +} + static int live_preempt(void *arg) { struct drm_i915_private *i915 = arg; @@ -499,10 +518,10 @@ static int live_preempt(void *arg) mutex_lock(&i915->drm.struct_mutex); wakeref = intel_runtime_pm_get(&i915->runtime_pm); - if (igt_spinner_init(&spin_hi, i915)) + if (igt_spinner_init(&spin_hi, &i915->gt)) goto err_unlock; - if (igt_spinner_init(&spin_lo, i915)) + if (igt_spinner_init(&spin_lo, &i915->gt)) goto err_spin_hi; ctx_hi = kernel_context(i915); @@ -529,8 +548,8 @@ static int live_preempt(void *arg) goto err_ctx_lo; } - rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine, - MI_ARB_CHECK); + rq = spinner_create_request(&spin_lo, ctx_lo, engine, + MI_ARB_CHECK); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto err_ctx_lo; @@ -545,8 +564,8 @@ static int live_preempt(void *arg) goto err_ctx_lo; } - rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine, - MI_ARB_CHECK); + rq = spinner_create_request(&spin_hi, ctx_hi, engine, + MI_ARB_CHECK); if (IS_ERR(rq)) { igt_spinner_end(&spin_lo); err = PTR_ERR(rq); @@ -603,10 +622,10 @@ static int live_late_preempt(void *arg) mutex_lock(&i915->drm.struct_mutex); wakeref = intel_runtime_pm_get(&i915->runtime_pm); - if (igt_spinner_init(&spin_hi, i915)) + if (igt_spinner_init(&spin_hi, &i915->gt)) goto err_unlock; - if (igt_spinner_init(&spin_lo, i915)) + if (igt_spinner_init(&spin_lo, &i915->gt)) goto err_spin_hi; ctx_hi = kernel_context(i915); @@ -632,8 +651,8 @@ static int live_late_preempt(void *arg) goto err_ctx_lo; } - rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine, - MI_ARB_CHECK); + rq = spinner_create_request(&spin_lo, ctx_lo, engine, + MI_ARB_CHECK); if 
(IS_ERR(rq)) { err = PTR_ERR(rq); goto err_ctx_lo; @@ -645,8 +664,8 @@ static int live_late_preempt(void *arg) goto err_wedged; } - rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine, - MI_NOOP); + rq = spinner_create_request(&spin_hi, ctx_hi, engine, + MI_NOOP); if (IS_ERR(rq)) { igt_spinner_end(&spin_lo); err = PTR_ERR(rq); @@ -711,7 +730,7 @@ static int preempt_client_init(struct drm_i915_private *i915, if (!c->ctx) return -ENOMEM; - if (igt_spinner_init(&c->spin, i915)) + if (igt_spinner_init(&c->spin, &i915->gt)) goto err_ctx; return 0; @@ -761,9 +780,9 @@ static int live_nopreempt(void *arg) engine->execlists.preempt_hang.count = 0; - rq_a = igt_spinner_create_request(&a.spin, - a.ctx, engine, - MI_ARB_CHECK); + rq_a = spinner_create_request(&a.spin, + a.ctx, engine, + MI_ARB_CHECK); if (IS_ERR(rq_a)) { err = PTR_ERR(rq_a); goto err_client_b; @@ -778,9 +797,9 @@ static int live_nopreempt(void *arg) goto err_wedged; } - rq_b = igt_spinner_create_request(&b.spin, - b.ctx, engine, - MI_ARB_CHECK); + rq_b = spinner_create_request(&b.spin, + b.ctx, engine, + MI_ARB_CHECK); if (IS_ERR(rq_b)) { err = PTR_ERR(rq_b); goto err_client_b; @@ -880,9 +899,9 @@ static int live_suppress_self_preempt(void *arg) engine->execlists.preempt_hang.count = 0; - rq_a = igt_spinner_create_request(&a.spin, - a.ctx, engine, - MI_NOOP); + rq_a = spinner_create_request(&a.spin, + a.ctx, engine, + MI_NOOP); if (IS_ERR(rq_a)) { err = PTR_ERR(rq_a); goto err_client_b; @@ -895,9 +914,9 @@ static int live_suppress_self_preempt(void *arg) } for (depth = 0; depth < 8; depth++) { - rq_b = igt_spinner_create_request(&b.spin, - b.ctx, engine, - MI_NOOP); + rq_b = spinner_create_request(&b.spin, + b.ctx, engine, + MI_NOOP); if (IS_ERR(rq_b)) { err = PTR_ERR(rq_b); goto err_client_b; @@ -1048,9 +1067,9 @@ static int live_suppress_wait_preempt(void *arg) goto err_client_3; for (i = 0; i < ARRAY_SIZE(client); i++) { - rq[i] = igt_spinner_create_request(&client[i].spin, - client[i].ctx, engine, - MI_NOOP); + rq[i] = spinner_create_request(&client[i].spin, + client[i].ctx, engine, + MI_NOOP); if (IS_ERR(rq[i])) { err = PTR_ERR(rq[i]); goto err_wedged; @@ -1157,9 +1176,9 @@ static int live_chain_preempt(void *arg) if (!intel_engine_has_preemption(engine)) continue; - rq = igt_spinner_create_request(&lo.spin, - lo.ctx, engine, - MI_ARB_CHECK); + rq = spinner_create_request(&lo.spin, + lo.ctx, engine, + MI_ARB_CHECK); if (IS_ERR(rq)) goto err_wedged; i915_request_add(rq); @@ -1183,18 +1202,18 @@ static int live_chain_preempt(void *arg) } for_each_prime_number_from(count, 1, ring_size) { - rq = igt_spinner_create_request(&hi.spin, - hi.ctx, engine, - MI_ARB_CHECK); + rq = spinner_create_request(&hi.spin, + hi.ctx, engine, + MI_ARB_CHECK); if (IS_ERR(rq)) goto err_wedged; i915_request_add(rq); if (!igt_wait_for_spinner(&hi.spin, rq)) goto err_wedged; - rq = igt_spinner_create_request(&lo.spin, - lo.ctx, engine, - MI_ARB_CHECK); + rq = spinner_create_request(&lo.spin, + lo.ctx, engine, + MI_ARB_CHECK); if (IS_ERR(rq)) goto err_wedged; i915_request_add(rq); @@ -1284,10 +1303,10 @@ static int live_preempt_hang(void *arg) mutex_lock(&i915->drm.struct_mutex); wakeref = intel_runtime_pm_get(&i915->runtime_pm); - if (igt_spinner_init(&spin_hi, i915)) + if (igt_spinner_init(&spin_hi, &i915->gt)) goto err_unlock; - if (igt_spinner_init(&spin_lo, i915)) + if (igt_spinner_init(&spin_lo, &i915->gt)) goto err_spin_hi; ctx_hi = kernel_context(i915); @@ -1308,8 +1327,8 @@ static int live_preempt_hang(void *arg) if 
(!intel_engine_has_preemption(engine)) continue; - rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine, - MI_ARB_CHECK); + rq = spinner_create_request(&spin_lo, ctx_lo, engine, + MI_ARB_CHECK); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto err_ctx_lo; @@ -1324,8 +1343,8 @@ static int live_preempt_hang(void *arg) goto err_ctx_lo; } - rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine, - MI_ARB_CHECK); + rq = spinner_create_request(&spin_hi, ctx_hi, engine, + MI_ARB_CHECK); if (IS_ERR(rq)) { igt_spinner_end(&spin_lo); err = PTR_ERR(rq); diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c index ab147985fa74..997da94821d9 100644 --- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c +++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c @@ -238,6 +238,7 @@ switch_to_scratch_context(struct intel_engine_cs *engine, struct igt_spinner *spin) { struct i915_gem_context *ctx; + struct intel_context *ce; struct i915_request *rq; intel_wakeref_t wakeref; int err = 0; @@ -248,10 +249,14 @@ switch_to_scratch_context(struct intel_engine_cs *engine, GEM_BUG_ON(i915_gem_context_is_bannable(ctx)); + ce = i915_gem_context_get_engine(ctx, engine->id); + GEM_BUG_ON(IS_ERR(ce)); + rq = ERR_PTR(-ENODEV); with_intel_runtime_pm(&engine->i915->runtime_pm, wakeref) - rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP); + rq = igt_spinner_create_request(spin, ce, MI_NOOP); + intel_context_put(ce); kernel_context_close(ctx); if (IS_ERR(rq)) { @@ -291,7 +296,7 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine, if (IS_ERR(ctx)) return PTR_ERR(ctx); - err = igt_spinner_init(&spin, i915); + err = igt_spinner_init(&spin, engine->gt); if (err) goto out_ctx; @@ -1083,7 +1088,7 @@ verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists, ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str); - for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { + for_each_gem_engine(ce, i915_gem_context_engines(ctx), it) { enum intel_engine_id id = ce->engine->id; ok &= engine_wa_list_verify(ce, @@ -1094,7 +1099,6 @@ verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists, &lists->engine[id].ctx_wa_list, str) == 0; } - i915_gem_context_unlock_engines(ctx); return ok; } @@ -1115,6 +1119,8 @@ live_gpu_reset_workarounds(void *arg) if (IS_ERR(ctx)) return PTR_ERR(ctx); + i915_gem_context_lock_engines(ctx); + pr_info("Verifying after GPU reset...\n"); igt_global_reset_lock(&i915->gt); @@ -1131,6 +1137,7 @@ live_gpu_reset_workarounds(void *arg) ok = verify_wa_lists(ctx, &lists, "after reset"); out: + i915_gem_context_unlock_engines(ctx); kernel_context_close(ctx); reference_lists_fini(i915, &lists); intel_runtime_pm_put(&i915->runtime_pm, wakeref); @@ -1143,10 +1150,10 @@ static int live_engine_reset_workarounds(void *arg) { struct drm_i915_private *i915 = arg; - struct intel_engine_cs *engine; + struct i915_gem_engines_iter it; struct i915_gem_context *ctx; + struct intel_context *ce; struct igt_spinner spin; - enum intel_engine_id id; struct i915_request *rq; intel_wakeref_t wakeref; struct wa_lists lists; @@ -1164,7 +1171,8 @@ live_engine_reset_workarounds(void *arg) reference_lists_init(i915, &lists); - for_each_engine(engine, i915, id) { + for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { + struct intel_engine_cs *engine = ce->engine; bool ok; pr_info("Verifying after %s reset...\n", engine->name); @@ -1183,11 +1191,11 @@ live_engine_reset_workarounds(void *arg) goto err; } - ret = 
igt_spinner_init(&spin, i915); + ret = igt_spinner_init(&spin, engine->gt); if (ret) goto err; - rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP); + rq = igt_spinner_create_request(&spin, ce, MI_NOOP); if (IS_ERR(rq)) { ret = PTR_ERR(rq); igt_spinner_fini(&spin); @@ -1214,8 +1222,8 @@ live_engine_reset_workarounds(void *arg) goto err; } } - err: + i915_gem_context_unlock_engines(ctx); reference_lists_fini(i915, &lists); intel_runtime_pm_put(&i915->runtime_pm, wakeref); igt_global_reset_unlock(&i915->gt); diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c index 89b6552a6497..41acf209ffdb 100644 --- a/drivers/gpu/drm/i915/selftests/igt_spinner.c +++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c @@ -9,25 +9,24 @@ #include "igt_spinner.h" -int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915) +int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt) { unsigned int mode; void *vaddr; int err; - GEM_BUG_ON(INTEL_GEN(i915) < 8); + GEM_BUG_ON(INTEL_GEN(gt->i915) < 8); memset(spin, 0, sizeof(*spin)); - spin->i915 = i915; - spin->gt = &i915->gt; + spin->gt = gt; - spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE); + spin->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); if (IS_ERR(spin->hws)) { err = PTR_ERR(spin->hws); goto err; } - spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE); + spin->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); if (IS_ERR(spin->obj)) { err = PTR_ERR(spin->obj); goto err_hws; @@ -41,7 +40,7 @@ int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915) } spin->seqno = memset(vaddr, 0xff, PAGE_SIZE); - mode = i915_coherent_map_type(i915); + mode = i915_coherent_map_type(gt->i915); vaddr = i915_gem_object_pin_map(spin->obj, mode); if (IS_ERR(vaddr)) { err = PTR_ERR(vaddr); @@ -87,22 +86,22 @@ static int move_to_active(struct i915_vma *vma, struct i915_request * igt_spinner_create_request(struct igt_spinner *spin, - struct i915_gem_context *ctx, - struct intel_engine_cs *engine, + struct intel_context *ce, u32 arbitration_command) { + struct intel_engine_cs *engine = ce->engine; struct i915_request *rq = NULL; struct i915_vma *hws, *vma; u32 *batch; int err; - spin->gt = engine->gt; + GEM_BUG_ON(spin->gt != ce->vm->gt); - vma = i915_vma_instance(spin->obj, ctx->vm, NULL); + vma = i915_vma_instance(spin->obj, ce->vm, NULL); if (IS_ERR(vma)) return ERR_CAST(vma); - hws = i915_vma_instance(spin->hws, ctx->vm, NULL); + hws = i915_vma_instance(spin->hws, ce->vm, NULL); if (IS_ERR(hws)) return ERR_CAST(hws); @@ -114,7 +113,7 @@ igt_spinner_create_request(struct igt_spinner *spin, if (err) goto unpin_vma; - rq = igt_request_alloc(ctx, engine); + rq = intel_context_create_request(ce); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto unpin_hws; diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.h b/drivers/gpu/drm/i915/selftests/igt_spinner.h index 1bfc39efa773..ec62c9ef320b 100644 --- a/drivers/gpu/drm/i915/selftests/igt_spinner.h +++ b/drivers/gpu/drm/i915/selftests/igt_spinner.h @@ -17,7 +17,6 @@ struct intel_gt; struct igt_spinner { - struct drm_i915_private *i915; struct intel_gt *gt; struct drm_i915_gem_object *hws; struct drm_i915_gem_object *obj; @@ -25,13 +24,12 @@ struct igt_spinner { void *seqno; }; -int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915); +int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt); void igt_spinner_fini(struct igt_spinner *spin); struct i915_request 
* igt_spinner_create_request(struct igt_spinner *spin, - struct i915_gem_context *ctx, - struct intel_engine_cs *engine, + struct intel_context *ce, u32 arbitration_command); void igt_spinner_end(struct igt_spinner *spin); -- cgit v1.2.3 From 602776f96bfa50a6107af017522501c43a82b29a Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Tue, 30 Jul 2019 16:07:39 -0700 Subject: drm/i915/uc: Don't enable communication twice on resume When coming out of S3/S4 we sanitize and re-init the HW, which includes enabling communication during uc_init_hw. We therefore don't want to do that again in uc_resume and can just tell GuC to reload its state. v2: split uc_resume and uc_runtime_resume to match the suspend functions and to better differentiate the expected state in the 2 scenarios (Chris) Signed-off-by: Daniele Ceraolo Spurio Cc: Michal Wajdeczko Cc: Chris Wilson Reviewed-by: Chris Wilson Reviewed-by: Michal Wajdeczko Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190730230743.19542-1-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_uc.c | 33 +++++++++++++++++++++++++++++++-- drivers/gpu/drm/i915/gt/uc/intel_uc.h | 1 + drivers/gpu/drm/i915/i915_drv.c | 4 ++-- 3 files changed, 34 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index 6eb8bb3fa252..26671f1daaf7 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -233,11 +233,18 @@ static void guc_disable_interrupts(struct intel_guc *guc) guc->interrupts.disable(guc); } +static inline bool guc_communication_enabled(struct intel_guc *guc) +{ + return guc->send != intel_guc_send_nop; +} + static int guc_enable_communication(struct intel_guc *guc) { struct drm_i915_private *i915 = guc_to_gt(guc)->i915; int ret; + GEM_BUG_ON(guc_communication_enabled(guc)); + ret = intel_guc_ct_enable(&guc->ct); if (ret) return ret; @@ -550,7 +557,7 @@ void intel_uc_suspend(struct intel_uc *uc) intel_uc_runtime_suspend(uc); } -int intel_uc_resume(struct intel_uc *uc) +static int __uc_resume(struct intel_uc *uc, bool enable_communication) { struct intel_guc *guc = &uc->guc; int err; @@ -558,7 +565,11 @@ int intel_uc_resume(struct intel_uc *uc) if (!intel_guc_is_running(guc)) return 0; - guc_enable_communication(guc); + /* Make sure we enable communication if and only if it's disabled */ + GEM_BUG_ON(enable_communication == guc_communication_enabled(guc)); + + if (enable_communication) + guc_enable_communication(guc); err = intel_guc_resume(guc); if (err) { @@ -568,3 +579,21 @@ int intel_uc_resume(struct intel_uc *uc) return 0; } + +int intel_uc_resume(struct intel_uc *uc) +{ + /* + * When coming out of S3/S4 we sanitize and re-init the HW, so + * communication is already re-enabled at this point. + */ + return __uc_resume(uc, false); +} + +int intel_uc_runtime_resume(struct intel_uc *uc) +{ + /* + * During runtime resume we don't sanitize, so we need to re-init + * communication as well. 
+ */ + return __uc_resume(uc, true); +} diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_uc.h index fe3362fd7706..25da51e95417 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.h @@ -47,6 +47,7 @@ void intel_uc_reset_prepare(struct intel_uc *uc); void intel_uc_suspend(struct intel_uc *uc); void intel_uc_runtime_suspend(struct intel_uc *uc); int intel_uc_resume(struct intel_uc *uc); +int intel_uc_runtime_resume(struct intel_uc *uc); static inline bool intel_uc_is_using_guc(struct intel_uc *uc) { diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 7d0813150146..08c5504e040c 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -2950,7 +2950,7 @@ static int intel_runtime_suspend(struct device *kdev) intel_runtime_pm_enable_interrupts(dev_priv); - intel_uc_resume(&dev_priv->gt.uc); + intel_uc_runtime_resume(&dev_priv->gt.uc); intel_gt_init_swizzling(&dev_priv->gt); i915_gem_restore_fences(dev_priv); @@ -3047,7 +3047,7 @@ static int intel_runtime_resume(struct device *kdev) intel_runtime_pm_enable_interrupts(dev_priv); - intel_uc_resume(&dev_priv->gt.uc); + intel_uc_runtime_resume(&dev_priv->gt.uc); /* * No point of rolling back things in case of an error, as the best -- cgit v1.2.3 From 63064d822c964c04107ead05b64eddccfa142005 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Tue, 30 Jul 2019 16:07:40 -0700 Subject: drm/i915/uc: Move uC WOPCM setup in uc_init_hw The register we write are not WOPCM regs but uC ones related to how GuC and HuC are going to use the WOPCM, so it makes logical sense for them to be programmed as part of uc_init_hw. The WOPCM map on the other side is not uC-specific (although that is our main use-case), so keep that separate. v2: move write_and_verify to uncore, fix log, re-use err_out tag, add intel_wopcm_guc_base, fix log Signed-off-by: Daniele Ceraolo Spurio Cc: Michal Wajdeczko Cc: Chris Wilson Reviewed-by: Chris Wilson Reviewed-by: Michal Wajdeczko Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190730230743.19542-2-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_uc.c | 47 ++++++++++++++++++++++++ drivers/gpu/drm/i915/i915_drv.h | 3 +- drivers/gpu/drm/i915/i915_gem.c | 8 +---- drivers/gpu/drm/i915/intel_uncore.h | 12 +++++++ drivers/gpu/drm/i915/intel_wopcm.c | 68 ----------------------------------- drivers/gpu/drm/i915/intel_wopcm.h | 18 ++++++++-- 6 files changed, 76 insertions(+), 80 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index 26671f1daaf7..66b226be6759 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -397,6 +397,49 @@ void intel_uc_sanitize(struct intel_uc *uc) __uc_sanitize(uc); } +/* Initialize and verify the uC regs related to uC positioning in WOPCM */ +static int uc_init_wopcm(struct intel_uc *uc) +{ + struct intel_gt *gt = uc_to_gt(uc); + struct intel_uncore *uncore = gt->uncore; + u32 base = intel_wopcm_guc_base(>->i915->wopcm); + u32 size = intel_wopcm_guc_size(>->i915->wopcm); + u32 huc_agent = intel_uc_is_using_huc(uc) ? 
HUC_LOADING_AGENT_GUC : 0; + u32 mask; + int err; + + GEM_BUG_ON(!intel_uc_is_using_guc(uc)); + GEM_BUG_ON(!(base & GUC_WOPCM_OFFSET_MASK)); + GEM_BUG_ON(base & ~GUC_WOPCM_OFFSET_MASK); + GEM_BUG_ON(!(size & GUC_WOPCM_SIZE_MASK)); + GEM_BUG_ON(size & ~GUC_WOPCM_SIZE_MASK); + + mask = GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED; + err = intel_uncore_write_and_verify(uncore, GUC_WOPCM_SIZE, size, mask, + size | GUC_WOPCM_SIZE_LOCKED); + if (err) + goto err_out; + + mask = GUC_WOPCM_OFFSET_MASK | GUC_WOPCM_OFFSET_VALID | huc_agent; + err = intel_uncore_write_and_verify(uncore, DMA_GUC_WOPCM_OFFSET, + base | huc_agent, mask, + base | huc_agent | + GUC_WOPCM_OFFSET_VALID); + if (err) + goto err_out; + + return 0; + +err_out: + DRM_ERROR("Failed to init uC WOPCM registers:\n"); + DRM_ERROR("DMA_GUC_WOPCM_OFFSET=%#x\n", + intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET)); + DRM_ERROR("GUC_WOPCM_SIZE=%#x\n", + intel_uncore_read(uncore, GUC_WOPCM_SIZE)); + + return err; +} + int intel_uc_init_hw(struct intel_uc *uc) { struct drm_i915_private *i915 = uc_to_gt(uc)->i915; @@ -409,6 +452,10 @@ int intel_uc_init_hw(struct intel_uc *uc) GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw)); + ret = uc_init_wopcm(uc); + if (ret) + goto err_out; + guc_reset_interrupts(guc); /* WaEnableuKernelHeaderValidFix:skl */ diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 9a69445f38c0..fca7b6ce378d 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2274,10 +2274,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_GT_UC(dev_priv) (INTEL_INFO(dev_priv)->has_gt_uc) -/* Having GuC/HuC is not the same as using GuC/HuC */ +/* Having GuC is not the same as using GuC */ #define USES_GUC(dev_priv) intel_uc_is_using_guc(&(dev_priv)->gt.uc) #define USES_GUC_SUBMISSION(dev_priv) intel_uc_is_using_guc_submission(&(dev_priv)->gt.uc) -#define USES_HUC(dev_priv) intel_uc_is_using_huc(&(dev_priv)->gt.uc) #define HAS_POOLED_EU(dev_priv) (INTEL_INFO(dev_priv)->has_pooled_eu) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 65863e955f40..f681152d27fa 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1240,14 +1240,8 @@ int i915_gem_init_hw(struct drm_i915_private *i915) goto out; } - ret = intel_wopcm_init_hw(&i915->wopcm, gt); - if (ret) { - DRM_ERROR("Enabling WOPCM failed (%d)\n", ret); - goto out; - } - /* We can't enable contexts until all firmware is loaded */ - ret = intel_uc_init_hw(&i915->gt.uc); + ret = intel_uc_init_hw(>->uc); if (ret) { DRM_ERROR("Enabling uc failed (%d)\n", ret); goto out; diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h index 2f6ffa309669..e603d210a34d 100644 --- a/drivers/gpu/drm/i915/intel_uncore.h +++ b/drivers/gpu/drm/i915/intel_uncore.h @@ -393,6 +393,18 @@ static inline void intel_uncore_rmw_fw(struct intel_uncore *uncore, intel_uncore_write_fw(uncore, reg, val); } +static inline int intel_uncore_write_and_verify(struct intel_uncore *uncore, + i915_reg_t reg, u32 val, + u32 mask, u32 expected_val) +{ + u32 reg_val; + + intel_uncore_write(uncore, reg, val); + reg_val = intel_uncore_read(uncore, reg); + + return (reg_val & mask) != expected_val ? 
-EINVAL : 0; +} + #define raw_reg_read(base, reg) \ readl(base + i915_mmio_reg_offset(reg)) #define raw_reg_write(base, reg, value) \ diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c index 0e86a9e85b49..d9973c0b0384 100644 --- a/drivers/gpu/drm/i915/intel_wopcm.c +++ b/drivers/gpu/drm/i915/intel_wopcm.c @@ -224,71 +224,3 @@ int intel_wopcm_init(struct intel_wopcm *wopcm) return 0; } - -static int -write_and_verify(struct intel_gt *gt, - i915_reg_t reg, u32 val, u32 mask, u32 locked_bit) -{ - struct intel_uncore *uncore = gt->uncore; - u32 reg_val; - - GEM_BUG_ON(val & ~mask); - - intel_uncore_write(uncore, reg, val); - - reg_val = intel_uncore_read(uncore, reg); - - return (reg_val & mask) != (val | locked_bit) ? -EIO : 0; -} - -/** - * intel_wopcm_init_hw() - Setup GuC WOPCM registers. - * @wopcm: pointer to intel_wopcm. - * @gt: pointer to the containing GT - * - * Setup the GuC WOPCM size and offset registers with the calculated values. It - * will verify the register values to make sure the registers are locked with - * correct values. - * - * Return: 0 on success. -EIO if registers were locked with incorrect values. - */ -int intel_wopcm_init_hw(struct intel_wopcm *wopcm, struct intel_gt *gt) -{ - struct drm_i915_private *i915 = wopcm_to_i915(wopcm); - struct intel_uncore *uncore = gt->uncore; - u32 huc_agent; - u32 mask; - int err; - - if (!USES_GUC(i915)) - return 0; - - GEM_BUG_ON(!HAS_GT_UC(i915)); - GEM_BUG_ON(!wopcm->guc.size); - GEM_BUG_ON(!wopcm->guc.base); - - err = write_and_verify(gt, GUC_WOPCM_SIZE, wopcm->guc.size, - GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED, - GUC_WOPCM_SIZE_LOCKED); - if (err) - goto err_out; - - huc_agent = USES_HUC(i915) ? HUC_LOADING_AGENT_GUC : 0; - mask = GUC_WOPCM_OFFSET_MASK | GUC_WOPCM_OFFSET_VALID | huc_agent; - err = write_and_verify(gt, DMA_GUC_WOPCM_OFFSET, - wopcm->guc.base | huc_agent, mask, - GUC_WOPCM_OFFSET_VALID); - if (err) - goto err_out; - - return 0; - -err_out: - DRM_ERROR("Failed to init WOPCM registers:\n"); - DRM_ERROR("DMA_GUC_WOPCM_OFFSET=%#x\n", - intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET)); - DRM_ERROR("GUC_WOPCM_SIZE=%#x\n", - intel_uncore_read(uncore, GUC_WOPCM_SIZE)); - - return err; -} diff --git a/drivers/gpu/drm/i915/intel_wopcm.h b/drivers/gpu/drm/i915/intel_wopcm.h index 56aaed4d64ff..f9b603205bb1 100644 --- a/drivers/gpu/drm/i915/intel_wopcm.h +++ b/drivers/gpu/drm/i915/intel_wopcm.h @@ -9,8 +9,6 @@ #include -struct intel_gt; - /** * struct intel_wopcm - Overall WOPCM info and WOPCM regions. * @size: Size of overall WOPCM. @@ -26,6 +24,21 @@ struct intel_wopcm { } guc; }; +/** + * intel_wopcm_guc_base() + * @wopcm: intel_wopcm structure + * + * Returns the base of the WOPCM shadowed region. + * + * Returns: + * 0 if GuC is not present or not in use. + * Otherwise, the GuC WOPCM base. 
+ */ +static inline u32 intel_wopcm_guc_base(struct intel_wopcm *wopcm) +{ + return wopcm->guc.base; +} + /** * intel_wopcm_guc_size() * @wopcm: intel_wopcm structure @@ -43,6 +56,5 @@ static inline u32 intel_wopcm_guc_size(struct intel_wopcm *wopcm) void intel_wopcm_init_early(struct intel_wopcm *wopcm); int intel_wopcm_init(struct intel_wopcm *wopcm); -int intel_wopcm_init_hw(struct intel_wopcm *wopcm, struct intel_gt *gt); #endif -- cgit v1.2.3 From 900c9173ca692f08de9332cf5e4f0ab9486db8b9 Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Tue, 30 Jul 2019 11:26:14 -0700 Subject: drm/i915: remove dangling forward declaration Commit 20a7f2fc4d7a ("drm/i915: Convert intel_mocs_init_l3cc_table to intel_gt") removed the only user. Signed-off-by: Lucas De Marchi Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190730182614.14379-1-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/gt/intel_mocs.h | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.h b/drivers/gpu/drm/i915/gt/intel_mocs.h index a334db2d6d6b..8e20ca8bb34c 100644 --- a/drivers/gpu/drm/i915/gt/intel_mocs.h +++ b/drivers/gpu/drm/i915/gt/intel_mocs.h @@ -49,7 +49,6 @@ * context handling keep the MOCS in step. */ -struct drm_i915_private; struct i915_request; struct intel_engine_cs; struct intel_gt; -- cgit v1.2.3 From 91b59cd98a970f566b81418b9bd6f14f918f62d8 Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Tue, 30 Jul 2019 11:04:03 -0700 Subject: drm/i915/tgl: Move fault registers to their new offset The fault registers moved to another offset. The old location is now taken by the global MOCS registers, to be added in a follow up change. Based on previous patches by Michel Thierry . Signed-off-by: Lucas De Marchi Reviewed-by: Daniele Ceraolo Spurio Link: https://patchwork.freedesktop.org/patch/msgid/20190730180407.5993-2-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/gt/intel_gt.c | 24 ++++++++++++++++++++---- drivers/gpu/drm/i915/i915_gpu_error.c | 12 ++++++++++-- drivers/gpu/drm/i915/i915_reg.h | 3 +++ 3 files changed, 33 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index f7e69db4019d..caa07eb20a64 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -79,7 +79,10 @@ intel_gt_clear_error_registers(struct intel_gt *gt, I915_MASTER_ERROR_INTERRUPT); } - if (INTEL_GEN(i915) >= 8) { + if (INTEL_GEN(i915) >= 12) { + rmw_clear(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID); + intel_uncore_posting_read(uncore, GEN12_RING_FAULT_REG); + } else if (INTEL_GEN(i915) >= 8) { rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID); intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG); } else if (INTEL_GEN(i915) >= 6) { @@ -117,14 +120,27 @@ static void gen6_check_faults(struct intel_gt *gt) static void gen8_check_faults(struct intel_gt *gt) { struct intel_uncore *uncore = gt->uncore; - u32 fault = intel_uncore_read(uncore, GEN8_RING_FAULT_REG); + i915_reg_t fault_reg, fault_data0_reg, fault_data1_reg; + u32 fault; + + if (INTEL_GEN(gt->i915) >= 12) { + fault_reg = GEN12_RING_FAULT_REG; + fault_data0_reg = GEN12_FAULT_TLB_DATA0; + fault_data1_reg = GEN12_FAULT_TLB_DATA1; + } else { + fault_reg = GEN8_RING_FAULT_REG; + fault_data0_reg = GEN8_FAULT_TLB_DATA0; + fault_data1_reg = GEN8_FAULT_TLB_DATA1; + } + fault = intel_uncore_read(uncore, fault_reg); if (fault & RING_FAULT_VALID) { u32 fault_data0, 
fault_data1; u64 fault_addr; - fault_data0 = intel_uncore_read(uncore, GEN8_FAULT_TLB_DATA0); - fault_data1 = intel_uncore_read(uncore, GEN8_FAULT_TLB_DATA1); + fault_data0 = intel_uncore_read(uncore, fault_data0_reg); + fault_data1 = intel_uncore_read(uncore, fault_data1_reg); + fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) | ((u64)fault_data0 << 12); diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 674d341a23f6..6b9072c5151c 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -1106,7 +1106,10 @@ static void error_record_engine_registers(struct i915_gpu_state *error, if (INTEL_GEN(dev_priv) >= 6) { ee->rc_psmi = ENGINE_READ(engine, RING_PSMI_CTL); - if (INTEL_GEN(dev_priv) >= 8) + + if (INTEL_GEN(dev_priv) >= 12) + ee->fault_reg = I915_READ(GEN12_RING_FAULT_REG); + else if (INTEL_GEN(dev_priv) >= 8) ee->fault_reg = I915_READ(GEN8_RING_FAULT_REG); else ee->fault_reg = GEN6_RING_FAULT_REG_READ(engine); @@ -1541,7 +1544,12 @@ static void capture_reg_state(struct i915_gpu_state *error) if (IS_GEN(i915, 7)) error->err_int = intel_uncore_read(uncore, GEN7_ERR_INT); - if (INTEL_GEN(i915) >= 8) { + if (INTEL_GEN(i915) >= 12) { + error->fault_data0 = intel_uncore_read(uncore, + GEN12_FAULT_TLB_DATA0); + error->fault_data1 = intel_uncore_read(uncore, + GEN12_FAULT_TLB_DATA1); + } else if (INTEL_GEN(i915) >= 8) { error->fault_data0 = intel_uncore_read(uncore, GEN8_FAULT_TLB_DATA0); error->fault_data1 = intel_uncore_read(uncore, diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 21306dd3790a..80e98ec99410 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2490,6 +2490,7 @@ enum i915_power_well_id { #define RENDER_HWS_PGA_GEN7 _MMIO(0x04080) #define RING_FAULT_REG(engine) _MMIO(0x4094 + 0x100 * (engine)->hw_id) #define GEN8_RING_FAULT_REG _MMIO(0x4094) +#define GEN12_RING_FAULT_REG _MMIO(0xcec4) #define GEN8_RING_FAULT_ENGINE_ID(x) (((x) >> 12) & 0x7) #define RING_FAULT_GTTSEL_MASK (1 << 11) #define RING_FAULT_SRCID(x) (((x) >> 3) & 0xff) @@ -2633,6 +2634,8 @@ enum i915_power_well_id { #define GEN8_FAULT_TLB_DATA0 _MMIO(0x4b10) #define GEN8_FAULT_TLB_DATA1 _MMIO(0x4b14) +#define GEN12_FAULT_TLB_DATA0 _MMIO(0xceb8) +#define GEN12_FAULT_TLB_DATA1 _MMIO(0xcebc) #define FAULT_VA_HIGH_BITS (0xf << 0) #define FAULT_GTT_SEL (1 << 4) -- cgit v1.2.3 From 23dea05191be897a5f30416342c3cff101f2701c Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Tue, 30 Jul 2019 11:04:04 -0700 Subject: drm/i915/tgl: stop using ERROR_GEN6 and DONE_REG These registers have been removed on gen12. 
v2: merge common branch for IS_GEN_RANGE(i915, 6, 11) Signed-off-by: Lucas De Marchi Reviewed-by: Daniele Ceraolo Spurio Link: https://patchwork.freedesktop.org/patch/msgid/20190730180407.5993-3-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/i915_gpu_error.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 6b9072c5151c..ad5f7bee6852 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -745,16 +745,15 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m, for (i = 0; i < error->nfence; i++) err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); - if (INTEL_GEN(m->i915) >= 6) { + if (IS_GEN_RANGE(m->i915, 6, 11)) { err_printf(m, "ERROR: 0x%08x\n", error->error); - - if (INTEL_GEN(m->i915) >= 8) - err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n", - error->fault_data1, error->fault_data0); - err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); } + if (INTEL_GEN(m->i915) >= 8) + err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n", + error->fault_data1, error->fault_data0); + if (IS_GEN(m->i915, 7)) err_printf(m, "ERR_INT: 0x%08x\n", error->err_int); @@ -1568,8 +1567,10 @@ static void capture_reg_state(struct i915_gpu_state *error) if (INTEL_GEN(i915) >= 6) { error->derrmr = intel_uncore_read(uncore, DERRMR); - error->error = intel_uncore_read(uncore, ERROR_GEN6); - error->done_reg = intel_uncore_read(uncore, DONE_REG); + if (INTEL_GEN(i915) < 12) { + error->error = intel_uncore_read(uncore, ERROR_GEN6); + error->done_reg = intel_uncore_read(uncore, DONE_REG); + } } if (INTEL_GEN(i915) >= 5) -- cgit v1.2.3 From 2ddf992179c45fb93de190b5c6ae16d2a4f4849a Mon Sep 17 00:00:00 2001 From: Tomasz Lis Date: Tue, 30 Jul 2019 11:04:05 -0700 Subject: drm/i915/tgl: Define MOCS entries for Tigerlake The MOCS table is published as part of bspec, and versioned. Entries are supposed to never be modified, but new ones can be added. Adding entries increases table version. The patch includes version 1 entries. Two of the 3 legacy entries used for gen9 are no longer expected to work. Although we are changing the gen11 table, those changes are supposed to be backward compatible since we are only touching previously undefined entries. v2: Add the missing entries in 49-51 range and replace "HW reserved" terminology to what it actually is: L1 is implicitly enabled (from Daniele) v3: Use a different table for Tiger Lake since entries 0 and 1 are not the same (from Daniele) Cc: Joonas Lahtinen Cc: Mika Kuoppala Signed-off-by: Tomasz Lis Signed-off-by: Lucas De Marchi Reviewed-by: Daniele Ceraolo Spurio Link: https://patchwork.freedesktop.org/patch/msgid/20190730180407.5993-4-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/gt/intel_mocs.c | 68 ++++++++++++++++++++++++++++++------ 1 file changed, 57 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c index e082b25d2db1..d93301310dc7 100644 --- a/drivers/gpu/drm/i915/gt/intel_mocs.c +++ b/drivers/gpu/drm/i915/gt/intel_mocs.c @@ -62,6 +62,10 @@ struct drm_i915_mocs_table { #define GEN11_NUM_MOCS_ENTRIES 64 /* 63-64 are reserved, but configured. 
*/ /* (e)LLC caching options */ +/* + * Note: LE_0_PAGETABLE works only up to Gen11; for newer gens it means + * the same as LE_UC + */ #define LE_0_PAGETABLE _LE_CACHEABILITY(0) #define LE_1_UC _LE_CACHEABILITY(1) #define LE_2_WT _LE_CACHEABILITY(2) @@ -100,8 +104,9 @@ struct drm_i915_mocs_table { * of bspec. * * Entries not part of the following tables are undefined as far as - * userspace is concerned and shouldn't be relied upon. For the time - * being they will be initialized to PTE. + * userspace is concerned and shouldn't be relied upon. For Gen < 12 + * they will be initialized to PTE. Gen >= 12 onwards don't have a setting for + * PTE and will be initialized to an invalid value. * * The last two entries are reserved by the hardware. For ICL+ they * should be initialized according to bspec and never used, for older @@ -137,14 +142,7 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = { }; #define GEN11_MOCS_ENTRIES \ - /* Base - Uncached (Deprecated) */ \ - MOCS_ENTRY(I915_MOCS_UNCACHED, \ - LE_1_UC | LE_TC_1_LLC, \ - L3_1_UC), \ - /* Base - L3 + LeCC:PAT (Deprecated) */ \ - MOCS_ENTRY(I915_MOCS_PTE, \ - LE_0_PAGETABLE | LE_TC_1_LLC, \ - L3_3_WB), \ + /* Entries 0 and 1 are defined per-platform */ \ /* Base - L3 + LLC */ \ MOCS_ENTRY(2, \ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), \ @@ -242,7 +240,50 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = { LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), \ L3_1_UC) +static const struct drm_i915_mocs_entry tigerlake_mocs_table[] = { + /* Base - Error (Reserved for Non-Use) */ + MOCS_ENTRY(0, 0x0, 0x0), + /* Base - Reserved */ + MOCS_ENTRY(1, 0x0, 0x0), + + GEN11_MOCS_ENTRIES, + + /* Implicitly enable L1 - HDC:L1 + L3 + LLC */ + MOCS_ENTRY(48, + LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), + L3_3_WB), + /* Implicitly enable L1 - HDC:L1 + L3 */ + MOCS_ENTRY(49, + LE_1_UC | LE_TC_1_LLC, + L3_3_WB), + /* Implicitly enable L1 - HDC:L1 + LLC */ + MOCS_ENTRY(50, + LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), + L3_1_UC), + /* Implicitly enable L1 - HDC:L1 */ + MOCS_ENTRY(51, + LE_1_UC | LE_TC_1_LLC, + L3_1_UC), + /* HW Special Case (CCS) */ + MOCS_ENTRY(60, + LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), + L3_1_UC), + /* HW Special Case (Displayable) */ + MOCS_ENTRY(61, + LE_1_UC | LE_TC_1_LLC | LE_SCF(1), + L3_3_WB), +}; + static const struct drm_i915_mocs_entry icelake_mocs_table[] = { + /* Base - Uncached (Deprecated) */ + MOCS_ENTRY(I915_MOCS_UNCACHED, + LE_1_UC | LE_TC_1_LLC, + L3_1_UC), + /* Base - L3 + LeCC:PAT (Deprecated) */ + MOCS_ENTRY(I915_MOCS_PTE, + LE_0_PAGETABLE | LE_TC_1_LLC, + L3_3_WB), + GEN11_MOCS_ENTRIES }; @@ -264,7 +305,12 @@ static bool get_mocs_settings(struct intel_gt *gt, struct drm_i915_private *i915 = gt->i915; bool result = false; - if (INTEL_GEN(i915) >= 11) { + if (INTEL_GEN(i915) >= 12) { + table->size = ARRAY_SIZE(tigerlake_mocs_table); + table->table = tigerlake_mocs_table; + table->n_entries = GEN11_NUM_MOCS_ENTRIES; + result = true; + } else if (IS_GEN(i915, 11)) { table->size = ARRAY_SIZE(icelake_mocs_table); table->table = icelake_mocs_table; table->n_entries = GEN11_NUM_MOCS_ENTRIES; -- cgit v1.2.3 From a7a7a0e6ebde34e05793d390cc9303e06e8f8dd1 Mon Sep 17 00:00:00 2001 From: Michel Thierry Date: Tue, 30 Jul 2019 11:04:06 -0700 Subject: drm/i915/tgl: Tigerlake only has global MOCS registers Until Icelake, each engine had its own set of 64 MOCS registers. In order to simplify, Tigerlake moves to only 64 Global MOCS registers, which are no longer part of the engine context. 
Since these registers are now global, they also only need to be initialized once. >From Gen12 onwards, MOCS must specify the target cache (3:2) and LRU management (5:4) fields and cannot be programmed to 'use the value from Private PAT', because these fields are no longer part of the PPAT. Also cacheability control (1:0) field has changed, 00 no longer means 'use controls from page table', but uncacheable (UC). v2 (Lucas): - Move the changes to the fault registers to a separate commit - the old ones overlap with the range used by the new global MOCS (requested by Daniele) v3 (Lucas): - Clarify comment about setting the unused entries to the same value of index 0, that is the invalid entry (requested by Daniele) - Move changes to DONE_REG and ERROR_GEN6 to a separate commit (requested by Daniele) Cc: Daniele Ceraolo Spurio Signed-off-by: Michel Thierry Signed-off-by: Tvrtko Ursulin Signed-off-by: Lucas De Marchi Reviewed-by: Tomasz Lis Link: https://patchwork.freedesktop.org/patch/msgid/20190730180407.5993-5-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/gt/intel_mocs.c | 44 +++++++++++++++++++++++++++++++- drivers/gpu/drm/i915/gt/intel_mocs.h | 1 + drivers/gpu/drm/i915/i915_drv.h | 2 ++ drivers/gpu/drm/i915/i915_gem.c | 1 + drivers/gpu/drm/i915/i915_pci.c | 3 ++- drivers/gpu/drm/i915/i915_reg.h | 2 ++ drivers/gpu/drm/i915/intel_device_info.h | 1 + 7 files changed, 52 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c index d93301310dc7..764e47131c06 100644 --- a/drivers/gpu/drm/i915/gt/intel_mocs.c +++ b/drivers/gpu/drm/i915/gt/intel_mocs.c @@ -392,6 +392,10 @@ void intel_mocs_init_engine(struct intel_engine_cs *engine) unsigned int index; u32 unused_value; + /* Platforms with global MOCS do not need per-engine initialization. */ + if (HAS_GLOBAL_MOCS_REGISTERS(gt->i915)) + return; + /* Called under a blanket forcewake */ assert_forcewakes_active(uncore, FORCEWAKE_ALL); @@ -416,6 +420,43 @@ void intel_mocs_init_engine(struct intel_engine_cs *engine) unused_value); } +/** + * intel_mocs_init_global() - program the global mocs registers + * gt: pointer to struct intel_gt + * + * This function initializes the MOCS global registers. + */ +void intel_mocs_init_global(struct intel_gt *gt) +{ + struct intel_uncore *uncore = gt->uncore; + struct drm_i915_mocs_table table; + unsigned int index; + + if (!HAS_GLOBAL_MOCS_REGISTERS(gt->i915)) + return; + + if (!get_mocs_settings(gt, &table)) + return; + + if (GEM_DEBUG_WARN_ON(table.size > table.n_entries)) + return; + + for (index = 0; index < table.size; index++) + intel_uncore_write(uncore, + GEN12_GLOBAL_MOCS(index), + table.table[index].control_value); + + /* + * Ok, now set the unused entries to the invalid entry (index 0). These + * entries are officially undefined and no contract for the contents and + * settings is given for these entries. + */ + for (; index < table.n_entries; index++) + intel_uncore_write(uncore, + GEN12_GLOBAL_MOCS(index), + table.table[0].control_value); +} + /** * emit_mocs_control_table() - emit the mocs control table * @rq: Request to set up the MOCS table for. 
@@ -619,7 +660,8 @@ int intel_mocs_emit(struct i915_request *rq) struct drm_i915_mocs_table t; int ret; - if (rq->engine->class != RENDER_CLASS) + if (HAS_GLOBAL_MOCS_REGISTERS(rq->i915) || + rq->engine->class != RENDER_CLASS) return 0; if (get_mocs_settings(rq->engine->gt, &t)) { diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.h b/drivers/gpu/drm/i915/gt/intel_mocs.h index 8e20ca8bb34c..d5d3558caf4e 100644 --- a/drivers/gpu/drm/i915/gt/intel_mocs.h +++ b/drivers/gpu/drm/i915/gt/intel_mocs.h @@ -54,6 +54,7 @@ struct intel_engine_cs; struct intel_gt; void intel_mocs_init_l3cc_table(struct intel_gt *gt); +void intel_mocs_init_global(struct intel_gt *gt); void intel_mocs_init_engine(struct intel_engine_cs *engine); int intel_mocs_emit(struct i915_request *rq); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index fca7b6ce378d..3ee11b27ad91 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2280,6 +2280,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_POOLED_EU(dev_priv) (INTEL_INFO(dev_priv)->has_pooled_eu) +#define HAS_GLOBAL_MOCS_REGISTERS(dev_priv) (INTEL_INFO(dev_priv)->has_global_mocs) + #define INTEL_PCH_DEVICE_ID_MASK 0xff80 #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index f681152d27fa..295702ab99e1 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1247,6 +1247,7 @@ int i915_gem_init_hw(struct drm_i915_private *i915) goto out; } + intel_mocs_init_global(gt); intel_mocs_init_l3cc_table(gt); intel_engines_set_scheduler_caps(i915); diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index bd9211b3d76e..a7e1cde4a6d9 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -783,7 +783,8 @@ static const struct intel_device_info intel_elkhartlake_info = { [TRANSCODER_D] = TRANSCODER_D_OFFSET, \ [TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, \ [TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \ - } + }, \ + .has_global_mocs = 1 static const struct intel_device_info intel_tigerlake_12_info = { GEN12_FEATURES, diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 80e98ec99410..c5187a58d3c9 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -11253,6 +11253,8 @@ enum skl_power_gate { #define PMFLUSH_GAPL3UNBLOCK (1 << 21) #define PMFLUSHDONE_LNEBLK (1 << 22) +#define GEN12_GLOBAL_MOCS(i) _MMIO(0x4000 + (i) * 4) /* Global MOCS regs */ + /* gamt regs */ #define GEN8_L3_LRA_1_GPGPU _MMIO(0x4dd4) #define GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW 0x67F1427F /* max/min for LRA1/2 */ diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index 4f58e8d71b67..92e0c2e0954c 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -112,6 +112,7 @@ enum intel_ppgtt_type { func(gpu_reset_clobbers_display); \ func(has_reset_engine); \ func(has_fpga_dbg); \ + func(has_global_mocs); \ func(has_gt_uc); \ func(has_l3_dpf); \ func(has_llc); \ -- cgit v1.2.3 From 1b6c3c6d4607784c83160cf7fb9c94fae5c117a1 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Tue, 30 Jul 2019 11:04:07 -0700 Subject: drm/i915: Move MOCS setup to intel_mocs.c Hide the details of MOCS setup from i915_gem by moving both current calls into one in intel_mocs_init. 
Cc: Stuart Summers Signed-off-by: Tvrtko Ursulin Signed-off-by: Lucas De Marchi Reviewed-by: Stuart Summers Link: https://patchwork.freedesktop.org/patch/msgid/20190713010940.17711-21-lucas.demarchi@intel.com Link: https://patchwork.freedesktop.org/patch/msgid/20190730180407.5993-6-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/gt/intel_mocs.c | 15 +++++++++++---- drivers/gpu/drm/i915/gt/intel_mocs.h | 3 +-- drivers/gpu/drm/i915/i915_gem.c | 3 +-- 3 files changed, 13 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c index 764e47131c06..77ddb307346a 100644 --- a/drivers/gpu/drm/i915/gt/intel_mocs.c +++ b/drivers/gpu/drm/i915/gt/intel_mocs.c @@ -426,14 +426,13 @@ void intel_mocs_init_engine(struct intel_engine_cs *engine) * * This function initializes the MOCS global registers. */ -void intel_mocs_init_global(struct intel_gt *gt) +static void intel_mocs_init_global(struct intel_gt *gt) { struct intel_uncore *uncore = gt->uncore; struct drm_i915_mocs_table table; unsigned int index; - if (!HAS_GLOBAL_MOCS_REGISTERS(gt->i915)) - return; + GEM_BUG_ON(!HAS_GLOBAL_MOCS_REGISTERS(gt->i915)); if (!get_mocs_settings(gt, &table)) return; @@ -599,7 +598,7 @@ static int emit_mocs_l3cc_table(struct i915_request *rq, * * Return: Nothing. */ -void intel_mocs_init_l3cc_table(struct intel_gt *gt) +static void intel_mocs_init_l3cc_table(struct intel_gt *gt) { struct intel_uncore *uncore = gt->uncore; struct drm_i915_mocs_table table; @@ -678,3 +677,11 @@ int intel_mocs_emit(struct i915_request *rq) return 0; } + +void intel_mocs_init(struct intel_gt *gt) +{ + intel_mocs_init_l3cc_table(gt); + + if (HAS_GLOBAL_MOCS_REGISTERS(gt->i915)) + intel_mocs_init_global(gt); +} diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.h b/drivers/gpu/drm/i915/gt/intel_mocs.h index d5d3558caf4e..2ae816b7ca19 100644 --- a/drivers/gpu/drm/i915/gt/intel_mocs.h +++ b/drivers/gpu/drm/i915/gt/intel_mocs.h @@ -53,8 +53,7 @@ struct i915_request; struct intel_engine_cs; struct intel_gt; -void intel_mocs_init_l3cc_table(struct intel_gt *gt); -void intel_mocs_init_global(struct intel_gt *gt); +void intel_mocs_init(struct intel_gt *gt); void intel_mocs_init_engine(struct intel_engine_cs *engine); int intel_mocs_emit(struct i915_request *rq); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 295702ab99e1..62eefe860bcd 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1247,8 +1247,7 @@ int i915_gem_init_hw(struct drm_i915_private *i915) goto out; } - intel_mocs_init_global(gt); - intel_mocs_init_l3cc_table(gt); + intel_mocs_init(gt); intel_engines_set_scheduler_caps(i915); -- cgit v1.2.3 From dc25ace66c74ca148c393952bd2ce0856029c692 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 26 Jul 2019 22:25:20 +0200 Subject: drm/i810: Use CONFIG_PREEMPTION CONFIG_PREEMPTION is selected by CONFIG_PREEMPT and by CONFIG_PREEMPT_RT. Both PREEMPT and PREEMPT_RT require the same functionality which today depends on CONFIG_PREEMPT. Change the Kconfig dependency of i810 to !CONFIG_PREEMPTION so the driver is not accidentally built on a RT kernel. 
Signed-off-by: Thomas Gleixner Cc: dri-devel@lists.freedesktop.org Cc: Maarten Lankhorst Cc: David Airlie Cc: Daniel Vetter Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/alpine.DEB.2.21.1907262223280.1791@nanos.tec.linutronix.de --- drivers/gpu/drm/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 1d80222587ad..3c88420e3497 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -394,7 +394,7 @@ config DRM_R128 config DRM_I810 tristate "Intel I810" # !PREEMPT because of missing ioctl locking - depends on DRM && AGP && AGP_INTEL && (!PREEMPT || BROKEN) + depends on DRM && AGP && AGP_INTEL && (!PREEMPTION || BROKEN) help Choose this option if you have an Intel I810 graphics card. If M is selected, the module will be called i810. AGP support is required -- cgit v1.2.3 From c2052d6e242c33594db90751a361aca1065a054a Mon Sep 17 00:00:00 2001 From: José Roberto de Souza Date: Tue, 30 Jul 2019 10:51:21 -0700 Subject: drm/i915/ehl: Ungate DDIC and DDID MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Specification states that DDI_CLK_SEL needs to be mapped to MG clock even if MG do not exist on EHL, this will ungate those DDIs. BSpec: 20845 Cc: Matt Roper Cc: Vivek Kasireddy Signed-off-by: José Roberto de Souza Reviewed-by: Matt Roper Tested-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20190730175121.16413-1-jose.souza@intel.com --- drivers/gpu/drm/i915/display/intel_ddi.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index c6f38c7b397d..fb58845020dc 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -2921,6 +2921,12 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder, if (!intel_phy_is_combo(dev_priv, phy)) I915_WRITE(DDI_CLK_SEL(port), icl_pll_to_ddi_clk_sel(encoder, crtc_state)); + else if (IS_ELKHARTLAKE(dev_priv) && port >= PORT_C) + /* + * MG does not exist but the programming is required + * to ungate DDIC and DDID + */ + I915_WRITE(DDI_CLK_SEL(port), DDI_CLK_SEL_MG); } else if (IS_CANNONLAKE(dev_priv)) { /* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */ val = I915_READ(DPCLKA_CFGCR0); @@ -2961,7 +2967,8 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder) enum phy phy = intel_port_to_phy(dev_priv, port); if (INTEL_GEN(dev_priv) >= 11) { - if (!intel_phy_is_combo(dev_priv, phy)) + if (!intel_phy_is_combo(dev_priv, phy) || + (IS_ELKHARTLAKE(dev_priv) && port >= PORT_C)) I915_WRITE(DDI_CLK_SEL(port), DDI_CLK_SEL_NONE); } else if (IS_CANNONLAKE(dev_priv)) { I915_WRITE(DPCLKA_CFGCR0, I915_READ(DPCLKA_CFGCR0) | -- cgit v1.2.3 From 7e9e5ead55beacc11116b3fb90b0de6e7cf55a69 Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Wed, 17 Jul 2019 14:15:37 -0700 Subject: drm/vgem: fix cache synchronization on arm/arm64 drm_cflush_pages() is no-op on arm/arm64. But instead we can use dma_sync API. Fixes failures w/ vgem_test. 
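The replacement pattern is the streaming DMA sync API operating on the pinned pages' scatter-gather table, which still does the right thing on arm/arm64 where drm_clflush_pages() is a no-op. A condensed sketch of what the new pin path does (the helper name and the stripped-down error handling are illustrative; the real code below also stores the table on the object so the unpin path can sync back for the CPU):

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <drm/drm_prime.h>

/* sketch: make CPU-written pages visible to dma-buf importers */
static struct sg_table *pin_sync_sketch(struct device *dev,
                                        struct page **pages, int npages)
{
        struct sg_table *sgt;

        sgt = drm_prime_pages_to_sg(pages, npages);
        if (IS_ERR(sgt))
                return sgt;

        /* flush CPU caches for the whole object */
        dma_sync_sg_for_device(dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);

        return sgt;
}

On unpin the inverse is dma_sync_sg_for_cpu() on the same table before it is released with sg_free_table(), which is what sync_and_unpin() in the diff below does.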
Acked-by: Daniel Vetter Signed-off-by: Rob Clark Signed-off-by: Sean Paul Link: https://patchwork.freedesktop.org/patch/msgid/20190717211542.30482-1-robdclark@gmail.com --- drivers/gpu/drm/vgem/vgem_drv.c | 130 +++++++++++++++++++++++++--------------- 1 file changed, 83 insertions(+), 47 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c index 11a8f99ba18c..fc04803ff403 100644 --- a/drivers/gpu/drm/vgem/vgem_drv.c +++ b/drivers/gpu/drm/vgem/vgem_drv.c @@ -47,10 +47,16 @@ static struct vgem_device { struct platform_device *platform; } *vgem_device; +static void sync_and_unpin(struct drm_vgem_gem_object *bo); +static struct page **pin_and_sync(struct drm_vgem_gem_object *bo); + static void vgem_gem_free_object(struct drm_gem_object *obj) { struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj); + if (!obj->import_attach) + sync_and_unpin(vgem_obj); + kvfree(vgem_obj->pages); mutex_destroy(&vgem_obj->pages_lock); @@ -78,40 +84,15 @@ static vm_fault_t vgem_gem_fault(struct vm_fault *vmf) return VM_FAULT_SIGBUS; mutex_lock(&obj->pages_lock); + if (!obj->pages) + pin_and_sync(obj); if (obj->pages) { get_page(obj->pages[page_offset]); vmf->page = obj->pages[page_offset]; ret = 0; } mutex_unlock(&obj->pages_lock); - if (ret) { - struct page *page; - - page = shmem_read_mapping_page( - file_inode(obj->base.filp)->i_mapping, - page_offset); - if (!IS_ERR(page)) { - vmf->page = page; - ret = 0; - } else switch (PTR_ERR(page)) { - case -ENOSPC: - case -ENOMEM: - ret = VM_FAULT_OOM; - break; - case -EBUSY: - ret = VM_FAULT_RETRY; - break; - case -EFAULT: - case -EINVAL: - ret = VM_FAULT_SIGBUS; - break; - default: - WARN_ON(PTR_ERR(page)); - ret = VM_FAULT_SIGBUS; - break; - } - } return ret; } @@ -277,32 +258,93 @@ static const struct file_operations vgem_driver_fops = { .release = drm_release, }; -static struct page **vgem_pin_pages(struct drm_vgem_gem_object *bo) +/* Called under pages_lock, except in free path (where it can't race): */ +static void sync_and_unpin(struct drm_vgem_gem_object *bo) { - mutex_lock(&bo->pages_lock); - if (bo->pages_pin_count++ == 0) { - struct page **pages; + struct drm_device *dev = bo->base.dev; + + if (bo->table) { + dma_sync_sg_for_cpu(dev->dev, bo->table->sgl, + bo->table->nents, DMA_BIDIRECTIONAL); + sg_free_table(bo->table); + kfree(bo->table); + bo->table = NULL; + } + + if (bo->pages) { + drm_gem_put_pages(&bo->base, bo->pages, true, true); + bo->pages = NULL; + } +} + +static struct page **pin_and_sync(struct drm_vgem_gem_object *bo) +{ + struct drm_device *dev = bo->base.dev; + int npages = bo->base.size >> PAGE_SHIFT; + struct page **pages; + struct sg_table *sgt; + + WARN_ON(!mutex_is_locked(&bo->pages_lock)); + + pages = drm_gem_get_pages(&bo->base); + if (IS_ERR(pages)) { + bo->pages_pin_count--; + mutex_unlock(&bo->pages_lock); + return pages; + } - pages = drm_gem_get_pages(&bo->base); - if (IS_ERR(pages)) { - bo->pages_pin_count--; - mutex_unlock(&bo->pages_lock); - return pages; - } + sgt = drm_prime_pages_to_sg(pages, npages); + if (IS_ERR(sgt)) { + dev_err(dev->dev, + "failed to allocate sgt: %ld\n", + PTR_ERR(bo->table)); + drm_gem_put_pages(&bo->base, pages, false, false); + mutex_unlock(&bo->pages_lock); + return ERR_CAST(bo->table); + } + + /* + * Flush the object from the CPU cache so that importers + * can rely on coherent indirect access via the exported + * dma-address. 
+ */ + dma_sync_sg_for_device(dev->dev, sgt->sgl, + sgt->nents, DMA_BIDIRECTIONAL); + + bo->pages = pages; + bo->table = sgt; + + return pages; +} + +static struct page **vgem_pin_pages(struct drm_vgem_gem_object *bo) +{ + struct page **pages; - bo->pages = pages; + mutex_lock(&bo->pages_lock); + if (bo->pages_pin_count++ == 0 && !bo->pages) { + pages = pin_and_sync(bo); + } else { + WARN_ON(!bo->pages); + pages = bo->pages; } mutex_unlock(&bo->pages_lock); - return bo->pages; + return pages; } static void vgem_unpin_pages(struct drm_vgem_gem_object *bo) { + /* + * We shouldn't hit this for imported bo's.. in the import + * case we don't own the scatter-table + */ + WARN_ON(bo->base.import_attach); + mutex_lock(&bo->pages_lock); if (--bo->pages_pin_count == 0) { - drm_gem_put_pages(&bo->base, bo->pages, true, true); - bo->pages = NULL; + WARN_ON(!bo->table); + sync_and_unpin(bo); } mutex_unlock(&bo->pages_lock); } @@ -310,18 +352,12 @@ static void vgem_unpin_pages(struct drm_vgem_gem_object *bo) static int vgem_prime_pin(struct drm_gem_object *obj) { struct drm_vgem_gem_object *bo = to_vgem_bo(obj); - long n_pages = obj->size >> PAGE_SHIFT; struct page **pages; pages = vgem_pin_pages(bo); if (IS_ERR(pages)) return PTR_ERR(pages); - /* Flush the object from the CPU cache so that importers can rely - * on coherent indirect access via the exported dma-address. - */ - drm_clflush_pages(pages, n_pages); - return 0; } -- cgit v1.2.3 From 32978d8cfd3c0df8b909f9d4020718613819fe81 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 31 Jul 2019 10:26:39 -0500 Subject: drm/amdgpu: drop drmP.h in amdgpu_amdkfd_arcturus.c Unused. Acked-by: Sam Ravnborg Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c index 4d9101834ba7..c79aaebeeaf0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c @@ -28,7 +28,6 @@ #include #include #include -#include #include "amdgpu.h" #include "amdgpu_amdkfd.h" #include "sdma0/sdma0_4_2_2_offset.h" -- cgit v1.2.3 From 3b90f6ecdfa5fb5efd92c557e18266afc3eb93d6 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 31 Jul 2019 10:27:57 -0500 Subject: drm/amdgpu: drop drmP.h from amdgpu_amdkfd_gfx_v10.c Unused. Acked-by: Sam Ravnborg Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c index 0723f800e815..7c03a7fcd011 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c @@ -27,7 +27,6 @@ #include #include #include -#include #include "amdgpu.h" #include "amdgpu_amdkfd.h" #include "amdgpu_ucode.h" -- cgit v1.2.3 From 0a069bbe138797efb3669176888eefe0742690b6 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 31 Jul 2019 10:31:44 -0500 Subject: drm/amdgpu: drop drmP.h in gfx_v10_0.c And fix the fallout. 
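The fallout from dropping the drmP.h umbrella header has the same shape in each of the files touched by this run of patches: include the specific kernel headers the code actually relies on, and replace the old DRM_UDELAY() wrapper with a plain udelay(). A rough sketch of that pattern follows; the header list and the helper are illustrative only, not a quote of any one hunk in this series:

#include <linux/delay.h>        /* udelay(), no longer dragged in via drmP.h */
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"

/* ring-test style poll: DRM_UDELAY(1) becomes udelay(1) */
static int scratch_poll_sketch(struct amdgpu_device *adev, u32 scratch)
{
        unsigned int i;

        for (i = 0; i < adev->usec_timeout; i++) {
                if (RREG32(scratch) == 0xDEADBEEF)
                        break;
                udelay(1);
        }

        return i < adev->usec_timeout ? 0 : -ETIMEDOUT;
}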
Acked-by: Sam Ravnborg Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index e12478a5b902..f6c39583ff7b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -20,8 +20,12 @@ * OTHER DEALINGS IN THE SOFTWARE. * */ + +#include +#include #include -#include +#include +#include #include "amdgpu.h" #include "amdgpu_gfx.h" #include "amdgpu_psp.h" @@ -393,7 +397,7 @@ static int gfx_v10_0_ring_test_ring(struct amdgpu_ring *ring) if (amdgpu_emu_mode == 1) msleep(1); else - DRM_UDELAY(1); + udelay(1); } if (i < adev->usec_timeout) { if (amdgpu_emu_mode == 1) @@ -4551,7 +4555,7 @@ static int gfx_v10_0_ring_preempt_ib(struct amdgpu_ring *ring) if (ring->trail_seq == le32_to_cpu(*(ring->trail_fence_cpu_addr))) break; - DRM_UDELAY(1); + udelay(1); } if (i >= adev->usec_timeout) { -- cgit v1.2.3 From b23b2e9e495799e7889b2cb64ea505a27553bdc3 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 31 Jul 2019 10:34:39 -0500 Subject: drm/amdgpu: drop drmP.h from navi10_ih.c And fix the fallout. Acked-by: Sam Ravnborg Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/navi10_ih.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c index e963746be11c..9fe08408db58 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c @@ -21,7 +21,8 @@ * */ -#include +#include + #include "amdgpu.h" #include "amdgpu_ih.h" -- cgit v1.2.3 From e9eea90247fef9070e72b6a010535681c825c117 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 31 Jul 2019 10:39:40 -0500 Subject: drm/amdgpu: drop drmP.h from nv.c And fix up the fallout. Acked-by: Sam Ravnborg Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nv.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index e4885e2d281a..595a907f4ea7 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -23,7 +23,8 @@ #include #include #include -#include +#include + #include "amdgpu.h" #include "amdgpu_atombios.h" #include "amdgpu_ih.h" -- cgit v1.2.3 From 75589f496d6d3f13e8e2da6c32a93b6b7902d4ad Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 31 Jul 2019 10:43:40 -0500 Subject: drm/amdgpu: drop drmP.h from sdma_v5_0.c And fix the fallout. 
Acked-by: Sam Ravnborg Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index 3e536140bfd6..aa43dc6c599a 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -21,8 +21,11 @@ * */ +#include #include -#include +#include +#include + #include "amdgpu.h" #include "amdgpu_ucode.h" #include "amdgpu_trace.h" @@ -882,7 +885,7 @@ static int sdma_v5_0_ring_test_ring(struct amdgpu_ring *ring) if (amdgpu_emu_mode == 1) msleep(1); else - DRM_UDELAY(1); + udelay(1); } if (i < adev->usec_timeout) { @@ -1337,7 +1340,7 @@ static int sdma_v5_0_ring_preempt_ib(struct amdgpu_ring *ring) if (ring->trail_seq == le32_to_cpu(*(ring->trail_fence_cpu_addr))) break; - DRM_UDELAY(1); + udelay(1); } if (i >= adev->usec_timeout) { -- cgit v1.2.3 From 9a2ffeb525a1c6f4e25afe0598a10ec43731cb6e Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 31 Jul 2019 10:45:52 -0500 Subject: drm/amdgpu: drop drmP.h from vcn_v2_0.c And fix the fallout. Acked-by: Sam Ravnborg Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index eef3ec5449af..36ad0c0e8efb 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -22,7 +22,7 @@ */ #include -#include + #include "amdgpu.h" #include "amdgpu_vcn.h" #include "soc15.h" @@ -2112,7 +2112,7 @@ static int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring) tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9); if (tmp == 0xDEADBEEF) break; - DRM_UDELAY(1); + udelay(1); } if (i >= adev->usec_timeout) -- cgit v1.2.3 From fa1884f9d8016bbd94e5b6c7c9abb4bd3b223bad Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 31 Jul 2019 10:47:26 -0500 Subject: drm/amdgpu: drop drmP.h from vcn_v2_5.c Unused. 
Acked-by: Sam Ravnborg Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index 0c84dbc6a62d..395c2259f979 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -22,7 +22,7 @@ */ #include -#include + #include "amdgpu.h" #include "amdgpu_vcn.h" #include "soc15.h" -- cgit v1.2.3 From 7af25d5b7ea70a7ac669a61c5d9317b2be27c2b9 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Wed, 17 Jul 2019 17:34:46 +0800 Subject: drm/amdgpu: move some ras data structure to amdgpu_ras.h These are common structures that can be included by IP specific source files Signed-off-by: Hawking Zhang Reviewed-by: Dennis Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 68 -------------------------------- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 69 ++++++++++++++++++++++++++++++++- 2 files changed, 68 insertions(+), 69 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index fac7aa2c244f..ff695ceb35bd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -30,74 +30,6 @@ #include "amdgpu_ras.h" #include "amdgpu_atomfirmware.h" -struct ras_ih_data { - /* interrupt bottom half */ - struct work_struct ih_work; - int inuse; - /* IP callback */ - ras_ih_cb cb; - /* full of entries */ - unsigned char *ring; - unsigned int ring_size; - unsigned int element_size; - unsigned int aligned_element_size; - unsigned int rptr; - unsigned int wptr; -}; - -struct ras_fs_data { - char sysfs_name[32]; - char debugfs_name[32]; -}; - -struct ras_err_data { - unsigned long ue_count; - unsigned long ce_count; -}; - -struct ras_err_handler_data { - /* point to bad pages array */ - struct { - unsigned long bp; - struct amdgpu_bo *bo; - } *bps; - /* the count of entries */ - int count; - /* the space can place new entries */ - int space_left; - /* last reserved entry's index + 1 */ - int last_reserved; -}; - -struct ras_manager { - struct ras_common_if head; - /* reference count */ - int use; - /* ras block link */ - struct list_head node; - /* the device */ - struct amdgpu_device *adev; - /* debugfs */ - struct dentry *ent; - /* sysfs */ - struct device_attribute sysfs_attr; - int attr_inuse; - - /* fs node name */ - struct ras_fs_data fs_data; - - /* IH data */ - struct ras_ih_data ih_data; - - struct ras_err_data err_data; -}; - -struct ras_badpage { - unsigned int bp; - unsigned int size; - unsigned int flags; -}; - const char *ras_error_string[] = { "none", "parity", diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index b2841195bd3b..80e94d604a2e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -108,8 +108,75 @@ struct amdgpu_ras { uint32_t flags; }; -/* interfaces for IP */ +struct ras_ih_data { + /* interrupt bottom half */ + struct work_struct ih_work; + int inuse; + /* IP callback */ + ras_ih_cb cb; + /* full of entries */ + unsigned char *ring; + unsigned int ring_size; + unsigned int element_size; + unsigned int aligned_element_size; + unsigned int rptr; + unsigned int wptr; +}; + +struct ras_fs_data { + char sysfs_name[32]; + char debugfs_name[32]; +}; + +struct ras_err_data { + unsigned long ue_count; + unsigned long ce_count; +}; + +struct 
ras_err_handler_data { + /* point to bad pages array */ + struct { + unsigned long bp; + struct amdgpu_bo *bo; + } *bps; + /* the count of entries */ + int count; + /* the space can place new entries */ + int space_left; + /* last reserved entry's index + 1 */ + int last_reserved; +}; +struct ras_manager { + struct ras_common_if head; + /* reference count */ + int use; + /* ras block link */ + struct list_head node; + /* the device */ + struct amdgpu_device *adev; + /* debugfs */ + struct dentry *ent; + /* sysfs */ + struct device_attribute sysfs_attr; + int attr_inuse; + + /* fs node name */ + struct ras_fs_data fs_data; + + /* IH data */ + struct ras_ih_data ih_data; + + struct ras_err_data err_data; +}; + +struct ras_badpage { + unsigned int bp; + unsigned int size; + unsigned int flags; +}; + +/* interfaces for IP */ struct ras_fs_if { struct ras_common_if head; char sysfs_name[32]; -- cgit v1.2.3 From 6501a771708d7fa66bc87b13efcc8ab837f1e2ed Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Wed, 17 Jul 2019 17:52:28 +0800 Subject: drm/amdgpu: init RSMU and UMC ip base address for vega20 the driver needs to program RSMU and UMC registers to support vega20 RAS feature Signed-off-by: Hawking Zhang Reviewed-by: Dennis Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 ++ drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c | 2 ++ 2 files changed, 4 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index c87dfdb8aedb..de2853b281f7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -752,6 +752,8 @@ enum amd_hw_ip_block_type { NBIF_HWIP, THM_HWIP, CLK_HWIP, + UMC_HWIP, + RSMU_HWIP, MAX_HWIP }; diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c index 79223188bd47..587e33f5dcce 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c @@ -50,6 +50,8 @@ int vega20_reg_base_init(struct amdgpu_device *adev) adev->reg_offset[NBIF_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i])); adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i])); adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i])); + adev->reg_offset[UMC_HWIP][i] = (uint32_t *)(&(UMC_BASE.instance[i])); + adev->reg_offset[RSMU_HWIP][i] = (uint32_t *)(&(RSMU_BASE.instance[i])); } return 0; } -- cgit v1.2.3 From 9e585a523baa7cdd7d47f95336bdb47bd64f4d7b Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Tue, 23 Jul 2019 19:42:03 +0800 Subject: drm/amdgpu: add amdgpu_umc_functions structure This is common structure as UMC callback function Signed-off-by: Hawking Zhang Reviewed-by: Dennis Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h | 29 +++++++++++++++++++++++++++++ 2 files changed, 31 insertions(+) create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index de2853b281f7..c23cffc246e4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -86,6 +86,7 @@ #include "amdgpu_smu.h" #include "amdgpu_discovery.h" #include "amdgpu_mes.h" +#include "amdgpu_umc.h" #define MAX_GPU_INSTANCE 16 @@ -967,6 +968,7 @@ struct amdgpu_device { const struct amdgpu_nbio_funcs *nbio_funcs; const struct amdgpu_df_funcs *df_funcs; + const struct amdgpu_umc_funcs 
*umc_funcs; /* delayed work_func for deferring clockgating during resume */ struct delayed_work delayed_init_work; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h new file mode 100644 index 000000000000..1ee1a00e5ac8 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h @@ -0,0 +1,29 @@ +/* + * Copyright (C) 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef __AMDGPU_UMC_H__ +#define __AMDGPU_UMC_H__ + +struct amdgpu_umc_funcs { + void (*query_ras_error_count)(struct amdgpu_device *adev, + void *ras_error_status); +}; + +#endif -- cgit v1.2.3 From 245219a66085332a30e4653db3542ea5654ff762 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Wed, 24 Jul 2019 14:13:53 +0800 Subject: drm/amdgpu: add rsmu v_0_0_2 ip headers remote smu (rsmu) is a sub-block used as ip register interface, error handling, reset generation.etc Signed-off-by: Hawking Zhang Reviewed-by: Dennis Li Signed-off-by: Alex Deucher --- .../amd/include/asic_reg/rsmu/rsmu_0_0_2_offset.h | 27 ++++++++++++++++++ .../amd/include/asic_reg/rsmu/rsmu_0_0_2_sh_mask.h | 32 ++++++++++++++++++++++ 2 files changed, 59 insertions(+) create mode 100644 drivers/gpu/drm/amd/include/asic_reg/rsmu/rsmu_0_0_2_offset.h create mode 100644 drivers/gpu/drm/amd/include/asic_reg/rsmu/rsmu_0_0_2_sh_mask.h (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/include/asic_reg/rsmu/rsmu_0_0_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/rsmu/rsmu_0_0_2_offset.h new file mode 100644 index 000000000000..46466ae77f19 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/rsmu/rsmu_0_0_2_offset.h @@ -0,0 +1,27 @@ +/* + * Copyright (C) 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef _rsmu_0_0_2_OFFSET_HEADER +#define _rsmu_0_0_2_OFFSET_HEADER + +#define mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU 0x0d91 +#define mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU_BASE_IDX 0 + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/rsmu/rsmu_0_0_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/rsmu/rsmu_0_0_2_sh_mask.h new file mode 100644 index 000000000000..ea0acb598254 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/rsmu/rsmu_0_0_2_sh_mask.h @@ -0,0 +1,32 @@ +/* + * Copyright (C) 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef _rsmu_0_0_2_SH_MASK_HEADER +#define _rsmu_0_0_2_SH_MASK_HEADER + +//RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU +#define RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU__RSMU_UMC_INDEX_WREN__SHIFT 0x0 +#define RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU__RSMU_UMC_INDEX_INSTANCE__SHIFT 0x10 +#define RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU__RSMU_UMC_INDEX_MODE_EN__SHIFT 0x1f +#define RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU__RSMU_UMC_INDEX_WREN_MASK 0x0000FFFFL +#define RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU__RSMU_UMC_INDEX_INSTANCE_MASK 0x000F0000L +#define RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU__RSMU_UMC_INDEX_MODE_EN_MASK 0x80000000L + +#endif -- cgit v1.2.3 From 03c9963f47a9efe204983fb0ea022814f8ce0084 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Wed, 24 Jul 2019 14:36:49 +0800 Subject: drm/amdgpu: add umc v6_1_1 IP headers the change introduces IP headers for unified memory controller (umc) Signed-off-by: Hawking Zhang Reviewed-by: Dennis Li Signed-off-by: Alex Deucher --- .../amd/include/asic_reg/umc/umc_6_1_1_offset.h | 31 ++++++++ .../amd/include/asic_reg/umc/umc_6_1_1_sh_mask.h | 91 ++++++++++++++++++++++ 2 files changed, 122 insertions(+) create mode 100644 drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_1_offset.h create mode 100644 drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_1_sh_mask.h (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_1_offset.h new file mode 100644 index 000000000000..043aa695d63f --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_1_offset.h @@ -0,0 +1,31 @@ +/* + * Copyright (C) 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef _umc_6_1_1_OFFSET_HEADER +#define _umc_6_1_1_OFFSET_HEADER + +#define mmUMCCH0_0_EccErrCntSel 0x0360 +#define mmUMCCH0_0_EccErrCntSel_BASE_IDX 0 +#define mmUMCCH0_0_EccErrCnt 0x0361 +#define mmUMCCH0_0_EccErrCnt_BASE_IDX 0 +#define mmMCA_UMC_UMC0_MCUMC_STATUST0 0x03c2 +#define mmMCA_UMC_UMC0_MCUMC_STATUST0_BASE_IDX 0 + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_1_sh_mask.h new file mode 100644 index 000000000000..45c888280af9 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_1_sh_mask.h @@ -0,0 +1,91 @@ +/* + * Copyright (C) 2019 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef _umc_6_1_1_SH_MASK_HEADER +#define _umc_6_1_1_SH_MASK_HEADER + +//UMCCH0_0_EccErrCntSel +#define UMCCH0_0_EccErrCntSel__EccErrCntCsSel__SHIFT 0x0 +#define UMCCH0_0_EccErrCntSel__EccErrInt__SHIFT 0xc +#define UMCCH0_0_EccErrCntSel__EccErrCntEn__SHIFT 0xf +#define UMCCH0_0_EccErrCntSel__EccErrCntCsSel_MASK 0x0000000FL +#define UMCCH0_0_EccErrCntSel__EccErrInt_MASK 0x00003000L +#define UMCCH0_0_EccErrCntSel__EccErrCntEn_MASK 0x00008000L +//UMCCH0_0_EccErrCnt +#define UMCCH0_0_EccErrCnt__EccErrCnt__SHIFT 0x0 +#define UMCCH0_0_EccErrCnt__EccErrCnt_MASK 0x0000FFFFL +//MCA_UMC_UMC0_MCUMC_STATUST0 +#define MCA_UMC_UMC0_MCUMC_STATUST0__ErrorCode__SHIFT 0x0 +#define MCA_UMC_UMC0_MCUMC_STATUST0__ErrorCodeExt__SHIFT 0x10 +#define MCA_UMC_UMC0_MCUMC_STATUST0__RESERV0__SHIFT 0x16 +#define MCA_UMC_UMC0_MCUMC_STATUST0__ErrCoreId__SHIFT 0x20 +#define MCA_UMC_UMC0_MCUMC_STATUST0__RESERV1__SHIFT 0x26 +#define MCA_UMC_UMC0_MCUMC_STATUST0__Scrub__SHIFT 0x28 +#define MCA_UMC_UMC0_MCUMC_STATUST0__RESERV2__SHIFT 0x29 +#define MCA_UMC_UMC0_MCUMC_STATUST0__Poison__SHIFT 0x2b +#define MCA_UMC_UMC0_MCUMC_STATUST0__Deferred__SHIFT 0x2c +#define MCA_UMC_UMC0_MCUMC_STATUST0__UECC__SHIFT 0x2d +#define MCA_UMC_UMC0_MCUMC_STATUST0__CECC__SHIFT 0x2e +#define MCA_UMC_UMC0_MCUMC_STATUST0__RESERV3__SHIFT 0x2f +#define MCA_UMC_UMC0_MCUMC_STATUST0__Transparent__SHIFT 0x34 +#define MCA_UMC_UMC0_MCUMC_STATUST0__SyndV__SHIFT 0x35 +#define MCA_UMC_UMC0_MCUMC_STATUST0__RESERV4__SHIFT 0x36 +#define MCA_UMC_UMC0_MCUMC_STATUST0__TCC__SHIFT 0x37 +#define MCA_UMC_UMC0_MCUMC_STATUST0__ErrCoreIdVal__SHIFT 0x38 +#define MCA_UMC_UMC0_MCUMC_STATUST0__PCC__SHIFT 0x39 +#define MCA_UMC_UMC0_MCUMC_STATUST0__AddrV__SHIFT 0x3a +#define MCA_UMC_UMC0_MCUMC_STATUST0__MiscV__SHIFT 0x3b +#define MCA_UMC_UMC0_MCUMC_STATUST0__En__SHIFT 0x3c +#define MCA_UMC_UMC0_MCUMC_STATUST0__UC__SHIFT 0x3d +#define MCA_UMC_UMC0_MCUMC_STATUST0__Overflow__SHIFT 0x3e +#define MCA_UMC_UMC0_MCUMC_STATUST0__Val__SHIFT 0x3f +#define MCA_UMC_UMC0_MCUMC_STATUST0__ErrorCode_MASK 0x000000000000FFFFL +#define MCA_UMC_UMC0_MCUMC_STATUST0__ErrorCodeExt_MASK 0x00000000003F0000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__RESERV0_MASK 0x00000000FFC00000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__ErrCoreId_MASK 0x0000003F00000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__RESERV1_MASK 0x000000C000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__Scrub_MASK 0x0000010000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__RESERV2_MASK 
0x0000060000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__Poison_MASK 0x0000080000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__Deferred_MASK 0x0000100000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__UECC_MASK 0x0000200000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__CECC_MASK 0x0000400000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__RESERV3_MASK 0x000F800000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__Transparent_MASK 0x0010000000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__SyndV_MASK 0x0020000000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__RESERV4_MASK 0x0040000000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__TCC_MASK 0x0080000000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__ErrCoreIdVal_MASK 0x0100000000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__PCC_MASK 0x0200000000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__AddrV_MASK 0x0400000000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__MiscV_MASK 0x0800000000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__En_MASK 0x1000000000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__UC_MASK 0x2000000000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__Overflow_MASK 0x4000000000000000L +#define MCA_UMC_UMC0_MCUMC_STATUST0__Val_MASK 0x8000000000000000L +//MCA_UMC_UMC0_MCUMC_ADDRT0 +#define MCA_UMC_UMC0_MCUMC_ADDRT0__ErrorAddr__SHIFT 0x0 +#define MCA_UMC_UMC0_MCUMC_ADDRT0__LSB__SHIFT 0x38 +#define MCA_UMC_UMC0_MCUMC_ADDRT0__Reserved__SHIFT 0x3e +#define MCA_UMC_UMC0_MCUMC_ADDRT0__ErrorAddr_MASK 0x00FFFFFFFFFFFFFFL +#define MCA_UMC_UMC0_MCUMC_ADDRT0__LSB_MASK 0x3F00000000000000L +#define MCA_UMC_UMC0_MCUMC_ADDRT0__Reserved_MASK 0xC000000000000000L + +#endif -- cgit v1.2.3 From 9884c2b1c38c33a9152f5aff162473f348fe3acd Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Wed, 31 Jul 2019 20:23:01 +0800 Subject: drm/amdgpu: add umc v6_1 query error count support Implement umc query_ras_error_count function to support querry both correctable and uncorrectable error Signed-off-by: Hawking Zhang Signed-off-by: Tao Zhou Reviewed-by: Dennis Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 4 + drivers/gpu/drm/amd/amdgpu/umc_v6_1.c | 162 ++++++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/umc_v6_1.h | 39 ++++++++ 3 files changed, 205 insertions(+) create mode 100644 drivers/gpu/drm/amd/amdgpu/umc_v6_1.c create mode 100644 drivers/gpu/drm/amd/amdgpu/umc_v6_1.h (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 7a1a78c7b329..cc38a6836825 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -81,6 +81,10 @@ amdgpu-y += \ gfxhub_v1_0.o mmhub_v1_0.o gmc_v9_0.o gfxhub_v1_1.o mmhub_v9_4.o \ gfxhub_v2_0.o mmhub_v2_0.o gmc_v10_0.o +# add UMC block +amdgpu-y += \ + umc_v6_1.o + # add IH block amdgpu-y += \ amdgpu_irq.o \ diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c new file mode 100644 index 000000000000..1ca5ae642946 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c @@ -0,0 +1,162 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "umc_v6_1.h" +#include "amdgpu_ras.h" +#include "amdgpu.h" + +#include "rsmu/rsmu_0_0_2_offset.h" +#include "rsmu/rsmu_0_0_2_sh_mask.h" +#include "umc/umc_6_1_1_offset.h" +#include "umc/umc_6_1_1_sh_mask.h" + +static void umc_v6_1_enable_umc_index_mode(struct amdgpu_device *adev, + uint32_t umc_instance) +{ + uint32_t rsmu_umc_index; + + rsmu_umc_index = RREG32_SOC15(RSMU, 0, + mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU); + rsmu_umc_index = REG_SET_FIELD(rsmu_umc_index, + RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU, + RSMU_UMC_INDEX_MODE_EN, 1); + rsmu_umc_index = REG_SET_FIELD(rsmu_umc_index, + RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU, + RSMU_UMC_INDEX_INSTANCE, umc_instance); + rsmu_umc_index = REG_SET_FIELD(rsmu_umc_index, + RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU, + RSMU_UMC_INDEX_WREN, 1 << umc_instance); + WREG32_SOC15(RSMU, 0, mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU, + rsmu_umc_index); +} + +static void umc_v6_1_disable_umc_index_mode(struct amdgpu_device *adev) +{ + WREG32_FIELD15(RSMU, 0, RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU, + RSMU_UMC_INDEX_MODE_EN, 0); +} + +static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev, + uint32_t umc_reg_offset, + unsigned long *error_count) +{ + uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr; + uint32_t ecc_err_cnt, ecc_err_cnt_addr; + uint64_t mc_umc_status; + uint32_t mc_umc_status_addr; + + ecc_err_cnt_sel_addr = + SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel); + ecc_err_cnt_addr = + SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt); + mc_umc_status_addr = + SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0); + + /* select the lower chip and check the error count */ + ecc_err_cnt_sel = RREG32(ecc_err_cnt_sel_addr + umc_reg_offset); + ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel, + EccErrCntCsSel, 0); + WREG32(ecc_err_cnt_sel_addr + umc_reg_offset, ecc_err_cnt_sel); + ecc_err_cnt = RREG32(ecc_err_cnt_addr + umc_reg_offset); + *error_count += + REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt); + /* clear the lower chip err count */ + WREG32(ecc_err_cnt_addr + umc_reg_offset, 0); + + /* select the higher chip and check the err counter */ + ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel, + EccErrCntCsSel, 1); + WREG32(ecc_err_cnt_sel_addr + umc_reg_offset, ecc_err_cnt_sel); + ecc_err_cnt = RREG32(ecc_err_cnt_addr + umc_reg_offset); + *error_count += + REG_GET_FIELD(ecc_err_cnt, 
UMCCH0_0_EccErrCnt, EccErrCnt); + /* clear the higher chip err count */ + WREG32(ecc_err_cnt_addr + umc_reg_offset, 0); + + /* check for SRAM correctable error + MCUMC_STATUS is a 64 bit register */ + mc_umc_status = + RREG32(mc_umc_status_addr + umc_reg_offset); + mc_umc_status |= + (uint64_t)RREG32(mc_umc_status_addr + umc_reg_offset + 1) << 32; + if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 6 && + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1) + *error_count += 1; + + /* clear the MCUMC_STATUS */ + WREG32(mc_umc_status_addr + umc_reg_offset, 0); + WREG32(mc_umc_status_addr + umc_reg_offset + 1, 0); +} + +static void umc_v6_1_querry_uncorrectable_error_count(struct amdgpu_device *adev, + uint32_t umc_reg_offset, + unsigned long *error_count) +{ + uint64_t mc_umc_status; + uint32_t mc_umc_status_addr; + + mc_umc_status_addr = + SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0); + + /* check the MCUMC_STATUS */ + mc_umc_status = RREG32(mc_umc_status_addr + umc_reg_offset); + mc_umc_status |= + (uint64_t)RREG32(mc_umc_status_addr + umc_reg_offset + 1) << 32; + + if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 6 && + (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 || + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 || + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 || + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1)) + *error_count += 1; + + /* clear the MCUMC_STATUS */ + WREG32(mc_umc_status_addr + umc_reg_offset, 0); + WREG32(mc_umc_status_addr + umc_reg_offset + 1, 0); +} + +static void umc_v6_1_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status) +{ + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + uint32_t umc_inst, channel_inst, umc_reg_offset; + + for (umc_inst = 0; umc_inst < UMC_V6_1_UMC_INSTANCE_NUM; umc_inst++) { + /* enable the index mode to query eror count per channel */ + umc_v6_1_enable_umc_index_mode(adev, umc_inst); + for (channel_inst = 0; channel_inst < UMC_V6_1_CHANNEL_INSTANCE_NUM; channel_inst++) { + /* calc the register offset according to channel instance */ + umc_reg_offset = UMC_V6_1_PER_CHANNEL_OFFSET * channel_inst; + umc_v6_1_query_correctable_error_count(adev, umc_reg_offset, + &(err_data->ce_count)); + umc_v6_1_querry_uncorrectable_error_count(adev, umc_reg_offset, + &(err_data->ue_count)); + } + } + umc_v6_1_disable_umc_index_mode(adev); +} + +const struct amdgpu_umc_funcs umc_v6_1_funcs = { + .query_ras_error_count = umc_v6_1_query_ras_error_count, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h new file mode 100644 index 000000000000..d25ae414f4d8 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h @@ -0,0 +1,39 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __UMC_V6_1_H__ +#define __UMC_V6_1_H__ + +#include "soc15_common.h" + +/* HBM Memory Channel Width */ +#define UMC_V6_1_HBM_MEMORY_CHANNEL_WIDTH 128 +/* number of umc channel instance with memory map register access */ +#define UMC_V6_1_CHANNEL_INSTANCE_NUM 4 +/* number of umc instance with memory map register access */ +#define UMC_V6_1_UMC_INSTANCE_NUM 8 +/* UMC regiser per channel offset */ +#define UMC_V6_1_PER_CHANNEL_OFFSET 0x800 + +extern const struct amdgpu_umc_funcs umc_v6_1_funcs; + +#endif -- cgit v1.2.3 From 5b6b35aaacca95616873db25ad521c6bb5ad39f6 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Wed, 17 Jul 2019 21:47:44 +0800 Subject: drm/amdgpu: init umc v6_1 functions for vega20 init umc callback function for vega20 in sw early init phase Signed-off-by: Hawking Zhang Reviewed-by: Dennis Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 320b5413738e..19f3d715e2c8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -49,6 +49,7 @@ #include "mmhub_v1_0.h" #include "gfxhub_v1_1.h" #include "mmhub_v9_4.h" +#include "umc_v6_1.h" #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h" @@ -627,12 +628,24 @@ static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev) adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs; } +static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev) +{ + switch (adev->asic_type) { + case CHIP_VEGA20: + adev->umc_funcs = &umc_v6_1_funcs; + break; + default: + break; + } +} + static int gmc_v9_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; gmc_v9_0_set_gmc_funcs(adev); gmc_v9_0_set_irq_funcs(adev); + gmc_v9_0_set_umc_funcs(adev); adev->gmc.shared_aperture_start = 0x2000000000000000ULL; adev->gmc.shared_aperture_end = @@ -721,6 +734,7 @@ static int gmc_v9_0_ecc_late_init(void *handle) amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0); return 0; } + /* handle resume path. */ if (*ras_if) { /* resend ras TA enable cmd during resume. 
-- cgit v1.2.3 From 939e2258ce4c2e9f5bc3a4f42ca7290a015306c2 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Wed, 17 Jul 2019 21:49:53 +0800 Subject: drm/amdgpu: querry umc error count check umc error count in both ras querry function and ras interrupt handler Signed-off-by: Hawking Zhang Reviewed-by: Dennis Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 11 ++++++++++- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 3 +++ 2 files changed, 13 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index ff695ceb35bd..3d39d624e9c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -588,11 +588,19 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev, struct ras_query_if *info) { struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); + struct ras_err_data err_data = {0, 0}; if (!obj) return -EINVAL; - /* TODO might read the register to read the count */ + switch (info->head.block) { + case AMDGPU_RAS_BLOCK__UMC: + if (adev->umc_funcs->query_ras_error_count) + adev->umc_funcs->query_ras_error_count(adev, &err_data); + break; + default: + break; + } info->ue_count = obj->err_data.ue_count; info->ce_count = obj->err_data.ce_count; @@ -986,6 +994,7 @@ static void amdgpu_ras_interrupt_handler(struct ras_manager *obj) struct ras_ih_data *data = &obj->ih_data; struct amdgpu_iv_entry entry; int ret; + struct ras_err_data err_data = {0, 0}; while (data->rptr != data->wptr) { rmb(); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 19f3d715e2c8..5282c9489c70 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -245,7 +245,10 @@ static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev, static int gmc_v9_0_process_ras_data_cb(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry) { + struct ras_err_data err_data = {0, 0}; kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); + if (adev->umc_funcs->query_ras_error_count) + adev->umc_funcs->query_ras_error_count(adev, &err_data); amdgpu_ras_reset_gpu(adev, 0); return AMDGPU_RAS_UE; } -- cgit v1.2.3 From 05a58345db4e075eb709d89ea3f43610b93bfb89 Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Wed, 31 Jul 2019 20:28:13 +0800 Subject: drm/amdgpu: add ras error count after each query (v2) v1: increase ras ce/ue error count v2: log the number of correctable and uncorrectable errors Signed-off-by: Tao Zhou Signed-off-by: Hawking Zhang Reviewed-by: Dennis Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 3d39d624e9c6..a6134280b941 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -601,9 +601,20 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev, default: break; } + + obj->err_data.ue_count += err_data.ue_count; + obj->err_data.ce_count += err_data.ce_count; + info->ue_count = obj->err_data.ue_count; info->ce_count = obj->err_data.ce_count; + if (err_data.ce_count) + dev_info(adev->dev, "%ld correctable errors detected in %s block\n", + obj->err_data.ce_count, ras_block_str(info->head.block)); + if (err_data.ue_count) + dev_info(adev->dev, "%ld uncorrectable errors detected in %s block\n", + obj->err_data.ue_count, 
ras_block_str(info->head.block)); + return 0; } -- cgit v1.2.3 From 4fa1c6a679bb0d0bb92cf5bf9b7049ef98552848 Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Wed, 24 Jul 2019 15:13:27 +0800 Subject: drm/amdgpu: add RREG64/WREG64(_PCIE) operations add 64 bits register access functions v2: implement 64 bit functions in low level Signed-off-by: Tao Zhou Reviewed-by: Dennis Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 11 +++++ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 73 ++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/soc15.c | 45 ++++++++++++++++++ 3 files changed, 129 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index c23cffc246e4..61bd7be69a3f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -637,6 +637,9 @@ void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device); typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t); typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t); +typedef uint64_t (*amdgpu_rreg64_t)(struct amdgpu_device*, uint32_t); +typedef void (*amdgpu_wreg64_t)(struct amdgpu_device*, uint32_t, uint64_t); + typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t); typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t); @@ -830,6 +833,8 @@ struct amdgpu_device { amdgpu_wreg_t pcie_wreg; amdgpu_rreg_t pciep_rreg; amdgpu_wreg_t pciep_wreg; + amdgpu_rreg64_t pcie_rreg64; + amdgpu_wreg64_t pcie_wreg64; /* protects concurrent UVD register access */ spinlock_t uvd_ctx_idx_lock; amdgpu_rreg_t uvd_ctx_rreg; @@ -1030,6 +1035,8 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t acc_flags); void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value); uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset); +uint64_t amdgpu_mm_rreg64(struct amdgpu_device *adev, uint32_t reg); +void amdgpu_mm_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v); u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg); void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v); @@ -1057,12 +1064,16 @@ int emu_soc_asic_init(struct amdgpu_device *adev); #define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), 0)) #define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), 0) #define WREG32_IDX(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_IDX) +#define RREG64(reg) amdgpu_mm_rreg64(adev, (reg)) +#define WREG64(reg, v) amdgpu_mm_wreg64(adev, (reg), (v)) #define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) #define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) #define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg)) #define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v)) #define RREG32_PCIE_PORT(reg) adev->pciep_rreg(adev, (reg)) #define WREG32_PCIE_PORT(reg, v) adev->pciep_wreg(adev, (reg), (v)) +#define RREG64_PCIE(reg) adev->pcie_rreg64(adev, (reg)) +#define WREG64_PCIE(reg, v) adev->pcie_wreg64(adev, (reg), (v)) #define RREG32_SMC(reg) adev->smc_rreg(adev, (reg)) #define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v)) #define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 2081649f49ca..6940600ebf0e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -261,6 +261,43 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, } } +/** + * amdgpu_mm_rreg64 - read a 64 bit memory mapped IO register + * + * @adev: amdgpu_device pointer + * @reg: dword aligned register offset + * + * Returns the 64 bit value from the offset specified. + */ +uint64_t amdgpu_mm_rreg64(struct amdgpu_device *adev, uint32_t reg) +{ + uint64_t ret; + + if ((reg * 4) < adev->rmmio_size) + ret = readq(((void __iomem *)adev->rmmio) + (reg * 4)); + else + BUG(); + + return ret; +} + +/** + * amdgpu_mm_wreg64 - write to a 64 bit memory mapped IO register + * + * @adev: amdgpu_device pointer + * @reg: dword aligned register offset + * @v: 64 bit value to write to the register + * + * Writes the value specified to the offset specified. + */ +void amdgpu_mm_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v) +{ + if ((reg * 4) < adev->rmmio_size) + writeq(v, ((void __iomem *)adev->rmmio) + (reg * 4)); + else + BUG(); +} + /** * amdgpu_io_rreg - read an IO register * @@ -416,6 +453,40 @@ static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32 BUG(); } +/** + * amdgpu_invalid_rreg64 - dummy 64 bit reg read function + * + * @adev: amdgpu device pointer + * @reg: offset of register + * + * Dummy register read function. Used for register blocks + * that certain asics don't have (all asics). + * Returns the value in the register. + */ +static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg) +{ + DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg); + BUG(); + return 0; +} + +/** + * amdgpu_invalid_wreg64 - dummy reg write function + * + * @adev: amdgpu device pointer + * @reg: offset of register + * @v: value to write to the register + * + * Dummy register read function. Used for register blocks + * that certain asics don't have (all asics). 
+ */ +static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v) +{ + DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n", + reg, v); + BUG(); +} + /** * amdgpu_block_invalid_rreg - dummy reg read function * @@ -2537,6 +2608,8 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->pcie_wreg = &amdgpu_invalid_wreg; adev->pciep_rreg = &amdgpu_invalid_rreg; adev->pciep_wreg = &amdgpu_invalid_wreg; + adev->pcie_rreg64 = &amdgpu_invalid_rreg64; + adev->pcie_wreg64 = &amdgpu_invalid_wreg64; adev->uvd_ctx_rreg = &amdgpu_invalid_rreg; adev->uvd_ctx_wreg = &amdgpu_invalid_wreg; adev->didt_rreg = &amdgpu_invalid_rreg; diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index e528122bb7b4..bac232f9c627 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -116,6 +116,49 @@ static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v) spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); } +static u64 soc15_pcie_rreg64(struct amdgpu_device *adev, u32 reg) +{ + unsigned long flags, address, data; + u64 r; + address = adev->nbio_funcs->get_pcie_index_offset(adev); + data = adev->nbio_funcs->get_pcie_data_offset(adev); + + spin_lock_irqsave(&adev->pcie_idx_lock, flags); + /* read low 32 bit */ + WREG32(address, reg); + (void)RREG32(address); + r = RREG32(data); + + /* read high 32 bit*/ + WREG32(address, reg + 4); + (void)RREG32(address); + r |= ((u64)RREG32(data) << 32); + spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); + return r; +} + +static void soc15_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v) +{ + unsigned long flags, address, data; + + address = adev->nbio_funcs->get_pcie_index_offset(adev); + data = adev->nbio_funcs->get_pcie_data_offset(adev); + + spin_lock_irqsave(&adev->pcie_idx_lock, flags); + /* write low 32 bit */ + WREG32(address, reg); + (void)RREG32(address); + WREG32(data, (u32)(v & 0xffffffffULL)); + (void)RREG32(data); + + /* write high 32 bit */ + WREG32(address, reg + 4); + (void)RREG32(address); + WREG32(data, (u32)(v >> 32)); + (void)RREG32(data); + spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); +} + static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg) { unsigned long flags, address, data; @@ -866,6 +909,8 @@ static int soc15_common_early_init(void *handle) adev->smc_wreg = NULL; adev->pcie_rreg = &soc15_pcie_rreg; adev->pcie_wreg = &soc15_pcie_wreg; + adev->pcie_rreg64 = &soc15_pcie_rreg64; + adev->pcie_wreg64 = &soc15_pcie_wreg64; adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg; adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg; adev->didt_rreg = &soc15_didt_rreg; -- cgit v1.2.3 From 5bbfb64a177f36d3d208e39c61ce6df3968df4d4 Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Tue, 23 Jul 2019 11:57:15 +0800 Subject: drm/amdgpu: use 64bit operation macros for umc replace some 32bit macros with 64bit operations to simplify code Signed-off-by: Tao Zhou Reviewed-by: Dennis Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/umc_v6_1.c | 25 ++++++++----------------- 1 file changed, 8 insertions(+), 17 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c index 1ca5ae642946..8fbd81d3ce70 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c @@ -94,18 +94,11 @@ static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev, /* check for SRAM correctable error MCUMC_STATUS is a 64 bit 
register */ - mc_umc_status = - RREG32(mc_umc_status_addr + umc_reg_offset); - mc_umc_status |= - (uint64_t)RREG32(mc_umc_status_addr + umc_reg_offset + 1) << 32; + mc_umc_status = RREG64(mc_umc_status_addr + umc_reg_offset); if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 6 && REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1) *error_count += 1; - - /* clear the MCUMC_STATUS */ - WREG32(mc_umc_status_addr + umc_reg_offset, 0); - WREG32(mc_umc_status_addr + umc_reg_offset + 1, 0); } static void umc_v6_1_querry_uncorrectable_error_count(struct amdgpu_device *adev, @@ -119,10 +112,7 @@ static void umc_v6_1_querry_uncorrectable_error_count(struct amdgpu_device *adev SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0); /* check the MCUMC_STATUS */ - mc_umc_status = RREG32(mc_umc_status_addr + umc_reg_offset); - mc_umc_status |= - (uint64_t)RREG32(mc_umc_status_addr + umc_reg_offset + 1) << 32; - + mc_umc_status = RREG64(mc_umc_status_addr + umc_reg_offset); if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 6 && (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 || @@ -130,17 +120,16 @@ static void umc_v6_1_querry_uncorrectable_error_count(struct amdgpu_device *adev REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 || REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1)) *error_count += 1; - - /* clear the MCUMC_STATUS */ - WREG32(mc_umc_status_addr + umc_reg_offset, 0); - WREG32(mc_umc_status_addr + umc_reg_offset + 1, 0); } static void umc_v6_1_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status) { struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; - uint32_t umc_inst, channel_inst, umc_reg_offset; + uint32_t umc_inst, channel_inst, umc_reg_offset, mc_umc_status_addr; + + mc_umc_status_addr = + SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0); for (umc_inst = 0; umc_inst < UMC_V6_1_UMC_INSTANCE_NUM; umc_inst++) { /* enable the index mode to query eror count per channel */ @@ -152,6 +141,8 @@ static void umc_v6_1_query_ras_error_count(struct amdgpu_device *adev, &(err_data->ce_count)); umc_v6_1_querry_uncorrectable_error_count(adev, umc_reg_offset, &(err_data->ue_count)); + /* clear umc status */ + WREG64(mc_umc_status_addr + umc_reg_offset, 0x0ULL); } } umc_v6_1_disable_umc_index_mode(adev); -- cgit v1.2.3 From 045c02165397c6c2c01ca5b8f68a9b642f4d244f Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Tue, 23 Jul 2019 12:18:39 +0800 Subject: drm/amdgpu: switch to amdgpu_umc structure create new amdgpu_umc structure to for more umc settings in future and switch to the new structure Signed-off-by: Tao Zhou Signed-off-by: Hawking Zhang Reviewed-by: Dennis Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 4 +++- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h | 6 ++++++ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 8 +++++--- 4 files changed, 16 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 61bd7be69a3f..2aa06be83974 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -948,6 +948,9 @@ struct amdgpu_device { /* KFD */ struct amdgpu_kfd_dev kfd; + /* UMC */ + struct amdgpu_umc 
umc; + /* display related functionality */ struct amdgpu_display_manager dm; @@ -973,7 +976,6 @@ struct amdgpu_device { const struct amdgpu_nbio_funcs *nbio_funcs; const struct amdgpu_df_funcs *df_funcs; - const struct amdgpu_umc_funcs *umc_funcs; /* delayed work_func for deferring clockgating during resume */ struct delayed_work delayed_init_work; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index a6134280b941..5f428a3929bd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -595,8 +595,8 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev, switch (info->head.block) { case AMDGPU_RAS_BLOCK__UMC: - if (adev->umc_funcs->query_ras_error_count) - adev->umc_funcs->query_ras_error_count(adev, &err_data); + if (adev->umc.funcs->query_ras_error_count) + adev->umc.funcs->query_ras_error_count(adev, &err_data); break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h index 1ee1a00e5ac8..f5d6def96414 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h @@ -26,4 +26,10 @@ struct amdgpu_umc_funcs { void *ras_error_status); }; +struct amdgpu_umc { + /* max error count in one ras query call */ + uint32_t max_ras_err_cnt_per_query; + const struct amdgpu_umc_funcs *funcs; +}; + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 5282c9489c70..ae685998b282 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -247,8 +247,8 @@ static int gmc_v9_0_process_ras_data_cb(struct amdgpu_device *adev, { struct ras_err_data err_data = {0, 0}; kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); - if (adev->umc_funcs->query_ras_error_count) - adev->umc_funcs->query_ras_error_count(adev, &err_data); + if (adev->umc.funcs->query_ras_error_count) + adev->umc.funcs->query_ras_error_count(adev, &err_data); amdgpu_ras_reset_gpu(adev, 0); return AMDGPU_RAS_UE; } @@ -635,7 +635,9 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev) { switch (adev->asic_type) { case CHIP_VEGA20: - adev->umc_funcs = &umc_v6_1_funcs; + adev->umc.max_ras_err_cnt_per_query = + UMC_V6_1_UMC_INSTANCE_NUM * UMC_V6_1_CHANNEL_INSTANCE_NUM; + adev->umc.funcs = &umc_v6_1_funcs; break; default: break; -- cgit v1.2.3 From f1ed4afa130291be918e4b65a3759108d8fc868b Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Tue, 23 Jul 2019 12:25:16 +0800 Subject: drm/amdgpu: update algorithm of umc uncorrectable error counting remove the check of ErrorCodeExt v2: refine the if condition for ue counting Signed-off-by: Tao Zhou Reviewed-by: Dennis Li Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/umc_v6_1.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c index 8fbd81d3ce70..5b1ccb81b3a2 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c @@ -113,12 +113,12 @@ static void umc_v6_1_querry_uncorrectable_error_count(struct amdgpu_device *adev /* check the MCUMC_STATUS */ mc_umc_status = RREG64(mc_umc_status_addr + umc_reg_offset); - if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && - REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 6 && - (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 
1 || - REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 || - REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 || - REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1)) + if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) && + (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 || + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 || + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 || + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 || + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1)) *error_count += 1; } -- cgit v1.2.3 From 6f102dba808845836426dd08f1e5b19e2f70bec9 Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Mon, 22 Jul 2019 19:20:29 +0800 Subject: drm/amdgpu: add support for recording ras error address more than one error address may be recorded in one query Signed-off-by: Tao Zhou Reviewed-by: Dennis Li Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 5f428a3929bd..143153a3c5b0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -588,7 +588,7 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev, struct ras_query_if *info) { struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); - struct ras_err_data err_data = {0, 0}; + struct ras_err_data err_data = {0, 0, 0, NULL}; if (!obj) return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index 80e94d604a2e..0920db7aff34 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -131,6 +131,8 @@ struct ras_fs_data { struct ras_err_data { unsigned long ue_count; unsigned long ce_count; + unsigned long err_addr_cnt; + uint64_t *err_addr; }; struct ras_err_handler_data { -- cgit v1.2.3 From c2742aef4d17cca71346dc9327eef5840878a7d7 Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Mon, 22 Jul 2019 18:30:59 +0800 Subject: drm/amdgpu: add structures for umc error address translation add related registers, callback function and channel index table Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h | 2 ++ drivers/gpu/drm/amd/amdgpu/umc_v6_1.c | 10 ++++++++++ 2 files changed, 12 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h index f5d6def96414..dfa1a39e57af 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h @@ -24,6 +24,8 @@ struct amdgpu_umc_funcs { void (*query_ras_error_count)(struct amdgpu_device *adev, void *ras_error_status); + void (*query_ras_error_address)(struct amdgpu_device *adev, + void *ras_error_status); }; struct amdgpu_umc { diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c index 5b1ccb81b3a2..e05f3e68edb0 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c @@ -29,6 +29,16 @@ #include "umc/umc_6_1_1_offset.h" #include "umc/umc_6_1_1_sh_mask.h" +#define smnMCA_UMC0_MCUMC_ADDRT0 0x50f10 + +static uint32_t + 
umc_v6_1_channel_idx_tbl[UMC_V6_1_UMC_INSTANCE_NUM][UMC_V6_1_CHANNEL_INSTANCE_NUM] = { + {2, 18, 11, 27}, {4, 20, 13, 29}, + {1, 17, 8, 24}, {7, 23, 14, 30}, + {10, 26, 3, 19}, {12, 28, 5, 21}, + {9, 25, 0, 16}, {15, 31, 6, 22} +}; + static void umc_v6_1_enable_umc_index_mode(struct amdgpu_device *adev, uint32_t umc_instance) { -- cgit v1.2.3 From 8c94810357fad9d583e37785534a8caec558bb24 Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Wed, 24 Jul 2019 21:43:45 +0800 Subject: drm/amdgpu: query umc ras error address query umc ras error address, translate it to gpu 4k page view and save it. Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Reviewed-by: Dennis Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/umc_v6_1.c | 80 +++++++++++++++++++++++++++++++++++ 1 file changed, 80 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c index e05f3e68edb0..bff1a12f2cc9 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c @@ -31,6 +31,16 @@ #define smnMCA_UMC0_MCUMC_ADDRT0 0x50f10 +/* + * (addr / 256) * 8192, the higher 26 bits in ErrorAddr + * is the index of 8KB block + */ +#define ADDR_OF_8KB_BLOCK(addr) (((addr) & ~0xffULL) << 5) +/* channel index is the index of 256B block */ +#define ADDR_OF_256B_BLOCK(channel_index) ((channel_index) << 8) +/* offset in 256B block */ +#define OFFSET_IN_256B_BLOCK(addr) ((addr) & 0xffULL) + static uint32_t umc_v6_1_channel_idx_tbl[UMC_V6_1_UMC_INSTANCE_NUM][UMC_V6_1_CHANNEL_INSTANCE_NUM] = { {2, 18, 11, 27}, {4, 20, 13, 29}, @@ -158,6 +168,76 @@ static void umc_v6_1_query_ras_error_count(struct amdgpu_device *adev, umc_v6_1_disable_umc_index_mode(adev); } +static void umc_v6_1_query_error_address(struct amdgpu_device *adev, + uint32_t umc_reg_offset, uint32_t channel_index, + struct ras_err_data *err_data) +{ + uint32_t lsb; + uint64_t mc_umc_status, err_addr; + uint32_t mc_umc_status_addr; + + /* skip error address process if -ENOMEM */ + if (!err_data->err_addr) + return; + + mc_umc_status_addr = + SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0); + mc_umc_status = RREG64(mc_umc_status_addr + umc_reg_offset); + + /* calculate error address if ue/ce error is detected */ + if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && + (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 || + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) { + err_addr = RREG64_PCIE(smnMCA_UMC0_MCUMC_ADDRT0 + umc_reg_offset * 4); + + /* the lowest lsb bits should be ignored */ + lsb = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, LSB); + err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr); + err_addr &= ~((0x1ULL << lsb) - 1); + + /* translate umc channel address to soc pa, 3 parts are included */ + err_data->err_addr[err_data->err_addr_cnt] = + ADDR_OF_8KB_BLOCK(err_addr) + | ADDR_OF_256B_BLOCK(channel_index) + | OFFSET_IN_256B_BLOCK(err_addr); + + err_data->err_addr_cnt++; + } +} + +static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev, + void *ras_error_status) +{ + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + uint32_t umc_inst, channel_inst, umc_reg_offset; + uint32_t channel_index, mc_umc_status_addr; + + mc_umc_status_addr = + SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0); + + for (umc_inst = 0; umc_inst < UMC_V6_1_UMC_INSTANCE_NUM; umc_inst++) { + /* enable the index mode to query eror count per 
channel */ + umc_v6_1_enable_umc_index_mode(adev, umc_inst); + for (channel_inst = 0; channel_inst < UMC_V6_1_CHANNEL_INSTANCE_NUM; channel_inst++) { + /* calc the register offset according to channel instance */ + umc_reg_offset = UMC_V6_1_PER_CHANNEL_OFFSET * channel_inst; + /* get channel index of interleaved memory */ + channel_index = umc_v6_1_channel_idx_tbl[umc_inst][channel_inst]; + + umc_v6_1_query_error_address(adev, umc_reg_offset, + channel_index, err_data); + + /* clear umc status */ + WREG64(mc_umc_status_addr + umc_reg_offset, 0x0ULL); + /* clear error address register */ + WREG64_PCIE(smnMCA_UMC0_MCUMC_ADDRT0 + umc_reg_offset * 4, 0x0ULL); + } + } + + umc_v6_1_disable_umc_index_mode(adev); +} + const struct amdgpu_umc_funcs umc_v6_1_funcs = { .query_ras_error_count = umc_v6_1_query_ras_error_count, + .query_ras_error_address = umc_v6_1_query_ras_error_address, }; -- cgit v1.2.3 From cf04dfd0e91f11ac83d369f809ada8147aeaf3fe Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Mon, 22 Jul 2019 20:27:25 +0800 Subject: drm/amdgpu: allow ras interrupt callback to return error data add error data as parameter for ras interrupt cb and process it Signed-off-by: Tao Zhou Reviewed-by: Dennis Li Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 6 +++--- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 37 +++++++++++++++++---------------- 2 files changed, 22 insertions(+), 21 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 143153a3c5b0..b248c8250d75 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -1005,7 +1005,7 @@ static void amdgpu_ras_interrupt_handler(struct ras_manager *obj) struct ras_ih_data *data = &obj->ih_data; struct amdgpu_iv_entry entry; int ret; - struct ras_err_data err_data = {0, 0}; + struct ras_err_data err_data = {0, 0, 0, NULL}; while (data->rptr != data->wptr) { rmb(); @@ -1020,14 +1020,14 @@ static void amdgpu_ras_interrupt_handler(struct ras_manager *obj) * from the callback to udpate the error type/count, etc */ if (data->cb) { - ret = data->cb(obj->adev, &entry); + ret = data->cb(obj->adev, &err_data, &entry); /* ue will trigger an interrupt, and in that case * we need do a reset to recovery the whole system. * But leave IP do that recovery, here we just dispatch * the error. */ if (ret == AMDGPU_RAS_UE) { - obj->err_data.ue_count++; + obj->err_data.ue_count += err_data.ue_count; } /* Might need get ce count by register, but not all IP * saves ce count, some IP just use one bit or two bits diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index 0920db7aff34..2c86a5135ec9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -76,9 +76,6 @@ struct ras_common_if { char name[32]; }; -typedef int (*ras_ih_cb)(struct amdgpu_device *adev, - struct amdgpu_iv_entry *entry); - struct amdgpu_ras { /* ras infrastructure */ /* for ras itself. 
*/ @@ -108,21 +105,6 @@ struct amdgpu_ras { uint32_t flags; }; -struct ras_ih_data { - /* interrupt bottom half */ - struct work_struct ih_work; - int inuse; - /* IP callback */ - ras_ih_cb cb; - /* full of entries */ - unsigned char *ring; - unsigned int ring_size; - unsigned int element_size; - unsigned int aligned_element_size; - unsigned int rptr; - unsigned int wptr; -}; - struct ras_fs_data { char sysfs_name[32]; char debugfs_name[32]; @@ -149,6 +131,25 @@ struct ras_err_handler_data { int last_reserved; }; +typedef int (*ras_ih_cb)(struct amdgpu_device *adev, + struct ras_err_data *err_data, + struct amdgpu_iv_entry *entry); + +struct ras_ih_data { + /* interrupt bottom half */ + struct work_struct ih_work; + int inuse; + /* IP callback */ + ras_ih_cb cb; + /* full of entries */ + unsigned char *ring; + unsigned int ring_size; + unsigned int element_size; + unsigned int aligned_element_size; + unsigned int rptr; + unsigned int wptr; +}; + struct ras_manager { struct ras_common_if head; /* reference count */ -- cgit v1.2.3 From 81e02619e9589474e815b78cbd0e098de6f0c05b Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Mon, 22 Jul 2019 20:33:39 +0800 Subject: drm/amdgpu: update interrupt callback for all ras clients add err_data parameter in interrupt cb for ras clients Signed-off-by: Tao Zhou Reviewed-by: Dennis Li Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 2 ++ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 2 ++ 3 files changed, 6 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 65be73eb02d4..8e8d92b1a047 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -3955,6 +3955,7 @@ static int gfx_v9_0_early_init(void *handle) } static int gfx_v9_0_process_ras_data_cb(struct amdgpu_device *adev, + struct ras_err_data *err_data, struct amdgpu_iv_entry *entry); static int gfx_v9_0_ecc_late_init(void *handle) @@ -5269,6 +5270,7 @@ static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev, } static int gfx_v9_0_process_ras_data_cb(struct amdgpu_device *adev, + struct ras_err_data *err_data, struct amdgpu_iv_entry *entry) { /* TODO ue will trigger an interrupt. 
*/ diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index ae685998b282..06fca08b6513 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -243,12 +243,12 @@ static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev, } static int gmc_v9_0_process_ras_data_cb(struct amdgpu_device *adev, + struct ras_err_data *err_data, struct amdgpu_iv_entry *entry) { - struct ras_err_data err_data = {0, 0}; kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); if (adev->umc.funcs->query_ras_error_count) - adev->umc.funcs->query_ras_error_count(adev, &err_data); + adev->umc.funcs->query_ras_error_count(adev, err_data); amdgpu_ras_reset_gpu(adev, 0); return AMDGPU_RAS_UE; } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index a33bd867287e..dda0b8d005f8 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1627,6 +1627,7 @@ static int sdma_v4_0_early_init(void *handle) } static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev, + struct ras_err_data *err_data, struct amdgpu_iv_entry *entry); static int sdma_v4_0_late_init(void *handle) @@ -1960,6 +1961,7 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev, } static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev, + struct ras_err_data *err_data, struct amdgpu_iv_entry *entry) { uint32_t instance, err_source; -- cgit v1.2.3 From e10634938b66cff736f5d9b91f80815ef48645d3 Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Tue, 23 Jul 2019 13:07:24 +0800 Subject: drm/amdgpu: add check for ras error type only ue and ce errors are supported Signed-off-by: Tao Zhou Reviewed-by: Dennis Li Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index b248c8250d75..69a1ba499efa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -155,9 +155,14 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f, return -EINVAL; data->head.block = block_id; - data->head.type = memcmp("ue", err, 2) == 0 ? 
- AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE : - AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE; + /* only ue and ce errors are supported */ + if (!memcmp("ue", err, 2)) + data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; + else if (!memcmp("ce", err, 2)) + data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE; + else + return -EINVAL; + data->op = op; if (op == 2) { -- cgit v1.2.3 From 7cdc2ee3004d9de1247fb6b66afa9f7429cbef9a Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Wed, 24 Jul 2019 11:19:56 +0800 Subject: drm/amdgpu: remove ras_reserve_vram in ras injection error injection address is not in gpu address space Signed-off-by: Tao Zhou Reviewed-by: Dennis Li Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 69a1ba499efa..0c31bd06a7e8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -62,6 +62,9 @@ const char *ras_block_string[] = { #define AMDGPU_RAS_FLAG_INIT_NEED_RESET 2 #define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS) +/* inject address is 52 bits */ +#define RAS_UMC_INJECT_ADDR_LIMIT (0x1ULL << 52) + static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev, uint64_t offset, uint64_t size, struct amdgpu_bo **bo_ptr); @@ -247,7 +250,6 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user * { struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private; struct ras_debug_if data; - struct amdgpu_bo *bo; int ret = 0; ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data); @@ -265,17 +267,14 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user * ret = amdgpu_ras_feature_enable(adev, &data.head, 1); break; case 2: - ret = amdgpu_ras_reserve_vram(adev, - data.inject.address, PAGE_SIZE, &bo); - if (ret) { - /* address was offset, now it is absolute.*/ - data.inject.address += adev->gmc.vram_start; - if (data.inject.address > adev->gmc.vram_end) - break; - } else - data.inject.address = amdgpu_bo_gpu_offset(bo); + if ((data.inject.address >= adev->gmc.mc_vram_size) || + (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) { + ret = -EINVAL; + break; + } + + /* data.inject.address is offset instead of absolute gpu address */ ret = amdgpu_ras_error_inject(adev, &data.inject); - amdgpu_ras_release_vram(adev, &bo); break; default: ret = -EINVAL; -- cgit v1.2.3 From ca3f422f539551c8a20dc07aa1db87847de478d8 Mon Sep 17 00:00:00 2001 From: Dennis Li Date: Fri, 19 Jul 2019 14:42:49 +0800 Subject: drm/amd/include: add bitfield define for EDC registers Add EDC registers to support VEGA20 RAS Signed-off-by: Dennis Li Reviewed-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- .../drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h | 157 +++++++++++++++++++++ 1 file changed, 157 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h index 2e1214be67a2..064c4bb1dc62 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h @@ -21,6 +21,105 @@ #ifndef _gc_9_0_SH_MASK_HEADER #define _gc_9_0_SH_MASK_HEADER +//GCEA_EDC_CNT +#define GCEA_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0 +#define 
GCEA_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2 +#define GCEA_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4 +#define GCEA_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6 +#define GCEA_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8 +#define GCEA_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa +#define GCEA_EDC_CNT__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc +#define GCEA_EDC_CNT__RRET_TAGMEM_DED_COUNT__SHIFT 0xe +#define GCEA_EDC_CNT__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10 +#define GCEA_EDC_CNT__WRET_TAGMEM_DED_COUNT__SHIFT 0x12 +#define GCEA_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x14 +#define GCEA_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x16 +#define GCEA_EDC_CNT__IORD_CMDMEM_SED_COUNT__SHIFT 0x18 +#define GCEA_EDC_CNT__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1a +#define GCEA_EDC_CNT__IOWR_DATAMEM_SED_COUNT__SHIFT 0x1c +#define GCEA_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT_MASK 0x00000003L +#define GCEA_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL +#define GCEA_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L +#define GCEA_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L +#define GCEA_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L +#define GCEA_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L +#define GCEA_EDC_CNT__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L +#define GCEA_EDC_CNT__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L +#define GCEA_EDC_CNT__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L +#define GCEA_EDC_CNT__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L +#define GCEA_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x00300000L +#define GCEA_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x00C00000L +#define GCEA_EDC_CNT__IORD_CMDMEM_SED_COUNT_MASK 0x03000000L +#define GCEA_EDC_CNT__IOWR_CMDMEM_SED_COUNT_MASK 0x0C000000L +#define GCEA_EDC_CNT__IOWR_DATAMEM_SED_COUNT_MASK 0x30000000L + +#define GCEA_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0 +#define GCEA_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2 +#define GCEA_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4 +#define GCEA_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6 +#define GCEA_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8 +#define GCEA_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa +#define GCEA_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc +#define GCEA_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe +#define GCEA_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10 +#define GCEA_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12 +#define GCEA_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14 +#define GCEA_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16 +#define GCEA_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L +#define GCEA_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL +#define GCEA_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L +#define GCEA_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT_MASK 0x000000C0L +#define GCEA_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L +#define GCEA_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L +#define GCEA_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L +#define GCEA_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L +#define GCEA_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L +#define GCEA_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L +#define GCEA_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L +#define GCEA_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L + +// addressBlock: gc_cppdec2 +//CPF_EDC_TAG_CNT +#define CPF_EDC_TAG_CNT__DED_COUNT__SHIFT 0x0 +#define CPF_EDC_TAG_CNT__SEC_COUNT__SHIFT 0x2 +#define CPF_EDC_TAG_CNT__DED_COUNT_MASK 0x00000003L +#define CPF_EDC_TAG_CNT__SEC_COUNT_MASK 0x0000000CL +//CPF_EDC_ROQ_CNT +#define CPF_EDC_ROQ_CNT__COUNT_ME1__SHIFT 0x0 +#define 
CPF_EDC_ROQ_CNT__COUNT_ME2__SHIFT 0x2 +#define CPF_EDC_ROQ_CNT__COUNT_ME1_MASK 0x00000003L +#define CPF_EDC_ROQ_CNT__COUNT_ME2_MASK 0x0000000CL +//CPG_EDC_TAG_CNT +#define CPG_EDC_TAG_CNT__DED_COUNT__SHIFT 0x0 +#define CPG_EDC_TAG_CNT__SEC_COUNT__SHIFT 0x2 +#define CPG_EDC_TAG_CNT__DED_COUNT_MASK 0x00000003L +#define CPG_EDC_TAG_CNT__SEC_COUNT_MASK 0x0000000CL +//CPG_EDC_DMA_CNT +#define CPG_EDC_DMA_CNT__ROQ_COUNT__SHIFT 0x0 +#define CPG_EDC_DMA_CNT__TAG_DED_COUNT__SHIFT 0x2 +#define CPG_EDC_DMA_CNT__TAG_SEC_COUNT__SHIFT 0x4 +#define CPG_EDC_DMA_CNT__ROQ_COUNT_MASK 0x00000003L +#define CPG_EDC_DMA_CNT__TAG_DED_COUNT_MASK 0x0000000CL +#define CPG_EDC_DMA_CNT__TAG_SEC_COUNT_MASK 0x00000030L +//CPC_EDC_SCRATCH_CNT +#define CPC_EDC_SCRATCH_CNT__DED_COUNT__SHIFT 0x0 +#define CPC_EDC_SCRATCH_CNT__SEC_COUNT__SHIFT 0x2 +#define CPC_EDC_SCRATCH_CNT__DED_COUNT_MASK 0x00000003L +#define CPC_EDC_SCRATCH_CNT__SEC_COUNT_MASK 0x0000000CL +//CPC_EDC_UCODE_CNT +#define CPC_EDC_UCODE_CNT__DED_COUNT__SHIFT 0x0 +#define CPC_EDC_UCODE_CNT__SEC_COUNT__SHIFT 0x2 +#define CPC_EDC_UCODE_CNT__DED_COUNT_MASK 0x00000003L +#define CPC_EDC_UCODE_CNT__SEC_COUNT_MASK 0x0000000CL +//DC_EDC_STATE_CNT +#define DC_EDC_STATE_CNT__COUNT_ME1__SHIFT 0x0 +#define DC_EDC_STATE_CNT__COUNT_ME1_MASK 0x00000003L +//DC_EDC_CSINVOC_CNT +#define DC_EDC_CSINVOC_CNT__COUNT_ME1__SHIFT 0x0 +#define DC_EDC_CSINVOC_CNT__COUNT_ME1_MASK 0x00000003L +//DC_EDC_RESTORE_CNT +#define DC_EDC_RESTORE_CNT__COUNT_ME1__SHIFT 0x0 +#define DC_EDC_RESTORE_CNT__COUNT_ME1_MASK 0x00000003L // addressBlock: gc_grbmdec //GRBM_CNTL @@ -9033,11 +9132,15 @@ #define TCC_EDC_CNT2__SRC_FIFO_NEXT_RAM_SED_COUNT__SHIFT 0x4 #define TCC_EDC_CNT2__LATENCY_FIFO_NEXT_RAM_SED_COUNT__SHIFT 0x6 #define TCC_EDC_CNT2__CACHE_TAG_PROBE_FIFO_SED_COUNT__SHIFT 0x8 +#define TCC_EDC_CNT2__WRRET_TAG_WRITE_RETURN_SED_COUNT__SHIFT 0xa +#define TCC_EDC_CNT2__ATOMIC_RETURN_BUFFER_SED_COUNT__SHIFT 0xc #define TCC_EDC_CNT2__WRITE_RETURN_SED_COUNT_MASK 0x00000003L #define TCC_EDC_CNT2__WRITE_CACHE_READ_SED_COUNT_MASK 0x0000000CL #define TCC_EDC_CNT2__SRC_FIFO_NEXT_RAM_SED_COUNT_MASK 0x00000030L #define TCC_EDC_CNT2__LATENCY_FIFO_NEXT_RAM_SED_COUNT_MASK 0x000000C0L #define TCC_EDC_CNT2__CACHE_TAG_PROBE_FIFO_SED_COUNT_MASK 0x00000300L +#define TCC_EDC_CNT2__WRRET_TAG_WRITE_RETURN_SED_COUNT_MASK 0x00000C00L +#define TCC_EDC_CNT2__ATOMIC_RETURN_BUFFER_SED_COUNT_MASK 0x00003000L //TCC_REDUNDANCY #define TCC_REDUNDANCY__MC_SEL0__SHIFT 0x0 #define TCC_REDUNDANCY__MC_SEL1__SHIFT 0x1 @@ -29818,6 +29921,60 @@ #define DIDT_DBR_STALL_EVENT_COUNTER__DIDT_STALL_EVENT_COUNTER__SHIFT 0x0 #define DIDT_DBR_STALL_EVENT_COUNTER__DIDT_STALL_EVENT_COUNTER_MASK 0xFFFFFFFFL +//TA_EDC_CNT +#define TA_EDC_CNT__TA_FS_DFIFO_SEC_COUNT__SHIFT 0x0 +#define TA_EDC_CNT__TA_FS_DFIFO_DED_COUNT__SHIFT 0x2 +#define TA_EDC_CNT__TA_FS_AFIFO_SED_COUNT__SHIFT 0x4 +#define TA_EDC_CNT__TA_FL_LFIFO_SED_COUNT__SHIFT 0x6 +#define TA_EDC_CNT__TA_FX_LFIFO_SED_COUNT__SHIFT 0x8 +#define TA_EDC_CNT__TA_FS_CFIFO_SED_COUNT__SHIFT 0xa +#define TA_EDC_CNT__TA_FS_DFIFO_SEC_COUNT_MASK 0x00000003L +#define TA_EDC_CNT__TA_FS_DFIFO_DED_COUNT_MASK 0x0000000CL +#define TA_EDC_CNT__TA_FS_AFIFO_SED_COUNT_MASK 0x00000030L +#define TA_EDC_CNT__TA_FL_LFIFO_SED_COUNT_MASK 0x000000C0L +#define TA_EDC_CNT__TA_FX_LFIFO_SED_COUNT_MASK 0x00000300L +#define TA_EDC_CNT__TA_FS_CFIFO_SED_COUNT_MASK 0x00000C00L + +//TCI_EDC_CNT +#define TCI_EDC_CNT__WRITE_RAM_SED_COUNT__SHIFT 0x0 +#define TCI_EDC_CNT__WRITE_RAM_SED_COUNT_MASK 0x00000003L + +//TCP_EDC_CNT_NEW +#define 
TCP_EDC_CNT_NEW__CACHE_RAM_SEC_COUNT__SHIFT 0x0 +#define TCP_EDC_CNT_NEW__CACHE_RAM_DED_COUNT__SHIFT 0x2 +#define TCP_EDC_CNT_NEW__LFIFO_RAM_SEC_COUNT__SHIFT 0x4 +#define TCP_EDC_CNT_NEW__LFIFO_RAM_DED_COUNT__SHIFT 0x6 +#define TCP_EDC_CNT_NEW__CMD_FIFO_SED_COUNT__SHIFT 0x8 +#define TCP_EDC_CNT_NEW__VM_FIFO_SEC_COUNT__SHIFT 0xa +#define TCP_EDC_CNT_NEW__VM_FIFO_DED_COUNT__SHIFT 0xc +#define TCP_EDC_CNT_NEW__DB_RAM_SED_COUNT__SHIFT 0xe +#define TCP_EDC_CNT_NEW__UTCL1_LFIFO0_SEC_COUNT__SHIFT 0x10 +#define TCP_EDC_CNT_NEW__UTCL1_LFIFO0_DED_COUNT__SHIFT 0x12 +#define TCP_EDC_CNT_NEW__UTCL1_LFIFO1_SEC_COUNT__SHIFT 0x14 +#define TCP_EDC_CNT_NEW__UTCL1_LFIFO1_DED_COUNT__SHIFT 0x16 +#define TCP_EDC_CNT_NEW__CACHE_RAM_SEC_COUNT_MASK 0x00000003L +#define TCP_EDC_CNT_NEW__CACHE_RAM_DED_COUNT_MASK 0x0000000CL +#define TCP_EDC_CNT_NEW__LFIFO_RAM_SEC_COUNT_MASK 0x00000030L +#define TCP_EDC_CNT_NEW__LFIFO_RAM_DED_COUNT_MASK 0x000000C0L +#define TCP_EDC_CNT_NEW__CMD_FIFO_SED_COUNT_MASK 0x00000300L +#define TCP_EDC_CNT_NEW__VM_FIFO_SEC_COUNT_MASK 0x00000C00L +#define TCP_EDC_CNT_NEW__VM_FIFO_DED_COUNT_MASK 0x00003000L +#define TCP_EDC_CNT_NEW__DB_RAM_SED_COUNT_MASK 0x0000C000L +#define TCP_EDC_CNT_NEW__UTCL1_LFIFO0_SEC_COUNT_MASK 0x00030000L +#define TCP_EDC_CNT_NEW__UTCL1_LFIFO0_DED_COUNT_MASK 0x000C0000L +#define TCP_EDC_CNT_NEW__UTCL1_LFIFO1_SEC_COUNT_MASK 0x00300000L +#define TCP_EDC_CNT_NEW__UTCL1_LFIFO1_DED_COUNT_MASK 0x00C00000L +//TD_EDC_CNT +#define TD_EDC_CNT__SS_FIFO_LO_SEC_COUNT__SHIFT 0x0 +#define TD_EDC_CNT__SS_FIFO_LO_DED_COUNT__SHIFT 0x2 +#define TD_EDC_CNT__SS_FIFO_HI_SEC_COUNT__SHIFT 0x4 +#define TD_EDC_CNT__SS_FIFO_HI_DED_COUNT__SHIFT 0x6 +#define TD_EDC_CNT__CS_FIFO_SED_COUNT__SHIFT 0x8 +#define TD_EDC_CNT__SS_FIFO_LO_SEC_COUNT_MASK 0x00000003L +#define TD_EDC_CNT__SS_FIFO_LO_DED_COUNT_MASK 0x0000000CL +#define TD_EDC_CNT__SS_FIFO_HI_SEC_COUNT_MASK 0x00000030L +#define TD_EDC_CNT__SS_FIFO_HI_DED_COUNT_MASK 0x000000C0L +#define TD_EDC_CNT__CS_FIFO_SED_COUNT_MASK 0x00000300L #endif -- cgit v1.2.3 From 4bb6b8c758fe3a9225e3003a3fb35d16cac1baeb Mon Sep 17 00:00:00 2001 From: Dennis Li Date: Fri, 19 Jul 2019 14:50:25 +0800 Subject: drm/amd/include: add define of TCP_EDC_CNT_NEW Signed-off-by: Dennis Li Reviewed-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h index f1d048e0ed2c..ca16d9125fbc 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h @@ -1700,6 +1700,8 @@ #define mmTCP_BUFFER_ADDR_HASH_CNTL_BASE_IDX 0 #define mmTCP_EDC_CNT 0x0b17 #define mmTCP_EDC_CNT_BASE_IDX 0 +#define mmTCP_EDC_CNT_NEW 0x0b18 +#define mmTCP_EDC_CNT_NEW_BASE_IDX 0 #define mmTC_CFG_L1_LOAD_POLICY0 0x0b1a #define mmTC_CFG_L1_LOAD_POLICY0_BASE_IDX 0 #define mmTC_CFG_L1_LOAD_POLICY1 0x0b1b -- cgit v1.2.3 From dc23a08f0310babf8d2dfa4514edbc381a767410 Mon Sep 17 00:00:00 2001 From: Dennis Li Date: Fri, 19 Jul 2019 15:22:29 +0800 Subject: drm/amdgpu: add define for gfx ras subblock Signed-off-by: Dennis Li Reviewed-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 230 ++++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 201 ++++++++++++++++++++++++++++ 2 files changed, 431 insertions(+) 
(limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index 2c86a5135ec9..2765f2dbb1e6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -52,6 +52,236 @@ enum amdgpu_ras_block { #define AMDGPU_RAS_BLOCK_COUNT AMDGPU_RAS_BLOCK__LAST #define AMDGPU_RAS_BLOCK_MASK ((1ULL << AMDGPU_RAS_BLOCK_COUNT) - 1) +enum amdgpu_ras_gfx_subblock { + /* CPC */ + AMDGPU_RAS_BLOCK__GFX_CPC_INDEX_START = 0, + AMDGPU_RAS_BLOCK__GFX_CPC_SCRATCH = + AMDGPU_RAS_BLOCK__GFX_CPC_INDEX_START, + AMDGPU_RAS_BLOCK__GFX_CPC_UCODE, + AMDGPU_RAS_BLOCK__GFX_DC_STATE_ME1, + AMDGPU_RAS_BLOCK__GFX_DC_CSINVOC_ME1, + AMDGPU_RAS_BLOCK__GFX_DC_RESTORE_ME1, + AMDGPU_RAS_BLOCK__GFX_DC_STATE_ME2, + AMDGPU_RAS_BLOCK__GFX_DC_CSINVOC_ME2, + AMDGPU_RAS_BLOCK__GFX_DC_RESTORE_ME2, + AMDGPU_RAS_BLOCK__GFX_CPC_INDEX_END = + AMDGPU_RAS_BLOCK__GFX_DC_RESTORE_ME2, + /* CPF */ + AMDGPU_RAS_BLOCK__GFX_CPF_INDEX_START, + AMDGPU_RAS_BLOCK__GFX_CPF_ROQ_ME2 = + AMDGPU_RAS_BLOCK__GFX_CPF_INDEX_START, + AMDGPU_RAS_BLOCK__GFX_CPF_ROQ_ME1, + AMDGPU_RAS_BLOCK__GFX_CPF_TAG, + AMDGPU_RAS_BLOCK__GFX_CPF_INDEX_END = AMDGPU_RAS_BLOCK__GFX_CPF_TAG, + /* CPG */ + AMDGPU_RAS_BLOCK__GFX_CPG_INDEX_START, + AMDGPU_RAS_BLOCK__GFX_CPG_DMA_ROQ = + AMDGPU_RAS_BLOCK__GFX_CPG_INDEX_START, + AMDGPU_RAS_BLOCK__GFX_CPG_DMA_TAG, + AMDGPU_RAS_BLOCK__GFX_CPG_TAG, + AMDGPU_RAS_BLOCK__GFX_CPG_INDEX_END = AMDGPU_RAS_BLOCK__GFX_CPG_TAG, + /* GDS */ + AMDGPU_RAS_BLOCK__GFX_GDS_INDEX_START, + AMDGPU_RAS_BLOCK__GFX_GDS_MEM = AMDGPU_RAS_BLOCK__GFX_GDS_INDEX_START, + AMDGPU_RAS_BLOCK__GFX_GDS_INPUT_QUEUE, + AMDGPU_RAS_BLOCK__GFX_GDS_OA_PHY_CMD_RAM_MEM, + AMDGPU_RAS_BLOCK__GFX_GDS_OA_PHY_DATA_RAM_MEM, + AMDGPU_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM, + AMDGPU_RAS_BLOCK__GFX_GDS_INDEX_END = + AMDGPU_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM, + /* SPI */ + AMDGPU_RAS_BLOCK__GFX_SPI_SR_MEM, + /* SQ */ + AMDGPU_RAS_BLOCK__GFX_SQ_INDEX_START, + AMDGPU_RAS_BLOCK__GFX_SQ_SGPR = AMDGPU_RAS_BLOCK__GFX_SQ_INDEX_START, + AMDGPU_RAS_BLOCK__GFX_SQ_LDS_D, + AMDGPU_RAS_BLOCK__GFX_SQ_LDS_I, + AMDGPU_RAS_BLOCK__GFX_SQ_VGPR, + AMDGPU_RAS_BLOCK__GFX_SQ_INDEX_END = AMDGPU_RAS_BLOCK__GFX_SQ_VGPR, + /* SQC (3 ranges) */ + AMDGPU_RAS_BLOCK__GFX_SQC_INDEX_START, + /* SQC range 0 */ + AMDGPU_RAS_BLOCK__GFX_SQC_INDEX0_START = + AMDGPU_RAS_BLOCK__GFX_SQC_INDEX_START, + AMDGPU_RAS_BLOCK__GFX_SQC_INST_UTCL1_LFIFO = + AMDGPU_RAS_BLOCK__GFX_SQC_INDEX0_START, + AMDGPU_RAS_BLOCK__GFX_SQC_DATA_CU0_WRITE_DATA_BUF, + AMDGPU_RAS_BLOCK__GFX_SQC_DATA_CU0_UTCL1_LFIFO, + AMDGPU_RAS_BLOCK__GFX_SQC_DATA_CU1_WRITE_DATA_BUF, + AMDGPU_RAS_BLOCK__GFX_SQC_DATA_CU1_UTCL1_LFIFO, + AMDGPU_RAS_BLOCK__GFX_SQC_DATA_CU2_WRITE_DATA_BUF, + AMDGPU_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO, + AMDGPU_RAS_BLOCK__GFX_SQC_INDEX0_END = + AMDGPU_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO, + /* SQC range 1 */ + AMDGPU_RAS_BLOCK__GFX_SQC_INDEX1_START, + AMDGPU_RAS_BLOCK__GFX_SQC_INST_BANKA_TAG_RAM = + AMDGPU_RAS_BLOCK__GFX_SQC_INDEX1_START, + AMDGPU_RAS_BLOCK__GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO, + AMDGPU_RAS_BLOCK__GFX_SQC_INST_BANKA_MISS_FIFO, + AMDGPU_RAS_BLOCK__GFX_SQC_INST_BANKA_BANK_RAM, + AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKA_TAG_RAM, + AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKA_HIT_FIFO, + AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKA_MISS_FIFO, + AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM, + AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM, + AMDGPU_RAS_BLOCK__GFX_SQC_INDEX1_END = + AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM, + /* SQC range 2 */ + 
AMDGPU_RAS_BLOCK__GFX_SQC_INDEX2_START, + AMDGPU_RAS_BLOCK__GFX_SQC_INST_BANKB_TAG_RAM = + AMDGPU_RAS_BLOCK__GFX_SQC_INDEX2_START, + AMDGPU_RAS_BLOCK__GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO, + AMDGPU_RAS_BLOCK__GFX_SQC_INST_BANKB_MISS_FIFO, + AMDGPU_RAS_BLOCK__GFX_SQC_INST_BANKB_BANK_RAM, + AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKB_TAG_RAM, + AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKB_HIT_FIFO, + AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKB_MISS_FIFO, + AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM, + AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM, + AMDGPU_RAS_BLOCK__GFX_SQC_INDEX2_END = + AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM, + AMDGPU_RAS_BLOCK__GFX_SQC_INDEX_END = + AMDGPU_RAS_BLOCK__GFX_SQC_INDEX2_END, + /* TA */ + AMDGPU_RAS_BLOCK__GFX_TA_INDEX_START, + AMDGPU_RAS_BLOCK__GFX_TA_FS_DFIFO = + AMDGPU_RAS_BLOCK__GFX_TA_INDEX_START, + AMDGPU_RAS_BLOCK__GFX_TA_FS_AFIFO, + AMDGPU_RAS_BLOCK__GFX_TA_FL_LFIFO, + AMDGPU_RAS_BLOCK__GFX_TA_FX_LFIFO, + AMDGPU_RAS_BLOCK__GFX_TA_FS_CFIFO, + AMDGPU_RAS_BLOCK__GFX_TA_INDEX_END = AMDGPU_RAS_BLOCK__GFX_TA_FS_CFIFO, + /* TCA */ + AMDGPU_RAS_BLOCK__GFX_TCA_INDEX_START, + AMDGPU_RAS_BLOCK__GFX_TCA_HOLE_FIFO = + AMDGPU_RAS_BLOCK__GFX_TCA_INDEX_START, + AMDGPU_RAS_BLOCK__GFX_TCA_REQ_FIFO, + AMDGPU_RAS_BLOCK__GFX_TCA_INDEX_END = + AMDGPU_RAS_BLOCK__GFX_TCA_REQ_FIFO, + /* TCC (5 sub-ranges) */ + AMDGPU_RAS_BLOCK__GFX_TCC_INDEX_START, + /* TCC range 0 */ + AMDGPU_RAS_BLOCK__GFX_TCC_INDEX0_START = + AMDGPU_RAS_BLOCK__GFX_TCC_INDEX_START, + AMDGPU_RAS_BLOCK__GFX_TCC_CACHE_DATA = + AMDGPU_RAS_BLOCK__GFX_TCC_INDEX0_START, + AMDGPU_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_0_1, + AMDGPU_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_0, + AMDGPU_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_1, + AMDGPU_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_0, + AMDGPU_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_1, + AMDGPU_RAS_BLOCK__GFX_TCC_HIGH_RATE_TAG, + AMDGPU_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG, + AMDGPU_RAS_BLOCK__GFX_TCC_INDEX0_END = + AMDGPU_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG, + /* TCC range 1 */ + AMDGPU_RAS_BLOCK__GFX_TCC_INDEX1_START, + AMDGPU_RAS_BLOCK__GFX_TCC_IN_USE_DEC = + AMDGPU_RAS_BLOCK__GFX_TCC_INDEX1_START, + AMDGPU_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER, + AMDGPU_RAS_BLOCK__GFX_TCC_INDEX1_END = + AMDGPU_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER, + /* TCC range 2 */ + AMDGPU_RAS_BLOCK__GFX_TCC_INDEX2_START, + AMDGPU_RAS_BLOCK__GFX_TCC_RETURN_DATA = + AMDGPU_RAS_BLOCK__GFX_TCC_INDEX2_START, + AMDGPU_RAS_BLOCK__GFX_TCC_RETURN_CONTROL, + AMDGPU_RAS_BLOCK__GFX_TCC_UC_ATOMIC_FIFO, + AMDGPU_RAS_BLOCK__GFX_TCC_WRITE_RETURN, + AMDGPU_RAS_BLOCK__GFX_TCC_WRITE_CACHE_READ, + AMDGPU_RAS_BLOCK__GFX_TCC_SRC_FIFO, + AMDGPU_RAS_BLOCK__GFX_TCC_SRC_FIFO_NEXT_RAM, + AMDGPU_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO, + AMDGPU_RAS_BLOCK__GFX_TCC_INDEX2_END = + AMDGPU_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO, + /* TCC range 3 */ + AMDGPU_RAS_BLOCK__GFX_TCC_INDEX3_START, + AMDGPU_RAS_BLOCK__GFX_TCC_LATENCY_FIFO = + AMDGPU_RAS_BLOCK__GFX_TCC_INDEX3_START, + AMDGPU_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM, + AMDGPU_RAS_BLOCK__GFX_TCC_INDEX3_END = + AMDGPU_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM, + /* TCC range 4 */ + AMDGPU_RAS_BLOCK__GFX_TCC_INDEX4_START, + AMDGPU_RAS_BLOCK__GFX_TCC_WRRET_TAG_WRITE_RETURN = + AMDGPU_RAS_BLOCK__GFX_TCC_INDEX4_START, + AMDGPU_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER, + AMDGPU_RAS_BLOCK__GFX_TCC_INDEX4_END = + AMDGPU_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER, + AMDGPU_RAS_BLOCK__GFX_TCC_INDEX_END = + AMDGPU_RAS_BLOCK__GFX_TCC_INDEX4_END, + /* TCI */ + AMDGPU_RAS_BLOCK__GFX_TCI_WRITE_RAM, + /* TCP */ + 
AMDGPU_RAS_BLOCK__GFX_TCP_INDEX_START, + AMDGPU_RAS_BLOCK__GFX_TCP_CACHE_RAM = + AMDGPU_RAS_BLOCK__GFX_TCP_INDEX_START, + AMDGPU_RAS_BLOCK__GFX_TCP_LFIFO_RAM, + AMDGPU_RAS_BLOCK__GFX_TCP_CMD_FIFO, + AMDGPU_RAS_BLOCK__GFX_TCP_VM_FIFO, + AMDGPU_RAS_BLOCK__GFX_TCP_DB_RAM, + AMDGPU_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO0, + AMDGPU_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1, + AMDGPU_RAS_BLOCK__GFX_TCP_INDEX_END = + AMDGPU_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1, + /* TD */ + AMDGPU_RAS_BLOCK__GFX_TD_INDEX_START, + AMDGPU_RAS_BLOCK__GFX_TD_SS_FIFO_LO = + AMDGPU_RAS_BLOCK__GFX_TD_INDEX_START, + AMDGPU_RAS_BLOCK__GFX_TD_SS_FIFO_HI, + AMDGPU_RAS_BLOCK__GFX_TD_CS_FIFO, + AMDGPU_RAS_BLOCK__GFX_TD_INDEX_END = AMDGPU_RAS_BLOCK__GFX_TD_CS_FIFO, + /* EA (3 sub-ranges) */ + AMDGPU_RAS_BLOCK__GFX_EA_INDEX_START, + /* EA range 0 */ + AMDGPU_RAS_BLOCK__GFX_EA_INDEX0_START = + AMDGPU_RAS_BLOCK__GFX_EA_INDEX_START, + AMDGPU_RAS_BLOCK__GFX_EA_DRAMRD_CMDMEM = + AMDGPU_RAS_BLOCK__GFX_EA_INDEX0_START, + AMDGPU_RAS_BLOCK__GFX_EA_DRAMWR_CMDMEM, + AMDGPU_RAS_BLOCK__GFX_EA_DRAMWR_DATAMEM, + AMDGPU_RAS_BLOCK__GFX_EA_RRET_TAGMEM, + AMDGPU_RAS_BLOCK__GFX_EA_WRET_TAGMEM, + AMDGPU_RAS_BLOCK__GFX_EA_GMIRD_CMDMEM, + AMDGPU_RAS_BLOCK__GFX_EA_GMIWR_CMDMEM, + AMDGPU_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM, + AMDGPU_RAS_BLOCK__GFX_EA_INDEX0_END = + AMDGPU_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM, + /* EA range 1 */ + AMDGPU_RAS_BLOCK__GFX_EA_INDEX1_START, + AMDGPU_RAS_BLOCK__GFX_EA_DRAMRD_PAGEMEM = + AMDGPU_RAS_BLOCK__GFX_EA_INDEX1_START, + AMDGPU_RAS_BLOCK__GFX_EA_DRAMWR_PAGEMEM, + AMDGPU_RAS_BLOCK__GFX_EA_IORD_CMDMEM, + AMDGPU_RAS_BLOCK__GFX_EA_IOWR_CMDMEM, + AMDGPU_RAS_BLOCK__GFX_EA_IOWR_DATAMEM, + AMDGPU_RAS_BLOCK__GFX_EA_GMIRD_PAGEMEM, + AMDGPU_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM, + AMDGPU_RAS_BLOCK__GFX_EA_INDEX1_END = + AMDGPU_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM, + /* EA range 2 */ + AMDGPU_RAS_BLOCK__GFX_EA_INDEX2_START, + AMDGPU_RAS_BLOCK__GFX_EA_MAM_D0MEM = + AMDGPU_RAS_BLOCK__GFX_EA_INDEX2_START, + AMDGPU_RAS_BLOCK__GFX_EA_MAM_D1MEM, + AMDGPU_RAS_BLOCK__GFX_EA_MAM_D2MEM, + AMDGPU_RAS_BLOCK__GFX_EA_MAM_D3MEM, + AMDGPU_RAS_BLOCK__GFX_EA_INDEX2_END = + AMDGPU_RAS_BLOCK__GFX_EA_MAM_D3MEM, + AMDGPU_RAS_BLOCK__GFX_EA_INDEX_END = + AMDGPU_RAS_BLOCK__GFX_EA_INDEX2_END, + /* UTC VM L2 bank */ + AMDGPU_RAS_BLOCK__UTC_VML2_BANK_CACHE, + /* UTC VM walker */ + AMDGPU_RAS_BLOCK__UTC_VML2_WALKER, + /* UTC ATC L2 2MB cache */ + AMDGPU_RAS_BLOCK__UTC_ATCL2_CACHE_2M_BANK, + /* UTC ATC L2 4KB cache */ + AMDGPU_RAS_BLOCK__UTC_ATCL2_CACHE_4K_BANK, + AMDGPU_RAS_BLOCK__GFX_MAX +}; + enum amdgpu_ras_error_type { AMDGPU_RAS_ERROR__NONE = 0, AMDGPU_RAS_ERROR__PARITY = 1, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 8e8d92b1a047..1af28b5bec1d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -121,6 +121,207 @@ MODULE_FIRMWARE("amdgpu/arcturus_rlc.bin"); #define mmTCP_CHAN_STEER_5_ARCT 0x0b0c #define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX 0 +enum ta_ras_gfx_subblock { + /*CPC*/ + TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0, + TA_RAS_BLOCK__GFX_CPC_SCRATCH = TA_RAS_BLOCK__GFX_CPC_INDEX_START, + TA_RAS_BLOCK__GFX_CPC_UCODE, + TA_RAS_BLOCK__GFX_DC_STATE_ME1, + TA_RAS_BLOCK__GFX_DC_CSINVOC_ME1, + TA_RAS_BLOCK__GFX_DC_RESTORE_ME1, + TA_RAS_BLOCK__GFX_DC_STATE_ME2, + TA_RAS_BLOCK__GFX_DC_CSINVOC_ME2, + TA_RAS_BLOCK__GFX_DC_RESTORE_ME2, + TA_RAS_BLOCK__GFX_CPC_INDEX_END = TA_RAS_BLOCK__GFX_DC_RESTORE_ME2, + /* CPF*/ + TA_RAS_BLOCK__GFX_CPF_INDEX_START, + TA_RAS_BLOCK__GFX_CPF_ROQ_ME2 = 
TA_RAS_BLOCK__GFX_CPF_INDEX_START, + TA_RAS_BLOCK__GFX_CPF_ROQ_ME1, + TA_RAS_BLOCK__GFX_CPF_TAG, + TA_RAS_BLOCK__GFX_CPF_INDEX_END = TA_RAS_BLOCK__GFX_CPF_TAG, + /* CPG*/ + TA_RAS_BLOCK__GFX_CPG_INDEX_START, + TA_RAS_BLOCK__GFX_CPG_DMA_ROQ = TA_RAS_BLOCK__GFX_CPG_INDEX_START, + TA_RAS_BLOCK__GFX_CPG_DMA_TAG, + TA_RAS_BLOCK__GFX_CPG_TAG, + TA_RAS_BLOCK__GFX_CPG_INDEX_END = TA_RAS_BLOCK__GFX_CPG_TAG, + /* GDS*/ + TA_RAS_BLOCK__GFX_GDS_INDEX_START, + TA_RAS_BLOCK__GFX_GDS_MEM = TA_RAS_BLOCK__GFX_GDS_INDEX_START, + TA_RAS_BLOCK__GFX_GDS_INPUT_QUEUE, + TA_RAS_BLOCK__GFX_GDS_OA_PHY_CMD_RAM_MEM, + TA_RAS_BLOCK__GFX_GDS_OA_PHY_DATA_RAM_MEM, + TA_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM, + TA_RAS_BLOCK__GFX_GDS_INDEX_END = TA_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM, + /* SPI*/ + TA_RAS_BLOCK__GFX_SPI_SR_MEM, + /* SQ*/ + TA_RAS_BLOCK__GFX_SQ_INDEX_START, + TA_RAS_BLOCK__GFX_SQ_SGPR = TA_RAS_BLOCK__GFX_SQ_INDEX_START, + TA_RAS_BLOCK__GFX_SQ_LDS_D, + TA_RAS_BLOCK__GFX_SQ_LDS_I, + TA_RAS_BLOCK__GFX_SQ_VGPR, /* VGPR = SP*/ + TA_RAS_BLOCK__GFX_SQ_INDEX_END = TA_RAS_BLOCK__GFX_SQ_VGPR, + /* SQC (3 ranges)*/ + TA_RAS_BLOCK__GFX_SQC_INDEX_START, + /* SQC range 0*/ + TA_RAS_BLOCK__GFX_SQC_INDEX0_START = TA_RAS_BLOCK__GFX_SQC_INDEX_START, + TA_RAS_BLOCK__GFX_SQC_INST_UTCL1_LFIFO = + TA_RAS_BLOCK__GFX_SQC_INDEX0_START, + TA_RAS_BLOCK__GFX_SQC_DATA_CU0_WRITE_DATA_BUF, + TA_RAS_BLOCK__GFX_SQC_DATA_CU0_UTCL1_LFIFO, + TA_RAS_BLOCK__GFX_SQC_DATA_CU1_WRITE_DATA_BUF, + TA_RAS_BLOCK__GFX_SQC_DATA_CU1_UTCL1_LFIFO, + TA_RAS_BLOCK__GFX_SQC_DATA_CU2_WRITE_DATA_BUF, + TA_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO, + TA_RAS_BLOCK__GFX_SQC_INDEX0_END = + TA_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO, + /* SQC range 1*/ + TA_RAS_BLOCK__GFX_SQC_INDEX1_START, + TA_RAS_BLOCK__GFX_SQC_INST_BANKA_TAG_RAM = + TA_RAS_BLOCK__GFX_SQC_INDEX1_START, + TA_RAS_BLOCK__GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO, + TA_RAS_BLOCK__GFX_SQC_INST_BANKA_MISS_FIFO, + TA_RAS_BLOCK__GFX_SQC_INST_BANKA_BANK_RAM, + TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_TAG_RAM, + TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_HIT_FIFO, + TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_MISS_FIFO, + TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM, + TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM, + TA_RAS_BLOCK__GFX_SQC_INDEX1_END = + TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM, + /* SQC range 2*/ + TA_RAS_BLOCK__GFX_SQC_INDEX2_START, + TA_RAS_BLOCK__GFX_SQC_INST_BANKB_TAG_RAM = + TA_RAS_BLOCK__GFX_SQC_INDEX2_START, + TA_RAS_BLOCK__GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO, + TA_RAS_BLOCK__GFX_SQC_INST_BANKB_MISS_FIFO, + TA_RAS_BLOCK__GFX_SQC_INST_BANKB_BANK_RAM, + TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_TAG_RAM, + TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_HIT_FIFO, + TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_MISS_FIFO, + TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM, + TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM, + TA_RAS_BLOCK__GFX_SQC_INDEX2_END = + TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM, + TA_RAS_BLOCK__GFX_SQC_INDEX_END = TA_RAS_BLOCK__GFX_SQC_INDEX2_END, + /* TA*/ + TA_RAS_BLOCK__GFX_TA_INDEX_START, + TA_RAS_BLOCK__GFX_TA_FS_DFIFO = TA_RAS_BLOCK__GFX_TA_INDEX_START, + TA_RAS_BLOCK__GFX_TA_FS_AFIFO, + TA_RAS_BLOCK__GFX_TA_FL_LFIFO, + TA_RAS_BLOCK__GFX_TA_FX_LFIFO, + TA_RAS_BLOCK__GFX_TA_FS_CFIFO, + TA_RAS_BLOCK__GFX_TA_INDEX_END = TA_RAS_BLOCK__GFX_TA_FS_CFIFO, + /* TCA*/ + TA_RAS_BLOCK__GFX_TCA_INDEX_START, + TA_RAS_BLOCK__GFX_TCA_HOLE_FIFO = TA_RAS_BLOCK__GFX_TCA_INDEX_START, + TA_RAS_BLOCK__GFX_TCA_REQ_FIFO, + TA_RAS_BLOCK__GFX_TCA_INDEX_END = TA_RAS_BLOCK__GFX_TCA_REQ_FIFO, + /* TCC (5 sub-ranges)*/ + TA_RAS_BLOCK__GFX_TCC_INDEX_START, + /* TCC range 0*/ + 
TA_RAS_BLOCK__GFX_TCC_INDEX0_START = TA_RAS_BLOCK__GFX_TCC_INDEX_START, + TA_RAS_BLOCK__GFX_TCC_CACHE_DATA = TA_RAS_BLOCK__GFX_TCC_INDEX0_START, + TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_0_1, + TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_0, + TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_1, + TA_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_0, + TA_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_1, + TA_RAS_BLOCK__GFX_TCC_HIGH_RATE_TAG, + TA_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG, + TA_RAS_BLOCK__GFX_TCC_INDEX0_END = TA_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG, + /* TCC range 1*/ + TA_RAS_BLOCK__GFX_TCC_INDEX1_START, + TA_RAS_BLOCK__GFX_TCC_IN_USE_DEC = TA_RAS_BLOCK__GFX_TCC_INDEX1_START, + TA_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER, + TA_RAS_BLOCK__GFX_TCC_INDEX1_END = + TA_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER, + /* TCC range 2*/ + TA_RAS_BLOCK__GFX_TCC_INDEX2_START, + TA_RAS_BLOCK__GFX_TCC_RETURN_DATA = TA_RAS_BLOCK__GFX_TCC_INDEX2_START, + TA_RAS_BLOCK__GFX_TCC_RETURN_CONTROL, + TA_RAS_BLOCK__GFX_TCC_UC_ATOMIC_FIFO, + TA_RAS_BLOCK__GFX_TCC_WRITE_RETURN, + TA_RAS_BLOCK__GFX_TCC_WRITE_CACHE_READ, + TA_RAS_BLOCK__GFX_TCC_SRC_FIFO, + TA_RAS_BLOCK__GFX_TCC_SRC_FIFO_NEXT_RAM, + TA_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO, + TA_RAS_BLOCK__GFX_TCC_INDEX2_END = + TA_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO, + /* TCC range 3*/ + TA_RAS_BLOCK__GFX_TCC_INDEX3_START, + TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO = TA_RAS_BLOCK__GFX_TCC_INDEX3_START, + TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM, + TA_RAS_BLOCK__GFX_TCC_INDEX3_END = + TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM, + /* TCC range 4*/ + TA_RAS_BLOCK__GFX_TCC_INDEX4_START, + TA_RAS_BLOCK__GFX_TCC_WRRET_TAG_WRITE_RETURN = + TA_RAS_BLOCK__GFX_TCC_INDEX4_START, + TA_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER, + TA_RAS_BLOCK__GFX_TCC_INDEX4_END = + TA_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER, + TA_RAS_BLOCK__GFX_TCC_INDEX_END = TA_RAS_BLOCK__GFX_TCC_INDEX4_END, + /* TCI*/ + TA_RAS_BLOCK__GFX_TCI_WRITE_RAM, + /* TCP*/ + TA_RAS_BLOCK__GFX_TCP_INDEX_START, + TA_RAS_BLOCK__GFX_TCP_CACHE_RAM = TA_RAS_BLOCK__GFX_TCP_INDEX_START, + TA_RAS_BLOCK__GFX_TCP_LFIFO_RAM, + TA_RAS_BLOCK__GFX_TCP_CMD_FIFO, + TA_RAS_BLOCK__GFX_TCP_VM_FIFO, + TA_RAS_BLOCK__GFX_TCP_DB_RAM, + TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO0, + TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1, + TA_RAS_BLOCK__GFX_TCP_INDEX_END = TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1, + /* TD*/ + TA_RAS_BLOCK__GFX_TD_INDEX_START, + TA_RAS_BLOCK__GFX_TD_SS_FIFO_LO = TA_RAS_BLOCK__GFX_TD_INDEX_START, + TA_RAS_BLOCK__GFX_TD_SS_FIFO_HI, + TA_RAS_BLOCK__GFX_TD_CS_FIFO, + TA_RAS_BLOCK__GFX_TD_INDEX_END = TA_RAS_BLOCK__GFX_TD_CS_FIFO, + /* EA (3 sub-ranges)*/ + TA_RAS_BLOCK__GFX_EA_INDEX_START, + /* EA range 0*/ + TA_RAS_BLOCK__GFX_EA_INDEX0_START = TA_RAS_BLOCK__GFX_EA_INDEX_START, + TA_RAS_BLOCK__GFX_EA_DRAMRD_CMDMEM = TA_RAS_BLOCK__GFX_EA_INDEX0_START, + TA_RAS_BLOCK__GFX_EA_DRAMWR_CMDMEM, + TA_RAS_BLOCK__GFX_EA_DRAMWR_DATAMEM, + TA_RAS_BLOCK__GFX_EA_RRET_TAGMEM, + TA_RAS_BLOCK__GFX_EA_WRET_TAGMEM, + TA_RAS_BLOCK__GFX_EA_GMIRD_CMDMEM, + TA_RAS_BLOCK__GFX_EA_GMIWR_CMDMEM, + TA_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM, + TA_RAS_BLOCK__GFX_EA_INDEX0_END = TA_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM, + /* EA range 1*/ + TA_RAS_BLOCK__GFX_EA_INDEX1_START, + TA_RAS_BLOCK__GFX_EA_DRAMRD_PAGEMEM = TA_RAS_BLOCK__GFX_EA_INDEX1_START, + TA_RAS_BLOCK__GFX_EA_DRAMWR_PAGEMEM, + TA_RAS_BLOCK__GFX_EA_IORD_CMDMEM, + TA_RAS_BLOCK__GFX_EA_IOWR_CMDMEM, + TA_RAS_BLOCK__GFX_EA_IOWR_DATAMEM, + TA_RAS_BLOCK__GFX_EA_GMIRD_PAGEMEM, + TA_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM, + TA_RAS_BLOCK__GFX_EA_INDEX1_END = TA_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM, + /* 
EA range 2*/ + TA_RAS_BLOCK__GFX_EA_INDEX2_START, + TA_RAS_BLOCK__GFX_EA_MAM_D0MEM = TA_RAS_BLOCK__GFX_EA_INDEX2_START, + TA_RAS_BLOCK__GFX_EA_MAM_D1MEM, + TA_RAS_BLOCK__GFX_EA_MAM_D2MEM, + TA_RAS_BLOCK__GFX_EA_MAM_D3MEM, + TA_RAS_BLOCK__GFX_EA_INDEX2_END = TA_RAS_BLOCK__GFX_EA_MAM_D3MEM, + TA_RAS_BLOCK__GFX_EA_INDEX_END = TA_RAS_BLOCK__GFX_EA_INDEX2_END, + /* UTC VM L2 bank*/ + TA_RAS_BLOCK__UTC_VML2_BANK_CACHE, + /* UTC VM walker*/ + TA_RAS_BLOCK__UTC_VML2_WALKER, + /* UTC ATC L2 2MB cache*/ + TA_RAS_BLOCK__UTC_ATCL2_CACHE_2M_BANK, + /* UTC ATC L2 4KB cache*/ + TA_RAS_BLOCK__UTC_ATCL2_CACHE_4K_BANK, + TA_RAS_BLOCK__GFX_MAX +}; static const struct soc15_reg_golden golden_settings_gc_9_0[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400), -- cgit v1.2.3 From 2c960ea02f5863c910f9c839e8564ea01d868547 Mon Sep 17 00:00:00 2001 From: Dennis Li Date: Wed, 31 Jul 2019 20:42:15 +0800 Subject: drm/amdgpu: add RAS callback for gfx Add functions for RAS error inject and query error counter Signed-off-by: Dennis Li Reviewed-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 2 + drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 530 +++++++++++++++++++++++++++++++- 2 files changed, 531 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 1199b5828b90..554a59b3c4a6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -196,6 +196,8 @@ struct amdgpu_gfx_funcs { uint32_t *dst); void (*select_me_pipe_q)(struct amdgpu_device *adev, u32 me, u32 pipe, u32 queue, u32 vmid); + int (*ras_error_inject)(struct amdgpu_device *adev, void *inject_if); + int (*query_ras_error_count) (struct amdgpu_device *adev, void *ras_error_status); }; struct amdgpu_ngg_buf { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 1af28b5bec1d..c6ad662602be 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -322,6 +322,135 @@ enum ta_ras_gfx_subblock { TA_RAS_BLOCK__UTC_ATCL2_CACHE_4K_BANK, TA_RAS_BLOCK__GFX_MAX }; + +struct ras_gfx_subblock { + unsigned char *name; + int ta_subblock; + int supported_error_type; +}; + +#define AMDGPU_RAS_SUB_BLOCK(subblock, a, b, c, d) \ + [AMDGPU_RAS_BLOCK__##subblock] = { \ + #subblock, \ + TA_RAS_BLOCK__##subblock, \ + ((a) | ((b) << 1) | ((c) << 2) | ((d) << 3)), \ + } + +static const struct ras_gfx_subblock ras_gfx_subblocks[] = { + AMDGPU_RAS_SUB_BLOCK(GFX_CPC_SCRATCH, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_CPC_UCODE, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME1, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME1, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME1, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME2, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME2, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME2, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME2, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME1, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_CPF_TAG, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_ROQ, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_TAG, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_CPG_TAG, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_GDS_MEM, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_GDS_INPUT_QUEUE, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_CMD_RAM_MEM, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_DATA_RAM_MEM, 
1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PIPE_MEM, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_SPI_SR_MEM, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_SQ_SGPR, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_D, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_I, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_SQ_VGPR, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_UTCL1_LFIFO, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_WRITE_DATA_BUF, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_UTCL1_LFIFO, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_WRITE_DATA_BUF, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_UTCL1_LFIFO, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_WRITE_DATA_BUF, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_UTCL1_LFIFO, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_TAG_RAM, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_MISS_FIFO, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_BANK_RAM, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_TAG_RAM, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_HIT_FIFO, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_MISS_FIFO, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_BANK_RAM, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_TAG_RAM, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_MISS_FIFO, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_BANK_RAM, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_TAG_RAM, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_HIT_FIFO, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_MISS_FIFO, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_BANK_RAM, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_DFIFO, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_AFIFO, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TA_FL_LFIFO, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TA_FX_LFIFO, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_CFIFO, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TCA_HOLE_FIFO, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TCA_REQ_FIFO, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_0_1, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_0, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_1, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_0, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_1, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TCC_HIGH_RATE_TAG, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LOW_RATE_TAG, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_DEC, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_TRANSFER, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_DATA, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_CONTROL, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TCC_UC_ATOMIC_FIFO, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_RETURN, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_CACHE_READ, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO_NEXT_RAM, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_TAG_PROBE_FIFO, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO, 1, 0, 0, 1), + 
AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO_NEXT_RAM, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRRET_TAG_WRITE_RETURN, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TCC_ATOMIC_RETURN_BUFFER, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TCI_WRITE_RAM, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CACHE_RAM, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TCP_LFIFO_RAM, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CMD_FIFO, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TCP_VM_FIFO, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TCP_DB_RAM, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO0, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO1, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_LO, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_HI, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TD_CS_FIFO, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_CMDMEM, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_CMDMEM, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_DATAMEM, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_EA_RRET_TAGMEM, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_EA_WRET_TAGMEM, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_CMDMEM, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_CMDMEM, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_DATAMEM, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_PAGEMEM, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_PAGEMEM, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_EA_IORD_CMDMEM, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_CMDMEM, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_DATAMEM, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_PAGEMEM, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_PAGEMEM, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D0MEM, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D1MEM, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D2MEM, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D3MEM, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(UTC_VML2_BANK_CACHE, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(UTC_VML2_WALKER, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_2M_BANK, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_4K_BANK, 0, 1, 1, 1), +}; + static const struct soc15_reg_golden golden_settings_gc_9_0[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400), @@ -540,6 +669,10 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev); static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance); static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring); static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring); +static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status); +static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev, + void *inject_if); static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev) { @@ -1625,7 +1758,9 @@ static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = { .read_wave_data = &gfx_v9_0_read_wave_data, .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs, .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs, - .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q + .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q, + .ras_error_inject = &gfx_v9_0_ras_error_inject, + .query_ras_error_count = &gfx_v9_0_query_ras_error_count }; static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) @@ -5480,6 +5615,399 @@ static int gfx_v9_0_process_ras_data_cb(struct amdgpu_device *adev, return AMDGPU_RAS_UE; } +static const struct { + const char *name; + uint32_t ip; + uint32_t inst; + uint32_t seg; + uint32_t 
reg_offset; + uint32_t per_se_instance; + int32_t num_instance; + uint32_t sec_count_mask; + uint32_t ded_count_mask; +} gfx_ras_edc_regs[] = { + { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, + REG_FIELD_MASK(CPC_EDC_SCRATCH_CNT, SEC_COUNT), + REG_FIELD_MASK(CPC_EDC_SCRATCH_CNT, DED_COUNT) }, + { "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, + REG_FIELD_MASK(CPC_EDC_UCODE_CNT, SEC_COUNT), + REG_FIELD_MASK(CPC_EDC_UCODE_CNT, DED_COUNT) }, + { "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, + REG_FIELD_MASK(CPF_EDC_ROQ_CNT, COUNT_ME1), 0 }, + { "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, + REG_FIELD_MASK(CPF_EDC_ROQ_CNT, COUNT_ME2), 0 }, + { "CPF_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), 0, 1, + REG_FIELD_MASK(CPF_EDC_TAG_CNT, SEC_COUNT), + REG_FIELD_MASK(CPF_EDC_TAG_CNT, DED_COUNT) }, + { "CPG_DMA_ROQ", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT), 0, 1, + REG_FIELD_MASK(CPG_EDC_DMA_CNT, ROQ_COUNT), 0 }, + { "CPG_DMA_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT), 0, 1, + REG_FIELD_MASK(CPG_EDC_DMA_CNT, TAG_SEC_COUNT), + REG_FIELD_MASK(CPG_EDC_DMA_CNT, TAG_DED_COUNT) }, + { "CPG_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT), 0, 1, + REG_FIELD_MASK(CPG_EDC_TAG_CNT, SEC_COUNT), + REG_FIELD_MASK(CPG_EDC_TAG_CNT, DED_COUNT) }, + { "DC_CSINVOC", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), 0, 1, + REG_FIELD_MASK(DC_EDC_CSINVOC_CNT, COUNT_ME1), 0 }, + { "DC_RESTORE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), 0, 1, + REG_FIELD_MASK(DC_EDC_RESTORE_CNT, COUNT_ME1), 0 }, + { "DC_STATE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), 0, 1, + REG_FIELD_MASK(DC_EDC_STATE_CNT, COUNT_ME1), 0 }, + { "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1, + REG_FIELD_MASK(GDS_EDC_CNT, GDS_MEM_SEC), + REG_FIELD_MASK(GDS_EDC_CNT, GDS_MEM_DED) }, + { "GDS_INPUT_QUEUE", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1, + REG_FIELD_MASK(GDS_EDC_CNT, GDS_INPUT_QUEUE_SED), 0 }, + { "GDS_ME0_CS_PIPE_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), + 0, 1, REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC), + REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED) }, + { "GDS_OA_PHY_PHY_CMD_RAM_MEM", + SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1, + REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC), + REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED) }, + { "GDS_OA_PHY_PHY_DATA_RAM_MEM", + SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1, + REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SED), 0 }, + { "GDS_OA_PIPE_ME1_PIPE0_PIPE_MEM", + SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, + REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC), + REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED) }, + { "GDS_OA_PIPE_ME1_PIPE1_PIPE_MEM", + SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, + REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC), + REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED) }, + { "GDS_OA_PIPE_ME1_PIPE2_PIPE_MEM", + SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, + REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC), + REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED) }, + { "GDS_OA_PIPE_ME1_PIPE3_PIPE_MEM", + SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, + REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC), + REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED) }, + { "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 1, 1, + REG_FIELD_MASK(SPI_EDC_CNT, SPI_SR_MEM_SED_COUNT), 0 }, + { "TA_FS_DFIFO", 
SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16, + REG_FIELD_MASK(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT), + REG_FIELD_MASK(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT) }, + { "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16, + REG_FIELD_MASK(TA_EDC_CNT, TA_FS_AFIFO_SED_COUNT), 0 }, + { "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16, + REG_FIELD_MASK(TA_EDC_CNT, TA_FL_LFIFO_SED_COUNT), 0 }, + { "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16, + REG_FIELD_MASK(TA_EDC_CNT, TA_FX_LFIFO_SED_COUNT), 0 }, + { "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16, + REG_FIELD_MASK(TA_EDC_CNT, TA_FS_CFIFO_SED_COUNT), 0 }, + { "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 2, + REG_FIELD_MASK(TCA_EDC_CNT, HOLE_FIFO_SED_COUNT), 0 }, + { "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 2, + REG_FIELD_MASK(TCA_EDC_CNT, REQ_FIFO_SED_COUNT), 0 }, + { "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16, + REG_FIELD_MASK(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT), + REG_FIELD_MASK(TCC_EDC_CNT, CACHE_DATA_DED_COUNT) }, + { "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16, + REG_FIELD_MASK(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT), + REG_FIELD_MASK(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT) }, + { "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16, + REG_FIELD_MASK(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT), + REG_FIELD_MASK(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT) }, + { "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16, + REG_FIELD_MASK(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT), + REG_FIELD_MASK(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT) }, + { "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16, + REG_FIELD_MASK(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT), + REG_FIELD_MASK(TCC_EDC_CNT, SRC_FIFO_DED_COUNT) }, + { "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16, + REG_FIELD_MASK(TCC_EDC_CNT, IN_USE_DEC_SED_COUNT), 0 }, + { "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16, + REG_FIELD_MASK(TCC_EDC_CNT, IN_USE_TRANSFER_SED_COUNT), 0 }, + { "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16, + REG_FIELD_MASK(TCC_EDC_CNT, LATENCY_FIFO_SED_COUNT), 0 }, + { "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16, + REG_FIELD_MASK(TCC_EDC_CNT, RETURN_DATA_SED_COUNT), 0 }, + { "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16, + REG_FIELD_MASK(TCC_EDC_CNT, RETURN_CONTROL_SED_COUNT), 0 }, + { "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16, + REG_FIELD_MASK(TCC_EDC_CNT, UC_ATOMIC_FIFO_SED_COUNT), 0 }, + { "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 16, + REG_FIELD_MASK(TCC_EDC_CNT2, WRITE_RETURN_SED_COUNT), 0 }, + { "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 16, + REG_FIELD_MASK(TCC_EDC_CNT2, WRITE_CACHE_READ_SED_COUNT), 0 }, + { "TCC_SRC_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, + 16, REG_FIELD_MASK(TCC_EDC_CNT2, SRC_FIFO_NEXT_RAM_SED_COUNT), 0 }, + { "TCC_LATENCY_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + 0, 16, REG_FIELD_MASK(TCC_EDC_CNT2, LATENCY_FIFO_NEXT_RAM_SED_COUNT), + 0 }, + { "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, + 16, REG_FIELD_MASK(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SED_COUNT), 0 }, + { "TCC_WRRET_TAG_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + 0, 16, REG_FIELD_MASK(TCC_EDC_CNT2, WRRET_TAG_WRITE_RETURN_SED_COUNT), + 0 }, + { "TCC_ATOMIC_RETURN_BUFFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, + 16, REG_FIELD_MASK(TCC_EDC_CNT2, 
ATOMIC_RETURN_BUFFER_SED_COUNT), 0 }, + { "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), 0, 72, + REG_FIELD_MASK(TCI_EDC_CNT, WRITE_RAM_SED_COUNT), 0 }, + { "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16, + REG_FIELD_MASK(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT), + REG_FIELD_MASK(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT) }, + { "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16, + REG_FIELD_MASK(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT), + REG_FIELD_MASK(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT) }, + { "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16, + REG_FIELD_MASK(TCP_EDC_CNT_NEW, CMD_FIFO_SED_COUNT), 0 }, + { "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16, + REG_FIELD_MASK(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT), 0 }, + { "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16, + REG_FIELD_MASK(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT), 0 }, + { "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16, + REG_FIELD_MASK(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT), + REG_FIELD_MASK(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT) }, + { "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16, + REG_FIELD_MASK(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT), + REG_FIELD_MASK(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT) }, + { "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 1, 16, + REG_FIELD_MASK(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT), + REG_FIELD_MASK(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT) }, + { "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 1, 16, + REG_FIELD_MASK(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT), + REG_FIELD_MASK(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT) }, + { "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 1, 16, + REG_FIELD_MASK(TD_EDC_CNT, CS_FIFO_SED_COUNT), 0 }, + { "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16, + REG_FIELD_MASK(SQ_EDC_CNT, LDS_D_SEC_COUNT), + REG_FIELD_MASK(SQ_EDC_CNT, LDS_D_DED_COUNT) }, + { "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16, + REG_FIELD_MASK(SQ_EDC_CNT, LDS_I_SEC_COUNT), + REG_FIELD_MASK(SQ_EDC_CNT, LDS_I_DED_COUNT) }, + { "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16, + REG_FIELD_MASK(SQ_EDC_CNT, SGPR_SEC_COUNT), + REG_FIELD_MASK(SQ_EDC_CNT, SGPR_DED_COUNT) }, + { "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16, + REG_FIELD_MASK(SQ_EDC_CNT, VGPR0_SEC_COUNT), + REG_FIELD_MASK(SQ_EDC_CNT, VGPR0_DED_COUNT) }, + { "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16, + REG_FIELD_MASK(SQ_EDC_CNT, VGPR1_SEC_COUNT), + REG_FIELD_MASK(SQ_EDC_CNT, VGPR1_DED_COUNT) }, + { "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16, + REG_FIELD_MASK(SQ_EDC_CNT, VGPR2_SEC_COUNT), + REG_FIELD_MASK(SQ_EDC_CNT, VGPR2_DED_COUNT) }, + { "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16, + REG_FIELD_MASK(SQ_EDC_CNT, VGPR3_SEC_COUNT), + REG_FIELD_MASK(SQ_EDC_CNT, VGPR3_DED_COUNT) }, + { "SQC_DATA_CU0_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), + 1, 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT), + REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT) }, + { "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 1, + 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT), + REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT) }, + { "SQC_DATA_CU1_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), + 1, 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT), + REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT) }, + { "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, 
mmSQC_EDC_CNT), 1, + 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT), + REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT) }, + { "SQC_DATA_CU2_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), + 1, 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT), + REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT) }, + { "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 1, + 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT), + REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT) }, + { "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, + 6, REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT), + REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT) }, + { "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, + 6, REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT), + REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT) }, + { "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, + 6, REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT), + REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT) }, + { "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, + 6, REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT), + REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT) }, + { "SQC_INST_BANKA_UTCL1_MISS_FIFO", + SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, 6, + REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_UTCL1_MISS_FIFO_SED_COUNT), + 0 }, + { "SQC_INST_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, + 6, REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_MISS_FIFO_SED_COUNT), 0 }, + { "SQC_DATA_BANKA_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, + 6, REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_HIT_FIFO_SED_COUNT), 0 }, + { "SQC_DATA_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, + 6, REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_MISS_FIFO_SED_COUNT), 0 }, + { "SQC_DATA_BANKA_DIRTY_BIT_RAM", + SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, 6, + REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_SED_COUNT), 0 }, + { "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, 6, + REG_FIELD_MASK(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT), + REG_FIELD_MASK(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT) }, + { "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, + 6, REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT), + REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT) }, + { "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, + 6, REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT), + REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT) }, + { "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, + 6, REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT), + REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT) }, + { "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, + 6, REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT), + REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT) }, + { "SQC_INST_BANKB_UTCL1_MISS_FIFO", + SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, 6, + REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_UTCL1_MISS_FIFO_SED_COUNT), + 0 }, + { "SQC_INST_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, + 6, REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_MISS_FIFO_SED_COUNT), 0 }, + { "SQC_DATA_BANKB_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, + 6, 
REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_HIT_FIFO_SED_COUNT), 0 }, + { "SQC_DATA_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, + 6, REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_MISS_FIFO_SED_COUNT), 0 }, + { "SQC_DATA_BANKB_DIRTY_BIT_RAM", + SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, 6, + REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_DIRTY_BIT_RAM_SED_COUNT), 0 }, + { "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32, + REG_FIELD_MASK(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), + REG_FIELD_MASK(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT) }, + { "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32, + REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT) }, + { "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32, + REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT) }, + { "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32, + REG_FIELD_MASK(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT), + REG_FIELD_MASK(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT) }, + { "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32, + REG_FIELD_MASK(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + REG_FIELD_MASK(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT) }, + { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32, + REG_FIELD_MASK(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), 0 }, + { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32, + REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), 0 }, + { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32, + REG_FIELD_MASK(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT), 0 }, + { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32, + REG_FIELD_MASK(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT), 0 }, + { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32, + REG_FIELD_MASK(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT), 0 }, + { "GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32, + REG_FIELD_MASK(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + REG_FIELD_MASK(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT) }, + { "GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32, + REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT) }, + { "GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32, + REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT) }, + { "GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32, + REG_FIELD_MASK(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), 0 }, + { "GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32, + REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), 0 }, + { "MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32, + REG_FIELD_MASK(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT), 0 }, + { "MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32, + REG_FIELD_MASK(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT), 0 }, + { "MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32, + REG_FIELD_MASK(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT), 0 }, + { "MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32, + REG_FIELD_MASK(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT), 0 }, +}; + +static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev, + void *inject_if) +{ + struct ras_inject_if *info = (struct ras_inject_if *)inject_if; + int ret; + struct ta_ras_trigger_error_input block_info = { 0 }; + + if (adev->asic_type != CHIP_VEGA20) + return -EINVAL; 
+ + if (!ras_gfx_subblocks[info->head.sub_block_index].name) + return -EPERM; + + if (!(ras_gfx_subblocks[info->head.sub_block_index].supported_error_type & + info->head.type)) + return -EPERM; + + block_info.block_id = amdgpu_ras_block_to_ta(info->head.block); + block_info.sub_block_index = + ras_gfx_subblocks[info->head.sub_block_index].ta_subblock; + block_info.inject_error_type = amdgpu_ras_error_to_ta(info->head.type); + block_info.address = info->address; + block_info.value = info->value; + + mutex_lock(&adev->grbm_idx_mutex); + ret = psp_ras_trigger_error(&adev->psp, &block_info); + mutex_unlock(&adev->grbm_idx_mutex); + + return ret; +} + +static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status) +{ + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + uint32_t sec_count, ded_count; + uint32_t i; + uint32_t reg_value; + uint32_t se_id, instance_id; + + if (adev->asic_type != CHIP_VEGA20) + return -EINVAL; + + err_data->ue_count = 0; + err_data->ce_count = 0; + + mutex_lock(&adev->grbm_idx_mutex); + for (se_id = 0; se_id < adev->gfx.config.max_shader_engines; se_id++) { + for (instance_id = 0; instance_id < 256; instance_id++) { + for (i = 0; + i < sizeof(gfx_ras_edc_regs) / sizeof(gfx_ras_edc_regs[0]); + i++) { + if (se_id != 0 && + !gfx_ras_edc_regs[i].per_se_instance) + continue; + if (instance_id >= gfx_ras_edc_regs[i].num_instance) + continue; + + gfx_v9_0_select_se_sh(adev, se_id, 0, + instance_id); + + reg_value = RREG32( + adev->reg_offset[gfx_ras_edc_regs[i].ip] + [gfx_ras_edc_regs[i].inst] + [gfx_ras_edc_regs[i].seg] + + gfx_ras_edc_regs[i].reg_offset); + sec_count = reg_value & + gfx_ras_edc_regs[i].sec_count_mask; + ded_count = reg_value & + gfx_ras_edc_regs[i].ded_count_mask; + if (sec_count) { + DRM_INFO( + "Instance[%d][%d]: SubBlock %s, SEC %d\n", + se_id, instance_id, + gfx_ras_edc_regs[i].name, + sec_count); + err_data->ce_count++; + } + + if (ded_count) { + DRM_INFO( + "Instance[%d][%d]: SubBlock %s, DED %d\n", + se_id, instance_id, + gfx_ras_edc_regs[i].name, + ded_count); + err_data->ue_count++; + } + } + } + } + gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); + + return 0; +} + static int gfx_v9_0_cp_ecc_error_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) -- cgit v1.2.3 From 83b0582c90576dd838bfbec84579f7e674cf59b2 Mon Sep 17 00:00:00 2001 From: Dennis Li Date: Wed, 31 Jul 2019 20:45:50 +0800 Subject: drm/amdgpu: support gfx ras error injection and err_cnt query check gfx error count in both ras querry function and ras interrupt handler. gfx ras is still disabled by default due to known stability issue found in gpu reset. 
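
[Editor's note] The gfx_v9_0_query_ras_error_count() hunk in the patch above walks the gfx_ras_edc_regs[] table once per shader engine and per instance, reads each EDC counter register, and bumps the correctable/uncorrectable totals whenever the SEC or DED field of that register is non-zero. A minimal, self-contained sketch of that counting model follows; it is an editor's illustration rather than driver code, and the struct below merely stands in for the real ras_err_data:

struct err_totals {
	unsigned int ce_count;	/* correctable errors (SEC fields hit) */
	unsigned int ue_count;	/* uncorrectable errors (DED fields hit) */
};

/*
 * One EDC register read: the register contributes one correctable hit if
 * its SEC bit-field is non-zero and one uncorrectable hit if its DED
 * bit-field is non-zero, mirroring the per-entry logic in the hunk above.
 */
static void tally_edc_register(unsigned int reg_value,
			       unsigned int sec_count_mask,
			       unsigned int ded_count_mask,
			       struct err_totals *totals)
{
	if (reg_value & sec_count_mask)
		totals->ce_count++;
	if (reg_value & ded_count_mask)
		totals->ue_count++;
}
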
Signed-off-by: Dennis Li Reviewed-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 19 ++++++++++++++++--- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 2 ++ 2 files changed, 18 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 0c31bd06a7e8..e15fedb0ce73 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -602,6 +602,10 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev, if (adev->umc.funcs->query_ras_error_count) adev->umc.funcs->query_ras_error_count(adev, &err_data); break; + case AMDGPU_RAS_BLOCK__GFX: + if (adev->gfx.funcs->query_ras_error_count) + adev->gfx.funcs->query_ras_error_count(adev, &err_data); + break; default: break; } @@ -639,13 +643,22 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev, if (!obj) return -EINVAL; - if (block_info.block_id != TA_RAS_BLOCK__UMC) { + switch (info->head.block) { + case AMDGPU_RAS_BLOCK__GFX: + if (adev->gfx.funcs->ras_error_inject) + ret = adev->gfx.funcs->ras_error_inject(adev, info); + else + ret = -EINVAL; + break; + case AMDGPU_RAS_BLOCK__UMC: + ret = psp_ras_trigger_error(&adev->psp, &block_info); + break; + default: DRM_INFO("%s error injection is not supported yet\n", ras_block_str(info->head.block)); - return -EINVAL; + ret = -EINVAL; } - ret = psp_ras_trigger_error(&adev->psp, &block_info); if (ret) DRM_ERROR("RAS ERROR: inject %s error failed ret %d\n", ras_block_str(info->head.block), diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index c6ad662602be..bd82f6303bd6 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -5611,6 +5611,8 @@ static int gfx_v9_0_process_ras_data_cb(struct amdgpu_device *adev, { /* TODO ue will trigger an interrupt. 
*/ kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); + if (adev->gfx.funcs->query_ras_error_count) + adev->gfx.funcs->query_ras_error_count(adev, err_data); amdgpu_ras_reset_gpu(adev, 0); return AMDGPU_RAS_UE; } -- cgit v1.2.3 From dc4d716d4c0aa54392dfaca7a2e54b664edb5779 Mon Sep 17 00:00:00 2001 From: Dennis Li Date: Tue, 23 Jul 2019 18:23:44 +0800 Subject: drm/amdgpu: disable inject for failed subblocks of gfx some subblocks of gfx fail in inject test, disable them Signed-off-by: Dennis Li Reviewed-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 281 ++++++++++++++++++++-------------- 1 file changed, 165 insertions(+), 116 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index bd82f6303bd6..6756fc70b537 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -326,129 +326,166 @@ enum ta_ras_gfx_subblock { struct ras_gfx_subblock { unsigned char *name; int ta_subblock; - int supported_error_type; + int hw_supported_error_type; + int sw_supported_error_type; }; -#define AMDGPU_RAS_SUB_BLOCK(subblock, a, b, c, d) \ +#define AMDGPU_RAS_SUB_BLOCK(subblock, a, b, c, d, e, f, g, h) \ [AMDGPU_RAS_BLOCK__##subblock] = { \ #subblock, \ TA_RAS_BLOCK__##subblock, \ ((a) | ((b) << 1) | ((c) << 2) | ((d) << 3)), \ + (((e) << 1) | ((f) << 3) | (g) | ((h) << 2)), \ } static const struct ras_gfx_subblock ras_gfx_subblocks[] = { - AMDGPU_RAS_SUB_BLOCK(GFX_CPC_SCRATCH, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_CPC_UCODE, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME1, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME1, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME1, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME2, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME2, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME2, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME2, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME1, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_CPF_TAG, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_ROQ, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_TAG, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_CPG_TAG, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_GDS_MEM, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_GDS_INPUT_QUEUE, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_CMD_RAM_MEM, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_DATA_RAM_MEM, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PIPE_MEM, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_SPI_SR_MEM, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_SQ_SGPR, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_D, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_I, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_SQ_VGPR, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_UTCL1_LFIFO, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_WRITE_DATA_BUF, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_UTCL1_LFIFO, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_WRITE_DATA_BUF, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_UTCL1_LFIFO, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_WRITE_DATA_BUF, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_UTCL1_LFIFO, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_TAG_RAM, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_MISS_FIFO, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_BANK_RAM, 0, 1, 1, 1), - 
AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_TAG_RAM, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_HIT_FIFO, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_MISS_FIFO, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_BANK_RAM, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_TAG_RAM, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_MISS_FIFO, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_BANK_RAM, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_TAG_RAM, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_HIT_FIFO, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_MISS_FIFO, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_BANK_RAM, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_DFIFO, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_AFIFO, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_TA_FL_LFIFO, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_TA_FX_LFIFO, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_CFIFO, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_TCA_HOLE_FIFO, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_TCA_REQ_FIFO, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_0_1, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_0, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_1, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_0, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_1, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_TCC_HIGH_RATE_TAG, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LOW_RATE_TAG, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_DEC, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_TRANSFER, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_DATA, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_CONTROL, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_TCC_UC_ATOMIC_FIFO, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_RETURN, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_CACHE_READ, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO_NEXT_RAM, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_TAG_PROBE_FIFO, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO_NEXT_RAM, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRRET_TAG_WRITE_RETURN, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_TCC_ATOMIC_RETURN_BUFFER, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_TCI_WRITE_RAM, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CACHE_RAM, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_TCP_LFIFO_RAM, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CMD_FIFO, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_TCP_VM_FIFO, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_TCP_DB_RAM, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO0, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO1, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_LO, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_HI, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_TD_CS_FIFO, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_CMDMEM, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_CMDMEM, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_DATAMEM, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_EA_RRET_TAGMEM, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_EA_WRET_TAGMEM, 0, 1, 1, 1), - 
AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_CMDMEM, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_CMDMEM, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_DATAMEM, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_PAGEMEM, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_PAGEMEM, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_EA_IORD_CMDMEM, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_CMDMEM, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_DATAMEM, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_PAGEMEM, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_PAGEMEM, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D0MEM, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D1MEM, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D2MEM, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D3MEM, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(UTC_VML2_BANK_CACHE, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(UTC_VML2_WALKER, 0, 1, 1, 1), - AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_2M_BANK, 1, 0, 0, 1), - AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_4K_BANK, 0, 1, 1, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_CPC_SCRATCH, 0, 1, 1, 1, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_CPC_UCODE, 0, 1, 1, 1, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME1, 1, 0, 0, 1, 0, 0, 1, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME1, 1, 0, 0, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME1, 1, 0, 0, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME2, 1, 0, 0, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME2, 1, 0, 0, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME2, 1, 0, 0, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME2, 1, 0, 0, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME1, 1, 0, 0, 1, 0, 0, 1, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_CPF_TAG, 0, 1, 1, 1, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_ROQ, 1, 0, 0, 1, 0, 0, 1, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_TAG, 0, 1, 1, 1, 0, 1, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_CPG_TAG, 0, 1, 1, 1, 1, 1, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_GDS_MEM, 0, 1, 1, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_GDS_INPUT_QUEUE, 1, 0, 0, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_CMD_RAM_MEM, 0, 1, 1, 1, 0, 0, 0, + 0), + AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_DATA_RAM_MEM, 1, 0, 0, 1, 0, 0, 0, + 0), + AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PIPE_MEM, 0, 1, 1, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_SPI_SR_MEM, 1, 0, 0, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_SQ_SGPR, 0, 1, 1, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_D, 0, 1, 1, 1, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_I, 0, 1, 1, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_SQ_VGPR, 0, 1, 1, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0, + 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0, + 0), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0, + 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_UTCL1_LFIFO, 0, 1, 1, 1, 1, 0, 0, + 0), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0, + 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0, + 0), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_TAG_RAM, 0, 1, 1, 1, 1, 0, 0, + 1), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0, + 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0, + 0), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0, + 0), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_TAG_RAM, 0, 1, 1, 1, 0, 
0, 0, + 0), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0, + 0), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0, + 0), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0, + 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0, + 0), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_TAG_RAM, 0, 1, 1, 1, 1, 0, 0, + 0), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0, + 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0, + 0), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0, + 0), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_TAG_RAM, 0, 1, 1, 1, 0, 0, 0, + 0), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0, + 0), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0, + 0), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0, + 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0, + 0), + AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_DFIFO, 0, 1, 1, 1, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_AFIFO, 1, 0, 0, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_TA_FL_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_TA_FX_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_CFIFO, 1, 0, 0, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_TCA_HOLE_FIFO, 1, 0, 0, 1, 0, 1, 1, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_TCA_REQ_FIFO, 1, 0, 0, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA, 0, 1, 1, 1, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_0_1, 0, 1, 1, 1, 1, 0, 0, + 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_0, 0, 1, 1, 1, 1, 0, 0, + 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_1, 0, 1, 1, 1, 1, 0, 0, + 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_0, 0, 1, 1, 1, 0, 0, 0, + 0), + AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_1, 0, 1, 1, 1, 0, 0, 0, + 0), + AMDGPU_RAS_SUB_BLOCK(GFX_TCC_HIGH_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LOW_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_DEC, 1, 0, 0, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_TRANSFER, 1, 0, 0, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_DATA, 1, 0, 0, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_CONTROL, 1, 0, 0, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_TCC_UC_ATOMIC_FIFO, 1, 0, 0, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_RETURN, 1, 0, 0, 1, 0, 1, 1, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_CACHE_READ, 1, 0, 0, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO, 0, 1, 1, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 1, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_TAG_PROBE_FIFO, 1, 0, 0, 1, 0, 0, 0, + 0), + AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO, 1, 0, 0, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 0, + 0), + AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRRET_TAG_WRITE_RETURN, 1, 0, 0, 1, 0, 0, + 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_TCC_ATOMIC_RETURN_BUFFER, 1, 0, 0, 1, 0, 0, 0, + 0), + AMDGPU_RAS_SUB_BLOCK(GFX_TCI_WRITE_RAM, 1, 0, 0, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CACHE_RAM, 0, 1, 1, 1, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TCP_LFIFO_RAM, 0, 1, 1, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CMD_FIFO, 1, 0, 0, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_TCP_VM_FIFO, 0, 1, 1, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_TCP_DB_RAM, 1, 0, 0, 1, 0, 0, 0, 0), + 
AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO0, 0, 1, 1, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO1, 0, 1, 1, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_LO, 0, 1, 1, 1, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_HI, 0, 1, 1, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_TD_CS_FIFO, 1, 0, 0, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_CMDMEM, 0, 1, 1, 1, 1, 0, 0, 1), + AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_EA_RRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_EA_WRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_EA_IORD_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_DATAMEM, 1, 0, 0, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D0MEM, 1, 0, 0, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D1MEM, 1, 0, 0, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D2MEM, 1, 0, 0, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D3MEM, 1, 0, 0, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(UTC_VML2_BANK_CACHE, 0, 1, 1, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(UTC_VML2_WALKER, 0, 1, 1, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_2M_BANK, 1, 0, 0, 1, 0, 0, 0, 0), + AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_4K_BANK, 0, 1, 1, 1, 0, 0, 0, 0), }; static const struct soc15_reg_golden golden_settings_gc_9_0[] = @@ -5927,9 +5964,21 @@ static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev, if (!ras_gfx_subblocks[info->head.sub_block_index].name) return -EPERM; - if (!(ras_gfx_subblocks[info->head.sub_block_index].supported_error_type & - info->head.type)) + if (!(ras_gfx_subblocks[info->head.sub_block_index].hw_supported_error_type & + info->head.type)) { + DRM_ERROR("GFX Subblock %s, hardware do not support type 0x%x\n", + ras_gfx_subblocks[info->head.sub_block_index].name, + info->head.type); return -EPERM; + } + + if (!(ras_gfx_subblocks[info->head.sub_block_index].sw_supported_error_type & + info->head.type)) { + DRM_ERROR("GFX Subblock %s, driver do not support type 0x%x\n", + ras_gfx_subblocks[info->head.sub_block_index].name, + info->head.type); + return -EPERM; + } block_info.block_id = amdgpu_ras_block_to_ta(info->head.block); block_info.sub_block_index = -- cgit v1.2.3 From 9475a77b57713e3763be90d0bdf4db5f42a5cb6b Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 26 Jul 2019 14:15:12 -0500 Subject: drm/amdkfd: enable KFD support for navi14 Same as navi10. 
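
[Editor's note, referring back to the gfx_v9_0 RAS patches above] The reworked AMDGPU_RAS_SUB_BLOCK() entries pack two bitmasks per sub-block: hw_supported_error_type (error types the hardware can raise) and sw_supported_error_type (error types the driver still permits for injection after the sub-blocks that failed the inject test were disabled). gfx_v9_0_ras_error_inject() rejects a request unless the requested error-type bit is present in both masks. A small sketch of that gate, with illustrative names (only the two-mask check itself comes from the patches above):

struct subblock_caps {
	unsigned int hw_supported_error_type;	/* errors the block can report */
	unsigned int sw_supported_error_type;	/* errors injection still allows */
};

/* Return non-zero only if 'type' (a single error-type bit) passes both masks. */
static int injection_allowed(const struct subblock_caps *caps, unsigned int type)
{
	if (!(caps->hw_supported_error_type & type))
		return 0;	/* hardware cannot produce this error type */
	if (!(caps->sw_supported_error_type & type))
		return 0;	/* inject test known to fail; disabled in software */
	return 1;
}
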
Reviewed-by: Xiaojie Yuan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 33547a8a928a..0640fcdab557 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -91,6 +91,7 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev) kfd2kgd = amdgpu_amdkfd_arcturus_get_functions(); break; case CHIP_NAVI10: + case CHIP_NAVI14: kfd2kgd = amdgpu_amdkfd_gfx_10_0_get_functions(); break; default: -- cgit v1.2.3 From 3de433c5b38af49a5fc7602721e2ab5d39f1e69c Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Tue, 30 Jul 2019 14:46:28 -0700 Subject: drm/msm: Use the correct dma_sync calls in msm_gem [subject was: drm/msm: shake fist angrily at dma-mapping] So, using dma_sync_* for our cache needs works out w/ dma iommu ops, but it falls appart with dma direct ops. The problem is that, depending on display generation, we can have either set of dma ops (mdp4 and dpu have iommu wired to mdss node, which maps to toplevel drm device, but mdp5 has iommu wired up to the mdp sub-node within mdss). Fixes this splat on mdp5 devices: Unable to handle kernel paging request at virtual address ffffffff80000000 Mem abort info: ESR = 0x96000144 Exception class = DABT (current EL), IL = 32 bits SET = 0, FnV = 0 EA = 0, S1PTW = 0 Data abort info: ISV = 0, ISS = 0x00000144 CM = 1, WnR = 1 swapper pgtable: 4k pages, 48-bit VAs, pgdp=00000000810e4000 [ffffffff80000000] pgd=0000000000000000 Internal error: Oops: 96000144 [#1] SMP Modules linked in: btqcomsmd btqca bluetooth cfg80211 ecdh_generic ecc rfkill libarc4 panel_simple msm wcnss_ctrl qrtr_smd drm_kms_helper venus_enc venus_dec videobuf2_dma_sg videobuf2_memops drm venus_core ipv6 qrtr qcom_wcnss_pil v4l2_mem2mem qcom_sysmon videobuf2_v4l2 qmi_helpers videobuf2_common crct10dif_ce mdt_loader qcom_common videodev qcom_glink_smem remoteproc bmc150_accel_i2c bmc150_magn_i2c bmc150_accel_core bmc150_magn snd_soc_lpass_apq8016 snd_soc_msm8916_analog mms114 mc nf_defrag_ipv6 snd_soc_lpass_cpu snd_soc_apq8016_sbc industrialio_triggered_buffer kfifo_buf snd_soc_lpass_platform snd_soc_msm8916_digital drm_panel_orientation_quirks CPU: 2 PID: 33 Comm: kworker/2:1 Not tainted 5.3.0-rc2 #1 Hardware name: Samsung Galaxy A5U (EUR) (DT) Workqueue: events deferred_probe_work_func pstate: 80000005 (Nzcv daif -PAN -UAO) pc : __clean_dcache_area_poc+0x20/0x38 lr : arch_sync_dma_for_device+0x28/0x30 sp : ffff0000115736a0 x29: ffff0000115736a0 x28: 0000000000000001 x27: ffff800074830800 x26: ffff000011478000 x25: 0000000000000000 x24: 0000000000000001 x23: ffff000011478a98 x22: ffff800009fd1c10 x21: 0000000000000001 x20: ffff800075ad0a00 x19: 0000000000000000 x18: ffff0000112b2000 x17: 0000000000000000 x16: 0000000000000000 x15: 00000000fffffff0 x14: ffff000011455d70 x13: 0000000000000000 x12: 0000000000000028 x11: 0000000000000001 x10: ffff00001106c000 x9 : ffff7e0001d6b380 x8 : 0000000000001000 x7 : ffff7e0001d6b380 x6 : ffff7e0001d6b382 x5 : 0000000000000000 x4 : 0000000000001000 x3 : 000000000000003f x2 : 0000000000000040 x1 : ffffffff80001000 x0 : ffffffff80000000 Call trace: __clean_dcache_area_poc+0x20/0x38 dma_direct_sync_sg_for_device+0xb8/0xe8 get_pages+0x22c/0x250 [msm] msm_gem_get_and_pin_iova+0xdc/0x168 [msm] ... 
Fixes the combination of two patches: Fixes: 0036bc73ccbe (drm/msm: stop abusing dma_map/unmap for cache) Fixes: 449fa54d6815 (dma-direct: correct the physical addr in dma_direct_sync_sg_for_cpu/device) Tested-by: Stephan Gerhold Signed-off-by: Rob Clark [seanpaul changed subject to something more desriptive] Signed-off-by: Sean Paul Link: https://patchwork.freedesktop.org/patch/msgid/20190730214633.17820-1-robdclark@gmail.com --- drivers/gpu/drm/msm/msm_gem.c | 47 ++++++++++++++++++++++++++++++++++++++----- 1 file changed, 42 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index c2114c748c2f..8cf6362e64bf 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -32,6 +32,46 @@ static bool use_pages(struct drm_gem_object *obj) return !msm_obj->vram_node; } +/* + * Cache sync.. this is a bit over-complicated, to fit dma-mapping + * API. Really GPU cache is out of scope here (handled on cmdstream) + * and all we need to do is invalidate newly allocated pages before + * mapping to CPU as uncached/writecombine. + * + * On top of this, we have the added headache, that depending on + * display generation, the display's iommu may be wired up to either + * the toplevel drm device (mdss), or to the mdp sub-node, meaning + * that here we either have dma-direct or iommu ops. + * + * Let this be a cautionary tail of abstraction gone wrong. + */ + +static void sync_for_device(struct msm_gem_object *msm_obj) +{ + struct device *dev = msm_obj->base.dev->dev; + + if (get_dma_ops(dev)) { + dma_sync_sg_for_device(dev, msm_obj->sgt->sgl, + msm_obj->sgt->nents, DMA_BIDIRECTIONAL); + } else { + dma_map_sg(dev, msm_obj->sgt->sgl, + msm_obj->sgt->nents, DMA_BIDIRECTIONAL); + } +} + +static void sync_for_cpu(struct msm_gem_object *msm_obj) +{ + struct device *dev = msm_obj->base.dev->dev; + + if (get_dma_ops(dev)) { + dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl, + msm_obj->sgt->nents, DMA_BIDIRECTIONAL); + } else { + dma_unmap_sg(dev, msm_obj->sgt->sgl, + msm_obj->sgt->nents, DMA_BIDIRECTIONAL); + } +} + /* allocate pages from VRAM carveout, used when no IOMMU: */ static struct page **get_pages_vram(struct drm_gem_object *obj, int npages) { @@ -97,8 +137,7 @@ static struct page **get_pages(struct drm_gem_object *obj) * because display controller, GPU, etc. are not coherent: */ if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) - dma_sync_sg_for_device(dev->dev, msm_obj->sgt->sgl, - msm_obj->sgt->nents, DMA_BIDIRECTIONAL); + sync_for_device(msm_obj); } return msm_obj->pages; @@ -127,9 +166,7 @@ static void put_pages(struct drm_gem_object *obj) * GPU, etc. are not coherent: */ if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) - dma_sync_sg_for_cpu(obj->dev->dev, msm_obj->sgt->sgl, - msm_obj->sgt->nents, - DMA_BIDIRECTIONAL); + sync_for_cpu(msm_obj); sg_free_table(msm_obj->sgt); kfree(msm_obj->sgt); -- cgit v1.2.3 From 10e36489abda787e10b21a1e006121dee89c8f1f Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 30 Jul 2019 14:30:19 +0100 Subject: drm/i915/execlists: Always clear pending&inflight requests on reset If we skip the reset as we found the engine inactive at the time of the reset, we still need to clear the residual inflight & pending request bookkeeping to reflect the current state of HW. Otherwise, we may end up stuck in a loop like: <7> [416.490346] hangcheck rcs0 <7> [416.490371] hangcheck Awake? 
1 <7> [416.490376] hangcheck Hangcheck: 8003 ms ago <7> [416.490380] hangcheck Reset count: 0 (global 0) <7> [416.490383] hangcheck Requests: <7> [416.491210] hangcheck RING_START: 0x0017b000 <7> [416.491983] hangcheck RING_HEAD: 0x00000048 <7> [416.491992] hangcheck RING_TAIL: 0x00000048 <7> [416.492006] hangcheck RING_CTL: 0x00000000 <7> [416.492037] hangcheck RING_MODE: 0x00000200 [idle] <7> [416.492044] hangcheck RING_IMR: 00000000 <7> [416.492809] hangcheck ACTHD: 0x00000000_9ca00048 <7> [416.492824] hangcheck BBADDR: 0x00000000_00001004 <7> [416.492838] hangcheck DMA_FADDR: 0x00000000_00000000 <7> [416.492845] hangcheck IPEIR: 0x00000000 <7> [416.492852] hangcheck IPEHR: 0x00000000 <7> [416.492863] hangcheck Execlist status: 0x00018001 00000000, entries 12 <7> [416.492869] hangcheck Execlist CSB read 1, write 1, tasklet queued? no (enabled) <7> [416.492938] hangcheck Pending[0] ring:{start:0017b000, hwsp:fedf9000, seqno:00016fd6}, rq: 20ffa:16fd6!+ prio=-4094 @ 8307ms: signaled <7> [416.492972] hangcheck Queue priority hint: -4093 <7> [416.492979] hangcheck Q 20ffa:16fd8- prio=-4093 @ 8307ms: [i915] <7> [416.492985] hangcheck Q 20ffa:16fda prio=-4094 @ 8307ms: [i915] <7> [416.492990] hangcheck Q 20ffa:16fdc prio=-4094 @ 8307ms: [i915] <7> [416.492996] hangcheck Q 20ffa:16fde prio=-4094 @ 8307ms: [i915] <7> [416.493001] hangcheck Q 20ffa:16fe0 prio=-4094 @ 8307ms: [i915] <7> [416.493007] hangcheck Q 20ffa:16fe2 prio=-4094 @ 8307ms: [i915] <7> [416.493013] hangcheck Q 20ffa:16fe4 prio=-4094 @ 8307ms: [i915] <7> [416.493021] hangcheck ...skipping 21 queued requests... <7> [416.493027] hangcheck Q 20ffa:17010 prio=-4094 @ 8307ms: [i915] <7> [416.493081] hangcheck HWSP: <7> [416.493089] hangcheck [0000] 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 <7> [416.493094] hangcheck * <7> [416.493100] hangcheck [0040] 10008002 00000000 10000018 00000000 10000018 00000000 10000001 00000000 <7> [416.493106] hangcheck [0060] 10000018 00000000 10000001 00000000 10000018 00000000 10000001 00000000 <7> [416.493111] hangcheck * <7> [416.493117] hangcheck [00a0] 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000001 <7> [416.493123] hangcheck [00c0] 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 <7> [416.493127] hangcheck * <7> [416.493132] hangcheck Idle? no <6> [416.512124] i915 0000:00:02.0: GPU HANG: ecode 11:0:0x00000000, hang on rcs0 <6> [416.512205] [drm] GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace. <6> [416.512207] [drm] Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel <6> [416.512208] [drm] drm/i915 developers can then reassign to the right component if it's not a kernel issue. <6> [416.512210] [drm] The gpu crash dump is required to analyze gpu hangs, so please always attach it. <6> [416.512212] [drm] GPU crash dump saved to /sys/class/drm/card0/error <5> [416.513602] i915 0000:00:02.0: Resetting rcs0 for hang on rcs0 <7> [424.489258] hangcheck rcs0 <7> [424.489263] hangcheck Awake? 
1 <7> [424.489267] hangcheck Hangcheck: 5954 ms ago <7> [424.489271] hangcheck Reset count: 1 (global 0) <7> [424.489274] hangcheck Requests: <7> [424.490128] hangcheck RING_START: 0x00000000 <7> [424.490870] hangcheck RING_HEAD: 0x00000000 <7> [424.490877] hangcheck RING_TAIL: 0x00000000 <7> [424.490887] hangcheck RING_CTL: 0x00000000 <7> [424.490897] hangcheck RING_MODE: 0x00000200 [idle] <7> [424.490904] hangcheck RING_IMR: 00000000 <7> [424.490917] hangcheck ACTHD: 0x00000000_00000000 <7> [424.490930] hangcheck BBADDR: 0x00000000_00000000 <7> [424.490943] hangcheck DMA_FADDR: 0x00000000_00000000 <7> [424.490950] hangcheck IPEIR: 0x00000000 <7> [424.490956] hangcheck IPEHR: 0x00000000 <7> [424.490968] hangcheck Execlist status: 0x00000001 00000000, entries 12 <7> [424.490972] hangcheck Execlist CSB read 11, write 11, tasklet queued? no (enabled) <7> [424.490983] hangcheck Pending[0] ring:{start:0017b000, hwsp:fedf9000, seqno:00016fd6}, rq: 20ffa:16fd6!+ prio=-4094 @ 16305ms: signaled <7> [424.490989] hangcheck Queue priority hint: -4093 <7> [424.490996] hangcheck Q 20ffa:16fd8- prio=-4093 @ 16305ms: [i915] <7> [424.491001] hangcheck Q 20ffa:16fda prio=-4094 @ 16305ms: [i915] <7> [424.491006] hangcheck Q 20ffa:16fdc prio=-4094 @ 16305ms: [i915] <7> [424.491011] hangcheck Q 20ffa:16fde prio=-4094 @ 16305ms: [i915] <7> [424.491016] hangcheck Q 20ffa:16fe0 prio=-4094 @ 16305ms: [i915] <7> [424.491022] hangcheck Q 20ffa:16fe2 prio=-4094 @ 16305ms: [i915] <7> [424.491048] hangcheck Q 20ffa:16fe4 prio=-4094 @ 16305ms: [i915] <7> [424.491057] hangcheck ...skipping 21 queued requests... <7> [424.491063] hangcheck Q 20ffa:17010 prio=-4094 @ 16305ms: [i915] <7> [424.491095] hangcheck HWSP: <7> [424.491102] hangcheck [0000] 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 <7> [424.491106] hangcheck * <7> [424.491113] hangcheck [0040] 10008002 00000000 10000018 00000000 10000018 00000000 10000001 00000000 <7> [424.491118] hangcheck [0060] 10000018 00000000 10000001 00000000 10000018 00000000 10000001 00000000 <7> [424.491122] hangcheck * <7> [424.491127] hangcheck [00a0] 00000000 00000000 00000000 00000000 00000000 00000000 00000000 0000000b <7> [424.491133] hangcheck [00c0] 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 <7> [424.491136] hangcheck * <7> [424.491141] hangcheck Idle? no <5> [424.491834] i915 0000:00:02.0: Resetting rcs0 for hang on rcs0 Where not having cleared the pending array on reset, it persists indefinitely. Fixes: fff8102aaed5 ("drm/i915/execlists: Process interrupted context on reset") Signed-off-by: Chris Wilson Cc: Mika Kuoppala Cc: Tvrtko Ursulin Reviewed-by: Andi Shyti Link: https://patchwork.freedesktop.org/patch/msgid/20190730133035.1977-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_lrc.c | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index d9061d9348cb..5181d29d272e 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -2280,18 +2280,6 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled) GEM_BUG_ON(i915_active_is_idle(&ce->active)); GEM_BUG_ON(!i915_vma_is_pinned(ce->state)); rq = active_request(rq); - - /* - * Catch up with any missed context-switch interrupts. - * - * Ideally we would just read the remaining CSB entries now that we - * know the gpu is idle. 
However, the CSB registers are sometimes^W - * often trashed across a GPU reset! Instead we have to rely on - * guessing the missed context-switch events by looking at what - * requests were completed. - */ - execlists_cancel_port_requests(execlists); - if (!rq) { ce->ring->head = ce->ring->tail; goto out_replay; @@ -2353,6 +2341,7 @@ out_replay: unwind: /* Push back any incomplete requests for replay after the reset. */ + execlists_cancel_port_requests(execlists); __unwind_incomplete_requests(engine); } -- cgit v1.2.3 From de70fdd7d24cd07e51fbec420f8704d956a47949 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Thu, 4 Jul 2019 10:45:34 +0000 Subject: drm/i915: fix possible memory leak in intel_hdcp_auth_downstream() 'ksv_fifo' is malloced in intel_hdcp_auth_downstream() and should be freed before leaving from the error handling cases, otherwise it will cause memory leak. Fixes: f26ae6a652f2 ("drm/i915: SRM revocation check for HDCP1.4 and 2.2") Signed-off-by: Wei Yongjun Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190704104534.12508-1-weiyongjun1@huawei.com --- drivers/gpu/drm/i915/display/intel_hdcp.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index a78139f9e847..1ffc1e61c2be 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -540,7 +540,8 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector) if (drm_hdcp_check_ksvs_revoked(dev, ksv_fifo, num_downstream)) { DRM_ERROR("Revoked Ksv(s) in ksv_fifo\n"); - return -EPERM; + ret = -EPERM; + goto err; } /* -- cgit v1.2.3 From 0de50e40fc685fed4d6896a379b123f859ffb17b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 26 Jun 2019 16:45:49 +0100 Subject: drm/i915: Lift intel_engines_resume() to callers Since the reset path wants to recover the engines itself, it only wants to reinitialise the hardware using i915_gem_init_hw(). Pull the call to intel_engines_resume() to the module init/resume path so we can avoid it during reset. Fixes: 79ffac8599c4 ("drm/i915: Invert the GEM wakeref hierarchy") Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Cc: Tvrtko Ursulin Cc: Imre Deak Link: https://patchwork.freedesktop.org/patch/msgid/20190626154549.10066-3-chris@chris-wilson.co.uk (cherry picked from commit 092be382a2602067766f190a113514d469162456) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/gem/i915_gem_pm.c | 7 ++++--- drivers/gpu/drm/i915/gt/intel_engine_pm.c | 24 ------------------------ drivers/gpu/drm/i915/gt/intel_engine_pm.h | 2 -- drivers/gpu/drm/i915/gt/intel_gt_pm.c | 21 ++++++++++++++++++++- drivers/gpu/drm/i915/gt/intel_gt_pm.h | 2 +- drivers/gpu/drm/i915/gt/intel_reset.c | 21 ++++++++++++++++++++- drivers/gpu/drm/i915/i915_gem.c | 25 +++++++++++-------------- 7 files changed, 56 insertions(+), 46 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c index 05011d4a3b88..914b5d4112bb 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c @@ -253,14 +253,15 @@ void i915_gem_resume(struct drm_i915_private *i915) i915_gem_restore_gtt_mappings(i915); i915_gem_restore_fences(i915); + if (i915_gem_init_hw(i915)) + goto err_wedged; + /* * As we didn't flush the kernel context before suspend, we cannot * guarantee that the context image is complete. 
So let's just reset * it and start again. */ - intel_gt_resume(i915); - - if (i915_gem_init_hw(i915)) + if (intel_gt_resume(i915)) goto err_wedged; intel_uc_resume(i915); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c index 2ce00d3dc42a..ae5b6baf6dff 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c @@ -142,27 +142,3 @@ void intel_engine_init__pm(struct intel_engine_cs *engine) { intel_wakeref_init(&engine->wakeref); } - -int intel_engines_resume(struct drm_i915_private *i915) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - int err = 0; - - intel_gt_pm_get(i915); - for_each_engine(engine, i915, id) { - intel_engine_pm_get(engine); - engine->serial++; /* kernel context lost */ - err = engine->resume(engine); - intel_engine_pm_put(engine); - if (err) { - dev_err(i915->drm.dev, - "Failed to restart %s (%d)\n", - engine->name, err); - break; - } - } - intel_gt_pm_put(i915); - - return err; -} diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h index b326cd993d60..f6f213fbc98c 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h @@ -17,6 +17,4 @@ void intel_engine_park(struct intel_engine_cs *engine); void intel_engine_init__pm(struct intel_engine_cs *engine); -int intel_engines_resume(struct drm_i915_private *i915); - #endif /* INTEL_ENGINE_PM_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index 7b5967751762..9f8f7f54191f 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -5,6 +5,7 @@ */ #include "i915_drv.h" +#include "intel_engine_pm.h" #include "intel_gt_pm.h" #include "intel_pm.h" #include "intel_wakeref.h" @@ -118,10 +119,11 @@ void intel_gt_sanitize(struct drm_i915_private *i915, bool force) intel_engine_reset(engine, false); } -void intel_gt_resume(struct drm_i915_private *i915) +int intel_gt_resume(struct drm_i915_private *i915) { struct intel_engine_cs *engine; enum intel_engine_id id; + int err = 0; /* * After resume, we may need to poke into the pinned kernel @@ -129,9 +131,12 @@ void intel_gt_resume(struct drm_i915_private *i915) * Only the kernel contexts should remain pinned over suspend, * allowing us to fixup the user contexts on their first pin. 
*/ + intel_gt_pm_get(i915); for_each_engine(engine, i915, id) { struct intel_context *ce; + intel_engine_pm_get(engine); + ce = engine->kernel_context; if (ce) ce->ops->reset(ce); @@ -139,5 +144,19 @@ void intel_gt_resume(struct drm_i915_private *i915) ce = engine->preempt_context; if (ce) ce->ops->reset(ce); + + engine->serial++; /* kernel context lost */ + err = engine->resume(engine); + + intel_engine_pm_put(engine); + if (err) { + dev_err(i915->drm.dev, + "Failed to restart %s (%d)\n", + engine->name, err); + break; + } } + intel_gt_pm_put(i915); + + return err; } diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h index 7dd1130a19a4..53f342b20181 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h @@ -22,6 +22,6 @@ void intel_gt_pm_put(struct drm_i915_private *i915); void intel_gt_pm_init(struct drm_i915_private *i915); void intel_gt_sanitize(struct drm_i915_private *i915, bool force); -void intel_gt_resume(struct drm_i915_private *i915); +int intel_gt_resume(struct drm_i915_private *i915); #endif /* INTEL_GT_PM_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 4c478b38e420..0439ed66e969 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -951,6 +951,21 @@ static int do_reset(struct drm_i915_private *i915, return gt_reset(i915, stalled_mask); } +static int resume(struct drm_i915_private *i915) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + int ret; + + for_each_engine(engine, i915, id) { + ret = engine->resume(engine); + if (ret) + return ret; + } + + return 0; +} + /** * i915_reset - reset chip after a hang * @i915: #drm_i915_private to reset @@ -1024,9 +1039,13 @@ void i915_reset(struct drm_i915_private *i915, if (ret) { DRM_ERROR("Failed to initialise HW following reset (%d)\n", ret); - goto error; + goto taint; } + ret = resume(i915); + if (ret) + goto taint; + i915_queue_hangcheck(i915); finish: diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 190ad54fb072..8a659d3d7435 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -46,7 +46,6 @@ #include "gem/i915_gem_ioctls.h" #include "gem/i915_gem_pm.h" #include "gem/i915_gemfs.h" -#include "gt/intel_engine_pm.h" #include "gt/intel_gt_pm.h" #include "gt/intel_mocs.h" #include "gt/intel_reset.h" @@ -1307,21 +1306,13 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv) intel_mocs_init_l3cc_table(dev_priv); - /* Only when the HW is re-initialised, can we replay the requests */ - ret = intel_engines_resume(dev_priv); - if (ret) - goto cleanup_uc; - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); intel_engines_set_scheduler_caps(dev_priv); return 0; -cleanup_uc: - intel_uc_fini_hw(dev_priv); out: intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); - return ret; } @@ -1580,6 +1571,11 @@ int i915_gem_init(struct drm_i915_private *dev_priv) if (ret) goto err_uc_init; + /* Only when the HW is re-initialised, can we replay the requests */ + ret = intel_gt_resume(dev_priv); + if (ret) + goto err_init_hw; + /* * Despite its name intel_init_clock_gating applies both display * clock gating workarounds; GT mmio workarounds and the occasional @@ -1593,20 +1589,20 @@ int i915_gem_init(struct drm_i915_private *dev_priv) ret = intel_engines_verify_workarounds(dev_priv); if (ret) - goto err_init_hw; + goto err_gt; ret = __intel_engines_record_defaults(dev_priv); 
if (ret) - goto err_init_hw; + goto err_gt; if (i915_inject_load_failure()) { ret = -ENODEV; - goto err_init_hw; + goto err_gt; } if (i915_inject_load_failure()) { ret = -EIO; - goto err_init_hw; + goto err_gt; } intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); @@ -1620,7 +1616,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv) * HW as irrevisibly wedged, but keep enough state around that the * driver doesn't explode during runtime. */ -err_init_hw: +err_gt: mutex_unlock(&dev_priv->drm.struct_mutex); i915_gem_set_wedged(dev_priv); @@ -1630,6 +1626,7 @@ err_init_hw: i915_gem_drain_workqueue(dev_priv); mutex_lock(&dev_priv->drm.struct_mutex); +err_init_hw: intel_uc_fini_hw(dev_priv); err_uc_init: intel_uc_fini(dev_priv); -- cgit v1.2.3 From b1fa6fd94fc6a5d6be85359743b5f3626f3f881c Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 26 Jun 2019 16:45:47 +0100 Subject: drm/i915: Add a wakeref getter for iff the wakeref is already active For use in the next patch, we want to acquire a wakeref without having to wake the device up -- i.e. only acquire the engine wakeref if the engine is already active. Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190626154549.10066-1-chris@chris-wilson.co.uk (cherry picked from commit de5147b8ce6d51f634661d7c531385371485cec6) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/gt/intel_engine_pm.h | 10 +++++++++- drivers/gpu/drm/i915/intel_wakeref.h | 15 +++++++++++++++ 2 files changed, 24 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h index f6f213fbc98c..a11c893f64c6 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h @@ -7,12 +7,20 @@ #ifndef INTEL_ENGINE_PM_H #define INTEL_ENGINE_PM_H +#include "intel_engine_types.h" +#include "intel_wakeref.h" + struct drm_i915_private; -struct intel_engine_cs; void intel_engine_pm_get(struct intel_engine_cs *engine); void intel_engine_pm_put(struct intel_engine_cs *engine); +static inline bool +intel_engine_pm_get_if_awake(struct intel_engine_cs *engine) +{ + return intel_wakeref_get_if_active(&engine->wakeref); +} + void intel_engine_park(struct intel_engine_cs *engine); void intel_engine_init__pm(struct intel_engine_cs *engine); diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h index 9cbb2ebf575b..38275310b196 100644 --- a/drivers/gpu/drm/i915/intel_wakeref.h +++ b/drivers/gpu/drm/i915/intel_wakeref.h @@ -65,6 +65,21 @@ intel_wakeref_get(struct intel_runtime_pm *rpm, return 0; } +/** + * intel_wakeref_get_if_in_use: Acquire the wakeref + * @wf: the wakeref + * + * Acquire a hold on the wakeref, but only if the wakeref is already + * active. + * + * Returns: true if the wakeref was acquired, false otherwise. + */ +static inline bool +intel_wakeref_get_if_active(struct intel_wakeref *wf) +{ + return atomic_inc_not_zero(&wf->count); +} + /** * intel_wakeref_put: Release the wakeref * @i915: the drm_i915_private device -- cgit v1.2.3 From 4b9bb9728c915c6079619e71e3340fe4840d9d40 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 26 Jun 2019 16:45:48 +0100 Subject: drm/i915: Only recover active engines If we issue a reset to a currently idle engine, leave it idle afterwards. This is useful to excise a linkage between reset and the shrinker. 
When waking the engine, we need to pin the default context image which we use for overwriting a guilty context -- if the engine is idle we do not need this pinned image! However, this pinning means that waking the engine acquires the FS_RECLAIM, and so may trigger the shrinker. The shrinker itself may need to wait upon the GPU to unbind and object and so may require services of reset; ergo we should avoid the engine wake up path. The danger in skipping the recovery for idle engines is that we leave the engine with no context defined, which may interfere with the operation of the power context on some older platforms. In practice, we should only be resetting an active GPU but it something to look out for on Ironlake (if memory serves). Fixes: 79ffac8599c4 ("drm/i915: Invert the GEM wakeref hierarchy") Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Cc: Tvrtko Ursulin Cc: Imre Deak Link: https://patchwork.freedesktop.org/patch/msgid/20190626154549.10066-2-chris@chris-wilson.co.uk (cherry picked from commit 18398904ca9e3ddd180e2ecd45886e146b1d9d5b) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/gt/intel_reset.c | 37 +++++++++++++++++++------------- drivers/gpu/drm/i915/gt/selftest_reset.c | 5 ++++- 2 files changed, 26 insertions(+), 16 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 0439ed66e969..3f907701ef4d 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -687,7 +687,6 @@ static void reset_prepare_engine(struct intel_engine_cs *engine) * written to the powercontext is undefined and so we may lose * GPU state upon resume, i.e. fail to restart after a reset. */ - intel_engine_pm_get(engine); intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL); engine->reset.prepare(engine); } @@ -718,16 +717,21 @@ static void revoke_mmaps(struct drm_i915_private *i915) } } -static void reset_prepare(struct drm_i915_private *i915) +static intel_engine_mask_t reset_prepare(struct drm_i915_private *i915) { struct intel_engine_cs *engine; + intel_engine_mask_t awake = 0; enum intel_engine_id id; - intel_gt_pm_get(i915); - for_each_engine(engine, i915, id) + for_each_engine(engine, i915, id) { + if (intel_engine_pm_get_if_awake(engine)) + awake |= engine->mask; reset_prepare_engine(engine); + } intel_uc_reset_prepare(i915); + + return awake; } static void gt_revoke(struct drm_i915_private *i915) @@ -761,20 +765,22 @@ static int gt_reset(struct drm_i915_private *i915, static void reset_finish_engine(struct intel_engine_cs *engine) { engine->reset.finish(engine); - intel_engine_pm_put(engine); intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL); + + intel_engine_signal_breadcrumbs(engine); } -static void reset_finish(struct drm_i915_private *i915) +static void reset_finish(struct drm_i915_private *i915, + intel_engine_mask_t awake) { struct intel_engine_cs *engine; enum intel_engine_id id; for_each_engine(engine, i915, id) { reset_finish_engine(engine); - intel_engine_signal_breadcrumbs(engine); + if (awake & engine->mask) + intel_engine_pm_put(engine); } - intel_gt_pm_put(i915); } static void nop_submit_request(struct i915_request *request) @@ -798,6 +804,7 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915) { struct i915_gpu_error *error = &i915->gpu_error; struct intel_engine_cs *engine; + intel_engine_mask_t awake; enum intel_engine_id id; if (test_bit(I915_WEDGED, &error->flags)) @@ -817,7 +824,7 @@ static void 
__i915_gem_set_wedged(struct drm_i915_private *i915) * rolling the global seqno forward (since this would complete requests * for which we haven't set the fence error to EIO yet). */ - reset_prepare(i915); + awake = reset_prepare(i915); /* Even if the GPU reset fails, it should still stop the engines */ if (!INTEL_INFO(i915)->gpu_reset_clobbers_display) @@ -841,7 +848,7 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915) for_each_engine(engine, i915, id) engine->cancel_requests(engine); - reset_finish(i915); + reset_finish(i915, awake); GEM_TRACE("end\n"); } @@ -988,6 +995,7 @@ void i915_reset(struct drm_i915_private *i915, const char *reason) { struct i915_gpu_error *error = &i915->gpu_error; + intel_engine_mask_t awake; int ret; GEM_TRACE("flags=%lx\n", error->flags); @@ -1004,7 +1012,7 @@ void i915_reset(struct drm_i915_private *i915, dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason); error->reset_count++; - reset_prepare(i915); + awake = reset_prepare(i915); if (!intel_has_gpu_reset(i915)) { if (i915_modparams.reset) @@ -1049,7 +1057,7 @@ void i915_reset(struct drm_i915_private *i915, i915_queue_hangcheck(i915); finish: - reset_finish(i915); + reset_finish(i915, awake); unlock: mutex_unlock(&error->wedge_mutex); return; @@ -1100,7 +1108,7 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg) GEM_TRACE("%s flags=%lx\n", engine->name, error->flags); GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags)); - if (!intel_wakeref_active(&engine->wakeref)) + if (!intel_engine_pm_get_if_awake(engine)) return 0; reset_prepare_engine(engine); @@ -1135,12 +1143,11 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg) * process to program RING_MODE, HWSP and re-enable submission. */ ret = engine->resume(engine); - if (ret) - goto out; out: intel_engine_cancel_stop_cs(engine); reset_finish_engine(engine); + intel_engine_pm_put(engine); return ret; } diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c index 89da9e7cc1ba..b5c590c9ccba 100644 --- a/drivers/gpu/drm/i915/gt/selftest_reset.c +++ b/drivers/gpu/drm/i915/gt/selftest_reset.c @@ -71,13 +71,16 @@ static int igt_atomic_reset(void *arg) goto unlock; for (p = igt_atomic_phases; p->name; p++) { + intel_engine_mask_t awake; + GEM_TRACE("intel_gpu_reset under %s\n", p->name); + awake = reset_prepare(i915); p->critical_section_begin(); reset_prepare(i915); err = intel_gpu_reset(i915, ALL_ENGINES); - reset_finish(i915); p->critical_section_end(); + reset_finish(i915, awake); if (err) { pr_err("intel_gpu_reset failed under %s\n", p->name); -- cgit v1.2.3 From 8ad4ca6e9a8166495b5ba843375f199c7f0c218e Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Tue, 30 Jul 2019 14:34:30 +0300 Subject: drm/i915/oa: add content to Makefile Apparently the empty Makefile has caused some confusion. Add the subdir-cc-flags-y as in 7fcc7ca549d4 ("drm/i915: add header search path to subdir Makefiles") which should be useful. The generated headers still aren't self-contained, so can't add that. 
References: http://marc.info/?i=80bf2204-558a-6d3f-c493-bf17b891fc8a@infradead.org Cc: Chris Wilson Cc: Lionel Landwerlin Cc: Michal Wajdeczko Reviewed-by: Michal Wajdeczko Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190730113432.22146-1-jani.nikula@intel.com --- drivers/gpu/drm/i915/oa/Makefile | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/oa/Makefile b/drivers/gpu/drm/i915/oa/Makefile index e69de29bb2d1..256bfde4a287 100644 --- a/drivers/gpu/drm/i915/oa/Makefile +++ b/drivers/gpu/drm/i915/oa/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: MIT + +# For building individual subdir files on the command line +subdir-ccflags-y += -I$(srctree)/$(src)/.. -- cgit v1.2.3 From d9b42dfab513c9130ee0458f2e6febb75c89d1c8 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Wed, 3 Jul 2019 09:58:18 +0200 Subject: drm/client: Support unmapping of DRM client buffers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit DRM clients, such as the fbdev emulation, have their buffer objects mapped by default. Mapping a buffer implicitly prevents its relocation. Hence, the buffer may permanently consume video memory while it's allocated. This is a problem for drivers of low-memory devices, such as ast, mgag200 or older framebuffer hardware, which will then not have enough memory to display other content (e.g., X11). This patch introduces drm_client_buffer_vmap() and _vunmap(). Internal DRM clients can use these functions to unmap and remap buffer objects as needed. There's no reference counting for vmap operations. Callers are expected to either keep buffers mapped (as it is now), or call vmap and vunmap in pairs around code that accesses the mapped memory. v2: * remove several duplicated NULL-pointer checks v3: * style and typo fixes Signed-off-by: Thomas Zimmermann Reviewed-by: Noralf Trønnes Link: https://patchwork.freedesktop.org/patch/315831/ Signed-off-by: Gerd Hoffmann --- drivers/gpu/drm/drm_client.c | 66 +++++++++++++++++++++++++++++++++++++------- include/drm/drm_client.h | 2 ++ 2 files changed, 58 insertions(+), 10 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c index 410572f14257..fb107b24baae 100644 --- a/drivers/gpu/drm/drm_client.c +++ b/drivers/gpu/drm/drm_client.c @@ -281,6 +281,42 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u buffer->gem = obj; + vaddr = drm_client_buffer_vmap(buffer); + if (IS_ERR(vaddr)) { + ret = PTR_ERR(vaddr); + goto err_delete; + } + + return buffer; + +err_delete: + drm_client_buffer_delete(buffer); + + return ERR_PTR(ret); +} + +/** + * drm_client_buffer_vmap - Map DRM client buffer into address space + * @buffer: DRM client buffer + * + * This function maps a client buffer into kernel address space. If the + * buffer is already mapped, it returns the mapping's address. + * + * Client buffer mappings are not ref'counted. Each call to + * drm_client_buffer_vmap() should be followed by a call to + * drm_client_buffer_vunmap(); or the client buffer should be mapped + * throughout its lifetime. The latter is the default. 
+ * + * Returns: + * The mapped memory's address + */ +void *drm_client_buffer_vmap(struct drm_client_buffer *buffer) +{ + void *vaddr; + + if (buffer->vaddr) + return buffer->vaddr; + /* * FIXME: The dependency on GEM here isn't required, we could * convert the driver handle to a dma-buf instead and use the @@ -289,21 +325,31 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u * fd_install step out of the driver backend hooks, to make that * final step optional for internal users. */ - vaddr = drm_gem_vmap(obj); - if (IS_ERR(vaddr)) { - ret = PTR_ERR(vaddr); - goto err_delete; - } + vaddr = drm_gem_vmap(buffer->gem); + if (IS_ERR(vaddr)) + return vaddr; buffer->vaddr = vaddr; - return buffer; - -err_delete: - drm_client_buffer_delete(buffer); + return vaddr; +} +EXPORT_SYMBOL(drm_client_buffer_vmap); - return ERR_PTR(ret); +/** + * drm_client_buffer_vunmap - Unmap DRM client buffer + * @buffer: DRM client buffer + * + * This function removes a client buffer's memory mapping. This + * function is only required by clients that manage their buffers + * by themselves. By default, DRM client buffers are mapped throughout + * their entire lifetime. + */ +void drm_client_buffer_vunmap(struct drm_client_buffer *buffer) +{ + drm_gem_vunmap(buffer->gem, buffer->vaddr); + buffer->vaddr = NULL; } +EXPORT_SYMBOL(drm_client_buffer_vunmap); static void drm_client_buffer_rmfb(struct drm_client_buffer *buffer) { diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h index 72d51d1e9dd9..5cf2c5dd8b1e 100644 --- a/include/drm/drm_client.h +++ b/include/drm/drm_client.h @@ -149,6 +149,8 @@ struct drm_client_buffer { struct drm_client_buffer * drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format); void drm_client_framebuffer_delete(struct drm_client_buffer *buffer); +void *drm_client_buffer_vmap(struct drm_client_buffer *buffer); +void drm_client_buffer_vunmap(struct drm_client_buffer *buffer); int drm_client_modeset_create(struct drm_client_dev *client); void drm_client_modeset_free(struct drm_client_dev *client); -- cgit v1.2.3 From 87e281f88f3aa4ed401554f793685bcb2463580a Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Wed, 3 Jul 2019 09:58:24 +0200 Subject: drm/fb-helper: Map DRM client buffer only when required MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch changes DRM clients to not map the buffer by default. The buffer, like any buffer object, should be mapped and unmapped when needed. An unmapped buffer object can be evicted to system memory and does not consume video ram until displayed. This allows to use generic fbdev emulation with drivers for low-memory devices, such as ast and mgag200. This change affects the generic framebuffer console. HW-based consoles map their console buffer once and keep it mapped. Userspace can mmap this buffer into its address space. The shadow-buffered framebuffer console only needs the buffer object to be mapped during updates. While not being updated from the shadow buffer, the buffer object can remain unmapped. Userspace will always mmap the shadow buffer. 
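A minimal sketch of the calling pattern this implies (not part of the patch; example_flush_shadow, its parameters and the memcpy-based update are invented for illustration -- only drm_client_buffer_vmap()/drm_client_buffer_vunmap() come from this series): a client that manages its own mappings brackets each access to the real buffer object with a vmap/vunmap pair, so the buffer object stays evictable the rest of the time.

#include <drm/drm_client.h>
#include <linux/err.h>
#include <linux/string.h>

static int example_flush_shadow(struct drm_client_buffer *buffer,
				const void *shadow, size_t len)
{
	void *vaddr;

	/* Map the buffer object only for the duration of the update. */
	vaddr = drm_client_buffer_vmap(buffer);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memcpy(vaddr, shadow, len);

	/* Unmap again so the BO may be moved out of video memory. */
	drm_client_buffer_vunmap(buffer);

	return 0;
}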
v2: * change DRM client to not map buffer by default * manually map client buffer for fbdev with HW framebuffer Signed-off-by: Thomas Zimmermann Reviewed-by: Noralf Trønnes Link: https://patchwork.freedesktop.org/patch/315830/ Signed-off-by: Gerd Hoffmann --- drivers/gpu/drm/drm_client.c | 16 ++++------------ drivers/gpu/drm/drm_fb_helper.c | 33 +++++++++++++++++++++++++-------- 2 files changed, 29 insertions(+), 20 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c index fb107b24baae..e1dafb0cc5e2 100644 --- a/drivers/gpu/drm/drm_client.c +++ b/drivers/gpu/drm/drm_client.c @@ -254,7 +254,6 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u struct drm_device *dev = client->dev; struct drm_client_buffer *buffer; struct drm_gem_object *obj; - void *vaddr; int ret; buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); @@ -281,12 +280,6 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u buffer->gem = obj; - vaddr = drm_client_buffer_vmap(buffer); - if (IS_ERR(vaddr)) { - ret = PTR_ERR(vaddr); - goto err_delete; - } - return buffer; err_delete: @@ -305,7 +298,7 @@ err_delete: * Client buffer mappings are not ref'counted. Each call to * drm_client_buffer_vmap() should be followed by a call to * drm_client_buffer_vunmap(); or the client buffer should be mapped - * throughout its lifetime. The latter is the default. + * throughout its lifetime. * * Returns: * The mapped memory's address @@ -339,10 +332,9 @@ EXPORT_SYMBOL(drm_client_buffer_vmap); * drm_client_buffer_vunmap - Unmap DRM client buffer * @buffer: DRM client buffer * - * This function removes a client buffer's memory mapping. This - * function is only required by clients that manage their buffers - * by themselves. By default, DRM client buffers are mapped throughout - * their entire lifetime. + * This function removes a client buffer's memory mapping. Calling this + * function is only required by clients that manage their buffer mappings + * by themselves. 
*/ void drm_client_buffer_vunmap(struct drm_client_buffer *buffer) { diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 1984e5c54d58..7ba6a0255821 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -403,6 +403,7 @@ static void drm_fb_helper_dirty_work(struct work_struct *work) struct drm_clip_rect *clip = &helper->dirty_clip; struct drm_clip_rect clip_copy; unsigned long flags; + void *vaddr; spin_lock_irqsave(&helper->dirty_lock, flags); clip_copy = *clip; @@ -412,10 +413,18 @@ static void drm_fb_helper_dirty_work(struct work_struct *work) /* call dirty callback only when it has been really touched */ if (clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2) { + /* Generic fbdev uses a shadow buffer */ - if (helper->buffer) + if (helper->buffer) { + vaddr = drm_client_buffer_vmap(helper->buffer); + if (IS_ERR(vaddr)) + return; drm_fb_helper_dirty_blit_real(helper, &clip_copy); + } helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1); + + if (helper->buffer) + drm_client_buffer_vunmap(helper->buffer); } } @@ -2178,6 +2187,7 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper, struct drm_framebuffer *fb; struct fb_info *fbi; u32 format; + void *vaddr; DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n", sizes->surface_width, sizes->surface_height, @@ -2200,13 +2210,7 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper, fbi->fbops = &drm_fbdev_fb_ops; fbi->screen_size = fb->height * fb->pitches[0]; fbi->fix.smem_len = fbi->screen_size; - fbi->screen_buffer = buffer->vaddr; - /* Shamelessly leak the physical address to user-space */ -#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM) - if (drm_leak_fbdev_smem && fbi->fix.smem_start == 0) - fbi->fix.smem_start = - page_to_phys(virt_to_page(fbi->screen_buffer)); -#endif + drm_fb_helper_fill_info(fbi, fb_helper, sizes); if (fb->funcs->dirty) { @@ -2231,6 +2235,19 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper, fbi->fbdefio = &drm_fbdev_defio; fb_deferred_io_init(fbi); + } else { + /* buffer is mapped for HW framebuffer */ + vaddr = drm_client_buffer_vmap(fb_helper->buffer); + if (IS_ERR(vaddr)) + return PTR_ERR(vaddr); + + fbi->screen_buffer = vaddr; + /* Shamelessly leak the physical address to user-space */ +#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM) + if (drm_leak_fbdev_smem && fbi->fix.smem_start == 0) + fbi->fix.smem_start = + page_to_phys(virt_to_page(fbi->screen_buffer)); +#endif } return 0; -- cgit v1.2.3 From 01b947afaa940327e7adf57070a4bf3d0bed9810 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Fri, 5 Jul 2019 09:31:00 +0200 Subject: drm/fb-helper: Instanciate shadow FB if configured in device's mode_config MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Generic framebuffer emulation uses a shadow buffer for framebuffers with dirty() function. If drivers want to use the shadow FB without such a function, they can now set prefer_shadow or prefer_shadow_fbdev in their mode_config structures. The former flag is exported to userspace, the latter flag is fbdev-only. 
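For illustration, a driver that wants shadow-buffered fbdev without implementing a framebuffer dirty() callback only needs to set the new hint while building its mode config. This is a sketch, not a real driver: foo_mode_config_init() is a placeholder name, and the values mirror what the bochs patch later in this series does for real.

#include <drm/drm_device.h>
#include <drm/drm_mode_config.h>

static void foo_mode_config_init(struct drm_device *dev)
{
	drm_mode_config_init(dev);

	dev->mode_config.preferred_depth = 24;
	/* Not exposed to userspace ... */
	dev->mode_config.prefer_shadow = 0;
	/* ... but tell the fbdev emulation to render via a shadow FB. */
	dev->mode_config.prefer_shadow_fbdev = true;
}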
v3: * only schedule dirty worker if fbdev uses shadow fb * test shadow fb settings with boolean operators * use bool for struct drm_mode_config.prefer_shadow_fbdev * fix documentation comments Signed-off-by: Thomas Zimmermann Reviewed-by: Noralf Trønnes Tested-by: Noralf Trønnes Link: https://patchwork.freedesktop.org/patch/315834/ Signed-off-by: Gerd Hoffmann --- drivers/gpu/drm/drm_fb_helper.c | 18 +++++++++++++++--- include/drm/drm_mode_config.h | 7 +++++++ 2 files changed, 22 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 7ba6a0255821..a7ba5b4902d6 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -421,7 +421,9 @@ static void drm_fb_helper_dirty_work(struct work_struct *work) return; drm_fb_helper_dirty_blit_real(helper, &clip_copy); } - helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1); + if (helper->fb->funcs->dirty) + helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, + &clip_copy, 1); if (helper->buffer) drm_client_buffer_vunmap(helper->buffer); @@ -613,6 +615,16 @@ void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper) } EXPORT_SYMBOL(drm_fb_helper_unlink_fbi); +static bool drm_fbdev_use_shadow_fb(struct drm_fb_helper *fb_helper) +{ + struct drm_device *dev = fb_helper->dev; + struct drm_framebuffer *fb = fb_helper->fb; + + return dev->mode_config.prefer_shadow_fbdev || + dev->mode_config.prefer_shadow || + fb->funcs->dirty; +} + static void drm_fb_helper_dirty(struct fb_info *info, u32 x, u32 y, u32 width, u32 height) { @@ -620,7 +632,7 @@ static void drm_fb_helper_dirty(struct fb_info *info, u32 x, u32 y, struct drm_clip_rect *clip = &helper->dirty_clip; unsigned long flags; - if (!helper->fb->funcs->dirty) + if (!drm_fbdev_use_shadow_fb(helper)) return; spin_lock_irqsave(&helper->dirty_lock, flags); @@ -2213,7 +2225,7 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper, drm_fb_helper_fill_info(fbi, fb_helper, sizes); - if (fb->funcs->dirty) { + if (drm_fbdev_use_shadow_fb(fb_helper)) { struct fb_ops *fbops; void *shadow; diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h index 759d462d028b..f57eea0481e0 100644 --- a/include/drm/drm_mode_config.h +++ b/include/drm/drm_mode_config.h @@ -852,6 +852,13 @@ struct drm_mode_config { /* dumb ioctl parameters */ uint32_t preferred_depth, prefer_shadow; + /** + * @prefer_shadow_fbdev: + * + * Hint to framebuffer emulation to prefer shadow-fb rendering. + */ + bool prefer_shadow_fbdev; + /** * @quirk_addfb_prefer_xbgr_30bpp: * -- cgit v1.2.3 From 58540594570778fd149cd8c9b2bff61f2cefa8c9 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Wed, 3 Jul 2019 09:58:34 +0200 Subject: drm/bochs: Use shadow buffer for bochs framebuffer console MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The bochs driver (and virtual hardware) requires buffer objects to reside in video ram to display them to the screen. So it can not display the framebuffer console because the respective buffer object is permanently pinned in system memory. Using a shadow buffer for the console solves this problem. The console emulation will pin the buffer object only during updates from the shadow buffer. Otherwise, the bochs driver can freely relocated the buffer between system memory and video ram. 
v2: * select shadow FB via struct drm_mode_config.prefer_shadow_fbdev Signed-off-by: Thomas Zimmermann Acked-by: Noralf Trønnes Link: https://patchwork.freedesktop.org/patch/315833/ Signed-off-by: Gerd Hoffmann --- drivers/gpu/drm/bochs/bochs_kms.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c index bc19dbd531ef..359030d5d818 100644 --- a/drivers/gpu/drm/bochs/bochs_kms.c +++ b/drivers/gpu/drm/bochs/bochs_kms.c @@ -191,6 +191,7 @@ int bochs_kms_init(struct bochs_device *bochs) bochs->dev->mode_config.fb_base = bochs->fb_base; bochs->dev->mode_config.preferred_depth = 24; bochs->dev->mode_config.prefer_shadow = 0; + bochs->dev->mode_config.prefer_shadow_fbdev = 1; bochs->dev->mode_config.quirk_addfb_prefer_host_byte_order = true; bochs->dev->mode_config.funcs = &bochs_mode_funcs; -- cgit v1.2.3 From 2bab52af6fe68c43b327a57e5ce5fc10eefdfadf Mon Sep 17 00:00:00 2001 From: Brian Masney Date: Fri, 31 May 2019 05:46:15 -0400 Subject: drm/msm: add support for per-CRTC max_vblank_count on mdp5 The mdp5 drm/kms driver currently does not work on command-mode DSI panels due to 'vblank wait timed out' errors. This causes a latency of seconds, or tens of seconds in some cases, before content is shown on the panel. This hardware does not have the something that we can use as a frame counter available when running in command mode, so we need to fall back to using timestamps by setting the max_vblank_count to zero. This can be done on a per-CRTC basis, so the convert mdp5 to use drm_crtc_set_max_vblank_count(). This change was tested on a LG Nexus 5 (hammerhead) phone. Suggested-by: Jeffrey Hugo Reviewed-by: Jeffrey Hugo Signed-off-by: Brian Masney Signed-off-by: Sean Paul Link: https://patchwork.freedesktop.org/patch/msgid/20190531094619.31704-3-masneyb@onstation.org --- drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c | 16 +++++++++++++++- drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c | 2 +- 2 files changed, 16 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c index ff14555372d0..78d5fa230c16 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c @@ -439,6 +439,18 @@ static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc, mdp5_crtc->enabled = false; } +static void mdp5_crtc_vblank_on(struct drm_crtc *crtc) +{ + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); + struct mdp5_interface *intf = mdp5_cstate->pipeline.intf; + u32 count; + + count = intf->mode == MDP5_INTF_DSI_MODE_COMMAND ? 
0 : 0xffffffff; + drm_crtc_set_max_vblank_count(crtc, count); + + drm_crtc_vblank_on(crtc); +} + static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state) { @@ -475,7 +487,7 @@ static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc, } /* Restore vblank irq handling after power is enabled */ - drm_crtc_vblank_on(crtc); + mdp5_crtc_vblank_on(crtc); mdp5_crtc_mode_set_nofb(crtc); @@ -1028,6 +1040,8 @@ static void mdp5_crtc_reset(struct drm_crtc *crtc) mdp5_crtc_destroy_state(crtc, crtc->state); __drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base); + + drm_crtc_vblank_reset(crtc); } static const struct drm_crtc_funcs mdp5_crtc_funcs = { diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c index 4a60f5fca6b0..fec6ef1ae3b9 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c @@ -740,7 +740,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos; dev->driver->get_scanout_position = mdp5_get_scanoutpos; dev->driver->get_vblank_counter = mdp5_get_vblank_counter; - dev->max_vblank_count = 0xffffffff; + dev->max_vblank_count = 0; /* max_vblank_count is set on each CRTC */ dev->vblank_disable_immediate = true; return kms; -- cgit v1.2.3 From c14b5dce5ece48035cfd0aa951b39c69ad5056f4 Mon Sep 17 00:00:00 2001 From: Jordan Crouse Date: Thu, 25 Jul 2019 10:53:08 -0600 Subject: drm/msm: Annotate intentional switch statement fall throughs Explicitly mark intentional fall throughs in switch statements to keep -Wimplicit-fallthrough from complaining. Reviewed-by: Rob Clark Signed-off-by: Jordan Crouse Signed-off-by: Sean Paul Link: https://patchwork.freedesktop.org/patch/msgid/1564073588-27386-1-git-send-email-jcrouse@codeaurora.org --- drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 2 ++ drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 1 + drivers/gpu/drm/msm/adreno/adreno_gpu.c | 1 + 3 files changed, 4 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index 1671db47aa57..e9c55d1d6c04 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -59,6 +59,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: if (priv->lastctx == ctx) break; + /* fall-thru */ case MSM_SUBMIT_CMD_BUF: /* copy commands into RB: */ obj = submit->bos[submit->cmd[i].idx].obj; @@ -149,6 +150,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: if (priv->lastctx == ctx) break; + /* fall-thru */ case MSM_SUBMIT_CMD_BUF: OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3); OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index be39cf01e51e..dc8ec2c94301 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -115,6 +115,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: if (priv->lastctx == ctx) break; + /* fall-thru */ case MSM_SUBMIT_CMD_BUF: OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3); OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 9acbbc0f3232..048c8be426f3 100644 --- 
a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -428,6 +428,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, /* ignore if there has not been a ctx switch: */ if (priv->lastctx == ctx) break; + /* fall-thru */ case MSM_SUBMIT_CMD_BUF: OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ? CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2); -- cgit v1.2.3 From 8ef7e3409ceccec713ca8de6608817fb57954024 Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Tue, 30 Jul 2019 15:05:53 -0700 Subject: drm/i915/ehl: Don't forget to handle port C's hotplug interrupts MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We're mostly re-using ICL's interrupt handling on EHL, but we still need to remember to account for the extra combo port that EHL has. Use TGP's mask (which includes combo port C) rather than ICP's mask when appropriate. Let's also skip reading TC-specific registers on this platform since EHL doesn't have any TC ports. v2: Base setup of SHOTPLUG_CTL_TC on whether the tc pin mask is non-zero rather than performing another PCH type check. (Jose) Cc: José Roberto de Souza Cc: Vivek Kasireddy Cc: Lucas De Marchi Signed-off-by: Matt Roper Reviewed-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190730220553.15300-1-matthew.d.roper@intel.com --- drivers/gpu/drm/i915/i915_irq.c | 41 +++++++++++++++++++++++++++++++++++------ 1 file changed, 35 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index fbe13bacd5b7..33ac5d7e1e9e 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1746,6 +1746,8 @@ static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val) return val & ICP_DDIA_HPD_LONG_DETECT; case HPD_PORT_B: return val & ICP_DDIB_HPD_LONG_DETECT; + case HPD_PORT_C: + return val & TGP_DDIC_HPD_LONG_DETECT; default: return false; } @@ -2605,10 +2607,18 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir, const u32 *pins) { - u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP; - u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP; + u32 ddi_hotplug_trigger; + u32 tc_hotplug_trigger; u32 pin_mask = 0, long_mask = 0; + if (HAS_PCH_MCC(dev_priv)) { + ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP; + tc_hotplug_trigger = 0; + } else { + ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP; + tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP; + } + if (ddi_hotplug_trigger) { u32 dig_hotplug_reg; @@ -3920,9 +3930,11 @@ static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv, hotplug |= ddi_hotplug_enable_mask; I915_WRITE(SHOTPLUG_CTL_DDI, hotplug); - hotplug = I915_READ(SHOTPLUG_CTL_TC); - hotplug |= tc_hotplug_enable_mask; - I915_WRITE(SHOTPLUG_CTL_TC, hotplug); + if (tc_hotplug_enable_mask) { + hotplug = I915_READ(SHOTPLUG_CTL_TC); + hotplug |= tc_hotplug_enable_mask; + I915_WRITE(SHOTPLUG_CTL_TC, hotplug); + } } static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv) @@ -3938,6 +3950,18 @@ static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv) ICP_TC_HPD_ENABLE_MASK); } +static void mcc_hpd_irq_setup(struct drm_i915_private *dev_priv) +{ + u32 hotplug_irqs, enabled_irqs; + + hotplug_irqs = SDE_DDI_MASK_TGP; + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_mcc); + + ibx_display_interrupt_update(dev_priv, 
hotplug_irqs, enabled_irqs); + + icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, 0); +} + static void tgp_hpd_irq_setup(struct drm_i915_private *dev_priv) { u32 hotplug_irqs, enabled_irqs; @@ -4442,6 +4466,8 @@ static void icp_irq_postinstall(struct drm_i915_private *dev_priv) if (HAS_PCH_TGP(dev_priv)) icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, TGP_TC_HPD_ENABLE_MASK); + else if (HAS_PCH_MCC(dev_priv)) + icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, 0); else icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE_MASK); @@ -4974,7 +5000,10 @@ void intel_irq_init(struct drm_i915_private *dev_priv) if (I915_HAS_HOTPLUG(dev_priv)) dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; } else { - if (INTEL_GEN(dev_priv) >= 11) + if (HAS_PCH_MCC(dev_priv)) + /* EHL doesn't need most of gen11_hpd_irq_setup */ + dev_priv->display.hpd_irq_setup = mcc_hpd_irq_setup; + else if (INTEL_GEN(dev_priv) >= 11) dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup; else if (IS_GEN9_LP(dev_priv)) dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; -- cgit v1.2.3 From a1c9ca223c3df1b8993abedde777f5462165387c Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 30 Jul 2019 14:30:26 +0100 Subject: drm/i915: Remove lrc default desc from GEM context We only compute the lrc_descriptor() on pinning the context, i.e. infrequently, so we do not benefit from storing the template as the addressing mode is also fixed for the lifetime of the intel_context. Signed-off-by: Chris Wilson Reviewed-by: Prathap Kumar Valsan Acked-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190730133035.1977-9-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_context.c | 28 ++--------------------- drivers/gpu/drm/i915/gem/i915_gem_context_types.h | 2 -- drivers/gpu/drm/i915/gt/intel_lrc.c | 12 ++++++---- drivers/gpu/drm/i915/gvt/scheduler.c | 3 --- 4 files changed, 10 insertions(+), 35 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index eb2d28a37b58..64f7a533e886 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -397,30 +397,6 @@ static void context_close(struct i915_gem_context *ctx) i915_gem_context_put(ctx); } -static u32 default_desc_template(const struct drm_i915_private *i915, - const struct i915_address_space *vm) -{ - u32 address_mode; - u32 desc; - - desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE; - - address_mode = INTEL_LEGACY_32B_CONTEXT; - if (vm && i915_vm_is_4lvl(vm)) - address_mode = INTEL_LEGACY_64B_CONTEXT; - desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT; - - if (IS_GEN(i915, 8)) - desc |= GEN8_CTX_L3LLC_COHERENT; - - /* TODO: WaDisableLiteRestore when we start using semaphore - * signalling between Command Streamers - * ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE; - */ - - return desc; -} - static struct i915_gem_context * __create_context(struct drm_i915_private *i915) { @@ -459,7 +435,6 @@ __create_context(struct drm_i915_private *i915) i915_gem_context_set_recoverable(ctx); ctx->ring_size = 4 * PAGE_SIZE; - ctx->desc_template = default_desc_template(i915, NULL); for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++) ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES; @@ -478,8 +453,9 @@ __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm) struct i915_gem_engines_iter it; struct intel_context *ce; + GEM_BUG_ON(old && 
i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old)); + ctx->vm = i915_vm_get(vm); - ctx->desc_template = default_desc_template(ctx->i915, vm); for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { i915_vm_put(ce->vm); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h index 0ee61482ef94..a02d98494078 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h @@ -171,8 +171,6 @@ struct i915_gem_context { /** ring_size: size for allocating the per-engine ring buffer */ u32 ring_size; - /** desc_template: invariant fields for the HW context descriptor */ - u32 desc_template; /** guilty_count: How many times this context has caused a GPU hang. */ atomic_t guilty_count; diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 5181d29d272e..232f40fcb490 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -417,13 +417,17 @@ lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine) BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH))); BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > (BIT(GEN11_SW_CTX_ID_WIDTH))); - desc = ctx->desc_template; /* bits 0-11 */ - GEM_BUG_ON(desc & GENMASK_ULL(63, 12)); + desc = INTEL_LEGACY_32B_CONTEXT; + if (i915_vm_is_4lvl(ce->vm)) + desc = INTEL_LEGACY_64B_CONTEXT; + desc <<= GEN8_CTX_ADDRESSING_MODE_SHIFT; + + desc |= GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE; + if (IS_GEN(engine->i915, 8)) + desc |= GEN8_CTX_L3LLC_COHERENT; desc |= i915_ggtt_offset(ce->state) + LRC_HEADER_PAGES * PAGE_SIZE; /* bits 12-31 */ - GEM_BUG_ON(desc & GENMASK_ULL(63, 32)); - /* * The following 32bits are copied into the OA reports (dword 2). * Consider updating oa_get_render_ctx_id in i915_perf.c when changing diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index f68798ab1e7c..4c018fb1359c 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -291,9 +291,6 @@ shadow_context_descriptor_update(struct intel_context *ce, * Update bits 0-11 of the context descriptor which includes flags * like GEN8_CTX_* cached in desc_template */ - desc &= U64_MAX << 12; - desc |= ce->gem_context->desc_template & ((1ULL << 12) - 1); - desc &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT); desc |= workload->ctx_desc.addressing_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT; -- cgit v1.2.3 From 9ca7ad6c7706edeae331c1632d0c63897418ebad Mon Sep 17 00:00:00 2001 From: Jeffrey Hugo Date: Wed, 26 Jun 2019 11:00:15 -0700 Subject: drm: msm: Fix add_gpu_components add_gpu_components() adds found GPU nodes from the DT to the match list, regardless of the status of the nodes. This is a problem, because if the nodes are disabled, they should not be on the match list because they will not be matched. This prevents display from initing if a GPU node is defined, but it's status is disabled. Fix this by checking the node's status before adding it to the match list. 
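Sketch of the resulting logic (for illustration only; example_add_gpu_match and its compare-callback parameter are invented, the real two-line change to add_gpu_components() is in the hunk below): the GPU node is only advertised to the component framework when its devicetree status allows it to probe, since a node with status = "disabled" can never be matched and would keep the display stack from binding, as described above.

#include <drm/drm_of.h>
#include <linux/component.h>
#include <linux/of.h>

static void example_add_gpu_match(struct device *dev,
				  struct component_match **matchptr,
				  struct device_node *np,
				  int (*compare)(struct device *, void *))
{
	if (!np)
		return;

	/* Skip nodes carrying status = "disabled" in the devicetree. */
	if (of_device_is_available(np))
		drm_of_component_match_add(dev, matchptr, compare, np);

	of_node_put(np);
}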
Fixes: dc3ea265b856 (drm/msm: Drop the gpu binding) Reviewed-by: Rob Clark Signed-off-by: Jeffrey Hugo Signed-off-by: Sean Paul Link: https://patchwork.freedesktop.org/patch/msgid/20190626180015.45242-1-jeffrey.l.hugo@gmail.com --- drivers/gpu/drm/msm/msm_drv.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index c226156f2dea..c356f5ccf253 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -1279,7 +1279,8 @@ static int add_gpu_components(struct device *dev, if (!np) return 0; - drm_of_component_match_add(dev, matchptr, compare_of, np); + if (of_device_is_available(np)) + drm_of_component_match_add(dev, matchptr, compare_of, np); of_node_put(np); -- cgit v1.2.3 From 6cf72db68d20230380a7a37dae31943c4d1d6e07 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Wed, 31 Jul 2019 17:57:07 -0700 Subject: drm/i915/gt: Move gt_cleanup_early out of gem_cleanup_early We don't call the init_early function from within the gem code, so we shouldn't do it for the cleanup either. v2: while at it, s/gt_cleanup_early/gt_late_release (Chris) v3: s/late_release/driver_late_release/ (Chris) Signed-off-by: Daniele Ceraolo Spurio Cc: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin #v1 Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190801005709.34092-1-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/gt/intel_gt.c | 2 +- drivers/gpu/drm/i915/gt/intel_gt.h | 2 +- drivers/gpu/drm/i915/i915_drv.c | 2 ++ drivers/gpu/drm/i915/i915_gem.c | 2 -- 4 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index caa07eb20a64..720f0cab7aae 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -260,7 +260,7 @@ void intel_gt_fini_scratch(struct intel_gt *gt) i915_vma_unpin_and_release(>->scratch, 0); } -void intel_gt_cleanup_early(struct intel_gt *gt) +void intel_gt_driver_late_release(struct intel_gt *gt) { intel_gt_fini_reset(gt); } diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h index 640bb0531f5b..4920cb351f10 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.h +++ b/drivers/gpu/drm/i915/gt/intel_gt.h @@ -30,7 +30,7 @@ static inline struct intel_gt *huc_to_gt(struct intel_huc *huc) void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915); void intel_gt_init_hw(struct drm_i915_private *i915); -void intel_gt_cleanup_early(struct intel_gt *gt); +void intel_gt_driver_late_release(struct intel_gt *gt); void intel_gt_check_and_clear_faults(struct intel_gt *gt); void intel_gt_clear_error_registers(struct intel_gt *gt, diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 08c5504e040c..6381f7b1e858 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -951,6 +951,7 @@ err_uc: intel_uc_cleanup_early(&dev_priv->gt.uc); i915_gem_cleanup_early(dev_priv); err_workqueues: + intel_gt_driver_late_release(&dev_priv->gt); i915_workqueues_cleanup(dev_priv); return ret; } @@ -966,6 +967,7 @@ static void i915_driver_late_release(struct drm_i915_private *dev_priv) intel_power_domains_cleanup(dev_priv); intel_uc_cleanup_early(&dev_priv->gt.uc); i915_gem_cleanup_early(dev_priv); + intel_gt_driver_late_release(&dev_priv->gt); i915_workqueues_cleanup(dev_priv); 
pm_qos_remove_request(&dev_priv->sb_qos); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 62eefe860bcd..e779dba2e5a3 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1689,8 +1689,6 @@ void i915_gem_cleanup_early(struct drm_i915_private *dev_priv) GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count)); WARN_ON(dev_priv->mm.shrink_count); - intel_gt_cleanup_early(&dev_priv->gt); - i915_gemfs_fini(dev_priv); } -- cgit v1.2.3 From 6f76098fe0f3f0b519b2ad528b4319195d6d0f73 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Wed, 31 Jul 2019 17:57:08 -0700 Subject: drm/i915/uc: Move uC early functions inside the GT ones uC is a subcomponent of GT, so initialize/clean it as part of it. The wopcm_init_early doesn't have to be happen before the uC one, but since in other parts of the code we consider WOPCM first do the same for consistency. v2: s/cleanup_early/late_release to match the caller v3: s/late_release/driver_late_release/ (Chris) Signed-off-by: Daniele Ceraolo Spurio Cc: Michal Wajdeczko Cc: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Michal Wajdeczko #v1 Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190801005709.34092-2-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/gt/intel_gt.c | 2 ++ drivers/gpu/drm/i915/gt/uc/intel_uc.c | 2 +- drivers/gpu/drm/i915/gt/uc/intel_uc.h | 2 +- drivers/gpu/drm/i915/i915_drv.c | 10 ++++------ 4 files changed, 8 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index 720f0cab7aae..5a7d1a34e429 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -22,6 +22,7 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915) intel_gt_init_hangcheck(gt); intel_gt_init_reset(gt); intel_gt_pm_init_early(gt); + intel_uc_init_early(>->uc); } void intel_gt_init_hw(struct drm_i915_private *i915) @@ -262,5 +263,6 @@ void intel_gt_fini_scratch(struct intel_gt *gt) void intel_gt_driver_late_release(struct intel_gt *gt) { + intel_uc_driver_late_release(>->uc); intel_gt_fini_reset(gt); } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index 66b226be6759..0d07f9fc4317 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -137,7 +137,7 @@ void intel_uc_init_early(struct intel_uc *uc) sanitize_options_early(uc); } -void intel_uc_cleanup_early(struct intel_uc *uc) +void intel_uc_driver_late_release(struct intel_uc *uc) { guc_free_load_err_log(&uc->guc); } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_uc.h index 25da51e95417..d008791702c8 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.h @@ -34,7 +34,7 @@ struct intel_uc { }; void intel_uc_init_early(struct intel_uc *uc); -void intel_uc_cleanup_early(struct intel_uc *uc); +void intel_uc_driver_late_release(struct intel_uc *uc); void intel_uc_init_mmio(struct intel_uc *uc); void intel_uc_fetch_firmwares(struct intel_uc *uc); void intel_uc_cleanup_firmwares(struct intel_uc *uc); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 6381f7b1e858..1fde2357e167 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -921,6 +921,8 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv) if (ret < 
0) return ret; + intel_wopcm_init_early(&dev_priv->wopcm); + intel_gt_init_early(&dev_priv->gt, dev_priv); ret = i915_gem_init_early(dev_priv); @@ -930,13 +932,11 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv) /* This must be called before any calls to HAS_PCH_* */ intel_detect_pch(dev_priv); - intel_wopcm_init_early(&dev_priv->wopcm); - intel_uc_init_early(&dev_priv->gt.uc); intel_pm_setup(dev_priv); intel_init_dpio(dev_priv); ret = intel_power_domains_init(dev_priv); if (ret < 0) - goto err_uc; + goto err_gem; intel_irq_init(dev_priv); intel_init_display_hooks(dev_priv); intel_init_clock_gating_hooks(dev_priv); @@ -947,8 +947,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv) return 0; -err_uc: - intel_uc_cleanup_early(&dev_priv->gt.uc); +err_gem: i915_gem_cleanup_early(dev_priv); err_workqueues: intel_gt_driver_late_release(&dev_priv->gt); @@ -965,7 +964,6 @@ static void i915_driver_late_release(struct drm_i915_private *dev_priv) { intel_irq_fini(dev_priv); intel_power_domains_cleanup(dev_priv); - intel_uc_cleanup_early(&dev_priv->gt.uc); i915_gem_cleanup_early(dev_priv); intel_gt_driver_late_release(&dev_priv->gt); i915_workqueues_cleanup(dev_priv); -- cgit v1.2.3 From 9dfe3459ef4152ca4cf66c78cdd1ae5f58aaf2ae Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Wed, 31 Jul 2019 17:57:09 -0700 Subject: drm/i915/gt: Introduce intel_gt_runtime_suspend/resume To be called from the top level runtime functions, to hide the gt-specific bits (mainly related to intel_uc). v2: rebased Signed-off-by: Daniele Ceraolo Spurio Cc: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190801005709.34092-3-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/gt/intel_gt_pm.c | 12 ++++++++++++ drivers/gpu/drm/i915/gt/intel_gt_pm.h | 2 ++ drivers/gpu/drm/i915/i915_drv.c | 9 +++------ 3 files changed, 17 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index 65c0d0c9d543..6c8970271a7f 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -164,3 +164,15 @@ int intel_gt_resume(struct intel_gt *gt) return err; } + +void intel_gt_runtime_suspend(struct intel_gt *gt) +{ + intel_uc_runtime_suspend(>->uc); +} + +int intel_gt_runtime_resume(struct intel_gt *gt) +{ + intel_gt_init_swizzling(gt); + + return intel_uc_runtime_resume(>->uc); +} diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h index ba960e1fc209..527894fe1345 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h @@ -23,5 +23,7 @@ void intel_gt_pm_init_early(struct intel_gt *gt); void intel_gt_sanitize(struct intel_gt *gt, bool force); int intel_gt_resume(struct intel_gt *gt); +void intel_gt_runtime_suspend(struct intel_gt *gt); +int intel_gt_runtime_resume(struct intel_gt *gt); #endif /* INTEL_GT_PM_H */ diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 1fde2357e167..d6178f5c6b50 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -2925,7 +2925,7 @@ static int intel_runtime_suspend(struct device *kdev) */ i915_gem_runtime_suspend(dev_priv); - intel_uc_runtime_suspend(&dev_priv->gt.uc); + intel_gt_runtime_suspend(&dev_priv->gt); intel_runtime_pm_disable_interrupts(dev_priv); @@ -2950,9 
+2950,8 @@ static int intel_runtime_suspend(struct device *kdev) intel_runtime_pm_enable_interrupts(dev_priv); - intel_uc_runtime_resume(&dev_priv->gt.uc); + intel_gt_runtime_resume(&dev_priv->gt); - intel_gt_init_swizzling(&dev_priv->gt); i915_gem_restore_fences(dev_priv); enable_rpm_wakeref_asserts(rpm); @@ -3047,13 +3046,11 @@ static int intel_runtime_resume(struct device *kdev) intel_runtime_pm_enable_interrupts(dev_priv); - intel_uc_runtime_resume(&dev_priv->gt.uc); - /* * No point of rolling back things in case of an error, as the best * we can do is to hope that things will still work (and disable RPM). */ - intel_gt_init_swizzling(&dev_priv->gt); + intel_gt_runtime_resume(&dev_priv->gt); i915_gem_restore_fences(dev_priv); /* -- cgit v1.2.3 From 57a68c3507161bed25c97df8941fb45166acec99 Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Wed, 31 Jul 2019 22:33:18 +0000 Subject: drm/i915/uc: Rename intel_uc_is_using* into intel_uc_supports* Rename intel_uc_is_using* into intel_uc_supports* to make clear distinction from actual state (compare intel_uc_fw_is_running) Suggested-by: Chris Wilson Signed-off-by: Michal Wajdeczko Cc: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190731223321.36436-2-michal.wajdeczko@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_guc.c | 8 +++---- drivers/gpu/drm/i915/gt/uc/intel_huc.c | 2 +- drivers/gpu/drm/i915/gt/uc/intel_uc.c | 44 +++++++++++++++++----------------- drivers/gpu/drm/i915/gt/uc/intel_uc.h | 6 ++--- drivers/gpu/drm/i915/i915_drv.h | 4 ++-- 5 files changed, 32 insertions(+), 32 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index 13fbbffd05c7..f8fc34816e2c 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -144,7 +144,7 @@ static u32 guc_ctl_feature_flags(struct intel_guc *guc) { u32 flags = 0; - if (!intel_uc_is_using_guc_submission(&guc_to_gt(guc)->uc)) + if (!intel_uc_supports_guc_submission(&guc_to_gt(guc)->uc)) flags |= GUC_CTL_DISABLE_SCHEDULER; return flags; @@ -154,7 +154,7 @@ static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc) { u32 flags = 0; - if (intel_uc_is_using_guc_submission(&guc_to_gt(guc)->uc)) { + if (intel_uc_supports_guc_submission(&guc_to_gt(guc)->uc)) { u32 ctxnum, base; base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool); @@ -290,7 +290,7 @@ int intel_guc_init(struct intel_guc *guc) if (ret) goto err_ads; - if (intel_uc_is_using_guc_submission(>->uc)) { + if (intel_uc_supports_guc_submission(>->uc)) { /* * This is stuff we need to have available at fw load time * if we are planning to enable submission later @@ -329,7 +329,7 @@ void intel_guc_fini(struct intel_guc *guc) i915_ggtt_disable_guc(gt->ggtt); - if (intel_uc_is_using_guc_submission(>->uc)) + if (intel_uc_supports_guc_submission(>->uc)) intel_guc_submission_fini(guc); intel_guc_ct_fini(&guc->ct); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c index c9535caba844..d642b167a389 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c @@ -185,7 +185,7 @@ int intel_huc_check_status(struct intel_huc *huc) intel_wakeref_t wakeref; u32 status = 0; - if (!intel_uc_is_using_huc(>->uc)) + if (!intel_uc_supports_huc(>->uc)) return -ENODEV; with_intel_runtime_pm(>->i915->runtime_pm, wakeref) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c 
b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index 0d07f9fc4317..65ce20efdba8 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -95,11 +95,11 @@ static void sanitize_options_early(struct intel_uc *uc) DRM_DEBUG_DRIVER("enable_guc=%d (submission:%s huc:%s)\n", i915_modparams.enable_guc, - yesno(intel_uc_is_using_guc_submission(uc)), - yesno(intel_uc_is_using_huc(uc))); + yesno(intel_uc_supports_guc_submission(uc)), + yesno(intel_uc_supports_huc(uc))); /* Verify GuC firmware availability */ - if (intel_uc_is_using_guc(uc) && !intel_uc_fw_supported(guc_fw)) { + if (intel_uc_supports_guc(uc) && !intel_uc_fw_supported(guc_fw)) { DRM_WARN("Incompatible option detected: enable_guc=%d, " "but GuC is not supported!\n", i915_modparams.enable_guc); @@ -108,7 +108,7 @@ static void sanitize_options_early(struct intel_uc *uc) } /* Verify HuC firmware availability */ - if (intel_uc_is_using_huc(uc) && !intel_uc_fw_supported(huc_fw)) { + if (intel_uc_supports_huc(uc) && !intel_uc_fw_supported(huc_fw)) { DRM_WARN("Incompatible option detected: enable_guc=%d, " "but HuC is not supported!\n", i915_modparams.enable_guc); @@ -117,7 +117,7 @@ static void sanitize_options_early(struct intel_uc *uc) } /* XXX: GuC submission is unavailable for now */ - if (intel_uc_is_using_guc_submission(uc)) { + if (intel_uc_supports_guc_submission(uc)) { DRM_INFO("Incompatible option detected: enable_guc=%d, " "but GuC submission is not supported!\n", i915_modparams.enable_guc); @@ -309,21 +309,21 @@ void intel_uc_fetch_firmwares(struct intel_uc *uc) { struct drm_i915_private *i915 = uc_to_gt(uc)->i915; - if (!intel_uc_is_using_guc(uc)) + if (!intel_uc_supports_guc(uc)) return; intel_uc_fw_fetch(&uc->guc.fw, i915); - if (intel_uc_is_using_huc(uc)) + if (intel_uc_supports_huc(uc)) intel_uc_fw_fetch(&uc->huc.fw, i915); } void intel_uc_cleanup_firmwares(struct intel_uc *uc) { - if (!intel_uc_is_using_guc(uc)) + if (!intel_uc_supports_guc(uc)) return; - if (intel_uc_is_using_huc(uc)) + if (intel_uc_supports_huc(uc)) intel_uc_fw_cleanup_fetch(&uc->huc.fw); intel_uc_fw_cleanup_fetch(&uc->guc.fw); @@ -335,20 +335,20 @@ int intel_uc_init(struct intel_uc *uc) struct intel_huc *huc = &uc->huc; int ret; - if (!intel_uc_is_using_guc(uc)) + if (!intel_uc_supports_guc(uc)) return 0; if (!intel_uc_fw_supported(&guc->fw)) return -ENODEV; /* XXX: GuC submission is unavailable for now */ - GEM_BUG_ON(intel_uc_is_using_guc_submission(uc)); + GEM_BUG_ON(intel_uc_supports_guc_submission(uc)); ret = intel_guc_init(guc); if (ret) return ret; - if (intel_uc_is_using_huc(uc)) { + if (intel_uc_supports_huc(uc)) { ret = intel_huc_init(huc); if (ret) goto err_guc; @@ -365,12 +365,12 @@ void intel_uc_fini(struct intel_uc *uc) { struct intel_guc *guc = &uc->guc; - if (!intel_uc_is_using_guc(uc)) + if (!intel_uc_supports_guc(uc)) return; GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw)); - if (intel_uc_is_using_huc(uc)) + if (intel_uc_supports_huc(uc)) intel_huc_fini(&uc->huc); intel_guc_fini(guc); @@ -391,7 +391,7 @@ static void __uc_sanitize(struct intel_uc *uc) void intel_uc_sanitize(struct intel_uc *uc) { - if (!intel_uc_is_using_guc(uc)) + if (!intel_uc_supports_guc(uc)) return; __uc_sanitize(uc); @@ -404,11 +404,11 @@ static int uc_init_wopcm(struct intel_uc *uc) struct intel_uncore *uncore = gt->uncore; u32 base = intel_wopcm_guc_base(>->i915->wopcm); u32 size = intel_wopcm_guc_size(>->i915->wopcm); - u32 huc_agent = intel_uc_is_using_huc(uc) ? 
HUC_LOADING_AGENT_GUC : 0; + u32 huc_agent = intel_uc_supports_huc(uc) ? HUC_LOADING_AGENT_GUC : 0; u32 mask; int err; - GEM_BUG_ON(!intel_uc_is_using_guc(uc)); + GEM_BUG_ON(!intel_uc_supports_guc(uc)); GEM_BUG_ON(!(base & GUC_WOPCM_OFFSET_MASK)); GEM_BUG_ON(base & ~GUC_WOPCM_OFFSET_MASK); GEM_BUG_ON(!(size & GUC_WOPCM_SIZE_MASK)); @@ -447,7 +447,7 @@ int intel_uc_init_hw(struct intel_uc *uc) struct intel_huc *huc = &uc->huc; int ret, attempts; - if (!intel_uc_is_using_guc(uc)) + if (!intel_uc_supports_guc(uc)) return 0; GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw)); @@ -474,7 +474,7 @@ int intel_uc_init_hw(struct intel_uc *uc) if (ret) goto err_out; - if (intel_uc_is_using_huc(uc)) { + if (intel_uc_supports_huc(uc)) { ret = intel_huc_fw_upload(huc); if (ret && intel_uc_fw_is_overridden(&huc->fw)) goto err_out; @@ -508,7 +508,7 @@ int intel_uc_init_hw(struct intel_uc *uc) if (ret) goto err_communication; - if (intel_uc_is_using_guc_submission(uc)) { + if (intel_uc_supports_guc_submission(uc)) { ret = intel_guc_submission_enable(guc); if (ret) goto err_communication; @@ -517,7 +517,7 @@ int intel_uc_init_hw(struct intel_uc *uc) dev_info(i915->drm.dev, "GuC firmware version %u.%u\n", guc->fw.major_ver_found, guc->fw.minor_ver_found); dev_info(i915->drm.dev, "GuC submission %s\n", - enableddisabled(intel_uc_is_using_guc_submission(uc))); + enableddisabled(intel_uc_supports_guc_submission(uc))); dev_info(i915->drm.dev, "HuC %s\n", enableddisabled(intel_huc_is_authenticated(huc))); @@ -553,7 +553,7 @@ void intel_uc_fini_hw(struct intel_uc *uc) GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw)); - if (intel_uc_is_using_guc_submission(uc)) + if (intel_uc_supports_guc_submission(uc)) intel_guc_submission_disable(guc); guc_disable_communication(guc); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_uc.h index d008791702c8..7ccf6cfa984c 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.h @@ -49,19 +49,19 @@ void intel_uc_runtime_suspend(struct intel_uc *uc); int intel_uc_resume(struct intel_uc *uc); int intel_uc_runtime_resume(struct intel_uc *uc); -static inline bool intel_uc_is_using_guc(struct intel_uc *uc) +static inline bool intel_uc_supports_guc(struct intel_uc *uc) { GEM_BUG_ON(i915_modparams.enable_guc < 0); return i915_modparams.enable_guc > 0; } -static inline bool intel_uc_is_using_guc_submission(struct intel_uc *uc) +static inline bool intel_uc_supports_guc_submission(struct intel_uc *uc) { GEM_BUG_ON(i915_modparams.enable_guc < 0); return i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION; } -static inline bool intel_uc_is_using_huc(struct intel_uc *uc) +static inline bool intel_uc_supports_huc(struct intel_uc *uc) { GEM_BUG_ON(i915_modparams.enable_guc < 0); return i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 3ee11b27ad91..7ca95136d302 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2275,8 +2275,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_GT_UC(dev_priv) (INTEL_INFO(dev_priv)->has_gt_uc) /* Having GuC is not the same as using GuC */ -#define USES_GUC(dev_priv) intel_uc_is_using_guc(&(dev_priv)->gt.uc) -#define USES_GUC_SUBMISSION(dev_priv) intel_uc_is_using_guc_submission(&(dev_priv)->gt.uc) +#define USES_GUC(dev_priv) intel_uc_supports_guc(&(dev_priv)->gt.uc) +#define USES_GUC_SUBMISSION(dev_priv) intel_uc_supports_guc_submission(&(dev_priv)->gt.uc) #define 
HAS_POOLED_EU(dev_priv) (INTEL_INFO(dev_priv)->has_pooled_eu) -- cgit v1.2.3 From db81bc6eb9c01598782599ecf6797adfb167f801 Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Wed, 31 Jul 2019 22:33:19 +0000 Subject: drm/i915/uc: Consider enable_guc modparam during fw selection We can use value of enable_guc modparam during firmware path selection and start using firmware status to see if GuC/HuC is being used. This is first step to make enable_guc modparam read-only. v2: rebased, don't care about <0 (Chris) v3: oops Signed-off-by: Michal Wajdeczko Cc: Daniele Ceraolo Spurio Cc: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190731223321.36436-3-michal.wajdeczko@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_guc.h | 5 +++++ drivers/gpu/drm/i915/gt/uc/intel_huc.h | 5 +++++ drivers/gpu/drm/i915/gt/uc/intel_uc.h | 6 ++---- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 23 +++++++++++++++++++++-- 4 files changed, 33 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h index 714e9892aaff..5901506672cd 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h @@ -172,6 +172,11 @@ int intel_guc_suspend(struct intel_guc *guc); int intel_guc_resume(struct intel_guc *guc); struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size); +static inline bool intel_guc_is_supported(struct intel_guc *guc) +{ + return intel_uc_fw_supported(&guc->fw); +} + static inline bool intel_guc_is_running(struct intel_guc *guc) { return intel_uc_fw_is_running(&guc->fw); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h index 4465209ce233..a6ae59b8cb77 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h @@ -55,6 +55,11 @@ static inline int intel_huc_sanitize(struct intel_huc *huc) return 0; } +static inline bool intel_huc_is_supported(struct intel_huc *huc) +{ + return intel_uc_fw_supported(&huc->fw); +} + static inline bool intel_huc_is_authenticated(struct intel_huc *huc) { return intel_uc_fw_is_running(&huc->fw); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_uc.h index 7ccf6cfa984c..a81c016ff078 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.h @@ -51,8 +51,7 @@ int intel_uc_runtime_resume(struct intel_uc *uc); static inline bool intel_uc_supports_guc(struct intel_uc *uc) { - GEM_BUG_ON(i915_modparams.enable_guc < 0); - return i915_modparams.enable_guc > 0; + return intel_guc_is_supported(&uc->guc); } static inline bool intel_uc_supports_guc_submission(struct intel_uc *uc) @@ -63,8 +62,7 @@ static inline bool intel_uc_supports_guc_submission(struct intel_uc *uc) static inline bool intel_uc_supports_huc(struct intel_uc *uc) { - GEM_BUG_ON(i915_modparams.enable_guc < 0); - return i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC; + return intel_huc_is_supported(&uc->huc); } #endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c index ac91e3efd02b..650ad6037b74 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -132,6 +132,25 @@ __uc_fw_auto_select(struct intel_uc_fw *uc_fw, enum intel_platform p, u8 rev) uc_fw->path = NULL; } } + + /* We don't want to enable GuC/HuC on pre-Gen11 by default */ + if (i915_modparams.enable_guc == -1 
&& p < INTEL_ICELAKE) + uc_fw->path = NULL; +} + +static const char *__override_guc_firmware_path(void) +{ + if (i915_modparams.enable_guc & (ENABLE_GUC_SUBMISSION | + ENABLE_GUC_LOAD_HUC)) + return i915_modparams.guc_firmware_path; + return ""; +} + +static const char *__override_huc_firmware_path(void) +{ + if (i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC) + return i915_modparams.huc_firmware_path; + return ""; } static bool @@ -139,10 +158,10 @@ __uc_fw_override(struct intel_uc_fw *uc_fw) { switch (uc_fw->type) { case INTEL_UC_FW_TYPE_GUC: - uc_fw->path = i915_modparams.guc_firmware_path; + uc_fw->path = __override_guc_firmware_path(); break; case INTEL_UC_FW_TYPE_HUC: - uc_fw->path = i915_modparams.huc_firmware_path; + uc_fw->path = __override_huc_firmware_path(); break; } -- cgit v1.2.3 From 724df646c80374f0d6e76c160a248547a042082b Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Wed, 31 Jul 2019 22:33:20 +0000 Subject: drm/i915/guc: Use dedicated flag to track submission mode Instead of relying on enable_guc modparam to represent actual GuC submission mode, use dedicated flag and look at modparam only to check if submission was explicitly disabled by the user. v2: rebased, simplified condition (Chris) Signed-off-by: Michal Wajdeczko Cc: Daniele Ceraolo Spurio Cc: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190731223321.36436-4-michal.wajdeczko@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_guc.c | 1 + drivers/gpu/drm/i915/gt/uc/intel_guc.h | 7 +++++++ drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 16 ++++++++++++++++ drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h | 1 + drivers/gpu/drm/i915/gt/uc/intel_uc.h | 3 +-- 5 files changed, 26 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index f8fc34816e2c..da14f8067497 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -82,6 +82,7 @@ void intel_guc_init_early(struct intel_guc *guc) intel_guc_fw_init_early(guc); intel_guc_ct_init_early(&guc->ct); intel_guc_log_init_early(&guc->log); + intel_guc_submission_init_early(guc); mutex_init(&guc->send_mutex); spin_lock_init(&guc->irq_lock); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h index 5901506672cd..6edb29b9ceaa 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h @@ -61,6 +61,8 @@ struct intel_guc { void (*disable)(struct intel_guc *guc); } interrupts; + bool submission_supported; + struct i915_vma *ads_vma; struct __guc_ads_blob *ads_blob; @@ -190,6 +192,11 @@ static inline int intel_guc_sanitize(struct intel_guc *guc) return 0; } +static inline bool intel_guc_is_submission_supported(struct intel_guc *guc) +{ + return guc->submission_supported; +} + static inline void intel_guc_enable_msg(struct intel_guc *guc, u32 mask) { spin_lock_irq(&guc->irq_lock); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index b4238fe16a03..b4b508f19a1c 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -1163,6 +1163,22 @@ void intel_guc_submission_disable(struct intel_guc *guc) guc_clients_disable(guc); } +static bool __guc_submission_support(struct intel_guc *guc) +{ + /* XXX: GuC submission is unavailable for now */ + return false; + + if 
(!intel_guc_is_supported(guc)) + return false; + + return i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION; +} + +void intel_guc_submission_init_early(struct intel_guc *guc) +{ + guc->submission_supported = __guc_submission_support(guc); +} + #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "selftest_guc.c" #endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h index 87a38cb6faf3..c4ad2702ec8d 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h @@ -77,6 +77,7 @@ struct intel_guc_client { I915_SELFTEST_DECLARE(bool use_nop_wqi); }; +void intel_guc_submission_init_early(struct intel_guc *guc); int intel_guc_submission_init(struct intel_guc *guc); int intel_guc_submission_enable(struct intel_guc *guc); void intel_guc_submission_disable(struct intel_guc *guc); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_uc.h index a81c016ff078..0cca839422e2 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.h @@ -56,8 +56,7 @@ static inline bool intel_uc_supports_guc(struct intel_uc *uc) static inline bool intel_uc_supports_guc_submission(struct intel_uc *uc) { - GEM_BUG_ON(i915_modparams.enable_guc < 0); - return i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION; + return intel_guc_is_submission_supported(&uc->guc); } static inline bool intel_uc_supports_huc(struct intel_uc *uc) -- cgit v1.2.3 From 01158da721c5fd5f321cfd7e3e955fbd83ba3124 Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Thu, 1 Aug 2019 13:28:40 +0000 Subject: drm/i915/uc: Stop sanitizing enable_guc modparam As we already track GuC/HuC uses by other means than modparam there is no point in sanitizing it. Just scan modparam for major discrepancies between what was requested vs actual. v2: rebased, reworded info messages v3: oops Signed-off-by: Michal Wajdeczko Cc: Daniele Ceraolo Spurio Cc: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190801132840.33176-1-michal.wajdeczko@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_uc.c | 92 +++++++++++------------------------ 1 file changed, 28 insertions(+), 64 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index 65ce20efdba8..d1b08b28b1ad 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -55,78 +55,42 @@ static int __intel_uc_reset_hw(struct intel_uc *uc) return ret; } -static int __get_platform_enable_guc(struct intel_uc *uc) +static void __confirm_options(struct intel_uc *uc) { - struct intel_uc_fw *guc_fw = &uc->guc.fw; - struct intel_uc_fw *huc_fw = &uc->huc.fw; - int enable_guc = 0; - - if (!HAS_GT_UC(uc_to_gt(uc)->i915)) - return 0; - - /* We don't want to enable GuC/HuC on pre-Gen11 by default */ - if (INTEL_GEN(uc_to_gt(uc)->i915) < 11) - return 0; - - if (intel_uc_fw_supported(guc_fw) && intel_uc_fw_supported(huc_fw)) - enable_guc |= ENABLE_GUC_LOAD_HUC; - - return enable_guc; -} - -/** - * sanitize_options_early - sanitize uC related modparam options - * @uc: the intel_uc structure - * - * In case of "enable_guc" option this function will attempt to modify - * it only if it was initially set to "auto(-1)". Default value for this - * modparam varies between platforms and it is hardcoded in driver code. 
- * Any other modparam value is only monitored against availability of the - * related hardware or firmware definitions. - */ -static void sanitize_options_early(struct intel_uc *uc) -{ - struct intel_uc_fw *guc_fw = &uc->guc.fw; - struct intel_uc_fw *huc_fw = &uc->huc.fw; - - /* A negative value means "use platform default" */ - if (i915_modparams.enable_guc < 0) - i915_modparams.enable_guc = __get_platform_enable_guc(uc); - - DRM_DEBUG_DRIVER("enable_guc=%d (submission:%s huc:%s)\n", + DRM_DEBUG_DRIVER("enable_guc=%d (guc:%s submission:%s huc:%s)\n", i915_modparams.enable_guc, + yesno(intel_uc_supports_guc(uc)), yesno(intel_uc_supports_guc_submission(uc)), yesno(intel_uc_supports_huc(uc))); - /* Verify GuC firmware availability */ - if (intel_uc_supports_guc(uc) && !intel_uc_fw_supported(guc_fw)) { - DRM_WARN("Incompatible option detected: enable_guc=%d, " - "but GuC is not supported!\n", - i915_modparams.enable_guc); - DRM_INFO("Disabling GuC/HuC loading!\n"); - i915_modparams.enable_guc = 0; - } + if (i915_modparams.enable_guc == -1) + return; - /* Verify HuC firmware availability */ - if (intel_uc_supports_huc(uc) && !intel_uc_fw_supported(huc_fw)) { - DRM_WARN("Incompatible option detected: enable_guc=%d, " - "but HuC is not supported!\n", - i915_modparams.enable_guc); - DRM_INFO("Disabling HuC loading!\n"); - i915_modparams.enable_guc &= ~ENABLE_GUC_LOAD_HUC; + if (i915_modparams.enable_guc == 0) { + GEM_BUG_ON(intel_uc_supports_guc(uc)); + GEM_BUG_ON(intel_uc_supports_guc_submission(uc)); + GEM_BUG_ON(intel_uc_supports_huc(uc)); + return; } - /* XXX: GuC submission is unavailable for now */ - if (intel_uc_supports_guc_submission(uc)) { - DRM_INFO("Incompatible option detected: enable_guc=%d, " - "but GuC submission is not supported!\n", - i915_modparams.enable_guc); - DRM_INFO("Switching to non-GuC submission mode!\n"); - i915_modparams.enable_guc &= ~ENABLE_GUC_SUBMISSION; - } + if (!intel_uc_supports_guc(uc)) + DRM_INFO("Incompatible option enable_guc=%d - %s\n", + i915_modparams.enable_guc, "GuC is not supported!"); + + if (i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC && + !intel_uc_supports_huc(uc)) + DRM_INFO("Incompatible option enable_guc=%d - %s\n", + i915_modparams.enable_guc, "HuC is not supported!"); + + if (i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION && + !intel_uc_supports_guc_submission(uc)) + DRM_INFO("Incompatible option enable_guc=%d - %s\n", + i915_modparams.enable_guc, "GuC submission is N/A"); - /* Make sure that sanitization was done */ - GEM_BUG_ON(i915_modparams.enable_guc < 0); + if (i915_modparams.enable_guc & ~(ENABLE_GUC_SUBMISSION | + ENABLE_GUC_LOAD_HUC)) + DRM_INFO("Incompatible option enable_guc=%d - %s\n", + i915_modparams.enable_guc, "undocumented flag"); } void intel_uc_init_early(struct intel_uc *uc) @@ -134,7 +98,7 @@ void intel_uc_init_early(struct intel_uc *uc) intel_guc_init_early(&uc->guc); intel_huc_init_early(&uc->huc); - sanitize_options_early(uc); + __confirm_options(uc); } void intel_uc_driver_late_release(struct intel_uc *uc) -- cgit v1.2.3 From 5d571068f71ebf41f434fa3f9b4ebcc9849d5efe Mon Sep 17 00:00:00 2001 From: José Roberto de Souza Date: Thu, 25 Jul 2019 17:24:10 -0700 Subject: drm/i915/tgl: Add and use new DC5 and DC6 residency counter registers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Tiger Lake has a new register offset for DC5 and DC6 residency counters. 
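Condensed from the i915_dmc_info() and i915_reg.h hunks in the diff below (no code beyond what the patch itself adds), the per-platform register selection amounts to roughly:

    i915_reg_t dc5_reg, dc6_reg = {};

    if (INTEL_GEN(dev_priv) >= 12) {
            dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;      /* 0x101084 */
            dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;      /* 0x101088 */
    } else {
            dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
                                             SKL_CSR_DC3_DC5_COUNT;
            if (!IS_GEN9_LP(dev_priv))
                    dc6_reg = SKL_CSR_DC5_DC6_COUNT;
    }

    seq_printf(m, "DC3 -> DC5 count: %d\n", I915_READ(dc5_reg));
    if (dc6_reg.reg)
            seq_printf(m, "DC5 -> DC6 count: %d\n", I915_READ(dc6_reg));
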
v2: - Rename registers since they are not in the CSR memory range (requested by Anshuman) - Fix type (requested by Matthew) Signed-off-by: José Roberto de Souza Signed-off-by: Lucas De Marchi Reviewed-by: Anshuman Gupta Link: https://patchwork.freedesktop.org/patch/msgid/20190726002412.5827-2-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/i915_debugfs.c | 21 +++++++++++++-------- drivers/gpu/drm/i915/i915_reg.h | 2 ++ 2 files changed, 15 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 24787bb48c9f..6dbd85b38759 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -2465,6 +2465,7 @@ static int i915_dmc_info(struct seq_file *m, void *unused) struct drm_i915_private *dev_priv = node_to_i915(m->private); intel_wakeref_t wakeref; struct intel_csr *csr; + i915_reg_t dc5_reg, dc6_reg = {}; if (!HAS_CSR(dev_priv)) return -ENODEV; @@ -2482,15 +2483,19 @@ static int i915_dmc_info(struct seq_file *m, void *unused) seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version), CSR_VERSION_MINOR(csr->version)); - if (WARN_ON(INTEL_GEN(dev_priv) > 11)) - goto out; + if (INTEL_GEN(dev_priv) >= 12) { + dc5_reg = TGL_DMC_DEBUG_DC5_COUNT; + dc6_reg = TGL_DMC_DEBUG_DC6_COUNT; + } else { + dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT : + SKL_CSR_DC3_DC5_COUNT; + if (!IS_GEN9_LP(dev_priv)) + dc6_reg = SKL_CSR_DC5_DC6_COUNT; + } - seq_printf(m, "DC3 -> DC5 count: %d\n", - I915_READ(IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT : - SKL_CSR_DC3_DC5_COUNT)); - if (!IS_GEN9_LP(dev_priv)) - seq_printf(m, "DC5 -> DC6 count: %d\n", - I915_READ(SKL_CSR_DC5_DC6_COUNT)); + seq_printf(m, "DC3 -> DC5 count: %d\n", I915_READ(dc5_reg)); + if (dc6_reg.reg) + seq_printf(m, "DC5 -> DC6 count: %d\n", I915_READ(dc6_reg)); out: seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0))); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index c5187a58d3c9..d760830cfd7b 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -7271,6 +7271,8 @@ enum { #define SKL_CSR_DC3_DC5_COUNT _MMIO(0x80030) #define SKL_CSR_DC5_DC6_COUNT _MMIO(0x8002C) #define BXT_CSR_DC3_DC5_COUNT _MMIO(0x80038) +#define TGL_DMC_DEBUG_DC5_COUNT _MMIO(0x101084) +#define TGL_DMC_DEBUG_DC6_COUNT _MMIO(0x101088) /* interrupts */ #define DE_MASTER_IRQ_CONTROL (1 << 31) -- cgit v1.2.3 From 2b92a82fe03bd426a90b82d69fe1c340ac6b9d74 Mon Sep 17 00:00:00 2001 From: Jordan Justen Date: Thu, 25 Jul 2019 17:24:11 -0700 Subject: drm/i915/tgl: allow the reg_read ioctl to read the RCS TIMESTAMP register This enables the Mesa driver to advertise support for ARB_timer_query, and thus an OpenGL version higher than 3.2. Based on the ICL patch by Paulo Zanoni and CNL patch by Nanley Chery. 
Cc: Joonas Lahtinen Cc: Rodrigo Vivi Signed-off-by: Jordan Justen Signed-off-by: Lucas De Marchi Reviewed-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20190726002412.5827-3-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/intel_uncore.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 475ab3d4d91d..2b839acfa0f6 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -1776,7 +1776,7 @@ static const struct reg_whitelist { } reg_read_whitelist[] = { { .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE), .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE), - .gen_mask = INTEL_GEN_MASK(4, 11), + .gen_mask = INTEL_GEN_MASK(4, 12), .size = 8 } }; -- cgit v1.2.3 From 908091c85003d00742b215c996646e37c750f88b Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Thu, 1 Aug 2019 17:23:27 +0100 Subject: drm/i915/pmu: Make more struct i915_pmu centric Just tidy the code a bit by removing a sea of overly verbose i915->pmu.*. Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190801162330.2729-1-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_pmu.c | 194 +++++++++++++++++++++------------------- 1 file changed, 104 insertions(+), 90 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index eff86483bec0..12008966b00e 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -74,8 +74,9 @@ static unsigned int event_enabled_bit(struct perf_event *event) return config_enabled_bit(event->attr.config); } -static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active) +static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active) { + struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu); u64 enable; /* @@ -83,7 +84,7 @@ static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active) * * We start with a bitmask of all currently enabled events. */ - enable = i915->pmu.enable; + enable = pmu->enable; /* * Mask out all the ones which do not need the timer, or in @@ -114,24 +115,26 @@ static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active) void i915_pmu_gt_parked(struct drm_i915_private *i915) { - if (!i915->pmu.base.event_init) + struct i915_pmu *pmu = &i915->pmu; + + if (!pmu->base.event_init) return; - spin_lock_irq(&i915->pmu.lock); + spin_lock_irq(&pmu->lock); /* * Signal sampling timer to stop if only engine events are enabled and * GPU went idle. 
*/ - i915->pmu.timer_enabled = pmu_needs_timer(i915, false); - spin_unlock_irq(&i915->pmu.lock); + pmu->timer_enabled = pmu_needs_timer(pmu, false); + spin_unlock_irq(&pmu->lock); } -static void __i915_pmu_maybe_start_timer(struct drm_i915_private *i915) +static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu) { - if (!i915->pmu.timer_enabled && pmu_needs_timer(i915, true)) { - i915->pmu.timer_enabled = true; - i915->pmu.timer_last = ktime_get(); - hrtimer_start_range_ns(&i915->pmu.timer, + if (!pmu->timer_enabled && pmu_needs_timer(pmu, true)) { + pmu->timer_enabled = true; + pmu->timer_last = ktime_get(); + hrtimer_start_range_ns(&pmu->timer, ns_to_ktime(PERIOD), 0, HRTIMER_MODE_REL_PINNED); } @@ -139,15 +142,17 @@ static void __i915_pmu_maybe_start_timer(struct drm_i915_private *i915) void i915_pmu_gt_unparked(struct drm_i915_private *i915) { - if (!i915->pmu.base.event_init) + struct i915_pmu *pmu = &i915->pmu; + + if (!pmu->base.event_init) return; - spin_lock_irq(&i915->pmu.lock); + spin_lock_irq(&pmu->lock); /* * Re-enable sampling timer when GPU goes active. */ - __i915_pmu_maybe_start_timer(i915); - spin_unlock_irq(&i915->pmu.lock); + __i915_pmu_maybe_start_timer(pmu); + spin_unlock_irq(&pmu->lock); } static void @@ -251,15 +256,16 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer) { struct drm_i915_private *i915 = container_of(hrtimer, struct drm_i915_private, pmu.timer); + struct i915_pmu *pmu = &i915->pmu; unsigned int period_ns; ktime_t now; - if (!READ_ONCE(i915->pmu.timer_enabled)) + if (!READ_ONCE(pmu->timer_enabled)) return HRTIMER_NORESTART; now = ktime_get(); - period_ns = ktime_to_ns(ktime_sub(now, i915->pmu.timer_last)); - i915->pmu.timer_last = now; + period_ns = ktime_to_ns(ktime_sub(now, pmu->timer_last)); + pmu->timer_last = now; /* * Strictly speaking the passed in period may not be 100% accurate for @@ -443,6 +449,7 @@ static u64 get_rc6(struct drm_i915_private *i915) { #if IS_ENABLED(CONFIG_PM) struct intel_runtime_pm *rpm = &i915->runtime_pm; + struct i915_pmu *pmu = &i915->pmu; intel_wakeref_t wakeref; unsigned long flags; u64 val; @@ -458,16 +465,16 @@ static u64 get_rc6(struct drm_i915_private *i915) * previously. */ - spin_lock_irqsave(&i915->pmu.lock, flags); + spin_lock_irqsave(&pmu->lock, flags); - if (val >= i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) { - i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0; - i915->pmu.sample[__I915_SAMPLE_RC6].cur = val; + if (val >= pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) { + pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0; + pmu->sample[__I915_SAMPLE_RC6].cur = val; } else { - val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur; + val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur; } - spin_unlock_irqrestore(&i915->pmu.lock, flags); + spin_unlock_irqrestore(&pmu->lock, flags); } else { struct device *kdev = rpm->kdev; @@ -478,7 +485,7 @@ static u64 get_rc6(struct drm_i915_private *i915) * on top of the last known real value, as the approximated RC6 * counter value. 
*/ - spin_lock_irqsave(&i915->pmu.lock, flags); + spin_lock_irqsave(&pmu->lock, flags); /* * After the above branch intel_runtime_pm_get_if_in_use failed @@ -494,20 +501,20 @@ static u64 get_rc6(struct drm_i915_private *i915) if (pm_runtime_status_suspended(kdev)) { val = pm_runtime_suspended_time(kdev); - if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) - i915->pmu.suspended_time_last = val; + if (!pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) + pmu->suspended_time_last = val; - val -= i915->pmu.suspended_time_last; - val += i915->pmu.sample[__I915_SAMPLE_RC6].cur; + val -= pmu->suspended_time_last; + val += pmu->sample[__I915_SAMPLE_RC6].cur; - i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val; - } else if (i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) { - val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur; + pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val; + } else if (pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) { + val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur; } else { - val = i915->pmu.sample[__I915_SAMPLE_RC6].cur; + val = pmu->sample[__I915_SAMPLE_RC6].cur; } - spin_unlock_irqrestore(&i915->pmu.lock, flags); + spin_unlock_irqrestore(&pmu->lock, flags); } return val; @@ -520,6 +527,7 @@ static u64 __i915_pmu_event_read(struct perf_event *event) { struct drm_i915_private *i915 = container_of(event->pmu, typeof(*i915), pmu.base); + struct i915_pmu *pmu = &i915->pmu; u64 val = 0; if (is_engine_event(event)) { @@ -542,12 +550,12 @@ static u64 __i915_pmu_event_read(struct perf_event *event) switch (event->attr.config) { case I915_PMU_ACTUAL_FREQUENCY: val = - div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_ACT].cur, + div_u64(pmu->sample[__I915_SAMPLE_FREQ_ACT].cur, USEC_PER_SEC /* to MHz */); break; case I915_PMU_REQUESTED_FREQUENCY: val = - div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_REQ].cur, + div_u64(pmu->sample[__I915_SAMPLE_FREQ_REQ].cur, USEC_PER_SEC /* to MHz */); break; case I915_PMU_INTERRUPTS: @@ -582,24 +590,25 @@ static void i915_pmu_enable(struct perf_event *event) struct drm_i915_private *i915 = container_of(event->pmu, typeof(*i915), pmu.base); unsigned int bit = event_enabled_bit(event); + struct i915_pmu *pmu = &i915->pmu; unsigned long flags; - spin_lock_irqsave(&i915->pmu.lock, flags); + spin_lock_irqsave(&pmu->lock, flags); /* * Update the bitmask of enabled events and increment * the event reference counter. */ - BUILD_BUG_ON(ARRAY_SIZE(i915->pmu.enable_count) != I915_PMU_MASK_BITS); - GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count)); - GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0); - i915->pmu.enable |= BIT_ULL(bit); - i915->pmu.enable_count[bit]++; + BUILD_BUG_ON(ARRAY_SIZE(pmu->enable_count) != I915_PMU_MASK_BITS); + GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count)); + GEM_BUG_ON(pmu->enable_count[bit] == ~0); + pmu->enable |= BIT_ULL(bit); + pmu->enable_count[bit]++; /* * Start the sampling timer if needed and not already enabled. 
*/ - __i915_pmu_maybe_start_timer(i915); + __i915_pmu_maybe_start_timer(pmu); /* * For per-engine events the bitmask and reference counting @@ -625,7 +634,7 @@ static void i915_pmu_enable(struct perf_event *event) engine->pmu.enable_count[sample]++; } - spin_unlock_irqrestore(&i915->pmu.lock, flags); + spin_unlock_irqrestore(&pmu->lock, flags); /* * Store the current counter value so we can report the correct delta @@ -640,9 +649,10 @@ static void i915_pmu_disable(struct perf_event *event) struct drm_i915_private *i915 = container_of(event->pmu, typeof(*i915), pmu.base); unsigned int bit = event_enabled_bit(event); + struct i915_pmu *pmu = &i915->pmu; unsigned long flags; - spin_lock_irqsave(&i915->pmu.lock, flags); + spin_lock_irqsave(&pmu->lock, flags); if (is_engine_event(event)) { u8 sample = engine_event_sample(event); @@ -664,18 +674,18 @@ static void i915_pmu_disable(struct perf_event *event) engine->pmu.enable &= ~BIT(sample); } - GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count)); - GEM_BUG_ON(i915->pmu.enable_count[bit] == 0); + GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count)); + GEM_BUG_ON(pmu->enable_count[bit] == 0); /* * Decrement the reference count and clear the enabled * bitmask when the last listener on an event goes away. */ - if (--i915->pmu.enable_count[bit] == 0) { - i915->pmu.enable &= ~BIT_ULL(bit); - i915->pmu.timer_enabled &= pmu_needs_timer(i915, true); + if (--pmu->enable_count[bit] == 0) { + pmu->enable &= ~BIT_ULL(bit); + pmu->timer_enabled &= pmu_needs_timer(pmu, true); } - spin_unlock_irqrestore(&i915->pmu.lock, flags); + spin_unlock_irqrestore(&pmu->lock, flags); } static void i915_pmu_event_start(struct perf_event *event, int flags) @@ -824,8 +834,9 @@ add_pmu_attr(struct perf_pmu_events_attr *attr, const char *name, } static struct attribute ** -create_event_attributes(struct drm_i915_private *i915) +create_event_attributes(struct i915_pmu *pmu) { + struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu); static const struct { u64 config; const char *name; @@ -939,8 +950,8 @@ create_event_attributes(struct drm_i915_private *i915) } } - i915->pmu.i915_attr = i915_attr; - i915->pmu.pmu_attr = pmu_attr; + pmu->i915_attr = i915_attr; + pmu->pmu_attr = pmu_attr; return attr; @@ -956,7 +967,7 @@ err_alloc: return NULL; } -static void free_event_attributes(struct drm_i915_private *i915) +static void free_event_attributes(struct i915_pmu *pmu) { struct attribute **attr_iter = i915_pmu_events_attr_group.attrs; @@ -964,12 +975,12 @@ static void free_event_attributes(struct drm_i915_private *i915) kfree((*attr_iter)->name); kfree(i915_pmu_events_attr_group.attrs); - kfree(i915->pmu.i915_attr); - kfree(i915->pmu.pmu_attr); + kfree(pmu->i915_attr); + kfree(pmu->pmu_attr); i915_pmu_events_attr_group.attrs = NULL; - i915->pmu.i915_attr = NULL; - i915->pmu.pmu_attr = NULL; + pmu->i915_attr = NULL; + pmu->pmu_attr = NULL; } static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node) @@ -1006,7 +1017,7 @@ static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node) static enum cpuhp_state cpuhp_slot = CPUHP_INVALID; -static int i915_pmu_register_cpuhp_state(struct drm_i915_private *i915) +static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu) { enum cpuhp_state slot; int ret; @@ -1019,7 +1030,7 @@ static int i915_pmu_register_cpuhp_state(struct drm_i915_private *i915) return ret; slot = ret; - ret = cpuhp_state_add_instance(slot, &i915->pmu.node); + ret = cpuhp_state_add_instance(slot, &pmu->node); if (ret) { 
cpuhp_remove_multi_state(slot); return ret; @@ -1029,15 +1040,16 @@ static int i915_pmu_register_cpuhp_state(struct drm_i915_private *i915) return 0; } -static void i915_pmu_unregister_cpuhp_state(struct drm_i915_private *i915) +static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu) { WARN_ON(cpuhp_slot == CPUHP_INVALID); - WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &i915->pmu.node)); + WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &pmu->node)); cpuhp_remove_multi_state(cpuhp_slot); } void i915_pmu_register(struct drm_i915_private *i915) { + struct i915_pmu *pmu = &i915->pmu; int ret; if (INTEL_GEN(i915) <= 2) { @@ -1045,56 +1057,58 @@ void i915_pmu_register(struct drm_i915_private *i915) return; } - i915_pmu_events_attr_group.attrs = create_event_attributes(i915); + i915_pmu_events_attr_group.attrs = create_event_attributes(pmu); if (!i915_pmu_events_attr_group.attrs) { ret = -ENOMEM; goto err; } - i915->pmu.base.attr_groups = i915_pmu_attr_groups; - i915->pmu.base.task_ctx_nr = perf_invalid_context; - i915->pmu.base.event_init = i915_pmu_event_init; - i915->pmu.base.add = i915_pmu_event_add; - i915->pmu.base.del = i915_pmu_event_del; - i915->pmu.base.start = i915_pmu_event_start; - i915->pmu.base.stop = i915_pmu_event_stop; - i915->pmu.base.read = i915_pmu_event_read; - i915->pmu.base.event_idx = i915_pmu_event_event_idx; - - spin_lock_init(&i915->pmu.lock); - hrtimer_init(&i915->pmu.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - i915->pmu.timer.function = i915_sample; - - ret = perf_pmu_register(&i915->pmu.base, "i915", -1); + pmu->base.attr_groups = i915_pmu_attr_groups; + pmu->base.task_ctx_nr = perf_invalid_context; + pmu->base.event_init = i915_pmu_event_init; + pmu->base.add = i915_pmu_event_add; + pmu->base.del = i915_pmu_event_del; + pmu->base.start = i915_pmu_event_start; + pmu->base.stop = i915_pmu_event_stop; + pmu->base.read = i915_pmu_event_read; + pmu->base.event_idx = i915_pmu_event_event_idx; + + spin_lock_init(&pmu->lock); + hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + pmu->timer.function = i915_sample; + + ret = perf_pmu_register(&pmu->base, "i915", -1); if (ret) goto err; - ret = i915_pmu_register_cpuhp_state(i915); + ret = i915_pmu_register_cpuhp_state(pmu); if (ret) goto err_unreg; return; err_unreg: - perf_pmu_unregister(&i915->pmu.base); + perf_pmu_unregister(&pmu->base); err: - i915->pmu.base.event_init = NULL; - free_event_attributes(i915); + pmu->base.event_init = NULL; + free_event_attributes(pmu); DRM_NOTE("Failed to register PMU! (err=%d)\n", ret); } void i915_pmu_unregister(struct drm_i915_private *i915) { - if (!i915->pmu.base.event_init) + struct i915_pmu *pmu = &i915->pmu; + + if (!pmu->base.event_init) return; - WARN_ON(i915->pmu.enable); + WARN_ON(pmu->enable); - hrtimer_cancel(&i915->pmu.timer); + hrtimer_cancel(&pmu->timer); - i915_pmu_unregister_cpuhp_state(i915); + i915_pmu_unregister_cpuhp_state(pmu); - perf_pmu_unregister(&i915->pmu.base); - i915->pmu.base.event_init = NULL; - free_event_attributes(i915); + perf_pmu_unregister(&pmu->base); + pmu->base.event_init = NULL; + free_event_attributes(pmu); } -- cgit v1.2.3 From 28fba0961de40d401070608c668de76fba848ea1 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Thu, 1 Aug 2019 17:23:28 +0100 Subject: drm/i915/pmu: Convert engine sampling to uncore mmio Drops one macro using implicit dev_priv. v2: * Use ENGINE_READ_FW. 
(Chris) Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190801162330.2729-2-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_pmu.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index 12008966b00e..09265b6b78b2 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -162,29 +162,30 @@ add_sample(struct i915_pmu_sample *sample, u32 val) } static void -engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns) +engines_sample(struct drm_i915_private *i915, unsigned int period_ns) { + struct intel_uncore *uncore = &i915->uncore; struct intel_engine_cs *engine; enum intel_engine_id id; intel_wakeref_t wakeref; unsigned long flags; - if ((dev_priv->pmu.enable & ENGINE_SAMPLE_MASK) == 0) + if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0) return; wakeref = 0; - if (READ_ONCE(dev_priv->gt.awake)) - wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm); + if (READ_ONCE(i915->gt.awake)) + wakeref = intel_runtime_pm_get_if_in_use(&i915->runtime_pm); if (!wakeref) return; - spin_lock_irqsave(&dev_priv->uncore.lock, flags); - for_each_engine(engine, dev_priv, id) { + spin_lock_irqsave(&uncore->lock, flags); + for_each_engine(engine, i915, id) { struct intel_engine_pmu *pmu = &engine->pmu; bool busy; u32 val; - val = I915_READ_FW(RING_CTL(engine->mmio_base)); + val = ENGINE_READ_FW(engine, RING_CTL); if (val == 0) /* powerwell off => engine idle */ continue; @@ -202,15 +203,15 @@ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns) */ busy = val & (RING_WAIT_SEMAPHORE | RING_WAIT); if (!busy) { - val = I915_READ_FW(RING_MI_MODE(engine->mmio_base)); + val = ENGINE_READ_FW(engine, RING_MI_MODE); busy = !(val & MODE_IDLE); } if (busy) add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns); } - spin_unlock_irqrestore(&dev_priv->uncore.lock, flags); + spin_unlock_irqrestore(&uncore->lock, flags); - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); } static void -- cgit v1.2.3 From 08ce5c64b25d0d434ef55fed0e0a3d7b526d793b Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Thu, 1 Aug 2019 17:23:29 +0100 Subject: drm/i915/pmu: Convert sampling to gt Engines and frequencies are a GT thing so adjust sampling routines to match. 
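In rough outline (abridged from the diff below, details elided), the samplers now take the GT and derive the i915 and uncore pointers from it:

    static void
    engines_sample(struct intel_gt *gt, unsigned int period_ns)
    {
            struct drm_i915_private *i915 = gt->i915;
            struct intel_uncore *uncore = gt->uncore;
            /* engine iteration and register sampling unchanged */
    }

    /* callers in the timer callback */
    engines_sample(gt, period_ns);
    frequency_sample(gt, period_ns);
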
Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190801162330.2729-3-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_pmu.c | 43 ++++++++++++++++++++++------------------- 1 file changed, 23 insertions(+), 20 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index 09265b6b78b2..5cf9a47a0c43 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -162,9 +162,10 @@ add_sample(struct i915_pmu_sample *sample, u32 val) } static void -engines_sample(struct drm_i915_private *i915, unsigned int period_ns) +engines_sample(struct intel_gt *gt, unsigned int period_ns) { - struct intel_uncore *uncore = &i915->uncore; + struct drm_i915_private *i915 = gt->i915; + struct intel_uncore *uncore = gt->uncore; struct intel_engine_cs *engine; enum intel_engine_id id; intel_wakeref_t wakeref; @@ -174,7 +175,7 @@ engines_sample(struct drm_i915_private *i915, unsigned int period_ns) return; wakeref = 0; - if (READ_ONCE(i915->gt.awake)) + if (READ_ONCE(gt->awake)) wakeref = intel_runtime_pm_get_if_in_use(&i915->runtime_pm); if (!wakeref) return; @@ -221,34 +222,35 @@ add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul) } static void -frequency_sample(struct drm_i915_private *dev_priv, unsigned int period_ns) +frequency_sample(struct intel_gt *gt, unsigned int period_ns) { - if (dev_priv->pmu.enable & - config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) { + struct drm_i915_private *i915 = gt->i915; + struct intel_uncore *uncore = gt->uncore; + struct i915_pmu *pmu = &i915->pmu; + + if (pmu->enable & config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) { u32 val; - val = dev_priv->gt_pm.rps.cur_freq; - if (dev_priv->gt.awake) { + val = i915->gt_pm.rps.cur_freq; + if (gt->awake) { intel_wakeref_t wakeref; - with_intel_runtime_pm_if_in_use(&dev_priv->runtime_pm, + with_intel_runtime_pm_if_in_use(&i915->runtime_pm, wakeref) { - val = intel_uncore_read_notrace(&dev_priv->uncore, + val = intel_uncore_read_notrace(uncore, GEN6_RPSTAT1); - val = intel_get_cagf(dev_priv, val); + val = intel_get_cagf(i915, val); } } - add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT], - intel_gpu_freq(dev_priv, val), + add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_ACT], + intel_gpu_freq(i915, val), period_ns / 1000); } - if (dev_priv->pmu.enable & - config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) { - add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ], - intel_gpu_freq(dev_priv, - dev_priv->gt_pm.rps.cur_freq), + if (pmu->enable & config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) { + add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_REQ], + intel_gpu_freq(i915, i915->gt_pm.rps.cur_freq), period_ns / 1000); } } @@ -258,6 +260,7 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer) struct drm_i915_private *i915 = container_of(hrtimer, struct drm_i915_private, pmu.timer); struct i915_pmu *pmu = &i915->pmu; + struct intel_gt *gt = &i915->gt; unsigned int period_ns; ktime_t now; @@ -274,8 +277,8 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer) * grabbing the forcewake. However the potential error from timer call- * back delay greatly dominates this so we keep it simple. 
*/ - engines_sample(i915, period_ns); - frequency_sample(i915, period_ns); + engines_sample(gt, period_ns); + frequency_sample(gt, period_ns); hrtimer_forward(hrtimer, now, ns_to_ktime(PERIOD)); -- cgit v1.2.3 From 518ea582cb6e74af54c3b9c38b3183fbf1937d11 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Thu, 1 Aug 2019 17:23:30 +0100 Subject: drm/i915/pmu: Make get_rc6 take intel_gt RC6 is a GT state so make the function parameter reflect that. Signed-off-by: Tvrtko Ursulin Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190801162330.2729-4-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_pmu.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index 5cf9a47a0c43..e0e0180bca7c 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -431,8 +431,9 @@ static int i915_pmu_event_init(struct perf_event *event) return 0; } -static u64 __get_rc6(struct drm_i915_private *i915) +static u64 __get_rc6(struct intel_gt *gt) { + struct drm_i915_private *i915 = gt->i915; u64 val; val = intel_rc6_residency_ns(i915, @@ -449,9 +450,10 @@ static u64 __get_rc6(struct drm_i915_private *i915) return val; } -static u64 get_rc6(struct drm_i915_private *i915) +static u64 get_rc6(struct intel_gt *gt) { #if IS_ENABLED(CONFIG_PM) + struct drm_i915_private *i915 = gt->i915; struct intel_runtime_pm *rpm = &i915->runtime_pm; struct i915_pmu *pmu = &i915->pmu; intel_wakeref_t wakeref; @@ -460,7 +462,7 @@ static u64 get_rc6(struct drm_i915_private *i915) wakeref = intel_runtime_pm_get_if_in_use(rpm); if (wakeref) { - val = __get_rc6(i915); + val = __get_rc6(gt); intel_runtime_pm_put(rpm, wakeref); /* @@ -523,7 +525,7 @@ static u64 get_rc6(struct drm_i915_private *i915) return val; #else - return __get_rc6(i915); + return __get_rc6(gt); #endif } @@ -566,7 +568,7 @@ static u64 __i915_pmu_event_read(struct perf_event *event) val = count_interrupts(i915); break; case I915_PMU_RC6_RESIDENCY: - val = get_rc6(i915); + val = get_rc6(&i915->gt); break; } } -- cgit v1.2.3 From 412e85b605315fd129a849599cf4a5a7959573a8 Mon Sep 17 00:00:00 2001 From: Lyude Paul Date: Thu, 1 Aug 2019 18:02:15 -0400 Subject: drm/nouveau: Only release VCPI slots on mode changes Looks like a regression got introduced into nv50_mstc_atomic_check() that somehow didn't get found until now. If userspace changes crtc_state->active to false but leaves the CRTC enabled, we end up calling drm_dp_atomic_find_vcpi_slots() using the PBN calculated in asyh->dp.pbn. However, if the display is inactive we end up calculating a PBN of 0, which inadvertently causes us to have an allocation of 0. >From there, if userspace then disables the CRTC afterwards we end up accidentally attempting to free the VCPI twice: WARNING: CPU: 0 PID: 1484 at drivers/gpu/drm/drm_dp_mst_topology.c:3336 drm_dp_atomic_release_vcpi_slots+0x87/0xb0 [drm_kms_helper] RIP: 0010:drm_dp_atomic_release_vcpi_slots+0x87/0xb0 [drm_kms_helper] Call Trace: drm_atomic_helper_check_modeset+0x3f3/0xa60 [drm_kms_helper] ? drm_atomic_check_only+0x43/0x780 [drm] drm_atomic_helper_check+0x15/0x90 [drm_kms_helper] nv50_disp_atomic_check+0x83/0x1d0 [nouveau] drm_atomic_check_only+0x54d/0x780 [drm] ? drm_atomic_set_crtc_for_connector+0xec/0x100 [drm] drm_atomic_commit+0x13/0x50 [drm] drm_atomic_helper_set_config+0x81/0x90 [drm_kms_helper] drm_mode_setcrtc+0x194/0x6a0 [drm] ? 
vprintk_emit+0x16a/0x230 ? drm_ioctl+0x163/0x390 [drm] ? drm_mode_getcrtc+0x180/0x180 [drm] drm_ioctl_kernel+0xaa/0xf0 [drm] drm_ioctl+0x208/0x390 [drm] ? drm_mode_getcrtc+0x180/0x180 [drm] nouveau_drm_ioctl+0x63/0xb0 [nouveau] do_vfs_ioctl+0x405/0x660 ? recalc_sigpending+0x17/0x50 ? _copy_from_user+0x37/0x60 ksys_ioctl+0x5e/0x90 ? exit_to_usermode_loop+0x92/0xe0 __x64_sys_ioctl+0x16/0x20 do_syscall_64+0x59/0x190 entry_SYSCALL_64_after_hwframe+0x44/0xa9 WARNING: CPU: 0 PID: 1484 at drivers/gpu/drm/drm_dp_mst_topology.c:3336 drm_dp_atomic_release_vcpi_slots+0x87/0xb0 [drm_kms_helper] ---[ end trace 4c395c0c51b1f88d ]--- [drm:drm_dp_atomic_release_vcpi_slots [drm_kms_helper]] *ERROR* no VCPI for [MST PORT:00000000e288eb7d] found in mst state 000000008e642070 So, fix this by doing what we probably should have done from the start: only call drm_dp_atomic_find_vcpi_slots() when crtc_state->mode_changed is set, so that VCPI allocations remain for as long as the CRTC is enabled. Signed-off-by: Lyude Paul Fixes: 232c9eec417a ("drm/nouveau: Use atomic VCPI helpers for MST") Cc: Lyude Paul Cc: Ben Skeggs Cc: Daniel Vetter Cc: David Airlie Cc: Jerry Zuo Cc: Harry Wentland Cc: Juston Li Cc: Karol Herbst Cc: Laurent Pinchart Cc: Ilia Mirkin Cc: # v5.1+ Acked-by: Ben Skeggs Signed-off-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20190801220216.15323-1-lyude@redhat.com --- drivers/gpu/drm/nouveau/dispnv50/disp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 8497768f1b41..126703816794 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -780,7 +780,7 @@ nv50_msto_atomic_check(struct drm_encoder *encoder, drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock, connector->display_info.bpc * 3); - if (drm_atomic_crtc_needs_modeset(crtc_state)) { + if (crtc_state->mode_changed) { slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr, mstc->port, asyh->dp.pbn); -- cgit v1.2.3 From 9eae7c3bcb52ec0a9f816d830e232e36a20b46d4 Mon Sep 17 00:00:00 2001 From: Fuqian Huang Date: Thu, 4 Jul 2019 10:34:36 +0800 Subject: drm/exynos: using dev_get_drvdata directly Several drivers cast a struct device pointer to a struct platform_device pointer only to then call platform_get_drvdata(). To improve readability, these constructs can be simplified by using dev_get_drvdata() directly. 
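Taking the fimc context macro from the diff below as the example, the simplification is roughly:

    /* before */
    #define get_fimc_context(dev)	platform_get_drvdata(to_platform_device(dev))

    /* after */
    #define get_fimc_context(dev)	dev_get_drvdata(dev)
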
Signed-off-by: Fuqian Huang Reviewed-by: Emil Velikov Signed-off-by: Inki Dae --- drivers/gpu/drm/exynos/exynos_drm_fimc.c | 2 +- drivers/gpu/drm/exynos/exynos_drm_gsc.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index a594ab7be2c0..164d914cbe9a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c @@ -44,7 +44,7 @@ static unsigned int fimc_mask = 0xc; module_param_named(fimc_devs, fimc_mask, uint, 0644); MODULE_PARM_DESC(fimc_devs, "Alias mask for assigning FIMC devices to Exynos DRM"); -#define get_fimc_context(dev) platform_get_drvdata(to_platform_device(dev)) +#define get_fimc_context(dev) dev_get_drvdata(dev) enum { FIMC_CLK_LCLK, diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 1e4b21c49a06..1c524db9570f 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -58,7 +58,7 @@ #define GSC_COEF_DEPTH 3 #define GSC_AUTOSUSPEND_DELAY 2000 -#define get_gsc_context(dev) platform_get_drvdata(to_platform_device(dev)) +#define get_gsc_context(dev) dev_get_drvdata(dev) #define gsc_read(offset) readl(ctx->regs + (offset)) #define gsc_write(cfg, offset) writel(cfg, ctx->regs + (offset)) -- cgit v1.2.3 From 59d431746f1b3c76fd551b71241d7fdce38a58e9 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 9 Jul 2019 21:01:14 +0900 Subject: drm/exynos: remove redundant assignment to pointer 'node' The pointer 'node' is being assigned with a value that is never read and is re-assigned later. The assignment is redundant and can be removed. Addresses-Coverity: ("Unused value") Signed-off-by: Colin Ian King Signed-off-by: Inki Dae --- drivers/gpu/drm/exynos/exynos_drm_g2d.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 50904eee96f7..2a3382d43bc9 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -267,7 +267,7 @@ static inline void g2d_hw_reset(struct g2d_data *g2d) static int g2d_init_cmdlist(struct g2d_data *g2d) { struct device *dev = g2d->dev; - struct g2d_cmdlist_node *node = g2d->cmdlist_node; + struct g2d_cmdlist_node *node; int nr; int ret; struct g2d_buf_info *buf_info; -- cgit v1.2.3 From d6f25bd9d4079165ea90f12d71e06d1dca83cd86 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 9 Jul 2019 21:08:48 +0900 Subject: drm/exynos: add CONFIG_MMU dependency Compile-testing this driver on a NOMMU configuration shows a link failure: drivers/gpu/drm/exynos/exynos_drm_gem.o: In function `exynos_drm_gem_fault': exynos_drm_gem.c:(.text+0x484): undefined reference to `vmf_insert_mixed' Add a CONFIG_MMU dependency to ensure we only enable this in configurations that build correctly. Many other drm drivers have the same dependency. It would be nice to make this work in MMU-less configurations, but evidently nobody has ever needed this so far. 
Fixes: 156bdac99061 ("drm/exynos: trigger build of all modules") Signed-off-by: Arnd Bergmann Reviewed-by: Vladimir Murzin Signed-off-by: Inki Dae --- drivers/gpu/drm/exynos/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index 60ce4a8ad9e1..6f7d3b3b3628 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig @@ -2,6 +2,7 @@ config DRM_EXYNOS tristate "DRM Support for Samsung SoC EXYNOS Series" depends on OF && DRM && (ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_MULTIPLATFORM || COMPILE_TEST) + depends on MMU select DRM_KMS_HELPER select VIDEOMODE_HELPERS select SND_SOC_HDMI_CODEC if SND_SOC -- cgit v1.2.3 From 51fbd8de87dcf09f3929e5438e4344bea4338990 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 2 Aug 2019 00:36:16 +0100 Subject: drm/i915/pmu: Atomically acquire the gt_pm wakeref Currently, we only sample if the intel_gt is awake, but we acquire our own runtime_pm wakeref. Since intel_gt has transitioned to tracking its own wakeref, we can atomically test and acquire that wakeref instead. v2: Take engine->wakeref for engine sampling Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190801233616.23007-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_gt_pm.h | 8 ++++++- drivers/gpu/drm/i915/i915_pmu.c | 40 +++++++++++++++-------------------- 2 files changed, 24 insertions(+), 24 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h index 527894fe1345..e8a18d4b27c9 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h @@ -9,7 +9,8 @@ #include -struct intel_gt; +#include "intel_gt_types.h" +#include "intel_wakeref.h" enum { INTEL_GT_UNPARK, @@ -19,6 +20,11 @@ enum { void intel_gt_pm_get(struct intel_gt *gt); void intel_gt_pm_put(struct intel_gt *gt); +static inline bool intel_gt_pm_get_if_awake(struct intel_gt *gt) +{ + return intel_wakeref_get_if_active(>->wakeref); +} + void intel_gt_pm_init_early(struct intel_gt *gt); void intel_gt_sanitize(struct intel_gt *gt, bool force); diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index e0e0180bca7c..c2e5f6d5c1e0 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -8,6 +8,8 @@ #include #include "gt/intel_engine.h" +#include "gt/intel_engine_pm.h" +#include "gt/intel_gt_pm.h" #include "i915_drv.h" #include "i915_pmu.h" @@ -165,30 +167,26 @@ static void engines_sample(struct intel_gt *gt, unsigned int period_ns) { struct drm_i915_private *i915 = gt->i915; - struct intel_uncore *uncore = gt->uncore; struct intel_engine_cs *engine; enum intel_engine_id id; - intel_wakeref_t wakeref; - unsigned long flags; if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0) return; - wakeref = 0; - if (READ_ONCE(gt->awake)) - wakeref = intel_runtime_pm_get_if_in_use(&i915->runtime_pm); - if (!wakeref) - return; - - spin_lock_irqsave(&uncore->lock, flags); for_each_engine(engine, i915, id) { struct intel_engine_pmu *pmu = &engine->pmu; + unsigned long flags; bool busy; u32 val; + if (!intel_engine_pm_get_if_awake(engine)) + continue; + + spin_lock_irqsave(&engine->uncore->lock, flags); + val = ENGINE_READ_FW(engine, RING_CTL); if (val == 0) /* powerwell off => engine idle */ - continue; + goto skip; if (val & RING_WAIT) 
add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns); @@ -209,10 +207,11 @@ engines_sample(struct intel_gt *gt, unsigned int period_ns) } if (busy) add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns); - } - spin_unlock_irqrestore(&uncore->lock, flags); - intel_runtime_pm_put(&i915->runtime_pm, wakeref); +skip: + spin_unlock_irqrestore(&engine->uncore->lock, flags); + intel_engine_pm_put(engine); + } } static void @@ -232,15 +231,10 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns) u32 val; val = i915->gt_pm.rps.cur_freq; - if (gt->awake) { - intel_wakeref_t wakeref; - - with_intel_runtime_pm_if_in_use(&i915->runtime_pm, - wakeref) { - val = intel_uncore_read_notrace(uncore, - GEN6_RPSTAT1); - val = intel_get_cagf(i915, val); - } + if (intel_gt_pm_get_if_awake(gt)) { + val = intel_uncore_read_notrace(uncore, GEN6_RPSTAT1); + val = intel_get_cagf(i915, val); + intel_gt_pm_put(gt); } add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_ACT], -- cgit v1.2.3 From 576f05865581f82ac988ffec70e4e2ebd31165db Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 30 Jul 2019 12:21:51 +0100 Subject: drm/i915: Flush extra hard after writing relocations through the GTT Recently discovered in commit bdae33b8b82b ("drm/i915: Use maximum write flush for pwrite_gtt") was that we needed to our full write barrier before changing the GGTT PTE to ensure that our indirect writes through the GTT landed before the PTE changed (and the writes end up in a different page). That also applies to our GGTT relocation path. Signed-off-by: Chris Wilson Cc: stable@vger.kernel.org Reviewed-by: Prathap Kumar Valsan Link: https://patchwork.freedesktop.org/patch/msgid/20190730112151.5633-4-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index cbd7c6e3a1f8..4db4463089ce 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -1014,11 +1014,12 @@ static void reloc_cache_reset(struct reloc_cache *cache) kunmap_atomic(vaddr); i915_gem_object_finish_access((struct drm_i915_gem_object *)cache->node.mm); } else { - wmb(); + struct i915_ggtt *ggtt = cache_to_ggtt(cache); + + intel_gt_flush_ggtt_writes(ggtt->vm.gt); io_mapping_unmap_atomic((void __iomem *)vaddr); - if (cache->node.allocated) { - struct i915_ggtt *ggtt = cache_to_ggtt(cache); + if (cache->node.allocated) { ggtt->vm.clear_range(&ggtt->vm, cache->node.start, cache->node.size); @@ -1073,6 +1074,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj, void *vaddr; if (cache->vaddr) { + intel_gt_flush_ggtt_writes(ggtt->vm.gt); io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr)); } else { struct i915_vma *vma; @@ -1114,7 +1116,6 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj, offset = cache->node.start; if (cache->node.allocated) { - wmb(); ggtt->vm.insert_page(&ggtt->vm, i915_gem_object_get_dma_address(obj, page), offset, I915_CACHE_NONE, 0); -- cgit v1.2.3 From 1bbbab097a05276e312dd2462791d32b21ceb1ee Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Mon, 22 Jul 2019 23:25:35 +0100 Subject: drm/exynos: fix missing decrement of retry counter Currently the retry counter is not being decremented, leading to a potential infinite spin if the scalar_reads don't change state. 
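To make the bug class concrete, here is a stand-alone sketch (assumed names throughout: foo_wait_ready(), status_reg, FOO_READY and the retry budget are made up, not the scaler driver's actual code): when the counter is only tested, a register that never reports ready spins forever; moving the decrement into the loop condition bounds the wait.

    #include <linux/errno.h>
    #include <linux/io.h>
    #include <linux/processor.h>

    #define FOO_READY	0x1	/* hypothetical ready bit */

    static int foo_wait_ready(void __iomem *status_reg)
    {
            int retry = 100;

            /*
             * Broken form: 'retry' is compared but never changed, so a stuck
             * status register spins forever:
             *
             *	while (retry > 0 && !(readl(status_reg) & FOO_READY))
             *		cpu_relax();
             */

            /* Fixed form: decrementing inside the condition bounds the loop */
            while (--retry > 0 && !(readl(status_reg) & FOO_READY))
                    cpu_relax();

            return retry ? 0 : -EIO;
    }

The actual fix in the scaler reset path below has the same shape, moving the pre-decrement into both polling conditions.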
Addresses-Coverity: ("Infinite loop") Fixes: 280e54c9f614 ("drm/exynos: scaler: Reset hardware before starting the operation") Signed-off-by: Colin Ian King Signed-off-by: Inki Dae --- drivers/gpu/drm/exynos/exynos_drm_scaler.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c index 9af096479e1c..b24ba948b725 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c +++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c @@ -94,12 +94,12 @@ static inline int scaler_reset(struct scaler_context *scaler) scaler_write(SCALER_CFG_SOFT_RESET, SCALER_CFG); do { cpu_relax(); - } while (retry > 1 && + } while (--retry > 1 && scaler_read(SCALER_CFG) & SCALER_CFG_SOFT_RESET); do { cpu_relax(); scaler_write(1, SCALER_INT_EN); - } while (retry > 0 && scaler_read(SCALER_INT_EN) != 1); + } while (--retry > 0 && scaler_read(SCALER_INT_EN) != 1); return retry ? 0 : -EIO; } -- cgit v1.2.3 From d8af05ff38ae7a42819b285ffef314942414ef8b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 2 Aug 2019 11:00:15 +0100 Subject: drm/i915: Allow sharing the idle-barrier from other kernel requests By placing our idle-barriers in the i915_active fence tree, we expose those for reuse by other components that are issuing requests along the kernel_context. Reusing the proto-barrier active_node is perfectly fine as the new request implies a context-switch, and so an opportune point to run the idle-barrier. However, the proto-barrier is not equivalent to a normal active_node and care must be taken to avoid dereferencing the ERR_PTR used as its request marker. v2: Comment the more egregious cheek v3: A glossary! Reported-by: Lionel Landwerlin Fixes: ce476c80b8bf ("drm/i915: Keep contexts pinned until after the next kernel context switch") Fixes: a9877da2d629 ("drm/i915/oa: Reconfigure contexts on the fly") Signed-off-by: Chris Wilson Cc: Lionel Landwerlin Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190802100015.1281-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_context.c | 40 ++- drivers/gpu/drm/i915/gt/intel_context.h | 13 +- drivers/gpu/drm/i915/gt/intel_engine_pm.c | 2 +- drivers/gpu/drm/i915/gt/selftest_context.c | 310 +++++++++++++++++++++ drivers/gpu/drm/i915/i915_active.c | 288 ++++++++++++++++--- drivers/gpu/drm/i915/i915_active.h | 2 +- drivers/gpu/drm/i915/i915_active_types.h | 2 +- .../gpu/drm/i915/selftests/i915_live_selftests.h | 3 +- 8 files changed, 597 insertions(+), 63 deletions(-) create mode 100644 drivers/gpu/drm/i915/gt/selftest_context.c (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index f30441a140f8..34c8e37a73b8 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -162,23 +162,41 @@ static int __intel_context_active(struct i915_active *active) if (err) goto err_ring; + return 0; + +err_ring: + intel_ring_unpin(ce->ring); +err_put: + intel_context_put(ce); + return err; +} + +int intel_context_active_acquire(struct intel_context *ce) +{ + int err; + + err = i915_active_acquire(&ce->active); + if (err) + return err; + /* Preallocate tracking nodes */ if (!i915_gem_context_is_kernel(ce->gem_context)) { err = i915_active_acquire_preallocate_barrier(&ce->active, ce->engine); - if (err) - goto err_state; + if (err) { + i915_active_release(&ce->active); + return err; + } } return 
0; +} -err_state: - __context_unpin_state(ce->state); -err_ring: - intel_ring_unpin(ce->ring); -err_put: - intel_context_put(ce); - return err; +void intel_context_active_release(struct intel_context *ce) +{ + /* Nodes preallocated in intel_context_active() */ + i915_active_acquire_barrier(&ce->active); + i915_active_release(&ce->active); } void @@ -301,3 +319,7 @@ struct i915_request *intel_context_create_request(struct intel_context *ce) return rq; } + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "selftest_context.c" +#endif diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h index 23c7e4c0ce7c..07f9924de48f 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.h +++ b/drivers/gpu/drm/i915/gt/intel_context.h @@ -104,17 +104,8 @@ static inline void intel_context_exit(struct intel_context *ce) ce->ops->exit(ce); } -static inline int intel_context_active_acquire(struct intel_context *ce) -{ - return i915_active_acquire(&ce->active); -} - -static inline void intel_context_active_release(struct intel_context *ce) -{ - /* Nodes preallocated in intel_context_active() */ - i915_active_acquire_barrier(&ce->active); - i915_active_release(&ce->active); -} +int intel_context_active_acquire(struct intel_context *ce); +void intel_context_active_release(struct intel_context *ce); static inline struct intel_context *intel_context_get(struct intel_context *ce) { diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c index e74fbf04a68d..ce54092475da 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c @@ -90,7 +90,7 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine) /* Check again on the next retirement. */ engine->wakeref_serial = engine->serial + 1; - i915_request_add_barriers(rq); + i915_request_add_active_barriers(rq); __i915_request_commit(rq); return false; diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c new file mode 100644 index 000000000000..d39b5594cb02 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/selftest_context.c @@ -0,0 +1,310 @@ +/* + * SPDX-License-Identifier: GPL-2.0 + * + * Copyright © 2019 Intel Corporation + */ + +#include "i915_selftest.h" +#include "intel_gt.h" + +#include "gem/selftests/mock_context.h" +#include "selftests/igt_flush_test.h" +#include "selftests/mock_drm.h" + +static int request_sync(struct i915_request *rq) +{ + long timeout; + int err = 0; + + i915_request_get(rq); + + i915_request_add(rq); + timeout = i915_request_wait(rq, 0, HZ / 10); + if (timeout < 0) + err = timeout; + else + i915_request_retire_upto(rq); + + i915_request_put(rq); + + return err; +} + +static int context_sync(struct intel_context *ce) +{ + struct intel_timeline *tl = ce->ring->timeline; + int err = 0; + + do { + struct i915_request *rq; + long timeout; + + rcu_read_lock(); + rq = rcu_dereference(tl->last_request.request); + if (rq) + rq = i915_request_get_rcu(rq); + rcu_read_unlock(); + if (!rq) + break; + + timeout = i915_request_wait(rq, 0, HZ / 10); + if (timeout < 0) + err = timeout; + else + i915_request_retire_upto(rq); + + i915_request_put(rq); + } while (!err); + + return err; +} + +static int __live_active_context(struct intel_engine_cs *engine, + struct i915_gem_context *fixme) +{ + struct intel_context *ce; + int pass; + int err; + + /* + * We keep active contexts alive until after a subsequent context + * switch as the final write from the context-save will be after + * 
we retire the final request. We track when we unpin the context, + * under the presumption that the final pin is from the last request, + * and instead of immediately unpinning the context, we add a task + * to unpin the context from the next idle-barrier. + * + * This test makes sure that the context is kept alive until a + * subsequent idle-barrier (emitted when the engine wakeref hits 0 + * with no more outstanding requests). + */ + + if (intel_engine_pm_is_awake(engine)) { + pr_err("%s is awake before starting %s!\n", + engine->name, __func__); + return -EINVAL; + } + + ce = intel_context_create(fixme, engine); + if (!ce) + return -ENOMEM; + + for (pass = 0; pass <= 2; pass++) { + struct i915_request *rq; + + rq = intel_context_create_request(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err; + } + + err = request_sync(rq); + if (err) + goto err; + + /* Context will be kept active until after an idle-barrier. */ + if (i915_active_is_idle(&ce->active)) { + pr_err("context is not active; expected idle-barrier (%s pass %d)\n", + engine->name, pass); + err = -EINVAL; + goto err; + } + + if (!intel_engine_pm_is_awake(engine)) { + pr_err("%s is asleep before idle-barrier\n", + engine->name); + err = -EINVAL; + goto err; + } + } + + /* Now make sure our idle-barriers are flushed */ + err = context_sync(engine->kernel_context); + if (err) + goto err; + + if (!i915_active_is_idle(&ce->active)) { + pr_err("context is still active!"); + err = -EINVAL; + } + + if (intel_engine_pm_is_awake(engine)) { + struct drm_printer p = drm_debug_printer(__func__); + + intel_engine_dump(engine, &p, + "%s is still awake after idle-barriers\n", + engine->name); + GEM_TRACE_DUMP(); + + err = -EINVAL; + goto err; + } + +err: + intel_context_put(ce); + return err; +} + +static int live_active_context(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + struct i915_gem_context *fixme; + enum intel_engine_id id; + struct drm_file *file; + int err = 0; + + file = mock_file(gt->i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + mutex_lock(>->i915->drm.struct_mutex); + + fixme = live_context(gt->i915, file); + if (!fixme) { + err = -ENOMEM; + goto unlock; + } + + for_each_engine(engine, gt->i915, id) { + err = __live_active_context(engine, fixme); + if (err) + break; + + err = igt_flush_test(gt->i915, I915_WAIT_LOCKED); + if (err) + break; + } + +unlock: + mutex_unlock(>->i915->drm.struct_mutex); + mock_file_free(gt->i915, file); + return err; +} + +static int __remote_sync(struct intel_context *ce, struct intel_context *remote) +{ + struct i915_request *rq; + int err; + + err = intel_context_pin(remote); + if (err) + return err; + + rq = intel_context_create_request(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto unpin; + } + + err = intel_context_prepare_remote_request(remote, rq); + if (err) { + i915_request_add(rq); + goto unpin; + } + + err = request_sync(rq); + +unpin: + intel_context_unpin(remote); + return err; +} + +static int __live_remote_context(struct intel_engine_cs *engine, + struct i915_gem_context *fixme) +{ + struct intel_context *local, *remote; + int pass; + int err; + + /* + * Check that our idle barriers do not interfere with normal + * activity tracking. In particular, check that operating + * on the context image remotely (intel_context_prepare_remote_request), + * which inserts foreign fences into intel_context.active, does not + * clobber the idle-barrier. 
+ */ + + remote = intel_context_create(fixme, engine); + if (!remote) + return -ENOMEM; + + local = intel_context_create(fixme, engine); + if (!local) { + err = -ENOMEM; + goto err_remote; + } + + for (pass = 0; pass <= 2; pass++) { + err = __remote_sync(local, remote); + if (err) + break; + + err = __remote_sync(engine->kernel_context, remote); + if (err) + break; + + if (i915_active_is_idle(&remote->active)) { + pr_err("remote context is not active; expected idle-barrier (%s pass %d)\n", + engine->name, pass); + err = -EINVAL; + break; + } + } + + intel_context_put(local); +err_remote: + intel_context_put(remote); + return err; +} + +static int live_remote_context(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + struct i915_gem_context *fixme; + enum intel_engine_id id; + struct drm_file *file; + int err = 0; + + file = mock_file(gt->i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + mutex_lock(>->i915->drm.struct_mutex); + + fixme = live_context(gt->i915, file); + if (!fixme) { + err = -ENOMEM; + goto unlock; + } + + for_each_engine(engine, gt->i915, id) { + err = __live_remote_context(engine, fixme); + if (err) + break; + + err = igt_flush_test(gt->i915, I915_WAIT_LOCKED); + if (err) + break; + } + +unlock: + mutex_unlock(>->i915->drm.struct_mutex); + mock_file_free(gt->i915, file); + return err; +} + +int intel_context_live_selftests(struct drm_i915_private *i915) +{ + static const struct i915_subtest tests[] = { + SUBTEST(live_active_context), + SUBTEST(live_remote_context), + }; + struct intel_gt *gt = &i915->gt; + + if (intel_gt_is_wedged(gt)) + return 0; + + return intel_gt_live_subtests(tests, gt); +} diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c index d32db8a4db5c..1e09722b5317 100644 --- a/drivers/gpu/drm/i915/i915_active.c +++ b/drivers/gpu/drm/i915/i915_active.c @@ -33,6 +33,38 @@ struct active_node { u64 timeline; }; +static inline struct active_node * +node_from_active(struct i915_active_request *active) +{ + return container_of(active, struct active_node, base); +} + +#define take_preallocated_barriers(x) llist_del_all(&(x)->preallocated_barriers) + +static inline bool is_barrier(const struct i915_active_request *active) +{ + return IS_ERR(rcu_access_pointer(active->request)); +} + +static inline struct llist_node *barrier_to_ll(struct active_node *node) +{ + GEM_BUG_ON(!is_barrier(&node->base)); + return (struct llist_node *)&node->base.link; +} + +static inline struct intel_engine_cs * +barrier_to_engine(struct active_node *node) +{ + GEM_BUG_ON(!is_barrier(&node->base)); + return (struct intel_engine_cs *)node->base.link.prev; +} + +static inline struct active_node *barrier_from_ll(struct llist_node *x) +{ + return container_of((struct list_head *)x, + struct active_node, base.link); +} + #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS) static void *active_debug_hint(void *addr) @@ -127,7 +159,7 @@ active_retire(struct i915_active *ref) static void node_retire(struct i915_active_request *base, struct i915_request *rq) { - active_retire(container_of(base, struct active_node, base)->ref); + active_retire(node_from_active(base)->ref); } static struct i915_active_request * @@ -184,6 +216,7 @@ out: ref->cache = node; mutex_unlock(&ref->mutex); + BUILD_BUG_ON(offsetof(typeof(*node), base)); return &node->base; } @@ -201,11 +234,52 @@ void __i915_active_init(struct drm_i915_private *i915, ref->retire = retire; ref->tree = RB_ROOT; ref->cache = NULL; - 
init_llist_head(&ref->barriers); + init_llist_head(&ref->preallocated_barriers); atomic_set(&ref->count, 0); __mutex_init(&ref->mutex, "i915_active", key); } +static bool __active_del_barrier(struct i915_active *ref, + struct active_node *node) +{ + struct intel_engine_cs *engine = barrier_to_engine(node); + struct llist_node *head = NULL, *tail = NULL; + struct llist_node *pos, *next; + + GEM_BUG_ON(node->timeline != engine->kernel_context->ring->timeline->fence_context); + + /* + * Rebuild the llist excluding our node. We may perform this + * outside of the kernel_context timeline mutex and so someone + * else may be manipulating the engine->barrier_tasks, in + * which case either we or they will be upset :) + * + * A second __active_del_barrier() will report failure to claim + * the active_node and the caller will just shrug and know not to + * claim ownership of its node. + * + * A concurrent i915_request_add_active_barriers() will miss adding + * any of the tasks, but we will try again on the next -- and since + * we are actively using the barrier, we know that there will be + * at least another opportunity when we idle. + */ + llist_for_each_safe(pos, next, llist_del_all(&engine->barrier_tasks)) { + if (node == barrier_from_ll(pos)) { + node = NULL; + continue; + } + + pos->next = head; + head = pos; + if (!tail) + tail = pos; + } + if (head) + llist_add_batch(head, tail, &engine->barrier_tasks); + + return !node; +} + int i915_active_ref(struct i915_active *ref, u64 timeline, struct i915_request *rq) @@ -224,8 +298,20 @@ int i915_active_ref(struct i915_active *ref, goto out; } - if (!i915_active_request_isset(active)) - atomic_inc(&ref->count); + if (is_barrier(active)) { /* proto-node used by our idle barrier */ + /* + * This request is on the kernel_context timeline, and so + * we can use it to substitute for the pending idle-barrer + * request that we want to emit on the kernel_context. + */ + __active_del_barrier(ref, node_from_active(active)); + RCU_INIT_POINTER(active->request, NULL); + INIT_LIST_HEAD(&active->link); + } else { + if (!i915_active_request_isset(active)) + atomic_inc(&ref->count); + } + GEM_BUG_ON(!atomic_read(&ref->count)); __i915_active_request_set(active, rq); out: @@ -312,6 +398,11 @@ int i915_active_wait(struct i915_active *ref) } rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) { + if (is_barrier(&it->base)) { /* unconnected idle-barrier */ + err = -EBUSY; + break; + } + err = i915_active_request_retire(&it->base, BKL(ref)); if (err) break; @@ -374,6 +465,92 @@ void i915_active_fini(struct i915_active *ref) } #endif +static inline bool is_idle_barrier(struct active_node *node, u64 idx) +{ + return node->timeline == idx && !i915_active_request_isset(&node->base); +} + +static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx) +{ + struct rb_node *prev, *p; + + if (RB_EMPTY_ROOT(&ref->tree)) + return NULL; + + mutex_lock(&ref->mutex); + GEM_BUG_ON(i915_active_is_idle(ref)); + + /* + * Try to reuse any existing barrier nodes already allocated for this + * i915_active, due to overlapping active phases there is likely a + * node kept alive (as we reuse before parking). We prefer to reuse + * completely idle barriers (less hassle in manipulating the llists), + * but otherwise any will do. 
+ */ + if (ref->cache && is_idle_barrier(ref->cache, idx)) { + p = &ref->cache->node; + goto match; + } + + prev = NULL; + p = ref->tree.rb_node; + while (p) { + struct active_node *node = + rb_entry(p, struct active_node, node); + + if (is_idle_barrier(node, idx)) + goto match; + + prev = p; + if (node->timeline < idx) + p = p->rb_right; + else + p = p->rb_left; + } + + /* + * No quick match, but we did find the leftmost rb_node for the + * kernel_context. Walk the rb_tree in-order to see if there were + * any idle-barriers on this timeline that we missed, or just use + * the first pending barrier. + */ + for (p = prev; p; p = rb_next(p)) { + struct active_node *node = + rb_entry(p, struct active_node, node); + + if (node->timeline > idx) + break; + + if (node->timeline < idx) + continue; + + if (is_idle_barrier(node, idx)) + goto match; + + /* + * The list of pending barriers is protected by the + * kernel_context timeline, which notably we do not hold + * here. i915_request_add_active_barriers() may consume + * the barrier before we claim it, so we have to check + * for success. + */ + if (is_barrier(&node->base) && __active_del_barrier(ref, node)) + goto match; + } + + mutex_unlock(&ref->mutex); + + return NULL; + +match: + rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */ + if (p == &ref->cache->node) + ref->cache = NULL; + mutex_unlock(&ref->mutex); + + return rb_entry(p, struct active_node, node); +} + int i915_active_acquire_preallocate_barrier(struct i915_active *ref, struct intel_engine_cs *engine) { @@ -382,39 +559,61 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref, struct llist_node *pos, *next; int err; - GEM_BUG_ON(!mask); + GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers)); + + /* + * Preallocate a node for each physical engine supporting the target + * engine (remember virtual engines have more than one sibling). + * We can then use the preallocated nodes in + * i915_active_acquire_barrier() + */ for_each_engine_masked(engine, i915, mask, tmp) { - struct intel_context *kctx = engine->kernel_context; + u64 idx = engine->kernel_context->ring->timeline->fence_context; struct active_node *node; - node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL); - if (unlikely(!node)) { - err = -ENOMEM; - goto unwind; + node = reuse_idle_barrier(ref, idx); + if (!node) { + node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL); + if (!node) { + err = ENOMEM; + goto unwind; + } + + RCU_INIT_POINTER(node->base.request, NULL); + node->base.retire = node_retire; + node->timeline = idx; + node->ref = ref; } - i915_active_request_init(&node->base, - (void *)engine, node_retire); - node->timeline = kctx->ring->timeline->fence_context; - node->ref = ref; - atomic_inc(&ref->count); + if (!i915_active_request_isset(&node->base)) { + /* + * Mark this as being *our* unconnected proto-node. + * + * Since this node is not in any list, and we have + * decoupled it from the rbtree, we can reuse the + * request to indicate this is an idle-barrier node + * and then we can use the rb_node and list pointers + * for our tracking of the pending barrier. 
+ */ + RCU_INIT_POINTER(node->base.request, ERR_PTR(-EAGAIN)); + node->base.link.prev = (void *)engine; + atomic_inc(&ref->count); + } + GEM_BUG_ON(barrier_to_engine(node) != engine); + llist_add(barrier_to_ll(node), &ref->preallocated_barriers); intel_engine_pm_get(engine); - llist_add((struct llist_node *)&node->base.link, - &ref->barriers); } return 0; unwind: - llist_for_each_safe(pos, next, llist_del_all(&ref->barriers)) { - struct active_node *node; + llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) { + struct active_node *node = barrier_from_ll(pos); - node = container_of((struct list_head *)pos, - typeof(*node), base.link); - engine = (void *)rcu_access_pointer(node->base.request); + atomic_dec(&ref->count); + intel_engine_pm_put(barrier_to_engine(node)); - intel_engine_pm_put(engine); kmem_cache_free(global.slab_cache, node); } return err; @@ -426,25 +625,27 @@ void i915_active_acquire_barrier(struct i915_active *ref) GEM_BUG_ON(i915_active_is_idle(ref)); + /* + * Transfer the list of preallocated barriers into the + * i915_active rbtree, but only as proto-nodes. They will be + * populated by i915_request_add_active_barriers() to point to the + * request that will eventually release them. + */ mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING); - llist_for_each_safe(pos, next, llist_del_all(&ref->barriers)) { - struct intel_engine_cs *engine; - struct active_node *node; + llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) { + struct active_node *node = barrier_from_ll(pos); + struct intel_engine_cs *engine = barrier_to_engine(node); struct rb_node **p, *parent; - node = container_of((struct list_head *)pos, - typeof(*node), base.link); - - engine = (void *)rcu_access_pointer(node->base.request); - RCU_INIT_POINTER(node->base.request, ERR_PTR(-EAGAIN)); - parent = NULL; p = &ref->tree.rb_node; while (*p) { + struct active_node *it; + parent = *p; - if (rb_entry(parent, - struct active_node, - node)->timeline < node->timeline) + + it = rb_entry(parent, struct active_node, node); + if (it->timeline < node->timeline) p = &parent->rb_right; else p = &parent->rb_left; @@ -452,20 +653,29 @@ void i915_active_acquire_barrier(struct i915_active *ref) rb_link_node(&node->node, parent, p); rb_insert_color(&node->node, &ref->tree); - llist_add((struct llist_node *)&node->base.link, - &engine->barrier_tasks); + llist_add(barrier_to_ll(node), &engine->barrier_tasks); intel_engine_pm_put(engine); } mutex_unlock(&ref->mutex); } -void i915_request_add_barriers(struct i915_request *rq) +void i915_request_add_active_barriers(struct i915_request *rq) { struct intel_engine_cs *engine = rq->engine; struct llist_node *node, *next; - llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) + GEM_BUG_ON(intel_engine_is_virtual(engine)); + GEM_BUG_ON(rq->timeline != engine->kernel_context->ring->timeline); + + /* + * Attach the list of proto-fences to the in-flight request such + * that the parent i915_active will be released when this request + * is retired. 
+ */ + llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) { + RCU_INIT_POINTER(barrier_from_ll(node)->base.request, rq); list_add_tail((struct list_head *)node, &rq->active_list); + } } int i915_active_request_set(struct i915_active_request *active, diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h index ba68b077ec6c..566336c99ed7 100644 --- a/drivers/gpu/drm/i915/i915_active.h +++ b/drivers/gpu/drm/i915/i915_active.h @@ -413,6 +413,6 @@ static inline void i915_active_fini(struct i915_active *ref) { } int i915_active_acquire_preallocate_barrier(struct i915_active *ref, struct intel_engine_cs *engine); void i915_active_acquire_barrier(struct i915_active *ref); -void i915_request_add_barriers(struct i915_request *rq); +void i915_request_add_active_barriers(struct i915_request *rq); #endif /* _I915_ACTIVE_H_ */ diff --git a/drivers/gpu/drm/i915/i915_active_types.h b/drivers/gpu/drm/i915/i915_active_types.h index 74743dd0d5f0..ae3ee441c114 100644 --- a/drivers/gpu/drm/i915/i915_active_types.h +++ b/drivers/gpu/drm/i915/i915_active_types.h @@ -42,7 +42,7 @@ struct i915_active { int (*active)(struct i915_active *ref); void (*retire)(struct i915_active *ref); - struct llist_head barriers; + struct llist_head preallocated_barriers; }; #endif /* _I915_ACTIVE_TYPES_H_ */ diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h index 2b31a4ee0b4c..a841d3f9bedc 100644 --- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h +++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h @@ -15,6 +15,7 @@ selftest(workarounds, intel_workarounds_live_selftests) selftest(timelines, intel_timeline_live_selftests) selftest(requests, i915_request_live_selftests) selftest(active, i915_active_live_selftests) +selftest(gt_contexts, intel_context_live_selftests) selftest(objects, i915_gem_object_live_selftests) selftest(mman, i915_gem_mman_live_selftests) selftest(dmabuf, i915_gem_dmabuf_live_selftests) @@ -24,7 +25,7 @@ selftest(gtt, i915_gem_gtt_live_selftests) selftest(gem, i915_gem_live_selftests) selftest(evict, i915_gem_evict_live_selftests) selftest(hugepages, i915_gem_huge_page_live_selftests) -selftest(contexts, i915_gem_context_live_selftests) +selftest(gem_contexts, i915_gem_context_live_selftests) selftest(blt, i915_gem_object_blt_live_selftests) selftest(client, i915_gem_client_blt_live_selftests) selftest(reset, intel_reset_live_selftests) -- cgit v1.2.3 From c23ef285fc67f5fd2d7faac4d14d8b8c5a2b23ab Mon Sep 17 00:00:00 2001 From: Guido Günther Date: Fri, 2 Aug 2019 12:55:18 +0200 Subject: drm/imx: Drop unused imx-ipuv3-crtc.o build MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since commit 3d1df96ad468 ("drm/imx: merge imx-drm-core and ipuv3-crtc in one module") the former contents of imx-ipuv3-crtc.o are built via imxdrm-objs. So there's no need to keep an extra entry with a non existing config value (CONFIG_DRM_IMX_IPUV3). 
Fixes: 3d1df96ad468 ("drm/imx: merge imx-drm-core and ipuv3-crtc in one module") Signed-off-by: Guido Günther Reviewed-by: Fabio Estevam Signed-off-by: Philipp Zabel --- drivers/gpu/drm/imx/Makefile | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/imx/Makefile b/drivers/gpu/drm/imx/Makefile index ab6c83caceb7..21cdcc2faabc 100644 --- a/drivers/gpu/drm/imx/Makefile +++ b/drivers/gpu/drm/imx/Makefile @@ -8,5 +8,4 @@ obj-$(CONFIG_DRM_IMX_PARALLEL_DISPLAY) += parallel-display.o obj-$(CONFIG_DRM_IMX_TVE) += imx-tve.o obj-$(CONFIG_DRM_IMX_LDB) += imx-ldb.o -obj-$(CONFIG_DRM_IMX_IPUV3) += imx-ipuv3-crtc.o obj-$(CONFIG_DRM_IMX_HDMI) += dw_hdmi-imx.o -- cgit v1.2.3 From ae0f8f0f9a4b7d2b37adcec54e540f8a03238f3d Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Tue, 30 Jul 2019 14:34:32 +0300 Subject: drm/i915/oa: update the generated files Update the generated files to make the headers self-contained, switch to the kernel preferred SPDX comment format, and update the copyright year. Also add the Makefile stanza to run header tests on the files. Other changes produced by gputop i915-perf-kernelgen.py were manually stripped out, and left to the folks who actually know something about the OA stuff. Cc: Chris Wilson Cc: Lionel Landwerlin Reviewed-by: Chris Wilson Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190730113432.22146-3-jani.nikula@intel.com --- drivers/gpu/drm/i915/oa/Makefile | 3 +++ drivers/gpu/drm/i915/oa/i915_oa_bdw.c | 5 ++--- drivers/gpu/drm/i915/oa/i915_oa_bdw.h | 7 ++++--- drivers/gpu/drm/i915/oa/i915_oa_bxt.c | 5 ++--- drivers/gpu/drm/i915/oa/i915_oa_bxt.h | 7 ++++--- drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c | 5 ++--- drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h | 7 ++++--- drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c | 5 ++--- drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h | 7 ++++--- drivers/gpu/drm/i915/oa/i915_oa_chv.c | 5 ++--- drivers/gpu/drm/i915/oa/i915_oa_chv.h | 7 ++++--- drivers/gpu/drm/i915/oa/i915_oa_cnl.c | 5 ++--- drivers/gpu/drm/i915/oa/i915_oa_cnl.h | 7 ++++--- drivers/gpu/drm/i915/oa/i915_oa_glk.c | 5 ++--- drivers/gpu/drm/i915/oa/i915_oa_glk.h | 7 ++++--- drivers/gpu/drm/i915/oa/i915_oa_hsw.c | 5 ++--- drivers/gpu/drm/i915/oa/i915_oa_hsw.h | 7 ++++--- drivers/gpu/drm/i915/oa/i915_oa_icl.c | 5 ++--- drivers/gpu/drm/i915/oa/i915_oa_icl.h | 7 ++++--- drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c | 5 ++--- drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h | 7 ++++--- drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c | 5 ++--- drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h | 7 ++++--- drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c | 5 ++--- drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h | 7 ++++--- drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c | 5 ++--- drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h | 7 ++++--- drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c | 5 ++--- drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h | 7 ++++--- 29 files changed, 87 insertions(+), 84 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/oa/Makefile b/drivers/gpu/drm/i915/oa/Makefile index 256bfde4a287..df028e2b0d64 100644 --- a/drivers/gpu/drm/i915/oa/Makefile +++ b/drivers/gpu/drm/i915/oa/Makefile @@ -2,3 +2,6 @@ # For building individual subdir files on the command line subdir-ccflags-y += -I$(srctree)/$(src)/.. 
+ +# Extra header tests +header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h diff --git a/drivers/gpu/drm/i915/oa/i915_oa_bdw.c b/drivers/gpu/drm/i915/oa/i915_oa_bdw.c index 4acdb94555b7..440b6b9058da 100644 --- a/drivers/gpu/drm/i915/oa/i915_oa_bdw.c +++ b/drivers/gpu/drm/i915/oa/i915_oa_bdw.c @@ -1,7 +1,6 @@ +// SPDX-License-Identifier: MIT /* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation + * Copyright © 2018-2019 Intel Corporation * * Autogenerated file by GPU Top : https://github.com/rib/gputop * DO NOT EDIT manually! diff --git a/drivers/gpu/drm/i915/oa/i915_oa_bdw.h b/drivers/gpu/drm/i915/oa/i915_oa_bdw.h index b5ed68882588..0cee3334f0a6 100644 --- a/drivers/gpu/drm/i915/oa/i915_oa_bdw.h +++ b/drivers/gpu/drm/i915/oa/i915_oa_bdw.h @@ -1,7 +1,6 @@ +/* SPDX-License-Identifier: MIT */ /* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation + * Copyright © 2018-2019 Intel Corporation * * Autogenerated file by GPU Top : https://github.com/rib/gputop * DO NOT EDIT manually! @@ -10,6 +9,8 @@ #ifndef __I915_OA_BDW_H__ #define __I915_OA_BDW_H__ +struct drm_i915_private; + void i915_perf_load_test_config_bdw(struct drm_i915_private *dev_priv); #endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_bxt.c b/drivers/gpu/drm/i915/oa/i915_oa_bxt.c index a44195c39923..0b268e3fe2ab 100644 --- a/drivers/gpu/drm/i915/oa/i915_oa_bxt.c +++ b/drivers/gpu/drm/i915/oa/i915_oa_bxt.c @@ -1,7 +1,6 @@ +// SPDX-License-Identifier: MIT /* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation + * Copyright © 2018-2019 Intel Corporation * * Autogenerated file by GPU Top : https://github.com/rib/gputop * DO NOT EDIT manually! diff --git a/drivers/gpu/drm/i915/oa/i915_oa_bxt.h b/drivers/gpu/drm/i915/oa/i915_oa_bxt.h index 43c3e4ab030a..0bdf391323ec 100644 --- a/drivers/gpu/drm/i915/oa/i915_oa_bxt.h +++ b/drivers/gpu/drm/i915/oa/i915_oa_bxt.h @@ -1,7 +1,6 @@ +/* SPDX-License-Identifier: MIT */ /* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation + * Copyright © 2018-2019 Intel Corporation * * Autogenerated file by GPU Top : https://github.com/rib/gputop * DO NOT EDIT manually! @@ -10,6 +9,8 @@ #ifndef __I915_OA_BXT_H__ #define __I915_OA_BXT_H__ +struct drm_i915_private; + void i915_perf_load_test_config_bxt(struct drm_i915_private *dev_priv); #endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c b/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c index 7f60d51b8761..89542d30c55c 100644 --- a/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c +++ b/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c @@ -1,7 +1,6 @@ +// SPDX-License-Identifier: MIT /* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation + * Copyright © 2018-2019 Intel Corporation * * Autogenerated file by GPU Top : https://github.com/rib/gputop * DO NOT EDIT manually! diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h b/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h index 1b4b563bc585..6b862280ab78 100644 --- a/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h +++ b/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h @@ -1,7 +1,6 @@ +/* SPDX-License-Identifier: MIT */ /* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation + * Copyright © 2018-2019 Intel Corporation * * Autogenerated file by GPU Top : https://github.com/rib/gputop * DO NOT EDIT manually! 
@@ -10,6 +9,8 @@ #ifndef __I915_OA_CFLGT2_H__ #define __I915_OA_CFLGT2_H__ +struct drm_i915_private; + void i915_perf_load_test_config_cflgt2(struct drm_i915_private *dev_priv); #endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c b/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c index a92c38e3a0ce..51b118b03716 100644 --- a/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c +++ b/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c @@ -1,7 +1,6 @@ +// SPDX-License-Identifier: MIT /* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation + * Copyright © 2018-2019 Intel Corporation * * Autogenerated file by GPU Top : https://github.com/rib/gputop * DO NOT EDIT manually! diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h b/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h index 500565e055cd..4ca9d8f89b2f 100644 --- a/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h +++ b/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h @@ -1,7 +1,6 @@ +/* SPDX-License-Identifier: MIT */ /* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation + * Copyright © 2018-2019 Intel Corporation * * Autogenerated file by GPU Top : https://github.com/rib/gputop * DO NOT EDIT manually! @@ -10,6 +9,8 @@ #ifndef __I915_OA_CFLGT3_H__ #define __I915_OA_CFLGT3_H__ +struct drm_i915_private; + void i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv); #endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_chv.c b/drivers/gpu/drm/i915/oa/i915_oa_chv.c index 71ec889a0114..c70c5af8a765 100644 --- a/drivers/gpu/drm/i915/oa/i915_oa_chv.c +++ b/drivers/gpu/drm/i915/oa/i915_oa_chv.c @@ -1,7 +1,6 @@ +// SPDX-License-Identifier: MIT /* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation + * Copyright © 2018-2019 Intel Corporation * * Autogenerated file by GPU Top : https://github.com/rib/gputop * DO NOT EDIT manually! diff --git a/drivers/gpu/drm/i915/oa/i915_oa_chv.h b/drivers/gpu/drm/i915/oa/i915_oa_chv.h index ad85d6a6a573..3cac7bbc9c71 100644 --- a/drivers/gpu/drm/i915/oa/i915_oa_chv.h +++ b/drivers/gpu/drm/i915/oa/i915_oa_chv.h @@ -1,7 +1,6 @@ +/* SPDX-License-Identifier: MIT */ /* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation + * Copyright © 2018-2019 Intel Corporation * * Autogenerated file by GPU Top : https://github.com/rib/gputop * DO NOT EDIT manually! @@ -10,6 +9,8 @@ #ifndef __I915_OA_CHV_H__ #define __I915_OA_CHV_H__ +struct drm_i915_private; + void i915_perf_load_test_config_chv(struct drm_i915_private *dev_priv); #endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cnl.c b/drivers/gpu/drm/i915/oa/i915_oa_cnl.c index 5c23d883d6c9..6d13983600e7 100644 --- a/drivers/gpu/drm/i915/oa/i915_oa_cnl.c +++ b/drivers/gpu/drm/i915/oa/i915_oa_cnl.c @@ -1,7 +1,6 @@ +// SPDX-License-Identifier: MIT /* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation + * Copyright © 2018-2019 Intel Corporation * * Autogenerated file by GPU Top : https://github.com/rib/gputop * DO NOT EDIT manually! diff --git a/drivers/gpu/drm/i915/oa/i915_oa_cnl.h b/drivers/gpu/drm/i915/oa/i915_oa_cnl.h index 9faaca38b587..db379f5fcbb9 100644 --- a/drivers/gpu/drm/i915/oa/i915_oa_cnl.h +++ b/drivers/gpu/drm/i915/oa/i915_oa_cnl.h @@ -1,7 +1,6 @@ +/* SPDX-License-Identifier: MIT */ /* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation + * Copyright © 2018-2019 Intel Corporation * * Autogenerated file by GPU Top : https://github.com/rib/gputop * DO NOT EDIT manually! 
@@ -10,6 +9,8 @@ #ifndef __I915_OA_CNL_H__ #define __I915_OA_CNL_H__ +struct drm_i915_private; + void i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv); #endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_glk.c b/drivers/gpu/drm/i915/oa/i915_oa_glk.c index 4bdda66df7d2..668c9aa15bc5 100644 --- a/drivers/gpu/drm/i915/oa/i915_oa_glk.c +++ b/drivers/gpu/drm/i915/oa/i915_oa_glk.c @@ -1,7 +1,6 @@ +// SPDX-License-Identifier: MIT /* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation + * Copyright © 2018-2019 Intel Corporation * * Autogenerated file by GPU Top : https://github.com/rib/gputop * DO NOT EDIT manually! diff --git a/drivers/gpu/drm/i915/oa/i915_oa_glk.h b/drivers/gpu/drm/i915/oa/i915_oa_glk.h index cc13a1e9fd3e..779f343efd11 100644 --- a/drivers/gpu/drm/i915/oa/i915_oa_glk.h +++ b/drivers/gpu/drm/i915/oa/i915_oa_glk.h @@ -1,7 +1,6 @@ +/* SPDX-License-Identifier: MIT */ /* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation + * Copyright © 2018-2019 Intel Corporation * * Autogenerated file by GPU Top : https://github.com/rib/gputop * DO NOT EDIT manually! @@ -10,6 +9,8 @@ #ifndef __I915_OA_GLK_H__ #define __I915_OA_GLK_H__ +struct drm_i915_private; + void i915_perf_load_test_config_glk(struct drm_i915_private *dev_priv); #endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_hsw.c b/drivers/gpu/drm/i915/oa/i915_oa_hsw.c index cc6526fdd2bd..2e10a73127a2 100644 --- a/drivers/gpu/drm/i915/oa/i915_oa_hsw.c +++ b/drivers/gpu/drm/i915/oa/i915_oa_hsw.c @@ -1,7 +1,6 @@ +// SPDX-License-Identifier: MIT /* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation + * Copyright © 2018-2019 Intel Corporation * * Autogenerated file by GPU Top : https://github.com/rib/gputop * DO NOT EDIT manually! diff --git a/drivers/gpu/drm/i915/oa/i915_oa_hsw.h b/drivers/gpu/drm/i915/oa/i915_oa_hsw.h index f0ddcc79c761..ba97f732f136 100644 --- a/drivers/gpu/drm/i915/oa/i915_oa_hsw.h +++ b/drivers/gpu/drm/i915/oa/i915_oa_hsw.h @@ -1,7 +1,6 @@ +/* SPDX-License-Identifier: MIT */ /* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation + * Copyright © 2018-2019 Intel Corporation * * Autogenerated file by GPU Top : https://github.com/rib/gputop * DO NOT EDIT manually! @@ -10,6 +9,8 @@ #ifndef __I915_OA_HSW_H__ #define __I915_OA_HSW_H__ +struct drm_i915_private; + void i915_perf_load_test_config_hsw(struct drm_i915_private *dev_priv); #endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_icl.c b/drivers/gpu/drm/i915/oa/i915_oa_icl.c index baa51427a543..6dc388de4518 100644 --- a/drivers/gpu/drm/i915/oa/i915_oa_icl.c +++ b/drivers/gpu/drm/i915/oa/i915_oa_icl.c @@ -1,7 +1,6 @@ +// SPDX-License-Identifier: MIT /* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation + * Copyright © 2018-2019 Intel Corporation * * Autogenerated file by GPU Top : https://github.com/rib/gputop * DO NOT EDIT manually! diff --git a/drivers/gpu/drm/i915/oa/i915_oa_icl.h b/drivers/gpu/drm/i915/oa/i915_oa_icl.h index e501651d385b..5c64112d720e 100644 --- a/drivers/gpu/drm/i915/oa/i915_oa_icl.h +++ b/drivers/gpu/drm/i915/oa/i915_oa_icl.h @@ -1,7 +1,6 @@ +/* SPDX-License-Identifier: MIT */ /* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation + * Copyright © 2018-2019 Intel Corporation * * Autogenerated file by GPU Top : https://github.com/rib/gputop * DO NOT EDIT manually! 
@@ -10,6 +9,8 @@ #ifndef __I915_OA_ICL_H__ #define __I915_OA_ICL_H__ +struct drm_i915_private; + void i915_perf_load_test_config_icl(struct drm_i915_private *dev_priv); #endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c b/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c index 168e49ab0d4d..45b76366c4f3 100644 --- a/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c +++ b/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c @@ -1,7 +1,6 @@ +// SPDX-License-Identifier: MIT /* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation + * Copyright © 2018-2019 Intel Corporation * * Autogenerated file by GPU Top : https://github.com/rib/gputop * DO NOT EDIT manually! diff --git a/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h b/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h index dc460e6e0fae..810532fa6b63 100644 --- a/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h +++ b/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h @@ -1,7 +1,6 @@ +/* SPDX-License-Identifier: MIT */ /* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation + * Copyright © 2018-2019 Intel Corporation * * Autogenerated file by GPU Top : https://github.com/rib/gputop * DO NOT EDIT manually! @@ -10,6 +9,8 @@ #ifndef __I915_OA_KBLGT2_H__ #define __I915_OA_KBLGT2_H__ +struct drm_i915_private; + void i915_perf_load_test_config_kblgt2(struct drm_i915_private *dev_priv); #endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c b/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c index 6ffa553c388e..35a340f48353 100644 --- a/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c +++ b/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c @@ -1,7 +1,6 @@ +// SPDX-License-Identifier: MIT /* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation + * Copyright © 2018-2019 Intel Corporation * * Autogenerated file by GPU Top : https://github.com/rib/gputop * DO NOT EDIT manually! diff --git a/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h b/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h index 5926992b735a..13d70456fabd 100644 --- a/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h +++ b/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h @@ -1,7 +1,6 @@ +/* SPDX-License-Identifier: MIT */ /* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation + * Copyright © 2018-2019 Intel Corporation * * Autogenerated file by GPU Top : https://github.com/rib/gputop * DO NOT EDIT manually! @@ -10,6 +9,8 @@ #ifndef __I915_OA_KBLGT3_H__ #define __I915_OA_KBLGT3_H__ +struct drm_i915_private; + void i915_perf_load_test_config_kblgt3(struct drm_i915_private *dev_priv); #endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c b/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c index 7ce6ee851d43..309bb49d234c 100644 --- a/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c +++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c @@ -1,7 +1,6 @@ +// SPDX-License-Identifier: MIT /* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation + * Copyright © 2018-2019 Intel Corporation * * Autogenerated file by GPU Top : https://github.com/rib/gputop * DO NOT EDIT manually! diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h b/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h index 353db35b36c1..fda70c51a6ec 100644 --- a/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h +++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h @@ -1,7 +1,6 @@ +/* SPDX-License-Identifier: MIT */ /* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation + * Copyright © 2018-2019 Intel Corporation * * Autogenerated file by GPU Top : https://github.com/rib/gputop * DO NOT EDIT manually! 
@@ -10,6 +9,8 @@ #ifndef __I915_OA_SKLGT2_H__ #define __I915_OA_SKLGT2_H__ +struct drm_i915_private; + void i915_perf_load_test_config_sklgt2(struct drm_i915_private *dev_priv); #endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c b/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c index 086ca2631e1c..4096e3ba5bb5 100644 --- a/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c +++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c @@ -1,7 +1,6 @@ +// SPDX-License-Identifier: MIT /* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation + * Copyright © 2018-2019 Intel Corporation * * Autogenerated file by GPU Top : https://github.com/rib/gputop * DO NOT EDIT manually! diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h b/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h index 52f94c674b62..df74eba5799e 100644 --- a/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h +++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h @@ -1,7 +1,6 @@ +/* SPDX-License-Identifier: MIT */ /* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation + * Copyright © 2018-2019 Intel Corporation * * Autogenerated file by GPU Top : https://github.com/rib/gputop * DO NOT EDIT manually! @@ -10,6 +9,8 @@ #ifndef __I915_OA_SKLGT3_H__ #define __I915_OA_SKLGT3_H__ +struct drm_i915_private; + void i915_perf_load_test_config_sklgt3(struct drm_i915_private *dev_priv); #endif diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c b/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c index b291a6eb8a87..b9575b04f752 100644 --- a/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c +++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c @@ -1,7 +1,6 @@ +// SPDX-License-Identifier: MIT /* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation + * Copyright © 2018-2019 Intel Corporation * * Autogenerated file by GPU Top : https://github.com/rib/gputop * DO NOT EDIT manually! diff --git a/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h b/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h index 8e364820cc63..378ab7ab78d5 100644 --- a/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h +++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h @@ -1,7 +1,6 @@ +/* SPDX-License-Identifier: MIT */ /* - * SPDX-License-Identifier: MIT - * - * Copyright © 2018 Intel Corporation + * Copyright © 2018-2019 Intel Corporation * * Autogenerated file by GPU Top : https://github.com/rib/gputop * DO NOT EDIT manually! @@ -10,6 +9,8 @@ #ifndef __I915_OA_SKLGT4_H__ #define __I915_OA_SKLGT4_H__ +struct drm_i915_private; + void i915_perf_load_test_config_sklgt4(struct drm_i915_private *dev_priv); #endif -- cgit v1.2.3 From e4661f144497f588b6baa6f00d7f3e4d58b29077 Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Fri, 2 Aug 2019 12:47:39 +0000 Subject: drm/i915: Fix documentation for __intel_wait_for_register_fw* Use section name "Return" and proper error code -ETIMEDOUT Signed-off-by: Michal Wajdeczko Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190802124739.12548-1-michal.wajdeczko@intel.com --- drivers/gpu/drm/i915/intel_uncore.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 2b839acfa0f6..4e9c15b60e93 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -1860,7 +1860,7 @@ int i915_reg_read_ioctl(struct drm_device *dev, * wish to wait without holding forcewake for the duration (i.e. you expect * the wait to be slow). 
* - * Returns 0 if the register matches the desired condition, or -ETIMEOUT. + * Return: 0 if the register matches the desired condition, or -ETIMEDOUT. */ int __intel_wait_for_register_fw(struct intel_uncore *uncore, i915_reg_t reg, @@ -1908,7 +1908,7 @@ int __intel_wait_for_register_fw(struct intel_uncore *uncore, * * Otherwise, the wait will timeout after @timeout_ms milliseconds. * - * Returns 0 if the register matches the desired condition, or -ETIMEOUT. + * Return: 0 if the register matches the desired condition, or -ETIMEDOUT. */ int __intel_wait_for_register(struct intel_uncore *uncore, i915_reg_t reg, -- cgit v1.2.3 From cd2a4eaf8c79daa41bdeb7251dbf66413291fd70 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 30 Jul 2019 21:58:05 +0100 Subject: drm/i915: Report resv_obj allocation failure Since commit 64d6c500a384 ("drm/i915: Generalise GPU activity tracking"), we have been prepared for i915_vma_move_to_active() to fail. We can take advantage of this to report the failure for allocating the shared-fence slot in the reservation_object. Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190730205805.3733-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_vma.c | 31 +++++++++---------------------- 1 file changed, 9 insertions(+), 22 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index eb16a1a93bbc..7734d6218ce7 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -886,23 +886,6 @@ void i915_vma_revoke_mmap(struct i915_vma *vma) list_del(&vma->obj->userfault_link); } -static void export_fence(struct i915_vma *vma, - struct i915_request *rq, - unsigned int flags) -{ - struct reservation_object *resv = vma->resv; - - /* - * Ignore errors from failing to allocate the new fence, we can't - * handle an error right now. Worst case should be missed - * synchronisation leading to rendering corruption. 
- */ - if (flags & EXEC_OBJECT_WRITE) - reservation_object_add_excl_fence(resv, &rq->fence); - else if (reservation_object_reserve_shared(resv, 1) == 0) - reservation_object_add_shared_fence(resv, &rq->fence); -} - int i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq, unsigned int flags) @@ -926,14 +909,20 @@ int i915_vma_move_to_active(struct i915_vma *vma, if (unlikely(err)) return err; - obj->write_domain = 0; if (flags & EXEC_OBJECT_WRITE) { - obj->write_domain = I915_GEM_DOMAIN_RENDER; - if (intel_fb_obj_invalidate(obj, ORIGIN_CS)) __i915_active_request_set(&obj->frontbuffer_write, rq); + reservation_object_add_excl_fence(vma->resv, &rq->fence); + obj->write_domain = I915_GEM_DOMAIN_RENDER; obj->read_domains = 0; + } else { + err = reservation_object_reserve_shared(vma->resv, 1); + if (unlikely(err)) + return err; + + reservation_object_add_shared_fence(vma->resv, &rq->fence); + obj->write_domain = 0; } obj->read_domains |= I915_GEM_GPU_DOMAINS; obj->mm.dirty = true; @@ -941,8 +930,6 @@ int i915_vma_move_to_active(struct i915_vma *vma, if (flags & EXEC_OBJECT_NEEDS_FENCE) __i915_active_request_set(&vma->last_fence, rq); - export_fence(vma, rq, flags); - GEM_BUG_ON(!i915_vma_is_active(vma)); return 0; } -- cgit v1.2.3 From 63dc6e63e682cf756ab8c18aa1b85b0efb358dad Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 1 Aug 2019 13:44:58 +0100 Subject: Revert "drm/vgem: fix cache synchronization on arm/arm64" commit 7e9e5ead55be ("drm/vgem: fix cache synchronization on arm/arm64") broke all of the !llc i915-vgem coherency tests in CI, and left the HW very, very unhappy (which is even more scary). Fixes: 7e9e5ead55be ("drm/vgem: fix cache synchronization on arm/arm64") Signed-off-by: Chris Wilson Cc: Daniel Vetter Cc: Rob Clark Cc: Sean Paul Acked-by: Sean Paul Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20190801124458.24949-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/vgem/vgem_drv.c | 130 +++++++++++++++------------------------- 1 file changed, 47 insertions(+), 83 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c index fc04803ff403..11a8f99ba18c 100644 --- a/drivers/gpu/drm/vgem/vgem_drv.c +++ b/drivers/gpu/drm/vgem/vgem_drv.c @@ -47,16 +47,10 @@ static struct vgem_device { struct platform_device *platform; } *vgem_device; -static void sync_and_unpin(struct drm_vgem_gem_object *bo); -static struct page **pin_and_sync(struct drm_vgem_gem_object *bo); - static void vgem_gem_free_object(struct drm_gem_object *obj) { struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj); - if (!obj->import_attach) - sync_and_unpin(vgem_obj); - kvfree(vgem_obj->pages); mutex_destroy(&vgem_obj->pages_lock); @@ -84,15 +78,40 @@ static vm_fault_t vgem_gem_fault(struct vm_fault *vmf) return VM_FAULT_SIGBUS; mutex_lock(&obj->pages_lock); - if (!obj->pages) - pin_and_sync(obj); if (obj->pages) { get_page(obj->pages[page_offset]); vmf->page = obj->pages[page_offset]; ret = 0; } mutex_unlock(&obj->pages_lock); + if (ret) { + struct page *page; + + page = shmem_read_mapping_page( + file_inode(obj->base.filp)->i_mapping, + page_offset); + if (!IS_ERR(page)) { + vmf->page = page; + ret = 0; + } else switch (PTR_ERR(page)) { + case -ENOSPC: + case -ENOMEM: + ret = VM_FAULT_OOM; + break; + case -EBUSY: + ret = VM_FAULT_RETRY; + break; + case -EFAULT: + case -EINVAL: + ret = VM_FAULT_SIGBUS; + break; + default: + WARN_ON(PTR_ERR(page)); + ret = VM_FAULT_SIGBUS; + break; + } + } return 
ret; } @@ -258,93 +277,32 @@ static const struct file_operations vgem_driver_fops = { .release = drm_release, }; -/* Called under pages_lock, except in free path (where it can't race): */ -static void sync_and_unpin(struct drm_vgem_gem_object *bo) -{ - struct drm_device *dev = bo->base.dev; - - if (bo->table) { - dma_sync_sg_for_cpu(dev->dev, bo->table->sgl, - bo->table->nents, DMA_BIDIRECTIONAL); - sg_free_table(bo->table); - kfree(bo->table); - bo->table = NULL; - } - - if (bo->pages) { - drm_gem_put_pages(&bo->base, bo->pages, true, true); - bo->pages = NULL; - } -} - -static struct page **pin_and_sync(struct drm_vgem_gem_object *bo) -{ - struct drm_device *dev = bo->base.dev; - int npages = bo->base.size >> PAGE_SHIFT; - struct page **pages; - struct sg_table *sgt; - - WARN_ON(!mutex_is_locked(&bo->pages_lock)); - - pages = drm_gem_get_pages(&bo->base); - if (IS_ERR(pages)) { - bo->pages_pin_count--; - mutex_unlock(&bo->pages_lock); - return pages; - } - - sgt = drm_prime_pages_to_sg(pages, npages); - if (IS_ERR(sgt)) { - dev_err(dev->dev, - "failed to allocate sgt: %ld\n", - PTR_ERR(bo->table)); - drm_gem_put_pages(&bo->base, pages, false, false); - mutex_unlock(&bo->pages_lock); - return ERR_CAST(bo->table); - } - - /* - * Flush the object from the CPU cache so that importers - * can rely on coherent indirect access via the exported - * dma-address. - */ - dma_sync_sg_for_device(dev->dev, sgt->sgl, - sgt->nents, DMA_BIDIRECTIONAL); - - bo->pages = pages; - bo->table = sgt; - - return pages; -} - static struct page **vgem_pin_pages(struct drm_vgem_gem_object *bo) { - struct page **pages; - mutex_lock(&bo->pages_lock); - if (bo->pages_pin_count++ == 0 && !bo->pages) { - pages = pin_and_sync(bo); - } else { - WARN_ON(!bo->pages); - pages = bo->pages; + if (bo->pages_pin_count++ == 0) { + struct page **pages; + + pages = drm_gem_get_pages(&bo->base); + if (IS_ERR(pages)) { + bo->pages_pin_count--; + mutex_unlock(&bo->pages_lock); + return pages; + } + + bo->pages = pages; } mutex_unlock(&bo->pages_lock); - return pages; + return bo->pages; } static void vgem_unpin_pages(struct drm_vgem_gem_object *bo) { - /* - * We shouldn't hit this for imported bo's.. in the import - * case we don't own the scatter-table - */ - WARN_ON(bo->base.import_attach); - mutex_lock(&bo->pages_lock); if (--bo->pages_pin_count == 0) { - WARN_ON(!bo->table); - sync_and_unpin(bo); + drm_gem_put_pages(&bo->base, bo->pages, true, true); + bo->pages = NULL; } mutex_unlock(&bo->pages_lock); } @@ -352,12 +310,18 @@ static void vgem_unpin_pages(struct drm_vgem_gem_object *bo) static int vgem_prime_pin(struct drm_gem_object *obj) { struct drm_vgem_gem_object *bo = to_vgem_bo(obj); + long n_pages = obj->size >> PAGE_SHIFT; struct page **pages; pages = vgem_pin_pages(bo); if (IS_ERR(pages)) return PTR_ERR(pages); + /* Flush the object from the CPU cache so that importers can rely + * on coherent indirect access via the exported dma-address. + */ + drm_clflush_pages(pages, n_pages); + return 0; } -- cgit v1.2.3 From 67194518cb1979d775e10089d00c015d6737c194 Mon Sep 17 00:00:00 2001 From: Kevin Wang Date: Wed, 31 Jul 2019 15:37:07 +0800 Subject: drm/amd/powerplay: sort feature status index by asic feature id for smu before this change, the pp_feature sysfs show feature enable state by logic feature id, it is not easy to read. this change will sort pp_features show index by asic feature id. before: features high: 0x00000623 low: 0xb3cdaffb 00. DPM_PREFETCHER ( 0) : enabeld 01. DPM_GFXCLK ( 1) : enabeld 02. 
DPM_UCLK ( 3) : enabeld 03. DPM_SOCCLK ( 4) : enabeld 04. DPM_MP0CLK ( 5) : enabeld 05. DPM_LINK ( 6) : enabeld 06. DPM_DCEFCLK ( 7) : enabeld 07. DS_GFXCLK (10) : enabeld 08. DS_SOCCLK (11) : enabeld 09. DS_LCLK (12) : disabled 10. PPT (23) : enabeld 11. TDC (24) : enabeld 12. THERMAL (33) : enabeld 13. RM (35) : disabled ...... after: features high: 0x00000623 low: 0xb3cdaffb 00. DPM_PREFETCHER ( 0) : enabeld 01. DPM_GFXCLK ( 1) : enabeld 02. DPM_GFX_PACE ( 2) : disabled 03. DPM_UCLK ( 3) : enabeld 04. DPM_SOCCLK ( 4) : enabeld 05. DPM_MP0CLK ( 5) : enabeld 06. DPM_LINK ( 6) : enabeld 07. DPM_DCEFCLK ( 7) : enabeld 08. MEM_VDDCI_SCALING ( 8) : enabeld 09. MEM_MVDD_SCALING ( 9) : enabeld 10. DS_GFXCLK (10) : enabeld 11. DS_SOCCLK (11) : enabeld 12. DS_LCLK (12) : disabled 13. DS_DCEFCLK (13) : enabeld ...... Signed-off-by: Kevin Wang Reviewed-by: Kenneth Feng Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 13b2c8a60232..7414ed9a5013 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -63,6 +63,8 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf) uint32_t feature_mask[2] = { 0 }; int32_t feature_index = 0; uint32_t count = 0; + uint32_t sort_feature[SMU_FEATURE_COUNT]; + uint64_t hw_feature_count = 0; ret = smu_feature_get_enabled_mask(smu, feature_mask, 2); if (ret) @@ -75,11 +77,17 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf) feature_index = smu_feature_get_index(smu, i); if (feature_index < 0) continue; + sort_feature[feature_index] = i; + hw_feature_count++; + } + + for (i = 0; i < hw_feature_count; i++) { size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n", count++, - smu_get_feature_name(smu, i), - feature_index, - !!smu_feature_is_enabled(smu, i) ? "enabeld" : "disabled"); + smu_get_feature_name(smu, sort_feature[i]), + i, + !!smu_feature_is_enabled(smu, sort_feature[i]) ? + "enabeld" : "disabled"); } failed: -- cgit v1.2.3 From 4cd4c5c064bd5cb9045bab3ed79bce292504d5b8 Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Tue, 30 Jul 2019 17:21:19 +0800 Subject: drm/amdgpu: cleanup vega10 SRIOV code path we can simplify all those unnecessary function under SRIOV for vega10 since: 1) PSP L1 policy is by force enabled in SRIOV 2) original logic always set all flags which make itself a dummy step besides, 1) the ih_doorbell_range set should also be skipped for VEGA10 SRIOV. 2) the gfx_common registers should also be skipped for VEGA10 SRIOV. 
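In other words, once L1 security is taken as always enabled for Vega10 VFs, the per-feature reg_access_mode flags carry no information beyond whether the function is a VF, and the checks collapse into the existing amdgpu_sriov_vf()/amdgpu_sriov_runtime() helpers. A rough sketch of the resulting guard shape (simplified, not the exact driver code):

    static void example_init_golden_registers(struct amdgpu_device *adev)
    {
            /* the hypervisor owns these registers, so a VF must not touch them */
            if (amdgpu_sriov_vf(adev))
                    return;

            /* bare-metal path: program the golden settings as before */
    }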
Signed-off-by: Monk Liu Reviewed-by: Emily Deng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 -- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 45 ------------------------------ drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 13 --------- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 17 ++++++----- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 10 +++---- drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 15 ---------- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 17 ++++++----- drivers/gpu/drm/amd/amdgpu/soc15.c | 11 +++----- drivers/gpu/drm/amd/amdgpu/soc15_common.h | 5 ++-- drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 18 ++++++------ 11 files changed, 38 insertions(+), 118 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 6940600ebf0e..0e284294b27a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1643,9 +1643,6 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) r = amdgpu_virt_request_full_gpu(adev, true); if (r) return -EAGAIN; - - /* query the reg access mode at the very beginning */ - amdgpu_virt_init_reg_access_mode(adev); } adev->pm.pp_feature = amdgpu_pp_feature_mask; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 59dd204498c5..e32ae906d797 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -430,48 +430,3 @@ uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest) return clk; } - -void amdgpu_virt_init_reg_access_mode(struct amdgpu_device *adev) -{ - struct amdgpu_virt *virt = &adev->virt; - - if (virt->ops && virt->ops->init_reg_access_mode) - virt->ops->init_reg_access_mode(adev); -} - -bool amdgpu_virt_support_psp_prg_ih_reg(struct amdgpu_device *adev) -{ - bool ret = false; - struct amdgpu_virt *virt = &adev->virt; - - if (amdgpu_sriov_vf(adev) - && (virt->reg_access_mode & AMDGPU_VIRT_REG_ACCESS_PSP_PRG_IH)) - ret = true; - - return ret; -} - -bool amdgpu_virt_support_rlc_prg_reg(struct amdgpu_device *adev) -{ - bool ret = false; - struct amdgpu_virt *virt = &adev->virt; - - if (amdgpu_sriov_vf(adev) - && (virt->reg_access_mode & AMDGPU_VIRT_REG_ACCESS_RLC) - && !(amdgpu_sriov_runtime(adev))) - ret = true; - - return ret; -} - -bool amdgpu_virt_support_skip_setting(struct amdgpu_device *adev) -{ - bool ret = false; - struct amdgpu_virt *virt = &adev->virt; - - if (amdgpu_sriov_vf(adev) - && (virt->reg_access_mode & AMDGPU_VIRT_REG_SKIP_SEETING)) - ret = true; - - return ret; -} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index f5107731e9c4..b0b2bdc750df 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -48,12 +48,6 @@ struct amdgpu_vf_error_buffer { uint64_t data[AMDGPU_VF_ERROR_ENTRY_SIZE]; }; -/* According to the fw feature, some new reg access modes are supported */ -#define AMDGPU_VIRT_REG_ACCESS_LEGACY (1 << 0) /* directly mmio */ -#define AMDGPU_VIRT_REG_ACCESS_PSP_PRG_IH (1 << 1) /* by PSP */ -#define AMDGPU_VIRT_REG_ACCESS_RLC (1 << 2) /* by RLC */ -#define AMDGPU_VIRT_REG_SKIP_SEETING (1 << 3) /* Skip setting reg */ - /** * struct amdgpu_virt_ops - amdgpu device virt operations */ @@ -65,7 +59,6 @@ struct amdgpu_virt_ops { void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3); int 
(*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf); int (*force_dpm_level)(struct amdgpu_device *adev, u32 level); - void (*init_reg_access_mode)(struct amdgpu_device *adev); }; /* @@ -315,10 +308,4 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size, void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev); uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest); uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest); - -void amdgpu_virt_init_reg_access_mode(struct amdgpu_device *adev); -bool amdgpu_virt_support_psp_prg_ih_reg(struct amdgpu_device *adev); -bool amdgpu_virt_support_rlc_prg_reg(struct amdgpu_device *adev); -bool amdgpu_virt_support_skip_setting(struct amdgpu_device *adev); - #endif diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 6756fc70b537..02bbe109cfe0 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -715,14 +715,12 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev) { switch (adev->asic_type) { case CHIP_VEGA10: - if (!amdgpu_virt_support_skip_setting(adev)) { - soc15_program_register_sequence(adev, - golden_settings_gc_9_0, - ARRAY_SIZE(golden_settings_gc_9_0)); - soc15_program_register_sequence(adev, - golden_settings_gc_9_0_vg10, - ARRAY_SIZE(golden_settings_gc_9_0_vg10)); - } + soc15_program_register_sequence(adev, + golden_settings_gc_9_0, + ARRAY_SIZE(golden_settings_gc_9_0)); + soc15_program_register_sequence(adev, + golden_settings_gc_9_0_vg10, + ARRAY_SIZE(golden_settings_gc_9_0_vg10)); break; case CHIP_VEGA12: soc15_program_register_sequence(adev, @@ -3801,7 +3799,8 @@ static int gfx_v9_0_hw_init(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - gfx_v9_0_init_golden_registers(adev); + if (!amdgpu_sriov_vf(adev)) + gfx_v9_0_init_golden_registers(adev); gfx_v9_0_constants_init(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 06fca08b6513..7c4d9d99c6d1 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1201,7 +1201,7 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_VEGA10: - if (amdgpu_virt_support_skip_setting(adev)) + if (amdgpu_sriov_vf(adev)) break; /* fall through */ case CHIP_VEGA20: diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index 3abd02bd5222..292f3b1cddf2 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -111,7 +111,7 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev) WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); - if (amdgpu_virt_support_skip_setting(adev)) + if (amdgpu_sriov_vf(adev)) return; /* Set default page address. 
*/ @@ -159,7 +159,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev) { uint32_t tmp; - if (amdgpu_virt_support_skip_setting(adev)) + if (amdgpu_sriov_vf(adev)) return; /* Setup L2 cache */ @@ -208,7 +208,7 @@ static void mmhub_v1_0_enable_system_domain(struct amdgpu_device *adev) static void mmhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev) { - if (amdgpu_virt_support_skip_setting(adev)) + if (amdgpu_sriov_vf(adev)) return; WREG32_SOC15(MMHUB, 0, mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32, @@ -348,7 +348,7 @@ void mmhub_v1_0_gart_disable(struct amdgpu_device *adev) 0); WREG32_SOC15(MMHUB, 0, mmMC_VM_MX_L1_TLB_CNTL, tmp); - if (!amdgpu_virt_support_skip_setting(adev)) { + if (!amdgpu_sriov_vf(adev)) { /* Setup L2 cache */ tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL); tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0); @@ -367,7 +367,7 @@ void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value) { u32 tmp; - if (amdgpu_virt_support_skip_setting(adev)) + if (amdgpu_sriov_vf(adev)) return; tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL); diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index 235548c0b41f..cc5bf595f9b1 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -449,20 +449,6 @@ void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev) amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0); } -static void xgpu_ai_init_reg_access_mode(struct amdgpu_device *adev) -{ - adev->virt.reg_access_mode = AMDGPU_VIRT_REG_ACCESS_LEGACY; - - /* Enable L1 security reg access mode by defaul, as non-security VF - * will no longer be supported. - */ - adev->virt.reg_access_mode |= AMDGPU_VIRT_REG_ACCESS_RLC; - - adev->virt.reg_access_mode |= AMDGPU_VIRT_REG_ACCESS_PSP_PRG_IH; - - adev->virt.reg_access_mode |= AMDGPU_VIRT_REG_SKIP_SEETING; -} - const struct amdgpu_virt_ops xgpu_ai_virt_ops = { .req_full_gpu = xgpu_ai_request_full_gpu_access, .rel_full_gpu = xgpu_ai_release_full_gpu_access, @@ -471,5 +457,4 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = { .trans_msg = xgpu_ai_mailbox_trans_msg, .get_pp_clk = xgpu_ai_get_pp_clk, .force_dpm_level = xgpu_ai_force_dpm_level, - .init_reg_access_mode = xgpu_ai_init_reg_access_mode, }; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index dda0b8d005f8..61d995dd6a28 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -330,14 +330,12 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev) { switch (adev->asic_type) { case CHIP_VEGA10: - if (!amdgpu_virt_support_skip_setting(adev)) { - soc15_program_register_sequence(adev, - golden_settings_sdma_4, - ARRAY_SIZE(golden_settings_sdma_4)); - soc15_program_register_sequence(adev, - golden_settings_sdma_vg10, - ARRAY_SIZE(golden_settings_sdma_vg10)); - } + soc15_program_register_sequence(adev, + golden_settings_sdma_4, + ARRAY_SIZE(golden_settings_sdma_4)); + soc15_program_register_sequence(adev, + golden_settings_sdma_vg10, + ARRAY_SIZE(golden_settings_sdma_vg10)); break; case CHIP_VEGA12: soc15_program_register_sequence(adev, @@ -1833,7 +1831,8 @@ static int sdma_v4_0_hw_init(void *handle) adev->powerplay.pp_funcs->set_powergating_by_smu) amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false); - sdma_v4_0_init_golden_registers(adev); + if (!amdgpu_sriov_vf(adev)) + sdma_v4_0_init_golden_registers(adev); r = sdma_v4_0_start(adev); 
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index bac232f9c627..e604d46d5f12 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -1123,21 +1123,18 @@ static void soc15_doorbell_range_init(struct amdgpu_device *adev) int i; struct amdgpu_ring *ring; - /* Two reasons to skip - * 1, Host driver already programmed them - * 2, To avoid registers program violations in SR-IOV - */ - if (!amdgpu_virt_support_skip_setting(adev)) { + /* sdma/ih doorbell range are programed by hypervisor */ + if (!amdgpu_sriov_vf(adev)) { for (i = 0; i < adev->sdma.num_instances; i++) { ring = &adev->sdma.instance[i].ring; adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, ring->doorbell_index, adev->doorbell_index.sdma_doorbell_range); } - } - adev->nbio_funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell, + adev->nbio_funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell, adev->irq.ih.doorbell_index); + } } static int soc15_common_hw_init(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h index 47f74dab365d..839f186e1182 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h @@ -69,9 +69,10 @@ } \ } while (0) +#define AMDGPU_VIRT_SUPPORT_RLC_PRG_REG(a) (amdgpu_sriov_vf((a)) && !amdgpu_sriov_runtime((a))) #define WREG32_RLC(reg, value) \ do { \ - if (amdgpu_virt_support_rlc_prg_reg(adev)) { \ + if (AMDGPU_VIRT_SUPPORT_RLC_PRG_REG(adev)) { \ uint32_t i = 0; \ uint32_t retries = 50000; \ uint32_t r0 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0; \ @@ -96,7 +97,7 @@ #define WREG32_SOC15_RLC_SHADOW(ip, inst, reg, value) \ do { \ uint32_t target_reg = adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg;\ - if (amdgpu_virt_support_rlc_prg_reg(adev)) { \ + if (AMDGPU_VIRT_SUPPORT_RLC_PRG_REG(adev)) { \ uint32_t r2 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2; \ uint32_t r3 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3; \ uint32_t grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL; \ diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c index 614d9cefb24f..c1c0a39ae269 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c @@ -50,7 +50,7 @@ static void vega10_ih_enable_interrupts(struct amdgpu_device *adev) ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1); ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1); - if (amdgpu_virt_support_psp_prg_ih_reg(adev)) { + if (amdgpu_sriov_vf(adev)) { if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) { DRM_ERROR("PSP program IH_RB_CNTL failed!\n"); return; @@ -64,7 +64,7 @@ static void vega10_ih_enable_interrupts(struct amdgpu_device *adev) ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1); ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1, RB_ENABLE, 1); - if (amdgpu_virt_support_psp_prg_ih_reg(adev)) { + if (amdgpu_sriov_vf(adev)) { if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1, ih_rb_cntl)) { DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n"); @@ -80,7 +80,7 @@ static void vega10_ih_enable_interrupts(struct amdgpu_device *adev) ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2); ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2, RB_ENABLE, 1); - if (amdgpu_virt_support_psp_prg_ih_reg(adev)) { + if 
(amdgpu_sriov_vf(adev)) { if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2, ih_rb_cntl)) { DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n"); @@ -106,7 +106,7 @@ static void vega10_ih_disable_interrupts(struct amdgpu_device *adev) ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0); ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0); - if (amdgpu_virt_support_psp_prg_ih_reg(adev)) { + if (amdgpu_sriov_vf(adev)) { if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) { DRM_ERROR("PSP program IH_RB_CNTL failed!\n"); return; @@ -125,7 +125,7 @@ static void vega10_ih_disable_interrupts(struct amdgpu_device *adev) ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1); ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1, RB_ENABLE, 0); - if (amdgpu_virt_support_psp_prg_ih_reg(adev)) { + if (amdgpu_sriov_vf(adev)) { if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1, ih_rb_cntl)) { DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n"); @@ -145,7 +145,7 @@ static void vega10_ih_disable_interrupts(struct amdgpu_device *adev) ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2); ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2, RB_ENABLE, 0); - if (amdgpu_virt_support_psp_prg_ih_reg(adev)) { + if (amdgpu_sriov_vf(adev)) { if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2, ih_rb_cntl)) { DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n"); @@ -238,7 +238,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev) ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled); - if (amdgpu_virt_support_psp_prg_ih_reg(adev)) { + if (amdgpu_sriov_vf(adev)) { if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) { DRM_ERROR("PSP program IH_RB_CNTL failed!\n"); return -ETIMEDOUT; @@ -281,7 +281,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev) WPTR_OVERFLOW_ENABLE, 0); ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1); - if (amdgpu_virt_support_psp_prg_ih_reg(adev)) { + if (amdgpu_sriov_vf(adev)) { if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1, ih_rb_cntl)) { DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n"); @@ -308,7 +308,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev) ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2); ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl); - if (amdgpu_virt_support_psp_prg_ih_reg(adev)) { + if (amdgpu_sriov_vf(adev)) { if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2, ih_rb_cntl)) { DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n"); -- cgit v1.2.3 From 9244d3a6eb45248b07ec2ec084b05b9ef102c588 Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Tue, 30 Jul 2019 17:32:27 +0800 Subject: drm/amdgpu: fix incorrect judge on sos fw version for SRIOV the SOS fw of PSP is loaded in hypervisor thus guest won't tell the version of it, and judging feature by reading the sos fw version in guest side is completely wrong Signed-off-by: Monk Liu Reviewed-by: Emily Deng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/psp_v3_1.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c index 019c47feee42..c2ebc0020e5d 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c @@ -636,7 +636,7 @@ static int psp_v3_1_mode1_reset(struct psp_context *psp) static bool psp_v3_1_support_vmr_ring(struct psp_context *psp) { - if (amdgpu_sriov_vf(psp->adev) && 
psp->sos_fw_version >= 0x80455) + if (amdgpu_sriov_vf(psp->adev)) return true; return false; -- cgit v1.2.3 From 482f0e5385805ebbca25a1967da3538773bcdbea Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Wed, 31 Jul 2019 16:47:56 +0800 Subject: drm/amdgpu: fix double ucode load by PSP(v3) previously the ucode loading of PSP was repreated, one executed in phase_1 init/re-init/resume and the other in fw_loading routine Avoid this double loading by clearing ip_blocks.status.hw in suspend or reset prior to the FW loading and any block's hw_init/resume v2: still do the smu fw loading since it is needed by bare-metal v3: drop the change in reinit_early_sriov, just clear all block's status.hw in the head place and set the status.hw after hw_init done is enough Signed-off-by: Monk Liu Reviewed-by: Emily Deng Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 59 +++++++++++++++++++----------- 1 file changed, 38 insertions(+), 21 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 0e284294b27a..2363e7658d36 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1745,28 +1745,34 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev) if (adev->asic_type >= CHIP_VEGA10) { for (i = 0; i < adev->num_ip_blocks; i++) { - if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) { - if (adev->in_gpu_reset || adev->in_suspend) { - if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset) - break; /* sriov gpu reset, psp need to do hw_init before IH because of hw limit */ - r = adev->ip_blocks[i].version->funcs->resume(adev); - if (r) { - DRM_ERROR("resume of IP block <%s> failed %d\n", + if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP) + continue; + + /* no need to do the fw loading again if already done*/ + if (adev->ip_blocks[i].status.hw == true) + break; + + if (adev->in_gpu_reset || adev->in_suspend) { + r = adev->ip_blocks[i].version->funcs->resume(adev); + if (r) { + DRM_ERROR("resume of IP block <%s> failed %d\n", adev->ip_blocks[i].version->funcs->name, r); - return r; - } - } else { - r = adev->ip_blocks[i].version->funcs->hw_init(adev); - if (r) { - DRM_ERROR("hw_init of IP block <%s> failed %d\n", - adev->ip_blocks[i].version->funcs->name, r); - return r; - } + return r; + } + } else { + r = adev->ip_blocks[i].version->funcs->hw_init(adev); + if (r) { + DRM_ERROR("hw_init of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, r); + return r; } - adev->ip_blocks[i].status.hw = true; } + + adev->ip_blocks[i].status.hw = true; + break; } } + r = amdgpu_pm_load_smu_firmware(adev, &smu_version); return r; @@ -2208,7 +2214,9 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev) if (r) { DRM_ERROR("suspend of IP block <%s> failed %d\n", adev->ip_blocks[i].version->funcs->name, r); + return r; } + adev->ip_blocks[i].status.hw = false; } } @@ -2248,14 +2256,16 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) if (is_support_sw_smu(adev)) { /* todo */ } else if (adev->powerplay.pp_funcs && - adev->powerplay.pp_funcs->set_mp1_state) { + adev->powerplay.pp_funcs->set_mp1_state) { r = adev->powerplay.pp_funcs->set_mp1_state( adev->powerplay.pp_handle, adev->mp1_state); if (r) { DRM_ERROR("SMC failed to set mp1 state %d, %d\n", adev->mp1_state, r); + return r; } + adev->ip_blocks[i].status.hw = false; } } } @@ -2310,6 +2320,7 @@ 
static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev) for (j = 0; j < adev->num_ip_blocks; j++) { block = &adev->ip_blocks[j]; + block->status.hw = false; if (block->version->type != ip_order[i] || !block->status.valid) continue; @@ -2318,6 +2329,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev) DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded"); if (r) return r; + block->status.hw = true; } } @@ -2345,13 +2357,15 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev) block = &adev->ip_blocks[j]; if (block->version->type != ip_order[i] || - !block->status.valid) + !block->status.valid || + block->status.hw) continue; r = block->version->funcs->hw_init(adev); DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded"); if (r) return r; + block->status.hw = true; } } @@ -2375,17 +2389,19 @@ static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev) int i, r; for (i = 0; i < adev->num_ip_blocks; i++) { - if (!adev->ip_blocks[i].status.valid) + if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw) continue; if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) { + r = adev->ip_blocks[i].version->funcs->resume(adev); if (r) { DRM_ERROR("resume of IP block <%s> failed %d\n", adev->ip_blocks[i].version->funcs->name, r); return r; } + adev->ip_blocks[i].status.hw = true; } } @@ -2410,7 +2426,7 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev) int i, r; for (i = 0; i < adev->num_ip_blocks; i++) { - if (!adev->ip_blocks[i].status.valid) + if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw) continue; if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || @@ -2423,6 +2439,7 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev) adev->ip_blocks[i].version->funcs->name, r); return r; } + adev->ip_blocks[i].status.hw = true; } return 0; -- cgit v1.2.3 From d196bbbc28fab82624f7686f8b0da8e8644b6e6a Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Wed, 3 Jul 2019 22:52:16 -0700 Subject: drm/amd/display: Use proper enum conversion functions clang warns: drivers/gpu/drm/amd/amdgpu/../display/amdgpu_dm/amdgpu_dm_pp_smu.c:336:8: warning: implicit conversion from enumeration type 'enum smu_clk_type' to different enumeration type 'enum amd_pp_clock_type' [-Wenum-conversion] dc_to_smu_clock_type(clk_type), ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/../display/amdgpu_dm/amdgpu_dm_pp_smu.c:421:14: warning: implicit conversion from enumeration type 'enum amd_pp_clock_type' to different enumeration type 'enum smu_clk_type' [-Wenum-conversion] dc_to_pp_clock_type(clk_type), ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There are functions to properly convert between all of these types, use them so there are no longer any warnings. 
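For reference, -Wenum-conversion fires whenever a value of one enumeration type is passed where a parameter of a different enumeration type is expected; routing the value through an explicit mapping helper silences the warning and documents the intent. A stand-alone illustration with invented names (not the DM/SMU enums themselves):

    enum dm_clock  { DM_CLOCK_DISPLAY, DM_CLOCK_ENGINE };
    enum smu_clock { SMU_CLOCK_DISP, SMU_CLOCK_GFX, SMU_CLOCK_COUNT };

    void smu_query(enum smu_clock clk);            /* expects an smu_clock */

    static enum smu_clock dm_to_smu(enum dm_clock c)
    {
            switch (c) {
            case DM_CLOCK_DISPLAY: return SMU_CLOCK_DISP;
            case DM_CLOCK_ENGINE:  return SMU_CLOCK_GFX;
            default:               return SMU_CLOCK_COUNT;  /* no mapping */
            }
    }

    /* smu_query(dm_value)            -> implicit enum conversion, clang warns
     * smu_query(dm_to_smu(dm_value)) -> clean, and the mapping is explicit   */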
Fixes: a43913ea50a5 ("drm/amd/powerplay: add function get_clock_by_type_with_latency for navi10") Fixes: e5e4e22391c2 ("drm/amd/powerplay: add interface to get clock by type with latency for display (v2)") Link: https://github.com/ClangBuiltLinux/linux/issues/586 Signed-off-by: Nathan Chancellor Reviewed-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c index e4d0b5b845c5..ea0e17e87c08 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c @@ -334,7 +334,7 @@ bool dm_pp_get_clock_levels_by_type( } } else if (adev->smu.funcs && adev->smu.funcs->get_clock_by_type) { if (smu_get_clock_by_type(&adev->smu, - dc_to_smu_clock_type(clk_type), + dc_to_pp_clock_type(clk_type), &pp_clks)) { get_default_clock_levels(clk_type, dc_clks); return true; @@ -419,7 +419,7 @@ bool dm_pp_get_clock_levels_by_type_with_latency( return false; } else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type_with_latency) { if (smu_get_clock_by_type_with_latency(&adev->smu, - dc_to_pp_clock_type(clk_type), + dc_to_smu_clock_type(clk_type), &pp_clks)) return false; } -- cgit v1.2.3 From d9ec5cfd5a2eca87b7c5f697ae07b5c9ae9b771f Mon Sep 17 00:00:00 2001 From: Leo Li Date: Thu, 25 Jul 2019 13:12:24 -0400 Subject: drm/amd/display: Use switch table for dc_to_smu_clock_type Using a static int array will cause errors if the given dm_pp_clk_type is out-of-bounds. For robustness, use a switch table, with a default case to handle all invalid values. v2: 0 is a valid clock type for smu_clk_type. Return SMU_CLK_COUNT instead on invalid mapping. 
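The reason a switch is more robust than the old lookup table: a designated-initializer array only has entries up to the largest index that was listed, so indexing it with an unmapped dm_pp_clock_type either reads out of bounds or silently yields 0, and 0 is itself a legal smu_clk_type. Roughly, with generic names:

    static const int lut[] = {
            [0] = 100,
            [3] = 400,
    };                              /* length 4; entries 1 and 2 are 0 */

    int lookup(unsigned int idx)
    {
            return lut[idx];        /* idx > 3: out-of-bounds read;
                                       idx 1 or 2: silently 0, which may be
                                       a perfectly valid value downstream */
    }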
Signed-off-by: Leo Li Reviewed-by: Nicholas Kazlauskas Signed-off-by: Alex Deucher --- .../drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 37 +++++++++++++++------- 1 file changed, 25 insertions(+), 12 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c index ea0e17e87c08..f4cfa0caeba8 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c @@ -151,18 +151,31 @@ static void get_default_clock_levels( static enum smu_clk_type dc_to_smu_clock_type( enum dm_pp_clock_type dm_pp_clk_type) { -#define DCCLK_MAP_SMUCLK(dcclk, smuclk) \ - [dcclk] = smuclk - - static int dc_clk_type_map[] = { - DCCLK_MAP_SMUCLK(DM_PP_CLOCK_TYPE_DISPLAY_CLK, SMU_DISPCLK), - DCCLK_MAP_SMUCLK(DM_PP_CLOCK_TYPE_ENGINE_CLK, SMU_GFXCLK), - DCCLK_MAP_SMUCLK(DM_PP_CLOCK_TYPE_MEMORY_CLK, SMU_MCLK), - DCCLK_MAP_SMUCLK(DM_PP_CLOCK_TYPE_DCEFCLK, SMU_DCEFCLK), - DCCLK_MAP_SMUCLK(DM_PP_CLOCK_TYPE_SOCCLK, SMU_SOCCLK), - }; - - return dc_clk_type_map[dm_pp_clk_type]; + enum smu_clk_type smu_clk_type = SMU_CLK_COUNT; + + switch (dm_pp_clk_type) { + case DM_PP_CLOCK_TYPE_DISPLAY_CLK: + smu_clk_type = SMU_DISPCLK; + break; + case DM_PP_CLOCK_TYPE_ENGINE_CLK: + smu_clk_type = SMU_GFXCLK; + break; + case DM_PP_CLOCK_TYPE_MEMORY_CLK: + smu_clk_type = SMU_MCLK; + break; + case DM_PP_CLOCK_TYPE_DCEFCLK: + smu_clk_type = SMU_DCEFCLK; + break; + case DM_PP_CLOCK_TYPE_SOCCLK: + smu_clk_type = SMU_SOCCLK; + break; + default: + DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n", + dm_pp_clk_type); + break; + } + + return smu_clk_type; } static enum amd_pp_clock_type dc_to_pp_clock_type( -- cgit v1.2.3 From 274840e544225657fbca4f12efa1ee55474bb800 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Tue, 9 Jul 2019 19:09:42 -0400 Subject: drm/ttm: Add release_notify callback to ttm_bo_driver MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This notifies the driver that a BO is about to be released. Releasing a BO also invokes the move_notify callback from ttm_bo_cleanup_memtype_use, but that happens too late for anything that would add fences to the BO and require a delayed delete. Signed-off-by: Felix Kuehling Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/ttm/ttm_bo.c | 3 +++ include/drm/ttm/ttm_bo_driver.h | 10 ++++++++++ 2 files changed, 13 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index a7fd5a4955c9..939b9258d513 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -671,6 +671,9 @@ static void ttm_bo_release(struct kref *kref) struct ttm_bo_device *bdev = bo->bdev; struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; + if (bo->bdev->driver->release_notify) + bo->bdev->driver->release_notify(bo); + drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node); ttm_mem_io_lock(man, false); ttm_mem_io_free_vm(bo); diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index a2d810a2504d..81077e5b4b7e 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -390,6 +390,16 @@ struct ttm_bo_driver { * notify driver that a BO was deleted from LRU. 
*/ void (*del_from_lru_notify)(struct ttm_buffer_object *bo); + + /** + * Notify the driver that we're about to release a BO + * + * @bo: BO that is about to be released + * + * Gives the driver a chance to do any cleanup, including + * adding fences that may force a delayed delete + */ + void (*release_notify)(struct ttm_buffer_object *bo); }; /** -- cgit v1.2.3 From ab2f7a5c18b5c17cc94aaab7ae2e7d1fa08993d6 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Tue, 9 Jul 2019 19:12:44 -0400 Subject: drm/amdgpu: Implement VRAM wipe on release MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Wipe VRAM memory containing sensitive data when moving or releasing BOs. Clearing the memory is pipelined to minimize any impact on subsequent memory allocation latency. Use of a poison value should help debug future use-after-free bugs. When moving BOs, the existing ttm_bo_pipelined_move ensures that the memory won't be reused before being wiped. When releasing BOs, the BO is fenced with the memory fill operation, which results in queuing the BO for a delayed delete. v2: Move amdgpu_amdkfd_unreserve_memory_limit into amdgpu_bo_release_notify so that KFD can use memory that's still being cleared in the background Signed-off-by: Felix Kuehling Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 39 +++++++++++++++++++++++++++--- drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 17 +++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 2 ++ 4 files changed, 56 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 531251dff150..0c0a8e83ab83 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -80,9 +80,6 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo) if (bo->pin_count > 0) amdgpu_bo_subtract_pin_size(bo); - if (bo->kfd_bo) - amdgpu_amdkfd_unreserve_memory_limit(bo); - amdgpu_bo_kunmap(bo); if (bo->gem_base.import_attach) @@ -1220,6 +1217,42 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type); } +/** + * amdgpu_bo_move_notify - notification about a BO being released + * @bo: pointer to a buffer object + * + * Wipes VRAM buffers whose contents should not be leaked before the + * memory is released. 
+ */ +void amdgpu_bo_release_notify(struct ttm_buffer_object *bo) +{ + struct dma_fence *fence = NULL; + struct amdgpu_bo *abo; + int r; + + if (!amdgpu_bo_is_amdgpu_bo(bo)) + return; + + abo = ttm_to_amdgpu_bo(bo); + + if (abo->kfd_bo) + amdgpu_amdkfd_unreserve_memory_limit(abo); + + if (bo->mem.mem_type != TTM_PL_VRAM || !bo->mem.mm_node || + !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) + return; + + reservation_object_lock(bo->resv, NULL); + + r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->resv, &fence); + if (!WARN_ON(r)) { + amdgpu_bo_fence(abo, fence, false); + dma_fence_put(fence); + } + + reservation_object_unlock(bo->resv); +} + /** * amdgpu_bo_fault_reserve_notify - notification about a memory fault * @bo: pointer to a buffer object diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index dc44cf36d025..5a3c1779e200 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -265,6 +265,7 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer, void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict, struct ttm_mem_reg *new_mem); +void amdgpu_bo_release_notify(struct ttm_buffer_object *bo); int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo); void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence, bool shared); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 2bb9420b77a1..018d1d347896 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -444,6 +444,22 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, if (r) goto error; + /* clear the space being freed */ + if (old_mem->mem_type == TTM_PL_VRAM && + (ttm_to_amdgpu_bo(bo)->flags & + AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) { + struct dma_fence *wipe_fence = NULL; + + r = amdgpu_fill_buffer(ttm_to_amdgpu_bo(bo), AMDGPU_POISON, + NULL, &wipe_fence); + if (r) { + goto error; + } else if (wipe_fence) { + dma_fence_put(fence); + fence = wipe_fence; + } + } + /* Always block for VM page tables before committing the new location */ if (bo->type == ttm_bo_type_kernel) r = ttm_bo_move_accel_cleanup(bo, fence, true, new_mem); @@ -1599,6 +1615,7 @@ static struct ttm_bo_driver amdgpu_bo_driver = { .move = &amdgpu_bo_move, .verify_access = &amdgpu_verify_access, .move_notify = &amdgpu_bo_move_notify, + .release_notify = &amdgpu_bo_release_notify, .fault_reserve_notify = &amdgpu_bo_fault_reserve_notify, .io_mem_reserve = &amdgpu_ttm_io_mem_reserve, .io_mem_free = &amdgpu_ttm_io_mem_free, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index caa76c693700..bccb8c49e597 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -38,6 +38,8 @@ #define AMDGPU_GTT_MAX_TRANSFER_SIZE 512 #define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2 +#define AMDGPU_POISON 0xd0bed0be + struct amdgpu_mman { struct ttm_bo_device bdev; bool mem_global_referenced; -- cgit v1.2.3 From 6856e4b65f64eeb3f17148f79b36c1d60c627529 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Mon, 8 Jul 2019 20:01:22 -0400 Subject: drm/amdgpu: Mark KFD VRAM allocations for wipe on release MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Memory used by KFD applications can contain sensitive information that should not be leaked to other processes. 
The current approach to prevent leaks is to clear VRAM at allocation time. This is not effective because memory can be reused in other ways without being cleared. Synchronously clearing memory on the allocation path also carries a significant performance penalty. Stop clearing memory at allocation time. Instead mark the memory for wipe on release. Signed-off-by: Felix Kuehling Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 6a5c96e519b1..14d9c250b3d3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1090,7 +1090,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( */ if (flags & ALLOC_MEM_FLAGS_VRAM) { domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM; - alloc_flags = AMDGPU_GEM_CREATE_VRAM_CLEARED; + alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE; alloc_flags |= (flags & ALLOC_MEM_FLAGS_PUBLIC) ? AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : AMDGPU_GEM_CREATE_NO_CPU_ACCESS; -- cgit v1.2.3 From 19ed70ff5d2458798387274186e738dabc2e05ad Mon Sep 17 00:00:00 2001 From: Andrey Grodzovsky Date: Thu, 1 Aug 2019 11:44:17 -0400 Subject: drm/amdgpu: Add amdgpu_asic_funcs.reset_method for Vega20 Fixes GPU reset crash. Signed-off-by: Andrey Grodzovsky Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index e604d46d5f12..25c4cf2d2128 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -896,6 +896,7 @@ static const struct amdgpu_asic_funcs vega20_asic_funcs = .get_pcie_usage = &soc15_get_pcie_usage, .need_reset_on_init = &soc15_need_reset_on_init, .get_pcie_replay_count = &soc15_get_pcie_replay_count, + .reset_method = &soc15_asic_reset_method }; static int soc15_common_early_init(void *handle) -- cgit v1.2.3 From 57d352f7699efedeebcf809df5e01e6fd51e2191 Mon Sep 17 00:00:00 2001 From: Kent Russell Date: Wed, 31 Jul 2019 09:23:45 -0400 Subject: drm/amdgpu: Update NBIO headers to add TXCLK3/4 These are added for VG20, and are needed for PCIe bandwidth. 
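The definitions added below follow the usual __SHIFT/_MASK convention, which is what REG_SET_FIELD()/REG_GET_FIELD() expand to: mask off the field, then shift it into place. A hand-expanded sketch for one of the new fields (not driver code, and assuming the usual adev/RREG32_PCIE environment):

    static u32 txclk3_counter0_upper(struct amdgpu_device *adev)
    {
            u32 tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3);

            /* equivalent to REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER0_UPPER) */
            return (tmp & PCIE_PERF_CNTL_TXCLK3__COUNTER0_UPPER_MASK) >>
                    PCIE_PERF_CNTL_TXCLK3__COUNTER0_UPPER__SHIFT;
    }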
Signed-off-by: Kent Russell Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../amd/include/asic_reg/nbio/nbio_7_0_sh_mask.h | 30 ++++++++++++++++++++++ .../drm/amd/include/asic_reg/nbio/nbio_7_0_smn.h | 6 +++++ 2 files changed, 36 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_sh_mask.h index 88602479a1aa..ee8c15e4543d 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_sh_mask.h @@ -74709,6 +74709,36 @@ //PCIE_PERF_COUNT1_TXCLK2 #define PCIE_PERF_COUNT1_TXCLK2__COUNTER1__SHIFT 0x0 #define PCIE_PERF_COUNT1_TXCLK2__COUNTER1_MASK 0xFFFFFFFFL +//PCIE_PERF_CNTL_TXCLK3 +#define PCIE_PERF_CNTL_TXCLK3__EVENT0_SEL__SHIFT 0x0 +#define PCIE_PERF_CNTL_TXCLK3__EVENT1_SEL__SHIFT 0x8 +#define PCIE_PERF_CNTL_TXCLK3__COUNTER0_UPPER__SHIFT 0x10 +#define PCIE_PERF_CNTL_TXCLK3__COUNTER1_UPPER__SHIFT 0x18 +#define PCIE_PERF_CNTL_TXCLK3__EVENT0_SEL_MASK 0x000000FFL +#define PCIE_PERF_CNTL_TXCLK3__EVENT1_SEL_MASK 0x0000FF00L +#define PCIE_PERF_CNTL_TXCLK3__COUNTER0_UPPER_MASK 0x00FF0000L +#define PCIE_PERF_CNTL_TXCLK3__COUNTER1_UPPER_MASK 0xFF000000L +//PCIE_PERF_COUNT0_TXCLK3 +#define PCIE_PERF_COUNT0_TXCLK3__COUNTER0__SHIFT 0x0 +#define PCIE_PERF_COUNT0_TXCLK3__COUNTER0_MASK 0xFFFFFFFFL +//PCIE_PERF_COUNT1_TXCLK3 +#define PCIE_PERF_COUNT1_TXCLK3__COUNTER1__SHIFT 0x0 +#define PCIE_PERF_COUNT1_TXCLK3__COUNTER1_MASK 0xFFFFFFFFL +//PCIE_PERF_CNTL_TXCLK4 +#define PCIE_PERF_CNTL_TXCLK4__EVENT0_SEL__SHIFT 0x0 +#define PCIE_PERF_CNTL_TXCLK4__EVENT1_SEL__SHIFT 0x8 +#define PCIE_PERF_CNTL_TXCLK4__COUNTER0_UPPER__SHIFT 0x10 +#define PCIE_PERF_CNTL_TXCLK4__COUNTER1_UPPER__SHIFT 0x18 +#define PCIE_PERF_CNTL_TXCLK4__EVENT0_SEL_MASK 0x000000FFL +#define PCIE_PERF_CNTL_TXCLK4__EVENT1_SEL_MASK 0x0000FF00L +#define PCIE_PERF_CNTL_TXCLK4__COUNTER0_UPPER_MASK 0x00FF0000L +#define PCIE_PERF_CNTL_TXCLK4__COUNTER1_UPPER_MASK 0xFF000000L +//PCIE_PERF_COUNT0_TXCLK4 +#define PCIE_PERF_COUNT0_TXCLK4__COUNTER0__SHIFT 0x0 +#define PCIE_PERF_COUNT0_TXCLK4__COUNTER0_MASK 0xFFFFFFFFL +//PCIE_PERF_COUNT1_TXCLK4 +#define PCIE_PERF_COUNT1_TXCLK4__COUNTER1__SHIFT 0x0 +#define PCIE_PERF_COUNT1_TXCLK4__COUNTER1_MASK 0xFFFFFFFFL //PCIE_PRBS_CLR #define PCIE_PRBS_CLR__PRBS_CLR__SHIFT 0x0 #define PCIE_PRBS_CLR__PRBS_POLARITY_EN__SHIFT 0x18 diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_smn.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_smn.h index caf5ffdc130a..6702575bc6e3 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_smn.h +++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_smn.h @@ -50,6 +50,12 @@ #define smnPCIE_PERF_CNTL_TXCLK2 0x11180254 #define smnPCIE_PERF_COUNT0_TXCLK2 0x11180258 #define smnPCIE_PERF_COUNT1_TXCLK2 0x1118025c +#define smnPCIE_PERF_CNTL_TXCLK3 0x1118021c +#define smnPCIE_PERF_COUNT0_TXCLK3 0x11180220 +#define smnPCIE_PERF_COUNT1_TXCLK3 0x11180224 +#define smnPCIE_PERF_CNTL_TXCLK4 0x11180228 +#define smnPCIE_PERF_COUNT0_TXCLK4 0x1118022c +#define smnPCIE_PERF_COUNT1_TXCLK4 0x11180230 #define smnPCIE_RX_NUM_NAK 0x11180038 #define smnPCIE_RX_NUM_NAK_GENERATED 0x1118003c -- cgit v1.2.3 From 612e4ed99bdd7d4aae207a69dea16e609d304e3b Mon Sep 17 00:00:00 2001 From: Kent Russell Date: Wed, 31 Jul 2019 09:24:32 -0400 Subject: drm/amdgpu: Fix pcie_bw on Vega20 The registers used for VG20 are different in that certain performance counters were split off to TXCLK3/4. 
Vega10/12 doesn't have this, so add a new vg20_get_pcie_usage to reflect this change. Signed-off-by: Kent Russell Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 60 +++++++++++++++++++++++++++++++++----- 1 file changed, 52 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 25c4cf2d2128..5116d0bf9e4a 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -785,14 +785,9 @@ static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0, /* Set the 2 events that we wish to watch, defined above */ /* Reg 40 is # received msgs */ + /* Reg 104 is # of posted requests sent */ perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40); - /* Pre-VG20, Reg 104 is # of posted requests sent. On VG20 it's 108 */ - if (adev->asic_type == CHIP_VEGA20) - perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, - EVENT1_SEL, 108); - else - perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, - EVENT1_SEL, 104); + perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104); /* Write to enable desired perf counters */ WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK, perfctr); @@ -822,6 +817,55 @@ static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0, *count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32); } +static void vega20_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0, + uint64_t *count1) +{ + uint32_t perfctr = 0; + uint64_t cnt0_of, cnt1_of; + int tmp; + + /* This reports 0 on APUs, so return to avoid writing/reading registers + * that may or may not be different from their GPU counterparts + */ + if (adev->flags & AMD_IS_APU) + return; + + /* Set the 2 events that we wish to watch, defined above */ + /* Reg 40 is # received msgs */ + /* Reg 108 is # of posted requests sent on VG20 */ + perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3, + EVENT0_SEL, 40); + perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3, + EVENT1_SEL, 108); + + /* Write to enable desired perf counters */ + WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3, perfctr); + /* Zero out and enable the perf counters + * Write 0x5: + * Bit 0 = Start all counters(1) + * Bit 2 = Global counter reset enable(1) + */ + WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005); + + msleep(1000); + + /* Load the shadow and disable the perf counters + * Write 0x2: + * Bit 0 = Stop counters(0) + * Bit 1 = Load the shadow counters(1) + */ + WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002); + + /* Read register values to get any >32bit overflow */ + tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3); + cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER0_UPPER); + cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER1_UPPER); + + /* Get the values and add the overflow */ + *count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK3) | (cnt0_of << 32); + *count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK3) | (cnt1_of << 32); +} + static bool soc15_need_reset_on_init(struct amdgpu_device *adev) { u32 sol_reg; @@ -893,7 +937,7 @@ static const struct amdgpu_asic_funcs vega20_asic_funcs = .invalidate_hdp = &soc15_invalidate_hdp, .need_full_reset = &soc15_need_full_reset, .init_doorbell_index = &vega20_doorbell_index_init, - .get_pcie_usage = &soc15_get_pcie_usage, + .get_pcie_usage = &vega20_get_pcie_usage, .need_reset_on_init = &soc15_need_reset_on_init, .get_pcie_replay_count = &soc15_get_pcie_replay_count, 
.reset_method = &soc15_asic_reset_method -- cgit v1.2.3 From 857b82d0df4c85d3d1c043b325dcd8354fd09c6e Mon Sep 17 00:00:00 2001 From: Le Ma Date: Mon, 8 Jul 2019 20:17:48 +0800 Subject: drm/amdgpu: support get_cu_info for Arcturus This change is because SE/SH layout on Arcturus is 8*1, different from 4*2(or 4*1) on Vega ASICs. Currently the cu bitmap array is 4x4 size, and besides the bitmap is used widely across SW stack. To mostly reduce the scale of impact, we make the cu bitmap array compatible with SE/SH layout on Arcturus. Then the store of cu bits of each shader array for Arcturus will be like below: SE0,SH0 --> bitmap[0][0] SE1,SH0 --> bitmap[1][0] SE2,SH0 --> bitmap[2][0] SE3,SH0 --> bitmap[3][0] SE4,SH0 --> bitmap[0][1] SE5,SH0 --> bitmap[1][1] SE6,SH0 --> bitmap[2][1] SE7,SH0 --> bitmap[3][1] Signed-off-by: Le Ma Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 35 ++++++++++++++++++++++++++++------- 1 file changed, 28 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 02bbe109cfe0..c4df0c525270 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -6353,12 +6353,21 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev, { int i, j, k, counter, active_cu_number = 0; u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0; - unsigned disable_masks[4 * 2]; + unsigned disable_masks[4 * 4]; if (!adev || !cu_info) return -EINVAL; - amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2); + /* + * 16 comes from bitmap array size 4*4, and it can cover all gfx9 ASICs + */ + if (adev->gfx.config.max_shader_engines * + adev->gfx.config.max_sh_per_se > 16) + return -EINVAL; + + amdgpu_gfx_parse_disable_cu(disable_masks, + adev->gfx.config.max_shader_engines, + adev->gfx.config.max_sh_per_se); mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { @@ -6367,11 +6376,23 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev, ao_bitmap = 0; counter = 0; gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff); - if (i < 4 && j < 2) - gfx_v9_0_set_user_cu_inactive_bitmap( - adev, disable_masks[i * 2 + j]); + gfx_v9_0_set_user_cu_inactive_bitmap( + adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]); bitmap = gfx_v9_0_get_cu_active_bitmap(adev); - cu_info->bitmap[i][j] = bitmap; + + /* + * The bitmap(and ao_cu_bitmap) in cu_info structure is + * 4x4 size array, and it's usually suitable for Vega + * ASICs which has 4*2 SE/SH layout. + * But for Arcturus, SE/SH layout is changed to 8*1. 
+ * To mostly reduce the impact, we make it compatible + * with current bitmap array as below: + * SE4,SH0 --> bitmap[0][1] + * SE5,SH0 --> bitmap[1][1] + * SE6,SH0 --> bitmap[2][1] + * SE7,SH0 --> bitmap[3][1] + */ + cu_info->bitmap[i % 4][j + i / 4] = bitmap; for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) { if (bitmap & mask) { @@ -6384,7 +6405,7 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev, active_cu_number += counter; if (i < 2 && j < 2) ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8)); - cu_info->ao_cu_bitmap[i][j] = ao_bitmap; + cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap; } } gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); -- cgit v1.2.3 From 5145d57ec5f5cf7dadaa6ccd9c9f1e4dae82570b Mon Sep 17 00:00:00 2001 From: Jay Cornwall Date: Thu, 18 Jul 2019 16:57:22 -0500 Subject: drm/amdkfd: Extend CU mask to 8 SEs (v3) Following bitmap layout logic introduced by: "drm/amdgpu: support get_cu_info for Arcturus". v2: squash in fixup for gfx_v9_0.c (Alex) v3: squash in debug print output fix Signed-off-by: Jay Cornwall Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 4 ++++ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c | 10 +++++----- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h | 2 ++ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 18 +++++++++++++++--- drivers/gpu/drm/amd/include/v9_structs.h | 8 ++++---- 5 files changed, 30 insertions(+), 12 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index c4df0c525270..c7910be64bf1 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -3341,6 +3341,10 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring) mqd->compute_static_thread_mgmt_se1 = 0xffffffff; mqd->compute_static_thread_mgmt_se2 = 0xffffffff; mqd->compute_static_thread_mgmt_se3 = 0xffffffff; + mqd->compute_static_thread_mgmt_se4 = 0xffffffff; + mqd->compute_static_thread_mgmt_se5 = 0xffffffff; + mqd->compute_static_thread_mgmt_se6 = 0xffffffff; + mqd->compute_static_thread_mgmt_se7 = 0xffffffff; mqd->compute_misc_reserved = 0x00000003; mqd->dynamic_cu_mask_addr_lo = diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c index d6cf391da591..88813dad731f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c @@ -98,8 +98,8 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm, uint32_t *se_mask) { struct kfd_cu_info cu_info; - uint32_t cu_per_sh[4] = {0}; - int i, se, cu = 0; + uint32_t cu_per_se[KFD_MAX_NUM_SE] = {0}; + int i, se, sh, cu = 0; amdgpu_amdkfd_get_cu_info(mm->dev->kgd, &cu_info); @@ -107,8 +107,8 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm, cu_mask_count = cu_info.cu_active_number; for (se = 0; se < cu_info.num_shader_engines; se++) - for (i = 0; i < 4; i++) - cu_per_sh[se] += hweight32(cu_info.cu_bitmap[se][i]); + for (sh = 0; sh < cu_info.num_shader_arrays_per_engine; sh++) + cu_per_se[se] += hweight32(cu_info.cu_bitmap[se % 4][sh + (se / 4)]); /* Symmetrically map cu_mask to all SEs: * cu_mask[0] bit0 -> se_mask[0] bit0; @@ -128,6 +128,6 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm, se = 0; cu++; } - } while (cu >= cu_per_sh[se] && cu < 32); + } while (cu >= cu_per_se[se] && cu < 32); } } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h index 550b61e81015..fbdb16418847 100644 --- 
a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h @@ -26,6 +26,8 @@ #include "kfd_priv.h" +#define KFD_MAX_NUM_SE 8 + /** * struct mqd_manager * diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index 0c58f91b3ff3..d3380c5bdbde 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -46,7 +46,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd, struct queue_properties *q) { struct v9_mqd *m; - uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */ + uint32_t se_mask[KFD_MAX_NUM_SE] = {0}; if (q->cu_mask_count == 0) return; @@ -59,12 +59,20 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd, m->compute_static_thread_mgmt_se1 = se_mask[1]; m->compute_static_thread_mgmt_se2 = se_mask[2]; m->compute_static_thread_mgmt_se3 = se_mask[3]; + m->compute_static_thread_mgmt_se4 = se_mask[4]; + m->compute_static_thread_mgmt_se5 = se_mask[5]; + m->compute_static_thread_mgmt_se6 = se_mask[6]; + m->compute_static_thread_mgmt_se7 = se_mask[7]; - pr_debug("update cu mask to %#x %#x %#x %#x\n", + pr_debug("update cu mask to %#x %#x %#x %#x %#x %#x %#x %#x\n", m->compute_static_thread_mgmt_se0, m->compute_static_thread_mgmt_se1, m->compute_static_thread_mgmt_se2, - m->compute_static_thread_mgmt_se3); + m->compute_static_thread_mgmt_se3, + m->compute_static_thread_mgmt_se4, + m->compute_static_thread_mgmt_se5, + m->compute_static_thread_mgmt_se6, + m->compute_static_thread_mgmt_se7); } static void set_priority(struct v9_mqd *m, struct queue_properties *q) @@ -125,6 +133,10 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF; m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF; m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF; + m->compute_static_thread_mgmt_se4 = 0xFFFFFFFF; + m->compute_static_thread_mgmt_se5 = 0xFFFFFFFF; + m->compute_static_thread_mgmt_se6 = 0xFFFFFFFF; + m->compute_static_thread_mgmt_se7 = 0xFFFFFFFF; m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK | 0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT; diff --git a/drivers/gpu/drm/amd/include/v9_structs.h b/drivers/gpu/drm/amd/include/v9_structs.h index 8b383dbe1cda..a0c672889fe4 100644 --- a/drivers/gpu/drm/amd/include/v9_structs.h +++ b/drivers/gpu/drm/amd/include/v9_structs.h @@ -196,10 +196,10 @@ struct v9_mqd { uint32_t compute_wave_restore_addr_lo; uint32_t compute_wave_restore_addr_hi; uint32_t compute_wave_restore_control; - uint32_t reserved_39; - uint32_t reserved_40; - uint32_t reserved_41; - uint32_t reserved_42; + uint32_t compute_static_thread_mgmt_se4; + uint32_t compute_static_thread_mgmt_se5; + uint32_t compute_static_thread_mgmt_se6; + uint32_t compute_static_thread_mgmt_se7; uint32_t reserved_43; uint32_t reserved_44; uint32_t reserved_45; -- cgit v1.2.3 From 6f7fe9a93e6c09bf988c5059403f5f88e17e21e6 Mon Sep 17 00:00:00 2001 From: KyleMahlkuch Date: Wed, 31 Jul 2019 17:10:14 -0500 Subject: drm/radeon: Fix EEH during kexec During kexec some adapters hit an EEH since they are not properly shut down in the radeon_pci_shutdown() function. Adding radeon_suspend_kms() fixes this issue. 
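For reference, a compilable stand-alone sketch of the shutdown ordering this fix establishes. The types and helpers below are simplified stand-ins invented for the sketch (not radeon/DRM symbols), and the virtual-machine branch is simplified:

#include <stdbool.h>
#include <stdio.h>

/* Minimal stand-ins so the sketch builds on its own. */
struct drm_device { const char *name; };
struct pci_dev { struct drm_device *drvdata; };

static bool running_in_vm(void) { return false; }

static void full_remove(struct pci_dev *pdev)
{
    (void)pdev;
    printf("virtual machine: tear the device down completely\n");
}

static void suspend_hw(struct drm_device *ddev)
{
    /* Quiesce DMA and engines so the kexec'd kernel does not inherit a live device. */
    printf("suspending %s before reboot/kexec\n", ddev->name);
}

/* Shape of a PCI .shutdown hook that suspends the adapter before kexec. */
static void example_pci_shutdown(struct pci_dev *pdev)
{
    if (running_in_vm()) {
        full_remove(pdev);
        return;
    }
    suspend_hw(pdev->drvdata);
}

int main(void)
{
    struct drm_device ddev = { "radeon-like card" };
    struct pci_dev pdev = { &ddev };

    example_pci_shutdown(&pdev);
    return 0;
}
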
Signed-off-by: KyleMahlkuch Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_drv.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index b2bb74d5bffb..66ff84ec665b 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -349,11 +349,19 @@ radeon_pci_remove(struct pci_dev *pdev) static void radeon_pci_shutdown(struct pci_dev *pdev) { + struct drm_device *ddev = pci_get_drvdata(pdev); + /* if we are running in a VM, make sure the device * torn down properly on reboot/shutdown */ if (radeon_device_is_virtual()) radeon_pci_remove(pdev); + + /* Some adapters need to be suspended before a + * shutdown occurs in order to prevent an error + * during kexec. + */ + radeon_suspend_kms(ddev, true, true, false); } static int radeon_pmops_suspend(struct device *dev) -- cgit v1.2.3 From e3bf125bdbb6efed2d650effbc3c13320602d64a Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 1 Aug 2019 12:15:41 +0100 Subject: drm/amd/powerplay: fix off-by-one upper bounds limit checks There are two occurrances of off-by-one upper bound checking of indexes causing potential out-of-bounds array reads. Fix these. Addresses-Coverity: ("Out-of-bounds read") Fixes: cb33363d0e85 ("drm/amd/powerplay: add smu feature name support") Fixes: 6b294793e384 ("drm/amd/powerplay: add smu message name support") Signed-off-by: Colin Ian King Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 7414ed9a5013..fb9d6c1b3f33 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -38,7 +38,7 @@ static const char* __smu_message_names[] = { const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type) { - if (type < 0 || type > SMU_MSG_MAX_COUNT) + if (type < 0 || type >= SMU_MSG_MAX_COUNT) return "unknow smu message"; return __smu_message_names[type]; } @@ -51,7 +51,7 @@ static const char* __smu_feature_names[] = { const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature) { - if (feature < 0 || feature > SMU_FEATURE_COUNT) + if (feature < 0 || feature >= SMU_FEATURE_COUNT) return "unknow smu feature"; return __smu_feature_names[feature]; } -- cgit v1.2.3 From f3eb9b8f67bc28783eddc142ad805ebdc53d6339 Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Mon, 29 Jul 2019 16:36:44 +0800 Subject: gpu: drm: radeon: Fix a possible null-pointer dereference in radeon_connector_set_property() In radeon_connector_set_property(), there is an if statement on line 743 to check whether connector->encoder is NULL: if (connector->encoder) When connector->encoder is NULL, it is used on line 755: if (connector->encoder->crtc) Thus, a possible null-pointer dereference may occur. To fix this bug, connector->encoder is checked before being used. This bug is found by a static analysis tool STCheck written by us. 
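A self-contained illustration of the guarded pointer-chain pattern the fix applies; the three tiny structs are simplified stand-ins for the DRM connector/encoder/crtc objects, not the real definitions:

#include <stddef.h>
#include <stdio.h>

struct crtc { int id; };
struct encoder { struct crtc *crtc; };
struct connector { struct encoder *encoder; };

/* Every level of the chain is checked before it is dereferenced. */
static struct crtc *connector_to_crtc(struct connector *c)
{
    if (c && c->encoder && c->encoder->crtc)
        return c->encoder->crtc;
    return NULL;
}

int main(void)
{
    /* e.g. a property is set before any encoder has been attached */
    struct connector orphan = { .encoder = NULL };
    struct crtc *crtc = connector_to_crtc(&orphan);

    printf("crtc: %s\n", crtc ? "present" : "none (skip crtc-specific work)");
    return 0;
}
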
Signed-off-by: Jia-Ju Bai Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_connectors.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index c60d1a44d22a..b684cd719612 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -752,7 +752,7 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct radeon_encoder->output_csc = val; - if (connector->encoder->crtc) { + if (connector->encoder && connector->encoder->crtc) { struct drm_crtc *crtc = connector->encoder->crtc; struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); -- cgit v1.2.3 From ab63131155b0d86156b892a5d380dc337271aed9 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 1 Aug 2019 09:39:41 +0100 Subject: drm/amd/powerplay: fix a few spelling mistakes There are a few spelling mistakes "unknow" -> "unknown" and "enabeld" -> "enabled". Fix these. Signed-off-by: Colin Ian King Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index fb9d6c1b3f33..4b9d51231a46 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -39,7 +39,7 @@ static const char* __smu_message_names[] = { const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type) { if (type < 0 || type >= SMU_MSG_MAX_COUNT) - return "unknow smu message"; + return "unknown smu message"; return __smu_message_names[type]; } @@ -52,7 +52,7 @@ static const char* __smu_feature_names[] = { const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature) { if (feature < 0 || feature >= SMU_FEATURE_COUNT) - return "unknow smu feature"; + return "unknown smu feature"; return __smu_feature_names[feature]; } @@ -87,7 +87,7 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf) smu_get_feature_name(smu, sort_feature[i]), i, !!smu_feature_is_enabled(smu, sort_feature[i]) ? - "enabeld" : "disabled"); + "enabled" : "disabled"); } failed: -- cgit v1.2.3 From f0ced3f61b4d2a21a3e0f0aa79fb5ad6c6717c31 Mon Sep 17 00:00:00 2001 From: Matt Coffin Date: Wed, 31 Jul 2019 14:14:35 -0600 Subject: drm/amd/powerplay: Allow changing of fan_control in smu_v11_0 [Why] Before this change, the fan control state on smu_v11 was not able to be changed because the capability check for checking if the fan control capability existed was inverted. [How] The capability check for fan control in smu_v11_0_auto_fan_control was inverted, to correctly check for the absence, instead of presence of fan control capabilities. 
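As a stand-alone model of the corrected guard (the helper and flag names are invented for this sketch): the early return should fire only when the capability is absent, so parts that do support fan control actually reach the programming path.

#include <stdbool.h>
#include <stdio.h>

static bool fan_control_supported = true;   /* pretend capability bit */

/* Correct early-return guard: bail out only when the feature is missing. */
static int set_auto_fan_control(bool enable)
{
    if (!fan_control_supported)
        return 0;               /* nothing to program, report success */

    printf("programming auto fan control = %d\n", enable);
    return 0;
}

int main(void)
{
    set_auto_fan_control(true);     /* supported: state actually changes */
    fan_control_supported = false;
    set_auto_fan_control(true);     /* unsupported: silently a no-op */
    return 0;
}
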
Reviewed-by: Evan Quan Signed-off-by: Matt Coffin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index 35669e80a246..848ad04837a8 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -1364,7 +1364,7 @@ smu_v11_0_auto_fan_control(struct smu_context *smu, bool auto_fan_control) { int ret = 0; - if (smu_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT)) + if (!smu_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT)) return 0; ret = smu_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control); -- cgit v1.2.3 From ac4bf4a1ebce7554628db89498ec49054c7bcdba Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 1 Aug 2019 12:01:45 +0100 Subject: drm/amdgpu: fix unsigned variable instance compared to less than zero Currenly the error check on variable instance is always false because it is a uint32_t type and this is never less than zero. Fix this by making it an int type. Addresses-Coverity: ("Unsigned compared against 0") Fixes: 7d0e6329dfdc ("drm/amdgpu: update more sdma instances irq support") Signed-off-by: Colin Ian King Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 61d995dd6a28..30153c36efd6 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1963,7 +1963,8 @@ static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev, struct ras_err_data *err_data, struct amdgpu_iv_entry *entry) { - uint32_t instance, err_source; + uint32_t err_source; + int instance; instance = sdma_v4_0_irq_id_to_seq(entry->client_id); if (instance < 0) -- cgit v1.2.3 From e16e37efb4c9eb7bcb9dab756c975040c5257e98 Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Mon, 10 Jun 2019 08:47:57 -0400 Subject: drm/amd/display: Allow cursor async updates for framebuffer swaps [Why] We previously allowed framebuffer swaps as async updates for cursor planes but had to disable them due to a bug in DRM with async update handling and incorrect ref counting. The check to block framebuffer swaps has been added to DRM for a while now, so this check is redundant. The real fix that allows this to properly in DRM has also finally been merged and is getting backported into stable branches, so dropping this now seems to be the right time to do so. [How] Drop the redundant check for old_fb != new_fb. With the proper fix in DRM, this should also fix some cursor stuttering issues with xf86-video-amdgpu since it double buffers the cursor. IGT tests that swap framebuffers (-varying-size for example) should also pass again. 
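A toy model of why the old old_fb != new_fb rejection hurt double-buffered cursors such as xf86-video-amdgpu's: every cursor update flips to the other buffer, so the framebuffer changes on each async commit. Purely illustrative, not DRM code:

#include <stdio.h>

struct fb { int id; };

int main(void)
{
    struct fb bufs[2] = { { .id = 1 }, { .id = 2 } };
    int front = 0;

    for (int frame = 0; frame < 4; frame++) {
        front ^= 1;   /* swap buffers: old_fb != new_fb on every update */
        printf("frame %d: async cursor commit with fb %d\n",
               frame, bufs[front].id);
    }
    return 0;
}
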
Signed-off-by: Nicholas Kazlauskas Acked-by: Alex Deucher Reviewed-by: David Francis Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 10 ---------- 1 file changed, 10 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 94438117bcda..a833b23f34ab 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -4544,20 +4544,10 @@ static int dm_plane_atomic_check(struct drm_plane *plane, static int dm_plane_atomic_async_check(struct drm_plane *plane, struct drm_plane_state *new_plane_state) { - struct drm_plane_state *old_plane_state = - drm_atomic_get_old_plane_state(new_plane_state->state, plane); - /* Only support async updates on cursor planes. */ if (plane->type != DRM_PLANE_TYPE_CURSOR) return -EINVAL; - /* - * DRM calls prepare_fb and cleanup_fb on new_plane_state for - * async commits so don't allow fb changes. - */ - if (old_plane_state->fb != new_plane_state->fb) - return -EINVAL; - return 0; } -- cgit v1.2.3 From 43d10d30df156f7834fa91aecb69614fefc8bb0a Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Wed, 31 Jul 2019 09:45:16 -0400 Subject: drm/amd/display: Skip determining update type for async updates [Why] By passing through the dm_determine_update_type_for_commit for atomic commits that can be done asynchronously we are incurring a performance penalty by locking access to the global private object and holding that access until the end of the programming sequence. This is also allocating a new large dc_state on every access in addition to retaining all the references on each stream and plane until the end of the programming sequence. [How] Shift the determination for async update before validation. Return early if it's going to be an async update. Signed-off-by: Nicholas Kazlauskas Acked-by: Alex Deucher Reviewed-by: David Francis Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 27 +++++++++++++++++------ 1 file changed, 20 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index a833b23f34ab..511480df7e63 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -7275,6 +7275,26 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, if (ret) goto fail; + if (state->legacy_cursor_update) { + /* + * This is a fast cursor update coming from the plane update + * helper, check if it can be done asynchronously for better + * performance. + */ + state->async_update = + !drm_atomic_helper_async_check(dev, state); + + /* + * Skip the remaining global validation if this is an async + * update. Cursor updates can be done without affecting + * state or bandwidth calcs and this avoids the performance + * penalty of locking the private state object and + * allocating a new dc_state. + */ + if (state->async_update) + return 0; + } + /* Check scaling and underscan changes*/ /* TODO Removed scaling changes validation due to inability to commit * new stream into context w\o causing full reset. 
Need to @@ -7327,13 +7347,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, ret = -EINVAL; goto fail; } - } else if (state->legacy_cursor_update) { - /* - * This is a fast cursor update coming from the plane update - * helper, check if it can be done asynchronously for better - * performance. - */ - state->async_update = !drm_atomic_helper_async_check(dev, state); } /* Must be success */ -- cgit v1.2.3 From bd200d190f45b62c006d1ad0a63eeffd87db7a47 Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Wed, 31 Jul 2019 10:33:54 -0400 Subject: drm/amd/display: Don't replace the dc_state for fast updates [Why] DRM private objects have no hw_done/flip_done fencing mechanism on their own and cannot be used to sequence commits accordingly. When issuing commits that don't touch the same set of hardware resources like page-flips on different CRTCs we can run into the issue below because of this: 1. Client requests non-blocking Commit #1, has a new dc_state #1, state is swapped, commit tail is deferred to work queue 2. Client requests non-blocking Commit #2, has a new dc_state #2, state is swapped, commit tail is deferred to work queue 3. Commit #2 work starts, commit tail finishes, atomic state is cleared, dc_state #1 is freed 4. Commit #1 work starts, commit tail encounters null pointer deref on dc_state #1 In order to change the DC state as in the private object we need to ensure that we wait for all outstanding commits to finish and that any other pending commits must wait for the current one to finish as well. We do this for MEDIUM and FULL updates. But not for FAST updates, nor would we want to since it would cause stuttering from the delays. FAST updates that go through dm_determine_update_type_for_commit always create a new dc_state and lock the DRM private object if there are any changed planes. We need the old state to validate, but we don't actually need the new state here. [How] If the commit isn't a full update then the use after free can be resolved by simply discarding the new state entirely and retaining the existing one instead. With this change the sequence above can be reexamined. Commit #2 will still free Commit #1's reference, but before this happens we actually added an additional reference as part of Commit #2. If an update comes in during this that needs to change the dc_state it will need to wait on Commit #1 and Commit #2 to finish. Then it'll swap the state, finish the work in commit tail and drop the last reference on Commit #2's dc_state. Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=204181 Fixes: 004b3938e637 ("drm/amd/display: Check scaling info when determing update type") Signed-off-by: Nicholas Kazlauskas Acked-by: Alex Deucher Reviewed-by: David Francis Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 511480df7e63..dca371a4407c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -7347,6 +7347,29 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, ret = -EINVAL; goto fail; } + } else { + /* + * The commit is a fast update. Fast updates shouldn't change + * the DC context, affect global validation, and can have their + * commit work done in parallel with other commits not touching + * the same resource. 
If we have a new DC context as part of + * the DM atomic state from validation we need to free it and + * retain the existing one instead. + */ + struct dm_atomic_state *new_dm_state, *old_dm_state; + + new_dm_state = dm_atomic_get_new_state(state); + old_dm_state = dm_atomic_get_old_state(state); + + if (new_dm_state && old_dm_state) { + if (new_dm_state->context) + dc_release_state(new_dm_state->context); + + new_dm_state->context = old_dm_state->context; + + if (old_dm_state->context) + dc_retain_state(old_dm_state->context); + } } /* Must be success */ -- cgit v1.2.3 From b94afb61cdaeaa86f77846fdec419eb5e9d9f537 Mon Sep 17 00:00:00 2001 From: Kevin Wang Date: Fri, 2 Aug 2019 12:01:00 +0800 Subject: drm/amd/powerplay: honor hw limit on fetching metrics data for navi10 too frequently to update mertrics table will cause smu internal error. Signed-off-by: Kevin Wang Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 56 ++++++++++++++++++++---------- 1 file changed, 38 insertions(+), 18 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index 106352a4fb82..3fbf622f9a87 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -516,6 +516,8 @@ static int navi10_store_powerplay_table(struct smu_context *smu) static int navi10_tables_init(struct smu_context *smu, struct smu_table *tables) { + struct smu_table_context *smu_table = &smu->smu_table; + SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t), @@ -530,9 +532,35 @@ static int navi10_tables_init(struct smu_context *smu, struct smu_table *tables) sizeof(DpmActivityMonitorCoeffInt_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL); + if (!smu_table->metrics_table) + return -ENOMEM; + smu_table->metrics_time = 0; + return 0; } +static int navi10_get_metrics_table(struct smu_context *smu, + SmuMetrics_t *metrics_table) +{ + struct smu_table_context *smu_table= &smu->smu_table; + int ret = 0; + + if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + HZ / 1000)) { + ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, + (void *)smu_table->metrics_table, false); + if (ret) { + pr_info("Failed to export SMU metrics table!\n"); + return ret; + } + smu_table->metrics_time = jiffies; + } + + memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t)); + + return ret; +} + static int navi10_allocate_dpm_context(struct smu_context *smu) { struct smu_dpm_context *smu_dpm = &smu->smu_dpm; @@ -612,15 +640,10 @@ static int navi10_get_current_clk_freq_by_table(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *value) { - static SmuMetrics_t metrics; int ret = 0, clk_id = 0; + SmuMetrics_t metrics; - if (!value) - return -EINVAL; - - memset(&metrics, 0, sizeof(metrics)); - - ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, (void *)&metrics, false); + ret = navi10_get_metrics_table(smu, &metrics); if (ret) return ret; @@ -908,8 +931,9 @@ static int navi10_get_gpu_power(struct smu_context *smu, uint32_t *value) if (!value) return -EINVAL; - ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, (void *)&metrics, - false); + ret = navi10_get_metrics_table(smu, &metrics); + if (ret) + return ret; if (ret) return ret; @@ -928,10 +952,7 @@ static int 
navi10_get_current_activity_percent(struct smu_context *smu, if (!value) return -EINVAL; - msleep(1); - - ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, - (void *)&metrics, false); + ret = navi10_get_metrics_table(smu, &metrics); if (ret) return ret; @@ -970,10 +991,9 @@ static int navi10_get_fan_speed_rpm(struct smu_context *smu, if (!speed) return -EINVAL; - memset(&metrics, 0, sizeof(metrics)); - - ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, - (void *)&metrics, false); + ret = navi10_get_metrics_table(smu, &metrics); + if (ret) + return ret; if (ret) return ret; @@ -1326,7 +1346,7 @@ static int navi10_thermal_get_temperature(struct smu_context *smu, if (!value) return -EINVAL; - ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, (void *)&metrics, false); + ret = navi10_get_metrics_table(smu, &metrics); if (ret) return ret; -- cgit v1.2.3 From a55c8d7bda4f83e86e2b7ed7b1704e762ed50db3 Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Mon, 29 Jul 2019 10:28:57 +0800 Subject: drm/amdgpu: remove the clear of MCA_ADDR clearing MCA_STATUS is enough to reset the whole MCA, writing zero to MCA_ADDR is unnecessary Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/umc_v6_1.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c index bff1a12f2cc9..035e4fea472c 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c @@ -229,8 +229,6 @@ static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev, /* clear umc status */ WREG64(mc_umc_status_addr + umc_reg_offset, 0x0ULL); - /* clear error address register */ - WREG64_PCIE(smnMCA_UMC0_MCUMC_ADDRT0 + umc_reg_offset * 4, 0x0ULL); } } -- cgit v1.2.3 From 33b97cf896d4ec6c05e8febdf73ee30c508a0481 Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Mon, 29 Jul 2019 14:10:54 +0800 Subject: drm/amdgpu: add more parameters and functions to amdgpu_umc structure expose more parameters and functions of specific umc version to common umc layer, so amdgpu_umc layer and other blocks could access them Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h | 13 +++++++++++++ drivers/gpu/drm/amd/amdgpu/umc_v6_1.h | 2 ++ 2 files changed, 15 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h index dfa1a39e57af..2604f5076867 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h @@ -22,15 +22,28 @@ #define __AMDGPU_UMC_H__ struct amdgpu_umc_funcs { + void (*ras_init)(struct amdgpu_device *adev); void (*query_ras_error_count)(struct amdgpu_device *adev, void *ras_error_status); void (*query_ras_error_address)(struct amdgpu_device *adev, void *ras_error_status); + void (*enable_umc_index_mode)(struct amdgpu_device *adev, + uint32_t umc_instance); + void (*disable_umc_index_mode)(struct amdgpu_device *adev); }; struct amdgpu_umc { /* max error count in one ras query call */ uint32_t max_ras_err_cnt_per_query; + /* number of umc channel instance with memory map register access */ + uint32_t channel_inst_num; + /* number of umc instance with memory map register access */ + uint32_t umc_inst_num; + /* UMC regiser per channel offset */ + uint32_t channel_offs; + /* channel index table of interleaved memory */ + const uint32_t 
*channel_idx_tbl; + const struct amdgpu_umc_funcs *funcs; }; diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h index d25ae414f4d8..bddaf14a77f9 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h @@ -31,6 +31,8 @@ #define UMC_V6_1_CHANNEL_INSTANCE_NUM 4 /* number of umc instance with memory map register access */ #define UMC_V6_1_UMC_INSTANCE_NUM 8 +/* total channel instances in one umc block */ +#define UMC_V6_1_TOTAL_CHANNEL_NUM (UMC_V6_1_CHANNEL_INSTANCE_NUM * UMC_V6_1_UMC_INSTANCE_NUM) /* UMC regiser per channel offset */ #define UMC_V6_1_PER_CHANNEL_OFFSET 0x800 -- cgit v1.2.3 From 3aacf4ea1102f24c8dc63eb6f3d734cbc8bad86e Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Mon, 29 Jul 2019 14:28:35 +0800 Subject: drm/amdgpu: initialize new parameters and functions for amdgpu_umc structure add initialization for new members of amdgpu_umc structure Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 7 +++++-- drivers/gpu/drm/amd/amdgpu/umc_v6_1.c | 10 +++++++++- drivers/gpu/drm/amd/amdgpu/umc_v6_1.h | 3 +++ 3 files changed, 17 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 7c4d9d99c6d1..24387026fdee 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -635,8 +635,11 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev) { switch (adev->asic_type) { case CHIP_VEGA20: - adev->umc.max_ras_err_cnt_per_query = - UMC_V6_1_UMC_INSTANCE_NUM * UMC_V6_1_CHANNEL_INSTANCE_NUM; + adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM; + adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM; + adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM; + adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET; + adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0]; adev->umc.funcs = &umc_v6_1_funcs; break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c index 035e4fea472c..9ba015d7eb57 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c @@ -41,7 +41,7 @@ /* offset in 256B block */ #define OFFSET_IN_256B_BLOCK(addr) ((addr) & 0xffULL) -static uint32_t +const uint32_t umc_v6_1_channel_idx_tbl[UMC_V6_1_UMC_INSTANCE_NUM][UMC_V6_1_CHANNEL_INSTANCE_NUM] = { {2, 18, 11, 27}, {4, 20, 13, 29}, {1, 17, 8, 24}, {7, 23, 14, 30}, @@ -235,7 +235,15 @@ static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev, umc_v6_1_disable_umc_index_mode(adev); } +static void umc_v6_1_ras_init(struct amdgpu_device *adev) +{ + +} + const struct amdgpu_umc_funcs umc_v6_1_funcs = { + .ras_init = umc_v6_1_ras_init, .query_ras_error_count = umc_v6_1_query_ras_error_count, .query_ras_error_address = umc_v6_1_query_ras_error_address, + .enable_umc_index_mode = umc_v6_1_enable_umc_index_mode, + .disable_umc_index_mode = umc_v6_1_disable_umc_index_mode, }; diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h index bddaf14a77f9..ad4598c0e495 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h @@ -24,6 +24,7 @@ #define __UMC_V6_1_H__ #include "soc15_common.h" +#include "amdgpu.h" /* HBM Memory Channel Width */ #define UMC_V6_1_HBM_MEMORY_CHANNEL_WIDTH 128 @@ -37,5 +38,7 @@ #define UMC_V6_1_PER_CHANNEL_OFFSET 0x800 extern const 
struct amdgpu_umc_funcs umc_v6_1_funcs; +extern const uint32_t + umc_v6_1_channel_idx_tbl[UMC_V6_1_UMC_INSTANCE_NUM][UMC_V6_1_CHANNEL_INSTANCE_NUM]; #endif -- cgit v1.2.3 From fee858ba5f96e6cbcbe8167444aeb6532519bb8c Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Mon, 29 Jul 2019 14:50:35 +0800 Subject: drm/amdgpu: add macro of umc for each channel common function for all umc versions, loop for each umc channel is a frequent used operation in umc block, define it as a macro to simplify code Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h index 2604f5076867..9efdd66279e5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h @@ -21,6 +21,29 @@ #ifndef __AMDGPU_UMC_H__ #define __AMDGPU_UMC_H__ +/* + * void (*func)(struct amdgpu_device *adev, struct ras_err_data *err_data, + * uint32_t umc_reg_offset, uint32_t channel_index) + */ +#define amdgpu_umc_for_each_channel(func) \ + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; \ + uint32_t umc_inst, channel_inst, umc_reg_offset, channel_index; \ + for (umc_inst = 0; umc_inst < adev->umc.umc_inst_num; umc_inst++) { \ + /* enable the index mode to query eror count per channel */ \ + adev->umc.funcs->enable_umc_index_mode(adev, umc_inst); \ + for (channel_inst = 0; \ + channel_inst < adev->umc.channel_inst_num; \ + channel_inst++) { \ + /* calc the register offset according to channel instance */ \ + umc_reg_offset = adev->umc.channel_offs * channel_inst; \ + /* get channel index of interleaved memory */ \ + channel_index = adev->umc.channel_idx_tbl[ \ + umc_inst * adev->umc.channel_inst_num + channel_inst]; \ + (func)(adev, err_data, umc_reg_offset, channel_index); \ + } \ + } \ + adev->umc.funcs->disable_umc_index_mode(adev); + struct amdgpu_umc_funcs { void (*ras_init)(struct amdgpu_device *adev); void (*query_ras_error_count)(struct amdgpu_device *adev, -- cgit v1.2.3 From 2b671b6049efafc7ae6de9f67acb964b9c534f3a Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Thu, 1 Aug 2019 11:37:25 +0800 Subject: drm/amdgpu: apply umc_for_each_channel macro to umc_6_1 use umc_for_each_channel to make code simpler Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/umc_v6_1.c | 84 ++++++++++++----------------------- 1 file changed, 28 insertions(+), 56 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c index 9ba015d7eb57..5747a0252624 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c @@ -142,46 +142,39 @@ static void umc_v6_1_querry_uncorrectable_error_count(struct amdgpu_device *adev *error_count += 1; } +static void umc_v6_1_query_error_count(struct amdgpu_device *adev, + struct ras_err_data *err_data, uint32_t umc_reg_offset, + uint32_t channel_index) +{ + umc_v6_1_query_correctable_error_count(adev, umc_reg_offset, + &(err_data->ce_count)); + umc_v6_1_querry_uncorrectable_error_count(adev, umc_reg_offset, + &(err_data->ue_count)); +} + static void umc_v6_1_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status) { - struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; - uint32_t umc_inst, channel_inst, 
umc_reg_offset, mc_umc_status_addr; - - mc_umc_status_addr = - SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0); - - for (umc_inst = 0; umc_inst < UMC_V6_1_UMC_INSTANCE_NUM; umc_inst++) { - /* enable the index mode to query eror count per channel */ - umc_v6_1_enable_umc_index_mode(adev, umc_inst); - for (channel_inst = 0; channel_inst < UMC_V6_1_CHANNEL_INSTANCE_NUM; channel_inst++) { - /* calc the register offset according to channel instance */ - umc_reg_offset = UMC_V6_1_PER_CHANNEL_OFFSET * channel_inst; - umc_v6_1_query_correctable_error_count(adev, umc_reg_offset, - &(err_data->ce_count)); - umc_v6_1_querry_uncorrectable_error_count(adev, umc_reg_offset, - &(err_data->ue_count)); - /* clear umc status */ - WREG64(mc_umc_status_addr + umc_reg_offset, 0x0ULL); - } - } - umc_v6_1_disable_umc_index_mode(adev); + amdgpu_umc_for_each_channel(umc_v6_1_query_error_count); } static void umc_v6_1_query_error_address(struct amdgpu_device *adev, - uint32_t umc_reg_offset, uint32_t channel_index, - struct ras_err_data *err_data) + struct ras_err_data *err_data, + uint32_t umc_reg_offset, uint32_t channel_index) { - uint32_t lsb; + uint32_t lsb, mc_umc_status_addr; uint64_t mc_umc_status, err_addr; - uint32_t mc_umc_status_addr; + + mc_umc_status_addr = + SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0); /* skip error address process if -ENOMEM */ - if (!err_data->err_addr) + if (!err_data->err_addr) { + /* clear umc status */ + WREG64(mc_umc_status_addr + umc_reg_offset, 0x0ULL); return; + } - mc_umc_status_addr = - SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0); mc_umc_status = RREG64(mc_umc_status_addr + umc_reg_offset); /* calculate error address if ue/ce error is detected */ @@ -197,42 +190,21 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev, /* translate umc channel address to soc pa, 3 parts are included */ err_data->err_addr[err_data->err_addr_cnt] = - ADDR_OF_8KB_BLOCK(err_addr) - | ADDR_OF_256B_BLOCK(channel_index) - | OFFSET_IN_256B_BLOCK(err_addr); + ADDR_OF_8KB_BLOCK(err_addr) | + ADDR_OF_256B_BLOCK(channel_index) | + OFFSET_IN_256B_BLOCK(err_addr); err_data->err_addr_cnt++; } + + /* clear umc status */ + WREG64(mc_umc_status_addr + umc_reg_offset, 0x0ULL); } static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev, void *ras_error_status) { - struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; - uint32_t umc_inst, channel_inst, umc_reg_offset; - uint32_t channel_index, mc_umc_status_addr; - - mc_umc_status_addr = - SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0); - - for (umc_inst = 0; umc_inst < UMC_V6_1_UMC_INSTANCE_NUM; umc_inst++) { - /* enable the index mode to query eror count per channel */ - umc_v6_1_enable_umc_index_mode(adev, umc_inst); - for (channel_inst = 0; channel_inst < UMC_V6_1_CHANNEL_INSTANCE_NUM; channel_inst++) { - /* calc the register offset according to channel instance */ - umc_reg_offset = UMC_V6_1_PER_CHANNEL_OFFSET * channel_inst; - /* get channel index of interleaved memory */ - channel_index = umc_v6_1_channel_idx_tbl[umc_inst][channel_inst]; - - umc_v6_1_query_error_address(adev, umc_reg_offset, - channel_index, err_data); - - /* clear umc status */ - WREG64(mc_umc_status_addr + umc_reg_offset, 0x0ULL); - } - } - - umc_v6_1_disable_umc_index_mode(adev); + amdgpu_umc_for_each_channel(umc_v6_1_query_error_address); } static void umc_v6_1_ras_init(struct amdgpu_device *adev) -- cgit v1.2.3 From 13b7c46c18e26182450d01efd0988e5c9fa8260f Mon Sep 17 00:00:00 2001 From: Tao 
Zhou Date: Thu, 1 Aug 2019 11:41:39 +0800 Subject: drm/amdgpu: add error address query for umc ras umc error address query can get ce/ue error address and clear error status Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 5 +++++ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 5 +++++ 2 files changed, 10 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index e15fedb0ce73..2a4878646b73 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -601,6 +601,11 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev, case AMDGPU_RAS_BLOCK__UMC: if (adev->umc.funcs->query_ras_error_count) adev->umc.funcs->query_ras_error_count(adev, &err_data); + /* umc query_ras_error_address is also responsible for clearing + * error status + */ + if (adev->umc.funcs->query_ras_error_address) + adev->umc.funcs->query_ras_error_address(adev, &err_data); break; case AMDGPU_RAS_BLOCK__GFX: if (adev->gfx.funcs->query_ras_error_count) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 24387026fdee..c7647c6988df 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -249,6 +249,11 @@ static int gmc_v9_0_process_ras_data_cb(struct amdgpu_device *adev, kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); if (adev->umc.funcs->query_ras_error_count) adev->umc.funcs->query_ras_error_count(adev, err_data); + /* umc query_ras_error_address is also responsible for clearing + * error status + */ + if (adev->umc.funcs->query_ras_error_address) + adev->umc.funcs->query_ras_error_address(adev, err_data); amdgpu_ras_reset_gpu(adev, 0); return AMDGPU_RAS_UE; } -- cgit v1.2.3 From 51437623a0a14a5af74aee144a0f2d5790ab1420 Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Mon, 29 Jul 2019 16:04:33 +0800 Subject: drm/amdgpu: support ce interrupt in ras module correctable error can also trigger interrupt in some ras blocks Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 2a4878646b73..d4c084711daf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -1049,12 +1049,12 @@ static void amdgpu_ras_interrupt_handler(struct ras_manager *obj) * the error. */ if (ret == AMDGPU_RAS_UE) { + /* these counts could be left as 0 if + * some blocks do not count error number + */ obj->err_data.ue_count += err_data.ue_count; + obj->err_data.ce_count += err_data.ce_count; } - /* Might need get ce count by register, but not all IP - * saves ce count, some IP just use one bit or two bits - * to indicate ce happened. 
- */ } } } @@ -1551,6 +1551,10 @@ int amdgpu_ras_init(struct amdgpu_device *adev) if (amdgpu_ras_fs_init(adev)) goto fs_out; + /* ras init for each ras block */ + if (adev->umc.funcs->ras_init) + adev->umc.funcs->ras_init(adev); + DRM_INFO("RAS INFO: ras initialized successfully, " "hardware ability[%x] ras_mask[%x]\n", con->hw_supported, con->supported); -- cgit v1.2.3 From b7f92097f5bc5129cb386340ec54e6f40639d6e3 Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Mon, 29 Jul 2019 17:01:39 +0800 Subject: drm/amdgpu: implement umc ras init function enable umc ce interrupt and initialize ecc error count Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/umc_v6_1.c | 32 ++++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/umc_v6_1.h | 7 +++++++ 2 files changed, 39 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c index 5747a0252624..0ab2e96b4f77 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c @@ -207,9 +207,41 @@ static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev, amdgpu_umc_for_each_channel(umc_v6_1_query_error_address); } +static void umc_v6_1_ras_init_per_channel(struct amdgpu_device *adev, + struct ras_err_data *err_data, + uint32_t umc_reg_offset, uint32_t channel_index) +{ + uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr; + uint32_t ecc_err_cnt_addr; + + ecc_err_cnt_sel_addr = + SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel); + ecc_err_cnt_addr = + SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt); + + /* select the lower chip and check the error count */ + ecc_err_cnt_sel = RREG32(ecc_err_cnt_sel_addr + umc_reg_offset); + ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel, + EccErrCntCsSel, 0); + /* set ce error interrupt type to APIC based interrupt */ + ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel, + EccErrInt, 0x1); + WREG32(ecc_err_cnt_sel_addr + umc_reg_offset, ecc_err_cnt_sel); + /* set error count to initial value */ + WREG32(ecc_err_cnt_addr + umc_reg_offset, UMC_V6_1_CE_CNT_INIT); + + /* select the higher chip and check the err counter */ + ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel, + EccErrCntCsSel, 1); + WREG32(ecc_err_cnt_sel_addr + umc_reg_offset, ecc_err_cnt_sel); + WREG32(ecc_err_cnt_addr + umc_reg_offset, UMC_V6_1_CE_CNT_INIT); +} + static void umc_v6_1_ras_init(struct amdgpu_device *adev) { + void *ras_error_status = NULL; + amdgpu_umc_for_each_channel(umc_v6_1_ras_init_per_channel); } const struct amdgpu_umc_funcs umc_v6_1_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h index ad4598c0e495..dab9cbd292c5 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h @@ -37,6 +37,13 @@ /* UMC regiser per channel offset */ #define UMC_V6_1_PER_CHANNEL_OFFSET 0x800 +/* EccErrCnt max value */ +#define UMC_V6_1_CE_CNT_MAX 0xffff +/* umc ce interrupt threshold */ +#define UMC_V6_1_CE_INT_THRESHOLD 0xffff +/* umc ce count initial value */ +#define UMC_V6_1_CE_CNT_INIT (UMC_V6_1_CE_CNT_MAX - UMC_V6_1_CE_INT_THRESHOLD) + extern const struct amdgpu_umc_funcs umc_v6_1_funcs; extern const uint32_t umc_v6_1_channel_idx_tbl[UMC_V6_1_UMC_INSTANCE_NUM][UMC_V6_1_CHANNEL_INSTANCE_NUM]; -- cgit v1.2.3 From b1a5895352dc1a154f1605702745ef2f63a5d797 Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: 
Mon, 29 Jul 2019 17:19:57 +0800 Subject: drm/amdgpu: update the calc algorithm of umc ecc error count the initial value of ecc error count can be adjusted Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/umc_v6_1.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c index 0ab2e96b4f77..64df37b860dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c @@ -98,9 +98,10 @@ static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev, WREG32(ecc_err_cnt_sel_addr + umc_reg_offset, ecc_err_cnt_sel); ecc_err_cnt = RREG32(ecc_err_cnt_addr + umc_reg_offset); *error_count += - REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt); + (REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) - + UMC_V6_1_CE_CNT_INIT); /* clear the lower chip err count */ - WREG32(ecc_err_cnt_addr + umc_reg_offset, 0); + WREG32(ecc_err_cnt_addr + umc_reg_offset, UMC_V6_1_CE_CNT_INIT); /* select the higher chip and check the err counter */ ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel, @@ -108,9 +109,10 @@ static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev, WREG32(ecc_err_cnt_sel_addr + umc_reg_offset, ecc_err_cnt_sel); ecc_err_cnt = RREG32(ecc_err_cnt_addr + umc_reg_offset); *error_count += - REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt); + (REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) - + UMC_V6_1_CE_CNT_INIT); /* clear the higher chip err count */ - WREG32(ecc_err_cnt_addr + umc_reg_offset, 0); + WREG32(ecc_err_cnt_addr + umc_reg_offset, UMC_V6_1_CE_CNT_INIT); /* check for SRAM correctable error MCUMC_STATUS is a 64 bit register */ -- cgit v1.2.3 From 91ba68f8d5727adb1c17d039af34af0f8297dff3 Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Thu, 1 Aug 2019 12:52:54 +0800 Subject: drm/amdgpu: only uncorrectable error needs gpu reset we only read error information for correctable error in interrupt handler, gpu reset is unnecessary since there is no data lost in correctable error Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index c7647c6988df..a3575522f83d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -254,7 +254,11 @@ static int gmc_v9_0_process_ras_data_cb(struct amdgpu_device *adev, */ if (adev->umc.funcs->query_ras_error_address) adev->umc.funcs->query_ras_error_address(adev, err_data); - amdgpu_ras_reset_gpu(adev, 0); + + /* only uncorrectable error needs gpu reset */ + if (err_data->ue_count) + amdgpu_ras_reset_gpu(adev, 0); + return AMDGPU_RAS_UE; } -- cgit v1.2.3 From bd2280da46dbdbc3d70dc538f0613afb6fcc4efa Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Thu, 1 Aug 2019 17:30:35 +0800 Subject: drm/amdgpu: replace AMDGPU_RAS_UE with AMDGPU_RAS_SUCCESS ce can also trigger interrupt, and even both ce and ue error can be found in one ras query, distinguishing between ce and ue in interrupt handler is uncessary. 
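A compilable sketch of the dispatch pattern this enables (names are simplified stand-ins, not the amdgpu_ras symbols): the per-block query fills in whichever counters apply, and the handler accumulates both kinds on success instead of special-casing uncorrectable errors:

#include <stdint.h>
#include <stdio.h>

enum ras_result { RAS_SUCCESS = 0, RAS_FAIL = 1 };

struct err_data { uint32_t ce_count, ue_count; };
struct ras_obj  { struct err_data totals; };

/* Per-IP callback reports whatever it counted this time (possibly both kinds). */
static enum ras_result query_block_errors(struct err_data *d)
{
    d->ce_count = 3;   /* pretend: three correctable errors this interrupt */
    d->ue_count = 0;
    return RAS_SUCCESS;
}

static void ras_interrupt_handler(struct ras_obj *obj)
{
    struct err_data d = { 0 };

    /* Accumulate on success regardless of error type; fields left at 0 are harmless. */
    if (query_block_errors(&d) == RAS_SUCCESS) {
        obj->totals.ce_count += d.ce_count;
        obj->totals.ue_count += d.ue_count;
    }
}

int main(void)
{
    struct ras_obj obj = { { 0, 0 } };

    ras_interrupt_handler(&obj);
    printf("ce=%u ue=%u\n", obj.totals.ce_count, obj.totals.ue_count);
    return 0;
}
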
Signed-off-by: Tao Zhou Suggested-by: Guchun Chen Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 2 +- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index d4c084711daf..709d22912381 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -1048,7 +1048,7 @@ static void amdgpu_ras_interrupt_handler(struct ras_manager *obj) * But leave IP do that recovery, here we just dispatch * the error. */ - if (ret == AMDGPU_RAS_UE) { + if (ret == AMDGPU_RAS_SUCCESS) { /* these counts could be left as 0 if * some blocks do not count error number */ diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index c7910be64bf1..206ef9f528ee 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -5654,7 +5654,7 @@ static int gfx_v9_0_process_ras_data_cb(struct amdgpu_device *adev, if (adev->gfx.funcs->query_ras_error_count) adev->gfx.funcs->query_ras_error_count(adev, err_data); amdgpu_ras_reset_gpu(adev, 0); - return AMDGPU_RAS_UE; + return AMDGPU_RAS_SUCCESS; } static const struct { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index a3575522f83d..0c77b9f244bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -259,7 +259,7 @@ static int gmc_v9_0_process_ras_data_cb(struct amdgpu_device *adev, if (err_data->ue_count) amdgpu_ras_reset_gpu(adev, 0); - return AMDGPU_RAS_UE; + return AMDGPU_RAS_SUCCESS; } static int gmc_v9_0_process_ecc_irq(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 30153c36efd6..5a73966b7d27 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1985,7 +1985,7 @@ static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev, amdgpu_ras_reset_gpu(adev, 0); - return AMDGPU_RAS_UE; + return AMDGPU_RAS_SUCCESS; } static int sdma_v4_0_process_ecc_irq(struct amdgpu_device *adev, -- cgit v1.2.3 From 839f9117e1c27593245fb2af952a81d11a13b7c9 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Tue, 30 Jul 2019 16:39:45 +0800 Subject: drm/amd/powerplay: guard consistency between CPU copy and local VRAM This can prevent CPU to use the out-dated copy. 
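A minimal stand-alone model of the ordering the change enforces, with a stub standing in for the nbio hdp_flush callback; the buffer names are invented for the sketch:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t device_table[64];   /* pretend: table the SMU just wrote back */
static uint8_t cpu_copy[64];

static void hdp_flush_stub(void)
{
    puts("flush HDP so CPU reads observe the freshly written table");
}

static void copy_table_from_device(void)
{
    /* 1. device has finished exporting the table (omitted here) */
    /* 2. flush before touching the mapping                      */
    hdp_flush_stub();
    /* 3. only now take the CPU copy                              */
    memcpy(cpu_copy, device_table, sizeof(cpu_copy));
}

int main(void)
{
    memset(device_table, 0xab, sizeof(device_table));
    copy_table_from_device();
    printf("first byte of CPU copy: 0x%02x\n", cpu_copy[0]);
    return 0;
}
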
Signed-off-by: Evan Quan Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 4 ++++ drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c | 4 ++++ drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c | 4 ++++ drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c | 4 ++++ drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c | 8 ++++++++ 5 files changed, 24 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 4b9d51231a46..0a20279a5ff8 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -451,6 +451,7 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int void *table_data, bool drv2smu) { struct smu_table_context *smu_table = &smu->smu_table; + struct amdgpu_device *adev = smu->adev; struct smu_table *table = NULL; int ret = 0; int table_id = smu_table_get_index(smu, table_index); @@ -478,6 +479,9 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int if (ret) return ret; + /* flush hdp cache */ + adev->nbio_funcs->hdp_flush(adev, NULL); + if (!drv2smu) memcpy(table_data, table->cpu_addr, table->size); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c index 7fb3e57cfc41..3f12cf341511 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c @@ -118,6 +118,7 @@ static int smu10_copy_table_from_smc(struct pp_hwmgr *hwmgr, { struct smu10_smumgr *priv = (struct smu10_smumgr *)(hwmgr->smu_backend); + struct amdgpu_device *adev = hwmgr->adev; PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE, "Invalid SMU Table ID!", return -EINVAL;); @@ -135,6 +136,9 @@ static int smu10_copy_table_from_smc(struct pp_hwmgr *hwmgr, PPSMC_MSG_TransferTableSmu2Dram, priv->smu_tables.entry[table_id].table_id); + /* flush hdp cache */ + adev->nbio_funcs->hdp_flush(adev, NULL); + memcpy(table, (uint8_t *)priv->smu_tables.entry[table_id].table, priv->smu_tables.entry[table_id].size); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c index 967d34b1dc51..0dbdde69f2d9 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c @@ -39,6 +39,7 @@ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr, uint8_t *table, int16_t table_id) { struct vega10_smumgr *priv = hwmgr->smu_backend; + struct amdgpu_device *adev = hwmgr->adev; PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE, "Invalid SMU Table ID!", return -EINVAL); @@ -56,6 +57,9 @@ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr, PPSMC_MSG_TransferTableSmu2Dram, priv->smu_tables.entry[table_id].table_id); + /* flush hdp cache */ + adev->nbio_funcs->hdp_flush(adev, NULL); + memcpy(table, priv->smu_tables.entry[table_id].table, priv->smu_tables.entry[table_id].size); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c index bab3df85fdcd..f9589806bf83 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c @@ -42,6 +42,7 @@ static int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr, { struct vega12_smumgr *priv = (struct vega12_smumgr *)(hwmgr->smu_backend); + struct amdgpu_device *adev = hwmgr->adev; 
PP_ASSERT_WITH_CODE(table_id < TABLE_COUNT, "Invalid SMU Table ID!", return -EINVAL); @@ -64,6 +65,9 @@ static int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr, "[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!", return -EINVAL); + /* flush hdp cache */ + adev->nbio_funcs->hdp_flush(adev, NULL); + memcpy(table, priv->smu_tables.entry[table_id].table, priv->smu_tables.entry[table_id].size); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c index 957446cf467e..3e97b83950dc 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c @@ -163,6 +163,7 @@ static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr, { struct vega20_smumgr *priv = (struct vega20_smumgr *)(hwmgr->smu_backend); + struct amdgpu_device *adev = hwmgr->adev; int ret = 0; PP_ASSERT_WITH_CODE(table_id < TABLE_COUNT, @@ -187,6 +188,9 @@ static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr, "[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!", return ret); + /* flush hdp cache */ + adev->nbio_funcs->hdp_flush(adev, NULL); + memcpy(table, priv->smu_tables.entry[table_id].table, priv->smu_tables.entry[table_id].size); @@ -266,6 +270,7 @@ int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr, { struct vega20_smumgr *priv = (struct vega20_smumgr *)(hwmgr->smu_backend); + struct amdgpu_device *adev = hwmgr->adev; int ret = 0; PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, @@ -284,6 +289,9 @@ int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr, "[GetActivityMonitor] Attempt to Transfer Table From SMU Failed!", return ret); + /* flush hdp cache */ + adev->nbio_funcs->hdp_flush(adev, NULL); + memcpy(table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size); -- cgit v1.2.3 From 7aa3f675d1439bb0ca68505b1eab06f75891e81b Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 29 Jul 2019 15:17:27 +0800 Subject: drm/amd/powerplay: support power profile retrieval and setting on arcturus Enable arcturus power profile retrieval and setting. Signed-off-by: Evan Quan Reviewed-by: Kevin Wang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/arcturus_ppt.c | 74 ++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c index 215f7173fca8..896fff2a446d 100644 --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c @@ -1371,6 +1371,78 @@ static int arcturus_get_power_limit(struct smu_context *smu, return 0; } +static int arcturus_get_power_profile_mode(struct smu_context *smu, + char *buf) +{ + static const char *profile_name[] = { + "BOOTUP_DEFAULT", + "3D_FULL_SCREEN", + "POWER_SAVING", + "VIDEO", + "VR", + "COMPUTE", + "CUSTOM"}; + uint32_t i, size = 0; + int16_t workload_type = 0; + + if (!smu->pm_enabled || !buf) + return -EINVAL; + + for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) { + /* + * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT + * Not all profile modes are supported on arcturus. + */ + workload_type = smu_workload_get_type(smu, i); + if (workload_type < 0) + continue; + + size += sprintf(buf + size, "%2d %14s%s\n", + i, profile_name[i], (i == smu->power_profile_mode) ? 
"*" : " "); + } + + return size; +} + +static int arcturus_set_power_profile_mode(struct smu_context *smu, + long *input, + uint32_t size) +{ + int workload_type = 0; + uint32_t profile_mode = input[size]; + int ret = 0; + + if (!smu->pm_enabled) + return -EINVAL; + + if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { + pr_err("Invalid power profile mode %d\n", profile_mode); + return -EINVAL; + } + + /* + * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT + * Not all profile modes are supported on arcturus. + */ + workload_type = smu_workload_get_type(smu, profile_mode); + if (workload_type < 0) { + pr_err("Unsupported power profile mode %d on arcturus\n", profile_mode); + return -EINVAL; + } + + ret = smu_send_smc_msg_with_param(smu, + SMU_MSG_SetWorkloadMask, + 1 << workload_type); + if (ret) { + pr_err("Fail to set workload type %d\n", workload_type); + return ret; + } + + smu->power_profile_mode = profile_mode; + + return 0; +} + static void arcturus_dump_pptable(struct smu_context *smu) { struct smu_table_context *table_context = &smu->smu_table; @@ -1834,6 +1906,8 @@ static const struct pptable_funcs arcturus_ppt_funcs = { .force_dpm_limit_value = arcturus_force_dpm_limit_value, .unforce_dpm_levels = arcturus_unforce_dpm_levels, .get_profiling_clk_mask = arcturus_get_profiling_clk_mask, + .get_power_profile_mode = arcturus_get_power_profile_mode, + .set_power_profile_mode = arcturus_set_power_profile_mode, /* debug (internal used) */ .dump_pptable = arcturus_dump_pptable, .get_power_limit = arcturus_get_power_limit, -- cgit v1.2.3 From 4abc1765d2f74f706fdae067f3aec5c0778154d6 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Wed, 31 Jul 2019 10:34:36 +0800 Subject: drm/amd/powerplay: enable SW SMU power profile switch support in KFD Hook up the SW SMU power profile switch in KFD routine. 
Signed-off-by: Evan Quan Reviewed-by: Kevin Wang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 8 ++++-- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 36 ++++++++++++++++++++++++++ drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 3 +++ 3 files changed, 45 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 0640fcdab557..f1cba95b1b0b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -655,8 +655,12 @@ void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle) { struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - if (adev->powerplay.pp_funcs && - adev->powerplay.pp_funcs->switch_power_profile) + if (is_support_sw_smu(adev)) + smu_switch_power_profile(&adev->smu, + PP_SMC_POWER_PROFILE_COMPUTE, + !idle); + else if (adev->powerplay.pp_funcs && + adev->powerplay.pp_funcs->switch_power_profile) amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_COMPUTE, !idle); diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 0a20279a5ff8..07d0f0c5d2c1 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -1677,6 +1677,42 @@ int smu_handle_task(struct smu_context *smu, return ret; } +int smu_switch_power_profile(struct smu_context *smu, + enum PP_SMC_POWER_PROFILE type, + bool en) +{ + struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); + long workload; + uint32_t index; + + if (!smu->pm_enabled) + return -EINVAL; + + if (!(type < PP_SMC_POWER_PROFILE_CUSTOM)) + return -EINVAL; + + mutex_lock(&smu->mutex); + + if (!en) { + smu->workload_mask &= ~(1 << smu->workload_prority[type]); + index = fls(smu->workload_mask); + index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; + workload = smu->workload_setting[index]; + } else { + smu->workload_mask |= (1 << smu->workload_prority[type]); + index = fls(smu->workload_mask); + index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; + workload = smu->workload_setting[index]; + } + + if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) + smu_set_power_profile_mode(smu, &workload, 0); + + mutex_unlock(&smu->mutex); + + return 0; +} + enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu) { struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index 1ecd73cd768c..d3a478471cd2 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h @@ -787,6 +787,9 @@ extern int smu_dpm_set_power_gate(struct smu_context *smu,uint32_t block_type, b extern int smu_handle_task(struct smu_context *smu, enum amd_dpm_forced_level level, enum amd_pp_task task_id); +int smu_switch_power_profile(struct smu_context *smu, + enum PP_SMC_POWER_PROFILE type, + bool en); int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version); int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type, uint16_t level, uint32_t *value); -- cgit v1.2.3 From 706e50829eb7f667f259c83629bdf9e6c992772b Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Fri, 2 Aug 2019 16:38:32 +0800 Subject: drm/amd/powerplay: correct navi10 vcn powergate vcn dpm on is a prerequisite for vcn power gate control. 
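For context, with this change the AMDGPU_PP_SENSOR_VCN_POWER_STATE sensor reports the driver's requested gating state (power_gate->vcn_gated) rather than the SMU_FEATURE_VCN_PG_BIT feature flag, so it stays meaningful even when VCN power gating is not enabled in the feature mask. An illustrative sketch of querying it through the common sensor helper shown in the diff below (the caller-side code here is an assumption, not part of the patch):

	uint32_t vcn_powered_up = 0;
	uint32_t size = sizeof(vcn_powered_up);

	/* reports 1 while VCN is ungated, 0 once gating was requested */
	if (!smu_common_read_sensor(&adev->smu,
				    AMDGPU_PP_SENSOR_VCN_POWER_STATE,
				    &vcn_powered_up, &size))
		DRM_DEBUG("VCN power state: %u\n", vcn_powered_up);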
Signed-off-by: Evan Quan Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 4 +++- drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 1 + drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 23 +++++++++++++++-------- 3 files changed, 19 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 07d0f0c5d2c1..00651118102c 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -409,6 +409,8 @@ int smu_get_power_num_states(struct smu_context *smu, int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor, void *data, uint32_t *size) { + struct smu_power_context *smu_power = &smu->smu_power; + struct smu_power_gate *power_gate = &smu_power->power_gate; int ret = 0; switch (sensor) { @@ -433,7 +435,7 @@ int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor, *size = 4; break; case AMDGPU_PP_SENSOR_VCN_POWER_STATE: - *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT) ? 1 : 0; + *(uint32_t *)data = power_gate->vcn_gated ? 0 : 1; *size = 4; break; default: diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index d3a478471cd2..f813072ab9e4 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h @@ -277,6 +277,7 @@ struct smu_dpm_context { struct smu_power_gate { bool uvd_gated; bool vce_gated; + bool vcn_gated; }; struct smu_power_context { diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index 3fbf622f9a87..d62c2784b102 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -619,20 +619,27 @@ static int navi10_set_default_dpm_table(struct smu_context *smu) static int navi10_dpm_set_uvd_enable(struct smu_context *smu, bool enable) { + struct smu_power_context *smu_power = &smu->smu_power; + struct smu_power_gate *power_gate = &smu_power->power_gate; int ret = 0; if (enable) { - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1); - if (ret) - return ret; + /* vcn dpm on is a prerequisite for vcn power gate messages */ + if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) { + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1); + if (ret) + return ret; + } + power_gate->vcn_gated = false; } else { - ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn); - if (ret) - return ret; + if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) { + ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn); + if (ret) + return ret; + } + power_gate->vcn_gated = true; } - ret = smu_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, enable); - return ret; } -- cgit v1.2.3 From c0dac3c9f5458c623fab7856f24c258303d15cfb Mon Sep 17 00:00:00 2001 From: John Clements Date: Wed, 31 Jul 2019 16:11:38 +0800 Subject: drm/amdgpu: removed duplicate line Remove duplicate break. 
Signed-off-by: John Clements Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 206ef9f528ee..21c120c52fdd 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1334,7 +1334,6 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev) else chip_name = "raven"; break; - break; case CHIP_ARCTURUS: chip_name = "arcturus"; break; -- cgit v1.2.3 From f36d9ab95f0f775fd21d3b8dc4e15fd34eda8004 Mon Sep 17 00:00:00 2001 From: John Clements Date: Thu, 1 Aug 2019 18:05:50 +0800 Subject: drm/amdgpu: add PSP SW init support for Arcturus Add arcturus cases to the psp init sequence. Signed-off-by: John Clements Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 1 + drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 3 +++ 2 files changed, 4 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 1f9105a6c050..24fefd19d410 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -53,6 +53,7 @@ static int psp_early_init(void *handle) psp->autoload_supported = false; break; case CHIP_VEGA20: + case CHIP_ARCTURUS: psp_v11_0_set_psp_funcs(psp); psp->autoload_supported = false; break; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index 2ccd9489a41d..40f19718c515 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -45,6 +45,8 @@ MODULE_FIRMWARE("amdgpu/navi10_sos.bin"); MODULE_FIRMWARE("amdgpu/navi10_asd.bin"); MODULE_FIRMWARE("amdgpu/navi14_sos.bin"); MODULE_FIRMWARE("amdgpu/navi14_asd.bin"); +MODULE_FIRMWARE("amdgpu/arcturus_sos.bin"); +MODULE_FIRMWARE("amdgpu/arcturus_asd.bin"); /* address block */ #define smnMP1_FIRMWARE_FLAGS 0x3010024 @@ -164,6 +166,7 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) break; case CHIP_NAVI10: case CHIP_NAVI14: + case CHIP_ARCTURUS: break; default: BUG(); -- cgit v1.2.3 From dc0d962297af444fa45ecea077e4ae53d7518719 Mon Sep 17 00:00:00 2001 From: John Clements Date: Mon, 22 Jul 2019 18:06:58 +0800 Subject: drm/amdgpu: add PSP KDB loading support for Arcturus Add support for the arcturus-specific psp metadata in the amdgpu firmware and properly parse it when loading it.
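For readers who have not seen the versioned PSP headers before, the v1.2 header keeps the entire v1.0 header as a prefix and appends the key database (KDB) descriptor. A rough, illustrative sketch of the layout and of how the loader resolves it, mirroring the structure and parsing added in the diff below:

	/* psp_firmware_header_v1_2 -- illustrative layout summary
	 *
	 *   struct psp_firmware_header_v1_0 v1_0;  common header plus sos/sys info
	 *   uint32_t reserve[3];
	 *   uint32_t kdb_header_version;
	 *   uint32_t kdb_offset_bytes;             where the KDB starts
	 *   uint32_t kdb_size_bytes;
	 *
	 * resolved by psp_v11_0_init_microcode() roughly as:
	 *   kdb_start_addr = sys_start_addr + le32_to_cpu(kdb_offset_bytes)
	 *   kdb_bin_size   = le32_to_cpu(kdb_size_bytes)
	 */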
Signed-off-by: John Clements Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 10 ++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 9 +++++++++ drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 10 ++++++++++ 3 files changed, 29 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index df5ebf72a979..9e974ce26378 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -269,6 +269,16 @@ void amdgpu_ucode_print_psp_hdr(const struct common_firmware_header *hdr) DRM_DEBUG("kdb_size_bytes: %u\n", le32_to_cpu(psp_hdr_v1_1->kdb_size_bytes)); } + if (version_minor == 2) { + const struct psp_firmware_header_v1_2 *psp_hdr_v1_2 = + container_of(psp_hdr, struct psp_firmware_header_v1_2, v1_0); + DRM_DEBUG("kdb_header_version: %u\n", + le32_to_cpu(psp_hdr_v1_2->kdb_header_version)); + DRM_DEBUG("kdb_offset_bytes: %u\n", + le32_to_cpu(psp_hdr_v1_2->kdb_offset_bytes)); + DRM_DEBUG("kdb_size_bytes: %u\n", + le32_to_cpu(psp_hdr_v1_2->kdb_size_bytes)); + } } else { DRM_ERROR("Unknown PSP ucode version: %u.%u\n", version_major, version_minor); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h index 2be106e81eda..4f1b167a9394 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h @@ -90,6 +90,15 @@ struct psp_firmware_header_v1_1 { uint32_t kdb_size_bytes; }; +/* version_major=1, version_minor=2 */ +struct psp_firmware_header_v1_2 { + struct psp_firmware_header_v1_0 v1_0; + uint32_t reserve[3]; + uint32_t kdb_header_version; + uint32_t kdb_offset_bytes; + uint32_t kdb_size_bytes; +}; + /* version_major=1, version_minor=0 */ struct ta_firmware_header_v1_0 { struct common_firmware_header header; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index 40f19718c515..38956e41718b 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -64,6 +64,7 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) int err = 0; const struct psp_firmware_header_v1_0 *sos_hdr; const struct psp_firmware_header_v1_1 *sos_hdr_v1_1; + const struct psp_firmware_header_v1_2 *sos_hdr_v1_2; const struct psp_firmware_header_v1_0 *asd_hdr; const struct ta_firmware_header_v1_0 *ta_hdr; @@ -79,6 +80,9 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) case CHIP_NAVI14: chip_name = "navi14"; break; + case CHIP_ARCTURUS: + chip_name = "arcturus"; + break; default: BUG(); } @@ -114,6 +118,12 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr + le32_to_cpu(sos_hdr_v1_1->kdb_offset_bytes); } + if (sos_hdr->header.header_version_minor == 2) { + sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data; + adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_2->kdb_size_bytes); + adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr + + le32_to_cpu(sos_hdr_v1_2->kdb_offset_bytes); + } break; default: dev_err(adev->dev, -- cgit v1.2.3 From 6c37bde9c63b4116913fd79abbb2e5abaeb52e23 Mon Sep 17 00:00:00 2001 From: John Clements Date: Thu, 25 Jul 2019 15:29:11 +0800 Subject: drm/amdgpu: update PSP CMD fail response status print Print the response in hex with the appropriate mask.
Signed-off-by: John Clements Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 24fefd19d410..18454ff64db2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -164,8 +164,8 @@ psp_cmd_submit_buf(struct psp_context *psp, if (ucode) DRM_WARN("failed to load ucode id (%d) ", ucode->ucode_id); - DRM_WARN("psp command failed and response status is (%d)\n", - psp->cmd_buf_mem->resp.status); + DRM_WARN("psp command failed and response status is (0x%X)\n", + psp->cmd_buf_mem->resp.status & GFX_CMD_STATUS_MASK); if (!timeout) { mutex_unlock(&psp->mutex); return -EINVAL; -- cgit v1.2.3 From 8fda90e8214413cc7f9e3c64ad0df7eddffcff62 Mon Sep 17 00:00:00 2001 From: John Clements Date: Wed, 31 Jul 2019 16:11:08 +0800 Subject: drm/amdgpu: disable MEC2 JT context init for Arcturus We don't need to handle it like other asics. Signed-off-by: John Clements Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 21c120c52fdd..0951b91180c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1286,11 +1286,17 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev, cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data; adev->firmware.fw_size += ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE); - info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT]; - info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT; - info->fw = adev->gfx.mec2_fw; - adev->firmware.fw_size += - ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE); + + /* TODO: Determine if MEC2 JT FW loading can be removed + for all GFX V9 asic and above */ + if (adev->asic_type != CHIP_ARCTURUS) { + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT]; + info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT; + info->fw = adev->gfx.mec2_fw; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, + PAGE_SIZE); + } } } -- cgit v1.2.3 From b86f8d8b2bc07b0f6802e9c6b481049a63f4a637 Mon Sep 17 00:00:00 2001 From: John Clements Date: Thu, 1 Aug 2019 17:59:55 +0800 Subject: drm/amdgpu: extend PSP FW loading support to 8 SDMA instances Arcturus has 8 instances of SDMA. Update host to PSP interface to handle it. 
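One detail worth noting: the explicit per-instance cases in psp_get_fw_type() are needed because the PSP-side IDs GFX_FW_TYPE_SDMA2..7 (52..57 in psp_gfx_if.h) are not numerically adjacent to GFX_FW_TYPE_SDMA0/1, so the mapping cannot be computed by simple offset arithmetic from the contiguous AMDGPU_UCODE_ID_SDMA0..7 range. An illustrative, table-driven alternative for the SDMA portion of that switch (a sketch, not what the patch implements):

	/* illustrative sketch: AMDGPU_UCODE_ID_SDMA0..7 are contiguous, the
	 * GFX_FW_TYPE values are not, so a table keeps the mapping explicit
	 */
	static const enum psp_gfx_fw_type sdma_fw_type[] = {
		GFX_FW_TYPE_SDMA0, GFX_FW_TYPE_SDMA1, GFX_FW_TYPE_SDMA2,
		GFX_FW_TYPE_SDMA3, GFX_FW_TYPE_SDMA4, GFX_FW_TYPE_SDMA5,
		GFX_FW_TYPE_SDMA6, GFX_FW_TYPE_SDMA7,
	};

	if (ucode->ucode_id >= AMDGPU_UCODE_ID_SDMA0 &&
	    ucode->ucode_id <= AMDGPU_UCODE_ID_SDMA7)
		*type = sdma_fw_type[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0];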
Signed-off-by: John Clements Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 27 ++++++++++++++++++++++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 6 ++++++ drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h | 11 +++++++++-- 3 files changed, 41 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 18454ff64db2..314e7cbabc87 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -833,7 +833,6 @@ static int psp_hw_start(struct psp_context *psp) "XGMI: Failed to initialize XGMI session\n"); } - if (psp->adev->psp.ta_fw) { ret = psp_ras_initialize(psp); if (ret) @@ -854,6 +853,24 @@ static int psp_get_fw_type(struct amdgpu_firmware_info *ucode, case AMDGPU_UCODE_ID_SDMA1: *type = GFX_FW_TYPE_SDMA1; break; + case AMDGPU_UCODE_ID_SDMA2: + *type = GFX_FW_TYPE_SDMA2; + break; + case AMDGPU_UCODE_ID_SDMA3: + *type = GFX_FW_TYPE_SDMA3; + break; + case AMDGPU_UCODE_ID_SDMA4: + *type = GFX_FW_TYPE_SDMA4; + break; + case AMDGPU_UCODE_ID_SDMA5: + *type = GFX_FW_TYPE_SDMA5; + break; + case AMDGPU_UCODE_ID_SDMA6: + *type = GFX_FW_TYPE_SDMA6; + break; + case AMDGPU_UCODE_ID_SDMA7: + *type = GFX_FW_TYPE_SDMA7; + break; case AMDGPU_UCODE_ID_CP_CE: *type = GFX_FW_TYPE_CP_CE; break; @@ -982,12 +999,20 @@ out: if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC && (psp_smu_reload_quirk(psp) || psp->autoload_supported)) continue; + if (amdgpu_sriov_vf(adev) && (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA0 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 + || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 + || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3 + || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA4 + || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA5 + || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA6 + || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA7 || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G)) /*skip ucode loading in SRIOV VF */ continue; + if (psp->autoload_supported && (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT || ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h index 4f1b167a9394..b34f00d42049 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h @@ -271,6 +271,12 @@ union amdgpu_firmware_header { enum AMDGPU_UCODE_ID { AMDGPU_UCODE_ID_SDMA0 = 0, AMDGPU_UCODE_ID_SDMA1, + AMDGPU_UCODE_ID_SDMA2, + AMDGPU_UCODE_ID_SDMA3, + AMDGPU_UCODE_ID_SDMA4, + AMDGPU_UCODE_ID_SDMA5, + AMDGPU_UCODE_ID_SDMA6, + AMDGPU_UCODE_ID_SDMA7, AMDGPU_UCODE_ID_CP_CE, AMDGPU_UCODE_ID_CP_PFP, AMDGPU_UCODE_ID_CP_ME, diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h index 5080a73a95a5..74a9fe8e0cfb 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h +++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h @@ -233,8 +233,15 @@ enum psp_gfx_fw_type { GFX_FW_TYPE_RLCP_CAM = 46, /* RLCP CAM NV */ GFX_FW_TYPE_RLC_SPP_CAM_EXT = 47, /* RLC SPP CAM EXT NV */ GFX_FW_TYPE_RLX6_DRAM_BOOT = 48, /* RLX6 DRAM BOOT NV */ - GFX_FW_TYPE_VCN0_RAM = 49, /* VCN_RAM NV */ - GFX_FW_TYPE_VCN1_RAM = 50, /* VCN_RAM NV */ + GFX_FW_TYPE_VCN0_RAM = 49, /* VCN_RAM NV + RN */ + GFX_FW_TYPE_VCN1_RAM = 50, /* VCN_RAM NV + RN */ + GFX_FW_TYPE_DMUB = 51, /* DMUB RN */ + GFX_FW_TYPE_SDMA2 = 52, /* SDMA2 MI */ + GFX_FW_TYPE_SDMA3 = 53, /* SDMA3 MI */ + GFX_FW_TYPE_SDMA4 = 54, /* SDMA4 MI */ + GFX_FW_TYPE_SDMA5 = 55, /* SDMA5 MI */ + 
GFX_FW_TYPE_SDMA6 = 56, /* SDMA6 MI */ + GFX_FW_TYPE_SDMA7 = 57, /* SDMA7 MI */ GFX_FW_TYPE_MAX }; -- cgit v1.2.3 From 8c2ef8ca0e0ea6a5b10b52a5eac8f3e7db0b663e Mon Sep 17 00:00:00 2001 From: John Clements Date: Thu, 1 Aug 2019 15:16:12 +0800 Subject: drm/amdgpu: update SDMA V4 microcode init Removed loading duplicate instances of SDMA FW for Arcturus. We use a single image for all instances. Signed-off-by: John Clements Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 110 ++++++++++++++++++++++----------- 1 file changed, 75 insertions(+), 35 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 5a73966b7d27..c04259182614 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -68,13 +68,6 @@ MODULE_FIRMWARE("amdgpu/raven_sdma.bin"); MODULE_FIRMWARE("amdgpu/picasso_sdma.bin"); MODULE_FIRMWARE("amdgpu/raven2_sdma.bin"); MODULE_FIRMWARE("amdgpu/arcturus_sdma.bin"); -MODULE_FIRMWARE("amdgpu/arcturus_sdma1.bin"); -MODULE_FIRMWARE("amdgpu/arcturus_sdma2.bin"); -MODULE_FIRMWARE("amdgpu/arcturus_sdma3.bin"); -MODULE_FIRMWARE("amdgpu/arcturus_sdma4.bin"); -MODULE_FIRMWARE("amdgpu/arcturus_sdma5.bin"); -MODULE_FIRMWARE("amdgpu/arcturus_sdma6.bin"); -MODULE_FIRMWARE("amdgpu/arcturus_sdma7.bin"); #define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK 0x000000F8L #define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L @@ -379,6 +372,43 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev) } } +static int sdma_v4_0_init_inst_ctx(struct amdgpu_sdma_instance *sdma_inst) +{ + int err = 0; + const struct sdma_firmware_header_v1_0 *hdr; + + err = amdgpu_ucode_validate(sdma_inst->fw); + if (err) + return err; + + hdr = (const struct sdma_firmware_header_v1_0 *)sdma_inst->fw->data; + sdma_inst->fw_version = le32_to_cpu(hdr->header.ucode_version); + sdma_inst->feature_version = le32_to_cpu(hdr->ucode_feature_version); + + if (sdma_inst->feature_version >= 20) + sdma_inst->burst_nop = true; + + return 0; +} + +static void sdma_v4_0_destroy_inst_ctx(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < adev->sdma.num_instances; i++) { + if (adev->sdma.instance[i].fw != NULL) + release_firmware(adev->sdma.instance[i].fw); + + /* arcturus shares the same FW memory across + all SDMA isntances */ + if (adev->asic_type == CHIP_ARCTURUS) + break; + } + + memset((void*)adev->sdma.instance, 0, + sizeof(struct amdgpu_sdma_instance) * AMDGPU_MAX_SDMA_INSTANCES); +} + /** * sdma_v4_0_init_microcode - load ucode images from disk * @@ -398,7 +428,6 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev) int err = 0, i; struct amdgpu_firmware_info *info = NULL; const struct common_firmware_header *header = NULL; - const struct sdma_firmware_header_v1_0 *hdr; DRM_DEBUG("\n"); @@ -427,26 +456,42 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev) BUG(); } - for (i = 0; i < adev->sdma.num_instances; i++) { - if (i == 0) - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name); - else + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name); + + err = request_firmware(&adev->sdma.instance[0].fw, fw_name, adev->dev); + if (err) + goto out; + + err = sdma_v4_0_init_inst_ctx(&adev->sdma.instance[0]); + if (err) + goto out; + + for (i = 1; i < adev->sdma.num_instances; i++) { + if (adev->asic_type == CHIP_ARCTURUS) { + /* Acturus will leverage the same FW 
memory + for every SDMA instance */ + memcpy((void*)&adev->sdma.instance[i], + (void*)&adev->sdma.instance[0], + sizeof(struct amdgpu_sdma_instance)); + } + else { snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma%d.bin", chip_name, i); - err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->sdma.instance[i].fw); - if (err) - goto out; - hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data; - adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version); - adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version); - if (adev->sdma.instance[i].feature_version >= 20) - adev->sdma.instance[i].burst_nop = true; - DRM_DEBUG("psp_load == '%s'\n", - adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false"); - - if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + + err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev); + if (err) + goto out; + + err = sdma_v4_0_init_inst_ctx(&adev->sdma.instance[i]); + if (err) + goto out; + } + } + + DRM_DEBUG("psp_load == '%s'\n", + adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false"); + + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + for (i = 0; i < adev->sdma.num_instances; i++) { info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i]; info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i; info->fw = adev->sdma.instance[i].fw; @@ -455,13 +500,11 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev) ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); } } + out: if (err) { DRM_ERROR("sdma_v4_0: Failed to load firmware \"%s\"\n", fw_name); - for (i = 0; i < adev->sdma.num_instances; i++) { - release_firmware(adev->sdma.instance[i].fw); - adev->sdma.instance[i].fw = NULL; - } + sdma_v4_0_destroy_inst_ctx(adev); } return err; } @@ -1814,10 +1857,7 @@ static int sdma_v4_0_sw_fini(void *handle) amdgpu_ring_fini(&adev->sdma.instance[i].page); } - for (i = 0; i < adev->sdma.num_instances; i++) { - release_firmware(adev->sdma.instance[i].fw); - adev->sdma.instance[i].fw = NULL; - } + sdma_v4_0_destroy_inst_ctx(adev); return 0; } -- cgit v1.2.3 From 6d622903285a878e1177981fb952e662e0f416a0 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Tue, 14 May 2019 15:18:19 +0800 Subject: drm/amdgpu: add ip offset header for navi12 (v2) This adds the absolute offsets of each IP register block. v2: Squash in MP1 update Signed-off-by: Xiaojie Yuan Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/navi12_ip_offset.h | 1119 ++++++++++++++++++++++++ 1 file changed, 1119 insertions(+) create mode 100644 drivers/gpu/drm/amd/include/navi12_ip_offset.h (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/include/navi12_ip_offset.h b/drivers/gpu/drm/amd/include/navi12_ip_offset.h new file mode 100644 index 000000000000..229e8fddfcc1 --- /dev/null +++ b/drivers/gpu/drm/amd/include/navi12_ip_offset.h @@ -0,0 +1,1119 @@ +/* + * Copyright (C) 2019 Advanced Micro Devices, Inc.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef _navi10_ip_offset_HEADER +#define _navi10_ip_offset_HEADER + +#define MAX_INSTANCE 7 +#define MAX_SEGMENT 5 + + +struct IP_BASE_INSTANCE +{ + unsigned int segment[MAX_SEGMENT]; +}; + +struct IP_BASE +{ + struct IP_BASE_INSTANCE instance[MAX_INSTANCE]; +}; + + +static const struct IP_BASE ATHUB_BASE ={ { { { 0x00000C00, 0x02408C00, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE CLK_BASE ={ { { { 0x00016C00, 0x02401800, 0, 0, 0 } }, + { { 0x00016E00, 0x02401C00, 0, 0, 0 } }, + { { 0x00017000, 0x02402000, 0, 0, 0 } }, + { { 0x00017200, 0x02402400, 0, 0, 0 } }, + { { 0x0001B000, 0x0242D800, 0, 0, 0 } }, + { { 0x00017E00, 0x0240BC00, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE DF_BASE ={ { { { 0x00007000, 0x0240B800, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE DIO_BASE ={ { { { 0x02404000, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE DMU_BASE ={ { { { 0x00000012, 0x000000C0, 0x000034C0, 0x00009000, 0x02403C00 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE DPCS_BASE ={ { { { 0x00000012, 0x000000C0, 0x000034C0, 0x00009000, 0x02403C00 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE FUSE_BASE ={ { { { 0x00017400, 0x02401400, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE GC_BASE ={ { { { 0x00001260, 0x0000A000, 0x02402C00, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE HDA_BASE ={ { { { 0x004C0000, 0x02404800, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 
0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE HDP_BASE ={ { { { 0x00000F20, 0x0240A400, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE MMHUB_BASE ={ { { { 0x0001A000, 0x02408800, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE MP0_BASE ={ { { { 0x00016000, 0x00DC0000, 0x00E00000, 0x00E40000, 0x0243FC00 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE MP1_BASE ={ { { { 0x00016000, 0x00E80000, 0x00EC0000, 0x00F00000, 0x02400400 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE NBIF0_BASE ={ { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0x0241B000 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE OSSSYS_BASE ={ { { { 0x000010A0, 0x0240A000, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE PCIE0_BASE ={ { { { 0x02411800, 0x04440000, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE SDMA_BASE ={ { { { 0x00001260, 0x0000A000, 0x02402C00, 0, 0 } }, + { { 0x00001260, 0x0000A000, 0x02402C00, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE SMUIO_BASE ={ { { { 0x00016800, 0x00016A00, 0x00440000, 0x02401000, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE THM_BASE ={ { { { 0x00016600, 0x02400C00, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE UMC_BASE ={ { { { 0x00014000, 0x02425800, 0, 0, 0 } }, + { { 0x00054000, 0x02425C00, 0, 0, 0 } }, + { { 0x00094000, 0x02426000, 0, 0, 0 } }, + { { 0x000D4000, 0x02426400, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE USB0_BASE ={ { { { 0x0242A800, 0x05B00000, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE UVD0_BASE ={ { { { 0x00007800, 0x00007E00, 0x02403000, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; + + +#define ATHUB_BASE__INST0_SEG0 0x00000C00 +#define ATHUB_BASE__INST0_SEG1 0x02408C00 +#define ATHUB_BASE__INST0_SEG2 0 +#define ATHUB_BASE__INST0_SEG3 0 
+#define ATHUB_BASE__INST0_SEG4 0 + +#define ATHUB_BASE__INST1_SEG0 0 +#define ATHUB_BASE__INST1_SEG1 0 +#define ATHUB_BASE__INST1_SEG2 0 +#define ATHUB_BASE__INST1_SEG3 0 +#define ATHUB_BASE__INST1_SEG4 0 + +#define ATHUB_BASE__INST2_SEG0 0 +#define ATHUB_BASE__INST2_SEG1 0 +#define ATHUB_BASE__INST2_SEG2 0 +#define ATHUB_BASE__INST2_SEG3 0 +#define ATHUB_BASE__INST2_SEG4 0 + +#define ATHUB_BASE__INST3_SEG0 0 +#define ATHUB_BASE__INST3_SEG1 0 +#define ATHUB_BASE__INST3_SEG2 0 +#define ATHUB_BASE__INST3_SEG3 0 +#define ATHUB_BASE__INST3_SEG4 0 + +#define ATHUB_BASE__INST4_SEG0 0 +#define ATHUB_BASE__INST4_SEG1 0 +#define ATHUB_BASE__INST4_SEG2 0 +#define ATHUB_BASE__INST4_SEG3 0 +#define ATHUB_BASE__INST4_SEG4 0 + +#define ATHUB_BASE__INST5_SEG0 0 +#define ATHUB_BASE__INST5_SEG1 0 +#define ATHUB_BASE__INST5_SEG2 0 +#define ATHUB_BASE__INST5_SEG3 0 +#define ATHUB_BASE__INST5_SEG4 0 + +#define ATHUB_BASE__INST6_SEG0 0 +#define ATHUB_BASE__INST6_SEG1 0 +#define ATHUB_BASE__INST6_SEG2 0 +#define ATHUB_BASE__INST6_SEG3 0 +#define ATHUB_BASE__INST6_SEG4 0 + +#define CLK_BASE__INST0_SEG0 0x00016C00 +#define CLK_BASE__INST0_SEG1 0x02401800 +#define CLK_BASE__INST0_SEG2 0 +#define CLK_BASE__INST0_SEG3 0 +#define CLK_BASE__INST0_SEG4 0 + +#define CLK_BASE__INST1_SEG0 0x00016E00 +#define CLK_BASE__INST1_SEG1 0x02401C00 +#define CLK_BASE__INST1_SEG2 0 +#define CLK_BASE__INST1_SEG3 0 +#define CLK_BASE__INST1_SEG4 0 + +#define CLK_BASE__INST2_SEG0 0x00017000 +#define CLK_BASE__INST2_SEG1 0x02402000 +#define CLK_BASE__INST2_SEG2 0 +#define CLK_BASE__INST2_SEG3 0 +#define CLK_BASE__INST2_SEG4 0 + +#define CLK_BASE__INST3_SEG0 0x00017200 +#define CLK_BASE__INST3_SEG1 0x02402400 +#define CLK_BASE__INST3_SEG2 0 +#define CLK_BASE__INST3_SEG3 0 +#define CLK_BASE__INST3_SEG4 0 + +#define CLK_BASE__INST4_SEG0 0x0001B000 +#define CLK_BASE__INST4_SEG1 0x0242D800 +#define CLK_BASE__INST4_SEG2 0 +#define CLK_BASE__INST4_SEG3 0 +#define CLK_BASE__INST4_SEG4 0 + +#define CLK_BASE__INST5_SEG0 0x00017E00 +#define CLK_BASE__INST5_SEG1 0x0240BC00 +#define CLK_BASE__INST5_SEG2 0 +#define CLK_BASE__INST5_SEG3 0 +#define CLK_BASE__INST5_SEG4 0 + +#define CLK_BASE__INST6_SEG0 0 +#define CLK_BASE__INST6_SEG1 0 +#define CLK_BASE__INST6_SEG2 0 +#define CLK_BASE__INST6_SEG3 0 +#define CLK_BASE__INST6_SEG4 0 + +#define DF_BASE__INST0_SEG0 0x00007000 +#define DF_BASE__INST0_SEG1 0x0240B800 +#define DF_BASE__INST0_SEG2 0 +#define DF_BASE__INST0_SEG3 0 +#define DF_BASE__INST0_SEG4 0 + +#define DF_BASE__INST1_SEG0 0 +#define DF_BASE__INST1_SEG1 0 +#define DF_BASE__INST1_SEG2 0 +#define DF_BASE__INST1_SEG3 0 +#define DF_BASE__INST1_SEG4 0 + +#define DF_BASE__INST2_SEG0 0 +#define DF_BASE__INST2_SEG1 0 +#define DF_BASE__INST2_SEG2 0 +#define DF_BASE__INST2_SEG3 0 +#define DF_BASE__INST2_SEG4 0 + +#define DF_BASE__INST3_SEG0 0 +#define DF_BASE__INST3_SEG1 0 +#define DF_BASE__INST3_SEG2 0 +#define DF_BASE__INST3_SEG3 0 +#define DF_BASE__INST3_SEG4 0 + +#define DF_BASE__INST4_SEG0 0 +#define DF_BASE__INST4_SEG1 0 +#define DF_BASE__INST4_SEG2 0 +#define DF_BASE__INST4_SEG3 0 +#define DF_BASE__INST4_SEG4 0 + +#define DF_BASE__INST5_SEG0 0 +#define DF_BASE__INST5_SEG1 0 +#define DF_BASE__INST5_SEG2 0 +#define DF_BASE__INST5_SEG3 0 +#define DF_BASE__INST5_SEG4 0 + +#define DF_BASE__INST6_SEG0 0 +#define DF_BASE__INST6_SEG1 0 +#define DF_BASE__INST6_SEG2 0 +#define DF_BASE__INST6_SEG3 0 +#define DF_BASE__INST6_SEG4 0 + +#define DIO_BASE__INST0_SEG0 0x02404000 +#define DIO_BASE__INST0_SEG1 0 +#define DIO_BASE__INST0_SEG2 0 +#define 
DIO_BASE__INST0_SEG3 0 +#define DIO_BASE__INST0_SEG4 0 + +#define DIO_BASE__INST1_SEG0 0 +#define DIO_BASE__INST1_SEG1 0 +#define DIO_BASE__INST1_SEG2 0 +#define DIO_BASE__INST1_SEG3 0 +#define DIO_BASE__INST1_SEG4 0 + +#define DIO_BASE__INST2_SEG0 0 +#define DIO_BASE__INST2_SEG1 0 +#define DIO_BASE__INST2_SEG2 0 +#define DIO_BASE__INST2_SEG3 0 +#define DIO_BASE__INST2_SEG4 0 + +#define DIO_BASE__INST3_SEG0 0 +#define DIO_BASE__INST3_SEG1 0 +#define DIO_BASE__INST3_SEG2 0 +#define DIO_BASE__INST3_SEG3 0 +#define DIO_BASE__INST3_SEG4 0 + +#define DIO_BASE__INST4_SEG0 0 +#define DIO_BASE__INST4_SEG1 0 +#define DIO_BASE__INST4_SEG2 0 +#define DIO_BASE__INST4_SEG3 0 +#define DIO_BASE__INST4_SEG4 0 + +#define DIO_BASE__INST5_SEG0 0 +#define DIO_BASE__INST5_SEG1 0 +#define DIO_BASE__INST5_SEG2 0 +#define DIO_BASE__INST5_SEG3 0 +#define DIO_BASE__INST5_SEG4 0 + +#define DIO_BASE__INST6_SEG0 0 +#define DIO_BASE__INST6_SEG1 0 +#define DIO_BASE__INST6_SEG2 0 +#define DIO_BASE__INST6_SEG3 0 +#define DIO_BASE__INST6_SEG4 0 + +#define DMU_BASE__INST0_SEG0 0x00000012 +#define DMU_BASE__INST0_SEG1 0x000000C0 +#define DMU_BASE__INST0_SEG2 0x000034C0 +#define DMU_BASE__INST0_SEG3 0x00009000 +#define DMU_BASE__INST0_SEG4 0x02403C00 + +#define DMU_BASE__INST1_SEG0 0 +#define DMU_BASE__INST1_SEG1 0 +#define DMU_BASE__INST1_SEG2 0 +#define DMU_BASE__INST1_SEG3 0 +#define DMU_BASE__INST1_SEG4 0 + +#define DMU_BASE__INST2_SEG0 0 +#define DMU_BASE__INST2_SEG1 0 +#define DMU_BASE__INST2_SEG2 0 +#define DMU_BASE__INST2_SEG3 0 +#define DMU_BASE__INST2_SEG4 0 + +#define DMU_BASE__INST3_SEG0 0 +#define DMU_BASE__INST3_SEG1 0 +#define DMU_BASE__INST3_SEG2 0 +#define DMU_BASE__INST3_SEG3 0 +#define DMU_BASE__INST3_SEG4 0 + +#define DMU_BASE__INST4_SEG0 0 +#define DMU_BASE__INST4_SEG1 0 +#define DMU_BASE__INST4_SEG2 0 +#define DMU_BASE__INST4_SEG3 0 +#define DMU_BASE__INST4_SEG4 0 + +#define DMU_BASE__INST5_SEG0 0 +#define DMU_BASE__INST5_SEG1 0 +#define DMU_BASE__INST5_SEG2 0 +#define DMU_BASE__INST5_SEG3 0 +#define DMU_BASE__INST5_SEG4 0 + +#define DMU_BASE__INST6_SEG0 0 +#define DMU_BASE__INST6_SEG1 0 +#define DMU_BASE__INST6_SEG2 0 +#define DMU_BASE__INST6_SEG3 0 +#define DMU_BASE__INST6_SEG4 0 + +#define DPCS_BASE__INST0_SEG0 0x00000012 +#define DPCS_BASE__INST0_SEG1 0x000000C0 +#define DPCS_BASE__INST0_SEG2 0x000034C0 +#define DPCS_BASE__INST0_SEG3 0x00009000 +#define DPCS_BASE__INST0_SEG4 0x02403C00 + +#define DPCS_BASE__INST1_SEG0 0 +#define DPCS_BASE__INST1_SEG1 0 +#define DPCS_BASE__INST1_SEG2 0 +#define DPCS_BASE__INST1_SEG3 0 +#define DPCS_BASE__INST1_SEG4 0 + +#define DPCS_BASE__INST2_SEG0 0 +#define DPCS_BASE__INST2_SEG1 0 +#define DPCS_BASE__INST2_SEG2 0 +#define DPCS_BASE__INST2_SEG3 0 +#define DPCS_BASE__INST2_SEG4 0 + +#define DPCS_BASE__INST3_SEG0 0 +#define DPCS_BASE__INST3_SEG1 0 +#define DPCS_BASE__INST3_SEG2 0 +#define DPCS_BASE__INST3_SEG3 0 +#define DPCS_BASE__INST3_SEG4 0 + +#define DPCS_BASE__INST4_SEG0 0 +#define DPCS_BASE__INST4_SEG1 0 +#define DPCS_BASE__INST4_SEG2 0 +#define DPCS_BASE__INST4_SEG3 0 +#define DPCS_BASE__INST4_SEG4 0 + +#define DPCS_BASE__INST5_SEG0 0 +#define DPCS_BASE__INST5_SEG1 0 +#define DPCS_BASE__INST5_SEG2 0 +#define DPCS_BASE__INST5_SEG3 0 +#define DPCS_BASE__INST5_SEG4 0 + +#define DPCS_BASE__INST6_SEG0 0 +#define DPCS_BASE__INST6_SEG1 0 +#define DPCS_BASE__INST6_SEG2 0 +#define DPCS_BASE__INST6_SEG3 0 +#define DPCS_BASE__INST6_SEG4 0 + +#define FUSE_BASE__INST0_SEG0 0x00017400 +#define FUSE_BASE__INST0_SEG1 0x02401400 +#define FUSE_BASE__INST0_SEG2 0 +#define 
FUSE_BASE__INST0_SEG3 0 +#define FUSE_BASE__INST0_SEG4 0 + +#define FUSE_BASE__INST1_SEG0 0 +#define FUSE_BASE__INST1_SEG1 0 +#define FUSE_BASE__INST1_SEG2 0 +#define FUSE_BASE__INST1_SEG3 0 +#define FUSE_BASE__INST1_SEG4 0 + +#define FUSE_BASE__INST2_SEG0 0 +#define FUSE_BASE__INST2_SEG1 0 +#define FUSE_BASE__INST2_SEG2 0 +#define FUSE_BASE__INST2_SEG3 0 +#define FUSE_BASE__INST2_SEG4 0 + +#define FUSE_BASE__INST3_SEG0 0 +#define FUSE_BASE__INST3_SEG1 0 +#define FUSE_BASE__INST3_SEG2 0 +#define FUSE_BASE__INST3_SEG3 0 +#define FUSE_BASE__INST3_SEG4 0 + +#define FUSE_BASE__INST4_SEG0 0 +#define FUSE_BASE__INST4_SEG1 0 +#define FUSE_BASE__INST4_SEG2 0 +#define FUSE_BASE__INST4_SEG3 0 +#define FUSE_BASE__INST4_SEG4 0 + +#define FUSE_BASE__INST5_SEG0 0 +#define FUSE_BASE__INST5_SEG1 0 +#define FUSE_BASE__INST5_SEG2 0 +#define FUSE_BASE__INST5_SEG3 0 +#define FUSE_BASE__INST5_SEG4 0 + +#define FUSE_BASE__INST6_SEG0 0 +#define FUSE_BASE__INST6_SEG1 0 +#define FUSE_BASE__INST6_SEG2 0 +#define FUSE_BASE__INST6_SEG3 0 +#define FUSE_BASE__INST6_SEG4 0 + +#define GC_BASE__INST0_SEG0 0x00001260 +#define GC_BASE__INST0_SEG1 0x0000A000 +#define GC_BASE__INST0_SEG2 0x02402C00 +#define GC_BASE__INST0_SEG3 0 +#define GC_BASE__INST0_SEG4 0 + +#define GC_BASE__INST1_SEG0 0 +#define GC_BASE__INST1_SEG1 0 +#define GC_BASE__INST1_SEG2 0 +#define GC_BASE__INST1_SEG3 0 +#define GC_BASE__INST1_SEG4 0 + +#define GC_BASE__INST2_SEG0 0 +#define GC_BASE__INST2_SEG1 0 +#define GC_BASE__INST2_SEG2 0 +#define GC_BASE__INST2_SEG3 0 +#define GC_BASE__INST2_SEG4 0 + +#define GC_BASE__INST3_SEG0 0 +#define GC_BASE__INST3_SEG1 0 +#define GC_BASE__INST3_SEG2 0 +#define GC_BASE__INST3_SEG3 0 +#define GC_BASE__INST3_SEG4 0 + +#define GC_BASE__INST4_SEG0 0 +#define GC_BASE__INST4_SEG1 0 +#define GC_BASE__INST4_SEG2 0 +#define GC_BASE__INST4_SEG3 0 +#define GC_BASE__INST4_SEG4 0 + +#define GC_BASE__INST5_SEG0 0 +#define GC_BASE__INST5_SEG1 0 +#define GC_BASE__INST5_SEG2 0 +#define GC_BASE__INST5_SEG3 0 +#define GC_BASE__INST5_SEG4 0 + +#define GC_BASE__INST6_SEG0 0 +#define GC_BASE__INST6_SEG1 0 +#define GC_BASE__INST6_SEG2 0 +#define GC_BASE__INST6_SEG3 0 +#define GC_BASE__INST6_SEG4 0 + +#define HDA_BASE__INST0_SEG0 0x004C0000 +#define HDA_BASE__INST0_SEG1 0x02404800 +#define HDA_BASE__INST0_SEG2 0 +#define HDA_BASE__INST0_SEG3 0 +#define HDA_BASE__INST0_SEG4 0 + +#define HDA_BASE__INST1_SEG0 0 +#define HDA_BASE__INST1_SEG1 0 +#define HDA_BASE__INST1_SEG2 0 +#define HDA_BASE__INST1_SEG3 0 +#define HDA_BASE__INST1_SEG4 0 + +#define HDA_BASE__INST2_SEG0 0 +#define HDA_BASE__INST2_SEG1 0 +#define HDA_BASE__INST2_SEG2 0 +#define HDA_BASE__INST2_SEG3 0 +#define HDA_BASE__INST2_SEG4 0 + +#define HDA_BASE__INST3_SEG0 0 +#define HDA_BASE__INST3_SEG1 0 +#define HDA_BASE__INST3_SEG2 0 +#define HDA_BASE__INST3_SEG3 0 +#define HDA_BASE__INST3_SEG4 0 + +#define HDA_BASE__INST4_SEG0 0 +#define HDA_BASE__INST4_SEG1 0 +#define HDA_BASE__INST4_SEG2 0 +#define HDA_BASE__INST4_SEG3 0 +#define HDA_BASE__INST4_SEG4 0 + +#define HDA_BASE__INST5_SEG0 0 +#define HDA_BASE__INST5_SEG1 0 +#define HDA_BASE__INST5_SEG2 0 +#define HDA_BASE__INST5_SEG3 0 +#define HDA_BASE__INST5_SEG4 0 + +#define HDA_BASE__INST6_SEG0 0 +#define HDA_BASE__INST6_SEG1 0 +#define HDA_BASE__INST6_SEG2 0 +#define HDA_BASE__INST6_SEG3 0 +#define HDA_BASE__INST6_SEG4 0 + +#define HDP_BASE__INST0_SEG0 0x00000F20 +#define HDP_BASE__INST0_SEG1 0x0240A400 +#define HDP_BASE__INST0_SEG2 0 +#define HDP_BASE__INST0_SEG3 0 +#define HDP_BASE__INST0_SEG4 0 + +#define HDP_BASE__INST1_SEG0 0 
+#define HDP_BASE__INST1_SEG1 0 +#define HDP_BASE__INST1_SEG2 0 +#define HDP_BASE__INST1_SEG3 0 +#define HDP_BASE__INST1_SEG4 0 + +#define HDP_BASE__INST2_SEG0 0 +#define HDP_BASE__INST2_SEG1 0 +#define HDP_BASE__INST2_SEG2 0 +#define HDP_BASE__INST2_SEG3 0 +#define HDP_BASE__INST2_SEG4 0 + +#define HDP_BASE__INST3_SEG0 0 +#define HDP_BASE__INST3_SEG1 0 +#define HDP_BASE__INST3_SEG2 0 +#define HDP_BASE__INST3_SEG3 0 +#define HDP_BASE__INST3_SEG4 0 + +#define HDP_BASE__INST4_SEG0 0 +#define HDP_BASE__INST4_SEG1 0 +#define HDP_BASE__INST4_SEG2 0 +#define HDP_BASE__INST4_SEG3 0 +#define HDP_BASE__INST4_SEG4 0 + +#define HDP_BASE__INST5_SEG0 0 +#define HDP_BASE__INST5_SEG1 0 +#define HDP_BASE__INST5_SEG2 0 +#define HDP_BASE__INST5_SEG3 0 +#define HDP_BASE__INST5_SEG4 0 + +#define HDP_BASE__INST6_SEG0 0 +#define HDP_BASE__INST6_SEG1 0 +#define HDP_BASE__INST6_SEG2 0 +#define HDP_BASE__INST6_SEG3 0 +#define HDP_BASE__INST6_SEG4 0 + +#define MMHUB_BASE__INST0_SEG0 0x0001A000 +#define MMHUB_BASE__INST0_SEG1 0x02408800 +#define MMHUB_BASE__INST0_SEG2 0 +#define MMHUB_BASE__INST0_SEG3 0 +#define MMHUB_BASE__INST0_SEG4 0 + +#define MMHUB_BASE__INST1_SEG0 0 +#define MMHUB_BASE__INST1_SEG1 0 +#define MMHUB_BASE__INST1_SEG2 0 +#define MMHUB_BASE__INST1_SEG3 0 +#define MMHUB_BASE__INST1_SEG4 0 + +#define MMHUB_BASE__INST2_SEG0 0 +#define MMHUB_BASE__INST2_SEG1 0 +#define MMHUB_BASE__INST2_SEG2 0 +#define MMHUB_BASE__INST2_SEG3 0 +#define MMHUB_BASE__INST2_SEG4 0 + +#define MMHUB_BASE__INST3_SEG0 0 +#define MMHUB_BASE__INST3_SEG1 0 +#define MMHUB_BASE__INST3_SEG2 0 +#define MMHUB_BASE__INST3_SEG3 0 +#define MMHUB_BASE__INST3_SEG4 0 + +#define MMHUB_BASE__INST4_SEG0 0 +#define MMHUB_BASE__INST4_SEG1 0 +#define MMHUB_BASE__INST4_SEG2 0 +#define MMHUB_BASE__INST4_SEG3 0 +#define MMHUB_BASE__INST4_SEG4 0 + +#define MMHUB_BASE__INST5_SEG0 0 +#define MMHUB_BASE__INST5_SEG1 0 +#define MMHUB_BASE__INST5_SEG2 0 +#define MMHUB_BASE__INST5_SEG3 0 +#define MMHUB_BASE__INST5_SEG4 0 + +#define MMHUB_BASE__INST6_SEG0 0 +#define MMHUB_BASE__INST6_SEG1 0 +#define MMHUB_BASE__INST6_SEG2 0 +#define MMHUB_BASE__INST6_SEG3 0 +#define MMHUB_BASE__INST6_SEG4 0 + +#define MP0_BASE__INST0_SEG0 0x00016000 +#define MP0_BASE__INST0_SEG1 0x00DC0000 +#define MP0_BASE__INST0_SEG2 0x00E00000 +#define MP0_BASE__INST0_SEG3 0x00E40000 +#define MP0_BASE__INST0_SEG4 0x0243FC00 + +#define MP0_BASE__INST1_SEG0 0 +#define MP0_BASE__INST1_SEG1 0 +#define MP0_BASE__INST1_SEG2 0 +#define MP0_BASE__INST1_SEG3 0 +#define MP0_BASE__INST1_SEG4 0 + +#define MP0_BASE__INST2_SEG0 0 +#define MP0_BASE__INST2_SEG1 0 +#define MP0_BASE__INST2_SEG2 0 +#define MP0_BASE__INST2_SEG3 0 +#define MP0_BASE__INST2_SEG4 0 + +#define MP0_BASE__INST3_SEG0 0 +#define MP0_BASE__INST3_SEG1 0 +#define MP0_BASE__INST3_SEG2 0 +#define MP0_BASE__INST3_SEG3 0 +#define MP0_BASE__INST3_SEG4 0 + +#define MP0_BASE__INST4_SEG0 0 +#define MP0_BASE__INST4_SEG1 0 +#define MP0_BASE__INST4_SEG2 0 +#define MP0_BASE__INST4_SEG3 0 +#define MP0_BASE__INST4_SEG4 0 + +#define MP0_BASE__INST5_SEG0 0 +#define MP0_BASE__INST5_SEG1 0 +#define MP0_BASE__INST5_SEG2 0 +#define MP0_BASE__INST5_SEG3 0 +#define MP0_BASE__INST5_SEG4 0 + +#define MP0_BASE__INST6_SEG0 0 +#define MP0_BASE__INST6_SEG1 0 +#define MP0_BASE__INST6_SEG2 0 +#define MP0_BASE__INST6_SEG3 0 +#define MP0_BASE__INST6_SEG4 0 + +#define MP1_BASE__INST0_SEG0 0x00016200 +#define MP1_BASE__INST0_SEG1 0x00E80000 +#define MP1_BASE__INST0_SEG2 0x00EC0000 +#define MP1_BASE__INST0_SEG3 0x00F00000 +#define MP1_BASE__INST0_SEG4 0x02400400 + 
+#define MP1_BASE__INST1_SEG0 0 +#define MP1_BASE__INST1_SEG1 0 +#define MP1_BASE__INST1_SEG2 0 +#define MP1_BASE__INST1_SEG3 0 +#define MP1_BASE__INST1_SEG4 0 + +#define MP1_BASE__INST2_SEG0 0 +#define MP1_BASE__INST2_SEG1 0 +#define MP1_BASE__INST2_SEG2 0 +#define MP1_BASE__INST2_SEG3 0 +#define MP1_BASE__INST2_SEG4 0 + +#define MP1_BASE__INST3_SEG0 0 +#define MP1_BASE__INST3_SEG1 0 +#define MP1_BASE__INST3_SEG2 0 +#define MP1_BASE__INST3_SEG3 0 +#define MP1_BASE__INST3_SEG4 0 + +#define MP1_BASE__INST4_SEG0 0 +#define MP1_BASE__INST4_SEG1 0 +#define MP1_BASE__INST4_SEG2 0 +#define MP1_BASE__INST4_SEG3 0 +#define MP1_BASE__INST4_SEG4 0 + +#define MP1_BASE__INST5_SEG0 0 +#define MP1_BASE__INST5_SEG1 0 +#define MP1_BASE__INST5_SEG2 0 +#define MP1_BASE__INST5_SEG3 0 +#define MP1_BASE__INST5_SEG4 0 + +#define MP1_BASE__INST6_SEG0 0 +#define MP1_BASE__INST6_SEG1 0 +#define MP1_BASE__INST6_SEG2 0 +#define MP1_BASE__INST6_SEG3 0 +#define MP1_BASE__INST6_SEG4 0 + +#define NBIF0_BASE__INST0_SEG0 0x00000000 +#define NBIF0_BASE__INST0_SEG1 0x00000014 +#define NBIF0_BASE__INST0_SEG2 0x00000D20 +#define NBIF0_BASE__INST0_SEG3 0x00010400 +#define NBIF0_BASE__INST0_SEG4 0x0241B000 + +#define NBIF0_BASE__INST1_SEG0 0 +#define NBIF0_BASE__INST1_SEG1 0 +#define NBIF0_BASE__INST1_SEG2 0 +#define NBIF0_BASE__INST1_SEG3 0 +#define NBIF0_BASE__INST1_SEG4 0 + +#define NBIF0_BASE__INST2_SEG0 0 +#define NBIF0_BASE__INST2_SEG1 0 +#define NBIF0_BASE__INST2_SEG2 0 +#define NBIF0_BASE__INST2_SEG3 0 +#define NBIF0_BASE__INST2_SEG4 0 + +#define NBIF0_BASE__INST3_SEG0 0 +#define NBIF0_BASE__INST3_SEG1 0 +#define NBIF0_BASE__INST3_SEG2 0 +#define NBIF0_BASE__INST3_SEG3 0 +#define NBIF0_BASE__INST3_SEG4 0 + +#define NBIF0_BASE__INST4_SEG0 0 +#define NBIF0_BASE__INST4_SEG1 0 +#define NBIF0_BASE__INST4_SEG2 0 +#define NBIF0_BASE__INST4_SEG3 0 +#define NBIF0_BASE__INST4_SEG4 0 + +#define NBIF0_BASE__INST5_SEG0 0 +#define NBIF0_BASE__INST5_SEG1 0 +#define NBIF0_BASE__INST5_SEG2 0 +#define NBIF0_BASE__INST5_SEG3 0 +#define NBIF0_BASE__INST5_SEG4 0 + +#define NBIF0_BASE__INST6_SEG0 0 +#define NBIF0_BASE__INST6_SEG1 0 +#define NBIF0_BASE__INST6_SEG2 0 +#define NBIF0_BASE__INST6_SEG3 0 +#define NBIF0_BASE__INST6_SEG4 0 + +#define OSSSYS_BASE__INST0_SEG0 0x000010A0 +#define OSSSYS_BASE__INST0_SEG1 0x0240A000 +#define OSSSYS_BASE__INST0_SEG2 0 +#define OSSSYS_BASE__INST0_SEG3 0 +#define OSSSYS_BASE__INST0_SEG4 0 + +#define OSSSYS_BASE__INST1_SEG0 0 +#define OSSSYS_BASE__INST1_SEG1 0 +#define OSSSYS_BASE__INST1_SEG2 0 +#define OSSSYS_BASE__INST1_SEG3 0 +#define OSSSYS_BASE__INST1_SEG4 0 + +#define OSSSYS_BASE__INST2_SEG0 0 +#define OSSSYS_BASE__INST2_SEG1 0 +#define OSSSYS_BASE__INST2_SEG2 0 +#define OSSSYS_BASE__INST2_SEG3 0 +#define OSSSYS_BASE__INST2_SEG4 0 + +#define OSSSYS_BASE__INST3_SEG0 0 +#define OSSSYS_BASE__INST3_SEG1 0 +#define OSSSYS_BASE__INST3_SEG2 0 +#define OSSSYS_BASE__INST3_SEG3 0 +#define OSSSYS_BASE__INST3_SEG4 0 + +#define OSSSYS_BASE__INST4_SEG0 0 +#define OSSSYS_BASE__INST4_SEG1 0 +#define OSSSYS_BASE__INST4_SEG2 0 +#define OSSSYS_BASE__INST4_SEG3 0 +#define OSSSYS_BASE__INST4_SEG4 0 + +#define OSSSYS_BASE__INST5_SEG0 0 +#define OSSSYS_BASE__INST5_SEG1 0 +#define OSSSYS_BASE__INST5_SEG2 0 +#define OSSSYS_BASE__INST5_SEG3 0 +#define OSSSYS_BASE__INST5_SEG4 0 + +#define OSSSYS_BASE__INST6_SEG0 0 +#define OSSSYS_BASE__INST6_SEG1 0 +#define OSSSYS_BASE__INST6_SEG2 0 +#define OSSSYS_BASE__INST6_SEG3 0 +#define OSSSYS_BASE__INST6_SEG4 0 + +#define PCIE0_BASE__INST0_SEG0 0x02411800 +#define PCIE0_BASE__INST0_SEG1 
0x04440000 +#define PCIE0_BASE__INST0_SEG2 0 +#define PCIE0_BASE__INST0_SEG3 0 +#define PCIE0_BASE__INST0_SEG4 0 + +#define PCIE0_BASE__INST1_SEG0 0 +#define PCIE0_BASE__INST1_SEG1 0 +#define PCIE0_BASE__INST1_SEG2 0 +#define PCIE0_BASE__INST1_SEG3 0 +#define PCIE0_BASE__INST1_SEG4 0 + +#define PCIE0_BASE__INST2_SEG0 0 +#define PCIE0_BASE__INST2_SEG1 0 +#define PCIE0_BASE__INST2_SEG2 0 +#define PCIE0_BASE__INST2_SEG3 0 +#define PCIE0_BASE__INST2_SEG4 0 + +#define PCIE0_BASE__INST3_SEG0 0 +#define PCIE0_BASE__INST3_SEG1 0 +#define PCIE0_BASE__INST3_SEG2 0 +#define PCIE0_BASE__INST3_SEG3 0 +#define PCIE0_BASE__INST3_SEG4 0 + +#define PCIE0_BASE__INST4_SEG0 0 +#define PCIE0_BASE__INST4_SEG1 0 +#define PCIE0_BASE__INST4_SEG2 0 +#define PCIE0_BASE__INST4_SEG3 0 +#define PCIE0_BASE__INST4_SEG4 0 + +#define PCIE0_BASE__INST5_SEG0 0 +#define PCIE0_BASE__INST5_SEG1 0 +#define PCIE0_BASE__INST5_SEG2 0 +#define PCIE0_BASE__INST5_SEG3 0 +#define PCIE0_BASE__INST5_SEG4 0 + +#define PCIE0_BASE__INST6_SEG0 0 +#define PCIE0_BASE__INST6_SEG1 0 +#define PCIE0_BASE__INST6_SEG2 0 +#define PCIE0_BASE__INST6_SEG3 0 +#define PCIE0_BASE__INST6_SEG4 0 + +#define SDMA_BASE__INST0_SEG0 0x00001260 +#define SDMA_BASE__INST0_SEG1 0x0000A000 +#define SDMA_BASE__INST0_SEG2 0x02402C00 +#define SDMA_BASE__INST0_SEG3 0 +#define SDMA_BASE__INST0_SEG4 0 + +#define SDMA_BASE__INST1_SEG0 0x00001260 +#define SDMA_BASE__INST1_SEG1 0x0000A000 +#define SDMA_BASE__INST1_SEG2 0x02402C00 +#define SDMA_BASE__INST1_SEG3 0 +#define SDMA_BASE__INST1_SEG4 0 + +#define SDMA_BASE__INST2_SEG0 0 +#define SDMA_BASE__INST2_SEG1 0 +#define SDMA_BASE__INST2_SEG2 0 +#define SDMA_BASE__INST2_SEG3 0 +#define SDMA_BASE__INST2_SEG4 0 + +#define SDMA_BASE__INST3_SEG0 0 +#define SDMA_BASE__INST3_SEG1 0 +#define SDMA_BASE__INST3_SEG2 0 +#define SDMA_BASE__INST3_SEG3 0 +#define SDMA_BASE__INST3_SEG4 0 + +#define SDMA_BASE__INST4_SEG0 0 +#define SDMA_BASE__INST4_SEG1 0 +#define SDMA_BASE__INST4_SEG2 0 +#define SDMA_BASE__INST4_SEG3 0 +#define SDMA_BASE__INST4_SEG4 0 + +#define SDMA_BASE__INST5_SEG0 0 +#define SDMA_BASE__INST5_SEG1 0 +#define SDMA_BASE__INST5_SEG2 0 +#define SDMA_BASE__INST5_SEG3 0 +#define SDMA_BASE__INST5_SEG4 0 + +#define SDMA_BASE__INST6_SEG0 0 +#define SDMA_BASE__INST6_SEG1 0 +#define SDMA_BASE__INST6_SEG2 0 +#define SDMA_BASE__INST6_SEG3 0 +#define SDMA_BASE__INST6_SEG4 0 + +#define SMUIO_BASE__INST0_SEG0 0x00016800 +#define SMUIO_BASE__INST0_SEG1 0x00016A00 +#define SMUIO_BASE__INST0_SEG2 0x00440000 +#define SMUIO_BASE__INST0_SEG3 0x02401000 +#define SMUIO_BASE__INST0_SEG4 0 + +#define SMUIO_BASE__INST1_SEG0 0 +#define SMUIO_BASE__INST1_SEG1 0 +#define SMUIO_BASE__INST1_SEG2 0 +#define SMUIO_BASE__INST1_SEG3 0 +#define SMUIO_BASE__INST1_SEG4 0 + +#define SMUIO_BASE__INST2_SEG0 0 +#define SMUIO_BASE__INST2_SEG1 0 +#define SMUIO_BASE__INST2_SEG2 0 +#define SMUIO_BASE__INST2_SEG3 0 +#define SMUIO_BASE__INST2_SEG4 0 + +#define SMUIO_BASE__INST3_SEG0 0 +#define SMUIO_BASE__INST3_SEG1 0 +#define SMUIO_BASE__INST3_SEG2 0 +#define SMUIO_BASE__INST3_SEG3 0 +#define SMUIO_BASE__INST3_SEG4 0 + +#define SMUIO_BASE__INST4_SEG0 0 +#define SMUIO_BASE__INST4_SEG1 0 +#define SMUIO_BASE__INST4_SEG2 0 +#define SMUIO_BASE__INST4_SEG3 0 +#define SMUIO_BASE__INST4_SEG4 0 + +#define SMUIO_BASE__INST5_SEG0 0 +#define SMUIO_BASE__INST5_SEG1 0 +#define SMUIO_BASE__INST5_SEG2 0 +#define SMUIO_BASE__INST5_SEG3 0 +#define SMUIO_BASE__INST5_SEG4 0 + +#define SMUIO_BASE__INST6_SEG0 0 +#define SMUIO_BASE__INST6_SEG1 0 +#define SMUIO_BASE__INST6_SEG2 0 +#define 
SMUIO_BASE__INST6_SEG3 0 +#define SMUIO_BASE__INST6_SEG4 0 + +#define THM_BASE__INST0_SEG0 0x00016600 +#define THM_BASE__INST0_SEG1 0x02400C00 +#define THM_BASE__INST0_SEG2 0 +#define THM_BASE__INST0_SEG3 0 +#define THM_BASE__INST0_SEG4 0 + +#define THM_BASE__INST1_SEG0 0 +#define THM_BASE__INST1_SEG1 0 +#define THM_BASE__INST1_SEG2 0 +#define THM_BASE__INST1_SEG3 0 +#define THM_BASE__INST1_SEG4 0 + +#define THM_BASE__INST2_SEG0 0 +#define THM_BASE__INST2_SEG1 0 +#define THM_BASE__INST2_SEG2 0 +#define THM_BASE__INST2_SEG3 0 +#define THM_BASE__INST2_SEG4 0 + +#define THM_BASE__INST3_SEG0 0 +#define THM_BASE__INST3_SEG1 0 +#define THM_BASE__INST3_SEG2 0 +#define THM_BASE__INST3_SEG3 0 +#define THM_BASE__INST3_SEG4 0 + +#define THM_BASE__INST4_SEG0 0 +#define THM_BASE__INST4_SEG1 0 +#define THM_BASE__INST4_SEG2 0 +#define THM_BASE__INST4_SEG3 0 +#define THM_BASE__INST4_SEG4 0 + +#define THM_BASE__INST5_SEG0 0 +#define THM_BASE__INST5_SEG1 0 +#define THM_BASE__INST5_SEG2 0 +#define THM_BASE__INST5_SEG3 0 +#define THM_BASE__INST5_SEG4 0 + +#define THM_BASE__INST6_SEG0 0 +#define THM_BASE__INST6_SEG1 0 +#define THM_BASE__INST6_SEG2 0 +#define THM_BASE__INST6_SEG3 0 +#define THM_BASE__INST6_SEG4 0 + +#define UMC_BASE__INST0_SEG0 0x00014000 +#define UMC_BASE__INST0_SEG1 0x02425800 +#define UMC_BASE__INST0_SEG2 0 +#define UMC_BASE__INST0_SEG3 0 +#define UMC_BASE__INST0_SEG4 0 + +#define UMC_BASE__INST1_SEG0 0x00054000 +#define UMC_BASE__INST1_SEG1 0x02425C00 +#define UMC_BASE__INST1_SEG2 0 +#define UMC_BASE__INST1_SEG3 0 +#define UMC_BASE__INST1_SEG4 0 + +#define UMC_BASE__INST2_SEG0 0x00094000 +#define UMC_BASE__INST2_SEG1 0x02426000 +#define UMC_BASE__INST2_SEG2 0 +#define UMC_BASE__INST2_SEG3 0 +#define UMC_BASE__INST2_SEG4 0 + +#define UMC_BASE__INST3_SEG0 0x000D4000 +#define UMC_BASE__INST3_SEG1 0x02426400 +#define UMC_BASE__INST3_SEG2 0 +#define UMC_BASE__INST3_SEG3 0 +#define UMC_BASE__INST3_SEG4 0 + +#define UMC_BASE__INST4_SEG0 0 +#define UMC_BASE__INST4_SEG1 0 +#define UMC_BASE__INST4_SEG2 0 +#define UMC_BASE__INST4_SEG3 0 +#define UMC_BASE__INST4_SEG4 0 + +#define UMC_BASE__INST5_SEG0 0 +#define UMC_BASE__INST5_SEG1 0 +#define UMC_BASE__INST5_SEG2 0 +#define UMC_BASE__INST5_SEG3 0 +#define UMC_BASE__INST5_SEG4 0 + +#define UMC_BASE__INST6_SEG0 0 +#define UMC_BASE__INST6_SEG1 0 +#define UMC_BASE__INST6_SEG2 0 +#define UMC_BASE__INST6_SEG3 0 +#define UMC_BASE__INST6_SEG4 0 + +#define USB0_BASE__INST0_SEG0 0x0242A800 +#define USB0_BASE__INST0_SEG1 0x05B00000 +#define USB0_BASE__INST0_SEG2 0 +#define USB0_BASE__INST0_SEG3 0 +#define USB0_BASE__INST0_SEG4 0 + +#define USB0_BASE__INST1_SEG0 0 +#define USB0_BASE__INST1_SEG1 0 +#define USB0_BASE__INST1_SEG2 0 +#define USB0_BASE__INST1_SEG3 0 +#define USB0_BASE__INST1_SEG4 0 + +#define USB0_BASE__INST2_SEG0 0 +#define USB0_BASE__INST2_SEG1 0 +#define USB0_BASE__INST2_SEG2 0 +#define USB0_BASE__INST2_SEG3 0 +#define USB0_BASE__INST2_SEG4 0 + +#define USB0_BASE__INST3_SEG0 0 +#define USB0_BASE__INST3_SEG1 0 +#define USB0_BASE__INST3_SEG2 0 +#define USB0_BASE__INST3_SEG3 0 +#define USB0_BASE__INST3_SEG4 0 + +#define USB0_BASE__INST4_SEG0 0 +#define USB0_BASE__INST4_SEG1 0 +#define USB0_BASE__INST4_SEG2 0 +#define USB0_BASE__INST4_SEG3 0 +#define USB0_BASE__INST4_SEG4 0 + +#define USB0_BASE__INST5_SEG0 0 +#define USB0_BASE__INST5_SEG1 0 +#define USB0_BASE__INST5_SEG2 0 +#define USB0_BASE__INST5_SEG3 0 +#define USB0_BASE__INST5_SEG4 0 + +#define USB0_BASE__INST6_SEG0 0 +#define USB0_BASE__INST6_SEG1 0 +#define USB0_BASE__INST6_SEG2 0 +#define 
USB0_BASE__INST6_SEG3 0 +#define USB0_BASE__INST6_SEG4 0 + +#define UVD0_BASE__INST0_SEG0 0x00007800 +#define UVD0_BASE__INST0_SEG1 0x00007E00 +#define UVD0_BASE__INST0_SEG2 0x02403000 +#define UVD0_BASE__INST0_SEG3 0 +#define UVD0_BASE__INST0_SEG4 0 + +#define UVD0_BASE__INST1_SEG0 0 +#define UVD0_BASE__INST1_SEG1 0 +#define UVD0_BASE__INST1_SEG2 0 +#define UVD0_BASE__INST1_SEG3 0 +#define UVD0_BASE__INST1_SEG4 0 + +#define UVD0_BASE__INST2_SEG0 0 +#define UVD0_BASE__INST2_SEG1 0 +#define UVD0_BASE__INST2_SEG2 0 +#define UVD0_BASE__INST2_SEG3 0 +#define UVD0_BASE__INST2_SEG4 0 + +#define UVD0_BASE__INST3_SEG0 0 +#define UVD0_BASE__INST3_SEG1 0 +#define UVD0_BASE__INST3_SEG2 0 +#define UVD0_BASE__INST3_SEG3 0 +#define UVD0_BASE__INST3_SEG4 0 + +#define UVD0_BASE__INST4_SEG0 0 +#define UVD0_BASE__INST4_SEG1 0 +#define UVD0_BASE__INST4_SEG2 0 +#define UVD0_BASE__INST4_SEG3 0 +#define UVD0_BASE__INST4_SEG4 0 + +#define UVD0_BASE__INST5_SEG0 0 +#define UVD0_BASE__INST5_SEG1 0 +#define UVD0_BASE__INST5_SEG2 0 +#define UVD0_BASE__INST5_SEG3 0 +#define UVD0_BASE__INST5_SEG4 0 + +#define UVD0_BASE__INST6_SEG0 0 +#define UVD0_BASE__INST6_SEG1 0 +#define UVD0_BASE__INST6_SEG2 0 +#define UVD0_BASE__INST6_SEG3 0 +#define UVD0_BASE__INST6_SEG4 0 + +#endif -- cgit v1.2.3 From 03d0a073cf3f8eced0e27774f15b700a85c9f976 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Tue, 14 May 2019 15:22:53 +0800 Subject: drm/amdgpu: initialize reg base for navi12 Set up the register offset map for navi12. Signed-off-by: Xiaojie Yuan Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 2 +- drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c | 53 ++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/nv.c | 3 ++ drivers/gpu/drm/amd/amdgpu/nv.h | 1 + 4 files changed, 58 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index cc38a6836825..8afa0bceb460 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -67,7 +67,7 @@ amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce amdgpu-y += \ vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \ vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o navi10_reg_init.o navi14_reg_init.o \ - arct_reg_init.o + arct_reg_init.o navi12_reg_init.o # add DF block amdgpu-y += \ diff --git a/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c b/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c new file mode 100644 index 000000000000..cadc7603ca41 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c @@ -0,0 +1,53 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "amdgpu.h" +#include "nv.h" + +#include "soc15_common.h" +#include "soc15_hw_ip.h" +#include "navi12_ip_offset.h" + +int navi12_reg_base_init(struct amdgpu_device *adev) +{ + /* HW has more IP blocks, only initialized the blocks needed by driver */ + uint32_t i; + for (i = 0 ; i < MAX_INSTANCE ; ++i) { + adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); + adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i])); + adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i])); + adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i])); + adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIF0_BASE.instance[i])); + adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i])); + adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i])); + adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(UVD0_BASE.instance[i])); + adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i])); + adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DMU_BASE.instance[i])); + adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i])); + adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); + adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); + adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i])); + adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i])); + adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i])); + } + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 595a907f4ea7..7b119285abe3 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -386,6 +386,9 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) case CHIP_NAVI14: navi14_reg_base_init(adev); break; + case CHIP_NAVI12: + navi12_reg_base_init(adev); + break; default: return -EINVAL; } diff --git a/drivers/gpu/drm/amd/amdgpu/nv.h b/drivers/gpu/drm/amd/amdgpu/nv.h index 332d5cdc308e..82e6cb432f3d 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.h +++ b/drivers/gpu/drm/amd/amdgpu/nv.h @@ -31,4 +31,5 @@ void nv_grbm_select(struct amdgpu_device *adev, int nv_set_ip_blocks(struct amdgpu_device *adev); int navi10_reg_base_init(struct amdgpu_device *adev); int navi14_reg_base_init(struct amdgpu_device *adev); +int navi12_reg_base_init(struct amdgpu_device *adev); #endif -- cgit v1.2.3 From 9802f5d78b3747467644edc609393f95d0b10e61 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Mon, 17 Dec 2018 18:00:26 +0800 Subject: drm/amdgpu: add navi12 asic type Add asic type. 
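Adding CHIP_NAVI12 gives the new part an identity in the driver; the navi12_ip_offset.h tables and navi12_reg_base_init() in the previous patch give it addresses. Each *_BASE__INSTn_SEGm value above is a per-instance, per-segment MMIO base, and navi12_reg_base_init() points adev->reg_offset[hwip][instance] at those segment arrays so a register can be addressed as "IP block + instance + segment + offset" rather than as a hard-coded number. A minimal user-space sketch of that lookup, assuming the usual soc15-style layout (the struct and function names here are illustrative, not the driver's own):

#include <stdint.h>
#include <stdio.h>

#define MAX_SEGMENT 5

/* Per-instance segment bases, as filled in from the *_BASE__INSTn_SEGm
 * values in navi12_ip_offset.h (illustrative layout, not the driver's). */
struct ip_instance {
	uint32_t segment[MAX_SEGMENT];
};

/* Absolute dword register offset = segment base + offset within the
 * segment; this mirrors what the SOC15 register macros compute from
 * adev->reg_offset[hwip][instance]. */
static uint32_t reg_offset(const struct ip_instance *inst,
			   unsigned int seg, uint32_t reg)
{
	return inst->segment[seg] + reg;
}

int main(void)
{
	/* SDMA instance 0 bases copied from the header above. */
	const struct ip_instance sdma0 = {
		.segment = { 0x00001260, 0x0000A000, 0x02402C00, 0, 0 },
	};

	/* A hypothetical register at dword offset 0x10 in segment 0. */
	printf("0x%08x\n", reg_offset(&sdma0, 0, 0x10));	/* 0x00001270 */
	return 0;
}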
Signed-off-by: Xiaojie Yuan Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 + include/drm/amd_asic_type.h | 1 + 2 files changed, 2 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 2363e7658d36..6cd8670bc3eb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -103,6 +103,7 @@ static const char *amdgpu_asic_name[] = { "ARCTURUS", "NAVI10", "NAVI14", + "NAVI12", "LAST", }; diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h index 0f5a12a99948..737a1e374f0c 100644 --- a/include/drm/amd_asic_type.h +++ b/include/drm/amd_asic_type.h @@ -52,6 +52,7 @@ enum amd_asic_type { CHIP_ARCTURUS, CHIP_NAVI10, CHIP_NAVI14, + CHIP_NAVI12, CHIP_LAST, }; -- cgit v1.2.3 From 42b325e5ec39655d9f0bf0aef73f3f636cc972aa Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Mon, 17 Dec 2018 18:01:38 +0800 Subject: drm/amdgpu: add gpu_info firmware for navi12 gpu_info firmare store asic configuration details. Signed-off-by: Xiaojie Yuan Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 6cd8670bc3eb..6bc3dd76eb1e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -73,6 +73,7 @@ MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin"); +MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin"); #define AMDGPU_RESUME_MS 2000 @@ -1469,6 +1470,9 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) case CHIP_NAVI14: chip_name = "navi14"; break; + case CHIP_NAVI12: + chip_name = "navi12"; + break; } snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name); -- cgit v1.2.3 From 4808cf9c2a82427148a0c66f8833d1f743e1c960 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Thu, 16 May 2019 17:36:41 +0800 Subject: drm/amdgpu: set asic family and ip blocks for navi12 same with navi10 Signed-off-by: Xiaojie Yuan Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 6bc3dd76eb1e..b3ad96add13c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1627,6 +1627,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) break; case CHIP_NAVI10: case CHIP_NAVI14: + case CHIP_NAVI12: adev->family = AMDGPU_FAMILY_NV; r = nv_set_ip_blocks(adev); -- cgit v1.2.3 From d4d838ba4e9d1ca0d46743141bba84de1f04a007 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Mon, 17 Dec 2018 18:04:19 +0800 Subject: drm/amdgpu: use front door firmware loading for navi12 Same as other navi asics. 
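The gpu_info patch above follows the naming pattern that almost every navi12 enablement patch in this series repeats: declare the new blob with MODULE_FIRMWARE() and extend a chip_name switch that feeds an "amdgpu/%s_*.bin" snprintf. A stripped-down sketch of just that naming step (standalone code with a made-up enum; the real driver then hands the resulting path to the kernel firmware loader):

#include <stdio.h>

enum chip { CHIP_NAVI10, CHIP_NAVI14, CHIP_NAVI12 };

static const char *chip_name(enum chip c)
{
	switch (c) {
	case CHIP_NAVI10: return "navi10";
	case CHIP_NAVI14: return "navi14";
	case CHIP_NAVI12: return "navi12";
	}
	return NULL;
}

int main(void)
{
	char fw_name[64];

	/* Same construction as the gpu_info hunk above. */
	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin",
		 chip_name(CHIP_NAVI12));
	printf("%s\n", fw_name);	/* amdgpu/navi12_gpu_info.bin */
	return 0;
}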
Signed-off-by: Xiaojie Yuan Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index 9e974ce26378..dd18ebc2eb01 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -362,6 +362,7 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type) case CHIP_VEGA20: case CHIP_NAVI10: case CHIP_NAVI14: + case CHIP_NAVI12: if (!load_type) return AMDGPU_FW_LOAD_DIRECT; else -- cgit v1.2.3 From 74b5e509a082c70fa20798343cd0bbfa532ceda1 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Thu, 16 May 2019 19:47:33 +0800 Subject: drm/amdgpu: initialize cg/pg flags and external rev id for navi12 don't enable any cg/pg features yet. v2: calculate external revision id from revision id so that we can differentiate navi12 A0 from A1 directly. Signed-off-by: Xiaojie Yuan Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nv.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 7b119285abe3..eb859b311409 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -596,6 +596,11 @@ static int nv_common_early_init(void *handle) AMD_PG_SUPPORT_VCN_DPG; adev->external_rev_id = adev->rev_id + 0x1; /* ??? */ break; + case CHIP_NAVI12: + adev->cg_flags = 0; + adev->pg_flags = 0; + adev->external_rev_id = adev->rev_id + 0xa; + break; default: /* FIXME: not supported yet */ return -EINVAL; -- cgit v1.2.3 From 7e17e58bdde26d1bb3588a8c1c546b9beed33405 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Thu, 16 May 2019 19:51:12 +0800 Subject: drm/amdgpu: set nbio/hdp cg for navi12 Same as navi10. Signed-off-by: Xiaojie Yuan Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nv.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index eb859b311409..e0400a3f09b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -794,6 +794,7 @@ static int nv_common_set_clockgating_state(void *handle, switch (adev->asic_type) { case CHIP_NAVI10: case CHIP_NAVI14: + case CHIP_NAVI12: adev->nbio_funcs->update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE ? true : false); adev->nbio_funcs->update_medium_grain_light_sleep(adev, -- cgit v1.2.3 From 59ab8c292b9ada03c05b691252a5a6f1d625e66d Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Thu, 16 May 2019 19:03:01 +0800 Subject: drm/amdgpu/gfx10: set gfx cg for navi12 Same as other navi asics. Signed-off-by: Xiaojie Yuan Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index f6c39583ff7b..d8d60d744f65 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -4149,6 +4149,7 @@ static int gfx_v10_0_set_clockgating_state(void *handle, switch (adev->asic_type) { case CHIP_NAVI10: case CHIP_NAVI14: + case CHIP_NAVI12: gfx_v10_0_update_gfx_clock_gating(adev, state == AMD_CG_STATE_GATE ? 
true : false); break; -- cgit v1.2.3 From 6983469c1a654571529705339e097ca44ce3d57e Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Thu, 16 May 2019 19:01:19 +0800 Subject: drm/amdgpu/gfx10: add gfx config for navi12 got from mmCP_MAX_CONTEXT and mmPA_SC_FIFO_SIZE v2: squash all navi asics together because the settings are the same. Signed-off-by: Xiaojie Yuan Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index d8d60d744f65..9dd12f692b33 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -1095,18 +1095,12 @@ static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_NAVI10: - adev->gfx.config.max_hw_contexts = 8; - adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; - adev->gfx.config.sc_prim_fifo_size_backend = 0x100; - adev->gfx.config.sc_hiz_tile_fifo_size = 0; - adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; - gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG); - break; case CHIP_NAVI14: + case CHIP_NAVI12: adev->gfx.config.max_hw_contexts = 8; adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; adev->gfx.config.sc_prim_fifo_size_backend = 0x100; - adev->gfx.config.sc_hiz_tile_fifo_size = 0x0; + adev->gfx.config.sc_hiz_tile_fifo_size = 0; adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG); break; -- cgit v1.2.3 From 92c123aec1a28439c31a38500f35fef391b5953b Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Tue, 11 Jun 2019 11:16:38 +0800 Subject: drm/amdgpu/gfx10: declare cp/rlc firmwares for navi12 Set the name properly to load the right ucode. Signed-off-by: Xiaojie Yuan Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 9dd12f692b33..94bae62911fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -77,6 +77,13 @@ MODULE_FIRMWARE("amdgpu/navi14_mec.bin"); MODULE_FIRMWARE("amdgpu/navi14_mec2.bin"); MODULE_FIRMWARE("amdgpu/navi14_rlc.bin"); +MODULE_FIRMWARE("amdgpu/navi12_ce.bin"); +MODULE_FIRMWARE("amdgpu/navi12_pfp.bin"); +MODULE_FIRMWARE("amdgpu/navi12_me.bin"); +MODULE_FIRMWARE("amdgpu/navi12_mec.bin"); +MODULE_FIRMWARE("amdgpu/navi12_mec2.bin"); +MODULE_FIRMWARE("amdgpu/navi12_rlc.bin"); + static const struct soc15_reg_golden golden_settings_gc_10_1[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x00400014), @@ -550,6 +557,9 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev) case CHIP_NAVI14: chip_name = "navi14"; break; + case CHIP_NAVI12: + chip_name = "navi12"; + break; default: BUG(); } -- cgit v1.2.3 From 716e9bb099c52efe5290c0b1f329e855b453663a Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Tue, 11 Jun 2019 11:16:54 +0800 Subject: drm/amdgpu/gfx10: add placeholder for navi12 golden settings Not used yet. 
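The gfx-config patch a couple of hunks up notes in its v2 changelog that all navi ASICs were squashed together because the settings are identical; in C that is simply three case labels falling through into one shared body instead of three copies of it. A tiny standalone illustration using two of the values from that hunk (the struct here is a stand-in, not amdgpu's):

#include <stdio.h>

enum chip { CHIP_NAVI10, CHIP_NAVI14, CHIP_NAVI12 };

/* Stand-in for the relevant part of adev->gfx.config. */
struct gfx_config {
	int max_hw_contexts;
	int sc_earlyz_tile_fifo_size;
};

static void gpu_early_init(enum chip c, struct gfx_config *cfg)
{
	switch (c) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		/* One shared body instead of three identical copies. */
		cfg->max_hw_contexts = 8;
		cfg->sc_earlyz_tile_fifo_size = 0x4C0;
		break;
	}
}

int main(void)
{
	struct gfx_config cfg;

	gpu_early_init(CHIP_NAVI12, &cfg);
	printf("%d 0x%X\n", cfg.max_hw_contexts, cfg.sc_earlyz_tile_fifo_size);
	return 0;
}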
Signed-off-by: Xiaojie Yuan Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 94bae62911fc..754a212f4381 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -174,6 +174,11 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] = /* Pending on emulation bring up */ }; +static const struct soc15_reg_golden golden_settings_gc_10_1_nv12[] = +{ + /* Pending on emulation bring up */ +}; + #define DEFAULT_SH_MEM_CONFIG \ ((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \ (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \ @@ -323,6 +328,14 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_gc_10_1_nv14, (const u32)ARRAY_SIZE(golden_settings_gc_10_1_nv14)); break; + case CHIP_NAVI12: + soc15_program_register_sequence(adev, + golden_settings_gc_10_1, + (const u32)ARRAY_SIZE(golden_settings_gc_10_1)); + soc15_program_register_sequence(adev, + golden_settings_gc_10_1_nv12, + (const u32)ARRAY_SIZE(golden_settings_gc_10_1_nv12)); + break; default: break; } -- cgit v1.2.3 From 9ff3dba6d68db11f096d89b07c5a1db39ffdd6ff Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Thu, 16 May 2019 20:01:03 +0800 Subject: drm/amdgpu/gfx10: set number of me(c)/pipe/queue for navi12 Same as other navi asics. Signed-off-by: Xiaojie Yuan Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 754a212f4381..99669c1be8dc 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -1228,6 +1228,7 @@ static int gfx_v10_0_sw_init(void *handle) switch (adev->asic_type) { case CHIP_NAVI10: case CHIP_NAVI14: + case CHIP_NAVI12: adev->gfx.me.num_me = 1; adev->gfx.me.num_pipe_per_me = 2; adev->gfx.me.num_queue_per_pipe = 1; -- cgit v1.2.3 From 4cdfc4a2be144ee84406e8e6dce5474a67f00c2c Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Thu, 16 May 2019 20:02:14 +0800 Subject: drm/amdgpu/gfx10: set rlc funcs for navi12 Same as other navi asics. Signed-off-by: Xiaojie Yuan Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 99669c1be8dc..a5d9dc96aaf2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -5209,6 +5209,7 @@ static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_NAVI10: case CHIP_NAVI14: + case CHIP_NAVI12: adev->gfx.rlc.funcs = &gfx_v10_0_rlc_funcs; break; default: -- cgit v1.2.3 From 6f523fd7b3d4e42d7d0e7591c8a121048d0142f8 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Mon, 17 Dec 2018 18:05:32 +0800 Subject: drm/amdgpu/sdma5: declare sdma firmwares for navi12 Declare the firmwares and load the proper ones for navi12. 
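The CHIP_NAVI12 case added to gfx_v10_0_init_golden_registers() above programs soc15_reg_golden arrays, and even though the navi12-specific array is still an empty placeholder, the mechanism is worth spelling out: each entry carries an and_mask selecting the bits the entry may touch and an or_value supplying their new contents, so programming an entry is a masked read-modify-write, and a full 0xffffffff mask degenerates into a plain register write. A simplified user-space model of that per-entry update; the in-kernel helper also resolves the register through the reg_offset tables and may special-case the full mask, but the result is the same:

#include <stdint.h>
#include <stdio.h>

struct reg_golden {
	uint32_t and_mask;	/* bits the entry is allowed to touch */
	uint32_t or_value;	/* new contents for those bits        */
};

static uint32_t apply_golden(uint32_t old, const struct reg_golden *e)
{
	/* Clear the masked field, then OR in the (masked) new value. */
	return (old & ~e->and_mask) | (e->or_value & e->and_mask);
}

int main(void)
{
	/* mmCB_HW_CONTROL_4 entry from golden_settings_gc_10_1 above:
	 * a full mask simply replaces the register contents. */
	const struct reg_golden full = { 0xffffffff, 0x00400014 };
	/* Illustrative partial-mask entry (made-up numbers). */
	const struct reg_golden part = { 0x0000ff00, 0x00003400 };

	printf("0x%08x\n", apply_golden(0x12345678, &full));	/* 0x00400014 */
	printf("0x%08x\n", apply_golden(0x12345678, &part));	/* 0x12343478 */
	return 0;
}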
Signed-off-by: Xiaojie Yuan Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index aa43dc6c599a..ea28b309cf21 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -48,6 +48,9 @@ MODULE_FIRMWARE("amdgpu/navi10_sdma1.bin"); MODULE_FIRMWARE("amdgpu/navi14_sdma.bin"); MODULE_FIRMWARE("amdgpu/navi14_sdma1.bin"); +MODULE_FIRMWARE("amdgpu/navi12_sdma.bin"); +MODULE_FIRMWARE("amdgpu/navi12_sdma1.bin"); + #define SDMA1_REG_OFFSET 0x600 #define SDMA0_HYP_DEC_REG_START 0x5880 #define SDMA0_HYP_DEC_REG_END 0x5893 @@ -167,6 +170,9 @@ static int sdma_v5_0_init_microcode(struct amdgpu_device *adev) case CHIP_NAVI14: chip_name = "navi14"; break; + case CHIP_NAVI12: + chip_name = "navi12"; + break; default: BUG(); } -- cgit v1.2.3 From f2d6731d77cfbcdac9724e249cebdce0a75a2d00 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Mon, 17 Dec 2018 18:07:22 +0800 Subject: drm/amdgpu/sdma5: add placeholder for navi12 golden settings None yet. Signed-off-by: Xiaojie Yuan Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index ea28b309cf21..01d4faccc68f 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -98,6 +98,9 @@ static const struct soc15_reg_golden golden_settings_sdma_nv14[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), }; +static const struct soc15_reg_golden golden_settings_sdma_nv12[] = { +}; + static u32 sdma_v5_0_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 internal_offset) { u32 base; @@ -135,6 +138,14 @@ static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_sdma_nv14, (const u32)ARRAY_SIZE(golden_settings_sdma_nv14)); break; + case CHIP_NAVI12: + soc15_program_register_sequence(adev, + golden_settings_sdma_5, + (const u32)ARRAY_SIZE(golden_settings_sdma_5)); + soc15_program_register_sequence(adev, + golden_settings_sdma_nv12, + (const u32)ARRAY_SIZE(golden_settings_sdma_nv12)); + break; default: break; } -- cgit v1.2.3 From 4a0e815fb3180875f8763321e5d2e2b432d35f1a Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Thu, 16 May 2019 18:05:37 +0800 Subject: drm/amdgpu/gmc10: set gart size and vm size for navi12 Same as other navi asics. 
Signed-off-by: Xiaojie Yuan Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index 0fd85cb15322..f585fc92871b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -525,6 +525,7 @@ static int gmc_v10_0_mc_init(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_NAVI10: case CHIP_NAVI14: + case CHIP_NAVI12: default: adev->gmc.gart_size = 512ULL << 20; break; @@ -603,10 +604,11 @@ static int gmc_v10_0_sw_init(void *handle) switch (adev->asic_type) { case CHIP_NAVI10: case CHIP_NAVI14: + case CHIP_NAVI12: adev->num_vmhubs = 2; /* * To fulfill 4-level page support, - * vm size is 256TB (48bit), maximum size of Navi10/Navi14, + * vm size is 256TB (48bit), maximum size of Navi10/Navi14/Navi12, * block size 512 (9bit) */ amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); @@ -721,6 +723,7 @@ static void gmc_v10_0_init_golden_registers(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_NAVI10: case CHIP_NAVI14: + case CHIP_NAVI12: break; default: break; -- cgit v1.2.3 From 44e9e7c96c5e987354ac7a976c1c17a88c9bf901 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Thu, 16 May 2019 19:58:19 +0800 Subject: drm/amdgpu: add ip blocks for navi12 Signed-off-by: Xiaojie Yuan Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nv.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index e0400a3f09b4..b914b5170ee2 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -422,6 +422,13 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) if (adev->enable_mes) amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block); break; + case CHIP_NAVI12: + amdgpu_device_ip_block_add(adev, &nv_common_ip_block); + amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); + amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); + amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); + amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block); + break; default: return -EINVAL; } -- cgit v1.2.3 From 71745cf4740945e3143f1cabb51fab4136fd4278 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Tue, 29 Jan 2019 22:36:15 +0800 Subject: drm/amdgpu/gfx10: set tcp harvest for navi12 Same as navi10. 
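The gmc_v10_0 change above gives navi12 the same address-space budget as navi10/navi14: a 512 MiB GART aperture and a 48-bit VM space translated through 4 levels with 9-bit (512-entry) blocks. The numbers in the comment check out, assuming the size argument passed to amdgpu_vm_adjust_size() is expressed in GiB as the "256TB" comment implies; a standalone arithmetic check:

#include <stdio.h>

int main(void)
{
	unsigned long long gart  = 512ULL << 20;		/* adev->gmc.gart_size  */
	unsigned long long vm    = 256ULL * 1024 << 30;		/* 256 * 1024, in GiB   */
	unsigned long long space = 1ULL << 48;			/* 48-bit address space */

	printf("GART : %llu MiB\n", gart >> 20);		/* 512 */
	printf("VM   : %llu TiB\n", vm >> 40);			/* 256 */
	printf("2^48 : %llu TiB\n", space >> 40);		/* 256 */
	printf("block: %u entries\n", 1u << 9);			/* 512, the 9-bit block size */
	return 0;
}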
Signed-off-by: Xiaojie Yuan Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index a5d9dc96aaf2..407d7d40d25f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -1567,7 +1567,7 @@ static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev) u32 utcl_invreq_disable = 0; /* * GCRD_TARGETS_DISABLE field contains - * for Navi10: GL1C=[18:15], SQC=[14:10], TCP=[9:0] + * for Navi10/Navi12: GL1C=[18:15], SQC=[14:10], TCP=[9:0] * for Navi14: GL1C=[21:18], SQC=[17:12], TCP=[11:0] */ u32 gcrd_targets_disable_mask = amdgpu_gfx_create_bitmask( @@ -1576,7 +1576,7 @@ static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev) 4); /* GL1C */ /* * UTCL1_UTCL0_INVREQ_DISABLE field contains - * for Navi10: SQG=[24], RMI=[23:20], SQC=[19:10], TCP=[9:0] + * for Navi10Navi12: SQG=[24], RMI=[23:20], SQC=[19:10], TCP=[9:0] * for Navi14: SQG=[28], RMI=[27:24], SQC=[23:12], TCP=[11:0] */ u32 utcl_invreq_disable_mask = amdgpu_gfx_create_bitmask( @@ -1585,7 +1585,9 @@ static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev) 4 + /* RMI */ 1); /* SQG */ - if (adev->asic_type == CHIP_NAVI10 || adev->asic_type == CHIP_NAVI14) { + if (adev->asic_type == CHIP_NAVI10 || + adev->asic_type == CHIP_NAVI14 || + adev->asic_type == CHIP_NAVI12) { mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { -- cgit v1.2.3 From 7990202903135173893beb211b14d3a37e97a5e7 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Wed, 26 Jun 2019 19:19:57 +0800 Subject: drm/amdgpu: enable virtual display for navi12 Virtual display is a sw display interface for bring up and virtualization or for cards without display hardware. 
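gfx_v10_0_tcp_harvest() above builds its disable masks with amdgpu_gfx_create_bitmask(), sized from the field layouts quoted in the comments (for navi10/navi12, GCRD_TARGETS_DISABLE is GL1C=[18:15], SQC=[14:10], TCP=[9:0], i.e. 19 bits in total). Assuming the helper simply returns a contiguous low-bit mask of the requested width, the full-field masks work out as below; the masks the driver actually programs are sized from the part's CU configuration, so these are only the upper bounds implied by the comments:

#include <stdint.h>
#include <stdio.h>

/* Contiguous mask of the lowest `width` bits (assumed behaviour of
 * amdgpu_gfx_create_bitmask()). */
static uint32_t create_bitmask(unsigned int width)
{
	return (uint32_t)((1ULL << width) - 1);
}

int main(void)
{
	/* GCRD_TARGETS_DISABLE, navi10/navi12: TCP(10) + SQC(5) + GL1C(4). */
	printf("0x%05x\n", create_bitmask(10 + 5 + 4));		/* 0x7ffff   */
	/* UTCL1_UTCL0_INVREQ_DISABLE: TCP(10) + SQC(10) + RMI(4) + SQG(1). */
	printf("0x%07x\n", create_bitmask(10 + 10 + 4 + 1));	/* 0x1ffffff */
	return 0;
}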
Signed-off-by: Xiaojie Yuan Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 1 + drivers/gpu/drm/amd/amdgpu/nv.c | 2 ++ 2 files changed, 3 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index ee2210b8712c..4c6d792d51a5 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -460,6 +460,7 @@ static int dce_virtual_hw_init(void *handle) case CHIP_ARCTURUS: case CHIP_NAVI10: case CHIP_NAVI14: + case CHIP_NAVI12: break; default: DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", adev->asic_type); diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index b914b5170ee2..466ef9aee103 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -426,6 +426,8 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &nv_common_ip_block); amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); + if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) + amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block); break; -- cgit v1.2.3 From f8984cb9e3e984611d9dbd3cb77e93686e525e48 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Tue, 9 Jul 2019 14:16:22 +0800 Subject: drm/amdgpu/gfx10: add golden settings for navi12 (v2) Add initial golden settings for navi12 gfx. v2: update settings Signed-off-by: Xiaojie Yuan Reviewed-by: Jack Xiao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 54 ++++++++++++++++++++++++++++++---- 1 file changed, 49 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 407d7d40d25f..db8c7fbc1b5f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -169,12 +169,56 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_1[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00800000, 0x00800000), }; +static const struct soc15_reg_golden golden_settings_gc_10_1_2[] = +{ + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0x003e001f, 0x003c0014), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_GS_NGG_CLK_CTRL, 0xffff8fff, 0xffff8100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_IA_CLK_CTRL, 0xffff0fff, 0xffff0100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xff7f0fff, 0xc0000100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0xffffcfff, 0x60000100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0xffff0fff, 0x40000100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_WD_CLK_CTRL, 0xffff8fff, 0xffff8100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_PIPE_STEER, 0xffffffff, 0xe4e4e4e4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_VC5_ENABLE, 0x00000003, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0x800007ff, 0x000005ff), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG, 0xffffffff, 0x20000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xffffffff, 0x00000420), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000200), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04800000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DFSM_TILES_IN_FLIGHT, 0x0000ffff, 0x0000003f), + 
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_LAST_OF_BURST_CONFIG, 0xffffffff, 0x03860204), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff0ffff, 0x00000500), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PRIV_CONTROL, 0x00007fff, 0x000001fe), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0xffffffff, 0xe4e4e4e4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x10321032), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x02310231), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CGTT_SCLK_CTRL, 0xffff0fff, 0x10000100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL2, 0xffffffff, 0x1402002f), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xffffbfff, 0x00000188), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_0, 0xffffffff, 0x842a4c02), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_TIMEOUT_COUNTER, 0xffffffff, 0x00000800), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x08000009), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04440000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000820, 0x00000820), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000133, 0x00000130), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0xffdf80ff, 0x479c0010), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00800000) +}; + static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] = { /* Pending on emulation bring up */ }; -static const struct soc15_reg_golden golden_settings_gc_10_1_nv12[] = +static const struct soc15_reg_golden golden_settings_gc_10_1_2_nv12[] = { /* Pending on emulation bring up */ }; @@ -330,11 +374,11 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev) break; case CHIP_NAVI12: soc15_program_register_sequence(adev, - golden_settings_gc_10_1, - (const u32)ARRAY_SIZE(golden_settings_gc_10_1)); + golden_settings_gc_10_1_2, + (const u32)ARRAY_SIZE(golden_settings_gc_10_1_2)); soc15_program_register_sequence(adev, - golden_settings_gc_10_1_nv12, - (const u32)ARRAY_SIZE(golden_settings_gc_10_1_nv12)); + golden_settings_gc_10_1_2_nv12, + (const u32)ARRAY_SIZE(golden_settings_gc_10_1_2_nv12)); break; default: break; -- cgit v1.2.3 From c726fbf0fb64ef6142deeff2d211fa4996556094 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Tue, 9 Jul 2019 14:17:08 +0800 Subject: drm/amdgpu/sdma5: add golden settings for navi12 (v2) common golden settings are put in golden_settings_sdma_5 array v2: update settings (Alex) Signed-off-by: Xiaojie Yuan Reviewed-by: Jack Xiao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index 01d4faccc68f..3e180152c5ad 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -99,6 +99,8 @@ static const struct soc15_reg_golden golden_settings_sdma_nv14[] = { }; static const struct soc15_reg_golden 
golden_settings_sdma_nv12[] = { + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), }; static u32 sdma_v5_0_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 internal_offset) -- cgit v1.2.3 From 87190edcf3a965cb1ef050bf2c407f6cef94e676 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Wed, 10 Jul 2019 18:50:20 +0800 Subject: drm/amdgpu: add CGTT_GS_NGG_CLK_CTRL register to gc header gc 10.1.2 introduced this new register Signed-off-by: Xiaojie Yuan Signed-off-by: Alex Deucher --- .../drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h | 2 ++ .../amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h | 39 ++++++++++++++++++++++ 2 files changed, 41 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h index 1dbc7cefbc05..075867d4b1da 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h @@ -10107,6 +10107,8 @@ #define mmCGTT_IA_CLK_CTRL_BASE_IDX 1 #define mmCGTT_WD_CLK_CTRL 0x5086 #define mmCGTT_WD_CLK_CTRL_BASE_IDX 1 +#define mmCGTT_GS_NGG_CLK_CTRL 0x5087 +#define mmCGTT_GS_NGG_CLK_CTRL_BASE_IDX 1 #define mmCGTT_PA_CLK_CTRL 0x5088 #define mmCGTT_PA_CLK_CTRL_BASE_IDX 1 #define mmCGTT_SC_CLK_CTRL0 0x5089 diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h index 6c2a421fe8b7..e7db6f9f9c86 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h @@ -37872,6 +37872,45 @@ #define CGTT_WD_CLK_CTRL__CORE_OVERRIDE_MASK 0x20000000L #define CGTT_WD_CLK_CTRL__RBIU_INPUT_OVERRIDE_MASK 0x40000000L #define CGTT_WD_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L +//CGTT_GS_NGG_CLK_CTRL +#define CGTT_GS_NGG_CLK_CTRL__ON_DELAY__SHIFT 0x0 +#define CGTT_GS_NGG_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 +#define CGTT_GS_NGG_CLK_CTRL__PERF_ENABLE__SHIFT 0xf +#define CGTT_GS_NGG_CLK_CTRL__DBG_ENABLE__SHIFT 0x10 +#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE6__SHIFT 0x11 +#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE5__SHIFT 0x12 +#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE4__SHIFT 0x13 +#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE3__SHIFT 0x14 +#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE2__SHIFT 0x15 +#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE1__SHIFT 0x16 +#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE0__SHIFT 0x17 +#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE7__SHIFT 0x18 +#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE6__SHIFT 0x19 +#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE5__SHIFT 0x1a +#define CGTT_GS_NGG_CLK_CTRL__PRIMGEN_OVERRIDE__SHIFT 0x1b +#define CGTT_GS_NGG_CLK_CTRL__GS1_OVERRIDE__SHIFT 0x1c +#define CGTT_GS_NGG_CLK_CTRL__GS0_OVERRIDE__SHIFT 0x1d +#define CGTT_GS_NGG_CLK_CTRL__RBIU_INPUT_OVERRIDE__SHIFT 0x1e +#define CGTT_GS_NGG_CLK_CTRL__REG_OVERRIDE__SHIFT 0x1f +#define CGTT_GS_NGG_CLK_CTRL__ON_DELAY_MASK 0x0000000FL +#define CGTT_GS_NGG_CLK_CTRL__OFF_HYSTERESIS_MASK 0x00000FF0L +#define CGTT_GS_NGG_CLK_CTRL__PERF_ENABLE_MASK 0x00008000L +#define CGTT_GS_NGG_CLK_CTRL__DBG_ENABLE_MASK 0x00010000L +#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE6_MASK 0x00020000L +#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE5_MASK 0x00040000L +#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE4_MASK 0x00080000L 
+#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE3_MASK 0x00100000L +#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE2_MASK 0x00200000L +#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE1_MASK 0x00400000L +#define CGTT_GS_NGG_CLK_CTRL__SOFT_STALL_OVERRIDE0_MASK 0x00800000L +#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE7_MASK 0x01000000L +#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE6_MASK 0x02000000L +#define CGTT_GS_NGG_CLK_CTRL__SOFT_OVERRIDE5_MASK 0x04000000L +#define CGTT_GS_NGG_CLK_CTRL__PRIMGEN_OVERRIDE_MASK 0x08000000L +#define CGTT_GS_NGG_CLK_CTRL__GS1_OVERRIDE_MASK 0x10000000L +#define CGTT_GS_NGG_CLK_CTRL__GS0_OVERRIDE_MASK 0x20000000L +#define CGTT_GS_NGG_CLK_CTRL__RBIU_INPUT_OVERRIDE_MASK 0x40000000L +#define CGTT_GS_NGG_CLK_CTRL__REG_OVERRIDE_MASK 0x80000000L //CGTT_PA_CLK_CTRL #define CGTT_PA_CLK_CTRL__ON_DELAY__SHIFT 0x0 #define CGTT_PA_CLK_CTRL__OFF_HYSTERESIS__SHIFT 0x4 -- cgit v1.2.3 From 02938eed74a3d75c743c3b8a80c80ba130dd6674 Mon Sep 17 00:00:00 2001 From: Jack Xiao Date: Mon, 15 Jul 2019 05:12:21 +0800 Subject: drm/amdgpu: correct smu rlc handshake enablement bit Correct the enablement bit of SMU RLC handshake. Signed-off-by: Jack Xiao Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index db8c7fbc1b5f..ec71e2a7d7b7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -1779,9 +1779,9 @@ static void gfx_v10_0_rlc_smu_handshake_cntl(struct amdgpu_device *adev, * hence no handshake between SMU & RLC * GFXOFF will be disabled */ - rlc_pg_cntl |= 0x80000; + rlc_pg_cntl |= 0x800000; } else - rlc_pg_cntl &= ~0x80000; + rlc_pg_cntl &= ~0x800000; WREG32_SOC15(GC, 0, mmRLC_PG_CNTL, rlc_pg_cntl); } -- cgit v1.2.3 From 9ea8da75ba43a344a06b7c811a61277e9075a96b Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Thu, 1 Aug 2019 14:54:59 -0500 Subject: drm/amdgpu/smu11: add smu support for navi12 Same as other Navi asics. 
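The rlc_smu_handshake fix above is a one-nibble change with a real meaning: 0x80000 is bit 19 while 0x800000 is bit 23 of mmRLC_PG_CNTL, so the old code toggled an unrelated bit instead of the one the comment describes. Writing such flags through a BIT()-style macro makes this class of typo much harder to commit; a small sketch (the field name below is a placeholder, not the register's documented one):

#include <stdint.h>
#include <stdio.h>

#define BIT32(n)	(1u << (n))

/* Placeholder name for the bit the patch actually intends (bit 23);
 * 0x80000 would have been bit 19 instead. */
#define RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE	BIT32(23)

int main(void)
{
	uint32_t rlc_pg_cntl = 0;

	rlc_pg_cntl |= RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE;
	printf("set:   0x%08x\n", rlc_pg_cntl);		/* 0x00800000, not 0x00080000 */

	rlc_pg_cntl &= ~RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE;
	printf("clear: 0x%08x\n", rlc_pg_cntl);		/* 0x00000000 */
	return 0;
}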
Signed-off-by: Xiaojie Yuan Reviewed-by: Hawking Zhang Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 1 + drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 6 ++++++ 2 files changed, 7 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 00651118102c..3b44bbc6f1e7 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -726,6 +726,7 @@ static int smu_set_funcs(struct amdgpu_device *adev) case CHIP_VEGA20: case CHIP_NAVI10: case CHIP_NAVI14: + case CHIP_NAVI12: case CHIP_ARCTURUS: if (adev->pm.pp_feature & PP_OVERDRIVE_MASK) smu->od_enabled = true; diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index 848ad04837a8..ee739c080943 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -49,6 +49,7 @@ MODULE_FIRMWARE("amdgpu/vega20_smc.bin"); MODULE_FIRMWARE("amdgpu/arcturus_smc.bin"); MODULE_FIRMWARE("amdgpu/navi10_smc.bin"); MODULE_FIRMWARE("amdgpu/navi14_smc.bin"); +MODULE_FIRMWARE("amdgpu/navi12_smc.bin"); #define SMU11_VOLTAGE_SCALE 4 @@ -166,6 +167,9 @@ static int smu_v11_0_init_microcode(struct smu_context *smu) case CHIP_NAVI14: chip_name = "navi14"; break; + case CHIP_NAVI12: + chip_name = "navi12"; + break; default: BUG(); } @@ -1334,6 +1338,7 @@ static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable) break; case CHIP_NAVI10: case CHIP_NAVI14: + case CHIP_NAVI12: if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) return 0; mutex_lock(&smu->mutex); @@ -1758,6 +1763,7 @@ void smu_v11_0_set_smu_funcs(struct smu_context *smu) break; case CHIP_NAVI10: case CHIP_NAVI14: + case CHIP_NAVI12: navi10_set_ppt_funcs(smu); break; default: -- cgit v1.2.3 From 739cdbd6a21b61677890b648fe49f40a9dbcc909 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Fri, 2 Aug 2019 08:59:36 -0500 Subject: drm/amdgpu/psp11: add psp support for navi12 Same as other navi asics. 
Signed-off-by: Xiaojie Yuan Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 1 + drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 6 ++++++ 2 files changed, 7 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 314e7cbabc87..d1ac73eca06a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -59,6 +59,7 @@ static int psp_early_init(void *handle) break; case CHIP_NAVI10: case CHIP_NAVI14: + case CHIP_NAVI12: psp_v11_0_set_psp_funcs(psp); psp->autoload_supported = true; break; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index 38956e41718b..f0a0ecb07818 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -45,6 +45,8 @@ MODULE_FIRMWARE("amdgpu/navi10_sos.bin"); MODULE_FIRMWARE("amdgpu/navi10_asd.bin"); MODULE_FIRMWARE("amdgpu/navi14_sos.bin"); MODULE_FIRMWARE("amdgpu/navi14_asd.bin"); +MODULE_FIRMWARE("amdgpu/navi12_sos.bin"); +MODULE_FIRMWARE("amdgpu/navi12_asd.bin"); MODULE_FIRMWARE("amdgpu/arcturus_sos.bin"); MODULE_FIRMWARE("amdgpu/arcturus_asd.bin"); @@ -80,6 +82,9 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) case CHIP_NAVI14: chip_name = "navi14"; break; + case CHIP_NAVI12: + chip_name = "navi12"; + break; case CHIP_ARCTURUS: chip_name = "arcturus"; break; @@ -176,6 +181,7 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) break; case CHIP_NAVI10: case CHIP_NAVI14: + case CHIP_NAVI12: case CHIP_ARCTURUS: break; default: -- cgit v1.2.3 From e60cc94b268a65cc46f8fd430e98e764a4f46370 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Thu, 18 Jul 2019 05:00:00 +0800 Subject: drm/amdgpu: start autoload till RLCG fw for navi12 rlc save restore list is not ready yet for navi12 Signed-off-by: Xiaojie Yuan Reviewed-by: Jack Xiao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index d1ac73eca06a..51fb890e2d3b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -1025,7 +1025,8 @@ out: return ret; /* Start rlc autoload after psp recieved all the gfx firmware */ - if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM) { + if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM || + (adev->asic_type == CHIP_NAVI12 && ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G)) { ret = psp_rlc_autoload(psp); if (ret) { DRM_ERROR("Failed to start rlc autoload\n"); -- cgit v1.2.3 From 7f47efeb9e212dce2abfb5510b9a19741e51820b Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Tue, 16 Jul 2019 03:26:49 +0800 Subject: drm/amdgpu: add smu ip block for navi12 Signed-off-by: Xiaojie Yuan Reviewed-by: Jack Xiao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nv.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 466ef9aee103..3a04b292474e 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -426,10 +426,16 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &nv_common_ip_block); amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); amdgpu_device_ip_block_add(adev, 
&navi10_ih_ip_block); + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP && + is_support_sw_smu(adev)) + amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block); + if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT && + is_support_sw_smu(adev)) + amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); break; default: return -EINVAL; -- cgit v1.2.3 From 6b66ae2e556c78e26fc75fb8ff01753857fbd457 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Thu, 18 Jul 2019 02:54:29 +0800 Subject: drm/amdgpu: add psp ip block for navi12 Signed-off-by: Xiaojie Yuan Reviewed-by: Jack Xiao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nv.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 3a04b292474e..9bbc4bb3d1b3 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -426,6 +426,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &nv_common_ip_block); amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); + amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP && is_support_sw_smu(adev)) amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); -- cgit v1.2.3 From a3219816c407282a0f1b40d1dc5efc320b051767 Mon Sep 17 00:00:00 2001 From: Boyuan Zhang Date: Thu, 18 Jul 2019 09:17:24 -0400 Subject: drm/amdgpu: add Navi12 VCN firmware support Add Navi12 to VCN family Signed-off-by: Boyuan Zhang Reviewed-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index b35fc6ef4c1f..47086cdbb413 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -49,6 +49,7 @@ #define FIRMWARE_ARCTURUS "amdgpu/arcturus_vcn.bin" #define FIRMWARE_NAVI10 "amdgpu/navi10_vcn.bin" #define FIRMWARE_NAVI14 "amdgpu/navi14_vcn.bin" +#define FIRMWARE_NAVI12 "amdgpu/navi12_vcn.bin" MODULE_FIRMWARE(FIRMWARE_RAVEN); MODULE_FIRMWARE(FIRMWARE_PICASSO); @@ -56,6 +57,7 @@ MODULE_FIRMWARE(FIRMWARE_RAVEN2); MODULE_FIRMWARE(FIRMWARE_ARCTURUS); MODULE_FIRMWARE(FIRMWARE_NAVI10); MODULE_FIRMWARE(FIRMWARE_NAVI14); +MODULE_FIRMWARE(FIRMWARE_NAVI12); static void amdgpu_vcn_idle_work_handler(struct work_struct *work); @@ -87,12 +89,18 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) adev->vcn.indirect_sram = true; break; - case CHIP_NAVI14: + case CHIP_NAVI14: fw_name = FIRMWARE_NAVI14; if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) adev->vcn.indirect_sram = true; break; + case CHIP_NAVI12: + fw_name = FIRMWARE_NAVI12; + if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && + (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) + adev->vcn.indirect_sram = true; + break; default: return -EINVAL; } -- cgit v1.2.3 From 1fbed280a2445aba3700eae72b31a623311b2c2e Mon Sep 17 00:00:00 2001 From: Boyuan Zhang Date: Thu, 18 Jul 2019 10:13:23 -0400 Subject: drm/amdgpu: add VCN ip block for Navi12 Add VCN2 ip block 
for Navi12 Signed-off-by: Boyuan Zhang Reviewed-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nv.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 9bbc4bb3d1b3..b97d48608ccc 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -437,6 +437,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT && is_support_sw_smu(adev)) amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); + amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); break; default: return -EINVAL; -- cgit v1.2.3 From 400e9c5ea66ae2ec101992b6f3af8441c5b58497 Mon Sep 17 00:00:00 2001 From: Boyuan Zhang Date: Thu, 18 Jul 2019 17:39:07 -0400 Subject: drm/amdgpu: enable DPG mode for Navi12 Enable Dynamic Power Gating VCN for Navi12. Signed-off-by: Boyuan Zhang Reviewed-by: Xiaojie Yuan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index b97d48608ccc..14c501ab5df3 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -615,7 +615,7 @@ static int nv_common_early_init(void *handle) break; case CHIP_NAVI12: adev->cg_flags = 0; - adev->pg_flags = 0; + adev->pg_flags = AMD_PG_SUPPORT_VCN_DPG; adev->external_rev_id = adev->rev_id + 0xa; break; default: -- cgit v1.2.3 From 36bbb634b75399b98e09be41842fdf8429453e31 Mon Sep 17 00:00:00 2001 From: Leo Li Date: Tue, 16 Jul 2019 11:50:06 -0400 Subject: drm/amd/display: Add ASICREV_IS_NAVI macros They are used by DC to determine ASIC revs. 
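Worth noting how the last few patches interlock: navi12 uses PSP front-door firmware loading (from the amdgpu_ucode patch earlier), and now that nv_common_early_init() sets AMD_PG_SUPPORT_VCN_DPG in pg_flags, the CHIP_NAVI12 case in amdgpu_vcn_sw_init() above flips vcn.indirect_sram on. A trivial standalone sketch of that gate (the flag's bit position here is a placeholder; only the boolean logic matters):

#include <stdbool.h>
#include <stdio.h>

enum fw_load_type { AMDGPU_FW_LOAD_DIRECT, AMDGPU_FW_LOAD_PSP };

#define AMD_PG_SUPPORT_VCN_DPG	(1u << 0)	/* placeholder bit position */

int main(void)
{
	enum fw_load_type load_type = AMDGPU_FW_LOAD_PSP;	/* front-door loading */
	unsigned int pg_flags = AMD_PG_SUPPORT_VCN_DPG;		/* set by this patch  */

	/* Same shape as the CHIP_NAVI12 case in amdgpu_vcn_sw_init() above. */
	bool indirect_sram = (load_type == AMDGPU_FW_LOAD_PSP) &&
			     (pg_flags & AMD_PG_SUPPORT_VCN_DPG);

	printf("vcn.indirect_sram = %s\n", indirect_sram ? "true" : "false");
	return 0;
}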
Signed-off-by: Leo Li Reviewed-by: Roman Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/include/dal_asic_id.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h index 94b7d5ec155b..d2e380d8c9a0 100644 --- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h +++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h @@ -153,11 +153,13 @@ enum { NV_NAVI10_P_A0 = 1, + NV_NAVI12_P_A0 = 10, NV_NAVI14_M_A0 = 20, NV_UNKNOWN = 0xFF }; -#define ASICREV_IS_NAVI10_P(eChipRev) (eChipRev < NV_NAVI14_M_A0) +#define ASICREV_IS_NAVI10_P(eChipRev) (eChipRev < NV_NAVI12_P_A0) +#define ASICREV_IS_NAVI12_P(eChipRev) ((eChipRev >= NV_NAVI12_P_A0) && (eChipRev < NV_NAVI14_M_A0)) #define ASICREV_IS_NAVI14_M(eChipRev) ((eChipRev >= NV_NAVI14_M_A0) && (eChipRev < NV_UNKNOWN)) #endif -- cgit v1.2.3 From 078655d982c5fcf03a3120b52383482b3dc04ab1 Mon Sep 17 00:00:00 2001 From: Leo Li Date: Tue, 16 Jul 2019 18:12:13 -0400 Subject: drm/amdgpu: Add nv12 DC ip block Load DC and amdgpu display manager Signed-off-by: Leo Li Reviewed-by: Xiaojie Yuan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 + drivers/gpu/drm/amd/amdgpu/nv.c | 2 ++ 2 files changed, 3 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index b3ad96add13c..f9fabbc4181f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2545,6 +2545,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) #if defined(CONFIG_DRM_AMD_DC_DCN2_0) case CHIP_NAVI10: case CHIP_NAVI14: + case CHIP_NAVI12: #endif return amdgpu_dc != 0; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 14c501ab5df3..f25097969da7 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -432,6 +432,8 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); + else if (amdgpu_device_has_dc_support(adev)) + amdgpu_device_ip_block_add(adev, &dm_ip_block); amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block); if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT && -- cgit v1.2.3 From fbd2afe5366cd78997153b615232482fb97011c5 Mon Sep 17 00:00:00 2001 From: Roman Li Date: Tue, 16 Jul 2019 17:26:14 -0400 Subject: drm/amd/display: Add missing NV12 asic IDs Add missing navi12 asic ids. 
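nv_common_early_init() earlier set navi12's external_rev_id to rev_id + 0xa, and the dal_asic_id.h hunk above keys off exactly that: NV_NAVI12_P_A0 is 10, so navi12 parts land in the [10, 20) window between navi10 (below 10) and navi14 (20 and up), which is also why the earlier v2 note could promise that A0 and A1 are distinguishable directly. A quick standalone check using the values copied from that hunk:

#include <stdio.h>

enum {
	NV_NAVI10_P_A0 = 1,
	NV_NAVI12_P_A0 = 10,
	NV_NAVI14_M_A0 = 20,
	NV_UNKNOWN     = 0xFF,
};

#define ASICREV_IS_NAVI10_P(rev) ((rev) < NV_NAVI12_P_A0)
#define ASICREV_IS_NAVI12_P(rev) ((rev) >= NV_NAVI12_P_A0 && (rev) < NV_NAVI14_M_A0)
#define ASICREV_IS_NAVI14_M(rev) ((rev) >= NV_NAVI14_M_A0 && (rev) < NV_UNKNOWN)

int main(void)
{
	unsigned int rev_id = 0;			/* navi12 A0            */
	unsigned int external_rev_id = rev_id + 0xa;	/* as set in nv.c above */

	printf("navi10? %d\n", ASICREV_IS_NAVI10_P(external_rev_id));	/* 0 */
	printf("navi12? %d\n", ASICREV_IS_NAVI12_P(external_rev_id));	/* 1 */
	printf("navi14? %d\n", ASICREV_IS_NAVI14_M(external_rev_id));	/* 0 */
	return 0;
}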
Signed-off-by: Roman Li Reviewed-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index dca371a4407c..f5f3c44865cb 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -810,6 +810,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev) case CHIP_VEGA20: case CHIP_NAVI10: case CHIP_NAVI14: + case CHIP_NAVI12: return 0; case CHIP_RAVEN: if (ASICREV_IS_PICASSO(adev->external_rev_id)) @@ -2359,6 +2360,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) #if defined(CONFIG_DRM_AMD_DC_DCN1_0) case CHIP_RAVEN: #if defined(CONFIG_DRM_AMD_DC_DCN2_0) + case CHIP_NAVI12: case CHIP_NAVI10: case CHIP_NAVI14: #endif @@ -2516,6 +2518,7 @@ static int dm_early_init(void *handle) #endif #if defined(CONFIG_DRM_AMD_DC_DCN2_0) case CHIP_NAVI10: + case CHIP_NAVI12: adev->mode_info.num_crtc = 6; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 6; @@ -2821,6 +2824,7 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev, #if defined(CONFIG_DRM_AMD_DC_DCN2_0) adev->asic_type == CHIP_NAVI10 || adev->asic_type == CHIP_NAVI14 || + adev->asic_type == CHIP_NAVI12 || #endif adev->asic_type == CHIP_RAVEN) { /* Fill GFX9 params */ -- cgit v1.2.3 From 3cf7bf2e4837fa1836f09b751e6130473a69fd11 Mon Sep 17 00:00:00 2001 From: shaoyunl Date: Fri, 26 Jul 2019 14:19:02 -0500 Subject: drm/amdgpu: enable Navi12 kfd support for amdgpu Navi12 has the same interface as Navi10 Signed-off-by: shaoyunl Reviewed-by: Jack Xiao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index f1cba95b1b0b..07eb29885372 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -92,6 +92,7 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev) break; case CHIP_NAVI10: case CHIP_NAVI14: + case CHIP_NAVI12: kfd2kgd = amdgpu_amdkfd_gfx_10_0_get_functions(); break; default: -- cgit v1.2.3 From af655cc5aae800f30cd71c5d72d36bd3ecb1e7df Mon Sep 17 00:00:00 2001 From: Thong Thai Date: Thu, 25 Jul 2019 11:21:58 -0400 Subject: drm/amd/amdgpu/vcn_v2_0: Mark RB commands as KMD commands MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Sets the CMD_SOURCE bit for VCN 2.0 Decoder Ring Buffer commands. This bit was previously set by the RBC HW on older firmware. Newer firmware uses a SW RBC and this bit has to be set by the driver. 
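The fix described above is mechanical but easy to misread in the diff: every decode ring command word gains a marker saying it was issued by the kernel driver. The hunks that follow define VCN_DEC_KMD_CMD as 0x80000000 (bit 31) and OR it into the shifted command code, for example VCN_DEC_KMD_CMD | (VCN_DEC_CMD_FENCE << 1). A small sketch of the resulting encodings, using the command codes from the amdgpu_vcn.h context below:

#include <stdint.h>
#include <stdio.h>

#define VCN_DEC_KMD_CMD		0x80000000u
#define VCN_DEC_CMD_FENCE	0x00000000u
#define VCN_DEC_CMD_TRAP	0x00000001u
#define VCN_DEC_CMD_WRITE_REG	0x00000004u

/* Shift the command code and tag it as issued by the kernel driver. */
static uint32_t kmd_cmd(uint32_t cmd)
{
	return VCN_DEC_KMD_CMD | (cmd << 1);
}

int main(void)
{
	printf("fence: 0x%08x\n", kmd_cmd(VCN_DEC_CMD_FENCE));		/* 0x80000000 */
	printf("trap:  0x%08x\n", kmd_cmd(VCN_DEC_CMD_TRAP));		/* 0x80000002 */
	printf("wreg:  0x%08x\n", kmd_cmd(VCN_DEC_CMD_WRITE_REG));	/* 0x80000008 */
	return 0;
}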
Signed-off-by: Thong Thai Reviewed-by: Leo Liu Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 1 + drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 12 ++++++------ 2 files changed, 7 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index 99f14fcc1460..19661c645703 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -30,6 +30,7 @@ #define AMDGPU_VCN_FIRMWARE_OFFSET 256 #define AMDGPU_VCN_MAX_ENC_RINGS 3 +#define VCN_DEC_KMD_CMD 0x80000000 #define VCN_DEC_CMD_FENCE 0x00000000 #define VCN_DEC_CMD_TRAP 0x00000001 #define VCN_DEC_CMD_WRITE_REG 0x00000004 diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index 1cfc2620b2dd..46593e323e77 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -1485,7 +1485,7 @@ static void vcn_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring) amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET, 0)); amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0)); - amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_START << 1); + amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1)); } /** @@ -1498,7 +1498,7 @@ static void vcn_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring) static void vcn_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring) { amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0)); - amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_END << 1); + amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_END << 1)); } /** @@ -1543,7 +1543,7 @@ static void vcn_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff); amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0)); - amdgpu_ring_write(ring, VCN_DEC_CMD_FENCE << 1); + amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_FENCE << 1)); amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET, 0)); amdgpu_ring_write(ring, 0); @@ -1553,7 +1553,7 @@ static void vcn_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0)); - amdgpu_ring_write(ring, VCN_DEC_CMD_TRAP << 1); + amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_TRAP << 1)); } /** @@ -1597,7 +1597,7 @@ static void vcn_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0)); - amdgpu_ring_write(ring, VCN_DEC_CMD_REG_READ_COND_WAIT << 1); + amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_REG_READ_COND_WAIT << 1)); } static void vcn_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring, @@ -1626,7 +1626,7 @@ static void vcn_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring, amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0)); - amdgpu_ring_write(ring, VCN_DEC_CMD_WRITE_REG << 1); + amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_WRITE_REG << 1)); } /** -- cgit v1.2.3 From d1836f3813ee0742a2067d5f4d78e811d2b76d9d Mon Sep 17 00:00:00 2001 From: Thong Thai Date: Thu, 25 Jul 2019 11:26:56 -0400 Subject: drm/amd/amdgpu/vcn_v2_0: Move VCN 2.0 specific dec ring test to vcn_v2_0 VCN 2.0 firmware now requires a packet start command to be sent before any other decode ring buffer command. 
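The relocated ring test in the next hunk follows the classic scratch-register handshake, with the VCN 2.0 twist that a KMD-tagged PACKET_START has to be queued before the scratch write. Stripped of the amdgpu ring plumbing, the handshake itself is just seed, submit, poll; a toy model (purely conceptual, not the driver function):

#include <stdint.h>
#include <stdio.h>

/* Toy model of the scratch-register handshake: seed a scratch register,
 * ask the engine to overwrite it, then check for the new value (the real
 * driver polls with a timeout instead of calling the engine directly). */
static uint32_t scratch;

static void engine_executes_ring(uint32_t value)
{
	/* Stands in for the GPU consuming the KMD PACKET_START plus the
	 * register write that the new ring test queues. */
	scratch = value;
}

int main(void)
{
	scratch = 0xCAFEDEAD;			/* seed               */
	engine_executes_ring(0xDEADBEEF);	/* submit and execute */

	if (scratch == 0xDEADBEEF)
		printf("ring test passed\n");
	else
		printf("ring test timed out\n");
	return 0;
}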
Signed-off-by: Thong Thai Reviewed-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 32 +++++++++++++++++++++++++++++++- 1 file changed, 31 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index 46593e323e77..dfde886cc6bd 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -2079,6 +2079,36 @@ static int vcn_v2_0_process_interrupt(struct amdgpu_device *adev, return 0; } +static int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + uint32_t tmp = 0; + unsigned i; + int r; + + WREG32(adev->vcn.external.scratch9, 0xCAFEDEAD); + r = amdgpu_ring_alloc(ring, 4); + if (r) + return r; + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET, 0)); + amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1)); + amdgpu_ring_write(ring, PACKET0(adev->vcn.internal.scratch9, 0)); + amdgpu_ring_write(ring, 0xDEADBEEF); + amdgpu_ring_commit(ring); + for (i = 0; i < adev->usec_timeout; i++) { + tmp = RREG32(adev->vcn.external.scratch9); + if (tmp == 0xDEADBEEF) + break; + DRM_UDELAY(1); + } + + if (i >= adev->usec_timeout) + r = -ETIMEDOUT; + + return r; +} + + static int vcn_v2_0_set_powergating_state(void *handle, enum amd_powergating_state state) { @@ -2142,7 +2172,7 @@ static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = { .emit_ib = vcn_v2_0_dec_ring_emit_ib, .emit_fence = vcn_v2_0_dec_ring_emit_fence, .emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush, - .test_ring = amdgpu_vcn_dec_ring_test_ring, + .test_ring = vcn_v2_0_dec_ring_test_ring, .test_ib = amdgpu_vcn_dec_ring_test_ib, .insert_nop = vcn_v2_0_dec_ring_insert_nop, .insert_start = vcn_v2_0_dec_ring_insert_start, -- cgit v1.2.3 From 6eae41fea75039136707c02cf99431462d590c5f Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Sun, 30 Jun 2019 07:21:03 +0200 Subject: drm/etnaviv: drop use of drmP.h Drop use of the deprecated drmP.h header file. Fix fallout in all .c files. The etnaviv_drv.h header file was made self-contained, and missing includes was then added to the .c files that needed them. In a few cases the list of include files was sorted. 
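The per-file pattern is mechanical; as a representative sketch only (the exact header set naturally differs from file to file):

/* before: one deprecated catch-all header */
#include <drm/drmP.h>

/* after: only the headers the file actually uses */
#include <linux/module.h>
#include <linux/uaccess.h>

#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>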
Signed-off-by: Sam Ravnborg Cc: Lucas Stach Cc: Russell King Cc: Christian Gmeiner Cc: David Airlie Cc: Daniel Vetter Cc: etnaviv@lists.freedesktop.org Signed-off-by: Lucas Stach --- drivers/gpu/drm/etnaviv/etnaviv_buffer.c | 2 ++ drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c | 2 ++ drivers/gpu/drm/etnaviv/etnaviv_drv.c | 8 ++++++++ drivers/gpu/drm/etnaviv/etnaviv_drv.h | 9 ++++----- drivers/gpu/drm/etnaviv/etnaviv_gem.c | 7 +++++-- drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c | 2 ++ drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 5 +++++ drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 2 ++ drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c | 5 +++-- drivers/gpu/drm/etnaviv/etnaviv_mmu.c | 2 ++ 10 files changed, 35 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c index 160ce3c060a5..fe0d2d67007d 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c @@ -3,6 +3,8 @@ * Copyright (C) 2014-2018 Etnaviv Project */ +#include + #include "etnaviv_cmdbuf.h" #include "etnaviv_gpu.h" #include "etnaviv_gem.h" diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c index a3c44f145c1d..bdc35c0d4e79 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c @@ -3,6 +3,8 @@ * Copyright (C) 2017-2018 Etnaviv Project */ +#include + #include #include "etnaviv_cmdbuf.h" diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index 7eb7cf9c3fa8..e823e41f6a6b 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -4,8 +4,16 @@ */ #include +#include #include +#include + +#include +#include +#include +#include #include +#include #include "etnaviv_cmdbuf.h" #include "etnaviv_drv.h" diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h index 8798423705e1..f887a431a11f 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h @@ -6,21 +6,20 @@ #ifndef __ETNAVIV_DRV_H__ #define __ETNAVIV_DRV_H__ -#include #include #include +#include +#include +#include #include #include #include #include +#include #include -#include #include #include -#include -#include -#include #include #include #include diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index e8778ebb72e6..3c5e8760d43c 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -3,10 +3,13 @@ * Copyright (C) 2015-2018 Etnaviv Project */ -#include -#include +#include +#include #include #include +#include +#include +#include #include "etnaviv_drv.h" #include "etnaviv_gem.h" diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c index 00e8b6a817e3..1437dbe319bb 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c @@ -3,7 +3,9 @@ * Copyright (C) 2014-2018 Etnaviv Project */ +#include #include + #include "etnaviv_drv.h" #include "etnaviv_gem.h" diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index 1a636469eeda..8a7cbb44a3d2 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -3,9 +3,14 @@ * Copyright (C) 2015 Etnaviv Project */ +#include #include +#include #include #include +#include +#include + #include 
"etnaviv_cmdbuf.h" #include "etnaviv_drv.h" #include "etnaviv_gpu.h" diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index 5418a1a87b2c..6b1d7fb37c24 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -5,7 +5,9 @@ #include #include +#include #include +#include #include #include #include diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c index f794e04be9e6..d7cc184da571 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c @@ -3,11 +3,12 @@ * Copyright (C) 2016-2018 Etnaviv Project */ +#include +#include #include #include #include -#include -#include +#include #include "etnaviv_cmdbuf.h" #include "etnaviv_gpu.h" diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c index 8069f9f36a2e..731275999a57 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c @@ -3,6 +3,8 @@ * Copyright (C) 2015-2018 Etnaviv Project */ +#include + #include "common.xml.h" #include "etnaviv_cmdbuf.h" #include "etnaviv_drv.h" -- cgit v1.2.3 From facb180d3a519040e877eb3a6810c4fc53320ed7 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Wed, 5 Jun 2019 14:57:02 -0300 Subject: drm/etnaviv: Use devm_platform_ioremap_resource() Use devm_platform_ioremap_resource() to simplify the code a bit. Signed-off-by: Fabio Estevam Signed-off-by: Lucas Stach --- drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index 6b1d7fb37c24..4ea3d12f2fd4 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -1716,7 +1716,6 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct etnaviv_gpu *gpu; - struct resource *res; int err; gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL); @@ -1728,8 +1727,7 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev) mutex_init(&gpu->fence_lock); /* Map registers: */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - gpu->mmio = devm_ioremap_resource(&pdev->dev, res); + gpu->mmio = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(gpu->mmio)) return PTR_ERR(gpu->mmio); -- cgit v1.2.3 From 2e737e520548d4456fc8dfa800ab189e0e4b2939 Mon Sep 17 00:00:00 2001 From: Lucas Stach Date: Thu, 4 Jul 2019 12:43:37 +0200 Subject: drm/etnaviv: clean up includes Drop unused includes, move more includes from the generic etnaviv_drv.h to the units where they are actually used, sort includes. 
Signed-off-by: Lucas Stach Acked-by: Sam Ravnborg --- drivers/gpu/drm/etnaviv/etnaviv_drv.c | 1 + drivers/gpu/drm/etnaviv/etnaviv_drv.h | 8 -------- drivers/gpu/drm/etnaviv/etnaviv_dump.c | 2 ++ drivers/gpu/drm/etnaviv/etnaviv_gem.c | 2 -- drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 1 + drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 4 +++- drivers/gpu/drm/etnaviv/etnaviv_iommu.c | 4 ++-- drivers/gpu/drm/etnaviv/etnaviv_sched.c | 2 +- 8 files changed, 10 insertions(+), 14 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index e823e41f6a6b..9d4404723489 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -5,6 +5,7 @@ #include #include +#include #include #include diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h index f887a431a11f..eabe394c4e25 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h @@ -6,17 +6,9 @@ #ifndef __ETNAVIV_DRV_H__ #define __ETNAVIV_DRV_H__ -#include -#include -#include #include #include -#include -#include -#include -#include #include -#include #include #include diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c index 9a6f5b65488f..55c37379e389 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c @@ -4,6 +4,8 @@ */ #include +#include + #include "etnaviv_cmdbuf.h" #include "etnaviv_dump.h" #include "etnaviv_gem.h" diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index 3c5e8760d43c..727bb3f5ceb2 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -5,8 +5,6 @@ #include #include -#include -#include #include #include #include diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index 8a7cbb44a3d2..f535a627f297 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index 4ea3d12f2fd4..4822549500ee 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -8,8 +8,10 @@ #include #include #include -#include +#include #include +#include +#include #include #include diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c index b163bdbcb880..18c627c5cae1 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c @@ -3,11 +3,11 @@ * Copyright (C) 2014-2018 Etnaviv Project */ +#include +#include #include #include #include -#include -#include #include "etnaviv_gpu.h" #include "etnaviv_mmu.h" diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c index a813c824e154..4ea8df03cd38 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c @@ -3,7 +3,7 @@ * Copyright (C) 2017 Etnaviv Project */ -#include +#include #include "etnaviv_drv.h" #include "etnaviv_dump.h" -- cgit v1.2.3 From 50d84418f58699272a9220f9447bf89f0440c671 Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Fri, 2 Aug 2019 18:40:50 +0000 Subject: drm/i915: Add i915 to i915_inject_probe_failure With i915 added to i915_inject_probe_failure we can use dedicated printk when 
injecting artificial load failure. Also make this function look like other i915 functions that return error code and make it more flexible to return any provided error code instead of previously assumed -ENODEV. Signed-off-by: Michal Wajdeczko Cc: Daniele Ceraolo Spurio Cc: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190802184055.31988-2-michal.wajdeczko@intel.com --- drivers/gpu/drm/i915/display/intel_connector.c | 2 +- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 2 +- drivers/gpu/drm/i915/i915_drv.c | 27 +++++++++++++------------- drivers/gpu/drm/i915/i915_drv.h | 12 +++++++----- drivers/gpu/drm/i915/i915_gem.c | 10 ++++------ drivers/gpu/drm/i915/i915_pci.c | 2 +- drivers/gpu/drm/i915/intel_gvt.c | 2 +- drivers/gpu/drm/i915/intel_uncore.c | 2 +- drivers/gpu/drm/i915/intel_wopcm.c | 2 +- 9 files changed, 31 insertions(+), 30 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c index d0163d86c42a..cf8823ce9606 100644 --- a/drivers/gpu/drm/i915/display/intel_connector.c +++ b/drivers/gpu/drm/i915/display/intel_connector.c @@ -118,7 +118,7 @@ int intel_connector_register(struct drm_connector *connector) if (ret) goto err; - if (i915_inject_probe_failure()) { + if (i915_inject_probe_failure(to_i915(connector->dev))) { ret = -EFAULT; goto err_backlight; } diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 65cbf1d9118d..8bd9a9adf4a5 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -426,7 +426,7 @@ int intel_engines_init_mmio(struct drm_i915_private *i915) WARN_ON(engine_mask & GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES)); - if (i915_inject_probe_failure()) + if (i915_inject_probe_failure(i915)) return -ENODEV; for (i = 0; i < ARRAY_SIZE(intel_engines); i++) { diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index d6178f5c6b50..21f1b29d06a2 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -83,19 +83,20 @@ static struct drm_driver driver; #if IS_ENABLED(CONFIG_DRM_I915_DEBUG) static unsigned int i915_probe_fail_count; -bool __i915_inject_probe_failure(const char *func, int line) +int __i915_inject_load_error(struct drm_i915_private *i915, int err, + const char *func, int line) { if (i915_probe_fail_count >= i915_modparams.inject_load_failure) - return false; + return 0; - if (++i915_probe_fail_count == i915_modparams.inject_load_failure) { - DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n", - i915_modparams.inject_load_failure, func, line); - i915_modparams.inject_load_failure = 0; - return true; - } + if (++i915_probe_fail_count < i915_modparams.inject_load_failure) + return 0; - return false; + __i915_printk(i915, KERN_INFO, + "Injecting failure %d at checkpoint %u [%s:%d]\n", + err, i915_modparams.inject_load_failure, func, line); + i915_modparams.inject_load_failure = 0; + return err; } bool i915_error_injected(void) @@ -687,7 +688,7 @@ static int i915_driver_modeset_probe(struct drm_device *dev) struct pci_dev *pdev = dev_priv->drm.pdev; int ret; - if (i915_inject_probe_failure()) + if (i915_inject_probe_failure(dev_priv)) return -ENODEV; if (HAS_DISPLAY(dev_priv)) { @@ -894,7 +895,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv) { int ret = 0; - if (i915_inject_probe_failure()) + if 
(i915_inject_probe_failure(dev_priv)) return -ENODEV; intel_device_info_subplatform_init(dev_priv); @@ -985,7 +986,7 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv) { int ret; - if (i915_inject_probe_failure()) + if (i915_inject_probe_failure(dev_priv)) return -ENODEV; if (i915_get_bridge_dev(dev_priv)) @@ -1530,7 +1531,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) struct pci_dev *pdev = dev_priv->drm.pdev; int ret; - if (i915_inject_probe_failure()) + if (i915_inject_probe_failure(dev_priv)) return -ENODEV; intel_device_info_runtime_init(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 7ca95136d302..98045875ebba 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -122,19 +122,21 @@ #if IS_ENABLED(CONFIG_DRM_I915_DEBUG) -bool __i915_inject_probe_failure(const char *func, int line); -#define i915_inject_probe_failure() \ - __i915_inject_probe_failure(__func__, __LINE__) - +int __i915_inject_load_error(struct drm_i915_private *i915, int err, + const char *func, int line); +#define i915_inject_load_error(_i915, _err) \ + __i915_inject_load_error((_i915), (_err), __func__, __LINE__) bool i915_error_injected(void); #else -#define i915_inject_probe_failure() false +#define i915_inject_load_error(_i915, _err) 0 #define i915_error_injected() false #endif +#define i915_inject_probe_failure(i915) i915_inject_load_error((i915), -ENODEV) + #define i915_probe_error(i915, fmt, ...) \ __i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \ fmt, ##__VA_ARGS__) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index e779dba2e5a3..5e87acc4b770 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1520,15 +1520,13 @@ int i915_gem_init(struct drm_i915_private *dev_priv) if (ret) goto err_gt; - if (i915_inject_probe_failure()) { - ret = -ENODEV; + ret = i915_inject_load_error(dev_priv, -ENODEV); + if (ret) goto err_gt; - } - if (i915_inject_probe_failure()) { - ret = -EIO; + ret = i915_inject_load_error(dev_priv, -EIO); + if (ret) goto err_gt; - } intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); mutex_unlock(&dev_priv->drm.struct_mutex); diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index a7e1cde4a6d9..20e562ced8cc 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -959,7 +959,7 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) return err; - if (i915_inject_probe_failure()) { + if (i915_inject_probe_failure(to_i915(pci_get_drvdata(pdev)))) { i915_pci_remove(pdev); return -ENODEV; } diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c index c66b2d8a6219..2b6c016387c2 100644 --- a/drivers/gpu/drm/i915/intel_gvt.c +++ b/drivers/gpu/drm/i915/intel_gvt.c @@ -95,7 +95,7 @@ int intel_gvt_init(struct drm_i915_private *dev_priv) { int ret; - if (i915_inject_probe_failure()) + if (i915_inject_probe_failure(dev_priv)) return -ENODEV; if (!i915_modparams.enable_gvt) { diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 4e9c15b60e93..773dc0a79577 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -1331,7 +1331,7 @@ static int __fw_domain_init(struct intel_uncore *uncore, GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT); GEM_BUG_ON(uncore->fw_domain[domain_id]); - if 
(i915_inject_probe_failure()) + if (i915_inject_probe_failure(uncore->i915)) return -ENOMEM; d = kzalloc(sizeof(*d), GFP_KERNEL); diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c index d9973c0b0384..291881937d97 100644 --- a/drivers/gpu/drm/i915/intel_wopcm.c +++ b/drivers/gpu/drm/i915/intel_wopcm.c @@ -177,7 +177,7 @@ int intel_wopcm_init(struct intel_wopcm *wopcm) GEM_BUG_ON(!wopcm->size); - if (i915_inject_probe_failure()) + if (i915_inject_probe_failure(i915)) return -E2BIG; if (guc_fw_size >= wopcm->size) { -- cgit v1.2.3 From 771051eaa74661f63d893589137f5d70515266c0 Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Fri, 2 Aug 2019 18:40:51 +0000 Subject: drm/i915/uc: Do full sanitize instead of pure reset On Gen9 when we try to reload HuC due to GuC upload error, we hit: <7> [229.656688] [drm:intel_uc_init_hw [i915]] GuC fw load failed: -8; will reset and retry 2 more time(s) <7> [229.656739] [drm:intel_uc_fw_upload [i915]] HuC fw load i915/kbl_huc_ver02_00_1810.bin <3> [229.656740] intel_uc_fw_upload:425 GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw)) as we performed only pure reset and didn't sanitized HuC fw status. Signed-off-by: Michal Wajdeczko Cc: Daniele Ceraolo Spurio Cc: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190802184055.31988-3-michal.wajdeczko@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_uc.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index d1b08b28b1ad..f24860a86d0e 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -340,7 +340,7 @@ void intel_uc_fini(struct intel_uc *uc) intel_guc_fini(guc); } -static void __uc_sanitize(struct intel_uc *uc) +static int __uc_sanitize(struct intel_uc *uc) { struct intel_guc *guc = &uc->guc; struct intel_huc *huc = &uc->huc; @@ -350,7 +350,7 @@ static void __uc_sanitize(struct intel_uc *uc) intel_huc_sanitize(huc); intel_guc_sanitize(guc); - __intel_uc_reset_hw(uc); + return __intel_uc_reset_hw(uc); } void intel_uc_sanitize(struct intel_uc *uc) @@ -434,7 +434,7 @@ int intel_uc_init_hw(struct intel_uc *uc) * Always reset the GuC just before (re)loading, so * that the state and timing are fairly predictable */ - ret = __intel_uc_reset_hw(uc); + ret = __uc_sanitize(uc); if (ret) goto err_out; -- cgit v1.2.3 From 3243bd096bb1656dfab7a1f3699d4d94692fd14c Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Fri, 2 Aug 2019 18:40:52 +0000 Subject: drm/i915/uc: Reorder firmware status codes On Gen9 when we try to reload HuC due to GuC upload error, we hit: <7> [232.025927] [drm:intel_uc_init_hw [i915]] GuC fw load failed: -8; will reset and retry 2 more time(s) <7> [232.026004] [drm:intel_uc_fw_upload [i915]] HuC fw load i915/kbl_huc_ver02_00_1810.bin <7> [232.026686] [drm:intel_uc_fw_upload [i915]] HuC fw xfer completed <6> [232.026688] [drm] HuC: Loaded firmware i915/kbl_huc_ver02_00_1810.bin (version 2.0) <3> [232.026703] intel_uc_fw_copy_rsa:541 GEM_BUG_ON(!intel_uc_fw_is_available(uc_fw)) as firmware that previously failed to load was wrongly treated as unavailable since its status code was not matching status check logic. 
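The ordering matters because the status checks are plain integer comparisons against the enum values; roughly, the checks have this shape (the helpers below are only a sketch, not a verbatim copy):

/* With the old ordering FAIL (-3) and MISSING (-2) sat below AVAILABLE, so a
 * ">= AVAILABLE" comparison treated a blob whose *transfer* failed as if it
 * had never been fetched at all - hence the GEM_BUG_ON on retry.  With FAIL
 * moved above AVAILABLE, a failed load still counts as "available": the
 * firmware image is still resident in memory and can be re-uploaded. */
static inline bool uc_fw_is_available(enum intel_uc_fw_status status)
{
	return status >= INTEL_UC_FIRMWARE_AVAILABLE;
}

static inline bool uc_fw_is_loaded(enum intel_uc_fw_status status)
{
	return status >= INTEL_UC_FIRMWARE_TRANSFERRED;
}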
Signed-off-by: Michal Wajdeczko Cc: Daniele Ceraolo Spurio Cc: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190802184055.31988-4-michal.wajdeczko@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h index 6b64b8073703..bfe3614613b7 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h @@ -37,12 +37,12 @@ struct intel_gt; #define INTEL_UC_FIRMWARE_URL "https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/tree/i915" enum intel_uc_fw_status { - INTEL_UC_FIRMWARE_FAIL = -3, /* failed to xfer or init/auth the fw */ - INTEL_UC_FIRMWARE_MISSING = -2, /* blob not found on the system */ - INTEL_UC_FIRMWARE_NOT_SUPPORTED = -1, /* no uc HW */ + INTEL_UC_FIRMWARE_NOT_SUPPORTED = -1, /* no uc HW or disabled */ INTEL_UC_FIRMWARE_UNINITIALIZED = 0, /* used to catch checks done too early */ INTEL_UC_FIRMWARE_SELECTED, /* selected the blob we want to load */ + INTEL_UC_FIRMWARE_MISSING, /* blob not found on the system */ INTEL_UC_FIRMWARE_AVAILABLE, /* blob found and copied in mem */ + INTEL_UC_FIRMWARE_FAIL, /* failed to xfer or init/auth the fw */ INTEL_UC_FIRMWARE_TRANSFERRED, /* dma xfer done */ INTEL_UC_FIRMWARE_RUNNING /* init/auth done */ }; @@ -83,18 +83,18 @@ static inline const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status) { switch (status) { - case INTEL_UC_FIRMWARE_FAIL: - return "FAIL"; - case INTEL_UC_FIRMWARE_MISSING: - return "MISSING"; case INTEL_UC_FIRMWARE_NOT_SUPPORTED: return "N/A"; case INTEL_UC_FIRMWARE_UNINITIALIZED: return "UNINITIALIZED"; case INTEL_UC_FIRMWARE_SELECTED: return "SELECTED"; + case INTEL_UC_FIRMWARE_MISSING: + return "MISSING"; case INTEL_UC_FIRMWARE_AVAILABLE: return "AVAILABLE"; + case INTEL_UC_FIRMWARE_FAIL: + return "FAIL"; case INTEL_UC_FIRMWARE_TRANSFERRED: return "TRANSFERRED"; case INTEL_UC_FIRMWARE_RUNNING: -- cgit v1.2.3 From 32ff76e80c2400c511b37a498eead9caba5656b8 Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Fri, 2 Aug 2019 18:40:53 +0000 Subject: drm/i915/uc: Move GuC error log to uc and release it on fini When we fail to load GuC and want to abort probe, we hit: <7> [229.915779] i915 0000:00:02.0: [drm:intel_uc_init_hw [i915]] GuC initialization failed -6 <7> [229.915813] i915 0000:00:02.0: [drm:i915_gem_init_hw [i915]] Enabling uc failed (-6) <4> [229.953354] ------------[ cut here ]------------ <4> [229.953355] WARN_ON(dev_priv->mm.shrink_count) <4> [229.953406] WARNING: CPU: 9 PID: 3287 at drivers/gpu/drm/i915/i915_gem.c:1684 i915_gem_cleanup_early+0xfc/0x110 [i915] <4> [229.953464] Call Trace: <4> [229.953489] i915_driver_late_release+0x19/0x60 [i915] <4> [229.953514] i915_driver_probe+0xb82/0x18a0 [i915] <4> [229.953519] ? __pm_runtime_resume+0x4f/0x80 <4> [229.953545] i915_pci_probe+0x43/0x1b0 [i915] ... <4> [229.962951] ------------[ cut here ]------------ <4> [229.962956] DEBUG_LOCKS_WARN_ON(lock->magic != lock) <4> [229.962959] WARNING: CPU: 8 PID: 2395 at kernel/locking/mutex.c:912 __mutex_lock+0x750/0x9b0 <4> [229.963091] Call Trace: <4> [229.963129] ? i915_vma_destroy+0x86/0x350 [i915] <4> [229.963166] ? 
i915_vma_destroy+0x86/0x350 [i915] <4> [229.963201] i915_vma_destroy+0x86/0x350 [i915] <4> [229.963236] __i915_gem_free_objects+0xb8/0x510 [i915] <4> [229.963270] __i915_gem_free_work+0x5a/0x90 [i915] <4> [229.963275] process_one_work+0x245/0x610 as since commit 6f76098fe0f3 ("drm/i915/uc: Move uC early functions inside the GT ones") we cleanup uc after gem. Move captured GuC load error log to uc struct and release it in intel_uc_fini() instead of intel_uc_driver_late_release() Note that intel_uc_driver_late_release() is now empty, but we can leave it as a placeholder for future code. Signed-off-by: Michal Wajdeczko Cc: Daniele Ceraolo Spurio Cc: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190802184055.31988-5-michal.wajdeczko@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_guc.h | 3 --- drivers/gpu/drm/i915/gt/uc/intel_uc.c | 26 ++++++++++++-------------- drivers/gpu/drm/i915/gt/uc/intel_uc.h | 3 +++ drivers/gpu/drm/i915/i915_debugfs.c | 2 +- 4 files changed, 16 insertions(+), 18 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h index 6edb29b9ceaa..cc035c9781ae 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h @@ -47,9 +47,6 @@ struct intel_guc { struct intel_guc_log log; struct intel_guc_ct ct; - /* Log snapshot if GuC errors during load */ - struct drm_i915_gem_object *load_err_log; - /* intel_guc_recv interrupt related state */ spinlock_t irq_lock; unsigned int msg_enabled_mask; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index f24860a86d0e..e5421c0b9a25 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -31,8 +31,6 @@ #include "i915_drv.h" -static void guc_free_load_err_log(struct intel_guc *guc); - /* Reset GuC providing us with fresh state for both GuC and HuC. 
*/ static int __intel_uc_reset_hw(struct intel_uc *uc) @@ -103,7 +101,6 @@ void intel_uc_init_early(struct intel_uc *uc) void intel_uc_driver_late_release(struct intel_uc *uc) { - guc_free_load_err_log(&uc->guc); } /** @@ -118,21 +115,20 @@ void intel_uc_init_mmio(struct intel_uc *uc) intel_guc_init_send_regs(&uc->guc); } -static void guc_capture_load_err_log(struct intel_guc *guc) +static void __uc_capture_load_err_log(struct intel_uc *uc) { - if (!guc->log.vma || !intel_guc_log_get_level(&guc->log)) - return; - - if (!guc->load_err_log) - guc->load_err_log = i915_gem_object_get(guc->log.vma->obj); + struct intel_guc *guc = &uc->guc; - return; + if (guc->log.vma && !uc->load_err_log) + uc->load_err_log = i915_gem_object_get(guc->log.vma->obj); } -static void guc_free_load_err_log(struct intel_guc *guc) +static void __uc_free_load_err_log(struct intel_uc *uc) { - if (guc->load_err_log) - i915_gem_object_put(guc->load_err_log); + struct drm_i915_gem_object *log = fetch_and_zero(&uc->load_err_log); + + if (log) + i915_gem_object_put(log); } /* @@ -338,6 +334,8 @@ void intel_uc_fini(struct intel_uc *uc) intel_huc_fini(&uc->huc); intel_guc_fini(guc); + + __uc_free_load_err_log(uc); } static int __uc_sanitize(struct intel_uc *uc) @@ -493,7 +491,7 @@ int intel_uc_init_hw(struct intel_uc *uc) err_communication: guc_disable_communication(guc); err_log_capture: - guc_capture_load_err_log(guc); + __uc_capture_load_err_log(uc); err_out: __uc_sanitize(uc); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_uc.h index 0cca839422e2..cf9ee3c27877 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.h @@ -31,6 +31,9 @@ struct intel_uc { struct intel_guc guc; struct intel_huc huc; + + /* Snapshot of GuC log from last failed load */ + struct drm_i915_gem_object *load_err_log; }; void intel_uc_init_early(struct intel_uc *uc); diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 6dbd85b38759..461a8dd4cc47 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -2066,7 +2066,7 @@ static int i915_guc_log_dump(struct seq_file *m, void *data) return -ENODEV; if (dump_load_err) - obj = dev_priv->gt.uc.guc.load_err_log; + obj = dev_priv->gt.uc.load_err_log; else if (dev_priv->gt.uc.guc.log.vma) obj = dev_priv->gt.uc.guc.log.vma->obj; -- cgit v1.2.3 From 5d1ef2b4270de45e1b1b40a00838e3b6196eefd7 Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Fri, 2 Aug 2019 18:40:54 +0000 Subject: drm/i915/uc: Inject probe errors into intel_uc_init_hw Inject probe errors into intel_uc_init_hw to make sure we correctly handle any uC initialization failure. To avoid complains from CI about injected errors use i915_probe_error to lower message level. 
v4: rebased after moving hot fixes moved to separate patches Signed-off-by: Michal Wajdeczko Cc: Daniele Ceraolo Spurio Cc: Chris Wilson Reviewed-by: Chris Wilson #v1 Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190802184055.31988-6-michal.wajdeczko@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 4 ++++ drivers/gpu/drm/i915/gt/uc/intel_huc.c | 8 +++++--- drivers/gpu/drm/i915/gt/uc/intel_uc.c | 14 +++++++++++++- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 18 ++++++++++++------ drivers/gpu/drm/i915/i915_gem.c | 2 +- 5 files changed, 35 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index b4b508f19a1c..412892096daa 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -1123,6 +1123,10 @@ int intel_guc_submission_enable(struct intel_guc *guc) enum intel_engine_id id; int err; + err = i915_inject_load_error(gt->i915, -ENXIO); + if (err) + return err; + /* * We're using GuC work items for submitting work through GuC. Since * we're coalescing multiple requests from a single context into a diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c index d642b167a389..ef54053c5ef9 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c @@ -139,6 +139,10 @@ int intel_huc_auth(struct intel_huc *huc) GEM_BUG_ON(!intel_uc_fw_is_loaded(&huc->fw)); GEM_BUG_ON(intel_huc_is_authenticated(huc)); + ret = i915_inject_load_error(gt->i915, -ENXIO); + if (ret) + goto fail; + ret = intel_guc_auth_huc(guc, intel_guc_ggtt_offset(guc, huc->rsa_data)); if (ret) { @@ -158,13 +162,11 @@ int intel_huc_auth(struct intel_huc *huc) } huc->fw.status = INTEL_UC_FIRMWARE_RUNNING; - return 0; fail: + i915_probe_error(gt->i915, "HuC: Authentication failed %d\n", ret); huc->fw.status = INTEL_UC_FIRMWARE_FAIL; - - DRM_ERROR("HuC: Authentication failed %d\n", ret); return ret; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index e5421c0b9a25..66d6e0775c23 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -39,6 +39,10 @@ static int __intel_uc_reset_hw(struct intel_uc *uc) int ret; u32 guc_status; + ret = i915_inject_load_error(gt->i915, -ENXIO); + if (ret) + return ret; + ret = intel_reset_guc(gt); if (ret) { DRM_ERROR("Failed to reset GuC, ret = %d\n", ret); @@ -205,6 +209,10 @@ static int guc_enable_communication(struct intel_guc *guc) GEM_BUG_ON(guc_communication_enabled(guc)); + ret = i915_inject_load_error(i915, -ENXIO); + if (ret) + return ret; + ret = intel_guc_ct_enable(&guc->ct); if (ret) return ret; @@ -376,6 +384,10 @@ static int uc_init_wopcm(struct intel_uc *uc) GEM_BUG_ON(!(size & GUC_WOPCM_SIZE_MASK)); GEM_BUG_ON(size & ~GUC_WOPCM_SIZE_MASK); + err = i915_inject_load_error(gt->i915, -ENXIO); + if (err) + return err; + mask = GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED; err = intel_uncore_write_and_verify(uncore, GUC_WOPCM_SIZE, size, mask, size | GUC_WOPCM_SIZE_LOCKED); @@ -502,7 +514,7 @@ err_out: if (GEM_WARN_ON(ret == -EIO)) ret = -EINVAL; - dev_err(i915->drm.dev, "GuC initialization failed %d\n", ret); + i915_probe_error(i915, "GuC initialization failed %d\n", ret); return ret; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c index 
650ad6037b74..a3a22a26016c 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -383,6 +383,10 @@ static int uc_fw_xfer(struct intel_uc_fw *uc_fw, struct intel_gt *gt, u64 offset; int ret; + ret = i915_inject_load_error(gt->i915, -ETIMEDOUT); + if (ret) + return ret; + intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); /* Set the source address for the uCode */ @@ -443,8 +447,13 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, struct intel_gt *gt, /* make sure the status was cleared the last time we reset the uc */ GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw)); + err = i915_inject_load_error(gt->i915, -ENOEXEC); + if (err) + return err; + if (!intel_uc_fw_is_available(uc_fw)) return -ENOEXEC; + /* Call custom loader */ intel_uc_fw_ggtt_bind(uc_fw, gt); err = uc_fw_xfer(uc_fw, gt, wopcm_offset, dma_flags); @@ -464,13 +473,10 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, struct intel_gt *gt, return 0; fail: + i915_probe_error(gt->i915, "Failed to load %s firmware %s (%d)\n", + intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, + err); uc_fw->status = INTEL_UC_FIRMWARE_FAIL; - DRM_DEBUG_DRIVER("%s fw load failed\n", - intel_uc_fw_type_repr(uc_fw->type)); - - DRM_WARN("%s: Failed to load firmware %s (error %d)\n", - intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err); - return err; } diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 5e87acc4b770..2436cd598e6e 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1243,7 +1243,7 @@ int i915_gem_init_hw(struct drm_i915_private *i915) /* We can't enable contexts until all firmware is loaded */ ret = intel_uc_init_hw(>->uc); if (ret) { - DRM_ERROR("Enabling uc failed (%d)\n", ret); + i915_probe_error(i915, "Enabling uc failed (%d)\n", ret); goto out; } -- cgit v1.2.3 From 6bd0fbe156f1cc5dc97590ce994d1848f593569e Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Fri, 2 Aug 2019 18:40:55 +0000 Subject: drm/i915/wopcm: Don't fail on WOPCM partitioning failure We don't have to immediately fail on WOPCM partitioning, we can wait until we will start programming WOPCM registers. This should give us more options if we decide to restore fallback in case of GuC failures. 
v3: rebased Signed-off-by: Michal Wajdeczko Cc: Daniele Ceraolo Spurio Cc: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190802184055.31988-7-michal.wajdeczko@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_uc.c | 5 +++++ drivers/gpu/drm/i915/i915_gem.c | 6 +----- drivers/gpu/drm/i915/intel_wopcm.c | 28 ++++++++++++++-------------- drivers/gpu/drm/i915/intel_wopcm.h | 2 +- 4 files changed, 21 insertions(+), 20 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index 66d6e0775c23..fe526698eee2 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -378,6 +378,11 @@ static int uc_init_wopcm(struct intel_uc *uc) u32 mask; int err; + if (unlikely(!base || !size)) { + i915_probe_error(gt->i915, "Unsuccessful WOPCM partitioning\n"); + return -E2BIG; + } + GEM_BUG_ON(!intel_uc_supports_guc(uc)); GEM_BUG_ON(!(base & GUC_WOPCM_OFFSET_MASK)); GEM_BUG_ON(base & ~GUC_WOPCM_OFFSET_MASK); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 2436cd598e6e..deaca3c2416d 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1441,10 +1441,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv) return ret; intel_uc_fetch_firmwares(&dev_priv->gt.uc); - - ret = intel_wopcm_init(&dev_priv->wopcm); - if (ret) - goto err_uc_fw; + intel_wopcm_init(&dev_priv->wopcm); /* This is just a security blanket to placate dragons. * On some systems, we very sporadically observe that the first TLBs @@ -1568,7 +1565,6 @@ err_unlock: intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); mutex_unlock(&dev_priv->drm.struct_mutex); -err_uc_fw: intel_uc_cleanup_firmwares(&dev_priv->gt.uc); if (ret != -EIO) { diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c index 291881937d97..4c22143ee84f 100644 --- a/drivers/gpu/drm/i915/intel_wopcm.c +++ b/drivers/gpu/drm/i915/intel_wopcm.c @@ -156,12 +156,10 @@ static inline int check_hw_restriction(struct drm_i915_private *i915, * This function will partition WOPCM space based on GuC and HuC firmware sizes * and will allocate max remaining for use by GuC. This function will also * enforce platform dependent hardware restrictions on GuC WOPCM offset and - * size. It will fail the WOPCM init if any of these checks were failed, so that - * the following GuC firmware uploading would be aborted. - * - * Return: 0 on success, non-zero error code on failure. + * size. It will fail the WOPCM init if any of these checks fail, so that the + * following WOPCM registers setup and GuC firmware uploading would be aborted. 
*/ -int intel_wopcm_init(struct intel_wopcm *wopcm) +void intel_wopcm_init(struct intel_wopcm *wopcm) { struct drm_i915_private *i915 = wopcm_to_i915(wopcm); u32 guc_fw_size = intel_uc_fw_get_upload_size(&i915->gt.uc.guc.fw); @@ -173,23 +171,25 @@ int intel_wopcm_init(struct intel_wopcm *wopcm) int err; if (!USES_GUC(i915)) - return 0; + return; GEM_BUG_ON(!wopcm->size); + GEM_BUG_ON(wopcm->guc.base); + GEM_BUG_ON(wopcm->guc.size); if (i915_inject_probe_failure(i915)) - return -E2BIG; + return; if (guc_fw_size >= wopcm->size) { DRM_ERROR("GuC FW (%uKiB) is too big to fit in WOPCM.", guc_fw_size / 1024); - return -E2BIG; + return; } if (huc_fw_size >= wopcm->size) { DRM_ERROR("HuC FW (%uKiB) is too big to fit in WOPCM.", huc_fw_size / 1024); - return -E2BIG; + return; } guc_wopcm_base = ALIGN(huc_fw_size + WOPCM_RESERVED_SIZE, @@ -197,7 +197,7 @@ int intel_wopcm_init(struct intel_wopcm *wopcm) if ((guc_wopcm_base + ctx_rsvd) >= wopcm->size) { DRM_ERROR("GuC WOPCM base (%uKiB) is too big.\n", guc_wopcm_base / 1024); - return -E2BIG; + return; } guc_wopcm_size = wopcm->size - guc_wopcm_base - ctx_rsvd; @@ -211,16 +211,16 @@ int intel_wopcm_init(struct intel_wopcm *wopcm) DRM_ERROR("Need %uKiB WOPCM for GuC, %uKiB available.\n", (guc_fw_size + guc_wopcm_rsvd) / 1024, guc_wopcm_size / 1024); - return -E2BIG; + return; } err = check_hw_restriction(i915, guc_wopcm_base, guc_wopcm_size, huc_fw_size); if (err) - return err; + return; wopcm->guc.base = guc_wopcm_base; wopcm->guc.size = guc_wopcm_size; - - return 0; + GEM_BUG_ON(!wopcm->guc.base); + GEM_BUG_ON(!wopcm->guc.size); } diff --git a/drivers/gpu/drm/i915/intel_wopcm.h b/drivers/gpu/drm/i915/intel_wopcm.h index f9b603205bb1..17d6aa86008a 100644 --- a/drivers/gpu/drm/i915/intel_wopcm.h +++ b/drivers/gpu/drm/i915/intel_wopcm.h @@ -55,6 +55,6 @@ static inline u32 intel_wopcm_guc_size(struct intel_wopcm *wopcm) } void intel_wopcm_init_early(struct intel_wopcm *wopcm); -int intel_wopcm_init(struct intel_wopcm *wopcm); +void intel_wopcm_init(struct intel_wopcm *wopcm); #endif -- cgit v1.2.3 From 1aff1903d0ff53f055088a77948ac8d8224d42db Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 2 Aug 2019 22:21:36 +0100 Subject: drm/i915: Hide unshrinkable context objects from the shrinker The shrinker cannot touch objects used by the contexts (logical state and ring). Currently we mark those as "pin_global" to let the shrinker skip over them, however, if we remove them from the shrinker lists entirely, we don't event have to include them in our shrink accounting. By keeping the unshrinkable objects in our shrinker tracking, we report a large number of objects available to be shrunk, and leave the shrinker deeply unsatisfied when we fail to reclaim those. The shrinker will persist in trying to reclaim the unavailable objects, forcing the system into a livelock (not even hitting the dread oomkiller). v2: Extend unshrinkable protection for perma-pinned scratch and guc allocations (Tvrtko) v3: Notice that we should be pinned when marking unshrinkable and so the link cannot be empty; merge duplicate paths. 
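The usage pattern this introduces is symmetric around pinning; condensed (function names as added by the patch, pin flags and the surrounding context plumbing are illustrative):

/* context image / ring buffer pin path */
err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (err)
	return err;
i915_vma_make_unshrinkable(vma);   /* drop it from i915->mm.shrink_list */

/* ... while in use by the GPU the shrinker never even sees the object ... */

/* unpin path */
__i915_vma_unpin(vma);
i915_vma_make_shrinkable(vma);     /* back on the shrink list */
/* (rings use i915_vma_make_purgeable() instead, since their contents
 *  do not need to survive) */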
Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190802212137.22207-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_object.c | 11 ++---- drivers/gpu/drm/i915/gem/i915_gem_object.h | 4 ++ drivers/gpu/drm/i915/gem/i915_gem_pages.c | 13 +------ drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 58 ++++++++++++++++++++++++++++ drivers/gpu/drm/i915/gt/intel_context.c | 4 +- drivers/gpu/drm/i915/gt/intel_gt.c | 3 +- drivers/gpu/drm/i915/gt/intel_ringbuffer.c | 17 ++++---- drivers/gpu/drm/i915/gt/uc/intel_guc.c | 2 +- drivers/gpu/drm/i915/i915_debugfs.c | 3 +- drivers/gpu/drm/i915/i915_vma.c | 16 ++++++++ drivers/gpu/drm/i915/i915_vma.h | 4 ++ 11 files changed, 102 insertions(+), 33 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index d5197a2a106f..4ea97fca9c35 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -63,6 +63,8 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, spin_lock_init(&obj->vma.lock); INIT_LIST_HEAD(&obj->vma.list); + INIT_LIST_HEAD(&obj->mm.link); + INIT_LIST_HEAD(&obj->lut_list); INIT_LIST_HEAD(&obj->batch_pool_link); @@ -273,14 +275,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) * or else we may oom whilst there are plenty of deferred * freed objects. */ - if (i915_gem_object_has_pages(obj) && - i915_gem_object_is_shrinkable(obj)) { - unsigned long flags; - - spin_lock_irqsave(&i915->mm.obj_lock, flags); - list_del_init(&obj->mm.link); - spin_unlock_irqrestore(&i915->mm.obj_lock, flags); - } + i915_gem_object_make_unshrinkable(obj); /* * Since we require blocking on struct_mutex to unbind the freed diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index 67aea07ea019..3714cf234d64 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -394,6 +394,10 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, unsigned int flags); void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma); +void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj); +void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj); +void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj); + static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj) { if (obj->cache_dirty) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index b36ad269f4ea..92ad3cc220e3 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -153,24 +153,13 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj) struct sg_table * __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj) { - struct drm_i915_private *i915 = to_i915(obj->base.dev); struct sg_table *pages; pages = fetch_and_zero(&obj->mm.pages); if (IS_ERR_OR_NULL(pages)) return pages; - if (i915_gem_object_is_shrinkable(obj)) { - unsigned long flags; - - spin_lock_irqsave(&i915->mm.obj_lock, flags); - - list_del(&obj->mm.link); - i915->mm.shrink_count--; - i915->mm.shrink_memory -= obj->base.size; - - spin_unlock_irqrestore(&i915->mm.obj_lock, flags); - } + i915_gem_object_make_unshrinkable(obj); if (obj->mm.mapping) { void *ptr; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 
b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c index 3f4c6bdcc3c3..5ab7df53c2a0 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c @@ -530,3 +530,61 @@ void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915, if (unlock) mutex_release(&i915->drm.struct_mutex.dep_map, 0, _RET_IP_); } + +#define obj_to_i915(obj__) to_i915((obj__)->base.dev) + +void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj) +{ + /* + * We can only be called while the pages are pinned or when + * the pages are released. If pinned, we should only be called + * from a single caller under controlled conditions; and on release + * only one caller may release us. Neither the two may cross. + */ + if (!list_empty(&obj->mm.link)) { /* pinned by caller */ + struct drm_i915_private *i915 = obj_to_i915(obj); + unsigned long flags; + + spin_lock_irqsave(&i915->mm.obj_lock, flags); + GEM_BUG_ON(list_empty(&obj->mm.link)); + + list_del_init(&obj->mm.link); + i915->mm.shrink_count--; + i915->mm.shrink_memory -= obj->base.size; + + spin_unlock_irqrestore(&i915->mm.obj_lock, flags); + } +} + +static void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj, + struct list_head *head) +{ + GEM_BUG_ON(!i915_gem_object_has_pages(obj)); + GEM_BUG_ON(!list_empty(&obj->mm.link)); + + if (i915_gem_object_is_shrinkable(obj)) { + struct drm_i915_private *i915 = obj_to_i915(obj); + unsigned long flags; + + spin_lock_irqsave(&i915->mm.obj_lock, flags); + GEM_BUG_ON(!kref_read(&obj->base.refcount)); + + list_add_tail(&obj->mm.link, head); + i915->mm.shrink_count++; + i915->mm.shrink_memory += obj->base.size; + + spin_unlock_irqrestore(&i915->mm.obj_lock, flags); + } +} + +void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj) +{ + __i915_gem_object_make_shrinkable(obj, + &obj_to_i915(obj)->mm.shrink_list); +} + +void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj) +{ + __i915_gem_object_make_shrinkable(obj, + &obj_to_i915(obj)->mm.purge_list); +} diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 34c8e37a73b8..c8777e222b12 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -118,7 +118,7 @@ static int __context_pin_state(struct i915_vma *vma) * And mark it as a globally pinned object to let the shrinker know * it cannot reclaim the object until we release it. 
*/ - vma->obj->pin_global++; + i915_vma_make_unshrinkable(vma); vma->obj->mm.dirty = true; return 0; @@ -126,8 +126,8 @@ static int __context_pin_state(struct i915_vma *vma) static void __context_unpin_state(struct i915_vma *vma) { - vma->obj->pin_global--; __i915_vma_unpin(vma); + i915_vma_make_shrinkable(vma); } static void __intel_context_retire(struct i915_active *active) diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index 5a7d1a34e429..75d8c5ee6ecb 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -248,7 +248,8 @@ int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size) if (ret) goto err_unref; - gt->scratch = vma; + gt->scratch = i915_vma_make_unshrinkable(vma); + return 0; err_unref: diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c index 8d24a49e5139..aa2f06b80961 100644 --- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c @@ -1222,7 +1222,7 @@ int intel_ring_pin(struct intel_ring *ring) goto err_ring; } - vma->obj->pin_global++; + i915_vma_make_unshrinkable(vma); GEM_BUG_ON(ring->vaddr); ring->vaddr = addr; @@ -1251,6 +1251,8 @@ void intel_ring_reset(struct intel_ring *ring, u32 tail) void intel_ring_unpin(struct intel_ring *ring) { + struct i915_vma *vma = ring->vma; + if (!atomic_dec_and_test(&ring->pin_count)) return; @@ -1259,18 +1261,17 @@ void intel_ring_unpin(struct intel_ring *ring) /* Discard any unused bytes beyond that submitted to hw. */ intel_ring_reset(ring, ring->tail); - GEM_BUG_ON(!ring->vma); - i915_vma_unset_ggtt_write(ring->vma); - if (i915_vma_is_map_and_fenceable(ring->vma)) - i915_vma_unpin_iomap(ring->vma); + i915_vma_unset_ggtt_write(vma); + if (i915_vma_is_map_and_fenceable(vma)) + i915_vma_unpin_iomap(vma); else - i915_gem_object_unpin_map(ring->vma->obj); + i915_gem_object_unpin_map(vma->obj); GEM_BUG_ON(!ring->vaddr); ring->vaddr = NULL; - ring->vma->obj->pin_global--; - i915_vma_unpin(ring->vma); + i915_vma_unpin(vma); + i915_vma_make_purgeable(vma); intel_timeline_unpin(ring->timeline); } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index da14f8067497..0ee8139885a5 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -626,7 +626,7 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size) goto err; } - return vma; + return i915_vma_make_unshrinkable(vma); err: i915_gem_object_put(obj); diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 461a8dd4cc47..19f156a7f501 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -363,8 +363,9 @@ static int i915_gem_object_info(struct seq_file *m, void *data) struct drm_i915_private *i915 = node_to_i915(m->private); int ret; - seq_printf(m, "%u shrinkable objects, %llu bytes\n", + seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n", i915->mm.shrink_count, + atomic_read(&i915->mm.free_count), i915->mm.shrink_memory); seq_putc(m, '\n'); diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 7734d6218ce7..101a2bb416a6 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -1017,6 +1017,22 @@ unpin: return 0; } +struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma) +{ + i915_gem_object_make_unshrinkable(vma->obj); + return vma; +} + +void 
i915_vma_make_shrinkable(struct i915_vma *vma) +{ + i915_gem_object_make_shrinkable(vma->obj); +} + +void i915_vma_make_purgeable(struct i915_vma *vma) +{ + i915_gem_object_make_purgeable(vma->obj); +} + #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "selftests/i915_vma.c" #endif diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index 4b769db649bf..5c4224749bde 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h @@ -459,4 +459,8 @@ void i915_vma_parked(struct drm_i915_private *i915); struct i915_vma *i915_vma_alloc(void); void i915_vma_free(struct i915_vma *vma); +struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma); +void i915_vma_make_shrinkable(struct i915_vma *vma); +void i915_vma_make_purgeable(struct i915_vma *vma); + #endif -- cgit v1.2.3 From 515b8b7e935ef3f59c4efda04a3b05353ed6fbb7 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 2 Aug 2019 22:21:37 +0100 Subject: drm/i915: Flush the freed object list on file close As we increase the number of RCU objects, it becomes easier for us to have several hundred thousand objects in the deferred RCU free queues. An example is gem_ctx_create/files which continually creates active contexts, which are not immediately freed upon close as they are kept alive by outstanding requests. This lack of backpressure allows the context objects to persist until they overwhelm and starve the system. We can increase our backpressure by flushing the freed object queue upon closing the device fd which should then not impact other clients. Testcase: igt/gem_ctx_create/*files Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190802212137.22207-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_object.c | 38 ++++-------------------------- drivers/gpu/drm/i915/i915_drv.c | 3 +++ drivers/gpu/drm/i915/i915_drv.h | 1 - drivers/gpu/drm/i915/i915_gem.c | 1 - 4 files changed, 7 insertions(+), 36 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index 4ea97fca9c35..19d55115747c 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -211,48 +211,18 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, void i915_gem_flush_free_objects(struct drm_i915_private *i915) { - struct llist_node *freed; - - /* Free the oldest, most stale object to keep the free_list short */ - freed = NULL; - if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */ - /* Only one consumer of llist_del_first() allowed */ - spin_lock(&i915->mm.free_lock); - freed = llist_del_first(&i915->mm.free_list); - spin_unlock(&i915->mm.free_lock); - } - if (unlikely(freed)) { - freed->next = NULL; + struct llist_node *freed = llist_del_all(&i915->mm.free_list); + + if (unlikely(freed)) __i915_gem_free_objects(i915, freed); - } } static void __i915_gem_free_work(struct work_struct *work) { struct drm_i915_private *i915 = container_of(work, struct drm_i915_private, mm.free_work); - struct llist_node *freed; - - /* - * All file-owned VMA should have been released by this point through - * i915_gem_close_object(), or earlier by i915_gem_context_close(). - * However, the object may also be bound into the global GTT (e.g. - * older GPUs without per-process support, or for direct access through - * the GTT either for the user or for scanout). Those VMA still need to - * unbound now. 
- */ - - spin_lock(&i915->mm.free_lock); - while ((freed = llist_del_all(&i915->mm.free_list))) { - spin_unlock(&i915->mm.free_lock); - __i915_gem_free_objects(i915, freed); - if (need_resched()) - return; - - spin_lock(&i915->mm.free_lock); - } - spin_unlock(&i915->mm.free_lock); + i915_gem_flush_free_objects(i915); } void i915_gem_free_object(struct drm_gem_object *gem_obj) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 21f1b29d06a2..b9c6ae09d61f 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -2052,6 +2052,9 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) mutex_unlock(&dev->struct_mutex); kfree(file_priv); + + /* Catch up with all the deferred frees from "this" client */ + i915_gem_flush_free_objects(to_i915(dev)); } static void intel_suspend_encoders(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 98045875ebba..5f3e5c13fbaa 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -773,7 +773,6 @@ struct i915_gem_mm { */ struct llist_head free_list; struct work_struct free_work; - spinlock_t free_lock; /** * Count of objects pending destructions. Used to skip needlessly * waiting on an RCU barrier if no objects are waiting to be freed. diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index deaca3c2416d..eb34f3e5a74d 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1648,7 +1648,6 @@ void i915_gem_init_mmio(struct drm_i915_private *i915) static void i915_gem_init__mm(struct drm_i915_private *i915) { spin_lock_init(&i915->mm.obj_lock); - spin_lock_init(&i915->mm.free_lock); init_llist_head(&i915->mm.free_list); -- cgit v1.2.3 From 4853fc32306faaef03d409c38921c401379da2ea Mon Sep 17 00:00:00 2001 From: Matt Coffin Date: Wed, 31 Jul 2019 14:14:35 -0600 Subject: drm/amd/powerplay: Allow changing of fan_control in smu_v11_0 [Why] Before this change, the fan control state on smu_v11 was not able to be changed because the capability check for checking if the fan control capability existed was inverted. [How] The capability check for fan control in smu_v11_0_auto_fan_control was inverted, to correctly check for the absence, instead of presence of fan control capabilities. Reviewed-by: Evan Quan Signed-off-by: Matt Coffin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index ac5b26228e75..5fde5cf65b42 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -1391,7 +1391,7 @@ smu_v11_0_smc_fan_control(struct smu_context *smu, bool start) { int ret = 0; - if (smu_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT)) + if (!smu_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT)) return 0; ret = smu_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, start); -- cgit v1.2.3 From 8ecd998debe1280ef8aab45049e69ac3d2116398 Mon Sep 17 00:00:00 2001 From: Kevin Wang Date: Fri, 2 Aug 2019 12:01:00 +0800 Subject: drm/amd/powerplay: honor hw limit on fetching metrics data for navi10 too frequently to update mertrics table will cause smu internal error. 
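The fix amounts to a time-stamped cache in front of the SMU table read, so back-to-back hwmon/sysfs queries reuse the last snapshot instead of re-fetching from the firmware; schematically (names follow the patch, error handling trimmed):

static int navi10_get_metrics_cached(struct smu_context *smu, SmuMetrics_t *out)
{
	struct smu_table_context *tbl = &smu->smu_table;
	int ret = 0;

	/* refresh at most about once per millisecond (HZ / 1000 jiffies) */
	if (!tbl->metrics_time ||
	    time_after(jiffies, tbl->metrics_time + HZ / 1000)) {
		ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
				       tbl->metrics_table, false);
		if (ret)
			return ret;
		tbl->metrics_time = jiffies;
	}

	memcpy(out, tbl->metrics_table, sizeof(*out));
	return 0;
}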
Signed-off-by: Kevin Wang Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 56 ++++++++++++++++++++---------- 1 file changed, 38 insertions(+), 18 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index cc0a3b2256af..f63405b1a009 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -502,6 +502,8 @@ static int navi10_store_powerplay_table(struct smu_context *smu) static int navi10_tables_init(struct smu_context *smu, struct smu_table *tables) { + struct smu_table_context *smu_table = &smu->smu_table; + SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t), @@ -516,9 +518,35 @@ static int navi10_tables_init(struct smu_context *smu, struct smu_table *tables) sizeof(DpmActivityMonitorCoeffInt_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL); + if (!smu_table->metrics_table) + return -ENOMEM; + smu_table->metrics_time = 0; + return 0; } +static int navi10_get_metrics_table(struct smu_context *smu, + SmuMetrics_t *metrics_table) +{ + struct smu_table_context *smu_table= &smu->smu_table; + int ret = 0; + + if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + HZ / 1000)) { + ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, + (void *)smu_table->metrics_table, false); + if (ret) { + pr_info("Failed to export SMU metrics table!\n"); + return ret; + } + smu_table->metrics_time = jiffies; + } + + memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t)); + + return ret; +} + static int navi10_allocate_dpm_context(struct smu_context *smu) { struct smu_dpm_context *smu_dpm = &smu->smu_dpm; @@ -598,15 +626,10 @@ static int navi10_get_current_clk_freq_by_table(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *value) { - static SmuMetrics_t metrics; int ret = 0, clk_id = 0; + SmuMetrics_t metrics; - if (!value) - return -EINVAL; - - memset(&metrics, 0, sizeof(metrics)); - - ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, (void *)&metrics, false); + ret = navi10_get_metrics_table(smu, &metrics); if (ret) return ret; @@ -894,8 +917,9 @@ static int navi10_get_gpu_power(struct smu_context *smu, uint32_t *value) if (!value) return -EINVAL; - ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, (void *)&metrics, - false); + ret = navi10_get_metrics_table(smu, &metrics); + if (ret) + return ret; if (ret) return ret; @@ -914,10 +938,7 @@ static int navi10_get_current_activity_percent(struct smu_context *smu, if (!value) return -EINVAL; - msleep(1); - - ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, - (void *)&metrics, false); + ret = navi10_get_metrics_table(smu, &metrics); if (ret) return ret; @@ -956,10 +977,9 @@ static int navi10_get_fan_speed_rpm(struct smu_context *smu, if (!speed) return -EINVAL; - memset(&metrics, 0, sizeof(metrics)); - - ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, - (void *)&metrics, false); + ret = navi10_get_metrics_table(smu, &metrics); + if (ret) + return ret; if (ret) return ret; @@ -1307,7 +1327,7 @@ static int navi10_thermal_get_temperature(struct smu_context *smu, if (!value) return -EINVAL; - ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, (void *)&metrics, false); + ret = navi10_get_metrics_table(smu, &metrics); if (ret) return ret; 
-- cgit v1.2.3 From b887011803582c8539cd9f963035eef91373a169 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Fri, 2 Aug 2019 16:38:32 +0800 Subject: drm/amd/powerplay: correct navi10 vcn powergate vcn dpm on is a prerequisite for vcn power gate control. Signed-off-by: Evan Quan Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 4 +++- drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 1 + drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 23 +++++++++++++++-------- 3 files changed, 19 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 0685a3388e38..8a3eadeebdcb 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -315,6 +315,8 @@ int smu_get_power_num_states(struct smu_context *smu, int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor, void *data, uint32_t *size) { + struct smu_power_context *smu_power = &smu->smu_power; + struct smu_power_gate *power_gate = &smu_power->power_gate; int ret = 0; switch (sensor) { @@ -339,7 +341,7 @@ int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor, *size = 4; break; case AMDGPU_PP_SENSOR_VCN_POWER_STATE: - *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT) ? 1 : 0; + *(uint32_t *)data = power_gate->vcn_gated ? 0 : 1; *size = 4; break; default: diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index 208e6711d506..a0f52c86d8c7 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h @@ -451,6 +451,7 @@ struct smu_dpm_context { struct smu_power_gate { bool uvd_gated; bool vce_gated; + bool vcn_gated; }; struct smu_power_context { diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index f63405b1a009..b81c7e715dc9 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -605,20 +605,27 @@ static int navi10_set_default_dpm_table(struct smu_context *smu) static int navi10_dpm_set_uvd_enable(struct smu_context *smu, bool enable) { + struct smu_power_context *smu_power = &smu->smu_power; + struct smu_power_gate *power_gate = &smu_power->power_gate; int ret = 0; if (enable) { - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1); - if (ret) - return ret; + /* vcn dpm on is a prerequisite for vcn power gate messages */ + if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) { + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1); + if (ret) + return ret; + } + power_gate->vcn_gated = false; } else { - ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn); - if (ret) - return ret; + if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) { + ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn); + if (ret) + return ret; + } + power_gate->vcn_gated = true; } - ret = smu_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, enable); - return ret; } -- cgit v1.2.3 From 9b562437d388582f5463a4c3f60ee97a2d4d8d58 Mon Sep 17 00:00:00 2001 From: Chuhong Yuan Date: Wed, 31 Jul 2019 10:01:40 +0800 Subject: drm/modes: Fix unterminated strncpy strncpy(dest, src, strlen(src)) leads to unterminated dest, which is dangerous. Fix it by using strscpy. 
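For a quick illustration of the pitfall (buffer and literals invented for the example, not the drm_modes code): strncpy() stops at the byte count but does not add a terminating NUL when the source fills it entirely, while strscpy() always terminates the destination and reports truncation.

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

static void copy_example(void)
{
	char buf[8];
	ssize_t n;

	strncpy(buf, "12345678", sizeof(buf));		/* 8 bytes copied, no NUL terminator */

	n = strscpy(buf, "12345678", sizeof(buf));	/* buf = "1234567", n = -E2BIG (truncated) */
	n = strscpy(buf, "1234", sizeof(buf));		/* buf = "1234", n = 4 */
	(void)n;
}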
Fixes: 3aeeb13d8996 ("drm/modes: Support modes names on the command line") Signed-off-by: Chuhong Yuan Signed-off-by: Maxime Ripard Link: https://patchwork.freedesktop.org/patch/msgid/20190731020140.3529-1-hslester96@gmail.com --- drivers/gpu/drm/drm_modes.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 80fcd5dc1558..b0369e690f36 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -1770,7 +1770,9 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option, } if (named_mode) { - strncpy(mode->name, name, mode_end); + if (mode_end + 1 > DRM_DISPLAY_MODE_LEN) + return false; + strscpy(mode->name, name, mode_end + 1); } else { ret = drm_mode_parse_cmdline_res_mode(name, mode_end, parse_extras, -- cgit v1.2.3 From a4e57f9031ccd543c549a34524af16c0c246e628 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 4 Aug 2019 13:48:25 +0100 Subject: drm/i915: Teach execbuffer to take the engine wakeref not GT In the next patch, we would like to couple into the engine wakeref to free the batch pool on idling. The caveat here is that we therefore want to track the engine wakeref more precisely and to hold it instead of the broader GT wakeref as we process the ioctl. v2: Avoid introducing odd semantics for a shortlived timeline->mutex acquisition interface. Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190804124826.30272-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 42 ++++++++++++++++++-------- 1 file changed, 29 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 4db4463089ce..8d90498eaf46 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -2139,14 +2139,40 @@ static int eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce) if (err) return err; + /* + * Take a local wakeref for preparing to dispatch the execbuf as + * we expect to access the hardware fairly frequently in the + * process, and require the engine to be kept awake between accesses. + * Upon dispatch, we acquire another prolonged wakeref that we hold + * until the timeline is idle, which in turn releases the wakeref + * taken on the engine, and the parent device. + */ + err = intel_context_timeline_lock(ce); + if (err) + goto err_unpin; + + intel_context_enter(ce); + intel_context_timeline_unlock(ce); + eb->engine = ce->engine; eb->context = ce; return 0; + +err_unpin: + intel_context_unpin(ce); + return err; } static void eb_unpin_context(struct i915_execbuffer *eb) { - intel_context_unpin(eb->context); + struct intel_context *ce = eb->context; + struct intel_timeline *tl = ce->ring->timeline; + + mutex_lock(&tl->mutex); + intel_context_exit(ce); + mutex_unlock(&tl->mutex); + + intel_context_unpin(ce); } static unsigned int @@ -2426,18 +2452,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, if (unlikely(err)) goto err_destroy; - /* - * Take a local wakeref for preparing to dispatch the execbuf as - * we expect to access the hardware fairly frequently in the - * process. Upon first dispatch, we acquire another prolonged - * wakeref that we hold until the GPU has been idle for at least - * 100ms. 
- */ - intel_gt_pm_get(&eb.i915->gt); - err = i915_mutex_lock_interruptible(dev); if (err) - goto err_rpm; + goto err_context; err = eb_select_engine(&eb, file, args); if (unlikely(err)) @@ -2602,8 +2619,7 @@ err_engine: eb_unpin_context(&eb); err_unlock: mutex_unlock(&dev->struct_mutex); -err_rpm: - intel_gt_pm_put(&eb.i915->gt); +err_context: i915_gem_context_put(eb.gem_context); err_destroy: eb_destroy(&eb); -- cgit v1.2.3 From b40d73784ffc33f3c6431e7ceec3b20fffcd95c3 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 4 Aug 2019 13:48:26 +0100 Subject: drm/i915: Replace struct_mutex for batch pool serialisation Switch to tracking activity via i915_active on individual nodes, only keeping a list of retired objects in the cache, and reaping the cache when the engine itself idles. Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190804124826.30272-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/Makefile | 2 +- drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 58 ++++--- drivers/gpu/drm/i915/gem/i915_gem_object.c | 1 - drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 1 - drivers/gpu/drm/i915/gem/i915_gem_pm.c | 4 +- drivers/gpu/drm/i915/gt/intel_engine.h | 1 - drivers/gpu/drm/i915/gt/intel_engine_cs.c | 11 +- drivers/gpu/drm/i915/gt/intel_engine_pm.c | 2 + drivers/gpu/drm/i915/gt/intel_engine_pool.c | 177 ++++++++++++++++++++++ drivers/gpu/drm/i915/gt/intel_engine_pool.h | 34 +++++ drivers/gpu/drm/i915/gt/intel_engine_pool_types.h | 29 ++++ drivers/gpu/drm/i915/gt/intel_engine_types.h | 6 +- drivers/gpu/drm/i915/gt/mock_engine.c | 2 + drivers/gpu/drm/i915/i915_debugfs.c | 68 --------- drivers/gpu/drm/i915/i915_gem_batch_pool.c | 132 ---------------- drivers/gpu/drm/i915/i915_gem_batch_pool.h | 26 ---- 16 files changed, 289 insertions(+), 265 deletions(-) create mode 100644 drivers/gpu/drm/i915/gt/intel_engine_pool.c create mode 100644 drivers/gpu/drm/i915/gt/intel_engine_pool.h create mode 100644 drivers/gpu/drm/i915/gt/intel_engine_pool_types.h delete mode 100644 drivers/gpu/drm/i915/i915_gem_batch_pool.c delete mode 100644 drivers/gpu/drm/i915/i915_gem_batch_pool.h (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 654bdcbd23c7..53a55326106a 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -72,6 +72,7 @@ obj-y += gt/ gt-y += \ gt/intel_breadcrumbs.o \ gt/intel_context.o \ + gt/intel_engine_pool.o \ gt/intel_engine_cs.o \ gt/intel_engine_pm.o \ gt/intel_gt.o \ @@ -125,7 +126,6 @@ i915-y += \ $(gem-y) \ i915_active.o \ i915_cmd_parser.o \ - i915_gem_batch_pool.o \ i915_gem_evict.o \ i915_gem_fence_reg.o \ i915_gem_gtt.o \ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 8d90498eaf46..0c8c909743f1 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -16,6 +16,7 @@ #include "gem/i915_gem_ioctls.h" #include "gt/intel_context.h" +#include "gt/intel_engine_pool.h" #include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" @@ -1198,25 +1199,26 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, unsigned int len) { struct reloc_cache *cache = &eb->reloc_cache; - struct drm_i915_gem_object *obj; + struct intel_engine_pool_node *pool; struct i915_request *rq; struct i915_vma *batch; u32 *cmd; int err; - obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, PAGE_SIZE); - if (IS_ERR(obj)) - return PTR_ERR(obj); + 
pool = intel_engine_pool_get(&eb->engine->pool, PAGE_SIZE); + if (IS_ERR(pool)) + return PTR_ERR(pool); - cmd = i915_gem_object_pin_map(obj, + cmd = i915_gem_object_pin_map(pool->obj, cache->has_llc ? I915_MAP_FORCE_WB : I915_MAP_FORCE_WC); - i915_gem_object_unpin_pages(obj); - if (IS_ERR(cmd)) - return PTR_ERR(cmd); + if (IS_ERR(cmd)) { + err = PTR_ERR(cmd); + goto out_pool; + } - batch = i915_vma_instance(obj, vma->vm, NULL); + batch = i915_vma_instance(pool->obj, vma->vm, NULL); if (IS_ERR(batch)) { err = PTR_ERR(batch); goto err_unmap; @@ -1232,6 +1234,10 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, goto err_unpin; } + err = intel_engine_pool_mark_active(pool, rq); + if (err) + goto err_request; + err = reloc_move_to_gpu(rq, vma); if (err) goto err_request; @@ -1257,7 +1263,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, cache->rq_size = 0; /* Return with batch mapping (cmd) still pinned */ - return 0; + goto out_pool; skip_request: i915_request_skip(rq, err); @@ -1266,7 +1272,9 @@ err_request: err_unpin: i915_vma_unpin(batch); err_unmap: - i915_gem_object_unpin_map(obj); + i915_gem_object_unpin_map(pool->obj); +out_pool: + intel_engine_pool_put(pool); return err; } @@ -2010,18 +2018,17 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq) static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master) { - struct drm_i915_gem_object *shadow_batch_obj; + struct intel_engine_pool_node *pool; struct i915_vma *vma; int err; - shadow_batch_obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, - PAGE_ALIGN(eb->batch_len)); - if (IS_ERR(shadow_batch_obj)) - return ERR_CAST(shadow_batch_obj); + pool = intel_engine_pool_get(&eb->engine->pool, eb->batch_len); + if (IS_ERR(pool)) + return ERR_CAST(pool); err = intel_engine_cmd_parser(eb->engine, eb->batch->obj, - shadow_batch_obj, + pool->obj, eb->batch_start_offset, eb->batch_len, is_master); @@ -2030,12 +2037,12 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master) vma = NULL; else vma = ERR_PTR(err); - goto out; + goto err; } - vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0); + vma = i915_gem_object_ggtt_pin(pool->obj, NULL, 0, 0, 0); if (IS_ERR(vma)) - goto out; + goto err; eb->vma[eb->buffer_count] = i915_vma_get(vma); eb->flags[eb->buffer_count] = @@ -2043,8 +2050,11 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master) vma->exec_flags = &eb->flags[eb->buffer_count]; eb->buffer_count++; -out: - i915_gem_object_unpin_pages(shadow_batch_obj); + vma->private = pool; + return vma; + +err: + intel_engine_pool_put(pool); return vma; } @@ -2588,6 +2598,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, * to explicitly hold another reference here. 
*/ eb.request->batch = eb.batch; + if (eb.batch->private) + intel_engine_pool_mark_active(eb.batch->private, eb.request); trace_i915_request_queue(eb.request, eb.batch_flags); err = eb_submit(&eb); @@ -2612,6 +2624,8 @@ err_request: err_batch_unpin: if (eb.batch_flags & I915_DISPATCH_SECURE) i915_vma_unpin(eb.batch); + if (eb.batch->private) + intel_engine_pool_put(eb.batch->private); err_vma: if (eb.exec) eb_release_vmas(&eb); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index 19d55115747c..09f1843f9274 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -66,7 +66,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, INIT_LIST_HEAD(&obj->mm.link); INIT_LIST_HEAD(&obj->lut_list); - INIT_LIST_HEAD(&obj->batch_pool_link); init_rcu_head(&obj->rcu); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h index 34b51fad02de..d474c6ac4100 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -114,7 +114,6 @@ struct drm_i915_gem_object { unsigned int userfault_count; struct list_head userfault_link; - struct list_head batch_pool_link; I915_SELFTEST_DECLARE(struct list_head st_link); /* diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c index b5561cbdc5ea..72922703af49 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c @@ -34,10 +34,8 @@ static void i915_gem_park(struct drm_i915_private *i915) lockdep_assert_held(&i915->drm.struct_mutex); - for_each_engine(engine, i915, id) { + for_each_engine(engine, i915, id) call_idle_barriers(engine); /* cleanup after wedging */ - i915_gem_batch_pool_fini(&engine->batch_pool); - } intel_timelines_park(i915); i915_vma_parked(i915); diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h index db5c73ce86ee..a4db4dd22b4f 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine.h +++ b/drivers/gpu/drm/i915/gt/intel_engine.h @@ -9,7 +9,6 @@ #include #include -#include "i915_gem_batch_pool.h" #include "i915_pmu.h" #include "i915_reg.h" #include "i915_request.h" diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 8bd9a9adf4a5..a91f15717cc1 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -32,6 +32,7 @@ #include "intel_engine.h" #include "intel_engine_pm.h" +#include "intel_engine_pool.h" #include "intel_context.h" #include "intel_lrc.h" #include "intel_reset.h" @@ -492,11 +493,6 @@ cleanup: return err; } -static void intel_engine_init_batch_pool(struct intel_engine_cs *engine) -{ - i915_gem_batch_pool_init(&engine->batch_pool, engine); -} - void intel_engine_init_execlists(struct intel_engine_cs *engine) { struct intel_engine_execlists * const execlists = &engine->execlists; @@ -622,10 +618,11 @@ static int intel_engine_setup_common(struct intel_engine_cs *engine) intel_engine_init_breadcrumbs(engine); intel_engine_init_execlists(engine); intel_engine_init_hangcheck(engine); - intel_engine_init_batch_pool(engine); intel_engine_init_cmd_parser(engine); intel_engine_init__pm(engine); + intel_engine_pool_init(&engine->pool); + /* Use the whole device by default */ engine->sseu = intel_sseu_from_device_info(&RUNTIME_INFO(engine->i915)->sseu); @@ -869,9 +866,9 @@ void intel_engine_cleanup_common(struct 
intel_engine_cs *engine) cleanup_status_page(engine); + intel_engine_pool_fini(&engine->pool); intel_engine_fini_breadcrumbs(engine); intel_engine_cleanup_cmd_parser(engine); - i915_gem_batch_pool_fini(&engine->batch_pool); if (engine->default_state) i915_gem_object_put(engine->default_state); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c index ce54092475da..0336204988d6 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c @@ -8,6 +8,7 @@ #include "intel_engine.h" #include "intel_engine_pm.h" +#include "intel_engine_pool.h" #include "intel_gt.h" #include "intel_gt_pm.h" @@ -116,6 +117,7 @@ static int __engine_park(struct intel_wakeref *wf) GEM_TRACE("%s\n", engine->name); intel_engine_disarm_breadcrumbs(engine); + intel_engine_pool_park(&engine->pool); /* Must be reset upon idling, or we may miss the busy wakeup. */ GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.c b/drivers/gpu/drm/i915/gt/intel_engine_pool.c new file mode 100644 index 000000000000..03d90b49584a --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_engine_pool.c @@ -0,0 +1,177 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2014-2018 Intel Corporation + */ + +#include "gem/i915_gem_object.h" + +#include "i915_drv.h" +#include "intel_engine_pm.h" +#include "intel_engine_pool.h" + +static struct intel_engine_cs *to_engine(struct intel_engine_pool *pool) +{ + return container_of(pool, struct intel_engine_cs, pool); +} + +static struct list_head * +bucket_for_size(struct intel_engine_pool *pool, size_t sz) +{ + int n; + + /* + * Compute a power-of-two bucket, but throw everything greater than + * 16KiB into the same bucket: i.e. the buckets hold objects of + * (1 page, 2 pages, 4 pages, 8+ pages). 
+ */ + n = fls(sz >> PAGE_SHIFT) - 1; + if (n >= ARRAY_SIZE(pool->cache_list)) + n = ARRAY_SIZE(pool->cache_list) - 1; + + return &pool->cache_list[n]; +} + +static void node_free(struct intel_engine_pool_node *node) +{ + i915_gem_object_put(node->obj); + i915_active_fini(&node->active); + kfree(node); +} + +static int pool_active(struct i915_active *ref) +{ + struct intel_engine_pool_node *node = + container_of(ref, typeof(*node), active); + struct reservation_object *resv = node->obj->base.resv; + int err; + + if (reservation_object_trylock(resv)) { + reservation_object_add_excl_fence(resv, NULL); + reservation_object_unlock(resv); + } + + err = i915_gem_object_pin_pages(node->obj); + if (err) + return err; + + /* Hide this pinned object from the shrinker until retired */ + i915_gem_object_make_unshrinkable(node->obj); + + return 0; +} + +static void pool_retire(struct i915_active *ref) +{ + struct intel_engine_pool_node *node = + container_of(ref, typeof(*node), active); + struct intel_engine_pool *pool = node->pool; + struct list_head *list = bucket_for_size(pool, node->obj->base.size); + unsigned long flags; + + GEM_BUG_ON(!intel_engine_pm_is_awake(to_engine(pool))); + + i915_gem_object_unpin_pages(node->obj); + + /* Return this object to the shrinker pool */ + i915_gem_object_make_purgeable(node->obj); + + spin_lock_irqsave(&pool->lock, flags); + list_add(&node->link, list); + spin_unlock_irqrestore(&pool->lock, flags); +} + +static struct intel_engine_pool_node * +node_create(struct intel_engine_pool *pool, size_t sz) +{ + struct intel_engine_cs *engine = to_engine(pool); + struct intel_engine_pool_node *node; + struct drm_i915_gem_object *obj; + + node = kmalloc(sizeof(*node), + GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN); + if (!node) + return ERR_PTR(-ENOMEM); + + node->pool = pool; + i915_active_init(engine->i915, &node->active, pool_active, pool_retire); + + obj = i915_gem_object_create_internal(engine->i915, sz); + if (IS_ERR(obj)) { + i915_active_fini(&node->active); + kfree(node); + return ERR_CAST(obj); + } + + node->obj = obj; + return node; +} + +struct intel_engine_pool_node * +intel_engine_pool_get(struct intel_engine_pool *pool, size_t size) +{ + struct intel_engine_pool_node *node; + struct list_head *list; + unsigned long flags; + int ret; + + GEM_BUG_ON(!intel_engine_pm_is_awake(to_engine(pool))); + + size = PAGE_ALIGN(size); + list = bucket_for_size(pool, size); + + spin_lock_irqsave(&pool->lock, flags); + list_for_each_entry(node, list, link) { + if (node->obj->base.size < size) + continue; + list_del(&node->link); + break; + } + spin_unlock_irqrestore(&pool->lock, flags); + + if (&node->link == list) { + node = node_create(pool, size); + if (IS_ERR(node)) + return node; + } + + ret = i915_active_acquire(&node->active); + if (ret) { + node_free(node); + return ERR_PTR(ret); + } + + return node; +} + +void intel_engine_pool_init(struct intel_engine_pool *pool) +{ + int n; + + spin_lock_init(&pool->lock); + for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) + INIT_LIST_HEAD(&pool->cache_list[n]); +} + +void intel_engine_pool_park(struct intel_engine_pool *pool) +{ + int n; + + for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) { + struct list_head *list = &pool->cache_list[n]; + struct intel_engine_pool_node *node, *nn; + + list_for_each_entry_safe(node, nn, list, link) + node_free(node); + + INIT_LIST_HEAD(list); + } +} + +void intel_engine_pool_fini(struct intel_engine_pool *pool) +{ + int n; + + for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) + 
GEM_BUG_ON(!list_empty(&pool->cache_list[n])); +} diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.h b/drivers/gpu/drm/i915/gt/intel_engine_pool.h new file mode 100644 index 000000000000..f7a0a660c1c9 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_engine_pool.h @@ -0,0 +1,34 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2014-2018 Intel Corporation + */ + +#ifndef INTEL_ENGINE_POOL_H +#define INTEL_ENGINE_POOL_H + +#include "intel_engine_pool_types.h" +#include "i915_active.h" +#include "i915_request.h" + +struct intel_engine_pool_node * +intel_engine_pool_get(struct intel_engine_pool *pool, size_t size); + +static inline int +intel_engine_pool_mark_active(struct intel_engine_pool_node *node, + struct i915_request *rq) +{ + return i915_active_ref(&node->active, rq->fence.context, rq); +} + +static inline void +intel_engine_pool_put(struct intel_engine_pool_node *node) +{ + i915_active_release(&node->active); +} + +void intel_engine_pool_init(struct intel_engine_pool *pool); +void intel_engine_pool_park(struct intel_engine_pool *pool); +void intel_engine_pool_fini(struct intel_engine_pool *pool); + +#endif /* INTEL_ENGINE_POOL_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool_types.h b/drivers/gpu/drm/i915/gt/intel_engine_pool_types.h new file mode 100644 index 000000000000..e31ee361b76f --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_engine_pool_types.h @@ -0,0 +1,29 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2014-2018 Intel Corporation + */ + +#ifndef INTEL_ENGINE_POOL_TYPES_H +#define INTEL_ENGINE_POOL_TYPES_H + +#include +#include + +#include "i915_active_types.h" + +struct drm_i915_gem_object; + +struct intel_engine_pool { + spinlock_t lock; + struct list_head cache_list[4]; +}; + +struct intel_engine_pool_node { + struct i915_active active; + struct drm_i915_gem_object *obj; + struct list_head link; + struct intel_engine_pool *pool; +}; + +#endif /* INTEL_ENGINE_POOL_TYPES_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index da61dd329210..2e273e8f5064 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -16,12 +16,12 @@ #include #include "i915_gem.h" -#include "i915_gem_batch_pool.h" #include "i915_pmu.h" #include "i915_priolist_types.h" #include "i915_selftest.h" -#include "gt/intel_timeline_types.h" +#include "intel_engine_pool_types.h" #include "intel_sseu.h" +#include "intel_timeline_types.h" #include "intel_wakeref.h" #include "intel_workarounds_types.h" @@ -354,7 +354,7 @@ struct intel_engine_cs { * when the command parser is enabled. Prevents the client from * modifying the batch contents after software parsing. 
*/ - struct i915_gem_batch_pool batch_pool; + struct intel_engine_pool pool; struct intel_hw_status_page status_page; struct i915_ctx_workarounds wa_ctx; diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c index 10cb312462e5..8a5f07935b84 100644 --- a/drivers/gpu/drm/i915/gt/mock_engine.c +++ b/drivers/gpu/drm/i915/gt/mock_engine.c @@ -27,6 +27,7 @@ #include "i915_drv.h" #include "intel_context.h" #include "intel_engine_pm.h" +#include "intel_engine_pool.h" #include "mock_engine.h" #include "selftests/mock_request.h" @@ -292,6 +293,7 @@ int mock_engine_init(struct intel_engine_cs *engine) intel_engine_init_breadcrumbs(engine); intel_engine_init_execlists(engine); intel_engine_init__pm(engine); + intel_engine_pool_init(&engine->pool); engine->kernel_context = i915_gem_context_get_engine(i915->kernel_context, engine->id); diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 19f156a7f501..8953336f2ae5 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -295,27 +295,6 @@ static int per_file_stats(int id, void *ptr, void *data) stats.closed); \ } while (0) -static void print_batch_pool_stats(struct seq_file *m, - struct drm_i915_private *dev_priv) -{ - struct drm_i915_gem_object *obj; - struct intel_engine_cs *engine; - struct file_stats stats = {}; - enum intel_engine_id id; - int j; - - for_each_engine(engine, dev_priv, id) { - for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) { - list_for_each_entry(obj, - &engine->batch_pool.cache_list[j], - batch_pool_link) - per_file_stats(0, obj, &stats); - } - } - - print_file_stats(m, "[k]batch pool", stats); -} - static void print_context_stats(struct seq_file *m, struct drm_i915_private *i915) { @@ -374,58 +353,12 @@ static int i915_gem_object_info(struct seq_file *m, void *data) if (ret) return ret; - print_batch_pool_stats(m, i915); print_context_stats(m, i915); mutex_unlock(&i915->drm.struct_mutex); return 0; } -static int i915_gem_batch_pool_info(struct seq_file *m, void *data) -{ - struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct drm_device *dev = &dev_priv->drm; - struct drm_i915_gem_object *obj; - struct intel_engine_cs *engine; - enum intel_engine_id id; - int total = 0; - int ret, j; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; - - for_each_engine(engine, dev_priv, id) { - for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) { - int count; - - count = 0; - list_for_each_entry(obj, - &engine->batch_pool.cache_list[j], - batch_pool_link) - count++; - seq_printf(m, "%s cache[%d]: %d objects\n", - engine->name, j, count); - - list_for_each_entry(obj, - &engine->batch_pool.cache_list[j], - batch_pool_link) { - seq_puts(m, " "); - describe_obj(m, obj); - seq_putc(m, '\n'); - } - - total += count; - } - } - - seq_printf(m, "total: %d\n", total); - - mutex_unlock(&dev->struct_mutex); - - return 0; -} - static void gen8_display_interrupt_info(struct seq_file *m) { struct drm_i915_private *dev_priv = node_to_i915(m->private); @@ -4385,7 +4318,6 @@ static const struct drm_info_list i915_debugfs_list[] = { {"i915_gem_objects", i915_gem_object_info, 0}, {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, {"i915_gem_interrupt", i915_interrupt_info, 0}, - {"i915_gem_batch_pool", i915_gem_batch_pool_info, 0}, {"i915_guc_info", i915_guc_info, 0}, {"i915_guc_load_status", i915_guc_load_status_info, 0}, {"i915_guc_log_dump", i915_guc_log_dump, 0}, diff --git 
a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c deleted file mode 100644 index b17f23991253..000000000000 --- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c +++ /dev/null @@ -1,132 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2014-2018 Intel Corporation - */ - -#include "i915_gem_batch_pool.h" -#include "i915_drv.h" - -/** - * DOC: batch pool - * - * In order to submit batch buffers as 'secure', the software command parser - * must ensure that a batch buffer cannot be modified after parsing. It does - * this by copying the user provided batch buffer contents to a kernel owned - * buffer from which the hardware will actually execute, and by carefully - * managing the address space bindings for such buffers. - * - * The batch pool framework provides a mechanism for the driver to manage a - * set of scratch buffers to use for this purpose. The framework can be - * extended to support other uses cases should they arise. - */ - -/** - * i915_gem_batch_pool_init() - initialize a batch buffer pool - * @pool: the batch buffer pool - * @engine: the associated request submission engine - */ -void i915_gem_batch_pool_init(struct i915_gem_batch_pool *pool, - struct intel_engine_cs *engine) -{ - int n; - - pool->engine = engine; - - for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) - INIT_LIST_HEAD(&pool->cache_list[n]); -} - -/** - * i915_gem_batch_pool_fini() - clean up a batch buffer pool - * @pool: the pool to clean up - * - * Note: Callers must hold the struct_mutex. - */ -void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool) -{ - int n; - - lockdep_assert_held(&pool->engine->i915->drm.struct_mutex); - - for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) { - struct drm_i915_gem_object *obj, *next; - - list_for_each_entry_safe(obj, next, - &pool->cache_list[n], - batch_pool_link) - i915_gem_object_put(obj); - - INIT_LIST_HEAD(&pool->cache_list[n]); - } -} - -/** - * i915_gem_batch_pool_get() - allocate a buffer from the pool - * @pool: the batch buffer pool - * @size: the minimum desired size of the returned buffer - * - * Returns an inactive buffer from @pool with at least @size bytes, - * with the pages pinned. The caller must i915_gem_object_unpin_pages() - * on the returned object. - * - * Note: Callers must hold the struct_mutex - * - * Return: the buffer object or an error pointer - */ -struct drm_i915_gem_object * -i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, - size_t size) -{ - struct drm_i915_gem_object *obj; - struct list_head *list; - int n, ret; - - lockdep_assert_held(&pool->engine->i915->drm.struct_mutex); - - /* Compute a power-of-two bucket, but throw everything greater than - * 16KiB into the same bucket: i.e. the the buckets hold objects of - * (1 page, 2 pages, 4 pages, 8+ pages). - */ - n = fls(size >> PAGE_SHIFT) - 1; - if (n >= ARRAY_SIZE(pool->cache_list)) - n = ARRAY_SIZE(pool->cache_list) - 1; - list = &pool->cache_list[n]; - - list_for_each_entry(obj, list, batch_pool_link) { - struct reservation_object *resv = obj->base.resv; - - /* The batches are strictly LRU ordered */ - if (!reservation_object_test_signaled_rcu(resv, true)) - break; - - /* - * The object is now idle, clear the array of shared - * fences before we add a new request. Although, we - * remain on the same engine, we may be on a different - * timeline and so may continually grow the array, - * trapping a reference to all the old fences, rather - * than replace the existing fence. 
- */ - if (rcu_access_pointer(resv->fence)) { - reservation_object_lock(resv, NULL); - reservation_object_add_excl_fence(resv, NULL); - reservation_object_unlock(resv); - } - - if (obj->base.size >= size) - goto found; - } - - obj = i915_gem_object_create_internal(pool->engine->i915, size); - if (IS_ERR(obj)) - return obj; - -found: - ret = i915_gem_object_pin_pages(obj); - if (ret) - return ERR_PTR(ret); - - list_move_tail(&obj->batch_pool_link, list); - return obj; -} diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.h b/drivers/gpu/drm/i915/i915_gem_batch_pool.h deleted file mode 100644 index feeeeeaa54d8..000000000000 --- a/drivers/gpu/drm/i915/i915_gem_batch_pool.h +++ /dev/null @@ -1,26 +0,0 @@ -/* - * SPDX-License-Identifier: MIT - * - * Copyright © 2014-2018 Intel Corporation - */ - -#ifndef I915_GEM_BATCH_POOL_H -#define I915_GEM_BATCH_POOL_H - -#include - -struct drm_i915_gem_object; -struct intel_engine_cs; - -struct i915_gem_batch_pool { - struct intel_engine_cs *engine; - struct list_head cache_list[4]; -}; - -void i915_gem_batch_pool_init(struct i915_gem_batch_pool *pool, - struct intel_engine_cs *engine); -void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool); -struct drm_i915_gem_object * -i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, size_t size); - -#endif /* I915_GEM_BATCH_POOL_H */ -- cgit v1.2.3 From ce52ad5dd52cfaf3398058384e0ff94134bbd89c Mon Sep 17 00:00:00 2001 From: Stanislav Lisovskiy Date: Fri, 12 Jul 2019 11:19:38 +0300 Subject: drm/i915: Fix wrong escape clock divisor init for GLK According to Bspec clock divisor registers in GeminiLake should be initialized by shifting 1(<<) to amount of correspondent divisor. While i915 was writing all this time that value as is. Surprisingly that it by accident worked, until we met some issues with Microtech Etab. v2: Added Fixes tag and cc v3: Added stable to cc as well. 
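In other words, the register expects a single bit set at position (divisor - 1) rather than the raw divisor value. A tiny sketch of that encoding (helper name invented, mask handling omitted):

static u32 txesc_div_to_reg(u32 div)
{
	return 1 << (div - 1);	/* e.g. div = 8 -> 0x80, whereas the raw value would be 0x08 */
}

The two encodings happen to coincide only for divisors of 1 and 2.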
Signed-off-by: Stanislav Lisovskiy Reviewed-by: Vandita Kulkarni Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=108826 Fixes: bcc657004841 ("drm/i915/glk: Program txesc clock divider for GLK") Cc: Deepak M Cc: Madhav Chauhan Cc: Jani Nikula Cc: Jani Nikula Cc: Joonas Lahtinen Cc: Rodrigo Vivi Cc: intel-gfx@lists.freedesktop.org Cc: stable@vger.kernel.org Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190712081938.14185-1-stanislav.lisovskiy@intel.com --- drivers/gpu/drm/i915/display/vlv_dsi_pll.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c index 99cc3e2e9c2c..f016a776a39e 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c @@ -396,8 +396,8 @@ static void glk_dsi_program_esc_clock(struct drm_device *dev, else txesc2_div = 10; - I915_WRITE(MIPIO_TXESC_CLK_DIV1, txesc1_div & GLK_TX_ESC_CLK_DIV1_MASK); - I915_WRITE(MIPIO_TXESC_CLK_DIV2, txesc2_div & GLK_TX_ESC_CLK_DIV2_MASK); + I915_WRITE(MIPIO_TXESC_CLK_DIV1, (1 << (txesc1_div - 1)) & GLK_TX_ESC_CLK_DIV1_MASK); + I915_WRITE(MIPIO_TXESC_CLK_DIV2, (1 << (txesc2_div - 1)) & GLK_TX_ESC_CLK_DIV2_MASK); } /* Program BXT Mipi clocks and dividers */ -- cgit v1.2.3 From 0584674d7fe407c2db91a009294d5ec19f7cb1d5 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Thu, 4 Jul 2019 10:45:34 +0000 Subject: drm/i915: fix possible memory leak in intel_hdcp_auth_downstream() 'ksv_fifo' is malloced in intel_hdcp_auth_downstream() and should be freed before leaving from the error handling cases, otherwise it will cause memory leak. Fixes: f26ae6a652f2 ("drm/i915: SRM revocation check for HDCP1.4 and 2.2") Signed-off-by: Wei Yongjun Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190704104534.12508-1-weiyongjun1@huawei.com (cherry picked from commit de70fdd7d24cd07e51fbec420f8704d956a47949) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_hdcp.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index bc3a94d491c4..27bd7276a82d 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -536,7 +536,8 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector) if (drm_hdcp_check_ksvs_revoked(dev, ksv_fifo, num_downstream)) { DRM_ERROR("Revoked Ksv(s) in ksv_fifo\n"); - return -EPERM; + ret = -EPERM; + goto err; } /* -- cgit v1.2.3 From 73a0ff0b30af79bf0303d557eb82f1d1945bb6ee Mon Sep 17 00:00:00 2001 From: Stanislav Lisovskiy Date: Fri, 12 Jul 2019 11:19:38 +0300 Subject: drm/i915: Fix wrong escape clock divisor init for GLK According to Bspec clock divisor registers in GeminiLake should be initialized by shifting 1(<<) to amount of correspondent divisor. While i915 was writing all this time that value as is. Surprisingly that it by accident worked, until we met some issues with Microtech Etab. v2: Added Fixes tag and cc v3: Added stable to cc as well. 
Signed-off-by: Stanislav Lisovskiy Reviewed-by: Vandita Kulkarni Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=108826 Fixes: bcc657004841 ("drm/i915/glk: Program txesc clock divider for GLK") Cc: Deepak M Cc: Madhav Chauhan Cc: Jani Nikula Cc: Jani Nikula Cc: Joonas Lahtinen Cc: Rodrigo Vivi Cc: intel-gfx@lists.freedesktop.org Cc: stable@vger.kernel.org Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190712081938.14185-1-stanislav.lisovskiy@intel.com (cherry picked from commit ce52ad5dd52cfaf3398058384e0ff94134bbd89c) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/display/vlv_dsi_pll.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c index 99cc3e2e9c2c..f016a776a39e 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c @@ -396,8 +396,8 @@ static void glk_dsi_program_esc_clock(struct drm_device *dev, else txesc2_div = 10; - I915_WRITE(MIPIO_TXESC_CLK_DIV1, txesc1_div & GLK_TX_ESC_CLK_DIV1_MASK); - I915_WRITE(MIPIO_TXESC_CLK_DIV2, txesc2_div & GLK_TX_ESC_CLK_DIV2_MASK); + I915_WRITE(MIPIO_TXESC_CLK_DIV1, (1 << (txesc1_div - 1)) & GLK_TX_ESC_CLK_DIV1_MASK); + I915_WRITE(MIPIO_TXESC_CLK_DIV2, (1 << (txesc2_div - 1)) & GLK_TX_ESC_CLK_DIV2_MASK); } /* Program BXT Mipi clocks and dividers */ -- cgit v1.2.3 From f7ccbed656f78212593ca965d9a8f34bf24e0aab Mon Sep 17 00:00:00 2001 From: Douglas Anderson Date: Fri, 2 Aug 2019 11:46:16 -0700 Subject: drm/rockchip: Suspend DP late In commit fe64ba5c6323 ("drm/rockchip: Resume DP early") we moved resume to be early but left suspend at its normal time. This seems like it could be OK, but casues problems if a suspend gets interrupted partway through. The OS only balances matching suspend/resume levels. ...so if suspend was called then resume will be called. If suspend late was called then resume early will be called. ...but if suspend was called resume early might not get called. This leads to an unbalance in the clock enables / disables. Lets take the simple fix and just move suspend to be late to match. This makes the PM core take proper care in keeping things balanced. 
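Put differently: the PM core only promises to call resume_early for a device whose suspend_late actually ran, so clock enables done in one callback must be undone in the other at the matching level. A minimal sketch of that pairing (example_* names are placeholders, not the rockchip driver; SET_LATE_SYSTEM_SLEEP_PM_OPS is simply a canonical way to fill both fields together):

#include <linux/device.h>
#include <linux/pm.h>

static int example_suspend(struct device *dev)
{
	/* work done here is guaranteed to be balanced by example_resume() */
	return 0;
}

static int example_resume(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops example_pm_ops = {
	/* fills .suspend_late / .resume_early (and the freeze/restore variants) when CONFIG_PM_SLEEP is set */
	SET_LATE_SYSTEM_SLEEP_PM_OPS(example_suspend, example_resume)
};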
Fixes: fe64ba5c6323 ("drm/rockchip: Resume DP early") Signed-off-by: Douglas Anderson Signed-off-by: Sean Paul Link: https://patchwork.freedesktop.org/patch/msgid/20190802184616.44822-1-dianders@chromium.org --- drivers/gpu/drm/rockchip/analogix_dp-rockchip.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c index 95e5c517a15f..9aae3d8e99ef 100644 --- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c +++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c @@ -432,7 +432,7 @@ static int rockchip_dp_resume(struct device *dev) static const struct dev_pm_ops rockchip_dp_pm_ops = { #ifdef CONFIG_PM_SLEEP - .suspend = rockchip_dp_suspend, + .suspend_late = rockchip_dp_suspend, .resume_early = rockchip_dp_resume, #endif }; -- cgit v1.2.3 From c8be1a5fc5c63b78f86f3c15fd700163ecfed57f Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Sun, 4 Aug 2019 19:50:49 +0000 Subject: drm/i915/guc: Prefer intel_guc_is_submission_supported No need to use intel_uc_supports_guc_submission(uc) as we can directly use intel_guc_is_submission_supported(guc) Signed-off-by: Michal Wajdeczko Cc: Daniele Ceraolo Spurio Cc: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190804195052.31140-2-michal.wajdeczko@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_guc.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index 0ee8139885a5..f9c33062249f 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -145,7 +145,7 @@ static u32 guc_ctl_feature_flags(struct intel_guc *guc) { u32 flags = 0; - if (!intel_uc_supports_guc_submission(&guc_to_gt(guc)->uc)) + if (!intel_guc_is_submission_supported(guc)) flags |= GUC_CTL_DISABLE_SCHEDULER; return flags; @@ -155,7 +155,7 @@ static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc) { u32 flags = 0; - if (intel_uc_supports_guc_submission(&guc_to_gt(guc)->uc)) { + if (intel_guc_is_submission_supported(guc)) { u32 ctxnum, base; base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool); @@ -291,7 +291,7 @@ int intel_guc_init(struct intel_guc *guc) if (ret) goto err_ads; - if (intel_uc_supports_guc_submission(>->uc)) { + if (intel_guc_is_submission_supported(guc)) { /* * This is stuff we need to have available at fw load time * if we are planning to enable submission later @@ -330,7 +330,7 @@ void intel_guc_fini(struct intel_guc *guc) i915_ggtt_disable_guc(gt->ggtt); - if (intel_uc_supports_guc_submission(>->uc)) + if (intel_guc_is_submission_supported(guc)) intel_guc_submission_fini(guc); intel_guc_ct_fini(&guc->ct); -- cgit v1.2.3 From 2afc794c3ea026edcf4134b683e5c478073e8781 Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Sun, 4 Aug 2019 19:50:50 +0000 Subject: drm/i915/huc: Prefer intel_huc_is_supported No need to dance with intel_uc_supports_huc(uc) as we can directly use intel_huc_is_supported(huc) Signed-off-by: Michal Wajdeczko Cc: Daniele Ceraolo Spurio Cc: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190804195052.31140-3-michal.wajdeczko@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_huc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git 
a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c index ef54053c5ef9..66b2d5fdb317 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c @@ -187,7 +187,7 @@ int intel_huc_check_status(struct intel_huc *huc) intel_wakeref_t wakeref; u32 status = 0; - if (!intel_uc_supports_huc(>->uc)) + if (!intel_huc_is_supported(huc)) return -ENODEV; with_intel_runtime_pm(>->i915->runtime_pm, wakeref) -- cgit v1.2.3 From 34f0a20fa4ea8ad08b2d1fd43fa6e2febb9a4eff Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Sun, 4 Aug 2019 19:50:51 +0000 Subject: drm/i915/uc: Remove redundant GuC support checks Since commit db81bc6eb9c0 ("drm/i915/uc: Consider enable_guc modparam during fw selection") we have started using firmware status as main indicator of the GuC support. No need to use same checks twice. Signed-off-by: Michal Wajdeczko Cc: Daniele Ceraolo Spurio Cc: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190804195052.31140-4-michal.wajdeczko@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_uc.c | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index fe526698eee2..1ddd252419ec 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -306,9 +306,6 @@ int intel_uc_init(struct intel_uc *uc) if (!intel_uc_supports_guc(uc)) return 0; - if (!intel_uc_fw_supported(&guc->fw)) - return -ENODEV; - /* XXX: GuC submission is unavailable for now */ GEM_BUG_ON(intel_uc_supports_guc_submission(uc)); @@ -336,8 +333,6 @@ void intel_uc_fini(struct intel_uc *uc) if (!intel_uc_supports_guc(uc)) return; - GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw)); - if (intel_uc_supports_huc(uc)) intel_huc_fini(&uc->huc); @@ -351,7 +346,7 @@ static int __uc_sanitize(struct intel_uc *uc) struct intel_guc *guc = &uc->guc; struct intel_huc *huc = &uc->huc; - GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw)); + GEM_BUG_ON(!intel_uc_supports_guc(uc)); intel_huc_sanitize(huc); intel_guc_sanitize(guc); @@ -429,8 +424,6 @@ int intel_uc_init_hw(struct intel_uc *uc) if (!intel_uc_supports_guc(uc)) return 0; - GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw)); - ret = uc_init_wopcm(uc); if (ret) goto err_out; @@ -530,8 +523,6 @@ void intel_uc_fini_hw(struct intel_uc *uc) if (!intel_guc_is_running(guc)) return; - GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw)); - if (intel_uc_supports_guc_submission(uc)) intel_guc_submission_disable(guc); -- cgit v1.2.3 From d9a910f5ed72c11773a11cff84e0b957d3634d5f Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Sun, 4 Aug 2019 19:50:52 +0000 Subject: drm/i915/uc: Don't fail on HuC early init errors Since commit 301efe96f777 ("drm/i915/uc: Don't fail on HuC firmware failure") we can continue driver load after error during HuC firmware load or authentication, but we could still fail on any error during early HuC initialization. Change that by ignoring HuC related errors until hardware initialization phase where we can decide about next steps. 
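A rough sketch of that policy — record the failure, never propagate it from the early phase, and let the hardware-init code consult the flag later (all names invented for illustration; this is not the intel_uc code):

struct example_uc {
	bool huc_usable;	/* consulted later, during hardware init */
};

static int example_uc_init_early(struct example_uc *uc, int huc_err)
{
	uc->huc_usable = !huc_err;	/* remember what happened ... */
	return 0;			/* ... but never fail driver load for HuC here */
}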
Signed-off-by: Michal Wajdeczko Cc: Daniele Ceraolo Spurio Cc: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190804195052.31140-5-michal.wajdeczko@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_huc.c | 7 ++++++- drivers/gpu/drm/i915/gt/uc/intel_uc.c | 8 ++++---- 2 files changed, 10 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c index 66b2d5fdb317..faaf8ad5ba89 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c @@ -52,6 +52,11 @@ static int intel_huc_rsa_data_create(struct intel_huc *huc) struct i915_vma *vma; size_t copied; void *vaddr; + int err; + + err = i915_inject_load_error(gt->i915, -ENXIO); + if (err) + return err; /* * HuC firmware will sit above GUC_GGTT_TOP and will not map @@ -115,8 +120,8 @@ out_fini: void intel_huc_fini(struct intel_huc *huc) { - intel_uc_fw_fini(&huc->fw); intel_huc_rsa_data_destroy(huc); + intel_uc_fw_fini(&huc->fw); } /** diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index 1ddd252419ec..e87b7904ab7a 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -316,14 +316,14 @@ int intel_uc_init(struct intel_uc *uc) if (intel_uc_supports_huc(uc)) { ret = intel_huc_init(huc); if (ret) - goto err_guc; + goto out_huc; } return 0; -err_guc: - intel_guc_fini(guc); - return ret; +out_huc: + intel_uc_fw_cleanup_fetch(&huc->fw); + return 0; } void intel_uc_fini(struct intel_uc *uc) -- cgit v1.2.3 From 6c86e5ff2617efaca3db745132de0f004a7494e7 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 3 Aug 2019 11:22:21 +0100 Subject: drm/i915/gt: Remove stale kerneldoc for internal MOCS functions The kerneldoc were stale, generating mismatching parameters warning, but furthermore they were for internal routines, not part of the MOCS interface so the instructions were superfluous. Signed-off-by: Chris Wilson Reviewed-by: Daniele Ceraolo Spurio Link: https://patchwork.freedesktop.org/patch/msgid/20190803102221.21344-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_mocs.c | 57 ++---------------------------------- 1 file changed, 2 insertions(+), 55 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c index 77ddb307346a..728704bbbe18 100644 --- a/drivers/gpu/drm/i915/gt/intel_mocs.c +++ b/drivers/gpu/drm/i915/gt/intel_mocs.c @@ -287,18 +287,6 @@ static const struct drm_i915_mocs_entry icelake_mocs_table[] = { GEN11_MOCS_ENTRIES }; -/** - * get_mocs_settings() - * @gt: gt device - * @table: Output table that will be made to point at appropriate - * MOCS values for the device. - * - * This function will return the values of the MOCS table that needs to - * be programmed for the platform. It will return the values that need - * to be programmed and if they need to be programmed. - * - * Return: true if there are applicable MOCS settings for the device. - */ static bool get_mocs_settings(struct intel_gt *gt, struct drm_i915_mocs_table *table) { @@ -420,12 +408,6 @@ void intel_mocs_init_engine(struct intel_engine_cs *engine) unused_value); } -/** - * intel_mocs_init_global() - program the global mocs registers - * gt: pointer to struct intel_gt - * - * This function initializes the MOCS global registers. 
- */ static void intel_mocs_init_global(struct intel_gt *gt) { struct intel_uncore *uncore = gt->uncore; @@ -456,16 +438,6 @@ static void intel_mocs_init_global(struct intel_gt *gt) table.table[0].control_value); } -/** - * emit_mocs_control_table() - emit the mocs control table - * @rq: Request to set up the MOCS table for. - * @table: The values to program into the control regs. - * - * This function simply emits a MI_LOAD_REGISTER_IMM command for the - * given table starting at the given address. - * - * Return: 0 on success, otherwise the error status. - */ static int emit_mocs_control_table(struct i915_request *rq, const struct drm_i915_mocs_table *table) { @@ -525,17 +497,6 @@ static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table, return low | high << 16; } -/** - * emit_mocs_l3cc_table() - emit the mocs control table - * @rq: Request to set up the MOCS table for. - * @table: The values to program into the control regs. - * - * This function simply emits a MI_LOAD_REGISTER_IMM command for the - * given table starting at the given address. This register set is - * programmed in pairs. - * - * Return: 0 on success, otherwise the error status. - */ static int emit_mocs_l3cc_table(struct i915_request *rq, const struct drm_i915_mocs_table *table) { @@ -584,20 +545,6 @@ static int emit_mocs_l3cc_table(struct i915_request *rq, return 0; } -/** - * intel_mocs_init_l3cc_table() - program the mocs control table - * @gt: the intel_gt container - * - * This function simply programs the mocs registers for the given table - * starting at the given address. This register set is programmed in pairs. - * - * These registers may get programmed more than once, it is simpler to - * re-program 32 registers than maintain the state of when they were programmed. - * We are always reprogramming with the same values and this only on context - * start. - * - * Return: Nothing. - */ static void intel_mocs_init_l3cc_table(struct intel_gt *gt) { struct intel_uncore *uncore = gt->uncore; @@ -639,8 +586,8 @@ static void intel_mocs_init_l3cc_table(struct intel_gt *gt) } /** - * intel_rcs_context_init_mocs() - program the MOCS register. - * @rq: Request to set up the MOCS tables for. + * intel_mocs_emit() - program the MOCS register. + * @rq: Request to use to set up the MOCS tables. * * This function will emit a batch buffer with the values required for * programming the MOCS register values for all the currently supported -- cgit v1.2.3 From 361f9dc2436812cbcf727fe73d0a4c11ca895fbb Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 6 Aug 2019 08:42:19 +0100 Subject: drm/i915: Use drm_i915_private directly from drv_get_drvdata() As we store a pointer to i915 in the drvdata field (as the pointer is both an alias to the drm_device and drm_i915_private), we can use the stored pointer directly as the i915 device. v2: Store and use i915 inside drv_get_drvdata() v3: Only expect i915 inside drv_get_drvdata() so drop the assumed i915/drm equivalence. 
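The underlying convention is the usual driver-data round trip: stash the private structure at probe time, then read the same type straight back in hooks that only receive a struct device or pci_dev. A generic sketch (struct my_priv and the function names are invented, not the i915 types):

#include <linux/pci.h>
#include <linux/slab.h>

struct my_priv {
	struct pci_dev *pdev;
	bool suspended;
};

static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct my_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return -ENOMEM;

	priv->pdev = pdev;
	pci_set_drvdata(pdev, priv);	/* later hooks get the private struct back directly */
	return 0;
}

static int my_pm_suspend(struct device *kdev)
{
	struct my_priv *priv = dev_get_drvdata(kdev);

	if (!priv)
		return -ENODEV;	/* not bound / already removed */

	priv->suspended = true;
	return 0;
}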
Signed-off-by: Chris Wilson Cc: Andi Shyti Reviewed-by: Andi Shyti Link: https://patchwork.freedesktop.org/patch/msgid/20190806074219.11043-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_drv.c | 153 ++++++++++++------------- drivers/gpu/drm/i915/i915_drv.h | 9 +- drivers/gpu/drm/i915/i915_pci.c | 14 +-- drivers/gpu/drm/i915/selftests/i915_selftest.c | 2 +- 4 files changed, 91 insertions(+), 87 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index b9c6ae09d61f..14ca719dcf30 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -641,39 +641,45 @@ static unsigned int i915_vga_set_decode(void *cookie, bool state) return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; } -static int i915_resume_switcheroo(struct drm_device *dev); -static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state); +static int i915_resume_switcheroo(struct drm_i915_private *i915); +static int i915_suspend_switcheroo(struct drm_i915_private *i915, + pm_message_t state); static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) { - struct drm_device *dev = pci_get_drvdata(pdev); + struct drm_i915_private *i915 = pdev_to_i915(pdev); pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; + if (!i915) { + dev_err(&pdev->dev, "DRM not initialized, aborting switch.\n"); + return; + } + if (state == VGA_SWITCHEROO_ON) { pr_info("switched on\n"); - dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; + i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING; /* i915 resume handler doesn't set to D0 */ pci_set_power_state(pdev, PCI_D0); - i915_resume_switcheroo(dev); - dev->switch_power_state = DRM_SWITCH_POWER_ON; + i915_resume_switcheroo(i915); + i915->drm.switch_power_state = DRM_SWITCH_POWER_ON; } else { pr_info("switched off\n"); - dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; - i915_suspend_switcheroo(dev, pmm); - dev->switch_power_state = DRM_SWITCH_POWER_OFF; + i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING; + i915_suspend_switcheroo(i915, pmm); + i915->drm.switch_power_state = DRM_SWITCH_POWER_OFF; } } static bool i915_switcheroo_can_switch(struct pci_dev *pdev) { - struct drm_device *dev = pci_get_drvdata(pdev); + struct drm_i915_private *i915 = pdev_to_i915(pdev); /* * FIXME: open_count is protected by drm_global_mutex but that would lead to * locking inversion with the driver load path. And the access here is * completely racy anyway. So don't bother with locking for now. 
*/ - return dev->open_count == 0; + return i915 && i915->drm.open_count == 0; } static const struct vga_switcheroo_client_ops i915_switcheroo_ops = { @@ -1840,9 +1846,10 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent) return ERR_PTR(err); } - i915->drm.pdev = pdev; i915->drm.dev_private = i915; - pci_set_drvdata(pdev, &i915->drm); + + i915->drm.pdev = pdev; + pci_set_drvdata(pdev, i915); /* Setup the write-once "constant" device info */ device_info = mkwrite_device_info(i915); @@ -1942,51 +1949,50 @@ out_fini: return ret; } -void i915_driver_remove(struct drm_device *dev) +void i915_driver_remove(struct drm_i915_private *i915) { - struct drm_i915_private *dev_priv = to_i915(dev); - struct pci_dev *pdev = dev_priv->drm.pdev; + struct pci_dev *pdev = i915->drm.pdev; - disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); + disable_rpm_wakeref_asserts(&i915->runtime_pm); - i915_driver_unregister(dev_priv); + i915_driver_unregister(i915); /* * After unregistering the device to prevent any new users, cancel * all in-flight requests so that we can quickly unbind the active * resources. */ - intel_gt_set_wedged(&dev_priv->gt); + intel_gt_set_wedged(&i915->gt); /* Flush any external code that still may be under the RCU lock */ synchronize_rcu(); - i915_gem_suspend(dev_priv); + i915_gem_suspend(i915); - drm_atomic_helper_shutdown(dev); + drm_atomic_helper_shutdown(&i915->drm); - intel_gvt_driver_remove(dev_priv); + intel_gvt_driver_remove(i915); - intel_modeset_driver_remove(dev); + intel_modeset_driver_remove(&i915->drm); - intel_bios_driver_remove(dev_priv); + intel_bios_driver_remove(i915); vga_switcheroo_unregister_client(pdev); vga_client_register(pdev, NULL, NULL, NULL); - intel_csr_ucode_fini(dev_priv); + intel_csr_ucode_fini(i915); /* Free error state after interrupts are fully disabled. 
*/ - cancel_delayed_work_sync(&dev_priv->gt.hangcheck.work); - i915_reset_error_state(dev_priv); + cancel_delayed_work_sync(&i915->gt.hangcheck.work); + i915_reset_error_state(i915); - i915_gem_driver_remove(dev_priv); + i915_gem_driver_remove(i915); - intel_power_domains_driver_remove(dev_priv); + intel_power_domains_driver_remove(i915); - i915_driver_hw_remove(dev_priv); + i915_driver_hw_remove(i915); - enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); + enable_rpm_wakeref_asserts(&i915->runtime_pm); } static void i915_driver_release(struct drm_device *dev) @@ -2209,28 +2215,23 @@ out: return ret; } -static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state) +static int +i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state) { int error; - if (!dev) { - DRM_ERROR("dev: %p\n", dev); - DRM_ERROR("DRM not initialized, aborting suspend.\n"); - return -ENODEV; - } - if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND && state.event != PM_EVENT_FREEZE)) return -EINVAL; - if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) + if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) return 0; - error = i915_drm_suspend(dev); + error = i915_drm_suspend(&i915->drm); if (error) return error; - return i915_drm_suspend_late(dev, false); + return i915_drm_suspend_late(&i915->drm, false); } static int i915_drm_resume(struct drm_device *dev) @@ -2383,53 +2384,53 @@ static int i915_drm_resume_early(struct drm_device *dev) return ret; } -static int i915_resume_switcheroo(struct drm_device *dev) +static int i915_resume_switcheroo(struct drm_i915_private *i915) { int ret; - if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) + if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) return 0; - ret = i915_drm_resume_early(dev); + ret = i915_drm_resume_early(&i915->drm); if (ret) return ret; - return i915_drm_resume(dev); + return i915_drm_resume(&i915->drm); } static int i915_pm_prepare(struct device *kdev) { - struct drm_device *dev = dev_get_drvdata(kdev); + struct drm_i915_private *i915 = kdev_to_i915(kdev); - if (!dev) { + if (!i915) { dev_err(kdev, "DRM not initialized, aborting suspend.\n"); return -ENODEV; } - if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) + if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) return 0; - return i915_drm_prepare(dev); + return i915_drm_prepare(&i915->drm); } static int i915_pm_suspend(struct device *kdev) { - struct drm_device *dev = dev_get_drvdata(kdev); + struct drm_i915_private *i915 = kdev_to_i915(kdev); - if (!dev) { + if (!i915) { dev_err(kdev, "DRM not initialized, aborting suspend.\n"); return -ENODEV; } - if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) + if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) return 0; - return i915_drm_suspend(dev); + return i915_drm_suspend(&i915->drm); } static int i915_pm_suspend_late(struct device *kdev) { - struct drm_device *dev = &kdev_to_i915(kdev)->drm; + struct drm_i915_private *i915 = kdev_to_i915(kdev); /* * We have a suspend ordering issue with the snd-hda driver also @@ -2440,55 +2441,55 @@ static int i915_pm_suspend_late(struct device *kdev) * FIXME: This should be solved with a special hdmi sink device or * similar so that power domains can be employed. 
*/ - if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) + if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) return 0; - return i915_drm_suspend_late(dev, false); + return i915_drm_suspend_late(&i915->drm, false); } static int i915_pm_poweroff_late(struct device *kdev) { - struct drm_device *dev = &kdev_to_i915(kdev)->drm; + struct drm_i915_private *i915 = kdev_to_i915(kdev); - if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) + if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) return 0; - return i915_drm_suspend_late(dev, true); + return i915_drm_suspend_late(&i915->drm, true); } static int i915_pm_resume_early(struct device *kdev) { - struct drm_device *dev = &kdev_to_i915(kdev)->drm; + struct drm_i915_private *i915 = kdev_to_i915(kdev); - if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) + if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) return 0; - return i915_drm_resume_early(dev); + return i915_drm_resume_early(&i915->drm); } static int i915_pm_resume(struct device *kdev) { - struct drm_device *dev = &kdev_to_i915(kdev)->drm; + struct drm_i915_private *i915 = kdev_to_i915(kdev); - if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) + if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) return 0; - return i915_drm_resume(dev); + return i915_drm_resume(&i915->drm); } /* freeze: before creating the hibernation_image */ static int i915_pm_freeze(struct device *kdev) { - struct drm_device *dev = &kdev_to_i915(kdev)->drm; + struct drm_i915_private *i915 = kdev_to_i915(kdev); int ret; - if (dev->switch_power_state != DRM_SWITCH_POWER_OFF) { - ret = i915_drm_suspend(dev); + if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) { + ret = i915_drm_suspend(&i915->drm); if (ret) return ret; } - ret = i915_gem_freeze(kdev_to_i915(kdev)); + ret = i915_gem_freeze(i915); if (ret) return ret; @@ -2497,16 +2498,16 @@ static int i915_pm_freeze(struct device *kdev) static int i915_pm_freeze_late(struct device *kdev) { - struct drm_device *dev = &kdev_to_i915(kdev)->drm; + struct drm_i915_private *i915 = kdev_to_i915(kdev); int ret; - if (dev->switch_power_state != DRM_SWITCH_POWER_OFF) { - ret = i915_drm_suspend_late(dev, true); + if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) { + ret = i915_drm_suspend_late(&i915->drm, true); if (ret) return ret; } - ret = i915_gem_freeze_late(kdev_to_i915(kdev)); + ret = i915_gem_freeze_late(i915); if (ret) return ret; @@ -2908,8 +2909,7 @@ static int vlv_resume_prepare(struct drm_i915_private *dev_priv, static int intel_runtime_suspend(struct device *kdev) { - struct drm_device *dev = dev_get_drvdata(kdev); - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = kdev_to_i915(kdev); struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; int ret; @@ -3005,8 +3005,7 @@ static int intel_runtime_suspend(struct device *kdev) static int intel_runtime_resume(struct device *kdev) { - struct drm_device *dev = dev_get_drvdata(kdev); - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = kdev_to_i915(kdev); struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; int ret = 0; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 5f3e5c13fbaa..7f0937948935 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1897,7 +1897,12 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev) static inline struct drm_i915_private *kdev_to_i915(struct device *kdev) { - return 
to_i915(dev_get_drvdata(kdev)); + return dev_get_drvdata(kdev); +} + +static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev) +{ + return pci_get_drvdata(pdev); } static inline struct drm_i915_private *wopcm_to_i915(struct intel_wopcm *wopcm) @@ -2376,7 +2381,7 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); extern const struct dev_pm_ops i915_pm_ops; int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent); -void i915_driver_remove(struct drm_device *dev); +void i915_driver_remove(struct drm_i915_private *i915); void intel_engine_init_hangcheck(struct intel_engine_cs *engine); int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 20e562ced8cc..1febda2a90e7 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -874,16 +874,16 @@ MODULE_DEVICE_TABLE(pci, pciidlist); static void i915_pci_remove(struct pci_dev *pdev) { - struct drm_device *dev; + struct drm_i915_private *i915; - dev = pci_get_drvdata(pdev); - if (!dev) /* driver load aborted, nothing to cleanup */ + i915 = pci_get_drvdata(pdev); + if (!i915) /* driver load aborted, nothing to cleanup */ return; - i915_driver_remove(dev); - drm_dev_put(dev); - + i915_driver_remove(i915); pci_set_drvdata(pdev, NULL); + + drm_dev_put(&i915->drm); } /* is device_id present in comma separated list of ids */ @@ -959,7 +959,7 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) return err; - if (i915_inject_probe_failure(to_i915(pci_get_drvdata(pdev)))) { + if (i915_inject_probe_failure(pci_get_drvdata(pdev))) { i915_pci_remove(pdev); return -ENODEV; } diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c index db9c645bbdfe..438ea0eaa416 100644 --- a/drivers/gpu/drm/i915/selftests/i915_selftest.c +++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c @@ -185,7 +185,7 @@ int i915_live_selftests(struct pci_dev *pdev) if (!i915_selftest.live) return 0; - err = run_selftests(live, to_i915(pci_get_drvdata(pdev))); + err = run_selftests(live, pdev_to_i915(pdev)); if (err) { i915_selftest.live = err; return err; -- cgit v1.2.3 From c29579d2fabe7448a444681d8229384249d315f9 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 6 Aug 2019 13:42:59 +0100 Subject: drm/i915/gem: Make caps.scheduler static We do not notify userspace when the scheduler capabilities are changed (due to wedging the driver) and as such userspace will expect the caps to be static and unchanging. Make it so, and so we only need to compute our caps once during driver registration. 
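Purely as illustration (not part of the patch): userspace normally samples the scheduler caps once at startup via GETPARAM and caches the result, which is why the kernel must not change them afterwards. A minimal userspace-side sketch, assuming the standard i915 uAPI headers are available:

    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    static int query_scheduler_caps(int drm_fd)
    {
            int caps = 0;
            struct drm_i915_getparam gp = {
                    .param = I915_PARAM_HAS_SCHEDULER,
                    .value = &caps,
            };

            /* Queried once; after this change the answer no longer varies at runtime. */
            if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
                    return 0;

            return caps; /* e.g. I915_SCHEDULER_CAP_ENABLED | I915_SCHEDULER_CAP_PRIORITY */
    }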
Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190806124300.24945-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 16 ++-------------- drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 4 ++-- drivers/gpu/drm/i915/gt/intel_reset.c | 5 +---- drivers/gpu/drm/i915/i915_drv.c | 4 ++-- drivers/gpu/drm/i915/i915_drv.h | 6 ++++-- drivers/gpu/drm/i915/i915_gem.c | 13 +++++++++++-- drivers/gpu/drm/i915/i915_request.c | 2 -- 7 files changed, 22 insertions(+), 28 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c index 5ab7df53c2a0..edd21d14e64f 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c @@ -459,13 +459,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr return NOTIFY_DONE; } -/** - * i915_gem_shrinker_register - Register the i915 shrinker - * @i915: i915 device - * - * This function registers and sets up the i915 shrinker and OOM handler. - */ -void i915_gem_shrinker_register(struct drm_i915_private *i915) +void i915_gem_driver_register__shrinker(struct drm_i915_private *i915) { i915->mm.shrinker.scan_objects = i915_gem_shrinker_scan; i915->mm.shrinker.count_objects = i915_gem_shrinker_count; @@ -480,13 +474,7 @@ void i915_gem_shrinker_register(struct drm_i915_private *i915) WARN_ON(register_vmap_purge_notifier(&i915->mm.vmap_notifier)); } -/** - * i915_gem_shrinker_unregister - Unregisters the i915 shrinker - * @i915: i915 device - * - * This function unregisters the i915 shrinker and OOM handler. - */ -void i915_gem_shrinker_unregister(struct drm_i915_private *i915) +void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915) { WARN_ON(unregister_vmap_purge_notifier(&i915->mm.vmap_notifier)); WARN_ON(unregister_oom_notifier(&i915->mm.oom_notifier)); diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index 01857c12f12f..50aa7e95124d 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -382,7 +382,7 @@ static bool assert_mmap_offset(struct drm_i915_private *i915, static void disable_retire_worker(struct drm_i915_private *i915) { - i915_gem_shrinker_unregister(i915); + i915_gem_driver_unregister__shrinker(i915); intel_gt_pm_get(&i915->gt); @@ -398,7 +398,7 @@ static void restore_retire_worker(struct drm_i915_private *i915) igt_flush_test(i915, I915_WAIT_LOCKED); mutex_unlock(&i915->drm.struct_mutex); - i915_gem_shrinker_register(i915); + i915_gem_driver_register__shrinker(i915); } static void mmap_offset_lock(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 98c071fe532b..cdba6cd29327 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -757,11 +757,8 @@ static void __intel_gt_set_wedged(struct intel_gt *gt) if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display) __intel_gt_reset(gt, ALL_ENGINES); - for_each_engine(engine, gt->i915, id) { + for_each_engine(engine, gt->i915, id) engine->submit_request = nop_submit_request; - engine->schedule = NULL; - } - gt->i915->caps.scheduler = 0; /* * Make sure no request can slip through without getting completed by diff --git a/drivers/gpu/drm/i915/i915_drv.c 
b/drivers/gpu/drm/i915/i915_drv.c index 14ca719dcf30..ec61e8a7c818 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1719,7 +1719,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv) { struct drm_device *dev = &dev_priv->drm; - i915_gem_shrinker_register(dev_priv); + i915_gem_driver_register(dev_priv); i915_pmu_register(dev_priv); /* @@ -1799,7 +1799,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv) i915_teardown_sysfs(dev_priv); drm_dev_unplug(&dev_priv->drm); - i915_gem_shrinker_unregister(dev_priv); + i915_gem_driver_unregister(dev_priv); } static void i915_welcome_messages(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 7f0937948935..cb64dd9b6e64 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2488,6 +2488,8 @@ static inline u32 i915_reset_engine_count(struct i915_gpu_error *error, void i915_gem_init_mmio(struct drm_i915_private *i915); int __must_check i915_gem_init(struct drm_i915_private *dev_priv); int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv); +void i915_gem_driver_register(struct drm_i915_private *i915); +void i915_gem_driver_unregister(struct drm_i915_private *i915); void i915_gem_driver_remove(struct drm_i915_private *dev_priv); void i915_gem_driver_release(struct drm_i915_private *dev_priv); int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv, @@ -2588,8 +2590,8 @@ unsigned long i915_gem_shrink(struct drm_i915_private *i915, #define I915_SHRINK_WRITEBACK BIT(4) unsigned long i915_gem_shrink_all(struct drm_i915_private *i915); -void i915_gem_shrinker_register(struct drm_i915_private *i915); -void i915_gem_shrinker_unregister(struct drm_i915_private *i915); +void i915_gem_driver_register__shrinker(struct drm_i915_private *i915); +void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915); void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915, struct mutex *mutex); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index eb34f3e5a74d..5ab1ddfef23c 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1249,8 +1249,6 @@ int i915_gem_init_hw(struct drm_i915_private *i915) intel_mocs_init(gt); - intel_engines_set_scheduler_caps(i915); - out: intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); return ret; @@ -1599,6 +1597,17 @@ err_unlock: return ret; } +void i915_gem_driver_register(struct drm_i915_private *i915) +{ + i915_gem_driver_register__shrinker(i915); + intel_engines_set_scheduler_caps(i915); +} + +void i915_gem_driver_unregister(struct drm_i915_private *i915) +{ + i915_gem_driver_unregister__shrinker(i915); +} + void i915_gem_driver_remove(struct drm_i915_private *dev_priv) { GEM_BUG_ON(dev_priv->gt.awake); diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 8ac7d14ec8c9..81094f250bdb 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -1198,7 +1198,6 @@ struct i915_request *__i915_request_commit(struct i915_request *rq) */ local_bh_disable(); i915_sw_fence_commit(&rq->semaphore); - rcu_read_lock(); /* RCU serialisation for set-wedged protection */ if (engine->schedule) { struct i915_sched_attr attr = rq->gem_context->sched; @@ -1228,7 +1227,6 @@ struct i915_request *__i915_request_commit(struct i915_request *rq) engine->schedule(rq, &attr); } - rcu_read_unlock(); 
i915_sw_fence_commit(&rq->submit); local_bh_enable(); /* Kick the execlists tasklet if just scheduled */ -- cgit v1.2.3 From 750e76b4f9f63c95bf4c283ccf8e2af0e258d3bb Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 6 Aug 2019 13:43:00 +0100 Subject: drm/i915/gt: Move the [class][inst] lookup for engines onto the GT To maintain a fast lookup from a GT centric irq handler, we want the engine lookup tables on the intel_gt. To avoid having multiple copies of the same multi-dimension lookup table, move the generic user engine lookup into an rbtree (for fast and flexible indexing). v2: Split uabi_instance cf uabi_class v3: Set uabi_class/uabi_instance after collating all engines to provide a stable uabi across parallel unordered construction. Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Cc: Daniele Ceraolo Spurio Reviewed-by: Daniele Ceraolo Spurio #v2 Link: https://patchwork.freedesktop.org/patch/msgid/20190806124300.24945-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/Makefile | 3 +- drivers/gpu/drm/i915/gem/i915_gem_context.c | 3 +- drivers/gpu/drm/i915/gt/intel_engine.h | 6 - drivers/gpu/drm/i915/gt/intel_engine_cs.c | 109 ++--------- drivers/gpu/drm/i915/gt/intel_engine_types.h | 9 +- drivers/gpu/drm/i915/gt/intel_engine_user.c | 228 ++++++++++++++++++++++ drivers/gpu/drm/i915/gt/intel_engine_user.h | 23 +++ drivers/gpu/drm/i915/gt/intel_gt_types.h | 4 + drivers/gpu/drm/i915/gt/selftest_lrc.c | 15 +- drivers/gpu/drm/i915/gt/selftest_workarounds.c | 1 + drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 2 +- drivers/gpu/drm/i915/i915_cmd_parser.c | 3 +- drivers/gpu/drm/i915/i915_debugfs.c | 17 +- drivers/gpu/drm/i915/i915_drv.c | 1 + drivers/gpu/drm/i915/i915_drv.h | 15 +- drivers/gpu/drm/i915/i915_gem.c | 22 +-- drivers/gpu/drm/i915/i915_irq.c | 2 +- drivers/gpu/drm/i915/i915_perf.c | 3 +- drivers/gpu/drm/i915/i915_pmu.c | 8 +- drivers/gpu/drm/i915/i915_query.c | 5 +- drivers/gpu/drm/i915/i915_trace.h | 10 +- 21 files changed, 328 insertions(+), 161 deletions(-) create mode 100644 drivers/gpu/drm/i915/gt/intel_engine_user.c create mode 100644 drivers/gpu/drm/i915/gt/intel_engine_user.h (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 53a55326106a..327fc448b2d5 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -72,9 +72,10 @@ obj-y += gt/ gt-y += \ gt/intel_breadcrumbs.o \ gt/intel_context.o \ - gt/intel_engine_pool.o \ gt/intel_engine_cs.o \ + gt/intel_engine_pool.o \ gt/intel_engine_pm.o \ + gt/intel_engine_user.o \ gt/intel_gt.o \ gt/intel_gt_pm.o \ gt/intel_hangcheck.o \ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index 64f7a533e886..1c5bc21a80ff 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -70,6 +70,7 @@ #include #include "gt/intel_lrc_reg.h" +#include "gt/intel_engine_user.h" #include "i915_gem_context.h" #include "i915_globals.h" @@ -1729,7 +1730,7 @@ get_engines(struct i915_gem_context *ctx, if (e->engines[n]) { ci.engine_class = e->engines[n]->engine->uabi_class; - ci.engine_instance = e->engines[n]->engine->instance; + ci.engine_instance = e->engines[n]->engine->uabi_instance; } if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) { diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h index a4db4dd22b4f..37c391cee441 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine.h +++ b/drivers/gpu/drm/i915/gt/intel_engine.h 
@@ -122,8 +122,6 @@ hangcheck_action_to_str(const enum intel_engine_hangcheck_action a) return "unknown"; } -void intel_engines_set_scheduler_caps(struct drm_i915_private *i915); - static inline unsigned int execlists_num_ports(const struct intel_engine_execlists * const execlists) { @@ -422,7 +420,6 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine); bool intel_engines_are_idle(struct intel_gt *gt); void intel_engines_reset_default_submission(struct intel_gt *gt); -unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915); bool intel_engine_can_store_dword(struct intel_engine_cs *engine); @@ -431,9 +428,6 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m, const char *header, ...); -struct intel_engine_cs * -intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance); - static inline void intel_engine_context_in(struct intel_engine_cs *engine) { unsigned long flags; diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index a91f15717cc1..d0befd6c023a 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -33,6 +33,7 @@ #include "intel_engine.h" #include "intel_engine_pm.h" #include "intel_engine_pool.h" +#include "intel_engine_user.h" #include "intel_context.h" #include "intel_lrc.h" #include "intel_reset.h" @@ -286,9 +287,7 @@ static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine) intel_engine_set_hwsp_writemask(engine, ~0u); } -static int -intel_engine_setup(struct drm_i915_private *dev_priv, - enum intel_engine_id id) +static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id) { const struct engine_info *info = &intel_engines[id]; struct intel_engine_cs *engine; @@ -304,10 +303,9 @@ intel_engine_setup(struct drm_i915_private *dev_priv, if (GEM_DEBUG_WARN_ON(info->instance > MAX_ENGINE_INSTANCE)) return -EINVAL; - if (GEM_DEBUG_WARN_ON(dev_priv->engine_class[info->class][info->instance])) + if (GEM_DEBUG_WARN_ON(gt->engine_class[info->class][info->instance])) return -EINVAL; - GEM_BUG_ON(dev_priv->engine[id]); engine = kzalloc(sizeof(*engine), GFP_KERNEL); if (!engine) return -ENOMEM; @@ -316,12 +314,12 @@ intel_engine_setup(struct drm_i915_private *dev_priv, engine->id = id; engine->mask = BIT(id); - engine->i915 = dev_priv; - engine->gt = &dev_priv->gt; - engine->uncore = &dev_priv->uncore; + engine->i915 = gt->i915; + engine->gt = gt; + engine->uncore = gt->uncore; __sprint_engine_name(engine->name, info); engine->hw_id = engine->guc_id = info->hw_id; - engine->mmio_base = __engine_mmio_base(dev_priv, info->mmio_bases); + engine->mmio_base = __engine_mmio_base(gt->i915, info->mmio_bases); engine->class = info->class; engine->instance = info->instance; @@ -331,14 +329,12 @@ intel_engine_setup(struct drm_i915_private *dev_priv, */ engine->destroy = (typeof(engine->destroy))kfree; - engine->uabi_class = intel_engine_classes[info->class].uabi_class; - - engine->context_size = intel_engine_context_size(dev_priv, + engine->context_size = intel_engine_context_size(gt->i915, engine->class); if (WARN_ON(engine->context_size > BIT(20))) engine->context_size = 0; if (engine->context_size) - DRIVER_CAPS(dev_priv)->has_logical_contexts = true; + DRIVER_CAPS(gt->i915)->has_logical_contexts = true; /* Nothing to do here, execute in order of dependencies */ engine->schedule = NULL; @@ -350,8 +346,11 @@ intel_engine_setup(struct drm_i915_private *dev_priv, /* Scrub mmio state on takeover */ 
intel_engine_sanitize_mmio(engine); - dev_priv->engine_class[info->class][info->instance] = engine; - dev_priv->engine[id] = engine; + gt->engine_class[info->class][info->instance] = engine; + + intel_engine_add_user(engine); + gt->i915->engine[id] = engine; + return 0; } @@ -434,7 +433,7 @@ int intel_engines_init_mmio(struct drm_i915_private *i915) if (!HAS_ENGINE(i915, i)) continue; - err = intel_engine_setup(i915, i); + err = intel_engine_setup(&i915->gt, i); if (err) goto cleanup; @@ -677,47 +676,6 @@ cleanup: return err; } -void intel_engines_set_scheduler_caps(struct drm_i915_private *i915) -{ - static const struct { - u8 engine; - u8 sched; - } map[] = { -#define MAP(x, y) { ilog2(I915_ENGINE_##x), ilog2(I915_SCHEDULER_CAP_##y) } - MAP(HAS_PREEMPTION, PREEMPTION), - MAP(HAS_SEMAPHORES, SEMAPHORES), - MAP(SUPPORTS_STATS, ENGINE_BUSY_STATS), -#undef MAP - }; - struct intel_engine_cs *engine; - enum intel_engine_id id; - u32 enabled, disabled; - - enabled = 0; - disabled = 0; - for_each_engine(engine, i915, id) { /* all engines must agree! */ - int i; - - if (engine->schedule) - enabled |= (I915_SCHEDULER_CAP_ENABLED | - I915_SCHEDULER_CAP_PRIORITY); - else - disabled |= (I915_SCHEDULER_CAP_ENABLED | - I915_SCHEDULER_CAP_PRIORITY); - - for (i = 0; i < ARRAY_SIZE(map); i++) { - if (engine->flags & BIT(map[i].engine)) - enabled |= BIT(map[i].sched); - else - disabled |= BIT(map[i].sched); - } - } - - i915->caps.scheduler = enabled & ~disabled; - if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED)) - i915->caps.scheduler = 0; -} - struct measure_breadcrumb { struct i915_request rq; struct intel_timeline timeline; @@ -1187,20 +1145,6 @@ bool intel_engine_can_store_dword(struct intel_engine_cs *engine) } } -unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - unsigned int which; - - which = 0; - for_each_engine(engine, i915, id) - if (engine->default_state) - which |= BIT(engine->uabi_class); - - return which; -} - static int print_sched_attr(struct drm_i915_private *i915, const struct i915_sched_attr *attr, char *buf, int x, int len) @@ -1498,29 +1442,6 @@ void intel_engine_dump(struct intel_engine_cs *engine, intel_engine_print_breadcrumbs(engine, m); } -static u8 user_class_map[] = { - [I915_ENGINE_CLASS_RENDER] = RENDER_CLASS, - [I915_ENGINE_CLASS_COPY] = COPY_ENGINE_CLASS, - [I915_ENGINE_CLASS_VIDEO] = VIDEO_DECODE_CLASS, - [I915_ENGINE_CLASS_VIDEO_ENHANCE] = VIDEO_ENHANCEMENT_CLASS, -}; - -struct intel_engine_cs * -intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance) -{ - if (class >= ARRAY_SIZE(user_class_map)) - return NULL; - - class = user_class_map[class]; - - GEM_BUG_ON(class > MAX_ENGINE_CLASS); - - if (instance > MAX_ENGINE_INSTANCE) - return NULL; - - return i915->engine_class[class][instance]; -} - /** * intel_enable_engine_stats() - Enable engine busy tracking on engine * @engine: engine to enable stats collection diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index 2e273e8f5064..dacaa707c797 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -267,15 +268,19 @@ struct intel_engine_cs { unsigned int guc_id; intel_engine_mask_t mask; - u8 uabi_class; - u8 class; u8 instance; + + u8 uabi_class; + u8 uabi_instance; + u32 context_size; u32 mmio_base; u32 
uabi_capabilities; + struct rb_node uabi_node; + struct intel_sseu sseu; struct intel_ring *buffer; diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c new file mode 100644 index 000000000000..68fda1ac3c60 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c @@ -0,0 +1,228 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include +#include +#include + +#include "i915_drv.h" +#include "intel_engine.h" +#include "intel_engine_user.h" + +struct intel_engine_cs * +intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance) +{ + struct rb_node *p = i915->uabi_engines.rb_node; + + while (p) { + struct intel_engine_cs *it = + rb_entry(p, typeof(*it), uabi_node); + + if (class < it->uabi_class) + p = p->rb_left; + else if (class > it->uabi_class || + instance > it->uabi_instance) + p = p->rb_right; + else if (instance < it->uabi_instance) + p = p->rb_left; + else + return it; + } + + return NULL; +} + +void intel_engine_add_user(struct intel_engine_cs *engine) +{ + llist_add((struct llist_node *)&engine->uabi_node, + (struct llist_head *)&engine->i915->uabi_engines); +} + +static const u8 uabi_classes[] = { + [RENDER_CLASS] = I915_ENGINE_CLASS_RENDER, + [COPY_ENGINE_CLASS] = I915_ENGINE_CLASS_COPY, + [VIDEO_DECODE_CLASS] = I915_ENGINE_CLASS_VIDEO, + [VIDEO_ENHANCEMENT_CLASS] = I915_ENGINE_CLASS_VIDEO_ENHANCE, +}; + +static int engine_cmp(void *priv, struct list_head *A, struct list_head *B) +{ + const struct intel_engine_cs *a = + container_of((struct rb_node *)A, typeof(*a), uabi_node); + const struct intel_engine_cs *b = + container_of((struct rb_node *)B, typeof(*b), uabi_node); + + if (uabi_classes[a->class] < uabi_classes[b->class]) + return -1; + if (uabi_classes[a->class] > uabi_classes[b->class]) + return 1; + + if (a->instance < b->instance) + return -1; + if (a->instance > b->instance) + return 1; + + return 0; +} + +static struct llist_node *get_engines(struct drm_i915_private *i915) +{ + return llist_del_all((struct llist_head *)&i915->uabi_engines); +} + +static void sort_engines(struct drm_i915_private *i915, + struct list_head *engines) +{ + struct llist_node *pos, *next; + + llist_for_each_safe(pos, next, get_engines(i915)) { + struct intel_engine_cs *engine = + container_of((struct rb_node *)pos, typeof(*engine), + uabi_node); + list_add((struct list_head *)&engine->uabi_node, engines); + } + list_sort(NULL, engines, engine_cmp); +} + +static void set_scheduler_caps(struct drm_i915_private *i915) +{ + static const struct { + u8 engine; + u8 sched; + } map[] = { +#define MAP(x, y) { ilog2(I915_ENGINE_##x), ilog2(I915_SCHEDULER_CAP_##y) } + MAP(HAS_PREEMPTION, PREEMPTION), + MAP(HAS_SEMAPHORES, SEMAPHORES), + MAP(SUPPORTS_STATS, ENGINE_BUSY_STATS), +#undef MAP + }; + struct intel_engine_cs *engine; + u32 enabled, disabled; + + enabled = 0; + disabled = 0; + for_each_uabi_engine(engine, i915) { /* all engines must agree! 
*/ + int i; + + if (engine->schedule) + enabled |= (I915_SCHEDULER_CAP_ENABLED | + I915_SCHEDULER_CAP_PRIORITY); + else + disabled |= (I915_SCHEDULER_CAP_ENABLED | + I915_SCHEDULER_CAP_PRIORITY); + + for (i = 0; i < ARRAY_SIZE(map); i++) { + if (engine->flags & BIT(map[i].engine)) + enabled |= BIT(map[i].sched); + else + disabled |= BIT(map[i].sched); + } + } + + i915->caps.scheduler = enabled & ~disabled; + if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED)) + i915->caps.scheduler = 0; +} + +void intel_engines_driver_register(struct drm_i915_private *i915) +{ + u8 uabi_instances[4] = {}; + struct list_head *it, *next; + struct rb_node **p, *prev; + LIST_HEAD(engines); + + sort_engines(i915, &engines); + + prev = NULL; + p = &i915->uabi_engines.rb_node; + list_for_each_safe(it, next, &engines) { + struct intel_engine_cs *engine = + container_of((struct rb_node *)it, typeof(*engine), + uabi_node); + + GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes)); + engine->uabi_class = uabi_classes[engine->class]; + + GEM_BUG_ON(engine->uabi_class >= ARRAY_SIZE(uabi_instances)); + engine->uabi_instance = uabi_instances[engine->uabi_class]++; + + rb_link_node(&engine->uabi_node, prev, p); + rb_insert_color(&engine->uabi_node, &i915->uabi_engines); + + GEM_BUG_ON(intel_engine_lookup_user(i915, + engine->uabi_class, + engine->uabi_instance) != engine); + + prev = &engine->uabi_node; + p = &prev->rb_right; + } + + if (IS_ENABLED(CONFIG_DRM_I915_SELFTESTS) && + IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) { + struct intel_engine_cs *engine; + unsigned int isolation; + int class, inst; + int errors = 0; + + for (class = 0; class < ARRAY_SIZE(uabi_instances); class++) { + for (inst = 0; inst < uabi_instances[class]; inst++) { + engine = intel_engine_lookup_user(i915, + class, inst); + if (!engine) { + pr_err("UABI engine not found for { class:%d, instance:%d }\n", + class, inst); + errors++; + continue; + } + + if (engine->uabi_class != class || + engine->uabi_instance != inst) { + pr_err("Wrong UABI engine:%s { class:%d, instance:%d } found for { class:%d, instance:%d }\n", + engine->name, + engine->uabi_class, + engine->uabi_instance, + class, inst); + errors++; + continue; + } + } + } + + /* + * Make sure that classes with multiple engine instances all + * share the same basic configuration. + */ + isolation = intel_engines_has_context_isolation(i915); + for_each_uabi_engine(engine, i915) { + unsigned int bit = BIT(engine->uabi_class); + unsigned int expected = engine->default_state ? 
bit : 0; + + if ((isolation & bit) != expected) { + pr_err("mismatching default context state for class %d on engine %s\n", + engine->uabi_class, engine->name); + errors++; + } + } + + if (WARN(errors, "Invalid UABI engine mapping found")) + i915->uabi_engines = RB_ROOT; + } + + set_scheduler_caps(i915); +} + +unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915) +{ + struct intel_engine_cs *engine; + unsigned int which; + + which = 0; + for_each_uabi_engine(engine, i915) + if (engine->default_state) + which |= BIT(engine->uabi_class); + + return which; +} diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.h b/drivers/gpu/drm/i915/gt/intel_engine_user.h new file mode 100644 index 000000000000..9e5f113e5027 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_engine_user.h @@ -0,0 +1,23 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef INTEL_ENGINE_USER_H +#define INTEL_ENGINE_USER_H + +#include + +struct drm_i915_private; +struct intel_engine_cs; + +struct intel_engine_cs * +intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance); + +unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915); + +void intel_engine_add_user(struct intel_engine_cs *engine); +void intel_engines_driver_register(struct drm_i915_private *i915); + +#endif /* INTEL_ENGINE_USER_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h index 34d4a868e4f1..5fd11e361d03 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h @@ -21,6 +21,7 @@ struct drm_i915_private; struct i915_ggtt; +struct intel_engine_cs; struct intel_uncore; struct intel_hangcheck { @@ -76,6 +77,9 @@ struct intel_gt { u32 pm_ier; u32 pm_guc_events; + + struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1] + [MAX_ENGINE_INSTANCE + 1]; }; enum intel_gt_scratch_field { diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index b40b57d2daae..b26225751a54 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -1792,6 +1792,7 @@ static int live_virtual_engine(void *arg) struct drm_i915_private *i915 = arg; struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1]; struct intel_engine_cs *engine; + struct intel_gt *gt = &i915->gt; enum intel_engine_id id; unsigned int class, inst; int err = -ENODEV; @@ -1815,10 +1816,10 @@ static int live_virtual_engine(void *arg) nsibling = 0; for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) { - if (!i915->engine_class[class][inst]) + if (!gt->engine_class[class][inst]) continue; - siblings[nsibling++] = i915->engine_class[class][inst]; + siblings[nsibling++] = gt->engine_class[class][inst]; } if (nsibling < 2) continue; @@ -1939,6 +1940,7 @@ static int live_virtual_mask(void *arg) { struct drm_i915_private *i915 = arg; struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1]; + struct intel_gt *gt = &i915->gt; unsigned int class, inst; int err = 0; @@ -1952,10 +1954,10 @@ static int live_virtual_mask(void *arg) nsibling = 0; for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) { - if (!i915->engine_class[class][inst]) + if (!gt->engine_class[class][inst]) break; - siblings[nsibling++] = i915->engine_class[class][inst]; + siblings[nsibling++] = gt->engine_class[class][inst]; } if (nsibling < 2) continue; @@ -2116,6 +2118,7 @@ static int live_virtual_bond(void *arg) }; struct drm_i915_private *i915 = arg; struct 
intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1]; + struct intel_gt *gt = &i915->gt; unsigned int class, inst; int err = 0; @@ -2130,11 +2133,11 @@ static int live_virtual_bond(void *arg) nsibling = 0; for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) { - if (!i915->engine_class[class][inst]) + if (!gt->engine_class[class][inst]) break; GEM_BUG_ON(nsibling == ARRAY_SIZE(siblings)); - siblings[nsibling++] = i915->engine_class[class][inst]; + siblings[nsibling++] = gt->engine_class[class][inst]; } if (nsibling < 2) continue; diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c index 997da94821d9..5efd2bb89d5f 100644 --- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c +++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c @@ -5,6 +5,7 @@ */ #include "gem/i915_gem_pm.h" +#include "gt/intel_engine_user.h" #include "gt/intel_gt.h" #include "i915_selftest.h" #include "intel_reset.h" diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 412892096daa..a78bd99bc6cb 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -488,7 +488,7 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq) ring_tail, rq->fence.seqno); guc_ring_doorbell(client); - client->submissions[engine->id] += 1; + client->submissions[engine->guc_id] += 1; } /* diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index a28bcd2d7c09..cb4a0ca72efe 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -1352,11 +1352,10 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine, int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv) { struct intel_engine_cs *engine; - enum intel_engine_id id; bool active = false; /* If the command parser is not enabled, report 0 - unsupported */ - for_each_engine(engine, dev_priv, id) { + for_each_uabi_engine(engine, dev_priv) { if (intel_engine_needs_cmd_parser(engine)) { active = true; break; diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 8953336f2ae5..63b599f584db 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -415,7 +415,6 @@ static int i915_interrupt_info(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = node_to_i915(m->private); struct intel_engine_cs *engine; - enum intel_engine_id id; intel_wakeref_t wakeref; int i, pipe; @@ -618,7 +617,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data) I915_READ(GEN11_GUNIT_CSME_INTR_MASK)); } else if (INTEL_GEN(dev_priv) >= 6) { - for_each_engine(engine, dev_priv, id) { + for_each_uabi_engine(engine, dev_priv) { seq_printf(m, "Graphics Interrupt mask (%s): %08x\n", engine->name, ENGINE_READ(engine, RING_IMR)); @@ -1890,7 +1889,6 @@ static void i915_guc_client_info(struct seq_file *m, struct intel_guc_client *client) { struct intel_engine_cs *engine; - enum intel_engine_id id; u64 tot = 0; seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n", @@ -1898,8 +1896,8 @@ static void i915_guc_client_info(struct seq_file *m, seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n", client->doorbell_id, client->doorbell_offset); - for_each_engine(engine, dev_priv, id) { - u64 submissions = client->submissions[id]; + for_each_uabi_engine(engine, dev_priv) { + u64 submissions = client->submissions[engine->guc_id]; tot += 
submissions; seq_printf(m, "\tSubmissions: %llu %s\n", submissions, engine->name); @@ -1939,7 +1937,6 @@ static int i915_guc_stage_pool(struct seq_file *m, void *data) struct drm_i915_private *dev_priv = node_to_i915(m->private); const struct intel_guc *guc = &dev_priv->gt.uc.guc; struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr; - intel_engine_mask_t tmp; int index; if (!USES_GUC_SUBMISSION(dev_priv)) @@ -1968,7 +1965,7 @@ static int i915_guc_stage_pool(struct seq_file *m, void *data) desc->wq_addr, desc->wq_size); seq_putc(m, '\n'); - for_each_engine(engine, dev_priv, tmp) { + for_each_uabi_engine(engine, dev_priv) { u32 guc_engine_id = engine->guc_id; struct guc_execlist_context *lrc = &desc->lrc[guc_engine_id]; @@ -2806,7 +2803,6 @@ static int i915_engine_info(struct seq_file *m, void *unused) struct drm_i915_private *dev_priv = node_to_i915(m->private); struct intel_engine_cs *engine; intel_wakeref_t wakeref; - enum intel_engine_id id; struct drm_printer p; wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); @@ -2818,7 +2814,7 @@ static int i915_engine_info(struct seq_file *m, void *unused) RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz); p = drm_seq_file_printer(m); - for_each_engine(engine, dev_priv, id) + for_each_uabi_engine(engine, dev_priv) intel_engine_dump(engine, &p, "%s\n", engine->name); intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); @@ -2899,9 +2895,8 @@ static int i915_wa_registers(struct seq_file *m, void *unused) { struct drm_i915_private *i915 = node_to_i915(m->private); struct intel_engine_cs *engine; - enum intel_engine_id id; - for_each_engine(engine, i915, id) { + for_each_uabi_engine(engine, i915) { const struct i915_wa_list *wal = &engine->ctx_wa_list; const struct i915_wa *wa; unsigned int count; diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index ec61e8a7c818..bea440ac8b0f 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -61,6 +61,7 @@ #include "gem/i915_gem_context.h" #include "gem/i915_gem_ioctls.h" +#include "gt/intel_engine_user.h" #include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" #include "gt/intel_reset.h" diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index cb64dd9b6e64..0c4db51cdf6e 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1376,11 +1376,12 @@ struct drm_i915_private { wait_queue_head_t gmbus_wait_queue; struct pci_dev *bridge_dev; - struct intel_engine_cs *engine[I915_NUM_ENGINES]; + /* Context used internally to idle the GPU and setup initial state */ struct i915_gem_context *kernel_context; - struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1] - [MAX_ENGINE_INSTANCE + 1]; + + struct intel_engine_cs *engine[I915_NUM_ENGINES]; + struct rb_root uabi_engines; struct resource mch_res; @@ -1924,6 +1925,14 @@ static inline struct drm_i915_private *wopcm_to_i915(struct intel_wopcm *wopcm) ((engine__) = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : \ 0;) +#define rb_to_uabi_engine(rb) \ + rb_entry_safe(rb, struct intel_engine_cs, uabi_node) + +#define for_each_uabi_engine(engine__, i915__) \ + for ((engine__) = rb_to_uabi_engine(rb_first(&(i915__)->uabi_engines));\ + (engine__); \ + (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node))) + enum hdmi_force_audio { HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */ HDMI_AUDIO_OFF, /* force turn off HDMI audio */ diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 
5ab1ddfef23c..7efff14b9137 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -46,6 +46,7 @@ #include "gem/i915_gem_ioctls.h" #include "gem/i915_gem_pm.h" #include "gem/i915_gemfs.h" +#include "gt/intel_engine_user.h" #include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" #include "gt/intel_mocs.h" @@ -1359,24 +1360,6 @@ err_rq: i915_gem_object_unpin_map(engine->default_state); } - if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) { - unsigned int found = intel_engines_has_context_isolation(i915); - - /* - * Make sure that classes with multiple engine instances all - * share the same basic configuration. - */ - for_each_engine(engine, i915, id) { - unsigned int bit = BIT(engine->uabi_class); - unsigned int expected = engine->default_state ? bit : 0; - - if ((found & bit) != expected) { - DRM_ERROR("mismatching default context state for class %d on engine %s\n", - engine->uabi_class, engine->name); - } - } - } - out_ctx: i915_gem_context_unlock_engines(ctx); i915_gem_context_set_closed(ctx); @@ -1600,7 +1583,8 @@ err_unlock: void i915_gem_driver_register(struct drm_i915_private *i915) { i915_gem_driver_register__shrinker(i915); - intel_engines_set_scheduler_caps(i915); + + intel_engines_driver_register(i915); } void i915_gem_driver_unregister(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 33ac5d7e1e9e..9094c4811f54 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -3251,7 +3251,7 @@ gen11_engine_irq_handler(struct intel_gt *gt, const u8 class, struct intel_engine_cs *engine; if (instance <= MAX_ENGINE_INSTANCE) - engine = gt->i915->engine_class[class][instance]; + engine = gt->engine_class[class][instance]; else engine = NULL; diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index f2cc69ccb635..2bcf0d6e2ee2 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -1875,7 +1875,6 @@ static int gen8_configure_all_contexts(struct drm_i915_private *i915, #undef ctx_flexeuN struct intel_engine_cs *engine; struct i915_gem_context *ctx; - enum intel_engine_id id; int i; for (i = 2; i < ARRAY_SIZE(regs); i++) @@ -1915,7 +1914,7 @@ static int gen8_configure_all_contexts(struct drm_i915_private *i915, * If we don't modify the kernel_context, we do not get events while * idle. */ - for_each_engine(engine, i915, id) { + for_each_uabi_engine(engine, i915) { struct intel_context *ce = engine->kernel_context; int err; diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index c2e5f6d5c1e0..c7ee0ab180e8 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -9,6 +9,7 @@ #include "gt/intel_engine.h" #include "gt/intel_engine_pm.h" +#include "gt/intel_engine_user.h" #include "gt/intel_gt_pm.h" #include "i915_drv.h" @@ -860,7 +861,6 @@ create_event_attributes(struct i915_pmu *pmu) struct i915_ext_attribute *i915_attr = NULL, *i915_iter; struct attribute **attr = NULL, **attr_iter; struct intel_engine_cs *engine; - enum intel_engine_id id; unsigned int i; /* Count how many counters we will be exposing. 
*/ @@ -869,7 +869,7 @@ create_event_attributes(struct i915_pmu *pmu) count++; } - for_each_engine(engine, i915, id) { + for_each_uabi_engine(engine, i915) { for (i = 0; i < ARRAY_SIZE(engine_events); i++) { if (!engine_event_status(engine, engine_events[i].sample)) @@ -920,7 +920,7 @@ create_event_attributes(struct i915_pmu *pmu) } /* Initialize supported engine counters. */ - for_each_engine(engine, i915, id) { + for_each_uabi_engine(engine, i915) { for (i = 0; i < ARRAY_SIZE(engine_events); i++) { char *str; @@ -937,7 +937,7 @@ create_event_attributes(struct i915_pmu *pmu) i915_iter = add_i915_attr(i915_iter, str, __I915_PMU_ENGINE(engine->uabi_class, - engine->instance, + engine->uabi_instance, engine_events[i].sample)); str = kasprintf(GFP_KERNEL, "%s-%s.unit", diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c index 7b7016171057..ad9240a0817a 100644 --- a/drivers/gpu/drm/i915/i915_query.c +++ b/drivers/gpu/drm/i915/i915_query.c @@ -105,7 +105,6 @@ query_engine_info(struct drm_i915_private *i915, struct drm_i915_query_engine_info query; struct drm_i915_engine_info info = { }; struct intel_engine_cs *engine; - enum intel_engine_id id; int len, ret; if (query_item->flags) @@ -125,9 +124,9 @@ query_engine_info(struct drm_i915_private *i915, info_ptr = &query_ptr->engines[0]; - for_each_engine(engine, i915, id) { + for_each_uabi_engine(engine, i915) { info.engine.engine_class = engine->uabi_class; - info.engine.engine_instance = engine->instance; + info.engine.engine_instance = engine->uabi_instance; info.capabilities = engine->uabi_capabilities; if (__copy_to_user(info_ptr, &info, sizeof(info))) diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index da18b8d6b80c..1d11245c4c87 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -677,7 +677,7 @@ TRACE_EVENT(i915_request_queue, __entry->dev = rq->i915->drm.primary->index; __entry->hw_id = rq->gem_context->hw_id; __entry->class = rq->engine->uabi_class; - __entry->instance = rq->engine->instance; + __entry->instance = rq->engine->uabi_instance; __entry->ctx = rq->fence.context; __entry->seqno = rq->fence.seqno; __entry->flags = flags; @@ -706,7 +706,7 @@ DECLARE_EVENT_CLASS(i915_request, __entry->dev = rq->i915->drm.primary->index; __entry->hw_id = rq->gem_context->hw_id; __entry->class = rq->engine->uabi_class; - __entry->instance = rq->engine->instance; + __entry->instance = rq->engine->uabi_instance; __entry->ctx = rq->fence.context; __entry->seqno = rq->fence.seqno; ), @@ -751,7 +751,7 @@ TRACE_EVENT(i915_request_in, __entry->dev = rq->i915->drm.primary->index; __entry->hw_id = rq->gem_context->hw_id; __entry->class = rq->engine->uabi_class; - __entry->instance = rq->engine->instance; + __entry->instance = rq->engine->uabi_instance; __entry->ctx = rq->fence.context; __entry->seqno = rq->fence.seqno; __entry->prio = rq->sched.attr.priority; @@ -782,7 +782,7 @@ TRACE_EVENT(i915_request_out, __entry->dev = rq->i915->drm.primary->index; __entry->hw_id = rq->gem_context->hw_id; __entry->class = rq->engine->uabi_class; - __entry->instance = rq->engine->instance; + __entry->instance = rq->engine->uabi_instance; __entry->ctx = rq->fence.context; __entry->seqno = rq->fence.seqno; __entry->completed = i915_request_completed(rq); @@ -847,7 +847,7 @@ TRACE_EVENT(i915_request_wait_begin, __entry->dev = rq->i915->drm.primary->index; __entry->hw_id = rq->gem_context->hw_id; __entry->class = rq->engine->uabi_class; - __entry->instance = 
rq->engine->instance; + __entry->instance = rq->engine->uabi_instance; __entry->ctx = rq->fence.context; __entry->seqno = rq->fence.seqno; __entry->flags = flags; -- cgit v1.2.3 From 033a856c73683924e28e329502572c413beb86c0 Mon Sep 17 00:00:00 2001 From: Anusha Srivatsa Date: Fri, 2 Aug 2019 11:38:56 -0700 Subject: drm/i915/dmc: Load DMC on TGL MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add Support to load DMC v2.03 on TGL. v2: Use version 2.03 that is already available since that works with PSR2 Signed-off-by: Anusha Srivatsa Signed-off-by: Lucas De Marchi Reviewed-by: Matt Atwood Reviewed-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190802183856.27280-1-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/intel_csr.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index 6ef74531588a..8279e72edf4c 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c @@ -39,6 +39,11 @@ #define GEN12_CSR_MAX_FW_SIZE ICL_CSR_MAX_FW_SIZE +#define TGL_CSR_PATH "i915/tgl_dmc_ver2_03.bin" +#define TGL_CSR_VERSION_REQUIRED CSR_VERSION(2, 3) +#define TGL_CSR_MAX_FW_SIZE 0x6000 +MODULE_FIRMWARE(TGL_CSR_PATH); + #define ICL_CSR_PATH "i915/icl_dmc_ver1_07.bin" #define ICL_CSR_VERSION_REQUIRED CSR_VERSION(1, 7) #define ICL_CSR_MAX_FW_SIZE 0x6000 @@ -674,6 +679,8 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv) intel_csr_runtime_pm_get(dev_priv); if (INTEL_GEN(dev_priv) >= 12) { + csr->fw_path = TGL_CSR_PATH; + csr->required_version = TGL_CSR_VERSION_REQUIRED; /* Allow to load fw via parameter using the last known size */ csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE; } else if (IS_GEN(dev_priv, 11)) { -- cgit v1.2.3 From 2cc450ceaae4febc1539975247d235eb06c11979 Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Fri, 2 Aug 2019 10:31:29 -0400 Subject: drm/amd/display: Validate dc_plane_info and dc_plane_size in atomic check [Why] Pitch, DCC, rotation and mirroring can result in updates that are not UPDATE_TYPE_FAST but UPDATE_TYPE_MED instead. DC needs dc_plane_info and dc_plane_size to make this determination and we aren't currently passing this into DC during atomic check. Underflow (visible or non-visible) can occur if we don't validate this correctly. This also will generally trigger p-state warnings, typically via the cursor handler when locking. [How] Get the framebuffer tiling flags and generate the required structures for DC in dm_determine_update_type_for_commit. 
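In outline (condensed from the hunk below, with error handling trimmed), atomic check now builds the same per-plane info and address structures that the commit path already uses, so DC sees pitch, DCC and rotation changes when classifying the update:

    /* per plane, inside dm_determine_update_type_for_commit() */
    ret = get_fb_info(amdgpu_fb, &tiling_flags);
    if (ret)
            goto cleanup;

    ret = fill_dc_plane_info_and_addr(dm->adev, new_plane_state, tiling_flags,
                                      &plane_info, &flip_addr.address);
    if (ret)
            goto cleanup;

    updates[num_plane].plane_info = &plane_info;
    updates[num_plane].flip_addr = &flip_addr;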
Signed-off-by: Nicholas Kazlauskas Reviewed-by: David Francis Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 24 +++++++++++++++++++++++ 1 file changed, 24 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index f5f3c44865cb..0432c312a20d 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -7040,6 +7040,12 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm, continue; for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) { + const struct amdgpu_framebuffer *amdgpu_fb = + to_amdgpu_framebuffer(new_plane_state->fb); + struct dc_plane_info plane_info; + struct dc_flip_addrs flip_addr; + uint64_t tiling_flags; + new_plane_crtc = new_plane_state->crtc; old_plane_crtc = old_plane_state->crtc; new_dm_plane_state = to_dm_plane_state(new_plane_state); @@ -7083,6 +7089,24 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm, updates[num_plane].scaling_info = &scaling_info; + if (amdgpu_fb) { + ret = get_fb_info(amdgpu_fb, &tiling_flags); + if (ret) + goto cleanup; + + memset(&flip_addr, 0, sizeof(flip_addr)); + + ret = fill_dc_plane_info_and_addr( + dm->adev, new_plane_state, tiling_flags, + &plane_info, + &flip_addr.address); + if (ret) + goto cleanup; + + updates[num_plane].plane_info = &plane_info; + updates[num_plane].flip_addr = &flip_addr; + } + num_plane++; } -- cgit v1.2.3 From caff0e6654377b623e37922d084b1a33aa9d44d7 Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Fri, 2 Aug 2019 10:45:11 -0400 Subject: drm/amd/display: Block immediate flips for non-fast updates [Why] Underflow can occur in the case where we change buffer pitch, DCC state, rotation or mirroring for a plane while also performing an immediate flip. It can also generate a p-state warning stack trace on DCN1 which is typically observed during the cursor handler pipe locking because of how frequent cursor updates can occur. [How] Store the update type on each CRTC - every plane will have access to the CRTC state if it's flipping. If the update type is not UPDATE_TYPE_FAST then the immediate flip should be disallowed. No changes to the target vblank sequencing need to be done, we just need to ensure that the surface registers do a double buffered update. Signed-off-by: Nicholas Kazlauskas Reviewed-by: David Francis Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 16 +++++++++++++++- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 1 + 2 files changed, 16 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 0432c312a20d..0bcb7662e2d9 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -5723,8 +5723,14 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, bundle->surface_updates[planes_count].plane_info = &bundle->plane_infos[planes_count]; + /* + * Only allow immediate flips for fast updates that don't + * change FB pitch, DCC state, rotation or mirroing. 
+ */ bundle->flip_addrs[planes_count].flip_immediate = - (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0; + (crtc->state->pageflip_flags & + DRM_MODE_PAGE_FLIP_ASYNC) != 0 && + acrtc_state->update_type == UPDATE_TYPE_FAST; timestamp_ns = ktime_get_ns(); bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000); @@ -7400,6 +7406,14 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, } } + /* Store the overall update type for use later in atomic check. */ + for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) { + struct dm_crtc_state *dm_new_crtc_state = + to_dm_crtc_state(new_crtc_state); + + dm_new_crtc_state->update_type = (int)overall_update_type; + } + /* Must be success */ WARN_ON(ret); return ret; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 35bee77def3b..c8c525a2b505 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -310,6 +310,7 @@ struct dm_crtc_state { bool cm_has_degamma; bool cm_is_degamma_srgb; + int update_type; int active_planes; bool interrupts_enabled; -- cgit v1.2.3 From 1f288afc2cb74e201bf8962d1c4d2f4dff2add51 Mon Sep 17 00:00:00 2001 From: Likun Gao Date: Fri, 2 Aug 2019 15:18:57 +0800 Subject: drm/amdgpu: pin the csb buffer on hw init for gfx v8 Without this pin, the csb buffer will be filled with inconsistent data after S3 resume. And that will causes gfx hang on gfxoff exit since this csb will be executed then. Signed-off-by: Likun Gao Tested-by: Paul Gover Reviewed-by: Feifei Xu Reviewed-by: Xiaojie Yuan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 40 +++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 249526317f81..3f866107d383 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1321,6 +1321,39 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) return 0; } +static int gfx_v8_0_csb_vram_pin(struct amdgpu_device *adev) +{ + int r; + + r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); + if (unlikely(r != 0)) + return r; + + r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, + AMDGPU_GEM_DOMAIN_VRAM); + if (!r) + adev->gfx.rlc.clear_state_gpu_addr = + amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj); + + amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); + + return r; +} + +static void gfx_v8_0_csb_vram_unpin(struct amdgpu_device *adev) +{ + int r; + + if (!adev->gfx.rlc.clear_state_obj) + return; + + r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true); + if (likely(r == 0)) { + amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); + amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); + } +} + static void gfx_v8_0_mec_fini(struct amdgpu_device *adev) { amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL); @@ -4795,6 +4828,10 @@ static int gfx_v8_0_hw_init(void *handle) gfx_v8_0_init_golden_registers(adev); gfx_v8_0_constants_init(adev); + r = gfx_v8_0_csb_vram_pin(adev); + if (r) + return r; + r = adev->gfx.rlc.funcs->resume(adev); if (r) return r; @@ -4911,6 +4948,9 @@ static int gfx_v8_0_hw_fini(void *handle) else pr_err("rlc is busy, skip halt rlc\n"); amdgpu_gfx_rlc_exit_safe_mode(adev); + + gfx_v8_0_csb_vram_unpin(adev); + return 0; } -- cgit v1.2.3 From 876923fb92a9e298625067284977917d4741ee2e Mon Sep 17 00:00:00 2001 
From: xinhui pan Date: Mon, 5 Aug 2019 14:53:49 +0800 Subject: drm/amdgpu: Fix panic during gpu reset Clear the flag after hw suspend, otherwise it skips the corresponding hw resume. Signed-off-by: xinhui pan Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index f9fabbc4181f..192f753407a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2257,6 +2257,7 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) DRM_ERROR("suspend of IP block <%s> failed %d\n", adev->ip_blocks[i].version->funcs->name, r); } + adev->ip_blocks[i].status.hw = false; /* handle putting the SMC in the appropriate state */ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { if (is_support_sw_smu(adev)) { @@ -2271,7 +2272,6 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) adev->mp1_state, r); return r; } - adev->ip_blocks[i].status.hw = false; } } } -- cgit v1.2.3 From 9234c587a81f360b80aeb7bed643d1405627d6c9 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 5 Aug 2019 14:53:12 +0800 Subject: drm/amd/powerplay: skip pcie params override on Arcturus V2 This is not supported on Arcturus. Affected ASIC: Arcturus V2: minor cosmetic fix Signed-off-by: Evan Quan Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 3b44bbc6f1e7..dd274922ed8a 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -1109,11 +1109,11 @@ static int smu_smc_table_hw_init(struct smu_context *smu, if (ret) return ret; - ret = smu_override_pcie_parameters(smu); - if (ret) - return ret; - if (adev->asic_type != CHIP_ARCTURUS) { + ret = smu_override_pcie_parameters(smu); + if (ret) + return ret; + ret = smu_notify_display_change(smu); if (ret) return ret; -- cgit v1.2.3 From 2a3c7ff6e37c0d0bf8d8e9031922d41c2409d7e5 Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Mon, 5 Aug 2019 15:48:30 +0800 Subject: drm/amdgpu: update ras sysfs feature info remove confused ras error type info Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 709d22912381..523f43732dee 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -789,25 +789,18 @@ static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev, struct amdgpu_device *adev = ddev->dev_private; struct ras_common_if head; int ras_block_count = AMDGPU_RAS_BLOCK_COUNT; - int i; + int i, enabled; ssize_t s; - struct ras_manager *obj; s = scnprintf(buf, PAGE_SIZE, "feature mask: 0x%x\n", con->features); for (i = 0; i < ras_block_count; i++) { head.block = i; + enabled = amdgpu_ras_is_feature_enabled(adev, &head); - if (amdgpu_ras_is_feature_enabled(adev, &head)) { - obj = amdgpu_ras_find_obj(adev, &head); - s += scnprintf(&buf[s], PAGE_SIZE - s, - "%s: %s\n", - 
ras_block_str(i), - ras_err_str(obj->head.type)); - } else - s += scnprintf(&buf[s], PAGE_SIZE - s, - "%s: disabled\n", - ras_block_str(i)); + s += scnprintf(&buf[s], PAGE_SIZE - s, + "%s ras feature mask: %s\n", + ras_block_str(i), enabled?"on":"off"); } return s; -- cgit v1.2.3 From 35ef88fa115d0810d28d960b951e7260ed46ad59 Mon Sep 17 00:00:00 2001 From: tiancyin Date: Mon, 5 Aug 2019 17:32:45 +0800 Subject: drm/amdgpu/soc15: fix external_rev_id for navi14 fix the hard code external_rev_id. Reviewed-by: Hawking Zhang Signed-off-by: tiancyin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index f25097969da7..2f09d870f644 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -613,7 +613,7 @@ static int nv_common_early_init(void *handle) AMD_CG_SUPPORT_BIF_LS; adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG; - adev->external_rev_id = adev->rev_id + 0x1; /* ??? */ + adev->external_rev_id = adev->rev_id + 20; break; case CHIP_NAVI12: adev->cg_flags = 0; -- cgit v1.2.3 From b5c73856408b8431eeb29bc1f7d41dbe7549b75b Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Mon, 5 Aug 2019 16:19:45 +0800 Subject: drm/amdgpu/discovery: move common discovery code out of navi1*_reg_base_init() move amdgpu_discovery_reg_base_init() from navi1*_reg_base_init() to a common function nv_reg_base_init(). Signed-off-by: Xiaojie Yuan Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c | 14 +------------- drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c | 14 +------------- drivers/gpu/drm/amd/amdgpu/nv.c | 29 ++++++++++++++++++++++++++-- 3 files changed, 29 insertions(+), 28 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c index 55014ce8670a..a56c93620e78 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c @@ -29,20 +29,8 @@ int navi10_reg_base_init(struct amdgpu_device *adev) { - int r, i; + int i; - if (amdgpu_discovery) { - r = amdgpu_discovery_reg_base_init(adev); - if (r) { - DRM_WARN("failed to init reg base from ip discovery table, " - "fallback to legacy init method\n"); - goto legacy_init; - } - - return 0; - } - -legacy_init: for (i = 0 ; i < MAX_INSTANCE ; ++i) { adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i])); diff --git a/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c b/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c index 864668a7f1d2..3b5f0f65e096 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c @@ -29,20 +29,8 @@ int navi14_reg_base_init(struct amdgpu_device *adev) { - int r, i; + int i; - if (amdgpu_discovery) { - r = amdgpu_discovery_reg_base_init(adev); - if (r) { - DRM_WARN("failed to init reg base from ip discovery table, " - "fallback to legacy init method\n"); - goto legacy_init; - } - - return 0; - } - -legacy_init: for (i = 0 ; i < MAX_INSTANCE ; ++i) { adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i])); diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 2f09d870f644..3e67536f0dc9 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -376,9 +376,22 @@ static const struct amdgpu_ip_block_version nv_common_ip_block = .funcs = &nv_common_ip_funcs, }; -int nv_set_ip_blocks(struct amdgpu_device *adev) +static int nv_reg_base_init(struct amdgpu_device *adev) { - /* Set IP register base before any HW register access */ + int r; + + if (amdgpu_discovery) { + r = amdgpu_discovery_reg_base_init(adev); + if (r) { + DRM_WARN("failed to init reg base from ip discovery table, " + "fallback to legacy init method\n"); + goto legacy_init; + } + + return 0; + } + +legacy_init: switch (adev->asic_type) { case CHIP_NAVI10: navi10_reg_base_init(adev); @@ -393,6 +406,18 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) return -EINVAL; } + return 0; +} + +int nv_set_ip_blocks(struct amdgpu_device *adev) +{ + int r; + + /* Set IP register base before any HW register access */ + r = nv_reg_base_init(adev); + if (r) + return r; + adev->nbio_funcs = &nbio_v2_3_funcs; adev->nbio_funcs->detect_hw_virt(adev); -- cgit v1.2.3 From 8dd45504a7d70a23be6e4415edbac0e197935686 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Tue, 6 Aug 2019 16:14:22 +0800 Subject: drm/amd/powerplay: check before issuing messages for max sustainable clocks Those messages are not supported on Arcturus and should not be issued. Affected ASIC: Arcturus Signed-off-by: Evan Quan Reviewed-by: Feifei Xu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index ee739c080943..8c4d3ee0295a 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -908,6 +908,10 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock, if (!smu->pm_enabled) return ret; + if ((smu_msg_get_index(smu, SMU_MSG_GetDcModeMaxDpmFreq) < 0) || + (smu_msg_get_index(smu, SMU_MSG_GetMaxDpmFreq) < 0)) + return 0; + clk_id = smu_clk_get_index(smu, clock_select); if (clk_id < 0) return -EINVAL; -- cgit v1.2.3 From b5507c7e06076d874923aa958ddb8829e3cfc573 Mon Sep 17 00:00:00 2001 From: Andrey Grodzovsky Date: Fri, 2 Aug 2019 16:48:08 -0400 Subject: drm/amdgpu: Fix GPU reset crash regression. amdgpu_ip_block.status.hw for GMC wasn't set to false on suspend during GPU reset and so on resume gmc_v9_0_resume wasn't called. 
Caused by 'drm/amdgpu: fix double ucode load by PSP(v3)' Signed-off-by: Andrey Grodzovsky Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 192f753407a9..e7537e1c4fdf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2274,6 +2274,8 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) } } } + + adev->ip_blocks[i].status.hw = false; } return 0; -- cgit v1.2.3 From b56a53db4ce10320a27224d4b2f83f9aaec27c89 Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Sun, 4 Aug 2019 13:37:13 -0700 Subject: drm/amd/powerplay: Zero initialize some variables Clang warns (only Navi warning shown but Arcturus warns as well): drivers/gpu/drm/amd/amdgpu/../powerplay/navi10_ppt.c:1534:4: warning: variable 'asic_default_power_limit' is used uninitialized whenever '?:' condition is false [-Wsometimes-uninitialized] smu_read_smc_arg(smu, &asic_default_power_limit); ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/../powerplay/inc/amdgpu_smu.h:588:3: note: expanded from macro 'smu_read_smc_arg' ((smu)->funcs->read_smc_arg? (smu)->funcs->read_smc_arg((smu), (arg)) : 0) ^~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/../powerplay/navi10_ppt.c:1550:30: note: uninitialized use occurs here smu->default_power_limit = asic_default_power_limit; ^~~~~~~~~~~~~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/../powerplay/navi10_ppt.c:1534:4: note: remove the '?:' if its condition is always true smu_read_smc_arg(smu, &asic_default_power_limit); ^ drivers/gpu/drm/amd/amdgpu/../powerplay/inc/amdgpu_smu.h:588:3: note: expanded from macro 'smu_read_smc_arg' ((smu)->funcs->read_smc_arg? (smu)->funcs->read_smc_arg((smu), (arg)) : 0) ^ drivers/gpu/drm/amd/amdgpu/../powerplay/navi10_ppt.c:1517:35: note: initialize the variable 'asic_default_power_limit' to silence this warning uint32_t asic_default_power_limit; ^ = 0 1 warning generated. As the code is currently written, if read_smc_arg were ever NULL, arg would fail to be initialized but the code would continue executing as normal because the return value would just be zero. There are a few different possible solutions to resolve this class of warnings which have appeared in these drivers before: 1. Assume the function pointer will never be NULL and eliminate the wrapper macros. 2. Have the wrapper macros initialize arg when the function pointer is NULL. 3. Have the wrapper macros return an error code instead of 0 when the function pointer is NULL so that the callsites can properly bail out before arg can be used. 4. Initialize arg at the top of its function. Number four is the path of least resistance right now as every other change will be driver wide so do that here. I only make the comment now as food for thought. 
Fixes: b4af964e75c4 ("drm/amd/powerplay: make power limit retrieval as asic specific") Link: https://github.com/ClangBuiltLinux/linux/issues/627 Reviewed-by: Evan Quan Signed-off-by: Nathan Chancellor Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/arcturus_ppt.c | 2 +- drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c index 896fff2a446d..cff3777ae5aa 100644 --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c @@ -1326,7 +1326,7 @@ static int arcturus_get_power_limit(struct smu_context *smu, bool asic_default) { PPTable_t *pptable = smu->smu_table.driver_pptable; - uint32_t asic_default_power_limit; + uint32_t asic_default_power_limit = 0; int ret = 0; int power_src; diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index d62c2784b102..b3e66fead779 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -1541,7 +1541,7 @@ static int navi10_get_power_limit(struct smu_context *smu, bool asic_default) { PPTable_t *pptable = smu->smu_table.driver_pptable; - uint32_t asic_default_power_limit; + uint32_t asic_default_power_limit = 0; int ret = 0; int power_src; -- cgit v1.2.3 From 2a1e00c3c0d37f65241236d7731ef6bb92f0d07f Mon Sep 17 00:00:00 2001 From: Hariprasad Kelam Date: Mon, 5 Aug 2019 22:51:38 +0530 Subject: gpu: drm: amd: powerplay: Remove logically dead code Result of pointer airthmentic is never null fix coverity defect:1451876 Signed-off-by: Hariprasad Kelam Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index 8c4d3ee0295a..54618d7d6927 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -736,8 +736,6 @@ static int smu_v11_0_write_watermarks_table(struct smu_context *smu) struct smu_table *table = NULL; table = &smu_table->tables[SMU_TABLE_WATERMARKS]; - if (!table) - return -EINVAL; if (!table->cpu_addr) return -EINVAL; -- cgit v1.2.3 From 72cda9bb5e219aea0f2f62f56ae05198c59022a7 Mon Sep 17 00:00:00 2001 From: Likun Gao Date: Fri, 2 Aug 2019 15:18:57 +0800 Subject: drm/amdgpu: pin the csb buffer on hw init for gfx v8 Without this pin, the csb buffer will be filled with inconsistent data after S3 resume. And that will causes gfx hang on gfxoff exit since this csb will be executed then. 
Signed-off-by: Likun Gao Tested-by: Paul Gover Reviewed-by: Feifei Xu Reviewed-by: Xiaojie Yuan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 40 +++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 751567f78567..ee1ccdcf2d30 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1321,6 +1321,39 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) return 0; } +static int gfx_v8_0_csb_vram_pin(struct amdgpu_device *adev) +{ + int r; + + r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); + if (unlikely(r != 0)) + return r; + + r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, + AMDGPU_GEM_DOMAIN_VRAM); + if (!r) + adev->gfx.rlc.clear_state_gpu_addr = + amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj); + + amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); + + return r; +} + +static void gfx_v8_0_csb_vram_unpin(struct amdgpu_device *adev) +{ + int r; + + if (!adev->gfx.rlc.clear_state_obj) + return; + + r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true); + if (likely(r == 0)) { + amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); + amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); + } +} + static void gfx_v8_0_mec_fini(struct amdgpu_device *adev) { amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL); @@ -4785,6 +4818,10 @@ static int gfx_v8_0_hw_init(void *handle) gfx_v8_0_init_golden_registers(adev); gfx_v8_0_constants_init(adev); + r = gfx_v8_0_csb_vram_pin(adev); + if (r) + return r; + r = adev->gfx.rlc.funcs->resume(adev); if (r) return r; @@ -4901,6 +4938,9 @@ static int gfx_v8_0_hw_fini(void *handle) else pr_err("rlc is busy, skip halt rlc\n"); amdgpu_gfx_rlc_exit_safe_mode(adev); + + gfx_v8_0_csb_vram_unpin(adev); + return 0; } -- cgit v1.2.3 From d9dfe768b3f30faa8340cbf34196668714780c3c Mon Sep 17 00:00:00 2001 From: Marek Olšák Date: Fri, 2 Aug 2019 17:44:06 -0400 Subject: Revert "drm/amdgpu: fix transform feedback GDS hang on gfx10 (v2)" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 9ed2c993d723129f85101e51b2ccc36ef5400a67. SET_CONFIG_REG writes to memory if register shadowing is enabled, causing a VM fault. NGG streamout is unstable anyway, so all UMDs should use legacy streamout. I think Mesa is the only driver using NGG streamout. 
Signed-off-by: Marek Olšák Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h | 1 - drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 12 +----------- 2 files changed, 1 insertion(+), 12 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h index df8a23554831..f6ac1e9548f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h @@ -32,7 +32,6 @@ struct amdgpu_gds { uint32_t gws_size; uint32_t oa_size; uint32_t gds_compute_max_wave_id; - uint32_t vgt_gs_max_wave_id; }; struct amdgpu_gds_reg_offset { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 32773b7523d2..f41287f9000d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -4206,15 +4206,6 @@ static void gfx_v10_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, unsigned vmid = AMDGPU_JOB_GET_VMID(job); u32 header, control = 0; - /* Prevent a hw deadlock due to a wave ID mismatch between ME and GDS. - * This resets the wave ID counters. (needed by transform feedback) - * TODO: This might only be needed on a VMID switch when we change - * the GDS OA mapping, not sure. - */ - amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); - amdgpu_ring_write(ring, mmVGT_GS_MAX_WAVE_ID); - amdgpu_ring_write(ring, ring->adev->gds.vgt_gs_max_wave_id); - if (ib->flags & AMDGPU_IB_FLAG_CE) header = PACKET3(PACKET3_INDIRECT_BUFFER_CNST, 2); else @@ -4961,7 +4952,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = { 5 + /* HDP_INVL */ 8 + 8 + /* FENCE x2 */ 2, /* SWITCH_BUFFER */ - .emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_gfx */ + .emit_ib_size = 4, /* gfx_v10_0_ring_emit_ib_gfx */ .emit_ib = gfx_v10_0_ring_emit_ib_gfx, .emit_fence = gfx_v10_0_ring_emit_fence, .emit_pipeline_sync = gfx_v10_0_ring_emit_pipeline_sync, @@ -5112,7 +5103,6 @@ static void gfx_v10_0_set_gds_init(struct amdgpu_device *adev) default: adev->gds.gds_size = 0x10000; adev->gds.gds_compute_max_wave_id = 0x4ff; - adev->gds.vgt_gs_max_wave_id = 0x3ff; break; } -- cgit v1.2.3 From 3e1876251bc34ba0dc94d7a5f8bf9ef312e2f278 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Tue, 6 Aug 2019 13:07:22 +0300 Subject: drm/i915: move intel_display.c function declarations Move the declarations of functions in intel_display.c to intel_display.h. There is still plenty of cleanup to do in intel_display.[ch], but one step at a time. 
Reviewed-by: Chris Wilson Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/ebe19d3c508faa82e651ab3c8eb7a0975bfa52b7.1565085691.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_display.h | 171 +++++++++++++++++++++++++++ drivers/gpu/drm/i915/i915_drv.h | 15 --- drivers/gpu/drm/i915/intel_drv.h | 134 --------------------- 3 files changed, 171 insertions(+), 149 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index d2c718f25478..bf0d7225e649 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -28,8 +28,30 @@ #include #include +enum link_m_n_set; +struct dpll; +struct drm_connector; +struct drm_device; +struct drm_encoder; +struct drm_file; +struct drm_framebuffer; +struct drm_i915_error_state_buf; +struct drm_i915_gem_object; struct drm_i915_private; +struct drm_modeset_acquire_ctx; +struct drm_plane; +struct drm_plane_state; +struct i915_ggtt_view; +struct intel_crtc; +struct intel_crtc_state; +struct intel_digital_port; +struct intel_dp; +struct intel_encoder; +struct intel_load_detect_pipe; +struct intel_plane; struct intel_plane_state; +struct intel_remapped_info; +struct intel_rotation_info; enum i915_gpio { GPIOA, @@ -400,4 +422,153 @@ u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv, bool intel_plane_can_remap(const struct intel_plane_state *plane_state); enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port); +void intel_plane_destroy(struct drm_plane *plane); +void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe); +void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe); +enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc); +int vlv_get_hpll_vco(struct drm_i915_private *dev_priv); +int vlv_get_cck_clock(struct drm_i915_private *dev_priv, + const char *name, u32 reg, int ref_freq); +int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv, + const char *name, u32 reg); +void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv); +void lpt_disable_iclkip(struct drm_i915_private *dev_priv); +void intel_init_display_hooks(struct drm_i915_private *dev_priv); +unsigned int intel_fb_xy_to_linear(int x, int y, + const struct intel_plane_state *state, + int plane); +unsigned int intel_fb_align_height(const struct drm_framebuffer *fb, + int color_plane, unsigned int height); +void intel_add_fb_offsets(int *x, int *y, + const struct intel_plane_state *state, int plane); +unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info); +unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info); +bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv); +int intel_display_suspend(struct drm_device *dev); +void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv); +void intel_encoder_destroy(struct drm_encoder *encoder); +struct drm_display_mode * +intel_encoder_current_mode(struct intel_encoder *encoder); +bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy); +bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy); +enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, + enum port port); +int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, + enum pipe pipe); +u32 
intel_crtc_get_vblank_counter(struct intel_crtc *crtc); + +int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp); +void vlv_wait_port_ready(struct drm_i915_private *dev_priv, + struct intel_digital_port *dport, + unsigned int expected_mask); +int intel_get_load_detect_pipe(struct drm_connector *connector, + const struct drm_display_mode *mode, + struct intel_load_detect_pipe *old, + struct drm_modeset_acquire_ctx *ctx); +void intel_release_load_detect_pipe(struct drm_connector *connector, + struct intel_load_detect_pipe *old, + struct drm_modeset_acquire_ctx *ctx); +struct i915_vma * +intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, + const struct i915_ggtt_view *view, + bool uses_fence, + unsigned long *out_flags); +void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags); +struct drm_framebuffer * +intel_framebuffer_create(struct drm_i915_gem_object *obj, + struct drm_mode_fb_cmd2 *mode_cmd); +int intel_prepare_plane_fb(struct drm_plane *plane, + struct drm_plane_state *new_state); +void intel_cleanup_plane_fb(struct drm_plane *plane, + struct drm_plane_state *old_state); + +void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, + enum pipe pipe); + +int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe, + const struct dpll *dpll); +void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe); +int lpt_get_iclkip(struct drm_i915_private *dev_priv); +bool intel_fuzzy_clock_check(int clock1, int clock2); + +void intel_prepare_reset(struct drm_i915_private *dev_priv); +void intel_finish_reset(struct drm_i915_private *dev_priv); +void intel_dp_get_m_n(struct intel_crtc *crtc, + struct intel_crtc_state *pipe_config); +void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, + enum link_m_n_set m_n); +void intel_dp_ycbcr_420_enable(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state); +int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n); +bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, + struct dpll *best_clock); +int chv_calc_dpll_params(int refclk, struct dpll *pll_clock); + +bool intel_crtc_active(struct intel_crtc *crtc); +bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state); +void hsw_enable_ips(const struct intel_crtc_state *crtc_state); +void hsw_disable_ips(const struct intel_crtc_state *crtc_state); +enum intel_display_power_domain intel_port_to_power_domain(enum port port); +enum intel_display_power_domain +intel_aux_power_domain(struct intel_digital_port *dig_port); +void intel_mode_from_pipe_config(struct drm_display_mode *mode, + struct intel_crtc_state *pipe_config); +void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, + struct intel_crtc_state *crtc_state); + +u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center); +int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state); +int skl_max_scale(const struct intel_crtc_state *crtc_state, + u32 pixel_format); +u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state); +u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state); +u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state); +u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state); +u32 skl_plane_stride(const struct intel_plane_state *plane_state, + int plane); +int skl_check_plane_surface(struct intel_plane_state *plane_state); +int 
i9xx_check_plane_surface(struct intel_plane_state *plane_state); +int skl_format_to_fourcc(int format, bool rgb_order, bool alpha); +unsigned int i9xx_plane_max_stride(struct intel_plane *plane, + u32 pixel_format, u64 modifier, + unsigned int rotation); +int bdw_get_pipemisc_bpp(struct intel_crtc *crtc); + +struct intel_display_error_state * +intel_display_capture_error_state(struct drm_i915_private *dev_priv); +void intel_display_print_error_state(struct drm_i915_error_state_buf *e, + struct intel_display_error_state *error); + +/* modesetting */ +void intel_modeset_init_hw(struct drm_device *dev); +int intel_modeset_init(struct drm_device *dev); +void intel_modeset_driver_remove(struct drm_device *dev); +int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state); +void intel_display_resume(struct drm_device *dev); +void i915_redisable_vga(struct drm_i915_private *dev_priv); +void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv); +void intel_init_pch_refclk(struct drm_i915_private *dev_priv); + +/* modesetting asserts */ +void assert_panel_unlocked(struct drm_i915_private *dev_priv, + enum pipe pipe); +void assert_pll(struct drm_i915_private *dev_priv, + enum pipe pipe, bool state); +#define assert_pll_enabled(d, p) assert_pll(d, p, true) +#define assert_pll_disabled(d, p) assert_pll(d, p, false) +void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state); +#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true) +#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false) +void assert_fdi_rx_pll(struct drm_i915_private *dev_priv, + enum pipe pipe, bool state); +#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true) +#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false) +void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state); +#define assert_pipe_enabled(d, p) assert_pipe(d, p, true) +#define assert_pipe_disabled(d, p) assert_pipe(d, p, false) + #endif diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 0c4db51cdf6e..41fd3c659539 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2652,24 +2652,9 @@ mkwrite_device_info(struct drm_i915_private *dev_priv) return (struct intel_device_info *)INTEL_INFO(dev_priv); } -/* modesetting */ -void intel_modeset_init_hw(struct drm_device *dev); -int intel_modeset_init(struct drm_device *dev); -void intel_modeset_driver_remove(struct drm_device *dev); -int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state); -void intel_display_resume(struct drm_device *dev); -void i915_redisable_vga(struct drm_i915_private *dev_priv); -void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv); -void intel_init_pch_refclk(struct drm_i915_private *dev_priv); - int i915_reg_read_ioctl(struct drm_device *dev, void *data, struct drm_file *file); -struct intel_display_error_state * -intel_display_capture_error_state(struct drm_i915_private *dev_priv); -void intel_display_print_error_state(struct drm_i915_error_state_buf *e, - struct intel_display_error_state *error); - #define __I915_REG_OP(op__, dev_priv__, ...) 
\ intel_uncore_##op__(&(dev_priv__)->uncore, __VA_ARGS__) diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index c4016164c34e..d6f851d581a3 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1473,41 +1473,6 @@ intel_atomic_get_new_crtc_state(struct intel_atomic_state *state, } /* intel_display.c */ -void intel_plane_destroy(struct drm_plane *plane); -void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe); -void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe); -enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc); -int vlv_get_hpll_vco(struct drm_i915_private *dev_priv); -int vlv_get_cck_clock(struct drm_i915_private *dev_priv, - const char *name, u32 reg, int ref_freq); -int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv, - const char *name, u32 reg); -void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv); -void lpt_disable_iclkip(struct drm_i915_private *dev_priv); -void intel_init_display_hooks(struct drm_i915_private *dev_priv); -unsigned int intel_fb_xy_to_linear(int x, int y, - const struct intel_plane_state *state, - int plane); -unsigned int intel_fb_align_height(const struct drm_framebuffer *fb, - int color_plane, unsigned int height); -void intel_add_fb_offsets(int *x, int *y, - const struct intel_plane_state *state, int plane); -unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info); -unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info); -bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv); -int intel_display_suspend(struct drm_device *dev); -void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv); -void intel_encoder_destroy(struct drm_encoder *encoder); -struct drm_display_mode * -intel_encoder_current_mode(struct intel_encoder *encoder); -bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy); -bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy); -enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, - enum port port); -int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); -enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, - enum pipe pipe); static inline bool intel_crtc_has_type(const struct intel_crtc_state *crtc_state, enum intel_output_type type) @@ -1536,108 +1501,9 @@ intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, int pipe) intel_wait_for_vblank(dev_priv, pipe); } -u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc); - -int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp); -void vlv_wait_port_ready(struct drm_i915_private *dev_priv, - struct intel_digital_port *dport, - unsigned int expected_mask); -int intel_get_load_detect_pipe(struct drm_connector *connector, - const struct drm_display_mode *mode, - struct intel_load_detect_pipe *old, - struct drm_modeset_acquire_ctx *ctx); -void intel_release_load_detect_pipe(struct drm_connector *connector, - struct intel_load_detect_pipe *old, - struct drm_modeset_acquire_ctx *ctx); -struct i915_vma * -intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, - const struct i915_ggtt_view *view, - bool uses_fence, - unsigned long *out_flags); -void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags); -struct drm_framebuffer * -intel_framebuffer_create(struct drm_i915_gem_object *obj, - struct drm_mode_fb_cmd2 *mode_cmd); -int 
intel_prepare_plane_fb(struct drm_plane *plane, - struct drm_plane_state *new_state); -void intel_cleanup_plane_fb(struct drm_plane *plane, - struct drm_plane_state *old_state); - -void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, - enum pipe pipe); - -int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe, - const struct dpll *dpll); -void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe); -int lpt_get_iclkip(struct drm_i915_private *dev_priv); -bool intel_fuzzy_clock_check(int clock1, int clock2); - -/* modesetting asserts */ -void assert_panel_unlocked(struct drm_i915_private *dev_priv, - enum pipe pipe); -void assert_pll(struct drm_i915_private *dev_priv, - enum pipe pipe, bool state); -#define assert_pll_enabled(d, p) assert_pll(d, p, true) -#define assert_pll_disabled(d, p) assert_pll(d, p, false) -void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state); -#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true) -#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false) -void assert_fdi_rx_pll(struct drm_i915_private *dev_priv, - enum pipe pipe, bool state); -#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true) -#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false) -void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state); -#define assert_pipe_enabled(d, p) assert_pipe(d, p, true) -#define assert_pipe_disabled(d, p) assert_pipe(d, p, false) -void intel_prepare_reset(struct drm_i915_private *dev_priv); -void intel_finish_reset(struct drm_i915_private *dev_priv); -void intel_dp_get_m_n(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config); -void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, - enum link_m_n_set m_n); -void intel_dp_ycbcr_420_enable(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state); -int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n); -bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, - struct dpll *best_clock); -int chv_calc_dpll_params(int refclk, struct dpll *pll_clock); - -bool intel_crtc_active(struct intel_crtc *crtc); -bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state); -void hsw_enable_ips(const struct intel_crtc_state *crtc_state); -void hsw_disable_ips(const struct intel_crtc_state *crtc_state); -enum intel_display_power_domain intel_port_to_power_domain(enum port port); -enum intel_display_power_domain -intel_aux_power_domain(struct intel_digital_port *dig_port); -void intel_mode_from_pipe_config(struct drm_display_mode *mode, - struct intel_crtc_state *pipe_config); -void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state); - -u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center); -int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state); -int skl_max_scale(const struct intel_crtc_state *crtc_state, - u32 pixel_format); - static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state) { return i915_ggtt_offset(state->vma); } -u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state); -u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state); -u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state); -u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state); -u32 skl_plane_stride(const struct intel_plane_state *plane_state, 
- int plane); -int skl_check_plane_surface(struct intel_plane_state *plane_state); -int i9xx_check_plane_surface(struct intel_plane_state *plane_state); -int skl_format_to_fourcc(int format, bool rgb_order, bool alpha); -unsigned int i9xx_plane_max_stride(struct intel_plane *plane, - u32 pixel_format, u64 modifier, - unsigned int rotation); -int bdw_get_pipemisc_bpp(struct intel_crtc *crtc); - #endif /* __INTEL_DRV_H__ */ -- cgit v1.2.3 From 5b51f28fa7ba67aec4515a19f3ea31f14bf18599 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Tue, 6 Aug 2019 13:07:23 +0300 Subject: drm/i915/sprite: un-inline icl_is_hdr_plane() Avoid including the i915_drv.h mega header from other header files to make further header cleanup easier. Reviewed-by: Chris Wilson Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/5f998c72018c0f1cc5cdb239327a1281d21f4c0e.1565085691.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_sprite.c | 6 ++++++ drivers/gpu/drm/i915/display/intel_sprite.h | 8 +------- 2 files changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index 53c6594c4588..0dd1d61e5e7d 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -330,6 +330,12 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state) return 0; } +bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id) +{ + return INTEL_GEN(dev_priv) >= 11 && + icl_hdr_plane_mask() & BIT(plane_id); +} + static unsigned int skl_plane_max_stride(struct intel_plane *plane, u32 pixel_format, u64 modifier, diff --git a/drivers/gpu/drm/i915/display/intel_sprite.h b/drivers/gpu/drm/i915/display/intel_sprite.h index 500f6bffb139..093a2d156f1e 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.h +++ b/drivers/gpu/drm/i915/display/intel_sprite.h @@ -8,7 +8,6 @@ #include -#include "i915_drv.h" #include "intel_display.h" struct drm_device; @@ -49,11 +48,6 @@ static inline u8 icl_hdr_plane_mask(void) BIT(PLANE_SPRITE0) | BIT(PLANE_SPRITE1); } -static inline bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, - enum plane_id plane_id) -{ - return INTEL_GEN(dev_priv) >= 11 && - icl_hdr_plane_mask() & BIT(plane_id); -} +bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id); #endif /* __INTEL_SPRITE_H__ */ -- cgit v1.2.3 From d64575ee8d91b067a2ea77b3fa31d973f7931591 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Tue, 6 Aug 2019 13:07:24 +0300 Subject: drm/i915/irq: un-inline functions to avoid i915_drv.h include Avoid including the i915_drv.h mega header from other header files to make further header cleanup easier. 
Reviewed-by: Chris Wilson Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/45431103f6cbd4322808907154336fdc176ff20d.1565085691.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/i915_irq.c | 19 +++++++++++++++++++ drivers/gpu/drm/i915/i915_irq.h | 30 ++++++++++-------------------- 2 files changed, 29 insertions(+), 20 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 9094c4811f54..019ae347f45c 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -595,6 +595,11 @@ void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv) spin_unlock_irq(&dev_priv->irq_lock); } +u32 gen6_sanitize_rps_pm_mask(const struct drm_i915_private *i915, u32 mask) +{ + return mask & ~i915->gt_pm.rps.pm_intrmsk_mbz; +} + void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv) { struct intel_rps *rps = &dev_priv->gt_pm.rps; @@ -5196,3 +5201,17 @@ void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv) intel_irq_reset(dev_priv); intel_irq_postinstall(dev_priv); } + +bool intel_irqs_enabled(struct drm_i915_private *dev_priv) +{ + /* + * We only use drm_irq_uninstall() at unload and VT switch, so + * this is the only thing we need to check. + */ + return dev_priv->runtime_pm.irqs_enabled; +} + +void intel_synchronize_irq(struct drm_i915_private *i915) +{ + synchronize_irq(i915->drm.pdev->irq); +} diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h index 8918809cd805..43720c2d81b9 100644 --- a/drivers/gpu/drm/i915/i915_irq.h +++ b/drivers/gpu/drm/i915/i915_irq.h @@ -6,12 +6,18 @@ #ifndef __I915_IRQ_H__ #define __I915_IRQ_H__ +#include #include -#include "i915_drv.h" +#include "display/intel_display.h" +struct drm_crtc; +struct drm_device; +struct drm_display_mode; struct drm_i915_private; struct intel_crtc; +struct intel_crtc; +struct intel_gt; struct intel_guc; void intel_irq_init(struct drm_i915_private *dev_priv); @@ -85,28 +91,12 @@ void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv); void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv); void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv); void gen6_rps_reset_ei(struct drm_i915_private *dev_priv); - -static inline u32 gen6_sanitize_rps_pm_mask(const struct drm_i915_private *i915, - u32 mask) -{ - return mask & ~i915->gt_pm.rps.pm_intrmsk_mbz; -} +u32 gen6_sanitize_rps_pm_mask(const struct drm_i915_private *i915, u32 mask); void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv); void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv); -static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv) -{ - /* - * We only use drm_irq_uninstall() at unload and VT switch, so - * this is the only thing we need to check. 
- */ - return dev_priv->runtime_pm.irqs_enabled; -} - -static inline void intel_synchronize_irq(struct drm_i915_private *i915) -{ - synchronize_irq(i915->drm.pdev->irq); -} +bool intel_irqs_enabled(struct drm_i915_private *dev_priv); +void intel_synchronize_irq(struct drm_i915_private *i915); int intel_get_crtc_scanline(struct intel_crtc *crtc); void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, -- cgit v1.2.3 From 366b6200f76e0f1241be262ea044eee44ded8fc6 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Tue, 6 Aug 2019 13:07:25 +0300 Subject: drm/i915/bw: make intel_atomic_get_bw_state() static No need for this function to be accessible outside of intel_bw.c. Avoid including the i915_drv.h mega header from other header files to make further header cleanup easier. Reviewed-by: Chris Wilson Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/6c898ec6511af47c1c5b679e516dc757cd207146.1565085691.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_bw.c | 14 ++++++++++++++ drivers/gpu/drm/i915/display/intel_bw.h | 15 --------------- 2 files changed, 14 insertions(+), 15 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index ee52c5b4643b..e59f8be7c998 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -322,6 +322,20 @@ static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv, return data_rate; } +static struct intel_bw_state * +intel_atomic_get_bw_state(struct intel_atomic_state *state) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct drm_private_state *bw_state; + + bw_state = drm_atomic_get_private_obj_state(&state->base, + &dev_priv->bw_obj); + if (IS_ERR(bw_state)) + return ERR_CAST(bw_state); + + return to_intel_bw_state(bw_state); +} + int intel_bw_atomic_check(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h index e9d9c6d63bc3..9db10af012f4 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.h +++ b/drivers/gpu/drm/i915/display/intel_bw.h @@ -8,7 +8,6 @@ #include -#include "i915_drv.h" #include "intel_display.h" struct drm_i915_private; @@ -24,20 +23,6 @@ struct intel_bw_state { #define to_intel_bw_state(x) container_of((x), struct intel_bw_state, base) -static inline struct intel_bw_state * -intel_atomic_get_bw_state(struct intel_atomic_state *state) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - struct drm_private_state *bw_state; - - bw_state = drm_atomic_get_private_obj_state(&state->base, - &dev_priv->bw_obj); - if (IS_ERR(bw_state)) - return ERR_CAST(bw_state); - - return to_intel_bw_state(bw_state); -} - void intel_bw_init_hw(struct drm_i915_private *dev_priv); int intel_bw_init(struct drm_i915_private *dev_priv); int intel_bw_atomic_check(struct intel_atomic_state *state); -- cgit v1.2.3 From e15fd1bee2a998bffce776dd312108a2676bee57 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Tue, 6 Aug 2019 13:07:26 +0300 Subject: drm/i915/mst: un-inline intel_dp_mst_encoder_active_links() Avoid including the intel_drv.h mega header from other header files to make further header cleanup easier. 
Reviewed-by: Chris Wilson Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/ae8f7a9514a9e952e1e0dba411aeb42293d1e183.1565085691.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_dp_mst.c | 6 ++++++ drivers/gpu/drm/i915/display/intel_dp_mst.h | 9 ++------- 2 files changed, 8 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 60652ebbdf61..4071c9da1065 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -631,6 +631,12 @@ intel_dp_create_fake_mst_encoders(struct intel_digital_port *intel_dig_port) return true; } +int +intel_dp_mst_encoder_active_links(struct intel_digital_port *intel_dig_port) +{ + return intel_dig_port->dp.active_mst_links; +} + int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_base_id) { diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h index 6754c211205a..f660ad80db04 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.h +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h @@ -6,15 +6,10 @@ #ifndef __INTEL_DP_MST_H__ #define __INTEL_DP_MST_H__ -#include "intel_drv.h" +struct intel_digital_port; int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id); void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port); -static inline int -intel_dp_mst_encoder_active_links(struct intel_digital_port *intel_dig_port) -{ - return intel_dig_port->dp.active_mst_links; -} - +int intel_dp_mst_encoder_active_links(struct intel_digital_port *intel_dig_port); #endif /* __INTEL_DP_MST_H__ */ -- cgit v1.2.3 From d0d392a8deed8442191bdf5b2d6bb17e04c928b6 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Tue, 6 Aug 2019 14:37:00 +0300 Subject: drm/i915/tc: un-inline intel_tc_port_ref_held() Avoid including the intel_drv.h mega header from other header files to make further header cleanup easier. 
v2: restore the over-eagerly dropped (Imre) Reviewed-by: Chris Wilson Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190806113700.18816-1-jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_tc.c | 7 +++++++ drivers/gpu/drm/i915/display/intel_tc.h | 9 ++------- 2 files changed, 9 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index c96a81c2416c..a4d3d2d541c4 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -7,6 +7,7 @@ #include "intel_display.h" #include "intel_dp_mst.h" #include "intel_tc.h" +#include "intel_drv.h" static const char *tc_port_mode_name(enum tc_port_mode mode) { @@ -503,6 +504,12 @@ void intel_tc_port_unlock(struct intel_digital_port *dig_port) wakeref); } +bool intel_tc_port_ref_held(struct intel_digital_port *dig_port) +{ + return mutex_is_locked(&dig_port->tc_lock) || + dig_port->tc_link_refcount; +} + void intel_tc_port_get_link(struct intel_digital_port *dig_port, int required_lanes) { diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h index 22fe922ac9cf..783d75531435 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.h +++ b/drivers/gpu/drm/i915/display/intel_tc.h @@ -9,7 +9,7 @@ #include #include -#include "intel_drv.h" +struct intel_digital_port; bool intel_tc_port_connected(struct intel_digital_port *dig_port); u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port); @@ -23,12 +23,7 @@ void intel_tc_port_unlock(struct intel_digital_port *dig_port); void intel_tc_port_get_link(struct intel_digital_port *dig_port, int required_lanes); void intel_tc_port_put_link(struct intel_digital_port *dig_port); - -static inline int intel_tc_port_ref_held(struct intel_digital_port *dig_port) -{ - return mutex_is_locked(&dig_port->tc_lock) || - dig_port->tc_link_refcount; -} +bool intel_tc_port_ref_held(struct intel_digital_port *dig_port); void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy); -- cgit v1.2.3 From a09d9a8002368e1b8754c8621ad06d3d7bade6f4 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Tue, 6 Aug 2019 13:07:28 +0300 Subject: drm/i915: avoid including intel_drv.h via i915_drv.h->i915_trace.h Disentangle i915_drv.h from intel_drv.h, which gets included via i915_trace.h. This necessitates including i915_trace.h wherever it's needed. 
Reviewed-by: Chris Wilson Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/ed82bf259d3b725a1a1a3c3e9d6fb5c08bc4d489.1565085691.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_atomic_plane.c | 1 + drivers/gpu/drm/i915/display/intel_dp.c | 1 + drivers/gpu/drm/i915/display/intel_fifo_underrun.c | 1 + drivers/gpu/drm/i915/display/intel_sprite.c | 1 + drivers/gpu/drm/i915/gem/i915_gem_clflush.c | 1 + drivers/gpu/drm/i915/gem/i915_gem_mman.c | 1 + drivers/gpu/drm/i915/gem/i915_gem_object.c | 1 + drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 1 + drivers/gpu/drm/i915/gt/intel_breadcrumbs.c | 1 + drivers/gpu/drm/i915/gt/intel_lrc.c | 1 + drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 1 + drivers/gpu/drm/i915/i915_debugfs.c | 1 + drivers/gpu/drm/i915/i915_drv.h | 3 +-- drivers/gpu/drm/i915/i915_gem.h | 2 ++ drivers/gpu/drm/i915/i915_request.c | 1 + drivers/gpu/drm/i915/i915_vma.c | 1 + drivers/gpu/drm/i915/intel_pm.c | 1 + drivers/gpu/drm/i915/intel_runtime_pm.c | 1 + drivers/gpu/drm/i915/intel_uncore.c | 1 + 19 files changed, 20 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c index ab411d5e093c..c14c8dade9a4 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c @@ -35,6 +35,7 @@ #include #include +#include "i915_trace.h" #include "intel_atomic_plane.h" #include "intel_drv.h" #include "intel_pm.h" diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 0eb5d66f87a7..956e886f9f1e 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -44,6 +44,7 @@ #include "i915_debugfs.h" #include "i915_drv.h" +#include "i915_trace.h" #include "intel_atomic.h" #include "intel_audio.h" #include "intel_connector.h" diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c index 8545ad32bb50..b48c15d8f79f 100644 --- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c +++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c @@ -26,6 +26,7 @@ */ #include "i915_drv.h" +#include "i915_trace.h" #include "intel_drv.h" #include "intel_fbc.h" #include "intel_fifo_underrun.h" diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index 0dd1d61e5e7d..a8493c8d5e1d 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -40,6 +40,7 @@ #include #include "i915_drv.h" +#include "i915_trace.h" #include "intel_atomic_plane.h" #include "intel_drv.h" #include "intel_frontbuffer.h" diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c index 5295285d5843..c31684682eaa 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c @@ -8,6 +8,7 @@ #include "i915_drv.h" #include "i915_gem_clflush.h" +#include "i915_trace.h" static DEFINE_SPINLOCK(clflush_lock); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index a564c1e4231b..9a733c6853c4 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -13,6 +13,7 @@ #include "i915_gem_gtt.h" #include "i915_gem_ioctls.h" #include "i915_gem_object.h" +#include "i915_trace.h" #include "i915_vma.h" #include "intel_drv.h" diff 
--git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index 09f1843f9274..3929c3a6b281 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -29,6 +29,7 @@ #include "i915_gem_context.h" #include "i915_gem_object.h" #include "i915_globals.h" +#include "i915_trace.h" static struct i915_global_object { struct i915_global base; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c index d2a1158868e7..4c4954e8ce0a 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c @@ -10,6 +10,7 @@ #include "i915_drv.h" #include "i915_gem_object.h" #include "i915_scatterlist.h" +#include "i915_trace.h" /* * Move pages to appropriate lru and release the pagevec, decrementing the diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c index c092bdf5f0bf..e1bbc9b428cd 100644 --- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c @@ -27,6 +27,7 @@ #include #include "i915_drv.h" +#include "i915_trace.h" static void irq_enable(struct intel_engine_cs *engine) { diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 232f40fcb490..2b97641feac3 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -136,6 +136,7 @@ #include "gem/i915_gem_context.h" #include "i915_drv.h" +#include "i915_trace.h" #include "i915_vgpu.h" #include "intel_engine_pm.h" #include "intel_gt.h" diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index a78bd99bc6cb..28f5e5379b2c 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -33,6 +33,7 @@ #include "intel_guc_submission.h" #include "i915_drv.h" +#include "i915_trace.h" enum { GUC_PREEMPT_NONE = 0, diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 63b599f584db..b197aae101e4 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -44,6 +44,7 @@ #include "i915_debugfs.h" #include "i915_irq.h" +#include "i915_trace.h" #include "intel_csr.h" #include "intel_drv.h" #include "intel_pm.h" diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 41fd3c659539..cfb0aa1e4885 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -91,6 +91,7 @@ #include "i915_scheduler.h" #include "gt/intel_timeline.h" #include "i915_vma.h" +#include "i915_irq.h" #include "intel_gvt.h" @@ -2352,8 +2353,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->num_pipes > 0) -#include "i915_trace.h" - static inline bool intel_vtd_active(void) { #ifdef CONFIG_INTEL_IOMMU diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h index fe82d3571072..167a7b56ed5b 100644 --- a/drivers/gpu/drm/i915/i915_gem.h +++ b/drivers/gpu/drm/i915/i915_gem.h @@ -28,6 +28,8 @@ #include #include +#include + struct drm_i915_private; #ifdef CONFIG_DRM_I915_DEBUG_GEM diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 81094f250bdb..9b2b18f0196b 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -35,6 +35,7 @@ #include "i915_active.h" #include "i915_drv.h" #include 
"i915_globals.h" +#include "i915_trace.h" #include "intel_pm.h" struct execute_cb { diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 101a2bb416a6..4183b0e10324 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -32,6 +32,7 @@ #include "i915_drv.h" #include "i915_globals.h" +#include "i915_trace.h" #include "i915_vma.h" static struct i915_global_vma { diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 30399b245f07..91a6068a29db 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -39,6 +39,7 @@ #include "i915_drv.h" #include "i915_irq.h" +#include "i915_trace.h" #include "intel_drv.h" #include "intel_pm.h" #include "intel_sideband.h" diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index b2a05850ea42..2fd3c097e1f5 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -32,6 +32,7 @@ #include #include "i915_drv.h" +#include "i915_trace.h" /** * DOC: runtime pm diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 773dc0a79577..18104d6542b8 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -25,6 +25,7 @@ #include #include "i915_drv.h" +#include "i915_trace.h" #include "i915_vgpu.h" #include "intel_drv.h" #include "intel_pm.h" -- cgit v1.2.3 From 1d455f8de8e8a211cc91e19484eeda2e454531a1 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Tue, 6 Aug 2019 14:39:33 +0300 Subject: drm/i915: rename intel_drv.h to display/intel_display_types.h Everything about the file is about display, and mostly about types related to display. Move under display/ as intel_display_types.h to reflect the facts. There's still plenty to clean up, but start off with moving the file where it logically belongs and naming according to contents. 
v2: fix the include guard name in the renamed file Reviewed-by: Chris Wilson Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190806113933.11799-1-jani.nikula@intel.com --- drivers/gpu/drm/i915/display/dvo_ch7017.c | 2 +- drivers/gpu/drm/i915/display/dvo_ch7xxx.c | 2 +- drivers/gpu/drm/i915/display/dvo_ivch.c | 2 +- drivers/gpu/drm/i915/display/dvo_ns2501.c | 2 +- drivers/gpu/drm/i915/display/dvo_sil164.c | 2 +- drivers/gpu/drm/i915/display/dvo_tfp410.c | 2 +- drivers/gpu/drm/i915/display/intel_atomic.c | 2 +- drivers/gpu/drm/i915/display/intel_atomic_plane.c | 2 +- drivers/gpu/drm/i915/display/intel_audio.c | 2 +- drivers/gpu/drm/i915/display/intel_bw.c | 2 +- drivers/gpu/drm/i915/display/intel_cdclk.c | 2 +- drivers/gpu/drm/i915/display/intel_color.c | 2 +- drivers/gpu/drm/i915/display/intel_combo_phy.c | 2 +- drivers/gpu/drm/i915/display/intel_connector.c | 2 +- drivers/gpu/drm/i915/display/intel_crt.c | 2 +- drivers/gpu/drm/i915/display/intel_ddi.c | 2 +- drivers/gpu/drm/i915/display/intel_display.c | 4 +- drivers/gpu/drm/i915/display/intel_display_power.c | 2 +- drivers/gpu/drm/i915/display/intel_display_types.h | 1510 ++++++++++++++++++++ drivers/gpu/drm/i915/display/intel_dp.c | 2 +- .../gpu/drm/i915/display/intel_dp_aux_backlight.c | 2 +- .../gpu/drm/i915/display/intel_dp_link_training.c | 2 +- drivers/gpu/drm/i915/display/intel_dp_mst.c | 2 +- drivers/gpu/drm/i915/display/intel_dpio_phy.c | 2 +- drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 2 +- drivers/gpu/drm/i915/display/intel_dsi.h | 3 +- .../gpu/drm/i915/display/intel_dsi_dcs_backlight.c | 2 +- drivers/gpu/drm/i915/display/intel_dsi_vbt.c | 2 +- drivers/gpu/drm/i915/display/intel_dvo.c | 2 +- drivers/gpu/drm/i915/display/intel_fbc.c | 2 +- drivers/gpu/drm/i915/display/intel_fbdev.c | 2 +- drivers/gpu/drm/i915/display/intel_fifo_underrun.c | 2 +- drivers/gpu/drm/i915/display/intel_frontbuffer.c | 2 +- drivers/gpu/drm/i915/display/intel_gmbus.c | 2 +- drivers/gpu/drm/i915/display/intel_hdcp.c | 2 +- drivers/gpu/drm/i915/display/intel_hdmi.c | 4 +- drivers/gpu/drm/i915/display/intel_hotplug.c | 2 +- drivers/gpu/drm/i915/display/intel_lspcon.c | 2 +- drivers/gpu/drm/i915/display/intel_lvds.c | 2 +- drivers/gpu/drm/i915/display/intel_opregion.c | 2 +- drivers/gpu/drm/i915/display/intel_overlay.c | 2 +- drivers/gpu/drm/i915/display/intel_panel.c | 2 +- drivers/gpu/drm/i915/display/intel_pipe_crc.c | 2 +- drivers/gpu/drm/i915/display/intel_psr.c | 2 +- drivers/gpu/drm/i915/display/intel_quirks.c | 2 +- drivers/gpu/drm/i915/display/intel_sdvo.c | 2 +- drivers/gpu/drm/i915/display/intel_sprite.c | 2 +- drivers/gpu/drm/i915/display/intel_tc.c | 2 +- drivers/gpu/drm/i915/display/intel_tv.c | 2 +- drivers/gpu/drm/i915/display/intel_vdsc.c | 2 +- drivers/gpu/drm/i915/display/vlv_dsi.c | 2 +- drivers/gpu/drm/i915/display/vlv_dsi_pll.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_client_blt.c | 5 +- drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_mman.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_object_blt.c | 4 +- drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 3 +- drivers/gpu/drm/i915/gt/intel_reset.c | 1 + drivers/gpu/drm/i915/i915_debugfs.c | 2 +- drivers/gpu/drm/i915/i915_drv.c | 2 +- drivers/gpu/drm/i915/i915_gem.c | 2 +- drivers/gpu/drm/i915/i915_gem_evict.c | 2 +- drivers/gpu/drm/i915/i915_gem_fence_reg.c | 2 + drivers/gpu/drm/i915/i915_gem_gtt.c | 2 +- drivers/gpu/drm/i915/i915_irq.c | 2 +- drivers/gpu/drm/i915/i915_suspend.c | 2 +- drivers/gpu/drm/i915/i915_sysfs.c 
| 3 +- drivers/gpu/drm/i915/i915_trace.h | 2 +- drivers/gpu/drm/i915/i915_vgpu.c | 2 +- drivers/gpu/drm/i915/intel_drv.h | 1509 ------------------- drivers/gpu/drm/i915/intel_pm.c | 2 +- drivers/gpu/drm/i915/intel_sideband.c | 4 +- drivers/gpu/drm/i915/intel_uncore.c | 3 +- 73 files changed, 1592 insertions(+), 1583 deletions(-) create mode 100644 drivers/gpu/drm/i915/display/intel_display_types.h delete mode 100644 drivers/gpu/drm/i915/intel_drv.h (limited to 'drivers/gpu/drm') diff --git a/drivers/gpu/drm/i915/display/dvo_ch7017.c b/drivers/gpu/drm/i915/display/dvo_ch7017.c index 602380fe74f3..0589994dde11 100644 --- a/drivers/gpu/drm/i915/display/dvo_ch7017.c +++ b/drivers/gpu/drm/i915/display/dvo_ch7017.c @@ -25,7 +25,7 @@ * */ -#include "intel_drv.h" +#include "intel_display_types.h" #include "intel_dvo_dev.h" #define CH7017_TV_DISPLAY_MODE 0x00 diff --git a/drivers/gpu/drm/i915/display/dvo_ch7xxx.c b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c index e070bebee7b5..54f58ba44b9f 100644 --- a/drivers/gpu/drm/i915/display/dvo_ch7xxx.c +++ b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c @@ -26,7 +26,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. **************************************************************************/ -#include "intel_drv.h" +#include "intel_display_types.h" #include "intel_dvo_dev.h" #define CH7xxx_REG_VID 0x4a diff --git a/drivers/gpu/drm/i915/display/dvo_ivch.c b/drivers/gpu/drm/i915/display/dvo_ivch.c index 09dba35f3ffa..f43d8c610d3f 100644 --- a/drivers/gpu/drm/i915/display/dvo_ivch.c +++ b/drivers/gpu/drm/i915/display/dvo_ivch.c @@ -29,7 +29,7 @@ * */ -#include "intel_drv.h" +#include "intel_display_types.h" #include "intel_dvo_dev.h" /* diff --git a/drivers/gpu/drm/i915/display/dvo_ns2501.c b/drivers/gpu/drm/i915/display/dvo_ns2501.c index c83a5d88d62b..a724a8755673 100644 --- a/drivers/gpu/drm/i915/display/dvo_ns2501.c +++ b/drivers/gpu/drm/i915/display/dvo_ns2501.c @@ -28,7 +28,7 @@ #include "i915_drv.h" #include "i915_reg.h" -#include "intel_drv.h" +#include "intel_display_types.h" #include "intel_dvo_dev.h" #define NS2501_VID 0x1305 diff --git a/drivers/gpu/drm/i915/display/dvo_sil164.c b/drivers/gpu/drm/i915/display/dvo_sil164.c index 04698eaeb632..0dfa0a0209ff 100644 --- a/drivers/gpu/drm/i915/display/dvo_sil164.c +++ b/drivers/gpu/drm/i915/display/dvo_sil164.c @@ -26,7 +26,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
**************************************************************************/ -#include "intel_drv.h" +#include "intel_display_types.h" #include "intel_dvo_dev.h" #define SIL164_VID 0x0001 diff --git a/drivers/gpu/drm/i915/display/dvo_tfp410.c b/drivers/gpu/drm/i915/display/dvo_tfp410.c index 623114ee73cd..009d65b0f3e9 100644 --- a/drivers/gpu/drm/i915/display/dvo_tfp410.c +++ b/drivers/gpu/drm/i915/display/dvo_tfp410.c @@ -25,7 +25,7 @@ * */ -#include "intel_drv.h" +#include "intel_display_types.h" #include "intel_dvo_dev.h" /* register definitions according to the TFP410 data sheet */ diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c index 90ca11a4ae88..d3fb75bb9eb1 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.c +++ b/drivers/gpu/drm/i915/display/intel_atomic.c @@ -35,7 +35,7 @@ #include #include "intel_atomic.h" -#include "intel_drv.h" +#include "intel_display_types.h" #include "intel_hdcp.h" #include "intel_sprite.h" diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c index c14c8dade9a4..d1fcdf206da4 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c @@ -37,7 +37,7 @@ #include "i915_trace.h" #include "intel_atomic_plane.h" -#include "intel_drv.h" +#include "intel_display_types.h" #include "intel_pm.h" #include "intel_sprite.h" diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c index c8fd35a7ca42..ddcccf4408c3 100644 --- a/drivers/gpu/drm/i915/display/intel_audio.c +++ b/drivers/gpu/drm/i915/display/intel_audio.c @@ -29,7 +29,7 @@ #include "i915_drv.h" #include "intel_audio.h" -#include "intel_drv.h" +#include "intel_display_types.h" #include "intel_lpe_audio.h" /** diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index e59f8be7c998..688858ebe4d0 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -6,7 +6,7 @@ #include #include "intel_bw.h" -#include "intel_drv.h" +#include "intel_display_types.h" #include "intel_sideband.h" /* Parameters for Qclk Geyserville (QGV) */ diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 93b0d190c184..59c0ceb21aac 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -22,7 +22,7 @@ */ #include "intel_cdclk.h" -#include "intel_drv.h" +#include "intel_display_types.h" #include "intel_sideband.h" /** diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index 23a84dd7989f..71a0201437a9 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -23,7 +23,7 @@ */ #include "intel_color.h" -#include "intel_drv.h" +#include "intel_display_types.h" #define CTM_COEFF_SIGN (1ULL << 63) diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c index ac8218a040ab..44bbc7e74fc3 100644 --- a/drivers/gpu/drm/i915/display/intel_combo_phy.c +++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c @@ -4,7 +4,7 @@ */ #include "intel_combo_phy.h" -#include "intel_drv.h" +#include "intel_display_types.h" #define for_each_combo_phy(__dev_priv, __phy) \ for ((__phy) = PHY_A; (__phy) < I915_MAX_PHYS; (__phy)++) \ diff --git a/drivers/gpu/drm/i915/display/intel_connector.c 
b/drivers/gpu/drm/i915/display/intel_connector.c index cf8823ce9606..308ec63207ee 100644 --- a/drivers/gpu/drm/i915/display/intel_connector.c +++ b/drivers/gpu/drm/i915/display/intel_connector.c @@ -33,7 +33,7 @@ #include "i915_drv.h" #include "intel_connector.h" -#include "intel_drv.h" +#include "intel_display_types.h" #include "intel_hdcp.h" int intel_connector_init(struct intel_connector *connector) diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c index 3fcf2f84bcce..067eb14dbf80 100644 --- a/drivers/gpu/drm/i915/display/intel_crt.c +++ b/drivers/gpu/drm/i915/display/intel_crt.c @@ -38,7 +38,7 @@ #include "intel_connector.h" #include "intel_crt.h" #include "intel_ddi.h" -#include "intel_drv.h" +#include "intel_display_types.h" #include "intel_fifo_underrun.h" #include "intel_gmbus.h" #include "intel_hotplug.h" diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index fb58845020dc..bde9e0f61cef 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -32,10 +32,10 @@ #include "intel_combo_phy.h" #include "intel_connector.h" #include "intel_ddi.h" +#include "intel_display_types.h" #include "intel_dp.h" #include "intel_dp_link_training.h" #include "intel_dpio_phy.h" -#include "intel_drv.h" #include "intel_dsi.h" #include "intel_fifo_underrun.h" #include "intel_gmbus.h" diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 51e4f6798a6b..503e39e9adfd 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -62,9 +62,9 @@ #include "intel_atomic.h" #include "intel_atomic_plane.h" #include "intel_bw.h" -#include "intel_color.h" #include "intel_cdclk.h" -#include "intel_drv.h" +#include "intel_color.h" +#include "intel_display_types.h" #include "intel_fbc.h" #include "intel_fbdev.h" #include "intel_fifo_underrun.h" diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index dd2a50b8ba0a..e201dc6f977a 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -13,8 +13,8 @@ #include "intel_cdclk.h" #include "intel_combo_phy.h" #include "intel_csr.h" +#include "intel_display_types.h" #include "intel_dpio_phy.h" -#include "intel_drv.h" #include "intel_hotplug.h" #include "intel_sideband.h" #include "intel_tc.h" diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h new file mode 100644 index 000000000000..3283f8d5c4e0 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -0,0 +1,1510 @@ +/* + * Copyright (c) 2006 Dave Airlie + * Copyright (c) 2007-2008 Intel Corporation + * Jesse Barnes + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifndef __INTEL_DISPLAY_TYPES_H__ +#define __INTEL_DISPLAY_TYPES_H__ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "i915_drv.h" + +struct drm_printer; + +/* + * Display related stuff + */ + +/* these are outputs from the chip - integrated only + external chips are via DVO or SDVO output */ +enum intel_output_type { + INTEL_OUTPUT_UNUSED = 0, + INTEL_OUTPUT_ANALOG = 1, + INTEL_OUTPUT_DVO = 2, + INTEL_OUTPUT_SDVO = 3, + INTEL_OUTPUT_LVDS = 4, + INTEL_OUTPUT_TVOUT = 5, + INTEL_OUTPUT_HDMI = 6, + INTEL_OUTPUT_DP = 7, + INTEL_OUTPUT_EDP = 8, + INTEL_OUTPUT_DSI = 9, + INTEL_OUTPUT_DDI = 10, + INTEL_OUTPUT_DP_MST = 11, +}; + +struct intel_framebuffer { + struct drm_framebuffer base; + struct intel_rotation_info rot_info; + + /* for each plane in the normal GTT view */ + struct { + unsigned int x, y; + } normal[2]; + /* for each plane in the rotated GTT view */ + struct { + unsigned int x, y; + unsigned int pitch; /* pixels */ + } rotated[2]; +}; + +struct intel_fbdev { + struct drm_fb_helper helper; + struct intel_framebuffer *fb; + struct i915_vma *vma; + unsigned long vma_flags; + async_cookie_t cookie; + int preferred_bpp; + + /* Whether or not fbdev hpd processing is temporarily suspended */ + bool hpd_suspended : 1; + /* Set when a hotplug was received while HPD processing was + * suspended + */ + bool hpd_waiting : 1; + + /* Protects hpd_suspended */ + struct mutex hpd_lock; +}; + +enum intel_hotplug_state { + INTEL_HOTPLUG_UNCHANGED, + INTEL_HOTPLUG_CHANGED, + INTEL_HOTPLUG_RETRY, +}; + +struct intel_encoder { + struct drm_encoder base; + + enum intel_output_type type; + enum port port; + unsigned int cloneable; + enum intel_hotplug_state (*hotplug)(struct intel_encoder *encoder, + struct intel_connector *connector, + bool irq_received); + enum intel_output_type (*compute_output_type)(struct intel_encoder *, + struct intel_crtc_state *, + struct drm_connector_state *); + int (*compute_config)(struct intel_encoder *, + struct intel_crtc_state *, + struct drm_connector_state *); + void (*update_prepare)(struct intel_atomic_state *, + struct intel_encoder *, + struct intel_crtc *); + void (*pre_pll_enable)(struct intel_encoder *, + const struct intel_crtc_state *, + const struct drm_connector_state *); + void (*pre_enable)(struct intel_encoder *, + const struct intel_crtc_state *, + const struct drm_connector_state *); + void (*enable)(struct intel_encoder *, + const struct intel_crtc_state *, + const struct drm_connector_state *); + void (*update_complete)(struct intel_atomic_state *, + struct intel_encoder *, + struct intel_crtc *); + void (*disable)(struct intel_encoder *, + const struct intel_crtc_state *, + const struct drm_connector_state *); + void (*post_disable)(struct intel_encoder *, + const struct intel_crtc_state *, + const struct drm_connector_state *); + void (*post_pll_disable)(struct intel_encoder *, + const struct intel_crtc_state *, + const struct 
drm_connector_state *); + void (*update_pipe)(struct intel_encoder *, + const struct intel_crtc_state *, + const struct drm_connector_state *); + /* Read out the current hw state of this connector, returning true if + * the encoder is active. If the encoder is enabled it also set the pipe + * it is connected to in the pipe parameter. */ + bool (*get_hw_state)(struct intel_encoder *, enum pipe *pipe); + /* Reconstructs the equivalent mode flags for the current hardware + * state. This must be called _after_ display->get_pipe_config has + * pre-filled the pipe config. Note that intel_encoder->base.crtc must + * be set correctly before calling this function. */ + void (*get_config)(struct intel_encoder *, + struct intel_crtc_state *pipe_config); + /* + * Acquires the power domains needed for an active encoder during + * hardware state readout. + */ + void (*get_power_domains)(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state); + /* + * Called during system suspend after all pending requests for the + * encoder are flushed (for example for DP AUX transactions) and + * device interrupts are disabled. + */ + void (*suspend)(struct intel_encoder *); + int crtc_mask; + enum hpd_pin hpd_pin; + enum intel_display_power_domain power_domain; + /* for communication with audio component; protected by av_mutex */ + const struct drm_connector *audio_connector; +}; + +struct intel_panel { + struct drm_display_mode *fixed_mode; + struct drm_display_mode *downclock_mode; + + /* backlight */ + struct { + bool present; + u32 level; + u32 min; + u32 max; + bool enabled; + bool combination_mode; /* gen 2/4 only */ + bool active_low_pwm; + bool alternate_pwm_increment; /* lpt+ */ + + /* PWM chip */ + bool util_pin_active_low; /* bxt+ */ + u8 controller; /* bxt+ only */ + struct pwm_device *pwm; + + struct backlight_device *device; + + /* Connector and platform specific backlight functions */ + int (*setup)(struct intel_connector *connector, enum pipe pipe); + u32 (*get)(struct intel_connector *connector); + void (*set)(const struct drm_connector_state *conn_state, u32 level); + void (*disable)(const struct drm_connector_state *conn_state); + void (*enable)(const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state); + u32 (*hz_to_pwm)(struct intel_connector *connector, u32 hz); + void (*power)(struct intel_connector *, bool enable); + } backlight; +}; + +struct intel_digital_port; + +enum check_link_response { + HDCP_LINK_PROTECTED = 0, + HDCP_TOPOLOGY_CHANGE, + HDCP_LINK_INTEGRITY_FAILURE, + HDCP_REAUTH_REQUEST +}; + +/* + * This structure serves as a translation layer between the generic HDCP code + * and the bus-specific code. What that means is that HDCP over HDMI differs + * from HDCP over DP, so to account for these differences, we need to + * communicate with the receiver through this shim. + * + * For completeness, the 2 buses differ in the following ways: + * - DP AUX vs. DDC + * HDCP registers on the receiver are set via DP AUX for DP, and + * they are set via DDC for HDMI. + * - Receiver register offsets + * The offsets of the registers are different for DP vs. HDMI + * - Receiver register masks/offsets + * For instance, the ready bit for the KSV fifo is in a different + * place on DP vs HDMI + * - Receiver register names + * Seriously. In the DP spec, the 16-bit register containing + * downstream information is called BINFO, on HDMI it's called + * BSTATUS. 
To confuse matters further, DP has a BSTATUS register + * with a completely different definition. + * - KSV FIFO + * On HDMI, the ksv fifo is read all at once, whereas on DP it must + * be read 3 keys at a time + * - Aksv output + * Since Aksv is hidden in hardware, there's different procedures + * to send it over DP AUX vs DDC + */ +struct intel_hdcp_shim { + /* Outputs the transmitter's An and Aksv values to the receiver. */ + int (*write_an_aksv)(struct intel_digital_port *intel_dig_port, u8 *an); + + /* Reads the receiver's key selection vector */ + int (*read_bksv)(struct intel_digital_port *intel_dig_port, u8 *bksv); + + /* + * Reads BINFO from DP receivers and BSTATUS from HDMI receivers. The + * definitions are the same in the respective specs, but the names are + * different. Call it BSTATUS since that's the name the HDMI spec + * uses and it was there first. + */ + int (*read_bstatus)(struct intel_digital_port *intel_dig_port, + u8 *bstatus); + + /* Determines whether a repeater is present downstream */ + int (*repeater_present)(struct intel_digital_port *intel_dig_port, + bool *repeater_present); + + /* Reads the receiver's Ri' value */ + int (*read_ri_prime)(struct intel_digital_port *intel_dig_port, u8 *ri); + + /* Determines if the receiver's KSV FIFO is ready for consumption */ + int (*read_ksv_ready)(struct intel_digital_port *intel_dig_port, + bool *ksv_ready); + + /* Reads the ksv fifo for num_downstream devices */ + int (*read_ksv_fifo)(struct intel_digital_port *intel_dig_port, + int num_downstream, u8 *ksv_fifo); + + /* Reads a 32-bit part of V' from the receiver */ + int (*read_v_prime_part)(struct intel_digital_port *intel_dig_port, + int i, u32 *part); + + /* Enables HDCP signalling on the port */ + int (*toggle_signalling)(struct intel_digital_port *intel_dig_port, + bool enable); + + /* Ensures the link is still protected */ + bool (*check_link)(struct intel_digital_port *intel_dig_port); + + /* Detects panel's hdcp capability. This is optional for HDMI. */ + int (*hdcp_capable)(struct intel_digital_port *intel_dig_port, + bool *hdcp_capable); + + /* HDCP adaptation(DP/HDMI) required on the port */ + enum hdcp_wired_protocol protocol; + + /* Detects whether sink is HDCP2.2 capable */ + int (*hdcp_2_2_capable)(struct intel_digital_port *intel_dig_port, + bool *capable); + + /* Write HDCP2.2 messages */ + int (*write_2_2_msg)(struct intel_digital_port *intel_dig_port, + void *buf, size_t size); + + /* Read HDCP2.2 messages */ + int (*read_2_2_msg)(struct intel_digital_port *intel_dig_port, + u8 msg_id, void *buf, size_t size); + + /* + * Implementation of DP HDCP2.2 Errata for the communication of stream + * type to Receivers. In DP HDCP2.2 Stream type is one of the input to + * the HDCP2.2 Cipher for En/De-Cryption. Not applicable for HDMI. + */ + int (*config_stream_type)(struct intel_digital_port *intel_dig_port, + bool is_repeater, u8 type); + + /* HDCP2.2 Link Integrity Check */ + int (*check_2_2_link)(struct intel_digital_port *intel_dig_port); +}; + +struct intel_hdcp { + const struct intel_hdcp_shim *shim; + /* Mutex for hdcp state of the connector */ + struct mutex mutex; + u64 value; + struct delayed_work check_work; + struct work_struct prop_work; + + /* HDCP1.4 Encryption status */ + bool hdcp_encrypted; + + /* HDCP2.2 related definitions */ + /* Flag indicates whether this connector supports HDCP2.2 or not. */ + bool hdcp2_supported; + + /* HDCP2.2 Encryption status */ + bool hdcp2_encrypted; + + /* + * Content Stream Type defined by content owner. 
TYPE0(0x0) content can + * flow in the link protected by HDCP2.2 or HDCP1.4, where as TYPE1(0x1) + * content can flow only through a link protected by HDCP2.2. + */ + u8 content_type; + struct hdcp_port_data port_data; + + bool is_paired; + bool is_repeater; + + /* + * Count of ReceiverID_List received. Initialized to 0 at AKE_INIT. + * Incremented after processing the RepeaterAuth_Send_ReceiverID_List. + * When it rolls over re-auth has to be triggered. + */ + u32 seq_num_v; + + /* + * Count of RepeaterAuth_Stream_Manage msg propagated. + * Initialized to 0 on AKE_INIT. Incremented after every successful + * transmission of RepeaterAuth_Stream_Manage message. When it rolls + * over re-Auth has to be triggered. + */ + u32 seq_num_m; + + /* + * Work queue to signal the CP_IRQ. Used for the waiters to read the + * available information from HDCP DP sink. + */ + wait_queue_head_t cp_irq_queue; + atomic_t cp_irq_count; + int cp_irq_count_cached; +}; + +struct intel_connector { + struct drm_connector base; + /* + * The fixed encoder this connector is connected to. + */ + struct intel_encoder *encoder; + + /* ACPI device id for ACPI and driver cooperation */ + u32 acpi_device_id; + + /* Reads out the current hw, returning true if the connector is enabled + * and active (i.e. dpms ON state). */ + bool (*get_hw_state)(struct intel_connector *); + + /* Panel info for eDP and LVDS */ + struct intel_panel panel; + + /* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */ + struct edid *edid; + struct edid *detect_edid; + + /* since POLL and HPD connectors may use the same HPD line keep the native + state of connector->polled in case hotplug storm detection changes it */ + u8 polled; + + void *port; /* store this opaque as its illegal to dereference it */ + + struct intel_dp *mst_port; + + /* Work struct to schedule a uevent on link train failure */ + struct work_struct modeset_retry_work; + + struct intel_hdcp hdcp; +}; + +struct intel_digital_connector_state { + struct drm_connector_state base; + + enum hdmi_force_audio force_audio; + int broadcast_rgb; +}; + +#define to_intel_digital_connector_state(x) container_of(x, struct intel_digital_connector_state, base) + +struct dpll { + /* given values */ + int n; + int m1, m2; + int p1, p2; + /* derived values */ + int dot; + int vco; + int m; + int p; +}; + +struct intel_atomic_state { + struct drm_atomic_state base; + + intel_wakeref_t wakeref; + + struct { + /* + * Logical state of cdclk (used for all scaling, watermark, + * etc. calculations and checks). This is computed as if all + * enabled crtcs were active. + */ + struct intel_cdclk_state logical; + + /* + * Actual state of cdclk, can be different from the logical + * state only when all crtc's are DPMS off. + */ + struct intel_cdclk_state actual; + + int force_min_cdclk; + bool force_min_cdclk_changed; + /* pipe to which cd2x update is synchronized */ + enum pipe pipe; + } cdclk; + + bool dpll_set, modeset; + + /* + * Does this transaction change the pipes that are active? This mask + * tracks which CRTC's have changed their active state at the end of + * the transaction (not counting the temporary disable during modesets). 
+ * This mask should only be non-zero when intel_state->modeset is true, + * but the converse is not necessarily true; simply changing a mode may + * not flip the final active status of any CRTC's + */ + unsigned int active_pipe_changes; + + unsigned int active_crtcs; + /* minimum acceptable cdclk for each pipe */ + int min_cdclk[I915_MAX_PIPES]; + /* minimum acceptable voltage level for each pipe */ + u8 min_voltage_level[I915_MAX_PIPES]; + + struct intel_shared_dpll_state shared_dpll[I915_NUM_PLLS]; + + /* + * Current watermarks can't be trusted during hardware readout, so + * don't bother calculating intermediate watermarks. + */ + bool skip_intermediate_wm; + + bool rps_interactive; + + /* Gen9+ only */ + struct skl_ddb_values wm_results; + + struct i915_sw_fence commit_ready; + + struct llist_node freed; +}; + +struct intel_plane_state { + struct drm_plane_state base; + struct i915_ggtt_view view; + struct i915_vma *vma; + unsigned long flags; +#define PLANE_HAS_FENCE BIT(0) + + struct { + u32 offset; + /* + * Plane stride in: + * bytes for 0/180 degree rotation + * pixels for 90/270 degree rotation + */ + u32 stride; + int x, y; + } color_plane[2]; + + /* plane control register */ + u32 ctl; + + /* plane color control register */ + u32 color_ctl; + + /* + * scaler_id + * = -1 : not using a scaler + * >= 0 : using a scalers + * + * plane requiring a scaler: + * - During check_plane, its bit is set in + * crtc_state->scaler_state.scaler_users by calling helper function + * update_scaler_plane. + * - scaler_id indicates the scaler it got assigned. + * + * plane doesn't require a scaler: + * - this can happen when scaling is no more required or plane simply + * got disabled. + * - During check_plane, corresponding bit is reset in + * crtc_state->scaler_state.scaler_users by calling helper function + * update_scaler_plane. + */ + int scaler_id; + + /* + * linked_plane: + * + * ICL planar formats require 2 planes that are updated as pairs. + * This member is used to make sure the other plane is also updated + * when required, and for update_slave() to find the correct + * plane_state to pass as argument. + */ + struct intel_plane *linked_plane; + + /* + * slave: + * If set don't update use the linked plane's state for updating + * this plane during atomic commit with the update_slave() callback. + * + * It's also used by the watermark code to ignore wm calculations on + * this plane. They're calculated by the linked plane's wm code. + */ + u32 slave; + + struct drm_intel_sprite_colorkey ckey; +}; + +struct intel_initial_plane_config { + struct intel_framebuffer *fb; + unsigned int tiling; + int size; + u32 base; + u8 rotation; +}; + +struct intel_scaler { + int in_use; + u32 mode; +}; + +struct intel_crtc_scaler_state { +#define SKL_NUM_SCALERS 2 + struct intel_scaler scalers[SKL_NUM_SCALERS]; + + /* + * scaler_users: keeps track of users requesting scalers on this crtc. + * + * If a bit is set, a user is using a scaler. + * Here user can be a plane or crtc as defined below: + * bits 0-30 - plane (bit position is index from drm_plane_index) + * bit 31 - crtc + * + * Instead of creating a new index to cover planes and crtc, using + * existing drm_plane_index for planes which is well less than 31 + * planes and bit 31 for crtc. This should be fine to cover all + * our platforms. + * + * intel_atomic_setup_scalers will setup available scalers to users + * requesting scalers. It will gracefully fail if request exceeds + * avilability. 
+ */ +#define SKL_CRTC_INDEX 31 + unsigned scaler_users; + + /* scaler used by crtc for panel fitting purpose */ + int scaler_id; +}; + +/* drm_mode->private_flags */ +#define I915_MODE_FLAG_INHERITED (1<<0) +/* Flag to get scanline using frame time stamps */ +#define I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP (1<<1) +/* Flag to use the scanline counter instead of the pixel counter */ +#define I915_MODE_FLAG_USE_SCANLINE_COUNTER (1<<2) + +struct intel_pipe_wm { + struct intel_wm_level wm[5]; + u32 linetime; + bool fbc_wm_enabled; + bool pipe_enabled; + bool sprites_enabled; + bool sprites_scaled; +}; + +struct skl_plane_wm { + struct skl_wm_level wm[8]; + struct skl_wm_level uv_wm[8]; + struct skl_wm_level trans_wm; + bool is_planar; +}; + +struct skl_pipe_wm { + struct skl_plane_wm planes[I915_MAX_PLANES]; + u32 linetime; +}; + +enum vlv_wm_level { + VLV_WM_LEVEL_PM2, + VLV_WM_LEVEL_PM5, + VLV_WM_LEVEL_DDR_DVFS, + NUM_VLV_WM_LEVELS, +}; + +struct vlv_wm_state { + struct g4x_pipe_wm wm[NUM_VLV_WM_LEVELS]; + struct g4x_sr_wm sr[NUM_VLV_WM_LEVELS]; + u8 num_levels; + bool cxsr; +}; + +struct vlv_fifo_state { + u16 plane[I915_MAX_PLANES]; +}; + +enum g4x_wm_level { + G4X_WM_LEVEL_NORMAL, + G4X_WM_LEVEL_SR, + G4X_WM_LEVEL_HPLL, + NUM_G4X_WM_LEVELS, +}; + +struct g4x_wm_state { + struct g4x_pipe_wm wm; + struct g4x_sr_wm sr; + struct g4x_sr_wm hpll; + bool cxsr; + bool hpll_en; + bool fbc_en; +}; + +struct intel_crtc_wm_state { + union { + struct { + /* + * Intermediate watermarks; these can be + * programmed immediately since they satisfy + * both the current configuration we're + * switching away from and the new + * configuration we're switching to. + */ + struct intel_pipe_wm intermediate; + + /* + * Optimal watermarks, programmed post-vblank + * when this state is committed. + */ + struct intel_pipe_wm optimal; + } ilk; + + struct { + /* gen9+ only needs 1-step wm programming */ + struct skl_pipe_wm optimal; + struct skl_ddb_entry ddb; + struct skl_ddb_entry plane_ddb_y[I915_MAX_PLANES]; + struct skl_ddb_entry plane_ddb_uv[I915_MAX_PLANES]; + } skl; + + struct { + /* "raw" watermarks (not inverted) */ + struct g4x_pipe_wm raw[NUM_VLV_WM_LEVELS]; + /* intermediate watermarks (inverted) */ + struct vlv_wm_state intermediate; + /* optimal watermarks (inverted) */ + struct vlv_wm_state optimal; + /* display FIFO split */ + struct vlv_fifo_state fifo_state; + } vlv; + + struct { + /* "raw" watermarks */ + struct g4x_pipe_wm raw[NUM_G4X_WM_LEVELS]; + /* intermediate watermarks */ + struct g4x_wm_state intermediate; + /* optimal watermarks */ + struct g4x_wm_state optimal; + } g4x; + }; + + /* + * Platforms with two-step watermark programming will need to + * update watermark programming post-vblank to switch from the + * safe intermediate watermarks to the optimal final + * watermarks. + */ + bool need_postvbl_update; +}; + +enum intel_output_format { + INTEL_OUTPUT_FORMAT_INVALID, + INTEL_OUTPUT_FORMAT_RGB, + INTEL_OUTPUT_FORMAT_YCBCR420, + INTEL_OUTPUT_FORMAT_YCBCR444, +}; + +struct intel_crtc_state { + struct drm_crtc_state base; + + /** + * quirks - bitfield with hw state readout quirks + * + * For various reasons the hw state readout code might not be able to + * completely faithfully read out the current state. These cases are + * tracked with quirk flags so that fastboot and state checker can act + * accordingly. 
+ */ +#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */ + unsigned long quirks; + + unsigned fb_bits; /* framebuffers to flip */ + bool update_pipe; /* can a fast modeset be performed? */ + bool disable_cxsr; + bool update_wm_pre, update_wm_post; /* watermarks are updated */ + bool fb_changed; /* fb on any of the planes is changed */ + bool fifo_changed; /* FIFO split is changed */ + + /* Pipe source size (ie. panel fitter input size) + * All planes will be positioned inside this space, + * and get clipped at the edges. */ + int pipe_src_w, pipe_src_h; + + /* + * Pipe pixel rate, adjusted for + * panel fitter/pipe scaler downscaling. + */ + unsigned int pixel_rate; + + /* Whether to set up the PCH/FDI. Note that we never allow sharing + * between pch encoders and cpu encoders. */ + bool has_pch_encoder; + + /* Are we sending infoframes on the attached port */ + bool has_infoframe; + + /* CPU Transcoder for the pipe. Currently this can only differ from the + * pipe on Haswell and later (where we have a special eDP transcoder) + * and Broxton (where we have special DSI transcoders). */ + enum transcoder cpu_transcoder; + + /* + * Use reduced/limited/broadcast rbg range, compressing from the full + * range fed into the crtcs. + */ + bool limited_color_range; + + /* Bitmask of encoder types (enum intel_output_type) + * driven by the pipe. + */ + unsigned int output_types; + + /* Whether we should send NULL infoframes. Required for audio. */ + bool has_hdmi_sink; + + /* Audio enabled on this pipe. Only valid if either has_hdmi_sink or + * has_dp_encoder is set. */ + bool has_audio; + + /* + * Enable dithering, used when the selected pipe bpp doesn't match the + * plane bpp. + */ + bool dither; + + /* + * Dither gets enabled for 18bpp which causes CRC mismatch errors for + * compliance video pattern tests. + * Disable dither only if it is a compliance test request for + * 18bpp. + */ + bool dither_force_disable; + + /* Controls for the clock computation, to override various stages. */ + bool clock_set; + + /* SDVO TV has a bunch of special case. To make multifunction encoders + * work correctly, we need to track this at runtime.*/ + bool sdvo_tv_clock; + + /* + * crtc bandwidth limit, don't increase pipe bpp or clock if not really + * required. This is set in the 2nd loop of calling encoder's + * ->compute_config if the first pick doesn't work out. + */ + bool bw_constrained; + + /* Settings for the intel dpll used on pretty much everything but + * haswell. */ + struct dpll dpll; + + /* Selected dpll when shared or NULL. */ + struct intel_shared_dpll *shared_dpll; + + /* Actual register state of the dpll, for shared dpll cross-checking. */ + struct intel_dpll_hw_state dpll_hw_state; + + /* + * ICL reserved DPLLs for the CRTC/port. The active PLL is selected by + * setting shared_dpll and dpll_hw_state to one of these reserved ones. + */ + struct icl_port_dpll { + struct intel_shared_dpll *pll; + struct intel_dpll_hw_state hw_state; + } icl_port_dplls[ICL_PORT_DPLL_COUNT]; + + /* DSI PLL registers */ + struct { + u32 ctrl, div; + } dsi_pll; + + int pipe_bpp; + struct intel_link_m_n dp_m_n; + + /* m2_n2 for eDP downclock */ + struct intel_link_m_n dp_m2_n2; + bool has_drrs; + + bool has_psr; + bool has_psr2; + + /* + * Frequence the dpll for the port should run at. Differs from the + * adjusted dotclock e.g. for DP or 12bpc hdmi mode. This is also + * already multiplied by pixel_multiplier. + */ + int port_clock; + + /* Used by SDVO (and if we ever fix it, HDMI). 
*/ + unsigned pixel_multiplier; + + u8 lane_count; + + /* + * Used by platforms having DP/HDMI PHY with programmable lane + * latency optimization. + */ + u8 lane_lat_optim_mask; + + /* minimum acceptable voltage level */ + u8 min_voltage_level; + + /* Panel fitter controls for gen2-gen4 + VLV */ + struct { + u32 control; + u32 pgm_ratios; + u32 lvds_border_bits; + } gmch_pfit; + + /* Panel fitter placement and size for Ironlake+ */ + struct { + u32 pos; + u32 size; + bool enabled; + bool force_thru; + } pch_pfit; + + /* FDI configuration, only valid if has_pch_encoder is set. */ + int fdi_lanes; + struct intel_link_m_n fdi_m_n; + + bool ips_enabled; + + bool crc_enabled; + + bool enable_fbc; + + bool double_wide; + + int pbn; + + struct intel_crtc_scaler_state scaler_state; + + /* w/a for waiting 2 vblanks during crtc enable */ + enum pipe hsw_workaround_pipe; + + /* IVB sprite scaling w/a (WaCxSRDisabledForSpriteScaling:ivb) */ + bool disable_lp_wm; + + struct intel_crtc_wm_state wm; + + u32 data_rate[I915_MAX_PLANES]; + + /* Gamma mode programmed on the pipe */ + u32 gamma_mode; + + union { + /* CSC mode programmed on the pipe */ + u32 csc_mode; + + /* CHV CGM mode */ + u32 cgm_mode; + }; + + /* bitmask of visible planes (enum plane_id) */ + u8 active_planes; + u8 nv12_planes; + u8 c8_planes; + + /* bitmask of planes that will be updated during the commit */ + u8 update_planes; + + struct { + u32 enable; + u32 gcp; + union hdmi_infoframe avi; + union hdmi_infoframe spd; + union hdmi_infoframe hdmi; + union hdmi_infoframe drm; + } infoframes; + + /* HDMI scrambling status */ + bool hdmi_scrambling; + + /* HDMI High TMDS char rate ratio */ + bool hdmi_high_tmds_clock_ratio; + + /* Output format RGB/YCBCR etc */ + enum intel_output_format output_format; + + /* Output down scaling is done in LSPCON device */ + bool lspcon_downsampling; + + /* enable pipe gamma? */ + bool gamma_enable; + + /* enable pipe csc? */ + bool csc_enable; + + /* Display Stream compression state */ + struct { + bool compression_enable; + bool dsc_split; + u16 compressed_bpp; + u8 slice_count; + } dsc_params; + struct drm_dsc_config dp_dsc_cfg; + + /* Forward Error correction State */ + bool fec_enable; +}; + +struct intel_crtc { + struct drm_crtc base; + enum pipe pipe; + /* + * Whether the crtc and the connected output pipeline is active. Implies + * that crtc->enabled is set, i.e. the current mode configuration has + * some outputs connected to this crtc. + */ + bool active; + u8 plane_ids_mask; + unsigned long long enabled_power_domains; + struct intel_overlay *overlay; + + struct intel_crtc_state *config; + + /* Access to these should be protected by dev_priv->irq_lock. */ + bool cpu_fifo_underrun_disabled; + bool pch_fifo_underrun_disabled; + + /* per-pipe watermark state */ + struct { + /* watermarks currently being used */ + union { + struct intel_pipe_wm ilk; + struct vlv_wm_state vlv; + struct g4x_wm_state g4x; + } active; + } wm; + + int scanline_offset; + + struct { + unsigned start_vbl_count; + ktime_t start_vbl_time; + int min_vbl, max_vbl; + int scanline_start; + } debug; + + /* scalers available on this crtc */ + int num_scalers; +}; + +struct intel_plane { + struct drm_plane base; + enum i9xx_plane_id i9xx_plane; + enum plane_id id; + enum pipe pipe; + bool has_fbc; + bool has_ccs; + u32 frontbuffer_bit; + + struct { + u32 base, cntl, size; + } cursor; + + /* + * NOTE: Do not place new plane state fields here (e.g., when adding + * new plane properties). 
New runtime state should now be placed in + * the intel_plane_state structure and accessed via plane_state. + */ + + unsigned int (*max_stride)(struct intel_plane *plane, + u32 pixel_format, u64 modifier, + unsigned int rotation); + void (*update_plane)(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state); + void (*update_slave)(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state); + void (*disable_plane)(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state); + bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe); + int (*check_plane)(struct intel_crtc_state *crtc_state, + struct intel_plane_state *plane_state); +}; + +struct intel_watermark_params { + u16 fifo_size; + u16 max_wm; + u8 default_wm; + u8 guard_size; + u8 cacheline_size; +}; + +struct cxsr_latency { + bool is_desktop : 1; + bool is_ddr3 : 1; + u16 fsb_freq; + u16 mem_freq; + u16 display_sr; + u16 display_hpll_disable; + u16 cursor_sr; + u16 cursor_hpll_disable; +}; + +#define to_intel_atomic_state(x) container_of(x, struct intel_atomic_state, base) +#define to_intel_crtc(x) container_of(x, struct intel_crtc, base) +#define to_intel_crtc_state(x) container_of(x, struct intel_crtc_state, base) +#define to_intel_connector(x) container_of(x, struct intel_connector, base) +#define to_intel_encoder(x) container_of(x, struct intel_encoder, base) +#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) +#define to_intel_plane(x) container_of(x, struct intel_plane, base) +#define to_intel_plane_state(x) container_of(x, struct intel_plane_state, base) +#define intel_fb_obj(x) ((x) ? to_intel_bo((x)->obj[0]) : NULL) + +struct intel_hdmi { + i915_reg_t hdmi_reg; + int ddc_bus; + struct { + enum drm_dp_dual_mode_type type; + int max_tmds_clock; + } dp_dual_mode; + bool has_hdmi_sink; + bool has_audio; + struct intel_connector *attached_connector; + struct cec_notifier *cec_notifier; +}; + +struct intel_dp_mst_encoder; +#define DP_MAX_DOWNSTREAM_PORTS 0x10 + +/* + * enum link_m_n_set: + * When platform provides two set of M_N registers for dp, we can + * program them and switch between them incase of DRRS. + * But When only one such register is provided, we have to program the + * required divider value on that registers itself based on the DRRS state. 
+ * + * M1_N1 : Program dp_m_n on M1_N1 registers + * dp_m2_n2 on M2_N2 registers (If supported) + * + * M2_N2 : Program dp_m2_n2 on M1_N1 registers + * M2_N2 registers are not supported + */ + +enum link_m_n_set { + /* Sets the m1_n1 and m2_n2 */ + M1_N1 = 0, + M2_N2 +}; + +struct intel_dp_compliance_data { + unsigned long edid; + u8 video_pattern; + u16 hdisplay, vdisplay; + u8 bpc; +}; + +struct intel_dp_compliance { + unsigned long test_type; + struct intel_dp_compliance_data test_data; + bool test_active; + int test_link_rate; + u8 test_lane_count; +}; + +struct intel_dp { + i915_reg_t output_reg; + u32 DP; + int link_rate; + u8 lane_count; + u8 sink_count; + bool link_mst; + bool link_trained; + bool has_audio; + bool reset_link_params; + u8 dpcd[DP_RECEIVER_CAP_SIZE]; + u8 psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE]; + u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; + u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE]; + u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]; + u8 fec_capable; + /* source rates */ + int num_source_rates; + const int *source_rates; + /* sink rates as reported by DP_MAX_LINK_RATE/DP_SUPPORTED_LINK_RATES */ + int num_sink_rates; + int sink_rates[DP_MAX_SUPPORTED_RATES]; + bool use_rate_select; + /* intersection of source and sink rates */ + int num_common_rates; + int common_rates[DP_MAX_SUPPORTED_RATES]; + /* Max lane count for the current link */ + int max_link_lane_count; + /* Max rate for the current link */ + int max_link_rate; + /* sink or branch descriptor */ + struct drm_dp_desc desc; + struct drm_dp_aux aux; + u8 train_set[4]; + int panel_power_up_delay; + int panel_power_down_delay; + int panel_power_cycle_delay; + int backlight_on_delay; + int backlight_off_delay; + struct delayed_work panel_vdd_work; + bool want_panel_vdd; + unsigned long last_power_on; + unsigned long last_backlight_off; + ktime_t panel_power_off_time; + + struct notifier_block edp_notifier; + + /* + * Pipe whose power sequencer is currently locked into + * this port. Only relevant on VLV/CHV. + */ + enum pipe pps_pipe; + /* + * Pipe currently driving the port. Used for preventing + * the use of the PPS for any pipe currentrly driving + * external DP as that will mess things up on VLV. + */ + enum pipe active_pipe; + /* + * Set if the sequencer may be reset due to a power transition, + * requiring a reinitialization. Only relevant on BXT. + */ + bool pps_reset; + struct edp_power_seq pps_delays; + + bool can_mst; /* this port supports mst */ + bool is_mst; + int active_mst_links; + /* connector directly attached - won't be use for modeset in mst world */ + struct intel_connector *attached_connector; + + /* mst connector list */ + struct intel_dp_mst_encoder *mst_encoders[I915_MAX_PIPES]; + struct drm_dp_mst_topology_mgr mst_mgr; + + u32 (*get_aux_clock_divider)(struct intel_dp *dp, int index); + /* + * This function returns the value we have to program the AUX_CTL + * register with to kick off an AUX transaction. 
+	 */
+	u32 (*get_aux_send_ctl)(struct intel_dp *dp, int send_bytes,
+				u32 aux_clock_divider);
+
+	i915_reg_t (*aux_ch_ctl_reg)(struct intel_dp *dp);
+	i915_reg_t (*aux_ch_data_reg)(struct intel_dp *dp, int index);
+
+	/* This is called before a link training is starterd */
+	void (*prepare_link_retrain)(struct intel_dp *intel_dp);
+
+	/* Displayport compliance testing */
+	struct intel_dp_compliance compliance;
+
+	/* Display stream compression testing */
+	bool force_dsc_en;
+};
+
+enum lspcon_vendor {
+	LSPCON_VENDOR_MCA,
+	LSPCON_VENDOR_PARADE
+};
+
+struct intel_lspcon {
+	bool active;
+	enum drm_lspcon_mode mode;
+	enum lspcon_vendor vendor;
+};
+
+struct intel_digital_port {
+	struct intel_encoder base;
+	u32 saved_port_bits;
+	struct intel_dp dp;
+	struct intel_hdmi hdmi;
+	struct intel_lspcon lspcon;
+	enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
+	bool release_cl2_override;
+	u8 max_lanes;
+	/* Used for DP and ICL+ TypeC/DP and TypeC/HDMI ports. */
+	enum aux_ch aux_ch;
+	enum intel_display_power_domain ddi_io_power_domain;
+	struct mutex tc_lock; /* protects the TypeC port mode */
+	intel_wakeref_t tc_lock_wakeref;
+	int tc_link_refcount;
+	bool tc_legacy_port:1;
+	char tc_port_name[8];
+	enum tc_port_mode tc_mode;
+	enum phy_fia tc_phy_fia;
+
+	void (*write_infoframe)(struct intel_encoder *encoder,
+				const struct intel_crtc_state *crtc_state,
+				unsigned int type,
+				const void *frame, ssize_t len);
+	void (*read_infoframe)(struct intel_encoder *encoder,
+			       const struct intel_crtc_state *crtc_state,
+			       unsigned int type,
+			       void *frame, ssize_t len);
+	void (*set_infoframes)(struct intel_encoder *encoder,
+			       bool enable,
+			       const struct intel_crtc_state *crtc_state,
+			       const struct drm_connector_state *conn_state);
+	u32 (*infoframes_enabled)(struct intel_encoder *encoder,
+				  const struct intel_crtc_state *pipe_config);
+};
+
+struct intel_dp_mst_encoder {
+	struct intel_encoder base;
+	enum pipe pipe;
+	struct intel_digital_port *primary;
+	struct intel_connector *connector;
+};
+
+static inline enum dpio_channel
+vlv_dport_to_channel(struct intel_digital_port *dport)
+{
+	switch (dport->base.port) {
+	case PORT_B:
+	case PORT_D:
+		return DPIO_CH0;
+	case PORT_C:
+		return DPIO_CH1;
+	default:
+		BUG();
+	}
+}
+
+static inline enum dpio_phy
+vlv_dport_to_phy(struct intel_digital_port *dport)
+{
+	switch (dport->base.port) {
+	case PORT_B:
+	case PORT_C:
+		return DPIO_PHY0;
+	case PORT_D:
+		return DPIO_PHY1;
+	default:
+		BUG();
+	}
+}
+
+static inline enum dpio_channel
+vlv_pipe_to_channel(enum pipe pipe)
+{
+	switch (pipe) {
+	case PIPE_A:
+	case PIPE_C:
+		return DPIO_CH0;
+	case PIPE_B:
+		return DPIO_CH1;
+	default:
+		BUG();
+	}
+}
+
+static inline struct intel_crtc *
+intel_get_crtc_for_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+	return dev_priv->pipe_to_crtc_mapping[pipe];
+}
+
+static inline struct intel_crtc *
+intel_get_crtc_for_plane(struct drm_i915_private *dev_priv, enum i9xx_plane_id plane)
+{
+	return dev_priv->plane_to_crtc_mapping[plane];
+}
+
+struct intel_load_detect_pipe {
+	struct drm_atomic_state *restore_state;
+};
+
+static inline struct intel_encoder *
+intel_attached_encoder(struct drm_connector *connector)
+{
+	return to_intel_connector(connector)->encoder;
+}
+
+static inline bool intel_encoder_is_dig_port(struct intel_encoder *encoder)
+{
+	switch (encoder->type) {
+	case INTEL_OUTPUT_DDI:
+	case INTEL_OUTPUT_DP:
+	case INTEL_OUTPUT_EDP:
+	case INTEL_OUTPUT_HDMI:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static inline struct intel_digital_port *
+enc_to_dig_port(struct drm_encoder *encoder)
+{
+	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+
+	if (intel_encoder_is_dig_port(intel_encoder))
+		return container_of(encoder, struct intel_digital_port,
+				    base.base);
+	else
+		return NULL;
+}
+
+static inline struct intel_digital_port *
+conn_to_dig_port(struct intel_connector *connector)
+{
+	return enc_to_dig_port(&intel_attached_encoder(&connector->base)->base);
+}
+
+static inline struct intel_dp_mst_encoder *
+enc_to_mst(struct drm_encoder *encoder)
+{
+	return container_of(encoder, struct intel_dp_mst_encoder, base.base);
+}
+
+static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
+{
+	return &enc_to_dig_port(encoder)->dp;
+}
+
+static inline bool intel_encoder_is_dp(struct intel_encoder *encoder)
+{
+	switch (encoder->type) {
+	case INTEL_OUTPUT_DP:
+	case INTEL_OUTPUT_EDP:
+		return true;
+	case INTEL_OUTPUT_DDI:
+		/* Skip pure HDMI/DVI DDI encoders */
+		return i915_mmio_reg_valid(enc_to_intel_dp(&encoder->base)->output_reg);
+	default:
+		return false;
+	}
+}
+
+static inline struct intel_lspcon *
+enc_to_intel_lspcon(struct drm_encoder *encoder)
+{
+	return &enc_to_dig_port(encoder)->lspcon;
+}
+
+static inline struct intel_digital_port *
+dp_to_dig_port(struct intel_dp *intel_dp)
+{
+	return container_of(intel_dp, struct intel_digital_port, dp);
+}
+
+static inline struct intel_lspcon *
+dp_to_lspcon(struct intel_dp *intel_dp)
+{
+	return &dp_to_dig_port(intel_dp)->lspcon;
+}
+
+static inline struct drm_i915_private *
+dp_to_i915(struct intel_dp *intel_dp)
+{
+	return to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
+}
+
+static inline struct intel_digital_port *
+hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
+{
+	return container_of(intel_hdmi, struct intel_digital_port, hdmi);
+}
+
+static inline struct intel_plane_state *
+intel_atomic_get_plane_state(struct intel_atomic_state *state,
+			     struct intel_plane *plane)
+{
+	struct drm_plane_state *ret =
+		drm_atomic_get_plane_state(&state->base, &plane->base);
+
+	if (IS_ERR(ret))
+		return ERR_CAST(ret);
+
+	return to_intel_plane_state(ret);
+}
+
+static inline struct intel_plane_state *
+intel_atomic_get_old_plane_state(struct intel_atomic_state *state,
+				 struct intel_plane *plane)
+{
+	return to_intel_plane_state(drm_atomic_get_old_plane_state(&state->base,
+								   &plane->base));
+}
+
+static inline struct intel_plane_state *
+intel_atomic_get_new_plane_state(struct intel_atomic_state *state,
+				 struct intel_plane *plane)
+{
+	return to_intel_plane_state(drm_atomic_get_new_plane_state(&state->base,
+								   &plane->base));
+}
+
+static inline struct intel_crtc_state *
+intel_atomic_get_old_crtc_state(struct intel_atomic_state *state,
+				struct intel_crtc *crtc)
+{
+	return to_intel_crtc_state(drm_atomic_get_old_crtc_state(&state->base,
+								 &crtc->base));
+}
+
+static inline struct intel_crtc_state *
+intel_atomic_get_new_crtc_state(struct intel_atomic_state *state,
+				struct intel_crtc *crtc)
+{
+	return to_intel_crtc_state(drm_atomic_get_new_crtc_state(&state->base,
+								 &crtc->base));
+}
+
+/* intel_display.c */
+static inline bool
+intel_crtc_has_type(const struct intel_crtc_state *crtc_state,
+		    enum intel_output_type type)
+{
+	return crtc_state->output_types & (1 << type);
+}
+static inline bool
+intel_crtc_has_dp_encoder(const struct intel_crtc_state *crtc_state)
+{
+	return crtc_state->output_types &
+		((1 << INTEL_OUTPUT_DP) |
+		 (1 << INTEL_OUTPUT_DP_MST) |
+		 (1 << INTEL_OUTPUT_EDP));
+}
+static inline void
+intel_wait_for_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+	drm_wait_one_vblank(&dev_priv->drm, pipe);
+}
+static inline void
+intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, int pipe)
+{
+	const struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+
+	if (crtc->active)
+		intel_wait_for_vblank(dev_priv, pipe);
+}
+
+static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state)
+{
+	return i915_ggtt_offset(state->vma);
+}
+
+#endif /* __INTEL_DISPLAY_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 956e886f9f1e..109249911576 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -49,11 +49,11 @@
 #include "intel_audio.h"
 #include "intel_connector.h"
 #include "intel_ddi.h"
+#include "intel_display_types.h"
 #include "intel_dp.h"
 #include "intel_dp_link_training.h"
 #include "intel_dp_mst.h"
 #include "intel_dpio_phy.h"
-#include "intel_drv.h"
 #include "intel_fifo_underrun.h"
 #include "intel_hdcp.h"
 #include "intel_hdmi.h"
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 6b0b73479fb8..020422da2ae2 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -22,8 +22,8 @@
  *
  */
 
+#include "intel_display_types.h"
 #include "intel_dp_aux_backlight.h"
-#include "intel_drv.h"
 
 static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
 {
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 9b1fccea966b..2a1130dd1ad0 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -21,9 +21,9 @@
  * IN THE SOFTWARE.
  */
 
+#include "intel_display_types.h"
 #include "intel_dp.h"
 #include "intel_dp_link_training.h"
-#include "intel_drv.h"
 
 static void
 intel_dp_dump_link_status(const u8 link_status[DP_LINK_STATUS_SIZE])
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 4071c9da1065..b1a3df185f4c 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -32,10 +32,10 @@
 #include "intel_audio.h"
 #include "intel_connector.h"
 #include "intel_ddi.h"
+#include "intel_display_types.h"
 #include "intel_dp.h"
 #include "intel_dp_mst.h"
 #include "intel_dpio_phy.h"
-#include "intel_drv.h"
 
 static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
 					    struct intel_crtc_state *crtc_state,
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 7ccf7f3974db..749e5a74825f 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -23,8 +23,8 @@
 
 #include "display/intel_dp.h"
 
+#include "intel_display_types.h"
 #include "intel_dpio_phy.h"
-#include "intel_drv.h"
 #include "intel_sideband.h"
 
 /**
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index f9bdf8514a53..84a09d96d52f 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -21,9 +21,9 @@
 * DEALINGS IN THE SOFTWARE.
 */
 
+#include "intel_display_types.h"
 #include "intel_dpio_phy.h"
 #include "intel_dpll_mgr.h"
-#include "intel_drv.h"
 
 /**
  * DOC: Display PLLs
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.h b/drivers/gpu/drm/i915/display/intel_dsi.h
index 1cd24bd46518..b15be5814599 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.h
+++ b/drivers/gpu/drm/i915/display/intel_dsi.h
@@ -26,7 +26,8 @@
 
 #include
 #include
-#include "intel_drv.h"
+
+#include "intel_display_types.h"
 
 #define INTEL_DSI_VIDEO_MODE 0
 #define INTEL_DSI_COMMAND_MODE 1
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
index 8c33262cb0b2..bb3fd8b786a2 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
@@ -27,7 +27,7 @@
 #include